All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/8] net/ntnic: initial commit which adds register defines
@ 2023-08-16 13:25 Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                   ` (20 more replies)
  0 siblings, 21 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   29 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12004 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/*
+ * Bus types a module instance can be attached to; used as the bus_id of
+ * struct nt_fpga_module_init.
+ */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias: plain SPI maps to SPI0 */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/*
+ * Register access types; used as the type of struct nt_fpga_register_init.
+ * RW = read/write, RO = read-only, WO = write-only,
+ * RC1 = presumably clear-on-read -- confirm against register generator docs,
+ * MIXED = fields with differing access within one register.
+ */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/*
+ * Static description of one bit field within a register.
+ * Initialized from generated tables such as the ones in
+ * nthw_fpga_9563_055_024_0000.c.
+ */
+struct nt_fpga_field_init {
+	int id;             /* generated field id (FIELD_* define) */
+	uint16_t bw;        /* field width in bits */
+	uint16_t low;       /* lowest bit position within the register */
+	uint64_t reset_val; /* value of the field after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/*
+ * Static description of one register: its location, width, access type
+ * and the list of bit fields it contains.
+ */
+struct nt_fpga_register_init {
+	int id;                        /* generated register id (REG_* define) */
+	uint32_t addr_rel;             /* address relative to the module's addr_base */
+	uint16_t bw;                   /* total register width in bits */
+	nt_fpga_register_type_t type;  /* access type (RW/RO/WO/RC1/MIXED) */
+	uint64_t reset_val;            /* register value after reset */
+	int nb_fields;                 /* number of entries in fields[] */
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/*
+ * Static description of one module instance in the FPGA: identity,
+ * version, bus attachment, base address and its register table.
+ */
+struct nt_fpga_module_init {
+	int id;                 /* generated module id (MOD_* define) */
+	int instance;           /* instance number when a module occurs multiple times */
+	int def_id;             /* module definition id */
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;  /* bus the instance is reached through */
+	uint32_t addr_base;         /* base address on that bus */
+	int nb_registers;           /* number of entries in registers[] */
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* One (id, value) production parameter of an FPGA image. */
+struct nt_fpga_prod_param {
+	const int param_id;    /* generated parameter id (NT_* define) */
+	const int param_value; /* value baked into this FPGA build */
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/*
+ * Top-level static description of one supported FPGA image:
+ * identity/version information plus its parameter and module tables.
+ */
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time; /* build timestamp; encoding defined by the generator -- confirm */
+	int nb_prod_params;       /* number of entries in product_params[] */
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;           /* number of entries in modules[] */
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..99569c2843
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+# cflags
+# The generated register tables are built as C11.
+cflags += [
+    '-std=c11',
+]
+
+# includes
+# Driver-local include directories; 'nthw/supported' holds the generated
+# per-FPGA register definition files.
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+# Reuse the build system's default cflags when defined; otherwise fall
+# back to machine args and allow use of internal DPDK APIs.
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Umbrella header for the FPGA register model: pulls in the model types
+ * (fpga_model.h) and the generated module, parameter and register id
+ * definitions used by the supported FPGA tables.
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+/*
+ * CAT module: generated field layouts, one array per register.
+ * Field entries are { id, bit width, low bit, reset value }
+ * (see struct nt_fpga_field_init).
+ */
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+/*
+ * CAT register table. Entries are
+ * { id, relative address, width, access type, reset value,
+ *   number of fields, field table } (see struct nt_fpga_register_init).
+ */
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+/*
+ * CPY module: field layouts for writers 0-4; each writer has CTRL/DATA
+ * and MASK_CTRL/MASK_DATA register pairs with identical layouts.
+ */
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+/* CPY register table; writer n occupies relative addresses 4n..4n+3. */
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+/* CSU module: RCP control/data field layouts and register table. */
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+/*
+ * DBS module: field layouts for the RX and TX queue register sets.
+ * RX and TX mirror each other; a few fields carry non-zero reset values
+ * (e.g. the RX/TX CONTROL AMS/UWS fields and TX_QOS_RATE).
+ */
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/* DBS register table; CONTROL and QOS_RATE carry non-zero reset values. */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM module: field layouts (module register table continues further in
+ * this file). Note some reset values are non-zero, e.g. FLM_CONTROL
+ * SPLIT_SDRAM_USAGE = 16, LOAD_BIN = 8388607 and the FLM_PRIO FT* fields.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Two identical per-port instances (suffix 0/1) are laid out back to back.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Field names relate to IFG (inter-frame gap) and TS (timestamp) control.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Per-port PHY module control/status lines (LPMODE, RESET_B, MODPRS_B,
+ * INT_B, RXLOS); _B suffix suggests active-low signals — confirm with HW doc.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Recipe RAM accessed via CTRL (ADR/CNT) + wide DATA write register.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface) module map (auto-generated — regenerate, do not
+ * hand-edit).  Field entry: { id, bit width, lsb offset, reset };
+ * register entry: { id, index, width, access type, reset, field count,
+ * field table }.  Identification registers carry this image's identity:
+ * group 9563, version 55, revision 24, build time 1689706895 (epoch secs).
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Recipe RAM accessed via CTRL (ADR/CNT) + 743-bit DATA write register;
+ * field names (TOEPLITZ, SEED, K) indicate RSS hash configuration.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Recipe RAM via CTRL (ADR/CNT) + DATA; three MODIFn CMD/DYN/OFS/VALUE
+ * groups plus START/END/STRIP controls.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Recipe holds an enable bit plus a 14-bit MTU value.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Field names (SLV_ADR, CR/SR, TX/RX FIFO, T* timing registers) match an
+ * I2C controller core — presumably Xilinx AXI IIC; confirm against its
+ * register reference before changing anything here.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS module map (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }; register entry:
+ * { id, index, width, access type, reset, field count, field table }.
+ * Recipe holds DYN/OFS/LEN selectors.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM module field tables (auto-generated — regenerate, do not hand-edit).
+ * Field entry: { id, bit width, lsb offset, reset }.
+ * Tables cover CAM, recipe (RCP), status, TCAM and TCI/TCQ access;
+ * each *_CTRL pair (ADR/CNT) addresses the corresponding *_DATA RAM.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {	/* { field id, bit width, lsb offset, reset value } */
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {	/* 640-bit multi-word read-out */
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {	/* resets 10 + (10<<8) + (2<<17) compose register reset 264714 below */
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {	/* NOTE(review): presumably transceiver DRP access (addr/data/wren/done) - confirm against HW spec */
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {	/* per-lane FEC error counters, lanes 0-3 */
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {	/* per-lane RX transceiver controls, lanes 0-3 */
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {	/* per-lane 5-bit TX drive swing, reset 24 each lane */
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {	/* per-lane post-cursor emphasis, reset 20 each lane */
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {	/* resets (1<<8) + (1<<4) compose register reset 272 below */
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {	/* default max frame length 10000 */
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {	/* live PCS RX status; latched copy in _latch_ variant below */
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {	/* resets 1451 + (1440<<16) compose register reset 94373291 below */
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_pcs_registers[] = {	/* { reg id, address, bit width, access type, reset (fields OR'ed at offsets), field count, fields } */
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {	/* RX statistics counters: { field id, bit width, lsb offset, reset value } */
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {	/* { reg id, address, bit width, access type, reset, field count, fields } */
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {	/* TX statistics counters: { field id, bit width, lsb offset, reset value } */
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {	/* { reg id, address, bit width, access type, reset, field count, fields } */
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {	/* PCIe read traffic generator: { field id, bit width, lsb offset, reset value } */
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {	/* RDDATA0/1 hold the 64-bit physical address split low/high */
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {	/* { reg id, address, bit width, access type, reset, field count, fields } */
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_ta_control_fields[] = {	/* PCIe test analyzer: { field id, bit width, lsb offset, reset value } */
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {	/* { reg id, address, bit width, access type, reset, field count, fields } */
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {	/* PCIe write traffic generator: { field id, bit width, lsb offset, reset value } */
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {	/* WRDATA0/1 hold the 64-bit physical address split low/high */
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {	/* { reg id, address, bit width, access type, reset, field count, fields } */
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pdb_config_fields[] = {	/* PDB (packet descriptor builder): { field id, bit width, lsb offset, reset value } */
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {	/* 18 fields of the 67-bit PDB recipe record */
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {	/* { reg id, address, bit width, access type, reset, field count, fields } */
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t pdi_cr_fields[] = {	/* PDI control register bits: { field id, bit width, lsb offset, reset value } */
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {	/* 8-bit data receive register */
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {	/* 8-bit data transmit register */
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {	/* reset value 3 matches PDI_PRE register reset below */
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {	/* status: busy/error flags plus 7-bit RX/TX FIFO levels */
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {	/* { reg id, address, bit width, access type, reset, field count, fields } */
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {	/* PTP 1588 configuration: { field id, bit width, lsb offset, reset value } */
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {	/* GPIO value; _LH/_LL variants below latch high/low transitions */
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {	/* latched-low defaults to all-ones (511 / 1) */
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {	/* GPIO output enables */
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {	/* MAC management interface: address + access type, RDY resets to 1 */
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {	/* RX host buffer address split across LSB/MSB registers */
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {	/* TX packet word registers; LAST1-4 carry 1-4 trailing bytes */
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset, nb_fields, fields[] } */
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+static nt_fpga_field_init_t qspi_cr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_ssr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset (= OR of field resets at lsb), nb_fields, fields[] } */
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+/* NOTE(review): RAC register indices (4160..4480) are much larger than other
+ * modules' — presumably byte/word bus addresses rather than register numbers;
+ * confirm against the nt_fpga_register_init_t consumer. */
+static nt_fpga_register_init_t rac_registers[] = { /* { reg_id, addr, width(bits), access, reset, nb_fields, fields[] } */
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+static nt_fpga_field_init_t rfd_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = { /* { field_id, width(bits), lsb_pos, reset } — 33024 == 0x8100 (802.1Q TPID) */
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = { /* { field_id, width(bits), lsb_pos, reset } — 33024 == 0x8100 (802.1Q TPID) */
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = { /* { field_id, width(bits), lsb_pos, reset } — 4789 is the IANA VXLAN UDP port */
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset (= OR of field resets at lsb), nb_fields, fields[] } */
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+static nt_fpga_field_init_t rmc_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset (= OR of field resets at lsb), nb_fields, fields[] } */
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } — 128-bit wide data field */
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset, nb_fields, fields[] } */
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset, nb_fields, fields[] } */
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_rst_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rst9563_sticky_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset (= OR of field resets at lsb; 8191 = bits 0-12), nb_fields, fields[] } */
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset, nb_fields, fields[] } */
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t spim_cfg_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_sr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset (= OR of field resets at lsb), nb_fields, fields[] } */
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+static nt_fpga_field_init_t spis_cr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_sr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset (= OR of field resets at lsb), nb_fields, fields[] } */
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+static nt_fpga_field_init_t sta_byte_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset, nb_fields, fields[] } */
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+static nt_fpga_field_init_t tempmon_alarms_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset, nb_fields, fields[] } */
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+static nt_fpga_field_init_t tint_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset, nb_fields, fields[] } */
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = { /* { reg_id, addr(index?), width(bits), access, reset (= OR of field resets at lsb), nb_fields, fields[] } */
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+static nt_fpga_field_init_t tsm_con0_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } — cons 3-6 use a narrower layout than cons 0-2 */
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } — named _TIME, not _NS like the other cons; matches generated defs */
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = { /* { field_id, width(bits), lsb_pos, reset } */
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = { /* entries are { register id, offset, bit width, type, reset value, field count, fields }; 66 entries, matching the MOD_TSM module entry - column meaning presumed from the field tables above, confirm against fpga_model.h */
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = { /* entries are { module id, instance no, definition id, version major, version minor, bus, bus address, register count, registers }; the trailing per-entry comments are generator output encoding the same data */
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = { /* { parameter id, value } pairs, sorted by parameter name; list is terminated by { 0, -1 } (0 == NT_PARAM_UNKNOWN) */
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 }, /* presumably seconds since the Unix epoch; equals fpga_build_time in the product init struct */
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = { /* top-level descriptor for FPGA image 9563-55-24-0; the name encodes product/version/revision/patch */
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules, /* 140 = product_parameters entries (excl. END sentinel); 48 = fpga_modules entries */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[]; /* table of all supported FPGA product definitions; termination convention not visible here - presumably NULL-terminated, confirm at definition site */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000; /* FPGA image 9563-55-24-0 (product 9563, version 55, revision 24, patch 0) */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_MODULES_DEFS_H
+#define NTHW_FPGA_MODULES_DEFS_H
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L) /* NB: id 32 is unassigned */
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End-of-list sentinels: MOD_UNKNOWN_MAX and MOD_COUNT_MAX must stay one past
+ * the last real module id; only aliases may be added below them.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Total number of module ids (equals MOD_UNKNOWN_MAX) */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* NTHW_FPGA_MODULES_DEFS_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG - frame generator: 8 instances with CTRL/RUN/BURSTSIZE/SIZEMASK/STREAMID (acronym expansion unconfirmed) */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF - TX-side inter-frame-gap and timestamp-injection control (acronym expansion unconfirmed) */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY - GPIO for two PHY module ports (LPMODE/MODPRS_B/RESET_B/INT_B lines suggest QSFP-style modules) */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS - per-port module GPIO, ports 0-7 (RXLOS/TXDISABLE/TXFAULT) */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP - SFP+ module GPIO (ABS/RS/RXLOS/TXDISABLE/TXFAULT) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU - header field update recipes (L3/L4 offsets, length A/B/C rewrite, TTL rewrite, tunnel) */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF - host (PCIe) interface: product ID, build time, config, status, test and UUID registers */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH - receive hash recipes (Toeplitz/RSS key, QW/W word selection and masks) */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST - header strip/modify recipes (STRIP_MODE, start/end offsets, MODIF0-2) */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G - 10G PHY core: indirect register access, PHY status, test frame generator */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR - recipe with EN/MTU fields; presumably IP fragmentation / MTU enforcement - TODO confirm */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC - I2C controller; register set (CR/SR/ISR/IER/SOFTR/FIFOs/timing) mirrors the Xilinx AXI IIC layout */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS - insert recipes (DYN/LEN/OFS); presumably data insertion into frames - TODO confirm */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA - IO action recipes: VLAN push/pop/TPID, queue override, tunnel pop, ROA egress port */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF - IP fragment handling (unmatched-fragment queues, expiry/timeout, per-recipe group IDs) - TODO confirm expansion */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM - key matcher: CAM/TCAM banks, TCI/TCQ tables and match recipes */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO - no registers defined in this FPGA variant */
+/* MAC - MAC/PCS control and status, RX/TX statistics, link summary, raw and test frame generators */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX: RX-side MAC statistics register/field IDs (frame-size histogram, FCS/preamble/SFD errors, cast counters) */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG: TFG register/field IDs — presumably a test frame generator (ctrl/addr/data/header/repetition); confirm against HW docs */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX: TX-side MAC statistics register/field IDs (mirrors the MAC_RX counter layout for the transmit path) */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU: embedded-MCU register/field IDs — control/status (halt/pause/reset) plus IRAM/DRAM access windows (addr counter + data ports) */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG: register/field IDs for the MDG block (BSO/HBM/HBS tables, flow-control debug, byte/packet/timeout limits) — exact module semantics not visible here; confirm against FPGA docs */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: masking recipe (RCP) register/field IDs — per-recipe dyn/enable/length/offset for 4 mask slots. IDs 4915-4979 are unassigned here (gap in the generated numbering). */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF: network-interface link register/field IDs — GT/QPLL control and loopback, DRP access port, soft/hard error counters, per-endpoint link status (incl. sticky flags), and user test registers */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 (module listed in generated map; no register defines emitted for this variant) */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 (module listed in generated map; no register defines emitted for this variant) */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 (module listed in generated map; no register defines emitted for this variant) */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+/*
+ * Auto-generated register/field ID constants (continued from RST9538 above).
+ * IDs are globally sequential across the whole header; gaps in the numbering
+ * presumably correspond to modules/FPGA variants not included in this patch
+ * -- TODO confirm against the Napatech register-map generator output.
+ * Do not edit values by hand; they must match the generated FPGA map.
+ */
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+/* RXAUI */
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* NOTE(review): the SLC_LR section carries no IDs here -- presumably the
+ * module reuses the SLC register layout; confirm against the generator. */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+/* NOTE(review): CON5 names its field SAMPLE_LO_TIME while CON0-4/6 use
+ * SAMPLE_LO_NS -- looks like a generator naming quirk; confirm intended. */
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-16 13:25 ` Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                   ` (19 subsequent siblings)
  20 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  159 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  114 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   71 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15438 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 99569c2843..65064f44ab 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,13 +10,45 @@ cflags += [
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+/* Message used by static size checks of the profile tables. */
+#define clk_profile_size_error_msg "size test failed"
+
+/* Format 0: 8-bit register address, value and write mask. */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Format 1: 16-bit register address with an 8-bit value (no mask). */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Format 2: full-width register address with an 8-bit value (no mask). */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Selects which of the entry layouts above a profile table uses. */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/* Si5340 clock profile for the NT200A02 (U23) - defined in nthw_clock_profiles.c */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Populate p_fpga_info with the NIM/port/quad counts read from the FPGA
+ * product parameters, then classify the FPGA image profile (VSWITCH,
+ * CAPTURE or INLINE) from which feature parameters are non-zero.
+ * A missing parameter yields -1 (the default passed to fpga_get_product_param).
+ * Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	/* A queue manager indicates a capture image */
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	/* Fallback: neither vswitch nor capture features present */
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one byte from register n_reg_addr of I2C device n_dev_addr via the
+ * IIC controller at FPGA instance n_instance_no.
+ * Returns the byte value (0..255) on success, or -1 on init/read failure;
+ * since val is a uint8_t, -1 is always distinguishable from a valid byte.
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+/*
+ * Write the single byte val to register n_reg_addr of I2C device n_dev_addr
+ * via the IIC controller at FPGA instance n_instance_no.
+ * Returns 0 on success, -1 on init/write failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Scan each IIC controller instance in [n_instance_no_begin,
+ * n_instance_no_end] (inclusive) for attached I2C devices.
+ * Failures to allocate or init an instance are silently skipped
+ * (best-effort scan); always returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				/* Bounded retries so the scan cannot hang on a dead bus */
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Probe the I2C device at n_dev_addr for a Silicon Labs clock chip:
+ * select register page 0 via n_page_reg_addr, read an 8-byte ident block
+ * from register 0x00 and decode the part number from it.
+ * Returns 5340, 5341 or 5338 when a known part is recognized, -1 otherwise.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;	/* all-ones until a successful ident read */
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* Pack the 8 ident bytes big-endian into one value for logging */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			/*
+			 * NOTE(review): compared in decimal (38) unlike the hex
+			 * 0x53/0x40/0x41 checks above - presumably intentional for
+			 * the Si5338 ident encoding, but verify against the
+			 * device's register map.
+			 */
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ *
+ * Bytewise table-free formulation; seed starts at 0, which matches the
+ * XMODEM variant of CRC-CCITT (the 0xFFFF-init variant would differ).
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t seed = 0;
+
+	while (length--) {
+		/* byte-swap, fold in next byte, then mix per the 0x1021 polynomial */
+		seed = (uint16_t)(seed >> 8 | seed << 8);
+		seed = (uint16_t)(seed ^ *buffer++);
+		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
+		seed = (uint16_t)(seed ^ seed << 8 << 4);
+		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1);
+	}
+	return seed;
+}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ *
+ * Initialize the Si5340 clock synthesizer at 7-bit I2C address @n_iic_addr
+ * using the given format-2 clock profile (@n_clk_profile_rec_cnt records).
+ * Returns the status of the Si5340 configuration call.
+ *
+ * NOTE(review): the nthw_iic_t allocated here is handed to the Si5340
+ * object but is never explicitly deleted in this function - confirm that
+ * nthw_si5340_delete() (or a lower layer) takes ownership and releases it.
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+/*
+ * Probe and initialize the FPGA on the adapter described by @p_fpga_info.
+ *
+ * Reads the FPGA identification and build-time registers, looks up a
+ * matching FPGA model, initializes the RAC, runs the adapter-specific
+ * reset/init sequence, probes the host interface (PCIe3 with HIF as
+ * fallback) and programs the TSM timestamp format and T0/T1 timers.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	/* Read and decode the FPGA identification/build-time registers */
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/* Assign the function-scope n_fpga_ident directly - a local
+		 * redeclaration here used to shadow it (-Wshadow).
+		 */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	/* Look up a matching FPGA model for the identification just read */
+	p_fpga_mgr = fpga_mgr_new();
+	if (p_fpga_mgr == NULL) {
+		NT_LOG(ERR, NTHW, "%s: FPGA manager allocation failed\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	/* The manager is only needed for the query above - delete it on both
+	 * the success and the error path (it used to leak on error).
+	 */
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-specific reset/initialization */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Probe for a PCIe3 module; fall back to HIF below if not present */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	/* Program TSM timestamp format and the T0/T1 periodic timers */
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	/* guard added: the TSM allocation above may have failed */
+	if (p_nthw_tsm) {
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down the FPGA by resetting the RAC RAB.
+ *
+ * Returns the RAB reset status, or -1 when no adapter info or no RAC
+ * instance is available.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	/* single combined check - the old nested test repeated p_fpga_info */
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * nthw_fpga.h - FPGA probe/initialization entry points plus helper
+ * routines (AVR probe, I2C scan and byte access, clock synth setup).
+ *
+ * NOTE(review): "__NTHW_FPGA_H__" has a leading double underscore, which
+ * is reserved for the implementation (C11 7.1.3) - consider NTHW_FPGA_H.
+ */
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* Probe/initialize and shut down the FPGA on an adapter */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* Read FPGA build parameters into @p_fpga_info */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* Probe AVR instance @n_instance_no */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* Scan I2C instances in [n_instance_no_begin, n_instance_no_end] */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+/* Single-byte register access on an I2C device */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* Detect a Si labs device at @n_dev_addr via its page register */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* Initialize a Si5340 clock synth from a format-2 clock profile */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x adapter bring-up: run the common reset initialization, then
+ * dispatch to the product-specific reset sequence (currently only 9563).
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst_common;
+	int rc;
+
+	/* reset common */
+	rc = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst_common);
+	if (rc != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, rc);
+		return rc;
+	}
+
+	/* reset specific */
+	if (p_fpga_info->n_fpga_prod_id == 9563) {
+		rc = nthw_fpga_rst9563_init(p_fpga_info, &rst_common);
+	} else {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		rc = -1;
+	}
+	if (rc != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, rc);
+	}
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NT200A0x adapter-level FPGA initialization entry points */
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+/* Common reset init followed by product-specific reset dispatch */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Populate @p with field pointers for the RST9563 reset module of @p_fpga.
+ *
+ * Looks up the RST9563 module and caches pointers to its RST, CTRL, STAT,
+ * STICKY and POWER register fields. Fields that do not exist on the 9563
+ * are explicitly set to NULL; optional fields are looked up with
+ * register_query_field() and may be NULL.
+ *
+ * Returns 0 on success, -1 if the module is not found.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	/* Look up the reset module (a duplicated copy-paste lookup removed) */
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	/* duplicated mp_fld_rst_ptp assignment removed */
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 PERIPH reset field (set, then clear).
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+
+	if (p_mod_rst == NULL)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+
+	nt_register_t *p_rst_reg = module_get_register(p_mod_rst, RST9563_RST);
+	nt_field_t *p_periph_fld =
+		register_get_field(p_rst_reg, RST9563_RST_PERIPH);
+
+	field_set_flush(p_periph_fld);
+	field_clr_flush(p_periph_fld);
+
+	return 0;
+}
+
+/*
+ * Initialize the board clock synthesizer; only the Si5340 (model 5340)
+ * is supported here. Returns the synth init status, or -1 for an
+ * unsupported model.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Full RST9563 initialization: peripheral reset, clock synthesizer setup,
+ * reset-field pointer setup, then the common NT200A0x reset sequence.
+ * Returns 0 on success; the first failing step's status otherwise.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *p_fpga = p_fpga_info->mp_fpga;
+	const int n_clk_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_clk_synth_i2c_addr =
+		p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int rc;
+
+	rc = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	rc = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_clk_synth_model,
+						n_clk_synth_i2c_addr);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	rc = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	rc = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+	}
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..f519020c8e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+/*
+ * Wait until the DDR4 PLL has locked (with retries), then until the DDR4
+ * MMCM - and the TSM REF MMCM when present - have locked, and finally
+ * clear and verify the sticky MMCM/PLL unlock bits.
+ *
+ * Fixes vs. original: the loop-local "int locked" shadowed the outer
+ * "uint32_t locked" (the two carried independent values), the outer
+ * variable's unsigned type did not match the "%d" log format nor a
+ * possibly negative wait result, and "retrycount <= 0" on an unsigned
+ * type could only ever mean "== 0". All lock results now share one
+ * signed variable and the retry counter is signed.
+ *
+ * Returns 0 on success, -1 when a required MMCM never locks.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	int retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Release DDR PLL reset */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* The TSM REF MMCM status field is not present on all FPGA images */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					      -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+/*
+ * Wait for the SDRAM controller (SDC) to complete calibration - on some
+ * adapters calibration times of 2.3 seconds have been observed.
+ *
+ * The SDC module is optional: when probing fails, only the DDR4 PLL lock
+ * is awaited. Fix vs. original: the retry-exhausted diagnostics branch
+ * called nthw_sdc_get_states() even when no SDC instance existed
+ * (p_nthw_sdc == NULL); that call is now guarded.
+ *
+ * Returns 0 on success, non-zero on timeout/failure.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					/* NOTE(review): "%08lX" assumes uint64_t
+					 * is "long" - not portable to 32-bit;
+					 * consider "%" PRIX64 - confirm targets.
+					 */
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				/* Guard: no SDC instance when probing failed */
+				if (p_nthw_sdc) {
+					uint64_t n_result_mask;
+					int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+										       &n_result_mask);
+
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask, n_state_code,
+					       n_retry_cnt, res);
+				}
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Full NT200A0x FPGA reset sequence.
+ *
+ * Asserts every domain/module reset except the peripherals, releases them
+ * in the documented step order, and verifies the associated MMCM/PLL lock
+ * and sticky-unlock status bits along the way. The order of register
+ * accesses is significant and must not be changed casually.
+ *
+ * Changes vs. original: the permanently disabled "if (false)" PTP MMCM
+ * clock-select sequence was removed (dead code), and the redundant
+ * "(true) &&" conjuncts were dropped - no live register access changed.
+ *
+ * Returns 0 on success, -1 when a required lock/calibration step fails.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if (p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB busses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB busses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if (p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* Dead "if (false)" PTP MMCM clock-select sequence removed here */
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Probe the NT200A0x board and detect its Si-Labs clock synthesizer.
+ *
+ * Resets/sets up the RAB bus, then probes for a Si5340 (NT200A02 /
+ * NT200A01 HW-build2) and falls back to a Si5338 (old NT200A01
+ * HW-build1). On success the detected model, I2C address and HW id are
+ * stored in @p_rst for later use by the reset sequence.
+ *
+ * Returns -1 when no supported synthesizer is detected; otherwise the
+ * value of @res (see review notes below).
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): the results of the AVR probe and the two IIC scans
+	 * below are each overwritten without being checked - presumably
+	 * best-effort discovery, but confirm failures here are harmless.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Cache detection results for the later reset sequence */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	/* NOTE(review): on success this returns the last iic_scan result,
+	 * not 0 - confirm callers treat any non-negative value as success.
+	 */
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset-controller state for NT200A0x adapters: cached FPGA identity,
+ * detected clock-synthesizer info, and pointers to the RST module's
+ * register fields. Field pointers that are absent on some FPGA images
+ * are left NULL and are NULL-checked by the reset code before use.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synthesizer (e.g. Si5338/Si5340) */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (reset bits per domain/module) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers (latched MMCM/PLL unlock events) */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional per-adapter callbacks for SerDes/PCS RX reset handling */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate a zero-initialized GPIO_PHY object.
+ * Returns NULL on allocation failure; free with nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/*
+ * Release a GPIO_PHY object; accepts NULL.
+ */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before returning it to the allocator */
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY object to FPGA module instance @n_instance.
+ *
+ * With @p == NULL this only probes for the module: returns 0 when it
+ * exists, -1 otherwise. Mandatory fields use register_get_field();
+ * fields that are absent on some FPGA images use register_query_field()
+ * and may come back NULL.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+	gpio_phy_fields_t *pf0;
+	gpio_phy_fields_t *pf1;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	pf0 = &p->mpa_fields[0];
+	pf1 = &p->mpa_fields[1];
+
+	/* PORT-0, mandatory CFG fields */
+	pf0->cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	pf0->cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	pf0->cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	pf0->cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, optional CFG fields (may be NULL) */
+	pf0->cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	pf0->cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, mandatory CFG fields */
+	pf1->cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	pf1->cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	pf1->cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	pf1->cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, optional CFG fields (may be NULL) */
+	pf1->cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	pf1->cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, mandatory GPIO fields */
+	pf0->gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	pf0->gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	pf0->gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	pf0->gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, optional GPIO fields (may be NULL) */
+	pf0->gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	pf0->gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, mandatory GPIO fields */
+	pf1->gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	pf1->gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	pf1->gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	pf1->gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, optional GPIO fields (may be NULL) */
+	pf1->gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	pf1->gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/*
+ * Return true when the GPIO LPMODE pin for port @if_no reads back set.
+ */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* Direct boolean return instead of if/else true/false */
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/*
+ * Return true when the port interrupt is active for port @if_no.
+ */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" - pin low == interrupt */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/*
+ * Return true when reset is asserted for port @if_no.
+ */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" - pin low == in reset */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/*
+ * Return true when a transceiver module is present in port @if_no.
+ */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" - pin low == present */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/*
+ * Return true when the PLL interrupt pin is set for port @if_no.
+ * Always false when the FPGA image has no PLL_INTR field.
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_pll_int) {
+		/* this HW doesn't support "PLL_INTR" (INTR from SyncE jitter attenuater) */
+		return false;
+	}
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/*
+ * Return true when the emulated RXLOS pin is set for port @if_no.
+ * Always false when the FPGA image has no RXLOS field.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_port_rxlos)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/*
+ * Drive the LPMODE pin for port @if_no and make it an output.
+ */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *pf;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	pf = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(pf->gpio_fld_lp_mode);
+	else
+		field_clr_flush(pf->gpio_fld_lp_mode);
+	field_clr_flush(pf->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Drive the active-low RESET_B pin for port @if_no and make it an output.
+ * enable==true asserts reset (pin low).
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *pf;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	pf = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(pf->gpio_reset);
+	else
+		field_set_flush(pf->gpio_reset);
+	field_clr_flush(pf->cfg_reset); /* enable output */
+}
+
+/*
+ * Drive the emulated RXLOS pin for port @if_no.
+ * Silently ignored when the FPGA image has no RXLOS field.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *pf;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	pf = &p->mpa_fields[if_no];
+
+	if (!pf->gpio_port_rxlos)
+		return;
+
+	if (enable)
+		field_set_flush(pf->gpio_port_rxlos);
+	else
+		field_clr_flush(pf->gpio_port_rxlos);
+}
+
+/*
+ * Restore the default pin directions for port @if_no: all control pins
+ * as inputs, and the emulated RXLOS (when present) as an output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	gpio_phy_fields_t *pf;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	pf = &p->mpa_fields[if_no];
+
+	field_set_flush(pf->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(pf->cfg_int); /* enable input */
+	field_set_flush(pf->cfg_reset); /* enable input */
+	field_set_flush(pf->cfg_mod_prs); /* enable input */
+	if (pf->cfg_port_rxlos)
+		field_clr_flush(pf->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-interface field handles (config direction bits + GPIO data bits) */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* GPIO_PHY module instance: FPGA handle plus per-interface field tables */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* Allocate a zero-initialized HIF instance; NULL on allocation failure */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the handle before releasing it */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all HIF registers/fields and read the FPGA product identification.
+ * Returns 0 on success, -1 when the HIF module instance does not exist.
+ * When p is NULL the call only probes for the module's existence.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Build seed and core/DDR3 speed Reg/Fld are not present on HIF.
+	 * (Replaces always-false if/else chains that could never take the
+	 * non-NULL branch.)
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* Statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* Optional status register (tags in use, read/write errors) */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/*
+		 * Register absent: record the fields as absent too.
+		 * (The original re-queried the absent register and passed the
+		 * resulting NULL pointer to register_query_field().)
+		 */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT40E3)
+		return 0;
+
+	if (p->mp_fld_max_read) {
+		/*
+		 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+		 * (index=5), but the server crashed. For now we need to limit this value to
+		 * 512 (index=2)
+		 */
+		const uint32_t n_max_read_req_size =
+			field_get_updated(p->mp_fld_max_read);
+		if (n_max_read_req_size > 2) {
+			field_set_val_flush32(p->mp_fld_max_read, 2);
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+			       p_adapter_id_str, __func__,
+			       n_max_read_req_size);
+		}
+	}
+
+	if (p->mp_fld_ext_tag)
+		field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+	if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+		NT_LOG(INF, NTHW,
+		       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+		       p_adapter_id_str, __func__,
+		       field_get_updated(p->mp_fld_max_tlp),
+		       field_get_updated(p->mp_fld_max_read),
+		       field_get_updated(p->mp_fld_ext_tag));
+	}
+
+	return 0;
+}
+
+/*
+ * Kick the TSM sample-time register with the trigger magic value.
+ * Returns 0 on success, -1 when the sample-time field is not present
+ * (nthw_hif_init() leaves mp_fld_sample_time NULL in that case; the
+ * original dereferenced it unconditionally).
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	/* Raw RX/TX counters and the reference-clock tick count */
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* The status fields are optional; report 0 when absent */
+	*p_tags_in_use = 0;
+	*p_rd_err = 0;
+	*p_wr_err = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt, tg_unit_size, tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size, &tg_ref_freq,
+			&n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	/* Accumulate error events into the caller's running counters */
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (!ref_clk_cnt) {
+		/* No time base elapsed - no rates can be derived */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = ref_clk_cnt;
+	/* rate = counter * unit size * ref-clock frequency / elapsed ticks */
+	*p_pci_rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			(uint64_t)ref_clk_cnt;
+	*p_pci_tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			(uint64_t)ref_clk_cnt;
+
+	return 0;
+}
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	/* Enable statistics and request sampling. ENA and REQ are fields of
+	 * the same HIF_STAT_CTRL register, so the single flush of the REQ
+	 * field's register writes both shadow values to hardware at once.
+	 */
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	/* Disable statistics while keeping REQ set; both fields live in
+	 * HIF_STAT_CTRL, so the one flush writes the whole register.
+	 */
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample PCIe statistics over a fixed 100 ms window: enable the counters,
+ * block for the window, disable them, then convert the counts to rates.
+ * NOTE: this blocks the calling thread for 100 ms.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	/*
+	 * NOTE(review): nthw_hif_get_stat_rate()'s first output parameter is
+	 * the PCI RX rate, which is stored here in epc->cur_tx (and the TX
+	 * rate in epc->cur_rx). Possibly intentional (host vs. adapter point
+	 * of view) - confirm against the consumers of cur_rx/cur_tx.
+	 */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * HIF (Host Interface) module instance.
+ * Caches the register/field handles resolved in nthw_hif_init() together
+ * with the product identification and reference-clock parameters read
+ * from the FPGA. Handles marked "not present on HIF" in init stay NULL.
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* Product identification */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - always NULL (see nthw_hif_init) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt mask/clear/force - not present on HIF, always NULL */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	/* TSM sample time trigger (may be NULL when absent) */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional status register; fields may be NULL */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* PCIe statistics control and counters */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* pci_test2/3 are never resolved by nthw_hif_init() */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* Optional PCIe3 TLP/read-request/extended-tag configuration */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* Product id values read once at init */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe endpoint error counters (correctable / non-fatal / fatal) */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Sampled traffic and error state for one PCIe endpoint */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	/* Traffic-generator parameters */
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	/* Latest sampled rates and reference-clock tick count */
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	/* Root-complex<->endpoint error snapshots and their deltas */
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/* Pulse CR.TXFIFO_RESET: raise the bit, flush, lower it, flush again.
+ * The two separate flushes are required - the bit must be observed high
+ * and then low by the controller for the FIFO reset to complete.
+ */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/* Push one entry (data byte plus optional START/STOP markers) to the TX FIFO */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	/* Stage the START/STOP flags in the shadow register */
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	/* Stage the data byte, then write the whole entry in one flush */
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the controller's RX FIFO into *p_data */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+	return 0;
+}
+
+/* Soft-reset the I2C controller by writing the reset key 0x0A to SOFTR */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Enable the I2C controller: refresh the CR shadow, set EN and flush */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report via *pb_flag whether the SR.BB (bus busy) status bit is set */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+	return 0;
+}
+
+/* Report via *pb_flag whether the SR.RXFIFO_EMPTY status bit is set */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ */
+/*
+ * Program the controller timing registers.
+ * n_iic_cycle_time is the controller clock cycle time in ns (125 MHz = 8 ns);
+ * each minimum I2C timing (in ns) is converted into clock ticks.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	uint32_t n_ticks;
+
+	n_ticks = susta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusta, &n_ticks, 1);
+
+	n_ticks = susto / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusto, &n_ticks, 1);
+
+	n_ticks = hdsta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thdsta, &n_ticks, 1);
+
+	n_ticks = sudat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsudat, &n_ticks, 1);
+
+	n_ticks = buf / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tbuf, &n_ticks, 1);
+
+	n_ticks = high / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thigh, &n_ticks, 1);
+
+	n_ticks = low / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tlow, &n_ticks, 1);
+
+	n_ticks = hddat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thddat, &n_ticks, 1);
+
+	return 0;
+}
+
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* Allocate a zero-initialized IIC instance; NULL on allocation failure */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Resolve all IIC registers/fields, then bring the controller up:
+ * soft reset, enable, program timing (when a cycle time is given) and
+ * reset the TX FIFO. Returns 0 on success, -1 when the IIC module
+ * instance does not exist. When p is NULL the call only probes for the
+ * module's existence.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All negative arguments: install the default retry parameters */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Timing registers */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* FIFO registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the handle before releasing it */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Set poll-delay and retry budgets; any negative argument selects its default */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry = (n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/* Read data_len bytes from dev_addr/reg_addr into p_void, with retries */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+	uint8_t *p_dst = (uint8_t *)p_void;
+	int n_attempts_left = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	/* Retry the transfer until it succeeds or the retry budget runs out */
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, p_dst) != 0) {
+		n_attempts_left--;
+		if (n_attempts_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_attempts_left,
+		       dev_addr, reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len,
+		       n_attempts_left);
+	}
+
+	return 0;
+}
+
+/*
+ * Single I2C read transaction: write the register address, then read
+ * data_len bytes into p_byte. Returns 0 on success, -1 on bus/data
+ * timeout. Restructured with guard clauses; the original had an
+ * unreachable trailing `return 0;` after an if/else whose both branches
+ * returned.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/* Write data_len bytes from p_void to dev_addr/reg_addr, with retries */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t *p_src = (uint8_t *)p_void;
+	int n_attempts_left = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+
+	/* Retry the transfer until it succeeds or the retry budget runs out */
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, p_src) != 0) {
+		n_attempts_left--;
+		if (n_attempts_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_attempts_left,
+		       dev_addr, reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Single I2C write transaction: send dev_addr + reg_addr, then data_len
+ * bytes from p_byte. Returns 0 on success, -1 when data_len is 0 or the
+ * bus is not ready to start.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	/* All bytes except the last go out without a STOP marker */
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		/*
+		 * NOTE(review): this inner wait loop has no retry bound or
+		 * delay - if the bus never returns to idle the thread spins
+		 * forever. Consider a bounded wait like nthw_iic_bus_ready()'s
+		 * own retry budget - confirm intended behavior.
+		 */
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ * Polls SR.BB until the bus goes idle or the retry budget is spent;
+ * returns true when the bus is idle.
+ *
+ * Fix: the original returned `count == 0 ? false : true`, so with a
+ * retry budget of 0 the counter decremented to -1 on timeout and the
+ * function wrongly reported the bus as ready. Report the last sampled
+ * bus state instead.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	return !b_bus_busy;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+/*
+ * Support function for read function. Waits for data ready.
+ * Polls SR.RXFIFO_EMPTY until data arrives or the retry budget is spent;
+ * returns true when the RX FIFO holds data.
+ *
+ * Fix: the original returned `count == 0 ? false : true`, so with a
+ * retry budget of 0 the counter decremented to -1 on timeout and the
+ * function wrongly reported data as ready. Report the last sampled
+ * FIFO state instead.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	return !b_rx_fifo_empty;
+}
+
+/* Probe one device address by reading a single byte; 0 when it responds */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = -1;
+	const int res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr,
+					 (uint8_t)n_reg_addr, 1, &data_val);
+
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/* Scan up (b_increate) or down from n_dev_addr_start for a responding
+ * device; returns its address, or -1 when none answers.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_step = b_increate ? 1 : -1;
+	int res = 0;
+	int i;
+
+	for (i = n_dev_addr_start; b_increate ? (i < 128) : (i >= 0); i += n_step) {
+		res = nthw_iic_scan_dev_addr(p, i, 0x00);
+		if (res == 0)
+			break;
+	}
+
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit device address; hits are logged by the probe helper */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev_addr;
+
+	for (n_dev_addr = 0; n_dev_addr < 128; n_dev_addr++)
+		(void)nthw_iic_scan_dev_addr(p, n_dev_addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/*
+ * State for one FPGA IIC (I2C) controller instance.
+ *
+ * Caches the controller's register and field handles (resolved once at
+ * init time) together with the poll-delay and retry budgets used by the
+ * polling helpers below.
+ */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_iic;
+	int mn_iic_instance;
+
+	/* Bus cycle time plus software polling/retry parameters */
+	uint32_t mn_iic_cycle_time;
+	int mn_poll_delay;
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* I2C bus timing registers (setup/hold/high/low/buffer times) */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register (enable, master mode, TX FIFO reset, TX ack) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register (bus busy, RX/TX FIFO full/empty flags) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO register (data byte plus start/stop condition bits) */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt compare value */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data register */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register (written with a reset key) */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Lifecycle: allocate, bind to an FPGA instance, tear down */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Override the default poll-delay/retry budgets used while polling */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Byte-oriented register read/write transfers over the I2C bus */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Polling predicates: bus idle / RX data available */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers (log responding device addresses) */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..d206c374b4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+/* Number of PCS virtual lanes handled by this module */
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate and zero-initialize a MAC/PCS instance.
+ *
+ * Returns NULL on allocation failure. The caller owns the object and
+ * must release it with nthw_mac_pcs_delete().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Release a MAC/PCS instance; NULL is accepted and ignored. */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the state before release to ease use-after-free debugging */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	/* The port number is stored as uint8_t - guard the range */
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		/* Resolve and cache every register/field handle up front */
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			/*
+			 * NOTE(review): mask covers only bit 0 although the
+			 * comments below mention 4 GTYs - confirm this is
+			 * intended (one reset-done bit per field?).
+			 */
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP - optional field, may be absent */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL - combined register on older FPGAs, split RX/TX on newer */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			/* Fall back to the split GTY_CTL_RX / GTY_CTL_TX registers */
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP - optional register, may be absent */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/* Select the host as TX source when enable is true, otherwise the
+ * traffic generator (TFG); the two selections are always set opposite.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS RX path. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Enable or disable the PCS TX path. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select (or deselect) the host as the TX data source. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select (or deselect) the traffic generator (TFG) as the TX data source. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select EOP (true) or SOP (false) timestamping, when the field exists. */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	/* Optional field - only present on FPGAs that support it */
+	if (!fld)
+		return;
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* True only when every masked virtual lane reports both block and lane lock. */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) &
+		p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) &
+		p->m_fld_vl_demuxed_lock_mask;
+
+	return block_lock == p->m_fld_block_lock_lock_mask &&
+	       lane_lock == p->m_fld_vl_demuxed_lock_mask;
+}
+
+/* Assert (true) or release (false) the PCS TX path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (true) or release (false) the PCS RX path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Report whether the PCS RX path is currently held in reset. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Start (true) or stop (false) sending Remote Fault Indication on TX. */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (true) or release (false) the PCS RX force-resync control. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	/* Read the current hardware value before setting/clearing the bit */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* True when every masked GTY RX reset-done field reports done. */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *const done_flds[] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	size_t i;
+
+	/* Stop at the first field that has not completed reset */
+	for (i = 0; i < ARRAY_SIZE(done_flds); i++) {
+		if ((field_get_updated(done_flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/* True when every masked GTY TX reset-done field reports done. */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *const done_flds[] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	size_t i;
+
+	/* Stop at the first field that has not completed reset */
+	for (i = 0; i < ARRAY_SIZE(done_flds); i++) {
+		if ((field_get_updated(done_flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/* Enable or disable host-side loopback on all four GTY lanes. */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Write 2 to each GT loop field to enable, 0 to disable */
+	const uint32_t mode = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/* Enable or disable line-side loopback on all four GTY lanes. */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Write 4 to each GT loop field to enable, 0 to disable */
+	const uint32_t mode = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP8 error counters (presumably read-to-clear,
+ * hence "reset" - TODO confirm against the register spec) and, in
+ * DEBUG builds, log any lane with a non-zero count.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Read the PCS RX status into *status (lowest status bit only). */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* Return the current PCS RX high bit-error-rate (HI_BER) flag. */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the LINK_SUMMARY register once and return the requested fields.
+ * Every output pointer may be NULL, in which case that field is skipped.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t blk = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t blk_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+
+	if (blk != 0 && blk != blk_mask)
+		return true;
+	return lane != 0 && lane != lane_mask;
+}
+
+/*
+ * Enable or bypass RS-FEC, then reset both PCS paths so the new FEC
+ * state takes effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	/* 0 enables FEC; all five control bits set bypasses it */
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+			    enable ? 0 : (1 << 5) - 1);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Return the FEC bypass status flag. */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* Return the FEC valid status flag. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* Return the FEC lane alignment status flag. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+/* True when at least one FEC alignment-marker lock bit is set. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True only when all four FEC alignment-marker lock bits are set. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/*
+ * Log a snapshot of the FEC_STAT register fields.
+ *
+ * Fix: the format string labelled the fifth value "AM_LOCK_0" a second
+ * time although the value printed there is am_lock3 - corrected to
+ * "AM_LOCK_3".
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
{
	/* Write the LPM-enable bit for all four lanes; only the last write
	 * flushes, so the lanes are updated in a single register flush.
	 */
	register_update(p->mp_reg_gty_ctl);
	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);

	/* Toggle reset: assert the equalization reset on all four lanes,
	 * wait, then de-assert so the new mode takes effect.
	 */
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);

	NT_OS_WAIT_USEC(1000); /* 1ms */

	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);

	/* NOTE(review): compares against c_mac_pcs_receiver_mode_dfe while the
	 * header defines nthw_mac_pcs_receiver_mode_dfe — verify both exist.
	 */
	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
}
+
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
/* Record the logical port number; used only for log messages in this module. */
void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
{
	p->m_port_no = port_no;
}
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
/* LED mode values accepted by nthw_mac_pcs_set_led_mode(). */
enum nthw_mac_pcs_led_mode_e {
	NTHW_MAC_PCS_LED_AUTO = 0x00,
	NTHW_MAC_PCS_LED_ON = 0x01,
	NTHW_MAC_PCS_LED_OFF = 0x02,
	NTHW_MAC_PCS_LED_PORTID = 0x03,
};

/*
 * Receiver equalization modes for
 * nthw_mac_pcs_set_receiver_equalization_mode(): DFE vs. LPM (Xilinx UG578).
 * NOTE(review): nthw_mac_pcs.c compares against c_mac_pcs_receiver_mode_dfe —
 * verify these are the constants actually in use.
 */
#define nthw_mac_pcs_receiver_mode_dfe (0)
#define nthw_mac_pcs_receiver_mode_lpm (1)
+
/*
 * Per-instance state for one MAC_PCS FPGA module: the port number plus shadow
 * handles for every register and field the accessors in nthw_mac_pcs.c touch.
 * Handles are presumably resolved once by nthw_mac_pcs_init() — confirm.
 * NOTE(review): member prefixes mix mp_fld_ and mp_field_ — consider unifying.
 */
struct nthw_mac_pcs {
	uint8_t m_port_no;

	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_mac_pcs;
	int mn_instance;

	/* Block lock status */
	nt_field_t *mp_fld_block_lock_lock;
	uint32_t m_fld_block_lock_lock_mask;

	/* Lane lock status */
	nt_field_t *mp_fld_vl_demuxed_lock;
	uint32_t m_fld_vl_demuxed_lock_mask;

	/* GTY_STAT */
	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
	uint32_t m_fld_gty_stat_rx_rst_done_mask;
	uint32_t m_fld_gty_stat_tx_rst_done_mask;

	/* GTY_LOOP */
	nt_register_t *mp_reg_gty_loop;
	nt_field_t *mp_fld_gty_loop_gt_loop0;
	nt_field_t *mp_fld_gty_loop_gt_loop1;
	nt_field_t *mp_fld_gty_loop_gt_loop2;
	nt_field_t *mp_fld_gty_loop_gt_loop3;

	/* MAC_PCS_CONFIG */
	nt_field_t *mp_fld_pcs_config_tx_path_rst;
	nt_field_t *mp_fld_pcs_config_rx_path_rst;
	nt_field_t *mp_fld_pcs_config_rx_enable;
	nt_field_t *mp_fld_pcs_config_rx_force_resync;
	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
	nt_field_t *mp_fld_pcs_config_tx_enable;
	nt_field_t *mp_fld_pcs_config_tx_send_idle;
	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
	nt_field_t *mp_fld_pcs_config_tx_test_pattern;

	/* STAT PCS */
	nt_field_t *mp_fld_stat_pcs_rx_status;
	nt_field_t *mp_fld_stat_pcs_rx_aligned;
	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;

	/* STAT_PCS_RX_LATCH */
	nt_field_t *mp_fld_stat_pcs_rx_latch_status;

	/* PHYMAC_MISC */
	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
	nt_field_t *mp_fld_phymac_misc_ts_eop;

	/* LINK_SUMMARY */
	nt_register_t *mp_reg_link_summary;
	nt_field_t *mp_fld_link_summary_abs;
	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
	nt_field_t *mp_fld_link_summary_lh_abs;
	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
	nt_field_t *mp_fld_link_summary_link_down_cnt;
	nt_field_t *mp_fld_link_summary_nim_interr;
	nt_field_t *mp_fld_link_summary_lh_local_fault;
	nt_field_t *mp_fld_link_summary_lh_remote_fault;
	nt_field_t *mp_fld_link_summary_local_fault;
	nt_field_t *mp_fld_link_summary_remote_fault;

	/* BIP_ERR */
	nt_register_t *mp_reg_bip_err;
	nt_field_t *mp_fld_reg_bip_err_bip_err;

	/* FEC_CTRL */
	nt_register_t *mp_reg_fec_ctrl;
	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;

	/* FEC_STAT */
	nt_register_t *mp_reg_fec_stat;
	nt_field_t *mp_field_fec_stat_bypass;
	nt_field_t *mp_field_fec_stat_valid;
	nt_field_t *mp_field_fec_stat_am_lock0;
	nt_field_t *mp_field_fec_stat_am_lock1;
	nt_field_t *mp_field_fec_stat_am_lock2;
	nt_field_t *mp_field_fec_stat_am_lock3;
	nt_field_t *mp_field_fec_stat_fec_lane_algn;

	/* FEC Corrected code word count */
	nt_register_t *mp_reg_fec_cw_cnt;
	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;

	/* FEC Uncorrected code word count */
	nt_register_t *mp_reg_fec_ucw_cnt;
	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;

	/* GTY_RX_BUF_STAT */
	nt_register_t *mp_reg_gty_rx_buf_stat;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;

	/* GTY_PRE_CURSOR */
	nt_register_t *mp_reg_gty_pre_cursor;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;

	/* GTY_DIFF_CTL */
	nt_register_t *mp_reg_gty_diff_ctl;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;

	/* GTY_POST_CURSOR */
	nt_register_t *mp_reg_gty_post_cursor;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;

	/* GTY_CTL */
	nt_register_t *mp_reg_gty_ctl;
	nt_register_t *mp_reg_gty_ctl_tx;
	nt_field_t *mp_field_gty_ctl_tx_pol0;
	nt_field_t *mp_field_gty_ctl_tx_pol1;
	nt_field_t *mp_field_gty_ctl_tx_pol2;
	nt_field_t *mp_field_gty_ctl_tx_pol3;
	nt_field_t *mp_field_gty_ctl_rx_pol0;
	nt_field_t *mp_field_gty_ctl_rx_pol1;
	nt_field_t *mp_field_gty_ctl_rx_pol2;
	nt_field_t *mp_field_gty_ctl_rx_pol3;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;

	/* DEBOUNCE_CTRL */
	nt_register_t *mp_reg_debounce_ctrl;
	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;

	/* TIMESTAMP_COMP */
	nt_register_t *mp_reg_time_stamp_comp;
	nt_field_t *mp_field_time_stamp_comp_rx_dly;
	nt_field_t *mp_field_time_stamp_comp_tx_dly;

	/* STAT_PCS_RX */
	nt_register_t *mp_reg_stat_pcs_rx;

	/* STAT_PCS_RX_LATCH */
	nt_register_t *mp_reg_stat_pcs_rx_latch;

	/* PHYMAC_MISC */
	nt_register_t *mp_reg_phymac_misc;

	/* BLOCK_LOCK */
	nt_register_t *mp_reg_block_lock;
};

typedef struct nthw_mac_pcs nthw_mac_pcs_t;
typedef struct nthw_mac_pcs nthw_mac_pcs;
+
/* Lifecycle: allocate, bind to an FPGA module instance, free. */
nthw_mac_pcs_t *nthw_mac_pcs_new(void);
int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);

/* Datapath resets, enables and lock/reset status. */
bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
			     bool enable); /* wrapper - for ease of use */
void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);

/* Link summaries; all output pointers must be non-NULL. */
void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
				uint32_t *p_status_latch, uint32_t *p_aligned,
				uint32_t *p_local_fault, uint32_t *p_remote_fault);

void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
			       uint32_t *p_ll_nt_phy_link_state,
			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
			       uint32_t *p_lh_local_fault,
			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
			       uint32_t *p_remote_fault);

/* FEC control, status and counters. */
bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);

/* GTY transceiver tuning, polarity and equalization (lane is 0..3). */
bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);

/* Raw lock field/mask accessors. */
uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (f) {
+		field_get_updated(f);
+		if (set)
+			field_set_flush(f);
+		else
+			field_clr_flush(f);
+	}
+}
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	nthw_mac_pcs_xxv_t *p = malloc(sizeof(nthw_mac_pcs_xxv_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+	return p;
+}
+
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+		free(p);
+	}
+}
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
/* Record the logical port number; used only for log messages in this module. */
void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
{
	p->m_port_no = port_no;
}
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r =
+			&p->regs[index]; /* register and fields */
+
+	assert(p);
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
/* Select receiver equalization: DFE (enable=true) or LPM (enable=false),
 * then pulse the equalization reset so the new setting takes effect.
 */
void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
{
	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
	const bool set_dfe =
		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */

	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);

	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
	field_get_updated(f);
	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
	field_get_updated(f);
	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
	field_get_updated(f);
}
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
/* Configure the direct-attach-cable mode. Only NTHW_MAC_PCS_XXV_DAC_OFF is
 * implemented; the call sequence below (disable AN/LT, then pulse the
 * MAC/PCS and GT data resets) is order-sensitive — do not reorder.
 */
void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
			      uint8_t index)
{
	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
		/* Turn off auto-negotiation and link training */
		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
		/* Pulse MAC/PCS and GT data resets: assert all, then release */
		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);

		return;
	}
	assert(0); /* If you end up here you need to implement other DAC modes */
}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Return the latched-low RX FEC74 lock indication for this channel. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f);
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Return the latched-low RX RS-FEC lane alignment indication for this channel. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_0
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_0
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* LED mode values accepted by nthw_mac_pcs_xxv_set_led_mode(). */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/*
+ * DAC mode values accepted by nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): CA_25G_N/S/L presumably denote 25G copper cable-assembly
+ * classes — confirm against the FPGA register documentation.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Driver state for one MAC_PCS_XXV FPGA module instance.
+ * Holds the FPGA/module handles plus, per sub-module/channel, the resolved
+ * register and field handles used by the nthw_mac_pcs_xxv_* accessors.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+/* Maximum number of sub-modules/channels handled per module instance */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		/*
+		 * NOTE(review): not populated by the visible init code (which sets
+		 * lh_internal_local_fault/lh_received_local_fault instead) —
+		 * confirm it is assigned elsewhere or drop it.
+		 */
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+/* Lifecycle: allocate, tear down, and bind to an FPGA module instance */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+/*
+ * Read the LINK_SUMMARY register fields for channel `index`.
+ * Each out-pointer receives the corresponding field value.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+/* Datapath enable/resync controls (`index` selects the sub-module/channel) */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+/* Sub-block reset controls */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+/* Fault-indication transmission (local/remote fault interleave) */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* GTY transceiver lane controls */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+/* Reset/lock status queries */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* Link-speed selection (10G vs module default) */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+/* GTY TX analog settings: differential swing, pre- and post-cursor emphasis */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* Auto-negotiation / link-training configuration */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+/* Timestamp compensation delays (units defined by the FPGA register) */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+/* FEC request bits advertised during auto-negotiation */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI read traffic-generator instance.
+ * Returns NULL on allocation failure; free with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() allocates and zero-fills in one step (replaces malloc+memset) */
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/* Release an instance created by nthw_pci_rd_tg_new(); NULL is a no-op. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the struct before handing the memory back */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind instance `p` to FPGA module MOD_PCI_RD_TG number `n_instance` and
+ * resolve every register/field handle the other nthw_pci_rd_tg_* accessors use.
+ *
+ * When `p` is NULL the call is a pure probe: it only reports whether the
+ * module instance exists. Returns 0 on success, -1 if the instance is absent.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* TG_RDDATA0/TG_RDDATA1: the 64-bit DMA address split into low/high words */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* TG_RDDATA2: request size plus wait/wrap control bits */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id: register_query_field() returns NULL when absent */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	/* TG_RD_RUN: iteration count that starts the generator */
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	/* TG_CTRL: ready/status flag polled via nthw_pci_rd_tg_get_ctrl_rdy() */
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA base address, written as two 32-bit register fields.
+ *
+ * Fix: the original mask "(1UL << 32) - 1" is undefined behavior on ILP32
+ * targets where unsigned long is 32 bits wide (shift count equals the type
+ * width). Shift a 64-bit constant (1ULL) instead; the written values are
+ * unchanged on LP64 platforms.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & ((1ULL << 32) - 1)));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & ((1ULL << 32) - 1)));
+}
+
+/* Select the TG RAM entry (TG_RDADDR.RAM_ADDR) and flush the write. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one TG RAM entry: request size plus wait/wrap flags. All three fields
+ * belong to the TG_RDDATA2 register (see nthw_pci_rd_tg_init), so the single
+ * flush through the wrap field commits the whole register in one write —
+ * the order (set, set, set, flush) is therefore significant.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Write the iteration count (TG_RD_RUN.RD_ITERATION), starting the generator. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read back the current TG_CTRL.TG_RD_RDY value (refreshes from hardware). */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * Driver state for one PCI_RD_TG (PCIe read traffic-generator) FPGA module:
+ * module handle plus resolved register/field handles, bound by
+ * nthw_pci_rd_tg_init().
+ *
+ * NOTE(review): this header uses nt_fpga_t/nt_module_t/nt_field_t, bool and
+ * uint32_t/uint64_t but includes nothing itself (unlike nthw_mac_pcs_xxv.h);
+ * it relies on the includer — consider adding the includes here.
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	/* TG_RDDATA0/TG_RDDATA1: 64-bit DMA address as low/high 32-bit fields */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	/* TG_RDDATA2: request size and control flags; req_hid is optional
+	 * (may be NULL — looked up with register_query_field)
+	 */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	/* TG_RDADDR: RAM entry selector */
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	/* TG_RD_RUN: iteration count that starts the generator */
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	/* TG_CTRL: ready/status flag */
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+/* Lifecycle: allocate, tear down, and bind to an FPGA module instance */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Traffic-generator programming and status */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/* Allocate a zero-initialized PCI_TA context; NULL on allocation failure. */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc yields the same zeroed state as malloc+memset */
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/* Release a PCI_TA context.  Accepts NULL. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (!p)
+		return;
+	/* scrub before release to make use-after-free easier to spot */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_TA (test accelerator) context to module instance n_instance
+ * of p_fpga and resolve all register/field handles.
+ *
+ * Probe mode: when p is NULL, only checks for the module's presence and
+ * returns 0 if found, -1 if not.  Otherwise returns 0 on success, -1 when
+ * the instance does not exist.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* default 1 when the parameter is absent from the product */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* packet/error counter registers, each with a single AMOUNT field */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write CONTROL.ENABLE and flush the register to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the current good-packet counter into *val (refreshes from HW). */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the current bad-packet counter into *val (refreshes from HW). */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the current length-error counter into *val (refreshes from HW). */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the current payload-error counter into *val (refreshes from HW). */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * Register/field handles for one PCI test accelerator (PCI_TA) module
+ * instance.  Filled in by nthw_pci_ta_init(); each counter register pairs
+ * with its single AMOUNT field.
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_pci_ta;	/* PCI_TA module handle */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* product parameter NT_PCI_TA_TG_PRESENT */
+
+	nt_register_t *mp_reg_pci_ta_ctrl;	/* CONTROL register */
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;	/* good-packet counter */
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;	/* bad-packet counter */
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;	/* length-error counter */
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;	/* payload-error counter */
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/* Allocate a zero-initialized PCI_WR_TG context; NULL on failure. */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc yields the same zeroed state as malloc+memset */
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/* Release a PCI_WR_TG context.  Accepts NULL. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (!p)
+		return;
+	/* scrub before release to make use-after-free easier to spot */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_WR_TG (write traffic generator) context to module instance
+ * n_instance of p_fpga and resolve all register/field handles.
+ *
+ * Probe mode: when p is NULL, only checks for the module's presence and
+ * returns 0 if found, -1 if not.  Otherwise returns 0 on success, -1 when
+ * the instance does not exist.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* default 1 when the parameter is absent from the product */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* WRDATA0/WRDATA1: low/high halves of the DMA physical address */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* WRDATA2: request size/flags; REQ_HID is optional (query, not get) */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as two 32-bit register halves
+ * (WRDATA0 = low word, WRDATA1 = high word), flushing each.
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	/*
+	 * Use 1ULL: "1UL << 32" is undefined behavior on targets where
+	 * unsigned long is only 32 bits wide (C11 6.5.7).
+	 */
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & ((1ULL << 32) - 1)));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & ((1ULL << 32) - 1)));
+}
+
+/* Write the generator RAM address and flush the register. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Set all WRDATA2 fields (request size, wait, wrap, inc-mode), then flush
+ * the shared register once via the last field written.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	/* single flush commits all four fields of the register */
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Write the iteration count and flush; presumably starts the generator. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the TG_CTRL ready flag (refreshes from HW). */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * Register/field handles for one PCIe write traffic generator (PCI_WR_TG)
+ * FPGA module instance.  Filled in by nthw_pci_wr_tg_init().
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_pci_wr_tg;	/* PCI_WR_TG module handle */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* product parameter NT_PCI_TA_TG_PRESENT */
+
+	/* WRDATA0: low 32 bits of DMA physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	/* WRDATA1: high 32 bits of DMA physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* WRDATA2: request size, optional VF host id, mode flags */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* may be NULL (optional field) */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	/* WRADDR: generator RAM address */
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	/* WR_RUN: iteration count that starts the generator */
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	/* TG_CTRL: ready status */
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	/* TG_SEQ: sequence number */
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/* Allocate a zero-initialized PCIe3 context; NULL on allocation failure. */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc yields the same zeroed state as malloc+memset */
+	return calloc(1, sizeof(nthw_pcie3_t));
+}
+
+/* Release a PCIe3 context.  Accepts NULL. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (!p)
+		return;
+	/* scrub before release to make use-after-free easier to spot */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCIe3 context to module instance n_instance of p_fpga, resolve
+ * all register/field handles, refresh the shadow of selected registers,
+ * and apply the initial setup (disable marker scheme and bifurcation).
+ *
+ * Probe mode: when p is NULL, only checks for the module's presence and
+ * returns 0 if found, -1 if not.  Otherwise returns 0 on success, -1 when
+ * the instance does not exist.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* root-port <-> end-point error reporting */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/*
+	 * NOTE(review): p0 mask is cleared above then set here, while p1 mask
+	 * is cleared twice -- presumably the final state (p0 set, p1 clear)
+	 * is what matters; confirm the double-write sequence is intentional.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/*
+ * Trigger a statistics sample by writing the magic value 0xfee1dead to the
+ * SAMPLE_TIME register.  Always returns 0.
+ */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Enable statistics collection: set STAT_ENA and STAT_REQ, then flush the
+ * shared STAT_CTRL register once.  Always returns 0.
+ */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Disable statistics collection: clear STAT_ENA while still setting
+ * STAT_REQ, then flush the shared STAT_CTRL register once.  Returns 0.
+ */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read all raw PCIe3 statistics counters into the caller's variables.
+ * p_tg_unit_size/p_tg_ref_freq are compile-time constants, the rest are
+ * refreshed from hardware.  All pointers must be non-NULL.  Returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* constants defined in the driver, not read from hardware */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Derive RX/TX rates and bus-utilization figures from the raw counters.
+ * All output parameters are written on every call: when no reference-clock
+ * ticks were counted, rates and utilizations are reported as zero.  (The
+ * original left *p_pci_rx_rate, *p_pci_tx_rate and *p_tag_use_cnt unwritten
+ * in that case, so callers could read indeterminate values.)  Returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	/* valid regardless of whether a sample window elapsed */
+	*p_tag_use_cnt = tag_use_cnt;
+	*p_ref_clk_cnt = ref_clk_cnt;
+
+	if (ref_clk_cnt) {
+		*p_pci_rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+
+		/* utilization scaled by 1e6 relative to the sample window */
+		*p_pci_nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+	} else {
+		/* no reference ticks: report zero instead of stale values */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Pre-sample hook for end-point counters.  Intentionally a no-op for
+ * PCIe3; kept so callers can treat all interfaces uniformly.  Returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the end-point counter struct with derived rates.
+ * Returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	/*
+	 * NOTE(review): the PCIe RX rate lands in epc->cur_tx and the TX rate
+	 * in epc->cur_rx -- presumably deliberate (endpoint RX == host TX),
+	 * but confirm against the consumer of these counters.
+	 */
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Register/field handles for one PCIe3 interface module instance.
+ * Filled in by nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_pcie3;	/* PCIE3 module handle */
+	int mn_instance;	/* module instance number */
+
+	/* statistics control (enable/request) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	/* RX/TX byte counters */
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	/* request-ready/request-valid cycle counters */
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	/* tags-in-use status */
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference-clock tick counter */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port -> end-point error reporting */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* end-point -> root-port error reporting */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	/* sample-time trigger register */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* end-point configuration (interface id, messaging, DMA masks) */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* marker-address registers (LSB/MSB halves) */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	/* scratch/test registers */
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never assigned by nthw_pcie3_init()
+	 * in this patch -- confirm whether they are used elsewhere or dead.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/* Allocate a zero-initialized SDC context; NULL on allocation failure. */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc yields the same zeroed state as malloc+memset */
+	return calloc(1, sizeof(nthw_sdc_t));
+}
+
+/* Release an SDC context.  Accepts NULL. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (!p)
+		return;
+	/* scrub before release to make use-after-free easier to spot */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an SDC context to module instance n_instance of p_fpga and resolve
+ * all field handles (register handles are only needed transiently here).
+ *
+ * Probe mode: when p is NULL, only checks for the module's presence and
+ * returns 0 if found, -1 if not.  Otherwise returns 0 on success, -1 when
+ * the instance does not exist.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* control fields */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* status fields used by get_states()/wait_states() */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* cell counters and fill levels */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Read one SDC status field, append its (masked) value to *pn_mask and
+ * report whether it deviates from the expected state: all-ones when
+ * n_expect_set is non-zero, all-zeros otherwise.  Returns 1 on deviation,
+ * 0 when the field matches.
+ */
+static int nthw_sdc_check_state(nt_field_t *p_fld, uint64_t *pn_mask,
+			     int n_expect_set)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	/*
+	 * 1ULL avoids undefined behavior: "(1 << width) - 1" overflows/UB
+	 * once the field is 31 or 32 bits wide (C11 6.5.7).
+	 */
+	const uint32_t val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	if (n_expect_set)
+		return val != val_mask;
+	return val != 0;
+}
+
+/*
+ * Collect the SDC state fields into a packed result mask and count how many
+ * are not in their expected state (calib/init_done/mmcm_lock/pll_lock all
+ * set, resetting clear).
+ *
+ * Returns 0 when everything is OK, a positive error count otherwise, or -1
+ * when p or pn_result_mask is NULL.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	/* same field order as the original so the packed mask layout holds */
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_calib, &n_mask, 1);
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_init_done, &n_mask, 1);
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_mmcm_lock, &n_mask, 1);
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_pll_lock, &n_mask, 1);
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_resetting, &n_mask, 0);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reaches its ready state: calib, init_done, mmcm_lock
+ * and pll_lock all set, resetting clear.  Each field is polled for up to
+ * n_poll_iterations with n_poll_interval between polls (units as defined
+ * by field_wait_set_all32/field_wait_clr_all32).
+ *
+ * Returns the number of fields that timed out; 0 means all OK.
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int res;
+	int n_err_cnt = 0;
+
+	res = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	/* resetting must go low, unlike the lock/done bits above */
+	res = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Field handles for one SDC module instance.  Filled in by
+ * nthw_sdc_init(); unlike the other core modules only fields are kept,
+ * registers are resolved transiently during init.
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_sdc;	/* SDC module handle */
+	int mn_instance;	/* module instance number */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields (see nthw_sdc_get_states/wait_states) */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* counters and fill levels */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/* Allocate a zero-initialized Si5340 context; returns NULL on allocation failure. */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc yields the same all-zero state as the malloc+memset idiom */
+	return calloc(1, sizeof(nthw_si5340_t));
+}
+
+/*
+ * Bind the Si5340 context to its I2C master and select register page 0.
+ * Always returns 0; the result of the page-select write is not checked.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1; /* -1 presumably means "no clock profile applied yet" - confirm */
+
+	/* select page 0 and cache it so later accesses can skip redundant page writes */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Scrub and free the Si5340 context; a NULL argument is a no-op. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	/* reg_addr encodes the page in the high byte, the register offset in the low byte */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* remember page to avoid redundant selects */
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	/* reg_addr encodes the page in the high byte, the register offset in the low byte */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* remember page to avoid redundant selects */
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0; /* I2C write results are not checked here */
+}
+
+/*
+ * Write a clock profile (a flat array of register address/value records) to
+ * the device, verifying each write by reading the register back.
+ * Returns 0 on success, -1 on an unknown record format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* decode one record; the record layout depends on the profile format */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and then poll (up to 5 attempts, 1 s apart) for the
+ * device to report a good status: register 0x0C (status) and 0x11 (sticky
+ * status, cleared after each read) must both have the 0x09 mask bits clear.
+ * Returns 0 on success, -1 if the status never becomes good.
+ * NOTE(review): bit meanings of mask 0x09 are taken on faith - confirm
+ * against the Si5340 register map.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format); /* result checked via status below */
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00); /* clear sticky flags for the next poll */
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* read the NUL-terminated design id string (registers 0x26B..) for logging */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a clock profile stored as fmt1 (8-bit addr) records. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a clock profile stored as fmt2 (16-bit addr) records. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+/* Result codes for Si5340 operations */
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Si5340 clock synthesizer context (implementation in nthw_si5340.c) */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C address of the device */
+	nthw_iic_t *mp_nthw_iic;	/* I2C master used to reach the device */
+	int mn_clk_cfg;
+	uint8_t m_si5340_page;	/* currently selected register page (cached) */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/* Allocate a zero-initialized SPI v3 context; returns NULL on allocation failure. */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc yields the same all-zero state as the malloc+memset idiom */
+	return calloc(1, sizeof(nthw_spi_v3_t));
+}
+
+/* Tear down the SPI v3 channel: release both sub-modules, scrub and free. */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (!p)
+		return;
+
+	if (p->mp_spim_mod) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+
+	if (p->mp_spis_mod) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Set the transfer timeout. The value is later compared against differences of
+ * NT_OS_GET_TIME_MONOTONIC_COUNTER() - units are those of that counter (TODO
+ * confirm). Always returns 0.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this module (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ * Polls the SPIM Tx-FIFO-empty flag every 1 ms until the FIFO drains or
+ * time_out elapses (measured in monotonic-counter units).
+ * Returns 0 on success, -1 on timeout, or the SPIM status read error code.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ * Polls the SPIS Rx-FIFO-empty flag every 10 ms until data appears or
+ * time_out elapses (measured in monotonic-counter units).
+ * Returns 0 on success, -1 on timeout, or the SPIS status read error code.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Debug helper: log `count` bytes as hex, 16 bytes per log line. */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0; /* byte position within the current output line */
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
+		j++;
+
+		/* flush a full line of 16 bytes, or whatever remains at the end */
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0'; /* drop the trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Initialize the SPI v3 channel for FPGA instance n_instance_no: create and
+ * initialize the SPIM (master) and SPIS (slave) sub-modules, then reset both.
+ * Errors are logged but initialization continues; the return value reflects
+ * only the final SPIS reset (0 on success).
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* fixed: this failure was previously logged as nthw_spis_init */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* fixed: this failure was previously logged as nthw_spim_init */
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ *
+ * The container is a 32-bit header (opcode+size on Tx, error_code+size on Rx)
+ * followed by the payload, transferred one big-endian 32-bit word at a time.
+ * On return rx_buf->size holds the number of payload bytes received.
+ * Returns 0 on success, a wait/FIFO error code, -1 if the peer reported an
+ * error, or 1 if the Rx payload would not fit in rx_buf.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	/* header and payload words go out big-endian */
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				/* final (partial) word: zero-pad the unused bytes */
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break; /* whole payload consumed */
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the board-management AVR over the SPI channel */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+/* Sizes of the individual VPD EEPROM fields below */
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM layout of the Vital Product Data block */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER ne overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (10bytes ascii) (e.g. "ntmainb1e2" or "ntfront20b1") */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identification data (NUL-terminated string copies of the VPD) */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Caller-owned buffer descriptor for nthw_spi_v3_transfer() */
+struct tx_rx_buf {
+	uint16_t size;	/* in: buffer capacity / out (rx): bytes received */
+	void *p_buf;
+};
+
+/* SPI v3 channel: a SPIM (master) / SPIS (slave) module pair */
+struct nthw__spi__v3 {
+	int m_time_out;	/* transfer timeout, see nthw_spi_v3_set_timeout() */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/* Allocate a zero-initialized SPIM context; returns NULL on allocation failure. */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc yields the same all-zero state as the malloc+memset idiom */
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Look up SPIM module `n_instance` and cache its register/field handles.
+ * When p is NULL the call only probes for the module's existence
+ * (returns 0 if present, -1 if not). Returns -1 if the instance is missing.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* resolve all register/field handles once so the hot paths need no lookups */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Scrub and free the SPIM context; a NULL argument is a no-op. */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIM core by writing the magic value to SRR. Always returns 0. */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIM enable bit (CR.EN) via read-modify-write-flush. Always returns 0. */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM Tx FIFO via the DTR register. Always returns 0. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/* Read the SR.TXEMPTY flag into *pb_empty. Always returns 0. */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) ? true : false;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/*
+ * SPI master (SPIM) module wrapper.
+ * Register/field handles are resolved once in nthw_spim_init().
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_spim;	/* SPIM module within the FPGA */
+	int mn_instance;	/* module instance number */
+
+	/* software reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* data transmit register (Tx FIFO) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* data receive register (Rx FIFO) and configuration */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/* Allocate a zero-initialized SPIS context; returns NULL on allocation failure. */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc yields the same all-zero state as the malloc+memset idiom */
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Look up SPIS module `n_instance` and cache its register/field handles.
+ * When p is NULL the call only probes for the module's existence
+ * (returns 0 if present, -1 if not). Returns -1 if the instance is missing.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* resolve all register/field handles once so the hot paths need no lookups */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/* Scrub and free the SPIS context; a NULL argument is a no-op. */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIS core by writing the magic value to SRR. Always returns 0. */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIS enable bit (CR.EN) via read-modify-write-flush. Always returns 0. */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read the SR.RXEMPTY flag into *pb_empty. Always returns 0. */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) ? true : false;
+
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS Rx FIFO (DRR register) into *p_data. Always returns 0. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM: program the RAM
+ * address/count, flush the control register, then read the data register.
+ * Always returns 0.
+ * NOTE(review): no explicit ready/handshake wait is visible between the
+ * control flush and the data read - presumably the RAM access completes
+ * synchronously; confirm against the SPIS module documentation.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/*
+ * SPI slave (SPIS) module wrapper.
+ * Register/field handles are resolved once in nthw_spis_init().
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_spis;	/* SPIS module within the FPGA */
+	int mn_instance;	/* module instance number */
+
+	/* software reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* data transmit register (Tx FIFO) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* data receive register (Rx FIFO) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* sensor result RAM access registers */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/* Allocate a zero-initialized TSM context; returns NULL on allocation failure. */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/* Scrub and release a TSM context; a NULL argument is a no-op. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));	/* clear stale handles before freeing */
+	free(p);
+}
+
+/*
+ * Bind the TSM (time-stamp module) register and field handles.
+ *
+ * With p == NULL the call is a pure probe: it returns 0 when the module
+ * instance exists and -1 otherwise. Otherwise returns 0 on success or -1
+ * when the instance is missing.
+ *
+ * Fix vs. previous revision: TIME_LO/HI and TS_LO/HI were each looked up
+ * twice via module_get_register(); the handle already held in p_reg is
+ * now reused.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse the handle in p_reg instead of a second lookup. */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM timestamp into *p_ts.
+ * Returns 0 on success, -1 when p_ts is NULL.
+ *
+ * Fix: the second `if (p_ts)` guard was dead code — NULL already returns
+ * early above.
+ *
+ * NOTE(review): the low and high words are read with two separate bus
+ * accesses; a carry between the reads could tear the value — confirm the
+ * hardware latches HI on the LO read.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	*p_ts = ((((uint64_t)n_ts_hi) << 32UL) | n_ts_lo);
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM wall-clock time into *p_time
+ * (low word: ns, high word: seconds per the register names).
+ * Returns 0 on success, -1 when p_time is NULL.
+ *
+ * Fix: removed the dead `if (p_time)` re-check — NULL already returned
+ * early above.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	*p_time = ((((uint64_t)n_time_hi) << 32UL) | n_time_lo);
+
+	return 0;
+}
+
+/* Program the 64-bit TSM time: low word is flushed first, then the high word. */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	const uint32_t n_lo = (uint32_t)(n_time & 0xFFFFFFFF);
+	const uint32_t n_hi = (uint32_t)((n_time >> 32) & 0xFFFFFFFF);
+
+	field_set_val_flush32(p->mp_fld_time_lo, n_lo);
+	field_set_val_flush32(p->mp_fld_time_hi, n_hi);
+	return 0;
+}
+
+/* Enable or disable timer T0 via a read-modify-write of TIMER_CTRL. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld_en = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(fld_en);	/* refresh shadow before modifying */
+	if (b_enable)
+		field_set_flush(fld_en);
+	else
+		field_clr_flush(fld_en);
+	return 0;
+}
+
+/* Set the timer T0 period (max count). Read-modify-write, then flush. */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable or disable timer T1 via a read-modify-write of TIMER_CTRL. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld_en = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(fld_en);	/* refresh shadow before modifying */
+	if (b_enable)
+		field_set_flush(fld_en);
+	else
+		field_clr_flush(fld_en);
+	return 0;
+}
+
+/* Set the timer T1 period (max count). Read-modify-write, then flush. */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/* Select the timestamp format in TSM_CONFIG. Always returns 0. */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/* Register and field handles for one TSM (time-stamp module) instance. */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_tsm;	/* TSM module handle */
+	int mn_instance;	/* module instance number */
+
+	nt_field_t *mp_fld_config_ts_format;	/* timestamp format select */
+
+	/* per-timer enable bits in TIMER_CTRL */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;	/* T0 period */
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;	/* T1 period */
+
+	/* 64-bit timestamp, split across two 32-bit registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* 64-bit time value, split across two 32-bit registers */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);	/* allocate a zeroed instance */
+void nthw_tsm_delete(nthw_tsm_t *p);	/* scrub and free; NULL ignored */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);	/* bind handles; 0 on success */
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/* Allocate a zero-initialized DBS context; returns NULL on allocation failure. */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/* Scrub and release a DBS context; a NULL argument is a no-op. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));	/* clear stale handles before freeing */
+	free(p);
+}
+
+/*
+ * Bind all DBS (doorbell/queue-system) register and field handles.
+ *
+ * With p == NULL the call is a pure probe: returns 0 when the module
+ * instance exists and -1 otherwise. Otherwise returns 0 on success or -1
+ * when the instance is missing. module_get_register()/register_get_field()
+ * are mandatory lookups; the *_query_* variants return NULL for optional
+ * registers/fields absent in older FPGA images.
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* NOTE(review): log text "flagged at present" probably means
+	 * "flagged as present" — runtime string left unchanged here.
+	 */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* RX/TX global control registers */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* Queue init registers (INIT_VAL / PTR / IDLE are optional) */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* AM: available monitor RAM (control = address/count, data = entry) */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* UW: used writer RAM */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* DR: descriptor reader RAM */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* QP: TX queue property RAM */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/* Restore DBS RX control to defaults (all queues off) in a single flush. */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/* Restore DBS TX control to defaults (all queues off) in a single flush.
+ * NOTE(review): scan/update speeds (5/8) are mirrored relative to
+ * dbs_reset_rx_control (8/5) — confirm the asymmetry is intended.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/* Full DBS reset: clear both control registers, then zero every per-queue
+ * RAM bank (and its local shadow copy) for RX and TX.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/* Program the DBS RX control register; all fields are staged, then pushed
+ * with one register_flush. Always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/* Read back the cached DBS RX control fields (shadow values, no bus read).
+ * All output pointers must be non-NULL. Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/* Program the DBS TX control register; all fields are staged, then pushed
+ * with one register_flush. Always returns 0.
+ *
+ * Fix: the DBS_PRINT_REGS debug block omitted tx_queue_enable, unlike the
+ * RX variant which prints all six parameters.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/* Read back the cached DBS TX control fields (shadow values, no bus read).
+ * All output pointers must be non-NULL. Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/* Start RX queue initialization. The optional INIT_VAL register (start
+ * index/pointer) must be flushed before the INIT command register.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/* Read the cached RX init status fields; poll *busy for completion. */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/* Start TX queue initialization. The optional INIT_VAL register (start
+ * index/pointer) must be flushed before the INIT command register.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/* Read the cached TX init status fields; poll *busy for completion. */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/* Request RX idle state for a queue.
+ * Returns -ENOTSUP when the optional RX_IDLE register is absent, else 0.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/* Read RX idle status from hardware (field_get_updated re-reads the bus).
+ * *queue is always reported as 0 — the register's queue field is write-only
+ * from this driver's perspective. Returns -ENOTSUP when unsupported.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/* Request TX idle state for a queue.
+ * Returns -ENOTSUP when the optional TX_IDLE register is absent, else 0.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/* Read TX idle status from hardware (field_get_updated re-reads the bus).
+ * *queue is always reported as 0. Returns -ENOTSUP when unsupported.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/* Select which RX queue the PTR register reports on.
+ * Returns -ENOTSUP when the optional RX_PTR register is absent.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/* Read the RX queue pointer for the queue previously selected with
+ * set_rx_ptr_queue(). *queue is always reported as 0.
+ * Returns -ENOTSUP when unsupported.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/* Select which TX queue the PTR register reports on.
+ * Returns -ENOTSUP when the optional TX_PTR register is absent.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer for the queue previously selected with
+ * set_tx_ptr_queue(). The queue number is not read back; *queue is always 0.
+ * Returns -ENOTSUP when the FPGA has no TX_PTR register.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Point the RX AM control register at entry @index with a count of 1. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_am_data(). */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	struct nthw_dbs_rx_am_data_s *const entry = &p->m_rx_am_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_am_data(). */
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	struct nthw_dbs_rx_am_data_s *const entry = &p->m_rx_am_shadow[index];
+
+	entry->enable = enable;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_am_data(). */
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	struct nthw_dbs_rx_am_data_s *const entry = &p->m_rx_am_shadow[index];
+
+	entry->host_id = host_id;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_am_data(). */
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	struct nthw_dbs_rx_am_data_s *const entry = &p->m_rx_am_shadow[index];
+
+	entry->packed = packed;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_am_data(). */
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	struct nthw_dbs_rx_am_data_s *const entry = &p->m_rx_am_shadow[index];
+
+	entry->int_enable = int_enable;
+}
+
+/* Record a complete RX AM entry in the shadow bank (no hardware access). */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	/* int_enable last; all five setters are plain shadow stores */
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write shadow RX AM entry @index to hardware: load the data fields,
+ * point the control register at @index, then flush the data register.
+ * Packed and interrupt fields are optional and skipped when absent.
+ * NOTE(review): the (uint32_t *) cast of the 64-bit GPA assumes
+ * field_set_val() consumes two 32-bit words in host order — confirm.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Program RX available-monitor entry @index: update the shadow copy, then
+ * push it to hardware. Returns -ENOTSUP when the register is absent.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Point the TX AM control register at entry @index with a count of 1. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Record a complete TX AM entry in the shadow bank (no hardware access). */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	struct nthw_dbs_tx_am_data_s *const entry = &p->m_tx_am_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+	entry->enable = enable;
+	entry->host_id = host_id;
+	entry->packed = packed;
+	entry->int_enable = int_enable;
+}
+
+/*
+ * Write shadow TX AM entry @index to hardware: load the data fields,
+ * point the control register at @index, then flush the data register.
+ * Packed and interrupt fields are optional and skipped when absent.
+ * NOTE(review): same (uint32_t *) cast of a 64-bit GPA as on the RX side —
+ * confirm field_set_val() word-order expectations.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Program TX available-monitor entry @index: update the shadow copy, then
+ * push it to hardware. Returns -ENOTSUP when the register is absent.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Point the RX used-writer control register at entry @index, count 1. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_uw_data(). */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_uw_data(). */
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->host_id = host_id;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_uw_data(). */
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->queue_size = queue_size;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_uw_data(). */
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->packed = packed;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_uw_data(). */
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->int_enable = int_enable;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_uw_data(). */
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->vec = vec;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_uw_data(). */
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->istk = istk;
+}
+
+/* Record a complete RX used-writer entry in the shadow bank (no HW access). */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	struct nthw_dbs_rx_uw_data_s *const entry = &p->m_rx_uw_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+	entry->host_id = host_id;
+	entry->queue_size = queue_size;
+	entry->packed = packed;
+	entry->int_enable = int_enable;
+	entry->vec = vec;
+	entry->istk = istk;
+}
+
+/*
+ * Write shadow RX used-writer entry @index to hardware. Since DBS module
+ * version 0.8 the queue size is programmed as a mask (2^n - 1) instead of
+ * the raw exponent. Optional fields (packed, interrupt group) are skipped
+ * when the FPGA does not implement them. Data fields are loaded first,
+ * then the control index is set, then the data register is flushed.
+ * NOTE(review): (uint32_t *) cast of the 64-bit GPA — confirm word order.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Program RX used-writer entry @index: update the shadow copy, then push it
+ * to hardware. Returns -ENOTSUP when the register is absent.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the TX used-writer control register at entry @index, count 1. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->host_id = host_id;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->queue_size = queue_size;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->packed = packed;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->int_enable = int_enable;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->vec = vec;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->istk = istk;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->in_order = in_order;
+}
+
+/* Record a complete TX used-writer entry in the shadow bank (no HW access). */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	struct nthw_dbs_tx_uw_data_s *const entry = &p->m_tx_uw_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+	entry->host_id = host_id;
+	entry->queue_size = queue_size;
+	entry->packed = packed;
+	entry->int_enable = int_enable;
+	entry->vec = vec;
+	entry->istk = istk;
+	entry->in_order = in_order;
+}
+
+/*
+ * Write shadow TX used-writer entry @index to hardware. Since DBS module
+ * version 0.8 the queue size is programmed as a mask (2^n - 1) instead of
+ * the raw exponent. Optional fields (packed, interrupt group, in_order)
+ * are skipped when absent. Data fields are loaded first, then the control
+ * index is set, then the data register is flushed.
+ * NOTE(review): (uint32_t *) cast of the 64-bit GPA — confirm word order.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Program TX used-writer entry @index: update the shadow copy, then push it
+ * to hardware. Returns -ENOTSUP when the register is absent.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the RX descriptor-reader control register at entry @index, count 1. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_dr_data(). */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	struct nthw_dbs_rx_dr_data_s *const entry = &p->m_rx_dr_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_dr_data(). */
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	struct nthw_dbs_rx_dr_data_s *const entry = &p->m_rx_dr_shadow[index];
+
+	entry->host_id = host_id;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_dr_data(). */
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	struct nthw_dbs_rx_dr_data_s *const entry = &p->m_rx_dr_shadow[index];
+
+	entry->queue_size = queue_size;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_dr_data(). */
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	struct nthw_dbs_rx_dr_data_s *const entry = &p->m_rx_dr_shadow[index];
+
+	entry->header = header;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_rx_dr_data(). */
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	struct nthw_dbs_rx_dr_data_s *const entry = &p->m_rx_dr_shadow[index];
+
+	entry->packed = packed;
+}
+
+/* Record a complete RX descriptor-reader entry in the shadow bank. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	struct nthw_dbs_rx_dr_data_s *const entry = &p->m_rx_dr_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+	entry->host_id = host_id;
+	entry->queue_size = queue_size;
+	entry->header = header;
+	entry->packed = packed;
+}
+
+/*
+ * Write shadow RX descriptor-reader entry @index to hardware. Since DBS
+ * module version 0.8 the queue size is programmed as a mask (2^n - 1)
+ * instead of the raw exponent. The packed field is optional. Data fields
+ * are loaded first, then the control index, then the data register flush.
+ * NOTE(review): (uint32_t *) cast of the 64-bit GPA — confirm word order.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program RX descriptor-reader entry @index: update the shadow copy, then
+ * push it to hardware. Returns -ENOTSUP when the register is absent.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX descriptor-reader control register at entry @index, count 1. */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_dr_data(). */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_dr_data(). */
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->host_id = host_id;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_dr_data(). */
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->queue_size = queue_size;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_dr_data(). */
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->header = header;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_dr_data(). */
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->port = port;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_dr_data(). */
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->packed = packed;
+}
+
+/* Record a complete TX descriptor-reader entry in the shadow bank. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->guest_physical_address = guest_physical_address;
+	entry->host_id = host_id;
+	entry->queue_size = queue_size;
+	entry->header = header;
+	entry->port = port;
+	entry->packed = packed;
+}
+
+/*
+ * Write shadow TX descriptor-reader entry @index to hardware. Since DBS
+ * module version 0.8 the queue size is programmed as a mask (2^n - 1)
+ * instead of the raw exponent. The packed field is optional. Data fields
+ * are loaded first, then the control index, then the data register flush.
+ * NOTE(review): (uint32_t *) cast of the 64-bit GPA — confirm word order.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program TX descriptor-reader entry @index: update the shadow copy, then
+ * push it to hardware. Returns -ENOTSUP when the register is absent.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX queue-property control register at entry @index, count 1. */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_qp_data(). */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	struct nthw_dbs_tx_qp_data_s *const entry = &p->m_tx_qp_shadow[index];
+
+	entry->virtual_port = virtual_port;
+}
+
+/* Record a complete TX queue-property entry in the shadow bank. */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+/*
+ * Write shadow TX queue-property entry @index to hardware: load the data
+ * field, point the control register at @index, then flush the data register.
+ */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Program the virtual port property for TX queue @index: update the shadow
+ * copy, then push it to hardware.
+ * Returns -ENOTSUP when the register is absent.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX QoS control register at entry @index, count 1.
+ * NOTE(review): the "mp_reg_tx_queue_qos_control_adr/_cnt" members are
+ * declared as nt_field_t despite the mp_reg_ prefix — naming only.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_qos_data(). */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	struct nthw_dbs_tx_qos_data_s *const entry = &p->m_tx_qos_shadow[index];
+
+	entry->enable = enable;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_qos_data(). */
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	struct nthw_dbs_tx_qos_data_s *const entry = &p->m_tx_qos_shadow[index];
+
+	entry->ir = ir;
+}
+
+/* Shadow-bank write only; hardware is untouched until flush_tx_qos_data(). */
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	struct nthw_dbs_tx_qos_data_s *const entry = &p->m_tx_qos_shadow[index];
+
+	entry->bs = bs;
+}
+
+/* Record a complete TX QoS entry in the shadow bank (no hardware access). */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	struct nthw_dbs_tx_qos_data_s *const entry = &p->m_tx_qos_shadow[index];
+
+	entry->enable = enable;
+	entry->ir = ir;
+	entry->bs = bs;
+}
+
+/*
+ * Write shadow TX QoS entry @index to hardware: load the data fields,
+ * point the control register at @index, then flush the data register.
+ */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Program TX QoS entry @index (enable flag, ir, bs): update the shadow copy,
+ * then push it to hardware. Returns -ENOTSUP when the register is absent.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate as a mul/div pair and flush the register.
+ * Returns -ENOTSUP when the FPGA has no TX QoS rate register.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+/* Size of the per-direction shadow banks in struct nthw_dbs_s below. */
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA: shadow copy of one RX available-monitor bank entry. */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA: shadow copy of one TX available-monitor bank entry. */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA: shadow copy of one RX used-writer bank entry. */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as exponent; flushed as 2^n-1 on DBS >= 0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA: shadow copy of one TX used-writer bank entry. */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as exponent; flushed as 2^n-1 on DBS >= 0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA: shadow copy of one RX descriptor-reader bank entry. */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as exponent; flushed as 2^n-1 on DBS >= 0.8 */
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: shadow copy of one TX descriptor-reader bank entry. */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as exponent; flushed as 2^n-1 on DBS >= 0.8 */
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: shadow copy of one TX queue-property bank entry. */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* DBS_TX_QOS_DATA: shadow copy of one TX QoS bank entry (enable, ir, bs). */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * Driver-side state for one DBS (doorbell/queue supervisor) FPGA module
+ * instance: cached register/field handles plus shadow copies of the
+ * indexed register banks (the banks are written through a control/data
+ * register pair, so the last-written values are kept here).
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	/* RX/TX global control registers */
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	/* Queue init / idle / pointer status registers */
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Indexed banks: each is accessed via a control (adr/cnt) + data pair */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/*
+	 * NOTE(review): the members below are fields despite the mp_reg_
+	 * prefix (declared nt_field_t) — consider renaming to mp_fld_.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Shadow copies of the indexed register banks (last written values) */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/*
+ * DBS module API. Functions returning int give 0 on success and -ENOTSUP
+ * when the corresponding register is not present in the FPGA.
+ * NOTE(review): only some entry points carry the nthw_dbs_ prefix —
+ * consider prefixing the rest for namespace consistency.
+ */
+
+/* Lifetime and reset */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/* Global RX/TX control */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+
+/* Per-queue init / idle / pointer access */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+
+/* Indexed bank writers (shadowed; see m_*_shadow in struct nthw_dbs_s) */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
/*
 * Logical port flavor; nthw_epp_set_mtu() dispatches on PHYSICAL vs
 * VIRTUAL and rejects other values with -ENOTSUP.
 */
typedef enum nt_meta_port_type_e {
	PORT_TYPE_PHYSICAL,
	PORT_TYPE_VIRTUAL,
	PORT_TYPE_OVERRIDE,
} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
/* Hardware identity (reported by firmware) plus VPD-provided MAC info. */
typedef struct nthwhw_info_s {
	/* From FW */
	int hw_id;
	int hw_id_emulated;
	char hw_plat_id_str[32];

	/* From VPD; presumably the adapter's base MAC assignment - confirm */
	struct vpd_info_s {
		int mn_mac_addr_count;
		uint64_t mn_mac_addr_value;
		uint8_t ma_mac_addr_octets[6];
	} vpd_info;
} nthw_hw_info_t;
+
/*
 * Per-adapter FPGA state shared across the nthw layer: identity fields,
 * port topology counts, and pointers to the instantiated core modules.
 */
typedef struct fpga_info_s {
	uint64_t n_fpga_ident; /* packed product/version/revision id (see FPGAID_TO_* macros) */

	/* Decoded identity components */
	int n_fpga_type_id;
	int n_fpga_prod_id;
	int n_fpga_ver_id;
	int n_fpga_rev_id;

	int n_fpga_build_time;

	int n_fpga_debug_mode;

	/* Port/NIM topology counts */
	int n_nims;
	int n_phy_ports;
	int n_phy_quads;
	int n_rx_ports;
	int n_tx_ports;

	enum fpga_info_profile profile;

	struct nt_fpga_s *mp_fpga; /* owning FPGA model object */

	/* Core module handles (NULL until probed/initialized) */
	struct nthw_rac *mp_nthw_rac;
	struct nthw_hif *mp_nthw_hif;
	struct nthw_pcie3 *mp_nthw_pcie3;
	struct nthw_tsm *mp_nthw_tsm;

	nthw_dbs_t *mp_nthw_dbs;
	nthw_epp_t *mp_nthw_epp;

	uint8_t *bar0_addr; /* Needed for register read/write */
	size_t bar0_size;

	int adapter_no; /* Needed for nthw_rac DMA array indexing */
	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */

	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */

	struct nthwhw_info_s nthw_hw_info;

	nthw_adapter_id_t n_nthw_adapter_id;

} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+nthw_epp_t *nthw_epp_new(void)
+{
+	nthw_epp_t *p = malloc(sizeof(nthw_epp_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_epp_t));
+	return p;
+}
+
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_epp_t));
+		free(p);
+	}
+}
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
/*
 * Bind an EPP object to EPP module instance n_instance of p_fpga and
 * resolve every register/field handle used by the other nthw_epp_*
 * functions.
 *
 * May be called with p == NULL as a pure presence probe (see
 * nthw_epp_present()). Returns 0 on success, -1 if the instance does
 * not exist.
 */
int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);

	/* Probe-only call: report presence without touching any state */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_epp = mod;

	/* Recipe category count is a product parameter (defaults to 0) */
	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);

	/* Recipe (RCP) memory: CTRL addresses an entry, DATA holds its fields */
	p->mp_reg_reciepe_memory_control =
		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
	p->mp_fld_reciepe_memory_control_adr =
		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
	p->mp_fld_reciepe_memory_control_cnt =
		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);

	p->mp_reg_reciepe_memory_data =
		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);

	/* Per-physical-port TX MTU table */
	p->mp_reg_txp_port_mtu_control =
		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
	p->mp_fld_txp_port_mtu_control_adr =
		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
	p->mp_fld_txp_port_mtu_control_cnt =
		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);

	p->mp_reg_txp_port_mtu_data =
		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
	p->mp_fld_txp_port_mtu_data_max_mtu =
		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);

	/* Per-queue MTU table (virtual ports) */
	p->mp_reg_queue_mtu_control =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
	p->mp_fld_queue_mtu_control_adr =
		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
	p->mp_fld_queue_mtu_control_cnt =
		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);

	p->mp_reg_queue_mtu_data =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
	p->mp_fld_queue_mtu_data_max_mtu =
		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);

	/* TX-port QoS table: enable, information rate (+fraction), burst size */
	p->mp_reg_txp_qos_control =
		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
	p->mp_fld_txp_qos_control_adr =
		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
	p->mp_fld_txp_qos_control_cnt =
		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);

	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
	p->mp_fld_txp_qos_data_enable =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
	p->mp_fld_txp_qos_data_information_rate =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
	p->mp_fld_txp_qos_data_information_rate_fractional =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
	p->mp_fld_txp_qos_data_burst_size =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);

	/* Virtual-port QoS table: same layout as the TXP QoS table */
	p->mp_reg_vport_qos_control =
		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
	p->mp_fld_vport_qos_control_adr =
		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
	p->mp_fld_vport_qos_control_cnt =
		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);

	p->mp_reg_vport_qos_data =
		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
	p->mp_fld_vport_qos_data_enable =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
	p->mp_fld_vport_qos_data_information_rate =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
	p->mp_fld_vport_qos_data_information_rate_fractional =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
	p->mp_fld_vport_qos_data_burst_size =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);

	/* Queue -> virtual-port mapping table */
	p->mp_reg_queue_vport_control =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
	p->mp_fld_queue_vport_control_adr =
		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
	p->mp_fld_queue_vport_control_cnt =
		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);

	p->mp_reg_queue_vport_data =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
	p->mp_fld_queue_vport_data_vport =
		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);

	return 0;
}
+
/*
 * One-time EPP bring-up: clear all recipe categories, program the
 * NRECIPE default recipes (no adjust / VXLAN / VXLAN-IPv6 size adjusts),
 * set default MTUs, and disable QoS for physical and virtual ports.
 * Returns 0; p == NULL is a no-op.
 */
int nthw_epp_setup(nthw_epp_t *p)
{
	if (p == NULL)
		return 0;

	/* One recipe record is written per flush (CNT = 1) */
	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);

	/* Zero all categories */
	for (int i = 0; i < p->mn_epp_categories; ++i) {
		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
		register_flush(p->mp_reg_reciepe_memory_control, 1);

		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
			       0);
		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
		register_flush(p->mp_reg_reciepe_memory_data, 1);
	}

	/* Program the default recipes with their per-recipe size adjusts */
	for (int i = 0; i < NRECIPE; ++i) {
		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
		register_flush(p->mp_reg_reciepe_memory_control, 1);

		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
			       rcp_data_size_adjust_txp[i]);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
			       rcp_data_size_adjust_vport[i]);
		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
		register_flush(p->mp_reg_reciepe_memory_data, 1);
	}
	/* phy mtu setup */
	/* NOTE(review): bound 2 presumably equals the number of physical TX
	 * ports - confirm against product params
	 */
	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
	for (int i = 0; i < 2; ++i) {
		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
		register_flush(p->mp_reg_txp_port_mtu_control, 1);

		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
		register_flush(p->mp_reg_txp_port_mtu_data, 1);
	}
	/* phy QoS setup: shaping disabled by default */
	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
	for (int i = 0; i < 2; ++i) {
		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
		register_flush(p->mp_reg_txp_qos_control, 1);

		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
		register_flush(p->mp_reg_txp_qos_data, 1);
	}

	/* virt mtu setup */
	/* NOTE(review): bound 128 presumably equals the number of virtual
	 * ports/queues - confirm against product params
	 */
	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
	for (int i = 0; i < 128; ++i) {
		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
		register_flush(p->mp_reg_queue_mtu_control, 1);

		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
		register_flush(p->mp_reg_queue_mtu_data, 1);
	}

	/* virt QoS setup: shaping disabled by default */
	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
	for (int i = 0; i < 128; ++i) {
		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
		register_flush(p->mp_reg_vport_qos_control, 1);

		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
		register_flush(p->mp_reg_vport_qos_data, 1);
	}

	return 0;
}
+
/*
 * Set the MTU registers in context with the current setMTU request.
 * Physical ports use the TXP MTU table, virtual ports the queue MTU
 * table. Any other port type resets both tables and returns -ENOTSUP.
 * p == NULL is a no-op returning 0.
 */
int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
		   nt_meta_port_type_t port_type)
{
	if (p == NULL)
		return 0;

	if (port_type == PORT_TYPE_PHYSICAL) {
		/* Select the TXP MTU table entry for this port */
		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
		register_flush(p->mp_reg_txp_port_mtu_control, 1);

		/* Write the new maximum MTU */
		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
		register_flush(p->mp_reg_txp_port_mtu_data, 1);
	} else if (port_type == PORT_TYPE_VIRTUAL) {
		/* Select the queue MTU table entry for this port */
		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
		register_flush(p->mp_reg_queue_mtu_control, 1);

		/* Write the new maximum MTU */
		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
		register_flush(p->mp_reg_queue_mtu_data, 1);
	} else {
		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
		       __func__);
		/* Unknown port type: reset both MTU tables to a known state */
		register_reset(p->mp_reg_queue_mtu_control);
		register_flush(p->mp_reg_queue_mtu_control, 1);
		register_reset(p->mp_reg_queue_mtu_data);
		register_flush(p->mp_reg_queue_mtu_data, 1);
		register_reset(p->mp_reg_txp_port_mtu_control);
		register_flush(p->mp_reg_txp_port_mtu_control, 1);
		register_reset(p->mp_reg_txp_port_mtu_data);
		register_flush(p->mp_reg_txp_port_mtu_data, 1);

		return -ENOTSUP;
	}

	return 0;
}
+
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+#define VXLANDATASIZEADJUSTIPV6 70
+#define MTUINITVAL 1500
+#define NRECIPE 3
+
/* List of size adjust values to put in the recipe memory data register at startup */
/* NOTE(review): static const data defined in a header gets a separate copy
 * (and possible unused-variable warnings) in every translation unit that
 * includes it - consider moving the definitions into nthw_epp.c.
 */
static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
						   VXLANDATASIZEADJUSTIPV6
						 };
static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
						     VXLANDATASIZEADJUSTIPV6
						   };
+
/*
 * EPP module state: owning FPGA/module handles plus the register and
 * field handles resolved once by nthw_epp_init().
 */
struct nthw_epp_s {
	nt_fpga_t *mp_fpga; /* owning FPGA */
	nt_module_t *mp_mod_epp; /* bound EPP module instance */
	int mn_instance;
	int mn_epp_categories; /* recipe category count (product param) */

	/* Recipe (RCP) memory access */
	nt_register_t *mp_reg_reciepe_memory_control;
	nt_field_t *mp_fld_reciepe_memory_control_adr;
	nt_field_t *mp_fld_reciepe_memory_control_cnt;

	nt_register_t *mp_reg_reciepe_memory_data;
	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;

	/* Physical TX-port MTU table */
	nt_register_t *mp_reg_txp_port_mtu_control;
	nt_field_t *mp_fld_txp_port_mtu_control_adr;
	nt_field_t *mp_fld_txp_port_mtu_control_cnt;

	nt_register_t *mp_reg_txp_port_mtu_data;
	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;

	/* Per-queue MTU table (virtual ports) */
	nt_register_t *mp_reg_queue_mtu_control;
	nt_field_t *mp_fld_queue_mtu_control_adr;
	nt_field_t *mp_fld_queue_mtu_control_cnt;

	nt_register_t *mp_reg_queue_mtu_data;
	nt_field_t *mp_fld_queue_mtu_data_max_mtu;

	/* TX-port QoS table */
	nt_register_t *mp_reg_txp_qos_control;
	nt_field_t *mp_fld_txp_qos_control_adr;
	nt_field_t *mp_fld_txp_qos_control_cnt;

	nt_register_t *mp_reg_txp_qos_data;
	nt_field_t *mp_fld_txp_qos_data_enable;
	nt_field_t *mp_fld_txp_qos_data_information_rate;
	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
	nt_field_t *mp_fld_txp_qos_data_burst_size;

	/* Virtual-port QoS table */
	nt_register_t *mp_reg_vport_qos_control;
	nt_field_t *mp_fld_vport_qos_control_adr;
	nt_field_t *mp_fld_vport_qos_control_cnt;

	nt_register_t *mp_reg_vport_qos_data;
	nt_field_t *mp_fld_vport_qos_data_enable;
	nt_field_t *mp_fld_vport_qos_data_information_rate;
	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
	nt_field_t *mp_fld_vport_qos_data_burst_size;

	/* Queue -> virtual-port mapping table */
	nt_register_t *mp_reg_queue_vport_control;
	nt_field_t *mp_fld_queue_vport_control_adr;
	nt_field_t *mp_fld_queue_vport_control_cnt;

	nt_register_t *mp_reg_queue_vport_data;
	nt_field_t *mp_fld_queue_vport_data_vport;
};

typedef struct nthw_epp_s nthw_epp_t;
+
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
/* Generated code */
/* NULL-terminated table of all FPGA images supported by this PMD */
nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
					       NULL
					     };
+
/*
 * Module id -> printable-name map used by nthw_fpga_mod_id_to_str().
 * The table is terminated by the { 0L, NULL } sentinel entry.
 */
static const struct {
	const int a; /* module id (MOD_*) */
	const char *b; /* printable module name */
} sa_nthw_fpga_mod_map[] = {
	{ MOD_CAT, "CAT" },
	{ MOD_CB, "CB" },
	{ MOD_CCIP, "CCIP" },
	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
	{ MOD_COR, "COR" },
	{ MOD_CPY, "CPY" },
	{ MOD_CSU, "CSU" },
	{ MOD_DBS, "DBS" },
	{ MOD_DDP, "DDP" },
	{ MOD_EPP, "EPP" },
	{ MOD_EQM, "EQM" },
	{ MOD_FHM, "FHM" },
	{ MOD_FLM, "FLM" },
	{ MOD_GFG, "GFG" },
	{ MOD_GMF, "GMF" },
	{ MOD_GPIO_PHY, "GPIO_PHY" },
	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
	{ MOD_HFU, "HFU" },
	{ MOD_HIF, "HIF" },
	{ MOD_HSH, "HSH" },
	{ MOD_HST, "HST" },
	{ MOD_ICORE_10G, "ICORE_10G" },
	{ MOD_IFR, "IFR" },
	{ MOD_IIC, "IIC" },
	{ MOD_INS, "INS" },
	{ MOD_IOA, "IOA" },
	{ MOD_IPF, "IPF" },
	{ MOD_KM, "KM" },
	{ MOD_LAO, "LAO" },
	{ MOD_MAC, "MAC" },
	{ MOD_MAC10, "MAC10" },
	{ MOD_MAC100, "MAC100" },
	{ MOD_MAC10G, "MAC10G" },
	{ MOD_MAC1G, "MAC1G" },
	{ MOD_MAC_PCS, "MAC_PCS" },
	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
	{ MOD_MAC_RX, "MAC_RX" },
	{ MOD_MAC_TFG, "MAC_TFG" },
	{ MOD_MAC_TX, "MAC_TX" },
	{ MOD_MCU, "MCU" },
	{ MOD_MDG, "MDG" },
	{ MOD_MSK, "MSK" },
	{ MOD_NIF, "NIF" },
	{ MOD_PCIE3, "PCIE3" },
	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
	{ MOD_PCI_TA, "PCI_TA" },
	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
	{ MOD_PCS, "PCS" },
	{ MOD_PCS100, "PCS100" },
	{ MOD_PDB, "PDB" },
	{ MOD_PDI, "PDI" },
	{ MOD_PHY10G, "PHY10G" },
	{ MOD_PHY3S10G, "PHY3S10G" },
	{ MOD_PM, "PM" },
	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
	{ MOD_PTP1588, "PTP1588" },
	{ MOD_QM, "QM" },
	{ MOD_QSL, "QSL" },
	{ MOD_QSPI, "QSPI" },
	{ MOD_R2DRP, "R2DRP" },
	{ MOD_RAC, "RAC" },
	{ MOD_RBH, "RBH" },
	{ MOD_RFD, "RFD" },
	{ MOD_RMC, "RMC" },
	{ MOD_RNTC, "RNTC" },
	{ MOD_ROA, "ROA" },
	{ MOD_RPL, "RPL" },
	{ MOD_RPP_LR, "RPP_LR" },
	{ MOD_RST7000, "RST7000" },
	{ MOD_RST7001, "RST7001" },
	{ MOD_RST9500, "RST9500" },
	{ MOD_RST9501, "RST9501" },
	{ MOD_RST9502, "RST9502" },
	{ MOD_RST9503, "RST9503" },
	{ MOD_RST9504, "RST9504" },
	{ MOD_RST9505, "RST9505" },
	{ MOD_RST9506, "RST9506" },
	{ MOD_RST9507, "RST9507" },
	{ MOD_RST9508, "RST9508" },
	{ MOD_RST9509, "RST9509" },
	{ MOD_RST9510, "RST9510" },
	{ MOD_RST9512, "RST9512" },
	{ MOD_RST9513, "RST9513" },
	{ MOD_RST9515, "RST9515" },
	{ MOD_RST9516, "RST9516" },
	{ MOD_RST9517, "RST9517" },
	{ MOD_RST9519, "RST9519" },
	{ MOD_RST9520, "RST9520" },
	{ MOD_RST9521, "RST9521" },
	{ MOD_RST9522, "RST9522" },
	{ MOD_RST9523, "RST9523" },
	{ MOD_RST9524, "RST9524" },
	{ MOD_RST9525, "RST9525" },
	{ MOD_RST9526, "RST9526" },
	{ MOD_RST9527, "RST9527" },
	{ MOD_RST9528, "RST9528" },
	{ MOD_RST9529, "RST9529" },
	{ MOD_RST9530, "RST9530" },
	{ MOD_RST9531, "RST9531" },
	{ MOD_RST9532, "RST9532" },
	{ MOD_RST9533, "RST9533" },
	{ MOD_RST9534, "RST9534" },
	{ MOD_RST9535, "RST9535" },
	{ MOD_RST9536, "RST9536" },
	{ MOD_RST9537, "RST9537" },
	{ MOD_RST9538, "RST9538" },
	{ MOD_RST9539, "RST9539" },
	{ MOD_RST9540, "RST9540" },
	{ MOD_RST9541, "RST9541" },
	{ MOD_RST9542, "RST9542" },
	{ MOD_RST9543, "RST9543" },
	{ MOD_RST9544, "RST9544" },
	{ MOD_RST9545, "RST9545" },
	{ MOD_RST9546, "RST9546" },
	{ MOD_RST9547, "RST9547" },
	{ MOD_RST9548, "RST9548" },
	{ MOD_RST9549, "RST9549" },
	{ MOD_RST9553, "RST9553" },
	{ MOD_RST9555, "RST9555" },
	{ MOD_RST9559, "RST9559" },
	{ MOD_RST9563, "RST9563" },
	{ MOD_RTD, "RTD" },
	{ MOD_RTD_HMP, "RTD_HMP" },
	{ MOD_RTX, "RTX" },
	{ MOD_SDC, "SDC" },
	{ MOD_SLC, "SLC" },
	{ MOD_SLC_LR, "SLC_LR" },
	{ MOD_SMM, "SMM" },
	{ MOD_SMM_RX, "SMM_RX" },
	{ MOD_SMM_TX, "SMM_TX" },
	{ MOD_SPIM, "SPIM" },
	{ MOD_SPIS, "SPIS" },
	{ MOD_STA, "STA" },
	{ MOD_TBH, "TBH" },
	{ MOD_TEMPMON, "TEMPMON" },
	{ MOD_TINT, "TINT" },
	{ MOD_TMC, "TMC" },
	{ MOD_TSM, "TSM" },
	{ MOD_TX_CPY, "TX_CPY" },
	{ MOD_TX_CSI, "TX_CSI" },
	{ MOD_TX_CSO, "TX_CSO" },
	{ MOD_TX_INS, "TX_INS" },
	{ MOD_TX_RPL, "TX_RPL" },
	{ 0L, NULL }, /* sentinel - must remain the last entry */
};
+
/* Bus-type id -> name; index 0 is the "unknown" entry. */
/* NOTE: this needs to be (manually) synced with enum */
static const char *const a_bus_type[] = {
	"ERR", /* BUS_TYPE_UNKNOWN, */
	"BAR", /* BUS_TYPE_BAR, */
	"PCI", /* BUS_TYPE_PCI, */
	"CCIP", /* BUS_TYPE_CCIP, */
	"RAB0", /* BUS_TYPE_RAB0, */
	"RAB1", /* BUS_TYPE_RAB1, */
	"RAB2", /* BUS_TYPE_RAB2, */
	"NMB", /* BUS_TYPE_NMB, */
	"NDM", /* BUS_TYPE_NDM, */
};
+
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id <= (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	else
+		return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i <= (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
/*
 * Dispatch a register read to the bus backing n_bus_type_id.
 * BAR/PCI reads are single-word; RAB0..2 reads go through the RAC and
 * transfer len words. Returns 0 on success, negative on error.
 */
static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
		    uint32_t len, uint32_t *p_data)
{
	int rc = -1;

	assert(p_fpga_info);
	assert(p_data);

	switch (n_bus_type_id) {
	case BUS_TYPE_BAR:
	case BUS_TYPE_PCI:
		assert(len == 1); /* this backend only reads one word at a time */
		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
		rc = 0;
		break;
	case BUS_TYPE_RAB0:
		assert(p_fpga_info->mp_nthw_rac);
		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
					p_data);
		break;
	case BUS_TYPE_RAB1:
		assert(p_fpga_info->mp_nthw_rac);
		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
					p_data);
		break;
	case BUS_TYPE_RAB2:
		assert(p_fpga_info->mp_nthw_rac);
		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
					p_data);
		break;
	default:
		assert(false); /* unsupported bus type */
		return -1;
	}

	return rc;
}
+
/*
 * Read with timestamp capture. Timestamps are currently not collected,
 * so this simply forwards to read_data().
 */
static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
		       uint32_t addr, uint32_t len, uint32_t *p_data,
		       uint64_t *p_tsc1, uint64_t *p_tsc2)
{
	(void)p_tsc1;
	(void)p_tsc2;

	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
}
+
/*
 * Dispatch a register write to the bus backing n_bus_type_id.
 * Mirror of read_data(): BAR/PCI writes are single-word; RAB0..2 writes
 * go through the RAC. Returns 0 on success, negative on error.
 */
static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
		     uint32_t addr, uint32_t len, const uint32_t *p_data)
{
	int rc = -1;

	assert(p_fpga_info);
	assert(p_data);

	switch (n_bus_type_id) {
	case BUS_TYPE_BAR:
	case BUS_TYPE_PCI:
		assert(len == 1); /* this backend only writes one word at a time */
		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
		rc = 0;
		break;
	case BUS_TYPE_RAB0:
		assert(p_fpga_info->mp_nthw_rac);
		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
					 p_data);
		break;
	case BUS_TYPE_RAB1:
		assert(p_fpga_info->mp_nthw_rac);
		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
					 p_data);
		break;
	case BUS_TYPE_RAB2:
		assert(p_fpga_info->mp_nthw_rac);
		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
					 p_data);
		break;
	default:
		assert(false); /* unsupported bus type */
		return -1;
	}

	return rc;
}
+
+/*
+ * FpgaMgr
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = malloc(sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t i;
+
+	/* Count fpga instance in array */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	for (i = 0; i < ARRAY_SIZE(nthw_fpga_instances); i++) {
+		if (p->mpa_fpga_prod_init[i] == NULL)
+			break;
+	}
+	p->mn_fpgas = (int)i;
+}
+
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id == n_fpga_prod &&
+				p_init->fpga_version == n_fpga_ver &&
+				p_init->fpga_revision == n_fpga_rev) {
+			{
+				nt_fpga_t *p_fpga = fpga_new();
+
+				fpga_init(p_fpga, p_init, p_fpga_info);
+				return p_fpga;
+			}
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision, fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
/* Dump the supported-FPGA table to the debug log. */
void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
{
	int i;

	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
	for (i = 0; i < p->mn_fpgas; i++) {
		/* _unused presumably silences warnings when NT_LOG compiles
		 * out at low log levels - confirm against the macro def
		 */
		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
		       p_init->fpga_revision);
	}
}
+
+/*
+ * Fpga
+ */
+nt_fpga_t *fpga_new(void)
+{
+	nt_fpga_t *p = malloc(sizeof(nt_fpga_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_fpga_t));
+	return p;
+}
+
+void fpga_delete(nt_fpga_t *p)
+{
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	fpga_delete(p);
+}
+
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod->m_mod_id == id && p_mod->m_instance == instance)
+			return p_mod;
+	}
+	return NULL;
+}
+
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return (fpga_query_module(p, id, instance) != NULL);
+}
+
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mp_init->nb_modules; i++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[i];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/* Look up a product parameter value by id; n_default_value when not found. */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int n_idx;
+
+	for (n_idx = 0; n_idx < p->mn_params; n_idx++) {
+		const nt_param_t *p_cur_param = p->mpa_params[n_idx];
+
+		if (p_cur_param->param_id == n_param_id)
+			return p_cur_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Product id of the FPGA image (from the static product descriptor). */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* FPGA image major version. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* FPGA image revision (minor version). */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/*
+ * Log the FPGA identity string at info level.
+ * NOTE(review): the parameter is tagged _unused but is in fact used;
+ * presumably the attribute only silences builds where NT_LOG compiles
+ * to nothing - confirm the macro's definition.
+ */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Dump the FPGA summary plus all parameters and modules at debug level. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Log every product parameter of the FPGA at debug level. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int n_idx;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (n_idx = 0; n_idx < p->mn_params; n_idx++)
+		param_dump(p->mpa_params[n_idx]);
+}
+
+/* Log every module of the FPGA at debug level. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int n_idx;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (n_idx = 0; n_idx < p->mn_modules; n_idx++)
+		module_dump(p->mpa_modules[n_idx]);
+}
+
+/*
+ * Param
+ */
+/* Allocate an uninitialized parameter object; NULL on allocation failure. */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = malloc(sizeof(nt_param_t));
+	return p;
+}
+
+/* Scrub and free a parameter object; a NULL argument is ignored. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a parameter to its owner and copy id/value from the descriptor. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Log a parameter id/value pair at debug level. */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/* Allocate an uninitialized module object; NULL on allocation failure. */
+nt_module_t *module_new(void)
+{
+	return malloc(sizeof(nt_module_t));
+}
+
+/*
+ * Release a module: delete every owned register, free the register
+ * pointer table, then scrub and free the module object itself.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	/*
+	 * Free the pointer table allocated in module_init().  Only valid
+	 * when mn_registers != 0 - otherwise the member was never assigned.
+	 */
+	if (p->mn_registers)
+		free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialize a module from its static product descriptor: copy ids,
+ * version, bus id and base address, inherit the owner's debug mode,
+ * and instantiate every register listed in the descriptor.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				/*
+				 * NOTE(review): register_new() may return NULL on
+				 * OOM and register_init() would dereference it -
+				 * confirm the intended allocation-failure policy.
+				 */
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Initialize a module by looking up its descriptor on the FPGA, then
+ * override the debug mode inherited from the owner.
+ * NOTE(review): fpga_lookup_init() can return NULL and module_init()
+ * does not guard against a NULL descriptor - confirm all callers pass
+ * known-good id/instance pairs.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* Set debug mode after the regular init, which inherits the owner's mode */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Log the module summary and all of its registers at debug level. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Log every register of the module at debug level. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int n_idx;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (n_idx = 0; n_idx < p->mn_registers; n_idx++)
+		register_dump(p->mpa_registers[n_idx]);
+}
+
+/* Major version of the module as declared in the FPGA description. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Minor version of the module as declared in the FPGA description. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack major/minor version into one 64-bit value: major in the high word. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	const uint64_t n_major = (uint64_t)p->m_major_version & 0xFFFFFFFF;
+	const uint64_t n_minor = (uint64_t)p->m_minor_version & 0xFFFFFFFF;
+
+	return (n_major << 32) | n_minor;
+}
+
+/* True when the module version is at least (major_version, minor_version). */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (p->m_major_version != major_version)
+		return p->m_major_version >= major_version;
+	return p->m_minor_version >= minor_version;
+}
+
+/* Linear scan for a register by id; NULL when not present. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n_idx;
+
+	for (n_idx = 0; n_idx < p->mn_registers; n_idx++) {
+		if (p->mpa_registers[n_idx]->m_id == id)
+			return p->mpa_registers[n_idx];
+	}
+	return NULL;
+}
+
+/*
+ * Look up a register by id, logging an error when the module context
+ * is NULL or the register is not part of the module.  Use
+ * module_query_register() for a silent lookup.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Silent register lookup: NULL when absent, no error logging. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Current debug mode of the module (enum debug_mode bit mask). */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set debug mode on the module and propagate it to all of its registers. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int n_reg_idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n_reg_idx = 0; n_reg_idx < p->mn_registers; n_reg_idx++) {
+		nt_register_t *p_cur_reg = p->mpa_registers[n_reg_idx];
+
+		if (p_cur_reg != NULL)
+			register_set_debug_mode(p_cur_reg, n_debug_mode);
+	}
+}
+
+/* Bus id the module is accessed through (copied from the descriptor's bus_id). */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Base address of the module's register window. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/*
+ * Log that a module present in the FPGA description is not supported
+ * by this driver.  (Function name typo is kept: it is part of the
+ * public header and callers depend on it.)
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* Terminate with '\n' like every other NT_LOG message in this file */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/* Allocate an uninitialized register object; NULL on allocation failure. */
+nt_register_t *register_new(void)
+{
+	return malloc(sizeof(nt_register_t));
+}
+
+/*
+ * Release a register: delete every owned field, free the field pointer
+ * table and the shadow/dirty buffers, then scrub and free the object.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+
+	/*
+	 * Free the pointer table allocated in register_init().  Only valid
+	 * when mn_fields != 0 - otherwise the member was never assigned.
+	 */
+	if (p->mn_fields)
+		free(p->mpa_fields);
+
+	if (p->mp_shadow)
+		free(p->mp_shadow);
+
+	if (p->mp_dirty)
+		free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Initialize a register from its static descriptor: address/width
+ * bookkeeping, field objects, and the shadow/dirty buffers used for
+ * cached reads and deferred writes.
+ * NOTE(review): mp_shadow/mp_dirty are only allocated when the field
+ * table allocation succeeds and nb_fields != 0; a field-less register
+ * leaves them unassigned - confirm no such register is ever accessed.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	/* Length in 32-bit words, rounded up from the bit width */
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* Cached copy of the register contents (m_len words) */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			/* Per-word modified-but-not-flushed flags */
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Log the register summary and all of its fields at debug level. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Log all fields of the register at debug level. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int n_idx;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (n_idx = 0; n_idx < p->mn_fields; n_idx++)
+		field_dump(p->mpa_fields[n_idx]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute bus address of the register (module base + relative address). */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Write every field's reset value into the register's shadow buffer. */
+void register_reset(const nt_register_t *p)
+{
+	int n_fld_idx;
+
+	for (n_fld_idx = 0; n_fld_idx < p->mn_fields; n_fld_idx++) {
+		nt_field_t *p_cur_fld = p->mpa_fields[n_fld_idx];
+
+		if (p_cur_fld != NULL)
+			field_reset(p_cur_fld);
+	}
+}
+
+/* Linear scan for a field by id; NULL for a NULL register or missing id. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int n_idx;
+
+	if (p == NULL)
+		return NULL;
+
+	for (n_idx = 0; n_idx < p->mn_fields; n_idx++) {
+		if (p->mpa_fields[n_idx]->m_id == id)
+			return p->mpa_fields[n_idx];
+	}
+	return NULL;
+}
+
+/*
+ * Look up a field by id, logging an error when the register context is
+ * NULL or the field is not part of the register.  Use
+ * register_query_field() for a silent lookup.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Silent field lookup: NULL when absent, no error logging. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Bit width of the register as declared in the FPGA description. */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Address of the register relative to its module's base address. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/*
+ * Current debug mode.  NOTE(review): the parameter type is nt_module_t
+ * (matching the header prototype), which looks like a copy-paste from
+ * module_get_debug_mode - both structs expose m_debug_mode, so it
+ * works, but confirm the intended type.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+/* Set debug mode on the register and propagate it to all of its fields. */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int n_fld_idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n_fld_idx = 0; n_fld_idx < p->mn_fields; n_fld_idx++) {
+		nt_field_t *p_cur_fld = p->mpa_fields[n_fld_idx];
+
+		if (p_cur_fld != NULL)
+			field_set_debug_mode(p_cur_fld, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register contents from HW into the shadow buffer.
+ * Returns the bus read status (0 on success).
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+	int n_bus_type_id;
+	uint32_t *p_data;
+
+	/*
+	 * Validate the ownership chain BEFORE dereferencing it - the
+	 * original code called module_get_bus(p->mp_owner) ahead of the
+	 * NULL checks, making them ineffective.
+	 */
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+
+	n_bus_type_id = module_get_bus(p->mp_owner);
+	p_data = p->mp_shadow;
+	assert(p_data);
+
+	return read_data(p_fpga_info, n_bus_type_id, p->m_addr, p->m_len,
+			 p_data);
+}
+
+/*
+ * Read the register contents from HW into the shadow buffer, capturing
+ * TSC timestamps around the bus access.  Returns the bus read status.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+	int n_bus_type_id;
+	uint32_t *p_data;
+
+	/* Validate the ownership chain before dereferencing it */
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+
+	n_bus_type_id = module_get_bus(p->mp_owner);
+	p_data = p->mp_shadow;
+	assert(p_data);
+
+	return read_data_tsc(p_fpga_info, n_bus_type_id, p->m_addr, p->m_len,
+			     p_data, p_tsc1, p_tsc2);
+}
+
+/*
+ * Write cnt repetitions of the shadow buffer (m_len words each) to HW.
+ * Returns the bus write status (0 on success).
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+	int n_bus_type_id;
+	uint32_t *p_data;
+
+	/* Validate the ownership chain before dereferencing it */
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+
+	n_bus_type_id = module_get_bus(p->mp_owner);
+	p_data = p->mp_shadow;
+	assert(p_data);
+
+	return write_data(p_fpga_info, n_bus_type_id, p->m_addr,
+			  (p->m_len * cnt), p_data);
+}
+
+/*
+ * Copy up to len words of the shadow value into p_data.
+ * len == (uint32_t)-1 selects the full register length; larger
+ * requests are clamped to m_len.  Does not touch HW - call
+ * register_update() first for a fresh value.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (i = 0; i < len; i++)
+		p_data[i] = p->mp_shadow[i];
+}
+
+/* Convenience accessor for the first 32-bit word of the shadow value. */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Refresh the shadow buffer from HW.  Write-only registers cannot be
+ * read back and are skipped.  With ON_READ debug mode the fetched
+ * words are logged.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Read the register from HW, then return its first 32-bit word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every word of the register as modified-but-not-flushed. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+/*
+ * Copy data into the register's shadow buffer (no HW access).
+ * len == (uint32_t)-1 selects the full register length; larger
+ * requests are clamped to m_len.
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	/*
+	 * Clamp BEFORE asserting the bound, matching register_get_val():
+	 * the original asserted first, so the documented (uint32_t)-1
+	 * "whole register" value tripped the assert in debug builds.
+	 */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Set the shadow value, then immediately write it to HW. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write the shadow buffer to HW (cnt back-to-back repetitions of the
+ * register contents) and clear the dirty flags.  Read-only registers
+ * are never written.  With ON_WRITE debug mode the data is logged.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t n_words = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, n_words);
+			while (n_words--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/*
+		 * Clear dirty flags.  mp_dirty holds m_len entries, so bound
+		 * the loop by len as well - the original indexed by cnt alone
+		 * and could write past the array when cnt > m_len.
+		 */
+		for (i = 0; i < cnt && i < len; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a HW read and report the TSC timestamps around the access. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the shadow value and mark the register dirty (not flushed to HW). */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set all shadow bits to 1 and mark the register dirty (not flushed to HW). */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/* Allocate an uninitialized field object; NULL on allocation failure. */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = malloc(sizeof(nt_field_t));
+	return p;
+}
+
+/* Scrub and free a field object.  NOTE(review): no NULL guard - memset
+ * on NULL is undefined behavior; confirm callers never pass NULL.
+ */
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
+/*
+ * Initialize a field descriptor: position/width bookkeeping plus the
+ * decomposition of the field into a front mask (bits within the first
+ * shadow word), a body (whole 32-bit words) and a tail mask (remaining
+ * bits in the last word).  field_get_val()/field_set_val() use this
+ * decomposition to (un)pack the field from the register shadow.
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	/* Number of 32-bit words needed to hold the field value */
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* Bits available in the first word, from m_first_bit upward */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		/* 1ULL keeps the shift defined when front_mask_length == 32 */
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* bits_remaining < 32 here, so a plain 1 << is safe */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/*
+ * Current debug mode.  NOTE(review): the parameter type is nt_module_t
+ * (matching the header prototype) - looks like a copy-paste; both
+ * structs expose m_debug_mode, so it works, but confirm the intent.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set debug mode on this field only (registers/modules not affected). */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* Width of the field in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Lowest bit position of the field within the register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Highest bit position of the field within the register (inclusive). */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/* Mask of the field's bits within its first shadow word (not shifted down). */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Write the field's declared reset value into the shadow buffer. */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Value-aligned mask of the field (front mask shifted down to bit 0). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+/* The field's declared reset value. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field value from the owning register's shadow buffer
+ * into p_data (len must equal mn_words).  A 64-bit buffer (buf.w64)
+ * acts as a shift funnel that realigns the field from m_first_bit
+ * down to bit 0, one 32-bit word at a time.  Does not touch HW.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Inverse of field_get_val(): pack p_data (len must equal mn_words)
+ * into the owning register's shadow buffer, using the same 64-bit
+ * shift funnel to realign the value up to m_first_bit.  The first and
+ * last words are read-modify-written through the front/tail masks so
+ * neighboring fields are preserved.  Marks the register dirty; no HW
+ * access - call field_flush_register() to write out.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Set the field value in the shadow, then flush the whole register to HW. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/*
+ * First 32 bits of the field's shadow value.
+ * NOTE(review): passes len=1 to field_get_val(), whose assert requires
+ * len == mn_words - confirm this is only used on fields <= 32 bits.
+ */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+/* Re-read the owning register from HW, then return the field's 32-bit value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+/* Trigger a read of the owning register, reporting TSC timestamps. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Re-read the owning register's shadow from HW. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the owning register's shadow to HW. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set the field's 32-bit value in the shadow (no HW access). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Set the field's 32-bit value and flush the owning register to HW. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear all field bits in the shadow; only valid for fields <= 32 bits. */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+/* Clear all field bits and flush the owning register to HW. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set all field bits in the shadow; only valid for fields <= 32 bits. */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set all field bits and flush the owning register to HW. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Poll conditions recognized by field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,
+	FIELD_MATCH_SET_ALL,
+	FIELD_MATCH_CLR_ANY,
+	FIELD_MATCH_SET_ANY,
+};
+
+/*
+ * Poll the field (re-reading from HW each iteration) until the
+ * requested match condition holds or the iteration budget is spent.
+ * -1 selects the defaults: 10000 polls at 100 usec intervals.
+ * Returns 0 on match, -1 on timeout.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	/* 1ULL keeps the shift defined for fields that are 32 bits wide */
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(((1ULL << p->mn_bit_width) - 1)
+				   << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Wait until every bit of the field reads 1; see field_wait_cond32(). */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Wait until every bit of the field reads 0; see field_wait_cond32(). */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Wait until at least one bit of the field reads 1; see field_wait_cond32(). */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Wait until at least one bit of the field reads 0; see field_wait_cond32(). */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until its value equals (n_wait_cond_value &
+ * n_wait_cond_mask) or the iteration budget is spent (-1 selects the
+ * defaults: 10000 polls at 100 usec).  Returns 0 on match, -1 on
+ * timeout.
+ * NOTE(review): the value read back is compared unmasked - it looks
+ * like `(val & n_wait_cond_mask)` was intended on the left-hand side;
+ * confirm against callers before changing.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Log the field's position/width bookkeeping at debug level. */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/* Log the field's current shadow value (all words, most significant first). */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Log a static field descriptor at debug level. */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Look up the FPGA model matching n_fpga_ident via a temporary FPGA
+ * manager.  Returns NULL (after logging an error) when the image is
+ * not among the supported FPGAs; the manager is always deleted before
+ * returning.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+/* Thin wrapper: module lookup by id/instance (NULL when absent). */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, n_mod, n_instance);
+	return p_mod;
+}
+
+/* Thin wrapper: register lookup by id, with error logging (NULL when absent). */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	nt_register_t *p_reg = module_get_register(p_mod, n_reg);
+	return p_reg;
+}
+
+/* Thin wrapper: field lookup by id, with error logging (NULL when absent). */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	nt_field_t *p_fld = register_get_field(p_reg, n_fld);
+	return p_fld;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+#ifndef FPGAID_TO_PRODUCTCODE
+/* Unpack the packed 64-bit FPGA ident: type, product code, version, revision */
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack major/minor into one 64-bit version value (major in the high word) */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug logging trigger: log nothing, on register reads, or on writes */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module's registers can be addressed through */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of all FPGA product descriptions known to the driver. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/*
+ * Runtime model of one FPGA: identity/version info plus owned arrays
+ * of product parameters and modules built from mp_init.
+ */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One product parameter (id/value pair) of an FPGA. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* One module instance: id, version, bus/base address and owned registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers;
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/*
+ * One register: address/width bookkeeping, its fields, plus the cached
+ * shadow buffer (m_len words) and per-word dirty flags.
+ */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;
+	uint32_t m_addr;
+	uint32_t m_type;
+	uint32_t m_len;
+
+	int m_debug_mode;
+
+	int mn_fields;
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;
+	bool *mp_dirty;
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/*
+ * One field within a register: bit position/width plus the front-mask /
+ * body / tail-mask decomposition used by field_get_val()/field_set_val().
+ */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word;
+	uint32_t m_first_bit;
+	uint32_t m_front_mask;
+	uint32_t m_body_length;
+	uint32_t mn_words;
+	uint32_t m_tail_mask;
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a true array; not valid on pointers/decayed parameters. */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a PCI device id to the logical adapter family.
+ * Several boards share one family id; unknown devices map to
+ * NT_HW_ADAPTER_ID_UNKNOWN.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	/* NT40E3, NT40A00 and NT40A01 are all handled as the NT40E3 family */
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/*
+ * Logical adapter families. Several PCI device ids map to a single family
+ * (see nthw_platform_get_nthw_adapter_id()).
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/*
+	 * NOTE(review): NT40A01 is deliberately an alias of NT40E3, so it does
+	 * not occupy its own enumerator value - confirm this is intended
+	 * before renumbering or reordering this enum.
+	 */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA product profile selector; 0 is reserved for "not yet detected". */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..fc50b1a50b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ * Returns NULL when allocation fails - callers must check the result.
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	/*
+	 * calloc zeroes the struct and, unlike the previous malloc+memset,
+	 * does not dereference a NULL pointer when allocation fails.
+	 */
+	return calloc(1, sizeof(nthw_rac_t));
+}
+
+/* Scrub and release a RAC instance; a NULL pointer is a no-op. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Clear the struct before freeing (guards against stale reuse). */
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Initialize the RAC (Register Access Control) instance: resolve all RAC
+ * registers/fields in the FPGA model and cache their addresses, masks and
+ * bit positions for the fast register/DMA access paths.
+ *
+ * When p == NULL the function only probes for the RAC module.
+ * Returns 0 on success, -1 when the RAC module instance is missing.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/*
+	 * Fetch the RAB_INIT register before the debug-mode tweak below.
+	 * The original code passed a still-NULL p->mp_reg_rab_init to
+	 * register_set_debug_mode() (the field was only assigned later).
+	 */
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/* Fixed copy/paste bug: was clearing mp_reg_dbg_data again */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache register byte addresses for the raw BAR0 access helpers */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* NMB registers are optional; cache addresses only when present */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces as read from the product parameters in init. */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-poll the out-buffer "used" counter until at least word_cnt words are
+ * available. Returns 0 on success, -1 after 100000 unsuccessful polls.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	uint32_t retry;
+
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			return 0;
+	}
+
+	/* Poll budget exhausted - the bus never produced enough words */
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Busses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+/*
+ * Write the RAB init mask; bit n resets the programmable registers on RAB
+ * interface n. Always returns 0.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Perform the RAB "flip/flip" reset sequence over all RAB interfaces:
+ * clear the init mask, assert it for every interface, then release all
+ * interfaces except interface 0. Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_intf_cnt = nthw_rac_get_rab_interface_count(p);
+	const int n_intf_mask = (1 << n_intf_cnt) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_intf_cnt, n_intf_mask);
+	assert(n_intf_cnt);
+	assert(n_intf_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_intf_mask);
+	nthw_rac_rab_init(p, n_intf_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the shared DMA ring memory and program its physical
+ * addresses into the adapter, then sync the software ring pointers with the
+ * hardware ones. Returns 0 on success, -1 when DMA allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* One contiguous area holding both the in-ring and the out-ring */
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		/* in-ring first, out-ring follows immediately after */
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Begin a RAB DMA transaction. Takes p->m_mutex and keeps it held on
+ * success; nthw_rac_rab_dma_commit() releases it. Returns -1 (with the
+ * mutex released) if a DMA transaction is already in flight.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		pthread_mutex_unlock(&p->m_mutex);
+		NT_LOG(ERR, NTHW,
+		       "%s: DMA begin requested, but a DMA transaction is already active\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	p->m_dma_active = true;
+
+	return 0;
+}
+
+/*
+ * Terminate the queued command stream with a completion word and kick the
+ * hardware by publishing the new in-ring write pointer.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll (1 us per iteration, up to RAB_DMA_WAIT iterations) for the hardware
+ * to echo the completion word into the out-ring, then advance the out-ring
+ * read pointer and replenish the in-ring free count.
+ * Returns 0 on success, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word and reset the free-slot budget */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Commit the queued DMA commands: start the transfer, wait for completion,
+ * and release p->m_mutex taken by nthw_rac_rab_dma_begin().
+ * Returns the wait result (0 on success, -1 on timeout) or -1 when called
+ * without a preceding successful begin.
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+
+	p->m_dma_active = false;
+
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* Single 32-bit MMIO read from BAR0 at byte offset reg_addr. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *mmio_reg =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					   reg_addr);
+	*p_data = *mmio_reg;
+}
+
+/* Single 32-bit MMIO write to BAR0 at byte offset reg_addr. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *mmio_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				     reg_addr);
+	*mmio_reg = p_data;
+}
+
+/*
+ * Queue a RAB write of 'word_cnt' words (1..256) to 'address' on bus
+ * 'bus_id' into the DMA in-ring; the transfer starts only when
+ * nthw_rac_rab_dma_commit() is called.
+ * Returns 0 on success, -1 on bad length or when the in-ring is full.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): room is checked for word_cnt + 3 slots but only
+	 * word_cnt + 1 are consumed - the extra 2 are presumably reserved
+	 * for the completion command written by dma_activate(); confirm.
+	 */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy payload words into the ring, wrapping at RAB_DMA_BUF_CNT */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read of 'word_cnt' words (1..256) from 'address' on bus
+ * 'bus_id' into the DMA in-ring, and return via 'buf_ptr' where the data
+ * will land in the out-ring once nthw_rac_rab_dma_commit() has completed.
+ * Returns 0 on success, -1 on bad length or when the in-ring is full.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fixed: the original format string ended in a dangling
+		 * ": 0x%08X" with no matching argument (undefined behavior
+		 * in printf-style formatting) and lacked a newline.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Only the command word occupies the in-ring for a read */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand back where the read data will appear in the out-ring */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Write 'word_cnt' 32-bit words to 'address' on RAB bus 'bus_id' using the
+ * register-based (non-DMA) in/out buffers. Serialized via p->m_mutex and
+ * rejected while a DMA transaction is active.
+ * Returns 0 on success, -1 on parameter or bus errors.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/*
+	 * The address field is RAB_ADDR_BW bits wide, so the valid range is
+	 * 0..(1 << RAB_ADDR_BW) - 1. The original '>' comparison accepted
+	 * the out-of-range value (1 << RAB_ADDR_BW), which would silently
+	 * truncate to 0 when packed into the command word below.
+	 */
+	if (address >= (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	/* Same off-by-one fix for the RAB_BUSID_BW-bit bus-id field */
+	if (bus_id >= (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): word_cnt == (1 << RAB_CNT_BW) (256) is accepted and
+	 * its packed count field wraps to 0 - presumably the hardware decodes
+	 * a count of 0 as 256 words; confirm against the RAB specification.
+	 */
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..7e782516af
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,159 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_RAC_H__
#define __NTHW_RAC_H__

#include "nt_util.h"
#include "nthw_bus.h"

/*
 * Register Access Controller (RAC) context.
 *
 * Wraps the FPGA RAC module, which provides access to registers behind the
 * RAB (Register Access Bus) interfaces — either via the IB/OB data registers
 * or via a DMA ring.  m_mutex serializes all RAB transactions on this
 * instance.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_rac;

	/* Serializes all RAB bus transactions (read/write/flush/DMA) */
	pthread_mutex_t m_mutex;

	int mn_param_rac_rab_interfaces;
	/* Non-zero: OB data register must be re-armed (written 0) after each read */
	int mn_param_rac_rab_ob_update;

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	int mn_fld_rab_init_bw;
	uint32_t mn_fld_rab_init_mask;

	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	/* IB (input buffer) / OB (output buffer) data registers */
	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	/* DMA ring base address (lo/hi) and read/write pointer registers */
	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Cached absolute register addresses (resolved once at init) */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Cached field masks for the buf-free/buf-used registers */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* DMA transaction state; valid between dma_begin() and dma_commit() */
	bool m_dma_active;

	struct nt_dma_s *m_dma;

	volatile uint32_t *m_dma_in_buf;
	volatile uint32_t *m_dma_out_buf;

	uint16_t m_dma_out_ptr_rd;
	uint16_t m_dma_in_ptr_wr;
	uint32_t m_in_free;
};

typedef struct nthw_rac nthw_rac_t;
typedef struct nthw_rac nthw_rac;

/* Cursor into a DMA ring buffer: base pointer, wrap size and current index */
struct dma_buf_ptr {
	uint32_t size;
	uint32_t index;
	volatile uint32_t *base;
};

nthw_rac_t *nthw_rac_new(void);
void nthw_rac_delete(nthw_rac_t *p);
int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);

int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);

int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);

int nthw_rac_rab_setup(nthw_rac_t *p);

int nthw_rac_rab_reset(nthw_rac_t *p);

/* All rab_* accessors return 0 on success and -1 on error */
int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			uint32_t word_cnt, const uint32_t *p_data);
int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			    uint32_t word_cnt, const uint32_t *p_data);
int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
		       uint32_t word_cnt, uint32_t *p_data);
int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);

int nthw_rac_rab_flush(nthw_rac_t *p);

int nthw_rac_rab_dma_begin(nthw_rac_t *p);
int nthw_rac_rab_dma_commit(nthw_rac_t *p);

/* Raw BAR register access helpers used by the rab_* functions */
void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
		       uint32_t *p_data);
void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
			uint32_t p_data);

#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_STAT_H__
#define __NTHW_STAT_H__

/*
 * STA (statistics) module context.
 *
 * Caches the counter layout derived by nthw_stat_init() from the module
 * version and product parameters, plus the registers/fields and DMA
 * addresses used to collect the counters.
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_stat;
	int mn_instance;

	/* Counter-block layout version (1..6), derived from module version */
	int mn_stat_layout_version;

	bool mb_is_vswitch;
	bool mb_has_tx_stats;

	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	int m_dbs_present;

	int m_rx_port_replicate;

	/* Per-category counter counts; see nthw_stat_init() for derivation */
	int m_nb_color_counters;

	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	/* Total number of 32-bit counters in the DMA area */
	int m_nb_counters;

	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	/* NULL on STA v0.3+ (field removed) */
	nt_field_t *mp_fld_tx_disable;

	nt_field_t *mp_fld_cnt_freeze;

	nt_field_t *mp_fld_stat_toggle_missed;

	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;

	/* Timestamp word placed right after the counters in the DMA area */
	uint64_t *mp_timestamp;
};

typedef struct nthw_stat nthw_stat_t;
typedef struct nthw_stat nthw_stat;

nthw_stat_t *nthw_stat_new(void);
int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_stat_delete(nthw_stat_t *p);

int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
			   uint32_t *p_stat_dma_virtual);
int nthw_stat_trigger(nthw_stat_t *p);

#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef NTOSS_SYSTEM_NTLOG_H
#define NTOSS_SYSTEM_NTLOG_H

#include <stdarg.h>
#include <stdint.h>

#ifndef NT_LOG_MODULE_PREFIX

/* DPDK modules */
#define NT_LOG_MODULE_EAL 0
#define NT_LOG_MODULE_MALLOC 1
#define NT_LOG_MODULE_RING 2
#define NT_LOG_MODULE_MEMPOOL 3
#define NT_LOG_MODULE_TIMER 4
#define NT_LOG_MODULE_PMD 5
#define NT_LOG_MODULE_HASH 6
#define NT_LOG_MODULE_LPM 7
#define NT_LOG_MODULE_KNI 8
#define NT_LOG_MODULE_ACL 9
#define NT_LOG_MODULE_POWER 10
#define NT_LOG_MODULE_METER 11
#define NT_LOG_MODULE_SCHED 12
#define NT_LOG_MODULE_PORT 13
#define NT_LOG_MODULE_TABLE 14
#define NT_LOG_MODULE_PIPELINE 15
#define NT_LOG_MODULE_MBUF 16
#define NT_LOG_MODULE_CRYPTODEV 17
#define NT_LOG_MODULE_EFD 18
#define NT_LOG_MODULE_EVENTDEV 19
#define NT_LOG_MODULE_GSO 20
#define NT_LOG_MODULE_USER1 24
#define NT_LOG_MODULE_USER2 25
#define NT_LOG_MODULE_USER3 26
#define NT_LOG_MODULE_USER4 27
#define NT_LOG_MODULE_USER5 28
#define NT_LOG_MODULE_USER6 29
#define NT_LOG_MODULE_USER7 30
#define NT_LOG_MODULE_USER8 31

/* NT modules */
#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
#define NT_LOG_MODULE_NTHW 10001
#define NT_LOG_MODULE_FILTER 10002
#define NT_LOG_MODULE_VDPA 10003
#define NT_LOG_MODULE_FPGA 10004
#define NT_LOG_MODULE_NTCONNECT 10005
#define NT_LOG_MODULE_ETHDEV 10006
#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */

#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type

#endif

/*
 * Compile-time level filtering: each NT_LOG_ENABLE_<LEVEL> flag selects
 * whether NT_LOG(<LEVEL>, ...) expands to a nt_log() call or to nothing.
 * By default ERR/WRN/INF/DBG are compiled in and DB1/DB2 are compiled out.
 */
#ifndef NT_LOG_ENABLE
#define NT_LOG_ENABLE 1
#endif

#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
#ifndef NT_LOG_ENABLE_ERR
#define NT_LOG_ENABLE_ERR 1
#endif
#ifndef NT_LOG_ENABLE_WRN
#define NT_LOG_ENABLE_WRN 1
#endif
#ifndef NT_LOG_ENABLE_INF
#define NT_LOG_ENABLE_INF 1
#endif
#ifndef NT_LOG_ENABLE_DBG
#define NT_LOG_ENABLE_DBG 1
#endif
#ifndef NT_LOG_ENABLE_DB1
#define NT_LOG_ENABLE_DB1 0
#endif
#ifndef NT_LOG_ENABLE_DB2
#define NT_LOG_ENABLE_DB2 0
#endif
#endif

#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_ERR(...)
#endif

#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_WRN(...)
#endif

#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_INF(...)
#endif

#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_DBG(...)
#endif

#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_DB1(...)
#endif

#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_DB2(...)
#endif

/*
 * NT_LOG(ERR, NTHW, "fmt", ...) — prefixes the format string with
 * "<module>: <level>: " via token stringification and dispatches to the
 * per-level macro above (which may compile to nothing).
 */
#define NT_LOG(level, module, ...)                                          \
	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
			      #module ": " #level ": " __VA_ARGS__)

enum nt_log_level {
	NT_LOG_ERR = 0x001,
	NT_LOG_WRN = 0x002,
	NT_LOG_INF = 0x004,
	NT_LOG_DBG = 0x008,
	NT_LOG_DB1 = 0x010,
	NT_LOG_DB2 = 0x020,
};

/* Pluggable log backend; installed once via nt_log_init() */
struct nt_log_impl {
	int (*init)(void);
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);
	int (*is_debug)(uint32_t module);
};

int nt_log_init(struct nt_log_impl *impl);

int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);

/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
int nt_log_is_debug(uint32_t module);

/*
 * nt log helper functions
 * to create a string for NT_LOG usage to output a one-liner log
 * to use when one single function call to NT_LOG is not optimal - that is
 * you do not know the number of parameters at programming time or it is variable
 */
char *ntlog_helper_str_alloc(const char *sinit);

void ntlog_helper_str_reset(char *s, const char *sinit);

void ntlog_helper_str_add(char *s, const char *format, ...);

void ntlog_helper_str_free(char *s);

#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..b9109754ac
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Return a pointer to the first '\n' of the trailing run of newlines in s
 * (trailing spaces are ignored), or NULL when s does not end in a newline.
 * E.g. for "hello_world\n\n\n" the returned pointer addresses the first of
 * the three trailing newlines.
 */
static char *last_trailing_eol(char *s)
{
	size_t len = strlen(s);
	int i;

	/*
	 * Guard the empty string: the original computed strlen(s) - 1 == -1
	 * and read s[-1], an out-of-bounds access.
	 */
	if (len == 0)
		return NULL;

	i = (int)len - 1;
	/* Skip spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	if (s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
/* Release a helper string from ntlog_helper_str_alloc().
 * free(NULL) is a no-op, so a NULL argument is safe.
 */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..3850ccd934
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef NTOSS_SYSTEM_NT_UTIL_H
#define NTOSS_SYSTEM_NT_UTIL_H

#include <rte_cycles.h>
#include <rte_string_fns.h>

#define _unused __rte_unused

/* Extract domain/bus/device/function from a packed 32-bit PCI identifier */
#define PCIIDENT_TO_DOMAIN(pci_ident) \
	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
#define PCIIDENT_TO_BUSNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
#define PCIIDENT_TO_DEVNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
#define PCIIDENT_TO_FUNCNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))

#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))

/* ALIGN: Align x to a boundary (a must be a power of two) */
#define ALIGN(x, a)                           \
	({                                    \
		__typeof__(x) _a = (a);       \
		((x) + (_a - 1)) & ~(_a - 1); \
	})

/* PALIGN: Align pointer p to a boundary */
#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))

/* Allocation size matching minimum alignment of specified size */
/* NOTE(review): via rte_log2_u64 this rounds _size_ up to a power of two,
 * so the result can be larger than _size_ — confirm callers expect that.
 */
#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))

#define NT_OS_WAIT_USEC(x)    \
	rte_delay_us_sleep( \
		x) /* uses usleep which schedules out the calling thread */
/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
#define NT_OS_WAIT_USEC_POLL(x) \
	rte_delay_us(        \
		x)

/* Timer-cycle based wall-clock helpers (microsecond / nanosecond scale) */
#define NT_OS_GET_TIME_US() \
	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
#define NT_OS_GET_TIME_NS() \
	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())

/* One VFIO/IOMMU-mapped DMA region: IOVA, virtual address and mapped size */
struct nt_dma_s {
	uint64_t iova;
	uint64_t addr;
	uint64_t size;
};

struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
void nt_dma_free(struct nt_dma_s *vfio_addr);

/* VFIO map/unmap callbacks, registered once via nt_util_vfio_init() */
struct nt_util_vfio_impl {
	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
			    uint64_t size);
	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
			      uint64_t size);
};

void nt_util_vfio_init(struct nt_util_vfio_impl *impl);

#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/*
+ * Register the VFIO map/unmap callbacks used by nt_dma_alloc()/nt_dma_free().
+ * The implementation struct is copied by value, so the caller's instance
+ * does not need to outlive this call.
+ */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate a DMA buffer of at least 'size' bytes on NUMA node 'numa' and
+ * map it for device access through the registered VFIO callbacks.
+ *
+ * The mapped size stored in the returned descriptor is ALIGN_SIZE(size).
+ * NOTE(review): the rte_malloc_socket() allocation is only 'size' bytes
+ * while the VFIO mapping covers ALIGN_SIZE(size) - confirm callers always
+ * pass power-of-two sizes so the two agree.
+ *
+ * Returns a descriptor holding the CPU virtual address, the IOVA and the
+ * mapped size, or NULL on allocation/mapping failure.
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size and align are uint64_t: use PRIu64/PRIX64, not %u/%X
+	 * (mismatched format specifiers are undefined behavior).
+	 */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and free a DMA buffer previously returned by nt_dma_alloc().
+ * The buffer memory and the descriptor are released even if the VFIO
+ * unmap fails (the failure is only logged as a warning).
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* size is uint64_t: use PRIu64, not %u (mismatch is UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-16 13:25 ` Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                   ` (18 subsequent siblings)
  20 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+/* Upper bound on adapters handled by the driver (sizes global arrays) */
+#define NUM_ADAPTER_MAX (8)
+/* Upper bound on ports per adapter (sizes per-adapter arrays) */
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal handler that requests every adapter monitor thread to stop.
+ * When invoked with signum == -1 (the tear-down path) it additionally
+ * joins each thread that was running and clears its pthread_t slot.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(monitor_task_is_running); idx++) {
+		const int was_running = monitor_task_is_running[idx];
+
+		/* Request stop first; the thread polls this flag */
+		monitor_task_is_running[idx] = 0;
+
+		if (signum != -1 || was_running == 0)
+			continue;
+
+		void *thread_ret = NULL;
+
+		pthread_join(monitor_tasks[idx], &thread_ret);
+		memset(&monitor_tasks[idx], 0, sizeof(monitor_tasks[0]));
+	}
+}
+
+/*
+ * Print adapter identification to 'pfh': device name, PCI ids, FPGA
+ * type/product/version/revision and build time, debug mode, port/NIM
+ * counts and HW platform info, followed by the statistics dump.
+ * Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * Create and initialize the SPI v3 device used while setting up the AVR
+ * sensor monitor.  Returns NULL if allocation or initialization fails.
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *p_spi = nthw_spi_v3_new();
+
+	if (p_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(p_spi);
+		return NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * Create and initialize the SPI device used for reading sensor values.
+ * Returns NULL if allocation or initialization fails.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *p_spi = nthw_spis_new();
+
+	if (p_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(p_spi);
+		return NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * Register the adapter's on-board sensors: the FPGA temperature sensor
+ * plus, via the AVR sensor monitor (SPI), fan speed, two PSU temperatures
+ * and the PCB temperature.  Sensors are linked into
+ * adapter->adapter_sensors and adapter->adapter_sensors_cnt is updated.
+ * NOTE(review): fpga_temperature_sensor_init() result is not checked for
+ * NULL before sensors_list_ptr->next is written - confirm it cannot fail.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				/* fixed log typo: "starteed" -> "started" */
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Bring up one NT4GA adapter: decode PCI device-id fields, build the
+ * device/adapter/port ident strings, instantiate the FPGA model
+ * (nthw_fpga_init), probe the PCI TA/TG, register sensors, initialize
+ * the product-specific link code, set up EPP (if present) and the
+ * statistics module.
+ * Returns 0 on success, otherwise the failing sub-module's error code.
+ * NOTE(review): p_dev_name / p_adapter_id_str malloc'ed below are only
+ * released by nt4ga_adapter_deinit(); on the early error returns they
+ * remain allocated - confirm callers run deinit after a failed init.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Per-port ident strings ("<adapter>:intf_<n>"), one per slot */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; fills in fpga_info fields used below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			/* TA/TG failure is non-fatal: log and continue */
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Product-specific link bring-up; ports start disabled */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP setup - only when the module is present in this FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter: stop monitor threads, stop the statistics module,
+ * shut down the FPGA model, reset the RAC/RAB flip-flop and release all
+ * ident strings and sensor lists.
+ * Returns the result of the RAC/RAB reset.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/*
+	 * Free adapter port ident strings.
+	 * nt4ga_adapter_init() allocates one string per mp_port_id_str slot,
+	 * so iterate over all slots (not just n_phy_ports) to avoid leaking
+	 * the strings of slots beyond the physical port count.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str); i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors (singly linked list) */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors (one list per physical port) */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * PCI identification and fields decoded from the PCI device id.
+ * Field encoding per DN-0060 section 9 (see nt4ga_adapter_init()).
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;	/* pci_device_id bits [11:4] */
+	int hw_product_type;	/* pci_device_id bits [3:0] */
+	int hw_reserved1;	/* pci_device_id bits [15:12] */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/*
+ * Aggregate state for one adapter instance: sub-module states, decoded
+ * HW/FPGA info, sensor lists and ident strings.
+ */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg; /* PCI TA/TG (throughput test) */
+	struct nt4ga_stat_s nt4ga_stat;           /* statistics module */
+	struct nt4ga_filter_s nt4ga_filter;       /* filter/flow state */
+	struct nt4ga_link_s nt4ga_link;           /* link/port state */
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	/* Sensor lists (linked via the groups' next pointers) and counts */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* Ident strings; malloc'ed in nt4ga_adapter_init, freed in deinit */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Per-adapter filter/flow state */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt; /* interface count */
+	int n_queues_per_intf_cnt; /* queues available per interface */
+
+	struct flow_nic_dev *mp_flow_device; /* opaque flow device handle */
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port.
+ * Note: 'p' is dereferenced below, so it must not carry the _unused
+ * attribute (fixed); only 'port' is genuinely unused.
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: NIM presence, as last recorded in the per-port link state
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: administrative state
+ * Enabling a port (adm_state == true) clears its per-port disable flag.
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+/* port: administrative state - true when the port is not disabled */
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ * Setting link status is (currently) implemented as controlling the
+ * port administrative state.
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+/* port: link-up status as last recorded in the per-port link state */
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ * Records the requested speed both as a pending port action and in the
+ * current link info.
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+/* port: currently recorded link speed */
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code: this is a no-op stub.
+ * The _unused dummy local only references the parameters to avoid
+ * unused-parameter warnings.
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+}
+
+/*
+ * port: link autoneg - stub that always reports autoneg enabled,
+ * since autoneg control is not yet implemented by the link code.
+ */
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code; only records the request
+ * as a pending port action.
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+/* port: currently recorded link duplex mode */
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ * Stores the requested loopback mode as a pending port action.
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+/* port: currently requested loopback mode */
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ * Returns a by-value copy of the port's NIM I2C context (taken from the
+ * 100G variant of the link union).
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nim_i2c_ctx_t nim_ctx = p_link->u.var100g.nim_ctx[port];
+	return nim_ctx;
+}
+
+/*
+ * port: TX power (laser) control
+ * disable == true turns the TX laser off for the given port.
+ * Returns 0 on success, 1 if the NIM laser-control call failed, and -1
+ * when the port type is not a supported QSFP28 variant.
+ * NOTE(review): the port type is read via u.nim_ctx[] while the context
+ * passed on is u.var100g.nim_ctx[] - verify both union views alias the
+ * same storage.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		/* rx-only NIMs have no TX laser to control */
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link status as observed by the link monitoring thread
+ */
+typedef struct link_state_s {
+	bool link_disabled; /* Port is administratively disabled */
+	bool nim_present; /* A NIM module is currently detected on the port */
+	bool lh_nim_absent; /* Set when no NIM module is detected */
+	bool link_up; /* PHY reports link up */
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Current link parameters for one port */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Requested (administrative) settings; applied by the monitoring thread */
+typedef struct port_action_s {
+	bool port_disable; /* Request to administratively disable the port */
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode; /* Loopback: 1 = host, 2 = line, other = off */
+} port_action_t;
+
+/* Per-port HW contexts for the 100G adapter variant */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* Set once the per-port HW contexts below have been initialized */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port: administrative state
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polarity = true;
+
+	if (mode == 1) {
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polarity = false;
+	} else if (mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+	} else if (last_mode == 1) {
+		NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+	} else if (last_mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+	}
+	/* Any other mode/last_mode combination: nothing to apply or remove. */
+
+	/* NT200A01 hw2 and NT200A02 need the GTY polarity swap (re)applied */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polarity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear error counters once the RX path is out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	/* Start from a clean state; only fields set below are meaningful. */
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	/* Read the full link summary from the MAC/PCS in one go */
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/*
+		 * Log the summary, but only when it differs from the last one
+		 * logged for this adapter/port, to avoid flooding the log.
+		 * NOTE(review): lsbuf is static shared state; assumes a single
+		 * monitoring thread per adapter — confirm.
+		 */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	/* A NIM is present when the module-present GPIO reports it. */
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	/* Turn on the MAC/PCS receive path. */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	/* Turn on the MAC/PCS transmit path and select host as TX source. */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	/* Turn off the MAC/PCS receive path. */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	/* Turn off the MAC/PCS transmit path and deselect host TX source. */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	/* Pulse the RX path reset, allowing 10 ms for each edge to settle. */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000);
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000);
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	/*
+	 * Board-specific GTY lane polarity maps; rows are indexed by port,
+	 * columns by lane. Only ports 0 and 1 are covered.
+	 */
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+
+	/*
+	 * The tables above only have entries for ports 0-1; guard against
+	 * out-of-bounds reads for any other port number.
+	 */
+	if (port < 0 || port >= 2)
+		return -1;
+
+	for (lane = 0U; lane < 4U; lane++) {
+		/* When not swapping, force both directions back to normal. */
+		const bool tx_swap = swap ? tx_polarity_swap[port][lane] : false;
+		const bool rx_swap = swap ? rx_polarity_swap[port][lane] : false;
+
+		nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, tx_swap);
+		nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, rx_swap);
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_all_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	/* Any of these conditions means the RX path must be re-synchronized */
+	if (rst_required || ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	/* SFF-8024 identifier 17 (0x11) — presumably QSFP28; the only id supported */
+	const uint8_t valid_nim_id = 17U;
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	/* When disabling, quiesce the MAC/PCS before touching the module. */
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* Re-check presence: the module may have been pulled during the reset. */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	/* Low power asserted keeps the module (and its laser) off. */
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* Adapter/HW id decide board-specific tuning below; -1 if unknown. */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* NULL-instance init call presumably probes for a GMF module — confirm */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Board-specific GTY TX tuning: {pre, diff, post} per port/lane */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	/* Poll all ports until asked to stop via monitor_task_is_running */
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read adapter sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			/* Apply a changed loopback request before link checks */
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Log this port's NIM context (nim_ctx[i], not port 0's) */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	/* pthread entry point: run the shared link state machine. */
+	common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	/* One-time init of the per-port MAC/PCS, NIM I2C and GPIO contexts */
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	/* NOTE(review): monitor_task_is_running is set by the thread itself,
+	 * so two rapid calls could race and create two threads — confirm
+	 * callers serialize initialization.
+	 */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..aaacd11f1e
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Probe and initialize the PCI test-access (TA) and traffic-generator
+ * (RD TG / WR TG) helper modules on the adapter's FPGA.
+ *
+ * Each module is optional in a given FPGA image: a failed init is logged
+ * as a warning and counted, not treated as fatal.
+ *
+ * Returns the number of modules that were NOT found (0 = all present),
+ * or -1 if the state struct pointer is NULL.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	/* PCI read traffic generator (device->host DMA reads) */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI write traffic generator (device->host DMA writes) */
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI test-access checker (validates generated traffic) */
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (1) or disable (0) the PCI test-access checker. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+/* Read the TA length-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA payload-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI read traffic generator.
+ * The slot's DMA target is the buffer IOVA plus a slot-relative offset
+ * (slot_addr * req_size).
+ *
+ * NOTE(review): slot_addr * req_size is evaluated in 32-bit arithmetic
+ * before the widening cast; assumes the slot area never exceeds 4 GiB -
+ * confirm against TG_AREA_SIZE usage.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the PCI read TG for num_iterations runs; 0 stops it. Always returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Block until the PCI read traffic generator reports ready.
+ * Polls the ctrl-ready flag once per millisecond, giving up after
+ * 1000 attempts (~1 second).
+ * Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int attempt;
+
+	for (attempt = 1; attempt <= 1000; attempt++) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg) != 0)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+	       __func__, attempt - 1);
+	return -1;
+}
+
+/*
+ * Program one RAM slot of the PCI write traffic generator.
+ * Same addressing scheme as the read TG; 'inc' additionally selects
+ * incrementing payload data.
+ *
+ * NOTE(review): slot_addr * req_size is evaluated in 32-bit arithmetic
+ * before the widening cast; assumes the slot area never exceeds 4 GiB -
+ * confirm against TG_AREA_SIZE usage.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the PCI write TG for num_iterations runs; 0 stops it. Always returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Block until the PCI write traffic generator reports ready.
+ * Polls the ctrl-ready flag once per millisecond, giving up after
+ * 1000 attempts (~1 second).
+ * Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int attempt;
+
+	for (attempt = 1; attempt <= 1000; attempt++) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg) != 0)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+	       __func__, attempt - 1);
+	return -1;
+}
+
+/*
+ * Run one HIF/PCIe bandwidth measurement pass using the FPGA's PCI
+ * read/write traffic generators (TG) and the test-access (TA) checker.
+ *
+ * Sequence: allocate a DMA buffer, stop any running TGs, pre-fill the
+ * buffer via a single WR TG pass, verify the written pattern, run the
+ * TGs in the requested direction while sampling HIF/PCIe3 counters into
+ * 'pri'/'sla', then stop everything and free the buffer.
+ *
+ * Directions (pri->n_tg_direction): 1 = read only, 2 = write only,
+ * 3 = combined, anything else = stop.
+ *
+ * Returns 0 on success, non-zero if any step or the TA checker flagged
+ * an error.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	/* Test parameters are carried in the primary end-point struct */
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		/*
+		 * NOTE(review): returns 0 (success) on allocation failure,
+		 * so callers cannot distinguish this from a clean run -
+		 * confirm whether this is intentional.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			/* Program one TG RAM slot per packet; last slot wraps */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				/*
+				 * Payload starts at byte 8 of each packet and is an
+				 * incrementing 32-bit counter (see 'inc' in wr_tg_setup)
+				 */
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			/* NOTE(review): unconditional break makes this loop run exactly once */
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Entry point for HIF adapter throughput measurement.
+ *
+ * numa_node == UINT8_MAX selects node 0 (a single iteration is made).
+ * direction <= 0 sweeps directions 1..3 (read-only, write-only, combined);
+ * otherwise only the given direction is run. n_pkt_size, n_batch_count
+ * and n_delay fall back to the TG_* defaults when <= 0.
+ *
+ * Returns -1 when n_delay is 0; otherwise 0 (measurement errors are
+ * logged, not propagated - callers relying on the return value see
+ * success).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary end-point parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave end-point parameters */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/* Fix: was 'pri->' (copy/paste); reset the slave counter */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	/* __LINE__ is an int: use %d (was %u, a format/type mismatch) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%d] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator parameters (used when callers pass <= 0) */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles to the FPGA PCI traffic-generator and test-access modules */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* read traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* write traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta;       /* test-access checker */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Probe/init the TA and TG modules; returns count of missing modules */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Convert an inline pcap-style 32.32 timestamp (seconds in the upper
+ * 32 bits, nanoseconds in the lower 32 bits) to a nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	const uint64_t secs = ts >> 32;
+	const uint64_t frac = ts & 0xffffffffULL;
+
+	return secs * 1000000000ULL + frac;
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Harvest one snapshot of FPGA statistics into the nt4ga_stat state.
+ * Dispatches to the vswitch or capture collector depending on the
+ * FPGA image type, and refreshes last_timestamp accordingly.
+ * Always returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 *
+		 * NOTE(review): this stores a 32-bit value through mp_timestamp;
+		 * if the field is 64-bit only the low word is set - confirm.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/* Capture images expose a valid 32.32 timestamp in the DMA block */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Create the STA (statistics) and RMC (receive MAC control) module
+ * handles and cache the counter/port dimensions reported by the FPGA.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/* Avoid leaking the stat handle if the RMC allocation failed */
+		if (!p_nthw_rmc) {
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache counter dimensions discovered from the FPGA image */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and map the statistics DMA area, point the FPGA STA module
+ * at it, and allocate the host-side counter mirror structures
+ * (per-color, per-host-buffer, per-port, and - for inline images - FLM).
+ *
+ * Returns 0 on success, -1 on any allocation failure. Partially
+ * allocated state is left in p_nt4ga_stat and is released by
+ * nt4ga_stat_stop().
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Hold off RX while the statistics DMA address is being changed */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		/*
+		 * Counter area plus trailing timestamp.
+		 * NOTE(review): sizeof(p_nthw_stat->mp_timestamp) is the size
+		 * of the pointer (8 bytes), presumably intended as the size of
+		 * the 64-bit timestamp it points at - confirm.
+		 */
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * Fix: the format string had six conversions (%s %x %d %p and
+		 * two PRIX64) but only five arguments - undefined behavior.
+		 * p_dma->addr is logged with PRIX64 elsewhere, so use that.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/*
+		 * Fix: n_physical_adapter_no is an int (%02d, was %02ld) and
+		 * n_stat_size is a uint32_t (PRIX32, was PRIX64).
+		 */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so un-written counters are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick the STA module to start delivering DMA snapshots */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release every statistics resource owned by the adapter.
+ * free(NULL) is a no-op and each pointer is reset to NULL afterwards,
+ * so the function is safe on partially initialized state and is
+ * idempotent. Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* Per-port mirrors (vswitch and capture variants) */
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	/* Color, host-buffer and FLM counter mirrors */
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* DMA area; nt_dma_free() is not documented NULL-safe, so keep the guard */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Print the accumulated per-port RX/TX totals (packets, octets, drops)
+ * for every physical port to the given stream. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Accumulate one vswitch (virt v1) DMA statistics snapshot into the
+ * host-side mirror structures: color counters, host-buffer counters,
+ * then per-port RX and TX counters (walking p_stat_dma_virtual in that
+ * layout order).
+ * Returns 0 on success, -1 on NULL input or unsupported layout version.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	/*
+	 * Fix: validate the pointers BEFORE dereferencing them; the original
+	 * read p_nt4ga_stat->mp_nthw_stat (and the port counts) first and
+	 * only then tested for NULL.
+	 */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters (two DMA words per color: packets+flags, bytes) */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		/* Upper 6 bits of the packet word carry accumulated TCP flags */
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters (eight DMA words per host buffer) */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/*
+ * Collect statistics for a capability (cap) profile adapter.
+ * Reads the 32-bit counter words the FPGA has DMA'ed into
+ * p_stat_dma_virtual and accumulates them into the driver-side
+ * structures and per-port totals. Called with the stat mutex locked.
+ *
+ * Returns 0 on success, -1 on invalid arguments or an unsupported
+ * STA layout version.
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	int c, h, p;
+
+	/* Validate arguments BEFORE dereferencing them (the previous
+	 * version read p_nt4ga_stat->mp_nthw_stat first and checked after)
+	 */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two words (packets, bytes) per color */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: eight words per RX host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		struct host_buffer_counters *hb =
+			&p_nt4ga_stat->mp_stat_structs_hb[h];
+		const uint32_t *cnt = &p_stat_dma_virtual[h * 8];
+
+		hb->flush_packets += cnt[0];
+		hb->drop_packets += cnt[1];
+		hb->fwd_packets += cnt[2];
+		hb->dbs_drop_packets += cnt[3];
+		hb->flush_bytes += cnt[4];
+		hb->drop_bytes += cnt[5];
+		hb->fwd_bytes += cnt[6];
+		hb->dbs_drop_bytes += cnt[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		struct port_counters_v2 *rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+		/* Base of this port's counter words in the DMA area */
+		const uint32_t *cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+
+		rx->octets += cnt[0];
+
+		rx->broadcast_pkts += cnt[1];
+		rx->multicast_pkts += cnt[2];
+		rx->unicast_pkts += cnt[3];
+		rx->pkts_alignment += cnt[4];
+		rx->pkts_code_violation += cnt[5];
+		rx->pkts_crc += cnt[6];
+		rx->undersize_pkts += cnt[7];
+		rx->oversize_pkts += cnt[8];
+		rx->fragments += cnt[9];
+		rx->jabbers_not_truncated += cnt[10];
+		rx->jabbers_truncated += cnt[11];
+
+		/* RMON-style frame size distribution */
+		rx->pkts_64_octets += cnt[12];
+		rx->pkts_65_to_127_octets += cnt[13];
+		rx->pkts_128_to_255_octets += cnt[14];
+		rx->pkts_256_to_511_octets += cnt[15];
+		rx->pkts_512_to_1023_octets += cnt[16];
+		rx->pkts_1024_to_1518_octets += cnt[17];
+		rx->pkts_1519_to_2047_octets += cnt[18];
+		rx->pkts_2048_to_4095_octets += cnt[19];
+		rx->pkts_4096_to_8191_octets += cnt[20];
+		rx->pkts_8192_to_max_octets += cnt[21];
+
+		rx->mac_drop_events += cnt[22];
+		rx->pkts_lr += cnt[23];
+		rx->duplicate += cnt[24];
+
+		rx->pkts_ip_chksum_error += cnt[25];
+		rx->pkts_udp_chksum_error += cnt[26];
+		rx->pkts_tcp_chksum_error += cnt[27];
+		rx->pkts_giant_undersize += cnt[28];
+		rx->pkts_baby_giant += cnt[29];
+		rx->pkts_not_isl_vlan_mpls += cnt[30];
+		rx->pkts_isl += cnt[31];
+		rx->pkts_vlan += cnt[32];
+		rx->pkts_isl_vlan += cnt[33];
+		rx->pkts_mpls += cnt[34];
+		rx->pkts_isl_mpls += cnt[35];
+		rx->pkts_vlan_mpls += cnt[36];
+		rx->pkts_isl_vlan_mpls += cnt[37];
+
+		rx->pkts_no_filter += cnt[38];
+		rx->pkts_dedup_drop += cnt[39];
+		rx->pkts_filter_drop += cnt[40];
+		rx->pkts_overflow += cnt[41];
+		/* DBS drop words are valid only when the DBS module is present */
+		rx->pkts_dbs_drop += p_nthw_stat->m_dbs_present ? cnt[42] : 0;
+		rx->octets_no_filter += cnt[43];
+		rx->octets_dedup_drop += cnt[44];
+		rx->octets_filter_drop += cnt[45];
+		rx->octets_overflow += cnt[46];
+		rx->octets_dbs_drop += p_nthw_stat->m_dbs_present ? cnt[47] : 0;
+
+		rx->ipft_first_hit += cnt[48];
+		rx->ipft_first_not_hit += cnt[49];
+		rx->ipft_mid_hit += cnt[50];
+		rx->ipft_mid_not_hit += cnt[51];
+		rx->ipft_last_hit += cnt[52];
+		rx->ipft_last_not_hit += cnt[53];
+
+		/* Rx totals. Sum in 64-bit to avoid 32-bit wraparound of the
+		 * intermediate sums (the counters themselves are 32-bit words).
+		 */
+		uint64_t new_drop_events_sum =
+			(uint64_t)cnt[22] + cnt[38] + cnt[39] + cnt[40] +
+			cnt[41] +
+			(p_nthw_stat->m_dbs_present ? cnt[42] : 0);
+
+		/* Total packets = sum of the size-distribution buckets plus
+		 * the undersize/oversize/fragment/jabber classes (7..21)
+		 */
+		uint64_t new_packets_sum =
+			(uint64_t)cnt[7] + cnt[8] + cnt[9] + cnt[10] +
+			cnt[11] + cnt[12] + cnt[13] + cnt[14] + cnt[15] +
+			cnt[16] + cnt[17] + cnt[18] + cnt[19] + cnt[20] +
+			cnt[21];
+
+		rx->drop_events += new_drop_events_sum;
+		rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += cnt[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		struct port_counters_v2 *tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+		const uint32_t *cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+
+		tx->octets += cnt[0];
+
+		tx->broadcast_pkts += cnt[1];
+		tx->multicast_pkts += cnt[2];
+		tx->unicast_pkts += cnt[3];
+		tx->pkts_alignment += cnt[4];
+		tx->pkts_code_violation += cnt[5];
+		tx->pkts_crc += cnt[6];
+		tx->undersize_pkts += cnt[7];
+		tx->oversize_pkts += cnt[8];
+		tx->fragments += cnt[9];
+		tx->jabbers_not_truncated += cnt[10];
+		tx->jabbers_truncated += cnt[11];
+
+		tx->pkts_64_octets += cnt[12];
+		tx->pkts_65_to_127_octets += cnt[13];
+		tx->pkts_128_to_255_octets += cnt[14];
+		tx->pkts_256_to_511_octets += cnt[15];
+		tx->pkts_512_to_1023_octets += cnt[16];
+		tx->pkts_1024_to_1518_octets += cnt[17];
+		tx->pkts_1519_to_2047_octets += cnt[18];
+		tx->pkts_2048_to_4095_octets += cnt[19];
+		tx->pkts_4096_to_8191_octets += cnt[20];
+		tx->pkts_8192_to_max_octets += cnt[21];
+
+		tx->mac_drop_events += cnt[22];
+		tx->pkts_lr += cnt[23];
+
+		/* Tx totals.
+		 * BUGFIX: the drop-event sum previously indexed the DMA area
+		 * with the RX stride (m_nb_rx_port_counters); using the
+		 * TX-strided base pointer reads the correct word for every
+		 * port, not just port 0.
+		 */
+		uint64_t new_drop_events_sum = cnt[22];
+
+		uint64_t new_packets_sum =
+			(uint64_t)cnt[7] + cnt[8] + cnt[9] + cnt[10] +
+			cnt[11] + cnt[12] + cnt[13] + cnt[14] + cnt[15] +
+			cnt[16] + cnt[17] + cnt[18] + cnt[19] + cnt[20] +
+			cnt[21];
+
+		tx->drop_events += new_drop_events_sum;
+		tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += cnt[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color counters; filled from consecutive (packets, bytes) word
+ * pairs in the stat DMA area.
+ */
+struct color_counters {
+	uint64_t color_packets; /* packets accounted to this color */
+	uint64_t color_bytes; /* bytes accounted to this color */
+	uint8_t tcp_flags; /* NOTE(review): update path not visible here — confirm semantics */
+};
+
+/* Per RX host buffer counters; laid out as eight consecutive 32-bit
+ * words per buffer in the stat DMA area.
+ */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/* Per-port counter set for capability (cap) profile adapters,
+ * accumulated from STA layout version 6 DMA words by
+ * nt4ga_stat_collect_cap_v1_stats().
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events; /* derived: sum of the individual drop counters */
+	uint64_t pkts; /* derived: sum of the frame-size distribution counters */
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	/* RMON-style frame-size distribution */
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop; /* only counted when the DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop; /* only counted when the DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Reduced per-port counter set for VSWITCH/inline (virt) profile
+ * adapters; five DMA words per port.
+ */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/* FLM module counters; each group is tagged with the FLM module
+ * version that introduced it.
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/* Aggregated statistics state for one adapter */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat; /* STA module handle */
+	nthw_rmc_t *mp_nthw_rmc; /* RMC module handle */
+	struct nt_dma_s *p_stat_dma; /* DMA area the FPGA delivers counters into */
+	uint32_t *p_stat_dma_virtual; /* CPU-side mapping of p_stat_dma */
+	uint32_t n_stat_size;
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Per-port counters; which member is valid depends on the adapter
+	 * profile: virt for VSWITCH/inline, cap for capability adapters.
+	 */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		/* Port counters for capability profiles */
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver; /* FLM statistics layout version */
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+/* Lifecycle hooks called from the adapter layer (bodies in nt4ga_stat.c) */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* Accumulate DMA'ed counters into p_nt4ga_stat; per the collector
+ * comments in nt4ga_stat.c this is called with the stat mutex locked.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 65064f44ab..383ff15390 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -9,22 +9,39 @@ cflags += [
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -34,6 +51,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -49,6 +67,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/* Tell whether 'prod_no' names a copper SFP known to support
+ * tri-speed (10/100/1000) operation.
+ */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	/* Product numbers of supported 3-speed copper SFP modules */
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(pn_trispeed_list); idx++) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/* Tell whether the given NIM type uses paged (table-select)
+ * addressing for the upper half of its register map.
+ * Unknown identifiers are logged and treated as non-paged.
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP: /* fallthrough: all paged module types */
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
+/* Return the module's identifier byte as the NIM type enumeration. */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+/*
+ * Forward one NIM register access of 'seq_cnt' bytes to the IIC
+ * controller, reading into or writing from 'p_data'.
+ * Returns the nthw_iic status (0 on success).
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+	(void)lin_addr; /* Unused */
+
+	if (do_write)
+		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					 seq_cnt, p_data);
+	else
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+/*
+ * Select MSA page 'page_sel' (see the explanation above). The current
+ * page-select register is read first and only rewritten when it
+ * differs, because a page switch can take substantial time.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Only write new page select value if necessary */
+	if (page_sel != curr_page_sel) {
+		if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+					 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+					 sizeof(page_sel), &page_sel) != 0)
+			return -1;
+	}
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes at linear NIM address 'lin_addr',
+ * transparently mapping the linear address onto the right I2C device
+ * and register: the basic 0xA0 space [0..127], MSA pages above 128
+ * (when m_page_addressing), the 0xA2 space [256..511], or the 16-bit
+ * PHY register space starting at SFP_PHY_LIN_ADDR (0xAC device).
+ * Returns 0 on success, -1 on range errors or I2C failure.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	/* NOTE(review): assumes I2C instance 2 corresponds to port 0 — confirm */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices support access to address [0..127] */
+
+	/* Transfer in chunks, re-deriving device/register address per chunk */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* For 16-bit PHY registers two bytes advance one address */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_READ);
+}
+
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_WRITE);
+}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	ctx->nim_id = 0;
+	int res = nim_read_id(ctx);
+
+	if (res) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
				 uint8_t max_len, char *p_data);

/*
 * Generates {x}sfp_read_vendor_info(), which fills the vendor name, product
 * number, serial number, date and revision fields of the context from the
 * module EEPROM.
 *
 * NOTE(review): the address token is pasted as Q##SFP_* in every call, so
 * both expansions (x empty and x == q) resolve to the QSFP_*_LIN_ADDR
 * constants - confirm this is intended for the plain SFP expansion, since
 * separate SFP_*_LIN_ADDR constants appear to exist elsewhere in this file.
 */
#define XSFP_READ_VENDOR_INFO(x)                                             \
	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
	{                                                                    \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
				      sizeof(ctx->vendor_name),               \
				      ctx->vendor_name);                      \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
				      sizeof(ctx->prod_no), ctx->prod_no);     \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
				      sizeof(ctx->serial_no), ctx->serial_no); \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
				      sizeof(ctx->date), ctx->date);         \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
				      (uint8_t)(sizeof(ctx->rev) - 2),       \
				      ctx->rev); /*OBS Only two bytes*/      \
	}

/* Instantiate sfp_read_vendor_info() and qsfp_read_vendor_info() */
XSFP_READ_VENDOR_INFO()
XSFP_READ_VENDOR_INFO(q)
+
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
/*
 * Map a module identifier byte to a human readable name.
 * Unknown identifiers map to "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id)
{
	/* Sparse table indexed by identifier value; gaps stay NULL */
	static const char *const id_names[] = {
		[0x00] = "UNKNOWN",
		[0x01] = "GBIC",
		[0x02] = "FIXED",
		[0x03] = "SFP/SFP+",
		[0x04] = "300 pin XBI",
		[0x05] = "XEN-PAK",
		[0x06] = "XFP",
		[0x07] = "XFF",
		[0x08] = "XFP-E",
		[0x09] = "XPAK",
		[0x0A] = "X2",
		[0x0B] = "DWDM",
		[0x0C] = "QSFP",
		[0x0D] = "QSFP+",
		[0x11] = "QSFP28",
		[0x12] = "CFP4",
	};

	if (nim_id < sizeof(id_names) / sizeof(id_names[0]) &&
			id_names[nim_id] != NULL)
		return id_names[nim_id];

	return "ILLEGAL!";
}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate__select bits (RS0 & RS1)
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Disable TX laser.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	int res;
+	uint8_t value;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_WRITE);
+
+	return res;
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	uint8_t value;
+	uint8_t mask;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	if (lane_idx < 0) /* If no lane is specified then all lanes */
+		mask = QSFP_SOFT_TX_ALL_DISABLE_BITS;
+	else
+		mask = (uint8_t)(1U << lane_idx);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		value |= mask;
+	else
+		value &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++)
+		if (*(p_nim_len_info + i) == 255) {
+			ctx->len_info[i] = 65535;
+		} else {
+			uint32_t len = *(p_nim_len_info + i) * *(p_nim_units + i);
+
+			if (len > 65535)
+				ctx->len_info[i] = 65535;
+			else
+				ctx->len_info[i] = (uint16_t)len;
+		}
+}
+
/*
 * Read the basic (non-sensor) QSFP/QSFP+ module data into the context:
 * DMI options, vendor strings, supported cable lengths and the required
 * power level/class.
 *
 * Returns 0 on success, -1 on any I2C read failure.
 */
static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
{
	const bool pg_addr = page_addressing(ctx->nim_id);
	uint8_t options;
	uint8_t value;
	uint8_t nim_len_info[5];
	uint16_t nim_units[5] = { 1000, 2, 1, 1,
				 1
			       }; /* QSFP MSA units in meters */
	const char *yes_no[2] _unused = { "No", "Yes" };

	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
	       nim_id_to_text(ctx->nim_id), ctx->nim_id);

	/* Read DMI options */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
				    sizeof(options), &options, NIM_READ) != 0)
		return -1;
	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
	NT_LOG(DBG, ETHDEV,
	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
	       ctx->instance, yes_no[ctx->avg_pwr]);

	/* Cache vendor name, product/serial number, date and revision */
	qsfp_read_vendor_info(ctx);
	NT_LOG(DBG, PMD,
	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
	       ctx->date, ctx->rev);

	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
				    sizeof(nim_len_info), nim_len_info,
				    NIM_READ) != 0)
		return -1;

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	nim_import_len_info(ctx, nim_len_info, nim_units);

	/* Read required power level */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
				    sizeof(value), &value, NIM_READ) != 0)
		return -1;

	/*
	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
	 * If power class >= 5 setHighPower must be called for the module to be fully
	 * functional
	 */
	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
		/* NIM in power class 1 - 4 */
		ctx->pwr_level_req =
			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
				  1);
	} else {
		/* NIM in power class 5 - 7 */
		ctx->pwr_level_req =
			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
	}

	return 0;
}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) {
+		return nim_sfp_set_rate_select(ctx, speed);
+	} else if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	}
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
/*
 * Read basic vendor and DMI information for an SFP/SFP+ module into the
 * context: checksum validation, DMI/enhanced options, vendor strings,
 * supported cable lengths and power level. Always returns 0.
 */
static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
{
	const char *yes_no[2] _unused = { "No", "Yes" };

	/* Validates the module checksums and sets ctx->content_valid */
	check_content_valid(ctx, 0);
	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
	       ctx->nim_id);

	/* Read DMI options */
	uint8_t options;

	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;

	/* With DMI support all five monitoring sensors are available */
	if (ctx->dmi_supp) {
		ctx->options |=
			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
			(1 << NIM_OPTION_TX_POWER);
	}

	if (ctx->dmi_supp) {
		NT_LOG(DBG, PMD,
		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
		       yes_no[ctx->specific_u.sfp.ext_cal],
		       yes_no[ctx->specific_u.sfp.addr_chg]);
	} else {
		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
	}
	/* Read enhanced options */
	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
		    &options);
	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;

	if (ctx->tx_disable)
		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);

	sfp_read_vendor_info(ctx);

	uint8_t nim_len_info[5];

	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
		    nim_len_info);

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	uint16_t nim_units[5] = { 1000, 100, 10, 10,
				 1
			       }; /* SFP MSA units in meters */
	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);

	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
		/*
		 * Make sure that for SFP modules the supported length for SM fibers
		 * which is given in both km and 100m units is are equal to the greatest
		 * value.
		 * The following test will also be valid if NIM_LEN_MAX has been set!
		 */
		if (ctx->len_info[1] > ctx->len_info[0])
			ctx->len_info[0] = ctx->len_info[1];

		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
	}

	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);

	/* Required power level: 2 if the module requests it, otherwise 1 */
	if (options & SFP_POWER_LEVEL2_REQ_BIT)
		ctx->pwr_level_req = 2;
	else
		ctx->pwr_level_req = 1;

	ctx->pwr_level_cur = 1;

	if (ctx->pwr_level_req == 2) {
		/* Read the current power level status */
		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
			    &options);

		if (options & SFP_POWER_LEVEL2_GET_BIT)
			ctx->pwr_level_cur = 2;
		else
			ctx->pwr_level_cur = 1;
	}
	return 0;
}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
/*
 * Try to figure out if a sensor is present by reading its value(s) and its
 * limits. This is a highly empirical way that cannot be guaranteed to give
 * the correct result but it was a wish not to be dependent on a PN table
 * based solution.
 *
 * value_addr:    linear address of the current sensor reading(s)
 * lane_count:    number of 16-bit readings (1, or 4 for per-lane sensors)
 * limit_addr:    linear address of the 4 x 16-bit warning/alarm thresholds
 * two_compl:     readings/limits are two's complement values
 * sensor_option: NIM_OPTION_* bit number OR'ed into ctx->options when the
 *                sensor is deemed present
 */
static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
		uint16_t value_addr,
		uint8_t lane_count,
		uint16_t limit_addr, bool two_compl,
		uint32_t sensor_option)
{
	uint8_t data[8];
	int i, j;
	int value;
	int value_list[4];
	int limit;
	int limit_list[4];
	bool present;

	/* Read current value(s); each reading is two bytes, MSB first */
	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);

	for (j = 0; j < lane_count; j++) {
		value = 0;

		for (i = 0; i < 2; i++) {
			value = value << 8;
			value += data[2 * j + i];
		}

		/* Sign-extend 16-bit two's complement readings */
		if (two_compl && value >= 0x8000)
			value = value - 0x10000;

		value_list[j] = value;
	}

	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
	read_data_lin(ctx, limit_addr, 8, data);

	for (j = 0; j < 4; j++) {
		limit = 0;

		for (i = 0; i < 2; i++) {
			limit = limit << 8;
			limit += data[2 * j + i];
		}

		if (two_compl && limit >= 0x8000)
			limit = limit - 0x10000;

		limit_list[j] = limit;
	}

	/* Find out if limits contradicts each other */
	int alarm_high = limit_list[0];
	int alarm_low = limit_list[1];
	int warn_high = limit_list[2];
	int warn_low = limit_list[3];

	bool alarm_limits = false; /* Are they present - that is both not zero */
	bool warn_limits = false;
	bool limit_conflict = false;

	if (alarm_high != 0 || alarm_low != 0) {
		alarm_limits = true;

		if (alarm_high <= alarm_low)
			limit_conflict = true;
	}

	if (warn_high != 0 || warn_low != 0) {
		warn_limits = true;

		/* Warning limits must be least restrictive */
		if (warn_high <= warn_low)
			limit_conflict = true;
		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
			limit_conflict = true;
	}

	/* Try to deduce if the sensor is present or not */
	present = false;

	if (limit_conflict) {
		present = false;
	} else if (warn_limits ||
		 alarm_limits) { /* Is one or both present and not contradictory */
		present = true;
	} else {
		/*
		 * All limits are zero - look at the sensor value
		 * If one sensor is non-zero the sensor is set to be present
		 */
		for (j = 0; j < lane_count; j++) {
			if (value_list[j] != 0) {
				present = true;
				break;
			}
		}

		/*
		 * If all limits and values are zero then present will be false here. In this
		 * case it is assumed that the sensor is not present:
		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
		 * with no optical input.
		 * For all investigated modules it was found that if RxPwr is implemented then
		 * the limits are also set. This is not always the case with TxBias and TxPwr
		 * but here the measured values will be non-zero when the laser is on what it
		 * will be just after initialization since it has no external hardware disable.
		 */
	}

	if (present)
		ctx->options |= (1U << sensor_option);
}
+
+/*
+ * Find active QSFP sensors.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	ctx->options = 0;
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TEMP_LIN_ADDR, 1,
+					 QSFP_TEMP_THRESH_LIN_ADDR, true,
+					 NIM_OPTION_TEMP);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_VOLT_LIN_ADDR, 1,
+					 QSFP_VOLT_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_SUPPLY);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_RX_PWR_LIN_ADDR, 4,
+					 QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_RX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_PWR_LIN_ADDR, 4,
+					 QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_BIAS_LIN_ADDR, 4,
+					 QSFP_BIAS_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_BIAS);
+}
+
+/*
+ * Classify an SFP family module (SFP/SFP+/SFP28) from its EEPROM contents
+ * and fill in ctx->port_type together with the SFP-specific flags
+ * (sfpplus/sfp28, dual_rate, rate-select bits, copper type, ...).
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: 1000BASE-x compliance bits */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: copper cable technology bits */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: digital diagnostics implemented */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* EEPROM byte 12 holds the nominal bit rate in units of 100 Mbps */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	/* Supported fiber lengths; zero means not applicable for this module */
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* 25.5 Gbps nominal rate: this is an SFP28 module */
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive the supported link-speed bitmask for an SFP family module from the
+ * flags collected by sfp_find_port_params().
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool dual = ctx->specific_u.sfp.dual_rate;
+	const bool tri = ctx->specific_u.sfp.tri_speed;
+	uint32_t mask;
+
+	if (ctx->specific_u.sfp.sfp28) {
+		/* SFP28 runs 25G; dual-rate modules add 10G */
+		mask = NT_LINK_SPEED_25G;
+		if (dual)
+			mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* SFP+ runs 10G; dual-rate modules and DAC cables add 1G */
+		mask = NT_LINK_SPEED_10G;
+		if (dual ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		/* 100M-only SFP */
+		mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Plain SFP defaults to 1G */
+		mask = NT_LINK_SPEED_1G;
+		if (dual || tri)
+			mask |= NT_LINK_SPEED_100M;
+		if (tri)
+			mask |= NT_LINK_SPEED_10M;
+	}
+
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+			NT_LINK_SPEED_1G);
+	}
+
+	ctx->speed_mask = mask;
+}
+
+/*
+ * Determine the QSFP28 port sub-type from the specification compliance
+ * codes (SFF-8636 Table 6-17) and, when flagged, the extended compliance
+ * code (SFF-8024 Table 4-4).
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t spec_compliance;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &spec_compliance);
+
+	/* Generic QSFP28 unless a more specific type can be derived */
+	ctx->port_type = NT_PORT_TYPE_QSFP28;
+
+	if (!(spec_compliance & (1 << 7)))
+		return;
+
+	/* SFF-8024, Rev 4.7, Table 4-4 */
+	uint8_t ext_code = 0;
+
+	read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+		    &ext_code);
+
+	switch (ext_code) {
+	case 0x02:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+		break;
+	case 0x03:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+		break;
+	case 0x0B:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+		break;
+	case 0x0C:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+		break;
+	case 0x0D:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+		break;
+	case 0x25:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+		break;
+	case 0x26:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+		break;
+	case 0x27:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+		break;
+	default:
+		/* Keep the generic QSFP28 classification */
+		break;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * may still support several rates without requiring the user to select one of
+ * them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	enum {
+		EXT_RATE_SELECT_COMPL_REG = 141,
+		OPTIONS_REG = 195,
+		ENH_OPTIONS_REG = 221,
+	};
+
+	/* Options register bit 5: is rate selection implemented at all? */
+	if (((read_byte(ctx, OPTIONS_REG) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options bits 3..2: which rate-select mechanism is used */
+	uint8_t sel_type = (uint8_t)((read_byte(ctx, ENH_OPTIONS_REG) >> 2) & 0x03);
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	/* Extended rate select compliance, bits 1..0, must be version 0x02 */
+	uint8_t sel_ver = (uint8_t)(read_byte(ctx, EXT_RATE_SELECT_COMPL_REG) & 0x03);
+
+	if (sel_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       sel_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive the supported link-speed mask for a QSFP28 module, depending on
+ * whether the context covers all lanes (lane_idx < 0) or a single lane.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = (ctx->lane_idx < 0);
+
+	if (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/*
+ * Classify a QSFP+ module as passive DAC, active DAC or optical from the
+ * transmitter-technology nibble of the device tech byte.
+ */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	const uint8_t tx_tech = (uint8_t)(device_tech & 0xF0);
+
+	if (tx_tech == 0xA0 || tx_tech == 0xB0) {
+		/* Copper cable, unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tx_tech >= 0xC0) {
+		/* Copper cable with active equalizers (0xC0..0xF0) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* 40G when the context covers all four lanes, 10G for a single lane. */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read the basic SFP EEPROM data and, on success, classify the module and
+ * compute its speed mask. Returns the result of sfp_read_basic_data().
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	const int res = sfp_read_basic_data(ctx);
+
+	if (res != 0)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return res;
+}
+
+/* Initialize the QSFP+ part of the context; lane_idx < 0 means all lanes. */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * QSFP+ pre-initialization: read the basic EEPROM data, classify the port,
+ * detect the available sensor options and the TX_DISABLE capability, and
+ * derive the supported speed mask.
+ * Returns the result of qsfpplus_read_basic_data() (zero on success).
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * After a module reset, wait until the QSFP28 module is ready to be read.
+ * Uses the init-complete flag when the module implements it (revision
+ * compliance >= 7, i.e. SFF-8636 rev 2.5 or later); otherwise falls back to
+ * a fixed 500 ms delay. Only waits when this context covers all lanes
+ * (lane_idx == -1) or the first lane (lane_idx == 0).
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		/* Byte 6 bit 0 is the init-complete flag */
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine which FEC types the module supports and whether they can be
+ * controlled: first via a product-number whitelist, otherwise via the
+ * SFF-8636 equalizer/control register on page 03h.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare the product number by content; the previous pointer
+		 * comparison (ctx->prod_no == nim_list[i]) could never match.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 on upper page 03h */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/* QSFP28 pre-init: run the QSFP+ pre-init first, then the 28G specifics. */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	const int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (res != 0)
+		return res;
+
+	qsfp28_wait_for_ready_after_reset(ctx);
+	memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+	       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+	ctx->specific_u.qsfp.qsfp28 = true;
+	qsfp28_find_port_params(ctx);
+	qsfp28_get_fec_options(ctx);
+	qsfp28_set_speed_mask(ctx);
+	return res;
+}
+
+/*
+ * Allocate and link the sensor groups for an SFP module on port @m_port_no:
+ * temperature (list head), voltage, bias current, TX power and RX power.
+ * The list head is stored in nim_sensors_ptr[m_port_no]; the number of
+ * successfully allocated groups is returned through @nim_sensors_cnt.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	/* Validate every pointer before writing through any of them */
+	if (ctx == NULL || nim_sensors_ptr == NULL || nim_sensors_cnt == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	*nim_sensors_cnt = 0;
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* temperature sensor heads the list */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	if (sensor == NULL)
+		return; /* allocation failure has already been logged */
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* level-1 sensors: voltage, bias current, tx power, rx power */
+	static void (*const read_fns[])(struct nim_sensor_group *,
+					nthw_spis_t *) = {
+		&nim_read_sfp_voltage,
+		&nim_read_sfp_bias_current,
+		&nim_read_sfp_tx_power,
+		&nim_read_sfp_rx_power,
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(read_fns); i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no,
+							 ctx,
+							 NT_SENSOR_SOURCE_PORT,
+							 &sfp_sensors_level1[i]);
+		if (sensor->next == NULL)
+			return; /* keep the sensors allocated so far */
+		sensor = sensor->next;
+		sensor->read = read_fns[i];
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate and link the sensor groups for a QSFP+/QSFP28 module on port
+ * @m_port_no: temperature (list head), voltage, then four lanes each of
+ * bias current, TX power and RX power (13 groups in total).
+ * NOTE(review): unlike sfp_nim_add_all_sensors() this function does not
+ * reset *nim_sensors_cnt before incrementing it, so the caller is expected
+ * to have initialized it -- verify against callers.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors, one per lane (qsfp_sensor_level1[1..4]) */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power, one per lane (qsfp_sensor_level1[5..8]) */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power, one per lane (qsfp_sensor_level1[9..12]) */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one sensor-group list node and its embedded sensor descriptor.
+ * Returns NULL when the allocation fails (the caller must handle this).
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(struct nim_sensor_group));
+
+	if (sg == NULL) {
+		/* The old message claimed "sensor group is NULL"; this is an allocation failure */
+		NT_LOG(ERR, ETHDEV, "%s: failed to allocate sensor group", __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Classify the NIM from its identifier byte, run the type-specific
+ * pre-initialization and register the module's sensors.
+ * Returns zero on success, non-zero on an unsupported NIM type or a
+ * failing construct/pre-init step.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/* Propagate the pre-init result instead of discarding it */
+		if (res == 0)
+			res = sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		/* extra, when given, selects a single lane; -1 means all lanes */
+		if (res == 0)
+			res = qsfpplus_preinit(ctx,
+					       extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		if (res == 0)
+			res = qsfp28_preinit(ctx,
+					     extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* Minimal NIM state snapshot filled in by nim_state_build() */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * I2C context describing one NIM (pluggable transceiver) and everything
+ * learned about it during pre-initialization.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr; /* I2C device address */
+	uint8_t regaddr; /* I2C register address */
+	uint8_t nim_id; /* identifier byte, see enum nt_nim_identifier_e */
+	nt_port_type_t port_type;
+
+	/* NUL-terminated strings read from the module EEPROM */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr;
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5];
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options; /* bitmask of (1 << nim_option_t) capabilities */
+	bool tx_disable; /* TX_DISABLE implemented by the module */
+	bool dmi_supp;
+
+	/* Family-specific detection results; valid member depends on nim_id */
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel; /* hard RATE_SELECT implemented */
+			bool sw_rate_sel; /* soft RATE_SELECT implemented */
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only;
+			bool qsfp28; /* true when the module is QSFP28 rather than QSFP+ */
+			union {
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* One node in the per-port singly-linked list of NIM sensors */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	/* type-specific read callback (e.g. nim_read_sfp_temp) */
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	struct nim_i2c_ctx *ctx;
+	struct nim_sensor_group *next;
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify a NIM based on its ID and some register reads
+ * and collects information into the ctx structure. The @extra parameter can
+ * contain the initialization argument for specific types of NIMs.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+/* Optional NIM capabilities; used as bit positions in nim_i2c_ctx::options */
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/*
+ * Map a single link-speed flag to a short human readable string.
+ * Returns "Unhandled" (after a debug assert) for values without an entry.
+ */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *text;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].text;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/*
+ * Convert a single link-speed flag to its bit rate in bits per second.
+ * Unknown values yield 0 (after a debug assert).
+ */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	/* Rate expressed in Mbps, scaled to bps on return */
+	uint64_t mbps;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		mbps = 0ULL;
+		break;
+	case NT_LINK_SPEED_10M:
+		mbps = 10ULL;
+		break;
+	case NT_LINK_SPEED_100M:
+		mbps = 100ULL;
+		break;
+	case NT_LINK_SPEED_1G:
+		mbps = 1000ULL;
+		break;
+	case NT_LINK_SPEED_10G:
+		mbps = 10ULL * 1000ULL;
+		break;
+	case NT_LINK_SPEED_25G:
+		mbps = 25ULL * 1000ULL;
+		break;
+	case NT_LINK_SPEED_40G:
+		mbps = 40ULL * 1000ULL;
+		break;
+	case NT_LINK_SPEED_50G:
+		mbps = 50ULL * 1000ULL;
+		break;
+	case NT_LINK_SPEED_100G:
+		mbps = 100ULL * 1000ULL;
+		break;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		mbps = 0ULL;
+		break;
+	}
+	return mbps * 1000ULL * 1000ULL;
+}
+
+/*
+ * Render a link-speed bitmask as a comma separated string into @buffer of
+ * size @length. The output is truncated (never overflowed) when the buffer
+ * is too small. Returns @buffer.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			/* Space left for appended characters, excluding the NUL */
+			size_t avail = (len + 1 < length) ? (length - len - 1) : 0;
+
+			if (len > 0 && avail >= 2) {
+				strncat(buffer, ", ", avail);
+				len += 2;
+				avail -= 2;
+			}
+
+			/*
+			 * Bound the copy by the remaining space; the previous
+			 * code passed the total buffer length to strncat, which
+			 * can overflow an almost-full buffer.
+			 */
+			if (avail > 0)
+				strncat(buffer,
+					nt_translate_link_speed((nt_link_speed_t)(1U << i)),
+					avail);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	/* NOTE(review): END evaluates to 0x81, which is not a valid mask bit - use only as a sentinel */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+/* Single-speed value to human-readable string (e.g. "100G") */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+/* Bitmask of speeds to comma-separated string written into 'buffer' */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+/* Speed enum to bits per second */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read 'count' 16-bit sensor values starting at linear address 'addr' and
+ * convert each from the module's big-endian wire format to host byte order.
+ * Always returns true (the underlying read reports no error here).
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * Swap to host order; the previous identity assignment was a
+		 * no-op despite its comment. Matches the SFP reader's swap.
+		 */
+		*p_lane_values = ntohs(*p_lane_values);
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (1 value).
+ * Raw value is assumed 1/256 degC per word - TODO confirm vs SFF-8636; callers scale.
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage (1 value).
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes.
+ * p_value must point to storage for 4 uint16_t values.
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes.
+ * p_value must point to storage for 4 uint16_t values.
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes.
+ * p_value must point to storage for 4 uint16_t values.
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Bug fix: read the RX power registers, not the TX power registers */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/* Publish the QSFP module temperature (0.1 degC units) to the sensor, -1 on failure. */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp = 0;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = qsfp_plus_nim_get_temperature(sg->ctx, &raw_temp);
+
+	/* Raw units are 1/256 degC; scale to 0.1 degC */
+	update_sensor_value(sg->sensor, ok ? (int)(raw_temp * 10 / 256) : -1);
+}
+
+/* Publish the QSFP supply voltage to the sensor, -1 on failure. */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt = 0;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = qsfp_plus_nim_get_supply_voltage(sg->ctx, &raw_volt);
+
+	/* Divide by 10 - presumably 100uV units to mV, same scaling as the SFP reader */
+	update_sensor_value(sg->sensor, ok ? (int)(raw_volt / 10) : -1);
+}
+
+/* Publish per-lane TX bias current to the sensor, -1 on failure. */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Factor 2 matches the SFP bias reader's scaling */
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane] * 2);
+}
+
+/* Publish per-lane TX optical power to the sensor, -1 on failure. */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
+
+/* Publish per-lane RX optical power to the sensor, -1 on failure. */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+/* NOTE(review): SFP_CU_LINK_LEN_LIN_ADDR is defined twice with the same value (benign) - remove one */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_value will point
+ * to 16bit data that can be either signed or unsigned.
+ *
+ * data_addr:   linear address of the raw 16-bit value
+ * slope_addr:  linear address of the 16-bit calibration slope
+ * offset_addr: linear address of the signed 16-bit calibration offset
+ * p_value:     in/out pointer to a 16-bit result (signedness per signed_data)
+ * Returns false when the module has no DMI support, true otherwise.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	/* htons() is used as "big-endian to host" here (identical to ntohs()) */
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to little endian */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/* Arithmetic is done in 32 bits, then saturated to the 16-bit type */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (signed; callers scale the 1/256 degC raw value).
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage (unsigned).
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM bias current (unsigned).
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power (unsigned).
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ * Returns false only when the calibrated value exceeds the 16-bit range.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	/* htons() is used as "big-endian to host" here (identical to ntohs()) */
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/* In-place 32-bit byte swap of each IEEE-754 coefficient */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 * NOTE(review): coefficients appear stored highest-order first,
+		 * so rx_pwr_cal[4] is Coeff0 - confirm against SFF-8472.
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Horner-style accumulation of increasing powers of the raw value */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		if (rx_power > 65535)
+			return false;
+
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists.
+ * Thin wrapper selecting the live RX power register address.
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+/* Publish the SFP module temperature (0.1 degC units) to the sensor, -1 on failure. */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp = 0;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = sfp_nim_get_temperature(sg->ctx, &raw_temp);
+
+	/* Raw units are 1/256 degC; scale to 0.1 degC */
+	update_sensor_value(sg->sensor, ok ? (int)(raw_temp * 10 / 256) : -1);
+}
+
+/* Publish the SFP supply voltage (mV) to the sensor, -1 on failure. */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt = 0;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = sfp_nim_get_supply_voltage(sg->ctx, &raw_volt);
+
+	/* Unit: 100uV -> 1mV */
+	update_sensor_value(sg->sensor, ok ? (int)(raw_volt / 10) : -1);
+}
+
+/* Publish the SFP TX bias current to the sensor, -1 on failure. */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_bias = 0;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = sfp_nim_get_tx_bias_current(sg->ctx, &raw_bias);
+
+	/* Factor 2 matches the QSFP bias reader's scaling */
+	update_sensor_value(sg->sensor, ok ? (int)(raw_bias * 2) : -1);
+}
+
+/* Publish the SFP TX optical power to the sensor, -1 on failure. */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr = 0;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = sfp_nim_get_tx_power(sg->ctx, &raw_pwr);
+
+	update_sensor_value(sg->sensor, ok ? (int)raw_pwr : -1);
+}
+
+/* Publish the SFP RX optical power to the sensor, -1 on failure. */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr = 0;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = sfp_nim_get_rx_power(sg->ctx, &raw_pwr);
+
+	update_sensor_value(sg->sensor, ok ? (int)raw_pwr : -1);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate and zero-initialize a GMF instance.
+ * Returns NULL on allocation failure; release with nthw_gmf_delete().
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/* calloc replaces the malloc+memset pair: zeroed and overflow-checked */
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/* Scrub and free a GMF instance; NULL is accepted and ignored. */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (!p)
+		return;
+	/* Clear the instance before returning its memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a GMF instance to FPGA module instance 'n_instance' and resolve all
+ * register/field handles. Mandatory registers use the *_get_* accessors;
+ * optional ones (newer GMF versions) use *_query_* and may remain NULL.
+ * With p == NULL this only probes for the module's existence.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* Probe-only mode: report presence without initializing anything */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory control/speed/status registers */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 when absent */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields below - NULL when the FPGA lacks them */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF; refused while administratively blocked (license expiry). */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->m_administrative_block)
+		field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame-gap handling. */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+/* Optional field (newer FPGAs); silently ignored when not present. */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Optional field (newer FPGAs); silently ignored when not present. */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Optional field (newer FPGAs); silently ignored when not present. */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock) {
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+				    enable ? 1 : 0);
+	}
+}
+
+/* Enable/disable automatic IFG adjustment. */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Normalize the bool to 0/1 explicitly, matching the sibling setters */
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write the raw IFG speed value. Returns 0 on success, -1 when the value
+ * exceeds the accepted range (up to 2^(width-1) - presumably reserving the
+ * top bit; TODO confirm against the GMF register spec).
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		/*
+		 * NOTE(review): the uint64_t is handed over as 1 or 2 32-bit
+		 * words; assumes little-endian word order in field_set_val.
+		 */
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Return the bit width of the IFG speed field (22 in GMF 1.2, 64 in 1.3). */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	assert(n_bit_width >=
+	       22); /* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	return n_bit_width;
+}
+
+/*
+ * Program a TX rate limit given in absolute bits against the link speed.
+ * n_link_speed must be non-zero (division below) - caller's responsibility.
+ * Returns nthw_gmf_set_ifg_speed_raw()'s result (0 ok, -1 out of range).
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	/* Half the field width is the fixed-point fraction size */
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program a TX rate limit as a percentage of link speed.
+ * 0.0 and 100.0 both disable limiting (raw value 0); values in (99, 100)
+ * are rejected with -1. The exact float compares are deliberate sentinels.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		/* Half the field width is the fixed-point fraction size */
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Write the 64-bit clock delta as two 32-bit words.
+ * NOTE(review): assumes field_set_val consumes little-endian word order.
+ */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the 64-bit delta adjust; no-op when the optional register is absent. */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+/* Write the 64-bit max-adjust slack as two 32-bit words. */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value. */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
+}
+
+/*
+ * Read the sticky status register and return the set conditions as a
+ * bitmask of GMF_STATUS_MASK_* flags (reading does not clear them).
+ */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed))
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted))
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+/*
+ * Clear the sticky status bits selected in 'status' (write-one-to-clear
+ * style: setting the field acknowledges the condition).
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the next-packet timestamp (ns). Returns UINT64_MAX when the optional
+ * register is not present in this FPGA.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/* UINT64_MAX, not ULONG_MAX: unsigned long is 32-bit on ILP32 targets */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the maximum packet delay (ns). Returns UINT64_MAX when the optional
+ * register is not present in this FPGA.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/* UINT64_MAX, not ULONG_MAX: unsigned long is 32-bit on ILP32 targets */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Permanently disable the GMF for this instance (license enforcement):
+ * turns it off and latches the block so nthw_gmf_set_enable() refuses
+ * to re-enable it.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
/*
 * Bitmask values returned/accepted by nthw_gmf_get_status_sticky() and
 * nthw_gmf_set_status_sticky(). Values are OR-ed together, so each member
 * must be a distinct single bit - made explicit here (the previous implicit
 * sequential enumeration would silently produce overlapping value 3 for a
 * third member). Numeric values are unchanged.
 */
enum gmf_status_mask {
	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1 << 0,
	GMF_STATUS_MASK_IFG_ADJUSTED = 1 << 1
};
+
/* State for one GMF (Generic MAC Feeder) module instance. */
struct nthw_gmf {
	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
	nt_module_t *mp_mod_gmf;	/* resolved MOD_GMF module */
	int mn_instance;	/* module instance number */

	/* Register/field shadows resolved at init time */
	nt_register_t *mp_ctrl;
	nt_field_t *mp_ctrl_enable;
	nt_field_t *mp_ctrl_ifg_enable;
	nt_field_t *mp_ctrl_ifg_tx_now_always;
	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;

	nt_register_t *mp_speed;
	nt_field_t *mp_speed_ifg_speed;

	nt_register_t *mp_ifg_clock_delta;
	nt_field_t *mp_ifg_clock_delta_delta;

	nt_register_t *mp_ifg_clock_delta_adjust;
	nt_field_t *mp_ifg_clock_delta_adjust_delta;

	nt_register_t *mp_ifg_max_adjust_slack;
	nt_field_t *mp_ifg_max_adjust_slack_slack;

	nt_register_t *mp_debug_lane_marker;
	nt_field_t *mp_debug_lane_marker_compensation;

	/* Sticky status bits; see enum gmf_status_mask */
	nt_register_t *mp_stat_sticky;
	nt_field_t *mp_stat_sticky_data_underflowed;
	nt_field_t *mp_stat_sticky_ifg_adjusted;

	nt_register_t *mp_stat_next_pkt;
	nt_field_t *mp_stat_next_pkt_ns;

	nt_register_t *mp_stat_max_delayed_pkt;
	nt_field_t *mp_stat_max_delayed_pkt_ns;

	/* IFG speed scaling parameters from the FPGA product parameters */
	int mn_param_gmf_ifg_speed_mul;
	int mn_param_gmf_ifg_speed_div;

	bool m_administrative_block; /* Used to enforce license expiry */
};

typedef struct nthw_gmf nthw_gmf_t;
typedef struct nthw_gmf nthw_gmf;
+
/* Lifecycle: allocate, initialize against an FPGA instance, free. */
nthw_gmf_t *nthw_gmf_new(void);
void nthw_gmf_delete(nthw_gmf_t *p);
int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);

/* Master enable and inter-frame-gap (IFG) feature enables. */
void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);

void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);

/* IFG speed (TX rate limiting): raw register value, bits/s, or percent. */
int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);

int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
			    const uint64_t n_link_speed);
int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);

/* Clock delta / slack / compensation tuning values. */
void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);

/* Sticky status access; masks from enum gmf_status_mask. */
uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);

/* Statistics; return UINT64_MAX-style sentinel when register is absent. */
uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);

void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	nthw_rmc_t *p = malloc(sizeof(nthw_rmc_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_rmc_t));
+	return p;
+}
+
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_rmc_t));
+		free(p);
+	}
+}
+
/*
 * Bind an RMC instance to MOD_RMC instance 'n_instance' of 'p_fpga'.
 * Returns 0 on success, -1 when the module instance does not exist.
 * With p == NULL the call only probes for the module's existence.
 */
int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);

	/* Probe-only mode: report whether the instance exists. */
	if (p == NULL)
		return p_mod == NULL ? -1 : 0;

	if (p_mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_rmc = p_mod;

	/* Params: port/NIM counts from the FPGA product parameters */
	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
	p->mb_administrative_block = false;

	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
	       p->mn_instance, p->mb_is_vswitch);

	/* CTRL register and its fields are mandatory */
	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);

	p->mp_fld_ctrl_block_stat_drop =
		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
	p->mp_fld_ctrl_block_keep_alive =
		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
	p->mp_fld_ctrl_block_mac_port =
		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);

	/* STATUS/DBG/MAC_IF registers are optional: fields are resolved only
	 * when the register exists in this FPGA image ('query', not 'get').
	 */
	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
	if (p->mp_reg_status) {
		p->mp_fld_sf_ram_of =
			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
		p->mp_fld_descr_fifo_of =
			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
	}

	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
	if (p->mp_reg_dbg) {
		p->mp_fld_dbg_merge =
			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
	}

	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
	if (p->mp_reg_mac_if) {
		p->mp_fld_mac_if_err =
			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
	}
	return 0;
}
+
/* Read the current MAC-port block mask (refreshes the field first). */
uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
{
	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
}
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_sf_ram_of) :
+	       0xffffffff;
+}
+
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_descr_fifo_of) :
+	       0xffffffff;
+}
+
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	return (p->mp_reg_dbg) ? field_get_updated(p->mp_fld_dbg_merge) : 0xffffffff;
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
/* Write the MAC-port block mask and flush it to hardware. */
void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
{
	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
}
+
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
/*
 * Enforce an administrative (license-expiry) block: block all MAC ports and
 * latch the flag so later nthw_rmc_block()/nthw_rmc_unblock() calls are no-ops.
 */
void nthw_rmc_administrative_block(nthw_rmc_t *p)
{
	/* block all MAC ports */
	field_set_flush(p->mp_fld_ctrl_block_mac_port);
	p->mb_administrative_block = true;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
/* State for one RMC (RX MAC Controller) module instance. */
struct nthw_rmc {
	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
	nt_module_t *mp_mod_rmc;	/* resolved MOD_RMC module */
	int mn_instance;	/* module instance number */

	int mn_ports;	/* RX port count (product parameter) */
	int mn_nims;	/* NIM count (product parameter) */
	bool mb_is_vswitch;	/* FPGA profile is VSWITCH */

	/* Latched license-expiry block; makes block/unblock no-ops */
	bool mb_administrative_block;

	/* RMC CTRL register */
	nt_register_t *mp_reg_ctrl;
	nt_field_t *mp_fld_ctrl_block_stat_drop;
	nt_field_t *mp_fld_ctrl_block_keep_alive;
	nt_field_t *mp_fld_ctrl_block_mac_port;

	/* RMC Status register (optional; NULL when absent) */
	nt_register_t *mp_reg_status;
	nt_field_t *mp_fld_sf_ram_of;
	nt_field_t *mp_fld_descr_fifo_of;

	/* RMC DBG register (optional; NULL when absent) */
	nt_register_t *mp_reg_dbg;
	nt_field_t *mp_fld_dbg_merge;

	/* RMC MAC_IF register (optional; NULL when absent) */
	nt_register_t *mp_reg_mac_if;
	nt_field_t *mp_fld_mac_if_err;
};

typedef struct nthw_rmc nthw_rmc_t;
typedef struct nthw_rmc nthw_rmc;
+
/* Lifecycle: allocate, bind to an FPGA module instance, free. */
nthw_rmc_t *nthw_rmc_new(void);
void nthw_rmc_delete(nthw_rmc_t *p);
int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);

/* RX blocking control (per MAC port mask, or all at once). */
uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
void nthw_rmc_block(nthw_rmc_t *p);
void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
void nthw_rmc_administrative_block(nthw_rmc_t *p);

/* Diagnostics; return 0xffffffff when the backing register is absent. */
uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
#define MAX_ADAPTERS 2

/*
 * Next free FPGA sensor-result slot, per adapter (see get_fpga_idx()).
 * Fix: declared 'static' - this is file-local mutable state and must not
 * be exported from the translation unit.
 */
static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t p_sensor_result;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	uint8_t tmp = s_fpga_indexes[adapter_no];
+
+	s_fpga_indexes[adapter_no] = (uint8_t)(tmp + 1);
+
+	return tmp;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
/*
 * Create and register one AVR-monitored sensor group.
 * Returns a heap-allocated group (caller owns it) or NULL on failure.
 */
struct nt_sensor_group *
avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
		unsigned int index, enum sensor_mon_device avr_dev,
		uint8_t avr_dev_reg, enum sensor_mon_endian end,
		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
/*
 * Create the FPGA die-temperature sensor group for 'adapter_no'.
 * Returns a heap-allocated group (caller owns it), or NULL on failure.
 */
struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
		unsigned int sensor_idx,
		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *temp =
+		malloc(sizeof(struct nt_fpga_sensor_monitor));
+	if (temp == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return temp;
+}
+
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL)
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL)
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
/* Allocate an uninitialized FPGA temperature monitor (NULL on failure). */
struct nt_fpga_sensor_monitor *tempmon_new(void);
/* Bind 't' to the TEMPMON module/STAT register/TEMP field of 'p_fpga'. */
void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
/* Address of the linear temperature value in an XFP NIM's register map
 * - NOTE(review): confirm against the XFP MSA memory map.
 */
#define XFP_TEMP_LIN_ADDR 96

/* Shared NIM sensor description tables (defined in nim_sensors.c). */
extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef AVR_INTF_H_
/* Guard renamed from '_AVR_INTF': identifiers beginning with an underscore
 * followed by an uppercase letter are reserved for the implementation
 * (C11 7.1.3 / CERT DCL37-C). All declarations below are unchanged.
 */
#define AVR_INTF_H_

#include <stdint.h>

#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
#define SENSOR_MON_INT16_NAN \
	((int16_t)0x8000) /* Most negative number used as NaN */

/*
 * Specify the nature of the raw data. AVR and ntservice must use this
 * information when comparing or converting to native format which is little endian
 */
enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };

enum sensor_mon_sign {
	SENSOR_MON_UNSIGNED,
	SENSOR_MON_SIGNED, /* 2's complement */
};

/* Define sensor devices */
enum sensor_mon_device {
	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
	SENSOR_MON_PEX8734, /* na      NT100E3 */
	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
	SENSOR_MON_DEVICE_COUNT
};

#pragma pack(1)
/* Wire-format setup entry exchanged with the AVR; must remain packed. */
struct sensor_mon_setup_data16 {
	uint8_t fpga_idx; /* Destination of results */
	uint8_t device; /* Device to monitor */
	uint8_t device_register; /* Sensor within device */
	uint16_t mask; /* Indicates active bits */
	uint8_t pos; /* Position of first active bit */
	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
	/* b2,3:sensor_mon_sign_t   sign */
	union {
		struct {
			int16_t limit_low; /* Signed alarm limit low */
			int16_t limit_high; /* Signed alarm limit high */
		} int16;

		struct {
			uint16_t limit_low; /* Unsigned alarm limit low */
			uint16_t limit_high; /* Unsigned alarm limit high */
		} uint16;
	};
};

#pragma pack()
/* NOTE(review): packing is disabled again before this struct; it still
 * contains no padding because all members (uint8_t + the packed entries)
 * have alignment 1 - confirm this layout matches the AVR firmware.
 */
struct sensor_mon_setup16 {
	uint8_t setup_cnt; /* Number of entries in setup_data */
	struct sensor_mon_setup_data16 setup_data[40];
};

/* Define sensor monitoring control */
enum sensor_mon_control {
	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
	SENSOR_MON_CTRL_REM_ALL_SENSORS =
		2, /* Stop and remove all sensor monitoring setup */
};

#endif /* AVR_INTF_H_ */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res = 1;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = (uint16_t)*rxsz, .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
/*
 * Read the latest raw result for sensor "fpga_idx" through the SPI slave
 * interface. Thin pass-through to nthw_spis_read_sensor(): the raw value is
 * stored in *p_sensor_result and the underlying return code is forwarded.
 */
uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
		     uint32_t *p_sensor_result)
{
	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
}
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef _NTAVR_H
#define _NTAVR_H

#include <stdint.h>
#include <stdlib.h>

#include "avr_intf.h"
#include "nthw_drv.h"
#include "nthw_spi_v3.h"

/*
 * @internal
 * @brief AVR Device Enum
 *
 * Global names for identifying an AVR device for Generation2 adapters
 */
enum ntavr_device {
	NTAVR_MAINBOARD, /* Mainboard AVR device */
	NTAVR_FRONTBOARD /* Frontboard AVR device */
};

/* Transmit a sensor monitoring setup (header + setup_cnt entries) to the AVR */
int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
			nthw_spi_v3_t *s_spi);
/* Issue a sensor monitoring control command (start/stop/clear) */
int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
/* Read the raw result of sensor "fpga_idx" into *p_sensor_result */
uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
		     uint32_t *p_sensor_result);

#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
/*
 * Sensor types
 * The type selects the measurement unit of the sensor's value fields.
 */
enum nt_sensor_type_e {
	NT_SENSOR_TYPE_UNKNOWN = 0,
	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
	NT_SENSOR_TYPE_NUMBER = 7,
};

/*
 * Generic SFP/SFP+/SFP28 sensors
 *
 * These sensors should be used instead of all adapter specific SFP sensors
 * that have been deprecated..
 */
enum nt_sensors_sfp {
	NT_SENSOR_SFP_TEMP,
	NT_SENSOR_SFP_SUPPLY,
	NT_SENSOR_SFP_TX_BIAS,
	NT_SENSOR_SFP_TX_POWER,
	NT_SENSOR_SFP_RX_POWER,
};

/*
 * Generic QSFP/QSFP+/QSFP28 sensors
 *
 * These sensors should be used instead of all adapter specific QSFP sensors
 * that have been deprecated..
 * QSFP modules carry four lanes, hence the per-lane bias/power entries.
 */
enum nt_sensors_qsfp {
	NT_SENSOR_QSFP_TEMP,
	NT_SENSOR_QSFP_SUPPLY,
	NT_SENSOR_QSFP_TX_BIAS1,
	NT_SENSOR_QSFP_TX_BIAS2,
	NT_SENSOR_QSFP_TX_BIAS3,
	NT_SENSOR_QSFP_TX_BIAS4,
	NT_SENSOR_QSFP_TX_POWER1,
	NT_SENSOR_QSFP_TX_POWER2,
	NT_SENSOR_QSFP_TX_POWER3,
	NT_SENSOR_QSFP_TX_POWER4,
	NT_SENSOR_QSFP_RX_POWER1,
	NT_SENSOR_QSFP_RX_POWER2,
	NT_SENSOR_QSFP_RX_POWER3,
	NT_SENSOR_QSFP_RX_POWER4,
};

typedef enum nt_sensor_type_e nt_sensor_type_t;
+
/*
 * Sensor subtypes
 */
enum nt_sensor_sub_type_e {
	NT_SENSOR_SUBTYPE_NA = 0,
	/*
	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
	 * amplitude measured)
	 */
	NT_SENSOR_SUBTYPE_POWER_OMA,
	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
	NT_SENSOR_SUBTYPE_POWER_TOTAL
};

typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;

/*
 * Sensor source
 * Values are distinct bit flags (0x01, 0x02, ...) and may be OR'ed together
 * when used as a filter.
 */
enum nt_sensor_source_e {
	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
	/*
	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
	 * depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_PORT =
		0x01,
	/*
	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_LEVEL1_PORT =
		0x02,
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_PORT =
		0x04, /* Level 2 sensors located in a port */
#endif
	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
		0x10, /* Level 1 sensors mounted on the adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
		0x20, /* Level 2 sensors mounted on the adapter */
#endif
};

/*
 * Sensor state
 */
enum nt_sensor_state_e {
	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
	NT_SENSOR_STATE_NOT_PRESENT =
		4 /* The sensor is not present, for example, SFP without diagnostics */
};

typedef enum nt_sensor_state_e nt_sensor_state_t;

/*
 * Sensor value
 * Sentinel stored in value/value_lowest/value_highest until a real sample
 * arrives (see update_sensor_value()).
 */
#define NT_SENSOR_NAN \
	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */

/*
 * Master/Slave
 */
enum nt_bonding_type_e {
	NT_BONDING_UNKNOWN, /* Unknown bonding type */
	NT_BONDING_MASTER, /* Adapter is master in the bonding */
	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
};

enum nt_sensors_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
};
+
/*
 * Adapter types
 */
enum nt_adapter_type_e {
	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X100 =
		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
	/*  */
	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
	/*  */
	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
#endif
	/* High bits are architecture group flags, not ordinary adapter ids */
	NT_ADAPTER_TYPE_4GARCH_HAMOA =
		(1U
		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
	NT_ADAPTER_TYPE_4GARCH =
		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
};

/* The NT200E3 adapter sensor id's */
typedef enum nt_sensors_adapter_nt200_e3_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
	NT_SENSOR_NT200E3_MCU_TEMP,
	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_NT200E3_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_NIM_POWER,

	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
} nt_sensors_adapter_nt200_e3_t;

/*
 * The following sensors are deprecated - generic types should be used instead
 * The NIM temperature sensor must be the one with the lowest sensor_index
 * (enum value) in order to be shown by the monitoring tool in port mode
 */
enum nt_sensors_port_nt200_e3_2_e {
	/* Public sensors */
	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */

	/* Diagnostic sensors (Level 1) */
	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
/*
 * Record a new sample for "sensor" and maintain its running min/max.
 * value_lowest/value_highest start out as NT_SENSOR_NAN (0x80000000); the
 * unsigned casts below detect that sentinel so the first real sample
 * initializes both extremes. A NULL sensor is silently ignored.
 */
void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
{
	if (sensor == NULL)
		return;
	sensor->info.value = value;
	if (sensor->info.value_highest < value ||
			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
		sensor->info.value_highest = value;
	if (sensor->info.value_lowest > value ||
			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
		sensor->info.value_lowest = value;
}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
+/* Conversion functions */
/* Interpret the low 16 bits of a raw sensor word as a signed value. */
int null_signed(uint32_t p_sensor_result)
{
	int16_t narrowed = (int16_t)p_sensor_result;

	return narrowed;
}
+
/* Interpret the low 16 bits of a raw sensor word as an unsigned value. */
int null_unsigned(uint32_t p_sensor_result)
{
	uint16_t narrowed = (uint16_t)p_sensor_result;

	return narrowed;
}
+
/*
 * EXAR7724: convert a raw Vch reading to Napatech units (1 mV).
 * Datasheet: Vout = ReadVal * 0.015 V (PRESCALE accounted for),
 * i.e. 15 mV per step.
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	const int mv_per_step = 15;

	return p_sensor_result * mv_per_step;
}
+
/*
 * EXAR7724: convert a raw Vin reading to Napatech units (1 mV).
 * Datasheet: Vout = ReadVal * 0.0125 V, i.e. 12.5 mV per step,
 * computed as (ReadVal * 25) / 2 to stay in integer arithmetic.
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	uint32_t scaled = p_sensor_result * 25U;

	return scaled / 2;
}
+
/*
 * EXAR7724: convert a raw Tj reading to Napatech units (0.1 degC).
 * Datasheet: Temp[K] = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K
 *                    = ReadVal * 5 K.
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * 2730 is used instead of the more exact 2732 (0 degC in 0.1 K
	 * units) because the 5-degree step size makes round values more
	 * natural to display.
	 */
	const int kelvin_offset_tenths = 2730;

	return p_sensor_result * 50 - kelvin_offset_tenths;
}
+
/*
 * Decode the Linear Technology "Linear_5s_11s" fixed-point format.
 * The encoded word holds N = b[15:11], a 5-bit two's complement exponent,
 * and Y = b[10:0], an 11-bit two's complement mantissa; the decoded value
 * is Y * 2**N. "multiplier" scales the result into Napatech units.
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	int mantissa = value & 0x07FF;
	int exponent = (value >> 11) & 0x1F;

	/* Sign-extend the 11-bit mantissa */
	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent */
	if (exponent & 0x10)
		exponent -= 0x20;

	mantissa *= multiplier;

	/* Apply the binary exponent; division truncates toward zero for
	 * negative exponents, matching the original behavior.
	 */
	if (exponent > 0)
		mantissa *= 1 << exponent;
	else if (exponent < 0)
		mantissa /= 1 << -exponent;

	return mantissa;
}
+
/*
 * LTM4676: temperature conversion from the Linear_5s_11s format to
 * Napatech units (0.1 degC), scaled by 10 via conv5s_11s().
 * NOTE(review): the result is truncated through uint16_t before widening
 * back to int, so a negative decoded temperature would wrap to a large
 * positive value — confirm whether sub-zero readings can occur here.
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	return (uint16_t)conv5s_11s(p_sensor_result, 10); /* NT unit: 0.1C */
}
+
/*
 * MPS MP2886A: convert a READ_TEMPERATURE word to Napatech units (0.1 degC).
 * READ_TEMPERATURE (register 0x8D) is a 2-byte unsigned integer, so only
 * the low 16 bits of the raw result are meaningful.
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * DS1775: convert a raw temperature reading to Napatech units.
 * NT unit: 0.1 degC; native unit: 1/256 degC, hence (raw * 10) / 256.
 */
int ds1775_t(uint32_t p_sensor_result)
{
	uint32_t tenths = (p_sensor_result * 10U) >> 8;

	return tenths;
}
+
/*
 * FAN: convert a tick count to RPM.
 * NOTE(review): the original header said 2 ticks/revolution yet the code
 * scales by 60/4 — presumably the counter accumulates an extra factor of
 * two; confirm against the FPGA tick-counter specification.
 */
int fan(uint32_t p_sensor_result)
{
	uint32_t rpm = (p_sensor_result * 60U) / 4U;

	return rpm;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/* Alarm reporting policy for a sensor */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM,
	NT_SENSOR_LOG_ALARM,
	NT_SENSOR_DISABLE_ALARM,
};

/*
 * Sensor Class types
 */
enum nt_sensor_class_e {
	NT_SENSOR_CLASS_FPGA =
		0, /* Class for FPGA based sensors e.g FPGA temperature */
	NT_SENSOR_CLASS_MCU =
		1, /* Class for MCU based sensors e.g MCU temperature */
	NT_SENSOR_CLASS_PSU =
		2, /* Class for PSU based sensors e.g PSU temperature */
	NT_SENSOR_CLASS_PCB =
		3, /* Class for PCB based sensors e.g PCB temperature */
	NT_SENSOR_CLASS_NIM =
		4, /* Class for NIM based sensors e.g NIM temperature */
	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
};

typedef enum nt_sensor_class_e nt_sensor_class_t;

/*
 * Port of the sensor class
 * Runtime state of one sensor; allocated by allocate_sensor() /
 * allocate_sensor_by_description() and owned by its nt_sensor_group.
 */
struct nt_adapter_sensor {
	uint8_t m_adapter_no;
	uint8_t m_intf_no;
	uint8_t fpga_idx; /* for AVR sensors */
	enum sensor_mon_sign si;
	struct nt_info_sensor_s info; /* public info incl. value and min/max */
	enum nt_sensor_event_alarm_e alarm;
	bool m_enable_alarm;
};

/* FPGA register handles used to sample an FPGA-based sensor */
struct nt_fpga_sensor_monitor {
	nt_fpga_t *fpga;
	nt_module_t *mod;

	nt_register_t *reg;
	nt_field_t **fields; /* array of fields_num field handles */
	uint8_t fields_num;
};

/*
 * Sensor description.
 * Describe the static behavior of the sensor.
 */
struct nt_adapter_sensor_description {
	enum nt_sensor_type_e type; /* Sensor type. */
	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
	unsigned int index; /* Sensor group index. */
	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
	char name[20]; /* Sensor name. */
};

/* Singly-linked list node tying a sensor to its monitor and read/convert hooks */
struct nt_sensor_group {
	struct nt_adapter_sensor *sensor;
	struct nt_fpga_sensor_monitor *monitor;
	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi);

	/* conv params are needed to call current conversion functions */
	int (*conv_func)(uint32_t p_sensor_result);
	/* i2c interface for NIM sensors */

	struct nt_sensor_group *next;
};

/* Reset all pointers of a group to NULL */
void init_sensor_group(struct nt_sensor_group *sg);

/* Record a new sample and maintain running min/max */
void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);

/* Free a group and its owned sensor/monitor; NULL-safe */
void sensor_deinit(struct nt_sensor_group *sg);

/* getters */
int32_t get_value(struct nt_sensor_group *sg);
int32_t get_lowest(struct nt_sensor_group *sg);
int32_t get_highest(struct nt_sensor_group *sg);
char *get_name(struct nt_sensor_group *sg);

struct nt_adapter_sensor *
allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
		enum sensor_mon_sign si);
struct nt_adapter_sensor *
allocate_sensor_by_description(uint8_t adapter_or_port_index,
			       enum nt_sensor_source_e ssrc,
			       struct nt_adapter_sensor_description *descr);

/* conversion functions: raw device word -> Napatech units (see sensors.c) */
int null_signed(uint32_t p_sensor_result);
int null_unsigned(uint32_t p_sensor_result);
int exar7724_tj(uint32_t p_sensor_result);
int max6642_t(uint32_t p_sensor_result);
int ds1775_t(uint32_t p_sensor_result);
int ltm4676_tj(uint32_t p_sensor_result);
int exar7724_vch(uint32_t p_sensor_result);
int exar7724_vin(uint32_t p_sensor_result);
int mp2886a_tj(uint32_t p_sensor_result);
int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
/* Maximum sensor name length, excluding the terminating NUL */
#define NT_INFO_SENSOR_NAME 50
struct nt_info_sensor_s {
	enum nt_sensor_source_e
	source; /* The source of the sensor (port or adapter on which the sensor resides) */
	/*
	 * The source index - the adapter number for adapter sensors and port number for port
	 * sensors
	 */
	uint32_t source_index;
	/*
	 * The sensor index within the source index (sensor number on the adapter or sensor number
	 * on the port)
	 */
	uint32_t sensor_index;
	enum nt_sensor_type_e type; /* The sensor type */
	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
	int32_t value; /* The current value */
	int32_t value_lowest; /* The lowest value registered */
	int32_t value_highest; /* The highest value registered */
	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (NUL-terminated) */
	enum nt_adapter_type_e
	adapter_type; /* The adapter type where the sensor resides */
};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	/* MCU (Micro Controller Unit) temperature sensor */
	NT_SENSOR_NT200A02_MCU_TEMP,
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-16 13:25 ` Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                   ` (17 subsequent siblings)
  20 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  686 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8732 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 383ff15390..c184d5d4b5 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -17,6 +17,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -58,6 +59,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate and zero-initialize a CAT module handle.
+ *
+ * Returns NULL on allocation failure; release with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc() replaces the malloc()+memset() pair: one call, and the
+	 * returned storage is guaranteed zero-initialized.
+	 */
+	return calloc(1, sizeof(struct cat_nthw));
+}
+
+/*
+ * Release a CAT module handle previously obtained from cat_nthw_new().
+ * Accepts NULL (no-op in that case).
+ */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the handle before releasing it so stale register/field
+	 * pointers cannot be reused after free.
+	 */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the requested debug verbosity to the underlying CAT module. */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+/*
+ * Look up every CAT (categorizer) register and field handle for FPGA
+ * instance @n_instance and cache them in @p.
+ *
+ * When @p is NULL the call degenerates into an existence probe for the
+ * module instance. Returns 0 on success, -1 when the instance is absent.
+ */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	/* KM interface count; -1 (the supplied default) selects the legacy
+	 * single-KM register layout in the branches below.
+	 */
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	/* Single-KM FPGAs expose one KM_OR field; banked FPGAs expose
+	 * KM0_OR and (optionally) KM1_OR.
+	 */
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	/* < 0 (i.e. the -1 default): legacy unbanked KCE/KCS/FTE registers;
+	 * otherwise the KCE0/1, KCS0/1, FTE0/1 banked layout.
+	 */
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	/* NOTE(review): register_query_field() appears to tolerate fields
+	 * absent from this FPGA variant (returning NULL) where
+	 * register_get_field() does not — confirm against nthw_fpga_model.
+	 */
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	/* Optional CFN fields: may be NULL on variants lacking them; the
+	 * corresponding setters assert non-NULL before use.
+	 */
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	/* Optional registers (KCC/CCE/CCS): query first, then resolve their
+	 * fields only when the register exists.
+	 */
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select which CFN table entry the subsequent data writes address. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN control CNT field (number of entries to write).
+ *
+ * Renamed from the stray identifier `r` to match the module's uniform
+ * cat_nthw_<table>_cnt() naming (cf. cat_nthw_kce_cnt, cat_nthw_cte_cnt).
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/*
+ * CFN field setters: each stages one field of the CFN DATA/CTRL register
+ * pair via field_set_val32(); cat_nthw_cfn_flush() then writes both
+ * registers out with register_flush().
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/*
+ * The setters below touch optional fields that cat_nthw_init() resolves
+ * with register_query_field(); the asserts guard a NULL field handle on
+ * FPGA variants that lack the field.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* PM_CMP is a multi-word field: copy mn_words 32-bit words from val. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR exists only on banked-KM FPGAs (register_query_field in init). */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Write the staged CFN CTRL and DATA register contents to hardware. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE/KCS/FTE table accessors. @index selects the KM interface bank
+ * (0 or 1); bank 1 registers are populated by cat_nthw_init() only on
+ * FPGAs with the banked KM layout.
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE register-group setters: per-engine enable bits in the CTE data
+ * shadow register.  Staged until cat_nthw_cte_flush().
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+/*
+ * The remaining enable fields are asserted before use: they appear to be
+ * optional on some FPGA variants — NOTE(review): confirm against the FPGA
+ * model when these fields are resolved in cat_nthw_init().
+ */
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Flush the staged CTE control and data registers to hardware. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/*
+ * CTS/COT/CCT register-group setters.  Same staged-write pattern:
+ * select an entry, set data fields, then call the group's *_flush().
+ */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	/* NFV_SB field may be absent; the assert guards it. */
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO/RCK/LEN register-group setters (staged until the group's flush). */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	/* Signed offset; implicitly converted to the field's uint32_t
+	 * representation (well-defined modulo-2^32 conversion in C).
+	 */
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	/* Unlike the other setters this writes the whole data register
+	 * (no per-field handle is kept for RCK data) and marks it dirty
+	 * explicitly so the following flush pushes it out.
+	 */
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC/CCE register-group setters.  Every accessor asserts its register or
+ * field pointer: these register groups appear to be optional module
+ * features — NOTE(review): confirm against the FPGA model lookups in
+ * cat_nthw_init().
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	/* 64-bit key: copies two 32-bit words from val. */
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/* CCS register-group setters; same asserted-optional pattern as KCC/CCE. */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generates cat_nthw_ccs_data_<name>() setters for the many CCS data
+ * fields below, each asserting the field exists before staging the value.
+ * NOTE(review): the trailing ';' after each invocation expands to an empty
+ * file-scope declaration — accepted by GCC/Clang but flagged by -pedantic.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en);
+CATNTHW_CCS_SET(cor);
+CATNTHW_CCS_SET(hsh_en);
+CATNTHW_CCS_SET(hsh);
+CATNTHW_CCS_SET(qsl_en);
+CATNTHW_CCS_SET(qsl);
+CATNTHW_CCS_SET(ipf_en);
+CATNTHW_CCS_SET(ipf);
+CATNTHW_CCS_SET(slc_en);
+CATNTHW_CCS_SET(slc);
+CATNTHW_CCS_SET(pdb_en);
+CATNTHW_CCS_SET(pdb);
+CATNTHW_CCS_SET(msk_en);
+CATNTHW_CCS_SET(msk);
+CATNTHW_CCS_SET(hst_en);
+CATNTHW_CCS_SET(hst);
+CATNTHW_CCS_SET(epp_en);
+CATNTHW_CCS_SET(epp);
+CATNTHW_CCS_SET(tpe_en);
+CATNTHW_CCS_SET(tpe);
+CATNTHW_CCS_SET(rrb_en);
+CATNTHW_CCS_SET(rrb);
+CATNTHW_CCS_SET(sb0_type);
+CATNTHW_CCS_SET(sb0_data);
+CATNTHW_CCS_SET(sb1_type);
+CATNTHW_CCS_SET(sb1_data);
+CATNTHW_CCS_SET(sb2_type);
+CATNTHW_CCS_SET(sb2_data);
+
+/* Flush the staged CCS control and data registers to hardware. */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+/* NOTE(review): the guard name is a reserved identifier (leading double
+ * underscore, CERT C DCL37-C); consider renaming to FLOW_NTHW_CAT_H.
+ */
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+/* Handle lifetime: allocate, bind to an FPGA CAT module instance, free. */
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/*
+ * Register-group accessors.  The setters stage values in shadow registers;
+ * the matching *_flush() writes the staged control/data registers to the
+ * FPGA.
+ */
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/* Fixed: this prototype was declared as 'r' (a stray search/replace
+ * artefact); every other register group pairs *_select with *_cnt.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/* Per-instance handle for the CAT FPGA module: module/register/field
+ * pointers resolved once at init time and reused by the accessors above.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	/* Number of KM interfaces; presumably gates the second KCE/KCS/FTE
+	 * bank and the KM1_OR field — TODO confirm against cat_nthw_init().
+	 */
+	int m_km_if_cnt;
+
+	/* CFN control/data shadow registers and their data fields. */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE/KCS/FTE: two register instances each (indexed accessors). */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE: per-engine enable fields. */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK: data register is written whole (no per-field handles). */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC (optional — accessors assert non-NULL). */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE (optional — accessors assert non-NULL). */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS (optional — accessors assert non-NULL). */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU module handle. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized csu_nthw handle.
+ * Returns NULL on allocation failure.  calloc() replaces the original
+ * malloc()+memset() pair — identical observable behavior, one call.
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	struct csu_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Release a csu_nthw handle previously returned by csu_nthw_new().
+ * Safe to call with NULL.
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the handle before freeing so stale pointers fail loudly. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a csu_nthw handle to CSU module instance n_instance of p_fpga and
+ * resolve the RCP control/data register and field handles.
+ *
+ * When called with p == NULL this only probes for the module: returns 0
+ * if the instance exists, -1 otherwise.  With a handle, returns 0 on
+ * success or -1 if the instance does not exist.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Resolve RCP register/field handles used by the setters below. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Stage the RCP entry address to operate on (written on rcp_flush). */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* Stage the RCP entry count (written on rcp_flush). */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/*
+ * RCP checksum-command setters.  Values are staged in the RCP data shadow
+ * register and take effect when csu_nthw_rcp_flush() is called.
+ */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Flush the staged RCP control and data shadow registers to hardware. */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name starts with '_' + uppercase, which C reserves for
+ * the implementation (C11 7.1.3); kept as-is for consistency with the rest of
+ * the driver — consider a tree-wide rename.
+ */
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle to one instance of the CSU (checksum update) FPGA module. */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	/* RCP (recipe) table access: CTRL selects entry/count, DATA holds the
+	 * per-recipe checksum command fields.
+	 */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;	/* recipe index to address */
+	nt_field_t *mp_rcp_ctrl_cnt;	/* number of entries to write */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;	/* outer L3 checksum command */
+	nt_field_t *mp_rcp_data_ol4_cmd;	/* outer L4 checksum command */
+	nt_field_t *mp_rcp_data_il3_cmd;	/* inner (tunneled) L3 command */
+	nt_field_t *mp_rcp_data_il4_cmd;	/* inner (tunneled) L4 command */
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* Recipe field setters; staged values are committed by csu_nthw_rcp_flush(). */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized FLM module handle.
+ *
+ * Returns the new handle, or NULL on allocation failure. The caller owns the
+ * handle and must release it with flm_nthw_delete().
+ */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc() zeroes the object in one step (replaces malloc + memset). */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+/*
+ * Release a handle obtained from flm_nthw_new(). NULL is accepted and
+ * ignored. The object is zeroed before free() so stale register/field
+ * pointers cannot be reused by accident.
+ */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Propagate the debug-mode setting to the underlying FLM module model.
+ * NOTE(review): p and p->m_flm must be valid — no NULL check here; call only
+ * after a successful flm_nthw_init().
+ */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+/*
+ * Bind the handle to instance n_instance of the FLM module on p_fpga and
+ * resolve every register/field handle used by the accessors below.
+ *
+ * Handles resolved via module_query_register()/register_query_field() are
+ * optional (absent on some FPGA images) and may be left NULL; the matching
+ * accessors assert() on them before use.
+ *
+ * If p is NULL, the call only probes for the instance's existence.
+ *
+ * @return 0 on success, -1 if the FLM instance does not exist.
+ */
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	/* CONTROL register; PDS/PIS fields are optional (query, may be NULL). */
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL)
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	/* STATUS register and its flag fields. */
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	/* Timeout, scrub interval and load-report registers. */
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	/* Priority limits/flow-types for the four priority levels. */
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	/* PST table (CTRL selects an entry, DATA carries its fields). */
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	/* RCP (recipe) table. */
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Registers addressed directly via RAB DMA (see *_update/_flush below). */
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	/* Statistics counters; the queried ones below may be absent. */
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM_CONTROL field setters. Each call stages one field value; the CONTROL
+ * register is written out by flm_nthw_control_flush().
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/* PDS is optional on some FPGA images (resolved via register_query_field()
+ * in flm_nthw_init()); the assert guards against use on images lacking it.
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+/* PIS is optional; same caveat as PDS above. */
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Commit all staged CONTROL fields with a single register write. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM_STATUS accessors. Convention: when get is non-zero the field value is
+ * read into *val; when get is zero, the writable sticky bits (critical,
+ * panic, crcerr) are set from *val instead. calibdone/initdone/idle/eft_bp
+ * are read-only here (no else branch). Call flm_nthw_status_update() first
+ * to refresh, and flm_nthw_status_flush() to write staged bits out.
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write staged STATUS bits to hardware. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the shadow copy of STATUS from hardware. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * Timeout, scrub-interval and load-report register accessors. Each setter
+ * stages a value; the matching *_flush() commits it to hardware.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * FLM_PRIO field setters: per-priority-level limit and flow-type values
+ * (levels 0-3). Staged values are committed by flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table accessors: select chooses the entry (CTRL_ADR), cnt sets the
+ * entry count, bp/pp/tp stage the entry's fields; flm_nthw_pst_flush()
+ * commits CTRL then DATA.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table accessors: select/cnt address the entry via RCP_CTRL,
+ * the rest stage the entry's fields in RCP_DATA; flm_nthw_rcp_flush()
+ * commits CTRL then DATA.
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: val must point to 10 x 32-bit words. */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the FLM buffer-control state over RAB DMA.
+ *
+ * Two 32-bit words are read from BUF_CTRL: word 0 holds LRN free space
+ * (low 16 bits) and INF available (high 16 bits); word 1 holds STA
+ * available (low 16 bits). bc_buf is a ring buffer, hence the index
+ * masking (size is assumed to be a power of two — TODO confirm).
+ *
+ * @return 0 on success; non-zero RAC error code (or -1) on failure, in
+ *         which case the output parameters are left unmodified.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Push word_count learn-record words to LRN_DATA over RAB DMA.
+ *
+ * Sequence within one DMA transaction: announce the word count via
+ * BUF_CTRL, write the learn data, then read BUF_CTRL back to report the
+ * updated lrn_free/inf_avail/sta_avail (same word layout as
+ * flm_nthw_buf_ctrl_update()).
+ *
+ * @return 0 on success; -1 if the DMA transaction could not be started,
+ *         otherwise the commit's error code. Outputs are unmodified on
+ *         failure.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	if (nthw_rac_rab_dma_begin(rac) == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Fetch word_count words of flow-info records from INF_DATA over RAB DMA.
+ *
+ * The requested count is announced via BUF_CTRL (count in the high 16 bits
+ * of word 0), the data is copied out of the DMA ring into data[], and the
+ * refreshed lrn_free/inf_avail/sta_avail values are returned.
+ *
+ * @return 0 on success; non-zero on DMA begin/commit failure, in which
+ *         case data[] and the outputs are unmodified.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring (size assumed power of two). */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Fetch word_count words of status records from STA_DATA over RAB DMA.
+ *
+ * Mirrors flm_nthw_inf_data_update(), but the requested count is announced
+ * in BUF_CTRL word 1 and the data comes from the STA_DATA register.
+ *
+ * @return 0 on success; non-zero on DMA begin/commit failure, in which
+ *         case data[] and the outputs are unmodified.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring (size assumed power of two). */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM statistics counter accessors. Each *_cnt() copies the current field
+ * value into *val when get is non-zero (the counters are read-only here);
+ * each *_update() refreshes the backing register via register_update().
+ * Accessors guarded by assert() correspond to registers/fields resolved
+ * with module_query_register()/register_query_field() in flm_nthw_init()
+ * and may be absent on some FPGA images.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn Done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Handle for one FLM (flow matcher) FPGA module instance.
+ *
+ * Caches the module handle plus every register and field handle the FLM
+ * accessor functions in flow_nthw_flm.c operate on; populated once by
+ * flm_nthw_init().  Pointer members named mp_* are owned by the FPGA
+ * model, not by this struct.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* Load indicators: BIN, PPS, LPS, APS */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/flow-type pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST table: indirect CTRL (address/count) + DATA access */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP (recipe) table: indirect CTRL (address/count) + DATA access */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status buffer control and data registers */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counter registers, one register + CNT field each */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module handle. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized hfu_nthw instance.
+ *
+ * @return Pointer to the new instance, or NULL on allocation failure.
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (no separate memset) */
+	struct hfu_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Release an hfu_nthw instance previously returned by hfu_nthw_new().
+ * The struct is cleared before being freed; NULL is accepted and ignored.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the HFU module instance and all of its RCP register/field
+ * handles into *p.
+ *
+ * @param p          Instance to initialize; may be NULL to only probe for
+ *                   the module's existence.
+ * @param p_fpga     FPGA handle used for the module lookup.
+ * @param n_instance Module instance number (asserted to be 0..255).
+ *
+ * @return 0 on success, -1 when the module instance does not exist.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle already looked up above instead of
+	 * querying the FPGA a second time (consistent with hsh_nthw_init).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * HFU RCP register field setters.
+ *
+ * Each setter writes its value into the RCP_CTRL or RCP_DATA register's
+ * field; hfu_nthw_rcp_flush() then flushes both registers.
+ *
+ * NOTE(review): this assumes field_set_val32() only updates the shadow
+ * copy and register_flush() performs the actual hardware write -- confirm
+ * against the nthw_fpga_model implementation.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Commit the staged RCP_CTRL and RCP_DATA values. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one HFU (header field update) FPGA module instance.
+ *
+ * Caches the module handle plus the RCP register/field handles used by
+ * the setters in flow_nthw_hfu.c; populated once by hfu_nthw_init().
+ * Pointer members named mp_* are owned by the FPGA model.
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP_CTRL register: table address and write count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP_DATA register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Set the debug/trace level on the underlying HSH FPGA module shadow. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HSH module shadow.
+ * Returns NULL on allocation failure; the caller owns the instance and
+ * must release it with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() zero-initializes in one overflow-checked call,
+	 * replacing the malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(struct hsh_nthw));
+}
+
+/* Destroy an HSH shadow created by hsh_nthw_new(). Accepts NULL. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p) {
+		/* Scrub stale register/field handles before freeing.
+		 * NOTE(review): a compiler may elide a memset before free.
+		 */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HSH shadow to FPGA instance n_instance and program safe RCP
+ * defaults.
+ *
+ * Probe mode: when p is NULL, only report whether the instance exists
+ * (0 = present, -1 = absent). Otherwise returns 0 on success or -1 when
+ * the module instance is not found in the FPGA.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache control/data register and field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: query (not get) so absence yields NULL, checked
+	 * by hsh_nthw_rcp_auto_ipv4_mask() before each write.
+	 */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: write defaults to RCP record 0 and flush to hardware.
+	 * NOTE(review): p_mask and auto_ipv4_mask get no explicit default,
+	 * and hsh_type defaults to 31 — presumably a "disabled" encoding;
+	 * confirm against the HSH register documentation.
+	 */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* WORD_MASK is written with a fixed 10-word buffer here, unlike
+	 * MAC_PORT_MASK which uses the field's own mn_words — TODO confirm
+	 * the field is always 10 words wide.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field writers. Each call updates the shadow value of one HSH RCP
+ * field; nothing reaches hardware until hsh_nthw_rcp_flush() flushes the
+ * CTRL and DATA registers.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* Multi-word field: val must hold at least mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Offset writers take int32_t (offsets may be negative). */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* Multi-word field: val must hold at least 10 32-bit words. */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional in the FPGA image; silently ignored when
+ * the field is absent (handle is NULL).
+ */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Push all cached RCP CTRL/DATA values to hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard names starting with double underscore are reserved
+ * identifiers in C; kept here to match the convention used across this
+ * driver's headers.
+ */
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+/* Lifetime: new/init/delete; see flow_nthw_hsh.c for semantics. */
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field writers; values are cached until hsh_nthw_rcp_flush(). */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Shadow of one HSH module instance: cached register/field handles. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Set the debug/trace level on the underlying HST FPGA module shadow. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HST module shadow.
+ * Returns NULL on allocation failure; release with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc() zero-initializes in one overflow-checked call,
+	 * replacing the malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(struct hst_nthw));
+}
+
+/* Destroy an HST shadow created by hst_nthw_new(). Accepts NULL. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		/* Scrub stale register/field handles before freeing. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HST shadow to FPGA instance n_instance.
+ *
+ * Probe mode: when p is NULL, only report whether the instance exists
+ * (0 = present, -1 = absent). Otherwise returns 0 on success or -1 when
+ * the module instance is not found.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no RCP defaults are written and
+ * no flush is issued here — presumably callers fully program the RCP
+ * before use; confirm against the flow-filter setup path.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache control/data register and field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/* RCP field writers. Each call updates the shadow value of one HST RCP
+ * field; nothing reaches hardware until hst_nthw_rcp_flush().
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Push all cached RCP CTRL/DATA values to hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard names starting with double underscore are reserved
+ * identifiers in C; kept here to match the convention used across this
+ * driver's headers.
+ */
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Shadow of one HST module instance: cached register/field handles. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+/* Lifetime: new/init/delete; see flow_nthw_hst.c for semantics. */
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field writers; values are cached until hst_nthw_rcp_flush(). */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Set the debug/trace level on the underlying IFR FPGA module shadow. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IFR module shadow.
+ * Returns NULL on allocation failure; release with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc() zero-initializes in one overflow-checked call,
+	 * replacing the malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(struct ifr_nthw));
+}
+
+/* Destroy an IFR shadow created by ifr_nthw_new(). Accepts NULL. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		/* Scrub stale register/field handles before freeing. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IFR shadow to FPGA instance n_instance.
+ *
+ * Probe mode: when p is NULL, only report whether the instance exists
+ * (0 = present, -1 = absent). Otherwise returns 0 on success or -1 when
+ * the module instance is not found.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle already queried above instead of issuing a
+	 * second, redundant fpga_query_module() call.
+	 */
+	p->m_ifr = p_mod;
+
+	/* RCP: cache control/data register and field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP field writers: values are cached until ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Write the RCP MTU field (cached until ifr_nthw_rcp_flush()). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix: assert the field actually being written (was checking
+	 * mp_rcp_data_en by copy-paste mistake).
+	 */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Push all cached RCP CTRL/DATA values to hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+/* Fix: this header uses uint32_t/uint8_t in its own declarations, so it
+ * must include <stdint.h> itself instead of relying on nthw_fpga_model.h
+ * to provide it (matches the sibling flow_nthw_*.h headers).
+ */
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Shadow of one IFR module instance: cached register/field handles. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+/* Consistency with the other flow_nthw headers (hsh_nthw_t, hst_nthw_t). */
+typedef struct ifr_nthw ifr_nthw_t;
+
+/* Lifetime: new/init/delete; see flow_nthw_ifr.c for semantics. */
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR RCP field writers; values are cached until ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Collapse an unsigned count to a 0/1 presence flag (0 -> 0, else 1). */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val ? 1 : 0;
+}
+
+/*
+ * Allocate a zero-initialized INFO shadow (FPGA capability snapshot).
+ * Returns NULL on allocation failure; release with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc() zero-initializes in one overflow-checked call,
+	 * replacing the malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(struct info_nthw));
+}
+
+/* Destroy an INFO shadow created by info_nthw_new(). Accepts NULL. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		/* Scrub cached capability values before freeing. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Snapshot all FPGA product parameters (capabilities and table sizes)
+ * into the INFO shadow. Capabilities of absent modules are forced to 0
+ * by multiplying with the corresponding 0/1 presence flag.
+ *
+ * Always returns 0.
+ * NOTE(review): unlike the module inits in this series, there is no
+ * p == NULL probe guard here — p must be non-NULL; confirm callers.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for the optional FPGA modules. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is usable only when all five sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	/* Raw capability/table-size parameters, scaled by presence. */
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is bounded by the smaller of the Rx/Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): default -1 here — presumably stored/returned as an
+	 * "invalid" sentinel even though the getters return unsigned int;
+	 * confirm the intended encoding.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Read-only accessors for the capability snapshot taken by
+ * info_nthw_init(). Each returns one cached value; absent-module
+ * capabilities read as 0.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): "__FLOW_NTHW_INFO_H__" starts with a double underscore,
+ * which is an identifier reserved for the implementation; consider
+ * FLOW_NTHW_INFO_H instead.
+ */
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Accessor object for the FPGA INFO module: caches capability and
+ * resource counts read from the FPGA so the flow filter code can query
+ * them without touching hardware registers again.
+ */
+struct info_nthw;
+
+/* Allocate a zeroed instance; free with info_nthw_delete(). */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+/* Bind to FPGA instance n_instance; returns 0 on success, -1 on error. */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+struct info_nthw {
+	/* FPGA instance this object was bound to */
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	/* port topology */
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	/* CAT / KM / FLM capability counts */
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	/* per-module category counts */
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	/* TPE / TX copy-replace capability counts */
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA module model. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ioa_nthw instance.
+ * Returns NULL on allocation failure; release with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc zero-initializes, replacing the malloc+memset pair */
+	struct ioa_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Destroy an ioa_nthw instance.  Safe to call with NULL.
+ * The struct is scrubbed before free as a defensive measure against
+ * later use of stale register/field pointers.
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an ioa_nthw instance to the IOA module registers/fields of the
+ * given FPGA instance.
+ *
+ * If p is NULL the function only probes for the module: it returns 0
+ * when the instance exists and -1 when it does not.
+ * Otherwise returns 0 on success, -1 if the module instance is absent.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * The EPP registers are optional: module_query_register()
+		 * returns NULL when a register is absent in this FPGA
+		 * build, and the field pointers are then left NULL so the
+		 * setters below can skip them.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Field setters.  Each stores a value into the corresponding field of
+ * the register model; values presumably reach hardware when the
+ * matching *_flush() function is called — see ioa_nthw_rcp_flush().
+ */
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * EPP registers are optional in this FPGA build (see ioa_nthw_init),
+ * so every accessor below guards against NULL register/field pointers.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): "__FLOW_NTHW_IOA_H__" starts with a double underscore,
+ * which is an identifier reserved for the implementation; consider
+ * FLOW_NTHW_IOA_H instead.
+ */
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Register/field handles for one IOA module instance of the FPGA.
+ * Populated by ioa_nthw_init(); the mp_roa_epp_* pointers are optional
+ * and may stay NULL when the registers are absent in the FPGA build.
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	/* recipe control/data registers */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	/* custom VLAN TPID register */
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* optional EPP control bits hosted in IOA (may be NULL) */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..6477debd46
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Set a field only when it exists in this FPGA revision (non-NULL
+ * field pointer).  The arguments are copied into single-evaluation
+ * temporaries.  The temporaries must not be named "a"/"val": every
+ * caller passes its own parameter literally named "val", and with a
+ * temporary of the same name the initializer "__typeof__(val) (val) =
+ * (val);" binds to the freshly declared (uninitialized) "val" (C11
+ * 6.2.1: an identifier is in scope from the end of its declarator),
+ * so an indeterminate value would be written to the field.
+ */
+#define CHECK_AND_SET_VALUE(_a, _val)             \
+	do {                                    \
+		__typeof__(_a) field_tmp_ = (_a); \
+		__typeof__(_val) val_tmp_ = (_val); \
+		if (field_tmp_) {                        \
+			field_set_val32(field_tmp_, val_tmp_); \
+		}                               \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM module model. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized km_nthw instance.
+ * Returns NULL on allocation failure; release with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc zero-initializes, replacing the malloc+memset pair */
+	struct km_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Destroy a km_nthw instance.  Safe to call with NULL.
+ * The struct is scrubbed before free as a defensive measure against
+ * later use of stale register/field pointers.
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a km_nthw instance to the KM module registers/fields of the
+ * given FPGA instance.
+ *
+ * If p is NULL the function only probes for the module: it returns 0
+ * when the instance exists and -1 when it does not.
+ * Otherwise returns 0 on success, -1 if the module instance is absent.
+ *
+ * Fields looked up with register_query_field() are optional across
+ * FPGA/module revisions; the pointer stays NULL when absent and the
+ * setters skip NULL fields via CHECK_AND_SET_VALUE.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* SW8 vs DW8 below distinguishes old/new RCP layouts (see v0.6+ handling) */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/* RCP */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+};
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+};
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+};
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+};
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+};
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+};
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+};
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+};
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+};
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+};
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+};
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+};
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+};
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+};
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+};
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+};
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+};
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+};
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+};
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+};
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+};
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+};
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+};
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+};
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+};
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+};
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+};
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+};
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+};
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+};
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}; /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+};
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+};
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+};
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+};
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+};
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+};
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+} /* to use in v4 */
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() zero-initializes the object, replacing the
+	 * malloc() + memset() pair and checking the size product.
+	 */
+	struct pdb_nthw *p = calloc(1, sizeof(struct pdb_nthw));
+
+	return p;
+}
+
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() zero-initializes the object, replacing the
+	 * malloc() + memset() pair and checking the size product.
+	 */
+	struct qsl_nthw *p = calloc(1, sizeof(struct qsl_nthw));
+
+	return p;
+}
+
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+/* RCP flow-table (TBL_LO/TBL_HI/TBL_IDX/TBL_MSK) field setters */
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_ltx_ctrl, 1);
+	register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handle cache for one QSL module instance, populated by
+ * qsl_nthw_init(). Field handles resolved with the query_* variants may
+ * be NULL when the field is absent in the FPGA build (see the NULL
+ * checks in the QST setters).
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP register group (CTRL + DATA fields) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX register group */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST register group; some DATA fields may be NULL (optional) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN register group */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ register group */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying RMC module handle. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rmc_nthw context; NULL on OOM. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	struct rmc_nthw *p = malloc(sizeof(struct rmc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a context created by rmc_nthw_new(); NULL is a no-op. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Look up RMC module instance @n_instance and resolve its CTRL
+ * register/field handles into @p. Returns 0 on success, -1 if the
+ * instance does not exist. Called with p == NULL it only probes for
+ * module presence.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* query (not get): RPP_SLICE is optional; handle stays NULL if absent */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-index setup required for RMC; kept for API symmetry. */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL register group: field setters plus flush of the CTRL register.
+ * RPP_SLICE is optional and skipped when its handle is NULL.
+ */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handle cache for one RMC module instance, populated by
+ * rmc_nthw_init(). mp_ctrl_block_rpp_slice is optional and may be NULL.
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	/* CTRL register and its fields */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying ROA module handle. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized roa_nthw context; NULL on OOM. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	struct roa_nthw *p = malloc(sizeof(struct roa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a context created by roa_nthw_new(); NULL is a no-op. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Look up ROA module instance @n_instance and resolve all register and
+ * field handles (TUN HDR, TUN CFG, CONFIG, LAG groups) into @p.
+ * Returns 0 on success, -1 if the instance does not exist. Called with
+ * p == NULL it only probes for module presence.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR register group: setters plus flush of CTRL and DATA. */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* val points to 4 x 32-bit words written into the TUNNEL_HDR field. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG register group: per-field setters for the TUNCFG DATA
+ * register (flushed together with CTRL by roa_nthw_tun_cfg_flush()).
+ */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX_LAG_IX field of the TUNCFG DATA register. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Write the TUNCFG CTRL and DATA registers (one entry each). */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* CONFIG register group: forwarding-control field setters plus flush. */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG CFG register group: setters plus flush of CTRL and DATA. */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Register/field handle cache for one ROA module instance, populated by
+ * roa_nthw_init().
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR register group */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG register group */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG register and its forwarding-control fields */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG CFG register group */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying RPP_LR module handle. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rpp_lr_nthw context; NULL on OOM. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free a context created by rpp_lr_nthw_new(); NULL is a no-op. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Look up RPP_LR module instance @n_instance and resolve its register
+ * and field handles into @p. Returns 0 on success, -1 if the instance
+ * does not exist. Called with p == NULL it only probes for module
+ * presence. The IFR RCP registers/fields are optional (query_* lookups)
+ * and their handles may stay NULL.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above; the original code
+	 * issued a second, redundant fpga_query_module() call here.
+	 */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	/* Optional IFR RCP group: query_* returns NULL when absent. */
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP register group: setters plus flush; mandatory handles are
+ * asserted non-NULL before use.
+ */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* IFR RCP register group: optional in the FPGA build (resolved with
+ * query_* lookups in init); callers must only use these when the
+ * handles exist — the asserts enforce that in debug builds.
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handle cache for one RPP_LR module instance, populated
+ * by rpp_lr_nthw_init(). The IFR RCP handles are optional and may be
+ * NULL (resolved with query_* lookups).
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	/* RCP register group */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR RCP register group (optional; may be NULL) */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC module handle. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized slc_nthw context; NULL on OOM. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	struct slc_nthw *p = malloc(sizeof(struct slc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a context created by slc_nthw_new(); NULL is a no-op. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Look up SLC module instance @n_instance and resolve its RCP register
+ * and field handles into @p. Returns 0 on success, -1 if the instance
+ * does not exist. Called with p == NULL it only probes for module
+ * presence.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above; the original code
+	 * issued a second, redundant fpga_query_module() call here.
+	 */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP register group: setters plus flush of CTRL and DATA. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* val is signed (offset); stored via the same 32-bit field writer. */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__ /* NOTE(review): identifiers starting with "__" are reserved (C11 7.1.3); confirm project convention */
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one SLC (slicer) FPGA module instance and its RCP register/field pointers. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter number; set from init's n_instance */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned by this struct) */
+
+	nt_module_t *m_slc;	/* MOD_SLC module instance */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* SLC_RCP_CTRL_ADR */
+	nt_field_t *mp_rcp_cnt;	/* SLC_RCP_CTRL_CNT */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* SLC_RCP_DATA_TAIL_SLC_EN */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* SLC_RCP_DATA_TAIL_DYN */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* SLC_RCP_DATA_TAIL_OFS */
+	nt_field_t *mp_rcp_data_pcap;	/* SLC_RCP_DATA_PCAP */
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);	/* allocate zeroed handle; NULL on OOM */
+void slc_nthw_delete(struct slc_nthw *p);	/* scrub and free; NULL is a no-op */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);	/* 0 on success, -1 if instance missing */
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);	/* push staged CTRL+DATA writes to HW */
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);	/* forward to the SLC_LR module; p must be initialized */
+}
+
+struct slc_lr_nthw *slc_lr_nthw_new(void)	/* allocate and zero a handle; returns NULL on OOM */
+{
+	struct slc_lr_nthw *p = malloc(sizeof(struct slc_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)	/* scrub (clears stale pointers) and free; NULL is a no-op */
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)	/* 0 on success, -1 if no such instance; p == NULL probes existence only */
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);	/* assumes <assert.h> reaches here via nthw_drv.h — confirm */
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc_lr = p_mod;	/* reuse the module handle queried above */
+
+	/* RCP register/field lookups */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);	/* SLC_RCP_CTRL_ADR field; presumably latched by slc_lr_nthw_rcp_flush() */
+}
+
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);	/* SLC_RCP_CTRL_CNT field */
+}
+
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);	/* SLC_RCP_DATA_TAIL_SLC_EN field */
+}
+
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);	/* SLC_RCP_DATA_TAIL_DYN field */
+}
+
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);	/* SLC_RCP_DATA_TAIL_OFS field; signed value passed to 32-bit field write */
+}
+
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);	/* SLC_RCP_DATA_PCAP field */
+}
+
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);	/* write CTRL (ADR/CNT) first */
+	register_flush(p->mp_rcp_data, 1);	/* then DATA fields */
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__ /* NOTE(review): double-underscore names are reserved (C11 7.1.3); confirm project convention */
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one SLC_LR FPGA module instance and its RCP register/field pointers. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter number; set from init's n_instance */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned by this struct) */
+
+	nt_module_t *m_slc_lr;	/* MOD_SLC_LR module instance */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* SLC_RCP_CTRL_ADR */
+	nt_field_t *mp_rcp_cnt;	/* SLC_RCP_CTRL_CNT */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* SLC_RCP_DATA_TAIL_SLC_EN */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* SLC_RCP_DATA_TAIL_DYN */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* SLC_RCP_DATA_TAIL_OFS */
+	nt_field_t *mp_rcp_data_pcap;	/* SLC_RCP_DATA_PCAP */
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);	/* allocate zeroed handle; NULL on OOM */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);	/* scrub and free; NULL is a no-op */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);	/* 0 on success, -1 if instance missing */
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);	/* push staged CTRL+DATA writes to HW */
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);	/* forward to the TX_CPY module; p must be initialized */
+}
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)	/* allocate and zero a handle; returns NULL on OOM */
+{
+	struct tx_cpy_nthw *p = malloc(sizeof(struct tx_cpy_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)	/* free owned writer array, scrub, free; NULL is a no-op */
+{
+	if (p) {
+		free(p->m_writers);	/* allocated by tx_cpy_nthw_init (calloc); free(NULL) is safe */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)	/* 0 on success, -1 on missing instance/param/OOM; p == NULL probes existence only */
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);	/* assumes <assert.h> reaches here via nthw_drv.h — confirm */
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = p_mod;	/* reuse the module handle queried above */
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/* Map registers for writers high-to-low; each case falls through to the next. */
+	switch (p->m_writers_cnt) {
+	default:	/* NOTE(review): counts > 6 map only writers 0-5; entries 6.. stay zeroed by calloc */
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {	/* mask registers exist only on variant != 0 */
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:	/* unreachable: writers_cnt >= 1 enforced above */
+		return -1;
+	}
+
+	return 0;
+}
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);	/* CPY_WRITERn_CTRL_ADR field */
+}
+
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);	/* CPY_WRITERn_CTRL_CNT field */
+}
+
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);	/* CPY_WRITERn_DATA_READER_SELECT field */
+}
+
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);	/* CPY_WRITERn_DATA_DYN field */
+}
+
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);	/* CPY_WRITERn_DATA_OFS field */
+}
+
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);	/* CPY_WRITERn_DATA_LEN field */
+}
+
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);	/* field exists only when CPY variant != 0 (see init) */
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);	/* write CTRL first */
+	register_flush(p->m_writers[index].mp_writer_data, 1);	/* then DATA */
+}
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);	/* mask registers exist only when CPY variant != 0 */
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);	/* mask registers exist only when CPY variant != 0 */
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);	/* mask registers exist only when CPY variant != 0 */
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);	/* mask registers exist only when CPY variant != 0 */
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);	/* write MASK_CTRL first */
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);	/* then MASK_DATA */
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__ /* NOTE(review): double-underscore names are reserved (C11 7.1.3); confirm project convention */
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field pointers for one TX_CPY writer (CPY_WRITERn_*). */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;	/* CPY_WRITERn_CTRL */
+	nt_field_t *mp_writer_ctrl_addr;	/* CPY_WRITERn_CTRL_ADR */
+	nt_field_t *mp_writer_ctrl_cnt;	/* CPY_WRITERn_CTRL_CNT */
+
+	nt_register_t *mp_writer_data;	/* CPY_WRITERn_DATA */
+	nt_field_t *mp_writer_data_reader_select;	/* CPY_WRITERn_DATA_READER_SELECT */
+	nt_field_t *mp_writer_data_dyn;	/* CPY_WRITERn_DATA_DYN */
+	nt_field_t *mp_writer_data_ofs;	/* CPY_WRITERn_DATA_OFS */
+	nt_field_t *mp_writer_data_len;	/* CPY_WRITERn_DATA_LEN */
+	nt_field_t *mp_writer_data_mask_pointer;	/* NULL unless CPY variant != 0 */
+
+	nt_register_t *mp_writer_mask_ctrl;	/* mask registers: NULL unless CPY variant != 0 */
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Handle for one TX_CPY FPGA module instance and its per-writer mappings. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter number; set from init's n_instance */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned by this struct) */
+
+	nt_module_t *m_tx_cpy;	/* MOD_TX_CPY module instance */
+
+	unsigned int m_writers_cnt;	/* from NT_TX_CPY_WRITERS product param */
+	struct tx_cpy_writers_s *m_writers;	/* owned array; freed by tx_cpy_nthw_delete */
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);	/* allocate zeroed handle; NULL on OOM */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);	/* free writer array, scrub, free */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);	/* 0 on success, -1 on failure */
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+/* mask accessors below require CPY variant != 0 (asserted in the .c) */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);	/* forward to the TX_INS module; p must be initialized */
+}
+
+struct tx_ins_nthw *tx_ins_nthw_new(void)	/* allocate and zero a handle; returns NULL on OOM */
+{
+	struct tx_ins_nthw *p = malloc(sizeof(struct tx_ins_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)	/* scrub (clears stale pointers) and free; NULL is a no-op */
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)	/* 0 on success, -1 if no such instance; p == NULL probes existence only */
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);	/* assumes <assert.h> reaches here via nthw_drv.h — confirm */
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_ins = p_mod;	/* reuse the module handle queried above */
+
+	/* RCP register/field lookups */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);	/* INS_RCP_CTRL_ADR field; presumably latched by tx_ins_nthw_rcp_flush() */
+}
+
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);	/* INS_RCP_CTRL_CNT field */
+}
+
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);	/* INS_RCP_DATA_DYN field */
+}
+
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);	/* INS_RCP_DATA_OFS field */
+}
+
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);	/* INS_RCP_DATA_LEN field */
+}
+
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);	/* write CTRL (ADR/CNT) first */
+	register_flush(p->mp_rcp_data, 1);	/* then DATA fields */
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__ /* NOTE(review): double-underscore names are reserved (C11 7.1.3); confirm project convention */
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one TX_INS FPGA module instance and its RCP register/field pointers. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter number; set from init's n_instance */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned by this struct) */
+
+	nt_module_t *m_tx_ins;	/* MOD_TX_INS module instance */
+
+	nt_register_t *mp_rcp_ctrl;	/* INS_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* INS_RCP_CTRL_ADR */
+	nt_field_t *mp_rcp_cnt;	/* INS_RCP_CTRL_CNT */
+
+	nt_register_t *mp_rcp_data;	/* INS_RCP_DATA register */
+	nt_field_t *mp_rcp_data_dyn;	/* INS_RCP_DATA_DYN */
+	nt_field_t *mp_rcp_data_ofs;	/* INS_RCP_DATA_OFS */
+	nt_field_t *mp_rcp_data_len;	/* INS_RCP_DATA_LEN */
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);	/* allocate zeroed handle; NULL on OOM */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);	/* scrub and free; NULL is a no-op */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);	/* 0 on success, -1 if instance missing */
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);	/* push staged CTRL+DATA writes to HW */
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/* Allocate a zero-initialized tx_rpl_nthw context; returns NULL on failure. */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc is the idiomatic, overflow-checked malloc + memset(0) */
+	return calloc(1, sizeof(struct tx_rpl_nthw));
+}
+
+/* Zero and free a context from tx_rpl_nthw_new(); NULL is a safe no-op. */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a tx_rpl_nthw context to TX_RPL module instance 'n_instance' of
+ * 'p_fpga' and resolve all register/field handles.
+ *
+ * If 'p' is NULL the call only probes for the module instance:
+ * returns 0 when present, -1 when absent.  Otherwise returns 0 on
+ * success or -1 (with an error log) if the instance does not exist.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of a second identical lookup. */
+	p->m_tx_rpl = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP table: select which record (ADR) subsequent data writes target. */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* RCP table: number of consecutive records to access (CNT). */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* RCP data field setters - see tx_rpl_nthw_rcp_flush(). */
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Flush the RCP control and data registers to the FPGA. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table accessors - same select/set/flush pattern as RCP above. */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL table accessors - VALUE is written as 4 x 32-bit words. */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one TX_RPL FPGA module instance and its register/field set. */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_rpl;
+
+	/* RCP table control/data registers and fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* EXT table control/data registers and fields */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* RPL table control/data registers and fields */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (2 preceding siblings ...)
  2023-08-16 13:25 ` [PATCH 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-16 13:25 ` Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 6/8] net/ntnic: adds flow logic Mykola Kostenok
                   ` (16 subsequent siblings)
  20 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1438 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14389 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index c184d5d4b5..387481bb4a 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -17,6 +17,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a prepared tunnel header into ROA TUNHDR entry 'index':
+ * 4 * 4 words (64 bytes) for IPv4 or 8 * 4 words (128 bytes) for IPv6,
+ * converting each word from network byte order with ntohl().
+ * Returns 0 on success, non-zero if any register write reported an error.
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program ROA TUNCFG entry 'index' from the packed 'color_actions' word
+ * (bit layout documented in flow_api_actions.h): optional tunnel push
+ * (length, type, IP-header length/checksum handling), recirculation, TX
+ * destination, and the companion IOA "EPP" entry.
+ * Returns 0 on success, -1 on an invalid TX destination combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* both ports: retransmit on PHY0 and recirculate to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program IOA RCP entry 'index' from the packed 'color_actions' word
+ * (IOA bit layout documented in flow_api_actions.h): tunnel/VLAN pop,
+ * VLAN push (TPID select + TCI) and optional destination-queue override.
+ * Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	/* ioa_set_*(0) calls are used here purely as bit masks */
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* TCI splits into VID 11:0, DEI bit 12 and PCP 15:13 */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* Destination TX ports; combinable as a bitmask (PHY0 | PHY1). */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+/* Tunnel IP version selector. */
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID select values, stored in IOA action bits 27:24. */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+/* ROA retransmit destination values written to HW_ROA_TUNCFG_TX_LAG_IX. */
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/* Pack mark/IOA/ROA recipe indices in the legacy layout described above. */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	/* 1024-stat layout: mark 9:0, IOA 19:10, ROA 29:20, tag 0 in 31:30 */
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	/* 16384-stat layout: mark 13:0, IOA 21:14, ROA 29:22, tag 1 in 31:30 */
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/*
+ * Pack mark, IOA/ROA recipe indices and QSL/HSH recipe into the current
+ * color_action layout; bit 31 flags that a color action is set.
+ */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	/* 1U << 31: left-shifting signed 1 into the sign bit is undefined */
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Mark that a new recirculate port was allocated (bit 50). */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+/* Read the "new recirculate port" flag (bit 50). */
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Tunnel IP type, bit 24: 0 = IPv4, 1 = IPv6. */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Pre-calculated tunnel IP header checksum, bits 49:34. */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Tunnel header length, bits 33:26; 0 means no tunnel. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* TX destination, bits 23:20, stored as txport + ROA_TX_PHY0. */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Tunnel type, bits 19:16. */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Recirculate port in bits 7:0 plus the "recirculate" flag, bit 25. */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Returns the recirculate port, or -1 when the flag (bit 25) is unset. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Recirc-bypass port, bits 15:8; when set it overrides the recirc port. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
+/*
+ * Set the queue-override flag (bit 31) and the hbx queue number (23:16).
+ * Must use 1ULL: the signed (1 << 31) is undefined behavior and, as a
+ * negative int, sign-extends when OR'ed into the 64-bit action word,
+ * corrupting bits 32-63 (used by the ROA action encoding).
+ */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/*
+ * Return the hbx queue number (bits 23:16) if the queue-override flag
+ * (bit 31) is set, otherwise -1.  1ULL keeps the mask to bit 31 only;
+ * the signed (1 << 31) sign-extends and would also match bits 32-63.
+ */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* Set the "pop outer VxLAN tunnel" flag, bit 28. */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Set the "pop outer VLAN" flag, bit 29. */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1AD (QinQ) TPID in bits 27:24. */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+/* TPID select value from bits 27:24. */
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Set the "push VLAN" flag (bit 30) and the TCI in bits 15:0. */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set the "push VLAN" flag (bit 30) and the PCP part of the TCI (15:13). */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+/* VLAN TCI from bits 15:0. */
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..a36168a115
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Table of every NIC HW module the backend knows about.  Each entry
+ * bundles the per-module lifetime callbacks used by the init/reset/done
+ * routines below: presence probe, cache allocation, reset-to-default and
+ * release.
+ */
+static const struct {
+	const char *name;
+	int (*allocate)(struct flow_api_backend_s *be);
+	void (*free)(struct flow_api_backend_s *be);
+	int (*reset)(struct flow_api_backend_s *be);
+	bool (*present)(struct flow_api_backend_s *be);
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+/* Number of entries in module[] */
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one contiguous zeroed buffer holding 'sets' consecutive
+ * element arrays and hand back a pointer to each array's first regular
+ * element.
+ *
+ * The variadic arguments are triplets, one per set:
+ *   (void **plist, int count, int elem_size)
+ * Each set's region is preceded by EXTRA_INDEXES hidden elements, so
+ * *plist ends up pointing EXTRA_INDEXES elements into the region.
+ *
+ * Returns the buffer base (also stored in mod->base, with the total size
+ * in mod->alloced_size) or NULL when calloc() fails.
+ * NOTE(review): mod->base/alloced_size are written even on failure
+ * (base == NULL, alloced_size == requested total) — callers must check
+ * the return value.
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	/* First pass: record each set's size and its hidden-element offset */
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		/* Second pass: carve the buffer into the per-set arrays,
+		 * skipping the hidden elements at the start of each set.
+		 */
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->alloced_size = total_bytes;
+
+	return base;
+}
+
+/*
+ * Clear the module's cached register image.  A NULL base (allocation
+ * never done or failed) is treated as a no-op: memset(NULL, ...) is
+ * undefined behavior.
+ */
+void zero_module_cache(struct common_func_s *mod)
+{
+	if (mod->base)
+		memset(mod->base, 0, mod->alloced_size);
+}
+
+/*
+ * Bind the backend operations, query the NIC capabilities, then allocate
+ * and reset the cache of every module the FPGA exposes.  On any module
+ * failure the already-initialized modules are torn down again and -1 is
+ * returned; returns 0 on success.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) != 0 ||
+				module[mod].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Reset every module cache back to HW defaults.  Stops at the first
+ * failing module and returns -1; returns 0 when all modules reset.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Release every module cache; always returns 0. */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	unsigned int mod;
+
+	for (mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..099e790c81
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+/* Log an out-of-range index and return the matching error code (-2). */
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+/* Log an out-of-range word offset and return the matching error code (-3). */
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+/* Log an unsupported module version and return the matching error code (-4). */
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* Log an unsupported field access and return the matching error code (-5). */
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+/*
+ * Log exhaustion of a named module resource and return an error code.
+ * NOTE(review): returns -4, the same code as error_unsup_ver() — confirm
+ * callers do not need to distinguish the two.
+ */
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/*
+ * Move a 32-bit value between the module cache and the caller:
+ * get != 0 reads the cache into *val, get == 0 writes *val to the cache.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	uint32_t *dst = get ? val : cached_val;
+	const uint32_t *src = get ? cached_val : val;
+
+	*dst = *src;
+}
+
+/*
+ * Same as get_set() but the cached value is signed: get != 0 reads the
+ * cache into *val, get == 0 writes *val to the cache, casting either way.
+ */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)(*cached_val);
+		return;
+	}
+	*cached_val = (int32_t)(*val);
+}
+
+/*
+ * Search [start, nb_elements) for an element whose raw bytes equal
+ * element 'idx' (idx itself is skipped).  On success *value holds the
+ * first matching index, or NOT_FOUND when no duplicate exists.  Only
+ * valid for read ('get') access; returns 0 on success or a negative
+ * error code.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+	const uint8_t *ref = base + idx * type_size;
+
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (unsigned int i = start; i < nb_elements; i++) {
+		if (i == idx)
+			continue;
+		if (memcmp(ref, base + i * type_size, type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Compare elements 'idx' and 'cmp_idx' byte-for-byte.  Returns 1 when
+ * they are distinct indices with identical contents, 0 otherwise, or a
+ * negative error code.  Only valid for read ('get') access.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	if (cmp_idx >= nb_elements)
+		return error_index_too_large(func);
+	if (idx == cmp_idx)
+		return 0;
+	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
+		      type_size) == 0;
+}
+
+/* Return 1 when any of the n bytes at addr is non-zero, else 0. */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *bytes = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*bytes++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 when every one of the n bytes at addr is 0xff, else 0. */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *bytes = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*bytes++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+/*
+ * Per-category enable indices; presumably bit positions into the CAT
+ * CTE enable bit-map selecting which downstream modules (color, hash,
+ * queue select, slicer, PDB, ...) act on a packet — TODO confirm against
+ * the CTE register layout.
+ */
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+/*
+ * Dynamic frame-offset selectors.  Values without SWX_INFO name a
+ * position in the parsed frame (outer and tunneled headers); the SB_*
+ * values have SWX_INFO set and select sideband information instead of a
+ * frame offset.
+ */
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	/* Sideband selectors (SWX_INFO set) */
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int alloced_size; \
+	int debug
+
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM module accessors.
+ * NOTE(review): by naming convention _flush/_update presumably synchronize
+ * cached register state with hardware, while _set/_get operate on the
+ * cached copy selected by the hw_flm_e field id -- confirm against the
+ * backend implementation (not visible in this header).
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+/* CONTROL register group */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* STATUS register group (has an update, so readable from hardware) */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+/* TIMEOUT register group */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* SCRUB register group */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* LOAD_BIN / LOAD_PPS / LOAD_LPS / LOAD_APS register groups */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+/* PRIO register group */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST table: indexed entries, flushed as a [start_idx, start_idx+count) range */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* RCP table: indexed entries; set_mask writes a multi-word value (e.g. HW_FLM_RCP_MASK) */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* BUF_CTRL and STAT groups: read-only from the caller's perspective (update + get) */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/info/status data streams; inf takes an explicit word count */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* Cached state for the HSH module. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of RCP entries */
+	union {
+		struct hw_mod_hsh_v5_s v5; /* versioned register layout (v5) */
+	};
+};
+
+/* Function and field selectors for the HSH accessors below. */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+/* HSH accessors; rcp_set/get address a word within the entry via word_off */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* Cached state for the QSL module. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;
+	uint32_t nb_qst_entries;
+	union {
+		struct hw_mod_qsl_v7_s v7; /* versioned register layout (v7) */
+	};
+};
+
+/* Function and field selectors for the QSL accessors below. */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+/* QSL accessors: RCP, QST, QEN and UNMQ tables (indexed, range-flushed) */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* Cached state for the SLC module. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1; /* versioned register layout (v1) */
+	};
+};
+
+/* Function and field selectors for the SLC accessors below. */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* Cached state for the SLC LR module (mirrors SLC; separate version line). */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2; /* versioned register layout (v2) */
+	};
+};
+
+/* Function and field selectors for the SLC LR accessors below. */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* Cached state for the PDB module. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories;
+
+	union {
+		struct hw_mod_pdb_v9_s v9; /* versioned register layout (v9) */
+	};
+};
+
+/* Function and field selectors for the PDB accessors below. */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/* PDB accessors.  NOTE(review): config has a set but no matching get,
+ * unlike the other modules in this header -- confirm this is intentional. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* Cached state for the IOA module. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;
+	uint32_t nb_roa_epp_entries;
+	union {
+		struct hw_mod_ioa_v4_s v4; /* versioned register layout (v4) */
+	};
+};
+
+/* Function and field selectors for the IOA accessors below. */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+/* ROA EPP table accessed through the IOA module */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* Cached state for the ROA module. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories;
+	uint32_t nb_lag_entries;
+	union {
+		struct hw_mod_roa_v6_s v6; /* versioned register layout (v6) */
+	};
+};
+
+/* Function and field selectors for the ROA accessors below. */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/* ROA accessors; tunhdr_set/get address a word in the header via word_off */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* IGS/RCC packet and byte drop counters */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* Cached state for the RMC module. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3; /* versioned register layout (v1.3) */
+	};
+};
+
+/* Field selectors for the RMC CTRL accessors.
+ * NOTE(review): unlike the other hw_*_e enums in this header, this one has
+ * no function selectors and starts directly at FIELD_START_INDEX. */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* Cached state for the TPE module (two supported register layout versions). */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;
+	uint32_t nb_ifr_categories;
+	uint32_t nb_cpy_writers;
+	uint32_t nb_rpl_depth;
+	uint32_t nb_rpl_ext_categories;
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/* Function and field selectors for the TPE accessors below.  Field names
+ * are prefixed by sub-block: RPP, IFR, INS, RPL, CPY, HFU, CSU. */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/* TPE accessors, one flush/set/get triple per sub-block table. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Note: rpl_rpl_set takes a pointer -- the RPL value is multi-word */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug mode bits, passed to the set_debug_mode callback. */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operations table implemented by the concrete hardware backend.
+ * The first parameter of every callback ('dev' / 'be_dev') is the opaque
+ * backend device handle stored in flow_api_backend_s::be_dev.
+ * Per module there is a presence probe, a version query, and flush
+ * callbacks that take the module's cached state plus an index/count range.
+ */
+struct flow_api_backend_ops {
+	int version; /* version of this ops interface */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	/* capability queries -- mirror the NIC attribute/capacity fields below */
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	/* RX queue allocation; free takes the hw queue returned by alloc */
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/* Top-level backend state: the ops table, the opaque device handle it is
+ * called with, and the cached state of every flow-filter FPGA module. */
+struct flow_api_backend_s {
+	void *be_dev; /* opaque handle passed to every iface callback */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: init binds 'iface'/'be_dev' to 'dev', reset returns
+ * the modules to their initial state, done tears the backend down. */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
/* FPGA resource pools managed by the flow API (see comment above). */
enum res_type_e {
	RES_QUEUE,
	RES_CAT_CFN,
	RES_CAT_COT,
	RES_CAT_EXO,
	RES_CAT_LEN,
	RES_KM_FLOW_TYPE,
	RES_KM_CATEGORY,
	RES_HSH_RCP,
	RES_PDB_RCP,
	RES_QSL_RCP,
	RES_QSL_QST,
	RES_SLC_RCP,
	RES_IOA_RCP,
	RES_ROA_RCP,
	RES_FLM_FLOW_TYPE,
	RES_FLM_RCP,
	RES_HST_RCP,
	RES_TPE_RCP,
	RES_TPE_EXT,
	RES_TPE_RPL,
	RES_COUNT, /* number of resource types; sizes per-flow resource tables */
	RES_INVALID /* sentinel: no/unknown resource type */
};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be built
+ *
+ */
/* KM extractor class assigned to a match element (see diagram above). */
enum extractor_e {
	KM_USE_EXTRACTOR_UNDEF,
	KM_USE_EXTRACTOR_QWORD, /* 128-bit quad-word extractor */
	KM_USE_EXTRACTOR_SWORD, /* 32-bit single-word extractor */
};

/* One collected match field, before CAM/TCAM key layout. */
struct match_elem_s {
	enum extractor_e extr;
	int masked_for_tcam; /* if potentially selected for TCAM */
	uint32_t e_word[4]; /* match data, up to 128 bits */
	uint32_t e_mask[4]; /* match mask, up to 128 bits */

	int extr_start_offs_id; /* extractor start-offset id (frame_offs_e) */
	int8_t rel_offs; /* offset relative to the start offset - presumably bytes; verify */
	uint32_t word_len; /* length of the field in 32-bit words */
};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
/*
 * Key Matcher (KM) flow definition.
 * Collects the flow's match elements, then holds the formatted
 * CAM/TCAM entry and the bank bookkeeping used when programming it.
 */
struct km_flow_def_s {
	struct flow_api_backend_s *be; /* backend this entry is programmed through */

	/* For keeping track of identical entries */
	struct km_flow_def_s *reference;
	struct km_flow_def_s *root;

	/* For collect flow elements and sorting */
	struct match_elem_s match[MAX_MATCH_FIELDS];
	struct match_elem_s *match_map[MAX_MATCH_FIELDS]; /* sorted view of match[] */
	int num_ftype_elem;

	/* Finally formatted CAM/TCAM entry */
	enum cam_tech_use_e target; /* KM_CAM, KM_TCAM or KM_SYNERGY */
	uint32_t entry_word[MAX_WORD_NUM];
	uint32_t entry_mask[MAX_WORD_NUM];
	int key_word_size;

	/* TCAM calculated possible bank start offsets */
	int start_offsets[MAX_TCAM_START_OFFSETS];
	int num_start_offsets;

	/* Flow information */

	/*
	 * HW input port ID needed for compare. In port must be identical on flow
	 * types
	 */
	uint32_t port_id;
	uint32_t info; /* used for color (actions) */
	int info_set;
	int flow_type; /* 0 is illegal and used as unset */
	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */

	/* CAM specific bank management */
	int cam_paired;
	int record_indexes[MAX_BANKS]; /* candidate record index per bank */
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct cam_distrib_s *cam_dist;
	struct hasher_s *hsh;

	/* TCAM specific bank management */
	struct tcam_distrib_s *tcam_dist;
	int tcam_start_bank;
	int tcam_record;
};
+
/*
 * KCC-CAM
 */

/*
 * 64-bit KCC CAM key.
 * NOTE(review): the bit-field layout presumably mirrors the HW key
 * written via hw_mod_cat_kcc_set() as two 32-bit words - verify.
 */
struct kcc_key_s {
	uint64_t sb_data : 32; /* sideband data (VLAN TPID/VID, VXLAN VNI, ...) */
	uint64_t sb_type : 8; /* sideband type: 0 = none, 1 = VLAN, 2 = VXLAN */
	uint64_t cat_cfn : 8; /* CAT CFN value */
	uint64_t port : 16; /* port number */
};

#define KCC_ID_INVALID 0xffffffff

/* One KCC CAM flow definition plus its CAM placement bookkeeping. */
struct kcc_flow_def_s {
	struct flow_api_backend_s *be;
	union {
		uint64_t key64; /* whole key, for compare/clear */
		uint32_t key32[2]; /* key as the two words written to HW */
		struct kcc_key_s key; /* key by field */
	};
	uint32_t km_category; /* KM category this entry resolves to */
	uint32_t id; /* allocated unique id, or KCC_ID_INVALID */

	uint8_t *kcc_unique_ids; /* shared unique-id allocation bitmap */

	int flushed_to_target; /* nonzero once programmed into the CAM */
	int record_indexes[MAX_BANKS]; /* candidate record index per bank */
	int bank_used; /* bank currently holding the entry */
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct kcc_cam_distrib_s *cam_dist;
	struct hasher_s *hsh;
};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
/* Prebuilt tunnel-encapsulation header plus its layout metadata. */
struct tunnel_header_s {
	union {
		uint8_t hdr8[MAX_TUN_HDR_SIZE];
		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
	} d; /* raw header bytes, also 32-bit addressable */
	uint32_t user_port_id;
	uint8_t len; /* presumably total header length in bytes - verify */

	uint8_t nb_vlans; /* number of VLAN tags in the header */

	uint8_t ip_version; /* 4: v4, 6: v6 */
	uint16_t ip_csum_precalc; /* precalculated IP header checksum - assumed; verify */

	uint8_t new_outer;
	uint8_t l2_len; /* per-layer lengths of the encap header */
	uint8_t l3_len;
	uint8_t l4_len;
};
+
/* Destination type of a flow output. */
enum port_type_e {
	PORT_NONE, /* not defined or drop */
	PORT_INTERNAL, /* no queues attached */
	PORT_PHY, /* MAC phy output queue */
	PORT_VIRT, /* Memory queues to Host */
};

/* Partial-match class used to mark flows for special SW handling. */
enum special_partial_match_e {
	SPCIAL_MATCH_NONE, /* (sic - misspelled name is part of the API) */
	SPECIAL_MATCH_LACP,
};

#define PORT_ID_NONE 0xffffffff
+
/* One output destination of a flow (physical port, virt queue, ...). */
struct output_s {
	uint32_t owning_port_id; /* the port who owns this output destination */
	enum port_type_e type;
	int id; /* depending on port type: queue ID or physical port id or not used */
	int active; /* activated */
};
+
/*
 * Parsed, HW-independent representation of one flow: protocol match
 * info decoded from the flow elements plus all collected actions and
 * output destinations.  Built as step 1 of flow creation (stored in
 * struct flow_handle before NIC resources are allocated).
 */
struct nic_flow_def {
	/*
	 * Frame Decoder match info collected
	 */
	int l2_prot;
	int l3_prot;
	int l4_prot;
	int tunnel_prot;
	int tunnel_l3_prot;
	int tunnel_l4_prot;
	int vlans;
	int fragmentation;
	/*
	 * Additional meta data for various functions
	 */
	int in_port_override;
	int l4_dst_port;
	/*
	 * Output destination info collection
	 */
	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
	int dst_num_avail;

	/*
	 * To identify high priority match with mark for special SW processing (non-OVS)
	 */
	enum special_partial_match_e special_match;

	/*
	 * Mark or Action info collection
	 */
	uint32_t mark;
	uint64_t roa_actions; /* collected ROA action bits */
	uint64_t ioa_actions; /* collected IOA action bits */

	uint32_t jump_to_group;

	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED]; /* attached meter ids */

	int full_offload;
	/*
	 * Action push tunnel
	 */
	struct tunnel_header_s tun_hdr;

	/*
	 * If DPDK RTE tunnel helper API used
	 * this holds the tunnel if used in flow
	 */
	struct tunnel_s *tnl;

	/*
	 * Header Stripper
	 */
	int header_strip_start_dyn;
	int header_strip_start_ofs;
	int header_strip_end_dyn;
	int header_strip_end_ofs;
	int header_strip_removed_outer_ip;

	/*
	 * Modify field
	 */
	struct {
		uint32_t select; /* which header field writer to use */
		uint32_t dyn;
		uint32_t ofs;
		uint32_t len;
		uint32_t level;
		union {
			uint8_t value8[16];
			uint16_t value16[8];
			uint32_t value32[4];
		};
	} modify_field[MAX_CPY_WRITERS_SUPPORTED];

	uint32_t modify_field_count;
	uint8_t ttl_sub_enable; /* TTL decrement action state */
	uint8_t ttl_sub_ipv4;
	uint8_t ttl_sub_outer;

	/*
	 * Key Matcher flow definitions
	 */
	struct km_flow_def_s km;

	/*
	 * Key Matcher Category CAM
	 */
	struct kcc_flow_def_s *kcc;
	int kcc_referenced;

	/*
	 * TX fragmentation IFR/RPP_LR MTU recipe
	 */
	uint8_t flm_mtu_fragmentation_recipe;
};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
/*
 * Runtime handle for one created flow.
 * The union has one variant per handle type: FLOW_HANDLE_TYPE_FLOW
 * carries the parsed flow definition plus the NIC resources allocated
 * for it; FLOW_HANDLE_TYPE_FLM carries flow-matcher entry data and a
 * pointer back to the owning FLOW handle (flm_owner).
 * Handles are kept in a doubly linked list (next/prev).
 */
struct flow_handle {
	enum flow_handle_type type;

	struct flow_eth_dev *dev;
	struct flow_handle *next;
	struct flow_handle *prev;

	union {
		struct {
			/*
			 * 1st step conversion and validation of flow
			 * verified and converted flow match + actions structure
			 */
			struct nic_flow_def *fd;
			/*
			 * 2nd step NIC HW resource allocation and configuration
			 * NIC resource management structures
			 */
			struct {
				int index; /* allocation index into NIC raw resource table */
				/* number of contiguous allocations needed for this resource */
				int count;
				/*
				 * This resource if not initially created by this flow, but reused
				 * by it
				 */
				int referenced;
			} resource[RES_COUNT];
			int flushed; /* nonzero once programmed to HW */

			uint32_t flow_stat_id;
			uint32_t color;
			int cao_enabled;
			uint32_t cte;

			uint32_t port_id; /* MAC port ID or override of virtual in_port */
			uint32_t flm_ref_count;
			uint8_t flm_group_index;
			uint8_t flm_ft_index;
		};

		struct {
			uint32_t flm_data[10]; /* raw FLM entry data */
			uint8_t flm_prot;
			uint8_t flm_kid;
			uint8_t flm_prio;

			uint16_t flm_rpl_ext_ptr;
			uint32_t flm_nat_ipv4;
			uint16_t flm_nat_port;
			uint8_t flm_dscp;
			uint32_t flm_teid;
			uint8_t flm_rqi;
			uint8_t flm_qfi;

			uint8_t flm_mtu_fragmentation_recipe;

			struct flow_handle *flm_owner; /* FLOW handle this FLM entry belongs to */
		};
	};
};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
#define OWNER_ID_COUNT 256

/*
 * One entry per translated (hardware) group.
 * ref_counter    - number of (owner_id, group_in) mappings using this group
 * reverse_lookup - points back at the translation-table slot mapping to
 *                  this entry, so the mapping can be cleared on last release
 */
struct group_lookup_entry_s {
	uint64_t ref_counter;
	uint32_t *reverse_lookup;
};

struct group_handle_s {
	uint32_t group_count; /* number of hardware groups available */

	/* translation_table[owner_id * group_count + group_in] -> hw group */
	uint32_t *translation_table;

	struct group_lookup_entry_s *lookup_entries; /* indexed by hw group */
};

/*
 * Allocate a translation handle able to map group ids from up to
 * OWNER_ID_COUNT owners onto group_count hardware groups.
 * Returns 0 on success, -1 on allocation failure; on failure *handle is
 * NULL and no partial allocation is leaked.
 * (Fix: the previous version dereferenced the handle allocation without
 * checking it and could return success with NULL member tables.)
 */
int flow_group_handle_create(void **handle, uint32_t group_count)
{
	struct group_handle_s *group_handle =
		calloc(1, sizeof(struct group_handle_s));

	*handle = NULL;
	if (group_handle == NULL)
		return -1;

	group_handle->group_count = group_count;
	group_handle->translation_table =
		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
	group_handle->lookup_entries =
		calloc(group_count, sizeof(struct group_lookup_entry_s));

	if (group_handle->translation_table == NULL ||
			group_handle->lookup_entries == NULL) {
		free(group_handle->translation_table);
		free(group_handle->lookup_entries);
		free(group_handle);
		return -1;
	}

	*handle = group_handle;
	return 0;
}

/*
 * Destroy a handle created by flow_group_handle_create().
 * Safe to call when *handle is NULL; always leaves *handle NULL.
 */
int flow_group_handle_destroy(void **handle)
{
	if (*handle) {
		struct group_handle_s *group_handle =
			(struct group_handle_s *)*handle;

		free(group_handle->translation_table);
		free(group_handle->lookup_entries);

		free(*handle);
		*handle = NULL;
	}

	return 0;
}

/*
 * Translate (owner_id, group_in) to a hardware group in *group_out,
 * allocating the lowest free hardware group on first use and reference
 * counting subsequent uses.  Group 0 is never translated.
 * Returns 0 on success, -1 on bad arguments or when all groups are in use.
 */
int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
			     uint32_t *group_out)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	uint32_t *table_ptr;
	uint32_t lookup;

	if (group_handle == NULL || group_in >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (group_in == 0) {
		*group_out = 0;
		return 0;
	}

	/*
	 * Each owner has its own group_count-sized row.  (Fix: the previous
	 * stride of OWNER_ID_COUNT indexed outside the allocation of
	 * group_count * OWNER_ID_COUNT elements whenever owner_id >= group_count.)
	 */
	table_ptr = &group_handle->translation_table[(size_t)owner_id *
				 group_handle->group_count + group_in];
	lookup = *table_ptr;

	if (lookup == 0) {
		/* find the lowest unreferenced hardware group (0 is reserved) */
		for (lookup = 1;
				lookup < group_handle->group_count &&
				group_handle->lookup_entries[lookup].ref_counter > 0;
				++lookup)
			;

		if (lookup < group_handle->group_count) {
			group_handle->lookup_entries[lookup].reverse_lookup =
				table_ptr;
			group_handle->lookup_entries[lookup].ref_counter += 1;

			*table_ptr = lookup;
		} else {
			return -1;
		}
	} else {
		group_handle->lookup_entries[lookup].ref_counter += 1;
	}
	*group_out = lookup;
	return 0;
}

/*
 * Drop one reference on a translated hardware group; when the count
 * reaches zero the owner mapping is cleared so the group can be reused.
 * Group 0 is never released.  Returns 0 on success, -1 on bad arguments.
 */
int flow_group_translate_release(void *handle, uint32_t translated_group)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	struct group_lookup_entry_s *lookup;

	if (group_handle == NULL ||
			translated_group >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (translated_group == 0)
		return 0;

	lookup = &group_handle->lookup_entries[translated_group];

	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
		lookup->ref_counter -= 1;
		if (lookup->ref_counter == 0) {
			*lookup->reverse_lookup = 0;
			lookup->reverse_lookup = NULL;
		}
	}

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
/*
 * Fixed bit permutation used by the hash mixers: swaps bit 1 with bit
 * 30 and shifts the two interleaved masked groups 3 positions in
 * opposite directions.
 */
static uint32_t shuffle(uint32_t x)
{
	uint32_t bit1_up = (x & 0x00000002) << 29;
	uint32_t grp_down = (x & 0xAAAAAAA8) >> 3;
	uint32_t grp_up = (x & 0x15555555) << 3;
	uint32_t bit30_down = (x & 0x40000000) >> 29;

	return bit1_up | grp_down | grp_up | bit30_down;
}
+
/* Rotate x right by s positions, inverting the bits that wrap around. */
static uint32_t ror_inv(uint32_t x, const int s)
{
	uint32_t low_part = x >> s;
	uint32_t wrapped_inverted = ~x << (32 - s);

	return low_part | wrapped_inverted;
}
+
/*
 * Non-linear combine of two words: XOR of the inputs with a selector
 * that is set wherever exactly two of the four rotated/inverted terms
 * are set (all six 2-of-4 patterns).
 */
static uint32_t combine(uint32_t x, uint32_t y)
{
	uint32_t a = ror_inv(x, 15);
	uint32_t b = ror_inv(x, 13);
	uint32_t c = ror_inv(y, 3);
	uint32_t d = ror_inv(y, 27);

	uint32_t two_of_four =
		(a & c & ~b & ~d) | (a & b & ~c & ~d) | (a & d & ~b & ~c) |
		(b & c & ~a & ~d) | (c & d & ~a & ~b) | (b & d & ~a & ~c);

	return x ^ y ^ two_of_four;
}
+
/* One 32-bit mixing round: combine the operands, then permute the bits. */
static uint32_t mix(uint32_t x, uint32_t y)
{
	uint32_t combined = combine(x, y);

	return shuffle(combined);
}
+
/* ror_inv(half, 3) applied independently to each 32-bit half of x. */
static uint64_t ror_inv3(uint64_t x)
{
	const uint64_t top3 = 0xE0000000E0000000ULL;
	uint64_t shifted = (x >> 3) | top3;
	uint64_t wrapped = (x << 29) & top3;

	return shifted ^ wrapped;
}
+
/* ror_inv(half, 13) applied independently to each 32-bit half of x. */
static uint64_t ror_inv13(uint64_t x)
{
	const uint64_t top13 = 0xFFF80000FFF80000ULL;
	uint64_t shifted = (x >> 13) | top13;
	uint64_t wrapped = (x << 19) & top13;

	return shifted ^ wrapped;
}
+
/* ror_inv(half, 15) applied independently to each 32-bit half of x. */
static uint64_t ror_inv15(uint64_t x)
{
	const uint64_t top15 = 0xFFFE0000FFFE0000ULL;
	uint64_t shifted = (x >> 15) | top15;
	uint64_t wrapped = (x << 17) & top15;

	return shifted ^ wrapped;
}
+
/* ror_inv(half, 27) applied independently to each 32-bit half of x. */
static uint64_t ror_inv27(uint64_t x)
{
	const uint64_t top27 = 0xFFFFFFE0FFFFFFE0ULL;
	uint64_t shifted = (x >> 27) | top27;
	uint64_t wrapped = (x << 5) & top27;

	return shifted ^ wrapped;
}
+
/* The 32-bit shuffle permutation applied to both halves of a 64-bit word. */
static uint64_t shuffle64(uint64_t x)
{
	uint64_t bit1_up = (x & 0x0000000200000002ULL) << 29;
	uint64_t grp_down = (x & 0xAAAAAAA8AAAAAAA8ULL) >> 3;
	uint64_t grp_up = (x & 0x1555555515555555ULL) << 3;
	uint64_t bit30_down = (x & 0x4000000040000000ULL) >> 29;

	return bit1_up | grp_down | grp_up | bit30_down;
}
+
/* Concatenate two 32-bit words into one 64-bit word (x high, y low). */
static uint64_t pair(uint32_t x, uint32_t y)
{
	uint64_t high_word = (uint64_t)x << 32;

	return high_word | y;
}
+
/*
 * 64-bit variant of combine(): XOR of the inputs with a selector set
 * wherever exactly two of the four rotated/inverted terms are set
 * (all six 2-of-4 patterns).
 */
static uint64_t combine64(uint64_t x, uint64_t y)
{
	uint64_t a = ror_inv15(x);
	uint64_t b = ror_inv13(x);
	uint64_t c = ror_inv3(y);
	uint64_t d = ror_inv27(y);

	uint64_t two_of_four =
		(a & c & ~b & ~d) | (a & b & ~c & ~d) | (a & d & ~b & ~c) |
		(b & c & ~a & ~d) | (c & d & ~a & ~b) | (b & d & ~a & ~c);

	return x ^ y ^ two_of_four;
}
+
/* One 64-bit mixing round: combine the operands, then permute the bits. */
static uint64_t mix64(uint64_t x, uint64_t y)
{
	uint64_t combined = combine64(x, y);

	return shuffle64(combined);
}
+
/*
 * Collapse a 16-word (512-bit) key into one 32-bit hash: pairwise
 * 64-bit mixing in a binary tree (layer diagram below), then three
 * 32-bit finalisation rounds.
 */
static uint32_t calc16(const uint32_t key[16])
{
	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
	/*   0       1       2       3       4       5       6       7     Layer 1   */
	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
	/*       0               1               2               3         Layer 2   */
	/*        \______.______/                 \______.______/                    */
	/*               0                               1                 Layer 3   */
	/*                \______________.______________/                            */
	/*                               0                                 Layer 4   */
	/*                              / \                                          */
	/*                              \./                                          */
	/*                               0                                 Layer 5   */
	/*                              / \                                          */
	/*                              \./                                Layer 6   */
	/*                             value                                         */

	uint64_t z;
	uint32_t x;

	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));

	/* fold the 64-bit tree result to 32 bits, then two more rounds */
	x = mix((uint32_t)(z >> 32), (uint32_t)z);
	x = mix(x, ror_inv(x, 17));
	x = combine(x, ror_inv(x, 17));

	return x;
}
+
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
/*
 * Self-test (TESTING builds only).
 * Hashes a fixed key (only words 0-3 non-zero; 4-15 default to zero)
 * and compares the 32-bit hash and the first three derived bank
 * indexes against precomputed reference values.  Returns the number of
 * mismatches (0 = pass).
 * NOTE(review): only checks 3 banks and assumes banks * record_bw is
 * close to 32 bits - confirm against supported configurations.
 */
int hash_test(struct hasher_s *hsh, int banks, int record_bw)
{
	int res = 0;
	int val[10], resval[10];
	uint32_t bits = 0;

	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
	const uint32_t result = 0xACECAE65; /* expected 32-bit hash of inval */

	for (int i = 0; i < 16; i++)
		printf("%08x,", inval[i]);
	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);

	uint32_t ret = gethash(hsh, inval, val);

	printf("Return VAL = %08X  ==  %08X\n", ret, result);
	res += (ret != result) ? 1 : 0;

	/* re-derive the per-bank indexes the same way gethash does */
	int shft = (banks * record_bw) - 32;
	int mask = (1 << record_bw) - 1;

	if (shft > 0) {
		bits = (ret >> (32 - shft));
		ret ^= ret << shft;
	}

	resval[0] = ret & mask;
	ret >>= record_bw;
	resval[1] = ret & mask;
	ret >>= record_bw;
	resval[2] = ret & mask;
	resval[2] |= (bits << (record_bw - shft));

	for (int i = 0; i < 3; i++) {
		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
		res += (val[i] != resval[i]) ? 1 : 0;
	}

	return res;
}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
/* Derived CAM hashing parameters; filled in by init_hasher(). */
struct hasher_s {
	int banks; /* number of CAM banks */
	int cam_records_bw; /* bits needed to index the records of one bank */
	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
	int cam_bw; /* total hash width: banks * cam_records_bw */
};

int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
/*
 * KCC-CAM structures and defines
 */

/* Per CAM-cell bookkeeping: owning flow definition and reference count. */
struct kcc_cam_distrib_s {
	struct kcc_flow_def_s *kcc_owner;
	int ref_cnt;
};

/* Linear CAM address of this flow's candidate record in bank 'bnk'. */
#define BE_CAM_KCC_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
	})


/* Byte size of the CAM bookkeeping table (kcc_size cells). */
#define BE_CAM_ENTRIES \
	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
/* Byte size of the unique-ID allocation bitmap (1 bit per possible ID). */
#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)

#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
/* NOTE(review): file-scope scratch stack makes cuckoo moves non-reentrant. */
static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
/*
 * Attach (creating on first use) the per-NIC KCC-CAM management state
 * shared by all KCC flow definitions.
 *
 * One allocation is carved into four consecutive regions:
 *   cam_dist        - BE_CAM_ENTRIES bytes of CAM cell bookkeeping
 *   cuckoo_moves    - one uint32_t statistics counter
 *   kcc_unique_ids  - BE_UNIQUE_IDS_SIZE bytes of ID bitmap
 *   hsh             - struct hasher_s
 * NOTE(review): the calloc result is used without a NULL check.
 */
void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
		void **handle)
{
	/*
	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
	 *  and manage CAM population and usage
	 */
	if (!*handle) {
		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
				 BE_UNIQUE_IDS_SIZE +
				 sizeof(struct hasher_s));
		NT_LOG(DBG, FILTER,
		       "Allocate NIC DEV KCC-CAM record manager\n");
	}
	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
	kcc->cuckoo_moves =
		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
					  BE_CAM_ENTRIES + sizeof(uint32_t));

	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
				       BE_UNIQUE_IDS_SIZE);
	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
/*
 * Key for KCC CAM
 */

/* No sideband match: data all-ones, type 0. */
int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
{
	kcc->key.sb_data = 0xffffffff;
	kcc->key.sb_type = 0;
	return 0;
}

/* Sideband type 1 - VLAN: TPID in the high 16 bits, 12-bit VID below. */
int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
{
	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
	kcc->key.sb_type = 1;
	return 0;
}

/* Sideband type 2 - VXLAN: 24-bit VNI tagged with 0x02 in the top byte. */
int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
{
	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
	kcc->key.sb_type = 2;
	return 0;
}

/* Set the port field of the key. */
int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
{
	kcc->key.port = port;
	return 0;
}

/* Set the CAT CFN field of the key. */
int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
{
	kcc->key.cat_cfn = cat_cfn;
	return 0;
}

uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
{
	return kcc->key.cat_cfn;
}

/*
 * other settings for KCC CAM
 */

/* KM category the CAM entry resolves to (stored alongside the key). */
int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
{
	kcc->km_category = category;
	return 0;
}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
/*
 * Return this flow's unique ID to the allocation bitmap and mark the
 * flow's ID invalid.  No-op when no ID is allocated.
 */
void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
{
	if (kcc->id == KCC_ID_INVALID)
		return;

	/* bitmap position: byte index and bit within the byte */
	uint32_t idx = kcc->id >> 3;
	uint8_t shft = (uint8_t)(kcc->id & 7);

	assert(idx < BE_UNIQUE_IDS_SIZE);
	if (idx < BE_UNIQUE_IDS_SIZE) {
		/* the bit must currently be marked allocated */
		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
		kcc->id = KCC_ID_INVALID;
	}
}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
/*
 * Write this flow's key (two 32-bit words), KM category and unique ID
 * to the CAM cell (bank, record_indexes[bank]) and flush it to HW,
 * then mark the cell owned by this flow with reference count 1.
 * Returns 0 on success, -1 on register-write failure.
 */
static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
{
	int res;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
				 kcc->key32[0]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
				 kcc->key32[1]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
				 kcc->km_category);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
	return res;
}
+
/*
 * Zero this flow's CAM cell in 'bank' (key words, category and ID),
 * flush it to HW and release the bookkeeping ownership.  Also clears
 * the software key/category state; the unique ID is NOT freed here
 * (see comment below).  Returns 0 on success, -1 on write failure.
 */
static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
{
	int res = 0;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;

	kcc->key64 = 0UL;
	kcc->km_category = 0;
	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
	return res;
}
+
/*
 * Try to move this flow's CAM entry from its current bank to any other
 * bank whose cell (at this flow's precomputed record index) is free.
 * On success updates bank_used, increments the shared cuckoo-move
 * counter and returns 1; returns 0 when no free cell exists or the HW
 * write fails.
 */
static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
{
	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);

	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
		/* It will not select itself */
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
				NULL) {
			/*
			 * Populate in new position
			 */
			int res = kcc_cam_populate(kcc, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank
			 * HW flushes are really not needed, the old addresses are always taken over
			 * by the caller If you change this code in future updates, this may no
			 * longer be true then!
			 */
			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
			.kcc_owner = NULL;
			NT_LOG(DBG, FILTER,
			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       kcc->bank_used, bank,
			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
			       BE_CAM_KCC_DIST_IDX(bank));

			kcc->bank_used = bank;
			(*kcc->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Recursive cuckoo displacement: try to free CAM cell bank_idx (of
 * kcc_parent) by moving the flow occupying it, recursing up to
 * 'levels' deep through the flows blocking each candidate cell.
 * Cells already visited on the current path are recorded in the
 * file-scope kcc_cam_addr_reserved_stack (cam_adr_list_len entries) so
 * the recursion never displaces an address reserved by a caller.
 * Returns 1 when the cell was freed, 0 otherwise.
 * NOTE(review): the shared reservation stack makes this non-reentrant.
 */
static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
				       int bank_idx, int levels,
				       int cam_adr_list_len)
{
	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;

	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);

	if (kcc_move_cuckoo_index(kcc))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* reserve the cell we are trying to free for the rest of this path */
	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
		int reserved = 0;
		int new_idx = BE_CAM_KCC_DIST_IDX(i);

		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (kcc_cam_addr_reserved_stack[i_reserved] ==
					new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
						      cam_adr_list_len);
		if (res) {
			if (kcc_move_cuckoo_index(kcc))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/* Scratch hash key: only words 0-1 are ever written, 2-15 stay zero. */
static uint32_t kcc_hsh_key[16];

/*
 * Find a CAM cell for this flow and program it.
 * The 64-bit key is hashed into one candidate record index per bank;
 * the first bank whose cell is free is used, otherwise existing flows
 * are cuckoo-moved (up to 4 levels deep) to make room.  On success
 * sets flushed_to_target and bank_used.  Returns 0 on success,
 * negative when no cell could be freed or programming failed.
 */
static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
{
	int res = 0;
	int val[MAX_BANKS];

	kcc_hsh_key[0] = kcc->key32[1];
	kcc_hsh_key[1] = kcc->key32[0];
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");

	/* 2-15 never changed - remains zero */

	gethash(kcc->hsh, kcc_hsh_key, val);

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
		kcc->record_indexes[i] = val[i];
	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
	       kcc->record_indexes[0], kcc->record_indexes[1],
	       kcc->record_indexes[2]);

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
				NULL) {
			bank = i_bank;
			break;
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
				i_bank++) {
			if (kcc_move_cuckoo_index_level(kcc,
							BE_CAM_KCC_DIST_IDX(i_bank),
							4, 0)) {
				bank = i_bank;
				break;
			}
		}

		if (bank < 0)
			return -1;
	}

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
	       BE_CAM_KCC_DIST_IDX(bank));
	res = kcc_cam_populate(kcc, bank);
	if (res == 0) {
		kcc->flushed_to_target = 1;
		kcc->bank_used = bank;
	} else {
		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
	}
	return res;
}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
/*
 * Add one reference to the CAM entry this flow currently occupies.
 * Returns the new reference count.
 */
int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
{
	assert(kcc->bank_used >= 0 &&
	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);

	struct kcc_cam_distrib_s *cam_entry =
		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];

	NT_LOG(DBG, FILTER,
	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
	return ++cam_entry->ref_cnt;
}
+
/*
 * Drop one reference from this flow's CAM entry; when the count hits
 * zero the entry is cleared from the CAM.  Returns the new reference
 * count, or -1 when bank_used is out of range.
 */
int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
{
	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
		return -1;

	struct kcc_cam_distrib_s *cam_entry =
		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];

	if (cam_entry->ref_cnt) {
		if (--cam_entry->ref_cnt == 0) {
			kcc_clear_data_match_entry(kcc);
			NT_LOG(DBG, FILTER,
			       "KCC DEC Ref on Key became zero - Delete\n");
		}
	}

	NT_LOG(DBG, FILTER,
	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
	return cam_entry->ref_cnt;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..3727707446
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1438 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Mask patterns that permit CAM placement of a key.
+ * A match element whose word length and mask equal one of these entries
+ * exactly may be stored in the CAM; anything else is flagged for TCAM
+ * (see km_add_match_elem). The trailing comments name the typical header
+ * fields each pattern covers.
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines.
+ * The distribution tables track which km_flow_def_s owns each CAM/TCAM
+ * record, enabling cuckoo moves and reuse detection.
+ */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner; /* flow owning this CAM record, NULL if free */
+};
+
+/* Flat index of record 'rec' in CAM bank 'bnk'; relies on a local 'km' */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/* Flat index of the record currently chosen for bank 'bnk' by km's hash */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/* Addresses already visited during a recursive cuckoo move (cycle guard) */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner; /* flow owning this TCAM record, NULL if free */
+};
+
+/* Flat index of record 'rec' in TCAM bank 'bnk'; relies on a local 'km' */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach (and lazily allocate) the per-NIC-device CAM/TCAM record manager.
+ * The single allocation is laid out as:
+ *   [CAM distribution][cuckoo move counter][TCAM distribution][hasher]
+ * and the km pointers are aimed at the respective slices. The hasher is
+ * (re)initialized for the CAM bank/record geometry.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		/*
+		 * Bug fix: the allocation result was never checked; on OOM
+		 * the pointer arithmetic below and init_hasher() would
+		 * dereference offsets from NULL (CERT MEM32-C).
+		 */
+		if (!*handle) {
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the per-NIC-device CAM/TCAM record manager allocated by
+ * km_attach_ndev_resource_management() and NULL the caller's handle.
+ * Safe to call with an already-NULL handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Append one match element (word value + mask) to the flow definition.
+ * word_len of 3 is normalized to 4 by zero-padding; valid lengths are
+ * then 1, 2 or 4 words. The element is compared against the cam_masks
+ * table: only an exact mask match permits CAM placement, otherwise the
+ * element is flagged for TCAM. Returns 0 on success, -1 on a bad length.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable carrying an extra info (color) word in the key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = !!on;
+}
+
+/*
+ * Find the first unmarked, non-sideband match element with the requested
+ * word length. Returns its index, or -1 when none remains.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (!marked[i] &&
+				!(km->match[i].extr_start_offs_id & SWX_INFO) &&
+				km->match[i].word_len == size)
+			return i;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Human-readable name of a frame/protocol offset id, for debug logs only. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final extractor layout for this flow's key.
+ * Packs the collected match elements into up to MAX_QWORDS QWORD slots
+ * and MAX_SWORDS SWORD slots, appends sideband (SWX) words, then decides
+ * CAM vs TCAM placement based on the per-element masked_for_tcam flags.
+ * For TCAM keys the word size is padded to a supported length and the
+ * candidate start banks are computed.
+ * Returns 0 on success, -1 when the match set cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+	int num_qwords = 0;
+	int num_swords = 0;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords - prefer the largest remaining elements first */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		num_qwords++;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		num_swords++;
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * Bug fix: fill all four candidate start offsets.
+			 * The original wrote start_offsets[0] four times,
+			 * leaving elements 1-3 uninitialized even though
+			 * num_start_offsets is 4 - tcam_find_mapping would
+			 * then read garbage bank indexes.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare a new flow key definition (km) against an already-programmed
+ * one (km1) to decide whether the KM recipe and flow type can be reused.
+ * Returns km1's flow type on a compatible match, 0 when not reusable, or
+ * -1 on an exact value clash (flow already programmed).
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/*
+		 * NOTE(review): the first branch inspects km->match[i] while
+		 * the second inspects km->match_map[i]; match_map may order
+		 * elements differently than match - confirm the asymmetry is
+		 * intentional.
+		 */
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program KM recipe 'index' in hardware from this flow definition:
+ * extractor selection (QW0/QW4, DW8/DW10, SWX), key mask A, and the
+ * CAM (info/EL/FTM/paired) or TCAM (bank bitmap/KL) specific fields.
+ * Returns 0 on success, -1 on any unsupported combination.
+ * NOTE(review): register write order follows the hardware programming
+ * sequence - do not reorder.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband words are CAM-only and single-slot */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* plain single words use DW8 first, then DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* quad words use QW0 first, then QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written high-to-low into the mask word array */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into CAM 'bank' at the
+ * pre-computed record index, claim the distribution slot(s), and flush.
+ * A paired key spills into the adjacent (record + 1) entry.
+ * Returns OR'ed hardware write results (0 on success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* remaining words go into the paired (next) record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record(s) in 'bank', release the distribution
+ * slot(s) and flush. Mirror of cam_populate().
+ * Returns OR'ed hardware write results (0 on success).
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* paired entry occupies the next record as well */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank to any
+ * free bank (cuckoo hashing, single step). On success the old slot is
+ * released, the move counter incremented, and 1 is returned; 0 when no
+ * free bank exists or the populate write fails.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* paired entries need the neighbour slot free too */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to move the flow occupying
+ * 'bank_idx' out of the way, descending up to 'levels' steps.
+ * cam_addr_reserved_stack records addresses already on the move path so
+ * the recursion cannot revisit them (cycle guard). Returns 1 when a
+ * chain of moves freed the slot, 0 otherwise.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		/* skip addresses already claimed by this move chain */
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Place this flow's key into the CAM: hash the key to get a candidate
+ * record per bank, pick the first free bank, and if none is free try a
+ * bounded cuckoo displacement (depth 4). On success the entry is
+ * populated and marked flushed. Returns 0 on success, -1 when no room,
+ * or hardware write status from cam_populate().
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): log assumes at least 3 CAM banks - confirm minimum geometry */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Find a TCAM record index that is free in ALL key_word_size consecutive
+ * banks starting at 'start_bank' (a TCAM key occupies one record across
+ * several banks). Stores the result in km->tcam_record and returns 1, or
+ * returns 0 when no such record exists.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Choose a TCAM start bank and record for this flow by trying each of
+ * the candidate start offsets computed in km_key_create().
+ * Returns 0 on success (tcam_start_bank/tcam_record set), -1 when full.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	/* Search record and start index for this flow */
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		if (tcam_find_free_record(km, km->start_offsets[bs_idx])) {
+			km->tcam_start_bank = km->start_offsets[bs_idx];
+			NT_LOG(DBG, FILTER,
+			       "Found space in TCAM start bank %i, record %i\n",
+			       km->tcam_start_bank, km->tcam_record);
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Encode one 32-bit key word + mask into a ternary TCAM bank for the
+ * given record: for each of the 4 bytes and each of the 256 possible
+ * input values, set or clear this record's bit depending on whether the
+ * value matches (value & mask) == (key & mask). Flushes the whole bank
+ * and claims the distribution slot on success. Returns OR'ed HW status.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program this flow's full key into the TCAM: locate a record if one was
+ * not inherited from a reused recipe, write the TCI (color + flow type),
+ * then encode each key word into its bank. Marks the flow flushed on
+ * success. Returns 0 on success, non-zero/negative on failure.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		/* recipe was reused; find a fresh record at the shared start bank */
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record's match bit for every byte/value combination in a
+ * bank, flush the bank, and release the record's ownership entry.
+ *
+ * Returns 0 on success, non-zero on backend error (the ownership entry
+ * is only cleared after a successful read-modify-write pass).
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* position of this record's bit within the record bitmap words */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	/* all_recs[] must be able to hold the full bank-width bitmap */
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			/* clear this record's bit for every byte value */
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove a previously written TCAM entry: zero its TCI (color and flow
+ * type) and clear its record bits in every key-word bank.
+ *
+ * Returns 0 on success, -1 when the entry has no valid placement,
+ * otherwise a backend error code.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* clear the record in every bank the key occupied */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Attach km as a reference to an already-programmed, identical match
+ * entry (km1).  km is appended to km1's reference chain and inherits
+ * km1's hardware placement, so no new HW resources are consumed.
+ *
+ * Returns 0 on success, -1 for unsupported targets.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	/* root of the chain is km1's root, or km1 itself if it is the root */
+	km->root = (km1->root != NULL) ? km1->root : km1;
+
+	/* append km at the end of the reference chain */
+	while (tail->reference != NULL)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = km1->info;
+
+	if (km->target == KM_CAM) {
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+
+	if (km->target == KM_TCAM) {
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+
+	/* KM_SYNERGY and anything else is not supported here */
+	return -1;
+}
+
+/*
+ * Write this match entry to its key matcher target (CAM or TCAM) with
+ * the given color (flow info word).
+ *
+ * Returns 0 on success, -1 on failure or unsupported target.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		return km_write_data_to_cam(km);
+
+	if (km->target == KM_TCAM)
+		return km_write_data_to_tcam(km);
+
+	/* KM_SYNERGY and unknown targets are not supported */
+	return -1;
+}
+
+/*
+ * Remove this match entry from its flow definition.
+ *
+ * Three distinct cases:
+ *  1) km is a non-root member of a reference chain: simply unlink it
+ *     from the chain; the hardware entry stays owned by the root.
+ *  2) km is the root of a chain with references: transfer hardware
+ *     ownership (CAM/TCAM distribution entries) to the first reference.
+ *  3) km stands alone and is programmed: reset the hardware entry.
+ *
+ * Returns 0 on success, -1 on unsupported target.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* case 1: unlink km from its chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* case 2: km is root - promote the first reference to root */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			/* a paired CAM entry occupies two dist slots */
+			if (km->key_word_size + !!km->info_set > 1) {
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* case 3: no references - clear the hardware entry itself */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One tunnel definition kept in the global, linked tunnel database. */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;	/* tunnel header configuration */
+	struct tunnel_cfg_s cfg_mask;	/* mask of significant cfg fields */
+	uint32_t flow_stat_id;	/* (uint32_t)-1 until set by flow code */
+	uint8_t vport;	/* allocated virtual port number */
+	int refcnt;	/* number of flows referring to this tunnel */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/* A port number is "virtual" when it lies in the tunnel vport window. */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+	    virt_port < MAX_HW_VIRT_PORTS)
+		return 1;
+	return 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* global database of defined tunnels (singly linked list) */
+static struct tunnel_s *tunnels;
+
+/* allocation map for virtual tunnel ports; non-zero = in use */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual tunnel port number.
+ * Returns the port on success, or 255 when all ports are in use.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	uint8_t idx = 0;
+
+	while (idx < MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET) {
+		if (vport[idx] == 0) {
+			vport[idx] = 1;
+			return (uint8_t)(idx + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+		idx++;
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ *
+ * @param virt_port  port number returned by flow_tunnel_alloc_virt_port()
+ * @return 0 on success; 255 if the port is outside the virtual range.
+ *         The failure value is written explicitly: the previous
+ *         "return -1" was silently truncated to 255 by the uint8_t
+ *         return type, so observable behavior is unchanged.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+	    virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	/* invalid port: do not rely on implicit -1 -> 255 conversion */
+	return 255;
+}
+
+/*
+ * Masked compare: two values are considered equal when they agree on all
+ * bits covered by BOTH masks.  Implemented as a GNU statement expression
+ * so each argument is evaluated exactly once.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked compare of two IPv4 tunnel configs: addresses and UDP ports. */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked compare of two IPv6 tunnel configs: 2x64-bit address halves
+ * and UDP ports.
+ */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Compare a stored tunnel against a candidate configuration/mask pair.
+ * Only fields covered by both masks take part in the comparison.
+ * Returns non-zero on match.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Find a tunnel in the database matching the given configuration, or
+ * create and insert a new one.
+ *
+ * @param tnlcfg       tunnel configuration to match/define
+ * @param tnlcfg_mask  mask selecting the significant fields of tnlcfg
+ * @param tun_set      non-zero: a full tunnel definition ("set" command);
+ *                     zero: a match search against defined tunnels
+ * @return the tunnel with its reference count incremented when reused,
+ *         a new tunnel with refcnt 1 when created, or NULL when no
+ *         virtual port is free or memory allocation fails.
+ *
+ * Fix: the calloc() result was dereferenced without a NULL check; on
+ * allocation failure we now release the just-allocated vport and
+ * return NULL instead of crashing.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* do not leak the vport on alloc failure */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel.  When the last reference goes away the
+ * tunnel is unlinked from the global list, its virtual port is released
+ * and the tunnel memory is freed.
+ *
+ * Returns 0 on success, -1 if the tunnel is not in the database.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *prev = NULL;
+	struct tunnel_s *cur;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+	/* find tunnel in list */
+	for (cur = tunnels; cur != NULL; prev = cur, cur = cur->next) {
+		if (cur == tnl)
+			break;
+	}
+
+	if (!cur) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--cur->refcnt == 0) {
+		if (prev)
+			prev->next = cur->next;
+		else
+			tunnels = cur->next;
+		flow_tunnel_free_virt_port(cur->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       cur->vport);
+		free(cur);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel definition from a flow element list starting at *idx.
+ *
+ * On success *idx is advanced past the consumed elements and the matching
+ * (or newly created) tunnel from the database is returned.  When vni is
+ * non-NULL it receives the VXLAN VNI (or (uint32_t)-1 if none was seen);
+ * a NULL vni marks this as a tunnel "set" command.  Returns NULL on
+ * invalid or incomplete tunnel input.
+ *
+ * Fix: tnlcfg_mask is now zero-initialized.  It was previously left
+ * uninitialized on the stack while only some fields were written by the
+ * element loop, yet tunnel_get() memcmp's/memcpy's the whole structure
+ * and the check macros read its fields - an uninitialized read.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/* mask must be fully defined before it is compared/copied */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		/* elements must appear in ascending type order */
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* 24-bit VNI, network byte order */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port assigned to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Bind a flow statistics id to this tunnel (turns it into a defined tunnel). */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Look up the configuration of the tunnel bound to the given virtual
+ * port.  A flow_stat_id of (uint32_t)-1 acts as a wildcard.  On success
+ * the configuration is copied into *tuncfg and 0 is returned; -1 when
+ * no tunnel matches.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	struct tunnel_s *t;
+
+	for (t = tunnels; t != NULL; t = t->next) {
+		int port_match = (t->vport == vport);
+		int stat_match = (flow_stat_id == t->flow_stat_id ||
+				  flow_stat_id == (uint32_t)-1);
+
+		if (port_match && stat_match) {
+			memcpy(tuncfg, &t->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * Accumulate a 16-bit one's-complement style checksum (RFC 1071 folding)
+ * over 'size' bytes of data, seeded with 'seed'.
+ *
+ * NOTE(review): for odd sizes the final term reads data[size/2] as a
+ * full 16-bit word and keeps only its low-order byte - this presumes the
+ * buffer is addressable one byte past the odd length and that the low
+ * byte is the intended trailing byte; confirm against the callers.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy 'size' raw bytes of a flow element's (unmasked) spec data. */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build the raw tunnel-push (VXLAN encap) header in fd->tun_hdr from a
+ * flow element list (ETH, IPv4 or IPv6, UDP, VXLAN, PORT_ID, VOID).
+ * Required field values are forced where needed (IPv4 version/IHL, TTL,
+ * UDP protocol, DF flag, IPv6 version/hop limit, zero UDP checksum,
+ * VXLAN I-flag) and the IPv4 header checksum is pre-calculated with the
+ * payload length folded in.
+ *
+ * Returns 0 on success, -1 on unsupported elements or when IPv6 encap is
+ * requested on a ROA module older than version 6.
+ *
+ * Fix: the FLOW_DEBUG dumps of dmac/smac printed byte index 5 twice and
+ * never index 4; the 5th MAC byte (index 4) is now printed correctly.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force a plain 20-byte IPv4 header */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* pre-calc checksum over the IPv4 header plus payload length */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	/* Ask the backend whether the CAT module exists in this FPGA image */
+	const bool module_present = be->iface->get_cat_present(be->be_dev);
+
+	return module_present;
+}
+
+/*
+ * Read the CAT module version and its resource counts from the backend,
+ * then allocate the version-specific shadow tables in one contiguous
+ * block (released as a whole by hw_mod_cat_free()).
+ *
+ * Returns 0 on success; negative on a bad resource count, unsupported
+ * module version or allocation failure.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory resources: a zero or negative count is an error */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* Optional resources below: zero is allowed, only negative is an error */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	/* KM interface module mapping indices; used by reset()/flush paths */
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks are present */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * Allocate the per-version cache layout. callocate_mod() takes the
+	 * number of tables followed by (pointer, entry count, entry size)
+	 * triples and carves everything out of one allocation.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		/* v21 doubles the FTE key count (x4) and reuses v18 layouts
+		 * for the tables that did not change.
+		 */
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		/* v22 adds the CCE/CCS tables (14 tables total) */
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the single allocation backing all CAT shadow tables
+ * (created by hw_mod_cat_alloc()).
+ *
+ * free(NULL) is a no-op per the C standard, so no guard is needed;
+ * the pointer is reset so a repeated call is harmless and no dangling
+ * reference is left behind.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset one CFN (category function) entry to permissive defaults:
+ * clear the whole entry, then open every protocol/error check so the
+ * entry accepts all traffic until it is explicitly programmed.
+ *
+ * NOTE(review): the return value reflects only the initial PRESET_ALL
+ * call; failures of the subsequent field setters are not propagated.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields below only exist from module version 21 onwards */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Zero the whole CAT shadow cache and flush every table to hardware,
+ * honouring the version-specific table set and KM interface wiring.
+ *
+ * Returns 0 on success, -1 as soon as any flush fails.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		/* v18 and earlier: a single KM interface, id 0 */
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		/* v21+: flush via the mapped module ids (km_if_m0/km_if_m1) */
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* Second KM/FLM interface is optional */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC is optional; only flush when the FPGA provides it */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist from version 22 onwards */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Write cached CFN entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table (start_idx must be 0).
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES) {
+		/* A full flush must begin at the start of the table */
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		count = be->cat.nb_cat_funcs;
+	} else if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs) {
+		return error_index_too_large(__func__);
+	}
+
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Read or write a single CFN field in the shadow cache.
+ *
+ * @param field    field selector (HW_CAT_CFN_*)
+ * @param index    CFN entry index
+ * @param word_off word offset within multi-word fields (e.g. PM_CMP)
+ * @param value    in/out value; written for get, read for set
+ * @param get      non-zero = read into *value, zero = write *value
+ *
+ * Returns 0 on success, negative on bad index/field/version.
+ * Note: this touches only the cache; a *_flush call writes to HW.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* Dispatch on module version, then on the requested field */
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a 2-word field */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has a single KM interface, hence one km_or */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21/v22 share the v21 CFN layout (adds tunnel error and
+		 * TTL fields, COPY_FROM and a second KM interface).
+		 */
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* Copy entire entry *value into entry index */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a 2-word field */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field: wrapper around hw_mod_cat_cfn_mod() with get = 0 */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one CFN field: wrapper around hw_mod_cat_cfn_mod() with get = 1 */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map (interface selector, module id) to the backend interface index
+ * (0 or 1). Returns negative (error_unsup_field) when the id does not
+ * match the configured mapping.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	/* Version 18 only has a single KM interface */
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num == KM_FLM_IF_SECOND) {
+		/* The second interface must be wired as m1 */
+		if (be->cat.km_if_m1 == km_if_id)
+			return 1;
+		return error_unsup_field(__func__);
+	}
+
+	/* First interface: the id may be wired as either m0 or m1 */
+	if (be->cat.km_if_m0 == km_if_id)
+		return 0;
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/* Flush cached KCE entries to hardware for the selected KM interface */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* One KCE entry carries 8 bits - one per category function */
+	const unsigned int nb_entries = be->cat.nb_cat_funcs / 8;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* Resolve which KM/FLM interface instance is addressed */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries on the KM side (module id 0) */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kce_flush(be, if_num, km_module_id, start_idx,
+				    count);
+}
+
+/* Flush KCE entries on the FLM side (module id 1) */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kce_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Read or write a KCE field in the shadow cache.
+ * @param get  non-zero reads into *value, zero writes *value.
+ * Returns 0, or negative on bad index/interface/field/version.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	/* One KCE entry covers 8 category functions */
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		/* v18: single interface, scalar enable bitmap */
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21+: per-interface enable bitmap */
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCE field via the KM side (module id 0) */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, km_module_id, index,
+				  &value, 0);
+}
+
+/* Read a KCE field via the KM side (module id 0) */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, km_module_id, index,
+				  value, 1);
+}
+
+/* Write a KCE field via the FLM side (module id 1) */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, flm_module_id, index,
+				  &value, 0);
+}
+
+/* Read a KCE field via the FLM side (module id 1) */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, flm_module_id, index,
+				  value, 1);
+}
+
+/*
+ * KCS
+ */
+/* Flush cached KCS entries to hardware for the selected KM interface */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* One KCS entry per category function */
+	const unsigned int nb_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* Resolve which KM/FLM interface instance is addressed */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries on the KM side (module id 0) */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kcs_flush(be, if_num, km_module_id, start_idx,
+				    count);
+}
+
+/* Flush KCS entries on the FLM side (module id 1) */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kcs_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Read or write a KCS field in the shadow cache.
+ * @param get  non-zero reads into *value, zero writes *value.
+ * Returns 0, or negative on bad index/interface/field/version.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		/* v18: single interface, scalar category */
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21+: per-interface category */
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCS field via the KM side (module id 0) */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, km_module_id, index,
+				  &value, 0);
+}
+
+/* Read a KCS field via the KM side (module id 0) */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, km_module_id, index,
+				  value, 1);
+}
+
+/* Write a KCS field via the FLM side (module id 1) */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, flm_module_id, index,
+				  &value, 0);
+}
+
+/* Read a KCS field via the FLM side (module id 1) */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, flm_module_id, index,
+				  value, 1);
+}
+
+/*
+ * FTE
+ */
+/* Flush cached FTE entries to hardware for the selected KM interface */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* v20+ uses 4 FTE keys per entry group, older versions 2 */
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+	const unsigned int nb_entries =
+		be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* Resolve which KM/FLM interface instance is addressed */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries on the KM side (module id 0) */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_fte_flush(be, if_num, km_module_id, start_idx,
+				    count);
+}
+
+/* Flush FTE entries on the FLM side (module id 1) */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_fte_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Read or write an FTE field in the shadow cache.
+ * @param get  non-zero reads into *value, zero writes *value.
+ * Returns 0, or negative on bad index/interface/field/version.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	/* v20+ uses 4 FTE keys per entry group, older versions 2;
+	 * must match the table sizing in hw_mod_cat_alloc()
+	 */
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		/* v18: single interface, scalar enable bitmap */
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21+: per-interface enable bitmap */
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_fte_mod(): the _km_ variants
+ * use km_if_id = 0, the _flm_ variants km_if_id = 1.
+ */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/* Flush CTE entries to HW; one entry per category function (nb_cat_funcs). */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get the CTE enable bitmap of one category function.
+ * The v18 shadow layout is reused unchanged for versions 21 and 22.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_cte_mod(). */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CTS entries; the table holds addr_size words per category function.
+ * NOTE(review): the `_VER_ < 15` branch appears dead — every accessor in
+ * this module only supports versions 18/21/22 (and hw_mod_cat_cts_mod uses
+ * the unconditional formula) — confirm before removing.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one CTS word (cat_a or cat_b) at a flat word index;
+ * there are (cts_num + 1) / 2 words per category function.
+ * The v18 shadow layout is shared by all supported versions.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_cts_mod(). */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/* Flush COT (color/KM) entries; the table is sized by max_categories. */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one COT entry field.
+ * PRESET_ALL fills the whole entry with the low byte of *value (set only);
+ * COMPARE/FIND run the generic index helpers over the entry array, FIND
+ * returning the matching index through *value.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_cot_mod(). */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/* Flush CCT entries; the table holds 4 entries per category function. */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one CCT field (color or km) at a flat index of
+ * nb_cat_funcs * 4 entries.  v18 layout shared by all supported versions.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_cct_mod(). */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/* Flush KCC CAM entries; the CAM holds kcc_size entries. */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one KCC CAM entry field.  The key spans two 32-bit words,
+ * so word_off selects word 0 or 1 and anything larger is rejected;
+ * word_off is ignored for the category and id fields.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_kcc_mod(). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush EXO entries; the table is sized by nb_pm_ext. */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one EXO entry field.  The offset (ofs) is stored as a signed
+ * int32_t, hence the get_set_signed() accessor for it.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_exo_mod(). */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/* Flush RCK entries; the table holds 64 entries per PM extractor. */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Set or get one RCK data word; flat index over nb_pm_ext * 64 entries. */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_rck_mod(). */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/* Flush LEN (length check) entries; the table is sized by nb_len. */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one LEN entry field (lower/upper bound, dyn1/dyn2, invert).
+ * v18 layout shared by all supported versions.
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_len_mod(). */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/* Flush CCE entries; the CCE table has a fixed size of 4 entries (v22). */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one CCE entry field (v22 only).
+ *
+ * Bounds fix: the CCE table has a fixed size of 4 entries (see
+ * hw_mod_cat_cce_flush()); the previous check against be->cat.nb_len was
+ * a copy/paste from the LEN accessors and validated the wrong range.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_cce_mod(). */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+/* Flush CCS entries; the CCS table has a fixed size of 1024 entries (v22). */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set or get one CCS entry field (v22 only).
+ *
+ * Bounds fix: the CCS table has a fixed size of 1024 entries (see
+ * hw_mod_cat_ccs_flush()); the previous check against be->cat.nb_len was
+ * a copy/paste from the LEN accessors and validated the wrong range.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Thin set/get wrappers around hw_mod_cat_ccs_mod(). */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CFN: per-category-function match criteria (one 32-bit shadow word each). */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE: KM category enable bitmap. */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS: KM category select. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE: flow type enable bitmap. */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE: category enable bitmap; the union exposes the per-module bits. */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS: category translation words A/B. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT: color/KM per category. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT: color/KM, 4 entries per category function. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO: extractor offset; ofs is signed (see get_set_signed users). */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN: frame length check bounds and invert flag. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC: CAM entry — 64-bit key (2 words), category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Shadow-register image for CAT module version 18; arrays allocated at init. */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/* CFN v21: v18 layout extended with tunnel checksum/TTL error checks and
+ * two KM OR words (km0_or/km1_or) instead of one.
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* v21 duplicates KCE/KCS/FTE per KM/FLM interface (index 0/1). */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Shadow-register image for CAT v21; tables unchanged from v18 reuse the
+ * v18 struct types.
+ */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CTE v22: adds the rrb bit to the v18 enable bitmap. */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE: new in v22 (fixed 4-entry table, see hw_mod_cat_cce_flush). */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS: new in v22 (fixed 1024-entry table); per-module enable + value pairs
+ * plus three side-band type/data pairs.
+ */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Shadow-register image for CAT v22; reuses v21/v18 types where unchanged. */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend whether the FLM (flow matcher) module exists in the FPGA. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Allocate and lay out the FLM shadow-register cache.
+ *
+ * Queries the backend for the FLM version and the resource counts, then
+ * allocates one contiguous cache area (via callocate_mod) and points the
+ * per-table members of be->flm.v17/v20 into it.  The v20 layout is a
+ * superset of v17, so the v20 case initializes the v17 members first and
+ * then the additional v20 statistics counters.
+ *
+ * Returns 0 on success, -1 on allocation failure, or a negative error
+ * from error_resource_count()/error_unsup_ver() on bad counts or an
+ * unsupported FLM version.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* 26 pointer/count/size triplets for the v17 tables */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v17 tables plus 12 additional v20 statistics counters (38 total) */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM shadow-register cache allocated by hw_mod_flm_alloc().
+ * Safe to call on an already-freed module: free(NULL) is a no-op and the
+ * base pointer is always reset to NULL afterwards.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM module to its default state: zero the shadow cache, set
+ * the SDRAM-split default, and flush control, timeout, scrub and all RCP
+ * entries to hardware.  Always returns 0.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached FLM control register block to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Shared get/set accessor for FLM control fields (cache only, no HW I/O).
+ * 'get' selects read (1) vs. write (0) of *value.  PRESET_ALL is
+ * write-only and memsets the whole control struct to the low byte of
+ * *value.  Returns 0, or a negative error for an unknown field/version.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM control field in the cache (flush separately). */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one FLM control field from the cache. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM status block to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached FLM status block from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Shared get/set accessor for FLM status fields (cache only). */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM status field in the cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one FLM status field from the cache. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM timeout register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Shared get/set accessor for the single FLM timeout field (cache only). */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM timeout value in the cache. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the FLM timeout value from the cache. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM scrub register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Shared get/set accessor for the single FLM scrub interval field. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM scrub interval in the cache. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the FLM scrub interval from the cache. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/*
+ * Load-indicator accessors.  Each of the four single-register blocks
+ * (bin, pps, lps, aps) follows the same flush / mod / set / get pattern
+ * as the control and status blocks above.
+ */
+
+/* Write the cached FLM load_bin register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_pps register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_lps register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_aps register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM priority limits/flow-types to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/* Shared get/set accessor for the four priority limit/ft pairs. */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM priority field in the cache. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one FLM priority field from the cache. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush 'count' PST profile entries starting at start_idx to hardware.
+ * count == ALL_ENTRIES flushes the whole table; the range is bounds
+ * checked against nb_pst_profiles.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Shared get/set accessor for one PST profile entry (cache only).
+ * PRESET_ALL is write-only and memsets the indexed entry.
+ * NOTE(review): 'index' is not bounds checked here — callers are assumed
+ * to pass a valid profile index.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one field of the indexed PST profile in the cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of the indexed PST profile from the cache. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush 'count' RCP (recipe) entries starting at start_idx to hardware.
+ * count == ALL_ENTRIES flushes all categories; bounds checked against
+ * nb_categories.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Shared get/set accessor for one RCP entry (cache only).
+ * PRESET_ALL is write-only (memset of the entry); MASK copies the whole
+ * 10-word mask array in the direction selected by 'get', so *value must
+ * point at at least that many words for that field.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* array field: bulk copy instead of get_set() */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the RCP mask array (the only array-valued RCP field); any other
+ * field is rejected.  NOTE(review): the rejection path reuses
+ * error_unsup_ver() rather than error_unsup_field() — confirm intended.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/* Set one scalar RCP field (MASK must go through hw_mod_flm_rcp_set_mask). */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field from the cache (MASK reads the full array). */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached learn/info/status buffer counters from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read-only accessor for the buffer control counters (no set variant). */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Read one buffer control counter from the cache. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh all cached FLM statistics counters from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one FLM statistics counter from the cache.
+ * The outer cases cover counters common to v17 and v20; the inner
+ * default handles the counters that only exist from v18 onward (held in
+ * be->flm.v20), returning error_unsup_field() for them on v17.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* remaining counters require FLM >= v18 */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, passed as a word array in
+ * *value) to the hardware learn FIFO.  Returns the backend's result, or
+ * a negative error for an unknown field/version.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to word_cnt words of flow info records into *value.
+ * NOTE(review): the backend call's return value is discarded here,
+ * unlike the learn path above — confirm intended.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (flm_v17_sta_data_s worth of words) into *value.
+ * As with the info path, the backend return value is discarded.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay used to pack/unpack the four 28-bit member indices stored in
+ * flm_v17_lrn_data_s::mbr_idx (14 bytes = 4 x 28 bits).  IDs 1/2 overlay
+ * the first 7 bytes, IDs 3/4 the second 7 bytes (hence the "+ 7" offset).
+ * NOTE(review): relies on implementation-defined bit-field layout and
+ * potentially unaligned access through a cast pointer — assumed valid
+ * for the compilers/targets this PMD supports; confirm.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* FLM v17 control register fields (one cached word per field). */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* FLM v17 status register fields. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* Flow timeout register. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* Scrub interval register. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* Load indicator registers: bin, packets/s, lookups/s, accesses/s. */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* Four priority levels, each a limit plus a flow-type selector. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* One PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* One RCP (recipe) entry; mask is a 10-word array (see HW_FLM_RCP_MASK). */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Learn/info/status buffer availability counters. */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * The following three structs are byte-packed because they mirror the
+ * exact bit layout of the records exchanged with the FLM hardware
+ * (bit positions given as high:low with the width in parentheses).
+ */
+#pragma pack(1)
+/* Learn record written to the hardware learn FIFO (768 bits total). */
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow info record read back from hardware. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Flow status record read back from hardware. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* FLM v17 statistics counters — one 32-bit counter per register. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 shadow-register cache layout (pointers into one base area). */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20 additional statistics counters (extends the v17 set).
+ * Each struct shadows one 32-bit counter register.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Shadow-cache layout for the FLM module, version 20.  v20 reuses the
+ * v17 register structs for the common part and appends the new v20
+ * statistics counters at the end.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Query the backend for presence of the HSH (hash/RSS) FPGA module. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Read the HSH module version from the backend and allocate the RCP
+ * shadow cache matching that version.
+ *
+ * Returns 0 on success, -1 on allocation failure, or an
+ * error_unsup_ver() code for an unknown module version.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* One contiguous allocation holding all RCP entries */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the HSH shadow cache allocated by hw_mod_hsh_alloc().
+ * All version sub-pointers alias this single allocation, so one free()
+ * releases everything.  free(NULL) is a no-op, so no guard is needed;
+ * the pointer is cleared to defend against double-free.
+ */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Clear the entire HSH shadow cache and flush all RCP entries to HW.
+ * Returns the result of the RCP flush.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	/*
+	 * Unsigned compare also rejects a negative start_idx (the sum wraps
+	 * to a huge value); this matches the bounds-check style used by the
+	 * other hw_mod_*_flush() functions (HST/IOA/KM).
+	 */
+	if ((unsigned int)(start_idx + count) > be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Get or set a single HSH RCP field in the shadow cache.
+ *
+ * @param field    register field selector
+ * @param index    RCP entry index (bounds-checked against nb_rcp)
+ * @param word_off word index for array fields (MAC_PORT_MASK, WORD_MASK);
+ *                 doubles as a second index for COMPARE/FIND
+ * @param value    in-parameter on set, out-parameter on get
+ * @param get      non-zero reads the field, zero writes it
+ *
+ * Returns 0 on success or a negative error_* code.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * mac_port_mask[] has HSH_RCP_MAC_PORT_MASK_SIZE
+			 * elements, so the last valid index is SIZE - 1.
+			 * The original '>' check admitted word_off == SIZE,
+			 * an out-of-bounds access.
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* word_mask[] has HSH_RCP_WORD_MASK_SIZE elements;
+			 * '>=' prevents the off-by-one access at index SIZE.
+			 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HSH RCP field into the shadow cache (see hw_mod_hsh_rcp_mod). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one HSH RCP field from the shadow cache (see hw_mod_hsh_rcp_mod). */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * HSH v5 recipe (RCP) shadow entry; one per RSS recipe.
+ * NOTE: the array sizes below must stay in sync with
+ * HSH_RCP_MAC_PORT_MASK_SIZE (4) and HSH_RCP_WORD_MASK_SIZE (10)
+ * used for bounds checks in hw_mod_hsh.c.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Shadow-cache layout for the HSH module, version 5. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Query the backend for presence of the HST (header stripper) FPGA module. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Read the HST module version and category count from the backend and
+ * allocate the RCP shadow cache for the version found.
+ *
+ * Returns 0 on success, -1 on allocation failure, or an error_* code
+ * for an invalid resource count / unsupported version.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the HST shadow cache allocated by hw_mod_hst_alloc().
+ * free(NULL) is a no-op, so the NULL guard is unnecessary; the pointer
+ * is cleared to defend against double-free.
+ */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Clear the entire HST shadow cache and flush all RCP entries to HW.
+ * Returns the result of the RCP flush.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table; the unsigned compare
+ * also rejects a negative start_idx (the sum wraps to a huge value).
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Get or set a single HST RCP field in the shadow cache.
+ *
+ * @param field register field selector
+ * @param index RCP entry index (bounds-checked)
+ * @param value in-parameter on set, out-parameter on get; for
+ *              COMPARE/FIND it carries the second entry index
+ * @param get   non-zero reads the field, zero writes it
+ *
+ * Returns 0 on success or a negative error_* code.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Bug fix: the return value was discarded, making the
+			 * rv check below dead code (compare the HSH/IOA FIND
+			 * handlers, which assign rv).
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HST RCP field into the shadow cache (see hw_mod_hst_rcp_mod). */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HST RCP field from the shadow cache (see hw_mod_hst_rcp_mod). */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * HST v2 recipe (RCP) shadow entry: header strip window (start/end)
+ * plus three packet modifier slots (cmd/dyn/ofs/value each).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* Shadow-cache layout for the HST module, version 2. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Query the backend for presence of the IOA FPGA module. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Read the IOA module version and resource counts from the backend and
+ * allocate the shadow caches (RCP, special TPID, ROA EPP) for the
+ * version found.
+ *
+ * Returns 0 on success, -1 on allocation failure, or an error_* code
+ * for an invalid resource count / unsupported version.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* One contiguous allocation carved into three sub-areas */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the IOA shadow cache allocated by hw_mod_ioa_alloc().
+ * free(NULL) is a no-op, so the NULL guard is unnecessary; the pointer
+ * is cleared to defend against double-free.
+ */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Clear the IOA shadow cache, program default custom TPIDs and flush
+ * all tables to hardware.
+ *
+ * NOTE(review): the flush/set return values are discarded and 0 is
+ * always returned, unlike hw_mod_hsh_reset() which propagates the
+ * flush result - confirm this is intentional.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Get or set a single IOA RCP field in the shadow cache.
+ * The case table maps one enum field to one register field and must
+ * mirror the hardware register map exactly.
+ *
+ * @param value in-parameter on set, out-parameter on get; for
+ *              COMPARE/FIND it carries the second entry index
+ * @param get   non-zero reads the field, zero writes it
+ * Returns 0 on success or a negative error_* code.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IOA RCP field into the shadow cache (see hw_mod_ioa_rcp_mod). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IOA RCP field from the shadow cache (see hw_mod_ioa_rcp_mod). */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Write the cached special-TPID configuration to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one IOA config (custom TPID) value in the shadow cache; use
+ * hw_mod_ioa_config_flush() to commit to hardware.
+ * Returns 0 on success or a negative error_* code.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) ROA EPP shadow entries to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Get or set a single ROA EPP (egress packet port) field in the
+ * shadow cache.
+ *
+ * @param value in-parameter on set, out-parameter on get; for
+ *              COMPARE/FIND it carries the second entry index
+ * @param get   non-zero reads the field, zero writes it
+ * Returns 0 on success or a negative error_* code.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one ROA EPP field into the shadow cache (see hw_mod_ioa_roa_epp_mod). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one ROA EPP field from the shadow cache (see hw_mod_ioa_roa_epp_mod). */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* IOA v4 recipe (RCP) shadow entry: tunnel/VLAN pop-push and queue
+ * override settings for one category.
+ */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID values programmed at reset (0x8200 / 0x8300 defaults). */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress packet port entry (cross-indexed by ROA categories). */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* Shadow-cache layout for the IOA module, version 4. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Query the backend for presence of the KM (key matcher) FPGA module. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Read the KM module version and resource counts from the backend and
+ * allocate the shadow caches (RCP, CAM, TCAM, TCI, TCQ) for the
+ * version found.
+ *
+ * Returns 0 on success, -1 on allocation failure, or an error_* code
+ * for an invalid resource count / unsupported version.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/*
+		 * Use the mask-size macros instead of repeating the magic
+		 * numbers 12/6; these sizes are the bound checked in
+		 * hw_mod_km_rcp_mod().
+		 */
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the KM shadow cache allocated by hw_mod_km_alloc().
+ * free(NULL) is a no-op, so the NULL guard is unnecessary; the pointer
+ * is cleared to defend against double-free.
+ */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Clear the KM shadow cache and flush all tables (RCP, CAM, TCAM, TCI,
+ * TCQ) to hardware.
+ *
+ * NOTE(review): the individual flush return values are discarded and 0
+ * is always returned - confirm this is intentional.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	/* Flush all banks for each TCAM word index */
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Get or set a single KM RCP field in the shadow cache.
+ *
+ * @param word_off word index for array fields (MASK_A, MASK_B)
+ * @param value    in-parameter on set, out-parameter on get
+ * @param get      non-zero reads the field, zero writes it
+ *
+ * Returns 0 on success or a negative error_* code.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * word_off is signed here; reject negatives and use
+			 * '>=' so word_off == KM_RCP_MASK_D_A_SIZE (one past
+			 * the last element, assuming mask_d_a[] has SIZE
+			 * elements as the macro name indicates) is rejected
+			 * as out of bounds.
+			 */
+			if (word_off < 0 || word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* Same off-by-one/negative-index fix as MASK_A. */
+			if (word_off < 0 || word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write @value into one field of KM RCP record @index in the shadow cache.
+ * @word_off selects the word for array-typed fields (masks); 0 otherwise.
+ * Returns 0 on success, negative error code otherwise.
+ */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/*
+ * Read one field of KM RCP record @index from the shadow cache into *@value.
+ * Returns 0 on success, negative error code otherwise.
+ */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush @count cached KM CAM records, starting at (@start_bank,
+ * @start_record), to hardware via the backend interface.
+ * ALL_ENTRIES flushes the whole CAM (all banks * all records).
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* records are laid out bank-major; reject a range past the last bank */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get (@get != 0) or set one field of the cached KM CAM record addressed by
+ * (@bank, @record). HW_KM_CAM_PRESET_ALL memsets the whole record with the
+ * low byte of *@value and is write-only. Only shadow state is touched; a
+ * separate flush pushes it to hardware.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* bank-major linear index into the cam shadow array */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write @value into one field of cached KM CAM record (@bank, @record). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one field of cached KM CAM record (@bank, @record) into *@value. */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush @count cached KM TCAM entries to hardware, starting at @start_bank.
+ * Each bank holds 4 * 256 entries. ALL_ENTRIES covers every bank,
+ * ALL_BANK_ENTRIES exactly one bank.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Get (@get != 0) or set one cached KM TCAM entry addressed by
+ * (@bank, @byte, @byte_val); each bank holds 4 * 256 entries.
+ * HW_KM_TCAM_T transfers the three T-words of a single entry;
+ * HW_KM_TCAM_BANK_RESET (write-only) presets every entry of @bank.
+ * On writes the entry is only marked dirty when a word actually changes,
+ * so a later flush can skip untouched entries.
+ *
+ * Note: the HW_KM_TCAM_T case previously recomputed the same
+ * bank * 4 * 256 + byte * 256 + byte_val expression into a second local;
+ * the function-scope index is reused instead (the redundant (int) cast on
+ * @byte is dropped as well).
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+	if (index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T:
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a KM TCAM entry (three T-words in @value_set) into the cache. */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read a KM TCAM entry (three T-words) from the cache into @value_set. */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush @count cached KM TCI records, starting at (@start_bank,
+ * @start_record), to hardware. ALL_ENTRIES flushes every record.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	/* TCI records are laid out bank-major, nb_tcam_bank_width per bank */
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get (@get != 0) or set one field of cached KM TCI record
+ * (@bank, @record). Shadow-only; use the flush function to reach hardware.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write @value into one field of cached KM TCI record (@bank, @record). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one field of cached KM TCI record (@bank, @record) into *@value. */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush @count cached KM TCQ records, starting at (@start_bank,
+ * @start_record), to hardware. ALL_ENTRIES flushes every record.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	/* same bank-major layout as the TCI records */
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get (@get != 0) or set one field of cached KM TCQ record
+ * (@bank, @record). Shadow-only; use the flush function to reach hardware.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write *@value into one field of cached KM TCQ record (@bank, @record). */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one field of cached KM TCQ record (@bank, @record) into *@value. */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * Shadow (software cache) layout of one KM v7 RCP record. Signed *_ofs
+ * fields hold offsets handled via get_set_signed() in hw_mod_km.c.
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];	/* indexed word-wise via HW_KM_RCP_MASK_A */
+	uint32_t mask_b[6];	/* indexed word-wise via HW_KM_RCP_MASK_B */
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* Shadow of one KM v7 CAM record: six key words and six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/*
+ * Shadow of one KM v7 TCAM entry; dirty is a software-only flag set on
+ * modification so a flush can skip unchanged entries.
+ */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* Shadow of one KM v7 TCAM category information (TCI) record. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* Shadow of one KM v7 TCAM qualifier (TCQ) record. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table pointers into the KM v7 shadow cache (allocated as one area). */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA image. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Read the PDB module version and resource counts from the backend and
+ * allocate the matching shadow cache (RCP table plus one config record).
+ * Returns 0 on success, -1 on allocation failure, or a negative error
+ * code for unsupported versions / bad resource counts.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		/* one contiguous allocation; individual pointers set up by
+		 * callocate_mod, freed as one block in hw_mod_pdb_free()
+		 */
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB shadow cache allocated by hw_mod_pdb_alloc(). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;	/* guard against double free */
+	}
+}
+
+/*
+ * Zero the entire PDB shadow cache and flush it to hardware.
+ * Returns the OR of the flush results (0 only if all succeeded).
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/*
+ * Flush @count cached PDB RCP records starting at @start_idx to hardware.
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Get (@get != 0) or set one field of cached PDB RCP record @index.
+ * Special write-only pseudo-fields:
+ *  - HW_PDB_RCP_PRESET_ALL: memset the record with the low byte of *@value
+ *  - HW_PDB_RCP_FIND:       locate a record equal to record *@value
+ *  - HW_PDB_RCP_COMPARE:    compare record @index against record *@value
+ * (find/compare helpers are defined in flow_api_backend.h — not visible
+ * here; exact semantics to be confirmed there.)
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write @value into one field of cached PDB RCP record @index. */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached PDB RCP record @index into *@value. */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached PDB config record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set one field of the cached PDB config record (write-only; no matching
+ * getter exists for this record). Flushed separately via
+ * hw_mod_pdb_config_flush().
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * Shadow layout of one PDB v9 RCP record. Signed *_rel fields are relative
+ * offsets handled via get_set_signed() in hw_mod_pdb.c.
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* Shadow of the single PDB v9 config record. */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table pointers into the PDB v9 shadow cache (allocated as one area). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA image. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Read the QSL module version and resource counts from the backend and
+ * allocate the shadow cache (RCP, QST, QEN and UNMQ tables).
+ * Returns 0 on success, -1 on allocation failure, or a negative error
+ * code for unsupported versions / bad resource counts.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* one contiguous allocation; freed in hw_mod_qsl_free() */
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL shadow cache allocated by hw_mod_qsl_alloc(). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;	/* guard against double free */
+	}
+}
+
+/*
+ * Zero the entire QSL shadow cache and flush all tables to hardware.
+ * Returns the OR of the flush results (0 only if all succeeded), matching
+ * the convention of hw_mod_pdb_reset(); the flush return values were
+ * previously discarded and 0 returned unconditionally. The UNMQ flush
+ * now uses QSL_QNMQ_ENTRIES instead of a magic 256.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	err |= be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0,
+					 QSL_QNMQ_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush @count cached QSL RCP records starting at @start_idx to hardware.
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get (@get != 0) or set one field of cached QSL RCP record @index.
+ * Write-only pseudo-fields: PRESET_ALL (memset record with low byte of
+ * *@value), FIND and COMPARE (delegate to the shared record helpers
+ * defined in flow_api_backend.h).
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write @value into one field of cached QSL RCP record @index. */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL RCP record @index into *@value. */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush @count cached QSL QST entries starting at @start_idx to hardware.
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get (@get != 0) or set one field of cached QSL QST entry @index.
+ * HW_QSL_QST_PRESET_ALL memsets the entry and is write-only.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write @value into one field of cached QSL QST entry @index. */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL QST entry @index into *@value. */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush @count cached QSL QEN entries starting at @start_idx to hardware.
+ * The QEN table has a fixed size of QSL_QEN_ENTRIES.
+ */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/* Get (@get != 0) or set the enable field of cached QSL QEN entry @index. */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write @value into one field of cached QSL QEN entry @index. */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL QEN entry @index into *@value. */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush @count cached QSL UNMQ entries starting at @start_idx to hardware.
+ * The UNMQ table has a fixed size of QSL_QNMQ_ENTRIES.
+ */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/* Get (@get != 0) or set one field of cached QSL UNMQ entry @index. */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write @value into one field of cached QSL UNMQ entry @index. */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL UNMQ entry @index into *@value. */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* Shadow layout of one QSL v7 RCP record. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* Shadow of one QSL v7 queue selection table (QST) entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* Shadow of one QSL v7 queue enable (QEN) entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* Shadow of one QSL v7 unmatched-queue (UNMQ) entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table pointers into the QSL v7 shadow cache (allocated as one area). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA image. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Read the RMC module version from the backend and allocate the shadow
+ * cache (a single control record). Version 1.3 is encoded as 0x10003
+ * (major 1, minor 3). Returns 0 on success, -1 on allocation failure,
+ * or a negative error code for an unsupported version.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC shadow cache allocated by hw_mod_rmc_alloc(). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;	/* guard against double free */
+	}
+}
+
+/*
+ * Zero the RMC shadow cache and program a safe default control state:
+ * block statistics and keep-alive traffic, block all MAC ports and all
+ * RPP slices, then flush the control record to hardware.
+ * (The original issued the HW_RMC_BLOCK_STATT write twice; the duplicate
+ * call is removed — the remaining writes are unchanged.)
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the cached RMC control record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Get (@get != 0) or set one field of the cached RMC control record.
+ * Version 1.3 is encoded as 0x10003.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write @value into one field of the cached RMC control record. */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one field of the cached RMC control record into *@value. */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/*
+ * Shadow of the RMC CTRL register fields, module version 1.3.
+ * Field semantics follow the FPGA register layout; see hw_mod_rmc.c for
+ * the reset defaults (block flags are set to 1, i.e. blocking, at init).
+ */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;		/* block statistics traffic */
+	uint32_t block_keepa;		/* block keep-alive traffic */
+	uint32_t block_rpp_slice;	/* NOTE(review): per-slice block mask — confirm */
+	uint32_t block_mac_port;	/* per-port block mask (0xff blocks all at init) */
+	uint32_t lag_phy_odd_even;
+};
+
+/* Version 1.3 view of the RMC module cache. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA module exists in this FPGA image. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query the ROA module version and resource counts from the backend and
+ * allocate the shadow cache for all ROA tables in one contiguous block.
+ * Returns 0 on success, negative on unsupported version or allocation
+ * failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): assumes four ROA categories share one tunnel entry —
+	 * confirm against the register map
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		/* one allocation covering tunhdr, tuncfg, config and lagcfg */
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the ROA shadow cache allocated by hw_mod_roa_alloc(). */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset the ROA shadow cache to defaults and flush every table to HW.
+ * All flush/set results are accumulated (as in hw_mod_tpe_reset) so a
+ * failure in any step is reported instead of being silently dropped.
+ * Returns 0 on success, non-zero otherwise.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush [start_idx, start_idx + count) TUNHDR entries to the hardware. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one 32-bit word (word_off) of the cached tunnel header for
+ * category 'index'.  get != 0 reads, get == 0 writes.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one tunnel-header word in the shadow cache (no HW flush). */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Get one tunnel-header word from the shadow cache. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) TUNCFG entries to the hardware. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one TUNCFG field of category 'index' in the shadow cache.
+ * PRESET_ALL fills the whole record with the byte value (set only);
+ * FIND/COMPARE scan the table for matching records.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one TUNCFG field in the shadow cache (no HW flush). */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Get one TUNCFG field from the shadow cache. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Flush the single ROA CONFIG record to the hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one ROA CONFIG field in the shadow
+ * cache.  Only module version 6 is supported.
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one ROA CONFIG field in the shadow cache (no HW flush). */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Get one ROA CONFIG field from the shadow cache. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) LAGCFG entries to the hardware. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/* Read/write the single LAGCFG field of entry 'index' in the shadow cache. */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one LAGCFG field in the shadow cache (no HW flush). */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Get one LAGCFG field from the shadow cache. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* One cached tunnel header: 16 x 32-bit words (64 bytes) per category. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration shadow (ROA TUNCFG register). */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;		/* update IP checksum */
+	uint32_t ipcs_precalc;		/* precalculated IP checksum */
+	uint32_t iptl_upd;		/* update IP total-length */
+	uint32_t iptl_precalc;		/* precalculated IP total-length */
+	uint32_t vxlan_udp_len_upd;	/* update VXLAN UDP length */
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Global ROA forwarding configuration shadow (single record). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry (ROA_LAGCFG_ENTRIES of these). */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Version 6 view of the ROA module cache; arrays sized in hw_mod_roa_alloc. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC module exists in this FPGA image. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query the SLC module version and allocate the RCP shadow cache
+ * (one record per flow category).  Only version 1 is supported.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC shadow cache allocated by hw_mod_slc_alloc(). */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Zero the SLC shadow cache and flush every RCP record to the hardware. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) SLC RCP records to the hardware. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one SLC RCP field of record 'index'
+ * in the shadow cache.  PRESET_ALL/FIND/COMPARE operate on whole records.
+ *
+ * Bug fix: the record operations must use the size of one RCP record
+ * (struct slc_v1_rcp_s), not sizeof(struct hw_mod_slc_v1_s), which is
+ * merely the container holding the rcp pointer.  Using the container
+ * size gave memset/find/compare the wrong length and stride.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one SLC RCP field in the shadow cache (no HW flush). */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one SLC RCP field from the shadow cache. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR module exists in this FPGA image. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query the SLC LR module version and allocate the RCP shadow cache
+ * (one record per flow category).  Only version 2 is supported.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC LR shadow cache allocated by hw_mod_slc_lr_alloc(). */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Zero the SLC LR shadow cache and flush every RCP record to the hardware. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) SLC LR RCP records to the hardware. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one SLC LR RCP field of record
+ * 'index' in the shadow cache.  PRESET_ALL/FIND/COMPARE operate on whole
+ * records.
+ *
+ * Bug fix: the record operations must use the size of one RCP record
+ * (struct slc_lr_v2_rcp_s), not sizeof(struct hw_mod_slc_lr_v2_s), which
+ * is merely the container holding the rcp pointer.  Using the container
+ * size gave memset/find/compare the wrong length and stride.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one SLC LR RCP field in the shadow cache (no HW flush). */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one SLC LR RCP field from the shadow cache. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* One SLC LR recipe record: tail-slicing parameters per flow category. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 2 view of the SLC LR module cache. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* One SLC recipe record: tail-slicing parameters per flow category. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 1 view of the SLC module cache. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE module exists in this FPGA image. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query the TPE module version and resource counts from the backend and
+ * allocate the shadow cache for all TPE sub-tables in one contiguous
+ * block.  IFR tables only exist from version 2 on.  Returns 0 on
+ * success, negative on unsupported version, bad resource count or
+ * allocation failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		/* v1: rpp/ins/rpl/cpy/hfu/csu tables, no IFR */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		/* v2 adds the rpp_ifr and ifr tables */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the TPE shadow cache allocated by hw_mod_tpe_alloc(). */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Zero the TPE shadow cache and flush every sub-table to the hardware.
+ * Errors are OR-accumulated so no single failed flush is lost; IFR
+ * tables are only flushed for module version 2.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) RPP_IFR RCP records to the HW. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/* Read/write one RPP_IFR RCP field in the shadow cache (v2 only). */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPP_IFR RCP field in the shadow cache (no HW flush). */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPP_IFR RCP field from the shadow cache. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) RPP RCP records to the hardware. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read/write one RPP RCP field of record 'index' in the shadow cache.
+ * v1 and v2 share the layout, so the v1 view is used for both.
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPP RCP field in the shadow cache (no HW flush). */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPP RCP field from the shadow cache. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) IFR RCP records to the hardware. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/* Read/write one IFR RCP field in the shadow cache (v2 only). */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one IFR RCP field in the shadow cache (no HW flush). */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one IFR RCP field from the shadow cache. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single INS RCP field at 'index' (see hw_mod_tpe_ins_rcp_mod). */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single INS RCP field at 'index' into *value. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush 'count' RPL RCP shadow entries, starting at 'start_idx', to the
+ * backend. ALL_ENTRIES expands to every RCP category.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write a single field of the RPL RCP shadow record at 'index'.
+ * get != 0 copies the field into *value; get == 0 writes *value into the
+ * record. HW_TPE_PRESET_ALL memsets the whole record (write only).
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single RPL RCP field at 'index' (see hw_mod_tpe_rpl_rcp_mod). */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single RPL RCP field at 'index' into *value. */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush 'count' RPL EXT shadow entries, starting at 'start_idx', to the
+ * backend. ALL_ENTRIES expands to every RPL extension category.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write a single field of the RPL EXT shadow record at 'index'.
+ * get != 0 copies the field into *value; get == 0 writes *value into the
+ * record. HW_TPE_PRESET_ALL memsets the whole record (write only).
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single RPL EXT field at 'index' (see hw_mod_tpe_rpl_ext_mod). */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Read a single RPL EXT field at 'index' into *value. */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush 'count' RPL replacement-data entries, starting at 'start_idx', to
+ * the backend. ALL_ENTRIES expands to the full replacement table depth.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write the RPL replacement-data record at 'index'.
+ * Unlike the other _mod helpers, HW_TPE_RPL_RPL_VALUE transfers the whole
+ * 4-word (16-byte) value array, so 'value' must point to 4 uint32_t words.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* whole 16-byte replacement entry, not a scalar */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write RPL replacement data at 'index'. For HW_TPE_RPL_RPL_VALUE 'value'
+ * must point to 4 uint32_t words (passed through, not taken by value).
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Read RPL replacement data at 'index'; *value receives 4 uint32_t words. */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush 'count' CPY RCP shadow entries, starting at 'start_idx', to the
+ * backend. The CPY table is two-dimensional: one recipe set per copy
+ * writer, hence nb_cpy_writers * nb_rcp_categories entries in total.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write a single field of the CPY RCP shadow record at 'index'.
+ * 'index' addresses the flattened writers-by-categories table (see
+ * hw_mod_tpe_cpy_rcp_flush). get != 0 reads into *value, get == 0 writes.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single CPY RCP field at 'index' (see hw_mod_tpe_cpy_rcp_mod). */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single CPY RCP field at 'index' into *value. */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush 'count' HFU RCP shadow entries, starting at 'start_idx', to the
+ * backend. ALL_ENTRIES expands to every RCP category.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write a single field of the HFU RCP shadow record at 'index'.
+ * get != 0 copies the field into *value; get == 0 writes *value into the
+ * record. Field groups below mirror struct tpe_v1_hfu_v1_rcp_s: length
+ * fields A/B/C, TTL, then checksum/protocol info and layer offsets.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* length field A */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* length field B */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* length field C */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL field */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* checksum/protocol info and layer offsets */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single HFU RCP field at 'index' (see hw_mod_tpe_hfu_rcp_mod). */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single HFU RCP field at 'index' into *value. */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush 'count' CSU RCP shadow entries, starting at 'start_idx', to the
+ * backend. ALL_ENTRIES expands to every RCP category.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write a single field of the CSU RCP shadow record at 'index'.
+ * get != 0 copies the field into *value; get == 0 writes *value into the
+ * record. Fields select the checksum command per outer/inner L3/L4 layer.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single CSU RCP field at 'index' (see hw_mod_tpe_csu_rcp_mod). */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single CSU RCP field at 'index' into *value. */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/* RPP v0 recipe shadow record (TPE v1). */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS v1 recipe shadow; fields match the HW_TPE_INS_RCP_* accessors. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL v2 recipe shadow; fields match the HW_TPE_RPL_RCP_* accessors. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL v2 extension entry; fields match the HW_TPE_RPL_EXT_* accessors. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL v2 replacement data: one 16-byte entry (4 x 32-bit words). */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY v1 recipe shadow; fields match the HW_TPE_CPY_RCP_* accessors. */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* HFU v1 recipe shadow; fields match the HW_TPE_HFU_RCP_* accessors. */
+struct tpe_v1_hfu_v1_rcp_s {
+	/* length field A */
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	/* length field B */
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	/* length field C */
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	/* TTL field */
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	/* checksum/protocol info and layer offsets */
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU v0 recipe shadow; fields match the HW_TPE_CSU_RCP_* accessors. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/*
+ * TPE module shadow state, version 1: one dynamically sized table per
+ * sub-module (allocation is done elsewhere).
+ */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP v1 IFR recipe shadow: enable flag and MTU value. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR v1 recipe shadow: enable flag and MTU value. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/*
+ * TPE module shadow state, version 2: same tables as v1 plus the two IFR
+ * recipe tables at the end.
+ */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian scalar aliases used by the wire-format structs below. */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+/* Max flow_elem entries stored inside a raw encap/decap action. */
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Queue identifier pair: logical queue id and corresponding hardware id. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Match-item types; mirror the corresponding RTE_FLOW_ITEM_TYPE_* values. */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather a restoration API device
+	 * specific extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather a restoration
+	 * API device specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+#pragma pack(1)
+/* 48-bit Ethernet MAC address, packed (no padding). */
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format a MAC address as "XX:XX:XX:XX:XX:XX" (upper-case hex) into 'buf'.
+ * 'size' is the capacity of 'buf'; snprintf truncates and NUL-terminates
+ * if the buffer is too small.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * Wire-format protocol headers. All are wrapped in #pragma pack(1) so the
+ * struct layout matches the on-wire byte layout with no padding.
+ */
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl; /* IP version and header length (IHL) */
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/*
+ * One match element of a flow pattern, analogous to rte_flow_item:
+ * 'spec' and 'mask' point to the same flow_elem_* struct type selected
+ * by 'type'; 'mask' selects which bits of 'spec' must match.
+ */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+/* RSS action configuration (conf for FLOW_ACTION_TYPE_RSS). */
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	/* parsed representation of 'data', up to RAW_ENCAP_DECAP_ELEMS_MAX */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	/* parsed representation of 'data', up to RAW_ENCAP_DECAP_ELEMS_MAX */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * NOTE(review): appears to mirror DPDK's enum rte_flow_field_id; keep the
+ * order in sync with the rte_flow values it is translated from.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Either a packet field (level/offset), an immediate value, or a pointer —
+ * which union member is valid depends on "field".
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* SET/ADD/SUB applied as dst op= src */
+	struct flow_action_modify_data dst; /* Destination packet field */
+	struct flow_action_modify_data src; /* Source field, immediate or pointer */
+	uint32_t width; /* Number of bits to transfer */
+};
+
+/* One entry of a flow action list; the list end marker is type-specific. */
+struct flow_action {
+	enum flow_action_type type; /* Discriminates the type pointed to by conf */
+	const void *conf; /* Points to the matching flow_action_xxx struct; may be NULL for conf-less actions */
+};
+
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Error report filled in by the flow API calls below. */
+struct flow_error {
+	enum flow_error_e type; /* FLOW_ERROR_SUCCESS on success, FLOW_ERROR_GENERAL otherwise */
+	const char *message; /* Points to a static message string; caller must not free it */
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Program one LAG table entry */
+	FLOW_LAG_SET_ALL, /* Program one slot of every 4-entry hash block */
+	FLOW_LAG_SET_BALANCE, /* Distribute output between two ports (value = percentage) */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		/* Same storage as v6, viewed as two 64-bit words per address */
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Selects v4 or v6 view — presumably 4 or 6; confirm against caller */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH 6/8] net/ntnic: adds flow logic
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (3 preceding siblings ...)
  2023-08-16 13:25 ` [PATCH 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-16 13:25 ` Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
                   ` (15 subsequent siblings)
  20 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 387481bb4a..a9892615c9 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -60,8 +60,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -80,6 +82,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug names for the flow resource types, indexed by enum res_type_e
+ * (see dbg_res_descr[res_type] in the ref/deref logging below).
+ * The entry order must match that enum exactly.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global NIC device list; guarded by base_mtx (see lag_* below). */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Human readable error messages, indexed by enum flow_nic_err_msg_e.
+ * The entry order must match that enum (used by flow_nic_set_error()).
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (error) {
+		error->message = err_msg[msg].message;
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one resource of the given type.  The search starts at index 0
+ * and advances in steps of "alignment", so the returned index is always a
+ * multiple of the alignment.  Returns the index, or -1 when exhausted.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	const unsigned int n_count = ndev->res[res_type].resource_count;
+
+	for (unsigned int idx = 0; idx < n_count; idx += alignment) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			continue;
+
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return (int)idx;
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific resource index "idx" of the given type.
+ * Returns 0 on success, -1 if that index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (flow_nic_is_resource_used(ndev, res_type, idx))
+		return -1;
+
+	flow_nic_mark_resource_used(ndev, res_type, idx);
+	ndev->res[res_type].ref[idx] = 1;
+	return 0;
+}
+
+/*
+ * Allocate "num" contiguous resources of the given type, starting at an
+ * index that is a multiple of "alignment".
+ * Returns the first allocated index, or -1 if no contiguous run is free.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/*
+	 * Reject an empty request and requests larger than the pool.  Without
+	 * this guard the unsigned loop bound "resource_count - (num - 1)"
+	 * wraps around when num > resource_count, and the scan would run far
+	 * out of bounds.
+	 */
+	if (num == 0 || num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Release one resource index: only the "used" bit is cleared here; the
+ * reference counter is handled by the caller (see flow_nic_deref_resource()).
+ */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an extra reference on an already-allocated resource.
+ * Returns 0 on success, -1 if the counter is saturated.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	uint32_t *p_ref = &ndev->res[res_type].ref[index];
+
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, *p_ref);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+
+	/* Refuse to wrap a saturated reference counter. */
+	if (*p_ref == (uint32_t)-1)
+		return -1;
+
+	*p_ref += 1;
+	return 0;
+}
+
+/*
+ * Drop one reference; frees the resource when the count reaches zero.
+ * Returns non-zero while references remain, 0 when the resource was freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	uint32_t *p_ref = &ndev->res[res_type].ref[index];
+
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, *p_ref);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(*p_ref > 0);
+
+	*p_ref -= 1;
+	if (*p_ref == 0)
+		flow_nic_free_resource(ndev, res_type, index);
+
+	return *p_ref != 0;
+}
+
+/*
+ * Find the first in-use resource at or above idx_start.
+ * Returns its index, or -1 when none is found.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	const unsigned int n_count = ndev->res[res_type].resource_count;
+
+	for (unsigned int idx = idx_start; idx < n_count; idx++) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			return (int)idx;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	/* More than one element must be allocated as a contiguous run. */
+	const int idx = (count > 1) ?
+			flow_nic_alloc_resource_contig(ndev, res_type, count,
+						       alignment) :
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+
+	fh->resource[res_type].index = idx;
+	if (idx < 0)
+		return -1;
+
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim a specific resource index for a flow handle.
+ * Returns 0 on success, non-zero on failure.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	const int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err != 0)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	if (idx < 0)
+		return -1;
+
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Preset hash recipe "hsh_idx" with one of the predefined algorithms.
+ * HASH_ALGO_5TUPLE programs an IPv6-sized 5-tuple hash (QW0/QW4 loaded from
+ * DYN_FINAL_IP_DST at offsets -16/0, W8 from DYN_L4) and sets the adaptive
+ * IPv4 mask bit so IPv4 frames hash consistently as well.  Any other value
+ * leaves the recipe cleared (round-robin).
+ * NOTE(review): hw_mod_hsh_rcp_set() return codes are ignored here, unlike
+ * in flow_nic_set_hasher_fields(); the function always returns 0.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	/* Start from a cleared recipe, then apply the chosen algorithm. */
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Words 0-8 take part in the hash, word 9 is masked out. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure hash recipe "hsh_idx" from an RSS field-flag combination.
+ * Only the exact flag combinations below are supported; anything else fails.
+ * Returns 0 on success, -1 on unsupported flags or backend write failure.
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN
+		 * provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		/*
+		 * Bug fix: the return codes of these writes were previously
+		 * discarded, so the error check below could never fire.
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx,
+					  0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx,
+					  0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		/* Bug fix: the success log wrongly reported "outer dst IP". */
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the eth-dev for (adapter_no, port).
+ * Returns NULL when the adapter or the port is unknown.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	/* Locate the adapter first ... */
+	for (struct flow_nic_dev *nic_dev = dev_base; nic_dev;
+			nic_dev = nic_dev->next) {
+		if (nic_dev->adapter_no != adapter_no)
+			continue;
+
+		/* ... then the port device on that adapter. */
+		for (struct flow_eth_dev *dev = nic_dev->eth_base; dev;
+				dev = dev->next) {
+			if (dev->port == port)
+				return dev;
+		}
+		return NULL;
+	}
+
+	return NULL;
+}
+
+/* Find the NIC device registered for the given adapter number, or NULL. */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	for (struct flow_nic_dev *ndev = dev_base; ndev != NULL;
+			ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			return ndev;
+	}
+	return NULL;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged
+	 * together and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Program one ROA LAG table entry and flush it to hardware immediately. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Apply a LAG configuration command to the adapter's ROA LAG table.
+ * cmd selects: a single entry write, a write of one slot in every 4-entry
+ * hash block, or a percentage-based balance between two output ports.
+ * Returns 0 on success, -1 on unknown adapter or unknown command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		/* Program a single LAG table entry. */
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		/* Write "value" to slot (index & 3) of every 4-entry block. */
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* NOTE(review): entry values 1/2 presumably select physical
+		 * port 0/1 respectively — confirm against the ROA spec.
+		 */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	/* Only the inline profile is implemented; reject vSwitch devices. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	/* Flow creation is implemented by the inline profile only. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	/* Flow destruction is implemented by the inline profile only. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	/* Flushing all flows is implemented by the inline profile only. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	/* Flow queries are implemented by the inline profile only. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Push a newly created eth-port device onto the NIC's port list head. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	struct flow_eth_dev *old_head = ndev->eth_base;
+
+	dev->next = old_head;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink eth_dev from the NIC's port list.
+ * Returns 0 on success, -1 if the device is not on the list.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link;
+
+	for (link = &ndev->eth_base; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on a NIC device: every eth-port device,
+ * any flows that are still defined, the profile-specific flow management
+ * state, and the KM/KCC resource managers.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+
+		/* Already inside #ifdef FLOW_DEBUG - nested guard removed */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/* Reset all flow state of an adapter and its backend; -1 if the adapter is unknown. */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (ndev == NULL)
+		return -1;
+
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/* Drop base_mtx while deleting; flow_delete_eth_dev takes ndev->mtx */
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		/*
+		 * Re-acquire base_mtx - all exit paths below unlock it, so it
+		 * must be held from here on (was previously left unlocked).
+		 */
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/*
+		 * base_mtx is still held here; release it before jumping to
+		 * err_exit1, which does not unlock any mutexes (fixes a
+		 * mutex leak on this path).
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Enable each allocated queue; QEN holds 4 enable bits per entry */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	if (eth_dev)
+		free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/* Record an externally allocated queue on the port device and enable it in QSL QEN. */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	const int qno = eth_dev->num_queues;
+	struct flow_api_backend_s *be = &eth_dev->ndev->be;
+	uint32_t enable_bits = 0;
+
+	eth_dev->rx_queue[qno].id = queue_id->id;
+	eth_dev->rx_queue[qno].hw_id = queue_id->hw_id;
+	eth_dev->num_queues = qno + 1;
+
+	/* Each QEN register entry holds enable bits for four queues */
+	hw_mod_qsl_qen_get(be, HW_QSL_QEN_EN, queue_id->hw_id / 4,
+			   &enable_bits);
+	hw_mod_qsl_qen_set(be, HW_QSL_QEN_EN, queue_id->hw_id / 4,
+			   enable_bits | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy its flows, disable its queues in
+ * QSL, free queue resources and unlink it from the owning NIC device.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/* Release the device mutex before bailing out
+				 * (was previously returned while still held).
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Disable each of the device's queues in the QEN table */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper delegating tunnel lookups to the tunnel engine. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	int result = tunnel_get_definition(tun, flow_stat_id, vport);
+
+	return result;
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPGA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate the allocation bitmap and ref-counter array for one resource
+ * type in a single calloc'ed block. Returns 0 on success, -1 on OOM.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	struct hw_mod_resource_s *res = &ndev->res[res_type];
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+
+	assert(res->alloc_bm == NULL);
+	/* One allocation: bitmap first, then per-element ref counters */
+	res->alloc_bm = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (res->alloc_bm == NULL)
+		return -1;
+
+	res->ref = (uint32_t *)&res->alloc_bm[bm_bytes];
+	res->resource_count = count;
+	return 0;
+}
+
+/* Free the bitmap/ref-counter block for one resource type. */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no pointer test is needed */
+	free(ndev->res[res_type].alloc_bm);
+	/* ref points into the same allocation - clear both to avoid dangling use */
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+}
+
+/* Prepend the NIC device to the global device list under the base lock. */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	struct flow_nic_dev *old_head;
+
+	pthread_mutex_lock(&base_mtx);
+	old_head = dev_base;
+	ndev->next = old_head;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink ndev from the global device list under the base lock.
+ * Returns 0 on success, -1 if the device is not on the list.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	struct flow_nic_dev **link;
+	int rc = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	for (link = &dev_base; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			rc = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return rc;
+}
+
+/*
+ * Create and register the per-adapter flow device.
+ * Initializes the backend, sizes every resource bitmap from the backend's
+ * reported capabilities and inserts the new device in the global NIC list.
+ * Returns the new device, or NULL on failure (partially built state is
+ * torn down via flow_api_done()).
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* Only backend interface version 1 is supported */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* Cap the number of addressable in-ports at 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* flow_api_done() tolerates a partially initialized ndev and frees it */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/* Tear down a flow device: flows/ports, resource tables, backend, list entry. */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (!ndev)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int i = 0; i < RES_COUNT; i++)
+		done_resource_elements(ndev, i);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the backend-specific device handle, or NULL for a NULL ndev. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev == NULL) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+	return ndev->be.be_dev;
+}
+
+/* Number of RX queues on a port device, or -1 if the adapter/port is unknown. */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* Guard against unknown adapter/port instead of dereferencing NULL */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/* HW queue id for a port's queue index, or -1 if port or index is invalid. */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* Guard against unknown port or out-of-range queue index */
+	if (!eth_dev || queue_no >= eth_dev->num_queues)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/* FLM statistics are only available on the inline profile; -1 otherwise. */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..f4b86899a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/* Per-resource-type allocation state kept by a NIC device (one per res_type_e). */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap, one bit per element */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+/* Bit positions mirror DPDK's RTE_ETH_RSS_* flags - keep in sync */
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+/* Convenience mask covering all IP-based RSS field selections above */
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields; /* bitwise OR of the NT_ETH_RSS_* flags above */
+};
+
+/* State of one eth-port device opened on a NIC (created by flow_get_eth_dev()). */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next port device on the same NIC */
+};
+
+/* RSS hash algorithm selection, used by flow_nic_set_hasher(). */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* Per-adapter flow device; one entry in the global list of registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* set once profile flow management is initialized */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle; /* opaque handle - presumably FLM resource state; see engine */
+	void *km_res_handle; /* KM resource state (freed via km_free_ndev_resource_management) */
+	void *kcc_res_handle; /* KCC resource state (freed via kcc_free_ndev_resource_management) */
+
+	/* opaque handles owned by the profile/engine modules */
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* protects the per-NIC lists and resources above */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/* Error codes reported through flow_nic_set_error(); values are part of the API. */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPHY_FAILED = 25, /* sic - misspelling of "entropy" kept as API */
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+/* Set bit x in byte-array bitmap arr; both arguments are evaluated only once. */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/*
+ * Clear bit x in byte-array bitmap arr. The arr argument is captured in a
+ * temporary (like flow_nic_set_bit) so it is parenthesized and evaluated
+ * exactly once.
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/*
+ * Non-zero when bit x is set in bitmap arr. arr is captured in a temporary
+ * so it is parenthesized and evaluated exactly once, consistent with
+ * flow_nic_set_bit.
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Assert a resource element is free, then mark it allocated (debug-logged). */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/* Clear the allocation bit of a resource element (debug-logged). */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* Non-zero (normalized to 0/1) when the element's allocation bit is set. */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..782988554b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable/disable a KM flow type for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* Each FTE entry holds one enable bit per CFN in a bank of cat_funcs CFNs */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned shift: "1 << 31" on a signed int is undefined behavior */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Only write and flush when the bitmap actually changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable/disable an FLM flow type for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* Each FTE entry holds one enable bit per CFN in a bank of cat_funcs CFNs */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned shift: "1 << 31" on a signed int is undefined behavior */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Only write and flush when the bitmap actually changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/* Map a logical RX queue id to its hardware queue id; -1 when unknown. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int i = 0;
+
+	while (i < dev->num_queues) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+		++i;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Preset FLM control and wait for the DDR4 SDRAM calibration-done flag.
+ * On success the flow timeout and scrubber interval get their initial
+ * values. Returns 0 on success, -1 if calibration never completes.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Disable FLM lookups, wait for the engine to go idle, run the SDRAM
+ * initialization sequence and finally set HW_FLM_CONTROL_ENABLE to
+ * "enable". Returns 0 on success, -1 on timeout in either wait loop.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Reset every FLM recipe except recipe 0 */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Identity of an FLM flow-type (FT) action set. All action attributes
+ * are packed into a single 64-bit word so two action sets can be
+ * compared (and cached/reused) by comparing the "data" member.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;	/* slot occupied */
+			uint64_t drop : 1;	/* set when there is no destination */
+			uint64_t ltx_en : 1;	/* TX to a physical port */
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;	/* deliver to a virtual queue */
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;	/* 0 = IPv4 tunnel, 1 = otherwise */
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data;	/* whole identity as one comparable word */
+	};
+};
+
+/*
+ * FLM RCP key layout: dynamic-part selector and byte offset for the two
+ * quad-words (QW0/QW4) and two single-words (SW8/SW9) of the lookup key,
+ * plus outer/inner protocol flags. Packed into one 64-bit word so whole
+ * layouts can be compared by value (see flm_flow_learn_prepare).
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data;	/* whole layout as one comparable word */
+	};
+};
+
+/*
+ * Build the 64-bit FT identity word from a flow definition: destinations,
+ * tunnel header, header-strip end and jump-to-group settings.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ident = { .data = 0 };
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ident.ltx_en = 1;
+				ident.ltx_port = fd->dst_id[i].id;
+				continue;
+			}
+			if (fd->dst_id[i].type == PORT_VIRT) {
+				ident.queue_en = 1;
+				ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ident.encap_len = fd->tun_hdr.len;
+		ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ident;
+}
+
+/* Record the dynamic part and offset for QW0 (qw == 0) or QW4 (qw == 1). */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw) {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->qw0_dyn = dyn & 0x7f;
+	key_def->qw0_ofs = ofs & 0xff;
+}
+
+/* Record the dynamic part and offset for SW8 (sw == 0) or SW9 (sw == 1). */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw) {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->sw8_dyn = dyn & 0x7f;
+	key_def->sw8_ofs = ofs & 0xff;
+}
+
+/*
+ * Per-group FLM state: the group 0 CFN and KM flow type used for miss
+ * handling, the key layout programmed into the group's RCP, and the
+ * table of flow-type (FT) action sets learned into the group.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;		/* -1 when no CFN has been assigned */
+	int km_ft_group0;
+	struct flow_handle *fh_group0;
+
+	struct flm_flow_key_def_s key_def;	/* must match for all flows in the group */
+
+	int miss_enabled;	/* set once group 0 miss handling is programmed */
+
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;	/* ident.data == 0 means free */
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	uint32_t cashed_ft_index;	/* most recently matched FT index (cache) */
+};
+
+/* One group state per FLM RCP */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset) the FLM flow handle and mark every group slot as
+ * unused (cfn_group0 == -1 means "no CFN assigned"). On allocation
+ * failure *handle is left NULL for the caller to detect.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+	/* Do not dereference a failed allocation */
+	if (!flm_handle)
+		return;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM flow handle and clear the caller's pointer. */
+static void flm_flow_handle_remove(void **handle)
+{
+	void *p = *handle;
+
+	*handle = NULL;
+	free(p);
+}
+
+/*
+ * Remember which CFN, KM flow type and flow handle implement group 0
+ * matching for the given FLM group. Fails on an out-of-range group.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	group = &flm_handle->groups[group_index];
+	group->cfn_group0 = cfn;
+	group->km_ft_group0 = km_ft;
+	group->fh_group0 = fh;
+	group->miss_enabled = 0;
+
+	return 0;
+}
+
+/* Clear all state of an FLM group and mark its CFN slot unused again. */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	group = &flm_handle->groups[group_index];
+	memset(group, 0x0, sizeof(struct flm_flow_group_s));
+	group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/* Fetch the flow handle that implements group 0 miss handling for a group. */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *group = &flm_handle->groups[group_index];
+
+	*fh_miss = group->fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program the FLM RCP (recipe) for a group: key word selection for
+ * QW0/QW4/SW8/SW9, the 320-bit lookup mask, the KID used to tag flows
+ * learned with this recipe, and protocol/byte-counter settings.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask words into the FLM mask layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* Group KIDs start at 2; KID 1 is used for meter learn records */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	/* NOTE(review): -20 byte-count offset presumably excludes L1 overhead - confirm */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down a group's FLM RCP. If group 0 miss handling was enabled for
+ * this group, also restore the group 0 CFN: point its RCP selection back
+ * to 0, swap FT MISS back to FT UNHANDLED and disable FLM for the CFN.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare learning a flow into an FLM group. On the group's first use,
+ * claim the FLM RCP resource, program the RCP key layout and mask,
+ * switch the group 0 CFN from FT UNHANDLED to FT MISS and enable FLM
+ * for it. Then find or allocate the flow-type (FT) action set matching
+ * the flow definition.
+ *
+ * Returns 0 on success with *kid and *ft set. When an identical action
+ * set already exists, *fh_excisting is set to its owner flow handle;
+ * otherwise *cfn_to_copy / *cfn_to_copy_km_ft tell the caller which CFN
+ * and KM flow type to copy for the new FT.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_excisting)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* First flow in this group: program the RCP and enable miss handling */
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in one group must share the same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: reuse the most recently matched FT when identical */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_excisting = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT 0 and 1 are reserved; scan for a match or a free slot */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_excisting = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the FT action-set slot owned by a flow handle and disable the
+ * corresponding KM and FLM flow types on its CFN again.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *group =
+			&flm_handle->groups[fh->flm_group_index];
+	int error = 0;
+
+	memset(&group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Meter profiles converted to the FLM dual-bucket encoding; rates and
+ * sizes are stored as 12-bit mantissa [11:0] plus shift [15:12]
+ * (see flow_mtr_set_profile).
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires an FLM block of variant 2 to be present. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* The number of meter policies equals the number of meter profiles. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return (uint64_t)FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a byte count (assumed to fit in 40 bits) into bucket size
+ * units of 2^40 / 10^9 bytes, rounding upwards.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Scale the two 20-bit halves by 10^9 separately to avoid overflow */
+	uint64_t lo = (value & 0xfffff) * 1000000000;
+	uint64_t hi = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t result = (hi >> 20) + (lo >> 40);
+
+	/* Round up when any remainder bits are left over */
+	if ((hi & 0xfffff) != 0 || (lo & 0xffffffffff) != 0)
+		result += 1;
+
+	return result;
+}
+
+/*
+ * Convert a meter profile's rates and sizes into the FLM dual-bucket
+ * encoding (12-bit mantissa in [11:0] plus shift in [15:12]) and cache
+ * the result for meters created later from this profile.
+ * Rates are in 128 bytes/sec units; sizes in 2^40 / 10^9 byte units.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round rate down to max rate supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/*
+ * Meter policies carry no device state in this implementation; the
+ * request is accepted as-is and nothing is programmed to hardware.
+ */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Upper bound on meter instances, limited by the statistics table size. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return (uint32_t)FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics. The MSB of n_pkt doubles as an update-in-progress
+ * marker (see flm_mtr_update_stats / flm_mtr_read_stats) so n_pkt and
+ * n_bytes can be read as a consistent pair. stats_mask == 0 marks a
+ * deleted meter whose counters must not be updated.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;	/* profile encoding; NULL after destroy */
+
+	volatile atomic_uint_fast64_t n_pkt;
+	volatile atomic_uint_fast64_t n_bytes;
+	uint64_t n_pkt_base;	/* subtracted on read; set by "clear" */
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask;
+};
+
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Write one learn record to the FLM learn FIFO. When the FIFO lacks
+ * space, drain pending inf records so the hardware can make progress,
+ * retrying at most FLM_PROG_MAX_RETRY times. Returns non-zero (1 on
+ * retry exhaustion, or the flush result) on failure.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached free-space counter before retrying */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain inf records to let the FLM free learn space */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter by writing an FLM learn record (op == 1) that installs
+ * bucket A of the given profile. On success the meter's stats slot is
+ * bound to the profile buckets and its stats_mask is armed.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Meter records use KID 1; SW9 carries mtr_id + 1 */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* 72-bit flow id: little-endian mtr_id with the top bit set */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter by writing an FLM learn record with op == 0 (delete).
+ * The local statistics slot is cleared first so concurrent stats updates
+ * see stats_mask == 0 and skip the meter.
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Meter records use KID 1; SW9 carries mtr_id + 1 */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust a meter's bucket fill level by writing an FLM learn record with
+ * op == 2 (relearn/update) carrying the adjustment value.
+ *
+ * NOTE(review): mtr_stat->buckets is dereferenced without a NULL check;
+ * calling this for a destroyed meter (buckets == NULL, see
+ * flow_mtr_destroy_meter) would crash - confirm callers guarantee the
+ * meter exists.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Meter records use KID 1; SW9 carries mtr_id + 1 */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ whole inf records from the
+ * FLM into "data". Caller must hold the ndev mutex. Returns the number
+ * of records read (0 when fewer than one whole record is available).
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached availability counter and re-check */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain FLM inf records and fold meter volume counters into the stats
+ * table. Returns the number of records processed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/*
+				 * Publish under the MSB "update in progress"
+				 * marker: flm_mtr_read_stats spins while the
+				 * n_pkt MSB is set, so n_bytes and n_pkt are
+				 * observed as a consistent pair.
+				 */
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read a meter's green packet/byte counters (relative to the last clear).
+ * Uses a seqlock-style retry: n_pkt is re-read after n_bytes and the pair
+ * is only accepted when n_pkt is stable and its MSB "update in progress"
+ * marker (set by flm_mtr_update_stats) is clear.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			/* Re-base so subsequent reads start from zero */
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipe 0 is reserved; port N uses recipe N + 1. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/*
+ * Translate a port_id to its physical port number by scanning the eth
+ * devices attached to the NIC; UINT8_MAX when the id is unknown.
+ */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	for (struct flow_eth_dev *dev = ndev->eth_base; dev; dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the front of the device's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/* Unlink a flow handle from the device's doubly linked flow list. */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *n = fh->next;
+	struct flow_handle *p = fh->prev;
+
+	if (p) {
+		p->next = n;
+		if (n)
+			n->prev = p;
+	} else if (n) {
+		/* fh was the list head */
+		ndev->flow_base = n;
+		n->prev = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* fh was the only element */
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN */
+	{
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+				   0, 0);
+		hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+	}
+
+	/* KM */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* FLM */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* CTE / CTS */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		if (cte) {
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	switch (fd->l4_prot) {
+	case PROT_L4_TCP:
+		fh->flm_prot = 6;
+		break;
+	case PROT_L4_UDP:
+		fh->flm_prot = 17;
+		break;
+	case PROT_L4_SCTP:
+		fh->flm_prot = 132;
+		break;
+	case PROT_L4_ICMP:
+		fh->flm_prot = 1;
+		break;
+	default:
+		switch (fd->tunnel_l4_prot) {
+		case PROT_TUN_L4_TCP:
+			fh->flm_prot = 6;
+			break;
+		case PROT_TUN_L4_UDP:
+			fh->flm_prot = 17;
+			break;
+		case PROT_TUN_L4_SCTP:
+			fh->flm_prot = 132;
+			break;
+		case PROT_TUN_L4_ICMP:
+			fh->flm_prot = 1;
+			break;
+		default:
+			fh->flm_prot = 0;
+			break;
+		}
+		break;
+	}
+
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+		switch (fd->modify_field[i].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[i].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			fh->flm_rqi = (fd->modify_field[i].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[i].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[i].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[i].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[i].value32[0]);
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		FLM_V17_MBR_ID1(learn_record.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(learn_record.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(learn_record.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(learn_record.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
/*
 * Allocate or reference the KM flow-type (FT) and KM category (RCP)
 * resources for a new flow.
 *
 * When no identical flow exists (identical_flow_found == 0) a KM FT is
 * reused or newly taken from the device's FT ident table, and the KM RCP
 * is either shared with 'found_flow' (a compatible, non-identical flow)
 * or freshly allocated; the new KM entry is then written to hardware.
 * When an identical flow exists, both its FT and RCP are reference-counted
 * and its match entry is shared.
 *
 * On success the outputs *setup_km, *setup_km_ft and *setup_km_rcp are set
 * for the caller's later CAT programming.  Returns 0 on success, 1 on
 * resource exhaustion (error is filled in; partially taken references are
 * left for the caller's cleanup path - TODO confirm).
 */
static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
	struct flow_handle *flow)
{
	if (!identical_flow_found) {
		/* Find existing KM FT that can be reused */
		{
			int found_ft = 0, found_zero = 0;

			struct flm_flow_ft_ident_s *ft_idents =
				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);

			/* Index 0 is skipped; remember the first free slot while scanning */
			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
				if (ft_ident.data == ft_idents[i].data) {
					found_ft = i;
					break;
				} else if (found_zero == 0 && ft_idents[i].data == 0) {
					found_zero = i;
				}
			}

			if (found_ft) {
				/* Same FT ident already in use - just add a reference */
				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
					       "KM FLOW TYPE resource\n");
					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
					return 1;
				}

				fh->resource[RES_KM_FLOW_TYPE].count = 1;
				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
			} else if (found_zero) {
				/* No match - claim the free slot found during the scan */
				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
				found_zero, fh)) {
					NT_LOG(ERR, FILTER, "ERROR: Could not get "
					       "KM FLOW TYPE resource\n");
					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
					return 1;
				}

				ft_idents[found_zero].data = ft_ident.data;
			} else {
				/* Table full */
				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
				return 1;
			}
		}
		/* Attach resources to KM entry */
		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;

		/* _update existing KM RCP or allocate a new RCP */
		if (found_flow != NULL) {
			/* Share the RCP of a compatible (but not identical) flow */
			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
				->resource[RES_KM_CATEGORY].index)) {
				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
				       "KM CATEGORY resource\n");
				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
				return 1;
			}

			fh->resource[RES_KM_CATEGORY].count = 1;
			fh->resource[RES_KM_CATEGORY].index =
				found_flow->resource[RES_KM_CATEGORY].index;
			fh->resource[RES_KM_CATEGORY].referenced = 1;

			if (fd->km.target == KM_CAM) {
				/* Add this FT to the shared recipe's FT mask (key A) */
				uint32_t ft_a_mask = 0;

				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
					fh->resource[RES_KM_CATEGORY].index, 0,
					ft_a_mask | (1 << fd->km.flow_type));
			}
		} else {
			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
				return 1;
			}

			/* Note: km_rcp_set clears existing RCPs */
			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
		}

		/* Set filter setup variables */
		*setup_km = 1;
		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;

		/* _flush KM RCP and entry */
		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);

		km_write_data_match_entry(&fd->km, 0);
	} else {
		/* Identical flow exists: reference its FT and RCP, share its entry */
		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
			return 1;
		}

		fh->resource[RES_KM_FLOW_TYPE].count = 1;
		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;

		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
			found_flow->resource[RES_KM_CATEGORY].index)) {
			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
			return 1;
		}

		fh->resource[RES_KM_CATEGORY].count = 1;
		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
		fh->resource[RES_KM_CATEGORY].referenced = 1;

		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;

		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);

		*setup_km = 1;
		/*
		 * NOTE(review): uses 'flow' here while the references above were
		 * taken on 'found_flow'.  At the only call site the loop breaks
		 * with flow == found_flow when identical_flow_found is set, so
		 * the values match - confirm before changing either name.
		 */
		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
	}
	return 0;
}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_excisting = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_excisting)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_excisting;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_excisting;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time, NIC-wide setup of inline-profile flow management: reserve the
+ * resource indexes the driver keeps for itself, write default recipes and
+ * the catch-all filter to the backend modules (CAT, QSL, PDB, HSH, COT,
+ * RMC, FLM), calibrate the FLM SDRAM and allocate the software bookkeeping
+ * handles.  Returns 0 on success, -1 on failure (cleanup is attempted via
+ * done_flow_management_of_ndev_profile_inline()).
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Disable all FLM status interrupts/records; only RBL is set */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Software bookkeeping; freed by the done-function */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Zero all per-meter statistic counters */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	/*
+	 * NOTE(review): the done-function only releases resources when
+	 * flow_mgnt_prepared is set; a failure after the calloc()s above but
+	 * before the flag is set appears to leak the three handles - confirm.
+	 */
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down what initialize_flow_management_of_ndev_profile_inline() set up:
+ * reset backend module recipes to their presets, free the software
+ * bookkeeping handles and release the reserved resource indexes.  A no-op
+ * unless flow_mgnt_prepared is set; also used as the initializer's error
+ * path.  Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/*
+		 * NOTE(review): the freed handles are not cleared to NULL;
+		 * re-entry is guarded by flow_mgnt_prepared being reset below.
+		 */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow specification without programming anything: run the
+ * interpreter over the elem/action lists and discard the result.
+ * Returns 0 if the specification is acceptable, -1 otherwise (error set
+ * by the interpreter).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+	struct nic_flow_def *fd;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* Interpretation happens under the device lock; no HW is touched. */
+	pthread_mutex_lock(&dev->ndev->mtx);
+	fd = interpret_flow_elements(dev, elem, action, error, 0, &port_id,
+				     &num_dest_port, &num_queues,
+				     packet_data, packet_mask, &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (fd == NULL)
+		return -1;
+
+	/* Validation only - the flow definition is discarded right away. */
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create a flow in the inline profile: interpret the elem/action lists,
+ * translate group IDs and program the resulting filter to the NIC.
+ * Returns the new flow handle, or NULL on error (error is filled in).
+ *
+ * Fix: the flow definition returned by interpret_flow_elements() was leaked
+ * when group translation failed; it is now freed on those error paths
+ * (ownership is only handed over once create_flow_filter() is called).
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	/* A forced VLAN ID is only honored in the default group */
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_free_fd;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_free_fd;
+	}
+
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC; fd ownership passes to the filter */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New flow: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_free_fd:
+	/* fd was never handed to create_flow_filter() - avoid leaking it */
+	free(fd);
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy one flow handle while the caller holds dev->ndev->mtx (see
+ * flow_destroy_profile_inline(), which takes the lock before calling here).
+ * FLM-managed flows are unprogrammed from the FLM and their shared owner is
+ * torn down when its reference count reaches zero; other flows walk their
+ * resource table and reset every backend module entry whose reference count
+ * drops to zero.  Returns 0 on success, non-zero if any step failed.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unprogram the flow from the FLM */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Release the TPE replacement extension and its data records
+		 * when this flow holds the last reference to them.
+		 */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* One RPL record covers 16 bytes of replacement data */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the shared owner flow when this was its last user */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Reset every backend entry whose last reference this flow held */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the software-side flow-type identity only */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe spans all TPE sub-modules */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy a single flow, or - when flow is NULL - every flow (regular and
+ * FLM) created through this eth device.  Takes the device lock and stops at
+ * the first failure.  Returns 0 on success, non-zero otherwise.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	if (flow) {
+		/* Delete just this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		struct flow_handle *cur;
+
+		/* Sweep the regular flow list for flows owned by this device */
+		cur = dev->ndev->flow_base;
+		while (cur && !err) {
+			struct flow_handle *next = cur->next;
+
+			if (cur->dev == dev)
+				err = flow_destroy_locked_profile_inline(dev,
+									 cur,
+									 NULL);
+			cur = next;
+		}
+
+		/* Sweep the FLM flow list the same way */
+		cur = dev->ndev->flow_base_flm;
+		while (cur && !err) {
+			struct flow_handle *next = cur->next;
+
+			if (cur->dev == dev)
+				err = flow_destroy_locked_profile_inline(dev,
+									 cur,
+									 NULL);
+			cur = next;
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/* Flush-all is not implemented for the inline profile; always fails. */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/* Flow query is not implemented for the inline profile; always fails with
+ * empty output (*data = NULL, *length = 0).
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	*data = NULL;
+	*length = 0;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into data[].  The flow count is reported
+ * as an absolute value; all other counters are accumulated onto the caller's
+ * previous values.  Returns -1 if size is smaller than the field list,
+ * 0 otherwise.  FLM versions below 18 stop after HW_FLM_STAT_PRB_IGNORE.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(fields[0]);
+
+	if (size < fields_cnt)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+
+		if (fields[i] == HW_FLM_STAT_FLOWS)
+			data[i] = value; /* absolute, not accumulated */
+		else
+			data[i] += value;
+
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the MTU for a port into both IFR recipes (RPP and TPE) and flush
+ * them to hardware.  Ports >= 255 are rejected with -1; otherwise the
+ * accumulated backend error code is returned (0 on success).
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	uint8_t rcp = convert_port_to_ifr_mtu_recipe(port);
+	int err = 0;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  rcp, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  rcp, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      rcp, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      rcp, mtu);
+
+	/* Only flush when every register write succeeded */
+	if (!err) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp, 1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the inline flow profile: lifecycle of the per-NIC
+ * flow management state and the rte_flow-style operations backing it.
+ * Guard renamed from _FLOW_API_PROFILE_INLINE_H_: identifiers starting
+ * with an underscore followed by an uppercase letter are reserved (C11
+ * 7.1.3 / CERT DCL37-C).
+ */
+#ifndef FLOW_API_PROFILE_INLINE_H_
+#define FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down flow management for ndev; safe as an init error path. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time setup of flow management for ndev; returns 0 or -1. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy one flow; caller must already hold the device lock. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Check a flow specification without programming hardware. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns the handle or NULL on error. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all of the device's flows when flow is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented yet; always fails. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented yet; always fails. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Read FLM statistics counters into data[]; see implementation for semantics. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: the debug mode plus one pointer to each
+ * module-level nthw driver instance.  Indexed by adapter number.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;          /* index of this adapter */
+	enum debug_mode_e dmode;     /* current debug mode, see set_debug_mode() */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Turn on module debug tracing when the backend or the module requests it,
+ * remembering the decision in a local flag that _CHECK_DEBUG_OFF() consumes
+ * to restore the previous state.  Deliberately NOT wrapped in
+ * do { } while (0): the macro declares a variable that must stay in the
+ * caller's scope.  The flag was renamed from __debug__, which is a reserved
+ * identifier (C11 7.1.3 / CERT DCL37-C).
+ * NOTE(review): the macro names themselves (_C...) also use the reserved
+ * leading-underscore pattern, but renaming them would touch every call site.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int ntnic_dbg_on_ = 0;                                         \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) { \
+		mod##_nthw_set_debug_mode(inst, 0xFF);                 \
+		ntnic_dbg_on_ = 1;                                     \
+	}
+
+/* Restore debug mode if _CHECK_DEBUG_ON() enabled it in this scope. */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (ntnic_dbg_on_)                       \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Store the requested debug mode on the backend device; always succeeds. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	((struct backend_dev_s *)be_dev)->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * INFO capability getters: each casts the opaque backend handle and
+ * forwards to the matching info_nthw accessor.
+ */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(bd->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(bd->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(bd->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(bd->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(bd->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(bd->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(bd->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(bd->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(bd->p_info_nthw);
+}
+
+/* KM / KCC capability getters: thin forwards onto info_nthw accessors. */
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(bd->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(bd->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(bd->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(bd->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(bd->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(bd->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(bd->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(bd->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(bd->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(bd->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *bd = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(bd->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_flm_categories(). */
+static int get_nb_flm_categories(void *be_dev)
+{
+	return info_nthw_get_nb_flm_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_flm_size_mb(). */
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	return info_nthw_get_nb_flm_size_mb(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_flm_entry_size(). */
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	return info_nthw_get_nb_flm_entry_size(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_flm_variant(). */
+static int get_nb_flm_variant(void *be_dev)
+{
+	return info_nthw_get_nb_flm_variant(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_flm_prios(). */
+static int get_nb_flm_prios(void *be_dev)
+{
+	return info_nthw_get_nb_flm_prios(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_flm_pst_profiles(). */
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	return info_nthw_get_nb_flm_pst_profiles(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_hst_categories(). */
+static int get_nb_hst_categories(void *be_dev)
+{
+	return info_nthw_get_nb_hst_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_qsl_categories(). */
+static int get_nb_qsl_categories(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_qsl_qst_entries(). */
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_qst_entries(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_pdb_categories(). */
+static int get_nb_pdb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_pdb_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_ioa_categories(). */
+static int get_nb_ioa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_ioa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_roa_categories(). */
+static int get_nb_roa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_roa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_tpe_categories(). */
+static int get_nb_tpe_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_tx_cpy_writers(). */
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_writers(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_tx_cpy_mask_mem(). */
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_mask_mem(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_tx_rpl_depth(). */
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_depth(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_tx_rpl_ext_categories(). */
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_ext_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Thin backend wrapper: forwards to info_nthw_get_nb_tpe_ifr_categories(). */
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_ifr_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when the CAT module handle was resolved for this backend device. */
+static bool cat_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_cat_nthw != NULL;
+}
+
+/* Pack the CAT module version as major in the upper and minor in the lower 16 bits. */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	int major = module_get_major_version(be->p_cat_nthw->m_cat);
+	int minor = module_get_minor_version(be->p_cat_nthw->m_cat);
+
+	return (uint32_t)((major << 16) | (minor & 0xffff));
+}
+
+/*
+ * Write 'cnt' CFN (categorizer function) records, starting at 'cat_func',
+ * from the shadow copy in 'cat' to the CAT module registers.
+ * Supports register layout versions 18, 21 and 22; other versions are
+ * silently ignored (returns 0 without touching hardware).
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* Fix: was the mangled call "r(be->p_cat_nthw, 1U);" — every
+		 * flush routine sets the register count to 1 before the loop.
+		 */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
+			/* Protocol check fields */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			/* Error check fields */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
+
+			/* Pattern matcher fields */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
+			/* v18 has a single KM interface */
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* Fix: was the mangled call "r(be->p_cat_nthw, 1U);" (see above). */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
+			/* Protocol check fields */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			/* Error check fields; v21 adds tunnel checksum/TTL checks */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
+
+			/* Pattern matcher fields */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
+			/* The second KM interface only exists on some FPGA images */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCE (KM category enable) records starting at 'index'. */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18: single KM interface, hard-wired to interface 0 */
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = index + idx;
+
+			cat_nthw_kce_select(be->p_cat_nthw, 0, adr);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[adr].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = index + idx;
+
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, adr);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[adr].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCS (KM category select) records starting at 'cat_func'. */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18: single KM interface, hard-wired to interface 0 */
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int fn = cat_func + idx;
+
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, fn);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[fn].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int fn = cat_func + idx;
+
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, fn);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[fn].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' FTE (flow type enable) records starting at 'index'. */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18: single KM interface, hard-wired to interface 0 */
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = index + idx;
+
+			cat_nthw_fte_select(be->p_cat_nthw, 0, adr);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[adr].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = index + idx;
+
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, adr);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[adr].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CTE (category enable) records, starting at 'cat_func', from
+ * the shadow copy in 'cat' to the CAT module registers. The v18 layout is
+ * shared by versions 18 and 21; v22 adds the RRB enable bit.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v22.cte[cat_func].b.tpe);
+			/* Fix: the original called cat_nthw_cte_enable_tpe() a
+			 * second time with b.rrb, which overwrote the TPE
+			 * enable and never programmed RRB. Route the RRB bit
+			 * through its own accessor.
+			 */
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw, cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CTS (category A/B select) records starting at 'index'.
+ * The v18 record layout is shared by versions 18, 21 and 22.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = index + idx;
+
+			cat_nthw_cts_select(be->p_cat_nthw, adr);
+			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[adr].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[adr].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' COT (color table) records starting at 'cat_func'.
+ * The v18 record layout is shared by versions 18, 21 and 22.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int fn = cat_func + idx;
+
+			cat_nthw_cot_select(be->p_cat_nthw, fn);
+			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[fn].color);
+			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[fn].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCT (color/KM control) records starting at 'index'.
+ * The v18 record layout is shared by versions 18, 21 and 22.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = index + idx;
+
+			cat_nthw_cct_select(be->p_cat_nthw, adr);
+			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[adr].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[adr].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' EXO (extractor offset) records starting at 'ext_index'.
+ * The v18 record layout is shared by versions 18, 21 and 22.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = ext_index + idx;
+
+			cat_nthw_exo_select(be->p_cat_nthw, adr);
+			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[adr].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[adr].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' RCK records starting at 'index'.
+ * The v18 record layout is shared by versions 18, 21 and 22.
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = index + idx;
+
+			cat_nthw_rck_select(be->p_cat_nthw, adr);
+			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[adr].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' LEN (length check) records starting at 'len_index'.
+ * The v18 record layout is shared by versions 18, 21 and 22.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = len_index + idx;
+
+			cat_nthw_len_select(be->p_cat_nthw, adr);
+			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[adr].lower);
+			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[adr].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[adr].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[adr].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[adr].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCC CAM records starting at 'len_index'.
+ * The v18 record layout is shared by versions 18, 21 and 22.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = len_index + idx;
+
+			cat_nthw_kcc_select(be->p_cat_nthw, adr);
+			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[adr].key);
+			cat_nthw_kcc_category(be->p_cat_nthw, cat->v18.kcc_cam[adr].category);
+			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[adr].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCE records starting at 'len_index'. CCE only exists in v22. */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = len_index + idx;
+
+			cat_nthw_cce_select(be->p_cat_nthw, adr);
+			cat_nthw_cce_data_imm(be->p_cat_nthw, cat->v22.cce[adr].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw, cat->v22.cce[adr].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCS records starting at 'len_index'. CCS only exists in v22.
+ * Each record carries an enable flag plus a value for every downstream
+ * block, written pairwise below.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = len_index + idx;
+
+			cat_nthw_ccs_select(be->p_cat_nthw, adr);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw, cat->v22.ccs[adr].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw, cat->v22.ccs[adr].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw, cat->v22.ccs[adr].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw, cat->v22.ccs[adr].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw, cat->v22.ccs[adr].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw, cat->v22.ccs[adr].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw, cat->v22.ccs[adr].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw, cat->v22.ccs[adr].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw, cat->v22.ccs[adr].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw, cat->v22.ccs[adr].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw, cat->v22.ccs[adr].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw, cat->v22.ccs[adr].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw, cat->v22.ccs[adr].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw, cat->v22.ccs[adr].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw, cat->v22.ccs[adr].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw, cat->v22.ccs[adr].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw, cat->v22.ccs[adr].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw, cat->v22.ccs[adr].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw, cat->v22.ccs[adr].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw, cat->v22.ccs[adr].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw, cat->v22.ccs[adr].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw, cat->v22.ccs[adr].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw, cat->v22.ccs[adr].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw, cat->v22.ccs[adr].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw, cat->v22.ccs[adr].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw, cat->v22.ccs[adr].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw, cat->v22.ccs[adr].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw, cat->v22.ccs[adr].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when the KM module handle was resolved for this backend device. */
+static bool km_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_km_nthw != NULL;
+}
+
+/* Pack the KM module version as major in the upper and minor in the lower 16 bits. */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	int major = module_get_major_version(be->p_km_nthw->m_km);
+	int minor = module_get_minor_version(be->p_km_nthw->m_km);
+
+	return (uint32_t)((major << 16) | (minor & 0xffff));
+}
+
+/* Flush 'cnt' KM recipe records starting at 'category' (v7 layout only). */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int rc = category + idx;
+
+			km_nthw_rcp_select(be->p_km_nthw, rc);
+			/* Key word extractor setup */
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[rc].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[rc].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[rc].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[rc].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[rc].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[rc].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[rc].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[rc].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[rc].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[rc].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[rc].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[rc].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[rc].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[rc].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[rc].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[rc].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[rc].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[rc].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[rc].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[rc].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[rc].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[rc].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[rc].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[rc].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[rc].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[rc].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[rc].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[rc].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[rc].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[rc].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[rc].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[rc].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[rc].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[rc].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[rc].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[rc].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[rc].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[rc].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[rc].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[rc].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[rc].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[rc].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[rc].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[rc].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KM CAM records; the CAM address is bank * 2048 + record. */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = (bank << 11) + record + idx;
+
+			km_nthw_cam_select(be->p_km_nthw, adr);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[adr].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[adr].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[adr].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[adr].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[adr].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[adr].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[adr].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[adr].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[adr].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[adr].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[adr].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[adr].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/* Flush up to 'cnt' KM TCAM records, skipping entries not marked dirty. */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		const int base = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			/* Only entries marked dirty are written to hardware */
+			if (!km->v7.tcam[base + idx].dirty)
+				continue;
+			km_nthw_tcam_select(be->p_km_nthw, base + idx);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[base + idx].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[base + idx].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Flush 'cnt' TCAM color/flow-type (TCI) entries for 'bank' starting at
+ * 'index' (bank width is 72 entries).
+ * NOTE(review): the original inline comment said "version 3" while the
+ * code checks km->ver == 7 - the comment looks stale; confirm.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries, hence the bank * 72 stride. */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71).
+ * NOTE(review): this header looks copy-pasted from the TCI function; the
+ * TCQ address below is formed as bank + (index << 4), i.e. bank in the
+ * low 4 bits - confirm the description matches the TCQ layout.
+ */
+/*
+ * Flush 'cnt' TCAM qualifier (TCQ) entries: per entry, select the address,
+ * program the bank mask and qualifier from the shadow copy, then flush.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* NOTE(review): "version 3" in the original comment looks stale;
+		 * the code checks ver == 7.  Bank width = 72.
+		 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* True when the FLM module was mapped for this adapter. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_flm_nthw ? true : false;
+}
+
+/* Pack the FLM module version as (major << 16) | minor. */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_flm_nthw->m_flm);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the full FLM CONTROL register set (enable/init flags and the
+ * learn/unlearn/relearn/probe/stat queue-select bits) from the v17 shadow
+ * and commit with a single flush.  Returns 0; no-op for flm->ver < 17.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM STATUS fields (critical/panic/crcerr, typically
+ * to clear sticky error bits) and flush.  The '0' argument selects write
+ * direction for these get/set-style accessors.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read back the FLM STATUS register into the v17 shadow.  The trailing '1'
+ * argument selects read direction for the get/set-style field accessors.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Program the FLM flow timeout register from the v17 shadow and commit. */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(dev, flm, dev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(dev->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(dev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, dev->p_flm_nthw);
+	return 0;
+}
+
+/* Program the FLM scrub interval register from the v17 shadow and commit. */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(dev, flm, dev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(dev->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(dev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, dev->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load-bin register from the v17 shadow and commit. */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(dev, flm, dev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(dev->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(dev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, dev->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM packets-per-second load register and commit. */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(dev, flm, dev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(dev->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(dev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, dev->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM lookups-per-second load register and commit. */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(dev, flm, dev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(dev->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(dev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, dev->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM accesses-per-second load register and commit. */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(dev, flm, dev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(dev->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(dev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, dev->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Program the four FLM priority limit/flow-type register pairs from the
+ * v17 shadow and commit with a single flush.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FLM PST (priority select table) entries starting at 'index':
+ * per entry, select, program bp/pp/tp, then flush.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FLM recipe (RCP) entries starting at 'index'.  Every field of
+ * the recipe - lookup enable, QW0/QW4/SW8/SW9 extractor configuration,
+ * mask, key/option/index numbers and byte-counter source - is written from
+ * the v17 shadow before the per-entry flush.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the learn/info/status buffer counters (lrn_free, inf_avail,
+ * sta_avail) in the v17 shadow from hardware.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read back all FLM statistics counters into the shadow: each *_update()
+ * latches the hardware counter, each *_cnt(..., 1) then copies it out
+ * (the trailing '1' selects read direction).  v17 counters are always
+ * read; the v20 block adds the sta/inf/pck/csh/cuc counters.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	/* Counters introduced with the v20 register layout. */
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push a block of learn records to the FLM learn FIFO and refresh the
+ * buffer-control counters.  Returns the nthw helper's status.
+ * NOTE(review): unlike the register flush functions there is no
+ * flm->ver guard here, and v17.buf_ctrl is used unconditionally -
+ * presumably all supported versions share this layout; confirm.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Drain up to 'size' words of flow-info records from the FLM info FIFO
+ * into 'inf_data' and refresh the buffer-control counters.
+ * Returns the nthw helper's status.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Drain up to 'size' words of status records from the FLM status FIFO
+ * into 'sta_data' and refresh the buffer-control counters.
+ * Returns the nthw helper's status.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* True when the HSH module was mapped for this adapter. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_hsh_nthw ? true : false;
+}
+
+/* Pack the HSH module version as (major << 16) | minor. */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_hsh_nthw->m_hsh);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' HSH (RSS hash) recipe entries starting at 'category': all
+ * hash extractor words, masks, seed and hash-type fields are written from
+ * the v5 shadow before each per-entry flush.  No-op unless hsh->ver == 5.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* True when the HST module was mapped for this adapter. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_hst_nthw ? true : false;
+}
+
+/* Pack the HST module version as (major << 16) | minor. */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_hst_nthw->m_hst);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' HST (header stripper) recipe entries starting at 'category':
+ * strip mode, start/end offsets and the three modifier command sets are
+ * written from the v2 shadow before each per-entry flush.
+ * No-op unless hst->ver == 2.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* True when the QSL module was mapped for this adapter. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_qsl_nthw ? true : false;
+}
+
+/* Pack the QSL module version as (major << 16) | minor. */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_qsl_nthw->m_qsl);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' QSL (queue select logic) recipe entries starting at
+ * 'category': discard/drop flags, destination-table window and the
+ * lr/tsa/vli fields are written from the v7 shadow per entry.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL queue-select-table (QST) entries starting at 'entry':
+ * destination queue, enable, TX port and VLAN (lre/tci/ven) fields are
+ * written from the v7 shadow per entry.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' QSL queue-enable (QEN) entries, starting at 'entry', from
+ * the v7 shadow to hardware, one entry per flush.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(dev, qsl, dev->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		int idx = entry;
+
+		qsl_nthw_qen_cnt(dev->p_qsl_nthw, 1);
+		while (idx < entry + cnt) {
+			qsl_nthw_qen_select(dev->p_qsl_nthw, idx);
+			qsl_nthw_qen_en(dev->p_qsl_nthw, qsl->v7.qen[idx].en);
+			qsl_nthw_qen_flush(dev->p_qsl_nthw);
+			idx++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, dev->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL unmatched-queue (UNMQ) entries starting at 'entry':
+ * destination queue and enable bit are written from the v7 shadow per
+ * entry.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* True when the SLC module was mapped for this adapter. */
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_slc_nthw ? true : false;
+}
+
+/* Pack the SLC module version as (major << 16) | minor. */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_slc_nthw->m_slc);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' SLC (slicer) recipe entries starting at 'category':
+ * tail-slice enable, dynamic offset base, byte offset and pcap flag are
+ * written from the v1 shadow per entry.  No-op unless slc->ver == 1.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* True when the SLC-LR module was mapped for this adapter. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_slc_lr_nthw ? true : false;
+}
+
+/* Pack the SLC-LR module version as (major << 16) | minor. */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major =
+		(uint32_t)module_get_major_version(dev->p_slc_lr_nthw->m_slc_lr);
+	uint32_t minor =
+		(uint32_t)module_get_minor_version(dev->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' SLC-LR (slicer, long recipe) entries starting at 'category':
+ * tail-slice enable, dynamic offset base, byte offset and pcap flag are
+ * written from the v2 shadow per entry.  No-op unless slc_lr->ver == 2.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* True when the PDB module was mapped for this adapter. */
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_pdb_nthw ? true : false;
+}
+
+/* Pack the PDB module version as (major << 16) | minor. */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_pdb_nthw->m_pdb);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' PDB (packet descriptor builder) recipe entries starting at
+ * 'category': descriptor layout, TX control, offset extractors and the
+ * duplicate/pcap fields are written from the v9 shadow per entry.
+ * No-op unless pdb->ver == 9.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Bug fix: PCAP_KEEP_FCS was previously written through
+			 * the DUPLICATE_BIT accessor, overwriting the value set
+			 * just above and never programming PCAP_KEEP_FCS.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the PDB global CONFIG register (timestamp format and port offset)
+ * from the v9 shadow and commit.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* True when the IOA module was mapped for this adapter. */
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_ioa_nthw ? true : false;
+}
+
+/* Pack the IOA module version as (major << 16) | minor. */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_ioa_nthw->m_ioa);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Write IOA RCP records [category .. category+cnt-1] to hardware. */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct ioa_nthw *p = be->p_ioa_nthw;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = category + k;
+
+			ioa_nthw_rcp_select(p, idx);
+			ioa_nthw_rcp_tunnel_pop(p, ioa->v4.rcp[idx].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(p, ioa->v4.rcp[idx].vlan_pop);
+			ioa_nthw_rcp_vlan_push(p, ioa->v4.rcp[idx].vlan_push);
+			ioa_nthw_rcp_vlan_vid(p, ioa->v4.rcp[idx].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(p, ioa->v4.rcp[idx].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(p, ioa->v4.rcp[idx].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(p, ioa->v4.rcp[idx].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(p, ioa->v4.rcp[idx].queue_override_en);
+			ioa_nthw_rcp_queue_id(p, ioa->v4.rcp[idx].queue_id);
+			ioa_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/* Commit the two custom VLAN TPID values to the IOA SPECIAL register. */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct ioa_nthw *p = be->p_ioa_nthw;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(p, ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(p, ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(p);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/* Write ROA egress-packet-processing entries [index .. index+cnt-1]. */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct ioa_nthw *p = be->p_ioa_nthw;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			ioa_nthw_roa_epp_select(p, idx);
+			ioa_nthw_roa_epp_push_tunnel(p, ioa->v4.roa_epp[idx].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(p, ioa->v4.roa_epp[idx].tx_port);
+			ioa_nthw_roa_epp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* True when the ROA module was instantiated for this adapter. */
+static bool roa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	if (be->p_roa_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* Pack the ROA module version as (major << 16) | minor. */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_roa_nthw->m_roa);
+	const uint32_t minor = module_get_minor_version(be->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write tunnel header templates to the ROA TUN_HDR table.
+ *
+ * Each template occupies 4 consecutive hardware records, and each record
+ * holds 4 words (hence the fixed cnt of 4 and the ii*4 word offset below).
+ * NOTE(review): 'index' appears to be a raw hardware-record index while
+ * 'tunhdr' is indexed per template (index / 4 + i) — confirm callers always
+ * pass an index that is a multiple of 4.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* Burst 4 words per flush. */
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				/* Pass a pointer to the next 4-word chunk. */
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/* Write ROA tunnel-config records [category .. category+cnt-1]. */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct roa_nthw *p = be->p_roa_nthw;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = category + k;
+
+			roa_nthw_tun_cfg_select(p, idx);
+			roa_nthw_tun_cfg_tun_len(p, roa->v6.tuncfg[idx].tun_len);
+			roa_nthw_tun_cfg_tun_type(p, roa->v6.tuncfg[idx].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(p, roa->v6.tuncfg[idx].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(p, roa->v6.tuncfg[idx].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(p, roa->v6.tuncfg[idx].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(p, roa->v6.tuncfg[idx].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(p, roa->v6.tuncfg[idx].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(p, roa->v6.tuncfg[idx].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(p,
+				roa->v6.tuncfg[idx].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(p, roa->v6.tuncfg[idx].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(p, roa->v6.tuncfg[idx].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(p, roa->v6.tuncfg[idx].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(p, roa->v6.tuncfg[idx].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(p, roa->v6.tuncfg[idx].recirc_bypass);
+			roa_nthw_tun_cfg_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/* Commit the global ROA forwarding configuration register. */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct roa_nthw *p = be->p_roa_nthw;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(p, roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(p, roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(p, roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(p, roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(p,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(p,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(p);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/* Write ROA link-aggregation config entries [index .. index+cnt-1]. */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct roa_nthw *p = be->p_roa_nthw;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			roa_nthw_lag_cfg_select(p, idx);
+			roa_nthw_lag_cfg_tx_phy_port(p, roa->v6.lagcfg[idx].txphy_port);
+			roa_nthw_lag_cfg_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* True when the RMC module was instantiated for this adapter. */
+static bool rmc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	if (be->p_rmc_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* Pack the RMC module version as (major << 16) | minor. */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_rmc_nthw->m_rmc);
+	const uint32_t minor = module_get_minor_version(be->p_rmc_nthw->m_rmc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Commit the RMC CTRL register (blocking/forwarding control bits). */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct rmc_nthw *p = be->p_rmc_nthw;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	/* Version word is (major << 16) | minor, i.e. 1.3. */
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(p, rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(p, rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(p, rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(p, rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(p, rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(p);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/* TPE is present only when all six of its sub-modules are instantiated. */
+static bool tpe_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	if (be->p_csu_nthw == NULL || be->p_hfu_nthw == NULL)
+		return false;
+	if (be->p_rpp_lr_nthw == NULL || be->p_tx_cpy_nthw == NULL)
+		return false;
+	return be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Derive a combined TPE version from the six sub-module versions.
+ *
+ * Each sub-module version is packed as (major << 16) | minor, so the small
+ * integers compared below correspond to major 0, minor N.  The two known
+ * combinations differ only in the RPP_LR minor version (0 -> TPE v1,
+ * 1 -> TPE v2).
+ *
+ * NOTE(review): when built with NDEBUG the assert() compiles out and an
+ * unknown combination silently returns 0 — confirm callers treat 0 as
+ * "unsupported TPE".
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	/* RPP_LR 0.0 -> combined TPE version 1. */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	/* RPP_LR 0.1 -> combined TPE version 2. */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	assert(false);
+	return 0;
+}
+
+/* Write RPP_LR RCP records [index .. index+cnt-1] to hardware. */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct rpp_lr_nthw *p = be->p_rpp_lr_nthw;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			rpp_lr_nthw_rcp_select(p, idx);
+			rpp_lr_nthw_rcp_exp(p, rpp_lr->v1.rpp_rcp[idx].exp);
+			rpp_lr_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Write RPP_LR IFR RCP records [index .. index+cnt-1].
+ * Returns -1 when the module version lacks the IFR table (ver < 2).
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct rpp_lr_nthw *p = be->p_rpp_lr_nthw;
+	int res = 0;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver < 2) {
+		res = -1;
+	} else {
+		rpp_lr_nthw_ifr_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			rpp_lr_nthw_ifr_rcp_select(p, idx);
+			rpp_lr_nthw_ifr_rcp_en(p, rpp_lr->v2.rpp_ifr_rcp[idx].en);
+			rpp_lr_nthw_ifr_rcp_mtu(p, rpp_lr->v2.rpp_ifr_rcp[idx].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(p);
+		}
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Write IFR RCP records [index .. index+cnt-1].
+ * Returns -1 when the module version lacks the table (ver < 2).
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct ifr_nthw *p = be->p_ifr_nthw;
+	int res = 0;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver < 2) {
+		res = -1;
+	} else {
+		ifr_nthw_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			ifr_nthw_rcp_select(p, idx);
+			ifr_nthw_rcp_en(p, ifr->v2.ifr_rcp[idx].en);
+			ifr_nthw_rcp_mtu(p, ifr->v2.ifr_rcp[idx].mtu);
+			ifr_nthw_rcp_flush(p);
+		}
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/* Write TX_INS RCP records [index .. index+cnt-1] to hardware. */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct tx_ins_nthw *p = be->p_tx_ins_nthw;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			tx_ins_nthw_rcp_select(p, idx);
+			tx_ins_nthw_rcp_dyn(p, tx_ins->v1.ins_rcp[idx].dyn);
+			tx_ins_nthw_rcp_ofs(p, tx_ins->v1.ins_rcp[idx].ofs);
+			tx_ins_nthw_rcp_len(p, tx_ins->v1.ins_rcp[idx].len);
+			tx_ins_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/* Write TX_RPL RCP records [index .. index+cnt-1] to hardware. */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct tx_rpl_nthw *p = be->p_tx_rpl_nthw;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			tx_rpl_nthw_rcp_select(p, idx);
+			tx_rpl_nthw_rcp_dyn(p, tx_rpl->v1.rpl_rcp[idx].dyn);
+			tx_rpl_nthw_rcp_ofs(p, tx_rpl->v1.rpl_rcp[idx].ofs);
+			tx_rpl_nthw_rcp_len(p, tx_rpl->v1.rpl_rcp[idx].len);
+			tx_rpl_nthw_rcp_rpl_ptr(p, tx_rpl->v1.rpl_rcp[idx].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(p, tx_rpl->v1.rpl_rcp[idx].ext_prio);
+			tx_rpl_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/* Write TX_RPL EXT entries [index .. index+cnt-1] to hardware. */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct tx_rpl_nthw *p = be->p_tx_rpl_nthw;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			tx_rpl_nthw_ext_select(p, idx);
+			tx_rpl_nthw_ext_rpl_ptr(p, tx_rpl->v1.rpl_ext[idx].rpl_ptr);
+			tx_rpl_nthw_ext_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/* Write TX_RPL replacement-data words [index .. index+cnt-1]. */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct tx_rpl_nthw *p = be->p_tx_rpl_nthw;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			tx_rpl_nthw_rpl_select(p, idx);
+			tx_rpl_nthw_rpl_value(p, tx_rpl->v1.rpl_rpl[idx].value);
+			tx_rpl_nthw_rpl_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write TX_CPY RCP records [index .. index+cnt-1] to hardware.
+ *
+ * Records are spread across several writers, nb_rcp_categories records per
+ * writer; the writer burst count is (re)programmed whenever the record index
+ * crosses a writer boundary.
+ *
+ * Fix: the "no writer selected yet" sentinel was created by implicitly
+ * converting -1 to unsigned and then compared against a signed division
+ * result.  The conversions are now explicit (same values, no implicit
+ * signed/unsigned mixing — CERT INT31-C).
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* Sentinel: UINT_MAX means no writer has been selected yet. */
+	unsigned int wr_index = (unsigned int)-1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			const unsigned int new_wr = (unsigned int)
+				((index + i) / tx_cpy->nb_rcp_categories);
+
+			if (wr_index != new_wr) {
+				wr_index = new_wr;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/* Write HFU (header field update) RCP records [index .. index+cnt-1]. */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct hfu_nthw *p = be->p_hfu_nthw;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			hfu_nthw_rcp_select(p, idx);
+			/* Length field A. */
+			hfu_nthw_rcp_len_a_wr(p, hfu->v1.hfu_rcp[idx].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(p,
+						  hfu->v1.hfu_rcp[idx].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(p, hfu->v1.hfu_rcp[idx].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(p, hfu->v1.hfu_rcp[idx].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(p, hfu->v1.hfu_rcp[idx].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(p, hfu->v1.hfu_rcp[idx].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(p, hfu->v1.hfu_rcp[idx].len_a_sub_dyn);
+			/* Length field B. */
+			hfu_nthw_rcp_len_b_wr(p, hfu->v1.hfu_rcp[idx].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(p, hfu->v1.hfu_rcp[idx].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(p, hfu->v1.hfu_rcp[idx].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(p, hfu->v1.hfu_rcp[idx].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(p, hfu->v1.hfu_rcp[idx].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(p, hfu->v1.hfu_rcp[idx].len_b_sub_dyn);
+			/* Length field C. */
+			hfu_nthw_rcp_len_c_wr(p, hfu->v1.hfu_rcp[idx].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(p, hfu->v1.hfu_rcp[idx].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(p, hfu->v1.hfu_rcp[idx].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(p, hfu->v1.hfu_rcp[idx].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(p, hfu->v1.hfu_rcp[idx].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(p, hfu->v1.hfu_rcp[idx].len_c_sub_dyn);
+			/* TTL update. */
+			hfu_nthw_rcp_ttl_wr(p, hfu->v1.hfu_rcp[idx].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(p, hfu->v1.hfu_rcp[idx].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(p, hfu->v1.hfu_rcp[idx].ttl_pos_ofs);
+			/* Protocol info and header offsets. */
+			hfu_nthw_rcp_csinf(p, hfu->v1.hfu_rcp[idx].cs_inf);
+			hfu_nthw_rcp_l3prt(p, hfu->v1.hfu_rcp[idx].l3_prt);
+			hfu_nthw_rcp_l3frag(p, hfu->v1.hfu_rcp[idx].l3_frag);
+			hfu_nthw_rcp_tunnel(p, hfu->v1.hfu_rcp[idx].tunnel);
+			hfu_nthw_rcp_l4prt(p, hfu->v1.hfu_rcp[idx].l4_prt);
+			hfu_nthw_rcp_ol3ofs(p, hfu->v1.hfu_rcp[idx].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(p, hfu->v1.hfu_rcp[idx].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(p, hfu->v1.hfu_rcp[idx].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(p, hfu->v1.hfu_rcp[idx].inner_l4_ofs);
+			hfu_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/* Write CSU (checksum update) RCP records [index .. index+cnt-1]. */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct csu_nthw *p = be->p_csu_nthw;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(p, 1);
+		for (int k = 0; k < cnt; k++) {
+			const int idx = index + k;
+
+			csu_nthw_rcp_select(p, idx);
+			csu_nthw_rcp_outer_l3_cmd(p, csu->v1.csu_rcp[idx].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(p, csu->v1.csu_rcp[idx].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(p, csu->v1.csu_rcp[idx].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(p, csu->v1.csu_rcp[idx].il4_cmd);
+			csu_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Stub: per-flow Rx queue allocation is not supported by this backend.
+ * Always fails with -1 after logging.
+ * NOTE(review): logs to stdout via printf; consider the driver's NT_LOG
+ * facility instead — confirm logging convention for this layer.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Stub: per-flow Rx queue release is not supported by this backend.
+ * NOTE(review): returns 0 (success) although it logs an error, while the
+ * matching alloc_rx_queue() stub returns -1 — confirm the asymmetry is
+ * intentional (e.g. so teardown paths do not fail).
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table handed to the generic flow API layer.
+ *
+ * Positional initializer: the order must match struct flow_api_backend_ops
+ * exactly (defined elsewhere in flow_api_backend.h).
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	/* Backend interface version — presumably checked by the flow API
+	 * against its expected version; confirm against flow_api_backend.h.
+	 */
+	1,
+
+	set_debug_mode,
+	/* Capability / resource-dimension getters. */
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	/* Rx queue management (unsupported stubs in this backend). */
+	alloc_rx_queue,
+	free_rx_queue,
+
+	/* CAT module. */
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	/* KM module. */
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	/* FLM module. */
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	/* HSH module. */
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	/* HST module. */
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	/* QSL module. */
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	/* SLC module. */
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	/* SLC LR module. */
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	/* PDB module. */
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	/* IOA module. */
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	/* ROA module. */
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	/* RMC module. */
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	/* TPE module (composed of CSU/HFU/RPP_LR/TX_CPY/TX_INS/TX_RPL). */
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe the FPGA for each filter-related nthw module and, where found,
+ * allocate and initialize its handle in the per-adapter be_devs[] slot
+ * (absent modules leave a NULL handle; the *_get_present ops report that).
+ *
+ * Pattern used for every module: a first call with a NULL instance acts as
+ * a presence probe (returns 0 when the module exists in this FPGA image),
+ * after which a real instance is allocated and initialized.
+ *
+ * Returns the backend ops table; *dev receives the adapter's backend state.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	/* INFO is mandatory — initialized unconditionally, no probe. */
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Release every nthw module handle allocated by bin_flow_backend_init().
+ * NOTE(review): handles for modules absent from the FPGA image are NULL
+ * (see bin_flow_backend_init) — assumes each *_delete() tolerates a NULL
+ * argument; confirm in the nthw module implementations.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Binary flow backend: entry points that bind the generic flow API to the
+ * nthw register layer for one adapter.
+ *
+ * Fixed: the include guard used a reserved identifier (leading double
+ * underscore, C11 7.1.3), and the header relied on includers to have
+ * declared struct flow_api_backend_ops — a forward declaration makes it
+ * self-contained.
+ */
+#ifndef FLOW_BACKEND_H_
+#define FLOW_BACKEND_H_
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/* Full definition lives in flow_api_backend.h. */
+struct flow_api_backend_ops;
+
+/* Probe/initialize all backend modules; *be_dev receives adapter state. */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+/* Release everything allocated by bin_flow_backend_init(). */
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* FLOW_BACKEND_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Initialize the flow filter API for one adapter.
+ *
+ * Creates the binary flow backend on @p_fpga and hands it to
+ * flow_api_create(). On success *p_flow_device points at the new flow
+ * NIC device and 0 is returned; on failure the backend is released
+ * again, *p_flow_device is set to NULL and -1 is returned.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		/* Do not leak the backend created above on failure */
+		if (be_dev)
+			bin_flow_backend_done(be_dev);
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Shut down a flow NIC device created by flow_filter_init() and release
+ * its backend. Returns the status of flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	/* Fetch the backend handle before the flow device is destroyed */
+	void *backend = flow_api_get_be_dev(dev);
+
+	int status = flow_api_done(dev);
+
+	if (backend)
+		bin_flow_backend_done(backend);
+	return status;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+/* Make sure any OPAE code paths are compiled out */
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/*
+ * Create the flow filter API instance for @adapter_no on @p_fpga.
+ * On success *p_flow_device is the new device and 0 is returned;
+ * on failure *p_flow_device is NULL and -1 is returned.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Destroy the flow device and its backend; returns flow_api_done() status */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (4 preceding siblings ...)
  2023-08-16 13:25 ` [PATCH 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-16 13:25 ` Mykola Kostenok
  2023-08-16 13:25 ` [PATCH 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
                   ` (14 subsequent siblings)
  20 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   56 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4258 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1194 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 28 files changed, 12525 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c3f2c993f..02aca74173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..7cea4bb9d0
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,56 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+ARMv7                =
+ARMv8                =
+LoongArch64          =
+Power8               =
+rv64                 =
+x86-32               =
+x86-64               =
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple processes.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state for the 4GA driver core */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;	/* PCI identity of the adapter */
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;	/* driver name string */
+
+	volatile bool b_shutdown;	/* presumably set to ask the threads below to exit -- TODO confirm */
+	pthread_mutex_t stat_lck;	/* NOTE(review): looks like it guards statistics state shared with stat_thread -- confirm */
+	pthread_t stat_thread;	/* statistics thread */
+	pthread_t flm_thread;	/* FLM thread */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;			/* adapter index */
+	struct rte_pci_device *p_dev;	/* underlying DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;	/* per-adapter driver state */
+
+	int n_eth_dev_init_count;	/* number of eth devices initialized on this adapter */
+	int probe_finished;		/* presumably non-zero once probing completed -- TODO confirm */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index a9892615c9..faaba95af3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -26,6 +26,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -112,6 +115,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+/* Split-ring available ring; packed to match the virtio wire layout */
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* Split-ring used ring element. le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+/* Split-ring used ring; packed to match the virtio wire layout */
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used and descriptor areas inside a split virtqueue */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+/* Lifecycle state of a virt-queue slot in the static rxvq/txvq tables */
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+/* 1 when the queue type is a packed ring, 0 for a split ring */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/* Driver-side state of one DBS virt-queue */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;	/* split or packed ring */
+	uint16_t in_order;
+	int irq_vector;		/* MSI-X vector; negative means interrupts disabled */
+
+	nthw_dbs_t *mp_nthw_dbs;	/* owning DBS module */
+	uint32_t index;			/* DBS queue index */
+	le16 queue_size;
+	uint32_t am_enable;		/* current avail-monitor enable state */
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	/* Physical addresses of the ring areas, as programmed into the FPGA */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Byte offsets of the event-suppression areas inside a packed virtqueue */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* State of all possible RX/TX virt-queues, indexed by DBS queue number */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one DBS RX queue via the RX_INIT register and wait for the
+ * hardware to complete the operation (busy flag cleared before and after).
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	/* Wait for any previous init operation to finish */
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait for this init operation to finish */
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Initialize one DBS TX queue via the TX_INIT register and wait for the
+ * hardware to complete the operation (busy flag cleared before and after).
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	/* Wait for any previous init operation to finish */
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait for this init operation to finish */
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * One-time DBS bring-up for an FPGA: create the DBS module, reset it,
+ * initialize every RX/TX queue and step the RX/TX control registers up to
+ * fully enabled. Returns 0 on success, -1 on allocation failure, or the
+ * dbs_init() error code if the DBS module is absent or fails to attach.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* Mark every virt-queue slot free */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Step RX control up: all off -> AM/UW on -> queue scheduler on */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	/* Same staged enable sequence for the TX side */
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the offsets of the used ring and the descriptor table for a
+ * split virtqueue with @queue_size entries. The avail ring starts at
+ * offset zero and each area is rounded up to STRUCT_ALIGNMENT bytes.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	struct virtq_struct_layout_s layout;
+
+	/* avail area: header + one le16 per entry ("used_event" is not used) */
+	size_t avail_bytes = sizeof(struct virtq_avail) +
+			     queue_size * sizeof(le16);
+	/* used area: header + one element per entry ("avail_event" is not used) */
+	size_t used_bytes = sizeof(struct virtq_used) +
+			    queue_size * sizeof(struct virtq_used_elem);
+
+	/* Round each area up to the next STRUCT_ALIGNMENT boundary */
+	size_t avail_rounded = (avail_bytes + STRUCT_ALIGNMENT - 1) /
+			       STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+	size_t used_rounded = (used_bytes + STRUCT_ALIGNMENT - 1) /
+			      STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+
+	layout.used_offset = avail_rounded;
+	layout.desc_offset = avail_rounded + used_rounded;
+
+	return layout;
+}
+
+/*
+ * Fill in a split-ring avail struct: interrupts suppressed, index set to
+ * @initial_avail_idx and the ring populated with the identity mapping.
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = (struct virtq_avail *)addr;
+	uint16_t entry;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+	for (entry = 0; entry < queue_size; ++entry)
+		avail->ring[entry] = entry;
+}
+
+/*
+ * Fill in a split-ring used struct: notifications suppressed
+ * (VIRTQ_USED_F_NO_NOTIFY), index zeroed and all ring elements cleared.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	struct virtq_used *used = (struct virtq_used *)addr;
+	int entry;
+
+	used->flags = 1;
+	used->idx = 0;
+	for (entry = 0; entry < queue_size; ++entry) {
+		used->ring[entry].id = 0;
+		used->ring[entry].len = 0;
+	}
+}
+
+/*
+ * Fill in the descriptor table from the supplied packet buffer
+ * descriptors; a NULL descriptor array leaves the table untouched.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	struct virtq_desc *desc_tbl;
+	int entry;
+
+	if (!packet_buffer_descriptors)
+		return;
+
+	desc_tbl = (struct virtq_desc *)addr;
+	for (entry = 0; entry < queue_size; ++entry) {
+		desc_tbl[entry].addr =
+			(uint64_t)packet_buffer_descriptors[entry].phys_addr;
+		desc_tbl[entry].len = packet_buffer_descriptors[entry].len;
+		desc_tbl[entry].flags = flgs;
+		desc_tbl[entry].next = 0;
+	}
+}
+
+/*
+ * Initialize all three split-ring areas (avail, used and descriptor
+ * table) of one virt-queue in a single call.
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * Return the bit position of the highest set bit, i.e. floor(log2(qsize)).
+ * Callers pass power-of-two queue sizes, for which this is the exact log2.
+ * (A zero input wraps to 0xFFFF, matching the original arithmetic.)
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t bits = 0;
+
+	for (; qsize != 0; qsize = qsize >> 1)
+		++bits;
+	return bits - 1;
+}
+
+/*
+ * Configure one DBS RX virt-queue following the DSF00094 setup sequence
+ * (DR -> UW -> AM -> INIT -> AM enable) and record its state in rxvq[].
+ * Interrupts are always configured off here; queues with irq_vector >= 0
+ * are armed later, once vfio interrupts are enabled in the kernel.
+ * Returns a handle to the queue state, or NULL if a register write fails.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	/* irq_vector is signed (may be -1), so log it with %d */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%d\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Quiesce an RX virt-queue: clear interrupt/ISTK state in RX_UW_DATA,
+ * disable the avail monitor (RX_AM_DATA) and wait for the FPGA to finish
+ * packet processing on the queue.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* Only queues in the UNMANAGED state can be toggled this way */
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-enable an RX virt-queue previously disabled with
+ * nthw_disable_rx_virt_queue(): program interrupt/ISTK state into
+ * RX_UW_DATA (sticky interrupt when a valid MSI-X vector is configured)
+ * and re-enable the avail monitor (RX_AM_DATA).
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if a valid MSI-X vector is configured */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	/* irq_vector is signed (may be -1), so log it with %d */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%d\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable an unmanaged Tx virt queue: clear interrupt/ISTK state in the
+ * UW registers, disable the availability monitor (AM), then wait for the
+ * FPGA to drain the queue.  Returns 0 on success, -1 on error.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-enable an unmanaged Tx virt queue: reprogram the UW registers
+ * (optionally with a sticky MSI-X interrupt) and re-enable the
+ * availability monitor (AM).  Mirror of nthw_enable_rx_virt_queue().
+ *
+ * Returns 0 on success, -1 on invalid queue/state or register failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Redirect a Tx virt queue to a new output port (via the DR registers)
+ * and then re-enable it.  Returns 0 on success, -1 on error.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Guard against NULL like the other enable/disable entry points */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/*
+ * Configure per-port Tx QoS shaping (enable flag, information rate `ir`,
+ * burst size `bs`).  Thin public wrapper around set_tx_qos_data().
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/*
+ * Set the global Tx QoS rate scaling (multiplier/divider pair).
+ * Thin public wrapper around set_tx_qos_rate().
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read the latched Rx queue pointer from HW.  On success *p_index holds the
+ * pointer, or INDEX_PTR_NOT_VALID when HW reports the value as not yet valid.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr_val;
+	uint32_t queue_id;
+	uint32_t is_valid;
+	int res;
+
+	res = get_rx_ptr(p_nthw_dbs, &ptr_val, &queue_id, &is_valid);
+	if (res != 0)
+		return res;
+
+	*p_index = is_valid ? ptr_val : INDEX_PTR_NOT_VALID;
+	return res;
+}
+
+/*
+ * Read the latched Tx queue pointer from HW.  On success *p_index holds the
+ * pointer, or INDEX_PTR_NOT_VALID when HW reports the value as not yet valid.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr_val;
+	uint32_t queue_id;
+	uint32_t is_valid;
+	int res;
+
+	res = get_tx_ptr(p_nthw_dbs, &ptr_val, &queue_id, &is_valid);
+	if (res != 0)
+		return res;
+
+	*p_index = is_valid ? ptr_val : INDEX_PTR_NOT_VALID;
+	return res;
+}
+
+/* Select which Rx queue the HW pointer-latch mechanism should sample. */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Select which Tx queue the HW pointer-latch mechanism should sample. */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Poll the Rx/Tx idle register until HW clears the busy bit, then report the
+ * idle flag through *idle.  `rx` selects Rx (non-zero) or Tx registers.
+ *
+ * NOTE(review): the loop is unbounded — it relies on HW eventually clearing
+ * busy; confirm a HW timeout is not required here.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Request queue shutdown in HW and wait until the FPGA reports the queue
+ * idle.  `rx` selects the Rx (non-zero) or Tx register set.
+ *
+ * If the idle registers are not supported by this FPGA (-ENOTSUP), fall
+ * back to a fixed 200 ms grace period and report success.
+ * Returns 0 on success, -1 on register access failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	/* Repeatedly request idle until HW confirms the queue has drained */
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an Rx virt queue in HW: clear UW, disable AM, wait for the FPGA
+ * to go idle, then clear the remaining AM/DR state, re-initialize the queue
+ * and mark the software state entry UNUSED.
+ * Returns 0 on success, -1 on any register write/shutdown failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+
+	/* Fix: dereference rxvq only after the NULL check above */
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an UNMANAGED Rx virt queue (caller owns the queue memory). */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (!rxvq || rxvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/* Release a MANAGED Rx virt queue, freeing the driver-owned buffer table. */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (!rxvq || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a Tx virt queue in HW: clear UW, disable AM, wait for the FPGA
+ * to go idle, then clear the remaining AM/DR/QP state, re-initialize the
+ * queue and mark the software state entry UNUSED.
+ * Returns 0 on success, -1 on any register write/shutdown failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+
+	/* Fix: dereference txvq only after the NULL check above */
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an UNMANAGED Tx virt queue (caller owns the queue memory). */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (!txvq || txvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/* Release a MANAGED Tx virt queue, freeing the driver-owned buffer table. */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (!txvq || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Set up an unmanaged Tx virt queue following the DBS setup sequence
+ * (DSF00094): program DR, UW (interrupts initially disabled), AM, init the
+ * queue and the QP mapping, then record the state in txvq[index].
+ * Interrupt-driven queues are enabled later via nthw_enable_tx_virt_queue().
+ * Returns a queue handle, or NULL on invalid index or register failure.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/* Guard the txvq[] state table, as the enable/disable helpers do */
+	if (index >= MAX_VIRT_QUEUES)
+		return NULL;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a managed split-ring Rx virt queue: lay out the avail/used/desc
+ * structs inside p_virt_struct_area, pre-post the packet buffers, program
+ * the HW via nthw_setup_rx_virt_queue() and mark the queue MANAGED.
+ * Returns the queue handle, or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Fix: don't memcpy into a failed allocation */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a managed split-ring Tx virt queue: lay out the avail/used/desc
+ * structs inside p_virt_struct_area, program the HW via
+ * nthw_setup_tx_virt_queue() and mark the queue MANAGED.
+ * Returns the queue handle, or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Fix: don't memcpy into a failed allocation */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Initialize a packed-ring virt queue in host memory: zero the ring area,
+ * compute the device/driver event offsets, pre-fill the descriptors
+ * (buffer addresses for Rx, buffer IDs for Tx) and copy the packet buffer
+ * table.  Assumes the FPGA delivers descriptors in order, so the buffer ID
+ * equals the index into p_packet_buffers.
+ * Returns 0 on success, -1 on empty ring or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* Reject empty rings before touching memory; also avoids malloc(0) */
+	if (vq->queue_size == 0)
+		return -1;
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a managed packed-ring Rx virt queue: build the ring in host
+ * memory, then program HW with the driver/device event areas and the
+ * descriptor ring base.  Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	/* start_idx 0x8000: wrap-bit preset; see HW queue init semantics */
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a managed packed-ring Tx virt queue: build the ring in host
+ * memory (no buffer pre-fill flags for Tx), then program HW with the
+ * driver/device event areas and the descriptor ring base.
+ * Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on ring layout; any other vq_type is unsupported */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on ring layout; any other vq_type is unsupported */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/*
+ * Packed Ring helper macros
+ *
+ * avail_flag/used_flag_inv derive the descriptor flag bits from the current
+ * avail wrap counter (virtio 1.1 packed-ring wrap semantics).
+ */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, toggling the avail wrap counter on ring wrap.
+ * Arguments are captured in locals so they are evaluated exactly once.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, toggling the used wrap counter on ring wrap. */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Harvest up to n received buffers from the Rx queue into rp[].
+ *
+ * Split ring: reads the used ring, reassembling multi-segment (jumbo)
+ * packets so only whole packets are handed out; a packet whose segments
+ * don't all fit in n is left in the ring for the next call.
+ * Packed ring: walks descriptors in order (relies on in-order FPGA
+ * delivery); each descriptor is one segment/packet here.
+ *
+ * Returns the number of segments consumed; *nb_pkts gets the whole-packet
+ * count.  Note segs <= n but may be less than the segments inspected.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			/* Capture length comes from the FPGA packet header */
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Descriptor is ours only when both bits match the wrap counter */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Compiled-out verbose queue logging stub */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n Tx descriptors for the caller to fill.
+ *
+ * Split ring: hands out cvq->s (the split descriptor table) starting at
+ * tx_descr_avail_idx, refreshing the cached free count from the used/avail
+ * rings when needed (relies on in-order completion).
+ * Packed ring: hands out cvq->p, first draining any "outstanding" IDs left
+ * over from a previous partial grab, then claiming completed descriptors.
+ *
+ * *first_idx receives the starting descriptor index; *p_virt_addr the
+ * queue's buffer table.  Returns the number of descriptors granted.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Serve previously reclaimed-but-not-granted descriptors first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* A completion may cover several IDs; skip over them */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Reclaimed more than asked for: park the surplus in outs */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled Tx packets (n_segs[i] descriptors each) to the FPGA.
+ *
+ * Split ring: appends one avail-ring entry per packet and publishes the
+ * avail index after a full barrier.
+ * Packed ring: republishes descriptors; the first descriptor's flags are
+ * written last so HW cannot see a partially published batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* NOTE(review): comment says "read barrier" but this orders the
+		 * stores above before the flags store - confirm rte_rmb vs rte_wmb.
+		 */
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the HW Rx queue pointer for rxvq, polling every 10 us until the
+ * latched value becomes valid (bounded by ~100000 attempts).
+ * Returns 0 with *index set, or -1 on read failure / timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+	} while (raw_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the HW Tx queue pointer for txvq, polling every 10 us until the
+ * latched value becomes valid (bounded by ~100000 attempts).
+ * Returns 0 with *index set, or -1 on read failure / timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	do {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+	} while (raw_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/*
+ * One host memory region shared with the NIC: the same buffer described
+ * both by a physical/IOVA address (written into descriptors) and a virtual
+ * address (used by the driver), plus its length in bytes.
+ * NOTE(review): phys_addr is typed void * but is only ever cast to an
+ * integer (ule64) when programmed into a descriptor - confirm it is
+ * never dereferenced.
+ */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/* Fixed-width aliases used by the virtio descriptor layouts below
+ * (presumably little-endian, per the 'ule' prefix - confirm).
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Virtqueue layout selectors stored in vq_type */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+/* TX ordering modes */
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ * 16-byte layout mandated by the virtio specification; packed so the
+ * compiler cannot insert padding.
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags (bit positions per the virtio 1.1 spec) */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+/* NOTE(review): pack(push, 16) only caps member alignment at 16; it does
+ * not force 16-byte alignment of the struct itself - the allocation side
+ * must provide the alignment. Confirm that is the case.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Packed-ring event suppression area (driver or device side). */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/* NOTE(review): this pack(push, 16) sits inside the struct and only
+	 * affects the second union; pack() caps alignment rather than
+	 * guaranteeing it - confirm the intended layout.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ *
+ * Accessor macros hiding the split/packed descriptor layout difference.
+ * Arguments are captured into typeof-locals so each is evaluated exactly
+ * once. The locals use trailing underscores: the previous locals were
+ * named "vq"/"index"/"flgs", so an argument expression using one of those
+ * names would have self-initialized the shadowing local (undefined
+ * behavior). vq_set_next now captures its arguments like its siblings.
+ */
+#define vq_set_next(_vq, _index, _nxt)                  \
+	do {                                            \
+		__typeof__(_vq) (vq_) = (_vq);          \
+		__typeof__(_index) (index_) = (_index); \
+		__typeof__(_nxt) (nxt_) = (_nxt);       \
+		if ((vq_)->vq_type == SPLIT_RING)       \
+			(vq_)->s[index_].next = nxt_;   \
+	} while (0)
+#define vq_add_flags(_vq, _index, _flgs)                 \
+	do {                                             \
+		__typeof__(_vq) (vq_) = (_vq);           \
+		__typeof__(_index) (index_) = (_index);  \
+		__typeof__(_flgs) (flgs_) = (_flgs);     \
+		if ((vq_)->vq_type == SPLIT_RING)        \
+			(vq_)->s[index_].flags |= flgs_; \
+		else if ((vq_)->vq_type == PACKED_RING)  \
+			(vq_)->p[index_].flags |= flgs_; \
+	} while (0)
+#define vq_set_flags(_vq, _index, _flgs)                \
+	do {                                            \
+		__typeof__(_vq) (vq_) = (_vq);          \
+		__typeof__(_index) (index_) = (_index); \
+		__typeof__(_flgs) (flgs_) = (_flgs);    \
+		if ((vq_)->vq_type == SPLIT_RING)       \
+			(vq_)->s[index_].flags = flgs_; \
+		else if ((vq_)->vq_type == PACKED_RING) \
+			(vq_)->p[index_].flags = flgs_; \
+	} while (0)
+
+/* Address/length pair: the buffer fields common to both descriptor
+ * layouts, aligned like pvirtq_desc.
+ */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* Tagged view of a descriptor ring: interpret the pointer union according
+ * to vq_type (SPLIT_RING or PACKED_RING).
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..dd5d4ad918
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4258 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+/* Table of virt queues registered for release; 'managed' and 'rx' appear
+ * to record how each queue was created so the matching teardown path can
+ * be chosen. NOTE(review): inferred from field names - confirm against
+ * the release logic elsewhere in this file.
+ */
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Hex-dump a packet segment to stdout, 16 bytes per line, with an
+ * optional heading line showing the buffer address and length.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter driver structure in the global g_p_drv[] table.
+ * Logs a warning if a slot is being overwritten.
+ *
+ * g_p_drv has NUM_ADAPTER_MAX entries, so any adapter_no >=
+ * NUM_ADAPTER_MAX is out of range (the previous '>' check let index
+ * NUM_ADAPTER_MAX through and accessed one element past the array).
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up a registered adapter by its PCI domain and bus number.
+ * Returns the driver structure, or NULL if no adapter matches.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *found = NULL;
+	int slot;
+
+	rte_spinlock_lock(&hwlock);
+	for (slot = 0; slot < NUM_ADAPTER_MAX; slot++) {
+		struct drv_s *cand = g_p_drv[slot];
+
+		if (!cand)
+			continue;
+		/* Match on domain and bus only, as encoded in pciident */
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus) {
+			found = cand;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return found;
+}
+
+/* Decompose a packed PCI identifier into an rte_pci_addr and look the
+ * adapter up by it.
+ */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Print adapter information for the adapter identified by 'pciident'
+ * to the given stream. Returns the nt4ga_adapter_show_info() result,
+ * or -1 if no adapter is registered for the identifier (the previous
+ * version dereferenced the lookup result without a NULL check).
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1;
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS module handle for the adapter at 'pci_addr',
+ * or NULL (with an error log) if the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+		return p_nthw_dbs;
+	}
+
+	p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	return p_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at 'pci_addr', or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) if it is not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return FPGA_INFO_PROFILE_UNKNOWN;
+	}
+
+	return p_drv->ntdrv.adapter_info.fpga_info.profile;
+}
+
+/* kvargs handler: parse a numeric devarg string (decimal/hex/octal per
+ * strtol base 0) into the uint32_t pointed to by extra_args.
+ * Returns 0 on success, -1 on missing arguments.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (value_str == NULL || extra_args == NULL)
+		return -1;
+
+	*(uint32_t *)extra_args = (uint32_t)strtol(value_str, NULL, 0);
+	return 0;
+}
+
+/* One <port>:<speed Mbps> pair parsed from the "port.link_speed" devarg
+ * by string_to_port_link_speed().
+ */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000.
+ * extra_args points to a cursor (struct port_link_speed **) which is
+ * advanced to the next slot after a successful parse.
+ * Returns 0 on success, -1 on missing arguments or a malformed value.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	struct port_link_speed **pp_pls = (struct port_link_speed **)extra_args;
+	char *sep;
+
+	if (value_str == NULL || extra_args == NULL)
+		return -1;
+
+	const uint32_t pid = strtol(value_str, &sep, 10);
+
+	if (*sep != ':')
+		return -1;
+
+	const uint32_t lspeed = strtol(sep + 1, NULL, 10);
+
+	(*pp_pls)->port_id = pid;
+	(*pp_pls)->link_speed = lspeed;
+	/* advance the caller's cursor */
+	(*pp_pls)++;
+	return 0;
+}
+
+/*
+ * Gather per-queue Rx/Tx counters from the PMD's queue arrays into a DPDK
+ * rte_eth_stats structure and compute the aggregate totals.
+ * Returns 0 on success, -1 when statistics are not available or the
+ * interface index is out of range.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* NOTE(review): 'if_index > NUM_ADAPTER_PORTS_MAX' accepts the
+	 * boundary value itself - confirm whether '>=' was intended.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	/* Per-queue counters are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS */
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	/* Aggregate totals across all queues */
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Clear the PMD's per-queue soft counters and record the reset time.
+ * Returns 0 on success, -1 when statistics are unavailable or the
+ * interface index is out of range.
+ * NOTE(review): 'n_intf_no > NUM_ADAPTER_PORTS_MAX' accepts the boundary
+ * value itself - confirm whether '>=' was intended.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	/* Hold the statistics lock while clearing the counters */
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	/* Remember when the totals were last reset */
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* Translate an NT link speed enum value into the corresponding DPDK
+ * ETH_SPEED_NUM_xxx value; unknown speeds map to ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Translate an NT duplex enum value into the corresponding DPDK duplex
+ * value; unknown duplex maps to 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * rte_eth_dev link_update callback.
+ *
+ * Virtual/override ports: link status follows the virtio negotiation
+ * state (down until negotiated); speed is reported as NONE, duplex full.
+ * Physical ports: status, speed and duplex are read from the adapter.
+ * When the device is not started the link is reported down.
+ * Always returns 0; wait_to_complete is ignored.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			/* No physical link behind these port types */
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		/* Device stopped: report the link as down */
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * rte_eth_dev stats_get callback.
+ * Returns the dpdk_stats_collect() result instead of discarding it, so a
+ * failed collection (-1) is reported to the caller per the ethdev
+ * contract (0 on success, negative on error).
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * rte_eth_dev stats_reset callback.
+ * Returns the dpdk_stats_reset() result instead of discarding it, so a
+ * failed reset (-1) is reported to the caller per the ethdev contract.
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/* Translate an NT link-speed capability bitmask into the corresponding
+ * DPDK ETH_LINK_SPEED_xxx capability bitmask, one bit pair at a time.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (nt_link_speed_capa & speed_map[i].nt_bit)
+			eth_speed_capa |= speed_map[i].eth_bit;
+	}
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * rte_eth_dev dev_infos_get callback: report device limits and
+ * capabilities (MTU bounds, queue counts, link-speed capabilities and
+ * supported RSS offload types). Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* The inline profile requires a larger minimum MTU */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use a key, so this is just a fake key
+	 * length to meet testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet out of the virtqueue into 'mbuf', chaining
+ * additional mbufs allocated from 'mb_pool' when the data does not fit
+ * in a single mbuf. The packet may span several virtqueue segments
+ * (hw_recv[0..max_segs-1]); the first segment starts with an SG_HDR_SIZE
+ * header which is stripped.
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on mbuf
+ * allocation failure or when more than 'max_segs' segments would be
+ * needed to complete the packet.
+ */
+static inline __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* One iteration per consumed virtqueue segment */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		if (cpy_size > cpto_size) {
+			/* Segment data does not fit: fill current mbuf, then
+			 * chain new mbufs for the rest of this segment.
+			 */
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* Loop exhausted max_segs without completing the packet */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy a (possibly segmented) mbuf into one or more virtqueue buffers.
+ *
+ * Handles segmentation in both directions:
+ *  1. the mbuf packet may consist of several chained segments
+ *  2. a virtqueue buffer may be smaller than one mbuf segment, so the data
+ *     may have to be split across several consecutive vq descriptors
+ *
+ * The first descriptor's buffer starts with an SG_HDR_SIZE packet header
+ * which the caller fills in; payload copying starts right after it.
+ *
+ * @param cvq_desc     combined virtqueue descriptor table to fill in
+ * @param vq_descr_idx index of the first descriptor to use
+ * @param vq_bufs      backing memory for the descriptors
+ * @param max_segs     maximum number of vq descriptors that may be consumed
+ * @param mbuf         packet to copy
+ * @return number of vq descriptors used, or -1 if the packet did not fit
+ *         within max_segs descriptors
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	/* payload in the first vq buffer starts after the NT packet header */
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			/* offsets are 64-bit; %u would be a format mismatch (UB) */
+			printf("Seg %i: virtq buf first cpy src offs 0x%" PRIX64 ", dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/* budget exceeded; final check below reports -1 */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			/* offset is 64-bit; %u would be a format mismatch (UB) */
+			printf("Tx vq buf seg %i: Copy %i bytes - offset 0x%" PRIX64 "\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len promised more data than the segments held */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	}
+
+	/* convert from last-used index to a descriptor count */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * SCG (scatter-gather) Tx burst handler (registered as tx_pkt_burst).
+ *
+ * Copies up to nb_pkts mbufs into virtqueue buffers, prepending an
+ * SG_HDR_SIZE packet header per packet, then hands the descriptors to HW.
+ * Returns the number of packets actually sent; mbufs of sent packets are
+ * freed here.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* driver shutdown in progress - refuse to transmit */
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad runt frames to the 60-byte Ethernet minimum.
+		 * NOTE(review): only pkt_len/data_len are raised; the padding
+		 * bytes themselves are whatever is in the buffer, and for a
+		 * multi-segment mbuf only the first segment's data_len is
+		 * touched - confirm runts are always single-segment here.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	/* reserve vq descriptors/buffers for the whole burst */
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* fast path: one mbuf segment that fits in one vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* general path: segment-aware copy into the vq */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		/* data is copied out - the mbuf can be returned to its pool */
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* hand the filled descriptors to hardware */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO/IOMMU-map the memory backing one HW virtio queue:
+ * a 1MB control area for the combined descriptor rings followed by
+ * num_descr packet buffers of buf_size bytes each.
+ *
+ * Preferred layout is a single allocation mapped as one 1G region with
+ * identical offsets in HPA and IOVA; if that cannot be guaranteed (the
+ * allocation crosses a 1G boundary or the offsets differ) it falls back
+ * to mapping the control area and the packet buffers separately.
+ *
+ * Returns 0 on success, -1 on failure. On failure all memory allocated
+ * here is released again (the original code leaked on several paths).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/*
+		 * Fallback: allocate and map the 1MB control area on its own.
+		 * (renamed from "virt", which shadowed the freed outer pointer)
+		 */
+		size = 0x100000;
+		void *virt_ctrl = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!virt_ctrl)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt_ctrl, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			rte_free(virt_ctrl); /* was leaked on this path */
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt_ctrl;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt_ctrl, iova_addr);
+
+		/* descriptor bookkeeping array, one entry per packet buffer */
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt_ctrl);
+			return -1;
+		}
+
+		/* the packet buffers themselves, mapped as a second region */
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt_ctrl);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* was leaked: release everything allocated on this path */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt_ctrl);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	/* Optimal path: one mapping covering control area and buffers */
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt); /* was leaked on this path */
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	/* packet buffers start right after the 1MB control area */
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Mark the HW queue memory as released (vf_num == 0 means "not in use"). */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq == NULL)
+		return;
+	if (hwq->vf_num == 0)
+		return;
+
+	hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the DMA region, mark the queue
+ * released and free the backing memory. Returns 0 on success, -1 when the
+ * VFIO unmap fails (in which case nothing is freed).
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	const int vf_num = hwq->vf_num;
+	void *ctrl_virt = hwq->virt_queues_ctrl.virt_addr;
+	int res;
+
+	res = nt_vfio_dma_unmap(vf_num, ctrl_virt,
+				(uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(ctrl_virt);
+	return 0;
+}
+
+/* dev_ops tx_queue_release: free the HW resources behind one Tx queue. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* dev_ops rx_queue_release: free the HW resources behind one Rx queue. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Running count of HW queues handed out so far (not thread-safe). */
+static int num_queues_alloced;
+
+/*
+ * Reserve [num] consecutive HW queues.
+ * Returns the index of the first reserved queue, or -1 when fewer than
+ * [num] queues remain out of MAX_TOTAL_QUEUES.
+ */
+static int allocate_queue(int num)
+{
+	const int first_free = num_queues_alloced;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_alloced=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_alloced, num, MAX_TOTAL_QUEUES);
+
+	if (num > MAX_TOTAL_QUEUES - num_queues_alloced)
+		return -1;
+
+	num_queues_alloced += num;
+	return first_free;
+}
+
+/*
+ * dev_ops rx_queue_setup for the SCG data path.
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool bookkeeping is done; for
+ * all other ports the HW virtio queue memory is allocated/mapped and a
+ * managed Rx virt-queue is set up on the exception path host id.
+ * Returns 0 on success, -1 when queue memory allocation fails.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports need no HW queue; just record the pool and buf size */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* usable payload per mbuf = data room minus the reserved headroom */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	/*
+	 * NOTE(review): the return value is not checked - rx_q->vq may be
+	 * NULL here while 0 (success) is still returned; confirm whether
+	 * the Rx burst path tolerates a NULL vq or a check should be added.
+	 */
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * dev_ops tx_queue_setup for the SCG data path.
+ *
+ * Validates the queue id, allocates/maps the HW virtio queue memory,
+ * derives the HW bypass target id and sets up a managed Tx virt-queue.
+ * For physical ports the admin state is enabled once the queue is ready.
+ * Returns 0 on success, -1 on invalid queue id or allocation failure.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * Validate the queue id BEFORE indexing txq_scg[] (previously the
+	 * check ran after the first use and used '>', which let
+	 * tx_queue_id == nb_tx_queues slip through - off-by-one).
+	 */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	/* Override ports need no HW queue */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* map all virtual-port queues to this physical out port */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Set MTU for inline (physical) ports via the flow engine.
+ * Returns 0 on success, -EINVAL for non-physical ports, out-of-range MTU
+ * or when the flow engine rejects the value.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+
+	if (internals->type != PORT_TYPE_PHYSICAL)
+		return -EINVAL;
+	if (mtu < MIN_MTU_INLINE || mtu > MAX_MTU)
+		return -EINVAL;
+
+	return flow_set_mtu_inline(flw_dev, internals->port, mtu) ? -EINVAL : 0;
+}
+
+/*
+ * dev_ops mtu_set: program the MTU into the EPP block.
+ *
+ * Virtual ports: the exception-path queue keeps MAX_MTU while every
+ * virtual-port queue gets the requested value. Physical ports: the
+ * exception queue keeps MAX_MTU and the port itself gets the value.
+ * Returns 0 on success, a non-zero nthw_epp_set_mtu() error, or -EINVAL
+ * for out-of-range MTU / unsupported port type.
+ *
+ * NOTE(review): retval starts at positive EINVAL but failures later use
+ * -EINVAL - the initial value is always overwritten, yet the mixed sign
+ * convention is worth confirming against the ethdev API expectations.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int retval = EINVAL;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* apply the requested MTU to every virtual-port queue */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		/* then the physical port itself gets the requested MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* dev_ops rx_queue_start: only bookkeeping, no HW action required. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* dev_ops rx_queue_stop: only bookkeeping, no HW action required. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * dev_ops tx_queue_start: only bookkeeping, no HW action required.
+ * (parameter renamed from the misleading "rx_queue_id")
+ */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/*
+ * dev_ops tx_queue_stop: only bookkeeping, no HW action required.
+ * (parameter renamed from the misleading "rx_queue_id")
+ */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* dev_ops mac_addr_remove: clear the MAC address slot at @index. */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+		return;
+	}
+
+	/* out-of-range index: log and ignore (release builds skip assert) */
+	{
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+	}
+}
+
+/* dev_ops mac_addr_add: store @mac_addr in slot @index (vmdq unused). */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		eth_addrs[index] = *mac_addr;
+		return 0;
+	}
+
+	/* out-of-range index: log and reject (release builds skip assert) */
+	{
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+	}
+	return -1;
+}
+
+/* dev_ops mac_addr_set: slot 0 holds the primary MAC address. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * dev_ops set_mc_addr_list: copy the multicast address list into the
+ * per-port table, zeroing any unused slots. Returns 0 on success, -1
+ * when the list does not fit.
+ *
+ * NOTE(review): the `>=` comparison rejects a list of exactly
+ * NUM_MULTICAST_ADDRS_PER_PORT entries even though the loop below could
+ * hold it - confirm whether a zero sentinel slot is intentional or this
+ * is an off-by-one.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	/* copy the new list and clear the remaining slots */
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * dev_ops dev_configure: nothing to program in HW here; record that
+ * probing has finished and (re)assert promiscuous mode.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode.
+	 * (was "promiscuous ^= ~promiscuous", which always yields non-zero;
+	 * state the intent directly)
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * dev_ops dev_start: bring the port up.
+ *
+ * Virtual/override ports are simply reported as link-up. Physical ports
+ * wait up to ~5 seconds for the FPGA port link (sending before the port
+ * is ready produces garbage on the wire) and then apply any configured
+ * loopback mode. Always returns 0, even on link timeout.
+ *
+ * NOTE(review): uses the legacy ETH_LINK_UP/ETH_LINK_DOWN macro names
+ * while Rx code uses the RTE_-prefixed mbuf flags - confirm against the
+ * targeted DPDK release.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		/* lpbk_mode is a bitmask: bit 0 = host, bit 1 = line loopback */
+		if (internals->lpbk_mode) {
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
+		struct ntnic_tx_queue *tx_q = internals->txq_scg;
+
+		uint q;
+
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(rx_q[q].vq);
+
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(tx_q[q].vq);
+
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * DPDK eth_dev callback table for the NTNIC PMD.
 *
 * Deliberately NOT const: nthw_pci_dev_init() patches .mtu_set to
 * dev_set_mtu at probe time when the FPGA exposes an EPP module
 * (fpga_info->mp_nthw_epp != NULL).
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL, /* patched in at probe time when EPP is present */
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL, /* device cannot leave promiscuous mode */
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	uint64_t n_loop_iter_cnt = 0;
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+		n_loop_iter_cnt++;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
/*
 * Process-global state for virtual-port / PF-representor setup.
 * NOTE(review): presumably populated by setup_virtual_pf_representor_base()
 * during probe - confirm against that function's definition.
 */
static struct {
	struct rte_pci_device *vpf_dev; /* PCI device of the backing PF */
	struct rte_eth_devargs eth_da; /* parsed representor devargs */
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED]; /* per-virtual-port queue counts */
	uint16_t pf_backer_port_id; /* port id of the backing PF eth_dev */
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Global teardown for the PMD: release every Rx/Tx virtqueue owned by the
+ * port instances on the pmd_intern_base list, release the queues tracked in
+ * rel_virt_queue[] for the VF/vDPA path, then remove the exception-path VFIO
+ * mapping.  The eth_dev argument is unused - cleanup is driver-wide.
+ * Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	while (internals) {
+		/* Release Tx before Rx, in reverse queue order. */
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Queues registered via register_release_virtqueue_info(). */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: flag the shutdown thread via kill_pmd.  Any other signal
+ * is forwarded to the previously installed handler and re-raised.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		kill_pmd = 1;
+		return;
+	}
+
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/*
+ * Detached shutdown thread: polls the kill_pmd flag set by the SIGINT
+ * handler, runs the global deinit, then restores and re-raises SIGINT so
+ * the application terminates with the default semantics.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	for (;;) {
+		if (kill_pmd)
+			break;
+		usleep(100000);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the ctrl+C shutdown machinery (SIGINT handler + watcher thread)
+ * and estimate the TSC frequency once, to throttle stat polling.
+ * Returns 0 on success, -1 if the shutdown thread could not be created.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* Fix: the pthread_create() result was silently ignored; on failure
+	 * restore the original SIGINT handler and report the error.
+	 */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Failed to create shutdown thread\n",
+		       __func__);
+		signal(SIGINT, previous_handler);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point.  Verifies the EAL environment (PCI bus present,
+ * VFIO IOMMU enabled, hugepages available, IOVA mode), skips the secondary
+ * endpoint of a bifurcated NT200A01/NT50B01 device, then initializes the
+ * adapter and installs the ctrl+C shutdown handler.
+ * Returns the result of nthw_pci_dev_init(), or -1 on environment errors.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		/* Fix: message said "needs hugepages" (copy/paste from the
+		 * hugepage check below); this check is about PCI support.
+		 */
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs PCI\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		/* Non-fatal: VA mode works but PA is preferred. */
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			/* Only the primary endpoint (subsystem id 0x01) of a
+			 * bifurcated card is probed.
+			 */
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove hook: delegate to the generic helper with our deinit callback. */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	int res = rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+
+	return res;
+}
+
+/*
+ * Register one RTE logtype per NT log module (default level INFO) and
+ * raise the global log level to DEBUG.  Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END;
+			++module) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[idx],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Bridge the NT log abstraction onto rte_vlog(): map the NT severity to
+ * the matching RTE level and the NT module to its registered logtype.
+ * Modules outside the NT range are passed through unchanged.
+ */
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		rte_module =
+			(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	} else {
+		rte_module = module;
+	}
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/*
+ * Report whether DEBUG logging is active for an NT log module.
+ * Returns 1/0, or -1 if the module id is out of range.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+
+	return rte_log_get_level(nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)]) ==
+	       RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* EAL constructor: wire the NT log abstraction to the rte_log backends. */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor for the NTNIC PMD. */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+/* Register the PMD, its PCI id table, and the vfio-pci kernel module dependency. */
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Track a virtqueue for release at deinit time.  Records direction (rx)
+ * and whether it is a managed queue in the first free rel_virt_queue[]
+ * slot.  Returns 0 on success, -1 if the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Remove a previously registered virtqueue from the release table.
+ * Returns 0 if found and cleared, -1 otherwise.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Find the port internals whose vhost id matches vhid.
+ * Returns NULL when no instance on the global list matches.
+ */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *p;
+
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Find the port internals whose vhost socket path matches exactly.
+ * Returns NULL when no instance on the global list matches.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		/* Fix: use NT_LOG instead of a stray debug printf to stdout,
+		 * and evaluate strcmp() only once per node.
+		 */
+		const int cmp = strcmp(intern->vhost_path, path);
+
+		NT_LOG(DBG, ETHDEV,
+		       "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path, cmp);
+		if (cmp == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "[port:nvq,port:nvq,...]" mapping embedded in a devargs string
+ * and fill portq[] with the virtqueue count per virtual port.  The input
+ * string is modified in place.  Entries with an out-of-range port or
+ * queue count are silently skipped.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Locate the opening bracket of the list. */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Trim back to the closing bracket and terminate the list there. */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Fix: use reentrant strtok_r() - strtok() keeps hidden static state
+	 * and corrupts any concurrent tokenization in another thread.
+	 */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse devargs on the PF device for representor configuration.  A
+ * "portqueues=[...]" extension is consumed (and stripped, since DPDK's
+ * representor parser rejects trailing parameters) before the standard
+ * representor syntax is parsed.  Returns the number of representor ports
+ * requested (0 if none), or -1 on a devargs parse error.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			/* rte_eth_devargs_parse() returns a negative errno. */
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual (representor) port.  Representor ids
+ * are handed out in devargs order via the static next_rep_p counter when
+ * representors were configured on the PF; otherwise the VF number is
+ * used.  *n_vq is set to the configured queue count for the port (1 by
+ * default).  Returns NULL on allocation failure.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		/* Representors ride on the PF device and its backer port. */
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* Queue count from the "portqueues" devargs mapping, default 1. */
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* NULL-safe accessor for the underlying rte_device name. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev && dev->device.name) ? dev->device.name : NULL;
+}
+
+/* Devargs accepted by virtual ports: "vlan=<id>" and "sep=<0|1>". */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and wire up one virtual (vDPA/representor) port: parse per-port
+ * devargs (vlan, sep), allocate the ethdev and its private internals,
+ * allocate HW queues, attach to the flow device (VSWITCH profile) or to
+ * the physical in-port's flow device (inline profile), start the vDPA
+ * relay, and link the internals onto the global pmd_intern_base list.
+ * Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): on the error paths taken after rte_eth_vdev_allocate()
+ * succeeds, the allocated ethdev is not released - verify whether a
+ * rte_eth_dev_release_port() is needed there.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* Optional per-port devargs: vlan id and separate-socket flag. */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live between the physical and tunnel port ranges. */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* VSWITCH: this port gets its own flow device. */
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* Inline: share the flow device of the physical in-port and
+		 * add this port's queues to it.
+		 */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* NOTE(review): sprintf into path[128] is unbounded; a long
+	 * DVIO_VHOST_DIR_NAME could overflow - consider snprintf.
+	 */
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Replace the ethdev's data area with our own copy. */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append to the global internals list. */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Always reports zero packets received. */
+	return 0;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Always reports zero packets transmitted. */
+	return 0;
+}
+
+/*
+ * Create the DPDK ethdev for a virtual function port and install the
+ * appropriate SCG burst handlers.  PORT_TYPE_OVERRIDE ports get stub
+ * handlers because their queues are owned by VF/vDPA.
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev;
+	struct pmd_internals *internals;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	const bool override = internals->type == PORT_TYPE_OVERRIDE;
+
+	eth_dev->rx_pkt_burst = override ? eth_dev_rx_scg_dummy : eth_dev_rx_scg;
+	eth_dev->tx_pkt_burst = override ? eth_dev_tx_scg_dummy : eth_dev_tx_scg;
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual function ethdev: close all vDPA devices, free the
+ * private data and cloned data area, and release the ethdev port.
+ * Returns 0 on success, -1 if pci_dev is NULL or no ethdev was found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* look up the ethdev entry created for this device */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Port-0 share (in percent) of the Tx hash distribution. */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* Values and port indices used with lag_set_config()/lag_set_port_block(). */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* No-op transition handler: the link state combination did not change. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links up: split Tx hash distribution 50/50 between the ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 is usable: route 100% of the Tx distribution to it. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 is usable: give port 0 a 0% share of the Tx distribution. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/*
+ * Both links down: stop all Tx by zeroing both the port-0 path and the
+ * hash path.  Returns the accumulated lag_set_config() status.
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	int status = lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX,
+				    LAG_NO_TX);
+
+	status += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX,
+				 LAG_NO_TX);
+
+	return status;
+}
+
+/* Query the physical link state of one bonded port via the adapter info. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/* Encode the two link states as a 2-bit value; port 0 is the LSB. */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t p0 = lag_get_link_status(config, 0) ? 1 : 0;
+	const uint8_t p1 = lag_get_link_status(config, 1) ? 1 : 0;
+
+	return (p1 << 1) | p0;
+}
+
+/*
+ * Make the configured primary port active: give it the full Tx hash
+ * distribution and block Rx on the backup port.  Returns the accumulated
+ * configuration status.
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	const bool primary_is_port0 = config->primary_port == 0;
+	/* Port 0 gets 100% of the distribution when it is the primary,
+	 * 0% otherwise; the opposite port is blocked for Rx.
+	 */
+	const uint8_t port_0_distribution = primary_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		primary_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+	int status;
+
+	status = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the backup port */
+	status += lag_set_port_block(0, blocked_port);
+
+	return status;
+}
+
+/*
+ * Make the configured backup port active: give it the full Tx hash
+ * distribution and block Rx on the primary port.  Returns the accumulated
+ * configuration status.
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	const bool backup_is_port0 = config->backup_port == 0;
+	/* Port 0 gets 100% of the distribution when it is the backup,
+	 * 0% otherwise; the opposite (primary) port is blocked for Rx.
+	 */
+	const uint8_t port_0_distribution = backup_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		backup_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+	int status;
+
+	/* Tx only on the backup port */
+	status = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the primary port */
+	status += lag_set_port_block(0, blocked_port);
+
+	return status;
+}
+
+/*
+ * Active/backup LAG management loop: starts with the primary port active
+ * and swaps to the backup when the primary link drops (and back when it
+ * is restored).  Polls the link state every 500 ms until
+ * config->lag_thread_active is cleared.  Returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Handler invoked on a link-state transition in active/active mode. */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One entry of the transition table: (from, to) -> handler. */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/* Full 4x4 transition table: every (src, dst) pair has a handler. */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Find the transition handler for (current_state -> new_state) in the
+ * actions[] table.  Returns NULL if no entry matches.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const size_t n_actions = sizeof(actions) / sizeof(actions[0]);
+	size_t idx;
+
+	for (idx = 0; idx < n_actions; idx++) {
+		if (actions[idx].src_state == current_state &&
+				actions[idx].dst_state == new_state)
+			return actions[idx].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active (802.3ad-style) LAG management loop: starts balanced
+ * 50/50 with no blocked ports, then every 500 ms maps the observed link
+ * state transition through the actions[] table.  Runs until
+ * config->lag_thread_active is cleared.  Returns 0.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		/*
+		 * Fix: lookup_action() returns NULL when a transition is
+		 * missing from the table; guard against calling through a
+		 * NULL pointer instead of crashing the management thread.
+		 */
+		action = lookup_action(current_state, ports_status);
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry: dispatch to the loop matching the bonding
+ * mode (active/backup or 802.3ad active/active).  Returns NULL.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		/* Fix: log through NT_LOG like the rest of the driver
+		 * instead of a raw fprintf to stderr.
+		 */
+		NT_LOG(ERR, ETHDEV, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+#define SG_HDR_SIZE 12
+
+/*
+ * VQ buffers needed to fit all data in packet + header.
+ * Ceiling division of (payload + SG_HDR_SIZE) by the Tx buffer size,
+ * with a minimum of one segment. Implemented as a GCC statement
+ * expression so the argument is evaluated exactly once.
+ */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+/*
+ * Descriptor-ring index helpers; both wrap modulo the Tx descriptor count.
+ * NOTE(review): VIRTQ_DESCR_IDX references 'first_vq_descr_idx' from the
+ * caller's scope rather than taking it as a parameter - confirm intended.
+ */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/* NOTE(review): duplicate definition - SG_HDR_SIZE is already defined
+ * identically earlier in this header; harmless (same token sequence),
+ * but one of the two should be removed in a follow-up.
+ */
+#define SG_HDR_SIZE 12
+
+/*
+ * Rx scatter-gather packet header layout.
+ * Bit-fields total 96 bits (12 bytes); the size is enforced against
+ * SG_HDR_SIZE by the compile-time check typedefs further below.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/*
+ * Tx scatter-gather packet header layout.
+ * Bit-fields total 96 bits (12 bytes); the size is enforced against
+ * SG_HDR_SIZE by the compile-time check typedefs further below.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size. */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+typedef void *handle_t;
+
+/* Host queue memory: a control area plus per-packet buffer descriptors. */
+struct hwq_s {
+	int vf_num; /* owning virtual function number */
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* Per-queue Rx state, cache-line aligned. */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* backing host queue memory */
+	struct nthw_virt_queue *vq; /* underlying virt queue */
+	int nb_hw_rx_descr; /* number of hw Rx descriptors */
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* Per-queue Tx state, cache-line aligned. */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* backing host queue memory */
+	struct nthw_virt_queue *vq; /* underlying virt queue */
+	int nb_hw_tx_descr; /* number of hw Tx descriptors */
+	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+/* Fixed-capacity list of uint32_t values with an element count. */
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count; /* number of valid entries in value[] */
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* LAG management thread id */
+	uint8_t lag_thread_active; /* cleared to stop the management loop */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* rte_mtr meter profile list node. */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* rte_mtr meter instance list node, pointing at its profile. */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated communication layout for a virtual port
+ * (split ring, packed ring, or relay - see 'vport_comm' usage).
+ */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+/* Per-port private driver data (one instance per ethdev port). */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev; /* flow engine device handle */
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode; /* loopback mode - presumably; confirm semantics */
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config; /* LAG management configuration */
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles; /* meter profiles */
+	LIST_HEAD(_mtrs, nt_mtr) mtrs; /* meter instances */
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	struct pmd_internals *next; /* linked list of all port instances */
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..77f01900f8
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#pragma GCC diagnostic ignored "-Wunused-function"
+
+/* Storage for converted match items (rte_flow items -> flow elements). */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition: element list plus its backing match storage. */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted rte_flow attributes together with their match storage. */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted actions plus backing storage for their payload structs. */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/* Driver-private flow handle, see the note above about NTAPI statistics. */
+struct rte_flow {
+	void *flw_hdl; /* flow engine handle */
+	int used; /* slot in use */
+	uint32_t flow_stat_id; /* index into flow_stat_id_map */
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/* Driver-private item types, based at INT_MIN so they cannot collide
+ * with the public rte_flow_item_type values.
+ */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/* Driver-private action types, based at INT_MIN so they cannot collide
+ * with the public rte_flow_action_type values.
+ */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+static int convert_tables_intialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/*
+ * Map an rte_flow item type to the internal element type; yields -1
+ * when outside the table range (unmapped entries are also -1).
+ * GCC statement expression; the argument is evaluated exactly once.
+ */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+/*
+ * Map an rte_flow action type to the internal action type; yields -1
+ * when outside the table range (unmapped entries are also -1).
+ * GCC statement expression; the argument is evaluated exactly once.
+ */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal flow_error into an rte_flow_error.
+ * A NULL destination is tolerated (nothing is written); always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+
+	if (flow_error->type == FLOW_ERROR_NONE ||
+			flow_error->type == FLOW_ERROR_SUCCESS)
+		error->type = RTE_FLOW_ERROR_TYPE_NONE;
+	else
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to a free flow stat id by open addressing over
+ * flow_stat_id_map (linear probing, table size MAX_COLOR_FLOW_STATS).
+ * The slot stores mark + 1 so that 0 can denote a free slot.
+ * Caller must hold flow_lock (see create_flow_stat_id()).
+ * NOTE(review): if every slot is occupied this probe loop never
+ * terminates - consider bounding it to MAX_COLOR_FLOW_STATS steps.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow stat id under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	rte_spinlock_lock(&flow_lock);
+	uint32_t ret = create_flow_stat_id_locked(mark);
+
+	rte_spinlock_unlock(&flow_lock);
+	return ret;
+}
+
+/*
+ * Release a flow stat id: writing 0 marks the slot free. Out-of-range
+ * ids are silently ignored.
+ * NOTE(review): the _locked suffix suggests the caller should hold
+ * flow_lock - confirm at the call sites.
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id < MAX_COLOR_FLOW_STATS)
+		flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time population of the rte_flow -> internal type translation
+ * tables (and, under RTE_FLOW_DEBUG, their printable names). Entries
+ * not listed stay -1, meaning "unsupported"; CNV_TO_ELEM/CNV_TO_ACTION
+ * rely on that. Idempotent via convert_tables_intialized.
+ * NOTE(review): not thread-safe on first call - confirm callers
+ * serialize initialization. ('intialized' is a typo in the flag name;
+ * rename across the file in a follow-up.)
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_intialized)
+		return;
+
+	/* rte_flow item type -> internal flow element type */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* rte_flow action type -> internal flow action type */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_intialized = 1;
+}
+
+/*
+ * Parse a raw packet byte sequence (e.g. RAW encap/decap data) into a
+ * list of flow elements: Ethernet, any stacked VLAN tags, IPv4/IPv6,
+ * one L4 header (ICMP/TCP/UDP/SCTP) and, for UDP to the GTP-U port,
+ * the GTP header plus any extension headers.
+ *
+ * @param data     packet bytes to interpret
+ * @param preserve optional mask bytes laid out like 'data'; used for
+ *                 the element masks when non-NULL
+ * @param size     number of valid bytes in 'data'
+ * @param out      receives the elements, terminated by an
+ *                 FLOW_ELEM_TYPE_END entry; caller provides the room
+ * @return number of elements written (incl. END), or -1 on malformed
+ *         or unsupported input
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0; /* next output element index */
+	int pkti = 0; /* current parse offset into data */
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - one element per stacked 802.1Q/802.1ad tag */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - EtherType must agree with the IP version nibble */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field at byte offset 9 */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field at byte offset 6 */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* Destination port GTP-U (2152) means a GTP header follows */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is in 4-octet units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero extension length is invalid
+				 * (3GPP TS 29.281: minimum is 1) and,
+				 * unchecked, would keep pkti from
+				 * advancing, spinning this loop forever
+				 * on malformed input.
+				 */
+				if (ext_len == 0 ||
+						size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing unparsed bytes are rejected */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Initialize the converted attribute structure, copying group and
+ * priority from the RTE attributes when they are supplied.
+ * Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(attribute->attr));
+	if (attr != NULL) {
+		attribute->attr.group = attr->group;
+		attribute->attr.priority = attr->priority;
+	}
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_intialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE action list into the filter-layer action list used by the
+ * vswitch (full offload) profile.
+ *
+ * action       : output - converted actions plus their backing storage
+ * actions[]    : input RTE actions, terminated by RTE_FLOW_ACTION_TYPE_END
+ * max_elem     : capacity of action->flow_actions[]
+ * flow_stat_id : output - color/flow statistics id; taken from a MARK action
+ *                or allocated here when the caller supplied none
+ *
+ * Returns 0 on success, -1 on unknown action, unsupported RSS function or
+ * output overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_intialized)
+		initialize_global_cnv_tables();
+
+	/* MAX_COLOR_FLOW_STATS means "no stat id assigned yet" */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			/* Only the NT-private TUNNEL_SET action is allowed
+			 * outside the conversion table.
+			 */
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1:
+				/* Unreachable under the type >= 0 guard;
+				 * kept as a safety net.
+				 */
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* PRIX64 expects uint64_t; was cast to
+				 * unsigned long long (format mismatch)
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Map the user mark onto an FPGA
+					 * color stat id
+					 */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE action list into the filter-layer action list used by the
+ * inline profile.
+ *
+ * action       : output - converted actions plus their backing storage
+ * actions[]    : input RTE actions, terminated by RTE_FLOW_ACTION_TYPE_END
+ * max_elem     : capacity of action->flow_actions[]
+ * queue_offset : added to QUEUE action indices so VF/vDPA ports map onto
+ *                their own queue range
+ *
+ * Returns 0 on success, a negative value on conversion failure or overflow.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* PRIX64 expects uint64_t; was cast to
+				 * unsigned long long (format mismatch)
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				/* item_count is a signed int: print with %d */
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %d\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				/* item_count is a signed int: print with %d */
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %d\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Rebase the queue index onto this port's
+				 * allocated queue range
+				 */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#pragma GCC diagnostic pop
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+/* The PMD and the filter layer must agree on the color-stat count */
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Static table of PMD flow handles; entries are claimed/released under flow_lock */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * A handle is "typecast" when it does not point into the nt_flows[] table,
+ * i.e. it is an opaque handle returned directly by the lower filter layer.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *table_begin = &nt_flows[0];
+	const void *table_end = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *handle = (void *)flow;
+
+	return !(handle >= table_begin && handle <= table_end);
+}
+
+/*
+ * Convert RTE attr/items/actions into the filter-layer representation.
+ * On success the cnv_* output structs are filled and 0 is returned; on any
+ * conversion error, 'error' is populated and -1 is returned.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Bug fix: 'dev' was dereferenced (fpga_info, dev->type) before the
+	 * NULL check. Validate it before any use.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		/* Group > 0 flows are handed straight to the lower layer */
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy: tear down a flow in the filter layer and, for flows
+ * tracked in nt_flows[], release the table slot, stat id and statistics.
+ * Returns 0 on success (NULL flow is a no-op success).
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/*
+		 * Bug fix: a typecast handle is an opaque lower-layer object,
+		 * not a struct rte_flow - the PMD must not write stat fields
+		 * through it (previously done below for both handle kinds).
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	/* Release the table slot and its color stat id */
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate: convert the RTE specification and ask the filter
+ * layer whether it could be programmed, without creating anything.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s cnv_attr;
+	struct cnv_match_s cnv_match;
+	struct cnv_action_s cnv_action;
+	uint32_t stat_id = 0;
+
+	if (convert_flow(eth_dev, attr, items, actions, &cnv_attr, &cnv_match,
+			 &cnv_action, error, &stat_id) < 0)
+		return -EINVAL;
+
+	int res = flow_validate(dev->flw_dev, cnv_match.flow_elem,
+				cnv_action.flow_actions, &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow create: convert the RTE spec and program the filter layer.
+ * Inline-profile flows in group > 0 return the lower-layer handle directly
+ * (a "typecast" handle); all other flows get a slot in nt_flows[].
+ * Returns the flow handle, or NULL with 'error' set on failure.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline profile, group > 0: hand the lower-layer handle back as-is */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim the first free slot in the handle table under the lock */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			/* Only mark the slot used when the stat id is valid */
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Lower-layer create failed: release slot and stat id */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (cross-port) statistics refresh */
+uint64_t last_stat_rtc;
+
+/*
+ * Refresh the PMD's cached port and per-flow (color) statistics from the
+ * adapter counters. Per-port stats are rate-limited to once a second per
+ * port; the global color counters are additionally refreshed at most once
+ * a second across all ports.
+ * Returns 0 on success (including a rate-limited early exit), -1 on a bad
+ * port index or missing stat context.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/*
+	 * NOTE(review): 'if_index > NUM_ADAPTER_PORTS_MAX' still allows
+	 * if_index == NUM_ADAPTER_PORTS_MAX; if the per-port arrays below
+	 * hold exactly NUM_ADAPTER_PORTS_MAX entries this is an off-by-one -
+	 * confirm against the array declarations (likely should be >=).
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	/* Lock order: hwlock first, then the driver stat mutex below */
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold each used flow's color counter delta into its cached stats */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow query: report cached per-flow statistics.
+ * Only the COUNT action is supported; typecast (lower-layer) handles carry
+ * no PMD-side statistics and are rejected.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Make sure the cached counters are current before reading them */
+	poll_statistics(internals);
+
+	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+
+	struct rte_flow_query_count *qcnt = data;
+
+	if (qcnt) {
+		if (flow) {
+			qcnt->hits = flow->stat_pkts;
+			qcnt->hits_set = 1;
+			qcnt->bytes = flow->stat_bytes;
+			qcnt->bytes_set = 1;
+
+			if (qcnt->reset) {
+				flow->stat_pkts = 0UL;
+				flow->stat_bytes = 0UL;
+				flow->stat_tcp_flags = 0;
+			}
+		} else {
+			qcnt->hits_set = 0;
+			qcnt->bytes_set = 0;
+		}
+	}
+
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/*
+ * Debug helper: dump every field of an rte_flow_tunnel to the filter log.
+ * NOTE(review): tun_id is printed with "%016lx"; if tun_id is 64-bit this
+ * breaks on ILP32 targets - confirm the field type and consider PRIx64.
+ */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	/* inet_ntoa uses a static buffer, so convert one address per call */
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * Action pair handed out by ntnic_tunnel_decap_set(): the NT-private
+ * TUNNEL_SET followed by a decap action filled in per tunnel type.
+ * NOTE(review): shared mutable static - assumes decap_set calls are
+ * serialized by the caller; verify against rte_flow usage.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * Supply the PMD-internal action list used to decap a tunnel.
+ * Only VXLAN tunnels are supported; anything else yields -ENOTSUP.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN)
+		return -ENOTSUP;
+
+	_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* Single NT-private TUNNEL match item handed out by ntnic_tunnel_match() */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * Supply the PMD-internal item list that matches tunneled (restored)
+ * packets; a single NT-private TUNNEL item is always returned.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * Reconstruct tunnel information for a packet delivered on a miss path.
+ * The FDIR mark written by the FPGA encodes the port in the upper 8 bits
+ * and the flow stat id in the lower 24 bits; the tunnel definition is
+ * looked up from those via flow_get_tunnel_definition().
+ * Returns 0 with 'info' filled on a hit, -EINVAL otherwise.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		/* Decode port (upper byte) and stat id (lower 24 bits) */
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* TTL/TOS are not stored in the tunnel config; use defaults */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * Release hook for the decap action list. The list is a static array
+ * (_pmd_actions), so there is nothing to free.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * Release hook for the tunnel match item list. The list is a static
+ * object (_pmd_items), so there is nothing to free.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow driver operations table exposed by this PMD (flush unsupported) */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow from already-converted attribute/match/action structures.
+ * Implemented in ntnic_filter.c; returns the new flow handle or NULL,
+ * with details reported through @error.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One (RTE flag, NT flag) pair of the RSS translation table. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to a { RTE_<name>, NT_<name> } table entry. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/* RSS hash-field flags that have an NT counterpart; flags absent here
+ * are unsupported and dropped during translation.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Look up the NT RSS flag corresponding to @rte_flag.
+ * Returns a pointer into the translation table, or NULL when the RTE
+ * flag has no NT counterpart.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Reverse lookup: find the RTE RSS flag corresponding to @nt_flag.
+ * Returns a pointer into the translation table, or NULL when unmapped.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Translate an RTE RSS hash-field bitmask into the NT representation.
+ * Each set bit is looked up in the translation table; unsupported RTE
+ * bits are logged and skipped, so the result may cover only a subset
+ * of the requested fields.
+ * Fixes: use standard "unsigned int" instead of non-standard "uint",
+ * and "%u" to match the unsigned loop counter in the log format.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate an NT RSS field bitmask back into the RTE representation.
+ * Every NT flag must have an RTE counterpart (NT flags are a subset of
+ * the RTE set), hence the assert on the reverse lookup.
+ * Fix: use standard "unsigned int" instead of non-standard "uint".
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Fix: add the include guard that the other new headers
+ * (ntnic_filter.h, ntnic_meter.h) already carry.
+ */
+#ifndef __NTNIC_HSHCONFIG_H__
+#define __NTNIC_HSHCONFIG_H__
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* __NTNIC_HSHCONFIG_H__ */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* MSB of a 32-bit id; when set, a profile/meter id denotes egress. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Locate a meter profile by id in the per-device list; NULL if absent. */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
+/*
+ * rte_mtr meter_profile_add callback (vswitch profile).
+ * Accepts the profile on virtual ports and for egress ids (MSB set);
+ * ingress metering on physical ports is rejected.
+ * Fix: pass __FILE__ for the "[%s:%u]" slot of the trace -- the original
+ * passed __func__ twice, printing the function name in both fields
+ * (compare the filter-code traces, which use __FILE__ there).
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		/* Reject duplicate profile ids */
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr meter_profile_delete callback (vswitch profile).
+ * Unlinks and frees the profile; ENODEV when the id is unknown.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	/* No meters referencing the profile are checked here --
+	 * NOTE(review): a meter created against this profile keeps a raw
+	 * pointer to it (see eth_mtr_create); confirm lifetime handling.
+	 */
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Locate an MTR object by id in the per-device list; NULL if absent. */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *m;
+
+	LIST_FOREACH(m, &dev_priv->mtrs, next) {
+		if (m->mtr_id == mtr_id)
+			return m;
+	}
+
+	return NULL;
+}
+
+/* Rate expressed as whole units plus 1024ths. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Convert byte/s to byte/period in the form integer + fractional/1024.
+ * The period depends on the clock frequency and other parameters which,
+ * combined, give a multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps * 10^-12
+ * (The original comment said "/ 10^-12", which contradicts the code;
+ * the code divides by 10^12 since period_ps is in picoseconds.)
+ * NOTE(review): dividend = byte_per_second * period_ps is 64-bit and
+ * can wrap for extreme rates -- assumed within range for the periods
+ * used by the callers below; confirm against caller limits.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /* 10^12 ps per second */
+
+	res.integer = dividend / divisor;
+	const uint64_t remainder = dividend % divisor;
+
+	res.fractional = 1024ull * remainder / divisor;
+	return res;
+}
+
+/* Egress rate for a physical port: period constant 8 * 3333 ps
+ * (presumably derived from the port/FPGA clock -- TODO confirm).
+ */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Egress rate for a virtual port: period constant 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr meter_enable callback (vswitch profile).
+ * Programs the meter's profile into hardware: egress profiles go to the
+ * EPP QoS registers (vport or txp depending on port type), ingress
+ * profiles go to the DBS TX-QoS registers.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* One-time global ingress QoS rate setup, shared by all ports
+	 * in this process.
+	 */
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* Clamp to the minimum representable rate */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* Clamp to the minimum representable rate */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Turn off ingress TX QoS for this port by writing a zeroed config. */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr meter_disable callback (vswitch profile).
+ * Clears the hardware QoS configuration for the meter: EPP registers
+ * for egress ids (MSB set), DBS TX QoS for ingress.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * rte_mtr create callback (vswitch profile).
+ * Allocates an MTR object bound to an existing profile and links it
+ * into the per-device list; enables it immediately when requested.
+ * Ingress meters on physical ports are rejected.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	/* NOTE(review): stores a raw pointer to the profile; see the
+	 * lifetime note on eth_meter_profile_delete.
+	 */
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy callback (vswitch profile).
+ * Clears the meter's hardware QoS state, then unlinks and frees the
+ * MTR object.
+ * Fix: return ENODEV (not EEXIST) when the meter id is NOT found --
+ * EEXIST means "already exists" and contradicts the message; this also
+ * matches eth_meter_profile_delete.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr capabilities_get callback (inline/FLM profile).
+ * Reports the FLM meter limits: shared, color-blind, byte-mode
+ * RFC 2698 meters with green-only statistics.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* NOTE(review): 0xfff << 0xf is evaluated as int before the cast;
+	 * the value fits in 31 bits so there is no overflow here.
+	 */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_add callback (inline/FLM profile).
+ * Only byte-mode RFC 2698 profiles with equal committed/peak rates are
+ * accepted; the profile is programmed via flow_mtr_set_profile().
+ * Fix: drop the bogus __rte_unused on @error -- it is used on every
+ * error path below.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete callback (inline/FLM profile).
+ * Clears the profile slot in hardware by writing an all-zero profile.
+ * Fix: drop the __rte_unused markers -- all three parameters are used
+ * in the body.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add callback (inline/FLM profile).
+ * Only the fixed policy "green: pass (END/VOID/PASSTHRU), yellow: drop,
+ * red: drop" is accepted; anything else is rejected.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete callback (inline/FLM profile).
+ * Nothing to tear down in hardware; only validates the id.
+ * Fix: keep __rte_unused on @dev only -- @policy_id and @error are
+ * both used in the body.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr create callback (inline/FLM profile).
+ * Validates the color-blind/shared/enabled constraints and the id
+ * ranges, then offloads the meter via flow_mtr_create_meter().
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* NOTE(review): profile id is validated against the policy max,
+	 * same as in eth_mtr_meter_profile_add_inline -- confirm this is
+	 * the intended limit for profiles.
+	 */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy callback (inline/FLM profile).
+ * Removes the offloaded meter via flow_mtr_destroy_meter().
+ * Fix: drop the bogus __rte_unused on @error -- it is used on both
+ * error paths below.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update callback (inline/FLM profile), repurposed to
+ * adjust the MTR bucket: bit 63 of @adjust_value must be set as an
+ * "adjust" marker, the low 32 bits carry the adjustment.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the marker bit before range checking the payload */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read callback (inline/FLM profile).
+ * Reads the green packet/byte counters for the meter; @clear resets
+ * them after the read.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* rte_mtr ops for the vswitch FPGA profile (EPP/DBS based). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* rte_mtr ops for the inline FPGA profile (FLM based). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * ethdev mtr_ops_get hook: select the rte_mtr ops table matching the
+ * adapter's FPGA profile (vswitch or inline). Returns -1 and logs an
+ * error for unknown/capture profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/*
+ * ethdev mtr_ops_get hook: stores the rte_mtr ops table for @dev's FPGA
+ * profile into @ops; returns 0 on success, -1 for unsupported profiles.
+ * Fix: drop __rte_unused from @dev -- the definition dereferences it.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];
+	struct rte_vdpa_device *vdev;
+	int vid;
+	uint32_t index;
+	uint32_t host_id;
+	uint32_t rep_port;
+	int rxqs;
+	int txqs;
+	uint64_t flags;
+	struct rte_pci_addr addr;
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			} else {
+				if (queue_id >= vport[i].txqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			}
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev); /* VF number derived from VFIO mapping */
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev; /* NOTE(review): no bounds check vs MAX_VDPA_PORTS - confirm callers cap device count */
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT; /* vhost-user client mode: QEMU is the server */
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt; /* slot index doubles as the vhost handle returned to the caller */
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			vport[i].ifname[0] = '\0'; /* mark slot free */
+			return; /* NOTE(review): returns after the FIRST active port - confirm whether all ports should be closed */
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		if ((1ULL << virt_features[i].id) ==
+				(features & (1ULL << virt_features[i].id)))
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	switch (pci_dev->id.vendor_id) {
+	case NT_HW_NAPATECH_PCI_VENDOR_ID:
+		switch (pci_dev->id.device_id) {
+		case NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:
+			return "NT200A02";
+		case NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:
+			return "NT50B01";
+		}
+		break;
+	}
+
+	return "Unknown";
+}
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+struct vring_info {
+	uint64_t desc;
+	uint64_t avail;
+	uint64_t used;
+	uint16_t size;
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;
+	struct nthw_virt_queue *p_vq;
+
+	int enable;
+};
+
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	int found = 0;
+	struct internal_list *list;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next)
+	{
+		if (vdev == list->internal->vdev) {
+			found = 1;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (!found)
+		return NULL;
+
+	return list;
+}
+
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	int found = 0;
+	struct internal_list *list;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next)
+	{
+		if (pdev == list->internal->pdev) {
+			found = 1;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (!found)
+		return NULL;
+
+	return list;
+}
+
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	struct rte_vhost_mem_region *reg;
+	uint64_t gpa = 0;
+	uint32_t i;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (i = 0; i < mem->nregions; i++) {
+		reg = &mem->regions[i];
+		if (hva >= reg->host_user_addr &&
+				hva < reg->host_user_addr + reg->size) {
+			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the datapath for a device: push the current avail/used indexes back
+ * to vhost and release the HW virtqueues.
+ * Always returns 0; per-vring release failures are only logged.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Save ring positions so vhost can resume/migrate consistently. */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		/*
+		 * Queues are only released when IN_ORDER or RING_PACKED was
+		 * negotiated (cached in hw->negotiated_features).
+		 * NOTE(review): the log text mentions only VIRTIO_F_IN_ORDER
+		 * even when VIRTIO_F_RING_PACKED triggered the branch.
+		 */
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type: 0 = Rx queue, 1 = Tx queue (see set_vring_state). */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		/* Mark the descriptor area invalid; set_vring_state recreates it. */
+		p_vr_inf->desc = 0UL;
+	}
+
+	/* Dirty-page logging (live migration) is not implemented. */
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Buffer size for the VFIO_DEVICE_SET_IRQS ioctl: header plus one eventfd
+ * per vring (Rx+Tx per queue pair).
+ * NOTE(review): the trailing "+ 1" adds a single byte, not room for one
+ * extra int vector (the config vector at index 0) - presumably
+ * "(NTVF_VDPA_MAX_QUEUES * 2 + 1)" was intended; confirm.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Enable MSI-X interrupts through VFIO: vector 0 carries the device
+ * interrupt fd, vectors 1..nr_vring carry each vring's callfd.
+ * Returns 0 on success (also when too many vectors are requested, since
+ * polling drivers in the guest keep working), -1 on ioctl failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/*
+	 * One callfd per vring. Step one vring at a time: the previous
+	 * pairwise loop (i += 2, touching i and i + 1) queried vring
+	 * nr_vring and wrote fd_ptr one slot past the vector table when
+	 * nr_vring was odd.
+	 */
+	for (i = 0; i < nr_vring; i++) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Disable all MSI-X interrupts for the device via VFIO.
+ * Idempotent: returns 0 immediately if interrupts are not enabled.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int len;
+
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* count = 0 with DATA_NONE/ACTION_TRIGGER disables the whole index;
+	 * only the header is needed, hence argsz = sizeof(struct vfio_irq_set).
+	 */
+	len = sizeof(struct vfio_irq_set);
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = len;
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath state with the started/dev_attached flags:
+ * start it when both flags are set and it is not running, stop it (and
+ * tear down interrupts and DMA mappings) when either flag is cleared
+ * while running. Serialized by internal->lock.
+ * Returns 0 on success or the failing step's error code.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/*
+			 * Log before jumping: previously the NT_LOG followed
+			 * the goto and was unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vhost dev_conf callback: a frontend attached to this vid.
+ * Records the vid, marks the device attached and kicks the datapath
+ * state machine. Returns 0 on success, -1 on unknown device.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	internal->vid = vid;
+
+	/* dev_attached must be set before the datapath update reads it. */
+	atomic_store(&internal->dev_attached, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost dev_close callback: the frontend detached.
+ * Stops the datapath and invalidates the cached virtqueue pointers so a
+ * later re-attach cannot use stale queues.
+ * Returns 0 on success, -1 on unknown device.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+
+	atomic_store(&internal->dev_attached, 0);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	uint32_t i;
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (i = 0; i < hw->nr_vring; i++)
+		hw->vring[i].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost set_features callback. Succeeds (0) unless the frontend asks for
+ * dirty-page logging (live migration): that needs a relay core which this
+ * driver does not implement, so -1 is returned in that case.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/*
+ * vhost callback: return the VFIO group fd for the device behind @vid,
+ * or -1 when the vid does not map to a known vDPA device.
+ */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/*
+ * vhost callback: return the VFIO device fd for the device behind @vid,
+ * or -1 when the vid does not map to a known vDPA device.
+ */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/*
+ * vDPA op: report the number of queue pairs supported by @vdev through
+ * @queue_num. Returns 0 on success, -1 on unknown device.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the virtio feature bits this device supports through
+ * @features. Returns 0 on success, -1 on unknown device or NULL output
+ * pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the supported vhost-user protocol feature bits.
+ * NOTE(review): @vdev is tagged __rte_unused but is passed to NT_LOG
+ * below - harmless, but the attribute is misleading.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Final queue bring-up, called once after the last vring is enabled:
+ * switch on VFIO MSI-X interrupts, then enable Rx/Tx for every vring
+ * (even index = Rx, odd index = Tx).
+ * Returns 0 on success or the interrupt setup's error code.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	int ret = 0;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Report through the driver log, not printf, like the rest of the file. */
+		NT_LOG(ERR, VDPA, "ERROR - ENABLE INTERRUPT via VFIO\n");
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (uint32_t i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+
+/*
+ * vhost set_vring_state callback: enable/disable a single vring.
+ * On first enable of a vring without a descriptor area the DMA mapping
+ * and HW vring are created; when the last vring comes up the VFIO
+ * interrupts and FPGA Rx/Tx are configured. The inline FPGA profile
+ * defers that final configuration until after the enable flag is set.
+ * Returns 0 on success, -1 on bad device/vring index, or the error from
+ * ntvf_vdpa_configure_queue().
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx+Tx) per queue pair. */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type: 0 = Rx queue, 1 = Tx queue. */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* No HW vring yet: map guest memory and create it. */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA driver ops registered with the vhost library.
+ * Callbacks left NULL (migration_done, get_notify_area) are features this
+ * driver does not provide.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate the per-device state, set up VFIO, register the
+ * vDPA device with vhost and add it to the internal device list.
+ * Returns 0 on success, -1 on failure (all allocations are freed).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* goto error (not a bare return) so list/internal are freed. */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	/* rte_free(NULL) is a no-op, so partial allocation is handled. */
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, release VFIO resources, unregister the
+ * vDPA device and free the per-device state.
+ * Returns 0 on success, -1 when the device is not in the internal list.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes the datapath update stop the device. */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * PCI id table.
+ * NOTE(review): contains only the all-zero terminator, so no device is
+ * matched by vendor/device id - presumably devices are bound explicitly
+ * (e.g. via devargs/allow list); confirm intended behavior.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver object for the ntnic VF vDPA devices. */
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the ntnic VF vDPA driver (probe/remove entry points
+ * and function-trace logging helpers).
+ */
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit enter/leave debug logs in every function. */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive the driver-internal device number (0-255) from the PCI address:
+ * low 5 bits of devid shifted left by 3, plus the low 3 function bits.
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	int devid_bits = (pdev->addr.devid & 0x1f) << 3;
+	int func_bits = pdev->addr.function & 0x7;
+
+	return devid_bits + func_bits;
+}
+
+/* Internal API */
+/* Per-device VFIO state, indexed by the number from nt_vfio_vf_num(). */
+struct vfio_dev {
+	int container_fd;	/* VFIO container fd (or default container) */
+	int group_fd;		/* IOMMU group fd bound to the container */
+	int dev_fd;		/* device fd from the interrupt handle */
+	uint64_t iova_addr;	/* next free IOVA, bumped 1G per mapping */
+};
+
+/* One slot per possible device number (see nt_vfio_vf_num). */
+static struct vfio_dev vfio_list[256];
+
+/* Look up the VFIO state slot for @vf_num; NULL for out-of-range ids. */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	return (vf_num >= 0 && vf_num <= 255) ? &vfio_list[vf_num] : NULL;
+}
+
+/* External API */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+			       &iommu_group_num);
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Tear down the VFIO container of a device previously set up by
+ * nt_vfio_setup(). Returns 0 on success, -1 for an illegal device id.
+ * NOTE(review): unlike the error path of nt_vfio_setup(), this destroys
+ * vfio->container_fd unconditionally - for device 0 that is the shared
+ * default container; confirm that is intended.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map @virt_addr (length @size) into the device's IOMMU at the next free
+ * 1G-aligned IOVA and return that IOVA through @iova_addr. For 1G-sized
+ * buffers the mapping is aligned down to the 1G boundary and the offset
+ * is added back to the returned IOVA.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Format arguments must match the conversion specifiers exactly:
+	 * cast virt_addr (void *) to uint64_t for PRIX64 and print the
+	 * uint64_t size with PRIu64 (previously %d) - mismatches are UB.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	/* Reserve a full 1G window per mapping so IOVAs never overlap. */
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a mapping created by nt_vfio_dma_map(): recompute the 1G-aligned
+ * base/IOVA for 1G-sized buffers and unmap from the device's container.
+ * Returns 0 on success (including when no container is configured),
+ * -1 on failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		/* Map was created at the aligned base; remove the offset. */
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Nothing was ever mapped if no container exists - treat as done. */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo a mapping created by nt_vfio_dma_map_vdpa().
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       res, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the VFIO container fd for @vf_num, or -1 for an illegal id.
+ */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message: this is a getter, not remove. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/*
+ * Return the VFIO group fd for @vf_num, or -1 for an illegal id.
+ */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message: this is a getter, not remove. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/*
+ * Return the VFIO device fd for @vf_num, or -1 for an illegal id.
+ */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message: this is a getter, not remove. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* Forward declaration via RTE_INIT: run nt_vfio_init() as a constructor
+ * at shared-object load time, before main().
+ */
+RTE_INIT(nt_vfio_init);
+
+/* Register this module's DMA map/unmap callbacks with the nt_util layer. */
+static void nt_vfio_init(void)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* VFIO helper API: container/group/device fd management and DMA
+ * map/unmap for the ntnic PMD. Devices are identified by the number
+ * returned from nt_vfio_vf_num()/nt_vfio_setup().
+ */
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Returns the device number on success, -1 on failure. */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* fd getters: -1 for an illegal device id. */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* Driver-chosen IOVA: the assigned IOVA is returned via *iova_addr. */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA variant: guest physical address is used directly as the IOVA. */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* One xstat descriptor: the exported name plus where to read the counter. */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE]; /* name reported to ethdev */
+	uint8_t source; /* counter source: 1 = RX port, 2 = TX port, 3 = FLM */
+	unsigned int offset; /* byte offset of the uint64_t counter in the source struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	/* { name, source (1 = RX, 2 = TX), offset into port_counters_vswitch_v1 } */
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	/* { name, source (1 = RX, 2 = TX, 3 = FLM), offset into source struct } */
+	/* RX port counters (struct port_counters_v2) */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	/* TX port counters (struct port_counters_v2) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18 and later (also carries the FLM 0.20 counters below)
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	/* { name, source (1 = RX, 2 = TX, 3 = FLM), offset into source struct } */
+	/* RX port counters (struct port_counters_v2) */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	/* TX port counters (struct port_counters_v2) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ */
+/* Row size: the larger of the vswitch and capture (v2) name-table sizes. */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/*
+ * Per-port snapshot of counter values taken at the last xstats reset.
+ * Row 0 also holds the adapter-wide FLM snapshot (see nthw_xstats_reset()).
+ */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with the stat mutex held
+ */
+/*
+ * Fill 'stats' with up to 'n' xstat values for 'port'.
+ *
+ * Each value is the raw counter read from the selected source struct minus
+ * the snapshot recorded by nthw_xstats_reset(), i.e. counts since the last
+ * reset.  Returns the number of entries written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Select the counter structs and the matching name table. */
+	if (is_vswitch) {
+		flm_ptr = NULL; /* vswitch layout has no FLM counters */
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	/*
+	 * NOTE(review): the casts below assume each counter is a uint64_t at
+	 * names[].offset and suitably aligned — confirm against the counter
+	 * struct definitions.
+	 */
+	for (i = 0; i < n && i < nb_names; i++) {
+		stats[i].id = i;
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			stats[i].value =
+				*((uint64_t *)&rx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 2:
+			/* TX stat */
+			stats[i].value =
+				*((uint64_t *)&tx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 3:
+			/* FLM stat; adapter-wide, so the snapshot lives in row 0 */
+			if (flm_ptr) {
+				stats[i].value =
+					*((uint64_t *)&flm_ptr[names[i].offset]) -
+					nthw_xstats_reset_val[0][i];
+			} else {
+				stats[i].value = 0;
+			}
+			break;
+		default:
+			stats[i].value = 0;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*
+ * Fill 'values' with the xstats selected by 'ids' for 'port'.
+ *
+ * As in nthw_xstats_get(), each value is relative to the last reset.
+ * Returns the number of ids that were in range.
+ *
+ * NOTE(review): entries of 'values' whose id is out of range are left
+ * unwritten — callers presumably pre-initialize or ignore them; verify.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+	int count = 0;
+
+	/* Select the counter structs and the matching name table. */
+	if (is_vswitch) {
+		flm_ptr = NULL; /* vswitch layout has no FLM counters */
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] < nb_names) {
+			switch (names[ids[i]].source) {
+			case 1:
+				/* RX stat */
+				values[i] =
+					*((uint64_t *)&rx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 2:
+				/* TX stat */
+				values[i] =
+					*((uint64_t *)&tx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 3:
+				/* FLM stat; adapter-wide, snapshot in row 0 */
+				if (flm_ptr) {
+					values[i] =
+						*((uint64_t *)&flm_ptr
+						  [names[ids[i]].offset]) -
+						nthw_xstats_reset_val[0][ids[i]];
+				} else {
+					values[i] = 0;
+				}
+				break;
+			default:
+				values[i] = 0;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * Snapshot the current counter values for 'port' into
+ * nthw_xstats_reset_val, so subsequent nthw_xstats_get*() calls report
+ * deltas relative to this point.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Select the counter structs and the matching name table. */
+	if (is_vswitch) {
+		flm_ptr = NULL; /* vswitch layout has no FLM counters */
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_ptr[names[i].offset]);
+			break;
+		case 2:
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_ptr[names[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM stat
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (flm_ptr && strcmp(names[i].name, "flm_count_current") != 0) {
+				/* FLM is adapter-wide: snapshot kept in row 0 */
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_ptr[names[i].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be held
+ */
+/*
+ * Copy xstat names into the caller's array.
+ *
+ * When xstats_names is NULL only the number of available names for the
+ * selected counter layout is reported.  Otherwise returns the number of
+ * names actually copied (bounded by 'size').
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *name_tbl;
+	uint32_t name_cnt;
+	unsigned int idx;
+	int copied = 0;
+
+	/* Pick the name table matching the active counter layout. */
+	if (is_vswitch) {
+		name_tbl = nthw_virt_xstats_names;
+		name_cnt = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		name_tbl = nthw_cap_xstats_names_v1;
+		name_cnt = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		name_tbl = nthw_cap_xstats_names_v2;
+		name_cnt = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (!xstats_names)
+		return name_cnt;
+
+	for (idx = 0; idx < size && idx < name_cnt; idx++) {
+		strlcpy(xstats_names[idx].name, name_tbl[idx].name,
+			sizeof(xstats_names[idx].name));
+		copied++;
+	}
+
+	return copied;
+}
+
+/*
+ * Copy the xstat names selected by 'ids' into the caller's array.
+ *
+ * When xstats_names is NULL only the number of available names for the
+ * selected counter layout is reported.
+ *
+ * Returns the number of ids that resolved to a name.  Out-of-range ids are
+ * skipped and not counted; previously every entry was counted, which
+ * disagreed with nthw_xstats_get_by_id() and could over-report entries
+ * whose names were never written.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Pick the name table matching the active counter layout. */
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			count++; /* count only ids that resolved to a name */
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Copy xstat names; with xstats_names == NULL, report the name count. */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Read up to n xstat values for a port, relative to the last reset. */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Snapshot current counters so later reads report deltas. */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* Id-indexed variants of the two getters above. */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH 8/8] net/ntnic: adds socket connection to PMD
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (5 preceding siblings ...)
  2023-08-16 13:25 ` [PATCH 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-16 13:25 ` Mykola Kostenok
  2023-08-16 14:46   ` Stephen Hemminger
  2023-08-16 14:47   ` Stephen Hemminger
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (13 subsequent siblings)
  20 siblings, 2 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-16 13:25 UTC (permalink / raw)
  To: dev; +Cc: Christian Koue Muf

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1310 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5354 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index faaba95af3..262ce436b9 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -1,11 +1,27 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2020-2023 Napatech A/S
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
 # cflags
 cflags += [
     '-std=c11',
 ]
 
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -19,6 +35,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -40,6 +57,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	const char *param;
+	struct func_s *sub_funcs;
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply: a 32-bit error code followed by "<module>:<text>".
+ * On allocation failure *data is NULL and *len stays 0.
+ * Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_ERR;
+
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+	/* 4 code bytes + module + ':' + text + NUL */
+	size_t buf_len = 4 + strlen(module) + 1 + strlen(ntcerr->err_text) + 1;
+
+	*data = malloc(buf_len);
+	if (*data) {
+		sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+		*len = strlen(*data) + 1; /* add 0 - terminator */
+		/* overwrite the "----" placeholder with the binary code */
+		*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal reply carrying only a 32-bit status code.
+ * On allocation failure *data is NULL and *len stays 0.
+ * Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	char *reply = malloc(sizeof(uint32_t));
+
+	if (reply) {
+		*(uint32_t *)reply = (uint32_t)code;
+		*len = sizeof(uint32_t);
+	}
+	*data = reply;
+	return REQUEST_OK;
+}
+
+/*
+ * Dispatch a comma-separated command string through a (possibly nested)
+ * func_s table.  The first token selects an entry; if that entry has
+ * sub_funcs the remainder is dispatched recursively, otherwise the entry's
+ * handler is called with the remainder in *data.
+ *
+ * Returns REQUEST_OK/REQUEST_ERR (via the handler or ntconn_error()).
+ *
+ * NOTE(review): strtok() mutates 'function' in place and keeps static
+ * state, so this is not thread-safe — confirm all callers are serialized.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		/* Empty input: distinguish a missing top-level parameter
+		 * from an incomplete nested parameter list.
+		 */
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	/* Consume the token (strtok replaced the ',' with a NUL). */
+	hdr->len -= strlen(tok) + 1;
+	/*
+	 * NOTE(review): when 'function' contains no further ',' this points
+	 * one past the token's terminating NUL — confirm the buffer always
+	 * extends past the command string (see hdr->len/blob layout).
+	 */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name identifying this module. This name is used as the module
+	 * selector in the request string.
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, data and len
+	 *             may carry a descriptive error text.
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending the reply to the client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean-up of client specific data allocations. Called when a client
+	 * disconnects from the server.
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error record: numeric code plus human-readable text
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+/* Look up the error record for 'err_code'; never returns NULL */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/* A registered ntconnect module instance, kept in a singly linked list */
+typedef struct ntconn_mod_s {
+	void *hdl;		/* module private context passed to all ops */
+	struct pci_id_s addr;	/* PCI device this module instance serves */
+	const ntconnapi_t *op;	/* module operations */
+
+	pthread_mutex_t mutex;	/* per-module lock - presumably serializes requests; confirm */
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+/* Global state of the ntconnect socket server */
+struct ntconn_server_s {
+	int serv_fd;	/* listening socket fd */
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;	/* CPU affinity used for server threads */
+};
+
+/* Register the built-in "server" module on the server instance */
+int ntconn_server_register(void *server);
+
+/* Register a module's ops for the given PCI device */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include <stdint.h>	/* system header: use angle brackets, not quotes */
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+/* Error codes reported by the ntconnect layer itself */
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+/* Status codes carried in a 4-byte reply blob (see ntconn_reply_status) */
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+/* Message framing tags - presumably stored in ntconn_header_s::tag; confirm */
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+/* Error reply payload: code plus NUL-terminated message text */
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+/* Wire header preceding every message: 'len' is the command-string length,
+ * 'blob_len' the length of the trailing binary blob.
+ */
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+struct ntconn_header_s {
+	uint16_t tag;
+	uint16_t len;
+	uint32_t blob_len;
+};
+
+/* PCI address packed into a single 64-bit key.
+ * NOTE(review): the byte layout of the union depends on host endianness,
+ * so pci_id values are only meaningful within a single host.
+ */
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;	/* fills the 64-bit key */
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+/* Reply payload: NUL-terminated "dddd:bb:dd.f" strings, one per NIC */
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..2362d440b4
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+/* Administrative/attachment state of a port */
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration.
+	 * NOTE(review): "AVTIVE" is a typo for "ACTIVE"; renaming would
+	 * break users of this header, so it is only flagged here.
+	 */
+	PORT_TYPE_PHY_LAG_ACTIVE_AVTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+/* NIM (transceiver module) identifiers.
+ * NOTE(review): values appear to follow the SFF-8024 identifier codes
+ * (note the gap 0x0E-0x10) - confirm against the NIM handling code.
+ */
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+/* 6-byte MAC address */
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+/* NIM identification/inventory data. Fixed-size char arrays hold
+ * NUL-terminated strings (hence the odd 17/9/5 sizes).
+ */
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+/* One sensor reading with its running min/max */
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+/* Sensor inventory summary; per-port NIM sensor counts (up to 8 ports) */
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+/* A queue attached to an interface: direction plus queue index */
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+/* Full description of one interface as reported to clients */
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues (anonymous struct member, C11) */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];	/* flexible array, nb_ports entries */
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/* NOTE(review): declared as an array of 32 'char *', which cannot
+	 * carry string data across the socket; was 'char fw_version[32]'
+	 * intended? Layout is part of the binary protocol, so only flagged.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+/* Flow-module error codes; start at 0x100, presumably to stay clear of the
+ * generic ntconn_err_e values - confirm.
+ */
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	/* NOTE(review): "TO_MANY" should read "TOO_MANY"; kept as-is since
+	 * renaming breaks users of this header.
+	 */
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+/* Fixed-size copy of a flow element spec/mask, big enough for any supported
+ * element type; 'valid' marks whether the union holds data.
+ */
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+/* Flat, wire-transportable copy of one flow element (type + spec + mask) */
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+/* RSS action with the queue list copied inline (no pointers on the wire) */
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+/* Fixed-size copy of a flow action's configuration, mirroring
+ * flow_elem_types_s above.
+ */
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+/* Request: query one action of an existing flow on a port */
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;	/* opaque flow handle returned at creation */
+};
+
+/* Request: create a flow from copied elements/actions */
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;	/* opaque flow handle returned at creation */
+};
+
+#define ERR_MSG_LEN 128LLU
+
+/* Reply: queues assigned to a port */
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;	/* opaque handle for later query/destroy */
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+/* Reply: query result with variable-length trailing data */
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];	/* data_length bytes follow */
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* NOTE(review): MAX_PATH_LEN and ERR_MSG_LEN are also defined (with identical
+ * values) in ntconnect_api_flow.h; identical redefinition is legal C but a
+ * shared header would be cleaner.
+ */
+#define MAX_PATH_LEN 128
+
+/* Meter-module error codes; start at 0x100 like the flow-module codes */
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+/* Sub-commands of the meter module */
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+/* Request: set up a profile, policy or meter; the union member used is
+ * selected by the accompanying command (see ntconn_meter_command_e).
+ */
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+/* Request: read (and optionally clear) meter statistics */
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+/* Pointer-based variant of meter_setup_s for in-process use only -
+ * pointers are meaningless across the socket.
+ */
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module.
+ * Each NUM_STAT_RECORD_TYPE_* macro derives the record's counter count from
+ * its struct size, so the structs must contain uint64_t members only.
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+/* Reply header followed by nb_counters * record-size uint64_t values */
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];	/* flexible array of counter values */
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Generic test reply: 'number' entries in the flexible 'test' array */
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+/* "get" sub-commands: handles "server get,nic_pci_ids" */
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Reply to "server get,nic_pci_ids": list the PCI addresses of all
+ * registered SmartNICs. The reply buffer is allocated here and handed to
+ * the caller via *data (released later through the module free_data op).
+ * Returns REQUEST_OK, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/* Bounded print: sprintf could overflow the 13-byte slot,
+		 * since "%x" on 'function' may emit two hex digits
+		 * ("ffff:ff:ff.ff" is 13 chars + NUL).
+		 */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/* Module request entry point: dispatch into the server command table */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, server_entry_funcs, data, len, 0);
+
+	return res;
+}
+
+/* Release a reply buffer previously returned by a request handler */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/* Ops for the built-in "server" module; no per-client cleanup needed */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/* Register the server module itself; it always lives on 0000:00:00.0 */
+int ntconn_server_register(void *server)
+{
+	return register_ntconn_mod(&(const struct rte_pci_addr){
+					   .domain = 0, .bus = 0,
+					   .devid = 0, .function = 0 },
+				   server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..51f0577194
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error-code to human-readable-text table, terminated by err_code -1.
+ * Looked up via get_ntconn_error(); entry [1] doubles as the generic
+ * fallback for unknown codes.
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;
+static pthread_t tid;
+static pthread_t ctid;
+static struct ntconn_server_s ntconn_serv;
+
+/* Look up @err_code in the ntconn_err table.
+ * Unknown codes map to the generic "Internal error" entry (index 1).
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+	/* Sentinel reached without a match: generic internal error */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a connection module, identified by PCI address plus the
+ * op->module name, at the head of the global module list, and record
+ * its PCI id in the server's pci_id_list (skipping duplicates).
+ * Returns 0 on success, -1 on allocation failure.
+ *
+ * NOTE(review): list insertion is not lock-protected - presumably all
+ * registrations happen single-threaded at probe time; confirm.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto the singly-linked module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* pci_id is presumably a union aliasing domain/bus/devid/function;
+	 * the all-zero server pseudo-address is deliberately not listed.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/* Fill @addr as an AF_UNIX address for @path.
+ * Returns 0 on success, -1 on NULL argument or a path too long for
+ * sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!addr || !path)
+		return -1;
+
+	memset(addr, 0, sizeof(*addr));
+	addr->sun_family = AF_UNIX;
+
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;
+
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll @fd until readable (timeout in ms, -1 = block forever) and
+ * recv() up to @len bytes into @data. On STATUS_OK, *recv_len holds
+ * the number of bytes actually received (may be a short read).
+ * Returns one of the STATUS_* codes defined above.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd flags set: try to receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			/* Save errno: NT_LOG/strerror may clobber it */
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			/* recv() == 0 means orderly shutdown by the peer */
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	/* POLLERR / POLLNVAL or unexpected flag combination */
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/* Read exactly @length bytes from @clfd into @data, retrying on
+ * STATUS_TRYAGAIN. Returns STATUS_OK, or silently propagates
+ * STATUS_CONNECTION_CLOSED / STATUS_TIMEOUT; other errors are logged.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t remaining = length;
+	size_t offset = 0;
+
+	while (remaining > 0) {
+		size_t chunk = 0;
+		int status = read_data(clfd, remaining, &data[offset], &chunk,
+				       -1);
+
+		switch (status) {
+		case STATUS_OK:
+			offset += chunk;
+			remaining -= chunk;
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return status;
+		case STATUS_TRYAGAIN:
+			/* Retry */
+			break;
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       status);
+			return status;
+		}
+	}
+	return STATUS_OK;
+}
+
+/* Write exactly @size bytes from @data to @fd, looping on short writes.
+ * write() interrupted by a signal (EINTR) is retried instead of being
+ * treated as a fatal error. Returns 0 on success or
+ * STATUS_INTERNAL_ERROR on failure.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue; /* interrupted by signal: retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += (size_t)res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from @clfd into a heap buffer; on STATUS_OK
+ * ownership of the buffer passes to the caller via *rdata.
+ * The first read fetches up to MESSAGE_BUFFER bytes; if the header
+ * announces a larger total (len + blob_len) the buffer is grown and the
+ * remainder is read with read_all().
+ *
+ * NOTE(review): when the total length fits within MESSAGE_BUFFER but
+ * the first recv() returns a short read, the missing bytes are never
+ * fetched - presumably messages <= MESSAGE_BUFFER always arrive whole;
+ * confirm against the client side.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	/* The message starts with a ntconn_header_s describing total size */
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		/* Fetch the bytes not covered by the first read */
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from @clfd and parse its "<pci_id>;<module>;<cmd>"
+ * payload, then look up the registered module matching both the PCI
+ * address and the module name.
+ *
+ * On return: *status holds the read status, *get_req owns the raw
+ * request buffer (caller frees), *module_cmd points into that buffer at
+ * the command portion (if present). Returns the matching module, or
+ * NULL when parsing fails or no module matches.
+ *
+ * Fix vs. original: each strtok_r() result is NULL-checked BEFORE
+ * strlen() is applied to it (strlen(NULL) is undefined behavior and
+ * crashed on malformed requests); the dead NULL-check on the already
+ * dereferenced @hdr pointer is removed.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/* First token: the PCI id string */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, sizeof(pci_id) - 1);
+
+			/* Second token: the module name */
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, sizeof(module) - 1);
+
+			/* Remainder (if any) is the module command */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Split the PCI id into domain:bus:devid.function */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/* Send one framed reply on @clfd: a ntconn_header_s carrying @reply_tag
+ * followed by @size bytes of @data. Returns 0 on success or a negative
+ * STATUS_* code.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	uint8_t *msg = malloc(hdr.len);
+
+	if (msg == NULL) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	/* Header first, payload directly after */
+	memcpy(msg, &hdr, NTCMOD_HDR_LEN);
+	memcpy(msg + NTCMOD_HDR_LEN, data, size);
+
+	int res = write_all(clfd, msg, hdr.len);
+
+	free(msg);
+	return res ? res : 0;
+}
+
+/*
+ * Send a reply and then release the module-owned reply buffer via the
+ * module's free_data callback (under the module mutex).
+ * NOTE(review): the buffer is freed only when @size is non-zero - a
+ * module returning an allocated buffer with len == 0 would leak;
+ * presumably modules never do that - confirm.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/* Send an NTCONN_TAG_ERROR reply for @err_code.
+ * Wire layout: a 4-byte error code immediately followed by the
+ * remainder of the "connect:<text>" message (the "----" placeholder
+ * written by snprintf is overwritten by the code).
+ * The code is stored with memcpy instead of a *(uint32_t *) cast into
+ * the char buffer, avoiding a strict-aliasing/misalignment violation.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	uint32_t code = (uint32_t)ntcerr->err_code;
+
+	memcpy(err_buf, &code, sizeof(code));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client thread: repeatedly reads requests from the connected
+ * socket @arg (fd smuggled through the void*), dispatches each to the
+ * matching module and sends the reply. Runs until the client
+ * disconnects or a send fails, then invokes every module's
+ * client_cleanup callback for this fd and exits.
+ *
+ * NOTE(review): after a successfully handled request the flow still
+ * falls through to ntconnect_send_error(INVALID_REQUEST) below, and
+ * every "break" path leaves the loop without freeing "request" - both
+ * look unintended; confirm against the protocol before changing.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				/* Reply: major in upper 32 bits, minor in lower */
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept-loop thread: listens on the server socket and spawns one
+ * detached ntconnect_worker thread per accepted client, pinned to the
+ * configured CPU set. Exits when listen()/accept() fails or the
+ * running flag is cleared.
+ *
+ * Fix vs. original: pthread_create() is now checked - on failure the
+ * client fd is closed and the loop continues, instead of calling
+ * pthread_setaffinity_np()/pthread_detach() on an indeterminate
+ * thread id (undefined behavior).
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		if (pthread_create(&ctid, NULL, ntconnect_worker,
+				   (void *)(uint64_t)clfd) != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create client thread, closing connection\n");
+			close(clfd);
+			continue;
+		}
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Initialize and start the ntconnect Unix-domain socket server on
+ * @sockname, pinning its threads to @cpuset. Does nothing (and
+ * succeeds) when no modules have registered.
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fixes vs. original: strdup() is NULL-checked before dirname()
+ * (which would dereference NULL), and unix_build_address() failure
+ * (path too long for sun_path) no longer falls through to bind() with
+ * a zeroed address.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		if (unix_build_address(sockname, &addr) < 0) {
+			NT_LOG(ERR, NTCONNECT, "Invalid socket path: %s",
+			       sockname);
+			return -1;
+		}
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..60753d6ef5
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+/* Per-adapter connection handle, one slot per probed adapter. */
+static struct adap_hdl_s {
+	struct drv_s *drv;	/* back-pointer to the owning driver instance */
+} adap_hdl[MAX_ADAPTERS];
+
+/* Forward declarations of the "get" command handlers */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* Level-1 dispatch table for "get <what>" (NULL-terminated) */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* Forward declarations of the "set" command handlers */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* Level-1 dispatch table for "set <what>" (NULL-terminated) */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/* Map an internal nt_link_speed_e value to the ntconnect wire
+ * PORT_LINK_SPEED_* constant; unknown values map to
+ * PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:
+		return PORT_LINK_SPEED_10M;
+	case NT_LINK_SPEED_100M:
+		return PORT_LINK_SPEED_100M;
+	case NT_LINK_SPEED_1G:
+		return PORT_LINK_SPEED_1G;
+	case NT_LINK_SPEED_10G:
+		return PORT_LINK_SPEED_10G;
+	case NT_LINK_SPEED_25G:
+		return PORT_LINK_SPEED_25G;
+	case NT_LINK_SPEED_40G:
+		return PORT_LINK_SPEED_40G;
+	case NT_LINK_SPEED_50G:
+		return PORT_LINK_SPEED_50G;
+	case NT_LINK_SPEED_100G:
+		return PORT_LINK_SPEED_100G;
+	default:
+		return PORT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/* Parse a textual speed ("10M" .. "100G") into nt_link_speed_t using a
+ * lookup table; unrecognized strings yield NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces" handler: build an ntc_interfaces_s reply describing
+ * every DPDK port of this adapter - physical ports first (with LAG
+ * handling), then virtual ports - including PCI id, link state/speed,
+ * MTU, MAC, attached HW queues and NIM data.
+ * The reply buffer is allocated here; ownership passes to the caller
+ * via *data / *len. Returns REQUEST_OK or REQUEST_ERR.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	/* Reply buffer: fixed header plus one entry per port */
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_AVTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode link length is clamped to the 16-bit field */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * "get info" handler: return the FPGA image identification string
+ * "TTT-PPPP-VV-RR" (type-product-version-revision).
+ * The reply buffer is allocated here; ownership passes to the caller.
+ *
+ * NOTE(review): the buffer is *len = sizeof(struct ntc_adap_get_info_s)
+ * bytes but the snprintf bound is the magic number 31 - presumably the
+ * struct is at least 31 bytes; confirm, and prefer a sizeof-based bound.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * "get sensors" handler: collect adapter-level and per-port NIM sensor
+ * readings into one flat reply - a ntc_sensors_s header followed by an
+ * array of struct sensor. The buffer is malloc'ed here; ownership
+ * passes to the caller via *data / *len.
+ *
+ * NOTE(review): names are copied with memcpy (fixed 24 bytes for the
+ * adapter name, up to 50 for sensor names) without guaranteed
+ * NUL-termination - confirm receivers treat them as fixed-width fields.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors (linked list walk) */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors, one linked list per physical port */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable port @port_nr on the adapter behind @adap. */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable port @port_nr on the adapter behind @adap. */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, false);
+	return REQUEST_OK;
+}
+
+/*
+ * Request the link on @portid to come up.  A no-op (apart from logging)
+ * when the link is already requested up.
+ */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Request the link on @portid to go down.  A no-op (apart from logging)
+ * when the link is already requested down.
+ */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Parse @speed_str and apply it as the link speed of @portid.  The speed
+ * may only be changed while the port is administratively disabled; other
+ * outcomes produce an error reply in *data/*len.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_adm_state(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n", portid,
+	       speed_str);
+	return REQUEST_OK;
+}
+
+/* Apply loopback @mode (NT_LINK_LOOPBACK_*) to @portid. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(info, portid, mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Enable (@disable == false) or disable (@disable == true) the transmitter
+ * power on @portid.  On failure an error reply is placed in *data/*len.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+	int res;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+	res = nt4ga_port_tx_power(info, portid, disable);
+	if (res == 0)
+		return REQUEST_OK;
+
+	NT_LOG(DBG, NTCONNECT,
+	       "Port %i: ERROR while changing tx_power\n", portid);
+	return ntconn_reply_status(data, len,
+				   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+}
+
+/*
+ * NtConnect request handler: "adapter;set,interface,portX,<attr>[,<value>]".
+ *
+ * On entry *data points at the zero-terminated remainder of the request,
+ * e.g. "port0,link_speed=10G"; it is tokenized in place.  Dispatches to the
+ * matching set_* helper and returns its result, or an ntconn error reply.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports: reject port numbers at or beyond the number
+	 * of physical ports (the original check was inverted and rejected
+	 * every valid phy port while letting out-of-range ports through).
+	 */
+	if (port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * NtConnect request handler: "adapter;set,adapter,...".
+ * Not implemented yet - logs the command and replies with an error.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	*len = 0;
+
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * Entry point for all "adapter;<function>,..." requests; dispatches to the
+ * handler matching @function in the adapter_entry_funcs table.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by this module's handlers. */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; this module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module descriptor registered with the NtConnect framework: name,
+ * version, request dispatcher, reply-buffer free and client cleanup. */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register @drv as an adapter instance with the NtConnect framework.
+ * Returns the result of register_ntconn_mod(), or -1 when all
+ * MAX_ADAPTERS slots are occupied.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Find the first unused adapter slot */
+	while (slot < MAX_ADAPTERS && adap_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..31d5dc3edc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client handle state for the flow ("filter") module. */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Per-port binding established by func_flow_setport(): the flow device and
+ * the caller id / forced VLAN used when building flow attributes. */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Translate @err_code to its human-readable text.  Unknown codes fall back
+ * to table entry 1 ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Allocate a struct flow_return_s reply carrying @err as status and the
+ * text of @code as message.  Returns REQUEST_OK with *data/*len set, or
+ * REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+		size_t msg_len = strlen(err_msg);
+
+		/*
+		 * Truncate if needed and always NUL-terminate: the buffer is
+		 * from malloc, so an unterminated copy would leave garbage
+		 * after the message (the original RTE_MIN memcpy could copy
+		 * ERR_MSG_LEN bytes with no terminator).
+		 */
+		if (msg_len >= ERR_MSG_LEN)
+			msg_len = ERR_MSG_LEN - 1;
+		memcpy(return_value->err_msg, err_msg, msg_len);
+		return_value->err_msg[msg_len] = '\0';
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Allocate a struct flow_return_s reply carrying @err as status and the
+ * generic "Internal error" text as message.  Returns REQUEST_OK with
+ * *data/*len set, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	ret->status = err;
+	ret->type = FLOW_ERROR_GENERAL;
+	strlcpy(ret->err_msg, get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR),
+		ERR_MSG_LEN);
+	*data = (char *)ret;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Dispatch table for "filter;<function>,..." requests; NULL-terminated. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Build the standard struct flow_return_s reply from @status and @error.
+ * Returns REQUEST_OK with *data/*len set, or REQUEST_ERR with *len = 0
+ * when the reply buffer cannot be allocated.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	ret->status = status;
+	ret->type = error->type;
+	strlcpy(ret->err_msg, error->message, ERR_MSG_LEN);
+	*data = (char *)ret;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * NtConnect request handler: "filter;setport,in_port=<N>,vpath=<path>".
+ *
+ * Binds DPDK port <N> to the flow device of the virtio-path instance and
+ * replies with the queue count and the id/hw_id of each queue.  Errors are
+ * reported through ntconn_flow_err_status().
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	char vpath[MAX_PATH_LEN] = "";	/* keep defined if the token is absent */
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		/*
+		 * Match the full 8-char "in_port=" prefix and parse the
+		 * number after the '=' (the original compared only 5 chars
+		 * and ran atoi() from the '=', always yielding 0).
+		 */
+		if (length > 8 && memcmp(tok, "in_port=", 8) == 0)
+			in_port = atoi(tok + 8);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vpath=", 6) == 0)
+			strlcpy(vpath, tok + 6, MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: "filter;flush,port=<N>".
+ *
+ * Flushes all flows on the given port's flow device and replies with a
+ * struct flow_return_s describing the result.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* atoi() may return a negative value; guard both ends before indexing */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * NtConnect request handler: "filter;destroy" with a
+ * struct destroy_flow_ntconnect blob appended after the header.
+ *
+ * Destroys the given flow handle on the port's flow device and replies
+ * with a struct flow_return_s.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	/*
+	 * Initialize error up front (as func_flow_flush() does); do not rely
+	 * on flow_destroy() writing it on every path before the reply reads
+	 * error.type/error.message.
+	 */
+	set_error(&error);
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Guard both ends of the range before indexing port_eth[] */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selector for make_flow_create(): validate only, or actually create. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Unpack the wire-format flow description in @flow_cpy into flow_elem /
+ * flow_action arrays (resolving the copied spec/mask/conf blobs to in-place
+ * pointers) and hand it to the Flow API.
+ *
+ * @func selects FLOW_API_FUNC_VALIDATE (returns 0, *status holds the
+ * validation result) or FLOW_API_FUNC_CREATE (returns the new flow handle
+ * as an integer, 0 on failure).  On a bounds error, *status and @error are
+ * left describing the failure and 0 is returned.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Preload the error state used by the bail-out paths below */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "Too many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unpack match items; valid spec/mask copies are referenced in place */
+	do {
+		idx++;
+		/* >= : elem[] holds exactly MAX_FLOW_STREAM_ELEM entries */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unpack actions; some types need their conf rebuilt, see cases below */
+	idx = -1;
+	do {
+		idx++;
+		/* >= : action[] holds exactly MAX_FLOW_STREAM_ELEM entries */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					/* >= : elem_tun[] size is the max */
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"Too many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					/* >= assumed: items[] sized RAW_ENCAP_DECAP_ELEMS_MAX - confirm */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					/* >= assumed: items[] sized RAW_ENCAP_DECAP_ELEMS_MAX - confirm */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: validate a flow specification without
+ * programming it into the hardware.
+ *
+ * The request blob (at (*data)[hdr->len]) is a struct create_flow_ntconnect.
+ * On success or filter-level failure the reply is built by
+ * copy_return_status(); malformed requests are answered with an ntconn
+ * error reply. Returns REQUEST_OK/REQUEST_ERR.
+ *
+ * Fixes versus previous revision:
+ *  - removed an unreachable duplicate of the validate/reply sequence that
+ *    followed the unconditional "return copy_return_status(...)";
+ *  - DEBUG_PARSING: "flow_cpy[i].elem[i]" indexed the request struct itself
+ *    by element index; corrected to "flow_cpy->elem[i]";
+ *  - DEBUG_PARSING: the IPv4 dumps printed byte [0] four times; corrected
+ *    to bytes [0]..[3];
+ *  - DEBUG_FLOW entry trace logged the wrong function name.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The blob must be exactly one create_flow_ntconnect */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s\n", __func__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	/* Debug-only dump of the received match items and actions */
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data; reply is built from status/error */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * NtConnect request handler: query a previously created flow (e.g. its
+ * counters) through flow_query().
+ *
+ * The request blob (at (*data)[hdr->len]) is a struct query_flow_ntconnect
+ * carrying the target port, the remote flow handle and one action describing
+ * what to query. On return *data points to a freshly malloc'ed reply
+ * (struct query_flow_return_s, followed by the raw query payload) and *len
+ * is its size. Returns REQUEST_OK/REQUEST_ERR.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The blob must be exactly one query_flow_ntconnect */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/* The handle value was handed out by func_flow_create on this host;
+	 * it is trusted as-is — NOTE(review): no validation that it still
+	 * refers to a live flow, confirm callers guarantee this.
+	 */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply carries status/error always, plus the payload if one was
+	 * produced; data_out ownership is taken here and freed after copy.
+	 */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Module entry point for incoming "flow" requests: look the function name
+ * up in adapter_entry_funcs and run the matching handler.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	const int ret = execute_function(this_module_name, hdl, client_id,
+					 hdr, function, adapter_entry_funcs,
+					 data, len, 0);
+
+	return ret;
+}
+
+/*
+ * Release a reply buffer previously malloc'ed by a flow request handler.
+ * free(NULL) is a well-defined no-op, so no NULL guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook required by the ntconnapi_t interface; the flow
+ * module keeps no per-client state, so there is nothing to release.
+ */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table registered with the NtConnect framework. Positional
+ * initializer: module name, version major/minor, request dispatcher,
+ * reply-buffer free hook, per-client cleanup hook (must match the
+ * ntconnapi_t field order).
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register one adapter's flow module with the NtConnect framework.
+ * Claims the first free slot in flow_hdl[]; fails with -1 when all
+ * MAX_CLIENTS slots are taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* find the first unused handle slot */
+	while (slot < MAX_CLIENTS && flow_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Module-local error-code -> message table; terminated by a sentinel entry
+ * with err_code == (uint32_t)-1 (see get_error_msg()). Entry index 1
+ * ("Internal error") doubles as the fallback for unknown codes.
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+/********************************************************************/
+/* Get error message corresponding to the error code.               */
+/* Codes below NTCONN_METER_ERR_INTERNAL_ERROR belong to the shared */
+/* ntconn table; others are looked up locally, falling back to      */
+/* "Internal error" (entry 1) for unknown codes.                    */
+/********************************************************************/
+static const char *get_error_msg(uint32_t err_code)
+{
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *generic_err = get_ntconn_error(err_code);
+
+		return generic_err->err_text;
+	}
+
+	int i = 0;
+
+	while (ntconn_err[i].err_code != (uint32_t)-1 &&
+			ntconn_err[i].err_code != err_code)
+		i++;
+
+	if (ntconn_err[i].err_code == (uint32_t)-1)
+		i = 1;	/* unknown code -> "Internal error" */
+
+	return ntconn_err[i].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/* Fills an rte_mtr_error with UNSPECIFIED type, the text resolved by */
+/* get_error_msg() and no cause object.                               */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+	error->cause = NULL;
+}
+
+/*
+ * NtConnect request handler: fetch rte_mtr capabilities for the port that
+ * backs a virtual port.
+ *
+ * The request is a text argument list; the "vport=<n>" token selects the
+ * virtual port (valid range 1..64 here). On success the reply is a
+ * malloc'ed struct meter_capabilities_return_s; on failure a
+ * struct meter_error_return_s carrying status and error text.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse "vport=<n>" from the comma-separated request string */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* NOTE(review): range 1..64 here vs 4..128 in func_meter_setup —
+	 * confirm which range is authoritative.
+	 */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is derived from the vport's parity */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: meter configuration. The command is a 6-char
+ * token in the request string (addpro/delpro/addpol/delpol/crtmtr/delmtr),
+ * while the parameters travel in a struct meter_setup_s blob at
+ * (*data)[hdr->len].
+ *
+ * Each virtual port (4..128) owns a private ID window; the blob's relative
+ * IDs are rebased into global IDs as
+ *   global = (vport - 4) * per_port_max + relative.
+ * On success the reply is a struct meter_return_s, otherwise a
+ * struct meter_error_return_s.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* The blob must be exactly one meter_setup_s */
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* First request token selects the sub-command */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	/* All branches below set status; command is one of the six values
+	 * validated above, so no default case is reachable.
+	 */
+	switch (command) {
+	case ADD_PROFILE:
+		/* Per-vport profile window; rebase relative ID to global */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		/* NOTE(review): profile windows are sized from
+		 * flow_mtr_meter_policy_n_max(), not a profile max —
+		 * confirm this is intentional.
+		 */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action table from the flat copies */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		/* Meter IDs use the meter window; the referenced profile and
+		 * policy IDs are rebased with the policy window.
+		 */
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	 *data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: read (and optionally clear) the statistics of
+ * one meter via rte_mtr_stats_read().
+ *
+ * The request blob (at (*data)[hdr->len]) is a struct meter_get_stat_s;
+ * its relative meter ID is rebased into the vport's global ID window, as
+ * in func_meter_setup(). On success the reply is a malloc'ed
+ * struct meter_return_stat_s, otherwise a struct meter_error_return_s.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The blob must be exactly one meter_get_stat_s */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Physical port from vport parity; meter ID rebased to the
+	 * vport's global window.
+	 */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Module entry point for incoming "meter" requests: resolve the function
+ * name against adapter_entry_funcs and invoke the handler.
+ */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	const int ret = execute_function(this_module_name, hdl, client_id,
+					 hdr, function, adapter_entry_funcs,
+					 data, len, 0);
+
+	return ret;
+}
+
+/*
+ * Release a reply buffer previously malloc'ed by a meter request handler.
+ * free(NULL) is a well-defined no-op, so no NULL guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook required by the ntconnapi_t interface; the meter
+ * module keeps no per-client state, so there is nothing to release.
+ */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table registered with the NtConnect framework. Positional
+ * initializer: module name, version major/minor, request dispatcher,
+ * reply-buffer free hook, per-client cleanup hook (must match the
+ * ntconnapi_t field order).
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register one adapter's meter module with the NtConnect framework.
+ * Claims the first free slot in meter_hdl[]; fails with -1 when all
+ * MAX_CLIENTS slots are taken.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* find the first unused handle slot */
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..1b3e59fcc1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ *
+ * ntconn_stat_register() refuses to activate the module unless the FPGA
+ * statistics layout version reported by the adapter is listed here.
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistics section inside a snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One snapshot section: where it starts and how many 64-bit words it holds */
+struct snap_addr_s {
+	const uint64_t *ptr;	/* points into snaps_s::buffer */
+	unsigned int size;	/* section size in 64-bit counters */
+};
+
+/* Per-client snapshot state, kept in a singly linked list off stat_hdl */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* one allocation holding all sections */
+	struct snaps_s *next;
+};
+
+/* Module-wide handle; the stat module serves a single adapter instance */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;	/* head of the per-client snapshot list */
+} stat_hdl;
+
+/* Statistic category selector shared by get_size() and do_get_stats() */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Dispatch table for "get snapshot <section>" requests */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* Dispatch table for "get <item>" requests; "snapshot" descends into
+ * func_snap_level2 above.
+ */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level dispatch table used by stat_request() */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize flowmatcher (FLM) statistics into the reply buffer 'val'.
+ * The buffer begins with a struct ntc_stat_get_data_s header followed by
+ * 'nbc' flowmatcher records.  Returns the number of 64-bit words written.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* NOTE(review): the xstats reads below are used only by the assert;
+	 * with NDEBUG both results are unused — confirm whether the calls
+	 * have a needed side effect or can be dropped.
+	 */
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/* NOTE(review): 'flm' is never advanced inside this loop, so
+		 * every iteration writes the same record.  Harmless as long as
+		 * nbc == 1 (get_size() reports exactly one FLM record) —
+		 * confirm nbc can never exceed 1 here.
+		 */
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/* No FLM stats available: zero-fill the record.
+		 * NOTE(review): sized by the source struct rather than the
+		 * reply record type — confirm the two layouts are the same
+		 * size.
+		 */
+		memset(flm, 0, sizeof(*hwstat->mp_stat_structs_flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-color statistics into the reply buffer 'val'.
+ * Layout: a struct ntc_stat_get_data_s header followed by 'nbc' color
+ * records.  Returns the number of 64-bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *hdr_out = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *rec;
+	int i;
+
+	hdr_out->nb_counters = (uint64_t)nbc;
+	hdr_out->timestamp = hwstat->last_timestamp;
+	hdr_out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap mode */
+	rec = (struct color_type_fields_s *)hdr_out->data;
+	for (i = 0; i < nbc; i++, rec++) {
+		rec->pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		rec->octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		rec->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-host-buffer (queue) statistics into the reply buffer 'val'.
+ * Layout: a struct ntc_stat_get_data_s header followed by 'nbq' queue
+ * records.  Returns the number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *hdr_out = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *rec;
+	int i;
+
+	hdr_out->nb_counters = (uint64_t)nbq;
+	hdr_out->timestamp = hwstat->last_timestamp;
+	hdr_out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap mode */
+	rec = (struct queue_type_fields_s *)hdr_out->data;
+	for (i = 0; i < nbq; i++, rec++) {
+		rec->flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		rec->drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		rec->fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		rec->dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		rec->flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		rec->drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		rec->fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		rec->dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group shared by Rx and Tx port statistics from the
+ * adapter counter struct 'cptr' into the reply-record RMON section 'rmon'.
+ * Pure field-by-field copy; no counters are reset or modified.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port statistics for 'nbp' ports into the reply buffer 'val'.
+ * The record layout depends on the adapter mode: virtual-switch mode emits
+ * compact rtx_type_fields_virt_s records; capture mode emits the full
+ * rx_type_fields_cap_s records (RMON group plus Rx-only counters).
+ * Returns the number of 64-bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* Virtual-switch mode: compact per-port record */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* Capture mode: full RMON group plus Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize Tx port statistics for 'nbp' ports into the reply buffer 'val'.
+ * As with the Rx path, virtual-switch mode uses the compact virt record,
+ * capture mode uses the RMON-based cap record.
+ * Returns the number of 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		/* Virtual-switch mode: compact per-port record */
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			/* NOTE(review): overrides the RMON packet counter with
+			 * the adapter-level Tx packet total — presumably
+			 * intentional; confirm which counter clients expect.
+			 */
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Reply with the adapter's statistics layout version as a single int.
+ * On success *data holds a malloc'ed int and *len is sizeof(int);
+ * on failure *data is NULL and *len is 0.
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(*reply));
+
+	*data = (char *)reply;
+	if (reply == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * Reply with the flowmatcher record layout version (1 for FLM stat
+ * versions below 18, otherwise 2) as a single malloc'ed int.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(*reply));
+
+	*data = (char *)reply;
+	if (reply == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	if (stat->p_nt4ga_stat->flm_stat_ver < 18)
+		*reply = 1;
+	else
+		*reply = 2;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec, size;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	default:
+		/* Defensive: all enum values are handled above, but without
+		 * this default 'nrec' and 'size' would be read uninitialized
+		 * (undefined behavior) if an invalid value were ever passed.
+		 */
+		nrec = 0;
+		size = 0;
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for all "get <stats>" handlers: size a reply buffer for
+ * 'stype', fill it via 'read_counters' while holding the driver stat lock,
+ * and hand the buffer to the caller through *data/*len.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nrec;
+	int nbytes = get_size(stat, stype, &nrec);
+
+	nbytes *= sizeof(uint64_t);
+
+	uint64_t *buf = malloc(nbytes);
+
+	if (buf == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Hold the stat lock so the counters are read consistently */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, buf, nrec);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)buf;
+	*len = nbytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": reply with the flowmatcher statistics block */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": reply with the per-color statistics block */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": reply with the per-host-buffer statistics block */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get rx_counters": reply with the Rx port statistics block */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get tx_counters": reply with the Tx port statistics block */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot entry for 'client_id' in the module's linked list.
+ * Returns the entry, or NULL when the client has none.  When 'parent' is
+ * non-NULL it receives the predecessor of the returned node (NULL when the
+ * match is the list head, the last node when nothing matched).
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur != NULL; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot entry for 'client_id', creating and linking a fresh
+ * one at the head of the list if the client has none yet.
+ * Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *entry = find_client_snap_data(stat, client_id, NULL);
+
+	if (entry)
+		return entry;
+
+	/* First snapshot request from this client: push a new list head */
+	entry = malloc(sizeof(*entry));
+	if (entry) {
+		entry->client_id = client_id;
+		entry->buffer = NULL;
+		entry->next = stat->snaps_base;
+		stat->snaps_base = entry;
+	}
+	return entry;	/* NULL when malloc failed */
+}
+
+/*
+ * "snapshot": atomically capture all statistic sections (colors, queues,
+ * Rx ports, Tx ports) for this client into one buffer.  The sections are
+ * later returned individually by the "get snapshot <section>" handlers.
+ * The driver stat lock is held across all reads so the snapshot is
+ * consistent.  Replies with an empty payload on success.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/* Allocation failed.  The stat lock is NOT held at this
+		 * point, so do not jump to err_out (which unlocks it):
+		 * unlocking a mutex that is not locked is undefined behavior.
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	/* Drop any previous snapshot held for this client */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	/* Lock once; every error path below must go through err_out */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	/* Reached only with the stat lock held */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of a client's snapshot into a freshly allocated reply
+ * buffer.  Fails with NTCONN_ERR_CODE_NO_DATA when the client has not
+ * taken a snapshot yet.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* Bug fix: was 'if (!data)', which is always false ('data' is the
+	 * out-parameter itself) — a failed malloc was never detected and
+	 * memcpy would dereference NULL.
+	 */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* Thin wrappers: each returns one section of the client's stored snapshot
+ * via get_snap_data().
+ */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ *
+ * Dispatches an incoming "stat" request to stat_entry_funcs by name.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id is forwarded to the handler, so it is not _unused */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Free a reply buffer allocated by one of the stat request handlers. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client teardown: unlink and free the client's snapshot entry (and
+ * its counter buffer) from the module's linked list, if one exists.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink from the list before freeing */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	/* free(NULL) is a no-op, so no guard on snaps->buffer is needed */
+	free(snaps->buffer);
+	free(snaps);
+}
+
+/* NtConnect operations vtable for the "stat" module */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for this adapter with the NtConnect framework.
+ * Refuses to activate when the hardware statistics module is missing or
+ * when its layout version is not in layout_versions_supported[].
+ * Returns the result of register_ntconn_mod(), or -1 on rejection.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Every request handler in this module guards against a NULL
+	 * mp_nthw_stat; do the same here before dereferencing it below.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: hardware statistics not initialized. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-slot registration state for the test module; one slot per adapter */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Name-to-handler dispatch table used by test_request() */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo test handler: validates the incoming blob (a struct test_s header
+ * plus 'number' uint64_t payload words) and replies with a copy of the
+ * payload.  On validation failure, replies with a status-only struct;
+ * on allocation failure, returns REQUEST_ERR with *len = 0.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		/* Typo fix: was "to small" */
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+
+	/* Reject a negative count from the (untrusted) client before it is
+	 * converted to a huge unsigned size below.
+	 */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: negative count");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* Success reply: echo the payload back */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Validation failed: reply with the status only */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* NOTE(review): these constants are not referenced anywhere in this file —
+ * presumably reserved for the flow module; confirm before removing.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Module entry point for the "test" module: dispatch an incoming request
+ * to the handler matching 'function' in adapter_entry_funcs.
+ */
+static int test_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id is forwarded to the handler, so it is not _unused */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously returned by func_test().
+ * free(NULL) is a defined no-op, so no NULL guard is needed.
+ */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook: the test module keeps no per-client state. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect operations vtable for the "test" module */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register one adapter with the NtConnect framework under the "test"
+ * module.  Claims the first free slot in test_hdl[]; returns the result
+ * of register_ntconn_mod(), or -1 when every slot is already occupied.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Scan for the first unoccupied handle slot */
+	for (slot = 0; slot < MAX_CLIENTS && test_hdl[slot].drv != NULL; slot++)
+		;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[slot],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* Re: [PATCH 8/8] net/ntnic: adds socket connection to PMD
  2023-08-16 13:25 ` [PATCH 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
@ 2023-08-16 14:46   ` Stephen Hemminger
  2023-08-25 13:52     ` Christian Koue Muf
  2023-08-16 14:47   ` Stephen Hemminger
  1 sibling, 1 reply; 142+ messages in thread
From: Stephen Hemminger @ 2023-08-16 14:46 UTC (permalink / raw)
  To: Mykola Kostenok; +Cc: dev, Christian Koue Muf

On Wed, 16 Aug 2023 15:25:52 +0200
Mykola Kostenok <mko-plv@napatech.com> wrote:

> From: Christian Koue Muf <ckm@napatech.com>
> 
> The socket connection is used by Napatech's tools for monitoring
> and rte_flow programming from other processes.
> 
> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>

I would prefer that this be general and work with other PMD's.
Why is existing telemetry model not good enough?

^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH 8/8] net/ntnic: adds socket connection to PMD
  2023-08-16 13:25 ` [PATCH 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  2023-08-16 14:46   ` Stephen Hemminger
@ 2023-08-16 14:47   ` Stephen Hemminger
  1 sibling, 0 replies; 142+ messages in thread
From: Stephen Hemminger @ 2023-08-16 14:47 UTC (permalink / raw)
  To: Mykola Kostenok; +Cc: dev, Christian Koue Muf

On Wed, 16 Aug 2023 15:25:52 +0200
Mykola Kostenok <mko-plv@napatech.com> wrote:

> From: Christian Koue Muf <ckm@napatech.com>
> 
> The socket connection is used by Napatech's tools for monitoring
> and rte_flow programming from other processes.
> 
> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>

Also, a proprietary socket API introduces a whole new security attack surface.

^ permalink raw reply	[flat|nested] 142+ messages in thread

* [PATCH v2 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (6 preceding siblings ...)
  2023-08-16 13:25 ` [PATCH 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
@ 2023-08-17 14:43 ` Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (7 more replies)
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
                   ` (12 subsequent siblings)
  20 siblings, 8 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   29 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12004 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Data model describing an FPGA image: products contain modules,
+ * modules contain registers, registers contain bit fields. The
+ * generated tables under nthw/supported/ instantiate these types.
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/* Buses a module instance can be attached to inside the FPGA. */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias for the first SPI bus */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/* Access semantics of a register; MIXED means per-field behavior differs. */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/* One bit field within a register. */
+struct nt_fpga_field_init {
+	int id;             /* generated field identifier define */
+	uint16_t bw;        /* field width in bits */
+	uint16_t low;       /* least significant bit position in the register */
+	uint64_t reset_val; /* field value after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/* One register within a module, with its field layout. */
+struct nt_fpga_register_init {
+	int id;                 /* generated register identifier define */
+	uint32_t addr_rel;      /* address relative to the module base */
+	uint16_t bw;            /* register width in bits */
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;     /* register value after reset */
+	int nb_fields;          /* number of entries in fields[] */
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/* One module instance within an FPGA product. */
+struct nt_fpga_module_init {
+	int id;            /* generated module identifier define */
+	int instance;      /* instance number when a module occurs repeatedly */
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id; /* bus the instance is reached through */
+	uint32_t addr_base;        /* base address on that bus */
+	int nb_registers;          /* number of entries in registers[] */
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* A single build-time product parameter (id/value pair). */
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/* Top-level description of one FPGA image (identity + contents). */
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time; /* build timestamp (format: see generator) */
+	int nb_prod_params;       /* number of entries in product_params[] */
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;           /* number of entries in modules[] */
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..99569c2843
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+# cflags
+# The generated register tables require C11.
+cflags += [
+    '-std=c11',
+]
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+# Use the tree-wide default_cflags when available; otherwise fall back
+# to the machine args (out-of-tree / standalone builds).
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Umbrella header: pulls in the FPGA model types and the generated
+ * module/parameter/register identifier defines in one include.
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+/*
+ * CAT (categorizer) module register layout.
+ * Field entries are { field id, bit width, lsb position, reset value };
+ * generated from the FPGA model -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+/* Register entries are { id, relative address, bit width, type,
+ * reset value, field count, field table }.
+ */
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+/*
+ * CPY (copy writers 0-4) module register layout.
+ * Field entries are { field id, bit width, lsb position, reset value };
+ * generated from the FPGA model -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+/* Register entries are { id, relative address, bit width, type,
+ * reset value, field count, field table }.
+ */
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+/*
+ * CSU (checksum update) module register layout.
+ * Field entries are { field id, bit width, lsb position, reset value };
+ * generated from the FPGA model -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+/* Register entries are { id, relative address, bit width, type,
+ * reset value, field count, field table }.
+ */
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+/*
+ * DBS (doorbell / queue setup) module register layout.
+ * Field entries are { field id, bit width, lsb position, reset value };
+ * generated from the FPGA model -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/* Register entries are { id, relative address, bit width, type,
+ * reset value, field count, field table }.
+ */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (flow matcher / learn module) register map — auto-generated.
+ * Field entry: { field id, bit width, bit offset, reset value };
+ * register entry: { register id, index, width in bits, access type,
+ * reset value, field count, field table }.
+ * NOTE(review): layout presumed from nt_fpga_*_init_t — verify in fpga_model.h.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* 768-bit learn record pushed through FLM_LRN_DATA. */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* Single 32-bit counter fields for the FLM_STAT_* registers below. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register list; field counts match the referenced tables above. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG module register map (two instances, suffix 0/1) — auto-generated.
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* NOTE(review): STREAMID1 resets to 1 while STREAMID0 resets to 0 —
+ * consistent with the register reset values below. */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF module register map — auto-generated.
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+/* GMF register list; RC1-typed entries are the sticky/max-delay stats. */
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY module register map (two-port PHY control/status pins) —
+ * auto-generated. Field entry: { id, bit width, bit offset, reset };
+ * register entry: { id, index, bits, type, reset, field count, fields }.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU module recipe registers — auto-generated.
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface) register map — auto-generated.
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ * Reset values encode image identity (build time, product id, UUID words).
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+/* Product id 9563/55/24 matches the FPGA file name 9563_055_024. */
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* Test patterns: 0x11223344 and 0xAABBCCDD in decimal. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH module recipe registers — auto-generated.
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST module recipe registers — auto-generated.
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR module recipe registers — auto-generated.
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC (I2C controller) register map — auto-generated; register names
+ * mirror the Xilinx AXI IIC register set (CR/SR/ISR/IER, FIFOs, timing).
+ * Field entry: { id, bit width, bit offset, reset }; register entry:
+ * { id, index, bits, type, reset, field count, field table }.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+/* Reset 192 (0xC0) below matches TXFIFO_EMPTY|RXFIFO_EMPTY set at reset. */
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS (header inserter) module: RCP (recipe) control/data field layouts and
+ * the register table referencing them. Field entries are
+ * { field_id, bit_width, bit_position, value }; register entries are
+ * { reg_id, address, bit_width, access_type, reset_value, field_count,
+ * fields }. NOTE(review): layout inferred from initializer shape.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (key matcher) module tables: CAM/TCAM/TCI/TCQ lookup memories plus the
+ * wide RCP (recipe) record. Field entries are
+ * { field_id, bit_width, bit_position, value }; the KM_RCP_DATA record packs
+ * 44 fields into a 781-bit register (see km_registers below, whose
+ * field_count of 44 matches this array's length).
+ * NOTE(review): layout inferred from initializer shape.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module field layouts (link status, RS-FEC counters, GTY
+ * transceiver control, debounce and timestamp compensation). Field entries
+ * are { field_id, bit_width, bit_position, value }; non-zero last members
+ * here carry hardware defaults (e.g. MAX_PKT_LEN defaults to 10000,
+ * TIMESTAMP_COMP RX/TX delays to 1451/1440).
+ * NOTE(review): member meaning inferred from initializer shape -- confirm
+ * against nt_fpga_field_init_t.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/*
+ * MAC_PCS register table: { reg_id, address, bit_width, access_type,
+ * reset_value, field_count, fields }. The field_count always matches the
+ * length of the referenced fields array. Non-zero reset values here (e.g.
+ * 264714 for DEBOUNCE_CTRL, 94373291 for TIMESTAMP_COMP) are the packed
+ * concatenation of the per-field defaults above.
+ * NOTE(review): packing interpretation inferred -- confirm against
+ * nt_fpga_register_init_t.
+ */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC_RX statistics counters: one 32-bit read-only counter field per
+ * register. Field entries are { field_id, bit_width, bit_position, value };
+ * register entries are { reg_id, address, bit_width, access_type,
+ * reset_value, field_count, fields }.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC_TX statistics counters: one 32-bit read-only counter field per
+ * register, mirroring the MAC_RX table layout.
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI_RD_TG (PCIe read traffic generator) tables. Field entries are
+ * { field_id, bit_width, bit_position, value }; the RDDATA0/1 pair holds a
+ * 64-bit physical address split into low/high 32-bit halves.
+ * NOTE(review): address-split interpretation inferred from the field names.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA (PCIe test analyzer) tables: an enable bit plus four 32-bit
+ * read-only result counters (good/bad packets, length/payload errors).
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG (PCIe write traffic generator) tables; mirrors PCI_RD_TG above
+ * with an extra INC_MODE bit in WRDATA2 and a RW SEQ(uence) register.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/*
+ * PDB (packet descriptor builder) tables: global CONFIG plus the RCP
+ * (recipe) control/data record. Field entries are
+ * { field_id, bit_width, bit_position, value }.
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/*
+ * PDI (UART-like serial interface) tables: control, data (DRR/DTR),
+ * prescaler, status and soft-reset registers. The multi-initializers-per-
+ * line layout below is the generator's column formatting; entries still
+ * follow { field_id, bit_width, bit_position, value }.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/* PTP1588 (IEEE 1588 time-sync) module register map tables. Row layout
+ * presumed { id, width, lsb, reset } for fields and { id, index, width,
+ * type, reset, n_fields, fields } for registers; each n_fields matches the
+ * referenced field table's length. Confirm row layout against fpga_model.h. */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/* QSL (queue selection) module register map tables: queue enable (QEN),
+ * queue state table (QST), recipe (RCP) and unmatched-queue (UNMQ) banks,
+ * each with a CTRL (ADR/CNT) + DATA register pair. Row layout presumed
+ * { id, width, lsb, reset } / { id, index, width, type, reset, n_fields,
+ * fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/* QSPI module register map tables. The register set (SRR, CR, SR, DTR, DRR,
+ * DGIE, IER, ISR, SSR, FIFO occupancy) matches the Xilinx AXI Quad SPI IP
+ * layout -- presumably that core; TODO confirm. Row layout presumed
+ * { id, width, lsb, reset } / { id, index, width, type, reset, n_fields,
+ * fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/* RAC (register access / RAB bus) module map tables: debug registers,
+ * inbound/outbound buffer status, DMA pointer/address registers and RAB
+ * data windows. Note the register index values here (4160..4480) are much
+ * larger than other modules' -- presumably byte/word addresses rather than
+ * small register indices; confirm against fpga_model.h. Row layout presumed
+ * { id, width, lsb, reset } / { id, index, width, type, reset, n_fields,
+ * fields }. */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/* RFD module register map tables. Reset values encode protocol constants:
+ * 33024 == 0x8100 (802.1Q TPID), 4789 == default VXLAN UDP port, 9018 ==
+ * common jumbo-frame max size. Row layout presumed { id, width, lsb, reset }
+ * / { id, index, width, type, reset, n_fields, fields }; confirm against
+ * fpga_model.h. */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/* RMC module register map tables (RX MAC control, judging by MAC-port block
+ * bits and MAC interface error counter -- confirm). Row layout presumed
+ * { id, width, lsb, reset } / { id, index, width, type, reset, n_fields,
+ * fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/* RPL (replacer) module register map tables: recipe (RCP), extension (EXT)
+ * and replacement-value (RPL, 128-bit data) banks, each a CTRL/DATA pair.
+ * Row layout presumed { id, width, lsb, reset } / { id, index, width, type,
+ * reset, n_fields, fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/* RPP_LR module register map tables: recipe bank plus an IFR recipe bank
+ * carrying an enable bit and a 14-bit MTU. Row layout presumed
+ * { id, width, lsb, reset } / { id, index, width, type, reset, n_fields,
+ * fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/* RST9563 module register map tables: reset/clock control for this board
+ * revision (per-subsystem reset bits, MMCM/PLL lock status plus sticky
+ * unlock flags). Row layout presumed { id, width, lsb, reset } /
+ * { id, index, width, type, reset, n_fields, fields }; confirm against
+ * fpga_model.h. */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/* SLC (slicer) module register map tables: one recipe CTRL/DATA pair.
+ * Row layout presumed { id, width, lsb, reset } / { id, index, width, type,
+ * reset, n_fields, fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/* SPIM (SPI master, judging by CR/SR/DTR/DRR naming -- confirm) module
+ * register map tables. Row layout presumed { id, width, lsb, reset } /
+ * { id, index, width, type, reset, n_fields, fields }; confirm against
+ * fpga_model.h. */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/* SPIS (SPI slave, judging by naming parallel to SPIM -- confirm) module
+ * register map tables, including a small RAM window (CTRL/DATA). Row layout
+ * presumed { id, width, lsb, reset } / { id, index, width, type, reset,
+ * n_fields, fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/* STA (statistics) module register map tables: byte/packet/error counters,
+ * DMA host address registers and a toggle-missed status flag. Row layout
+ * presumed { id, width, lsb, reset } / { id, index, width, type, reset,
+ * n_fields, fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/* TEMPMON (temperature monitor) module register map tables: alarm flags
+ * with override bits, plus a 12-bit raw temperature reading. Row layout
+ * presumed { id, width, lsb, reset } / { id, index, width, type, reset,
+ * n_fields, fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/* TINT (timer interrupt, judging by interval/skipped/delayed naming --
+ * confirm) module register map tables. Row layout presumed
+ * { id, width, lsb, reset } / { id, index, width, type, reset, n_fields,
+ * fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/* TMC module register map tables: a single per-port replication control
+ * register (reset enables port 1 only: P0=0, P1=1). Row layout presumed
+ * { id, width, lsb, reset } / { id, index, width, type, reset, n_fields,
+ * fields }; confirm against fpga_model.h. */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/* TSM (time-sync module) field tables: per-connector (CON0..CON7) config
+ * and 64-bit sample registers (HI seconds / LO nanoseconds pairs), global
+ * config, interrupt config/status, LED control and NT time-sync (NTTS)
+ * state. Row layout presumed { id, width, lsb, reset }; confirm against
+ * fpga_model.h. NOTE(review): TSM_CON5_SAMPLE_LO_TIME breaks the *_LO_NS
+ * naming of its CON0-4/6 siblings -- presumably generated that way; verify
+ * the id exists in the defs header. The tsm register table follows later
+ * in this file. */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+/*
+ * TSM register table, binding each register to its field table above.
+ * Each nt_fpga_register_init_t entry appears to be
+ * { register_id, register_index, bit_width, access_type, reset_value,
+ *   field_count, fields } - TODO confirm against fpga_model.h.
+ * field_count must equal the number of entries in the referenced
+ * tsm_*_fields[] array; both are emitted together by the generator.
+ */
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+/*
+ * Module table for this FPGA image.
+ * Each nt_fpga_module_init_t entry appears to be
+ * { module_id, instance, id, version_major, version_minor, bus_type,
+ *   bus_address, register_count, registers } - TODO confirm against
+ * fpga_model.h. The trailing comment on each entry ("NAME:inst NAME
+ * vMAJ.MIN: NAME @ BUS,ADDR") is generator output describing the same
+ * values in human-readable form.
+ */
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+/*
+ * Build-time product parameters for this FPGA image, as
+ * { parameter_id, value } pairs. The list is terminated by the { 0, -1 }
+ * sentinel (NT_PARAM_UNKNOWN is 0). Parameter IDs are declared in
+ * nthw_fpga_parameters_defs.h; values are emitted by the FPGA build.
+ */
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+/*
+ * Top-level product descriptor for FPGA image 9563-055-024-0000, exported
+ * via nthw_fpga_instances.h. Ties together the parameter and module tables
+ * defined above.
+ */
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	/* NOTE(review): 140 must equal the number of product_parameters
+	 * entries excluding the {0,-1} end marker, and 48 the number of
+	 * fpga_modules entries - keep these counts in sync with the tables.
+	 */
+	140,	    product_parameters, 48, fpga_modules,
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+/* Registry of all supported FPGA product definitions compiled into the
+ * driver - presumably terminated/sized at the definition site; TODO confirm
+ * the terminator convention where the array is defined.
+ */
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];
+
+/* Product definition for FPGA image product 9563, version 55, revision 24
+ * (see nthw_fpga_9563_055_024_0000.c).
+ */
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Numeric IDs for every FPGA module type known to the NTHW layer. IDs are
+ * generator-assigned and referenced from the per-image module tables
+ * (fpga_modules[]); do not renumber existing entries.
+ */
+
+/* NOTE(review): guard name begins with underscore + uppercase letter, an
+ * identifier reserved by the C standard (C11 7.1.3) - consider renaming to
+ * NTHW_FPGA_MODULES_DEFS_H.
+ */
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+/* NOTE(review): value (32L) is unassigned - presumably a removed/reserved
+ * module ID; confirm the numbering gap is intentional in the generator.
+ */
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicator - must stay one past the last real module ID;
+ * only aliases may be defined below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Total number of module IDs (equals MOD_UNKNOWN_MAX; aliases excluded) */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Numeric IDs for FPGA build-time product parameters, referenced from the
+ * per-image product_parameters[] tables as { id, value } pairs. IDs are
+ * assigned sequentially (0-199) by the generator; do not renumber.
+ */
+
+/* NOTE(review): guard name begins with underscore + uppercase letter, an
+ * identifier reserved by the C standard (C11 7.1.3) - consider renaming to
+ * NTHW_FPGA_PARAMETERS_DEFS_H.
+ */
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+/* 0 doubles as the list terminator id in product_parameters[] */
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_REGISTERS_DEFS_H_ /* renamed: '_'+uppercase identifiers are reserved (C11 7.1.3, CERT DCL37-C) */
+#define NTHW_FPGA_REGISTERS_DEFS_H_ /* NOTE(review): update the closing #endif comment to match when regenerating */
+
+/* CAT: register and field ID constants, IDs 1000-1253 */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB: register and field ID constants, IDs 1254-1304 */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP: register and field ID constants, IDs 1305-1324 */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX: register and field ID constants, IDs 1325-1352 */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR: register and field ID constants, IDs 1393-1451 (IDs 1353-1392 are not present in this header) */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY: register and field ID constants, IDs 1452-1535 */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU: register and field ID constants, IDs 1536-1543 */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS: register and field ID constants, IDs 1544-1665 */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP: register and field ID constants, IDs 1666-1677 */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP: register and field ID constants, IDs 1755-1796 (IDs 1678-1754 are not present in this header) */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM: register and field ID constants, IDs 1797-1819 */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM: register and field ID constants, IDs 1820-1845 */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM: register and field ID constants, IDs 1855-2060 (IDs 1846-1854 are not present in this header) */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG: frame generator — per-channel (0-7) CTRL/RUN/BURSTSIZE/SIZEMASK/STREAMID, PRBS payload option */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF: TX gap/MAC feed block (presumably "Gap MAC Feeder") — IFG control, TX-timestamp injection, FIFO fill status */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY: GPIO direction (CFG) and value (GPIO) for two PHY module ports — LPMODE/MODPRS_B/RESET_B/INT_B pins (QSFP-style module signals — confirm) */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS: GPIO for up to 8 ports with RXLOS/TXDISABLE/TXFAULT per port (SFP-style module signals) */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP: SFP+ module GPIO — ABS (presence), RS (rate select), RXLOS, TXDISABLE, TXFAULT */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU: header field updater recipes — L3/L4 length (LEN_A/B/C) and TTL rewrite, checksum info, tunnel handling (expansion of "HFU" presumed — confirm) */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF: host (PCIe) interface — link config (EXT_TAG, MAX_READ, MAX_TLP), product ID/UUID, RX/TX TLP statistics */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH: RSS hash recipes — Toeplitz key/seed, QW0/QW4/W8/W9 key-word selection, IPv4 auto-mask */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST: header stripper recipes — strip window (START/END dyn+ofs), STRIP_MODE, up to three MODIFx patch commands */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G: 10G PHY/MAC core — indirect register access, loopback/lock control, PHY link status, built-in test-frame generator (TFG) */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR: per-recipe MTU enforcement (EN, MTU) — looks like IP fragmentation support; confirm semantics */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC: I2C controller — CR/SR, TX/RX FIFOs with occupancy and PIRQ threshold, bus timing (THIGH/TLOW/TSU*/THD*); register set matches a Xilinx AXI IIC-style core */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS: inserter recipes — insertion position (DYN/OFS) and LEN; presumably inserts bytes into packet headers */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA: IO action recipes — VLAN push/pop (TPID/PCP/DEI/VID), tunnel pop, queue override, ROA egress port table */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF: IP fragment handling — unmatched-fragment (UNM) queueing/feed, expire/timeout, per-recipe match inhibits */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM: key matcher — CAM and TCAM match tables, TCI/TCQ result tables, dual-bank (A/B) key-extraction recipes */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO: no registers defined for this FPGA image */
+/* MAC: MAC config/status — RX/TX config, link summary, MAC statistics counters, min/max delta statistics (MDS), raw and test-frame generators */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX: RX MAC statistics counters (FCS/preamble/SFD errors, frame-size histogram, byte/packet totals) */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG: test frame generator registers (address/data access, control, frame header, repetition count) */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX: TX MAC statistics counters (error counts, frame-size histogram, byte/packet totals) */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU: on-board MCU control/status (halt/pause/reset) and IRAM/DRAM read-write access registers */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG: control, HBM/HBS buffer access and flow-control debug registers (module role: see register map) */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: masking recipe (RCP) registers - per-recipe mask DYN/EN/LEN/OFS fields */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF: network interface transceiver registers - GT control, PRBS, DRP access, error and link status */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3: PCIe Gen3 core registers - config/control, DRP access, and interrupt clear/force/mask banks */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM: register/field ID defines (6151L-6209L) */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL: register/field ID defines (6210L-6252L) */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI: register/field ID defines (6253L-6319L) */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP: register/field ID defines (6320L-6326L) */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC: register/field ID defines (6327L-6376L) */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH: register/field ID defines (6377L-6408L) */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD: register/field ID defines (6409L-6423L) */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC: register/field ID defines (6424L-6436L) */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC: register/field ID defines (6437L-6442L) */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA: register/field ID defines (6443L-6487L) */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL: register/field ID defines (6488L-6506L) */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR: register/field ID defines (6507L-6517L) */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000: register/field ID defines (6518L-6519L) */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001: register/field ID defines (6520L-6521L) */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500: register/field ID defines (6598L-6638L; ID sequence intentionally skips 6522L-6597L after RST7001) */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501: register/field ID defines (6639L-6679L) */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502: register/field ID defines (6680L-6721L) */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503: register/field ID defines (6722L-6766L) */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504: register/field ID defines (6767L-6808L) */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505: register/field ID defines (6809L-6850L) */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506: no register ID defines in this section */
+/* RST9507: no register ID defines in this section */
+/* RST9508: register/field ID defines (6851L-6892L) */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509: no register ID defines in this section */
+/* RST9510: no register ID defines in this section */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513: no register ID defines in this section */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+/* NOTE(review): register-ID sequence jumps 8083 -> 8094 here; IDs 8084-8093
+ * are presumably assigned to a module not included in this file — confirm
+ * against the register-map generator output.
+ */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+/* NOTE(review): register-ID sequence jumps 8138 -> 8159 here; IDs 8139-8158
+ * are presumably assigned to a module not included in this file — confirm
+ * against the register-map generator output.
+ */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+/* RXAUI */
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* NOTE(review): the SLC_LR section defines no IDs of its own — presumably it
+ * reuses the SLC register definitions above; verify with the generator. */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v2 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-17 14:43   ` Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  114 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   71 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15440 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_BUS_H__
#define __NTHW_BUS_H__

#include <stdint.h>	/* uint8_t — makes this header self-contained */

/* Identifier of a RAB (register access bus) instance — TODO confirm expansion */
typedef uint8_t rab_bus_id_t;

#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 99569c2843..65064f44ab 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,13 +10,45 @@ cflags += [
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+/*
+ * NOTE(review): tentative definitions — the count is zero and the table
+ * pointer NULL here; presumably the real Si5340 register data is added in a
+ * later commit. Verify consumers tolerate an empty profile until then.
+ */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+/* Message used by compile-time size checks of clock profile tables. */
+#define clk_profile_size_error_msg "size test failed"
+
+/* Format 0: 8-bit register address with a value and a write mask. */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Format 1: 16-bit register address, full 8-bit value (no mask). */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Format 2: wide register address, full 8-bit value (no mask). */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Tells consumers which of the record formats above a profile table uses. */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/* Si5340 profile for NT200A02 (U23): entry count and register/value table. */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Copy product parameters (NIM/port/quad counts) from the FPGA model into
+ * @p_fpga_info and classify the FPGA image profile based on which product
+ * parameters are present.
+ *
+ * Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	/* -1 is the fallback when a parameter is absent from the FPGA model */
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	/* Dead store in practice: every branch below assigns a profile */
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one byte from register @n_reg_addr of I2C device @n_dev_addr behind
+ * IIC controller instance @n_instance_no.
+ *
+ * Returns the byte value (0..255) on success, or -1 on init/read failure.
+ * NOTE(review): the nthw_iic_t lives on the stack and is not explicitly
+ * closed/deinitialized on either path — confirm no cleanup is required.
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+/*
+ * Write byte @val to register @n_reg_addr of I2C device @n_dev_addr behind
+ * IIC controller instance @n_instance_no.
+ *
+ * Returns 0 on success, -1 on init/write failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Probe the inclusive range of IIC controller instances
+ * [n_instance_no_begin..n_instance_no_end] and scan each bus for responding
+ * devices (nthw_iic_scan() reports its findings; nothing is returned here).
+ *
+ * Always returns 0; instances that fail allocation or init are skipped.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				/* Retry/timing parameters before scanning — see
+				 * nthw_iic_set_retry_params() for argument meaning.
+				 */
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Detect a Silicon Labs clock chip on IIC instance @n_instance_no by
+ * selecting page 0 via @n_page_reg_addr and reading the 8-byte ident area
+ * from device @n_dev_addr.
+ *
+ * Returns the detected part number (5340, 5341 or 5338), or -1 if nothing
+ * recognizable responded.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	/* All-ones until a successful read; logged below even on failure */
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* Pack all 8 ident bytes big-endian into one uint64 for
+			 * logging; 64 bits of shifting flushes the initial -1.
+			 */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			/*
+			 * NOTE(review): this branch compares against decimal 38
+			 * where the branches above use hex (0x40/0x41), and it
+			 * tests a_silabs_ident[2] without first requiring
+			 * a_silabs_ident[3] == 0x53 — verify 38 vs 0x38 and the
+			 * byte index against the Si5338 datasheet.
+			 */
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
/*
 * Compute CRC-16-CCITT (XMODEM variant) over a buffer.
 * Polynomial 0x1021, initial value 0, no reflection, no final XOR — the
 * variant used by X.25, HDLC, XMODEM, Bluetooth, SD and many others.
 * Table-free, byte-at-a-time implementation.
 */
static uint16_t crc16(uint8_t *buffer, size_t length)
{
	uint16_t crc = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		/* Swap the two CRC bytes, then fold in the next data byte */
		crc = (uint16_t)((crc >> 8) | (crc << 8));
		crc = (uint16_t)(crc ^ buffer[i]);
		/* Spread the new low byte through the register (poly 0x1021) */
		crc = (uint16_t)(crc ^ ((crc & 0xffU) >> 4));
		crc = (uint16_t)(crc ^ (crc << 12));
		crc = (uint16_t)(crc ^ ((crc & 0xffU) << 5));
	}
	return crc;
}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ *
+ * Configure the Si5340 clock synthesizer (at 7-bit I2C address @n_iic_addr)
+ * with the given "fmt2" register profile. A temporary IIC controller and
+ * Si5340 object are created for the duration of the configuration.
+ *
+ * Returns the result of nthw_si5340_config_fmt2() (0 on success).
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	/*
+	 * NOTE(review): only the Si5340 object is deleted here; p_nthw_iic is
+	 * not freed. Presumably nthw_si5340_delete() or a later owner releases
+	 * it - confirm, otherwise this leaks one IIC object per call.
+	 */
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+/*
+ * Top-level FPGA bring-up:
+ *   1) read and decode the FPGA identification registers,
+ *   2) resolve a matching FPGA model via the FPGA manager,
+ *   3) run the adapter-specific reset/init sequence,
+ *   4) probe PCIe3 (falling back to HIF) and set up the TSM timers.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		/* Read the FPGA ident/build-time registers and decode them */
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/* NOTE: assigns the function-scope n_fpga_ident (no shadowing) */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	/*
+	 * The manager is no longer needed once the model query is done.
+	 * Delete it before the error check below so the "unsupported FPGA"
+	 * path does not leak it.
+	 */
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-specific reset/init sequence */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Probe for PCIe3 module; fall back to HIF if it is absent */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	/* Guarded: p_nthw_tsm may be NULL if nthw_tsm_new() failed above */
+	if (p_nthw_tsm) {
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down the FPGA by resetting the RAB via the RAC module.
+ *
+ * Returns the RAB reset result, or -1 if the adapter (or its RAC)
+ * was never initialized.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	/* Original code checked p_fpga_info twice; once is enough */
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface for top-level FPGA init/shutdown and the
+ * low-level probe/IIC helper routines of the nthw core.
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* Identify, model-resolve and bring up the FPGA; 0 on success */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+/* Reset the RAB; -1 if the adapter was never initialized */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* Read FPGA build parameters into p_fpga_info */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* Probe the AVR microcontroller instance over SPI */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* Scan IIC controller instances in [n_instance_no_begin; n_instance_no_end] */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+/* Single-byte register read/write through a given IIC controller instance */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* Detect a SiLabs clock device at n_dev_addr (page register probe) */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* Program an Si5340 clock synthesizer with an fmt2 register profile */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * Run the NT200A0x reset sequence: first the common part, then the
+ * FPGA-product-specific part (currently only product 9563 is supported).
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_prod_id = p_fpga_info->n_fpga_prod_id;
+	struct nthw_fpga_rst_nt200a0x fpga_rst;
+	int res;
+
+	/* Common reset sequence shared by all NT200A0x adapters */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &fpga_rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* Product-specific reset sequence */
+	if (n_fpga_prod_id == 9563) {
+		res = nthw_fpga_rst9563_init(p_fpga_info, &fpga_rst);
+	} else {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NT200A0x adapter reset/init entry points */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+/* Run common + product-specific reset; 0 on success */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+/* Product-specific reset for FPGA 9563; fills in the field pointers of *p */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve and cache all RST9563 register-field pointers (RST, CTRL, STAT,
+ * STICKY, POWER) into the reset state *p for later use by the reset sequence.
+ * Fields that do not exist on the 9563 FPGA are explicitly set to NULL.
+ *
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	/* Original code queried the module twice; a single lookup suffices */
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	/* Original code assigned mp_fld_rst_ptp twice; once is enough */
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 PERIPH reset bit (set then clear).
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_reg_rst;
+	nt_field_t *p_fld_rst_periph;
+
+	if (!p_mod_rst)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+	p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+	p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+	/* Assert, then immediately deassert, the peripheral reset */
+	field_set_flush(p_fld_rst_periph);
+	field_clr_flush(p_fld_rst_periph);
+
+	return 0;
+}
+
+/*
+ * Program the board clock synthesizer. Only the Si5340 model is supported
+ * on this FPGA; any other model is rejected with an error log.
+ *
+ * Returns the Si5340 configuration result, or -1 for unsupported models.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Full FPGA 9563 reset sequence: peripheral reset, clock synthesizer
+ * programming, register-field setup, then the common NT200A0x reset.
+ *
+ * Returns 0 on success, the first failing step's result otherwise.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	const int n_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int res;
+
+	/* Step 1: pulse the peripheral reset */
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 2: program the clock synthesizer */
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_synth_model,
+						n_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 3: resolve all register-field pointers */
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 4: run the common NT200A0x reset with the resolved fields */
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED.
+ *
+ * Retries the PLL lock-wait up to 5 times, pulsing the DDR PLL reset
+ * between attempts, then waits (unbounded) for the DDR4 MMCM and the
+ * optional TSM REF MMCM, clears all sticky MMCM/PLL unlock bits and
+ * logs any DDR4 sticky bits that re-assert.
+ * Returns 0 on success, -1 when an MMCM never locks.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	/* single lock-status variable; the original inner declaration shadowed it */
+	int locked;
+	uint32_t retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		/* retrycount is unsigned - "<= 0" can only mean "== 0" */
+		if (retrycount == 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is optional - only wait when the field exists */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ *
+ * Probes for an SDC module first; when present, each retry waits for the
+ * DDR4 PLL to lock and then for the SDC calibration states. When no SDC
+ * exists (or its handle could not be allocated) only the PLL lock is
+ * awaited. Between retries the DDR PLL is pulsed through reset.
+ * Returns 0 on success, non-zero when calibration never completes.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			/* allocation failed - continue with PLL-lock polling only */
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					/* NOTE(review): "%08lX" with a uint64_t is only
+					 * correct on LP64 - PRIX64 would be portable;
+					 * confirm before changing the format string.
+					 */
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				/* NOTE(review): p_nthw_sdc may be NULL here (no SDC
+				 * found) - assumes nthw_sdc_get_states() tolerates
+				 * NULL like nthw_sdc_init() does; verify.
+				 */
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Full NT200A0x FPGA reset sequence.
+ *
+ * Asserts all domain resets to defaults, selects the timesync reference
+ * clock, releases SYS/CORE MMCMs, re-initializes the RAB buses, releases
+ * PHY/MAC/DDR4/SDC resets, waits for DDR4 PLL lock and SDRAM calibration,
+ * runs the timesync/PTP release sequence with sticky-bit checks, and
+ * finally stages power.
+ *
+ * Returns 0 on success, -1 when a required lock/calibration step fails.
+ * NOTE: the statement order mirrors the hardware bring-up sequence and
+ * must not be rearranged. Optional fields (NULL on some FPGA images) are
+ * NULL-checked before use.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0; /* GT data reset lane index used for the XXV MAC/PCS below */
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): PTP MMCM release sequence below is intentionally
+	 * compiled out ("if (false)") - confirm against the reference
+	 * bring-up code before removing or enabling it.
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Probe the NT200A0x reset infrastructure.
+ *
+ * Resets and sets up the RAB buses, probes the AVR and scans two I2C
+ * buses (diagnostics), then detects which Si-Labs clock synthesizer is
+ * fitted: Si5340 (NT200A02 / NT200A01 HW-build2) is tried first, then
+ * Si5338 (old NT200A01 HW-build1). The detected model, I2C address and
+ * HW id are recorded in *p_rst for the later reset sequence.
+ *
+ * Returns -1 when no clock synth could be detected, otherwise the result
+ * of the last I2C scan.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);	/* dereferenced below; mirrors nthw_fpga_rst9563_init() */
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/*
+	 * Best-effort probe/scan: each result overwrites the previous one and
+	 * does not abort initialization.
+	 * NOTE(review): on success this function ultimately returns the result
+	 * of the last scan - confirm that this is intended rather than 0.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Field handles and board parameters driving the NT200A0x reset sequence.
+ * Populated by nthw_fpga_rst_nt200a0x_init() and product-specific setup.
+ * Fields not present on a given FPGA image remain NULL and are NULL-checked
+ * before use by the reset code.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	/* FPGA identity (product/version/revision) */
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	/* Board hardware build id */
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synthesizer (e.g. 5338/5340) and its 7-bit I2C address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (per-domain reset bits) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers (MMCM/PLL lock status) */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers (latched unlock events) */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional product-specific hooks; may be left NULL */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate a zero-initialized GPIO_PHY instance.
+ * Returns NULL on allocation failure; free with nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/* Scrub and release a GPIO_PHY instance; a NULL pointer is ignored. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+	/* Clear the object before releasing it, as the allocator pairs with
+	 * a zeroing nthw_gpio_phy_new().
+	 */
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY instance to FPGA module instance n_instance.
+ *
+ * When p is NULL the call only probes: returns 0 when the module instance
+ * exists, -1 otherwise. With a valid p, resolves the CFG/GPIO registers
+ * and the per-port field handles (the PLL_INTR and RXLOS fields are
+ * optional and may resolve to NULL via register_query_field()), reads
+ * back the CFG register, and returns 0 on success or -1 when the module
+ * instance does not exist.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	/* probe-only mode: report module presence without touching *p */
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* read back the CFG register so cached field values are current */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Report whether the port's LPMODE GPIO currently reads back asserted. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* Report whether the port interrupt is pending. */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" - low means interrupt */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* Report whether the port is held in reset. */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" - low means in reset */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* Report whether a transceiver module is present in the port cage. */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" - low means present */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/* Report whether the PLL interrupt is asserted; false on HW without PLL_INTR. */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* this HW doesn't support "PLL_INTR" (INTR from SyncE jitter attenuater) */
+	if (p->mpa_fields[if_no].gpio_pll_int == NULL)
+		return false;
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/* Report the emulated RXLOS state; false on HW without the RXLOS field. */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (p->mpa_fields[if_no].gpio_port_rxlos == NULL)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/* Drive the port's LPMODE pin and switch its CFG bit to output mode. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Drive the port's RESET_B pin (active-low: enable clears the pin) and
+ * switch its CFG bit to output mode.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/* Drive the emulated RXLOS pin; no-op on HW without the RXLOS field. */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (flds->gpio_port_rxlos == NULL)
+		return;
+
+	if (enable)
+		field_set_flush(flds->gpio_port_rxlos);
+	else
+		field_clr_flush(flds->gpio_port_rxlos);
+}
+
+/*
+ * Restore the port's CFG direction defaults: LPMODE/INT_B/RESET_B/MODPRS_B
+ * as inputs, and the optional RXLOS emulation bit as output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	field_set_flush(flds->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(flds->cfg_int); /* enable input */
+	field_set_flush(flds->cfg_reset); /* enable input */
+	field_set_flush(flds->cfg_mod_prs); /* enable input */
+	if (flds->cfg_port_rxlos)
+		field_clr_flush(flds->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+/* Number of PHY interfaces (ports) served by one GPIO_PHY module instance */
+#define GPIO_PHY_INTERFACES (2)
+
+/*
+ * Per-interface field handles: "cfg_*" control pin direction (input/output),
+ * "gpio_*" carry the pin values themselves. The *_port_rxlos fields are
+ * optional and may be NULL on FPGA images without RXLOS emulation.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* Driver handle for one GPIO_PHY FPGA module instance */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate a zero-initialized HIF handle; returns NULL on allocation failure.
+ * calloc() replaces the previous malloc()+memset() pair.
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/* Release a HIF handle; scrubs it first so stale use is caught early. NULL is ok. */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a HIF handle to FPGA module instance @n_instance and cache all
+ * register/field handles. With p == NULL this only probes for the module
+ * (returns 0 if present, -1 otherwise). Returns 0 on success, -1 on error.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Reg/Fld not present on HIF. The previous code tested the register
+	 * pointer right after assigning NULL to it, so both branches of each
+	 * if/else were dead - assign the NULLs directly instead.
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* PCIe traffic statistics */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* Optional status register */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/*
+		 * Register absent: don't re-query and don't hand a NULL
+		 * register to register_query_field() - just clear the fields.
+		 */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Tune PCIe gen3 behavior: cap MaxReadReqSize at index 2 (512B) and enable
+ * extended tags. Skipped entirely on NT40E3 adapters. All HIF_CONFIG fields
+ * are optional; each write is guarded by a field-presence check.
+ * Always returns 0.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+/*
+ * Trigger a TSM time sample by writing the magic key to HIF_SAMPLE_TIME.
+ * Returns 0 on success, -1 if the optional sample-time field is absent
+ * (nthw_hif_init() leaves mp_fld_sample_time NULL in that case).
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw HIF traffic counters. The optional status fields
+ * (tags-in-use, rd/wr error) read as 0 when absent on this FPGA image.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* Constants needed by the caller to convert counter ticks to byte rates */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	*p_tags_in_use = (p->mp_fld_status_tags_in_use ?
+		       field_get_updated(p->mp_fld_status_tags_in_use) :
+		       0);
+
+	*p_rd_err = (p->mp_fld_status_rd_err ? field_get_updated(p->mp_fld_status_rd_err) :
+		   0);
+	*p_wr_err = (p->mp_fld_status_wr_err ? field_get_updated(p->mp_fld_status_wr_err) :
+		   0);
+
+	return 0;
+}
+
+/*
+ * Convert the raw HIF counters to byte rates (counter * unit size * ref
+ * frequency / ref-clock ticks). When no ref-clock ticks elapsed, all rates
+ * are reported as 0. NOTE: *p_rd_err_cnt / *p_wr_err_cnt are INCREMENTED,
+ * not assigned - the caller must initialize them before the first call.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt, tg_unit_size, tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size, &tg_ref_freq,
+			&n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (ref_clk_cnt) {
+		uint64_t rx_rate;
+		uint64_t tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+	} else {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable statistics collection and request a counter snapshot.
+ * Both fields are staged before the single register flush. Always returns 0.
+ */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Disable statistics collection while still requesting a final counter
+ * snapshot (REQ stays set, ENA is cleared). Always returns 0.
+ */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample PCIe traffic over a fixed 100 ms window: enable counting, wait,
+ * disable, then convert the counters to rates. Blocks the caller for the
+ * duration of the window. Always returns 0.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill an end-point counter snapshot from the current HIF rate counters.
+ * NOTE(review): cur_tx is passed where nthw_hif_get_stat_rate() names the
+ * parameter p_pci_rx_rate (and vice versa) - presumably intentional because
+ * the endpoint views the direction opposite to the host; confirm with callers.
+ * Always returns 0.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Payload unit size (bytes) used when converting traffic-counter ticks to rates */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/* Driver handle for one HIF (PCIe host interface) FPGA module instance */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* Product identification registers */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - kept NULL by nthw_hif_init() */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt mask/clear/force - optional, NULL when absent */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional status register - fields NULL when absent */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* PCIe traffic statistics */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* PCIe3 tuning - optional, fields NULL when absent */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA identification decoded once at init time */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe end-point error counters (correctable / non-fatal / fatal) */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Snapshot of traffic, tag and error state for one PCIe end point */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+/* R/W bit ORed into the shifted 7-bit device address byte */
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvell 88E1111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed must be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns - start condition setup time */
+static const uint32_t susto = 4000; /* ns - stop condition setup time */
+static const uint32_t hdsta = 4000; /* ns - start condition hold time */
+static const uint32_t sudat = 250; /* ns - data setup time */
+static const uint32_t buf = 4700; /* ns - bus free time between transfers */
+static const uint32_t high = 4000; /* ns - SCL high period */
+static const uint32_t low = 4700; /* ns - SCL low period */
+static const uint32_t hddat = 300; /* ns - data hold time */
+
+/*
+ * Pulse the TX FIFO reset bit: read-modify context, set + flush, then
+ * clear + flush. The two flushes are required so the controller sees a
+ * full reset pulse. Always returns 0.
+ */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Push one entry into the controller's TX FIFO: the START/STOP condition
+ * flags and the data byte are staged first, then flushed as a single
+ * register write. Always returns 0.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	(start ? field_set_all : field_clr_all)(p->mp_fld_tx_fifo_start);
+	(stop ? field_set_all : field_clr_all)(p->mp_fld_tx_fifo_stop);
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the controller's RX FIFO into *p_data. Always returns 0. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	const uint32_t n_rxdata = field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	*p_data = (uint8_t)n_rxdata;
+
+	return 0;
+}
+
+/*
+ * Soft-reset the I2C controller by writing the reset key (0x0A) to the
+ * SOFTR register. Always returns 0.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Set the EN bit in the control register, enabling the I2C controller. Always returns 0. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report the bus-busy (BB) status bit in *pb_flag. Always returns 0. */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Report the RX-FIFO-empty status bit in *pb_flag. Always returns 0. */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ */
+/*
+ * Program the eight I2C timing registers. Each register takes its minimum
+ * timing requirement (ns) expressed in I2C input-clock cycles, where
+ * n_iic_cycle_time is the clock cycle time in ns (e.g. 125MHz = 8ns).
+ * Always returns 0.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		nt_field_t *p_fld;
+		uint32_t n_time_ns;
+	} a_timings[] = {
+		{ p->mp_fld_tsusta, susta },
+		{ p->mp_fld_tsusto, susto },
+		{ p->mp_fld_thdsta, hdsta },
+		{ p->mp_fld_tsudat, sudat },
+		{ p->mp_fld_tbuf, buf },
+		{ p->mp_fld_thigh, high },
+		{ p->mp_fld_tlow, low },
+		{ p->mp_fld_thddat, hddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timings) / sizeof(a_timings[0]); i++) {
+		uint32_t val = a_timings[i].n_time_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timings[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate a zero-initialized IIC handle; returns NULL on allocation failure.
+ * calloc() replaces the previous malloc()+memset() pair.
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Bind an IIC handle to FPGA module instance @n_iic_instance, cache all
+ * register/field handles, then soft-reset and enable the controller,
+ * program the bus timing (when n_iic_cycle_time is non-zero, in ns per
+ * I2C input-clock cycle) and reset the TX FIFO.
+ * With p == NULL this only probes for the module (0 if present, -1 if not).
+ * Returns 0 on success, -1 on error.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All negative: select the default retry/delay values */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Bus timing registers */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* TX/RX FIFO registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Release an IIC handle; scrubs it first so stale use is caught early. NULL is ok. */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Configure the poll delay (us) and retry budgets used by the bus/data wait
+ * loops and the read/write retry wrappers. A negative argument selects that
+ * parameter's default. Always returns 0.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry = (n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read @data_len bytes from register @reg_addr of device @dev_addr into
+ * @p_void, retrying up to mn_read_data_retry times on failure.
+ * Returns 0 on success, -1 when all retries are exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Single-attempt I2C read: write the register address, issue a repeated
+ * start with the RD bit, then drain @data_len bytes from the RX FIFO into
+ * @p_byte. Returns 0 on success, -1 on bus/data timeout.
+ * Rewritten with a guard clause; the old version also carried an
+ * unreachable trailing "return 0;" after the if/else (both arms returned).
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write @data_len bytes from @p_void to register @reg_addr of device
+ * @dev_addr, retrying up to mn_write_data_retry times on failure.
+ * Returns 0 on success, -1 when all retries are exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Single-attempt I2C write: write the register address followed by
+ * @data_len bytes from @p_byte, with the stop bit on the last byte.
+ * Returns 0 on success, -1 on empty input or when the bus is not ready
+ * before the transfer.
+ * NOTE(review): the post-transfer wait spins in a while(true) with no
+ * timeout if the bus never becomes ready - presumably relying on the HW
+ * always completing; confirm whether a bounded wait is wanted here.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ */
+/*
+ * Poll the bus-busy status until the bus is free or the retry budget
+ * (mn_bus_ready_retry, mn_poll_delay us between polls) runs out.
+ * Returns true when the bus is free, false on timeout.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	/*
+	 * Report the sampled bus state rather than the loop counter: the old
+	 * "count == 0" test misreported a timeout as success when count went
+	 * negative (retry budget of 0) and a ready bus as failure when it
+	 * became free on the last poll.
+	 */
+	return !b_bus_busy;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+/*
+ * Poll the RX-FIFO-empty status until data arrives or the retry budget
+ * (mn_data_ready_retry, mn_poll_delay us between polls) runs out.
+ * Returns true when data is available, false on timeout.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	/*
+	 * Report the sampled FIFO state rather than the loop counter: the old
+	 * "count == 0" test misreported a timeout as success when count went
+	 * negative (retry budget of 0) and available data as failure when it
+	 * arrived on the last poll.
+	 */
+	return !b_rx_fifo_empty;
+}
+
+/*
+ * Probe device address @n_dev_addr by reading one byte from @n_reg_addr.
+ * Logs a hit at DBG level. Returns 0 when a device acknowledged, non-zero
+ * otherwise (passes through nthw_iic_readbyte()'s result).
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res;
+	uint8_t data_val = -1; /* 0xff sentinel - overwritten on a successful read */
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan for the first responding device starting at @n_dev_addr_start,
+ * upwards through address 127 when @b_increate (sic) is true, otherwise
+ * downwards through address 0. Returns the device address found, or -1.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * Start from "not found": the old res = 0 initializer made an empty
+	 * scan range (start address outside 0..127) report a bogus hit.
+	 */
+	int res = -1;
+	int i = 0;
+
+	if (b_increate) {
+		for (i = n_dev_addr_start; i < 128; i++) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	} else {
+		for (i = n_dev_addr_start; i >= 0; i--) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit device address on the bus; hits are only logged. Always returns 0. */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev_addr;
+
+	for (n_dev_addr = 0; n_dev_addr < 128; n_dev_addr++)
+		(void)nthw_iic_scan_dev_addr(p, n_dev_addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/*
+ * Context for one FPGA IIC controller instance: FPGA/module handles,
+ * retry/timing policy, and cached register/field handles.
+ * Looks like an AXI IIC style register layout (CR/SR/FIFOs/SOFTR) —
+ * TODO confirm against the FPGA register map.
+ */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_iic;
+	int mn_iic_instance;
+
+	/* Bus timing and polling/retry policy (see nthw_iic_set_retry_params) */
+	uint32_t mn_iic_cycle_time;
+	int mn_poll_delay;
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* I2C timing parameter registers (setup/hold/high/low times) */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register: enable, master mode, TX FIFO reset, TX ack */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register: bus busy and FIFO full/empty flags */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data byte plus START/STOP condition flags */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt threshold */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft reset register (key-protected) */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Allocate a zeroed context; release with nthw_iic_delete(). */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Data transfer helpers; return 0 on success, non-zero on failure. */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning/probing utilities */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..d206c374b4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+/* Number of PCS virtual lanes implemented by the supported FPGA images. */
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+/* NOTE(review): declared but not referenced in visible code — presumably a
+ * receiver-equalization mode constant kept for future use; confirm.
+ */
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/*
+	 * Allocate a zero-initialized MAC/PCS context. Returns NULL on
+	 * allocation failure; the caller owns the object and must release
+	 * it with nthw_mac_pcs_delete(). calloc replaces the previous
+	 * malloc+memset pair (same behavior, single idiomatic call).
+	 */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	/* Scrub the context before releasing it; NULL is a no-op. */
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	/* Probe-only mode: with p == NULL only module presence is reported. */
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	/* Port number mirrors the instance index; must fit in a uint8_t. */
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	/* Resolve and cache every register/field handle used by this module. */
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		/* PCS RX status fields (live and latched variants) */
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		/* GTY transceiver TX/RX reset-done status per channel */
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		/* Lock masks depend on the FPGA product (lane/GTY counts). */
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		/* PCS configuration: path resets, enables, test patterns */
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		/* GTY loopback controls, one field per channel */
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		/* Optional field: register_query_field() may return NULL. */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		/* Link summary: presence, link state, fault and event counters */
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		/* RS-FEC control, status and codeword counters */
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		/* Newer images have a combined GTY_CTL; query before fallback. */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			/* Fallback layout: separate RX and TX GTY control registers */
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		/* RX polarity/LPM/equalizer-reset fields live in mp_reg_gty_ctl
+		 * in both layouts (combined register, or the RX register).
+		 */
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		/* Optional register: only present on some FPGA images. */
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/*
+ * Select the TX data source: enable routes host traffic to TX and turns the
+ * traffic generator (TFG) off; disable does the opposite. The two selects are
+ * written in this order — keep it, as both touch the same PHYMAC_MISC register.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow value, then write the RX-enable bit and flush. */
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow value, then write the TX-enable bit and flush. */
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Select/deselect the host as TX data source. */
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Select/deselect the traffic generator (TFG) as TX data source. */
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Timestamp on EOP (set) or SOP (clear). The field is optional
+	 * (looked up with register_query_field); silently skip when absent.
+	 */
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (fld == NULL)
+		return;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	/* Locked only when every masked-in virtual lane reports both
+	 * block lock and (demuxed) lane lock.
+	 */
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) &
+		p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) &
+		p->m_fld_vl_demuxed_lock_mask;
+
+	return block_lock == p->m_fld_block_lock_lock_mask &&
+	       lane_lock == p->m_fld_vl_demuxed_lock_mask;
+}
+
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Assert (enable) or release the TX path reset. */
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Assert (enable) or release the RX path reset. */
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	/* True while the RX path reset bit is asserted. */
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst) != 0;
+}
+
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Start/stop transmitting Remote Fault Indication on TX. */
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Force (or stop forcing) an RX resynchronization. */
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	/* All four GTY channels must report RX reset complete (per mask). */
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+
+	return (field_get_updated(p->mp_fld_gty_stat_rx_rst_done0) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done1) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done2) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done3) & mask) == mask;
+}
+
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	/* All four GTY channels must report TX reset complete (per mask). */
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+
+	return (field_get_updated(p->mp_fld_gty_stat_tx_rst_done0) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done1) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done2) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done3) & mask) == mask;
+}
+
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/* GT loop mode 2 selects host (near-end) loopback, 0 normal operation. */
+	const uint32_t mode = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/* GT loop mode 4 selects line (far-end) loopback, 0 normal operation. */
+	const uint32_t mode = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	/*
+	 * Read the per-lane BIP-8 error counters; on DEBUG builds log any
+	 * non-zero lane. NOTE(review): nothing here writes the counters, so
+	 * the "reset" presumably relies on the register being clear-on-read
+	 * via register_update() — confirm against the register definition.
+	 */
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	/* Only bit 0 of the PCS RX status field is reported. */
+	const uint32_t val = field_get_updated(p->mp_fld_stat_pcs_rx_status);
+
+	*status = (uint8_t)(val & 0x01);
+}
+
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	/* True when the PCS reports a high bit-error rate. */
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber) != 0;
+}
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	/*
+	 * Read the LINK_SUMMARY register once, then hand out the requested
+	 * fields. Every output pointer is optional; pass NULL to skip it.
+	 */
+	register_update(p->mp_reg_link_summary);
+
+	if (p_abs != NULL)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state != NULL)
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	if (p_lh_abs != NULL)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state != NULL)
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	if (p_link_down_cnt != NULL)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr != NULL)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault != NULL)
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	if (p_lh_remote_fault != NULL)
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	if (p_local_fault != NULL)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault != NULL)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	/* A partial lock (some but not all masked lanes) means the link is
+	 * wedged and the path needs a reset.
+	 */
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	/* RS-FEC control: all-zero enables FEC, all five bits set bypasses it. */
+	const uint32_t fec_ctrl = enable ? 0 : (1 << 5) - 1;
+
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, fec_ctrl);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	/* True when RS-FEC is bypassed. */
+	return field_get_updated(p->mp_field_fec_stat_bypass) != 0;
+}
+
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	/* True when the RS-FEC status reports valid. */
+	return field_get_updated(p->mp_field_fec_stat_valid) != 0;
+}
+
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	/* True when the FEC lanes are aligned. */
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn) != 0;
+}
+
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	/* True when at least one FEC alignment-marker lock bit is set. */
+	register_update(p->mp_reg_fec_stat);
+
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3) != 0;
+}
+
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	/* True only when every FEC alignment-marker lock bit is set. */
+	register_update(p->mp_reg_fec_stat);
+
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3) != 0;
+}
+
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	/* Log a debug snapshot of all FEC status fields for this port.
+	 * Fix: the fourth lock label previously read "AM_LOCK_0" although
+	 * the value printed is am_lock3 — corrected to "AM_LOCK_3".
+	 */
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read the FEC corrected (CW) and uncorrected (UCW) code word counters,
+ * logging any non-zero counts.
+ * NOTE(review): no explicit write is performed here, so this only "resets"
+ * the counters if they are clear-on-read — confirm against the register
+ * specification.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the per-lane GTY RX buffer status.
+ * Returns true (and logs the four lane status values) only when at least
+ * one lane reports a status change AND at least one lane has a non-zero
+ * status; returns false otherwise.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Apply GTY transmitter analog tuning for one lane (0-3): pre-cursor,
+ * differential control and post-cursor. Each value is masked to 5 bits
+ * (& 0x1F) before being written and flushed.
+ * A lane outside 0-3 leaves the registers untouched (the switches have no
+ * default case) but the requested values are still logged.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode on all four lanes:
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	/* Write the LPM-enable bit (bit 0 of mode) to all four lanes;
+	 * only the last write flushes the register.
+	 */
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/* Swap (or restore) GTY TX differential-pair polarity for one lane (0-3).
+ * Lanes outside 0-3 are silently ignored by the switch.
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Swap (or restore) GTY RX differential-pair polarity for one lane (0-3).
+ * Lanes outside 0-3 are silently ignored by the switch.
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set the port LED mode via DEBOUNCE_CTRL.NT_PORT_CTRL
+ * (values from enum nthw_mac_pcs_led_mode_e).
+ */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/* Set the RX timestamp compensation delay — a no-op when the FPGA build
+ * does not provide the TIMESTAMP_COMP.RX_DLY field.
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Record the logical port number (used in this module's log messages). */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Return the updated BLOCK_LOCK lock field value. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Return the cached mask covering the BLOCK_LOCK lock bits. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Return the updated demuxed virtual-lane lock field value. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Return the cached mask covering the virtual-lane lock bits. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* LED modes written to DEBOUNCE_CTRL.NT_PORT_CTRL by nthw_mac_pcs_set_led_mode(). */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* Receiver equalization modes for nthw_mac_pcs_set_receiver_equalization_mode(). */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Per-port handle for one MAC_PCS FPGA module instance: pointers to the
+ * module's registers and fields, plus a few cached masks.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;	/* logical port number, used in log messages */
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+/* Lifecycle */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+/* Reset / path control and status */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+/* Link summaries (output pointers may be NULL where unsupported by caller) */
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+/* FEC control and statistics */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+
+/* GTY transceiver tuning and misc */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/* Update a field's value, then set or clear it and flush.
+ * A NULL field pointer (not present in this FPGA build) is ignored.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate a zero-initialized MAC_PCS_XXV handle.
+ * Returns NULL on allocation failure.
+ * calloc() replaces the original malloc()+memset() pair with identical
+ * semantics in a single call.
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/* Free a handle allocated by nthw_mac_pcs_xxv_new(); NULL is a no-op.
+ * The object is zeroed before free (scrubs stale register pointers).
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+		free(p);
+	}
+}
+
+/* Map a channel index to a port number: index 0 uses the module instance
+ * number, any other index is used as-is.
+ * NOTE(review): confirm this instance-vs-index mapping against callers.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
+/* Record the logical port number (used in this module's log messages). */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register once and copy the requested fields to the
+ * caller's output pointers; any output pointer may be NULL to skip it.
+ * Fix: the original evaluated &p->regs[index] before assert(p), so the
+ * assertion ran after 'p' had already been used; assert first.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r; /* register and fields */
+
+	assert(p);
+	r = &p->regs[index];
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Enable/disable the TX path via CORE_CONF.TX_ENABLE. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable the RX path via CORE_CONF.RX_ENABLE. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert CORE_CONF.RX_FORCE_RESYNC. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the RX GT data sub-reset. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX GT data sub-reset. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the auto-negotiation / link-training sub-reset. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the speed-control sub-reset. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable transmission of remote fault indication (RFI). */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable transmission of local fault indication (LFI). */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set both LFI and RFI in one register update + flush (single HW write). */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+/* True when DFE equalization is active (i.e. the LPM enable bit is 0). */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
+/* Select DFE (enable=true) or LPM (enable=false) receiver equalization,
+ * then toggle the equalizer reset so the new setting takes effect.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Enable/disable GTY RX polarity inversion. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable GTY TX polarity inversion. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable GTY TX inhibit. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable host-side (near-end) loopback; writes loop mode 2 when
+ * enabled, 0 when disabled.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Enable/disable line-side loopback via CORE_CONF.LINE_LOOPBACK. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True while the user RX reset is asserted. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+/* True while the user TX reset is asserted. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there is only one QPLL. For cores capable
+ * of 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Both lock bits must be set (value 3 = both QPLLs locked). */
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/* Ready when the QPLL(s) are locked and neither user reset is asserted. */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+/* True when auto-negotiation is enabled in ANEG_CONFIG. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+/* Enable/disable transmission of idle sequences. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable FCS insertion on transmit. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Query the configured link speed: true = 10G, false = 25G. */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+/* Select 10G (enable=true) or 25G (enable=false) link speed. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Pulse the link-speed toggle field. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+/* Enable/disable RS-FEC via RS_FEC_CONF.RS_FEC_ENABLE. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set the port LED mode via DEBOUNCE_CTRL.NT_PORT_CTRL. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+/* Assert/deassert the RX MAC/PCS sub-reset. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX MAC/PCS sub-reset. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Read the RS-FEC corrected (CCW) and uncorrected (UCW) code word counters,
+ * logging any non-zero counts.
+ * NOTE(review): as in the MAC_PCS variant, no explicit write is performed —
+ * this only resets the counters if they are clear-on-read; confirm against
+ * the register spec. Also note these NT_LOG lines lack the trailing "\n"
+ * used elsewhere in this driver.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Set the RX timestamp compensation delay. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+/* Set the TX timestamp compensation delay. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+/* Enable/disable timestamping at end-of-packet (CORE_CONF.TS_AT_EOP). */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set the GTY TX differential swing control value. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+/* Set the GTY TX pre-cursor emphasis value. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+/* Set the GTY TX post-cursor emphasis value. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+/* Enable/disable link training (LT_CONF.ENABLE). */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the FEC91 (Clause 91) request bit advertised during aneg. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the RS-FEC request bit advertised during aneg. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the FEC74 (Clause 74) request bit advertised during aneg. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable auto-negotiation bypass. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Configure the direct-attach-copper (DAC) mode.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: it disables aneg/LT,
+ * enables aneg bypass, and cycles the MAC/PCS and GT data resets.
+ * Any other mode hits assert(0) — not yet implemented.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Read the link-summary RX FEC74 lock indication ("ll_" field) of the channel */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f);
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Read the link-summary RX RS-FEC lane alignment indication of the channel */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_0
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_0
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Port LED mode values, passed to nthw_mac_pcs_xxv_set_led_mode().
+ * NOTE(review): values presumably match the register field encoding of the
+ * FPGA LED control — confirm against the FPGA register documentation.
+ */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/*
+ * Direct-attach-cable (DAC) mode values, passed to
+ * nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): the N/S/L suffixes presumably denote 25G CA cable classes
+ * (25GBASE-CR CA-N/CA-S/CA-L reach classes) — confirm with hardware docs.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * MAC/PCS XXV context: the FPGA/module handles plus, for each of up to
+ * four sub-modules/channels, the register and field handles resolved by
+ * nthw_mac_pcs_xxv_init(). All mp_* pointers are owned by the FPGA model,
+ * not by this struct.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+/* Maximum number of sub-modules/channels wired up by init. */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	/* Per-channel register/field handle set (one element per channel). */
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate and zero-initialize a PCI read traffic-generator context.
+ *
+ * @return pointer to the new context, or NULL on allocation failure.
+ *         The caller owns the object and releases it with
+ *         nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-initializes; replaces the malloc()+memset() pair */
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/*
+ * Destroy a context created by nthw_pci_rd_tg_new().
+ * Passing NULL is a no-op.
+ */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the handle before releasing it to help catch stale use. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all PCI_RD_TG registers and fields for the given FPGA instance.
+ *
+ * When p is NULL the call acts as a pure probe: it only reports whether
+ * the module instance exists (0) or not (-1), without touching any state.
+ *
+ * @return 0 on success, -1 if the module instance is not present.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	/* Probe-only mode: report module presence, do not initialize. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* RDDATA0: low 32 bits of the DMA physical address */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	/* RDDATA1: high 32 bits of the DMA physical address */
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* RDDATA2: request size and wait/wrap flags */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id; query_field may return NULL if absent */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address used by the read traffic
+ * generator, split into low/high 32-bit register fields; each half is
+ * written and flushed immediately.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	/*
+	 * Note: the previous "(1UL << 32) - 1" mask is undefined behavior on
+	 * ILP32 targets where unsigned long is 32 bits (shift >= type width,
+	 * CERT INT34-C). Use a 64-bit constant mask and plain shift instead.
+	 */
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/*
+ * Write and flush the RAM address register. Presumably this selects the
+ * traffic-generator RAM entry targeted by nthw_pci_rd_tg_set_ram_data()
+ * -- confirm against the PCI_RD_TG register documentation.
+ */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one RAM data entry: request size plus the wait and wrap flags.
+ * All three fields belong to the RDDATA2 register (see init); the values
+ * are staged with field_set_val32() and the final field_flush_register()
+ * on the wrap field presumably commits the whole register at once --
+ * confirm against the nthw_fpga_model flush semantics.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/*
+ * Write and flush the RD_RUN iteration count; presumably this starts the
+ * read traffic generator for n_iterations -- confirm with hardware docs.
+ */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/*
+ * Return the TG_RD_RDY control field; field_get_updated presumably
+ * re-reads the register from hardware rather than the cached value --
+ * confirm against the nthw_fpga_model API.
+ */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * PCI read traffic-generator context: FPGA/module handles plus the
+ * register and field handles resolved by nthw_pci_rd_tg_init().
+ * All mp_* pointers are owned by the FPGA model, not by this struct.
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	/* product parameter NT_PCI_TA_TG_PRESENT (defaults to 1 at init) */
+	int mn_param_pci_ta_tg_present;
+
+	/* RDDATA0: low 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	/* RDDATA1: high 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	/* RDDATA2: request size, optional VF host id, wait/wrap flags */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* may be NULL (optional field) */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	/* RDADDR: RAM entry address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	/* RD_RUN: iteration count */
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	/* CTRL: ready/status flag */
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI_TA instance.
+ * calloc() replaces the malloc()+memset() pair: one call, and the
+ * zeroing cannot be forgotten if the struct grows.
+ * Returns NULL on allocation failure; free with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/*
+ * Scrub and release an instance allocated by nthw_pci_ta_new().
+ * Safe to call with NULL.
+ */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p == NULL)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind one instance of the PCI_TA (PCI test analyzer) FPGA module.
+ *
+ * Caches the register and field handles used by the accessors below.
+ * When p is NULL the call only probes for the module's presence.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* Product parameter; trailing 1 is presumably the default when the
+	 * parameter is absent - TODO confirm fpga_get_product_param contract.
+	 */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* Counter registers: good/bad packets, length and payload errors. */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write CONTROL.ENABLE and flush the register. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the current good-packet count into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the current bad-packet count into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the current length-error count into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the current payload-error count into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * Register/field handles for one instance of the PCI_TA (PCI test
+ * analyzer) FPGA module.  Populated once by nthw_pci_ta_init().
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	/* Product parameter: non-zero when the TA/TG logic is present. */
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	/* Counters: good/bad packets, length and payload errors. */
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+/* Allocate a zeroed instance; release with nthw_pci_ta_delete(). */
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+/* Bind instance n_instance of MOD_PCI_TA; 0 on success, -1 if absent. */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI write traffic-generator instance.
+ * calloc() replaces the malloc()+memset() pair: one call, and the
+ * zeroing cannot be forgotten if the struct grows.
+ * Returns NULL on allocation failure; free with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/*
+ * Scrub and release an instance allocated by nthw_pci_wr_tg_new().
+ * Safe to call with NULL.
+ */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p == NULL)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind one instance of the PCI_WR_TG (PCI write traffic generator)
+ * FPGA module and cache its register/field handles.
+ *
+ * When p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* WRDATA0/WRDATA1: low/high halves of the DMA physical address. */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* WRDATA2: request size/flags; REQ_HID is optional (query, not get). */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical base address as two 32-bit halves.
+ * Use explicit 64-bit masks/shifts: the previous (1UL << 32) mask is
+ * undefined behavior on platforms where unsigned long is 32 bits wide
+ * (shift count equal to the type width).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the RAM entry subsequent data writes target, and flush. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage request size and the wait/wrap/inc flags in WRDATA2.
+ * Only the final field_flush_register() pushes the register to hardware,
+ * so all four fields land in one write.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the generator for n_iterations runs (write-through). */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the generator's ready flag from TG_CTRL. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * Register/field handles for one instance of the PCI_WR_TG (PCI write
+ * traffic generator) FPGA module.  Populated once by nthw_pci_wr_tg_init().
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	/* Product parameter: non-zero when the TA/TG logic is present. */
+	int mn_param_pci_ta_tg_present;
+
+	/* WRDATA0: low 32 bits of the DMA physical address. */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	/* WRDATA1: high 32 bits of the DMA physical address. */
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* WRDATA2: request size/host-id and inc/wait/wrap flags.
+	 * req_hid is optional and may be NULL (register_query_field).
+	 */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	/* WRADDR: RAM address the staged data words are written to. */
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	/* WR_RUN: iteration count that starts the generator. */
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	/* TG_CTRL: ready/status flag. */
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	/* TG_SEQ: sequence counter. */
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+/* Allocate a zeroed instance; release with nthw_pci_wr_tg_delete(). */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+/* Bind instance n_instance of MOD_PCI_WR_TG; 0 on success, -1 if absent. */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate a zero-initialized PCIe3 instance.
+ * calloc() replaces the malloc()+memset() pair: one call, and the
+ * zeroing cannot be forgotten if the struct grows.
+ * Returns NULL on allocation failure; free with nthw_pcie3_delete().
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	return calloc(1, sizeof(nthw_pcie3_t));
+}
+
+/*
+ * Scrub and release an instance allocated by nthw_pcie3_new().
+ * Safe to call with NULL.
+ */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p == NULL)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind one instance of the PCIE3 FPGA module: cache register/field
+ * handles, refresh the optional registers, then apply the initial
+ * setup (clear DMA endpoint allow-masks and zero the marker address).
+ *
+ * When p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ *
+ * Fix: the function definition was terminated with "};" - the stray
+ * semicolon after the closing brace is an extra file-scope token that
+ * pedantic compilers warn about; it is removed here.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counter registers */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* Root-port <-> endpoint error reporting registers */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/* NOTE(review): endpoint 0 is set while endpoint 1 stays cleared -
+	 * this asymmetry looks deliberate (single-endpoint default) but is
+	 * undocumented; confirm against the FPGA register specification.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/*
+ * Kick off a statistics sample window by writing a magic token to the
+ * SAMPLE_TIME register.  0xfee1dead presumably acts as the trigger
+ * value - TODO confirm against the FPGA register documentation.
+ * Always returns 0.
+ */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Enable statistics collection: set STAT_ENA and STAT_REQ, then flush
+ * both in one register write.  Always returns 0.
+ */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Disable statistics collection: clear STAT_ENA but still pulse
+ * STAT_REQ, flushed in one register write.  Always returns 0.
+ */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Snapshot the raw PCIe3 statistics counters into the caller's outputs.
+ * The TG unit size and reference frequency are compile-time constants
+ * (NTHW_TG_CNT_SIZE / NTHW_TG_REF_FREQ), not read from hardware.
+ * Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw counters into rates: bytes/sec for RX/TX (scaled by
+ * the TG unit size and reference frequency) and parts-per-million bus
+ * utilization for the NT and Xilinx sides.  Always returns 0.
+ *
+ * Fix: when no reference-clock ticks were sampled, the original left
+ * *p_pci_rx_rate, *p_pci_tx_rate and *p_tag_use_cnt untouched, so
+ * callers could read indeterminate values; zero every output on that
+ * path.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		*p_tag_use_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Pre-sample hook for endpoint counters: intentionally a no-op for
+ * PCIe3 (kept for interface symmetry with other HIF back-ends).
+ * Always returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the endpoint counter struct from the current
+ * PCIe3 rate statistics.  Always returns 0.
+ *
+ * NOTE(review): the RX rate lands in epc->cur_tx and the TX rate in
+ * epc->cur_rx - possibly a deliberate host-vs-NIC direction swap, but
+ * worth confirming against the nthw_hif_end_point_counters consumers.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Register/field handles for one instance of the PCIE3 FPGA module.
+ * Populated once by nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pcie3;
+	int mn_instance;
+
+	/* Statistics control: enable flag and sample request. */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	/* Raw RX/TX byte-unit counters. */
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	/* Request-queue ready/valid cycle counters. */
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	/* Outstanding-tag usage. */
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference-clock tick counter for rate conversion. */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* Root-port <-> endpoint error reporting. */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Endpoint config: interface id, messaging, DMA allow-masks. */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* Marker address (LSB/MSB), zeroed during init. */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never assigned by
+	 * nthw_pcie3_init() - either dead fields or bound elsewhere;
+	 * confirm before use.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+/* Allocate a zeroed instance; release with nthw_pcie3_delete(). */
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+/* Bind instance n_instance of MOD_PCIE3; 0 on success, -1 if absent. */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate a zero-initialized SDC instance.
+ * calloc() replaces the malloc()+memset() pair: one call, and the
+ * zeroing cannot be forgotten if the struct grows.
+ * Returns NULL on allocation failure; free with nthw_sdc_delete().
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	return calloc(1, sizeof(nthw_sdc_t));
+}
+
+/*
+ * Scrub and release an instance allocated by nthw_sdc_new().
+ * Safe to call with NULL.
+ */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p == NULL)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind one instance of the SDC FPGA module and cache the control,
+ * status and fill-level field handles.
+ *
+ * When p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		/* Scratch register handle reused per lookup; only the
+		 * field handles are kept in the instance struct.
+		 */
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Fold one status field into the running result mask and report whether
+ * its value matches the expectation (all bits set, or - for expect_set
+ * == false - all bits clear).  Returns 1 on mismatch, 0 otherwise.
+ *
+ * The mask is built through 64-bit arithmetic: the original's
+ * (1 << n_val_width) is undefined behavior for 31/32-bit wide fields
+ * (signed int shifted out of range).
+ */
+static int sdc_check_field(nt_field_t *p_fld, uint64_t *pn_mask, bool expect_set)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	const uint32_t val_mask = (uint32_t)((1ULL << n_val_width) - 1ULL);
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	return (val != (expect_set ? val_mask : 0U)) ? 1 : 0;
+}
+
+/*
+ * Collect the SDC status fields (calib, init done, MMCM/PLL lock,
+ * resetting) into *pn_result_mask and count how many are not in their
+ * expected state.  Returns the error count (0 = all ok) or -1 when
+ * either argument is NULL.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_calib, &n_mask, true);
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_init_done, &n_mask, true);
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_mmcm_lock, &n_mask, true);
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_pll_lock, &n_mask, true);
+	/* "resetting" is the one field expected to read as zero. */
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_resetting, &n_mask, false);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll the SDC status fields until each reaches its expected state
+ * (all-set for calib/init/MMCM/PLL, all-clear for resetting), with
+ * n_poll_iterations attempts spaced n_poll_interval apart per field.
+ * Returns the number of fields that timed out (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int res;
+	int n_err_cnt = 0;
+
+	res = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	/* "resetting" must drop to zero, hence wait-clear. */
+	res = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Field handles for one instance of the SDC FPGA module.
+ * Populated once by nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_sdc;
+	int mn_instance;
+
+	/* SDC_CTRL fields. */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields, polled by get_states()/wait_states(). */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* Cell count and fill-level fields. */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+/* Allocate a zeroed instance; release with nthw_sdc_delete(). */
+nthw_sdc_t *nthw_sdc_new(void);
+/* Bind instance n_instance of MOD_SDC; 0 on success, -1 if absent. */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc yields the same zero-initialized context as malloc+memset,
+	 * and returns NULL on allocation failure just like the caller expects.
+	 */
+	return calloc(1, sizeof(nthw_si5340_t));
+}
+
+/*
+ * Bind the Si5340 context to an IIC bus and 7-bit device address, then
+ * select register page 0 on the device so the cached page matches hardware.
+ * Always returns 0.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1; /* sentinel: no clock config selected yet (presumed) */
+
+	/* Force the device page register to 0 and cache that selection. */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	/* NULL-tolerant destructor; scrub the context before releasing it. */
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	/* reg_addr = (page << 8) | offset */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache the newly selected page */
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	/* reg_addr = (page << 8) | offset */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache the newly selected page */
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0; /* always succeeds; IIC errors are not propagated here */
+}
+
+/*
+ * Write a clock profile (an array of reg-addr/value records in one of two
+ * record layouts) to the device, verifying each write by reading it back.
+ * Returns 0 on success, -1 on unknown record format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* Decode the next record; layout depends on the profile format. */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		/* Verify the write took effect before moving on. */
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and verify the outcome: after writing the
+ * configuration, poll up to 5 times (1 s apart) until the lock/calibration
+ * status bits clear, then read out the 8-character design id.
+ * Returns 0 on success, -1 if the status bits never clear.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);	/* live status */
+		sticky = nthw_si5340_read(p, 0x11);	/* sticky status */
+		nthw_si5340_write(p, 0x11, 0x00);	/* clear sticky bits */
+
+		/* Mask 0x09: lock/calibration bits — confirm against datasheet */
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Read the NUL-terminated design id string from registers 0x26B.. */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a profile stored as clk_profile_data_fmt1_t records. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a profile stored as clk_profile_data_fmt2_t records. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+/* Result codes for Si5340 operations */
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Si5340 clock synthesizer context (device accessed over IIC). */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr; /* 7-bit IIC device address */
+	nthw_iic_t *mp_nthw_iic; /* IIC bus used to reach the device */
+	int mn_clk_cfg;
+	uint8_t m_si5340_page; /* currently selected register page (cached) */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc yields the same zero-initialized channel state as
+	 * malloc+memset, returning NULL on allocation failure.
+	 */
+	return calloc(1, sizeof(nthw_spi_v3_t));
+}
+
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	/* NULL-tolerant destructor: releases the owned SPIM/SPIS sub-modules
+	 * first, then scrubs and frees the channel state itself.
+	 */
+	if (!p)
+		return;
+
+	if (p->mp_spim_mod) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+	if (p->mp_spis_mod) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Set the transfer timeout used by the Tx/Rx polling loops.
+ * NOTE(review): the value is compared against deltas of
+ * NT_OS_GET_TIME_MONOTONIC_COUNTER() — confirm the expected unit.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Protocol version implemented by this driver; always 3. */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		/* An empty Tx FIFO means all queued data has been shifted out. */
+		if (empty)
+			break;
+
+		/* Bail out with -1 once the monotonic-counter delta exceeds time_out. */
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		/* A non-empty Rx FIFO means at least one word can be read. */
+		if (!empty)
+			break;
+
+		/* Bail out with -1 once the monotonic-counter delta exceeds time_out. */
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Debug helper: log 'count' bytes from p_data as hex, 16 bytes per line. */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;	/* bytes accumulated on the current output line */
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
+		j++;
+
+		/* Flush a full line of 16 bytes, or the final partial line. */
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0'; /* drop the trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Initialize the SPI v3 channel: create and initialize the underlying SPIM
+ * (master) and SPIS (slave) modules, then reset both.  Failures of the
+ * individual steps are logged but initialization continues; the result of
+ * the last step is returned (0 on success).
+ *
+ * Bug fix: the error messages after nthw_spim_init() and nthw_spis_init()
+ * were swapped, attributing each failure to the wrong module.
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	/* Caller's Rx capacity; rx_buf->size is rewritten to bytes received. */
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	/* Packed 32-bit container headers: Tx carries opcode+size,
+	 * Rx returns error_code+size.
+	 */
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	/* Header goes first, big-endian on the wire. */
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Stream the Tx payload into the FIFO, 4 bytes per word. */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0; /* zero-pad the final partial word */
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Command opcodes carried in the SPI v3 container header to the AVR. */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+/* Field sizes (bytes) of the Gen2 VPD EEPROM layout below */
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM Vital Product Data image; field order and sizes define the layout. */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER ne overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (10bytes ascii) (e.g. "ntmainb1e2" or "ntfront20b1") */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identification record.
+ * NOTE(review): presumably populated from the VPD/AVR sysinfo data above —
+ * confirm against the callers that fill it in.
+ */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];	/* NUL-terminated copies of the */
+	char product_id[GEN2_PN_SIZE + 1];	/* fixed-width VPD fields */
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Buffer descriptor for nthw_spi_v3_transfer(): 'size' is the byte count of
+ * 'p_buf'.  For the Rx buffer, 'size' is rewritten to the bytes actually
+ * received.
+ */
+struct tx_rx_buf {
+	uint16_t size;
+	void *p_buf;
+};
+
+/* SPI v3 channel state: paired SPIM (master/Tx) and SPIS (slave/Rx) modules. */
+struct nthw__spi__v3 {
+	int m_time_out;	/* transfer timeout used by the polling loops */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc gives the same zero-initialized context as malloc+memset,
+	 * and NULL on allocation failure.
+	 */
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Bind the SPIM (SPI master) context to module instance n_instance of p_fpga
+ * and cache its register and field handles.  Returns 0 on success, -1 if the
+ * module instance does not exist.  A NULL context may be passed to merely
+ * probe for the module's presence.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	/* Probe-only mode: report presence without binding a context. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* Cache register/field handles for reset, control, status and data. */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	/* NULL-tolerant destructor; scrub the context before releasing it. */
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Software-reset the SPIM module by writing the documented magic to SRR.RST.
+ * Always returns 0.
+ */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Enable or disable the SPIM core via read-modify-write of CR.EN. Returns 0. */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM Tx FIFO (DTR register). Returns 0. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* A non-zero SR.TXEMPTY field means the Tx FIFO holds no data. */
+	*pb_empty = (field_get_updated(p->mp_fld_sr_txempty) != 0);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/*
+ * SPIM (SPI master) instance: FPGA/module handles plus cached pointers to
+ * the module's registers and fields.
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	nt_register_t *mp_reg_srr;	/* software reset register */
+	nt_field_t *mp_fld_srr_rst;
+
+	nt_register_t *mp_reg_cr;	/* control register */
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	nt_register_t *mp_reg_sr;	/* status register */
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	nt_register_t *mp_reg_dtr;	/* data transmit register (Tx FIFO) */
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;	/* data receive register */
+	nt_field_t *mp_fld_drr_drr;
+	nt_register_t *mp_reg_cfg;	/* configuration register */
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc gives the same zero-initialized context as malloc+memset,
+	 * and NULL on allocation failure.
+	 */
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Bind the SPIS (SPI slave) context to module instance n_instance of p_fpga
+ * and cache its register and field handles.  Returns 0 on success, -1 if the
+ * module instance does not exist.  A NULL context may be passed to merely
+ * probe for the module's presence.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	/* Probe-only mode: report presence without binding a context. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* Cache register/field handles for reset, control, status, data and RAM. */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	/* NULL-tolerant destructor; scrub the context before releasing it. */
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Software-reset the SPIS module by writing the documented magic to SRR.RST.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Enable or disable the SPIS core via read-modify-write of CR.EN. Returns 0. */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* A non-zero SR.RXEMPTY field means the Rx FIFO holds no data. */
+	*pb_empty = (field_get_updated(p->mp_fld_sr_rxempty) != 0);
+
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS Rx FIFO (DRR register). Returns 0. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM: program RAM_CTRL
+ * with the result index and a count of 1, then read the RAM_DATA field.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/*
+ * SPIS (SPI slave) instance: FPGA/module handles plus cached pointers to
+ * the module's registers and fields.
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	nt_register_t *mp_reg_srr;	/* software reset register */
+	nt_field_t *mp_fld_srr_rst;
+
+	nt_register_t *mp_reg_cr;	/* control register */
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	nt_register_t *mp_reg_sr;	/* status register */
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	nt_register_t *mp_reg_dtr;	/* data transmit register */
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;	/* data receive register (Rx FIFO) */
+	nt_field_t *mp_fld_drr_drr;
+
+	nt_register_t *mp_reg_ram_ctrl;	/* sensor result RAM: address/count */
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;	/* sensor result RAM: data word */
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/* Allocate a zero-initialized TSM handle; returns NULL on failure. */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/* Scrub and free a TSM handle; a NULL argument is ignored. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the TSM module registers/fields for instance n_instance of
+ * p_fpga and cache them in *p.
+ *
+ * When p is NULL the call acts as a probe: it returns 0 if the module
+ * instance exists and -1 otherwise.
+ *
+ * @return 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse each looked-up register handle instead of calling
+		 * module_get_register() twice with identical arguments.
+		 */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM timestamp from the TS_LO/TS_HI register pair.
+ *
+ * @param p    Initialized TSM handle.
+ * @param p_ts Receives the combined value; must not be NULL.
+ * @return 0 on success, -1 when p_ts is NULL.
+ *
+ * NOTE(review): the two 32-bit halves are fetched in separate register
+ * accesses; whether the hardware latches a coherent 64-bit snapshot on
+ * the first read is not visible here -- confirm against the TSM
+ * register specification.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The early NULL check above makes a second check redundant. */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM time from the TIME_LO (ns) / TIME_HI (sec)
+ * register pair.
+ *
+ * @param p      Initialized TSM handle.
+ * @param p_time Receives the combined value; must not be NULL.
+ * @return 0 on success, -1 when p_time is NULL.
+ *
+ * NOTE(review): lo and hi are read in two separate accesses; coherency
+ * across a rollover depends on hardware latching -- confirm against
+ * the TSM register specification.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The early NULL check above makes a second check redundant. */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/* Load a 64-bit time into the TIME_LO/TIME_HI register pair (LO first). */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	const uint32_t n_lo = (uint32_t)(n_time & 0xFFFFFFFF);
+	const uint32_t n_hi = (uint32_t)((n_time >> 32) & 0xFFFFFFFF);
+
+	field_set_val_flush32(p->mp_fld_time_lo, n_lo);
+	field_set_val_flush32(p->mp_fld_time_hi, n_hi);
+	return 0;
+}
+
+/* Enable/disable timer T0 via a read-modify-write of TIMER_CTRL. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/*
+ * Program the timer T0 (stat toggle timer) max count.
+ * n_timer_val is in ns (e.g. 50*1000*1000).
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *const fld = p->mp_fld_timer_timer_t0_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/* Enable/disable timer T1 via a read-modify-write of TIMER_CTRL. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/*
+ * Program the timer T1 (keep alive timer) max count.
+ * n_timer_val is in ns (e.g. 100*1000*1000).
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *const fld = p->mp_fld_timer_timer_t1_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/*
+ * Select the timestamp format.
+ * 0x1: Native - 10ns units, start date: 1970-01-01.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	nt_field_t *const fld = p->mp_fld_config_ts_format;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * Cached register/field handles for one TSM (time stamp module)
+ * instance, resolved by nthw_tsm_init().
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;	/* FPGA the module belongs to */
+	nt_module_t *mp_mod_tsm;	/* TSM module handle */
+	int mn_instance;	/* module instance number */
+
+	nt_field_t *mp_fld_config_ts_format;	/* CONFIG.TS_FORMAT */
+
+	/* TIMER_CTRL enable bits for timers T0 and T1 */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;	/* TIMER_T0.MAX_COUNT */
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;	/* TIMER_T1.MAX_COUNT */
+
+	/* TS_LO/TS_HI: 64-bit timestamp split into two 32-bit registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO (ns) / TIME_HI (sec): 64-bit time value */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+/* Allocate a zeroed TSM handle; NULL on allocation failure. */
+nthw_tsm_t *nthw_tsm_new(void);
+/* Release a handle allocated by nthw_tsm_new(). */
+void nthw_tsm_delete(nthw_tsm_t *p);
+/* Resolve registers/fields; 0 on success, -1 if the instance is absent. */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/* Allocate a zero-initialized DBS handle; returns NULL on failure. */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/* Scrub and free a DBS handle; a NULL argument is ignored. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all DBS module registers and fields for instance n_instance
+ * of p_fpga and cache them in *p.
+ *
+ * When p is NULL the call acts as a probe: it returns 0 if the module
+ * instance exists and -1 otherwise.
+ *
+ * Handles looked up with module_query_register()/register_query_field()
+ * are optional and may stay NULL -- presumably absent on some FPGA
+ * images (TODO confirm); callers must check before use.
+ *
+ * @return 0 on success, -1 when the module instance does not exist.
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* Sanity: the module was found, so the product parameter should agree. */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* Global RX control register */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	/* Global TX control register */
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* RX queue init trigger */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	/* Optional: RX init start values */
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	/* Optional: RX pointer readback */
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	/* TX queue init trigger */
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	/* Optional: TX init start values */
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	/* Optional: TX pointer readback */
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	/* Optional: RX/TX queue idle control and status */
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* RX available-monitor RAM (control + data) */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	/* TX available-monitor RAM (control + data) */
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* RX used-writer RAM (control + data) */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	/* TX used-writer RAM (control + data) */
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* RX descriptor-reader RAM (control + data) */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	/* TX descriptor-reader RAM (control + data) */
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* TX queue-property RAM (control + data) */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	/* Optional: QoS rate multiplier/divider */
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Write the RX control defaults (monitors, writers and queues disabled,
+ * default scan/update speeds) and flush the register.  Always returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Write the TX control defaults and flush the register.  Always returns 0.
+ * NOTE(review): the scan/update speed defaults (5/8) are mirrored
+ * relative to the RX reset (8/5) -- confirm this asymmetry is intended.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Bring DBS to a known state: restore RX/TX control defaults, then
+ * clear every per-queue shadow entry and flush each cleared entry to
+ * the corresponding hardware RAM bank.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the global RX control register and flush it.
+ *
+ * @param last_queue           highest RX queue number in use
+ * @param avail_monitor_enable enable the available-monitor scanner
+ * @param avail_monitor_speed  available-monitor scan speed
+ * @param used_write_enable    enable the used-ring writer
+ * @param used_write_speed     used-writer update speed
+ * @param rx_queue_enable      global RX queue enable
+ * @return always 0
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/* Report the current RX control field values through the out-pointers. */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the global TX control register and flush it.
+ * Mirrors set_rx_control() for the TX direction.
+ * @return always 0
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Was missing relative to the RX variant's debug dump. */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/* Report the current TX control field values through the out-pointers. */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Trigger (or clear) initialization of RX queue 'queue'.
+ * When the optional RX_INIT_VAL register exists, its start index and
+ * pointer are preloaded before the init register is flushed.
+ * @return always 0
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/* Read back the RX init register fields. Always returns 0. */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	const uint32_t v_init = field_get_val32(p->mp_fld_rx_init_init);
+	const uint32_t v_queue = field_get_val32(p->mp_fld_rx_init_queue);
+	const uint32_t v_busy = field_get_val32(p->mp_fld_rx_init_busy);
+
+	*init = v_init;
+	*queue = v_queue;
+	*busy = v_busy;
+	return 0;
+}
+
+/*
+ * Trigger (or clear) initialization of TX queue 'queue'.
+ * When the optional TX_INIT_VAL register exists, its start index and
+ * pointer are preloaded before the init register is flushed.
+ * @return always 0
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/* Read back the TX init register fields. Always returns 0. */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	const uint32_t v_init = field_get_val32(p->mp_fld_tx_init_init);
+	const uint32_t v_queue = field_get_val32(p->mp_fld_tx_init_queue);
+	const uint32_t v_busy = field_get_val32(p->mp_fld_tx_init_busy);
+
+	*init = v_init;
+	*queue = v_queue;
+	*busy = v_busy;
+	return 0;
+}
+
+/*
+ * Request idle state for RX queue 'queue'.
+ * @return 0 on success, -ENOTSUP when the optional RX_IDLE register is
+ *         absent on this FPGA image.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle status; *queue is always reported as 0.
+ * Returns -ENOTSUP when the optional RX_IDLE register is absent.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for TX queue 'queue'.
+ * @return 0 on success, -ENOTSUP when the optional TX_IDLE register is
+ *         absent on this FPGA image.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle status; *queue is always reported as 0.
+ * Returns -ENOTSUP when the optional TX_IDLE register is absent.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the RX_PTR register reports on.
+ * @return 0 on success, -ENOTSUP when the optional register is absent.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX pointer and its valid flag; *queue is always reported 0.
+ * Returns -ENOTSUP when the optional RX_PTR register is absent.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (p->mp_reg_rx_ptr == NULL)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the TX_PTR register reports on.
+ * @return 0 on success, -ENOTSUP when the optional register is absent.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer for the queue previously selected with
+ * set_tx_ptr_queue().  *queue is hardwired to 0 (selection not read
+ * back).  Returns 0 on success, -ENOTSUP when TX_PTR is absent.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Select which RX available-monitor (AM) bank entry the next DATA write
+ * targets (address + count of 1), and flush the CONTROL register.
+ */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow-cache setters: update the SW copy of the RX AM bank entry only;
+ * no hardware access until flush_rx_am_data().
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Update all fields of the RX AM shadow entry at 'index'. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/* Write shadow entry 'index' to the RX AM DATA register and flush it.
+ * The 64-bit guest physical address is written as two 32-bit words
+ * (assumes little-endian word order -- TODO confirm against field API).
+ * The packed and int fields are optional and skipped when the FPGA
+ * image does not provide them (NULL field pointers).
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/* Public entry: cache and program one RX available-monitor bank entry.
+ * Returns 0 on success, -ENOTSUP when the AM DATA register is absent.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Select which TX available-monitor (AM) bank entry the next DATA write
+ * targets, and flush the CONTROL register.
+ */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Update the SW shadow copy of TX AM entry 'index'; no hardware access. */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Write shadow entry 'index' to the TX AM DATA register and flush it.
+ * The 64-bit guest physical address is written as two 32-bit words
+ * (assumes little-endian word order -- TODO confirm).  packed and int
+ * fields are optional (NULL when absent from the FPGA image).
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/* Public entry: cache and program one TX available-monitor bank entry.
+ * Returns 0 on success, -ENOTSUP when the AM DATA register is absent.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Select which RX used-writer (UW) bank entry the next DATA write
+ * targets, and flush the CONTROL register.
+ */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow-cache setters: update the SW copy of the RX UW entry only;
+ * no hardware access until flush_rx_uw_data().
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Update all fields of the RX UW shadow entry at 'index'. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/* Write shadow entry 'index' to the RX UW DATA register and flush it.
+ * The 64-bit guest physical address is written as two 32-bit words
+ * (assumes little-endian word order -- TODO confirm).
+ * DBS newer than v0.8 takes the queue size as a mask (2^qs - 1); older
+ * versions take the raw value -- so 'queue_size' appears to be the log2
+ * of the entry count; confirm against the register spec.
+ * packed is optional; vec/istk are only written when the int field
+ * exists (interrupt support present in the FPGA image).
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/* Public entry: cache and program one RX used-writer bank entry.
+ * Returns 0 on success, -ENOTSUP when the UW DATA register is absent.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which TX used-writer (UW) bank entry the next DATA write
+ * targets, and flush the CONTROL register.
+ */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow-cache setters: update the SW copy of the TX UW entry only;
+ * no hardware access until flush_tx_uw_data().
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Update all fields of the TX UW shadow entry at 'index'. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/* Write shadow entry 'index' to the TX UW DATA register and flush it.
+ * Same conventions as flush_rx_uw_data(): 64-bit GPA written as two
+ * 32-bit words (word order assumed little-endian -- TODO confirm);
+ * DBS > v0.8 takes queue size as a mask (2^qs - 1), older as raw value;
+ * packed, int/vec/istk and in_order fields are optional.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/* Public entry: cache and program one TX used-writer bank entry.
+ * Returns 0 on success, -ENOTSUP when the UW DATA register is absent.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which RX descriptor-reader (DR) bank entry the next DATA write
+ * targets, and flush the CONTROL register.
+ */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow-cache setters: update the SW copy of the RX DR entry only;
+ * no hardware access until flush_rx_dr_data().
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Update all fields of the RX DR shadow entry at 'index'. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/* Write shadow entry 'index' to the RX DR DATA register and flush it.
+ * 64-bit GPA written as two 32-bit words (word order assumed
+ * little-endian -- TODO confirm); DBS > v0.8 takes queue size as a mask
+ * (2^qs - 1), older as raw value; packed field is optional.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/* Public entry: cache and program one RX descriptor-reader bank entry.
+ * Returns 0 on success, -ENOTSUP when the DR DATA register is absent.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Select which TX descriptor-reader (DR) bank entry the next DATA write
+ * targets, and flush the CONTROL register.
+ */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Shadow-cache setters: update the SW copy of the TX DR entry only;
+ * no hardware access until flush_tx_dr_data().
+ */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/* Update all fields of the TX DR shadow entry at 'index'. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/* Write shadow entry 'index' to the TX DR DATA register and flush it.
+ * Same conventions as flush_rx_dr_data(); additionally writes the TX
+ * port field.  packed field is optional.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/* Public entry: cache and program one TX descriptor-reader bank entry.
+ * Returns 0 on success, -ENOTSUP when the DR DATA register is absent.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Select which TX queue-property (QP) bank entry the next DATA write
+ * targets, and flush the CONTROL register.
+ */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Update the SW shadow copy of the QP entry; no hardware access. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write shadow entry 'index' to the QP DATA register and flush it. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/* Public entry: map TX queue 'index' to a virtual port.
+ * Returns 0 on success, -ENOTSUP when the QP DATA register is absent.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/* Select which TX QoS bank entry the next DATA write targets, and flush
+ * the CONTROL register.
+ * NOTE(review): the QoS field members are named mp_reg_* in nthw_dbs_s
+ * although they are fields -- consider renaming to mp_fld_* for
+ * consistency with the rest of the struct.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow-cache setters: update the SW copy of the QoS entry only;
+ * no hardware access until flush_tx_qos_data().
+ */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Update all fields of the QoS shadow entry at 'index'. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write shadow entry 'index' to the QoS DATA register and flush it. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/* Public entry: configure TX QoS (enable, information rate, burst size)
+ * for queue 'index'.  Returns 0 on success, -ENOTSUP when the QoS DATA
+ * register is absent.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/* Program the global TX QoS rate as a mul/div ratio and flush the
+ * register.  Returns 0 on success, -ENOTSUP when the QoS RATE register
+ * is absent.  (Field members use the mp_reg_* prefix despite being
+ * fields -- see note at set_tx_qos_data_index().)
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/*
+ * SW shadows of the DBS memory-bank entries, one struct per HW bank:
+ * AM = available monitor, UW = used writer, DR = descriptor reader,
+ * QP = queue property, QOS = TX rate limiting.
+ */
+
+/* DBS_RX_AM_DATA */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* appears to be log2 of entry count -- see flush_rx_uw_data() */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* DBS_TX_QOS_DATA: enable, information rate, burst size */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * DBS module context: FPGA/module handles, register and field pointers
+ * resolved at init time, plus SW shadows of the HW memory banks.
+ * Optional register/field pointers may be NULL when the feature is not
+ * present in the FPGA image; accessors return -ENOTSUP in that case.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/* NOTE(review): the QoS members below are fields but use the
+	 * mp_reg_* prefix reserved for registers -- consider mp_fld_*.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* SW shadows of the HW memory banks, indexed by queue. */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Logical flavor of a port, used to select which register set (e.g. EPP
+ * TXP vs. queue MTU tables) a per-port operation targets.
+ */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Hardware identification reported by firmware plus VPD-derived MAC data. */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	/* Vital Product Data: base MAC address info */
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-adapter FPGA context: identification fields, port counts, handles to
+ * the core module instances (RAC/HIF/PCIE3/TSM/DBS/EPP) and BAR0 mapping.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;	/* FPGA id (see FPGAID_TO_* macros) */
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Counts of NIMs/ports/quads present on this adapter */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	/* Core module instance handles */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/* Allocate a zero-initialized EPP instance; returns NULL on OOM. */
+nthw_epp_t *nthw_epp_new(void)
+{
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/* Scrub and release an EPP instance; NULL is tolerated. */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Return nonzero if EPP module instance n_instance exists in this FPGA. */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	/* A NULL context makes nthw_epp_init() only probe for the module. */
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/*
+ * Bind an EPP context to module instance n_instance and cache all of its
+ * register and field handles.
+ *
+ * If p is NULL, only checks module presence: returns 0 when the instance
+ * exists, -1 otherwise.  With a non-NULL p, returns 0 on success and -1
+ * (with an error log) when the instance is missing.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe (RCP) memory: control and data registers */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical (TXP) port MTU table */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Virtual queue MTU table */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Physical (TXP) port QoS table */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual port QoS table */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue-to-virtual-port mapping table */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Program the EPP defaults: zero all recipe categories, install the NRECIPE
+ * startup recipes, set every physical port and queue MTU to MTUINITVAL and
+ * disable all QoS shapers.  A NULL context is a no-op returning 0.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Program one recipe record per register flush */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Install the startup recipes with their VXLAN size adjustments */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup - shapers disabled */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup - shapers disabled */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ *
+ * port is an index into the physical (TXP) or queue MTU table depending on
+ * port_type.  Returns 0 on success (including when p is NULL, i.e. EPP not
+ * present), -ENOTSUP for unsupported port types.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the TXP MTU table entry for this port */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new MTU into the TXP MTU data register */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue MTU table entry for this port */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new MTU into the queue MTU data register */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Reset both MTU register pairs to their defaults */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure the TX-port QoS shaper for a physical port.  The shaper is
+ * enabled whenever any of rate/fraction/burst is nonzero, disabled when all
+ * are zero.  Returns 0; a NULL context is a no-op.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	/* enable == 1 iff at least one shaping parameter is nonzero */
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Configure the virtual-port QoS shaper; same enable semantics as
+ * nthw_epp_set_txp_qos().  Returns 0; a NULL context is a no-op.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	/* enable == 1 iff at least one shaping parameter is nonzero */
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/* Map queue qid to virtual port vport in the EPP queue/vport table. */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6 adds extra 70 bytes */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Default MTU programmed for all ports/queues by nthw_epp_setup() */
+#define MTUINITVAL 1500
+/* Number of startup recipe records */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+/*
+ * NOTE(review): static const arrays in a header give each including
+ * translation unit its own copy - confirm this duplication is intended.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * EPP module context: the owning FPGA/module handles plus cached register
+ * and field handles for the recipe, MTU, QoS and queue/vport tables.
+ * Populated by nthw_epp_init().
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	/* Recipe (RCP) memory */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Physical (TXP) port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Virtual queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* Physical (TXP) port QoS table */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual port QoS table */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue-to-virtual-port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+/* Allocate / free an EPP context */
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+/* Probe for an EPP instance; bind a context; program defaults */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+/* Runtime configuration of MTU, shapers and queue/vport mapping */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated list of FPGA product definitions compiled into the PMD. */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/* Module-id to printable-name map, terminated by a { 0L, NULL } sentinel. */
+static const struct {
+	const int a;	/* module id (MOD_*) */
+	const char *b;	/* printable module name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },	/* sentinel - keep last */
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/* Translate a bus type id to its printable name; "ERR" for out-of-range. */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	/*
+	 * Valid indices are 0..ARRAY_SIZE-1.  The original bound used "<="
+	 * which permitted an out-of-bounds read one element past the end of
+	 * a_bus_type.
+	 */
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+	else
+		return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	/*
+	 * Scan up to, but not including, the terminating { 0L, NULL } entry.
+	 * The original "<= ARRAY_SIZE" bound walked one element past the end
+	 * of the array and could read out-of-bounds memory when no id
+	 * matched.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map) - 1; i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	/* On a miss, i indexes the sentinel whose .b is NULL -> "unknown". */
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
+/*
+ * Force C linkage for xxx_addr_bases and xxx_module_versions
+ */
+/*
+ * Read len 32-bit words at addr over the given bus into p_data.
+ * BAR/PCI reads are single-word only; RAB reads go through the RAC with the
+ * corresponding RAB bus number.  Returns 0 on success, negative on error.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Timestamped variant of read_data().  The p_tsc1/p_tsc2 outputs are
+ * currently unused placeholders; this simply forwards to read_data().
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	int rc = -1;
+
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	rc = read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+
+	return rc;
+}
+
+/*
+ * Write len 32-bit words from p_data to addr over the given bus.
+ * Mirrors read_data(): BAR/PCI writes are single-word; RAB writes use the
+ * RAC with the corresponding RAB bus number.  Returns 0 on success.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/* Allocate an FPGA manager; returns NULL on allocation failure. */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	/*
+	 * Zero-initialize so the manager is in a defined state even before
+	 * fpga_mgr_init() runs (the original returned uninitialized memory),
+	 * consistent with fpga_new()/nthw_epp_new().
+	 */
+	nt_fpga_mgr_t *p = calloc(1, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Scrub and release an FPGA manager; NULL is tolerated. */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	/* Guard: the original memset(NULL, ...) would crash on a NULL arg. */
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/* Bind the manager to the compiled-in FPGA list and count its entries. */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t i;
+
+	/* Count fpga instance in array (list is NULL-terminated) */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	for (i = 0; i < ARRAY_SIZE(nthw_fpga_instances); i++) {
+		if (p->mpa_fpga_prod_init[i] == NULL)
+			break;
+	}
+	p->mn_fpgas = (int)i;
+}
+
+/*
+ * Find the compiled-in product definition matching n_fpga_id and build an
+ * initialized nt_fpga_t from it.  Returns NULL (with an error log) when no
+ * product/version/revision combination matches.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int n;
+
+	/* Linear scan over the known FPGA product definitions */
+	for (n = 0; n < p->mn_fpgas; n++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[n];
+		const bool match = p_init->fpga_product_id == n_fpga_prod &&
+				   p_init->fpga_version == n_fpga_ver &&
+				   p_init->fpga_revision == n_fpga_rev;
+
+		if (match) {
+			nt_fpga_t *p_fpga = fpga_new();
+
+			fpga_init(p_fpga, p_init, p_fpga_info);
+			return p_fpga;
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print the compiled-in FPGA list to fh_out.  detail_level 0 prints only
+ * product-version-revision; any other level adds build time (hex and text).
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/*
+			 * Cast explicitly: time_t is not necessarily "long",
+			 * so passing it straight to "%08lX" is undefined on
+			 * platforms where the types differ.
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Log the compiled-in FPGA list at debug level. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused silences the warning when NT_LOG compiles out */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA instance; returns NULL on OOM. */
+nt_fpga_t *fpga_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/*
+ * Scrub and release an FPGA instance (modules/params are NOT freed here;
+ * use fpga_delete_all() for a deep teardown).  NULL is tolerated.
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	/* Guard: the original memset(NULL, ...) would crash on a NULL arg. */
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/* Deep teardown: free all modules, params, their arrays, then p itself. */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	if (p->mpa_modules) {
+		for (i = 0; i < p->mn_modules; i++) {
+			nt_module_t *p_mod = p->mpa_modules[i];
+
+			if (p_mod)
+				module_delete(p_mod);
+		}
+	}
+
+	/*
+	 * Also release the param objects and the pointer arrays allocated by
+	 * fpga_init(); the original leaked both arrays and every nt_param_t.
+	 */
+	if (p->mpa_params) {
+		for (i = 0; i < p->mn_params; i++)
+			param_delete(p->mpa_params[i]);
+	}
+	free(p->mpa_params);
+	free(p->mpa_modules);
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate an FPGA instance from its generated product definition:
+ * identification fields, then one nt_param_t per product parameter and one
+ * nt_module_t per module.
+ *
+ * NOTE(review): if an array malloc fails, mn_params/mn_modules stay nonzero
+ * while the corresponding array pointer is NULL - confirm callers handle
+ * this partial-initialization state.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	/* Instantiate product parameters */
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	/* Instantiate modules */
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+/* Set the debug mode on the FPGA and propagate it to every module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+/* Find module <id, instance> in this FPGA; NULL when not present. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod->m_mod_id == id && p_mod->m_instance == instance)
+			return p_mod;
+	}
+	return NULL;
+}
+
+/* True when module <id, instance> exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/*
+ * Find the generated init descriptor for module <id, instance> in the
+ * product definition (not the instantiated module); NULL when not present.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mp_init->nb_modules; i++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[i];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/*
+ * Look up product parameter n_param_id; returns its value, or
+ * n_default_value when the parameter is not present in this product.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int i;
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_param = p->mpa_params[i];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Trivial accessors for the FPGA identification fields. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the full FPGA identity string at INFO level. */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug dump of the whole model: identity, then params, then modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	/* Dump each product parameter in table order. */
+	for (n = 0; n < p->mn_params; n++)
+		param_dump(p->mpa_params[n]);
+}
+
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	/* Dump each module in table order. */
+	for (n = 0; n < p->mn_modules; n++)
+		module_dump(p->mpa_modules[n]);
+}
+
+/*
+ * Param
+ */
+nt_param_t *param_new(void)
+{
+	/* calloc: zero-initialize so a param that never reaches param_init()
+	 * still holds defined values when inspected or deleted.
+	 */
+	nt_param_t *p = calloc(1, sizeof(nt_param_t));
+	return p;
+}
+
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		/* Scrub before free to make use-after-free bugs fail fast. */
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	/* Cache the id/value pair from the static init descriptor. */
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+nt_module_t *module_new(void)
+{
+	/* calloc: module_delete() walks mn_registers/mpa_registers, which
+	 * would be indeterminate if module_init() was never called.
+	 */
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	/* Release all owned registers.  Guard the array pointer: it is NULL
+	 * when the allocation in module_init() failed even though
+	 * mn_registers is nonzero.
+	 */
+	if (p->mpa_registers) {
+		for (i = 0; i < p->mn_registers; i++) {
+			nt_register_t *p_reg = p->mpa_registers[i];
+
+			if (p_reg)
+				register_delete(p_reg);
+		}
+		/* The pointer array itself was previously leaked. */
+		free(p->mpa_registers);
+	}
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialize a module from its static descriptor and instantiate one
+ * register object per register descriptor.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	p->m_debug_mode = p->mp_owner ? p->mp_owner->m_debug_mode : 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		/* calloc replaces the malloc+memset pair and checks the
+		 * count * size multiplication for overflow.
+		 */
+		p->mpa_registers =
+			calloc(p->mn_registers, sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		} else {
+			/* Keep count and array consistent on allocation
+			 * failure so iterators never dereference NULL.
+			 */
+			p->mn_registers = 0;
+		}
+	}
+}
+
+/* Look up the module's static init descriptor by (id, instance), run the
+ * regular init, then force the given debug mode.
+ * NOTE(review): fpga_lookup_init() can return NULL, which module_init()
+ * would dereference - confirm callers only pass known-present modules.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Log this module's identity/version/bus info, then all its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Log every register of this module at DBG level. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack major/minor into one uint64_t with major in the high 32 bits. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/* True when the module version is >= (major_version, minor_version). */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n;
+
+	/* Linear scan of the module's register table; NULL when absent. */
+	for (n = 0; n < p->mn_registers; n++) {
+		if (p->mpa_registers[n]->m_id == id)
+			return p->mpa_registers[n];
+	}
+	return NULL;
+}
+
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_reg;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	/* Like module_query_register(), but logs an error when missing. */
+	p_reg = module_lookup_register(p, id);
+	if (p_reg == NULL) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_reg;
+}
+
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	/* Silent lookup - no error logging for an absent register. */
+	return module_lookup_register(p, id);
+}
+
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	/* Propagate the mode to every register owned by this module. */
+	for (n = 0; n < p->mn_registers; n++) {
+		nt_register_t *p_reg = p->mpa_registers[n];
+
+		if (p_reg != NULL)
+			register_set_debug_mode(p_reg, n_debug_mode);
+	}
+}
+
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* NOTE: the misspelled name is part of the public API and kept as-is. */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* Added the trailing newline every other NT_LOG in this file has. */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+nt_register_t *register_new(void)
+{
+	/* calloc, not malloc: register_init() only assigns mpa_fields,
+	 * mp_shadow and mp_dirty when the register has fields, so plain
+	 * malloc() left those pointers indeterminate and register_delete()
+	 * would then free garbage.
+	 */
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	/* Release owned fields; the field array may be absent when its
+	 * allocation failed in register_init().
+	 */
+	if (p->mpa_fields) {
+		for (i = 0; i < p->mn_fields; i++) {
+			nt_field_t *p_field = p->mpa_fields[i];
+
+			if (p_field)
+				field_delete(p_field);
+		}
+		/* The pointer array itself was previously leaked. */
+		free(p->mpa_fields);
+	}
+
+	/* free(NULL) is a no-op, so no guards are needed. */
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/* Initialize a register from its static descriptor: compute the absolute
+ * address and word length, create the field objects, and allocate the
+ * shadow value buffer plus per-word dirty flags used for lazy flushes.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	/* Absolute address = module base + register relative offset. */
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	/* Length in 32-bit words, rounded up from the bit width. */
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		/* NOTE(review): shadow/dirty are only allocated when the
+		 * field array allocation succeeds - callers must tolerate
+		 * NULL mp_shadow/mp_dirty on OOM.
+		 */
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* Shadow copy of the register value (m_len words). */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			/* Per-word dirty flags for deferred flushes. */
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Log the register's identity and layout, then each of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Log every field of the register at DBG level. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+uint32_t register_get_address(const nt_register_t *p)
+{
+	/* Absolute bus address (module base + relative offset). */
+	return p->m_addr;
+}
+
+void register_reset(const nt_register_t *p)
+{
+	int n;
+
+	/* Load every field's reset value into the shadow buffer. */
+	for (n = 0; n < p->mn_fields; n++) {
+		nt_field_t *p_fld = p->mpa_fields[n];
+
+		if (p_fld != NULL)
+			field_reset(p_fld);
+	}
+}
+
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int n;
+
+	if (p == NULL)
+		return NULL;
+
+	/* Linear scan of this register's field table; NULL when absent. */
+	for (n = 0; n < p->mn_fields; n++) {
+		if (p->mpa_fields[n]->m_id == id)
+			return p->mpa_fields[n];
+	}
+	return NULL;
+}
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_fld;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	/* Logging variant of register_query_field(). */
+	p_fld = register_lookup_field(p, id);
+	if (p_fld == NULL) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_fld;
+}
+
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	/* Silent lookup - no error logging for an absent field. */
+	return register_lookup_field(p, id);
+}
+
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Offset of the register relative to the owning module's base address. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* NOTE(review): takes nt_module_t, not nt_register_t, despite the name -
+ * confirm whether this prototype is intentional.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	/* The mode is still propagated to all fields of this register. */
+	for (n = 0; n < p->mn_fields; n++) {
+		nt_field_t *p_fld = p->mpa_fields[n];
+
+		if (p_fld != NULL)
+			field_set_debug_mode(p_fld, n_debug_mode);
+	}
+}
+
+/* Read the register from hardware into the shadow buffer.
+ * The old "if (p && ...)" guard was dead code: 'p' is already
+ * dereferenced by the initializers above it, so callers must pass a
+ * valid register.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+	return rc;
+}
+
+/* Read the register into the shadow while sampling TSC timestamps
+ * before (*p_tsc1) and after (*p_tsc2) the bus access.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	/* Fail fast like register_read_data()/register_write_data() instead
+	 * of handing a NULL fpga_info/shadow down to read_data_tsc().
+	 */
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+/* Write 'cnt' consecutive register images (len words each) from the
+ * shadow buffer to hardware.  The old post-dereference NULL test of 'p'
+ * was dead code and has been dropped.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+
+	return rc;
+}
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t n;
+
+	/* len == (uint32_t)-1 means "whole register"; clamp to m_len. */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	/* Copy out of the shadow buffer - no bus access happens here. */
+	for (n = 0; n < len; n++)
+		p_data[n] = p->mp_shadow[n];
+}
+
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t n_value = 0;
+
+	register_get_val(p, &n_value, 1);
+	return n_value;
+}
+
+/* Re-read the register from hardware into the shadow buffer.
+ * Write-only registers are skipped (they cannot be read back).
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		/* Optionally trace the words just read. */
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Read the register from hardware and return its first 32-bit word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	/* Mark every word of the shadow as needing a flush to hardware. */
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	/* Clamp BEFORE asserting: len == (uint32_t)-1 is the "whole
+	 * register" sentinel (see register_get_val()), and the previous
+	 * order made the assert fire on it.
+	 */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	/* Update the shadow, then write one register image to hardware. */
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/* Write 'cnt' consecutive register images from the shadow to hardware.
+ * Read-only registers are silently ignored.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			/* Renamed from 'i': it shadowed the loop index below. */
+			uint32_t n_dump = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, n_dump);
+			while (n_dump--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/* NOTE(review): only 'cnt' dirty words are cleared although
+		 * (len * cnt) words were written - confirm this is intended.
+		 */
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Read the register while sampling timestamps around the bus access. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow value and mark all words dirty (no flush here). */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set the whole shadow value to all-ones and mark all words dirty. */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+nt_field_t *field_new(void)
+{
+	/* calloc: all members have defined values even before field_init()
+	 * runs (plain malloc() left them indeterminate).
+	 */
+	nt_field_t *p = calloc(1, sizeof(nt_field_t));
+	return p;
+}
+
+void field_delete(nt_field_t *p)
+{
+	/* Scrub before free to make use-after-free bugs fail fast. */
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
+/* Initialize a field from its static descriptor and precompute the masks
+ * used to extract/insert the (possibly word-crossing) bit range:
+ * front mask (field bits in the first word), body length (number of full
+ * middle words) and tail mask (field bits in the last word).
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	/* 32-bit words needed to hold the field value, rounded up. */
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* NOTE(review): bits_remaining < 32 here, but 31 still makes
+		 * '1 << 31' overflow signed int - consider 1ULL as above.
+		 */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/* NOTE(review): takes nt_module_t, not nt_field_t, despite the name -
+ * confirm whether this prototype is intentional.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Bit position of the field's least significant bit in the register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Bit position of the field's most significant bit (inclusive). */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/* Mask of the field's bits within its first 32-bit word (register position). */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Write the field's reset value into the shadow (no flush). */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Field mask shifted down to bit 0 (value position). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/* Extract the field's value from the owning register's shadow buffer into
+ * p_data; len must equal the field's word count (mn_words).  The 64-bit
+ * union acts as a sliding window to shift the unaligned field down to
+ * bit 0 across word boundaries.
+ * NOTE(review): the w32[2]/w64 union overlay assumes little-endian word
+ * order - confirm for any big-endian target.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/* Insert p_data (len == mn_words) into the owning register's shadow at
+ * the field's bit position; bits outside the field in the boundary words
+ * are preserved.  Marks the register dirty; no hardware access here.
+ * NOTE(review): the w32[2]/w64 union overlay assumes little-endian word
+ * order - confirm for any big-endian target.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Set the field value in the shadow and flush the register to hardware. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Return the field value from the shadow.  Intended for single-word
+ * fields: field_get_val() asserts len == mn_words.
+ */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+/* Re-read the owning register from hardware, then return the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+/* Read the owning register while sampling timestamps around the access. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Re-read the owning register from hardware into the shadow. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the owning register's shadow to hardware (one image). */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	/* Shadow update followed by an immediate single-image flush. */
+	field_set_val32(p, val);
+	register_flush(p->mp_owner, 1);
+}
+
+void field_clr_all(const nt_field_t *p)
+{
+	/* Fields spanning full middle words are not supported here. */
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+void field_set_all(const nt_field_t *p)
+{
+	/* Fields spanning full middle words are not supported here. */
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+enum field_match {
+	FIELD_MATCH_CLR_ALL,
+	FIELD_MATCH_SET_ALL,
+	FIELD_MATCH_CLR_ANY,
+	FIELD_MATCH_SET_ANY,
+};
+
+/*
+ * Poll a field until the requested match condition is met.
+ * Returns 0 on success, -1 when n_poll_iterations are exhausted.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	/* 1ULL so a 32-bit wide field does not shift a 32-bit int out of
+	 * range (undefined behavior).
+	 */
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	/* -1 selects the defaults: 10000 polls, 100 usec apart. */
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(((1ULL << p->mn_bit_width) - 1) << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			/* NOTE(review): field_get_mask() is in register
+			 * position while 'val' is the extracted field value -
+			 * confirm this comparison is intended.
+			 */
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	/* not reached - the loop above always returns */
+}
+
+/* Poll until every bit of the field reads as 1; see field_wait_cond32(). */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every bit of the field reads as 0; see field_wait_cond32(). */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one bit of the field reads as 1. */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one bit of the field reads as 0. */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until the field equals (n_wait_cond_value & n_wait_cond_mask);
+ * -1 selects the defaults (10000 iterations, 100 usec interval).
+ * Returns 0 on match, -1 on timeout.
+ * NOTE(review): 'val' itself is not masked before the comparison -
+ * confirm callers pass values already restricted to the mask.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Log the field's static layout (positions, width, reset value). */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/* Log the field's current (shadow) value, most significant word first.
+ * NOTE(review): buf holds 32 words - assumes mn_words <= 32; confirm.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Log a static field init descriptor. */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	/* Bail out early instead of letting fpga_mgr_init() crash on NULL. */
+	if (p_fpga_mgr == NULL) {
+		NT_LOG(ERR, NTHW, "%s: failed to create FPGA manager\n",
+		       __func__);
+		return NULL;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	/* Decode the packed FPGA ident for diagnostics. */
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	/* The manager is only needed for the lookup; release it again. */
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	return p_fpga;
+}
+
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	/* Thin convenience wrapper around fpga_query_module(). */
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	/* Thin convenience wrapper around module_get_register(). */
+	return module_get_register(p_mod, n_reg);
+}
+
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	/* Thin convenience wrapper around register_get_field(). */
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+/* Unpack the sub-fields of a 64-bit packed FPGA ident value. */
+#ifndef FPGAID_TO_PRODUCTCODE
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack major/minor versions into one uint64_t (major in high 32 bits). */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug trace selector: no tracing, trace reads, or trace writes. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module can be attached to. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Manager holding the static init descriptors of all supported FPGAs. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;	/* number of entries in mpa_fpga_prod_init */
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* Run-time model of one FPGA: identity plus its params and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;	/* number of entries in mpa_params */
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;	/* number of entries in mpa_modules */
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;	/* backing static descriptor */
+
+	int m_debug_mode;	/* see enum debug_mode */
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One product parameter (id/value pair) of an FPGA image. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;	/* backing static descriptor */
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* One module instance within an FPGA, owning a set of registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;	/* see enum nthw_bus_type */
+	uint32_t m_addr_base;
+
+	int m_debug_mode;	/* see enum debug_mode */
+
+	int mn_registers;	/* number of entries in mpa_registers */
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;	/* backing static descriptor */
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* One register of a module, with a shadow copy of its value. */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;	/* offset from the module's base address */
+	uint32_t m_addr;	/* absolute address (base + mn_addr_rel) */
+	uint32_t m_type;
+	uint32_t m_len;		/* register length in 32-bit words */
+
+	int m_debug_mode;	/* see enum debug_mode */
+
+	int mn_fields;	/* number of entries in mpa_fields */
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;	/* local copy of the register value */
+	bool *mp_dirty;		/* per-word "needs flush" flags */
+
+	nt_fpga_register_init_t *mp_init;	/* backing static descriptor */
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* One bit field of a register, with precomputed extraction masks. */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;	/* LSB position within the register */
+	uint32_t m_reset_val;
+	uint32_t m_first_word;	/* index of the first shadow word used */
+	uint32_t m_first_bit;	/* bit offset within that word */
+	uint32_t m_front_mask;	/* mask of field bits in the first word */
+	uint32_t m_body_length;	/* number of full middle words */
+	uint32_t mn_words;	/* words needed to hold the field value */
+	uint32_t m_tail_mask;	/* mask of field bits in the last word */
+
+	int m_debug_mode;	/* see enum debug_mode */
+
+	nt_fpga_field_init_t *mp_init;	/* backing static descriptor */
+};
+
+typedef struct nt_field_s nt_field_t;
+
+/* FPGA manager - lookup of supported FPGA images. */
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+/* FPGA model - lifetime and queries. */
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+/* Product parameters. */
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+/* Modules. */
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+/* Registers. */
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+/* Fields. */
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Common standard-library includes and small helper macros shared by nthw */
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a true array (do not use on pointers/parameters) */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a PCI device id to the adapter family it belongs to.
+ * Several device ids (NT40E3/NT40A00/NT40A01) resolve to the same family.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+/* Napatech PCI vendor id */
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+/* PCI device ids of the supported adapters */
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/* Adapter families (coarser than PCI device ids - several ids share a family) */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* Deliberate alias: NT40A01 is handled as the NT40E3 family */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+/* Translate a PCI device id into the adapter family enum above */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA image profile: selects which feature set the loaded image provides */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..fc50b1a50b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build.
+ * Both branches #undef on purpose: to enable the echo trace, a developer
+ * must locally replace the #undef in the DEBUG branch with a #define.
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+/* Completion poll budget (iterations) and DMA ring size in words (power of 2) */
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+/* RAB command opcodes, placed in bits RAB_OPR_HI..RAB_OPR_LO of a command word */
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+/*
+ * RAB command word layout:
+ *   [31:28] opcode, [27:20] word count, [19:16] bus id, [15:0] address
+ */
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate a zero-initialized RAC instance.
+ * Returns NULL on allocation failure (the original malloc+memset would
+ * dereference a NULL pointer if malloc failed - CERT MEM32-C).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	return calloc(1, sizeof(nthw_rac_t));
+}
+
+/* Scrub and release a RAC instance; safe to call with NULL */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Initialize the RAC (Register Access Controller) instance: look up the RAC
+ * module, cache register/field handles and addresses, and init the mutex.
+ * With p == NULL this is a probe: returns 0 iff a RAC module exists.
+ * Returns 0 on success, -1 when the RAC module is missing.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	{
+		/*
+		 * RAC is a primary communication channel:
+		 * turn off debug by default - except for rac_rab_init.
+		 * Fix: this block originally ran before mp_reg_rab_init was
+		 * looked up, so register_set_debug_mode() was handed a NULL
+		 * register pointer; it must run after the lookup above.
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	/* Optional debug registers - may be absent on some FPGA images */
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/* Fix: original cleared mp_reg_dbg_data (already NULL) here */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw BAR0 offsets for the fast register access paths */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional non-memory-mapped bus (NMB) registers */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB bus interfaces on this FPGA (product param NT_RAC_RAB_INTERFACES) */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-poll the RAB "output buffer used" counter until at least word_cnt
+ * response words have arrived. Returns 0 on success, -1 if the poll budget
+ * is exhausted (address is only used for the error log).
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	int budget;
+
+	for (budget = 100000; budget > 0; budget--) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Busses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be exercised - as all RAB modules will be held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * RAC RAB bus "flip/flip" reset: release all buses, pull them all into
+ * reset, then release every bus except bus 0 (ref RMT#37020).
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	const int n_intf_cnt = nthw_rac_get_rab_interface_count(p);
+	const int n_intf_mask = (1 << n_intf_cnt) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_intf_cnt, n_intf_mask);
+	assert(n_intf_cnt);
+	assert(n_intf_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_intf_mask);
+	nthw_rac_rab_init(p, n_intf_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the combined inbound/outbound DMA rings, program their
+ * IOVAs into the adapter and mirror the hardware ring pointers into the
+ * driver state. Returns 0 on success, -1 on DMA allocation failure.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* One ring of RAB_DMA_BUF_CNT words each for inbound and outbound */
+	const uint32_t dma_buf_bytes = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t aligned_bytes = ALIGN_SIZE(dma_buf_bytes);
+	const int numa_node = p_fpga_info->numa_node;
+	uint64_t phys;
+	uint32_t val;
+
+	if (!p->m_dma) {
+		/* FPGA needs Page alignment (4K) */
+		struct nt_dma_s *vfio_dma =
+			nt_dma_alloc(aligned_bytes, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Program the adapter with the IOVAs of the two rings */
+	phys = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   phys & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(phys >> 32) & 0xffffffff);
+	phys += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   phys & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(phys >> 32) & 0xffffffff);
+
+	/* Mirror the adapter's internal ring pointers (byte -> word index) */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &val);
+	p->m_dma_in_ptr_wr = (uint16_t)(val / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &val);
+	p->m_dma_out_ptr_rd = (uint16_t)(val / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Open a DMA transaction: takes the mutex and keeps it held until
+ * nthw_rac_rab_dma_commit(). Fails (releasing the mutex) if a transaction
+ * is already in flight.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	/* Nested begin: drop the lock we just took and report the misuse */
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_adapter_id_str);
+	return -1;
+}
+
+/* Queue the completion marker and publish the write pointer to start the DMA */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion_word = RAB_COMPLETION << RAB_OPR_LO;
+	uint16_t wr = p->m_dma_in_ptr_wr;
+
+	/* Terminate the inbound command stream with a completion command */
+	p->m_dma_in_buf[wr] = completion_word;
+	wr = (uint16_t)((wr + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_dma_in_ptr_wr = wr;
+
+	/* Clear the outbound slot where the device will echo its completion */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* Publishing the new write pointer (in bytes) kicks off the transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll (1 us per iteration, up to RAB_DMA_WAIT iterations) for the echoed
+ * completion marker in the outbound ring. On success consume it and reset
+ * the free-space accounting; returns 0 on success, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion_word = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t elapsed;
+
+	for (elapsed = 0; elapsed < RAB_DMA_WAIT; elapsed++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion_word) ==
+				completion_word) {
+			/* Consume the completion word */
+			p->m_dma_out_ptr_rd =
+				(uint16_t)((p->m_dma_out_ptr_rd + 1) &
+					   (RAB_DMA_BUF_CNT - 1));
+			p->m_in_free = RAB_DMA_BUF_CNT;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+	       p_fpga_info->mp_adapter_id_str,
+	       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+	return -1;
+}
+
+/*
+ * Close the DMA transaction opened by nthw_rac_rab_dma_begin(): start the
+ * transfer, wait for its completion, then release the transaction mutex.
+ * Returns the wait result (0 on success, -1 on timeout/misuse).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+
+	/* Transaction finished - release the lock taken by dma_begin() */
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* MMIO read: volatile 32-bit load from BAR0 + reg_addr into *p_data */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const uint8_t *const p_bar0 = (const uint8_t *)p_fpga_info->bar0_addr;
+
+	*p_data = *(const volatile uint32_t *)(p_bar0 + reg_addr);
+}
+
+/* MMIO write: volatile 32-bit store of p_data to BAR0 + reg_addr */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	uint8_t *const p_bar0 = (uint8_t *)p_fpga_info->bar0_addr;
+
+	*(volatile uint32_t *)(p_bar0 + reg_addr) = p_data;
+}
+
+/*
+ * Queue a RAB write command plus word_cnt payload words into the inbound
+ * DMA ring. The transfer itself is started later by
+ * nthw_rac_rab_dma_commit(). Returns 0 on success, -1 on bad length or
+ * when the ring lacks room (caller must commit and retry).
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t cmd;
+	uint32_t i;
+
+	/* One command carries 1..256 words */
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/* Need room for payload + command word + later completion word */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Compose the command word */
+#if defined(RAB_DEBUG_ECHO)
+	cmd = (RAB_WRITE_ECHO << RAB_OPR_LO) |
+	      ((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+	      (bus_id << RAB_BUSID_LO) | address;
+	/* Echo mode: the device echoes the command and payload back */
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	cmd = (RAB_WRITE << RAB_OPR_LO) |
+	      ((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+	      (bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = cmd;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload into the inbound ring, wrapping as needed */
+	for (i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read command into the inbound DMA ring and record where the
+ * word_cnt response words will land in the outbound ring (via buf_ptr).
+ * The transfer is started by nthw_rac_rab_dma_commit(). Returns 0 on
+ * success, -1 on bad length or when the ring lacks room.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/*
+	 * Fix: the original format string here ended in ": 0x%08X" with no
+	 * matching argument - undefined behavior per ISO C (CERT FIO47-C) -
+	 * and lacked the trailing newline every other log message has.
+	 */
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Tell the caller where to find the response in the outbound ring */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write of word_cnt words via the IB/OB data
+ * registers. Serialized with the instance mutex; incompatible with an
+ * active DMA transaction. Returns 0 on success, -1 on error.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/*
+	 * Fix: the address/bus-id range checks below use >=. The fields are
+	 * RAB_ADDR_BW/RAB_BUSID_BW bits wide, so the largest legal values are
+	 * (1 << BW) - 1; the original '>' accepted the value 1 << BW, whose
+	 * set bit would spill into the adjacent field of the command word.
+	 */
+	if (address >= (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW) - 1);
+		return -1;
+	}
+
+	if (bus_id >= (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW) - 1);
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): word_cnt == (1 << RAB_CNT_BW) is accepted here and in
+	 * the DMA paths; the count is masked to 0 in the command word -
+	 * presumably the hardware decodes 0 as a full 256-word burst. Verify
+	 * against the RAB specification before tightening this check.
+	 */
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write data to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
/*
 * Read a burst of 32-bit words over the RAB (Register Access Bus).
 *
 * Validates the address, bus id and word count, checks that the RAB input
 * buffer can take one read command and the output buffer can hold word_cnt
 * words, issues the read command, waits for completion and copies the words
 * into p_data.  The whole transaction is serialized on p->m_mutex.
 *
 * Returns 0 on success, -1 on parameter/buffer/timeout errors.
 */
int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
		       uint32_t word_cnt, uint32_t *p_data)
{
	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
	int res = 0;
	uint32_t rab_oper_rd;
	uint32_t word_cnt_expected;
	uint32_t buf_used;
	uint32_t buf_free;
	uint32_t in_buf_free;
	uint32_t out_buf_free;

	pthread_mutex_lock(&p->m_mutex);

	/*
	 * NOTE(review): an RAB_ADDR_BW-bit field can hold at most
	 * (1 << RAB_ADDR_BW) - 1, so this check (and the bus_id one below)
	 * looks like it should be ">=" rather than ">" — confirm against the
	 * FPGA spec before changing.
	 */
	if (address > (1 << RAB_ADDR_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal address: value too large %d - max %d\n",
		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
		res = -1;
		goto exit_unlock_res;
	}

	if (bus_id > (1 << RAB_BUSID_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
		res = -1;
		goto exit_unlock_res;
	}

	if (word_cnt == 0) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal word count: value is zero (%d)\n",
		       p_adapter_id_str, word_cnt);
		res = -1;
		goto exit_unlock_res;
	}

	/* word_cnt == (1 << RAB_CNT_BW) is allowed: it wraps to 0 in the
	 * count field, presumably encoding the maximum burst — TODO confirm. */
	if (word_cnt > (1 << RAB_CNT_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
		res = -1;
		goto exit_unlock_res;
	}

	/* Read buffer free register */
	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);

	/* IB free count is in the low 16 bits, OB free count in the high 16 */
	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;

	/* Read buffer used register */
	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);

	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
			     p->rac_rab_buf_used_ob_used_mask);

	/*
	 * Verify that output buffer can hold the number of words to be read,
	 * input buffer can hold one read command
	 * and that the input and output "used" buffer is 0
	 */
	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
		word_cnt_expected = word_cnt;

#if defined(RAB_DEBUG_ECHO)
		rab_oper_rd =
			(RAB_READ_ECHO << RAB_OPR_LO) |
			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
			(bus_id << RAB_BUSID_LO) | address;
		word_cnt_expected++;
#else
		/*
		 * NOTE(review): unlike the echo variant above (and the write
		 * path), word_cnt is not masked with ((1 << RAB_CNT_BW) - 1)
		 * here, so word_cnt == (1 << RAB_CNT_BW) would spill into the
		 * bus-id field — confirm and align with the write path.
		 */
		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
			    (bus_id << RAB_BUSID_LO) | address;
#endif /* RAB_DEBUG_ECHO */

		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
				   rab_oper_rd);

		/* Wait until done */
		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
			res = -1;
			goto exit_unlock_res;
		}

#if defined(RAB_DEBUG_ECHO)
		uint32_t rab_echo_oper_rd;

		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
				  &rab_echo_oper_rd);
		if (p->mn_param_rac_rab_ob_update) {
			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
					   0);
		}
		if (rab_oper_rd != rab_echo_oper_rd) {
			NT_LOG(ERR, NTHW,
			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
		}
#endif /* RAB_DEBUG_ECHO */

		/* Read data from output buffer */
		{
			uint32_t i;

			for (i = 0; i < word_cnt; i++) {
				nthw_rac_reg_read32(p_fpga_info,
						  p->rac_rab_ob_data_addr,
						  p_data);
				/* Some FPGA builds require the OB register to
				 * be written back after each read */
				if (p->mn_param_rac_rab_ob_update) {
					nthw_rac_reg_write32(p_fpga_info,
							     p->rac_rab_ob_data_addr,
							     0);
				}
				p_data++;
			}
		}

		/* Read buffer free register; bit 31 flags timeout/overflow */
		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
				  &buf_free);
		if (buf_free & 0x80000000) {
			/* Clear Timeout and overflow bits */
			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
					   0x0);
			NT_LOG(ERR, NTHW,
			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
			       p_adapter_id_str, bus_id, address, in_buf_free,
			       out_buf_free, buf_used);
			res = -1;
			goto exit_unlock_res;
		}

		res = 0;
		goto exit_unlock_res;
	} else {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
		       out_buf_free, buf_used);
		res = -1;
		goto exit_unlock_res;
	}

exit_unlock_res:
	pthread_mutex_unlock(&p->m_mutex);
	return res;
}
+
/*
 * Flush the RAB input/output buffers.
 *
 * Sets the flush bit, clears the buffer-free register, then busy-polls
 * (bounded, no sleep) until the used register reads back as only the flush
 * bit, i.e. both IB_USED and OB_USED have drained to 0.  Finally clears the
 * flush bit again.  Serialized on p->m_mutex.
 *
 * Returns 0 on success, -1 if the buffers did not drain within the retry
 * budget.
 */
int nthw_rac_rab_flush(nthw_rac_t *p)
{
	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
	uint32_t data = 0;
	uint32_t retry;
	int res = 0;

	pthread_mutex_lock(&p->m_mutex);

	/* Set the flush bit */
	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
			   p->rac_rab_buf_used_flush_mask);

	/* Reset BUF FREE register */
	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);

	/* Wait until OB_USED and IB_USED are 0 */
	for (retry = 0; retry < 100000; retry++) {
		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);

		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
			break;
	}

	/* data still holds the last value read; mismatch means we timed out */
	if (data != p->rac_rab_buf_used_flush_mask) {
		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
		       p_adapter_id_str);
		res = -1;
	}

	/* Clear flush bit when done */
	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);

	pthread_mutex_unlock(&p->m_mutex);
	return res;
}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
/*
 * State for one RAC (Register Access Controller) instance: FPGA module
 * handles, cached register/field handles and pre-resolved register
 * addresses/masks used by the register-mode and DMA-mode RAB accessors.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_rac;

	/* Serializes all RAB transactions on this controller */
	pthread_mutex_t m_mutex;

	int mn_param_rac_rab_interfaces;
	/* Non-zero when the OB data register must be written back after reads */
	int mn_param_rac_rab_ob_update;

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	int mn_fld_rab_init_bw;
	uint32_t mn_fld_rab_init_mask;

	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	/* Input-buffer (host -> FPGA) and output-buffer (FPGA -> host) data */
	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	/* DMA-mode ring buffer base addresses and read/write pointers */
	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Pre-resolved absolute register addresses (avoid lookups per access) */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Pre-computed field masks for the buf_free/buf_used registers */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* DMA-mode state: true between dma_begin and dma_commit */
	bool m_dma_active;

	struct nt_dma_s *m_dma;

	volatile uint32_t *m_dma_in_buf;
	volatile uint32_t *m_dma_out_buf;

	uint16_t m_dma_out_ptr_rd;
	uint16_t m_dma_in_ptr_wr;
	uint32_t m_in_free;
};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+struct dma_buf_ptr {
+	uint32_t size;
+	uint32_t index;
+	volatile uint32_t *base;
+};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
/*
 * Bind a statistics handle to STA module instance n_instance of p_fpga,
 * resolve its register fields, derive the counter layout (which depends on
 * the STA module version and product parameters) and put the module in a
 * known state (TX disabled where supported, counters cleared, DMA disabled).
 *
 * Returns 0 on success, -1 if the instance does not exist.
 * Special case: when p is NULL, only probes for the instance (0 if found).
 */
int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	uint64_t n_module_version_packed64 = -1;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);

	/* Probe-only mode: report presence of the module instance */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_stat = mod;

	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
	/* NOTE(review): "%08lX" with a uint64_t argument is wrong on ILP32
	 * targets where long is 32-bit; PRIX64 would be portable. */
	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
	       p->mn_instance, n_module_version_packed64);

	{
		nt_register_t *p_reg;
		/* STA_CFG register */
		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);

		/* CFG: fields NOT available from v. 3 (query, may be NULL) */
		p->mp_fld_tx_disable =
			register_query_field(p_reg, STA_CFG_TX_DISABLE);
		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);

		/* STA_STATUS register */
		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
		p->mp_fld_stat_toggle_missed =
			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);

		/* HOST_ADR registers: 64-bit DMA address split in LSB/MSB */
		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);

		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
	}

	/* Params */
	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;

	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);

	/* RX port count: try VSWITCH param first, then fall back twice */
	p->m_nb_rx_ports =
		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
	if (p->m_nb_rx_ports == -1) {
		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
							  -1); /* non-VSWITCH */
		if (p->m_nb_rx_ports == -1) {
			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
								  NT_PORTS,
								  0); /* non-VSWITCH */
		}
	}

	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
	p->m_rx_port_replicate =
		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);

	/* Color counters come in pairs (presumably octets+packets per color
	 * — TODO confirm) */
	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
			      2; /* VSWITCH */
	if (p->m_nb_color_counters == 0) {
		p->m_nb_color_counters =
			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
			2; /* non-VSWITCH */
	}

	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;

	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);

	/* 6 counters per RX host buffer, plus 2 DBS counters from v0.6 */
	p->m_nb_rx_hb_counters =
		(p->m_nb_rx_host_buffers *
		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
			   p->m_dbs_present :
			   0)));

	p->m_nb_tx_hb_counters = 0;

	p->m_nb_rx_port_counters =
		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
			  p->m_dbs_present :
			  0);
	p->m_nb_tx_port_counters = 0;

	p->m_nb_counters =
		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;

	/* Map module version to the DMA statistics layout revision */
	p->mn_stat_layout_version = 0;
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
		p->mn_stat_layout_version = 6;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
		p->mn_stat_layout_version = 5;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
		p->mn_stat_layout_version = 4;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
		p->mn_stat_layout_version = 3;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
		p->mn_stat_layout_version = 2;
	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
		p->mn_stat_layout_version = 1;
	} else {
		p->mn_stat_layout_version = 0;
		NT_LOG(ERR, NTHW,
		       "%s: unknown module_version 0x%08lX layout=%d\n",
		       p_adapter_id_str, n_module_version_packed64,
		       p->mn_stat_layout_version);
	}
	assert(p->mn_stat_layout_version);

	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
		p->m_nb_rx_port_counters += 6;

	/* STA module 0.3+ adds TX stats */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
			p->m_nb_tx_ports >= 1)
		p->mb_has_tx_stats = true;

	/* STA module 0.3+ adds TX stat counters */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
		p->m_nb_tx_port_counters += 22;

	/* STA module 0.4+ adds TX drop event counter */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
		p->m_nb_tx_port_counters += 1; /* TX drop event counter */

	/*
	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
	 * duplicate counters
	 */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
		p->m_nb_rx_port_counters += 4;
		p->m_nb_tx_port_counters += 1;
	}

	/* VSWITCH profile overrides the per-port counter counts entirely */
	if (p->mb_is_vswitch) {
		p->m_nb_rx_port_counters = 5;
		p->m_nb_tx_port_counters = 5;
	}

	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);

	if (p->mb_has_tx_stats)
		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);

	/* Output params (debug) */
	NT_LOG(DBG, NTHW,
	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
	NT_LOG(DBG, NTHW,
	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
	       p->mn_stat_layout_version);
	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
	       p->m_nb_counters, p->m_nb_counters);
	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);

	/* Init: quiesce the module */
	if (p->mp_fld_tx_disable)
		field_set_flush(p->mp_fld_tx_disable);

	/* Pulse counter-clear: set then clear the bit */
	field_update_register(p->mp_fld_cnt_clear);
	field_set_flush(p->mp_fld_cnt_clear);
	field_clr_flush(p->mp_fld_cnt_clear);

	/* Write 1 to acknowledge any pending toggle-missed status */
	field_update_register(p->mp_fld_stat_toggle_missed);
	field_set_flush(p->mp_fld_stat_toggle_missed);

	/* Leave DMA disabled until nthw_stat_set_dma_address() is called */
	field_update_register(p->mp_fld_dma_ena);
	field_clr_flush(p->mp_fld_dma_ena);
	field_update_register(p->mp_fld_dma_ena);

	return 0;
}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
/*
 * State for one STA (statistics) module instance: FPGA handles, the derived
 * counter layout (computed in nthw_stat_init from module version and product
 * parameters) and the DMA area used to receive counter snapshots.
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_stat;
	int mn_instance;

	/* DMA statistics layout revision (1..6), derived from module version */
	int mn_stat_layout_version;

	bool mb_is_vswitch;
	bool mb_has_tx_stats;

	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	int m_dbs_present;

	int m_rx_port_replicate;

	int m_nb_color_counters;

	/* Per-host-buffer and per-port counter counts (version dependent) */
	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	/* Total number of 32-bit counters in the DMA area */
	int m_nb_counters;

	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	/* NULL on STA module v3+ (field removed from STA_CFG) */
	nt_field_t *mp_fld_tx_disable;

	nt_field_t *mp_fld_cnt_freeze;

	nt_field_t *mp_fld_stat_toggle_missed;

	/* 64-bit statistics DMA address split across two register fields */
	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;

	/* Points into the DMA area, just past the counters */
	uint64_t *mp_timestamp;
};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
/*
 * Pluggable logger backend, registered once via nt_log_init().
 */
struct nt_log_impl {
	/* One-time backend initialization; its return value is passed
	 * through by nt_log_init() */
	int (*init)(void);
	/* Emit one pre-formatted message (format already EOL-normalized
	 * by nt_log()) */
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);
	/* See nt_log_is_debug(): 1 if debug, 0 if lower, -1 on bad module */
	int (*is_debug)(uint32_t module);
};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..b9109754ac
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Find the first newline of the trailing EOL run of s, ignoring trailing
 * spaces: for "hello\n\n\n" the returned pointer addresses the first '\n'.
 * Returns NULL when s (after discounting trailing spaces) does not end in
 * a newline, or when s is empty.
 */
static char *last_trailing_eol(char *s)
{
	int i = (int)strlen(s) - 1;
	/* Skip trailing spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	/* i < 0 means the string was empty; the original code read s[-1]
	 * here, which is out of bounds */
	if (i < 0 || s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
/*
 * Release a buffer from ntlog_helper_str_alloc(). NULL is accepted
 * (free(NULL) is a no-op).
 */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..3850ccd934
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;
+	uint64_t size;
+};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Register the VFIO DMA map/unmap callbacks used by nt_dma_alloc/free. */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb.vfio_dma_map = impl->vfio_dma_map;
+	vfio_cb.vfio_dma_unmap = impl->vfio_dma_unmap;
+}
+
+/*
+ * Allocate a DMA-able buffer of 'size' bytes with alignment 'align' on
+ * NUMA node 'numa', and map it through the registered VFIO callback.
+ * Returns a descriptor owned by the caller (free with nt_dma_free), or
+ * NULL on allocation/mapping failure.
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	/*
+	 * NOTE(review): ALIGN_SIZE(size) rounds up to a power of two, so the
+	 * mapping may cover more bytes than the 'size' bytes allocated above
+	 * when size is not a power of two - confirm this is intentional.
+	 */
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size/align are uint64_t: use PRIu64/PRIX64, not %u/%X (UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and free a buffer previously returned by nt_dma_alloc.
+ * Frees both the data buffer and the descriptor; a NULL descriptor is a
+ * no-op.  An unmap failure is logged but the memory is released anyway.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	if (!vfio_addr)
+		return;
+
+	/* size is uint64_t: use PRIu64, not %u (undefined behavior) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v2 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-17 14:43   ` Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with '_' + uppercase are reserved
+ * for the implementation (C11 7.1.3) - consider a non-reserved guard name.
+ */
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)          /* max adapters handled by this PMD */
+#define NUM_ADAPTER_PORTS_MAX (128)  /* max ports per adapter */
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types.
+ * monitor_tasks[i] is joined in stop_monitor_tasks() when
+ * monitor_task_is_running[i] was non-zero.
+ * NOTE(review): 'volatile' does not provide atomicity or ordering across
+ * threads - confirm an atomic type is not required here.
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+/*
+ * Signal handler / shutdown hook: clear every monitor task's run flag.
+ * When invoked programmatically with signum == -1, also join each task
+ * that was still running and scrub its thread handle.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(monitor_task_is_running); idx++) {
+		const int was_running = monitor_task_is_running[idx];
+
+		monitor_task_is_running[idx] = 0;
+		if (signum != -1 || was_running == 0)
+			continue;
+
+		void *thread_ret = NULL;
+
+		pthread_join(monitor_tasks[idx], &thread_ret);
+		memset(&monitor_tasks[idx], 0, sizeof(monitor_tasks[0]));
+	}
+}
+
+/*
+ * Print a human-readable summary of the adapter (PCI identity, FPGA
+ * version/build, port counts, HW platform) to 'pfh', then append the
+ * statistics dump.  Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Render "dddd:bb:dd.f" from the packed PCI identifier */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	/* Append statistics counters to the same stream */
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+/* Create and initialize the SPI v3 handle used to configure sensors.
+ * Returns NULL on allocation or init failure (failure is logged).
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *spi = nthw_spi_v3_new();
+
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+/* Create and initialize the SPI handle used to read sensor values.
+ * Returns NULL on allocation or init failure (failure is logged).
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *spi = nthw_spis_new();
+
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * Build the adapter sensor list: the FPGA temperature sensor first, then
+ * (when the sensor SPI is available) the AVR-managed FAN/PSU/PCB sensors.
+ * Updates adapter->adapter_sensors (linked list) and adapter_sensors_cnt.
+ *
+ * Fixes vs. original: guard against NULL returns from
+ * fpga_temperature_sensor_init()/avr_sensor_init() - the original walked
+ * '->next' on NULL and counted failed sensors - and correct the log typo
+ * "starteed".
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor heads the list */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	if (sensors_list_ptr)
+		adapter->adapter_sensors_cnt++;
+
+	/* AVR sensors require both the SPI handle and a valid list head */
+	if (sensors_s_spi && sensors_list_ptr) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+	}
+
+	if (sensors_s_spi)
+		nthw_spi_v3_delete(sensors_s_spi);
+}
+
+/*
+ * Initialize one NT4GA adapter: derive HW identity from the PCI device id,
+ * build ident strings, bring up the FPGA model, run PCI TA/TG, set up
+ * sensors, init the per-product link code, EPP, and statistics.
+ * Returns 0 on success or a non-zero error code from the failing stage.
+ * The malloc'ed ident strings are owned by p_adapter_info and released in
+ * nt4ga_adapter_deinit().
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	/* Bit-fields of the PCI device id; layout per DN-0060 section 9 */
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: "dddd:bb:dd.f" string; NULL malloc result tolerated */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: "PCI:dddd:bb:dd.f", shared with fpga_info */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Per-port ident strings "<adapter>:intf_<i>" */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; populates fpga_info fields used below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG: throughput measurement is best-effort; absence of
+	 * the TA/TG modules only logs a warning.
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Per-product link bring-up, keyed on the FPGA product id */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional: only created/initialized when present in the FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter: stop monitor threads, stop statistics, shut the
+ * FPGA model down, reset the RAC/RAB flip flop, and free all ident
+ * strings and sensor lists allocated by nt4ga_adapter_init()/
+ * adapter_sensor_setup().  Returns the nthw_rac_rab_reset() result.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1 => programmatic stop: also joins the running monitor threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors (walk the singly linked list) */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors (one list per physical port) */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids as read from PCI config space */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid (decoded in nt4ga_adapter_init, DN-0060 sec 9) */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;  /* bits [11:4] of pci_device_id */
+	int hw_product_type; /* bits [3:0] of pci_device_id */
+	int hw_reserved1;    /* bits [15:12] of pci_device_id */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Top-level per-adapter state aggregating all NT4GA service modules. */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg; /* PCI test-access/traffic-gen */
+	struct nt4ga_stat_s nt4ga_stat;           /* statistics module */
+	struct nt4ga_filter_s nt4ga_filter;       /* flow filter module */
+	struct nt4ga_link_s nt4ga_link;           /* link/port module */
+
+	struct hw_info_s hw_info;     /* identity derived from PCI ids */
+	struct fpga_info_s fpga_info; /* FPGA model state */
+
+	/* Sensor lists built by adapter_sensor_setup(); freed in deinit */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* malloc'ed ident strings, owned here, freed in nt4ga_adapter_deinit */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown; /* external shutdown request flag */
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+/* Optional open/stop hooks invoked on port up/down; see comment above. */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Per-adapter flow-filter state. */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;           /* number of interfaces */
+	int n_queues_per_intf_cnt; /* queues per interface */
+
+	struct flow_nic_dev *mp_flow_device; /* backing flow device handle */
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port, so the
+ * 'port' argument is deliberately ignored.  'p' IS used - the original
+ * incorrectly tagged it _unused.
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: NIM presence as last recorded in the link state.
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: administrative (link mode) state.
+ * Stored inverted: port_disable == !adm_state.
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status.
+ * Setting link state/status is (currently) the same as controlling the
+ * port adm state.
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed.
+ * The requested speed is recorded both as a pending port action and as
+ * the current link info.
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code: the setter is a no-op and
+ * the getter always reports autoneg enabled.  The originals computed a
+ * dead '_unused' local pointer - removed.
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	return true;
+}
+
+/*
+ * port: link duplex.
+ * Currently not fully supported by link code; only the pending port
+ * action is written, while the getter reads the current link info.
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode (see enum nt_link_loopback_e).
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ * Returns a by-value copy of the 100G NIM I2C context for the port.
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nim_i2c_ctx_t nim_ctx = p_link->u.var100g.nim_ctx[port];
+	return nim_ctx;
+}
+
+/*
+ * port: tx power
+ * Disable/enable the TX laser on QSFP28 ports.
+ * Returns 0 on success, 1 if setting the laser state failed, and -1 for
+ * unsupported (non-QSFP28) port types.  RX-only NIMs are skipped and
+ * reported as success.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			/* -1 => apply to all lanes */
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode.
+ * The ON/OFF names are aliases for the AUTO/MANUAL values.
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Current link state of one port, filled in by the link monitoring thread.
+ */
+typedef struct link_state_s {
+	bool link_disabled; /* Port is administratively disabled */
+	bool nim_present; /* A NIM module is currently inserted */
+	bool lh_nim_absent; /* Set to !nim_present when state is rebuilt */
+	bool link_up; /* Current PHY link state */
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Link parameters read back for one port */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Administrative requests for one port, applied by the monitoring thread */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+/* Per-port HW contexts for the 100G adapter variant */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Union over the adapter variants; nim_ctx must be the first field of every
+ * variant so u.nim_ctx can be used without knowing the variant.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+/* Aggregate link state of one adapter */
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized; /* Set once per-port HW contexts are ready */
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port's link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port's link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+/* States of the link-up state machine */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+/* Per-port state owned by the link monitoring thread */
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback configuration on one port.
+ *
+ * mode: 0 = off, 1 = host loopback, 2 = line loopback (matches
+ * enum nt_link_loopback_e). last_mode is the previously applied mode and is
+ * used to undo the old setting when the mode changes. After any change the
+ * RX path is reset and the BIP/FEC counters are cleared.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		/* In host loopback the GTY polarity must not be swapped */
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* Loopback off: undo whatever the previous mode enabled */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* NT200A01 hw_id 2 and all NT200A02 boards need GTY polarity swap */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ *
+ * Fills *state from the MAC/PCS link summary and the GPIO PHY module
+ * presence signal. A debug line is logged only when the summary changes
+ * (one cached line per adapter/port). Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Cache of the last logged summary, to log only on change */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Query the GPIO PHY for the module-present signal of one interface.
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+
+	const bool present =
+		nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+	return present;
+}
+
+/*
+ * Enable the MAC/PCS RX path. Always returns 0.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable the MAC/PCS TX path and select the host as TX source.
+ * Always returns 0.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable the MAC/PCS RX path. Always returns 0.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable the MAC/PCS TX path and deselect the host as TX source.
+ * Always returns 0.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ * Pulses the RX path reset, allowing 10 ms settle time on each edge.
+ * Always returns 0.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ *
+ * Applies the board-layout dependent GTY lane polarity tables when swap is
+ * true, or clears the swap on all four lanes when false. Always returns 0.
+ *
+ * NOTE(review): the polarity tables are dimensioned [2][4], i.e. for two
+ * ports only — assumes port is 0 or 1 on the adapters that call this;
+ * confirm before reuse on adapters with more ports.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected;
+ * resets the RX path whenever the MAC/PCS reports trouble.
+ * Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_all_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || hi_ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Verifies a module is present, resets it via the GPIO PHY, pre-initializes
+ * its I2C context and validates the module type. When enable is false the
+ * port data paths are shut down and the module is put into low power mode.
+ *
+ * Returns 0 on success (including when no module is present at entry),
+ * non-zero on error.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* only supported module type */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		/* Shut the data paths down before touching the module */
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* The module may have disappeared across the reset */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ *
+ * Configures the MAC/PCS (LED mode, equalization, polarity swap on the
+ * boards that need it, end-of-frame timestamping, FEC, GTY TX tuning,
+ * timestamp compensation), enables TX/RX and creates the NIM.
+ * Returns 0 on success or the NIM creation error code.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* First init call only probes for the GMF module on this port */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		/*
+		 * GTY TX tuning (pre/diff/post) per port and lane.
+		 * NOTE(review): tables are indexed by port and sized for two
+		 * ports — assumes port < 2 on these adapters; confirm.
+		 */
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		/* Same tuning values for all lanes on these boards */
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ *
+ * Runs until monitor_task_is_running[adapter_no] is cleared: polls adapter
+ * and NIM sensors, applies administrative port enable/disable and loopback
+ * changes, (re)initializes ports on NIM insertion and logs link changes.
+ * Always returns 0.
+ *
+ * Fix: the NIM-insertion log used nim_ctx->... (always port 0's context)
+ * instead of nim_ctx[i]...., so it printed the wrong port's module data.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				/* Host loopback (1) forces the link up */
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Log the context of the port the module was inserted in */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine - pthread entry point.
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ *
+ * Sets up the per-port MAC/PCS, NIM I2C and GPIO PHY contexts once per
+ * adapter, then starts the link monitoring thread if it is not already
+ * running. Returns 0 on success, otherwise the first failing init code
+ * or the pthread_create() result.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* The IIC instance is offset by two from the port no */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* Initialize all 100G ports and start the link monitoring thread */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Probe and initialize the PCI self-test modules: the read traffic
+ * generator (PCI_RD_TG), the write traffic generator (PCI_WR_TG) and the
+ * test accelerator (PCI_TA).
+ * A missing module is logged as a warning and counted, not treated as fatal.
+ * Returns the number of modules that could not be found (0 = all present),
+ * or -1 on a NULL state pointer.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	/* Each module: allocate shadow object, then bind it to instance 0. */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (1) or disable (0) the test-accelerator checker. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+/* Read the TA length-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA payload-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one read-TG RAM slot: its DMA target address and its request
+ * descriptor (size, wait flag, wrap flag). Always returns 0.
+ * NOTE(review): slot_addr * req_size is evaluated in 32-bit arithmetic
+ * before widening; fine for the current 8 x 2KB test area — confirm if
+ * slot counts or packet sizes ever grow.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the read TG for num_iterations (0 stops it). Always returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll (1 ms interval, up to 1000 tries ~ 1 s) until the read TG reports
+ * ready. Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program one write-TG RAM slot: its DMA target address and its request
+ * descriptor (size, wait, wrap, increment flags). Always returns 0.
+ * NOTE(review): same 32-bit slot_addr * req_size arithmetic as the
+ * read-TG setup — confirm if the test area ever grows.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the write TG for num_iterations (0 stops it). Always returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll (1 ms interval, up to 1000 tries ~ 1 s) until the write TG reports
+ * ready. Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Run one HIF bandwidth measurement pass for the direction/packet-size/
+ * delay parameters carried in *pri (and mirrored in *sla for a slave
+ * endpoint, currently unused - p_slave_instance/p_pci_slave stay NULL).
+ *
+ * Sequence: allocate a DMA area, stop any running generators, prime the
+ * host buffer by letting the write TG fill it once, verify the written
+ * pattern, start the TG(s) for the requested direction, sample HIF/PCIe3
+ * counters around a `delay` usec window, and check the TA error counters.
+ *
+ * Returns 0 on success, non-zero (bo_error) on any failure; returns 0
+ * without measuring if the DMA allocation fails (only a debug log).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			/* Program one TG slot per packet; last slot wraps. */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		/*
+		 * NOTE(review): the single-iteration run + wait appears twice;
+		 * presumably intentional priming - confirm against HW docs.
+		 */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			/* Payload starts at offset 8 and is an incrementing
+			 * 32-bit counter; compare word by word.
+			 */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		/* 0xffff iterations = effectively free-running for the window */
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Drive the HIF throughput measurement over the requested NUMA node(s)
+ * and direction(s). numa_node == UINT8_MAX means "all" (currently a
+ * single iteration, numa_begin == numa_end); direction <= 0 sweeps
+ * directions 1..3 (read, write, combined). Parameters <= 0 fall back to
+ * the TG_* defaults; n_delay == 0 is rejected with -1.
+ *
+ * Fixed: the slave endpoint stanza re-initialized pri->n_ref_clk_cnt
+ * instead of sla->n_ref_clk_cnt (copy/paste from the pri stanza).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary endpoint parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave endpoint parameters (mirrored) */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	/* NOTE(review): errors are logged but 0 is always returned - confirm
+	 * whether callers should instead receive bo_error.
+	 */
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator test-area geometry */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles for the PCI test accelerator and read/write traffic generators */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg;
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg;
+	struct nthw_pci_ta *mp_nthw_pci_ta;
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Probe the TA/TG modules; returns the count of missing modules. */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one throughput measurement pass; returns non-zero on error. */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Sweep NUMA node(s)/direction(s) and run measurements. */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/* Inline timestamp format is pcap 32:32 bits (seconds:nanoseconds).
+ * Convert to a single nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Collect one statistics snapshot from the DMA area into the shadow
+ * counters, choosing the vswitch (virt v1) or capture (cap v1) layout
+ * based on the STA module, and refresh last_timestamp.
+ * Always returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Allocate and bind the STA (statistics) and RMC modules and cache the
+ * host-buffer and port counts reported by the STA module.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/* Release the STA object if the RMC allocation failed. */
+		if (!p_nthw_rmc) {
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache the geometry reported by the STA module. */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate the DMA area the FPGA statistics engine writes into, point the
+ * STA module at it, allocate the per-counter shadow structures (vswitch
+ * or inline layout) and trigger the first statistics transfer.
+ * Returns 0 on success, -1 on any allocation failure.
+ *
+ * Fixed: two NT_LOG format strings invoked undefined behavior - one had
+ * more conversions than arguments, the other printed an int with %02ld
+ * and a uint32_t with PRIX64.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Block the RMC while the statistics DMA window is being moved. */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d 0x%" PRIX64 " 0x%" PRIX64 "\n",
+		       __func__, n_stat_size, numa_node, p_dma->addr,
+		       p_dma->iova);
+
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX64 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr,
+		       (uint64_t)n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison the area so stale reads are recognizable. */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics shadow structures and the statistics DMA area.
+ * Safe to call with partially-initialized state: every pointer is checked
+ * and NULLed after freeing. Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+		free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+	}
+	if (p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+		free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+	}
+
+	if (p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+		free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+	}
+	if (p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+		free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_color) {
+		free(p_nt4ga_stat->mp_stat_structs_color);
+		p_nt4ga_stat->mp_stat_structs_color = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_hb) {
+		free(p_nt4ga_stat->mp_stat_structs_hb);
+		p_nt4ga_stat->mp_stat_structs_hb = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_flm) {
+		free(p_nt4ga_stat->mp_stat_structs_flm);
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	}
+
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Print per-port RX/TX total packet, octet and drop counters for every
+ * physical port to the stream pfh. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Fold one DMA snapshot of the vswitch (virt v1) statistics layout into
+ * the accumulated shadow counters: color counters first, then host-buffer
+ * counters, then per-port RX and TX counters.
+ * Returns 0 on success, -1 on NULL arguments or an unsupported layout.
+ *
+ * Fixed: the NULL check originally ran AFTER p_nt4ga_stat had been
+ * dereferenced in the variable initializers; also the color-counter loop
+ * was mislabelled "RX ports".
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: packet word carries TCP flags in its top 6 bits */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: 8 words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/*
+ * Accumulate capture-profile (v1) statistics from the DMA snapshot in
+ * p_stat_dma_virtual into p_nt4ga_stat.
+ *
+ * DMA layout (stat layout version 6), in order: color counters,
+ * host-buffer counters, Rx port counters, Tx port counters.
+ *
+ * Returns 0 on success, -1 on invalid arguments or unsupported layout.
+ * Called with stat mutex locked.
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	int c, h, p;
+
+	/* Validate all arguments before the first dereference (previously
+	 * p_nt4ga_stat->mp_nthw_stat was read before the NULL check).
+	 */
+	if (!p_nt4ga_stat || !p_stat_dma_virtual)
+		return -1;
+
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (!p_nthw_stat)
+		return -1;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two 32-bit DMA words (packets, bytes) per color */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffers: eight consecutive 32-bit DMA words per buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		struct host_buffer_counters *p_hb =
+			&p_nt4ga_stat->mp_stat_structs_hb[h];
+		const uint32_t *p_src = &p_stat_dma_virtual[h * 8];
+
+		p_hb->flush_packets += p_src[0];
+		p_hb->drop_packets += p_src[1];
+		p_hb->fwd_packets += p_src[2];
+		p_hb->dbs_drop_packets += p_src[3];
+		p_hb->flush_bytes += p_src[4];
+		p_hb->drop_bytes += p_src[5];
+		p_hb->fwd_bytes += p_src[6];
+		p_hb->dbs_drop_bytes += p_src[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports: m_nb_rx_port_counters 32-bit words per port */
+	for (p = 0; p < n_rx_ports; p++) {
+		struct port_counters_v2 *p_rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+
+		p_rx->octets += p_src[0];
+		p_rx->broadcast_pkts += p_src[1];
+		p_rx->multicast_pkts += p_src[2];
+		p_rx->unicast_pkts += p_src[3];
+		p_rx->pkts_alignment += p_src[4];
+		p_rx->pkts_code_violation += p_src[5];
+		p_rx->pkts_crc += p_src[6];
+		p_rx->undersize_pkts += p_src[7];
+		p_rx->oversize_pkts += p_src[8];
+		p_rx->fragments += p_src[9];
+		p_rx->jabbers_not_truncated += p_src[10];
+		p_rx->jabbers_truncated += p_src[11];
+
+		/* Packet size histogram */
+		p_rx->pkts_64_octets += p_src[12];
+		p_rx->pkts_65_to_127_octets += p_src[13];
+		p_rx->pkts_128_to_255_octets += p_src[14];
+		p_rx->pkts_256_to_511_octets += p_src[15];
+		p_rx->pkts_512_to_1023_octets += p_src[16];
+		p_rx->pkts_1024_to_1518_octets += p_src[17];
+		p_rx->pkts_1519_to_2047_octets += p_src[18];
+		p_rx->pkts_2048_to_4095_octets += p_src[19];
+		p_rx->pkts_4096_to_8191_octets += p_src[20];
+		p_rx->pkts_8192_to_max_octets += p_src[21];
+
+		p_rx->mac_drop_events += p_src[22];
+		p_rx->pkts_lr += p_src[23];
+		p_rx->duplicate += p_src[24];
+
+		p_rx->pkts_ip_chksum_error += p_src[25];
+		p_rx->pkts_udp_chksum_error += p_src[26];
+		p_rx->pkts_tcp_chksum_error += p_src[27];
+		p_rx->pkts_giant_undersize += p_src[28];
+		p_rx->pkts_baby_giant += p_src[29];
+		p_rx->pkts_not_isl_vlan_mpls += p_src[30];
+		p_rx->pkts_isl += p_src[31];
+		p_rx->pkts_vlan += p_src[32];
+		p_rx->pkts_isl_vlan += p_src[33];
+		p_rx->pkts_mpls += p_src[34];
+		p_rx->pkts_isl_mpls += p_src[35];
+		p_rx->pkts_vlan_mpls += p_src[36];
+		p_rx->pkts_isl_vlan_mpls += p_src[37];
+
+		p_rx->pkts_no_filter += p_src[38];
+		p_rx->pkts_dedup_drop += p_src[39];
+		p_rx->pkts_filter_drop += p_src[40];
+		p_rx->pkts_overflow += p_src[41];
+		/* DBS drop counters are only valid when the DBS module exists */
+		p_rx->pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[42] : 0;
+		p_rx->octets_no_filter += p_src[43];
+		p_rx->octets_dedup_drop += p_src[44];
+		p_rx->octets_filter_drop += p_src[45];
+		p_rx->octets_overflow += p_src[46];
+		p_rx->octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[47] : 0;
+
+		p_rx->ipft_first_hit += p_src[48];
+		p_rx->ipft_first_not_hit += p_src[49];
+		p_rx->ipft_mid_hit += p_src[50];
+		p_rx->ipft_mid_not_hit += p_src[51];
+		p_rx->ipft_last_hit += p_src[52];
+		p_rx->ipft_last_not_hit += p_src[53];
+
+		/* Rx totals; sum in 64 bits to avoid 32-bit wrap-around */
+		uint64_t new_drop_events_sum =
+			(uint64_t)p_src[22] + p_src[38] + p_src[39] +
+			p_src[40] + p_src[41] +
+			(p_nthw_stat->m_dbs_present ? p_src[42] : 0);
+
+		/* Total packets is the sum of the size-histogram buckets */
+		uint64_t new_packets_sum =
+			(uint64_t)p_src[7] + p_src[8] + p_src[9] + p_src[10] +
+			p_src[11] + p_src[12] + p_src[13] + p_src[14] +
+			p_src[15] + p_src[16] + p_src[17] + p_src[18] +
+			p_src[19] + p_src[20] + p_src[21];
+
+		p_rx->drop_events += new_drop_events_sum;
+		p_rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports: m_nb_tx_port_counters 32-bit words per port */
+	for (p = 0; p < n_tx_ports; p++) {
+		struct port_counters_v2 *p_tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+
+		p_tx->octets += p_src[0];
+		p_tx->broadcast_pkts += p_src[1];
+		p_tx->multicast_pkts += p_src[2];
+		p_tx->unicast_pkts += p_src[3];
+		p_tx->pkts_alignment += p_src[4];
+		p_tx->pkts_code_violation += p_src[5];
+		p_tx->pkts_crc += p_src[6];
+		p_tx->undersize_pkts += p_src[7];
+		p_tx->oversize_pkts += p_src[8];
+		p_tx->fragments += p_src[9];
+		p_tx->jabbers_not_truncated += p_src[10];
+		p_tx->jabbers_truncated += p_src[11];
+
+		/* Packet size histogram */
+		p_tx->pkts_64_octets += p_src[12];
+		p_tx->pkts_65_to_127_octets += p_src[13];
+		p_tx->pkts_128_to_255_octets += p_src[14];
+		p_tx->pkts_256_to_511_octets += p_src[15];
+		p_tx->pkts_512_to_1023_octets += p_src[16];
+		p_tx->pkts_1024_to_1518_octets += p_src[17];
+		p_tx->pkts_1519_to_2047_octets += p_src[18];
+		p_tx->pkts_2048_to_4095_octets += p_src[19];
+		p_tx->pkts_4096_to_8191_octets += p_src[20];
+		p_tx->pkts_8192_to_max_octets += p_src[21];
+
+		p_tx->mac_drop_events += p_src[22];
+		p_tx->pkts_lr += p_src[23];
+
+		/* Tx totals. BUGFIX: drop events were previously read with the
+		 * Rx counter stride (m_nb_rx_port_counters); p_src uses the
+		 * correct Tx stride.
+		 */
+		uint64_t new_drop_events_sum = p_src[22];
+
+		uint64_t new_packets_sum =
+			(uint64_t)p_src[7] + p_src[8] + p_src[9] + p_src[10] +
+			p_src[11] + p_src[12] + p_src[13] + p_src[14] +
+			p_src[15] + p_src[16] + p_src[17] + p_src[18] +
+			p_src[19] + p_src[20] + p_src[21];
+
+		p_tx->drop_events += new_drop_events_sum;
+		p_tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color (flow class) counters, accumulated from 32-bit DMA words. */
+struct color_counters {
+	uint64_t color_packets;	/* Packets accounted to this color */
+	uint64_t color_bytes;	/* Bytes accounted to this color */
+	uint8_t tcp_flags;	/* TCP flags; producer not visible in this file - TODO confirm semantics */
+};
+
+/* Per host-buffer counters (eight consecutive 32-bit DMA words each). */
+struct host_buffer_counters {
+	uint64_t flush_packets;	/* Packets flushed */
+	uint64_t drop_packets;	/* Packets dropped */
+	uint64_t fwd_packets;	/* Packets forwarded */
+	uint64_t dbs_drop_packets;	/* Packets dropped by DBS */
+	uint64_t flush_bytes;	/* Bytes flushed */
+	uint64_t drop_bytes;	/* Bytes dropped */
+	uint64_t fwd_bytes;	/* Bytes forwarded */
+	uint64_t dbs_drop_bytes;	/* Bytes dropped by DBS */
+};
+
+/*
+ * Port counters, capture profile (v2).
+ * Accumulated in software from 32-bit FPGA DMA counters, hence 64-bit
+ * fields to avoid wrap-around between polls.
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters (derived sums, not direct DMA words) */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	/* Packet size histogram */
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;	/* Only updated when DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;	/* Only updated when DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Port counters, VSWITCH/inline profile (v1): five DMA words per port. */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Flow Matcher (FLM) module counters; the version comments below mark
+ * the FLM module version that introduced each group of fields.
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/*
+ * Statistics state for one adapter: FPGA module handles, the counter DMA
+ * area, and the accumulated counter structures for colors, host buffers
+ * and ports.
+ */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat;	/* STA module handle */
+	nthw_rmc_t *mp_nthw_rmc;	/* RMC module handle */
+	struct nt_dma_s *p_stat_dma;	/* DMA area the FPGA writes counters to */
+	uint32_t *p_stat_dma_virtual;	/* CPU-visible mapping of the DMA area */
+	uint32_t n_stat_size;	/* Size of the statistics area - TODO confirm unit (bytes) */
+
+	uint64_t last_timestamp;	/* Timestamp of the last collected snapshot */
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Which union member is valid depends on the adapter profile:
+	 * virt for VSWITCH/inline, cap for capture.
+	 */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		/* Port counters for capture */
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver;	/* FLM statistics layout version */
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+/*
+ * Statistics module lifecycle hooks, implemented in nt4ga_stat.c.
+ * NOTE(review): error/return conventions not visible here - confirm
+ * against the implementation (0 on success is assumed).
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+/* Dump statistics state to the given stream */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* Collect/accumulate counters from the DMA area (called with stat mutex) */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 65064f44ab..383ff15390 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -9,22 +9,39 @@ cflags += [
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -34,6 +51,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -49,6 +67,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/* Return true when the copper SFP product number is a known tri-speed
+ * (10/100/1000) type.
+ */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	const size_t n_entries =
+		sizeof(pn_trispeed_list) / sizeof(pn_trispeed_list[0]);
+	size_t idx = 0;
+
+	while (idx < n_entries) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+		idx++;
+	}
+
+	return false;
+}
+
+/* Tell whether the given module type uses MSA paged (table-select)
+ * addressing; plain SFP/SFP+ does not, XFP and the QSFP family do.
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	if (id == NT_NIM_SFP_SFP_PLUS)
+		return false;
+
+	if (id == NT_NIM_XFP || id == NT_NIM_QSFP ||
+	    id == NT_NIM_QSFP_PLUS || id == NT_NIM_QSFP28)
+		return true;
+
+	NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__, id);
+	return false;
+}
+
+/* Map the raw identifier byte read from the NIM onto the identifier enum. */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+/* Forward a read or write of seq_cnt bytes to the IIC module. */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* nthw_iic_read/write_data doubles the device address internally,
+	 * so halve it before handing it over.
+	 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Not needed by this access path */
+
+	if (!do_write)
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+
+	return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+				 seq_cnt, p_data);
+}
+
+/*
+ * Select a new MSA page (table select), for modules that support paging.
+ * The current page select is read first and only rewritten when it
+ * differs, since switching pages can take substantial time.
+ * Note (XFP standard 8077, ver 4.5, page 61): writing an unsupported
+ * table select value makes the byte revert to 01h, which can make some
+ * pages appear duplicated.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Fetch the page the module currently has selected */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Nothing to do when the requested page is already selected */
+	if (page_sel == curr_page_sel)
+		return 0;
+
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes at linear NIM address 'lin_addr'.
+ * The linear address space maps onto the MSA I2C devices as follows:
+ *   [0..127]                : 0xA0 device, lower page (all module types)
+ *   [128..255] (paged)      : 0xA0 device, upper pages via table select
+ *   [256..511] (non-paged)  : 0xA2 device, addresses [0..255]
+ *   [SFP_PHY_LIN_ADDR..]    : PHY at 0xAC, 16-bit registers (2 bytes/addr)
+ * Returns 0 on success, -1 on range violation or I2C failure.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	/* NOTE(review): assumes NIM instances start at 2, so instance - 2
+	 * yields the port number - confirm against instantiation code.
+	 */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices support access to address [0..127] */
+
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			/* Page N holds linear addresses [128*(N+1)..128*(N+2)-1] */
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* Advance linear address; PHY access consumes 2 bytes/address */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_READ);
+}
+
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_WRITE);
+}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	ctx->nim_id = 0;
+	int res = nim_read_id(ctx);
+
+	if (res) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
/*
 * Generate the vendor-info readers: XSFP_READ_VENDOR_INFO() defines
 * sfp_read_vendor_info() and XSFP_READ_VENDOR_INFO(q) defines
 * qsfp_read_vendor_info(). Each reads the vendor name, product number,
 * serial number, date code and (two-byte) revision from the module EEPROM
 * into the corresponding ctx fields via nim_read_vendor_info().
 *
 * NOTE(review): the address tokens are pasted with the literal prefix "Q"
 * (Q##SFP_... always expands to the QSFP_*_LIN_ADDR constants), so BOTH
 * generated functions read from the QSFP linear addresses regardless of the
 * "x" argument - confirm this is intended for the plain SFP variant.
 */
#define XSFP_READ_VENDOR_INFO(x)                                             \
	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
	{                                                                    \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
				      sizeof(ctx->vendor_name),               \
				      ctx->vendor_name);                      \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
				      sizeof(ctx->prod_no), ctx->prod_no);     \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
				      sizeof(ctx->serial_no), ctx->serial_no); \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
				      sizeof(ctx->date), ctx->date);         \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
				      (uint8_t)(sizeof(ctx->rev) - 2),       \
				      ctx->rev); /*OBS Only two bytes*/      \
	}

/* Instantiate sfp_read_vendor_info() and qsfp_read_vendor_info() */
XSFP_READ_VENDOR_INFO()
XSFP_READ_VENDOR_INFO(q)
+
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
/*
 * Map a module identifier byte to a human-readable name.
 * Unassigned or out-of-range identifiers map to "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id)
{
	/* Names indexed by identifier; NULL marks unassigned codes */
	static const char *const nim_names[] = {
		"UNKNOWN",	/* 0x00 */
		"GBIC",		/* 0x01 */
		"FIXED",	/* 0x02 */
		"SFP/SFP+",	/* 0x03 */
		"300 pin XBI",	/* 0x04 */
		"XEN-PAK",	/* 0x05 */
		"XFP",		/* 0x06 */
		"XFF",		/* 0x07 */
		"XFP-E",	/* 0x08 */
		"XPAK",		/* 0x09 */
		"X2",		/* 0x0A */
		"DWDM",		/* 0x0B */
		"QSFP",		/* 0x0C */
		"QSFP+",	/* 0x0D */
		NULL,		/* 0x0E */
		NULL,		/* 0x0F */
		NULL,		/* 0x10 */
		"QSFP28",	/* 0x11 */
		"CFP4",		/* 0x12 */
	};

	if (nim_id < sizeof(nim_names) / sizeof(nim_names[0]) &&
			nim_names[nim_id] != NULL)
		return nim_names[nim_id];
	return "ILLEGAL!";
}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate__select bits (RS0 & RS1)
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Disable TX laser.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	int res;
+	uint8_t value;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_WRITE);
+
+	return res;
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	uint8_t value;
+	uint8_t mask;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	if (lane_idx < 0) /* If no lane is specified then all lanes */
+		mask = QSFP_SOFT_TX_ALL_DISABLE_BITS;
+	else
+		mask = (uint8_t)(1U << lane_idx);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		value |= mask;
+	else
+		value &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++)
+		if (*(p_nim_len_info + i) == 255) {
+			ctx->len_info[i] = 65535;
+		} else {
+			uint32_t len = *(p_nim_len_info + i) * *(p_nim_units + i);
+
+			if (len > 65535)
+				ctx->len_info[i] = 65535;
+			else
+				ctx->len_info[i] = (uint16_t)len;
+		}
+}
+
/*
 * Read the static QSFP+ identification data into the context: DMI options,
 * vendor strings, supported-length info and the required power class.
 * Returns 0 on success, -1 on any I2C read failure.
 */
static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
{
	const bool pg_addr = page_addressing(ctx->nim_id);
	uint8_t options;
	uint8_t value;
	uint8_t nim_len_info[5];
	uint16_t nim_units[5] = { 1000, 2, 1, 1,
				 1
			       }; /* QSFP MSA units in meters */
	const char *yes_no[2] _unused = { "No", "Yes" };

	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
	       nim_id_to_text(ctx->nim_id), ctx->nim_id);

	/* Read DMI options */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
				    sizeof(options), &options, NIM_READ) != 0)
		return -1;
	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
	NT_LOG(DBG, ETHDEV,
	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
	       ctx->instance, yes_no[ctx->avg_pwr]);

	/* Vendor name/PN/SN/date/revision into the ctx string fields */
	qsfp_read_vendor_info(ctx);
	NT_LOG(DBG, PMD,
	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
	       ctx->date, ctx->rev);

	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
				    sizeof(nim_len_info), nim_len_info,
				    NIM_READ) != 0)
		return -1;

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	nim_import_len_info(ctx, nim_len_info, nim_units);

	/* Read required power level */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
				    sizeof(value), &value, NIM_READ) != 0)
		return -1;

	/*
	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
	 * If power class >= 5 setHighPower must be called for the module to be fully
	 * functional
	 */
	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
		/* NIM in power class 1 - 4 */
		ctx->pwr_level_req =
			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
				  1);
	} else {
		/* NIM in power class 5 - 7 */
		ctx->pwr_level_req =
			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
	}

	return 0;
}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) {
+		return nim_sfp_set_rate_select(ctx, speed);
+	} else if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	}
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
/*
 * Reads basic vendor and DMI information for an SFP module: validates the
 * EEPROM checksums, imports DMI/enhanced options, vendor strings,
 * supported-length info and the power level requirement/state.
 * Always returns 0.
 *
 * NOTE(review): the read_data_lin() return values are ignored throughout;
 * a failed I2C read leaves "options"/"nim_len_info" unchanged or
 * uninitialized - consider checking them as the QSFP+ variant does.
 */
static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
{
	const char *yes_no[2] _unused = { "No", "Yes" };

	/* Verify EEPROM checksums (also preloads the cache) */
	check_content_valid(ctx, 0);
	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
	       ctx->nim_id);

	/* Read DMI options */
	uint8_t options;

	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;

	/* With DMI support all five standard sensors are exposed */
	if (ctx->dmi_supp) {
		ctx->options |=
			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
			(1 << NIM_OPTION_TX_POWER);
	}

	if (ctx->dmi_supp) {
		NT_LOG(DBG, PMD,
		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
		       yes_no[ctx->specific_u.sfp.ext_cal],
		       yes_no[ctx->specific_u.sfp.addr_chg]);
	} else {
		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
	}
	/* Read enhanced options */
	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
		    &options);
	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;

	if (ctx->tx_disable)
		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);

	/* Vendor name/PN/SN/date/revision into the ctx string fields */
	sfp_read_vendor_info(ctx);

	uint8_t nim_len_info[5];

	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
		    nim_len_info);

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	uint16_t nim_units[5] = { 1000, 100, 10, 10,
				 1
			       }; /* SFP MSA units in meters */
	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);

	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
		/*
		 * Make sure that for SFP modules the supported length for SM fibers
		 * which is given in both km and 100m units is are equal to the greatest
		 * value.
		 * The following test will also be valid if NIM_LEN_MAX has been set!
		 */
		if (ctx->len_info[1] > ctx->len_info[0])
			ctx->len_info[0] = ctx->len_info[1];

		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
	}

	/* Power level: requirement bit first, then (if level 2) the status */
	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);

	if (options & SFP_POWER_LEVEL2_REQ_BIT)
		ctx->pwr_level_req = 2;
	else
		ctx->pwr_level_req = 1;

	ctx->pwr_level_cur = 1;

	if (ctx->pwr_level_req == 2) {
		/* Read the current power level status */
		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
			    &options);

		if (options & SFP_POWER_LEVEL2_GET_BIT)
			ctx->pwr_level_cur = 2;
		else
			ctx->pwr_level_cur = 1;
	}
	return 0;
}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
/*
 * Try to figure out if a sensor is present by reading its value(s) and its
 * limits. This is a highly empirical approach that cannot be guaranteed to
 * give the correct result, but it avoids depending on a PN-table solution.
 *
 * value_addr:    linear address of the first 16-bit measurement
 * lane_count:    number of consecutive 16-bit measurements (1 or 4 lanes)
 * limit_addr:    linear address of the 4 x 16-bit limit block, read in the
 *                order alarm-high, alarm-low, warn-high, warn-low
 * two_compl:     interpret raw 16-bit values as two's complement
 * sensor_option: NIM_OPTION_* bit number set in ctx->options when present
 */
static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
		uint16_t value_addr,
		uint8_t lane_count,
		uint16_t limit_addr, bool two_compl,
		uint32_t sensor_option)
{
	uint8_t data[8];
	int i, j;
	int value;
	int value_list[4];
	int limit;
	int limit_list[4];
	bool present;

	/* Read current value(s) */
	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);

	/* Assemble one big-endian 16-bit reading per lane */
	for (j = 0; j < lane_count; j++) {
		value = 0;

		for (i = 0; i < 2; i++) {
			value = value << 8;
			value += data[2 * j + i];
		}

		/* Sign-extend raw two's-complement readings */
		if (two_compl && value >= 0x8000)
			value = value - 0x10000;

		value_list[j] = value;
	}

	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
	read_data_lin(ctx, limit_addr, 8, data);

	for (j = 0; j < 4; j++) {
		limit = 0;

		for (i = 0; i < 2; i++) {
			limit = limit << 8;
			limit += data[2 * j + i];
		}

		if (two_compl && limit >= 0x8000)
			limit = limit - 0x10000;

		limit_list[j] = limit;
	}

	/* Find out if limits contradicts each other */
	int alarm_high = limit_list[0];
	int alarm_low = limit_list[1];
	int warn_high = limit_list[2];
	int warn_low = limit_list[3];

	bool alarm_limits = false; /* Are they present - that is both not zero */
	bool warn_limits = false;
	bool limit_conflict = false;

	if (alarm_high != 0 || alarm_low != 0) {
		alarm_limits = true;

		if (alarm_high <= alarm_low)
			limit_conflict = true;
	}

	if (warn_high != 0 || warn_low != 0) {
		warn_limits = true;

		/* Warning limits must be least restrictive */
		if (warn_high <= warn_low)
			limit_conflict = true;
		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
			limit_conflict = true;
	}

	/* Try to deduce if the sensor is present or not */
	present = false;

	if (limit_conflict) {
		present = false;
	} else if (warn_limits ||
		 alarm_limits) { /* Is one or both present and not contradictory */
		present = true;
	} else {
		/*
		 * All limits are zero - look at the sensor value
		 * If one sensor is non-zero the sensor is set to be present
		 */
		for (j = 0; j < lane_count; j++) {
			if (value_list[j] != 0) {
				present = true;
				break;
			}
		}

		/*
		 * If all limits and values are zero then present will be false here. In this
		 * case it is assumed that the sensor is not present:
		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
		 * with no optical input.
		 * For all investigated modules it was found that if RxPwr is implemented then
		 * the limits are also set. This is not always the case with TxBias and TxPwr
		 * but here the measured values will be non-zero when the laser is on what it
		 * will be just after initialization since it has no external hardware disable.
		 */
	}

	if (present)
		ctx->options |= (1U << sensor_option);
}
+
+/*
+ * Find active QSFP sensors.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	ctx->options = 0;
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TEMP_LIN_ADDR, 1,
+					 QSFP_TEMP_THRESH_LIN_ADDR, true,
+					 NIM_OPTION_TEMP);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_VOLT_LIN_ADDR, 1,
+					 QSFP_VOLT_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_SUPPLY);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_RX_PWR_LIN_ADDR, 4,
+					 QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_RX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_PWR_LIN_ADDR, 4,
+					 QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_BIAS_LIN_ADDR, 4,
+					 QSFP_BIAS_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_BIAS);
+}
+
/*
 * Classify an SFP/SFP+/SFP28 module and record the port type plus rate
 * capability flags in @ctx, based on the module's EEPROM identification
 * area. Byte offsets follow the SFP management layout (presumably
 * SFF-8472 — confirm against read_data_lin()'s address mapping).
 * SFP28 modules return early after the 25G-specific classification.
 */
static void sfp_find_port_params(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	uint16_t bit_rate_nom;
	uint8_t connector;
	uint8_t gig_eth_comp;
	uint8_t dmi_opt;
	uint8_t fiber_chan_tx_tech;
	unsigned int len_sm;
	unsigned int len_mm_50um;
	unsigned int len_mm_62_5um;

	ctx->specific_u.sfp.sfp28 = false;

	/* gigEthComp: gigabit-Ethernet compliance code bits (EEPROM byte 6) */
	static const uint8_t eth_1000_b_t = 1 << 3;
	static const uint8_t eth_1000_b_sx = 1 << 0;
	static const uint8_t eth_1000_b_lx = 1 << 1;

	/* fiberChanTxTech: transmitter technology bits (EEPROM byte 8) */
	static const uint8_t cu_passive = 1 << 2;
	static const uint8_t cu_active = 1 << 3;

	/* dmiOpt: diagnostic monitoring implemented (EEPROM byte 92) */
	static const uint8_t dd_present = 1 << 6;

	/* connector: 0x21 = copper pigtail (EEPROM byte 2) */
	static const uint8_t cu_pig_tail = 0x21;

	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;

	/* Byte 12: nominal bit rate, units of 100 Mb/s */
	read_data_lin(ctx, 12, sizeof(data), &data);
	bit_rate_nom = (uint16_t)(data * 100);

	read_data_lin(ctx, 2, sizeof(connector), &connector);
	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);

	/* Supported link lengths (bytes 15-17) help classify optical types */
	read_data_lin(ctx, 15, sizeof(data), &data);
	len_sm = (unsigned int)data * 100; /* Unit is 100m */

	read_data_lin(ctx, 16, sizeof(data), &data);
	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */

	read_data_lin(ctx, 17, sizeof(data), &data);
	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */

	/* First find out if it is a SFP or a SFP+ NIM */
	if (bit_rate_nom == 0) {
		/*
		 * A Nominal bit rate of zero indicates that it has not been defined and must
		 * be deduced from transceiver technology
		 */
		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
	} else if (bit_rate_nom == 25500) {
		/* SFF-8024 - 4.4 Extended Specification Compliance References */
		read_data_lin(ctx, 36, sizeof(data), &data);

		if (data == 0x02)
			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
		else if (data == 0x03)
			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
		else if (data == 0x0B)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
		else if (data == 0x0C)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
		else if (data == 0x0D)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
		else
			ctx->port_type = NT_PORT_TYPE_SFP_28;

		ctx->specific_u.sfp.sfp28 = true;
		ctx->specific_u.sfp.sfpplus = true;

		/*
		 * Whitelist of 25G transceivers known to also support 10G.
		 * There is no way to inquire about this capability.
		 */
		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
			ctx->specific_u.sfp.dual_rate = true;

			/* Change the port type for dual rate modules */
			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
		}

		return;
	}
	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
	/* Then find sub-types of each */
	if (ctx->specific_u.sfp.sfpplus) {
		if (fiber_chan_tx_tech & cu_active) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
		} else if (fiber_chan_tx_tech & cu_passive) {
			if (connector == cu_pig_tail)
				ctx->port_type =
					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
			else
				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
		} else {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
		}
		/* 1000BASE-SX/LX compliance on an SFP+ implies dual rate */
		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
			ctx->specific_u.sfp.dual_rate = true;
		}

		read_data_lin(ctx, 65, sizeof(data), &data);
		/* Test hard RATE_SELECT bit */
		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);

		read_data_lin(ctx, 93, sizeof(data), &data);
		/* Test soft RATE_SELECT bit */
		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
	} else { /* SFP */
		/* 100M */
		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
			ctx->port_type = NT_PORT_TYPE_SFP_FX;
		/* 1G */
		} else {
			ctx->specific_u.sfp.cu_type = false;
			if (gig_eth_comp & eth_1000_b_sx) {
				ctx->port_type = NT_PORT_TYPE_SFP_SX;
			} else if (gig_eth_comp & eth_1000_b_lx) {
				ctx->port_type = NT_PORT_TYPE_SFP_LX;
			} else if (gig_eth_comp & eth_1000_b_t) {
				ctx->specific_u.sfp.tri_speed =
					sfp_is_supported_tri_speed_pn(ctx->prod_no);

				if (ctx->specific_u.sfp.tri_speed) {
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
				} else {
					ctx->port_type = NT_PORT_TYPE_SFP_CU;
				}
				ctx->specific_u.sfp.cu_type = true;
			} else {
				/*
				 * Not all modules report their ethernet compliance correctly so use
				 * length indicators
				 */
				if (len_sm > 0)
					ctx->port_type = NT_PORT_TYPE_SFP_LX;
				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
					ctx->port_type = NT_PORT_TYPE_SFP_SX;
			}

			/* Add Diagnostic Data suffix if necessary */
			if (dmi_opt & dd_present) {
				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
				else if (ctx->port_type ==
						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
			}
		}
	}
}
+
+
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		ctx->speed_mask = NT_LINK_SPEED_25G; /* Default for SFP28 */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		ctx->speed_mask = NT_LINK_SPEED_10G; /* Default for SFP+ */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else { /* SFP */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+			ctx->speed_mask = NT_LINK_SPEED_100M;
+		} else {
+			ctx->speed_mask = NT_LINK_SPEED_1G; /* Default for SFP */
+			if (ctx->specific_u.sfp.dual_rate ||
+					ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_100M;
+			if (ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_10M;
+		}
+	}
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	if (fiber_chan_speed & (1 << 7)) {
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t extended_specification_compliance_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &extended_specification_compliance_code);
+
+		switch (extended_specification_compliance_code) {
+		case 0x02:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+			break;
+		case 0x03:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+			break;
+		case 0x0B:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+			break;
+		case 0x0C:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+			break;
+		case 0x0D:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+			break;
+		case 0x25:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+			break;
+		case 0x26:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+			break;
+		case 0x27:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+			break;
+		default:
+			ctx->port_type = NT_PORT_TYPE_QSFP28;
+		}
+	} else {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+	}
+}
+
/*
 * Returns true when the user must actively select the desired rate.
 * Returns false when the module may still support several rates without
 * requiring the user to select one of them; the supported rates must then
 * be deduced from the product number.
 * SFF-8636, Rev 2.10a:
 * p40: 6.2.7 Rate Select
 * p85: A.2 Rate Select
 */
static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
{
	/* Register addresses per SFF-8636 lower/upper page layout */
	const uint8_t ext_rate_select_compl_reg_addr = 141;
	const uint8_t options_reg_addr = 195;
	const uint8_t enh_options_reg_addr = 221;

	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
				0x01; /* bit: 5 */

	if (rate_select_ena == 0)
		return false;

	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
				 0x03; /* bit 3..2 */

	/* Only "extended rate selection" (type 2) is handled */
	if (rate_select_type != 2) {
		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
		       rate_select_type);
		return false;
	}

	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
				   0x03; /* bit 1..0 */

	if (ext_rate_select_ver != 0x02) {
		NT_LOG(DBG, PMD,
		       "NIM has unhandled extended rate select version (%d)",
		       ext_rate_select_ver);
		return false;
	}

	return true; /* When true selectRate() can be used */
}
+
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR) {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask =
+				0; /* PAM-4 modules can only run on all lanes together */
+	} else {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_25G;
+
+		if (qsfp28_is_rate_selection_enabled(ctx)) {
+			/*
+			 * It is assumed that if the module supports dual rates then the other rate
+			 * is 10G per lane or 40G for all lanes.
+			 */
+			if (ctx->lane_idx < 0)
+				ctx->speed_mask |= NT_LINK_SPEED_40G;
+			else
+				ctx->speed_mask = NT_LINK_SPEED_10G;
+		}
+	}
+}
+
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	switch (device_tech & 0xF0) {
+	case 0xA0: /* Copper cable unequalized */
+	case 0xB0: /* Copper cable passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+		break;
+	case 0xC0: /* Copper cable, near and far end limiting active equalizers */
+	case 0xD0: /* Copper cable, far end limiting active equalizers */
+	case 0xE0: /* Copper cable, near end limiting active equalizers */
+	case 0xF0: /* Copper cable, linear active equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+		break;
+	default: /* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+		break;
+	}
+}
+
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	ctx->speed_mask = (ctx->lane_idx < 0) ? NT_LINK_SPEED_40G :
+			 (NT_LINK_SPEED_10G);
+}
+
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	int res = sfp_read_basic_data(ctx);
+
+	if (!res) {
+		sfp_find_port_params(ctx);
+		sfp_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->specific_u.qsfp.qsfp28 = false;
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+}
+
/*
 * Pre-initialize a QSFP+ module: read identification data and, on success,
 * derive the port type, detect sensor options, read TX_DISABLE support and
 * set the supported speed mask.
 * Returns the result of qsfpplus_read_basic_data() (zero on success).
 */
static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
{
	qsfpplus_construct(ctx, lane_idx);
	int res = qsfpplus_read_basic_data(ctx);

	if (!res) {
		qsfpplus_find_port_params(ctx);
		/*
		 * If not on the known modules list try to figure out which sensors that are present
		 */
		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
			NT_LOG(DBG, NTHW,
			       "NIM options not known in advance - trying to detect");
			qsfpplus_get_qsfp_options_from_data(ctx);
		}

		/*
		 * Read if TX_DISABLE has been implemented
		 * For passive optical modules this is required while it for copper and active
		 * optical modules is optional. Under all circumstances register 195.4 will
		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
		 */
		uint8_t value;

		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);

		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;

		if (ctx->tx_disable)
			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);

		/*
		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
		 * module default disabled.
		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
		 * decided that this option should not be detected automatically but from PN
		 */
		ctx->specific_u.qsfp.rx_only =
			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
		qsfpplus_set_speed_mask(ctx);
	}
	return res;
}
+
/*
 * After module reset, wait until a QSFP28 reports it is ready so that
 * subsequent upper-page reads return valid data. Only the all-lanes
 * context (lane_idx == -1) or the first lane waits; other lanes return
 * immediately. Uses the init-complete flag when the module implements it,
 * otherwise falls back to a fixed 500 ms delay.
 */
static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	bool init_complete_flag_present = false;

	/*
	 * Revision compliance
	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
	 */
	read_data_lin(ctx, 1,
		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);

	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
	if (ctx->lane_idx > 0)
		return;

	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
		/* Check if init complete flag is implemented */
		read_data_lin(ctx, 221, sizeof(data), &data);
		init_complete_flag_present = (data & (1 << 4)) != 0;
	}

	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
	       init_complete_flag_present);

	/*
	 * If the init complete flag is not present then wait 500ms that together with 500ms
	 * after reset (in the adapter code) should be enough to read data from upper pages
	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
	 * Probably because access to the paged address space is required.
	 */
	if (!init_complete_flag_present) {
		NT_OS_WAIT_USEC(500000);
		return;
	}

	/* Otherwise wait for the init complete flag to be set */
	int count = 0;

	while (true) {
		if (count > 10) { /* 1 s timeout */
			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
			break;
		}

		/* Status byte 6, bit 0: init complete (per the check above) */
		read_data_lin(ctx, 6, sizeof(data), &data);

		if (data & 0x01) {
			NT_LOG(DBG, NTHW, "Module ready after %dms",
			       count * 100);
			break;
		}

		NT_OS_WAIT_USEC(100000); /* 100 ms */
		count++;
	}
}
+
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		if (ctx->prod_no == nim_list[i]) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		qsfp28_wait_for_ready_after_reset(ctx);
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
/*
 * Build the sensor-group list for a QSFP+/QSFP28 module on port m_port_no:
 * temperature, voltage, then four bias-current, four TX-power and four
 * RX-power sensors (one per lane). Groups are chained from
 * nim_sensors_ptr[m_port_no] and counted in *nim_sensors_cnt.
 * NOTE(review): unlike sfp_nim_add_all_sensors() this does not reset
 * *nim_sensors_cnt to zero — confirm callers zero-initialize it.
 */
static void
qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
			   struct nim_sensor_group **nim_sensors_ptr,
			   uint16_t *nim_sensors_cnt)
{
	struct nim_sensor_group *sensor = NULL;

	if (ctx == NULL || nim_sensors_ptr == NULL) {
		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
		return;
	}

	/*
	 * If the user has not provided a name for the temperature sensor then apply
	 * one automatically
	 */
	if (strlen(qsfp_sensor_level0[0].name) == 0) {
		if (ctx->specific_u.qsfp.qsfp28)
			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
				sizeof(qsfp_sensor_level0[0].name));
		else
			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
				sizeof(qsfp_sensor_level0[0].name));
	}

	/* temperature sensor */
	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
							       NT_SENSOR_SOURCE_PORT,
							       &qsfp_sensor_level0[0]);
	sensor = nim_sensors_ptr[m_port_no];
	sensor->read = &nim_read_qsfp_temp;
	(*nim_sensors_cnt)++;

	/* voltage */
	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
						 NT_SENSOR_SOURCE_LEVEL1_PORT,
						 &qsfp_sensor_level1[0]);
	sensor = sensor->next;
	sensor->read = &nim_read_qsfp_voltage;
	(*nim_sensors_cnt)++;

	/* bias current sensors (level1 entries 1..4, one per lane) */
	for (uint8_t i = 1; i < 5; i++) {
		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
							 NT_SENSOR_SOURCE_LEVEL1_PORT,
							 &qsfp_sensor_level1[i]);
		sensor = sensor->next;
		sensor->read = &nim_read_qsfp_bias_current;
		(*nim_sensors_cnt)++;
	}

	/* tx power (level1 entries 5..8, one per lane) */
	for (uint8_t i = 5; i < 9; i++) {
		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
							 NT_SENSOR_SOURCE_LEVEL1_PORT,
							 &qsfp_sensor_level1[i]);
		sensor = sensor->next;
		sensor->read = &nim_read_qsfp_tx_power;
		(*nim_sensors_cnt)++;
	}

	/* rx power (level1 entries 9..12, one per lane) */
	for (uint8_t i = 9; i < 13; i++) {
		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
							 NT_SENSOR_SOURCE_LEVEL1_PORT,
							 &qsfp_sensor_level1[i]);
		sensor = sensor->next;
		sensor->read = &nim_read_qsfp_rx_power;
		(*nim_sensors_cnt)++;
	}
}
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(struct nim_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
/* Minimal NIM state snapshot filled in by nim_state_build() */
typedef struct sfp_nim_state {
	uint8_t br; /* bit rate, units of 100 MBits/sec */
} sfp_nim_state_t, *sfp_nim_state_p;
+
/*
 * Per-module I2C context: access parameters plus identification data and
 * capabilities read from the NIM's EEPROM during pre-initialization.
 */
typedef struct nim_i2c_ctx {
	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
	uint8_t instance;
	uint8_t devaddr;
	uint8_t regaddr;
	uint8_t nim_id; /* raw identifier byte, see enum nt_nim_identifier_e */
	nt_port_type_t port_type;

	/* EEPROM identification strings (fixed fields plus NUL terminator) */
	char vendor_name[17];
	char prod_no[17];
	char serial_no[17];
	char date[9];
	char rev[5];
	bool avg_pwr;
	bool content_valid;
	uint8_t pwr_level_req;
	uint8_t pwr_level_cur;
	uint16_t len_info[5];
	uint32_t speed_mask; /* Speeds supported by the NIM */
	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
	uint8_t lane_count;
	uint32_t options; /* bitmask of (1 << nim_option_t) capabilities */
	bool tx_disable; /* module implements TX_DISABLE (register 195.4) */
	bool dmi_supp;

	union {
		/* Valid for SFP/SFP+/SFP28 modules */
		struct {
			bool sfp28;
			bool sfpplus;
			bool dual_rate;
			bool hw_rate_sel; /* hard RATE_SELECT pin implemented */
			bool sw_rate_sel; /* soft RATE_SELECT bit implemented */
			bool cu_type;
			bool tri_speed;
			bool ext_cal;
			bool addr_chg;
		} sfp;

		/* Valid for QSFP+/QSFP28 modules */
		struct {
			bool rx_only;
			bool qsfp28;
			union {
				struct {
					uint8_t rev_compliance;
					bool media_side_fec_ctrl;
					bool host_side_fec_ctrl;
					bool media_side_fec_ena;
					bool host_side_fec_ena;
				} qsfp28;
			} specific_u;
		} qsfp;

	} specific_u;
} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
/* Node in a per-port singly-linked list of sensors belonging to one NIM */
struct nim_sensor_group {
	struct nt_adapter_sensor *sensor;
	/* Sensor readout callback; receives the SPI device used for access */
	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
	struct nim_i2c_ctx *ctx;
	struct nim_sensor_group *next;
};
+
/*
 * Allocate one sensor group for 'port' (see implementation); the caller
 * owns the returned memory and links groups via 'next'.
 */
struct nim_sensor_group *
allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
			  enum nt_sensor_source_e ssrc,
			  struct nt_adapter_sensor_description *sd);

/*
 * Utility functions
 */

nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);

/*
 * Builds an nim state for the port implied by `ctx`, returns zero
 * if successful, and non-zero otherwise. SFP and QSFP nims are supported
 */
int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);

/*
 * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
 * or the string "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id);

int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);

int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
				       int lane_idx);

int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);

/*
 * This function tries to classify a NIM based on its ID and some register reads
 * and collects information into the ctx structure. The @extra parameter may contain
 * the initialization argument for specific types of NIMs.
 */
int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
			      struct nim_sensor_group **nim_sensors_ptr,
			      uint16_t *nim_sensors_cnt);

/* Read 'length' bytes at linear EEPROM address 'lin_addr' into 'data' */
int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
		void *data);

#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef NIM_DEFINES_H_
#define NIM_DEFINES_H_

#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */

/* SFP EEPROM field addresses (identification page) */
#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */

/* Linear address = diagnostics page offset + 256 */
#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)

#define QSFP_EXTENDED_IDENTIFIER 129
#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */

/* I2C addresses */
#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
#define nim_i2c_0xac 0xAC /* Address of integrated PHY */

/* Capability bits; stored in nim_i2c_ctx.options as (1 << option) */
typedef enum {
	NIM_OPTION_TEMP = 0,
	NIM_OPTION_SUPPLY,
	NIM_OPTION_RX_POWER,
	NIM_OPTION_TX_BIAS,
	NIM_OPTION_TX_POWER,
	NIM_OPTION_TX_DISABLE,
	/* Indicates that the module should be checked for the two next FEC types */
	NIM_OPTION_FEC,
	NIM_OPTION_MEDIA_SIDE_FEC,
	NIM_OPTION_HOST_SIDE_FEC,
	NIM_OPTION_RX_ONLY
} nim_option_t;

/* Values of the NIM identifier byte (EEPROM byte NIM_IDENTIFIER_ADDR) */
enum nt_nim_identifier_e {
	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
	NT_NIM_XFP = 0x06, /* Nim type = XFP */
	NT_NIM_XFF = 0x07, /* Nim type = XFF */
	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
};

typedef enum nt_nim_identifier_e nt_nim_identifier_t;

/*
 * Port types
 * The use of all non-generic XX_NOT_PRESENT is deprecated - use
 * NT_PORT_TYPE_NIM_NOT_PRESENT instead
 */
enum nt_port_type_e {
	NT_PORT_TYPE_NOT_AVAILABLE =
		0, /* The NIM/port type is not available (unknown) */
	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
	NT_PORT_TYPE_RJ45, /* RJ45 type */
	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
	NT_PORT_TYPE_SFP_SX, /* SFP SX */
	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
	NT_PORT_TYPE_SFP_LX, /* SFP LX */
	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
	NT_PORT_TYPE_SFP_CU, /* SFP copper */
	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
	NT_PORT_TYPE_XFP, /* XFP */
	NT_PORT_TYPE_XPAK, /* XPAK */
	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
	NT_PORT_TYPE_CFP4, /* CFP4 type */
	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	NT_PORT_TYPE_QSFP_PLUS_4X10,
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
	NT_PORT_TYPE_SFP_28, /* SFP28 type */
	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
	NT_PORT_TYPE_SFP_FX, /* SFP FX */
	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
	NT_PORT_TYPE_QSFP28_FR,
	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
	NT_PORT_TYPE_QSFP28_DR,
	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
	NT_PORT_TYPE_QSFP28_LR,
};

typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;

#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return "NotAvail";
+	case NT_LINK_SPEED_10M:
+		return "10M";
+	case NT_LINK_SPEED_100M:
+		return "100M";
+	case NT_LINK_SPEED_1G:
+		return "1G";
+	case NT_LINK_SPEED_10G:
+		return "10G";
+	case NT_LINK_SPEED_25G:
+		return "25G";
+	case NT_LINK_SPEED_40G:
+		return "40G";
+	case NT_LINK_SPEED_50G:
+		return "50G";
+	case NT_LINK_SPEED_100G:
+		return "100G";
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return "Unhandled";
+	}
+}
+
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	uint64_t n_link_speed = 0ULL;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		n_link_speed = 0UL;
+		break;
+	case NT_LINK_SPEED_10M:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100M:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_1G:
+		n_link_speed = (1ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_10G:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_25G:
+		n_link_speed = (25ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_40G:
+		n_link_speed = (40ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_50G:
+		n_link_speed = (50ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100G:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		n_link_speed = 0UL;
+		break;
+	}
+	return n_link_speed;
+}
+
/*
 * Render 'link_speed_mask' as a comma-separated list of speed names into
 * 'buffer' (capacity 'length' bytes) and return 'buffer'.
 * Fix: strncat() was previously called with the total buffer size as its
 * bound, but its size argument limits the number of characters APPENDED,
 * so long speed lists could overflow 'buffer'. Appends are now bounded by
 * the remaining space; overlong output is truncated instead.
 */
const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
				      uint32_t length)
{
	size_t len = 0;

	if (buffer == NULL || length == 0)
		return buffer;

	buffer[0] = 0;

	for (int i = 0; i < 32; i++) {
		if ((1U << i) & link_speed_mask) {
			len = strlen(buffer);

			/* Separator between entries, bounded by remaining space */
			if (len > 0 && (length - len - 1) > 2) {
				strncat(buffer, ", ", length - len - 1);
				len = strlen(buffer);
			}

			if (len < (length - 1))
				strncat(buffer, nt_translate_link_speed(1 << i),
					length - len - 1);
		}
	}

	return buffer;
}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+/*
+ * Note: bit positions are NOT in increasing speed order (50G/25G were
+ * assigned after 40G/100G) — never assume numeric ordering of the bits.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+	/* NT_LINK_SPEED_END == 0x81 is a sentinel, not a valid bitmask value */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read `count` consecutive 16-bit sensor values starting at linear address
+ * `addr` into p_lane_values, converting each to host byte order.
+ *
+ * Fix: the previous revision assigned each value to itself although its
+ * comment claimed a byte swap, leaving the values big endian on little
+ * endian hosts.  The value is now assembled from the raw bytes, which is
+ * correct on both little and big endian hosts.
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/* Register data is big endian; assemble to host order */
+		const uint8_t *p_bytes = (const uint8_t *)p_lane_values;
+
+		*p_lane_values = (uint16_t)((p_bytes[0] << 8) | p_bytes[1]);
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (one signed 16-bit value).
+ * The caller converts the raw value with *10/256, i.e. the register is
+ * presumably in 1/256 degC units — see nim_read_qsfp_temp().
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage (one unsigned 16-bit value).
+ * The caller divides by 10 to obtain mV — see nim_read_qsfp_voltage().
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes.
+ * p_value must point to at least four uint16_t entries.
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes.
+ * p_value must point to at least four uint16_t entries.
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes.
+ * Fix: read from QSFP_RX_PWR_LIN_ADDR; the previous revision read the TX
+ * power registers (copy/paste of qsfp_plus_nim_get_tx_power), so RX power
+ * reported TX values.
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/* Fetch the QSFP module temperature and push it to the sensor framework. */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp = 0;
+	int value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_temperature(sg->ctx, &raw_temp))
+		value = (int)(raw_temp * 10 / 256);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Fetch the QSFP supply voltage and push it to the sensor framework. */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt = 0;
+	int value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_supply_voltage(sg->ctx, &raw_volt))
+		value = (int)(raw_volt / 10);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Fetch per-lane QSFP TX bias currents and push them to the sensor framework. */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane] * 2);
+}
+
+/* Fetch per-lane QSFP TX optical power and push it to the sensor framework. */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
+
+/* Fetch per-lane QSFP RX optical power and push it to the sensor framework. */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	/* No diagnostics support in the module - nothing to read */
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* big endian -> host order (htons is its own inverse) */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration from big endian to host order */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/*
+		 * Calibrated value = raw * slope / 256 + offset, computed in
+		 * 32 bits and saturated to the 16-bit result range.
+		 */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (signed 16-bit, externally calibrated if required).
+ * The caller converts the raw value with *10/256 — see nim_read_sfp_temp().
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage (unsigned 16-bit, externally calibrated if
+ * required).  The caller divides by 10 (100uV -> mV).
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM bias current (unsigned 16-bit, externally calibrated if
+ * required).
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power (unsigned 16-bit, externally calibrated if
+ * required).
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5]; /* Coeff4..Coeff0, big endian IEEE-754 in the module */
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* big endian -> host order */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/* Byte-swap each 32-bit coefficient in place */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 * evaluated below with rx_pwr_cal[4] = Coeff0 ... rx_pwr_cal[0] = Coeff4.
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		if (rx_power > 65535)
+			return false;
+
+		/* Clamp negative results to zero before narrowing */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists.
+ * Thin wrapper that applies polynomial calibration when the module
+ * requires external calibration.
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+/* Fetch the SFP module temperature and push it to the sensor framework. */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp = 0;
+	int value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_temperature(sg->ctx, &raw_temp))
+		value = (int)(raw_temp * 10 / 256);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Fetch the SFP supply voltage and push it to the sensor framework. */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt = 0;
+	int value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &raw_volt))
+		value = (int)(raw_volt / 10); /* Unit: 100uV -> 1mV */
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Fetch the SFP TX bias current and push it to the sensor framework. */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_bias = 0;
+	int value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &raw_bias))
+		value = (int)(raw_bias * 2);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Fetch the SFP TX optical power and push it to the sensor framework. */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr = 0;
+	int value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_power(sg->ctx, &raw_pwr))
+		value = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Fetch the SFP RX optical power and push it to the sensor framework. */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr = 0;
+	int value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_rx_power(sg->ctx, &raw_pwr))
+		value = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, value);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate a zero-initialized GMF instance.
+ * Returns NULL on allocation failure.  calloc() replaces the previous
+ * malloc()+memset() pair — same behavior, idiomatic form.
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/* Release a GMF instance allocated by nthw_gmf_new(); NULL is a no-op. */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_gmf_t)); /* scrub stale pointers before free */
+	free(p);
+}
+
+/*
+ * Bind a GMF instance to its FPGA module and cache all register/field
+ * handles.  Mandatory registers use module_get_register()/
+ * register_get_field(); registers that only exist in newer GMF versions
+ * use the query variants and may legitimately be NULL.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for the instance's existence.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* Probe mode: report whether the instance exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers - present in all supported GMF versions */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 (neutral scaling) when absent */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields - NULL when the FPGA does not expose them */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Enable/disable the GMF module.  Ignored once
+ * nthw_gmf_administrative_block() has latched the block flag.
+ */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->m_administrative_block)
+		field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame-gap (IFG) control. */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable "TX now always"; no-op when the FPGA lacks the field. */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Enable/disable "TX on timestamp always"; no-op when the FPGA lacks the field. */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Enable/disable TS adjust on clock set; no-op when the FPGA lacks the field. */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock) {
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+				    enable ? 1 : 0);
+	}
+}
+
+/*
+ * Enable/disable automatic IFG adjustment.
+ * Consistency: pass `enable ? 1 : 0` like every other setter in this file
+ * instead of relying on implicit bool->uint32_t conversion (same value,
+ * uniform style).
+ */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write a raw IFG speed value.
+ * Rejects (returns -1) values above 2^(field_width - 1); otherwise writes
+ * one or two 32-bit words depending on the field width and returns 0.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Return the bit width of the IFG speed field (22 in GMF 1.2, 64 in 1.3). */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	assert(n_bit_width >=
+	       22); /* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	return n_bit_width;
+}
+
+/*
+ * Rate-limit TX to n_rate_limit_bits out of n_link_speed (both in bps).
+ * The ratio is scaled by the product parameters, converted to the
+ * hardware's fixed-point speed encoding (half the field width used as the
+ * fractional part) and written raw.  Returns 0 on success, -1 if the
+ * resulting value does not fit the field.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Rate-limit TX to a percentage of the link speed.
+ * Exactly 0.0 or 100.0 writes speed value 0 (the float equality compares
+ * are intentional sentinel checks — presumably 0 means "no limiting" in
+ * hardware; confirm against the GMF register spec).  Values up to 99 are
+ * converted to the fixed-point encoding; anything between 99 and 100
+ * (exclusive) is rejected with -1.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit clock delta (two 32-bit words) and flush. */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the 64-bit delta-adjust value; no-op when the FPGA lacks the register. */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+/* Write the 64-bit maximum-adjust slack (two 32-bit words) and flush. */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value. */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
+}
+
+/*
+ * Read the sticky status register and translate the hardware bits into
+ * the GMF_STATUS_MASK_* bitmask.
+ */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status_mask = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	status_mask |= field_get_val32(p->mp_stat_sticky_data_underflowed) ?
+		       GMF_STATUS_MASK_DATA_UNDERFLOWED : 0;
+	status_mask |= field_get_val32(p->mp_stat_sticky_ifg_adjusted) ?
+		       GMF_STATUS_MASK_IFG_ADJUSTED : 0;
+
+	return status_mask;
+}
+
+/*
+ * Clear the selected sticky status bits (write-one-to-clear style: a set
+ * mask bit triggers a field_set_flush on the corresponding hardware field).
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if ((status & GMF_STATUS_MASK_DATA_UNDERFLOWED) != 0)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if ((status & GMF_STATUS_MASK_IFG_ADJUSTED) != 0)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the next-packet timestamp (ns).  Returns all-ones when the FPGA
+ * does not expose the register.
+ * Fix: the sentinel was ULONG_MAX, which is only 0xFFFFFFFF on ILP32
+ * targets although the return type is uint64_t; use ULLONG_MAX (>= 64 bit,
+ * <limits.h> is already included by this file).
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = ULLONG_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the maximum packet delay (ns).  Returns all-ones when the FPGA
+ * does not expose the register.
+ * Fix: sentinel ULONG_MAX -> ULLONG_MAX; ULONG_MAX is only 32 bits on
+ * ILP32 targets while the return type is uint64_t.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = ULLONG_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Disable the GMF and latch the administrative block flag so that later
+ * nthw_gmf_set_enable() calls are ignored (used to enforce license expiry).
+ * There is no unblock function in this file.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/*
+ * Bit masks for nthw_gmf_get_status_sticky()/nthw_gmf_set_status_sticky().
+ * The values are OR'ed and tested as individual bits, so they are written
+ * as explicit powers of two instead of relying on enum auto-increment
+ * (which would silently produce a non-power-of-two if a value is added).
+ * Numeric values are unchanged (1 and 2).
+ */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1 << 0,
+	GMF_STATUS_MASK_IFG_ADJUSTED = 1 << 1
+};
+
+/*
+ * Handle for one GMF (Generic MAC Feeder) FPGA module instance.
+ * The mp_* register/field pointers are resolved from the FPGA register
+ * model at init time; registers that are optional in a given FPGA image
+ * (e.g. mp_stat_next_pkt, mp_stat_max_delayed_pkt) may be NULL and are
+ * tested before use by the accessor functions.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	/* CTRL register: enable and inter-frame-gap (IFG) control fields */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	/* IFG speed (rate limit) register */
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	/* Sticky status bits, exposed via GMF_STATUS_MASK_* */
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* Optional statistics registers - NULL when absent from the image */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* IFG speed scaling parameters from the FPGA product parameters */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, free */
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Enable/disable the module and its IFG (inter-frame gap) machinery */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+/* IFG speed (TX rate limiting) configuration */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+/* Clock delta / slack / compensation tuning */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+/* Sticky status access (GMF_STATUS_MASK_* bit masks) */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One Si5340 configuration write: 16-bit register address, 8-bit value */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+/*
+ * Si5340 rev-D register/value configuration table.
+ * NOTE(review): this looks like a machine-generated ClockBuilder Pro
+ * export (the leading/trailing 0x0B24/0x0B25 writes appear to be the
+ * tool's preamble/postamble) - do not edit entries by hand; confirm
+ * against the generating project file.
+ * NOTE(review): a const object DEFINED in a header without 'static' will
+ * cause duplicate-definition link errors if this header is included from
+ * more than one translation unit - confirm single inclusion or make it
+ * static.
+ */
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc == malloc + memset(0) in the original */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/*
+ * Scrub and free an RMC handle; a NULL argument is ignored.
+ */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Bind an RMC handle to RMC module instance n_instance of p_fpga and
+ * resolve all register/field shadows.
+ * Calling with p == NULL only probes for the instance: returns 0 when it
+ * exists, -1 when it does not.
+ * Returns 0 on success, -1 when the instance is not present.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the generic port count when unset */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* The remaining registers are optional - left NULL when absent */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Return the current per-MAC-port blocking mask (re-read from hardware). */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/*
+ * Return the SF RAM overflow counter/flag, or 0xffffffff when the optional
+ * STATUS register is not present in this FPGA image.
+ */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/*
+ * Return the descriptor FIFO overflow counter/flag, or 0xffffffff when the
+ * optional STATUS register is not present in this FPGA image.
+ */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/*
+ * Return the DBG merge value, or 0xffffffff when the optional DBG register
+ * is not present in this FPGA image.
+ */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+/*
+ * Return the MAC interface error value, or 0xffffffff when the optional
+ * MAC_IF register is not present in this FPGA image.
+ */
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_mac_if)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_mac_if_err);
+}
+
+/* Write the per-MAC-port blocking mask and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block RX: drop statistics, keep-alive and all MAC ports.
+ * A no-op once the RMC has been administratively blocked (license expiry).
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Unblock RX for all configured ports (or NIMs when b_is_slave is set).
+ * A no-op once the RMC has been administratively blocked (license expiry).
+ * NOTE(review): the shift assumes mn_nims/mn_ports < 32; shifting a 32-bit
+ * value by 32 or more is undefined behavior - confirm the upper bound.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Permanently block all MAC ports and latch the administrative-block flag
+ * (used to enforce license expiry). Once set, nthw_rmc_block()/
+ * nthw_rmc_unblock() become no-ops.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports;
+	int mn_nims;
+	bool mb_is_vswitch;
+
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Next free FPGA sensor-result slot, per adapter. The "s_" prefix marks
+ * this as file-local state, so give it internal linkage ('static') to
+ * keep it out of the global namespace - it is only used by get_fpga_idx()
+ * below.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * This function setups monitoring of AVR sensors
+ * Configures the AVR to monitor one sensor (device avr_dev, register
+ * avr_dev_reg) and place the result in the next free FPGA result slot for
+ * this adapter. Returns the FPGA slot index assigned to the sensor.
+ * Note: only setup_data[0] is initialized; setup_cnt = 1 ensures
+ * nt_avr_sensor_mon_setup() transmits just that one entry.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format: b0,1 = endianness, b2,3 = signedness (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	/* The slot index is returned even on setup failure - only logged here */
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Read-callback for AVR-monitored sensors: fetch the raw result from the
+ * FPGA slot assigned to this sensor, convert it with the group's
+ * conversion function and store it in the sensor.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/*
+	 * Zero-initialize: sensor_read() may fail without writing the result,
+	 * and the original converted an uninitialized value in that case.
+	 */
+	uint32_t p_sensor_result = 0;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	/*
+	 * NOTE(review): the status returned by sensor_read() is ignored; on
+	 * failure the zero value is still converted and stored - consider
+	 * checking it and skipping the update.
+	 */
+	sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+/*
+ * Allocate and register one AVR-monitored sensor group.
+ * Configures the AVR monitoring via _avr_sensor_init() and wires the
+ * read/conversion callbacks. Returns NULL on allocation failure.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* Fix: the original dereferenced a failed allocation below */
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Hand out the next free FPGA result slot for the given adapter and
+ * advance the per-adapter counter (wraps at 256, as in the original).
+ * NOTE(review): adapter_no is not range-checked against MAX_ADAPTERS -
+ * callers are assumed to pass a valid adapter number.
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	return s_fpga_indexes[adapter_no]++;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Read-callback for the FPGA die-temperature sensor. The SPI handle is
+ * unused - the raw value comes straight from the TEMPMON status register.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Convert the raw ADC reading to degrees Celsius.
+	 * Fix: widen to signed 64-bit before subtracting - with the original
+	 * uint32_t arithmetic a raw value below 2220 (sub-zero temperature)
+	 * wrapped around and produced a huge bogus reading.
+	 * NOTE(review): the constants look like a fixed-point FPGA sysmon
+	 * transfer function - confirm against the sensor documentation.
+	 */
+	temp = (int)(((int64_t)val * 20159 - 44752896) / 16384);
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Allocate and initialize the FPGA die-temperature sensor group.
+ * Returns NULL on allocation failure.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	if (sg->monitor == NULL) {
+		/* Fix: the read callback dereferences sg->monitor->fields */
+		NT_LOG(ERR, ETHDEV, "%s: monitor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate an uninitialized FPGA sensor monitor.
+ * Logs an error and returns NULL on allocation failure; the caller must
+ * run tempmon_init() before use.
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *p_monitor =
+		malloc(sizeof(*p_monitor));
+
+	if (p_monitor == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+
+	return p_monitor;
+}
+
+/*
+ * Resolve the TEMPMON module, its STAT register and the TEMP field into
+ * the monitor handle. On any lookup failure an error is logged and the
+ * function returns early - the original kept going and passed the NULL
+ * module/register into the next lookup, crashing there.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return;
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return;
+	}
+	/* fetch fields (single-element array holding the TEMP field) */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+/* Short aliases to keep the sensor description tables below readable */
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+/* Sensor subtypes: none / average power */
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+/* Alarm policy: enabled / disabled */
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+/* Level-0 (always present) SFP sensor: module temperature */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level-1 SFP diagnostics: supply voltage, TX bias, TX/RX optical power */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* Level-0 (always present) QSFP sensor: module temperature; the empty
+ * name is filled in automatically by the sensor framework.
+ */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* Level-1 QSFP diagnostics: supply voltage plus per-lane (1-4) TX bias
+ * and TX/RX optical power.
+ */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+/* Linear address of the temperature field in XFP NIM diagnostics */
+#define XFP_TEMP_LIN_ADDR 96
+
+/* Sensor description tables defined in nim_sensors.c */
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+/* Sentinel "no limit" values transmitted to the AVR (see limit_low/high) */
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+/* Signedness of the raw sensor data (encoded in setup_data16.format b2,3) */
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+/*
+ * NOTE(review): these values are exchanged with the AVR firmware
+ * (setup_data16.device), so the ordering appears to be a firmware ABI -
+ * confirm before inserting new entries anywhere but at the end.
+ */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/*
+ * One sensor-monitoring setup entry as transmitted raw to the AVR.
+ * pack(1) makes this the exact wire layout - no padding between members.
+ */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+#pragma pack()
+/*
+ * Batch of setup entries sent via nt_avr_sensor_mon_setup(); only the
+ * first setup_cnt entries are transmitted.
+ * NOTE(review): packing is reset before this struct; since the packed
+ * element type has alignment 1 there should be no padding after
+ * setup_cnt, but confirm - this struct is also transmitted raw.
+ */
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+/* Commands for nt_avr_sensor_mon_ctrl() (AVR_OP_SENSOR_MON_CONTROL) */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Perform one SPI transaction with the AVR: transmit 'txsz' bytes from 'tx'
+ * and receive up to '*rxsz' bytes into 'rx'.  On success, '*rxsz' (when
+ * non-NULL) is updated with the number of bytes actually received.
+ * Returns 0 on success, or the non-zero status from the SPI transfer.
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	/*
+	 * Fix: the previous code dereferenced rxsz before its NULL check,
+	 * so the check was dead and a NULL rxsz would crash.  Treat a NULL
+	 * rxsz as "no receive buffer".
+	 */
+	struct tx_rx_buf m_rx = { .size = (uint16_t)(rxsz != NULL ? *rxsz : 0),
+				  .p_buf = rx };
+	int res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Read the latest result for the sensor at 'fpga_idx' via the SPI slave
+ * interface.  Thin wrapper around nthw_spis_read_sensor(); the value is
+ * stored in '*p_sensor_result' and the underlying call's status is returned.
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+/*
+ * Send a sensor monitoring setup message to the AVR.
+ * Only the header plus the first setup_cnt entries of setup_data are
+ * transmitted.  Returns 0 on success, non-zero on failure.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+	const size_t max_cnt =
+		sizeof(p_setup->setup_data) / sizeof(p_setup->setup_data[0]);
+
+	/* Guard: an oversized entry count would make tx_size exceed the struct. */
+	if (p_setup->setup_cnt > max_cnt) {
+		NT_LOG(ERR, ETHDEV, "%s: setup_cnt %u exceeds maximum %zu",
+		       __func__, p_setup->setup_cnt, max_cnt);
+		return 1;
+	}
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		/* Fix: dropped the stray '\n'; no other NT_LOG message here has one. */
+		NT_LOG(ERR, ETHDEV, "%s failed", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Issue a sensor monitoring control command (stop / run / remove-all)
+ * to the AVR.  Returns 0 on success, non-zero otherwise.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	size_t rx_size = 0;
+	int error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+			 (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	/* A control command must not return any payload. */
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated.
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Master/Slave
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_MASTER, /* Adapter is master in the bonding */
+	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+/*
+ * Free a sensor group together with the sensor and monitor objects it owns.
+ * Safe to call with sg == NULL.
+ */
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		/* free(NULL) is a no-op, so no per-pointer guards are needed. */
+		free(sg->sensor);
+		free(sg->monitor);
+		free(sg);
+	}
+}
+
+/*
+ * Allocate and initialize a single adapter/port sensor.
+ *
+ * The returned object is heap-allocated and owned by the caller (normally
+ * released through sensor_deinit() on the enclosing group).  Returns NULL
+ * if the allocation fails.
+ */
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	/*
+	 * Fix: fpga_idx was left uninitialized (malloc garbage).  0xFF marks
+	 * "not assigned yet", matching the m_intf_no/m_adapter_no sentinels.
+	 */
+	sensor->fpga_idx = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	/*
+	 * Copy at most NT_INFO_SENSOR_NAME characters; info.name holds
+	 * NT_INFO_SENSOR_NAME + 1 bytes, so termination at [NT_INFO_SENSOR_NAME]
+	 * is always in bounds.
+	 */
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/*
+ * Record a new reading on a sensor and keep the running lowest/highest
+ * values up to date.  NT_SENSOR_NAN marks a min/max that was never set.
+ * A NULL sensor is ignored.
+ */
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (!sensor)
+		return;
+
+	struct nt_info_sensor_s *info = &sensor->info;
+
+	info->value = value;
+	if ((unsigned int)info->value_highest == NT_SENSOR_NAN ||
+			info->value_highest < value)
+		info->value_highest = value;
+	if ((unsigned int)info->value_lowest == NT_SENSOR_NAN ||
+			info->value_lowest > value)
+		info->value_lowest = value;
+}
+
+/*
+ * Allocate a sensor from a static description-table entry.
+ * Mirrors allocate_sensor(); returns NULL on allocation failure.
+ */
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	/* Fix: fpga_idx was left uninitialized; 0xFF marks "not assigned yet". */
+	sensor->fpga_idx = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	/*
+	 * Fix: sub_type, state and the value fields were previously left
+	 * uninitialized (and descr->sub_type was silently ignored).
+	 * Initialize them the same way allocate_sensor() does.
+	 */
+	sensor->info.sub_type = descr->sub_type;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	/* Bounded name copy; info.name holds NT_INFO_SENSOR_NAME + 1 bytes. */
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/*
+ * Reset a sensor group to the empty state: no sensor, no monitor,
+ * no read/conversion callbacks and no link to a next group.
+ */
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	*sg = (struct nt_sensor_group) { 0 };
+}
+
+/* Getters */
+/* Current value of the group's sensor. */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	/* Fix: removed the stray ';' after the body (invalid in ISO C < C23). */
+	return sg->sensor->info.value;
+}
+
+/* Lowest value ever recorded on the group's sensor. */
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	/* Fix: removed the stray ';' after the body (invalid in ISO C < C23). */
+	return sg->sensor->info.value_lowest;
+}
+
+/* Highest value ever recorded on the group's sensor. */
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	/* Fix: removed the stray ';' after the body (invalid in ISO C < C23). */
+	return sg->sensor->info.value_highest;
+}
+
+/* NUL-terminated display name of the group's sensor. */
+char *get_name(struct nt_sensor_group *sg)
+{
+	/* Fix: removed the stray ';' after the body (invalid in ISO C < C23). */
+	return sg->sensor->info.name;
+}
+
+/* Conversion functions */
+/* Identity conversion: reinterpret the low 16 bits as a signed reading. */
+int null_signed(uint32_t p_sensor_result)
+{
+	int16_t raw = (int16_t)p_sensor_result;
+
+	return raw;
+}
+
+/* Identity conversion: reinterpret the low 16 bits as an unsigned reading. */
+int null_unsigned(uint32_t p_sensor_result)
+{
+	uint16_t raw = (uint16_t)p_sensor_result;
+
+	return raw;
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vch value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.015 (PRESCALE is accounted for)
+ * ******************************************************************************
+ */
+int exar7724_vch(uint32_t p_sensor_result)
+{
+	/* 15 mV per LSB -> NT unit: 1 mV */
+	const uint32_t mv_per_lsb = 15;
+
+	return p_sensor_result * mv_per_lsb;
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vin value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.0125
+ * ******************************************************************************
+ */
+int exar7724_vin(uint32_t p_sensor_result)
+{
+	/* 12.5 mV per LSB, computed in integers as (val * 25) / 2 -> NT unit: 1 mV */
+	uint32_t scaled = p_sensor_result * 25;
+
+	return scaled / 2;
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Tj value to Napatech internal representation
+ * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
+ *                      = ReadVal * 5K
+ * ******************************************************************************
+ */
+int exar7724_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * Each LSB is 5 K, i.e. 50 in 0.1-degree units.  2730 (not the more
+	 * exact 2732) is subtracted for Kelvin->Celsius, because the 5-degree
+	 * sensor resolution makes round steps more natural.
+	 */
+	return p_sensor_result * 50 - 2730; /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * Conversion function for the Linear Technology Linear_5s_11s format.
+ * The function returns Y * 2**N, where N = b[15:11] is a 5-bit two's complement
+ * integer and Y = b[10:0] is an 11-bit two's complement integer.
+ * The multiplier value is used for scaling to Napatech units.
+ * ******************************************************************************
+ */
+static int conv5s_11s(uint16_t value, int multiplier)
+{
+	/* Sign-extend the 11-bit mantissa (bit 10 is the sign bit). */
+	int mantissa = value & 0x07FF;
+
+	if (mantissa & 0x0400)
+		mantissa -= 0x0800;
+
+	/* Sign-extend the 5-bit exponent (bit 4 is the sign bit). */
+	int exponent = (value >> 11) & 0x1F;
+
+	if (exponent & 0x10)
+		exponent -= 0x20;
+
+	mantissa *= multiplier;
+
+	if (exponent > 0)
+		mantissa *= 1 << exponent;
+	else if (exponent < 0)
+		mantissa /= 1 << -exponent;
+
+	return mantissa;
+}
+
+/*
+ * ******************************************************************************
+ * Temperature conversion from Linear_5s_11s format.
+ * NOTE(review): the result is truncated through uint16_t before being
+ * returned as int, so a negative temperature from conv5s_11s() would come
+ * back as a large positive value - confirm this is intended for this
+ * sensor's operating range.
+ * ******************************************************************************
+ */
+int ltm4676_tj(uint32_t p_sensor_result)
+{
+	return (uint16_t)conv5s_11s(p_sensor_result, 10); /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For MP2886a: Convert a read Tj value to Napatech internal representation
+ * ******************************************************************************
+ */
+int mp2886a_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * MPS-2886p READ_TEMPERATURE (register 0x8D) is a 2-byte unsigned
+	 * integer; only the low 16 bits of the raw result are significant.
+	 */
+	uint16_t raw = (uint16_t)p_sensor_result;
+
+	return raw; /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * A high byte of 0xFF marks an invalid/missing reading.
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.1C, Native unit: 0.25 deg (comment was swapped) */
+}
+
+/*
+ * ******************************************************************************
+ * For DS1775: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int ds1775_t(uint32_t p_sensor_result)
+{
+	/* NT unit: 0.1 deg, Native unit: 1/256 C */
+	uint32_t tenths = (p_sensor_result * 10) / 256;
+
+	return tenths;
+}
+
+/*
+ * ******************************************************************************
+ * For FAN: Convert a tick count to RPM
+ * NT unit: RPM, Native unit: 2 ticks/revolution
+ * NOTE(review): the code divides by 4, which does not match the stated
+ * 2 ticks/revolution unless the count covers two sampling windows -
+ * confirm which figure is correct for this tachometer.
+ * ******************************************************************************
+ */
+int fan(uint32_t p_sensor_result)
+{
+	return (p_sensor_result * 60U / 4);
+}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
+enum nt_sensor_event_alarm_e {
+	NT_SENSOR_ENABLE_ALARM,
+	NT_SENSOR_LOG_ALARM,
+	NT_SENSOR_DISABLE_ALARM,
+};
+
+/*
+ * Sensor Class types
+ */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
+};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no;
+	uint8_t m_intf_no;
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si;
+	struct nt_info_sensor_s info;
+	enum nt_sensor_event_alarm_e alarm;
+	bool m_enable_alarm;
+};
+
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga;
+	nt_module_t *mod;
+
+	nt_register_t *reg;
+	nt_field_t **fields;
+	uint8_t fields_num;
+};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name. */
+};
+
+struct nt_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	struct nt_fpga_sensor_monitor *monitor;
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi);
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result);
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next;
+};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+#define NT_INFO_SENSOR_NAME 50
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
+/* The NT200A02 adapter sensor id's */
+enum nt_sensors_adapter_nt200a02_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */
+
+	NT_SENSOR_NT200A02_MCU_TEMP,
+	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NT200A02_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NIM_POWER,
+
+	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A01 level 0,1 board sensors */
+};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v2 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-17 14:43   ` Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 383ff15390..c184d5d4b5 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -17,6 +17,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -58,6 +59,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a new, zero-initialized cat_nthw instance.
+ *
+ * Returns NULL on allocation failure; the caller owns the object and
+ * must release it with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step, replacing malloc+memset */
+	struct cat_nthw *p = calloc(1, sizeof(struct cat_nthw));
+
+	return p;
+}
+
+/*
+ * Release an instance allocated by cat_nthw_new(); NULL is accepted and
+ * is a no-op. The struct is cleared before being freed so any stale
+ * module/register/field pointers cannot be reused after deletion.
+ */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Forward the debug-mode setting to the underlying CAT module.
+ * NOTE(review): p and p->m_cat are dereferenced unconditionally, so this
+ * must only be called after a successful cat_nthw_init().
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN table address (ADR) to be accessed next. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN control CNT field. Renamed from the meaningless 'r' to
+ * follow the cat_nthw_cfn_* naming convention of the sibling setters.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/* Write the ENABLE field of the currently selected CFN record. */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/*
+ * Set the CAT_CFN_DATA_PTC_CFP field. This field is resolved with
+ * register_query_field() during init and may therefore be NULL on FPGA
+ * images that lack it; the assert enforces that callers only use it
+ * when the capability is present.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/*
+ * Write the PM_CMP field. Unlike the other setters this field is wider
+ * than 32 bits: 'val' points to an array of mn_words 32-bit words.
+ */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/*
+ * Set the KM1_OR field. mp_cfn_data_km1_or is only populated by init
+ * when the product exposes a KM_IF_CNT parameter (m_km_if_cnt != -1),
+ * hence the assert guarding against use on single-KM-interface images.
+ */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Flush the pending CFN control and data register writes (one entry). */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE register bank accessors. 'index' selects the KM interface (the KCE
+ * register/field arrays are sized [2] in struct cat_nthw).
+ * NOTE(review): index is not range-checked; callers must pass 0 or 1.
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+/* Flush the staged KCE CTRL/DATA values for the given KM interface. */
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+/*
+ * KCS register bank accessors; 'index' selects KM interface 0 or 1
+ * (not range-checked, same convention as the KCE accessors).
+ */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+/* Flush the staged KCS CTRL/DATA values for the given KM interface. */
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+/*
+ * FTE register bank accessors; 'index' selects KM interface 0 or 1
+ * (not range-checked, same convention as the KCE/KCS accessors).
+ */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+/* Flush the staged FTE CTRL/DATA values for the given KM interface. */
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE DATA-register per-engine enable setters. Values are staged in the
+ * register model and written to hardware by cat_nthw_cte_flush().
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+/*
+ * The remaining enable fields appear to be optional in the register layout;
+ * the asserts catch use on FPGA images where the field was not resolved
+ * (left NULL) at init time.
+ */
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Write the staged CTE CTRL and DATA shadow values to the FPGA. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS DATA-register setters; flushed to hardware by cat_nthw_cts_flush(). */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT DATA-register setters; flushed to hardware by cat_nthw_cot_flush(). */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	/* NFV_SB appears optional in the layout; assert guards a NULL field. */
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT DATA-register setters; flushed to hardware by cat_nthw_cct_flush(). */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO DATA-register setters; flushed to hardware by cat_nthw_exo_flush(). */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	/* Signed offset; stored via the 32-bit field writer as-is. */
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK register accessors; flushed to hardware by cat_nthw_rck_flush(). */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	/*
+	 * RCK DATA has no named sub-field here: write the whole register
+	 * (one 32-bit word) and mark it dirty explicitly so the following
+	 * flush pushes it out.
+	 */
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN DATA-register setters; flushed to hardware by cat_nthw_len_flush(). */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC register accessors. All KCC registers/fields appear optional in the
+ * layout (every accessor asserts the pointer), so these may only be called
+ * on FPGA images where the KCC bank was resolved at init time.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	/*
+	 * Key is written as two 32-bit words; 'val' must point to at least 2
+	 * words. NOTE(review): word count is hard-coded here, unlike
+	 * cat_nthw_cfn_pm_cmp() which uses the field's mn_words -- confirm
+	 * the KCC key is always exactly 64 bits.
+	 */
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+/*
+ * CCE register accessors. Like KCC, every accessor asserts its pointer, so
+ * the CCE bank appears optional and must have been resolved at init time.
+ */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/*
+ * CCS register accessors. Every accessor asserts its pointer, so the CCS
+ * bank appears optional and must have been resolved at init time.
+ */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generate one setter per CCS DATA field. The expansion sites below take no
+ * trailing semicolon: a function definition followed by ';' would be an
+ * empty top-level declaration, which strict ISO C rejects (-Wpedantic).
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/* Write the staged CCS CTRL and DATA shadow values to the FPGA. */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN -- shadow setters; values reach the FPGA on cat_nthw_cfn_flush(). */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/*
+ * Fixed garbled prototype: was "void r(...)", which matches no definition
+ * and pollutes the global namespace. Every register bank pairs a select
+ * with a cnt setter (and struct cat_nthw has mp_cfn_addr/mp_cfn_cnt), so
+ * this is the CFN count setter.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Driver-side model of one CAT FPGA module instance: cached register and
+ * field handles resolved once at init, used by the accessors above.
+ * Fields asserted non-NULL by their accessors may be absent (NULL) on some
+ * FPGA images.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	/* Number of KM interfaces; governs use of the [2]-sized banks below. */
+	int m_km_if_cnt;
+
+	/* CFN control/data registers and their fields */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE bank, one entry per KM interface */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	/* KCS bank, one entry per KM interface */
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	/* FTE bank, one entry per KM interface */
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE registers and per-engine enable fields */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS registers */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT registers */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT registers */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO registers */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK registers; DATA is written as a whole register (no sub-fields) */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN registers */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC registers (optional bank; accessors assert non-NULL) */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE registers (optional bank; accessors assert non-NULL) */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS registers (optional bank; accessors assert non-NULL) */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU module model. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate a zeroed CSU instance.
+ * Returns NULL on allocation failure; release with csu_nthw_delete().
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset). */
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/*
+ * Destroy a CSU instance created by csu_nthw_new(). Accepts NULL.
+ * The object is cleared before being freed, matching the original code.
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a CSU instance to FPGA module instance n_instance and resolve its
+ * RCP CTRL/DATA register and field handles.
+ *
+ * Probe mode: when p is NULL, only checks whether the module instance
+ * exists (returns 0 if present, -1 if not) without touching any state.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Resolve RCP register/field handles once; accessors reuse them. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Stage the RCP CTRL address field; written out by csu_nthw_rcp_flush(). */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* Stage the RCP CTRL count field; written out by csu_nthw_rcp_flush(). */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Stage the outer-L3 checksum command; see value table below. */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/* Stage the outer-L4 checksum command; see value table below. */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/* Stage the inner (tunneled) L3 checksum command; see value table below. */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/* Stage the inner (tunneled) L4 checksum command; see value table below. */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Write the staged RCP CTRL and DATA shadow values to the FPGA. */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one CSU flow-filter FPGA module instance (checksum command
+ * recipes). Register/field pointers are resolved in csu_nthw_init().
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	/* RCP control register: recipe address and count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	/* RCP data register: outer/inner L3/L4 checksum command fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+/* Lifecycle: allocate, free, and bind to an FPGA module instance. */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-field setters; csu_nthw_rcp_flush() commits them to the HW. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Allocate a zero-initialized FLM handle; returns NULL on allocation failure.
+ * Pair with flm_nthw_delete().
+ */
+struct flm_nthw *flm_nthw_new(void)
+{
+	struct flm_nthw *p = malloc(sizeof(struct flm_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free a handle created by flm_nthw_new(); NULL is a no-op.
+ * The struct is scrubbed before free to make stale-pointer reuse fail fast.
+ */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Forward the debug-mode setting to the underlying FLM module object. */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+/* Bind handle @p to FLM module instance @n_instance of @p_fpga and resolve
+ * all register/field pointers used by the accessors in this file.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ * Registers resolved with module_query_register()/register_query_field() are
+ * optional on some FPGA variants and may be left NULL; their accessors
+ * assert() before use.
+ */
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: with a NULL handle, just report module presence. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	/* CONTROL register and fields. */
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	/* PDS/PIS are optional fields: query_field may return NULL. */
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	/* STATUS register and fields. */
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	/* Timeout, scrub and load registers. */
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	/* Priority register: four limit/flow-type pairs. */
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	/* PST (indexed ctrl/data pair). */
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	/* RCP (recipe) ctrl/data pair. */
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Registers accessed via RAC DMA in the *_data_* helpers below. */
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	/* Statistics counters; query_* entries are optional and may be NULL. */
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM_CONTROL shadow-field setters. Values are staged in the register shadow
+ * and written to the device only by flm_nthw_control_flush().
+ * PDS/PIS are optional fields (may be absent on some FPGA variants), hence
+ * the assert() before use.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Commit all staged FLM_CONTROL field values to the device. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM_STATUS accessors. With get != 0 the cached field value is returned in
+ * *val; call flm_nthw_status_update() first to refresh the cache from HW.
+ * CALIBDONE/INITDONE/IDLE/EFT_BP ignore the set path (get == 0 is a no-op);
+ * CRITICAL/PANIC/CRCERR additionally stage *val for write when get == 0
+ * (committed by flm_nthw_status_flush()).
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write staged STATUS fields to the device. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Re-read the STATUS register into the shadow cache. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT / SCRUB / LOAD_* setters. Each value is staged in the register
+ * shadow and committed by the corresponding *_flush() below.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * FLM_PRIO setters: four limit/flow-type pairs staged in the shadow and
+ * committed together by flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table accessors: select entry (adr/cnt), stage data fields, then
+ * flm_nthw_pst_flush() writes the ctrl and data registers to the device.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table accessors: select entry (adr/cnt), stage data fields,
+ * then flm_nthw_rcp_flush() commits ctrl and data to the device.
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: expects 10 consecutive 32-bit words in 'val'. */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Read BUF_CTRL via RAC DMA and decode the three buffer levels:
+ *   word0[15:0]  -> *lrn_free, word0[31:16] -> *inf_avail,
+ *   word1[15:0]  -> *sta_avail.
+ * Returns 0 on success, non-zero RAC error otherwise; outputs are only
+ * written on success.
+ * NOTE(review): the index masking assumes bc_buf.size is a power of two —
+ * confirm against the RAC DMA buffer allocation.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	/* NOTE(review): -1 is dead — immediately overwritten by dma_begin. */
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/* Write @word_count learn-data words to LRN_DATA via RAC DMA.
+ * Announces the word count through BUF_CTRL first, then reads BUF_CTRL back
+ * to report the updated buffer levels (same decoding as
+ * flm_nthw_buf_ctrl_update()).
+ * Returns 0 on success; -1 if the DMA transaction could not be started, or
+ * the commit error code.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	if (nthw_rac_rab_dma_begin(rac) == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/* Read @word_count info-data words from INF_DATA via RAC DMA into @data.
+ * The word count is announced in BUF_CTRL word0[31:16] before the read, and
+ * BUF_CTRL is read back to report updated buffer levels.
+ * Returns 0 on success; outputs are only written on success.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring (size assumed power of two). */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/* Read @word_count status-data words from STA_DATA via RAC DMA into @data.
+ * The word count is announced in BUF_CTRL word1 before the read, and
+ * BUF_CTRL is read back to report updated buffer levels.
+ * Returns 0 on success; outputs are only written on success.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring (size assumed power of two). */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM statistics accessors.
+ * ..._cnt(val, get): when get != 0, return the cached counter in *val; call
+ *   the matching ..._update() first to refresh the cache.
+ * ..._update(): re-read the backing register.
+ * Registers/fields resolved with the query_* API in flm_nthw_init() are
+ * optional and may be NULL; those accessors assert() before use.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* FLM (Flow Matcher) hardware access object.
+ * Lifetime: flm_nthw_new() allocates, flm_nthw_init() binds the object to
+ * FLM module instance n_instance of p_fpga, flm_nthw_delete() frees it.
+ */
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control register field setters; *_flush() writes staged values to hardware */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status register accessors; getters copy the field when 'get' is non-zero */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP (recipe table) */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Statistics counters: each *_update() refreshes the register shadow from
+ * hardware, each *_cnt() copies the cached field when 'get' is non-zero.
+ */
+
+/* Stat Lrn Done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/* Internals of the FLM hardware access object: handles to the FLM module
+ * and to its registers/fields, all resolved once by flm_nthw_init().
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	/* Handle to the FLM module instance on the FPGA */
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* Load threshold registers (BIN/PPS/LPS/APS) */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/flow-type pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST table: control (address/count) plus data fields */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP (recipe) table: control (address/count) plus data fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Buffer control and data path registers */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counter registers, one register/field pair each */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module handle. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/* Allocate a zero-initialized hfu_nthw object; NULL on allocation failure. */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as malloc followed by memset */
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/* Release an hfu_nthw object; scrubs it first so stale pointers fail fast. */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an hfu_nthw object to HFU module instance n_instance of p_fpga and
+ * resolve all register/field handles used by the accessors below.
+ *
+ * When p is NULL the call only probes for the module instance.
+ * Returns 0 on success (or when probing finds the instance), -1 when the
+ * module instance does not exist.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of a redundant second
+	 * fpga_query_module() call (matches hsh_nthw_init()).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/* Stage the RCP control ADR field (record index for subsequent accesses). */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Stage the RCP control CNT field. */
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* HFU_RCP_DATA LEN A/B/C field setters.  Each stages a value via
+ * field_set_val32(); presumably nothing reaches hardware until
+ * hfu_nthw_rcp_flush() is called — confirm against nthw_fpga_model.
+ */
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+/* Remaining HFU_RCP_DATA field setters (TTL, checksum/protocol info,
+ * header offsets) plus the flush that writes the staged shadows.
+ */
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Write the staged RCP control and data register shadows to hardware. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* HFU hardware access object: handles to the HFU module and to its RCP
+ * control/data registers and fields, resolved once by hfu_nthw_init().
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	/* Handle to the HFU module instance on the FPGA */
+	nt_module_t *m_hfu;
+
+	/* RCP control register (address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+/* Lifetime: hfu_nthw_new() allocates, hfu_nthw_init() binds to an HFU
+ * module instance (returns -1 if the instance is missing), hfu_nthw_delete()
+ * frees.
+ */
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* NOTE(review): no definition of hfu_nthw_setup() is visible in
+ * flow_nthw_hfu.c — confirm it is implemented elsewhere or drop the
+ * prototype.
+ */
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field setters; staged values are written by hfu_nthw_rcp_flush() */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the debug-mode setting to the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/* Allocate a zero-initialized HSH shadow object; returns NULL on OOM. */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	struct hsh_nthw *p = malloc(sizeof(struct hsh_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free an HSH shadow object; NULL is accepted and ignored. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the HSH module registers and fields for FPGA instance @n_instance,
+ * then write benign defaults to RCP record 0 and flush them.
+ *
+ * A NULL @p probes only for module presence: returns 0 when the module
+ * exists, -1 otherwise.  With a valid @p, returns 0 on success or -1 if
+ * the instance does not exist.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: register_query_field() yields NULL when the FPGA
+	 * image does not implement it (setter below checks for NULL).
+	 */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: write benign defaults to RCP record 0 and commit them. */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* NOTE(review): 31 is presumably the "no hash" selector; P_MASK is
+	 * not given a default here — confirm both against the register spec.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each writes one field of the RCP shadow record.
+ * NOTE(review): values appear to reach hardware only when
+ * hsh_nthw_rcp_flush() is called — confirm against the register model.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* @val must hold at least mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* @val must hold 10 32-bit words (full word mask). */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional (queried at init); no-op when absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Commit the RCP control and data shadow registers to hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* NOTE(review): hsh_nthw_setup() is declared here but has no definition
+ * in the accompanying .c file — confirm a definition exists elsewhere.
+ */
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-record setters; commit with hsh_nthw_rcp_flush(). */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Register/field shadow handles for one HSH module instance. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;	/* optional; may be NULL */
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the debug-mode setting to the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/* Allocate a zero-initialized HST shadow object; returns NULL on OOM. */
+struct hst_nthw *hst_nthw_new(void)
+{
+	struct hst_nthw *p = malloc(sizeof(struct hst_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free an HST shadow object; NULL is accepted and ignored. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the HST module registers and fields for FPGA instance @n_instance.
+ *
+ * A NULL @p probes only for module presence: returns 0 when the module
+ * exists, -1 otherwise.  With a valid @p, returns 0 on success or -1 if
+ * the instance does not exist.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default RCP values are written
+ * and no register flush is performed here — confirm this is intentional.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each writes one field of the RCP shadow record.
+ * NOTE(review): values appear to reach hardware only when
+ * hst_nthw_rcp_flush() is called — confirm against the register model.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Commit the RCP control and data shadow registers to hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handles for one HST module instance. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-record setters; commit with hst_nthw_rcp_flush(). */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memset */
+
+/* Propagate the debug-mode setting to the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized IFR shadow object; returns NULL on OOM. */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	struct ifr_nthw *p = malloc(sizeof(struct ifr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free an IFR shadow object; NULL is accepted and ignored. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the IFR module registers and fields for FPGA instance @n_instance.
+ *
+ * A NULL @p probes only for module presence: returns 0 when the module
+ * exists, -1 otherwise.  With a valid @p, returns 0 on success or -1 if
+ * the instance does not exist.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle already looked up (and NULL-checked)
+	 * above instead of querying the FPGA a second time.
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Select the RCP record address to operate on. */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP record count for subsequent accesses. */
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the EN field of the selected RCP shadow record. */
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the MTU field of the selected RCP shadow record. */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix: assert the field actually written (was mp_rcp_data_en). */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Commit the RCP control and data shadow registers to hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handles for one IFR module instance. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR RCP shadow-record setters; commit with ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Reduce a product parameter to a 0/1 presence flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val > 1 ? 1 : val;
+}
+
+/* Allocate a zero-initialized capability-info object; NULL on OOM. */
+struct info_nthw *info_nthw_new(void)
+{
+	struct info_nthw *p = malloc(sizeof(struct info_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a capability-info object; NULL is accepted and ignored. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Populate the capability/limit snapshot for FPGA instance @n_instance by
+ * reading product parameters.  Capacities that belong to optional modules
+ * are multiplied by a 0/1 presence flag, so absent modules report 0.
+ * Always returns 0.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function: TPE is only usable when all
+	 * five sub-modules are present.
+	 */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types count once whether matching is done by KM, FLM or both. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS usability is bounded by the smaller of the Rx/Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): the -1 defaults presumably mean "not present", but
+	 * the corresponding getters return unsigned int — confirm callers
+	 * handle the wrapped value.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/* Simple accessors for the capability counts captured by info_nthw_init(). */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Allocate a zeroed info_nthw object; returns NULL on allocation failure. */
+struct info_nthw *info_nthw_new(void);
+/* Release an object obtained from info_nthw_new(); NULL is tolerated. */
+void info_nthw_delete(struct info_nthw *p);
+/*
+ * Bind the object to FPGA instance n_instance.  By convention in the
+ * sibling modules (see ioa/km), calling with p == NULL probes for the
+ * module only; returns 0 on success, -1 if the instance does not exist.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Read-only accessors for the capability snapshot below. */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Per-adapter capability snapshot.  The struct is fully visible (not
+ * opaque) despite the forward declaration above; treat its fields as
+ * read-only outside the info module and use the accessors above.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA FPGA module. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ioa_nthw object.
+ * Returns NULL on allocation failure; release with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset(). */
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/* Destroy an object from ioa_nthw_new(); NULL is a no-op. */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Clear the object before handing it back to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all IOA registers and fields for FPGA instance n_instance and
+ * cache the handles in *p.
+ *
+ * Calling with p == NULL is a pure probe: the return value only reports
+ * whether the module instance exists.
+ *
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control/data registers (mandatory -> *_get_field). */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * The EPP registers are optional (module_query_register may
+		 * return NULL), so the derived field pointers are NULLed
+		 * explicitly and every accessor below guards against that.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * RCP: stage recipe field values; nothing reaches hardware until
+ * ioa_nthw_rcp_flush() calls register_flush().
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Push the staged RCP control and data registers to hardware. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special: custom TPID values, flushed separately below. */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * ROA/EPP helpers: the underlying registers are optional (resolved with
+ * module_query_register in ioa_nthw_init), so each call silently no-ops
+ * when the field is absent in this FPGA image.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Cached register/field handles for one IOA module instance.
+ * Populated by ioa_nthw_init(); the mp_roa_epp_* members may be NULL
+ * when the optional EPP registers are absent in the FPGA image.
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP extension; NULL when not present (see ioa_nthw_init). */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage values, then commit with ioa_nthw_rcp_flush(). */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module (no-ops when the optional registers are absent) */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write a field only when the field pointer is non-NULL (optional fields
+ * resolved with register_query_field may be absent on older FPGA
+ * versions).  The local temporary evaluates _a exactly once.
+ */
+#define CHECK_AND_SET_VALUE(_a, val)             \
+	do {                                    \
+		__typeof__(_a) (a) = (_a); \
+		if (a) {                        \
+			field_set_val32(a, val); \
+		}                               \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM FPGA module. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized km_nthw object.
+ * Returns NULL on allocation failure; release with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset(). */
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/* Destroy an object from km_nthw_new(); NULL is a no-op. */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Clear the object before handing it back to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all KM registers and fields for FPGA instance n_instance and
+ * cache the handles in *p.  Mandatory fields use register_get_field;
+ * version-dependent fields use register_query_field and may end up NULL,
+ * which the setters tolerate via CHECK_AND_SET_VALUE.
+ *
+ * Calling with p == NULL is a pure probe: the return value only reports
+ * whether the module instance exists.
+ *
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* SW8/DW8 presence below selects between FPGA field layouts. */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR)
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines: fall back to the pre-rename QW/SW names. */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/*
+ * RCP: stage recipe field values; nothing reaches hardware until
+ * km_nthw_rcp_flush().  Setters for version-dependent fields go through
+ * CHECK_AND_SET_VALUE and silently no-op when the field is absent.
+ *
+ * Fix: removed the stray ';' after each function body — a function
+ * definition followed by ';' is an empty top-level declaration, which is
+ * not valid ISO C and triggers -Wpedantic warnings.
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+} /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+/* Push the staged RCP control and data registers to hardware. */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);	/* 3-word wide field */
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+} /* to use in v4 */
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw; /* full definition below */
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode); /* forward to module layer */
+}
+
+/*
+ * Allocate a zero-initialized PDB handle.
+ * Returns NULL on allocation failure; release with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	struct pdb_nthw *p = calloc(1, sizeof(*p)); /* replaces malloc+memset */
+	return p;
+}
+
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (p == NULL)
+		return; /* deleting NULL is a no-op */
+	(void)memset(p, 0, sizeof(*p)); /* scrub handle before release */
+	free(p);
+}
+
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0; /* probe mode: report module presence only */
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	p->mp_rcp_data_pcap_keep_fcs = /* optional field: query may return NULL */
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs) /* optional field; NULL if absent */
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional field; may be NULL */
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode); /* forward to module layer */
+}
+
+/*
+ * Allocate a zero-initialized QSL handle.
+ * Returns NULL on allocation failure; release with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	struct qsl_nthw *p = calloc(1, sizeof(*p)); /* replaces malloc+memset */
+	return p;
+}
+
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p == NULL)
+		return; /* deleting NULL is a no-op */
+	(void)memset(p, 0, sizeof(*p)); /* scrub handle before release */
+	free(p);
+}
+
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0; /* probe mode: report module presence only */
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO); /* optional */
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR); /* optional */
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA); /* optional */
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI); /* optional */
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0; /* currently nothing to set up */
+}
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+/* The fields below are optional in some FPGA versions; guard for NULL. */
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+/* Commit the shadowed RCP registers to hardware. */
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX (register group absent from FPGA v0.7+; pointers may be NULL) */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_cnt) /* was mp_ltx_addr: guard the field actually written */
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_ltx_ctrl, 1);
+	register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port) /* optional field; NULL if absent */
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre) /* optional field; NULL if absent */
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci) /* optional field; NULL if absent */
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven) /* optional field; NULL if absent */
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Shadow handle for one QSL (queue selection) module instance: caches the
+ * register/field pointers resolved once by qsl_nthw_init().
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	/* The following QST fields may be NULL on FPGA builds that lack them;
+	 * their setters in flow_nthw_qsl.c are NULL-guarded accordingly.
+	 */
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+/* Returns 0 on success, -1 if the instance does not exist. */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RMC shadow object; NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rmc_nthw));
+}
+
+/* Scrub and release an RMC shadow object; a NULL argument is a no-op. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p == NULL)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Resolve register/field shadows for one RMC module instance.
+ *
+ * Called with p == NULL it only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Returns 0 on success, -1 if missing.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* Query variant: RPP_SLICE is optional and may resolve to NULL. */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-index setup is required for RMC; parameters are kept so the
+ * signature matches the other flow_nthw modules.  Always returns 0.
+ */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL: shadow-write helpers for the RMC control register. */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* RPP_SLICE is optional; silently skipped when absent (pointer is NULL). */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Flush the shadowed CTRL register. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Shadow handle for one RMC module instance: caches the register/field
+ * pointers resolved once by rmc_nthw_init().
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	/* Optional field; NULL on builds without it — setter is guarded. */
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+/* Returns 0 on success, -1 if the instance does not exist. */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ROA shadow object; NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	return calloc(1, sizeof(struct roa_nthw));
+}
+
+/* Scrub and release a ROA shadow object; a NULL argument is a no-op. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p == NULL)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Resolve register/field shadows for one ROA module instance.
+ *
+ * Called with p == NULL it only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Returns 0 on success, -1 if missing.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR: shadow-write helpers for the tunnel-header table. */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* Write 4 x 32-bit words of tunnel header data from val[0..3]. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Flush the shadowed TUNHDR CTRL register, then the DATA register. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG: shadow-write helpers for the tunnel-configuration table.
+ * Each setter only updates the shadow; roa_nthw_tun_cfg_flush() commits.
+ */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX LAG index field in the TUNCFG data shadow. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Flush the shadowed TUNCFG CTRL register, then the DATA register. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG: shadow-write helpers for the global ROA config register. */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Flush the shadowed CONFIG register. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG: shadow-write helpers for the LAG configuration table. */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Flush the shadowed LAGCFG CTRL register, then the DATA register. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+/* Returns 0 on success, -1 if the instance does not exist. */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+/* val points to 4 x 32-bit words of tunnel header data. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/* Shadow handle for one ROA module instance: caches the register/field
+ * pointers resolved once by roa_nthw_init().
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RPP_LR shadow object; NULL on failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rpp_lr_nthw));
+}
+
+/* Scrub and release an RPP_LR shadow object; a NULL argument is a no-op. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Resolve register/field shadows for one RPP_LR module instance.
+ *
+ * Called with p == NULL it only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  The IFR recipe registers are resolved
+ * with the query variants, so those pointers stay NULL on FPGA builds
+ * without IFR support.  Returns 0 on success, -1 if missing.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above instead of issuing a
+	 * second, redundant fpga_query_module() call (matches the other
+	 * flow_nthw init functions).
+	 */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP: shadow-write helpers.  The asserts require the fields to have been
+ * resolved by rpp_lr_nthw_init().
+ */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Flush the shadowed RCP CTRL register, then the DATA register. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* IFR RCP: optional feature — fields are NULL on FPGA builds without IFR,
+ * so callers must only use these when IFR support is present (the asserts
+ * will fire otherwise in debug builds).
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Flush the shadowed IFR RCP CTRL register, then the DATA register. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Shadow handle for one RPP_LR module instance: caches the register/field
+ * pointers resolved once by rpp_lr_nthw_init().
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR registers are optional; NULL on builds without IFR support. */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+/* Returns 0 on success, -1 if the instance does not exist. */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC shadow object; NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_nthw));
+}
+
+/* Scrub and release an SLC shadow object; a NULL argument is a no-op. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p == NULL)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Resolve register/field shadows for one SLC module instance.
+ *
+ * Called with p == NULL it only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Returns 0 on success, -1 if missing.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above instead of issuing a
+	 * second, redundant fpga_query_module() call (matches the other
+	 * flow_nthw init functions).
+	 */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP: shadow-write helpers for the SLC recipe table. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Note: takes a signed offset; the raw bit pattern is written to the
+ * (32-bit) field — presumably the hardware interprets it as signed.
+ */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush the shadowed RCP CTRL register, then the DATA register. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one SLC (slicer) FPGA module instance. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_slc;		/* MOD_SLC module handle */
+
+	/* RCP control register and its fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;	/* SLC_RCP_CTRL_ADR */
+	nt_field_t *mp_rcp_cnt;		/* SLC_RCP_CTRL_CNT */
+	nt_register_t *mp_rcp_data;	/* RCP data register */
+
+	/* RCP data fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC_LR module handle. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized slc_lr_nthw context.
+ * Returns NULL on allocation failure; free with slc_lr_nthw_delete().
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	/* calloc() zero-initializes in one call, replacing malloc()+memset() */
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/*
+ * Free a context created by slc_lr_nthw_new(). NULL is accepted.
+ * The struct is scrubbed before free as a defensive measure
+ * (NOTE: a plain memset before free may be elided by the compiler).
+ */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a slc_lr_nthw context to SLC_LR module instance 'n_instance'.
+ * When 'p' is NULL the call is a pure probe: returns 0 if the instance
+ * exists, -1 otherwise. Returns -1 if the instance is missing.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* fixed copy-pasted "Slc" so the log names the right module */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the handle queried above; no need for a second lookup */
+	p->m_slc_lr = p_mod;
+
+	/* RCP control/data register and field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Select the RCP record index by writing 'val' to the CTRL ADR shadow field. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP CTRL CNT shadow field (number of records for the next access). */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the RCP DATA TAIL_SLC_EN shadow field (tail slicing enable). */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Set the RCP DATA TAIL_DYN shadow field. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Set the RCP DATA TAIL_OFS shadow field; 'val' is signed (offset may be negative). */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Set the RCP DATA PCAP shadow field. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/*
+ * Commit the shadowed RCP CTRL and DATA registers to the FPGA.
+ * CTRL is flushed before DATA; count argument is 1 for each.
+ */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one SLC_LR FPGA module instance. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_slc_lr;		/* MOD_SLC_LR module handle */
+
+	/* RCP control register and its fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;	/* SLC_RCP_CTRL_ADR */
+	nt_field_t *mp_rcp_cnt;		/* SLC_RCP_CTRL_CNT */
+	nt_register_t *mp_rcp_data;	/* RCP data register */
+
+	/* RCP data fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module handle. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized tx_cpy_nthw context.
+ * Returns NULL on allocation failure; free with tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	/* calloc() zero-initializes in one call, replacing malloc()+memset() */
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/*
+ * Free a context created by tx_cpy_nthw_new(), including the writers
+ * array allocated by tx_cpy_nthw_init(). NULL is accepted. The struct is
+ * scrubbed before free as a defensive measure.
+ */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p) {
+		free(p->m_writers);
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a tx_cpy_nthw context to TX_CPY module instance 'n_instance' and
+ * resolve register/field handles for each writer. When 'p' is NULL the
+ * call is a pure probe: returns 0 if the instance exists, -1 otherwise.
+ * Returns -1 on missing instance, zero writers, or allocation failure.
+ *
+ * The per-writer register ids follow a strict naming pattern
+ * (CPY_WRITER<n>_*), so a lookup table plus a loop replaces the former
+ * six-way copy-pasted fallthrough switch.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	/* Register/field ids for writer N live in row N. */
+	static const struct {
+		int ctrl;
+		int ctrl_adr;
+		int ctrl_cnt;
+		int data;
+		int data_reader_select;
+		int data_dyn;
+		int data_ofs;
+		int data_len;
+		int data_mask_pointer;
+		int mask_ctrl;
+		int mask_ctrl_adr;
+		int mask_ctrl_cnt;
+		int mask_data;
+		int mask_data_byte_mask;
+	} writer_regs[6] = {
+		{ CPY_WRITER0_CTRL, CPY_WRITER0_CTRL_ADR, CPY_WRITER0_CTRL_CNT,
+		  CPY_WRITER0_DATA, CPY_WRITER0_DATA_READER_SELECT,
+		  CPY_WRITER0_DATA_DYN, CPY_WRITER0_DATA_OFS,
+		  CPY_WRITER0_DATA_LEN, CPY_WRITER0_DATA_MASK_POINTER,
+		  CPY_WRITER0_MASK_CTRL, CPY_WRITER0_MASK_CTRL_ADR,
+		  CPY_WRITER0_MASK_CTRL_CNT, CPY_WRITER0_MASK_DATA,
+		  CPY_WRITER0_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER1_CTRL, CPY_WRITER1_CTRL_ADR, CPY_WRITER1_CTRL_CNT,
+		  CPY_WRITER1_DATA, CPY_WRITER1_DATA_READER_SELECT,
+		  CPY_WRITER1_DATA_DYN, CPY_WRITER1_DATA_OFS,
+		  CPY_WRITER1_DATA_LEN, CPY_WRITER1_DATA_MASK_POINTER,
+		  CPY_WRITER1_MASK_CTRL, CPY_WRITER1_MASK_CTRL_ADR,
+		  CPY_WRITER1_MASK_CTRL_CNT, CPY_WRITER1_MASK_DATA,
+		  CPY_WRITER1_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER2_CTRL, CPY_WRITER2_CTRL_ADR, CPY_WRITER2_CTRL_CNT,
+		  CPY_WRITER2_DATA, CPY_WRITER2_DATA_READER_SELECT,
+		  CPY_WRITER2_DATA_DYN, CPY_WRITER2_DATA_OFS,
+		  CPY_WRITER2_DATA_LEN, CPY_WRITER2_DATA_MASK_POINTER,
+		  CPY_WRITER2_MASK_CTRL, CPY_WRITER2_MASK_CTRL_ADR,
+		  CPY_WRITER2_MASK_CTRL_CNT, CPY_WRITER2_MASK_DATA,
+		  CPY_WRITER2_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER3_CTRL, CPY_WRITER3_CTRL_ADR, CPY_WRITER3_CTRL_CNT,
+		  CPY_WRITER3_DATA, CPY_WRITER3_DATA_READER_SELECT,
+		  CPY_WRITER3_DATA_DYN, CPY_WRITER3_DATA_OFS,
+		  CPY_WRITER3_DATA_LEN, CPY_WRITER3_DATA_MASK_POINTER,
+		  CPY_WRITER3_MASK_CTRL, CPY_WRITER3_MASK_CTRL_ADR,
+		  CPY_WRITER3_MASK_CTRL_CNT, CPY_WRITER3_MASK_DATA,
+		  CPY_WRITER3_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER4_CTRL, CPY_WRITER4_CTRL_ADR, CPY_WRITER4_CTRL_CNT,
+		  CPY_WRITER4_DATA, CPY_WRITER4_DATA_READER_SELECT,
+		  CPY_WRITER4_DATA_DYN, CPY_WRITER4_DATA_OFS,
+		  CPY_WRITER4_DATA_LEN, CPY_WRITER4_DATA_MASK_POINTER,
+		  CPY_WRITER4_MASK_CTRL, CPY_WRITER4_MASK_CTRL_ADR,
+		  CPY_WRITER4_MASK_CTRL_CNT, CPY_WRITER4_MASK_DATA,
+		  CPY_WRITER4_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER5_CTRL, CPY_WRITER5_CTRL_ADR, CPY_WRITER5_CTRL_CNT,
+		  CPY_WRITER5_DATA, CPY_WRITER5_DATA_READER_SELECT,
+		  CPY_WRITER5_DATA_DYN, CPY_WRITER5_DATA_OFS,
+		  CPY_WRITER5_DATA_LEN, CPY_WRITER5_DATA_MASK_POINTER,
+		  CPY_WRITER5_MASK_CTRL, CPY_WRITER5_MASK_CTRL_ADR,
+		  CPY_WRITER5_MASK_CTRL_CNT, CPY_WRITER5_MASK_DATA,
+		  CPY_WRITER5_MASK_DATA_BYTE_MASK },
+	};
+
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the handle queried above; no need for a second lookup */
+	p->m_tx_cpy = p_mod;
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* variant != 0 means the product also exposes mask registers */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/* Only the first six writers have dedicated register sets. */
+	const unsigned int n_used =
+		p->m_writers_cnt < 6 ? p->m_writers_cnt : 6;
+	for (unsigned int i = 0; i < n_used; i++) {
+		struct tx_cpy_writers_s *const w = &p->m_writers[i];
+
+		w->mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, writer_regs[i].ctrl);
+		w->mp_writer_ctrl_addr =
+			register_get_field(w->mp_writer_ctrl,
+					   writer_regs[i].ctrl_adr);
+		w->mp_writer_ctrl_cnt =
+			register_get_field(w->mp_writer_ctrl,
+					   writer_regs[i].ctrl_cnt);
+		w->mp_writer_data =
+			module_get_register(p->m_tx_cpy, writer_regs[i].data);
+		w->mp_writer_data_reader_select =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_reader_select);
+		w->mp_writer_data_dyn =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_dyn);
+		w->mp_writer_data_ofs =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_ofs);
+		w->mp_writer_data_len =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_len);
+		if (variant != 0) {
+			w->mp_writer_data_mask_pointer =
+				register_get_field(w->mp_writer_data,
+						   writer_regs[i].data_mask_pointer);
+			w->mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy,
+						    writer_regs[i].mask_ctrl);
+			w->mp_writer_mask_ctrl_addr =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   writer_regs[i].mask_ctrl_adr);
+			w->mp_writer_mask_ctrl_cnt =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   writer_regs[i].mask_ctrl_cnt);
+			w->mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy,
+						    writer_regs[i].mask_data);
+			w->mp_writer_mask_data_byte_mask =
+				register_get_field(w->mp_writer_mask_data,
+						   writer_regs[i].mask_data_byte_mask);
+		}
+	}
+
+	return 0;
+}
+
+/* Set writer 'index' CTRL ADR shadow field; index must be < m_writers_cnt. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set writer 'index' CTRL CNT shadow field; index must be < m_writers_cnt. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Set writer 'index' DATA READER_SELECT shadow field. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Set writer 'index' DATA DYN shadow field. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Set writer 'index' DATA OFS shadow field. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Set writer 'index' DATA LEN shadow field. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/*
+ * Set writer 'index' DATA MASK_POINTER shadow field.
+ * Only valid when the mask field was resolved (non-zero CPY variant) —
+ * enforced by the second assert.
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Commit writer 'index' shadowed CTRL and DATA registers (CTRL first). */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Set writer 'index' MASK_CTRL ADR shadow field (mask registers variant only). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set writer 'index' MASK_CTRL CNT shadow field (mask registers variant only). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Set writer 'index' MASK_DATA BYTE_MASK shadow field (mask registers variant only). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Commit writer 'index' shadowed MASK_CTRL and MASK_DATA registers (CTRL first). */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for a single TX_CPY writer. */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;		/* CPY_WRITERn_CTRL */
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;		/* CPY_WRITERn_DATA */
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	/* Mask handles below are NULL unless the CPY variant supports masks. */
+	nt_field_t *mp_writer_data_mask_pointer;
+
+	nt_register_t *mp_writer_mask_ctrl;	/* CPY_WRITERn_MASK_CTRL */
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;	/* CPY_WRITERn_MASK_DATA */
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Context for one TX_CPY FPGA module instance. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_tx_cpy;		/* MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt;	/* from NT_TX_CPY_WRITERS product param */
+	struct tx_cpy_writers_s *m_writers;	/* heap array, freed in delete() */
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module handle. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized tx_ins_nthw context.
+ * Returns NULL on allocation failure; free with tx_ins_nthw_delete().
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	/* calloc() zero-initializes in one call, replacing malloc()+memset() */
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/*
+ * Free a context created by tx_ins_nthw_new(). NULL is accepted.
+ * The struct is scrubbed before free as a defensive measure.
+ */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a tx_ins_nthw context to TX_INS module instance 'n_instance'.
+ * When 'p' is NULL the call is a pure probe: returns 0 if the instance
+ * exists, -1 otherwise. Returns -1 if the instance is missing.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the handle queried above; no need for a second lookup */
+	p->m_tx_ins = p_mod;
+
+	/* RCP control/data register and field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Select the RCP record index by writing 'val' to the CTRL ADR shadow field. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP CTRL CNT shadow field (number of records for the next access). */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the RCP DATA DYN shadow field. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Set the RCP DATA OFS shadow field. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Set the RCP DATA LEN shadow field. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/*
+ * Commit the shadowed RCP CTRL and DATA registers to the FPGA.
+ * CTRL is flushed before DATA; count argument is 1 for each.
+ */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one TX_INS FPGA module instance. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_tx_ins;		/* MOD_TX_INS module handle */
+
+	/* RCP control register and its fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;	/* INS_RCP_CTRL_ADR */
+	nt_field_t *mp_rcp_cnt;		/* INS_RCP_CTRL_CNT */
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
/* Forward the debug-mode setting to the underlying TX_RPL module. */
void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
}
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_rpl = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
/* RCP shadow-register writers: each sets one field; nothing reaches the
 * hardware until tx_rpl_nthw_rcp_flush() is called.
 */

/* Select the RCP record address to operate on. */
void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_addr, val);
}

/* Set the RCP control CNT field. */
void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_cnt, val);
}

/* Set the RCP DYN data field. */
void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_dyn, val);
}

/* Set the RCP OFS (offset) data field. */
void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ofs, val);
}

/* Set the RCP LEN (length) data field. */
void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_len, val);
}

/* Set the RCP replace-pointer data field. */
void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
}

/* Set the RCP extended-priority data field. */
void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ext_prio, val);
}

/* Commit the RCP control and data shadow registers to hardware. */
void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
+
/* EXT table writers: shadow-register sets, committed by tx_rpl_nthw_ext_flush(). */

/* Select the EXT record address to operate on. */
void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_addr, val);
}

/* Set the EXT control CNT field. */
void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_cnt, val);
}

/* Set the EXT replace-pointer data field. */
void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_data_rpl_ptr, val);
}

/* Commit the EXT control and data shadow registers to hardware. */
void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_ext_ctrl, 1);
	register_flush(p->mp_ext_data, 1);
}

/* RPL table writers: shadow-register sets, committed by tx_rpl_nthw_rpl_flush(). */

/* Select the RPL record address to operate on. */
void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_addr, val);
}

/* Set the RPL control CNT field. */
void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_cnt, val);
}

/* Write the RPL VALUE field from 4 consecutive 32-bit words. */
void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
{
	field_set_val(p->mp_rpl_data_value, val, 4);
}

/* Commit the RPL control and data shadow registers to hardware. */
void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rpl_ctrl, 1);
	register_flush(p->mp_rpl_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
/* Handles for one TX replacer (TX_RPL) FPGA module instance. */
struct tx_rpl_nthw {
	/* set from the module instance number in tx_rpl_nthw_init() */
	uint8_t m_physical_adapter_no;
	nt_fpga_t *mp_fpga;

	/* TX_RPL module handle */
	nt_module_t *m_tx_rpl;

	/* RCP (recipe) control register and its ADR/CNT fields */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_ctrl_addr;
	nt_field_t *mp_rcp_ctrl_cnt;

	/* RCP data register fields */
	nt_register_t *mp_rcp_data;
	nt_field_t *mp_rcp_data_dyn;
	nt_field_t *mp_rcp_data_ofs;
	nt_field_t *mp_rcp_data_len;
	nt_field_t *mp_rcp_data_rpl_ptr;
	nt_field_t *mp_rcp_data_ext_prio;

	/* EXT table control register and its ADR/CNT fields */
	nt_register_t *mp_ext_ctrl;
	nt_field_t *mp_ext_ctrl_addr;
	nt_field_t *mp_ext_ctrl_cnt;

	/* EXT table data register */
	nt_register_t *mp_ext_data;
	nt_field_t *mp_ext_data_rpl_ptr;

	/* RPL table control register and its ADR/CNT fields */
	nt_register_t *mp_rpl_ctrl;
	nt_field_t *mp_rpl_ctrl_addr;
	nt_field_t *mp_rpl_ctrl_cnt;

	/* RPL table data register (VALUE is written as 4 x 32-bit words) */
	nt_register_t *mp_rpl_data;
	nt_field_t *mp_rpl_data_value;
};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v2 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-17 14:43   ` [PATCH v2 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-17 14:43   ` Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index c184d5d4b5..387481bb4a 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -17,6 +17,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
/*
 * Write a tunnel-push header into the ROA tunnel-header table at 'index'.
 *
 * Writes 4 * 4 words = 64 bytes when tun->ip_version == 4, otherwise
 * 8 * 4 words = 128 bytes (IPv6).  Within each group of 4 words the
 * source words are written in reverse order, as required by the hardware.
 * Backend error codes are OR-accumulated into 'err' and both loops stop
 * on the first non-zero result.
 *
 * Returns 0 on success, a non-zero backend error code otherwise.
 */
int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
				   struct tunnel_header_s *tun)
{
	int err = 0;
	int num_writes = (tun->ip_version == 4) ? 4 : 8;

	/*
	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
	 */
	for (int i = 0; (i < num_writes) && !err; i++) {
		for (int ii = 0; (ii < 4) && !err; ii++) {
			/* must write each 4 words backwards! */
			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
						     index, i * 4 + ii,
						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
		}
	}

	return err;
}
+
/*
 * Program the ROA tunnel configuration at 'index' from the packed
 * roa action bits (see the set_roa_* bitmask layout in flow_api_actions.h).
 *
 * Resets the entry, then - if a tunnel header length is encoded - sets
 * tunnel length/type, IP total-length and checksum pre-calculation
 * (IPv4 only; IPv6 headers carry no checksum), and enables tunnel push.
 * Afterwards it configures recirculation (bypass port wins over a plain
 * recirculate port) and the TX destination, and finally mirrors the
 * settings into the IOA "ROA EPP" memory.
 *
 * Returns 0 on success, -1 on an invalid TX destination combination.
 */
int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
	/*
	 * If tunnel header specified
	 */
	int tun_len = get_roa_tunhdr_len(color_actions);

	if (tun_len) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
				      tun_len);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
				      roa_get_tun_type(color_actions));

		/* set the total tunnel IP header length */
		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 */
			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
					       sizeof(struct flow_elem_ipv6))) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
				/* tunnel header length excludes the IPv6 header itself */
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   (sizeof(struct flow_elem_eth) +
						    sizeof(struct flow_elem_ipv6))));
			}
		} else {
			/* IPv4 */
			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
						      index, 1);
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   sizeof(struct flow_elem_eth)));
			}
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
				      get_roa_tun_ip_type(color_actions));

		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 - Do not update the IP checksum in the tunnel header */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
					      0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index, 0);
		} else {
			/* IPv4 */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
					      index, 1);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index,
					      get_roa_tun_ip_csum(color_actions));
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
				      index, 1);

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
	}

	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);

	if (recirculate_bypass) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      recirculate_bypass);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);

	} else {
		int32_t recirculate_port = roa_get_recirc_port(color_actions);

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      255);

		if (recirculate_port >= 0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
					      index, recirculate_port);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);
		}
	}

	uint8_t tx = roa_get_tx(color_actions);

	if (tx) {
		if (tx == DESTINATION_TX_PHY0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
		} else if (tx == DESTINATION_TX_PHY1) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY1);
		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
			/* replicate to both PHYs: TX on PHY0, recirculate to PHY1 */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
					      index, 0x81); /* port 1 - only port left */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);

		} else {
			return -1; /* ERR */
		}
	}

	/*
	 * Special IOA memory that contains ROA information - bad FPGA design
	 */
	if (tx || tun_len) {
		if (be->ioa.ver > 3 && tun_len &&
				get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, 2);
		} else {
			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, !!tun_len);
		}
		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
	}

	return 0;
}
+
/*
 * Program an IOA recipe at 'index' from the packed ioa action bits.
 *
 * Decodes the ioa_set_* encoded fields of color_actions (see the bitmask
 * layout in flow_api_actions.h) and writes the corresponding IOA RCP
 * settings: outer tunnel (VxLAN) pop, outer VLAN pop, VLAN push with
 * VID/DEI/PCP decomposed from the TCI, optional destination-queue
 * override, and the VLAN TPID selector.
 *
 * Always returns 0.
 */
int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	if (color_actions & ioa_set_vxlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
	}

	if (color_actions & ioa_set_vlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
	}

	int tpid_sel = ioa_get_tpid_sel(color_actions);

	if (color_actions & ioa_set_vlan_push(0, 0)) {
		uint16_t tci = ioa_get_vlan_tci(color_actions);

		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
		       tpid_sel ? 0x88a8 : 0x8100, tci);
		/* TCI = PCP[15:13] | DEI[12] | VID[11:0] */
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
				   tci & 0x0FFF);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
				   (tci >> 12) & 0x1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
				   (tci >> 13) & 0x7);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
	}

	int queue = ioa_get_queue(color_actions);

	if (queue >= 0) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
	}

	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
/*
 * Encode the legacy (pre-QSL v6) 32-bit color action word.
 * The field widths depend on MAX_COLOR_FLOW_STATS; the exact bit layout
 * for each variant is documented in the comment block above.  Bits 31:30
 * identify which layout is in use (0 = 1024-entry, 1 = 16384-entry).
 */
static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
		uint32_t roa_rcp)
{
#if (MAX_COLOR_FLOW_STATS == 0x400)
	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
				((ioa_rcp & 0x3ff) << 10) |
				((roa_rcp & 0x3ff) << 20) | (0 << 30);
#else
	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
				((ioa_rcp & 0xff) << 14) |
				((roa_rcp & 0xff) << 22) | (1 << 30);
#endif
	return color_action;
}
+
/* Mask covering inclusive bit range a:b (a >= b), as an unsigned value. */
#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)

/*
 *  9:0   Mark (color) 1024 flow stats
 * 17:10  IOA index     256 entries
 * 25:18  ROA index     256 entries
 * 30:26  QSL and HSH    32 recipes indexable
 * 31:31  CAO               implicitly when color_action is set
 */
#define FLOW_MARK_MASK BITMASK(9, 0)
#define IOA_RCP_MASK BITMASK(17, 10)
#define ROA_RCP_MASK BITMASK(25, 18)
#define QSL_HSH_MASK BITMASK(30, 26)

/*
 * Encode the current-format 32-bit color action word from its fields
 * (layout above).  Bit 31 (CAO) is always set.
 */
static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
					uint32_t roa_rcp, uint32_t qsl_hsh)
{
	/* 1U << 31: a plain (1 << 31) shifts into the sign bit of a signed
	 * int, which is undefined behavior in C (C11 6.5.7).
	 */
	uint32_t color_action = (mark & FLOW_MARK_MASK) |
				((ioa_rcp & IOA_RCP_MASK) << 10) |
				((roa_rcp & ROA_RCP_MASK) << 18) |
				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
	return color_action;
}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
/* Accessors for the packed ROA action bits; the bit layout is documented
 * in the comment block above.  set_* functions OR the field into 'actions'
 * and return the result; get_* functions extract it.
 */

/* Mark that a new recirculate port was allocated (bit 50). */
static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
{
	actions |= 1ULL << 50;
	return actions;
}

static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
{
	return (uint8_t)((actions >> 50) & 1);
}

/* Tunnel IP type, bit 24: 0 = IPv4, 1 = IPv6. */
static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
{
	actions |= (uint64_t)(ip_type & 1) << 24;
	return actions;
}

static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
{
	return (uint8_t)((actions >> 24) & 1);
}

/* Pre-calculated tunnel IP header checksum, bits 49:34. */
static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
{
	actions |= (uint64_t)csum << 34;
	return actions;
}

static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
{
	return (uint16_t)((actions >> 34) & 0xffff);
}

/* Tunnel header length in bytes, bits 33:26 (0 = no tunnel header). */
static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
{
	actions |= (uint64_t)length << 26;
	return actions;
}

static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
{
	return (uint8_t)((actions >> 26) & 0xff);
}

/* TX destination, bits 23:20, stored as txport + ROA_TX_PHY0. */
static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
{
	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
	return actions;
}

static inline uint8_t roa_get_tx(uint64_t actions)
{
	return (actions >> 20) & 0x0f;
}

/* Tunnel type, bits 19:16. */
static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
{
	actions |= (type & 0x0f) << 16;
	return actions;
}

static inline uint8_t roa_get_tun_type(uint64_t actions)
{
	return (actions >> 16) & 0x0f;
}

/* Recirculate port, bits 7:0, with bit 25 flagging that it is set. */
static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
{
	actions |= (1ULL << 25) | port;
	return actions;
}

/* Returns the recirculate port, or -1 when none was set. */
static inline int32_t roa_get_recirc_port(uint64_t actions)
{
	if (!((1ULL << 25) & actions))
		return -1;
	return (actions & 0xff);
}

/* Recirculate-bypass port, bits 15:8; non-zero overrides recirc port. */
static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
{
	actions |= ((uint64_t)port & 0xff) << 8;
	return actions;
}

static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
{
	return ((actions >> 8) & 0xff);
}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
/*
 * Enable queue override (bit 31) and store the queue id in bits 23:16.
 */
static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
{
	/* 1ULL is required: a plain (1 << 31) is the signed int INT_MIN,
	 * which sign-extends to 0xFFFFFFFF80000000 when widened to
	 * uint64_t and would clobber every action bit above bit 31.
	 */
	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
	return actions;
}
+
/*
 * Return the override queue id (bits 23:16) if queue override (bit 31)
 * is enabled, otherwise -1.
 */
static inline int ioa_get_queue(uint64_t actions)
{
	/* 1ULL is required: a plain (1 << 31) sign-extends to a mask
	 * covering bits 63:31, so any action bit above 31 (e.g. the roa
	 * fields) would falsely report the queue override as enabled.
	 */
	if (!(actions & (1ULL << 31)))
		return -1;
	return ((actions >> 16) & 0xff);
}
+
/* Flag: pop the outer VxLAN tunnel (bit 28). */
static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
{
	actions |= 1 << 28;
	return actions;
}

/* Flag: pop the outer VLAN tag (bit 29). */
static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
{
	actions |= 1 << 29;
	return actions;
}

/* Select the 802.1ad (QinQ) TPID in the selector field, bits 27:24. */
static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
{
	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
	return actions;
}

/* Extract the TPID selector, bits 27:24. */
static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
{
	return (uint8_t)((actions >> 24) & 0x0f);
}

/* Flag VLAN push (bit 30) and store the full 16-bit TCI in bits 15:0. */
static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
{
	actions |= (1 << 30) | tci;
	return actions;
}

/* Flag VLAN push (bit 30) and set only the PCP part of the TCI (bits 15:13). */
static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
{
	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
	return actions;
}

/* Extract the stored VLAN TCI, bits 15:0. */
static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
{
	return (uint16_t)(actions & 0xffff);
}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Per-HW-module dispatch table: each entry names a NIC filter module
+ * and binds its version-independent allocate/free/reset/present
+ * operations.  Iterated in order by flow_api_backend_init(),
+ * flow_api_backend_reset() and flow_api_backend_done() below.
+ */
+static const struct {
+	const char *name;
+	int (*allocate)(struct flow_api_backend_s *be);
+	void (*free)(struct flow_api_backend_s *be);
+	int (*reset)(struct flow_api_backend_s *be);
+	bool (*present)(struct flow_api_backend_s *be);
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one zeroed region holding @sets consecutive element arrays
+ * and hand a pointer to each back to the caller.  The variadic
+ * arguments come in triplets: (void **list_ptr, int element_count,
+ * int element_size).  Each array is preceded by EXTRA_INDEXES hidden
+ * elements; the per-set pointer returned through list_ptr points past
+ * them.  The base pointer and total size are cached in @mod so the
+ * region can later be zeroed (zero_module_cache()) or freed.
+ * Returns the base allocation, or NULL on failure.
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		/* fetch with the argument's real type (void **), not void * */
+		plist[i] = va_arg(args, void **);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			/* skip this set's hidden extra elements */
+			(*plist[i]) = (void *)(p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+		/* nothing was allocated - leave no stale size behind */
+		total_bytes = 0;
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+/*
+ * Zero the module's entire cached register image.
+ * Guards against a NULL base (allocation failure in callocate_mod());
+ * memset(NULL, 0, n) with n > 0 would be undefined behavior.
+ */
+void zero_module_cache(struct common_func_s *mod)
+{
+	if (mod->base)
+		memset(mod->base, 0, mod->allocated_size);
+}
+
+/*
+ * Bind the backend operations to @dev, query NIC capabilities and
+ * build the version-independent cache for every HW module the backend
+ * reports present.  Returns 0 on success; on any module failure all
+ * modules set up so far are torn down again and -1 is returned.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		/* reset() runs only if allocate() succeeded (short-circuit) */
+		if (module[mod].allocate(dev) != 0 ||
+				module[mod].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Reset every HW module's cache back to defaults.
+ * Skips modules the backend reports absent - flow_api_backend_init()
+ * never allocated those, so resetting them would touch an unallocated
+ * cache.  Returns 0 on success, -1 on the first module failure.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Release every module's cached memory.  Always returns 0. */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	unsigned int mod;
+
+	for (mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+/* Log an index-out-of-range error for @func; returns error code -2. */
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+/* Log a word-offset-out-of-range error for @func; returns error code -3. */
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+/* Log an unsupported-module-version error; returns error code -4. */
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* Log an unsupported-field error for @func; returns error code -5. */
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+/*
+ * Log an insufficient-resource error for @resource in module @mod.
+ * NOTE(review): returns -4, the same code as error_unsup_ver() - confirm
+ * callers do not need to distinguish the two conditions.
+ */
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/*
+ * Transfer one 32-bit value between the SW cache and the caller:
+ * get != 0 reads the cached value into *val, get == 0 writes *val
+ * into the cache.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	uint32_t *dst = get ? val : cached_val;
+	const uint32_t *src = get ? cached_val : val;
+
+	*dst = *src;
+}
+
+/*
+ * Signed variant of get_set(): the cache stores int32_t while the
+ * caller exchanges the bit pattern through a uint32_t.
+ */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)*cached_val;
+		return;
+	}
+	*cached_val = (int32_t)*val;
+}
+
+/*
+ * Search [start, nb_elements) for another element whose cached register
+ * image is byte-identical to element @idx (element @idx itself is
+ * skipped).  On return *value holds the first matching index, or
+ * NOT_FOUND when no duplicate exists.  Only valid for "get" accesses;
+ * returns 0 on success or a negative error code.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+	const uint8_t *ref = base + idx * type_size;
+
+	for (unsigned int i = start; i < nb_elements; i++) {
+		if (i == idx)
+			continue;
+		if (memcmp(ref, base + i * type_size, type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Report whether elements @idx and @cmp_idx have byte-identical cached
+ * register images.  Returns 1 on match, 0 otherwise (an element never
+ * matches itself), or a negative error code for a "set" access or an
+ * out-of-range @cmp_idx.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	const uint8_t *base;
+
+	if (!get)
+		return error_unsup_field(func);
+	if (cmp_idx >= nb_elements)
+		return error_index_too_large(func);
+	if (idx == cmp_idx)
+		return 0;
+
+	base = (const uint8_t *)be_module_reg;
+	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
+		      type_size) == 0 ? 1 : 0;
+}
+
+/* Return 1 when any byte in the n-byte region at @addr is non-zero. */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *b = (const uint8_t *)addr;
+	const uint8_t *end = b + n;
+
+	while (b < end) {
+		if (*b++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 when every byte in the n-byte region at @addr is 0xff. */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *b = (const uint8_t *)addr;
+	const uint8_t *end = b + n;
+
+	while (b < end) {
+		if (*b++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+/* Bit positions of the per-category enable flags in the CAT CTE
+ * enable bitmap, one per downstream engine.
+ */
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+/*
+ * Dynamic frame offset selectors: each DYN_* value names a parser-
+ * resolved position in the packet (outer headers, then tunneled/inner
+ * headers).  The SB_* values carry SWX_INFO and select sideband data
+ * instead of a frame offset.
+ */
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM (flow matcher) module backend API.
+ * NOTE(review): from the naming pattern used throughout this header,
+ * *_set/_get appear to access a cached (shadow) register value,
+ * *_flush to write the cache to the device, and *_update to read
+ * device state back into the cache -- confirm against the backend
+ * implementation.  All functions return 0 on success; presumably a
+ * negative value on error (verify with callers).
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+/* CONTROL register group */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* STATUS register group */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+/* TIMEOUT register group */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* SCRUB register group */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* LOAD_BIN register group */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+/* LOAD_PPS register group */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+/* LOAD_LPS register group */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+/* LOAD_APS register group */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+/* PRIO register group */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST table: indexed access, flush writes [start_idx, start_idx + count) */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* RCP (recipe) table: indexed access; set_mask takes a multi-word value */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* Buffer control and statistics (read-back only: update/get) */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn / info / status data streams; inf takes an explicit word count */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH (hash) module state: capacities plus versioned register image. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of RCP entries supported */
+	union {
+		struct hw_mod_hsh_v5_s v5; /* module version 5 register layout */
+	};
+};
+
+/*
+ * HSH identifiers used with the hw_mod_hsh_* accessors below.
+ * Values before FIELD_START_INDEX select whole-record operations,
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+/* HSH module backend API; RCP accessors address fields by word offset. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL (queue select) module state: capacities plus versioned register image. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories */
+	uint32_t nb_qst_entries;    /* number of QST table entries */
+	union {
+		struct hw_mod_qsl_v7_s v7; /* module version 7 register layout */
+	};
+};
+
+/*
+ * QSL identifiers used with the hw_mod_qsl_* accessors below.
+ * Values before FIELD_START_INDEX select whole-record operations,
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+/* QSL module backend API: RCP, QST, QEN and UNMQ register tables. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC (slicer) module state with versioned register image. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1; /* module version 1 register layout */
+	};
+};
+
+/*
+ * SLC identifiers: operations before FIELD_START_INDEX, register
+ * fields from FIELD_START_INDEX.  Mirrors enum hw_slc_lr_e below.
+ */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+/* SLC module backend API. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC LR module state with versioned register image. */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2; /* module version 2 register layout */
+	};
+};
+
+/*
+ * SLC LR identifiers: same field set as enum hw_slc_e, kept as a
+ * separate enum for the LR variant of the module.
+ */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+/* SLC LR module backend API. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB (packet descriptor builder) module state. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of PDB RCP categories */
+
+	union {
+		struct hw_mod_pdb_v9_s v9; /* module version 9 register layout */
+	};
+};
+
+/*
+ * PDB identifiers: operations before FIELD_START_INDEX, register
+ * fields from FIELD_START_INDEX.
+ */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/* PDB module backend API. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+/* NOTE(review): no hw_mod_pdb_config_get counterpart is declared. */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module state: capacities plus versioned register image. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;  /* number of RCP categories */
+	uint32_t nb_roa_epp_entries; /* number of ROA EPP entries */
+	union {
+		struct hw_mod_ioa_v4_s v4; /* module version 4 register layout */
+	};
+};
+
+/*
+ * IOA identifiers: operations before FIELD_START_INDEX, register
+ * fields from FIELD_START_INDEX.
+ */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+/* IOA module backend API: RCP, CONFIG and ROA EPP register tables. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA module state: capacities plus versioned register image. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel categories */
+	uint32_t nb_lag_entries;    /* number of LAG config entries */
+	union {
+		struct hw_mod_roa_v6_s v6; /* module version 6 register layout */
+	};
+};
+
+/*
+ * ROA identifiers: operations before FIELD_START_INDEX, register
+ * fields from FIELD_START_INDEX.
+ */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/* ROA module backend API. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+/* TUNHDR table: multi-word records addressed by index and word offset */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+/* TUNCFG table */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* CONFIG register group */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+/* LAGCFG table */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* IGS/RCC packet and byte drop registers */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module state with versioned register image. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3; /* module version 1.3 register layout */
+	};
+};
+
+/*
+ * RMC CTRL register fields.  Unlike the other hw_*_e enums this one has
+ * no function values: all entries start at FIELD_START_INDEX.
+ */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+/* RMC module backend API (single CTRL register group). */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* TPE (TX packet editor) module state: capacities plus versioned image. */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;     /* number of RCP categories */
+	uint32_t nb_ifr_categories;     /* number of IFR categories */
+	uint32_t nb_cpy_writers;        /* number of TX copy writers */
+	uint32_t nb_rpl_depth;          /* replace table depth */
+	uint32_t nb_rpl_ext_categories; /* replace-extension categories */
+	union {
+		struct hw_mod_tpe_v1_s v1; /* module version 1 register layout */
+		struct hw_mod_tpe_v2_s v2; /* module version 2 register layout */
+	};
+};
+
+/*
+ * TPE identifiers covering the sub-blocks RPP, IFR, INS, RPL, CPY, HFU
+ * and CSU.  Operations before FIELD_START_INDEX, register fields from
+ * FIELD_START_INDEX.
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/* TPE module backend API, one accessor trio per sub-block table. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* RPL value table: note that set takes a multi-word value pointer */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug modes; WRITE presumably traces register writes (confirm). */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operations table implemented once per physical backend.
+ * The get_nb_* callbacks report per-module resource capacities, the
+ * get_*_present/get_*_version callbacks probe module availability, and
+ * the *_flush/*_update callbacks transfer a module's register state
+ * between the caller-held shadow structs and the device.
+ * NOTE(review): 'version' is assumed to be an interface-compatibility
+ * number checked at registration -- confirm in flow_api_backend_init.
+ */
+struct flow_api_backend_ops {
+	int version;
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	/* RX queue allocation/release on the backend device */
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Flow API backend state: the backend device handle, its ops table,
+ * and shadow state for every flow-filter FPGA module plus cached NIC
+ * attributes and resource capacities.
+ */
+struct flow_api_backend_s {
+	void *be_dev;                             /* opaque backend device, passed to all ops */
+	const struct flow_api_backend_ops *iface; /* backend operations table */
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Bind an ops table and backend device to 'dev' and initialize state. */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+/* One allocator per resource type; order defines the resource table layout. */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT, /* number of valid resource types - keep last before RES_INVALID */
+	RES_INVALID
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+/* Which KM extractor type a match element is assigned to. */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD, /* 128-bit quad-word extractor */
+	KM_USE_EXTRACTOR_SWORD, /* 32-bit single-word extractor */
+};
+
+/* One collected match field before it is packed into a CAM/TCAM entry. */
+struct match_elem_s {
+	enum extractor_e extr;
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4]; /* match value, up to 128 bits */
+	uint32_t e_mask[4]; /* per-bit mask for e_word */
+
+	int extr_start_offs_id; /* dynamic start-offset selector in frame */
+	int8_t rel_offs; /* byte offset relative to the start selector */
+	uint32_t word_len; /* number of valid 32-bit words in e_word */
+};
+
+/* Target lookup technology for a KM entry. */
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+/*
+ * Full Key Matcher flow definition: collected match elements, the
+ * formatted CAM/TCAM entry derived from them, and bank bookkeeping
+ * for where the entry has been (or will be) programmed.
+ */
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+/* 64-bit KCC-CAM key; bit-field layout must match the HW key format. */
+struct kcc_key_s {
+	uint64_t sb_data : 32; /* sideband data (VLAN TPID/VID, VXLAN VNI, ...) */
+	uint64_t sb_type : 8; /* sideband type selector: 0 none, 1 VLAN, 2 VXLAN */
+	uint64_t cat_cfn : 8;
+	uint64_t port : 16;
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+/* KCC-CAM flow definition plus the bank bookkeeping for its CAM entry. */
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64; /* whole key, used for fast compare */
+		uint32_t key32[2]; /* halves written to HW registers */
+		struct kcc_key_s key; /* field-wise access */
+	};
+	uint32_t km_category;
+	uint32_t id;
+
+	uint8_t *kcc_unique_ids; /* shared allocation bitmap for unique IDs */
+
+	int flushed_to_target;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+/* Raw tunnel push header plus precomputed layer offsets for HW recipes. */
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d;
+	uint32_t user_port_id;
+	uint8_t len; /* valid bytes in d */
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc; /* outer IPv4 checksum precalculated over static fields */
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+enum special_partial_match_e {
+	SPCIAL_MATCH_NONE, /* NOTE(review): typo for SPECIAL_MATCH_NONE - rename would touch all users */
+	SPECIAL_MATCH_LACP,
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+/* One resolved output destination of a flow. */
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+/*
+ * Result of the first conversion/validation step of a flow: decoded
+ * frame-protocol match info plus all collected actions, before any NIC
+ * HW resources are allocated for it.
+ */
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions; /* bitmask of ROA module actions */
+	uint64_t ioa_actions; /* bitmask of IOA module actions */
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select;
+		uint32_t dyn;
+		uint32_t ofs;
+		uint32_t len;
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable;
+	uint8_t ttl_sub_ipv4;
+	uint8_t ttl_sub_outer;
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+/* A handle is either a fully resourced flow or a learned FLM entry. */
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
+/*
+ * Public flow handle, kept in a doubly linked list per device. The
+ * union is discriminated by "type": FLOW uses the first member (flow
+ * definition plus allocated NIC resources), FLM the second (learned
+ * flow data only).
+ */
+struct flow_handle {
+	enum flow_handle_type type;
+
+	struct flow_eth_dev *dev;
+	struct flow_handle *next;
+	struct flow_handle *prev;
+
+	union {
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		struct {
+			uint32_t flm_data[10];
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			struct flow_handle *flm_owner;
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+/* Reverse-mapping entry: users of a translated group and where its
+ * forward mapping lives, so it can be cleared on last release.
+ */
+struct group_lookup_entry_s {
+	uint64_t ref_counter;
+	uint32_t *reverse_lookup; /* points into translation_table; NULL if free */
+};
+
+/* Per-device group translation state. */
+struct group_handle_s {
+	uint32_t group_count;
+
+	/* (owner_id, group_in) -> translated group; sized group_count * OWNER_ID_COUNT */
+	uint32_t *translation_table;
+
+	/* indexed by translated group, sized group_count */
+	struct group_lookup_entry_s *lookup_entries;
+};
+
+/*
+ * Allocate the group translation state for a device.
+ *
+ * @param handle      out: receives the new opaque group handle (NULL on failure)
+ * @param group_count number of translatable groups
+ * @return 0 on success, -1 on allocation failure
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	/* Original dereferenced *handle before checking it */
+	if (*handle == NULL)
+		return -1;
+	group_handle = *handle;
+
+	group_handle->group_count = group_count;
+	group_handle->translation_table =
+		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	/*
+	 * Original returned success even when the member allocations
+	 * failed; fail cleanly instead of handing out a half-built handle.
+	 */
+	if (group_handle->translation_table == NULL ||
+			group_handle->lookup_entries == NULL) {
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+		free(*handle);
+		*handle = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Free a group handle created by flow_group_handle_create(); NULL-safe. */
+int flow_group_handle_destroy(void **handle)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)*handle;
+
+	if (group_handle == NULL)
+		return 0;
+
+	free(group_handle->translation_table);
+	free(group_handle->lookup_entries);
+	free(group_handle);
+	*handle = NULL;
+
+	return 0;
+}
+
+/*
+ * Translate (owner_id, group_in) to a device-unique group number,
+ * allocating a free translated group and taking a reference on first
+ * use, or just adding a reference on subsequent lookups.
+ *
+ * @return 0 on success (*group_out set), -1 on bad args or exhaustion
+ */
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	/*
+	 * The table holds group_count entries per owner, so the row
+	 * stride must be group_count. The original used OWNER_ID_COUNT
+	 * as stride, which indexes out of bounds whenever
+	 * owner_id >= group_count (identical only for group_count == 256).
+	 */
+	table_ptr = &group_handle->translation_table[owner_id *
+				 group_handle->group_count + group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		/* find the lowest unused translated group (0 is reserved) */
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			/* all translated groups in use */
+			return -1;
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+/*
+ * Drop one reference on a translated group; when the last reference
+ * goes away the forward mapping in the translation table is cleared.
+ */
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *entry;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Group 0 is never translated and carries no reference */
+	if (translated_group == 0)
+		return 0;
+
+	entry = &group_handle->lookup_entries[translated_group];
+
+	if (entry->reverse_lookup == NULL || entry->ref_counter == 0)
+		return 0;
+
+	entry->ref_counter -= 1;
+	if (entry->ref_counter == 0) {
+		/* last user gone - clear the forward mapping as well */
+		*entry->reverse_lookup = 0;
+		entry->reverse_lookup = NULL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+/* Fixed bit permutation of a 32-bit word (part of the hash mixing). */
+static uint32_t shuffle(uint32_t x)
+{
+	return (((x & 0x00000002) << 29) | ((x & 0xAAAAAAA8) >> 3) |
+		((x & 0x15555555) << 3) | ((x & 0x40000000) >> 29));
+}
+
+/* Rotate right by s with inverted wrapped-in bits; 0 < s < 32 assumed. */
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+	return ((x >> s) | ((~x) << (32 - s)));
+}
+
+/* Nonlinear combine of two 32-bit words from rotated variants of both. */
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	uint32_t x1 = ror_inv(x, 15);
+	uint32_t x2 = ror_inv(x, 13);
+	uint32_t y1 = ror_inv(y, 3);
+	uint32_t y2 = ror_inv(y, 27);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+/* One 32-bit mixing round: combine then permute. */
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	return shuffle(combine(x, y));
+}
+
+/*
+ * 64-bit lane-wise variants of the 32-bit helpers above: each operates
+ * on the two independent 32-bit halves of a 64-bit word, so one 64-bit
+ * operation mixes two 32-bit lanes in parallel. The mask m isolates the
+ * wrapped-in bits of each lane, which are taken inverted (via XOR).
+ */
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t m = 0xE0000000E0000000ULL;
+
+	return (((x >> 3) | m) ^ ((x << 29) & m));
+}
+
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t m = 0xFFF80000FFF80000ULL;
+
+	return (((x >> 13) | m) ^ ((x << 19) & m));
+}
+
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t m = 0xFFFE0000FFFE0000ULL;
+
+	return (((x >> 15) | m) ^ ((x << 17) & m));
+}
+
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+
+	return (((x >> 27) | m) ^ ((x << 5) & m));
+}
+
+/* Two-lane version of shuffle(). */
+static uint64_t shuffle64(uint64_t x)
+{
+	return (((x & 0x0000000200000002) << 29) |
+		((x & 0xAAAAAAA8AAAAAAA8) >> 3) |
+		((x & 0x1555555515555555) << 3) |
+		((x & 0x4000000040000000) >> 29));
+}
+
+/* Pack two 32-bit words into one 64-bit lane pair (x = high, y = low). */
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	return (((uint64_t)x << 32) | y);
+}
+
+/* Two-lane version of combine(). */
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	uint64_t x1 = ror_inv15(x);
+	uint64_t x2 = ror_inv13(x);
+	uint64_t y1 = ror_inv3(y);
+	uint64_t y2 = ror_inv27(y);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+/* One two-lane mixing round: combine then permute. */
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	return shuffle64(combine64(x, y));
+}
+
+/*
+ * Reduce a 16-word (512-bit) key to a single 32-bit hash value using a
+ * binary tree of mixing rounds (see diagram). The 64-bit rounds mix two
+ * 32-bit lanes at a time; the final rounds fold down to 32 bits.
+ */
+static uint32_t calc16(const uint32_t key[16])
+{
+	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
+	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
+	/*   0       1       2       3       4       5       6       7     Layer 1   */
+	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
+	/*       0               1               2               3         Layer 2   */
+	/*        \______.______/                 \______.______/                    */
+	/*               0                               1                 Layer 3   */
+	/*                \______________.______________/                            */
+	/*                               0                                 Layer 4   */
+	/*                              / \                                          */
+	/*                              \./                                          */
+	/*                               0                                 Layer 5   */
+	/*                              / \                                          */
+	/*                              \./                                Layer 6   */
+	/*                             value                                         */
+
+	uint64_t z;
+	uint32_t x;
+
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+/*
+ * Hash a 16-word key and split the hash into one record index per bank.
+ *
+ * result[i] receives the record index for bank i (result must hold at
+ * least hsh->banks entries). Returns the raw 32-bit hash value.
+ */
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	/* widen the 32-bit hash when more index bits than 32 are consumed */
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+/*
+ * Initialize hasher geometry for a CAM with "banks" banks of
+ * "nb_records" records each.
+ *
+ * cam_records_bw is the number of bits needed to index one bank,
+ * i.e. ceil(log2(nb_records)); nb_records must be > 1 for log2() to
+ * be well defined here.
+ *
+ * @return always 0
+ */
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	/* was "_banks" (undeclared), which broke the TESTING build */
+	int res = hash_test(hsh, banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+/*
+ * Self test: hashes a fixed key and compares raw hash and per-bank
+ * indexes against precomputed expected values. Only the first 4 words
+ * of inval are set; the rest are zero per C initialization rules.
+ * Returns the number of mismatches (0 = pass).
+ */
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65;
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	/* reproduce gethash()'s index extraction for a 3-bank layout */
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+/* Hasher geometry for a banked CAM (see init_hasher()/gethash()). */
+struct hasher_s {
+	int banks; /* number of CAM banks */
+	int cam_records_bw; /* bits needed to index the records of one bank */
+	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
+	int cam_bw; /* banks * cam_records_bw - total index bits consumed */
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+/* SW shadow of one KCC CAM slot: owning flow def and its reference count. */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner;
+	int ref_cnt;
+};
+
+/* Linear index of the CAM slot used by "kcc" in bank "bnk". */
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+/* byte sizes of the shadow CAM and of the unique-ID allocation bitmap */
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+/* NOTE(review): file-scope scratch stack makes the cuckoo-move recursion
+ * non-reentrant - confirm all callers are serialized.
+ */
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * Attach this flow def to the shared per-NIC KCC-CAM manager, creating
+ * it on first use. The single allocation is carved into four regions:
+ * shadow CAM | cuckoo-move counter | unique-ID bitmap | hasher state.
+ */
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		/* NOTE(review): calloc result is not checked; a failure here
+		 * leaves *handle NULL and the pointers below dangling off NULL.
+		 */
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+/* Free the shared KCC-CAM manager and clear the handle; NULL-safe. */
+void kcc_free_ndev_resource_management(void **handle)
+{
+	void *mgr = *handle;
+
+	*handle = NULL;
+	if (mgr == NULL)
+		return;
+
+	free(mgr);
+	NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+}
+
+/*
+ * Key for KCC CAM
+ */
+/* Sideband type 0: no sideband data present. */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+/* Sideband type 1: outer VLAN TPID (high 16 bits) + VID (low 12 bits). */
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+/* Sideband type 2: 24-bit VXLAN VNI with marker bit 25 set. */
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+/* KM category to be emitted when this KCC entry matches. */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+/*
+ * Allocate the lowest free KCC unique ID from the shared bitmap and
+ * store it in kcc->id. Returns the ID, or -1 when all IDs are in use.
+ */
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	/* scan the allocation bitmap byte by byte, then bit by bit */
+	for (uint32_t byte = 0; byte < BE_UNIQUE_IDS_SIZE; byte++) {
+		if (kcc->kcc_unique_ids[byte] == 0xff)
+			continue;
+
+		for (uint32_t bit = 0; bit < 8; bit++) {
+			uint8_t msk = (uint8_t)(1U << bit);
+
+			if (kcc->kcc_unique_ids[byte] & msk)
+				continue;
+
+			kcc->kcc_unique_ids[byte] =
+				(uint8_t)(kcc->kcc_unique_ids[byte] | msk);
+			kcc->id = (uint16_t)(byte * 8 + bit);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+/* Return kcc->id to the shared bitmap and mark the def ID-less. */
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	/* bitmap position: byte = id / 8, bit = id % 8 */
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		/* freeing an ID that is not allocated indicates a logic error */
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+/* Compare two KCC keys as whole 64-bit words; returns 1 if identical. */
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	return (kcc->key64 == kcc1->key64) ? 1 : 0;
+}
+
+/*
+ * Write this flow def's key/category/id into its CAM slot in "bank"
+ * and flush to HW; also claims the slot in the SW shadow CAM.
+ * Returns the flush result, or -1 on any register write failure.
+ */
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	/* NOTE(review): the shadow is claimed even if the flush failed */
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+/*
+ * Zero this flow def's CAM slot in "bank" in HW, release the SW shadow
+ * slot, and clear the cached key/category (the unique ID is freed by
+ * the caller). Returns the flush result, or -1 on write failure.
+ */
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+/*
+ * Try to move the entry owned by "kcc" from its current bank into any
+ * bank whose slot (for this key's record indexes) is free, to vacate
+ * its current slot for another key. Returns 1 on success, 0 if every
+ * alternative slot is occupied.
+ */
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller If you change this code in future updates, this may no
+			 * longer be true then!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo move: try to free the slot at "bank_idx" by moving
+ * its owner elsewhere, recursing up to "levels" deep. Slots already on
+ * the move path are kept in the file-scope reserved stack so the
+ * recursion never revisits them (not reentrant - see stack definition).
+ * Returns 1 if the slot was freed, 0 otherwise.
+ */
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	/* note: "kcc" is also what the BE_CAM_KCC_DIST_IDX() macro expands to use */
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this slot so deeper levels do not pick it again */
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			/* a deeper slot was vacated; now this entry can move */
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/* scratch 16-word hash input; words 2-15 stay zero (not reentrant) */
+static uint32_t kcc_hsh_key[16];
+
+/*
+ * Program this KCC entry into the CAM: hash the key to get one record
+ * index per bank, take the first free bank, otherwise try cuckoo moves
+ * to free one. Returns 0 on success, -1 if no bank could be freed or
+ * the populate failed.
+ */
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	/* NOTE(review): assumes kcc_banks <= MAX_BANKS - confirm cap */
+	int val[MAX_BANKS];
+
+	/* word order swapped relative to key32[] - see log below */
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	/* NOTE(review): log assumes at least 3 banks */
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1;
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+/*
+ * Public entry point: program a new KCC match entry into the CAM.
+ * Returns 0 on success, -1 on failure.
+ * NOTE(review): "%016lx" for uint64_t is not portable to 32-bit
+ * targets; PRIx64 would be - confirm supported platforms.
+ */
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	return kcc_write_data_to_cam(kcc);
+}
+
+/* Undo kcc_write_data_to_cam(): clear the HW entry if it was flushed. */
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res;
+
+	/* nothing programmed in HW - nothing to clear */
+	if (!kcc->flushed_to_target)
+		return 0;
+
+	res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+	kcc->flushed_to_target = 0;
+	kcc->bank_used = 0;
+	return res;
+}
+
+/*
+ * Take one more reference on the CAM entry shared by this key.
+ * Returns the new reference count.
+ */
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	/* NOTE(review): only an assert guards bank_used here, while the
+	 * dec path below checks and returns -1 - consider matching them.
+	 */
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+/*
+ * Drop one reference on the CAM entry; the HW entry is cleared when the
+ * count reaches zero. Returns the remaining count, or -1 on a bad bank.
+ */
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Mask patterns the CAM lookup can handle.
+ * A match element whose per-word mask (word_len 32-bit words) equals one
+ * of these patterns may be placed in the CAM; any other mask forces the
+ * element into the TCAM (see km_add_match_elem()).
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+/* Number of supported CAM mask patterns in cam_masks[] */
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Per CAM (bank, record) slot: which KM flow definition owns it (NULL = free) */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index into the cam_dist[] ownership table for (bank, record) */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/*
+ * Index of the record this flow hashes to in a given bank.
+ * GCC statement expression; expects a variable named "km" in scope.
+ */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Max recursion depth for cuckoo displacement (see move_cuckoo_index_level) */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/*
+ * Addresses reserved along the current cuckoo displacement chain.
+ * NOTE(review): file-scope scratch state - assumes single-threaded use;
+ * confirm callers serialize flow programming.
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Per TCAM (bank, record) slot: which KM flow definition owns it (NULL = free) */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index into the tcam_dist[] ownership table for (bank, record) */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach the per-NIC-device CAM/TCAM record management state to this
+ * flow definition.  The shared state (*handle) is one contiguous zeroed
+ * allocation holding, in order: the CAM ownership table, the cuckoo move
+ * counter, the TCAM ownership table and the hasher state.  It is
+ * allocated on first use and shared by all flows on the device.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/* Out of memory - leave km unattached */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* Carve the shared allocation into its sub-tables */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the shared CAM/TCAM record manager allocated by
+ * km_attach_ndev_resource_management() and clear the handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle == NULL)
+		return;
+
+	free(*handle);
+	*handle = NULL;
+	NT_LOG(DBG, FILTER,
+	       "Free NIC DEV CAM and TCAM record manager\n");
+}
+
+/*
+ * Add one match element (word_len x 32-bit value/mask words, extracted
+ * at start_id + offset) to the KM flow definition.  word_len 3 is padded
+ * to 4; only 1, 2 and 4 are valid.  The element's mask is compared
+ * against cam_masks[] to decide whether it can live in the CAM or must
+ * be placed in the TCAM.
+ * Returns 0 on success, -1 on invalid word_len or when the match
+ * element array is full.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	/* reject overflow of the fixed-size match element array */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM - no need to scan further */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+				break;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable inclusion of the color/info word in the KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first match element that is not yet consumed, is not a
+ * side-band (SWX) element, and has the requested word length.
+ * Returns its index, or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	int idx;
+
+	for (idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/*
+ * Human-readable name for a frame offset / side-band identifier.
+ * Debug-build helper used only for log output.
+ */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout from the added match elements.
+ * QWORD extractors are assigned first (largest elements preferred),
+ * then SWORD extractors, and finally side-band (SWX) words, which
+ * always force the key into the CAM.  Decides the lookup target (CAM
+ * or TCAM) from the cam_masks classification, and for TCAM computes
+ * the legal start bank offsets for the resulting key length.
+ * Returns 0 on success, -1 when the match cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * Fix: populate all 4 candidate start offsets.
+			 * The original wrote start_offsets[0] four times,
+			 * leaving [1..3] uninitialized while
+			 * num_start_offsets was still 4.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare this KM flow definition against an existing one (km1) to
+ * decide whether km can reuse km1's KM recipe and flow type.
+ * Returns the flow type to reuse, 0 when not compatible, or -1 when
+ * the keys clash (an identical masked key is already programmed).
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/*
+	 * Consistency fix: use match_map for both checks (the original
+	 * mixed km->match[i] and km->match_map[i]-> in the same loop).
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match_map[i]->extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program KM recipe @index in hardware from this flow definition:
+ * extractor selection (QW0/QW4 for quad words, DW8/DW10 for single
+ * words, or side-band SWX selection), key mask A, and then either the
+ * CAM-specific setup (entry length, flow-type mask, record pairing) or
+ * the TCAM-specific setup (bank bitmap and key length).
+ * Returns 0 on success, -1 when the extractor combination cannot be
+ * mapped onto the recipe resources.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	/* Counters of already-consumed extractor resources */
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* Side-band words only supported for CAM, one SWX slot */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* Plain single-word extractors: DW8 first, then DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* Quad-word extractors: QW0 first, then QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written MSB first (reverse word order) */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		/* One bank bit per key word, MSB-first bank numbering */
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's entry words and flow type into CAM @bank at the
+ * record index pre-computed in km->record_indexes[bank], claim the slot
+ * in the ownership table, and flush to hardware.  A paired entry (key
+ * longer than one CAM record) occupies record and record + 1.
+ * Returns 0 on success, non-zero on backend write failure.
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	/* total words to write: key plus optional info/color word */
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	/* words left over -> key spills into the paired (next) record */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's entry words and flow type in CAM @bank (and the
+ * paired record when used), release ownership in the distribution
+ * table, and flush to hardware.  Mirror of cam_populate().
+ * Returns 0 on success, non-zero on backend write failure.
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	/* paired record also needs clearing */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to move the CAM entry owned by @km from its current bank to any
+ * other bank where its hashed record index is free (cuckoo hashing).
+ * On success the entry is re-populated in the new bank, the old slot's
+ * ownership is released, and the device-wide move counter is bumped.
+ * Returns 1 when the entry was moved, 0 otherwise.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* paired entry also needs the adjacent slot free */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to free the slot @bank_idx in
+ * km_parent's distribution table by moving its current owner - or, up
+ * to @levels deep, the entries blocking that owner - to alternative
+ * banks.  cam_addr_reserved_stack records the addresses already claimed
+ * along the current displacement chain so the recursion will not reuse
+ * them.  Only entries with the same pairing as the parent are moved.
+ * Returns 1 when the slot was freed, 0 otherwise.
+ * NOTE(review): relies on the file-scope reservation stack - assumes
+ * single-threaded flow programming; confirm callers serialize.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* claim this address for the rest of the chain */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			/* a deeper slot was freed - the blocking entry must now move */
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Place this flow's key in the CAM: hash the key to get one candidate
+ * record index per bank, pick the first free bank, and if none is free
+ * try cuckoo-displacing existing entries (up to 4 levels deep).
+ * On success the entry is written and flushed, and km is marked as
+ * flushed to target.  Returns 0 on success, -1 when no slot could be
+ * found, or a backend error code.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/*
+	 * NOTE(review): log prints the first three bank indexes only -
+	 * presumably nb_cam_banks >= 3 on supported FPGAs; confirm.
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Pick a TCAM start bank and record for this flow, trying each of the
+ * candidate start offsets computed by km_key_create().  On success
+ * stores the bank in km->tcam_start_bank (the record was stored by
+ * tcam_find_free_record()) and returns 0; returns -1 when no room.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		int bank = km->start_offsets[bs_idx];
+
+		if (!tcam_find_free_record(km, bank))
+			continue;
+
+		km->tcam_start_bank = bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word of a TCAM record.  The TCAM is organized
+ * as per-byte value tables: for each of the word's 4 bytes and each of
+ * the 256 possible byte values, the record's bit is set when the value
+ * matches the masked key byte and cleared otherwise (mask bits of 0
+ * match any value).  Flushes the whole bank and claims ownership of
+ * (bank, record) on success.  Returns 0 on success, non-zero on
+ * backend error.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* locate the record's bit within the 96-bit record bitmap */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Place this flow's key in the TCAM: find a free record if the recipe
+ * was reused (tcam_record < 0), program the TCI (color and flow type),
+ * then write each key word into its bank.  Debug-mode backend tracing
+ * is suspended around the per-word writes to avoid excessive output.
+ * Returns 0 on success, non-zero on failure.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one bank per key word, starting at tcam_start_bank */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear a record's bit from every TCAM cell of one bank, flush the bank
+ * and drop the SW ownership bookkeeping for the (bank, record) slot.
+ * Returns 0 on success, otherwise the backend error code.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	uint32_t recs[3];
+	const int word_idx = record / 32;
+	const int bit_shift = record % 32;
+	const uint32_t bit = (1 << bit_shift);
+	int err = 0;
+
+	/* recs[3] must be wide enough to hold the full record bitmap */
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int pos = 0; pos < 4; pos++) {
+		for (int value = 0; value < 256; value++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 pos, value, recs);
+			if (err)
+				break;
+			recs[word_idx] &= ~bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 pos, value, recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, word_idx, bit);
+
+	return err;
+}
+
+/*
+ * Remove this flow's entry from the TCAM: zero its TCI (color and flow
+ * type) and clear the record bit from every bank used by the key words.
+ *
+ * Returns 0 on success, -1 when no bank/record was ever assigned,
+ * otherwise the first tcam_reset_bank() error.
+ * NOTE(review): unlike km_write_data_to_tcam(), the hw_mod_km_tci_set/
+ * _flush return values are discarded here - confirm that is intended.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	/* silence backend debug output during the bulk TCAM resets */
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one bank per key word, stop on first error */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Register 'km' as a reference to an identical, already-programmed match
+ * entry 'km1': append it to km1's reference chain and copy the target
+ * specific placement state so no new HW write is needed.
+ * Returns 0 on success, -1 for an unsupported target.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	/* point at the defining entry (chain head) */
+	km->root = km1->root ? km1->root : km1;
+
+	/* append km at the end of the reference chain */
+	while (tail->reference)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = tail->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->cam_paired = tail->cam_paired;
+		km->bank_used = tail->bank_used;
+		km->flushed_to_target = tail->flushed_to_target;
+		return 0;
+	case KM_TCAM:
+		km->tcam_start_bank = tail->tcam_start_bank;
+		km->tcam_record = tail->tcam_record;
+		km->flushed_to_target = tail->flushed_to_target;
+		return 0;
+	case KM_SYNERGY:
+	default:
+		return -1;
+	}
+}
+
+/*
+ * Program this match entry into its selected KM target with the given
+ * color (flow info word).
+ * Returns 0 on success, -1 on failure or for an unsupported target.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	switch (km->target) {
+	case KM_CAM:
+		return km_write_data_to_cam(km);
+	case KM_TCAM:
+		return km_write_data_to_tcam(km);
+	case KM_SYNERGY:
+	default:
+		return -1;
+	}
+}
+
+/*
+ * Remove this match entry from SW bookkeeping and, when required, from HW.
+ *
+ * Three mutually exclusive cases:
+ *  - the entry is a reference (it has a root): unlink it from the root's
+ *    reference chain; HW is untouched.
+ *  - the entry is a chain head with references: transfer CAM/TCAM slot
+ *    ownership to the first reference; HW is untouched.
+ *  - the entry is a sole, flushed owner: reset the HW entry.
+ * Returns 0 on success, -1 for an unsupported target.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* case 1: unlink this reference from the chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* case 2: hand the HW slot over to the first reference */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				/* paired entry occupies the next slot too */
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* case 3: last user - remove the entry from HW */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One entry in the tunnel database (singly linked list). */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg; /* tunnel match fields (addresses, ports) */
+	struct tunnel_cfg_s cfg_mask; /* mask applied to 'cfg' when matching */
+	uint32_t flow_stat_id; /* (uint32_t)-1 until the tunnel is defined */
+	uint8_t vport; /* allocated virtual port number */
+	int refcnt; /* number of users holding this tunnel */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/* Return 1 when 'virt_port' lies in the virtual tunnel port range, else 0. */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS)
+		return 1;
+	return 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* database of known tunnels (head of the singly linked list) */
+static struct tunnel_s *tunnels;
+
+/* allocation map for virtual tunnel ports; non-zero means in use */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual tunnel port number.
+ * Returns the port on success, 255 when all ports are in use.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	for (unsigned int idx = 0;
+			idx < MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET;
+			idx++) {
+		if (vport[idx] == 0) {
+			vport[idx] = 1;
+			return (uint8_t)(idx + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ * Returns 0 on success; for an out-of-range port the -1 wraps to 255
+ * because of the uint8_t return type (kept for interface compatibility).
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET ||
+			virt_port >= MAX_HW_VIRT_PORTS)
+		return -1;
+
+	vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+	return 0;
+}
+
+/* Compare two values under the intersection of both masks. */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked compare of IPv4 src/dst addresses and UDP port pair of two tunnels. */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked compare of IPv6 src/dst address words and UDP port pair. */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Compare an existing tunnel against a candidate configuration/mask pair.
+ * Returns non-zero when the tunnel type matches and the masked address
+ * and port fields agree (v4 or v6 depending on the stored ipversion).
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up a tunnel in the database, or create it when not found.
+ *
+ * @param tnlcfg      tunnel configuration to match/insert
+ * @param tnlcfg_mask mask for the configuration fields
+ * @param tun_set     non-zero for a tunnel "set" (full definition),
+ *                    zero for a match search against defined tunnels
+ *
+ * Returns the (ref-counted) tunnel, or NULL when out of virtual ports
+ * or out of memory.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* out of memory - give the vport back */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel; when the last reference goes away the
+ * tunnel is unlinked from the database, its virtual port is released and
+ * the memory is freed.
+ * Returns 0 on success, -1 if the tunnel is not in the database.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *prev = NULL;
+	struct tunnel_s *cur;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+	/* find tunnel in list */
+	for (cur = tunnels; cur && cur != tnl; cur = cur->next)
+		prev = cur;
+
+	if (!cur) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--cur->refcnt == 0) {
+		if (prev)
+			prev->next = cur->next;
+		else
+			tunnels = cur->next;
+		flow_tunnel_free_virt_port(cur->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       cur->vport);
+		free(cur);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel definition from a flow element list starting at *idx.
+ *
+ * @param elem flow element array, terminated by FLOW_ELEM_TYPE_END
+ * @param idx  in: index of the tunnel start element; out: index of the
+ *             first element after the tunnel (or the END element)
+ * @param vni  when non-NULL, receives the VXLAN VNI (or (uint32_t)-1)
+ *             and the call is treated as a tunnel match; when NULL it
+ *             is a tunnel set command
+ *
+ * Returns the tunnel database entry, or NULL for an invalid definition.
+ *
+ * Fix: tnlcfg_mask was left uninitialized; it is only partially filled
+ * below and later read in full by tunnel_get()/check_tun_match() and by
+ * memcmp(), which was an uninitialized stack read.  Zero it up front.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		/* elements must appear in ascending type order */
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* assemble the 24-bit VNI from the 3 bytes */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port number assigned to the tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Mark the tunnel as defined by assigning its flow statistics id. */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy out the configuration of the tunnel matching 'vport' (and, unless
+ * flow_stat_id is (uint32_t)-1, also the given flow statistics id).
+ * Returns 0 when found, -1 otherwise.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun; tun = tun->next) {
+		if (tun->vport != vport)
+			continue;
+		if (flow_stat_id != tun->flow_stat_id &&
+				flow_stat_id != (uint32_t)-1)
+			continue;
+
+		memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Accumulate 16-bit words for an IP checksum starting from 'seed', then
+ * fold the carries back into the low 16 bits (one's complement sum).
+ * The caller inverts/uses the result as needed.
+ * NOTE(review): the odd-size tail adds the low byte of the next 16-bit
+ * word, which is endianness dependent - the only caller in this file
+ * passes sizeof(struct ipv4_hdr_s) (even), so the tail path looks
+ * unused; confirm before relying on odd sizes.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy 'size' bytes of the raw (unmasked) spec data from a flow element. */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	const uint8_t *src = (const uint8_t *)elem->spec;
+	uint8_t idx = 0;
+
+	while (idx < size) {
+		result[idx] = src[idx];
+		idx++;
+	}
+}
+
+/*
+ * Build the VXLAN encapsulation header for a flow definition from the
+ * ETH / IPv4|IPv6 / UDP / VXLAN flow elements, appending each header
+ * into fd->tun_hdr and precalculating the IPv4 checksum seed.
+ *
+ * @param be   backend (used to verify ROA version for IPv6 tunnels)
+ * @param fd   flow definition whose tun_hdr is filled in
+ * @param elem element list terminated by FLOW_ELEM_TYPE_END (may be NULL)
+ *
+ * Returns 0 on success, -1 on an unsupported element or too-old ROA.
+ *
+ * Fix: the FLOW_DEBUG MAC dumps printed addr_b[5] twice and never
+ * addr_b[4] for both dmac and smac; corrected to print all six bytes.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force a plain 20-byte IPv4 header */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* checksum/length are filled per-packet; precalc the seed */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Query the backend whether the CAT module is present in this FPGA. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_cat_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Allocate and initialize the CAT module shadow/cache structures.
+ *
+ * Reads the CAT version and all resource counts from the backend and
+ * allocates one contiguous cache area (via callocate_mod) holding the
+ * per-version register shadow tables.  Returns 0 on success, a negative
+ * error on a bad resource count or an unsupported CAT version.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory resource counts: must be strictly positive. */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* Optional resources: zero is allowed, only negative is an error. */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	/* KM interface module ids as reported by the backend (stored as-is). */
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks are present. */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/* Allocate the per-version shadow tables in one contiguous area. */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT cache area allocated by hw_mod_cat_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the pointer is
+ * cleared afterwards to defend against double free / use after free.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset category function entry @i to permissive defaults: every
+ * protocol and error field is set to "accept all" so the entry matches
+ * any frame until it is explicitly configured.
+ *
+ * NOTE(review): only the status of the initial PRESET_ALL call is
+ * propagated; the return values of the individual field setters are
+ * ignored (the index was already validated by the preset call).
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields below only exist from CAT version 21 onwards. */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Zero the entire CAT shadow cache and flush every table to hardware,
+ * bringing the module to a known state.  Version 18 has a single KM
+ * interface; later versions flush per configured KM/FLM interface
+ * (m0 always, m1 only when a second interface is present).
+ * Returns 0 on success, -1 on any flush failure.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC is optional; only flush when the FPGA exposes a KCC table. */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist only from version 22 onwards. */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush CFN entries [start_idx, start_idx + count) to the hardware.
+ * ALL_ENTRIES flushes the whole table and requires start_idx == 0.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES) {
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, 0,
+						be->cat.nb_cat_funcs);
+	}
+
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Common get/set worker for all CFN fields.  @get selects read (1) or
+ * write (0); @word_off indexes multi-word fields (e.g. PM_CMP).
+ * Dispatches first on the CAT version, then on the field id.  Special
+ * pseudo-fields: SET_ALL_DEFAULTS resets the whole entry, PRESET_ALL
+ * memsets it to the byte value of *value, and COMPARE/FIND/COPY_FROM
+ * operate on whole entries rather than single fields.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a two-word field; word_off selects the word. */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has a single KM interface, hence one km_or field. */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* Copy whole entry *value into entry @index (v21+ only). */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a two-word field; word_off selects the word. */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a single CFN field through the common modify worker. */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read a single CFN field through the common modify worker. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, do_get);
+}
+
+/*
+ * Map an (interface selector, KM module id) pair to the cache array
+ * index used by the per-interface shadow tables.  Version 18 has a
+ * single interface: always index 0.  KM_FLM_IF_SECOND may only address
+ * the m1 interface; the first selector accepts m0 (index 0) or m1
+ * (index 1), with m0 taking precedence.  Returns a negative error code
+ * when the id matches no configured interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num == KM_FLM_IF_SECOND)
+		return (be->cat.km_if_m1 == km_if_id) ?
+		       1 : error_unsup_field(__func__);
+
+	if (be->cat.km_if_m0 == km_if_id)
+		return 0;
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush KCE (KM category enable) entries to hardware.  Each KCE word
+ * holds one enable bit per CFN, eight CFNs at a time, so indices and
+ * counts are in units of nb_cat_funcs / 8.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries via the KM interface (module id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kce_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush KCE entries via the FLM interface (module id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kce_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Common get/set worker for KCE fields.  Index is in units of
+ * nb_cat_funcs / 8 (one enable-bit word covers eight CFNs).  Version 18
+ * has a single enable_bm; v21/v22 store one per KM/FLM interface,
+ * selected by km_if_idx.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCE field via the KM interface (module id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Get a KCE field via the KM interface (module id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+/* Set a KCE field via the FLM interface (module id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Get a KCE field via the FLM interface (module id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/*
+ * KCS
+ */
+/*
+ * Flush KCS (KM category select) entries to hardware for the resolved
+ * KM/FLM interface.  Indices are per CFN (nb_cat_funcs entries).
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries via the KM interface (module id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kcs_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush KCS entries via the FLM interface (module id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kcs_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Common get/set worker for KCS fields.  One category per CFN; v21/v22
+ * hold one category per KM/FLM interface, selected by km_if_idx.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCS field via the KM interface (module id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Get a KCS field via the KM interface (module id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+/* Set a KCS field via the FLM interface (module id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Get a KCS field via the FLM interface (module id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE (flow type enable) entries to hardware.  The table size is
+ * (nb_cat_funcs / 8) * nb_flow_types * key_cnt, where key_cnt is 2 for
+ * CAT versions before 20 and 4 from version 20 onwards.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries via the KM interface (module id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_fte_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush FTE entries via the FLM interface (module id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_fte_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Common get/set worker for FTE fields.  Index is bounded by
+ * (nb_cat_funcs / 8) * nb_flow_types * key_cnt (key_cnt: 2 before
+ * version 20, 4 after).  v21/v22 store one enable bitmap per KM/FLM
+ * interface, selected by km_if_idx.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one FTE field via the KM module (km_if_id = 0). */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Read one FTE field via the KM module (km_if_id = 0). */
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Write one FTE field via the FLM module (km_if_id = 1). */
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Read one FTE field via the FLM module (km_if_id = 1). */
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/* Flush CTE entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CTE enable bit-mask word.
+ * NOTE(review): the access always goes through the v18 view even for
+ * v22, whose hw_mod_cat_v22_s declares a cat_v22_cte_s table; this
+ * presumably relies on enable_bm being the first member of both layouts
+ * inside a union — confirm against the hw_mod_cat_s definition.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTE field at the given index. */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTE field at the given index. */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CTS table lines [start_idx, start_idx + count) to the backend.
+ * Each CAT function occupies addr_size consecutive table lines.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+	const unsigned int total = be->cat.nb_cat_funcs * addr_size;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CTS table-line field.
+ * NOTE(review): unlike hw_mod_cat_cts_flush() this does not special-case
+ * _VER_ < 15 when computing addr_size; harmless in practice since only
+ * versions 18/21/22 are accepted below, but the two should be kept in sync.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTS field at the given table-line index. */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTS field at the given table-line index. */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/* Flush COT entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one COT entry field.
+ * Besides plain field access, three pseudo-fields are handled:
+ *   HW_CAT_COT_PRESET_ALL - memset the whole entry to (uint8_t)*value
+ *                           (set only; rejected for get)
+ *   HW_CAT_COT_COMPARE    - delegate to do_compare_indexes()
+ *   HW_CAT_COT_FIND       - delegate to find_equal_index(), which
+ *                           presumably returns a matching index in *value
+ *                           (helpers defined elsewhere — see their docs)
+ * Versions 18/21/22 share the v18 COT layout.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one COT field at the given index. */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+/* Read one COT field at the given index. */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/* Flush CCT entries; the table holds 4 entries per CAT function. */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_cat_funcs * 4;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CCT entry field.
+ * Versions 18/21/22 share the v18 CCT layout.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCT field at the given index. */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCT field at the given index. */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/* Flush KCC CAM entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.kcc_size;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one KCC CAM entry field.
+ * word_off selects one of the two 32-bit key words for HW_CAT_KCC_KEY
+ * and is ignored for the other fields.
+ * Versions 18/21/22 share the v18 KCC layout.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			/*
+			 * key[] has exactly 2 words; reject negative offsets
+			 * too — the original "word_off > 1" check let a
+			 * negative word_off index out of bounds.
+			 */
+			if (word_off < 0 || word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KCC CAM field; word_off selects the key word for KCC_KEY. */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KCC CAM field; word_off selects the key word for KCC_KEY. */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush EXO entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_pm_ext;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one EXO entry field.
+ * EXO_OFS is a signed 32-bit offset, hence get_set_signed().
+ * Versions 18/21/22 share the v18 EXO layout.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one EXO field at the given index. */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+/* Read one EXO field at the given index. */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/* Flush RCK entries; the table holds 64 entries per PM extractor. */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_pm_ext * 64;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one RCK data word.
+ * Versions 18/21/22 share the v18 RCK layout.
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCK field at the given index. */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCK field at the given index. */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/* Flush LEN comparator entries [start_idx, start_idx + count). */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_len;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one LEN comparator entry field
+ * (lower/upper bounds, dyn1/dyn2 selectors, invert flag).
+ * Versions 18/21/22 share the v18 LEN layout.
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LEN field at the given index. */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+/* Read one LEN field at the given index. */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CCE entries to the backend.  The CCE table has a fixed size of
+ * 4 entries (no backend resource count is queried for it).
+ */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CCE entry field (v22 only).
+ * The bound check uses the fixed CCE table size (4 entries) to match
+ * hw_mod_cat_cce_flush(); the previous check against be->cat.nb_len was
+ * a copy/paste from the LEN accessor and allowed out-of-range CCE
+ * indexes whenever nb_len > 4.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCE field at the given index (v22 only). */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCE field at the given index (v22 only). */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CCS entries to the backend.  The CCS table has a fixed size of
+ * 1024 entries (no backend resource count is queried for it).
+ */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CCS entry field (v22 only).
+ * The bound check uses the fixed CCS table size (1024 entries) to match
+ * hw_mod_cat_ccs_flush(); the previous check against be->cat.nb_len was
+ * a copy/paste from the LEN accessor and rejected valid CCS indexes.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCS field at the given index (v22 only). */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCS field at the given index (v22 only). */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CFN entry: per-category-function enable/compare words. */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE entry: single enable bit-mask word. */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS entry: category select value. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE entry: flow-type enable bit-mask. */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE entry: enable bits, addressable as one word or as named bits. */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS table line: two category values per line. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT entry: color and KM words. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT entry: same shape as COT. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO entry: dyn selector and signed offset. */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK entry: single data word. */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN comparator entry: [lower, upper] bounds, dyn selectors, invert. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC CAM entry: two key words plus category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Pointer view over all v18 CAT tables (storage allocated elsewhere). */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/*
+ * CFN entry, v21: extends the v18 layout with tunnel/TTL error checks
+ * and splits km_or into per-interface km0_or/km1_or.
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE entry, v21: one enable bit-mask per KM/FLM interface. */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS entry, v21: one category value per KM/FLM interface. */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE entry, v21: one enable bit-mask per KM/FLM interface. */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/*
+ * Pointer view over all v21 CAT tables; tables unchanged since v18
+ * reuse the v18 entry structs.
+ */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CTE entry, v22: v18 layout plus the new rrb enable bit. */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE entry (new in v22): immediate and indirect words. */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS entry (new in v22): per-resource enable/value word pairs. */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/*
+ * Pointer view over all v22 CAT tables; reuses v21/v18 entry structs
+ * for unchanged tables and adds the CCE/CCS tables.
+ */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Report whether the FLM (flow matcher) module exists on this backend,
+ * as answered by the backend interface.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Query FLM capabilities from the backend and allocate the register
+ * cache for the detected module version (17 or 20).  Version 20 extends
+ * the v17 cache with twelve extra statistics counters.
+ *
+ * Returns 0 on success, -1 when callocate_mod() fails, or the module
+ * error code when a resource count is invalid or the version is
+ * unsupported.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* report the resource actually queried (was: "flm_variant") */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* 26 pointer/count/size triplets: 12 config tables + 14 counters */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* The 26 v17 entries plus 12 v20-only statistics counters. */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single FLM cache allocation made by hw_mod_flm_alloc(). */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	if (be->flm.base) {
+		free(be->flm.base);
+		be->flm.base = NULL;
+	}
+}
+
+/*
+ * Reset the cached FLM state to defaults and flush the static
+ * configuration (control, timeout, scrub, all RCP entries) to hardware.
+ * Always returns 0; flush errors from the backend are not propagated here.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached CONTROL registers to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get or set one CONTROL field in the cache (get != 0 reads into *value,
+ * otherwise writes *value).  PRESET_ALL is set-only and memsets the whole
+ * control struct with the low byte of *value.  Cache-only; use
+ * hw_mod_flm_control_flush() to commit.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public cache setter for a CONTROL field. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Public cache getter for a CONTROL field. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached STATUS registers to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Re-read the STATUS registers from hardware into the cache. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Get or set one cached STATUS field (get != 0 reads into *value). */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached TIMEOUT register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the cached flow-timeout value T (get != 0 reads). */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached SCRUB register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the cached scrub interval I (get != 0 reads). */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_BIN register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the cached LOAD_BIN value (get != 0 reads). */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_PPS (packets/s load) register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the cached LOAD_PPS value (get != 0 reads). */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_LPS (lookups/s load) register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the cached LOAD_LPS value (get != 0 reads). */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_APS (accesses/s load) register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the cached LOAD_APS value (get != 0 reads). */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached PRIO registers to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set one of the four cached limit/flow-type priority pairs. */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) PST profile entries to hardware.
+ * ALL_ENTRIES flushes every profile.  NOTE(review): a negative start_idx
+ * is only caught indirectly by the unsigned wrap in the bound check.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/* Get or set one field of PST profile [index]; PRESET_ALL is set-only
+ * and memsets the entry with the low byte of *value.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP (recipe) entries to hardware.
+ * ALL_ENTRIES flushes every category's recipe.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get or set one field of RCP entry [index].  All fields are single
+ * 32-bit words except HW_FLM_RCP_MASK, which copies the full 10-word
+ * mask array and therefore requires *value to point at 10 uint32_t.
+ * PRESET_ALL is set-only and memsets the whole entry.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* Multi-word field: copy the whole 10-word mask. */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the multi-word RCP MASK field (value must point at 10 uint32_t).
+ * Only HW_FLM_RCP_MASK may be written through this entry point; any
+ * other field is an unsupported-field error, matching the error
+ * convention used by the rest of this module (was reported as an
+ * unsupported-version error).
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Set a single-word RCP field.  HW_FLM_RCP_MASK is multi-word and must
+ * go through hw_mod_flm_rcp_set_mask() instead.
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field from the cache (MASK reads all 10 words). */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Re-read the learn/info/status buffer-control counters from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one cached buffer-control counter; these fields are read-only. */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Re-read all FLM statistics counters from hardware into the cache. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one cached statistics counter.  The v17 counters are handled in
+ * the outer switch; the nested default handles the counters that only
+ * exist from version 18 onward (here: version 20), guarded by _VER_.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* Remaining counters exist only on version >= 18. */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (struct flm_v17_lrn_data_s, passed as a word
+ * array) to the FLM learn FIFO via the backend.  Returns the backend's
+ * flush result, or a field/version error.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to word_cnt words of flow-info records into value.
+ * NOTE(review): the backend's return value is discarded here, unlike
+ * hw_mod_flm_lrn_data_set_flush() above - confirm this is intentional.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (struct flm_v17_sta_data_s, as a word array)
+ * into value.  The backend's return value is likewise discarded.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay used by the FLM_V17_MBR_IDx macros to pack/unpack the four
+ * 28-bit member indices stored in flm_v17_lrn_data_s::mbr_idx (14 bytes
+ * = 4 x 28 bits).  ID1/ID2 overlay bytes 0..7, ID3/ID4 overlay bytes
+ * 7..14.  NOTE(review): casting a byte pointer (possibly at offset +7)
+ * to a uint64_t bit-field struct relies on unaligned access and
+ * implementation-defined bit-field layout - verify on target compilers.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* FLM v17 register-cache structures: one struct per hardware register
+ * group, mirrored by the accessor functions in hw_mod_flm.c.
+ */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* Read-only module state bits. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* Four limit/flow-type priority pairs. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* One PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* One per-category recipe; mask[] is the only multi-word field. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Learn/info/status FIFO occupancy counters (read-only). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/* Wire-format records exchanged with the FLM engine; must be byte-packed
+ * to match the hardware layout (bit ranges are noted as high:low (width)).
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record returned by the engine (counters + flow id + cause). */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Status record returned by the engine (per-operation result bits). */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* v17 statistics counters: one single-word struct per hardware counter
+ * register, read back through hw_mod_flm_stat_get().
+ */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/* Top-level FLM v17 cache: pointers into the single contiguous
+ * allocation made by callocate_mod() in hw_mod_flm_alloc().
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v2.0 statistics registers added on top of the v1.7 set.
+ * Each struct mirrors one 32-bit counter register (STA/INF record events,
+ * PCK/CSH lookup outcomes and CUC cache-unit events, per register name).
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Cache layout for the FLM module, register format v2.0.
+ * v2.0 reuses every v1.7 record type unchanged and appends the new v2.0
+ * statistics counters at the end, so the leading members intentionally
+ * point at flm_v17_* structs.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Report whether the backend exposes an HSH (hashing) module. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_hsh_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the HSH module version from the backend and allocate the
+ * version-specific shadow cache (only v5 is supported here; the v4 entry
+ * count defined above is currently unused).
+ *
+ * @return 0 on success, -1 on allocation failure, or a negative error
+ *         code for an unsupported module version.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* One contiguous allocation holding the RCP table. */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the HSH shadow cache allocated by hw_mod_hsh_alloc(). */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	if (be->hsh.base == NULL)
+		return;
+
+	free(be->hsh.base);
+	be->hsh.base = NULL;	/* guard against double free */
+}
+
+/*
+ * Reset the HSH module: clear the cached shadow registers and flush the
+ * now all-zero RCP table to the hardware.
+ *
+ * @return result of the RCP flush (0 on success).
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached RCP table to the
+ * hardware; count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = (count == ALL_ENTRIES) ? (int)be->hsh.nb_rcp : count;
+
+	if (start_idx + n > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, n);
+}
+
+/*
+ * Read or write a single HSH recipe field in the shadow cache.
+ *
+ * @param be       backend handle (cache + iface callbacks)
+ * @param field    recipe field selector
+ * @param index    recipe index, must be < be->hsh.nb_rcp
+ * @param word_off word index for array-typed fields (masks); for the
+ *                 COMPARE/FIND pseudo-fields it carries the peer index
+ * @param value    input (set) or output (get) word
+ * @param get      non-zero reads the cached value, zero writes it
+ * @return 0 on success, negative error code otherwise
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * mac_port_mask[] has HSH_RCP_MAC_PORT_MASK_SIZE
+			 * elements, so valid offsets are 0..SIZE-1; reject
+			 * word_off == SIZE too (was '>', an off-by-one that
+			 * allowed an out-of-bounds word access).
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/*
+			 * word_mask[] has HSH_RCP_WORD_MASK_SIZE elements;
+			 * same off-by-one fix as MAC_PORT_MASK above.
+			 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one recipe field; thin wrapper over the shared modify helper. */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	const int get = 0;
+
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, get);
+}
+
+/* Read one recipe field; thin wrapper over the shared modify helper. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	const int get = 1;
+
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * Shadow copy of one HSH v5 recipe (RCP) record.
+ * Field names mirror the FPGA register fields; the *_ofs members are
+ * signed offsets, everything else is an unsigned register word.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];	/* HSH_RCP_MAC_PORT_MASK_SIZE words */
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];	/* HSH_RCP_WORD_MASK_SIZE words */
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Cache layout for the HSH module, register format v5. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Report whether the backend exposes an HST (header stripper) module. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_hst_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the HST module version and category count from the backend and
+ * allocate the version-specific RCP shadow cache (only v2 is supported).
+ *
+ * @return 0 on success, -1 on allocation failure, or a negative error
+ *         code for a bad resource count / unsupported version.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		/* One contiguous allocation holding the RCP table. */
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the HST shadow cache allocated by hw_mod_hst_alloc(). */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	if (be->hst.base == NULL)
+		return;
+
+	free(be->hst.base);
+	be->hst.base = NULL;	/* guard against double free */
+}
+
+/*
+ * Reset the HST module: clear the cached shadow registers and flush the
+ * now all-zero RCP table to the hardware.
+ *
+ * @return result of the RCP flush (0 on success).
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached RCP table to the
+ * hardware; count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = count;
+
+	if (n == ALL_ENTRIES)
+		n = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + n) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, n);
+}
+
+/*
+ * Read or write a single HST recipe field in the shadow cache.
+ *
+ * @param be     backend handle (cache + iface callbacks)
+ * @param field  recipe field selector; for FIND/COMPARE, *value carries
+ *               the peer index to match against
+ * @param index  recipe index, must be < be->hst.nb_hst_rcp_categories
+ * @param value  input (set) or output (get) word
+ * @param get    non-zero reads the cached value, zero writes it
+ * @return 0 on success, negative error code otherwise
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Bug fix: the return value of find_equal_index() was
+			 * discarded, making the error check below dead code.
+			 * Capture it in rv like every sibling module does.
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one recipe field; thin wrapper over the shared modify helper. */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	const int get = 0;
+
+	return hw_mod_hst_rcp_mod(be, field, index, &value, get);
+}
+
+/* Read one recipe field; thin wrapper over the shared modify helper. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	const int get = 1;
+
+	return hw_mod_hst_rcp_mod(be, field, index, value, get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * Shadow copy of one HST v2 recipe (RCP) record: strip window
+ * (start/end dyn+ofs) plus three modifier slots, one register word each.
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* Cache layout for the HST module, register format v2. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Report whether the backend exposes an IOA module. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_ioa_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the IOA module version and resource counts from the backend and
+ * allocate the version-specific shadow caches (only v4 is supported).
+ *
+ * @return 0 on success, -1 on allocation failure, or a negative error
+ *         code for a bad resource count / unsupported version.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* One contiguous allocation: RCP table, TPID config, ROA EPP table. */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the IOA shadow cache allocated by hw_mod_ioa_alloc(). */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	if (be->ioa.base == NULL)
+		return;
+
+	free(be->ioa.base);
+	be->ioa.base = NULL;	/* guard against double free */
+}
+
+/*
+ * Reset the IOA module: clear the cached shadow registers, program the
+ * default custom TPIDs and flush all tables to the hardware.
+ *
+ * Bug fix: the return codes of the flush/config calls were silently
+ * dropped and 0 was returned unconditionally; propagate the first
+ * failure instead, matching hw_mod_hsh_reset()/hw_mod_hst_reset().
+ *
+ * @return 0 on success, negative error code otherwise.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	int rv;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	rv = hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	if (rv != 0)
+		return rv;
+
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	rv = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	if (rv != 0)
+		return rv;
+	rv = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	if (rv != 0)
+		return rv;
+	rv = hw_mod_ioa_config_flush(be);
+	if (rv != 0)
+		return rv;
+
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	return hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached RCP table to the
+ * hardware; count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = count;
+
+	if (n == ALL_ENTRIES)
+		n = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + n) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, n);
+}
+
+/*
+ * Read or write a single IOA recipe field in the shadow cache.
+ *
+ * @param be     backend handle (cache + iface callbacks)
+ * @param field  recipe field selector; for FIND/COMPARE, *value carries
+ *               the peer index to match against
+ * @param index  recipe index, must be < be->ioa.nb_rcp_categories
+ * @param value  input (set) or output (get) word
+ * @param get    non-zero reads the cached value, zero writes it
+ * @return 0 on success, negative error code otherwise
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* Fill the whole record with one byte value (set only). */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one recipe field; thin wrapper over the shared modify helper. */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	const int get = 0;
+
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, get);
+}
+
+/* Read one recipe field; thin wrapper over the shared modify helper. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int get = 1;
+
+	return hw_mod_ioa_rcp_mod(be, field, index, value, get);
+}
+
+/* Flush the cached special-TPID configuration to the hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Write one IOA configuration value into the shadow cache (set only;
+ * there is no matching get). Use hw_mod_ioa_config_flush() afterwards
+ * to commit to hardware.
+ *
+ * @return 0 on success, negative error code otherwise.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached ROA EPP table to
+ * the hardware; count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	int n = count;
+
+	if (n == ALL_ENTRIES)
+		n = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + n) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    n);
+}
+
+/*
+ * Read or write a single ROA EPP entry field in the shadow cache.
+ *
+ * @param be     backend handle (cache + iface callbacks)
+ * @param field  entry field selector; for FIND/COMPARE, *value carries
+ *               the peer index to match against
+ * @param index  entry index, must be < be->ioa.nb_roa_epp_entries
+ * @param value  input (set) or output (get) word
+ * @param get    non-zero reads the cached value, zero writes it
+ * @return 0 on success, negative error code otherwise
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* Fill the whole record with one byte value (set only). */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one ROA EPP field; thin wrapper over the shared modify helper. */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	const int get = 0;
+
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, get);
+}
+
+/* Read one ROA EPP field; thin wrapper over the shared modify helper. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	const int get = 1;
+
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* Shadow copy of one IOA v4 recipe (RCP) record, one register word per field. */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID configuration words (see HW_IOA_CONFIG_CUST_TPID_0/1). */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* One ROA egress-push-port (EPP) table entry. */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* Cache layout for the IOA module, register format v4. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Report whether the backend exposes a KM (key matcher) module. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_km_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the KM module version and all table dimensions from the backend,
+ * then allocate the version-specific shadow caches (only v7 is supported).
+ *
+ * @return 0 on success, -1 on allocation failure, or a negative error
+ *         code for a bad resource count / unsupported version.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* Word counts match KM_RCP_MASK_D_A_SIZE / KM_RCP_MASK_B_SIZE. */
+		be->km.nb_km_rcp_mask_a_word_size = 12;
+		be->km.nb_km_rcp_mask_b_word_size = 6;
+		/* One contiguous allocation: RCP, CAM, TCAM, TCI and TCQ tables. */
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the KM shadow cache allocated by hw_mod_km_alloc(). */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	if (be->km.base == NULL)
+		return;
+
+	free(be->km.base);
+	be->km.base = NULL;	/* guard against double free */
+}
+
+/*
+ * Reset the KM module: clear the shadow cache and push the all-zero
+ * RCP/CAM/TCAM/TCI/TCQ tables to the hardware so cache and HW agree.
+ *
+ * NOTE(review): the return values of the flush helpers are ignored here
+ * (the commented-out err hints this was considered) — confirm whether
+ * reset failures should be propagated as in the HSH/HST modules.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached RCP table to the
+ * hardware; count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	int n = count;
+
+	if (n == ALL_ENTRIES)
+		n = be->km.nb_categories;
+	if ((unsigned int)(start_idx + n) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, n);
+}
+
+/*
+ * Read or write a single KM recipe field in the shadow cache.
+ *
+ * @param be       backend handle (cache + iface callbacks)
+ * @param field    recipe field selector
+ * @param index    recipe index, must be < be->km.nb_categories
+ * @param word_off word index for the MASK_A/MASK_B array fields
+ * @param value    input (set) or output (get) word
+ * @param get      non-zero reads the cached value, zero writes it
+ * @return 0 on success, negative error code otherwise
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			/* Fill the whole record with one byte value (set only). */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * NOTE(review): '>' admits word_off == KM_RCP_MASK_D_A_SIZE;
+			 * if mask_d_a[] has exactly KM_RCP_MASK_D_A_SIZE (12)
+			 * elements this is an off-by-one like the HSH module —
+			 * km_v7_rcp_s is not visible here, please confirm.
+			 */
+			if (word_off > KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* NOTE(review): same '>' vs '>=' concern as MASK_A above. */
+			if (word_off > KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field into the shadow cache (wrapper, get=0). */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KM RCP field from the shadow cache (wrapper, get=1). */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush a range of cached KM CAM records to hardware via the backend iface.
+ * ALL_ENTRIES expands to every record in every bank; the requested range is
+ * bounds-checked against the total bank/record geometry before flushing.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* end is an absolute record index: bank * records-per-bank + offset */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get=1) or write (get=0) one field of a cached KM CAM record.
+ * bank/record are range-checked, then mapped to a flat cache index.
+ * Only KM module version 7 is supported.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* write-only: byte-fill the whole cached record */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM CAM field into the shadow cache (wrapper, get=0). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM CAM field from the shadow cache (wrapper, get=1). */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCAM entries to hardware.
+ * Geometry is hard-coded as 4 * 256 entries per bank (presumably 4 byte
+ * lanes x 256 byte values -- TODO confirm against the register layout).
+ * ALL_ENTRIES covers all banks, ALL_BANK_ENTRIES exactly one bank.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	/* NOTE(review): byte/byte_val are always passed as 0,0 -- the flush
+	 * always starts at the beginning of start_bank.
+	 */
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Read or write one cached KM TCAM entry (three 32-bit words per entry).
+ * The flat index is bank * 4 * 256 + byte * 256 + byte_val; writes mark the
+ * entry dirty so a later flush pushes it to hardware. Version 7 only.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			/* write-only: preset all 4*256 entries of the bank
+			 * with value_set[0..2] and mark them dirty
+			 */
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			/* same flat index as start_index above */
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCAM entry (3 words) into the shadow cache (get=0). */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read one KM TCAM entry (3 words) from the shadow cache (get=1). */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush cached KM TCI records to hardware; a bank holds
+ * nb_tcam_bank_width records. ALL_ENTRIES selects everything.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one field of a cached KM TCI record.
+ * The flat index is bank * nb_tcam_bank_width + record. Version 7 only.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCI field into the shadow cache (wrapper, get=0). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM TCI field from the shadow cache (wrapper, get=1). */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCQ records to hardware; same bank/record geometry
+ * as the TCI table (nb_tcam_bank_width records per bank).
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one field of a cached KM TCQ record.
+ * The flat index is bank * nb_tcam_bank_width + record. Version 7 only.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCQ field via pointer into the shadow cache (get=0). */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one KM TCQ field from the shadow cache (wrapper, get=1). */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * Shadow-cache image of one KM (key matcher) RCP (recipe) entry, v7.
+ * Field names mirror the hardware register fields; *_ofs fields are
+ * signed byte offsets, hence int32_t.
+ * NOTE(review): array sizes must match KM_RCP_MASK_D_A_SIZE (12) and
+ * KM_RCP_MASK_B_SIZE (6) used by the accessors -- TODO confirm.
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* Cached KM CAM record: six match words and six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* Cached KM TCAM entry: three data words plus a software dirty flag
+ * used to limit flushing to modified entries.
+ */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* Cached KM TCI record. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* Cached KM TCQ record. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table cache pointers for the KM module, version 7. */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA image. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query PDB version and resource counts from the backend and allocate the
+ * shadow caches (RCP table + single config record). Returns 0 on success,
+ * negative error otherwise. Only PDB version 9 is supported.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		/* one contiguous allocation holding both cache arrays */
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB shadow cache (single allocation made by alloc). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+/*
+ * Zero the PDB shadow cache and flush everything to hardware.
+ * Flush errors are OR-accumulated and returned.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush [start_idx, start_idx+count) cached PDB RCP entries to hardware. */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Read or write one field of a cached PDB RCP entry. Besides plain field
+ * access, PRESET_ALL byte-fills the record, FIND locates an entry equal to
+ * rcp[index], and COMPARE checks two entries for equality. Version 9 only.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			/* write-only: byte-fill the whole cached record */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			/* *_REL offsets are signed */
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB RCP field into the shadow cache (wrapper, get=0). */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one PDB RCP field from the shadow cache (wrapper, get=1). */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Push the cached PDB config record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set one field of the cached PDB config record; the caller must invoke
+ * hw_mod_pdb_config_flush() to apply it. Version 9 only.
+ * NOTE(review): set-only -- there is no matching config_get accessor.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * Shadow-cache image of one PDB (packet descriptor builder) RCP entry, v9.
+ * ofs*_rel fields are signed relative offsets, hence int32_t.
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* Cached global PDB configuration record. */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table cache pointers for the PDB module, version 9. */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA image. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query QSL version and resource counts from the backend and allocate the
+ * shadow caches (RCP, QST, QEN, UNMQ tables). QEN/UNMQ sizes are fixed
+ * constants. Only QSL version 7 is supported.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* one contiguous allocation holding all four cache arrays */
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL shadow cache (single allocation made by alloc). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+/*
+ * Zero the QSL shadow cache and flush all tables to hardware.
+ * NOTE(review): unlike hw_mod_pdb_reset(), the flush return codes are
+ * ignored here, and the UNMQ flush uses literal 256 instead of
+ * QSL_QNMQ_ENTRIES -- consider aligning both.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0, 256);
+
+	return 0;
+}
+
+/* Flush [start_idx, start_idx+count) cached QSL RCP entries to hardware. */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write one field of a cached QSL RCP entry. PRESET_ALL byte-fills
+ * the record, FIND locates an entry equal to rcp[index], COMPARE checks two
+ * entries for equality. Version 7 only.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			/* write-only: byte-fill the whole cached record */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL RCP field into the shadow cache (wrapper, get=0). */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL RCP field from the shadow cache (wrapper, get=1). */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) cached QSL QST entries to hardware. */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write one field of a cached QSL QST (queue selection table)
+ * entry. PRESET_ALL byte-fills the record. Version 7 only.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			/* write-only: byte-fill the whole cached record */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QST field into the shadow cache (wrapper, get=0). */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QST field from the shadow cache (wrapper, get=1). */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush cached QSL QEN (queue enable) entries; table size is fixed. */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/* Read or write the enable bit of one cached QSL QEN entry. V7 only. */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QEN field into the shadow cache (wrapper, get=0). */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QEN field from the shadow cache (wrapper, get=1). */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush cached QSL UNMQ (unmatched-queue) entries; table size is fixed. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/* Read or write one field of a cached QSL UNMQ entry. V7 only. */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL UNMQ field into the shadow cache (wrapper, get=0). */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL UNMQ field from the shadow cache (wrapper, get=1). */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* Shadow-cache image of one QSL (queue selection logic) RCP entry, v7. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* Cached QSL queue selection table (QST) entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* Cached QSL queue enable (QEN) entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* Cached QSL unmatched-packet queue (UNMQ) entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table cache pointers for the QSL module, version 7. */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA image. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query the RMC version and allocate its shadow cache (a single ctrl
+ * record). 0x10003 encodes version 1.3 via VER_MAJOR/VER_MINOR.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC shadow cache (single allocation made by alloc). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+/*
+ * Zero the RMC cache, set a safe blocking default (stats, keep-alive,
+ * all MAC ports and RPP slices blocked) and flush the ctrl record.
+ * NOTE(review): HW_RMC_BLOCK_STATT is set twice; the second call is
+ * redundant.
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Push the cached RMC ctrl record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Read or write one field of the cached RMC ctrl record.
+ * Only version 1.3 (0x10003) is supported.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC ctrl field into the shadow cache (wrapper, get=0). */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one RMC ctrl field from the shadow cache (wrapper, get=1). */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/*
+ * Shadow-cache layout of the RMC v1.3 CTRL register.
+ * Field semantics follow the HW_RMC_* accessors in hw_mod_rmc.c; the
+ * block_* fields gate traffic classes/ports (see the reset code, which
+ * blocks all MAC ports with 0xff and all RPP slices with 0xf).
+ */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;       /* block statistics traffic */
+	uint32_t block_keepa;       /* block keep-alive traffic */
+	uint32_t block_rpp_slice;   /* per-slice block mask */
+	uint32_t block_mac_port;    /* per-MAC-port block mask */
+	uint32_t lag_phy_odd_even;  /* NOTE(review): LAG odd/even PHY select — confirm */
+};
+
+/* RMC v1.3 module cache: a single CTRL record. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA module exists in this FPGA image. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Detect the ROA module version, size its resources and allocate the
+ * shadow cache (tunhdr, tuncfg, config, lagcfg) in one base allocation.
+ * Returns 0 on success, negative on unsupported version or alloc failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): tunnel categories appear to be grouped 4 per cache
+	 * record — confirm against the ROA register layout.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the ROA shadow cache allocated by hw_mod_roa_alloc().
+ * Only base is freed (the version-specific pointers are carved out of it
+ * by callocate_mod); free(NULL) is a no-op so no guard is needed.
+ */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset the ROA module: zero the shadow cache, program defaults and flush
+ * everything to hardware.
+ *
+ * Fix: error codes from all flushes are now OR'ed into the return value
+ * (matching hw_mod_tpe_reset()); previously only the TUNHDR flush result
+ * was returned and TUNCFG/CONFIG/LAGCFG failures were silently dropped.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	/* forward both recirculate and normal packets to TX port 0/1 */
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush [start_idx, start_idx+count) cached TUNHDR records to hardware. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Cache accessor for TUNHDR: reads/writes one 32-bit word (word_off) of
+ * the tunnel header template at the given category index.
+ * NOTE(review): word_off is not range-checked against the 16-word
+ * tunnel_hdr array, and COMPARE passes word_off where the sibling _mod
+ * functions pass *value — confirm both are intentional.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one TUNHDR word in the cache (flush separately to apply). */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one TUNHDR word from the cache into *value. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) cached TUNCFG records to hardware. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Cache accessor for all TUNCFG fields at a category index.
+ * PRESET_ALL fills the whole record with the byte value (set only);
+ * FIND/COMPARE delegate to the generic index helpers; all other fields
+ * are plain get/set on the cached record.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one TUNCFG field in the cache (flush separately to apply). */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one TUNCFG field from the cache into *value. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Write the single cached ROA CONFIG record to hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Cache accessor for the module-global ROA CONFIG forwarding flags
+ * (no index — there is exactly one config record).
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CONFIG field in the cache (flush separately to apply). */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Read one CONFIG field from the cache into *value. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) cached LAGCFG records to hardware. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/* Cache accessor for the single LAGCFG field (TX PHY port) per entry. */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one LAGCFG field in the cache (flush separately to apply). */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one LAGCFG field from the cache into *value. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* Raw tunnel header template: 16 x 32-bit words, written one word at a
+ * time through HW_ROA_TUNHDR (see hw_mod_roa_tunhdr_mod()).
+ */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration; one field per HW_ROA_TUNCFG_* enum. */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;
+	uint32_t ipcs_precalc;
+	uint32_t iptl_upd;
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Module-global forwarding switches (single record per device). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG table entry: TX PHY port selection. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* ROA v6 shadow cache; all pointers are carved out of one allocation. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC module exists in this FPGA image. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Detect the SLC module version and allocate one RCP cache record per
+ * flow category. Returns 0 on success, negative on unsupported version
+ * or allocation failure.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC shadow cache allocated by hw_mod_slc_alloc().
+ * free(NULL) is a no-op so no guard is needed.
+ */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Reset the SLC module: zero the cache and flush every RCP to hardware. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx+count) cached SLC RCP records to hardware. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Cache accessor for SLC RCP fields at a category index.
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_v1_s) — the size of the container holding a
+ * single pointer — as the table entry size. The entry type is
+ * struct slc_v1_rcp_s (as allocated in hw_mod_slc_alloc() and as every
+ * other hw_mod_* module does), so memset covered too few bytes and the
+ * find/compare helpers walked the table with the wrong stride.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one SLC RCP field in the cache (flush separately to apply). */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC RCP field from the cache into *value. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR module exists in this FPGA image. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Detect the SLC LR module version and allocate one RCP cache record per
+ * flow category. Returns 0 on success, negative on unsupported version
+ * or allocation failure.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC LR shadow cache allocated by hw_mod_slc_lr_alloc().
+ * free(NULL) is a no-op so no guard is needed.
+ */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Reset the SLC LR module: zero the cache and flush every RCP to hardware. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx+count) cached SLC LR RCP records to hardware. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Cache accessor for SLC LR RCP fields at a category index.
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_lr_v2_s) — the size of the container holding
+ * a single pointer — as the table entry size. The entry type is
+ * struct slc_lr_v2_rcp_s (as allocated in hw_mod_slc_lr_alloc() and as
+ * every other hw_mod_* module does), so memset covered too few bytes and
+ * the find/compare helpers walked the table with the wrong stride.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one SLC LR RCP field in the cache (flush separately to apply). */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC LR RCP field from the cache into *value. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* One SLC LR slicer recipe; tail_ofs is a signed byte offset
+ * (accessed via get_set_signed in hw_mod_slc_lr_rcp_mod()).
+ */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;
+	uint32_t pcap;
+};
+
+/* SLC LR v2 shadow cache: one recipe per flow category. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* One SLC slicer recipe; tail_ofs is a signed byte offset
+ * (accessed via get_set_signed in hw_mod_slc_rcp_mod()).
+ */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;
+	uint32_t pcap;
+};
+
+/* SLC v1 shadow cache: one recipe per flow category. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE module exists in this FPGA image. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Detect the TPE module version, size all sub-resources (RCP categories,
+ * IFR categories for v2+, copy writers, replace depth/ext categories)
+ * and allocate the shadow caches in one base allocation.
+ * Returns 0 on success, negative on bad resource counts, unsupported
+ * version or allocation failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	/* IFR (in-flight reassembly/fragmentation) tables only exist from v2 */
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		/* v2 adds the RPP IFR and IFR RCP tables */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the TPE shadow cache allocated by hw_mod_tpe_alloc().
+ * free(NULL) is a no-op so no guard is needed.
+ */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Reset the TPE module: zero the shadow cache and flush every table to
+ * hardware. Error codes from all flushes are OR'ed into the return value.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* IFR tables only exist in version 2 */
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx+count) cached RPP IFR RCP records (v2 only). */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/* Cache accessor for RPP IFR RCP fields (enable + MTU) at an IFR index. */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPP IFR RCP field in the cache (flush separately to apply). */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP IFR RCP field from the cache into *value. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush [start_idx, start_idx+count) cached RPP RCP records to hardware. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Cache accessor for RPP RCP fields at a category index.
+ * The v1 cache pointers are used for both versions; v1 and v2 share the
+ * tpe_v1_rpp_v0_rcp_s layout (see hw_mod_tpe_alloc()).
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPP RCP field in the cache (flush separately to apply). */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP RCP field from the cache into *value. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx+count) cached IFR RCP records (v2 only). */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/* Cache accessor for IFR RCP fields (enable + MTU) at an IFR index. */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one IFR RCP field in the cache (flush separately to apply). */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IFR RCP field from the cache into *value. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+/* Flush [start_idx, start_idx+count) cached INS RCP records to hardware. */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Cache accessor for INS RCP fields (dyn/ofs/len) at a category index.
+ * v1 and v2 share the tpe_v1_ins_v1_rcp_s layout, so the v1 cache
+ * pointers are used for both versions.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one INS RCP field in the cache (flush separately to apply). */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one INS RCP field from the cache into *value. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush RPL_RCP shadow entries [start_idx, start_idx + count) to hardware;
+ * count == ALL_ENTRIES selects the whole table.
+ * NOTE(review): same signed/unsigned wrap caveat as the other flush helpers
+ * -- assumes start_idx >= 0.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single RPL_RCP shadow field.
+ * HW_TPE_PRESET_ALL memsets the entry (set only); HW_TPE_FIND /
+ * HW_TPE_COMPARE operate across the table.  Versions 1 and 2 supported.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL_RCP field (thin wrapper around hw_mod_tpe_rpl_rcp_mod). */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL_RCP field (thin wrapper around hw_mod_tpe_rpl_rcp_mod). */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush RPL_EXT shadow entries [start_idx, start_idx + count) to hardware;
+ * count == ALL_ENTRIES selects the whole table (nb_rpl_ext_categories deep).
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single RPL_EXT shadow field.
+ * Note: META_RPL_LEN is a software-only bookkeeping field (see
+ * tpe_v1_rpl_v2_ext_s) and is never written to hardware registers.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL_EXT field (thin wrapper around hw_mod_tpe_rpl_ext_mod). */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL_EXT field (thin wrapper around hw_mod_tpe_rpl_ext_mod). */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush RPL_RPL replace-data entries [start_idx, start_idx + count) to
+ * hardware; count == ALL_ENTRIES selects the whole table (nb_rpl_depth deep).
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) RPL_RPL shadow data.  Unlike the
+ * other _mod helpers, HW_TPE_RPL_RPL_VALUE moves a 4-word (128-bit) chunk,
+ * so 'value' must point to at least 4 uint32_t for that field.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* 128-bit payload: copy all 4 words in one go */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write RPL_RPL data.  Takes a pointer (not a scalar like the sibling
+ * *_set functions) because HW_TPE_RPL_RPL_VALUE is a 4-word field.
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Read RPL_RPL data (thin wrapper around hw_mod_tpe_rpl_rpl_mod). */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush CPY_RCP shadow entries [start_idx, start_idx + count) to hardware.
+ * The CPY table is two-dimensional in hardware (one bank per copy writer),
+ * flattened here to nb_cpy_writers * nb_rcp_categories entries.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single CPY_RCP shadow field.
+ * 'index' addresses the flattened writer x category table (see flush above).
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CPY_RCP field (thin wrapper around hw_mod_tpe_cpy_rcp_mod). */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CPY_RCP field (thin wrapper around hw_mod_tpe_cpy_rcp_mod). */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush HFU_RCP (header field update) shadow entries
+ * [start_idx, start_idx + count) to hardware; count == ALL_ENTRIES selects
+ * the whole table.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single HFU_RCP shadow field.
+ * The recipe is grouped into three length updaters (A/B/C), a TTL updater,
+ * and checksum/protocol metadata; each case maps one enum to one struct
+ * member via get_set().
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Length updater A */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* Length updater B */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* Length updater C */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL updater */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* Checksum / protocol metadata */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HFU_RCP field (thin wrapper around hw_mod_tpe_hfu_rcp_mod). */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HFU_RCP field (thin wrapper around hw_mod_tpe_hfu_rcp_mod). */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush CSU_RCP (checksum update) shadow entries
+ * [start_idx, start_idx + count) to hardware; count == ALL_ENTRIES selects
+ * the whole table.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single CSU_RCP shadow field.
+ * One checksum command per header: outer/inner L3 and L4.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CSU_RCP field (thin wrapper around hw_mod_tpe_csu_rcp_mod). */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CSU_RCP field (thin wrapper around hw_mod_tpe_csu_rcp_mod). */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/* RPP v0 recipe: RX packet pre-processor expansion amount. */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS v1 recipe: where (dyn/ofs) and how much (len) to insert. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL v2 recipe: replace region (dyn/ofs/len) and replace-data pointer. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL v2 extension table entry. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL v2 replace data: one 128-bit chunk as four 32-bit words. */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY v1 recipe: copy from selected reader into packet at dyn/ofs/len. */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/*
+ * HFU v1 recipe: three length updaters (A/B/C), a TTL updater and
+ * checksum/protocol metadata used by the header field update block.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU v0 recipe: checksum command per outer/inner L3/L4 header. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* TPE v1 shadow state: one dynamically sized table per sub-block. */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP v1 IFR recipe: fragmentation enable and MTU. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR v1 recipe: fragmentation enable and MTU. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/*
+ * TPE v2 shadow state: superset of v1 (same sub-block tables) plus the
+ * two IFR recipe tables introduced in version 2.
+ */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian (network byte order) scalar aliases used by the header structs
+ * below.  The typedef itself does not enforce byte order -- callers must
+ * convert explicitly.
+ */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks.
+ * FLOW_MARK_LACP is reserved for LACP traffic; ordinary marks must not
+ * exceed FLOW_MARK_MAX (== FLOW_MARK_LACP - 1).
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Queue identifier pair; presumably 'id' is the virtual queue index and
+ * 'hw_id' the underlying hardware queue -- TODO confirm against callers.
+ */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Pattern item types, mirroring the RTE_FLOW_ITEM_TYPE_* set this PMD
+ * supports.
+ */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather a restoration API
+	 * device specific extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather a
+	 * restoration API device specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* Packed to exactly 6 bytes so it can overlay the on-wire MAC address. */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format a MAC address as "XX:XX:XX:XX:XX:XX" (upper-case hex) into buf.
+ * snprintf truncates safely if size < 18, so buf is always NUL-terminated
+ * for size > 0.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header (packed, wire layout; multi-byte fields are big-endian)
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data; /* tag value to match */
+	uint8_t index; /* index of the tag register to match against */
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry of a pattern; arrays of these are terminated by
+ * FLOW_ELEM_TYPE_END.
+ */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * NOTE(review): presumably mirrors the rte_flow MODIFY_FIELD field ids —
+ * keep ordering consistent with the adaptation layer that translates them.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Which union member is valid depends on "field":
+ * FLOW_FIELD_VALUE uses "value", FLOW_FIELD_POINTER uses "pvalue",
+ * every other field id uses the level/offset pair.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ * Apply "operation" to "width" bits copied from "src" into "dst".
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* Set/add/subtract. */
+	struct flow_action_modify_data dst; /* Destination field description. */
+	struct flow_action_modify_data src; /* Source field description. */
+	uint32_t width; /* Number of bits to modify. */
+};
+
+/* One entry of a flow action list; "conf" points at the matching
+ * flow_action_xxx struct for "type" (may be NULL for conf-less actions).
+ */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf;
+};
+
+/* Error classification returned through struct flow_error. */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Error report filled in by the flow API; "message" is a static string. */
+struct flow_error {
+	enum flow_error_e type;
+	const char *message;
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY,
+	FLOW_LAG_SET_ALL,
+	FLOW_LAG_SET_BALANCE,
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ * Addresses and ports are stored in network byte order (BE).
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* 4 or 6 selects the union view - TODO confirm values */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle; /* opaque handle for a created flow */
+
+/*
+ * Device Management API
+ */
+/* Reset all flow state on the given adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/* Create/get the flow eth device for a physical port and attach its queues. */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ * rte_flow style validate/create/destroy/flush/query entry points.
+ * All return 0 (or a non-NULL handle) on success and report details
+ * through the optional "error" argument.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v2 6/8] net/ntnic: adds flow logic
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-17 14:43   ` [PATCH v2 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-17 14:43   ` Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 387481bb4a..a9892615c9 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -60,8 +60,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -80,6 +82,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/* Debug names for the flow resources; order must match enum res_type_e. */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global NIC device list; the LAG/config paths guard
+ * lookups on it with base_mtx.
+ */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Error message table indexed by enum flow_nic_err_msg_e;
+ * flow_nic_set_error() hands these strings to the caller.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+/*
+ * Translate an internal error message id into the caller supplied
+ * flow_error struct. A NULL "error" pointer is silently ignored.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+
+	if (error == NULL)
+		return;
+
+	error->message = err_msg[msg].message;
+	error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+		      FLOW_ERROR_GENERAL;
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free resource of the given type.
+ * Candidate indexes are scanned from 0 upwards in steps of "alignment".
+ * Returns the allocated index (ref count set to 1), or -1 if none is free.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	unsigned int idx = 0;
+
+	while (idx < ndev->res[res_type].resource_count) {
+		if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+			flow_nic_mark_resource_used(ndev, res_type, idx);
+			ndev->res[res_type].ref[idx] = 1;
+			return idx;
+		}
+		idx += alignment;
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific resource index "idx" of the given type.
+ * Returns 0 on success (ref count set to 1), -1 if already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (flow_nic_is_resource_used(ndev, res_type, idx))
+		return -1;
+
+	flow_nic_mark_resource_used(ndev, res_type, idx);
+	ndev->res[res_type].ref[idx] = 1;
+	return 0;
+}
+
+/*
+ * Allocate "num" contiguous resources of the given type; the first index
+ * is aligned to "alignment". Returns the first index on success, -1 when
+ * no contiguous run is available.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/*
+	 * Guard the loop bound: resource_count - (num - 1) would wrap for
+	 * num > resource_count and scan far out of bounds.
+	 */
+	if (num == 0 || num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* Check that the following num - 1 entries are free too. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/* Release a resource index: clears its "used" bit (ref count untouched). */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an extra reference on an already allocated resource.
+ * Returns 0 on success, -1 if the reference count would overflow.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	/* Refuse to wrap a saturated 32-bit counter. */
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Drop one reference on a resource and free it when the count reaches zero.
+ * Returns 1 while references remain, 0 when the resource was freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+
+	ndev->res[res_type].ref[index]--;
+
+	if (ndev->res[res_type].ref[index] == 0) {
+		flow_nic_free_resource(ndev, res_type, index);
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * Find the first in-use resource index at or after "idx_start".
+ * Returns the index, or -1 when no used entry remains.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	unsigned int i;
+
+	for (i = idx_start; i < ndev->res[res_type].resource_count; i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Allocate "count" flow resources for flow handle "fh".
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success - index/count stored in fh->resource[res_type]
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	int idx;
+
+	if (count > 1) {
+		/* Multiple resources must be contiguous. */
+		idx = flow_nic_alloc_resource_contig(ndev, res_type, count,
+						     alignment);
+	} else {
+		idx = flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	fh->resource[res_type].index = idx;
+	if (fh->resource[res_type].index < 0)
+		return -1;
+
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the specific resource index "idx" for flow handle "fh".
+ * Returns 0 on success, or the error from flow_nic_alloc_resource_index.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	/* NOTE(review): idx >= 0 must already hold for the alloc above to
+	 * succeed, so this check looks redundant unless the index field is
+	 * narrower than int - confirm the field type.
+	 */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program hash recipe "hsh_idx" with a predefined algorithm.
+ * HASH_ALGO_5TUPLE sets up an IPv6-sized 5-tuple hash with the adaptive
+ * IPv4 mask bit, so IPv4 traffic hashes consistently as well; any other
+ * value (incl. HASH_ALGO_ROUND_ROBIN) leaves the recipe cleared, which
+ * means round-robin distribution. Always returns 0.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		/* QW0/QW4 select src+dst IP words relative to DYN_FINAL_IP_DST. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		/* W8 adds the L4 port word to the hash input. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Words 0-8 participate in the hash; word 9 is masked out. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program hash recipe "hsh_idx" from the RSS field-flag combination "f".
+ * Only a fixed set of combinations is supported; anything else fails.
+ *
+ * Every hw_mod_hsh_rcp_set() result is accumulated into "res" so the
+ * error checks actually fire (the outer/inner IP cases previously
+ * discarded the return values).
+ *
+ * Returns 0 on success, -1 on unsupported flags or backend write failure.
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Hash on the first VLAN tag, treated as the C-VLAN; this
+		 * only holds for single-VLAN providers.
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS,
+					  hsh_idx, 0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS,
+					  hsh_idx, 0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the flow eth device for (adapter_no, port).
+ * Returns NULL when the adapter or port is unknown.
+ * NOTE(review): walks dev_base without taking base_mtx - same as the
+ * original; callers are expected to serialize.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev;
+	struct flow_eth_dev *dev;
+
+	for (nic_dev = dev_base; nic_dev; nic_dev = nic_dev->next) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+	}
+
+	if (nic_dev == NULL)
+		return NULL;
+
+	for (dev = nic_dev->eth_base; dev; dev = dev->next) {
+		if (dev->port == port)
+			return dev;
+	}
+
+	return NULL;
+}
+
+/* Find the NIC device for "adapter_no"; NULL when not registered. */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev;
+
+	for (ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			return ndev;
+	}
+	return NULL;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure LAG port pairing. For each bit N set in port_mask, ports
+ * N*2 and N*2+1 are merged and reported as incoming port N*2.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block ingress traffic from the MAC ports selected by port_mask.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one LAG table entry (TX phy port for "index") and flush it. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Configure the LAG table of an adapter.
+ *   FLOW_LAG_SET_ENTRY   : write "value" to table entry "index".
+ *   FLOW_LAG_SET_ALL     : write "value" to entry (index & 3) of every
+ *                          4-entry hash block.
+ *   FLOW_LAG_SET_BALANCE : distribute output between port 0 and port 1,
+ *                          where "value" is the percentage sent to port 0.
+ * Returns 0 on success, -1 for an unknown adapter or command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* LAG entry values: 1 selects port 0, 2 selects port 1. */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow specification without creating it.
+ * Only the inline profile is supported; vSwitch devices are rejected.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	const int is_vswitch =
+		dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH;
+
+	if (is_vswitch) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+
+	return flow_validate_profile_inline(dev, item, action, error);
+}
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	/* Flow creation is only implemented by the inline profile. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	/* Flow destruction is only implemented by the inline profile. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	/* Flushing all flows is only implemented by the inline profile. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	/* Flow queries are only implemented by the inline profile. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	/* Push dev onto the head of the NIC's eth-port linked list. */
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+/* Unlink eth_dev from the NIC's eth-port list; 0 on success, -1 if absent. */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link;
+
+	for (link = &ndev->eth_base; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on a NIC device: every eth-port device,
+ * any leftover flows, profile-specific management state and the KM/KCC
+ * resource managers.  Backend registers are reset by the caller.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/*
+	 * Error check: deleting the eth-ports above should also have removed
+	 * all flows; anything still linked here is leaked state.
+	 */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+		/*
+		 * The previous nested "#if defined(FLOW_DEBUG)" was redundant:
+		 * this whole loop is already inside #ifdef FLOW_DEBUG.
+		 */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/* Reset all flow state and backend registers for the given adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev)
+		return -1;
+	/* Tear down eth-ports/flows first, then reset the backend itself. */
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ *
+ * Creates (or re-creates) an eth-port device on the adapter, allocates its
+ * RX queues and programs the unmatched-packet (UNMQ) and queue-enable (QEN)
+ * hardware state.  Returns the new device, or NULL on failure.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * Delete the stale device with base_mtx released (the delete
+		 * path takes ndev->mtx itself), then re-acquire base_mtx.
+		 * BUG FIX: base_mtx was previously left unlocked here, which
+		 * made the unlock at the end of this function unbalanced.
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Enable each allocated queue in the QSL QEN bitmap (4 queues/word) */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+err_exit1:
+	/*
+	 * BUG FIX: base_mtx is held on both error paths; previously it was
+	 * only released via err_exit0, so a calloc failure (err_exit1)
+	 * returned with base_mtx still locked.
+	 */
+	pthread_mutex_unlock(&base_mtx);
+
+	free(eth_dev); /* free(NULL) is a no-op */
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an externally-allocated RX queue on the eth device and enable
+ * it in the QSL QEN bitmap.  Returns 0 on success, -1 if the queue table
+ * is already full.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	/*
+	 * Robustness fix: rx_queue[] holds FLOW_MAX_QUEUES + 1 entries
+	 * (0th is the exception queue); reject overflow instead of
+	 * writing past the end of the array.
+	 */
+	if (eth_dev->num_queues >= FLOW_MAX_QUEUES + 1)
+		return -1;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	/* Set the queue's enable bit in the QSL QEN register (4 queues/word) */
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy its flows, clear its UNMQ/QEN hardware
+ * state, release its RX queues and unlink it from the owning NIC device.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/*
+				 * BUG FIX: release the device mutex before
+				 * bailing out; previously this returned with
+				 * ndev->mtx still held, deadlocking the next
+				 * caller.
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Clear each queue's enable bit in QSL QEN (4 queues/word) */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper: look up a tunnel definition by flow_stat_id and vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate the allocation bitmap and reference counters for one resource
+ * type in a single calloc; 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+	uint8_t *mem;
+
+	assert(ndev->res[res_type].alloc_bm == NULL);
+
+	/* One zeroed block: bitmap first, ref counters right after it. */
+	mem = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (mem == NULL)
+		return -1;
+
+	ndev->res[res_type].alloc_bm = mem;
+	ndev->res[res_type].ref = (uint32_t *)&mem[bm_bytes];
+	ndev->res[res_type].resource_count = count;
+	return 0;
+}
+
+/* Free the combined bitmap/ref-counter allocation for one resource type. */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so the previous NULL guard was redundant */
+	free(ndev->res[res_type].alloc_bm);
+	/* Clear stale pointers so a repeated call cannot double-free */
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/* Insert ndev at the head of the global NIC list (protected by base_mtx). */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Remove ndev from the global NIC list (protected by base_mtx).
+ * Returns 0 on success, -1 if ndev was not on the list.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	struct flow_nic_dev **link;
+	int found = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	for (link = &dev_base; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			found = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return found;
+}
+
+/*
+ * Register a NIC backend and create its flow device: initializes the
+ * backend, sizes all resource managers from the backend capabilities and
+ * inserts the device on the global NIC list.  Returns NULL on failure.
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* Only backend interface version 1 is supported */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* Cap the addressable ports at 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* ndev is always non-NULL here (allocation failure returns early),
+	 * so the previous "if (ndev)" guard was redundant */
+	flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a flow NIC device created by flow_api_create(): reset all flow
+ * state, free every resource manager, shut down the backend, unlink the
+ * device from the global NIC list and free it.  Safe to call with NULL.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev) {
+		flow_ndev_reset(ndev);
+
+		/* delete resource management allocations for this ndev */
+		for (int i = 0; i < RES_COUNT; i++)
+			done_resource_elements(ndev, i);
+
+		flow_api_backend_done(&ndev->be);
+		list_remove_flow_nic(ndev);
+		free(ndev);
+	}
+	return 0;
+}
+
+/* Return the backend-specific device handle, or NULL for a NULL NIC dev. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev)
+		return ndev->be.be_dev;
+
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/* Number of RX queues on the port's eth device; -1 if the port is not open. */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* Robustness fix: avoid NULL dereference for an unopened port */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/* HW queue id of queue_no on the port's eth device; -1 if port not open. */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* Robustness fix: avoid NULL dereference for an unopened port */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/* FLM statistics are only available when the inline profile is active. */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/* Per-resource-type allocation state kept by the NIC device. */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields;
+};
+
+/* One opened eth-port on a NIC; linked into the owning flow_nic_dev. */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next eth-port on the same NIC */
+};
+
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* non-zero once flow management modules are set up */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* protects this NIC's flow and eth-port state */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+/* Set bit x in the byte-array bitmap arr */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+/*
+ * Clear bit x in the byte-array bitmap arr.
+ * Macro hygiene fix: arr is now captured in a temp (single evaluation,
+ * parenthesized), consistent with flow_nic_set_bit above.
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/* Non-zero if bit x is set in the byte-array bitmap arr */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Assert the element is currently free, then mark it allocated */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/* Clear the element's allocated bit in the resource bitmap */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* 1 if the element's allocated bit is set, 0 otherwise */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
/*
 * Enable or disable a FLM flow type for a category function (CFN).
 * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
 */
static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
			     int flow_type, int lookup, int enable)
{
	const int max_lookups = 4;
	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;

	/* Select the FTE table word and the bit position for this CFN */
	int fte_index =
		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
	int fte_field = cfn_index % cat_funcs;

	uint32_t current_bm = 0;
	uint32_t fte_field_bm = 1 << fte_field;

	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
			       fte_index, &current_bm);

	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
			    (~fte_field_bm & current_bm);

	/* Only write and flush the register when the bitmap changes */
	if (current_bm != final_bm) {
		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
				       KM_FLM_IF_FIRST, fte_index, final_bm);
		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
					 1);
	}

	return 0;
}
+
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	for (int i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+	}
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
/*
 * Bring up the FLM SDRAM: preset the control register, configure the split
 * SDRAM usage, then poll (up to ~1s, 1 us per iteration) for the DDR4
 * calibration to complete. On success the scrubber interval and flow
 * timeout are given their default values.
 * Returns 0 on success, -1 if calibration never signals done.
 */
static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
{
	int success = 0;

	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
			       0x10);
	hw_mod_flm_control_flush(&ndev->be);

	/* Wait for ddr4 calibration/init done */
	for (uint32_t i = 0; i < 1000000; ++i) {
		uint32_t value = 0;

		hw_mod_flm_status_update(&ndev->be);
		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
				      &value);
		if (value) {
			success = 1;
			break;
		}
		usleep(1);
	}

	if (!success) {
		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
		return -1;
	}

	/* Set the flow scrubber and timeout settings */
	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
	hw_mod_flm_timeout_flush(&ndev->be);

	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
	hw_mod_flm_scrub_flush(&ndev->be);

	return 0;
}
+
/*
 * (Re)initialize the FLM SDRAM and leave the FLM module in the requested
 * enable state. The sequence is order-dependent: disable FLM and clear all
 * RCP categories, wait for the module to go idle, run the SDRAM INIT cycle,
 * and finally restore ENABLE.
 * Returns 0 on success, -1 if the module never goes idle or never finishes
 * initialization.
 */
static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
{
	int success = 0;

	/*
	 * Make sure no lookup is performed during init, i.e.
	 * disable every category and disable FLM
	 */
	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
	hw_mod_flm_control_flush(&ndev->be);

	/* Category 0 is left untouched; clear categories 1..N-1 */
	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);

	/* Wait for FLM to enter Idle state */
	for (uint32_t i = 0; i < 1000000; ++i) {
		uint32_t value = 0;

		hw_mod_flm_status_update(&ndev->be);
		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
		if (value) {
			success = 1;
			break;
		}
		usleep(1);
	}

	if (!success) {
		/* "Flow matcher initialization failed - never idle"; */
		return -1;
	}

	success = 0;

	/* Start SDRAM initialization */
	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
	hw_mod_flm_control_flush(&ndev->be);

	for (uint32_t i = 0; i < 1000000; ++i) {
		uint32_t value = 0;

		hw_mod_flm_status_update(&ndev->be);
		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
				      &value);
		if (value) {
			success = 1;
			break;
		}
		usleep(1);
	}

	if (!success) {
		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
		return -1;
	}

	/* Set the INIT value back to zero to clear the bit in the SW register cache */
	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
	hw_mod_flm_control_flush(&ndev->be);

	/* Enable FLM */
	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
	hw_mod_flm_control_flush(&ndev->be);

	return 0;
}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
/*
 * Compact 64-bit identity of a flow's action set (flow type). Two flows with
 * equal .data can share the same FT entry. The union allows whole-value
 * compares/stores via .data while the bit-fields name the individual actions.
 */
struct flm_flow_ft_ident_s {
	union {
		struct {
			uint64_t in_use : 1;	/* entry is allocated */
			uint64_t drop : 1;	/* no destination: drop */
			uint64_t ltx_en : 1;	/* local TX to a physical port */
			uint64_t ltx_port : 1;	/* physical port for local TX */
			uint64_t queue_en : 1;	/* deliver to an RX queue */
			uint64_t queue : 8;	/* RX queue id */
			uint64_t encap_len : 8;	/* tunnel header length */
			uint64_t encap_vlans : 2; /* VLAN count in tunnel header */
			uint64_t encap_ip : 1;	/* 0 = IPv4, 1 = IPv6 tunnel */
			uint64_t decap_end : 5;	/* header strip end (dyn) */
			uint64_t jump_to_group : 8; /* target group for jump */
			uint64_t pad : 27;
		};
		uint64_t data;	/* whole identity for fast compare */
	};
};
+
/*
 * Packed description of how the FLM lookup key is built: for each of the two
 * quad-words (QW0/QW4) and two single-words (SW8/SW9) a dyn (header section
 * selector) and ofs (byte offset) pair, plus outer/inner protocol bits.
 * .data allows comparing two key definitions in one operation.
 */
struct flm_flow_key_def_s {
	union {
		struct {
			uint64_t qw0_dyn : 7;	/* QW0 dynamic header selector */
			uint64_t qw0_ofs : 8;	/* QW0 byte offset */
			uint64_t qw4_dyn : 7;	/* QW4 dynamic header selector */
			uint64_t qw4_ofs : 8;	/* QW4 byte offset */
			uint64_t sw8_dyn : 7;	/* SW8 dynamic header selector */
			uint64_t sw8_ofs : 8;	/* SW8 byte offset */
			uint64_t sw9_dyn : 7;	/* SW9 dynamic header selector */
			uint64_t sw9_ofs : 8;	/* SW9 byte offset */
			uint64_t outer_proto : 1; /* match on outer protocol */
			uint64_t inner_proto : 1; /* match on inner protocol */
			uint64_t pad : 2;
		};
		uint64_t data;	/* whole definition for fast compare */
	};
};
+
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw == 0) {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+	} else {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+	}
+}
+
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw == 0) {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+	} else {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+	}
+}
+
/* Per-group FLM state: the group-0 CFN/KM flow type used as template, the
 * key definition programmed into the RCP, and the flow-type (action set)
 * table for flows learned into this group.
 */
struct flm_flow_group_s {
	int cfn_group0;		/* CFN of the group-0 template; -1 when unset */
	int km_ft_group0;	/* KM flow type of the group-0 template */
	struct flow_handle *fh_group0;	/* flow handle of the group-0 entry */

	struct flm_flow_key_def_s key_def;	/* key layout locked in once miss is enabled */

	int miss_enabled;	/* nonzero once group-0 miss handling is programmed */

	/* One entry per flow type; .ident.data == 0 means free */
	struct flm_flow_group_ft_s {
		struct flm_flow_ft_ident_s ident;
		struct flow_handle *fh;
	} ft[FLM_FLOW_FT_MAX];

	/* Most recently used ft[] index ("cashed" is presumably "cached") */
	uint32_t cashed_ft_index;
};

/* Top-level FLM resource handle: one group per FLM RCP. */
struct flm_flow_handle_s {
	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
};
+
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
/* Free the FLM flow handle and clear the caller's pointer (free(NULL) is safe). */
static void flm_flow_handle_remove(void **handle)
{
	free(*handle);
	*handle = NULL;
}
+
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = flm_handle->groups[group_index].fh_group0;
+
+	return 0;
+}
+
/*
 * Program the FLM RCP (recipe) registers for a group: key word selection
 * (QW0/QW4/SW8/SW9 dyn+ofs), the lookup mask, the key id (group_index + 2),
 * outer/inner protocol bits and the byte counter offset. Flushes the RCP
 * when done. Returns 0 on success, -1 on invalid group index.
 */
static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
			      struct flm_flow_key_def_s *key_def,
			      uint32_t *packet_mask, uint32_t group_index)
{
	if (group_index >= FLM_FLOW_RCP_MAX) {
		NT_LOG(ERR, FILTER,
		       "FLM: Invalid index for FLM programming: Group=%d\n",
		       (int)group_index);
		return -1;
	}

	/* Reorder the packet mask words into the FLM register layout */
	uint32_t flm_mask[10] = {
		packet_mask[0], /* SW9 */
		packet_mask[1], /* SW8 */
		packet_mask[5], packet_mask[4],
		packet_mask[3], packet_mask[2], /* QW4 */
		packet_mask[9], packet_mask[8],
		packet_mask[7], packet_mask[6], /* QW0 */
	};

	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);

	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
			   key_def->qw0_dyn);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
			   key_def->qw0_ofs);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
			   key_def->qw4_dyn);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
			   key_def->qw4_ofs);

	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
			   key_def->sw8_dyn);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
			   key_def->sw8_ofs);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
			   key_def->sw9_dyn);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
			   key_def->sw9_ofs);

	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
				flm_mask);

	/* KID 0/1 are reserved; groups use KID = group_index + 2 */
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
			   group_index + 2);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
			   key_def->outer_proto);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
			   key_def->inner_proto);
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
	/* Negative offset: byte counting starts before the dyn anchor */
	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
			   -20);

	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);

	return 0;
}
+
/*
 * Tear down a group's FLM RCP and, when miss handling was enabled, undo the
 * group-0 programming in reverse order of setup: repoint the group-0 RCP
 * selection to 0, swap FT MISS back to FT UNHANDLED, and clear the KCE
 * enable bit for the group-0 CFN.
 * Returns 0 on success, -1 on invalid group index.
 */
static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
{
	struct flm_flow_handle_s *flm_handle =
		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
	struct flm_flow_group_s *flm_group;

	if (group_index >= FLM_FLOW_RCP_MAX) {
		NT_LOG(ERR, FILTER,
		       "FLM: Invalid index for FLM programming: Group=%d\n",
		       (int)group_index);
		return -1;
	}

	flm_group = &flm_handle->groups[group_index];

	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
			   0);
	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);

	if (flm_group->miss_enabled) {
		uint32_t bm = 0;

		/* Change group 0 FLM RCP selection to point to 0 */
		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
				       0);
		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
					 flm_group->cfn_group0, 1);

		/* Change group 0 FT MISS to FT UNHANDLED */
		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);

		/* Finally, disable FLM for group 0 */
		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
				       KM_FLM_IF_FIRST,
				       flm_group->cfn_group0 / 8, &bm);
		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
					 flm_group->cfn_group0 / 8, 1);

		memset(&flm_group->key_def, 0x0,
		       sizeof(struct flm_flow_key_def_s));
		flm_group->miss_enabled = 0;
	}

	return 0;
}
+
/*
 * Prepare a FLM group for learning a flow: on the first flow of a group,
 * program the group-0 miss path (RCP selection, RCP recipe, FT swap, KCE
 * enable); then find or allocate a flow-type (action set) entry matching
 * the flow's actions.
 *
 * Outputs: *kid (key id to learn with), *ft (flow type index). When a new
 * FT entry was allocated, *cfn_to_copy/*cfn_to_copy_km_ft tell the caller
 * which CFN/KM-FT to clone; when an identical action set already exists,
 * *fh_existing is set instead.
 * Returns 0 on success, -1 on invalid group, unset CFN, mismatching key
 * definition, resource exhaustion.
 */
static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
				  struct flow_handle *fh, uint32_t group_index,
				  struct flm_flow_key_def_s *key_def,
				  uint32_t *packet_mask,
				  /* Return values */
				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
				  int *cfn_to_copy_km_ft,
				  struct flow_handle **fh_existing)
{
	struct flm_flow_handle_s *flm_handle =
		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
	struct flm_flow_group_s *flm_group;
	struct flm_flow_ft_ident_s temp_ft_ident;
	struct nic_flow_def *fd = fh->fd;

	if (group_index >= FLM_FLOW_RCP_MAX) {
		NT_LOG(ERR, FILTER,
		       "FLM: Invalid index for FLM programming: Group=%d\n",
		       (int)group_index);
		return -1;
	}

	flm_group = &flm_handle->groups[group_index];

	if (flm_group->cfn_group0 < 0) {
		NT_LOG(ERR, FILTER,
		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
		       (int)group_index);
		return -1;
	}

	/* First flow in this group: enable the group-0 miss path */
	if (!flm_group->miss_enabled) {
		uint32_t bm = 0;

		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
							(int)group_index, fh)) {
			NT_LOG(ERR, FILTER,
			       "ERROR: Could not get FLM RCP resource\n");
			return -1;
		}

		/* Change group 0 FLM RCP selection to point to "group_index" */
		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
				       group_index);
		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
					 flm_group->cfn_group0, 1);

		/* Setup FLM RCP "group_index" */
		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);

		/*
		 * Change group 0 FT UNHANDLED to FT MISS
		 * Note: Once this step is done, the filter is invalid until the KCE step is done
		 */
		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);

		/* Finally, enable FLM for group 0 */
		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
				       KM_FLM_IF_FIRST,
				       flm_group->cfn_group0 / 8, &bm);
		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
				       KM_FLM_IF_FIRST,
				       flm_group->cfn_group0 / 8,
				       bm | (1 << (flm_group->cfn_group0 % 8)));
		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
					 flm_group->cfn_group0 / 8, 1);

		flm_group->key_def.data = key_def->data;
		flm_group->miss_enabled = 1;
	}

	/* The key layout is fixed per group once programmed */
	if (flm_group->key_def.data != key_def->data) {
		NT_LOG(ERR, FILTER,
		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
		       (int)group_index);
		return -1;
	}

	/* Create action set */
	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
	temp_ft_ident.in_use = 1;

	if (fd->dst_num_avail == 0) {
		temp_ft_ident.drop = 1;
	} else {
		for (int i = 0; i < fd->dst_num_avail; ++i) {
			if (fd->dst_id[i].type == PORT_PHY) {
				temp_ft_ident.ltx_en = 1;
				temp_ft_ident.ltx_port = fd->dst_id[i].id;
			} else if (fd->dst_id[i].type == PORT_VIRT) {
				temp_ft_ident.queue_en = 1;
				temp_ft_ident.queue = fd->dst_id[i].id;
			}
		}
	}

	/* Set encap/decap data */
	if (fd->tun_hdr.len > 0) {
		temp_ft_ident.encap_len = fd->tun_hdr.len;
		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
	}

	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;

	/* Find ft ident or create a new one */
	uint32_t ft_index = 0;

	/* Fast path: the most recently used FT entry matches */
	if (flm_group->cashed_ft_index > 0 &&
			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
			temp_ft_ident.data) {
		ft_index = flm_group->cashed_ft_index;
		*fh_existing = flm_group->ft[ft_index].fh;
	} else {
		/* FT 0 and 1 are reserved; scan from index 2 */
		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
			struct flm_flow_ft_ident_s *ft_ident =
					&flm_group->ft[ft_index].ident;
			if (ft_ident->data == 0) {
				ft_ident->data = temp_ft_ident.data;
				*cfn_to_copy = flm_group->cfn_group0;
				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
				flm_group->ft[ft_index].fh = fh;
				fh->flm_group_index = (uint8_t)group_index;
				fh->flm_ft_index = (uint8_t)ft_index;
				break;
			} else if (ft_ident->data == temp_ft_ident.data) {
				*fh_existing = flm_group->ft[ft_index].fh;
				break;
			}
		}

		if (ft_index >= FLM_FLOW_FT_MAX) {
			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
			return -1;
		}

		flm_group->cashed_ft_index = ft_index;
	}

	/* Set return values */
	 *kid = group_index + 2;
	 *ft = ft_index;

	return 0;
}
+
/*
 * Release the FT entry owned by a FLM flow handle and disable the KM and
 * FLM flow types it used on its CFN. Returns 0 on success, otherwise the
 * OR'ed error of the two disable operations.
 */
static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
				  struct flow_handle *fh)
{
	int error = 0;

	struct flm_flow_handle_s *flm_handle =
		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
	struct flm_flow_group_s *flm_group =
			&flm_handle->groups[fh->flm_group_index];

	/* Free the FT slot for reuse by new action sets */
	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
	       sizeof(struct flm_flow_group_ft_s));

	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
				   flm_group->km_ft_group0, 0, 0);
	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
				   (int)fh->flm_ft_index, 2, 0);

	return error;
}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
/* Per-profile token-bucket parameters, pre-encoded in the hardware format
 * ([11:0] mantissa, [15:12] shift) produced by flow_mtr_set_profile().
 */
struct flm_flow_mtr_handle_s {
	struct dual_buckets_s {
		uint16_t rate_a;	/* bucket A fill rate */
		uint16_t rate_b;	/* bucket B fill rate */
		uint16_t size_a;	/* bucket A size */
		uint16_t size_b;	/* bucket B size */
	} dual_buckets[FLM_MTR_PROFILE_SIZE];
};
+
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	return hw_mod_flm_present(&dev->ndev->be) &&
+	       dev->ndev->be.flm.nb_variant == 2;
+}
+
/* Maximum number of meter policies, bounded by the profile table size. */
uint64_t flow_mtr_meter_policy_n_max(void)
{
	return FLM_MTR_PROFILE_SIZE;
}
+
/*
 * Convert a byte count into bucket-size units of 2^40/10^9 bytes, i.e.
 * compute ceil(value * 10^9 / 2^40). The multiplication is done on the two
 * 20-bit halves of the (assumed 40-bit) input so the intermediate products
 * cannot overflow 64 bits.
 */
static inline uint64_t convert_to_bucket_size_units(uint64_t value)
{
	/* Assumes a 40-bit int as input */
	uint64_t low = (value & 0xfffff) * 1000000000;
	uint64_t high = ((value >> 20) & 0xfffff) * 1000000000;
	uint64_t result = (high >> 20) + (low >> 40);

	/* Round up when any fractional bits would otherwise be discarded */
	if ((high & 0xfffff) || (low & 0xffffffffff))
		result += 1;

	return result;
}
+
/*
 * Encode a meter profile's dual token buckets into the hardware format and
 * cache them for later learn records. Rates are converted to 128 bytes/sec
 * units and sizes to 2^40/10^9 byte units; both are then packed as a 12-bit
 * mantissa with a 4-bit left-shift ([11:0] value, [15:12] shift).
 * Always returns 0.
 */
int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
{
	struct flow_nic_dev *ndev = dev->ndev;
	struct flm_flow_mtr_handle_s *handle =
		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];

	uint64_t bucket_rate_shift_a = 0;
	uint64_t bucket_rate_shift_b = 0;

	uint64_t bucket_size_shift_a = 0;
	uint64_t bucket_size_shift_b = 0;

	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
			(bucket_rate_a >> 7);
	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
			(bucket_rate_b >> 7);

	/* Round rate down to max rate supported */
	if (bucket_rate_a > 0x7ff8000)
		bucket_rate_a = 0x7ff8000;
	if (bucket_rate_b > 0x7ff8000)
		bucket_rate_b = 0x7ff8000;

	/* Find shift to convert into 12-bit int */
	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
		bucket_rate_shift_a += 1;
	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
		bucket_rate_shift_b += 1;

	/* Store in format [11:0] shift-left [15:12] */
	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
			  (bucket_rate_shift_a << 12);
	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
			  (bucket_rate_shift_b << 12);

	/* Round size down to 38-bit int */
	if (bucket_size_a > 0x3fffffffff)
		bucket_size_a = 0x3fffffffff;
	if (bucket_size_b > 0x3fffffffff)
		bucket_size_b = 0x3fffffffff;

	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);

	/* Round size down to max size supported */
	if (bucket_size_a > 0x7ff8000)
		bucket_size_a = 0x7ff8000;
	if (bucket_size_b > 0x7ff8000)
		bucket_size_b = 0x7ff8000;

	/* Find shift to convert into 12-bit int */
	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
		bucket_size_shift_a += 1;
	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
		bucket_size_shift_b += 1;

	/* Store in format [11:0] shift-left [15:12] */
	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
			  (bucket_size_shift_a << 12);
	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
			  (bucket_size_shift_b << 12);

	return 0;
}
+
/* Meter policies carry no hardware state here; accept and succeed. */
int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
			UNUSED uint32_t policy_id, UNUSED int drop)
{
	return 0;
}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
/* Number of meters supported, bounded by the statistics table size. */
uint32_t flow_mtr_meters_supported(void)
{
	return FLM_MTR_STAT_SIZE;
}
+
/* Per-meter statistics. n_pkt's MSB doubles as an "update in progress"
 * marker: the stats updater sets it before writing n_bytes and clears it
 * with the final n_pkt store, and readers spin while it is set (see
 * flm_mtr_update_stats()/flm_mtr_read_stats()).
 */
struct mtr_stat_s {
	struct dual_buckets_s *buckets;	/* profile buckets; NULL when meter deleted */

	volatile atomic_uint_fast64_t n_pkt;	/* green packets (MSB = update flag) */
	volatile atomic_uint_fast64_t n_bytes;	/* green bytes */
	uint64_t n_pkt_base;	/* baseline subtracted on read (for clear-on-read) */
	uint64_t n_bytes_base;	/* baseline subtracted on read */
	volatile atomic_uint_fast64_t stats_mask;	/* 0 disables stat updates */
};
+
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
/*
 * Push one learn record into the FLM learn FIFO. If the FIFO lacks space,
 * drain pending info records (which frees learn space) and retry up to
 * FLM_PROG_MAX_RETRY times. Returns 0 on success, 1 when the FIFO never
 * freed up, otherwise the flush result.
 * Caller must hold the ndev mutex (uses flm_read_inf_rec_locked()).
 */
static int flow_flm_apply(struct flow_eth_dev *dev,
			  struct flm_v17_lrn_data_s *learn_record)
{
	uint32_t lrn_ready;
	uint32_t retry = 0;
	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];

	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
				&lrn_ready);
	if (lrn_ready < WORDS_PER_LEARN_DATA) {
		/* Refresh the cached register before concluding the FIFO is full */
		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
		while (lrn_ready < WORDS_PER_LEARN_DATA) {
			++retry;
			if (retry > FLM_PROG_MAX_RETRY)
				return 1;

			/* Draining info records makes room for learn records */
			flm_read_inf_rec_locked(dev, data);

			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
						HW_FLM_BUF_CTRL_LRN_FREE,
						&lrn_ready);
		}
	}

	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
						HW_FLM_FLOW_LRN_DATA_V17,
						(uint32_t *)learn_record);
	return res;
}
+
/*
 * Create a meter by learning a dedicated FLM entry keyed on mtr_id + 1
 * (KID 1 is reserved for meters). Bucket A of the profile provides the
 * rate/size; the bucket starts full. On success the meter's stat slot is
 * bound to the profile buckets and its stats mask is armed.
 * Returns the learn-record apply result (0 on success).
 */
int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
			  uint32_t profile_id, UNUSED uint32_t policy_id,
			  uint64_t stats_mask)
{
	pthread_mutex_lock(&dev->ndev->mtx);

	struct flm_flow_mtr_handle_s *handle =
		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];

	struct flm_v17_lrn_data_s learn_record;

	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));

	learn_record.sw9 = mtr_id + 1;
	learn_record.kid = 1;

	learn_record.rate = buckets->rate_a;
	learn_record.size = buckets->size_a;
	/* Start with a full bucket (mantissa part of the encoded size) */
	learn_record.fill = buckets->size_a & 0x0fff;

	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */

	learn_record.ent = 1;
	learn_record.op = 1;	/* learn operation */
	learn_record.eor = 1;	/* end of record */

	/* Flow id: 32-bit mtr_id, with the valid bit in id[8] */
	learn_record.id[0] = mtr_id & 0xff;
	learn_record.id[1] = (mtr_id >> 8) & 0xff;
	learn_record.id[2] = (mtr_id >> 16) & 0xff;
	learn_record.id[3] = (mtr_id >> 24) & 0xff;
	learn_record.id[8] = 1U << 7;

	if (stats_mask)
		learn_record.vol_idx = 1;

	int res = flow_flm_apply(dev, &learn_record);

	if (res == 0) {
		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;

		mtr_stat[mtr_id].buckets = buckets;
		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
	}

	pthread_mutex_unlock(&dev->ndev->mtx);

	return res;
}
+
/*
 * Destroy a meter: clear its statistics slot first (a zero stats_mask stops
 * in-flight stat updates for this meter), then apply an unlearn record
 * (op = 0) for its FLM entry.
 * Returns the learn-record apply result (0 on success).
 */
int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
{
	pthread_mutex_lock(&dev->ndev->mtx);

	struct flm_v17_lrn_data_s learn_record;

	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));

	learn_record.sw9 = mtr_id + 1;
	learn_record.kid = 1;

	learn_record.ent = 1;
	learn_record.op = 0;	/* unlearn operation */
	learn_record.eor = 1;

	learn_record.id[0] = mtr_id & 0xff;
	learn_record.id[1] = (mtr_id >> 8) & 0xff;
	learn_record.id[2] = (mtr_id >> 16) & 0xff;
	learn_record.id[3] = (mtr_id >> 24) & 0xff;
	learn_record.id[8] = 1U << 7;

	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;

	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
	mtr_stat[mtr_id].n_bytes_base = 0;
	mtr_stat[mtr_id].n_pkt_base = 0;
	mtr_stat[mtr_id].buckets = NULL;

	int res = flow_flm_apply(dev, &learn_record);

	pthread_mutex_unlock(&dev->ndev->mtx);

	return res;
}
+
/*
 * Apply a relearn record (op = 2) that adjusts a meter's bucket fill level
 * by adjust_value while keeping bucket A's rate/size from its profile.
 * Returns the learn-record apply result (0 on success).
 * NOTE(review): mtr_stat->buckets is dereferenced without a NULL check —
 * presumably callers never adjust a destroyed meter; verify against callers.
 */
int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
			 uint32_t adjust_value)
{
	pthread_mutex_lock(&dev->ndev->mtx);

	struct mtr_stat_s *mtr_stat =
		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];

	struct flm_v17_lrn_data_s learn_record;

	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));

	learn_record.sw9 = mtr_id + 1;
	learn_record.kid = 1;

	learn_record.rate = mtr_stat->buckets->rate_a;
	learn_record.size = mtr_stat->buckets->size_a;
	learn_record.adj = adjust_value;

	learn_record.ft_mbr = 15;

	learn_record.ent = 1;
	learn_record.op = 2;	/* relearn/adjust operation */
	learn_record.eor = 1;

	if (atomic_load(&mtr_stat->stats_mask))
		learn_record.vol_idx = 1;

	int res = flow_flm_apply(dev, &learn_record);

	pthread_mutex_unlock(&dev->ndev->mtx);

	return res;
}
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
/*
 * Drain FLM info records and fold meter statistics into the per-meter
 * counters. For each valid record the writer protocol is: store n_pkt with
 * the MSB set (readers in flm_mtr_read_stats() spin while it is set), store
 * n_bytes, then store n_pkt without the MSB to release the slot.
 * Returns the number of info records processed.
 */
uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
{
	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];

	pthread_mutex_lock(&dev->ndev->mtx);
	uint32_t records = flm_read_inf_rec_locked(dev, data);

	pthread_mutex_unlock(&dev->ndev->mtx);

	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;

	for (uint32_t i = 0; i < records; ++i) {
		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];

		/* Check that received record hold valid meter statistics */
		if ((p_record[6] < flow_mtr_meters_supported() &&
				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
			uint32_t id = p_record[6];

			/* Don't update a deleted meter */
			uint64_t stats_mask =
				atomic_load(&mtr_stat[id].stats_mask);
			if (stats_mask) {
				/* Bytes in words 0-1, packets in words 2-3 */
				uint64_t nb = ((uint64_t)p_record[1] << 32) |
					      p_record[0];
				uint64_t np = ((uint64_t)p_record[3] << 32) |
					      p_record[2];

				/* MSB flags the update; cleared by the final store */
				atomic_store(&mtr_stat[id].n_pkt,
					     np | UINT64_MSB);
				atomic_store(&mtr_stat[id].n_bytes, nb);
				atomic_store(&mtr_stat[id].n_pkt, np);
			}
		}
	}

	return records;
}
+
/*
 * Read a meter's green packet/byte counters (relative to the last clear).
 * Uses a seqlock-like retry loop against the writer in
 * flm_mtr_update_stats(): spin while n_pkt's MSB (update-in-progress flag)
 * is set, read n_bytes, then re-read n_pkt and retry if it changed.
 * With clear set, the current values become the new baseline.
 * Outputs are only written when *stats_mask is nonzero.
 */
void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
			uint64_t *stats_mask, uint64_t *green_pkt,
			uint64_t *green_bytes, int clear)
{
	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
	if (*stats_mask) {
		uint64_t pkt_1;
		uint64_t pkt_2;
		uint64_t nb;

		do {
			do {
				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
			} while (pkt_1 & UINT64_MSB);
			nb = atomic_load(&mtr_stat[id].n_bytes);
			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
		} while (pkt_1 != pkt_2);

		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
		*green_bytes = nb - mtr_stat[id].n_bytes_base;
		if (clear) {
			mtr_stat[id].n_pkt_base = pkt_1;
			mtr_stat[id].n_bytes_base = nb;
		}
	}
}
+
/* IFR MTU recipe for a port is simply port + 1 (recipe 0 is not used here). */
static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
{
	return (uint8_t)(port + 1);
}
+
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *dev = ndev->eth_base;
+
+	while (dev) {
+		if (dev->port_id == port_id)
+			return dev->port;
+		dev = dev->next;
+	}
+
+	return UINT8_MAX;
+}
+
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	if (ndev->flow_base)
+		ndev->flow_base->prev = fh;
+	fh->next = ndev->flow_base;
+	fh->prev = NULL;
+	ndev->flow_base = fh;
+}
+
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		ndev->flow_base = NULL;
+	}
+}
+
+/* Push 'fh' onto the front of the device's FLM (learned) flow list. */
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base_flm;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head != NULL)
+		head->prev = fh;
+	ndev->flow_base_flm = fh;
+}
+
+/* Unlink 'fh_flm' from the device's FLM flow list, updating the head as needed. */
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *n = fh_flm->next;
+	struct flow_handle *p = fh_flm->prev;
+
+	if (n != NULL && p != NULL) {
+		/* Interior node: splice neighbors together. */
+		p->next = n;
+		n->prev = p;
+		return;
+	}
+	if (n != NULL) {
+		/* Head of the list. */
+		ndev->flow_base_flm = n;
+		n->prev = NULL;
+		return;
+	}
+	if (p != NULL) {
+		/* Tail of the list. */
+		p->next = NULL;
+		return;
+	}
+	/* Sole element: clear the head only if it really points at fh_flm. */
+	if (ndev->flow_base_flm == fh_flm)
+		ndev->flow_base_flm = NULL;
+}
+
+/*
+ * Interpret a VLAN flow element: when a TCI mask is present, add the TCI
+ * to the key matcher in a SW (single-word) resource.  Always counts the
+ * VLAN tag in fd->vlans.
+ *
+ * Returns 0 on success, 1 on error (fd has been freed and 'error' set).
+ */
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL && mask->tci) {
+		if (implicit_vlan_vid > 0) {
+			NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+				"for implicit VLAN patterns.\n");
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+			free(fd);
+			return 1;
+		}
+
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = ntohs(mask->tci);
+		sw_data[0] = ntohs(spec->tci) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+/*
+ * Interpret an IPv4 flow element and add src/dst IP matches to the key
+ * matcher.  Both addresses are placed together in one QW (quad-word) key
+ * resource when one is free; otherwise each address falls back to its own
+ * SW (single-word) resource.
+ *
+ * NOTE(review): qw_counter/sw_counter are passed by value, so the
+ * increments below only affect this call — presumably the caller tracks
+ * overall key-resource usage itself; verify against the call sites.
+ *
+ * Returns 0 on success, 1 on error (fd has been freed and 'error' set).
+ */
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		/*
+		 * A fully-masked all-ones frag_offset selects fragmentation
+		 * matching; 0xfe is the internal code (exact semantics set by
+		 * the FLM configuration — confirm).
+		 */
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			/* QW slots are packet_data[6..9] (counter 0) and [2..5] (counter 1). */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			/* Pre-mask the data so the key only carries matched bits. */
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			/* Fall back to SW resources: need one per masked address. */
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				/* src IP sits at L3 header offset 12. */
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				/* dst IP sits at L3 header offset 16. */
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	/* Tunneled (inner) L3 if an ANY or outer L3 element was already seen. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+/*
+ * Load a 16-byte IPv6 address and its mask into four 32-bit key words:
+ * convert each word from network to host byte order and pre-mask the
+ * data words so the key only carries matched bits.
+ */
+static void flow_elem_type_ipv6_add_addr(uint32_t *qw_data, uint32_t *qw_mask,
+	const void *addr_spec, const void *addr_mask)
+{
+	memcpy(&qw_data[0], addr_spec, 16);
+	memcpy(&qw_mask[0], addr_mask, 16);
+
+	for (unsigned int i = 0; i < 4; ++i) {
+		qw_data[i] = ntohl(qw_data[i]);
+		qw_mask[i] = ntohl(qw_mask[i]);
+		qw_data[i] &= qw_mask[i];
+	}
+}
+
+/*
+ * Interpret an IPv6 flow element: add src and/or dst address matches
+ * (each consuming one QW key resource) and record the L3 protocol.
+ * The previously duplicated per-address copy/swap/mask code is factored
+ * into flow_elem_type_ipv6_add_addr() above.
+ *
+ * Returns 0 on success, 1 on error (fd has been freed and 'error' set).
+ */
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* QW slots are packet_data[6..9] (counter 0) and [2..5] (counter 1). */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			flow_elem_type_ipv6_add_addr(qw_data, qw_mask,
+				ipv6_spec->hdr.src_addr, ipv6_mask->hdr.src_addr);
+
+			/* src address sits at L3 header offset 8. */
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			flow_elem_type_ipv6_add_addr(qw_data, qw_mask,
+				ipv6_spec->hdr.dst_addr, ipv6_mask->hdr.dst_addr);
+
+			/* dst address sits at L3 header offset 24. */
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	/* Tunneled (inner) L3 if an ANY or outer L3 element was already seen. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+/*
+ * Interpret a UDP flow element: add the src/dst port pair as one SW
+ * (single-word) key match and record the L4 protocol.  (Function name
+ * "upd" is a historical typo for "udp"; kept for existing callers.)
+ *
+ * Returns 0 on success, 1 on error (fd has been freed and 'error' set).
+ */
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/*
+			 * Cast to uint32_t before shifting: ntohs() promotes
+			 * to int, and left-shifting a value >= 0x8000 by 16
+			 * overflows signed int (undefined behavior).
+			 */
+			sw_mask[0] = ((uint32_t)ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = (((uint32_t)ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Tunneled (inner) L4 if an ANY or outer L4 element was already seen. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret an SCTP flow element: add the src/dst port pair as one SW
+ * (single-word) key match and record the L4 protocol.
+ *
+ * Returns 0 on success, 1 on error (fd has been freed and 'error' set).
+ */
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/*
+			 * Cast to uint32_t before shifting: ntohs() promotes
+			 * to int, and left-shifting a value >= 0x8000 by 16
+			 * overflows signed int (undefined behavior).
+			 */
+			sw_mask[0] = ((uint32_t)ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = (((uint32_t)ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Tunneled (inner) L4 if an ANY or outer L4 element was already seen. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a TCP flow element: add the src/dst port pair as one SW
+ * (single-word) key match and record the L4 protocol.
+ *
+ * Returns 0 on success, 1 on error (fd has been freed and 'error' set).
+ */
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/*
+			 * Cast to uint32_t before shifting: ntohs() promotes
+			 * to int, and left-shifting a value >= 0x8000 by 16
+			 * overflows signed int (undefined behavior).
+			 */
+			sw_mask[0] = ((uint32_t)ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = (((uint32_t)ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Tunneled (inner) L4 if an ANY or outer L4 element was already seen. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a GTP flow element: match on the tunnel endpoint ID (TEID)
+ * via a SW (single-word) key resource and mark the flow as GTPv1-U.
+ *
+ * Returns 0 on success, 1 on error (fd has been freed and 'error' set).
+ */
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL && mask->teid) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = ntohl(mask->teid);
+		sw_data[0] = ntohl(spec->teid) & sw_mask[0];
+
+		/* TEID sits at byte offset 4 of the GTP header (L4 payload). */
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+		set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+		sw_counter += 1;
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Clear all hardware configuration tied to CAT function index 'cfn' so
+ * the slot can be reused: the CFN entry itself, its KM and FLM
+ * key-matcher enable/category state and per-flow-type enables, and -
+ * when currently enabled - the CTE enable bit together with this CFN's
+ * slice of the CTS A/B category tables.
+ *
+ * Always returns 0; the hw_mod_* backend calls are not error-checked
+ * here.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN: preset the category function entry back to its reset state */
+	{
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+				   0, 0);
+		hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+	}
+
+	/*
+	 * KM: clear this CFN's bit in the KM enable bitmap (bits are packed
+	 * 8 CFNs per word, hence cfn / 8 and cfn % 8), zero its category
+	 * select, then disable every flow type for all four key slots (0-3).
+	 */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* FLM: same teardown as KM, but on the FLM key-matcher interface */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/*
+	 * CTE / CTS: if any CTE enable bits are set for this CFN, clear them
+	 * and zero the CAT_A/CAT_B words in this CFN's CTS region.  Each CFN
+	 * owns 'cts_offset' consecutive CTS entries starting at
+	 * cts_offset * cfn (two categories per entry, hence (cts_num + 1) / 2).
+	 */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		if (cte) {
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill an FLM flow handle from an interpreted flow definition.
+ *
+ * Copies the 10-word match key from packet_data, records the FLM key
+ * id, RPL extension pointer and priority, resolves the IP protocol
+ * number from the L4 protocol (outer first, tunneled as fallback) and
+ * extracts NAT/DSCP/TEID/QFI values from the modify-field actions.
+ *
+ * Returns 0 on success, -1 if fh is not an FLM-type handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* Resolve the IP protocol number; the outer L4 protocol wins */
+	int prot = -1;
+
+	switch (fd->l4_prot) {
+	case PROT_L4_TCP:
+		prot = 6;
+		break;
+	case PROT_L4_UDP:
+		prot = 17;
+		break;
+	case PROT_L4_SCTP:
+		prot = 132;
+		break;
+	case PROT_L4_ICMP:
+		prot = 1;
+		break;
+	default:
+		break;
+	}
+
+	if (prot < 0) {
+		/* No outer L4 match - fall back to the tunneled L4 protocol */
+		switch (fd->tunnel_l4_prot) {
+		case PROT_TUN_L4_TCP:
+			prot = 6;
+			break;
+		case PROT_TUN_L4_UDP:
+			prot = 17;
+			break;
+		case PROT_TUN_L4_SCTP:
+			prot = 132;
+			break;
+		case PROT_TUN_L4_ICMP:
+			prot = 1;
+			break;
+		default:
+			prot = 0;
+			break;
+		}
+	}
+
+	fh->flm_prot = prot;
+
+	/* Ten 32-bit words of match data form the FLM key */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Harvest values carried by the modify-field actions */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		switch (fd->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* RQI is bit 6, QFI the low 6 bits of the same byte */
+			fh->flm_rqi = (fd->modify_field[idx].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build an FLM v17 learn record from an FLM flow handle and submit it
+ * through flow_flm_apply().
+ *
+ * mtr_ids may be NULL; when given, up to MAX_FLM_MTRS_SUPPORTED meter
+ * ids are programmed and vol_idx is set to the count of leading
+ * non-zero entries.  flm_ft selects the flow type and flm_op the learn
+ * operation (low 4 bits).
+ *
+ * Returns the result of flow_flm_apply(), or -1 if fh is not an
+ * FLM-type handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s lrn;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&lrn, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* The ten key words are loaded highest-index first */
+	lrn.qw0[0] = fh->flm_data[9];
+	lrn.qw0[1] = fh->flm_data[8];
+	lrn.qw0[2] = fh->flm_data[7];
+	lrn.qw0[3] = fh->flm_data[6];
+	lrn.qw4[0] = fh->flm_data[5];
+	lrn.qw4[1] = fh->flm_data[4];
+	lrn.qw4[2] = fh->flm_data[3];
+	lrn.qw4[3] = fh->flm_data[2];
+	lrn.sw8 = fh->flm_data[1];
+	lrn.sw9 = fh->flm_data[0];
+	lrn.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		FLM_V17_MBR_ID1(lrn.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(lrn.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(lrn.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(lrn.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t used = 0;
+
+		for (; used < MAX_FLM_MTRS_SUPPORTED && mtr_ids[used] != 0;
+				++used)
+			;
+		lrn.vol_idx = used;
+	}
+
+	lrn.nat_ip = fh->flm_nat_ipv4;
+	lrn.nat_port = fh->flm_nat_port;
+	lrn.nat_en = (fh->flm_nat_ipv4 != 0 || fh->flm_nat_port != 0) ? 1 : 0;
+
+	lrn.dscp = fh->flm_dscp;
+	lrn.teid = fh->flm_teid;
+	lrn.qfi = fh->flm_qfi;
+	lrn.rqi = fh->flm_rqi;
+	/* Lower 10 bits carry the RPL EXT PTR, bits [13:10] the MTU recipe */
+	lrn.color = fh->flm_rpl_ext_ptr & 0x3ff;
+	lrn.color |= (fh->flm_mtu_fragmentation_recipe & 0xf) << 10;
+
+	lrn.ent = 0;
+	lrn.op = flm_op & 0xf;
+	lrn.prio = fh->flm_prio & 0x3;
+	lrn.ft = flm_ft;
+	lrn.kid = fh->flm_kid;
+	lrn.eor = 1;
+
+	return flow_flm_apply(dev, &lrn);
+}
+
+/*
+ * Set up KM (key matcher) flow-type and RCP/category resources for a
+ * new flow handle 'fh'.
+ *
+ * When no identical flow exists (identical_flow_found == 0): reuse an
+ * existing KM flow type whose identity matches fd, or claim a free
+ * slot; then reference the KM category of 'found_flow' (a compatible,
+ * non-identical flow, may be NULL) or allocate a fresh one, program
+ * the RCP and write a new KM match entry.
+ *
+ * When an identical flow was found: only reference found_flow's KM
+ * flow type and category and share its match entry.
+ *
+ * On success, *setup_km/*setup_km_ft/*setup_km_rcp are filled in for
+ * the caller's filter setup.  Returns 0 on success, 1 on resource
+ * exhaustion (error is set).
+ *
+ * NOTE(review): 'flow' (the caller's search cursor) is kept for
+ * interface compatibility; found_flow is used instead, as it is the
+ * flow the decision was made on.
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+		/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Remember the first free (zero) slot as fallback */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Identical FT identity exists - reference it */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record the FT identity */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* Update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this flow type to the category's FT A mask */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* Flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists - share its KM resources and entry */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		*setup_km = 1;
+		/*
+		 * Consistency fix: use found_flow (guaranteed non-NULL when
+		 * identical_flow_found is set) rather than the caller's loop
+		 * cursor 'flow'.  They are equal under the current caller's
+		 * invariant, but found_flow does not depend on it.
+		 */
+		*setup_km_ft = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = found_flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * For fields inside a pushed outer header (new_outer, level <= 1) the
+ * offset becomes absolute from L2 using the header section lengths and
+ * *dyn is set to 1 (static/L2-relative); otherwise the offset is moved
+ * past the inserted tunnel header bytes.
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Fix: adjust the offset value itself.  The previous
+			 * 'ofs += ...' performed pointer arithmetic on the
+			 * local pointer copy and left *ofs unchanged, so the
+			 * tunnel-length adjustment was silently dropped.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				/* Field lives in the L3 header */
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				/* Field lives in the L4 header */
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				/* Field lives in the tunnel payload header */
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of the flow-management state for a NIC device.
+ *
+ * Reserves the hard-wired resource indexes (index 0 of most module
+ * types, plus FLM flow type 1), writes default/catch-all recipes to the
+ * hardware modules (CAT, QSL, PDB, HSH, COT, RMC, FLM) and allocates
+ * the FLM meter/flow-type/statistics bookkeeping structures.
+ * Guarded by ndev->flow_mgnt_prepared so repeated calls are no-ops.
+ *
+ * Returns 0 on success, -1 on failure (after attempting teardown via
+ * done_flow_management_of_ndev_profile_inline()).
+ *
+ * NOTE(review): on the err_exit0 path flow_mgnt_prepared is still 0,
+ * so the teardown helper skips its cleanup block - the calloc()ed
+ * handles and marked resources above the failing step appear to leak;
+ * confirm whether this is intended.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Disable all FLM status/notification outputs by default */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Meter statistics are read/written concurrently - atomics */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down the flow-management state created by
+ * initialize_flow_management_of_ndev_profile_inline(): reset the FLM
+ * SDRAM, clear the default/reserved recipes back to zero in every
+ * hardware module, release the reserved resource indexes and free the
+ * FLM bookkeeping allocations.
+ * Guarded by ndev->flow_mgnt_prepared; always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/*
+		 * Clear the handles after freeing them so a later access
+		 * cannot dereference or double-free dangling pointers.
+		 */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		ndev->flm_mtr_handle = NULL;
+		ndev->ft_res_handle = NULL;
+		ndev->mtr_stat_handle = NULL;
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow specification without programming anything: run the
+ * element/action interpreter under the device lock and report success
+ * if it produced a flow definition. The definition itself is discarded.
+ * Returns 0 when the specification is valid, -1 otherwise (error is
+ * filled in by the interpreter).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t unused_port = 0;
+	uint32_t unused_n_ports = 0;
+	uint32_t unused_n_queues = 0;
+	uint32_t data[10];
+	uint32_t mask[10];
+	struct flm_flow_key_def_s key;
+	struct nic_flow_def *def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	def = interpret_flow_elements(dev, elem, action, error, 0,
+				      &unused_port, &unused_n_ports,
+				      &unused_n_queues, data, mask, &key);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (def == NULL)
+		return -1;
+
+	free(def);
+	return 0;
+}
+
+/*
+ * Create a flow: interpret the elements/actions into a flow definition,
+ * translate caller group IDs to hardware group IDs, and program the
+ * resulting filter into the NIC.
+ * Returns the new flow handle, or NULL on failure with error set.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy so the caller's attributes stay untouched */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit_free_fd;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit_free_fd;
+	}
+
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/*
+	 * Create and flush filter to NIC. Ownership of fd passes to
+	 * create_flow_filter / the returned handle from this point on.
+	 */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New flow: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit_free_fd:
+	/*
+	 * fd was never handed to create_flow_filter; release it here so a
+	 * group translation failure does not leak the flow definition.
+	 */
+	free(fd);
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Remove a single flow (FLM-offloaded or regular) and release every
+ * hardware resource it references. Caller must hold dev->ndev->mtx.
+ *
+ * FLM flows are unlearned via flm_flow_programming(); the last FLM
+ * reference also destroys the owner flow by calling this function
+ * recursively. Regular flows walk all resource types and, when the
+ * last reference to an index is dropped, reset the corresponding
+ * hardware recipe/entry to its preset state.
+ *
+ * Returns 0 on success, non-zero if any reset step failed (the flow
+ * handle is freed in either case).
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the FLM entry (NULL mtr_ids, ft 0, disable) */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Drop TPE replace extension + data words on last deref */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* One RPL record covers 16 bytes of replace data */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Last FLM flow of the owner: destroy the owner flow too */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the flow-type ident bookkeeping only */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* Reset all TPE sub-module recipes for this index */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy every flow in the given linked list that belongs to dev.
+ * Stops at the first failure and returns that error code; the list is
+ * traversed via a saved next pointer since destruction frees the node.
+ * Caller must hold dev->ndev->mtx.
+ */
+static int flow_destroy_list_of_dev(struct flow_eth_dev *dev,
+				    struct flow_handle *flow)
+{
+	int err = 0;
+
+	while (flow && !err) {
+		struct flow_handle *flow_next = flow->next;
+
+		if (flow->dev == dev)
+			err = flow_destroy_locked_profile_inline(dev, flow,
+								 NULL);
+		flow = flow_next;
+	}
+
+	return err;
+}
+
+/*
+ * Destroy one flow, or - when flow is NULL - all flows (regular and
+ * FLM) created on this eth device. Takes the device mutex.
+ * Returns 0 on success, non-zero on the first failed destruction.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		err = flow_destroy_list_of_dev(dev, dev->ndev->flow_base);
+
+		/* Delete all created FLM flows from this eth device */
+		if (!err)
+			err = flow_destroy_list_of_dev(dev,
+						       dev->ndev->flow_base_flm);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * Flush all flows - not implemented for the inline profile.
+ * Tolerates a NULL error pointer (other entry points in this file are
+ * called with NULL error); always returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	if (error) {
+		error->type = FLOW_ERROR_GENERAL;
+		error->message = "rte_flow_flush is not supported";
+	}
+	return -1;
+}
+
+/*
+ * Query a flow - not implemented for the inline profile.
+ * Clears the output pointers when provided so callers never read stale
+ * values; tolerates NULL output/error pointers. Always returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	if (length)
+		*length = 0;
+	if (data)
+		*data = NULL;
+	if (error) {
+		error->type = FLOW_ERROR_GENERAL;
+		error->message = "rte_flow_query is not supported";
+	}
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into data[].
+ *
+ * data[] must hold at least as many entries as the fields[] table; the
+ * caller-visible ordering is fixed by that table. HW_FLM_STAT_FLOWS is
+ * stored as an absolute value; every other counter is ADDED to the
+ * existing data[i], i.e. the caller's array accumulates deltas across
+ * calls. On FLM versions below 18 the loop stops after
+ * HW_FLM_STAT_PRB_IGNORE, leaving the later entries untouched.
+ * Returns 0 on success, -1 if size is too small.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	/* Latch fresh counter values from hardware before reading */
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		/* FLOWS is a gauge (absolute); the rest are accumulated */
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		/* Older FLM versions do not implement the later counters */
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the MTU for a port into both TPE IFR recipe banks (RPP IFR
+ * and IFR) and flush the recipes to hardware if all writes succeeded.
+ * Ports >= 255 are rejected. Returns 0 on success, non-zero otherwise.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	const uint8_t rcp_idx = convert_port_to_ifr_mtu_recipe(port);
+	int res = 0;
+
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  rcp_idx, 1);
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  rcp_idx, mtu);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      rcp_idx, 1);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      rcp_idx, mtu);
+
+	/* Only push the recipes to hardware when every write succeeded */
+	if (res == 0) {
+		res |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+		res |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the inline flow profile: device-level setup and
+ * teardown, flow create/validate/destroy/flush/query entry points and
+ * FLM statistics retrieval.
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down flow-management state created by the initialize call below */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time setup of default recipes and reserved resources for a NIC */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy one flow; caller must already hold the device mutex */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Check that a flow specification can be interpreted, without programming it */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Interpret and program a flow; returns the new handle or NULL on error */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of the device when flow is NULL */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented; always fails */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented; always fails */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Copy/accumulate FLM statistics counters into data[] (size entries) */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: one entry per physical adapter, holding
+ * the debug mode and a pointer to each hardware module's nthw instance
+ * used by the backend callbacks below.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode;
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Turn on module debug output when the backend debug mode has the
+ * WRITE bit set or the module's own debug flag is set; remembers the
+ * decision in a local __debug__ variable so _CHECK_DEBUG_OFF can
+ * restore the mode afterwards. Must be paired within one scope.
+ *
+ * NOTE(review): this macro declares the identifier __debug__, which is
+ * reserved for the implementation (C11 7.1.3), and uses an unbraced
+ * `if` wrapped around a do/while so it can leave __debug__ in scope -
+ * confirm both are acceptable conventions for this codebase.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+/* Restore debug mode if the matching _CHECK_DEBUG_ON enabled it */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Backend callback: record the requested debug mode; always succeeds. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	((struct backend_dev_s *)be_dev)->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * Backend INFO callbacks: each function below is a thin adapter that
+ * casts the opaque backend handle to struct backend_dev_s and forwards
+ * to the corresponding info_nthw_* query on the adapter's INFO module.
+ */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+/*
+ * More backend INFO callbacks (KM TCAM, FLM, HST, QSL and PDB
+ * capacities) - same thin-adapter pattern as the getters above.
+ */
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when the CAT (categorizer) module instance was found on this FPGA. */
+static bool cat_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_cat_nthw != NULL;
+}
+
+/* CAT module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
+			  (module_get_minor_version(be->p_cat_nthw->m_cat) &
+			   0xffff));
+}
+
+/*
+ * Write 'cnt' CFN (categorizer function) records to hardware, starting at
+ * record index 'cat_func'.  The field set differs between CAT version 18
+ * and versions 21/22 (the latter add tunnel checksum/TTL error bits and an
+ * optional second KM interface, km1_or).  Versions other than 18/21/22 are
+ * silently ignored.  Always returns 0.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* Burst size one: select/write/flush each record in turn.
+		 * (Restored from a garbled "r(...)" call to the counter
+		 * setup every sibling *_flush routine performs first.)
+		 */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* Burst size one (see note in the v18 branch). */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			/* Second KM interface only exists on some FPGAs. */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCE (key-category enable) bitmap entries starting at 'index'.
+ * v18 has a single KM interface (index 0); v21/v22 address a per-interface
+ * enable bitmap via 'km_if_idx'.  Always returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCS (key-category select) records starting at 'cat_func'.
+ * Same v18 vs v21/v22 KM-interface handling as cat_kce_flush().
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FTE (flow-type enable) bitmap entries starting at 'index'.
+ * Same v18 vs v21/v22 KM-interface handling as cat_kce_flush().
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE (category-to-engine enable) records starting at
+ * 'cat_func'.  v22 adds the 'rrb' engine-enable bit on top of the
+ * v18/v21 set.  Always returns 0.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			/* NOTE(review): the rrb enable bit is written through
+			 * the TPE enable accessor, overwriting the tpe value
+			 * just written above — looks like a copy/paste slip.
+			 * Confirm whether a cat_nthw_cte_enable_rrb() accessor
+			 * exists and should be used here instead.
+			 */
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS (category A/B select) records starting at 'index'.
+ * All supported versions (18/21/22) are written through the v18 union
+ * view — presumably the record layout is identical across versions;
+ * TODO(review) confirm against the cat_func_s union definition.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' COT (color table) records starting at 'cat_func'.
+ * Uses the v18 union view for all supported versions (see cat_cts_flush).
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCT (color/KM control table) records starting at 'index'.
+ * Uses the v18 union view for all supported versions (see cat_cts_flush).
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' EXO (extractor offset: dyn selector + byte offset) records
+ * starting at 'ext_index'.  v18 union view covers all supported versions.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' RCK (relative checker) data words starting at 'index'. */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' LEN (packet-length match: lower/upper bound, two dyn
+ * selectors, invert flag) records starting at 'len_index'.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCC CAM (key/category/id) records starting at 'len_index'.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCE (immediate/indirect data) records starting at
+ * 'len_index'.  CCE exists only on CAT version 22; other versions no-op.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCS records starting at 'len_index': per-engine enable bit
+ * plus value for each engine (cor/hsh/qsl/ipf/slc/pdb/msk/hst/epp/tpe/rrb)
+ * and three side-band type/data pairs.  v22-only; other versions no-op.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when the KM (key matcher) module instance was found on this FPGA. */
+static bool km_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_km_nthw != NULL;
+}
+
+/* KM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_km_nthw->m_km) << 16) |
+			  (module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
+}
+
+/*
+ * Write 'cnt' KM RCP (key-extraction recipe) records to hardware, starting
+ * at 'category'.  Each recipe programs the word selectors/offsets (qw0/qw4,
+ * dw8/dw10, swx, dw0_b..sw5_b), masks, dual/paired mode, per-bank info and
+ * keyway settings.  Only KM version 7 is handled; always returns 0.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_rcp_select(be->p_km_nthw, category + i);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw,
+					 km->v7.rcp[category + i].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw,
+					 km->v7.rcp[category + i].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw,
+					km->v7.rcp[category + i].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw,
+				       km->v7.rcp[category + i].dual);
+			km_nthw_rcp_paired(be->p_km_nthw,
+					 km->v7.rcp[category + i].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw,
+					km->v7.rcp[category + i].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw,
+					km->v7.rcp[category + i].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw,
+						 km->v7.rcp[category + i].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM CAM records.  The flat record address places the bank
+ * number above an 11-bit record index (bank << 11) + record, i.e. 2048
+ * records per bank; each record carries six key words (w0-w5) and six
+ * flow-type fields (ft0-ft5).  KM version 7 only; always returns 0.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_cam_select(be->p_km_nthw,
+					 (bank << 11) + record + i);
+			km_nthw_cam_w0(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w0);
+			km_nthw_cam_w1(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w1);
+			km_nthw_cam_w2(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w2);
+			km_nthw_cam_w3(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w3);
+			km_nthw_cam_w4(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w4);
+			km_nthw_cam_w5(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w5);
+			km_nthw_cam_ft0(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush up to 'cnt' KM TCAM entries; only entries whose shadow 'dirty'
+ * flag is set are actually written, and the flag is cleared afterwards.
+ * Flat index = bank * 4 * 256 + byte * 256 + value (4 bytes of 256
+ * values per bank).  KM version 7 only; always returns 0.
+ *
+ * NOTE(review): 'km' is pointer-to-const yet the dirty flag is cleared —
+ * this compiles only if v7.tcam is a pointer member (const does not
+ * propagate through it); confirm against km_func_s.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int start_idx = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			if (km->v7.tcam[start_idx + i].dirty) {
+				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
+				km_nthw_tcam_t(be->p_km_nthw,
+					     km->v7.tcam[start_idx + i].t);
+				km_nthw_tcam_flush(be->p_km_nthw);
+				km->v7.tcam[start_idx + i].dirty = 0;
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write KM TCAM color/flow-type info (TCI) records to hardware.
+ * bank is the TCAM bank, index is the index within the bank (0..71).
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* TCAM bank width is 72 records */
+			const int idx = bank * 72 + index + i;
+
+			km_nthw_tci_select(be->p_km_nthw, idx);
+			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[idx].color);
+			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[idx].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write KM TCAM qualifier (TCQ) records to hardware.
+ * bank is the TCAM bank, index is the index within the bank (0..71).
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* address: lower 4 bits = bank, upper 7 bits = index */
+			const int idx = bank + (index << 4) + i;
+
+			km_nthw_tcq_select(be->p_km_nthw, idx);
+			km_nthw_tcq_bank_mask(be->p_km_nthw, km->v7.tcq[idx].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[idx].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* Report whether an FLM module instance exists on this backend device. */
+static bool flm_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_flm_nthw ? true : false;
+}
+
+/* FLM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_flm_nthw->m_flm);
+	const uint32_t minor = module_get_minor_version(be->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Write the FLM CONTROL register group from shadow storage to hardware:
+ * stage every control field, then commit them with one register flush.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM STATUS fields to hardware.  CALIBDONE, INITDONE,
+ * IDLE and EFT_BP are read-only and therefore not written here.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical, 0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the FLM STATUS shadow copy: trigger a register update and read
+ * all status fields back from hardware into the shadow storage.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw, &flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone, 1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical, 1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM TIMEOUT register (flow timeout value T) to hardware. */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM SCRUB register (scrub interval I) to hardware. */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_BIN register (load-measurement interval) to hardware. */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_PPS register (packets-per-second load value). */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_LPS register (lookups-per-second load value). */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_APS register (accesses-per-second load value). */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM PRIO register group: four (limit, flow-type) priority
+ * pairs, committed with a single flush.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* priority level 0 */
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		/* priority level 1 */
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		/* priority level 2 */
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		/* priority level 3 */
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt FLM PST (port state) records starting at @index from shadow
+ * storage to hardware, one record per flush.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			flm_nthw_pst_select(be->p_flm_nthw, idx);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[idx].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[idx].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[idx].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt FLM RCP (recipe) records starting at @index from shadow
+ * storage to hardware, one record per flush.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			flm_nthw_rcp_select(be->p_flm_nthw, idx);
+			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v17.rcp[idx].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v17.rcp[idx].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v17.rcp[idx].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v17.rcp[idx].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v17.rcp[idx].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v17.rcp[idx].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v17.rcp[idx].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v17.rcp[idx].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v17.rcp[idx].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v17.rcp[idx].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v17.rcp[idx].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v17.rcp[idx].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v17.rcp[idx].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v17.rcp[idx].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v17.rcp[idx].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v17.rcp[idx].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v17.rcp[idx].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v17.rcp[idx].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[idx].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the FLM buffer-control shadow values (learn-FIFO free space and
+ * available info/status records) from hardware.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh all FLM statistics counters: trigger a hardware update of each
+ * counter register, then read the current values into the shadow storage.
+ * Counters in the second branch only exist from FLM version 20 onwards.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt, 1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt, 1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt, 1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt, 1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt, 1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt, 1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt, 1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt, 1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v17.prb_ignore->cnt, 1);
+	}
+
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt, 1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt, 1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt, 1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt, 1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt, 1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt, 1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push a block of learn records to the FLM learn FIFO.  The buffer-control
+ * shadow values are refreshed through the out-pointers.  Returns the result
+ * of the underlying nthw flush call.
+ * NOTE(review): unlike the register flush functions above, this runs even
+ * when flm->ver < 17 — presumably intentional; confirm against the v17
+ * buf_ctrl layout being the only one supported.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of info records from the FLM info FIFO into @inf_data and
+ * refresh the buffer-control shadow values.  Returns the result of the
+ * underlying nthw update call.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of status records from the FLM status FIFO into @sta_data
+ * and refresh the buffer-control shadow values.  Returns the result of the
+ * underlying nthw update call.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* Report whether an HSH module instance exists on this backend device. */
+static bool hsh_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_hsh_nthw ? true : false;
+}
+
+/* HSH module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_hsh_nthw->m_hsh);
+	const uint32_t minor = module_get_minor_version(be->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Write @cnt HSH RCP (hash recipe) records starting at @category from
+ * shadow storage to hardware, one record per flush.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			hsh_nthw_rcp_select(be->p_hsh_nthw, idx);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[idx].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[idx].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[idx].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[idx].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[idx].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[idx].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[idx].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[idx].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[idx].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* Report whether an HST module instance exists on this backend device. */
+static bool hst_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_hst_nthw ? true : false;
+}
+
+/* HST module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_hst_nthw->m_hst);
+	const uint32_t minor = module_get_minor_version(be->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Write @cnt HST RCP (header stripper recipe) records starting at
+ * @category from shadow storage to hardware, one record per flush.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			hst_nthw_rcp_select(be->p_hst_nthw, idx);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[idx].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw, hst->v2.rcp[idx].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw, hst->v2.rcp[idx].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw, hst->v2.rcp[idx].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw, hst->v2.rcp[idx].end_ofs);
+			/* header modifier 0 */
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[idx].modif0_value);
+			/* header modifier 1 */
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[idx].modif1_value);
+			/* header modifier 2 */
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[idx].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* Report whether a QSL module instance exists on this backend device. */
+static bool qsl_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_qsl_nthw ? true : false;
+}
+
+/* QSL module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_qsl_nthw->m_qsl);
+	const uint32_t minor = module_get_minor_version(be->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Write @cnt QSL RCP (queue selector recipe) records starting at
+ * @category from shadow storage to hardware, one record per flush.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			qsl_nthw_rcp_select(be->p_qsl_nthw, idx);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[idx].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[idx].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[idx].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[idx].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[idx].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt QSL QST (queue selector table) entries starting at @entry
+ * from shadow storage to hardware, one entry per flush.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_qst_select(be->p_qsl_nthw, idx);
+			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[idx].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[idx].en);
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[idx].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[idx].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[idx].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[idx].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt QSL QEN (queue enable) entries starting at @entry from shadow
+ * storage to hardware, one entry per flush.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_qen_select(be->p_qsl_nthw, idx);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[idx].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt QSL UNMQ (unmatched-packet queue) entries starting at @entry
+ * from shadow storage to hardware, one entry per flush.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_unmq_select(be->p_qsl_nthw, idx);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[idx].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[idx].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* Report whether an SLC module instance exists on this backend device. */
+static bool slc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_slc_nthw ? true : false;
+}
+
+/* SLC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_slc_nthw->m_slc);
+	const uint32_t minor = module_get_minor_version(be->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Write @cnt SLC RCP (slicer recipe) records starting at @category from
+ * shadow storage to hardware, one record per flush.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			slc_nthw_rcp_select(be->p_slc_nthw, idx);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[idx].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw, slc->v1.rcp[idx].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw, slc->v1.rcp[idx].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw, slc->v1.rcp[idx].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* Report whether an SLC LR module instance exists on this backend device. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_slc_lr_nthw ? true : false;
+}
+
+/* SLC LR module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_slc_lr_nthw->m_slc_lr);
+	const uint32_t minor = module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Write @cnt SLC LR RCP (slicer recipe) records starting at @category
+ * from shadow storage to hardware, one record per flush.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, idx);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[idx].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[idx].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[idx].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[idx].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* Report whether a PDB module instance exists on this backend device. */
+static bool pdb_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_pdb_nthw ? true : false;
+}
+
+/* PDB module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_pdb_nthw->m_pdb);
+	const uint32_t minor = module_get_minor_version(be->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Write @cnt PDB RCP (packet descriptor builder recipe) records starting
+ * at @category from shadow storage to hardware, one record per flush.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Bug fix: the PCAP_KEEP_FCS value was previously
+			 * written through pdb_nthw_rcp_duplicate_bit(), which
+			 * overwrote the DUPLICATE_BIT shadow value staged just
+			 * above and left PCAP_KEEP_FCS never programmed.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the PDB CONFIG register group (timestamp format and port offset)
+ * from shadow storage to hardware.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* Report whether an IOA module instance exists on this backend device. */
+static bool ioa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_ioa_nthw ? true : false;
+}
+
+/* IOA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(be->p_ioa_nthw->m_ioa);
+	const uint32_t minor = module_get_minor_version(be->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Flush @cnt IOA recipe entries starting at @category to hardware,
+ * one record at a time (write-count fixed to 1). Only the v4 layout
+ * is handled; other versions are a no-op and 0 is still returned.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* select record, mirror all shadow fields, then flush */
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Flush the two custom VLAN TPID values to the IOA special-TPID
+ * register (v4 layout only; otherwise a no-op returning 0).
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt ROA egress-packet-processing (EPP) records starting at
+ * @index, via the IOA module (v4 layout only; otherwise no-op).
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* Report whether the ROA module was instantiated for this adapter. */
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw != NULL;
+}
+
+/* ROA module version encoded as (major << 16) | (minor & 0xffff). */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+			  (module_get_minor_version(be->p_roa_nthw->m_roa) &
+			   0xffff));
+}
+
+/*
+ * Flush @cnt tunnel-header templates starting at @index to the ROA
+ * module (v6 layout only). Each template is written as 4 register
+ * chunks (write-count 4); the inner loop steps the hardware select by
+ * index + i*4 + ii while sourcing data from tunhdr[index/4 + i].
+ * NOTE(review): the index/4 scaling implies @index is expected to be a
+ * multiple of 4 (a raw chunk index) -- confirm against callers.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt ROA tunnel-configuration records starting at @category
+ * (v6 layout only; otherwise a no-op returning 0). One record is
+ * selected, fully mirrored from the shadow copy, and flushed per
+ * iteration.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush the global ROA forwarding configuration (recirculate/normal/
+ * cell-builder/non-normal packet forwarding and the two TX ports) to
+ * hardware (v6 layout only; otherwise no-op returning 0).
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt link-aggregation config entries (TX PHY port mapping)
+ * starting at @index to the ROA module (v6 layout only; otherwise
+ * no-op returning 0).
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* Report whether the RMC module was instantiated for this adapter. */
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw != NULL;
+}
+
+/* RMC module version encoded as (major << 16) | (minor & 0xffff). */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+			  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+			   0xffff));
+}
+
+/*
+ * Flush the RMC control register (blocking of statistics/keep-alive/
+ * RPP-slice/MAC-port traffic and LAG odd/even PHY selection) for
+ * version 1.3 (encoded 0x10003); other versions are a no-op.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/*
+ * TPE is a composite of six sub-modules; it is only "present" when
+ * every one of CSU, HFU, RPP_LR, TX_CPY, TX_INS and TX_RPL was
+ * instantiated for this adapter.
+ */
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+	       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+	       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Map the six TPE sub-module versions (each encoded major<<16|minor)
+ * onto a single combined TPE version: 1 when RPP_LR is v0, 2 when
+ * RPP_LR is v1, with all other sub-module versions fixed. Any other
+ * combination is unsupported: it trips the assert in debug builds and
+ * falls through to return 0 when compiled with NDEBUG.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	/* unsupported sub-module version combination */
+	assert(false);
+	return 0;
+}
+
+/*
+ * Flush @cnt RPP_LR recipe entries (exp field) starting at @index for
+ * TPE versions >= 1; older versions are a no-op returning 0.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt RPP IFR (fragmentation) recipes starting at @index.
+ * Requires TPE version >= 2; unlike most flush helpers here, an
+ * unsupported version returns -1 instead of silently succeeding.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Flush @cnt IFR recipes (enable + MTU) starting at @index via the
+ * standalone IFR module. Requires TPE version >= 2; otherwise -1.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Flush @cnt TX_INS recipes (dyn/ofs/len insert descriptors) starting
+ * at @index for TPE versions >= 1; older versions are a no-op.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_RPL recipes (replace descriptors: dyn/ofs/len plus
+ * replace pointer and extension priority) starting at @index for TPE
+ * versions >= 1; older versions are a no-op.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_RPL extension entries (replace pointers) starting at
+ * @index for TPE versions >= 1; older versions are a no-op.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_RPL replacement-data words starting at @index for TPE
+ * versions >= 1; older versions are a no-op.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_CPY recipes starting at @index. Recipes are sharded
+ * across per-writer banks: writer index = (index+i)/nb_rcp_categories,
+ * slot within the writer = (index+i)%nb_rcp_categories. The write
+ * count for a writer is programmed (once) each time the loop crosses
+ * into a new writer bank.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* -1 wraps to UINT_MAX: sentinel meaning "no writer selected yet" */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt HFU (header field update) recipes starting at @index for
+ * TPE versions >= 1. Each recipe mirrors the full shadow record:
+ * three length-update groups (a/b/c), TTL update, checksum/protocol
+ * info, and the four outer/inner L3/L4 offsets.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			/* length group A */
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			/* length group B */
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			/* length group C */
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			/* TTL update */
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			/* protocol/checksum info and header offsets */
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt CSU (checksum update) recipes starting at @index: the
+ * outer/inner L3 and L4 checksum commands. TPE versions >= 1 only;
+ * older versions are a no-op.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Rx queue allocation is not supported by this backend: always logs
+ * an error and returns -1.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Rx queue free is not supported by this backend: logs an error.
+ * NOTE(review): returns 0 (success) despite printing an error, while
+ * alloc_rx_queue returns -1 -- confirm whether callers rely on the
+ * lenient return or whether this should also be -1.
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table handed to the generic flow API. The first
+ * member is the backend interface version (1); the rest are function
+ * pointers grouped per hardware module (INFO getters, DBS queue ops,
+ * CAT, KM, FLM, HSH, HST, QSL, SLC, SLC_LR, PDB, IOA, ROA, RMC, TPE).
+ * NOTE(review): positional initializers -- the order here must match
+ * the member order of struct flow_api_backend_ops exactly; designated
+ * initializers would make this robust against reordering.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	alloc_rx_queue,
+	free_rx_queue,
+
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe and instantiate every nthw hardware-module wrapper present in
+ * the FPGA image for this adapter, populate the per-adapter slot in
+ * be_devs[], and return the backend ops table. For each module the
+ * pattern is: a dry-run xxx_nthw_init(NULL, ...) probes availability;
+ * on success (0) a real instance is created and initialized, otherwise
+ * the pointer is left NULL (the module is simply absent, as reported
+ * by the xxx_get_present callbacks above).
+ * *dev receives the per-adapter backend context used by all ops.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	/* INFO is unconditional: capability getters always need it */
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down the backend context created by bin_flow_backend_init:
+ * delete every nthw module wrapper. Modules that were not present
+ * hold NULL pointers, so the xxx_nthw_delete calls are presumably
+ * NULL-tolerant -- TODO confirm in the nthw implementations.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Create the flow-filter backend for @p_fpga: returns the backend ops
+ * table and stores the per-adapter context in *be_dev. Pair with
+ * bin_flow_backend_done() to release the context.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Initialize the flow filter API for one adapter.
+ *
+ * Creates the binary flow backend on p_fpga and hands it to
+ * flow_api_create(). On success *p_flow_device points to the new flow NIC
+ * device and 0 is returned; on failure the backend is torn down again,
+ * *p_flow_device is set to NULL and -1 is returned.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		/* Do not leak the NTHW modules created by the backend */
+		if (be_dev)
+			bin_flow_backend_done(be_dev);
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Shut down the flow filter API for a flow NIC device and release its
+ * backend afterwards. Returns the status reported by flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *backend = flow_api_get_be_dev(dev);
+	int status = flow_api_done(dev);
+
+	if (backend != NULL)
+		bin_flow_backend_done(backend);
+
+	return status;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Guard renamed from __FLOW_FILTER_HPP__: this is a C header; match the
+ * __*_H__ convention used by the sibling headers (e.g. __FLOW_BACKEND_H__).
+ */
+#ifndef __FLOW_FILTER_H__
+#define __FLOW_FILTER_H__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/* Create the binary flow backend for p_fpga and attach a flow NIC device.
+ * On success *p_flow_device is set and 0 is returned; on failure it is
+ * NULL and -1 is returned.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Tear down the flow NIC device and release its backend resources. */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v2 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-17 14:43   ` [PATCH v2 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-17 14:43   ` Mykola Kostenok
  2023-08-17 14:43   ` [PATCH v2 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  2023-08-17 22:08   ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Tyler Retzlaff
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and makes the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4258 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1194 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 28 files changed, 12519 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c3f2c993f..02aca74173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple processes.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter NT driver state (clone of the 4GA kernel-driver struct). */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;	/* PCI identifier of the adapter */
+	struct adapter_info_s adapter_info;	/* embedded adapter state */
+	char *p_drv_name;	/* driver name string - ownership not shown here, TODO confirm */
+
+	/* NOTE(review): volatile is not a thread-sync primitive; presumably
+	 * b_shutdown signals the worker threads below to exit - verify
+	 */
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;	/* mutex guarding statistics state - scope TODO confirm */
+	pthread_t stat_thread;	/* statistics thread handle */
+	pthread_t flm_thread;	/* FLM thread handle */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;	/* adapter index */
+	struct rte_pci_device *p_dev;	/* backing DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;	/* embedded per-adapter driver state */
+
+	int n_eth_dev_init_count;	/* presumably count of initialized eth devices - verify */
+	int probe_finished;	/* presumably set non-zero once probe completes - verify */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index a9892615c9..faaba95af3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -26,6 +26,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -112,6 +115,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+/* Split-ring available ring, byte-packed. */
+struct virtq_avail {
+	le16 flags;	/* VIRTQ_AVAIL_F_NO_INTERRUPT suppresses interrupts */
+	le16 idx;	/* next free slot in ring[] */
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+/* One entry of the split-ring used ring, byte-packed. */
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+/* Split-ring used ring, byte-packed. */
+struct virtq_used {
+	le16 flags;	/* VIRTQ_USED_F_NO_NOTIFY suppresses notifications */
+	le16 idx;	/* next slot the device writes in ring[] */
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used ring and descriptor table within a split
+ * virt-queue memory area (avail ring is at offset 0).
+ */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+/* Lifecycle state of a rxvq[]/txvq[] slot. */
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+/* 1 when the queue type is a packed ring, 0 for a split ring. */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * State for one virt-queue (RX or TX). The leading union holds either the
+ * SPLIT-ring or the PACKED-ring view of the queue structures, selected by
+ * vq_type.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;	/* presumably last avail idx pushed to HW - verify */
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;	/* UNUSED / UNMANAGED / MANAGED */
+	uint16_t vq_type;	/* split or packed ring */
+	uint16_t in_order;
+	int irq_vector;	/* MSI-X vector, or negative when polling */
+
+	nthw_dbs_t *mp_nthw_dbs;	/* owning DBS module */
+	uint32_t index;	/* queue index in rxvq[]/txvq[] and DBS registers */
+	le16 queue_size;	/* number of descriptors */
+	uint32_t am_enable;	/* current *_AM_ENABLE / *_AM_DISABLE state */
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;	/* physical addresses handed to the FPGA */
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Byte offsets of the event-suppression areas of a packed virt-queue. */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* Static per-index state for all RX and TX virt-queues. */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one hardware RX queue through the DBS.RX_INIT register:
+ * wait until the hardware is idle, write the start index/pointer, then
+ * wait for the init to complete.
+ * NOTE(review): the busy-wait has no timeout - hangs if the FPGA never
+ * clears the busy flag.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Initialize one hardware TX queue through the DBS.TX_INIT register:
+ * wait for the hardware to become idle, request initialization with the
+ * supplied start index and pointer, then wait for completion.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t init_state;
+	uint32_t ptr_unused;
+	uint32_t hw_busy;
+
+	/* Wait for any ongoing initialization to finish */
+	do {
+		get_tx_init(p_nthw_dbs, &init_state, &ptr_unused, &hw_busy);
+	} while (hw_busy);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait until the hardware has applied the new settings */
+	do {
+		get_tx_init(p_nthw_dbs, &init_state, &ptr_unused, &hw_busy);
+	} while (hw_busy);
+}
+
+/*
+ * Create the DBS (doorbell) module for the FPGA and bring all virt-queues
+ * to a known disabled state.
+ *
+ * Marks every rxvq[]/txvq[] slot UNUSED, resets the DBS, initializes all
+ * hardware RX/TX queues, then steps the RX/TX control registers from
+ * fully disabled to enabled. Returns 0 on success, non-zero on failure.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		/* NOTE(review): assumes nthw_dbs_new() allocates with malloc,
+		 * so that free() is the correct release - TODO confirm
+		 */
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Bring RX control up in stages: all off -> AM/UW on -> queue scan on */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	/* Same staged bring-up for TX control */
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the byte offsets of the used ring and the descriptor table for
+ * a split virt-queue of the given size. The avail and used sections are
+ * each rounded up to STRUCT_ALIGNMENT; the descriptor table follows the
+ * used section.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	struct virtq_struct_layout_s layout;
+
+	/* avail header plus one le16 ring entry per descriptor
+	 * ("avail->used_event" is not used, so no trailing le16)
+	 */
+	size_t avail_bytes = sizeof(struct virtq_avail) +
+			     queue_size * sizeof(le16);
+	/* used header plus one element per descriptor
+	 * ("used->avail_event" is not used, so no trailing le16)
+	 */
+	size_t used_bytes = sizeof(struct virtq_used) +
+			    queue_size * sizeof(struct virtq_used_elem);
+
+	size_t avail_rounded = (avail_bytes % STRUCT_ALIGNMENT) == 0 ?
+			       avail_bytes :
+			       STRUCT_ALIGNMENT * (avail_bytes / STRUCT_ALIGNMENT + 1);
+	size_t used_rounded = (used_bytes % STRUCT_ALIGNMENT) == 0 ?
+			      used_bytes :
+			      STRUCT_ALIGNMENT * (used_bytes / STRUCT_ALIGNMENT + 1);
+
+	layout.used_offset = avail_rounded;
+	layout.desc_offset = avail_rounded + used_rounded;
+
+	return layout;
+}
+
+/*
+ * Fill in a split-ring avail structure: interrupts suppressed, index set
+ * to the caller's starting value, and ring slot i referring to
+ * descriptor i.
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = addr;
+	uint16_t slot;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+	for (slot = 0; slot < queue_size; ++slot)
+		avail->ring[slot] = slot;
+}
+
+/*
+ * Fill in a split-ring used structure: notifications suppressed, index
+ * zeroed and all ring elements cleared.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	uint16_t i;	/* match queue_size's type; avoids signed/unsigned compare */
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	/* Use the named flag instead of the magic number 1 */
+	p_used->flags = VIRTQ_USED_F_NO_NOTIFY;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Fill the split-ring descriptor table from the supplied packet buffer
+ * descriptors: each descriptor gets its buffer's physical address and
+ * length, the given flags, and next = 0 (no chaining).
+ * No-op when packet_buffer_descriptors is NULL.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+/*
+ * Convenience wrapper: initialize the avail ring, the used ring and the
+ * descriptor table of a split virt-queue in one call.
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors, queue_size,
+					 flgs);
+}
+
+/*
+ * Return floor(log2(qsize)), i.e. the bit position of the highest set bit.
+ * Callers pass power-of-two queue sizes; a qsize of zero wraps to 0xFFFF,
+ * exactly as in the original formulation.
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t bits = 0;
+
+	for (; qsize != 0; qsize >>= 1)
+		++bits;
+
+	return bits - 1;
+}
+
+/*
+ * Configure one DBS RX virt-queue (split or packed) and return its handle.
+ *
+ * Programs RX_DR_DATA (descriptor ring), RX_UW_DATA (used/driver-event
+ * writeback, interrupts initially off), initializes the queue via RX_INIT
+ * and finally enables avail-ring polling via RX_AM_DATA. Queues that use
+ * interrupts (irq_vector >= 0) are left with AM disabled here and enabled
+ * later, after vfio interrupts are set up.
+ * Returns &rxvq[index] on success or NULL if any register write fails.
+ * NOTE(review): "index" is not range-checked against MAX_VIRT_QUEUES here.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	/* Packed rings write back into the descriptor area itself */
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Disable an RX virt-queue prior to reconfiguration or shutdown.
+ * Clears interrupt/ISTK state in RX_UW_DATA, disables avail-ring polling
+ * via RX_AM_DATA and waits for the FPGA to finish in-flight packets.
+ * Returns 0 on success, -1 on invalid queue or register failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re)enable an RX virt-queue, optionally with MSI-X interrupts.
+ * Programs RX_UW_DATA with interrupt/vector/sticky (ISTK) state when
+ * irq_vector is within range, then re-enables avail-ring polling via
+ * RX_AM_DATA. Returns 0 on success, -1 on invalid queue or register
+ * failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable a Tx virt queue: clear the interrupt setup in the UW register data,
+ * disable the available-ring monitor (AM) and wait for the FPGA to report the
+ * queue idle. Returns 0 on success, -1 on invalid queue or write failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-enable a Tx virt queue: program the UW register data with interrupt
+ * settings derived from irq_vector, then re-enable the available-ring
+ * monitor (AM). Returns 0 on success, -1 on invalid queue or write failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-route a Tx virt queue to a new physical out-port and (re)enable it.
+ * Rewrites the DR register data with the new port (header field cleared to 0)
+ * before delegating to nthw_enable_tx_virt_queue().
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Guard the handle like the other public entry points do; the
+	 * original dereferenced tx_vq without a NULL check.
+	 */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Configure Tx QoS for one port: enable flag, information rate (ir) and
+ * burst size (bs). Thin wrapper over set_tx_qos_data(); returns its status.
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/* Set the global Tx QoS rate scaling as multiplier/divider. */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+/* Sentinel: hardware queue-pointer readout not (yet) valid */
+#define INDEX_PTR_NOT_VALID 0x80000000
+/* Collect the Rx queue-pointer readout previously requested via
+ * dbs_initialize_get_rx_ptr(). On success *p_index holds the pointer, or
+ * INDEX_PTR_NOT_VALID while the hardware has not produced it yet.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0) {
+		if (valid)
+			*p_index = ptr;
+		else
+			*p_index = INDEX_PTR_NOT_VALID;
+	}
+	return status;
+}
+
+/* Collect the Tx queue-pointer readout previously requested via
+ * dbs_initialize_get_tx_ptr(). On success *p_index holds the pointer, or
+ * INDEX_PTR_NOT_VALID while the hardware has not produced it yet.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+	const int rc = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (rc != 0)
+		return rc;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return rc;
+}
+
+/* Request an Rx queue-pointer readout for the given queue; the value is
+ * collected later via dbs_get_rx_ptr().
+ */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Request a Tx queue-pointer readout; collected later via dbs_get_tx_ptr(). */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Poll the DBS idle register (Rx set when rx != 0, else Tx set) until the
+ * busy flag clears or a read fails. On success *idle holds the last readout.
+ * NOTE(review): spins without a timeout - relies on HW deasserting busy;
+ * confirm the register cannot stay busy indefinitely.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/* Drain a queue in hardware: repeatedly request idle (set_*_idle) and poll
+ * via dbs_wait_on_busy() until the FPGA reports the queue idle. When the
+ * FPGA lacks the idle register (-ENOTSUP) fall back to a fixed 200 ms grace
+ * wait and report success. Returns 0 on success, -1 on register failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an Rx virt queue in the DBS: clear UW, disable AM, let the FPGA
+ * drain, then clear the remaining AM/DR state, re-init the queue and reset
+ * the software handle. Returns 0 on success, -1 on failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* Validate the handle before dereferencing it; the original read
+	 * rxvq->mp_nthw_dbs ahead of this NULL test (NULL dereference).
+	 */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/*
+ * Release an unmanaged Rx virt queue (packet buffers owned by the caller).
+ * Returns 0 on success, -1 if the handle is NULL or not UNMANAGED.
+ */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq != NULL && rxvq->usage == UNMANAGED)
+		return dbs_internal_release_rx_virt_queue(rxvq);
+
+	return -1;
+}
+
+/*
+ * Release a managed Rx virt queue: free the driver-owned packet-buffer
+ * table, then tear the queue down in hardware.
+ * Returns 0 on success, -1 if the handle is NULL or not MANAGED.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a Tx virt queue in the DBS: clear UW, disable AM, let the FPGA
+ * drain, then clear the remaining AM/DR/QP state, re-init the queue and
+ * reset the software handle. Returns 0 on success, -1 on failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* Validate the handle before dereferencing it; the original read
+	 * txvq->mp_nthw_dbs ahead of this NULL test (NULL dereference).
+	 */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/*
+ * Release an unmanaged Tx virt queue (packet buffers owned by the caller).
+ * Returns 0 on success, -1 if the handle is NULL or not UNMANAGED.
+ */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq != NULL && txvq->usage == UNMANAGED)
+		return dbs_internal_release_tx_virt_queue(txvq);
+
+	return -1;
+}
+
+/*
+ * Release a managed Tx virt queue: free the driver-owned packet-buffer
+ * table, then tear the queue down in hardware.
+ * Returns 0 on success, -1 if the handle is NULL or not MANAGED.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Configure an unmanaged Tx virt queue in the DBS following setup sequence
+ * DSF00094: DR -> UW -> AM(disabled) -> queue init -> QP -> AM(enabled
+ * unless interrupts are wanted). Interrupts are always configured off here;
+ * queues needing them are enabled later via nthw_enable_tx_virt_queue().
+ * Returns a handle into the static txvq[] table, or NULL on failure.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings have no separate used ring - point UW at the descriptors */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a managed split-ring Rx virt queue: lay out avail/used/desc rings
+ * in the caller-supplied struct area, pre-post all packet buffers and
+ * register the queue with the DBS.
+ * Returns a handle into the static rxvq[] table, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* The original passed an unchecked malloc() result to memcpy() */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a managed split-ring Tx virt queue: lay out avail/used/desc rings
+ * in the caller-supplied struct area and register the queue with the DBS.
+ * Returns a handle into the static txvq[] table, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* The original passed an unchecked malloc() result to memcpy() */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Initialize a packed-ring virt queue inside the caller-supplied struct
+ * area: zero the ring, carve out device/driver event-suppression areas,
+ * pre-fill all descriptors (buffer address/len only for Rx) and clone the
+ * packet-buffer table. vq->queue_size must already be set.
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	/* Pre-filling the whole ring consumes one full wrap cycle up front */
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/* Managed packed-ring Rx queue: build the ring with WRITE|AVAIL descriptor
+ * flags (wrap counters start at 1) and register it with the DBS.
+ * Returns &rxvq[index] or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/* Managed packed-ring Tx queue: build the ring with cleared descriptor
+ * flags and register it with the DBS. Returns &txvq[index] or NULL on
+ * failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on the requested ring layout */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL; /* unknown ring type */
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on the requested ring layout */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL; /* unknown ring type */
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* AVAIL flag value for the current avail wrap cycle */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverted USED flag for the current avail wrap cycle (avail != used
+ * marks a descriptor available to the device)
+ */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, toggling the avail wrap counter on wrap.
+ * Arguments are captured in locals to avoid multiple evaluation.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, toggling the used wrap counter on wrap */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Fetch up to n received buffer segments from an Rx virt queue.
+ *
+ * Split ring: only whole packets are delivered - a segmented packet whose
+ * segments do not all fit within n is left for the next call. Assumes all
+ * Rx buffers have the size of descriptor 0 and that the FPGA fills buffers
+ * in order. Packed ring: requires in-order completion from the FPGA.
+ *
+ * Returns the number of segments stored in rp[]; *nb_pkts receives the
+ * number of complete packets among them.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* avail == used == wrap count marks a used descriptor */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put n buffers back into the Avail ring so the FPGA can reuse them.
+ * Split ring: bump the avail index. Packed ring: repopulate descriptors and
+ * publish the first descriptor's flags last so the device never observes a
+ * partially written chain.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/* NOTE(review): rte_rmb() is used before publishing the first
+		 * descriptor's flags; a write barrier (rte_wmb) looks like the
+		 * intended fence here - confirm against DPDK barrier semantics.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Logging stub - compiled out; keeps the call sites free of cost */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n Tx descriptors starting at *first_idx.
+ *
+ * Split ring: computes the free descriptor count; the avail ring's entries
+ * can track completion (cached_idx) because descriptors are used in order.
+ * Packed ring: first hands out previously reclaimed out-of-sequence
+ * descriptors (txvq->outs), then scans the ring - requires in-order
+ * completion from the FPGA.
+ *
+ * Returns the number of descriptors granted; cvq receives the ring type
+ * and a pointer to the descriptor area, *p_virt_addr the buffer table.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Serve previously reclaimed out-of-sequence descriptors first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* A descriptor id ahead of next_used means the FPGA
+			 * skipped some ids - reclaim the span in one step
+			 */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		if (i > n) {
+			/* Overshot: remember the extra descriptors in outs */
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n packets (n_segs[i] descriptors each) to the FPGA for transmission.
+ * Split ring: fill the avail ring and publish the new avail index after a
+ * full barrier. Packed ring: set descriptor flags, publishing the first
+ * descriptor's flags last so the device never sees a partial chain.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* NOTE(review): comment said "read barrier" but the intent is
+		 * ordering writes before publishing flags - rte_wmb() looks
+		 * like the correct fence; confirm against DPDK barrier docs.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the FPGA's current Rx queue pointer for rxvq into *index.
+ * Polls up to 100000 x 10 us (~1 s) for the readout to become valid.
+ * Returns 0 on success, -1 on readout error or timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (rx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the FPGA's current Tx queue pointer for txvq into *index.
+ * Polls up to 100000 x 10 us (~1 s) for the readout to become valid.
+ * Returns 0 on success, -1 on readout error or timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	do {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (tx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/*
+ * Describes one host memory area shared with the FPGA: the address
+ * programmed into the hardware, the driver's virtual mapping of the
+ * same area, and its length in bytes.
+ */
+struct nthw_memory_descriptor {
+	void *phys_addr;	/* bus/physical address as seen by the device */
+	void *virt_addr;	/* driver virtual mapping of the same memory */
+	uint32_t len;		/* area size in bytes */
+};
+
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+#define SPLIT_RING 0
+#define PACKED_RING 1
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor.
+ * Layout must match the device-visible virtio split-ring descriptor
+ * exactly; pack(1) prevents the compiler from inserting padding.
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+/*
+ * Packed Ring descriptor.
+ * NOTE(review): #pragma pack(16) only caps member alignment, it does not
+ * force 16-byte alignment of the object itself - confirm the descriptor
+ * ring allocation provides the required alignment.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/*
+ * Packed-ring event suppression structure: controls when the other side
+ * sends ring-change notifications (see RING_EVENT_FLAGS_* above).
+ */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/*
+	 * NOTE(review): a #pragma pack directive placed inside a struct body
+	 * does not affect already-declared members and compiler handling is
+	 * inconsistent - confirm the intended layout of this structure.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ */
+/*
+ * Set the 'next' chain field of split-ring descriptor 'index'.
+ * A packed ring has no next field (chained buffers are contiguous),
+ * so this is deliberately a no-op for PACKED_RING.
+ * All arguments are copied into locals so each expands and evaluates
+ * exactly once, matching vq_add_flags()/vq_set_flags() below
+ * (the original evaluated index/nxt in-place).
+ */
+#define vq_set_next(_vq, _index, _nxt)                 \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq);          \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_nxt) (nxt) = (_nxt);       \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].next = nxt;     \
+	} while (0)
+/*
+ * OR extra flag bits into descriptor 'index' of a split or packed ring.
+ * Arguments are copied into locals so each expands and evaluates once.
+ */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/*
+ * Overwrite the flag word of descriptor 'index' of a split or packed ring.
+ */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/*
+ * Address/length prefix shared by split and packed descriptors; aligned
+ * to 16 bytes so a pointer to it can overlay either ring's descriptor
+ * array (see the union in struct nthw_cvirtq_desc).
+ */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/*
+ * Ring-type-agnostic descriptor handle: 'vq_type' (SPLIT_RING or
+ * PACKED_RING) selects which union member is valid.
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..8989fa5ea8
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4258 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+/*
+ * Registry of virt-queues created by this PMD so they can be released
+ * again on teardown. 'managed' presumably marks queues created via the
+ * nthw_setup_managed_* API (PMD owns the packet buffers) - verify
+ * against the release path.
+ */
+static struct {
+	struct nthw_virt_queue *vq;	/* queue handle, NULL if slot unused */
+	int managed;	/* non-zero: managed queue - TODO confirm */
+	int rx;		/* non-zero: RX queue, otherwise TX */
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/*
+ * Debug helper: hex-dump a packet segment, 16 bytes per line with a
+ * 4-digit hex offset prefix. 'text' (may be NULL) is printed as a
+ * header together with the buffer address and length.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register a driver instance in the global adapter table g_p_drv[],
+ * indexed by adapter number. Logs a warning if an entry for the same
+ * slot already exists (it is overwritten).
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/*
+	 * '>=' : g_p_drv[] has exactly NUM_ADAPTER_MAX entries, so
+	 * adapter_no == NUM_ADAPTER_MAX would write one past the end
+	 * (the original check used '>').
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up the stored driver instance whose PCI domain and bus match
+ * 'addr'. Returns NULL when no adapter at that location is registered.
+ * The global table is scanned under hwlock.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *p_found = NULL;
+	int i;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		struct drv_s *p = g_p_drv[i];
+
+		if (!p)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(p->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(p->ntdrv.pciident) == addr.bus) {
+			p_found = p;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_found;
+}
+
+/*
+ * Decode a packed PCI identifier into an rte_pci_addr and look up the
+ * matching driver instance (NULL when not found).
+ */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Debug entry point: print adapter information for the device with the
+ * given packed PCI identifier to 'pfh'.
+ * Returns the nt4ga_adapter_show_info() result, or -1 when the device
+ * is unknown.
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	/*
+	 * get_pdrv_from_pciident() returns NULL for an unknown device;
+	 * the original dereferenced the result unconditionally.
+	 */
+	if (!p_drv)
+		return -1;
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS module handle for the adapter at 'pci_addr', or NULL
+ * (with an error log) when no such adapter is registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       (nthw_dbs_t *)NULL, p_drv, pci_addr.domain, pci_addr.bus,
+		       pci_addr.devid, pci_addr.function);
+		return NULL;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at 'pci_addr', or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when the adapter is
+ * not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return FPGA_INFO_PROFILE_UNKNOWN;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.profile;
+}
+
+/*
+ * rte_kvargs handler: parse an unsigned 32-bit value (decimal, 0x hex
+ * or 0-prefixed octal, as accepted by base-0 strtoul) into *extra_args.
+ * Returns 0 on success, -1 on missing arguments or when no digits could
+ * be parsed. Trailing characters after the number are still accepted,
+ * matching the original strtol()-based behavior.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	char *end = NULL;
+
+	if (!value_str || !extra_args)
+		return -1;
+	/*
+	 * strtoul instead of strtol: values up to UINT32_MAX parse
+	 * correctly even on platforms where long is 32-bit, and end
+	 * lets us reject inputs with no digits (strtol's result was
+	 * previously used unchecked).
+	 */
+	const unsigned long value = strtoul(value_str, &end, 0);
+
+	if (end == value_str)
+		return -1; /* no digits at all */
+	*(uint32_t *)extra_args = (uint32_t)value;
+	return 0;
+}
+
+/* One "<port>:<speed>" pair parsed from the device arguments. */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+/*
+ * rte_kvargs handler: *extra_args points to a cursor into an array of
+ * struct port_link_speed; each successful parse fills one entry and
+ * advances the cursor. Returns 0 on success, -1 on malformed input
+ * (missing ':' separator or NULL arguments).
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* advance the caller's cursor to the next free array entry */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Collect per-queue and aggregate RX/TX counters for one port into
+ * 'stats' (rte_eth_stats layout). Queue counters are read from the
+ * internals->rxq_scg[] / txq_scg[] arrays after poll_statistics()
+ * refreshes them.
+ * Returns 0 on success, -1 when statistics handles are missing or the
+ * port index is out of range.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* NOTE(review): '>' allows if_index == NUM_ADAPTER_PORTS_MAX;
+	 * confirm whether '>=' was intended.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue RX/TX counters for one port and record the reset
+ * timestamp. Counter updates are serialized against the statistics
+ * poller via p_nt_drv->stat_lck.
+ * Returns 0 on success, -1 when statistics handles are missing or the
+ * port index is out of range.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* NOTE(review): '>' allows n_intf_no == NUM_ADAPTER_PORTS_MAX;
+	 * confirm whether '>=' was intended (same pattern as
+	 * dpdk_stats_collect()).
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/*
+ * Map an NT link speed enum value to the corresponding DPDK
+ * ETH_SPEED_NUM_* value (Mbps). Unknown values map to
+ * ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/*
+ * Map an NT link duplex enum value to the corresponding DPDK
+ * ETH_LINK_*_DUPLEX value. Unknown values map to 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * dev_ops link_update: refresh eth_dev->data->dev_link (status, speed,
+ * duplex) for this port. Stopped devices always report link down.
+ * Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		/* Virtual/override ports have no PHY: link state follows
+		 * the virtio negotiation state, speed is unreported.
+		 */
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		/* Physical port: query the adapter layer. */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * dev_ops stats_get: fill 'stats' with the port's counters.
+ * Propagates the dpdk_stats_collect() result (-1 on invalid state)
+ * instead of silently discarding it as before.
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * dev_ops stats_reset: zero the port's counters.
+ * Propagates the dpdk_stats_reset() result (-1 on invalid state)
+ * instead of silently discarding it as before.
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/*
+ * Translate an NT link-speed capability bitmask into the corresponding
+ * DPDK ETH_LINK_SPEED_* capability bitmask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < RTE_DIM(speed_map); i++)
+		if (nt_link_speed_capa & speed_map[i].nt_bit)
+			eth_speed_capa |= speed_map[i].eth_bit;
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * dev_ops dev_infos_get: report port capabilities (queue counts, MTU
+ * range, link speed capabilities, RSS offload flags) for this
+ * interface. Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* The inline profile requires a larger minimum MTU. */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use key, so it is just a fake key length to
+	 * fit testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet - which may span several virtqueue segments
+ * of SG_HW_RX_PKT_BUFFER_SIZE bytes each - into 'mbuf', chaining extra
+ * mbufs from 'mb_pool' when the data does not fit in one.
+ * 'data_len' is the captured length including the SG_HDR_SIZE packet
+ * header, which is stripped from the output.
+ * Returns the number of virtqueue segments consumed, or -1 on mbuf
+ * allocation failure or when more than 'max_segs' segments would be
+ * needed.
+ */
+static inline __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		/* Current virtqueue segment does not fit in the current
+		 * mbuf: fill it, then keep allocating chained mbufs until
+		 * the segment is fully copied.
+		 */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* Ran out of virtqueue segments before the packet was complete. */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one (possibly segmented) mbuf chain into one or more virtqueue buffers.
+ *
+ * The first virtqueue buffer (at vq_descr_idx) reserves SG_HDR_SIZE bytes of
+ * headroom for the packet header; overflow data is chained into following
+ * descriptors via VIRTQ_DESC_F_NEXT / VIRTQ_DESCR_IDX_NEXT().
+ *
+ * cvq_desc     combined virtqueue descriptor table to fill in
+ * vq_descr_idx index of the first descriptor to use
+ * vq_bufs      per-descriptor virtqueue buffer memory descriptors
+ * max_segs     maximum number of vq segments this packet may occupy
+ * mbuf         source packet (head of mbuf chain)
+ *
+ * Returns the number of virtqueue segments consumed, or -1 when the packet
+ * did not fit within max_segs segments (checked after the copy loop below).
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;	/* bytes left of the whole packet */
+	int cpy_size = mbuf->data_len;	/* bytes left of the current mbuf segment */
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	/* room left in the current vq buffer; the first one also holds the SG header */
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+			/*
+			 * NOTE(review): "%u" is paired with 64-bit offset values in the
+			 * debug printf below - TODO confirm/convert to PRIu64.
+			 */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* chain the current descriptor to a fresh one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/* segment budget exceeded - error reported after the loop */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+			/* NOTE(review): "%u" below receives a 64-bit difference - TODO confirm */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len promised more data than the segment chain held */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* convert last used index to a count, then validate the segment budget */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK Tx burst callback (scatter-gather path).
+ *
+ * Pads runts to 60 bytes, computes how many virtqueue segments the burst
+ * needs, reserves that many Tx buffers from the HW queue, copies each mbuf
+ * into the virtqueue (simple single-segment copy when possible, otherwise
+ * copy_mbuf_to_virtqueue()), frees the mbufs and releases the consumed
+ * descriptors to hardware.
+ *
+ * Returns the number of packets actually queued (may be less than nb_pkts
+ * under HW back pressure).
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];	/* per-packet vq segment counts */
+
+	if (kill_pmd)
+		return 0;
+
+	/* clamp burst to the size of nb_segs_arr */
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad to 60 bytes - presumably the minimum Ethernet frame size
+		 * without FCS; note data_len is forced too - TODO confirm for
+		 * multi-segment mbufs.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		/* drop trailing packets until the burst fits the granted segments */
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* fast path: one mbuf segment into one vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* slow path: segment-by-segment copy with descriptor chaining */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		rte_pktmbuf_free(bufs[pkt]);	/* mbuf data has been copied out */
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* hand the filled descriptors to hardware */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO/IOMMU-map the control ring and packet buffers for one
+ * hardware virtio queue.
+ *
+ * Fast path: one contiguous allocation (1MB control area followed by all
+ * packet buffers) covered by a single 1G VFIO mapping - used only when the
+ * block does not cross a 1G boundary and the virtual/IOVA 1G offsets agree.
+ * Fallback path: a separate 1MB control area and a separate packet-buffer
+ * area, each with its own VFIO mapping.
+ *
+ * On success hwq->virt_queues_ctrl and hwq->pkt_buffers are populated.
+ * Returns 0 on success, -1 on allocation or mapping failure (all memory
+ * allocated by this call is freed again on failure).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/*
+	 * Just allocate 1MB to hold all combined descr rings.
+	 * Widen before multiplying: buf_size * num_descr would otherwise be
+	 * computed in 'int' and could overflow before the uint64_t assignment.
+	 */
+	uint64_t tot_alloc_size = 0x100000 + (uint64_t)buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/*
+		 * Fallback: separate control-area and packet-buffer mappings.
+		 * Just allocate 1MB to hold all combined descr rings.
+		 * (Named ctrl_virt - previously shadowed the outer 'virt'.)
+		 */
+		size = 0x100000;
+		void *ctrl_virt = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!ctrl_virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, ctrl_virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			/* fix: control area was leaked on this path */
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = ctrl_virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       ctrl_virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			/* NOTE(review): ctrl area is still DMA-mapped here - confirm unmap policy */
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* fix: packet buffers, buffer array and ctrl area were leaked here */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		/* fix: combined allocation was leaked on this path */
+		rte_free(virt);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;	/* packet buffers start right after the control area */
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Drop the queue's VF ownership marker; a cleared vf_num means "released". */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq && hwq->vf_num != 0)
+		hwq->vf_num = 0;
+}
+
+/*
+ * Unmap and free the resources previously set up by
+ * allocate_hw_virtio_queues().
+ *
+ * NOTE(review): always unmaps ONE_G_SIZE, while the fallback allocation
+ * path maps only 1MB plus a separate buffer area - confirm the unmap size
+ * against the actual mapping. Also note that on unmap failure the function
+ * returns early without freeing pkt_buffers/virt (leak on error path).
+ *
+ * Returns 0 on success, -1 when the VFIO unmap fails.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	int vf_num = hwq->vf_num;
+
+	/* keep the pointer: release_hw_virtio_queues() clears hwq state below */
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+
+	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+/* DPDK tx_queue_release callback: tear down the queue's HW virtio resources. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* DPDK rx_queue_release callback: tear down the queue's HW virtio resources. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Running count of HW queues handed out so far (bump allocator, never freed). */
+static int num_queues_allocated;
+
+/*
+ * Reserve a contiguous range of 'num' queue indices.
+ *
+ * Returns the first index of the reserved range, or -1 when the request is
+ * invalid (negative) or would exceed MAX_TOTAL_QUEUES.
+ * NOTE(review): not thread-safe - assumed to run only from the single
+ * probe/setup context; confirm callers.
+ */
+static int allocate_queue(int num)
+{
+	const int next_free = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+
+	/* reject negative requests: they would silently shrink the counter */
+	if (num < 0 || num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+/*
+ * DPDK rx_queue_setup callback (scatter-gather path).
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool/buffer-size bookkeeping is
+ * done. For all other ports the HW virtio queue memory is allocated and
+ * a managed split-ring Rx virt queue is created on the exception path host.
+ *
+ * Returns 0 on success, -1 when HW queue allocation fails.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* override ports need no HW virt queue - just record pool and buf size */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* usable payload room per mbuf = data room minus the standard headroom */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	/* NOTE(review): return value of the virt-queue setup is not checked here */
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_setup callback (scatter-gather path).
+ *
+ * For PORT_TYPE_OVERRIDE ports only the queue pointer is recorded. For all
+ * other ports the HW bypass target id is resolved, HW virtio queue memory
+ * is allocated and a managed split-ring Tx virt queue is created; physical
+ * ports are also administratively enabled once the queue is ready.
+ *
+ * Returns 0 on success, -1 on invalid queue id or allocation failure.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * Validate the queue id BEFORE indexing txq_scg[] (the original code
+	 * dereferenced the entry first) and reject id == nb_tx_queues too:
+	 * valid ids are 0 .. nb_tx_queues - 1 (was an off-by-one '>').
+	 */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* route every virtual-port queue to this physical transmit port */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Set the inline-profile MTU on a physical port.
+ * Returns 0 on success, -EINVAL for non-physical ports, out-of-range MTU,
+ * or when the flow layer rejects the change.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+
+	if (internals->type != PORT_TYPE_PHYSICAL || mtu < MIN_MTU_INLINE ||
+			mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (flow_set_mtu_inline(internals->flw_dev, internals->port, mtu))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * DPDK mtu_set callback (non-inline profiles).
+ *
+ * Virtual ports: the exception queue keeps MAX_MTU while each virtual-port
+ * queue gets the requested mtu. Physical ports: the exception queue keeps
+ * MAX_MTU and the port itself gets the requested mtu.
+ *
+ * NOTE(review): 'retval' is initialised to positive EINVAL while the
+ * explicit error branch returns -EINVAL; the initial value is overwritten
+ * on every path, but the sign convention should be made consistent with
+ * whatever nthw_epp_set_mtu() returns - confirm.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int retval = EINVAL;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* apply the requested MTU to every virtual-port queue */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		/* override and unknown port types cannot take an MTU */
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* DPDK rx_queue_start callback: state bookkeeping only, no HW action. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev_data *const data = eth_dev->data;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* DPDK rx_queue_stop callback: state bookkeeping only, no HW action. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev_data *const data = eth_dev->data;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* DPDK tx_queue_start callback: state bookkeeping only, no HW action. */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev_data *const data = eth_dev->data;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* DPDK tx_queue_stop callback: state bookkeeping only, no HW action. */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev_data *const data = eth_dev->data;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* DPDK mac_addr_remove callback: zero the MAC slot at 'index'. */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+		return;
+	}
+
+	/* release builds reach here on an out-of-range index: log and ignore */
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+}
+
+/* DPDK mac_addr_add callback: store the MAC into slot 'index'; VMDq unused. */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		eth_addrs[index] = *mac_addr;
+		return 0;
+	}
+
+	/* release builds reach here on an out-of-range index: log and fail */
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+	return -1;
+}
+
+/* DPDK mac_addr_set callback: the primary MAC address lives in slot 0. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * DPDK set_mc_addr_list callback.
+ *
+ * Copies the supplied multicast list into the per-port table and zeroes the
+ * remaining slots. Returns 0 on success, -1 when the list does not fit.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	/*
+	 * The table holds exactly NUM_MULTICAST_ADDRS_PER_PORT entries, so a
+	 * list of that exact length is valid; the original '>=' rejected it
+	 * (off-by-one).
+	 */
+	if (nb_mc_addr > NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (> %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	/* copy the new entries and clear any leftover slots */
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * DPDK dev_configure callback.
+ *
+ * Marks probing as finished and forces promiscuous mode on - this NIC
+ * always runs promiscuous.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode.
+	 * The original "promiscuous ^= ~promiscuous" set every bit (0xFF);
+	 * the ethdev field is a plain 0/1 boolean, so assign 1 directly.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * DPDK dev_start callback.
+ *
+ * Virtual/override ports are simply reported link-up. Physical ports wait
+ * up to ~5 seconds for the FPGA port link before continuing, then apply any
+ * requested loopback modes (bit 0 = host loopback, bit 1 = line loopback).
+ *
+ * Always returns 0 - a link timeout is only logged, not treated as fatal.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);	/* 50 polls x 100 ms = 5 s budget */
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		if (internals->lpbk_mode) {
+			/* bit 0: loop traffic back at the host side */
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			/* bit 1: loop traffic back at the line side */
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
+		struct ntnic_tx_queue *tx_q = internals->txq_scg;
+
+		uint q;
+
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(rx_q[q].vq);
+
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(tx_q[q].vq);
+
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * DPDK ethdev callback table for the ntnic PMD.
 * Intentionally not const: .mtu_set is patched in at probe time
 * (nthw_pci_dev_init) when the FPGA exposes an EPP module.
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL, /* assigned dev_set_mtu at probe when EPP is present */
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL, /* device cannot leave promiscuous mode */
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
/*
 * Adapter flm stat thread
 *
 * Waits (polling once per second) until the first flow eth device has
 * been configured, then continuously collects FLM/MTR statistics until
 * driver shutdown is signalled via p_drv->ntdrv.b_shutdown.
 */
static void *adapter_flm_thread_fn(void *context)
{
	struct drv_s *p_drv = context;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;

	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
	       p_adapter_info->mp_adapter_id_str, __func__);

	/* eth_base is set elsewhere when the first port is configured. */
	while (p_flow_nic_dev->eth_base == NULL)
		usleep(1000000);
	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;

	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
	       __func__);

	while (!p_drv->ntdrv.b_shutdown) {
		/* Back off briefly only when there was nothing to collect. */
		if (flm_mtr_update_stats(dev) == 0)
			usleep(10);
	}

	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
	       __func__);

	return NULL;
}
+
/*
 * Adapter stat thread
 *
 * Every ~10 ms: triggers a statistics DMA from the FPGA, busy-waits for
 * the timestamp write-back that signals completion (a value of
 * (uint64_t)-1 means "still pending"), then collects the counters under
 * the driver stat lock.  While a DMA appears frozen, RMC status registers
 * are periodically dumped at debug level.  Runs until
 * p_drv->ntdrv.b_shutdown is set.
 */
static void *adapter_stat_thread_fn(void *context)
{
	struct drv_s *p_drv = context;
	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;

	const char *const p_adapter_id_str _unused =
		p_nt_drv->adapter_info.mp_adapter_id_str;

	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);

	assert(p_nthw_stat);

	uint64_t n_loop_iter_cnt = 0;

	while (!p_drv->ntdrv.b_shutdown) {
		/* 100 * 100 us = 10 ms between statistics rounds. */
		usleep(100 * 100);

		/* Kick off a statistics transfer from the FPGA. */
		nthw_stat_trigger(p_nthw_stat);

		uint32_t loop = 0;

		/* Wait for DMA completion; -1 timestamp means still pending. */
		while ((!p_drv->ntdrv.b_shutdown) &&
				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
			usleep(1 * 100);

			/* Every 0x4000 polls, dump RMC status at debug level. */
			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
					(++loop & 0x3fff) == 0) {
				uint32_t sf_ram_of =
					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t descr_fifo_of =
				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);

				uint32_t dbg_merge =
					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t mac_if_err =
					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);

				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
				NT_LOG(ERR, ETHDEV,
				       "SF RAM Overflow     : %08x\n",
				       sf_ram_of);
				NT_LOG(ERR, ETHDEV,
				       "Descr Fifo Overflow : %08x\n",
				       descr_fifo_of);
				NT_LOG(ERR, ETHDEV,
				       "DBG Merge           : %08x\n",
				       dbg_merge);
				NT_LOG(ERR, ETHDEV,
				       "MAC If Errors       : %08x\n",
				       mac_if_err);
			}
		}

		/* Check then collect */
		{
			pthread_mutex_lock(&p_nt_drv->stat_lck);
			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
			pthread_mutex_unlock(&p_nt_drv->stat_lck);
		}
		n_loop_iter_cnt++;
	}

	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);

	return NULL;
}
+
/*
 * File-scope PF/representor bookkeeping shared across probes.
 * NOTE(review): presumably populated during setup_virtual_pf_representor_base()
 * at probe time; unsynchronized — confirm probing is single-threaded.
 */
static struct {
	struct rte_pci_device *vpf_dev;
	struct rte_eth_devargs eth_da;
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
	uint16_t pf_backer_port_id;
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+static void signal_handler_func_int(int sig)
+{
+	if (sig != SIGINT) {
+		signal(sig, previous_handler);
+		raise(sig);
+		return;
+	}
+	kill_pmd = 1;
+}
+
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL);
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
/*
 * PCI remove callback: delegates full teardown to the generic ethdev
 * helper, which invokes nthw_pci_dev_deinit() for each port.
 */
static int nthw_pci_remove(struct rte_pci_device *pci_dev)
{
	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);

	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
}
+
+static int nt_log_init_impl(void)
+{
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (int i = NT_LOG_MODULE_GENERAL; i < NT_LOG_MODULE_END; ++i) {
+		int index = NT_LOG_MODULE_INDEX(i);
+
+		nt_log_module_logtype[index] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[index],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level = 0;
+	uint32_t rte_module = 0;
+
+	switch (level) {
+	case NT_LOG_ERR:
+		rte_level = RTE_LOG_ERR;
+		break;
+	case NT_LOG_WRN:
+		rte_level = RTE_LOG_WARNING;
+		break;
+	case NT_LOG_INF:
+		rte_level = RTE_LOG_INFO;
+		break;
+	default:
+		rte_level = RTE_LOG_DEBUG;
+	}
+
+	rte_module =
+		(module >= NT_LOG_MODULE_GENERAL &&
+		 module < NT_LOG_MODULE_END) ?
+		(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)] : module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+	int index = NT_LOG_MODULE_INDEX(module);
+
+	return rte_log_get_level(nt_log_module_logtype[index]) == RTE_LOG_DEBUG;
+}
+
/* Constructor declaration: runs at shared-object load time, before probe. */
RTE_INIT(ntnic_rte_init); /* must go before function */

/* Hook the NTNIC logging abstraction up to the RTE-based implementation
 * defined above (init/log/is_debug callbacks).
 */
static void ntnic_rte_init(void)
{
	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
		       .log = &nt_log_log_impl,
		       .is_debug = &nt_log_is_debug_impl
	};

	nt_log_init(&impl);
}
+
/* PCI driver definition registered with the DPDK PCI bus. */
static struct rte_pci_driver rte_nthw_pmd = {
	.driver = {
		.name = "net_ntnic",
	},

	.id_table = nthw_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = nthw_pci_probe,
	.remove = nthw_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
/* Devices bound to this PMD must use the vfio-pci kernel driver */
RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int i;
+
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq == NULL) {
+			rel_virt_queue[i].vq = vq;
+			rel_virt_queue[i].rx = rx;
+			rel_virt_queue[i].managed = managed;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int i;
+
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq == vq) {
+			rel_virt_queue[i].vq = NULL;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		if (intern->vhid == vhid)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		printf("Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path,
+		       strcmp(intern->vhost_path, path));
+		if (strcmp(intern->vhost_path, path) == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	tok = strtok(str, ",;");
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok(NULL, ",;");
+	}
+}
+
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	if (dev && dev->device.name)
+		return dev->device.name;
+	return NULL;
+}
+
/* Devargs keys accepted by the virtual-port ethdev: "vlan" and "sep". */
static const char *const valid_args[] = {
#define VP_VLAN_ID "vlan"
	VP_VLAN_ID,
#define VP_SEPARATE_SOCKET "sep"
	VP_SEPARATE_SOCKET, NULL
};
+
/*
 * Build the ethdev and private data for a virtual port (representor / VF
 * backend) on 'vdev'.
 *
 * Parses the optional "vlan" and "sep" devargs, allocates the ethdev via
 * rte_eth_vdev_allocate(), allocates and binds HW queues (the VSWITCH
 * profile gets one extra exception queue at index 0), starts the vDPA
 * relay and finally links the new instance into the global
 * pmd_intern_base list.
 *
 * @param vdev    PCI device the virtual port belongs to
 * @param eth_dev output: the allocated ethdev
 * @return 0 on success, -1 on failure.
 */
static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
				     struct rte_eth_dev **eth_dev)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev_data *data = NULL;
	int i;
	struct rte_eth_link pmd_link;
	int numa_node = vdev->device.numa_node;
	const char *name;
	int n_vq;
	int num_queues;
	uint8_t port;
	uint32_t vlan = 0;
	uint32_t separate_socket = 0;

	enum fpga_info_profile fpga_profile =
		get_fpga_profile_from_pci(vdev->addr);

	name = rte_vdev_device_name(vdev);

	/*
	 * Now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */

	if (vdev && vdev->device.devargs) {
		struct rte_kvargs *kvlist = NULL;

		/* NOTE(review): kvlist is never rte_kvargs_free()'d, so it
		 * leaks on both the success and error paths - confirm and fix.
		 */
		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
					  valid_args);
		if (!kvlist) {
			NT_LOG(ERR, VDPA, "error when parsing param");
			goto error;
		}

		/* Optional "vlan=<id>" devarg */
		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
					       &string_to_u32, &vlan) < 0) {
				NT_LOG(ERR, VDPA, "error to parse %s",
				       VP_VLAN_ID);
				goto error;
			}
		}

		/* Optional "sep=<n>" devarg: use a per-port vhost socket dir */
		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
					       &string_to_u32,
					       &separate_socket) < 0) {
				NT_LOG(ERR, VDPA, "error to parse %s",
				       VP_SEPARATE_SOCKET);
				goto error;
			}
		}
	}

	n_vq = 0;
	*eth_dev =
		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
	if (*eth_dev == NULL)
		goto error;

	/* Shadow copy of the ethdev data, installed at the end of this function */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
	       __func__, __func__, __LINE__, *eth_dev,
	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);

	port = (*eth_dev)->data->representor_id;

	/* Representor ids must lie between the physical ports and the
	 * virtual tunnel port range.
	 * NOTE(review): this early return bypasses the error label, leaking
	 * 'data' and the allocated ethdev - confirm and fix upstream.
	 */
	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
		NT_LOG(ERR, VDPA,
		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
		       port, numa_node);
		return -1;
	}
	NT_LOG(DBG, VDPA,
	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
	       numa_node);

	/* Build up private dev data */
	internals = (*eth_dev)->data->dev_private;
	internals->pci_dev = vdev;
	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
		internals->type = PORT_TYPE_VIRTUAL;
		internals->nb_rx_queues = 1;
		internals->nb_tx_queues = 1;
	} else {
		internals->type = PORT_TYPE_OVERRIDE;
		internals->nb_rx_queues = n_vq;
		internals->nb_tx_queues = n_vq;
	}
	internals->p_drv = get_pdrv_from_pci(vdev->addr);

	if (n_vq > MAX_QUEUES) {
		NT_LOG(ERR, VDPA,
		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
		       MAX_QUEUES);
		goto error;
	}

	if (n_vq > FLOW_MAX_QUEUES) {
		NT_LOG(ERR, VDPA,
		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
		       FLOW_MAX_QUEUES);
		goto error;
	}

	/* Initialize HB output dest to none */
	for (i = 0; i < MAX_QUEUES; i++)
		internals->vpq[i].hw_id = -1;

	internals->vhid = -1;
	internals->port = port;
	internals->if_index = port;
	internals->port_id = (*eth_dev)->data->port_id;
	internals->vlan = vlan;

	/*
	 * Create first time all queues in HW
	 */
	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];

	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
		num_queues = n_vq + 1; /* add 1: 0th for exception */
	else
		num_queues = n_vq;

	int start_queue = allocate_queue(num_queues);

	if (start_queue < 0) {
		NT_LOG(ERR, VDPA,
		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
		       num_queues);
		goto error;
	}

	int vhid = -1;

	for (i = 0; i < num_queues; i++) {
		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
		queue_ids[i].hw_id = start_queue + i;
	}

	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
		/* VSWITCH: this port owns its own flow device */
		internals->txq_scg[0].rss_target_id = -1;
		internals->flw_dev = flow_get_eth_dev(0, internals->port,
						      internals->port_id, num_queues,
						      queue_ids,
						      &internals->txq_scg[0].rss_target_id,
						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
	} else {
		/* Non-VSWITCH: attach the queues to the flow device of the
		 * underlying physical in-port (port number's low bit).
		 */
		uint16_t in_port = internals->port & 1;
		char name[RTE_ETH_NAME_MAX_LEN];
		struct pmd_internals *main_internals;
		struct rte_eth_dev *eth_dev;
		int i;
		int status;

		/* Get name of in_port */
		status = rte_eth_dev_get_name_by_port(in_port, name);
		if (status != 0) {
			NT_LOG(ERR, VDPA, "Name of port not found");
			goto error;
		}
		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);

		/* Get ether device for in_port */
		eth_dev = rte_eth_dev_get_by_name(name);
		if (eth_dev == NULL) {
			NT_LOG(ERR, VDPA, "Failed to get eth device");
			goto error;
		}

		/* Get internals for in_port */
		main_internals =
			(struct pmd_internals *)eth_dev->data->dev_private;
		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
		       main_internals->port);
		if (main_internals->port != in_port) {
			NT_LOG(ERR, VDPA, "Port did not match");
			goto error;
		}

		/* Get flow device for in_port */
		internals->flw_dev = main_internals->flw_dev;

		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
			NT_LOG(DBG, VDPA, "Queue:            %u\n",
			       queue_ids[i].id);
			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
			       queue_ids[i].hw_id);
			if (flow_eth_dev_add_queue(main_internals->flw_dev,
						   &queue_ids[i])) {
				NT_LOG(ERR, VDPA, "Could not add queue");
				goto error;
			}
		}
	}

	if (!internals->flw_dev) {
		NT_LOG(ERR, VDPA,
		       "Error creating virtual port. Resource exhaustion in HW\n");
		goto error;
	}

	/* Compose the vhost-user socket path for this port */
	char path[128];

	if (!separate_socket) {
		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
	} else {
		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
			port);
	}

	internals->vpq_nb_vq = n_vq;
	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
		/* VSWITCH: queue 0 is the exception queue; the relay uses 1..n */
		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
				   queue_ids[1].hw_id, n_vq, n_vq,
				   internals->port, &vhid)) {
			NT_LOG(ERR, VDPA,
			       "*********** ERROR *********** vDPA RELAY INIT\n");
			goto error;
		}
		for (i = 0; i < n_vq; i++) {
			internals->vpq[i] =
				queue_ids[i + 1]; /* queue 0 is for exception */
		}
	} else {
		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
				   queue_ids[0].hw_id, n_vq, n_vq,
				   internals->port, &vhid)) {
			NT_LOG(ERR, VDPA,
			       "*********** ERROR *********** vDPA RELAY INIT\n");
			goto error;
		}
		for (i = 0; i < n_vq; i++)
			internals->vpq[i] = queue_ids[i];
	}

	/*
	 * Exception queue for OVS SW path
	 */
	internals->rxq_scg[0].queue = queue_ids[0];
	internals->txq_scg[0].queue =
		queue_ids[0]; /* use same index in Rx and Tx rings */
	internals->rxq_scg[0].enabled = 0;
	internals->txq_scg[0].port = port;

	internals->txq_scg[0].type = internals->type;
	internals->rxq_scg[0].type = internals->type;
	internals->rxq_scg[0].port = internals->port;

	/* Setup pmd_link info */
	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	pmd_link.link_status = ETH_LINK_DOWN;

	/* Swap in the shadow data copy with our private settings.
	 * NOTE(review): the ethdev's original 'data' is not freed here -
	 * verify ownership against nthw_remove_vf_interface_dpdk().
	 */
	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
	data->dev_private = internals;
	data->port_id = (*eth_dev)->data->port_id;

	data->nb_rx_queues = 1; /* this is exception */
	data->nb_tx_queues = 1;

	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
	data->numa_node = numa_node;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;

	/* Append the new instance to the global pmd_internals list */
	if (pmd_intern_base) {
		struct pmd_internals *intern = pmd_intern_base;

		while (intern->next)
			intern = intern->next;
		intern->next = internals;
	} else {
		pmd_intern_base = internals;
	}
	internals->next = NULL;

	atomic_store(&internals->vhid, vhid);

	LIST_INIT(&internals->mtr_profiles);
	LIST_INIT(&internals->mtrs);
	return 0;

error:
	/* NOTE(review): 'internals' is the ethdev's dev_private allocated in
	 * rte_eth_vdev_allocate(); freeing it without releasing the ethdev
	 * leaves a dangling dev_private pointer - confirm and fix upstream.
	 */
	if (data)
		rte_free(data);
	if (internals)
		rte_free(internals);
	return -1;
}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
/* No-op state transition: LAG configuration is left unchanged. */
static int lag_nop(lag_config_t *config __rte_unused)
{
	return 0;
}
+
/* Both links up: distribute Tx 50/50 between port 0 and port 1. */
static int lag_balance(lag_config_t *config __rte_unused)
{
	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
}
+
/* Only port 0 link is up: send all Tx out of port 0. */
static int lag_port0_active(lag_config_t *config __rte_unused)
{
	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
}
+
/* Only port 1 link is up: send all Tx out of port 1 (0% to port 0). */
static int lag_port1_active(lag_config_t *config __rte_unused)
{
	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
}
+
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	int retval = 0;
+
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
+	return retval;
+}
+
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+static int lag_get_status(lag_config_t *config)
+{
+	uint8_t port0 = lag_get_link_status(config, 0);
+
+	uint8_t port1 = lag_get_link_status(config, 1);
+
+	uint8_t status = (port1 << 1 | port0);
+	return status;
+}
+
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->primary_port == 0) {
+		/* If port 0 is the active primary, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active primary, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->backup_port == 0) {
+		/* If port 0 is the active backup, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active backup, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
/*
 * Active/backup LAG management loop: poll the primary link every 500 ms
 * and fail over to the backup port when the primary goes down, failing
 * back as soon as the primary recovers.  Runs until
 * config->lag_thread_active is cleared; always returns 0.
 */
static int lag_active_backup(lag_config_t *config)
{
	/* Tracks which port currently carries traffic (0 = primary) */
	uint8_t backup_port_active = 0;

	/* Initialize with the primary port active */
	lag_activate_primary(config);

	while (config->lag_thread_active) {
		usleep(500 *
		       1000); /* 500 ms sleep between testing the link status. */

		bool primary_port_status =
			lag_get_link_status(config, config->primary_port);

		if (!primary_port_status) {
			bool backup_port_status =
				lag_get_link_status(config, config->backup_port);
			/* If the backup port has been activated, no need to do more. */
			if (backup_port_active)
				continue;

			/* If the backup port is up, flip to it. */
			if (backup_port_status) {
				NT_LOG(DBG, ETHDEV,
				       "LAG: primary port down => swapping to backup port\n");
				lag_activate_backup(config);
				backup_port_active = 1;
			}
		} else {
			/* If using the backup port and primary come back. */
			if (backup_port_active) {
				NT_LOG(DBG, ETHDEV,
				       "LAG: primary port restored => swapping to primary port\n");
				lag_activate_primary(config);
				backup_port_active = 0;
			} /* Backup is active, while primary is restored. */
		} /* Primary port status */
	}

	return 0;
}
+
/* Handler invoked on a LAG link-state transition. */
typedef int (*lag_aa_action)(lag_config_t *config);

/* port 0 is LSB and port 1 is MSB */
/* NOTE: 0b binary literals are a GCC/Clang extension prior to C23 */
enum lag_state_e {
	P0DOWN_P1DOWN = 0b00,
	P0UP_P1DOWN = 0b01,
	P0DOWN_P1UP = 0b10,
	P0UP_P1UP = 0b11
};

/* One row of the transition table: (from, to) -> handler. */
struct lag_action_s {
	enum lag_state_e src_state;
	enum lag_state_e dst_state;
	lag_aa_action action;
};

/* Full transition table for active/active mode; covers all 16
 * (src, dst) combinations of the two link states.
 */
struct lag_action_s actions[] = {
	/* No action in same state */
	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },

	/* UU start */
	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },

	/* UD start */
	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },

	/* DU start */
	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },

	/* DD start */
	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
};
+
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	uint32_t i;
+
+	for (i = 0; i < sizeof(actions) / sizeof(struct lag_action_s); i++) {
+		if (actions[i].src_state == current_state &&
+				actions[i].dst_state == new_state)
+			return actions[i].action;
+	}
+	return NULL;
+}
+
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/* NOTE(review): SG_HDR_SIZE is also defined (identically) at the top of
+ * this header; the duplicate is benign but one copy should be removed.
+ */
+#define SG_HDR_SIZE 12
+
+/*
+ * Rx scatter-gather packet header prepended by the hardware.
+ * Must be exactly SG_HDR_SIZE (12) bytes; enforced at compile time by
+ * check_sg_pkt_rx_hdr_size below. Field semantics follow the FPGA
+ * descriptor format (names suggest capture length, flow id, offset,
+ * IP protocol, port, descriptor type and color -- confirm against the
+ * hardware specification).
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/*
+ * Tx scatter-gather packet header consumed by the hardware.
+ * Must be exactly SG_HDR_SIZE (12) bytes; enforced at compile time by
+ * check_sg_pkt_tx_hdr_size below.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size.
+ * The array dimension is -1 (a compile error) when the struct size does
+ * not equal SG_HDR_SIZE; pre-C11 equivalent of _Static_assert.
+ */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+/* Generic opaque handle. */
+typedef void *handle_t;
+
+/*
+ * Hardware queue resources backing a Rx/Tx queue: the control memory
+ * area for the virt-queues plus the packet buffer descriptors.
+ */
+struct hwq_s {
+	int vf_num; /* virtual function number this queue belongs to */
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* Per-queue Rx state: mbuf pool, statistics and the backing virt-queue. */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* hardware queue resources */
+	struct nthw_virt_queue *vq;
+	int nb_hw_rx_descr; /* number of hardware Rx descriptors */
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* Per-queue Tx state: target ids, statistics and the backing virt-queue. */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* hardware queue resources */
+	struct nthw_virt_queue *vq;
+	int nb_hw_tx_descr; /* number of hardware Tx descriptors */
+	/* Used for bypass in NTDVIO0 header on Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+/* Fixed-capacity array of uint32_t values with an element count. */
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count; /* number of valid entries in value[] */
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* bonding mode: BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* LAG management thread id */
+	uint8_t lag_thread_active; /* cleared to stop the management loop */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* Meter profile list entry, keyed by profile_id. */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* Meter instance list entry; references an nt_mtr_profile. */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared; /* non-zero when the meter is shared between flows */
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtio communication layout for a virtual port. */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,   /* not negotiated yet */
+	VIRT_PORT_NEGOTIATED_SPLIT,  /* split virtqueue layout */
+	VIRT_PORT_NEGOTIATED_PACKED, /* packed virtqueue layout */
+	VIRT_PORT_USE_RELAY          /* traffic goes through a relay */
+};
+
+#define MAX_PATH_LEN 128
+
+/*
+ * Per-port PMD private data, stored in the ethdev's private area.
+ * Holds the flow device handle, queue arrays, MAC addresses, meter
+ * lists and (optional) LAG configuration for one ntnic port.
+ */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev; /* flow engine device handle */
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode; /* loopback mode */
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier; /* timestamp multiplier */
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq; /* number of valid entries in vpq[] */
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config; /* NULL unless this port is part of a bond */
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc; /* rtc of the last statistics poll */
+	uint64_t rx_missed;
+
+	struct pmd_internals *next; /* singly-linked list of port instances */
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..0d3dfdd686
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#pragma GCC diagnostic ignored "-Wunused-function"
+
+/* Converted match: rte_flow items translated to flow_elem entries. */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition: pointer into (or copy of) a converted match. */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted flow attributes plus the associated converted match. */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/*
+ * Converted actions: rte_flow actions translated to flow_action entries,
+ * with storage for the action payloads they point at.
+ */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/*
+ * PMD-private rte_flow handle: wraps the flow engine handle and caches
+ * the statistics associated with the flow's stat id.
+ */
+struct rte_flow {
+	void *flw_hdl; /* flow engine handle */
+	int used; /* non-zero when this entry is allocated */
+	uint32_t flow_stat_id; /* index into flow_stat_id_map */
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/*
+ * Private item types. Based at INT_MIN so they can never collide with
+ * the public enum rte_flow_item_type values.
+ */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/*
+ * Private action types. Based at INT_MIN so they can never collide with
+ * the public enum rte_flow_action_type values.
+ */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+static int convert_tables_initialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate a flow engine error into an rte_flow_error.
+ * A NULL error pointer is accepted (nothing is filled in).
+ * Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+
+	/* Only NONE/SUCCESS map to "no error"; everything else is opaque. */
+	if (flow_error->type == FLOW_ERROR_NONE ||
+			flow_error->type == FLOW_ERROR_SUCCESS)
+		error->type = RTE_FLOW_ERROR_TYPE_NONE;
+	else
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+/*
+ * Map a flow MARK value to a flow stat id slot. Caller must hold flow_lock.
+ *
+ * Starts at the mark's hash position and linearly probes for a free slot.
+ * The probe loop is bounded: the previous open-coded while-loop would spin
+ * forever - while holding the spinlock - once all MAX_COLOR_FLOW_STATS
+ * slots were occupied. If the table is full, the hashed slot is reclaimed
+ * instead (stats for the evicted mapping become inaccurate, which is
+ * preferable to deadlocking the driver).
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+	uint32_t probes;
+
+	for (probes = 0; probes < MAX_COLOR_FLOW_STATS; probes++) {
+		if (flow_stat_id_map[flow_stat_id] == 0)
+			break;
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+	}
+
+	/*
+	 * If no free slot was found, flow_stat_id has wrapped back to the
+	 * hashed start position after MAX_COLOR_FLOW_STATS probes and that
+	 * slot is overwritten below.
+	 */
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow stat id under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/*
+ * Release a flow stat id slot. Caller must hold flow_lock.
+ * Out-of-range ids are silently ignored.
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time initialization of the RTE -> flow engine conversion tables
+ * used by CNV_TO_ELEM()/CNV_TO_ACTION(). Unmapped indices are left at -1
+ * (unsupported). Idempotent via convert_tables_initialized.
+ *
+ * NOTE(review): not thread-safe by itself; presumably callers serialize
+ * the first invocation - confirm against the call sites.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* Item (match element) type mapping. */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug-only names for the item mapping, used in trace logs. */
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* Action type mapping. */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug-only names for the action mapping, used in trace logs. */
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Parse a raw packet byte array into flow_elem entries.
+ *
+ * data:     packet bytes to interpret (Ethernet onward)
+ * preserve: optional parallel mask buffer; when non-NULL each element's
+ *           mask points at the corresponding offset in it
+ * size:     number of valid bytes in data (and preserve)
+ * out:      receives the parsed elements plus a terminating
+ *           FLOW_ELEM_TYPE_END entry; the caller provides capacity for
+ *           the result (presumably MAX_ELEMENTS - confirm at call sites)
+ *
+ * Returns the number of elements written (including the END entry), or
+ * -1 when the data is truncated or uses an unsupported protocol.
+ * Recognizes Ethernet, VLAN/QinQ, IPv4/IPv6, ICMP/TCP/UDP/SCTP and
+ * GTPv1-U with extension headers. Parsing may stop early at any layer
+ * boundary when the data ends exactly there.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN (possibly stacked: 802.1Q and both QinQ TPIDs) */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3: require the EtherType and the IP version nibble to agree */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field lives at byte offset 9 */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field lives at byte offset 6 */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 (NOTE(review): 1 is ICMPv4; ICMPv6 (58) is not handled) */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U rides on a well-known UDP destination port */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is in 4-byte units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero length field is invalid per the
+				 * GTP-U spec and would make pkti stop
+				 * advancing (infinite loop reading
+				 * data[pkti - 1]); reject it along with
+				 * truncated extensions.
+				 */
+				if (ext_len == 0 ||
+						size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last byte of the extension is the next type */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing unparsed bytes are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Convert rte_flow attributes into the internal flow_attr.
+ * A NULL attr yields zeroed attributes. Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(struct flow_attr));
+
+	if (attr == NULL)
+		return 0;
+
+	attribute->attr.group = attr->group;
+	attribute->attr.priority = attr->priority;
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an rte_flow action list into the filter-layer action array used by
+ * the vswitch profile.
+ *
+ * action:       output; receives the converted actions plus backing storage
+ *               for RSS / tunnel / mark configuration
+ * actions:      rte_flow action list, terminated by RTE_FLOW_ACTION_TYPE_END
+ * max_elem:     capacity of action->flow_actions[]
+ * flow_stat_id: output; color/flow-stat id taken from the MARK action, or
+ *               allocated implicitly when full offload supplies no MARK
+ *
+ * Returns 0 on success, -1 on unknown/unsupported action or array overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1: /* unreachable: guarded by type >= 0 above */
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* cast must match PRIX64 (was unsigned long long) */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Map the user mark to a color stat id */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/*
+					 * Need room for the inserted MARK plus
+					 * the relocated END element.
+					 */
+					if (aidx + 1 == max_elem)
+						return -1;
+
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			/*
+			 * A full array is only an error if more actions follow;
+			 * an END element landing in the last slot is complete.
+			 */
+			if (aidx == max_elem && type != FLOW_ACTION_TYPE_END)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an rte_flow action list into the filter-layer action array used by
+ * the inline profile.
+ *
+ * action:       output; receives the converted actions plus backing storage
+ *               for RSS / raw decap / raw encap / queue configuration
+ * actions:      rte_flow action list, terminated by RTE_FLOW_ACTION_TYPE_END
+ * max_elem:     capacity of action->flow_actions[]
+ * queue_offset: added to every QUEUE action index (VF/vDPA queue remapping)
+ *
+ * Returns 0 on success, negative on unsupported action or array overflow.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* cast must match PRIX64 (was unsigned long long) */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				/* item_count is int -> %d */
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %d\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				/* item_count is int -> %d */
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %d\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Remap to the VF/vDPA queue range */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			/*
+			 * A full array is only an error if more actions follow;
+			 * an END element landing in the last slot is complete.
+			 */
+			if (aidx == max_elem && type != FLOW_ACTION_TYPE_END)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#pragma GCC diagnostic pop
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+/* Capacity of the static rte_flow handle pool below */
+#define MAX_RTE_FLOWS 8192
+/* Maximum number of port ids handled by this filter layer */
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Static pool of flow handles; slots are claimed and released under flow_lock */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when @flow is NOT a slot inside the nt_flows[] pool, i.e.
+ * it is a raw driver handle (inline profile, group > 0) that was typecast to
+ * struct rte_flow * by eth_flow_create().
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *pool_begin = &nt_flows[0];
+	const void *pool_last = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *handle = flow;
+
+	return !(handle >= pool_begin && handle <= pool_last);
+}
+
+/*
+ * Translate an rte_flow (attr, items, actions) triple into the filter-layer
+ * representation (attribute, match, action) according to the adapter profile.
+ *
+ * Returns 0 on success; -1 with @error populated on failure.
+ * flow_stat_id is only assigned on the vswitch profile path.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Validate dev before any dereference. The original code read
+	 * dev->p_drv and dev->type first and only then checked for NULL.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback.
+ *
+ * Destroys the hardware flow and, for pool-managed handles, releases the
+ * nt_flows[] slot and its flow-stat id, then clears the cached statistics.
+ *
+ * Returns 0 on success, otherwise the filter-layer error code with @error set.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/*
+		 * Raw driver handle (inline profile, group > 0). It is not a
+		 * struct rte_flow and has just been destroyed, so it must not
+		 * be dereferenced afterwards. The original code fell through
+		 * and wrote stat fields through this pointer.
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: convert the rte_flow description and ask the
+ * filter layer whether it could be programmed, without creating anything.
+ *
+ * Returns 0 when the flow is acceptable, a negative value (with @error set)
+ * otherwise.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+	int status;
+
+	/* First translate; a conversion failure already filled in @error */
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	status = flow_validate(dev->flw_dev, match.flow_elem,
+			       action.flow_actions, &flow_error);
+	if (status < 0)
+		convert_error(error, &flow_error);
+
+	return status;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * Converts the rte_flow description and programs it into the filter layer.
+ * Inline profile with group > 0 returns the raw driver handle typecast to
+ * struct rte_flow * (see is_flow_handle_typecast()); all other flows get a
+ * slot from the nt_flows[] pool so statistics can be tracked per flow.
+ *
+ * Returns the flow handle, or NULL with @error set on failure.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline profile, group > 0: hand back the raw driver handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim a free slot in the handle pool under flow_lock */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	/*
+	 * NOTE(review): if no slot was found (flow == NULL) the flow_stat_id
+	 * allocated during conversion appears to leak - no
+	 * delete_flow_stat_id_locked() call on this path; verify.
+	 */
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* HW programming failed: release stat id and slot */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (color counter) statistics refresh */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull accumulated RX/TX statistics from the adapter into the PMD counters
+ * and fold the global color counters into the per-flow statistics.
+ *
+ * Rate limited: per port at most once per second; the global color counters
+ * are refreshed by whichever port polls first in a given second.
+ *
+ * Returns 0 on success (including when rate limited), -1 on bad arguments.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/*
+	 * Reject if_index == NUM_ADAPTER_PORTS_MAX as well: the per-port
+	 * arrays are indexed 0..NUM_ADAPTER_PORTS_MAX-1 (original check
+	 * used '>' and let the boundary value through).
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index >= NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	/* NOTE(review): hwlock is held across pthread_mutex_lock below -
+	 * verify this spinlock/mutex ordering is safe for all callers. */
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to use here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow query callback. Only RTE_FLOW_ACTION_TYPE_COUNT is supported:
+ * refreshes the statistics and fills a struct rte_flow_query_count with the
+ * flow's cached packet/byte counts (optionally resetting them).
+ *
+ * Returns 0 on success, -1 with @err set on a bad handle or unsupported query.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* Raw driver handles (inline, group > 0) carry no statistics */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Refresh cached counters before reading them (rate limited inside) */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/*
+ * Debug helper: dump every field of a struct rte_flow_tunnel to the log.
+ * inet_ntoa() returns a static buffer, but each result is consumed by its
+ * own NT_LOG call before the next one is made.
+ */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * Action pair handed to the application by ntnic_tunnel_decap_set().
+ * Slot [1] is patched at runtime with the tunnel-specific decap action;
+ * being a single static array, this is shared by all callers.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel_decap_set callback: hand the application the PMD actions
+ * needed to offload decapsulation of @tunnel. Only VXLAN is supported.
+ *
+ * Returns 0 and two actions (TUNNEL_SET + VXLAN_DECAP), or -ENOTSUP.
+ *
+ * NOTE(review): this writes into the shared static _pmd_actions[] array, so
+ * concurrent calls (or different tunnel types in flight) would race - verify
+ * callers are serialized.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* Single match item handed to the application by ntnic_tunnel_match() */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel_match callback: supply the single PMD match item that
+ * identifies tunneled traffic. The tunnel argument is not inspected.
+ * Always succeeds.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow get_restore_info callback.
+ *
+ * Reconstructs the tunnel a partially-offloaded packet arrived on, using the
+ * FDIR mark the FPGA placed in the mbuf: port id in bits 31..24 of
+ * hash.fdir.hi, flow-stat id in the low 24 bits of hash.fdir.lo.
+ *
+ * Returns 0 with @info filled in on a hit, -EINVAL otherwise.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed TTL/TOS; the originals are not recoverable here */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow tunnel_action_decap_release callback. The actions handed out by
+ * ntnic_tunnel_decap_set() are static, so there is nothing to free.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow tunnel_item_release callback. The item handed out by
+ * ntnic_tunnel_match() is static, so there is nothing to free.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow driver ops exported by this PMD; .flush is not implemented */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow from pre-converted attribute/match/action structures.
+ * NOTE(review): presumably returns NULL on failure with @error filled
+ * in by the implementation - confirm against ntnic_filter.c.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One (RTE flag, NT flag) correspondence in the RSS translation table. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to a table entry pairing RTE_<name> with NT_<name>. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Translation table between rte_eth RSS hash-field flags and the NT
+ * equivalents.  Made static const: it is private to this file and is
+ * only ever read through const pointers by the lookup helpers below.
+ */
+static const struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Look up the NT RSS flag matching @rte_flag in the translation table.
+ * Returns a pointer to the NT flag, or NULL when no mapping exists.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Reverse lookup: find the rte_eth RSS flag matching @nt_flag.
+ * Returns a pointer to the RTE flag, or NULL when no mapping exists.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Convert an rte_eth RSS hash-field bitmask to the NT hardware fields.
+ * Bits with no NT mapping are logged as errors and dropped; all mapped
+ * bits are OR'ed into the returned nt_eth_rss.fields.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	/* "unsigned int" instead of non-standard "uint" typedef */
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				/* %u: the counter is unsigned (%d was a
+				 * format/argument mismatch)
+				 */
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Convert NT RSS hash fields back to the rte_eth bitmask.  Every NT bit
+ * is expected to have an RTE mapping (NT flags are a subset of the RTE
+ * flag space), which the assert documents for debug builds.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	/* "unsigned int" instead of non-standard "uint" typedef */
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Include guard added for consistency with the other ntnic headers
+ * (ntnic_filter.h, ntnic_meter.h) and to prevent double inclusion.
+ */
+#ifndef __NTNIC_HSHCONFIG_H__
+#define __NTNIC_HSHCONFIG_H__
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* __NTNIC_HSHCONFIG_H__ */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* MSB of a 32-bit id (0x80000000): set = egress meter/profile id. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Look up a stored meter profile by id; NULL when not present. */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *p;
+
+	LIST_FOREACH(p, &dev_priv->mtr_profiles, next) {
+		if (p->profile_id == meter_profile_id)
+			return p;
+	}
+
+	return NULL;
+}
+
+/*
+ * Vswitch rte_mtr meter_profile_add callback.
+ *
+ * Stores a copy of @profile under @meter_profile_id.  Only egress
+ * profiles (MSB of the id set) or profiles on virtual ports are
+ * accepted; ingress metering on physical ports is rejected.
+ * Returns 0 on success, negative errno via rte_mtr_error_set() on error.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): __func__ is passed twice for "%s: [%s:%u]"; the
+	 * tunnel callbacks pass __FILE__ for the second %s - confirm intent.
+	 */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	/* MSB of the profile id marks it as an egress profile */
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		/* Keep a private copy; the caller may free @profile */
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * Vswitch rte_mtr meter_profile_delete callback.
+ * Unlinks and frees the profile; ENODEV if the id is unknown.
+ * NOTE(review): does not check whether any meter still references the
+ * profile before freeing - confirm callers guarantee this.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Look up a meter object by id; NULL when not present. */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *m;
+
+	LIST_FOREACH(m, &dev_priv->mtrs, next) {
+		if (m->mtr_id == mtr_id)
+			return m;
+	}
+
+	return NULL;
+}
+
+/* Rate expressed as an integer part plus a fractional part in 1/1024. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Convert a rate in bytes/second into bytes/period, as an integer plus
+ * 1/1024-fractional value.  The period depends on the clock frequency
+ * and other parameters which, combined, give the multiplier.  The
+ * resulting formula is:
+ *     f[bytes/period] = x[bytes/s] * period_ps / 10^12
+ * (the original comment said "/ 10^-12", which is dimensionally wrong:
+ * the code divides by 10^12, the number of picoseconds per second)
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /* 10^12 picoseconds */
+
+	res.integer = dividend / divisor;
+	const uint64_t remainder = dividend % divisor;
+
+	res.fractional = 1024ull * remainder / divisor;
+	return res;
+}
+
+/* Physical-port variant: period of 8 * 3333 ps.
+ * NOTE(review): the 3333 factor presumably derives from the FPGA clock
+ * (see the 250 MHz note in eth_meter_enable) - confirm.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Virtual-port variant: period of 512 * 3333 ps (64x the physical one). */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * Vswitch rte_mtr meter_enable callback.
+ *
+ * Programs the meter's profile into hardware: egress profiles go to the
+ * EPP block (per-vport or per-txport QoS), ingress profiles go to the
+ * DBS TX QoS block.  Returns 0 on success or a negative errno set via
+ * rte_mtr_error_set().
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* One-time global ingress QoS rate setup flag.
+	 * NOTE(review): file-scope static shared across all devices and
+	 * not thread-safe - presumably only reached from a single-threaded
+	 * configuration path, confirm.
+	 */
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	/* MSB of the profile id selects the egress path */
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* avoid programming a zero rate */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* avoid programming a zero rate */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		/* clamp to the hardware field width; note this mutates the
+		 * stored profile, not just a local copy
+		 */
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/*
+ * Disable ingress TX QoS for the port by writing a zeroed DBS config.
+ * The return value of nthw_set_tx_qos_config() is deliberately ignored
+ * (best-effort disable).
+ */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * Vswitch rte_mtr meter_disable callback.
+ * Clears the EPP QoS settings for egress meters (MSB of @mtr_id set)
+ * or the DBS ingress QoS config otherwise.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		/* zeroed rate/burst disables the EPP QoS entry */
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/* MTR object create
+ *
+ * Vswitch rte_mtr create callback.  Allocates a meter object bound to
+ * an existing profile; ingress meters (MSB of @mtr_id clear) are only
+ * allowed on virtual ports.  If @params requests an enabled meter, the
+ * hardware is programmed immediately via eth_meter_enable().
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	/* meter keeps a pointer to the profile; the profile must outlive it */
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/* MTR object destroy
+ *
+ * Vswitch rte_mtr destroy callback.  Disables the hardware QoS entry
+ * associated with @mtr_id (EPP for egress, DBS for ingress) and frees
+ * the meter object.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		/* ENODEV (was EEXIST, contradicting the message); matches
+		 * eth_meter_profile_delete() for the not-found case.
+		 */
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * Inline-profile rte_mtr capabilities_get callback.
+ * Reports FLM-based metering limits: RFC 2698 trTCM, byte mode,
+ * color-blind only, green packet/byte stats.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* NOTE(review): 0xfff << 0xf scaled by 1099 - presumably derived
+	 * from the MBR rate-field encoding; confirm against the FPGA spec.
+	 */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported (for RFC 2698 only) */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_profile_add callback.
+ * Only RFC 2698 trTCM in byte mode, with CIR==PIR and CBS==PBS, is
+ * accepted; the profile is programmed directly via flow_mtr_set_profile().
+ * Note: @error had a stale __rte_unused marker although it is used on
+ * every failure path - removed.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_profile_delete callback.
+ * Clears the profile slot by programming an all-zero profile.
+ * Note: all three parameters carried stale __rte_unused markers although
+ * every one of them is used in the body - removed.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_policy_add callback.
+ * Only the fixed policy "green: pass, yellow: drop, red: drop" is
+ * accepted (green may be END, VOID+END or PASSTHRU+END).
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* green: packets must pass (empty, VOID or PASSTHRU action list) */
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* yellow: must be a single DROP action */
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	/* red: must be a single DROP action */
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_policy_delete callback.
+ * Only validates the id; no hardware state is cleared here.
+ * Note: @policy_id and @error carried stale __rte_unused markers although
+ * both are used - removed (@dev really is unused and keeps the marker).
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr create callback.
+ * Validates the constraints of the FLM implementation (color blind,
+ * shared, enabled, green stats only) and offloads the meter.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* NOTE(review): profile ids are range-checked against the POLICY
+	 * maximum here and in profile_add_inline - presumably profile and
+	 * policy tables share a size; confirm.
+	 */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr destroy callback: removes the offloaded meter.
+ * Note: @error had a stale __rte_unused marker although it is used on
+ * every failure path - removed.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr stats_update callback, repurposed to adjust a
+ * meter bucket: bit 63 of @adjust_value must be set as an opt-in flag,
+ * and the remaining bits (<= UINT32_MAX) are the adjustment.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* opt-in marker distinguishing an adjust request from a plain
+	 * stats_update mask
+	 */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* strip the marker bit, leaving the adjustment value */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr stats_read callback.
+ * Only green packet/byte counters are maintained; all other fields of
+ * @stats are zeroed.  @clear resets the hardware counters after reading.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* Meter ops for vswitch-profile FPGAs (no capabilities/policy/stats). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* Meter ops for inline-profile FPGAs (FLM based, no enable/disable). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_eth mtr_ops_get callback.
+ * Selects the vswitch or inline meter ops table depending on the FPGA
+ * profile; returns -1 (with an error log) for unsupported profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/*
+ * rte_eth mtr_ops_get callback: selects the vswitch or inline meter ops
+ * table based on the FPGA profile.  The stale __rte_unused on @dev was
+ * removed: the implementation dereferences dev->data->dev_private.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port vDPA bookkeeping; entries are handed out sequentially by
+ * nthw_vdpa_init() and identified by their vhost socket path (ifname).
+ */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;		/* vhost device id, set in new_device callback */
+	uint32_t index;		/* HW ring index base */
+	uint32_t host_id;	/* VF number */
+	uint32_t rep_port;	/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;		/* rte_vhost driver registration flags */
+	struct rte_pci_addr addr;
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;	/* number of used entries in vport[] */
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a vhost queue id on a vDPA device into HW queue placement.
+ *
+ * vdpa_dev: vDPA device to look up in the port table.
+ * rx:       non-zero for an Rx queue, zero for Tx.
+ * queue_id: queue index within the rx/tx range of the port.
+ * hw_index: out - HW ring index (port base index + queue_id).
+ * host_id:  out - VF number backing the port.
+ * rep_port: out - in-port override used on Tx.
+ *
+ * Returns 0 on success, -1 if the device or queue is not configured.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev != vdpa_dev)
+			continue;
+
+		if (rx) {
+			if (queue_id >= vport[i].rxqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].rxqs);
+				return -1;
+			}
+		} else {
+			/* Fix: report the Tx queue count here (the original
+			 * logged "rxqs" and printed vport[i].rxqs).
+			 */
+			if (queue_id >= vport[i].txqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].txqs);
+				return -1;
+			}
+		}
+
+		*hw_index = vport[i].index + queue_id;
+		*host_id = vport[i].host_id;
+		*rep_port = vport[i].rep_port;
+		return 0;
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a new vDPA port for VF @vdev and start its vhost-user driver.
+ *
+ * socket_path is registered as a vhost-user client socket; index is the
+ * HW ring base, rep_port the Tx in-port override.  On success *vhid
+ * receives the port handle used by the vhost callbacks.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/* Fix: guard against overflowing the static vport[] table */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Too many vDPA ports (max %lu)\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver for a registered vDPA port,
+ * then clear its table slot.
+ *
+ * NOTE(review): the early return means only the FIRST port with a
+ * non-empty ifname is torn down per call - confirm whether callers
+ * invoke this repeatedly, or whether all ports should be closed here.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Bit-number/name table used by dump_virtio_features() to pretty-print
+ * negotiated virtio feature bits.
+ * NOTE(review): most name strings carry two leading spaces that end up
+ * verbatim in the printed output - confirm this is intentional.
+ */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/*
+ * Print the name of every virtio feature bit set in @features,
+ * using the virt_features table above.
+ */
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		/* Simplified: plain bit test instead of the equivalent
+		 * masked-compare `(1<<id) == (features & (1<<id))`.
+		 */
+		if (features & (1ULL << virt_features[i].id))
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost new_device callback.
+ *
+ * Matches the vhost connection to a registered vDPA port via its
+ * socket path, waits (up to ~2 s) for the PMD port instance to become
+ * ready, then records the negotiated ring mode (packed/split) on the
+ * port.  Fails when neither IN_ORDER nor RING_PACKED was negotiated.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the vport entry whose socket path matches this connection */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll for the ethdev instance; 2000 x 1 ms before giving up */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): %016lx assumes 64-bit long; PRIx64 would be portable */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost destroy_device callback: mark the matching virtual port
+ * link-down.  The port is matched on the vhost socket path (ifname).
+ *
+ * Simplification: the original walked the port table twice for the
+ * same match (once to log, once to set link down); one scan suffices.
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			struct pmd_internals *intern;
+
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+
+			/* set link down on virtual port */
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user device lifecycle callbacks registered per socket */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register the vhost-user driver for a port, hook up the device
+ * callbacks, mask features the HW datapath does not support, and
+ * start the driver.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Offloads not handled by the HW datapath are masked off */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		/* Fix: fatal error path - log at ERR (was INF), consistent
+		 * with the other failure branches in this function.
+		 */
+		NT_LOG(ERR, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+/* Resolve HW queue index, VF number and rep port for a vhost queue.
+ * rx selects the Rx (non-zero) or Tx (zero) range.  Returns 0 or -1.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+/* Register a vDPA port for VF @vdev and start its vhost-user driver.
+ * On success *vhid receives the port handle.  Returns 0 or -1.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+/* Detach and unregister the vhost driver of a registered port. */
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF PCI id to a printable adapter name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id != NT_HW_NAPATECH_PCI_VENDOR_ID)
+		return "Unknown";
+
+	if (pci_dev->id.device_id == NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+		return "NT200A02";
+	if (pci_dev->id.device_id == NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+		return "NT50B01";
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for a Napatech VF: first create the vDPA device for the
+ * virtual function, then the DPDK ethdev interface on top of it.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove: tear down the vDPA device, then the VF ethdev interface. */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI ids of Napatech virtual functions handled by this driver */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* PCI driver glue for the VF devices; requires the vfio-pci kmod */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+/* PCI probe/remove entry points for Napatech virtual functions */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+/* VFIO helpers keyed by VF number */
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+/* vhost-user protocol features advertised to the frontend.  The
+ * SLAVE_* protocol feature names were renamed to BACKEND_* in newer
+ * DPDK releases, hence the version switch below.
+ */
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+/* virtio-net feature bits this device offers to the guest */
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow of one guest vring plus the HW virt-queue created for it */
+struct vring_info {
+	uint64_t desc;		/* guest-physical descriptor ring address */
+	uint64_t avail;		/* guest-physical avail (driver) area */
+	uint64_t used;		/* guest-physical used (device) area */
+	uint16_t size;		/* number of descriptors */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;		/* 0 = Rx queue, 1 = Tx queue */
+	struct nthw_virt_queue *p_vq;	/* HW handle; NULL if not created */
+
+	int enable;
+};
+
+/* Aggregated vring state for one vDPA device */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;	/* vring count reported by vhost */
+
+	/* one Rx/Tx vring pair per queue */
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device state */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds cached by ntvf_vdpa_vfio_setup(); -1 until set up */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* lifecycle flags, touched from vhost callback context */
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;	/* guards DMA (un)map */
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+/* Byte size of a split used ring with @size elements plus its three
+ * uint16_t header/trailer fields.
+ */
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+/* Base IOVA used for mediated vrings */
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Node type for the global list of probed vDPA VF devices */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+/* Protects internal_list against concurrent probe/lookup */
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/* Look up the internal_list entry owning @vdev; NULL if not registered. */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry = NULL;
+	struct internal_list *it;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next) {
+		if (it->internal->vdev == vdev) {
+			entry = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return entry;
+}
+
+/* Look up the internal_list entry owning PCI device @pdev; NULL if absent. */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry = NULL;
+	struct internal_list *it;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next) {
+		if (it->internal->pdev == pdev) {
+			entry = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return entry;
+}
+
+/* Attach the VF to VFIO and cache its container/group/device fds.
+ * Returns 0 on success, -1 when nt_vfio_setup() fails (fds stay -1).
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map or unmap the guest's memory regions for VFIO DMA on this VF.
+ *
+ * do_map non-zero maps every region of the vhost memory table; zero
+ * unmaps them.  The dma_mapped flag rejects double (un)mapping.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Fix: "GPA 0xllx" lacked the '%' conversion; print the
+		 * guest physical address with PRIX64 like the other fields.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/* Translate a host virtual address to guest-physical for vhost dev @vid.
+ * Returns 0 when the address falls outside every guest memory region.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+	uint32_t i;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (i = 0; i < mem->nregions; i++) {
+		const struct rte_vhost_mem_region *r = &mem->regions[i];
+
+		if (hva < r->host_user_addr ||
+				hva >= r->host_user_addr + r->size)
+			continue;
+
+		gpa = hva - r->host_user_addr + r->guest_phys_addr;
+		break;
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Create the HW virt-queue backing guest vring @vring.
+ *
+ * Translates the guest ring addresses to guest-physical, resolves HW
+ * queue placement via nthw_vdpa_get_queue_id_info(), and sets up an
+ * Rx (even index) or Tx (odd index) queue in packed or split mode
+ * depending on the negotiated virtio features.
+ *
+ * Returns 0 on success (also when split-ring without IN_ORDER is
+ * negotiated, in which case no HW queue is created), negative on error.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* Resolve the three ring areas to guest-physical addresses */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even vring index = Rx, odd = Tx; queue = vring >> 1 */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* receive virt queue */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets have been sent from
+					 * the VM on the LM source side. This pointer points to the
+					 * next free descr and may be pushed ahead by next flag and
+					 * if so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the device's vrings when the vhost device becomes ready.
+ *
+ * Inline FPGA profile: map DMA and create/enable the first Rx/Tx pair.
+ * Other profiles: only vring 0 is created here; the remaining vrings
+ * are enabled later through the vring state callback.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down all vrings: push the ring indexes back to vhost and
+ * release the HW virt-queues.  Dirty-page logging (live migration)
+ * is not supported and only triggers a warning.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Hand the current ring positions back to the vhost library */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Buffer size for the VFIO_DEVICE_SET_IRQS ioctl: the vfio_irq_set header
+ * followed by one eventfd (int) per vring (Rx+Tx per queue pair).
+ * NOTE(review): the trailing "+ 1" adds one byte, not one int slot - if a
+ * vector for the device/config interrupt was intended, this should likely
+ * be sizeof(int) * (NTVF_VDPA_MAX_QUEUES * 2 + 1); confirm against usage
+ * in ntvf_vdpa_enable_vfio_intr which writes nr_vring + 1 fds.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Enable MSI-X interrupts for the device through VFIO, wiring vector 0 to
+ * the PCI device interrupt fd and vectors 1..nr_vring to the vhost callfds.
+ *
+ * Returns 0 on success (including the too-many-vectors case, where polling
+ * still works), -1 if the VFIO_DEVICE_SET_IRQS ioctl fails.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	/* Idempotent: nothing to do if interrupts are already enabled */
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0 carries the device (config) interrupt */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* NOTE(review): this loop steps by 2 and reads vring i+1; it assumes
+	 * nr_vring is even (Rx/Tx pairs) - confirm, otherwise the last
+	 * iteration queries one vring past the end.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Disable all MSI-X interrupts for the device via VFIO (count = 0 with
+ * ACTION_TRIGGER removes every vector).
+ *
+ * Returns 0 on success or when interrupts were never enabled; -1 if the
+ * VFIO_DEVICE_SET_IRQS ioctl fails.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int len;
+
+	/* Idempotent: nothing to do if interrupts are already off */
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* Only the header is needed; count = 0 means "tear down all" */
+	len = sizeof(struct vfio_irq_set);
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = len;
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the started/dev_attached flags: start the
+ * datapath when the driver is started and a device is attached, stop it
+ * when either flag is cleared.  Serialized by internal->lock.
+ *
+ * Returns 0 on success, or the error code of the failing step.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Log before jumping: the NT_LOG call previously sat
+			 * after "goto err" and was unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vhost "dev_conf" callback: bind the vhost @vid to our internal device
+ * state, mark it attached and kick the datapath state machine.
+ *
+ * Returns 0 on success, -1 if the vDPA device is unknown.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdpa_dev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	priv = entry->internal;
+	priv->vid = vid;
+
+	atomic_store(&priv->dev_attached, 1);
+
+	ntvf_vdpa_update_datapath(priv);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost "dev_close" callback: detach the device, stop the datapath and
+ * drop the cached virt-queue pointers.
+ *
+ * Returns 0 on success, -1 if the vDPA device is unknown.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+
+	atomic_store(&internal->dev_attached, 0);
+	/* Stops the datapath since dev_attached is now 0 */
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	uint32_t i;
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (i = 0; i < hw->nr_vring; i++)
+		hw->vring[i].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost "set_features" callback.  Succeeds unless vhost logging (live
+ * migration dirty-page tracking) is requested, which this driver does not
+ * support - in that case -1 is returned to fail the negotiation.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	/* No logging requested: nothing more to configure */
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/*
+ * vhost callback: return the VFIO group fd associated with @vid,
+ * or -1 if the vDPA device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdpa_dev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/*
+ * vhost callback: return the VFIO device fd associated with @vid,
+ * or -1 if the vDPA device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdpa_dev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/*
+ * vDPA op: report the number of queue pairs supported by @vdev in
+ * *@queue_num.  Returns 0 on success, -1 if the device is unknown.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+	*queue_num = list->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the virtio feature bits supported by @vdev in
+ * *@features.  Returns 0 on success, -1 on unknown device or NULL output
+ * pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = list->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the supported vhost-user protocol feature bits in
+ * *@features.  Returns 0 on success, -1 if @features is NULL.
+ *
+ * Note: @vdev is used in the log messages, so the previous __rte_unused
+ * annotation was incorrect and has been dropped.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable VFIO MSI-X interrupts and then enable Rx/Tx on every vring
+ * (even index = Rx, odd index = Tx).
+ *
+ * Returns 0 on success or the error from ntvf_vdpa_enable_vfio_intr().
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Use the driver logger instead of a bare printf so the
+		 * failure shows up in the normal log stream.
+		 */
+		NT_LOG(ERR, VDPA, "%s: enable interrupt via VFIO failed\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings; i is unsigned to match nr_vring */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+
+/*
+ * vhost "set_vring_state" callback: enable or disable one vring.
+ *
+ * A vring that has never been created (no desc/p_vq yet) is DMA-mapped and
+ * created on first enable.  When the last vring comes up, the VFIO
+ * interrupts and FPGA Rx/Tx are configured; for the INLINE FPGA profile
+ * this happens after the enable flag is set, for other profiles inside the
+ * create path.  Returns 0 on success, -1 on bad device or vring index.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx+Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* First enable: map guest memory and create the vq */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA driver callbacks registered with the vhost library.  Callbacks this
+ * driver does not implement (migration_done, get_notify_area) are NULL.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO, register the vDPA
+ * device with vhost and kick the datapath state machine.
+ *
+ * Returns 0 on success, -1 on failure (allocations are freed on all
+ * error paths).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fixed: was "return -1", which leaked list and internal */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, tear down VFIO mappings, unregister the
+ * vDPA device and free the per-device state.
+ *
+ * Returns 0 on success, -1 if the device is not known to this driver.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing "started" makes update_datapath stop the device */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Empty (terminator-only) id table: devices are bound to this driver
+ * explicitly rather than matched by PCI vendor/device id.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+/* The device must be bound to the vfio-pci kernel driver */
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the ntnic vDPA VF driver */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/* NOTE(review): LOG_FUNC_TRACE is unconditionally defined here, so the
+ * enter/leave debug tracing is always compiled in - confirm this is
+ * intentional rather than a leftover debug switch.
+ */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive the PF/VF index from the PCI address: the low five bits of the
+ * device id form the upper bits, the function number the lower three.
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	const int devid_bits = pdev->addr.devid & 0x1f;
+	const int func_bits = pdev->addr.function & 0x7;
+
+	return (devid_bits << 3) + func_bits;
+}
+
+/* Internal API */
+
+/* Per-VF VFIO bookkeeping: fds plus the next free IOVA for DMA mappings */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;
+};
+
+static struct vfio_dev vfio_list[256];
+
+/*
+ * Look up the VFIO bookkeeping slot for @vf_num; NULL if out of range.
+ * The bound is derived from the array size instead of a duplicated
+ * magic constant, so the two cannot drift apart.
+ */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 ||
+			vf_num >= (int)(sizeof(vfio_list) / sizeof(vfio_list[0])))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+
+/*
+ * Set up a VFIO container/group for the given PCI device.  PF0 uses the
+ * DPDK default container; every other PF/VF gets its own container so its
+ * IOMMU mappings are isolated.
+ *
+ * Returns the vf number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/* NOTE(review): the return value of rte_vfio_get_group_num is not
+	 * checked; on failure iommu_group_num may be used uninitialized -
+	 * confirm whether a check is needed here.
+	 */
+	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+			       &iommu_group_num);
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Only destroy containers we created; never the shared default one */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Tear down the VFIO container associated with @vf_num.
+ *
+ * NOTE(review): unlike the error path in nt_vfio_setup, this destroys the
+ * container unconditionally - for vf_num 0 that is the DPDK default
+ * container; confirm that is intended.
+ *
+ * Returns 0 on success, -1 on an illegal device id.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map process-virtual memory into the VF's IOMMU domain.  1G-sized
+ * requests are aligned down to a 1G boundary so hugepage-backed buffers
+ * map on their natural boundary; the caller's IOVA (*@iova_addr) is
+ * adjusted by the same offset.  The per-VF IOVA cursor always advances by
+ * 1G per mapping.
+ *
+ * Returns 0 on success, -1 on bad vf number or VFIO mapping failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fixed format arguments: virt_addr (void *) and size (uint64_t)
+	 * previously did not match the %PRIX64 / %d conversions, which is
+	 * undefined behavior in variadic calls.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%d\n",
+	       vf_num, (uint64_t)(uintptr_t)virt_addr,
+	       rte_malloc_virt2iova(virt_addr), gp_virt_base,
+	       vfio->iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Remove a mapping created by nt_vfio_dma_map.  For 1G-sized requests the
+ * same 1G alignment adjustment is applied so the arguments match the
+ * original map call.  Silently succeeds if the container was never set up.
+ *
+ * Returns 0 on success, -1 on bad vf number or VFIO unmap failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Container never created: nothing to unmap */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+
+/*
+ * Map a guest-physical region 1:1 into the VF's IOMMU domain, using the
+ * guest-physical address as the IOVA.
+ *
+ * Returns 0 on success, -1 on bad vf number or VFIO mapping failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fixed format argument: size is uint64_t and was previously passed
+	 * to %d, which is undefined behavior in variadic calls.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%" PRIu64 "\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Remove a guest-physical mapping created by nt_vfio_dma_map_vdpa.
+ *
+ * Returns 0 on success, -1 on bad vf number or VFIO unmap failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       res, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the VFIO container fd for @vf_num, or -1 on an illegal id.
+ */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message that claimed "device remove failed" */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/*
+ * Return the VFIO group fd for @vf_num, or -1 on an illegal id.
+ */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message that claimed "device remove failed" */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/*
+ * Return the VFIO device fd for @vf_num, or -1 on an illegal id.
+ */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message that claimed "device remove failed" */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* NOTE(review): RTE_INIT normally opens the constructor function
+ * definition itself; using it as a statement followed by a separate
+ * static definition is unusual - confirm this compiles as intended.
+ */
+RTE_INIT(nt_vfio_init);
+
+/* Register this module's DMA map/unmap callbacks with the common utils */
+static void nt_vfio_init(void)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* VFIO container/group management and DMA mapping for ntnic PF/VF devices */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Set up VFIO for a device; returns the vf number or -1 on failure */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-VF VFIO file descriptors; -1 on illegal vf_num */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* Map/unmap process-virtual memory; *iova_addr receives the chosen IOVA */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* Map/unmap guest-physical memory using the guest address as IOVA (vDPA) */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* Descriptor for one extended statistic exposed to ethdev. */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE]; /* xstat name shown to the user */
+	uint8_t source; /* counter group: 1 = RX port, 2 = TX port, 3 = FLM */
+	unsigned int offset; /* byte offset of the counter in its stats struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	/* source 1 = RX, source 2 = TX; offsets into port_counters_vswitch_v1 */
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	/* RX port counters (source 1), offsets into port_counters_v2 */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	/* TX port counters (source 2) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	/* Superset of the v1 table; the extra FLM 0.20 entries are at the end.
+	 * RX port counters (source 1), offsets into port_counters_v2.
+	 */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	/* TX port counters (source 2) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ */
+/* Largest of the name tables, so one baseline array fits all variants. */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port baseline snapshots taken by nthw_xstats_reset() and subtracted
+ * from the raw counters on every read. FLM baselines always use row 0.
+ * NOTE(review): non-static global - consider making it static if no other
+ * translation unit references it.
+ */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with the stat mutex locked.
+ */
+/*
+ * Fill 'stats' with up to 'n' extended statistics for 'port'.
+ * Counter values are reported relative to the last reset snapshot.
+ * Caller must hold the stat mutex. Returns the number of entries written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_sz;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	/* Select counter memory and name table for this adapter flavor */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_sz = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_sz = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_sz = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n && idx < tbl_sz; idx++) {
+		uint64_t value = 0;
+
+		stats[idx].id = idx;
+		if (tbl[idx].source == 1) {
+			/* RX port counter */
+			value = *((uint64_t *)&rx_base[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+		} else if (tbl[idx].source == 2) {
+			/* TX port counter */
+			value = *((uint64_t *)&tx_base[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+		} else if (tbl[idx].source == 3 && flm_base) {
+			/* FLM counter; baselines live in row 0 */
+			value = *((uint64_t *)&flm_base[tbl[idx].offset]) -
+				nthw_xstats_reset_val[0][idx];
+		}
+		/* Unknown source or missing FLM block reads as zero */
+		stats[idx].value = value;
+	}
+
+	return idx;
+}
+
+/*
+ * Fetch selected extended statistics by id into 'values'.
+ * Ids out of range are skipped and their slot in 'values' is left untouched.
+ * Caller must hold the stat mutex. Returns the number of values written.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_sz;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+	int filled = 0;
+
+	/* Select counter memory and name table for this adapter flavor */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_sz = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_sz = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_sz = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		const uint64_t id = ids[i];
+		uint64_t value = 0;
+
+		if (id >= tbl_sz)
+			continue;
+
+		if (tbl[id].source == 1) {
+			/* RX port counter */
+			value = *((uint64_t *)&rx_base[tbl[id].offset]) -
+				nthw_xstats_reset_val[port][id];
+		} else if (tbl[id].source == 2) {
+			/* TX port counter */
+			value = *((uint64_t *)&tx_base[tbl[id].offset]) -
+				nthw_xstats_reset_val[port][id];
+		} else if (tbl[id].source == 3 && flm_base) {
+			/* FLM counter; baselines live in row 0 */
+			value = *((uint64_t *)&flm_base[tbl[id].offset]) -
+				nthw_xstats_reset_val[0][id];
+		}
+		values[i] = value;
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Snapshot the current counters for 'port' as the new baselines, so that
+ * subsequent reads report values relative to this point.
+ * "flm_count_current" is deliberately never baselined: it is a live gauge,
+ * not an accumulating counter. Caller must hold the stat mutex.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_sz;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	/* Select counter memory and name table for this adapter flavor */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_sz = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_sz = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_sz = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < tbl_sz; idx++) {
+		if (tbl[idx].source == 1) {
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&rx_base[tbl[idx].offset]);
+		} else if (tbl[idx].source == 2) {
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&tx_base[tbl[idx].offset]);
+		} else if (tbl[idx].source == 3 && flm_base &&
+				strcmp(tbl[idx].name, "flm_count_current") != 0) {
+			/* FLM baselines are adapter-global: row 0 */
+			nthw_xstats_reset_val[0][idx] =
+				*((uint64_t *)&flm_base[tbl[idx].offset]);
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked.
+ */
+/*
+ * Copy up to 'size' xstat names into 'xstats_names'.
+ * With a NULL destination, only the total number of names is returned;
+ * otherwise returns the number of names copied.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_sz;
+	unsigned int i;
+	int filled = 0;
+
+	/* Pick the name table matching the adapter flavor / FLM version */
+	if (is_vswitch) {
+		tbl = nthw_virt_xstats_names;
+		tbl_sz = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		tbl = nthw_cap_xstats_names_v1;
+		tbl_sz = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		tbl = nthw_cap_xstats_names_v2;
+		tbl_sz = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (!xstats_names)
+		return tbl_sz;
+
+	for (i = 0; i < size && i < tbl_sz; i++) {
+		strlcpy(xstats_names[i].name, tbl[i].name,
+			sizeof(xstats_names[i].name));
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Resolve xstat names for the given ids.
+ * With a NULL destination, only the total number of names is returned;
+ * otherwise returns the number of names actually written. Ids out of
+ * range are skipped, matching nthw_xstats_get_by_id() semantics.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			/* Use the destination field size as the bound, like
+			 * nthw_xstats_get_names() does.
+			 */
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				sizeof(xstats_names[i].name));
+			/* Count only entries actually written; previously the
+			 * counter also advanced for out-of-range ids, which
+			 * overstated the result vs nthw_xstats_get_by_id().
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Extended-statistics helpers for the ntnic PMD.
+ * Per the implementation file: nthw_xstats_get(), nthw_xstats_get_by_id()
+ * and nthw_xstats_reset() must be called with the stat mutex locked; the
+ * name lookups below require no locking.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v2 8/8] net/ntnic: adds socket connection to PMD
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-17 14:43   ` [PATCH v2 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-17 14:43   ` Mykola Kostenok
  2023-08-17 22:08   ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Tyler Retzlaff
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-17 14:43 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1310 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5354 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index faaba95af3..262ce436b9 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -1,11 +1,27 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2020-2023 Napatech A/S
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
 # cflags
 cflags += [
     '-std=c11',
 ]
 
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -19,6 +35,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -40,6 +57,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+/* One node in a command-dispatch tree: a request is a comma-separated
+ * token list; each token selects an entry, descending through sub_funcs
+ * until a leaf handler is reached.
+ */
+struct func_s {
+	const char *param;	/* token this entry matches */
+	struct func_s *sub_funcs; /* nested table for the next token, or NULL */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len); /* leaf handler, or NULL */
+};
+
+/*
+ * Build an error reply for 'module' into *data / *len.
+ *
+ * Reply layout: 4 bytes of binary error code, immediately followed by
+ * "<module>:<error text>" including its NUL terminator.
+ * On allocation failure (or data == NULL) *len stays 0.
+ * Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4 bytes for the code placeholder ("----"), module, ':',
+		 * error text and NUL terminator.
+		 */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			/* Overwrite the "----" placeholder with the code */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal reply carrying only a 32-bit status code.
+ * On success *data is a malloc'ed 4-byte buffer and *len its size;
+ * on allocation failure *data is NULL and *len stays 0.
+ * Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	uint32_t *reply = malloc(sizeof(uint32_t));
+
+	*data = (char *)reply;
+	if (reply) {
+		*len = sizeof(uint32_t);
+		*reply = (uint32_t)code;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Walk the 'func_list' dispatch tree using the comma-separated tokens in
+ * 'function' (modified in place) and invoke the matching leaf handler.
+ *
+ * recur_depth is 0 on the first call; it only selects which error code is
+ * reported for a missing token. hdr->len is reduced by each consumed token
+ * so handlers see the remaining payload length.
+ *
+ * NOTE(review): strtok() keeps static state and is not reentrant/thread
+ * safe - confirm all callers run on a single thread, or switch to strtok_r.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	/* Account for the consumed token (and its separator/terminator) */
+	hdr->len -= strlen(tok) + 1;
+	/* Remainder of the request string, just past the consumed token */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				/* Descend one level with the remaining tokens */
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				/* Table entry with neither subtree nor handler */
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+/*
+ * Operations table a module registers with the ntconnect server
+ * (see register_ntconn_mod()).
+ */
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blop.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain an describing error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+/* Maps a numeric error code to a human-readable description */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+/* Look up the descriptor for 'err_code'; never returns NULL */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/* One registered module instance; linked into a global list */
+typedef struct ntconn_mod_s {
+	void *hdl; /* module-private context, passed to all ops */
+	struct pci_id_s addr; /* PCI address this module instance serves */
+	const ntconnapi_t *op; /* the module's operations table */
+
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next; /* next module in the global list */
+} ntconn_mod_t;
+
+/* Global state for the ntconnect server itself */
+struct ntconn_server_s {
+	int serv_fd; /* server socket descriptor */
+	int running; /* server run flag */
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset; /* CPU set handed to ntconnect_init() */
+};
+
+/* Register the built-in "server" module (pseudo address 0000:00:00.0) */
+int ntconn_server_register(void *server);
+
+/* Register a module's ops for the given PCI address */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+/* Filesystem path of the UNIX domain socket clients connect to */
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+/* Error codes carried in ntconn error replies */
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+/*
+ * Status codes for ntconn_reply_status().
+ * NOTE(review): only adapter-related codes are defined despite the
+ * generic enum name — confirm whether other modules add codes later.
+ */
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+/* Message type tags used in struct ntconn_header_s::tag */
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+/* Error reply payload: code plus a descriptive text */
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+/* Wire header preceding every ntconnect message */
+struct ntconn_header_s {
+	uint16_t tag; /* one of the NTCONN_TAG_* values */
+	uint16_t len; /* length of the command string */
+	uint32_t blob_len; /* length of the trailing binary blob */
+};
+
+/*
+ * PCI address, addressable either as one 64-bit id or via its
+ * components (anonymous struct inside a union, C11).
+ */
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;
+		};
+	};
+};
+
+/* Split a 64-bit version value into its 32-bit halves */
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+/* Reply payload: NUL-terminated "dddd:bb:dd.f" strings */
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+/* Link speeds reported for a port */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+/* Administrative / NIM state of a port */
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+/* Link status of a port */
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+/* Role of a port (physical, LAG member, or virtual) */
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+/* NIM (network interface module) hardware identifier */
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+/* NOTE(review): distinct from enum port_type above despite the similar name */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+/* 6-byte MAC address */
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+/*
+ * NIM identification data.
+ * NOTE(review): string fields are fixed-size char arrays — presumably
+ * NUL-terminated by the producer; confirm against the filling code.
+ */
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+/* One sensor reading with its running min/max */
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+/* Sensor-count summary for an adapter and its ports */
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+/* Direction of a queue attached to an interface */
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+/* Full description of one interface in an "adapter get,interfaces" reply */
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+/* Reply: 'nb_ports' entries in the flexible 'intf' array */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+/* NOTE(review): 'char *fw_version[32]' declares 32 char POINTERS; a
+ * 32-char string buffer ('char fw_version[32]') may have been intended.
+ * Changing it would alter the binary reply layout — verify with the
+ * producer/consumer before fixing.
+ */
+struct ntc_adap_get_info_s {
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+/* NOTE(review): also defined in ntconnect_api_meter.h — keep in sync */
+#define MAX_PATH_LEN 128
+
+/* Flow-module specific error codes (offset past the generic ones) */
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+/* Fixed-size copy of any flow element spec/mask; 'valid' marks presence */
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+/* Wire-safe copy of one flow element (type + spec + mask) */
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+/* RSS action with the queue list copied inline */
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+/* Decap action with raw data and items copied inline */
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+/* Encap action with raw data and items copied inline */
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+/* Fixed-size copy of any flow action configuration */
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+/* Wire-safe copy of one flow action (type + configuration) */
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+/* Request payload: query one action of an existing flow */
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+/* Request payload: create a flow from copied elements and actions */
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+/* Request payload: destroy a flow by handle */
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+/* NOTE(review): also defined in ntconnect_api_meter.h — keep in sync */
+#define ERR_MSG_LEN 128LLU
+
+/* Reply: queues assigned to a port */
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+/* Reply: error type, message and status */
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+/* Reply: handle of the newly created flow */
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+/* Reply: validation status only */
+struct validate_flow_return_s {
+	int status;
+};
+
+/* Reply: query result with 'data_length' bytes of trailing data */
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+/* Generic flow-module reply */
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+/* Error descriptor carried inside other replies */
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+/* Magic marker value — NOTE(review): usage not visible here; confirm */
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* NOTE(review): also defined in ntconnect_api_flow.h — keep in sync */
+#define MAX_PATH_LEN 128
+
+/* Meter-module specific error codes (offset past the generic ones) */
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+/* Sub-commands of the meter module */
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+/* NOTE(review): also defined in ntconnect_api_flow.h — keep in sync */
+#define ERR_MSG_LEN 128LLU
+
+/* Reply: rte_mtr error type, status and message */
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+/* Request payload: profile, policy or meter setup (selected by command) */
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+/* Request payload: read (and optionally clear) meter statistics */
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+/* Reply: meter statistics plus validity mask */
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+/* Pointer-based variant of meter_setup_s for in-process use */
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+/* Generic meter-module reply */
+struct meter_return_s {
+	int status;
+};
+
+/* Reply: rte_mtr capabilities of the adapter */
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+/* Each NUM_STAT_RECORD_TYPE_* macro counts the uint64_t fields of its
+ * struct; this only holds while the structs contain uint64_t members
+ * exclusively — keep that invariant when editing them.
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+/* Per-color counter record */
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+/* Flow matcher (FLM) counter record */
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+/* Per-queue counter record */
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+/* Rx port counter record for Cap devices (RMON plus Rx-only counters) */
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+/* Tx port counter record for Cap devices (RMON counters only) */
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+/* Reply header: 'nb_counters' records follow in the flexible array */
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Test reply: 'number' results in the flexible 'test' array */
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+/* "get" sub-commands; the all-NULL entry terminates the table */
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+/* Top-level commands dispatched by execute_function() */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Handler for "server get,nic_pci_ids": return the PCI ids of all
+ * registered SmartNICs formatted as "dddd:bb:dd.f" strings.
+ * The reply buffer is allocated here and released via the module's
+ * free_data operation after the reply has been sent.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/* snprintf instead of sprintf: the formatted id fills the
+		 * 13-byte destination exactly; bound the write so a wider
+		 * field value can never overflow the buffer.
+		 */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Module request entry point: dispatch the command string through the
+ * server_entry_funcs table (see execute_function()).
+ */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously returned by a request handler.
+ * Safe to call with data == NULL (nothing is logged or freed then).
+ */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/* Operations the built-in "server" module exposes to the dispatcher;
+ * no client_cleanup is needed since no per-client state is kept here.
+ */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/*
+ * Register the server module itself; it always lives on the pseudo
+ * PCI address 0000:00:00.0.
+ */
+int ntconn_server_register(void *server)
+{
+	struct rte_pci_addr addr = { 0 };	/* 0000:00:00.0 */
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..51f0577194
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
/* Table mapping ntconnect error codes to human-readable text.
 * Terminated by a sentinel entry with err_code == -1; get_ntconn_error()
 * relies on that sentinel and on index 1 being "Internal error".
 */
ntconn_err_t ntconn_err[] = {
	{NTCONN_ERR_CODE_NONE, "Success"},
	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
		"Function or parameter not recognized/supported"},
	{-1, NULL}
};
+
+/* clang-format on */
+
static ntconn_mod_t *ntcmod_base;	/* Head of singly-linked registered-module list */
static pthread_t tid;			/* Accept-loop (server) thread */
static pthread_t ctid;			/* Most recently spawned client worker thread */
static struct ntconn_server_s ntconn_serv;	/* Server singleton state */
+
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int idx = 0;
+
+	while (ntconn_err[idx].err_code != (uint32_t)-1 &&
+			ntconn_err[idx].err_code != err_code)
+		idx++;
+	if (ntconn_err[idx].err_code == (uint32_t)-1)
+		idx = 1;
+
+	return &ntconn_err[idx];
+}
+
/*
 * Register a module (identified by PCI address plus op->module name) with
 * the ntconnect dispatcher: push it onto the global module list and record
 * its PCI ID in the server's ID list (used by "get nic_pci_ids").
 *
 * Returns 0 on success, -1 on allocation failure.
 *
 * NOTE(review): neither the list push nor the pci_id_list update is
 * protected by a lock - presumably all registration happens single-threaded
 * at probe time; confirm before registering from other contexts.
 */
int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
			const ntconnapi_t *op)
{
	/* Verify and check module name is unique */
#ifdef DEBUG
	NT_LOG(DBG, NTCONNECT,
	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
	       addr->bus, addr->devid, addr->function, op->module);
#endif

	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));

	if (!ntcmod) {
		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
		return -1;
	}
	ntcmod->addr.domain = addr->domain;
	ntcmod->addr.bus = addr->bus;
	ntcmod->addr.devid = addr->devid;
	ntcmod->addr.function = addr->function;
	ntcmod->addr.pad = 0;

	ntcmod->hdl = hdl;
	ntcmod->op = op;
	pthread_mutex_init(&ntcmod->mutex, NULL);

	/* Push onto the head of the module list */
	ntcmod->next = ntcmod_base;
	ntcmod_base = ntcmod;

	/* addr.pci_id appears to alias the domain/bus/devid/function fields
	 * set above - TODO confirm union layout in the header.
	 */
	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
		int i;

		for (i = 0; i < MAX_PCI_IDS; i++) {
			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
				NT_LOG(DBG, NTCONNECT,
				       "insert at index %i PCI ID %" PRIX64 "\n", i,
				       ntcmod->addr.pci_id);
				ntconn_serv.pci_id_list[i].pci_id =
					ntcmod->addr.pci_id;
				break;
			} else if (ntconn_serv.pci_id_list[i].pci_id ==
					ntcmod->addr.pci_id)
				break;
		}
	}

	return 0;
}
+
/*
 * Fill @addr as an AF_UNIX socket address for @path.
 *
 * Returns 0 on success; -1 when either argument is NULL or the path
 * (including its NUL terminator) does not fit in sun_path.
 *
 * Fix: the original passed sizeof(sun_path) - 1 as the rte_strscpy() size,
 * which silently truncated the last character of a path of exactly
 * sizeof(sun_path) - 1 characters even though the length check had
 * accepted it.  The length is validated up front here, so a plain copy of
 * the verified length (plus NUL) is both safe and truncation-free.
 */
static int unix_build_address(const char *path, struct sockaddr_un *addr)
{
	if (addr == NULL || path == NULL)
		return -1;

	memset(addr, 0, sizeof(struct sockaddr_un));
	addr->sun_family = AF_UNIX;

	size_t path_len = strlen(path);

	if (path_len < sizeof(addr->sun_path)) {
		/* Length verified above; copy the terminating NUL too */
		memcpy(addr->sun_path, path, path_len + 1);
		return 0;
	}
	return -1;
}
+
/* Internal status codes shared by the socket I/O helpers below.
 * STATUS_TRYAGAIN is the only transient one; callers retry on it.
 */
#define STATUS_OK 0
#define STATUS_INTERNAL_ERROR -1
#define STATUS_TRYAGAIN -2
#define STATUS_INVALID_PARAMETER -3
#define STATUS_CONNECTION_CLOSED -4
#define STATUS_CONNECTION_INVALID -5
#define STATUS_TIMEOUT -6
+
/*
 * Wait (poll) up to @timeout ms for data on @fd, then perform a single
 * recv() of at most @len bytes into @data.  On STATUS_OK, *recv_len holds
 * the number of bytes received (always > 0).
 *
 * Returns one of the STATUS_* codes; STATUS_TRYAGAIN means the caller
 * should simply retry the whole call.
 *
 * NOTE(review): the assert(0) calls on "impossible" poll() outcomes abort
 * in debug builds; release builds fall through to the returns below them.
 */
static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
		     int timeout)
{
	struct pollfd pfd;
	ssize_t ret;

	pfd.fd = fd;
	pfd.events = POLLIN;
	pfd.revents = 0;

	ret = poll(&pfd, 1, timeout);
	if (ret < 0) {
		if (errno == EINTR)
			return STATUS_TRYAGAIN; /* Caught signal before timeout */
		if (errno == EINVAL)
			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
		if (errno == EFAULT)
			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
		/* else */
		assert(0);
		return STATUS_INTERNAL_ERROR;
	}

	if (ret == 0)
		return STATUS_TIMEOUT;

	if (pfd.revents == 0) {
		assert(ret == 1);
		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
		return STATUS_TRYAGAIN;
	}

	/* Readable and no error/invalid-fd flags raised */
	if ((pfd.revents & POLLIN) &&
			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
		ret = recv(pfd.fd, data, len, 0);
		if (ret < 0) {
			int lerrno = errno;

			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
				/*
				 * We have data but if the very first read turns out to return
				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
				 * the connection
				 */
				NT_LOG(DBG, NTCONNECT,
				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
				       pfd.fd, lerrno, strerror(lerrno));
				return STATUS_CONNECTION_CLOSED;
			}
			if (lerrno != EINTR) {
				NT_LOG(ERR, NTCONNECT,
				       "recv() from fd %d received errno %d [%s]\n",
				       pfd.fd, lerrno, strerror(lerrno));
				return STATUS_CONNECTION_INVALID;
			}
			/* EINTR */
			return STATUS_TRYAGAIN;
		}
		if (ret == 0) {
			if (pfd.revents & POLLHUP) {
				/* This means that we have read all data and the remote end has
				 * HUP
				 */
#ifdef DEBUG
				NT_LOG(DBG, NTCONNECT,
				       "The remote end has terminated the session\n");
#endif
				return STATUS_CONNECTION_CLOSED;
			}
			return STATUS_TRYAGAIN;
		}

		/* Ret can only be positive at this point */
		 *recv_len = (size_t)ret;
		return STATUS_OK;
	}

	if ((pfd.revents & POLLHUP) == POLLHUP) {
		/* this means that the remote end has HUP */
		NT_LOG(DBG, NTCONNECT,
		       "The remote end has terminated the session\n");
		return STATUS_CONNECTION_CLOSED;
	}

	NT_LOG(ERR, NTCONNECT,
	       "poll() returned 0x%x. Invalidating the connection\n",
	       pfd.revents);
	return STATUS_CONNECTION_INVALID;
}
+
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t recv_len = 0;
+	size_t left = length;
+	size_t pos = 0;
+
+	while (left > 0) {
+		int ret = read_data(clfd, left, &data[pos], &recv_len, -1);
+
+		if (ret == STATUS_OK) {
+			pos += recv_len;
+			left -= recv_len;
+		} else {
+			if (ret == STATUS_CONNECTION_CLOSED || ret == STATUS_TIMEOUT) {
+				/* Silently return status */
+				return ret;
+			}
+			if (ret != STATUS_TRYAGAIN) {
+				NT_LOG(ERR, NTCONNECT,
+				       "Failed getting packet. Error code: 0x%X\n",
+				       ret);
+				return ret;
+			}
+		}
+		/* Try again */
+	}
+	return STATUS_OK;
+}
+
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += res;
+	}
+	return 0;
+}
+
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		if (!hdr) {
+			NT_LOG(ERR, NTCONNECT, "hdr returned NULL\n");
+			*status = STATUS_INTERNAL_ERROR;
+			return NULL;
+		}
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			idx += strlen(tok) + 1;
+			if (!tok)
+				goto err_out;
+			rte_strscpy(pci_id, tok, 31);
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			idx += strlen(tok) + 1;
+			if (!tok)
+				goto err_out;
+			rte_strscpy(module, tok, 63);
+
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	if (res)
+		return res;
+
+	return 0;
+}
+
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	sprintf(err_buf, "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	*(uint32_t *)err_buf = (uint32_t)ntcerr->err_code;
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
/*
 * Per-client worker thread: loop reading requests, dispatching them to the
 * matched module (or answering the built-in "version" command), and sending
 * replies, until the connection closes or a send fails.  On exit, every
 * registered module's client_cleanup callback is invoked for this fd.
 *
 * NOTE(review): after a successful dispatch the loop still falls through to
 * ntconnect_send_error(INVALID_REQUEST) below - it is unclear whether the
 * client protocol tolerates/expects this trailing error frame; confirm
 * against the client implementation before changing.
 * NOTE(review): the "break" paths exit the loop before the free(request)
 * at the bottom, leaking the final request buffer.
 */
static void *ntconnect_worker(void *arg)
{
	int status;
	int clfd = (int)(uint64_t)arg;
	char *module_cmd = NULL;
	char *request = NULL;
	struct ntconn_header_s hdr;

	do {
		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
								 &request,
								 &module_cmd,
								 &status);

		if (cmod && module_cmd && status == 0) {
			int len;
			char *data;

			/*
			 * Handle general module commands
			 */
			if (strcmp(module_cmd, "version") == 0) {
				uint64_t version =
					((uint64_t)cmod->op->version_major
					 << 32) +
					(cmod->op->version_minor);

				if (send_reply(clfd, NTCONN_TAG_REPLY,
						(void *)&version,
						sizeof(uint64_t)))
					break;

			} else {
				/*
				 * Call module for execution of command
				 */
				data = NULL;
				pthread_mutex_lock(&cmod->mutex);
				int repl = cmod->op->request(cmod->hdl, clfd,
							     &hdr, module_cmd,
							     &data, &len);
				pthread_mutex_unlock(&cmod->mutex);

				if (repl == REQUEST_OK && len >= 0) {
					if (send_reply_free_data(clfd, cmod,
								 NTCONN_TAG_REPLY,
								 (void *)data,
								 (uint32_t)len))
						break;

				} else if (repl == REQUEST_ERR && len >= 0) {
					if (send_reply_free_data(clfd, cmod,
								 NTCONN_TAG_ERROR,
								 (void *)data,
								 (uint32_t)len))
						break;
				} else {
					NT_LOG(ERR, NTCONNECT,
					       "Invalid result from module request function: module %s, result %i\n",
					       cmod->op->module, repl);
					if (ntconnect_send_error(clfd,
						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
						break;
				}
			}

		} else if (status == STATUS_TIMEOUT) {
			/* Other end is dead */
			NT_LOG(WRN, NTCONNECT,
			       "Client must be dead - timeout\n");
			break;
		} else if (status == STATUS_CONNECTION_CLOSED) {
			break; /* silently break out */
		}
		/* Error - send error back */
		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
			break;
		if (request)
			free(request);
	} while (1); /* while still connected */

	close(clfd);

	/* call module cleanup callback function for client_id */
	ntconn_mod_t *ntcmod = ntcmod_base;

	while (ntcmod) {
		if (ntcmod->op->client_cleanup) {
			pthread_mutex_lock(&ntcmod->mutex);
			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
			pthread_mutex_unlock(&ntcmod->mutex);
		}

		ntcmod = ntcmod->next;
	}
	pthread_exit(NULL);
	return NULL;
}
+
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		unix_build_address(sockname, &addr);
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
/* Protocol version reported by the "adapter" module */
#define NTCONN_ADAP_VERSION_MAJOR 0U
#define NTCONN_ADAP_VERSION_MINOR 1U

/* Name this module registers under in the ntconnect dispatcher */
#define this_module_name "adapter"

/* Maximum number of adapters this module can track */
#define MAX_ADAPTERS 2

/* Per-adapter handle; currently just the owning driver instance */
static struct adap_hdl_s {
	struct drv_s *drv;
} adap_hdl[MAX_ADAPTERS];
+
/* Handlers for the "get" sub-commands (forward declarations) */
static int func_adapter_get_interfaces(void *hdl, int client_id,
				       struct ntconn_header_s *hdr, char **data,
				       int *len);
static int func_adapter_get_info(void *hdl, int client_id,
				 struct ntconn_header_s *hdr, char **data,
				 int *len);
static int func_adapter_get_sensors(void *hdl, int client_id _unused,
				    struct ntconn_header_s *hdr _unused,
				    char **data, int *len);
/* Level-1 dispatch table for "get"; NULL-terminated */
static struct func_s funcs_get_level1[] = {
	{ "interfaces", NULL, func_adapter_get_interfaces },
	{ "info", NULL, func_adapter_get_info },
	{ "sensors", NULL, func_adapter_get_sensors },
	{ NULL, NULL, NULL },
};
+
/* Handlers for the "set" sub-commands (forward declarations) */
static int func_adapter_set_interface(void *hdl, int client_id,
				      struct ntconn_header_s *hdr, char **data,
				      int *len);
static int func_adapter_set_adapter(void *hdl, int client_id,
				    struct ntconn_header_s *hdr, char **data,
				    int *len);
/* Level-1 dispatch table for "set"; NULL-terminated */
static struct func_s funcs_set_level1[] = {
	{ "interface", NULL, func_adapter_set_interface },
	{ "adapter", NULL, func_adapter_set_adapter },
	{ NULL, NULL, NULL },
};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:
+		return PORT_LINK_SPEED_10M;
+	case NT_LINK_SPEED_100M:
+		return PORT_LINK_SPEED_100M;
+	case NT_LINK_SPEED_1G:
+		return PORT_LINK_SPEED_1G;
+	case NT_LINK_SPEED_10G:
+		return PORT_LINK_SPEED_10G;
+	case NT_LINK_SPEED_25G:
+		return PORT_LINK_SPEED_25G;
+	case NT_LINK_SPEED_40G:
+		return PORT_LINK_SPEED_40G;
+	case NT_LINK_SPEED_50G:
+		return PORT_LINK_SPEED_50G;
+	case NT_LINK_SPEED_100G:
+		return PORT_LINK_SPEED_100G;
+	default:
+		break;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	if (strcmp(speed_str, "10M") == 0)
+		return NT_LINK_SPEED_10M;
+	else if (strcmp(speed_str, "100M") == 0)
+		return NT_LINK_SPEED_100M;
+	else if (strcmp(speed_str, "1G") == 0)
+		return NT_LINK_SPEED_1G;
+	else if (strcmp(speed_str, "10G") == 0)
+		return NT_LINK_SPEED_10G;
+	else if (strcmp(speed_str, "25G") == 0)
+		return NT_LINK_SPEED_25G;
+	else if (strcmp(speed_str, "40G") == 0)
+		return NT_LINK_SPEED_40G;
+	else if (strcmp(speed_str, "50G") == 0)
+		return NT_LINK_SPEED_50G;
+	else if (strcmp(speed_str, "100G") == 0)
+		return NT_LINK_SPEED_100G;
+	else
+		return NT_LINK_SPEED_UNKNOWN;
+}
+
/*
 * "get interfaces": build a ntc_interfaces_s reply describing every
 * rte_eth port - physical (plain or link-aggregated) first, then virtual -
 * including PCI address, link/admin state, speed, MTU, MAC, attached HW
 * queues and NIM (transceiver) data.  The reply is heap-allocated and
 * ownership passes to the caller via *data/*len.
 *
 * NOTE(review): the eth_dev/data/dev_private NULL checks are done for phy
 * port 0 only; later ports are dereferenced unchecked - presumably safe
 * once port 0 exists, but confirm.
 */
static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
				       struct ntconn_header_s *hdr _unused,
				       char **data, int *len)
{
	struct ntc_interfaces_s *ifs;
	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
	int lag_active;
	int final_list = adap->drv->probe_finished;
	/* keep final_list set before nb_ports are called */
	rte_compiler_barrier();
	int nb_ports = rte_eth_dev_count_avail();

	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
	char phy0_name[128];

	rte_eth_dev_get_name_by_port(0, phy0_name);
	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);

	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
			phy0_eth_dev->data->dev_private == NULL) {
		return ntconn_error(data, len, this_module_name,
				    NTCONN_ERR_CODE_INTERNAL_ERROR);
	}
	struct pmd_internals *phy0_internals =
		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
	if (lag_active) {
		/*
		 * Phy ports are link aggregated. I.e. number of ports is actually
		 * one bigger than what rte_eth_dev_count_avail() returned
		 */
		nb_ports++;

		/*
		 * Sanity check:
		 * For now we know about LAG with 2 ports only.
		 * If in the future we get HW with more ports, make assert to alert
		 * the developers that something needs to be looked at...
		 */
		assert(fpga_info->n_phy_ports == 2);
	}

	*len = sizeof(struct ntc_interfaces_s) +
	       sizeof(struct ntc_interface_s) * nb_ports;
	ifs = malloc(*len);
	if (!ifs) {
		*len = 0;
		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
		return REQUEST_ERR;
	}
	*data = (char *)ifs;

	ifs->nb_ports = nb_ports;
	ifs->final_list = final_list;

	int i;

	/* First set the "port type" of the physical ports */
	if (lag_active) {
		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
			/* Active/active LAG */
			for (i = 0; i < fpga_info->n_phy_ports; i++) {
				ifs->intf[i].type =
					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
			}
		} else if (phy0_internals->lag_config->mode ==
				BONDING_MODE_ACTIVE_BACKUP) {
			/* Active/backup LAG */
			ifs->intf[phy0_internals->lag_config->primary_port]
			.type = PORT_TYPE_PHY_LAG_PRIMARY;
			ifs->intf[phy0_internals->lag_config->backup_port].type =
				PORT_TYPE_PHY_LAG_BACKUP;
		} else {
			/* Unknown LAG mode */
			assert(0);
		}
	} else {
		/* Normal phy ports (not link aggregated) */
		for (i = 0; i < fpga_info->n_phy_ports; i++)
			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
	}

	/* Then set the remaining port values for the physical ports. */
	for (i = 0; i < fpga_info->n_phy_ports; i++) {
		char name[128];

		if (i > 0 && lag_active) {
			/*
			 * Secondary link aggregated port. Just display the "internals" values
			 * from port 0
			 */
			rte_eth_dev_get_name_by_port(0, name);
		} else {
			rte_eth_dev_get_name_by_port(i, name);
		}
		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);

		struct pmd_internals *internals =
			(struct pmd_internals *)eth_dev->data->dev_private;
		struct adapter_info_s *p_adapter_info =
				&adap->drv->ntdrv.adapter_info;

		ifs->intf[i].port_id = i;
		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
		ifs->intf[i].pci_id.function =
			internals->pci_dev->addr.function;
		ifs->intf[i].pci_id.pad = 0;

		const bool port_link_status =
			nt4ga_port_get_link_status(p_adapter_info, i);
		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
				    PORT_LINK_DOWN;

		const nt_link_speed_t port_link_speed =
			nt4ga_port_get_link_speed(p_adapter_info, i);
		ifs->intf[i].port_speed = read_link_speed(port_link_speed);

		const bool port_adm_state =
			nt4ga_port_get_adm_state(p_adapter_info, i);
		if (!port_adm_state) {
			ifs->intf[i].port_state = PORT_STATE_DISABLED;
		} else {
			const bool port_nim_present =
				nt4ga_port_get_nim_present(p_adapter_info, i);
			if (port_nim_present) {
				ifs->intf[i].port_state =
					PORT_STATE_NIM_PRESENT;
			} else {
				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
			}
		}

		/* MTU */
		if (i > 0 && lag_active) {
			/* Secondary link aggregated port. Display same MTU value as port 0 */
			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
		} else {
			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
		}

		/* MAC: base address from VPD plus the port index */
		const uint64_t mac =
			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;

		if (i > 0 && lag_active) {
			/* Secondary link aggregated port. Queues not applicable */
			ifs->intf[i].num_queues = 0;
		} else {
			/* attached hw queues to this interface */
			unsigned int input_num = internals->nb_rx_queues;
			/*
			 * These are the "input" queues, meaning these go to host and is attached
			 * to receiving from a port
			 */
			for (unsigned int ii = 0; ii < input_num; ii++) {
				ifs->intf[i].queue[ii].idx =
					internals->rxq_scg[ii].queue.hw_id;
				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
			}

			/*
			 * These are the "output" queues, meaning these go to a virtual port queue
			 * which typically is used by vDPA
			 */
			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
					ii++) {
				ifs->intf[i].queue[ii + input_num].idx =
					internals->vpq[ii].hw_id;
				ifs->intf[i].queue[ii + input_num].dir =
					QUEUE_OUTPUT;
			}

			ifs->intf[i].num_queues =
				input_num + internals->vpq_nb_vq;
		}

		/* NIM information */
		nim_i2c_ctx_t nim_ctx =
			nt4ga_port_get_nim_capabilities(p_adapter_info, i);

		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
			nim_ctx.vendor_name,
			sizeof(ifs->intf[i].nim_data.vendor_name));
		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
			sizeof(ifs->intf[i].nim_data.prod_no));
		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
			nim_ctx.serial_no,
			sizeof(ifs->intf[i].nim_data.serial_no));
		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
			sizeof(ifs->intf[i].nim_data.date));
		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
			sizeof(ifs->intf[i].nim_data.rev));

		/* Clamp single-mode length to the 16-bit wire field */
		if (nim_ctx.len_info[0] >= 0xFFFF)
			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
		else
			ifs->intf[i].nim_data.link_length.sm =
				nim_ctx.len_info[0];

		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];

		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
	}

	/* And finally handle the virtual ports. */
	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
					   fpga_info->n_phy_ports;
	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
		/* Continue counting from the "i" value reached in the previous for loop */
		char name[128];

		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
					     name);
		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);

		struct pmd_internals *internals =
			(struct pmd_internals *)eth_dev->data->dev_private;

		ifs->intf[i].port_id = i;
		ifs->intf[i].type = PORT_TYPE_VIRT;
		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
		ifs->intf[i].pci_id.function =
			internals->pci_dev->addr.function;
		ifs->intf[i].pci_id.pad = 0;

		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
		switch (internals->vport_comm) {
		case VIRT_PORT_NEGOTIATED_NONE:
			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
			ifs->intf[i].link = PORT_LINK_DOWN;
			break;
		case VIRT_PORT_NEGOTIATED_SPLIT:
			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
			ifs->intf[i].link = PORT_LINK_UP;
			break;
		case VIRT_PORT_NEGOTIATED_PACKED:
			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
			ifs->intf[i].link = PORT_LINK_UP;
			break;
		case VIRT_PORT_USE_RELAY:
			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
			ifs->intf[i].link = PORT_LINK_UP;
			break;
		}

		/* MTU */
		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
				    &ifs->intf[i].mtu);

		/* MAC */
		for (int ii = 0; ii < 6; ii++) {
			ifs->intf[i].mac.addr_b[ii] =
				internals->eth_addrs[0].addr_bytes[ii];
		}

		/* attached hw queues to this interface */
		unsigned int input_num = internals->nb_rx_queues;

		/*
		 * These are the "input" queues, meaning these go to host and is attached to
		 * receiving from a port
		 */
		for (unsigned int ii = 0; ii < input_num; ii++) {
			ifs->intf[i].queue[ii].idx =
				internals->rxq_scg[ii].queue.hw_id;
			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
		}

		/*
		 * These are the "output" queues, meaning these go to a virtual port queue
		 * which typically is used by vDPA
		 */
		unsigned int numq =
			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
			MAX_RSS_QUEUES - input_num :
			internals->vpq_nb_vq;
		for (unsigned int ii = 0; ii < numq; ii++) {
			ifs->intf[i].queue[ii + input_num].idx =
				internals->vpq[ii].hw_id;
			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
		}
		ifs->intf[i].num_queues = input_num + numq;
	}
	return REQUEST_OK;
}
+
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
/*
 * "get sensors": serialize a ntc_sensors_s header followed by one packed
 * struct sensor per adapter sensor and per NIM sensor (per phy port) into
 * a heap buffer; ownership passes to the caller via *data/*len.
 *
 * NOTE(review): the adapter-sensor branch memcpy's a fixed 50 bytes of
 * info.name (may over-read a shorter name), while the NIM branch copies
 * min(strlen, 50) bytes without NUL-terminating or zeroing the rest of the
 * malloc'ed name field - confirm the consumer treats names as fixed-width.
 */
static int func_adapter_get_sensors(void *hdl, int client_id _unused,
				    struct ntconn_header_s *hdr _unused,
				    char **data, int *len)
{
	struct adapter_info_s *adapter =
		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
	struct sensor *sensor_ptr = NULL;
	uint16_t sensors_num = 0;
	uint8_t *sensors = NULL;
	struct ntc_sensors_s sensors_info = {
		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
		.ports_cnt = adapter->fpga_info.n_phy_ports
	};
	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);

	/* Set a sum of sensor`s counters */
	sensors_num = adapter->adapter_sensors_cnt;
	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
		sensors_num += adapter->nim_sensors_cnt[i];
		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
	}

	*len = sizeof(struct ntc_sensors_s) +
	       sensors_num * sizeof(struct sensor);

	/* Allocate memory for sensors array */
	sensors = malloc(*len);
	if (!sensors) {
		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
		*len = 0;
		return REQUEST_ERR;
	}
	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));

	/* Fetch adapter sensors */
	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
			ptr != NULL; ptr = ptr->next) {
		sensor_ptr->current_value = ptr->sensor->info.value;
		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
		sensor_ptr->max_value = ptr->sensor->info.value_highest;
		sensor_ptr->sign = ptr->sensor->si;
		sensor_ptr->type = ptr->sensor->info.type;
		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
		sensor_ptr++;
	}

	/* Fetch NIM sensors */
	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
				ptr != NULL; ptr = ptr->next) {
			sensor_ptr->current_value = ptr->sensor->info.value;
			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
			sensor_ptr->max_value = ptr->sensor->info.value_highest;
			sensor_ptr->sign = ptr->sensor->si;
			sensor_ptr->type = ptr->sensor->info.type;

			memcpy(sensor_ptr->name, ptr->sensor->info.name,
			       (strlen(ptr->sensor->info.name) >= 50) ?
			       50 :
			       strlen(ptr->sensor->info.name));
			sensor_ptr++;
		}
	}

	/* Send response */
	 *data = (char *)sensors;

	return REQUEST_OK;
}
+
+/* Administratively enable a physical port; always reports REQUEST_OK */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable a physical port; always reports REQUEST_OK */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/* Request link up on a port; a no-op (logged) if the link is already up */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/* Request link down on a port; a no-op (logged) if already down */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of a port from its textual form (e.g. "10G").
+ * Only allowed while the port is administratively disabled; otherwise an
+ * NTCONN_ADAPTER_ERR_WRONG_LINK_STATE reply is produced in *data/*len.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	/* Speed changes are rejected on an enabled port */
+	if (nt4ga_port_get_adm_state(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+	return REQUEST_OK;
+}
+
+/* Apply the requested loopback mode (NT_LINK_LOOPBACK_*) to a port */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Enable (disable==false) or disable (disable==true) TX power on a port.
+ * Produces an NTCONN_ADAPTER_ERR_TX_POWER_FAIL reply on hardware failure.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+	int res;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	res = nt4ga_port_tx_power(p_adapter_info, portid, disable);
+	if (res != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+
+	return REQUEST_OK;
+}
+
+/*
+ * "adapter;set,interface,..." handler. Parses "portN,<keyword>[=value]"
+ * from *data (modified in place) and dispatches to the matching setter.
+ * Returns REQUEST_OK on success, otherwise an ntconn error reply.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only physical ports (0 .. n_phy_ports - 1) can be set here.
+	 * The original test used '<', which rejected exactly the valid
+	 * ports; also guard against a negative atoi() result.
+	 */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Unknown keyword or missing value */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * "adapter;set,adapter,..." handler. Adapter-level settings are not
+ * implemented yet; the command is logged and an error reply is produced.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * Module request entry point: dispatch 'function' to the matching
+ * handler in adapter_entry_funcs via the generic helper.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by a handler above */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; this module keeps no per-client state */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect operations table registered for the "adapter" module */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register one adapter with the NtConnect framework. Claims the first
+ * free slot in adap_hdl[]; returns -1 when all slots are taken,
+ * otherwise the result of register_ntconn_mod().
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL) {
+			adap_hdl[slot].drv = drv;
+			return register_ntconn_mod(&drv->p_dev->addr,
+						   (void *)&adap_hdl[slot],
+						   &ntconn_adap_op);
+		}
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "Cannot register more adapters into NtConnect framework");
+	return -1;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..31d5dc3edc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+/* Maximum number of concurrent NtConnect clients of this module */
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One driver-handle slot per client */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Per-port mapping to its flow device and caller identification,
+ * populated by func_flow_setport().
+ */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/* Error-code-to-text table; terminated by the { -1, NULL } sentinel */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Map an ntconn flow error code to its text. Unknown codes map to
+ * entry 1 of the table ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	for (int i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+
+	/* Code not found in the table */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Allocate a flow_return_s reply in *data/*len carrying 'err' and the
+ * message text for 'code'. Returns REQUEST_OK, or REQUEST_ERR (with
+ * *len == 0) when allocation fails.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * Use strlcpy as ntconn_flow_err_status() does: the former
+		 * bounded memcpy left err_msg without a NUL terminator
+		 * when the text filled the buffer.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Allocate a flow_return_s reply in *data/*len carrying 'err' and the
+ * generic "Internal error" text. Returns REQUEST_OK, or REQUEST_ERR
+ * (with *len == 0) when allocation fails.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	struct flow_return_s *reply = (struct flow_return_s *)*data;
+
+	reply->status = err;
+	reply->type = FLOW_ERROR_GENERAL;
+	strlcpy(reply->err_msg,
+		get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR), ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Request keyword to handler dispatch table for the "filter" module */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Package 'status' and 'error' into a newly allocated flow_return_s
+ * reply in *data/*len. Returns REQUEST_OK, or REQUEST_ERR (with
+ * *len == 0) when allocation fails.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	struct flow_return_s *reply = (struct flow_return_s *)*data;
+
+	reply->status = status;
+	reply->type = error->type;
+	strlcpy(reply->err_msg, error->message, ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/* Reset a flow_error to the "success" state */
+static void set_error(struct flow_error *error)
+{
+	error->message = "Operation successfully completed";
+	error->type = FLOW_ERROR_SUCCESS;
+}
+
+/*
+ * "filter;setport" handler: bind a client port number ("in_port=N") to
+ * the flow device of the virtio path given by "vpath=...". On success a
+ * flow_setport_return reply listing the VF queues is produced in
+ * *data/*len; on failure an error reply with ENODEV.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/* Initialized: logged (and possibly used) even when no vpath= token
+	 * is present in the request.
+	 */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		/*
+		 * Match the full "in_port=" prefix (8 chars) and parse the
+		 * number after it. The previous code compared only 5 bytes
+		 * and ran atoi() from the '=' sign, always yielding 0.
+		 */
+		if (length > 8 && memcmp(tok, "in_port=", 8) == 0)
+			in_port = atoi(tok + 8);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vpath=", 6) == 0)
+			strlcpy(vpath, tok + 6, MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * "filter;flush" handler: parse "port=N" from *data and flush all flows
+ * on that port's flow device. Produces a flow_return_s reply.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* atoi() may return a negative value; reject it before indexing */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * "filter;destroy" handler: destroy the flow handle carried in the
+ * binary destroy_flow_ntconnect blob appended to the request. Produces
+ * a flow_return_s reply with the result.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/*
+	 * Pre-initialize 'error' (as the sibling handlers do); its fields
+	 * are read below even if flow_destroy() leaves them untouched.
+	 */
+	set_error(&error);
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject out-of-range (including negative) port indices */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selects whether make_flow_create() validates or creates the flow */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Translate the wire-format flow definition received from the client
+ * (arrays of *_cpy structures with embedded data) into the flow_elem /
+ * flow_action arrays expected by the flow API, then validate or create
+ * the flow.
+ *
+ * func:     FLOW_API_FUNC_CREATE or FLOW_API_FUNC_VALIDATE
+ * port:     index into port_eth[] (range-checked by the caller)
+ * status:   out; NTCONN_FLOW_ERR_NONE on success
+ * error:    out; flow API error details
+ *
+ * Returns the created flow handle as an integer, or 0 on validate/error.
+ *
+ * All bounds checks below use '>=' since each array holds exactly
+ * MAX_* entries; the previous '>' tests allowed one out-of-bounds write.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unpack match elements; spec/mask point into the request blob */
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unpack actions; some types need their conf pointers rebuilt */
+	idx = -1;
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "flow.validate" handler.
+ *
+ * Validates (but does not create) a flow described by the
+ * struct create_flow_ntconnect blob appended to the request header.
+ * The reply buffer is allocated by copy_return_status() and carries
+ * the validation status plus any flow error from the filter layer.
+ *
+ * Fixes vs. the original:
+ *  - removed the unreachable duplicate validation code that followed
+ *    the "return copy_return_status(...)" statement,
+ *  - DEBUG_PARSING: the IPv4 dump now prints all four address bytes
+ *    instead of byte 0 four times, and the END check indexes the
+ *    element array as flow_cpy->elem[i] instead of flow_cpy[i].elem[i],
+ *  - the DEBUG_FLOW entry log now names this function.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	/* The flow description blob follows the ntconnect header */
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	/* Dump the pattern elements until the END marker */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	/* Dump the actions until the END marker */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect "flow.query" handler.
+ *
+ * Parses a struct query_flow_ntconnect blob appended to the request
+ * header, runs flow_query() on the referenced flow handle and returns
+ * a struct query_flow_return_s (status, error info and the raw query
+ * data) in a newly allocated *data buffer.  Returns REQUEST_OK even
+ * when the query itself failed (the failure is reported inside the
+ * reply struct); REQUEST_ERR only when the reply cannot be allocated.
+ *
+ * Fix vs. the original: the query output buffer (data_out) was leaked
+ * when the reply allocation failed; it is now freed on that path.
+ *
+ * NOTE(review): the flow handle is a client-supplied pointer taken
+ * verbatim from the wire (flow_cpy->flow) — confirm the transport is
+ * trusted/local-only.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	/* The query blob follows the ntconnect header */
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply carries the fixed header plus the raw query payload */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	/* Reply allocation failed: don't leak the query output buffer */
+	free(data_out);
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Dispatch an incoming ntconnect request to the flow module's
+ * function table via the shared execute_function() helper.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int res;
+
+	res = execute_function(this_module_name, hdl, client_id, hdr,
+			       function, adapter_entry_funcs, data, len, 0);
+	return res;
+}
+
+/* Release a reply buffer previously allocated by a flow request
+ * handler.  free(NULL) is a no-op, so no NULL guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook required by the ntconnect module API;
+ * the flow module keeps no per-client state, so this is a no-op.
+ */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect module descriptor for the "flow" module: name, version
+ * and the request/free/cleanup callbacks registered with the
+ * NtConnect framework.
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register the flow module for the given driver instance in the
+ * NtConnect framework.  Claims the first free slot in flow_hdl[];
+ * returns -1 when all MAX_CLIENTS slots are taken, otherwise the
+ * result of register_ntconn_mod().
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int idx;
+
+	/* Find the first unused handle slot */
+	for (idx = 0; idx < MAX_CLIENTS && flow_hdl[idx].drv != NULL; idx++)
+		;
+
+	if (idx == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[idx].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[idx],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter module error-code -> message table, terminated by { -1, NULL }.
+ * Entry order matters: get_error_msg() falls back to index 1
+ * ("Internal error") for unknown codes.
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/*
+ * Map an NTCONN_METER_ERR_* code to its human-readable message.
+ * Codes below NTCONN_METER_ERR_INTERNAL_ERROR are generic ntconnect
+ * codes and are resolved through get_ntconn_error(); unknown meter
+ * codes fall back to the "Internal error" text (ntconn_err[1]).
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	const ntconn_err_t *entry;
+
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR)
+		return get_ntconn_error(err_code)->err_text;
+
+	/* Walk the table until the { -1, NULL } sentinel */
+	for (entry = ntconn_err; entry->err_code != (uint32_t)-1; entry++) {
+		if (entry->err_code == err_code)
+			return entry->err_text;
+	}
+	/* Unknown code: report it as an internal error */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Request dispatch table for the meter module:
+ * "capabilities" / "setup" / "read"; NULL-terminated.
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/* Fill an rte_mtr_error with the message text matching the given
+ * internal error code; the error type is always UNSPECIFIED and no
+ * cause object is attached.
+ */
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->cause = NULL;
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+}
+
+/*
+ * ntconnect "meter.capabilities" handler.
+ *
+ * The request is plain text of the form "vport=N"; on success the
+ * rte_mtr capabilities of the physical port backing the virtual port
+ * are returned in a struct meter_capabilities_return_s, otherwise a
+ * struct meter_error_return_s.  REQUEST_ERR is returned only when the
+ * reply buffer cannot be allocated.
+ *
+ * NOTE(review): the accepted vport range here is 1..64, while
+ * func_meter_setup/func_meter_read accept 4..128 — confirm which
+ * range is intended.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse the "vport=N" token from the request text */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		/* NOTE(review): port is still 0 when logged below */
+		goto error_get_capa;
+	}
+
+	/* Physical port is derived from the vport's low bit */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "meter.setup" handler.
+ *
+ * The request text starts with a six-letter command token ("addpro",
+ * "delpro", "addpol", "delpol", "crtmtr" or "delmtr") and a
+ * struct meter_setup_s blob follows the ntconnect header.  Profile,
+ * policy and meter IDs are remapped into a per-vport slice of the
+ * global ID space before calling the rte_mtr API, so each virtual
+ * port (expected range 4..128) gets its own ID range.  On success a
+ * struct meter_return_s is returned, on failure a
+ * struct meter_error_return_s; REQUEST_ERR only when the reply
+ * buffer cannot be allocated.
+ *
+ * Fix vs. the original: stray extra space before "*data = malloc"
+ * in the error path removed (whitespace only).
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* The first comma-separated token selects the sub-command */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		/* max_id = per-vport slice of the global profile ID space */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Expand the flat per-color actions into the policy struct */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Remap referenced profile/policy IDs into the same
+		 * per-vport range used when they were added.
+		 */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "meter.read" handler.
+ *
+ * Reads (and optionally clears) the statistics of one meter.  The
+ * struct meter_get_stat_s blob follows the ntconnect header; the
+ * meter ID is remapped into the per-vport range used at creation
+ * time.  On success a struct meter_return_stat_s is returned, on
+ * failure a struct meter_error_return_s; REQUEST_ERR only when the
+ * reply buffer cannot be allocated.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Virtual ports are expected in the range 4..128 */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Physical port from the vport's low bit; remap the meter ID
+	 * into this vport's slice of the global meter ID space.
+	 */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Dispatch an incoming ntconnect request to the meter module's
+ * function table via the shared execute_function() helper.
+ */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	int res;
+
+	res = execute_function(this_module_name, hdl, client_id, hdr,
+			       function, adapter_entry_funcs, data, len, 0);
+	return res;
+}
+
+/* Release a reply buffer previously allocated by a meter request
+ * handler.  free(NULL) is a no-op, so no NULL guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook required by the ntconnect module API;
+ * the meter module keeps no per-client state, so this is a no-op.
+ */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect module descriptor for the "meter" module: name, version
+ * and the request/free/cleanup callbacks registered with the
+ * NtConnect framework.
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register the meter module for the given driver instance in the
+ * NtConnect framework.  Claims the first free slot in meter_hdl[];
+ * returns -1 when all MAX_CLIENTS slots are taken, otherwise the
+ * result of register_ntconn_mod().
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int idx;
+
+	/* Find the first unused handle slot */
+	for (idx = 0; idx < MAX_CLIENTS && meter_hdl[idx].drv != NULL; idx++)
+		;
+
+	if (idx == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[idx].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[idx],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules.
+ *
+ * Each function registers its module with the NtConnect framework for the
+ * given driver instance. Each returns -1 on failure, otherwise the result
+ * of register_ntconn_mod().
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..5c8b8db39e
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ *
+ * ntconn_stat_register() refuses to activate this module unless the FPGA's
+ * reported statistics layout version matches one of these entries.
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistic group inside a client's snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One statistic group within a snapshot buffer */
+struct snap_addr_s {
+	const uint64_t *ptr;	/* start of the group inside the buffer */
+	unsigned int size;	/* group size in 64-bit words */
+};
+
+/* Per-client snapshot state, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* owned allocation holding all groups */
+	struct snaps_s *next;
+};
+
+/* Module-global state; a single handle shared by all clients */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;	/* head of the per-client snapshot list */
+} stat_hdl;
+
+/* Statistic group selector used by get_size()/do_get_stats() */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* "get snapshot <group>" sub-commands: read back one group of a snapshot
+ * previously taken with the "snapshot" command.
+ */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* "get <item>" sub-commands: read live statistics or layout information */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level command table for the stat module */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize FlowMatcher (FLM) counters into the reply buffer.
+ * Each of the nbc requested records receives a copy of the single
+ * driver-side FLM counter block (the driver keeps only one).
+ * Returns the number of 64-bit words written.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* NOTE(review): the xstats calls appear to be made for their side
+	 * effects / consistency check only; the results are not serialized.
+	 */
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+			/* Bug fix: advance to the next output record. The
+			 * original never incremented flm, so every iteration
+			 * overwrote record 0 (cf. read_colors()/read_queues(),
+			 * which advance their output pointer).
+			 */
+			flm++;
+		}
+	} else {
+		/* Bug fix: zero all nbc output records, not just one
+		 * driver-struct worth of bytes of a different type.
+		 */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-color statistics into the reply buffer.
+ * Returns the number of 64-bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *out =
+		(struct color_type_fields_s *)hd->data;
+	int i;
+
+	hd->nb_counters = (uint64_t)nbc;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	for (i = 0; i < nbc; i++) {
+		out[i].pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		out[i].octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		out[i].tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-host-buffer (queue) statistics into the reply buffer.
+ * Returns the number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *out =
+		(struct queue_type_fields_s *)hd->data;
+	int q;
+
+	hd->nb_counters = (uint64_t)nbq;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	for (q = 0; q < nbq; q++) {
+		out[q].flush_pkts = hwstat->mp_stat_structs_hb[q].flush_packets;
+		out[q].drop_pkts = hwstat->mp_stat_structs_hb[q].drop_packets;
+		out[q].fwd_pkts = hwstat->mp_stat_structs_hb[q].fwd_packets;
+		out[q].dbs_drop_pkts = hwstat->mp_stat_structs_hb[q].dbs_drop_packets;
+		out[q].flush_octets = hwstat->mp_stat_structs_hb[q].flush_bytes;
+		out[q].drop_octets = hwstat->mp_stat_structs_hb[q].drop_bytes;
+		out[q].fwd_octets = hwstat->mp_stat_structs_hb[q].fwd_bytes;
+		out[q].dbs_drop_octets = hwstat->mp_stat_structs_hb[q].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group (shared by the capture-mode Rx and Tx port
+ * reply layouts) from the driver-side port counters into the reply record.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize per-port Rx statistics into the reply buffer. The record layout
+ * depends on whether the adapter runs in vswitch (virt) or capture mode.
+ * Returns the number of 64-bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	hd->nb_counters = (uint64_t)nbp;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (hd->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)hd->data;
+
+		for (p = 0; p < nbp; p++) {
+			out[p].octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			out[p].pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			out[p].drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			out[p].qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_octets;
+			out[p].qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct rx_type_fields_cap_s *out =
+		(struct rx_type_fields_cap_s *)hd->data;
+
+	for (p = 0; p < nbp; p++) {
+		/* Alias the source record once; every field below reads it */
+		struct port_counters_v2 *cnt =
+			&hwstat->cap.mp_stat_structs_port_rx[p];
+
+		copy_rmon_stat(cnt, &out[p].rmon);
+
+		/* Rx only port counters */
+		out[p].mac_drop_events = cnt->mac_drop_events;
+		out[p].pkts_lr = cnt->pkts_lr;
+		out[p].duplicate = cnt->duplicate;
+		out[p].pkts_ip_chksum_error = cnt->pkts_ip_chksum_error;
+		out[p].pkts_udp_chksum_error = cnt->pkts_udp_chksum_error;
+		out[p].pkts_tcp_chksum_error = cnt->pkts_tcp_chksum_error;
+		out[p].pkts_giant_undersize = cnt->pkts_giant_undersize;
+		out[p].pkts_baby_giant = cnt->pkts_baby_giant;
+		out[p].pkts_not_isl_vlan_mpls = cnt->pkts_not_isl_vlan_mpls;
+		out[p].pkts_isl = cnt->pkts_isl;
+		out[p].pkts_vlan = cnt->pkts_vlan;
+		out[p].pkts_isl_vlan = cnt->pkts_isl_vlan;
+		out[p].pkts_mpls = cnt->pkts_mpls;
+		out[p].pkts_isl_mpls = cnt->pkts_isl_mpls;
+		out[p].pkts_vlan_mpls = cnt->pkts_vlan_mpls;
+		out[p].pkts_isl_vlan_mpls = cnt->pkts_isl_vlan_mpls;
+		out[p].pkts_no_filter = cnt->pkts_no_filter;
+		out[p].pkts_dedup_drop = cnt->pkts_dedup_drop;
+		out[p].pkts_filter_drop = cnt->pkts_filter_drop;
+		out[p].pkts_overflow = cnt->pkts_overflow;
+		out[p].pkts_dbs_drop = cnt->pkts_dbs_drop;
+		out[p].octets_no_filter = cnt->octets_no_filter;
+		out[p].octets_dedup_drop = cnt->octets_dedup_drop;
+		out[p].octets_filter_drop = cnt->octets_filter_drop;
+		out[p].octets_overflow = cnt->octets_overflow;
+		out[p].octets_dbs_drop = cnt->octets_dbs_drop;
+		out[p].ipft_first_hit = cnt->ipft_first_hit;
+		out[p].ipft_first_not_hit = cnt->ipft_first_not_hit;
+		out[p].ipft_mid_hit = cnt->ipft_mid_hit;
+		out[p].ipft_mid_not_hit = cnt->ipft_mid_not_hit;
+		out[p].ipft_last_hit = cnt->ipft_last_hit;
+		out[p].ipft_last_not_hit = cnt->ipft_last_not_hit;
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-port Tx statistics into the reply buffer. The record layout
+ * depends on whether the adapter runs in vswitch (virt) or capture mode.
+ * Returns the number of 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	hd->nb_counters = (uint64_t)nbp;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (hd->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)hd->data;
+
+		for (p = 0; p < nbp; p++) {
+			out[p].octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			out[p].pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			out[p].drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			out[p].qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_octets;
+			out[p].qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct tx_type_fields_cap_s *out =
+		(struct tx_type_fields_cap_s *)hd->data;
+
+	for (p = 0; p < nbp; p++) {
+		copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+			       &out[p].rmon);
+		/* Packet count is overridden by the driver-maintained total */
+		out[p].rmon.pkts = hwstat->a_port_tx_packets_total[p];
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * "get layout_version": reply with the FPGA statistics layout version
+ * as a single int.
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(*reply));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * "get flm_layout_version": reply with the FlowMatcher reply layout
+ * version (1 for flm_stat_ver < 18, otherwise 2) as a single int.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(*reply));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type.
+ * Additionally, returns total number of records for this type (ie number
+ * of queues, ports, etc) through num_records.
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	/* Bug fix: initialize both locals; the original switch had no
+	 * default case, so an unexpected type value would have returned
+	 * uninitialized stack values (undefined behavior).
+	 */
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	default:
+		/* Unknown type: report zero records/counters */
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Allocate a reply buffer sized for the requested stat type and fill it
+ * with the given serializer while holding the driver's stat lock.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nb_records;
+	int n_bytes = get_size(stat, stype, &nb_records) * sizeof(uint64_t);
+	uint64_t *buf = malloc(n_bytes);
+
+	if (!buf) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Serialize under the stat lock for a consistent view */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, buf, nb_records);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)buf;
+	*len = n_bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+
+/*
+ * Shared guard for the "get" handlers: when the statistics module is not
+ * initialized, emit an empty error reply and return 0; otherwise return 1.
+ * (Extracted to remove the identical check duplicated in five handlers.)
+ */
+static int stat_ready(struct stat_hdl *stat, char **data, int *len)
+{
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return 0;
+	}
+	return 1;
+}
+
+/* "get flm": FlowMatcher statistics */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat_ready(stat, data, len))
+		return REQUEST_ERR;
+	return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER, read_flm);
+}
+
+/* "get colors": per-color statistics */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat_ready(stat, data, len))
+		return REQUEST_ERR;
+	return do_get_stats(stat, data, len, STAT_TYPE_COLOR, read_colors);
+}
+
+/* "get queues": per-host-buffer queue statistics */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat_ready(stat, data, len))
+		return REQUEST_ERR;
+	return do_get_stats(stat, data, len, STAT_TYPE_QUEUE, read_queues);
+}
+
+/* "get rx_counters": per-port Rx statistics */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat_ready(stat, data, len))
+		return REQUEST_ERR;
+	return do_get_stats(stat, data, len, STAT_TYPE_RX, read_rx_counters);
+}
+
+/* "get tx_counters": per-port Tx statistics */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat_ready(stat, data, len))
+		return REQUEST_ERR;
+	return do_get_stats(stat, data, len, STAT_TYPE_TX, read_tx_counters);
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Locate the snapshot record for client_id in the singly linked list.
+ * If parent is non-NULL it receives the predecessor node (NULL when the
+ * match is the list head), which the caller can use for unlinking.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+		prev = cur;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the client's snapshot record, allocating and linking in a fresh
+ * one when none exists yet. Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *rec = find_client_snap_data(stat, client_id, NULL);
+
+	if (rec)
+		return rec;
+
+	rec = malloc(sizeof(*rec)); /* caller handles NULL on malloc failure */
+	if (rec) {
+		rec->client_id = client_id;
+		rec->buffer = NULL;
+		/* push onto the head of the list */
+		rec->next = stat->snaps_base;
+		stat->snaps_base = rec;
+	}
+	return rec;
+}
+
+/*
+ * "snapshot": read all statistic groups back-to-back under the stat lock
+ * into a per-client buffer, so the client can subsequently fetch the
+ * individual groups ("get snapshot ...") from one coherent collection.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/* Bug fix: must not jump to err_out here - the stat lock is
+		 * not held yet, and unlocking a mutex that is not locked is
+		 * undefined behavior.
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	/* Drop any previous snapshot; free(NULL) is a no-op */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	/* Only reached with the stat lock held */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one group of a previously taken snapshot into a freshly allocated
+ * reply buffer. Fails with NO_DATA when the client has no snapshot.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* Bug fix: was "if (!data)", which tested the out-parameter itself
+	 * and therefore never caught a malloc failure.
+	 */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": return the color group of the client's snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": return the queue group of the client's snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": return the Rx port group of the snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": return the Tx port group of the snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id is forwarded to the dispatcher, so it must not be
+	 * annotated _unused (the original annotation was misleading).
+	 */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Free a reply buffer allocated by one of the stat request handlers. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Unlink and free all snapshot state owned by a disconnecting client.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink from the list before freeing */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);	/* free(NULL) is a no-op, no guard needed */
+	free(snaps);
+}
+
+/* Module operations registered with the NtConnect framework
+ * (name, version, request dispatcher, reply deallocator, client cleanup).
+ */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module with the NtConnect framework.
+ * The module is only activated when the FPGA's statistics layout version
+ * is one of layout_versions_supported[]. Returns -1 on failure.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Robustness fix: every request handler NULL-checks mp_nthw_stat,
+	 * but this function dereferenced it unconditionally below.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: statistics module not initialized. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-adapter module state; a slot is claimed in ntconn_test_register() */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Top-level command table for the test module */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * "test": echo handler. Validates the received blob and replies with a
+ * copy of the received test payload; on validation failure the reply
+ * carries only a status code.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	if (number < 0) {
+		/* Bug fix: a negative count would wrap the unsigned size
+		 * computation below and could slip past the length check.
+		 */
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Error reply carries only the status code */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* Flow API function selectors (not referenced in this file's visible code) */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/* Entry point for all requests to the test module. */
+static int test_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id is forwarded to the dispatcher, so it must not be
+	 * annotated _unused (the original annotation was misleading).
+	 */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Free a reply buffer previously returned by test_request(). */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	free(data);	/* free(NULL) is a no-op, no guard needed */
+}
+
+/* Per-client teardown hook; the test module keeps no per-client state. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations registered with the NtConnect framework
+ * (name, version, request dispatcher, reply deallocator, client cleanup).
+ */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register the test module for a new adapter with the NtConnect framework.
+ * Claims the first free handle slot; fails with -1 when all slots are taken.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (test_hdl[slot].drv == NULL) {
+			test_hdl[slot].drv = drv;
+			return register_ntconn_mod(&drv->p_dev->addr,
+						   (void *)&test_hdl[slot],
+						   &ntconn_test_op);
+		}
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "Cannot register more adapters into NtConnect framework");
+	return -1;
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* Re: [PATCH v2 1/8] net/ntnic: initial commit which adds register defines
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (6 preceding siblings ...)
  2023-08-17 14:43   ` [PATCH v2 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
@ 2023-08-17 22:08   ` Tyler Retzlaff
  2023-08-18 11:01     ` Mykola Kostenok
  7 siblings, 1 reply; 142+ messages in thread
From: Tyler Retzlaff @ 2023-08-17 22:08 UTC (permalink / raw)
  To: Mykola Kostenok; +Cc: dev, ckm

On Thu, Aug 17, 2023 at 04:43:05PM +0200, Mykola Kostenok wrote:
> From: Christian Koue Muf <ckm@napatech.com>
> 
> The NTNIC PMD does not rely on a kernel space Napatech driver,
> thus all defines related to the register layout is part of the PMD
> code, which will be added in later commits.
> 
> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
> ---
>  drivers/net/meson.build                       |    1 +
>  drivers/net/ntnic/include/fpga_model.h        |   99 +
>  drivers/net/ntnic/meson.build                 |   29 +
>  drivers/net/ntnic/nthw/nthw_register.h        |   19 +
>  .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
>  .../nthw/supported/nthw_fpga_instances.h      |   14 +
>  .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
>  .../supported/nthw_fpga_parameters_defs.h     |  209 +
>  .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
>  9 files changed, 12004 insertions(+)
>  create mode 100644 drivers/net/ntnic/include/fpga_model.h
>  create mode 100644 drivers/net/ntnic/meson.build
>  create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
>  create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
>  create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
>  create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
>  create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
>  create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
> 

...

> diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
> new file mode 100644
> index 0000000000..99569c2843
> --- /dev/null
> +++ b/drivers/net/ntnic/meson.build
> @@ -0,0 +1,29 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2020-2023 Napatech A/S
> +
> +# cflags
> +cflags += [
> +    '-std=c11',
> +]

shouldn't be needed dpdk requires c11 conformant compiler and configures
it in the root project meson.build with c_std=c11


^ permalink raw reply	[flat|nested] 142+ messages in thread

* RE: [PATCH v2 1/8] net/ntnic: initial commit which adds register defines
  2023-08-17 22:08   ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Tyler Retzlaff
@ 2023-08-18 11:01     ` Mykola Kostenok
  0 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 11:01 UTC (permalink / raw)
  To: Tyler Retzlaff; +Cc: dev, Christian Koue Muf

Thanks. Good catch. Accepted.

> -----Original Message-----
> From: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Sent: Friday, August 18, 2023 1:09 AM
> To: Mykola Kostenok <mko-plv@napatech.com>
> Cc: dev@dpdk.org; Christian Koue Muf <ckm@napatech.com>
> Subject: Re: [PATCH v2 1/8] net/ntnic: initial commit which adds register
> defines
> 
> On Thu, Aug 17, 2023 at 04:43:05PM +0200, Mykola Kostenok wrote:
> > From: Christian Koue Muf <ckm@napatech.com>
> >
> > The NTNIC PMD does not rely on a kernel space Napatech driver, thus
> > all defines related to the register layout is part of the PMD code,
> > which will be added in later commits.
> >
> > Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> > Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
> > ---
> >
> >  drivers/net/meson.build                       |    1 +
> >  drivers/net/ntnic/include/fpga_model.h        |   99 +
> >
> >  drivers/net/ntnic/meson.build                 |   29 +
> >  drivers/net/ntnic/nthw/nthw_register.h        |   19 +
> >  .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
> >  .../nthw/supported/nthw_fpga_instances.h      |   14 +
> >  .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
> >  .../supported/nthw_fpga_parameters_defs.h     |  209 +
> >  .../nthw/supported/nthw_fpga_registers_defs.h | 7277
> > +++++++++++++++++
> >  9 files changed, 12004 insertions(+)
> >  create mode 100644 drivers/net/ntnic/include/fpga_model.h
> >  create mode 100644 drivers/net/ntnic/meson.build
> >  create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
> >  create mode 100644
> > drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
> >  create mode 100644
> > drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
> >  create mode 100644
> > drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
> >  create mode 100644
> > drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
> >  create mode 100644
> > drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
> >
> 
> ...
> 
> > diff --git
> > a/drivers/net/ntnic/meson.build
> > b/drivers/net/ntnic/meson.build
> > new file mode 100644
> > index 0000000000..99569c2843
> > --- /dev/null
> > +++ b/drivers/net/ntnic/meson.build
> > @@ -0,0 +1,29 @@
> > +# SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2020-2023
> > +Napatech A/S
> > +
> > +# cflags
> > +cflags += [
> > +    '-std=c11',
> > +]
> 
> shouldn't be needed dpdk requires c11 conformant compiler and configures it
> in the root project
> meson.build with c_std=c11


^ permalink raw reply	[flat|nested] 142+ messages in thread

* [PATCH v4 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (7 preceding siblings ...)
  2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-18 18:41 ` Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (11 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout is part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 11999 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/* Bus types through which an FPGA module's registers are addressed. */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias for the first SPI bus */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/* Register access semantics (RW/RO/WO, read-clear, or mixed fields). */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/* Static description of one bit-field within a register. */
+struct nt_fpga_field_init {
+	int id;            /* generated field id (nthw_fpga_registers_defs.h) */
+	uint16_t bw;       /* field width in bits */
+	uint16_t low;      /* lowest bit position within the register */
+	uint64_t reset_val; /* value after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/* Static description of one register and its fields. */
+struct nt_fpga_register_init {
+	int id;            /* generated register id */
+	uint32_t addr_rel; /* address relative to the module base */
+	uint16_t bw;       /* register width in bits */
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;     /* number of entries in fields[] */
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/* Static description of one module instance within an FPGA image. */
+struct nt_fpga_module_init {
+	int id;
+	int instance;      /* instance number when a module occurs repeatedly */
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base; /* module base address on the given bus */
+	int nb_registers;   /* number of entries in registers[] */
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* One build-time product parameter (id/value pair). */
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/* Top-level description of a supported FPGA image: identity, build info,
+ * product parameters and the list of contained modules.
+ */
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params; /* number of entries in product_params[] */
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;     /* number of entries in modules[] */
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..686d344d91
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+/* Generated field tables; entries are { field id, bit width, lsb, reset }
+ * per struct nt_fpga_field_init.
+ */
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+/* CAT CFN field tables — { field id, bit width, lsb, reset }. */
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+/* CAT COT/CTE/CTS field tables — { field id, bit width, lsb, reset }. */
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+/* CAT DCT/EXO field tables — { field id, bit width, lsb, reset }. */
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+/* CAT FTE0/FTE1/JOIN/KCC field tables — { field id, bit width, lsb, reset }. */
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+/* CAT KCE0/KCE1/KCS0/KCS1/LEN field tables — { field id, bit width, lsb, reset }. */
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+/* CAT RCK field tables — { field id, bit width, lsb, reset }. */
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+/* CAT module register list; entries are { reg id, relative address,
+ * bit width, access type, reset value, field count, field table }
+ * per struct nt_fpga_register_init.
+ */
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+/* CPY writer 0..4 field tables — { field id, bit width, lsb, reset };
+ * the five writers share an identical field layout.
+ */
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+/* CPY module register list — { reg id, relative address, bit width,
+ * access type, reset value, field count, field table }.
+ */
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+/* CSU module field and register tables — same { id, bw, lsb, reset }
+ * and { reg id, addr, bw, type, reset, nb_fields, fields } layouts.
+ */
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+/* DBS RX field tables — { field id, bit width, lsb, reset }. */
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (flow matcher) module: per-register field layouts followed by the
+ * register map.  Field entry layout: { field id, bit width, lsb offset,
+ * reset value }.  Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* Single 32-bit counter fields for the FLM_STAT_* statistics registers. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/*
+ * FLM register map: { id, address/index, bit width, access type,
+ * reset value, field count, field table }.
+ */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG module: two parallel instances (suffix 0/1) of the same register set.
+ * Field entry layout: { field id, bit width, lsb offset, reset value }.
+ * Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* NOTE(review): instance 1 resets STREAMID to 1 where instance 0 uses 0 —
+ * consistent with the register-level reset values below, so presumably
+ * intentional per-instance defaults. */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+/* GFG register map: { id, address/index, bit width, access type,
+ * reset value, field count, field table }. */
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF module: per-register field layouts followed by the register map.
+ * Field entry layout: { field id, bit width, lsb offset, reset value }.
+ * Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+/* GMF register map: { id, address/index, bit width, access type,
+ * reset value, field count, field table }.  The two RC1-typed entries
+ * (sticky/max-delayed) presumably clear on read — confirm against the
+ * REGISTER_TYPE_RC1 definition. */
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY module: direction/config and value registers for two PHY ports.
+ * Field entry layout: { field id, bit width, lsb offset, reset value }.
+ * Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+/* GPIO_PHY register map.  Register resets 170 (0b10101010) and 17
+ * (0b10001) are the OR of the per-field reset values above. */
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU module: recipe control/data register layouts and register map.
+ * Field entry layout: { field id, bit width, lsb offset, reset value }.
+ * Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+/* HFU register map; field counts (2, 31) match the tables above. */
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface) module: identification, config and statistics
+ * registers.  Field entry layout: { field id, bit width, lsb offset,
+ * reset value }.  Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	/* 1689706895 — presumably a Unix-epoch build timestamp (Jul 2023);
+	 * confirm against the FPGA build metadata. */
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+/* Product id 9563/55/24 matches this file's FPGA image
+ * (nthw_fpga_9563_055_024): group<<16 | ver<<8 | rev = 626734872,
+ * the register reset value below. */
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* Scratch/test registers: resets 287454020 = 0x11223344 and
+ * 2864434397 = 0xAABBCCDD. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+/* HIF register map: { id, address/index, bit width, access type,
+ * reset value, field count, field table }. */
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH (hash) module: recipe control/data register layouts and map.
+ * Field entry layout: { field id, bit width, lsb offset, reset value }.
+ * Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+/* HSH register map; field counts (2, 23) match the tables above. */
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST module: recipe control/data register layouts and map.
+ * Field entry layout: { field id, bit width, lsb offset, reset value }.
+ * Generated data — do not edit values by hand.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+/* HST register map; field counts (2, 17) match the tables above. */
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/* IFR RCP_CTRL fields: { field id, bit width, LSB position, reset value }. */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* IFR RCP_DATA fields: per-recipe enable bit and a 14-bit MTU value. */
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+/* IFR module register map: { id, addr, width, access, reset, #fields, fields }. */
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC controller field/register tables. The register names and addresses
+ * (CR/SR/RX_FIFO/TX_FIFO/TSUSTA/TSUSTO/THDSTA/TLOW/THIGH/...) match the
+ * Xilinx AXI IIC controller layout — NOTE(review): hedged, confirm against
+ * the AXI IIC product guide (PG090).
+ * Field rows are { field id, bit width, LSB position, reset value }.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+/* IIC control register bits (enable, master/slave, TX/ACK control, resets). */
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+/* Global interrupt enable (single bit at position 31). */
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+/* General-purpose output bit. */
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+/* Interrupt enable bits, one per interrupt source 0..7. */
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+/* Interrupt status bits, mirroring the IER layout. */
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+/* Receive FIFO data byte. */
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+/* Receive FIFO occupancy. */
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+/* Receive FIFO programmable interrupt threshold compare value. */
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+/* Soft-reset key register. */
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+/*
+ * Status register bits. Note RXFIFO_EMPTY and TXFIFO_EMPTY reset to 1
+ * (FIFOs empty out of reset); the IIC_SR register reset value 192 below
+ * is consistent with bits 6 and 7 set.
+ */
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+/* Bus-free timing parameter. */
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+/* Upper 3 bits of a 10-bit slave address. */
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+/* Data hold time parameter. */
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+/* START hold time parameter. */
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+/* SCL high period parameter. */
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+/* SCL low period parameter. */
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+/* Data setup time parameter. */
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+/* START setup time parameter. */
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+/* STOP setup time parameter. */
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+/* Transmit FIFO entry: data byte plus START/STOP condition flags. */
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+/* Transmit FIFO occupancy. */
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+/*
+ * IIC module register map: { id, address, bit width, access type, reset,
+ * field count, field table }. Note the mix of RW/RO/WO registers and the
+ * SR reset value of 192 (both FIFO-empty status bits set).
+ */
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/* INS RCP_CTRL fields: { field id, bit width, LSB position, reset value }. */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* INS RCP_DATA fields: dynamic selector, offset and length of an insert. */
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+/* INS module register map: { id, addr, width, access, reset, #fields, fields }. */
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (key matcher) field/register tables.
+ * Field rows are { field id, bit width, LSB position, reset value };
+ * e.g. the KM_CAM_DATA_Wn entries below are six 32-bit words packed at
+ * offsets 0/32/64/96/128/160, which confirms the width/offset columns.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* CAM entry: six 32-bit key words W0..W5 followed by flow-type nibbles. */
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+/* Recipe table indirection (address + burst count). */
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/*
+ * KM recipe data: 44 fields spanning a 781-bit record (see km_registers).
+ * Covers key extraction selectors (QW/DW/SW dyn/ofs/sel), A/B bank setup,
+ * 384/192-bit masks and flow-type mapping.
+ */
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+/* TCQ-ready status bit. */
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+/* TCAM table indirection. */
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* Single 72-bit TCAM data word. */
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+/* TCI table indirection. */
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* TCI entry: 32-bit color plus flow-type nibble. */
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+/* TCQ table indirection. */
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+/* TCQ entry: bank mask and qualifier. */
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+/*
+ * KM module register map: { id, address, bit width, access type, reset,
+ * field count, field table }. Field counts match the arrays above
+ * (e.g. 44 for KM_RCP_DATA, 12 for KM_CAM_DATA).
+ */
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC/PCS field and register tables (per-port Ethernet MAC + PCS, with
+ * RS-FEC and GTY transceiver control/status).
+ * Field rows are { field id, bit width, LSB position, reset value }.
+ * NOTE(review): the GTY_* and FEC_* register names track the Xilinx GTY
+ * transceiver / RS-FEC attribute set — confirm against the transceiver
+ * user guide before relying on exact semantics.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+/* 640-bit BIP error counter vector. */
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+/* Per-virtual-lane block lock bits (20 lanes). */
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+/* Per-virtual-lane block lock "changed" latch bits. */
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/* Link debounce controls; latency fields reset to 10, port control to 2. */
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+/* DRP (dynamic reconfiguration port) access: address/data plus busy/done. */
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+/* RS-FEC control input bits. */
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+/* FEC codeword counter. */
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+/* Per-lane FEC symbol error counters, lanes 0..3. */
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+/* Per-lane FEC alignment delays, lanes 0..3. */
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+/* FEC lane mapping (2 bits per lane). */
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+/* FEC status: per-lane AM lock, lane alignment, bypass/valid/hi-SER. */
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+/* Uncorrectable codeword counter. */
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+/* GTY RX controls per lane: CDR hold, equalizer reset, LPM, polarity, rate. */
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+/* GTY TX controls per lane: inhibit and polarity. */
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+/* GTY TX differential swing per lane; each 5-bit field resets to 24. */
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+/* GTY loopback mode per lane (3-bit selector). */
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+/* GTY TX post-cursor emphasis per lane; each 5-bit field resets to 20. */
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+/* PRBS pattern selection per lane, RX in the high half, TX in the low. */
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+/* GTY TX pre-cursor emphasis per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+/* GTY RX buffer status per lane, plus "changed" latches. */
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+/* Eye-scan and PRBS test controls per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+/* Eye-scan / PRBS error status per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+/* Transceiver reset-done and TX buffer status per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+/* Aggregated link status: absence, faults, link-down counter, NIM IRQ. */
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+/*
+ * Core MAC/PCS configuration: path/core resets, enables, test patterns.
+ * TX_ENABLE and TX_FCS_REMOVE reset to 1 (register reset value 272 below
+ * = bits 4 and 8 set).
+ */
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+/* Maximum packet length; resets to 10000 (jumbo frames). */
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+/* PHY/MAC glue: TX source select (host/TFG/RX loop) and timestamp position. */
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+/* PHY status: alarm, module present, RX loss-of-signal. */
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+/* PCS RX status bits (live values). */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+/* PCS RX status bits (latched versions of the register above). */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+/* PCS TX status: faults, PTP FIFO errors, over/underflow, with latches. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+/* Per-virtual-lane sync bits (20 lanes). */
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+/* Per-virtual-lane sync error bits. */
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+/* Test-pattern error counter. */
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+/*
+ * Timestamp path compensation in RX/TX direction. Resets: RX=1451,
+ * TX=1440 (register reset 94373291 = 1440<<16 | 1451).
+ */
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+/* Per-virtual-lane demux lock bits. */
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+/* Per-virtual-lane demux lock "changed" latch bits. */
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/*
+ * MAC_PCS module register map: { id, address, bit width, access type,
+ * reset value, field count, field table }. REGISTER_TYPE_MIXED marks
+ * registers combining RW control and RO status fields (DRP_CTRL,
+ * PHYMAC_MISC).
+ */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC RX statistics counters — each register holds a single 32-bit count.
+ * Field rows are { field id, bit width, LSB position, reset value }.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+/* MAC RX module register map — all read-only counter registers. */
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC TX statistics counters — each register holds a single 32-bit count.
+ * Field rows are { field id, bit width, LSB position, reset value }.
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+/* MAC TX module register map — all read-only counter registers. */
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI read traffic-generator (PCI_RD_TG) tables.
+ * Field rows are { field id, bit width, LSB position, reset value }.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+/* RAM address used to program the read descriptor table. */
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+/* Low/high halves of the 64-bit physical read address. */
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+/* Read request descriptor: size, host id, wait and wrap flags. */
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+/* Number of read iterations to run. */
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+/* PCI_RD_TG register map: { id, addr, width, access, reset, #fields, fields }. */
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA: PCIe test analyzer - good/bad packet and error counters.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG: PCIe write traffic/test generator (mirror of PCI_RD_TG, plus
+ * INC_MODE and a RW sequence register).
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+/* WRDATA0/1 hold the low/high halves of the 64-bit host physical address. */
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+/* Request descriptor word: size in bits 0-21, HID in 22-27, flags in 29-31. */
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/*
+ * PDB: packet descriptor builder - recipe RAM accessed via CTRL (address/
+ * count) and DATA (record contents) register pair.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 67-bit recipe record; lsb positions span the full record width. */
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/*
+ * PDI: serial (UART-style) debug interface - control, data, prescaler,
+ * status and soft-reset registers.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+/* Prescaler defaults to 3 (matches the register-level reset below). */
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/*
+ * PTP1588: IEEE 1588 PTP block - configuration, GPIO, MAC management
+ * interface, RX host DMA ring and TX packet injection/timestamping.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+/* GPIO: bit 0 is the PHY power-down (active-low) pin, bits 1-9 general. */
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+/* Latched-high snapshot of the GPIO inputs. */
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+/* Latched-low snapshot; resets to all-ones (511 = 9-bit mask, 1). */
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+/* MAC management interface; RDY (bit 17) defaults set -> reset 131072. */
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+/* TX injection data path: FIRST/MID words plus LAST1-4 for 1-4 tail bytes. */
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+/* Reset 3584 = bits 9-11 set (DB_FULL, DB_ERR, RDY defaults). */
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/*
+ * QSL: queue selection logic - queue-enable, queue-state, recipe and
+ * unmatched-queue tables, each a CTRL(address/count)+DATA RAM pair.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/*
+ * QSPI: quad SPI flash controller. Register offsets and bit layout match
+ * the Xilinx AXI Quad SPI IP (offsets here appear to be in 32-bit words,
+ * i.e. 4x the byte offsets in the Xilinx register map - confirm).
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+/* Interrupt status; bit positions mirror the enable register above. */
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+/* Reset 37 = RXEMPTY | TXEMPTY | SLVMS set after reset. */
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+/* Slave-select; 4294967295 = 0xFFFFFFFF (all slaves deselected). */
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * RAC: register access controller - debug access, RAB (register access bus)
+ * inbound/outbound buffers and their DMA ring pointers. Unlike the other
+ * modules, offsets here are larger values (4160, 4168, ...) spaced 8 apart.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Free-space counters reset to 511 = 9-bit full depth for both buffers. */
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+/* DMA ring base addresses, split into 32-bit high/low halves. */
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/*
+ * RFD: receive frame decoder - frame-size limit and protocol constants.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+/* 9018 bytes: jumbo-frame maximum. */
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+/* 33024 = 0x8100, the IEEE 802.1Q VLAN TPID, for both tag positions. */
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+/* 4789 is the IANA-assigned VXLAN UDP destination port. */
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/*
+ * RMC: RX MAC control/merger - per-source blocking and status.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+/* CTRL reset 771 = 0x303: STATT+KEEPA blocked and both MAC ports blocked. */
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/*
+ * RPL: packet replacer - recipe, extension and 128-bit replacement-value
+ * tables, each a CTRL(address/count)+DATA RAM pair.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/*
+ * RPP_LR: RX packet processor, local retransmit - recipe RAMs, including an
+ * IFR (IP fragmentation/MTU) recipe table.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/*
+ * RST9563: reset/clock controller for the NT200A02 (9563) FPGA image -
+ * clock selects, power-up controls, per-domain reset bits and MMCM/PLL
+ * lock status (current and sticky).
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* Reset register defaults to 8191 = 0x1FFF: bits 0-12 asserted at power-up. */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+/* Sticky unlock flags; RC1 access (read-and-clear) at the register level. */
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/*
+ * SLC: slicer - recipe RAM (CTRL address/count + DATA record).
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/*
+ * SPIM: SPI master - prescaler config, control, data, status, soft reset.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+/* Status reset 6 = TXEMPTY | RXEMPTY set after reset. */
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/*
+ * SPIS: SPI slave - control, data, shared RAM window, status, soft reset.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+/* Status reset 6 = TXEMPTY | RXEMPTY set after reset. */
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/*
+ * STA: statistics module - byte/packet/error counters plus a host DMA
+ * address for statistics delivery.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+/* 64-bit host buffer address split into 32-bit LSB/MSB halves. */
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/*
+ * TEMPMON: die temperature monitor - alarm flags (with over-temperature
+ * override bits) and a 12-bit raw temperature reading.
+ * Field rows: { field_id, width(bits), lsb, reset }.
+ * Register rows: { reg_id, offset, width(bits), access, reset, nfields, fields }.
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 }, /* NOTE(review): every other connector uses ..._SAMPLE_LO_NS; the _TIME suffix here mirrors the generated defs — confirm against nthw_fpga_registers_defs.h */
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	140, /* nb product params (excl. {0,-1} end marker) */ product_parameters, 48, /* nb modules */ fpga_modules,
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[]; /* table of supported FPGA images — presumably NULL-terminated; confirm in the defining .c */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000; /* product 9563, version 55, revision 24, patch 0 */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_ /* NOTE(review): guard names starting with '_' + uppercase are reserved identifiers (C11 7.1.3); prefer NTHW_FPGA_MODULES_DEFS_H */
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L) /* NOTE(review): module id 32L is unassigned — gap in the generated sequence; confirm it is intentional (removed module?) */
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End-of-list indicator: MOD_UNKNOWN_MAX must remain the last real
+ * module id; only aliases may be defined below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Total number of module ids; keep equal to MOD_UNKNOWN_MAX */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM: egress queue manager(?) control/debug/status — CRC, sync and overflow error bits; verify expansion */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM: frame handler(?) — back-pressure, CRC error, fill-level and packet-drop counters; verify expansion */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM: flow learn/match module — learn (LRN), info (INF) and status (STA) record interfaces, load and statistics counters, recipes */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG: per-stream (0-7) generator configuration — burst size, control, run, size mask, stream id */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF: TX MAC feed(?) — IFG (inter-frame gap) adjustment and timestamp-inject control/status; verify expansion */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY: GPIO lines for two PHY ports — QSFP-style signals (LPMODE, MODPRS_B, RESET_B, INT_B, PLL_INTR); presumably QSFP cages — verify */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS: GPIO for 8 ports — SFP-style signals (RXLOS, TXDISABLE, TXFAULT) per port */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP: SFP+ module GPIO (ABS, RS, RXLOS, TXDISABLE, TXFAULT) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU: header field updater(?) recipe — length fields A/B/C, TTL position/write, offsets, tunnel; verify expansion */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF: host (PCIe) interface — config (ext tag, max read/TLP), product id, UUID, status and test registers */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH: hash recipe registers (Toeplitz key/seed, word selection/sorting and masks) */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST: header stripper(?) recipe — start/end offsets, three modifier command slots, strip mode; verify expansion */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G: 10G PHY core — indirect register access, misc control/loopback, PHY status, test frame generator (TFG) */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR: recipe with per-entry enable and MTU fields (presumably IP fragmentation — verify expansion) */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC: I2C controller — control/status/interrupt registers, RX/TX FIFOs, slave address and bus timing parameters */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS: inserter(?) recipe — dynamic selector, length and offset fields; verify expansion */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA: I/O action(?) recipe — VLAN push/pop/TPID, tunnel pop, queue override, ROA egress per-port data; verify expansion */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF: IP fragment handling(?) — control, expire/timeout, recipes, unmatched-packet queue tables, statistics; verify expansion */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM: key matcher — CAM entry registers and match recipe registers (section continues beyond this chunk) */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX module: register/field index defines (IDs 4712L-4779L) */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG module: register/field index defines (IDs 4780L-4799L) */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX module: register/field index defines (IDs 4800L-4847L) */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU module: register/field index defines (IDs 4848L-4864L) */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG module: register/field index defines (IDs 4865L-4914L) */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v4 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
@ 2023-08-18 18:41   ` Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   71 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15441 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 686d344d91..46913c0c74 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -5,13 +5,45 @@
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+#define clk_profile_size_error_msg "size test failed"
+
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Probe an I2C address for a Silicon Labs clock chip and conclude the part
+ * number from its identification registers.
+ *
+ * Returns the detected part number (5338/5340/5341), or -1 if no SiLabs
+ * part was recognized or the I2C read failed.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* pack the 8 ident bytes big-endian into one u64 for logging */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			/*
+			 * NOTE(review): decimal 38 here while the sibling
+			 * comparisons use hex (0x40/0x41/0x53) - confirm
+			 * against the Si5338 ident register whether 0x38 was
+			 * intended.
+			 */
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
/*
 * Calculate CRC-16-CCITT of the passed data.
 * Polynomial x^16 + x^12 + x^5 + 1 (0x1021), initial value 0 -- i.e. the
 * XMODEM variant (X.25, HDLC, Bluetooth, SD and many others use the same
 * polynomial; known as CRC-CCITT).
 *
 * buffer: data to checksum; const-qualified since it is only read
 * length: number of bytes to process
 *
 * Returns the 16-bit CRC (0 for zero-length input).
 */
static uint16_t crc16(const uint8_t *buffer, size_t length)
{
	uint16_t seed = 0;

	while (length--) {
		seed = (uint16_t)(seed >> 8 | seed << 8); /* swap CRC bytes */
		seed = (uint16_t)(seed ^ *buffer++); /* mix in next data byte */
		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
		seed = (uint16_t)(seed ^ seed << 8 << 4); /* seed ^= seed << 12 */
		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1); /* seed ^= (seed & 0xff) << 5 */
	}
	return seed;
}
+
+/*
+ * Probe the board-management AVR over SPI and read out its SPI protocol
+ * version, firmware version, system info (SYSINFO2 with SYSINFO fallback)
+ * and the VPD block (part/serial numbers, board name, MAC addresses) into
+ * p_fpga->p_fpga_info->nthw_hw_info.
+ *
+ * Returns the result of the last SPI transfer performed (0 on success),
+ * or -1 if no SPI instance could be allocated.
+ */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		/* Aggregate of everything read from the AVR in this probe. */
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		/*
+		 * NOTE(review): res from this and the following transfers is
+		 * overwritten by each subsequent transfer - earlier failures
+		 * are only visible through the logged values.
+		 */
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		/* reply layout: 3 version bytes, 50-byte ver string, 20-byte plat id */
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		/* export platform id string; forced NUL termination below */
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			/* parse the 16-byte sysinfo container (offsets fixed) */
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO - fallback, same container layout as above */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				/* legacy SYSINFO: hw_id_emulated is also populated */
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			/* stored CRC-16 occupies the last two bytes of the VPD block */
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				/* VPD fields are laid out back to back; offsets accumulate */
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					/* feature mask is valid iff its stored complement matches */
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					/*
+					 * Pack the 6-byte MAC into the low 48 bits of a
+					 * u64; on hosts where ntohl() changes the value
+					 * (little-endian) the two 32-bit words are
+					 * byte-swapped and exchanged to get host order.
+					 */
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			/* export MAC info even when the CRC check failed (fields then zero) */
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint64_t n_fpga_ident;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	n_fpga_ident = p_fpga_info->n_fpga_ident;
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info) {
+		if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+			res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the NTHW FPGA identification/init layer. */
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* identify, look up and initialize an FPGA; reset with shutdown */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* read product params and derive the adapter profile */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* probe the board-management AVR (versions, sysinfo, VPD/MAC) */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* scan a range of IIC instances for attached I2C devices */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+/* single-byte I2C register access helpers; read returns the byte or -1 */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* detect a SiLabs clock chip; returns its part number (e.g. 5340) or -1 */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* program an Si5340 clock synthesizer from a format-2 profile table */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res = -1;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+/* Forward declarations so this header is self-contained */
+struct fpga_info_s;
+struct nthw_fpga_rst_nt200a0x;
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			   struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve and cache all RST9563 register/field pointers used by the
+ * generic NT200A0x reset sequence.  Fields that do not exist on the
+ * 9563 FPGA are explicitly set to NULL so the reset code can skip them.
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	/* Look up the RST9563 module once (was duplicated) */
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* query (not get): these fields are optional on some FPGA revisions */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 PERIPH reset bit (set, then clear).
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_reg_rst;
+	nt_field_t *p_fld_rst_periph;
+
+	if (!p_mod_rst)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+	p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+	p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+	field_set_flush(p_fld_rst_periph);
+	field_clr_flush(p_fld_rst_periph);
+
+	return 0;
+}
+
+/*
+ * Program the on-board clock synthesizer. Only the Si5340 (NT200A02, U23)
+ * profile is supported here; any other model is rejected.
+ * Returns 0 on success, -1 on unsupported model or programming failure.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * 9563-specific reset entry point: peripheral reset, clock synth
+ * programming, field-pointer setup and finally the common NT200A0x
+ * reset sequence.  Stops and returns the first non-zero result.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	const int n_clk_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_clk_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int res;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_clk_synth_model,
+						n_clk_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until the DDR4 PLL and MMCMs report LOCKED.
+ *
+ * The PLL wait is retried with a DDR reset pulse between attempts:
+ * initial timeout 5 sec, subsequent attempts 8 sec, i.e. a total of
+ * 1 * 5 + 5 * 8 = 45 sec (at least 21 sec has been observed necessary).
+ * Also clears the sticky unlock bits once the locks are confirmed.
+ * Returns 0 on success, -1 on lock failure.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked; /* single variable - the original shadowed it in the loop */
+	int retrycount = 5; /* signed: "<= 0" check is meaningful */
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout = 80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM only exists on some FPGA variants */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for the SDRAM controller to be calibrated - on some adapters
+ * calibration times of 2.3 seconds have been seen.
+ *
+ * Probes for the SDC module first; if it is absent only the DDR4
+ * PLL/MMCM locks are awaited.  Each failed attempt pulses the DDR4
+ * reset and retries, up to n_retry_cnt_max times.
+ * Returns 0 on success, non-zero on timeout or failure.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				return -1;
+			}
+		}
+		/* p_nthw_sdc stays NULL if allocation failed: fall back to
+		 * PLL-lock-only waiting below
+		 */
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+
+	n_retry_cnt = 0;
+	res = -1;
+	while (res != 0 && n_retry_cnt <= n_retry_cnt_max) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				/* Guard: p_nthw_sdc may be NULL when no SDC
+				 * module exists (was dereferenced unguarded)
+				 */
+				if (p_nthw_sdc) {
+					uint64_t n_result_mask;
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								    &n_result_mask);
+
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc); /* free(NULL)-style no-op when absent */
+
+	return res;
+}
+
+/*
+ * Full NT200A0x FPGA reset sequence.
+ *
+ * Asserts all domain resets, selects timesync clocking, then releases
+ * the resets in the documented order (SYS/CORE MMCMs, RAB buses, PHY,
+ * TMC/RPP/DDR4/SDC, TS/PTP) while verifying MMCM/PLL locks via the
+ * STAT and STICKY fields resolved in nthw_fpga_rst9563_setup().
+ * Statement order is part of the hardware contract - do not reorder.
+ * Returns 0 on success, -1 on any lock/calibration failure.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	/* Optional fields are NULL on FPGAs that lack them - guard each */
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* NOTE(review): "(true) &&" is redundant - looks like a leftover
+	 * feature toggle; confirm before removing
+	 */
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	/* Lock failures here are logged but not fatal - sticky bits are
+	 * re-checked after SDC calibration below
+	 */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): dead code - the PTP MMCM switch-over below is
+	 * deliberately disabled with "if (false)"; confirm whether it can
+	 * be removed or is kept as reference for a future FPGA revision
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Prepare the common NT200A0x reset state: reset/setup the RAB buses,
+ * probe the AVR and IIC buses, and detect the clock synthesizer model
+ * (Si5340 on NT200A02 / NT200A01 build-2, Si5338 on old NT200A01
+ * build-1), storing the result in *p_rst.
+ * Returns -1 if no clock synth is detected; otherwise the result of
+ * the last probe/scan step.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	int n_clk_synth_model;
+	uint8_t n_clk_synth_i2c_addr;
+	int res;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): the probe/scan results below overwrite each other;
+	 * only the last scan's status is propagated - confirm intended
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_clk_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_clk_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_clk_synth_i2c_addr, 1);
+	if (n_clk_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_clk_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_clk_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_clk_synth_i2c_addr, 255);
+		if (n_clk_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_clk_synth_model);
+			return -1;
+		}
+	}
+	p_rst->mn_si_labs_clock_synth_model = n_clk_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_clk_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset/clock state for the NT200A0x adapter family: cached identification
+ * values plus shortcuts to the RST module's register fields.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synthesizer (Si5340/Si5338) and its 7-bit address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional per-interface reset hooks; installed by the reset code */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+/* Detect board clocking and populate @p_rst (see nthw_fpga_rst_nt200a0x.c) */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+/* Run the actual reset sequence using the state prepared by _init() */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate a zero-initialized GPIO_PHY instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc zeroes the struct in one step (was malloc + memset) */
+	nthw_gpio_phy_t *p = calloc(1, sizeof(nthw_gpio_phy_t));
+
+	return p;
+}
+
+/* Scrub and release a GPIO_PHY instance; a NULL pointer is ignored. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+	/* wipe the field pointers before freeing */
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind GPIO_PHY module instance @n_instance: resolve the CFG and GPIO
+ * registers and the per-port field pointers for ports 0 and 1.
+ * Called with @p == NULL it only probes: returns 0 when the instance
+ * exists and -1 when it does not.
+ * Returns 0 on success, -1 if the instance is missing.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	/* probe-only mode: no instance object supplied */
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) - may be NULL when absent */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* NOTE(review): presumably refreshes the CFG register shadow - confirm */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Return true when the LPMODE GPIO pin of interface @if_no reads non-zero. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* Interrupt pending? INT_B is an active-low (negated) GPIO pin. */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* In reset? RESET_B is an active-low (negated) GPIO pin. */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* Module inserted? MODPRS_B is active-low, so 0 means present. */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/*
+ * PLL interrupt pending? PLL_INTR is a normal (non-negated) GPIO pin.
+ * Always false on HW without "PLL_INTR" (INTR from SyncE jitter attenuater).
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_pll_int)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/*
+ * Emulated RXLOS pin state for interface @if_no.
+ * Always false when this HW has no RXLOS emulation field.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_port_rxlos)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/* Drive the LPMODE pin of interface @if_no and switch its CFG bit to output. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (!enable)
+		field_clr_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+	else
+		field_set_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+	field_clr_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Assert/deassert the active-low RESET_B pin of interface @if_no
+ * (enable == true clears the pin) and switch its CFG bit to output.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (!enable)
+		field_set_flush(p->mpa_fields[if_no].gpio_reset);
+	else
+		field_clr_flush(p->mpa_fields[if_no].gpio_reset);
+	field_clr_flush(p->mpa_fields[if_no].cfg_reset); /* enable output */
+}
+
+/* Drive the emulated RXLOS pin; no-op on HW without the RXLOS field. */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_port_rxlos)
+		return;
+
+	if (enable)
+		field_set_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+	else
+		field_clr_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+}
+
+/*
+ * Restore the CFG direction defaults for interface @if_no: all mandatory
+ * pins as inputs, and - when present - the emulated RXLOS pin as output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	field_set_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_int); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_reset); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_mod_prs); /* enable input */
+	if (p->mpa_fields[if_no].cfg_port_rxlos)
+		field_clr_flush(p->mpa_fields[if_no].cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+/* Number of PHY ports handled per GPIO_PHY module instance */
+#define GPIO_PHY_INTERFACES (2)
+
+/*
+ * Per-port field pointers into the CFG (direction) and GPIO (pin value)
+ * registers. The PLL_INTR and RXLOS fields are optional and may be NULL.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* GPIO_PHY module instance context */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate a zero-initialized HIF instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc zeroes the struct in one step (was malloc + memset) */
+	nthw_hif_t *p = calloc(1, sizeof(nthw_hif_t));
+
+	return p;
+}
+
+/* Scrub and release a HIF instance; a NULL pointer is ignored. */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+	/* wipe the register/field pointers before freeing */
+	memset(p, 0, sizeof(nthw_hif_t));
+	free(p);
+}
+
+/*
+ * Bind HIF module instance @n_instance: resolve all registers/fields, read
+ * the FPGA product identification and derive the HIF reference clock
+ * frequency from the NT_HIF_PER_PS product parameter.
+ * Called with @p == NULL it only probes: returns 0 when the instance
+ * exists and -1 when it does not.
+ * Returns 0 on success, -1 if the instance is missing.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Build seed and core/DDR3 speed Reg/Fld are not present on HIF.
+	 * (Simplified: the original guarded on pointers it had just set to
+	 * NULL, so both branches were dead code.)
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* Statistics sampling control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* Optional STATUS register: fields stay NULL when the FPGA lacks it */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/*
+		 * Fixed: the original re-queried the absent HIF_STATUS register
+		 * and then called register_query_field() on the NULL result.
+		 */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Tune the PCIe3 link configuration (skipped on NT40E3): clamp the
+ * Max Read Request Size to index 2 (512B) and enable extended tags.
+ * Always returns 0.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		/* Extended tags give the endpoint more outstanding request tags */
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+/*
+ * Latch the TSM sample time by writing the magic trigger value to the
+ * HIF SAMPLE_TIME register. Always returns 0.
+ * NOTE(review): mp_fld_sample_time may be NULL when the FPGA lacks
+ * SAMPLE_TIME (see nthw_hif_init) - confirm callers guard against that.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw HIF statistics: rx/tx/refclk counters, the counter unit
+ * size and reference frequency, plus the optional STATUS fields
+ * (tags-in-use, read/write errors - reported as 0 when absent).
+ * Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* STATUS fields are optional - report zero when not present */
+	*p_tags_in_use = 0;
+	*p_rd_err = 0;
+	*p_wr_err = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/*
+ * Sample the HIF counters and convert them to PCIe rx/tx rates scaled by
+ * the reference clock. Error flags increment the caller's accumulators;
+ * with zero elapsed reference ticks all rates are reported as 0.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx, n_tx, n_ref_clk, n_unit_size, n_ref_freq;
+	uint64_t n_tags, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size, &n_ref_freq,
+			&n_tags, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (n_ref_clk == 0) {
+		/* no elapsed reference ticks: no rate can be computed */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) /
+			 (uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) /
+			 (uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/* Enable statistics collection and pulse STAT_REQ to latch the counters. */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while still pulsing STAT_REQ to latch. */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample PCIe rates over a fixed window: enable statistics, let the
+ * counters run for 100 ms (blocking wait), disable, then convert to rates.
+ * Always returns 0.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Sample HIF statistics into the endpoint counter block @epc.
+ * NOTE(review): epc->cur_tx is passed as the rx-rate argument of
+ * nthw_hif_get_stat_rate() and epc->cur_rx as the tx-rate argument -
+ * presumably a deliberate host-vs-endpoint direction flip; confirm.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Counter unit size used when converting counters to rates (see nthw_hif_get_stat) */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/* HIF (host interface) module context: register/field shortcuts and FPGA IDs */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - kept NULL by nthw_hif_init() */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional STATUS register - fields may be NULL on older FPGAs */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* PCIe3 link tuning (HIF_CONFIG) - may be NULL when absent */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA product identification read at init */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe endpoint error counters */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Per-endpoint traffic-generator settings and sampled counters */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvell 88E1111 PHY.
+ * This PHY is used in many Trispeed NIMs.
+ * In order to access this PHY, the I2C clock speed must be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/* Pulse the TX FIFO reset bit in the control register: set, flush, clear, flush. */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	nt_field_t *const fld_reset = p->mp_fld_cr_txfifo_reset;
+
+	field_update_register(fld_reset);
+
+	field_set_all(fld_reset);
+	field_flush_register(fld_reset);
+
+	field_clr_all(fld_reset);
+	field_flush_register(fld_reset);
+
+	return 0;
+}
+
+/*
+ * Push one entry onto the TX FIFO: the data byte plus optional I2C
+ * START/STOP condition flags, then flush the register to hardware.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop a single byte from the RX FIFO into *p_data; each read consumes one entry. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+	return 0;
+}
+
+/*
+ * Soft-reset the I2C controller by writing the reset key to SOFTR.
+ * NOTE(review): the read-back is done through the CR/EN field, not SOFTR -
+ * presumably to refresh the register shadow first; confirm this is intended.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	/* 0x0A looks like the Xilinx AXI IIC SOFTR reset key - verify against PG090 */
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Enable the I2C controller: refresh CR, then set and flush the EN bit. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read the status register's bus-busy (BB) bit into *pb_flag. */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Read the status register's RX-FIFO-empty bit into *pb_flag. */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * Program the eight I2C timing registers. Each nanosecond constant is
+ * converted to controller clock cycles by dividing with n_iic_cycle_time,
+ * the controller clock cycle time in ns (e.g. 125MHz -> 8ns).
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		nt_field_t *fld;
+		uint32_t ns;
+	} timing_tbl[] = {
+		{ p->mp_fld_tsusta, susta }, { p->mp_fld_tsusto, susto },
+		{ p->mp_fld_thdsta, hdsta }, { p->mp_fld_tsudat, sudat },
+		{ p->mp_fld_tbuf, buf },     { p->mp_fld_thigh, high },
+		{ p->mp_fld_tlow, low },     { p->mp_fld_thddat, hddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(timing_tbl) / sizeof(timing_tbl[0]); i++) {
+		uint32_t val = timing_tbl[i].ns / n_iic_cycle_time;
+
+		field_set_val_flush(timing_tbl[i].fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate a zero-initialized IIC handle.
+ * Returns NULL on allocation failure. Free with nthw_iic_delete().
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc() allocates and zero-initializes in one step (idiomatic
+	 * replacement for the malloc()+memset() pair).
+	 */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Bind an IIC handle to FPGA module instance n_iic_instance, resolve all
+ * register/field handles, soft-reset and enable the controller, program the
+ * timing registers (when n_iic_cycle_time is non-zero) and reset the TX FIFO.
+ *
+ * When p is NULL only module presence is probed: returns 0 if the module
+ * exists, -1 otherwise. With a valid p, returns 0 on success, -1 if the
+ * instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All -1 arguments select the built-in default retry/delay values */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Timing parameter registers (values programmed in clock cycles) */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control register fields */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	/* Status register fields */
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* TX/RX FIFO registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Scrub and release a handle allocated by nthw_iic_new(); NULL is a no-op. */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+
+	/* Zero the struct first so stale pointers are caught early */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Configure poll delay (us) and the four retry budgets.
+ * A negative argument selects the built-in default for that parameter.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry = (n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read data_len bytes from device register reg_addr into p_void,
+ * retrying the whole transfer up to the configured read budget.
+ * Returns 0 on success, -1 when the retries are exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+	uint8_t *p_dst = (uint8_t *)p_void;
+	int n_attempts_left = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	/* Retry the full transfer until it succeeds or the budget runs out */
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, p_dst) != 0) {
+		n_attempts_left--;
+		if (n_attempts_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_attempts_left,
+		       dev_addr, reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len,
+		       n_attempts_left);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C read transaction: write the register address, then read
+ * data_len bytes from the device into p_byte.
+ * Returns 0 on success, -1 when the bus never becomes free or the RX FIFO
+ * does not fill in time.
+ *
+ * Fixes vs. original: the unreachable trailing "return 0;" (both branches
+ * of the if/else already returned) is removed, and the happy path is
+ * de-nested with guard clauses - behavior is unchanged.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Address phase: device address (WR) with START, register address with STOP */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Data phase: device address (RD) with START, byte count with STOP */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read one byte from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write data_len bytes from p_void to device register reg_addr,
+ * retrying the whole transfer up to the configured write budget.
+ * Returns 0 on success, -1 when the retries are exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t *p_src = (uint8_t *)p_void;
+	int n_attempts_left =
+		(p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+
+	/* Retry the full transfer until it succeeds or the budget runs out */
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, p_src) != 0) {
+		n_attempts_left--;
+		if (n_attempts_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_attempts_left,
+		       dev_addr, reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C write transaction: device address, register address,
+ * then data_len payload bytes (the last byte carries the STOP condition).
+ * Returns 0 on success, -1 if data_len is 0 or the bus is not free.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	/* All bytes except the last; the final byte is sent with STOP set */
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			/*
+			 * NOTE(review): this wait has no timeout - if the bus
+			 * never frees up the loop spins forever. Consider a
+			 * bounded retry like nthw_iic_bus_ready() uses.
+			 */
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ * Polls the bus-busy bit up to the configured retry budget, sleeping
+ * mn_poll_delay us between polls. Returns true when the bus became free,
+ * false on timeout.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	/*
+	 * Fix: test "count <= 0", not "count == 0". With a retry budget of 0
+	 * the counter went to -1 on the timeout path and the old equality
+	 * test wrongly reported the bus as ready.
+	 */
+	if (count <= 0)
+		return false;
+
+	return true;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ * Polls the RX-FIFO-empty bit up to the configured retry budget, sleeping
+ * mn_poll_delay us between polls. Returns true when data arrived,
+ * false on timeout.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	/*
+	 * Fix: test "count <= 0", not "count == 0". With a retry budget of 0
+	 * the counter went to -1 on the timeout path and the old equality
+	 * test wrongly reported data as ready.
+	 */
+	if (count <= 0)
+		return false;
+
+	return true;
+}
+
+/*
+ * Probe one device address by reading a single byte from n_reg_addr.
+ * Returns the nthw_iic_readbyte() result: 0 when a device answered.
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = 0xFF;
+	int res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr,
+				   1, &data_val);
+
+	/* Log only on success - a probe miss is the common, silent case */
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan the 7-bit address space from n_dev_addr_start - upward when
+ * b_increate is true, downward otherwise - and return the first address
+ * that answers, or -1 when none does.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int step = b_increate ? 1 : -1;
+	int res = 0;
+	int i;
+
+	for (i = n_dev_addr_start; i >= 0 && i < 128; i += step) {
+		res = nthw_iic_scan_dev_addr(p, i, 0x00);
+		if (res == 0)
+			break;
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit device address; hits are logged, nothing is returned. */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_addr;
+
+	for (n_addr = 0; n_addr < 128; n_addr++)
+		(void)nthw_iic_scan_dev_addr(p, n_addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/* Driver state for one I2C controller instance on the FPGA */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_iic;	/* resolved IIC module */
+	int mn_iic_instance;	/* module instance number */
+
+	uint32_t mn_iic_cycle_time;	/* controller clock cycle time in ns; 0 = leave timing as-is */
+	/* Retry budgets and poll delay (us); negative selects built-in defaults */
+	int mn_poll_delay;
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* I2C timing registers (setup/hold/high/low times, in clock cycles) */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register (enable, master mode, TX FIFO reset, TX ack) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register (bus busy, FIFO full/empty flags) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data byte plus START/STOP condition flags */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register and its reset-key field */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, and free */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Configure poll delay and retry budgets; negative args select defaults */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Transfers: *_data variants retry; *byte variants do one transaction */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Polled waits: true when ready, false on retry exhaustion */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers (probe 7-bit device addresses) */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate a zero-initialized MAC/PCS handle.
+ * Returns NULL on allocation failure. Free with nthw_mac_pcs_delete().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc() allocates and zero-initializes in one step (idiomatic
+	 * replacement for the malloc()+memset() pair).
+	 */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Scrub and release a handle allocated by nthw_mac_pcs_new(); NULL is a no-op. */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+
+	/* Zero the struct first so stale pointers are caught early */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Wrapper - for ease of use: select host traffic as the Tx source when
+ * 'enable' is true, otherwise select the traffic generator (TFG).
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Set/clear the PCS config Rx-enable bit (read-modify-write, then flush). */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_rx_enable);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_rx_enable);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_rx_enable);
+}
+
+/* Set/clear the PCS config Tx-enable bit (read-modify-write, then flush). */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_tx_enable);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_tx_enable);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_tx_enable);
+}
+
+/* Select/deselect the host as Tx data source (PHYMAC_MISC). */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_phymac_misc_tx_sel_host);
+	if (enable)
+		field_set_flush(p->mp_fld_phymac_misc_tx_sel_host);
+
+	else
+		field_clr_flush(p->mp_fld_phymac_misc_tx_sel_host);
+}
+
+/* Select/deselect the traffic generator (TFG) as Tx data source. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_phymac_misc_tx_sel_tfg);
+	if (enable)
+		field_set_flush(p->mp_fld_phymac_misc_tx_sel_tfg);
+
+	else
+		field_clr_flush(p->mp_fld_phymac_misc_tx_sel_tfg);
+}
+
+/*
+ * Set/clear end-of-packet timestamping. The TS_EOP field is optional -
+ * presumably only present on FPGA images that support it (the pointer is
+ * NULL otherwise), so the call is silently ignored when absent.
+ */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	if (p->mp_fld_phymac_misc_ts_eop) {
+		field_get_updated(p->mp_fld_phymac_misc_ts_eop);
+		if (enable)
+			field_set_flush(p->mp_fld_phymac_misc_ts_eop);
+
+		else
+			field_clr_flush(p->mp_fld_phymac_misc_ts_eop);
+	}
+}
+
+/*
+ * True when every expected PCS block-lock and demuxed lane-lock bit
+ * (as given by the masks captured at init time) is currently set.
+ */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	uint32_t block_lock;
+	uint32_t lane_lock;
+
+	block_lock = field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	lane_lock = field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+/* Assert (enable=true) or deassert the Tx path reset bit. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_tx_path_rst);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_tx_path_rst);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_tx_path_rst);
+}
+
+/* Assert (enable=true) or deassert the Rx path reset bit. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_rx_path_rst);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Return the current (freshly read) state of the Rx path reset bit. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Enable/disable transmission of Remote Fault Indication (RFI). */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_tx_send_rfi);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_tx_send_rfi);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_tx_send_rfi);
+}
+
+/* Set/clear the Rx force-resync bit to make the PCS re-acquire alignment. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_rx_force_resync);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_rx_force_resync);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_rx_force_resync);
+}
+
+/*
+ * True when the GT Rx reset FSM reports "reset done" on all four lanes.
+ * Each lane's status is masked with the done-mask captured at init time;
+ * checking stops at the first lane that is not done.
+ */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *done_fld[] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	unsigned int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(done_fld[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * True when the GT Tx reset FSM reports "reset done" on all four lanes.
+ * Mirrors nthw_mac_pcs_is_gt_fsm_rx_reset_done() for the Tx direction.
+ */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *done_fld[] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	unsigned int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(done_fld[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Program all four GTY lane loopback fields with the same mode value and
+ * flush the register. The values written match the Xilinx GTY LOOPBACK
+ * port encoding (0 = normal, 2 = near-end PMA, 4 = far-end PMA) - see
+ * UG578; confirm against the FPGA register documentation.
+ */
+static void nthw_mac_pcs_set_gty_loop_mode(nthw_mac_pcs_t *p, uint32_t mode)
+{
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/* Enable/disable host-side (near-end PMA, value 2) loopback on all lanes. */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_gty_loop_mode(p, enable ? 2 : 0);
+}
+
+/* Enable/disable line-side (far-end PMA, value 4) loopback on all lanes. */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_gty_loop_mode(p, enable ? 4 : 0);
+}
+
+/*
+ * Read the per-lane BIP8 error counters into a local array and, in DEBUG
+ * builds, log any non-zero counts.
+ * NOTE(review): the function name implies the counters are reset -
+ * presumably the hardware clears them on read; confirm against the FPGA
+ * register documentation.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Read the PCS Rx status bit (masked to bit 0) into *status. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* Return the current PCS Rx high-BER (bit error rate) indication. */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the LINK_SUMMARY register once and fan its fields out to the
+ * caller's output pointers. Every output pointer is optional - pass NULL
+ * to skip a field. "lh"/"ll" prefixes denote latched-high/latched-low
+ * variants of the corresponding live status - TODO confirm.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * A reset is required when lock is only partially achieved: the block
+ * or lane lock bits are neither all clear nor all set (relative to the
+ * masks captured at init time).
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+/*
+ * Enable or bypass RS-FEC. The 5-bit rs_fec_ctrl_in field is cleared (0)
+ * to enable FEC and set to all-ones (0x1F) to bypass it. Both paths are
+ * then reset with 10 ms settling delays so the new state takes effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	if (enable) {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, 0);
+	} else {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+				    (1 << 5) - 1);
+	}
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Return the FEC_STAT bypass bit (FEC is bypassed when set). */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* Return the FEC_STAT valid bit. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* Return the FEC lane-alignment status bit. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+/* True when at least one FEC lane reports alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True when all four FEC lanes report alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* Log all FEC_STAT fields at debug level. */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	/* Label for the fourth lane fixed: it previously read "AM_LOCK_0"
+	 * although the value printed is am_lock3.
+	 */
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read the FEC corrected (CW) and uncorrected (UCW) codeword counters,
+ * logging any non-zero counts.
+ * NOTE(review): the name implies the counters are cleared - presumably
+ * they are clear-on-read in hardware; confirm against the FPGA register
+ * documentation.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the GTY Rx buffer status. Returns true (and logs the per-lane
+ * status values) only when at least one lane changed state AND at least
+ * one lane currently reports a non-zero buffer status.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	bool any_changed;
+	bool any_stat;
+
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	any_changed =
+		field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+		field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+		field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+		field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3);
+	if (!any_changed)
+		return false;
+
+	any_stat = field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+		   field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+		   field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+		   field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3);
+	if (!any_stat)
+		return false;
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+	       p->m_port_no,
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+	return true;
+}
+
+/*
+ * Program the GTY transmitter analog tuning (pre-cursor, differential
+ * swing, post-cursor) for one lane. All three values are 5-bit fields;
+ * the arguments are truncated with "& 0x1F". Lane numbers outside 0-3
+ * are silently ignored by each switch.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * Only bit 0 of 'mode' is used; the same value is applied to all four
+ * lanes, after which the Rx equalizer is reset (toggled with a 1 ms pulse).
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/*
+ * Set/clear the Tx polarity-swap bit for one GTY lane. Lane numbers
+ * outside 0-3 are silently ignored.
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Set/clear the Rx polarity-swap bit for one GTY lane. Lane numbers
+ * outside 0-3 are silently ignored.
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Write the LED mode (enum nthw_mac_pcs_led_mode_e) to NT_PORT_CTRL. */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/*
+ * Set the Rx timestamp compensation delay. The TIMESTAMP_COMP register is
+ * optional; the call is silently ignored when it is absent (field NULL).
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Record the port number used in this module's log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Read the current PCS block-lock bits from hardware. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Return the block-lock mask captured at init time (no hardware access). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Read the current demuxed lane-lock bits from hardware. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Return the lane-lock mask captured at init time (no hardware access). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* LED behaviour; written to NT_PORT_CTRL by nthw_mac_pcs_set_led_mode(). */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* Rx equalization modes - see nthw_mac_pcs_set_receiver_equalization_mode() */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Per-instance state for one MAC_PCS FPGA module: cached register and
+ * field handles plus the lock masks captured at init time. Optional
+ * registers/fields (looked up with module_query_register()) are NULL when
+ * absent from the current FPGA image.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL - mp_reg_gty_ctl_tx is only set on images with split RX/TX registers */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP (optional) */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+/* Constructor / init / destructor */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+/* Lock / reset state queries and path resets */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+/* Link summary readers - all output pointers are optional (NULL to skip) */
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+/* FEC control/status */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+
+/* GTY tuning, polarity and misc configuration */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+/* Lock value/mask accessors */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Helper: refresh a field's cached value, then set or clear the field and
+ * flush the change to hardware. A NULL field pointer is silently ignored.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate and zero-initialize a MAC/PCS XXV instance.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Release an instance obtained from nthw_mac_pcs_xxv_new().
+ * The object is cleared before being freed; a NULL pointer is a no-op.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+	free(p);
+}
+
+/*
+ * Map a channel index to a port number: channel 0 reports the module
+ * instance number, any other channel index is returned unchanged.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (index == 0)
+		return (uint8_t)p->mn_instance;
+	return index;
+}
+
+/* Store the logical port number (used e.g. in the FEC counter log messages). */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register of the given channel and return the
+ * requested status fields. Any output pointer may be NULL, in which case
+ * that field is skipped.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* Validate p BEFORE dereferencing it; the original asserted after use */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Set/clear the CORE_CONF TX_ENABLE field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_enable,
+						enable);
+}
+
+/* Set/clear the CORE_CONF RX_ENABLE field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_enable,
+						enable);
+}
+
+/* Set/clear the CORE_CONF RX_FORCE_RESYNC field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_force_resync,
+						enable);
+}
+
+/* Assert/de-assert the SUB_RST RX_GT_DATA reset of channel <index>. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_gt_data,
+						enable);
+}
+
+/* Assert/de-assert the SUB_RST TX_GT_DATA reset of channel <index>. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_gt_data,
+						enable);
+}
+
+/* Assert/de-assert the SUB_RST AN_LT (auto-neg/link-training) reset. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_an_lt,
+						enable);
+}
+
+/* Assert/de-assert the SUB_RST SPEED_CTRL reset of channel <index>. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_speed_ctrl,
+						enable);
+}
+
+/* Set/clear the CORE_CONF TX_SEND_RFI field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_rfi,
+						enable);
+}
+
+/* Set/clear the CORE_CONF TX_SEND_LFI field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_lfi,
+						enable);
+}
+
+/*
+ * Program both TX_SEND_LFI and TX_SEND_RFI for channel <index> in a single
+ * CORE_CONF register flush: refresh the register, update both fields in the
+ * shadow copy, then write once.
+ */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+/* Query whether DFE is active on channel <index>. */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* DFE is active exactly when the LPM equalization bit is clear */
+	return field_get_updated(p->regs[index].mp_fld_gty_ctl_rx_lpm_en) == 0;
+}
+
+/*
+ * Enable/disable DFE (decision feedback equalization) on the RX GTY lane of
+ * channel <index>. DFE is selected by clearing LPM_EN; the equalizer is then
+ * soft-reset (EQUA_RST pulsed 1 -> 0) so the new LPM_EN setting takes effect.
+ * The statement order below is intentional and must not be changed.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Set/clear the GTY_CTL RX polarity-swap field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_rx_polarity,
+						enable);
+}
+
+/* Set/clear the GTY_CTL TX polarity-swap field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_polarity,
+						enable);
+}
+
+/* Set/clear the GTY_CTL TX_INHIBIT field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_inhibit,
+						enable);
+}
+
+/*
+ * Enable/disable host-side (GT) loopback on channel <index>.
+ * NOTE(review): GT_LOOP value 2 presumably selects near-end GT loopback and
+ * 0 normal operation -- confirm the encoding against the GTY register spec.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Set/clear the CORE_CONF LINE_LOOPBACK field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_line_loopback,
+						enable);
+}
+
+/* True while the SUB_RST_STATUS USER_RX_RST bit of channel <index> is set. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_status_user_rx_rst;
+
+	return field_get_updated(f) != 0;
+}
+
+/* True while the SUB_RST_STATUS USER_TX_RST bit of channel <index> is set. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_status_user_tx_rst;
+
+	return field_get_updated(f) != 0;
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/*
+ * Ready when the QPLLs are locked and neither the user RX nor the user TX
+ * reset of channel <index> is still asserted.
+ */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (!nthw_mac_pcs_xxv_is_qpll_lock(p, index))
+		return false;
+	if (nthw_mac_pcs_xxv_is_user_rx_rst(p, index))
+		return false;
+	return !nthw_mac_pcs_xxv_is_user_tx_rst(p, index);
+}
+
+/* True when the ANEG_CONFIG ENABLE bit of channel <index> is set. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	return field_get_updated(f) != 0;
+}
+
+/* Set/clear the CORE_CONF TX_SEND_IDLE field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_idle,
+						enable);
+}
+
+/* Set/clear the CORE_CONF TX_INS_FCS field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_ins_fcs,
+						enable);
+}
+
+/* Query the configured link speed of channel <index>: true = 10G, false = 25G. */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Field encoding: 0 = 25g, 1 = 10g */
+	return field_get_updated(p->regs[index].mp_fld_link_speed10_g) != 0;
+}
+
+/* Select 10G (enable) or 25G (disable) link speed for channel <index>. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_link_speed10_g,
+						enable);
+}
+
+/* Refresh, then set and flush the LINK_SPEED toggle field of channel <index>. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_fld_link_speed_toggle);
+	field_set_flush(p->regs[index].mp_fld_link_speed_toggle);
+}
+
+/* Set/clear the RS_FEC_CONF RS_FEC_ENABLE field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable,
+						enable);
+}
+
+/* Write <mode> to the DEBOUNCE_CTRL NT_PORT_CTRL field of channel <index>. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/* Assert/de-assert the SUB_RST RX_MAC_PCS reset of channel <index>. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_mac_pcs,
+						enable);
+}
+
+/* Assert/de-assert the SUB_RST TX_MAC_PCS reset of channel <index>. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_mac_pcs,
+						enable);
+}
+
+/*
+ * Refresh the RS-FEC corrected (CCW) and uncorrected (UCW) codeword counter
+ * registers of channel <index> and log any non-zero counts.
+ * NOTE(review): nothing is written back here, so the counters are presumably
+ * cleared on read by register_update() -- confirm against the register spec.
+ * NOTE(review): these NT_LOG messages lack the trailing '\n' used by the
+ * other log calls in this file.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Write the RX timestamp compensation delay for channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_rx_dly;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, rx_dly);
+}
+
+/* Write the TX timestamp compensation delay for channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_tx_dly;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, tx_dly);
+}
+
+/* Set/clear the CORE_CONF TS_AT_EOP field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_ts_at_eop,
+						enable);
+}
+
+/* Write the GTY TX differential control value for channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Write the GTY TX pre-cursor value for channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Write the GTY TX post-cursor value for channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_post_cursor_tx_post_csr;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Set/clear the LT_CONF ENABLE (link training) field of channel <index>. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_lt_conf_enable,
+						enable);
+}
+
+/* Set/clear the ANEG_CONFIG FEC91_REQUEST field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec91_request,
+						enable);
+}
+
+/* Set/clear the ANEG_CONFIG RS_FEC_REQUEST field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_rs_fec_request,
+						enable);
+}
+
+/* Set/clear the ANEG_CONFIG FEC74_REQUEST field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec74_request,
+						enable);
+}
+
+/* Set/clear the ANEG_CONFIG ENABLE field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_enable,
+						enable);
+}
+
+/* Set/clear the ANEG_CONFIG BYPASS field of channel <index> and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_bypass,
+						enable);
+}
+
+/*
+ * Configure DAC (direct-attach cable) mode for channel <index>.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: auto-negotiation and link
+ * training are disabled/bypassed, then the RX/TX MAC-PCS and GT data paths
+ * are pulsed through reset (asserted, then de-asserted) to apply the change.
+ * The statement order is deliberate. Any other mode hits the assert below.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+/* True when the latched-low RX FEC74 lock bit of channel <index> is set. */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f) != 0;
+}
+
+/* True when the latched-low RX RS-FEC lane-alignment bit of channel <index> is set. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f) != 0;
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Port LED control modes; passed to nthw_mac_pcs_xxv_set_led_mode(). */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/*
+ * DAC (direct-attach copper) modes; passed to nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): the _N/_S/_L suffixes presumably correspond to the 25G
+ * copper-cable reach classes -- confirm against the FPGA/board documentation.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Driver state for the XXV MAC/PCS FPGA module.  Holds the FPGA/module
+ * handles plus, for each sub-module/channel, the register and field
+ * handles that are resolved once at init time and used by the accessor
+ * functions declared below.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_mac_pcs_xxv;	/* resolved MAC_PCS_XXV module */
+	int mn_instance;	/* module instance number */
+
+	uint8_t m_port_no;
+
+/* Max number of sub-modules/channels; regs[] below is indexed by channel. */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];	/* one entry per channel */
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate and zero-initialize a PCI read traffic-generator instance.
+ * Returns NULL on allocation failure.  The caller owns the object and
+ * releases it with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-fills in one step, replacing malloc()+memset(). */
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/* Release an instance created by nthw_pci_rd_tg_new(); NULL is a no-op. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before release to help catch use-after-free. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the PCI read traffic-generator driver to FPGA module instance
+ * n_instance and resolve all register/field handles used by the accessor
+ * functions.  Returns 0 on success, -1 if the module instance is absent.
+ * When p is NULL the call is a pure presence probe for the module.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id - query_field() may return NULL when absent */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA base address as two 32-bit register writes.
+ * Uses ULL masks: the previous "1UL << 32" is undefined behavior on
+ * ILP32 targets where unsigned long is only 32 bits wide (C11 6.5.7).
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xFFFFFFFFULL));
+}
+
+/* Select the RAM entry subsequent set_ram_data() writes target. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one RAM entry (request size plus wait/wrap flags).  All fields
+ * are set shadowed first and flushed once via the final field's register.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Start the generator for n_iterations runs. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read back the TG_RD_RDY control/ready field from hardware. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * Driver state for the PCI read traffic-generator module (MOD_PCI_RD_TG):
+ * FPGA/module handles plus the register/field handles resolved by
+ * nthw_pci_rd_tg_init().
+ * NOTE(review): this header references nt_fpga_t et al. without its own
+ * includes; it relies on the includer pulling in the FPGA model header
+ * first -- consider making it self-contained.
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* optional; may be NULL */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate and zero-initialize a PCI test-accelerator instance.
+ * Returns NULL on allocation failure.  The caller owns the object and
+ * releases it with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc() zero-fills in one step, replacing malloc()+memset(). */
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/* Release an instance created by nthw_pci_ta_new(); NULL is a no-op. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before release to help catch use-after-free. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the PCI test-accelerator driver to FPGA module instance n_instance
+ * and resolve the control register and the four counter fields.
+ * Returns 0 on success, -1 if the module instance is absent.
+ * When p is NULL the call is a pure presence probe for the module.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Enable (non-zero) or disable (zero) the test accelerator. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the good-packet counter from hardware into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the bad-packet counter from hardware into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the length-error counter from hardware into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the payload-error counter from hardware into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * Driver state for the PCI test-accelerator module (MOD_PCI_TA):
+ * FPGA/module handles plus the control/counter register and field
+ * handles resolved by nthw_pci_ta_init().
+ * NOTE(review): header relies on the includer providing nt_fpga_t et al.;
+ * consider making it self-contained.
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate and zero-initialize a PCI write traffic-generator instance.
+ * Returns NULL on allocation failure.  The caller owns the object and
+ * releases it with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc() zero-fills in one step, replacing malloc()+memset(). */
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/* Release an instance created by nthw_pci_wr_tg_new(); NULL is a no-op. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before release to help catch use-after-free. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the PCI write traffic-generator driver to FPGA module instance
+ * n_instance and resolve all register/field handles used by the accessor
+ * functions.  Returns 0 on success, -1 if the module instance is absent.
+ * When p is NULL the call is a pure presence probe for the module.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id - query_field() may return NULL when absent */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA base address as two 32-bit register writes.
+ * Uses ULL masks: the previous "1UL << 32" is undefined behavior on
+ * ILP32 targets where unsigned long is only 32 bits wide (C11 6.5.7).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xFFFFFFFFULL));
+}
+
+/* Select the RAM entry subsequent set_ram_data() writes target. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one RAM entry (request size plus wait/wrap/increment flags).
+ * All fields are set shadowed first and flushed once via the final
+ * field's register.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the generator for n_iterations runs. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read back the TG_WR_RDY control/ready field from hardware. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * Driver state for the PCI write traffic-generator module (MOD_PCI_WR_TG):
+ * FPGA/module handles plus the register/field handles resolved by
+ * nthw_pci_wr_tg_init().
+ * NOTE(review): header relies on the includer providing nt_fpga_t et al.;
+ * consider making it self-contained.
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* optional; may be NULL */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/* Allocate a zero-initialized PCIe3 context; returns NULL on allocation failure. */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc yields the same zero-filled object as malloc + memset */
+	return calloc(1, sizeof(nthw_pcie3_t));
+}
+
+/* Scrub and release a PCIe3 context; safe to call with NULL. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* wipe stale register/field pointers before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all PCIE3 registers and fields for module instance n_instance and
+ * apply the initial endpoint setup (clear both marker addresses, enable the
+ * endpoint-0 DMA allow mask, keep the endpoint-1 mask cleared).
+ *
+ * When p is NULL the call degenerates to a probe: it only reports whether the
+ * module instance exists. Returns 0 on success, -1 when it does not.
+ *
+ * Fix vs. original: removed the stray ';' after the function's closing brace
+ * (an empty file-scope declaration, not valid ISO C).
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counter registers */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* Root-port <-> endpoint error signalling registers */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	/* Endpoint configuration; registers are refreshed from HW when present */
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/* endpoint 0 DMA is allowed, endpoint 1 stays masked off */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Latch the PCIe statistics counters by writing the magic trigger word. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	const uint32_t n_sample_trigger = 0xfee1dead;	/* HW-defined trigger value */
+
+	field_set_val_flush32(p->mp_fld_sample_time, n_sample_trigger);
+	return 0;
+}
+
+/*
+ * Enable statistics collection: set ENA and REQ, then flush the shadow
+ * register to hardware in one write. Always returns 0.
+ */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Disable statistics collection: clear ENA while keeping REQ set, then flush
+ * the shadow register to hardware in one write. Always returns 0.
+ */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Snapshot all PCIe statistics counters into the caller-supplied locations,
+ * along with the constant TG unit size and reference frequency. All output
+ * pointers must be non-NULL. Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* compile-time constants of the traffic generator, not live counters */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Derive byte rates and bus-utilization figures (parts per million) from the
+ * latched statistics counters. When the reference-clock count is zero the
+ * rate outputs that would divide by it are not written; the clock count and
+ * utilization outputs are zeroed. Always returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t n_rx, n_tx, n_ref_clk;
+	uint32_t n_unit_size, n_ref_freq;
+	uint32_t n_tags, n_rq_rdy, n_rq_vld;
+
+	nthw_pcie3_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size,
+			  &n_ref_freq, &n_tags, &n_rq_rdy, &n_rq_vld);
+
+	if (n_ref_clk == 0) {
+		/* cannot scale by a zero clock count */
+		*p_ref_clk_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+		return 0;
+	}
+
+	/* counts scaled to bytes/second via unit size and reference frequency */
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) /
+			 (uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) /
+			 (uint64_t)n_ref_clk;
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_tag_use_cnt = n_tags;
+
+	/* fraction of reference-clock cycles the request bus was valid/ready */
+	*p_pci_nt_bus_util =
+		((uint64_t)n_rq_vld * 1000000ULL) / (uint64_t)n_ref_clk;
+	*p_pci_xil_bus_util =
+		((uint64_t)n_rq_rdy * 1000000ULL) / (uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/*
+ * Pre-sample hook for endpoint counters. Intentionally a no-op for PCIe3
+ * (kept so callers can invoke pre/post symmetrically). Always returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the endpoint counter struct from the latched PCIe
+ * statistics. Note the tx/rx swap: the endpoint's "cur_tx" is the host's
+ * receive direction and vice versa. epc must be non-NULL. Always returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Register/field shadow state for one PCIE3 module instance. All pointers are
+ * resolved by nthw_pcie3_init() and owned by the FPGA model.
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_pcie3;	/* resolved PCIE3 module */
+	int mn_instance;	/* module instance number */
+
+	/* statistics control (enable/request) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	/* latched RX/TX byte-unit counters */
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	/* request-bus ready/valid cycle counters */
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	/* outstanding PCIe tag usage */
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference-clock cycle counter for rate scaling */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port -> endpoint error indications */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* endpoint -> root-port error indications */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	/* statistics sample trigger */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* endpoint configuration (interface id, messaging, DMA allow masks) */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* marker-scheme DMA address (cleared during init) */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	/* scratch/test registers */
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never resolved by nthw_pcie3_init() in
+	 * this patch — confirm whether they are dead fields or wired up later.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_pcie3_t *nthw_pcie3_new(void);
+/* Scrub and free a context; NULL-safe. */
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+/* Resolve registers/fields and apply initial endpoint setup; 0 on success. */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Latch the statistics counters for a subsequent nthw_pcie3_get_stat(). */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/* Allocate a zero-initialized SDC context; returns NULL on allocation failure. */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc gives the same zero-filled result as malloc + memset */
+	return calloc(1, sizeof(nthw_sdc_t));
+}
+
+/* Scrub and release an SDC context; safe to call with NULL. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* wipe stale field pointers before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all SDC (SDRAM controller) registers and fields for module instance
+ * n_instance. When p is NULL the call degenerates to a probe: it only reports
+ * whether the module instance exists. Returns 0 on success, -1 otherwise.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* CTRL: init/test/stop control bits */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* STAT: calibration/lock/reset status bits */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* cell counter and its sampling period */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		/* current and peak fill levels */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Helper: read one status field, shift its value into the running result
+ * mask and report whether it matched the expected "good" value (all bits set
+ * when expect_all_set, otherwise zero). Returns 0 on match, 1 on mismatch.
+ */
+static int nthw_sdc_acc_field_state(nt_field_t *p_fld, uint64_t *pn_mask,
+				    bool expect_all_set)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	/* unsigned shift; guard the 32-bit case (1 << 32 is undefined) */
+	const uint32_t val_mask = (n_val_width < 32) ?
+				  ((1U << n_val_width) - 1U) : UINT32_MAX;
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	return (val != (expect_all_set ? val_mask : 0U)) ? 1 : 0;
+}
+
+/*
+ * Read the SDC health state. Packs the raw field values of calib, init_done,
+ * mmcm_lock, pll_lock and resetting (in that order, most significant first)
+ * into *pn_result_mask and returns the number of fields not in their expected
+ * state (0 = all ok), or -1 on NULL arguments.
+ *
+ * Fixes vs. original: the five copy-pasted read/mask stanzas are factored
+ * into one helper, the signed '(1 << width)' (UB for 32-bit fields) is
+ * replaced with an unsigned, guarded mask, and the redundant second NULL
+ * check of pn_result_mask is dropped (already checked on entry).
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	/* these must all read back as all-ones to be healthy */
+	n_err_cnt += nthw_sdc_acc_field_state(p->mp_fld_stat_calib, &n_mask, true);
+	n_err_cnt += nthw_sdc_acc_field_state(p->mp_fld_stat_init_done, &n_mask, true);
+	n_err_cnt += nthw_sdc_acc_field_state(p->mp_fld_stat_mmcm_lock, &n_mask, true);
+	n_err_cnt += nthw_sdc_acc_field_state(p->mp_fld_stat_pll_lock, &n_mask, true);
+	/* the controller must NOT be resetting */
+	n_err_cnt += nthw_sdc_acc_field_state(p->mp_fld_stat_resetting, &n_mask, false);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reports a healthy state: calib, init_done, mmcm_lock and
+ * pll_lock all set, resetting cleared. Each field gets its own polling budget
+ * of n_poll_iterations at n_poll_interval. Returns the number of fields that
+ * failed to reach their expected state (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	/* fields that must read back as all-ones, in check order */
+	nt_field_t *const a_set_fields[] = {
+		p->mp_fld_stat_calib,
+		p->mp_fld_stat_init_done,
+		p->mp_fld_stat_mmcm_lock,
+		p->mp_fld_stat_pll_lock,
+	};
+	size_t i;
+	int n_err_cnt = 0;
+
+	for (i = 0; i < sizeof(a_set_fields) / sizeof(a_set_fields[0]); i++) {
+		if (field_wait_set_all32(a_set_fields[i], n_poll_iterations,
+					 n_poll_interval) != 0)
+			n_err_cnt++;
+	}
+
+	/* the reset indication must clear */
+	if (field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Field shadow state for one SDC (SDRAM controller) module instance.
+ * All pointers are resolved by nthw_sdc_init() and owned by the FPGA model.
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_sdc;	/* resolved SDC module */
+	int mn_instance;	/* module instance number */
+
+	/* CTRL register fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* STAT register fields */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* counter and fill-level fields */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_sdc_t *nthw_sdc_new(void);
+/* Resolve registers/fields for instance n_instance; 0 on success, -1 on error. */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+/* Scrub and free a context; NULL-safe. */
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+/* Poll for a healthy state; returns number of failed fields (0 = all ok). */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+/* Snapshot the health state; returns error count (0 = all ok) or -1. */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/* Allocate a zero-initialized Si5340 context; returns NULL on allocation failure. */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc gives the same zero-filled result as malloc + memset */
+	return calloc(1, sizeof(nthw_si5340_t));
+}
+
+/*
+ * Bind the Si5340 context to its I2C bus/address and select register page 0
+ * so subsequent paged accesses start from a known state. Always returns 0;
+ * the I2C write result is not checked here.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1;	/* no clock profile applied yet */
+
+	/* force the device's page register to page 0 */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Scrub and release an Si5340 context; safe to call with NULL. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Read one register (via I2C) from the clock synthesizer IC (typically at
+ * 7-bit I2C address 0x74). The 16-bit reg_addr encodes page (high byte) and
+ * offset (low byte); the device's page register is switched only when the
+ * cached page differs. Not thread-safe: the cached page is per-context.
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;	/* remember current device page */
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write one register (via I2C) of the clock synthesizer IC (typically at
+ * 7-bit I2C address 0x74). Page handling is identical to nthw_si5340_read().
+ * Always returns 0; the I2C write result is not checked.
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;	/* remember current device page */
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Write a clock-profile table of (register, value) pairs to the Si5340 and
+ * verify each write by reading it back. p_data points to an array of
+ * data_cnt entries whose layout is selected by data_format (fmt1 or fmt2).
+ * Returns 0 on success, -1 on an unknown format or a readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* decode the next table entry according to its layout */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		/* verify the write actually took effect */
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and wait (up to ~5 s) for the device to report the
+ * DPLL locked and SYS calibrated; then read out the 8-character design id
+ * for debug logging. Returns 0 on success, -1 when lock/calibration is not
+ * reached. NOTE(review): the result of nthw_si5340_cfg() is deliberately
+ * ignored — the lock/sticky polling below is the effective success check.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		/* reg 0x0c = live status, reg 0x11 = sticky status (cleared after read) */
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		/* bits 0x09: SYSINCAL and LOL must both be clear */
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* read the NUL-terminated design id string from registers 0x26B.. */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a format-1 clock profile table. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a format-2 clock profile table. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Context for one Si5340 clock synthesizer reached over I2C. */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C bus handle (not owned here) */
+	int mn_clk_cfg;	/* applied clock profile id; -1 = none */
+	uint8_t m_si5340_page;	/* cached value of the device's page register */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_si5340_t *nthw_si5340_new(void);
+/* Bind to the I2C bus/address and reset the device page to 0; returns 0. */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+/* Scrub and free a context; NULL-safe. */
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+/* Apply a clock profile and wait for DPLL lock; 0 on success, -1 on failure. */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/* Allocate a zero-initialized SPI v3 context; returns NULL on allocation failure. */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc gives the same zero-filled result as malloc + memset */
+	return calloc(1, sizeof(nthw_spi_v3_t));
+}
+
+/* Release the owned SPIM/SPIS sub-modules, then scrub and free the context; NULL-safe. */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p == NULL)
+		return;
+
+	if (p->mp_spim_mod != NULL) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+
+	if (p->mp_spis_mod != NULL) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Set the transfer timeout. NOTE(review): the value is compared against
+ * NT_OS_GET_TIME_MONOTONIC_COUNTER() deltas in the wait helpers — confirm
+ * the intended unit (ticks vs. seconds); init uses 1. Always returns 0.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* SPI protocol version implemented by this driver (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;	/* version is fixed; the instance is not consulted */
+	return 3;
+}
+
+/*
+ * Poll (every 1 ms) until the SPIM Tx FIFO is empty, i.e. all queued Tx data
+ * have been clocked out. Returns 0 on success, the module's error code on a
+ * query failure, or -1 when time_out monotonic-counter units elapse first.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	const uint64_t n_start = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	for (;;) {
+		bool b_empty;
+		int n_result;
+
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		n_result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &b_empty);
+		if (n_result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return n_result;
+		}
+
+		if (b_empty)
+			return 0;
+
+		if ((NT_OS_GET_TIME_MONOTONIC_COUNTER() - n_start) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+}
+
+/*
+ * Poll (every 10 ms) until the SPIS Rx FIFO holds data. Returns 0 on success,
+ * the module's error code on a query failure, or -1 when time_out
+ * monotonic-counter units elapse first.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;	/* data available */
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/*
+ * Hex-dump a buffer to the debug log, 16 bytes per line.
+ * Fix vs. original: unbounded sprintf into the fixed buffer replaced with a
+ * bounded snprintf (each byte consumes 3 chars "XX "; 16 per line fits, but
+ * the bound makes that invariant explicit and safe).
+ */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		snprintf(&tmp_str[j * 3], sizeof(tmp_str) - (size_t)(j * 3),
+			 "%02X ", *(p_data++));
+		j++;
+
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0';	/* drop trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Create, initialize and reset the SPIM (master) and SPIS (slave) sub-modules
+ * for the given FPGA instance, and arm a default timeout of 1. Errors are
+ * logged but do not abort the sequence; the last step's result is returned
+ * (0 on success).
+ *
+ * Fix vs. original: the SPIM/SPIS init error messages were swapped — a SPIM
+ * init failure logged "nthw_spis_init failed" and vice versa.
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS
+ * module. Both directions are wrapped in a SPI v3 container: a 32-bit header
+ * (opcode+size on Tx, error_code+size on Rx, big-endian on the wire)
+ * followed by the payload in 32-bit words.
+ *
+ * On return rx_buf->size holds the number of payload bytes received (its
+ * incoming value is the capacity limit). Returns 0 on success, a negative
+ * module/timeout code on transport failure, -1 when the peer reports an
+ * error code, and 1 when the response exceeds the Rx buffer capacity.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+	/* packed so the two 16-bit fields exactly overlay the 32-bit word */
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	/* header goes out first, big-endian */
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* stream the payload into the FIFO in 32-bit chunks; the last
+		 * (possibly partial) word is zero-padded
+		 */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;	/* zero-pad the final word */
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;	/* becomes the received-byte count */
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;	/* whole payload consumed */
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the on-board AVR microcontroller (SPI protocol). */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD (vital product data) handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following commands are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM layout of the Vital Product Data block (Gen2 boards). */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (GEN2_BNAME_SIZE = 14 bytes ascii;
+	 * e.g. "ntmainb1e2" or "ntfront20b1")
+	 */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded, NUL-terminated board identification derived from the VPD block. */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];   /* +1 for NUL terminator */
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;     /* number of MAC addresses assigned to board */
+	uint8_t mac_address[6]; /* first/base MAC address */
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Generic buffer descriptor used for SPI transfer payloads. */
+struct tx_rx_buf {
+	uint16_t size; /* valid bytes in p_buf */
+	void *p_buf;   /* caller-owned storage */
+};
+
+/* SPI v3 channel state: paired SPI master (SPIM) and slave (SPIS) modules. */
+struct nthw__spi__v3 {
+	int m_time_out;          /* per-operation timeout used by transfers */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/*
+ * Allocate a zero-initialized SPIM instance.
+ * Returns NULL on allocation failure. Caller owns the memory and must
+ * release it with nthw_spim_delete().
+ */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Bind an SPIM (SPI master) instance to its FPGA module and cache handles
+ * to all registers/fields used by the driver.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Note: when p is NULL the call only probes for module presence.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	/* Probe-only mode: report presence without initializing state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* Control register: loopback, enable, TX/RX FIFO resets */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* Status register: transfer done and FIFO empty/full/level flags */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* Data transmit register (TX FIFO write port) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	/* Data receive register (RX FIFO read port) */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	/* Configuration register (clock prescaler) */
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/*
+ * Destroy an SPIM instance created by nthw_spim_new().
+ * Safe to call with NULL; the object is scrubbed before release.
+ */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Issue a software reset of the SPIM core.
+ * 0x0A is the hardcoded reset key required by the register documentation.
+ * Always returns 0.
+ */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst, 0x0A);
+	register_flush(p->mp_reg_srr, 1);
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIM core via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+	else
+		field_clr_all(p->mp_fld_cr_en);
+
+	field_flush_register(p->mp_fld_cr_en);
+	return 0;
+}
+
+/*
+ * Push one 32-bit word into the SPIM TX FIFO (write DTR and flush).
+ * Always returns 0.
+ */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/*
+ * Query whether the SPIM TX FIFO is empty.
+ * Reads the freshly-updated SR.TXEMPTY flag into *pb_empty.
+ * pb_empty must be non-NULL. Always returns 0.
+ */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* the "? true : false" ternary was redundant for a bool target */
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) != 0;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/*
+ * SPIM (SPI master) instance state: FPGA module handle plus cached
+ * register and field handles resolved once by nthw_spim_init().
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control (loopback, enable, FIFO resets) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR: status (done flag, FIFO empty/full/level) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR: data transmit (TX FIFO write port) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: data receive (RX FIFO read port) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	/* CFG: configuration (clock prescaler) */
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+/* Lifetime: new -> init -> (use) -> delete. All ops return 0 on success. */
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/*
+ * Allocate a zero-initialized SPIS instance.
+ * Returns NULL on allocation failure. Caller owns the memory and must
+ * release it with nthw_spis_delete().
+ */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Bind an SPIS (SPI slave) instance to its FPGA module and cache handles
+ * to all registers/fields used by the driver.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Note: when p is NULL the call only probes for module presence.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	/* Probe-only mode: report presence without initializing state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* SRR: software reset */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* CR: control (loopback, enable, FIFO resets, debug) */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* SR: status (done, FIFO state, frame/read/write error flags) */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* DTR: data transmit (TX FIFO write port) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	/* DRR: data receive (RX FIFO read port) */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* RAM_CTRL/RAM_DATA: indexed access to sensor result memory */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/*
+ * Destroy an SPIS instance created by nthw_spis_new().
+ * Safe to call with NULL; the object is scrubbed before release.
+ */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Issue a software reset of the SPIS core.
+ * 0x0A is the hardcoded reset key required by the register documentation.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst, 0x0A);
+	register_flush(p->mp_reg_srr, 1);
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIS core via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+	else
+		field_clr_all(p->mp_fld_cr_en);
+
+	field_flush_register(p->mp_fld_cr_en);
+	return 0;
+}
+
+/*
+ * Query whether the SPIS RX FIFO is empty.
+ * Reads the freshly-updated SR.RXEMPTY flag into *pb_empty.
+ * pb_empty must be non-NULL. Always returns 0.
+ */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* the "? true : false" ternary was redundant for a bool target */
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) != 0;
+
+	return 0;
+}
+
+/*
+ * Pop one 32-bit word from the SPIS RX FIFO into *p_data.
+ * p_data must be non-NULL. Always returns 0.
+ */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM.
+ *
+ * Writes the result index and a count of 1 to RAM_CTRL, flushes the
+ * control register, then reads the selected word via RAM_DATA.
+ * p_sensor_result must be non-NULL. Always returns 0.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	/* select the RAM address first; the flush must complete before the
+	 * data read below returns the addressed entry
+	 */
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/*
+ * SPIS (SPI slave) instance state: FPGA module handle plus cached
+ * register and field handles resolved once by nthw_spis_init().
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control (loopback, enable, FIFO resets, debug) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR: status (done, FIFO state, frame/read/write errors) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR: data transmit (TX FIFO write port) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: data receive (RX FIFO read port) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* RAM_CTRL/RAM_DATA: indexed sensor result memory access */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+/* Lifetime: new -> init -> (use) -> delete. All ops return 0 on success. */
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate a zero-initialized TSM instance.
+ * Returns NULL on allocation failure. Caller owns the memory and must
+ * release it with nthw_tsm_delete().
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/*
+ * Destroy a TSM instance created by nthw_tsm_new().
+ * Safe to call with NULL; the object is scrubbed before release.
+ */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TSM (time sync module) instance to its FPGA module and cache
+ * handles to the config, timer-control, timer and time/timestamp
+ * registers/fields.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Note: when p is NULL the call only probes for module presence.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	/* Probe-only mode: report presence without initializing state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse the cached register handle rather than querying the
+		 * module twice for the same register (was duplicated lookups).
+		 */
+		p->mp_reg_time_lo = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_fld_time_lo =
+			register_get_field(p->mp_reg_time_lo, TSM_TIME_LO_NS);
+
+		p->mp_reg_time_hi = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_fld_time_hi =
+			register_get_field(p->mp_reg_time_hi, TSM_TIME_HI_SEC);
+
+		p->mp_reg_ts_lo = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_fld_ts_lo =
+			register_get_field(p->mp_reg_ts_lo, TSM_TS_LO_TIME);
+
+		p->mp_reg_ts_hi = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_fld_ts_hi =
+			register_get_field(p->mp_reg_ts_hi, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit timestamp (TS_HI:TS_LO) into *p_ts.
+ * Returns 0 on success, -1 if p_ts is NULL.
+ *
+ * NOTE(review): HI and LO are fetched with two separate register reads,
+ * so the combined value could tear if the counter carries between the
+ * reads - confirm whether the hardware latches HI on the LO access.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* p_ts was NULL-checked above; the old second check was dead code */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit time (TIME_HI seconds : TIME_LO ns) into *p_time.
+ * Returns 0 on success, -1 if p_time is NULL.
+ *
+ * NOTE(review): HI and LO are fetched with two separate register reads,
+ * so the combined value could tear if the counter carries between the
+ * reads - confirm whether the hardware latches HI on the LO access.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* p_time was NULL-checked above; the old second check was dead code */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit time: lower 32 bits to TIME_LO, upper 32 to TIME_HI.
+ * Each half is written and flushed independently. Always returns 0.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	field_set_val_flush32(p->mp_fld_time_lo, (n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/*
+ * Enable or disable timer T0 via TIMER_CTRL.TIMER_EN_T0.
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t0);
+
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+
+	return 0;
+}
+
+/*
+ * Set the wrap value of timer T0 (the stat toggle timer).
+ * n_timer_val is in ns; typical value 50*1000*1000. Always returns 0.
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/*
+ * Enable or disable timer T1 via TIMER_CTRL.TIMER_EN_T1.
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t1);
+
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+
+	return 0;
+}
+
+/*
+ * Set the wrap value of timer T1 (the keep-alive timer).
+ * n_timer_val is in ns; typical value 100*1000*1000. Always returns 0.
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/*
+ * Select the timestamp format via CONFIG.TS_FORMAT.
+ * Always returns 0.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * TSM (time sync module) instance state: FPGA module handle plus cached
+ * register and field handles resolved once by nthw_tsm_init().
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	/* CONFIG.TS_FORMAT: timestamp format selection */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL: per-timer enables */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	/* TIMER_T0/T1: wrap values */
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* TS_LO/TS_HI: 64-bit timestamp split across two registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO/TIME_HI: 64-bit time (ns / seconds) split across two registers */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+/* Lifetime: new -> init -> (use) -> delete. All ops return 0 on success. */
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate a zero-initialized DBS instance.
+ * Returns NULL on allocation failure. Caller owns the memory and must
+ * release it with nthw_dbs_delete().
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/*
+ * Destroy a DBS instance created by nthw_dbs_new().
+ * Safe to call with NULL; the object is scrubbed before release.
+ */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the RX control register to its defaults: all queue machinery
+ * disabled, scan speed 8 and used-writer update speed 5, then flush the
+ * register to hardware. Always returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset the TX control register to its defaults: all queue machinery
+ * disabled, scan speed 5 and used-writer update speed 8 (note: mirrored
+ * relative to the RX defaults), then flush to hardware. Always returns 0.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: restore RX/TX control registers to defaults, then clear
+ * every entry of each per-queue memory bank (AM, UW, DR, QP, QOS) by zeroing
+ * the shadow copy and flushing it to hardware, one queue at a time.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the RX control register: last active queue, available-monitor
+ * enable/scan speed, used-writer enable/update speed and RX queue enable.
+ * The register is flushed to hardware before returning. Always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Compile-time debug dump of all register values being written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all RX control register fields into the caller's variables.
+ * All output pointers must be non-NULL. Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the TX control register: last active queue, available-monitor
+ * enable/scan speed, used-writer enable/update speed and TX queue enable.
+ * The register is flushed to hardware before returning. Always returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Compile-time debug dump of all register values being written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Was missing: keep the dump complete and consistent with set_rx_control() */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all TX control register fields into the caller's variables.
+ * All output pointers must be non-NULL. Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Start/stop initialization of one RX queue. When the optional RX_INIT_VAL
+ * register exists it is loaded with the start index/pointer first; the
+ * init command and queue number are then written and flushed. Returns 0.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	/* RX_INIT_VAL is not present on all FPGA images */
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX init register: init command, queue number and the
+ * busy flag indicating an initialization still in progress. Returns 0.
+ */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Start/stop initialization of one TX queue. When the optional TX_INIT_VAL
+ * register exists it is loaded with the start index/pointer first; the
+ * init command and queue number are then written and flushed. Returns 0.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	/* TX_INIT_VAL is not present on all FPGA images */
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX init register: init command, queue number and the
+ * busy flag indicating an initialization still in progress. Returns 0.
+ */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for one RX queue via the optional RX_IDLE register.
+ * Returns -ENOTSUP if the register is absent on this FPGA, otherwise 0.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX idle status. *queue is always reported as 0 (the queue
+ * field is not read back); idle and busy are fetched with field_get_updated.
+ * Returns -ENOTSUP if the RX_IDLE register is absent, otherwise 0.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for one TX queue via the optional TX_IDLE register.
+ * Returns -ENOTSUP if the register is absent on this FPGA, otherwise 0.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX idle status. *queue is always reported as 0 (the queue
+ * field is not read back); idle and busy are fetched with field_get_updated.
+ * Returns -ENOTSUP if the TX_IDLE register is absent, otherwise 0.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the RX_PTR register reports on (see get_rx_ptr()).
+ * Returns -ENOTSUP if the optional RX_PTR register is absent, otherwise 0.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX queue pointer for the queue previously selected with
+ * set_rx_ptr_queue(). *queue is always reported as 0; ptr and valid are
+ * fetched with field_get_updated. Returns -ENOTSUP if RX_PTR is absent.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the TX_PTR register reports on (see get_tx_ptr()).
+ * Returns -ENOTSUP if the optional TX_PTR register is absent, otherwise 0.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer for the queue previously selected with
+ * set_tx_ptr_queue(). *queue is always reported as 0; ptr and valid are
+ * fetched with field_get_updated. Returns -ENOTSUP if TX_PTR is absent.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Point the RX AM bank control register at one entry (count = 1) and flush. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/*
+ * Per-field setters for the RX available-monitor (AM) shadow bank.
+ * They only update m_rx_am_shadow[index] in host memory; nothing reaches
+ * the hardware until flush_rx_am_data() is called.
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Populate one complete RX available-monitor shadow entry. Pure host-memory
+ * bookkeeping; the field order is irrelevant and flush_rx_am_data() is what
+ * eventually transfers the entry to hardware.
+ */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+}
+
+/*
+ * Write one RX AM shadow entry into the AM data register and flush it to
+ * the bank entry selected by set_rx_am_data_index(). The 64-bit guest
+ * physical address is written as two 32-bit words. The packed and int
+ * fields are optional and only written when the FPGA exposes them.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: set one RX available-monitor entry (shadow update followed
+ * by a hardware flush). Returns -ENOTSUP if the AM data register is absent.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Point the TX AM bank control register at one entry (count = 1) and flush. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/*
+ * Populate one complete TX available-monitor shadow entry. Pure host-memory
+ * bookkeeping; assignment order is irrelevant and flush_tx_am_data() is what
+ * eventually transfers the entry to hardware.
+ */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/*
+ * Write one TX AM shadow entry into the AM data register and flush it to
+ * the bank entry selected by set_tx_am_data_index(). The 64-bit guest
+ * physical address is written as two 32-bit words. The packed and int
+ * fields are optional and only written when the FPGA exposes them.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: set one TX available-monitor entry (shadow update followed
+ * by a hardware flush). Returns -ENOTSUP if the AM data register is absent.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Point the RX UW bank control register at one entry (count = 1) and flush. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/*
+ * Per-field setters for the RX used-writer (UW) shadow bank.
+ * They only update m_rx_uw_shadow[index] in host memory; nothing reaches
+ * the hardware until flush_rx_uw_data() is called.
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/*
+ * Populate one complete RX used-writer shadow entry. Pure host-memory
+ * bookkeeping; the field order is irrelevant and flush_rx_uw_data() is
+ * what eventually transfers the entry to hardware.
+ */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_istk(p, index, istk);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+}
+
+/*
+ * Write one RX UW shadow entry into the UW data register and flush it to
+ * the bank entry selected by set_rx_uw_data_index(). The 64-bit guest
+ * physical address is written as two 32-bit words. For DBS modules newer
+ * than version 0.8 the queue size is encoded as (1 << qs) - 1 (entry count
+ * minus one) instead of the raw exponent. packed/int/vec/istk are optional
+ * fields, only written when the FPGA exposes them.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: set one RX used-writer entry (shadow update followed by a
+ * hardware flush). Returns -ENOTSUP if the UW data register is absent.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the TX UW bank control register at one entry (count = 1) and flush. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/*
+ * Per-field setters for the TX used-writer (UW) shadow bank.
+ * They only update m_tx_uw_shadow[index] in host memory; nothing reaches
+ * the hardware until flush_tx_uw_data() is called.
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/*
+ * Populate one complete TX used-writer shadow entry. Pure host-memory
+ * bookkeeping; the field order is irrelevant and flush_tx_uw_data() is
+ * what eventually transfers the entry to hardware.
+ */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+}
+
+/*
+ * Write one TX UW shadow entry into the UW data register and flush it to
+ * the bank entry selected by set_tx_uw_data_index(). The 64-bit guest
+ * physical address is written as two 32-bit words. For DBS modules newer
+ * than version 0.8 the queue size is encoded as (1 << qs) - 1 instead of
+ * the raw exponent. packed/int/vec/istk/in_order are optional fields,
+ * only written when the FPGA exposes them.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: set one TX used-writer entry (shadow update followed by a
+ * hardware flush). Returns -ENOTSUP if the UW data register is absent.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the RX DR bank control register at one entry (count = 1) and flush. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/*
+ * Per-field setters for the RX descriptor-reader (DR) shadow bank.
+ * They only update m_rx_dr_shadow[index] in host memory; nothing reaches
+ * the hardware until flush_rx_dr_data() is called.
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/*
+ * Populate one complete RX descriptor-reader shadow entry. Pure host-memory
+ * bookkeeping; the field order is irrelevant and flush_rx_dr_data() is
+ * what eventually transfers the entry to hardware.
+ */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_packed(p, index, packed);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+}
+
+/*
+ * Write one RX DR shadow entry into the DR data register and flush it to
+ * the bank entry selected by set_rx_dr_data_index(). The 64-bit guest
+ * physical address is written as two 32-bit words. For DBS modules newer
+ * than version 0.8 the queue size is encoded as (1 << qs) - 1 instead of
+ * the raw exponent. The packed field is optional and only written when
+ * the FPGA exposes it.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: set one RX descriptor-reader entry (shadow update followed
+ * by a hardware flush). Returns -ENOTSUP if the DR data register is absent.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX DR bank control register at one entry (count = 1) and flush. */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/*
+ * Per-field setters for the TX descriptor-reader (DR) shadow bank.
+ * They only update m_tx_dr_shadow[index] in host memory; nothing reaches
+ * the hardware until flush_tx_dr_data() is called.
+ */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/* Write shadow entry @index out to the TX DR data register bank. */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	/*
+	 * NOTE(review): the 64-bit guest address is passed as two 32-bit
+	 * words via a pointer cast — assumes a little-endian host and that
+	 * field_set_val() tolerates the aliasing; confirm.
+	 */
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		/*
+		 * Newer DBS modules (> v0.8) take the size as a 2^n - 1
+		 * mask; queue_size holds the exponent (assumed < 32 —
+		 * TODO confirm, shift would otherwise be UB).
+		 */
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	/* The packed field only exists on some FPGA images. */
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program one TX descriptor reader (DR) bank entry.
+ * Stages the values in the local shadow copy, then flushes that shadow
+ * entry to the FPGA bank at @index.
+ *
+ * Returns 0 on success, -ENOTSUP when this FPGA image has no TX DR
+ * data register (feature absent).
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Select TX queue-property bank entry @index (single-entry write). */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow-only setter for the virtual-port member of a TX QP entry. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+/* Update the complete TX QP shadow entry (currently just virtual_port). */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write shadow entry @index out to the TX queue-property data register. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Map TX queue @index to @virtual_port in the queue-property bank.
+ * Returns 0 on success, -ENOTSUP when the FPGA image has no TX
+ * queue-property data register.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Select TX QoS bank entry @index (single-entry write).
+ * NOTE(review): despite the mp_reg_ prefix, the _adr/_cnt members are
+ * nt_field_t handles (see nthw_dbs.h) — consider renaming to mp_fld_.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow-only setters: update the host-side copy of one TX QoS entry. */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Update every member of the TX QoS shadow entry @index in one call. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write shadow entry @index out to the TX QoS data register bank. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Program one TX QoS bank entry (enable flag, information rate, burst
+ * size). Returns 0 on success, -ENOTSUP when the FPGA image has no TX
+ * QoS data register.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Set the global TX QoS rate as a mul/div fraction (no shadow is kept
+ * for this register). Returns 0 on success, -ENOTSUP when the FPGA
+ * image has no TX QoS rate register.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+/* Capacity of the RX/TX shadow banks kept in struct nthw_dbs_s. */
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Structs for implementation of memory bank shadows.
+ * Each struct mirrors one entry of the corresponding DBS register bank
+ * so the driver can rewrite a full entry without reading hardware.
+ */
+
+/* DBS_RX_AM_DATA: one RX available-monitor bank entry */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA: one TX available-monitor bank entry */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA: one RX used-writer bank entry */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA: one TX used-writer bank entry */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA: one RX descriptor-reader bank entry */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: one TX descriptor-reader bank entry */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: one TX queue-property bank entry */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* One TX QoS bank entry (enable, information rate, burst size) */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * Handle for the DBS (doorbell/queue-supervisor) FPGA module: cached
+ * register and field handles plus host-side shadows of the write-only
+ * register banks.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	/* Global RX/TX engine control */
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	/* Queue init / idle / pointer status */
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Available-monitor (AM) banks */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	/* Used-writer (UW) banks */
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	/* Descriptor-reader (DR) banks */
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	/* TX queue-property (QP) bank */
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/*
+	 * TX QoS bank.
+	 * NOTE(review): the _adr/_cnt/_en/_ir/_bs/_mul/_div members below
+	 * are nt_field_t handles but carry an mp_reg_ prefix; renaming to
+	 * mp_fld_ (here and in nthw_dbs.c) would match the rest of the file.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Host-side shadows of the write-only register banks above */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Lifetime: allocate / free / bind to an FPGA instance / soft-reset. */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/*
+ * Register accessors. Unless noted otherwise they return 0 on success
+ * and -ENOTSUP when the corresponding register is absent from the
+ * FPGA image.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+/* Shadowed bank writers (AM/UW/DR/QP/QoS) — see nthw_dbs.c. */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Logical port class as seen by the driver (not a hardware value). */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Board identity as reported by firmware, plus VPD-derived MAC data. */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-adapter FPGA context: identity/build info, port counts, handles
+ * to the instantiated hardware modules, and BAR0 mapping details.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Port/NIM topology of this FPGA image */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	/* Module handles; NULL when the module is absent from the image */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/*
+ * Allocate a zero-initialized EPP instance.
+ * Returns NULL on allocation failure; the caller owns the object and
+ * must release it with nthw_epp_delete().
+ */
+nthw_epp_t *nthw_epp_new(void)
+{
+	/* calloc() zero-fills in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/*
+ * Destroy an EPP instance previously returned by nthw_epp_new().
+ * Safe to call with NULL; the object is scrubbed before release.
+ */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_epp_t));
+	free(p);
+}
+
+/*
+ * Probe-only check: non-zero when EPP instance @n_instance exists in
+ * the FPGA (uses nthw_epp_init's NULL-handle probe mode).
+ */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	const int rc = nthw_epp_init(NULL, p_fpga, n_instance);
+
+	return rc == 0;
+}
+
+/*
+ * Bind an EPP handle to FPGA module instance @n_instance and cache all
+ * register/field handles.
+ *
+ * Probe mode: when @p is NULL, only checks whether the module exists
+ * (returns 0 if present, -1 if not) without touching any state.
+ * Returns 0 on success, -1 when the instance is absent.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory (control + data) */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical (TXP) port MTU */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Virtual queue MTU */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Physical (TXP) port QoS */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual port QoS */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue-to-vport mapping */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * One-time EPP bring-up: clear every recipe entry, program the NRECIPE
+ * default recipes, and set all port/queue MTU and QoS entries to their
+ * startup values. No-op (returns 0) when @p is NULL.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one recipe entry per flush in the loops below */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Program the default recipes (size adjusts from the header tables) */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* Physical port MTU: default MTUINITVAL on both ports */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* Physical port QoS: disabled on both ports */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* Virtual queue MTU: default MTUINITVAL on all 128 queues */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* Virtual port QoS: disabled on all 128 entries */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ * Physical ports use the TXP MTU bank, virtual ports the queue MTU
+ * bank. No-op (returns 0) when @p is NULL; returns -ENOTSUP (after
+ * resetting both MTU banks) for unsupported port types.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select @port in the TXP MTU control register */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new MTU via the TXP MTU data register */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select @port in the queue MTU control register */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new MTU via the queue MTU data register */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Restore both MTU banks to reset defaults before failing */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Program QoS for physical TX port @port. The entry is enabled iff any
+ * of rate, fractional rate or burst size is non-zero. No-op (returns 0)
+ * when @p is NULL.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Program QoS for virtual port @port (mirror of nthw_epp_set_txp_qos
+ * for the vport bank). The entry is enabled iff any of rate, fractional
+ * rate or burst size is non-zero. No-op (returns 0) when @p is NULL.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Map queue @qid to virtual port @vport in the queue/vport bank.
+ * No-op (returns 0) when @p is NULL.
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6 adds extra 70 bytes */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Initial MTU value programmed at setup */
+#define MTUINITVAL 1500
+/* Number of EPP recipe entries */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+/* NOTE(review): defined 'static' in a header, so every including translation
+ * unit gets its own copy (and possible unused-variable warnings) - consider
+ * moving the definitions to a .c file.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/* Register and field handles for one EPP module instance. */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;		/* owning FPGA instance */
+	nt_module_t *mp_mod_epp;	/* EPP module handle */
+	int mn_instance;		/* module instance number */
+	int mn_epp_categories;		/* number of EPP categories */
+
+	/* Recipe memory (size-adjust/MTU-enable configuration) */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Per-TX-port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Per-queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* TX-port QoS shaping */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual-port QoS shaping */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue-to-virtual-port mapping */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+/* Constructor/destructor pair; nthw_epp_new() returns an uninitialized object. */
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated table of all supported FPGA product descriptions. */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/* Map from numeric module id (MOD_*) to printable module name;
+ * terminated by a {0, NULL} sentinel entry.
+ */
+static const struct {
+	const int a;	/* module id (MOD_*) */
+	const char *b;	/* printable name; NULL marks the sentinel */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Printable name for a bus type id.
+ * Valid indices are 1..ARRAY_SIZE(a_bus_type)-1; anything else (including
+ * index 0, the BUS_TYPE_UNKNOWN placeholder) reports "ERR".
+ * Fix: the original used '<=' which read one element past the end of
+ * a_bus_type when n_bus_type_id == ARRAY_SIZE(a_bus_type).
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ * Fix: the original loop bound 'i <= ARRAY_SIZE(...)' read one element
+ * past the end of the array, and indexed out of bounds in the return
+ * expression when no entry matched. Iterate up to the {0, NULL} sentinel
+ * instead.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; sa_nthw_fpga_mod_map[i].b != NULL; i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			return sa_nthw_fpga_mod_map[i].b;
+	}
+	return "unknown";
+}
+
+/*
+ * Read 'len' 32-bit words starting at 'addr' from the given bus into p_data.
+ * BAR/PCI access is single-word only; RAB0/1/2 go through the Register
+ * Access Bus helper with the matching bus number.
+ * Returns 0 on success, negative on failure.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* BAR/PCI path supports single-word access only */
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		/* Unknown bus type: programming error. */
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Same as read_data(), with placeholders for timestamp-counter capture.
+ * Timestamp capture is not implemented; both TSC outputs are ignored.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Write 'len' 32-bit words from p_data to 'addr' on the given bus.
+ * Mirror of read_data(): BAR/PCI is single-word only; RAB0/1/2 use the
+ * Register Access Bus helper. Returns 0 on success, negative on failure.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* BAR/PCI path supports single-word access only */
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		/* Unknown bus type: programming error. */
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	/* Zero-initialize so the manager starts from a known state,
+	 * consistent with fpga_new(); the original returned uninitialized
+	 * memory.
+	 */
+	nt_fpga_mgr_t *p = malloc(sizeof(nt_fpga_mgr_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Scrub the manager state and release it. */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	/* Attach the generated instance table and count its entries
+	 * (the table is NULL-terminated).
+	 */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Find the product description matching the packed FPGA id and create an
+ * initialized FPGA instance from it; returns NULL (with an error log) when
+ * no description matches.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	/* Decompose the packed id into product/version/revision. */
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id != n_fpga_prod)
+			continue;
+		if (p_init->fpga_version != n_fpga_ver ||
+				p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print all known FPGA product descriptions to 'fh_out'.
+ * detail_level == 0 prints only the id triplet; otherwise the build time
+ * is printed too.
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/* NOTE(review): '%08lX' assumes time_t is 'long' -
+			 * consider an explicit cast for portability. ctime()
+			 * output already ends with '\n'.
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision, fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Debug-log the id triplet of every known FPGA product description. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* '_unused' presumably guards against NT_LOG compiling to a
+		 * no-op in some configurations - confirm.
+		 */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+nt_fpga_t *fpga_new(void)
+{
+	/* Allocate and zero-fill a new FPGA instance. */
+	nt_fpga_t *p = malloc(sizeof(*p));
+
+	if (p != NULL)
+		memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub the instance and release it (owned objects are NOT freed here). */
+void fpga_delete(nt_fpga_t *p)
+{
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Delete the FPGA instance together with everything it owns. */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	/* Also release the parameter objects and the two pointer arrays
+	 * allocated in fpga_init(); the original leaked all of them.
+	 * free(NULL) is a no-op, so no guards are needed for the arrays.
+	 */
+	if (p->mpa_params) {
+		for (i = 0; i < p->mn_params; i++) {
+			nt_param_t *p_param = p->mpa_params[i];
+
+			if (p_param)
+				param_delete(p_param);
+		}
+	}
+	free(p->mpa_modules);
+	free(p->mpa_params);
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate the FPGA instance from its static product description:
+ * identity fields first, then the product parameters, then the module
+ * instances. Allocation failures are silently tolerated (the pointer
+ * arrays stay NULL and the corresponding objects are not created).
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	/* Cache the identification fields from the product description. */
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	/* Create one nt_param_t per product parameter. */
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	/* Create one nt_module_t per module instance. */
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+/* Set the debug mode on the FPGA and propagate it to every owned module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *p_mod = p->mpa_modules[idx];
+
+		if (p_mod != NULL)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+/* Find a live module instance by id/instance; NULL when not present. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *p_mod = p->mpa_modules[idx];
+
+		if (p_mod->m_mod_id == id && p_mod->m_instance == instance)
+			return p_mod;
+	}
+	return NULL;
+}
+
+/* True when the module instance exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/* Find the static init description (not the live module) by id/instance. */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mp_init->nb_modules; idx++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[idx];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/* Return the value of product parameter 'n_param_id', or the default. */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_params; idx++) {
+		const nt_param_t *p_param = p->mpa_params[idx];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Accessors for the FPGA identification fields. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the full FPGA identity (item/product/version/revision/patch/build). */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug dump of the FPGA summary line, then all params and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug dump of every product parameter. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (idx = 0; idx < p->mn_params; idx++)
+		param_dump(p->mpa_params[idx]);
+}
+
+/* Debug dump of every instantiated module. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (idx = 0; idx < p->mn_modules; idx++)
+		module_dump(p->mpa_modules[idx]);
+}
+
+/*
+ * Param
+ */
+nt_param_t *param_new(void)
+{
+	/* Uninitialized on purpose; caller must call param_init(). */
+	return malloc(sizeof(nt_param_t));
+}
+
+/* NULL-tolerant destructor. */
+void param_delete(nt_param_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Bind the parameter to its owner and cache id/value from the static init. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+nt_module_t *module_new(void)
+{
+	/* Uninitialized on purpose; caller must call module_init(). */
+	return malloc(sizeof(nt_module_t));
+}
+
+/* Delete the module together with all registers it owns. */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	/* Free the register pointer array allocated in module_init();
+	 * the original leaked it. free(NULL) is a no-op.
+	 */
+	free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Populate the module from its static init description and create its
+ * registers. Allocation failure leaves mpa_registers NULL (no registers
+ * are created in that case).
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	/* Create one nt_register_t per register description. */
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Initialize the module from the product description looked up by
+ * id/instance, then override the inherited debug mode.
+ * NOTE(review): fpga_lookup_init() returns NULL for an unknown
+ * id/instance pair and module_init() dereferences p_init - confirm that
+ * callers only pass known module ids.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug dump of the module identity followed by all of its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug dump of every register of the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (idx = 0; idx < p->mn_registers; idx++)
+		register_dump(p->mpa_registers[idx]);
+}
+
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack the version as major in the upper 32 bits, minor in the lower. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	const uint64_t major = (uint64_t)p->m_major_version & 0xFFFFFFFF;
+	const uint64_t minor = (uint64_t)p->m_minor_version & 0xFFFFFFFF;
+
+	return (major << 32) | minor;
+}
+
+/* True when the module version is >= the requested version. */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	/* Equal major: the minor decides; otherwise the major decides. */
+	return (major_version == p->m_major_version) ?
+	       (p->m_minor_version >= minor_version) :
+	       (p->m_major_version >= major_version);
+}
+
+/* Linear scan for a register by id; NULL when not present. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		if (p->mpa_registers[idx]->m_id == id)
+			return p->mpa_registers[idx];
+	}
+	return NULL;
+}
+
+/* Lookup with error logging on a missing register or NULL module. */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (p_register == NULL) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Silent variant of module_get_register(). */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the debug mode and propagate it to every register of the module. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		nt_register_t *p_register = p->mpa_registers[idx];
+
+		if (p_register != NULL)
+			register_set_debug_mode(p_register, n_debug_mode);
+	}
+}
+
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* Report an unsupported module (note: name typo kept - it is public API). */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* '\n' added for consistency with every other NT_LOG message. */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+nt_register_t *register_new(void)
+{
+	/* Uninitialized on purpose; caller must call register_init(). */
+	return malloc(sizeof(nt_register_t));
+}
+
+/* Delete the register together with its fields and shadow buffers. */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+
+	/* Free the field pointer array allocated in register_init();
+	 * the original leaked it. free(NULL) is a no-op, so no guards are
+	 * needed for any of these buffers.
+	 */
+	free(p->mpa_fields);
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Populate the register from its static description, compute its absolute
+ * address, and allocate the field objects plus the shadow/dirty buffers.
+ * Allocation failures leave the corresponding pointers NULL.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	/* Absolute address = module base + register-relative offset. */
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	/* Length in 32-bit words, rounded up from the bit width. */
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* Shadow copy of the register contents (one word per
+			 * 32 bits) and a per-word dirty flag buffer.
+			 */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug dump of the register header line followed by all of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug dump of every field of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (idx = 0; idx < p->mn_fields; idx++)
+		field_dump(p->mpa_fields[idx]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute register address (module base + relative offset). */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Apply field_reset() to every field of the register. */
+void register_reset(const nt_register_t *p)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *p_field = p->mpa_fields[idx];
+
+		if (p_field != NULL)
+			field_reset(p_field);
+	}
+}
+
+/* Linear scan for a field by id; NULL-tolerant, NULL when not present. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int idx;
+
+	if (p == NULL)
+		return NULL;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		if (p->mpa_fields[idx]->m_id == id)
+			return p->mpa_fields[idx];
+	}
+	return NULL;
+}
+
+/* Lookup with error logging on a missing field or NULL register. */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (p_field == NULL) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Silent variant of register_get_field(). */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Declared bit width of the whole register (may be -1 for legacy regs). */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Register address relative to the owning module's base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* NOTE(review): parameter is nt_module_t rather than nt_register_t -
+ * looks like a copy/paste slip. Both structs have m_debug_mode so it
+ * compiles; confirm the intended type before changing the signature.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *p_field = p->mpa_fields[idx];
+
+		if (p_field != NULL)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+/* Read the register contents from hardware into the shadow buffer. */
+static int register_read_data(const nt_register_t *p)
+{
+	int rc = -1;
+
+	/* NOTE(review): p is dereferenced by these initializers before the
+	 * 'if (p && ...)' guard below, so the guard cannot catch p == NULL.
+	 */
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+	return rc;
+}
+
+/*
+ * Read the register into the shadow buffer, with (currently unimplemented)
+ * timestamp capture hooks. See register_read_data() for the NULL-check
+ * caveat; this variant additionally skips the p_fpga_info assert.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	rc = read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+/*
+ * Write 'cnt' consecutive register-sized blocks from the shadow buffer to
+ * hardware (len * cnt words in total).
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	int rc = -1;
+
+	/* NOTE(review): as in register_read_data(), p is dereferenced before
+	 * the 'if (p && ...)' guard below.
+	 */
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+
+	return rc;
+}
+
+/*
+ * Copy up to 'len' words out of the shadow buffer (no hardware access).
+ * len == (uint32_t)-1 means "the whole register".
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t n;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (n = 0; n < len; n++)
+		p_data[n] = p->mp_shadow[n];
+}
+
+/* Convenience: first shadow word of the register. */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t value = 0;
+
+	register_get_val(p, &value, 1);
+	return value;
+}
+
+/*
+ * Refresh the shadow buffer from hardware. Write-only registers are
+ * skipped. With ON_READ debug mode the read is logged word by word.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Re-read from hardware, then return the first shadow word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every shadow word as needing a flush to hardware. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t n;
+
+	for (n = 0; n < p->m_len; n++)
+		p->mp_dirty[n] = true;
+}
+
+/*
+ * Copy up to 'len' words into the shadow buffer (no hardware write).
+ * len == (uint32_t)-1 means "the whole register".
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	/* Clamp the sentinel/oversized length before the bounds assert,
+	 * mirroring register_get_val(); the original asserted first, so
+	 * the documented len == (uint32_t)-1 sentinel tripped the assert
+	 * in debug builds.
+	 */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Update the shadow copy and write it to hardware immediately. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write 'cnt' register-sized blocks from the shadow buffer to hardware and
+ * clear the dirty flags. Read-only registers are skipped. With ON_WRITE
+ * debug mode the write is logged word by word.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			/* Renamed from 'i' - the original shadowed the outer
+			 * loop counter declared above.
+			 */
+			uint32_t n_words = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, n_words);
+			while (n_words--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/* NOTE(review): only 'cnt' dirty flags are cleared although
+		 * 'len * cnt' words were written - confirm mp_dirty indexing
+		 * is per-block rather than per-word.
+		 */
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = malloc(sizeof(nt_field_t));
+	return p;
+}
+
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
/*
 * Extract the field value from the owning register's shadow into
 * @p_data, least-significant word first.
 *
 * The field may start at an arbitrary bit offset and span several
 * 32-bit shadow words; a 64-bit staging union is used so each output
 * word can be assembled from two adjacent shadow words.
 *
 * @p      field descriptor (layout precomputed by field_init())
 * @p_data output buffer, must hold p->mn_words words
 * @len    number of words in @p_data (asserted == p->mn_words)
 */
void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
{
	uint32_t i;
	uint32_t data_index = 0;
	uint32_t shadow_index = p->m_first_word;

	/*
	 * Staging area: the w64 view lets a shift move bits across the
	 * w32[0]/w32[1] boundary.
	 * NOTE(review): this overlay assumes a little-endian layout of
	 * w32 within w64 - confirm the supported targets.
	 */
	union {
		uint32_t w32[2];
		uint64_t w64;
	} buf;

	(void)len;
	assert(len == p->mn_words);

	/* handle front: keep only the bits belonging to the field */
	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;

	/* handle body: full 32-bit words between front and tail */
	for (i = 0; i < p->m_body_length; i++) {
		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
		/* align the field start down to bit 0 and emit one word */
		buf.w64 = buf.w64 >> (p->m_first_bit);
		assert(data_index < len);
		p_data[data_index++] = buf.w32[0];
		/* position the leftover bits for the next iteration */
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
	}

	/* handle tail: mask the partial last word (if any) */
	if (p->m_tail_mask)
		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;

	else
		buf.w32[1] = 0;
	buf.w64 = buf.w64 >> (p->m_first_bit);
	p_data[data_index++] = buf.w32[0];
	/* a wide field may still have one more (high) word to emit */
	if (data_index < p->mn_words)
		p_data[data_index++] = buf.w32[1];
}
+
/*
 * Write the field value from @p_data into the owning register's
 * shadow, preserving all neighboring bits, then mark the register
 * dirty.  Inverse of field_get_val(); no hardware access.
 *
 * @p      field descriptor (layout precomputed by field_init())
 * @p_data source words, least-significant first; must hold p->mn_words
 * @len    number of words in @p_data (asserted == p->mn_words)
 */
void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
{
	uint32_t i;
	uint32_t data_index = 0;
	uint32_t shadow_index = p->m_first_word;

	/*
	 * Staging area: the w64 view shifts bits across the word boundary.
	 * NOTE(review): assumes little-endian w32/w64 overlay - confirm.
	 */
	union {
		uint32_t w32[2];
		uint64_t w64;
	} buf;

	(void)len;
	assert(len == p->mn_words);

	/* handle front: merge under m_front_mask to keep neighbor bits */
	buf.w32[0] = 0;
	buf.w32[1] = p_data[data_index++];
	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
	p->mp_owner->mp_shadow[shadow_index] =
		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
		(buf.w32[0] & p->m_front_mask);
	shadow_index++;

	/* handle body: whole words are owned by the field, store directly */
	for (i = 0; i < p->m_body_length; i++) {
		buf.w64 = buf.w64 >> (p->m_first_bit);
		assert(data_index < len);
		buf.w32[1] = p_data[data_index++];
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
	}

	/* handle tail: merge the partial last word under m_tail_mask */
	if (p->m_tail_mask) {
		buf.w64 = buf.w64 >> (p->m_first_bit);
		if (data_index < len)
			buf.w32[1] = p_data[data_index];
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
		p->mp_owner->mp_shadow[shadow_index] =
			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
			(buf.w32[0] & p->m_tail_mask);
	}

	/* shadow changed: schedule a hardware write on next flush */
	register_make_dirty(p->mp_owner);
}
+
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
/* Poll conditions understood by field_wait_cond32(). */
enum field_match {
	FIELD_MATCH_CLR_ALL,	/* wait until every field bit reads 0 */
	FIELD_MATCH_SET_ALL,	/* wait until every field bit reads 1 */
	FIELD_MATCH_CLR_ANY,	/* wait until at least one bit reads 0 */
	FIELD_MATCH_SET_ANY,	/* wait until at least one bit reads 1 */
};
+
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (1 << p->mn_bit_width) - 1;
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(((1 << p->mn_bit_width) - 1) << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
/*
 * Poll the field until its value equals (@n_wait_cond_value &
 * @n_wait_cond_mask).  -1 for iterations/interval selects the
 * defaults (10000 polls / 100 usec).
 * Returns 0 on match, -1 on timeout.
 */
int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
			uint32_t n_wait_cond_mask, int n_poll_iterations,
			int n_poll_interval)
{
	if (n_poll_iterations == -1)
		n_poll_iterations = 10000;
	if (n_poll_interval == -1)
		n_poll_interval = 100;

	while (true) {
		uint32_t val = field_get_updated(p);

		/*
		 * NOTE(review): only the expected value is masked; the
		 * read-back 'val' is not - confirm whether
		 * (val & mask) == (value & mask) was intended.
		 */
		if (val == (n_wait_cond_value & n_wait_cond_mask))
			break;
		n_poll_iterations--;
		if (n_poll_iterations <= 0)
			return -1; /* timeout */
		NT_OS_WAIT_USEC(n_poll_interval);
	}
	return 0;
}
+
/*
 * Log the field's static layout: id, bit range, width, word count and
 * reset value.  The _unused marker keeps builds quiet when NT_LOG
 * compiles to nothing.
 */
void field_dump(const nt_field_t *p _unused)
{
	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
	       p->mn_bit_width, p->mn_words, p->m_reset_val);
}
+
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
/* Log a field init record: id, bit range, width and 64-bit reset value. */
void field_dump_init(const nt_fpga_field_init_t *p _unused)
{
	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
}
+
+/*
+ * nthw fpga model helpers
+ */
+
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, n_mod, n_instance);
+	return p_mod;
+}
+
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	nt_register_t *p_reg = module_get_register(p_mod, n_reg);
+	return p_reg;
+}
+
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	nt_field_t *p_fld = register_get_field(p_reg, n_fld);
+	return p_fld;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
/*
 * Unpack the packed 64-bit FPGA ident: product type, product code,
 * version and revision occupy fixed bit ranges.
 */
#ifndef FPGAID_TO_PRODUCTCODE
#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
#endif

/* Pack a major/minor pair into one 64-bit comparable version value. */
#define VERSION_PACKED64(_major_, _minor_) \
	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))

/*
 * Debug trace control.  ON_READ (1) and ON_WRITE (2) are tested as
 * bit flags (m_debug_mode & ON_READ / ON_WRITE) by the model code.
 */
enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };

/* Register buses a module can sit on. */
enum nthw_bus_type {
	NTHW_BUS_UNKNOWN,
	NTHW_BUS_BAR,
	NTHW_BUS_PCI,
	NTHW_BUS_NMB,
	NTHW_BUS_NDM,
	NTHW_BUS_RAB0,
	NTHW_BUS_RAB1,
	NTHW_BUS_RAB2
};
+
struct nt_fpga_s;

struct nt_param_s;

struct nt_module_s;

struct nt_register_s;

struct nt_field_s;

/* Registry of supported FPGA product descriptions. */
struct nt_fpga_mgr_s {
	int mn_fpgas;				/* entries in the array below */
	struct nt_fpga_prod_init **mpa_fpga_prod_init;
};

typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;

/* Runtime model of one FPGA: identity, parameters and modules. */
struct nt_fpga_s {
	struct fpga_info_s *p_fpga_info;	/* adapter/bus context */

	int m_item_id;
	int m_product_id;
	int m_fpga_version;
	int m_fpga_revision;
	int m_fpga_patch_no;
	int m_fpga_build_no;
	uint32_t m_fpga_build_time;

	int mn_params;				/* product parameters */
	struct nt_param_s **mpa_params;

	int mn_modules;				/* instantiated modules */
	struct nt_module_s **mpa_modules;

	nt_fpga_prod_init_t *mp_init;		/* static init record */

	int m_debug_mode;
};

typedef struct nt_fpga_s nt_fpga_t;

/* One named product parameter (id/value pair) of an FPGA. */
struct nt_param_s {
	nt_fpga_t *mp_owner;

	int param_id;
	int param_value;

	nt_fpga_prod_param_t *mp_init;
};

typedef struct nt_param_s nt_param_t;

/* One module instance on the FPGA, holding a set of registers. */
struct nt_module_s {
	nt_fpga_t *mp_owner;

	int m_mod_id;

	int m_instance;			/* instance number of this module id */

	int m_mod_def_id;
	int m_major_version;
	int m_minor_version;

	int m_bus;			/* enum nthw_bus_type */
	uint32_t m_addr_base;

	int m_debug_mode;

	int mn_registers;
	struct nt_register_s **mpa_registers;

	nt_fpga_module_init_t *mp_init;
};

typedef struct nt_module_s nt_module_t;

/* One register: address, type, and a local shadow of its value. */
struct nt_register_s {
	nt_module_t *mp_owner;

	uint32_t m_id;

	uint32_t mn_bit_width;
	uint32_t mn_addr_rel;		/* address relative to the module */
	uint32_t m_addr;		/* absolute address */
	uint32_t m_type;		/* e.g. REGISTER_TYPE_RO */
	uint32_t m_len;			/* length in 32-bit words */

	int m_debug_mode;

	int mn_fields;
	struct nt_field_s **mpa_fields;

	uint32_t *mp_shadow;		/* local copy of the register value */
	bool *mp_dirty;			/* per-word pending-write flags */

	nt_fpga_register_init_t *mp_init;
};

typedef struct nt_register_s nt_register_t;

/* One bit field within a register; layout precomputed by field_init(). */
struct nt_field_s {
	nt_register_t *mp_owner;

	uint32_t m_id;

	uint32_t mn_bit_width;
	uint32_t mn_bit_pos_low;
	uint32_t m_reset_val;
	uint32_t m_first_word;		/* shadow word holding the first bit */
	uint32_t m_first_bit;		/* bit offset within that word */
	uint32_t m_front_mask;		/* field bits in the first word */
	uint32_t m_body_length;		/* full words between front and tail */
	uint32_t mn_words;		/* words needed to hold the value */
	uint32_t m_tail_mask;		/* field bits in the last word */

	int m_debug_mode;

	nt_fpga_field_init_t *mp_init;
};

typedef struct nt_field_s nt_field_t;
+
/* FPGA manager: lifecycle and product lookup */
nt_fpga_mgr_t *fpga_mgr_new(void);
void fpga_mgr_init(nt_fpga_mgr_t *p);
void fpga_mgr_delete(nt_fpga_mgr_t *p);
nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
			     struct fpga_info_s *p_fpga_info);

void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);

/* FPGA model: lifecycle, identity and module queries */
nt_fpga_t *fpga_new(void);
void fpga_delete(nt_fpga_t *p);
void fpga_delete_all(nt_fpga_t *p);
void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
	       struct fpga_info_s *p_fpga_info);

int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
			 const int default_value);
int fpga_get_product_id(const nt_fpga_t *p);
int fpga_get_fpga_version(const nt_fpga_t *p);
int fpga_get_fpga_revision(const nt_fpga_t *p);
nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
bool fpga_query(nt_fpga_t *p, int id, int instance);
void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);

void fpga_log_info(const nt_fpga_t *p);
void fpga_dump(const nt_fpga_t *p);
void fpga_dump_params(const nt_fpga_t *p);
void fpga_dump_modules(const nt_fpga_t *p);

/* Product parameters */
nt_param_t *param_new(void);
void param_delete(nt_param_t *p);
void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);

void param_dump(const nt_param_t *p);

/* Modules */
nt_module_t *module_new(void);
void module_delete(nt_module_t *p);
void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
		 nt_fpga_module_init_t *p_init);
void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
		  int debug_mode);

int module_get_major_version(const nt_module_t *p);
int module_get_minor_version(const nt_module_t *p);
uint64_t module_get_version_packed64(const nt_module_t *p);
bool module_is_version_newer(const nt_module_t *p, int major_version,
			   int minor_version);

int module_get_bus(const nt_module_t *p);
nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
int module_get_debug_mode(const nt_module_t *p);
void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
uint32_t module_get_addr_base(const nt_module_t *p);
void module_unsuppported(const nt_module_t *p);

void module_dump(const nt_module_t *p);
void module_dump_registers(const nt_module_t *p);

/* Registers (shadowed 32-bit-word values) */
nt_register_t *register_new(void);
void register_delete(nt_register_t *p);
void register_init(nt_register_t *p, nt_module_t *p_module,
		   nt_fpga_register_init_t *p_init);

nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);

uint32_t register_get_address(const nt_register_t *p);
uint32_t register_get_addr_rel(const nt_register_t *p);
int register_get_bit_width(const nt_register_t *p);
int register_get_debug_mode(const nt_module_t *p);
void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);

void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
uint32_t register_get_val32(const nt_register_t *p);
uint32_t register_get_val_updated32(const nt_register_t *p);

void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
			  uint32_t len);

void register_make_dirty(nt_register_t *p);
void register_update(const nt_register_t *p);
void register_reset(const nt_register_t *p);
void register_flush(const nt_register_t *p, uint32_t cnt);
void register_clr(nt_register_t *p);
void register_set(nt_register_t *p);

void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
			   uint64_t *tsc2);

void register_dump(const nt_register_t *p);
void register_dump_fields(const nt_register_t *p);

/* Fields (bit ranges within a register) */
nt_field_t *field_new(void);
void field_delete(nt_field_t *p);
void field_init(nt_field_t *p, nt_register_t *p_reg,
		const nt_fpga_field_init_t *p_init);

int field_get_debug_mode(const nt_module_t *p);
void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
int field_get_bit_width(const nt_field_t *p);
int field_get_bit_pos_low(const nt_field_t *p);
int field_get_bit_pos_high(const nt_field_t *p);
uint32_t field_get_mask(const nt_field_t *p);
void field_reset(const nt_field_t *p);
uint32_t field_get_reset_val(const nt_field_t *p);
void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
		       uint32_t len);
uint32_t field_get_val_mask(const nt_field_t *p);
uint32_t field_get_val32(const nt_field_t *p);
uint32_t field_get_updated(const nt_field_t *p);
void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
void field_update_register(const nt_field_t *p);
void field_flush_register(const nt_field_t *p);
void field_set_val32(const nt_field_t *p, uint32_t val);
void field_set_val_flush32(const nt_field_t *p, uint32_t val);
void field_clr_all(const nt_field_t *p);
void field_clr_flush(const nt_field_t *p);
void field_set_all(const nt_field_t *p);
void field_set_flush(const nt_field_t *p);

/* Polling helpers: -1 iterations/interval selects the defaults */
int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);
int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);

int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);
int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);

int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
			uint32_t n_wait_cond_mask, int n_poll_iterations,
			int n_poll_interval);

void field_dump(const nt_field_t *p);
void field_dump_val(const nt_field_t *p);
void field_dump_init(const nt_fpga_field_init_t *p);

/*
 * nthw helpers
 */
nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);

#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_HELPER_H__
#define __NTHW_HELPER_H__

#include <unistd.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>

#ifndef ARRAY_SIZE
/* Element count of a true array (not valid on pointers). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif

#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_PLATFORM_DRV_H__
#define __NTHW_PLATFORM_DRV_H__

#include "nthw_helper.h"

/* Napatech PCI vendor id and the supported device ids. */
#define NT_HW_PCI_VENDOR_ID (0x18f4)

#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)

/* Internal adapter identities; NT40A01 is an alias of NT40E3. */
enum nthw_adapter_id_e {
	NT_HW_ADAPTER_ID_UNKNOWN = 0,
	NT_HW_ADAPTER_ID_NT40E3,
	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
	NT_HW_ADAPTER_ID_NT50B01,
	NT_HW_ADAPTER_ID_NT80E3,
	NT_HW_ADAPTER_ID_NT100E3,
	NT_HW_ADAPTER_ID_NT100A01,
	NT_HW_ADAPTER_ID_NT200E3,
	NT_HW_ADAPTER_ID_NT200A01,
	NT_HW_ADAPTER_ID_NT200D01,
	NT_HW_ADAPTER_ID_NT200A02,
};

typedef enum nthw_adapter_id_e nthw_adapter_id_t;

/* Map a PCI device id to an adapter id; UNKNOWN if unrecognized. */
nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);

#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_PROFILE_H__
#define __NTHW_PROFILE_H__

/* Operating profile an FPGA image is built for. */
enum fpga_info_profile {
	FPGA_INFO_PROFILE_UNKNOWN = 0,
	FPGA_INFO_PROFILE_VSWITCH = 1,
	FPGA_INFO_PROFILE_INLINE = 2,
	FPGA_INFO_PROFILE_CAPTURE = 3,
};

#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build.
+ * Both branches undefine RAB_DEBUG_ECHO, so the echo trace paths below are
+ * always compiled out; a developer must define the macro by hand to use them.
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+/* Max number of 1 us polls while waiting for a RAB DMA completion marker */
+#define RAB_DMA_WAIT (1000000)
+/* Size (in 32-bit words) of each RAB DMA ring; power of two - used as wrap mask */
+#define RAB_DMA_BUF_CNT (0x4000)
+
+/* RAB command-word opcodes */
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+/*
+ * Bit layout of a 32-bit RAB command word
+ * (_LO = lowest bit, _HI = highest bit, _BW = field width):
+ * [31:28] opcode, [27:20] word count, [19:16] bus id, [15:0] address
+ */
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC context.
+ * Returns NULL on allocation failure (the original dereferenced the malloc
+ * result in memset() without checking it first).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+/* Release a RAC context; NULL is accepted and ignored. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the object before freeing so stale references fail fast */
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Initialize the RAC (Register Access Control) context from the FPGA model.
+ *
+ * Looks up the RAC module instance 0, resolves the registers/fields used for
+ * RAB (Register Access Bus) transactions and caches their addresses and
+ * masks for fast access in the hot paths.
+ *
+ * When called with p == NULL the function only probes for module presence.
+ * Returns 0 on success, -1 if the RAC module instance does not exist.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	{
+		/*
+		 * RAC is a primary communication channel -
+		 * turn off debug by default, except for rac_rab_init.
+		 *
+		 * Fix: this block must run after mp_reg_rab_init has been
+		 * resolved above; it previously ran before the assignment
+		 * and passed an uninitialized register pointer to
+		 * register_set_debug_mode().
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/* Fix: was "mp_reg_dbg_data = NULL" (copy-paste bug) */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache register BAR addresses for direct MMIO access in the hot paths */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* NMB (non-memory-mapped bus?) registers are optional - cache addresses when present */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces, as read from the NT_RAC_RAB_INTERFACES product param */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Poll the RAB output-buffer "used" counter until at least word_cnt words
+ * are available, or the retry budget runs out. Returns 0 on success,
+ * -1 on timeout (with an error log including the failing address).
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t ob_used = 0;
+	int attempts = 100000;
+
+	while (attempts--) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &ob_used);
+		ob_used = (ob_used & p->rac_rab_buf_used_ob_used_mask) >>
+			  p->rac_rab_buf_used_ob_used_low;
+		if (ob_used >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, ob_used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be exercised - as all RAB modules will be held in reset.
+ */
+/* Assert/deassert RAB reset for the interfaces selected by n_rab_intf_mask. */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 * (the field write goes through the register model so it is logged;
+	 * the raw MMIO write below performs the same update directly)
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/* Run the RAC RAB "flip/flip" reset sequence over all RAB interfaces. */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+	const int n_intf_cnt = nthw_rac_get_rab_interface_count(p);
+	const int n_intf_mask = (1 << n_intf_cnt) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_intf_cnt, n_intf_mask);
+	assert(n_intf_cnt);
+	assert(n_intf_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_intf_mask);
+	nthw_rac_rab_init(p, n_intf_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (first call only) the paired RAB DMA rings, program their
+ * physical addresses into the adapter, and sync the driver-side ring
+ * pointers with the hardware's current values.
+ * Returns 0 on success, -1 on DMA allocation failure.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* One contiguous allocation holds both the inbound and outbound ring */
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		/* Inbound ring first, outbound ring in the second half */
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter: IOVA of the inbound ring (lo/hi halves) */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* Outbound ring starts RAB_DMA_BUF_CNT words after the inbound ring */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers (hardware reports byte offsets) */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Open a RAB DMA batch. On success the mutex is left held and must be
+ * released by nthw_rac_rab_dma_commit(). Fails if a batch is already open.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_adapter_id_str);
+	return -1;
+}
+
+/*
+ * Finish the queued batch and start the transfer: append the completion
+ * marker to the inbound ring, clear the outbound slot where its echo will
+ * land, then publish the new inbound write pointer to the FPGA.
+ * Called from nthw_rac_rab_dma_commit() while p->m_mutex is held.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer (pointer is in bytes) */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Busy-wait (1 us per poll, up to RAB_DMA_WAIT polls) for the completion
+ * marker in the outbound ring, then advance past it and reset the inbound
+ * free-word budget. Returns 0 on success, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t n_polls = 0;
+
+	while (n_polls < RAB_DMA_WAIT) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+		n_polls++;
+	}
+
+	if (n_polls == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Close a batch opened by nthw_rac_rab_dma_begin(): kick the transfer,
+ * wait for its completion marker, and release the mutex taken by begin().
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret = -1;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return ret;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* MMIO read of one 32-bit register at reg_addr relative to BAR0. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *src =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					    reg_addr);
+
+	*p_data = *src;
+}
+
+/* MMIO write of one 32-bit register at reg_addr relative to BAR0. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *dst =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*dst = p_data;
+}
+
+/*
+ * Queue a RAB write burst (one command word + word_cnt payload words) into
+ * the inbound DMA ring. Nothing is started here; the caller must bracket
+ * queueing between nthw_rac_rab_dma_begin() and nthw_rac_rab_dma_commit().
+ * Returns -1 on invalid word count or when the ring lacks room (commit and
+ * retry in that case).
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): room is checked for word_cnt + 3 but only
+	 * word_cnt + 1 words are consumed below; the extra headroom appears
+	 * reserved for the completion word(s) added by commit - confirm.
+	 */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload into the ring, wrapping at the power-of-two boundary */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read burst into the inbound DMA ring and hand back, via
+ * buf_ptr, where in the outbound ring the word_cnt result words will land
+ * after nthw_rac_rab_dma_commit() completes. Returns -1 on invalid word
+ * count or when the ring lacks room.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fix: the format string had five conversion specifiers but
+		 * only four arguments (undefined behavior); it now matches
+		 * the message above and includes the missing newline.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Tell the caller where the read data will appear in the outbound ring */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Write word_cnt 32-bit words to a RAB bus/address using the register-based
+ * (non-DMA) inbound/outbound buffers. Serialized by p->m_mutex; rejected
+ * while a RAB DMA transaction is active. Returns 0 on success, -1 on error.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/*
+	 * NOTE(review): the boundary checks below use '>' against
+	 * (1 << BW), so a value exactly equal to 1 << BW passes although it
+	 * overflows the BW-bit field - confirm whether '>=' was intended.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write data to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register; bit 31 signals timeout/overflow */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Read word_cnt 32-bit words from a RAB bus/address using the
+ * register-based (non-DMA) inbound/outbound buffers. Serialized by
+ * p->m_mutex. Returns 0 on success, -1 on error.
+ * NOTE(review): unlike nthw_rac_rab_write32(), the argument validation
+ * here runs after taking the mutex - harmless, but inconsistent.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* NOTE(review): '>' vs (1 << BW) admits the value 1 << BW itself - see write32 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+		/* Compose read command (echo variant also returns the command word) */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register; bit 31 signals timeout/overflow */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB inbound/outbound buffers: set the flush bit, wait for the
+ * IB/OB "used" counters to drain, then clear the flush bit again.
+ * Returns 0 on success, -1 if the buffers never drained.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t n_used = 0;
+	uint32_t n_tries;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until only the flush bit remains set (OB_USED and IB_USED are 0) */
+	for (n_tries = 0; n_tries < 100000; n_tries++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &n_used);
+		if (n_used == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (n_used != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
/*
 * Register Access Controller (RAC) context.
 *
 * Holds the FPGA module/register/field handles for the RAC block plus
 * cached register addresses and field masks used to drive the Register
 * Access Bus (RAB) in both register mode and DMA mode.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_rac;

	/* Serializes all RAB transactions (register and DMA mode) */
	pthread_mutex_t m_mutex;

	/* Product parameters read at init time */
	int mn_param_rac_rab_interfaces;
	int mn_param_rac_rab_ob_update;

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	/* Bit width and derived all-ones mask of the RAB INIT field */
	int mn_fld_rab_init_bw;
	uint32_t mn_fld_rab_init_mask;

	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	/* In-band (request) and out-of-band (response) data registers */
	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	/* Buffer free/used bookkeeping registers and their fields */
	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	/* DMA-mode buffer physical addresses (lo/hi halves) */
	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	/* DMA-mode ring read/write pointers */
	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Cached register addresses (resolved once, used on the fast path) */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Cached field masks for the buffer bookkeeping registers */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* DMA-mode state: mapping, in/out buffers and ring positions */
	bool m_dma_active;

	struct nt_dma_s *m_dma;

	volatile uint32_t *m_dma_in_buf;
	volatile uint32_t *m_dma_out_buf;

	uint16_t m_dma_out_ptr_rd;
	uint16_t m_dma_in_ptr_wr;
	uint32_t m_in_free;
};

typedef struct nthw_rac nthw_rac_t;
typedef struct nthw_rac nthw_rac;
+
/*
 * Window into a RAB DMA response buffer, returned by
 * nthw_rac_rab_read32_dma().
 * NOTE(review): size/index appear to be in 32-bit words given the
 * uint32_t base pointer - confirm against the DMA read implementation.
 */
struct dma_buf_ptr {
	uint32_t size;           /* buffer capacity */
	uint32_t index;          /* current position within the buffer */
	volatile uint32_t *base; /* buffer base address (DMA memory) */
};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
/*
 * Trigger a statistics DMA transfer.
 *
 * Clears the toggle-missed flag if it was set, invalidates the old DMA
 * timestamp (presumably rewritten by hardware when the transfer
 * completes - TODO confirm), then sets the DMA enable field to start
 * the transfer. Always returns 0.
 */
int nthw_stat_trigger(nthw_stat_t *p)
{
	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);

	if (n_toggle_miss)
		field_set_flush(p->mp_fld_stat_toggle_missed);

	if (p->mp_timestamp)
		*p->mp_timestamp = -1; /* Clear old ts */

	field_update_register(p->mp_fld_dma_ena);
	field_set_flush(p->mp_fld_dma_ena);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
/*
 * STAT module context: resolved FPGA fields plus the counter layout
 * derived from the product parameters and the STA module version
 * (see nthw_stat_init()).
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_stat;
	int mn_instance;

	/* Counter layout version derived from the STA module version */
	int mn_stat_layout_version;

	/* True when the FPGA profile is VSWITCH (changes counter layout) */
	bool mb_is_vswitch;
	bool mb_has_tx_stats;

	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	int m_dbs_present;

	int m_rx_port_replicate;

	int m_nb_color_counters;

	/* Per-host-buffer and per-port counter counts */
	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	/* Total number of 32-bit counters in the DMA area */
	int m_nb_counters;

	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	/* CFG fields NOT available from STA module v. 3 (may be NULL) */
	nt_field_t *mp_fld_tx_disable;

	nt_field_t *mp_fld_cnt_freeze;

	nt_field_t *mp_fld_stat_toggle_missed;

	/* HOST_ADR LSB/MSB fields holding the DMA physical address */
	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;

	/* Timestamp word placed right after the counters in the DMA area */
	uint64_t *mp_timestamp;
};

typedef struct nthw_stat nthw_stat_t;
typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
/* Log severity bit values used by NT_LOG and the backend callbacks */
enum nt_log_level {
	NT_LOG_ERR = 0x001,
	NT_LOG_WRN = 0x002,
	NT_LOG_INF = 0x004,
	NT_LOG_DBG = 0x008,
	NT_LOG_DB1 = 0x010,
	NT_LOG_DB2 = 0x020,
};

/*
 * Logging backend installed via nt_log_init(). All NT_LOG output is
 * forwarded to .log; .is_debug reports the per-module debug state.
 */
struct nt_log_impl {
	int (*init)(void);
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);
	int (*is_debug)(uint32_t module);
};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * NT log helper functions.
+ * Build a string incrementally and emit it as a single one-line NT_LOG
+ * call. Useful when one NT_LOG invocation is impractical, e.g. when the
+ * number of values to print is variable or unknown at compile time.
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Return a pointer to the first '\n' of the trailing EOL run of s
 * (ignoring trailing spaces), e.g. for "hello\n\n\n" the first of the
 * three newlines. Returns NULL when s is empty or does not end in a
 * newline.
 */
static char *last_trailing_eol(char *s)
{
	size_t len = strlen(s);

	/* Fix: the original read s[-1] for an empty string (UB) */
	if (len == 0)
		return NULL;

	int i = (int)len - 1;
	/* Skip trailing spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	if (s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+__rte_format_printf(2, 0)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
/* Release a string from ntlog_helper_str_alloc() (NULL is a no-op). */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..3850ccd934
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
/*
 * DMA allocation descriptor returned by nt_dma_alloc(): IOVA as mapped
 * through VFIO, the CPU virtual address (stored as an integer), and the
 * alignment-rounded size in bytes.
 */
struct nt_dma_s {
	uint64_t iova;
	uint64_t addr;
	uint64_t size;
};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
/*
 * VFIO DMA map/unmap callbacks, registered once via nt_util_vfio_init()
 * and used by nt_dma_alloc()/nt_dma_free().
 */
struct nt_util_vfio_impl {
	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
			    uint64_t size);
	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
			      uint64_t size);
};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
/* VFIO callbacks used by nt_dma_alloc()/nt_dma_free() below */
static struct nt_util_vfio_impl vfio_cb;

/*
 * Register the VFIO DMA map/unmap callbacks. The struct is copied, so
 * the caller's instance need not outlive this call.
 */
void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
{
	vfio_cb = *impl;
}
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%u, align=0x%X\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64 ", size=%u\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%u\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v4 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-18 18:41   ` Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		monitor_task_is_running[i] = 0;
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *sensors_s_spi = nthw_spi_v3_new();
+
+	if (sensors_s_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(sensors_s_spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(sensors_s_spi);
+		return NULL;
+	}
+
+	return sensors_s_spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *sensors_t_spi = nthw_spis_new();
+	/* init SPI for sensor initialization process */
+	if (sensors_t_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(sensors_t_spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(sensors_t_spi);
+		return NULL;
+	}
+
+	return sensors_t_spi;
+}
+
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring starteed\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;
+	int hw_product_type;
+	int hw_reserved1;
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;
+	int n_queues_per_intf_cnt;
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p _unused,
+		int port _unused)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const bool nim_present = p_link->link_state[port].nim_present;
+	return nim_present;
+}
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const bool adm_state = !p_link->port_action[port].port_disable;
+	return adm_state;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	bool status = p_link->link_state[port].link_up;
+	return status;
+}
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_speed = speed;
+	p_link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nt_link_speed_t speed = p_link->link_info[port].link_speed;
+	return speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nt_link_duplex_t duplex = p_link->link_info[port].link_duplex;
+	return duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	return p_link->port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nim_i2c_ctx_t nim_ctx = p_link->u.var100g.nim_ctx[port];
+	return nim_ctx;
+}
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.\n
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Callback functions to setup mac, pcs and phy
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adapter types */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port's link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port's link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback configuration on one port.
+ * mode: 1 = host loopback, 2 = line loopback, other = remove loopback
+ * (based on last_mode). Ends with an RX path reset and counter clearing.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	const char *const p_port_str = p_adapter_info->mp_port_id_str[intf_no];
+	bool swap_polarity = true;
+
+	if (mode == 1) {
+		/* Host loopback: FEC on, no GTY polarity swap */
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n", p_port_str);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polarity = false;
+	} else if (mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n", p_port_str);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+	} else if (last_mode == 1) {
+		NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n", p_port_str);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+	} else if (last_mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n", p_port_str);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+	}
+
+	/* NT200A01 build 2 and NT200A02 boards need GTY polarity swapping */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polarity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear BIP (and, if FEC is active, FEC) counters once RX is out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port).
+ * Fills *state from the MAC/PCS link summary and the GPIO PHY presence pin.
+ * Logs the raw summary, but only when it changed since the last call
+ * (per-adapter/per-port cache in a function-static buffer).
+ * Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Cache of the last logged summary, to avoid log spam */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		/*
+		 * NOTE(review): the "Port = %d" field logs mac_pcs->mn_instance,
+		 * not the `port` argument - confirm these are always equal.
+		 */
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		/* Only log (and re-cache) when the summary text changed */
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Query the GPIO PHY presence pin: true when a NIM module is plugged in.
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable the MAC/PCS receive path. Always returns 0.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* adapter handle unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable the MAC/PCS transmit path and select the host as TX source.
+ * Always returns 0.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* adapter handle unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable the MAC/PCS receive path. Always returns 0.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* adapter handle unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable the MAC/PCS transmit path and deselect the host as TX source.
+ * Always returns 0.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* adapter handle unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Pulse the RX path reset: assert for 10 ms, de-assert, then allow 10 ms
+ * for the path to settle. Always returns 0.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* adapter handle unused */
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* hold reset for 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* settle for 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap GTY TX/RX lane polarity on one port.
+ * When `swap` is true the board-specific per-lane polarity maps below are
+ * applied; when false all lanes revert to non-swapped.
+ * Always returns 0.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	/* Per-port, per-lane polarity maps - defined for ports 0-1 only */
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	/* Guard the table bounds: other call sites only assert against
+	 * NUM_ADAPTER_PORTS_MAX, which may exceed the two mapped ports.
+	 */
+	assert(port >= 0 && port < 2);
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link health once a NIM is installed and link can be expected.
+ * Resets the RX path when the hardware flags a required reset, a high
+ * bit-error rate, or incomplete FEC alignment-marker lock.
+ * Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_all_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets and pre-initializes the NIM module on `port`, validates the module
+ * type, and asserts/de-asserts low-power mode according to `enable`.
+ * Returns 0 on success (including "no module present"), negative on error.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* only this module type is supported */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		/* Port is being disabled: quiesce the MAC before touching the NIM */
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* Re-check presence: the module may have been pulled during the reset */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present!\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): nim_sensors is passed unindexed while nim_sensors_cnt
+	 * is indexed by port - confirm construct_and_preinit_nim() expects the
+	 * whole sensor array here.
+	 */
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	/* Low-power mode gates the laser: off when enabling, on when disabling */
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ *
+ * Sequence: set default link-state variables, configure MAC/PCS (LED,
+ * equalization, polarity, timestamping), enable TX and reset RX, enable the
+ * GMF if present, bring up the NIM, apply board-specific GTY TX tuning, set
+ * the RX timestamp compensation, and finally enable RX.
+ * Returns 0 on success, non-zero from NIM initialization on failure.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* -1 marks "unknown board" when the FPGA info is unavailable */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* First call probes for GMF module presence (NULL instance),
+	 * second call does the real initialization.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Board/revision specific GTY TX tuning (pre/diff/post emphasis) */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		/* NOTE(review): table is [2][4][3] but port is asserted only
+		 * against NUM_ADAPTER_PORTS_MAX - confirm port < 2 here.
+		 */
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		/* Same tuning values for all lanes on these boards */
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ *
+ * Per-adapter monitor loop: polls adapter/NIM sensors, applies admin
+ * enable/disable and loopback-mode changes, handles NIM insertion/removal,
+ * reports link transitions, and resets the RX path on link problems.
+ * Runs until monitor_task_is_running[adapter_no] is cleared.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		/*
+		 * NOTE(review): function-static, so shared by all adapters'
+		 * monitor threads - confirm whether this should be per-adapter.
+		 */
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Log this port's NIM context (index by i, not
+				 * element 0, which was previously logged for
+				 * every port by mistake).
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine - pthread entry point wrapping the common
+ * monitor loop. The int result of the loop is discarded; always returns NULL.
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ *
+ * One-time setup of the MAC/PCS, NIM I2C and GPIO PHY contexts for each
+ * physical port (guarded by variables_initialized), then starts the
+ * per-adapter link monitoring thread if it is not already running.
+ * Returns 0 on success, or the first non-zero error code encountered.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* NIM I2C controller instances start at 2 */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		/* Mark done only if every port initialized cleanly */
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread (the thread sets its own running flag) */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* One-time 100G port initialization; starts the link monitor thread.
+ * Returns 0 on success, non-zero on error.
+ */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Probe and initialize the PCI test-access (TA) and read/write traffic
+ * generator (TG) modules. Returns -1 on a null context, otherwise the
+ * number of modules that were not found (0 = all present).
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int n_err_cnt = 0;
+
+	if (!p) {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+	memset(p, 0, sizeof(*p));
+
+	assert(p_fpga);
+
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	if (nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0) != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	if (nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0) != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	if (nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0) != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Write the TA control-enable register (non-zero enables). Returns 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RD TG RAM slot: its DMA address and request parameters.
+ * Returns 0.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	/*
+	 * Use a 64-bit multiply for the slot offset; the previous
+	 * (unsigned long)(slot_addr * req_size) did a 32-bit multiply first,
+	 * which could truncate/overflow for large offsets and on ILP32.
+	 */
+	const uint64_t n_phys_addr = iova + ((uint64_t)slot_addr * req_size);
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start (or stop, with 0 iterations) the RD traffic generator. Returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the RD TG ready flag, ~1 ms per poll, up to 1000 polls.
+ * Returns 0 when ready, -1 on timeout.
+ * NOTE(review): as in the original, the 1000th poll reports timeout even if
+ * the flag became set on that read - confirm whether this is intended.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	uint32_t data = 0;
+	int poll = 0;
+
+	do {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	} while (data == 0);
+
+	return 0;
+}
+
+/*
+ * Program one WR TG RAM slot: its DMA address and request parameters.
+ * Returns 0.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	/*
+	 * Use a 64-bit multiply for the slot offset; the previous
+	 * (unsigned long)(slot_addr * req_size) did a 32-bit multiply first,
+	 * which could truncate/overflow for large offsets and on ILP32.
+	 */
+	const uint64_t n_phys_addr = iova + ((uint64_t)slot_addr * req_size);
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start (or stop, with 0 iterations) the WR traffic generator. Returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the WR TG ready flag, ~1 ms per poll, up to 1000 polls.
+ * Returns 0 when ready, -1 on timeout.
+ * NOTE(review): as in the original, the 1000th poll reports timeout even if
+ * the flag became set on that read - confirm whether this is intended.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	uint32_t data = 0;
+	int poll = 0;
+
+	do {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	} while (data == 0);
+
+	return 0;
+}
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Drive HIF TG throughput measurement over the requested NUMA node(s) and
+ * direction(s). UINT8_MAX selects all NUMA nodes; direction <= 0 sweeps
+ * directions 1..3 (read-only, write-only, combined). One primary/slave
+ * end-point counter pair is set up per iteration and handed to the runner.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	/* A zero delay would make the measurement window empty */
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/* Fixed copy-paste bug: was pri->n_ref_clk_cnt */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	/* NOTE(review): errors are logged but not propagated (always returns 0)
+	 * - confirm callers do not need the failure status.
+	 */
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* PCI test accelerator (TA) / traffic generator (TG) interface for nt4ga adapters. */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Defaults used when the caller passes 0/negative sizes or counts */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles to the HW modules used by the TA/TG test facility */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* PCIe read traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* PCIe write traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta; /* PCIe test accelerator */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Attach the TA/TG HW module handles for the given adapter */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one throughput measurement; results land in pri/sla counter structs */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Sweep NUMA node(s)/direction(s) and invoke the runner for each combination */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Inline timestamp format is pcap-style 32:32 bits
+ * (seconds in the upper 32 bits, nanoseconds in the lower 32 bits).
+ * Convert to a single nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Collect one statistics snapshot from the FPGA DMA area into the driver
+ * counters, updating last_timestamp along the way. Dispatches to the
+ * vswitch or capture layout parser depending on the FPGA image type.
+ * Always returns 0; parse errors from the collectors are not propagated.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/* Capture images: the FPGA maintains the 32:32 timestamp directly */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Initialize the adapter statistics context: zero the state, create the
+ * STA and RMC HW module handles, and cache the host-buffer/port counts
+ * reported by the STA module. Returns 0 on success, -1 on failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* NOTE(review): p_nt4ga_stat is the address of an embedded member and
+	 * can never be NULL; the else-branch is effectively dead code.
+	 */
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/* Roll back the STA handle if the RMC allocation failed */
+		if (!p_nthw_rmc) {
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache the counter geometry reported by the STA module */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Set up the statistics machinery: allocate and register the FPGA DMA
+ * area, then allocate the color/host-buffer/port counter structures
+ * (vswitch or capture layout) and trigger the first STA update.
+ * Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): on the later error paths the earlier allocations
+ * (DMA area, color/hb structs) are left attached to p_nt4ga_stat;
+ * confirm callers invoke nt4ga_stat_stop() on failure to release them.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Block traffic while the DMA area is being (re)configured */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * Fixed format/argument mismatches: the original string had six
+		 * conversions but five arguments, and used %p for addr which is
+		 * printed as a 64-bit integer below.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d 0x%" PRIX64 " 0x%" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/* %02d matches int adapter_no; PRIX32 matches uint32_t size */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so stale reads are recognizable before the
+		 * FPGA writes real counters.
+		 */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* Vswitch images expose no FLM statistics */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick the STA module so the first DMA snapshot gets produced */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources allocated by nt4ga_stat_setup().
+ * Every freed pointer is reset to NULL so a repeated call is harmless.
+ * Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* free(NULL) is a no-op, so the per-pointer guards were redundant */
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() is a project helper; keep the guard since its
+	 * NULL-safety is not established here.
+	 */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Dump per-port RX/TX total counters (packets, octets, drops) for every
+ * physical port to the supplied stream. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Parse one vswitch-layout (v1) statistics DMA snapshot and accumulate it
+ * into the driver counter structures: color counters first, then host
+ * buffer counters, then per-port RX and TX counters.
+ * Returns 0 on success, -1 on a NULL argument or unsupported STA layout.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	/* Validate before use; the original dereferenced p_nt4ga_stat ahead
+	 * of its NULL check.
+	 */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters (pairs of packets+bytes; top 6 bits carry TCP flags) */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters (8 words per host buffer) */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  42] :
+			0;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  47] :
+			0;
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53];
+
+		/* Rx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] +
+			(p_nthw_stat->m_dbs_present ?
+			 p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					   42] :
+			 0);
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23];
+
+		/* Tx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+struct color_counters {
+	uint64_t color_packets;
+	uint64_t color_bytes;
+	uint8_t tcp_flags;
+};
+
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat;
+	nthw_rmc_t *mp_nthw_rmc;
+	struct nt_dma_s *p_stat_dma;
+	uint32_t *p_stat_dma_virtual;
+	uint32_t n_stat_size;
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver;
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 46913c0c74..ae43254f9f 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -4,22 +4,39 @@
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -29,6 +46,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -44,6 +62,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+
+	/* Determine if copper SFP is supported 3-speed type */
+	for (size_t i = 0; i < ARRAY_SIZE(pn_trispeed_list); i++)
+		if (strcmp(pn_trispeed_list[i], prod_no) == 0)
+			return true;
+
+	return false;
+}
+
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP:
+		return true;
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+	(void)lin_addr; /* Unused */
+
+	if (do_write)
+		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					 seq_cnt, p_data);
+	else
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Only write new page select value if necessary */
+	if (page_sel != curr_page_sel) {
+		if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+					 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+					 sizeof(page_sel), &page_sel) != 0)
+			return -1;
+	}
+	return 0;
+}
+
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices support access to address [0..127] */
+
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_READ);
+}
+
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_WRITE);
+}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	ctx->nim_id = 0;
+	int res = nim_read_id(ctx);
+
+	if (res) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+#define XSFP_READ_VENDOR_INFO(x, X)                                         \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+XSFP_READ_VENDOR_INFO(,)
+XSFP_READ_VENDOR_INFO(q, Q)
+
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	switch (nim_id) {
+	case 0x0:
+		return "UNKNOWN";
+	case 0x1:
+		return "GBIC";
+	case 0x2:
+		return "FIXED";
+	case 0x3:
+		return "SFP/SFP+";
+	case 0x04:
+		return "300 pin XBI";
+	case 0x05:
+		return "XEN-PAK";
+	case 0x06:
+		return "XFP";
+	case 0x07:
+		return "XFF";
+	case 0x08:
+		return "XFP-E";
+	case 0x09:
+		return "XPAK";
+	case 0x0A:
+		return "X2";
+	case 0x0B:
+		return "DWDM";
+	case 0x0C:
+		return "QSFP";
+	case 0x0D:
+		return "QSFP+";
+	case 0x11:
+		return "QSFP28";
+	case 0x12:
+		return "CFP4";
+	default:
+		return "ILLEGAL!";
+	}
+}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate Select bits (RS0 & RS1)
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Enable or disable the TX laser through the soft TX-disable bit in the
+ * SFP control/status register. Returns the I2C access result (0 on success).
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t ctrl;
+	int res = nim_nim_read_write_data_lin(ctx, pg_addr,
+					      SFP_CONTROL_STATUS_LIN_ADDR,
+					      sizeof(ctrl), &ctrl, NIM_READ);
+
+	if (res != 0)
+		return res;
+
+	/* Touch only the soft TX-disable bit; preserve the other bits */
+	if (disable)
+		ctrl = (uint8_t)(ctrl | SFP_SOFT_TX_DISABLE_BIT);
+	else
+		ctrl = (uint8_t)(ctrl & ~SFP_SOFT_TX_DISABLE_BIT);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr,
+					   SFP_CONTROL_STATUS_LIN_ADDR,
+					   sizeof(ctrl), &ctrl, NIM_WRITE);
+}
+
+/*
+ * Enable or disable the TX laser on one lane (lane_idx >= 0) or on all
+ * lanes at once (lane_idx < 0). Returns 0 on success, -1 on I2C failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	/* Negative lane index selects every lane */
+	const uint8_t mask = (lane_idx < 0) ? QSFP_SOFT_TX_ALL_DISABLE_BITS :
+			  (uint8_t)(1U << lane_idx);
+	uint8_t ctrl;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_READ) != 0)
+		return -1;
+
+	ctrl = disable ? (uint8_t)(ctrl | mask) : (uint8_t)(ctrl & ~mask);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read a vendor information string at a certain address. The string is cut
+ * at the first whitespace character and is always NUL-terminated, even when
+ * the NIM data lacks a terminator. Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int idx;
+
+	/* Read max_len - 1 bytes, leaving room for a terminating NUL */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at the first whitespace character, if any */
+	for (idx = 0; idx < max_len - 1; idx++) {
+		switch (p_data[idx]) {
+		case ' ':
+		case '\n':
+		case '\t':
+		case '\v':
+		case '\f':
+		case '\r':
+			p_data[idx] = '\0';
+			return 0;
+		default:
+			break;
+		}
+	}
+
+	/* No whitespace found - force termination at the last position */
+	p_data[max_len - 1] = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to
+ * meters, saturating each entry at 65535.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(ctx->len_info); idx++) {
+		if (p_nim_len_info[idx] == 255) {
+			/* 255 encodes "longer than representable" */
+			ctx->len_info[idx] = 65535;
+			continue;
+		}
+
+		uint32_t meters = (uint32_t)(p_nim_len_info[idx] * p_nim_units[idx]);
+
+		ctx->len_info[idx] = (meters > 65535) ? 65535 : (uint16_t)meters;
+	}
+}
+
+/*
+ * Read basic QSFP+ module data over I2C: DMI options, vendor information,
+ * supported fiber lengths and the required power class.
+ * Returns 0 on success, -1 on any I2C access failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	/* Latch the average-power bit from the DMI options register */
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr]);
+
+	/* Vendor name, PN, SN, date and revision strings */
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* Options register, bit 5: rate select implemented */
+	if (((read_byte(ctx, options_reg_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	uint8_t rate_select_type =
+		(read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03;
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2. Returns 0 on success, -1 for an unsupported speed.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx >= 0) {
+		/*
+		 * Individual lanes.
+		 * Currently we do not support QSFP28 modules that support rate
+		 * selection when running on individual lanes, so only 25G is
+		 * accepted here; that might change in the future.
+		 */
+		if (speed == NT_LINK_SPEED_25G)
+			return 0;
+
+		NT_LOG(ERR, NTHW,
+		       "NIM cannot select this lane speed (%s).",
+		       nt_translate_link_speed(speed));
+		return -1;
+	}
+
+	/* All lanes together */
+	if (ctx->speed_mask != (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+		/* For ordinary modules only this speed is supported */
+		if (speed == NT_LINK_SPEED_100G)
+			return 0;
+
+		NT_LOG(ERR, NTHW,
+		       "NIM cannot select this speed (%s).",
+		       nt_translate_link_speed(speed));
+		return -1;
+	}
+
+	/* The speed mask indicates that the module supports rate selection */
+	uint16_t sel;
+
+	if (speed == NT_LINK_SPEED_100G) {
+		sel = 0xAAAA;
+	} else if (speed == NT_LINK_SPEED_40G) {
+		sel = 0x0000;
+	} else {
+		NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+		       nt_translate_link_speed(speed));
+		return -1;
+	}
+
+	/* Set speed for Rx and Tx on all lanes */
+	write_data_lin(ctx, rx_rate_sel_addr, sizeof(sel), &sel);
+	write_data_lin(ctx, tx_rate_sel_addr, sizeof(sel), &sel);
+	return 0;
+}
+
+/*
+ * Apply a link speed setting appropriate to the installed NIM type.
+ * Returns 0 on success, -1 when the NIM type has no adjustable link speed.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		/* Only act when the module mandates explicit rate selection */
+		if (!qsfp28_is_speed_selection_enabled(ctx))
+			return 0; /* NIM picks up the speed automatically */
+
+		return qsfp28_set_link_speed(ctx, speed);
+	}
+
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return nim_sfp_set_rate_select(ctx, speed);
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information for an SFP module and fills in the
+ * corresponding context fields (sensor options, TX-disable capability,
+ * supported lengths and power level requirements). Always returns 0.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	/* With DMI implemented, all five standard sensors become available */
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	/* Vendor name, PN, SN, date and revision strings */
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	/* Determine whether the module requires power level 2 */
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	/* Assume level 1 until the module reports otherwise */
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ * Returns true when the product number was recognized; otherwise false, and
+ * only the mandatory temperature sensor is assumed.
+ *
+ * Rewritten as a table-driven lookup: the original was a ~160-line strcmp
+ * chain where most entries assigned the exact same option set.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	/* Option sets shared by many entries (enum => usable in static init) */
+	enum {
+		NIM_OPT_ALL_DMI =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER),
+	};
+
+	static const struct {
+		const char *prod_no;
+		uint32_t options;
+	} nim_options_map[] = {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		{ "FTL410QE1C", (1 << NIM_OPTION_TEMP) |
+				(1 << NIM_OPTION_SUPPLY) |
+				(1 << NIM_OPTION_TX_BIAS) |
+				(1 << NIM_OPTION_TX_POWER) },
+		/* FINISAR FTL410QE2C, QSFP+ */
+		{ "FTL410QE2C", (1 << NIM_OPTION_TEMP) |
+				(1 << NIM_OPTION_SUPPLY) },
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		{ "FTL4C1QE1C", NIM_OPT_ALL_DMI },
+		/*
+		 * AVAGO 79E4Z, QSFP+: the digital diagnostic accuracy is not
+		 * guaranteed so only the mandatory temperature sensor is made
+		 * available (although it will also be inaccurate)
+		 */
+		{ "AFBR-79E4Z", (1 << NIM_OPTION_TEMP) },
+		/* AVAGO 79E4Z-D, QSFP+ */
+		{ "AFBR-79E4Z-D", NIM_OPT_ALL_DMI },
+		/* AVAGO 79EQDZ, QSFP+ */
+		{ "AFBR-79EQDZ", NIM_OPT_ALL_DMI },
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp
+		 * sensor and this is ok since the temp sensor is not mandatory
+		 * on active optical modules (SFF-8436_rev4.1, p67)
+		 */
+		{ "AFBR-79EBRZ", (1 << NIM_OPTION_RX_ONLY) },
+		/* Avago RxTx BiDi NIMs - no sensors, see note above */
+		{ "AFBR-79EBPZ-NU1", 0 },
+		{ "AFBR-79EBPZ", 0 },
+		/* AVAGO 89CDDZ, QSFP28 */
+		{ "AFBR-89CDDZ", NIM_OPT_ALL_DMI },
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		{ "AFBR-89BDDZ", NIM_OPT_ALL_DMI },
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly, but sensors have been set
+		 * as for 89BDDZ except for Tx sensors.
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly
+		 * version of AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not
+		 * because the receivers might be degraded, and this is the
+		 * cause for selling them as RxOnly.
+		 */
+		{ "AFBR-89BRDZ", (1 << NIM_OPTION_TEMP) |
+				 (1 << NIM_OPTION_SUPPLY) |
+				 (1 << NIM_OPTION_RX_POWER) |
+				 (1 << NIM_OPTION_RX_ONLY) },
+		/* Sumitomo QSFP28 modules */
+		{ "SQF1000L4LNGG01P", NIM_OPT_ALL_DMI },
+		{ "SQF1000L4LNGG01B", NIM_OPT_ALL_DMI },
+		{ "SQF1001L4LNGG01P", NIM_OPT_ALL_DMI },
+		{ "SQF1001L4LNGG01B", NIM_OPT_ALL_DMI },
+		{ "SQF1002L4LNGG01B", NIM_OPT_ALL_DMI },
+		/* Fujitsu QSFP28 modules */
+		{ "FIM37700/171", NIM_OPT_ALL_DMI },
+		{ "FIM37700/172", NIM_OPT_ALL_DMI },
+		/* InnoLight QSFP28 modules */
+		{ "TR-FC85S-NVS", NIM_OPT_ALL_DMI },
+		{ "TR-FC13L-NVS", NIM_OPT_ALL_DMI },
+		/* Finisar QSFP28 modules */
+		{ "FTLC9551REPM", NIM_OPT_ALL_DMI },
+		{ "FTLC9558REPM", NIM_OPT_ALL_DMI },
+	};
+
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(nim_options_map); i++) {
+		if (strcmp(ctx->prod_no, nim_options_map[i].prod_no) == 0) {
+			ctx->options = nim_options_map[i].options;
+			return true;
+		}
+	}
+
+	/*
+	 * Unknown product number.
+	 * DO NOTE: The temperature sensor is not mandatory on active/passive
+	 * copper and active optical modules
+	 */
+	ctx->options = (1 << NIM_OPTION_TEMP);
+	return false;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly empirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ *
+ * value_addr: linear address of the current sensor value(s), 2 bytes per lane
+ * lane_count: number of lanes (1 for module-wide sensors, 4 for per-lane)
+ * limit_addr: start of the four 2-byte limits (alarm hi/lo, warning hi/lo)
+ * two_compl:  values/limits are signed 16-bit two's complement
+ * sensor_option: NIM_OPTION_* bit index to set in ctx->options when present
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	/* Assemble each lane's big-endian 16-bit value */
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		/* Sign-extend when the sensor is two's complement */
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors by probing each value/limit register pair.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	const struct {
+		uint16_t value_addr;
+		uint8_t lane_count;
+		uint16_t limit_addr;
+		bool two_compl;
+		uint32_t option;
+	} probes[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < ARRAY_SIZE(probes); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, probes[i].value_addr,
+						 probes[i].lane_count,
+						 probes[i].limit_addr,
+						 probes[i].two_compl,
+						 probes[i].option);
+}
+
+/*
+ * Classify the SFP module (SFP / SFP+ / SFP28 and sub-type) from its EEPROM
+ * fields and store the result in ctx->port_type plus the SFP-specific flags
+ * (sfp28, sfpplus, dual_rate, cu_type, tri_speed, rate-select bits).
+ * Byte offsets follow the SFP MSA EEPROM layout.
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Byte 12: nominal bit rate in units of 100 Mbit/s */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* 25.5 Gb/s nominal rate: SFP28 module */
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	/* 10 Gb/s or faster counts as SFP+ */
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Build the speed mask for an SFP/SFP+/SFP28 module from the module type
+ * and flags detected earlier.
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool dual = ctx->specific_u.sfp.dual_rate;
+
+	if (ctx->specific_u.sfp.sfp28) {
+		/* Default for SFP28 */
+		ctx->speed_mask = NT_LINK_SPEED_25G;
+		if (dual)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* Default for SFP+; DAC cables and dual-rate modules add 1G */
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+		if (dual ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		/* 100M-only SFP */
+		ctx->speed_mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Default for SFP */
+		ctx->speed_mask = NT_LINK_SPEED_1G;
+		if (dual || ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_100M;
+		if (ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_10M;
+	}
+
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+/*
+ * Determine the QSFP28 port type from the specification compliance codes.
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	/* Assume plain QSFP28 until a more specific code is found */
+	ctx->port_type = NT_PORT_TYPE_QSFP28;
+
+	if ((fiber_chan_speed & (1 << 7)) == 0)
+		return;
+
+	/* SFF-8024, Rev 4.7, Table 4-4 */
+	uint8_t ext_code = 0;
+
+	read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1, &ext_code);
+
+	switch (ext_code) {
+	case 0x02:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+		break;
+	case 0x03:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+		break;
+	case 0x0B:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+		break;
+	case 0x0C:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+		break;
+	case 0x0D:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+		break;
+	case 0x25:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+		break;
+	case 0x26:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+		break;
+	case 0x27:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+		break;
+	default:
+		/* Keep the NT_PORT_TYPE_QSFP28 default */
+		break;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* Options register, bit 5: rate select implemented */
+	if (((read_byte(ctx, options_reg_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	uint8_t rate_select_type =
+		(read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03;
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	/* Extended rate select compliance, bits 1..0: version */
+	uint8_t ext_rate_select_ver =
+		read_byte(ctx, ext_rate_select_compl_reg_addr) & 0x03;
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Build the speed mask for a QSFP28 module, taking PAM-4 module types and
+ * optional rate selection into account.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = ctx->lane_idx < 0;
+	const bool pam4 = ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_LR;
+
+	if (pam4) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/*
+ * Classify a QSFP+ module as passive DAC, active DAC or optical based on
+ * the device technology byte (upper nibble) at linear address 147.
+ */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	uint8_t tech_class = device_tech & 0xF0;
+
+	if (tech_class == 0xA0 || tech_class == 0xB0) {
+		/* Copper cable, unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tech_class >= 0xC0) {
+		/* 0xC0..0xF0: copper cable with active (limiting or linear) equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Everything below 0xA0 is an optical technology */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* QSFP+ runs 40G across all lanes (lane_idx < 0) or 10G on a single lane. */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read the basic SFP EEPROM data and, on success, derive the port type
+ * and speed mask. Returns zero on success, non-zero otherwise.
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	int res = sfp_read_basic_data(ctx);
+
+	if (res != 0)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return 0;
+}
+
+/*
+ * Initialize the QSFP+ specific part of the context.
+ * lane_idx: -1 means the module drives all lanes together; 0..3 selects a
+ * single lane (QSFP modules always have 4 lanes).
+ */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->specific_u.qsfp.qsfp28 = false;
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+}
+
+/*
+ * Construct the QSFP+ context and read/derive all static module data:
+ * port type, sensor options, TX_DISABLE support, RX-only flag and the
+ * supported speed mask. Returns zero on success, non-zero otherwise.
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * After a module reset, wait until the module is ready for register access.
+ * Only waits on behalf of lane_idx == -1 (all lanes) or lane_idx == 0; the
+ * other lanes return immediately since the first lane covers the wait.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented (register 221, bit 4) */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		/* Register 6, bit 0: init complete */
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine which FEC options the module supports and whether FEC can be
+ * controlled, first via a list of known product numbers and otherwise via
+ * the FEC control bits in SFF-8636 page 03h, byte 227.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare the product number with strcmp(): comparing the
+		 * char array against a string literal with '==' is a pointer
+		 * comparison and can never match.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 of page 3, as a linear address */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * QSFP28 pre-initialization: run the QSFP+ steps first, then refine port
+ * type, FEC options and speed mask from the QSFP28 specific registers.
+ * Returns zero on success, non-zero otherwise.
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		qsfp28_wait_for_ready_after_reset(ctx);
+		/* Clear QSFP28 specific state before re-detecting it below */
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Build the chain of SFP sensor groups for the given port (temperature,
+ * supply voltage, bias current, TX power, RX power) and count them in
+ * *nim_sensors_cnt. The chain is anchored at nim_sensors_ptr[m_port_no].
+ * Stops early (with a partial chain and count) if an allocation fails.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	if (sensor == NULL) /* allocation failed - nothing to chain onto */
+		return;
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/*
+	 * The remaining groups all come from the level-1 descriptor table;
+	 * read_funcs[i] belongs to sfp_sensors_level1[i].
+	 */
+	void (*const read_funcs[])(struct nim_sensor_group *, nthw_spis_t *) = {
+		&nim_read_sfp_voltage, /* voltage */
+		&nim_read_sfp_bias_current, /* bias current */
+		&nim_read_sfp_tx_power, /* tx power */
+		&nim_read_sfp_rx_power, /* rx power */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(read_funcs); i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no,
+							 ctx,
+							 NT_SENSOR_SOURCE_PORT,
+							 &sfp_sensors_level1[i]);
+		if (sensor->next == NULL) /* stop chaining on allocation failure */
+			return;
+		sensor = sensor->next;
+		sensor->read = read_funcs[i];
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Build the chain of QSFP+/QSFP28 sensor groups for the given port: one
+ * temperature group, one supply voltage group, then per-lane bias current
+ * (4), TX power (4) and RX power (4) groups - 14 in total.
+ * NOTE(review): allocate_nim_sensor_group() can return NULL on allocation
+ * failure and the result is dereferenced unchecked below - worth a guard.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors: qsfp_sensor_level1[1..4], one per lane */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power: qsfp_sensor_level1[5..8], one per lane */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power: qsfp_sensor_level1[9..12], one per lane */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one sensor-group node for @port and attach the sensor created
+ * from description @sd. Returns NULL if the node allocation fails; the
+ * caller owns the returned node.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(*sg));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Classify the NIM via its identifier byte, run the type specific
+ * pre-initialization and register the port's sensors.
+ * Returns zero on success, non-zero if the NIM type is unsupported or
+ * construction/pre-initialization failed (previously pre-init failures
+ * were silently discarded).
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+	/* Optional lane index for QSFP modules; -1 means all lanes */
+	int8_t lane_idx = extra ? *(int8_t *)extra : (int8_t)-1;
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/* Propagate a pre-init failure instead of dropping it */
+		if (sfp_preinit(ctx) != 0 && res == 0)
+			res = 1;
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		if (qsfpplus_preinit(ctx, lane_idx) != 0 && res == 0)
+			res = 1;
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		if (qsfp28_preinit(ctx, lane_idx) != 0 && res == 0)
+			res = 1;
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/* Per-port NIM context: I2C access state plus all data read from the
+ * module EEPROM during pre-initialization. Field comments below that are
+ * not demonstrated by the visible code are marked as assumptions.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance; /* I2C controller instance number */
+	uint8_t devaddr; /* I2C device address of the module */
+	uint8_t regaddr; /* register address within the device */
+	uint8_t nim_id; /* NIM identifier byte (nt_nim_identifier_e) */
+	nt_port_type_t port_type; /* classified port/module type */
+
+	/* NUL-terminated identification strings read from the EEPROM */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr; /* presumably: power values are averages (DMI option) - confirm */
+	bool content_valid; /* presumably: EEPROM content read successfully - confirm */
+	uint8_t pwr_level_req; /* requested power level/class */
+	uint8_t pwr_level_cur; /* current power level/class */
+	uint16_t len_info[5]; /* supported link length info from the EEPROM */
+	uint32_t speed_mask; /* Speeds supported by the NIM (nt_link_speed_e mask) */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count; /* number of lanes (4 for QSFP) */
+	uint32_t options; /* bitmask of (1 << nim_option_t) capabilities */
+	bool tx_disable; /* TX_DISABLE implemented (QSFP register 195.4) */
+	bool dmi_supp; /* digital diagnostics (DMI) supported */
+
+	/* Form-factor specific data; discriminated by the NIM type */
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel;
+			bool sw_rate_sel;
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only;
+			bool qsfp28; /* true when the module is QSFP28 rather than QSFP+ */
+			union {
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	struct nim_i2c_ctx *ctx;
+	struct nim_sensor_group *next;
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify the NIM based on its ID and some register
+ * reads, and collects the information into the ctx structure. The @extra
+ * parameter may contain the initialization argument for specific types of NIMs.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/*
+ * Translate a single link-speed flag to a human readable string.
+ * Unknown values trip a debug assert and yield "Unhandled".
+ */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *text;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].text;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/*
+ * Convert a link-speed flag to the corresponding bit rate in bits/s.
+ * Unknown values trip a debug assert and yield 0.
+ */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	const uint64_t mbps = 1000ULL * 1000ULL; /* 1 Mbit/s in bits/s */
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * mbps;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * mbps;
+	case NT_LINK_SPEED_1G:
+		return 1000ULL * mbps;
+	case NT_LINK_SPEED_10G:
+		return 10000ULL * mbps;
+	case NT_LINK_SPEED_25G:
+		return 25000ULL * mbps;
+	case NT_LINK_SPEED_40G:
+		return 40000ULL * mbps;
+	case NT_LINK_SPEED_50G:
+		return 50000ULL * mbps;
+	case NT_LINK_SPEED_100G:
+		return 100000ULL * mbps;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render every speed present in @link_speed_mask as a comma separated list
+ * in @buffer (capacity @length bytes, always NUL terminated when length > 0).
+ * Speeds that do not fit are silently dropped. Returns @buffer.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	if (buffer == NULL || length == 0)
+		return buffer;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			/*
+			 * strncat()'s bound is the number of characters that
+			 * may be appended - not the total buffer size - so
+			 * pass the remaining space to avoid overflowing
+			 * @buffer (the original passed @length here).
+			 */
+			if (len > 0 && (length - len - 1) > 2) {
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read @count consecutive 16-bit sensor words starting at linear EEPROM
+ * address @addr into @p_lane_values. Always returns true.
+ * NOTE(review): the self-assignment below is a no-op even though its
+ * comment says "swap to little endian" - presumably a placeholder for a
+ * byte swap; confirm behavior on big-endian register data.
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option; /* only referenced when NIM_DMI_TEST_VALUE is defined */
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		*p_lane_values = (*p_lane_values); /* Swap to little endian */
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (one raw 16-bit register word; scaling to degrees
+ * is done by the caller).
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage (one raw 16-bit register word; scaling is done
+ * by the caller).
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes (raw 16-bit word per lane).
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes (raw 16-bit word per lane).
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes (raw 16-bit word per lane).
+ * Fixed to read from QSFP_RX_PWR_LIN_ADDR (linear address 34); the
+ * original read QSFP_TX_PWR_LIN_ADDR, returning TX power as RX power.
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/* Sensor-group read callback: NIM temperature.
+ * The raw value is scaled by 10/256 before reporting (raw unit is
+ * presumably 1/256 degC, giving tenths of a degree - confirm against
+ * SFF-8636); -1 is reported on read failure.
+ */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t res;
+	(void)t_spi; /* unused; required by the read-callback signature */
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_temperature(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)(res * 10 / 256));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+/* Sensor-group read callback: NIM supply voltage.
+ * The raw value is divided by 10 before reporting; -1 is reported on
+ * read failure.
+ */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t res;
+	(void)t_spi; /* unused; required by the read-callback signature */
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_supply_voltage(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)((res) / 10));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+/* Sensor-group read callback: per-lane TX bias current.
+ * Each lane's raw value is scaled by 2 before reporting; a single -1 is
+ * reported on read failure.
+ */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi; /* unused; required by the read-callback signature */
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_bias_current(sg->ctx, temp);
+
+	if (res) {
+		/* One update per lane */
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i] * 2);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+/* Sensor-group read callback: per-lane TX optical power.
+ * Raw per-lane values are reported unscaled; a single -1 is reported on
+ * read failure.
+ */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi; /* unused; required by the read-callback signature */
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_power(sg->ctx, temp);
+
+	if (res) {
+		/* One update per lane */
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+/* Sensor-group read callback: per-lane RX optical power.
+ * Raw per-lane values are reported unscaled; a single -1 is reported on
+ * read failure.
+ */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi; /* unused; required by the read-callback signature */
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_rx_power(sg->ctx, temp);
+
+	if (res) {
+		/* One update per lane */
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Sensor read helpers for QSFP/QSFP+ modules. Each function validates its
+ * arguments, reads one quantity from the module and publishes the result
+ * (or -1 on read failure) via update_sensor_value().
+ *
+ * Guard renamed from _QSFP_H: identifiers starting with an underscore and
+ * an uppercase letter are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef QSFP_SENSORS_H
+#define QSFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* QSFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from _SFP_P_REG_H: identifiers starting with an underscore
+ * and an uppercase letter are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef SFP_P_REGISTERS_H
+#define SFP_P_REGISTERS_H
+
+/*
+ * SFP/SFP+ Registers
+ * "LIN" addresses are linear: offsets 0-255 are the 0xA0 page and
+ * offsets 256-511 are the 0xA2 diagnostics page.
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+/* Duplicate definition of SFP_CU_LINK_LEN_LIN_ADDR removed */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+/*
+ * NOTE(review): the two bits below have no accompanying register address
+ * define - confirm which 0xA2-area control/status register they belong to.
+ */
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* SFP_P_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * First determines whether external calibration applies; if so, the slope
+ * and offset calibration words are fetched and applied. Only linear
+ * (slope/offset) calibration is supported. On return, p_value points to
+ * 16-bit host-order data that may be signed or unsigned depending on
+ * signed_data. Returns false if the module exposes no diagnostics.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;	/* Wide accumulator so saturation can be detected */
+	uint16_t slope = 1;	/* Neutral defaults when no external calibration */
+	int16_t offset = 0;
+
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Big endian to host order (htons used as generic 16-bit swap) */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/*
+		 * Swap calibration words to host order.
+		 * NOTE(review): htons() returns uint16_t; assigning it to the
+		 * int16_t offset relies on implementation-defined conversion
+		 * for values above INT16_MAX - confirm intended.
+		 */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/*
+		 * Apply linear calibration: raw * slope / 256 + offset,
+		 * i.e. slope is presumably a U8.8 fixed-point factor as in
+		 * SFF-8472 - confirm. Result saturates to the 16-bit range.
+		 */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/* Fetch the calibrated module temperature (signed 16-bit raw units). */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR,
+				    SFP_TEMP_SLOPE_LIN_ADDR,
+				    SFP_TEMP_OFFSET_LIN_ADDR,
+				    p_value, true, ctx);
+}
+
+/* Fetch the calibrated supply voltage (unsigned 16-bit raw units). */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR,
+				    SFP_VOLT_SLOPE_LIN_ADDR,
+				    SFP_VOLT_OFFSET_LIN_ADDR,
+				    p_value, false, ctx);
+}
+
+/* Fetch the calibrated TX bias current (unsigned 16-bit raw units). */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				    SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				    SFP_TX_BIAS_OFFSET_LIN_ADDR,
+				    p_value, false, ctx);
+}
+
+/* Fetch the calibrated TX optical power (unsigned 16-bit raw units). */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				    SFP_TX_PWR_SLOPE_LIN_ADDR,
+				    SFP_TX_PWR_OFFSET_LIN_ADDR,
+				    p_value, false, ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out. Returns false only if the calibrated value
+ * overflows 16 bits.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];	/* Coeff4..Coeff0 in index order 0..4 */
+	float power_raised;	/* Running power of the raw reading */
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Big endian to host order */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/*
+		 * NOTE(review): byte-swapping floats through a uint32_t*
+		 * violates strict aliasing; a memcpy-based swap would be
+		 * safer - confirm compiler flags make this benign.
+		 */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 * (rx_pwr_cal[4] holds Coeff0, rx_pwr_cal[0] holds Coeff4)
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range: overflow is reported as failure */
+		if (rx_power > 65535)
+			return false;
+
+		/* Negative results are clamped to zero instead of failing */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/* Fetch the calibrated RX optical power, if the module provides it. */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR,
+					       p_value);
+}
+
+/* Read the SFP temperature and publish it in 0.1 degC units (-1 on failure). */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp;
+	int value = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Raw unit is 1/256 degC; convert to 1/10 degC */
+	if (sfp_nim_get_temperature(sg->ctx, &raw_temp))
+		value = (int)(raw_temp * 10 / 256);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Read the SFP supply voltage and publish it in mV (-1 on failure). */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt;
+	int value = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &raw_volt))
+		value = (int)(raw_volt / 10); /* Unit: 100uV -> 1mV */
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Read the SFP TX bias current and publish it (-1 on failure). */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_bias;
+	int value = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &raw_bias))
+		value = (int)(raw_bias * 2);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Read the SFP TX optical power and publish it (-1 on failure). */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr;
+	int value = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_power(sg->ctx, &raw_pwr))
+		value = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, value);
+}
+
+/* Read the SFP RX optical power and publish it (-1 on failure). */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr;
+	int value = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_rx_power(sg->ctx, &raw_pwr))
+		value = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, value);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Sensor read helpers for SFP/SFP+ modules. Each function validates its
+ * arguments, reads one quantity from the module and publishes the result
+ * (or -1 on read failure) via update_sensor_value().
+ *
+ * Guard renamed from _SFP_H: identifiers starting with an underscore and
+ * an uppercase letter are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef SFP_SENSORS_H
+#define SFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* SFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate a zero-initialized GMF context.
+ * Returns NULL on allocation failure; release with nthw_gmf_delete().
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/* Release a GMF context allocated by nthw_gmf_new(); NULL is a no-op. */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the struct before release to help catch stale references */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a GMF context to FPGA instance n_instance.
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise. Mandatory registers/fields are resolved
+ * with the *_get_* accessors; optional ones (not present in every FPGA
+ * image) use the *_query_* accessors and may be left NULL.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* Probe-only mode: report module availability without touching p */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory control/speed/delta/slack/debug/sticky registers */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 (neutral multiplier/divisor) */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields below - NULL when absent from the image */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF block; ignored once administratively blocked. */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Turn inter-frame-gap handling on or off. */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	const uint32_t val = enable ? 1 : 0;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, val);
+}
+
+/* Set the TX-now-always mode; no-op when the field is absent. */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always == NULL)
+		return;	/* Optional field not present in this FPGA image */
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Set the TX-on-timestamp-always mode; no-op when the field is absent. */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always == NULL)
+		return;	/* Optional field not present in this FPGA image */
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Set TX-on-timestamp adjust-on-set-clock; no-op when the field is absent. */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock == NULL)
+		return;	/* Optional field not present in this FPGA image */
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			      enable ? 1 : 0);
+}
+
+/* Enable/disable automatic IFG adjustment. */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Pass explicit 0/1 for consistency with the sibling enable setters */
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write a raw IFG speed value; returns 0 on success, -1 if the value does
+ * not fit. NOTE(review): the limit check uses 2^(width-1), i.e. only half
+ * the field's nominal range is accepted - confirm that is intentional.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		/*
+		 * The 64-bit value is handed over as 1 or 2 32-bit words
+		 * depending on field width. NOTE(review): the uint32_t* view
+		 * of a uint64_t assumes little-endian word order matches what
+		 * field_set_val() expects - confirm.
+		 */
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Return the bit width of the IFG speed field for this FPGA image. */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(width >= 22);
+	return width;
+}
+
+/*
+ * Program the IFG speed from an absolute rate limit in bits vs. the link
+ * speed. The ratio is scaled by the product parameters (mul/div) and
+ * converted to the hardware fixed-point encoding
+ * speed = (1/rate - 1) * 2^(width/2). Returns 0 on success, -1 if the
+ * resulting value does not fit the field.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	/* Half the field width is the fractional part of the encoding */
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	/* NOTE(review): rounded here but truncated in the percent variant - confirm */
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed from a rate limit in percent of link speed.
+ * 0% and 100% both map to raw value 0 (no artificial gap); values up to
+ * 99% are converted with the same fixed-point encoding as
+ * nthw_gmf_set_ifg_speed_bits(); anything in (99, 100) is rejected.
+ * Returns 0 on success, -1 on rejection or if the value does not fit.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		/* NOTE(review): truncated here but rounded in the bits variant - confirm */
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Write the 64-bit set-clock delta. NOTE(review): the uint32_t* view of a
+ * uint64_t assumes the word order field_set_val() expects matches the
+ * host's (little-endian) layout - confirm.
+ */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/*
+ * Write the 64-bit delta-adjust value; silently ignored when the optional
+ * register is absent from this FPGA image (pointer left NULL by init).
+ */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+/* Write the 64-bit maximum IFG adjust slack (as two 32-bit words). */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value. */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation,
+			      compensation);
+}
+
+/* Collect the sticky status bits into a GMF_STATUS_MASK_* bitmask. */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	/* Refresh the shadow copy of the sticky status register first */
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed) != 0)
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted) != 0)
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+/* Clear the sticky status bits selected by the GMF_STATUS_MASK_* bitmask. */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if ((status & GMF_STATUS_MASK_DATA_UNDERFLOWED) != 0)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if ((status & GMF_STATUS_MASK_IFG_ADJUSTED) != 0)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the next-packet timestamp statistic (ns).
+ * Returns UINT64_MAX when the optional register is absent.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/* UINT64_MAX, not ULONG_MAX: unsigned long is 32-bit on some ABIs */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the maximum-delayed-packet statistic (ns).
+ * Returns UINT64_MAX when the optional register is absent.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/* UINT64_MAX, not ULONG_MAX: unsigned long is 32-bit on some ABIs */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Permanently disable the GMF (used to enforce license expiry).
+ * Order matters: the block must be disabled BEFORE the flag is raised,
+ * because nthw_gmf_set_enable() refuses to act once m_administrative_block
+ * is set.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * NTHW GMF module: register/field handles and control API for the FPGA
+ * GMF block (enable, inter-frame-gap speed/delta/slack control, sticky
+ * status and packet-timing statistics).
+ */
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/* Bit masks used by nthw_gmf_get_status_sticky()/set_status_sticky() */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED	/* = 2 */
+};
+
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;	/* Owning FPGA handle */
+	nt_module_t *mp_mod_gmf;	/* Resolved GMF module instance */
+	int mn_instance;	/* Instance index within the FPGA */
+	/*  */
+
+	/* CTRL register and its fields (query'ed fields may be NULL) */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	/* Optional register: NULL when absent from the FPGA image */
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* Optional statistics registers: NULL when absent */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* Product parameters scaling the IFG speed computation (default 1) */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+/* The following setters are no-ops when the optional field is absent */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+/* Return 0 on success, -1 when the value does not fit / is rejected */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+/*
+ * Si5340 Rev D clock generator configuration.
+ * Consumers may re-#define si5340_revd_register_t / si5340_revd_registers
+ * before including this header and #undef the guard afterwards, so the
+ * same table can be bound to profile-specific names (see
+ * nthw_clock_profiles.c).
+ */
+
+/* Number of address/value pairs in the configuration table */
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One configuration step: write 'value' to register 'address' */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* Allocate a zero-initialized RMC context; returns NULL on OOM. */
+	nthw_rmc_t *p = calloc(1, sizeof(nthw_rmc_t));
+
+	return p;
+}
+
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	/* Scrub the context before releasing it; NULL is a no-op. */
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Attach an RMC context to FPGA module instance @n_instance and resolve the
+ * registers and fields used by the accessors in this file.
+ *
+ * When @p is NULL the function only probes for module presence and returns
+ * 0 if the instance exists, otherwise -1.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count; falls back to the generic NT_PORTS parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* The CTRL register is mandatory (get, not query) */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* STATUS, DBG and MAC_IF are optional (query); the corresponding
+	 * accessors below return 0xffffffff when the register is absent.
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	/* Read back the per-port MAC blocking mask from the CTRL register. */
+	uint32_t n_mask = field_get_updated(p->mp_fld_ctrl_block_mac_port);
+
+	return n_mask;
+}
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	/* The STATUS register is optional; report all-ones when absent. */
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	/* The STATUS register is optional; report all-ones when absent. */
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	/* The DBG register is optional; report all-ones when absent. */
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	/* The MAC_IF register is optional; report all-ones when absent. */
+	if (!p->mp_reg_mac_if)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_mac_if_err);
+}
+
+/* Write @mask to the CTRL BLOCK_MAC_PORT field and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/* Set the statistics-drop, keep-alive and MAC-port blocking fields. */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	/* An administrative block (nthw_rmc_administrative_block()) takes
+	 * precedence and must not be modified here.
+	 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Clear the blocking fields set by nthw_rmc_block().
+ * Ports above the active count stay blocked: the NIM count is used for a
+ * slave adapter, otherwise the RX port count.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	/* NOTE(review): `~0U <<` is undefined for a shift count >= 32 --
+	 * confirm mn_nims/mn_ports are always < 32 here.
+	 */
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	/* Skipped entirely while an administrative block is in effect */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Block all MAC ports and latch the administrative-block flag; once set,
+ * nthw_rmc_block()/nthw_rmc_unblock() become no-ops (they test the flag).
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/* Runtime context for one RMC (MOD_RMC) FPGA module instance. */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports; /* RX port count (falls back to NT_PORTS parameter) */
+	int mn_nims; /* NIM count */
+	bool mb_is_vswitch; /* FPGA profile is VSWITCH */
+
+	/* When true, block/unblock requests are ignored (ports stay blocked) */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register (mandatory) */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent from the FPGA) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/* Next free FPGA result slot per adapter; file-local bookkeeping only,
+ * so give it internal linkage (it is consumed solely by get_fpga_idx()).
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * Set up AVR monitoring of one sensor; the AVR writes results into FPGA
+ * slot `fpga_idx`, which is returned (even if the SPI setup call fails,
+ * matching the original behavior -- the error is only logged).
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	/* Zero-initialize so no indeterminate bytes (struct padding) are
+	 * transmitted to the AVR by nt_avr_sensor_mon_setup().
+	 */
+	struct sensor_mon_setup16 avr_sensor_setup = { 0 };
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR,
+	 * hence the limits are parked at NaN so the AVR raises no alarms.
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/* Read one AVR-monitored sensor result from the FPGA and update @sg. */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/* Initialized so a failed read does not feed an indeterminate
+	 * value to conv_func()
+	 */
+	uint32_t sensor_result = 0;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	/* NOTE(review): the status returned by sensor_read() is ignored --
+	 * consider checking it once the success convention is confirmed.
+	 */
+	sensor_read(t_spi, sg->sensor->fpga_idx, &sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(sensor_result));
+}
+
+/*
+ * Allocate a sensor group, create its sensor descriptor and configure AVR
+ * monitoring for it.  Returns NULL on allocation failure (errors are
+ * logged); on success the caller owns the returned group.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* allocate_sensor() already logged; avoid the NULL
+		 * dereference on sg->sensor->fpga_idx below
+		 */
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+						avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	/* Hand out the next free FPGA result slot for this adapter
+	 * (post-increment; uint8_t wraps modulo 256 like the original cast).
+	 */
+	return s_fpga_indexes[adapter_no]++;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/* Read the on-die FPGA temperature via TEMPMON and update the sensor. */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Convert the raw ADC code to units of 0.1 degree Celsius:
+	 * 20159/16384 is the gain and 44752896/16384 = 2731.5 (273.15 C
+	 * scaled by 10).  Do the arithmetic in a signed 64-bit type: with
+	 * `val` being uint32_t the original expression wrapped around for
+	 * codes below ~2220 (i.e. readings under 0 C) and could not
+	 * overflow-check the multiply.
+	 */
+	temp = (int)(((int64_t)val * 20159 - 44752896) / 16384);
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the FPGA temperature sensor group.  Returns NULL when the group
+ * or its TEMPMON monitor cannot be allocated (errors are logged).
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	if (sg->monitor == NULL) {
+		/* tempmon_new() already logged; the read callback would
+		 * dereference a NULL monitor
+		 */
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	/* Allocate the monitor shell; members are set up by tempmon_init().
+	 * Returns NULL (after logging) when allocation fails.
+	 */
+	struct nt_fpga_sensor_monitor *temp = malloc(sizeof(*temp));
+
+	if (temp == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return temp;
+}
+
+/*
+ * Resolve the TEMPMON module, its STAT register and the TEMP field into @t.
+ * Errors are logged; the function now returns early instead of passing a
+ * NULL module/register on to the next lookup as the original did.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return; /* module_get_register() must not see a NULL module */
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return; /* register_get_field() must not see a NULL register */
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level 1 (diagnostic) SFP sensors */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* QSFP/QSFP+/QSFP28 temperature (level 0); the empty name is filled in
+ * automatically, like the SFP level 0 sensor above
+ */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* Level 1 (diagnostic) QSFP sensors: one bias/power entry per lane */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+#pragma pack(1)
+/* One sensor-monitoring setup entry as transmitted to the AVR (byte-packed) */
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/* NOTE(review): default packing is restored here, so sensor_mon_setup16
+ * below is NOT byte-packed and padding may follow setup_cnt.  Confirm this
+ * matches the AVR wire format used by nt_avr_sensor_mon_setup().
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Perform one SPI transaction with the AVR.
+ * @rxsz: in: capacity of @rx (16-bit units); out: amount received.
+ *        May be NULL only when no receive data is expected.
+ * Returns 0 on success, otherwise the nthw_spi_v3_transfer() error code.
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	/* Check rxsz for NULL *before* reading it: the original dereferenced
+	 * *rxsz here unconditionally while NULL-checking it only afterwards.
+	 */
+	struct tx_rx_buf m_rx = { .size = rxsz ? (uint16_t)*rxsz : 0,
+				  .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	/* Thin wrapper around the FPGA sensor-slot read helper. */
+	uint32_t n_status = nthw_spis_read_sensor(t_spi, fpga_idx,
+						  p_sensor_result);
+
+	return n_status;
+}
+
+/*
+ * Send a sensor-monitoring setup table to the AVR.
+ * Returns 0 on success, a transfer error code, or 1 if the AVR
+ * unexpectedly returned data.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	size_t rx_size = 0;
+	/* Transmit the header plus only the populated setup entries */
+	const size_t tx_size =
+		sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data) +
+		sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+	int error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size,
+			 (uint16_t *)p_setup, &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Start/stop/reset AVR sensor monitoring (see enum sensor_mon_control).
+ * Returns 0 on success, a transfer error code, or 1 if the AVR
+ * unexpectedly returned data.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	/* NOTE(review): sizeof(ctrl) is the size of the enum type (commonly
+	 * 4 bytes) -- confirm this matches the payload size the AVR expects.
+	 */
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Master/Slave
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_MASTER, /* Adapter is master in the bonding */
+	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/* Identity conversion: reinterpret the low 16 bits of the raw value as signed */
int null_signed(uint32_t p_sensor_result)
{
	const int16_t raw = (int16_t)p_sensor_result;

	return raw;
}

/* Identity conversion: keep the low 16 bits of the raw value as unsigned */
int null_unsigned(uint32_t p_sensor_result)
{
	const uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vch value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.015 (PRESCALE is accounted for)
 * ******************************************************************************
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	/* 0.015 V per LSB == 15 mV per LSB; NT unit: 1mV */
	const uint32_t millivolts = p_sensor_result * 15;

	return millivolts;
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vin value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.0125
 * ******************************************************************************
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	/* 0.0125 V per LSB == 12.5 mV per LSB, done as 25/2 in integer math; NT unit: 1mV */
	const uint32_t millivolts = (p_sensor_result * 25) / 2;

	return millivolts;
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Tj value to Napatech internal representation
 * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
 *                      = ReadVal * 5K
 * ******************************************************************************
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * 2730 is used instead of the more exact 2732 because the sensor steps
	 * in 5-degree increments, so round numbers read more naturally.
	 */
	const uint32_t kelvin_tenths = p_sensor_result * 50;

	return kelvin_tenths - 2730; /* NT unit: 0.1C */
}
+
/*
 * ******************************************************************************
 * Conversion function for the Linear Technology Linear_5s_11s format.
 * Decodes Y * 2**N, where N = b[15:11] is a 5-bit two's complement integer
 * and Y = b[10:0] is an 11-bit two's complement integer.
 * The multiplier scales the result to Napatech units.
 * ******************************************************************************
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	/* Sign-extend the 11-bit mantissa Y = b[10:0] */
	int mantissa = value & 0x07FF;

	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent N = b[15:11] */
	int exponent = (value >> 11) & 0x1F;

	if (exponent & 0x10)
		exponent -= 0x20;

	/* Scale to Napatech units first, then apply the power-of-two exponent */
	int result = mantissa * multiplier;

	if (exponent > 0)
		result *= 1 << exponent;
	else if (exponent < 0)
		result /= 1 << (-exponent);

	return result;
}

/*
 * ******************************************************************************
 * Temperature conversion from Linear_5s_11s format.
 * ******************************************************************************
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	/*
	 * NOTE(review): the uint16_t cast folds negative decoded temperatures
	 * into large positive values - presumably deliberate; confirm with the
	 * callers before changing it.
	 */
	const uint16_t tenths = (uint16_t)conv5s_11s(p_sensor_result, 10);

	return tenths; /* NT unit: 0.1C */
}
+
/*
 * ******************************************************************************
 * For MP2886a: Convert a read Tj value to Napatech internal representation
 * ******************************************************************************
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	/*
	 * MPS-2886p: READ_TEMPERATURE (register 0x8Dh) is a 2-byte, unsigned
	 * integer - only the low 16 bits of the raw word are meaningful.
	 */
	const uint16_t raw = (uint16_t)p_sensor_result;

	return raw; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * ******************************************************************************
 * For DS1775: Convert a read temperature value to Napatech internal
 * representation
 * ******************************************************************************
 */
int ds1775_t(uint32_t p_sensor_result)
{
	/* NT unit: 0.1 deg, Native unit: 1/256 C */
	const uint32_t tenths = (p_sensor_result * 10) / 256;

	return tenths;
}
+
/*
 * ******************************************************************************
 * For FAN: Convert a tick count to RPM
 * NT unit: RPM
 * NOTE(review): the original header claimed "2 ticks/revolution", which would
 * imply a divisor of 2, yet the code divides by 4 - confirm which is correct
 * before touching the arithmetic.
 * ******************************************************************************
 */
int fan(uint32_t p_sensor_result)
{
	const uint32_t rpm = (p_sensor_result * 60U) / 4;

	return rpm;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
+/* Alarm reporting policy for a sensor */
+enum nt_sensor_event_alarm_e {
+	NT_SENSOR_ENABLE_ALARM, /* alarm generation enabled */
+	NT_SENSOR_LOG_ALARM, /* presumably log-only alarm handling - confirm */
+	NT_SENSOR_DISABLE_ALARM, /* alarm generation disabled */
+};
+
+/*
+ * Sensor Class types
+ */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
+};
+
+/* Convenience alias for the sensor class enumeration */
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no; /* adapter number; 0xFF until assigned (see allocate_sensor) */
+	uint8_t m_intf_no; /* interface/port number; 0xFF until assigned */
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si; /* signed/unsigned interpretation of raw readings */
+	struct nt_info_sensor_s info; /* public info: type, state, current/min/max values */
+	enum nt_sensor_event_alarm_e alarm; /* alarm reporting policy */
+	bool m_enable_alarm; /* set to true on allocation */
+};
+
+/* FPGA register handles needed to read one sensor's raw value */
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga; /* owning FPGA instance */
+	nt_module_t *mod; /* module the register belongs to */
+
+	nt_register_t *reg; /* register handle */
+	nt_field_t **fields; /* array of field handles within 'reg' */
+	uint8_t fields_num; /* number of entries in 'fields' */
+};
+
+/*
+ * Sensor description.
+ * Describes the static behavior of the sensor; consumed by
+ * allocate_sensor_by_description() when instantiating a sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name; truncated to NT_INFO_SENSOR_NAME on copy. */
+};
+
+/*
+ * A sensor paired with the FPGA monitor that backs it; groups are chained
+ * into a singly linked list via 'next'.
+ */
+struct nt_sensor_group {
+	struct nt_adapter_sensor *sensor; /* owned; released by sensor_deinit() */
+	struct nt_fpga_sensor_monitor *monitor; /* owned; released by sensor_deinit() */
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* fetches a reading, given an SPI handle */
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result); /* raw value -> NT units */
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next; /* next group in the list, or NULL */
+};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+/* Max sensor-name length; the 'name' buffer holds one extra byte for the NUL */
+#define NT_INFO_SENSOR_NAME 50
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
+/* The NT200A02 adapter sensor IDs */
+enum nt_sensors_adapter_nt200a02_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */
+
+	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
+	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NT200A02_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NIM_POWER,
+
+	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
+};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v4 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-18 18:41   ` Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index ae43254f9f..085bcf768e 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -12,6 +12,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -53,6 +54,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct cat_nthw *cat_nthw_new(void)
+{
+	struct cat_nthw *p = malloc(sizeof(struct cat_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Forward the debug-mode setting to the underlying CAT module.
+ * NOTE(review): 'p' is dereferenced without a NULL check - callers must pass
+ * an initialized instance (unlike cat_nthw_delete(), which tolerates NULL).
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/*
+ * CAT CFN (categorizer function) table accessors.
+ *
+ * Each helper stores a value into the corresponding field of the FPGA
+ * register model; presumably the value is latched in the local register
+ * image and only reaches hardware when cat_nthw_cfn_flush() flushes the
+ * CTRL/DATA registers - NOTE(review): confirm against nthw_fpga_model.
+ */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	/* Table address (ADR) the next DATA flush applies to. */
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN CTRL CNT field.
+ * NOTE(review): the name "r" looks like an accidental edit; by the
+ * cat_nthw_<tbl>_cnt convention used by every other table here this should
+ * presumably be cat_nthw_cfn_cnt - rename together with the prototype in
+ * flow_nthw_cat.h.
+ */
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+/* PTC (protocol test condition) field setters. */
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/*
+ * The fields below are optional: they are looked up with
+ * register_query_field() during init (see above) and may be NULL on FPGA
+ * variants that lack them, hence each setter asserts presence first.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	/* Multi-word compare value: writes all mn_words words of the field. */
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	/* KM1_OR is optional (register_query_field() at init). */
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	/* Write the cached CTRL and DATA register images to the FPGA. */
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE/KCS/FTE banked accessors. 'index' selects one of the (up to two)
+ * register banks wired up during init; it is not range-checked here, so
+ * callers must only pass a bank that was actually initialized.
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/* CTE (color table enable) accessors. */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+/*
+ * MSK/HST/EPP/TPE/RRB enables are optional fields (resolved with
+ * register_query_field() at init), hence the asserts.
+ */
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS accessors. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT (color table) accessors. */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	/* NFV_SB is optional (register_query_field() at init). */
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT accessors. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO (extractor offset) accessors. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	/* Signed offset; implicitly converted to the field's raw 32-bit value. */
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK accessors. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	/*
+	 * RCK DATA has no sub-field accessor: write the whole register (one
+	 * word) and mark it dirty - presumably so the next flush writes it
+	 * out even though no field setter touched it.
+	 */
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN (frame length test) accessors. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC and CCE accessors. Both register sets are optional: they are looked
+ * up with module_query_register()/register_query_field() at init and may be
+ * absent on this FPGA, so every helper asserts presence before use.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	/* KCC key is two 32-bit words; val must point to at least two words. */
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/* CCS accessors - optional register set, hence the asserts. */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generator for the CAT CCS DATA field setters. Every CCS field is optional
+ * (resolved with register_query_field() at init), so each generated setter
+ * asserts the field exists on this FPGA before writing it.
+ *
+ * The trailing redeclaration after the function body consumes the ';' that
+ * follows each CATNTHW_CCS_SET(...) expansion below. Without it, the stray
+ * file-scope semicolon after a function definition is not valid ISO C (an
+ * external declaration cannot be empty) and triggers -Wpedantic warnings.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}                                                                 \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val)
+
+CATNTHW_CCS_SET(cor_en);
+CATNTHW_CCS_SET(cor);
+CATNTHW_CCS_SET(hsh_en);
+CATNTHW_CCS_SET(hsh);
+CATNTHW_CCS_SET(qsl_en);
+CATNTHW_CCS_SET(qsl);
+CATNTHW_CCS_SET(ipf_en);
+CATNTHW_CCS_SET(ipf);
+CATNTHW_CCS_SET(slc_en);
+CATNTHW_CCS_SET(slc);
+CATNTHW_CCS_SET(pdb_en);
+CATNTHW_CCS_SET(pdb);
+CATNTHW_CCS_SET(msk_en);
+CATNTHW_CCS_SET(msk);
+CATNTHW_CCS_SET(hst_en);
+CATNTHW_CCS_SET(hst);
+CATNTHW_CCS_SET(epp_en);
+CATNTHW_CCS_SET(epp);
+CATNTHW_CCS_SET(tpe_en);
+CATNTHW_CCS_SET(tpe);
+CATNTHW_CCS_SET(rrb_en);
+CATNTHW_CCS_SET(rrb);
+CATNTHW_CCS_SET(sb0_type);
+CATNTHW_CCS_SET(sb0_data);
+CATNTHW_CCS_SET(sb1_type);
+CATNTHW_CCS_SET(sb1_data);
+CATNTHW_CCS_SET(sb2_type);
+CATNTHW_CCS_SET(sb2_data);
+
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	/* CCS is optional; both registers must exist before flushing. */
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one CAT FPGA module instance (defined at end of this header). */
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+/* Allocate a handle; NULL on allocation failure. */
+struct cat_nthw *cat_nthw_new(void);
+/* Release a handle obtained from cat_nthw_new(). */
+void cat_nthw_delete(struct cat_nthw *p);
+/* Bind @p to CAT instance @n_instance of @p_fpga; returns 0 on success,
+ * -1 if the instance does not exist. */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/* Fixed: this was declared as "void r(...)", a garbled identifier with no
+ * matching definition.  Renamed to cat_nthw_cfn_cnt() to follow the
+ * _select()/_cnt() pair used by every other table in this header (see
+ * mp_cfn_cnt in struct cat_nthw below). */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/*
+ * Per-table accessors.  Each table follows one pattern (see the CCE/CCS
+ * definitions in flow_nthw_cat.c): _select() writes the table address
+ * field, _cnt() the count field, the remaining setters write individual
+ * data fields, and _flush() writes the ctrl/data registers out.
+ */
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Per-instance shadow state for one CAT FPGA module: the module handle plus
+ * one register/field pointer per hardware table entry field.  The pointers
+ * are presumably resolved during cat_nthw_init() (its definition is outside
+ * this hunk) - TODO confirm.  Tables that exist per KM interface are held
+ * as two-element arrays indexed by the 'index' parameter of the accessors.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN table */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE 0/1 */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	/* KCS 0/1 */
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	/* FTE 0/1 */
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU module model. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/* Allocate a zero-initialized csu_nthw handle; NULL on allocation failure. */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc() yields the same zeroed block as malloc()+memset() */
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/* Scrub and free a handle from csu_nthw_new(); NULL is a no-op. */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to CSU module instance @n_instance of @p_fpga and resolve the RCP
+ * control/data register and field pointers.
+ *
+ * Probe mode: when @p is NULL the call only checks for the module and
+ * returns 0 if the instance exists, -1 if not.  Otherwise returns 0 on
+ * success, -1 when the instance is missing from the FPGA model.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* RCP table: ctrl (address/count) and data (per-layer commands) */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Select the RCP record address to operate on.  Asserts the field pointer
+ * first, matching the assert-then-set style of the other flow_filter table
+ * setters (e.g. cat_nthw_ccs_select()). */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_ctrl_adr);
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* Write the RCP record count.  Asserts the field pointer first, matching
+ * the assert-then-set style of the other flow_filter table setters. */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_ctrl_cnt);
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Write the outer-L3 checksum handling command of the selected RCP record. */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/* Write the outer-L4 checksum handling command of the selected RCP record. */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/* Write the inner (tunneled) L3 checksum handling command of the selected
+ * RCP record. */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/* Write the inner (tunneled) L4 checksum handling command of the selected
+ * RCP record.
+ * NOTE(review): the text for values 6-7 below mentions the "outer tunnel"
+ * and appears copied from the outer-L4 command; confirm the semantics for
+ * the inner layer against the hardware documentation. */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Flush the RCP ctrl and data registers (one entry each).  Asserts the
+ * register pointers first, matching the style of cat_nthw_cce_flush() and
+ * the other flow_filter flush helpers. */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one CSU FPGA module instance: module pointer plus the RCP
+ * control/data register and field pointers resolved by csu_nthw_init().
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+/* Allocate a handle; NULL on allocation failure. */
+struct csu_nthw *csu_nthw_new(void);
+/* Release a handle obtained from csu_nthw_new(). */
+void csu_nthw_delete(struct csu_nthw *p);
+/* Bind @p to CSU instance @n_instance; 0 on success, -1 if not present. */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP table: _select() picks the record, the _cmd() setters write the
+ * per-layer checksum commands, _flush() writes the registers out. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Allocate a zero-initialized flm_nthw handle; NULL on allocation failure. */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc() yields the same zeroed block as malloc()+memset() */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+/* Scrub and free a handle from flm_nthw_new(); NULL is a no-op. */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug-mode setting to the underlying FLM module model. */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * CONTROL register field setters.
+ * Each call only updates the field value in the register shadow; the value
+ * is presumably written to hardware when the CONTROL register is flushed
+ * via flm_nthw_control_flush().
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/*
+ * PDS/PIS field pointers are asserted non-NULL before use; presumably these
+ * fields are optional and only present on some FPGA variants -- TODO confirm
+ * against the init code that resolves them.
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+/* CONTROL register field setters (continued). */
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write the (shadowed) CONTROL register to hardware. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * STATUS register field accessors.
+ * The 'get' flag selects the direction: nonzero reads the field into *val.
+ * For read-only fields (calibdone, initdone, idle, eft_bp) a call with
+ * get == 0 is a no-op and *val is left untouched.  For the writable fields
+ * (critical, panic, crcerr) get == 0 writes *val into the field shadow
+ * instead.  Use flm_nthw_status_update()/flm_nthw_status_flush() to move
+ * data between hardware and the shadow.
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write the STATUS register shadow to hardware. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the STATUS register shadow from hardware. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT / SCRUB / LOAD_* registers: each has a single field setter plus a
+ * flush routine that writes the corresponding register to hardware.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * PRIO register: four limit/flow-type field pairs, written to hardware by
+ * flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table access: select the entry address/count via PST_CTRL, set the
+ * BP/PP/TP data fields, then flm_nthw_pst_flush() writes PST_CTRL followed
+ * by PST_DATA.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+/* Flush control first, then data, so the data lands at the selected entry. */
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table access: select the entry address/count via RCP_CTRL,
+ * fill in the RCP_DATA fields, then flm_nthw_rcp_flush() writes RCP_CTRL
+ * followed by RCP_DATA.
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+/* QW0/QW4: quad-word key extractor dynamic-offset/offset/select fields. */
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+/* SW8/SW9: single-word key extractor fields. */
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: val must point to 10 32-bit words (320 bits). */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Flush control first, then data, so the data lands at the selected entry. */
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the FLM BUF_CTRL register pair over RAB DMA and decode the buffer
+ * levels:
+ *   word0[15:0]  -> *lrn_free  (free space in the learn queue)
+ *   word0[31:16] -> *inf_avail (words available in INF_DATA)
+ *   word1[15:0]  -> *sta_avail (words available in STA_DATA)
+ * The output parameters are only written on success.
+ *
+ * Returns 0 on success, otherwise the error code from the failing
+ * nthw_rac_rab_dma_begin()/commit() call.
+ *
+ * NOTE(review): the ring indexing below masks with (size - 1), which
+ * assumes bc_buf.size is a power of two -- TODO confirm with the RAC DMA
+ * buffer allocation.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/* Was "int ret = -1;" followed by an immediate overwrite: the dead
+	 * store is dropped and the begin() result captured directly.
+	 */
+	int ret = nthw_rac_rab_dma_begin(rac);
+
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Push 'word_count' 32-bit words of learn records to the FLM LRN_DATA
+ * register over RAB DMA.  The sequence, performed in one DMA transaction,
+ * is:
+ *   1. write the word count to BUF_CTRL (announce the transfer),
+ *   2. write the learn data to LRN_DATA,
+ *   3. read BUF_CTRL back to refresh the buffer levels.
+ * On success *lrn_free, *inf_avail and *sta_avail are decoded from the
+ * returned BUF_CTRL words (same layout as flm_nthw_buf_ctrl_update());
+ * they are left untouched on failure.
+ *
+ * Returns 0 on success, otherwise the error code from the failing
+ * nthw_rac_rab_dma_begin()/commit() call.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/* Capture the begin() result so a failure propagates the real error
+	 * code, as the sibling flm_nthw_*_update() functions already do,
+	 * instead of collapsing it to a generic -1.
+	 */
+	int ret = nthw_rac_rab_dma_begin(rac);
+
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words of flow-info records from the FLM
+ * INF_DATA register over RAB DMA.  In one DMA transaction:
+ *   1. write (word_count << 16) to BUF_CTRL (announce the INF read),
+ *   2. read word_count words from INF_DATA,
+ *   3. read BUF_CTRL back to refresh the buffer levels.
+ * On success the records are copied into 'data' and *lrn_free,
+ * *inf_avail, *sta_avail are decoded from BUF_CTRL (same layout as
+ * flm_nthw_buf_ctrl_update()); outputs are untouched on failure.
+ *
+ * Returns 0 on success, otherwise the error code from the failing
+ * nthw_rac_rab_dma_begin()/commit() call.
+ *
+ * NOTE(review): ring indexing masks with (size - 1), which assumes the DMA
+ * buffer size is a power of two -- TODO confirm.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy the records out of the DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words of status records from the FLM STA_DATA
+ * register over RAB DMA.  In one DMA transaction:
+ *   1. write word_count to the second BUF_CTRL word (announce the STA read),
+ *   2. read word_count words from STA_DATA,
+ *   3. read BUF_CTRL back to refresh the buffer levels.
+ * On success the records are copied into 'data' and *lrn_free,
+ * *inf_avail, *sta_avail are decoded from BUF_CTRL (same layout as
+ * flm_nthw_buf_ctrl_update()); outputs are untouched on failure.
+ *
+ * Returns 0 on success, otherwise the error code from the failing
+ * nthw_rac_rab_dma_begin()/commit() call.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy the records out of the DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Statistics counter accessors.  For each counter register there is a
+ * *_cnt() reader -- which copies the shadow value into *val only when
+ * 'get' is nonzero -- and an *_update() routine that refreshes the shadow
+ * from hardware.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+/*
+ * PRB counters were resolved with module_query_register()/
+ * register_query_field() at init, so the pointers may be NULL on FPGA
+ * variants without them; the asserts document that callers must first
+ * check availability.
+ */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+/*
+ * Mandatory statistics counters (resolved with register_get_field() at
+ * init, hence no NULL asserts): REL, AUL, TUL and the current flow count.
+ */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+/*
+ * Optional statistics counters (STA/INF/PCK/CSH/CUC).  These were resolved
+ * with module_query_register()/register_query_field() at init, so the
+ * pointers may be NULL on FPGA variants without them; the asserts document
+ * that callers must first check availability.
+ */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn Done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Shadow state for one FLM (Flow Matcher) FPGA module instance: the
+ * module handle plus cached pointers to every register and field the
+ * accessors in this module use.  Populated by flm_nthw_init(); all
+ * other flm_nthw_* functions assume a successful init.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;	/* NOTE(review): opaque RAC handle -- type/owner not visible here */
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* Load/rate registers */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* Priority limits per flow type */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST table: indexed via ctrl (adr/cnt), written via data */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP (recipe) table: same ctrl/data indexing scheme as PST */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status data stream buffers and their flow control */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counter registers, one register + one cnt field each */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module handle. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HFU handle; returns NULL on allocation
+ * failure.  calloc() replaces the original malloc()+memset() pair: it
+ * zeroes in one step and checks the size multiplication for overflow.
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	struct hfu_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Free an HFU handle previously returned by hfu_nthw_new().
+ * NULL is accepted and ignored.  The memset before free() deliberately
+ * scrubs the stale register/field pointers to make use-after-free
+ * easier to catch.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HFU handle to FPGA module instance n_instance and cache all
+ * RCP register/field pointers.  Returns 0 on success, -1 if the
+ * instance does not exist.  Calling with p == NULL only probes for the
+ * instance.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of querying a second
+	 * time (matches hsh_nthw_init()).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * RCP (recipe) field setters.  Each writes a value into the shadow copy
+ * of the RCP control/data registers; values are committed to hardware
+ * by hfu_nthw_rcp_flush(), which flushes both registers.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Commit the staged RCP address/count and data fields to the FPGA. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Shadow state for one HFU FPGA module instance: the module handle plus
+ * cached pointers to the RCP control/data registers and their fields.
+ * Populated by hfu_nthw_init().
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP table indexing (address / repeat count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP table payload fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HSH FPGA module handle. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HSH module context.
+ * Returns NULL when allocation fails; otherwise a context ready for
+ * hsh_nthw_init().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	struct hsh_nthw *p_hsh = malloc(sizeof(*p_hsh));
+
+	if (p_hsh == NULL)
+		return NULL;
+
+	(void)memset(p_hsh, 0, sizeof(*p_hsh));
+	return p_hsh;
+}
+
+/* Scrub and release a HSH module context; a NULL pointer is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the HSH (hash) module instance in the FPGA, cache its RCP
+ * control/data register and field handles, and program recipe 0 with
+ * default values.
+ *
+ * If @p is NULL the call only probes for module presence: it returns 0
+ * when the instance exists and -1 otherwise.  With a non-NULL @p it
+ * returns 0 on success and -1 when the instance is missing.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache register and per-field handles for the recipe table. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: register_query_field() may return NULL on FPGA
+	 * images without AUTO_IPV4_MASK; callers must check before use.
+	 */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: program recipe 0 with all-zero defaults, then flush.
+	 * NOTE(review): p_mask and auto_ipv4_mask are not initialized here,
+	 * unlike the other data fields — confirm this is intentional.
+	 */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* 31 is used as the default hash-type value — presumably a
+	 * "disabled/invalid" marker; TODO confirm against register docs.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field setters.
+ *
+ * Each setter stages a value into the shadow of the RCP CTRL/DATA
+ * register; values are presumably only written to hardware when
+ * hsh_nthw_rcp_flush() is called (TODO confirm field_set_val semantics).
+ */
+
+/* Select the recipe index to address (CTRL.ADR). */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the access count (CTRL.CNT). */
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* Multi-word field: copies mn_words 32-bit words from val. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Offsets are signed (int32_t) and stored into the field as raw bits. */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* Multi-word field: always copies 10 words, matching the init default. */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* Optional field: silently ignored on FPGA images without AUTO_IPV4_MASK
+ * (handle is NULL in that case, see hsh_nthw_init).
+ */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Write all staged CTRL/DATA values to the hardware registers. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Context for one HSH (hash) FPGA module instance: cached register and
+ * field handles for the RCP (recipe) table.
+ */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	/* Module handle resolved in hsh_nthw_init(). */
+	nt_module_t *m_hsh;
+
+	/* RCP control register and its address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-recipe fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	/* May be NULL: optional field queried (not required) at init. */
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HST FPGA module handle. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HST module context.
+ * Returns NULL when allocation fails; otherwise a context ready for
+ * hst_nthw_init().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	struct hst_nthw *p_hst = malloc(sizeof(*p_hst));
+
+	if (p_hst == NULL)
+		return NULL;
+
+	(void)memset(p_hst, 0, sizeof(*p_hst));
+	return p_hst;
+}
+
+/* Scrub and release a HST module context; a NULL pointer is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the HST (header stripper) module instance and cache its RCP
+ * control/data register and field handles.
+ *
+ * If @p is NULL the call only probes for module presence: it returns 0
+ * when the instance exists and -1 otherwise.  With a non-NULL @p it
+ * returns 0 on success and -1 when the instance is missing.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default recipe values are
+ * programmed or flushed here — confirm this asymmetry is intentional.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache register and per-field handles for the recipe table. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/* RCP field setters: each stages a value into the RCP CTRL/DATA register
+ * shadow; values are presumably only written to hardware when
+ * hst_nthw_rcp_flush() is called (TODO confirm field_set_val32 semantics).
+ */
+
+/* Select the recipe index to address (CTRL.ADR). */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the access count (CTRL.CNT). */
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+/* Modifier slot 0 (cmd/dyn/ofs/value). */
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+/* Modifier slot 1 (cmd/dyn/ofs/value). */
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+/* Modifier slot 2 (cmd/dyn/ofs/value). */
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Write all staged CTRL/DATA values to the hardware registers. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context for one HST (header stripper) FPGA module instance: cached
+ * register and field handles for the RCP (recipe) table.
+ */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	/* Module handle resolved in hst_nthw_init(). */
+	nt_module_t *m_hst;
+
+	/* RCP control register and its address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-recipe fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+/* Forward the debug-mode setting to the underlying IFR FPGA module handle. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized IFR module context; returns NULL on OOM.
+ * NOTE(review): this file uses malloc/memset/free but, unlike the sibling
+ * flow_nthw_*.c files, does not include <stdlib.h>/<string.h> itself —
+ * presumably relying on transitive includes; TODO add explicit includes.
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	struct ifr_nthw *p = malloc(sizeof(struct ifr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and release an IFR module context; a NULL pointer is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the IFR (interface recipe) module instance and cache its RCP
+ * register/field handles.
+ *
+ * If @p is NULL the call only probes for module presence: it returns 0
+ * when the instance exists and -1 otherwise.  With a non-NULL @p it
+ * returns 0 on success and -1 when the instance is missing.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle already queried above; the original code
+	 * issued a redundant second fpga_query_module() for the same
+	 * instance (the sibling hsh/hst init functions use p_mod directly).
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Select the recipe index to address (CTRL.ADR). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the access count (CTRL.CNT). */
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Enable/disable the currently addressed IFR recipe (DATA.EN). */
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the MTU field of the currently addressed IFR recipe (DATA.MTU). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Bug fix: the original asserted mp_rcp_data_en here although the
+	 * field actually written below is mp_rcp_data_mtu.
+	 */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Write all staged CTRL/DATA values to the hardware registers. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Context for one IFR (interface recipe / MTU) FPGA module instance:
+ * cached register and field handles for the RCP table.
+ */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	/* Module handle resolved in ifr_nthw_init(). */
+	nt_module_t *m_ifr;
+
+	/* RCP control register and its address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its enable/MTU fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Clamp a product parameter to at most 1 (any nonzero value means
+ * "present").
+ */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	if (val > 1)
+		return 1;
+	return val;
+}
+
+/*
+ * Allocate a zero-initialized INFO context.
+ * Returns NULL when allocation fails; otherwise a context ready for
+ * info_nthw_init().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	struct info_nthw *p_info = malloc(sizeof(*p_info));
+
+	if (p_info == NULL)
+		return NULL;
+
+	(void)memset(p_info, 0, sizeof(*p_info));
+	return p_info;
+}
+
+/* Scrub and release an INFO context; a NULL pointer is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Snapshot all FPGA product parameters relevant to the flow filter into
+ * the INFO context.  Capabilities of absent modules are forced to 0 by
+ * multiplying with a 0/1 "present" flag.  Always returns 0.
+ *
+ * NOTE(review): unlike the other *_nthw_init functions, there is no
+ * NULL-probe mode and no module lookup here — @p must be non-NULL.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for each optional module. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is only usable when all five sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types apply when either KM or FLM is present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS usable queue count is limited by the smaller direction. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* Default -1 when the parameter is absent; the getters return these
+	 * as unsigned int, so -1 presumably wraps to UINT_MAX — TODO confirm
+	 * callers treat that as "not configured".
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/* Trivial accessors for the capability snapshot taken by info_nthw_init(). */
+
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): init stores a default of -1 into m_cat_km_if_m0/m1 when the
+ * FPGA parameter is absent; returned here as unsigned int, so the "unset"
+ * value presumably appears as UINT_MAX — confirm callers expect that.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Cached FPGA capability/parameter counts, exposed via the
+ * info_nthw_get_*() accessors above.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	/* Port topology */
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	/* Categorizer (CAT) */
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	/* KCC */
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	/* Key matcher (KM) CAM/TCAM geometry */
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	/* Flow matcher (FLM) */
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	/* Other flow-filter modules */
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	/* CAT <-> KM interface */
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	/* TX packet editor (TPE) */
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA FPGA module. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IOA module context.
+ * Returns NULL on allocation failure; release with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zeroes the allocation, replacing the malloc+memset pair. */
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/*
+ * Scrub and release an IOA module context.
+ * Accepts NULL, in which case nothing is done.
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all IOA register and field handles for FPGA instance @n_instance
+ * and bind them into @p.
+ *
+ * Calling with p == NULL acts as a pure existence probe for the module
+ * instance: returns 0 if it exists, -1 otherwise.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without initializing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control (address/count) and data registers */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * The EPP registers are optional: module_query_register()
+		 * returns NULL when absent, and the fields are left NULL so
+		 * the accessors below can skip them.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * RCP: recipe table write helpers.  The setters stage a field value; the
+ * *_flush() helpers flush the corresponding control/data registers.
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * EPP helpers: the EPP registers are optional (see ioa_nthw_init()), so
+ * each access is guarded against NULL field/register handles.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Per-instance IOA module context: FPGA handle plus resolved register and
+ * field pointers.  Populated by ioa_nthw_init(); the mp_roa_epp_* members
+ * are NULL when the optional EPP registers are absent.
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	/* RCP recipe table: control and data registers with their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	/* Special VLAN TPID register */
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP control bits hosted in IOA (may be NULL) */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write @val to FPGA field @_a only when the field handle is non-NULL
+ * (optional fields resolved with register_query_field() may be absent).
+ * @_a is evaluated exactly once; the macro expands to a single statement.
+ * The local carries a trailing underscore to avoid shadowing caller names
+ * (the original declared a confusing parenthesized local "(a)").
+ */
+#define CHECK_AND_SET_VALUE(_a, val)                        \
+	do {                                                \
+		__typeof__(_a) field_ptr_ = (_a);           \
+		if (field_ptr_)                             \
+			field_set_val32(field_ptr_, (val)); \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM FPGA module. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized KM module context.
+ * Returns NULL on allocation failure; release with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zeroes the allocation, replacing the malloc+memset pair. */
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/*
+ * Scrub and release a KM module context.
+ * Accepts NULL, in which case nothing is done.
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all KM register and field handles for FPGA instance @n_instance
+ * and bind them into @p.  Optional fields (register_query_field()) stay
+ * NULL when absent, selecting between FPGA-version-dependent layouts.
+ *
+ * Calling with p == NULL acts as a pure existence probe for the module
+ * instance: returns 0 if it exists, -1 otherwise.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without initializing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Optional fields; presence depends on the FPGA version (see below). */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL)
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/*
+ * RCP: recipe table write helpers.  The setters stage a field value; the
+ * flush helper flushes the control/data registers.  CHECK_AND_SET_VALUE
+ * silently skips fields that are absent in this FPGA version.
+ *
+ * Fix: the stray semicolons after each function body ("};") are removed --
+ * a lone ';' at file scope is not valid ISO C and is inconsistent with the
+ * other flow_nthw_*.c files in this patch.
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+} /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+/*
+ * NOTE(review): the *_b_dyn/*_b_ofs fields below are resolved with
+ * register_query_field() in km_nthw_init() and may be NULL on some FPGA
+ * versions, yet these setters do not NULL-check them -- confirm callers
+ * only use them when the fields exist.
+ */
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM: shadow-value setters for the CAM address/data registers; committed
+ * to hardware by km_nthw_cam_flush(). Stray ';' after each function body
+ * removed (empty file-scope declarations are non-conforming).
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM: shadow-value setters, committed by km_nthw_tcam_flush().
+ * Stray ';' after each function body removed.
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* Writes three 32-bit words from val into the TCAM data field. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI: shadow-value setters, committed by km_nthw_tci_flush().
+ * Stray ';' after each function body removed.
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ: shadow-value setters, committed by km_nthw_tcq_flush().
+ * Stray ';' after each function body removed.
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* BANK_MASK may be absent on older revisions; CHECK_AND_SET_VALUE guards it. */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant: writes three 32-bit words (to use in v4). */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA instance, destroy. */
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+/* NOTE(review): no dedicated mp_rcp_data_mask_d_a field exists in the
+ * struct below — presumably backed by mp_rcp_data_mask_a; confirm in the
+ * implementation file.
+ */
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/* Resolved register/field handles for one KM module instance.
+ * Pointers resolved via "query" lookups may be NULL on FPGA revisions
+ * that lack the corresponding field.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB module. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/* Allocate a zero-initialized pdb_nthw handle; returns NULL on OOM. */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() gives the same zero-initialized result as the previous
+	 * malloc()+memset() pair in one call.
+	 */
+	struct pdb_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/* Release a pdb_nthw handle. The memset scrubs stale register/field
+ * pointers before freeing, making accidental use-after-free easier to
+ * spot. NULL is a no-op.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve all PDB register/field handles for FPGA instance n_instance.
+ * When p is NULL the call only probes whether the module instance exists.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* Optional field: query (result may be NULL) rather than get. */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP */
+/* RCP field setters update shadow values only; pdb_nthw_rcp_flush()
+ * commits the CTRL and DATA registers to the hardware.
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* PCAP_KEEP_FCS is optional (queried at init); skipped when absent. */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Resolved register/field handles for one PDB module instance.
+ * mp_rcp_data_pcap_keep_fcs is optional and may be NULL.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL module. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/* Allocate a zero-initialized qsl_nthw handle; returns NULL on OOM. */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() gives the same zero-initialized result as the previous
+	 * malloc()+memset() pair in one call.
+	 */
+	struct qsl_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/* Release a qsl_nthw handle. The memset scrubs stale register/field
+ * pointers before freeing, making accidental use-after-free easier to
+ * spot. NULL is a no-op.
+ */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve all QSL register/field handles for FPGA instance n_instance.
+ * When p is NULL the call only probes whether the module instance exists.
+ * Handles resolved via register_query_field()/module_query_register() are
+ * optional and stay NULL on FPGA revisions that lack them.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* Optional fields: query (result may be NULL) rather than get. */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Placeholder: QSL needs no per-index setup; parameters are unused. */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP */
+/* RCP field setters: shadow-only updates, committed by qsl_nthw_rcp_flush().
+ * CAO/LR/TSA/VLI are optional fields (queried at init) and are skipped
+ * when absent. Stray ';' after qsl_nthw_rcp_select and blank lines between
+ * signature and body in tbl_hi/tbl_idx removed.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX */
+/* LTX registers were removed from FPGA v0.7+ (see qsl_nthw_init); every
+ * accessor is therefore a no-op when its handle is NULL.
+ */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Guard on the field actually written (was mp_ltx_addr; both are
+	 * NULL together, but mp_ltx_cnt is the correct one to test).
+	 */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* LTX may be absent entirely; skip rather than flush NULL handles. */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST */
+/* QST field setters; TX_PORT/LRE/TCI/VEN are optional fields (queried at
+ * init) and are skipped when absent. EN is always resolved (falls back to
+ * the QEN field name in qsl_nthw_init), so it is written unconditionally.
+ */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN */
+/* Queue-enable table: shadow setters committed by qsl_nthw_qen_flush(). */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ */
+/* Unmatched-queue table: shadow setters committed by qsl_nthw_unmq_flush(). */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one QSL module instance: the owning FPGA, the module handle,
+ * and register/field handles resolved once at init time.
+ * NOTE(review): the QST TX_PORT/LRE/TCI/VEN fields are NULL-checked by their
+ * setters, so they appear to be optional on some FPGA images — confirm in
+ * qsl_nthw_init().
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP control/data registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX control/data registers and their fields */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST control/data registers; trailing fields may be NULL (optional) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN control/data registers and their fields */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ control/data registers and their fields */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this RMC module instance. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RMC handle; returns NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as malloc + memset */
+	return calloc(1, sizeof(struct rmc_nthw));
+}
+
+/* Scrub and release an RMC handle; NULL is accepted and ignored. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the RMC module instance and look up its CTRL register fields.
+ *
+ * With @p == NULL the call only probes for the module: it returns 0 when the
+ * instance exists and -1 otherwise.  With a non-NULL @p it fills in the
+ * handle and returns 0, or logs and returns -1 when the instance is missing.
+ * RMC_CTRL_BLOCK_RPP_SLICE is looked up with register_query_field(), so
+ * mp_ctrl_block_rpp_slice may remain NULL on FPGA images without it.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* Optional field: query (not get) so absence is tolerated. */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-instance setup is required for RMC; the parameters exist for API
+ * symmetry with the other flow-filter modules.  Always returns 0.
+ */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL */
+/* Set the BLOCK_STATT field of the RMC CTRL register. */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+/* Set the BLOCK_KEEPA field of the RMC CTRL register. */
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* Set the BLOCK_RPP_SLICE field; skipped when the field is absent (it is
+ * looked up with register_query_field() and may be NULL).
+ */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+/* Set the BLOCK_MAC_PORT field of the RMC CTRL register. */
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+/* Set the LAG_PHY_ODD_EVEN field of the RMC CTRL register. */
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Write the CTRL register out to the hardware. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one RMC module instance: the owning FPGA, the module handle,
+ * and the CTRL register/field handles resolved at init time.
+ * mp_ctrl_block_rpp_slice may be NULL (optional field, see rmc_nthw_init()).
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this ROA module instance. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ROA handle; returns NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as malloc + memset */
+	return calloc(1, sizeof(struct roa_nthw));
+}
+
+/* Scrub and release a ROA handle; NULL is accepted and ignored. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the ROA module instance and look up all of its register fields
+ * (TUN HDR, TUN CFG, CONFIG and LAG groups).
+ *
+ * With @p == NULL the call only probes for the module: it returns 0 when the
+ * instance exists and -1 otherwise.  With a non-NULL @p it fills in the
+ * handle and returns 0, or logs and returns -1 when the instance is missing.
+ * All fields are looked up with register_get_field(), i.e. they are all
+ * expected to exist on supported FPGA images.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR */
+/* Set the ADR field of the TUNHDR control register (selects an entry). */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+/* Set the CNT field of the TUNHDR control register. */
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* Set the tunnel header data; @val must point to at least 4 x 32-bit words. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Write the TUNHDR control and data registers out to the hardware. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG */
+/*
+ * TUN CFG accessors: each sets one field of the TUNCFG control/data
+ * registers; values reach the hardware on roa_nthw_tun_cfg_flush().
+ */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX_LAG_IX field of the TUNCFG data register. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+/* Set the RECIRCULATE field of the TUNCFG data register. */
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+/* Set the PUSH_TUNNEL field of the TUNCFG data register. */
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+/* Set the RECIRC_PORT field of the TUNCFG data register. */
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+/* Set the RECIRC_BYPASS field of the TUNCFG data register. */
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Write the TUNCFG control and data registers out to the hardware. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG */
+/*
+ * ROA CONFIG accessors: each sets one forwarding field of the CONFIG
+ * register; values reach the hardware on roa_nthw_config_flush().
+ */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Write the CONFIG register out to the hardware. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG */
+/* Set the ADR field of the LAGCFG control register (selects an entry). */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+/* Set the CNT field of the LAGCFG control register. */
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+/* Set the TXPHY_PORT field of the LAGCFG data register. */
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Write the LAGCFG control and data registers out to the hardware. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Handle for one ROA module instance: the owning FPGA, the module handle,
+ * and the TUN HDR / TUN CFG / CONFIG / LAG register and field handles
+ * resolved at init time (all mandatory, see roa_nthw_init()).
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Enable/disable debug tracing for this RPP_LR module instance. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized handle; returns NULL on allocation failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as malloc + memset */
+	return calloc(1, sizeof(struct rpp_lr_nthw));
+}
+
+/* Scrub and release a handle; NULL is accepted and ignored. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the RPP_LR module instance and look up its RCP and (optional)
+ * IFR RCP register fields.
+ *
+ * With @p == NULL the call only probes for the module: it returns 0 when the
+ * instance exists and -1 otherwise.  With a non-NULL @p it fills in the
+ * handle and returns 0, or logs and returns -1 when the instance is missing.
+ * The IFR register/fields are looked up with the query variants and may
+ * remain NULL on FPGA images without IFR support.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of probing the FPGA
+	 * a second time (consistent with rmc_nthw_init()).
+	 */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Set the ADR field of the RCP control register (selects an entry). */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the CNT field of the RCP control register. */
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the EXP field of the RCP data register. */
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Write the RCP control and data registers out to the hardware. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * IFR RCP accessors.  The IFR register/fields are looked up with the query
+ * variants in rpp_lr_nthw_init() and may be NULL on FPGA images without IFR
+ * support; the asserts below only catch that in debug builds.
+ * NOTE(review): under NDEBUG a missing field would be dereferenced — confirm
+ * callers invoke these only when IFR is present.
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Write the IFR RCP control and data registers out to the hardware. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one RPP_LR module instance: the owning FPGA, the module handle,
+ * the RCP register/field handles, and the optional IFR RCP handles (NULL on
+ * FPGA images without IFR support, see rpp_lr_nthw_init()).
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* Optional IFR handles; may be NULL */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this SLC module instance. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC handle; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as malloc + memset */
+	return calloc(1, sizeof(struct slc_nthw));
+}
+
+/* Scrub and release an SLC handle; NULL is accepted and ignored. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the SLC module instance and look up its RCP register fields.
+ *
+ * With @p == NULL the call only probes for the module: it returns 0 when the
+ * instance exists and -1 otherwise.  With a non-NULL @p it fills in the
+ * handle and returns 0, or logs and returns -1 when the instance is missing.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of probing the FPGA
+	 * a second time (consistent with rmc_nthw_init()).
+	 */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Set the ADR field of the RCP control register (selects an entry). */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the CNT field of the RCP control register. */
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the TAIL_SLC_EN field of the RCP data register. */
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Set the TAIL_DYN field of the RCP data register. */
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Set the TAIL_OFS field; takes a signed offset, implicitly converted for
+ * the 32-bit field write.
+ */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Set the PCAP field of the RCP data register. */
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Write the RCP control and data registers out to the hardware. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context for one SLC FPGA module instance (see slc_nthw_init()). */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this context is bound to */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_slc;		/* resolved MOD_SLC module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* SLC_RCP_CTRL_ADR field */
+	nt_field_t *mp_rcp_cnt;		/* SLC_RCP_CTRL_CNT field */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* SLC_RCP_DATA_TAIL_SLC_EN field */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* SLC_RCP_DATA_TAIL_DYN field */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* SLC_RCP_DATA_TAIL_OFS field */
+	nt_field_t *mp_rcp_data_pcap;	/* SLC_RCP_DATA_PCAP field */
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC LR module handle. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC LR context; returns NULL on OOM. */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Scrub and release a context created by slc_lr_nthw_new(); NULL is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the SLC LR context to FPGA module instance n_instance and resolve
+ * its RCP control/data register and field handles.
+ *
+ * When p is NULL the call is a pure probe: returns 0 if the module
+ * instance exists, -1 otherwise.  Returns -1 when the instance is missing.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Log the correct module name: this is SLC_LR, not SLC */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc_lr = p_mod;	/* reuse the lookup above; no need to query twice */
+
+	/* RCP control/data register and field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Select which RCP record index the CTRL/DATA register pair addresses. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count field (SLC_RCP_CTRL_CNT). */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the SLC_RCP_DATA_TAIL_SLC_EN field of the RCP data register. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Set the SLC_RCP_DATA_TAIL_DYN field of the RCP data register. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Set the SLC_RCP_DATA_TAIL_OFS field; takes a signed value. */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Set the SLC_RCP_DATA_PCAP field of the RCP data register. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush both RCP registers, committing the values staged above. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context for one SLC LR FPGA module instance (see slc_lr_nthw_init()). */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this context is bound to */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_slc_lr;		/* resolved MOD_SLC_LR module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* SLC_RCP_CTRL_ADR field */
+	nt_field_t *mp_rcp_cnt;		/* SLC_RCP_CTRL_CNT field */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* SLC_RCP_DATA_TAIL_SLC_EN field */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* SLC_RCP_DATA_TAIL_DYN field */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* SLC_RCP_DATA_TAIL_OFS field */
+	nt_field_t *mp_rcp_data_pcap;	/* SLC_RCP_DATA_PCAP field */
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module handle. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_CPY context; returns NULL on OOM. */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/* Release the writers array, scrub the context, and free it; NULL is a no-op. */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (!p)
+		return;
+	free(p->m_writers);
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize the TX_CPY context: resolve up to six writer register groups
+ * (CPY_WRITER0..CPY_WRITER5) from FPGA module instance n_instance.
+ * When p is NULL the call only probes for the module (0 = present).
+ * Returns 0 on success; -1 on a missing module, a writer count < 1,
+ * or allocation failure.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	/* Number of writer register groups in this FPGA build */
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* variant != 0 adds per-writer mask registers (MASK_CTRL/MASK_DATA) */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/*
+	 * Wire writers from the highest index down; each case falls through
+	 * to the next lower index so entries [0 .. m_writers_cnt-1] are set.
+	 * NOTE(review): the default label maps any count > 6 onto case 6,
+	 * leaving entries 6 and above NULL while m_writers_cnt keeps the
+	 * larger value -- confirm NT_TX_CPY_WRITERS can never exceed 6.
+	 */
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					  CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+			 register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+					    CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Select which record index writer `index` addresses via its CTRL register. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set the CTRL_CNT field of writer `index`. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Set the DATA_READER_SELECT field of writer `index`. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Set the DATA_DYN field of writer `index`. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Set the DATA_OFS field of writer `index`. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Set the DATA_LEN field of writer `index`. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/* Set DATA_MASK_POINTER; only wired when the variant param is non-zero. */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Flush the CTRL/DATA registers of writer `index`, committing staged values. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Select the mask record index; requires the mask registers (variant != 0). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set the MASK_CTRL_CNT field; requires the mask registers (variant != 0). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Set the MASK_DATA_BYTE_MASK field; requires the mask registers. */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Flush the mask CTRL/DATA registers of writer `index` (variant != 0 only). */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one CPY_WRITERn group (n = array index). */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;		/* CPY_WRITERn_CTRL register */
+	nt_field_t *mp_writer_ctrl_addr;	/* CTRL_ADR field */
+	nt_field_t *mp_writer_ctrl_cnt;		/* CTRL_CNT field */
+
+	nt_register_t *mp_writer_data;		/* CPY_WRITERn_DATA register */
+	nt_field_t *mp_writer_data_reader_select;	/* DATA_READER_SELECT field */
+	nt_field_t *mp_writer_data_dyn;		/* DATA_DYN field */
+	nt_field_t *mp_writer_data_ofs;		/* DATA_OFS field */
+	nt_field_t *mp_writer_data_len;		/* DATA_LEN field */
+	nt_field_t *mp_writer_data_mask_pointer;	/* NULL unless variant != 0 */
+
+	nt_register_t *mp_writer_mask_ctrl;	/* NULL unless variant != 0 */
+	nt_field_t *mp_writer_mask_ctrl_addr;	/* MASK_CTRL_ADR field */
+	nt_field_t *mp_writer_mask_ctrl_cnt;	/* MASK_CTRL_CNT field */
+
+	nt_register_t *mp_writer_mask_data;	/* NULL unless variant != 0 */
+	nt_field_t *mp_writer_mask_data_byte_mask;	/* MASK_DATA_BYTE_MASK field */
+};
+
+/* Context for one TX_CPY FPGA module instance (see tx_cpy_nthw_init()). */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this context is bound to */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_tx_cpy;		/* resolved MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt;	/* NT_TX_CPY_WRITERS product parameter */
+	struct tx_cpy_writers_s *m_writers;	/* array of m_writers_cnt entries, owned */
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module handle. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_INS context; returns NULL on OOM. */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Scrub and release a context created by tx_ins_nthw_new(); NULL is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the TX_INS context to FPGA module instance n_instance and resolve
+ * its RCP control/data register and field handles.
+ *
+ * When p is NULL the call is a pure probe: returns 0 if the module
+ * instance exists, -1 otherwise.  Returns -1 when the instance is missing.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_ins = p_mod;	/* reuse the lookup above; no need to query twice */
+
+	/* RCP control/data register and field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Select which RCP record index the CTRL/DATA register pair addresses. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count field (INS_RCP_CTRL_CNT). */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the INS_RCP_DATA_DYN field of the RCP data register. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Set the INS_RCP_DATA_OFS field of the RCP data register. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Set the INS_RCP_DATA_LEN field of the RCP data register. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Flush both RCP registers, committing the values staged above. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Context for one TX_INS FPGA module instance (see tx_ins_nthw_init()). */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this context is bound to */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_tx_ins;		/* resolved MOD_TX_INS module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* INS_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* INS_RCP_CTRL_ADR field */
+	nt_field_t *mp_rcp_cnt;		/* INS_RCP_CTRL_CNT field */
+
+	nt_register_t *mp_rcp_data;	/* INS_RCP_DATA register */
+	nt_field_t *mp_rcp_data_dyn;	/* INS_RCP_DATA_DYN field */
+	nt_field_t *mp_rcp_data_ofs;	/* INS_RCP_DATA_OFS field */
+	nt_field_t *mp_rcp_data_len;	/* INS_RCP_DATA_LEN field */
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module handle */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_RPL handle.
+ * Returns NULL on allocation failure.
+ */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc() replaces the original malloc()+memset() pair; same result */
+	return calloc(1, sizeof(struct tx_rpl_nthw));
+}
+
+/*
+ * Free a handle previously returned by tx_rpl_nthw_new().
+ * The instance is scrubbed before free so stale pointer reuse is easier
+ * to catch; a NULL argument is a no-op.
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind handle @p to instance @n_instance of the TX_RPL FPGA module and
+ * resolve all register/field pointers used by the accessors below.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL the function only probes for the module:
+ * 0 if present, -1 otherwise.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the module handle already looked up above (was a redundant
+	 * second fpga_query_module() call)
+	 */
+	p->m_tx_rpl = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP table: stage address/count and recipe fields in the shadow registers */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Write the staged RCP control and data registers to hardware */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table accessors */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL (replace data) table accessors */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* val must point to 4 x 32-bit words (one full replace-data entry) */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one TX_RPL (TX replacer) FPGA module instance */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_rpl;
+
+	/* RCP table: recipe control (address/count) and data fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* EXT table: control and data */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* RPL table: replace-data control and value (4 x 32-bit words) */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+/* Allocate a zeroed instance; returns NULL on allocation failure */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+/* Bind to module instance n_instance; returns 0 on success, -1 if absent */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: shadow-register setters; the *_flush() calls write them to HW */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+/* val must point to 4 x 32-bit words */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v4 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-18 18:41   ` [PATCH v4 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-18 18:41   ` Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 085bcf768e..bdfaf75190 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -12,6 +12,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -54,6 +55,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a prebuilt tunnel push-header into the ROA TUNHDR table at @index.
+ * 4 writes of 4 words (64 bytes) for IPv4, 8 writes (128 bytes) for IPv6.
+ * Words are converted from network byte order and each 4-word group is
+ * written in reverse word order, as the hardware expects.
+ * Returns 0 on success; non-zero if any register write failed (errors
+ * are OR-accumulated and abort the loop).
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program the ROA TUNCFG entry at @index from the packed @color_actions
+ * bitmask (bit layout documented in flow_api_actions.h).
+ * Configures tunnel push (length, type, IP length/checksum precalc),
+ * recirculation (bypass port wins over recirculate port) and TX-port
+ * retransmit, plus the companion IOA EPP entry.
+ * Returns 0 on success, -1 on an invalid TX destination combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 disables the bypass path */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* replicate to both PHYs: TX on PHY0, recirculate to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program the IOA RCP entry at @index from the packed @color_actions
+ * bitmask: tunnel/VLAN pop, VLAN push (TPID + TCI) and queue override.
+ * Note: the ioa_set_*(0) calls are used as bit masks - each setter
+ * applied to 0 yields exactly the flag bit it would set.
+ * Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* split the TCI into its VID/DEI/PCP components */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+/* Number of color (flow statistics) counters supported */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* TX destination flags; PHY0 and PHY1 may be OR'ed to replicate to both */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+/* Tunnel outer IP version selector */
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID selector values for the IOA VLAN-push action */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+/* ROA retransmit destination encoding */
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ *
+ * The layout is selected at compile time from MAX_COLOR_FLOW_STATS;
+ * bits 31:30 of the returned word flag which layout was used.
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+/* Mask covering bits a..b inclusive; requires a >= b and (a - b) < 31 */
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/*
+ * Pack mark/IOA/ROA/QSL-HSH indices into the current color-action layout
+ * (see the bit map above); bit 31 marks the action word as valid.
+ */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	/* 1U << 31: left-shifting signed 1 into the sign bit is undefined behavior */
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* new_recirc_port flag: bit 50 - a fresh recirculate port was allocated */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* tun_ip_type: bit 24 - 0 = IPv4, 1 = IPv6 */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* ip_csum_prec: bits 49:34 - precalculated tunnel IP header checksum */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* tunhdr_len: bits 33:26 - tunnel header length, 0 if no tunnel */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* tx port: bits 23:20 - stored as txport + ROA_TX_PHY0 (0 = no retransmit) */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* tunnel type: bits 19:16 */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* recirculate: bit 25 = enable flag, bits 7:0 = recirculate port */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Returns the recirculate port, or -1 when recirculation is not enabled */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* recirc bypass: bits 15:8 - overrides the recirculate port when non-zero */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
+/* queue override: bit 31 = enable flag, bits 23:16 = host buffer (queue) id */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	/*
+	 * Use 1ULL: (1 << 31) is a signed-int shift into the sign bit
+	 * (undefined behavior) and would sign-extend to 0xffffffff80000000
+	 * when widened to 64 bits, corrupting the upper half of the mask.
+	 */
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/* Returns the queue id, or -1 when queue override is not enabled */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* vxlan pop flag: bit 28 */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* vlan pop flag: bit 29 */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1ad (QinQ) TPID for a VLAN push: tpid select bits 27:24 */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* vlan push: bit 30 = flag, bits 15:0 = TCI to push */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set only the PCP bits (15:13) of the TCI, plus the push flag (bit 30) */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/*
 * Registry of all NIC HW module caches, iterated in this fixed order by
 * flow_api_backend_init()/reset()/done(). Each entry bundles the
 * version-independent lifetime callbacks for one module: allocate/free its
 * cache, reset it to defaults, and query whether the FPGA implements it.
 */
static const struct {
	const char *name;	/* module name, used in log messages */
	int (*allocate)(struct flow_api_backend_s *be);
	void (*free)(struct flow_api_backend_s *be);
	int (*reset)(struct flow_api_backend_s *be);
	bool (*present)(struct flow_api_backend_s *be);
} module[] = {
	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
		hw_mod_cat_present
	},
	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
		hw_mod_km_present
	},
	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
		hw_mod_flm_present
	},
	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
		hw_mod_hsh_present
	},
	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
		hw_mod_hst_present
	},
	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
		hw_mod_qsl_present
	},
	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
		hw_mod_slc_present
	},
	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
	},
	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
		hw_mod_pdb_present
	},
	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
		hw_mod_ioa_present
	},
	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
		hw_mod_roa_present
	},
	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
		hw_mod_rmc_present
	},
	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
		hw_mod_tpe_present
	},
};

/* Number of entries in the module registry above */
#define MOD_COUNT (ARRAY_SIZE(module))
+
/*
 * Allocate one contiguous, zero-initialized memory block that backs several
 * arrays ("sets") and hand a pointer to each set back to the caller.
 *
 * The variadic arguments come in triples, one triple per set:
 *   void **plist  - out: receives the pointer to the set's first element
 *   int cnt       - number of elements in the set
 *   int elem_size - size of one element in bytes
 *
 * Each set is preceded by EXTRA_INDEXES reserved elements (EXTRA_INDEXES is
 * currently 0 — first enumerator of the enum in flow_api_backend.h — so the
 * offset is presently zero; kept for future hidden/scratch entries).
 *
 * On success the block is recorded in mod->base/mod->allocated_size and
 * returned; on allocation failure returns NULL (mod->base is then NULL).
 * The caller owns the block and frees it via mod->base.
 */
void *callocate_mod(struct common_func_s *mod, int sets, ...)
{
#define MAX_SETS 38
	void *base = NULL;
	void **plist[MAX_SETS];
	int len[MAX_SETS];
	int offs[MAX_SETS];
	unsigned int total_bytes = 0;
	int cnt, elem_size;

	assert(sets <= MAX_SETS);
	assert(sets > 0);

	va_list args;

	va_start(args, sets);

	/* First pass: record each set's layout and sum the total size */
	for (int i = 0; i < sets; i++) {
		plist[i] = va_arg(args, void *);
		cnt = va_arg(args, int);
		elem_size = va_arg(args, int);
		offs[i] = EXTRA_INDEXES * elem_size;
		len[i] = offs[i] + cnt * elem_size;
		total_bytes += len[i];
	}
	base = calloc(1, total_bytes);
	if (base) {
		char *p_b = (char *)base;

		/* Second pass: carve the block up and publish the pointers */
		for (int i = 0; i < sets; i++) {
			(*plist[i]) = (void *)((char *)p_b + offs[i]);
			p_b += len[i];
		}
	} else {
		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
	}

	va_end(args);

	mod->base = base;
	mod->allocated_size = total_bytes;

	return base;
}
+
+void zero_module_cache(struct common_func_s *mod)
+{
+	memset(mod->base, 0, mod->allocated_size);
+}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Upper bound on physical adapters handled by this PMD */
#define MAX_PHYS_ADAPTERS 8

/* Split a combined 32-bit module version word into its major/minor halves */
#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
#define VER_MINOR(ver) ((ver) & 0xffff)

struct flow_api_backend_s;
struct common_func_s;

/* Every *_func_s module struct begins with COMMON_FUNC_INFO_S, so any of
 * them can be viewed through this common header */
#define CAST_COMMON(mod) ((struct common_func_s *)(mod))

/* Combined cache allocation helpers, implemented in flow_api_backend.c */
void *callocate_mod(struct common_func_s *mod, int sets, ...);
void zero_module_cache(struct common_func_s *mod);

#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))

/* Magic index values: address every entry (or every entry of one bank) */
#define ALL_ENTRIES -1000
#define ALL_BANK_ENTRIES -1001
+
/*
 * Shared error-reporting helpers: each logs the failing function through
 * NT_LOG and returns a distinct negative code so callers can propagate
 * the cause unchanged.
 */

/* Entry index beyond the module's resource count: returns -2 */
static inline int error_index_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
	return -2;
}

/* Word offset beyond a multi-word register: returns -3 */
static inline int error_word_off_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
	return -3;
}

/* Module version not handled by this SW: returns -4 */
static inline int error_unsup_ver(const char *func, const char *mod, int ver)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}

/* Field not present in this module version: returns -5 */
static inline int error_unsup_field(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
		func);
	return -5;
}
+
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
/* Sentinel written by find_equal_index() when no identical entry exists */
#define NOT_FOUND 0xffffffff

/* NOTE(review): EXTRA_INDEXES evaluates to 0 (first enumerator of an
 * otherwise empty enum) — reserved elements per set are currently none.
 * COPY_INDEX depends on EXTRA_INDEX_COPY, which is declared elsewhere;
 * confirm its definition before using COPY_INDEX. */
enum { EXTRA_INDEXES };
#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
/* Move a 32-bit value between a cached register field and a caller slot:
 * get != 0 reads the cache into *val, get == 0 writes *val into the cache. */
static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
{
	uint32_t *dst = get ? val : cached_val;
	uint32_t *src = get ? cached_val : val;

	*dst = *src;
}
+
/* Same as get_set() but for a signed cached field: the value is
 * reinterpreted (cast) between int32_t and uint32_t in both directions. */
static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = (uint32_t)*cached_val;
		return;
	}
	*cached_val = (int32_t)*val;
}
+
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
/*
 * Return 1 when entries "idx" and "cmp_idx" of a module register array are
 * bytewise identical (and distinct indexes), 0 when they differ, or a
 * negative error code.
 *
 * Fix: "idx" is now bounds-checked like "cmp_idx" — previously an
 * out-of-range idx made the memcmp read past the array.
 */
static inline int do_compare_indexes(void *be_module_reg,
	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
	unsigned int nb_elements, int get, const char *func)
{
	if (!get)
		return error_unsup_field(func);
	if (cmp_idx >= nb_elements || idx >= nb_elements)
		return error_index_too_large(func);
	if (idx != cmp_idx &&
	    (memcmp((uint8_t *)be_module_reg + idx * type_size,
		    (uint8_t *)be_module_reg + cmp_idx * type_size,
		    type_size) == 0))
		return 1;
	return 0;
}
+
/* Return 1 if any of the n bytes at addr is non-zero, else 0 (n==0 -> 0). */
static inline int is_non_zero(const void *addr, size_t n)
{
	const uint8_t *byte = (const uint8_t *)addr;
	const uint8_t *end = byte + n;

	while (byte < end) {
		if (*byte)
			return 1;
		byte++;
	}
	return 0;
}
+
/* Return 1 if all n bytes at addr equal 0xff, else 0 (n==0 -> 1). */
static inline int is_all_bits_set(const void *addr, size_t n)
{
	const uint8_t *byte = (const uint8_t *)addr;
	const uint8_t *end = byte + n;

	while (byte < end) {
		if (*byte != 0xff)
			return 0;
		byte++;
	}
	return 1;
}
+
/*
 * Per-engine enable-bit positions in the CTE bitmap; the names mirror the
 * HW_CAT_CCS_*_EN fields further down (COR, HSH, QSL, IPF, SLC, PDB, MSK,
 * HST, EPP, TPE, RRB). CT_CNT is the number of engines.
 */
enum cte_index_e {
	CT_COL = 0,
	CT_COR = 1,
	CT_HSH = 2,
	CT_QSL = 3,
	CT_IPF = 4,
	CT_SLC = 5,
	CT_PDB = 6,
	CT_MSK = 7,
	CT_HST = 8,
	CT_EPP = 9,
	CT_TPE = 10,
	CT_RRB = 11,
	CT_CNT
};
+
/* Sideband info bit indicator: when bit 6 is set in a frame_offs_e value,
 * the value selects sideband data (SB_*) rather than a frame offset */
#define SWX_INFO (1 << 6)

/*
 * Dynamic frame-offset selectors ("DYN" values) used by the offset fields
 * of several modules: each names a parser-resolved position in the frame
 * (outer and tunneled variants), plus SWX_INFO-tagged sideband selectors.
 */
enum frame_offs_e {
	DYN_SOF = 0,
	DYN_L2 = 1,
	DYN_FIRST_VLAN = 2,
	DYN_MPLS = 3,
	DYN_L3 = 4,
	DYN_ID_IPV4_6 = 5,
	DYN_FINAL_IP_DST = 6,
	DYN_L4 = 7,
	DYN_L4_PAYLOAD = 8,
	DYN_TUN_PAYLOAD = 9,
	DYN_TUN_L2 = 10,
	DYN_TUN_VLAN = 11,
	DYN_TUN_MPLS = 12,
	DYN_TUN_L3 = 13,
	DYN_TUN_ID_IPV4_6 = 14,
	DYN_TUN_FINAL_IP_DST = 15,
	DYN_TUN_L4 = 16,
	DYN_TUN_L4_PAYLOAD = 17,
	DYN_EOF = 18,
	DYN_L3_PAYLOAD_END = 19,
	DYN_TUN_L3_PAYLOAD_END = 20,
	SB_VNI = SWX_INFO | 1,
	SB_MAC_PORT = SWX_INFO | 2,
	SB_KCC_ID = SWX_INFO | 3
};
+
/* Which of the two KM module interfaces to address (KM vs FLM side) */
enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };

/* Select encodings for the QW0 key extractor (quad word, 128 bits) */
enum {
	QW0_SEL_EXCLUDE = 0,
	QW0_SEL_FIRST32 = 1,
	QW0_SEL_SECOND32 = 2,
	QW0_SEL_FIRST64 = 3,
	QW0_SEL_ALL128 = 4,
};

/* Select encodings for the QW4 key extractor */
enum {
	QW4_SEL_EXCLUDE = 0,
	QW4_SEL_FIRST32 = 1,
	QW4_SEL_FIRST64 = 2,
	QW4_SEL_ALL128 = 3,
};

/* Select encodings for the SW8 key extractor (single word, 32 bits) */
enum {
	SW8_SEL_EXCLUDE = 0,
	SW8_SEL_FIRST16 = 1,
	SW8_SEL_SECOND16 = 2,
	SW8_SEL_ALL32 = 3,
};

/* Select encodings for the DW8 key extractor (double word, 64 bits) */
enum {
	DW8_SEL_EXCLUDE = 0,
	DW8_SEL_FIRST16 = 1,
	DW8_SEL_SECOND16 = 2,
	DW8_SEL_FIRST32 = 3,
	DW8_SEL_FIRST32_SWAP16 = 4,
	DW8_SEL_ALL64 = 5,
};

/* Select encodings for the SW9 key extractor */
enum {
	SW9_SEL_EXCLUDE = 0,
	SW9_SEL_FIRST16 = 1,
	SW9_SEL_ALL32 = 2,
};

/* Select encodings for the DW10 key extractor */
enum {
	DW10_SEL_EXCLUDE = 0,
	DW10_SEL_FIRST16 = 1,
	DW10_SEL_FIRST32 = 2,
	DW10_SEL_ALL64 = 3,
};

/* Select encodings for the SWX sideband word */
enum {
	SWX_SEL_EXCLUDE = 0,
	SWX_SEL_ALL32 = 1,
};
+
/* Parser protocol codes: outer L2 */
enum {
	PROT_OTHER = 0,
	PROT_L2_ETH2 = 1,
	PROT_L2_SNAP = 2,
	PROT_L2_LLC = 3,
	PROT_L2_RAW = 4,
	PROT_L2_PPPOE_D = 5,
	PROT_L2_PPOE_S = 6
};

/* Parser protocol codes: outer L3 */
enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };

/* Parser protocol codes: outer L4 */
enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };

/* Parser protocol codes: tunnel type */
enum {
	PROT_TUN_IP_IN_IP = 1,
	PROT_TUN_ETHER_IP = 2,
	PROT_TUN_GREV0 = 3,
	PROT_TUN_GREV1 = 4,
	PROT_TUN_GTPV0U = 5,
	PROT_TUN_GTPV1U = 6,
	PROT_TUN_GTPV1C = 7,
	PROT_TUN_GTPV2C = 8,
	PROT_TUN_VXLAN = 9,
	PROT_TUN_PSEUDO_WIRE = 10
};

/* Parser protocol codes: inner (tunneled) L2/L3/L4 */
enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };

enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };

enum {
	PROT_TUN_L4_OTHER = 0,
	PROT_TUN_L4_TCP = 1,
	PROT_TUN_L4_UDP = 2,
	PROT_TUN_L4_SCTP = 3,
	PROT_TUN_L4_ICMP = 4
};

/* IP fragmentation state of a frame */
enum {
	IP_FRAG_NOT_A_FRAG = 0,
	IP_FRAG_FIRST = 1,
	IP_FRAG_MIDDLE = 2,
	IP_FRAG_LAST = 3
};
+
/* Hash algorithm/tuple selection codes used by the HSH module */
enum {
	HASH_HASH_NONE = 0,
	HASH_USER_DEFINED = 1,
	HASH_LAST_MPLS_LABEL = 2,
	HASH_ALL_MPLS_LABELS = 3,
	HASH_2TUPLE = 4,
	HASH_2TUPLESORTED = 5,
	HASH_LAST_VLAN_ID = 6,
	HASH_ALL_VLAN_IDS = 7,
	HASH_5TUPLE = 8,
	HASH_5TUPLESORTED = 9,
	HASH_3TUPLE_GRE_V0 = 10,
	HASH_3TUPLE_GRE_V0_SORTED = 11,
	HASH_5TUPLE_SCTP = 12,
	HASH_5TUPLE_SCTP_SORTED = 13,
	HASH_3TUPLE_GTP_V0 = 14,
	HASH_3TUPLE_GTP_V0_SORTED = 15,
	HASH_3TUPLE_GTP_V1V2 = 16,
	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
	HASH_HASHINNER_2TUPLE = 18,
	HASH_HASHINNER_2TUPLESORTED = 19,
	HASH_HASHINNER_5TUPLE = 20,
	HASH_HASHINNER_5TUPLESORTED = 21,
	HASH_KM = 30,
	HASH_ROUND_ROBIN = 31,
	HASH_OUTER_DST_IP = 32,
	HASH_INNER_SRC_IP = 33,
};

/* Field-copy source selectors (CPY) */
enum {
	CPY_SELECT_DSCP_IPV4 = 0,
	CPY_SELECT_DSCP_IPV6 = 1,
	CPY_SELECT_RQI_QFI = 2,
	CPY_SELECT_IPV4 = 3,
	CPY_SELECT_PORT = 4,
	CPY_SELECT_TEID = 5,
};
+
/* RCK per-component control bits: each component occupies one nibble
 * (4 flag bits at bit positions comp*4 .. comp*4+3) */
#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))

/* Extension value placed above the per-component nibble flags (bit 6+) */
#define RCK_EXT(x) (((uint32_t)(x) << 6))

/* Enum values >= FIELD_START_INDEX denote register fields; lower values
 * denote whole-register functions (preset/compare/find/...) */
#define FIELD_START_INDEX 100

/* Common leading members of every module cache struct: module version,
 * backing cache memory (base/allocated_size) and a debug flag */
#define COMMON_FUNC_INFO_S         \
	int ver;                   \
	void *base;                \
	unsigned int allocated_size; \
	int debug

struct common_func_s {
	COMMON_FUNC_INFO_S;
};
+
/*
 * CAT module cache: capability counts (read from the backend at init)
 * plus the version-specific register shadow (v18/v21/v22).
 */
struct cat_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_cat_funcs;
	uint32_t nb_flow_types;
	uint32_t nb_pm_ext;
	uint32_t nb_len;
	uint32_t kcc_size;
	uint32_t cts_num;
	uint32_t kcc_banks;
	uint32_t kcc_id_bit_size;
	uint32_t kcc_records;
	uint32_t km_if_count;
	int32_t km_if_m0;
	int32_t km_if_m1;

	/* exactly one member is active, selected by "ver" */
	union {
		struct hw_mod_cat_v18_s v18;
		struct hw_mod_cat_v21_s v21;
		struct hw_mod_cat_v22_s v22;
	};
};
+
/*
 * CAT module accessor selectors: values below FIELD_START_INDEX name
 * whole-register operations (preset/compare/find/copy), values from
 * FIELD_START_INDEX name individual register fields. Later entries were
 * added by newer module versions (CCE/CCS from v22).
 */
enum hw_cat_e {
	/*
	 *  functions initial CAT v18
	 */
	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
	/* 01 */ HW_CAT_CFN_PRESET_ALL,
	/* 02 */ HW_CAT_CFN_COMPARE,
	/* 03 */ HW_CAT_CFN_FIND,
	/* 04 */ HW_CAT_CFN_COPY_FROM,
	/* 05 */ HW_CAT_COT_PRESET_ALL,
	/* 06 */ HW_CAT_COT_COMPARE,
	/* 07 */ HW_CAT_COT_FIND,
	/* fields */
	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
	/* 01 */ HW_CAT_CFN_INV,
	/* 02 */ HW_CAT_CFN_PTC_INV,
	/* 03 */ HW_CAT_CFN_PTC_ISL,
	/* 04 */ HW_CAT_CFN_PTC_CFP,
	/* 05 */ HW_CAT_CFN_PTC_MAC,
	/* 06 */ HW_CAT_CFN_PTC_L2,
	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
	/* 08 */ HW_CAT_CFN_PTC_VLAN,
	/* 09 */ HW_CAT_CFN_PTC_MPLS,
	/* 10 */ HW_CAT_CFN_PTC_L3,
	/* 11 */ HW_CAT_CFN_PTC_FRAG,
	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
	/* 13 */ HW_CAT_CFN_PTC_L4,
	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
	/* 22 */ HW_CAT_CFN_ERR_INV,
	/* 23 */ HW_CAT_CFN_ERR_CV,
	/* 24 */ HW_CAT_CFN_ERR_FCS,
	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
	/* 28 */ HW_CAT_CFN_MAC_PORT,
	/* 29 */ HW_CAT_CFN_PM_CMP,
	/* 30 */ HW_CAT_CFN_PM_DCT,
	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
	/* 32 */ HW_CAT_CFN_PM_CMB,
	/* 33 */ HW_CAT_CFN_PM_AND_INV,
	/* 34 */ HW_CAT_CFN_PM_OR_INV,
	/* 35 */ HW_CAT_CFN_PM_INV,
	/* 36 */ HW_CAT_CFN_LC,
	/* 37 */ HW_CAT_CFN_LC_INV,
	/* 38 */ HW_CAT_CFN_KM0_OR,
	/* 39 */ HW_CAT_CFN_KM1_OR,
	/* 40 */ HW_CAT_KCE_ENABLE_BM,
	/* 41 */ HW_CAT_KCS_CATEGORY,
	/* 42 */ HW_CAT_FTE_ENABLE_BM,
	/* 43 */ HW_CAT_CTE_ENABLE_BM,
	/* 44 */ HW_CAT_CTS_CAT_A,
	/* 45 */ HW_CAT_CTS_CAT_B,
	/* 46 */ HW_CAT_COT_COLOR,
	/* 47 */ HW_CAT_COT_KM,
	/* 48 */ HW_CAT_CCT_COLOR,
	/* 49 */ HW_CAT_CCT_KM,
	/* 50 */ HW_CAT_KCC_KEY,
	/* 51 */ HW_CAT_KCC_CATEGORY,
	/* 52 */ HW_CAT_KCC_ID,
	/* 53 */ HW_CAT_EXO_DYN,
	/* 54 */ HW_CAT_EXO_OFS,
	/* 55 */ HW_CAT_RCK_DATA,
	/* 56 */ HW_CAT_LEN_LOWER,
	/* 57 */ HW_CAT_LEN_UPPER,
	/* 58 */ HW_CAT_LEN_DYN1,
	/* 59 */ HW_CAT_LEN_DYN2,
	/* 60 */ HW_CAT_LEN_INV,
	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,

	/* 65 */ HW_CAT_CCE_IMM,
	/* 66 */ HW_CAT_CCE_IND,
	/* 67 */ HW_CAT_CCS_COR_EN,
	/* 68 */ HW_CAT_CCS_COR,
	/* 69 */ HW_CAT_CCS_HSH_EN,
	/* 70 */ HW_CAT_CCS_HSH,
	/* 71 */ HW_CAT_CCS_QSL_EN,
	/* 72 */ HW_CAT_CCS_QSL,
	/* 73 */ HW_CAT_CCS_IPF_EN,
	/* 74 */ HW_CAT_CCS_IPF,
	/* 75 */ HW_CAT_CCS_SLC_EN,
	/* 76 */ HW_CAT_CCS_SLC,
	/* 77 */ HW_CAT_CCS_PDB_EN,
	/* 78 */ HW_CAT_CCS_PDB,
	/* 79 */ HW_CAT_CCS_MSK_EN,
	/* 80 */ HW_CAT_CCS_MSK,
	/* 81 */ HW_CAT_CCS_HST_EN,
	/* 82 */ HW_CAT_CCS_HST,
	/* 83 */ HW_CAT_CCS_EPP_EN,
	/* 84 */ HW_CAT_CCS_EPP,
	/* 85 */ HW_CAT_CCS_TPE_EN,
	/* 86 */ HW_CAT_CCS_TPE,
	/* 87 */ HW_CAT_CCS_RRB_EN,
	/* 88 */ HW_CAT_CCS_RRB,
	/* 89 */ HW_CAT_CCS_SB0_TYPE,
	/* 90 */ HW_CAT_CCS_SB0_DATA,
	/* 91 */ HW_CAT_CCS_SB1_TYPE,
	/* 92 */ HW_CAT_CCS_SB1_DATA,
	/* 93 */ HW_CAT_CCS_SB2_TYPE,
	/* 94 */ HW_CAT_CCS_SB2_DATA,

};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
/*
 * KM module cache: CAM/TCAM geometry read from the backend, plus two
 * mask word sizes derived from the module version, and the v7 register
 * shadow.
 */
struct km_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_cam_banks;
	uint32_t nb_cam_record_words;
	uint32_t nb_cam_records;
	uint32_t nb_tcam_banks;
	uint32_t nb_tcam_bank_width;
	/* not read from backend, but rather set using version */
	uint32_t nb_km_rcp_mask_a_word_size;
	uint32_t nb_km_rcp_mask_b_word_size;
	union {
		struct hw_mod_km_v7_s v7;
	};
};
+
/*
 * KM module accessor selectors: whole-register functions first, then
 * register fields from FIELD_START_INDEX (RCP recipe fields, CAM/TCAM
 * entry fields, TCI/TCQ fields).
 */
enum hw_km_e {
	/* functions */
	HW_KM_RCP_PRESET_ALL = 0,
	HW_KM_CAM_PRESET_ALL,
	/* to sync and reset hw with cache - force write all entries in a bank */
	HW_KM_TCAM_BANK_RESET,
	/* fields */
	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
	HW_KM_RCP_QW0_OFS,
	HW_KM_RCP_QW0_SEL_A,
	HW_KM_RCP_QW0_SEL_B,
	HW_KM_RCP_QW4_DYN,
	HW_KM_RCP_QW4_OFS,
	HW_KM_RCP_QW4_SEL_A,
	HW_KM_RCP_QW4_SEL_B,
	HW_KM_RCP_DW8_DYN,
	HW_KM_RCP_DW8_OFS,
	HW_KM_RCP_DW8_SEL_A,
	HW_KM_RCP_DW8_SEL_B,
	HW_KM_RCP_DW10_DYN,
	HW_KM_RCP_DW10_OFS,
	HW_KM_RCP_DW10_SEL_A,
	HW_KM_RCP_DW10_SEL_B,
	HW_KM_RCP_SWX_CCH,
	HW_KM_RCP_SWX_SEL_A,
	HW_KM_RCP_SWX_SEL_B,
	HW_KM_RCP_MASK_A,
	HW_KM_RCP_MASK_B,
	HW_KM_RCP_DUAL,
	HW_KM_RCP_PAIRED,
	HW_KM_RCP_EL_A,
	HW_KM_RCP_EL_B,
	HW_KM_RCP_INFO_A,
	HW_KM_RCP_INFO_B,
	HW_KM_RCP_FTM_A,
	HW_KM_RCP_FTM_B,
	HW_KM_RCP_BANK_A,
	HW_KM_RCP_BANK_B,
	HW_KM_RCP_KL_A,
	HW_KM_RCP_KL_B,
	HW_KM_RCP_KEYWAY_A,
	HW_KM_RCP_KEYWAY_B,
	HW_KM_RCP_SYNERGY_MODE,
	HW_KM_RCP_DW0_B_DYN,
	HW_KM_RCP_DW0_B_OFS,
	HW_KM_RCP_DW2_B_DYN,
	HW_KM_RCP_DW2_B_OFS,
	HW_KM_RCP_SW4_B_DYN,
	HW_KM_RCP_SW4_B_OFS,
	HW_KM_RCP_SW5_B_DYN,
	HW_KM_RCP_SW5_B_OFS,
	HW_KM_CAM_W0,
	HW_KM_CAM_W1,
	HW_KM_CAM_W2,
	HW_KM_CAM_W3,
	HW_KM_CAM_W4,
	HW_KM_CAM_W5,
	HW_KM_CAM_FT0,
	HW_KM_CAM_FT1,
	HW_KM_CAM_FT2,
	HW_KM_CAM_FT3,
	HW_KM_CAM_FT4,
	HW_KM_CAM_FT5,
	HW_KM_TCAM_T,
	HW_KM_TCI_COLOR,
	HW_KM_TCI_FT,
	HW_KM_TCQ_BANK_MASK,
	HW_KM_TCQ_QUAL
};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
/* HST module cache: recipe count plus the v2 register shadow */
struct hst_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_hst_rcp_categories;
	union {
		struct hw_mod_hst_v2_s v2;
	};
};
+
/*
 * HST module accessor selectors: whole-recipe functions first, then
 * recipe register fields from FIELD_START_INDEX.
 */
enum hw_hst_e {
	/* functions */
	HW_HST_RCP_PRESET_ALL = 0,
	HW_HST_RCP_FIND,
	HW_HST_RCP_COMPARE,
	/* Control fields */
	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
	HW_HST_RCP_START_DYN,
	HW_HST_RCP_START_OFS,
	HW_HST_RCP_END_DYN,
	HW_HST_RCP_END_OFS,
	HW_HST_RCP_MODIF0_CMD,
	HW_HST_RCP_MODIF0_DYN,
	HW_HST_RCP_MODIF0_OFS,
	HW_HST_RCP_MODIF0_VALUE,
	HW_HST_RCP_MODIF1_CMD,
	HW_HST_RCP_MODIF1_DYN,
	HW_HST_RCP_MODIF1_OFS,
	HW_HST_RCP_MODIF1_VALUE,
	HW_HST_RCP_MODIF2_CMD,
	HW_HST_RCP_MODIF2_DYN,
	HW_HST_RCP_MODIF2_OFS,
	HW_HST_RCP_MODIF2_VALUE,

};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
/*
 * FLM module cache: capability counts read from the backend plus the
 * version-specific register shadow (v17/v20).
 */
struct flm_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_size_mb;
	uint32_t nb_entry_size;
	uint32_t nb_variant;
	uint32_t nb_prios;
	uint32_t nb_pst_profiles;
	union {
		struct hw_mod_flm_v17_s v17;
		struct hw_mod_flm_v20_s v20;
	};
};
+
/*
 * FLM module accessor selectors: whole-register functions first, then
 * control/status/statistics register fields from FIELD_START_INDEX.
 * Entries marked "module ver 0.20" exist only in the v20 shadow.
 */
enum hw_flm_e {
	/* functions */
	HW_FLM_CONTROL_PRESET_ALL = 0,
	HW_FLM_RCP_PRESET_ALL,
	HW_FLM_FLOW_LRN_DATA_V17,
	HW_FLM_FLOW_INF_DATA_V17,
	HW_FLM_FLOW_STA_DATA_V17,
	/* Control fields */
	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
	HW_FLM_CONTROL_INIT,
	HW_FLM_CONTROL_LDS,
	HW_FLM_CONTROL_LFS,
	HW_FLM_CONTROL_LIS,
	HW_FLM_CONTROL_UDS,
	HW_FLM_CONTROL_UIS,
	HW_FLM_CONTROL_RDS,
	HW_FLM_CONTROL_RIS,
	HW_FLM_CONTROL_PDS,
	HW_FLM_CONTROL_PIS,
	HW_FLM_CONTROL_CRCWR,
	HW_FLM_CONTROL_CRCRD,
	HW_FLM_CONTROL_RBL,
	HW_FLM_CONTROL_EAB,
	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
	HW_FLM_STATUS_CALIBDONE,
	HW_FLM_STATUS_INITDONE,
	HW_FLM_STATUS_IDLE,
	HW_FLM_STATUS_CRITICAL,
	HW_FLM_STATUS_PANIC,
	HW_FLM_STATUS_CRCERR,
	HW_FLM_STATUS_EFT_BP,
	HW_FLM_TIMEOUT_T,
	HW_FLM_SCRUB_I,
	HW_FLM_LOAD_BIN,
	HW_FLM_LOAD_PPS,
	HW_FLM_LOAD_LPS,
	HW_FLM_LOAD_APS,
	HW_FLM_PRIO_LIMIT0,
	HW_FLM_PRIO_FT0,
	HW_FLM_PRIO_LIMIT1,
	HW_FLM_PRIO_FT1,
	HW_FLM_PRIO_LIMIT2,
	HW_FLM_PRIO_FT2,
	HW_FLM_PRIO_LIMIT3,
	HW_FLM_PRIO_FT3,
	HW_FLM_PST_PRESET_ALL,
	HW_FLM_PST_BP,
	HW_FLM_PST_PP,
	HW_FLM_PST_TP,
	HW_FLM_RCP_LOOKUP,
	HW_FLM_RCP_QW0_DYN,
	HW_FLM_RCP_QW0_OFS,
	HW_FLM_RCP_QW0_SEL,
	HW_FLM_RCP_QW4_DYN,
	HW_FLM_RCP_QW4_OFS,
	HW_FLM_RCP_SW8_DYN,
	HW_FLM_RCP_SW8_OFS,
	HW_FLM_RCP_SW8_SEL,
	HW_FLM_RCP_SW9_DYN,
	HW_FLM_RCP_SW9_OFS,
	HW_FLM_RCP_MASK,
	HW_FLM_RCP_KID,
	HW_FLM_RCP_OPN,
	HW_FLM_RCP_IPN,
	HW_FLM_RCP_BYT_DYN,
	HW_FLM_RCP_BYT_OFS,
	HW_FLM_RCP_TXPLM,
	HW_FLM_RCP_AUTO_IPV4_MASK,
	HW_FLM_BUF_CTRL_LRN_FREE,
	HW_FLM_BUF_CTRL_INF_AVAIL,
	HW_FLM_BUF_CTRL_STA_AVAIL,
	HW_FLM_STAT_LRN_DONE,
	HW_FLM_STAT_LRN_IGNORE,
	HW_FLM_STAT_LRN_FAIL,
	HW_FLM_STAT_UNL_DONE,
	HW_FLM_STAT_UNL_IGNORE,
	HW_FLM_STAT_REL_DONE,
	HW_FLM_STAT_REL_IGNORE,
	HW_FLM_STAT_PRB_DONE,
	HW_FLM_STAT_PRB_IGNORE,
	HW_FLM_STAT_AUL_DONE,
	HW_FLM_STAT_AUL_IGNORE,
	HW_FLM_STAT_AUL_FAIL,
	HW_FLM_STAT_TUL_DONE,
	HW_FLM_STAT_FLOWS,
	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
};
+
+/*
+ * FLM (Flow Matcher) module API.
+ * Convention used throughout this header: *_set()/*_get() access a single
+ * shadow-copy field selected by the hw_flm_e enum value; *_flush() writes the
+ * shadow state to the FPGA; *_update() reads state back from the FPGA.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* Load/rate registers: BIN (interval), PPS/LPS/APS (packet/lookup/access rates). */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST and RCP are indexed tables: flush a [start_idx, start_idx + count) range. */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* set_mask takes a multi-word value (mask is wider than 32 bits). */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/info/status data streams: lrn pushes learn records to HW,
+ * inf/sta pull flow info and status records from HW.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH (Hasher) module: per-recipe hash/RSS key extraction configuration. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of hash recipes available in the FPGA */
+	union {
+		/* version-specific register layout; selected by COMMON_FUNC_INFO_S */
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+/* Field/function selectors for the HSH module accessors. */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* word_off selects the 32-bit word within multi-word fields (e.g. seed/mask). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL (Queue Selector) module: maps classified flows to RX queues / TX ports. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of QSL recipes */
+	uint32_t nb_qst_entries;    /* size of the queue selection table */
+	union {
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+/* Field/function selectors for the QSL module accessors. */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+/* UNMQ: destination for unmatched (unclassified) packets. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC (Slicer) module: per-recipe packet slicing (truncation) configuration. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+/* Field/function selectors for the SLC module accessors. */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC LR module: slicer variant (fields mirror hw_slc_e; separate FPGA module,
+ * v2 register layout).
+ */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+/* Field/function selectors for the SLC LR module accessors. */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB (Packet Descriptor Builder) module: RX descriptor format per recipe. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of PDB recipes */
+
+	union {
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+/* Field/function selectors for the PDB module accessors. */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+/* NOTE(review): config has set/flush but no get — apparently write-only; confirm. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module: ingress VLAN pop/push and tunnel pop actions, plus the
+ * ROA egress-port table (EPP) that lives in this module's register space.
+ */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;  /* number of IOA recipes */
+	uint32_t nb_roa_epp_entries; /* size of ROA egress-port table */
+	union {
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+/* Field/function selectors for the IOA module accessors. */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA module: tunnel header storage/config, recirculation forwarding and
+ * LAG config, plus drop counters (IGS/RCC packet and byte drops).
+ */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel configuration entries */
+	uint32_t nb_lag_entries;    /* size of the LAG configuration table */
+	union {
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+/* Field/function selectors for the ROA module accessors. */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+/* Tunnel header is a multi-word blob: word_off addresses 32-bit words. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* Drop counters: IGS = ingress stage, RCC = recirculation stage. */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module: RX MAC control — per-source blocking and LAG oddity control. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+/* Field selectors for the RMC control register (no function entries). */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* TPE (TX Packet Editor) module: umbrella for the TX-side sub-blocks
+ * RPP (padding), IFR (fragmentation/MTU), INS (insert), RPL (replace),
+ * CPY (copy writers), HFU (header field update) and CSU (checksum update).
+ */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;     /* number of TPE recipes */
+	uint32_t nb_ifr_categories;     /* number of IFR (MTU) recipes */
+	uint32_t nb_cpy_writers;        /* number of TX copy writers */
+	uint32_t nb_rpl_depth;          /* depth of the replace-data table */
+	uint32_t nb_rpl_ext_categories; /* number of extended replace entries */
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/* Field/function selectors for the TPE module accessors. */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* rpl_rpl entries are multi-word: value points to an array of words. */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug modes; WRITE traces register writes (bit-flag values). */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operations vtable implemented by each hardware backend.
+ * get_nb_* / get_*_version callbacks report FPGA capabilities;
+ * *_flush callbacks write a module's shadow state to hardware and
+ * *_update callbacks read hardware state back.  All take the opaque
+ * backend device pointer (be_dev) as first argument.
+ */
+struct flow_api_backend_ops {
+	int version; /* backend interface version, checked at init */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	/* capability queries */
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	/* RX queue management */
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Backend instance: the opaque device handle, its operations vtable,
+ * and the shadow state for every flow-filter FPGA module.
+ */
+struct flow_api_backend_s {
+	void *be_dev; /* opaque backend device, passed to every iface callback */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: init queries capabilities and allocates all module
+ * shadow state; reset re-initializes hardware; done releases resources.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
/* Free-resource pools in the FPGA; used as indexes into per-flow resource tables */
enum res_type_e {
	RES_QUEUE,
	RES_CAT_CFN,
	RES_CAT_COT,
	RES_CAT_EXO,
	RES_CAT_LEN,
	RES_KM_FLOW_TYPE,
	RES_KM_CATEGORY,
	RES_HSH_RCP,
	RES_PDB_RCP,
	RES_QSL_RCP,
	RES_QSL_QST,
	RES_SLC_RCP,
	RES_IOA_RCP,
	RES_ROA_RCP,
	RES_FLM_FLOW_TYPE,
	RES_FLM_RCP,
	RES_HST_RCP,
	RES_TPE_RCP,
	RES_TPE_EXT,
	RES_TPE_RPL,
	RES_COUNT, /* number of valid resource types - keep second to last */
	RES_INVALID /* sentinel: no/unknown resource */
};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD,
+	KM_USE_EXTRACTOR_SWORD,
+};
+
/* One collected match field, before it is formatted into a CAM/TCAM key */
struct match_elem_s {
	enum extractor_e extr; /* QWORD/SWORD extractor used for this field */
	int masked_for_tcam; /* if potentially selected for TCAM */
	uint32_t e_word[4]; /* match value, up to 4 x 32 bit */
	uint32_t e_mask[4]; /* corresponding match mask */

	int extr_start_offs_id;
	int8_t rel_offs; /* relative offset from the start-offset id */
	uint32_t word_len; /* number of 32-bit words used of e_word/e_mask */
};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
/* State for building and programming one KM (Key Matcher) CAM/TCAM entry */
struct km_flow_def_s {
	struct flow_api_backend_s *be;

	/* For keeping track of identical entries */
	struct km_flow_def_s *reference;
	struct km_flow_def_s *root;

	/* For collect flow elements and sorting */
	struct match_elem_s match[MAX_MATCH_FIELDS];
	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
	int num_ftype_elem;

	/* Finally formatted CAM/TCAM entry */
	enum cam_tech_use_e target;
	uint32_t entry_word[MAX_WORD_NUM];
	uint32_t entry_mask[MAX_WORD_NUM];
	int key_word_size;

	/* TCAM calculated possible bank start offsets */
	int start_offsets[MAX_TCAM_START_OFFSETS];
	int num_start_offsets;

	/* Flow information */

	/*
	 * HW input port ID needed for compare. In port must be identical on flow
	 * types
	 */
	uint32_t port_id;
	uint32_t info; /* used for color (actions) */
	int info_set;
	int flow_type; /* 0 is illegal and used as unset */
	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */

	/* CAM specific bank management */
	int cam_paired;
	int record_indexes[MAX_BANKS]; /* per-bank candidate record, from the hasher */
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct cam_distrib_s *cam_dist;
	struct hasher_s *hsh;

	/* TCAM specific bank management */
	struct tcam_distrib_s *tcam_dist;
	int tcam_start_bank;
	int tcam_record;
};
+
+/*
+ * KCC-CAM
+ */
/*
 * KCC-CAM 64-bit lookup key.
 * NOTE(review): bit-field order is assumed to match the HW key layout - confirm.
 */
struct kcc_key_s {
	uint64_t sb_data : 32; /* sideband data: VLAN TPID/VID, VXLAN VNI, ... */
	uint64_t sb_type : 8; /* sideband type: 0 = none, 1 = VLAN, 2 = VXLAN */
	uint64_t cat_cfn : 8;
	uint64_t port : 16;
};
+
+#define KCC_ID_INVALID 0xffffffff
+
/* State of one KCC (KM Category CAM) entry */
struct kcc_flow_def_s {
	struct flow_api_backend_s *be;
	union {
		uint64_t key64;
		uint32_t key32[2];
		struct kcc_key_s key;
	}; /* the same 64-bit key viewed raw, as a word pair, or as fields */
	uint32_t km_category;
	uint32_t id; /* unique id, KCC_ID_INVALID when unallocated */

	uint8_t *kcc_unique_ids; /* allocation bitmap for unique ids */

	int flushed_to_target; /* entry has been written to NIC HW */
	int record_indexes[MAX_BANKS]; /* per-bank candidate record, from the hasher */
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct kcc_cam_distrib_s *cam_dist;
	struct hasher_s *hsh;
};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
/* Pre-built tunnel encapsulation header to be pushed onto packets */
struct tunnel_header_s {
	union {
		uint8_t hdr8[MAX_TUN_HDR_SIZE];
		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
	} d; /* raw header bytes, also addressable as 32-bit words */
	uint32_t user_port_id;
	uint8_t len; /* total header length in bytes */

	uint8_t nb_vlans;

	uint8_t ip_version; /* 4: v4, 6: v6 */
	uint16_t ip_csum_precalc; /* precalculated IP checksum of the header */

	uint8_t new_outer;
	uint8_t l2_len;
	uint8_t l3_len;
	uint8_t l4_len;
};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
/* High-priority partial-match classes marked for special SW processing */
enum special_partial_match_e {
	/*
	 * NOTE(review): "SPCIAL" is a typo for "SPECIAL"; renaming would break
	 * users of this header elsewhere in the series - fix tree-wide.
	 */
	SPCIAL_MATCH_NONE,
	SPECIAL_MATCH_LACP,
};
+
+#define PORT_ID_NONE 0xffffffff
+
/* One output destination (physical port, virtual queue, ...) of a flow */
struct output_s {
	uint32_t owning_port_id; /* the port who owns this output destination */
	enum port_type_e type;
	int id; /* depending on port type: queue ID or physical port id or not used */
	int active; /* activated */
};
+
/*
 * Validated and converted match + action state of one flow, collected in
 * step 1 of flow creation, before NIC resources are allocated/programmed.
 */
struct nic_flow_def {
	/*
	 * Frame Decoder match info collected
	 */
	int l2_prot;
	int l3_prot;
	int l4_prot;
	int tunnel_prot;
	int tunnel_l3_prot;
	int tunnel_l4_prot;
	int vlans;
	int fragmentation;
	/*
	 * Additional meta data for various functions
	 */
	int in_port_override;
	int l4_dst_port;
	/*
	 * Output destination info collection
	 */
	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
	int dst_num_avail;

	/*
	 * To identify high priority match with mark for special SW processing (non-OVS)
	 */
	enum special_partial_match_e special_match;

	/*
	 * Mark or Action info collection
	 */
	uint32_t mark;
	uint64_t roa_actions;
	uint64_t ioa_actions;

	uint32_t jump_to_group;

	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];

	int full_offload;
	/*
	 * Action push tunnel
	 */
	struct tunnel_header_s tun_hdr;

	/*
	 * If DPDK RTE tunnel helper API used
	 * this holds the tunnel if used in flow
	 */
	struct tunnel_s *tnl;

	/*
	 * Header Stripper
	 */
	int header_strip_start_dyn;
	int header_strip_start_ofs;
	int header_strip_end_dyn;
	int header_strip_end_ofs;
	int header_strip_removed_outer_ip;

	/*
	 * Modify field
	 */
	struct {
		uint32_t select;
		uint32_t dyn;
		uint32_t ofs;
		uint32_t len;
		uint32_t level;
		union {
			uint8_t value8[16];
			uint16_t value16[8];
			uint32_t value32[4];
		}; /* 128-bit write value, viewed as bytes/shorts/words */
	} modify_field[MAX_CPY_WRITERS_SUPPORTED];

	uint32_t modify_field_count;
	uint8_t ttl_sub_enable;
	uint8_t ttl_sub_ipv4;
	uint8_t ttl_sub_outer;

	/*
	 * Key Matcher flow definitions
	 */
	struct km_flow_def_s km;

	/*
	 * Key Matcher Category CAM
	 */
	struct kcc_flow_def_s *kcc;
	int kcc_referenced;

	/*
	 * TX fragmentation IFR/RPP_LR MTU recipe
	 */
	uint8_t flm_mtu_fragmentation_recipe;
};
+
/* A flow handle is either an ordinary flow or a FLM (learner) flow */
enum flow_handle_type {
	FLOW_HANDLE_TYPE_FLOW,
	FLOW_HANDLE_TYPE_FLM,
};

/*
 * Handle returned to the caller for each created flow; kept in a doubly
 * linked list per device.
 * NOTE(review): the first union variant appears to back
 * FLOW_HANDLE_TYPE_FLOW and the second FLOW_HANDLE_TYPE_FLM - confirm.
 */
struct flow_handle {
	enum flow_handle_type type;

	struct flow_eth_dev *dev;
	struct flow_handle *next;
	struct flow_handle *prev;

	union {
		struct {
			/*
			 * 1st step conversion and validation of flow
			 * verified and converted flow match + actions structure
			 */
			struct nic_flow_def *fd;
			/*
			 * 2nd step NIC HW resource allocation and configuration
			 * NIC resource management structures
			 */
			struct {
				int index; /* allocation index into NIC raw resource table */
				/* number of contiguous allocations needed for this resource */
				int count;
				/*
				 * This resource if not initially created by this flow, but reused
				 * by it
				 */
				int referenced;
			} resource[RES_COUNT];
			int flushed;

			uint32_t flow_stat_id;
			uint32_t color;
			int cao_enabled;
			uint32_t cte;

			uint32_t port_id; /* MAC port ID or override of virtual in_port */
			uint32_t flm_ref_count;
			uint8_t flm_group_index;
			uint8_t flm_ft_index;
		};

		struct {
			uint32_t flm_data[10];
			uint8_t flm_prot;
			uint8_t flm_kid;
			uint8_t flm_prio;

			uint16_t flm_rpl_ext_ptr;
			uint32_t flm_nat_ipv4;
			uint16_t flm_nat_port;
			uint8_t flm_dscp;
			uint32_t flm_teid;
			uint8_t flm_rqi;
			uint8_t flm_qfi;

			uint8_t flm_mtu_fragmentation_recipe;

			struct flow_handle *flm_owner;
		};
	};
};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
/* Owner ids per group; owner_id is a uint8_t so 256 covers its range */
#define OWNER_ID_COUNT 256

/* Book-keeping for one translated (hardware) group */
struct group_lookup_entry_s {
	uint64_t ref_counter; /* number of users of this translation */
	uint32_t *reverse_lookup; /* back-pointer into translation_table */
};

struct group_handle_s {
	uint32_t group_count; /* capacity in translated groups */

	uint32_t *translation_table; /* group_count * OWNER_ID_COUNT entries */

	struct group_lookup_entry_s *lookup_entries; /* group_count entries */
};
+
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	group_handle = *handle;
+
+	group_handle->group_count = group_count;
+	group_handle->translation_table = calloc((uint32_t)(group_count * OWNER_ID_COUNT),
+						 sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	return *handle != NULL ? 0 : -1;
+}
+
+int flow_group_handle_destroy(void **handle)
+{
+	if (*handle) {
+		struct group_handle_s *group_handle =
+			(struct group_handle_s *)*handle;
+
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+
+		free(*handle);
+		*handle = NULL;
+	}
+
+	return 0;
+}
+
/*
 * Translate a caller-visible group number into a hardware group for the
 * given owner, allocating a new translation on first use and taking a
 * reference on every call. Group 0 is never translated.
 * Returns 0 on success (*group_out set), -1 on bad input or when all
 * hardware groups are in use.
 */
int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
			     uint32_t *group_out)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	uint32_t *table_ptr;
	uint32_t lookup;

	if (group_handle == NULL || group_in >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (group_in == 0) {
		*group_out = 0;
		return 0;
	}

	/*
	 * NOTE(review): translation_table holds group_count * OWNER_ID_COUNT
	 * entries, but the index stride below is OWNER_ID_COUNT per owner_id;
	 * for owner_id >= group_count this indexes out of bounds - confirm the
	 * intended layout (owner-major vs group-major).
	 */
	table_ptr = &group_handle->translation_table[owner_id * OWNER_ID_COUNT +
				 group_in];
	lookup = *table_ptr;

	if (lookup == 0) {
		/* unused: find the first free hardware group (index >= 1) */
		for (lookup = 1;
				lookup < group_handle->group_count &&
				group_handle->lookup_entries[lookup].ref_counter > 0;
				++lookup)
			;

		if (lookup < group_handle->group_count) {
			group_handle->lookup_entries[lookup].reverse_lookup =
				table_ptr;
			group_handle->lookup_entries[lookup].ref_counter += 1;

			*table_ptr = lookup;
		} else {
			/* all hardware groups exhausted */
			return -1;
		}
	} else {
		/* already translated - just add a reference */
		group_handle->lookup_entries[lookup].ref_counter += 1;
	}
	*group_out = lookup;
	return 0;
}
+
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *lookup;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	lookup = &group_handle->lookup_entries[translated_group];
+
+	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
+		lookup->ref_counter -= 1;
+		if (lookup->ref_counter == 0) {
+			*lookup->reverse_lookup = 0;
+			lookup->reverse_lookup = NULL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
/*
 * Bit permutation of a 32-bit word: bits selected by 0xAAAAAAA8 move down
 * 3 positions, bits selected by 0x15555555 move up 3, and bit 1 swaps
 * with bit 30.
 */
static uint32_t shuffle(uint32_t x)
{
	uint32_t down = (x & 0xAAAAAAA8) >> 3;
	uint32_t up = (x & 0x15555555) << 3;
	uint32_t bit1_to_30 = (x & 0x00000002) << 29;
	uint32_t bit30_to_1 = (x & 0x40000000) >> 29;

	return up | down | bit1_to_30 | bit30_to_1;
}
+
/* Rotate right by s with the wrapped-around bits inverted (valid for 1..31) */
static uint32_t ror_inv(uint32_t x, const int s)
{
	uint32_t low = x >> s;
	uint32_t high = (~x) << (32 - s);

	return high | low;
}
+
/*
 * Non-linear combine of two 32-bit words: x ^ y folded with a term that is
 * set where exactly two of the four rotated/inverted words carry a 1.
 */
static uint32_t combine(uint32_t x, uint32_t y)
{
	uint32_t x1 = ror_inv(x, 15);
	uint32_t x2 = ror_inv(x, 13);
	uint32_t y1 = ror_inv(y, 3);
	uint32_t y2 = ror_inv(y, 27);
	uint32_t two_of_four;

	two_of_four = (x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
		      (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
		      (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2);

	return x ^ y ^ two_of_four;
}
+
/* One 32-bit mixing stage: combine the operands, then bit-permute */
static uint32_t mix(uint32_t x, uint32_t y)
{
	uint32_t combined = combine(x, y);

	return shuffle(combined);
}
+
/* ror_inv(half, 3) applied to both 32-bit halves packed in one 64-bit word */
static uint64_t ror_inv3(uint64_t x)
{
	const uint64_t m = 0xE0000000E0000000ULL;
	uint64_t rotated = (x >> 3) | m;
	uint64_t wrapped = (x << 29) & m;

	return rotated ^ wrapped;
}
+
/* ror_inv(half, 13) applied to both 32-bit halves packed in one 64-bit word */
static uint64_t ror_inv13(uint64_t x)
{
	const uint64_t m = 0xFFF80000FFF80000ULL;
	uint64_t rotated = (x >> 13) | m;
	uint64_t wrapped = (x << 19) & m;

	return rotated ^ wrapped;
}
+
/* ror_inv(half, 15) applied to both 32-bit halves packed in one 64-bit word */
static uint64_t ror_inv15(uint64_t x)
{
	const uint64_t m = 0xFFFE0000FFFE0000ULL;
	uint64_t rotated = (x >> 15) | m;
	uint64_t wrapped = (x << 17) & m;

	return rotated ^ wrapped;
}
+
/* ror_inv(half, 27) applied to both 32-bit halves packed in one 64-bit word */
static uint64_t ror_inv27(uint64_t x)
{
	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
	uint64_t rotated = (x >> 27) | m;
	uint64_t wrapped = (x << 5) & m;

	return rotated ^ wrapped;
}
+
/* shuffle() applied independently to both 32-bit halves of a 64-bit word */
static uint64_t shuffle64(uint64_t x)
{
	uint64_t down = (x & 0xAAAAAAA8AAAAAAA8) >> 3;
	uint64_t up = (x & 0x1555555515555555) << 3;
	uint64_t bit1_to_30 = (x & 0x0000000200000002) << 29;
	uint64_t bit30_to_1 = (x & 0x4000000040000000) >> 29;

	return up | down | bit1_to_30 | bit30_to_1;
}
+
/* Concatenate two 32-bit words into one 64-bit word: x is the high half */
static uint64_t pair(uint32_t x, uint32_t y)
{
	uint64_t high = x;

	return (high << 32) | y;
}
+
/*
 * 64-bit variant of combine(): x ^ y folded with a term set where exactly
 * two of the four per-half rotated/inverted words carry a 1.
 */
static uint64_t combine64(uint64_t x, uint64_t y)
{
	uint64_t x1 = ror_inv15(x);
	uint64_t x2 = ror_inv13(x);
	uint64_t y1 = ror_inv3(y);
	uint64_t y2 = ror_inv27(y);
	uint64_t two_of_four;

	two_of_four = (x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
		      (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
		      (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2);

	return x ^ y ^ two_of_four;
}
+
/* One 64-bit mixing stage: combine the operands, then bit-permute per half */
static uint64_t mix64(uint64_t x, uint64_t y)
{
	uint64_t combined = combine64(x, y);

	return shuffle64(combined);
}
+
/*
 * Fold a 16-word (512-bit) key into a single 32-bit hash through a binary
 * tree of mix64/mix stages, as illustrated below.
 */
static uint32_t calc16(const uint32_t key[16])
{
	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
	/*   0       1       2       3       4       5       6       7     Layer 1   */
	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
	/*       0               1               2               3         Layer 2   */
	/*        \______.______/                 \______.______/                    */
	/*               0                               1                 Layer 3   */
	/*                \______________.______________/                            */
	/*                               0                                 Layer 4   */
	/*                              / \                                          */
	/*                              \./                                          */
	/*                               0                                 Layer 5   */
	/*                              / \                                          */
	/*                              \./                                Layer 6   */
	/*                             value                                         */

	uint64_t z;
	uint32_t x;

	/* layers 0-4: pair words i and i+8, then mix64 down the tree */
	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));

	/* layers 5-6: collapse the 64-bit value into a 32-bit hash */
	x = mix((uint32_t)(z >> 32), (uint32_t)z);
	x = mix(x, ror_inv(x, 17));
	x = combine(x, ror_inv(x, 17));

	return x;
}
+
/*
 * Hash a 16-word key and derive one CAM record index per bank into
 * result[0..banks-1]. Returns the full 32-bit hash value.
 */
uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
{
	uint64_t val;
	uint32_t res;

	val = calc16(key);
	res = (uint32_t)val;

	/* widen the hash by folding when more than 32 index bits are needed */
	if (hsh->cam_bw > 32)
		val = (val << (hsh->cam_bw - 32)) ^ val;

	/* slice cam_records_bw bits per bank, lowest bank first */
	for (int i = 0; i < hsh->banks; i++) {
		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
		val = val >> hsh->cam_records_bw;
	}
	return res;
}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
#ifdef TESTING
/*
 * Self-test: hash a fixed key and verify both the 32-bit return value and
 * the first three per-bank indexes against a reference recomputation.
 * Returns the number of mismatches (0 = pass).
 */
int hash_test(struct hasher_s *hsh, int banks, int record_bw)
{
	int res = 0;
	int val[10], resval[10];
	uint32_t bits = 0;

	/* only the first 4 words are non-zero; the rest default to 0 */
	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
	const uint32_t result = 0xACECAE65; /* expected hash for inval */

	for (int i = 0; i < 16; i++)
		printf("%08x,", inval[i]);
	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);

	uint32_t ret = gethash(hsh, inval, val);

	printf("Return VAL = %08X  ==  %08X\n", ret, result);
	res += (ret != result) ? 1 : 0;

	/* re-derive the per-bank indexes the same way gethash() slices them */
	int shft = (banks * record_bw) - 32;
	int mask = (1 << record_bw) - 1;

	if (shft > 0) {
		bits = (ret >> (32 - shft));
		ret ^= ret << shft;
	}

	resval[0] = ret & mask;
	ret >>= record_bw;
	resval[1] = ret & mask;
	ret >>= record_bw;
	resval[2] = ret & mask;
	resval[2] |= (bits << (record_bw - shft));

	for (int i = 0; i < 3; i++) {
		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
		res += (val[i] != resval[i]) ? 1 : 0;
	}

	return res;
}
#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
/*
 * KCC-CAM structures and defines
 */
/* Shadow of one KCC CAM record: owning flow and its reference count */
struct kcc_cam_distrib_s {
	struct kcc_flow_def_s *kcc_owner;
	int ref_cnt;
};

/*
 * Index of the CAM record for bank @bnk (record taken from
 * kcc->record_indexes). Expects a local "kcc" in scope; the statement
 * expression (GCC/Clang extension) evaluates (bnk) only once.
 */
#define BE_CAM_KCC_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
	})


/* Byte sizes of the CAM shadow table and the unique-id bitmap */
#define BE_CAM_ENTRIES \
	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)

#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
/* shared scratch stack for the recursive cuckoo move - not reentrant */
static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
/*
 * Write this flow's key, KM category and id into the CAM record selected
 * for @bank (record index from kcc->record_indexes), flush it to HW and
 * mark the shadow entry owned with ref count 1.
 * Returns 0 on success, -1 on a backend write failure.
 */
static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
{
	int res;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
				 kcc->key32[0]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
				 kcc->key32[1]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
				 kcc->km_category);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
	return res;
}
+
/*
 * Zero the CAM record used by this flow in @bank, flush it to HW and
 * release the shadow entry. Also clears the SW key/category copies.
 * Returns 0 on success, -1 on a backend write failure.
 */
static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
{
	int res = 0;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;

	kcc->key64 = 0UL;
	kcc->km_category = 0;
	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
	return res;
}
+
/*
 * Try to relocate this flow's CAM entry from its current bank to any free
 * bank (single cuckoo step). Returns 1 when the entry was moved, 0 when
 * no free bank exists or the HW write failed.
 */
static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
{
	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);

	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
		/* It will not select itself */
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
				NULL) {
			/*
			 * Populate in new position
			 */
			int res = kcc_cam_populate(kcc, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank
			 * HW flushes are really not needed, the old addresses are always taken over
			 * by the caller If you change this code in future updates, this may no
			 * longer be true then!
			 */
			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
			.kcc_owner = NULL;
			NT_LOG(DBG, FILTER,
			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       kcc->bank_used, bank,
			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
			       BE_CAM_KCC_DIST_IDX(bank));

			kcc->bank_used = bank;
			(*kcc->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Recursive cuckoo move: try to free CAM address @bank_idx by moving its
 * current owner, recursing up to @levels deep. Addresses already reserved
 * along this move chain are tracked in the shared
 * kcc_cam_addr_reserved_stack (so this path is not reentrant).
 * Returns 1 when the address was freed, 0 otherwise.
 */
static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
				       int bank_idx, int levels,
				       int cam_adr_list_len)
{
	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;

	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* cheap case first: the owner can move directly to a free bank */
	if (kcc_move_cuckoo_index(kcc))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* reserve this address so deeper levels do not pick it again */
	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
		int reserved = 0;
		int new_idx = BE_CAM_KCC_DIST_IDX(i);

		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (kcc_cam_addr_reserved_stack[i_reserved] ==
					new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		/* free new_idx one level down, then move our owner into it */
		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
						      cam_adr_list_len);
		if (res) {
			if (kcc_move_cuckoo_index(kcc))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/*
 * Scratch key for the hasher; only words 0-1 are ever written, the rest
 * stay zero. File-scope buffer: not safe for concurrent callers.
 */
static uint32_t kcc_hsh_key[16];

/*
 * Hash the 64-bit KCC key into per-bank record indexes and insert the
 * entry into the first free bank, cuckoo-moving existing entries (up to
 * depth 4) when all candidate records are taken.
 * Returns 0 on success, -1 when no place could be made.
 */
static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
{
	int res = 0;
	int val[MAX_BANKS];

	kcc_hsh_key[0] = kcc->key32[1];
	kcc_hsh_key[1] = kcc->key32[0];
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");

	/* 2-15 never changed - remains zero */

	gethash(kcc->hsh, kcc_hsh_key, val);

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
		kcc->record_indexes[i] = val[i];
	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
	       kcc->record_indexes[0], kcc->record_indexes[1],
	       kcc->record_indexes[2]);

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
				NULL) {
			bank = i_bank;
			break;
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
				i_bank++) {
			if (kcc_move_cuckoo_index_level(kcc,
							BE_CAM_KCC_DIST_IDX(i_bank),
							4, 0)) {
				bank = i_bank;
				break;
			}
		}

		if (bank < 0)
			return -1;
	}

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
	       BE_CAM_KCC_DIST_IDX(bank));
	res = kcc_cam_populate(kcc, bank);
	if (res == 0) {
		kcc->flushed_to_target = 1;
		kcc->bank_used = bank;
	} else {
		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
	}
	return res;
}
+
/*
 * Program a brand new KCC entry into the CAM.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): "%016lx" paired with the uint64_t key64 is wrong on ILP32
 * targets - consider "%016" PRIx64 from <inttypes.h>.
 */
int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
{
	int res = -1;

	NT_LOG(DBG, FILTER,
	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
	       kcc->key64, kcc->km_category, kcc->id);
	res = kcc_write_data_to_cam(kcc);
	return res;
}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Key mask patterns the KM CAM lookup can handle. A match element whose
+ * mask equals one of these (for the same word length) may be placed in
+ * CAM; any other mask forces the flow into TCAM (see km_add_match_elem).
+ * The per-entry comments name the typical protocol fields each pattern
+ * covers.
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+struct cam_distrib_s {
+	/* Flow currently occupying this CAM record; NULL when free */
+	struct km_flow_def_s *km_owner;
+};
+
+/*
+ * Flat index into the cam_dist array. NOTE: both macros capture a local
+ * variable named 'km' from the calling scope; CAM_KM_DIST_IDX uses a GCC
+ * statement expression to evaluate 'bnk' only once.
+ */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/*
+ * Scratch stack of CAM addresses already reserved during a recursive
+ * cuckoo move. NOTE(review): file-scope state makes the cuckoo move
+ * non-reentrant - assumes flow programming is single-threaded; confirm.
+ */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+struct tcam_distrib_s {
+	/* Flow currently occupying this TCAM record; NULL when free */
+	struct km_flow_def_s *km_owner;
+};
+
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach this flow definition to the shared per-NIC-device CAM/TCAM
+ * record manager. On first use (*handle == NULL) one contiguous block is
+ * allocated holding: CAM distribution table, cuckoo-move counter, TCAM
+ * distribution table and the hasher state; later callers just map their
+ * pointers into the existing block.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/*
+			 * Allocation failure: leave all manager pointers
+			 * cleared instead of computing offsets from NULL
+			 * and handing them to init_hasher()
+			 */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			km->cam_dist = NULL;
+			km->cuckoo_moves = NULL;
+			km->tcam_dist = NULL;
+			km->hsh = NULL;
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* Carve the shared block into its four sub-regions */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the shared CAM/TCAM record manager block and clear the handle.
+ * Safe to call when the handle was never allocated.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle == NULL)
+		return;
+
+	free(*handle);
+	NT_LOG(DBG, FILTER,
+	       "Free NIC DEV CAM and TCAM record manager\n");
+	*handle = NULL;
+}
+
+/*
+ * Append one match element (1, 2 or 4 32-bit words of value + mask) to
+ * this flow definition. A 3-word element is zero-padded to 4 words.
+ * Each element is classified against the cam_masks table: if its mask
+ * matches a supported CAM pattern it is CAM-eligible, otherwise it is
+ * flagged for TCAM placement (final decision made in km_key_create).
+ *
+ * start_id selects the extractor start offset (frame_offs_e), offset is
+ * the relative byte offset from that point.
+ * Returns 0 on success, -1 on invalid word length.
+ *
+ * NOTE(review): num_ftype_elem is not checked against MAX_MATCH_FIELDS
+ * here - assumes callers bound the number of elements; confirm.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/*
+ * Record whether the color/info word should be appended to the key
+ * (normalizes any non-zero 'on' to 1).
+ */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first not-yet-marked, non-sideband match element with the
+ * requested word length. Returns its index, or -1 if none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/*
+ * Debug-only: human-readable name for a frame_offs_e extractor start
+ * offset id, used in the key-creation trace output.
+ */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow:
+ *  - pack 128-bit (QWORD) and 32-bit (SWORD) match elements into the
+ *    entry word/mask arrays,
+ *  - append any sideband (SWX) words (SWX forces CAM placement),
+ *  - decide whether the key goes into CAM or TCAM, and for TCAM pad the
+ *    key to a supported length and compute the legal start-bank offsets.
+ * Returns 0 on success, -1 when the match set cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords - prefer the largest remaining elements first */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * One-word keys may start in any of banks 8-11.
+			 * Fix: populate all four start offsets - the
+			 * original wrote start_offsets[0] four times,
+			 * leaving entries 1..3 uninitialized while
+			 * num_start_offsets claimed 4.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare this (new) flow key layout against an existing one (km1, the
+ * original creator of the KM recipe) to decide whether the recipe and
+ * flow type can be reused.
+ * Returns:
+ *   0   - not reusable (different layout/target/masks/port)
+ *  -1   - exact flow clash: same masked key already programmed
+ *  >0   - km1's flow type, which this flow can reuse
+ * Side effects on reuse: copies cam_paired (CAM) or tcam_start_bank
+ * (TCAM, with tcam_record left to be found later) from km1.
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/*
+	 * NOTE(review): this loop indexes match[i] for the port check but
+	 * match_map[i] for the KCC check. match_map is a permutation of
+	 * match, so both scan the same elements, but the per-iteration
+	 * asymmetry combined with 'break' looks unintentional - confirm.
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program the KM recipe (RCP) at 'index' in hardware for this flow:
+ * assigns the QWORD (QW0/QW4), SWORD (DW8/DW10) and sideband (SWX)
+ * extractors according to the match map built by km_key_create, writes
+ * the key mask A, and sets the CAM (element count, flow-type mask,
+ * pairing) or TCAM (bank bitmap, key length) specific fields.
+ * Hardware limits enforced here: max 2 QWORDs, 2 SWORDs, 1 SWX word.
+ * Returns 0 on success, -1 on any unsupported combination.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	/* Start from a clean recipe */
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* Sideband words are only legal in CAM */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written MSW-first into the mask word array */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		/* One bank per key word, MSB-first in the bitmap */
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into the chosen CAM bank at
+ * the precomputed record index, claim the record(s) in the distribution
+ * table, and flush to hardware. A key longer than one CAM record spills
+ * into the next record (cam_paired). Returns 0 on success (OR of all
+ * backend results).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* Remaining words go into the paired (next) record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record(s) in the given bank, release the
+ * record(s) in the distribution table and flush to hardware.
+ * Mirrors cam_populate(). Returns 0 on success.
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* Paired key: clear the second record as well */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank to any
+ * other bank that is free at this key's hashed record index (cuckoo
+ * hashing move). On success the entry is re-populated in the new bank,
+ * the old distribution slots are released, the global cuckoo-move
+ * counter is bumped and 1 is returned; 0 when no bank has room or a
+ * hardware write failed.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* Paired entries also need the next record free */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to move the flow occupying CAM slot
+ * 'bank_idx' (flat index) somewhere else, freeing that slot for the
+ * caller. Recurses up to 'levels' deep, tracking already-reserved slots
+ * in the global cam_addr_reserved_stack to avoid cycles.
+ * Returns 1 when the slot could be freed, 0 otherwise.
+ * Only moves entries with the same pairedness as the requesting flow.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* Reserve our own slot before recursing so children skip it */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			/* Child freed a slot for us - our own move must now succeed */
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program this flow's key into the KM CAM:
+ *  1. hash the key to get one candidate record index per bank,
+ *  2. take the first bank whose record (and its pair, if paired) is
+ *     free,
+ *  3. otherwise try cuckoo-moving existing entries (depth 4) to make
+ *     room,
+ *  4. populate the CAM and mark the flow flushed.
+ * Returns 0 on success, -1 when no room could be made.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): trace prints indexes 0..2 - assumes >= 3 CAM banks; confirm */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Search the TCAM for a record index that is free across ALL the
+ * consecutive banks this key occupies, starting at 'start_bank'.
+ * On success sets km->tcam_record and returns 1; returns 0 when no
+ * record is free for the whole key width.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			/* the same record must be free in every bank of the key */
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Locate a TCAM placement for this flow among the start banks that are
+ * legal for its key length. On success sets km->tcam_start_bank and
+ * km->tcam_record and returns 0; returns -1 when the TCAM is full.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int offs_idx = 0; offs_idx < km->num_start_offsets; offs_idx++) {
+		int candidate_bank = km->start_offsets[offs_idx];
+
+		if (!tcam_find_free_record(km, candidate_bank))
+			continue;
+
+		km->tcam_start_bank = candidate_bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word (value + mask) of this flow into a TCAM
+ * bank. The TCAM is organized as per-byte 256-entry lookup tables: for
+ * each of the 4 bytes, this flow's record bit is set in every table
+ * entry whose value matches the byte under its mask, and cleared in all
+ * others. Finally the bank is flushed and the record is claimed in the
+ * distribution table. Returns 0 on success.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program this flow into the TCAM: find a free record if none was
+ * inherited from a reused recipe, write the TCI (color + flow type),
+ * then write every key word into its bank via tcam_write_word().
+ * Backend debug mode is temporarily disabled around the bulk word
+ * writes. Returns 0 on success, -1 when no record is available.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		/* Recipe was reused - the record still has to be located */
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	int res = 0;
+
+	km->root = km1->root ? km1->root : km1;
+	while (km1->reference)
+		km1 = km1->reference;
+	km1->reference = km;
+
+	km->info = km1->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_TCAM:
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_SYNERGY:
+	default:
+		res = -1;
+		break;
+	}
+
+	return res;
+}
+
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	int res = -1;
+
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	switch (km->target) {
+	case KM_CAM:
+		res = km_write_data_to_cam(km);
+		break;
+	case KM_TCAM:
+		res = km_write_data_to_tcam(km);
+		break;
+	case KM_SYNERGY:
+	default:
+		break;
+	}
+	return res;
+}
+
/*
 * Remove a KM match entry, taking entry sharing into account.
 *
 * Three cases:
 *  - km is a non-root member of a share group: unlink it from the root's
 *    reference chain; the hardware entry stays (still owned by the root).
 *  - km is the root of a share group: promote the next reference to be the
 *    new owner of the CAM/TCAM resources; no hardware write is needed.
 *  - km is unshared and was flushed to hardware: clear the entry in HW.
 *
 * @return 0 on success, -1 for an unsupported target or a failed HW reset
 */
int km_clear_data_match_entry(struct km_flow_def_s *km)
{
	int res = 0;

	if (km->root) {
		/* non-root member: unlink km from the reference chain */
		struct km_flow_def_s *km1 = km->root;

		while (km1->reference != km)
			km1 = km1->reference;

		km1->reference = km->reference;

		km->flushed_to_target = 0;
		km->bank_used = 0;
	} else if (km->reference) {
		/* root of a share group: hand ownership to the next member */
		km->reference->root = NULL;

		switch (km->target) {
		case KM_CAM:
			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
				km->reference;
			/* a paired CAM entry occupies two consecutive slots */
			if (km->key_word_size + !!km->info_set > 1) {
				assert(km->cam_paired);
				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
				.km_owner = km->reference;
			}
			break;
		case KM_TCAM:
			/* each key word occupies one TCAM bank */
			for (int i = 0; i < km->key_word_size; i++) {
				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
					km->tcam_record)].km_owner = km->reference;
			}
			break;
		case KM_SYNERGY:
		default:
			res = -1;
			break;
		}

		km->flushed_to_target = 0;
		km->bank_used = 0;
	} else if (km->flushed_to_target) {
		/* unshared entry that reached hardware: clear it there */
		switch (km->target) {
		case KM_CAM:
			res = cam_reset_entry(km, km->bank_used);
			break;
		case KM_TCAM:
			res = tcam_reset_entry(km);
			break;
		case KM_SYNERGY:
		default:
			res = -1;
			break;
		}
		km->flushed_to_target = 0;
		km->bank_used = 0;
	}

	return res;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
/* One entry in the tunnel database (see the file-static 'tunnels' list). */
struct tunnel_s {
	struct tunnel_cfg_s cfg;       /* tunnel match values */
	struct tunnel_cfg_s cfg_mask;  /* per-field masks applied to cfg */
	uint32_t flow_stat_id;         /* (uint32_t)-1 until set by flow code */
	uint8_t vport;                 /* allocated virtual port number */
	int refcnt;                    /* number of users of this definition */
	struct tunnel_s *next; /* linked list of defined tunnels */
};
+
+int is_virtual_port(uint8_t virt_port)
+{
+	return !!(virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+		  virt_port < MAX_HW_VIRT_PORTS);
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+static struct tunnel_s *tunnels;
+
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	for (uint8_t i = VIRTUAL_TUNNEL_PORT_OFFSET; i < MAX_HW_VIRT_PORTS;
+			i++) {
+		if (!vport[i - VIRTUAL_TUNNEL_PORT_OFFSET]) {
+			vport[i - VIRTUAL_TUNNEL_PORT_OFFSET] = 1;
+			return i;
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
/*
 * Compare two values under the intersection of both masks: evaluates to
 * non-zero when (v1 & msk1 & msk2) == (v2 & msk1 & msk2). Arguments are
 * expanded once into typed locals to avoid multiple evaluation.
 */
#define check(_v1, _v2, _msk1, _msk2) ({ \
	__typeof__(_v1) (v1) = (_v1); \
	__typeof__(_v2) (v2) = (_v2); \
	__typeof__(_msk1) (msk1) = (_msk1); \
	__typeof__(_msk2) (msk2) = (_msk2); \
	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
})
+
/*
 * Compare the IPv4 tunnel fields (src/dst IP and src/dst UDP port) of two
 * tunnel configurations under the intersection of both masks; evaluates to
 * non-zero when all masked fields are equal.
 */
#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
		(tun1_msk)->s_port) &&                                    \
	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
		(tun1_msk)->d_port)); \
})
+
/*
 * Compare the IPv6 tunnel fields (src/dst address as two 64-bit halves and
 * src/dst UDP port) of two tunnel configurations under the intersection of
 * both masks; evaluates to non-zero when all masked fields are equal.
 */
#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
		(tun1_msk)->s_port) &&                                          \
	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
		(tun1_msk)->d_port)); \
})
+
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type == tnlcfg->tun_type) {
+		if (tun->cfg.ipversion == 4) {
+			return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+						  tnlcfg, tnlcfg_mask);
+		} else {
+			return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+						  tnlcfg, tnlcfg_mask);
+		}
+	}
+	return 0;
+}
+
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *tun = tunnels, *prev = NULL;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+	/* find tunnel in list */
+	while (tun) {
+		if (tun == tnl)
+			break;
+		prev = tun;
+		tun = tun->next;
+	}
+
+	if (!tun) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--tun->refcnt == 0) {
+		if (prev)
+			prev->next = tun->next;
+		else
+			tunnels = tun->next;
+		flow_tunnel_free_virt_port(tun->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tun->vport);
+		free(tun);
+	}
+
+	return 0;
+}
+
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
/* Return the virtual port number assigned to a tunnel DB entry. */
uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
{
	return rtnl->vport;
}
+
/* Assign the flow statistics id, marking the tunnel as defined/set. */
void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
{
	rtnl->flow_stat_id = flow_stat_id;
}
+
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->vport == vport && (flow_stat_id == tun->flow_stat_id ||
+					    flow_stat_id == (uint32_t)-1)) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+		tun = tun->next;
+	}
+
+	return -1;
+}
+
/*
 * Fold a buffer of 16-bit words into a ones'-complement style checksum
 * accumulator (as used for the IPv4 header checksum).
 *
 * @param data buffer of 16-bit words
 * @param size buffer size in bytes
 * @param seed initial accumulator value, allowing sums to be chained
 * @return the folded 16-bit sum
 *
 * NOTE(review): for an odd size the trailing byte is obtained by casting
 * a 16-bit word to unsigned char, which picks the low-order byte of the
 * host representation - presumably intended for a little-endian host;
 * verify on big-endian targets.
 */
static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
			      be16_t seed)
{
	unsigned int sum = seed;
	unsigned int idx;

	for (idx = 0; idx < size / 2; idx++)
		sum += (unsigned int)(data[idx]);
	if (size & 1)
		sum += (unsigned char)data[idx];
	/* unfold */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (be16_t)sum;
}
+
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	for (uint8_t i = 0; i < size; i++)
+		result[i] = ((const uint8_t *)elem->spec)[i];
+}
+
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[5],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[5],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/*
+ * Query the backend whether the CAT (categorizer) module is present on
+ * this device.
+ */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_cat_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Read the CAT module version and resource counts from the backend and
+ * allocate the version-specific shadow/cache tables (versions 18, 21, 22).
+ *
+ * Returns 0 on success, a negative error code on an unsupported version
+ * or an invalid resource count, and -1 on allocation failure.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory resource counts: must be strictly positive. */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC and KM-interface resources may legitimately be zero (absent),
+	 * but never negative.
+	 */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks exist. */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/* Allocate all cache tables for the detected version in one block.
+	 * callocate_mod() takes (ptr, count, elem_size) triples; the order
+	 * and count of triples must match the second argument exactly.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT module cache allocated by hw_mod_cat_alloc().
+ * free(NULL) is a well-defined no-op, so no guard is needed; the pointer
+ * is reset to NULL afterwards to make repeated calls safe and to defend
+ * against use-after-free.
+ * NOTE(review): this assumes callocate_mod() placed all version tables
+ * inside the single "base" allocation - confirm against callocate_mod.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset CFN entry "i" to permissive defaults: clear the whole entry,
+ * then open every protocol/error match field (0xffffffff or the
+ * documented "accept all" value) so the entry matches all traffic until
+ * a flow programs it. Fields added in version 21 are only written when
+ * the module version supports them.
+ *
+ * NOTE(review): only the return value of the initial PRESET_ALL call is
+ * propagated; errors from the subsequent field writes are ignored -
+ * confirm this is intentional.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the entire CAT module: zero the shadow cache and flush every
+ * table to hardware. Version 18 has a single KM interface; newer
+ * versions flush each configured KM interface (km_if_m0, and km_if_m1
+ * when km_if_count > 1) separately. Tables that only exist from a given
+ * version (KCC, CCE/CCS) are flushed conditionally.
+ *
+ * Returns 0 on success, -1 on the first flush failure.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* Second KM interface only exists when two are configured. */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC table is optional; only flush when present. */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist from version 22 onwards. */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Write the cached CFN entries [start_idx .. start_idx + count) to
+ * hardware. ALL_ENTRIES flushes the whole table and requires
+ * start_idx == 0. Returns the backend result or a negative error code
+ * on an out-of-range request.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES) {
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		count = be->cat.nb_cat_funcs;
+	} else if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs) {
+		return error_index_too_large(__func__);
+	}
+
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Get or set a single field of CFN entry "index" in the version-specific
+ * cache (this touches only the shadow copy; a separate flush writes it
+ * to hardware). "get" selects direction; word_off selects the 32-bit
+ * word for multi-word fields (PM_CMP). PRESET_ALL memsets the whole
+ * entry with the low byte of *value. Returns 0 on success or a negative
+ * error code for bad index/field/version.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* *value is the source entry index to copy from */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field of entry "index" into the cache. */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one CFN field of entry "index" from the cache into *value. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map an (interface selector, KM interface id) pair to the backend
+ * array index (0 or 1). Version 18 has a single interface, always 0.
+ * The SECOND selector may only resolve to km_if_m1; the first selector
+ * may match either configured id. Returns a negative error code when
+ * the id matches no configured interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num != KM_FLM_IF_SECOND && be->cat.km_if_m0 == km_if_id)
+		return 0;
+
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush cached KCE entries [start_idx .. start_idx + count) to hardware
+ * for the selected KM/FLM interface. Each entry carries 8 bits - one
+ * per CFN - so the table holds nb_cat_funcs / 8 entries.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	unsigned int entries = be->cat.nb_cat_funcs / 8;
+
+	if (count == ALL_ENTRIES)
+		count = entries;
+	if ((unsigned int)(start_idx + count) > entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM/FLM interface the caller addresses */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries via the KM interface (km_if_id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCE entries via the FLM interface (km_if_id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the enable bitmap of KCE entry "index" in the cache for
+ * the selected KM/FLM interface. Version 18 has a single interface;
+ * versions 21/22 keep one bitmap per interface (km_if_idx).
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCE field via the KM interface (km_if_id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCE field via the KM interface (km_if_id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCE field via the FLM interface (km_if_id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCE field via the FLM interface (km_if_id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/*
+ * Flush cached KCS (category select) entries [start_idx .. start_idx +
+ * count) to hardware for the selected KM/FLM interface. The table has
+ * one entry per CFN.
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	unsigned int entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = entries;
+	if ((unsigned int)(start_idx + count) > entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM/FLM interface the caller addresses */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries via the KM interface (km_if_id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCS entries via the FLM interface (km_if_id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the category field of KCS entry "index" in the cache for
+ * the selected KM/FLM interface. Version 18 has a single interface;
+ * versions 21/22 keep one category per interface (km_if_idx).
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCS field via the KM interface (km_if_id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCS field via the KM interface (km_if_id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCS field via the FLM interface (km_if_id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCS field via the FLM interface (km_if_id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush cached FTE (flow type enable) entries [start_idx .. start_idx +
+ * count) to hardware for the selected KM/FLM interface. The table size
+ * is nb_cat_funcs / 8 * nb_flow_types * key_cnt, where key_cnt is 4
+ * from version 20 onwards and 2 before - matching the allocation
+ * multipliers in hw_mod_cat_alloc().
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries via the KM interface (km_if_id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush FTE entries via the FLM interface (km_if_id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the enable bitmap of FTE entry "index" in the cache for
+ * the selected KM/FLM interface. The index bound uses the same
+ * key_cnt-scaled table size as hw_mod_cat_fte_flush(). Version 18 has
+ * a single interface; versions 21/22 keep one bitmap per interface.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/* Flush CTE entries (one per CAT function) to hardware. */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a CTE entry in the shadow copy.
+ * Versions 18/21/22 share the v18 CTE record layout, so the v18 view is
+ * used for all of them.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CTS entries; each CAT function occupies addr_size consecutive
+ * records.
+ * NOTE(review): the (_VER_ < 15) branch looks unreachable — the CAT mod
+ * functions only support versions 18/21/22, and hw_mod_cat_cts_mod()
+ * below omits it — confirm and consider dropping it.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write the CAT_A/CAT_B field of a CTS record in the
+ * shadow copy; the v18 record layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* records per CAT function — same sizing as hw_mod_cat_cts_flush() */
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write a COT field in the shadow copy. Besides plain
+ * fields, three pseudo-fields operate on whole records:
+ *   PRESET_ALL - memset the record to the low byte of *value (set only),
+ *   COMPARE    - compare record[index] with record[*value],
+ *   FIND       - search for a record equal to record[index], returning
+ *                the match index through *value.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a KCC CAM record in the shadow
+ * copy. The key spans two 32-bit words (see struct cat_v18_kcc_s), so
+ * word_off selects word 0 or 1 for HW_CAT_KCC_KEY.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1) /* key[] has exactly 2 words */
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/* Flush CCE entries to hardware; the CCE table holds a fixed 4 entries. */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a CCE entry in the shadow copy.
+ * Bug fix: the index bound was checked against be->cat.nb_len (the size
+ * of the LEN table); the CCE table holds a fixed 4 entries, as used by
+ * hw_mod_cat_cce_flush() above.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a CCS entry in the shadow copy.
+ * Bug fix: the index bound was checked against be->cat.nb_len (the size
+ * of the LEN table); the CCS table holds a fixed 1024 entries, as used
+ * by hw_mod_cat_ccs_flush() above.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Return true when the backend reports an FLM module in the FPGA. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	if (be->flm.base) {
+		free(be->flm.base);
+		be->flm.base = NULL;
+	}
+}
+
+/*
+ * Reset the FLM shadow cache to zero, apply the default SDRAM split, and
+ * flush all FLM configuration tables to hardware.  Always returns 0; the
+ * flush results are not checked.  NOTE(review): 0x10 is the default
+ * SPLIT_SDRAM_USAGE value - confirm against the FPGA register spec.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached CONTROL registers to hardware through the backend. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set one CONTROL field in the register shadow cache.
+ * HW_FLM_CONTROL_PRESET_ALL is set-only and memsets the whole struct.
+ * Cache only - nothing is written to hardware until a flush.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CONTROL field in the shadow cache. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one CONTROL field from the shadow cache into *value. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached STATUS registers to hardware through the backend. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the STATUS shadow cache from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set one STATUS field in the register shadow cache. */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one STATUS field in the shadow cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one STATUS field from the shadow cache into *value. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached TIMEOUT register to hardware through the backend. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the TIMEOUT field in the register shadow cache. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the TIMEOUT field in the shadow cache. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the TIMEOUT field from the shadow cache into *value. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached SCRUB register to hardware through the backend. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the SCRUB interval field in the shadow cache. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the SCRUB interval field in the shadow cache. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the SCRUB interval field from the shadow cache into *value. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_BIN register to hardware through the backend. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_BIN field in the shadow cache. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_BIN field in the shadow cache. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_BIN field from the shadow cache into *value. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_PPS register to hardware through the backend. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_PPS field in the shadow cache. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_PPS field in the shadow cache. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_PPS field from the shadow cache into *value. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_LPS register to hardware through the backend. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_LPS field in the shadow cache. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_LPS field in the shadow cache. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_LPS field from the shadow cache into *value. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_APS register to hardware through the backend. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_APS field in the shadow cache. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_APS field in the shadow cache. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_APS field from the shadow cache into *value. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached PRIO registers to hardware through the backend. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set one PRIO field (limit/flow-type pair for each of
+ * the four priority levels) in the register shadow cache.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one PRIO field in the shadow cache. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one PRIO field from the shadow cache into *value. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) PST profile entries to hardware.
+ * count == ALL_ENTRIES means all profiles.  NOTE(review): a negative
+ * start_idx is not rejected here - callers are presumed to pass >= 0.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get (get != 0) or set one field of PST profile [index] in the shadow
+ * cache.  HW_FLM_PST_PRESET_ALL is set-only and memsets the whole entry.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one PST profile field in the shadow cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one PST profile field from the shadow cache into *value. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP (recipe) entries to hardware.
+ * count == ALL_ENTRIES means all categories.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get (get != 0) or set one field of RCP entry [index] in the shadow
+ * cache.  HW_FLM_RCP_PRESET_ALL is set-only (memsets the entry) and
+ * HW_FLM_RCP_MASK copies the whole 10-word mask array; all other fields
+ * are single 32-bit values.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the 10-word RCP mask array (only HW_FLM_RCP_MASK is valid here).
+ * NOTE(review): a wrong field yields error_unsup_ver, not
+ * error_unsup_field - misleading error text, though still an error code.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/* Set one scalar RCP field; the mask must go through rcp_set_mask(). */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field (mask reads copy 10 words into *value). */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the BUF_CTRL shadow (learn/info/status FIFO levels) from HW. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field from the shadow cache; these are get-only. */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public getter for BUF_CTRL fields; no setter exists by design. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh all statistics counter shadows from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one statistics counter from the shadow cache into *value.
+ * The v17 counters are handled in the outer case list; the nested switch
+ * in the default branch serves the counters that only exist from v18
+ * onward (allocated with the v20 shadow).  Counters are read-only.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters; reject them on older versions. */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, passed as a word array) to
+ * the FLM learn FIFO via the backend.  Returns the backend's result.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_cnt words of flow-info records into *value from the backend.
+ * NOTE(review): unlike lrn_data_set_flush, the backend call's result is
+ * discarded and 0 is returned - confirm flm_inf_data_update cannot fail.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (flm_v17_sta_data_s as a word array) from the
+ * backend.  NOTE(review): backend result discarded here too - see above.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay used to pack/unpack two 28-bit member indices per 64 bits of
+ * the mbr_idx byte array in flm_v17_lrn_data_s.  ID3/ID4 re-apply the
+ * overlay at byte offset 7 (bit 56) to reach the third and fourth index.
+ * NOTE(review): the cast of an arbitrary byte pointer to this struct is
+ * unaligned and relies on compiler-specific bit-field layout - verify on
+ * every supported compiler/ABI.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* FLM v17 CONTROL register shadow: enable/init plus FIFO interrupt and
+ * status-record selection flags.
+ */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* FLM v17 STATUS register shadow (read-mostly module state flags). */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* FLM v17 flow timeout register shadow. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* FLM v17 scrubber interval register shadow. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* FLM v17 load indicator register shadows (bin/pps/lps/aps). */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* FLM v17 priority configuration: limit/flow-type pair per level 0-3. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* FLM v17 PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* FLM v17 recipe (RCP) entry: key extraction and flow handling config. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* FLM v17 buffer control shadow: free/available FIFO levels. */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * Packed wire formats exchanged with the FLM FIFOs.  Bit positions in the
+ * comments are relative to the start of the record, high:low.
+ * NOTE(review): layout depends on little-endian byte order and the
+ * compiler's bit-field allocation - verify on each supported toolchain.
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record popped from the FLM info FIFO. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Status record popped from the FLM status FIFO. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* One-word shadows for the v17 statistics counter registers. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * Register shadow for the FLM module, version 17.  All pointers reference
+ * slices of one block allocated by hw_mod_flm_alloc() (callocate_mod) and
+ * are released together by hw_mod_flm_free().
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20 statistics registers added on top of the v17 set; each is a
+ * single 32-bit counter.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Shadow-cache layout for the FLM v20 module. v20 reuses the v17
+ * register-group structs and appends the v20-only statistics counters
+ * at the end.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	/* v20 additions below this point. */
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	/* Ask the backend whether the HSH FPGA module exists on this device. */
+	bool hsh_present = be->iface->get_hsh_present(be->be_dev);
+
+	return hsh_present;
+}
+
+/*
+ * Query the HSH module version and allocate its shadow cache.
+ * Returns 0 on success, -1 on allocation failure, negative error code
+ * for unsupported versions.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Only HSH version 5 is supported. */
+	if (_VER_ != 5)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	/* Version 5 has a fixed number of RCP entries. */
+	be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+	if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+		&be->hsh.v5.rcp,
+		be->hsh.nb_rcp,
+		sizeof(struct hsh_v5_rcp_s)))
+		return -1;
+
+	return 0;
+}
+
+/* Release the HSH shadow cache (single allocation anchored at base). */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	/*
+	 * free(NULL) is a no-op, so the previous `if (base)` guard was
+	 * redundant; clearing the pointer keeps a repeated free harmless.
+	 */
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Reset the HSH module: clear the shadow cache and write the zeroed
+ * RCP table through to the hardware. Returns the flush result.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP cache entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	/*
+	 * Reject negative start_idx explicitly and use an unsigned range
+	 * compare, matching the other hw_mod_*_flush helpers; the previous
+	 * signed compare let a negative start_idx through to the backend.
+	 */
+	if (start_idx < 0 ||
+			(unsigned int)(start_idx + count) > be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Read or write one field of an HSH RCP shadow-cache entry.
+ *
+ * index:    RCP entry, must be < be->hsh.nb_rcp
+ * word_off: word offset for array fields (MAC_PORT_MASK, WORD_MASK)
+ * value:    input (set) or output (get)
+ * get:      non-zero reads the field, zero writes it
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * mac_port_mask[] holds HSH_RCP_MAC_PORT_MASK_SIZE
+			 * words, so the last valid word_off is SIZE - 1.
+			 * The previous '>' check permitted word_off == SIZE
+			 * and accessed one element past the end.
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* Same off-by-one fix as MAC_PORT_MASK above:
+			 * word_mask[] has HSH_RCP_WORD_MASK_SIZE entries.
+			 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers over the shared modify helper. */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &v, 0);
+}
+
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	/* get == 1: the helper writes the field into *value. */
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/* HSH v5 RCP entry: one hash recipe as mirrored in the shadow cache. */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];	/* indexed by word_off, valid 0..3 */
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;	/* signed byte offset */
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];	/* indexed by word_off, valid 0..9 */
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* HSH v5 shadow-cache layout: the RCP table only. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	/* Ask the backend whether the HST FPGA module exists on this device. */
+	bool hst_present = be->iface->get_hst_present(be->be_dev);
+
+	return hst_present;
+}
+
+/*
+ * Query the HST module version and category count, then allocate the
+ * shadow cache. Returns 0 on success, -1 on allocation failure,
+ * negative error code for bad counts or unsupported versions.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Number of HST RCP categories is reported by the backend. */
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	/* Only HST version 2 is supported. */
+	if (_VER_ != 2)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+		&be->hst.v2.rcp,
+		be->hst.nb_hst_rcp_categories,
+		sizeof(struct hst_v2_rcp_s)))
+		return -1;
+
+	return 0;
+}
+
+/* Release the HST shadow cache (single allocation anchored at base). */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	/*
+	 * free(NULL) is a no-op, so the previous `if (base)` guard was
+	 * redundant; clearing the pointer keeps a repeated free harmless.
+	 */
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Reset the HST module: clear the shadow cache and write the zeroed
+ * RCP table through to the hardware. Returns the flush result.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/* Flush [start_idx, start_idx + count) HST RCP entries to hardware. */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = (count == ALL_ENTRIES) ?
+		(int)be->hst.nb_hst_rcp_categories : count;
+
+	if ((unsigned int)(start_idx + n) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, n);
+}
+
+/*
+ * Read or write one field of an HST RCP shadow-cache entry.
+ * get: non-zero reads the field into *value, zero writes *value.
+ * Returns 0 on success, negative error code otherwise.
+ * (memset requires <string.h>, added to this file's includes.)
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Bug fix: the result was previously discarded, so
+			 * the rv check below could never fire and FIND
+			 * errors/results were silently lost.
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers over the shared modify helper. */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_hst_rcp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	/* get == 1: the helper writes the field into *value. */
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * HST v2 RCP entry: one header-strip recipe (strip window plus up to
+ * three packet modifier slots) as mirrored in the shadow cache.
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* HST v2 shadow-cache layout: the RCP table only. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	/* Ask the backend whether the IOA FPGA module exists on this device. */
+	bool ioa_present = be->iface->get_ioa_present(be->be_dev);
+
+	return ioa_present;
+}
+
+/*
+ * Query IOA version and table sizes, then allocate the shadow cache
+ * (RCP table, special TPID config, ROA EPP table) in one block.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_ioa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	/* Only IOA version 4 is supported. */
+	if (_VER_ != 4)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+		&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+		sizeof(struct ioa_v4_rcp_s),
+		&be->ioa.v4.tpid, 1,
+		sizeof(struct ioa_v4_special_tpid_s),
+		&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+		sizeof(struct ioa_v4_roa_epp_s)))
+		return -1;
+
+	return 0;
+}
+
+/* Release the IOA shadow cache (single allocation anchored at base). */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	/*
+	 * free(NULL) is a no-op, so the previous `if (base)` guard was
+	 * redundant; clearing the pointer keeps a repeated free harmless.
+	 */
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Reset the IOA module: clear the shadow cache, push the zeroed tables
+ * to hardware and program the default custom TPID values.
+ *
+ * NOTE(review): the flush/config return codes are ignored and 0 is
+ * always returned, unlike hw_mod_hsh_reset() which propagates the
+ * flush result — confirm this is intentional.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;
+}
+
+/* Flush [start_idx, start_idx + count) IOA RCP entries to hardware. */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = (count == ALL_ENTRIES) ?
+		(int)be->ioa.nb_rcp_categories : count;
+
+	if ((unsigned int)(start_idx + n) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, n);
+}
+
+/*
+ * Read or write one field of an IOA RCP shadow-cache entry.
+ * get: non-zero reads the field into *value, zero writes *value.
+ * For FIND/COMPARE, *value carries the word offset / search key.
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* Write-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers over the shared modify helper. */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_ioa_rcp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	/* get == 1: the helper writes the field into *value. */
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	/* Write the cached custom TPID configuration through to the FPGA. */
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/* Set one of the two cached custom TPID values (flushed separately). */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	/* Only IOA version 4 is supported. */
+	if (_VER_ != 4)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	switch (field) {
+	case HW_IOA_CONFIG_CUST_TPID_0:
+		be->ioa.v4.tpid->cust_tpid_0 = value;
+		break;
+	case HW_IOA_CONFIG_CUST_TPID_1:
+		be->ioa.v4.tpid->cust_tpid_1 = value;
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+/* Flush [start_idx, start_idx + count) ROA EPP entries to hardware. */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	int n = (count == ALL_ENTRIES) ?
+		(int)be->ioa.nb_roa_epp_entries : count;
+
+	if ((unsigned int)(start_idx + n) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    n);
+}
+
+/*
+ * Read or write one field of a ROA EPP shadow-cache entry.
+ * get: non-zero reads the field into *value, zero writes *value.
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* Write-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers over the shared ROA EPP modify helper. */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	/* get == 1: the helper writes the field into *value. */
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* IOA v4 RCP entry: VLAN/tunnel pop-push recipe and queue override. */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID values selectable via vlan_tpid_sel. */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress post-processing entry (indexed via ROA categories). */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* IOA v4 shadow-cache layout. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	/* Ask the backend whether the KM FPGA module exists on this device. */
+	bool km_present = be->iface->get_km_present(be->be_dev);
+
+	return km_present;
+}
+
+/*
+ * Query KM geometry from the backend and allocate the shadow cache
+ * (RCP, CAM, TCAM, TCI, TCQ tables) in one block.
+ * Returns 0 on success, -1 on allocation failure, or a negative error
+ * code on bad resource counts / unsupported version.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/*
+		 * Mask A uses the double-size word-extractor width
+		 * (DW8/DW10), hence KM_RCP_MASK_D_A_SIZE rather than
+		 * KM_RCP_MASK_A_SIZE. Previously hard-coded as 12/6,
+		 * duplicating the named constants above.
+		 */
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256, /* 4 * 256 TCAM cells per bank */
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the KM shadow cache (single allocation anchored at base). */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	/*
+	 * free(NULL) is a no-op, so the previous `if (base)` guard was
+	 * redundant; clearing the pointer keeps a repeated free harmless.
+	 */
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Reset all KM shadow tables and synchronize them to hardware.
+ * Always returns 0; the individual flush results are not propagated.
+ * (Removed dead commented-out `int err = 0;` left from development.)
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/* Flush [start_idx, start_idx + count) KM RCP entries to hardware. */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	int n = (count == ALL_ENTRIES) ? (int)be->km.nb_categories : count;
+
+	if ((unsigned int)(start_idx + n) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, n);
+}
+
+/*
+ * Read or write one field of a KM RCP shadow-cache entry.
+ * word_off selects the word for the MASK_A/MASK_B array fields.
+ * get: non-zero reads the field into *value, zero writes *value.
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * Off-by-one fix: with '>' a word_off equal to the
+			 * array size passed the check and indexed one past
+			 * the end (assumes mask_d_a[] has
+			 * KM_RCP_MASK_D_A_SIZE entries, as the HSH masks
+			 * do — confirm against struct km_v7_rcp_s).
+			 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* Same off-by-one fix as MASK_A. */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field; wraps hw_mod_km_rcp_mod() in set mode. */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one KM RCP field; wraps hw_mod_km_rcp_mod() in get mode. */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, op_get);
+}
+
+/* Flush a range of KM CAM records to hardware, bounds-checked. */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	const unsigned int total =
+		be->km.nb_cam_banks * be->km.nb_cam_records;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > total)
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Access one field of a KM CAM entry in the shadow cache.
+ * bank/record select the entry; get != 0 reads into *value, else writes.
+ * Returns 0 on success, otherwise an error code from the error_*() helpers.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* flat cache index: records are laid out bank-major */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* write-only: fill whole entry with low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM CAM field; wraps hw_mod_km_cam_mod() in set mode. */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_km_cam_mod(be, field, bank, record, &v, 0);
+}
+
+/* Read one KM CAM field; wraps hw_mod_km_cam_mod() in get mode. */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_km_cam_mod(be, field, bank, record, value, op_get);
+}
+
+/* Flush KM TCAM banks to hardware; one bank holds 4 * 256 entries. */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	const int bank_sz = 4 * 256;
+
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * bank_sz;
+	else if (count == ALL_BANK_ENTRIES)
+		count = bank_sz;
+
+	unsigned int end = start_bank * bank_sz + count;
+
+	if (end > (be->km.nb_tcam_banks * bank_sz))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Access a KM TCAM entry in the shadow cache.
+ * bank/byte/byte_val address the entry (4 * 256 entries per bank);
+ * value_set points at three 32-bit words (96 bits of TCAM data).
+ * get != 0 reads into value_set, else writes from it.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			/* write-only: replicate value_set across the bank */
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					/* NOTE(review): dirty presumably gates
+					 * the next flush - confirm.
+					 */
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a TCAM entry (three words); wraps hw_mod_km_tcam_mod(). */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	const int op_set = 0;
+
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  op_set);
+}
+
+/* Read a TCAM entry (three words); wraps hw_mod_km_tcam_mod(). */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	const int op_get = 1;
+
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  op_get);
+}
+
+/* Flush KM TCI records to hardware, bounds-checked. */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	const unsigned int total =
+		be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+
+	unsigned int end = start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+	if (end > total)
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/* Access one field of a KM TCI record (get != 0 reads, else writes). */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int idx = bank * be->km.nb_tcam_bank_width + record;
+
+	if (idx >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	if (_VER_ != 7)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	switch (field) {
+	case HW_KM_TCI_COLOR:
+		get_set(&be->km.v7.tci[idx].color, value, get);
+		break;
+	case HW_KM_TCI_FT:
+		get_set(&be->km.v7.tci[idx].ft, value, get);
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+/* Write one KM TCI field; wraps hw_mod_km_tci_mod() in set mode. */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_km_tci_mod(be, field, bank, record, &v, 0);
+}
+
+/* Read one KM TCI field; wraps hw_mod_km_tci_mod() in get mode. */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_km_tci_mod(be, field, bank, record, value, op_get);
+}
+
+/* Flush KM TCQ records to hardware, bounds-checked. */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	const unsigned int total =
+		be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+
+	unsigned int end = start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+	if (end > total)
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/* Access one field of a KM TCQ record (get != 0 reads, else writes). */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int idx = bank * be->km.nb_tcam_bank_width + record;
+
+	if (idx >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	if (_VER_ != 7)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	switch (field) {
+	case HW_KM_TCQ_BANK_MASK:
+		get_set(&be->km.v7.tcq[idx].bank_mask, value, get);
+		break;
+	case HW_KM_TCQ_QUAL:
+		get_set(&be->km.v7.tcq[idx].qual, value, get);
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+/* Write one KM TCQ field; wraps hw_mod_km_tcq_mod() in set mode. */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	const int op_set = 0;
+
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, op_set);
+}
+
+/* Read one KM TCQ field; wraps hw_mod_km_tcq_mod() in get mode. */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, op_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * KM v7 recipe (RCP) shadow-cache entry.
+ * The *_ofs fields are signed (int32_t) and may hold negative offsets;
+ * mask_d_a[] and mask_b[] are word arrays indexed via word_off in
+ * hw_mod_km_rcp_mod().
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* KM v7 CAM entry: six data words (w0-w5) and six flow-type words (ft0-ft5). */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* KM v7 TCAM entry: 3 x 32-bit data words plus a cache-side dirty flag. */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* KM v7 TCI record: color value and flow type. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* KM v7 TCQ record: bank mask and qualifier. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table cache pointers for the KM v7 module (allocated as one block). */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Report whether the backend exposes the PDB module. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query the PDB module version and category count from the backend and
+ * allocate the shadow cache for the supported version (v9).
+ * Returns 0 on success, -1 on allocation failure, or an error code for
+ * unsupported versions/counts.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		/* one RCP array plus a single config record */
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the PDB shadow cache allocated by hw_mod_pdb_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the base pointer is
+ * cleared to avoid a dangling reference / double free.
+ */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	free(be->pdb.base);
+	be->pdb.base = NULL;
+}
+
+/* Zero the PDB cache and flush recipes and config to hardware. */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err = hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush a range of PDB recipes to hardware, bounds-checked. */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = (count == ALL_ENTRIES) ?
+		(int)be->pdb.nb_pdb_rcp_categories : count;
+
+	if ((unsigned int)(start_idx + n) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, n);
+}
+
+/*
+ * Access one field of a PDB recipe in the shadow cache.
+ * get != 0 reads into *value, else writes from it.
+ * FIND/COMPARE delegate to the shared index helpers.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			/* write-only: fill whole entry with low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			/* relative offsets are signed */
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB recipe field; wraps hw_mod_pdb_rcp_mod() in set mode. */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_pdb_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one PDB recipe field; wraps hw_mod_pdb_rcp_mod() in get mode. */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_pdb_rcp_mod(be, field, index, value, op_get);
+}
+
+/* Push the cached PDB global config to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/* Write one PDB global config field into the cache (set-only; no getter). */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	if (_VER_ != 9)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	switch (field) {
+	case HW_PDB_CONFIG_TS_FORMAT:
+		be->pdb.v9.config->ts_format = value;
+		break;
+	case HW_PDB_CONFIG_PORT_OFS:
+		be->pdb.v9.config->port_ofs = value;
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * PDB v9 recipe shadow-cache entry.
+ * The ofs*_rel fields are signed relative offsets (int32_t).
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* PDB v9 global configuration: timestamp format and port offset. */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table cache pointers for the PDB v9 module (allocated as one block). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Report whether the backend exposes the QSL module. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query the QSL module version and table sizes from the backend and
+ * allocate the shadow cache for the supported version (v7).
+ * Returns 0 on success, -1 on allocation failure, or an error code for
+ * unsupported versions/counts.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* RCP and QST sizes come from the backend; QEN and UNMQ
+		 * have fixed sizes.
+		 */
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the QSL shadow cache allocated by hw_mod_qsl_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the base pointer is
+ * cleared to avoid a dangling reference / double free.
+ */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	free(be->qsl.base);
+	be->qsl.base = NULL;
+}
+
+/*
+ * Zero the QSL cache and flush all tables to hardware.
+ * Flush errors are accumulated and returned (consistent with
+ * hw_mod_pdb_reset()); the UNMQ count uses QSL_QNMQ_ENTRIES instead of a
+ * magic 256.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	err |= be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0,
+					 QSL_QNMQ_ENTRIES);
+
+	return err;
+}
+
+/* Flush a range of QSL recipes to hardware, bounds-checked. */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = (count == ALL_ENTRIES) ?
+		(int)be->qsl.nb_rcp_categories : count;
+
+	if ((unsigned int)(start_idx + n) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, n);
+}
+
+/*
+ * Access one field of a QSL recipe in the shadow cache.
+ * get != 0 reads into *value, else writes from it.
+ * FIND/COMPARE delegate to the shared index helpers.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			/* write-only: fill whole entry with low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL recipe field; wraps hw_mod_qsl_rcp_mod() in set mode. */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_qsl_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one QSL recipe field; wraps hw_mod_qsl_rcp_mod() in get mode. */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_qsl_rcp_mod(be, field, index, value, op_get);
+}
+
+/* Flush a range of QSL QST entries to hardware, bounds-checked. */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = (count == ALL_ENTRIES) ? (int)be->qsl.nb_qst_entries : count;
+
+	if ((unsigned int)(start_idx + n) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, n);
+}
+
+/*
+ * Access one field of a QSL queue-select-table (QST) entry.
+ * get != 0 reads into *value, else writes from it.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			/* write-only: fill whole entry with low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QST field; wraps hw_mod_qsl_qst_mod() in set mode. */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_qsl_qst_mod(be, field, index, &v, 0);
+}
+
+/* Read one QST field; wraps hw_mod_qsl_qst_mod() in get mode. */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_qsl_qst_mod(be, field, index, value, op_get);
+}
+
+/* Flush a range of queue-enable (QEN) entries to hardware. */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = (count == ALL_ENTRIES) ? QSL_QEN_ENTRIES : count;
+
+	if ((start_idx + n) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, n);
+}
+
+/* Access the queue-enable bit of one QEN entry (get != 0 reads). */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+
+	if (_VER_ != 7)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	if (field != HW_QSL_QEN_EN)
+		return error_unsup_field(__func__);
+
+	get_set(&be->qsl.v7.qen[index].en, value, get);
+	return 0;
+}
+
+/* Write one QEN field; wraps hw_mod_qsl_qen_mod() in set mode. */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_qsl_qen_mod(be, field, index, &v, 0);
+}
+
+/* Read one QEN field; wraps hw_mod_qsl_qen_mod() in get mode. */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_qsl_qen_mod(be, field, index, value, op_get);
+}
+
+/* Flush a range of unmatched-queue (UNMQ) entries to hardware. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	int n = (count == ALL_ENTRIES) ? QSL_QNMQ_ENTRIES : count;
+
+	if ((start_idx + n) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx, n);
+}
+
+/* Access one field of an UNMQ entry (get != 0 reads, else writes). */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+
+	if (_VER_ != 7)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	switch (field) {
+	case HW_QSL_UNMQ_DEST_QUEUE:
+		get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+		break;
+	case HW_QSL_UNMQ_EN:
+		get_set(&be->qsl.v7.unmq[index].en, value, get);
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+/* Write one UNMQ field; wraps hw_mod_qsl_unmq_mod() in set mode. */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_qsl_unmq_mod(be, field, index, &v, 0);
+}
+
+/* Read one UNMQ field; wraps hw_mod_qsl_unmq_mod() in get mode. */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_qsl_unmq_mod(be, field, index, value, op_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* QSL v7 recipe shadow-cache entry. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* QSL v7 queue-select-table (QST) entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* QSL v7 queue-enable (QEN) entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* QSL v7 unmatched-queue (UNMQ) entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table cache pointers for the QSL v7 module (allocated as one block). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Report whether the backend exposes the RMC module. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query the RMC module version and allocate the single control record
+ * for the supported version (0x10003 == v1.3).
+ * Returns 0 on success, -1 on allocation failure, or an error code for
+ * unsupported versions.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the RMC shadow cache allocated by hw_mod_rmc_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the base pointer is
+ * cleared to avoid a dangling reference / double free.
+ */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	free(be->rmc.base);
+	be->rmc.base = NULL;
+}
+
+/*
+ * Reset the RMC control cache to a safe default and flush it to hardware.
+ * The duplicate HW_RMC_BLOCK_STATT write in the original has been removed
+ * (it set the same field to the same value twice).
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Push the cached RMC control record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Access one field of the RMC control record (get != 0 reads, else writes).
+ * Returns 0 on success, otherwise an error code.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC control field; wraps hw_mod_rmc_ctrl_mod() in set mode. */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_rmc_ctrl_mod(be, field, &v, 0);
+}
+
+/* Read one RMC control field; wraps hw_mod_rmc_ctrl_mod() in get mode. */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	const int op_get = 1;
+
+	return hw_mod_rmc_ctrl_mod(be, field, value, op_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Cache image of the RMC CTRL register, module version 1.3. */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;	/* block statistics records */
+	uint32_t block_keepa;	/* block keep-alive records */
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;	/* per-port block bits; 0xff blocks all (see hw_mod_rmc_reset) */
+	uint32_t lag_phy_odd_even;
+};
+
+/* Version 1.3 view of the RMC register cache. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Ask the backend whether this FPGA image contains the ROA module. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev) ? true : false;
+}
+
+/*
+ * Query the backend for ROA dimensions and allocate the shadow cache.
+ * Only module version 6 is supported.
+ * @return 0 on success, an error code when a resource count is invalid or
+ *         the version is unsupported, -1 on allocation failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): tunnel categories are a quarter of the ROA
+	 * categories reported by the backend — confirm against the ROA
+	 * register specification.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		/* One backing allocation for all four tables. */
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the single backing allocation created by hw_mod_roa_alloc(). */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required. */
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Zero the ROA cache and flush every table to hardware with defaults.
+ * All flush results are accumulated (as in hw_mod_tpe_reset); the original
+ * code silently dropped the return values of the TUNCFG, CONFIG and LAGCFG
+ * flushes, so a failing flush went unreported.
+ * @return 0 on success, non-zero if any flush failed.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush @count TUNHDR entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Access one 32-bit word of a cached tunnel-header entry.
+ * @param word_off  word offset inside the entry's tunnel_hdr[] array.
+ * NOTE(review): word_off is not range-checked here; callers must keep it
+ * inside tunnel_hdr[] of roa_v6_tunhdr_s — confirm the expected bound.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one tunnel-header word into the cache. */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one tunnel-header word from the cache. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, do_get);
+}
+
+/* Flush @count TUNCFG entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Access a single field of a cached TUNCFG entry.
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * PRESET_ALL fills the whole entry with a byte value (set-only);
+ * FIND/COMPARE locate or compare identical entries across the table.
+ * @return 0 on success; an error code for an unsupported field/version or
+ *         an out-of-range index.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one TUNCFG cache field. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_tuncfg_mod(be, field, index, &v, 0);
+}
+
+/* Read one TUNCFG cache field. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, do_get);
+}
+
+/* Push the cached ROA CONFIG record to hardware in one backend call. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return (*be->iface->roa_config_flush)(be->be_dev, &be->roa);
+}
+
+/*
+ * Access a single field of the cached ROA CONFIG record.
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * @return 0 on success; an error code for an unsupported field or version.
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CONFIG cache field. */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_config_mod(be, field, &v, 0);
+}
+
+/* Read one CONFIG cache field. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_config_mod(be, field, value, do_get);
+}
+
+/* Flush @count LAGCFG entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->roa.nb_lag_entries;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Access a single field of a cached LAGCFG entry.
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * @return 0 on success; an error code for an out-of-range index or an
+ *         unsupported field/version.
+ */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LAGCFG cache field. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_lagcfg_mod(be, field, index, &v, 0);
+}
+
+/* Read one LAGCFG cache field. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* One tunnel-header entry: 16 x 32-bit words of raw header data. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration cache record. */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;	/* IP checksum update */
+	uint32_t ipcs_precalc;	/* precalculated IP checksum */
+	uint32_t iptl_upd;	/* IP total-length update */
+	uint32_t iptl_precalc;	/* precalculated IP total length */
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Module-wide forwarding configuration (single record). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Version 6 view of the ROA register cache. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Ask the backend whether this FPGA image contains the SLC module. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev) ? true : false;
+}
+
+/*
+ * Query the SLC module version and allocate the RCP shadow cache,
+ * one entry per flow category.  Only version 1 is supported.
+ * @return 0 on success, -1 on allocation failure, an error code for an
+ *         unsupported version.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the backing allocation created by hw_mod_slc_alloc(). */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required. */
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Zero the SLC cache and write every RCP entry out to hardware. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush @count SLC RCP entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int nb = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Access a single field of a cached SLC RCP entry.
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * @return 0 on success; an error code for an out-of-range index or an
+ *         unsupported field/version.
+ *
+ * Fix: the element size/stride for PRESET_ALL, FIND and COMPARE must be
+ * sizeof(struct slc_v1_rcp_s) — the element type the rcp[] array is
+ * allocated with in hw_mod_slc_alloc().  The previous
+ * sizeof(struct hw_mod_slc_v1_s) is only the size of the pointer-holder
+ * struct, so memset preset only part of the entry and FIND/COMPARE walked
+ * the table with a wrong stride.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC RCP cache field. */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_slc_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one SLC RCP cache field. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_slc_rcp_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Ask the backend whether this FPGA image contains the SLC LR module. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev) ? true : false;
+}
+
+/*
+ * Query the SLC LR module version and allocate the RCP shadow cache,
+ * one entry per flow category.  Only version 2 is supported.
+ * @return 0 on success, -1 on allocation failure, an error code for an
+ *         unsupported version.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the backing allocation created by hw_mod_slc_lr_alloc(). */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required. */
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Zero the SLC LR cache and write every RCP entry out to hardware. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush @count SLC LR RCP entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Access a single field of a cached SLC LR RCP entry.
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * @return 0 on success; an error code for an out-of-range index or an
+ *         unsupported field/version.
+ *
+ * Fix: the element size/stride for PRESET_ALL, FIND and COMPARE must be
+ * sizeof(struct slc_lr_v2_rcp_s) — the element type the rcp[] array is
+ * allocated with in hw_mod_slc_lr_alloc().  The previous
+ * sizeof(struct hw_mod_slc_lr_v2_s) is only the size of the pointer-holder
+ * struct, so memset preset only part of the entry and FIND/COMPARE walked
+ * the table with a wrong stride.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC LR RCP cache field. */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one SLC LR RCP cache field. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* One SLC LR RCP (recipe) cache entry. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 2 view of the SLC LR register cache. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* One SLC RCP (recipe) cache entry. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 1 view of the SLC register cache. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Ask the backend whether this FPGA image contains the TPE module. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev) ? true : false;
+}
+
+/*
+ * Query the backend for all TPE sub-module dimensions and allocate the
+ * shadow caches.  Versions 1 and 2 are supported; the IFR tables exist
+ * only from version 2 on.
+ * @return 0 on success, an error code for a bad resource count or an
+ *         unsupported version, -1 on allocation failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		/* One backing allocation covering all eight v1 tables. */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		/* v2 adds the two IFR tables, giving ten tables in total. */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the backing allocation created by hw_mod_tpe_alloc(). */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required. */
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Zero the TPE cache and flush every table to hardware.
+ * @return 0 on success, non-zero if any flush failed.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int rc = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	rc |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	rc |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	rc |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	rc |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	rc |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	rc |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	rc |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	rc |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* The IFR tables only exist from TPE version 2 on. */
+	if (_VER_ == 2) {
+		rc |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		rc |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return rc;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush @count RPP_IFR RCP entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	const unsigned int nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/*
+ * Access a single field of a cached RPP_IFR RCP entry (TPE v2 only).
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * @return 0 on success; an error code for an out-of-range index or an
+ *         unsupported field/version.
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP_IFR RCP cache field. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one RPP_IFR RCP cache field. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, do_get);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush @count RPP RCP entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const unsigned int nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access a single field of a cached RPP RCP entry.
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * PRESET_ALL fills the whole entry with a byte value (set-only);
+ * FIND/COMPARE locate or compare identical entries across the table.
+ * @return 0 on success; an error code for an out-of-range index or an
+ *         unsupported field/version.
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP RCP cache field. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one RPP RCP cache field. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, do_get);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush @count IFR RCP entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const unsigned int nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access a single field of a cached IFR RCP entry (TPE v2 only).
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * @return 0 on success; an error code for an out-of-range index or an
+ *         unsupported field/version.
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IFR RCP cache field. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one IFR RCP cache field. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, do_get);
+}
+
+/*
+ * INS_RCP
+ */
+
+/* Flush @count INS RCP entries starting at @start_idx (ALL_ENTRIES = all). */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const unsigned int nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access a single field of a cached INS RCP entry.
+ * @param value  written to the cache when get == 0, read from it otherwise.
+ * PRESET_ALL fills the whole entry with a byte value (set-only);
+ * FIND/COMPARE locate or compare identical entries across the table.
+ * @return 0 on success; an error code for an out-of-range index or an
+ *         unsupported field/version.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one INS RCP cache field. */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one INS RCP cache field. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, do_get);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush a range of RPL RCP shadow entries to the backend.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/*
+	 * Reject negative ranges explicitly: the unsigned comparison below
+	 * alone lets a negative start_idx cancel against count and reach
+	 * the backend with an invalid offset.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write a single field of the RPL RCP shadow record at
+ * @index. HW_TPE_PRESET_ALL byte-fills the record with *value (set only);
+ * HW_TPE_FIND / HW_TPE_COMPARE delegate to the generic index helpers.
+ * Only TPE versions 1 and 2 (shared v1 layout) are handled.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 (shared v1 layout) */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL RCP field; wraps the mod helper in write mode. */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL RCP field; wraps the mod helper in read mode. */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush a range of RPL EXT shadow entries to the backend.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	/*
+	 * Reject negative ranges explicitly: the unsigned comparison below
+	 * alone lets a negative start_idx cancel against count and reach
+	 * the backend with an invalid offset.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write a single field of the RPL EXT shadow record at
+ * @index. HW_TPE_PRESET_ALL byte-fills the record with *value (set only);
+ * HW_TPE_FIND / HW_TPE_COMPARE delegate to the generic index helpers.
+ * Only TPE versions 1 and 2 (shared v1 layout) are handled.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 (shared v1 layout) */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL EXT field; wraps the mod helper in write mode. */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL EXT field; wraps the mod helper in read mode. */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush a range of RPL replacement-data shadow entries to the backend.
+ * count == ALL_ENTRIES selects the whole table (nb_rpl_depth entries).
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	/*
+	 * Reject negative ranges explicitly: the unsigned comparison below
+	 * alone lets a negative start_idx cancel against count and reach
+	 * the backend with an invalid offset.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write the RPL replacement-data shadow record at
+ * @index. For HW_TPE_RPL_RPL_VALUE, @value must address 4 x 32-bit words
+ * (the whole 128-bit line is copied). Only TPE versions 1 and 2 are
+ * handled.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 (shared v1 layout) */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the replacement data line at @index. Unlike the other setters this
+ * takes a pointer: @value addresses 4 x 32-bit words for RPL_RPL_VALUE.
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Get the replacement data line at @index (4 x 32-bit words). */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush a range of CPY RCP shadow entries to the backend. The table holds
+ * nb_cpy_writers * nb_rcp_categories entries; count == ALL_ENTRIES selects
+ * all of them.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	/*
+	 * Reject negative ranges explicitly: the unsigned comparison below
+	 * alone lets a negative start_idx cancel against count and reach
+	 * the backend with an invalid offset.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write a single field of the CPY RCP shadow record at
+ * @index. The table is flat: nb_cpy_writers * nb_rcp_categories entries,
+ * so @index encodes both writer and category. Only TPE versions 1 and 2
+ * are handled.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 (shared v1 layout) */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CPY RCP field; wraps the mod helper in write mode. */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CPY RCP field; wraps the mod helper in read mode. */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush a range of HFU RCP shadow entries to the backend.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/*
+	 * Reject negative ranges explicitly: the unsigned comparison below
+	 * alone lets a negative start_idx cancel against count and reach
+	 * the backend with an invalid offset.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write a single field of the HFU (header field update)
+ * RCP shadow record at @index. The cases are grouped as in the register
+ * layout: LEN A/B/C length-update controls, TTL update controls, and
+ * checksum/protocol/offset information. Only TPE versions 1 and 2 (shared
+ * v1 layout) are handled.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* LEN A update controls */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* LEN B update controls (no outer-L4-len flag, unlike LEN A) */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* LEN C update controls */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL update controls */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* checksum / protocol / header-offset information */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 (shared v1 layout) */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HFU RCP field; wraps the mod helper in write mode. */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one HFU RCP field; wraps the mod helper in read mode. */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush a range of CSU RCP shadow entries to the backend.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/*
+	 * Reject negative ranges explicitly: the unsigned comparison below
+	 * alone lets a negative start_idx cancel against count and reach
+	 * the backend with an invalid offset.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write a single field of the CSU (checksum update)
+ * RCP shadow record at @index: one command per outer/inner L3/L4 layer.
+ * Only TPE versions 1 and 2 (shared v1 layout) are handled.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 (shared v1 layout) */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CSU RCP field; wraps the mod helper in write mode. */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CSU RCP field; wraps the mod helper in read mode. */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/* RPP v0 recipe (exp: presumably expansion size — confirm with FPGA doc). */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS v1 recipe: dyn/ofs/len position-and-length selectors (see INS regs). */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL v2 recipe: dyn/ofs/len selectors plus pointer into the RPL table. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL v2 extension entry. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL v2 replacement data: one line of 4 x 32-bit words. */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY v1 recipe: reader selection plus dyn/ofs/len selectors. */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/*
+ * HFU v1 recipe: header field update controls, grouped as three length
+ * updates (A/B/C), a TTL update, and checksum/protocol/offset information.
+ * Field meanings mirror the HW_TPE_HFU_RCP_* accessors in hw_mod_tpe.c.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	/* length update A (only A has the outer-L4-len flag) */
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	/* length update B */
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	/* length update C */
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	/* TTL update */
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	/* checksum / protocol / header-offset information */
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU v0 recipe: checksum command per outer/inner L3/L4 layer. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* TPE version 1 shadow state: one dynamically sized table per sub-module. */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP v1 IFR recipe: enable flag and MTU (semantics per IFR regs — confirm). */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR v1 recipe: same layout as the RPP IFR recipe above. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* TPE version 2 shadow state: the v1 tables plus the two IFR recipe tables. */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian integer typedefs used by the packed wire-format structs below. */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks.
+ * FLOW_MARK_LACP is presumably reserved for LACP traffic (per its name);
+ * FLOW_MARK_MAX is the largest mark value usable by ordinary flows.
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Queue identity pair — id looks logical, hw_id hardware; confirm with callers. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Match item types; mirrors a subset of DPDK's RTE_FLOW_ITEM_TYPE_* set. */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Action types; each trailing comment names the expected conf structure. */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* 6-byte Ethernet MAC address, packed (wire layout). */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format eth_addr into buf as colon-separated uppercase hex octets.
+ * snprintf bounds the output to size bytes (NUL-terminated when size > 0).
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header — packed wire layout; be*_t fields are big-endian.
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header — packed wire layout.
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header — packed wire layout.
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header — packed wire layout.
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header — packed wire layout.
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header — packed wire layout.
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification — packed, matches Ethernet wire layout.
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification — packed 802.1Q tag.
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification — wraps the wire header so spec/mask
+ * share the struct ipv4_hdr_s layout (same pattern for the wrappers below).
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification — packed GTP header fields.
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348) — packed VXLAN header.
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /* Number of layers covered. */
+};
+
+/* One entry of a match pattern: item type plus optional spec/mask pair. */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+/* RSS configuration; layout parallels DPDK's rte_flow_action_rss. */
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid; /* VLAN id to set, big-endian */
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition: element list ended by
+	 * FLOW_ELEM_TYPE_END, in one of the orders listed above. */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1; /* reset counters after query */
+	uint32_t hits_set : 1; /* hits field is valid */
+	uint32_t bytes_set : 1; /* bytes field is valid */
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index; /* destination queue index */
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group; /* target group to jump to */
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id; /* meter (MBR) record id */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data; /* raw header bytes to prepend */
+	uint8_t *preserve;
+	size_t size; /* length of data in bytes */
+	/* parsed view of data, at most RAW_ENCAP_DECAP_ELEMS_MAX items */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data; /* raw header bytes to strip */
+	size_t size; /* length of data in bytes */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * Mirrors DPDK's rte_flow_field_id value set.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Which union member is valid depends on @field: FLOW_FIELD_VALUE uses
+ * @value, FLOW_FIELD_POINTER uses @pvalue, any other id uses the
+ * level/offset pair.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* How @src is applied to @dst. */
+	struct flow_action_modify_data dst; /* Field being modified. */
+	struct flow_action_modify_data src; /* Source field or value. */
+	uint32_t width; /* Number of bits to use from @src. */
+};
+
+struct flow_action {
+	enum flow_action_type type; /* Selects which conf structure applies. */
+	const void *conf; /* Action-specific configuration (may be NULL). */
+};
+
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+struct flow_error {
+	enum flow_error_e type; /* Outcome classification. */
+	const char *message; /* Static, human-readable description. */
+};
+
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write a single LAG table entry. */
+	FLOW_LAG_SET_ALL, /* Write every 4th LAG entry starting at index. */
+	FLOW_LAG_SET_BALANCE, /* Distribute traffic between two ports. */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Selects the valid union member (4 or 6 presumed — verify at callers). */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type; /* Tunnel encapsulation type. */
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+/* Reset all flow state on the given adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/* Create/attach a flow ethdev for a port; optionally allocates RX queues. */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+/* Register an additional RX queue on an existing flow ethdev. */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+/* Tear down a flow ethdev created by flow_get_eth_dev(). */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+/* Look up the tunnel configuration associated with a flow stat id/vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+/* Check whether a flow spec could be created, without creating it. */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+/* Create and program a flow; returns NULL and fills @error on failure. */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+/* Remove a single flow previously returned by flow_create(). */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+/* Remove all flows on the device. */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+/* Query per-flow data for a given action (e.g. counters). */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+/* Configure a two-bucket (a/b) meter profile. */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+/* Instantiate a meter from a profile and policy. */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+/* Read (and optionally clear) cached meter statistics for one meter id. */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v4 6/8] net/ntnic: adds flow logic
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-18 18:41   ` [PATCH v4 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-18 18:41   ` Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index bdfaf75190..fde385d929 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -55,8 +55,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -75,6 +77,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug names for resource types. Entry order must match enum res_type_e
+ * (see the RES_* index markers); the table is indexed directly by the enum
+ * in the NT_LOG calls below.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global NIC device list; traversals are guarded by base_mtx
+ * in the lag_* entry points below.
+ */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * User-facing error messages. Entry order must match
+ * enum flow_nic_err_msg_e, which flow_nic_set_error() uses as the index.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+/*
+ * Translate an internal error code into the caller-supplied flow_error.
+ * A NULL @error pointer is permitted and simply ignored.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+
+	if (error == NULL)
+		return;
+
+	error->message = err_msg[msg].message;
+	if (msg == ERR_SUCCESS)
+		error->type = FLOW_ERROR_SUCCESS;
+	else
+		error->type = FLOW_ERROR_GENERAL;
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate the first free resource of @res_type, probing candidate
+ * indexes in steps of @alignment. Returns the allocated index with its
+ * reference count set to 1, or -1 when the pool is exhausted.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < ndev->res[res_type].resource_count; idx += alignment) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			continue;
+
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return idx;
+	}
+
+	return -1;
+}
+
+/*
+ * Claim one specific resource index. Returns 0 on success or -1 when the
+ * index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (flow_nic_is_resource_used(ndev, res_type, idx))
+		return -1;
+
+	flow_nic_mark_resource_used(ndev, res_type, idx);
+	ndev->res[res_type].ref[idx] = 1;
+	return 0;
+}
+
+/*
+ * Allocate @num contiguous resources of @res_type; the run must start at
+ * an index that is a multiple of @alignment. Returns the first index of
+ * the run (all entries marked used with ref count 1), or -1 on failure.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/*
+	 * Guard against unsigned underflow of "resource_count - (num - 1)"
+	 * in the loop bound below: with num == 0 or num larger than the
+	 * pool, the subtraction would wrap and the scan would read far
+	 * beyond the resource map.
+	 */
+	if (num == 0 || num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* Check that the remaining num - 1 entries are free too. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/* Unconditionally release one resource index (no reference counting;
+ * callers use flow_nic_deref_resource() for ref-counted release).
+ */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an extra reference on an already-allocated resource.
+ * Returns 0 on success, -1 if the reference counter is saturated.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	uint32_t refs = ndev->res[res_type].ref[index];
+
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, refs);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+
+	if (refs == (uint32_t)-1)
+		return -1;
+
+	ndev->res[res_type].ref[index] = refs + 1;
+	return 0;
+}
+
+/*
+ * Drop one reference on a resource and free it when the count reaches
+ * zero. Returns 1 while references remain, 0 once the resource is freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+
+	ndev->res[res_type].ref[index]--;
+	if (ndev->res[res_type].ref[index] == 0)
+		flow_nic_free_resource(ndev, res_type, index);
+
+	/* 0 means the resource has just been freed */
+	return ndev->res[res_type].ref[index] != 0;
+}
+
+/*
+ * Return the first in-use resource index at or after @idx_start, or -1
+ * when none exists. NOTE(review): a negative @idx_start converts to a
+ * huge unsigned start index and yields -1 immediately — callers are
+ * assumed to pass idx_start >= 0.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	unsigned int i;
+
+	for (i = idx_start; i < ndev->res[res_type].resource_count; i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+
+	return -1;
+}
+
+/*
+ * Allocate a number flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	/*
+	 * NOTE(review): this failure check assumes the "index" field is a
+	 * signed type wide enough to hold -1 — verify the field declaration
+	 * in flow_api_engine.h.
+	 */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Allocate one specific resource index for a flow handle.
+ * Returns 0 on success, non-zero on failure.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	/*
+	 * Reject invalid indexes before touching the resource map. The old
+	 * code checked "index < 0" only after a successful allocation, so
+	 * the check was dead and a negative idx would have probed the map
+	 * out of bounds.
+	 */
+	if (idx < 0)
+		return -1;
+
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program HSH recipe @hsh_idx with a predefined hashing algorithm.
+ * The recipe is first cleared (PRESET_ALL = 0); HASH_ALGO_ROUND_ROBIN is
+ * the cleared state, so its case intentionally does nothing more.
+ * Return values of the individual hw_mod_hsh_rcp_set() calls are not
+ * checked; the function always returns 0.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Include words 0-8 in the hash; word 9 is masked out. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program HSH recipe @hsh_idx from an RSS field-flag combination.
+ * Only the flag combinations listed in the switch are supported; any
+ * other combination fails with -1. Returns 0 on success.
+ *
+ * Fix vs. previous revision: the two IP cases dropped the "res |=" on
+ * every hw_mod_hsh_rcp_set() call, so their "if (res)" error checks were
+ * dead code; the inner-src case also logged "outer dst" on success, and
+ * the error strings said "cardware".
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN
+		 * provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx,
+					  0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx,
+					  0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the flow_eth_dev registered for (adapter_no, port).
+ * Returns NULL when either the adapter or the port is unknown.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev;
+	struct flow_eth_dev *eth_dev;
+
+	for (nic_dev = dev_base; nic_dev; nic_dev = nic_dev->next) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+	}
+
+	if (nic_dev == NULL)
+		return NULL;
+
+	for (eth_dev = nic_dev->eth_base; eth_dev; eth_dev = eth_dev->next) {
+		if (eth_dev->port == port)
+			return eth_dev;
+	}
+
+	return NULL;
+}
+
+/* Find the flow_nic_dev with the given adapter number, or NULL. */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev;
+
+	for (ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			break;
+	}
+
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure LAG port pairing from a bit mask. For each bit N set, ports
+ * N*2 and N*2+1 are merged and reported as incoming port N*2.
+ * Serialized against other config calls by base_mtx.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block RX traffic from the MAC ports selected by @port_mask.
+ * Serialized against other config calls by base_mtx.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one LAG table entry and flush it to hardware immediately. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Apply a LAG configuration command under the global device lock.
+ * Returns 0 on success, -1 on unknown adapter or unknown command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		/* Write "value" into every 4th entry, offset by index (0-3). */
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		/* The LAG table is written in hash blocks of 4 entries. */
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow specification without creating it. Only the inline
+ * profile is supported; the vSwitch profile is rejected with -1.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	/* Only the inline profile implements flow creation. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	/* Only the inline profile implements flow destruction. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	/* Only the inline profile implements flow flushing. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	/* Only the inline profile implements flow queries. */
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Push an eth-port device onto the head of the NIC's eth_base list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	struct flow_eth_dev *head = ndev->eth_base;
+
+	dev->next = head;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink eth_dev from the NIC's eth_base list.
+ * Returns 0 on success, -1 if the device was not found.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	for (; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all flow state on a NIC device: delete every eth-port device,
+ * destroy any leftover flows, release profile and resource management
+ * handles. Leaves the resource bitmaps allocated (freed by flow_api_done()).
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/*
+	 * Error check: deleting all eth-ports should already have destroyed
+	 * all flows; destroy any stragglers so resources are not leaked.
+	 */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * Debug-only leak check of the per-resource bitmaps and reference
+	 * counters. Not strictly needed since the bitmaps are freed shortly
+	 * after this call, hence debug mode only.
+	 * (The previously nested "#if defined(FLOW_DEBUG)" around the log
+	 * below was redundant inside this #ifdef and has been dropped.)
+	 */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/* Reset all flow state and the backend for the given physical adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (ndev == NULL)
+		return -1;
+
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * BUGFIX: keep base_mtx held across the delete. Previously
+		 * base_mtx was unlocked here and then unlocked again on the
+		 * success path below - undefined behavior for a default
+		 * (non-recursive) mutex. flow_delete_eth_dev() only takes
+		 * ndev->mtx, so holding base_mtx here is safe.
+		 */
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Enable each allocated queue in QSL QEN (4 queues per entry) */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	/* fall through - base_mtx is still held on this path too */
+
+err_exit1:
+	/*
+	 * BUGFIX: base_mtx must be released on every error path. Previously
+	 * the calloc-failure path (goto err_exit1) skipped the unlock and
+	 * left base_mtx held forever.
+	 */
+	pthread_mutex_unlock(&base_mtx);
+	free(eth_dev); /* free(NULL) is a no-op */
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an externally allocated RX queue on an eth-port device and
+ * enable it in the QSL QEN table. Returns 0 on success, -1 if the
+ * device's queue table is already full.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	/* Guard the fixed-size rx_queue array (FLOW_MAX_QUEUES + 1 slots). */
+	if (eth_dev->num_queues >= FLOW_MAX_QUEUES + 1)
+		return -1;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	/* Set the queue's enable bit; QEN packs 4 queues per entry. */
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy all of its flows, clear its QSL
+ * unmatched-queue setup, disable and free its RX queues, unlink it from
+ * the owning NIC and free it. Returns 0 on success, -1 on error.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/*
+				 * BUGFIX: release the device mutex before
+				 * bailing out; previously it was returned
+				 * with ndev->mtx still held.
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Clear each queue's enable bit in QSL QEN (4 per entry). */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper: fetch the tunnel definition for a flow_stat_id/vport pair. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate the tracking state for one resource pool: a single zeroed
+ * allocation holding the allocation bitmap followed by the per-element
+ * reference counters. Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+	uint8_t *mem;
+
+	assert(ndev->res[res_type].alloc_bm == NULL);
+
+	mem = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (mem == NULL)
+		return -1;
+
+	ndev->res[res_type].alloc_bm = mem;
+	ndev->res[res_type].ref = (uint32_t *)(mem + bm_bytes);
+	ndev->res[res_type].resource_count = count;
+	return 0;
+}
+
+/*
+ * Free the tracking state of one resource pool. The ref array lives in
+ * the same allocation as alloc_bm (see init_resource_elements), so a
+ * single free releases both.
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed. */
+	free(ndev->res[res_type].alloc_bm);
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/* Insert a NIC device at the head of the global list, under base_mtx. */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+
+	ndev->next = dev_base;
+	dev_base = ndev;
+
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global list, under base_mtx.
+ * Returns 0 on success, -1 if the device was not found.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	struct flow_nic_dev **link;
+	int rc = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	for (link = &dev_base; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			rc = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+
+	return rc;
+}
+
+/*
+ * Create and register a flow NIC device for a physical adapter, bound to
+ * the supplied backend ops table (must be API version 1) and backend
+ * driver handle. Sizes all resource pools from the backend capabilities
+ * reported by flow_api_backend_init(). On success the new device is
+ * inserted into the global NIC list; on failure, partially initialized
+ * state is torn down via flow_api_done() and NULL is returned.
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* Only backend interface version 1 is supported. */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* Cap the addressable in-port count at 256. */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* flow_api_done() frees the resource pools, backend and ndev itself */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a flow NIC device: reset all flow state, free every resource
+ * pool, shut down the backend, unlink from the global list and free.
+ * A NULL ndev is accepted and ignored. Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+
+	if (!ndev)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int i = 0; i < RES_COUNT; i++)
+		done_resource_elements(ndev, i);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the backend driver handle for a NIC device, or NULL if absent. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev == NULL) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	return ndev->be.be_dev;
+}
+
+/*
+ * Return the number of RX queues on the eth-port device for the given
+ * adapter/port, or -1 if no such device exists (previously this
+ * dereferenced a NULL pointer on an unknown adapter/port).
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the HW id of queue_no on the eth-port device for the given
+ * adapter/port, or -1 if no such device exists (previously this
+ * dereferenced a NULL pointer on an unknown adapter/port).
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/* FLM statistics are only available on the inline profile. */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/* Tracking state for one HW resource pool (see init_resource_elements). */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap, one bit per element */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields;
+};
+
+/* One eth-port device opened on a NIC; created by flow_get_eth_dev(). */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next eth-port device on the same NIC */
+};
+
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* State for one registered NIC backend (one physical adapter). */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* nonzero once flow management is set up; checked in flow_get_eth_dev() */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* protects this device's lists and HW programming */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/* Clear bit x in byte-array bitmap arr. Arguments are captured in
+ * temporaries so each is evaluated exactly once and any expression
+ * (e.g. ptr + off) is safe - matching sibling flow_nic_set_bit.
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/* Test bit x in byte-array bitmap arr (nonzero if set). The arr argument
+ * is captured in a temporary so expression arguments are evaluated once,
+ * matching sibling flow_nic_set_bit.
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	for (int i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+	}
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Bring up the FLM SDRAM: preset control state, wait for the DDR4
+ * calibration-done flag, then program the scrubber interval and the
+ * flow timeout.  Returns 0 on success, -1 if calibration never
+ * completes.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	/* Busy-poll: up to 1e6 iterations with 1 us sleeps */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reset and initialise the FLM SDRAM, then apply the requested enable
+ * state.  Sequence: disable FLM and clear all RCP records, wait for
+ * the idle flag, trigger SDRAM initialisation, wait for init-done,
+ * clear the INIT bit again and finally write "enable".  Each wait is a
+ * busy-poll of up to 1e6 iterations with 1 us sleeps.  Returns 0 on
+ * success, -1 on timeout.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* RCP index 0 is left untouched -- presumably reserved; confirm */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Identity of a flow's action set, packed into 64 bits so two action
+ * sets can be compared with a single integer compare (the "data"
+ * union member).  The size is asserted to equal sizeof(uint64_t) in
+ * flow_def_to_ft_ident().
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;
+			uint64_t drop : 1;
+			uint64_t ltx_en : 1;
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data; /* whole ident as one word for compare/assign */
+	};
+};
+
+/*
+ * FLM key layout: dynamic-offset selector (dyn) and byte offset (ofs)
+ * for two quad-words and two single words, plus outer/inner protocol
+ * bits.  These values are written to the HW_FLM_RCP_* fields in
+ * flm_flow_setup_rcp().
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data; /* whole key def as one word for compare */
+	};
+};
+
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw == 0) {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+	} else {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+	}
+}
+
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw == 0) {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+	} else {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Per-group FLM bookkeeping: the group 0 CFN and KM flow type forming
+ * the miss path, the key definition programmed into the group's RCP,
+ * and the table of action-set (FT) entries learned into the group.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;		/* -1 while the group has no CFN assigned */
+	int km_ft_group0;
+	struct flow_handle *fh_group0;
+
+	struct flm_flow_key_def_s key_def;
+
+	int miss_enabled;	/* set once group 0 is switched to FLM */
+
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Most recently matched FT index ("cashed" is a typo for "cached") */
+	uint32_t cashed_ft_index;
+};
+
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
/* Release the per-device FLM flow handle and clear the caller's pointer. */
static void flm_flow_handle_remove(void **handle)
{
	struct flm_flow_handle_s *flm_handle =
		(struct flm_flow_handle_s *)*handle;

	free(flm_handle);
	*handle = NULL;
}
+
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = flm_handle->groups[group_index].fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program FLM RCP record "group_index": key word selection from
+ * key_def, the key mask, the key id (KID) and protocol/byte-count
+ * options, then flush the record.  Returns 0 on success, -1 on an
+ * out-of-range group index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the mask words into the order the FLM RCP expects */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KID = group_index + 2; kid 1 is used by meters (see flow_mtr_create_meter()) */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down FLM RCP record "group_index".  If the group's miss path
+ * was active, undo flm_flow_learn_prepare()'s group 0 changes in
+ * reverse order: repoint the CFN's RCP selection to 0, swap FT MISS
+ * back to FT UNHANDLED and clear the CFN's FLM-enable bit.  Returns 0
+ * on success, -1 on an out-of-range group index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare learning a flow into FLM group "group_index".
+ *
+ * On the first flow of a group this claims the FLM RCP resource,
+ * programs the RCP and switches the group 0 CFN from KM to FLM
+ * operation.  It then finds -- or allocates -- the flow-type (FT)
+ * entry whose action set matches the flow.
+ *
+ * Returns 0 on success with *kid and *ft set.  When a new FT entry
+ * was allocated, *cfn_to_copy / *cfn_to_copy_km_ft name the CFN the
+ * caller must clone; when an identical action set already exists,
+ * *fh_existing is set instead.  Returns -1 on error.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* First flow in this group: activate the FLM miss path */
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in one group must share the same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	/* Same 64-bit encoding as flow_def_to_ft_ident() */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: reuse the most recently matched FT entry */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT entries 0 and 1 are skipped; search starts at 2 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	/* KID matches flm_flow_setup_rcp(): group_index + 2 */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Cached per-profile dual-bucket parameters in the 16-bit
+ * mantissa+shift format produced by flow_mtr_set_profile().
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	return hw_mod_flm_present(&dev->ndev->be) &&
+	       dev->ndev->be.flm.nb_variant == 2;
+}
+
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return FLM_MTR_PROFILE_SIZE;
+}
+
/*
 * Convert a byte count (40-bit input) into bucket-size units of
 * 2^40 / 10^9 bytes, i.e. compute ceil(value * 10^9 / 2^40).
 *
 * value * 10^9 can overflow 64 bits, so the input is split into two
 * 20-bit halves that are scaled separately and recombined.
 *
 * Fix: the original added at most one rounding unit, dropping the
 * carry when the two fractional parts together exceeded 2^40 and thus
 * under-counting by one (e.g. input 0x1FFFFF gave 1907; the exact
 * ceiling is 1908).  The combined fractional part is now summed so
 * both the carry and the round-up are applied.
 */
static inline uint64_t convert_to_bucket_size_units(uint64_t value)
{
	/* Assumes a 40-bit int as input */
	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;

	/* Fractional contribution below 2^40 from both halves (< 2^41) */
	uint64_t frac = ((hi_bits & 0xfffff) << 20) + (lo_bits & 0xffffffffff);

	return (hi_bits >> 20) + (lo_bits >> 40) + (frac >> 40) +
	       ((frac & 0xffffffffff) ? 1 : 0);
}
+
+/*
+ * Convert a meter profile's rates and burst sizes into the 16-bit
+ * format used by FLM learn records and cache them per profile id:
+ * rates in units of 128 bytes/sec, sizes in units of 2^40/10^9 bytes,
+ * each stored as a 12-bit mantissa [11:0] with a 4-bit left-shift
+ * exponent [15:12].  Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	/* 0x7ff8000 = 0xfff << 15: largest mantissa at the largest shift */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round rate down to max rate supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Writer-in-progress flag carried in mtr_stat_s::n_pkt */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/*
+ * Periodic statistics thresholds; the encoding of these values is
+ * hardware-defined -- confirm against the FLM register documentation.
+ */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/* Per-meter statistics, updated from FLM inf records. */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;
+
+	/*
+	 * n_pkt carries UINT64_MSB as a writer-in-progress flag while
+	 * n_bytes is being updated; see flm_mtr_update_stats() (writer)
+	 * and flm_mtr_read_stats() (reader).
+	 */
+	volatile atomic_uint_fast64_t n_pkt;
+	volatile atomic_uint_fast64_t n_bytes;
+	uint64_t n_pkt_base;
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask;
+};
+
+/*
+ * NOTE(review): unlike WORDS_PER_INF_DATA this is a byte count (no
+ * division by sizeof(uint32_t)), so the free-space check in
+ * flow_flm_apply() is 4x conservative -- confirm intent.
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Write one learn record to the FLM learn FIFO.  If the FIFO lacks
+ * room, drain pending inf records and re-poll the free count up to
+ * FLM_PROG_MAX_RETRY times.  Returns 1 when space never became
+ * available, otherwise the result of the flush.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the shadow register before retrying */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain inf records to make the hardware progress */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create (learn) a meter entry in FLM.  The meter is keyed on
+ * sw9 = mtr_id + 1 with kid 1, programmed with profile bucket A and
+ * tagged with mtr_id in the record id bytes.  Statistics collection
+ * is enabled when stats_mask is non-zero.  Returns the result of
+ * flow_flm_apply() (0 on success).
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	/* Bucket starts with the 12-bit mantissa portion of size_a */
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	/* op appears to encode the FLM command: 1 = learn here,
+	 * 0 in destroy, 2 in adjust -- confirm against FLM docs
+	 */
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy (unlearn) a meter entry.  The per-meter statistics are
+ * cleared before the delete record is issued, so in-flight counter
+ * updates are suppressed by the zero stats_mask.  Returns the result
+ * of flow_flm_apply() (0 on success).
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Same key as flow_mtr_create_meter() */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Issue an adjust (op 2) record for an existing meter, re-applying the
+ * cached bucket A rate/size and adding adjust_value.  Returns the
+ * result of flow_flm_apply() (0 on success).
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Same key as flow_mtr_create_meter() */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain FLM inf records and fold meter statistics into the per-meter
+ * counters.  This is the writer side of the n_pkt MSB protocol: n_pkt
+ * is first stored with UINT64_MSB set, then n_bytes is written, then
+ * n_pkt is rewritten without the flag; flm_mtr_read_stats() spins on
+ * the flag to obtain a consistent pair.  Returns the number of
+ * records processed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		/* word 6 = meter id; word 7 must be 0 and bit 31 of word 8 set */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				/* words 0-1 = bytes, words 2-3 = packets */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read green packet/byte counters for meter "id".  Reader side of the
+ * n_pkt MSB protocol: spin while the writer-in-progress flag is set,
+ * then re-check n_pkt after loading n_bytes so the returned pair is
+ * consistent.  When "clear" is set, the current values become the new
+ * base and later reads report deltas from this point.  Outputs are
+ * only written when the meter's stats_mask is non-zero.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
/* Map a port number to its IFR MTU recipe index (offset by one). */
static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
{
	return (uint8_t)(port + 1);
}
+
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *dev = ndev->eth_base;
+
+	while (dev) {
+		if (dev->port_id == port_id)
+			return dev->port;
+		dev = dev->next;
+	}
+
+	return UINT8_MAX;
+}
+
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	if (ndev->flow_base)
+		ndev->flow_base->prev = fh;
+	fh->next = ndev->flow_base;
+	fh->prev = NULL;
+	ndev->flow_base = fh;
+}
+
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
/*
 * Interpret an IPv4 flow element and add src/dst address matches to the KM
 * key.  Prefers one QW (quad-word) entry holding both addresses when a QW
 * slot is free; otherwise falls back to one SW (single-word) entry per
 * masked address.  Returns 0 on success; on SW exhaustion logs, sets
 * 'error', frees 'fd' and returns 1.
 *
 * NOTE(review): qw_counter/sw_counter are passed by value, so the
 * increments below are not visible to the caller — confirm the caller
 * tracks key usage independently.
 */
static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
	uint32_t any_count)
{
	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;

	if (ipv4_spec != NULL && ipv4_mask != NULL) {
		/* Fully-masked all-ones frag_offset selects a fixed fragmentation code. */
		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
			fd->fragmentation = 0xfe;

		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
			/* QW entries are 4 words wide: entry 0 at word 6, entry 1 at word 2. */
			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);

			/* Pre-mask the data so only masked-in bits enter the key. */
			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];

			/* Two words at L3 offset 12: IPv4 src followed by dst. */
			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
			qw_counter += 1;
		} else {
			/* Need one SW word per masked address; fail if they do not fit. */
			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			if (ipv4_mask->hdr.src_ip) {
				/* SW entry 0 uses word 1, entry 1 uses word 0. */
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
				sw_counter += 1;
			}

			if (ipv4_mask->hdr.dst_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
				sw_counter += 1;
			}
		}
	}

	/* An ANY element or an already-set outer L3 makes this the tunnel-inner L3. */
	if (any_count > 0 || fd->l3_prot != -1)
		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
	else
		fd->l3_prot = PROT_L3_IPV4;
	return 0;
}
+
/*
 * Interpret an IPv6 flow element and add src/dst address matches to the KM
 * key.  Each non-zero 128-bit address consumes a full QW (quad-word) entry.
 * Returns 0 on success; on QW exhaustion logs, sets 'error', frees 'fd' and
 * returns 1.
 *
 * NOTE(review): only the spec is tested with is_non_zero() — an all-zero
 * spec with a non-zero mask is silently skipped; confirm this is intended.
 * NOTE(review): qw_counter is passed by value, so the increments below are
 * not visible to the caller — confirm the caller tracks key usage itself.
 */
static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
{
	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;

	if (ipv6_spec != NULL && ipv6_mask != NULL) {
		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
			if (qw_counter >= 2) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			/* QW entries are 4 words wide: entry 0 at word 6, entry 1 at word 2. */
			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			/* Copy the raw big-endian address, then normalize each word. */
			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);

			qw_data[0] = ntohl(qw_data[0]);
			qw_data[1] = ntohl(qw_data[1]);
			qw_data[2] = ntohl(qw_data[2]);
			qw_data[3] = ntohl(qw_data[3]);

			qw_mask[0] = ntohl(qw_mask[0]);
			qw_mask[1] = ntohl(qw_mask[1]);
			qw_mask[2] = ntohl(qw_mask[2]);
			qw_mask[3] = ntohl(qw_mask[3]);

			/* Pre-mask the data so only masked-in bits enter the key. */
			qw_data[0] &= qw_mask[0];
			qw_data[1] &= qw_mask[1];
			qw_data[2] &= qw_mask[2];
			qw_data[3] &= qw_mask[3];

			/* Four words at L3 offset 8: the IPv6 source address. */
			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
			qw_counter += 1;
		}

		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
			if (qw_counter >= 2) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			/* Same copy/normalize/mask sequence for the destination. */
			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);

			qw_data[0] = ntohl(qw_data[0]);
			qw_data[1] = ntohl(qw_data[1]);
			qw_data[2] = ntohl(qw_data[2]);
			qw_data[3] = ntohl(qw_data[3]);

			qw_mask[0] = ntohl(qw_mask[0]);
			qw_mask[1] = ntohl(qw_mask[1]);
			qw_mask[2] = ntohl(qw_mask[2]);
			qw_mask[3] = ntohl(qw_mask[3]);
			qw_data[0] &= qw_mask[0];
			qw_data[1] &= qw_mask[1];
			qw_data[2] &= qw_mask[2];
			qw_data[3] &= qw_mask[3];

			/* Four words at L3 offset 24: the IPv6 destination address. */
			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
			qw_counter += 1;
		}
	}

	/* An ANY element or an already-set outer L3 makes this the tunnel-inner L3. */
	if (any_count > 0 || fd->l3_prot != -1)
		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
	else
		fd->l3_prot = PROT_L3_IPV6;
	return 0;
}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Release all hardware state bound to CAT function (category filter) 'cfn'.
+ *
+ * Clears the CFN entry, removes the function from both the KM and FLM
+ * enable bitmaps and category selects, disables every flow type for all
+ * four key sets, and - only if the CTE enable bitmap was non-zero - clears
+ * the CTE entry and the CTS A/B entries belonging to this function.
+ *
+ * Always returns 0 (the hw_mod_* setters used here report no errors).
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN: reset the category function entry itself */
+	{
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+				   0, 0);
+		hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+	}
+
+	/* KM: 8 CFNs share one enable-bitmap word, hence the cfn / 8
+	 * word index and the (cfn % 8) bit position.
+	 */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every KM flow type for this CFN on key sets 0-3 */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* FLM: mirror of the KM teardown above for the FLM side */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		/* Disable every FLM flow type for this CFN on key sets 0-3 */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* CTE / CTS: only touched when an enable bit was actually set */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		if (cte) {
+			/* Each CFN owns cts_offset consecutive CTS entries;
+			 * cts_num is rounded up to an even pair count.
+			 */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			/* Clear both the A and B category of every CTS
+			 * entry in this CFN's range.
+			 */
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill the FLM-specific fields of flow handle 'fh' from the generic flow
+ * definition 'fd'.
+ *
+ * Derives the IP protocol number from the outer L4 protocol (falling back
+ * to the tunnel L4 protocol, then 0), copies the 10-word match key from
+ * 'packet_data', stores the FLM key id, replace-extension pointer and
+ * priority, and extracts NAT/DSCP/TEID/RQI-QFI values from the
+ * modify-field list.
+ *
+ * Returns 0 on success, -1 if 'fh' is not a FLM-type handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* IANA IP protocol numbers: TCP=6, UDP=17, SCTP=132, ICMP=1 */
+	switch (fd->l4_prot) {
+	case PROT_L4_TCP:
+		fh->flm_prot = 6;
+		break;
+	case PROT_L4_UDP:
+		fh->flm_prot = 17;
+		break;
+	case PROT_L4_SCTP:
+		fh->flm_prot = 132;
+		break;
+	case PROT_L4_ICMP:
+		fh->flm_prot = 1;
+		break;
+	default:
+		/* No outer L4 match - try the tunneled L4 protocol */
+		switch (fd->tunnel_l4_prot) {
+		case PROT_TUN_L4_TCP:
+			fh->flm_prot = 6;
+			break;
+		case PROT_TUN_L4_UDP:
+			fh->flm_prot = 17;
+			break;
+		case PROT_TUN_L4_SCTP:
+			fh->flm_prot = 132;
+			break;
+		case PROT_TUN_L4_ICMP:
+			fh->flm_prot = 1;
+			break;
+		default:
+			fh->flm_prot = 0;
+			break;
+		}
+		break;
+	}
+
+	/* Match key: 10 x 32-bit words */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Pull learn-record values out of the modify-field actions;
+	 * selects not listed here are handled elsewhere and ignored.
+	 */
+	for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+		switch (fd->modify_field[i].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[i].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* RQI is bit 6, QFI the low 6 bits of the same byte */
+			fh->flm_rqi = (fd->modify_field[i].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[i].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			/* NAT values are stored host-order internally */
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[i].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[i].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[i].value32[0]);
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build a FLM v17 learn record from flow handle 'fh' and submit it to the
+ * FLM engine via flow_flm_apply().
+ *
+ * 'mtr_ids' (optional, may be NULL) supplies up to MAX_FLM_MTRS_SUPPORTED
+ * meter ids; the count of leading non-zero ids is stored in vol_idx so the
+ * last non-zero meter carries the statistics.  'flm_ft' selects the FLM
+ * flow type and 'flm_op' the learn operation (low 4 bits used).
+ *
+ * Returns -1 if 'fh' is not a FLM-type handle, otherwise the result of
+ * flow_flm_apply().
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Match key: flm_data[] words are loaded in reverse order into the
+	 * two quad-words and the two single-words of the record.
+	 */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		FLM_V17_MBR_ID1(learn_record.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(learn_record.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(learn_record.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(learn_record.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	/* NAT is enabled whenever either a NAT address or port is set */
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1; /* end-of-record marker */
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Set up KM (key matcher) flow-type and category (RCP) resources for flow
+ * handle 'fh'.
+ *
+ * When no identical flow exists ('identical_flow_found' == 0): a KM flow
+ * type is found by reuse of a matching FT identity or allocated from the
+ * first free slot; the KM RCP is either shared with 'found_flow' (a flow
+ * with a compatible but non-identical key, may be NULL) or newly
+ * allocated; finally the RCP is flushed and the match entry written.
+ *
+ * When an identical flow exists: both its KM flow type and category are
+ * reference-counted and its match entry is shared.
+ *
+ * On success *setup_km, *setup_km_ft and *setup_km_rcp are filled in for
+ * the caller's subsequent CAT programming.  Returns 0 on success, 1 on
+ * resource exhaustion (recorded in 'error').
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	/* NOTE(review): when identical_flow_found is set, the caller breaks
+	 * out of its search loop with flow == found_flow; use found_flow
+	 * directly instead of relying on that invariant.
+	 */
+	(void)flow;
+
+	if (!identical_flow_found) {
+		/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Remember the first free slot while scanning for a
+			 * matching FT identity; slot 0 is never used.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this flow type to the shared RCP's
+				 * CAM flow-type mask A.
+				 */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical key already programmed: reference the existing
+		 * flow's FT and category and share its match entry.
+		 */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		*setup_km = 1;
+		*setup_km_ft = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = found_flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * When a tunnel header longer than the Ethernet header is present:
+ * - for fields in an inner layer (or when no new outer header is built),
+ *   the offset is shifted by the extra tunnel-header length;
+ * - otherwise the offset is rebased to the start of the new outer header
+ *   using the supplied l2/l3/l4 section lengths and *dyn is forced to 1
+ *   (static offset from L2).
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/* Bug fix: advance the offset value, not the local
+			 * pointer. The original 'ofs += ...' performed
+			 * pointer arithmetic and left *ofs unchanged.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * Prepare a NIC device for inline-profile flow management.
+ *
+ * Reserves the resource indexes that the hardware treats specially (KM/FLM
+ * flow type 0, CAT CFN 0 catch-all, QSL/QST/PDB/HSH/COT recipe 0, ...),
+ * programs the default recipes, unblocks the RMC paths, initializes the FLM
+ * (SDRAM calibrate/reset, control and priority setup, periodic statistics
+ * profiles) and allocates the meter/flow-type bookkeeping handles.
+ *
+ * Idempotent: does nothing when ndev->flow_mgnt_prepared is already set.
+ * Returns 0 on success, -1 on failure (partial state is torn down again).
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit1;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit1;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit1:
+	/*
+	 * Release the handle allocations here: done_flow_management_...()
+	 * below only frees them when flow_mgnt_prepared is set, which is not
+	 * yet the case on this error path, so relying on it would leak the
+	 * calloc'ed memory. free(NULL) is a no-op, so partially successful
+	 * allocation is handled too.
+	 */
+	free(ndev->flm_mtr_handle);
+	ndev->flm_mtr_handle = NULL;
+	free(ndev->ft_res_handle);
+	ndev->ft_res_handle = NULL;
+	free(ndev->mtr_stat_handle);
+	ndev->mtr_stat_handle = NULL;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down inline-profile flow management state created by
+ * initialize_flow_management_of_ndev_profile_inline().
+ *
+ * Resets the FLM SDRAM, releases the reserved resource indexes, presets all
+ * default hardware recipes back to zero and frees the bookkeeping handles.
+ * Only acts when flow_mgnt_prepared is set; always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/*
+		 * NULL the handle pointers after freeing so a stale pointer
+		 * cannot be dereferenced or double-freed later.
+		 */
+		free(ndev->flm_mtr_handle);
+		ndev->flm_mtr_handle = NULL;
+		free(ndev->ft_res_handle);
+		ndev->ft_res_handle = NULL;
+		free(ndev->mtr_stat_handle);
+		ndev->mtr_stat_handle = NULL;
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow specification without programming anything.
+ *
+ * Runs the element/action interpreter under the device mutex and discards
+ * the resulting flow definition. Returns 0 when the specification can be
+ * interpreted, -1 otherwise (details in *error).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t unused_port_id = 0;
+	uint32_t unused_num_dest_port = 0;
+	uint32_t unused_num_queues = 0;
+	uint32_t match_data[10];
+	uint32_t match_mask[10];
+	struct flm_flow_key_def_s key_def;
+	struct nic_flow_def *flow_def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	flow_def = interpret_flow_elements(dev, elem, action, error, 0,
+					   &unused_port_id,
+					   &unused_num_dest_port,
+					   &unused_num_queues,
+					   match_data, match_mask, &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (flow_def == NULL)
+		return -1;
+
+	/* Only validation was requested - drop the interpreted definition. */
+	free(flow_def);
+	return 0;
+}
+
+/*
+ * Create and program a flow for the inline profile.
+ *
+ * Interprets the flow elements/actions into a flow definition, translates
+ * caller group IDs into hardware group indexes, and programs the resulting
+ * filter into the NIC. The whole sequence runs under the device mutex.
+ * Returns the new flow handle, or NULL on failure (details in *error).
+ *
+ * NOTE(review): on the error paths taken after interpret_flow_elements()
+ * succeeds but before create_flow_filter() is called, fd appears not to be
+ * freed - confirm ownership against create_flow_filter(), which frees it
+ * conditionally via its free_fd logic.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy; forced VLAN only applies to group 0. */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port in the actions - use our own port. */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FLOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy one flow handle; caller must already hold dev->ndev->mtx.
+ *
+ * For FLM flows: unprograms the flow, releases the TPE replace-extension and
+ * replace-record resources when their reference counts drop to zero, and
+ * recursively destroys the owner flow when its last FLM reference goes away.
+ * For non-FLM flows: clears the KM match entry, releases any jump group, and
+ * walks every resource type on the handle, resetting the corresponding
+ * hardware recipe when the last reference is dropped.
+ *
+ * Returns 0 on success, non-zero if any release step failed (fh is freed
+ * either way).
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the flow from the FLM (NULL mtr_ids, enable=0). */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Last reference to the replace-extension entry: clear it and
+		 * the replace records it points at.
+		 */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* One RPL record covers 16 bytes of replace data. */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owner flow when this was its last FLM flow. */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Release every hardware resource the handle references;
+		 * the recipe is only reset when the last reference is gone.
+		 */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Only bookkeeping: clear the
+						 * flow-type ident slot.
+						 */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe spans all TPE
+						 * sub-modules; reset each.
+						 */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy every flow in @flow_list that belongs to @dev.
+ * Stops and returns the error of the first failing destroy; 0 otherwise.
+ * Caller must hold dev->ndev->mtx.
+ */
+static int destroy_flow_list_profile_inline(struct flow_eth_dev *dev,
+					    struct flow_handle *flow_list)
+{
+	int err = 0;
+	struct flow_handle *cur = flow_list;
+
+	while (cur != NULL && err == 0) {
+		if (cur->dev != dev) {
+			cur = cur->next;
+			continue;
+		}
+
+		/* Destroying unlinks/frees cur, so remember the successor. */
+		struct flow_handle *next = cur->next;
+
+		err = flow_destroy_locked_profile_inline(dev, cur, NULL);
+		cur = next;
+	}
+
+	return err;
+}
+
+/*
+ * Destroy a single flow, or - when @flow is NULL - every flow (regular and
+ * FLM) created through @dev. Takes the device mutex for the whole operation.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	if (flow != NULL) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		err = destroy_flow_list_profile_inline(dev,
+						       dev->ndev->flow_base);
+
+		/* Delete all created FLM flows from this eth device */
+		if (err == 0)
+			err = destroy_flow_list_profile_inline(dev,
+							       dev->ndev->flow_base_flm);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/* rte_flow_flush is not implemented for the inline profile; always fails. */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	error->message = "rte_flow_flush is not supported";
+	error->type = FLOW_ERROR_GENERAL;
+	return -1;
+}
+
+/*
+ * rte_flow_query is not implemented for the inline profile; clears the
+ * output parameters and always fails.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+
+	*data = NULL;
+	*length = 0;
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into @data (one slot per field below).
+ * HW_FLM_STAT_FLOWS is a gauge and overwrites its slot; all other fields
+ * are deltas and are accumulated into the caller's buffer. Returns -1 when
+ * @size is smaller than the field list, 0 otherwise.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t n_fields = sizeof(fields) / sizeof(fields[0]);
+
+	if (size < n_fields)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t idx = 0; idx < n_fields; ++idx) {
+		uint32_t counter = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[idx], &counter);
+
+		if (fields[idx] == HW_FLM_STAT_FLOWS)
+			data[idx] = counter;
+		else
+			data[idx] += counter;
+
+		/* Older FLM versions end at PRB_IGNORE - stop there. */
+		if (ndev->be.flm.ver < 18 &&
+				fields[idx] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the IFR MTU recipe for @port in both the RPP-IFR and IFR modules,
+ * then flush the recipes when all writes succeeded. Returns 0 on success,
+ * non-zero on any backend failure; ports >= 255 are rejected with -1.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	const uint8_t rcp_idx = convert_port_to_ifr_mtu_recipe(port);
+	int res = 0;
+
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  rcp_idx, 1);
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  rcp_idx, mtu);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      rcp_idx, 1);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      rcp_idx, mtu);
+
+	/* Only push the recipes to hardware when every set call succeeded. */
+	if (res == 0) {
+		res |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+		res |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Inline-profile implementation of the binary flow API: management,
+ * create/destroy/validate/flush/query entry points and FLM statistics.
+ */
+
+/*
+ * NOTE(review): guard name begins with underscore + uppercase, which is a
+ * reserved identifier in C - consider FLOW_API_PROFILE_INLINE_H.
+ */
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down per-NIC inline flow management state; always returns 0. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* Prepare per-NIC inline flow management state; 0 on success, -1 on error. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy one flow; caller must hold the device mutex. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Interpret elem/action without programming hardware; 0 if valid. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns handle or NULL (details in *error). */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of @dev when @flow is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Read/accumulate FLM counters into @data; -1 if @size is too small. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: the selected debug mode plus one accessor
+ * object per FPGA filter module. Indexed by adapter number.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;          /* index of this adapter in be_devs[] */
+	enum debug_mode_e dmode;     /* current FLOW_BACKEND_DEBUG_MODE_* */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Enable register debug tracing on module instance @inst when either the
+ * backend is in write-debug mode or the module itself has debug set.
+ * Declares a local __debug__ flag that the matching _CHECK_DEBUG_OFF()
+ * later in the same scope reads, so the two macros must be used as a pair
+ * within one block - this is also why the whole macro cannot be wrapped in
+ * the usual do { } while (0).
+ *
+ * NOTE(review): both _CHECK_DEBUG_ON and __debug__ are reserved
+ * identifiers (leading underscore forms); renaming would touch every call
+ * site, so it is only flagged here.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+/* Disable debug tracing again if _CHECK_DEBUG_ON() enabled it. */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Record the requested backend debug mode on this adapter; always 0. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	dev->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * Backend INFO callbacks: each is a thin wrapper that forwards the query to
+ * the INFO module accessor for this adapter (port counts, CAT dimensions,
+ * queue count). All share the void *be_dev -> struct backend_dev_s cast.
+ */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+/* INFO wrappers for KM (key matcher) and KCC dimensions. */
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+/* INFO wrapper: number of FLM categories. */
+static int get_nb_flm_categories(void *be_dev)
+{
+	return info_nthw_get_nb_flm_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: FLM memory size in MB. */
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	return info_nthw_get_nb_flm_size_mb(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: FLM entry size. */
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	return info_nthw_get_nb_flm_entry_size(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: FLM variant. */
+static int get_nb_flm_variant(void *be_dev)
+{
+	return info_nthw_get_nb_flm_variant(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of FLM priorities. */
+static int get_nb_flm_prios(void *be_dev)
+{
+	return info_nthw_get_nb_flm_prios(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of FLM PST profiles. */
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	return info_nthw_get_nb_flm_pst_profiles(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of HST categories. */
+static int get_nb_hst_categories(void *be_dev)
+{
+	return info_nthw_get_nb_hst_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of QSL categories. */
+static int get_nb_qsl_categories(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of QSL QST entries. */
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_qst_entries(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of PDB categories. */
+static int get_nb_pdb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_pdb_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of IOA categories. */
+static int get_nb_ioa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_ioa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of ROA categories. */
+static int get_nb_roa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_roa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of TPE categories. */
+static int get_nb_tpe_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of TX copy writers. */
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_writers(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: TX copy mask memory size. */
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_mask_mem(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: TX replacer depth. */
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_depth(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of TX replacer extension categories. */
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_ext_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO wrapper: number of TPE IFR categories. */
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_ifr_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when the CAT module instance exists on this FPGA. */
+static bool cat_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_cat_nthw ? true : false;
+}
+
+/* CAT module version, packed as (major << 16) | (minor & 0xffff). */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffffU;
+
+	return (major << 16) | minor;
+}
+
+/*
+ * Flush 'cnt' CFN (categorizer function) records, starting at 'cat_func',
+ * from the shadow copy in 'cat' to the CAT module registers.
+ * Layout of the record depends on the module version (v18 vs v21/v22);
+ * v21/v22 adds tunnel/TTL error bits and a second KM interface.
+ * Returns 0; unsupported versions are silently ignored (matches the other
+ * flush helpers in this backend).
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/*
+		 * Fix: this call was garbled to "r(be->p_cat_nthw, 1U)";
+		 * set the register burst count to 1 before the
+		 * select/write/flush loop, as every other flush helper does.
+		 */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
+			/* Protocol type classification bits */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			/* Frame error match bits */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
+
+			/* Pattern matcher configuration */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* Fix: same garbled "r(...)" call as in the v18 branch. */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
+			/* Protocol type classification bits */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			/* Frame error match bits (v21 adds tunnel/TTL errors) */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
+
+			/* Pattern matcher configuration */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
+			/* Second KM interface is optional in hardware */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						    cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCE (KM category enable) records starting at 'index'. */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18 has a single KM interface (interface 0) */
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_kce_select(be->p_cat_nthw, 0, idx);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[idx].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, idx);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[idx].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCS (KM category select) records starting at 'cat_func'. */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18 has a single KM interface (interface 0) */
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			const int cf = cat_func + i;
+
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cf);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cf].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			const int cf = cat_func + i;
+
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cf);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cf].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' FTE (flow type enable) records starting at 'index'. */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18 has a single KM interface (interface 0) */
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_fte_select(be->p_cat_nthw, 0, idx);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[idx].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, idx);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[idx].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE (categorizer table enable) records starting at 'cat_func'.
+ * v18 and v21 share the v18 record layout; v22 adds the RRB enable bit.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v22.cte[cat_func].b.tpe);
+			/*
+			 * Fix: the rrb value was written through
+			 * cat_nthw_cte_enable_tpe(), overwriting the tpe
+			 * enable just set above; route it to the rrb field.
+			 */
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw, cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS (categorizer table select) records starting at 'index'.
+ * All supported versions share the v18 record layout.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_cts_select(be->p_cat_nthw, idx);
+			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[idx].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[idx].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' COT (color table) records starting at 'cat_func'. */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = cat_func + i;
+
+			cat_nthw_cot_select(be->p_cat_nthw, idx);
+			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[idx].color);
+			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[idx].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCT (color/KM combine table) records starting at 'index'. */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_cct_select(be->p_cat_nthw, idx);
+			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[idx].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[idx].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' EXO (extractor offset) records starting at 'ext_index'. */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = ext_index + i;
+
+			cat_nthw_exo_select(be->p_cat_nthw, idx);
+			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[idx].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[idx].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' RCK records starting at 'index'. */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_rck_select(be->p_cat_nthw, idx);
+			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[idx].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' LEN (frame length match) records starting at 'len_index'. */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_len_select(be->p_cat_nthw, idx);
+			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[idx].lower);
+			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[idx].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[idx].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[idx].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[idx].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCC CAM records starting at 'len_index'. */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_kcc_select(be->p_cat_nthw, idx);
+			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[idx].key);
+			cat_nthw_kcc_category(be->p_cat_nthw, cat->v18.kcc_cam[idx].category);
+			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[idx].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCE records starting at 'len_index' (v22-only table). */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_cce_select(be->p_cat_nthw, idx);
+			cat_nthw_cce_data_imm(be->p_cat_nthw, cat->v22.cce[idx].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw, cat->v22.cce[idx].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCS records starting at 'len_index' (v22-only table). */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_ccs_select(be->p_cat_nthw, idx);
+			/* Per-engine enable bit plus its value */
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw, cat->v22.ccs[idx].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw, cat->v22.ccs[idx].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw, cat->v22.ccs[idx].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw, cat->v22.ccs[idx].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw, cat->v22.ccs[idx].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw, cat->v22.ccs[idx].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw, cat->v22.ccs[idx].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw, cat->v22.ccs[idx].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw, cat->v22.ccs[idx].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw, cat->v22.ccs[idx].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw, cat->v22.ccs[idx].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw, cat->v22.ccs[idx].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw, cat->v22.ccs[idx].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw, cat->v22.ccs[idx].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw, cat->v22.ccs[idx].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw, cat->v22.ccs[idx].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw, cat->v22.ccs[idx].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw, cat->v22.ccs[idx].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw, cat->v22.ccs[idx].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw, cat->v22.ccs[idx].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw, cat->v22.ccs[idx].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw, cat->v22.ccs[idx].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw, cat->v22.ccs[idx].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw, cat->v22.ccs[idx].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw, cat->v22.ccs[idx].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw, cat->v22.ccs[idx].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw, cat->v22.ccs[idx].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw, cat->v22.ccs[idx].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when the KM module instance exists on this FPGA. */
+static bool km_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_km_nthw ? true : false;
+}
+
+/* KM module version, packed as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_km_nthw->m_km);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_km_nthw->m_km) & 0xffffU;
+
+	return (major << 16) | minor;
+}
+
+/* Flush 'cnt' KM recipe (RCP) records starting at 'category' (v7 layout). */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			km_nthw_rcp_select(be->p_km_nthw, idx);
+			/* Key word/dword extractor configuration */
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[idx].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[idx].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[idx].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[idx].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[idx].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[idx].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[idx].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[idx].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[idx].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[idx].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[idx].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[idx].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[idx].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[idx].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[idx].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[idx].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[idx].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[idx].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[idx].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[idx].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[idx].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[idx].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[idx].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[idx].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[idx].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[idx].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[idx].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[idx].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[idx].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[idx].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[idx].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[idx].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[idx].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[idx].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[idx].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[idx].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[idx].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[idx].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[idx].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[idx].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[idx].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[idx].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[idx].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[idx].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM CAM records starting at 'record' within 'bank'.
+ * The bank is folded into the CAM address at bit 11; this assumes
+ * 2048 records per bank -- NOTE(review): confirm against the FPGA map.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = (bank << 11) + record + i;
+
+			km_nthw_cam_select(be->p_km_nthw, idx);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[idx].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[idx].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[idx].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[idx].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[idx].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[idx].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[idx].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[idx].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[idx].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[idx].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[idx].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[idx].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush up to 'cnt' KM TCAM entries, writing only those marked dirty
+ * and clearing the dirty flag afterwards.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* 4 bytes per bank, 256 values per byte */
+		const int base = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			if (!km->v7.tcam[base + i].dirty)
+				continue;
+			km_nthw_tcam_select(be->p_km_nthw, base + i);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[base + i].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[base + i].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM TCI (TCAM color/flow-type info) entries to hardware.
+ * bank is the TCAM bank, index is the index within the bank (0..71);
+ * the flat entry address is bank * 72 + index + i.  Always returns 0.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries (comment dates from v3; code runs for v7 -- confirm) */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM TCQ (TCAM qualifier) entries to hardware.
+ * bank is the TCAM bank, index is the index within the bank (0..71).
+ * Always returns 0.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries (comment dates from v3; code runs for v7 -- confirm) */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* True when the FLM (flow matcher) module was detected on this FPGA. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_flm_nthw != NULL;
+}
+
+/* FLM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
+			  (module_get_minor_version(be->p_flm_nthw->m_flm) &
+			   0xffff));
+}
+
+/*
+ * Write the FLM CONTROL register group from the flm->v17 shadow and flush
+ * it to hardware.  Only acts for module versions >= 17.  Always returns 0.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM STATUS fields (critical/panic/crcerr) to hardware;
+ * the final argument 0 presumably selects write direction -- confirm against
+ * the flm_nthw accessor implementation.  Always returns 0.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read the FLM STATUS register and copy each field into the flm->v17
+ * shadow; the final argument 1 presumably selects read direction -- confirm
+ * against the flm_nthw accessor implementation.  Always returns 0.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM TIMEOUT register from the v17 shadow.  Always returns 0. */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM SCRUB interval register from the v17 shadow.  Always returns 0. */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_BIN register from the v17 shadow.  Always returns 0. */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_PPS register from the v17 shadow.  Always returns 0. */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_LPS register from the v17 shadow.  Always returns 0. */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_APS register from the v17 shadow.  Always returns 0. */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM PRIO register group (four limit/flow-type pairs) from the
+ * v17 shadow and flush it.  Always returns 0.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FLM PST records starting at 'index' from the v17 shadow,
+ * one record per flush.  Always returns 0.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FLM RCP (recipe) records starting at 'index' from the v17
+ * shadow, one record per flush.  Always returns 0.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the learn/info/status buffer availability counters in the v17
+ * buf_ctrl shadow from hardware.  Always returns 0.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Latch all FLM statistic counters in hardware (the *_update calls) and
+ * read them into the shadow (the *_cnt calls, read direction = 1).
+ * The v17 set is always read for ver >= 17; ver >= 20 adds the v20
+ * counters (sta/inf/pck/csh/cuc groups).  Always returns 0.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push 'size' words of learn data to the FLM and refresh the buf_ctrl
+ * counters.  Returns the nthw-layer status.  NOTE(review): unlike the
+ * other FLM functions there is no flm->ver gate here, so flm->v17 is
+ * dereferenced unconditionally -- confirm callers guarantee ver >= 17.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read 'size' words of info data from the FLM and refresh the buf_ctrl
+ * counters.  Returns the nthw-layer status.  Same missing ver gate as
+ * flm_lrn_data_flush above.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read 'size' words of status data from the FLM and refresh the buf_ctrl
+ * counters.  Returns the nthw-layer status.  Same missing ver gate as
+ * flm_lrn_data_flush above.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* True when the HSH (hashing) module was detected on this FPGA. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hsh_nthw != NULL;
+}
+
+/* HSH module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
+			  (module_get_minor_version(be->p_hsh_nthw->m_hsh) &
+			   0xffff));
+}
+
+/*
+ * Flush 'cnt' HSH RCP (hash recipe) records starting at 'category' from
+ * the v5 shadow, one record per flush.  Only version 5 is handled
+ * (exact match, unlike the FLM '>=' checks).  Always returns 0.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* True when the HST (header stripper) module was detected on this FPGA. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hst_nthw != NULL;
+}
+
+/* HST module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hst_nthw->m_hst) << 16) |
+			  (module_get_minor_version(be->p_hst_nthw->m_hst) &
+			   0xffff));
+}
+
+/*
+ * Flush 'cnt' HST RCP records starting at 'category' from the v2 shadow,
+ * one record per flush.  Only version 2 is handled.  Always returns 0.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* True when the QSL (queue select) module was detected on this FPGA. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_qsl_nthw != NULL;
+}
+
+/* QSL module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
+			  (module_get_minor_version(be->p_qsl_nthw->m_qsl) &
+			   0xffff));
+}
+
+/*
+ * Flush 'cnt' QSL RCP records starting at 'category' from the v7 shadow,
+ * one record per flush.  Only version 7 is handled.  Always returns 0.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL QST (queue select table) entries starting at 'entry'
+ * from the v7 shadow, one entry per flush.  Always returns 0.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL QEN (queue enable) entries starting at 'entry' from the
+ * v7 shadow, one entry per flush.  Always returns 0.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL UNMQ (unmatched-queue) entries starting at 'entry' from
+ * the v7 shadow, one entry per flush.  Always returns 0.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* True when the SLC (slicer) module was detected on this FPGA. */
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_nthw != NULL;
+}
+
+/* SLC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_nthw->m_slc) << 16) |
+			  (module_get_minor_version(be->p_slc_nthw->m_slc) &
+			   0xffff));
+}
+
+/*
+ * Flush 'cnt' SLC RCP records starting at 'category' from the v1 shadow,
+ * one record per flush.  Only version 1 is handled.  Always returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* True when the SLC LR module was detected on this FPGA. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_lr_nthw != NULL;
+}
+
+/* SLC LR module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_lr_nthw->m_slc_lr)
+			   << 16) |
+			  (module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) &
+			   0xffff));
+}
+
+/*
+ * Flush 'cnt' SLC LR RCP records starting at 'category' from the v2
+ * shadow, one record per flush.  Only version 2 is handled.  Always
+ * returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* True when the PDB (packet descriptor builder) module was detected. */
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_pdb_nthw != NULL;
+}
+
+/* PDB module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
+			  (module_get_minor_version(be->p_pdb_nthw->m_pdb) &
+			   0xffff));
+}
+
+/*
+ * Flush 'cnt' PDB RCP records starting at 'category' from the v9 shadow,
+ * one record per flush.  Only version 9 is handled.  Always returns 0.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Bug fix: the original called pdb_nthw_rcp_duplicate_bit()
+			 * a second time with the pcap_keep_fcs value (copy-paste),
+			 * which clobbered the DUPLICATE_BIT shadow and never
+			 * programmed PCAP_KEEP_FCS through its own setter.
+			 * NOTE(review): requires pdb_nthw_rcp_pcap_keep_fcs()
+			 * from the PDB nthw layer -- confirm it is declared in
+			 * the corresponding flow_nthw_pdb header.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the PDB CONFIG register (timestamp format, port offset) from the
+ * v9 shadow and flush it.  Always returns 0.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* True when the IOA module was detected on this FPGA. */
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_ioa_nthw != NULL;
+}
+
+/* IOA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_ioa_nthw->m_ioa) << 16) |
+			  (module_get_minor_version(be->p_ioa_nthw->m_ioa) &
+			   0xffff));
+}
+
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw != NULL;
+}
+
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+			  (module_get_minor_version(be->p_roa_nthw->m_roa) &
+			   0xffff));
+}
+
/*
 * Write tunnel-header template data for a range of ROA records.
 *
 * Each record occupies 4 hardware words: the outer loop walks 'cnt'
 * records and the inner loop writes the 4 words of one record, flushing
 * after each word.  Only ROA version 6 is handled.  Always returns 0.
 *
 * NOTE(review): 'index' is used at word granularity for the hardware
 * select (index + i * 4 + ii) but at record granularity for the source
 * array (tunhdr[index / 4 + i]); the two only line up when 'index' is a
 * multiple of 4 — confirm callers guarantee this.
 * NOTE(review): 'tunnel_hdr[ii * 4]' suggests the source buffer advances
 * in 4-byte steps per hardware word — confirm the element type.
 */
static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
			    int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		/* Burst size of 4: one full record per flush sequence. */
		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
		for (int i = 0; i < cnt; i++) {
			for (int ii = 0; ii < 4; ii++) {
				roa_nthw_tun_hdr_select(be->p_roa_nthw,
						     index + (i * 4) + ii);
				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
							    &roa->v6.tunhdr[index / 4 + i]
							    .tunnel_hdr[ii * 4]);
				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
			}
		}
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw != NULL;
+}
+
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+			  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+			   0xffff));
+}
+
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+	       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+	       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
/*
 * Derive a single TPE version number from the versions of its six
 * sub-modules.  Each sub-module version is packed (major << 16) | minor;
 * only two known combinations are mapped:
 *   CSU 0, HFU 1, RPP_LR 0, TX_CPY 1, TX_INS 1, TX_RPL 2  ->  TPE v1
 *   CSU 0, HFU 1, RPP_LR 1, TX_CPY 1, TX_INS 1, TX_RPL 2  ->  TPE v2
 * (i.e. the two variants differ only in the RPP_LR version).
 *
 * NOTE(review): the comparisons are against the full packed value, so
 * e.g. "hfu_version == 1" means HFU major 0, minor 1.
 * NOTE(review): an unknown combination hits assert(false); with NDEBUG
 * the assert is compiled out and the function silently returns 0 —
 * confirm callers treat 0 as "unsupported".
 */
static uint32_t tpe_get_version(void *be_dev)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	const uint32_t csu_version =
		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
			    0xffff));

	const uint32_t hfu_version =
		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
			    0xffff));

	const uint32_t rpp_lr_version =
		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
			    << 16) |
			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
			    0xffff));

	const uint32_t tx_cpy_version =
		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
			    << 16) |
			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
			    0xffff));

	const uint32_t tx_ins_version =
		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
			    << 16) |
			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
			    0xffff));

	const uint32_t tx_rpl_version =
		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
			    << 16) |
			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
			    0xffff));

	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
		return 1;

	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
		return 2;

	/* Unknown sub-module version combination. */
	assert(false);
	return 0;
}
+
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
/*
 * Write 'cnt' HFU (header field update) RCP records starting at 'index'
 * to hardware, flushing after each record.  Each record programs three
 * length-update engines (A, B, C), a TTL update, and a set of protocol /
 * offset fields.  Handled for HFU version >= 1.  Always returns 0.
 */
static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);

	if (hfu->ver >= 1) {
		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
			/* Length engine A */
			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
					  hfu->v1.hfu_rcp[index + i].len_a_wr);
			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
			/* Length engine B */
			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
					      hfu->v1.hfu_rcp[index + i].len_b_wr);
			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
			/* Length engine C */
			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
					      hfu->v1.hfu_rcp[index + i].len_c_wr);
			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
			/* TTL update */
			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].ttl_wr);
			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
			/* Protocol info and header offsets */
			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].cs_inf);
			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].l3_prt);
			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].l3_frag);
			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].tunnel);
			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].l4_prt);
			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
			hfu_nthw_rcp_flush(be->p_hfu_nthw);
		}
	}

	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
	return 0;
}
+
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
/*
 * Stub: Rx queue release is not handled by this backend; it only logs
 * the attempt.
 *
 * NOTE(review): returns 0 (success) even though this is an error path,
 * whereas alloc_rx_queue() returns -1 — confirm whether callers depend
 * on free always "succeeding" before making the pair consistent.
 */
static int free_rx_queue(void *be_dev, int hw_queue)
{
	(void)be_dev;
	(void)hw_queue;
	printf("ERROR free Rx queue\n");
	return 0;
}
+
/*
 * Backend operations table wiring every callback implemented in this file
 * into the generic flow API.
 *
 * The initializer is POSITIONAL: each entry must stay in the exact order
 * of the corresponding member of struct flow_api_backend_ops.  Do not
 * reorder entries without checking the struct declaration.
 *
 * NOTE(review): designated initializers (.member = fn) would make this
 * table robust against struct layout changes — confirm project style.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,	/* NOTE(review): presumably the backend API version — confirm */

	/* Capability / resource-dimension queries */
	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_hst_categories,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_ioa_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,

	/* DBS queue stubs (unsupported in this backend) */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,
	cat_cce_flush,
	cat_ccs_flush,

	/* KM module */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM module */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_timeout_flush,
	flm_scrub_flush,
	flm_load_bin_flush,
	flm_load_pps_flush,
	flm_load_lps_flush,
	flm_load_aps_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_data_update,
	flm_sta_data_update,

	/* HSH module */
	hsh_get_present,
	hsh_get_version,
	hsh_rcp_flush,

	/* HST module */
	hst_get_present,
	hst_get_version,
	hst_rcp_flush,

	/* QSL module */
	qsl_get_present,
	qsl_get_version,
	qsl_rcp_flush,
	qsl_qst_flush,
	qsl_qen_flush,
	qsl_unmq_flush,

	/* SLC module */
	slc_get_present,
	slc_get_version,
	slc_rcp_flush,

	/* SLC LR module */
	slc_lr_get_present,
	slc_lr_get_version,
	slc_lr_rcp_flush,

	/* PDB module */
	pdb_get_present,
	pdb_get_version,
	pdb_rcp_flush,
	pdb_config_flush,

	/* IOA module */
	ioa_get_present,
	ioa_get_version,
	ioa_rcp_flush,
	ioa_special_tpid_flush,
	ioa_roa_epp_flush,

	/* ROA module */
	roa_get_present,
	roa_get_version,
	roa_tunhdr_flush,
	roa_tuncfg_flush,
	roa_config_flush,
	roa_lagcfg_flush,

	/* RMC module */
	rmc_get_present,
	rmc_get_version,
	rmc_ctrl_flush,

	/* TPE composite module */
	tpe_get_present,
	tpe_get_version,
	tpe_rpp_rcp_flush,
	tpe_rpp_ifr_rcp_flush,
	tpe_ifr_rcp_flush,
	tpe_ins_rcp_flush,
	tpe_rpl_rcp_flush,
	tpe_rpl_ext_flush,
	tpe_rpl_rpl_flush,
	tpe_cpy_rcp_flush,
	tpe_hfu_rcp_flush,
	tpe_csu_rcp_flush,
};
+
/*
 * Probe the FPGA for every flow-filter hardware module and populate the
 * backend device slot for this adapter.
 *
 * The per-module pattern is: a first <mod>_nthw_init(NULL, ...) call is
 * used purely as a presence probe (the code treats a 0 return as "module
 * found"); only then is a real instance allocated with <mod>_nthw_new()
 * and initialized.  Absent modules leave a NULL handle in be_devs[], so
 * the corresponding <mod>_get_present() callback reports them missing.
 * Only the INFO module is initialized unconditionally.
 *
 * @param p_fpga FPGA instance to probe.
 * @param dev    Out: opaque backend device handle (entry in be_devs[]).
 * @return Pointer to the static backend operations table flow_be_iface.
 */
const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
		void **dev)
{
	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;

	/* INFO module is mandatory and initialized without probing. */
	struct info_nthw *pinfonthw = info_nthw_new();

	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;

	/* Init nthw CAT */
	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct cat_nthw *pcatnthw = cat_nthw_new();

		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
	} else {
		be_devs[physical_adapter_no].p_cat_nthw = NULL;
	}
	/* Init nthw KM */
	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct km_nthw *pkmnthw = km_nthw_new();

		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
	} else {
		be_devs[physical_adapter_no].p_km_nthw = NULL;
	}
	/* Init nthw FLM */
	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct flm_nthw *pflmnthw = flm_nthw_new();

		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
	} else {
		be_devs[physical_adapter_no].p_flm_nthw = NULL;
	}
	/* Init nthw IFR */
	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct ifr_nthw *ifrnthw = ifr_nthw_new();

		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
	} else {
		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
	}
	/* Init nthw HSH */
	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct hsh_nthw *phshnthw = hsh_nthw_new();

		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
	} else {
		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
	}
	/* Init nthw HST */
	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct hst_nthw *phstnthw = hst_nthw_new();

		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
	} else {
		be_devs[physical_adapter_no].p_hst_nthw = NULL;
	}
	/* Init nthw QSL */
	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct qsl_nthw *pqslnthw = qsl_nthw_new();

		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
	} else {
		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
	}
	/* Init nthw SLC */
	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct slc_nthw *pslcnthw = slc_nthw_new();

		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
	} else {
		be_devs[physical_adapter_no].p_slc_nthw = NULL;
	}
	/* Init nthw SLC LR */
	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();

		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
	} else {
		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
	}
	/* Init nthw PDB */
	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct pdb_nthw *ppdbnthw = pdb_nthw_new();

		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
	} else {
		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
	}
	/* Init nthw IOA */
	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct ioa_nthw *pioanthw = ioa_nthw_new();

		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
	} else {
		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
	}
	/* Init nthw ROA */
	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct roa_nthw *proanthw = roa_nthw_new();

		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
	} else {
		be_devs[physical_adapter_no].p_roa_nthw = NULL;
	}
	/* Init nthw RMC */
	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct rmc_nthw *prmcnthw = rmc_nthw_new();

		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
	} else {
		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
	}
	/* Init nthw HFU */
	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct hfu_nthw *ptr = hfu_nthw_new();

		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
	}
	/* Init nthw RPP_LR */
	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();

		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
	}
	/* Init nthw TX_CPY */
	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();

		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
	}
	/* Init nthw CSU */
	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct csu_nthw *ptr = csu_nthw_new();

		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_csu_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_csu_nthw = NULL;
	}
	/* Init nthw TX_INS */
	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct tx_ins_nthw *ptr = tx_ins_nthw_new();

		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
	}
	/* Init nthw TX_RPL */
	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();

		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
	}
	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
	*dev = (void *)&be_devs[physical_adapter_no];

	return &flow_be_iface;
}
+
/*
 * Tear down a backend device created by bin_flow_backend_init() by
 * deleting every per-module nthw instance.
 *
 * NOTE(review): handles for modules that were absent at init time are
 * NULL here; the <mod>_nthw_delete() functions are presumably NULL-safe —
 * confirm before relying on this in new code.
 */
void bin_flow_backend_done(void *dev)
{
	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;

	info_nthw_delete(be_dev->p_info_nthw);
	cat_nthw_delete(be_dev->p_cat_nthw);
	km_nthw_delete(be_dev->p_km_nthw);
	flm_nthw_delete(be_dev->p_flm_nthw);
	hsh_nthw_delete(be_dev->p_hsh_nthw);
	hst_nthw_delete(be_dev->p_hst_nthw);
	qsl_nthw_delete(be_dev->p_qsl_nthw);
	slc_nthw_delete(be_dev->p_slc_nthw);
	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
	pdb_nthw_delete(be_dev->p_pdb_nthw);
	ioa_nthw_delete(be_dev->p_ioa_nthw);
	roa_nthw_delete(be_dev->p_roa_nthw);
	rmc_nthw_delete(be_dev->p_rmc_nthw);
	csu_nthw_delete(be_dev->p_csu_nthw);
	hfu_nthw_delete(be_dev->p_hfu_nthw);
	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/* NOTE(review): guard says _HPP_ although this is a C header included from C
 * code — consider renaming to __FLOW_FILTER_H__ for consistency with the
 * sibling headers. */
#ifndef __FLOW_FILTER_HPP__
#define __FLOW_FILTER_HPP__
/* Make sure USE_OPAE is not defined for this translation unit. */
#undef USE_OPAE

#include "nthw_fpga_model.h"
#include "flow_api.h"

/* Create a flow NIC device for @adapter_no on @p_fpga.
 * Returns 0 on success; on failure returns -1 and sets *p_flow_device to
 * NULL. */
int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
		   int adapter_no);
/* Destroy a device created by flow_filter_init(); returns the status of the
 * underlying flow_api_done() call. */
int flow_filter_done(struct flow_nic_dev *dev);

#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v4 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-18 18:41   ` [PATCH v4 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-18 18:41   ` Mykola Kostenok
  2023-08-18 18:41   ` [PATCH v4 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 28 files changed, 12513 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c3f2c993f..02aca74173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
/* Per-adapter driver context (the "4GA" generation driver state). */
typedef struct ntdrv_4ga_s {
	uint32_t pciident;	/* PCI identity of the adapter — presumably an
				 * encoded domain/bus/dev/fn; confirm at the
				 * point where it is assigned */
	struct adapter_info_s adapter_info;
	char *p_drv_name;	/* driver name string */

	volatile bool b_shutdown;	/* set to ask the worker threads to exit */
	pthread_mutex_t stat_lck;	/* presumably serializes access to the
					 * statistics read by stat_thread —
					 * confirm against users */
	pthread_t stat_thread;	/* statistics polling thread */
	pthread_t flm_thread;	/* flow manager (FLM) thread */
} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
struct drv_s {
	int adapter_no;			/* adapter index for this device */
	struct rte_pci_device *p_dev;	/* underlying DPDK PCI device */
	struct ntdrv_4ga_s ntdrv;	/* embedded per-adapter driver state */

	int n_eth_dev_init_count;	/* number of eth devices initialized on
					 * this adapter — confirm semantics at
					 * increment/decrement sites */
	int probe_finished;		/* nonzero once PCI probe has completed */
};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index fde385d929..40ab25899e 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -21,6 +21,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -107,6 +110,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
#pragma pack(1)
/* Split virtqueue available ring; packed so its in-memory layout exactly
 * matches what the device-side DMA engine expects. */
struct virtq_avail {
	le16 flags;	/* VIRTQ_AVAIL_F_NO_INTERRUPT or 0 */
	le16 idx;
	le16 ring[]; /* Queue size */
};

#pragma pack()
/**************************************************************************/

/* le32 is used here for ids for padding reasons. */
#pragma pack(1)
/* One entry of the split virtqueue used ring. */
struct virtq_used_elem {
	/* Index of start of used descriptor chain. */
	le32 id;
	/* Total length of the descriptor chain which was used (written to) */
	le32 len;
};

#pragma pack()

#define VIRTQ_USED_F_NO_NOTIFY 1

#pragma pack(1)
/* Split virtqueue used ring, written by the device side. */
struct virtq_used {
	le16 flags;	/* VIRTQ_USED_F_NO_NOTIFY or 0 */
	le16 idx;
	struct virtq_used_elem ring[]; /* Queue size */
};

#pragma pack()

/* Byte offsets of the used ring and the descriptor table within one
 * contiguous virtqueue allocation; the avail ring starts at offset 0
 * (see dbs_calc_struct_layout()). */
struct virtq_struct_layout_s {
	size_t used_offset;
	size_t desc_offset;
};

/* State of one rxvq[]/txvq[] slot. */
enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };

/* 1 when the queue type is a packed ring, 0 for a split ring. */
#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
/*
 * Driver-side state for one DBS virtqueue (RX or TX).  The anonymous union
 * holds either split-ring or packed-ring bookkeeping, selected by vq_type.
 */
struct nthw_virt_queue {
	/* Pointers to virt-queue structs */
	union {
		struct {
			/* SPLIT virtqueue */
			struct virtq_avail *p_avail;
			struct virtq_used *p_used;
			struct virtq_desc *p_desc;
			/* Control variables for virt-queue structs */
			le16 am_idx;
			le16 used_idx;
			le16 cached_idx;
			le16 tx_descr_avail_idx;
		};
		struct {
			/* PACKED virtqueue */
			struct pvirtq_event_suppress *driver_event;
			struct pvirtq_event_suppress *device_event;
			struct pvirtq_desc *desc;
			struct {
				le16 next;
				le16 num;
			} outs;
			/*
			 * when in-order release used Tx packets from FPGA it may collapse
			 * into a batch. When getting new Tx buffers we may only need
			 * partial
			 */
			le16 next_avail;
			le16 next_used;
			le16 avail_wrap_count;
			le16 used_wrap_count;
		};
	};

	/* Array with packet buffers */
	struct nthw_memory_descriptor *p_virtual_addr;

	/* Queue configuration info */
	enum nthw_virt_queue_usage usage;
	uint16_t vq_type;	/* split vs PACKED_RING */
	uint16_t in_order;
	int irq_vector;		/* MSI-X vector, or negative for polled mode */

	nthw_dbs_t *mp_nthw_dbs;
	uint32_t index;		/* hardware queue index (also slot in rxvq/txvq) */
	le16 queue_size;	/* number of descriptors (a power of two) */
	uint32_t am_enable;	/* last RX/TX_AM_ENABLE/DISABLE value written */
	uint32_t host_id;
	uint32_t port; /* Only used by TX queues */
	uint32_t virtual_port; /* Only used by TX queues */
	uint32_t header;
	/*
	 * Only used by TX queues:
	 *   0: VirtIO-Net header (12 bytes).
	 *   1: Napatech DVIO0 descriptor (12 bytes).
	 */
	void *avail_struct_phys_addr;
	void *used_struct_phys_addr;
	void *desc_struct_phys_addr;
};

/* Byte offsets of the event-suppression areas of a packed virtqueue. */
struct pvirtq_struct_layout_s {
	size_t driver_event_offset;
	size_t device_event_offset;
};

/* Static per-adapter queue tables, indexed by hardware queue number. */
static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
/*
 * One-time initialization of the DBS (virtqueue DMA engine) for an adapter.
 *
 * Creates the nthw_dbs shadow object, verifies the DBS module exists in the
 * FPGA, resets it, initializes every RX/TX hardware queue, and finally
 * enables the RX and TX sides step-wise.
 *
 * Returns 0 on success; on failure returns non-zero and leaves
 * p_fpga_info->mp_nthw_dbs NULL.
 */
int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
{
	assert(p_fpga_info);

	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
	nthw_dbs_t *p_nthw_dbs;
	int res = 0;
	uint32_t i;

	p_fpga_info->mp_nthw_dbs = NULL;

	p_nthw_dbs = nthw_dbs_new();
	if (p_nthw_dbs == NULL)
		return -1;

	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
	if (res) {
		free(p_nthw_dbs);
		return res;
	}

	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
	if (res) {
		free(p_nthw_dbs);
		return res;
	}

	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;

	/* Mark every software queue slot free before touching hardware. */
	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
		rxvq[i].usage = UNUSED;
		txvq[i].usage = UNUSED;
	}

	dbs_reset(p_nthw_dbs);

	/* Reset index/pointer state of all hardware queues. */
	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);

	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);

	/* Bring the RX side up in stages (all off, pollers on, queue on).
	 * NOTE(review): the 3-step sequence presumably follows the DBS
	 * bring-up requirements — confirm against the DBS documentation. */
	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);

	/* Same staged bring-up for the TX side. */
	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);

	return 0;
}
+
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	size_t avail_mem =
+		sizeof(struct virtq_avail) +
+		queue_size *
+		sizeof(le16); /* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_mem_aligned =
+		((avail_mem % STRUCT_ALIGNMENT) == 0) ?
+		avail_mem :
+		STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_mem =
+		sizeof(struct virtq_used) +
+		queue_size *
+		sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned =
+		((used_mem % STRUCT_ALIGNMENT) == 0) ?
+		used_mem :
+		STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = 1;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
/*
 * Fill a split-ring descriptor table from an array of preallocated packet
 * buffers; descriptor i gets buffer i's physical address and length, the
 * caller-supplied flags, and next = 0.  No-op when
 * packet_buffer_descriptors is NULL.
 *
 * NOTE(review): the "ule16" parameter type is not defined in this file
 * (only the le16/le32/le64 macros are) — presumably a typedef from a
 * shared driver header; confirm.
 */
static void dbs_initialize_descriptor_struct(void *addr,
	struct nthw_memory_descriptor *packet_buffer_descriptors,
	uint16_t queue_size, ule16 flgs)
{
	if (packet_buffer_descriptors) {
		int i;
		struct virtq_desc *p_desc = (struct virtq_desc *)addr;

		for (i = 0; i < queue_size; ++i) {
			p_desc[i].addr =
				(uint64_t)packet_buffer_descriptors[i].phys_addr;
			p_desc[i].len = packet_buffer_descriptors[i].len;
			p_desc[i].flags = flgs;
			/* next left 0 — whether chaining applies is governed
			 * by flgs (VIRTQ_DESC_F_NEXT); confirm callers. */
			p_desc[i].next = 0;
		}
	}
}
+
/*
 * Initialize all three areas of a split virtqueue (avail ring, used ring,
 * descriptor table) in one call; simply forwards to the three per-area
 * helpers above.
 */
static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
	void *used_struct_addr, void *desc_struct_addr,
	struct nthw_memory_descriptor *packet_buffer_descriptors,
	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
{
	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
				    initial_avail_idx);
	dbs_initialize_used_struct(used_struct_addr, queue_size);
	dbs_initialize_descriptor_struct(desc_struct_addr,
					 packet_buffer_descriptors,
					 queue_size, flgs);
}
+
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
/*
 * Configure and enable one RX virtqueue in the DBS block and record its
 * state in the static rxvq[] table.
 *
 * Programs the DBS RX_DR (descriptor reader), RX_UW (used-ring writer) and
 * RX_AM (avail-ring monitor) memories in the order mandated by the DBS
 * design (referenced below as DSF00094), then initializes the queue via
 * RX_INIT and finally enables AM.  Interrupt-driven queues (irq_vector >= 0)
 * are set up with interrupts disabled here; they are enabled later, once
 * vfio interrupts are configured in the kernel.
 *
 * Returns a pointer to the queue's rxvq[] entry on success, NULL if any
 * register write fails.
 */
struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
	uint32_t index, uint16_t start_idx,
	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
	uint32_t header, uint32_t vq_type, int irq_vector)
{
	uint32_t qs = dbs_qsize_log2(queue_size);
	uint32_t int_enable;
	uint32_t vec;
	uint32_t istk;

	/*
	 * Setup DBS module - DSF00094
	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
	 * DBS_RX_QUEUES entries.
	 */
	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
			   host_id, qs, header, PACKED(vq_type)) != 0)
		return NULL;

	/*
	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
	 *   DBS_RX_QUEUES entries.
	 *   Notice: We always start out with interrupts disabled (by setting the
	 *     "irq_vector" argument to -1). Queues that require interrupts will have
	 *     it enabled at a later time (after we have enabled vfio interrupts in
	 *     the kernel).
	 */
	int_enable = 0;
	vec = 0;
	istk = 0;
	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
	       __func__, irq_vector);
	/* For a packed ring the UW slot points at the descriptor area instead
	 * of a separate used ring. */
	if (set_rx_uw_data(p_nthw_dbs, index,
			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
			   (uint64_t)used_struct_phys_addr,
			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
		return NULL;

	/*
	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
	 *  good idea to initialize all DBS_RX_QUEUES entries.
	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
	 *    at a later time (after we have enabled vfio interrupts in the kernel).
	 */
	if (irq_vector < 0) {
		if (set_rx_am_data(p_nthw_dbs, index,
				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
				   host_id, PACKED(vq_type),
				   irq_vector >= 0 ? 1 : 0) != 0)
			return NULL;
	}

	/*
	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
	 *   DBS.RX_INIT register.
	 */
	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);

	/*
	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
	 *  good idea to initialize all DBS_RX_QUEUES entries.
	 */
	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
			   RX_AM_ENABLE, host_id, PACKED(vq_type),
			   irq_vector >= 0 ? 1 : 0) != 0)
		return NULL;

	/* Save queue state */
	rxvq[index].usage = UNMANAGED;
	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
	rxvq[index].index = index;
	rxvq[index].queue_size = queue_size;
	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
	rxvq[index].host_id = host_id;
	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
	rxvq[index].vq_type = vq_type;
	rxvq[index].in_order = 0; /* not used */
	rxvq[index].irq_vector = irq_vector;

	/* Return queue handle */
	return &rxvq[index];
}
+
/* Defined later in this file; waits for the FPGA to drain a queue. */
static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);

/*
 * Disable an RX virtqueue: clear interrupt/sticky state in RX_UW_DATA,
 * disable the avail-ring monitor (AM), and wait for the FPGA to finish
 * processing in-flight packets.
 *
 * Only valid for UNMANAGED queues with a sane index.  Returns 0 on
 * success, -1 on any validation or register-write failure.
 */
int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
{
	if (!rx_vq) {
		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
		return -1;
	}

	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;

	if (rx_vq->index >= MAX_VIRT_QUEUES)
		return -1;

	if (rx_vq->usage != UNMANAGED)
		return -1;

	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);

	/* If ISTK is set, make sure to unset it */
	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
			   rx_vq->vq_type == PACKED_RING ?
			   (uint64_t)rx_vq->desc_struct_phys_addr :
			   (uint64_t)rx_vq->used_struct_phys_addr,
			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
			   0) != 0)
		return -1;

	/* Disable AM */
	rx_vq->am_enable = RX_AM_DISABLE;
	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
			   (uint64_t)rx_vq->avail_struct_phys_addr,
			   rx_vq->am_enable, rx_vq->host_id,
			   PACKED(rx_vq->vq_type), 0) != 0)
		return -1;

	/* let the FPGA finish packet processing */
	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
		return -1;

	return 0;
}
+
/*
 * (Re-)enable an RX virtqueue: program RX_UW_DATA with interrupt settings
 * derived from the queue's irq_vector (sticky interrupt when a valid MSI-X
 * vector is configured), then enable the avail-ring monitor (AM).
 *
 * Only valid for UNMANAGED queues with a sane index.  Returns 0 on
 * success, -1 on any validation or register-write failure.
 */
int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
{
	uint32_t int_enable;
	uint32_t vec;
	uint32_t istk;

	if (!rx_vq) {
		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
		return -1;
	}

	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;

	if (rx_vq->index >= MAX_VIRT_QUEUES)
		return -1;

	if (rx_vq->usage != UNMANAGED)
		return -1;

	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);

	/* Set ISTK if the queue has a valid MSI-X vector configured. */
	if (rx_vq->irq_vector >= 0 &&
			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
		int_enable = 1;
		vec = rx_vq->irq_vector;
		istk = 1;
	} else {
		int_enable = 0;
		vec = 0;
		istk = 0;
	}
	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
	       rx_vq->irq_vector);
	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
			   rx_vq->vq_type == PACKED_RING ?
			   (uint64_t)rx_vq->desc_struct_phys_addr :
			   (uint64_t)rx_vq->used_struct_phys_addr,
			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
			   int_enable, vec, istk) != 0)
		return -1;

	/* Enable AM */
	rx_vq->am_enable = RX_AM_ENABLE;
	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
			   (uint64_t)rx_vq->avail_struct_phys_addr,
			   rx_vq->am_enable, rx_vq->host_id,
			   PACKED(rx_vq->vq_type),
			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
		return -1;

	return 0;
}
+
+/*
+ * Disable an UNMANAGED Tx virt queue.
+ *
+ * Clears interrupt/ISTK state in the UW register data, disables the
+ * available-ring monitor (AM) and then waits for the FPGA to go idle on the
+ * queue.  Returns 0 on success, -1 on invalid queue or register error.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-enable an UNMANAGED Tx virt queue.
+ *
+ * Mirror of nthw_enable_rx_virt_queue(): rewrites the UW register data
+ * (with sticky interrupt when irq_vector is valid) and re-enables the
+ * available-ring monitor (AM).  Returns 0 on success, -1 on error.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Reprogram the egress port of a Tx virt queue via DR register data and then
+ * (re)enable the queue.
+ *
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/*
+	 * Validate before dereferencing - every other public
+	 * nthw_*_virt_queue() entry point performs this check.
+	 */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* NOTE(review): header argument is 0 here, not tx_vq->header - confirm */
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Per-port Tx QoS setup: thin forwarding wrapper around the DBS helper */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	const int rc = set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+
+	return rc;
+}
+
+/* Global Tx QoS rate setup: thin forwarding wrapper around the DBS helper */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	const int rc = set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+
+	return rc;
+}
+
+/* Sentinel: pointer value returned by HW is not (yet) valid */
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read the HW Rx queue pointer.  On success (*p_index) holds the pointer, or
+ * INDEX_PTR_NOT_VALID when HW flagged the value invalid.  Returns the status
+ * of the underlying register read.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0) {
+		if (valid)
+			*p_index = ptr;
+		else
+			*p_index = INDEX_PTR_NOT_VALID;
+	}
+	return status;
+}
+
+/*
+ * Read the HW Tx queue pointer.  On success (*p_index) holds the pointer, or
+ * INDEX_PTR_NOT_VALID when HW flagged the value invalid.  Returns the status
+ * of the underlying register read.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr = 0;
+	uint32_t hw_queue = 0;
+	uint32_t hw_valid = 0;
+	const int status = get_tx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/* Select which Rx queue subsequent dbs_get_rx_ptr() reads refer to */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Select which Tx queue subsequent dbs_get_tx_ptr() reads refer to */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Spin until the DBS idle register's BUSY bit clears, returning the last
+ * read status and the IDLE flag in *idle.  rx selects the Rx vs Tx register.
+ * NOTE(review): busy-polls with no timeout; relies on HW clearing BUSY or
+ * the register read failing - confirm this cannot spin forever.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Wait for the FPGA to finish processing on a queue before teardown.
+ *
+ * Repeatedly requests idle on the queue and polls until the IDLE flag is
+ * reported.  When the idle registers are unsupported (-ENOTSUP) falls back
+ * to a fixed 200 ms grace delay.  Returns 0 on success, -1 on failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an Rx virt queue in HW: clear UW data, disable and clear the
+ * available-ring monitor (AM), wait for the FPGA to drain, clear DR data,
+ * re-init the queue and reset the driver-side state.
+ * Returns 0 on success, -1 on failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/*
+	 * Check for NULL before any dereference; the original read
+	 * rxvq->mp_nthw_dbs first, making the check below it dead code.
+	 */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an Rx queue handed out as UNMANAGED; rejects anything else */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (!rxvq)
+		return -1;
+	if (rxvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/* Release a MANAGED Rx queue, freeing its driver-side packet buffer table */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (!rxvq || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is required */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a Tx virt queue in HW: clear UW data, disable and clear the
+ * available-ring monitor (AM), wait for the FPGA to drain, clear DR and QP
+ * data, re-init the queue and reset the driver-side state.
+ * Returns 0 on success, -1 on failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/*
+	 * Check for NULL before any dereference; the original read
+	 * txvq->mp_nthw_dbs first, making the check below it dead code.
+	 */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release a Tx queue handed out as UNMANAGED; rejects anything else */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (!txvq)
+		return -1;
+	if (txvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/* Release a MANAGED Tx queue, freeing its driver-side packet buffer table */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (!txvq || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is required */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Configure a Tx virt queue in HW (DR, UW, AM and QP register data per the
+ * DSF00094 DBS setup sequence), record its state in the global txvq[] table
+ * and return a handle (&txvq[index]).  Interrupts start disabled; queues
+ * needing them are enabled later via nthw_enable_tx_virt_queue().
+ * Returns NULL on any register write failure.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings program the descriptor address; split rings the used ring */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a MANAGED split-ring Rx queue: lay out avail/used/desc structures in
+ * the caller-supplied memory area, keep a copy of the packet buffer
+ * descriptors, and program the HW via nthw_setup_rx_virt_queue().
+ * Returns the queue handle, or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Previously memcpy'ed into an unchecked allocation */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a MANAGED split-ring Tx queue: lay out avail/used/desc structures in
+ * the caller-supplied memory area, keep a copy of the packet buffer
+ * descriptors, and program the HW via nthw_setup_tx_virt_queue().
+ * Returns the queue handle, or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Previously memcpy'ed into an unchecked allocation */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Shared packed-ring setup for Rx and Tx: computes the event-suppress struct
+ * offsets, pre-fills all descriptors (with buffer address/len for Rx), and
+ * allocates the driver-side copy of the packet buffer table.
+ * Returns 0 on success, -1 on failure (queue_size 0 or allocation failure).
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas - descriptors, both event-suppress structs, plus a
+	 * trailing int-per-entry area (purpose not visible here - see layout)
+	 */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a MANAGED packed-ring Rx queue: build the ring in host memory, then
+ * program the HW.  Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a MANAGED packed-ring Tx queue: build the ring in host memory, then
+ * program the HW.  Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on ring layout; unknown ring types yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on ring layout; unknown ring types yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* AVAIL/USED flag values derived from the current avail wrap counter */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by _num, toggling the avail wrap counter on wrap-around.
+ * Single-evaluation of arguments via local copies; do/while(0) for use as a
+ * statement.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by _num, toggling the used wrap counter on wrap-around */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Harvest up to n received buffers from an Rx queue into rp[].
+ *
+ * Split ring: reads the used ring, reassembling multi-segment (jumbo)
+ * packets so only whole packets are handed out.  Packed ring: walks
+ * descriptors in order (requires in-order completion from the FPGA).
+ *
+ * Returns the number of rp[] entries filled (segments); *nb_pkts receives
+ * the number of whole packets.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* All Rx buffers share the size of descriptor 0 */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			/* Captured length from the in-buffer packet header */
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Descriptor is complete when both flags match the wrap count */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/*
+		 * Barrier before publishing the first descriptor's flags.
+		 * NOTE(review): rte_rmb() is a read barrier; a write barrier
+		 * (rte_wmb) would normally order these stores - confirm intent.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Compiled-out verbose queue tracing hook */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n free Tx descriptors, returning the ring view in *cvq and
+ * the packet buffer table in *p_virt_addr.  *first_idx receives the index of
+ * the first reserved descriptor.  Returns the number of descriptors granted
+ * (0 for unknown ring types).
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* First serve out-of-band descriptors recorded on a prior call */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* Batched completion: desc->id may cover several entries */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		if (i > n) {
+			/* Overshoot - park the excess for the next call */
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled Tx packets (n_segs[i] segments each) to the FPGA.
+ *
+ * Split ring: appends to the avail ring and publishes the new avail idx
+ * after a full barrier.  Packed ring: publishes descriptor flags, deferring
+ * the first descriptor's flags until after a barrier so HW cannot observe a
+ * partially built chain.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Poll the HW Rx queue pointer for rxvq into *index.
+ * Retries up to 100000 times with 10 us sleeps while HW reports the value
+ * as not yet valid.  Returns 0 on success, -1 on read error or timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (rx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Poll the HW Tx queue pointer for txvq into *index.
+ * Retries up to 100000 times with 10 us sleeps while HW reports the value
+ * as not yet valid.  Returns 0 on success, -1 on read error or timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t retries_left = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (--retries_left == 0)
+			return -1;
+		usleep(10);
+		if (tx_ptr != INDEX_PTR_NOT_VALID)
+			break;
+	}
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/*
+ * Descriptor of one DMA-able memory area, holding both the address the HW
+ * uses and the CPU-visible mapping of the same region.
+ */
+struct nthw_memory_descriptor {
+	void *phys_addr; /* HW-side address (cast to ule64 when written into descriptors) */
+	void *virt_addr; /* CPU-visible mapping of the same area */
+	uint32_t len;    /* length of the area in bytes */
+};
+
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+#define SPLIT_RING 0
+#define PACKED_RING 1
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/*
+ * Packed-ring event suppression area (virtio 1.1 driver/device event
+ * suppression): the first 16 bits select a descriptor event offset and wrap
+ * counter, the second 16 bits carry the RING_EVENT_FLAGS_* mode.
+ */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/*
+	 * NOTE(review): this pack(push, 16) starts in the middle of the struct
+	 * and only its pop follows the closing brace - it cannot realign the
+	 * union on its own; confirm the intended packing of this structure.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ */
+/*
+ * Set the SPLIT-ring "next" link of descriptor `index`.
+ * No-op for PACKED rings: they have no next field and chain via
+ * contiguous descriptors instead.
+ */
+#define vq_set_next(_vq, index, nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/* OR the given flag bits into descriptor `index`, for either ring layout. */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/* Overwrite the flags of descriptor `index`, for either ring layout. */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Buffer part common to split and packed descriptors: address + length. */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/*
+ * Layout-agnostic view of a descriptor table; vq_type (SPLIT_RING or
+ * PACKED_RING) selects which union member is valid.
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..ce07d5a8cd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Debug helper: hex-dump a packet segment, 16 bytes per row, with an
+ * optional heading line.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter driver structure in the global adapter table,
+ * warning if an existing entry for the same slot is overwritten.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/*
+	 * adapter_no indexes g_p_drv[NUM_ADAPTER_MAX]; valid indices are
+	 * 0..NUM_ADAPTER_MAX-1, so equality is also out of range.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Find the registered adapter matching a PCI address (domain + bus).
+ * Returns NULL when no adapter is registered for that address.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *p_found = NULL;
+	int i;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		struct drv_s *p = g_p_drv[i];
+
+		if (!p)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(p->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(p->ntdrv.pciident) == addr.bus) {
+			p_found = p;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_found;
+}
+
+/* Decompose a packed pciident into a PCI address and look up the adapter. */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter info for the adapter identified by pciident to pfh.
+ * Returns the nt4ga_adapter_show_info() result, or -1 when no adapter
+ * is registered for the given PCI identifier.
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	/* Guard: lookup may fail, and p_drv was dereferenced unconditionally */
+	if (!p_drv)
+		return -1;
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Look up the DBS instance of the adapter at the given PCI address.
+ * Logs an error and returns NULL when no matching adapter is registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+		return NULL;
+	}
+
+	p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	return p_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at the given PCI address, or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when no adapter matches.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return FPGA_INFO_PROFILE_UNKNOWN;
+	}
+
+	return p_drv->ntdrv.adapter_info.fpga_info.profile;
+}
+
+/*
+ * kvargs handler: parse an unsigned 32-bit value (decimal, hex or octal,
+ * per strtoul base 0). Returns 0 on success, -1 on missing, non-numeric
+ * or out-of-range input. The original strtol(.., NULL, 0) silently
+ * accepted garbage as 0 and parsed signed into an unsigned target.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+
+	char *end = NULL;
+
+	errno = 0;
+	const unsigned long value = strtoul(value_str, &end, 0);
+
+	/* Reject empty/garbage strings and values that do not fit in 32 bits */
+	if (end == value_str || errno == ERANGE || value > UINT32_MAX)
+		return -1;
+	*(uint32_t *)extra_args = (uint32_t)value;
+	return 0;
+}
+
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+/*
+ * kvargs handler: parse a "<port>:<link speed Mbps>" pair, e.g. "1:10000".
+ * extra_args is a struct port_link_speed ** cursor into an array; the
+ * parsed pair is stored at the cursor and the cursor is advanced by one,
+ * so repeated invocations fill consecutive array entries.
+ * Returns 0 on success, -1 on missing value or missing ':' separator.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* advance the caller's cursor to the next array slot */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Gather per-queue Rx/Tx packet, byte and error counters from the SW queue
+ * structures into a DPDK rte_eth_stats block, after refreshing them via
+ * poll_statistics(). Totals are accumulated into the ipackets/ibytes/
+ * opackets/obytes/oerrors fields. Returns 0 on success, -1 on bad state.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* NOTE(review): the bound uses '>' - if if_index is a 0-based port
+	 * index, '>= NUM_ADAPTER_PORTS_MAX' would be expected; confirm.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue SW Rx/Tx counters and the missed counter for one port,
+ * and record the reset timestamp. Performed under the driver stat lock.
+ * Returns 0 on success, -1 on bad state or out-of-range interface number.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* NOTE(review): same '>' vs '>=' question as dpdk_stats_collect */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* Map an NT link speed enum to the matching ETH_SPEED_NUM_* value,
+ * ETH_SPEED_NUM_NONE when the speed is not recognized.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex enum to the DPDK duplex value; 0 when unknown. */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * DPDK link_update callback: refresh link status/speed/duplex in
+ * eth_dev->data->dev_link. Virtual and override ports report link state
+ * from the negotiation status only; physical ports query the adapter.
+ * A stopped device is always reported as link-down. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		/* Virtual/override ports have no PHY; link = negotiation state */
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		/* Device stopped: report a down link with no speed */
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/* DPDK stats_get callback: fill *stats from this port's SW counters. */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+
+	dpdk_stats_collect(internals, stats);
+	return 0;
+}
+
+/* DPDK stats_reset callback: zero this port's SW counters. */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+
+	dpdk_stats_reset(internals, &internals->p_drv->ntdrv,
+			 internals->if_index);
+	return 0;
+}
+
+/* Convert an NT link-speed capability bitmask to the DPDK
+ * ETH_LINK_SPEED_* capability bitmask, one table entry per speed.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < RTE_DIM(speed_map); i++)
+		if (nt_link_speed_capa & speed_map[i].nt_bit)
+			eth_speed_capa |= speed_map[i].eth_bit;
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * DPDK dev_infos_get callback: report queue counts, MTU limits, link-speed
+ * capabilities and RSS offload capabilities for this port. Inline-profile
+ * FPGAs have a larger minimum MTU (MIN_MTU_INLINE). Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use key, so it is just a fake key length to
+	 * meet testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet - which may span several virtqueue segments of
+ * SG_HW_RX_PKT_BUFFER_SIZE bytes each - into an mbuf, allocating and
+ * chaining additional mbufs from mb_pool when the data does not fit.
+ *
+ * hw_recv points at the packet's first segment (whose first SG_HDR_SIZE
+ * bytes are the descriptor header and are skipped); at most max_segs
+ * segments are consumed; data_len is the total captured length including
+ * the header.
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on mbuf
+ * allocation failure or if the data did not terminate within max_segs.
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		if (cpy_size > cpto_size) {
+			/* current mbuf cannot hold this whole segment: fill it,
+			 * then chain new mbufs for the rest
+			 */
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
/*
 * Copy one (possibly multi-segment) mbuf into one or more virtqueue buffers.
 *
 * Two independent segmentations are handled at the same time:
 * 1. mbuf packet may be segmented (chained via m->next)
 * 2. the virtqueue buffer size may be too small and may need to be segmented
 *
 * @cvq_desc:     combined virtqueue descriptor table being filled in
 * @vq_descr_idx: index of the first descriptor to use; advanced internally as
 *                extra descriptors are chained on with VIRTQ_DESC_F_NEXT
 * @vq_bufs:      per-descriptor host buffer memory (virt addresses)
 * @max_segs:     maximum number of virtqueue descriptors this packet may use
 * @mbuf:         source packet
 *
 * Returns the number of virtqueue segments consumed, or -1 if the packet did
 * not fit within max_segs descriptors.
 *
 * The first descriptor is pre-charged with SG_HDR_SIZE bytes for the packet
 * header written by the caller; payload copying starts after that header.
 */
int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
			   uint16_t vq_descr_idx,
			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
			   struct rte_mbuf *mbuf)
{
	/*
	 * 1. mbuf packet may be segmented
	 * 2. the virtqueue buffer size may be too small and may need to be segmented
	 */

	char *data = rte_pktmbuf_mtod(mbuf, char *);
	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;

	/* remain tracks unwritten bytes of the whole packet; cpy_size of the
	 * current mbuf segment only
	 */
	int remain = mbuf->pkt_len;
	int cpy_size = mbuf->data_len;

#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
	printf("src copy size %i\n", cpy_size);
#endif

	struct rte_mbuf *m = mbuf;
	/* room left in the current vq buffer (first one holds the header too) */
	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;

	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;

	int cur_seg_num = 0; /* start from 0 */

	while (m) {
		/* Can all data in current src segment be in current dest segment */
		if (cpy_size > cpto_size) {
			/* No: fill the current vq buffer, then chain on new
			 * descriptors until this mbuf segment is drained
			 */
			int new_cpy_size = cpto_size;

#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
			       cur_seg_num,
			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
			       (uint64_t)dst -
			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
			       new_cpy_size);
#endif
			rte_memcpy((void *)dst, (void *)data, new_cpy_size);

			cvq_desc->b[vq_descr_idx].len += new_cpy_size;

			remain -= new_cpy_size;
			cpy_size -= new_cpy_size;

			data += new_cpy_size;

			/*
			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
			 * mbuf
			 */
			do {
				/* Chain current descriptor to a fresh one */
				vq_add_flags(cvq_desc, vq_descr_idx,
					     VIRTQ_DESC_F_NEXT);

				int next_vq_descr_idx =
					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);

				vq_set_next(cvq_desc, vq_descr_idx,
					    next_vq_descr_idx);

				vq_descr_idx = next_vq_descr_idx;

				vq_set_flags(cvq_desc, vq_descr_idx, 0);
				vq_set_next(cvq_desc, vq_descr_idx, 0);

				/* Out of descriptors: the final check after the
				 * outer loop reports the -1 error
				 */
				if (++cur_seg_num > max_segs)
					break;

				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;

				int actual_cpy_size = (cpy_size > cpto_size) ?
						      cpto_size :
						      cpy_size;
#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
				       cur_seg_num, actual_cpy_size,
				       (uint64_t)dst -
				       (uint64_t)vq_bufs[vq_descr_idx]
				       .virt_addr);
#endif
				rte_memcpy((void *)dst, (void *)data,
					   actual_cpy_size);

				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;

				remain -= actual_cpy_size;
				cpy_size -= actual_cpy_size;
				cpto_size -= actual_cpy_size;

				data += actual_cpy_size;

			} while (cpy_size && remain);

		} else {
			/* All data from this segment can fit in current virtqueue buffer */
#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
			       cur_seg_num, cpy_size,
			       (uint64_t)dst -
			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
#endif

			rte_memcpy((void *)dst, (void *)data, cpy_size);

			cvq_desc->b[vq_descr_idx].len += cpy_size;

			remain -= cpy_size;
			cpto_size -= cpy_size;
		}

		/* Packet complete - all segments from current mbuf has been copied */
		if (remain == 0)
			break;
		/* increment dst to data end */
		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
		      cvq_desc->b[vq_descr_idx].len;

		m = m->next;
		if (!m) {
			/* pkt_len promised more data than the segment chain held */
			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
			break;
		}

		/* Prepare for next mbuf segment */
		data = rte_pktmbuf_mtod(m, char *);
		cpy_size = m->data_len;
	};

	/* cur_seg_num was a 0-based index; convert to a count */
	cur_seg_num++;
	if (cur_seg_num > max_segs) {
		NT_LOG(ERR, ETHDEV,
		       "Did not receive correct number of segment for a whole packet");
		return -1;
	}

	return cur_seg_num;
}
+
/*
 * Burst-transmit callback for the scatter-gather (SCG) datapath.
 *
 * Copies up to nb_pkts mbufs from bufs into HW virtqueue buffers and hands
 * them to the NIC. May send fewer packets than requested when the HW has
 * fewer free virtqueue segments (back pressure). Transmitted mbufs are freed
 * here; untransmitted ones are left to the caller.
 *
 * Returns the number of packets actually queued for transmission.
 */
static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts)
{
	uint16_t pkt;
	uint16_t first_vq_descr_idx = 0;

	struct nthw_cvirtq_desc cvq_desc;

	struct nthw_memory_descriptor *vq_bufs;

	struct ntnic_tx_queue *tx_q = queue;

	int nb_segs = 0, i;
	int pkts_sent = 0;
	/* per-packet segment counts, needed by the release call at the end */
	uint16_t nb_segs_arr[MAX_TX_PACKETS];

	/* NOTE(review): kill_pmd appears to be a file-scope shutdown flag —
	 * when set, drop everything; confirm against the rest of the file.
	 */
	if (kill_pmd)
		return 0;

	if (nb_pkts > MAX_TX_PACKETS)
		nb_pkts = MAX_TX_PACKETS;

#ifdef TX_CHAINING_DEBUG
	printf("\n---------- DPDK Tx ------------\n");
#endif

	/*
	 * count all segments needed to contain all packets in vq buffers
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* pad short frames up to 60 bytes (presumably the minimum
		 * Ethernet frame size excluding FCS — confirm)
		 */
		if (bufs[i]->pkt_len < 60) {
			bufs[i]->pkt_len = 60;
			bufs[i]->data_len = 60;
		}

		/* build the num segments array for segmentation control and release function */
		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);

		nb_segs_arr[i] = vq_segs;
		nb_segs += vq_segs;
	}
	if (!nb_segs)
		goto exit_out;

#ifdef TX_CHAINING_DEBUG
	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
	       tx_q->port, nb_segs, nb_pkts,
	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
	       rte_pktmbuf_headroom(bufs[0]));
#endif

	int got_nb_segs =
		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
	if (!got_nb_segs) {
#ifdef TX_CHAINING_DEBUG
		printf("Zero segments got - back pressure from HW\n");
#endif
		goto exit_out;
	}

	/*
	 * we may get less vq buffers than we have asked for
	 * calculate last whole packet that can fit into what
	 * we have got
	 */
	while (got_nb_segs < nb_segs) {
		/* drop trailing packets until the remaining ones fit */
		if (!--nb_pkts)
			goto exit_out;
		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
		if (nb_segs <= 0)
			goto exit_out;
	}

	/*
	 * nb_pkts & nb_segs, got it all, ready to copy
	 */
	int seg_idx = 0;
	int last_seg_idx = seg_idx;

	for (pkt = 0; pkt < nb_pkts; ++pkt) {
		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);

		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
		vq_set_next(&cvq_desc, vq_descr_idx, 0);

		struct _pkt_hdr_tx *hdr_tx =
			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
		/* Set the header to all zeros */
		memset(hdr_tx, 0, SG_HDR_SIZE);

		/*
		 * Set the NT DVIO0 header fields
		 *
		 * Applicable for Vswitch only.
		 * For other product types the header values are "don't care" and we leave them as
		 * all zeros.
		 */
		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
			hdr_tx->bypass_port = tx_q->target_id;

			/* set packet length */
			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
		}

#ifdef TX_CHAINING_DEBUG
		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);

#ifdef TX_SRC_DUMP_PKTS_DEBUG
		{
			struct rte_mbuf *m = bufs[pkt];
			int ii;

			printf("Dump src mbuf:\n");
			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
				printf("  seg %i len %i\n", ii, m->data_len);
				printf("  seg dump:\n");
				dump_packet_seg("mbuf seg:",
						rte_pktmbuf_mtod(m, uint8_t *),
						m->data_len);
				m = m->next;
			}
		}
#endif

#endif

		/* Fast path: single-segment mbuf into a single vq buffer */
		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
#ifdef TX_CHAINING_DEBUG
			printf("Simple copy %i bytes - mbuf -> vq\n",
			       bufs[pkt]->pkt_len);
#endif
			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
				SG_HDR_SIZE),
				rte_pktmbuf_mtod(bufs[pkt], void *),
				bufs[pkt]->pkt_len);

			cvq_desc.b[vq_descr_idx].len =
				bufs[pkt]->pkt_len + SG_HDR_SIZE;

			seg_idx++;
		} else {
			/* Segmented copy; returns segments used or -1 */
			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
							      vq_descr_idx, vq_bufs,
							      nb_segs - last_seg_idx, bufs[pkt]);
			if (cpy_segs < 0)
				break;
			seg_idx += cpy_segs;
		}

#ifdef TX_DST_DUMP_PKTS_DEBUG
		int d, tot_size = 0;

		for (d = last_seg_idx; d < seg_idx; d++)
			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
		       tx_q->queue.hw_id);
		for (d = last_seg_idx; d < seg_idx; d++) {
			char str[32];

			sprintf(str, "Vq seg %i:", d - last_seg_idx);
			dump_packet_seg(str,
					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
		}
#endif

		last_seg_idx = seg_idx;
		rte_pktmbuf_free(bufs[pkt]);
		pkts_sent++;
	}

#ifdef TX_CHAINING_DEBUG
	printf("\nTx final vq setup:\n");
	for (int i = 0; i < nb_segs; i++) {
		int idx = VIRTQ_DESCR_IDX(i);

		if (cvq_desc.vq_type == SPLIT_RING) {
			printf("virtq descr %i, len %i, flags %04x, next %i\n",
			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
			       cvq_desc.s[idx].next);
		}
	}
#endif

exit_out:

	if (pkts_sent) {
#ifdef TX_CHAINING_DEBUG
		printf("Release virtq segs %i\n", nb_segs);
#endif
		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
	}
	return pkts_sent;
}
+
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Just allocate 1MB to hold all combined descr rings */
+		size = 0x100000;
+		void *virt = rte_malloc_socket("VirtQDescr", size, 4096,
+					       eth_dev->data->numa_node);
+		if (!virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0)
+			return -1;
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			rte_free(virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0)
+			return -1;
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (!hwq || hwq->vf_num == 0)
+		return;
+	hwq->vf_num = 0;
+}
+
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	int vf_num = hwq->vf_num;
+
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+
+	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[queue_id];
+
+	deallocate_hw_virtio_queues(&tx_q->hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[queue_id];
+
+	deallocate_hw_virtio_queues(&rx_q->hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	if (tx_queue_id > internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+	int ret = -1;
+
+	if (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE &&
+			mtu <= MAX_MTU)
+		ret = flow_set_mtu_inline(flw_dev, internals->port, mtu);
+	return ret ? -EINVAL : 0;
+}
+
/*
 * ethdev op: set the MTU for a port.
 *
 * Validates mtu against [MIN_MTU, MAX_MTU], then programs the FPGA EPP
 * module. Virtual ports program the exception queue at MAX_MTU plus every
 * per-VM queue at the requested mtu; physical ports program the exception
 * queue (as a virtual port) at MAX_MTU plus the physical port at mtu.
 *
 * Returns 0 on success; -EINVAL on range/type errors, otherwise whatever
 * nthw_epp_set_mtu() returned.
 *
 * NOTE(review): retval is initialized to positive EINVAL while every explicit
 * error path uses -EINVAL; the initializer is dead on all visible paths, but
 * the sign inconsistency is worth confirming against nthw_epp_set_mtu()'s
 * return convention.
 */
static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	/*struct ntnic_tx_queue *tx_q = internals->txq; */
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
	int retval = EINVAL;

	if (mtu < MIN_MTU || mtu > MAX_MTU)
		return -EINVAL;

	if (internals->type == PORT_TYPE_VIRTUAL) {
		/* set MTU on exception to MAX_MTU */
		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->rxq_scg[0]
			.queue
			.hw_id, /* exception tx queue hw_id to OVS */
			MAX_MTU, /* max number of bytes allowed for a given port. */
			internals->type); /* port type */

		if (retval)
			return retval;

		uint i;

		/* apply the requested MTU to every virtual-port queue */
		for (i = 0; i < internals->vpq_nb_vq; i++) {
			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
				internals->vpq[i].hw_id, /* tx queue hw_id */
				mtu, /* max number of bytes allowed for a given port. */
				internals->type); /* port type */
			if (retval)
				return retval;

			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
			       mtu, internals->vpq[i].hw_id);
		}
	} else if (internals->type == PORT_TYPE_PHYSICAL) {
		/* set MTU on exception to MAX_MTU */
		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->rxq_scg[0]
			.queue
			.hw_id, /* exception tx queue hw_id to OVS */
			MAX_MTU, /* max number of bytes allowed for a given port. */
			PORT_TYPE_VIRTUAL); /* port type */
		if (retval)
			return retval;

		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->port, /* port number */
			mtu, /* max number of bytes allowed for a given port. */
			internals->type); /* port type */

		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
		       internals->port);
	} else {
		/* PORT_TYPE_OVERRIDE and anything else: unsupported */
		NT_LOG(DBG, ETHDEV,
		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
		       internals->port, internals->type);
		retval = -EINVAL;
	}
	return retval;
}
+
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return;
+	}
+	(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+}
+
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	eth_addrs[index] = *mac_addr;
+
+	return 0;
+}
+
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	eth_addrs[0U] = *mac_addr;
+
+	return 0;
+}
+
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/* The device is ALWAYS running promiscuous mode. */
+	eth_dev->data->promiscuous ^= ~eth_dev->data->promiscuous;
+	return 0;
+}
+
/*
 * DPDK dev_start callback.
 *
 * Virtual and override ports have no FPGA link to wait for; their link is
 * reported up immediately.  For physical ports the function polls the
 * adapter for link-up for at most ~5 seconds (50 x 100 ms) so that an
 * application does not start transmitting before the FPGA port is ready,
 * then applies any requested loopback modes (bit 0 = host loopback,
 * bit 1 = line loopback).
 *
 * A link timeout is only logged, not treated as an error.
 *
 * @param eth_dev  the DPDK ethernet device being started
 * @return 0 (always succeeds)
 */
static int eth_dev_start(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
	       __LINE__, internals->n_intf_no, internals->if_index);

	if (internals->type == PORT_TYPE_VIRTUAL ||
			internals->type == PORT_TYPE_OVERRIDE) {
		/* No physical link to wait for */
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
	} else {
		/*
		 * wait for link on port
		 * If application starts sending too soon before FPGA port is ready, garbage is
		 * produced
		 */
		int loop = 0;

		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
				ETH_LINK_DOWN) {
			/* break out after 5 sec */
			if (++loop >= 50) {
				NT_LOG(DBG, ETHDEV,
				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
				       __func__, internals->n_intf_no);
				break;
			}
			usleep(100000);
		}
		assert(internals->n_intf_no ==
		       internals->if_index); /* Sanity check */
		if (internals->lpbk_mode) {
			/* bit 0: loop traffic back at the host side */
			if (internals->lpbk_mode & 1 << 0) {
				nt4ga_port_set_loopback_mode(p_adapter_info,
							     n_intf_no,
							     NT_LINK_LOOPBACK_HOST);
			}
			/* bit 1: loop traffic back at the line side */
			if (internals->lpbk_mode & 1 << 1) {
				nt4ga_port_set_loopback_mode(p_adapter_info,
							     n_intf_no,
							     NT_LINK_LOOPBACK_LINE);
			}
		}
	}
	return 0;
}
+
/*
 * DPDK dev_stop callback.
 *
 * For every port type except virtual: releases the managed RX/TX
 * virt-queues for all configured queues and brings the hardware port
 * down (admin state off, link status off, link speed unknown, loopback
 * off).  Finally the DPDK link status is marked down.
 *
 * @param eth_dev  the DPDK ethernet device being stopped
 * @return 0 (always succeeds)
 */
static int eth_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
	       internals->type);

	if (internals->type != PORT_TYPE_VIRTUAL) {
		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
		struct ntnic_tx_queue *tx_q = internals->txq_scg;

		uint q;

		/* Release the scatter-gather virt-queues before touching
		 * the port hardware state.
		 */
		for (q = 0; q < internals->nb_rx_queues; q++)
			nthw_release_managed_rx_virt_queue(rx_q[q].vq);

		for (q = 0; q < internals->nb_tx_queues; q++)
			nthw_release_managed_tx_virt_queue(tx_q[q].vq);

		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
					  NT_LINK_SPEED_UNKNOWN);
		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
					     NT_LINK_LOOPBACK_OFF);
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
/*
 * DPDK flow_ops_get callback: expose this PMD's rte_flow operations
 * table (_dev_flow_ops, defined elsewhere in this file).
 * Always returns 0.
 */
static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_ops **ops)
{
	*ops = &_dev_flow_ops;
	return 0;
}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * DPDK eth_dev_ops table for the ntnic PMD.
 * Deliberately not const: .mtu_set is filled in at probe time when the
 * FPGA exposes an EPP module (see nthw_pci_dev_init).
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL, /* assigned at probe time if EPP is present */
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL, /* device cannot leave promiscuous mode */
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
/*
 * File-scope context for virtual-PF representor port setup
 * (populated by setup_virtual_pf_representor_base, called from
 * nthw_pci_dev_init).
 */
static struct {
	struct rte_pci_device *vpf_dev; /* presumably the backing PF PCI device - confirm in setup code */
	struct rte_eth_devargs eth_da;  /* parsed representor devargs */
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED]; /* per virtual port - semantics set elsewhere */
	uint16_t pf_backer_port_id;     /* port id of the backing PF eth_dev */
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Release all virt-queue resources created during device init.
+ * Walks the global pmd_intern_base list releasing the Tx/Rx queues of
+ * every port, then drains the rel_virt_queue[] deferred-release table,
+ * and finally removes the exception-path VFIO mapping.
+ * Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Per-port queues: release in reverse creation order. */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Queues registered for deferred release (VF/vDPA side); the
+	 * rx/managed flags select the matching release routine.
+	 */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: flag the PMD for shutdown; any other signal is
+ * re-raised with the previously installed handler restored.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		kill_pmd = 1;
+		return;
+	}
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/*
+ * Background thread started by init_shutdown(): polls the kill_pmd flag
+ * set by the SIGINT handler, runs the driver teardown, then restores the
+ * original SIGINT handler and re-raises the signal so the application's
+ * default behavior takes over.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	/* dummy eth_dev: nthw_pci_dev_deinit() ignores its argument */
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the ctrl+C shutdown path: hook SIGINT and spawn the thread
+ * that performs driver teardown when the flag is raised. Also performs
+ * a one-time rough estimate of the TSC frequency used for stat-poll
+ * rate limiting. Returns 0 on success, -1 if the thread cannot start.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* Fix: the pthread_create() result was silently ignored; on failure
+	 * SIGINT would stay rerouted with no thread servicing kill_pmd.
+	 */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		signal(SIGINT, previous_handler);
+		NT_LOG(ERR, ETHDEV, "%s: Failed to start shutdown handler\n",
+		       __func__);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point for the ntnic PMD.
+ * Validates the EAL environment (PCI bus present, VFIO IOMMU enabled,
+ * hugepages available, IOVA mode), logs the device identity, filters out
+ * PCIe-bifurcation secondary endpoints, and hands over to
+ * nthw_pci_dev_init(). Returns the init result, or -1 on any failed
+ * precondition.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* Environment gates: each failed check aborts the probe. */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	/* Non-PA IOVA mode is allowed but warned about (performance). */
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/* On bifurcated NT200A01/NT50B01 boards only the primary endpoint
+	 * (subsystem_device_id == 0x01) is driven by this PMD.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove entry point: delegates to the generic ethdev PCI removal,
+ * which calls nthw_pci_dev_deinit() for the device's ports.
+ */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+}
+
+/*
+ * NT-log backend init: register one EAL logtype per NT log module
+ * (default level INFO) and remember the ids for later translation.
+ * Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END; ++module) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+		const char *const eal_name = nt_log_module_eal_name[idx];
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(eal_name,
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+__rte_format_printf(3, 0)
+/*
+ * NT-log backend sink: map the NT severity and module id onto their RTE
+ * equivalents and forward the message to rte_vlog().
+ */
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	/* Severity mapping; anything unrecognized logs as DEBUG. */
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	/* Known NT modules use their registered EAL logtype; ids outside
+	 * the NT range are passed through unchanged.
+	 */
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END)
+		rte_module = (uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	else
+		rte_module = module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/*
+ * NT-log backend query: 1 if the module's registered logtype is at
+ * DEBUG level, 0 if not, -1 for a module id outside the NT range.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module >= NT_LOG_MODULE_END || module < NT_LOG_MODULE_GENERAL)
+		return -1;
+
+	return rte_log_get_level(nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)]) ==
+	       RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* Constructor: install the rte_log-backed implementation of the NT
+ * logging abstraction before any driver code emits log messages.
+ */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor for the ntnic PMD; requires BAR mapping. */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record a virt-queue in the deferred-release table so device deinit
+ * can release it with the correct (rx/tx, managed/unmanaged) routine.
+ * Returns 0 on success, -1 if the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+	return -1; /* no free slot */
+}
+
+/*
+ * Remove a virt-queue from the deferred-release table.
+ * Returns 0 when found and cleared, -1 when the queue is not present.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Find the pmd_internals instance bound to the given vhost id, or NULL
+ * if no port in the global list matches.
+ */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *node;
+
+	for (node = pmd_intern_base; node != NULL; node = node->next) {
+		if (node->vhid == vhid)
+			return node;
+	}
+	return NULL;
+}
+
+/*
+ * Find the pmd_internals instance whose vhost socket path matches the
+ * given path, or NULL if none does.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	/* Fix: the search trace used a bare printf() to stdout, bypassing
+	 * the driver log, and evaluated strcmp() twice per node.
+	 */
+	while (intern) {
+		const int cmp = strcmp(intern->vhost_path, path);
+
+		NT_LOG(DBG, VDPA, "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path, cmp);
+		if (cmp == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues=[port:nvq,port:nvq;...]" devargs fragment and
+ * fill portq[port] = nvq for every in-range pair. The input string is
+ * modified in place; malformed input (no '[' or ']') is ignored.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	char *saveptr = NULL;
+	char *tok;
+
+	/* Skip ahead to the opening bracket of the list. */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+
+	/* Locate the closing bracket from the end and cut the string there. */
+	char *str_e = &str[strlen(str)];
+
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Fix: use reentrant strtok_r() instead of strtok(); the original
+	 * relied on strtok()'s hidden static state, which breaks if any
+	 * other thread tokenizes concurrently.
+	 */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			/* NOTE(review): the bound excludes nvq == MAX_QUEUES;
+			 * confirm whether the full queue count should be legal.
+			 */
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor-related devargs for a PF device. Extracts the
+ * optional "portqueues" mapping (and strips it from the argument string
+ * so rte_eth_devargs_parse() can handle the rest), then records the
+ * device and its representor list in the global 'rep' state.
+ * Returns the number of representor ports, or -1 on parse failure.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			/* rte_eth_devargs_parse() returns a negative errno */
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a VF/representor port, optionally with
+ * zero-initialized private data on the device's NUMA node. Assigns the
+ * representor id either from the PF's parsed representor list (in
+ * order, tracked by the static next_rep_p counter) or from the VF
+ * number. On return *n_vq holds the queue count configured for that
+ * representor via the "portqueues" devargs, defaulting to 1.
+ * Returns NULL on allocation failure.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Representor of this PF: take the next id from the parsed list
+	 * and back it with the PF's port id; otherwise use the VF number.
+	 */
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Name of the underlying rte_device, or NULL if dev or the name is unset. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev && dev->device.name) ? dev->device.name : NULL;
+}
+
+/* devargs keys accepted by the virtual-port kvargs parser:
+ * "vlan" - VLAN id for the port; "sep" - use a separate vhost socket dir.
+ */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and wire up one virtual (VF/representor) port: parse devargs,
+ * allocate the ethdev and its private data, allocate HW queues, attach
+ * to the flow device (directly for the VSWITCH profile, via the in_port
+ * PF's flow device otherwise), start the vDPA relay on the vhost socket
+ * path, and link the new internals into the global pmd_intern_base list.
+ * Returns 0 on success, -1 on failure.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* Optional devargs: "vlan=<id>" and "sep=<0|1>". */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	/* NOTE(review): on the error path below only 'data' and 'internals'
+	 * are freed - the allocated ethdev is not released; verify intended.
+	 */
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live in [MAX_NTNIC_PORTS, VIRTUAL_TUNNEL_PORT_OFFSET) */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* VSWITCH: this port gets its own flow device. */
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* Other profiles: share the flow device of the physical
+		 * in_port (port & 1) and add this port's queues to it.
+		 */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	/* vhost socket path; "sep" devargs selects a per-port sub-dir.
+	 * NOTE(review): sprintf() into path[128] - confirm DVIO_VHOST_DIR_NAME
+	 * is short enough that this cannot overflow; snprintf() would be safer.
+	 */
+	char path[128];
+
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Swap in a private copy of the ethdev data with this port's
+	 * internals, link state and MAC address.
+	 */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append to the global internals list. */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0; /* never any packets to deliver */
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0; /* nothing is ever transmitted here */
+}
+
+/*
+ * Create the DPDK ethdev for a virtual-function port and install its
+ * Rx/Tx burst functions: dummies for PORT_TYPE_OVERRIDE (traffic flows
+ * through VF/vDPA, not SCG), real SCG handlers otherwise.
+ * Returns 0 on success, -1 if the port could not be initialized.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the DPDK ethdev of a virtual-function port: close all vDPA
+ * devices, free the private data and data struct allocated at create
+ * time, and release the ethdev port.
+ * Returns 0 on success, -1 if pci_dev is NULL or the ethdev is unknown.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Tx hash-distribution presets: percentage of traffic on port 0. */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* lag_set_config() arguments: LAG_NO_TX is the "disable Tx" value;
+ * the *_INDEX values select which table entry to write.
+ */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* State-transition action: nothing to reconfigure. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links up: split the Tx hash distribution 50/50. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 up: send all traffic on port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 up: send all traffic on port 1. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/*
+ * Both links down: disable Tx on the port-0 and hash entries.
+ * Returns 0 when both lag_set_config() calls succeed (sum of results).
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	int retval = 0;
+
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	retval += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
+	retval += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
+
+	return retval;
+}
+
+/* Query the physical link status of the given port via the adapter
+ * owned by the LAG config's internals. Returns true when the link is up.
+ */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/*
+ * Combined link state of both LAG ports as a 2-bit value:
+ * bit 0 = port 0 up, bit 1 = port 1 up (matches enum lag_state_e).
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t p0_up = lag_get_link_status(config, 0);
+	const uint8_t p1_up = lag_get_link_status(config, 1);
+
+	return (uint8_t)((p1_up << 1) | p0_up);
+}
+
+/*
+ * Make the configured primary port the active one: give it the full
+ * Tx hash distribution and block Rx on the backup port.
+ * Returns 0 when both HW updates succeed (sum of results).
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	/* Port 0 carries 100% of the distribution when it is the primary,
+	 * 0% when port 1 is; the other port is Rx-blocked.
+	 */
+	const bool primary_is_port0 = (config->primary_port == 0);
+	const uint8_t port_0_distribution = primary_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		primary_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+
+	int retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Make the configured backup port the active one: give it the full
+ * Tx hash distribution and block Rx on the primary port.
+ * Returns 0 when both HW updates succeed (sum of results).
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	/* Port 0 carries 100% of the distribution when it is the backup,
+	 * 0% when port 1 is; the other port is Rx-blocked.
+	 */
+	const bool backup_is_port0 = (config->backup_port == 0);
+	const uint8_t port_0_distribution = backup_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		backup_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+
+	/* Tx only on the backup port */
+	int retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup LAG monitor loop (runs in the LAG thread): starts on
+ * the primary port, polls link status every 500 ms, fails over to the
+ * backup port when the primary goes down and its link is up, and fails
+ * back when the primary recovers. Exits (returning 0) when
+ * config->lag_thread_active is cleared.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	/* 1 while traffic is flipped to the backup port */
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Handler invoked when the LAG link state changes from src to dst. */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+/* Encoding matches lag_get_status(): bit0 = port 0 up, bit1 = port 1 up.
+ * (0b binary literals are a GCC/Clang extension, standard in C23.)
+ */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One row of the state-transition table used by lag_active_active(). */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/* Full 16-entry transition table (all 4x4 state pairs are covered). */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Find the handler for a (current_state -> new_state) transition by
+ * scanning the actions[] table; NULL if no entry matches.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const size_t n_actions = sizeof(actions) / sizeof(actions[0]);
+	size_t idx;
+
+	for (idx = 0; idx < n_actions; idx++) {
+		if (actions[idx].src_state == current_state &&
+				actions[idx].dst_state == new_state)
+			return actions[idx].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active (802.3ad-style) LAG monitor loop: starts balanced
+ * 50/50 with no blocked ports, then every 500 ms reads the combined
+ * link state and runs the transition handler from the actions[] table.
+ * Exits (returning 0) when config->lag_thread_active is cleared.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/* Fix: lookup_action() returns NULL for a transition missing
+		 * from the table; the original called it unconditionally,
+		 * which would crash on any table gap.
+		 */
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG thread entry point (started from nthw_pci_dev_init): dispatches
+ * to the monitor loop matching the configured bonding mode. Unsupported
+ * modes are reported on stderr and the thread exits immediately.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+/*
+ * Compatibility shim: DPDK 22.07 renamed the ETH_* constants to
+ * RTE_ETH_*. On new DPDK versions, re-create the old names as aliases
+ * of the new ones so the driver code can keep using the short forms.
+ */
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+/* The following have no pre-22.07 short form in use here, so no #undef. */
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+/* Per-port MAC address table sizes */
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+/* Scatter-gather HW ring sizes and per-packet buffer sizes (bytes) */
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+/*
+ * Size in bytes of the scatter-gather descriptor header; must match
+ * sizeof(struct _pkt_hdr_rx/_pkt_hdr_tx) (checked at compile time
+ * below).
+ */
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+/* Map a Tx packet index to a virtqueue descriptor index (ring wrap). */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+/* (duplicate #define SG_HDR_SIZE removed; defined once above) */
+
+/*
+ * 12-byte scatter-gather Rx descriptor header. The size is verified at
+ * compile time against SG_HDR_SIZE by check_sg_pkt_rx_hdr_size below.
+ * NOTE(review): field semantics inferred from names only — confirm
+ * against the FPGA packet descriptor specification.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/*
+ * 12-byte scatter-gather Tx descriptor header; size verified against
+ * SG_HDR_SIZE by check_sg_pkt_tx_hdr_size below.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size. */
+/* A negative array size here forces a compile error on size mismatch. */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+/* Generic opaque handle. */
+typedef void *handle_t;
+
+/* Per-queue hardware resources: owning VF and DMA memory descriptors. */
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* State and statistics for one Rx queue. */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* HW resources backing this queue */
+	struct nthw_virt_queue *vq; /* associated virtqueue */
+	int nb_hw_rx_descr;
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* State and statistics for one Tx queue. */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* HW resources backing this queue */
+	struct nthw_virt_queue *vq; /* associated virtqueue */
+	int nb_hw_tx_descr;
+	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+/* Fixed-capacity list of 32-bit values (capacity = MAX_QUEUES). */
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count; /* number of valid entries in value[] */
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* LAG management thread id */
+	uint8_t lag_thread_active; /* cleared to request thread shutdown */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+/* Supported bonding modes (values follow DPDK bonding mode numbering). */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+
+/* A registered meter profile, kept in a linked list per port. */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* A meter instance referencing one of the profiles above. */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtio transport layout for a virtual port. */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+/*
+ * Per-port private driver state, pointed to by the rte_eth_dev.
+ * One instance exists per exposed ethdev port; instances are chained
+ * through the 'next' pointer.
+ */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev; /* flow engine device handle */
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode; /* loopback mode */
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config; /* non-NULL when the port is part of a LAG */
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc; /* timestamp of last statistics poll */
+	uint64_t rx_missed;
+
+	struct pmd_internals *next; /* next port's private state */
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+/* Max rte_flow items / actions a single conversion can hold. */
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+/* Number of flow-stat id slots; must be a power of two (used as mask). */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* Converted match pattern: a FLOW_ELEM_TYPE_END-terminated element list. */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus the match elements it was parsed into. */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted flow attributes together with the converted match. */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted action list plus storage for action payloads it points at. */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/* Driver-side representation of one created rte_flow rule. */
+struct rte_flow {
+	void *flw_hdl; /* flow engine handle for this rule */
+	int used; /* slot in use */
+	uint32_t flow_stat_id; /* index into flow_stat_id_map */
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/*
+ * Driver-private item/action type values. Negative (INT_MIN-based) so
+ * they can never collide with the non-negative rte_flow enum values.
+ */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/*
+ * NOTE(review): these are static definitions in a header, so every .c
+ * file including this header gets its own private copy of the tables
+ * and lock — intentional only if the header is included from a single
+ * translation unit; confirm.
+ */
+/* Set once by initialize_global_cnv_tables(). */
+static int convert_tables_initialized;
+
+/* Highest rte_flow enum value the lookup tables below can index. */
+#define MAX_RTE_ENUM_INDEX 127
+
+/* rte_flow item/action type -> internal FLOW_* type (-1 = unsupported). */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/* Translate an rte_flow item type; -1 for out-of-range/unsupported. */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+/* Translate an rte_flow action type; -1 for out-of-range/unsupported. */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+/* flow_stat_id -> (mark + 1); 0 means the slot is free. */
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+/* Protects flow_stat_id_map. */
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal flow_error into the rte_flow_error the caller
+ * passed in (if any). NONE/SUCCESS map to RTE_FLOW_ERROR_TYPE_NONE,
+ * everything else to UNSPECIFIED. Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+	error->type = (flow_error->type == FLOW_ERROR_NONE ||
+		       flow_error->type == FLOW_ERROR_SUCCESS) ?
+		      RTE_FLOW_ERROR_TYPE_NONE :
+		      RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ *
+ * Open addressing with linear probing into flow_stat_id_map[]; the slot
+ * stores (mark + 1) so 0 means free. Caller must hold flow_lock.
+ *
+ * The probe is bounded: previously, when all MAX_COLOR_FLOW_STATS slots
+ * were occupied, the while loop spun forever. On a full table we now
+ * wrap back to the initial slot and reclaim it, so the caller always
+ * receives a valid id.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+	uint32_t probes = 0;
+
+	while (flow_stat_id_map[flow_stat_id] &&
+			probes++ < MAX_COLOR_FLOW_STATS)
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Allocate a flow stat id for @mark while holding flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/*
+ * Release a flow stat id slot. Out-of-range ids are ignored.
+ * Caller must hold flow_lock.
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * Populate the rte_flow -> internal FLOW_* lookup tables (and, with
+ * RTE_FLOW_DEBUG, the matching name tables). Idempotent: returns
+ * immediately once convert_tables_initialized is set. Unlisted enum
+ * values stay at -1 (unsupported).
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* Item (match element) translations. */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* Action translations. */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Parse a raw packet byte buffer into a FLOW_ELEM_TYPE_END-terminated
+ * flow_elem list: Ethernet, optional VLAN tags, IPv4/IPv6, one L4
+ * header (ICMP/TCP/UDP/SCTP) and, for UDP to the GTP-U port, the GTP
+ * header with any extension headers.
+ *
+ * @data:     packet bytes to interpret
+ * @preserve: optional parallel buffer used as per-element mask
+ *            (may be NULL)
+ * @size:     number of valid bytes in @data; must end exactly on a
+ *            header boundary
+ * @out:      receives the elements (caller provides the storage)
+ *
+ * Returns the number of elements written (including the END element),
+ * or -1 if the buffer is truncated or an unsupported header is met.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - one element per stacked 802.1Q/QinQ tag */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - EtherType and IP version nibble must both match */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field (offset 9) */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field (offset 6) */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 - dispatch on the IP protocol number */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U rides on UDP destination port 2152 */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is in 4-byte units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero-length extension header never
+				 * advances pkti; previously this made the
+				 * loop spin forever and read
+				 * data[pkti - 1] below. Reject it as
+				 * malformed.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last byte of the extension names the next one */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing bytes that are not a recognized header: malformed */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Convert rte_flow attributes into the internal flow_attr. Only group
+ * and priority are carried over; all other fields are zeroed. A NULL
+ * @attr yields an all-zero result. Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	struct flow_attr *dst = &attribute->attr;
+
+	memset(dst, 0x0, sizeof(*dst));
+
+	if (attr == NULL)
+		return 0;
+
+	dst->group = attr->group;
+	dst->priority = attr->priority;
+
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE flow action array into the filter-layer action list used
+ * by the vswitch profile.
+ *
+ * @action:       output; receives converted actions plus per-flow helper
+ *                data (RSS config, tunnel definition, mark).
+ * @actions:      RTE actions, terminated by RTE_FLOW_ACTION_TYPE_END.
+ * @max_elem:     capacity of action->flow_actions[].
+ * @flow_stat_id: output; color statistics id taken from a MARK action, or
+ *                synthesized here when the caller (OVS full offload)
+ *                supplies none.
+ *
+ * Returns 0 on success, -1 on unknown action or flow_actions[] overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Sentinel: "no stat id assigned yet" */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/*
+					 * Inserting MARK + END consumes two
+					 * slots; bail out instead of writing
+					 * past flow_actions[max_elem - 1].
+					 */
+					if (aidx + 1 >= max_elem)
+						return -1;
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE flow action array into the filter-layer action list used
+ * by the inline profile.
+ *
+ * @action:       output; receives converted actions plus helper data
+ *                (RSS config, raw encap/decap data, queue).
+ * @actions:      RTE actions, terminated by RTE_FLOW_ACTION_TYPE_END.
+ * @max_elem:     capacity of action->flow_actions[].
+ * @queue_offset: added to QUEUE action indices so VF/vDPA ports map onto
+ *                their allocated virtqueues.
+ *
+ * Returns 0 on success, negative on conversion error or overflow.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw header blob into flow items */
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw header blob into flow items */
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Rebase the queue index for VF/vDPA ports */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Global pool of flow handles; slots are claimed and released under flow_lock. */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when 'flow' is a typecast handle, i.e. a raw lower-layer
+ * flow handle returned directly by flow_create() rather than an entry in
+ * the nt_flows[] pool. Compared as integers via uintptr_t: relational
+ * comparison of pointers into distinct objects is undefined behavior in C.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const uintptr_t first_element = (uintptr_t)&nt_flows[0];
+	const uintptr_t last_element = (uintptr_t)&nt_flows[MAX_RTE_FLOWS - 1];
+	const uintptr_t p = (uintptr_t)flow;
+
+	return p < first_element || p > last_element;
+}
+
+/*
+ * Convert RTE attr/items/actions into the filter-layer representation.
+ * The action conversion is profile specific: inline applies a queue offset,
+ * vswitch produces a flow statistics id.
+ * Returns 0 on success, -1 with 'error' populated on failure.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/* Validate before use - previously 'dev' was dereferenced first */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback.
+ * Typecast handles are raw lower-layer handles and carry no rte_flow
+ * statistics fields, so the previous unconditional stat clearing would have
+ * written through a pointer that is not a struct rte_flow - they now return
+ * right after the lower-layer destroy. Pool entries additionally release
+ * their stat id and slot under flow_lock.
+ * Returns the lower-layer result (0 on success).
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: convert the RTE attribute/item/action lists
+ * and let the filter layer check them without creating a flow.
+ * Returns 0 when the flow is acceptable, negative otherwise.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s cnv_attribute;
+	struct cnv_match_s cnv_match;
+	struct cnv_action_s cnv_action;
+	uint32_t stat_id = 0;
+
+	/* Conversion failure maps to -EINVAL; 'error' is already filled in */
+	if (convert_flow(eth_dev, attr, items, actions, &cnv_attribute,
+			 &cnv_match, &cnv_action, error, &stat_id) < 0)
+		return -EINVAL;
+
+	int status = flow_validate(dev->flw_dev, cnv_match.flow_elem,
+				   cnv_action.flow_actions, &flow_error);
+
+	if (status < 0)
+		convert_error(error, &flow_error);
+
+	return status;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * Inline profile with attr.group > 0: the lower-layer handle is returned
+ * directly (a "typecast" handle - not an entry in nt_flows[]). Otherwise a
+ * free slot in the global nt_flows[] pool is claimed under flow_lock and
+ * populated with the lower-layer handle.
+ * Returns NULL with 'error' set on failure.
+ *
+ * NOTE(review): if the pool has no free slot, the flow_stat_id allocated
+ * during conversion (vswitch profile) is never released - confirm whether
+ * delete_flow_stat_id_locked() should also run in that path.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Group > 0 inline flows bypass the pool and return the raw handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim a free pool slot; 'used' marks it reserved for this flow */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		/* Lower-layer create happens outside the lock; the slot is
+		 * already reserved via 'used', so no other thread can take it.
+		 */
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Creation failed: release the stat id and the slot */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (all-ports) statistics refresh. */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull adapter statistics into the PMD software counters for one port.
+ * Per-port updates are rate limited to once per second via
+ * internals->last_stat_rtc; the global per-color flow counters are
+ * additionally updated at most once per second across all ports (file-scope
+ * last_stat_rtc).
+ * Returns 0 on success (also when rate limited), -1 on invalid state.
+ *
+ * NOTE(review): pthread_mutex_lock(&p_nt_drv->stat_lck) is taken while the
+ * 'hwlock' spinlock is held - a sleeping lock under a spinlock; confirm the
+ * intended lock ordering and latency budget.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/* NOTE(review): '>' looks off by one - presumably '>= NUM_ADAPTER_PORTS_MAX'; confirm array bounds */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to use here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold per-color deltas into every active flow tagged with that color */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow query callback - only RTE_FLOW_ACTION_TYPE_COUNT is supported.
+ * Refreshes counters via poll_statistics() and reports the flow's packet and
+ * byte totals; 'qcnt->reset' clears them. Typecast (lower-layer) handles
+ * carry no counters here and are rejected.
+ * Returns 0 on success, -1 with 'err' set otherwise.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* A NULL flow also lands here: NULL is outside the nt_flows[] pool */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug helper: dump an rte_flow_tunnel description to the FILTER log. */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	/* NOTE(review): "%016lx" expects unsigned long; tun_id is 64-bit, so
+	 * this breaks on 32-bit targets - consider PRIx64. Confirm.
+	 */
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * Template action list handed to the application by ntnic_tunnel_decap_set();
+ * slot [1] is patched at runtime (e.g. to VXLAN_DECAP).
+ * NOTE(review): shared mutable static - concurrent tunnel_decap_set calls
+ * race on slot [1]; confirm the control path is single threaded.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel_decap_set callback: hand the application the PMD action
+ * list implementing tunnel decap (TUNNEL_SET followed by VXLAN_DECAP).
+ * Only VXLAN tunnels are supported; returns -ENOTSUP otherwise.
+ * The returned array is the static _pmd_actions template (see its note on
+ * thread safety); nothing is allocated per call.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* Static tunnel match item handed out by ntnic_tunnel_match(); never freed. */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel_match callback: expose the single static PMD tunnel match
+ * item to the application. Always succeeds.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* Hand back the one static tunnel item; nothing is allocated. */
+	*num_of_items = 1;
+	*pmd_items = &_pmd_items;
+
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow get_restore_info callback: recover tunnel information for a
+ * partially offloaded packet from its FDIR mark.
+ * Mark layout used here: bits 31..24 = port id, bits 23..0 = stat/tunnel id.
+ * Returns 0 with 'info' filled on a hit, -EINVAL otherwise.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		/* Decode port and stat id from the FDIR mark words */
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed values - not recoverable from the offloaded flow */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow tunnel_action_decap_release callback: the actions returned by
+ * ntnic_tunnel_decap_set() are static, so there is nothing to free - no-op.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow tunnel_item_release callback: the item returned by
+ * ntnic_tunnel_match() is static, so there is nothing to free - no-op.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow driver callbacks exported to the ethdev layer (.flush unsupported). */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One RSS flag mapping: .first is the RTE_* value, .second the NT_* value. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expand to a { RTE_<name>, NT_<name> } initializer pair for the table below. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Translation table between RTE and NT RSS hash-field flags; searched
+ * linearly in both directions by the lookup helpers below.  RTE flags not
+ * listed here are treated as unsupported by the conversion code.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/* Look up the NT RSS flag matching an RTE RSS flag; NULL when unmapped. */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	const struct pair_uint64_t *entry = rte_eth_rss_to_nt;
+	const struct pair_uint64_t *const table_end =
+		rte_eth_rss_to_nt + ARRAY_SIZE(rte_eth_rss_to_nt);
+
+	while (entry != table_end) {
+		if (entry->first == rte_flag)
+			return &entry->second;
+		++entry;
+	}
+	return NULL; /* NOT found */
+}
+
+/* Look up the RTE RSS flag matching an NT RSS flag; NULL when unmapped. */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	const struct pair_uint64_t *entry = rte_eth_rss_to_nt;
+	const struct pair_uint64_t *const table_end =
+		rte_eth_rss_to_nt + ARRAY_SIZE(rte_eth_rss_to_nt);
+
+	while (entry != table_end) {
+		if (entry->second == nt_flag)
+			return &entry->first;
+		++entry;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Convert an RTE RSS bit mask to the NT hardware RSS field set.
+ * Bits without an NT mapping are logged as errors and skipped; the
+ * remaining supported fields are still applied.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (uint i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				/* %u: 'i' is unsigned (was %d); add '\n' as in other NT_LOG calls */
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.\n",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Convert an NT hardware RSS field set back to the RTE RSS bit mask.
+ * Every NT flag is expected to have an RTE mapping (NT flags are a
+ * subset of the RTE set), hence the assert.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t rte_bits = 0;
+
+	for (uint bit_no = 0; bit_no < sizeof(nt_hsh.fields) * CHAR_BIT;
+			++bit_no) {
+		const uint64_t nt_bit = UINT64_C(1) << bit_no;
+
+		if ((nt_hsh.fields & nt_bit) == 0)
+			continue;
+
+		const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+		assert(rte_bit_p &&
+		       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+		rte_bits |= *rte_bit_p;
+	}
+
+	return rte_bits;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* Mask for bit 31 (0x80000000): set in meter/profile ids to mark egress. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Find a meter profile by id in the per-device list; NULL when absent. */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *entry;
+
+	LIST_FOREACH(entry, &dev_priv->mtr_profiles, next) {
+		if (entry->profile_id == meter_profile_id)
+			return entry;
+	}
+
+	return NULL;
+}
+
+/*
+ * rte_mtr meter_profile_add callback (vswitch FPGA profile).
+ *
+ * Profile ids with the top bit set denote egress profiles.  Ingress
+ * profiles are only accepted on virtual ports.  Accepted profiles are
+ * copied into the per-device list consulted by eth_mtr_create().
+ *
+ * Returns 0 on success, otherwise a negative errno via rte_mtr_error_set().
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* Pass __FILE__ for the "[%s:%u]" slot (was __func__ twice). */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr meter_profile_delete callback (vswitch FPGA profile).
+ * Removes the profile from the per-device list and frees it.
+ * No check is made here for meters still referencing the profile.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	/* NOTE(review): second "%s" prints __func__ again; __FILE__ likely intended */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Find a meter object by id in the per-device list; NULL when absent. */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *entry;
+
+	LIST_FOREACH(entry, &dev_priv->mtrs, next) {
+		if (entry->mtr_id == mtr_id)
+			return entry;
+	}
+
+	return NULL;
+}
+
+/* Fixed-point rate value: integer part plus a fractional part in 1/1024ths. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts bytes/s to bytes/period in the form integer + 1/1024*fractional.
+ * The period depends on the clock frequency and other parameters which,
+ * combined, give the multiplier. The resulting formula is:
+ *     f[bytes/period] = x[bytes/s] * period_ps / 10^12
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	/* NOTE(review): dividend can wrap for byte_per_second > 2^64/period_ps — confirm input range */
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /*10^12 pico second*/
+
+	res.integer = dividend / divisor;
+	const uint64_t reminder = dividend % divisor;
+
+	/* Remainder scaled to 1/1024 units for the fractional part */
+	res.fractional = 1024ull * reminder / divisor;
+	return res;
+}
+
+/* Physical-port rate conversion: period_ps = 8 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Virtual-port rate conversion: period_ps = 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr meter_enable callback (vswitch FPGA profile).
+ *
+ * Programs the meter's profile into hardware: egress profiles go through
+ * the EPP block (vport or txp QoS depending on port type), ingress
+ * profiles through the DBS TX QoS block.  The FPGA implements RFC 4115
+ * buckets; the RFC 2697 parameters map onto them as noted below.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* One-shot global QoS rate init; function-static, so shared by all
+	 * devices/ports in this process — NOTE(review): confirm intended. */
+	static int ingress_initial;
+
+	/* NOTE(review): second "%s" prints __func__ again; __FILE__ likely intended */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* A zero rate would disable the meter; clamp to minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* A zero rate would disable the meter; clamp to minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Clear (disable) the ingress TX QoS configuration for this port. */
+static void disable(struct pmd_internals *dev_priv)
+{
+	/* NOTE(review): second "%s" prints __func__ again; __FILE__ likely intended */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	/* NOTE(review): return value of nthw_set_tx_qos_config is ignored here */
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr meter_disable callback (vswitch FPGA profile).
+ * Zeroes the EPP QoS settings for egress meters, or the DBS TX QoS
+ * configuration for ingress meters.  The meter object itself is kept.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	/* NOTE(review): second "%s" prints __func__ again; __FILE__ likely intended */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * rte_mtr create callback (vswitch FPGA profile).
+ * Binds an existing profile to a new meter object in the per-device list;
+ * ingress meters are rejected on physical ports.  When params request it,
+ * the meter is enabled immediately via eth_meter_enable().
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	/* NOTE(review): second "%s" prints __func__ again; __FILE__ likely intended */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy callback (vswitch FPGA profile).
+ * Disables the associated hardware QoS (EPP for egress, DBS for ingress),
+ * then unlinks and frees the meter object.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	/* Pass __FILE__ for the "[%s:%u]" slot (was __func__ twice). */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		/* ENODEV (not EEXIST) for a missing id, matching profile_delete */
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr capabilities_get callback (inline FLM profile).
+ * Reports color-blind, byte-mode RFC 2698 metering with green-only stats;
+ * limits come from the FLM flow-matcher (MBR records, policy count).
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* Max rate encodable in the hardware rate fields */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported (RFC 2698 only) */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_add callback (inline FLM profile).
+ *
+ * Only color-blind RFC 2698 byte-mode profiles are accepted, and the
+ * committed and peak rate/burst pairs must be identical.  The profile is
+ * programmed directly into hardware via flow_mtr_set_profile().
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	/* 'error' is used on every failure path: dropped bogus __rte_unused */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete callback (inline FLM profile).
+ * Clears the hardware entry by programming an all-zero profile.
+ * All parameters are used, so the bogus __rte_unused markers were dropped.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add callback (inline FLM profile).
+ * Accepts only the policy "green passes (END/VOID/PASSTHRU), yellow and
+ * red drop", which matches what the hardware implements.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* Green: empty action list, possibly prefixed by VOID or PASSTHRU */
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* Yellow and red must both be a single DROP action */
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete callback (inline FLM profile).
+ * Only validates the id; no hardware state needs to be cleared.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id __rte_unused,
+				   struct rte_mtr_error *error __rte_unused)
+{
+	if (policy_id < flow_mtr_meter_policy_n_max())
+		return 0;
+
+	return -rte_mtr_error_set(error, EINVAL,
+				  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+				  NULL, "Policy id out of range\n");
+}
+
+/*
+ * rte_mtr create callback (inline FLM profile).
+ * Validates the parameters against hardware limits (color-blind, shared,
+ * enabled, green-only stats) and offloads the meter via
+ * flow_mtr_create_meter().
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* NOTE(review): profile id is bounded by the POLICY max here, like
+	 * eth_mtr_meter_profile_add_inline — confirm profile/policy maxima
+	 * are intentionally the same. */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy callback (inline FLM profile).
+ * Removes the offloaded meter via flow_mtr_destroy_meter().
+ * 'error' is used on both failure paths: dropped bogus __rte_unused.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update callback (inline FLM profile), repurposed here to
+ * adjust the MBR bucket: bit 63 of the value must be set as an opt-in
+ * flag, and the remaining low bits (up to 32) are the adjustment.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the flag bit, keep the 63-bit payload */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read callback (inline FLM profile).
+ * Only green packet/byte counters are maintained; all other fields in
+ * 'stats' are zeroed.  'clear' resets the hardware counters after read.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* Metering ops for vswitch FPGAs: no capabilities/policy/stats callbacks. */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* Metering ops for inline FLM FPGAs: no enable/disable (always enabled). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * ethdev mtr_ops_get hook: select the metering ops table matching the
+ * FPGA profile (vswitch or inline).  Returns -1 for unsupported profiles.
+ * 'ops' receives a const struct rte_mtr_ops pointer.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+int eth_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port state for one vhost-user socket / vDPA device pair. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path; "" = slot unused */
+	struct rte_vdpa_device *vdev;	/* vDPA device handle found by name */
+	int vid;			/* vhost device id, filled in on new_device */
+	uint32_t index;			/* base HW ring index for this port */
+	uint32_t host_id;		/* backing VF number */
+	uint32_t rep_port;		/* in-port override applied on Tx */
+	int rxqs;			/* number of configured Rx queues */
+	int txqs;			/* number of configured Tx queues */
+	uint64_t flags;			/* flags for rte_vhost_driver_register() */
+	struct rte_pci_addr addr;	/* PCI address of the backing VF */
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a (vDPA device, direction, queue id) triple into HW info.
+ *
+ * rx:        non-zero selects an Rx queue lookup, zero selects Tx.
+ * hw_index:  out - port base HW ring index plus queue_id.
+ * host_id:   out - backing VF number of the port.
+ * rep_port:  out - in-port override of the port.
+ * Returns 0 on success, -1 when the device is unknown or queue_id is
+ * out of range for the requested direction (error logged).
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev != vdpa_dev)
+			continue;
+
+		if (rx) {
+			if (queue_id >= vport[i].rxqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].rxqs);
+				return -1;
+			}
+		} else {
+			/*
+			 * BUGFIX: the Tx error path logged "rxqs" and printed
+			 * vport[i].rxqs; it must report the Tx queue count.
+			 */
+			if (queue_id >= vport[i].txqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].txqs);
+				return -1;
+			}
+		}
+		/* Same base-plus-offset mapping for both directions. */
+		*hw_index = vport[i].index + queue_id;
+		*host_id = vport[i].host_id;
+		*rep_port = vport[i].rep_port;
+		return 0;
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register one VF as a vhost-user-client backed vDPA port and start
+ * its vhost driver.
+ *
+ * vdev:           PCI device of the VF; its name must already be known
+ *                 to the vDPA framework (rte_vdpa_find_device_by_name).
+ * socket_path:    vhost-user socket to register (copied into vport[]).
+ * index:          base HW ring index for the port.
+ * rxqs/txqs:      queue counts exposed to the guest.
+ * rep_port:       in-port override used on Tx.
+ * vhid:           out - slot number assigned in the vport[] table.
+ * Returns the result of nthw_vdpa_start(), or -1 on lookup/overflow
+ * failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/*
+	 * BUGFIX: guard the fixed-size vport[] table; without this check a
+	 * 129th port would write out of bounds.
+	 */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Cannot create more than %lu vDPA ports\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver of a vDPA port and mark its
+ * vport[] slot free.
+ *
+ * NOTE(review): the function returns after handling the FIRST slot with
+ * a non-empty ifname, so one call closes at most one port even though
+ * the name suggests closing everything - confirm whether "close one per
+ * call" is the intended semantics.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Mark the slot free for reuse. */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Feature-bit-number / printable-name pairs used by dump_virtio_features();
+ * several name strings carry leading spaces (kept verbatim, they are part
+ * of the original output formatting). */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Print the name of every feature bit from virt_features[] that is set
+ * in @features (debug aid, compiled under DUMP_VIRTIO_FEATURES only). */
+static void dump_virtio_features(uint64_t features)
+{
+	int idx;
+
+	for (idx = 0; idx < NUM_FEATURES; idx++) {
+		const uint64_t bit = 1ULL << virt_features[idx].id;
+
+		if (features & bit)
+			printf("Virtio feature: %s\n", virt_features[idx].name);
+	}
+}
+#endif
+
+/*
+ * vhost-user new_device callback: bind the vhost vid to its vport[]
+ * slot (matched by socket path), wait for the PMD instance to become
+ * ready, then record which ring layout (packed/split) was negotiated.
+ * Returns 0 on success, -1 on timeout, unknown socket or incompatible
+ * negotiated features.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the vport[] slot whose socket path matches this vid. */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll up to ~2 s (2000 x 1 ms) for the ethdev instance. */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): %016lx assumes 64-bit long for uint64_t; PRIx64
+	 * would be portable - confirm target platforms. */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	/* Accept only IN_ORDER and/or RING_PACKED - the HW datapath needs one. */
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost-user destroy_device callback: locate the vport[] slot whose
+ * socket path matches @vid, log the teardown and set the virtual link
+ * state back to "not negotiated".
+ *
+ * The original scanned vport[] twice (once to log, once to update the
+ * link state); a single scan does both.
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) != 0)
+			continue;
+
+		NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+		       ifname, vport[vhid].vdev->device->name);
+
+		/* set link down on virtual port */
+		struct pmd_internals *intern = vp_vhid_instance_ready(vhid);
+
+		if (intern)
+			intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+		break;
+	}
+}
+
+/* vhost-user lifecycle callbacks registered per socket in nthw_vdpa_start(). */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register the vhost-user socket of @vport, hook up the device
+ * callbacks, mask out virtio features the HW datapath does not
+ * implement, and start the vhost driver.
+ * Returns 0 on success, -1 on any failure (logged).
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Offloads/control features not supported by the FPGA datapath. */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		/*
+		 * CONSISTENCY FIX: this path aborts the start (returns -1),
+		 * so log at ERR like the other fatal branches, not INF.
+		 */
+		NT_LOG(ERR, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF PCI id to a printable adapter name ("Unknown" for
+ * anything not recognized). */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+			return "NT200A02";
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+			return "NT50B01";
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for Napatech VFs: first create the vDPA device for the VF,
+ * then the DPDK ethdev interface. Returns 0 on success, -1 or the
+ * result of nthw_create_vf_interface_dpdk() otherwise.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove: tear down the vDPA device first, then the DPDK VF
+ * interface; -1 if the vDPA removal fails. */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret = ntvf_vdpa_pci_remove(pci_dev);
+
+	return (ret != 0) ? -1 : nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI id table: the two supported Napatech VF device ids, NUL sentinel
+ * terminated as required by the PCI bus driver. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* PCI driver descriptor for the Napatech VF; registered below. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Per-vring state mirrored from the guest plus the HW queue handle. */
+struct vring_info {
+	uint64_t desc;		/* guest-physical address of descriptor ring */
+	uint64_t avail;		/* guest-physical address of avail ring */
+	uint64_t used;		/* guest-physical address of used ring */
+	uint16_t size;		/* ring size in descriptors */
+
+	uint16_t last_avail_idx;	/* saved/restored via rte_vhost vring base */
+	uint16_t last_used_idx;
+
+	int vq_type;		/* 0 = Rx, 1 = Tx (set in create_vring) */
+	struct nthw_virt_queue *p_vq;	/* HW virt-queue handle; NULL if not created */
+
+	int enable;		/* non-zero once the HW queue is enabled */
+};
+
+/* HW-facing state of one vDPA VF: negotiated features and its vrings
+ * (up to NTVF_VDPA_MAX_QUEUES Rx/Tx pairs). */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;	/* number of vrings reported by vhost */
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Driver-private state of one vDPA VF instance. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;	/* backing VF PCI device */
+	struct rte_vdpa_device *vdev;	/* registered vDPA device */
+
+	/* VFIO fds obtained in ntvf_vdpa_vfio_setup(); -1 until set up */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id */
+
+	uint32_t outport;	/* out-port used when creating Tx queues */
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* lifecycle flags, accessed with C11 atomics */
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;	/* guest memory currently DMA-mapped */
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/* Look up the internal_list entry owning @vdev; NULL when not found.
+ * The global list is walked under internal_list_lock. */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next) {
+		if (entry->internal->vdev == vdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/* Look up the internal_list entry owning PCI device @pdev; NULL when
+ * not found. The global list is walked under internal_list_lock. */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next) {
+		if (entry->internal->pdev == pdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Set up VFIO for the VF behind @internal and cache its container,
+ * group and device fds. All three fds stay -1 on failure.
+ * Returns 0 on success, -1 when nt_vfio_setup() fails.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * DMA-map (do_map != 0) or unmap (do_map == 0) every guest memory
+ * region of the vhost device for this VF.
+ *
+ * A redundant request (map while already mapped, unmap while not
+ * mapped) is rejected via the dma_mapped flag.
+ * Returns 0 on success, negative on failure.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/*
+		 * BUGFIX: the format string said "GPA 0xllx" with no '%'
+		 * conversion, so guest_phys_addr was never consumed and the
+		 * remaining arguments were shifted (undefined behavior for
+		 * the varargs). Use PRIX64 like the other fields.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/* Translate a host virtual address of the vhost device @vid to the
+ * guest physical address in the same memory region; 0 when no region
+ * contains @hva (or the memory table cannot be fetched). */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0) {
+		free(mem);	/* free(NULL) is a no-op */
+		return 0;
+	}
+
+	for (uint32_t i = 0; i < mem->nregions; i++) {
+		const struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		if (hva >= reg->host_user_addr &&
+				hva < reg->host_user_addr + reg->size) {
+			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
+			break;
+		}
+	}
+
+	free(mem);
+	return gpa;
+}
+
+/*
+ * Create the HW virt-queue backing one guest vring.
+ *
+ * Resolves the guest-physical addresses of the desc/avail/used rings,
+ * saves the vring base indices, then (for IN_ORDER or RING_PACKED
+ * feature sets only) programs the FPGA DBS with an Rx queue for even
+ * vring indices or a Tx queue for odd ones. On success the handle is
+ * stored in hw->vring[vring].p_vq.
+ * Returns 0 on success (including the unsupported split-ring case,
+ * which only logs a warning), -1/negative on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* HVA -> GPA translation for all three ring components. */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even vring indices are Rx queues, odd ones Tx. */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Even index: receive virt queue. */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				/* NOTE(review): unreachable - the two branches
+				 * above cover !(vring & 1) and (vring & 1). */
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the datapath for this vDPA VF after feature negotiation.
+ *
+ * Inline FPGA profile: map guest memory, create and enable the first
+ * Rx/Tx vring pair. Other profiles: only vring 0 is created/enabled
+ * here; the rest are enabled later through the vring-state callback.
+ * Always returns 0.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		/* Only the first Rx/Tx pair (vrings 0 and 1) is started here. */
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the datapath: push the current vring base indices back to vhost
+ * and release every HW virt-queue that was created (IN_ORDER or
+ * RING_PACKED feature sets only). Logging (live migration dirty-page
+ * tracking) is not supported and only produces a warning.
+ * Always returns 0.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Hand the ring positions back so vhost can resume/migrate. */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type: 0 = Rx, 1 = Tx (set in create_vring). */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Room for the irq_set header plus one eventfd per queue (Rx+Tx) and one
+ * device-level vector
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Wire the vhost per-ring callfds to the VF's MSI-X vectors via VFIO.
+ * Idempotent: returns immediately when interrupts are already enabled.
+ * Returns 0 on success — including the "too many vectors" case, since
+ * polling drivers inside the guest still work — and -1 on ioctl failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0 carries the device/config interrupt */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* NOTE(review): the loop steps by 2 and reads ring i+1 — this assumes
+	 * nr_vring is even (Rx/Tx pairs); confirm behavior for odd counts
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down the MSI-X trigger configuration set up by
+ * ntvf_vdpa_enable_vfio_intr(). Idempotent: a no-op when interrupts are
+ * not currently enabled. Returns 0 on success, -1 on ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int len;
+
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* count = 0 + DATA_NONE + ACTION_TRIGGER disables all MSI-X vectors */
+	len = sizeof(struct vfio_irq_set);
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = len;
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the 'started' and 'dev_attached' flags:
+ * start the HW datapath when the device is both started and attached,
+ * stop it (and disable interrupts / unmap DMA) when either flag drops.
+ * Serialized by internal->lock. Returns 0 on success, negative on error.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Fix: log before the jump — the original placed this
+			 * NT_LOG after 'goto err', making it unreachable
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/* vhost callback: a guest attached to this vid — bring the datapath up */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdpa_dev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	priv = entry->internal;
+	priv->vid = vid;
+
+	/* Mark attached and re-evaluate the datapath state machine */
+	atomic_store(&priv->dev_attached, 1);
+	ntvf_vdpa_update_datapath(priv);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vhost callback: the guest detached — stop the datapath and drop queues */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdpa_dev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+	struct ntvf_vdpa_hw *hw;
+	uint32_t q;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	priv = entry->internal;
+
+	/* Detach first so update_datapath tears the HW path down */
+	atomic_store(&priv->dev_attached, 0);
+	ntvf_vdpa_update_datapath(priv);
+
+	/* Invalidate the virt queue pointers */
+	hw = &priv->hw;
+	for (q = 0; q < hw->nr_vring; q++)
+		hw->vring[q].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost callback: features were (re)negotiated. Succeeds unless the guest
+ * requires dirty-page logging (live migration), which this PMD lacks.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t negotiated;
+	struct rte_vdpa_device *vdpa_dev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &negotiated);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdpa_dev, negotiated);
+
+	if (!RTE_VHOST_NEED_LOG(negotiated))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdpa_dev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/* vhost callback: report the VFIO group fd backing this vid */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct internal_list *entry;
+	struct rte_vdpa_device *vdpa_dev;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/* vhost callback: report the VFIO device fd backing this vid */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct internal_list *entry;
+	struct rte_vdpa_device *vdpa_dev;
+
+	LOG_FUNC_ENTER();
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdpa_dev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdpa_dev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/* vDPA op: report how many queue pairs this VF supports */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA op: report the virtio feature bits this device offers */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA op: report the fixed, driver-wide vhost protocol feature set */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable VFIO MSI-X interrupts and then every HW virtqueue of this device
+ * (even ring index = Rx, odd = Tx). Returns 0 on success or the error
+ * from the interrupt-enable step.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Use the driver logger instead of raw printf */
+		NT_LOG(ERR, VDPA, "%s: failed to enable VFIO interrupts\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vhost callback: enable or disable a single vring.
+ *
+ * Rings that already have a HW queue are toggled in place; otherwise an
+ * enable triggers DMA mapping and vring creation.  When the last vring
+ * comes up, VFIO interrupts and FPGA Rx/Tx are enabled — inline FPGA
+ * profiles update the enable flag before that step, other profiles after.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two rings (Rx+Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* No HW queue yet: map guest memory and create it */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA driver ops handed to the vhost library via rte_vdpa_register_device;
+ * live-migration (migration_done) and notify-area relay are not provided.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO, register with the
+ * vDPA framework and kick the datapath state machine.
+ * Returns 0 on success, -1 on failure with all local allocations freed.
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fix: was 'return -1', which leaked 'list' and 'internal' */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	/* rte_free(NULL) is a safe no-op */
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, release VFIO resources, unregister from
+ * the vDPA framework and free the per-device state.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath stop the HW path */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* NOTE(review): the id table holds only the vendor_id = 0 sentinel, so
+ * this driver is not auto-matched on any PCI id — probe is presumably
+ * invoked explicitly by the ntnic PMD; confirm.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver descriptor for the vDPA VF driver */
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit enter/leave debug traces in every vDPA op;
+ * undefine it to compile the trace macros away.
+ */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI probe/remove entry points for the vDPA VF driver */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+/* Base IOVA for VF DMA windows; each mapping advances by 1 GiB */
+#define START_VF_IOVA 0x220000000000
+
+/* Derive a unique device index from the PCI devid/function bits */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	return ((pdev->addr.devid & 0x1f) << 3) + ((pdev->addr.function) & 0x7);
+}
+
+/* Internal API */
+
+/* Per-VF VFIO state, indexed by the VF number (see nt_vfio_vf_num()) */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;
+};
+
+static struct vfio_dev vfio_list[256];
+
+/* Look up the per-VF state; returns NULL for an out-of-range VF number */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	/* Bound derived from the table size instead of a magic constant */
+	if (vf_num < 0 ||
+			vf_num >= (int)(sizeof(vfio_list) / sizeof(vfio_list[0])))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+			       &iommu_group_num);
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/* Destroy the VFIO container created for this VF by nt_vfio_setup() */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host-virtual buffer into this VF's IOVA space.
+ *
+ * Buffers of exactly 1 GiB are aligned down to a 1 GiB boundary so the
+ * whole hugepage region is mapped; the returned IOVA then carries the
+ * offset into that region.  Every mapping consumes a fixed 1 GiB window.
+ *
+ * On success *iova_addr receives the IOVA of virt_addr and 0 is returned;
+ * -1 on an invalid VF number or VFIO mapping failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fix: cast args to match the format — 'virt_addr' is a void * and
+	 * 'size' a uint64_t, but they were passed to PRIX64/%d unconverted
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%d\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	/* Advance to the next 1 GiB window for the following mapping */
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo nt_vfio_dma_map(): recompute the 1 GiB-aligned base/IOVA for 1 GiB
+ * mappings and remove the container mapping.  A container_fd of -1 means
+ * the VF was never set up and is treated as success.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		/* Mirror the alignment done at map time */
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+
+/*
+ * Map a region using the caller-supplied (guest-physical) IOVA directly,
+ * with no window allocation. Returns 0 on success, -1 on error.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fix: 'size' is uint64_t but was passed to %d unconverted */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Remove a guest-physical IOVA mapping installed by nt_vfio_dma_map_vdpa() */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+	int rc;
+
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	rc = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					  iova_addr, size);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       rc, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for a VF, or -1 for an invalid VF number */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for a VF, or -1 for an invalid VF number */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for a VF, or -1 for an invalid VF number */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* NOTE(review): RTE_INIT is used here as a bare declaration with the
+ * definition below, rather than the usual RTE_INIT(fn) { ... } form —
+ * confirm this still registers the constructor as intended.
+ */
+RTE_INIT(nt_vfio_init);
+
+static void nt_vfio_init(void)
+{
+	/* Hand the generic nt_util layer this module's map/unmap callbacks */
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Container/group setup and teardown; setup returns the VF number */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the fds recorded during nt_vfio_setup() */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* Driver-managed IOVA window mapping; *iova_addr receives the IOVA */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA variants: the caller supplies the (guest-physical) IOVA directly */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* Maps one xstat name to its counter source and byte offset within that source */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source; /* 1 = RX port counters, 2 = TX port counters, 3 = FLM counters */
+	unsigned int offset; /* byte offset of the 64-bit counter within the source struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+/* One table row per exposed stat: name, source (1=RX, 2=TX), counter offset */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "rx_dropped_packets", 1, offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "rx_qos_dropped_bytes", 1, offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "rx_qos_dropped_packets", 1, offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "tx_dropped_packets", 2, offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "tx_qos_dropped_bytes", 2, offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "tx_qos_dropped_packets", 2, offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+/* One table row per exposed stat: name, source (1=RX, 2=TX, 3=FLM), counter offset */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	/* RX RMON counters */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	/* TX RMON counters */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3, offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3, offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) },
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+/* Same layout as the v1 table, extended with the FLM 0.20 counters at the end */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	/* RX RMON counters */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	/* TX RMON counters */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3, offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3, offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) },
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{ "flm_count_cuc_start", 3, offsetof(struct flm_counters_v1, cuc_start) },
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ * NTHW_XSTATS_SIZE is the largest of the stat-name tables, so one row of
+ * nthw_xstats_reset_val can hold a baseline for whichever table is in use.
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port baselines captured by nthw_xstats_reset() and subtracted on read */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/**
+ * Fill @stats with up to @n xstats for @port, relative to the reset baseline.
+ * Caller must hold the stat mutex. Returns the number of entries written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	uint8_t *flm_ptr, *rx_ptr, *tx_ptr;
+	unsigned int i;
+
+	/* Select the counter sources and name table for this adapter mode */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		bool use_v1 = p_nt4ga_stat->flm_stat_ver < 18;
+
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		names = use_v1 ? nthw_cap_xstats_names_v1 : nthw_cap_xstats_names_v2;
+		nb_names = use_v1 ? NTHW_CAP_XSTATS_NAMES_V1 : NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	for (i = 0; i < n && i < nb_names; i++) {
+		const struct rte_nthw_xstats_names_s *nm = &names[i];
+		uint64_t raw = 0;
+		uint64_t base = 0;
+
+		stats[i].id = i;
+		if (nm->source == 1) {
+			/* RX stat */
+			raw = *(uint64_t *)&rx_ptr[nm->offset];
+			base = nthw_xstats_reset_val[port][i];
+		} else if (nm->source == 2) {
+			/* TX stat */
+			raw = *(uint64_t *)&tx_ptr[nm->offset];
+			base = nthw_xstats_reset_val[port][i];
+		} else if (nm->source == 3 && flm_ptr) {
+			/* FLM stat; the FLM baseline is shared (row 0) */
+			raw = *(uint64_t *)&flm_ptr[nm->offset];
+			base = nthw_xstats_reset_val[0][i];
+		}
+		/* Unknown source or missing FLM block yields 0 - 0 = 0 */
+		stats[i].value = raw - base;
+	}
+
+	return i;
+}
+
+/**
+ * Fill @values for the stats selected by @ids, relative to the reset baseline.
+ * Caller must hold the stat mutex. Returns the number of valid ids resolved;
+ * entries with out-of-range ids are left untouched.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	uint8_t *flm_ptr, *rx_ptr, *tx_ptr;
+	unsigned int i;
+	int count = 0;
+
+	/* Select the counter sources and name table for this adapter mode */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		bool use_v1 = p_nt4ga_stat->flm_stat_ver < 18;
+
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		names = use_v1 ? nthw_cap_xstats_names_v1 : nthw_cap_xstats_names_v2;
+		nb_names = use_v1 ? NTHW_CAP_XSTATS_NAMES_V1 : NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	for (i = 0; i < n; i++) {
+		uint64_t id = ids[i];
+		const struct rte_nthw_xstats_names_s *nm;
+		uint64_t raw = 0;
+		uint64_t base = 0;
+
+		if (id >= nb_names)
+			continue;
+
+		nm = &names[id];
+		if (nm->source == 1) {
+			/* RX stat */
+			raw = *(uint64_t *)&rx_ptr[nm->offset];
+			base = nthw_xstats_reset_val[port][id];
+		} else if (nm->source == 2) {
+			/* TX stat */
+			raw = *(uint64_t *)&tx_ptr[nm->offset];
+			base = nthw_xstats_reset_val[port][id];
+		} else if (nm->source == 3 && flm_ptr) {
+			/* FLM stat; the FLM baseline is shared (row 0) */
+			raw = *(uint64_t *)&flm_ptr[nm->offset];
+			base = nthw_xstats_reset_val[0][id];
+		}
+		values[i] = raw - base;
+		count++;
+	}
+
+	return count;
+}
+
+/**
+ * Snapshot the current counters as the new zero baseline for @port.
+ * Caller must hold the stat mutex.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	uint8_t *flm_ptr, *rx_ptr, *tx_ptr;
+	unsigned int i;
+
+	/* Select the counter sources and name table for this adapter mode */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		bool use_v1 = p_nt4ga_stat->flm_stat_ver < 18;
+
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		names = use_v1 ? nthw_cap_xstats_names_v1 : nthw_cap_xstats_names_v2;
+		nb_names = use_v1 ? NTHW_CAP_XSTATS_NAMES_V1 : NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		const struct rte_nthw_xstats_names_s *nm = &names[i];
+
+		if (nm->source == 1) {
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*(uint64_t *)&rx_ptr[nm->offset];
+		} else if (nm->source == 2) {
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*(uint64_t *)&tx_ptr[nm->offset];
+		} else if (nm->source == 3 && flm_ptr &&
+				strcmp(nm->name, "flm_count_current") != 0) {
+			/* FLM stat; resetting the flm_count_current gauge makes no sense */
+			nthw_xstats_reset_val[0][i] =
+				*(uint64_t *)&flm_ptr[nm->offset];
+		}
+	}
+}
+
+/*
+ * These functions does not require stat mutex locked
+ */
+/**
+ * Copy up to @size xstat names into @xstats_names; does not require the
+ * stat mutex. When @xstats_names is NULL, returns the number of stats
+ * available instead.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	unsigned int i;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		names = nthw_cap_xstats_names_v1;
+		nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		names = nthw_cap_xstats_names_v2;
+		nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size && i < nb_names; i++)
+		strlcpy(xstats_names[i].name, names[i].name,
+			sizeof(xstats_names[i].name));
+
+	return (int)i;
+}
+
+/**
+ * Copy the names of the stats selected by @ids; does not require the
+ * stat mutex. When @xstats_names is NULL, returns the number of stats
+ * available. Otherwise returns the number of valid ids resolved, matching
+ * the counting convention of nthw_xstats_get_by_id().
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/* Count only resolved ids, consistent with
+			 * nthw_xstats_get_by_id(); previously every entry was
+			 * counted even when its id was out of range.
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Name lookups; safe without the stat mutex */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Value readers/reset; caller must hold the stat mutex */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* Name lookup by id; safe without the stat mutex */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+/* Value reader by id; caller must hold the stat mutex */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v4 8/8] net/ntnic: adds socket connection to PMD
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-18 18:41   ` [PATCH v4 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-18 18:41   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-18 18:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1310 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5354 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 40ab25899e..b09bb91914 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -1,6 +1,22 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2020-2023 Napatech A/S
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -14,6 +30,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -35,6 +52,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+/* One node in the parameter dispatch tree walked by execute_function() */
+struct func_s {
+	const char *param; /* token this node matches; NULL terminates a table */
+	struct func_s *sub_funcs; /* nested table for the next token, or NULL at a leaf */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len); /* leaf handler invoked on a full match */
+};
+
+/*
+ * Build an error reply: 4 bytes of binary error code followed by
+ * "<module>:<error text>". *len is 0 if no buffer could be produced.
+ * Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4 code bytes + module + ':' + text + NUL terminator */
+		size_t size = 4 + strlen(module) + 1 +
+			      strlen(ntcerr->err_text) + 1;
+
+		*data = malloc(size);
+		if (*data) {
+			uint32_t code = (uint32_t)ntcerr->err_code;
+
+			/* snprintf instead of sprintf: bounded by the allocation */
+			snprintf(*data, size, "----%s:%s", module,
+				 ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			/* memcpy over the "----" placeholder avoids an
+			 * unaligned/aliasing uint32_t store through char*
+			 */
+			memcpy(*data, &code, sizeof(code));
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a 4-byte binary status reply carrying @code.
+ * *len is 0 if allocation failed. Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	uint32_t *reply;
+
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	reply = malloc(sizeof(*reply));
+	if (reply) {
+		*reply = (uint32_t)code;
+		*len = sizeof(*reply);
+	}
+	*data = (char *)reply;
+	return REQUEST_OK;
+}
+
+/*
+ * Walk the comma-separated @function string down the @func_list dispatch
+ * tree and invoke the matching leaf handler. Recurses one level per token;
+ * @recur_depth distinguishes "no parameter at all" from "incomplete path".
+ * Returns the handler's result, or an ntconn_error() reply on failure.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *saveptr = NULL;
+	/* strtok_r instead of strtok: strtok relies on hidden static state
+	 * and is unsafe if two connections are ever parsed concurrently
+	 */
+	char *tok = strtok_r(function, ",", &saveptr);
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
/*
 * Operations table a module registers with the ntconnect server
 * (see register_ntconn_mod()).  All callbacks receive the module's own
 * 'hdl' context pointer as their first argument.
 */
typedef struct ntconn_api_s {
	/*
	 * Name specifying this module. This name is used in the request string
	 */
	const char *module;
	/*
	 * The version of this module integration
	 */
	uint32_t version_major;
	uint32_t version_minor;
	/*
	 * The request function:
	 * hdl       : pointer to the context of this instance.
	 * client_id : identifying the client. To be able to manage client specific data/state.
	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
	 *             then be 'get,interface,port0,link_speed'.
	 * hdr       : header for length of command string and length of binary blob.
	 *             The command string will start at "*data" and will have the length hdr->len.
	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
	 *             hdr->blob_len.
	 * data      : pointer to the resulting data. Typically this will be allocated.
	 * len       : length of the data in the reply.
	 *
	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
	 *             can contain a descriptive error text
	 */
	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
		       char *function, char **data, int *len);
	/*
	 * After each request call, and when 'len' returns > 0, this function is called
	 * after sending reply to client.
	 * hdl       : pointer to the context of this instance.
	 * data      : the data pointer returned in the request function
	 */
	void (*free_data)(void *hdl, char *data);
	/*
	 * Clean up of client specific data allocations. Called when client disconnects from server
	 * hdl       : pointer to the context of this instance.
	 * client_id : identifying the client.
	 */
	void (*client_cleanup)(void *hdl, int client_id);
} ntconnapi_t;
+
/*
 * ntconn error
 * Maps an ntconn_err_e code to human-readable text; used when building
 * error replies.
 */
typedef struct ntconn_err_s {
	uint32_t err_code;
	const char *err_text;
} ntconn_err_t;

/* Look up the error record for 'err_code' (never returns NULL) */
const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);

/*
 * One registered module instance: binds an operations table ('op') and its
 * context ('hdl') to a PCI address.  Instances are kept in a singly linked
 * list via 'next'; 'mutex' serializes request handling per module.
 */
typedef struct ntconn_mod_s {
	void *hdl;
	struct pci_id_s addr;
	const ntconnapi_t *op;

	pthread_mutex_t mutex;
	struct ntconn_mod_s *next;
} ntconn_mod_t;

/* State of the ntconnect server itself (also registered as the "server" module) */
struct ntconn_server_s {
	int serv_fd;	/* listening socket fd */
	int running;	/* non-zero while the server loop should keep going */
	/*
	 * list of different pci_ids registered aka SmartNICs
	 */
	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
	cpu_set_t cpuset; /* CPUs the server threads may run on */
};

/* Register the built-in "server" module on the 0000:00:00.0 pseudo address */
int ntconn_server_register(void *server);

/* Add a module ops table for the device at 'addr' to the module list */
int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
			const ntconnapi_t *op);
/* Create the listening socket and start the server threads */
int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
/* Error codes carried in ntconn error replies (layer 2) */
enum ntconn_err_e {
	NTCONN_ERR_CODE_NONE = 0U,
	NTCONN_ERR_CODE_INTERNAL_ERROR,
	NTCONN_ERR_CODE_INVALID_REQUEST,
	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
	NTCONN_ERR_CODE_NO_DATA,
	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
};

/* Module-specific status codes returned via ntconn_reply_status() */
enum ntconn_reply_code_e {
	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
};

/* Message tags stored in ntconn_header_s.tag to mark the frame kind */
enum {
	NTCONN_TAG_NONE,
	NTCONN_TAG_REQUEST,
	NTCONN_TAG_REPLY,
	NTCONN_TAG_ERROR
};

#define MESSAGE_BUFFER 256
#define MAX_ERR_MESSAGE_LENGTH 256

/* Error reply payload: code plus a NUL-terminated message text */
struct reply_err_s {
	enum ntconn_err_e err_code;
	char msg[MAX_ERR_MESSAGE_LENGTH];
};

/* Wire header preceding every message: 'len' bytes of command string
 * followed by 'blob_len' bytes of binary payload.
 */
#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
struct ntconn_header_s {
	uint16_t tag;	/* one of the NTCONN_TAG_* values */
	uint16_t len;	/* length of the command string */
	uint32_t blob_len;	/* length of the trailing binary blob */
};

/*
 * PCI address that can also be handled as a single 64-bit key ('pci_id').
 * NOTE(review): the byte layout of the overlay depends on host endianness
 * and padding -- presumably only used for in-process comparison, not on the
 * wire as an integer; verify.
 */
struct pci_id_s {
	union {
		uint64_t pci_id;
		struct {
			uint32_t domain;
			uint8_t bus;
			uint8_t devid;
			uint8_t function;
			uint8_t pad;
		};
	};
};

/* Split a 64-bit version value into its 32-bit major/minor halves */
#define VERSION_HI(version) ((unsigned int)((version) >> 32))
#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))

/*
 * Binary interface description for ntconnect module replies
 */

/*
 * server get,nic_pci_ids
 */
#define MAX_PCI_IDS 16
#define NICS_PCI_ID_LEN 12	/* strlen("0000:00:00.0") */

/* Reply payload listing the PCI addresses of all registered NICs */
struct ntc_nic_pci_ids_s {
	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
	int num_nics;
};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
/*
 * adapter get,interfaces
 * Enumerations describing a port as reported in struct ntc_interface_s.
 */
/* Negotiated or configured link speed of a port */
enum port_speed {
	PORT_LINK_SPEED_UNKNOWN,
	PORT_LINK_SPEED_NONE_REPORTED,
	PORT_LINK_SPEED_10M,
	PORT_LINK_SPEED_100M,
	PORT_LINK_SPEED_1G,
	PORT_LINK_SPEED_10G,
	PORT_LINK_SPEED_25G,
	PORT_LINK_SPEED_40G,
	PORT_LINK_SPEED_50G,
	PORT_LINK_SPEED_100G,
};

/* Administrative/physical state of a (physical or virtual) port */
enum port_states {
	PORT_STATE_DISABLED,
	PORT_STATE_NIM_PRESENT,
	PORT_STATE_NIM_ABSENT,
	PORT_STATE_VIRTUAL_UNATTACHED,
	PORT_STATE_VIRTUAL_SPLIT,
	PORT_STATE_VIRTUAL_PACKED,
	PORT_STATE_VIRTUAL_RELAY,
};

/* Physical link status */
enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };

/* Role of the port (plain, LAG member, or virtual) */
enum port_type {
	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
	/* Link aggregated phy port in active/active LAG configuration */
	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
	PORT_TYPE_VIRT,
	PORT_TYPE_LAST
};

/* NIM (Network Interface Module, i.e. transceiver) hardware family codes */
enum nim_identifier_e {
	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
	NIM_GBIC = 0x01, /* Nim type = GBIC */
	NIM_FIXED = 0x02, /* Nim type = FIXED */
	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
	NIM_XFP = 0x06, /* Nim type = XFP */
	NIM_XFF = 0x07, /* Nim type = XFF */
	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
	NIM_XPAK = 0x09, /* Nim type = XPAK */
	NIM_X2 = 0x0A, /* Nim type = X2 */
	NIM_DWDM = 0x0B, /* Nim type = DWDM */
	NIM_QSFP = 0x0C, /* Nim type = QSFP */
	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
};
+
/*
 * Port types
 * Detailed NIM/port type codes -- presumably reported in
 * nim_data_s.port_type; confirm against the producer side.
 */
enum port_type_e {
	PORT_TYPE_NOT_AVAILABLE =
		0, /* The NIM/port type is not available (unknown) */
	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
	PORT_TYPE_RJ45, /* RJ45 type */
	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
	PORT_TYPE_SFP_SX, /* SFP SX */
	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
	PORT_TYPE_SFP_LX, /* SFP LX */
	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
	PORT_TYPE_SFP_ZX, /* SFP ZX */
	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
	PORT_TYPE_SFP_CU, /* SFP copper */
	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
	PORT_TYPE_XFP, /* XFP */
	PORT_TYPE_XPAK, /* XPAK */
	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
	PORT_TYPE_SFP_PLUS, /* SFP+ type */
	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
	PORT_TYPE_QSFP_PLUS, /* QSFP type */
	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
	PORT_TYPE_CFP4, /* CFP4 type */
	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
	PORT_TYPE_QSFP28, /* QSFP28 type */
	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	PORT_TYPE_QSFP_PLUS_4X10,
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
	PORT_TYPE_QSFP_PASSIVE_DAC =
		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
	PORT_TYPE_QSFP_ACTIVE_DAC =
		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
	PORT_TYPE_SFP_28, /* SFP28 type */
	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
	PORT_TYPE_SFP_FX, /* SFP FX */
	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
};
+
/* MAC address as six raw bytes */
struct mac_addr_s {
	uint8_t addr_b[6];
};

/* Supported link lengths per fiber/copper medium, as advertised by the NIM */
struct nim_link_length_s {
	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
	 * length is >= 65535 m
	 */
	uint16_t sm;
	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
	uint16_t copper; /* NIM link length (in meters) supported copper */
};

/* Static identification data read from a NIM (transceiver).
 * The char arrays carry NUL-terminated strings (hence the odd sizes).
 */
struct nim_data_s {
	uint8_t nim_id;	/* NIM family code -- see enum nim_identifier_e */
	uint8_t port_type;
	char vendor_name[17];
	char prod_no[17];
	char serial_no[17];
	char date[9];
	char rev[5];
	uint8_t pwr_level_req;	/* requested power level */
	uint8_t pwr_level_cur;	/* current power level */
	struct nim_link_length_s link_length;
};

/* One sensor reading: current value plus observed min/max.
 * NOTE(review): units/scaling of the values are not defined here -- see the
 * producing sensor module.
 */
struct sensor {
	uint8_t sign;
	uint8_t type;
	uint32_t current_value;
	uint32_t min_value;
	uint32_t max_value;
	char name[50];
};

/* Reply for "stat get,sensors": counts per adapter and per port NIM */
struct ntc_sensors_s {
	uint16_t adapter_sensors_cnt;
	uint16_t ports_cnt;
	uint16_t nim_sensors_cnt[8];
	char adapter_name[24];
};
+
#define MAX_RSS_QUEUES 128

/* Direction of a queue relative to the host */
enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };

/* One queue attached to an interface */
struct queue_s {
	enum queue_dir_e dir;
	int idx;
};

/* Full description of one interface as reported by "adapter get,interfaces" */
struct ntc_interface_s {
	uint8_t port_id;
	enum port_type type;
	enum port_link link;
	enum port_states port_state;
	enum port_speed port_speed;
	struct pci_id_s pci_id;
	struct mac_addr_s mac;
	struct nim_data_s nim_data;
	uint16_t mtu;
	/* attached queues */
	struct {
		struct queue_s queue[MAX_RSS_QUEUES];
		int num_queues;
	};
};

/*
 * adapter get,interfaces
 * Variable-length reply: 'nb_ports' entries follow in the flexible array.
 */
struct ntc_interfaces_s {
	int final_list;
	uint8_t nb_ports;
	struct ntc_interface_s intf[];
};

/*
 * adapter get,info
 * NOTE(review): 'char *fw_version[32]' declares an array of 32 pointers,
 * which cannot be transported as a flat binary reply -- this looks like a
 * typo for 'char fw_version[32]'; verify against the producer.
 */
struct ntc_adap_get_info_s {
	char *fw_version[32];
};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
/* Module-specific error codes for the flow ntconnect module */
enum ntconn_flow_err_e {
	NTCONN_FLOW_ERR_NONE = 0,
	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
	NTCONN_FLOW_ERR_INVALID_PORT,
	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
	NTCONN_FLOW_ERR_NO_VF_QUEUES,
};

/* Flat, fixed-size copy of one flow-element spec/mask so it can be carried
 * over the socket interface.  'valid' marks whether the union holds data;
 * 'start_addr' presumably just anchors the start of the union for copying --
 * verify against the serializer.
 */
struct flow_elem_types_s {
	int valid;
	union {
		int start_addr;
		struct flow_elem_eth eth;
		struct flow_elem_vlan vlan[2];
		struct flow_elem_ipv4 ipv4;
		struct flow_elem_ipv6 ipv6;
		struct flow_elem_sctp sctp;
		struct flow_elem_tcp tcp;
		struct flow_elem_udp udp;
		struct flow_elem_icmp icmp;
		struct flow_elem_vxlan vxlan;
		struct flow_elem_port_id port_id;
		struct flow_elem_tag tag;
	} u;
};
+
/* Transportable copy of one flow element: type plus spec and mask copies */
struct flow_elem_cpy {
	enum flow_elem_type type; /* element type */
	struct flow_elem_types_s spec_cpy;
	struct flow_elem_types_s mask_cpy;
};

struct flow_action_vxlan_encap_cpy {
	/* Encapsulating vxlan tunnel definition */
	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
};

/* RSS action with the queue list copied inline (no pointers on the wire) */
struct flow_action_rss_cpy {
	struct flow_action_rss rss;
	uint16_t cpy_queue[FLOW_MAX_QUEUES];
};

#define MAX_ACTION_ENCAP_DATA 512
/* Decap action: raw header bytes plus the element list describing them */
struct flow_action_decap_cpy {
	uint8_t data[MAX_ACTION_ENCAP_DATA];
	size_t size;
	struct flow_elem_cpy item_cpy
		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
	int item_count;
};

/* Encap action: raw header bytes plus the element list describing them */
struct flow_action_encap_cpy {
	uint8_t data[MAX_ACTION_ENCAP_DATA];
	size_t size;
	struct flow_elem_cpy item_cpy
		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
	int item_count;
};

/* Flat, fixed-size copy of one flow-action configuration (see
 * flow_elem_types_s for the 'valid'/'start_addr' convention).
 */
struct flow_action_types_s {
	int valid;
	union {
		int start_addr;
		struct flow_action_rss_cpy rss;
		struct flow_action_push_vlan vlan;
		struct flow_action_set_vlan_vid vlan_vid;
		struct flow_action_vxlan_encap_cpy vxlan;
		struct flow_action_count count;
		struct flow_action_mark mark;
		struct flow_action_port_id port_id;
		struct flow_action_tag tag;
		struct flow_action_queue queue;
		struct flow_action_decap_cpy decap;
		struct flow_action_encap_cpy encap;
		struct flow_action_jump jump;
		struct flow_action_meter meter;
	} u;
};

/* Transportable copy of one flow action: type plus configuration copy */
struct flow_action_cpy {
	enum flow_action_type type;
	struct flow_action_types_s conf_cpy;
};
+
/* Request payload: query one action of an existing flow on a port */
struct query_flow_ntconnect {
	uint8_t port;
	struct flow_action_cpy action;
	uint64_t flow;	/* opaque flow handle returned at creation */
};

/* Request payload: create a flow from copied elements and actions */
struct create_flow_ntconnect {
	uint8_t port;
	uint8_t vport;
	struct flow_attr attr;
	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
};

/* Request payload: destroy one flow on a port */
struct destroy_flow_ntconnect {
	uint8_t port;
	uint64_t flow;	/* opaque flow handle returned at creation */
};

#define ERR_MSG_LEN 128LLU

/* Reply: queues assigned to a port by "set,port" */
struct flow_setport_return {
	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
	uint8_t num_queues;
};

/* Reply: error type/status with a descriptive message */
struct flow_error_return_s {
	enum flow_error_e type;
	char err_msg[ERR_MSG_LEN];
	int status;
};

/* Reply: handle of a newly created flow */
struct create_flow_return_s {
	uint64_t flow;
};

/* Reply: result of flow validation */
struct validate_flow_return_s {
	int status;
};

/* Reply: flow query result with trailing variable-length data */
struct query_flow_return_s {
	enum flow_error_e type;
	char err_msg[ERR_MSG_LEN];
	int status;
	uint32_t data_length;	/* number of bytes in data[] */
	uint8_t data[];
};

/* Generic flow-operation reply */
struct flow_return_s {
	enum flow_error_e type;
	char err_msg[ERR_MSG_LEN];
	int status;
};

/* Error record passed back from the flow layer */
struct flow_error_ntconn {
	enum flow_error_e type;
	char message[ERR_MSG_LEN];
};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
/* Module-specific error codes for the meter ntconnect module */
enum ntconn_meter_err_e {
	NTCONN_METER_ERR_NONE = 0,
	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
	NTCONN_METER_ERR_INVALID_PORT,
	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
	NTCONN_METER_ERR_PROFILE_ID,
	NTCONN_METER_ERR_POLICY_ID,
	NTCONN_METER_ERR_METER_ID,
};

/* Sub-commands of the meter module's request interface */
enum ntconn_meter_command_e {
	UNKNOWN_CMD,
	ADD_PROFILE,
	DEL_PROFILE,
	ADD_POLICY,
	DEL_POLICY,
	CREATE_MTR,
	DEL_MTR
};
+
/* Same value as in ntconnect_api_flow.h; identical redefinition is benign */
#define ERR_MSG_LEN 128LLU

/* Reply: rte_mtr error type/status with a descriptive message */
struct meter_error_return_s {
	enum rte_mtr_error_type type;
	int status;
	char err_msg[ERR_MSG_LEN];
};

/* Request payload for profile/policy/meter setup; the active union member
 * is implied by the command (see enum ntconn_meter_command_e).
 */
struct meter_setup_s {
	uint8_t vport;
	uint32_t id;	/* profile/policy/meter id, depending on command */
	int shared;
	union {
		struct rte_mtr_meter_profile profile;
		struct {
			struct rte_mtr_meter_policy_params policy;
			/* action lists referenced by 'policy' (1 action + end marker) */
			struct rte_flow_action actions_green[2];
			struct rte_flow_action actions_yellow[2];
			struct rte_flow_action actions_red[2];
		} p;
		struct rte_mtr_params mtr_params;
	};
};

/* Request payload: read (and optionally clear) one meter's statistics */
struct meter_get_stat_s {
	uint8_t vport;
	uint32_t mtr_id;
	int clear;	/* non-zero: reset counters after reading */
};

/* Reply payload for a statistics request */
struct meter_return_stat_s {
	struct rte_mtr_stats stats;
	uint64_t stats_mask;	/* which fields of 'stats' are valid */
};

/* In-process variant of meter_setup_s carrying pointers instead of copies */
struct meter_setup_ptr_s {
	uint32_t id;
	int shared;
	union {
		struct rte_mtr_meter_profile *profile;
		struct rte_mtr_meter_policy_params *policy;
		struct rte_mtr_params *mtr_params;
	};
};

/* Generic meter-operation reply */
struct meter_return_s {
	int status;
};

/* Reply: meter capabilities of the adapter */
struct meter_capabilities_return_s {
	struct rte_mtr_capabilities cap;
};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
/* Number of uint64_t counters in one color statistics record */
#define NUM_STAT_RECORD_TYPE_COLOR \
	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
/* Per-color counters */
struct color_type_fields_s {
	uint64_t pkts;
	uint64_t octets;
	uint64_t tcp_flgs;
};

/* Number of uint64_t counters in one flow-matcher statistics record */
#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
/* Flow-matcher (FLM) counters; grouped by the FPGA module version that
 * introduced them.
 */
struct flowmatcher_type_fields_s {
	/* FLM 0.17 */
	uint64_t current;
	uint64_t learn_done;
	uint64_t learn_ignore;
	uint64_t learn_fail;
	uint64_t unlearn_done;
	uint64_t unlearn_ignore;
	uint64_t auto_unlearn_done;
	uint64_t auto_unlearn_ignore;
	uint64_t auto_unlearn_fail;
	uint64_t timeout_unlearn_done;
	uint64_t rel_done;
	uint64_t rel_ignore;
	uint64_t prb_done;
	uint64_t prb_ignore;
	/* FLM 0.20 */
	uint64_t sta_done;
	uint64_t inf_done;
	uint64_t inf_skip;
	uint64_t pck_hit;
	uint64_t pck_miss;
	uint64_t pck_unh;
	uint64_t pck_dis;
	uint64_t csh_hit;
	uint64_t csh_miss;
	uint64_t csh_unh;
	uint64_t cuc_start;
	uint64_t cuc_move;
};

/* Number of uint64_t counters in one queue statistics record */
#define NUM_STAT_RECORD_TYPE_QUEUE \
	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
/* Per-queue packet/octet counters */
struct queue_type_fields_s {
	uint64_t flush_pkts;
	uint64_t drop_pkts;
	uint64_t fwd_pkts;
	uint64_t dbs_drop_pkts;
	uint64_t flush_octets;
	uint64_t drop_octets;
	uint64_t fwd_octets;
	uint64_t dbs_drop_octets;
};

/*
 * Port stat counters for virtualization NICS with virtual ports support
 */
#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
/* same for Rx and Tx counters on Virt */
#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
struct rtx_type_fields_virt_s {
	uint64_t octets;
	uint64_t pkts;
	uint64_t drop_events;
	uint64_t qos_drop_octets;
	uint64_t qos_drop_pkts;
};
+
/*
 * Port RMON counters for Cap devices
 * Shared between the Rx and Tx counter records below.
 */
struct stat_rmon_s {
	/* Sums that are calculated by software */
	uint64_t drop_events;
	uint64_t pkts;
	/* Read from FPGA */
	uint64_t octets;
	uint64_t broadcast_pkts;
	uint64_t multicast_pkts;
	uint64_t unicast_pkts;
	uint64_t pkts_alignment;
	uint64_t pkts_code_violation;
	uint64_t pkts_crc;
	uint64_t undersize_pkts;
	uint64_t oversize_pkts;
	uint64_t fragments;
	uint64_t jabbers_not_truncated;
	uint64_t jabbers_truncated;
	uint64_t pkts_64_octets;
	uint64_t pkts_65_to_127_octets;
	uint64_t pkts_128_to_255_octets;
	uint64_t pkts_256_to_511_octets;
	uint64_t pkts_512_to_1023_octets;
	uint64_t pkts_1024_to_1518_octets;
	uint64_t pkts_1519_to_2047_octets;
	uint64_t pkts_2048_to_4095_octets;
	uint64_t pkts_4096_to_8191_octets;
	uint64_t pkts_8192_to_max_octets;
};
+
/* Number of uint64_t counters in one capture-device Rx port record */
#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
/* Rx port counters for capture devices: RMON block plus Rx-only counters */
struct rx_type_fields_cap_s {
	struct stat_rmon_s rmon;
	uint64_t mac_drop_events;
	uint64_t pkts_lr;
	/* Rx only port counters */
	uint64_t duplicate;
	uint64_t pkts_ip_chksum_error;
	uint64_t pkts_udp_chksum_error;
	uint64_t pkts_tcp_chksum_error;
	uint64_t pkts_giant_undersize;
	uint64_t pkts_baby_giant;
	uint64_t pkts_not_isl_vlan_mpls;
	uint64_t pkts_isl;
	uint64_t pkts_vlan;
	uint64_t pkts_isl_vlan;
	uint64_t pkts_mpls;
	uint64_t pkts_isl_mpls;
	uint64_t pkts_vlan_mpls;
	uint64_t pkts_isl_vlan_mpls;
	uint64_t pkts_no_filter;
	uint64_t pkts_dedup_drop;
	uint64_t pkts_filter_drop;
	uint64_t pkts_overflow;
	uint64_t pkts_dbs_drop;
	uint64_t octets_no_filter;
	uint64_t octets_dedup_drop;
	uint64_t octets_filter_drop;
	uint64_t octets_overflow;
	uint64_t octets_dbs_drop;
	uint64_t ipft_first_hit;
	uint64_t ipft_first_not_hit;
	uint64_t ipft_mid_hit;
	uint64_t ipft_mid_not_hit;
	uint64_t ipft_last_hit;
	uint64_t ipft_last_not_hit;
};

/* Number of uint64_t counters in one capture-device Tx port record */
#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
/* Tx port counters for capture devices: RMON block only */
struct tx_type_fields_cap_s {
	struct stat_rmon_s rmon;
};

/*
 * stat get,colors
 * stat get,queues
 * stat get,rx_counters
 * stat get,tx_counters
 */
#define STAT_INFO_ELEMENTS \
	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))

/* Common reply envelope for the stat queries above; 'data[]' holds
 * 'nb_counters' records of the record type implied by the query.
 */
struct ntc_stat_get_data_s {
	uint64_t nb_counters;
	uint64_t timestamp;
	uint64_t is_virt;	/* non-zero: virt-style records, else cap-style */
	uint64_t data[];
};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
/* Reply envelope for the test module: 'number' trailing uint64_t values */
struct test_s {
	int number;	/* element count of test[] */
	int status;
	uint64_t test[];
};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
static int func_get_nic_pci(void *hdl, int client_fd,
			    struct ntconn_header_s *hdr, char **data, int *len);
/* Handlers reachable under "get,..."; NULL-terminated for execute_function() */
static struct func_s funcs_get_level1[] = {
	{ "nic_pci_ids", NULL, func_get_nic_pci },
	{ NULL, NULL, NULL },
};

/*
 * Entry level
 * Top-level dispatch table of the server module; NULL-terminated.
 */
static struct func_s server_entry_funcs[] = {
	{ "get", funcs_get_level1, NULL },
	{ NULL, NULL, NULL },
};
+
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		sprintf(npci->nic_pci_id[i], "%04x:%02x:%02x.%x",
+			serv->pci_id_list[i].domain & 0xffff,
+			serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
/*
 * Request entry point of the server module: dispatches the layer-3 command
 * string (e.g. "get,nic_pci_ids") through the module's function tables.
 */
static int ntconn_server_request(void *hdl, int client_id,
				 struct ntconn_header_s *hdr, char *function,
				 char **data, int *len)
{
	return execute_function(this_module_name, hdl, client_id, hdr, function,
				server_entry_funcs, data, len, 0);
}
+
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (data) {
+#ifdef DEBUG
+		NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+		free(data);
+	}
+}
+
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
/*
 * Register the server module itself.  It always lives on the reserved
 * pseudo PCI address 0000:00:00.0 so clients can reach it without knowing
 * any real device.
 */
int ntconn_server_register(void *server)
{
	const struct rte_pci_addr addr = {
		.domain = 0, .bus = 0, .devid = 0, .function = 0
	};

	return register_ntconn_mod(&addr, server, &ntconn_server_op);
}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..22ef7828c7
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error-code to error-text table; terminated by the {-1, NULL} sentinel. */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+/* Head of the singly-linked list of registered ntconnect modules */
+static ntconn_mod_t *ntcmod_base;
+/* Accept-loop thread id and the most recently spawned client worker id */
+static pthread_t tid;
+static pthread_t ctid;
+/* Singleton server state; also registered as the "server" module */
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Look up the table entry for 'err_code'. Unknown codes map to the
+ * generic internal-error entry (index 1).
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+	/* Not found: fall back to "Internal error" */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a module handler for the (PCI address, module name) pair.
+ * The new entry is pushed onto the head of the global ntcmod_base list and
+ * its PCI id is recorded (once) in the server's pci_id_list.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto head of module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* NOTE(review): addr.pci_id appears to alias the individual address
+	 * fields set above (union overlay) - confirm against the header.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the id in the first free slot, skip if already present */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill an AF_UNIX socket address with 'path'.
+ * Returns 0 on success, -1 on NULL arguments or a path too long for sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!path || !addr)
+		return -1;
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;
+
+	memset(addr, 0, sizeof(*addr));
+	addr->sun_family = AF_UNIX;
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+/* Internal status codes shared by the socket helper functions below */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Wait up to 'timeout' ms (negative = forever) for data on 'fd' and receive
+ * at most 'len' bytes into 'data'. On STATUS_OK *recv_len holds the number
+ * of bytes actually received (may be less than 'len'). All other STATUS_*
+ * codes describe why nothing was received.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd flags set: try to receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			/* Save errno immediately; NT_LOG calls may clobber it */
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly 'length' bytes from 'clfd' into 'data', retrying on
+ * transient failures. Returns STATUS_OK, or the first fatal/terminal
+ * status from read_data().
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t done = 0;
+
+	while (done < length) {
+		size_t chunk = 0;
+		int rc = read_data(clfd, length - done, &data[done], &chunk, -1);
+
+		switch (rc) {
+		case STATUS_OK:
+			done += chunk;
+			break;
+		case STATUS_TRYAGAIN:
+			/* Transient condition - retry */
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return rc;
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       rc);
+			return rc;
+		}
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write all 'size' bytes of 'data' to 'fd', looping over partial writes.
+ * Returns 0 on success, STATUS_INTERNAL_ERROR on any write() failure.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t done = 0;
+
+	while (done < size) {
+		ssize_t n = write(fd, &data[done], size - done);
+
+		if (n < 0) {
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		done += (size_t)n;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from 'clfd' into a heap buffer. Reads an
+ * initial MESSAGE_BUFFER chunk, then - if the header announces a larger
+ * payload - grows the buffer and reads the remainder. On STATUS_OK the
+ * caller owns *rdata and must free() it.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	/* NOTE(review): hdr.len + hdr.blob_len comes from the peer and is not
+	 * range-checked here - confirm upstream validation or add a cap.
+	 */
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		/* Fetch the bytes not covered by the first read */
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from the client socket and locate the module registered
+ * for its "<pci_id>;<module>[;<cmd>]" prefix.
+ * On return *get_req owns the raw request buffer (caller frees it),
+ * *module_cmd points into that buffer at the module command when one is
+ * present, and *status carries the read_request() result. Returns the
+ * matching module or NULL.
+ *
+ * Fix vs. original: each strtok_r() result is now NULL-checked BEFORE
+ * strlen() is applied to it (the original dereferenced 'tok' first, a
+ * NULL-pointer dereference on malformed requests). The dead 'if (!hdr)'
+ * check - performed after *hdr had already been written - is removed.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		/* The request starts with an ntconn header */
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/* <pci_id> token - validate before use */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			/* <module> token */
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			/* Optional command: everything after the module name */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse pci_id as "<domain>:<bus>:<devid>.<function>" (hex) */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Build an ntconn reply (header with 'reply_tag' + 'size' payload bytes)
+ * in a temporary buffer and write it whole to 'clfd'.
+ * Returns 0 on success, a STATUS_* / write_all() error otherwise.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	/* Total on-wire length: header + payload */
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	if (res)
+		return res;
+
+	return 0;
+}
+
+/*
+ * Send a reply and then hand 'data' back to the owning module's free_data
+ * callback (under the module mutex). The callback is only invoked when
+ * 'size' is non-zero, i.e. when the module actually produced a buffer.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/*
+ * Send an NTCONN_TAG_ERROR reply: a 32-bit error code followed by the
+ * error text. The "----" placeholder written by sprintf is deliberately
+ * overwritten with the binary error code before sending.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	sprintf(err_buf, "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	/* Stamp the numeric code over the 4-byte placeholder */
+	*(uint32_t *)err_buf = (uint32_t)ntcerr->err_code;
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: reads requests, dispatches them to the
+ * matching module and writes replies until the client disconnects.
+ *
+ * Fixes vs. original:
+ *  - a successfully handled request no longer falls through to the
+ *    "invalid request" error reply at the bottom of the loop;
+ *  - module_cmd/request are reset every iteration instead of carrying a
+ *    stale pointer from the previous request;
+ *  - the request buffer is freed on the timeout/closed exit paths.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	struct ntconn_header_s hdr;
+
+	do {
+		/* Fresh per-request state each iteration */
+		char *module_cmd = NULL;
+		char *request = NULL;
+
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+			int failed = 0;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				failed = send_reply(clfd, NTCONN_TAG_REPLY,
+						    (void *)&version,
+						    sizeof(uint64_t));
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					failed = send_reply_free_data(clfd, cmod,
+								      NTCONN_TAG_REPLY,
+								      (void *)data,
+								      (uint32_t)len);
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					failed = send_reply_free_data(clfd, cmod,
+								      NTCONN_TAG_ERROR,
+								      (void *)data,
+								      (uint32_t)len);
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					failed = ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+				}
+			}
+
+			/* Request fully handled; do not emit the invalid-
+			 * request error below.
+			 */
+			free(request);
+			if (failed)
+				break;
+			continue;
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			free(request);
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			free(request);
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		int send_failed = ntconnect_send_error(clfd,
+					NTCONN_ERR_CODE_INVALID_REQUEST);
+		free(request);
+		if (send_failed)
+			break;
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept-loop thread: listens on the server unix socket and spawns a
+ * detached ntconnect_worker thread (pinned to the configured cpuset)
+ * per accepted client. Exits on listen()/accept() failure or when
+ * 'running' is cleared.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		/* NOTE(review): pthread_create return value is unchecked -
+		 * a failed spawn leaks clfd.
+		 */
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Create the ntconnect unix-domain socket at 'sockname' and start the
+ * server thread pinned to 'cpuset'. A no-op (returns 0) when no modules
+ * have registered. Returns -1 on failure.
+ *
+ * Fixes vs. original: the strdup() and unix_build_address() results are
+ * now checked - previously a too-long socket path silently bound an
+ * all-zero address.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		/* Fails when sockname does not fit in sun_path */
+		if (unix_build_address(sockname, &addr) != 0)
+			return -1;
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/* Version of the "adapter" ntconnect module protocol */
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+/* Per-adapter module handle: just the owning driver instance */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+/* Forward declarations for the "get" sub-commands */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* "get" sub-command dispatch table; NULL-terminated */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* Forward declarations for the "set" sub-commands */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* "set" sub-command dispatch table; NULL-terminated */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/* Translate an nt_link_speed_e value to its PORT_LINK_SPEED_* counterpart. */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	int speed = PORT_LINK_SPEED_UNKNOWN;
+
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:
+		speed = PORT_LINK_SPEED_10M;
+		break;
+	case NT_LINK_SPEED_100M:
+		speed = PORT_LINK_SPEED_100M;
+		break;
+	case NT_LINK_SPEED_1G:
+		speed = PORT_LINK_SPEED_1G;
+		break;
+	case NT_LINK_SPEED_10G:
+		speed = PORT_LINK_SPEED_10G;
+		break;
+	case NT_LINK_SPEED_25G:
+		speed = PORT_LINK_SPEED_25G;
+		break;
+	case NT_LINK_SPEED_40G:
+		speed = PORT_LINK_SPEED_40G;
+		break;
+	case NT_LINK_SPEED_50G:
+		speed = PORT_LINK_SPEED_50G;
+		break;
+	case NT_LINK_SPEED_100G:
+		speed = PORT_LINK_SPEED_100G;
+		break;
+	default:
+		/* Unmapped values report as unknown */
+		break;
+	}
+	return speed;
+}
+
+/* Parse a speed string ("10M" .. "100G") into an nt_link_speed_t. */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces": build a ntc_interfaces_s reply describing every
+ * physical port (including LAG handling) followed by every virtual port:
+ * PCI id, link/admin state, speed, MTU, MAC, attached HW queues and NIM
+ * data. *data is heap-allocated; *len holds its size.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		/* Port state: disabled beats NIM presence */
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode fibre length saturates at 0xFFFF */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		/* Map negotiated virtio comm mode to state/link */
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		/* Clamp so input + output queues never exceed MAX_RSS_QUEUES */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * "get info": reply with the FPGA id string "type-prod-ver-rev".
+ * NOTE(review): snprintf writes at most 31 chars into a buffer of
+ * sizeof(struct ntc_adap_get_info_s) - assumed >= 32; confirm in header.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * "get sensors": reply with a ntc_sensors_s header followed by one
+ * 'struct sensor' record per adapter sensor and per NIM sensor of every
+ * physical port. *data is heap-allocated; *len holds its size.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	/* Sensor records start right after the header */
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		/* NOTE(review): fixed 50-byte copy here vs. the strlen-bounded
+		 * copy in the NIM loop below - assumes info.name is a 50-byte
+		 * array; confirm and unify.
+		 */
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(p_adapter_info, port_nr, true);
+
+	return REQUEST_OK;
+}
+
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(p_adapter_info, port_nr, false);
+
+	return REQUEST_OK;
+}
+
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	const bool link_status =
+		nt4ga_port_get_link_status(p_adapter_info, portid);
+
+	if (!link_status) {
+		nt4ga_port_set_link_status(p_adapter_info, portid, true);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	} else {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+	}
+
+	return REQUEST_OK;
+}
+
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	const bool link_status =
+		nt4ga_port_get_link_status(p_adapter_info, portid);
+
+	if (!link_status) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+	} else {
+		nt4ga_port_set_link_status(p_adapter_info, portid, false);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+		       portid);
+	}
+
+	return REQUEST_OK;
+}
+
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	const bool port_adm_state =
+		nt4ga_port_get_adm_state(p_adapter_info, portid);
+	if (!port_adm_state) {
+		const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+		if (speed != NT_LINK_SPEED_UNKNOWN) {
+			nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+			NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+			       portid, speed_str);
+		} else {
+			return ntconn_error(data, len, this_module_name,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		}
+	} else {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	return REQUEST_OK;
+}
+
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(p_adapter_info, portid, mode);
+	return REQUEST_OK;
+}
+
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+	if (nt4ga_port_tx_power(p_adapter_info, portid, disable)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+	return REQUEST_OK;
+}
+
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/* Only set on phy ports */
+	if (port_nr < adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+	}
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
/*
 * ntconnect module operations for the "adapter" module: name, version,
 * request dispatcher, response deallocator and per-client cleanup hook.
 * Registered with the framework by ntconn_adap_register() below.
 */
static const ntconnapi_t ntconn_adap_op = { this_module_name,
					    NTCONN_ADAP_VERSION_MAJOR,
					    NTCONN_ADAP_VERSION_MINOR,
					    adap_request,
					    adap_free_data,
					    adap_client_cleanup
					  };
+
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int i;
+
+	for (i = 0; i < MAX_ADAPTERS; i++) {
+		if (adap_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[i],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..31d5dc3edc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
/* One driver back-reference per connected ntconnect client. */
static struct flow_hdl_s {
	struct drv_s *drv;
} flow_hdl[MAX_CLIENTS];
+
#define MAX_PORTS 64
/* Per-port mapping set up by func_flow_setport(): the flow device plus the
 * caller id and forced VLAN id copied from the virtual-path instance.
 */
static struct port_to_eth_s {
	struct flow_eth_dev *flw_dev;
	uint32_t forced_vlan_vid;
	uint32_t caller_id;
} port_eth[MAX_PORTS];
+
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	int idx = 0;
+
+	while (ntconn_err[idx].err_code != (uint32_t)-1 &&
+			ntconn_err[idx].err_code != err_code)
+		idx++;
+	if (ntconn_err[idx].err_code == (uint32_t)-1)
+		idx = 1;
+
+	return ntconn_err[idx].err_text;
+}
+
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		memcpy(return_value->err_msg, err_msg,
+		       RTE_MIN(strlen(err_msg), ERR_MSG_LEN));
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg =
+			get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR);
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
/* Dispatch table for "filter" module sub-commands, NULL-terminated. */
static struct func_s adapter_entry_funcs[] = {
	{ "setport", NULL, func_flow_setport },
	{ "create", NULL, func_flow_create },
	{ "validate", NULL, func_flow_validate },
	{ "destroy", NULL, func_flow_destroy },
	{ "flush", NULL, func_flow_flush },
	{ "query", NULL, func_flow_query },
	{ NULL, NULL, NULL },
};
+
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error->type;
+		strlcpy(return_value->err_msg, error->message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	char vpath[MAX_PATH_LEN];
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 7 && memcmp(tok, "in_port=", 5) == 0)
+			in_port = atoi(tok + 7);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vpath=", 6) == 0)
+			strlcpy(vpath, tok + 6, MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
/* Selector for make_flow_create(): create the flow or only validate it. */
enum {
	FLOW_API_FUNC_CREATE,
	FLOW_API_FUNC_VALIDATE,
};
+
/*
 * Translate a serialized flow request (create_flow_ntconnect) into the
 * flow_elem[]/flow_action[] arrays expected by the flow API and invoke
 * either flow_validate() (@func == FLOW_API_FUNC_VALIDATE, returns 0) or
 * flow_create() (returns the new flow handle cast to uint64_t).
 * On any bounds error 0 is returned with *status/@error describing it.
 *
 * The _cpy structures carry their spec/mask/conf payloads inline; this
 * function rebuilds the pointer-based API structures from those payloads.
 * NOTE(review): the "idx > MAX_..." / "eidx > RAW_ENCAP_DECAP_ELEMS_MAX"
 * guards permit one index equal to the array size before bailing out —
 * looks off-by-one unless the arrays are sized MAX+1; verify the array
 * dimensions in the headers.
 */
static uint64_t make_flow_create(int func, int port,
				 struct create_flow_ntconnect *flow_cpy,
				 int *status, struct flow_error *error)
{
	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
	struct flow_action action[MAX_FLOW_STREAM_ELEM];
	struct flow_action_vxlan_encap vxlan_tun;
	struct flow_action_raw_encap encap;
	struct flow_action_raw_decap decap;
	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
	int idx = -1;

	struct flow_attr *attr = &flow_cpy->attr;
	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
	struct flow_action_cpy *action_cpy = flow_cpy->action;

	/* Preset the "too many" error; overwritten on success. */
	error->type = FLOW_ERROR_GENERAL;
	error->message = "To many flows";
	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;

	attr->caller_id = port_eth[port].caller_id;
	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;

	/* Rebuild the pattern element list up to FLOW_ELEM_TYPE_END. */
	do {
		idx++;
		if (idx > MAX_FLOW_STREAM_ELEM)
			goto error;
		elem[idx].type = elem_cpy[idx].type;
		if (!elem_cpy[idx].spec_cpy.valid) {
			elem[idx].spec = NULL;
		} else {
			elem[idx].spec =
				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
		}
		if (!elem_cpy[idx].mask_cpy.valid) {
			elem[idx].mask = NULL;
		} else {
			elem[idx].mask =
				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
		}
	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);

	/* Rebuild the action list up to FLOW_ACTION_TYPE_END. */
	idx = -1;
	do {
		idx++;
		if (idx > MAX_FLOW_STREAM_ELEM)
			goto error;
		action[idx].type = action_cpy[idx].type;
		if (!action_cpy[idx].conf_cpy.valid) {
			action[idx].conf = NULL;
		} else {
			switch (action_cpy[idx].type) {
			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
				/*
				 * Special VXLAN ENCAP treatment create inner tunnel
				 * elements in action
				 */
				struct flow_elem_cpy *tun_elem_cpy =
					(struct flow_elem_cpy *)action_cpy[idx]
					.conf_cpy.u.vxlan.vxlan_tunnel;
				vxlan_tun.vxlan_tunnel = elem_tun;
				int tun_idx = -1;

				do {
					tun_idx++;
					if (tun_idx >
							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
						error->message =
							"To many VXLAN tunnels";
						goto error;
					}
					elem_tun[tun_idx].type =
						tun_elem_cpy[tun_idx].type;
					if (!tun_elem_cpy[tun_idx]
							.spec_cpy.valid) {
						elem_tun[tun_idx].spec = NULL;
					} else {
						elem_tun[tun_idx].spec =
							(void *)&tun_elem_cpy[tun_idx]
							.spec_cpy.u
							.start_addr;
					}
					if (!tun_elem_cpy[tun_idx]
							.mask_cpy.valid) {
						elem_tun[tun_idx].mask = NULL;
					} else {
						elem_tun[tun_idx].mask =
							(void *)&tun_elem_cpy[tun_idx]
							.mask_cpy.u
							.start_addr;
					}
				} while (tun_elem_cpy[tun_idx].type !=
						FLOW_ELEM_TYPE_END);
				/* VXLAN ENCAP tunnel finished */
				action[idx].conf = &vxlan_tun;
			}
			break;
			case FLOW_ACTION_TYPE_RSS: {
				/* Need to set queue pointer */
				action_cpy[idx].conf_cpy.u.rss.rss.queue =
					(const uint16_t *)&action_cpy[idx]
					.conf_cpy.u.rss.cpy_queue;
				action[idx].conf = (void *)&action_cpy[idx]
						   .conf_cpy.u.rss.rss;
			}
			break;
			case FLOW_ACTION_TYPE_METER: {
				/* Need to convert meter ID to uniq ID for the VF */
				action_cpy[idx].conf_cpy.u.meter.mtr_id =
					((flow_mtr_meters_supported() /
					  (RTE_MAX_ETHPORTS - 2)) *
					 (flow_cpy->vport - 4)) +
					action_cpy[idx].conf_cpy.u.meter.mtr_id;
				action[idx].conf = (void *)&action_cpy[idx]
						   .conf_cpy.u.meter;
			}
			break;
			case FLOW_ACTION_TYPE_RAW_ENCAP: {
				/* Rebuild item pointers from inline copies. */
				encap.preserve = NULL;
				encap.data =
					action_cpy[idx].conf_cpy.u.encap.data;
				encap.item_count =
					action_cpy[idx]
					.conf_cpy.u.encap.item_count;
				encap.size =
					action_cpy[idx].conf_cpy.u.encap.size;

				for (int eidx = 0;
						eidx <
						action_cpy[idx].conf_cpy.u.encap.item_count;
						eidx++) {
					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
						error->message =
							"To many encap items";
						goto error;
					}
					encap.items[eidx].type =
						action_cpy[idx]
						.conf_cpy.u.encap
						.item_cpy[eidx]
						.type;
					if (action_cpy[idx]
							.conf_cpy.u.encap
							.item_cpy[eidx]
							.spec_cpy.valid) {
						encap.items[eidx].spec =
							(void *)&action_cpy[idx]
							.conf_cpy.u
							.encap
							.item_cpy[eidx]
							.spec_cpy.u
							.start_addr;
					} else {
						encap.items[eidx].spec = NULL;
					}
					if (action_cpy[idx]
							.conf_cpy.u.encap
							.item_cpy[eidx]
							.mask_cpy.valid) {
						encap.items[eidx].mask =
							(void *)&action_cpy[idx]
							.conf_cpy.u
							.encap
							.item_cpy[eidx]
							.mask_cpy.u
							.start_addr;
					} else {
						encap.items[eidx].mask = NULL;
					}
				}
				action[idx].conf = &encap;
			}
			break;
			case FLOW_ACTION_TYPE_RAW_DECAP: {
				/* Same rebuild as RAW_ENCAP, for decap. */
				decap.data =
					action_cpy[idx].conf_cpy.u.decap.data;
				decap.item_count =
					action_cpy[idx]
					.conf_cpy.u.decap.item_count;
				decap.size =
					action_cpy[idx].conf_cpy.u.decap.size;

				for (int eidx = 0;
						eidx <
						action_cpy[idx].conf_cpy.u.decap.item_count;
						eidx++) {
					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
						error->message =
							"To many decap items";
						goto error;
					}
					decap.items[eidx].type =
						action_cpy[idx]
						.conf_cpy.u.decap
						.item_cpy[eidx]
						.type;
					if (action_cpy[idx]
							.conf_cpy.u.decap
							.item_cpy[eidx]
							.spec_cpy.valid) {
						decap.items[eidx].spec =
							(void *)&action_cpy[idx]
							.conf_cpy.u
							.decap
							.item_cpy[eidx]
							.spec_cpy.u
							.start_addr;
					} else {
						decap.items[eidx].spec = NULL;
					}
					if (action_cpy[idx]
							.conf_cpy.u.decap
							.item_cpy[eidx]
							.mask_cpy.valid) {
						decap.items[eidx].mask =
							(void *)&action_cpy[idx]
							.conf_cpy.u
							.decap
							.item_cpy[eidx]
							.mask_cpy.u
							.start_addr;
					} else {
						decap.items[eidx].mask = NULL;
					}
				}
				action[idx].conf = &decap;
			}
			break;
			default: {
				/* Move conf pointer into conf_cpy data field */
				action[idx].conf =
					(void *)&action_cpy[idx]
					.conf_cpy.u.start_addr;
			}
			break;
			}
		}
	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);

	*status = NTCONN_FLOW_ERR_NONE;
	if (func == FLOW_API_FUNC_VALIDATE) {
		*status = flow_validate(port_eth[port].flw_dev, elem, action,
					error);
		return 0ULL;
	} else {
		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
					     action, error);
	}

error:
	return 0;
}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "flow.validate" handler.
+ *
+ * Checks that the flow specification blob appended to the request header
+ * would be accepted by the flow filter for the given port, without
+ * actually creating the flow.  The filter status and any flow_error are
+ * marshalled into the reply buffer by copy_return_status().
+ *
+ * Returns REQUEST_OK with a reply in *data/*len, or an ntconn error
+ * reply on a malformed request.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	/* The flow specification blob follows the connection header. */
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			/* Print all four octets of each IPv4 address. */
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Hand the specification to the filter in validate-only mode. */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect "flow.query" handler.
+ *
+ * Executes flow_query() for an existing flow handle and returns the raw
+ * query payload together with the filter status and error text in a
+ * single variable-length reply buffer.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	/* The query blob follows the connection header. */
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/*
+	 * NOTE(review): the flow handle arrives verbatim in the client blob
+	 * and is dereferenced by flow_query() below - presumably it is a
+	 * handle previously returned by func_flow_create to the same client;
+	 * confirm it cannot be forged by an untrusted peer.
+	 */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply is a fixed header followed by `length` bytes of query data. */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		/* data_out is owned by flow_query(); copy it out and free. */
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Dispatch a "flow" module request to the matching handler function. */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len, 0);
+	return res;
+}
+
+/*
+ * Release a reply buffer previously allocated by this module.
+ * free(NULL) is a no-op, so no NULL guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup callback; the flow module keeps no per-client state. */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/*
+ * Module operations registered with the NtConnect framework:
+ * name, version, request dispatcher, reply-buffer release and
+ * per-client cleanup.
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register an adapter with the NtConnect flow module.
+ *
+ * Claims the first free client slot and registers the module operations
+ * for the adapter's PCI address.  Returns -1 when all slots are in use,
+ * otherwise the result of register_ntconn_mod().
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* A NULL drv pointer marks an unused slot. */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client registration state; a NULL drv marks a free slot. */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-module error code to text mapping; terminated by { -1, NULL }. */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+/*
+ * Map an error code to its human-readable text.
+ *
+ * Codes below the meter-specific range are resolved through the generic
+ * ntconn table; meter-specific codes are looked up in the local
+ * ntconn_err[] table.  Unknown codes fall back to entry 1
+ * ("Internal error").
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	const ntconn_err_t *entry;
+
+	/* Framework-level codes are handled centrally. */
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR)
+		return get_ntconn_error(err_code)->err_text;
+
+	for (entry = ntconn_err; entry->err_code != (uint32_t)-1; entry++) {
+		if (entry->err_code == err_code)
+			return entry->err_text;
+	}
+
+	/* Not found: report as internal error. */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+/* Handlers for the meter module's request functions (declared below). */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Function-name to handler dispatch table, NULL-terminated. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+/* Fill an rte_mtr_error with the text matching @err; no cause is set. */
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->cause = NULL;
+	error->message = get_error_msg(err);
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+}
+
+/*
+ * ntconnect "meter.capabilities" handler.
+ *
+ * Parses a "vport=<n>" argument from the request string, maps the
+ * virtual port to a physical port and returns the rte_mtr capabilities
+ * in the reply buffer (or an error reply on failure).
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Extract the optional "vport=<n>" token from the request string. */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* Valid virtual ports are 1..64; 0 means the token was missing. */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* assumes the physical port is the low bit of vport - TODO confirm */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "meter.setup" handler.
+ *
+ * Executes one of six sub-commands (add/delete profile, add/delete
+ * policy, create/destroy meter) selected by the first token of the
+ * request string.  Parameters arrive in a meter_setup_s blob appended
+ * after the connection header.  Per-VF IDs in the blob are remapped to
+ * global IDs before calling the rte_mtr API.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	/* Virtual ports below 4 are reserved; valid range is 4..128. */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* First token of the request string selects the sub-command. */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	/*
+	 * Each command below first range-checks the per-VF ID, then remaps
+	 * it to a global ID: global = (vport - 4) * max_per_vf + local.
+	 * Note that cpy_data is mutated in place during this remapping.
+	 */
+	switch (command) {
+	case ADD_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action table from the wire fields. */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy IDs are remapped with the policy max, not max_id. */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	 *data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "meter.read" handler.
+ *
+ * Reads (and optionally clears) the statistics of a meter identified by
+ * a per-VF meter ID in the request blob; the ID is remapped to a global
+ * ID before calling rte_mtr_stats_read().
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Virtual ports below 4 are reserved; valid range is 4..128. */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Map to physical port and remap the per-VF meter ID to global. */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Dispatch a "meter" module request to the matching handler function. */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len, 0);
+	return res;
+}
+
+/*
+ * Release a reply buffer previously allocated by this module.
+ * free(NULL) is a no-op, so no NULL guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup callback; the meter module keeps no per-client state. */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/*
+ * Module operations registered with the NtConnect framework:
+ * name, version, request dispatcher, reply-buffer release and
+ * per-client cleanup.
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register an adapter with the NtConnect meter module.
+ *
+ * Claims the first free client slot and registers the module operations
+ * for the adapter's PCI address.  Returns -1 when all slots are in use,
+ * otherwise the result of register_ntconn_mod().
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* A NULL drv pointer marks an unused slot. */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (meter_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ */
+/* Each call registers the corresponding module for the given driver
+ * instance with the ntconnect framework. The meter and test modules
+ * return -1 on failure; presumably the others follow the same
+ * convention - confirm against their implementations.
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..5c8b8db39e
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+/* Module version advertised to ntconnect clients */
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistic category within a client snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One category inside a snapshot: location and size in 64-bit words */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot state, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;
+	struct snaps_s *next;
+};
+
+/* Module handle shared by all stat requests for this adapter */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic categories served by this module */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Dispatch table for "get snapshot <category>" requests */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* Dispatch table for "get <name>" requests */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level dispatch table for the stat module */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize flowmatcher (FLM) statistics into 'val': a generic info
+ * header (STAT_INFO_ELEMENTS 64-bit words) followed by 'nbc'
+ * flowmatcher_type_fields_s records.
+ * Returns the number of 64-bit words written.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* NOTE(review): stats/names feed only the assert below, which
+	 * compiles away under NDEBUG - confirm whether this cross-check
+	 * is still wanted.
+	 */
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/* NOTE(review): 'flm' is never advanced and the source is
+		 * not indexed by 'c', so every iteration rewrites the same
+		 * record. Harmless while nbc == 1 (see get_size) - confirm
+		 * the intent before supporting nbc > 1.
+		 */
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/* Zeroes one record; assumes nbc == 1 - TODO confirm */
+		memset(flm, 0, sizeof(*hwstat->mp_stat_structs_flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize color statistics into 'val': a generic info header followed
+ * by one color_type_fields_s record per color counter.
+ * Returns the number of 64-bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *out_hdr = (struct ntc_stat_get_data_s *)val;
+
+	out_hdr->nb_counters = (uint64_t)nbc;
+	out_hdr->timestamp = hwstat->last_timestamp;
+	out_hdr->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap adapters */
+	struct color_type_fields_s *dst =
+		(struct color_type_fields_s *)out_hdr->data;
+
+	for (int i = 0; i < nbc; i++, dst++) {
+		dst->pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		dst->octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		dst->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize host-buffer (queue) statistics into 'val': a generic info
+ * header followed by one queue_type_fields_s record per host buffer.
+ * Returns the number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *qdata = (struct ntc_stat_get_data_s *)val;
+
+	qdata->nb_counters = (uint64_t)nbq;
+	qdata->timestamp = hwstat->last_timestamp;
+	qdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	struct queue_type_fields_s *queue =
+		(struct queue_type_fields_s *)qdata->data;
+	int q;
+
+	for (q = 0; q < nbq; q++) {
+		queue->flush_pkts = hwstat->mp_stat_structs_hb[q].flush_packets;
+		queue->drop_pkts = hwstat->mp_stat_structs_hb[q].drop_packets;
+		queue->fwd_pkts = hwstat->mp_stat_structs_hb[q].fwd_packets;
+		queue->dbs_drop_pkts = hwstat->mp_stat_structs_hb[q].dbs_drop_packets;
+		queue->flush_octets = hwstat->mp_stat_structs_hb[q].flush_bytes;
+		queue->drop_octets = hwstat->mp_stat_structs_hb[q].drop_bytes;
+		queue->fwd_octets = hwstat->mp_stat_structs_hb[q].fwd_bytes;
+		queue->dbs_drop_octets = hwstat->mp_stat_structs_hb[q].dbs_drop_bytes;
+		queue++;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group from a port counter block into a reply
+ * record; shared by the Rx and Tx capture-mode serializers.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port statistics into 'val': a generic info header
+ * followed by one record per port. The record layout depends on whether
+ * the adapter runs in vswitch (virt) or capture (cap) mode.
+ * Returns the number of 64-bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch mode: small record with byte/packet/drop counts */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* capture mode: full RMON group plus Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize Tx port statistics into 'val': a generic info header
+ * followed by one record per port (virt or cap layout, as for Rx).
+ * Returns the number of 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			/* Tx packet total overrides the RMON pkts field */
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Reply with the statistic layout version as a single int.
+ * The reply buffer is malloc'ed; the framework frees it via stat_free_data().
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* Statistics subsystem must be initialized */
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	*data = malloc(sizeof(int));
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*(int *)*data = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Reply with the FLM reply-layout version as a single int:
+ * FLM stat versions below 18 use layout 1, later versions layout 2.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	*data = malloc(sizeof(int));
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*(int *)*data = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type;
+ * additionally, returns in *num_records the total number of records for
+ * this type (ie number of queues, ports, etc).
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	default:
+		/* Unknown type: previously nrec/size were returned
+		 * uninitialized (undefined behavior). Report zero records.
+		 */
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for the "get" requests: allocates a reply buffer sized
+ * via get_size() and fills it through 'read_counters' while holding the
+ * driver stat lock. On success the caller owns *data (released through
+ * stat_free_data()).
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int size = get_size(stat, stype, &nbg);
+
+	/* get_size() counts 64-bit words; convert to bytes */
+	size *= sizeof(uint64_t);
+	uint64_t *val = (uint64_t *)malloc(size);
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Serialize against the driver's statistics updater */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = size;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": reply with the current flowmatcher statistics */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER, read_flm);
+}
+
+/* "get colors": reply with the current color statistics */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_COLOR, read_colors);
+}
+
+/* "get queues": reply with the current host-buffer statistics */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_QUEUE, read_queues);
+}
+
+/* "get rx_counters": reply with the current Rx port statistics */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_RX, read_rx_counters);
+}
+
+/* "get tx_counters": reply with the current Tx port statistics */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_TX, read_tx_counters);
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up a client's snapshot entry in the linked list.
+ * When 'parent' is non-NULL it receives the entry's list predecessor
+ * (NULL when the entry is the list head), for unlinking in cleanup.
+ * Returns NULL when the client has no entry.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *snaps = stat->snaps_base;
+
+	if (parent)
+		*parent = NULL;
+	while (snaps && snaps->client_id != client_id) {
+		if (parent)
+			*parent = snaps;
+		snaps = snaps->next;
+	}
+
+	return snaps;
+}
+
+/*
+ * Find or create the snapshot entry for a client.
+ * A new entry is pushed at the head of the list with an empty buffer.
+ * Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps) {
+		snaps = malloc(sizeof(struct snaps_s)); /* return NULL on malloc failure */
+		if (snaps) {
+			snaps->client_id = client_id;
+			snaps->next = stat->snaps_base;
+			stat->snaps_base = snaps;
+			snaps->buffer = NULL;
+		}
+	}
+	return snaps;
+}
+
+/*
+ * Take an atomic snapshot of all statistic categories for one client.
+ * All four categories are read into a single per-client buffer while
+ * holding the driver stat lock, so the client can then fetch a
+ * consistent set via the "get snapshot <category>" requests.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/* The stat lock is not held yet; do not jump to err_out,
+		 * which unlocks the mutex (unlocking an unheld mutex is
+		 * undefined behavior).
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	/* Drop any previous snapshot held for this client */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	/* Read all categories under one lock for a consistent snapshot */
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	/* The snapshot request itself carries no reply payload */
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	/* Discard the partial snapshot so inconsistent data cannot be read */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one category of a previously taken snapshot into a freshly
+ * allocated reply buffer owned by the caller.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	/* The client must have taken a snapshot first */
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* Was "if (!data)": tested the out-parameter (never NULL) instead
+	 * of the allocation result, so malloc failure went undetected.
+	 */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": return the color part of the client snapshot */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": return the queue part of the client snapshot */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": return the Rx part of the client snapshot */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": return the Tx part of the client snapshot */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function: decodes 'function' and dispatches it
+ * through stat_entry_funcs.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id was annotated _unused but is forwarded here */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer allocated by a request handler. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Remove and free a client's snapshot state when it disconnects.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink the entry from the singly linked list */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);	/* free(NULL) is a no-op, no guard needed */
+	free(snaps);
+}
+
+/* Operations this module exposes to the ntconnect framework */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for this adapter, provided its statistic
+ * layout version is one of layout_versions_supported.
+ * NOTE(review): mp_nthw_stat is dereferenced here without the NULL check
+ * used by the request handlers - confirm it is always set before
+ * registration is attempted.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-adapter registration slots for the test module */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Top-level dispatch table for the test module */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo test request: validates the received blob and replies with a copy
+ * of the test payload, or with a status-only reply on malformed input.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	/* The request payload follows the header in the input buffer */
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		/* message fixed: was "to small" */
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* Success: reply mirrors the received test array. Note that
+		 * test_cpy still points into the original request buffer,
+		 * which remains valid after *data is repointed.
+		 */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Status-only reply describing the validation failure */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* NOTE(review): these constants are not referenced anywhere in this
+ * file - confirm they are needed or remove them.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Entry point for all "test" requests; decodes 'function' and dispatches
+ * it through adapter_entry_funcs.
+ */
+static int test_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id was annotated _unused but is forwarded here */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Callback used by the ntconnect framework to release reply buffers
+ * allocated by this module's request handlers.
+ */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	free(data);	/* free(NULL) is a no-op, no guard needed */
+}
+
+/* Per-client cleanup hook; the test module keeps no per-client state. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations this module exposes to the ntconnect framework */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register the test module for one adapter with the ntconnect framework.
+ * Claims the first free slot in test_hdl[]; returns -1 when all
+ * MAX_CLIENTS slots are taken, otherwise the result of register_ntconn_mod().
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int i;
+
+	/* Find the first free adapter slot */
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (test_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[i],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (8 preceding siblings ...)
  2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
@ 2023-08-21 11:34 ` Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (10 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Removed unneeded cflags, as suggested in review comments.
v5:
* Disable build for unsupported platforms.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0,
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+struct nt_fpga_field_init {
+	int id;
+	uint16_t bw;
+	uint16_t low;
+	uint64_t reset_val;
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+struct nt_fpga_register_init {
+	int id;
+	uint32_t addr_rel;
+	uint16_t bw;
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+struct nt_fpga_module_init {
+	int id;
+	int instance;
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base;
+	int nb_registers;
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params;
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..f83c868a57
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64') or not dpdk_conf.has('RTE_ARCH_64')
+    build = false
+    reason = 'only supported on x86_64 and aarch64 linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_cam_data_fields[] = {	/* 12 fields, 216 bits total (6x32-bit words + 6x4-bit FT) */
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_data_fields[] = {	/* 44 fields spanning the 781-bit KM recipe word */
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {	/* { id, index, total bits, type, reset, nb_fields, fields } */
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {	/* transceiver DRP access - presumably Xilinx GTY; confirm */
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {	/* per-lane (_0.._3) RX transceiver controls */
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {	/* non-zero resets (24 per lane); register reset below composes them */
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {	/* latched copies of STAT_PCS_RX (same bit layout) */
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {	/* register reset 94373291 == (1440 << 16) | 1451 */
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_pcs_registers[] = {	/* { id, index, total bits, type, reset, nb_fields, fields } */
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {	/* RX counters; all 32-bit read-only */
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {	/* TX counters; all 32-bit read-only */
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {	/* { id, index, total bits, type, reset, nb_fields, fields } */
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_ta_control_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {	/* { id, index, total bits, type, reset, nb_fields, fields } */
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {	/* { id, index, total bits, type, reset, nb_fields, fields } */
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pdb_config_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {	/* 18 fields spanning the 67-bit PDB recipe word */
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {	/* { id, index, total bits, type, reset, nb_fields, fields } */
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t pdi_cr_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {	/* non-zero reset (3); matches PDI_PRE register reset below */
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {	/* { id, index, total bits, type, reset, nb_fields, fields } */
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {	/* field layout: { id, bit width, lsb position, reset } */
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {	/* resets are all-ones (511 / 1) */
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/*
+ * QSL module tables (queue selection logic, presumably, given the QUEUE/QEN/
+ * TX_PORT/TCI fields -- confirm against the ntnic flow documentation).
+ * Each table is accessed through a CTRL register ({ ADR, CNT } address/count
+ * pair) and a DATA register, a pattern repeated by the other RAM-backed
+ * modules in this file.
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }
+ * (layout inferred from data consistency -- confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/*
+ * QSPI module tables (quad SPI flash controller).
+ * NOTE(review): the register word offsets match the Xilinx AXI Quad SPI core
+ * map when multiplied by 4 (SRR at 16 -> 0x40, SPICR at 24 -> 0x60, DTR at
+ * 26 -> 0x68, DRR at 27 -> 0x6C, SSR at 28 -> 0x70), so this presumably wraps
+ * that IP -- confirm against Xilinx PG153.
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ * QSPI_SR reset 37 = RXEMPTY(bit 0) | TXEMPTY(bit 2) | SLVMS(bit 5), matching
+ * the per-field resets below.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * RAC module tables (register access controller, presumably: RAB inbound/
+ * outbound buffers with free/used accounting plus DMA buffer addresses and
+ * read/write pointers -- confirm against the ntnic RAB documentation).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ * NOTE(review): RAC_RAB_BUF_FREE reset 33489407 = 511 | (511 << 16), matching
+ * the IB_FREE/OB_FREE field resets; offsets here (4160, 4200, ...) are much
+ * larger than other modules', so they may be byte addresses -- confirm.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/*
+ * RFD module tables (RX frame decoder, presumably, given the VLAN TPID /
+ * VXLAN port / max-frame-size configuration -- confirm).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ * Defaults worth noting: TPID 33024 = 0x8100 (the 802.1Q VLAN ethertype),
+ * VXLAN destination port 4789 (the IANA-assigned VXLAN UDP port), and max
+ * frame size 9018 (jumbo frames); register resets pack both 16-bit fields,
+ * e.g. 2164293888 = 0x8100 | (0x8100 << 16).
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/*
+ * RMC module tables (RX MAC control, presumably: per-MAC-port blocking and
+ * FIFO overflow status -- confirm).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ * RMC_CTRL reset 771 = 0x303: BLOCK_STATT | BLOCK_KEEPA (bits 0..1) plus
+ * BLOCK_MAC_PORT = 3 (bits 8..9), matching the field resets below.
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/*
+ * RPL module tables (packet replacer, presumably: recipes select an offset/
+ * length and a pointer into a 128-bit-wide replacement-value RAM -- confirm).
+ * Each RAM has the common CTRL ({ ADR, CNT }) / DATA register pair.
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }
+ * (layout inferred from data consistency -- confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/*
+ * RPP_LR module tables (RX packet processor, judging by the name; the IFR
+ * recipe carries an enable plus a 14-bit MTU -- confirm semantics).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }
+ * (layout inferred from data consistency -- confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/*
+ * RST9563 module tables: reset/clock controller for the 9563 FPGA image
+ * (this file is nthw_fpga_9563_055_024_0000.c). Per-domain reset bits, MMCM/
+ * PLL lock status, and RC1 (clear-on-read-one, presumably) sticky unlock
+ * flags.
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ * RST9563_RST reset 8191 = bits 0..12 all set, which is exactly the OR of the
+ * non-zero per-field resets below (SYS, TMC, RPP, DDR4, SDC, PHY, MAC_RX,
+ * PTP, TS); the *_MMCM reset bits (13..17) start cleared.
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/*
+ * SLC module tables (packet slicer, presumably: recipes describe tail
+ * slicing offset/enable and a PCAP flag -- confirm).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }
+ * (layout inferred from data consistency -- confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/*
+ * SPIM module tables (SPI master, judging by the name and the SPIS slave
+ * sibling below -- confirm): prescaler, control, data TX/RX, status and
+ * software reset.
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ * SPIM_SR reset 6 = TXEMPTY(bit 1) | RXEMPTY(bit 2), matching the per-field
+ * resets below.
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/*
+ * SPIS module tables (SPI slave, judging by the name and the SPIM master
+ * sibling above -- confirm): control, data TX/RX, a small addressable RAM
+ * (RAM_CTRL/RAM_DATA), status with frame/read/write error bits, and software
+ * reset.
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ * SPIS_SR reset 6 = TXEMPTY(bit 1) | RXEMPTY(bit 2), matching the per-field
+ * resets below.
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/*
+ * STA module tables (statistics: byte/packet/CV-error/FCS-error counters,
+ * a host DMA buffer address, and a stat-toggle-missed sticky flag).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }
+ * (layout inferred from data consistency -- confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/*
+ * TEMPMON module tables (on-die temperature monitor: a 12-bit raw temperature
+ * reading plus over-temperature/temperature alarm bits with override
+ * controls).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }
+ * (layout inferred from data consistency -- confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/*
+ * TINT module tables (timer interrupt, presumably: a write-only interval and
+ * RC1 skipped/delayed counters -- confirm).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }
+ * (layout inferred from data consistency -- confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/*
+ * TMC module tables (TX merge/control, presumably -- confirm): a single
+ * write-only per-port replication register; reset 2 matches the field resets
+ * (P0 = 0 at bit 0, P1 = 1 at bit 1).
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows are
+ * { reg_id, word_offset, bit_width, access_type, reset, field_count, fields }.
+ */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/*
+ * TSM module tables (time sync module, presumably, given the NTTS sync,
+ * timestamp-format and PI-controller registers -- confirm): external time
+ * connectors CON0..CON6 (config + sec/ns sample pairs; CON0 additionally has
+ * an electrical interface register), a host sample pair (CON7), global
+ * config, interrupt config/status, LED control, NTTS (Napatech time sync,
+ * presumably) state, a small programmable sequencer (PB_CTRL/PB_INSTMEM), PI
+ * clock-steering gains (KP/KI/SHL), link/sync status, hard-set time, and two
+ * T0/T1 timers defaulting to 50000 counts.
+ * Field rows are { field_id, bit_width, lsb_offset, reset }; register rows
+ * (defined after this hunk) are { reg_id, word_offset, bit_width,
+ * access_type, reset, field_count, fields } (layout inferred from data
+ * consistency -- confirm against fpga_model.h).
+ * NOTE(review): CON5's low sample field is named ..._LO_TIME while every
+ * other connector uses ..._LO_NS -- presumably a generator naming quirk, not
+ * a semantic difference; confirm.
+ */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = { /* { reg id, offset(?), bit width, access type, reset value, #fields, field table } - second value looks like the register offset within the TSM module, confirm against nt_fpga_register_init_t */
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = { /* { module id, instance, def id, ver major, ver minor, bus type, bus address, #registers, register table } - layout inferred from the per-entry comments (e.g. "CAT v0.21 @ RAB1,768"), confirm against nt_fpga_module_init_t */
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = { /* { parameter id, value }; terminated by the { 0, -1 } sentinel */
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 }, /* Unix epoch seconds; same value as fpga_build_time below */
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 }, /* 0x01c5 */
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 }, /* 0x18f4 */
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = { /* top-level descriptor for FPGA image 9563-055-024-0000 */
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules, /* 140 parameters (excl. {0,-1} end marker), 48 modules */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[]; /* registry of supported FPGA designs; termination convention not visible here - confirm at definition site */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000; /* product 9563, version 55, revision 24, patch 0 */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_MODULES_DEFS_H_
+#define NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L) /* id 32 is not defined here; cf. the MOD_MAC10 alias below */
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicators - keep these as the last numbered entries;
+ * only aliases may be added below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Total number of distinct module ids (aliases excluded) */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_PARAMETERS_DEFS_H_
+#define NTHW_FPGA_PARAMETERS_DEFS_H_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* NTHW_FPGA_PARAMETERS_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG - frame generator (per-stream burst size, run, size mask, stream ID); name expansion unverified */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF - TX MAC feeder: inter-frame-gap (IFG) control and timestamp injection; presumably "generic MAC feeder" */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY - PHY module GPIO; LPMODE/MODPRS_B/RESET_B/INT_B match QSFP module management pins */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS - per-port PHY GPIO; RXLOS/TXDISABLE/TXFAULT match SFP-style module pins */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP - SFP+ module GPIO (ABS, rate select, RXLOS, TXDISABLE, TXFAULT) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU - header field updater recipes (length/TTL/checksum rewrite); name expansion unverified */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF - host (PCIe) interface; EXT_TAG/MAX_TLP/MAX_READ correspond to PCIe configuration terms */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH - RSS hashing recipes (Toeplitz key, seed, word masks) */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST - header stripper/modifier recipes (strip mode, modifier cmd/ofs/value); name expansion unverified */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G - 10G Ethernet PHY core: indirect access, link status, built-in test-frame generator */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR - per-recipe MTU/enable regs; presumably IP fragmentation related - confirm against module docs */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC - I2C bus controller; register map (CR/SR/RX_FIFO/TEN_ADR/THIGH/TLOW/TSUSTA...) matches the Xilinx AXI IIC core */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS - inserter recipes (dynamic offset, length); presumably header/payload insertion */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA - I/O action recipes: VLAN push/pop, queue override, tunnel pop; name expansion unverified */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF - IP fragment handling: unmatched-fragment queues, expiry/timeout; name expansion unverified */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM - key matcher: CAM/TCAM lookup tables and match recipes */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO - no registers defined for this module in this FPGA build */
+/* MAC - Ethernet MAC: link summary, RX/TX config, statistics, test-frame generator */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX: register/field IDs for MAC receive-side statistics counters */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG: register/field IDs for the MAC frame generator (TFG) block */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX: register/field IDs for MAC transmit-side statistics counters */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU: register/field IDs for the embedded MCU (CSR control, IRAM/DRAM access) */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG: register/field IDs for the MDG module (control, HBM/HBS tables, debug) */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: register/field IDs for the masker (MSK) recipe (RCP) table */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF: register/field IDs for the NIF module (GT/QPLL control, DRP access, link status) */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-21 11:34   ` Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   71 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15441 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_BUS_H__
#define __NTHW_BUS_H__

#include <stdint.h>	/* uint8_t — header must be self-contained */

/* Identifier of a RAB (Register Access Bus) bus instance. */
typedef uint8_t rab_bus_id_t;

#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f83c868a57..0a72e46174 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#include "nthw_clock_profiles.h"

/* Clock profile for NT200A02 2x40G, 2x100G */
/*
 * Tentative (zero-valued) definitions of the externs declared in
 * nthw_clock_profiles.h: entry count 0 and a NULL table pointer.
 * NOTE(review): presumably placeholders to be filled in by a later
 * commit that supplies the actual Si5340 register table — confirm.
 */
const int n_data_si5340_nt200a02_u23_v5;
const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Record layouts for clock-synthesizer configuration profiles
 * (register-address/value tables in three packing formats).
 */

#ifndef __NTHW_CLOCK_PROFILES_H__
#define __NTHW_CLOCK_PROFILES_H__

#include <stdint.h>

#include "nthw_helper.h"

/* Message used when a profile table fails a size/sanity check. */
#define clk_profile_size_error_msg "size test failed"

/* Format 0: 8-bit address, value and read-modify-write mask. */
typedef struct {
	unsigned char reg_addr;
	unsigned char reg_val;
	unsigned char reg_mask;
} clk_profile_data_fmt0_t;

/* Format 1: 16-bit address, 8-bit value (no mask — full write). */
typedef struct {
	uint16_t reg_addr;
	uint8_t reg_val;
} clk_profile_data_fmt1_t;

/* Format 2: wide address, 8-bit value (used by the Si5340 table below). */
typedef struct {
	unsigned int reg_addr;
	unsigned char reg_val;
} clk_profile_data_fmt2_t;

/* Selector telling consumers which of the three layouts a table uses. */
typedef enum {
	CLK_PROFILE_DATA_FMT_0,
	CLK_PROFILE_DATA_FMT_1,
	CLK_PROFILE_DATA_FMT_2
} clk_profile_data_fmt_t;

/* Si5340 profile for NT200A02 (entry count + table), defined elsewhere. */
extern const int n_data_si5340_nt200a02_u23_v5;
extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;

#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/* Umbrella header pulling in all NTHW core module interfaces. */

#ifndef __NTHW_CORE_H__
#define __NTHW_CORE_H__

#include "nthw_helper.h"

/* Platform, FPGA model and PCIe/host-interface modules */
#include "nthw_platform_drv.h"
#include "nthw_fpga_model.h"
#include "nthw_hif.h"
#include "nthw_pcie3.h"
#include "nthw_pci_rd_tg.h"
#include "nthw_pci_wr_tg.h"
#include "nthw_pci_ta.h"
#include "nthw_iic.h"

/* PHY/MAC-facing modules */
#include "nthw_gpio_phy.h"
#include "nthw_mac_pcs.h"
#include "nthw_mac_pcs_xxv.h"
#include "nthw_sdc.h"

/* SPI master/slave modules */
#include "nthw_spim.h"
#include "nthw_spis.h"

/* Time sync module */
#include "nthw_tsm.h"

/* Clock synthesizer */
#include "nthw_si5340.h"

#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
/*
 * CRC-16-CCITT (XMODEM variant: polynomial 0x1021, initial value 0) over
 * @length bytes at @buffer. Table-free bytewise implementation; the check
 * value for the ASCII string "123456789" is 0x31C3.
 */
static uint16_t crc16(uint8_t *buffer, size_t length)
{
	uint16_t crc = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		/* Swap bytes, mix in the next input byte, then fold. */
		crc = (uint16_t)((crc >> 8) | (crc << 8));
		crc = (uint16_t)(crc ^ buffer[i]);
		crc = (uint16_t)(crc ^ ((crc & 0xffU) >> 4));
		crc = (uint16_t)(crc ^ (crc << 12));
		crc = (uint16_t)(crc ^ ((crc & 0xffU) << 5));
	}
	return crc;
}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint64_t n_fpga_ident;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	n_fpga_ident = p_fpga_info->n_fpga_ident;
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info) {
+		if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+			res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Public interface for FPGA identification/initialization and low-level
 * utility probing (AVR, I2C scan, Si-Labs clock synth) used by the ntnic PMD.
 */

#ifndef __NTHW_FPGA_H__
#define __NTHW_FPGA_H__

#include "nthw_drv.h"

#include "nthw_fpga_model.h"

#include "nthw_rac.h"
#include "nthw_iic.h"

#include "nthw_stat.h"

#include "nthw_fpga_rst.h"

#include "nthw_fpga_nt200a0x.h"

#include "nthw_dbs.h"

/* Identify the FPGA and bring up RAC/adapter/HIF/PCIe3/TSM; 0 on success */
int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
/* Reset the register access bus; returns -1 when no RAC is present */
int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);

/* Read FPGA build parameters (ports, NIMs, ...) into p_fpga_info */
int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);

/* Probe the on-board AVR microcontroller instance via SPI */
int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);

/* Scan a range of I2C controller instances for responding devices */
int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
		       const int n_instance_no_end);

/* Single-byte register read/write over a given I2C controller instance */
int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
			     uint8_t val);

/* Detect a Si-Labs device at the given address/page register */
int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
			    const int n_dev_addr, const int n_page_reg_addr);

/* Program a Si5340 clock synth with a format-2 profile; 0 on success */
int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
	const uint8_t n_iic_addr,
	const clk_profile_data_fmt2_t *p_clk_profile,
	const int n_clk_profile_rec_cnt);

#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res = -1;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/* NT200A0x adapter family: reset/initialization entry points */

#ifndef __NTHW_FPGA_NT200A0X_H__
#define __NTHW_FPGA_NT200A0X_H__

/* Run common + product-specific reset for an NT200A0x adapter; 0 on success */
int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);

/* NT200A02: 9563 */
int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
			  struct nthw_fpga_rst_nt200a0x *const p);

#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+
+	if (p_mod_rst) {
+		nt_register_t *p_reg_rst;
+		nt_field_t *p_fld_rst_periph;
+
+		NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+		p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+		p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+		field_set_flush(p_fld_rst_periph);
+		field_clr_flush(p_fld_rst_periph);
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	int res;
+
+	if (n_si_labs_clock_synth_model == 5340) {
+		res = nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+			n_si_labs_clock_synth_i2c_addr,
+			p_data_si5340_nt200a02_u23_v5,
+			n_data_si5340_nt200a02_u23_v5);
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		res = -1;
+	}
+	return res;
+}
+
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model;
+	uint8_t n_si_labs_clock_synth_i2c_addr;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+	n_si_labs_clock_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	n_si_labs_clock_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_si_labs_clock_synth_model,
+						n_si_labs_clock_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+/*
+ * Wait until the DDR4 PLL reports LOCKED, retrying with a DDR PLL reset
+ * pulse between attempts, then wait for the DDR4 (and optional TSM REF)
+ * MMCMs and clear the sticky unlock bits.
+ *
+ * Returns 0 on success, -1 if an MMCM never locks. A DDR4 PLL lock
+ * failure is logged but not returned directly; the subsequent MMCM wait
+ * will fail in that case.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t locked;
+	uint32_t retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		/*
+		 * Separate wait-result variable: an earlier revision declared a
+		 * second 'locked' here, shadowing the outer one (-Wshadow) and
+		 * making the error log below ambiguous.
+		 */
+		int n_wait_res = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+						      timeout, 100);
+		if (n_wait_res == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		/* retrycount is unsigned, so '<= 0' could only ever mean '== 0' */
+		if (retrycount == 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, n_wait_res);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Assert DDR PLL reset */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Release DDR PLL reset */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is optional; only wait for it when the field exists */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits (write-1-to-clear) */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+/*
+ * Probe for an SDC module, then loop: wait for the DDR4 PLL to lock and,
+ * when an SDC is present, for SDRAM calibration to complete; on failure,
+ * pulse the DDR reset and retry (up to n_retry_cnt_max times).
+ * Returns 0 when calibration (or, without an SDC, the PLL lock path)
+ * succeeds, non-zero otherwise.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	/* A NULL instance acts as a pure probe for the SDC module */
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			/* allocation failed; continue without an SDC handle */
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					/* NOTE(review): n_result_mask is uint64_t but the
+					 * format uses "%08lX", which assumes a 64-bit long;
+					 * PRIX64 would be portable — verify against the
+					 * project's NT_LOG conventions.
+					 */
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			/* NOTE(review): p_nthw_sdc can be NULL on this path when no
+			 * SDC module was found — confirm nthw_sdc_get_states()
+			 * tolerates a NULL instance.
+			 */
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Full NT200A0x FPGA reset sequence: assert all domain resets to known
+ * defaults, select the timesync reference clock, release SYS/CORE
+ * resets and wait for their MMCMs, bring the remaining RAB buses out of
+ * reset, release the per-domain resets, wait for DDR4/SDRAM
+ * calibration, and finally run the timesync/PTP reset steps and power
+ * staging. Returns 0 on success, -1 on any lock/calibration failure.
+ *
+ * The statement order mirrors the numbered hardware bring-up steps in
+ * the comments below and must not be reordered.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	/* Optional fields (NULL on FPGAs lacking the module) are guarded */
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	/* Lock failures here are logged but not fatal; later sticky-bit
+	 * checks catch persistent problems.
+	 */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	/* From here on, a set sticky unlock bit means a clock domain dropped
+	 * its lock at some point since the bits were cleared — treat as fatal.
+	 */
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* Intentionally disabled PTP MMCM switch-over sequence — kept for
+	 * reference; presumably pending hardware/firmware support (verify).
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Early NT200A0x init: reset/setup the RAB buses, probe the AVR and I2C
+ * buses, then detect the Si-Labs clock synthesizer (Si5340 first, then
+ * the older Si5338) and cache model/address/hw-id in p_rst for the
+ * later reset sequence. Returns 0/negative.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): the results of the AVR probe and the first I2C scan
+	 * are overwritten below; the function's return value is the result
+	 * of the last scan — confirm this is intentional (best-effort probes).
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Cache detection results for nthw_fpga_rst9563_init() et al. */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset-module state for the NT200A0x board family: detected FPGA/HW
+ * identification, clock-synth detection results, and resolved register
+ * field handles. Field pointers that are NULL indicate the feature is
+ * absent on the loaded FPGA image; callers guard for this.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Si-Labs clock synthesizer detected at init (e.g. 5338/5340) */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (per reset domain) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers (latched MMCM/PLL unlock events) */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional board-specific hooks; may be NULL when unsupported */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate a zero-initialized GPIO_PHY instance.
+ * Returns NULL on allocation failure; free with nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc replaces the former malloc+memset: it zero-fills and
+	 * checks the size product for overflow in one call.
+	 */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/*
+ * Scrub and free a GPIO_PHY instance; NULL is accepted and ignored.
+ */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+	/* Clear the object before releasing it (defensive wipe) */
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY instance to module instance n_instance of p_fpga and
+ * resolve all CFG/GPIO register fields for both ports.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise. With a non-NULL p, returns 0 on
+ * success and -1 if the module instance is absent.
+ *
+ * register_get_field() is used for mandatory fields,
+ * register_query_field() for optional ones (those may be NULL).
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* Prime the shadow copy of the CFG register */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/*
+ * Report whether low-power mode is active on interface if_no.
+ * Out-of-range if_no asserts (debug builds) and reports false.
+ */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* Non-zero LPMODE pin means low-power mode is enabled */
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/*
+ * Report whether the port interrupt is asserted on interface if_no.
+ * Out-of-range if_no asserts (debug builds) and reports false.
+ */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* "INT_B" is active-low: pin reading 0 means interrupt asserted */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/*
+ * Report whether the module reset is asserted on interface if_no.
+ * Out-of-range if_no asserts (debug builds) and reports false.
+ */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* "RESET_B" is active-low: pin reading 0 means reset asserted */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/*
+ * Report whether a transceiver module is present on interface if_no.
+ * Out-of-range if_no asserts (debug builds) and reports false.
+ */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* "MODPRS_B" is active-low: pin reading 0 means module present */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/*
+ * Report whether the PLL interrupt pin is asserted on interface if_no.
+ * Interfaces without a "PLL_INTR" pin (field pointer NULL — no INTR
+ * from the SyncE jitter attenuator on that HW) always report false.
+ * Out-of-range if_no asserts (debug builds) and reports false.
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* "PLL_INTR" is a normal (active-high) GPIO pin */
+	if (!p->mpa_fields[if_no].gpio_pll_int)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/*
+ * Report the emulated RXLOS state for interface if_no. Interfaces
+ * without the RXLOS field (pointer NULL) always report false.
+ * Out-of-range if_no asserts (debug builds) and reports false.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_port_rxlos)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/*
+ * Drive the LPMODE pin of interface if_no (enable = low-power on) and
+ * configure the pin as an output.
+ * Out-of-range if_no asserts (debug builds) and does nothing.
+ */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Drive the active-low RESET_B pin of interface if_no (enable = assert
+ * reset, i.e. drive the pin low) and configure the pin as an output.
+ * Out-of-range if_no asserts (debug builds) and does nothing.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/*
+ * Set or clear the emulated RXLOS pin of interface if_no. A no-op on
+ * interfaces without the RXLOS field (pointer NULL).
+ * Out-of-range if_no asserts (debug builds) and does nothing.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (!flds->gpio_port_rxlos)
+		return;
+
+	if (enable)
+		field_set_flush(flds->gpio_port_rxlos);
+	else
+		field_clr_flush(flds->gpio_port_rxlos);
+}
+
+/*
+ * Restore the default pin directions for interface if_no: all control
+ * pins become inputs; the optional emulated RXLOS (when present)
+ * becomes an output.
+ * Out-of-range if_no asserts (debug builds) and does nothing.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	field_set_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_int); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_reset); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_mod_prs); /* enable input */
+	if (p->mpa_fields[if_no].cfg_port_rxlos)
+		field_clr_flush(p->mpa_fields[if_no].cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * GPIO PHY module wrapper: register/field handles for controlling the
+ * pluggable-module GPIO pins (low-power, interrupt, reset, present, RXLOS)
+ * of up to GPIO_PHY_INTERFACES front-panel interfaces.
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-interface field handles; the optional fields may be NULL. */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* One GPIO_PHY module instance bound to an FPGA image. */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate a zeroed HIF instance; returns NULL on allocation failure.
+ * calloc() replaces the original malloc()+memset() pair - same result.
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/*
+ * Free a HIF instance; NULL is accepted.  The memory is scrubbed before
+ * being released so a stale pointer is more likely to fault early.
+ */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_hif_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a HIF (host interface) module instance: resolve its registers and
+ * fields, snapshot the FPGA product identification and derive the HIF
+ * reference clock frequency from the HIF period parameter.
+ *
+ * When @p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 when the requested instance does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* FPGA product identification (group/version/revision/build) */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Reg/Fld not present on HIF.  The previous code tested the pointer
+	 * right after assigning it NULL (both branches dead-ended in NULL);
+	 * the always-false conditionals have been removed.
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL; /* Reg/Fld not present on HIF */
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* PCIe statistics engine: control, RX/TX counters, reference clock */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* HIF_STATUS and its error fields are optional */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/*
+		 * NOTE(review): mp_reg_status is NULL here, so the re-query
+		 * returns NULL again and register_query_field() receives a
+		 * NULL register - presumably that helper tolerates NULL;
+		 * confirm and simplify.
+		 */
+		p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+	if (p->mp_reg_config) {
+		p->mp_fld_max_tlp =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+		p->mp_fld_max_read =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+		p->mp_fld_ext_tag =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+	} else {
+		p->mp_fld_max_tlp = NULL;
+		p->mp_fld_max_read = NULL;
+		p->mp_fld_ext_tag = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Apply PCIe gen3 tuning (bounded read-request size, extended tags) on all
+ * adapters except NT40E3.  Always returns 0.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT40E3)
+		return 0;
+
+	if (p->mp_fld_max_read) {
+		/*
+		 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+		 * (index=5), but the server crashed. For now we need to limit this value to
+		 * 512 (index=2)
+		 */
+		const uint32_t n_max_read_req_size =
+			field_get_updated(p->mp_fld_max_read);
+
+		if (n_max_read_req_size > 2) {
+			field_set_val_flush32(p->mp_fld_max_read, 2);
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+			       p_adapter_id_str, __func__,
+			       n_max_read_req_size);
+		}
+	}
+
+	if (p->mp_fld_ext_tag)
+		field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+	if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+		NT_LOG(INF, NTHW,
+		       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+		       p_adapter_id_str, __func__,
+		       field_get_updated(p->mp_fld_max_tlp),
+		       field_get_updated(p->mp_fld_max_read),
+		       field_get_updated(p->mp_fld_ext_tag));
+	}
+
+	return 0;
+}
+
+/*
+ * Kick the TSM sampling mechanism by writing a magic token to the HIF
+ * sample-time field.
+ *
+ * Returns 0 on success, -1 when the FPGA image has no sample-time register
+ * (nthw_hif_init() leaves mp_fld_sample_time NULL in that case; the old
+ * code dereferenced it unconditionally).
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (p->mp_fld_sample_time == NULL)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw PCIe statistics: RX/TX transaction counters, reference
+ * clock count, the fixed transaction unit size and the reference clock
+ * frequency.  The optional status fields report 0 when absent.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	*p_tags_in_use = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+
+	*p_rd_err = 0;
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+
+	*p_wr_err = 0;
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/*
+ * Convert the raw statistics counters into byte rates relative to the
+ * reference clock.  Error counters are incremented (not overwritten) so a
+ * caller can accumulate them across samples.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx, n_tx, n_ref_clk, n_unit_size, n_ref_freq;
+	uint64_t n_tags, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size, &n_ref_freq,
+			&n_tags, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	/* Without reference clock ticks no rate can be derived */
+	if (n_ref_clk == 0) {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) /
+		(uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) /
+		(uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/*
+ * Start a statistics measurement: set ENA and REQ (both live in the
+ * STAT_CTRL register and are written out by the single flush).
+ */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Stop a statistics measurement: clear ENA while latching REQ; a single
+ * flush writes both fields of the STAT_CTRL register.
+ */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Measure PCIe RX/TX rates over a fixed 100 ms window and return the
+ * derived rates.  Note: blocks the caller for the whole window.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000); /* 100 ms measurement window */
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill @epc with the current endpoint rate counters.
+ *
+ * NOTE(review): cur_tx receives nthw_hif_get_stat_rate()'s RX-rate output
+ * and cur_rx the TX-rate output - presumably deliberate (host RX equals
+ * endpoint TX); confirm against the callers.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * HIF (host interface) module wrapper: register/field handles for product
+ * identification, PCIe statistics and PCIe gen3 configuration.
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Size in bytes of one statistics counter unit */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* FPGA product identification */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - always NULL (see nthw_hif_init) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt registers - not present on HIF, always NULL */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time; /* may be NULL when register absent */
+
+	/* Optional status register; error fields may be NULL */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* PCIe statistics engine */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* Optional PCIe gen3 configuration; fields NULL when absent */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* Cached product identification read at init time */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* One endpoint's sampled PCIe rates, counters and traffic-gen settings */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00) /* R/W address bit: write transaction */
+#define I2C_TRANSMIT_RD (0x01) /* R/W address bit: read transaction */
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns - setup time, repeated START */
+static const uint32_t susto = 4000; /* ns - setup time, STOP */
+static const uint32_t hdsta = 4000; /* ns - hold time, START */
+static const uint32_t sudat = 250; /* ns - data setup time */
+static const uint32_t buf = 4700; /* ns - bus free time between transfers */
+static const uint32_t high = 4000; /* ns - SCL high period */
+static const uint32_t low = 4700; /* ns - SCL low period */
+static const uint32_t hddat = 300; /* ns - data hold time */
+
+/*
+ * Pulse the TX-FIFO reset bit in the control register: refresh the shadow
+ * copy, assert the bit, then deassert it.  The two flushes are required -
+ * the hardware sees the reset only on the set-then-clear sequence.
+ */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Push one entry into the controller TX FIFO: the data byte plus the
+ * START/STOP condition flags.  All three fields are staged in the shadow
+ * register and written out by the single register_flush().
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one received byte from the controller RX FIFO into *p_data. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
+/*
+ * Soft-reset the IIC controller by writing the reset key to SOFTR.
+ * NOTE(review): 0x0A is presumably the controller's documented reset key
+ * (matches the Xilinx AXI IIC SOFTR key) - confirm against the IP guide.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Enable the IIC controller (CR.EN) using a read-modify-write. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report in *pb_flag whether SR.BB indicates the I2C bus is busy. */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Report in *pb_flag whether the controller RX FIFO is empty. */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ *
+ * Program every bus-timing register as its minimum time (ns) divided by
+ * the controller clock cycle time.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		nt_field_t *p_fld;
+		uint32_t n_ns;
+	} a_timings[] = {
+		{ p->mp_fld_tsusta, susta }, { p->mp_fld_tsusto, susto },
+		{ p->mp_fld_thdsta, hdsta }, { p->mp_fld_tsudat, sudat },
+		{ p->mp_fld_tbuf, buf },     { p->mp_fld_thigh, high },
+		{ p->mp_fld_tlow, low },     { p->mp_fld_thddat, hddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timings) / sizeof(a_timings[0]); i++) {
+		uint32_t val = a_timings[i].n_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timings[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate a zeroed IIC instance; returns NULL on allocation failure.
+ * calloc() replaces the original malloc()+memset() pair - same result.
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Bind an IIC controller instance: resolve its registers/fields, soft-reset
+ * and enable the controller, program the bus timing (when a non-zero cycle
+ * time is given) and reset the TX FIFO.
+ *
+ * When @p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 when the instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* Negative arguments select the built-in retry/poll defaults */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Bus timing registers */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* Data-path registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Free an IIC instance; NULL is accepted.  The memory is scrubbed before
+ * being released so a stale pointer is more likely to fault early.
+ */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_iic_t));
+		free(p);
+	}
+}
+
+/*
+ * Configure poll delay (us) and retry budgets.  A negative argument
+ * selects the built-in default for that parameter.  Always returns 0.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry = (n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read data_len bytes from device register reg_addr into p_void, retrying
+ * the whole transfer up to mn_read_data_retry times.
+ * Returns 0 on success, -1 when the retry budget is exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	for (;;) {
+		if (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) == 0)
+			break;
+
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C combined write/read transaction: write reg_addr to
+ * dev_addr, then read data_len bytes into p_byte.
+ * Returns 0 on success, -1 when the bus or data never became ready.
+ *
+ * Restructured with guard clauses; the old trailing "return 0" after the
+ * if/else (both branches returned) was unreachable and has been removed.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write data_len bytes from p_void to device register reg_addr, retrying
+ * the whole transfer up to mn_write_data_retry times.
+ * Returns 0 on success, -1 when the retry budget is exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	for (;;) {
+		if (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) == 0)
+			break;
+
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Queue one I2C write transaction (reg_addr followed by data_len payload
+ * bytes) for dev_addr.  Returns 0 on success, -1 on bus-not-ready or
+ * data_len == 0.
+ *
+ * NOTE(review): after queueing the transfer, the !busReady path spins in
+ * an unbounded while(true) loop waiting for the bus - a stuck bus hangs
+ * the caller here; consider a bounded retry.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ */
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ *
+ * Polls SR.BB until the bus goes idle or the retry budget runs out.
+ * Returns true when the bus is free.
+ *
+ * Fix: report the observed bus state instead of testing "count == 0".
+ * With a retry budget of 0 the old test returned true while the bus was
+ * still busy (count had reached -1), and returned false when the bus was
+ * actually free.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	return !b_bus_busy;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+/*
+ * Support function for read function. Waits for data ready.
+ *
+ * Polls the RX-FIFO-empty flag until data arrives or the retry budget
+ * runs out.  Returns true when at least one byte is available.
+ *
+ * Fix: report the observed FIFO state instead of testing "count == 0"
+ * (same edge-case defect as nthw_iic_bus_ready with a budget of 0).
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	return !b_rx_fifo_empty;
+}
+
+/*
+ * Probe one device address by reading a single byte from register
+ * n_reg_addr.  Returns the nthw_iic_readbyte() result (0 = device
+ * answered).
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = 0xFF;
+	const int res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr,
+					 (uint8_t)n_reg_addr, 1, &data_val);
+
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan for the first responding device starting at n_dev_addr_start,
+ * walking upwards towards 127 (b_increate true) or downwards towards 0.
+ * Returns the found address, or -1 when nothing answered.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int step = b_increate ? 1 : -1;
+	int res = 0;
+	int i;
+
+	/* Only the far bound is checked, matching the original loop limits. */
+	for (i = n_dev_addr_start; (step > 0) ? (i < 128) : (i >= 0); i += step) {
+		res = nthw_iic_scan_dev_addr(p, i, 0x00);
+		if (res == 0)
+			break;
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/*
+ * Probe every 7-bit I2C device address; hits are logged by
+ * nthw_iic_scan_dev_addr(), nothing is returned beyond 0.
+ */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_addr = 0;
+
+	while (n_addr < 128) {
+		(void)nthw_iic_scan_dev_addr(p, n_addr, 0x00);
+		n_addr++;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/* State for one FPGA I2C (IIC) controller instance. */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;	/* FPGA this controller instance lives on */
+	nt_module_t *mp_mod_iic;	/* handle to the IIC FPGA module */
+	int mn_iic_instance;	/* instance number (used in log messages) */
+
+	uint32_t mn_iic_cycle_time;
+	int mn_poll_delay;	/* us between polls; 0 means busy-poll */
+	int mn_bus_ready_retry;	/* poll budget while waiting for bus idle */
+	int mn_data_ready_retry;	/* poll budget while waiting for RX data */
+	int mn_read_data_retry;	/* retry budget for read transactions */
+	int mn_write_data_retry;	/* retry budget for write transactions */
+
+	/*
+	 * Cached register/field handles for the controller's timing
+	 * registers (setup/hold/high/low times).
+	 * NOTE(review): names suggest an AXI IIC-style layout - confirm
+	 * against the FPGA register definitions.
+	 */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register: enable, master mode, TX FIFO reset, TX ack. */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register: bus busy plus RX/TX FIFO full/empty flags. */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data byte plus start/stop condition flags. */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-interrupt compare value. */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data register. */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register (written with a reset key). */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/*
+	 * Allocate a zero-initialized instance; NULL on allocation failure.
+	 * calloc replaces the original malloc+memset pair - same result,
+	 * single call, and the zeroing cannot be skipped on failure.
+	 */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	/* Scrub the struct before releasing it; NULL is a no-op. */
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	/* Probe-only mode: report module presence without initializing. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	/* The instance number doubles as the port number used in logging. */
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		/* BLOCK_LOCK: per-virtual-lane block lock status. */
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		/* STAT_PCS_RX: current PCS RX status flags. */
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		/* STAT_PCS_RX_LATCH: latched copy of the RX status. */
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		/* VL_DEMUXED: per-virtual-lane demux lock status. */
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		/* GTY_STAT: per-GTY TX/RX reset-done flags. */
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		/* Product-specific lock/reset-done masks, filled in below. */
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		/*
+		 * NOTE(review): the reset-done masks below are set to 1 while
+		 * the comments say the product implements 4 GTYs - verify
+		 * whether (1 << 4) - 1 was intended, or whether a single bit
+		 * summarizes all GTYs on this product.
+		 */
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		/* MAC_PCS_CONFIG: path resets, enables, test patterns. */
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		/* GTY_LOOP: per-GTY loopback selection. */
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		/* PHYMAC_MISC: TX source select and timestamp position. */
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		/* Optional field - query (may be absent on some FPGAs). */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		/* LINK_SUMMARY: link state, fault and NIM status fields. */
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		/* BIP_ERR: per-lane BIP8 error counters. */
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		/* FEC control/status and codeword counters. */
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL: either one combined register or split RX/TX regs. */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL)
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			/* Split layout: RX fields in GTY_CTL_RX, TX in GTY_CTL_TX. */
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		/* Optional register - query (may be absent on some FPGAs). */
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Convenience wrapper: select the host (enable) or the traffic generator
+ * (disable) as the TX data source.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	const bool use_tfg = !enable;
+
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, use_tfg);
+}
+
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	/* Refresh the shadow value before writing the new RX-enable state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	/* Refresh the shadow value before writing the new TX-enable state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	/* Refresh, then set or clear the host-as-TX-source select bit. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	/* Refresh, then set or clear the TFG-as-TX-source select bit. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	/* Optional field - only present on FPGAs with EOP timestamping. */
+	if (!fld)
+		return;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	/* Locked only when every masked block-lock and lane-lock bit is set. */
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	/* Refresh, then assert (enable) or release the TX path reset. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	/* Refresh, then assert (enable) or release the RX path reset. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	/* True while the RX path reset bit is asserted. */
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst) != 0;
+}
+
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	/* Refresh, then enable or disable sending of Remote Fault Indication. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	/* Refresh, then set or clear the RX force-resync control bit. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	/* All four GTY RX reset-done fields must have every masked bit set. */
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *const fld[4] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(fld[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	/* All four GTY TX reset-done fields must have every masked bit set. */
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *const fld[4] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(fld[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/* GT_LOOP value: 2 when host loopback is enabled, 0 otherwise. */
+	const uint32_t val = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/* GT_LOOP value: 4 when line loopback is enabled, 0 otherwise. */
+	const uint32_t val = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	/* One 32-bit BIP8 error counter per PCS virtual lane. */
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	/*
+	 * Read all lane counters in one go.
+	 * NOTE(review): presumably the counters are clear-on-read, which is
+	 * what makes this function a "reset" - confirm with the BIP_ERR
+	 * register specification.
+	 */
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		/* Log only lanes that actually saw BIP8 errors. */
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	/* Expose only the least-significant bit of the PCS RX status. */
+	const uint8_t rx_status =
+		(uint8_t)field_get_updated(p->mp_fld_stat_pcs_rx_status);
+
+	*status = rx_status & 0x01;
+}
+
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	/* High bit-error-rate indication from the PCS RX status register. */
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber) != 0;
+}
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	/*
+	 * Snapshot the LINK_SUMMARY register and copy each requested field to
+	 * the caller; every output pointer may be NULL to skip that field.
+	 */
+	register_update(p->mp_reg_link_summary);
+
+	if (p_abs != NULL)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state != NULL)
+		*p_nt_phy_link_state = field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	if (p_lh_abs != NULL)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state != NULL)
+		*p_ll_nt_phy_link_state = field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	if (p_link_down_cnt != NULL)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr != NULL)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault != NULL)
+		*p_lh_local_fault = field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	if (p_lh_remote_fault != NULL)
+		*p_lh_remote_fault = field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	if (p_local_fault != NULL)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault != NULL)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	/* RS_FEC_CTRL_IN: all-zero enables FEC, all-ones (5 bits) bypasses. */
+	const uint32_t ctrl_val = enable ? 0 : (1 << 5) - 1;
+
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, ctrl_val);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	/* True when the FEC block is bypassed. */
+	return field_get_updated(p->mp_field_fec_stat_bypass) != 0;
+}
+
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	/* True when the FEC status reports valid operation. */
+	return field_get_updated(p->mp_field_fec_stat_valid) != 0;
+}
+
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	/* True when the FEC lanes are aligned. */
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn) != 0;
+}
+
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	/* True when at least one of the four alignment-marker locks is set. */
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	/* True only when all four alignment-marker locks are set. */
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	/*
+	 * Dump all FEC_STAT fields at debug level.
+	 * Fix: the fourth lock value printed is am_lock3, but the format
+	 * string labelled it "AM_LOCK_0" - corrected to "AM_LOCK_3".
+	 */
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
/*
 * Read the FEC corrected (CW_CNT) and uncorrected (UCW_CNT) codeword
 * counter registers and log any non-zero values at debug level.
 *
 * NOTE(review): no explicit write happens here, so the "reset" in the name
 * presumably relies on the counters being clear-on-read — confirm against
 * the register specification.
 */
void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
{
	register_update(p->mp_reg_fec_cw_cnt);
	register_update(p->mp_reg_fec_ucw_cnt);

	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
	}
	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
	}
}
+
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
/*
 * Program the GTY transmit equalization for one lane.
 *
 * @p           MAC/PCS instance
 * @lane        lane number 0-3; any other value is silently ignored
 * @tx_pre_csr  pre-cursor value; only the low 5 bits are used
 * @tx_diff_ctl differential control value; only the low 5 bits are used
 * @tx_post_csr post-cursor value; only the low 5 bits are used
 *
 * Each register (GTY_PRE_CURSOR, GTY_DIFF_CTL, GTY_POST_CURSOR) is
 * refreshed before its per-lane field is written and flushed.
 * The requested values are logged at debug level regardless of lane.
 */
void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
{
	/* GTY_PRE_CURSOR */
	register_update(p->mp_reg_gty_pre_cursor);
	switch (lane) {
	case 0:
		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
				    tx_pre_csr & 0x1F);
		break;
	case 1:
		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
				    tx_pre_csr & 0x1F);
		break;
	case 2:
		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
				    tx_pre_csr & 0x1F);
		break;
	case 3:
		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
				    tx_pre_csr & 0x1F);
		break;
	}

	/* GTY_DIFF_CTL */
	register_update(p->mp_reg_gty_diff_ctl);
	switch (lane) {
	case 0:
		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
				    tx_diff_ctl & 0x1F);
		break;
	case 1:
		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
				    tx_diff_ctl & 0x1F);
		break;
	case 2:
		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
				    tx_diff_ctl & 0x1F);
		break;
	case 3:
		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
				    tx_diff_ctl & 0x1F);
		break;
	}

	/* GTY_POST_CURSOR */
	register_update(p->mp_reg_gty_post_cursor);
	switch (lane) {
	case 0:
		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
				    tx_post_csr & 0x1F);
		break;
	case 1:
		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
				    tx_post_csr & 0x1F);
		break;
	case 2:
		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
				    tx_post_csr & 0x1F);
		break;
	case 3:
		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
				    tx_post_csr & 0x1F);
		break;
	}

	NT_LOG(DBG, NTHW,
	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
{
	/* Write bit 0 of mode into RX_LPM_EN on all four lanes; only the
	 * last field write flushes the register.
	 */
	register_update(p->mp_reg_gty_ctl);
	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);

	/* Toggle reset */
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);

	/* Hold the equalizer reset asserted before releasing it. */
	NT_OS_WAIT_USEC(1000); /* 1ms */

	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);

	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
}
+
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
/* LED control modes, written to DEBOUNCE_CTRL.NT_PORT_CTRL by
 * nthw_mac_pcs_set_led_mode().
 */
enum nthw_mac_pcs_led_mode_e {
	NTHW_MAC_PCS_LED_AUTO = 0x00,
	NTHW_MAC_PCS_LED_ON = 0x01,
	NTHW_MAC_PCS_LED_OFF = 0x02,
	NTHW_MAC_PCS_LED_PORTID = 0x03,
};

/* Receiver equalization modes for
 * nthw_mac_pcs_set_receiver_equalization_mode() (bit 0 -> RX_LPM_EN).
 */
#define nthw_mac_pcs_receiver_mode_dfe (0)
#define nthw_mac_pcs_receiver_mode_lpm (1)
+
/*
 * Handle for one MAC_PCS FPGA module instance.
 * mp_* members are register/field shadow pointers resolved from the FPGA
 * model; m_* members are plain values (port number, precomputed masks).
 */
struct nthw_mac_pcs {
	uint8_t m_port_no;

	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_mac_pcs;
	int mn_instance;

	/* Block lock status */
	nt_field_t *mp_fld_block_lock_lock;
	uint32_t m_fld_block_lock_lock_mask;

	/* Lane lock status */
	nt_field_t *mp_fld_vl_demuxed_lock;
	uint32_t m_fld_vl_demuxed_lock_mask;

	/* GTY_STAT */
	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
	uint32_t m_fld_gty_stat_rx_rst_done_mask;
	uint32_t m_fld_gty_stat_tx_rst_done_mask;

	/* GTY_LOOP */
	nt_register_t *mp_reg_gty_loop;
	nt_field_t *mp_fld_gty_loop_gt_loop0;
	nt_field_t *mp_fld_gty_loop_gt_loop1;
	nt_field_t *mp_fld_gty_loop_gt_loop2;
	nt_field_t *mp_fld_gty_loop_gt_loop3;

	/* MAC_PCS_CONFIG */
	nt_field_t *mp_fld_pcs_config_tx_path_rst;
	nt_field_t *mp_fld_pcs_config_rx_path_rst;
	nt_field_t *mp_fld_pcs_config_rx_enable;
	nt_field_t *mp_fld_pcs_config_rx_force_resync;
	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
	nt_field_t *mp_fld_pcs_config_tx_enable;
	nt_field_t *mp_fld_pcs_config_tx_send_idle;
	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
	nt_field_t *mp_fld_pcs_config_tx_test_pattern;

	/* STAT PCS */
	nt_field_t *mp_fld_stat_pcs_rx_status;
	nt_field_t *mp_fld_stat_pcs_rx_aligned;
	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;

	/* STAT_PCS_RX_LATCH */
	nt_field_t *mp_fld_stat_pcs_rx_latch_status;

	/* PHYMAC_MISC */
	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
	nt_field_t *mp_fld_phymac_misc_ts_eop;

	/* LINK_SUMMARY */
	nt_register_t *mp_reg_link_summary;
	nt_field_t *mp_fld_link_summary_abs;
	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
	nt_field_t *mp_fld_link_summary_lh_abs;
	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
	nt_field_t *mp_fld_link_summary_link_down_cnt;
	nt_field_t *mp_fld_link_summary_nim_interr;
	nt_field_t *mp_fld_link_summary_lh_local_fault;
	nt_field_t *mp_fld_link_summary_lh_remote_fault;
	nt_field_t *mp_fld_link_summary_local_fault;
	nt_field_t *mp_fld_link_summary_remote_fault;

	/* BIP_ERR */
	nt_register_t *mp_reg_bip_err;
	nt_field_t *mp_fld_reg_bip_err_bip_err;

	/* FEC_CTRL */
	nt_register_t *mp_reg_fec_ctrl;
	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;

	/* FEC_STAT */
	nt_register_t *mp_reg_fec_stat;
	nt_field_t *mp_field_fec_stat_bypass;
	nt_field_t *mp_field_fec_stat_valid;
	nt_field_t *mp_field_fec_stat_am_lock0;
	nt_field_t *mp_field_fec_stat_am_lock1;
	nt_field_t *mp_field_fec_stat_am_lock2;
	nt_field_t *mp_field_fec_stat_am_lock3;
	nt_field_t *mp_field_fec_stat_fec_lane_algn;

	/* FEC Corrected code word count */
	nt_register_t *mp_reg_fec_cw_cnt;
	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;

	/* FEC Uncorrected code word count */
	nt_register_t *mp_reg_fec_ucw_cnt;
	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;

	/* GTY_RX_BUF_STAT */
	nt_register_t *mp_reg_gty_rx_buf_stat;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;

	/* GTY_PRE_CURSOR */
	nt_register_t *mp_reg_gty_pre_cursor;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;

	/* GTY_DIFF_CTL */
	nt_register_t *mp_reg_gty_diff_ctl;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;

	/* GTY_POST_CURSOR */
	nt_register_t *mp_reg_gty_post_cursor;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;

	/* GTY_CTL */
	nt_register_t *mp_reg_gty_ctl;
	nt_register_t *mp_reg_gty_ctl_tx;
	nt_field_t *mp_field_gty_ctl_tx_pol0;
	nt_field_t *mp_field_gty_ctl_tx_pol1;
	nt_field_t *mp_field_gty_ctl_tx_pol2;
	nt_field_t *mp_field_gty_ctl_tx_pol3;
	nt_field_t *mp_field_gty_ctl_rx_pol0;
	nt_field_t *mp_field_gty_ctl_rx_pol1;
	nt_field_t *mp_field_gty_ctl_rx_pol2;
	nt_field_t *mp_field_gty_ctl_rx_pol3;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;

	/* DEBOUNCE_CTRL */
	nt_register_t *mp_reg_debounce_ctrl;
	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;

	/* TIMESTAMP_COMP */
	nt_register_t *mp_reg_time_stamp_comp;
	nt_field_t *mp_field_time_stamp_comp_rx_dly;
	nt_field_t *mp_field_time_stamp_comp_tx_dly;

	/* STAT_PCS_RX */
	nt_register_t *mp_reg_stat_pcs_rx;

	/* STAT_PCS_RX */
	nt_register_t *mp_reg_stat_pcs_rx_latch;

	/* PHYMAC_MISC */
	nt_register_t *mp_reg_phymac_misc;

	/* BLOCK_LOCK */
	nt_register_t *mp_reg_block_lock;
};

typedef struct nthw_mac_pcs nthw_mac_pcs_t;
typedef struct nthw_mac_pcs nthw_mac_pcs;
+
/* Constructor / initializer / destructor */
nthw_mac_pcs_t *nthw_mac_pcs_new(void);
int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);

/* Reset/path control and lock status */
bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
			     bool enable); /* wrapper - for ease of use */
void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);

/* Link summary readouts (each output pointer receives one field value) */
void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
				uint32_t *p_status_latch, uint32_t *p_aligned,
				uint32_t *p_local_fault, uint32_t *p_remote_fault);

void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
			       uint32_t *p_ll_nt_phy_link_state,
			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
			       uint32_t *p_lh_local_fault,
			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
			       uint32_t *p_remote_fault);

/* FEC control, status and counters */
bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);

/* GTY transceiver tuning, polarity and misc configuration */
bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);

/* Lock field/mask accessors */
uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (f) {
+		field_get_updated(f);
+		if (set)
+			field_set_flush(f);
+		else
+			field_clr_flush(f);
+	}
+}
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	nthw_mac_pcs_xxv_t *p = malloc(sizeof(nthw_mac_pcs_xxv_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+	return p;
+}
+
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+		free(p);
+	}
+}
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r =
+			&p->regs[index]; /* register and fields */
+
+	assert(p);
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
/*
 * Select receiver equalization for core instance @index:
 * enable=true selects DFE (LPM_EN cleared), enable=false selects LPM.
 * The GTY_CTL_RX.EQUA_RST bit is then pulsed 1 -> 0 so the transceiver
 * applies the new LPM_EN setting.
 */
void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
{
	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
	const bool set_dfe =
		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */

	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);

	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
	field_get_updated(f);
	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
	field_get_updated(f);
	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
	field_get_updated(f);
}
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
/*
 * Read the RS-FEC corrected (CCW) and uncorrected (UCW) codeword counter
 * registers of core instance @index and log any non-zero values.
 *
 * NOTE(review): no explicit write happens here, so the "reset" in the name
 * presumably relies on the counters being clear-on-read — confirm against
 * the register specification.
 */
void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
{
	register_update(p->regs[index].mp_reg_rs_fec_ccw);
	register_update(p->regs[index].mp_reg_rs_fec_ucw);

	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
	}
	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
	}
}
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
/*
 * Configure direct-attach-copper (DAC) handling for core instance @index.
 * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: auto-negotiation is
 * disabled and bypassed, link training is disabled, and the RX/TX MAC-PCS
 * and GT data-path resets are asserted and then released in order.
 * Any other mode triggers assert(0).
 */
void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
			      uint8_t index)
{
	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
		/* Assert all four resets, then release them in the same order. */
		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);

		return;
	}
	assert(0); /* If you end up here you need to implement other DAC modes */
}
+
+/* Return the latched-low RX FEC74 lock indication for the given channel. */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f);
+}
+
+/* Return the latched-low RX RS-FEC lane-alignment indication for the channel. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_0
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_0
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field shadow structure and accessor API for the XXV MAC/PCS
+ * FPGA module.  One register/field set is kept per sub-module/channel
+ * (up to NTHW_MAC_PCS_XXV_NUM_ELEMS channels).
+ */
+
+/* LED mode selection values (passed as 'mode' to nthw_mac_pcs_xxv_set_led_mode()). */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/* DAC mode selection values — presumably direct-attach cable classes (N/S/L); confirm against HW spec. */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+/* One register/field set per sub-module/channel; init code populates regs[0..3]. */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		/* NOTE(review): not assigned by the visible LINK_SUMMARY init code — confirm it is populated elsewhere. */
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+/* Lifecycle: allocate, bind register handles for n_channels channels, release. */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+/* Read-out of all LINK_SUMMARY fields for one channel ('index'). */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+/* Per-channel control/status accessors; 'index' selects the sub-module/channel. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI read test-generator context.
+ * Returns NULL on allocation failure.
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc replaces the manual malloc+memset and zero-initializes in one step */
+	nthw_pci_rd_tg_t *p = calloc(1, sizeof(nthw_pci_rd_tg_t));
+
+	return p;
+}
+
+/*
+ * Scrub and release a PCI read test-generator context.
+ * Passing NULL is a no-op.
+ */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Look up the PCI_RD_TG FPGA module and bind all register/field handles.
+ *
+ * Returns 0 on success and -1 if the FPGA does not expose the requested
+ * module instance.  When p is NULL the call degrades to a pure presence
+ * probe (0 if the module exists, -1 otherwise) without touching any state.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	/* Product parameter; defaults to 1 when absent from the FPGA model. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* TG_RDDATA0/TG_RDDATA1: low/high 32 bits of the DMA physical address. */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* TG_RDDATA2: request size and wait/wrap control bits. */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id */
+	/* query (not get): the handle may be NULL on FPGAs without this field. */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as two 32-bit register writes.
+ *
+ * Note: the previous masks used (1UL << 32), which is undefined behavior
+ * on targets where unsigned long is 32 bits wide; plain casts/shifts on
+ * the uint64_t value are well defined on every target.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Write the generator RAM address (TG_RDADDR.RAM_ADDR) and flush to HW. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage the TG_RDDATA2 fields (request size, wait, wrap), then flush the
+ * shared register once via the last field.  The single flush writes all
+ * three fields to hardware together.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/*
+ * Write the iteration count to TG_RD_RUN.RD_ITERATION.
+ * NOTE(review): presumably this write also starts the generator — confirm
+ * against the PCI_RD_TG register specification.
+ */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read the current TG_CTRL.TG_RD_RDY status from hardware (fresh read, not cached). */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * PCI read test-generator (PCI_RD_TG) context: shadow handles for the
+ * module's registers and fields, bound by nthw_pci_rd_tg_init().
+ * NOTE(review): this header does not include the definitions of
+ * nt_fpga_t/nt_module_t/nt_register_t/nt_field_t — it relies on the
+ * including .c file pulling in the FPGA model header first; confirm
+ * the intended include order.
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	/* Cached NT_PCI_TA_TG_PRESENT product parameter (default 1). */
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	/* Optional VF host-id field; NULL when the FPGA lacks it. */
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI_TA context.
+ * Returns NULL on allocation failure; free with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/* Release a PCI_TA context; a NULL argument is a no-op. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (!p)
+		return;
+	/* scrub stale handle pointers before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve PCI_TA register/field handles for the given module instance.
+ * When p is NULL the call is a pure probe: the return value only reports
+ * whether the instance exists. Returns 0 on success, -1 if absent.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* product parameter: TA/TG test logic present (defaults to 1) */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* control register and its enable bit */
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* statistics counters: good/bad packets, length/payload errors */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write CONTROL.ENABLE and flush it to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the refreshed good-packet counter into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t n_cnt = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+
+	*val = n_cnt;
+}
+
+/* Read the refreshed bad-packet counter into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t n_cnt = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+
+	*val = n_cnt;
+}
+
+/* Read the refreshed length-error counter into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t n_cnt = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+
+	*val = n_cnt;
+}
+
+/* Read the refreshed payload-error counter into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t n_cnt = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+
+	*val = n_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * PCI test adapter (PCI_TA) context.
+ * Register/field handles are resolved once by nthw_pci_ta_init().
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_pci_ta;	/* PCI_TA module handle */
+	int mn_instance;	/* module instance index */
+
+	int mn_param_pci_ta_tg_present;	/* product param: TA/TG logic present */
+
+	nt_register_t *mp_reg_pci_ta_ctrl;	/* control register */
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;	/* good-packet counter */
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;	/* bad-packet counter */
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;	/* length-error counter */
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;	/* payload-error counter */
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+/* Release a context obtained from nthw_pci_ta_new(). */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+/* Resolve module/register/field handles; 0 on success, -1 otherwise. */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_WR_TG context.
+ * Returns NULL on allocation failure; free with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/* Release a PCI_WR_TG context; a NULL argument is a no-op. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (!p)
+		return;
+	/* scrub stale handle pointers before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve PCI_WR_TG register/field handles for the given module instance.
+ * When p is NULL the call is a pure probe: the return value only reports
+ * whether the instance exists. Returns 0 on success, -1 if absent.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* product parameter: TA/TG test logic present (defaults to 1) */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* WRDATA0/WRDATA1: low/high halves of the DMA physical address */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* WRDATA2: request size and mode flags */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id: query (not get) — may legitimately be absent */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	/* WRADDR: RAM address selector */
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	/* WR_RUN: iteration count that starts the generator */
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	/* CTRL: ready status */
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	/* SEQ: sequence number */
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address into the two 32-bit
+ * WRDATA0/WRDATA1 fields (low word first, then high word).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	/*
+	 * Mask with a ULL constant: the previous "(1UL << 32) - 1" is
+	 * undefined behavior on ILP32 targets where unsigned long is 32 bits
+	 * (shift count equal to the type width).
+	 */
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xffffffffULL));
+}
+
+/* Select the TG RAM line to program next (write + flush). */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Program one WRDATA2 RAM entry. The four fields are staged with
+ * field_set_val32() and pushed to hardware with a single register flush
+ * on the last field, so the entry is written atomically.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the generator for the requested number of iterations. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the refreshed TG_CTRL ready flag. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * PCI write test generator (PCI_WR_TG) context.
+ * Register/field handles are resolved once by nthw_pci_wr_tg_init().
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_pci_wr_tg;	/* PCI_WR_TG module handle */
+	int mn_instance;	/* module instance index */
+
+	int mn_param_pci_ta_tg_present;	/* product param: TA/TG logic present */
+
+	/* low 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	/* high 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* request size, optional host id and mode flags */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* may be NULL (optional field) */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	/* RAM address selector */
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	/* iteration count that starts the generator */
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	/* ready status */
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	/* sequence number */
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+/* Release a context obtained from nthw_pci_wr_tg_new(). */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+/* Resolve module/register/field handles; 0 on success, -1 otherwise. */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate a zero-initialized PCIe3 context.
+ * Returns NULL on allocation failure; free with nthw_pcie3_delete().
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_pcie3_t));
+}
+
+/* Release a PCIe3 context; a NULL argument is a no-op. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (!p)
+		return;
+	/* scrub stale handle pointers before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve PCIE3 register/field handles and apply the initial DMA/marker
+ * configuration. When p is NULL the call is a pure probe: the return value
+ * only reports whether the instance exists. Returns 0 on success, -1 if
+ * the instance is absent.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* statistics control/counter registers */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* root-port <-> end-point error indications */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	/* end-point messaging and per-EP DMA allow masks */
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/*
+	 * Initial setup - disable markerscheme and bifurcation.
+	 * NOTE(review): EP0's allow mask is cleared and then set again below,
+	 * and EP1's is cleared twice — confirm this write sequence is
+	 * intentional (hardware handshake) and not a copy/paste slip.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+} /* fixed: removed stray ';' after the function body (invalid at file scope in strict C) */
+
+/*
+ * Latch a new statistics sample by writing the magic token to SAMPLE_TIME.
+ * Always returns 0.
+ */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection: set ENA and pulse REQ in one flush. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection: clear ENA, pulse REQ in one flush. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read the raw PCIe statistics counters into the caller's variables.
+ * Unit size and reference frequency are compile-time constants
+ * (NTHW_TG_CNT_SIZE / NTHW_TG_REF_FREQ). Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert sampled counters into byte rates and bus-utilization figures.
+ * Every output parameter is written on both paths; when no reference-clock
+ * ticks were sampled all outputs are zeroed (previously rx/tx rate and the
+ * tag count were left untouched, so callers read uninitialized memory).
+ * Always returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		/* counter * unit-size scaled to per-second via the ref clock */
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		/* utilization in parts-per-million of the sample window */
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		/* zero ALL outputs so callers never see stale/uninit values */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		*p_tag_use_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Pre-sample hook for end-point counters. Intentionally a no-op for PCIe3
+ * (only logs); kept for interface symmetry with other HIF back-ends.
+ * Always returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the end-point counter struct from the PCIe3 stats.
+ * NOTE(review): epc->cur_tx receives the RX rate and epc->cur_rx the TX
+ * rate — presumably the direction is named from the end-point's point of
+ * view; confirm this swap is intentional. Always returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * PCIe gen3 (PCIE3) context.
+ * Register/field handles are resolved once by nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_pcie3;	/* PCIE3 module handle */
+	int mn_instance;	/* module instance index */
+
+	/* statistics control (enable + request latch) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;	/* RX byte/unit counter */
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;	/* TX byte/unit counter */
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;	/* request-ready counter */
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;	/* request-valid counter */
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;	/* tags-in-use status */
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	nt_register_t *mp_reg_stat_ref_clk;	/* 250 MHz reference tick count */
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port -> end-point error indications */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* end-point -> root-port error indications */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;	/* statistics sample trigger */
+	nt_field_t *mp_fld_sample_time;
+
+	/* end-point messaging and per-EP DMA allow masks */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* DMA marker address (64 bits split over two registers) */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;	/* scratch/test data registers */
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are not set up by nthw_pcie3_init() —
+	 * presumably reserved for other FPGA variants; confirm before use.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_pcie3_t *nthw_pcie3_new(void);
+/* Release a context obtained from nthw_pcie3_new(). */
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+/* Resolve handles and apply initial setup; 0 on success, -1 otherwise. */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate a zero-initialized SDC context.
+ * Returns NULL on allocation failure; free with nthw_sdc_delete().
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_sdc_t));
+}
+
+/* Release an SDC context; a NULL argument is a no-op. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (!p)
+		return;
+	/* scrub stale handle pointers before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve SDC register/field handles for the given module instance.
+ * When p is NULL the call is a pure probe: the return value only reports
+ * whether the instance exists. Returns 0 on success, -1 if absent.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* control register fields */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* status register fields (calibration/lock/reset indicators) */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* cell count and fill-level monitoring */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Fold one SDC status field into the accumulated result mask.
+ * Returns 1 when the field deviates from the expected state (all bits set
+ * when b_expect_set, all zero otherwise), 0 when it matches.
+ */
+static int sdc_check_field(nt_field_t *p_fld, uint64_t *pn_mask, int b_expect_set)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_width = field_get_bit_width(p_fld);
+	/* 1ULL avoids UB for a 32-bit wide field ("1 << 32" is undefined) */
+	const uint32_t val_mask = (uint32_t)((1ULL << n_width) - 1);
+
+	*pn_mask = (*pn_mask << n_width) | (val & val_mask);
+	if (b_expect_set)
+		return val != val_mask;
+	return val != 0;
+}
+
+/*
+ * Collect the SDC state fields into *pn_result_mask and count deviations.
+ * Fields are folded in a fixed order: calib, init_done, mmcm_lock,
+ * pll_lock, resetting (same order and packing as the original code).
+ * Returns the number of fields in an unexpected state (0 = all ok),
+ * or -1 on NULL arguments.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_calib, &n_mask, 1);
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_init_done, &n_mask, 1);
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_mmcm_lock, &n_mask, 1);
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_pll_lock, &n_mask, 1);
+	n_err_cnt += sdc_check_field(p->mp_fld_stat_resetting, &n_mask, 0);
+
+	/* the NULL case was handled above, so the mask is always written */
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reaches its operational state: calib, init_done,
+ * mmcm_lock and pll_lock must read all-ones and resetting must clear,
+ * each within the given poll budget. Returns the number of fields that
+ * timed out (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	nt_field_t *a_set_flds[] = {
+		p->mp_fld_stat_calib,
+		p->mp_fld_stat_init_done,
+		p->mp_fld_stat_mmcm_lock,
+		p->mp_fld_stat_pll_lock,
+	};
+	int n_err_cnt = 0;
+	size_t i;
+
+	/* same wait order as before: calib, init_done, mmcm_lock, pll_lock */
+	for (i = 0; i < sizeof(a_set_flds) / sizeof(a_set_flds[0]); i++) {
+		if (field_wait_set_all32(a_set_flds[i], n_poll_iterations,
+					 n_poll_interval))
+			n_err_cnt++;
+	}
+
+	/* finally the reset indication must have cleared */
+	if (field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval))
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * SDRAM controller (SDC) context.
+ * Field handles are resolved once by nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_sdc;	/* SDC module handle */
+	int mn_instance;	/* module instance index */
+
+	/* control fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* status fields (calibration, locks, reset indication) */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* cell count and fill-level monitoring */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_sdc_t *nthw_sdc_new(void);
+/* Resolve module/register/field handles; 0 on success, -1 otherwise. */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+/* Release a context obtained from nthw_sdc_new(). */
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+/* Poll for the operational state; returns the number of timed-out fields. */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+/* Snapshot the state fields; returns deviation count, -1 on NULL args. */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	nthw_si5340_t *p = malloc(sizeof(nthw_si5340_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_si5340_t));
+	return p;
+}
+
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1;
+
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_si5340_t));
+		free(p);
+	}
+}
+
/*
 * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
 * 7bit address 0x74.
 * reg_addr encodes page in the high byte and the in-page offset in the low
 * byte; the device's page register is switched only when the cached page
 * differs from the requested one.
 */
static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
{
	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
	uint8_t data;

	/* check if we are on the right page */
	if (page != p->m_si5340_page) {
		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
				  SI5340_PAGE_REG_ADDR, 1, &page);
		p->m_si5340_page = page;
	}
	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
	return data;
}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
/*
 * Apply a clock profile to the Si5340: walk the (addr, value) record list in
 * the given format, write each register and read it back for verification.
 * Returns 0 on success, -1 on unknown format or readback mismatch.
 */
static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
			  clk_profile_data_fmt_t data_format)
{
	const char *const p_adapter_id_str =
		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
	int i;
	uint16_t addr;
	uint8_t value;
	uint8_t ctrl_value;

	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
	       p_adapter_id_str, __func__, data_cnt, data_format);

	for (i = 0; i < data_cnt; i++) {
		/* Decode one record and step p_data past it; the two formats
		 * differ only in the width/layout of the record fields.
		 */
		if (data_format == CLK_PROFILE_DATA_FMT_1) {
			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
					  ->reg_addr);
			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
		} else {
			NT_LOG(ERR, NTHW,
			       "%s: Unhandled Si5340 data format (%d)\n",
			       p_adapter_id_str, data_format);
			return -1;
		}

		if (addr == 0x0006) {
			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
			NT_OS_WAIT_USEC(300000);
		}

		nthw_si5340_write(p, addr, value);

		if (addr == 0x001C) {
			/* skip readback for "soft reset" register */
			continue;
		}

		/* Verify the write took effect before moving on. */
		ctrl_value = nthw_si5340_read(p, addr);

		if (ctrl_value != value) {
			NT_LOG(ERR, NTHW,
			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
			       p_adapter_id_str, addr, value, ctrl_value);
			return -1;
		}
	}
	return 0;
}
+
/*
 * Program a clock profile and verify the synthesizer reached a stable state.
 * After writing the profile, polls status (reg 0x0c) and sticky-status
 * (reg 0x11) for up to 5 seconds; bits 0x09 must be clear in both.
 * Returns 0 on success, -1 when the device fails to lock/calibrate.
 */
int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
		      clk_profile_data_fmt_t data_format)
{
	const char *const p_adapter_id_str =
		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
	int i;
	bool success = false;
	uint8_t status, sticky;
	uint8_t design_id[9];

	/* cfg result intentionally ignored; status polling below decides */
	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);

	/* Check if DPLL is locked and SYS is calibrated */
	for (i = 0; i < 5; i++) {
		status = nthw_si5340_read(p, 0x0c);
		sticky = nthw_si5340_read(p, 0x11);
		nthw_si5340_write(p, 0x11, 0x00); /* clear sticky flags */

		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
			success = true;
			break;
		}
		NT_OS_WAIT_USEC(1000000); /* 1 sec */
	}

	if (!success) {
		NT_LOG(ERR, NTHW,
		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
		       p_adapter_id_str, status, sticky);
		return -1;
	}

	/* Read the 8-byte design id string (regs 0x26B..) for debug logging. */
	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
	design_id[sizeof(design_id) - 1] = 0;

	(void)design_id; /* Only used in debug mode */
	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
	       design_id);

	return 0;
}
+
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_SI5340_H__
#define __NTHW_SI5340_H__

#include "nthw_clock_profiles.h"

/* Status codes for Si5340 operations. */
#define SI5340_SUCCESS (0)
#define SI5340_FAILED (999)
#define SI5340_TIMEOUT (666)

/*
 * Si5340 clock synthesizer context: the I2C channel/address used to reach
 * the device and the currently selected register page (cached to avoid
 * redundant page switches).
 */
struct nthw_si5340 {
	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
	nthw_iic_t *mp_nthw_iic;	/* I2C controller handle (not owned) */
	int mn_clk_cfg;	/* active clock config; -1 = none */
	uint8_t m_si5340_page;	/* cached device page register value */
};

typedef struct nthw_si5340 nthw_si5340_t;

nthw_si5340_t *nthw_si5340_new(void);
int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
void nthw_si5340_delete(nthw_si5340_t *p);

/* Program a clock profile and verify lock; 0 on success, -1 on failure. */
int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
		      clk_profile_data_fmt_t data_format);
int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
			  const int data_cnt);
int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
			  const int data_cnt);

#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	nthw_spi_v3_t *p = malloc(sizeof(nthw_spi_v3_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+	return p;
+}
+
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p) {
+		if (p->mp_spim_mod) {
+			nthw_spim_delete(p->mp_spim_mod);
+			p->mp_spim_mod = NULL;
+		}
+
+		if (p->mp_spis_mod) {
+			nthw_spis_delete(p->mp_spis_mod);
+			p->mp_spis_mod = NULL;
+		}
+
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+		free(p);
+	}
+}
+
/*
 * Set the transfer timeout used by the Tx/Rx wait loops.
 * NOTE(review): units are the raw delta of NT_OS_GET_TIME_MONOTONIC_COUNTER()
 * (see the wait helpers) - confirm the expected unit with callers.
 */
int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
{
	p->m_time_out = time_out;
	return 0;
}
+
/* Report the SPI container protocol version implemented here (always 3). */
int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
{
	(void)p;
	return 3;
}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
#ifdef SPI_V3_DEBUG_PRINT
/*
 * Log a buffer as hex bytes, 16 bytes per line, indented (debug builds only).
 * Uses bounded snprintf instead of sprintf so the line buffer can never be
 * overrun even if the per-line byte count is ever changed.
 */
static void dump_hex(uint8_t *p_data, uint16_t count)
{
	char tmp_str[128];
	size_t pos = 0;
	int i;

	for (i = 0; i < count; i++) {
		pos += (size_t)snprintf(&tmp_str[pos], sizeof(tmp_str) - pos,
					"%02X ", *(p_data++));

		/* Flush a full line of 16 bytes, or the final partial line. */
		if (((i + 1) % 16 == 0) || (i == count - 1)) {
			tmp_str[pos - 1] = '\0'; /* drop trailing space */
			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
			pos = 0;
		}
	}
}
#endif
+
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
/*
 * Send Tx data using the SPIM module and receive any data using the SPIS module.
 * The data are sent and received being wrapped into a SPI v3 container:
 * a 4-byte header (opcode+size on Tx, error_code+size on Rx, both in network
 * byte order) followed by the payload in 4-byte words.
 * On entry rx_buf->size is the Rx buffer capacity; on return it holds the
 * number of payload bytes received. Returns 0 on success, -1 when the AVR
 * reports an error, 1 when the Rx buffer is too small, or a wait/FIFO error.
 */
int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
{
	const uint16_t max_payload_rx_size = rx_buf->size;
	int result = 0;

	/* packed so .raw maps the exact 4-byte wire layout of each header */
#pragma pack(push, 1)
	union {
		uint32_t raw;

		struct {
			uint16_t opcode;
			uint16_t size;
		};
	} spi_tx_hdr;

	union {
		uint32_t raw;

		struct {
			uint16_t error_code;
			uint16_t size;
		};
	} spi_rx_hdr;
#pragma pack(pop)

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
#endif

	/* Disable transmission from Tx FIFO */
	result = nthw_spim_enable(p->mp_spim_mod, false);
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
		return result;
	}

	/* Enable SPIS module */
	result = nthw_spis_enable(p->mp_spis_mod, true);
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
		return result;
	}

	/* Put data into Tx FIFO */
	spi_tx_hdr.opcode = opcode;
	spi_tx_hdr.size = tx_buf->size;

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
	       opcode, tx_buf->size, rx_buf->size);

#endif /* SPI_V3_DEBUG_PRINT */

	/* Header goes first, converted to network byte order. */
	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
		return result;
	}

	{
		/* Push the payload into the Tx FIFO in 4-byte words; the last
		 * word is zero-padded when the payload size is not a multiple
		 * of 4.
		 */
		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
		uint16_t tx_size = tx_buf->size;
		uint16_t count;
		uint32_t value;

		while (tx_size > 0) {
			if (tx_size > 4) {
				count = 4;
			} else {
				count = tx_size;
				value = 0; /* zero-pad the final partial word */
			}

			memcpy(&value, tx_data, count);

			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
							htonl(value));
			if (result != 0) {
				NT_LOG(WRN, NTHW,
				       "nthw_spim_write_tx_fifo failed\n");
				return result;
			}

			tx_size = (uint16_t)(tx_size - count);
			tx_data += count;
		}
	}

	/* Enable Tx FIFO */
	result = nthw_spim_enable(p->mp_spim_mod, true);
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
		return result;
	}

	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
	if (result != 0)
		return result;

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
	       __func__);
#endif

	{
		/*
		 * Start receiving data
		 * The first data to read is the header
		 */
		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
		bool rx_hdr_read = false;

		rx_buf->size = 0;

		while (true) {
			uint16_t count;
			uint32_t value;

			if (!rx_hdr_read) { /* Read the header */
				result = wait_for_rx_data_ready(p->mp_spis_mod,
								p->m_time_out);
				if (result != 0)
					return result;

				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
								&spi_rx_hdr.raw);
				if (result != 0) {
					NT_LOG(WRN, NTHW,
					       "nthw_spis_read_rx_fifo failed\n");
					return result;
				}

				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
				rx_size = spi_rx_hdr.size;
				rx_hdr_read = true; /* Next time read payload */

#ifdef SPI_V3_DEBUG_PRINT
				NT_LOG(DBG, NTHW,
				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
				       spi_rx_hdr.error_code, spi_rx_hdr.size);
#endif

				if (spi_rx_hdr.error_code != 0) {
					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
					break;
				}

				if (rx_size > max_payload_rx_size) {
					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
					break;
				}
			} else { /* Read the payload */
				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);

				if (count == 0)
					break;

				result = wait_for_rx_data_ready(p->mp_spis_mod,
							    p->m_time_out);
				if (result != 0)
					return result;

				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
							       &value);
				if (result != 0) {
					NT_LOG(WRN, NTHW,
					       "nthw_spis_read_rx_fifo failed\n");
					return result;
				}

				value = ntohl(value); /* Convert to host endian */
				memcpy(rx_data, &value, count);
				rx_buf->size = (uint16_t)(rx_buf->size + count);
				rx_size = (uint16_t)(rx_size - count);
				rx_data += count;
			}
		}
	}

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
	dump_hex(rx_buf->p_buf, rx_buf->size);
	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
#endif

	return result;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NT4GA_SPI_V3__
#define __NT4GA_SPI_V3__

/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
#define MAX_AVR_CONTAINER_SIZE (248)

/* Opcodes understood by the on-board AVR over the SPI v3 container protocol. */
enum avr_opcodes {
	AVR_OP_NOP = 0, /* v2 NOP command */
	/* version handlers */
	AVR_OP_VERSION = 1,
	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
	AVR_OP_SYSINFO = 3,
	/* Ping handlers */
	AVR_OP_PING = 4,
	AVR_OP_PING_DELAY = 5,
	/* i2c handlers */
	AVR_OP_I2C_READ = 9,
	AVR_OP_I2C_WRITE = 10,
	AVR_OP_I2C_RANDOM_READ = 11,
	/* VPD handlers */
	AVR_OP_VPD_READ = 19,
	AVR_OP_VPD_WRITE = 20,
	/* SENSOR handlers */
	AVR_OP_SENSOR_FETCH = 28,
	/* The following command are only relevant to V3 */
	AVR_OP_SENSOR_MON_CONTROL = 42,
	AVR_OP_SENSOR_MON_SETUP = 43,
	/* special version handler */
	AVR_OP_SYSINFO_2 = 62,
};

#define GEN2_AVR_IDENT_SIZE (20)
#define GEN2_AVR_VERSION_SIZE (50)

/* Field sizes of the Gen2 VPD EEPROM layout below. */
#define GEN2_PN_SIZE (13)
#define GEN2_PBA_SIZE (16)
#define GEN2_SN_SIZE (10)
#define GEN2_BNAME_SIZE (14)
#define GEN2_PLATFORM_SIZE (72)
#define GEN2_VPD_SIZE_TOTAL                                                  \
	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
	 GEN2_PLATFORM_SIZE + 2)

/* On-EEPROM Vital Product Data layout (Gen2 boards). */
typedef struct vpd_eeprom_s {
	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
	uint8_t vpd_pn[GEN2_PN_SIZE];
	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
	uint8_t vpd_pba[GEN2_PBA_SIZE];
	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
	uint8_t vpd_sn[GEN2_SN_SIZE];
	/* Vital Product Data: Board Name (GEN2_BNAME_SIZE bytes ascii)
	 * (e.g. "ntmainb1e2" or "ntfront20b1")
	 */
	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
	/*
	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
	 * addresses)
	 */
	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
	/* CRC16 checksum of all of above. This field is not included in the checksum */
	uint16_t crc16;
} vpd_eeprom_t;

/* Decoded board identification/info as reported by the AVR. */
typedef struct {
	uint8_t psu_hw_revision;
	char board_type[GEN2_BNAME_SIZE + 1];
	char product_id[GEN2_PN_SIZE + 1];
	char pba_id[GEN2_PBA_SIZE + 1];
	char serial_number[GEN2_SN_SIZE + 1];
	uint8_t product_family;
	uint32_t feature_mask;
	uint32_t invfeature_mask;
	uint8_t no_of_macs;
	uint8_t mac_address[6];
	uint16_t custom_id;
	uint8_t user_id[8];
} board_info_t;

/* Transfer buffer descriptor. For Rx, size is capacity in and bytes-received
 * out (see nthw_spi_v3_transfer).
 */
struct tx_rx_buf {
	uint16_t size;
	void *p_buf;
};

/* SPI v3 channel context: paired SPIM (master) and SPIS (slave) modules. */
struct nthw__spi__v3 {
	int m_time_out;	/* wait-loop timeout used by transfers */
	int mn_instance_no;
	nthw_spim_t *mp_spim_mod;
	nthw_spis_t *mp_spis_mod;
};

typedef struct nthw__spi__v3 nthw_spi_v3_t;
typedef struct nthw__spi__v3 nthw_spi_v3;

nthw_spi_v3_t *nthw_spi_v3_new(void);
int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
void nthw_spi_v3_delete(nthw_spi_v3_t *p);

int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
/* Run one container transaction; see nthw_spi_v3.c for result semantics. */
int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);

#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+nthw_spim_t *nthw_spim_new(void)
+{
+	nthw_spim_t *p = malloc(sizeof(nthw_spim_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_spim_t));
+	return p;
+}
+
/*
 * Bind a SPIM object to module instance n_instance of p_fpga and cache all
 * register/field handles. Calling with p == NULL only probes whether the
 * instance exists (returns 0 if present, -1 if not).
 */
int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);

	/* probe-only mode */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_spim = mod;

	/* SPIM is a primary communication channel - turn off debug by default */
	module_set_debug_mode(p->mp_mod_spim, 0x00);

	/* software reset register (SRR) */
	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);

	/* control register (CR) */
	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);

	/* status register (SR) */
	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);

	/* data transmit register (DTR) */
	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);

	/* data receive register (DRR) */
	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);

	/* configuration register (CFG) */
	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);

	return 0;
}
+
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spim_t));
+		free(p);
+	}
+}
+
/*
 * Software-reset the SPIM module: refresh the SRR shadow, write the
 * documented magic value and flush it to hardware. Always returns 0.
 */
uint32_t nthw_spim_reset(nthw_spim_t *p)
{
	register_update(p->mp_reg_srr);
	field_set_val32(p->mp_fld_srr_rst,
		       0x0A); /* 0x0A hardcoded value - see doc */
	register_flush(p->mp_reg_srr, 1);

	return 0;
}
+
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
/* Push one 32-bit word into the Tx FIFO via the DTR register. Returns 0. */
uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
{
	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
	return 0;
}
+
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) ? true : false;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_SPIM_H__
#define __NTHW_SPIM_H__

/*
 * SPIM (SPI master) module shadow: cached register/field handles for the
 * reset, control, status, data and configuration registers.
 */
struct nthw_spim {
	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
	nt_module_t *mp_mod_spim;	/* SPIM module instance */
	int mn_instance;	/* instance number */

	/* software reset register */
	nt_register_t *mp_reg_srr;
	nt_field_t *mp_fld_srr_rst;

	/* control register */
	nt_register_t *mp_reg_cr;
	nt_field_t *mp_fld_cr_loop;
	nt_field_t *mp_fld_cr_en;
	nt_field_t *mp_fld_cr_txrst;
	nt_field_t *mp_fld_cr_rxrst;

	/* status register */
	nt_register_t *mp_reg_sr;
	nt_field_t *mp_fld_sr_done;
	nt_field_t *mp_fld_sr_txempty;
	nt_field_t *mp_fld_sr_rxempty;
	nt_field_t *mp_fld_sr_txfull;
	nt_field_t *mp_fld_sr_rxfull;
	nt_field_t *mp_fld_sr_txlvl;
	nt_field_t *mp_fld_sr_rxlvl;

	/* data transmit register */
	nt_register_t *mp_reg_dtr;
	nt_field_t *mp_fld_dtr_dtr;

	/* data receive / configuration registers */
	nt_register_t *mp_reg_drr;
	nt_field_t *mp_fld_drr_drr;
	nt_register_t *mp_reg_cfg;
	nt_field_t *mp_fld_cfg_pre;
};

typedef struct nthw_spim nthw_spim_t;
typedef struct nthw_spim nthw_spim;

nthw_spim_t *nthw_spim_new(void);
/* Bind to instance n_instance; p == NULL only probes for existence. */
int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_spim_delete(nthw_spim_t *p);

uint32_t nthw_spim_reset(nthw_spim_t *p);
uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);

#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+nthw_spis_t *nthw_spis_new(void)
+{
+	nthw_spis_t *p = malloc(sizeof(nthw_spis_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_spis_t));
+	return p;
+}
+
/*
 * Bind a SPIS object to module instance n_instance of p_fpga and cache all
 * register/field handles. Calling with p == NULL only probes whether the
 * instance exists (returns 0 if present, -1 if not).
 */
int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);

	/* probe-only mode */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_spis = mod;

	/* SPIS is a primary communication channel - turn off debug by default */
	module_set_debug_mode(p->mp_mod_spis, 0x00);

	/* software reset register (SRR) */
	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);

	/* control register (CR) */
	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);

	/* status register (SR) */
	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
	p->mp_fld_sr_frame_err =
		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
	p->mp_fld_sr_write_err =
		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);

	/* data transmit register (DTR) */
	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);

	/* data receive register (DRR) */
	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);

	/* sensor result RAM access registers */
	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
	p->mp_fld_ram_ctrl_adr =
		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
	p->mp_fld_ram_ctrl_cnt =
		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);

	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
	p->mp_fld_ram_data_data =
		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);

	return 0;
}
+
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spis_t));
+		free(p);
+	}
+}
+
/*
 * Software-reset the SPIS module: refresh the SRR shadow, write the
 * documented magic value and flush it to hardware. Always returns 0.
 */
uint32_t nthw_spis_reset(nthw_spis_t *p)
{
	register_update(p->mp_reg_srr);
	field_set_val32(p->mp_fld_srr_rst,
		       0x0A); /* 0x0A hardcoded value - see doc */
	register_flush(p->mp_reg_srr, 1);

	return 0;
}
+
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) ? true : false;
+
+	return 0;
+}
+
/*
 * Pop one 32-bit word from the Rx FIFO into *p_data (must be non-NULL).
 * Always returns 0.
 */
uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
{
	assert(p_data);

	*p_data = field_get_updated(p->mp_fld_drr_drr);

	return 0;
}
+
/*
 * Read one sensor result word from the SPIS result RAM.
 * Sets the RAM read address to n_result_idx with a count of 1, flushes the
 * control register, then reads the data register into *p_sensor_result
 * (must be non-NULL). Always returns 0.
 * NOTE(review): assumes a single RAM_DATA read returns the addressed slot
 * without further handshaking - confirm against the SPIS register docs.
 */
uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
			      uint32_t *p_sensor_result)
{
	assert(p_sensor_result);

	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
	register_flush(p->mp_reg_ram_ctrl, 1);

	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_SPIS_H__
#define __NTHW_SPIS_H__

/*
 * SPIS (SPI slave) module shadow: cached register/field handles for the
 * reset, control, status, data and sensor-RAM registers.
 */
struct nthw_spis {
	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
	nt_module_t *mp_mod_spis;	/* SPIS module instance */
	int mn_instance;	/* instance number */

	/* software reset register */
	nt_register_t *mp_reg_srr;
	nt_field_t *mp_fld_srr_rst;

	/* control register */
	nt_register_t *mp_reg_cr;
	nt_field_t *mp_fld_cr_loop;
	nt_field_t *mp_fld_cr_en;
	nt_field_t *mp_fld_cr_txrst;
	nt_field_t *mp_fld_cr_rxrst;
	nt_field_t *mp_fld_cr_debug;

	/* status register */
	nt_register_t *mp_reg_sr;
	nt_field_t *mp_fld_sr_done;
	nt_field_t *mp_fld_sr_txempty;
	nt_field_t *mp_fld_sr_rxempty;
	nt_field_t *mp_fld_sr_txfull;
	nt_field_t *mp_fld_sr_rxfull;
	nt_field_t *mp_fld_sr_txlvl;
	nt_field_t *mp_fld_sr_rxlvl;
	nt_field_t *mp_fld_sr_frame_err;
	nt_field_t *mp_fld_sr_read_err;
	nt_field_t *mp_fld_sr_write_err;

	/* data transmit register */
	nt_register_t *mp_reg_dtr;
	nt_field_t *mp_fld_dtr_dtr;

	/* data receive register */
	nt_register_t *mp_reg_drr;
	nt_field_t *mp_fld_drr_drr;

	/* sensor result RAM access */
	nt_register_t *mp_reg_ram_ctrl;
	nt_field_t *mp_fld_ram_ctrl_adr;
	nt_field_t *mp_fld_ram_ctrl_cnt;

	nt_register_t *mp_reg_ram_data;
	nt_field_t *mp_fld_ram_data_data;
};

typedef struct nthw_spis nthw_spis_t;
typedef struct nthw_spis nthw_spis;

nthw_spis_t *nthw_spis_new(void);
/* Bind to instance n_instance; p == NULL only probes for existence. */
int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_spis_delete(nthw_spis_t *p);

uint32_t nthw_spis_reset(nthw_spis_t *p);
uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
			      uint32_t *p_sensor_result);

#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate a zero-initialized TSM instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/* Scrub and release a TSM instance; a NULL pointer is ignored. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TSM instance to FPGA module MOD_TSM and cache register/field
+ * handles.
+ *
+ * When @p is NULL the call only probes for the module: it returns 0 if
+ * instance @n_instance exists, -1 otherwise.  With a non-NULL @p it
+ * returns -1 (and logs) when the module is missing, 0 on success.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse the looked-up handle instead of querying twice */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit hardware timestamp (TS_HI:TS_LO).
+ * Returns 0 on success, -1 if @p_ts is NULL.
+ *
+ * NOTE(review): LO is read before HI in two separate accesses; confirm
+ * the hardware latches HI on the LO read so the value cannot tear.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The early return above guarantees p_ts is non-NULL here */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit wall-clock time (TIME_HI:TIME_LO).
+ * Returns 0 on success, -1 if @p_time is NULL.
+ *
+ * NOTE(review): LO is read before HI in two separate accesses; confirm
+ * the hardware latches HI on the LO read so the value cannot tear.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The early return above guarantees p_time is non-NULL here */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/*
+ * Program the TSM time registers: low 32 bits first, then high 32 bits.
+ * NOTE(review): the raw 64-bit value is split into 32-bit halves even
+ * though the target fields are named NS (lo) and SEC (hi) - confirm the
+ * caller supplies the value in that packed format.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	field_set_val_flush32(p->mp_fld_time_lo, (n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/* Enable/disable timer T0 via a read-modify-write of TIMER_CTRL. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld_en = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	/* Refresh the register mirror before touching the enable bit */
+	field_update_register(fld_en);
+	if (b_enable)
+		field_set_flush(fld_en);
+	else
+		field_clr_flush(fld_en);
+	return 0;
+}
+
+/*
+ * Set the T0 period in nanoseconds (read-modify-write then flush).
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable/disable timer T1 via a read-modify-write of TIMER_CTRL. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld_en = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	/* Refresh the register mirror before touching the enable bit */
+	field_update_register(fld_en);
+	if (b_enable)
+		field_set_flush(fld_en);
+	else
+		field_clr_flush(fld_en);
+	return 0;
+}
+
+/*
+ * Set the T1 period in nanoseconds (read-modify-write then flush).
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/* Select the timestamp format in TSM_CONFIG.  Always returns 0. */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * Wrapper around the TSM (time stamp module) in the FPGA.
+ * Caches the module handle and every register/field handle used by the
+ * nthw_tsm_* accessors declared below.
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	/* CONFIG: timestamp format selection */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL: per-timer enable bits */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* TS_LO/TS_HI: 64-bit timestamp split across two registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO/TIME_HI: 64-bit wall-clock time (NS/SEC fields) */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+/* Allocate a zeroed instance; NULL on allocation failure. */
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+/* Bind to TSM module instance n_instance; -1 on failure. */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate a zero-initialized DBS instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/* Scrub and release a DBS instance; a NULL pointer is ignored. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a DBS instance to FPGA module MOD_DBS and cache all register and
+ * field handles.
+ *
+ * When @p is NULL the call only probes for the module: it returns 0 if
+ * instance @n_instance exists, -1 otherwise.  With a non-NULL @p it
+ * returns -1 (and logs) when the module is missing, 0 on success.
+ *
+ * Mandatory registers/fields use module_get_register()/
+ * register_get_field(); optional ones use the *_query_* variants and may
+ * remain NULL - callers must test them before use.
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* Sanity-check the product parameter against the module lookup */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* RX/TX global control registers */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* Queue init registers (INIT_VAL/PTR registers are optional) */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	/* Optional idle registers */
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* Avail monitor (AM) control/data register pairs */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* Used writer (UW) control/data register pairs */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* Descriptor reader (DR) control/data register pairs */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* TX queue property (QP) control/data register pair */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	/*
+	 * NOTE(review): the members below hold fields but are named with the
+	 * mp_reg_ prefix - consider renaming in the struct for consistency.
+	 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset RX_CONTROL to its default state: queues and writers disabled.
+ * NOTE(review): scan speed 8 / update speed 5 are magic defaults -
+ * document their units (presumably a hardware divider) at the source.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset TX_CONTROL to its default state: queues and writers disabled.
+ * NOTE(review): the speed defaults (5/8) are mirrored relative to the
+ * RX reset (8/5) - confirm this asymmetry is intentional.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: restore the global control registers, then clear the
+ * shadow copies and hardware banks for every RX and TX queue slot.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program all RX_CONTROL fields in one flush.
+ * Always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all RX_CONTROL fields into the supplied out-parameters.
+ * Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program all TX_CONTROL fields in one flush.
+ * Always returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* was missing: mirror the RX variant and print every parameter */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all TX_CONTROL fields into the supplied out-parameters.
+ * Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Kick RX queue initialization.  If the optional RX_INIT_VAL register
+ * exists, the start index/pointer are written (and flushed) first, then
+ * the INIT/QUEUE fields are flushed to start the operation.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/* Read back the RX_INIT mirror: init/queue values and the busy flag. */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Kick TX queue initialization.  If the optional TX_INIT_VAL register
+ * exists, the start index/pointer are written (and flushed) first, then
+ * the INIT/QUEUE fields are flushed to start the operation.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/* Read back the TX_INIT mirror: init/queue values and the busy flag. */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Request/clear the idle state for an RX queue.
+ * Returns -ENOTSUP when the optional RX_IDLE register is absent, else 0.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle/busy status from hardware.
+ * Returns -ENOTSUP when the optional RX_IDLE register is absent, else 0.
+ * NOTE(review): *queue is hard-coded to 0 and the QUEUE field is never
+ * read back - confirm this is intentional (queue appears write-only).
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request/clear the idle state for a TX queue.
+ * Returns -ENOTSUP when the optional TX_IDLE register is absent, else 0.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle/busy status from hardware.
+ * Returns -ENOTSUP when the optional TX_IDLE register is absent, else 0.
+ * NOTE(review): *queue is hard-coded to 0 - see get_rx_idle().
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the RX_PTR register reports on.
+ * Returns -ENOTSUP when the optional RX_PTR register is absent, else 0.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX pointer and valid flag for the previously selected queue.
+ * Returns -ENOTSUP when the optional RX_PTR register is absent, else 0.
+ * NOTE(review): *queue is hard-coded to 0 - see get_rx_idle().
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the TX_PTR register reports on.
+ * Returns -ENOTSUP when the optional TX_PTR register is absent, else 0.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer previously selected with set_tx_ptr_queue().
+ * PTR and VALID are re-read from hardware; QUEUE is always reported as 0.
+ * Returns -ENOTSUP when this FPGA image has no TX_PTR register, 0 otherwise.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/*
+ * RX available-monitor (AM) data bank access.
+ * The driver keeps a software shadow (m_rx_am_shadow[]) of every table
+ * entry and always writes a complete entry to hardware -- presumably
+ * because the indexed data bank is write-only; confirm against the
+ * register map.
+ */
+
+/* Select which RX AM table entry the next DATA register flush targets. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow setter: guest physical address of the avail ring. */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow setter: enable flag. (Name carries the nthw_dbs_ prefix, unlike
+ * its siblings -- kept as-is to match the rest of the patch.)
+ */
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+/* Shadow setter: host (PCIe function) id. */
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+/* Shadow setter: packed-virtqueue flag. */
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+/* Shadow setter: interrupt enable flag. */
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Update every shadow member of one RX AM entry in one call. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/* Write one shadowed RX AM entry to hardware. */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit address written as two 32-bit words; the cast assumes the
+	 * field helper consumes them in host (little-endian) word order --
+	 * NOTE(review): confirm on big-endian targets.
+	 */
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	/* PACKED and INT fields only exist in some FPGA versions. */
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update shadow and program one RX available-monitor entry.
+ * Returns -ENOTSUP when the FPGA image has no RX AM data register.
+ * NOTE(review): index is not range-checked against NT_DBS_RX_QUEUES_MAX;
+ * callers must pass a valid queue index.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * TX available-monitor (AM) data bank access. Same shadow-then-flush
+ * scheme as the RX AM code above, but the shadow update is a single
+ * function rather than per-member setters.
+ */
+
+/* Select which TX AM table entry the next DATA register flush targets. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Update every shadow member of one TX AM entry. */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Write one shadowed TX AM entry to hardware. */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit address written as two 32-bit words (host word order). */
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	/* PACKED and INT fields only exist in some FPGA versions. */
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update shadow and program one TX available-monitor entry.
+ * Returns -ENOTSUP when the FPGA image has no TX AM data register.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * RX used-writer (UW) data bank access: shadow-then-flush, one
+ * per-member setter each, same pattern as the RX AM code above.
+ */
+
+/* Select which RX UW table entry the next DATA register flush targets. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow setter: guest physical address of the used ring. */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow setter: host (PCIe function) id. */
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+/* Shadow setter: queue size, stored as log2 (see flush below). */
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow setter: packed-virtqueue flag. */
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+/* Shadow setter: interrupt enable flag. */
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+/* Shadow setter: interrupt vector. */
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+/* Shadow setter: ISTK (interrupt-related control; exact semantics per
+ * register map).
+ */
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Update every shadow member of one RX UW entry in one call. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/* Write one shadowed RX UW entry to hardware. */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit address written as two 32-bit words (host word order). */
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	/* Queue-size encoding changed in DBS > v0.8: newer FPGAs take the
+	 * entry-count mask (2^qs - 1); older ones take the raw log2 value.
+	 */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	/* PACKED and the interrupt fields only exist in some FPGA versions. */
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update shadow and program one RX used-writer entry.
+ * queue_size is the log2 of the ring size (encoded per FPGA version in
+ * flush_rx_uw_data()). Returns -ENOTSUP when the register is absent.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * TX used-writer (UW) data bank access: identical to the RX UW code above
+ * plus one extra member, IN_ORDER, only present in some FPGA versions.
+ */
+
+/* Select which TX UW table entry the next DATA register flush targets. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow setter: guest physical address of the used ring. */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow setter: host (PCIe function) id. */
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+/* Shadow setter: queue size, stored as log2 (see flush below). */
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow setter: packed-virtqueue flag. */
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+/* Shadow setter: interrupt enable flag. */
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+/* Shadow setter: interrupt vector. */
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+/* Shadow setter: ISTK (interrupt-related control). */
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+/* Shadow setter: in-order completion flag (TX only). */
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Update every shadow member of one TX UW entry in one call. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/* Write one shadowed TX UW entry to hardware. */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit address written as two 32-bit words (host word order). */
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	/* Queue-size encoding changed in DBS > v0.8: newer FPGAs take the
+	 * entry-count mask (2^qs - 1); older ones take the raw log2 value.
+	 */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	/* Optional fields, present only in some FPGA versions. */
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update shadow and program one TX used-writer entry.
+ * queue_size is the log2 of the ring size. Returns -ENOTSUP when the
+ * register is absent.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * RX descriptor-reader (DR) data bank access: shadow-then-flush, same
+ * pattern as the groups above.
+ */
+
+/* Select which RX DR table entry the next DATA register flush targets. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow setter: guest physical address of the descriptor table. */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow setter: host (PCIe function) id. */
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+/* Shadow setter: queue size, stored as log2 (see flush below). */
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow setter: header (descriptor header handling control). */
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+/* Shadow setter: packed-virtqueue flag. */
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Update every shadow member of one RX DR entry in one call. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/* Write one shadowed RX DR entry to hardware. */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit address written as two 32-bit words (host word order). */
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	/* Queue-size encoding changed in DBS > v0.8 (mask vs log2). */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	/* PACKED only exists in some FPGA versions. */
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: update shadow and program one RX descriptor-reader entry.
+ * Returns -ENOTSUP when the register is absent.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * TX descriptor-reader (DR) data bank access: identical to the RX DR code
+ * above plus a PORT member (egress port for the queue).
+ */
+
+/* Select which TX DR table entry the next DATA register flush targets. */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Shadow setter: guest physical address of the descriptor table. */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow setter: host (PCIe function) id. */
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+/* Shadow setter: queue size, stored as log2 (see flush below). */
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow setter: header (descriptor header handling control). */
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+/* Shadow setter: egress port. */
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+/* Shadow setter: packed-virtqueue flag. */
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/* Update every shadow member of one TX DR entry in one call. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/* Write one shadowed TX DR entry to hardware. */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit address written as two 32-bit words (host word order). */
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	/* Queue-size encoding changed in DBS > v0.8 (mask vs log2). */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	/* PACKED only exists in some FPGA versions. */
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: update shadow and program one TX descriptor-reader entry.
+ * Returns -ENOTSUP when the register is absent.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * TX queue-property (QP) data bank access: a single-member entry mapping
+ * a TX queue to a virtual port.
+ */
+
+/* Select which TX QP table entry the next DATA register flush targets. */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow setter: virtual port for the queue. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+/* Update every shadow member (currently just one) of a TX QP entry. */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write one shadowed TX QP entry to hardware. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Public entry: bind a TX queue to a virtual port.
+ * Returns -ENOTSUP when the FPGA image has no TX queue-property register.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * TX QoS data bank access: per-queue rate limiter (enable, information
+ * rate, burst size).
+ * NOTE(review): the adr/cnt/en/ir/bs members used here are declared as
+ * nt_field_t in nthw_dbs.h but are named with an "mp_reg_" prefix instead
+ * of the "mp_fld_" prefix used everywhere else; renaming must be done in
+ * both files at once, so it is only flagged here.
+ */
+
+/* Select which TX QoS table entry the next DATA register flush targets. */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow setter: rate limiter enable. */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+/* Shadow setter: information rate. */
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+/* Shadow setter: burst size. */
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Update every shadow member of one TX QoS entry in one call. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write one shadowed TX QoS entry to hardware. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Public entry: program one TX queue's QoS (rate limiter) settings.
+ * Returns -ENOTSUP when the FPGA image has no TX QoS data register.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_qos_shadow ? 0 : 0, !p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate as a mul/div fraction -- presumably the
+ * clock scaling for the per-queue information rates; confirm against the
+ * register map.
+ * Returns -ENOTSUP when the FPGA image has no TX QoS rate register.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+/* Dimension of every shadow table in struct nthw_dbs_s below. */
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ *
+ * Each struct below mirrors one entry of an indexed DBS data register
+ * bank; the driver updates the shadow and then flushes the whole entry
+ * to hardware (see nthw_dbs.c).
+ */
+
+/* DBS_RX_AM_DATA: RX available-monitor entry. */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;	/* avail ring address */
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;	/* packed-virtqueue flag */
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA: TX available-monitor entry (same layout as RX). */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA: RX used-writer entry. */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;	/* used ring address */
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2 of ring size */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;	/* interrupt vector */
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA: TX used-writer entry; adds in_order. */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2 of ring size */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA: RX descriptor-reader entry. */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;	/* descriptor table address */
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2 of ring size */
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: TX descriptor-reader entry; adds port. */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2 of ring size */
+	uint32_t header;
+	uint32_t port;	/* egress port */
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: TX queue-property entry. */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* TX QoS (rate limiter) entry. */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;	/* information rate */
+	uint32_t bs;	/* burst size */
+};
+
+/*
+ * DBS (Doorbell Server) module context: cached register/field handles
+ * looked up once at init, plus software shadows of the indexed data banks.
+ * Optional registers/fields (absent in some FPGA images) are NULL and
+ * checked before use in nthw_dbs.c.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	/* RX_CONTROL / TX_CONTROL: global enables and scan speeds */
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	/* Queue init / pointer query registers */
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	/* Queue idle (quiesce) registers -- optional */
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Indexed data banks: CONTROL selects an entry, DATA holds it */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/* NOTE(review): the QoS members below are nt_field_t but carry an
+	 * "mp_reg_" prefix instead of "mp_fld_" -- naming inconsistency,
+	 * kept because nthw_dbs.c uses these names.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Software shadows of the indexed data banks (see structs above) */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Lifecycle: allocate / free / initialize-from-FPGA / reset the module. */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/* Register accessors implemented in nthw_dbs.c. All int-returning
+ * functions return 0 on success or -ENOTSUP when the accessed register
+ * is absent in the loaded FPGA image.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Logical flavour of a port, used to select which register table to program
+ * (e.g. physical TXP vs. virtual queue MTU in nthw_epp_set_mtu()).
+ */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Adapter hardware identity; hw_* fields are reported by firmware,
+ * vpd_info presumably comes from the vital-product-data area (MAC address
+ * allocation) - TODO confirm against the code that fills it in.
+ */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-adapter FPGA context: identity fields, module handles created during
+ * init, and the BAR/DMA bookkeeping needed by the register access layer.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;
+
+	/* Presumably decoded from n_fpga_ident (see FPGAID_TO_* macros) -
+	 * TODO confirm against the code that fills these in.
+	 */
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Port/NIM topology counts */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	/* FPGA model instance (see nthw_fpga_model.c) */
+	struct nt_fpga_s *mp_fpga;
+
+	/* Core module handles, owned by this context */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/* Allocate a zero-initialized EPP handle; returns NULL on allocation failure. */
+nthw_epp_t *nthw_epp_new(void)
+{
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/* Scrub and release an EPP handle; NULL is accepted and ignored. */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Probe whether EPP module instance n_instance exists in the FPGA.
+ * Passing p == NULL makes nthw_epp_init() check presence only, without
+ * binding any registers. Returns non-zero when present.
+ */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/*
+ * Bind an EPP module instance: resolve every register and field handle
+ * used by the other nthw_epp_* functions.
+ * When p == NULL only module presence is checked (see nthw_epp_present()).
+ * Returns 0 on success, -1 when the instance does not exist.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	/* Number of recipe categories supported by this FPGA product */
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe (RCP) memory: control selects the entry, data carries it */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical TX-port MTU table */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Virtual queue MTU table */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* TX-port QoS shaping table (enable, rate, fractional rate, burst) */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual-port QoS shaping table */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue -> virtual-port mapping table */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Bring all EPP tables to a known default state: clear every recipe
+ * category, program the NRECIPE default recipes (no adjust / VXLAN /
+ * VXLAN-IPv6 size adjustments), set all physical-port and queue MTUs to
+ * MTUINITVAL and disable all QoS shaping entries.
+ * A NULL handle is a no-op. Always returns 0.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one recipe record per control/data flush */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Program the NRECIPE default recipes with their size adjustments */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup; 2 entries - presumably one per physical port,
+	 * TODO confirm against the FPGA port-count parameter
+	 */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup: shaping disabled on both physical ports */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup; 128 entries - presumably the max queue count,
+	 * TODO confirm against the DBS queue parameter
+	 */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup: shaping disabled on all virtual ports */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Program the max MTU for one port, selecting the physical (TXP) or
+ * virtual (queue) MTU table according to port_type.
+ * Unsupported port types reset both MTU register pairs and fail with
+ * -ENOTSUP. A NULL handle is a no-op returning 0.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	switch (port_type) {
+	case PORT_TYPE_PHYSICAL:
+		/* Select the TXP MTU table entry, then write the MTU */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+		return 0;
+	case PORT_TYPE_VIRTUAL:
+		/* Select the queue MTU table entry, then write the MTU */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		return 0;
+	default:
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+		return -ENOTSUP;
+	}
+}
+
+/*
+ * Program TX-port QoS shaping for one physical port.
+ * The entry is enabled whenever any of rate, fractional rate or burst
+ * size is non-zero. A NULL handle is a no-op. Returns 0.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	const uint32_t qos_enable =
+		(information_rate != 0 || information_rate_fractional != 0 ||
+		 burst_size != 0);
+
+	/* Select the table entry for this port */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	field_set_val32(p->mp_fld_txp_qos_data_enable, qos_enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Program virtual-port QoS shaping for one vport.
+ * The entry is enabled whenever any of rate, fractional rate or burst
+ * size is non-zero. A NULL handle is a no-op. Returns 0.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	const uint32_t qos_enable =
+		(information_rate != 0 || information_rate_fractional != 0 ||
+		 burst_size != 0);
+
+	/* Select the table entry for this vport */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	field_set_val32(p->mp_fld_vport_qos_data_enable, qos_enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Map queue qid to virtual port vport in the EPP queue->vport table.
+ * A NULL handle is a no-op. Returns 0.
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the table entry for this queue, then write the mapping */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+#define VXLANDATASIZEADJUSTIPV6 70
+#define MTUINITVAL 1500
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * EPP (egress packet processing) module handle: caches the register and
+ * field handles resolved by nthw_epp_init() for later fast access.
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	/* Recipe (RCP) memory control/data */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Physical TX-port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Virtual queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* TX-port QoS shaping table */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual-port QoS shaping table */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue -> virtual-port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated list of FPGA product definitions supported by this driver */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/* Module-id -> printable-name lookup table, terminated by a {0, NULL}
+ * sentinel; scanned linearly by nthw_fpga_mod_id_to_str().
+ */
+static const struct {
+	const int a; /* module id (MOD_*) */
+	const char *b; /* printable module name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+/* Printable bus names indexed by bus type id; index 0 doubles as the
+ * error/unknown entry (see get_bus_name()).
+ */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Map a bus type id to its printable name; out-of-range ids yield "ERR".
+ * Fixed off-by-one: valid indices are 0..ARRAY_SIZE(a_bus_type)-1, so the
+ * bound check must use '<' - the previous '<=' allowed
+ * n_bus_type_id == ARRAY_SIZE(a_bus_type), reading one element past the
+ * end of the array.
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array.
+ * Uses naive linear search as performance is not an issue here...
+ * Fixed off-by-one: the previous '<=' loop bound could dereference
+ * sa_nthw_fpga_mod_map[ARRAY_SIZE(...)], one entry past the end of the
+ * table, when no id matched. Unmatched ids now return "unknown".
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	if (i == (int)ARRAY_SIZE(sa_nthw_fpga_mod_map))
+		return "unknown";
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
+/*
+ * Read len 32-bit words from device address addr over the selected bus.
+ * BAR/PCI accesses are single-register only (asserts len == 1); RAB0/1/2
+ * go through the register-access-bus helper with the matching bus index.
+ * Returns 0 on success, non-zero/-1 on failure; an unknown bus type
+ * asserts and returns -1.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Same as read_data(), with TSC timestamp output parameters in the
+ * signature. Timestamping is not implemented here, so p_tsc1/p_tsc2 are
+ * left untouched and the call is forwarded directly.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Write len 32-bit words to device address addr over the selected bus.
+ * Mirror of read_data(): BAR/PCI is single-register (asserts len == 1),
+ * RAB0/1/2 use the register-access-bus helper. Returns 0 on success,
+ * non-zero/-1 on failure; an unknown bus type asserts and returns -1.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+
+/*
+ * Allocate an FPGA manager instance; returns NULL on allocation failure.
+ * Zero-initialized (consistent with fpga_new()) so members are in a
+ * defined state even before fpga_mgr_init() runs - the previous plain
+ * malloc() left them uninitialized.
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_mgr_t));
+}
+
+/*
+ * Scrub and free an FPGA manager. NULL is now accepted and ignored
+ * (consistent with param_delete()/nthw_epp_delete()); previously a NULL
+ * argument crashed in memset().
+ */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/*
+ * Bind the manager to the generated FPGA product table and count the
+ * entries up to (and excluding) the NULL terminator.
+ */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Look up the product/version/revision encoded in n_fpga_id among the
+ * supported FPGA definitions and instantiate a model for it.
+ * Returns the new model, or NULL (after logging an error) when no
+ * definition matches.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id != n_fpga_prod ||
+				p_init->fpga_version != n_fpga_ver ||
+				p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print one line per supported FPGA image to fh_out.
+ * detail_level 0 prints product-version-revision only; any other value
+ * appends the build time (raw hex plus ctime() rendering, or "NA").
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/*
+			 * time_t is not necessarily 'long'; cast explicitly
+			 * so the argument matches the %08lX conversion on
+			 * every platform (mismatched printf argument types
+			 * are undefined behavior).
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Log every supported FPGA product id at debug level via NT_LOG. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused silences the compiler when NT_LOG compiles out */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+
+/* Allocate a zero-initialized FPGA model; returns NULL on failure. */
+nt_fpga_t *fpga_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/*
+ * Scrub and free an FPGA model. NULL is now accepted and ignored
+ * (consistent with param_delete()); previously a NULL argument crashed
+ * in memset(). Does NOT free owned params/modules - callers that own a
+ * fully-populated model should use fpga_delete_all().
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/*
+ * Tear down a model created by fpga_init(): delete every module and
+ * parameter object, release the pointer arrays, then free the model
+ * itself. Previously the param objects and both pointer arrays
+ * (mpa_params, mpa_modules) allocated in fpga_init() were leaked.
+ */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_param = p->mpa_params[i];
+
+		if (p_param)
+			param_delete(p_param);
+	}
+
+	/* free(NULL) is a no-op, so empty models are handled too */
+	free(p->mpa_modules);
+	free(p->mpa_params);
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate an FPGA model from its generated product definition: copy the
+ * identity fields and instantiate the parameter and module objects.
+ * On allocation failure the corresponding count is reset to zero so that
+ * later iteration (fpga_get_product_param(), fpga_query_module(), ...)
+ * never dereferences a NULL array - previously the count stayed non-zero
+ * while the array pointer was NULL.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		/* calloc replaces the malloc+memset pair */
+		p->mpa_params = calloc(p->mn_params, sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		} else {
+			p->mn_params = 0;
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			calloc(fpga_prod_init->nb_modules, sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		} else {
+			p->mn_modules = 0;
+		}
+	}
+}
+
+/* Record the debug mode on the model and propagate it to every module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *p_module = p->mpa_modules[idx];
+
+		if (!p_module)
+			continue;
+		module_set_debug_mode(p_module, n_debug_mode);
+	}
+}
+
+/*
+ * Linear search for the module with the given id and instance number.
+ * Returns NULL when no such module exists in this model.
+ */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *p_module = p->mpa_modules[idx];
+
+		if (p_module->m_mod_id != id)
+			continue;
+		if (p_module->m_instance != instance)
+			continue;
+		return p_module;
+	}
+	return NULL;
+}
+
+/* Convenience predicate: does module id/instance exist in this model? */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/*
+ * Find the generated init record for module id/instance in the product
+ * definition this model was built from; NULL when absent.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mp_init->nb_modules; idx++) {
+		nt_fpga_module_init_t *p_entry = &p->mp_init->modules[idx];
+
+		if (p_entry->id == id && p_entry->instance == instance)
+			return p_entry;
+	}
+	return NULL;
+}
+
+/*
+ * Return the value of product parameter n_param_id, or n_default_value
+ * when the parameter is not present in this FPGA product.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_params; idx++) {
+		const nt_param_t *p_entry = p->mpa_params[idx];
+
+		if (p_entry->param_id == n_param_id)
+			return p_entry->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* @return the FPGA image's product code. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* @return the FPGA image's version number. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* @return the FPGA image's revision number. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log a one-line FPGA identification summary (item/prod/ver/rev/patch/build). */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug-dump the FPGA summary followed by all parameters and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug-dump every product parameter of the FPGA. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_par = p->mpa_params[i];
+
+		param_dump(p_par);
+	}
+}
+
+/* Debug-dump every module (and, transitively, its registers). */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		module_dump(p_mod);
+	}
+}
+
+/*
+ * Param
+ */
+/* Allocate a parameter instance; may return NULL on allocation failure. */
+nt_param_t *param_new(void)
+{
+	/*
+	 * Fix: calloc instead of malloc.  The struct is otherwise
+	 * indeterminate until param_init() runs, and callers do not check
+	 * the init path - zero contents are always a safe starting state.
+	 */
+	nt_param_t *p = calloc(1, sizeof(nt_param_t));
+	return p;
+}
+
+/* Scrub and free a parameter instance; NULL is tolerated. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a parameter to its owner FPGA and copy id/value from the init data. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-dump a single product parameter (id and value). */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/* Allocate a module instance; may return NULL on allocation failure. */
+nt_module_t *module_new(void)
+{
+	/*
+	 * Fix: calloc instead of malloc so that pointer members
+	 * (mpa_registers) and counters are determinate even when
+	 * module_init() later skips the register-array branch.
+	 */
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+/* Delete a module: free all owned registers, then the module itself. */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	/*
+	 * Fix: the register pointer array itself was leaked.  Guarded by
+	 * mn_registers because module_init() only assigns mpa_registers
+	 * when the module has registers.
+	 */
+	if (p->mn_registers)
+		free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialize a module from its static init descriptor: copy identity,
+ * versioning and bus/address data, then allocate and initialize the
+ * per-register object array.
+ *
+ * NOTE(review): p_init is dereferenced unchecked, and a failed
+ * mpa_registers allocation leaves mn_registers non-zero with a NULL
+ * array - confirm callers/iterators tolerate that.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	/* Build the register object array from the init table. */
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Initialize a module by looking up its init descriptor in the owning
+ * FPGA, then override the debug mode.
+ *
+ * NOTE(review): fpga_lookup_init() may return NULL for an unknown
+ * (mod_id, instance); module_init() dereferences it unchecked - confirm
+ * all callers pass known modules.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump a module's identity and all of its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register owned by the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int n = 0;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	while (n < p->mn_registers) {
+		const nt_register_t *p_cur_reg = p->mpa_registers[n++];
+
+		register_dump(p_cur_reg);
+	}
+}
+
+/* @return the module's major version. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* @return the module's minor version. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* @return major/minor packed as (major << 32) | minor. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	const uint64_t n_major = (uint64_t)p->m_major_version & 0xFFFFFFFF;
+	const uint64_t n_minor = (uint64_t)p->m_minor_version & 0xFFFFFFFF;
+
+	return (n_major << 32) | n_minor;
+}
+
+/* True when the module's version is at least major_version.minor_version. */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+/* Linear search for a register by id; NULL when the module lacks it. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		if (p->mpa_registers[n]->m_id == id)
+			return p->mpa_registers[n];
+	}
+	return NULL;
+}
+
+/*
+ * Get a register by id, logging an error when the module context is
+ * missing or the register is unknown.  Returns NULL in both cases.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_register_t *p_register = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		p_register = p->mpa_registers[i];
+		if (p_register)
+			register_set_debug_mode(p_register, n_debug_mode);
+	}
+}
+
+/* @return the bus type id the module is attached to. */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* @return the module's base address on its bus. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/*
+ * Log that a module is present but not supported by this driver.
+ * (Function name typo "unsuppported" is kept - it is part of the
+ * public interface declared in the header.)
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* Fix: add the trailing newline every other NT_LOG call uses. */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/* Allocate a register instance; may return NULL on allocation failure. */
+nt_register_t *register_new(void)
+{
+	/*
+	 * Fix: calloc instead of malloc.  register_delete() tests
+	 * mp_shadow/mp_dirty before freeing, but register_init() only
+	 * assigns them on the field-allocation path - with malloc those
+	 * pointers are indeterminate for field-less registers and the
+	 * free() calls are undefined behavior.
+	 */
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+/* Delete a register: free owned fields, shadow/dirty buffers, then itself. */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+	/*
+	 * Fix: the field pointer array itself was leaked.  Guarded by
+	 * mn_fields because register_init() only assigns mpa_fields when
+	 * the register has fields.
+	 */
+	if (p->mn_fields)
+		free(p->mpa_fields);
+
+	if (p->mp_shadow)
+		free(p->mp_shadow);
+
+	if (p->mp_dirty)
+		free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Initialize a register from its static init descriptor: identity,
+ * address, type and word length, then allocate the field objects and
+ * the shadow/dirty buffers that back read/write operations.
+ *
+ * NOTE(review): mp_shadow/mp_dirty are only allocated when the register
+ * has fields AND the field array allocation succeeded; field-less
+ * registers keep whatever register_new() left in those members.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	/* Length in 32-bit words, rounded up from the bit width. */
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* Shadow copy of the register contents (m_len words). */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			/* Per-word dirty flags for deferred flushing. */
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug-dump a register's identity/address info and all of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* @return the register's absolute bus address. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Reset every field of the register to its documented reset value. */
+void register_reset(const nt_register_t *p)
+{
+	int n;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		nt_field_t *p_cur_field = p->mpa_fields[n];
+
+		if (p_cur_field)
+			field_reset(p_cur_field);
+	}
+}
+
+/* Linear search for a field by id; NULL when p is NULL or id unknown. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int n;
+
+	if (!p)
+		return NULL;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		if (p->mpa_fields[n]->m_id == id)
+			return p->mpa_fields[n];
+	}
+	return NULL;
+}
+
+/*
+ * Get a field by id, logging an error when the register context is
+ * missing or the field is unknown.  Returns NULL in both cases.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Silent field lookup: no error logging, NULL when absent. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* @return the register's declared bit width. */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* @return the register's address relative to the module base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/*
+ * @return the debug mode.
+ * NOTE(review): parameter is nt_module_t* although this is the register
+ * getter (the header declares the same signature) - looks like a
+ * copy/paste from module_get_debug_mode; both structs have m_debug_mode,
+ * so it compiles.  Confirm intended type before changing.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+/* Set the register debug mode and propagate it to every field. */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int n = 0;
+
+	p->m_debug_mode = n_debug_mode;
+
+	while (n < p->mn_fields) {
+		nt_field_t *p_cur_field = p->mpa_fields[n++];
+
+		if (p_cur_field)
+			field_set_debug_mode(p_cur_field, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register contents from hardware into the shadow buffer.
+ * @return the bus read status from read_data().
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/*
+	 * Fix: the original dereferenced 'p' in the local initializers
+	 * BEFORE the "if (p && ...)" null test, making that test dead
+	 * code.  Assert up front, then resolve the FPGA info.
+	 */
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	return read_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			 p->m_len, p->mp_shadow);
+}
+
+/*
+ * Read the register contents into the shadow buffer while capturing TSC
+ * timestamps before/after the bus access.
+ * @return the bus read status from read_data_tsc().
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/*
+	 * Fix: same deref-before-null-test as register_read_data() - 'p'
+	 * was used in initializers before being tested for NULL.
+	 */
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	return read_data_tsc(p_fpga_info, module_get_bus(p->mp_owner),
+			     p->m_addr, p->m_len, p->mp_shadow, p_tsc1, p_tsc2);
+}
+
+/*
+ * Write cnt register-lengths of shadow data to hardware.
+ * @return the bus write status from write_data().
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/*
+	 * Fix: same deref-before-null-test as register_read_data() - 'p'
+	 * was used in initializers before being tested for NULL.
+	 */
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	return write_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			  (p->m_len * cnt), p->mp_shadow);
+}
+
+/*
+ * Copy up to len words of the register's shadow data into p_data.
+ * len == (uint32_t)-1 means "the whole register".
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t n;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (n = 0; n < len; n++)
+		p_data[n] = p->mp_shadow[n];
+}
+
+/* @return the first 32-bit word of the register's shadow data. */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t n_val = 0;
+
+	register_get_val(p, &n_val, 1);
+	return n_val;
+}
+
+/*
+ * Refresh the register's shadow data from hardware (no-op for
+ * write-only registers), with optional ON_READ debug tracing.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		/* Trace the freshly read words when ON_READ debug is set. */
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Re-read the register from hardware, then return its first word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t n_val = 0;
+
+	register_update(p);
+	register_get_val(p, &n_val, 1);
+	return n_val;
+}
+
+/* Mark every word of the register as needing a flush to hardware. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t n;
+
+	for (n = 0; n < p->m_len; n++)
+		p->mp_dirty[n] = true;
+}
+
+/*
+ * Copy len words from p_data into the register's shadow buffer.
+ * len == (uint32_t)-1 means "the whole register".
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	/*
+	 * Fix: clamp BEFORE asserting, mirroring register_get_val().
+	 * The original asserted len <= m_len first, so the documented
+	 * len == -1 ("whole register") convention tripped the assert in
+	 * debug builds and the clamp below was dead code.
+	 */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Set the shadow value and immediately write it to hardware. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write cnt register-lengths of shadow data to hardware (no-op for
+ * read-only registers), with optional ON_WRITE debug tracing.
+ *
+ * NOTE(review): the write covers len*cnt words but only cnt entries of
+ * mp_dirty (which has m_len entries) are cleared afterwards - confirm
+ * the intended dirty-flag indexing.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		/* Guard the bus layer against oversized bursts. */
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a hardware read and capture TSC timestamps around it. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	(void)register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the shadow buffer and mark the register dirty. */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set every shadow bit and mark the register dirty. */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/* Allocate a field instance; may return NULL on allocation failure. */
+nt_field_t *field_new(void)
+{
+	/*
+	 * Fix: calloc instead of malloc so the struct is determinate
+	 * before field_init() runs (mp_init in particular is never
+	 * assigned by field_init()).
+	 */
+	nt_field_t *p = calloc(1, sizeof(nt_field_t));
+	return p;
+}
+
+/* Scrub and free a field instance. */
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
+/*
+ * Initialize a field from its static init descriptor and precompute the
+ * bit-extraction geometry: a front mask (partial bits in the first
+ * word), a body length (whole 32-bit words), and a tail mask (partial
+ * bits in the last word), as consumed by field_get_val()/field_set_val().
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	/* Number of 32-bit words the field value occupies, rounded up. */
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* Bits available in the first word from m_first_bit upward. */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		/* Front mask is positioned at the field's bit offset. */
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* bits_remaining < 32 here, so a 32-bit shift is safe. */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/*
+ * @return the debug mode.
+ * NOTE(review): parameter is nt_module_t* although this is the field
+ * getter (mirrors register_get_debug_mode); both structs carry
+ * m_debug_mode, so behavior is unchanged.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set this field's debug mode (fields are not propagated further). */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* @return the field's width in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* @return the field's lowest bit position within the register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* @return the field's highest bit position within the register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/* @return the field's first-word mask, positioned at its bit offset. */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Write the field's documented reset value into the shadow buffer. */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* @return the field mask shifted down to bit 0 (value-space mask). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+/* @return the field's documented reset value. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field's value from the register's shadow buffer into
+ * p_data (len must equal mn_words).  Uses a 32/64-bit union as a
+ * sliding window: each pair of shadow words is shifted right by the
+ * field's bit offset to align the value to bit 0.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		/* Re-align the window for the next word pair. */
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert the field's value from p_data into the register's shadow
+ * buffer (len must equal mn_words), preserving neighboring bits via the
+ * precomputed front/tail masks, then mark the register dirty.  Mirrors
+ * the sliding-window scheme of field_get_val(), shifting left into
+ * position.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	/* Merge under the front mask so neighboring bits survive. */
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		/* Merge under the tail mask so higher bits survive. */
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Set the field value and flush its register to hardware. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* @return the field value from the current shadow data (single word). */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t n_val;
+
+	field_get_val(p, &n_val, 1);
+	return n_val;
+}
+
+/* Re-read the register from hardware, then return the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t n_val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &n_val, 1);
+
+	return n_val;
+}
+
+/* Trigger a timed read of the field's register (TSC before/after). */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Re-read the field's register from hardware. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the field's register shadow data to hardware. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set a single-word field value in the shadow buffer. */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Set a single-word field value and flush the register. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear all bits of a single-word field in the shadow buffer. */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+/* Clear all bits of the field and flush the register. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set all bits of a single-word field in the shadow buffer. */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set all bits of the field and flush the register. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Polling conditions understood by field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL, /* wait until every field bit is clear */
+	FIELD_MATCH_SET_ALL, /* wait until every field bit is set */
+	FIELD_MATCH_CLR_ANY, /* wait until at least one bit is clear */
+	FIELD_MATCH_SET_ANY, /* wait until at least one bit is set */
+};
+
+/*
+ * Poll a field (up to 32 bits) until the requested bit condition holds.
+ * Negative iteration/interval arguments select the defaults.
+ *
+ * @return 0 when the condition is met, -1 when iterations are exhausted.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	/*
+	 * Fix: build the all-bits mask with a 64-bit shift.  For a 32-bit
+	 * field the original "1 << 32" is undefined behavior (shift by
+	 * the type width) and typically produced a zero mask, so
+	 * FIELD_MATCH_SET_ALL could never be satisfied.
+	 */
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		/* Same 64-bit-shift fix for the debug-only register mask. */
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(((1ULL << p->mn_bit_width) - 1)
+				   << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			/*
+			 * NOTE(review): compares the extracted value against
+			 * the position-shifted front mask (field_get_mask),
+			 * not the value-space mask - kept as-is.
+			 */
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Poll until every field bit is set; see field_wait_cond32(). */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit is clear; see field_wait_cond32(). */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is set; see field_wait_cond32(). */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is clear; see field_wait_cond32(). */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll a field until its value equals (n_wait_cond_value & n_wait_cond_mask).
+ *
+ * NOTE(review): the read-back value is compared WITHOUT applying
+ * n_wait_cond_mask - if the field carries bits outside the mask the
+ * condition can never match.  Looks like it should be
+ * (val & n_wait_cond_mask); confirm against callers before changing.
+ *
+ * @return 0 on match, -1 when n_poll_iterations is exhausted.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Debug-dump a field's geometry (id, bit range, width, words, reset). */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Debug-dump a field's current value, most significant word first.
+ * NOTE(review): buf holds 32 words; assumes mn_words <= 32 (fields up
+ * to 1024 bits) - confirm no wider field exists in the register maps.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Debug-dump a field init descriptor (id, bit range, width, reset). */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Resolve the FPGA model instance matching n_fpga_ident via a temporary
+ * FPGA manager.  Logs (but still returns NULL) when no model matches.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_mgr = fpga_mgr_new();
+	nt_fpga_t *p_fpga;
+	char s_fpga_prod_ver_rev_str[32];
+
+	fpga_mgr_init(p_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_mgr, n_fpga_ident, p_fpga_info);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d",
+		 FPGAID_TO_PRODUCTTYPE(n_fpga_ident),
+		 FPGAID_TO_PRODUCTCODE(n_fpga_ident),
+		 FPGAID_TO_VERSIONCODE(n_fpga_ident),
+		 FPGAID_TO_REVISIONCODE(n_fpga_ident));
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	if (p_mgr) {
+		fpga_mgr_delete(p_mgr);
+		p_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+/* Convenience wrapper: module lookup by (id, instance). */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+/* Convenience wrapper: register lookup with error logging. */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	return module_get_register(p_mod, n_reg);
+}
+
+/* Convenience wrapper: field lookup with error logging. */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+/* Decode the packed 64-bit FPGA identifier into its components. */
+#ifndef FPGAID_TO_PRODUCTCODE
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack major/minor version numbers as (major << 32) | minor. */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug tracing flags: trace on register reads and/or writes. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module can be attached to. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+/* Forward declarations for the FPGA model object hierarchy. */
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of the statically-known FPGA product descriptions. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* One FPGA instance: identity plus its parameter and module objects. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* A product parameter: an (id, value) pair owned by an FPGA. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* A module: versioned, bus-addressed container of registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers;
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* A register: addressed storage with a shadow copy and dirty tracking. */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;
+	uint32_t m_addr;
+	uint32_t m_type;
+	uint32_t m_len;
+
+	int m_debug_mode;
+
+	int mn_fields;
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;
+	bool *mp_dirty;
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* A field: a bit range within a register, with precomputed masks. */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word;
+	uint32_t m_first_bit;
+	uint32_t m_front_mask;
+	uint32_t m_body_length;
+	uint32_t mn_words;
+	uint32_t m_tail_mask;
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+/* FPGA manager lifecycle and lookup. */
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+/* FPGA instance lifecycle and queries. */
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+/* Product parameters. */
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+/* Modules. */
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+/* Registers. */
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+/* Fields. */
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Common convenience includes and helper macros shared by the nthw code.
+ */
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a true array (not valid on pointers/decayed parameters). */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	/*
+	 * Translate a PCI device id into the logical adapter id.
+	 * Unknown device ids map to NT_HW_ADAPTER_ID_UNKNOWN.
+	 */
+	static const struct {
+		uint16_t pci_device_id;
+		nthw_adapter_id_t adapter_id;
+	} device_map[] = {
+		{ NT_HW_PCI_DEVICE_ID_NT40E3, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT100E3, NT_HW_ADAPTER_ID_NT100E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT80E3, NT_HW_ADAPTER_ID_NT80E3 },
+		/* NT40A00/NT40A01 share the NT40E3 adapter id */
+		{ NT_HW_PCI_DEVICE_ID_NT40A00, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT40A01, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200E3, NT_HW_ADAPTER_ID_NT200E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A01, NT_HW_ADAPTER_ID_NT200A01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200D01, NT_HW_ADAPTER_ID_NT200D01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A02, NT_HW_ADAPTER_ID_NT200A02 },
+		{ NT_HW_PCI_DEVICE_ID_NT50B01, NT_HW_ADAPTER_ID_NT50B01 },
+		{ NT_HW_PCI_DEVICE_ID_NT100A01, NT_HW_ADAPTER_ID_NT100A01 },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(device_map) / sizeof(device_map[0]); i++) {
+		if (device_map[i].pci_device_id == n_pci_device_id)
+			return device_map[i].adapter_id;
+	}
+	return NT_HW_ADAPTER_ID_UNKNOWN;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * PCI ids for the supported Napatech adapters and the mapping helper
+ * from PCI device id to logical adapter id.
+ */
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+/* Napatech PCI vendor id */
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+/* PCI device ids of the supported adapters */
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/*
+ * Logical adapter identifiers.
+ * NOTE: NT_HW_ADAPTER_ID_NT40A01 is deliberately an alias with the same
+ * enumerator value as NT_HW_ADAPTER_ID_NT40E3; the enumerators that follow
+ * continue counting from that shared value.
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+/* Map a PCI device id to its adapter id (UNKNOWN if not recognized). */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* Profile of the FPGA image loaded on the adapter. */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent the RAB echo debug trace from ever getting into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+nthw_rac_t *nthw_rac_new(void)
+{
+	/*
+	 * Allocate and zero-initialize a RAC instance.
+	 * Returns NULL on allocation failure (the original code called
+	 * memset() on an unchecked malloc() result, which is UB on OOM).
+	 */
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	/* Destroy a RAC instance; NULL is tolerated. */
+	if (!p)
+		return;
+
+	/* Scrub the instance before releasing the memory. */
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	/*
+	 * Look up all RAC module registers/fields and initialize the RAC
+	 * instance.  With p == NULL the call degenerates to a presence probe
+	 * for the RAC module.  Returns 0 on success, -1 when the module
+	 * instance is missing.
+	 */
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+	int n_init_debug_mode = 0;
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			/*
+			 * BUGFIX: defer register_set_debug_mode() until
+			 * p->mp_reg_rab_init has been looked up below; the
+			 * original code called it here while the register
+			 * pointer was still NULL (instance is zero-filled).
+			 */
+			n_init_debug_mode = n_debug_mode;
+		}
+	}
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	/* Re-enable debug tracing for the RAB_INIT register only (see above) */
+	if (n_init_debug_mode)
+		register_set_debug_mode(p->mp_reg_rab_init, n_init_debug_mode);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/*
+		 * BUGFIX: clear the field pointer; the original cleared
+		 * mp_reg_dbg_data (already NULL) by copy-paste mistake.
+		 */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw register addresses for the fast direct-BAR access paths */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional non-memory-mapped bus registers */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	/* Number of RAB interfaces as read from the FPGA product parameters. */
+	return p->mn_param_rac_rab_interfaces;
+}
+
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	/*
+	 * Poll the RAB output-buffer "used" register until at least word_cnt
+	 * words are available, or the retry budget is exhausted.
+	 * Returns 0 on success, -1 on timeout.
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t words_ready = 0;
+	int attempts_left = 100000;
+
+	while (attempts_left-- > 0) {
+		uint32_t reg_val;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &reg_val);
+		words_ready = (reg_val & p->rac_rab_buf_used_ob_used_mask) >>
+			      p->rac_rab_buf_used_ob_used_low;
+		if (words_ready >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, words_ready, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be exercised - as all RAB modules will be held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Reset the RAB interfaces selected by n_rab_intf_mask (bit n resets
+	 * RAB interface n).
+	 *
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	/* Perform the full "flip/flip" reset sequence on all RAB buses. */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* NOTE(review): _unused presumably expands to an unused-attribute for
+	 * builds where NT_LOG(DBG, ...) compiles out - confirm its definition.
+	 */
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	/*
+	 * Allocate (once) the RAB DMA in/out buffers and program their
+	 * physical addresses into the adapter, then sync the software
+	 * ring pointers with the hardware ones.
+	 */
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* Two rings (in + out) of RAB_DMA_BUF_CNT 32-bit words each */
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		/* Out ring starts right after the in ring */
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	/*
+	 * Start a DMA transaction: take the RAC mutex and mark the DMA
+	 * channel active.  On success the mutex is intentionally kept
+	 * locked; nthw_rac_rab_dma_commit() releases it.
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	/* Already active: back out and report the misuse. */
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_fpga_info->mp_adapter_id_str);
+	return -1;
+}
+
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	/* Terminate the in-ring with a completion word and kick off the DMA. */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word so the wait loop sees fresh state */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* Update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	/*
+	 * Busy-poll the out-ring for the completion word written by the
+	 * adapter.  Returns 0 on completion, -1 on timeout (RAB_DMA_WAIT
+	 * 1-usec polls).
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word and refill the in-ring budget */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	/*
+	 * Start the queued DMA transfer and wait for its completion, then
+	 * end the transaction begun by nthw_rac_rab_dma_begin() (which left
+	 * the mutex locked).  Returns the wait result.
+	 */
+	int ret;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+
+	p->m_dma_active = false;
+
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	/* Load one 32-bit word from a BAR0-mapped register. */
+	const volatile uint32_t *const src =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					    reg_addr);
+
+	*p_data = *src;
+}
+
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	/* Store one 32-bit word to a BAR0-mapped register. */
+	volatile uint32_t *const dst =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*dst = p_data;
+}
+
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	/*
+	 * Queue a RAB write (command word + payload) into the DMA in-ring.
+	 * Must be called between dma_begin() and dma_commit().
+	 * Returns 0 on success, -1 on invalid length or insufficient ring space.
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): the space check reserves word_cnt + 3 but only
+	 * word_cnt + 1 is consumed here - presumably the extra 2 words are
+	 * headroom for the completion written by dma_activate(); confirm.
+	 */
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload into the ring, wrapping at RAB_DMA_BUF_CNT */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	/*
+	 * Queue a RAB read command into the DMA in-ring and return, via
+	 * buf_ptr, where the response data will land in the out-ring.
+	 * Must be called between dma_begin() and dma_commit().
+	 * Returns 0 on success, -1 on invalid length or insufficient space.
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * BUGFIX: the original format string had five conversion
+		 * specifiers but only four arguments (undefined behavior) and
+		 * no trailing newline.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Only the command word itself occupies the in-ring */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Tell the caller where the response will appear in the out-ring */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	/*
+	 * Synchronous (non-DMA) RAB write: push a write command plus payload
+	 * through the in-buffer register, append a completion command, and
+	 * wait for the completion to appear in the out-buffer.
+	 * Returns 0 on success, -1 on parameter/buffer/timeout errors.
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/*
+	 * NOTE(review): the three bound checks below use ">" against
+	 * (1 << BW), so the value equal to 1 << BW itself is accepted even
+	 * though it does not fit in BW bits - presumably these should be
+	 * ">=" (or "> (1 << BW) - 1"); confirm against hardware spec.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): word_cnt == (1 << RAB_CNT_BW) passes this check but
+	 * is masked to 0 when composing the command word below - presumably
+	 * the hardware interprets a count field of 0 as 256; confirm.
+	 */
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* The register-based path must not be mixed with an active DMA session */
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write data to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			/* Verify the echoed write command matches what was sent */
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 set means a timeout/overflow was flagged by the FPGA */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Read word_cnt 32-bit words from a RAB (Register Access Bus) slave.
+ *
+ * Validates the address/bus/count parameters, issues a single read
+ * command into the input buffer, waits for the expected number of
+ * output words, and drains them into p_data.  The whole transaction
+ * runs with p->m_mutex held.
+ *
+ * Returns 0 on success, -1 on parameter, buffer-space or timeout errors.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/*
+	 * NOTE(review): the range checks below use '>' so the value
+	 * (1 << BW) itself passes although the field is only BW bits wide;
+	 * '>=' looks intended (the write path has the same pattern) - confirm.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	/* Low 16 bits: input-buffer free entries; high 16 bits: output buffer */
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		/*
+		 * NOTE(review): unlike the echo path (and the write path),
+		 * word_cnt is not masked with ((1 << RAB_CNT_BW) - 1) here;
+		 * word_cnt == (1 << RAB_CNT_BW) passes validation above and
+		 * would spill into the bus-id field - confirm intent.
+		 */
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				/* Some FPGAs need an explicit OB consume ack */
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 flags timeout/overflow on the RAB transaction */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB input/output buffers.
+ *
+ * Sets the flush bit, clears the BUF_FREE register, then polls BUF_USED
+ * until only the flush bit remains set (i.e. IB_USED/OB_USED drained),
+ * and finally clears the flush bit again.  Runs under p->m_mutex.
+ *
+ * Returns 0 on success, -1 if the buffers never drained.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/*
+	 * Wait until OB_USED and IB_USED are 0.
+	 * NOTE(review): delay-free busy poll of up to 100000 PCI reads;
+	 * the '& 0xFFFFFFFF' mask is a no-op on a uint32_t.
+	 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
+/*
+ * RAC (Register Access Controller) context.
+ *
+ * Bridges host accesses onto the FPGA-internal RAB (Register Access
+ * Bus).  Holds the FPGA model handles for the RAC registers/fields,
+ * cached raw register addresses and field masks for the hot I/O paths,
+ * and the state of the optional DMA-based access mode.
+ * m_mutex serializes all RAB transactions on this instance.
+ */
+struct nthw_rac {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rac;
+
+	pthread_mutex_t m_mutex;
+
+	/* Product parameters read from the FPGA model */
+	int mn_param_rac_rab_interfaces;
+	int mn_param_rac_rab_ob_update;
+
+	nt_register_t *mp_reg_dummy0;
+	nt_register_t *mp_reg_dummy1;
+	nt_register_t *mp_reg_dummy2;
+
+	nt_register_t *mp_reg_rab_init;
+	nt_field_t *mp_fld_rab_init;
+
+	int mn_fld_rab_init_bw;
+	uint32_t mn_fld_rab_init_mask;
+
+	nt_register_t *mp_reg_dbg_ctrl;
+	nt_field_t *mp_fld_dbg_ctrl;
+
+	nt_register_t *mp_reg_dbg_data;
+	nt_field_t *mp_fld_dbg_data;
+
+	/* RAB input (command/data) and output (result) buffer registers */
+	nt_register_t *mp_reg_rab_ib_data;
+	nt_field_t *mp_fld_rab_ib_data;
+
+	nt_register_t *mp_reg_rab_ob_data;
+	nt_field_t *mp_fld_rab_ob_data;
+
+	nt_register_t *mp_reg_rab_buf_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
+	nt_field_t *mp_fld_rab_buf_free_ob_free;
+	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
+	nt_field_t *mp_fld_rab_buf_free_timeout;
+
+	nt_register_t *mp_reg_rab_buf_used;
+	nt_field_t *mp_fld_rab_buf_used_ib_used;
+	nt_field_t *mp_fld_rab_buf_used_ob_used;
+	nt_field_t *mp_fld_rab_buf_used_flush;
+
+	/* DMA ring physical-address and ring-pointer registers */
+	nt_register_t *mp_reg_rab_dma_ib_lo;
+	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_hi;
+	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_hi;
+	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_lo;
+	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_wr;
+	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ib_rd;
+	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ob_wr;
+	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;
+
+	nt_register_t *mp_reg_rab_nmb_rd;
+	nt_register_t *mp_reg_rab_nmb_data;
+	nt_register_t *mp_reg_rab_nmb_wr;
+	nt_register_t *mp_reg_rab_nmb_status;
+
+	/* Cached raw BAR addresses for direct 32-bit access (hot path) */
+	uint32_t rac_rab_init_addr;
+	uint32_t rac_rab_ib_data_addr;
+	uint32_t rac_rab_ob_data_addr;
+	uint32_t rac_rab_buf_free_addr;
+	uint32_t rac_rab_buf_used_addr;
+
+	uint32_t rac_rab_dma_ib_lo_addr;
+	uint32_t rac_rab_dma_ib_hi_addr;
+	uint32_t rac_rab_dma_ob_lo_addr;
+	uint32_t rac_rab_dma_ob_hi_addr;
+	uint32_t rac_rab_dma_ib_rd_addr;
+	uint32_t rac_rab_dma_ob_wr_addr;
+	uint32_t rac_rab_dma_ib_wr_addr;
+
+	/* Cached field masks for the BUF_FREE/BUF_USED registers */
+	uint32_t rac_rab_buf_free_ib_free_mask;
+	uint32_t rac_rab_buf_free_ob_free_mask;
+	uint32_t rac_rab_buf_used_ib_used_mask;
+	uint32_t rac_rab_buf_used_ob_used_mask;
+	uint32_t rac_rab_buf_used_flush_mask;
+
+	uint32_t rac_rab_buf_used_ob_used_low;
+
+	uint32_t rac_nmb_rd_adr_addr;
+	uint32_t rac_nmb_data_addr;
+	uint32_t rac_nmb_wr_adr_addr;
+	uint32_t rac_nmb_status_addr;
+
+	/* DMA-mode state (valid between rab_dma_begin/commit) */
+	bool m_dma_active;
+
+	struct nt_dma_s *m_dma;
+
+	volatile uint32_t *m_dma_in_buf;
+	volatile uint32_t *m_dma_out_buf;
+
+	uint16_t m_dma_out_ptr_rd;
+	uint16_t m_dma_in_ptr_wr;
+	uint32_t m_in_free;
+};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+/* Cursor into a DMA result buffer returned by nthw_rac_rab_read32_dma() */
+struct dma_buf_ptr {
+	uint32_t size;
+	uint32_t index;
+	volatile uint32_t *base;
+};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+/* Register-mode and DMA-mode RAB accessors; all return 0/-1 */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+/* Raw 32-bit BAR access helpers used by all of the above */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+/*
+ * Allocate a zero-initialized STAT module context.
+ * Returns NULL on allocation failure; release with nthw_stat_delete().
+ */
+nthw_stat_t *nthw_stat_new(void)
+{
+	/* calloc() zeroes the block - same result as malloc+memset in one call */
+	return calloc(1, sizeof(nthw_stat_t));
+}
+
+/* Free a STAT context; free(NULL) is a no-op, so no guard is needed. */
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	free(p);
+}
+
+/*
+ * Bind a STAT context to STA module instance n_instance of p_fpga and
+ * derive the statistics memory layout (counter counts per color, host
+ * buffer and port) from the module version and product parameters.
+ *
+ * If p is NULL, only probes for the instance: returns 0 when it exists,
+ * -1 otherwise.  Otherwise returns 0 on success, -1 if the instance is
+ * missing.  Leaves DMA disabled; see nthw_stat_set_dma_address().
+ */
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	/* NOTE(review): "%08lX" with a uint64_t is wrong on 32-bit - use PRIX64 */
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	/* Resolve register/field handles from the FPGA model */
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	/* RX port count: try VSWITCH param first, then fall back twice */
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	/* Two counters (pkts+octets, presumably - confirm) per color */
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	/* 6 counters per RX host buffer, +2 with DBS on STA >= v0.6 */
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	/* Map module version to the statistics layout revision */
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		/* NOTE(review): same "%08lX" portability issue as above */
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	/* VSWITCH profile overrides the per-port counter counts entirely */
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init: stop TX (if supported), clear counters, ack pending toggles,
+	 * and leave DMA disabled until nthw_stat_set_dma_address() runs.
+	 */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+/*
+ * Program the statistics DMA area into the STA module.
+ *
+ * stat_dma_physical: bus/IOVA address written to the HOST_ADR registers.
+ * p_stat_dma_virtual: CPU mapping of the same area; the first
+ * m_nb_counters 32-bit words hold counters, immediately followed by the
+ * 64-bit timestamp slot that mp_timestamp points at.
+ *
+ * Zeroes the counter area and seeds the timestamp.  Always returns 0.
+ */
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	/*
+	 * Fix: pointers must be converted explicitly - passing a pointer
+	 * where "%" PRIX64 expects a uint64_t is undefined behavior in a
+	 * variadic call (and prints garbage on 32-bit targets).
+	 */
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical,
+	       (uint64_t)(uintptr_t)p->mp_stat_dma_virtual,
+	       (uint64_t)(uintptr_t)p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+/*
+ * Request a fresh statistics DMA snapshot.
+ *
+ * Acknowledges a pending "toggle missed" condition, invalidates the old
+ * timestamp (caller polls it for the new snapshot), and re-asserts the
+ * DMA-enable field.  Always returns 0.
+ */
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	/* NULL until nthw_stat_set_dma_address() has run */
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
+/*
+ * STA (statistics) module context.
+ * NOTE(review): this header uses nt_fpga_t/nt_module_t/nt_field_t without
+ * including their header - relies on the including .c file's include order.
+ */
+struct nthw_stat {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_stat;
+	int mn_instance;
+
+	/* Layout revision derived from the module version (1..6) */
+	int mn_stat_layout_version;
+
+	bool mb_is_vswitch;
+	bool mb_has_tx_stats;
+
+	/* Port/queue topology read from product parameters */
+	int m_nb_phy_ports;
+	int m_nb_nim_ports;
+
+	int m_nb_rx_ports;
+	int m_nb_tx_ports;
+
+	int m_nb_rx_host_buffers;
+	int m_nb_tx_host_buffers;
+
+	int m_dbs_present;
+
+	int m_rx_port_replicate;
+
+	/* Counter-layout sizes computed in nthw_stat_init() */
+	int m_nb_color_counters;
+
+	int m_nb_rx_hb_counters;
+	int m_nb_tx_hb_counters;
+
+	int m_nb_rx_port_counters;
+	int m_nb_tx_port_counters;
+
+	/* Total number of 32-bit counter words in the DMA area */
+	int m_nb_counters;
+
+	nt_field_t *mp_fld_dma_ena;
+	nt_field_t *mp_fld_cnt_clear;
+
+	nt_field_t *mp_fld_tx_disable;
+
+	nt_field_t *mp_fld_cnt_freeze;
+
+	nt_field_t *mp_fld_stat_toggle_missed;
+
+	nt_field_t *mp_fld_dma_lsb;
+	nt_field_t *mp_fld_dma_msb;
+
+	/* Statistics DMA area: bus address and CPU mapping */
+	uint64_t m_stat_dma_physical;
+	uint32_t *mp_stat_dma_virtual;
+
+	uint64_t last_ts;
+
+	/* Points just past the counters, at the HW-written 64-bit timestamp */
+	uint64_t *mp_timestamp;
+};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules: numbered from 10000 so they never collide with DPDK's */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+/* Token-pastes a short module tag (e.g. NTHW) into its numeric id */
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif /* NT_LOG_MODULE_PREFIX */
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+/* Per-level enable switches; DB1/DB2 (verbose debug) default to off */
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+/* Each disabled level compiles its NT_LOG call away entirely */
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+/*
+ * Main logging entry point: NT_LOG(DBG, NTHW, "fmt", ...).
+ * Stringifies module and level into a "MODULE: LEVEL: " prefix that is
+ * concatenated onto the (string-literal) format argument.
+ */
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+/* Bit-flag severity values passed to the backend */
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
+/* Pluggable logger backend, registered once via nt_log_init() */
+struct nt_log_impl {
+	int (*init)(void);
+	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
+		   va_list args);
+	int (*is_debug)(uint32_t module);
+};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+/* Fixed capacity (bytes, incl. NUL) of every ntlog helper string buffer */
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+/* Logger backend registered via nt_log_init(); NULL until then */
+static struct nt_log_impl *user_impl;
+
+/*
+ * Register the logger backend and run its init hook.
+ * Returns the backend's init() result, or -1 for a NULL/incomplete impl
+ * (previously this dereferenced NULL).
+ */
+int nt_log_init(struct nt_log_impl *impl)
+{
+	if (impl == NULL || impl->init == NULL)
+		return -1;
+	user_impl = impl;
+	return user_impl->init();
+}
+
+/*
+ * Return a pointer to the first '\n' of the trailing EOL run in s
+ * (ignoring trailing spaces), or NULL if s does not end with '\n'.
+ * E.g. for "hello\n\n\n" the first of the three EOLs is returned, so
+ * the caller can truncate right after it.
+ */
+static char *last_trailing_eol(char *s)
+{
+	size_t len = strlen(s);
+
+	if (len == 0)
+		return NULL; /* fix: avoid reading s[-1] for "" */
+
+	int i = (int)len - 1;
+	/* Skip spaces */
+	while (i > 0 && s[i] == ' ')
+		--i;
+	if (s[i] != '\n')
+		return NULL;
+	/*
+	 * Find the last trailing EOL "hello_world\n\n\n"
+	 *                                         ^
+	 */
+	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
+		--i;
+	return &s[i];
+}
+
+/*
+ * Forward a log line to the registered backend, normalized so that it
+ * ends in exactly one '\n'.  Returns the backend's result, or -1 if no
+ * backend is registered or the scratch buffer cannot be allocated.
+ */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+
+	if (actual_format == NULL)
+		return rv; /* fix: allocation failure was dereferenced before */
+
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		/*
+		 * Fix: reserve one byte for the NUL that strncat() always
+		 * appends; the previous bound could write one past the buffer.
+		 */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format) - 1);
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+/*
+ * Returns 1 if debug logging is active for module, 0 for lower levels,
+ * -1 for an incorrect module or when no backend is registered
+ * (previously an unregistered backend caused a NULL dereference).
+ */
+int nt_log_is_debug(uint32_t module)
+{
+	if (user_impl == NULL)
+		return -1;
+	return user_impl->is_debug(module);
+}
+
+/*
+ * Allocate a fixed-size helper string buffer, optionally seeded with
+ * sinit (truncated to the buffer capacity, always NUL-terminated).
+ * Returns NULL on allocation failure; release with ntlog_helper_str_free().
+ */
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *buf = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (buf == NULL)
+		return NULL;
+	buf[0] = '\0';
+	if (sinit != NULL)
+		snprintf(buf, NTLOG_HELPER_STR_SIZE_MAX, "%s", sinit);
+	return buf;
+}
+
+/*
+ * Re-seed an existing helper string with sinit (truncated, always
+ * NUL-terminated) or clear it when sinit is NULL.  NULL s is a no-op.
+ */
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s == NULL)
+		return;
+	s[0] = '\0';
+	if (sinit != NULL)
+		snprintf(s, NTLOG_HELPER_STR_SIZE_MAX, "%s", sinit);
+}
+
+/*
+ * Append printf-formatted text to helper string s, silently truncating
+ * at the buffer capacity.  NULL s is a no-op.
+ */
+__rte_format_printf(2, 0)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (s == NULL)
+		return;
+
+	va_list args;
+	size_t used = strlen(s);
+
+	va_start(args, format);
+	vsnprintf(&s[used], (NTLOG_HELPER_STR_SIZE_MAX - 1 - used), format, args);
+	va_end(args);
+}
+
+/* Release a buffer from ntlog_helper_str_alloc(); NULL is accepted. */
+void ntlog_helper_str_free(char *s)
+{
+	free(s);
+}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..3850ccd934
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+/* Decompose a packed PCI identifier (domain:bus:device.function) */
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary (GCC statement-expression; a must be a
+ * power of two - not checked)
+ */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size
+ * (rounds to a power of two via rte_log2_u64 - assumed ceil; confirm)
+ */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+/* Timer-cycle based clocks; resolution depends on rte_get_timer_hz() */
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+/* One VFIO-mapped DMA region: IOVA, CPU address and mapped size */
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;
+	uint64_t size;
+};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+/* VFIO map/unmap callbacks, installed once via nt_util_vfio_init() */
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+/* VFIO map/unmap callbacks; copied in by nt_util_vfio_init() before first use */
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Register the VFIO map/unmap implementation (the struct is copied by value) */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate a DMA-capable buffer and map it through the registered VFIO
+ * callbacks.
+ *
+ * size:  requested buffer size in bytes (mapping is ALIGN_SIZE(size))
+ * align: alignment passed to rte_malloc_socket()
+ * numa:  NUMA node for the backing memory
+ *
+ * Returns a descriptor holding process VA, IOVA and mapped size, or NULL on
+ * any failure (all partial allocations are released).
+ * NOTE(review): assumes nt_util_vfio_init() was called first - vfio_dma_map
+ * is invoked unconditionally.
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* Fixed format specifiers: size and align are uint64_t, so %u / 0x%X
+	 * were undefined behavior; use PRIu64 / PRIX64 instead.
+	 */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and release a DMA buffer previously returned by nt_dma_alloc().
+ * A failed unmap is logged but the memory is freed regardless.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* Fixed format specifier: size is uint64_t, %u was undefined behavior */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-21 11:34   ` Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+/* Upper bound on adapters handled by the PMD (sizes the monitor task arrays) */
+#define NUM_ADAPTER_MAX (8)
+/* Upper bound on ports per adapter (sizes per-port id strings and sensors) */
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal handler that asks every monitor thread to stop.  When invoked
+ * internally with signum == -1 it additionally joins the threads that
+ * were still running and clears their handles.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t task_cnt = ARRAY_SIZE(monitor_task_is_running);
+	size_t idx;
+
+	for (idx = 0; idx < task_cnt; idx++) {
+		const int was_running = monitor_task_is_running[idx];
+
+		/* Request stop unconditionally */
+		monitor_task_is_running[idx] = 0;
+
+		if (signum != -1 || was_running == 0)
+			continue;
+
+		void *thread_ret = NULL;
+
+		pthread_join(monitor_tasks[idx], &thread_ret);
+		memset(&monitor_tasks[idx], 0, sizeof(monitor_tasks[0]));
+	}
+}
+
+/*
+ * Print a human-readable adapter summary (device name, PCI identity,
+ * FPGA type/product/version/revision ids, port counts and HW platform)
+ * to the stream 'pfh', then append the statistics dump.
+ * Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Render the PCI BDF as dddd:bb:dd.f */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	/* Statistics appended by the stat module */
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * Create and initialise the SPI v3 device used during sensor setup.
+ * Returns NULL on allocation or initialisation failure.
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *spi_dev = nthw_spi_v3_new();
+
+	if (!spi_dev) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(spi_dev, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(spi_dev);
+		return NULL;
+	}
+
+	return spi_dev;
+}
+
+/*
+ * Create and initialise the SPI device used for reading sensors.
+ * Returns NULL on allocation or initialisation failure.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *spi_dev = nthw_spis_new();
+
+	if (!spi_dev) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(spi_dev, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(spi_dev);
+		return NULL;
+	}
+
+	return spi_dev;
+}
+
+/* Append a sensor group to the list tail and bump the adapter count.
+ * A NULL sensor (failed init) leaves list and count untouched so later
+ * appends cannot dereference a NULL tail.
+ */
+static struct nt_sensor_group *
+adapter_sensor_append(struct adapter_info_s *adapter,
+		      struct nt_sensor_group *tail,
+		      struct nt_sensor_group *sensor)
+{
+	if (sensor == NULL)
+		return tail;
+	tail->next = sensor;
+	adapter->adapter_sensors_cnt++;
+	return sensor;
+}
+
+/*
+ * Build the adapter sensor list: the FPGA temperature sensor as head,
+ * followed by the AVR-monitored sensors (fan, PSU temps, PCB temp)
+ * when the sensor SPI is available.
+ *
+ * Fixes vs. original: a failed fpga_temperature_sensor_init() or
+ * avr_sensor_init() no longer leads to a NULL-pointer dereference, and
+ * adapter_sensors_cnt only counts successfully created sensors.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor is the list head */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	if (sensors_list_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "Failed to initialize FPGA temperature sensor\n");
+		if (sensors_s_spi)
+			nthw_spi_v3_delete(sensors_s_spi);
+		return;
+	}
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"FAN0",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_FAN,
+						NT_SENSOR_NT200E3_FAN_SPEED,
+						SENSOR_MON_FAN, 0,
+						SENSOR_MON_BIG_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&fan, 0xFFFF));
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PSU0",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200E3_PSU0_TEMP,
+						SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+						SENSOR_MON_LITTLE_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&exar7724_tj, 0xFFFF));
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PSU1",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200A02_PSU1_TEMP,
+						SENSOR_MON_MP2886A, 0x8d,
+						SENSOR_MON_BIG_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&mp2886a_tj, 0xFFFF));
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PCB",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200E3_PCB_TEMP,
+						SENSOR_MON_DS1775, 0,
+						SENSOR_MON_LITTLE_ENDIAN,
+						SENSOR_MON_SIGNED,
+						&ds1775_t, 0xFFFF));
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Initialise one NT4GA adapter: decode the PCI device id into HW info,
+ * build the device/adapter/port ident strings, instantiate the FPGA
+ * model, run the optional PCI TA/TG throughput test, set up sensors,
+ * initialise port link code for the supported FPGA product, optionally
+ * create/initialise EPP, and finally bring up the statistics module.
+ *
+ * Returns 0 on success, non-zero error code otherwise.  The malloc'd
+ * ident strings are owned by p_adapter_info and released in
+ * nt4ga_adapter_deinit(), including on early-error returns.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: "dddd:bb:dd.f" BDF string; NULL malloc is tolerated */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: "PCI:dddd:bb:dd.f", shared with fpga_info */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* One "<adapter>:intf_<i>" string per possible port */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model - fills in port/NIM counts below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	/* NOTE(review): fpga_info is the address of an embedded struct and
+	 * can never be NULL here; the assert documents intent only.
+	 */
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			/* TA/TG is optional - continue without it */
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Per-product link initialisation; ports start disabled */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional; only created when present in the FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter initialised by nt4ga_adapter_init(): stop monitor
+ * threads, stop statistics, shut down the FPGA model, reset the RAC/RAB,
+ * and free all ident strings and sensor lists owned by p_adapter_info.
+ *
+ * Returns the result of nthw_rac_rab_reset() (subsequent cleanup is
+ * unconditional and does not change the return value).
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1 = internal shutdown: also joins the monitor threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors (singly linked list headed in adapter_sensors) */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors (one list per physical port) */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * Hardware identity decoded from the PCI device id.
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id; /* bits 4..11 of pci_device_id */
+	int hw_product_type; /* bits 0..3 of pci_device_id */
+	int hw_reserved1; /* bits 12..15 of pci_device_id */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Top-level per-adapter state aggregating all sub-module state */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	/* Sensor lists built by adapter_sensor_setup(); freed in deinit */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* malloc'd ident strings owned by this struct (freed in deinit) */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next two variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Per-adapter filter-module state */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt; /* presumably number of interfaces - confirm with filter module */
+	int n_queues_per_intf_cnt; /* presumably queues per interface - confirm */
+
+	struct flow_nic_dev *mp_flow_device; /* flow device owned by the filter module */
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* 'p' is dereferenced below; the previous _unused annotation on it
+	 * was misleading and has been dropped.
+	 */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: report whether a NIM module is currently detected on the port
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	const nt4ga_link_t *link = &p->nt4ga_link;
+
+	return link->link_state[port].nim_present;
+}
+
+/*
+ * port: administrative state - stored inverted as port_disable
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed - recorded both as the requested action and current info
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code - the setter is a no-op and
+ * the getter always reports auto-negotiation as enabled.
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	(void)p;
+	(void)port;
+	(void)autoneg;
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	(void)p;
+	(void)port;
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: NIM capabilities - returns a by-value copy of the port's i2c context
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: TX laser power control.
+ * Returns 0 on success (or when the NIM is RX-only), 1 if the NIM rejects
+ * the request, and -1 for unsupported (non-QSFP28) port types.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	switch (link_info->u.nim_ctx[port].port_type) {
+	case NT_PORT_TYPE_QSFP28_SR4:
+	case NT_PORT_TYPE_QSFP28:
+	case NT_PORT_TYPE_QSFP28_LR4: {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		if (nim_ctx->specific_u.qsfp.rx_only)
+			return 0;
+		if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							   -1) != 0)
+			return 1;
+		return 0;
+	}
+	default:
+		return -1;
+	}
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state as read by the monitoring thread
+ */
+typedef struct link_state_s {
+	bool link_disabled; /* Port is administratively disabled */
+	bool nim_present; /* A NIM module is currently detected */
+	bool lh_nim_absent; /* NIM module is (or was) absent */
+	bool link_up; /* Current link status */
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Per-port link parameters as read by the monitoring thread */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Per-port requested settings, applied by the monitoring thread */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Union over the adapter variants; nim_ctx must be the first field of every
+ * variant so the generic nim_ctx member aliases it.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa; /* Bitmask of supported link speeds */
+	/* */
+	bool variables_initialized; /* Set once the per-port HW handles are set up */
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port's link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port's link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+/* Phases of the link-up sequence tracked by the monitor. */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+/*
+ * Per-port monitoring state.
+ * NOTE(review): not referenced anywhere in this file — presumably used by
+ * other link-monitor variants; confirm before removing.
+ */
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply (or remove) a loopback mode on one port.
+ * mode/last_mode: 0 = off, 1 = host loopback, 2 = line loopback.
+ * After any change the RX path is reset and the BIP/FEC counters cleared.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		/* Host loopback: also enable FEC, and skip the polarity swap. */
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* Loopback off: undo whatever the previous mode enabled. */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* NT200A01 hw2 and NT200A02 need the GTY polarity swap (see port_init);
+	 * for host loopback the swap must be removed (swap_polerity is false).
+	 */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear error counters once the RX path is out of reset. */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port).
+ * Reads the MAC/PCS link summary plus the GPIO "module present" pin and
+ * fills out *state. Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Debounced debug logging: only emit the summary when it
+		 * differs from the last message for this adapter/port.
+		 */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			/* NOTE(review): rte_strscpy NUL-terminates already;
+			 * the explicit terminator below is belt-and-braces.
+			 */
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present in the given interface.
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable the RX path of one MAC/PCS instance.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for interface symmetry with the other helpers */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable the TX path and select host as the TX source.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for interface symmetry with the other helpers */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable the RX path of one MAC/PCS instance.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for interface symmetry with the other helpers */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable the TX path and deselect host as the TX source.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for interface symmetry with the other helpers */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Pulse the RX path reset: assert, wait 10ms, de-assert, wait 10ms.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for interface symmetry with the other helpers */
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity.
+ * The polarity tables are per [port][lane] and only cover the two ports of
+ * the adapters (NT200A01 hw2 / NT200A02) that need the swap, so reject any
+ * other port index instead of reading out of bounds.
+ * Returns 0 on success, -1 if port is out of range for the tables.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+
+	/* Guard the [2][4] tables against out-of-range port indices (UB). */
+	if (port < 0 || port > 1)
+		return -1;
+
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected;
+ * reset the RX path if the MAC/PCS reports trouble.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_all_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	/* Any of: reset requested, high bit-error rate, or FEC alignment
+	 * markers not all locked, calls for an RX path reset.
+	 */
+	if (rst_required || hi_ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets the NIM via GPIO, reads and validates its identification over I2C
+ * and (de-)asserts low-power mode according to 'enable'.
+ * Returns 0 on success (including "module absent"), non-zero on error.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* the only module type supported */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	/* When disabling, quiesce the MAC/PCS before touching the module. */
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* The module may have been pulled while we waited. */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	/* Low power asserted = laser/LED off; de-asserted = operational. */
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ * Returns 0 on success, non-zero if NIM initialization failed.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		/* Unknown board; the tuning step below will assert on this. */
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* NOTE(review): nthw_gmf_init() is called twice — first apparently as
+	 * a presence probe with a NULL object; confirm NULL probing is a
+	 * supported use of that API.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Per-board GTY TX tuning: {pre, diff, post} per [port][lane]. */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		/* Same tuning values for all lanes on these boards. */
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland.
+ * Monitor loop: polls adapter/NIM sensors and, per port, handles admin
+ * enable/disable, loopback-mode changes, NIM insertion/removal and link
+ * state transitions every 0.5s until monitor_task_is_running is cleared.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Use the per-port context nim_ctx[i]; the bare
+				 * base pointer would always log port 0's data.
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	/* pthread entry point: run the shared monitor loop, discard status. */
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ * Sets up per-port MAC/PCS, NIM I2C and GPIO PHY handles (once), then
+ * starts the link monitoring thread. Returns 0 on success.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* IIC instances for the NIMs start at 2. */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		/* Only mark initialized if every port came up cleanly. */
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* Initialize all 100G ports and start the link monitoring thread. */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Probe and initialise the PCI Test Accessor (TA) and the PCI read/write
+ * Traffic Generator (TG) modules for one adapter.
+ *
+ * The three modules are optional in a given FPGA image: a failed init is
+ * logged as a warning and counted, not treated as fatal.
+ *
+ * Returns the number of modules that were NOT found (0 == all present),
+ * or -1 if the state struct pointer itself is NULL.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	/* PCI read traffic generator (instance 0) */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI write traffic generator (instance 0) */
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI test accessor (instance 0) */
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable/disable the TA module's packet checking (1 = enable, 0 = disable). */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+/* Read the TA length-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA payload-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI read TG: slot address, the host physical
+ * (IOVA) address it should read from, request size, and the wait/wrap flags.
+ *
+ * NOTE(review): slot_addr * req_size is evaluated in 32-bit arithmetic before
+ * widening — assumes slot * size never exceeds 32 bits; TODO confirm.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the read TG for num_iterations runs; 0 stops it. Always returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll until the read TG reports ready (1 ms per poll, max 1000 polls,
+ * i.e. ~1 s timeout). Returns 0 on ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI write TG: slot address, the host physical
+ * (IOVA) address it should write to, request size, and wait/wrap/inc flags.
+ *
+ * NOTE(review): slot_addr * req_size is evaluated in 32-bit arithmetic before
+ * widening — assumes slot * size never exceeds 32 bits; TODO confirm.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the write TG for num_iterations runs; 0 stops it. Always returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll until the write TG reports ready (1 ms per poll, max 1000 polls,
+ * i.e. ~1 s timeout). Returns 0 on ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Execute one HIF/PCIe bandwidth measurement pass.
+ *
+ * Sequence: allocate a DMA hostbuffer, reset the read/write traffic
+ * generators (TG) and the test accessor (TA), pre-fill the buffer via the
+ * write TG and verify its contents, run the TG(s) selected by
+ * pri->n_tg_direction, sample the HIF/PCIe3 endpoint counters into
+ * pri/sla, check the TA error counters, then stop everything and free the
+ * buffer.
+ *
+ * Measurement parameters (delay, packet size/count, direction, NUMA node)
+ * are all taken from 'pri'; 'sla' receives slave endpoint samples (the
+ * slave instances are currently always NULL here).
+ *
+ * Returns 0 on success, non-zero (accumulated error flags) on failure;
+ * returns 0 (not an error code) if the DMA allocation fails.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			/* Last slot is the wrap slot (generates no data) */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			/*
+			 * The WR TG writes an incrementing 32-bit pattern after an
+			 * 8-byte header; verify each dword of each packet slot.
+			 */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		/*
+		 * Direction selects which generator(s) run: the WR TG produces
+		 * host reads (device->host writes) and the RD TG host writes.
+		 */
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		/*
+		 * NOTE: the unconditional 'break' below makes this loop run
+		 * exactly once; the while-condition is currently dead code.
+		 */
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Top-level driver for the HIF throughput measurement.
+ *
+ * Expands the numa_node/direction parameters into concrete test runs
+ * (UINT8_MAX selects node 0 only in the current implementation; a
+ * non-positive direction expands to directions 1..3) and invokes
+ * nt4ga_pci_ta_tg_measure_throughput_run() for each combination, filling
+ * a pri/sla counter pair per run. n_pkt_size, n_batch_count and n_delay
+ * fall back to TG_PKT_SIZE/TG_NUM_PACKETS/TG_DELAY when non-positive.
+ *
+ * Returns -1 if n_delay is 0; otherwise 0 (errors are only logged).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary (master) endpoint parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave endpoint parameters */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/* Fix copy-paste: initialize sla (pri was set twice) */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Per-adapter handles for the PCI test-accessor / traffic-generator modules. */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* PCI read traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* PCI write traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta; /* PCI test accessor (packet checker) */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/* Inline timestamp format s pcap 32:32 bits. Convert to nsecs */
+/*
+ * Convert a 32:32 (seconds:nanoseconds) pcap-style inline timestamp to
+ * total nanoseconds: upper 32 bits are seconds, lower 32 bits nanoseconds.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Collect the FPGA statistics DMA block into the software counters,
+ * dispatching on FPGA flavour: vswitch images use the virt v1 layout,
+ * capture/inline images the cap v1 layout. Also refreshes
+ * last_timestamp. Always returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/* Capture images expose a valid 32:32 timestamp in the DMA block */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Create and initialise the STA (statistics) and RMC (receive MAC control)
+ * module handles for one adapter and cache the port/host-buffer counts.
+ *
+ * Returns 0 on success, -1 on NULL state pointer or allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		if (!p_nthw_rmc) {
+			/* Avoid leaking the stat handle when RMC alloc fails */
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache module geometry for the collect routines */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate the statistics DMA block, point the FPGA STA module at it, and
+ * allocate the per-color / host-buffer / per-port software counter arrays
+ * (vswitch vs. inline images use different port counter structs).
+ *
+ * Returns 0 on success, -1 on any allocation failure (earlier allocations
+ * are not rolled back here; nt4ga_stat_stop() releases them).
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Block Rx while the DMA address is being switched */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * Format fixes: the original string had five conversions for
+		 * four arguments (stray %p), used %02ld for an int and PRIX64
+		 * for the uint32_t size - all undefined behavior.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so stale reads are recognisable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources allocated by nt4ga_stat_setup():
+ * the per-port/color/host-buffer/FLM counter arrays and the DMA block.
+ * Safe to call on a partially initialised state (each pointer is checked
+ * and NULLed after free). Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* Only one of virt/cap is populated; freeing both variants is safe */
+	if (p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+		free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+	}
+	if (p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+		free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+	}
+
+	if (p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+		free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+	}
+	if (p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+		free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_color) {
+		free(p_nt4ga_stat->mp_stat_structs_color);
+		p_nt4ga_stat->mp_stat_structs_color = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_hb) {
+		free(p_nt4ga_stat->mp_stat_structs_hb);
+		p_nt4ga_stat->mp_stat_structs_hb = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_flm) {
+		free(p_nt4ga_stat->mp_stat_structs_flm);
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	}
+
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Dump per-port Rx/Tx totals (packets, octets, drops) for every physical
+ * port to the given stream. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+
+/*
+ * Collect capture-profile (cap, layout v1) statistics from the DMA area.
+ *
+ * Accumulates the color, host-buffer, Rx-port and Tx-port counters written
+ * by the FPGA into p_nt4ga_stat, and maintains the per-port
+ * octets/packets/drops totals.
+ *
+ * @param p_nt4ga_stat       Adapter statistics state (accumulated into).
+ * @param p_stat_dma_virtual CPU mapping of the 32-bit counter DMA area.
+ * @return 0 on success, -1 on NULL arguments or if the HW STA layout
+ *         version is not the supported version 6.
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	/* Validate before dereferencing (the original checked after use) */
+	if (!p_nt4ga_stat)
+		return -1;
+
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two 32-bit words (packets, bytes) per color */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: eight 32-bit words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		const uint32_t *hb_cnt = &p_stat_dma_virtual[h * 8];
+
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets += hb_cnt[0];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets += hb_cnt[1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets += hb_cnt[2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets += hb_cnt[3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes += hb_cnt[4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes += hb_cnt[5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes += hb_cnt[6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes += hb_cnt[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		/* Base of this port's counter record in the DMA area */
+		const uint32_t *rx_cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		struct port_counters_v2 *rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+
+		rx->octets += rx_cnt[0];
+
+		rx->broadcast_pkts += rx_cnt[1];
+		rx->multicast_pkts += rx_cnt[2];
+		rx->unicast_pkts += rx_cnt[3];
+		rx->pkts_alignment += rx_cnt[4];
+		rx->pkts_code_violation += rx_cnt[5];
+		rx->pkts_crc += rx_cnt[6];
+		rx->undersize_pkts += rx_cnt[7];
+		rx->oversize_pkts += rx_cnt[8];
+		rx->fragments += rx_cnt[9];
+		rx->jabbers_not_truncated += rx_cnt[10];
+		rx->jabbers_truncated += rx_cnt[11];
+
+		/* Packet size histogram */
+		rx->pkts_64_octets += rx_cnt[12];
+		rx->pkts_65_to_127_octets += rx_cnt[13];
+		rx->pkts_128_to_255_octets += rx_cnt[14];
+		rx->pkts_256_to_511_octets += rx_cnt[15];
+		rx->pkts_512_to_1023_octets += rx_cnt[16];
+		rx->pkts_1024_to_1518_octets += rx_cnt[17];
+		rx->pkts_1519_to_2047_octets += rx_cnt[18];
+		rx->pkts_2048_to_4095_octets += rx_cnt[19];
+		rx->pkts_4096_to_8191_octets += rx_cnt[20];
+		rx->pkts_8192_to_max_octets += rx_cnt[21];
+
+		rx->mac_drop_events += rx_cnt[22];
+		rx->pkts_lr += rx_cnt[23];
+		rx->duplicate += rx_cnt[24];
+
+		rx->pkts_ip_chksum_error += rx_cnt[25];
+		rx->pkts_udp_chksum_error += rx_cnt[26];
+		rx->pkts_tcp_chksum_error += rx_cnt[27];
+		rx->pkts_giant_undersize += rx_cnt[28];
+		rx->pkts_baby_giant += rx_cnt[29];
+		rx->pkts_not_isl_vlan_mpls += rx_cnt[30];
+		rx->pkts_isl += rx_cnt[31];
+		rx->pkts_vlan += rx_cnt[32];
+		rx->pkts_isl_vlan += rx_cnt[33];
+		rx->pkts_mpls += rx_cnt[34];
+		rx->pkts_isl_mpls += rx_cnt[35];
+		rx->pkts_vlan_mpls += rx_cnt[36];
+		rx->pkts_isl_vlan_mpls += rx_cnt[37];
+
+		rx->pkts_no_filter += rx_cnt[38];
+		rx->pkts_dedup_drop += rx_cnt[39];
+		rx->pkts_filter_drop += rx_cnt[40];
+		rx->pkts_overflow += rx_cnt[41];
+		/* DBS counters are only valid when the DBS module is present */
+		rx->pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? rx_cnt[42] : 0;
+		rx->octets_no_filter += rx_cnt[43];
+		rx->octets_dedup_drop += rx_cnt[44];
+		rx->octets_filter_drop += rx_cnt[45];
+		rx->octets_overflow += rx_cnt[46];
+		rx->octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? rx_cnt[47] : 0;
+
+		rx->ipft_first_hit += rx_cnt[48];
+		rx->ipft_first_not_hit += rx_cnt[49];
+		rx->ipft_mid_hit += rx_cnt[50];
+		rx->ipft_mid_not_hit += rx_cnt[51];
+		rx->ipft_last_hit += rx_cnt[52];
+		rx->ipft_last_not_hit += rx_cnt[53];
+
+		/* Rx totals: drops = MAC + filter/dedup/overflow (+ DBS) */
+		uint64_t new_drop_events_sum =
+			rx_cnt[22] + rx_cnt[38] + rx_cnt[39] + rx_cnt[40] +
+			rx_cnt[41] +
+			(p_nthw_stat->m_dbs_present ? rx_cnt[42] : 0);
+
+		/* Packets = sum of the error and size-histogram buckets */
+		uint64_t new_packets_sum =
+			rx_cnt[7] + rx_cnt[8] + rx_cnt[9] + rx_cnt[10] +
+			rx_cnt[11] + rx_cnt[12] + rx_cnt[13] + rx_cnt[14] +
+			rx_cnt[15] + rx_cnt[16] + rx_cnt[17] + rx_cnt[18] +
+			rx_cnt[19] + rx_cnt[20] + rx_cnt[21];
+
+		rx->drop_events += new_drop_events_sum;
+		rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += rx_cnt[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		const uint32_t *tx_cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		struct port_counters_v2 *tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+
+		tx->octets += tx_cnt[0];
+
+		tx->broadcast_pkts += tx_cnt[1];
+		tx->multicast_pkts += tx_cnt[2];
+		tx->unicast_pkts += tx_cnt[3];
+		tx->pkts_alignment += tx_cnt[4];
+		tx->pkts_code_violation += tx_cnt[5];
+		tx->pkts_crc += tx_cnt[6];
+		tx->undersize_pkts += tx_cnt[7];
+		tx->oversize_pkts += tx_cnt[8];
+		tx->fragments += tx_cnt[9];
+		tx->jabbers_not_truncated += tx_cnt[10];
+		tx->jabbers_truncated += tx_cnt[11];
+
+		/* Packet size histogram */
+		tx->pkts_64_octets += tx_cnt[12];
+		tx->pkts_65_to_127_octets += tx_cnt[13];
+		tx->pkts_128_to_255_octets += tx_cnt[14];
+		tx->pkts_256_to_511_octets += tx_cnt[15];
+		tx->pkts_512_to_1023_octets += tx_cnt[16];
+		tx->pkts_1024_to_1518_octets += tx_cnt[17];
+		tx->pkts_1519_to_2047_octets += tx_cnt[18];
+		tx->pkts_2048_to_4095_octets += tx_cnt[19];
+		tx->pkts_4096_to_8191_octets += tx_cnt[20];
+		tx->pkts_8192_to_max_octets += tx_cnt[21];
+
+		tx->mac_drop_events += tx_cnt[22];
+		tx->pkts_lr += tx_cnt[23];
+
+		/*
+		 * Tx totals.
+		 * Fix: the drop sum previously indexed the DMA area with the
+		 * Rx counter stride (m_nb_rx_port_counters); it must read the
+		 * mac_drop_events word of this port's Tx record.
+		 */
+		uint64_t new_drop_events_sum = tx_cnt[22];
+
+		uint64_t new_packets_sum =
+			tx_cnt[7] + tx_cnt[8] + tx_cnt[9] + tx_cnt[10] +
+			tx_cnt[11] + tx_cnt[12] + tx_cnt[13] + tx_cnt[14] +
+			tx_cnt[15] + tx_cnt[16] + tx_cnt[17] + tx_cnt[18] +
+			tx_cnt[19] + tx_cnt[20] + tx_cnt[21];
+
+		tx->drop_events += new_drop_events_sum;
+		tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += tx_cnt[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color (flow coloring) statistics, accumulated from the stat DMA area */
+struct color_counters {
+	uint64_t color_packets; /* packets assigned this color */
+	uint64_t color_bytes; /* bytes assigned this color */
+	uint8_t tcp_flags; /* NOTE(review): not updated by the collectors visible here — confirm producer */
+};
+
+/*
+ * Per host-buffer counters. Field order mirrors the eight-word DMA record
+ * read by the cap v1 collector (indices 0..7).
+ */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Per-port counters for the capture profile (layout v2).
+ * Populated by nt4ga_stat_collect_cap_v1_stats() from the stat DMA area;
+ * drop_events and pkts are derived sums, the rest map to individual DMA words.
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events; /* derived: sum of MAC/filter/dedup/overflow (+DBS) drops */
+	uint64_t pkts; /* derived: sum of size-histogram and error buckets */
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	/* Packet size histogram */
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop; /* only accumulated when the DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop; /* only accumulated when the DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/*
+ * Per-port counters for the VSWITCH/inline profile (layout v1).
+ * Field order mirrors the five-word DMA record read by the virt collector
+ * (octets, pkts, drop_events, qos_drop_octets, qos_drop_pkts).
+ */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Flow Matcher (FLM) statistics. The first group exists since FLM module
+ * version 0.17, the second group was added in FLM 0.20.
+ * NOTE(review): field semantics follow the FLM register names — confirm
+ * against the FLM module documentation.
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current; /* currently learned flows */
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/* Top-level statistics state for one adapter */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat; /* HW STA module handle */
+	nthw_rmc_t *mp_nthw_rmc; /* HW RMC module handle */
+	struct nt_dma_s *p_stat_dma; /* DMA area holding the HW counters */
+	uint32_t *p_stat_dma_virtual; /* CPU mapping of the counter DMA area */
+	uint32_t n_stat_size; /* size of the counter DMA area */
+
+	uint64_t last_timestamp; /* timestamp of the last statistics poll */
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Per-port counters; the active union member depends on the profile */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		/* Port counters for the capture profile */
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver; /* FLM statistics layout version */
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0a72e46174..a8bf67af10 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/* Return true when the copper SFP product number is a known 3-speed type */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	size_t idx;
+
+	/* Linear scan of the supported product-number table */
+	for (idx = 0; idx < ARRAY_SIZE(pn_trispeed_list); idx++) {
+		if (strcmp(prod_no, pn_trispeed_list[idx]) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/* Return true when the NIM type uses paged (table-select) addressing */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	/* SFP/SFP+ modules use flat addressing */
+	if (id == NT_NIM_SFP_SFP_PLUS)
+		return false;
+
+	/* XFP and the QSFP family are page addressed */
+	if (id == NT_NIM_XFP || id == NT_NIM_QSFP ||
+			id == NT_NIM_QSFP_PLUS || id == NT_NIM_QSFP28)
+		return true;
+
+	NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+	       id);
+	return false;
+}
+
+/* Return the context's raw NIM identifier byte as the identifier enum */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	const nt_nim_identifier_t nim_id = (nt_nim_identifier_t)ctx->nim_id;
+
+	return nim_id;
+}
+
+/* Dispatch a raw I2C read or write to the underlying IIC controller */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* nthw_iic_read/write_data multiplies the device address by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Unused */
+
+	if (!do_write)
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+
+	return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+				 seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+/* Select a NIM page, writing the select register only when it must change */
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Fetch the page currently selected by the module */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Page switching is slow - skip the write when already selected */
+	if (page_sel == curr_page_sel)
+		return 0;
+
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes at linear address 'lin_addr' of a NIM,
+ * mapping the flat address onto the module's I2C devices/pages:
+ *   [0..127]               -> 0xA0 base page
+ *   [128..SFP_PHY_LIN_ADDR) -> 0xA0 paged (if m_page_addressing) or 0xA2
+ *   [SFP_PHY_LIN_ADDR..)   -> 0xAC PHY registers (16-bit, two bytes/addr)
+ * Returns 0 on success, -1 on range violations or I2C failure.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	/* NOTE(review): port number assumed to be instance minus 2 — confirm */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		/* length is in bytes; the PHY range is counted in 16-bit registers */
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices support access to address [0..127] */
+
+	/* Transfer in chunks, never crossing a 128-byte block boundary */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			/* Page number: linear address 128 starts at page select 0 */
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* Advance by registers, not bytes, for 16-bit PHY access */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_READ);
+}
+
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_WRITE);
+}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	ctx->nim_id = 0;
+	int res = nim_read_id(ctx);
+
+	if (res) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
/* Forward declaration - implementation further down in this file */
static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
				 uint8_t max_len, char *p_data);

/*
 * Generates a vendor-info reader that fills in the vendor name, product
 * number, serial number, date and revision strings of the NIM context.
 * Invoked below with an empty argument -> sfp_read_vendor_info() and with
 * "q" -> qsfp_read_vendor_info().
 *
 * NOTE(review): the address tokens are written "Q##SFP_..." which pastes to
 * the QSFP_*_LIN_ADDR constants for BOTH generated functions - i.e. the
 * sfp_ variant also reads from the QSFP register layout. Confirm this is
 * intended; if the SFP variant was meant to use an SFP_*_LIN_ADDR layout,
 * the macro parameter should take part in the constant names as well.
 */
#define XSFP_READ_VENDOR_INFO(x)                                             \
	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
	{                                                                    \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
				      sizeof(ctx->vendor_name),               \
				      ctx->vendor_name);                      \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
				      sizeof(ctx->prod_no), ctx->prod_no);     \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
				      sizeof(ctx->serial_no), ctx->serial_no); \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
				      sizeof(ctx->date), ctx->date);         \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
				      (uint8_t)(sizeof(ctx->rev) - 2),       \
				      ctx->rev); /*OBS Only two bytes*/      \
	}

/* Defines sfp_read_vendor_info() */
XSFP_READ_VENDOR_INFO()
/* Defines qsfp_read_vendor_info() */
XSFP_READ_VENDOR_INFO(q)
+
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
/*
 * Build the state of a QSFP-family module. No hardware is read: the nominal
 * bit rate is derived from the module identifier alone.
 * Returns 0 on success, -1 if the identifier is not a QSFP/QSFP+/QSFP28.
 *
 * (Kept byte-identical apart from comments: the log below embeds __LINE__,
 * so reshaping this function would change its logged output.)
 */
static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
{
	int res = 0; /* unused due to no readings from HW */

	assert(ctx && state);
	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");

	(void)memset(state, 0, sizeof(*state));

	/* Map the identifier byte to a nominal bit rate code */
	switch (ctx->nim_id) {
	case 12U:
		state->br = 10U; /* QSFP: 4 x 1G = 4G */
		break;
	case 13U:
		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
		break;
	case 17U:
		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
		break;
	default:
		NT_LOG(INF, PMD,
		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
		       __func__, __LINE__, ctx->nim_id);
		res = -1;
	}

	return res;
}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
/*
 * Map a NIM identifier byte to a human-readable module type name.
 * Unassigned identifiers yield "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id)
{
	/* Contiguous identifiers 0x00..0x0D */
	static const char *const low_ids[] = {
		"UNKNOWN",     /* 0x00 */
		"GBIC",        /* 0x01 */
		"FIXED",       /* 0x02 */
		"SFP/SFP+",    /* 0x03 */
		"300 pin XBI", /* 0x04 */
		"XEN-PAK",     /* 0x05 */
		"XFP",         /* 0x06 */
		"XFF",         /* 0x07 */
		"XFP-E",       /* 0x08 */
		"XPAK",        /* 0x09 */
		"X2",          /* 0x0A */
		"DWDM",        /* 0x0B */
		"QSFP",        /* 0x0C */
		"QSFP+",       /* 0x0D */
	};

	if (nim_id < sizeof(low_ids) / sizeof(low_ids[0]))
		return low_ids[nim_id];

	if (nim_id == 0x11)
		return "QSFP28";

	if (nim_id == 0x12)
		return "CFP4";

	return "ILLEGAL!";
}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate__select bits (RS0 & RS1)
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Disable TX laser.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	int res;
+	uint8_t value;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_WRITE);
+
+	return res;
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	uint8_t value;
+	uint8_t mask;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	if (lane_idx < 0) /* If no lane is specified then all lanes */
+		mask = QSFP_SOFT_TX_ALL_DISABLE_BITS;
+	else
+		mask = (uint8_t)(1U << lane_idx);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		value |= mask;
+	else
+		value &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++)
+		if (*(p_nim_len_info + i) == 255) {
+			ctx->len_info[i] = 65535;
+		} else {
+			uint32_t len = *(p_nim_len_info + i) * *(p_nim_units + i);
+
+			if (len > 65535)
+				ctx->len_info[i] = 65535;
+			else
+				ctx->len_info[i] = (uint16_t)len;
+		}
+}
+
/*
 * Read the QSFP/QSFP+ basic (static) data: DMI options, vendor strings,
 * supported fiber lengths and the required power class.
 * Returns 0 on success, -1 on any I2C read failure.
 */
static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
{
	const bool pg_addr = page_addressing(ctx->nim_id);
	uint8_t options;
	uint8_t value;
	uint8_t nim_len_info[5];
	uint16_t nim_units[5] = { 1000, 2, 1, 1,
				 1
			       }; /* QSFP MSA units in meters */
	const char *yes_no[2] _unused = { "No", "Yes" };

	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
	       nim_id_to_text(ctx->nim_id), ctx->nim_id);

	/* Read DMI options */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
				    sizeof(options), &options, NIM_READ) != 0)
		return -1;
	/* avg_pwr is bool, so any non-zero bit value collapses to true */
	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
	NT_LOG(DBG, ETHDEV,
	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
	       ctx->instance, yes_no[ctx->avg_pwr]);

	/* Fill in vendor name, PN, SN, date and revision strings */
	qsfp_read_vendor_info(ctx);
	NT_LOG(DBG, PMD,
	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
	       ctx->date, ctx->rev);

	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
				    sizeof(nim_len_info), nim_len_info,
				    NIM_READ) != 0)
		return -1;

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	nim_import_len_info(ctx, nim_len_info, nim_units);

	/* Read required power level */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
				    sizeof(value), &value, NIM_READ) != 0)
		return -1;

	/*
	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
	 * If power class >= 5 setHighPower must be called for the module to be fully
	 * functional
	 */
	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
		/* NIM in power class 1 - 4 */
		ctx->pwr_level_req =
			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
				  1);
	} else {
		/* NIM in power class 5 - 7 */
		ctx->pwr_level_req =
			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
	}

	return 0;
}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) {
+		return nim_sfp_set_rate_select(ctx, speed);
+	} else if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	}
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly impirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	ctx->options = 0;
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TEMP_LIN_ADDR, 1,
+					 QSFP_TEMP_THRESH_LIN_ADDR, true,
+					 NIM_OPTION_TEMP);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_VOLT_LIN_ADDR, 1,
+					 QSFP_VOLT_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_SUPPLY);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_RX_PWR_LIN_ADDR, 4,
+					 QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_RX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_PWR_LIN_ADDR, 4,
+					 QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_BIAS_LIN_ADDR, 4,
+					 QSFP_BIAS_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_BIAS);
+}
+
+/*
+ * Classify an SFP/SFP+/SFP28 module from its EEPROM content and set
+ * ctx->port_type together with the SFP specific flags (sfpplus, sfp28,
+ * dual_rate, cu_type, tri_speed, hw_rate_sel, sw_rate_sel).
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Byte 12: nominal bit rate in units of 100 Mb/s */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100); /* Convert to Mb/s */
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* A nominal rate of 25.5 Gb/s identifies an SFP28 module */
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000); /* 10 Gb/s or faster => SFP+ */
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive ctx->speed_mask from the module class (SFP28/SFP+/SFP) and the
+ * port type / rate flags established by sfp_find_port_params().
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		ctx->speed_mask = NT_LINK_SPEED_25G; /* Default for SFP28 */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		ctx->speed_mask = NT_LINK_SPEED_10G; /* Default for SFP+ */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		/* DAC cables additionally allow 1G operation */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else { /* SFP */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+			ctx->speed_mask = NT_LINK_SPEED_100M;
+		} else {
+			ctx->speed_mask = NT_LINK_SPEED_1G; /* Default for SFP */
+			if (ctx->specific_u.sfp.dual_rate ||
+					ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_100M;
+			if (ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_10M;
+		}
+	}
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+/*
+ * Classify a QSFP28 module into a port type from the (extended)
+ * specification compliance codes read from the module EEPROM.
+ * Falls back to the generic NT_PORT_TYPE_QSFP28 for unknown codes.
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	if (fiber_chan_speed & (1 << 7)) {
+		/* Bit 7 set => consult the extended compliance code */
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t extended_specification_compliance_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &extended_specification_compliance_code);
+
+		switch (extended_specification_compliance_code) {
+		case 0x02:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+			break;
+		case 0x03:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+			break;
+		case 0x0B:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+			break;
+		case 0x0C:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+			break;
+		case 0x0D:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+			break;
+		case 0x25:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+			break;
+		case 0x26:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+			break;
+		case 0x27:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+			break;
+		default:
+			ctx->port_type = NT_PORT_TYPE_QSFP28;
+		}
+	} else {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * may still support several rates without the user being required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	/* SFF-8636 register addresses used below */
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	/* Rate selection not implemented at all */
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	/* Only the "extended rate select" type (2) is handled here */
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
+				   0x03; /* bit 1..0 */
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive ctx->speed_mask for a QSFP28 module. PAM-4 modules (FR/DR/LR)
+ * can only run with all four lanes combined; NRZ modules support 25G per
+ * lane / 100G for all lanes, plus 10G/40G when rate selection is enabled.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR) {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask =
+				0; /* PAM-4 modules can only run on all lanes together */
+	} else {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_25G;
+
+		if (qsfp28_is_rate_selection_enabled(ctx)) {
+			/*
+			 * It is assumed that if the module supports dual rates then the other rate
+			 * is 10G per lane or 40G for all lanes.
+			 */
+			if (ctx->lane_idx < 0)
+				ctx->speed_mask |= NT_LINK_SPEED_40G;
+			else
+				ctx->speed_mask = NT_LINK_SPEED_10G;
+		}
+	}
+}
+
+/*
+ * Classify a QSFP+ module as passive DAC, active DAC or optical from the
+ * upper nibble of the device technology byte in the module EEPROM.
+ */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	switch (device_tech & 0xF0) {
+	case 0xA0: /* Copper cable unequalized */
+	case 0xB0: /* Copper cable passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+		break;
+	case 0xC0: /* Copper cable, near and far end limiting active equalizers */
+	case 0xD0: /* Copper cable, far end limiting active equalizers */
+	case 0xE0: /* Copper cable, near end limiting active equalizers */
+	case 0xF0: /* Copper cable, linear active equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+		break;
+	default: /* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+		break;
+	}
+}
+
+/*
+ * Set the supported link speeds for a QSFP+ module: 40G when the context
+ * covers all four lanes (lane_idx < 0), otherwise 10G for a single lane.
+ */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read basic SFP data and, on success, derive the port type and speed
+ * mask. Returns zero on success, non-zero otherwise.
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	int res = sfp_read_basic_data(ctx);
+
+	if (!res) {
+		sfp_find_port_params(ctx);
+		sfp_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Initialize the QSFP+ part of the context. lane_idx is -1 when the
+ * context covers all four lanes, otherwise a single lane index (0..3).
+ */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->specific_u.qsfp.qsfp28 = false;
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+}
+
+/*
+ * Construct and pre-initialize a QSFP+ module: read basic data, classify
+ * the port, detect available sensor options plus the TX_DISABLE and
+ * RX-only capabilities, and set the speed mask.
+ * Returns zero on success, non-zero otherwise.
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Wait for a QSFP28 module to become ready after reset, either by polling
+ * the init complete flag (when the module implements it) or by a fixed
+ * 500 ms delay. The wait is done only for lane_idx <= 0 so it happens
+ * once per module, not once per lane.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		/* Status byte 6, bit 0: data not ready */
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine the module's FEC capabilities, first via a product-number
+ * whitelist of modules with known behavior, otherwise via the FEC
+ * control bits in the module EEPROM (SFF-8636 page 03h, byte 227).
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare string contents: the previous pointer comparison
+		 * (ctx->prod_no == nim_list[i]) could never match because
+		 * prod_no is a char array filled from the module EEPROM.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 on upper page 03h */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * Pre-initialize a QSFP28 module: run the QSFP+ pre-init first, then wait
+ * for the module to become ready and establish the QSFP28 specific
+ * parameters (port type, FEC options, speed mask).
+ * Returns zero on success, non-zero otherwise.
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		qsfp28_wait_for_ready_after_reset(ctx);
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Build the linked list of SFP sensor groups (temperature, voltage, bias
+ * current, TX power, RX power) for the given port and count them in
+ * *nim_sensors_cnt.
+ *
+ * NOTE(review): the return value of allocate_nim_sensor_group() is not
+ * checked; on allocation failure the subsequent dereference would crash.
+ * Consider adding NULL checks.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
+/*
+ * Build the linked list of QSFP+/QSFP28 sensor groups for the given port:
+ * temperature, voltage and, per lane, bias current (level1[1..4]),
+ * TX power (level1[5..8]) and RX power (level1[9..12]); the total is
+ * returned in *nim_sensors_cnt.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	/*
+	 * Start counting from zero like sfp_nim_add_all_sensors() does;
+	 * previously the counter was only incremented, silently relying on
+	 * the caller to clear it beforehand.
+	 */
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate a single sensor group node for 'port' and attach it to 'ctx'.
+ * Returns NULL if the node itself cannot be allocated; the caller owns
+ * the returned node.
+ *
+ * NOTE(review): sg->sensor may be NULL if allocate_sensor_by_description()
+ * fails — callers using the sensor field should verify this.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(struct nim_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Classify the NIM from its ID, pre-initialize it and register its
+ * sensors. @extra optionally points to an int8_t lane index for
+ * QSFP+/QSFP28 modules (absent or -1 means all lanes).
+ * Returns zero on success, non-zero otherwise.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+	int err;
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/* Propagate pre-init failures that were previously ignored */
+		err = sfp_preinit(ctx);
+		if (res == 0)
+			res = err;
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		err = qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		if (res == 0)
+			res = err;
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		err = qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		if (res == 0)
+			res = err;
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr;
+	uint8_t regaddr;
+	uint8_t nim_id;
+	nt_port_type_t port_type;
+
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr;
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5];
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options;
+	bool tx_disable;
+	bool dmi_supp;
+
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel;
+			bool sw_rate_sel;
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only;
+			bool qsfp28;
+			union {
+				struct {
+					uint8_t rev_compliance;
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	struct nim_i2c_ctx *ctx;
+	struct nim_sensor_group *next;
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify a NIM based on its ID and some register
+ * reads, and collects information into the ctx structure. The @extra parameter
+ * can contain the initialization argument for specific types of NIMs.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/*
+ * Map a single nt_link_speed_t value to its human readable name.
+ * Returns "NotAvail" for NT_LINK_SPEED_UNKNOWN and "Unhandled" (after a
+ * debug assert) for values without a mapping.
+ */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *name;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].name;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/*
+ * Convert a single nt_link_speed_t value to its link rate in bits per
+ * second. Returns 0 for NT_LINK_SPEED_UNKNOWN and (after a debug assert)
+ * for unhandled values.
+ */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	const uint64_t mbps = 1000ULL * 1000ULL; /* 1 Mb/s in b/s */
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * mbps;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * mbps;
+	case NT_LINK_SPEED_1G:
+		return 1000ULL * mbps;
+	case NT_LINK_SPEED_10G:
+		return 10000ULL * mbps;
+	case NT_LINK_SPEED_25G:
+		return 25000ULL * mbps;
+	case NT_LINK_SPEED_40G:
+		return 40000ULL * mbps;
+	case NT_LINK_SPEED_50G:
+		return 50000ULL * mbps;
+	case NT_LINK_SPEED_100G:
+		return 100000ULL * mbps;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render a link speed bitmask as a comma separated string in 'buffer'
+ * (of size 'length' bytes) and return 'buffer'. Speed names that do not
+ * fit are silently dropped.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			/*
+			 * The strncat() bound must be the free space left in
+			 * the buffer (excluding the terminator), not the total
+			 * buffer size; the latter allowed writes past the end
+			 * of 'buffer' when little space remained.
+			 */
+			if (len > 0 && (length - len - 1) > 2) {
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			if (len < (length - 1))
+				strncat(buffer,
+					nt_translate_link_speed((nt_link_speed_t)(1U << i)),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	/*
+	 * NOTE: bit values below are not in numerical speed order (50G/25G
+	 * presumably assigned after 100G) - do not renumber, the values are
+	 * part of the mask ABI.
+	 */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read 'count' 16-bit sensor words starting at linear address 'addr' into
+ * p_lane_values, converting each from NIM (big endian) to host order.
+ * Always returns true (the I2C read itself does not report errors here).
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * Bug fix: the previous self-assignment was a no-op although
+		 * the comment claimed a byte swap; use htons() like the SFP
+		 * code path so big-endian wire data becomes host order.
+		 */
+		*p_lane_values = htons(*p_lane_values); /* Swap to little endian */
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (one 16-bit value, signed)
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage (one 16-bit value)
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/*
+	 * Bug fix: read the RX power registers (QSFP_RX_PWR_LIN_ADDR);
+	 * this previously read the TX power registers by copy-paste mistake.
+	 */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/* Sample the QSFP temperature sensor and publish it (tenths of a unit). */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t res;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_temperature(sg->ctx, &res)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Raw reading is in 1/256 units; publish tenths. */
+	update_sensor_value(sg->sensor, (int)(res * 10 / 256));
+}
+
+/* Sample the QSFP supply voltage and publish it. */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t res;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_supply_voltage(sg->ctx, &res)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Divide raw reading by 10 (same conversion as the SFP path). */
+	update_sensor_value(sg->sensor, (int)(res / 10));
+}
+
+/* Sample per-lane TX bias current and publish one value per active lane. */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i] * 2);
+}
+
+/* Sample per-lane TX optical power and publish one value per active lane. */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
+
+/* Sample per-lane RX optical power and publish one value per active lane. */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+/* Duplicate #define of SFP_CU_LINK_LEN_LIN_ADDR removed (it appeared twice). */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;	/* neutral slope: value * 256/256 */
+	int16_t offset = 0;	/* neutral offset */
+
+	/* Fail fast if the module does not implement diagnostics (DMI). */
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	/*
+	 * NOTE(review): htons() is used here as a 16-bit byte swap; on a
+	 * big-endian host it is a no-op, which still yields host order for
+	 * big-endian wire data.
+	 */
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to little endian */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/*
+		 * Apply calibration: value * slope/256 + offset, computed in
+		 * 32 bits and saturated to the 16-bit range of the result.
+		 */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (signed 16-bit raw value)
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	const uint16_t data_addr = SFP_TEMP_LIN_ADDR;
+	const uint16_t slope_addr = SFP_TEMP_SLOPE_LIN_ADDR;
+	const uint16_t offset_addr = SFP_TEMP_OFFSET_LIN_ADDR;
+
+	return sfp_nim_get_dmi_data(data_addr, slope_addr, offset_addr,
+				  p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage (unsigned 16-bit raw value)
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const uint16_t data_addr = SFP_VOLT_LIN_ADDR;
+	const uint16_t slope_addr = SFP_VOLT_SLOPE_LIN_ADDR;
+	const uint16_t offset_addr = SFP_VOLT_OFFSET_LIN_ADDR;
+
+	return sfp_nim_get_dmi_data(data_addr, slope_addr, offset_addr,
+				  p_value, false, ctx);
+}
+
+/*
+ * Read NIM TX bias current (unsigned 16-bit raw value)
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const uint16_t data_addr = SFP_TX_BIAS_LIN_ADDR;
+	const uint16_t slope_addr = SFP_TX_BIAS_SLOPE_LIN_ADDR;
+	const uint16_t offset_addr = SFP_TX_BIAS_OFFSET_LIN_ADDR;
+
+	return sfp_nim_get_dmi_data(data_addr, slope_addr, offset_addr,
+				  p_value, false, ctx);
+}
+
+/*
+ * Read NIM TX optical power (unsigned 16-bit raw value)
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const uint16_t data_addr = SFP_TX_PWR_LIN_ADDR;
+	const uint16_t slope_addr = SFP_TX_PWR_SLOPE_LIN_ADDR;
+	const uint16_t offset_addr = SFP_TX_PWR_OFFSET_LIN_ADDR;
+
+	return sfp_nim_get_dmi_data(data_addr, slope_addr, offset_addr,
+				  p_value, false, ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];	/* Coeff4..Coeff0 as stored in the module */
+	float power_raised;	/* running RxPwr^i term */
+	float rx_power;		/* accumulated polynomial result */
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/*
+		 * NOTE(review): byte-swapping floats through a uint32_t *
+		 * breaks strict aliasing; this relies on the build flags
+		 * (or luck) - a memcpy-based swap would be safer. Confirm
+		 * the driver is built with -fno-strict-aliasing.
+		 */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Horner-free evaluation: add Coeff(4-i) * RxPwr^(4-i) terms */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		if (rx_power > 65535)
+			return false;
+
+		/* Clamp negative results to zero before narrowing */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR,
+					     p_value);
+}
+
+/* Sample the SFP temperature sensor and publish it (tenths of a unit). */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_temperature(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Raw reading is in 1/256 units; publish tenths. */
+	update_sensor_value(sg->sensor, (int)(temp * 10 / 256));
+}
+
+/* Sample the SFP supply voltage and publish it in mV. */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_supply_voltage(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(temp / 10)); /* Unit: 100uV -> 1mV */
+}
+
+/* Sample the SFP TX bias current and publish it. */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_bias_current(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(temp * 2));
+}
+
+/* Sample the SFP TX optical power and publish it. */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_power(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)temp);
+}
+
+/* Sample the SFP RX optical power and publish it. */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_rx_power(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)temp);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate a zero-initialized GMF instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/* calloc() zero-fills, replacing the malloc() + memset() pair. */
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/* Scrub and free a GMF instance; NULL is accepted and ignored. */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_gmf_t));
+	free(p);
+}
+
+/*
+ * Bind a GMF instance to FPGA module MOD_GMF number n_instance and resolve
+ * all register/field handles. Mandatory registers use module_get_register/
+ * register_get_field; version-dependent ones use the query_* variants and
+ * may legitimately be NULL afterwards.
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise. Returns -1 if the instance is missing.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* Probe-only mode: report presence of the module instance. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields - present in all supported versions. */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 (neutral scaling) when absent. */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields - NULL when this FPGA lacks them. */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF unless an administrative block is latched. */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame-gap handling. */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+/* Optional field (resolved with register_query_field); no-op when absent. */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Optional field (resolved with register_query_field); no-op when absent. */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Optional field (resolved with register_query_field); no-op when absent. */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			    enable ? 1 : 0);
+}
+
+/* Enable/disable automatic IFG adjustment. */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	const uint32_t n_val = enable;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, n_val);
+}
+
+/*
+ * Write a raw IFG speed value. Returns 0 on success, -1 when the value does
+ * not fit the field (limit is 2^(width-1), matching the original check).
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	if (n_speed_val > (1ULL << (n_bit_width - 1)))
+		return -1;
+
+	/* Write one 32-bit word for narrow fields, two for wide ones. */
+	field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+		     (n_bit_width <= 32 ? 1 : 2));
+	field_flush_register(p->mp_speed_ifg_speed);
+	return 0;
+}
+
+/* Return the hardware bit width of the IFG speed field. */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(n_bit_width >= 22);
+
+	return n_bit_width;
+}
+
+/*
+ * Program the IFG speed from an absolute rate limit in bits/s relative to
+ * the link speed. The raw value is ((1/rate_fraction) - 1) * 2^(width/2),
+ * scaled by the product parameters GMF_IFG_SPEED_MUL/DIV.
+ * Returns 0 on success, -1 when the computed value does not fit the field.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed from a rate limit given as a percentage of line
+ * rate. 0%% and 100%% both map to raw value 0 (no throttling); values up to
+ * 99%% use the same formula as nthw_gmf_set_ifg_speed_bits(); anything in
+ * between (99, 100) is rejected.
+ * Returns 0 on success, -1 on an unsupported percentage or field overflow.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		/* NOTE: truncates here while the bits/s variant rounds. */
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit clock delta (as two 32-bit words) and flush. */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	uint32_t *p_words = (uint32_t *)&delta;
+
+	field_set_val(p->mp_ifg_clock_delta_delta, p_words, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the 64-bit delta adjust value; no-op when the register is absent. */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	uint32_t *p_words = (uint32_t *)&delta_adjust;
+
+	if (p->mp_ifg_clock_delta_adjust == NULL)
+		return;
+
+	field_set_val(p->mp_ifg_clock_delta_adjust_delta, p_words, 2);
+	field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+}
+
+/* Write the 64-bit max adjust slack (as two 32-bit words) and flush. */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	uint32_t *p_words = (uint32_t *)&slack;
+
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, p_words, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value. */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation,
+			    compensation);
+}
+
+/* Sample the sticky status bits and return them as a GMF_STATUS_MASK_* set. */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	/* Refresh the shadow copy before sampling the fields. */
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed) != 0)
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted) != 0)
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+/* Set-and-flush the sticky fields selected by the GMF_STATUS_MASK_* set. */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if ((status & GMF_STATUS_MASK_DATA_UNDERFLOWED) != 0)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if ((status & GMF_STATUS_MASK_IFG_ADJUSTED) != 0)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the next-packet timestamp (ns). Returns UINT64_MAX when the optional
+ * GMF_STAT_NEXT_PKT register is not present in this FPGA.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/*
+	 * Bug fix: the sentinel was ULONG_MAX, which is only 2^32-1 on ILP32
+	 * platforms and thus a valid timestamp; use UINT64_MAX for uint64_t.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the max-delayed-packet timestamp (ns). Returns UINT64_MAX when the
+ * optional GMF_STAT_MAX_DELAYED_PKT register is not present in this FPGA.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/* Bug fix: UINT64_MAX, not ULONG_MAX (2^32-1 on ILP32 targets). */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Force the GMF off and latch the administrative block so that subsequent
+ * nthw_gmf_set_enable() calls are ignored (used to enforce license expiry).
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/* Sticky-status bit mask returned by nthw_gmf_get_status_sticky() */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 0x1,
+	GMF_STATUS_MASK_IFG_ADJUSTED = 0x2
+};
+
+/*
+ * GMF module instance.
+ * Holds the FPGA register/field handles used by the accessors in
+ * nthw_gmf.c; all mp_* members are resolved once at init time.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	/* CTRL: enable bits for the feeder and the IFG control modes */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	/* IFG speed (64-bit value, written as two 32-bit words) */
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	/* Sticky status bits, surfaced via GMF_STATUS_MASK_* */
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* Product parameters used for rate-limit calculations */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One Si5340 configuration write: register address plus data byte */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC instance.
+ * Returns NULL on allocation failure (callers must check).
+ * calloc() replaces the malloc()+memset() pair — same result, one call.
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/*
+ * Free an RMC instance; NULL is accepted and ignored.
+ * NOTE(review): the memset before free() is hygiene only — a compiler may
+ * elide a memset of memory that is about to be freed.
+ */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_rmc_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind an RMC instance to FPGA module instance `n_instance`.
+ *
+ * When `p` is NULL the call acts as a probe: it returns 0 if the module
+ * exists and -1 otherwise, without touching any state.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	/* Probe-only mode: report module presence without initializing */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the generic port count when absent */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL is mandatory; module_get_register() is the non-optional lookup */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* The remaining registers are optional (module_query_register may
+	 * return NULL); the getters treat a NULL register as "not present".
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read the currently blocked MAC-port mask (forces a register update) */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/* SF RAM overflow counter; all-ones when the STATUS register is absent */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/* Descriptor FIFO overflow counter; all-ones when STATUS is absent */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/* Debug merge value; all-ones when the DBG register is absent */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Overwrite the MAC-port block mask and flush it to hardware */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block all RMC traffic sources. A no-op once the instance has been
+ * administratively blocked (license expiry) — that state is final.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Unblock RMC traffic: clear the stat-drop and keep-alive blocks and
+ * leave only the ports above the active count blocked (NIM count when
+ * `b_is_slave`, RX-port count otherwise). A no-op when administratively
+ * blocked.
+ * NOTE(review): `~0U << n` is undefined for n >= 32 — this assumes
+ * mn_nims/mn_ports < 32; confirm against the FPGA product parameters.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Permanently block all MAC ports and latch the administrative-block
+ * flag, which disables nthw_rmc_block()/nthw_rmc_unblock() thereafter
+ * (used to enforce license expiry).
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * RMC (Receive MAC Controller) module instance: cached FPGA handles
+ * plus the port/NIM counts used to build unblock masks.
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports; /* RX ports (falls back to NT_PORTS at init) */
+	int mn_nims;
+	bool mb_is_vswitch;
+
+	/* Set by nthw_rmc_administrative_block(); final once set */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional; NULL when absent) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional; NULL when absent) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Next free FPGA result-slot index per adapter.
+ * static: file-local state not declared in any header — give it internal
+ * linkage so it does not pollute the global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * Configure AVR monitoring of one sensor; the AVR places results in the
+ * FPGA slot reserved by get_fpga_idx().
+ * Returns the reserved FPGA result-slot index. NOTE(review): the index is
+ * returned even when the SPI setup call fails — only an error is logged.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format: b0,1 = endianness, b2,3 = signedness (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Sensor-group read callback: fetch the raw AVR result over SPI, convert
+ * it and publish it to the sensor.
+ * Fixes: the result variable was uninitialized and the read status was
+ * ignored, so a failed SPI read would publish stack garbage.
+ * NOTE(review): assumes sensor_read() returns 0 on success (it forwards
+ * nthw_spis_read_sensor()'s status) — confirm against the SPI API.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t n_sensor_result = 0;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	/* Only publish a value when the SPI read succeeded */
+	if (sensor_read(t_spi, sg->sensor->fpga_idx, &n_sensor_result) == 0)
+		update_sensor_value(sg->sensor, sg->conv_func(n_sensor_result));
+}
+
+/*
+ * Create a sensor group backed by an AVR-monitored sensor and register
+ * its monitoring setup with the AVR.
+ * Returns NULL on allocation failure.
+ * Fix: allocate_sensor()'s result was dereferenced without a NULL check.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/* Reserve and return the next free FPGA result slot for this adapter */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	const uint8_t n_idx = s_fpga_indexes[adapter_no];
+
+	s_fpga_indexes[adapter_no] = (uint8_t)(n_idx + 1);
+	return n_idx;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Sensor-group read callback for the on-die FPGA temperature sensor.
+ * Reads the raw TEMPMON_STAT.TEMP field and converts it to a temperature.
+ * NOTE(review): the linear conversion (val * 20159 - 44752896) / 16384
+ * looks like an FPGA sysmon ADC transfer function — confirm the constants
+ * and the expected range of `val` (a large raw value would overflow the
+ * uint32_t product before the signed divide).
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi; /* unused: this sensor is read directly from the FPGA */
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the sensor group for the on-die FPGA temperature sensor.
+ * Returns NULL on allocation failure.
+ * Fixes: tempmon_new() and allocate_sensor() can return NULL; previously
+ * a NULL monitor would crash later in the read callback.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	if (sg->monitor == NULL) {
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	if (sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg->monitor);
+		free(sg);
+		return NULL;
+	}
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/* Allocate an (uninitialized) FPGA sensor monitor; logs on failure */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *p_monitor;
+
+	p_monitor = malloc(sizeof(struct nt_fpga_sensor_monitor));
+	if (!p_monitor)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return p_monitor;
+}
+
+/*
+ * Bind a sensor monitor to the TEMPMON module of `p_fpga` and resolve
+ * the TEMPMON_STAT.TEMP field used by the temperature read callback.
+ * Fixes: previously the function logged a NULL module/register and then
+ * carried on, dereferencing the NULL pointer in the next lookup.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return; /* do not pass a NULL module to the register lookup */
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return; /* do not pass a NULL register to the field lookup */
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+#pragma pack()
+/*
+ * Setup request sent to the AVR. Declared after #pragma pack() so only
+ * the inner setup_data entries are packed; since those have alignment 1,
+ * the transmitted layout is contiguous anyway. Only the first setup_cnt
+ * entries of setup_data are transmitted (see nt_avr_sensor_mon_setup).
+ */
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Send an AVR opcode over SPI v3 and optionally receive a reply.
+ *
+ * @param s_spi  SPI v3 handle
+ * @param opcode AVR operation code
+ * @param txsz   number of bytes to transmit from tx
+ * @param tx     transmit payload (may be NULL when txsz is 0)
+ * @param rxsz   in: capacity of rx; out: bytes actually received.
+ *               May be NULL when no reply is expected.
+ * @param rx     receive buffer (may be NULL)
+ * @return 0 on success, the non-zero transfer status otherwise
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	/* Guard the dereference: rxsz may legitimately be NULL */
+	struct tx_rx_buf m_rx = { .size = rxsz ? (uint16_t)*rxsz : 0,
+				  .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Read the latest result of sensor fpga_idx into *p_sensor_result.
+ * Thin wrapper around nthw_spis_read_sensor(); returns its status code.
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+/*
+ * Send a sensor-monitoring setup to the AVR.
+ * Only the header plus the populated setup_cnt entries are transmitted.
+ *
+ * @return 0 on success, the transfer error on SPI failure, or 1 if the AVR
+ *         unexpectedly returned data.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	/* Header size plus only the entries actually in use */
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		/* No trailing newline - matches the other NT_LOG calls here */
+		NT_LOG(ERR, ETHDEV, "%s failed", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Issue a sensor-monitoring control command (stop/run/remove-all) to the AVR.
+ * Returns 0 on success, the transfer error on SPI failure, or 1 if the AVR
+ * unexpectedly returned data.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	size_t rx_size = 0;
+	int rc = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		      (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (rc != 0)
+		return rc;
+
+	if (rx_size == 0)
+		return 0;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: Returned data: Expected size = 0, Actual = %zu",
+	       __func__, rx_size);
+	return 1;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+/* Send a sensor-monitoring setup; only setup_cnt entries are transmitted.
+ * Returns 0 on success, non-zero on SPI failure or unexpected reply data.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+/* Start/stop/reset AVR sensor monitoring; returns 0 on success. */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+/* Read one sensor result for fpga_idx into *p_sensor_result; returns status. */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ * The type determines the unit of value/value_lowest/value_highest and the
+ * alarm limits in struct nt_info_sensor_s.
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7, /* Number of sensor types - keep last */
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated.
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated.
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ * Values are distinct bit flags (0x01..0x20) so sources can be OR'ed into a mask.
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ * Stored in int32_t fields; consumers compare via an unsigned cast.
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Master/Slave
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_MASTER, /* Adapter is master in the bonding */
+	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ * Some names are aliases of earlier entries (e.g. *_2X100 = NT200A01) kept
+ * for backward compatibility; the 4GARCH entries are bit marks, not types.
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+#include <stdio.h>
+
+/*
+ * Free a sensor group and the members it owns.
+ * A NULL group is accepted; free(NULL) is a no-op, so no member guards needed.
+ */
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		free(sg->sensor);
+		free(sg->monitor);
+		free(sg);
+	}
+}
+
+/*
+ * Allocate and fully initialize an adapter/port sensor.
+ *
+ * @param adapter_or_port_index source index (adapter no. for adapter sensors,
+ *                              port no. for port sensors)
+ * @param p_name      sensor name, truncated to NT_INFO_SENSOR_NAME characters
+ * @param ssrc        sensor source (adapter/port/level)
+ * @param type        sensor type (temperature, voltage, ...)
+ * @param index       sensor index within the source
+ * @param event_alarm alarm behavior
+ * @param si          signedness of the raw sensor value
+ * @return newly allocated sensor (caller owns, free with free()), or NULL on OOM
+ */
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor = malloc(sizeof(*sensor));
+
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->fpga_idx = 0; /* was left uninitialized on malloc'd memory */
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	/* snprintf truncates to the buffer and always NUL-terminates */
+	memset(sensor->info.name, 0, sizeof(sensor->info.name));
+	snprintf(sensor->info.name, sizeof(sensor->info.name), "%s", p_name);
+
+	return sensor;
+}
+
+/*
+ * Record a new reading and keep the running min/max up to date.
+ * NT_SENSOR_NAN in value_lowest/value_highest marks "no extreme recorded yet".
+ */
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (!sensor)
+		return;
+
+	sensor->info.value = value;
+
+	if ((unsigned int)sensor->info.value_highest == NT_SENSOR_NAN ||
+			sensor->info.value_highest < value)
+		sensor->info.value_highest = value;
+
+	if ((unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN ||
+			sensor->info.value_lowest > value)
+		sensor->info.value_lowest = value;
+}
+
+/*
+ * Allocate a sensor from a static description.
+ *
+ * @param adapter_or_port_index source index (adapter or port number)
+ * @param ssrc  sensor source (adapter/port/level)
+ * @param descr static description: type, subtype, index, alarm and name
+ * @return newly allocated sensor (caller owns, free with free()), or NULL on OOM
+ */
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor = malloc(sizeof(*sensor));
+
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->fpga_idx = 0; /* was left uninitialized on malloc'd memory */
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sub_type = descr->sub_type; /* was ignored by the old code */
+	sensor->info.sensor_index = descr->index;
+	/* Match allocate_sensor(): these fields were previously uninitialized */
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, sizeof(sensor->info.name));
+	snprintf(sensor->info.name, sizeof(sensor->info.name), "%s", descr->name);
+
+	return sensor;
+}
+
+/*
+ * Reset a sensor group to a fully disconnected state: no sensor, no monitor,
+ * no read/conversion callbacks and no successor in the linked list.
+ */
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	sg->read = NULL;
+	sg->conv_func = NULL;
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+}
+
+/* Getters
+ * NOTE: stray semicolons after the function definitions were removed - a `;`
+ * at file scope is an invalid empty declaration in ISO C.
+ */
+
+/* Current value of the group's sensor. */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+}
+
+/* Lowest value registered since initialization. */
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+}
+
+/* Highest value registered since initialization. */
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+}
+
+/* Sensor name (NUL-terminated). */
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+}
+
+/* Conversion functions */
+
+/* Pass-through: reinterpret the low 16 bits of the raw result as signed. */
+int null_signed(uint32_t p_sensor_result)
+{
+	int16_t raw = (int16_t)p_sensor_result;
+
+	return raw;
+}
+
+/* Pass-through: take the low 16 bits of the raw result as unsigned. */
+int null_unsigned(uint32_t p_sensor_result)
+{
+	uint16_t raw = (uint16_t)p_sensor_result;
+
+	return raw;
+}
+
+/*
+ * EXAR7724: convert a raw Vch reading to the NT internal unit (1 mV).
+ * Datasheet: Vout = ReadVal * 0.015 V (PRESCALE already accounted for),
+ * i.e. 15 mV per count.
+ */
+int exar7724_vch(uint32_t p_sensor_result)
+{
+	uint32_t millivolts = p_sensor_result * 15u;
+
+	return millivolts;
+}
+
+/*
+ * EXAR7724: convert a raw Vin reading to the NT internal unit (1 mV).
+ * Datasheet: Vout = ReadVal * 0.0125 V, i.e. 12.5 mV per count,
+ * computed in integer arithmetic as (ReadVal * 25) / 2.
+ */
+int exar7724_vin(uint32_t p_sensor_result)
+{
+	uint32_t millivolts = (p_sensor_result * 25u) / 2u;
+
+	return millivolts;
+}
+
+/*
+ * EXAR7724: convert a raw Tj reading to the NT internal unit (0.1 C).
+ * Datasheet: Temp (Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K
+ *                          = ReadVal * 5K.
+ * 2730 (not the exact 2732) is subtracted so results land on the sensor's
+ * natural 5-degree steps.
+ */
+int exar7724_tj(uint32_t p_sensor_result)
+{
+	/* Keep the subtraction in unsigned arithmetic, as before */
+	uint32_t deci_degrees = p_sensor_result * 50;
+
+	return deci_degrees - 2730; /* NT unit: 0.1C */
+}
+
+/*
+ * Decode the Linear Technology Linear_5s_11s format: returns Y * 2**N where
+ * N = b[15:11] is a 5-bit two's complement exponent and Y = b[10:0] is an
+ * 11-bit two's complement mantissa. The multiplier scales to NT units.
+ */
+static int conv5s_11s(uint16_t value, int multiplier)
+{
+	int mantissa = value & 0x07FF; /* b[10:0] */
+	int exponent = (value >> 11) & 0x1F; /* b[15:11] */
+
+	/* Sign-extend both two's complement fields */
+	if (mantissa & 0x0400)
+		mantissa -= 0x0800;
+	if (exponent & 0x10)
+		exponent -= 0x20;
+
+	mantissa *= multiplier;
+
+	/*
+	 * Apply 2**exponent via multiply/divide (not shifts) so negative
+	 * mantissas stay well-defined and division truncates toward zero.
+	 */
+	if (exponent > 0)
+		mantissa *= 1 << exponent;
+	else if (exponent < 0)
+		mantissa /= 1 << (-exponent);
+
+	return mantissa;
+}
+
+/*
+ * ******************************************************************************
+ * Temperature conversion from Linear_5s_11s format.
+ * ******************************************************************************
+ */
+int ltm4676_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * NOTE(review): the uint16_t cast maps any negative conv5s_11s() result
+	 * to a large positive value - presumably junction temperatures below 0C
+	 * are not expected here; confirm before reusing for other readings.
+	 */
+	return (uint16_t)conv5s_11s(p_sensor_result, 10); /* NT unit: 0.1C */
+}
+
+/*
+ * MP2886A: convert a raw Tj reading to the NT internal unit (0.1 C).
+ * READ_TEMPERATURE (register 0x8D) is a 2-byte unsigned integer, so only
+ * the low 16 bits of the raw result are meaningful.
+ */
+int mp2886a_tj(uint32_t p_sensor_result)
+{
+	uint16_t raw = (uint16_t)p_sensor_result;
+
+	return raw; /* NT unit: 0.1C */
+}
+
+/*
+ * MAX6642: convert a raw temperature reading to the NT internal unit (0.1 C).
+ * A high byte of 0xFF marks an invalid reading and yields NT_SENSOR_NAN.
+ * The six lowest bits are unused; the native step is 0.25 C.
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	uint32_t high_byte = p_sensor_result >> 8;
+
+	if (high_byte == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* Scale 0.25C native steps to 0.1C NT units: *5 / 2 */
+	return (int)((p_sensor_result >> 6) * 5 / 2);
+}
+
+/*
+ * DS1775: convert a raw temperature reading to the NT internal unit (0.1 C).
+ * Native unit is 1/256 C per count.
+ */
+int ds1775_t(uint32_t p_sensor_result)
+{
+	uint32_t deci_degrees = (p_sensor_result * 10) / 256;
+
+	return deci_degrees;
+}
+
+/*
+ * FAN: convert a tick count to RPM (NT unit: RPM).
+ * NOTE(review): the original comment says 2 ticks/revolution, yet the code
+ * divides by 4 - presumably the counter reports half-periods or the window
+ * differs; confirm against the tick source before changing.
+ */
+int fan(uint32_t p_sensor_result)
+{
+	uint32_t rpm = (p_sensor_result * 60U) / 4U;
+
+	return rpm;
+}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
+/* Alarm behavior for a sensor: raise, log only, or ignore out-of-range values */
+enum nt_sensor_event_alarm_e {
+	NT_SENSOR_ENABLE_ALARM,
+	NT_SENSOR_LOG_ALARM,
+	NT_SENSOR_DISABLE_ALARM,
+};
+
+/*
+ * Sensor Class types
+ */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
+};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no; /* 0xFF until assigned */
+	uint8_t m_intf_no; /* 0xFF until assigned */
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si; /* signedness of the raw reading */
+	struct nt_info_sensor_s info; /* public state: value, min/max, name */
+	enum nt_sensor_event_alarm_e alarm;
+	bool m_enable_alarm;
+};
+
+/* FPGA register handles used to read a sensor directly from the FPGA */
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga;
+	nt_module_t *mod;
+
+	nt_register_t *reg;
+	nt_field_t **fields; /* array of fields_num field handles */
+	uint8_t fields_num;
+};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name. */
+};
+
+/* Linked-list node tying a sensor to its monitor and read/convert callbacks */
+struct nt_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	struct nt_fpga_sensor_monitor *monitor;
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi);
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result);
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next;
+};
+
+/* Reset all pointers of a group to NULL */
+void init_sensor_group(struct nt_sensor_group *sg);
+
+/* Record a new reading and update the running min/max */
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+/* Free a group and its owned sensor/monitor; NULL is accepted */
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+/* Allocators: the returned sensor is owned by the caller (free()) */
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions (raw sensor result -> NT internal unit) */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+#define NT_INFO_SENSOR_NAME 50 /* Max sensor name length excluding the NUL */
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
+/* The NT200A02 adapter sensor id's */
+enum nt_sensors_adapter_nt200a02_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */
+
+	/* MCU (Micro Controller Unit) temperature sensor */
+	NT_SENSOR_NT200A02_MCU_TEMP,
+	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NT200A02_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NIM_POWER,
+
+	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
+};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-21 11:34   ` Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index a8bf67af10..96d8c7b49d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Allocate a zero-initialized CAT context. Returns NULL on allocation failure. */
+struct cat_nthw *cat_nthw_new(void)
+{
+	struct cat_nthw *p = malloc(sizeof(struct cat_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Release a CAT context previously obtained from cat_nthw_new(); NULL is a no-op. */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		/* Scrub the struct before freeing to surface use-after-free bugs. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Forward the debug-mode setting to the underlying CAT FPGA module.
+ * NOTE(review): p and p->m_cat are dereferenced unchecked - callers must
+ * pass a context already initialized by cat_nthw_init().
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+/*
+ * Bind all CAT (categorizer) register and field handles from the FPGA model.
+ *
+ * Probe mode: when p == NULL the function only checks whether CAT instance
+ * n_instance exists, returning 0 (present) or -1 (absent) without touching
+ * any state.
+ *
+ * Returns 0 on success, -1 when the module instance is not found.
+ *
+ * register_get_field() is used for fields expected on every supported FPGA
+ * image; register_query_field() is used for optional fields and may leave
+ * the corresponding pointer NULL on images that lack them.
+ */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL context: caller is only probing for the module's presence. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	/* Default -1 (parameter absent) selects the legacy single-KM register layout. */
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	/* Field name depends on whether the image has numbered KM interfaces. */
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	/* Legacy image: single unnumbered KCE/KCS/FTE register set (index 0 only). */
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	/* Optional CFN fields - left NULL on FPGA images that lack them. */
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	/* Optional KCC register pair. */
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	/* Optional CCE register pair. */
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	/* Optional CCS register pair. */
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN table entry (ADR) that subsequent data writes target. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/* Set the CFN control CNT field.
+ * Renamed from the mangled identifier 'r' to follow the
+ * cat_nthw_<table>_cnt() convention used by every other table accessor
+ * in this file (cat_nthw_kce_cnt, cat_nthw_kcs_cnt, cat_nthw_fte_cnt,
+ * cat_nthw_cte_cnt); CFN was otherwise the only table without one.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/* Writers for the individual CAT_CFN_DATA fields. Values are staged in the
+ * register shadow and only pushed to hardware by cat_nthw_cfn_flush().
+ * Setters that assert their field pointer cover optional fields that are
+ * only present on some FPGA images (looked up with register_query_field()
+ * in cat_nthw_init and therefore possibly NULL).
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/* Optional field - asserts if absent on this FPGA image. */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* Multi-word field: val must point to mn_words uint32_t's. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* Only valid on images with a second KM interface (see cat_nthw_init). */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Push the staged CFN control and data shadow registers to hardware. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/* KCE table accessors; 'index' selects the KM interface register set
+ * (only index 0 is populated when the FPGA has a single KM interface -
+ * see cat_nthw_init).
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+/* Push staged KCE control and data shadow registers to hardware. */
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE enable-bit setters: one per downstream module (COR/HSH/QSL/...).
+ * Values are staged until cat_nthw_cte_flush() writes them out.
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+/*
+ * The remaining enables (MSK/HST/EPP/TPE/RRB) assert their field pointer
+ * first — these fields are apparently optional per FPGA image; the assert
+ * catches use on images that lack them.
+ */
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Push the pending CTE control and data register contents to hardware. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS: category translation setters (two category fields A/B). */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT: color/KM table setters. */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+/* NFV_SB is an optional field; the assert guards FPGA images without it. */
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT: color/KM table setters. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO: extractor offset setters; 'ofs' is signed (int32_t). */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK: select/count setters plus whole-register data write. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+/*
+ * Unlike the other groups, RCK data has no field handles: the register is
+ * written as one 32-bit word and explicitly marked dirty so the next
+ * flush pushes it out.
+ */
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN: frame-length matcher setters (lower/upper bounds, dyn offsets, invert). */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC/CCE groups: every setter asserts its register/field handle first —
+ * these registers are apparently optional per FPGA image, so the handles
+ * may be NULL when the image lacks them.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* KCC key is two 32-bit words; val must point to at least 2 words. */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generates one setter per CCS data field: CATNTHW_CCS_SET(x) expands to
+ * cat_nthw_ccs_data_x(), which asserts and writes p->mp_ccs_data_x.
+ * The field handles may be NULL on images without CCS support, hence the
+ * asserts.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en);
+CATNTHW_CCS_SET(cor);
+CATNTHW_CCS_SET(hsh_en);
+CATNTHW_CCS_SET(hsh);
+CATNTHW_CCS_SET(qsl_en);
+CATNTHW_CCS_SET(qsl);
+CATNTHW_CCS_SET(ipf_en);
+CATNTHW_CCS_SET(ipf);
+CATNTHW_CCS_SET(slc_en);
+CATNTHW_CCS_SET(slc);
+CATNTHW_CCS_SET(pdb_en);
+CATNTHW_CCS_SET(pdb);
+CATNTHW_CCS_SET(msk_en);
+CATNTHW_CCS_SET(msk);
+CATNTHW_CCS_SET(hst_en);
+CATNTHW_CCS_SET(hst);
+CATNTHW_CCS_SET(epp_en);
+CATNTHW_CCS_SET(epp);
+CATNTHW_CCS_SET(tpe_en);
+CATNTHW_CCS_SET(tpe);
+CATNTHW_CCS_SET(rrb_en);
+CATNTHW_CCS_SET(rrb);
+CATNTHW_CCS_SET(sb0_type);
+CATNTHW_CCS_SET(sb0_data);
+CATNTHW_CCS_SET(sb1_type);
+CATNTHW_CCS_SET(sb1_data);
+CATNTHW_CCS_SET(sb2_type);
+CATNTHW_CCS_SET(sb2_data);
+
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one CAT FPGA module instance; layout defined below. */
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+/* Lifecycle: allocate / free / bind a handle to FPGA module instance n_instance. */
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN: category-filter setters; flush writes the staged values to hardware. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/*
+ * NOTE(review): was declared as 'void r(...)' — a clearly corrupted name.
+ * Renamed to cat_nthw_cfn_cnt to match the select/cnt pair every other
+ * register group declares (and the mp_cfn_cnt field below); confirm against
+ * the definition in flow_nthw_cat.c.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+/* PM_CMP is a multi-word field; val points to the full word array. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+/* Only valid when the KM1_OR field exists on the FPGA image (asserted in .c). */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/*
+ * KCE/KCS/FTE registers are banked: 'index' selects bank 0 or 1
+ * (see the [2] arrays in struct cat_nthw below).
+ */
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC (optional register group: setters assert presence in the .c file) */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Per-instance handle for the CAT FPGA module: the module pointer plus the
+ * register and field handles used by the setters above. Fields left NULL
+ * indicate features absent from the loaded FPGA image (the corresponding
+ * setters assert before use). Presumably populated by cat_nthw_init() —
+ * definition not in this file; confirm there.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE/KCS/FTE are banked per KM interface: index 0/1. */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK data has no field handles: written as a whole register. */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU module handle. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* Allocate a zero-initialized CSU handle; NULL on allocation failure. */
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/*
+ * Free a CSU handle; NULL is accepted. The handle is zeroed before free,
+ * presumably to make use-after-free easier to detect.
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a CSU handle to FPGA module instance n_instance and resolve its
+ * register/field handles.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL acts as a presence probe: returns 0 if the
+ * module instance exists, -1 otherwise, without touching any state.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Resolve RCP control register and its address/count fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	/* Resolve RCP data register and the four checksum-command fields. */
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Select the RCP record address for subsequent data writes. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* Set the RCP record count for subsequent data writes. */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Push the staged RCP control and data registers to hardware. */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Per-adapter handle for one CSU (checksum) FPGA module instance.
+ * Holds resolved register/field handles for the RCP (recipe) interface.
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	/* RCP control register: recipe address and count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	/* RCP data register: outer/inner L3/L4 checksum command fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+/* Allocate a zeroed handle; NULL on allocation failure. */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field setters stage values; csu_nthw_rcp_flush() writes them to HW. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct flm_nthw *flm_nthw_new(void)
+{
+	/*
+	 * Allocate and zero-initialize a FLM handle.
+	 * Returns NULL on allocation failure.
+	 *
+	 * calloc() replaces the former malloc()+memset() pair: one call,
+	 * identical zeroed result.
+	 */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	/*
+	 * NULL-tolerant destructor. The handle is scrubbed before freeing so
+	 * stale register/field pointers cannot be reused by accident.
+	 */
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	/* Forward the debug level to the underlying FPGA module model. */
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+/*
+ * Resolve the FLM (flow matcher) module instance and all register/field
+ * handles used by this driver.
+ *
+ * When p is NULL the call only probes for the module: it returns 0 if the
+ * instance exists and -1 otherwise. With a valid p it returns 0 on success
+ * and -1 if the instance is absent.
+ *
+ * Mandatory resources are resolved with module_get_register()/
+ * register_get_field(); the remaining ones use the query variants and may
+ * be left NULL -- the corresponding accessor functions below assert on
+ * their presence before use (presumably these registers/fields only exist
+ * on some FPGA images -- confirm against the register definitions).
+ */
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	/* Control/status registers */
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	/* Load/rate registers */
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	/* Priority limits */
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	/* PST (priority state) table access */
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	/* RCP (recipe) table access */
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* DMA learn/info/status data paths (accessed via RAC, not fields) */
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	/* Statistics counters (query variants are optional, may stay NULL) */
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM_CONTROL field setters. Each call stages a value in the register
+ * shadow; nothing reaches hardware until flm_nthw_control_flush() is
+ * called.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	/* PDS is resolved via register_query_field() in flm_nthw_init() and
+	 * may be absent; callers must only use it when present.
+	 */
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	/* PIS is optional, same as PDS above. */
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	/* Write the staged FLM_CONTROL shadow to hardware. */
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM_STATUS accessors using the (val, get) convention: when get is
+ * non-zero the shadow value is read into *val; when get is zero (only for
+ * fields that support it) *val is staged for a write. Use
+ * flm_nthw_status_update() to refresh the shadow and
+ * flm_nthw_status_flush() to write staged values back.
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	/* Read-only field: a get==0 call is a no-op. */
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * Timeout, scrub interval and load register accessors. Setters stage the
+ * value; the matching *_flush() writes it to hardware.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * FLM_PRIO limit/flow-type field setters (four priority levels). Values
+ * are staged; flm_nthw_prio_flush() writes the register to hardware.
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * FLM_PST table accessors: select the entry address/count via the control
+ * register, stage the data fields, then write both with
+ * flm_nthw_pst_flush().
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * FLM_RCP (recipe) table accessors: select an entry, stage the recipe
+ * fields, then write control and data with flm_nthw_rcp_flush().
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	/* MASK is a wide field: val must point at 10 32-bit words. */
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the two BUF_CTRL words over RAB DMA and unpack the buffer levels:
+ * word0[15:0]  -> *lrn_free  (free space in the learn queue)
+ * word0[31:16] -> *inf_avail (pending info records)
+ * word1[15:0]  -> *sta_avail (pending status records)
+ * Returns 0 on success, otherwise the RAB DMA error code (-1 if the DMA
+ * transaction could not be started). On failure the out-params are left
+ * untouched.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* bc_buf is a ring buffer; size is a power of two, so
+		 * (index & mask) wraps the read position.
+		 */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Push word_count learn-record words to LRN_DATA over RAB DMA and read
+ * back the BUF_CTRL levels (see flm_nthw_buf_ctrl_update() for the
+ * unpacking of lrn_free/inf_avail/sta_avail).
+ * Returns 0 on success, otherwise the RAB DMA error code; on failure the
+ * out-params are left untouched.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/* Capture the begin() result so a failure is propagated as the real
+	 * error code, consistent with flm_nthw_buf_ctrl_update() and the
+	 * other DMA helpers in this file.
+	 */
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count info-record words from INF_DATA over RAB DMA into
+ * data[], then read the BUF_CTRL levels (see flm_nthw_buf_ctrl_update()
+ * for the unpacking). Returns 0 on success, otherwise the RAB DMA error
+ * code; on failure the out-params are left untouched.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer (size is a power of two) */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count status-record words from STA_DATA over RAB DMA into
+ * data[], then read the BUF_CTRL levels (see flm_nthw_buf_ctrl_update()
+ * for the unpacking). Returns 0 on success, otherwise the RAB DMA error
+ * code; on failure the out-params are left untouched.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer (size is a power of two) */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Statistics counter accessors. *_cnt(p, val, get) reads the counter
+ * field into *val when get is non-zero; *_update(p) refreshes the
+ * corresponding register shadow. Accessors whose register/field was
+ * resolved via the query variants in flm_nthw_init() assert on presence,
+ * since those counters are optional.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t; /* NOTE(review): typedef unused within this header - confirm external users */
+
+struct flm_nthw *flm_nthw_new(void); /* heap-allocates a zeroed handle; NULL on OOM */
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance); /* 0 on success, -1 if no such instance */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control - field setters stage values; flm_nthw_control_flush() writes them out */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status - getters copy a field into *val only when 'get' is non-zero */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio - four limit/flow-type pairs, flushed together */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST - indexed table: select sets the address, flush commits ctrl+data */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP - recipe table: select sets the address, flush commits ctrl+data */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val); /* multi-word field */
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl - returns 0 on success; outputs report buffer availability */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data - writes word_count 32-bit words of learn data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data - reads word_count 32-bit words of info data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data - reads word_count 32-bit words of status data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat counters - each pair: _update() re-reads the register from HW,
+ * _cnt() copies the counter field into *val when 'get' is non-zero.
+ */
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+struct flm_nthw { /* per-instance handle caching FLM module register/field pointers */
+	uint8_t m_physical_adapter_no; /* set from n_instance in flm_nthw_init() */
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm; /* resolved FLM module */
+
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	nt_register_t *mp_pst_ctrl; /* PST table: ctrl(addr,cnt) + data registers */
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	nt_register_t *mp_rcp_ctrl; /* RCP table: ctrl(addr,cnt) + data registers */
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data; /* learn/info/status data streams (no cached fields) */
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	nt_register_t *mp_stat_lrn_done; /* statistics: one register + counter field each */
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode) /* forward to module layer */
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+struct hfu_nthw *hfu_nthw_new(void) /* allocate zero-initialized handle; NULL on OOM */
+{
+	struct hfu_nthw *p = malloc(sizeof(struct hfu_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void hfu_nthw_delete(struct hfu_nthw *p) /* NULL-safe; scrubs stale pointers before free */
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the HFU module instance and cache all RCP register/field pointers.
+ * With p == NULL acts as a probe: returns 0 if the instance exists, -1 if not.
+ * Returns 0 on success, -1 when the instance is absent.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hfu = p_mod; /* reuse result queried above (matches hsh_nthw_init) */
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val) /* set RCP table address */
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val) /* set RCP access count */
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* LEN_A field setters - stage values only; commit via hfu_nthw_rcp_flush() */
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+/* LEN_B field setters - stage values only; commit via hfu_nthw_rcp_flush() */
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+/* LEN_C field setters - stage values only; commit via hfu_nthw_rcp_flush() */
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+/* TTL field setters - stage values only; commit via hfu_nthw_rcp_flush() */
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+/* Remaining RCP field setters - stage values only; commit via hfu_nthw_rcp_flush() */
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p) /* commit staged ctrl+data to HW */
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+struct hfu_nthw { /* per-instance handle caching HFU RCP register/field pointers */
+	uint8_t m_physical_adapter_no; /* set from n_instance in hfu_nthw_init() */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu; /* resolved HFU module */
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void); /* heap-allocates a zeroed handle; NULL on OOM */
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance); /* 0 on success, -1 if no such instance */
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt); /* NOTE(review): no definition in flow_nthw_hfu.c - confirm it exists elsewhere or drop */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP - setters stage shadow values; hfu_nthw_rcp_flush() commits to HW */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <assert.h> /* assert */
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing on the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HSH handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc zero-initializes in one call; no separate memset needed */
+	struct hsh_nthw *p = calloc(1, sizeof(struct hsh_nthw));
+
+	return p;
+}
+
+/* Scrub and release an HSH handle; a NULL handle is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an HSH handle to FPGA module instance n_instance, cache pointers to
+ * the RCP CTRL/DATA registers and their fields, then write a default recipe
+ * to entry 0 and flush it to hardware.
+ *
+ * When p is NULL the call only probes whether the instance exists.
+ * Returns 0 on success, -1 if the module instance is not present.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report instance presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* query (not get): AUTO_IPV4_MASK is optional and may be NULL. */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* NOTE(review): mp_rcp_data_p_mask gets no default here while every
+	 * other RCP field does - confirm this is intentional.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	/* Push the default recipe to hardware. */
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * Per-field RCP setters. Each call writes one field of the RCP CTRL/DATA
+ * shadow registers; nothing reaches hardware until hsh_nthw_rcp_flush().
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* val must hold at least mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* val must hold 10 32-bit words (word mask width used by init). */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional in the FPGA image; silently ignored if absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Commit the buffered CTRL/DATA shadow registers to hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with "__" are reserved by the C
+ * standard; consider FLOW_NTHW_HSH_H_ for the guard.
+ */
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Handle for one HSH module instance; fields cache register/field lookups
+ * done once in hsh_nthw_init().
+ */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <assert.h> /* assert */
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing on the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HST handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc zero-initializes in one call; no separate memset needed */
+	struct hst_nthw *p = calloc(1, sizeof(struct hst_nthw));
+
+	return p;
+}
+
+/* Scrub and release an HST handle; a NULL handle is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an HST handle to FPGA module instance n_instance and cache pointers
+ * to the RCP CTRL/DATA registers and their fields.
+ *
+ * When p is NULL the call only probes whether the instance exists.
+ * Returns 0 on success, -1 if the module instance is not present.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default recipe is written and
+ * nothing is flushed here - confirm this asymmetry is intentional.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report instance presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/* RCP
+ * Per-field setters below write the RCP CTRL/DATA shadow registers; nothing
+ * reaches hardware until hst_nthw_rcp_flush().
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Commit the buffered CTRL/DATA shadow registers to hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with "__" are reserved by the C
+ * standard; consider FLOW_NTHW_HST_H_ for the guard.
+ */
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one HST module instance; fields cache register/field lookups
+ * done once in hst_nthw_init().
+ */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <assert.h> /* assert */
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing on the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IFR handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc zero-initializes in one call; no separate memset needed */
+	struct ifr_nthw *p = calloc(1, sizeof(struct ifr_nthw));
+
+	return p;
+}
+
+/* Scrub and release an IFR handle; a NULL handle is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an IFR handle to FPGA module instance n_instance and cache pointers
+ * to the RCP CTRL/DATA registers and their fields.
+ *
+ * When p is NULL the call only probes whether the instance exists.
+ * Returns 0 on success, -1 if the module instance is not present.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report instance presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module looked up above instead of querying a second time. */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Per-field RCP setters; buffered until ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the RCP MTU field (buffered until ifr_nthw_rcp_flush()). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix: assert the field actually written (was mp_rcp_data_en). */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Commit the buffered CTRL/DATA shadow registers to hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with "__" are reserved by the C
+ * standard; consider FLOW_NTHW_IFR_H_ for the guard.
+ */
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Handle for one IFR module instance; fields cache register/field lookups
+ * done once in ifr_nthw_init().
+ */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <assert.h> /* assert */
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Normalize a product parameter to a 0/1 presence flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val == 0 ? 0 : 1;
+}
+
+/*
+ * Allocate a zero-initialized INFO handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc zero-initializes in one call; no separate memset needed */
+	struct info_nthw *p = calloc(1, sizeof(struct info_nthw));
+
+	return p;
+}
+
+/* Scrub and release an INFO handle; a NULL handle is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Snapshot the FPGA product parameters (capabilities and table sizes) into
+ * the INFO handle. Capacities of optional modules are multiplied by a 0/1
+ * presence flag so absent modules report zero capacity.
+ *
+ * Always returns 0; unlike the other *_nthw_init functions there is no
+ * module instance to probe.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for optional modules. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is only usable when all five sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is bounded by the smaller of the Rx and Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* Default -1 marks "interface not mapped" when the param is absent. */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/* Read-only accessors for the capability snapshot taken in info_nthw_init(). */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): init defaults m_cat_km_if_m0/m1 to -1 ("not mapped") but the
+ * getters return unsigned int, so an absent mapping reads as UINT_MAX -
+ * confirm callers handle that sentinel.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * INFO module accessor API: cached FPGA capability/parameter values for the
+ * flow filter. Fill the cache with info_nthw_init() and read it back through
+ * the info_nthw_get_*() accessors.
+ *
+ * Include guard uses a single trailing underscore; identifiers beginning
+ * with a double underscore are reserved to the implementation (C11 7.1.3).
+ */
+#ifndef FLOW_NTHW_INFO_H_
+#define FLOW_NTHW_INFO_H_
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Lifetime: allocate, bind to an FPGA instance, release. */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Read-only accessors for the values cached by info_nthw_init(). */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/* Capability cache; members are filled once by info_nthw_init(). */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* FLOW_NTHW_INFO_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA module register model. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ioa_nthw instance.
+ *
+ * Returns NULL on allocation failure. The caller owns the instance and
+ * releases it with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc zeroes the block, replacing the malloc + memset pair. */
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/*
+ * Release an ioa_nthw instance previously returned by ioa_nthw_new().
+ * The memory is scrubbed before being freed; NULL is accepted and ignored.
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an ioa_nthw instance to IOA module instance n_instance on p_fpga and
+ * resolve the register/field handles used by the accessors below.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When p is NULL the call only probes for the module's presence.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe mode: with no instance to fill in, just report module presence. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 */
+		/* EPP registers are optional: query, and NULL the fields if absent. */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Write accessors for the IOA shadow register model: values are staged by
+ * field_set_val32() and committed to hardware by the *_flush() helpers.
+ */
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Commit the staged RCP CTRL/DATA registers to hardware. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * EPP accessors: the EPP registers are optional (see ioa_nthw_init), so each
+ * write is guarded by a NULL check on the resolved handle.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Stage a value into an optional register field, silently ignoring the write
+ * when the field handle is NULL (field absent on this FPGA version).
+ * The field argument is expanded exactly once into a local to avoid the
+ * multiple-evaluation macro hazard; both arguments are parenthesized.
+ */
+#define CHECK_AND_SET_VALUE(_a, _val)                   \
+	do {                                            \
+		__typeof__(_a) a_ = (_a);               \
+		if (a_)                                 \
+			field_set_val32(a_, (_val));    \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM module register model. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized km_nthw instance.
+ *
+ * Returns NULL on allocation failure. The caller owns the instance and
+ * releases it with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc zeroes the block, replacing the malloc + memset pair. */
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/*
+ * Release a km_nthw instance previously returned by km_nthw_new().
+ * The memory is scrubbed before being freed; NULL is accepted and ignored.
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a km_nthw instance to KM module instance n_instance on p_fpga and
+ * resolve the register/field handles used by the accessors below.
+ *
+ * Mandatory fields use register_get_field(); fields whose presence depends
+ * on the FPGA/module version use register_query_field() and may be NULL.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When p is NULL the call only probes for the module's presence.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe mode: with no instance to fill in, just report module presence. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Version-dependent fields: query_field returns NULL when absent. */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/*
+ * RCP write accessors. Each stages a value in the shadow register model;
+ * km_nthw_rcp_flush() commits the staged CTRL/DATA registers to hardware.
+ * Accessors for version-dependent fields go through CHECK_AND_SET_VALUE()
+ * and are silently ignored when the field is absent on the running FPGA.
+ *
+ * (The stray ';' terminators after each function body were removed: an
+ * empty declaration at file scope is not valid strict ISO C.)
+ */
+/* RCP */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+/* Optional fields below (NULL when absent); writes are no-ops in that case. */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+/* Multi-word mask writes; width comes from the field's own word count. */
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+/* Full A-mask write; used for the DW8/DW10 layouts from v6+. */
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+/*
+ * NOTE(review): the *_b_* fields below are resolved with
+ * register_query_field() in km_nthw_init() (with an old-name fallback) yet
+ * are written unconditionally here — confirm they can never be NULL on the
+ * supported FPGA images, or route them through CHECK_AND_SET_VALUE().
+ */
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+/* Commit the staged RCP CTRL/DATA registers to hardware. */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM: data-word (w0..w5) and ft0..ft5 setters; km_nthw_cam_flush()
+ * writes the CTRL and DATA registers. Stray post-body semicolons
+ * (invalid ISO C at file scope) removed.
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM: address/count selection and data setters; stray post-body
+ * semicolons (invalid ISO C at file scope) removed.
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* Writes three 32-bit words from val into the TCAM data field. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI: color/ft data setters; stray post-body semicolons (invalid
+ * ISO C at file scope) removed.
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ: bank-mask/qualifier setters; stray post-body semicolons
+ * (invalid ISO C at file scope) removed.
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* BANK_MASK may be absent on some FPGA versions; the macro guards it. */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant, written as three 32-bit words; to use in v4. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/*
+ * Handle for one KM FPGA module instance; caches the register and
+ * field pointers resolved once at km_nthw_init() time so accessors
+ * avoid repeated lookups.  Fields that are resolved with
+ * register_query_field() may be NULL on FPGA versions lacking them,
+ * and must be NULL-checked before use.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	/* RCP control (ADR/CNT) and data registers with their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	/* NOTE(review): the header also declares km_nthw_rcp_mask_d_a()
+	 * but no dedicated mask_d_a field exists here — confirm which
+	 * field backs that accessor.
+	 */
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	/* CAM control/data registers and fields */
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	/* TCAM control/data registers and fields */
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	/* TCI control/data registers and fields */
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	/* TCQ control/data registers and fields */
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB module. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized PDB handle.
+ * Returns NULL on allocation failure; the caller owns the handle and
+ * releases it with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() yields the same zeroed instance as malloc+memset. */
+	return calloc(1, sizeof(struct pdb_nthw));
+}
+
+/*
+ * Release a PDB handle previously returned by pdb_nthw_new().
+ * Safe to call with NULL.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the instance before freeing, as the original did. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all PDB register and field handles for FPGA instance
+ * n_instance.  Returns 0 on success, -1 when the module instance
+ * does not exist.  Calling with p == NULL only probes for module
+ * presence without initializing anything.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Presence probe: succeed iff the module instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* Optional field: register_query_field() returns NULL when the
+	 * FPGA lacks it, so the setter must NULL-check.
+	 */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP: one setter per RCP_DATA field; pdb_nthw_rcp_flush() writes the
+ * CTRL and DATA registers.
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	/* Optional field (resolved via register_query_field()); skip
+	 * silently when the FPGA does not provide it.
+	 */
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG: global PDB configuration; pdb_nthw_config_flush() writes it. */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one PDB FPGA module instance; caches the register and
+ * field pointers resolved once at pdb_nthw_init() time.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	/* RCP control (ADR/CNT) and data registers with their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional; may be NULL */
+
+	/* CONFIG register and fields */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL module. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized QSL handle.
+ * Returns NULL on allocation failure; the caller owns the handle and
+ * releases it with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() yields the same zeroed instance as malloc+memset. */
+	return calloc(1, sizeof(struct qsl_nthw));
+}
+
+/*
+ * Release a QSL handle previously returned by qsl_nthw_new().
+ * Safe to call with NULL.
+ */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the instance before freeing, as the original did. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all QSL register and field handles for FPGA instance
+ * n_instance.  Returns 0 on success, -1 when the module instance
+ * does not exist.  Calling with p == NULL only probes for module
+ * presence.  Handles resolved with register_query_field()/
+ * module_query_register() may be NULL on FPGA versions lacking them.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Presence probe: succeed iff the module instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* Optional fields: NULL when absent on this FPGA version. */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Placeholder: no per-index setup is required yet; arguments are
+ * intentionally unused.  Always returns 0.
+ */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP data setters; qsl_nthw_rcp_flush() writes CTRL and DATA.
+ * cao/lr/tsa/vli are optional fields (resolved via
+ * register_query_field()) and are NULL-checked before writing.
+ * Stray post-body semicolon and blank lines between signatures and
+ * bodies removed.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX: these registers exist only before FPGA v0.7 (see init), so
+ * every handle may be NULL and all accessors must check before use.
+ */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Guard the field actually written (previously checked
+	 * mp_ltx_addr by mistake).
+	 */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* LTX registers are absent from v0.7+; avoid flushing NULL
+	 * register handles (every other LTX accessor already guards).
+	 */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST: queue-selection-table setters; optional fields (tx_port, lre,
+ * tci, ven — resolved via register_query_field()) are NULL-checked.
+ * qsl_nthw_qst_flush() writes CTRL and DATA.
+ */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+/* mp_qst_data_en is always resolved (falls back to QEN field in init). */
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN: queue-enable setters; qsl_nthw_qen_flush() writes CTRL and DATA. */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ: unmatched-queue setters; qsl_nthw_unmq_flush() writes CTRL and DATA. */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one QSL module instance.
+ * Each register group is a CTRL/DATA shadow pair; CTRL carries the table
+ * address and count, DATA the entry fields written before a flush.
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP table access */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX table access */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST table access; tx_port/lre/tci/ven may be NULL (optional fields) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN table access */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ table access */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying RMC module handle. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RMC context; returns NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	/* calloc yields the same zeroed result as malloc + memset */
+	return calloc(1, sizeof(struct rmc_nthw));
+}
+
+/* Scrub and free an RMC context; NULL is accepted and ignored. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the RMC module instance and cache its register/field handles.
+ * Returns 0 on success, -1 if the instance does not exist.
+ * p == NULL acts as a probe: only report whether the instance exists.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* optional field: query (may return NULL) instead of get */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* Placeholder: RMC needs no per-index setup; parameters intentionally unused. */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL field setters; values take effect after rmc_nthw_ctrl_flush(). */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* Optional field (queried at init): no-op when absent in this FPGA layout. */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Push the CTRL shadow register to hardware. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one RMC module instance. */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	/* CTRL register and fields; rpp_slice may be NULL (optional field) */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying ROA module handle. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ROA context; returns NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	/* calloc yields the same zeroed result as malloc + memset */
+	return calloc(1, sizeof(struct roa_nthw));
+}
+
+/* Scrub and free a ROA context; NULL is accepted and ignored. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the ROA module instance and cache its register/field handles.
+ * Returns 0 on success, -1 if the instance does not exist.
+ * p == NULL acts as a probe: only report whether the instance exists.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR: ADR selects the table entry, CNT the entry count. */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* Stage the tunnel header: copies 4 consecutive 32-bit words from val. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Push the TUN HDR CTRL and DATA shadow registers to hardware. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG: ADR selects the table entry, CNT the entry count; the DATA
+ * setters below stage entry fields until roa_nthw_tun_cfg_flush().
+ */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Stage the TX LAG index field of the selected TUN CFG entry. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Push the TUN CFG CTRL and DATA shadow registers to hardware. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG field setters; take effect after roa_nthw_config_flush(). */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Push the CONFIG shadow register to hardware. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG CFG: ADR selects the table entry, CNT the entry count. */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Push the LAG CFG CTRL and DATA shadow registers to hardware. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/* Cached register/field handles for one ROA module instance. */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR table access */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG table access */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG register and fields */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG CFG table access */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying RPP_LR module handle. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RPP_LR context; returns NULL on failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	/* calloc yields the same zeroed result as malloc + memset */
+	return calloc(1, sizeof(struct rpp_lr_nthw));
+}
+
+/* Scrub and free an RPP_LR context; NULL is accepted and ignored. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the RPP_LR module instance and cache its register/field handles.
+ * Returns 0 on success, -1 if the instance does not exist.
+ * p == NULL acts as a probe: only report whether the instance exists.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the handle queried above (was a redundant second query) */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	/* IFR registers/fields are optional: query calls may yield NULL */
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP: ADR selects the table entry, CNT the entry count. */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Push the RCP CTRL and DATA shadow registers to hardware. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* IFR RCP setters. The IFR handles are queried optionally at init, so the
+ * asserts enforce that callers use these only when the FPGA provides IFR.
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Push the IFR RCP CTRL and DATA shadow registers to hardware. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one RPP_LR module instance. */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	/* RCP table access */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR RCP table access; handles may be NULL (optional registers) */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC module handle. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC context; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	/* calloc yields the same zeroed result as malloc + memset */
+	return calloc(1, sizeof(struct slc_nthw));
+}
+
+/* Scrub and free an SLC context; NULL is accepted and ignored. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the SLC module instance and cache its register/field handles.
+ * Returns 0 on success, -1 if the instance does not exist.
+ * p == NULL acts as a probe: only report whether the instance exists.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the handle queried above (was a redundant second query) */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP: ADR selects the table entry, CNT the entry count; DATA setters stage
+ * entry fields until slc_nthw_rcp_flush().
+ */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Signed offset: implicitly converted to uint32_t by field_set_val32. */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Push the RCP CTRL and DATA shadow registers to hardware. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one SLC FPGA module instance. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc;
+
+	/* RCP control register and its record address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields, written through the setters below. */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: setters stage values; slc_nthw_rcp_flush() writes them to hardware. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug level to the underlying FPGA module model. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC_LR handle; NULL on allocation failure. */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	/* calloc yields the same zeroed object as malloc + memset. */
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Destroy a handle created by slc_lr_nthw_new(); NULL is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub before freeing to make stale-pointer reuse fail fast. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an SLC_LR handle to FPGA module instance n_instance and cache its
+ * RCP register/field handles. With p == NULL only probes for the instance.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL p means "probe only": report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Was "Slc %d" - fixed to name the SLC_LR module correctly. */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; no need for a second lookup. */
+	p->m_slc_lr = p_mod;
+
+	/* RCP: cache control (address/count) and data field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/*
+ * RCP accessors. Setters stage values in register-field shadows; the
+ * values are written to hardware by slc_lr_nthw_rcp_flush().
+ */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	/* Select the RCP record index targeted by the DATA setters. */
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	/* NOTE(review): signed offset passed as a raw 32-bit field value -
+	 * confirm the hardware field interprets it as two's complement.
+	 */
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	/* Commit staged CTRL and DATA values to the FPGA. */
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one SLC_LR FPGA module instance. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc_lr;
+
+	/* RCP control register and its record address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields, written through the setters below. */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: setters stage values; slc_lr_nthw_rcp_flush() commits them. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug level to the underlying FPGA module model. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_CPY handle; NULL on allocation failure. */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	/* calloc yields the same zeroed object as malloc + memset. */
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/* Destroy a handle created by tx_cpy_nthw_new(); NULL is a no-op. */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Release the writer table (free(NULL) is safe), scrub, then free. */
+	free(p->m_writers);
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Register/field identifiers for one TX_CPY writer. The FPGA exposes up
+ * to six identical writers (WRITER0..WRITER5) whose register maps differ
+ * only in these identifier constants, so initialization is table-driven
+ * instead of six unrolled copies of the same lookup code.
+ */
+struct tx_cpy_writer_regs_s {
+	int ctrl, ctrl_adr, ctrl_cnt;
+	int data, data_reader_select, data_dyn, data_ofs, data_len;
+	int data_mask_pointer;
+	int mask_ctrl, mask_ctrl_adr, mask_ctrl_cnt;
+	int mask_data, mask_data_byte_mask;
+};
+
+static const struct tx_cpy_writer_regs_s tx_cpy_writer_regs[] = {
+	{ CPY_WRITER0_CTRL, CPY_WRITER0_CTRL_ADR, CPY_WRITER0_CTRL_CNT,
+	  CPY_WRITER0_DATA, CPY_WRITER0_DATA_READER_SELECT,
+	  CPY_WRITER0_DATA_DYN, CPY_WRITER0_DATA_OFS, CPY_WRITER0_DATA_LEN,
+	  CPY_WRITER0_DATA_MASK_POINTER,
+	  CPY_WRITER0_MASK_CTRL, CPY_WRITER0_MASK_CTRL_ADR,
+	  CPY_WRITER0_MASK_CTRL_CNT,
+	  CPY_WRITER0_MASK_DATA, CPY_WRITER0_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER1_CTRL, CPY_WRITER1_CTRL_ADR, CPY_WRITER1_CTRL_CNT,
+	  CPY_WRITER1_DATA, CPY_WRITER1_DATA_READER_SELECT,
+	  CPY_WRITER1_DATA_DYN, CPY_WRITER1_DATA_OFS, CPY_WRITER1_DATA_LEN,
+	  CPY_WRITER1_DATA_MASK_POINTER,
+	  CPY_WRITER1_MASK_CTRL, CPY_WRITER1_MASK_CTRL_ADR,
+	  CPY_WRITER1_MASK_CTRL_CNT,
+	  CPY_WRITER1_MASK_DATA, CPY_WRITER1_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER2_CTRL, CPY_WRITER2_CTRL_ADR, CPY_WRITER2_CTRL_CNT,
+	  CPY_WRITER2_DATA, CPY_WRITER2_DATA_READER_SELECT,
+	  CPY_WRITER2_DATA_DYN, CPY_WRITER2_DATA_OFS, CPY_WRITER2_DATA_LEN,
+	  CPY_WRITER2_DATA_MASK_POINTER,
+	  CPY_WRITER2_MASK_CTRL, CPY_WRITER2_MASK_CTRL_ADR,
+	  CPY_WRITER2_MASK_CTRL_CNT,
+	  CPY_WRITER2_MASK_DATA, CPY_WRITER2_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER3_CTRL, CPY_WRITER3_CTRL_ADR, CPY_WRITER3_CTRL_CNT,
+	  CPY_WRITER3_DATA, CPY_WRITER3_DATA_READER_SELECT,
+	  CPY_WRITER3_DATA_DYN, CPY_WRITER3_DATA_OFS, CPY_WRITER3_DATA_LEN,
+	  CPY_WRITER3_DATA_MASK_POINTER,
+	  CPY_WRITER3_MASK_CTRL, CPY_WRITER3_MASK_CTRL_ADR,
+	  CPY_WRITER3_MASK_CTRL_CNT,
+	  CPY_WRITER3_MASK_DATA, CPY_WRITER3_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER4_CTRL, CPY_WRITER4_CTRL_ADR, CPY_WRITER4_CTRL_CNT,
+	  CPY_WRITER4_DATA, CPY_WRITER4_DATA_READER_SELECT,
+	  CPY_WRITER4_DATA_DYN, CPY_WRITER4_DATA_OFS, CPY_WRITER4_DATA_LEN,
+	  CPY_WRITER4_DATA_MASK_POINTER,
+	  CPY_WRITER4_MASK_CTRL, CPY_WRITER4_MASK_CTRL_ADR,
+	  CPY_WRITER4_MASK_CTRL_CNT,
+	  CPY_WRITER4_MASK_DATA, CPY_WRITER4_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER5_CTRL, CPY_WRITER5_CTRL_ADR, CPY_WRITER5_CTRL_CNT,
+	  CPY_WRITER5_DATA, CPY_WRITER5_DATA_READER_SELECT,
+	  CPY_WRITER5_DATA_DYN, CPY_WRITER5_DATA_OFS, CPY_WRITER5_DATA_LEN,
+	  CPY_WRITER5_DATA_MASK_POINTER,
+	  CPY_WRITER5_MASK_CTRL, CPY_WRITER5_MASK_CTRL_ADR,
+	  CPY_WRITER5_MASK_CTRL_CNT,
+	  CPY_WRITER5_MASK_DATA, CPY_WRITER5_MASK_DATA_BYTE_MASK },
+};
+
+/*
+ * Bind a TX_CPY handle to FPGA module instance n_instance, allocate its
+ * writer table (NT_TX_CPY_WRITERS entries) and cache register/field
+ * handles for up to six writers. With p == NULL only probes for the
+ * instance. Returns 0 on success, -1 on missing instance, zero writers,
+ * or allocation failure.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL p means "probe only": report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; no need for a second lookup. */
+	p->m_tx_cpy = p_mod;
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* A non-zero variant adds per-writer mask registers; on the base
+	 * variant the mask handles stay NULL (from calloc).
+	 */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/* The register map defines at most 6 writers; if the product
+	 * parameter reports more, the extra table entries remain zeroed,
+	 * matching the previous unrolled initialization.
+	 */
+	for (unsigned int i = 0;
+			i < p->m_writers_cnt &&
+			i < sizeof(tx_cpy_writer_regs) / sizeof(tx_cpy_writer_regs[0]);
+			i++) {
+		const struct tx_cpy_writer_regs_s *r = &tx_cpy_writer_regs[i];
+		struct tx_cpy_writers_s *w = &p->m_writers[i];
+
+		w->mp_writer_ctrl = module_get_register(p->m_tx_cpy, r->ctrl);
+		w->mp_writer_ctrl_addr =
+			register_get_field(w->mp_writer_ctrl, r->ctrl_adr);
+		w->mp_writer_ctrl_cnt =
+			register_get_field(w->mp_writer_ctrl, r->ctrl_cnt);
+		w->mp_writer_data = module_get_register(p->m_tx_cpy, r->data);
+		w->mp_writer_data_reader_select =
+			register_get_field(w->mp_writer_data,
+					   r->data_reader_select);
+		w->mp_writer_data_dyn =
+			register_get_field(w->mp_writer_data, r->data_dyn);
+		w->mp_writer_data_ofs =
+			register_get_field(w->mp_writer_data, r->data_ofs);
+		w->mp_writer_data_len =
+			register_get_field(w->mp_writer_data, r->data_len);
+		if (variant != 0) {
+			w->mp_writer_data_mask_pointer =
+				register_get_field(w->mp_writer_data,
+						   r->data_mask_pointer);
+			w->mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, r->mask_ctrl);
+			w->mp_writer_mask_ctrl_addr =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   r->mask_ctrl_adr);
+			w->mp_writer_mask_ctrl_cnt =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   r->mask_ctrl_cnt);
+			w->mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, r->mask_data);
+			w->mp_writer_mask_data_byte_mask =
+				register_get_field(w->mp_writer_mask_data,
+						   r->mask_data_byte_mask);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Per-writer accessors. 'index' selects one of the writers discovered in
+ * tx_cpy_nthw_init(); values are staged in register-field shadows and
+ * written to hardware by the matching *_flush() call. The mask_* variants
+ * assert their field handles, which are only populated when the TX_CPY
+ * variant product parameter is non-zero.
+ */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Commit the staged CTRL and DATA values of one writer to hardware. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Commit the staged mask CTRL and mask DATA values of one writer. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one TX_CPY writer instance. The
+ * mask_* members are only populated when the TX_CPY variant product
+ * parameter is non-zero; otherwise they stay NULL.
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer;
+
+	nt_register_t *mp_writer_mask_ctrl;
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Handle for one TX_CPY FPGA module instance and its writer table. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_cpy;
+
+	/* Writer table allocated by tx_cpy_nthw_init(). */
+	unsigned int m_writers_cnt;
+	struct tx_cpy_writers_s *m_writers;
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+/* Per-writer setters stage values; the *_flush() calls commit them. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+/* Mask accessors: valid only on variant builds (handles asserted). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug level to the underlying FPGA module model. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_INS handle; NULL on allocation failure. */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	/* calloc yields the same zeroed object as malloc + memset. */
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Destroy a handle created by tx_ins_nthw_new(); NULL is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub before freeing to make stale-pointer reuse fail fast. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TX_INS handle to FPGA module instance n_instance and cache its
+ * RCP register/field handles. With p == NULL only probes for the instance.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL p means "probe only": report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; no need for a second lookup. */
+	p->m_tx_ins = p_mod;
+
+	/* Cache RCP control (address/count) and data field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/*
+ * RCP accessors. Setters stage values in register-field shadows; the
+ * values are written to hardware by tx_ins_nthw_rcp_flush().
+ */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	/* Select the RCP record index targeted by the DATA setters. */
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	/* Commit staged CTRL and DATA values to the FPGA. */
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one TX_INS FPGA module instance. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_ins;
+
+	/* RCP control register and its record address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields, written through the setters below. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: setters stage values; tx_ins_nthw_rcp_flush() commits them. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized tx_rpl_nthw context.
+ * Returns NULL on allocation failure; the caller owns the memory and
+ * releases it with tx_rpl_nthw_delete().
+ */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct tx_rpl_nthw));
+}
+
+/*
+ * Release a tx_rpl_nthw context previously returned by tx_rpl_nthw_new().
+ * Safe to call with NULL.  The context is scrubbed before being freed.
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the context to TX_RPL FPGA module instance @n_instance and resolve
+ * all register/field handles.
+ * If @p is NULL, only probe for the module: return 0 when present, -1
+ * when absent.  Otherwise return 0 on success or -1 when the instance
+ * does not exist.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of a second, redundant lookup */
+	p->m_tx_rpl = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP bank: select an entry, stage shadow field values, then flush. */
+
+/* Select the RCP entry to access (CTRL address field). */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* Set the RCP access count (CTRL count field). */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Stage the RCP DYN field in the shadow data register. */
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Stage the RCP OFS field in the shadow data register. */
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Stage the RCP LEN field in the shadow data register. */
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Stage the RCP RPL_PTR field in the shadow data register. */
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+/* Stage the RCP EXT_PRIO field in the shadow data register. */
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Write the staged RCP control and data registers to the FPGA. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT bank: select an entry, stage shadow field values, then flush. */
+
+/* Select the EXT entry to access (CTRL address field). */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+/* Set the EXT access count (CTRL count field). */
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+/* Stage the EXT RPL_PTR field in the shadow data register. */
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+/* Write the staged EXT control and data registers to the FPGA. */
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL bank: select an entry, stage shadow field values, then flush. */
+
+/* Select the RPL entry to access (CTRL address field). */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+/* Set the RPL access count (CTRL count field). */
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* Stage the RPL VALUE field; @val must point to 4 x 32-bit words. */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+/* Write the staged RPL control and data registers to the FPGA. */
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handles for one TX_RPL (TX replacer) FPGA module instance.
+ * Populated by tx_rpl_nthw_init(); the register/field pointers are owned
+ * by the FPGA model, not by this struct.
+ */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number (cast from n_instance) */
+	nt_fpga_t *mp_fpga;	/* backing FPGA model */
+
+	nt_module_t *m_tx_rpl;	/* TX_RPL module handle */
+
+	/* RCP bank register/field handles */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* EXT bank register/field handles */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* RPL bank register/field handles */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-21 11:34   ` [PATCH v5 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-21 11:34   ` Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 96d8c7b49d..e253265091 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a prebuilt tunnel header into the ROA TUNHDR table at @index.
+ * 4 words per group: 4 groups (64 bytes) for IPv4, 8 groups (128 bytes)
+ * for IPv6.  Within each group the words are written in reverse order,
+ * and each word is converted with ntohl() before the backend write.
+ * Returns 0 on success, non-zero if any backend write failed.
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program the ROA tunnel configuration (TUNCFG) record at @index from the
+ * packed @color_actions bit field, and mirror the relevant parts into the
+ * companion IOA "EPP" entry.  Returns 0 on success, -1 on an unsupported
+ * TX-port combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4: write the pre-calculated IP header checksum */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 disables the bypass path when only a recirc port is used */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* both PHYs: transmit on PHY0 and recirculate for PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program the IOA recipe at @index from the packed @color_actions bits:
+ * tunnel (VxLAN) pop, VLAN pop, VLAN push (TPID select + TCI split into
+ * VID/DEI/PCP), and optional RX queue override.  Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* TCI layout: VID 11:0, DEI 12, PCP 15:13 */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/*
+ * Pack mark/IOA/ROA indices into the pre-4.1 color_action layout; the
+ * field widths and the layout tag in bits 31:30 are selected at compile
+ * time by MAX_COLOR_FLOW_STATS (see the layout comment above).
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/* Pack mark/IOA/ROA/QSL-HSH indices into the current color_action layout. */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	/* 1U << 31: "1 << 31" overflows a signed int, which is undefined */
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Flag (bit 50) that a new recirculate port was allocated; it needs a default queue. */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+/* Read the new-recirc-port flag (bit 50). */
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Set the tunnel IP type (bit 24): 0 = IPv4, 1 = IPv6 (TUN_IPV4/TUN_IPV6). */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+/* Read the tunnel IP type (bit 24). */
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Store a pre-calculated tunnel IP header checksum (bits 49:34). */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+/* Read the pre-calculated tunnel IP header checksum (bits 49:34). */
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Store the tunnel header length in bytes (bits 33:26); 0 means no tunnel. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+/* Read the tunnel header length (bits 33:26). */
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* Store the TX port selector (bits 23:20), biased by ROA_TX_PHY0. */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+/* Read the TX port selector (bits 23:20). */
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Store the tunnel type (bits 19:16). */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+/* Read the tunnel type (bits 19:16). */
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Store the recirculate port (bits 7:0) and set the recirculate flag (bit 25). */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Return the recirculate port (bits 7:0), or -1 if bit 25 is not set. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Store the recirc-bypass port (bits 15:8); when set it overrides recirc. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+/* Read the recirc-bypass port (bits 15:8). */
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
+/* Store queue @hb (bits 23:16) and set the queue-override flag (bit 31). */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	/*
+	 * 1ULL is required: the previous "1 << 31" was a negative int that
+	 * sign-extended to bits 63:31 when widened to uint64_t, corrupting
+	 * every action field stored in the upper half of the word.
+	 */
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/* Return the override queue (bits 23:16), or -1 if bit 31 is not set. */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	/*
+	 * 1ULL is required: the previous "1 << 31" sign-extended to a
+	 * 0xFFFFFFFF80000000 mask, so ANY of bits 63:31 made the test pass
+	 * instead of only the queue-override flag in bit 31.
+	 */
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* Set the pop-VxLAN flag (bit 28). */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Set the pop-VLAN flag (bit 29). */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1AD (QinQ) TPID in the TPID-select field (bits 27:24). */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+/* Read the TPID-select field (bits 27:24). */
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Set the push-VLAN flag (bit 30) and store the TCI (bits 15:0). */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set the push-VLAN flag (bit 30) and store the PCP part of the TCI (bits 15:13). */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+/* Read the stored VLAN TCI (bits 15:0). */
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/*
 * Dispatch table over all supported HW module caches.  Each entry bundles
 * the module's short name (used only in log messages) with its version-
 * independent allocate/free/reset handlers and a presence probe, so the
 * backend init/reset/done loops below can treat every module uniformly.
 */
static const struct {
	const char *name;	/* module short-name for diagnostics */
	int (*allocate)(struct flow_api_backend_s *be);	/* alloc module cache */
	void (*free)(struct flow_api_backend_s *be);	/* release module cache */
	int (*reset)(struct flow_api_backend_s *be);	/* set HW defaults */
	bool (*present)(struct flow_api_backend_s *be);	/* module in this FPGA? */
} module[] = {
	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
		hw_mod_cat_present
	},
	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
		hw_mod_km_present
	},
	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
		hw_mod_flm_present
	},
	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
		hw_mod_hsh_present
	},
	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
		hw_mod_hst_present
	},
	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
		hw_mod_qsl_present
	},
	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
		hw_mod_slc_present
	},
	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
	},
	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
		hw_mod_pdb_present
	},
	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
		hw_mod_ioa_present
	},
	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
		hw_mod_roa_present
	},
	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
		hw_mod_rmc_present
	},
	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
		hw_mod_tpe_present
	},
};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
/*
 * Re-zero the entire cache area previously set up by callocate_mod().
 * NOTE(review): assumes mod->base is non-NULL, i.e. the allocation
 * succeeded — confirm all callers check callocate_mod()'s result first.
 */
void zero_module_cache(struct common_func_s *mod)
{
	memset(mod->base, 0, mod->allocated_size);
}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
/* Log and report an out-of-range table index (error code -2). */
static inline int error_index_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
	return -2;
}

/* Log and report an out-of-range word offset within an entry (-3). */
static inline int error_word_off_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
	return -3;
}

/* Log and report an unsupported module version (-4). */
static inline int error_unsup_ver(const char *func, const char *mod, int ver)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}

/* Log and report a field that this module version does not have (-5). */
static inline int error_unsup_field(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
		func);
	return -5;
}

/*
 * Log and report an exhausted HW resource.
 * NOTE(review): shares error code -4 with error_unsup_ver() — confirm
 * callers do not need to distinguish the two.
 */
static inline int error_resource_count(const char *func, const char *resource,
	const char *mod, int ver)
{
	NT_LOG(INF, FILTER,
	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
	       "%s ver %i.%i\n",
	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
/*
 * Copy between a cached register field and a caller-supplied value:
 * get != 0 reads the cache into *val, get == 0 writes *val into the cache.
 */
static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = *cached_val;
		return;
	}
	*cached_val = *val;
}
+
/*
 * Like get_set(), but for a signed cached field: the value is reinterpreted
 * (two's-complement) between int32_t cache and uint32_t caller value.
 */
static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = (uint32_t)*cached_val;
		return;
	}
	*cached_val = (int32_t)*val;
}
+
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
/*
 * Report whether entries @idx and @cmp_idx of a module register table
 * have byte-identical contents.  Returns 1 when equal (and distinct),
 * 0 when not, or a negative error code for non-get access or an
 * out-of-range @cmp_idx.
 */
static inline int do_compare_indexes(void *be_module_reg,
	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
	unsigned int nb_elements, int get, const char *func)
{
	const uint8_t *base = (const uint8_t *)be_module_reg;

	if (!get)
		return error_unsup_field(func);
	if (cmp_idx >= nb_elements)
		return error_index_too_large(func);
	if (idx == cmp_idx)
		return 0;
	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
		      type_size) == 0 ? 1 : 0;
}
+
/* Return 1 when any of the @n bytes at @addr is non-zero, else 0. */
static inline int is_non_zero(const void *addr, size_t n)
{
	const uint8_t *bytes = (const uint8_t *)addr;

	while (n--) {
		if (*bytes++)
			return 1;
	}
	return 0;
}
+
/* Return 1 when all @n bytes at @addr are 0xff, else 0 (1 for n == 0). */
static inline int is_all_bits_set(const void *addr, size_t n)
{
	const uint8_t *bytes = (const uint8_t *)addr;

	while (n--) {
		if (*bytes++ != 0xff)
			return 0;
	}
	return 1;
}
+
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
/*
 * Frame offset selectors the hardware can resolve per packet.  Plain
 * values select a parsed header position — DYN_* for the outer headers,
 * DYN_TUN_* for the headers inside a tunnel.  Values carrying the
 * SWX_INFO bit select sideband metadata rather than a frame offset
 * (names expanded from abbreviations; semantics per HW spec).
 */
enum frame_offs_e {
	DYN_SOF = 0,
	DYN_L2 = 1,
	DYN_FIRST_VLAN = 2,
	DYN_MPLS = 3,
	DYN_L3 = 4,
	DYN_ID_IPV4_6 = 5,
	DYN_FINAL_IP_DST = 6,
	DYN_L4 = 7,
	DYN_L4_PAYLOAD = 8,
	DYN_TUN_PAYLOAD = 9,
	DYN_TUN_L2 = 10,
	DYN_TUN_VLAN = 11,
	DYN_TUN_MPLS = 12,
	DYN_TUN_L3 = 13,
	DYN_TUN_ID_IPV4_6 = 14,
	DYN_TUN_FINAL_IP_DST = 15,
	DYN_TUN_L4 = 16,
	DYN_TUN_L4_PAYLOAD = 17,
	DYN_EOF = 18,
	DYN_L3_PAYLOAD_END = 19,
	DYN_TUN_L3_PAYLOAD_END = 20,
	/* sideband selectors (SWX_INFO bit set) */
	SB_VNI = SWX_INFO | 1,
	SB_MAC_PORT = SWX_INFO | 2,
	SB_KCC_ID = SWX_INFO | 3
};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
/*
 * Fields shared by every module cache struct: the register-layout version
 * in use, the base of the cache area set up by callocate_mod(), its size
 * in bytes, and a debug flag.  Kept as a macro so each module struct can
 * embed the same leading members and still be viewed as a
 * struct common_func_s via CAST_COMMON().
 */
#define COMMON_FUNC_INFO_S         \
	int ver;                   \
	void *base;                \
	unsigned int allocated_size; \
	int debug

/* Common view of any module cache (see CAST_COMMON). */
struct common_func_s {
	COMMON_FUNC_INFO_S;
};
+
/*
 * CAT (categorizer) module cache: the common bookkeeping, the resource
 * dimensions reported by the backend, and one union arm per supported
 * register-layout version (v18/v21/v22).
 */
struct cat_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_cat_funcs;
	uint32_t nb_flow_types;
	uint32_t nb_pm_ext;
	uint32_t nb_len;
	uint32_t kcc_size;
	uint32_t cts_num;
	uint32_t kcc_banks;
	uint32_t kcc_id_bit_size;
	uint32_t kcc_records;
	uint32_t km_if_count;
	int32_t km_if_m0;
	int32_t km_if_m1;

	/* exactly one arm is active, selected by the common "ver" field */
	union {
		struct hw_mod_cat_v18_s v18;
		struct hw_mod_cat_v21_s v21;
		struct hw_mod_cat_v22_s v22;
	};
};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
/*
 * KM (key matcher) module cache: CAM/TCAM dimensions reported by the
 * backend plus recipe mask word sizes, and the v7 register-layout arm.
 */
struct km_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_cam_banks;
	uint32_t nb_cam_record_words;
	uint32_t nb_cam_records;
	uint32_t nb_tcam_banks;
	uint32_t nb_tcam_bank_width;
	/* not read from backend, but rather set using version */
	uint32_t nb_km_rcp_mask_a_word_size;
	uint32_t nb_km_rcp_mask_b_word_size;
	union {
		struct hw_mod_km_v7_s v7;
	};
};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
/*
 * HST module cache (header-strip recipes — cf. HW_HST_RCP_STRIP_MODE):
 * number of recipe categories and the v2 register-layout arm.
 */
struct hst_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_hst_rcp_categories;
	union {
		struct hw_mod_hst_v2_s v2;
	};
};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
/*
 * FLM module cache (flow learn/match — cf. HW_FLM_FLOW_LRN_DATA_V17):
 * capacity and sizing parameters reported by the backend plus one union
 * arm per supported register-layout version (v17/v20).
 */
struct flm_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_size_mb;	/* NOTE(review): presumably SDRAM size in MB — confirm */
	uint32_t nb_entry_size;
	uint32_t nb_variant;
	uint32_t nb_prios;
	uint32_t nb_pst_profiles;
	union {
		struct hw_mod_flm_v17_s v17;
		struct hw_mod_flm_v20_s v20;
	};
};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM (Flow Matcher) module API.
+ * Naming convention (applies to all hw_mod_* groups in this header):
+ *   *_present/_alloc/_free/_reset manage the module's shadow state,
+ *   *_set/_get access a shadow field selected by the hw_flm_e id,
+ *   *_flush presumably writes the shadow to the device and *_update reads
+ *   it back via the backend ops — confirm against the backend callbacks.
+ * All functions return 0 on success / non-zero on error unless void/bool.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* Load registers: BIN/PPS/LPS/APS groups (see HW_FLM_LOAD_* field ids). */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST/RCP are indexed tables: flush covers [start_idx, start_idx + count). */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* Multi-word variant: value points to an array (used for HW_FLM_RCP_MASK). */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/inform/status data streams; word_cnt is the number of 32-bit words. */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH (hash) module instance: capability count plus version-specific state. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of hash recipes available */
+	union {
+		struct hw_mod_hsh_v5_s v5; /* module version 5 layout */
+	};
+};
+
+/*
+ * HSH field/function identifiers.
+ * Values below FIELD_START_INDEX select whole-record operations
+ * (preset/compare/find); values from FIELD_START_INDEX select single
+ * recipe fields for the set/get accessors.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+/* HSH module API; word_off appears to select a 32-bit word inside
+ * multi-word fields (e.g. word mask/seed) — confirm in implementation.
+ */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL (queue select) module instance: capability counts plus state. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of QSL recipes */
+	uint32_t nb_qst_entries; /* number of queue select table entries */
+	union {
+		struct hw_mod_qsl_v7_s v7; /* module version 7 layout */
+	};
+};
+
+/*
+ * QSL field/function identifiers: RCP (recipe), QST (queue select table),
+ * QEN (queue enable) and UNMQ (unmatched-packet queue) groups.
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+/* QSL module API: indexed accessors for RCP/QST/QEN/UNMQ tables. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC (slicer) module instance state. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1; /* module version 1 layout */
+	};
+};
+
+/* SLC field/function identifiers (recipe operations and recipe fields). */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+/* SLC module API. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC-LR (slicer, local retransmit variant) module instance state. */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2; /* module version 2 layout */
+	};
+};
+
+/* SLC-LR field/function identifiers; mirrors hw_slc_e for the LR variant. */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+/* SLC-LR module API. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB (packet descriptor builder) module instance state. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of PDB recipes */
+
+	union {
+		struct hw_mod_pdb_v9_s v9; /* module version 9 layout */
+	};
+};
+
+/* PDB field/function identifiers (recipe fields plus global CONFIG). */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/* PDB module API; note CONFIG is write-only here (set/flush, no getter). */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA (ingress/egress action) module instance state. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of IOA recipes */
+	uint32_t nb_roa_epp_entries; /* ROA egress-port-push entries */
+	union {
+		struct hw_mod_ioa_v4_s v4; /* module version 4 layout */
+	};
+};
+
+/* IOA field/function identifiers (RCP, custom-TPID CONFIG, ROA EPP). */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+/* IOA module API: recipe table, custom-TPID config and ROA EPP table. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA (re-order/recirculate) module instance state. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel categories */
+	uint32_t nb_lag_entries; /* number of LAG config entries */
+	union {
+		struct hw_mod_roa_v6_s v6; /* module version 6 layout */
+	};
+};
+
+/*
+ * ROA field/function identifiers: tunnel header/config, global config,
+ * LAG config, and ingress/recirculate drop counters.
+ */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/* ROA module API: tunnel header/config tables, global/LAG config and
+ * packet/byte drop counter access.
+ */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC (RX MAC control) module instance state. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3; /* module version 1.3 layout */
+	};
+};
+
+/* RMC field identifiers; unlike the other modules this enum has no
+ * function section — all values are fields starting at FIELD_START_INDEX.
+ */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+/* RMC module API (single control register group). */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* TPE (TX packet editor) module instance: capability counts plus state
+ * for either of the two supported module versions.
+ */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of TPE recipes */
+	uint32_t nb_ifr_categories; /* IFR (fragmentation) categories */
+	uint32_t nb_cpy_writers; /* TX copy writers */
+	uint32_t nb_rpl_depth; /* replace table depth */
+	uint32_t nb_rpl_ext_categories; /* replace extension categories */
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/*
+ * TPE field/function identifiers, grouped by sub-block: RPP, IFR, INS
+ * (insert), RPL (replace), CPY (copy), HFU (header field update) and
+ * CSU (checksum update).
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/* TPE module API: one flush/set/get triple per sub-block table.
+ * hw_mod_tpe_rpl_rpl_set takes a pointer because replace values are
+ * multi-word (see HW_TPE_RPL_RPL_VALUE).
+ */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug modes; bit-mask style values. WRITE presumably enables
+ * tracing of register writes — confirm against set_debug_mode backends.
+ */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operations table implemented by each hardware (or simulation)
+ * backend. The leading group queries capability counts; each module then
+ * contributes a present/version pair plus the flush/update callbacks used
+ * by the hw_mod_* wrappers above. All callbacks take the backend's opaque
+ * device handle as first argument.
+ */
+struct flow_api_backend_ops {
+	int version; /* ops interface version */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	/* virtual queue management */
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level backend handle: the opaque device pointer, the ops table it
+ * was initialized with, per-module shadow state, and cached NIC/resource
+ * capability values.
+ */
+struct flow_api_backend_s {
+	void *be_dev; /* opaque device handle passed to every ops callback */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: init binds the ops table and opaque device to the
+ * handle; reset restores all module shadows; done releases resources.
+ * All return 0 on success.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+/*
+ * Free FPGA resource pools managed by the flow API; other FPGA memory
+ * lists are linked to one of these and implicitly follow them.
+ * RES_COUNT and RES_INVALID are sentinels, not real resources.
+ */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT, /* number of real resource types */
+	RES_INVALID
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD,
+	KM_USE_EXTRACTOR_SWORD,
+};
+
+struct match_elem_s {
+	enum extractor_e extr;
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4];
+	uint32_t e_mask[4];
+
+	int extr_start_offs_id;
+	int8_t rel_offs;
+	uint32_t word_len;
+};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+/* 64-bit KCC CAM key; also accessible as key64/key32 via the union below */
+struct kcc_key_s {
+	uint64_t sb_data : 32; /* sideband data (VLAN TCI / VXLAN VNI / none) */
+	uint64_t sb_type : 8; /* sideband type discriminator */
+	uint64_t cat_cfn : 8;
+	uint64_t port : 16;
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+/* One KCC CAM flow definition plus its bank/record placement state */
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64;
+		uint32_t key32[2];
+		struct kcc_key_s key;
+	};
+	uint32_t km_category;
+	uint32_t id; /* unique KCC ID, KCC_ID_INVALID when unassigned */
+
+	uint8_t *kcc_unique_ids; /* shared allocation bitmap for IDs */
+
+	int flushed_to_target; /* nonzero once programmed into NIC HW */
+	int record_indexes[MAX_BANKS]; /* hash-derived record per bank */
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d;
+	uint32_t user_port_id;
+	uint8_t len;
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc;
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+/* Partial matches needing special SW processing (see nic_flow_def) */
+enum special_partial_match_e {
+	SPCIAL_MATCH_NONE, /* NOTE(review): likely a typo for SPECIAL_MATCH_NONE;
+			    * renaming would break existing users, left as-is
+			    */
+	SPECIAL_MATCH_LACP,
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions;
+	uint64_t ioa_actions;
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select;
+		uint32_t dyn;
+		uint32_t ofs;
+		uint32_t len;
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable;
+	uint8_t ttl_sub_ipv4;
+	uint8_t ttl_sub_outer;
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
+struct flow_handle {
+	enum flow_handle_type type;
+
+	struct flow_eth_dev *dev;
+	struct flow_handle *next;
+	struct flow_handle *prev;
+
+	union {
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		struct {
+			uint32_t flm_data[10];
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			struct flow_handle *flm_owner;
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+/* Per-translated-group bookkeeping: use count and where to clear the mapping */
+struct group_lookup_entry_s {
+	uint64_t ref_counter;
+	uint32_t *reverse_lookup; /* points back into translation_table */
+};
+
+struct group_handle_s {
+	uint32_t group_count;
+
+	/* [owner_id][group_in] -> translated group (0 means unassigned) */
+	uint32_t *translation_table;
+
+	/* one entry per translated group */
+	struct group_lookup_entry_s *lookup_entries;
+};
+
+/*
+ * Allocate the group-translation tables for 'group_count' groups.
+ * Returns 0 on success, -1 on allocation failure; no partial state is
+ * left behind and *handle is NULL on failure.
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	if (*handle == NULL)
+		return -1;
+	group_handle = *handle;
+
+	group_handle->group_count = group_count;
+	/* size_t product avoids the uint32_t overflow of the original cast */
+	group_handle->translation_table =
+		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	if (group_handle->translation_table == NULL ||
+			group_handle->lookup_entries == NULL) {
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+		free(group_handle);
+		*handle = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Free all group-translation state; safe to call with *handle == NULL. */
+int flow_group_handle_destroy(void **handle)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)*handle;
+
+	if (group_handle != NULL) {
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+		free(group_handle);
+		*handle = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate (owner_id, group_in) to a NIC-unique group number, allocating
+ * a free translated group on first use and reference-counting repeated
+ * lookups of the same mapping.
+ * Returns 0 with the result in *group_out, or -1 on invalid arguments or
+ * when all translated groups are in use.
+ */
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	/*
+	 * NOTE(review): the table holds group_count * OWNER_ID_COUNT entries
+	 * but is indexed owner_id * OWNER_ID_COUNT + group_in, so rows of
+	 * different owners overlap if group_count > OWNER_ID_COUNT — confirm
+	 * group_count <= OWNER_ID_COUNT is an invariant.
+	 */
+	table_ptr = &group_handle->translation_table[owner_id * OWNER_ID_COUNT +
+				 group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		/* First use of this mapping: scan for a free translated group */
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			/* All translated groups exhausted */
+			return -1;
+		}
+	} else {
+		/* Existing mapping: take another reference */
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+/*
+ * Drop one reference on a translated group; when the count reaches zero
+ * the owner/group mapping is cleared so the slot can be reused.
+ * Returns 0 (group 0 is never translated), or -1 on invalid arguments.
+ */
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *entry;
+
+	if (!group_handle || translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	entry = &group_handle->lookup_entries[translated_group];
+
+	if (entry->reverse_lookup == NULL || entry->ref_counter == 0)
+		return 0;
+
+	entry->ref_counter -= 1;
+	if (entry->ref_counter == 0) {
+		*entry->reverse_lookup = 0;
+		entry->reverse_lookup = NULL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+/* Fixed bit permutation used by the hash mixing step. */
+static uint32_t shuffle(uint32_t x)
+{
+	uint32_t r = (x & 0x00000002) << 29;
+
+	r |= (x & 0xAAAAAAA8) >> 3;
+	r |= (x & 0x15555555) << 3;
+	r |= (x & 0x40000000) >> 29;
+	return r;
+}
+
+/* Rotate right by s with the vacated bits filled from ~x (0 < s < 32). */
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+	uint32_t lo = x >> s;
+	uint32_t hi = (~x) << (32 - s);
+
+	return lo | hi;
+}
+
+/* Non-linear combination of x and y built from inverting rotates. */
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	uint32_t a = ror_inv(x, 15);
+	uint32_t b = ror_inv(x, 13);
+	uint32_t c = ror_inv(y, 3);
+	uint32_t d = ror_inv(y, 27);
+	uint32_t sel = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
+		       (a & ~c & ~b & d) | (~a & c & b & ~d) |
+		       (~a & c & ~b & d) | (~a & ~c & b & d);
+
+	return x ^ y ^ sel;
+}
+
+/* One 32-bit mixing round: combine then permute. */
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	uint32_t c = combine(x, y);
+
+	return shuffle(c);
+}
+
+/*
+ * 64-bit variants of the inverting rotate: both 32-bit lanes of x are
+ * rotated in parallel; m marks the lane bits vacated by the shift.
+ */
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t m = 0xE0000000E0000000ULL;
+	uint64_t r = (x >> 3) | m;
+
+	return r ^ ((x << 29) & m);
+}
+
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t m = 0xFFF80000FFF80000ULL;
+	uint64_t r = (x >> 13) | m;
+
+	return r ^ ((x << 19) & m);
+}
+
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t m = 0xFFFE0000FFFE0000ULL;
+	uint64_t r = (x >> 15) | m;
+
+	return r ^ ((x << 17) & m);
+}
+
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+	uint64_t r = (x >> 27) | m;
+
+	return r ^ ((x << 5) & m);
+}
+
+/* 64-bit shuffle: the 32-bit permutation applied to both lanes at once. */
+static uint64_t shuffle64(uint64_t x)
+{
+	uint64_t r = (x & 0x0000000200000002) << 29;
+
+	r |= (x & 0xAAAAAAA8AAAAAAA8) >> 3;
+	r |= (x & 0x1555555515555555) << 3;
+	r |= (x & 0x4000000040000000) >> 29;
+	return r;
+}
+
+/* Pack two 32-bit words into one 64-bit value (x in the high lane). */
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	uint64_t hi = (uint64_t)x << 32;
+
+	return hi | y;
+}
+
+/* 64-bit lane-parallel counterpart of combine(). */
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	uint64_t a = ror_inv15(x);
+	uint64_t b = ror_inv13(x);
+	uint64_t c = ror_inv3(y);
+	uint64_t d = ror_inv27(y);
+	uint64_t sel = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
+		       (a & ~c & ~b & d) | (~a & c & b & ~d) |
+		       (~a & c & ~b & d) | (~a & ~c & b & d);
+
+	return x ^ y ^ sel;
+}
+
+/* One 64-bit mixing round: combine then permute. */
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	uint64_t c = combine64(x, y);
+
+	return shuffle64(c);
+}
+
+/* Reduce a 16-word key to one 32-bit hash via the binary mixing tree below. */
+static uint32_t calc16(const uint32_t key[16])
+{
+	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
+	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
+	/*   0       1       2       3       4       5       6       7     Layer 1   */
+	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
+	/*       0               1               2               3         Layer 2   */
+	/*        \______.______/                 \______.______/                    */
+	/*               0                               1                 Layer 3   */
+	/*                \______________.______________/                            */
+	/*                               0                                 Layer 4   */
+	/*                              / \                                          */
+	/*                              \./                                          */
+	/*                               0                                 Layer 5   */
+	/*                              / \                                          */
+	/*                              \./                                Layer 6   */
+	/*                             value                                         */
+
+	uint64_t z;
+	uint32_t x;
+
+	/* Layers 0-3: pair words i and i+8, then mix pairwise down to one 64-bit value */
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	/* Layers 4-6: fold the two 32-bit lanes into the final hash */
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+/*
+ * Hash the 16-word key and derive one record index per CAM bank.
+ * result[] receives hsh->banks entries, each masked to cam_records_bw
+ * bits; the full 32-bit hash value is returned.
+ */
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	/* Widen the 32-bit hash when the banks together consume more bits */
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+/*
+ * Initialize the hasher for a CAM of 'banks' banks with 'nb_records'
+ * records each. The per-bank index width is the number of bits needed
+ * to address nb_records records, i.e. the bit width of nb_records - 1.
+ *
+ * Computed with an integer loop instead of (int)(log2(nb_records - 1) + 1):
+ * the result is identical for nb_records >= 2, exact without floating
+ * point, and nb_records <= 1 now yields width 0 instead of the undefined
+ * (int)log2(0) cast. This also fixes the TESTING path, which referenced
+ * the nonexistent variable '_banks' and did not compile when enabled.
+ */
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	int width = 0;
+
+	for (unsigned int v = (unsigned int)(nb_records - 1); v != 0; v >>= 1)
+		width++;
+
+	hsh->banks = banks;
+	hsh->cam_records_bw = width;
+	hsh->cam_records_bw_mask = (1U << width) - 1;
+	hsh->cam_bw = banks * width;
+
+#ifdef TESTING
+	int res = hash_test(hsh, banks, width);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+/*
+ * Self-test: known-answer check of gethash() plus recomputation of the
+ * first three per-bank indexes directly from the returned 32-bit hash.
+ * Returns the number of mismatches (0 == pass).
+ * Note: only inval[0..3] carry initializers; words 4-15 are zero.
+ */
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65; /* expected hash for inval */
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	/* Rebuild the widened hash exactly as gethash() does internally */
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner;
+	int ref_cnt;
+};
+
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * Attach (and on first call allocate) the per-NIC KCC-CAM bookkeeping.
+ * A single allocation is carved into four consecutive regions:
+ *   cam_dist       : CAM occupancy/ownership table (BE_CAM_ENTRIES bytes)
+ *   cuckoo_moves   : one uint32_t statistics counter
+ *   kcc_unique_ids : unique-ID allocation bitmap (BE_UNIQUE_IDS_SIZE bytes)
+ *   hsh            : hasher state
+ * NOTE(review): the calloc() result is not checked — an allocation
+ * failure dereferences NULL below; confirm how callers handle OOM.
+ */
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+/* Release the per-NIC KCC-CAM bookkeeping and clear the handle. */
+void kcc_free_ndev_resource_management(void **handle)
+{
+	void *mgr = *handle;
+
+	*handle = NULL;
+	if (mgr == NULL)
+		return;
+
+	free(mgr);
+	NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	/* all-ones sideband data with type 0 marks "no sideband" */
+	kcc->key.sb_type = 0;
+	kcc->key.sb_data = 0xffffffff;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	uint32_t sb = (uint32_t)tpid << 16;
+
+	sb |= (uint32_t)(vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	kcc->key.sb_data = sb;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_type = 2;
+	kcc->key.sb_data = 0x02000000 | (vni & 0x00ffffff);
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+/*
+ * Allocate the lowest free unique KCC ID from the shared bitmap.
+ * Returns the ID (also stored in kcc->id), or -1 when the ID space is
+ * exhausted.
+ * NOTE(review): the new ID goes through a (uint16_t) cast while kcc->id
+ * is uint32_t — correct only while kcc_id_bit_size <= 16; confirm.
+ */
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	/* find the first clear bit in the chosen byte and claim it */
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+/* Return kcc->id to the shared bitmap and mark the flow ID-less. */
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t byte_idx;
+	uint8_t bit_mask;
+
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	byte_idx = kcc->id >> 3;
+	bit_mask = (uint8_t)(1U << (kcc->id & 7));
+
+	assert(byte_idx < BE_UNIQUE_IDS_SIZE);
+	if (byte_idx >= BE_UNIQUE_IDS_SIZE)
+		return;
+
+	assert(kcc->kcc_unique_ids[byte_idx] & bit_mask);
+	kcc->kcc_unique_ids[byte_idx] &= (uint8_t)~bit_mask;
+	NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+	kcc->id = KCC_ID_INVALID;
+}
+
+/* Returns 1 when both flows carry the same 64-bit KCC key, else 0. */
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	return kcc->key64 == kcc1->key64 ? 1 : 0;
+}
+
+/*
+ * Program this flow's KCC entry (both key words, KM category, unique ID)
+ * into the CAM record selected for 'bank', flush it to hardware and take
+ * ownership of the record in the distribution table.
+ * Returns 0 on success, -1 if any backend write fails.
+ */
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	/* record ownership so the cuckoo logic can find/move this entry */
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record in hardware and release the ownership
+ * entry, then clear the flow's key/category state.
+ * The unique ID in kcc->id is intentionally NOT freed here.
+ * Returns 0 on success, -1 if any backend write fails.
+ */
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+/*
+ * Single cuckoo step: try to move the CAM entry owned by 'kcc' from its
+ * current bank into any free bank.
+ * Returns 1 on success, 0 when no bank is free or the HW write failed.
+ */
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller If you change this code in future updates, this may no
+			 * longer be true then!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo move: try to free CAM address 'bank_idx' by moving
+ * its owner, descending at most 'levels' levels. Already-visited CAM
+ * addresses are tracked on the file-scope reserved stack so the
+ * recursion never revisits them.
+ * Returns 1 when the address was freed, 0 otherwise.
+ */
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* cheap case first: the occupying flow can move directly */
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address so deeper levels do not revisit it */
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			/* a slot opened up below; this flow must now fit */
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/* Scratch key buffer for hashing; only words 0-1 are ever written.
+ * NOTE(review): file-scope static — not safe for concurrent callers;
+ * confirm single-threaded use of the flow API.
+ */
+static uint32_t kcc_hsh_key[16];
+
+/*
+ * Hash the KCC key and place the entry in a CAM bank: first look for a
+ * free bank, otherwise try cuckoo-moving existing entries (depth 4).
+ * On success the CAM is populated and kcc->bank_used/flushed_to_target
+ * are updated. Returns 0 on success, -1 when no room could be made.
+ * NOTE(review): the hash debug log prints record_indexes[0..2], which
+ * assumes at least 3 banks — confirm.
+ */
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1;
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+/*
+ * Create a new KCC CAM entry for this flow's key.
+ * Returns 0 on success, -1 when no CAM slot could be found/written.
+ *
+ * Format fix: key64 is uint64_t, so "%016lx" is wrong on ILP32 targets
+ * (long is 32-bit) — use PRIx64; km_category/id are uint32_t, so use %u.
+ */
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016" PRIx64
+	       ", KM category %u, id %u\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	return kcc_write_data_to_cam(kcc);
+}
+
+/* Remove this flow's entry from the CAM if it was ever flushed to HW. */
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res;
+
+	if (!kcc->flushed_to_target)
+		return 0;
+
+	res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+	kcc->flushed_to_target = 0;
+	kcc->bank_used = 0;
+	return res;
+}
+
+/*
+ * Take one more reference on the CAM entry this flow occupies and return
+ * the new reference count.
+ * Format fix: key64 is uint64_t — "%016lx" breaks on ILP32, use PRIx64;
+ * km_category/id are uint32_t, printed with %u.
+ */
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016" PRIx64
+	       ", KM category %u, id %u (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+/*
+ * Drop one reference on the CAM entry this flow occupies; when the count
+ * reaches zero the entry is cleared from the CAM.
+ * Returns the new reference count, or -1 when bank_used is out of range.
+ * Format fix: key64 is uint64_t — "%016lx" breaks on ILP32, use PRIx64;
+ * km_category/id are uint32_t, printed with %u.
+ */
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016" PRIx64
+	       ", KM category %u, id %u (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Fixed set of key mask patterns that the KM CAM can store directly.
+ * km_add_match_elem() compares each match element's mask against this
+ * table; elements whose mask equals none of these patterns are flagged
+ * for TCAM placement instead.
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;	/* number of valid 32-bit words in key_mask */
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ *
+ * cam_dist is a flat shadow table (banks x records) tracking which
+ * km_flow_def_s currently owns each CAM record; NULL means free.
+ */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* NOTE: both index macros implicitly use a local variable named `km` */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/* index of km's pre-computed hash record within bank `bnk` */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* maximum recursion depth for the cuckoo relocation search */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ *
+ * tcam_dist mirrors TCAM occupancy: (bank x record) -> owning flow def.
+ */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach the NIC-device-wide CAM/TCAM record manager to this KM flow
+ * definition, allocating it on first use.
+ *
+ * The manager is one contiguous allocation carved into four regions:
+ * CAM ownership table, cuckoo-move counter, TCAM ownership table and
+ * the hasher state. *handle caches the allocation across calls.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/*
+			 * Allocation failure: leave km's manager pointers
+			 * untouched instead of deriving them from NULL.
+			 */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* Carve the single allocation into its four regions */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the NIC-device CAM/TCAM record manager and clear the handle.
+ * Safe to call with an already-NULL handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle != NULL) {
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+		free(*handle);
+	}
+	*handle = NULL;
+}
+
+/*
+ * Append one match element (value/mask words plus extraction point) to
+ * the KM flow definition.
+ *
+ * word_len 1, 2 and 4 are accepted; 3 is widened to 4 with a zeroed
+ * last word. The element is additionally classified as CAM-capable
+ * when its mask exactly matches one of the cam_masks[] patterns,
+ * otherwise it is flagged for TCAM.
+ *
+ * Returns 0 on success, -1 on invalid word_len or a full match array.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* Reject overflow of the fixed-size match element array */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM - first hit suffices */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+				break;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable carrying the color/info word in the key (stored as 0/1). */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first unmarked, non-sideband match element of the requested
+ * word length. Returns its index, or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Human-readable name of a frame offset / sideband id, for debug logging. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final key layout for this flow definition.
+ *
+ * Match elements are packed into QWORD extractors first (largest first),
+ * then SWORD extractors, and finally any sideband (SWX) words, filling
+ * km->entry_word/entry_mask. The function then decides whether the key
+ * goes into CAM or TCAM and, for TCAM, computes the legal start-bank
+ * offsets for the (padded) key length.
+ *
+ * Returns 0 on success, -1 when the elements cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * BUGFIX: fill all four candidate start offsets
+			 * (8..11); the original wrote index [0] four times,
+			 * leaving offsets [1..3] uninitialized while
+			 * num_start_offsets claimed 4.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare a new KM flow definition against an existing, already
+ * programmed one (km1) to decide whether its KM recipe/flow type can be
+ * reused.
+ *
+ * Returns:
+ *   km1->flow_type  - same recipe can be reused
+ *   0               - not compatible, a new recipe is needed
+ *   -1              - exact key clash: the flow is already programmed
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/*
+		 * NOTE(review): the first test reads km->match[i] while the
+		 * second reads km->match_map[i] - looks inconsistent;
+		 * confirm whether both should use the same indexing.
+		 */
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program the KM recipe registers at the given RCP index from this flow
+ * definition: extractor selection (QW0/QW4, DW8/DW10, SWX), key mask A,
+ * and the CAM- or TCAM-specific fields (key length, flow-type map,
+ * pairing / bank bitmap).
+ *
+ * The statement order follows the hardware programming sequence; do not
+ * reorder. Returns 0 on success, -1 when the element layout cannot be
+ * expressed by the available extractors.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;	/* QWORD extractors used (max 2: QW0, QW4) */
+	int sw = 0;	/* SWORD/DWORD extractors used (max 2: DW8, DW10) */
+	int swx = 0;	/* sideband extractor used (max 1) */
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband words are only supported in CAM */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				/* select how many of QW0's words contribute */
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written MSB-first into the mask register array */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		/* one bank bit per key word, numbered from the MSB end */
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into the CAM record
+ * pre-selected for the given bank (km->record_indexes[bank]), claim the
+ * record(s) in the cam_dist ownership table, and flush to hardware.
+ * A paired key spills its remaining words into the next record.
+ * Returns OR-ed result of the hardware writes (0 on success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* words left over: key is paired, continue in next record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record(s) in the given bank, release ownership
+ * in cam_dist, and flush to hardware. Mirror image of cam_populate().
+ * Returns OR-ed result of the hardware writes (0 on success).
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* paired key: also clear the second record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank to any
+ * other bank whose hash slot for this key is free (cuckoo hashing).
+ * On success the old ownership slots are released (HW is not re-flushed
+ * for them - the caller immediately reuses the old address) and the
+ * global cuckoo-move counter is incremented.
+ * Returns 1 when the entry was moved, 0 otherwise.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* paired entries need two free records */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Depth-limited recursive cuckoo displacement: try to free the CAM slot
+ * at bank_idx by moving its current owner elsewhere, recursing up to
+ * `levels` deep. cam_addr_reserved_stack tracks slots already claimed
+ * along the current displacement chain so they are not reused.
+ * Returns 1 when the slot could be freed, 0 otherwise.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve the slot we are trying to free for this chain */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		/* free a deeper slot, then retry moving km into it */
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program this flow's key into the KM CAM.
+ *
+ * Hashes the key to get one candidate record index per bank, then:
+ *  1) uses the first bank whose record (pair) is free, else
+ *  2) tries cuckoo-displacing existing entries (depth 4), else fails.
+ * On success marks the flow as flushed and remembers the bank used.
+ * Returns 0 on success, -1 when no CAM space could be found.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): logs banks 0..2 unconditionally - assumes at least
+	 * 3 CAM banks; confirm nb_cam_banks >= 3 for all supported FPGAs.
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Find a record index that is free across all key_word_size consecutive
+ * TCAM banks starting at start_bank (a key occupies one record in each
+ * bank it spans). On success stores the record in km->tcam_record and
+ * returns 1; returns 0 when no such record exists.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			/* the same record must be free in every spanned bank */
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Pick a TCAM placement for this flow: try each legal start bank (as
+ * computed by km_key_create()) until a free record is found. Stores the
+ * chosen bank/record in km and returns 0, or -1 when the TCAM is full.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int i = 0; i < km->num_start_offsets; i++) {
+		int bank = km->start_offsets[i];
+
+		if (!tcam_find_free_record(km, bank))
+			continue;
+
+		km->tcam_start_bank = bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one key word into one TCAM bank for the given record.
+ *
+ * The TCAM is value-indexed: for each of the word's 4 bytes and each of
+ * the 256 possible byte values, this record's bit is set when the value
+ * matches (value & mask == key & mask) and cleared otherwise. Finally
+ * the bank is flushed and ownership is recorded in tcam_dist.
+ * Returns OR-ed result of the hardware accesses (0 on success).
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* record bit position within the 96-bit record bitmap */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		/* bytes are taken MSB-first from the key word */
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program this flow's key into the KM TCAM.
+ *
+ * Finds a free record if one was not already assigned (reused recipe),
+ * writes the TCI (color + flow type) for the start bank/record, then
+ * writes each key word into its bank via tcam_write_word().
+ * Returns 0 on success, -1/error code on failure.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	/* suppress per-register debug output for the 256-value TCAM writes */
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
/*
 * Remove this flow's TCAM entry: zero the TCI (color and flow type) and
 * clear the record's bit in every bank used by the key words.
 * Returns 0 on success, -1 if no bank/record was ever assigned.
 */
static int tcam_reset_entry(struct km_flow_def_s *km)
{
	int err = 0;

	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
		return -1;
	}

	/* Write KM_TCI */
	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
			  km->tcam_record, 0);
	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
			  km->tcam_record, 0);
	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);

#ifdef FLOW_DEBUG
	/* silence backend write tracing during the per-bank reset loop */
	km->be->iface->set_debug_mode(km->be->be_dev,
				      FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	/* one bank per key word, same record index in each */
	for (int i = 0; i < km->key_word_size && !err; i++) {
		err = tcam_reset_bank(km, km->tcam_start_bank + i,
				      km->tcam_record);
	}
#ifdef FLOW_DEBUG
	km->be->iface->set_debug_mode(km->be->be_dev,
				      FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	return err;
}
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	int res = 0;
+
+	km->root = km1->root ? km1->root : km1;
+	while (km1->reference)
+		km1 = km1->reference;
+	km1->reference = km;
+
+	km->info = km1->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_TCAM:
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_SYNERGY:
+	default:
+		res = -1;
+		break;
+	}
+
+	return res;
+}
+
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	int res = -1;
+
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	switch (km->target) {
+	case KM_CAM:
+		res = km_write_data_to_cam(km);
+		break;
+	case KM_TCAM:
+		res = km_write_data_to_tcam(km);
+		break;
+	case KM_SYNERGY:
+	default:
+		break;
+	}
+	return res;
+}
+
/*
 * Detach this KM flow definition from its hardware entry.
 * Three cases:
 *  - km is a member of a reference chain: unlink it; the root keeps HW.
 *  - km is a chain head with references: hand HW ownership to the first
 *    reference without touching the hardware.
 *  - km is a standalone, flushed owner: actually reset the HW entry.
 * Returns 0 on success, -1 for an unsupported target.
 */
int km_clear_data_match_entry(struct km_flow_def_s *km)
{
	int res = 0;

	if (km->root) {
		/* chain member: unlink km from the root's reference list */
		struct km_flow_def_s *km1 = km->root;

		while (km1->reference != km)
			km1 = km1->reference;

		km1->reference = km->reference;

		km->flushed_to_target = 0;
		km->bank_used = 0;
	} else if (km->reference) {
		/* chain head: promote the first reference to HW owner */
		km->reference->root = NULL;

		switch (km->target) {
		case KM_CAM:
			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
				km->reference;
			if (km->key_word_size + !!km->info_set > 1) {
				/* a paired CAM entry spans two dist slots */
				assert(km->cam_paired);
				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
				.km_owner = km->reference;
			}
			break;
		case KM_TCAM:
			/* transfer every per-word bank slot at this record */
			for (int i = 0; i < km->key_word_size; i++) {
				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
					km->tcam_record)].km_owner = km->reference;
			}
			break;
		case KM_SYNERGY:
		default:
			res = -1;
			break;
		}

		km->flushed_to_target = 0;
		km->bank_used = 0;
	} else if (km->flushed_to_target) {
		/* sole owner with entry in HW: clear the hardware entry */
		switch (km->target) {
		case KM_CAM:
			res = cam_reset_entry(km, km->bank_used);
			break;
		case KM_TCAM:
			res = tcam_reset_entry(km);
			break;
		case KM_SYNERGY:
		default:
			res = -1;
			break;
		}
		km->flushed_to_target = 0;
		km->bank_used = 0;
	}

	return res;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
/* One tunnel definition in the driver's global list of known tunnels. */
struct tunnel_s {
	struct tunnel_cfg_s cfg;	/* tunnel header fields */
	struct tunnel_cfg_s cfg_mask;	/* mask covering the fields in cfg */
	uint32_t flow_stat_id;	/* (uint32_t)-1 until assigned by flow code */
	uint8_t vport;	/* virtual port allocated for this tunnel */
	int refcnt;	/* number of users; freed when it drops to zero */
	struct tunnel_s *next; /* linked list of defined tunnels */
};
+
+int is_virtual_port(uint8_t virt_port)
+{
+	return !!(virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+		  virt_port < MAX_HW_VIRT_PORTS);
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
/* Head of the global linked list of known tunnel definitions. */
static struct tunnel_s *tunnels;

/* In-use flag per virtual tunnel port, indexed by (port - offset). */
static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	for (uint8_t i = VIRTUAL_TUNNEL_PORT_OFFSET; i < MAX_HW_VIRT_PORTS;
+			i++) {
+		if (!vport[i - VIRTUAL_TUNNEL_PORT_OFFSET]) {
+			vport[i - VIRTUAL_TUNNEL_PORT_OFFSET] = 1;
+			return i;
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
/*
 * Release a previously allocated virtual tunnel port.
 * Returns 0 on success.
 * NOTE(review): on an out-of-range port this returns -1, but the return
 * type is uint8_t so callers actually observe 255 - confirm that callers
 * only test for non-zero.
 */
uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
{
	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
			virt_port < MAX_HW_VIRT_PORTS) {
		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
		return 0;
	}
	return -1;
}
+
/*
 * Compare two values on the bits covered by BOTH masks. Statement-expression
 * locals ensure each argument is evaluated exactly once.
 */
#define check(_v1, _v2, _msk1, _msk2) ({ \
	__typeof__(_v1) (v1) = (_v1); \
	__typeof__(_v2) (v2) = (_v2); \
	__typeof__(_msk1) (msk1) = (_msk1); \
	__typeof__(_msk2) (msk2) = (_msk2); \
	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
})

/*
 * Masked equality of two IPv4 tunnel configs: source/destination address
 * and source/destination UDP port must all match under the combined masks.
 */
#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
		(tun1_msk)->s_port) &&                                    \
	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
		(tun1_msk)->d_port)); \
})

/*
 * Masked equality of two IPv6 tunnel configs: both 64-bit halves of the
 * source and destination addresses plus the UDP ports must match.
 */
#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
		(tun1_msk)->s_port) &&                                          \
	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
		(tun1_msk)->d_port)); \
})
+
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type == tnlcfg->tun_type) {
+		if (tun->cfg.ipversion == 4) {
+			return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+						  tnlcfg, tnlcfg_mask);
+		} else {
+			return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+						  tnlcfg, tnlcfg_mask);
+		}
+	}
+	return 0;
+}
+
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *tun = tunnels, *prev = NULL;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+	/* find tunnel in list */
+	while (tun) {
+		if (tun == tnl)
+			break;
+		prev = tun;
+		tun = tun->next;
+	}
+
+	if (!tun) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--tun->refcnt == 0) {
+		if (prev)
+			prev->next = tun->next;
+		else
+			tunnels = tun->next;
+		flow_tunnel_free_virt_port(tun->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tun->vport);
+		free(tun);
+	}
+
+	return 0;
+}
+
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
/* Attach the flow statistics id to a tunnel (assigned by the flow code). */
void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
{
	rtnl->flow_stat_id = flow_stat_id;
}
+
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->vport == vport && (flow_stat_id == tun->flow_stat_id ||
+					    flow_stat_id == (uint32_t)-1)) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+		tun = tun->next;
+	}
+
+	return -1;
+}
+
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	for (uint8_t i = 0; i < size; i++)
+		result[i] = ((const uint8_t *)elem->spec)[i];
+}
+
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[5],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[5],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Ask the backend whether the CAT (categorizer) module exists on this device. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_cat_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the backend for the CAT module version and resource counts, then
+ * allocate all version-specific shadow tables in a single block via
+ * callocate_mod() (anchored at be->cat.base, released by hw_mod_cat_free()).
+ *
+ * Returns 0 on success, a negative error code on bad counts, allocation
+ * failure, or an unsupported module version.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* these counts must be strictly positive (checked with <= 0) */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC and KM-interface counts may legitimately be zero (checked with < 0) */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* guard against division by zero when no KCC banks are present */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * Per version: allocate every table in one callocate_mod() call.
+	 * Arguments come in (pointer, element count, element size) triples;
+	 * the leading integer is the number of triples.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		/* v21 doubles the FTE key count (4 instead of 2) and reuses
+		 * several v18 table layouts.
+		 */
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		/* v22 adds the CCE and CCS tables (two extra triples, 14 total) */
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT shadow tables allocated by hw_mod_cat_alloc().
+ * All tables live in the single block anchored at be->cat.base, so one
+ * free() releases everything. free(NULL) is a no-op, so no guard is
+ * needed; the pointer is always cleared to prevent double-free.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset one CFN (category function) entry to its defaults: first clear the
+ * whole entry (PRESET_ALL = 0), then open up every protocol/error match
+ * field to "accept all". Fields that only exist from version 21 onward are
+ * set conditionally at the end.
+ *
+ * Returns the result of the initial PRESET_ALL write; subsequent writes are
+ * best-effort and their return values are intentionally not checked.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* fields below only exist in module version 21 and later */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Zero the entire CAT shadow cache and flush every table to hardware.
+ * For version 18 there is a single KM interface; from version 21 the
+ * KCE/KCS/FTE tables are flushed per KM interface (m0, and m1 when a
+ * second interface is present). KCC is flushed only when the device
+ * reports a non-zero KCC table, and CCE/CCS only exist past version 21.
+ *
+ * Returns 0 on success, -1 on the first failing flush.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC table may be absent (size 0) on some devices */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables were introduced after version 21 */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush CFN cache entries [start_idx, start_idx + count) to hardware.
+ * ALL_ENTRIES flushes the whole table and requires start_idx == 0.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES) {
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		count = be->cat.nb_cat_funcs;
+	} else if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs) {
+		return error_index_too_large(__func__);
+	}
+
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Shared read/write accessor for one CFN (category function) cache field.
+ *
+ * field:    which CFN field to access (or a pseudo-op such as PRESET_ALL,
+ *           COMPARE, FIND, COPY_FROM that acts on whole entries)
+ * index:    CFN entry index, checked against nb_cat_funcs
+ * word_off: word offset for multi-word fields (e.g. PM_CMP)
+ * value:    in/out value; written through on get, read on set
+ * get:      non-zero for read, zero for write
+ *
+ * Dispatches first on module version, then on field; pseudo-ops that are
+ * write-only reject get. Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21/v22 share the cat_v21_cfn_s layout; v21 adds COPY_FROM,
+		 * the tunnel checksum/TTL error fields and a second KM OR bank.
+		 */
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field; thin wrapper around the shared modifier. */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one CFN field; the shared modifier stores the result via *value. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	const int get_op = 1;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, get_op);
+}
+
+/*
+ * Map (interface selector, module interface id) to a cache array index:
+ * 0 for the m0 interface, 1 for m1. Version 18 has a single interface and
+ * always maps to 0. Returns a negative error code when the id does not
+ * match the interface selected.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num == KM_FLM_IF_SECOND) {
+		if (be->cat.km_if_m1 == km_if_id)
+			return 1;
+		return error_unsup_field(__func__);
+	}
+
+	if (be->cat.km_if_m0 == km_if_id)
+		return 0;
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/* Flush KCE entries to hardware; one KCE word covers 8 CFN entries. */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const unsigned int nb_entries = be->cat.nb_cat_funcs / 8;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM module interface the id refers to */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE for the KM interface (module id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kce_flush(be, if_num, km_module_id, start_idx,
+				    count);
+}
+
+/* Flush KCE for the FLM interface (module id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kce_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Shared read/write accessor for one KCE cache entry (category-function
+ * enable bitmap, 8 CFN entries per KCE word). v18 has a single enable
+ * bitmap; v21/v22 keep one per KM module interface.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCE field on the KM interface (module id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read a KCE field on the KM interface (module id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int get_op = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, get_op);
+}
+
+/* Write a KCE field on the FLM interface (module id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read a KCE field on the FLM interface (module id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int get_op = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, get_op);
+}
+
+/*
+ * KCS
+ */
+/* Flush KCS (category select) entries to hardware. */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const unsigned int nb_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM module interface the id refers to */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS for the KM interface (module id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kcs_flush(be, if_num, km_module_id, start_idx,
+				    count);
+}
+
+/* Flush KCS for the FLM interface (module id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kcs_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Shared read/write accessor for one KCS cache entry (category select).
+ * v18 has a single category field; v21/v22 keep one per KM module
+ * interface. Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCS field on the KM interface (module id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read a KCS field on the KM interface (module id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int get_op = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, get_op);
+}
+
+/* Write a KCS field on the FLM interface (module id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read a KCS field on the FLM interface (module id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int get_op = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, get_op);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE (flow-type enable) entries to hardware. The table holds
+ * nb_cat_funcs/8 * nb_flow_types * key_cnt entries, where key_cnt is 4
+ * from version 20 onward and 2 before that.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+	const unsigned int nb_entries =
+		be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM module interface the id refers to */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE for the KM interface (module id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_fte_flush(be, if_num, km_module_id, start_idx,
+				    count);
+}
+
+/* Flush FTE for the FLM interface (module id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_fte_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Shared read/write accessor for one FTE cache entry (flow-type enable
+ * bitmap). The index bound scales with key_cnt: 4 keys per flow type from
+ * version 20, 2 before. v18 has a single enable bitmap; v21/v22 keep one
+ * per KM module interface. Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one FTE field on the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read one FTE field on the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+/* Write one FTE field on the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read one FTE field on the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/*
+ * Flush a range of CTE entries (one per category function) to hardware.
+ * ALL_ENTRIES selects the whole table of nb_cat_funcs entries.
+ */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one CTE enable bitmap.  Versions 18, 21 and 22 are all
+ * accessed through the v18 view here; NOTE(review): this assumes the
+ * cat union aliases the cte pointer identically for these versions —
+ * confirm against the hw_mod_cat_s definition.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTE entry field (see hw_mod_cat_cte_mod()). */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cte_mod(be, field, index, &v, 0);
+}
+
+/* Read one CTE entry field (see hw_mod_cat_cte_mod()). */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cte_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of CTS entries.  The table holds nb_cat_funcs *
+ * addr_size entries.  NOTE(review): the _VER_ < 15 branch looks dead —
+ * only versions 18/21/22 are handled elsewhere in this module and
+ * hw_mod_cat_cts_mod() uses (cts_num + 1) / 2 unconditionally; confirm.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one CTS entry field (cat_a / cat_b word).  Index range
+ * is nb_cat_funcs * (cts_num + 1) / 2, matching hw_mod_cat_cts_flush()
+ * for the supported versions.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTS entry field (see hw_mod_cat_cts_mod()). */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cts_mod(be, field, index, &v, 0);
+}
+
+/* Read one CTS entry field (see hw_mod_cat_cts_mod()). */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cts_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of COT entries; the table holds max_categories entries.
+ */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one COT entry field, plus three pseudo-fields:
+ *  - HW_CAT_COT_PRESET_ALL: fill the whole entry with the byte *value
+ *    (set only),
+ *  - HW_CAT_COT_COMPARE: compare the entry at 'index' with the entry
+ *    at '*value' via do_compare_indexes(),
+ *  - HW_CAT_COT_FIND: locate an entry equal to entry 'index' via
+ *    find_equal_index(), result returned through *value.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one COT entry field (see hw_mod_cat_cot_mod()). */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cot_mod(be, field, index, &v, 0);
+}
+
+/* Read one COT entry field (see hw_mod_cat_cot_mod()). */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cot_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of CCT entries; the table holds nb_cat_funcs * 4
+ * entries.
+ */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one CCT entry field (color / km); index range matches
+ * hw_mod_cat_cct_flush() (nb_cat_funcs * 4).
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCT entry field (see hw_mod_cat_cct_mod()). */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cct_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCT entry field (see hw_mod_cat_cct_mod()). */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cct_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of KCC CAM entries; the table holds kcc_size entries.
+ */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one KCC CAM entry field.  The key is stored as two
+ * 32-bit words, so word_off must be 0 or 1 for HW_CAT_KCC_KEY; word_off
+ * is ignored for the category and id fields.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			/* key[] holds exactly two words */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KCC CAM field (see hw_mod_cat_kcc_mod()). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one KCC CAM field (see hw_mod_cat_kcc_mod()). */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, do_get);
+}
+
+/*
+ * Flush a range of EXO entries; the table holds nb_pm_ext entries.
+ */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one EXO entry field.  The 'ofs' member is a signed
+ * offset (int32_t), hence get_set_signed() for HW_CAT_EXO_OFS.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one EXO entry field (see hw_mod_cat_exo_mod()). */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_exo_mod(be, field, index, &v, 0);
+}
+
+/* Read one EXO entry field (see hw_mod_cat_exo_mod()). */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_exo_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of RCK entries; the table holds nb_pm_ext * 64 entries.
+ */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one RCK data word; index range matches
+ * hw_mod_cat_rck_flush() (nb_pm_ext * 64).
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCK entry field (see hw_mod_cat_rck_mod()). */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_rck_mod(be, field, index, &v, 0);
+}
+
+/* Read one RCK entry field (see hw_mod_cat_rck_mod()). */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_rck_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of LEN entries; the table holds nb_len entries.
+ */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write one LEN entry field (lower/upper bound, dyn1/dyn2
+ * selectors, invert flag); index range matches hw_mod_cat_len_flush().
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LEN entry field (see hw_mod_cat_len_mod()). */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_len_mod(be, field, index, &v, 0);
+}
+
+/* Read one LEN entry field (see hw_mod_cat_len_mod()). */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_len_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of CCE entries; the table holds a fixed 4 entries.
+ */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CCE entry field (v22 only).
+ *
+ * Bug fix: the index bound was checked against be->cat.nb_len — a
+ * copy/paste from the LEN accessors — while hw_mod_cat_cce_flush()
+ * limits the CCE table to 4 entries.  Validate against the same fixed
+ * limit so set/get and flush agree on the table size.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCE entry field (see hw_mod_cat_cce_mod()). */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cce_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCE entry field (see hw_mod_cat_cce_mod()). */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cce_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush a range of CCS entries; the table holds a fixed 1024 entries.
+ */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CCS entry field (v22 only).
+ *
+ * Bug fix: the index bound was checked against be->cat.nb_len — a
+ * copy/paste from the LEN accessors — while hw_mod_cat_ccs_flush()
+ * limits the CCS table to 1024 entries.  Validate against the same
+ * fixed limit so set/get and flush agree on the table size.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCS entry field (see hw_mod_cat_ccs_mod()). */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_ccs_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCS entry field (see hw_mod_cat_ccs_mod()). */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_ccs_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CFN entry: per-entry match terms for the CAT module, version 18. */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE entry: enable bitmap. */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS entry: category select. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE entry: enable bitmap (v18 has a single KM interface). */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE entry: per-function-block enable bits, also addressable as one
+ * 32-bit bitmap through the union.
+ */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS entry: two category words per address. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT entry: color and KM value. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT entry: color and KM value. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO entry: dyn selector and signed offset. */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK entry: one data word. */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN entry: length bounds, dyn selectors and invert flag. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC CAM entry: 64-bit key (two words), category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Shadow copy of all CAT v18 register tables (pointers to allocated
+ * arrays; see the callocate_mod() setup in hw_mod_cat.c).
+ */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/* CFN entry, version 21: extends v18 with tunnel checksum/TTL error
+ * checks and a second KM interface (km0_or/km1_or).
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE entry: one enable bitmap per KM/FLM interface. */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS entry: one category select per KM/FLM interface. */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE entry: one enable bitmap per KM/FLM interface (indexed by
+ * km_if_idx in hw_mod_cat_fte_mod()).
+ */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Shadow copy of all CAT v21 register tables; unchanged tables reuse
+ * the v18 entry layouts.
+ */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CTE entry, version 22: v18 layout plus the rrb enable bit. */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE entry (new in v22): immediate and indirect words; the table has
+ * 4 entries (see hw_mod_cat_cce_flush()).
+ */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS entry (new in v22): per-block enable/value pairs plus three
+ * type/data side-band words; the table has 1024 entries (see
+ * hw_mod_cat_ccs_flush()).
+ */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Shadow copy of all CAT v22 register tables; unchanged tables reuse
+ * the v18/v21 entry layouts.
+ */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend for whether the FLM module exists in this FPGA. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Query FLM capabilities/version from the backend and allocate the
+ * shadow register cache for the detected version.
+ *
+ * callocate_mod() takes a count of (ptr, nb, size) triplets: 26 structs
+ * for v17, 38 for v20 (the v17 set plus twelve extra v20 statistics
+ * counters). Returns 0 on success, negative on error.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Each resource count must be positive; zero/negative means the
+	 * backend does not provide it for this FPGA.
+	 */
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* v17: 26 register structs */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20: the full v17 set (stored in the v17 view) plus 12
+		 * additional v20 statistics counters = 38 structs.
+		 */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM register cache. The cache is a single allocation made
+ * by callocate_mod() (only .base is ever freed), so one free() releases
+ * everything. free(NULL) is a no-op, so no guard is needed; the pointer
+ * is cleared to make repeated calls and later use-after-free checks safe.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM module: zero the whole shadow cache, apply the default
+ * SDRAM split, and flush control/timeout/scrub and all RCP entries to
+ * hardware.
+ *
+ * NOTE(review): the flush return values are discarded and 0 is always
+ * returned — confirm failures here are intentionally ignored.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached CONTROL registers to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write a single CONTROL field in the shadow cache.
+ * The v17 layout is shared by versions 17 and 20.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			/* write-only: fill the whole struct with one byte */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CONTROL field in the cache (does not flush to hardware). */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Get one CONTROL field from the cache. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached STATUS registers to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the STATUS cache from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Read (get != 0) or write a single STATUS field in the shadow cache. */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one STATUS field in the cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Get one STATUS field from the cache. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached TIMEOUT register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Read (get != 0) or write the TIMEOUT field in the shadow cache. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the TIMEOUT field in the cache. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Get the TIMEOUT field from the cache. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached SCRUB register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Read (get != 0) or write the SCRUB interval field in the shadow cache. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the SCRUB field in the cache. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Get the SCRUB field from the cache. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_BIN register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Read (get != 0) or write the LOAD_BIN field in the shadow cache. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_BIN field in the cache. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Get the LOAD_BIN field from the cache. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_PPS register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Read (get != 0) or write the LOAD_PPS field in the shadow cache. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_PPS field in the cache. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Get the LOAD_PPS field from the cache. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_LPS register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Read (get != 0) or write the LOAD_LPS field in the shadow cache. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_LPS field in the cache. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Get the LOAD_LPS field from the cache. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_APS register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Read (get != 0) or write the LOAD_APS field in the shadow cache. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the LOAD_APS field in the cache. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Get the LOAD_APS field from the cache. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached PRIO registers to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write one PRIO field (limit/flow-type pair per
+ * priority level 0..3) in the shadow cache.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one PRIO field in the cache. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Get one PRIO field from the cache. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) PST profile entries to hardware.
+ * ALL_ENTRIES flushes every profile; the range is bounds-checked against
+ * nb_pst_profiles.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/* Read (get != 0) or write one field of PST profile 'index' in the cache. */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			/* write-only: fill the whole entry with one byte */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one PST field in the cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Get one PST field from the cache. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP (recipe) entries to hardware.
+ * ALL_ENTRIES flushes every category; the range is bounds-checked.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of RCP entry 'index' in the cache.
+ * HW_FLM_RCP_MASK transfers the whole 10-word mask array; all other
+ * fields are single 32-bit values.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			/* write-only: fill the whole entry with one byte */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* array field: copy the full mask in or out */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the RCP mask array (the only array-valued RCP field).
+ * NOTE(review): a field mismatch here returns error_unsup_ver even
+ * though it is a field error — error_unsup_field looks intended; confirm.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Set one scalar RCP field; HW_FLM_RCP_MASK must go through
+ * hw_mod_flm_rcp_set_mask() instead (same error-code note as above).
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RCP field (scalar, or the mask array) from the cache. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the BUF_CTRL cache from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field from the cache; BUF_CTRL is read-only. */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Get one BUF_CTRL field from the cache. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh the statistics counter cache from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one statistics counter from the cache. The v17 counters are
+ * available in both versions; the nested default falls through to the
+ * v20-only counters, guarded by the _VER_ < 18 check.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters: reject on older versions */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record to hardware. 'value' must point to a complete
+ * struct flm_v17_lrn_data_s; the word count passed to the backend is
+ * derived from that struct's size.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to word_cnt words of flow-info records from hardware into
+ * 'value'.
+ *
+ * NOTE(review): the backend call's return value is discarded and 0 is
+ * returned unconditionally — confirm flm_inf_data_update cannot fail,
+ * unlike the learn-data path which propagates its result.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (struct flm_v17_sta_data_s worth of words)
+ * from hardware into 'value'.
+ *
+ * NOTE(review): as with the info-data path, the backend call's return
+ * value is discarded — confirm this is intentional.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay for extracting two packed 28-bit member indices from a byte
+ * buffer. The FLM_V17_MBR_IDx macros map the four 28-bit indices stored
+ * in lrn_data's 14-byte mbr_idx field: ID1/ID2 overlay the first 8
+ * bytes, ID3/ID4 overlay bytes starting at offset 7.
+ * NOTE(review): casting an arbitrary byte pointer to this struct relies
+ * on unaligned access and violates strict aliasing — works on x86 with
+ * packed data, but confirm for other targets.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* Shadow of the FLM CONTROL register fields (v17 layout). */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* Shadow of the FLM STATUS register fields (v17 layout). */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* Single-field register shadows (v17). */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* PRIO register shadow: a limit/flow-type pair per priority level 0..3. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* PST profile entry shadow (one per pst profile). */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* RCP (recipe) entry shadow, one per FLM category. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10]; /* array field; transferred whole via HW_FLM_RCP_MASK */
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* BUF_CTRL shadow: buffer availability counters (read-only via API). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+#pragma pack(1)
+/*
+ * Learn record wire format (packed, 768 bits total). Each field comment
+ * gives its bit span as high:low and (width).
+ */
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record wire format (packed). */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1; /* end-of-record flag */
+};
+
+/* Status record wire format (packed). */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1; /* end-of-record flag */
+};
+
+#pragma pack()
+/* One-word statistics counter shadows (v17), one struct per counter. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/* FLM register cache, version 17: pointers into the single base allocation. */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20 statistics registers added on top of the v17 set.
+ * Each register shadow is a single 32-bit event counter.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Software cache of the FLM v20 register file. v20 keeps the v17 register
+ * layouts unchanged (hence the flm_v17_* member types) and appends the
+ * new v20 statistics counters at the end.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Query the backend for presence of the HSH (hash) FPGA module. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Read the HSH module version from the backend and allocate the shadow
+ * cache for all RCP entries of that version.
+ * Returns 0 on success, -1 on allocation failure, or a backend error code
+ * for an unsupported version.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the single allocation backing the HSH shadow cache. */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	if (be->hsh.base) {
+		free(be->hsh.base);
+		be->hsh.base = NULL;
+	}
+}
+
+/* Zero the HSH shadow cache and write the cleared RCP table to hardware. */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Get or set a single field of HSH RCP entry 'index' in the shadow cache.
+ * 'word_off' selects the word for array-valued fields (mask arrays).
+ * get != 0 reads into *value, get == 0 writes *value into the cache.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/* mac_port_mask has HSH_RCP_MAC_PORT_MASK_SIZE words */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* word_mask has HSH_RCP_WORD_MASK_SIZE words */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of an HSH RCP shadow entry (does not flush to HW). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one field of an HSH RCP shadow entry into *value. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * Shadow of one HSH v5 RCP (recipe) register entry. Array sizes match
+ * HSH_RCP_MAC_PORT_MASK_SIZE (4) and HSH_RCP_WORD_MASK_SIZE (10) used by
+ * the accessors in hw_mod_hsh.c. *_ofs fields are signed byte offsets.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Top-level HSH v5 shadow cache: one array of RCP entries. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Query the backend for presence of the HST (header stripper) FPGA module. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Read the HST module version, query the category count, and allocate the
+ * shadow cache for all RCP entries of that version.
+ * Returns 0 on success, -1 on allocation failure, or a backend error code.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single allocation backing the HST shadow cache. */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	if (be->hst.base) {
+		free(be->hst.base);
+		be->hst.base = NULL;
+	}
+}
+
+/* Zero the HST shadow cache and write the cleared RCP table to hardware. */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Get or set a single field of HST RCP entry 'index' in the shadow cache.
+ * get != 0 reads into *value, get == 0 writes *value into the cache.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/* capture the result so failures are not ignored */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of an HST RCP shadow entry (does not flush to HW). */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of an HST RCP shadow entry into *value. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * Shadow of one HST v2 RCP (recipe) register entry: header-strip window
+ * (start/end dyn+ofs) plus three packet modifier slots (cmd/dyn/ofs/value).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* Top-level HST v2 shadow cache: one array of RCP entries. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Query the backend for presence of the IOA FPGA module. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Read the IOA module version, query resource counts, and allocate the
+ * shadow cache (RCP table, TPID config, ROA EPP table) in one block.
+ * Returns 0 on success, -1 on allocation failure, or a backend error code.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the single allocation backing the IOA shadow cache. */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	if (be->ioa.base) {
+		free(be->ioa.base);
+		be->ioa.base = NULL;
+	}
+}
+
+/*
+ * Zero the IOA shadow cache, program default custom TPIDs (0x8200/0x8300)
+ * and flush all tables to hardware.
+ * NOTE(review): the intermediate flush return values are discarded and 0 is
+ * always returned - verify this is intentional.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Get or set a single field of IOA RCP entry 'index' in the shadow cache.
+ * get != 0 reads into *value, get == 0 writes *value into the cache.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* fill the whole entry with the low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of an IOA RCP shadow entry (does not flush to HW). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of an IOA RCP shadow entry into *value. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Write the cached special-TPID configuration to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one of the two custom TPID values in the shadow cache.
+ * Takes effect on hardware only after hw_mod_ioa_config_flush().
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) ROA EPP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Get or set a single field of ROA EPP entry 'index' in the shadow cache.
+ * get != 0 reads into *value, get == 0 writes *value into the cache.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of a ROA EPP shadow entry (does not flush to HW). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of a ROA EPP shadow entry into *value. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* Shadow of one IOA v4 RCP entry: tunnel/VLAN pop-push and queue override. */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Single-instance custom TPID configuration registers. */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* Shadow of one ROA egress-packet-processing entry. */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* Top-level IOA v4 shadow cache. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Query the backend for presence of the KM (key matcher) FPGA module. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Read the KM module version, query all CAM/TCAM geometry parameters and
+ * allocate the shadow caches (RCP, CAM, TCAM, TCI, TCQ) in one block.
+ * Returns 0 on success, -1 on allocation failure, or a backend error code.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		be->km.nb_km_rcp_mask_a_word_size = 12;
+		be->km.nb_km_rcp_mask_b_word_size = 6;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single allocation backing the KM shadow cache. */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	if (be->km.base) {
+		free(be->km.base);
+		be->km.base = NULL;
+	}
+}
+
+/*
+ * Zero the KM shadow cache and write all cleared tables (RCP, CAM, TCAM,
+ * TCI, TCQ) to hardware. TCAM banks are hard-reset first so the write-back
+ * cache is guaranteed to match hardware state.
+ * NOTE(review): intermediate flush return values are discarded and 0 is
+ * always returned - verify this is intentional.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) KM RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Get or set a single field of KM RCP entry 'index' in the shadow cache.
+ * 'word_off' selects the word for the mask-array fields.
+ * get != 0 reads into *value, get == 0 writes *value into the cache.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/* mask_d_a holds KM_RCP_MASK_D_A_SIZE words */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* mask_b holds KM_RCP_MASK_B_SIZE words */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one KM RCP field into the cache; word_off selects the word for
+ * array-valued fields (masks). Thin wrapper for hw_mod_km_rcp_mod(get=0).
+ */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/*
+ * Read one KM RCP field from the cache into *value; word_off selects the
+ * word for array-valued fields. Wrapper for hw_mod_km_rcp_mod(get=1).
+ */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush 'count' cached KM CAM records to HW, starting at
+ * (start_bank, start_record). ALL_ENTRIES flushes the whole CAM.
+ * Returns an error if the flat range exceeds the CAM table.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* flat end position: banks of nb_cam_records records each */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one field of a cached KM CAM record addressed by
+ * (bank, record). PRESET_ALL (set only) memsets the whole record with
+ * the low byte of *value. Only KM version 7 is supported.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* flat record index across banks */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM CAM field into the cache (wrapper, get=0). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM CAM field from the cache into *value (wrapper, get=1). */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCAM entries to HW. A bank holds 4 * 256 entries
+ * (appears to be 4 byte-columns of 256 byte values -- TODO confirm
+ * against FPGA register docs). ALL_ENTRIES = whole TCAM,
+ * ALL_BANK_ENTRIES = one full bank.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Read or write one cached KM TCAM entry (3 x 32-bit words each).
+ * HW_KM_TCAM_BANK_RESET (set only) presets every entry of a bank and
+ * marks them dirty; HW_KM_TCAM_T accesses a single entry, marking it
+ * dirty only when the stored words actually change.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	/* Flat entry index: each bank spans 4 * 256 entries (4 byte-columns
+	 * of 256 byte values -- TODO confirm geometry against FPGA docs).
+	 */
+	unsigned int start_index = bank * 4 * 256 + byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			/* Same flat index as computed (and bounds-checked)
+			 * above; no need to recompute it.
+			 */
+			unsigned int index = start_index;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else if (be->km.v7.tcam[index].t[0] != value_set[0] ||
+					be->km.v7.tcam[index].t[1] != value_set[1] ||
+					be->km.v7.tcam[index].t[2] != value_set[2]) {
+				/* only write back (and mark dirty) on change */
+				be->km.v7.tcam[index].t[0] = value_set[0];
+				be->km.v7.tcam[index].t[1] = value_set[1];
+				be->km.v7.tcam[index].t[2] = value_set[2];
+				be->km.v7.tcam[index].dirty = 1;
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one TCAM entry (3 words from value_set) into the cache. */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read one TCAM entry (3 words into value_set) from the cache. */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush cached KM TCI records to HW; the index space is
+ * nb_tcam_banks * nb_tcam_bank_width. ALL_ENTRIES flushes everything.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one cached KM TCI record field (color or flow type)
+ * addressed by (bank, record). Only KM version 7 is supported.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCI field into the cache (wrapper, get=0). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM TCI field from the cache into *value (wrapper, get=1). */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCQ records to HW; same index space as TCI
+ * (nb_tcam_banks * nb_tcam_bank_width). ALL_ENTRIES flushes everything.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one cached KM TCQ record field (bank mask or qualifier)
+ * addressed by (bank, record). Only KM version 7 is supported.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Write one KM TCQ field into the cache. NOTE(review): unlike the other
+ * set wrappers this takes a pointer, not a value -- presumably to allow
+ * multi-word fields; confirm against callers.
+ */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one KM TCQ field from the cache into *value (wrapper, get=1). */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * KM v7 recipe (RCP) cache record. Field names mirror the FPGA KM RCP
+ * register fields; *_ofs fields are signed byte offsets.
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];	/* words accessed via HW_KM_RCP_MASK_A */
+	uint32_t mask_b[6];	/* words accessed via HW_KM_RCP_MASK_B */
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* KM v7 CAM record: six key words (w0..w5) and six flow types (ft0..ft5). */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* KM v7 TCAM entry: 3 data words plus a software-only dirty flag used to
+ * limit HW writes to modified entries.
+ */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* KM v7 TCI record: color and flow-type result for a TCAM match. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* KM v7 TCQ record: bank mask and qualifier. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table cache pointers for the KM v7 module; all point into one
+ * allocation owned by the module (see the corresponding alloc/free).
+ */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query PDB version and category count from the backend and allocate the
+ * shadow cache (RCP table + single config record) for version 9.
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the PDB shadow cache (one base allocation for all tables).
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to avoid dangling reuse and make repeated calls safe.
+ */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	free(be->pdb.base);
+	be->pdb.base = NULL;
+}
+
+/*
+ * Zero the PDB cache and push the cleared state to HW.
+ * Accumulates flush errors and returns them OR'ed together.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush 'count' cached PDB RCP records to HW starting at start_idx;
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Read or write one field of a cached PDB RCP record. Special fields:
+ * PRESET_ALL memsets the record with the low byte of *value (set only);
+ * FIND locates an identical record; COMPARE checks two records for
+ * equality. Only PDB version 9 is supported.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB RCP field into the cache (wrapper, get=0). */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one PDB RCP field from the cache into *value (wrapper, get=1). */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Push the cached PDB config record to HW. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set one PDB config field in the cache (no get counterpart).
+ * The cache is only written to HW by hw_mod_pdb_config_flush().
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * PDB v9 recipe (RCP) cache record; *_rel fields are signed relative
+ * offsets, the rest mirror unsigned FPGA register fields.
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* PDB v9 global config: timestamp format and port offset. */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table cache pointers for PDB v9; both point into one base
+ * allocation owned by the module.
+ */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query QSL version and resource counts from the backend and allocate the
+ * shadow cache (RCP, QST, QEN, UNMQ tables) for version 7.
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the QSL shadow cache (one base allocation for all tables).
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to avoid dangling reuse and make repeated calls safe.
+ */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	free(be->qsl.base);
+	be->qsl.base = NULL;
+}
+
+/*
+ * Zero the QSL cache and push the cleared state to HW.
+ * Flush errors are accumulated and returned OR'ed together,
+ * matching hw_mod_pdb_reset() (previously they were silently dropped).
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	err |= be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0,
+					 QSL_QNMQ_ENTRIES);
+
+	return err;
+}
+
+/* Flush 'count' cached QSL RCP records to HW starting at start_idx;
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write one field of a cached QSL RCP record. Special fields:
+ * PRESET_ALL memsets the record with the low byte of *value (set only);
+ * FIND locates an identical record; COMPARE checks two records for
+ * equality. Only QSL version 7 is supported.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL RCP field into the cache (wrapper, get=0). */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL RCP field from the cache into *value (wrapper, get=1). */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush 'count' cached QSL QST entries to HW starting at start_idx;
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write one field of a cached QSL QST (queue selection table)
+ * entry. PRESET_ALL memsets the entry with the low byte of *value
+ * (set only). Only QSL version 7 is supported.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QST field into the cache (wrapper, get=0). */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QST field from the cache into *value (wrapper, get=1). */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush 'count' cached QSL QEN entries to HW (fixed-size table of
+ * QSL_QEN_ENTRIES); ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write the enable bit of one cached QSL QEN entry.
+ * Only QSL version 7 is supported.
+ */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QEN field into the cache (wrapper, get=0). */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QEN field from the cache into *value (wrapper, get=1). */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush 'count' cached QSL UNMQ entries to HW (fixed-size table of
+ * QSL_QNMQ_ENTRIES); ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/*
+ * Read or write one cached QSL UNMQ (unmatched-queue) entry field:
+ * destination queue or enable. Only QSL version 7 is supported.
+ */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL UNMQ field into the cache (wrapper, get=0). */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL UNMQ field from the cache into *value (wrapper, get=1). */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* QSL v7 recipe (RCP) cache record; fields mirror FPGA QSL RCP registers. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* QSL v7 queue selection table (QST) entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* QSL v7 queue enable (QEN) entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* QSL v7 unmatched-queue (UNMQ) entry: destination queue and enable. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table cache pointers for QSL v7; all point into one base
+ * allocation owned by the module.
+ */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query RMC version from the backend and allocate the shadow cache
+ * (single ctrl record). 0x10003 encodes version 1.3.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the RMC shadow cache. free(NULL) is a no-op, so no guard is
+ * needed; the pointer is cleared to avoid dangling reuse and make
+ * repeated calls safe.
+ */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	free(be->rmc.base);
+	be->rmc.base = NULL;
+}
+
+/*
+ * Reset the RMC ctrl cache to a safe default (stat and keep-alive
+ * blocking on, all MAC ports and RPP slices blocked) and flush it to HW.
+ * The original code set HW_RMC_BLOCK_STATT twice with the same value;
+ * the duplicate call is removed (idempotent, cache state unchanged).
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Push the cached RMC ctrl record to HW. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Read or write one field of the cached RMC ctrl record.
+ * Only RMC version 1.3 (0x10003) is supported.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC ctrl field into the cache (wrapper, get=0). */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one RMC ctrl field from the cache into *value (wrapper, get=1). */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Shadow of the RMC control register fields (FPGA module version 1.3). */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;		/* block statistics records */
+	uint32_t block_keepa;		/* block keep-alive records */
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;	/* per-port blocking bitmask */
+	uint32_t lag_phy_odd_even;
+};
+
+/* Version 1.3 register-cache layout for the RMC module. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA module exists in this FPGA. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Detect the ROA module version and allocate its register cache.
+ * Returns 0 on success, negative on unsupported version or bad
+ * resource counts reported by the backend.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/*
+	 * NOTE(review): the backend count is divided by 4 — presumably one
+	 * tunnel category spans four backend records (a tunhdr entry holds
+	 * 4 * 4 words); confirm against the FPGA register map.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the ROA register cache allocated by hw_mod_roa_alloc(). */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset the ROA module: zero the register cache, program the default
+ * forwarding configuration and flush every table to hardware.
+ * Returns the OR of all flush results (0 on success), using the same
+ * error accumulation as hw_mod_tpe_reset(); previously only the TUNHDR
+ * flush result was returned and the other flush errors were dropped.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush [start_idx, start_idx + count) cached TUNHDR entries to hardware. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one 32-bit word (word_off) of the tunnel header template
+ * for the given tunnel category index.  Only module version 6 supported.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one tunnel-header word in the cache. */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Get one tunnel-header word from the cache. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) cached TUNCFG entries to hardware. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one tunnel-config field for the given category index.
+ * PRESET_ALL fills the whole record with a byte value (set only);
+ * FIND/COMPARE operate across all records.  Version 6 only.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one tunnel-config field in the cache. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Get one tunnel-config field from the cache. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Flush the single cached ROA config record to hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/* Read or write one field of the single ROA config record (version 6). */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one ROA config field in the cache. */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Get one ROA config field from the cache. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) cached LAGCFG entries to hardware. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/* Read or write one LAG config field for the given entry (version 6). */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one LAG config field in the cache. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Get one LAG config field from the cache. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* Tunnel header template: 16 words (64 bytes) of raw header data. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel push/recirculate configuration. */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;		/* update IP checksum */
+	uint32_t ipcs_precalc;		/* precalculated IP checksum */
+	uint32_t iptl_upd;		/* update IP total length */
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Module-global forwarding configuration (single record). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG table entry: physical TX port selection. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Version 6 register-cache layout for the ROA module. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC module exists in this FPGA. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Detect the SLC module version and allocate one RCP record per
+ * category.  Only version 1 is supported.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC register cache allocated by hw_mod_slc_alloc(). */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	if (be->slc.base) {
+		free(be->slc.base);
+		be->slc.base = NULL;
+	}
+}
+
+/* Reset the SLC module: zero the cache and flush all RCP entries. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) cached SLC RCP entries to hardware. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one SLC RCP field for the given
+ * category index.  Version 1 only.
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_v1_s) — the size of the cache-pointer holder
+ * (one pointer) — as the per-entry size.  The array element at
+ * be->slc.v1.rcp[index] is a struct slc_v1_rcp_s, so preset filled only
+ * part of the record and find/compare walked with the wrong stride.
+ * The entry struct size is now used, matching the ROA/TPE modules.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one SLC RCP field in the cache (flush separately). */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one SLC RCP field from the cache. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR module exists in this FPGA. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Detect the SLC LR module version and allocate one RCP record per
+ * category.  Only version 2 is supported.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC LR register cache allocated by hw_mod_slc_lr_alloc(). */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	if (be->slc_lr.base) {
+		free(be->slc_lr.base);
+		be->slc_lr.base = NULL;
+	}
+}
+
+/* Reset the SLC LR module: zero the cache and flush all RCP entries. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) cached SLC LR RCP entries. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one SLC LR RCP field for the given
+ * category index.  Version 2 only.
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_lr_v2_s) — the size of the cache-pointer
+ * holder (one pointer) — as the per-entry size.  The array element at
+ * be->slc_lr.v2.rcp[index] is a struct slc_lr_v2_rcp_s, so preset filled
+ * only part of the record and find/compare walked with the wrong stride.
+ * The entry struct size is now used, matching the ROA/TPE modules.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one SLC LR RCP field in the cache (flush separately). */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one SLC LR RCP field from the cache. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* One SLC LR recipe: tail slicing configuration per category. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;	/* dynamic offset selector */
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 2 register-cache layout for the SLC LR module. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* One SLC recipe: tail slicing configuration per category. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;	/* dynamic offset selector */
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 1 register-cache layout for the SLC module. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE module exists in this FPGA. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Detect the TPE module version, query all table sizes from the backend
+ * and allocate the register cache.  Version 1 has 8 tables; version 2
+ * adds the RPP IFR and IFR recipe tables (10 in total).
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	/* IFR tables exist only from version 2 onwards */
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the TPE register cache allocated by hw_mod_tpe_alloc(). */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	if (be->tpe.base) {
+		free(be->tpe.base);
+		be->tpe.base = NULL;
+	}
+}
+
+/*
+ * Reset the TPE module: zero the register cache and flush every table
+ * to hardware.  Returns the OR of all flush results (0 on success).
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* IFR tables exist only in version 2 */
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached RPP IFR RCP entries. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/* Read or write one RPP IFR RCP field (version 2 only). */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPP IFR RCP field in the cache. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPP IFR RCP field from the cache. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached RPP RCP entries. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one RPP RCP field for the given category.  The v1 cache
+ * view is used for both versions 1 and 2 (same record layout).
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPP RCP field in the cache. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPP RCP field from the cache. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached IFR RCP entries. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/* Read or write one IFR RCP field (version 2 only). */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one IFR RCP field in the cache. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one IFR RCP field from the cache. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached INS RCP entries. */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one INS RCP field for the given category.  The v1 cache
+ * view is used for both versions 1 and 2 (same record layout).
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one INS RCP field in the cache. */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one INS RCP field from the cache. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get or set a single field of RPL RCP record 'index'.
 * get != 0 reads the field into *value, get == 0 writes *value into it.
 * Returns 0 on success, or an error for a bad index, an unsupported field,
 * or an unsupported FPGA module version (_VER_).
 */
static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		/* Versions 1 and 2 share the v1 record layout. */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			/* Write-only: fill the record with the low byte of
			 * *value (typically 0 to reset).
			 */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
			break;
		case HW_TPE_FIND:
			/* Presumably searches for a record equal to record
			 * 'index'; helper defined elsewhere.
			 */
			rv = find_equal_index(be->tpe.v1.rpl_rcp,
				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			/* Compare record 'index' against record *value. */
			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		/* Single-field accessors via get_set(). */
		case HW_TPE_RPL_RCP_DYN:
			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
			break;
		case HW_TPE_RPL_RCP_OFS:
			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
			break;
		case HW_TPE_RPL_RCP_LEN:
			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
			break;
		case HW_TPE_RPL_RCP_RPL_PTR:
			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
			break;
		case HW_TPE_RPL_RCP_EXT_PRIO:
			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get or set a single field of RPL EXT record 'index'.
 * get != 0 reads the field into *value, get == 0 writes *value into it.
 * Returns 0 on success, or an error for a bad index, an unsupported field,
 * or an unsupported FPGA module version (_VER_).
 */
static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rpl_ext_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		/* Versions 1 and 2 share the v1 record layout. */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			/* Write-only: fill the record with the low byte of
			 * *value (typically 0 to reset).
			 */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_rpl_v2_ext_s));
			break;
		case HW_TPE_FIND:
			/* Presumably searches for a record equal to record
			 * 'index'; helper defined elsewhere.
			 */
			rv = find_equal_index(be->tpe.v1.rpl_ext,
				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
				be->tpe.nb_rpl_ext_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			/* Compare record 'index' against record *value. */
			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
				be->tpe.nb_rpl_ext_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		/* Single-field accessors via get_set(). */
		case HW_TPE_RPL_EXT_RPL_PTR:
			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
			break;
		case HW_TPE_RPL_EXT_META_RPL_LEN:
			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get or set RPL replacement-data record 'index'.
 * get != 0 reads into *value, get == 0 writes *value into the record.
 * HW_TPE_RPL_RPL_VALUE transfers the full 4-word (128-bit) replacement
 * block, so *value must point at at least 4 uint32_t.
 * Returns 0 on success, or an error for a bad index, an unsupported field,
 * or an unsupported FPGA module version (_VER_).
 */
static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rpl_depth)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		/* Versions 1 and 2 share the v1 record layout. */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			/* Write-only: fill the record with the low byte of
			 * *value (typically 0 to reset).
			 */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
			break;
		case HW_TPE_FIND:
			/* Presumably searches for a record equal to record
			 * 'index'; helper defined elsewhere.
			 */
			rv = find_equal_index(be->tpe.v1.rpl_rpl,
				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
				be->tpe.nb_rpl_depth, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			/* Compare record 'index' against record *value. */
			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
				be->tpe.nb_rpl_depth, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_RPL_RPL_VALUE:
			/* Bulk copy of the whole 4-word replacement value. */
			if (get)
				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
				       sizeof(uint32_t) * 4);
			else
				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
				       sizeof(uint32_t) * 4);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get or set a single field of CPY RCP record 'index'.
 * The table is two-dimensional: nb_cpy_writers * nb_rcp_categories entries.
 * get != 0 reads the field into *value, get == 0 writes *value into it.
 * Returns 0 on success, or an error for a bad index, an unsupported field,
 * or an unsupported FPGA module version (_VER_).
 */
static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	const uint32_t cpy_size =
		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
	int rv = 0;
	if (index >= cpy_size)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		/* Versions 1 and 2 share the v1 record layout. */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			/* Write-only: fill the record with the low byte of
			 * *value (typically 0 to reset).
			 */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
			break;
		case HW_TPE_FIND:
			/* Presumably searches for a record equal to record
			 * 'index'; helper defined elsewhere.
			 */
			rv = find_equal_index(be->tpe.v1.cpy_rcp,
				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
				cpy_size, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			/* Compare record 'index' against record *value. */
			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
				cpy_size, get, __func__);
			if (rv != 0)
				return rv;
			break;
		/* Single-field accessors via get_set(). */
		case HW_TPE_CPY_RCP_READER_SELECT:
			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
			break;
		case HW_TPE_CPY_RCP_DYN:
			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
			break;
		case HW_TPE_CPY_RCP_OFS:
			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
			break;
		case HW_TPE_CPY_RCP_LEN:
			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get or set a single field of HFU RCP record 'index'.
 * The record drives three length-rewrite engines (A/B/C), a TTL rewrite,
 * and checksum/offset info for the outer/inner L3/L4 headers.
 * get != 0 reads the field into *value, get == 0 writes *value into it.
 * Returns 0 on success, or an error for a bad index, an unsupported field,
 * or an unsupported FPGA module version (_VER_).
 */
static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		/* Versions 1 and 2 share the v1 record layout. */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			/* Write-only: fill the record with the low byte of
			 * *value (typically 0 to reset).
			 */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
			break;
		case HW_TPE_FIND:
			/* Presumably searches for a record equal to record
			 * 'index'; helper defined elsewhere.
			 */
			rv = find_equal_index(be->tpe.v1.hfu_rcp,
				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			/* Compare record 'index' against record *value. */
			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		/* Length engine A fields. */
		case HW_TPE_HFU_RCP_LEN_A_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
				value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
			break;

		/* Length engine B fields. */
		case HW_TPE_HFU_RCP_LEN_B_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
			break;

		/* Length engine C fields. */
		case HW_TPE_HFU_RCP_LEN_C_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
			break;

		/* TTL rewrite fields. */
		case HW_TPE_HFU_RCP_TTL_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_TTL_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_TTL_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
			break;

		/* Checksum/protocol info and header offsets. */
		case HW_TPE_HFU_RCP_CS_INF:
			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
			break;
		case HW_TPE_HFU_RCP_L3_PRT:
			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
			break;
		case HW_TPE_HFU_RCP_L3_FRAG:
			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
			break;
		case HW_TPE_HFU_RCP_TUNNEL:
			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
			break;
		case HW_TPE_HFU_RCP_L4_PRT:
			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
			break;
		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_INNER_L3_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_INNER_L4_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get or set a single field of CSU RCP record 'index' (checksum commands
 * for the outer/inner L3 and L4 headers).
 * get != 0 reads the field into *value, get == 0 writes *value into it.
 * Returns 0 on success, or an error for a bad index, an unsupported field,
 * or an unsupported FPGA module version (_VER_).
 */
static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		/* Versions 1 and 2 share the v1 record layout. */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			/* Write-only: fill the record with the low byte of
			 * *value (typically 0 to reset).
			 */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_csu_v0_rcp_s));
			break;
		case HW_TPE_FIND:
			/* Presumably searches for a record equal to record
			 * 'index'; helper defined elsewhere.
			 */
			rv = find_equal_index(be->tpe.v1.csu_rcp,
				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			/* Compare record 'index' against record *value. */
			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		/* Single-field accessors via get_set(). */
		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
			break;
		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
			break;
		case HW_TPE_CSU_RCP_INNER_L3_CMD:
			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
			break;
		case HW_TPE_CSU_RCP_INNER_L4_CMD:
			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
/* RPP recipe record (NOTE(review): 'exp' presumably an expansion size —
 * confirm against FPGA register docs).
 */
struct tpe_v1_rpp_v0_rcp_s {
	uint32_t exp;
};

/* INS (inserter) recipe: where to insert (dyn/ofs) and how much (len). */
struct tpe_v1_ins_v1_rcp_s {
	uint32_t dyn;
	uint32_t ofs;
	uint32_t len;
};

/* RPL (replacer) recipe: region to replace plus replacement-table pointer. */
struct tpe_v1_rpl_v2_rcp_s {
	uint32_t dyn;
	uint32_t ofs;
	uint32_t len;
	uint32_t rpl_ptr;
	uint32_t ext_prio;
};

/* RPL extension record: pointer into the replacement table. */
struct tpe_v1_rpl_v2_ext_s {
	uint32_t rpl_ptr;
	uint32_t meta_rpl_len; /* SW only */
};

/* One 128-bit replacement-data entry (4 x 32-bit words). */
struct tpe_v1_rpl_v2_rpl_s {
	uint32_t value[4];
};

/* CPY (copy writer) recipe: source reader and the copied region. */
struct tpe_v1_cpy_v1_rcp_s {
	uint32_t reader_select;
	uint32_t dyn;
	uint32_t ofs;
	uint32_t len;
};

/* HFU recipe: three length-rewrite engines (A/B/C), TTL rewrite, and
 * checksum/offset info for outer/inner L3/L4 headers.
 */
struct tpe_v1_hfu_v1_rcp_s {
	uint32_t len_a_wr;
	uint32_t len_a_outer_l4_len;
	uint32_t len_a_pos_dyn;
	uint32_t len_a_pos_ofs;
	uint32_t len_a_add_dyn;
	uint32_t len_a_add_ofs;
	uint32_t len_a_sub_dyn;

	uint32_t len_b_wr;
	uint32_t len_b_pos_dyn;
	uint32_t len_b_pos_ofs;
	uint32_t len_b_add_dyn;
	uint32_t len_b_add_ofs;
	uint32_t len_b_sub_dyn;

	uint32_t len_c_wr;
	uint32_t len_c_pos_dyn;
	uint32_t len_c_pos_ofs;
	uint32_t len_c_add_dyn;
	uint32_t len_c_add_ofs;
	uint32_t len_c_sub_dyn;

	uint32_t ttl_wr;
	uint32_t ttl_pos_dyn;
	uint32_t ttl_pos_ofs;

	uint32_t cs_inf;
	uint32_t l3_prt;
	uint32_t l3_frag;
	uint32_t tunnel;
	uint32_t l4_prt;
	uint32_t outer_l3_ofs;
	uint32_t outer_l4_ofs;
	uint32_t inner_l3_ofs;
	uint32_t inner_l4_ofs;
};

/* CSU recipe: checksum commands for outer/inner L3 and L4 headers. */
struct tpe_v1_csu_v0_rcp_s {
	uint32_t ol3_cmd;
	uint32_t ol4_cmd;
	uint32_t il3_cmd;
	uint32_t il4_cmd;
};

/* Per-module record tables for TPE version 1; each pointer is an array
 * sized by the corresponding nb_* count in the backend (allocated
 * elsewhere).
 */
struct hw_mod_tpe_v1_s {
	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;

	struct tpe_v1_ins_v1_rcp_s *ins_rcp;

	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;

	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;

	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;

	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
/* RPP IFR recipe: enable flag and MTU for IP fragmentation — TODO confirm
 * IFR expansion against FPGA docs.
 */
struct tpe_v2_rpp_v1_ifr_rcp_s {
	uint32_t en;
	uint32_t mtu;
};

/* IFR recipe: enable flag and MTU (same layout as the RPP IFR record). */
struct tpe_v2_ifr_v1_rcp_s {
	uint32_t en;
	uint32_t mtu;
};

/* Per-module record tables for TPE version 2: the v1 tables plus the two
 * IFR tables added in this version.
 */
struct hw_mod_tpe_v2_s {
	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;

	struct tpe_v1_ins_v1_rcp_s *ins_rcp;

	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;

	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;

	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;

	struct tpe_v1_csu_v0_rcp_s *csu_rcp;

	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Big-endian (network byte order) scalar aliases used by the header
 * structures below; plain integer typedefs, no conversion implied.
 */
typedef uint16_t be16_t; /* 16-bit big-endian */
typedef uint32_t be32_t; /* 32-bit big-endian */
typedef uint64_t be64_t; /* 64-bit big-endian */

/*
 * Flow frontend for binary programming interface
 */

/* Upper bound on queues per flow eth dev (see flow_queue_id_s usage). */
#define FLOW_MAX_QUEUES 128

/* Max packet-layout elements in a raw encap/decap action. */
#define RAW_ENCAP_DECAP_ELEMS_MAX 16

/*
 * Partial flow mark and special flow marks
 */
#define FLOW_MARK_LACP 0x7fffffff /* reserved mark for LACP frames */
#define FLOW_MARK_MAX 0x7ffffffe /* highest mark available to normal flows */
/*
 * Flow eth dev profile determines how the FPGA module resources are
 * managed and what features are available
 */
enum flow_eth_dev_profile {
	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
	FLOW_ETH_DEV_PROFILE_INLINE = 1,
};
+
/*
 * Flow rule attributes
 */
struct flow_attr {
	uint32_t group; /* Priority group. */
	uint32_t priority; /* Rule priority level within group. */
	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
	uint8_t caller_id; /* Unique ID of caller application. */
};

/* Mapping of a logical queue to its hardware queue. */
struct flow_queue_id_s {
	int id; /* logical (virtual) queue id */
	int hw_id; /* hardware queue id */
};
+
/* NT Private rte flow items. */

/* NT Private rte flow actions. */

/*
 * Match-element types; mirrors RTE_FLOW_ITEM_TYPE_* for the supported
 * protocols. Each entry names the spec struct used in flow_elem.spec/mask.
 */
enum flow_elem_type {
	FLOW_ELEM_TYPE_END, /* end-of-list marker */
	FLOW_ELEM_TYPE_ANY, /* struct flow_elem_any */
	FLOW_ELEM_TYPE_ETH, /* struct flow_elem_eth */
	FLOW_ELEM_TYPE_VLAN, /* struct flow_elem_vlan */
	FLOW_ELEM_TYPE_IPV4, /* struct flow_elem_ipv4 */
	FLOW_ELEM_TYPE_IPV6, /* struct flow_elem_ipv6 */
	FLOW_ELEM_TYPE_SCTP, /* struct flow_elem_sctp */
	FLOW_ELEM_TYPE_TCP, /* struct flow_elem_tcp */
	FLOW_ELEM_TYPE_UDP, /* struct flow_elem_udp */
	FLOW_ELEM_TYPE_ICMP, /* struct flow_elem_icmp */
	FLOW_ELEM_TYPE_VXLAN, /* struct flow_elem_vxlan */
	FLOW_ELEM_TYPE_GTP, /* struct flow_elem_gtp */
	FLOW_ELEM_TYPE_PORT_ID, /* struct flow_elem_port_id */
	FLOW_ELEM_TYPE_TAG, /* struct flow_elem_tag */
	FLOW_ELEM_TYPE_VOID, /* ignored placeholder */

	/*
	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
	 * extension
	 */
	FLOW_ELEM_TYPE_TUNNEL
};
+
/*
 * Flow action types; mirrors RTE_FLOW_ACTION_TYPE_* for the supported
 * actions. The comment on each entry names its conf structure.
 */
enum flow_action_type { /* conf structure */
	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
	FLOW_ACTION_TYPE_VXLAN_DECAP,
	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
	/* struct flow_action_port_id : Destination port ID - HW port ID */
	FLOW_ACTION_TYPE_PORT_ID,
	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
	FLOW_ACTION_TYPE_METER,
	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */

	/*
	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
	 * specific extension
	 */
	FLOW_ACTION_TYPE_TUNNEL_SET
};
+
#pragma pack(1)
/* 48-bit MAC address, byte 0 first (wire order). */
struct ether_addr_s {
	uint8_t addr_b[6];
};

#pragma pack()

/*
 * Render a MAC address into buf as upper-case "XX:XX:XX:XX:XX:XX".
 * Output is truncated to size (snprintf semantics); always NUL-terminated
 * when size > 0.
 */
static inline void flow_ether_format_addr(char *buf, uint16_t size,
		const struct ether_addr_s *eth_addr)
{
	const uint8_t *b = eth_addr->addr_b;

	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
		 b[0], b[1], b[2], b[3], b[4], b[5]);
}
+
/*
 * IPv4 Header
 */
#pragma pack(1)
struct ipv4_hdr_s {
	uint8_t version_ihl; /* version (high nibble) + header length in words */
	uint8_t tos; /* type of service / DSCP+ECN */
	be16_t length; /* total length (header + payload) */
	be16_t id;
	be16_t frag_offset; /* flags + fragment offset */
	uint8_t ttl;
	uint8_t next_proto_id; /* L4 protocol number */
	be16_t hdr_csum;
	be32_t src_ip;
	be32_t dst_ip;
};

#pragma pack()
/*
 * IPv6 Header
 */
#pragma pack(1)
struct ipv6_hdr_s {
	be32_t vtc_flow; /* IP version, traffic class & flow label */
	be16_t payload_len; /* IP packet length - includes ip header */
	uint8_t proto; /* next-header / L4 protocol number */
	uint8_t hop_limits;
	uint8_t src_addr[16];
	uint8_t dst_addr[16];
};

#pragma pack()

/*
 * SCTP Header
 */
#pragma pack(1)
struct sctp_hdr_s {
	be16_t src_port;
	be16_t dst_port;
	be32_t tag; /* Validation tag */
	be32_t cksum;
};

#pragma pack()

/*
 * TCP Header
 */
#pragma pack(1)
struct tcp_hdr_s {
	be16_t src_port;
	be16_t dst_port;
	be32_t sent_seq;
	be32_t recv_ack;
	uint8_t data_off; /* data offset (high nibble, in 32-bit words) */
	uint8_t tcp_flags;
	be16_t rx_win;
	be16_t cksum;
	be16_t tcp_urp; /* urgent pointer */
};

#pragma pack()

/*
 * UDP Header
 */
#pragma pack(1)
struct udp_hdr_s {
	be16_t src_port;
	be16_t dst_port;
	be16_t len; /* datagram length incl. header */
	be16_t cksum;
};

#pragma pack()

/*
 * ICMP Header
 */
#pragma pack(1)
struct icmp_hdr_s {
	uint8_t type;
	uint8_t code;
	be16_t cksum;
	be16_t ident; /* echo request/reply identifier */
	be16_t seq_nb; /* echo request/reply sequence number */
};

#pragma pack()
/*
 * FLOW_ELEM_TYPE_ETH specification
 */
#pragma pack(1)
struct flow_elem_eth {
	struct ether_addr_s d_addr; /* DMAC */
	struct ether_addr_s s_addr; /* SMAC */
	be16_t ether_type; /* Frame type */
};

#pragma pack()

/*
 * FLOW_ELEM_TYPE_VLAN specification
 */
#pragma pack(1)
struct flow_elem_vlan {
	be16_t tci; /* Tag control information */
	be16_t inner_type; /* Inner EtherType or TPID */
};

#pragma pack()

/*
 * FLOW_ELEM_TYPE_IPV4 specification
 */
struct flow_elem_ipv4 {
	struct ipv4_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_IPV6 specification
 */
struct flow_elem_ipv6 {
	struct ipv6_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_SCTP specification
 */
struct flow_elem_sctp {
	struct sctp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_TCP specification
 */
struct flow_elem_tcp {
	struct tcp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_UDP specification
 */
struct flow_elem_udp {
	struct udp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_ICMP specification
 */
struct flow_elem_icmp {
	struct icmp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_GTP specification
 */
#pragma pack(1)
struct flow_elem_gtp {
	uint8_t v_pt_rsv_flags; /* version, proto type, reserved + flags */
	uint8_t msg_type;
	be16_t msg_len;
	be32_t teid; /* tunnel endpoint id */
};

#pragma pack()

/*
 * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
 */
#pragma pack(1)
struct flow_elem_vxlan {
	uint8_t flags; /* Normally 0x08 (I flag) */
	uint8_t rsvd0[3];
	uint8_t vni[3]; /* 24-bit VXLAN network identifier */
	uint8_t rsvd1;
};

#pragma pack()
/*
 * FLOW_ELEM_TYPE_PORT_ID specification
 */
struct flow_elem_port_id {
	uint32_t id; /* HW port no */
};

/*
 * FLOW_ELEM_TYPE_TAG specification
 */
struct flow_elem_tag {
	uint32_t data; /* value to match in the tag register */
	uint8_t index; /* which tag register */
};

/*
 * FLOW_ELEM_TYPE_ANY specification
 */
struct flow_elem_any {
	uint32_t num; /**< Number of layers covered. */
};

/* One match element in a flow pattern; type selects the spec/mask layout. */
struct flow_elem {
	enum flow_elem_type type; /* element type */
	const void *spec; /* Pointer to element specification structure */
	const void *mask; /* Bitmask applied to spec - same type */
};
+
/*
 * FLOW_ACTION_TYPE_RSS
 */
enum flow_hash_function {
	FLOW_HASH_FUNCTION_DEFAULT = 0,
	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
};

/* RSS action configuration; layout parallels struct rte_flow_action_rss. */
struct flow_action_rss {
	enum flow_hash_function func;
	uint32_t level; /* only level 0 supported */
	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
	uint64_t types;
	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
	uint32_t queue_num; /* Number of entries in queue */
	const uint8_t *key; /* Not supported yet - Hash key */
	const uint16_t *queue; /* Queue indices to use */
};
+
/*
 * FLOW_ACTION_TYPE_PUSH_VLAN
 * Push a new vlan TAG
 */
struct flow_action_push_vlan {
	be16_t ethertype; /* TPID of the pushed tag, e.g. 0x8100 */
};

/*
 * FLOW_ACTION_TYPE_SET_VLAN_VID
 */
struct flow_action_set_vlan_vid {
	be16_t vlan_vid; /* new VLAN id for the outermost tag */
};

/*
 * FLOW_ACTION_TYPE_SET_VLAN_PCP
 */
struct flow_action_set_vlan_pcp {
	uint8_t vlan_pcp; /* < VLAN priority. */
};

/*
 * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
 * Valid flow definition:
 *
 * - ETH / IPV4 / UDP / VXLAN / END
 * - ETH / IPV6 / UDP / VXLAN / END
 * - ETH / VLAN / IPV4 / UDP / VXLAN / END
 *
 */
struct flow_action_vxlan_encap {
	/* Encapsulating vxlan tunnel definition */
	struct flow_elem *vxlan_tunnel;
};
+
/*
 * FLOW_ACTION_TYPE_COUNT specification
 */
struct flow_action_count {
	/* NOTE(review): original comment said "HW port no" — looks like a
	 * copy-paste from flow_elem_port_id; presumably this is the counter
	 * id. Confirm against the flow query implementation.
	 */
	uint32_t id;
};

/*
 * FLOW_ACTION_TYPE_COUNT specification (query)
 */
struct flow_query_count {
	uint32_t reset : 1; /* reset counters after query */
	uint32_t hits_set : 1; /* 'hits' field is valid */
	uint32_t bytes_set : 1; /* 'bytes' field is valid */

	uint32_t tcp_flags : 9; /* accumulated TCP flags seen on the flow */

	uint32_t reserved : 20;
	uint64_t hits;
	uint64_t bytes;
};

/*
 * FLOW_ACTION_TYPE_MARK specification
 */
struct flow_action_mark {
	uint32_t id; /* mark flow with this ID */
};

/*
 * FLOW_ACTION_TYPE_TAG specification
 */
struct flow_action_tag {
	uint32_t data; /* tag flow with this value */
	uint32_t mask; /* bit-mask applied to "data" */
	uint8_t index; /* index of tag to set */
};

/*
 * FLOW_ACTION_TYPE_PORT_ID specification
 */
struct flow_action_port_id {
	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
	uint32_t id;
};

/*
 * FLOW_ACTION_TYPE_QUEUE
 */
struct flow_action_queue {
	uint16_t index; /* destination RX queue */
};

/*
 * FLOW_ACTION_TYPE_JUMP
 */
struct flow_action_jump {
	uint32_t group; /* target flow group */
};

/*
 * FLOW_ACTION_TYPE_METER
 */
struct flow_action_meter {
	uint32_t mtr_id; /* meter object id */
};

/*
 * FLOW_ACTION_TYPE_RAW_ENCAP
 */
struct flow_action_raw_encap {
	uint8_t *data; /* encapsulation header bytes to prepend */
	uint8_t *preserve; /* bit-mask of 'data' bits to preserve from packet */
	size_t size; /* byte length of 'data' */
	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX]; /* parsed layout of 'data' */
	int item_count;
};

/*
 * FLOW_ACTION_TYPE_RAW_DECAP
 */
struct flow_action_raw_decap {
	uint8_t *data; /* header bytes to strip */
	size_t size; /* byte length of 'data' */
	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX]; /* parsed layout of 'data' */
	int item_count;
};
+
/*
 * Field IDs for MODIFY_FIELD action.
 * Mirrors enum rte_flow_field_id for the supported fields.
 */
enum flow_field_id {
	FLOW_FIELD_START = 0, /* Start of a packet. */
	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
	FLOW_FIELD_MAC_TYPE, /* EtherType. */
	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
	FLOW_FIELD_TAG, /* Tag value. */
	FLOW_FIELD_MARK, /* Mark value. */
	FLOW_FIELD_META, /* Metadata value. */
	FLOW_FIELD_POINTER, /* Memory pointer. */
	FLOW_FIELD_VALUE, /* Immediate value. */
	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
};
+
/*
 * Field description for MODIFY_FIELD action.
 * Which union member is valid is selected by "field": the anonymous
 * struct for packet/register fields, value[] for FLOW_FIELD_VALUE and
 * pvalue for FLOW_FIELD_POINTER.
 */
struct flow_action_modify_data {
	enum flow_field_id field; /* Field or memory type ID. */
	union {
		struct {
			/* Encapsulation level or tag index. */
			uint32_t level;
			/* Number of bits to skip from a field. */
			uint32_t offset;
		};
		/*
		 * Immediate value for FLOW_FIELD_VALUE, presented in the
		 * same byte order and length as in relevant rte_flow_item_xxx.
		 */
		uint8_t value[16];
		/*
		 * Memory address for FLOW_FIELD_POINTER, memory layout
		 * should be the same as for relevant field in the
		 * rte_flow_item_xxx structure.
		 */
		void *pvalue;
	};
};

/*
 * Operation types for MODIFY_FIELD action.
 */
enum flow_modify_op {
	FLOW_MODIFY_SET = 0, /* dst = src */
	FLOW_MODIFY_ADD, /* dst = dst + src */
	FLOW_MODIFY_SUB, /* dst = dst - src */
};

/*
 * FLOW_ACTION_TYPE_MODIFY_FIELD
 * Apply "operation" to "width" bits, from src into dst.
 */
struct flow_action_modify_field {
	enum flow_modify_op operation; /* SET, ADD or SUB. */
	struct flow_action_modify_data dst; /* Destination field. */
	struct flow_action_modify_data src; /* Source field or immediate. */
	uint32_t width; /* Number of bits to transfer. */
};
+
/* One entry of an action list: a type tag plus type-specific config. */
struct flow_action {
	enum flow_action_type type; /* Selects which flow_action_* struct conf points to. */
	const void *conf; /* Action-specific configuration. */
};

/* Error classification carried in struct flow_error. */
enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };

/* Error report filled in by the flow API calls below. */
struct flow_error {
	enum flow_error_e type; /* Outcome class of the last call. */
	const char *message; /* Human readable description (static string). */
};

/* Sub-commands for lag_set_config(). */
enum flow_lag_cmd {
	FLOW_LAG_SET_ENTRY, /* Write one LAG table entry at "index". */
	FLOW_LAG_SET_ALL, /* Write every 4th entry, offset by index & 3. */
	FLOW_LAG_SET_BALANCE, /* Split table between two ports by percentage. */
};
+
/*
 * Tunnel definition for DPDK RTE tunnel helper function support.
 * Addresses and ports are stored in network byte order (BE).
 */
struct tunnel_cfg_s {
	union {
		struct {
			uint32_t src_ip; /* BE */
			uint32_t dst_ip; /* BE */
		} v4;
		struct {
			uint8_t src_ip[16];
			uint8_t dst_ip[16];
		} v6;
		struct {
			/* Same 128-bit addresses as v6, viewed as 64-bit words. */
			uint64_t src_ip[2];
			uint64_t dst_ip[2];
		} v6_long;
	};
	int ipversion; /* Selects the union view - presumably 4 or 6; confirm at callers. */
	uint16_t s_port; /* BE */
	uint16_t d_port; /* BE */
	int tun_type; /* Tunnel protocol type; values defined elsewhere. */
};
+
struct flow_eth_dev; /* port device */
struct flow_handle; /* opaque per-flow handle returned by flow_create() */

/*
 * Device Management API
 */

/* Reset all flow state on the given adapter. Returns 0 on success. */
int flow_reset_nic_dev(uint8_t adapter_no);

/*
 * Get/create the flow device for a port and attach alloc_queues of the
 * supplied queue ids to it. Returns NULL on failure.
 * NOTE(review): queue/RSS parameter semantics assumed from names -
 * confirm against the implementation.
 */
struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
				      uint32_t port_id, int alloc_queues,
				      struct flow_queue_id_s queue_ids[],
				      int *rss_target_id,
				      enum flow_eth_dev_profile flow_profile,
				      uint32_t exception_path);

/* Attach one more queue to an existing flow device. */
int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
			   struct flow_queue_id_s *queue_id);

/* Tear down a flow device created by flow_get_eth_dev(). */
int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);

/* Look up the tunnel configuration for a flow stat id and vport. */
int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
			       uint8_t vport);

/*
 * NT Flow API
 */

/* Validate an item/action list without programming hardware. */
int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
		  const struct flow_action action[], struct flow_error *error);

/* Create and program a flow; returns NULL and fills *error on failure. */
struct flow_handle *flow_create(struct flow_eth_dev *dev,
				const struct flow_attr *attr,
				const struct flow_elem item[],
				const struct flow_action action[],
				struct flow_error *error);

/* Destroy a single flow previously returned by flow_create(). */
int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
		 struct flow_error *error);

/* Destroy all flows on the device. */
int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);

/* Query an action (e.g. counters) of an existing flow. */
int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
	       const struct flow_action *action, void **data, uint32_t *length,
	       struct flow_error *error);

/*
 * NT Flow FLM Meter API
 */

/* Non-zero when the device supports FLM metering. */
int flow_mtr_supported(struct flow_eth_dev *dev);

/* Maximum number of meter policies supported. */
uint64_t flow_mtr_meter_policy_n_max(void);

/* Configure meter profile profile_id with two token buckets (A and B). */
int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
			 uint64_t bucket_rate_b, uint64_t bucket_size_b);

/* Configure meter policy policy_id; "drop" selects the exceed action. */
int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);

/* Create meter mtr_id from an existing profile and policy. */
int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
			  uint32_t profile_id, uint32_t policy_id,
			  uint64_t stats_mask);

/* Destroy meter mtr_id. */
int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);

/* Adjust statistics of meter mtr_id by adjust_value - TODO confirm units. */
int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
			 uint32_t adjust_value);

/* Number of meters the device supports. */
uint32_t flow_mtr_meters_supported(void);

/* Poll FLM meter statistics; return value semantics defined by the implementation. */
uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
/* Read (and optionally clear) green packet/byte counters for meter "id". */
void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
			uint64_t *stats_mask, uint64_t *green_pkt,
			uint64_t *green_bytes, int clear);

/*
 * Config API
 */

/* Merge port pairs N*2/N*2+1 as LAG according to bits set in port_mask. */
int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);

/* Block traffic from the ports set in port_mask. */
int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);

/* Write LAG distribution table; cmd selects entry/all/balance mode. */
int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
		   uint32_t value);

/* Set MTU for the inline profile on the given port. */
int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 6/8] net/ntnic: adds flow logic
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-21 11:34   ` [PATCH v5 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-21 11:34   ` Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index e253265091..718359b67a 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
/*
 * Debug names for resource types, indexed by enum res_type_e.
 * Order must match the enum exactly - used by the ref/deref log messages.
 */
const char *dbg_res_descr[] = {
	/* RES_QUEUE */ "RES_QUEUE",
	/* RES_CAT_CFN */ "RES_CAT_CFN",
	/* RES_CAT_COT */ "RES_CAT_COT",
	/* RES_CAT_EXO */ "RES_CAT_EXO",
	/* RES_CAT_LEN */ "RES_CAT_LEN",
	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
	/* RES_HSH_RCP */ "RES_HSH_RCP",
	/* RES_PDB_RCP */ "RES_PDB_RCP",
	/* RES_QSL_RCP */ "RES_QSL_RCP",
	/* RES_QSL_LTX */ "RES_QSL_LTX",
	/* RES_QSL_QST */ "RES_QSL_QST",
	/* RES_SLC_RCP */ "RES_SLC_RCP",
	/* RES_IOA_RCP */ "RES_IOA_RCP",
	/* RES_ROA_RCP */ "RES_ROA_RCP",
	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
	/* RES_FLM_RCP */ "RES_FLM_RCP",
	/* RES_HST_RCP */ "RES_HST_RCP",
	/* RES_TPE_RCP */ "RES_TPE_RCP",
	/* RES_TPE_EXT */ "RES_TPE_EXT",
	/* RES_TPE_RPL */ "RES_TPE_RPL",
	/* RES_COUNT */ "RES_COUNT",
	/* RES_INVALID */ "RES_INVALID"
};
+
/* Head of the global list of known NIC devices. */
static struct flow_nic_dev *dev_base;
/* Serializes access to dev_base and the LAG configuration functions. */
static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
/*
 * Error descriptions, indexed by enum flow_nic_err_msg_e.
 * flow_nic_set_error() hands these strings out via flow_error.message,
 * so the order must match the enum.
 */
static const struct {
	const char *message;
} err_msg[] = {
	/* 00 */ { "Operation successfully completed" },
	/* 01 */ { "Operation failed" },
	/* 02 */ { "Memory allocation failed" },
	/* 03 */ { "Too many output destinations" },
	/* 04 */ { "Too many output queues for RSS" },
	/* 05 */ { "The VLAN TPID specified is not supported" },
	/* 06 */ { "The VxLan Push header specified is not accepted" },
	/* 07 */
	{ "While interpreting VxLan Pop action, could not find a destination port" },
	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
	/* 09 */ { "Too many VLAN tag matches" },
	/* 10 */ { "IPv6 invalid header specified" },
	/* 11 */ { "Too many tunnel ports. HW limit reached" },
	/* 12 */ { "Unknown or unsupported flow match element received" },
	/* 13 */ { "Match failed because of HW limitations" },
	/* 14 */ { "Match failed because of HW resource limitations" },
	/* 15 */ { "Match failed because of too complex element definitions" },
	/* 16: fixed wording - was "Action failed. To too many output destinations" */
	{ "Action failed. Too many output destinations" },
	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
	/* 18 */
	{ "Push Tunnel Header action cannot output to multiple destination queues" },
	/* 19 */ { "Inline action HW resource exhaustion" },
	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
	/* 21 */ { "Flow counter HW resource exhaustion" },
	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
	/* 23 */ { "Internal HW QSL compare failed" },
	/* 24 */ { "Internal CAT CFN reuse failed" },
	/* 25 */ { "Match variations too complex" },
	/* 26 */ { "Match failed because of CAM/TCAM full" },
	/* 27 */ { "Internal creation of a tunnel end point port failed" },
	/* 28 */ { "Unknown or unsupported flow action received" },
	/* 29 */ { "Removing flow failed" },
	/* 30: fixed wording - was "... and uses default queue" */
	{ "No output queue specified. Ignore this flow offload and use default queue" },
	/* 31 */ { "No output queue found" },
	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
	/* 33 */
	{ "Destination port specified is invalid or not reachable from this NIC" },
	/* 34 */ { "Partial offload is not supported in this configuration" },
	/* 35 */ { "Match failed because of CAT CAM exhausted" },
	/* 36 */
	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
	/* 37 */ { "Match failed because of CAT CAM write failed" },
	/* 38 */ { "Partial flow mark too big for device" },
};
+
/*
 * Fill *error from an internal error code, pointing message at the static
 * err_msg table. ERR_SUCCESS maps to FLOW_ERROR_SUCCESS, everything else
 * to FLOW_ERROR_GENERAL. A NULL error pointer is ignored.
 */
void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
{
	assert(msg < ERR_MSG_NO_MSG);
	if (error) {
		error->message = err_msg[msg].message;
		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
			      FLOW_ERROR_GENERAL;
	}
}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
/*
 * Allocate the first free resource of res_type, scanning from index 0 in
 * steps of "alignment" (callers must pass alignment >= 1; 0 would loop
 * forever on index 0). The entry is marked used with refcount 1.
 * Returns the allocated index, or -1 if none is free.
 */
int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
			    uint32_t alignment)
{
	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
			i += alignment) {
		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
			flow_nic_mark_resource_used(ndev, res_type, i);
			ndev->res[res_type].ref[i] = 1;
			return i;
		}
	}
	return -1;
}
+
/*
 * Allocate a specific resource index. The entry gets refcount 1.
 * Returns 0 on success, -1 if the index is already in use.
 */
int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
				  enum res_type_e res_type)
{
	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
		flow_nic_mark_resource_used(ndev, res_type, idx);
		ndev->res[res_type].ref[idx] = 1;
		return 0;
	}
	return -1;
}
+
/*
 * Allocate "num" contiguous resources of res_type, the first index being a
 * multiple of "alignment" (must be >= 1). All entries get refcount 1.
 * Returns the first index of the run, or -1 if no free run exists.
 */
int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
				   enum res_type_e res_type, unsigned int num,
				   uint32_t alignment)
{
	unsigned int idx_offs;

	for (unsigned int res_idx = 0;
			res_idx < ndev->res[res_type].resource_count - (num - 1);
			res_idx += alignment) {
		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
			/* First slot free - verify the remaining num - 1 slots. */
			for (idx_offs = 1; idx_offs < num; idx_offs++) {
				if (flow_nic_is_resource_used(ndev, res_type,
							      res_idx + idx_offs))
					break;
			}
			if (idx_offs < num)
				continue;

			/* found a contiguous number of "num" res_type elements - allocate them */
			for (idx_offs = 0; idx_offs < num; idx_offs++) {
				flow_nic_mark_resource_used(ndev, res_type,
							    res_idx + idx_offs);
				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
			}
			return res_idx;
		}
	}
	return -1;
}
+
/* Release one resource entry unconditionally (no refcount check here). */
void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
			    int idx)
{
	flow_nic_mark_resource_unused(ndev, res_type, idx);
}
+
/*
 * Take an extra reference on an already-allocated resource.
 * Returns 0 on success, -1 if the refcount would overflow (saturated at
 * UINT32_MAX). The resource must currently be marked used.
 */
int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
			  int index)
{
	NT_LOG(DBG, FILTER,
	       "Reference resource %s idx %i (before ref cnt %i)\n",
	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));
	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
		return -1;
	ndev->res[res_type].ref[index]++;
	return 0;
}
+
/*
 * Drop one reference on a resource; frees it when the count reaches 0.
 * Returns 1 while references remain, 0 when the resource was freed.
 */
int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
			    int index)
{
	NT_LOG(DBG, FILTER,
	       "De-reference resource %s idx %i (before ref cnt %i)\n",
	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));
	assert(ndev->res[res_type].ref[index]);
	/* deref */
	ndev->res[res_type].ref[index]--;
	if (!ndev->res[res_type].ref[index])
		flow_nic_free_resource(ndev, res_type, index);
	return !!ndev->res[res_type]
	       .ref[index]; /* if 0 resource has been freed */
}
+
/*
 * Find the first used resource at or after idx_start.
 * Returns its index, or -1 if no used entry remains.
 */
int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
				     enum res_type_e res_type, int idx_start)
{
	for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
			i++) {
		if (flow_nic_is_resource_used(ndev, res_type, i))
			return i;
	}
	return -1;
}
+
/*
 * Allocate a number of flow resources and record them in the flow handle.
 *
 * Arguments:
 *   ndev       : device
 *   res_type   : resource type
 *   fh         : flow handle
 *   count      : number of (contiguous) resources to be allocated
 *   alignment  : start index alignment
 *                  1: the allocation can start at any index
 *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
 *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
 *                  etc.
 * Returns:
 *          0   : success
 *         -1   : failure
 */
int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
				  enum res_type_e res_type,
				  struct flow_handle *fh, uint32_t count,
				  uint32_t alignment)
{
	if (count > 1) {
		/* Contiguous */
		fh->resource[res_type].index =
			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
	} else {
		fh->resource[res_type].index =
			flow_nic_alloc_resource(ndev, res_type, alignment);
	}

	/* Both allocators return -1 when nothing is available. */
	if (fh->resource[res_type].index < 0)
		return -1;
	fh->resource[res_type].count = count;
	return 0;
}
+
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
/*
 * Program hash recipe hsh_idx with a predefined algorithm.
 * HASH_ALGO_5TUPLE configures an IPv6-sized 5-tuple hash with the adaptive
 * IPv4 mask enabled; HASH_ALGO_ROUND_ROBIN leaves the cleared recipe,
 * which is round-robin. Always returns 0 (hw_mod results are not checked).
 */
int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
			enum flow_nic_hash_e algorithm)
{
	/* Clear the whole recipe before configuring. */
	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
	switch (algorithm) {
	case HASH_ALGO_5TUPLE:
		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
				   hsh_idx, 0, 2);
		/* QW0/QW4 select src+dst IP words, W8 the L4 ports. */
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
				   DYN_FINAL_IP_DST);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
				   -16);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
				   DYN_FINAL_IP_DST);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
				   0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
				   DYN_L4);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
		/* Words 0-8 contribute to the hash; word 9 is masked out. */
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
				   0);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
				   0xffffffff);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
				   1);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
				   HASH_5TUPLE);
		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
				   hsh_idx, 0, 1);

		NT_LOG(DBG, FILTER,
		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
		break;
	default:
	case HASH_ALGO_ROUND_ROBIN:
		/* zero is round-robin */
		break;
	}

	return 0;
}
+
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN
+		 * provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The cardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_OUTER_DST_IP);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The cardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_TUN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_INNER_SRC_IP);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The cardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev = dev_base;
+
+	while (nic_dev) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+		nic_dev = nic_dev->next;
+	}
+
+	if (!nic_dev)
+		return NULL;
+
+	struct flow_eth_dev *dev = nic_dev->eth_base;
+
+	while (dev) {
+		if (port == dev->port)
+			return dev;
+		dev = dev->next;
+	}
+
+	return NULL;
+}
+
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = dev_base;
+
+	while (ndev) {
+		if (adapter_no == ndev->adapter_no)
+			break;
+		ndev = ndev->next;
+	}
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
/*
 * Configure which physical port pairs are merged as a LAG.
 * Returns 0 on success, -1 for an unknown adapter. Serialized by base_mtx.
 */
int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
{
	pthread_mutex_lock(&base_mtx);
	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);

	if (!ndev) {
		/* Error invalid nic device */
		pthread_mutex_unlock(&base_mtx);
		return -1;
	}
	/*
	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
	 * and reported as N*2 incoming port
	 */
	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
	hw_mod_rmc_ctrl_flush(&ndev->be);
	pthread_mutex_unlock(&base_mtx);
	return 0;
}
+
/*
 * Block traffic from the MAC ports set in port_mask.
 * Returns 0 on success, -1 for an unknown adapter. Serialized by base_mtx.
 */
int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
{
	pthread_mutex_lock(&base_mtx);
	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);

	if (!ndev) {
		/* Error invalid nic device */
		pthread_mutex_unlock(&base_mtx);
		return -1;
	}
	/* Blocks for traffic from port */
	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
	hw_mod_rmc_ctrl_flush(&ndev->be);
	pthread_mutex_unlock(&base_mtx);
	return 0;
}
+
/* Write one LAG distribution table entry and flush it to hardware. */
static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
			    uint32_t value)
{
	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
	hw_mod_roa_lagcfg_flush(be, index, 1);
}
+
/*
 * Configure the LAG distribution table.
 *   FLOW_LAG_SET_ENTRY   : write "value" at "index".
 *   FLOW_LAG_SET_ALL     : write "value" at every 4th entry, offset index & 3.
 *   FLOW_LAG_SET_BALANCE : split hash blocks between ports by percentage.
 * Returns 0 on success, -1 for an unknown adapter or command.
 * Serialized by base_mtx.
 */
int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
		   uint32_t value)
{
	pthread_mutex_lock(&base_mtx);
	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);

	if (!ndev) {
		/* Error invalid nic device */
		pthread_mutex_unlock(&base_mtx);
		return -1;
	}

	switch (cmd) {
	case FLOW_LAG_SET_ENTRY:
		write_lag_entry(&ndev->be, index, value);
		break;

	case FLOW_LAG_SET_ALL:
		/* Only the entry offset inside a 4-entry hash block is used. */
		index &= 3;
		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
				i += 4)
			write_lag_entry(&ndev->be, i + index, value);
		break;

	case FLOW_LAG_SET_BALANCE:
		/*
		 * This function will balance the output port
		 * value: The balance of the distribution:
		 * port  P0 / P1
		 * 0:    0  / 100    port 0 is disabled
		 * 25:   25 / 75
		 * 50:   50 / 50
		 * 75:   75 / 25
		 * 100:  100/  0     port 1 is disabled
		 */
	{
		/* Clamp the balance to 100% output on port 1 */
		if (value > 100)
			value = 100;
		double balance = ((double)value / 100.0);
		double block_count =
			(double)ndev->be.roa.nb_lag_entries / 4.0;

		int output_port = 1;
		int port0_output_block_count =
			(int)(block_count * balance);

		for (int block = 0; block < block_count; block++) {
			/* When the target port0 balance is reached. */
			if (block >= port0_output_block_count)
				output_port = 2;
			/* Write an entire hash block to a given output port. */
			for (int idx = 0; idx < 4; idx++) {
				write_lag_entry(&ndev->be,
						block * 4 + idx,
						output_port);
			} /* for each index in hash block */
		} /* for each hash block */
	}

	break;
	default:
		pthread_mutex_unlock(&base_mtx);
		return -1;
	}

	pthread_mutex_unlock(&base_mtx);
	return 0;
}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/* Validate a flow spec without creating it. Only the inline profile
+ * implements validation; the vSwitch profile is rejected with -1.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/* Create a flow on an eth-port device. Only the inline profile is
+ * supported; returns NULL for the vSwitch profile.
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/* Destroy a previously created flow. Only the inline profile is
+ * supported; returns -1 for the vSwitch profile.
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/* Flush (destroy) all flows on an eth-port device. Only the inline
+ * profile is supported; returns -1 for the vSwitch profile.
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/* Query an action of an existing flow (data/length are output params).
+ * Only the inline profile is supported; returns -1 for vSwitch.
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/*
+ * Push dev onto the head of ndev's singly-linked eth-port list.
+ * Called from flow_get_eth_dev() with ndev->mtx and base_mtx held.
+ */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink eth_dev from ndev's singly-linked eth-port list.
+ * Returns 0 when found and removed, -1 when not present.
+ * Caller is expected to hold the relevant lock (see flow_delete_eth_dev()).
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link;
+
+	for (link = &ndev->eth_base; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on this NIC device:
+ *  1. delete every eth-port device (which also destroys its flows),
+ *  2. destroy any flows that somehow survived step 1 (error condition),
+ *  3. release inline-profile management state and KM/KCC handles.
+ * NOTE(review): on the unsupported vSwitch profile this returns early,
+ * skipping the KM/KCC resource release below - confirm that is intended.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		/* Destroying the list head advances ndev->flow_base. */
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+/* NOTE(review): this inner check is redundant - we are already inside
+ * the enclosing #ifdef FLOW_DEBUG block.
+ */
+#if defined(FLOW_DEBUG)
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+#endif
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			/* A live reference or a set bitmap bit means a leak. */
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset the flow state of one adapter: delete all its eth-port devices
+ * and flows, then reset the backend module state.
+ * Returns 0 on success, -1 if the adapter number is unknown.
+ * NOTE(review): no lock is taken here, unlike lag_set_config() - confirm
+ * callers serialize against flow_get_eth_dev().
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev)
+		return -1;
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * BUGFIX: keep base_mtx held across the delete. The previous
+		 * code unlocked base_mtx here and then unlocked it again on
+		 * the exit paths below; unlocking a mutex that is not held is
+		 * undefined behavior (POSIX). flow_delete_eth_dev() only
+		 * takes ndev->mtx, so holding base_mtx here cannot deadlock.
+		 */
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/* base_mtx is held; ndev->mtx has not been taken yet */
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Set the QSL QEN enable bit for each allocated queue */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	/* ndev->mtx is held only on this path; release it and fall through */
+	pthread_mutex_unlock(&ndev->mtx);
+
+err_exit1:
+	/*
+	 * BUGFIX: base_mtx is held on every error path reaching this label.
+	 * The previous code did not release it on calloc failure, leaving
+	 * the global lock held forever.
+	 */
+	pthread_mutex_unlock(&base_mtx);
+
+	if (eth_dev)
+		free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Attach an externally allocated RX queue to an eth-port device and set
+ * its enable bit in the QSL QEN register (4 queues per register word).
+ * Always returns 0.
+ * NOTE(review): no bounds check against FLOW_MAX_QUEUES and no locking
+ * here - callers presumably guarantee both.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	const int slot = eth_dev->num_queues;
+	uint32_t enable_bits = 0;
+
+	/* Record the queue on this eth device. */
+	eth_dev->rx_queue[slot].id = queue_id->id;
+	eth_dev->rx_queue[slot].hw_id = queue_id->hw_id;
+	eth_dev->num_queues = slot + 1;
+
+	/* Read-modify-write the QEN word holding this queue's enable bit. */
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &enable_bits);
+	enable_bits |= 1 << (queue_id->hw_id % 4);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, enable_bits);
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy its flows, clear its QSL unmatched-
+ * queue setup, disable/free its RX queues, unlink it from the NIC and
+ * free it. Returns 0 on success, -1 on invalid device or unsupported
+ * vSwitch profile.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/*
+				 * BUGFIX: release ndev->mtx before bailing
+				 * out - the previous code returned with the
+				 * mutex still held, deadlocking all later
+				 * operations on this NIC.
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Clear the QSL QEN enable bit of every queue on this dev */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/*
+ * Look up the tunnel definition for a (flow_stat_id, vport) pair.
+ * Thin wrapper that delegates directly to tunnel_get_definition().
+ */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Create the resource manager for one resource type: an allocation
+ * bitmap immediately followed by per-entry reference counters, carved
+ * out of a single zeroed allocation.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	struct hw_mod_resource_s *res = &ndev->res[res_type];
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+
+	assert(res->alloc_bm == NULL);
+	/* One allocation holds both the bitmap and the ref counters. */
+	res->alloc_bm = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (res->alloc_bm == NULL)
+		return -1;
+
+	res->ref = (uint32_t *)&res->alloc_bm[bm_bytes];
+	res->resource_count = count;
+	return 0;
+}
+
+/*
+ * Release the resource manager allocation for one resource type.
+ * free(NULL) is a no-op, so no guard is required; the pointers are
+ * nulled afterwards so a repeated call (or a stale ref pointer, which
+ * aliases the same allocation) cannot cause a double free or
+ * use-after-free.
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	free(ndev->res[res_type].alloc_bm);
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/*
+ * Insert ndev at the head of the global NIC list (dev_base),
+ * serialized by base_mtx.
+ */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Remove ndev from the global NIC list (dev_base), serialized by
+ * base_mtx. Returns 0 when found and removed, -1 when not present.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	struct flow_nic_dev **link;
+	int rc = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	for (link = &dev_base; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			rc = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return rc;
+}
+
+/*
+ * Register a NIC adapter with the flow API.
+ * - Validates the backend ops table (version must be 1).
+ * - Initializes the backend, then creates a resource manager (bitmap +
+ *   ref counters) for every resource type, sized from the backend's
+ *   reported capabilities.
+ * On any failure the partially constructed device is torn down via
+ * flow_api_done() and NULL is returned.
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* ports is a uint16_t; the addressable range is capped at 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* flow_api_done() frees all resource tables and ndev itself */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Tear down a NIC flow device created by flow_api_create(): reset all
+ * flow state, release every resource table, shut down the backend,
+ * unlink from the global NIC list and free. NULL is accepted.
+ * Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (!ndev)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int t = 0; t < RES_COUNT; t++)
+		done_resource_elements(ndev, t);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the opaque backend device handle of a NIC, or NULL (logged)
+ * when ndev is NULL.
+ */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev)
+		return ndev->be.be_dev;
+
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Return the number of RX queues on the eth-port device for
+ * (adapter_no, port_no), or -1 if no such device exists.
+ * BUGFIX: nic_and_port_to_eth_dev() returns NULL for an unknown
+ * adapter/port (see flow_get_eth_dev()); the previous code dereferenced
+ * it unconditionally.
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the HW queue id of queue_no on the eth-port device for
+ * (adapter_no, port_no), or -1 if no such device exists.
+ * BUGFIX: guard against the NULL return of nic_and_port_to_eth_dev()
+ * (see flow_get_eth_dev()); the previous code dereferenced it blindly.
+ * NOTE(review): queue_no is not range-checked against num_queues -
+ * callers presumably pass a valid index; confirm.
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/* Read FLM statistics into data (size entries). Supported only for the
+ * inline profile; returns -1 otherwise.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/*
+ * Per-resource-type allocation state. The bitmap and the ref-counter
+ * array are carved from a single allocation (see init_resource_elements()
+ * in flow_api.c), so only alloc_bm is ever freed.
+ */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+/* Individual hash-field selector bits (values mirror DPDK's RTE_ETH_RSS_*) */
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+/* Convenience mask: hash on any IP variant */
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+/* Type-safe wrapper for a bitwise OR of NT_ETH_RSS_* option bits. */
+struct nt_eth_rss {
+	uint64_t fields; /* NT_ETH_RSS_* selector and level bits */
+};
+
+/*
+ * One eth-port device per (adapter, port) pair; linked off the owning
+ * flow_nic_dev's eth_base list.
+ */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next eth-port on the same NIC */
+};
+
+/* Hash algorithm selector for flow_nic_set_hasher(). */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/*
+ * Per-adapter flow device state (one per registered NIC backend).
+ * Instances are kept in a global singly-linked list via 'next'.
+ */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* nonzero once profile modules initialized */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* serializes per-NIC flow/queue operations */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/* Error codes passed to flow_nic_set_error() to fill a struct flow_error. */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG /* sentinel: number of error codes */
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+/* Human-readable names for enum res_type_e values, used in debug logs. */
+extern const char *dbg_res_descr[];
+
+/* Set bit x in byte-array bitmap arr (arr and x each evaluated once). */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/* Clear bit x in byte-array bitmap arr. */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		size_t _temp_x = (x); \
+		arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/* Nonzero iff bit x is set in byte-array bitmap arr (statement expression). */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		size_t _temp_x = (x); \
+		(arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Mark a resource entry allocated; asserts it was previously free. */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/* Mark a resource entry free again (no assertion on prior state). */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* 1 iff entry 'index' of resource type 'res_type' is allocated. */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+/*
+ * Resource allocator API over the per-type bitmaps above.
+ * Allocation functions return an allocated index, presumably negative on
+ * failure (flow_nic_alloc_resource's result is checked with < 0 in
+ * flow_get_eth_dev()); ref/deref maintain per-entry reference counts.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+/* Allocate resources on behalf of a flow handle (fh) */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+/* Lookup helpers over the global NIC / eth-port lists.
+ * NOTE(review): both return NULL when not found (see the checks in
+ * flow_get_eth_dev() / flow_reset_nic_dev()).
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+/* RSS hasher configuration (QSL HSH recipe hsh_idx) */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+/* LAG (link aggregation) configuration */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Per-port queue queries */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+/* FLM statistics readout (inline profile only) */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ *
+ * Set or clear the enable bit of one CFN in the KM flow-type enable
+ * bitmap (FTE).  The FTE word index is derived from flow_type, lookup
+ * and the high part of cfn_index (cat_funcs = nb_cat_funcs / 8); the
+ * low part selects the bit inside the word.  The register is written
+ * and flushed only when the bitmap actually changes.  Returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned literal: shifting a signed 1 into bit 31 is undefined */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ *
+ * FLM counterpart of set_flow_type_km(): set or clear the enable bit of
+ * one CFN in the FLM flow-type enable bitmap (FTE).  Writes and flushes
+ * only when the bitmap changes.  Returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned literal: shifting a signed 1 into bit 31 is undefined */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/* Map a logical RX queue id to its hardware queue id; -1 if unknown. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int i;
+
+	for (i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id != id)
+			continue;
+		return dev->rx_queue[i].hw_id;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Bring up the FLM SDRAM: reset the FLM control registers, request split
+ * SDRAM usage, poll until DDR4 calibration reports done, then program
+ * the initial timeout and scrubber settings.
+ *
+ * Returns 0 on success, -1 if calibration never completes within the
+ * bounded poll loop.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	/* Reset the FLM block and select split SDRAM usage */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reinitialize the FLM SDRAM contents.  The sequence is order-sensitive:
+ * FLM is first disabled and every RCP (except 0) cleared so no lookup can
+ * run, then the block is polled until idle, the SDRAM INIT cycle is
+ * started and polled to completion, and finally FLM is (re)enabled
+ * according to "enable".
+ *
+ * Returns 0 on success, -1 if the block never goes idle or the SDRAM
+ * init never completes.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Clear all RCPs except index 0 (kept as the default/null recipe) */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Poll until the hardware reports the init cycle done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+/* Number of FLM groups (one per RCP) and action sets (FTs) per group */
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Compact identity of a flow's action set, packed into a single 64-bit
+ * word so two action sets can be compared with one integer compare
+ * (see flm_flow_learn_prepare()).  The bit layout mirrors what
+ * flow_def_to_ft_ident() extracts from a nic_flow_def.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;	/* entry is allocated */
+			uint64_t drop : 1;	/* no destinations: drop */
+			uint64_t ltx_en : 1;	/* local TX to a phy port */
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;	/* forward to an RX queue */
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;	/* tunnel header length */
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;	/* 0 = IPv4, 1 = IPv6 */
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data;	/* whole identity for comparison/zeroing */
+	};
+};
+
+/*
+ * Key layout of an FLM group: dyn/ofs selectors for the two quad-words
+ * (QW0/QW4) and two single-words (SW8/SW9) of the match key, plus the
+ * outer/inner protocol flags.  Packed into one 64-bit word so a group's
+ * key definition can be compared with a single integer compare.
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data;	/* whole definition for comparison */
+	};
+};
+
+/*
+ * Build the packed action-set identity for a flow definition: drop or
+ * destination ports/queues, encap/decap parameters and the jump group.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ident = { .data = 0 };
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ident.drop = 1;
+	} else {
+		int i;
+
+		for (i = 0; i < fd->dst_num_avail; ++i) {
+			switch (fd->dst_id[i].type) {
+			case PORT_PHY:
+				ident.ltx_en = 1;
+				ident.ltx_port = fd->dst_id[i].id;
+				break;
+			case PORT_VIRT:
+				ident.queue_en = 1;
+				ident.queue = fd->dst_id[i].id;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ident.encap_len = fd->tun_hdr.len;
+		ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ident;
+}
+
+/* Record the dyn/ofs selector for quad-word 0 or 1 of the FLM key. */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+
+	/* Truncate to the bit widths of the packed fields */
+	dyn &= 0x7f;
+	ofs &= 0xff;
+
+	if (qw == 0) {
+		key_def->qw0_dyn = dyn;
+		key_def->qw0_ofs = ofs;
+	} else {
+		key_def->qw4_dyn = dyn;
+		key_def->qw4_ofs = ofs;
+	}
+}
+
+/* Record the dyn/ofs selector for single-word 8 or 9 of the FLM key. */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+
+	/* Truncate to the bit widths of the packed fields */
+	dyn &= 0x7f;
+	ofs &= 0xff;
+
+	if (sw == 0) {
+		key_def->sw8_dyn = dyn;
+		key_def->sw8_ofs = ofs;
+	} else {
+		key_def->sw9_dyn = dyn;
+		key_def->sw9_ofs = ofs;
+	}
+}
+
+/*
+ * Per-group FLM state: the group-0 catcher (CFN) that funnels traffic
+ * into FLM, the key definition shared by every flow in the group, and a
+ * table of action sets (flow types).
+ */
+struct flm_flow_group_s {
+	int cfn_group0;		/* CFN of the group-0 catcher, -1 when unset */
+	int km_ft_group0;	/* KM flow type of the group-0 catcher */
+	struct flow_handle *fh_group0;	/* handle of the group-0 catcher flow */
+
+	struct flm_flow_key_def_s key_def;	/* key layout for this group */
+
+	int miss_enabled;	/* nonzero once group-0 misses are routed to FLM */
+
+	/* Action sets; search starts at index 2 (see flm_flow_learn_prepare) */
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Most recently used ft[] index ("cashed" is a typo for "cached") */
+	uint32_t cashed_ft_index;
+};
+
+/* Top-level FLM resource handle: one group slot per FLM RCP */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset) the FLM resource handle.  On allocation failure
+ * *handle is left NULL; the original code dereferenced the NULL pointer
+ * in the init loop below.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+	if (!flm_handle)
+		return; /* out of memory; caller observes *handle == NULL */
+
+	/* Mark every group slot as unused */
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Release the FLM resource handle; free(NULL) is a harmless no-op. */
+static void flm_flow_handle_remove(void **handle)
+{
+	void *flm_handle = *handle;
+
+	*handle = NULL;
+	free(flm_handle);
+}
+
+/*
+ * Register the group-0 catcher (CFN, KM flow type and flow handle) for
+ * "group_index".  Returns 0, or -1 for an out-of-range group.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *grp = &flm_handle->groups[group_index];
+
+	grp->cfn_group0 = cfn;
+	grp->km_ft_group0 = km_ft;
+	grp->fh_group0 = fh;
+	grp->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Clear all state of "group_index" and mark its catcher slot unused.
+ * Returns 0, or -1 for an out-of-range group.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *grp = &flm_handle->groups[group_index];
+
+	memset(grp, 0x0, sizeof(*grp));
+	grp->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Fetch the flow handle of the group-0 catcher for "group_index" into
+ * *fh_miss.  Returns 0, or -1 for an out-of-range group.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index < FLM_FLOW_RCP_MAX) {
+		*fh_miss = flm_handle->groups[group_index].fh_group0;
+		return 0;
+	}
+
+	NT_LOG(ERR, FILTER,
+	       "FLM: Invalid index for FLM programming: Group=%d\n",
+	       (int)group_index);
+	return -1;
+}
+
+/*
+ * Program the FLM recipe (RCP) for "group_index": the key word
+ * selectors (QW0/QW4/SW8/SW9), the 10-word match mask, the KID used in
+ * learn records, the protocol flags and the byte-counter offset.
+ * Returns 0, or -1 for an out-of-range group.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask words into the FLM register layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	/* Key word selectors taken from the group's key definition */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KID is group_index + 2; learn records use the same offset */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	/* Byte counter offset -20 — presumably compensates for L2 framing;
+	 * verify against the FLM hardware documentation.
+	 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down the FLM recipe for "group_index" and, if miss handling was
+ * enabled, undo flm_flow_learn_prepare()'s group-0 redirection: point
+ * the catcher's RCP selection back to 0, swap FT MISS back to FT
+ * UNHANDLED and clear the catcher's FLM enable bit.
+ * Returns 0, or -1 for an out-of-range group.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	/* Clear the recipe itself */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM learn programming for a flow in "group_index".
+ *
+ * On the first flow of a group this redirects the group-0 catcher (CFN)
+ * to the group's RCP, programs the RCP key layout and mask, swaps the
+ * catcher's flow type from UNHANDLED to MISS and sets the catcher's FLM
+ * enable bit.  Every later flow in the group must carry the same key
+ * definition.
+ *
+ * Outputs: *kid and *ft are the KID/flow-type to put into the learn
+ * record.  When a new action set (FT slot) was allocated, *cfn_to_copy
+ * and *cfn_to_copy_km_ft describe the catcher to clone; when an
+ * identical action set already exists, *fh_existing is set instead.
+ * Returns 0 on success, -1 on error.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	/* The group-0 catcher must have been registered beforehand */
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* First flow in this group: enable miss handling on the catcher */
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in one group must share the same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the last-used FT slot matches this action set */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* Slots 0 and 1 are never allocated; scan from 2 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				/* Free slot: claim it for this flow */
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				/* Identical action set already programmed */
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release a flow's FLM ownership: clear its action-set slot in the
+ * group table and disable the corresponding flow types on the flow's
+ * CFN.  Returns 0 on success, nonzero if a set_flow_type_flm() call
+ * failed.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	/* Free the FT slot this flow occupied */
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	/* Disable the group-0 KM flow type and this flow's FT on the CFN */
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+/* Number of meter profiles (and dual-bucket entries): 2^20 */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Cached dual-bucket parameters per profile, already packed in the
+ * hardware "[11:0] value, [15:12] left-shift" format produced by
+ * flow_mtr_set_profile().
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;	/* bucket A rate, 128 B/s units, packed */
+		uint16_t rate_b;	/* bucket B rate, 128 B/s units, packed */
+		uint16_t size_a;	/* bucket A size, packed */
+		uint16_t size_b;	/* bucket B size, packed */
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering is available only when an FLM block of variant 2 is present. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Upper bound on meter policy ids: one per profile entry. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return (uint64_t)FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Scale a 40-bit byte count by 10^9 / 2^40 without 64-bit overflow:
+ * split the input into two 20-bit halves, multiply each by 10^9, and
+ * recombine the high parts.  Any discarded low bits round the result
+ * up by one.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	uint64_t lo = (value & 0xfffff) * 1000000000;
+	uint64_t hi = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t result = (hi >> 20) + (lo >> 40);
+
+	if ((hi & 0xfffff) || (lo & 0xffffffffff))
+		result += 1;
+
+	return result;
+}
+
+/*
+ * Helper: clamp a rate/size value to the hardware maximum (0x7ff8000)
+ * and pack it into the 16-bit register format — bits [11:0] hold the
+ * value, bits [15:12] the left-shift needed to restore its magnitude.
+ * Replaces four duplicated copies of the clamp/shift/pack sequence.
+ */
+static uint16_t flm_mtr_pack_value(uint64_t value)
+{
+	uint64_t shift = 0;
+
+	/* Round down to the max value supported by the hardware */
+	if (value > 0x7ff8000)
+		value = 0x7ff8000;
+
+	/* Find shift to convert into a 12-bit int */
+	while ((value >> shift) > 0xfff)
+		shift += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	return (uint16_t)((value >> shift) | (shift << 12));
+}
+
+/*
+ * Cache the dual-bucket parameters for "profile_id".  Rates arrive in
+ * bytes/sec and are converted to 128 bytes/sec units (rounded up);
+ * sizes arrive in bytes and are converted to units of 2^40 / 10^9
+ * (rounded up).  All four values are then clamped and packed into the
+ * 16-bit mantissa/shift register format.  Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	buckets->rate_a = flm_mtr_pack_value(bucket_rate_a);
+	buckets->rate_b = flm_mtr_pack_value(bucket_rate_b);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	buckets->size_a = flm_mtr_pack_value(bucket_size_a);
+	buckets->size_b = flm_mtr_pack_value(bucket_size_b);
+
+	return 0;
+}
+
+/*
+ * No per-policy state is kept by this profile; the request is accepted
+ * and ignored.  NOTE(review): drop behavior appears to be encoded
+ * elsewhere (action-set identity) — verify against the callers.
+ */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+/* Number of meter statistics entries: 2^24 */
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* 32-bit words per FLM v17 info record */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/*
+ * Periodic statistics thresholds; values are exponent encodings, the
+ * comments give the effective magnitudes.
+ */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Upper bound on meter ids (size of the meter statistics table). */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics.  The MSB of n_pkt is used by
+ * flm_mtr_update_stats() as an update-in-progress marker; readers in
+ * flm_mtr_read_stats() spin until it is clear.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;	/* profile buckets, NULL when meter unused */
+
+	volatile atomic_uint_fast64_t n_pkt;	/* packet count (MSB = update busy) */
+	volatile atomic_uint_fast64_t n_bytes;	/* byte count */
+	uint64_t n_pkt_base;	/* baseline subtracted on read (clear-on-read) */
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask;	/* 0 => meter deleted */
+};
+
+/*
+ * NOTE(review): unlike WORDS_PER_INF_DATA this is NOT divided by
+ * sizeof(uint32_t), so it is a byte count — verify the units expected
+ * by HW_FLM_BUF_CTRL_LRN_FREE in flow_flm_apply().
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Push one learn record into the FLM learn FIFO.  If the FIFO lacks
+ * room, pending info records are drained and the free level re-read, up
+ * to FLM_PROG_MAX_RETRY times.  Returns 1 if space never became
+ * available, otherwise the status of the flush.  Caller must hold the
+ * ndev mutex (this uses flm_read_inf_rec_locked()).
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	/* Check the cached free level first; refresh it only if too low */
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain info records; the read data is discarded */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter by pushing an FLM learn record keyed on the meter id
+ * (sw9 = mtr_id + 1, kid 1, op 1 = learn).  Bucket rate/size come from
+ * the cached profile; on success the statistics slot is bound to the
+ * profile's buckets.  Returns the result of the FLM programming.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	/* Bucket A parameters; the bucket starts full (low 12 bits of size) */
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* 32-bit meter id in bytes 0-3; id[8] bit 7 marks the id as valid */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Request volume statistics only when the caller wants counters */
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter: clear its statistics slot first (stats_mask = 0
+ * stops flm_mtr_update_stats() from touching the counters), then push
+ * an unlearn record (op 0) for the meter's key.  Returns the result of
+ * the FLM programming.
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust the fill level of an existing meter by "adjust_value" via an
+ * FLM learn record (op 2 = update).  Returns the FLM programming
+ * result, or -1 if the meter does not exist.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	/*
+	 * Guard against meters that were never created or have already
+	 * been destroyed: buckets is NULL then (see
+	 * flow_mtr_destroy_meter) and dereferencing it below would crash.
+	 */
+	if (mtr_stat->buckets == NULL) {
+		pthread_mutex_unlock(&dev->ndev->mtx);
+		return -1;
+	}
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ whole info records from the
+ * FLM info FIFO into "data".  Returns the number of records read (0 if
+ * none are pending).  Caller must hold the ndev mutex.
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	/* Check the cached fill level; refresh it only if too low */
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	/* Only read whole records, bounded by the caller's buffer size */
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain pending FLM info records and fold meter statistics into the
+ * per-meter counters.  Returns the number of records processed.
+ *
+ * Writer protocol (paired with the reader loop in flm_mtr_read_stats):
+ * first store n_pkt with the MSB set to flag an update in progress,
+ * then store n_bytes, then store the final n_pkt with the MSB clear.
+ * A reader that sees the MSB, or a changed n_pkt, retries.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				/* Words 0-1: bytes, words 2-3: packets */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/* Mark busy, update bytes, publish packets */
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read the packet/byte counters of meter "id" relative to their bases;
+ * with "clear" set, the bases are advanced so the next read starts
+ * from zero.  *stats_mask is 0 (and the counters untouched) for a
+ * deleted meter.
+ *
+ * Reader protocol (paired with flm_mtr_update_stats): spin while the
+ * MSB of n_pkt flags an update in progress, read n_bytes, then re-read
+ * n_pkt; a changed value means the pair was torn and must be re-read.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* Map a port number to its IFR MTU recipe; recipe 0 stays reserved. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/*
+ * Find the physical port behind "port_id" by walking the NIC's chain
+ * of eth devices; UINT8_MAX when no device matches.
+ */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	for (struct flow_eth_dev *dev = ndev->eth_base; dev != NULL;
+			dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push fh at the head of the NIC's doubly linked flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/* Unlink fh from the NIC's doubly linked flow list. */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *n = fh->next;
+	struct flow_handle *p = fh->prev;
+
+	if (p && n) {
+		/* Interior node: bridge the neighbours */
+		p->next = n;
+		n->prev = p;
+	} else if (n) {
+		/* Head of the list */
+		ndev->flow_base = n;
+		n->prev = NULL;
+	} else if (p) {
+		/* Tail of the list */
+		p->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* Sole element */
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
/*
 * Interpret a FLOW_ELEM_TYPE_IPV4 pattern element and add the masked
 * src/dst addresses to the KM match key.
 *
 * A QW (quad-word) key slot is preferred when one is free, matching both
 * addresses in a single 2-word entry at L3 offset 12; otherwise each
 * masked address falls back to its own SW (single-word) slot. Returns 0
 * on success; returns 1 on error after freeing fd and filling out 'error'.
 *
 * NOTE(review): qw_counter/sw_counter are passed by value, so the
 * increments below are not visible to the caller — presumably the caller
 * recomputes or tracks consumption itself; verify against the call site.
 */
static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
	uint32_t any_count)
{
	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;

	if (ipv4_spec != NULL && ipv4_mask != NULL) {
		/*
		 * NOTE(review): spec==0xffff under a full mask appears to be
		 * used as a "match fragments" encoding mapped to recipe 0xfe
		 * — confirm against the fragmentation field consumer.
		 */
		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
			fd->fragmentation = 0xfe;

		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
			/* QW slots sit at packet_data[6] (slot 0) and [2] (slot 1). */
			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);

			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];

			/* Both addresses in one 2-word entry at L3 offset 12 (src_ip). */
			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
			qw_counter += 1;
		} else {
			/* Fall back to SW slots; fail if not enough remain. */
			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			if (ipv4_mask->hdr.src_ip) {
				/* SW slots are filled from packet_data[1] downwards. */
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
				sw_counter += 1;
			}

			if (ipv4_mask->hdr.dst_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
				sw_counter += 1;
			}
		}
	}

	/* A preceding ANY element or existing outer L3 makes this the inner header. */
	if (any_count > 0 || fd->l3_prot != -1)
		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
	else
		fd->l3_prot = PROT_L3_IPV4;
	return 0;
}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Tear down all hardware state attached to one CAT function (CFN):
+ * preset the CFN record itself, detach the CFN from both the KM and the
+ * FLM key matchers (category select, enable bit and per-flow-type
+ * enables for key indices 0-3), and finally clear its CTE/CTS
+ * category-to-engine routing if any engine was enabled.
+ *
+ * Always returns 0; the hw_mod_* accessors report failures through the
+ * backend itself.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN */
+	{
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+				   0, 0);
+		hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+	}
+
+	/* KM */
+	{
+		uint32_t bm = 0;
+
+		/* Clear this CFN's enable bit (8 CFNs share one KCE word,
+		 * hence cfn / 8 and cfn % 8) and zero its category select.
+		 */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every KM flow type for this CFN on all four key
+		 * indices.
+		 */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* FLM */
+	{
+		uint32_t bm = 0;
+
+		/* Same tear-down as the KM section, but for the FLM matcher */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* CTE / CTS */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		/* Only touch the CTS table if this CFN had any engine
+		 * enabled in its CTE bitmap.
+		 */
+		if (cte) {
+			/* NOTE(review): (cts_num + 1) / 2 entries per CFN —
+			 * presumably because each CTS entry holds a CAT_A and
+			 * a CAT_B slot; confirm against the CTS layout.
+			 */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Populate an FLM flow handle from a parsed flow definition: resolve the
+ * IP protocol number, copy the 10-word match key, and extract the NAT /
+ * DSCP / TEID / QFI action values from the modify-field list.
+ *
+ * Returns 0 on success, -1 if fh is not an FLM-type handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/*
+	 * Resolve the IP protocol number for the learn record.  The outer
+	 * L4 protocol takes precedence; the tunneled L4 protocol is only
+	 * consulted when the outer one is not a recognized L4 type.
+	 */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	/* First 10 words of the packet data form the FLM match key */
+	memcpy(fh->flm_data, packet_data, 10 * sizeof(uint32_t));
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Harvest action values from the modify-field list */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		if (fd->modify_field[idx].select == CPY_SELECT_DSCP_IPV4 ||
+				fd->modify_field[idx].select ==
+				CPY_SELECT_DSCP_IPV6) {
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+		} else if (fd->modify_field[idx].select ==
+				CPY_SELECT_RQI_QFI) {
+			/* Byte packs RQI in bit 6 and QFI in bits [5:0] */
+			fh->flm_rqi =
+				(fd->modify_field[idx].value8[0] >> 6) & 0x1;
+			fh->flm_qfi =
+				fd->modify_field[idx].value8[0] & 0x3f;
+		} else if (fd->modify_field[idx].select == CPY_SELECT_IPV4) {
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+		} else if (fd->modify_field[idx].select == CPY_SELECT_PORT) {
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+		} else if (fd->modify_field[idx].select == CPY_SELECT_TEID) {
+			fh->flm_teid =
+				ntohl(fd->modify_field[idx].value32[0]);
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build a v17 FLM learn record from the given flow handle and push it to
+ * hardware through flow_flm_apply().
+ *
+ * @param dev     Flow device the record is programmed on.
+ * @param fh      FLM-type flow handle carrying the match key and action
+ *                values.
+ * @param mtr_ids Optional array of MAX_FLM_MTRS_SUPPORTED meter ids, or
+ *                NULL when no meters are attached to the flow.
+ * @param flm_ft  FLM flow type to learn.
+ * @param flm_op  FLM operation; only the lower 4 bits are used.
+ *
+ * @return Result of flow_flm_apply(), or -1 if fh is not an FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* The match key is written highest word first: flm_data[9..2] fill
+	 * qw0/qw4 and flm_data[1..0] fill sw8/sw9.
+	 */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		FLM_V17_MBR_ID1(learn_record.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(learn_record.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(learn_record.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(learn_record.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	/* NAT is enabled only when an address or port rewrite was set */
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1; /* single-record burst: end-of-record set */
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Set up KM (key matcher) flow-type and category resources for a flow
+ * handle being created.
+ *
+ * Two paths exist:
+ *  - No identical flow exists (identical_flow_found == 0): a KM flow type
+ *    is reused if an entry with the same FT identity exists, otherwise a
+ *    free slot is allocated.  The KM category (RCP) is shared with a
+ *    compatible existing flow (found_flow != NULL) or newly allocated,
+ *    and the match entry is written to hardware.
+ *  - An identical flow was found: its KM flow type and category are
+ *    reference-counted and its match entry is reused.
+ *
+ * On success the setup_km/setup_km_ft/setup_km_rcp outputs describe the
+ * selected resources and 0 is returned.  On resource exhaustion 1 is
+ * returned with *error set.
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+				/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Remember the first free slot while scanning so a
+			 * new identity can be stored if no match is found.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Same FT identity exists: take a reference */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record the identity */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this flow type to the shared RCP's FT
+				 * mask for key A.
+				 */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference its FT and category and
+		 * reuse its match entry instead of writing a new one.
+		 */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		/* NOTE(review): 'flow' equals 'found_flow' here because the
+		 * caller's search loop breaks on the identical match —
+		 * confirm at call sites before relying on it.
+		 */
+		*setup_km = 1;
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * @param fd         Flow definition holding the tunnel header and the
+ *                   modify-field list.
+ * @param eth_length Ethernet header length the tunnel length is compared
+ *                   against.
+ * @param i          Index of the modify-field entry being adjusted.
+ * @param ofs        In/out: byte offset of the copy target; rebased here.
+ * @param select     CPY_SELECT_* target of the copy writer.
+ * @param l2_length  Outer L2 header length of the pushed tunnel header.
+ * @param l3_length  Outer L3 header length of the pushed tunnel header.
+ * @param l4_length  Outer L4 header length of the pushed tunnel header.
+ * @param dyn        In/out: dynamic-offset selector; set to 1 when the
+ *                   offset is rebased to the new outer header (presumably
+ *                   the "start of L2" selector — confirm against callers).
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/* Bug fix: advance the offset VALUE, not the pointer.
+			 * The previous "ofs += ..." moved the local pointer
+			 * and left the caller's offset unchanged.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Field sits in the new outer header: rebase its
+			 * offset to a static position from start of L2.
+			 */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time, NIC-wide preparation of the flow-management state for the
+ * inline profile.  Reserves the index-0 (and FLM index-1) entries of every
+ * HW resource pool, programs default "catch-all"/discard recipes into the
+ * CAT/QSL/PDB/HSH/COT modules, unblocks the RMC, calibrates FLM SDRAM and
+ * allocates the software handles used for FLM metering/statistics.
+ *
+ * Guarded by ndev->flow_mgnt_prepared so repeated calls are no-ops.
+ * Returns 0 on success, -1 on failure (after attempting cleanup).
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		/* NOTE(review): 7 and 6 are HW-specific encodings of descriptor
+		 * type and length - confirm against the PDB register spec.
+		 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Disable all FLM status interrupts/flags; RBL = 4. */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		/* Priority/drop thresholds for the four FLM learn FIFO levels. */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile. */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Software handles for metering, flow-type idents and stats. */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	/*
+	 * NOTE(review): done_flow_management_of_ndev_profile_inline() only
+	 * performs cleanup when flow_mgnt_prepared is set, which it never is
+	 * on this path - the calloc()'d handles and the FLM handle created
+	 * above appear to leak on failure.  Confirm and free them here.
+	 */
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down the NIC-wide flow-management state created by
+ * initialize_flow_management_of_ndev_profile_inline(): resets the HW recipe
+ * modules to defaults, releases the reserved index-0 resource entries and
+ * frees the software handles.  Idempotent - guarded by flow_mgnt_prepared.
+ * Always returns 0.
+ *
+ * Fix: the FLOW_DEBUG set_debug_mode(WRITE) call used to run
+ * unconditionally while the matching set_debug_mode(NONE) was inside the
+ * guarded block, so calling this on an unprepared device (e.g. from the
+ * init error path) left write-tracing permanently enabled.  Both calls
+ * are now inside the guard.  Freed handles are also NULLed to guard
+ * against stale-pointer reuse.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (ndev->flow_mgnt_prepared) {
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/* Free software handles; NULL them so they cannot dangle. */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		ndev->flm_mtr_handle = NULL;
+		ndev->ft_res_handle = NULL;
+		ndev->mtr_stat_handle = NULL;
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow specification without programming it to the NIC.
+ * Runs the element/action interpreter under the device mutex and discards
+ * the resulting flow definition.  Returns 0 when the spec is acceptable,
+ * -1 otherwise (error details via *error, set by the interpreter).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t dest_port_id = 0;
+	uint32_t n_dest_ports = 0;
+	uint32_t n_queues = 0;
+	uint32_t data_words[10];
+	uint32_t mask_words[10];
+	struct flm_flow_key_def_s key_def;
+	struct nic_flow_def *fd;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* The interpreter touches shared NIC state - serialize access. */
+	pthread_mutex_lock(&dev->ndev->mtx);
+	fd = interpret_flow_elements(dev, elem, action, error, 0,
+				     &dest_port_id, &n_dest_ports, &n_queues,
+				     data_words, mask_words, &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (fd == NULL)
+		return -1;
+
+	/* Validation only - the definition is never programmed to HW. */
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create a flow for the inline profile: interpret the element/action
+ * arrays into a flow definition, translate caller group IDs into NIC
+ * group indices and program the resulting filter to the NIC.
+ * Returns the new flow handle, or NULL on failure (details in *error).
+ * Holds the device mutex for the whole create sequence.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy so the caller's attr is never modified. */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	/* Forced VLAN tagging only applies to group 0 (root) flows. */
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port - default to this device's port. */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	/* NOTE(review): fd ownership appears to pass to create_flow_filter
+	 * (it frees fd on its internal free_fd path) - confirm; fd is not
+	 * freed on the error path below.
+	 */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	/* NOTE(review): "FlOW" typo in the debug message - cosmetic only. */
+	NT_LOG(DBG, FILTER,
+	       "New FlOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a single flow handle.  Caller must hold the device mutex.
+ * Removes the flow from the ndev list, then for FLM flows unlearns the
+ * HW entry and releases its reference-counted TPE replace resources;
+ * for regular flows, every HW resource whose reference count drops to
+ * zero is reset to its module default.  Finally frees the handle.
+ * Returns 0 on success, non-zero if any HW teardown step failed.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the FLM entry (NULL/0/0 arguments = remove). */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Release the reference-counted TX replace extension and its
+		 * 16-byte replace-data segments when this was the last user.
+		 */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owning template flow when its last FLM child
+		 * goes away (recursive single-level destroy of the owner).
+		 */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Walk every resource type this flow holds; reset each HW
+		 * entry whose reference count reaches zero.
+		 */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the SW flow-type ident
+						 * only - no HW write needed.
+						 */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe spans all the
+						 * TX-packet-editor submodules;
+						 * reset and flush each one.
+						 */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy one flow, or - when 'flow' is NULL - every flow (regular and
+ * FLM) created on this eth device.  Takes the device mutex and stops at
+ * the first teardown error.  Returns 0 on success.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		struct flow_handle *cur, *next;
+
+		/* Delete all regular flows belonging to this eth device.
+		 * The next pointer is saved first because a successful
+		 * destroy frees the current node.
+		 */
+		for (cur = dev->ndev->flow_base; cur && !err; cur = next) {
+			next = cur->next;
+			if (cur->dev == dev)
+				err = flow_destroy_locked_profile_inline(dev,
+									 cur,
+									 NULL);
+		}
+
+		/* Then delete all FLM flows belonging to this eth device. */
+		for (cur = dev->ndev->flow_base_flm; cur && !err; cur = next) {
+			next = cur->next;
+			if (cur->dev == dev)
+				err = flow_destroy_locked_profile_inline(dev,
+									 cur,
+									 NULL);
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * Flush is not implemented for the inline profile; report a general
+ * error and fail.  Always returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Query is not implemented for the inline profile; clear the output
+ * parameters, report a general error and fail.  Always returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	*data = NULL;
+	*length = 0;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Read the FLM statistics registers into 'data'.  HW_FLM_STAT_FLOWS is a
+ * gauge and is reported as-is; every other field is a counter delta that
+ * is accumulated into the caller's running totals.  Returns -1 when
+ * 'size' is too small for the field list, otherwise 0.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	/* FLM counters exposed to the caller, in output order. */
+	static const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+	const uint64_t n_fields = sizeof(fields) / sizeof(fields[0]);
+
+	if (size < n_fields)
+		return -1;
+
+	/* Latch fresh values from HW before reading. */
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < n_fields; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+
+		if (fields[i] == HW_FLM_STAT_FLOWS)
+			data[i] = value;	/* gauge - snapshot */
+		else
+			data[i] += value;	/* counter - accumulate */
+
+		/* Older FLM module versions end at PRB_IGNORE. */
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program 'mtu' for 'port' into both the RPP and TPE IFR recipes and
+ * enable them.  Ports >= 255 are rejected with -1; otherwise returns the
+ * OR of the backend call results (0 on success).
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	const uint8_t rcp_idx = convert_port_to_ifr_mtu_recipe(port);
+	int err = 0;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  rcp_idx, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  rcp_idx, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      rcp_idx, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      rcp_idx, mtu);
+
+	/* Only flush to HW when every shadow write succeeded. */
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the inline flow profile implementation. */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down NIC-wide flow-management state; idempotent.  Returns 0. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time NIC-wide setup of flow management.  Returns 0 on success, -1 on failure. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy one flow handle; caller must already hold the device mutex. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Validate a flow spec without programming HW.  Returns 0 when acceptable. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns the new handle or NULL on failure. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of this eth device when 'flow' is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented - always fails with -1 and sets *error. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented - always fails with -1 and sets *error. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Fill 'data' with FLM statistics; returns -1 when 'size' is too small. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: one cached NTHW module handle for every FPGA
+ * module the flow backend drives.  Indexed by physical adapter number.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;	/* physical adapter index into be_devs[] */
+	enum debug_mode_e dmode;	/* current debug mode (see set_debug_mode) */
+	struct info_nthw *p_info_nthw;	/* INFO module - capability queries */
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Enable NTHW register-write tracing for module 'mod' when either the
+ * backend debug mode or the module's own debug flag asks for it.
+ * Deliberately NOT wrapped in do/while: it declares '__debug__' in the
+ * caller's scope so that _CHECK_DEBUG_OFF can see it - consequently it
+ * can be used at most once per scope.
+ * NOTE(review): '_CHECK_DEBUG_ON' and '__debug__' use identifiers
+ * reserved for the implementation (leading underscore / double
+ * underscore) - consider renaming.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+/* Disable tracing again iff the matching _CHECK_DEBUG_ON enabled it. */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Record the requested backend debug mode; consulted by the register
+ * access wrappers.  Always succeeds.
+ */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	dev->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/* Number of physical ports reported by the FPGA INFO module. */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_phy_ports(dev->p_info_nthw);
+}
+
+/* Number of RX ports reported by the FPGA INFO module. */
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_rx_ports(dev->p_info_nthw);
+}
+
+/* Whether local TX is available, per the FPGA INFO module. */
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_ltx_avail(dev->p_info_nthw);
+}
+
+/* Number of CAT categorizer functions (CFNs). */
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_cat_funcs(dev->p_info_nthw);
+}
+
+/* Number of CAT categories. */
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_categories(dev->p_info_nthw);
+}
+
+/* Number of CAT-to-KM interfaces. */
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(dev->p_info_nthw);
+}
+
+/* CAT-to-KM interface module index 0. */
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(dev->p_info_nthw);
+}
+
+/* CAT-to-KM interface module index 1. */
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(dev->p_info_nthw);
+}
+
+/* Number of HW queues. */
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_queues(dev->p_info_nthw);
+}
+
+/* Number of KM flow types. */
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_km_flow_types(dev->p_info_nthw);
+}
+
+/* Number of pattern-match extractors. */
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_pm_ext(dev->p_info_nthw);
+}
+
+/* Number of length checkers. */
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_len(dev->p_info_nthw);
+}
+
+/* KCC (KM category cache) size. */
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_kcc_size(dev->p_info_nthw);
+}
+
+/* Number of KCC banks. */
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_kcc_banks(dev->p_info_nthw);
+}
+
+/* Number of KM categories. */
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_km_categories(dev->p_info_nthw);
+}
+
+/* Number of KM CAM banks. */
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(dev->p_info_nthw);
+}
+
+/* Number of 32-bit words per KM CAM record. */
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(dev->p_info_nthw);
+}
+
+/* Number of KM CAM records per bank. */
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_km_cam_records(dev->p_info_nthw);
+}
+
+/* Number of KM TCAM banks. */
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(dev->p_info_nthw);
+}
+
+/* Width of one KM TCAM bank. */
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(dev->p_info_nthw);
+}
+
+/* Backend callback: number of FLM categories, from the INFO module. */
+static int get_nb_flm_categories(void *be_dev)
+{
+	return info_nthw_get_nb_flm_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: FLM memory size in MB, from the INFO module. */
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	return info_nthw_get_nb_flm_size_mb(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: FLM entry size, from the INFO module. */
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	return info_nthw_get_nb_flm_entry_size(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: FLM variant, from the INFO module. */
+static int get_nb_flm_variant(void *be_dev)
+{
+	return info_nthw_get_nb_flm_variant(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of FLM priorities, from the INFO module. */
+static int get_nb_flm_prios(void *be_dev)
+{
+	return info_nthw_get_nb_flm_prios(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of FLM PST profiles, from the INFO module. */
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	return info_nthw_get_nb_flm_pst_profiles(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of HST categories, from the INFO module. */
+static int get_nb_hst_categories(void *be_dev)
+{
+	return info_nthw_get_nb_hst_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of QSL categories, from the INFO module. */
+static int get_nb_qsl_categories(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of QSL QST entries, from the INFO module. */
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_qst_entries(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of PDB categories, from the INFO module. */
+static int get_nb_pdb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_pdb_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of IOA categories, from the INFO module. */
+static int get_nb_ioa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_ioa_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of ROA categories, from the INFO module. */
+static int get_nb_roa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_roa_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of TPE categories, from the INFO module. */
+static int get_nb_tpe_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of TX copy writers, from the INFO module. */
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_writers(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: TX copy mask memory size, from the INFO module. */
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_mask_mem(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: TX replace table depth, from the INFO module. */
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_depth(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: TX replace extension categories, from the INFO module. */
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_ext_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: TPE IFR categories, from the INFO module. */
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_ifr_categories(
+		((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when the CAT module handle was resolved for this device. */
+static bool cat_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_cat_nthw != NULL;
+}
+
+/* CAT module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_cat_nthw->m_cat);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' CFN (categorizer function) records, starting at 'cat_func',
+ * from the host shadow into the CAT module registers.
+ *
+ * Supported register layouts: version 18, and versions 21/22 (which share
+ * the v21 CFN view and add tunnel/TTL error bits plus a second KM
+ * interface). Other versions are silently ignored.
+ * Always returns 0.
+ *
+ * Fix: the burst-count setup calls were garbled as "r(be->p_cat_nthw, 1U)";
+ * restored to cat_nthw_cfn_cnt(), matching every sibling *_flush() here.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
+			/* v18 has a single KM interface -> one OR mask */
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
+			/* v21+ adds tunnel checksum and TTL error bits */
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw, cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw, cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
+			/* second KM interface is optional in hardware */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						    cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCE (KM category enable) records starting at 'index'.
+ * v18 has a single KM interface (index fixed to 0); v21/v22 index the
+ * interface with 'km_if_idx'. Always returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			int rec = index + i;
+
+			cat_nthw_kce_select(be->p_cat_nthw, 0, rec);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[rec].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			int rec = index + i;
+
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, rec);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[rec].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCS (KM category select) records starting at 'cat_func'.
+ * v18: single KM interface (0); v21/v22: per-interface via 'km_if_idx'.
+ * Always returns 0.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					      cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FTE (flow type enable) records starting at 'index'.
+ * v18: single KM interface (0); v21/v22: per-interface via 'km_if_idx'.
+ * Always returns 0.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = index + i;
+
+			cat_nthw_fte_select(be->p_cat_nthw, 0, rec);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[rec].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = index + i;
+
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, rec);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[rec].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE (category table enable) records starting at 'cat_func'.
+ * v18 and v21 are both written through the v18 view -- presumably the two
+ * layouts share the CTE record format (TODO confirm against fpga defs).
+ * v22 additionally writes the rrb enable bit. Always returns 0.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			/*
+			 * NOTE(review): enable_tpe is called a second time
+			 * here but is passed the rrb bit -- this looks like it
+			 * should be cat_nthw_cte_enable_rrb(); confirm against
+			 * the CAT v22 register map before changing.
+			 */
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS (category to color translation) records from 'index'.
+ * All supported versions are written through the v18 view -- presumably
+ * the layouts are identical (TODO confirm). Always returns 0.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = index + i;
+
+			cat_nthw_cts_select(be->p_cat_nthw, rec);
+			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[rec].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[rec].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' COT (color override table) records starting at 'cat_func'.
+ * All supported versions share the v18 view. Always returns 0.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = cat_func + i;
+
+			cat_nthw_cot_select(be->p_cat_nthw, rec);
+			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[rec].color);
+			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[rec].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCT (color control table) records starting at 'index'.
+ * All supported versions share the v18 view. Always returns 0.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = index + i;
+
+			cat_nthw_cct_select(be->p_cat_nthw, rec);
+			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[rec].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[rec].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' EXO (extractor offset) records starting at 'ext_index'.
+ * All supported versions share the v18 view. Always returns 0.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = ext_index + i;
+
+			cat_nthw_exo_select(be->p_cat_nthw, rec);
+			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[rec].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[rec].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RCK (relative compare key) records starting at 'index'.
+ * All supported versions share the v18 view. Always returns 0.
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = index + i;
+
+			cat_nthw_rck_select(be->p_cat_nthw, rec);
+			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[rec].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' LEN (length comparator) records starting at 'len_index'.
+ * All supported versions share the v18 view. Always returns 0.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = len_index + i;
+
+			cat_nthw_len_select(be->p_cat_nthw, rec);
+			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[rec].lower);
+			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[rec].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[rec].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[rec].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[rec].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCC CAM (key/category/id) records starting at 'len_index'.
+ * All supported versions share the v18 view. Always returns 0.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = len_index + i;
+
+			cat_nthw_kcc_select(be->p_cat_nthw, rec);
+			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[rec].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[rec].category);
+			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[rec].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCE records starting at 'len_index'. v22-only table;
+ * other versions are a no-op. Always returns 0.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = len_index + i;
+
+			cat_nthw_cce_select(be->p_cat_nthw, rec);
+			cat_nthw_cce_data_imm(be->p_cat_nthw, cat->v22.cce[rec].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw, cat->v22.cce[rec].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCS records starting at 'len_index'. v22-only table;
+ * each record carries per-engine enable/value pairs plus three
+ * type/data sideband words. Always returns 0.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = len_index + i;
+
+			cat_nthw_ccs_select(be->p_cat_nthw, rec);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw, cat->v22.ccs[rec].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw, cat->v22.ccs[rec].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw, cat->v22.ccs[rec].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw, cat->v22.ccs[rec].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw, cat->v22.ccs[rec].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw, cat->v22.ccs[rec].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw, cat->v22.ccs[rec].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw, cat->v22.ccs[rec].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw, cat->v22.ccs[rec].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw, cat->v22.ccs[rec].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw, cat->v22.ccs[rec].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw, cat->v22.ccs[rec].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw, cat->v22.ccs[rec].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw, cat->v22.ccs[rec].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw, cat->v22.ccs[rec].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw, cat->v22.ccs[rec].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw, cat->v22.ccs[rec].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw, cat->v22.ccs[rec].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw, cat->v22.ccs[rec].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw, cat->v22.ccs[rec].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw, cat->v22.ccs[rec].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw, cat->v22.ccs[rec].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw, cat->v22.ccs[rec].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw, cat->v22.ccs[rec].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw, cat->v22.ccs[rec].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw, cat->v22.ccs[rec].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw, cat->v22.ccs[rec].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw, cat->v22.ccs[rec].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when the KM module handle was resolved for this device. */
+static bool km_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_km_nthw != NULL;
+}
+
+/* KM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_km_nthw->m_km);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_km_nthw->m_km);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' KM RCP (recipe) records starting at 'category'.
+ * Only register layout version 7 is handled; other versions are a no-op.
+ * Always returns 0.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int rec = category + i;
+
+			km_nthw_rcp_select(be->p_km_nthw, rec);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[rec].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[rec].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[rec].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[rec].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[rec].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[rec].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[rec].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[rec].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[rec].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[rec].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[rec].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[rec].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[rec].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[rec].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[rec].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[rec].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[rec].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[rec].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[rec].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[rec].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[rec].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[rec].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[rec].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[rec].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[rec].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[rec].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[rec].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[rec].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[rec].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[rec].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[rec].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[rec].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[rec].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[rec].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[rec].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[rec].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[rec].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[rec].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[rec].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[rec].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[rec].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[rec].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[rec].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[rec].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM CAM records; the flat record index is
+ * (bank << 11) + record + i (2048 records per bank). v7 only.
+ * Always returns 0.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int idx = (bank << 11) + record + i;
+
+			km_nthw_cam_select(be->p_km_nthw, idx);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[idx].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[idx].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[idx].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[idx].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[idx].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[idx].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[idx].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[idx].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[idx].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[idx].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[idx].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[idx].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush up to 'cnt' dirty KM TCAM words starting at the flat index
+ * bank * 4 * 256 + byte * 256 + value. Clean entries are skipped and
+ * flushed entries have their dirty flag cleared. v7 only. Returns 0.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int base = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int idx = base + i;
+
+			if (!km->v7.tcam[idx].dirty)
+				continue;
+			km_nthw_tcam_select(be->p_km_nthw, idx);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[idx].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[idx].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * km_tci_flush - write <cnt> TCAM color/flow-type (TCI) entries.
+ *
+ * bank is the TCAM bank, index is the index within the bank (0..71);
+ * entry address = bank * 72 + index.  KM version 7 only.  Returns 0.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/*
+		 * TCAM bank width = 72 entries.
+		 * NOTE(review): original comment said "version 3" under a
+		 * ver == 7 check - presumably stale; verify.
+		 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * km_tcq_flush - write <cnt> TCAM qualifier (TCQ) entries.
+ *
+ * bank is the TCAM bank, index is the index within the bank (0..71).
+ * KM version 7 only.  Returns 0.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width in version 3 = 72 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* FLM module is present when its nthw instance pointer is non-NULL. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_flm_nthw != NULL;
+}
+
+/* FLM version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
+			  (module_get_minor_version(be->p_flm_nthw->m_flm) &
+			   0xffff));
+}
+
+/*
+ * flm_control_flush - write the FLM control shadow register group
+ * (enable, init, load/unload/relearn/probe status select bits, CRC
+ * read/write, RBL, EAB and SDRAM-split settings) to hardware.
+ * FLM version >= 17 only.  Returns 0.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_status_flush - write the writable FLM status fields (critical,
+ * panic, crcerr) back to hardware.  The trailing 0/1 argument to the
+ * status accessors presumably selects write vs. read direction (this
+ * function passes 0, flm_status_update passes 1) - verify against the
+ * flm_nthw API.  FLM version >= 17 only.  Returns 0.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_status_update - refresh the FLM status shadow from hardware and
+ * read all status fields (calibdone, initdone, idle, critical, panic,
+ * crcerr, eft_bp) into the v17 status struct.  The trailing 1 argument
+ * presumably selects read direction - verify against the flm_nthw API.
+ * FLM version >= 17 only.  Returns 0.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM timeout register from the shadow (ver >= 17). */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM scrub interval register from the shadow (ver >= 17). */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load_bin register from the shadow (ver >= 17). */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load_pps register from the shadow (ver >= 17). */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load_lps register from the shadow (ver >= 17). */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load_aps register from the shadow (ver >= 17). */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_prio_flush - write the four FLM priority limit/flow-type pairs
+ * (limit0..3 / ft0..3) to hardware.  FLM version >= 17 only.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_pst_flush - write <cnt> FLM PST entries (bp/pp/tp) starting at
+ * <index>, one entry per flush (burst count = 1).  Version >= 17 only.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_rcp_flush - write <cnt> FLM recipe (RCP) entries starting at
+ * <index> to hardware: key extractor configuration (qw0/qw4/sw8/sw9
+ * dyn/ofs/sel), mask, kid, opn, ipn, byte counter config, txplm and
+ * auto-IPv4-mask, each entry flushed individually.  Version >= 17 only.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_buf_ctrl_update - refresh the learn-free / inform-available /
+ * status-available buffer control counters from hardware into the
+ * v17 buf_ctrl shadow.  Version >= 17 only.  Returns 0.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_stat_update - refresh all FLM statistics counters from hardware
+ * and read them into the shadow structs.
+ *
+ * For ver >= 17: learn/unlearn/relearn/auto-unlearn/timeout-unlearn and
+ * probe counters plus the flow count.  For ver >= 20 additionally:
+ * status/inform, packet hit/miss/unhandled/discard, cache hit/miss/
+ * unhandled and CUC start/move counters (read from the v20 view).
+ * The trailing 1 argument to the _cnt accessors presumably selects read
+ * direction - verify against the flm_nthw API.  Returns 0.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_lrn_data_flush - push <size> words of learn data to the FLM and
+ * refresh the buffer control counters.  Returns the nthw call's result.
+ * NOTE(review): unlike the other FLM handlers this accesses v17 fields
+ * without a flm->ver guard - confirm callers never reach here on older
+ * versions.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * flm_inf_data_update - read up to <size> words of inform data from the
+ * FLM into inf_data and refresh the buffer control counters.  Returns
+ * the nthw call's result.  No flm->ver guard (see flm_lrn_data_flush).
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * flm_sta_data_update - read up to <size> words of status data from the
+ * FLM into sta_data and refresh the buffer control counters.  Returns
+ * the nthw call's result.  No flm->ver guard (see flm_lrn_data_flush).
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* HSH module is present when its nthw instance pointer is non-NULL. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hsh_nthw != NULL;
+}
+
+/* HSH version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
+			  (module_get_minor_version(be->p_hsh_nthw->m_hsh) &
+			   0xffff));
+}
+
+/*
+ * hsh_rcp_flush - write <cnt> hash recipe entries starting at
+ * <category> to hardware: distribution type, port mask, word selection
+ * and offsets (qw0/qw4/w8/w9), sort controls, protocol mask, seed,
+ * tunnel selection, hash valid/type and auto-IPv4-mask.  Each entry is
+ * flushed individually.  HSH version 5 only.  Returns 0.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* HST module is present when its nthw instance pointer is non-NULL. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hst_nthw != NULL;
+}
+
+/* HST version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hst_nthw->m_hst) << 16) |
+			  (module_get_minor_version(be->p_hst_nthw->m_hst) &
+			   0xffff));
+}
+
+/*
+ * hst_rcp_flush - write <cnt> header-stripper recipe entries starting
+ * at <category>: strip mode, start/end dyn+ofs and the three modifier
+ * slots (cmd/dyn/ofs/value), each entry flushed individually.
+ * HST version 2 only.  Returns 0.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* QSL module is present when its nthw instance pointer is non-NULL. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_qsl_nthw != NULL;
+}
+
+/* QSL version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
+			  (module_get_minor_version(be->p_qsl_nthw->m_qsl) &
+			   0xffff));
+}
+
+/*
+ * qsl_rcp_flush - write <cnt> queue-selection recipe entries starting
+ * at <category>: discard/drop flags, table lo/hi/idx/msk, and the
+ * lr/tsa/vli fields, each entry flushed individually.
+ * QSL version 7 only.  Returns 0.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * qsl_qst_flush - write <cnt> queue-selection table (QST) entries
+ * starting at <entry>: destination queue, enable, TX port, lre, TCI
+ * and ven, each entry flushed individually.  QSL version 7 only.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * qsl_qen_flush - write <cnt> queue-enable (QEN) entries starting at
+ * <entry>, each entry flushed individually.  QSL version 7 only.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * qsl_unmq_flush - write <cnt> unmatched-queue (UNMQ) entries starting
+ * at <entry>: destination queue and enable, each entry flushed
+ * individually.  QSL version 7 only.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* SLC module is present when its nthw instance pointer is non-NULL. */
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_nthw != NULL;
+}
+
+/* SLC version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_nthw->m_slc) << 16) |
+			  (module_get_minor_version(be->p_slc_nthw->m_slc) &
+			   0xffff));
+}
+
+/*
+ * slc_rcp_flush - write <cnt> slicer recipe entries starting at
+ * <category>: tail-slice enable, tail dyn/ofs and pcap flag, each
+ * entry flushed individually.  SLC version 1 only.  Returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* SLC-LR module is present when its nthw instance pointer is non-NULL. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_lr_nthw != NULL;
+}
+
+/* SLC-LR version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_lr_nthw->m_slc_lr)
+			   << 16) |
+			  (module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) &
+			   0xffff));
+}
+
+/*
+ * slc_lr_rcp_flush - write <cnt> SLC-LR recipe entries starting at
+ * <category>: tail-slice enable, tail dyn/ofs and pcap flag, each
+ * entry flushed individually.  SLC-LR version 2 only.  Returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* PDB module is present when its nthw instance pointer is non-NULL. */
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_pdb_nthw != NULL;
+}
+
+/* PDB version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
+			  (module_get_minor_version(be->p_pdb_nthw->m_pdb) &
+			   0xffff));
+}
+
+/*
+ * pdb_rcp_flush - write <cnt> packet-descriptor-builder recipe entries
+ * starting at <category>: descriptor type/length, TX port controls,
+ * CRC overwrite, alignment, the three offset pairs (ofs0..2 dyn/rel),
+ * IP-protocol-tunnel, PPC hash, duplication controls and the
+ * PCAP-keep-FCS flag.  Each entry is flushed individually.
+ * PDB version 9 only.  Returns 0.
+ *
+ * Fix: the pcap_keep_fcs value was previously passed to
+ * pdb_nthw_rcp_duplicate_bit() (copy-paste error), clobbering the
+ * duplicate-bit register and never programming PCAP_KEEP_FCS; it is
+ * now written via pdb_nthw_rcp_pcap_keep_fcs().
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * pdb_config_flush - write the PDB global configuration (timestamp
+ * format and port offset) to hardware.  PDB version 9 only.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* IOA module is present when its nthw instance pointer is non-NULL. */
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_ioa_nthw != NULL;
+}
+
+/* IOA version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_ioa_nthw->m_ioa) << 16) |
+			  (module_get_minor_version(be->p_ioa_nthw->m_ioa) &
+			   0xffff));
+}
+
+/*
+ * Write 'cnt' IOA RCP shadow entries, starting at 'category', to hardware.
+ * Entries are programmed one at a time: select index, set all fields,
+ * flush.  Only the v4 layout is handled; other versions return 0 silently.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);	/* target index */
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write the two custom VLAN TPID values (singleton shadow, no index) to
+ * hardware.  Only the v4 layout is handled; other versions return 0.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);	/* commit to HW */
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' ROA egress-packet-processing (EPP) entries, starting at
+ * 'index', to hardware via the IOA module.  v4 layout only; other
+ * versions return 0 silently.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* The ROA module is present iff its nthw instance was created at init time. */
+static bool roa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = (const struct backend_dev_s *)be_dev;
+
+	return dev->p_roa_nthw != NULL;
+}
+
+/* ROA module version: major in the upper 16 bits, minor in the lower 16. */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(dev->p_roa_nthw->m_roa);
+	const uint32_t minor = module_get_minor_version(dev->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' ROA tunnel-header entries to hardware, starting at HW word
+ * index 'index'.  Each shadow entry is written as 4 chunks of 4 32-bit
+ * words (cnt register set to 4), hence the index/4 and ii*4 arithmetic —
+ * assumes 'index' is a multiple of 4; TODO confirm with callers.
+ * v6 layout only; other versions return 0 silently.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);	/* 4 words per flush */
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);	/* commit this chunk */
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' ROA tunnel-config entries, starting at 'category', to
+ * hardware: select index, program every field, flush.  v6 layout only;
+ * other versions return 0 silently.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the ROA global CONFIG shadow (forwarding controls) to hardware.
+ * v6 layout only; other versions return 0 silently.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);	/* commit to HW */
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' ROA LAG-config entries (tx phy port mapping), starting at
+ * 'index', to hardware.  v6 layout only; other versions return 0 silently.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* The RMC module is present iff its nthw instance was created at init time. */
+static bool rmc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = (const struct backend_dev_s *)be_dev;
+
+	return dev->p_rmc_nthw != NULL;
+}
+
+/* RMC module version: major in the upper 16 bits, minor in the lower 16. */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(dev->p_rmc_nthw->m_rmc);
+	const uint32_t minor = module_get_minor_version(dev->p_rmc_nthw->m_rmc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the RMC CTRL shadow (blocking/LAG controls) to hardware.
+ * Version 0x10003 is major 1 / minor 3, matching the v1_3 union member;
+ * other versions return 0 silently.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);	/* commit to HW */
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/* TPE is present only when all six of its sub-modules were instantiated. */
+static bool tpe_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = (const struct backend_dev_s *)be_dev;
+
+	if (dev->p_csu_nthw == NULL || dev->p_hfu_nthw == NULL)
+		return false;
+	if (dev->p_rpp_lr_nthw == NULL || dev->p_tx_cpy_nthw == NULL)
+		return false;
+	return dev->p_tx_ins_nthw != NULL && dev->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Derive a composite TPE version from the six sub-module versions.
+ * Each *_version value is (major << 16) | minor, so comparing against a
+ * small integer means "major 0, minor n".  Only two combinations are
+ * recognized (they differ solely in the RPP_LR minor); any other mix
+ * trips the assert in debug builds and returns 0 when NDEBUG is set.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	/* RPP_LR minor 0 -> TPE v1 */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	/* RPP_LR minor 1 -> TPE v2 */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	assert(false);	/* unsupported sub-module version combination */
+	return 0;	/* reached only with NDEBUG */
+}
+
+/*
+ * Write 'cnt' RPP_LR RCP entries (exp field), starting at 'index', to
+ * hardware.  Applies to v1 and later; older versions return 0 silently.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' RPP_LR IFR RCP entries (en, mtu), starting at 'index', to
+ * hardware.  Requires v2 or later; unlike most flush functions here it
+ * returns -1 (not 0) when the version is unsupported.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);	/* commit this entry */
+		}
+	} else {
+		res = -1;	/* IFR RCP table does not exist before v2 */
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Write 'cnt' IFR RCP entries (en, mtu), starting at 'index', to
+ * hardware.  Requires v2 or later; returns -1 otherwise (same contract
+ * as tpe_rpp_ifr_rcp_flush).
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);	/* commit this entry */
+		}
+	} else {
+		res = -1;	/* IFR RCP table does not exist before v2 */
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Write 'cnt' TX_INS RCP entries (dyn, ofs, len), starting at 'index',
+ * to hardware.  v1 and later; older versions return 0 silently.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_RPL RCP entries, starting at 'index', to hardware.
+ * v1 and later; older versions return 0 silently.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_RPL EXT entries (rpl_ptr), starting at 'index', to
+ * hardware.  v1 and later; older versions return 0 silently.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_RPL replacement-data entries (value), starting at
+ * 'index', to hardware.  v1 and later; older versions return 0 silently.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_CPY RCP entries, starting at 'index', to hardware.
+ * The flat [index..index+cnt) range spans several writers; the writer
+ * number is (entry / nb_rcp_categories) and the per-writer slot is
+ * (entry % nb_rcp_categories).  v1 and later; older versions return 0.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	unsigned int wr_index = -1;	/* sentinel (wraps to UINT_MAX): forces writer setup on first pass */
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;	/* entered a new writer */
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' HFU (header field update) RCP entries, starting at
+ * 'index', to hardware: the three length groups (a/b/c), TTL handling,
+ * protocol/fragment/tunnel info and the four L3/L4 offsets.  v1 and
+ * later; older versions return 0 silently.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			/* length group A */
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			/* length group B */
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			/* length group C */
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			/* TTL update */
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			/* protocol/fragment/tunnel info and header offsets */
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CSU (checksum update) RCP entries, starting at 'index',
+ * to hardware: outer/inner L3 and L4 checksum commands.  v1 and later;
+ * older versions return 0 silently.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);	/* debug bracket (macro defined elsewhere in this file) */
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);	/* one entry per flush */
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);	/* commit this entry */
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Rx queue allocation is not supported by this backend.
+ * @return -1 always.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	fprintf(stderr, "ERROR alloc Rx queue\n");	/* diagnostics belong on stderr, not stdout */
+	return -1;
+}
+
+/*
+ * Rx queue free is not supported by this backend.
+ * @return 0 always — NOTE(review): asymmetric with alloc_rx_queue's -1;
+ * confirm callers treat this as a no-op rather than success.
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	fprintf(stderr, "ERROR free Rx queue\n");	/* diagnostics belong on stderr, not stdout */
+	return 0;
+}
+
+/*
+ * Dispatch table binding the generic flow API to this binary register
+ * backend.  Initializers are positional, so the order below must match
+ * the member order of struct flow_api_backend_ops exactly.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,	/* backend api version (first struct member — confirm name in flow_api_backend_ops) */
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	alloc_rx_queue,	/* both queue ops are unsupported stubs in this backend */
+	free_rx_queue,
+
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe every nthw module on the FPGA and populate this adapter's slot
+ * in the file-scope be_devs[] table.  For each module an init call with
+ * a NULL instance apparently acts as a presence probe (returns 0 when
+ * the module exists — TODO confirm against the *_nthw_init contracts);
+ * on success a real instance is allocated and initialized, otherwise
+ * the pointer is left NULL so *_get_present() reports absence.
+ * Returns the backend ops table; *dev receives the per-adapter state.
+ * NOTE(review): allocation failures of *_nthw_new() are not checked.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	struct info_nthw *pinfonthw = info_nthw_new();	/* INFO module is unconditional */
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {	/* NULL call = presence probe */
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;	/* module absent */
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];	/* hand out per-adapter state */
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down every nthw module instance created by bin_flow_backend_init().
+ * Absent modules left their pointers NULL, so the *_delete() functions
+ * are presumably NULL-tolerant — TODO confirm their contracts.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FLOW_BACKEND_H_
+#define FLOW_BACKEND_H_
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* FLOW_BACKEND_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Initialize the flow filter API for one adapter.
+ *
+ * p_fpga:         FPGA instance the binary backend binds to.
+ * p_flow_device:  out; receives the created flow NIC device, or NULL on failure.
+ * adapter_no:     physical adapter number (narrowed to uint8_t for flow_api_create()).
+ *
+ * Returns 0 on success, -1 when flow_api_create() fails.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Shut down the flow filter API: close the flow API device first, then
+ * release the backend device (if any) that was attached to it.
+ * Returns the result of flow_api_done(); the backend teardown itself
+ * cannot fail.
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard uses a C++-style _HPP_ suffix in a C header, and the
+ * bare "#undef USE_OPAE" looks like leftover build-configuration debris --
+ * consider renaming the guard to __FLOW_FILTER_H__ and removing the #undef.
+ */
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/* Create the flow filter device for one adapter; 0 on success, -1 on failure. */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Destroy a flow filter device created by flow_filter_init(). */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-21 11:34   ` [PATCH v5 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-21 11:34   ` Mykola Kostenok
  2023-08-21 11:34   ` [PATCH v5 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12514 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c3f2c993f..02aca74173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather support for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state for the NTNIC 4GA architecture. */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;      /* PCI identity of the adapter -- presumably encoded domain/bus/dev/fn; confirm */
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;       /* driver name string; ownership not visible here -- confirm who frees */
+
+	volatile bool b_shutdown;       /* set to request the worker threads below to exit */
+	pthread_mutex_t stat_lck;       /* protects statistics shared with stat_thread */
+	pthread_t stat_thread;          /* statistics polling thread */
+	pthread_t flm_thread;           /* flow manager (FLM) thread */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;                 /* index of this adapter within the driver */
+	struct rte_pci_device *p_dev;   /* backing DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;       /* embedded per-adapter driver state */
+
+	int n_eth_dev_init_count;       /* number of eth devices initialized on this adapter */
+	int probe_finished;             /* nonzero once PCI probe has completed */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 718359b67a..746dc65c4c 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+/* Split-ring available ring, packed to match the device-visible layout.
+ * ring[] is a C99 flexible array member sized by the queue size.
+ */
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+/* One entry of the split-ring used ring. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+/* Split-ring used ring, packed to match the device-visible layout. */
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used ring and descriptor table within one
+ * contiguous split-virtqueue allocation (avail ring starts at offset 0).
+ */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+/* Lifecycle state of a queue slot in the rxvq/txvq tables. */
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+/* 1 when the queue type is a packed ring, 0 for a split ring. */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * Software state of one DBS virt-queue. The leading anonymous union overlays
+ * the SPLIT-ring and PACKED-ring bookkeeping; which half is valid is decided
+ * by vq_type.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;		/* MSI-X vector; negative means interrupts disabled */
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Byte offsets of the driver/device event-suppression areas within one
+ * packed-virtqueue allocation.
+ */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* Static per-index queue state; indexed by DBS queue number (0..MAX_VIRT_QUEUES-1). */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one RX queue via the DBS.RX_INIT register: spin until the
+ * hardware is not busy, issue the init with the given start index/pointer,
+ * then spin until the init completes. Busy-waits with no timeout.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * TX counterpart of dbs_init_rx_queue(): initialize one TX queue via the
+ * DBS.TX_INIT register, busy-waiting (no timeout) before and after the init.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Create and reset the DBS module for this FPGA and bring all RX/TX queues
+ * to a known-disabled-then-enabled state.
+ *
+ * Returns 0 on success; -1 on allocation failure or the dbs_init() error code.
+ * NOTE(review): dbs_init(NULL, ...) is a probe-only call checking that DBS
+ * exists before the real init; also the error paths free() the handle from
+ * nthw_dbs_new() directly -- presumably nthw_dbs_new() is a plain allocation
+ * with no internal resources; confirm there is no matching delete helper.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the offsets of the used ring and descriptor table for a split
+ * virtqueue of the given size, rounding each section up to STRUCT_ALIGNMENT
+ * (4 KiB). The avail ring starts at offset 0.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	size_t avail_mem =
+		sizeof(struct virtq_avail) +
+		queue_size *
+		sizeof(le16); /* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_mem_aligned =
+		((avail_mem % STRUCT_ALIGNMENT) == 0) ?
+		avail_mem :
+		STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_mem =
+		sizeof(struct virtq_used) +
+		queue_size *
+		sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned =
+		((used_mem % STRUCT_ALIGNMENT) == 0) ?
+		used_mem :
+		STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+/*
+ * Initialize a split-ring avail structure at addr: suppress interrupts,
+ * set the starting avail index, and fill the ring with the identity
+ * mapping (entry i refers to descriptor i).
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+/*
+ * Initialize a split-ring used structure at addr: suppress notifications
+ * from the driver, zero the used index, and clear every ring entry.
+ * Fix: use the VIRTQ_USED_F_NO_NOTIFY macro (defined above with value 1)
+ * instead of the bare magic number 1 for the flags field.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = VIRTQ_USED_F_NO_NOTIFY;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Fill the split-ring descriptor table at addr with the physical address and
+ * length of each preallocated packet buffer, applying the same flags to every
+ * descriptor. No-op when packet_buffer_descriptors is NULL.
+ * Fix: the flags parameter was declared with the type "ule16", which is not
+ * defined anywhere in this file (only le16/le32/le64 are); use le16, matching
+ * the 16-bit virtio descriptor flags field. (If "ule16" is defined in an
+ * unseen header it is presumably also uint16_t, so this is behavior-neutral.)
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, le16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+/*
+ * Initialize all three split-virtqueue structures (avail, used, descriptor
+ * table) at their respective addresses for a queue of the given size.
+ * Fix: the flags parameter was declared with the undefined type "ule16";
+ * use le16 as defined in this file, consistent with
+ * dbs_initialize_descriptor_struct().
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, le16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * Return floor(log2(qsize)) -- the DBS registers take queue sizes encoded as
+ * a power-of-two exponent.
+ * NOTE(review): qsize == 0 makes "--qs" wrap the unsigned counter; callers
+ * must pass a nonzero (power-of-two) queue size.
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure and enable one RX virt-queue in the DBS module (per DSF00094)
+ * and record its state in the static rxvq[] table. Interrupts are always
+ * programmed off here; they are enabled later once vfio interrupts are set up.
+ * Returns a handle into rxvq[] on success, NULL if any register write fails.
+ * NOTE(review): "index" is used to address rxvq[] without a bounds check
+ * against MAX_VIRT_QUEUES, and the NT_LOG format uses %u for the signed
+ * irq_vector (which may be -1) -- consider %d.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+/* Defined later in this file; waits for the FPGA to drain a queue. */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Disable an unmanaged RX virt-queue: clear the interrupt stick (ISTK) in
+ * RX_UW_DATA, disable available-ring monitoring (AM), then wait for the
+ * FPGA to finish processing in-flight packets.
+ * Returns 0 on success, -1 on invalid queue or register failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-enable an unmanaged RX virt-queue: program RX_UW_DATA with interrupt
+ * enable/vector/ISTK when a valid irq_vector is set, then re-enable
+ * available-ring monitoring (AM).
+ * Returns 0 on success, -1 on invalid queue or register failure.
+ * NOTE(review): the NT_LOG format uses %u for the signed irq_vector (may be
+ * -1) -- consider %d.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable an unmanaged Tx virt queue: clear interrupt/ISTK state, stop the
+ * avail-monitor and wait for the FPGA to drain in-flight packets.
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (tx_vq == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	/* Only unmanaged queues with a valid index may be disabled here */
+	if (tx_vq->index >= MAX_VIRT_QUEUES || tx_vq->usage != UNMANAGED)
+		return -1;
+
+	const uint32_t qs_log2 = dbs_qsize_log2(tx_vq->queue_size);
+	const uint64_t struct_phys_addr =
+		tx_vq->vq_type == PACKED_RING ?
+		(uint64_t)tx_vq->desc_struct_phys_addr :
+		(uint64_t)tx_vq->used_struct_phys_addr;
+
+	/* Rewrite UW data with int_enable/vec/ISTK all cleared */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index, struct_phys_addr,
+			   tx_vq->host_id, qs_log2, PACKED(tx_vq->vq_type),
+			   0, 0, 0, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Stop the avail-monitor so no new buffers are fetched */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing before returning */
+	return dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0 ? -1 : 0;
+}
+
+/*
+ * Enable an unmanaged Tx virt queue: program interrupt state (sticky ISTK
+ * when a valid MSI-X vector is assigned) and re-enable the avail-monitor.
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (tx_vq == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	/* Only unmanaged queues with a valid index may be enabled here */
+	if (tx_vq->index >= MAX_VIRT_QUEUES || tx_vq->usage != UNMANAGED)
+		return -1;
+
+	const uint32_t qs_log2 = dbs_qsize_log2(tx_vq->queue_size);
+	/* Sticky interrupt (ISTK) only when a valid MSI-X vector is assigned */
+	const int use_irq = tx_vq->irq_vector >= 0 &&
+			    tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF;
+	const uint32_t int_enable = use_irq ? 1 : 0;
+	const uint32_t vec = use_irq ? (uint32_t)tx_vq->irq_vector : 0;
+	const uint32_t istk = use_irq ? 1 : 0;
+
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs_log2, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Re-enable the avail-monitor so the FPGA fetches buffers again */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-program the Tx descriptor-reader (DR) data with a new output port and
+ * then enable the queue.
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* BUGFIX: guard against NULL before dereferencing, like the other
+	 * enable/disable entry points do.
+	 */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Note: header is forced to 0 here (differs from setup-time value) */
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Forward per-port Tx QoS settings (enable, information rate, burst size). */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	const int status = set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+
+	return status;
+}
+
+/* Forward the global Tx QoS rate scaling (multiplier/divider pair). */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	const int status = set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+
+	return status;
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read the Rx queue pointer previously requested via set_rx_ptr_queue().
+ * *p_index receives the pointer, or INDEX_PTR_NOT_VALID while HW has not
+ * published a valid value yet. Returns the register-read status.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+/*
+ * Read the Tx queue pointer previously requested via set_tx_ptr_queue().
+ * *p_index receives the pointer, or INDEX_PTR_NOT_VALID while HW has not
+ * published a valid value yet. Returns the register-read status.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+	const int status = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+/* Request HW to latch the current pointer of Rx "queue" for later readout. */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Request HW to latch the current pointer of Tx "queue" for later readout. */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Spin until the DBS idle register reports not-busy (or a read fails).
+ * On success *idle holds the last idle flag read from HW.
+ * NOTE(review): this is an unbounded busy-wait with no timeout — if HW never
+ * clears "busy" this loop does not terminate; confirm HW guarantees progress.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Handshake a queue shutdown with the FPGA: request idle for the queue and
+ * poll until the idle flag is observed.
+ * If the FPGA does not support the idle register (-ENOTSUP), fall back to a
+ * fixed 200 ms grace period and report success.
+ * Returns 0 when the queue is idle, -1 on register errors.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	/* First make sure the idle interface itself is not busy */
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			/* No idle register in this FPGA — wait a fixed period */
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	/* Keep requesting idle for this queue until HW confirms it */
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an Rx virt queue: clear UW/AM/DR register data, wait for the
+ * FPGA to drain, re-init the HW queue and reset the SW bookkeeping.
+ * Returns 0 on success, -1 on NULL queue or register-write failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* BUGFIX: NULL check must come before any dereference of rxvq */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an Rx queue set up as UNMANAGED; rejects anything else. */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq && rxvq->usage == UNMANAGED)
+		return dbs_internal_release_rx_virt_queue(rxvq);
+
+	return -1;
+}
+
+/* Release a MANAGED Rx queue, including the driver-side buffer table. */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a Tx virt queue: clear UW/AM/DR/QP register data, wait for the
+ * FPGA to drain, re-init the HW queue and reset the SW bookkeeping.
+ * Returns 0 on success, -1 on NULL queue or register-write failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* BUGFIX: NULL check must come before any dereference of txvq */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release a Tx queue set up as UNMANAGED; rejects anything else. */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq && txvq->usage == UNMANAGED)
+		return dbs_internal_release_tx_virt_queue(txvq);
+
+	return -1;
+}
+
+/* Release a MANAGED Tx queue, including the driver-side buffer table. */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Set up an unmanaged Tx virt queue in the DBS block.
+ * Programs the DR, UW, AM and QP register data for queue "index" in the
+ * order mandated by the DBS setup sequence (DSF00094), initializes the HW
+ * queue, and records SW state in the global txvq[] table.
+ * Interrupts always start disabled; queues needing interrupts are enabled
+ * later via nthw_enable_tx_virt_queue() (after vfio interrupts are up).
+ * Returns a handle into txvq[] on success, NULL on register-write failure.
+ * NOTE(review): "index" is not range-checked against MAX_VIRT_QUEUES here;
+ * presumably callers validate it — confirm.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings use the descriptor area address in UW; split rings the used ring */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/* Map the queue to its virtual port (QP data) */
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a managed split-ring Rx queue: lay out avail/used/desc structs in
+ * the provided memory area, copy the packet-buffer descriptors, and program
+ * the HW via nthw_setup_rx_virt_queue().
+ * Returns a handle into rxvq[] on success, NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* BUGFIX: check malloc result before memcpy into it */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a managed split-ring Tx queue: lay out avail/used/desc structs in
+ * the provided memory area, copy the packet-buffer descriptors, and program
+ * the HW via nthw_setup_tx_virt_queue().
+ * Returns a handle into txvq[] on success, NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* BUGFIX: check malloc result before memcpy into it */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Common packed-ring setup for Rx and Tx managed queues.
+ * Zeroes the struct area, computes the device/driver event-suppression
+ * offsets into pvirtq_layout, pre-fills all descriptors (Rx also gets buffer
+ * address/length), and copies the packet-buffer table.
+ * Requires a page-aligned struct area and a non-NULL buffer table.
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	/* layout: descriptors, then device event, then driver event */
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	/* Per virtio spec, both wrap counters start at 1 */
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	/* The pre-fill above consumed one full lap of the ring */
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a managed packed-ring Rx queue: build the ring via the common
+ * packed setup, then program the HW registers.
+ * Returns a handle into rxvq[], or NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	/* NOTE(review): start_idx 0x8000 presumably encodes the wrap bit in the
+	 * HW start index (comment says "start wrap ring counter as 1") — confirm
+	 * against the DBS register spec.
+	 */
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a managed packed-ring Tx queue: build the ring via the common
+ * packed setup (no buffer pre-fill flags for Tx), then program the HW.
+ * Returns a handle into txvq[], or NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	/* NOTE(review): start_idx 0x8000 presumably encodes the wrap bit in the
+	 * HW start index — confirm against the DBS register spec.
+	 */
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on the requested ring layout; unknown layouts yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on the requested ring layout; unknown layouts yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Current avail flag value for the ring's avail wrap counter */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse used flag, so avail != used marks a descriptor driver-owned */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, wrapping and toggling the avail wrap counter.
+ * Statement-expression locals avoid double evaluation of the arguments.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, wrapping and toggling the used wrap counter */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Fetch up to "n" received buffers from the Rx ring into "rp".
+ * Returns the number of ring entries (segments) consumed; *nb_pkts is set
+ * to the number of whole packets delivered. Segmented (jumbo) packets are
+ * only delivered when all their segments fit within "n" entries.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* assumes all Rx buffers share desc[0]'s length — TODO confirm */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			/* FPGA-written capture header holds total packet length */
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				/* collect the remaining segments of this packet */
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* descriptor is device-owned until avail == used == wrap */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+/*
+ * Put "n" buffers back into the Avail ring so the FPGA can reuse them.
+ * Split ring: bump the avail index. Packed ring: repopulate descriptors,
+ * publishing the first descriptor's flags last so HW never observes a
+ * partially-written batch.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/* NOTE(review): rte_rmb() before a producer-side store — a write
+		 * barrier (rte_wmb) would normally be expected here; confirm the
+		 * intended ordering against the HW interface.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to "n" Tx descriptors, starting at *first_idx.
+ * Exposes the ring through "cvq" (split or packed view) and the driver
+ * buffer table through *p_virt_addr.
+ * Returns the number of descriptors actually available (m + n); 0 for an
+ * unknown ring type.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Serve previously-reclaimed out-of-sequence descriptors first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* desc->id may skip ahead; reclaim the whole span */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Overshoot: stash the surplus descriptors for the next call */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand "n" filled Tx packets (n_segs[i] segments each) to the FPGA.
+ * Split ring: publish avail-ring entries, then the avail index after a full
+ * barrier. Packed ring: set descriptor flags, publishing the first
+ * descriptor's flags last so HW never observes a partially-written batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			/* each avail entry points at a packet's first descriptor */
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the FPGA's current Rx queue pointer for this queue.
+ * Polls up to 100000 times with 10 us sleeps until HW publishes a valid
+ * value. Returns 0 and the 16-bit pointer in *index, or -1 on read error
+ * or timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	for (;;) {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		/* BUGFIX: test validity before sleeping / spending the retry
+		 * budget — the old loop slept even after a valid read and could
+		 * time out discarding a just-read valid pointer.
+		 */
+		if (rx_ptr != INDEX_PTR_NOT_VALID)
+			break;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	}
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the FPGA's current Tx queue pointer for this queue.
+ * Polls up to 100000 times with 10 us sleeps until HW publishes a valid
+ * value. Returns 0 and the 16-bit pointer in *index, or -1 on read error
+ * or timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		/* BUGFIX: test validity before sleeping / spending the retry
+		 * budget — the old loop slept even after a valid read and could
+		 * time out discarding a just-read valid pointer.
+		 */
+		if (tx_ptr != INDEX_PTR_NOT_VALID)
+			break;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	}
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/*
+ * One DMA-capable memory area: the device-visible address, the host
+ * mapping of the same memory, and its size.
+ */
+struct nthw_memory_descriptor {
+	void *phys_addr; /* NOTE(review): presumably a bus/IOVA address stored as void * — confirm */
+	void *virt_addr; /* host virtual address of the same buffer */
+	uint32_t len; /* buffer length in bytes */
+};
+
+/*
+ * Fixed-width aliases used by the virtio structures below.
+ * NOTE(review): "ule" presumably means unsigned little-endian — confirm;
+ * typedefs would be safer than #define for type aliases.
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+/* Maximum MSI-X interrupt vectors per VF */
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Virtqueue layout selectors (values stored in the vq_type fields) */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+/* Ordering selectors passed to nthw_setup_tx_virt_queue() (in_order arg) */
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ */
+/* Byte-packed: no compiler padding between fields (presumably to match
+ * the virtio split-ring wire layout — confirm against the virtio spec).
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+/* NOTE(review): #pragma pack(push, 16) caps member alignment at 16; it
+ * does not by itself force 16-byte alignment of the descriptor — the
+ * comment above implies the allocator must provide that. Confirm.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/*
+ * Event suppression area for the packed ring (presumably the virtio 1.1
+ * driver/device event suppression structure — field names match the spec).
+ * NOTE(review): the #pragma pack(push, 16) in the middle of the struct has
+ * no effect on members already declared; its placement looks accidental —
+ * confirm intent.
+ */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ */
+/*
+ * Set the "next" chain index of descriptor "index". Only split rings have
+ * a next field; for packed rings the macro is a no-op.
+ * All arguments are snapshotted into __typeof__ locals so each is
+ * evaluated exactly once, matching vq_add_flags()/vq_set_flags() below
+ * (the original evaluated "index"/"nxt" in place, a double-evaluation
+ * hazard for arguments with side effects).
+ */
+#define vq_set_next(_vq, _index, _nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_nxt) (nxt) = (_nxt); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/* OR additional flag bits into descriptor "index" for either ring layout. */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/* Overwrite the flags of descriptor "index" for either ring layout. */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/*
+ * Buffer address/length pair — the leading fields shared by split and
+ * packed descriptors; accessed via the "b" member of nthw_cvirtq_desc.
+ */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/*
+ * Common descriptor-ring view: a single pointer to the ring, interpreted
+ * according to vq_type (SPLIT_RING or PACKED_RING).
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type; /* selects which union member is valid */
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+/* One received segment returned by nthw_get_rx_packets(). */
+struct nthw_received_packets {
+	void *addr; /* host virtual address of the segment data */
+	uint32_t len; /* segment length in bytes */
+};
+
+/*
+ * These functions handle both Split and Packed rings, including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..ce07d5a8cd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Hex-dump one packet segment to stdout, 16 bytes per line, with an
+ * optional label prefix. Debug-build helper only.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter driver structure in the global g_p_drv[] table,
+ * warning if an entry for the same adapter number is being replaced.
+ *
+ * Fixes vs. original: g_p_drv has NUM_ADAPTER_MAX entries, so the last
+ * valid index is NUM_ADAPTER_MAX - 1 — the old ">" test permitted an
+ * out-of-bounds write at index NUM_ADAPTER_MAX. The overwrite check is
+ * also moved under hwlock so the slot is read and written atomically.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	rte_spinlock_lock(&hwlock);
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Find the registered adapter whose PCI domain and bus match "addr".
+ * Returns the driver structure, or NULL if no adapter matches.
+ * The global table is scanned under hwlock.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *found = NULL;
+	int i;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX && !found; i++) {
+		struct drv_s *cand = g_p_drv[i];
+
+		if (!cand)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus)
+			found = cand;
+	}
+	rte_spinlock_unlock(&hwlock);
+	return found;
+}
+
+/* Unpack a packed PCI identifier into an rte_pci_addr and look it up. */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter info for the adapter identified by "pciident" to "pfh".
+ * Returns the result of nt4ga_adapter_show_info(), or -1 when no adapter
+ * matches (the original dereferenced the NULL lookup result).
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1; /* no adapter registered for this PCI identifier */
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS handle of the adapter at "pci_addr", or NULL (with an
+ * error log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (p_drv)
+		return p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+
+	NT_LOG(ERR, ETHDEV,
+	       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+	       " is not found\n",
+	       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+	       pci_addr.function);
+	return p_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at "pci_addr", or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when it is not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	enum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return fpga_profile;
+	}
+
+	fpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+	return fpga_profile;
+}
+
+/*
+ * kvargs handler: parse "value_str" as an unsigned 32-bit number
+ * (decimal, octal or hex per strtol base 0) into *extra_args.
+ * Returns 0 on success, -1 on missing arguments or unparsable/overflowing
+ * input (the original silently stored 0 for non-numeric input).
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	char *end;
+
+	if (!value_str || !extra_args)
+		return -1;
+	errno = 0;
+	const uint32_t value = strtol(value_str, &end, 0);
+
+	/* reject empty/non-numeric input and range errors */
+	if (end == value_str || errno != 0)
+		return -1;
+	*(uint32_t *)extra_args = value;
+	return 0;
+}
+
+/* One <port>:<speed> pair parsed from the "port.link_speed" devarg. */
+struct port_link_speed {
+	int port_id; /* physical port number */
+	int link_speed; /* requested link speed in Mbps */
+};
+
+/*
+ * kvargs handler: parse one "<port>:<link speed Mbps>" pair, e.g. "1:10000".
+ * extra_args points at a cursor (struct port_link_speed **); the parsed
+ * pair is stored through the cursor, which is then advanced so repeated
+ * calls fill consecutive array entries.
+ * Returns 0 on success, -1 on malformed input.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	/* a ':' must separate the port id from the speed */
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* advance the caller's cursor to the next slot */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Gather per-queue and aggregate Rx/Tx counters for one port into the
+ * DPDK rte_eth_stats layout. Returns 0 on success, -1 when driver/stat
+ * state is unavailable or if_index is out of range.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* NOTE(review): ">" permits if_index == NUM_ADAPTER_PORTS_MAX; if
+	 * that constant sizes an array elsewhere this is an off-by-one —
+	 * confirm whether ">=" was intended.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	/* Rx queues: copy per-queue counters and accumulate port totals */
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	/* Tx queues: same, plus per-queue error counters */
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue Rx/Tx counters and the missed-packet count for one
+ * port, and record the reset time. Counters are cleared under the driver
+ * stat lock. Returns 0 on success, -1 when stat state is unavailable or
+ * n_intf_no is out of range.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* NOTE(review): same suspected off-by-one as dpdk_stats_collect():
+	 * ">" allows n_intf_no == NUM_ADAPTER_PORTS_MAX — confirm.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	/* remember when totals were last reset */
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/*
+ * Translate an NT link speed enum value into the corresponding
+ * ETH_SPEED_NUM_* value; unknown values map to ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex enum onto DPDK's ETH_LINK_*_DUPLEX values. */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0; /* unknown duplex reported as 0 */
+	}
+}
+
+/*
+ * rte_eth_dev link_update callback: refresh link status, speed and duplex
+ * in eth_dev->data->dev_link.
+ * Virtual/override ports report link up once the virtual port has
+ * negotiated; physical ports query the adapter. When the device is not
+ * started the link is reported down. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		/* virtual ports have no PHY: status comes from negotiation state */
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		/* physical port: query the adapter for live link state */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		/* device stopped: report link down */
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/* rte_eth_dev stats_get callback: delegate to dpdk_stats_collect(). */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *dev_priv = eth_dev->data->dev_private;
+
+	dpdk_stats_collect(dev_priv, stats);
+	return 0;
+}
+
+/* rte_eth_dev stats_reset callback: delegate to dpdk_stats_reset(). */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *dev_priv = eth_dev->data->dev_private;
+	struct ntdrv_4ga_s *nt_drv = &dev_priv->p_drv->ntdrv;
+
+	dpdk_stats_reset(dev_priv, nt_drv, dev_priv->if_index);
+	return 0;
+}
+
+/*
+ * Convert an NT link speed capability bitmask into the equivalent DPDK
+ * ETH_LINK_SPEED_* capability bitmask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	/* (NT capability bit, corresponding DPDK capability bit) pairs */
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} capa_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < sizeof(capa_map) / sizeof(capa_map[0]); i++) {
+		if (nt_link_speed_capa & capa_map[i].nt_bit)
+			eth_speed_capa |= capa_map[i].eth_bit;
+	}
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * rte_eth_dev dev_infos_get callback: report device capabilities (MTU
+ * range, queue counts, speed capabilities, RSS offloads). Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* inline profile requires a larger minimum MTU */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use a key, so this is just a fake key
+	 * length to meet testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received virtqueue packet — possibly spanning several
+ * virtqueue segments — into "mbuf", chaining additional mbufs from
+ * "mb_pool" when the data does not fit in the first one.
+ *
+ * hw_recv points at the packet's first segment, which starts with an
+ * SG_HDR_SIZE header that is skipped; data_len is the captured length
+ * including that header. max_segs bounds how many virtqueue segments may
+ * be consumed.
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on mbuf
+ * allocation failure / inconsistent segment count (caller frees "mbuf").
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* consume one virtqueue segment per iteration */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		/* segment data does not fit current mbuf: fill it, then chain */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* ran out of segments before the packet was complete */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one mbuf chain into one or more virtqueue buffers.
+ *
+ * Handles both directions of segmentation: the mbuf may consist of several
+ * segments, and each virtqueue buffer (SG_HW_TX_PKT_BUFFER_SIZE bytes, the
+ * first one minus SG_HDR_SIZE for the packet header) may be too small for a
+ * single mbuf segment.
+ *
+ * cvq_desc     - combined virtqueue descriptor table to fill in (len/flags/next)
+ * vq_descr_idx - index of the first descriptor to use; advances as segments
+ *                are chained
+ * vq_bufs      - backing buffers for the descriptors
+ * max_segs     - max number of virtqueue segments available for this packet
+ * mbuf         - source packet
+ *
+ * Returns the number of virtqueue segments consumed, or -1 if the packet did
+ * not fit within max_segs.
+ *
+ * Fix vs. previous revision: the two debug printf()s paired "%u" with
+ * uint64_t pointer-difference arguments, which is undefined behavior per the
+ * C standard; they now use PRIu64.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;	/* bytes of the whole packet still to copy */
+	int cpy_size = mbuf->data_len;	/* bytes left in the current mbuf segment */
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	/* First vq buffer also holds the SG_HDR_SIZE packet header */
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %" PRIu64 ", dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* Chain the current descriptor to a fresh one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/*
+				 * NOTE(review): on overflow we only break out of
+				 * this inner loop; the final check after the
+				 * outer loop reports the error to the caller.
+				 */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %" PRIu64 "\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len claimed more data than the segments held */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	cur_seg_num++;	/* count the first descriptor, which started at 0 */
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK Tx burst handler for the scatter-gather path.
+ *
+ * Counts the virtqueue segments each mbuf needs, reserves that many Tx
+ * buffers from the hardware queue, copies every packet (prefixed with a
+ * zeroed or vswitch-filled _pkt_hdr_tx header) into the virtqueue, frees the
+ * mbufs, and releases the filled segments to hardware. Returns the number of
+ * packets actually sent, which may be less than nb_pkts under hardware back
+ * pressure.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* PMD is being torn down - refuse traffic */
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad runts up to the 60-byte minimum frame payload.
+		 * NOTE(review): data_len is forced to 60 as well, which
+		 * assumes a runt is always single-segment - confirm.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	/* Reserve vq buffers; zero means total back pressure from HW */
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* Fast path: one mbuf segment that fits one vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* Chained copy across mbuf and/or vq segments */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		/* Data copied out - the mbuf can go back to its pool */
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* Hand the filled segments to hardware (only if something was copied) */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO/IOMMU-map the memory for one hardware virtio queue:
+ * a 1MB control area for the combined descriptor rings plus num_descr
+ * packet buffers of buf_size bytes each.
+ *
+ * The preferred layout is a single contiguous allocation that does not cross
+ * a 1G boundary, mapped in one go; if the allocator cannot provide that, a
+ * fallback path maps the control area and the packet buffers separately.
+ *
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fixes vs. previous revision: every error path now frees what it allocated
+ * (the control/buffer allocations were leaked when nt_vfio_dma_map failed),
+ * and the fallback's control pointer no longer shadows the outer 'virt'.
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Just allocate 1MB to hold all combined descr rings */
+		size = 0x100000;
+		void *ctrl_virt = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!ctrl_virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, ctrl_virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			rte_free(ctrl_virt);	/* was leaked here before */
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = ctrl_virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       ctrl_virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* was leaked here before: free everything we built */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt);	/* was leaked here before */
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;	/* packet buffers start after the control MB */
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		/* undo the mapping before releasing the memory */
+		nt_vfio_dma_unmap(vf_num, virt,
+				  (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				  ONE_G_SIZE);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Drop the VF ownership mark of a hardware queue set; safe on NULL. */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq && hwq->vf_num != 0)
+		hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the 1G VFIO region, drop the VF
+ * ownership and free the buffer array plus the control-ring allocation.
+ * Returns 0 on success, -1 if the VFIO unmap failed.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	const int vf_num = hwq->vf_num;
+	void *ctrl_va = hwq->virt_queues_ctrl.virt_addr;
+
+	int rc = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				   (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				   ONE_G_SIZE);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       rc, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(ctrl_va);
+	return 0;
+}
+
+/* ethdev op: release the HW resources behind one Tx queue. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* ethdev op: release the HW resources behind one Rx queue. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Running count of HW queues handed out by allocate_queue(). */
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	/* %i matches the signed int arguments (the old %u misprinted negatives) */
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%i, New queues=%i, Max queues=%i\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+/*
+ * ethdev op: set up one scatter-gather Rx queue.
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool bookkeeping is needed; for
+ * other ports the HW virtio queue memory is allocated/IOMMU-mapped and a
+ * managed split-ring Rx virt-queue is created on the DBS.
+ *
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fix vs. previous revision: the result of nthw_setup_managed_rx_virt_queue()
+ * is now checked - a NULL vq would previously be reported as success and
+ * dereferenced later in the Rx burst path.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		/* Override ports use the mempool directly - no HW vq needed */
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+	if (!rx_q->vq) {
+		NT_LOG(ERR, ETHDEV,
+		       "(%i) NTNIC RX OVS-SW virt queue setup failed\n",
+		       internals->port);
+		deallocate_hw_virtio_queues(&rx_q->hwq);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * ethdev op: set up one scatter-gather Tx queue.
+ *
+ * Computes the HW bypass target for the NTDVIO0 header, allocates and maps
+ * the HW virtio queue memory, creates a managed split-ring Tx virt-queue and
+ * binds the virtual-port queues to this port on the EPP.
+ *
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fixes vs. previous revision: the bounds check used ">" so an id equal to
+ * nb_tx_queues slipped through (off-by-one, now ">="), and the result of
+ * nthw_setup_managed_tx_virt_queue() is now checked.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	/* ">=": id == nb_tx_queues is already one past the last valid queue */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+	if (!tx_q->vq) {
+		NT_LOG(ERR, ETHDEV,
+		       "(%i) NTNIC TX OVS-SW virt queue setup failed\n",
+		       tx_q->port);
+		return -1;
+	}
+
+	tx_q->enabled = 1;
+	/* Bind all virtual-port queues to this physical transmit port */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ethdev op: set the MTU on an inline-profile physical port.
+ * Only physical ports within [MIN_MTU_INLINE, MAX_MTU] are accepted.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+
+	if (internals->type != PORT_TYPE_PHYSICAL || mtu < MIN_MTU_INLINE ||
+			mtu > MAX_MTU)
+		return -EINVAL;
+
+	return flow_set_mtu_inline(flw_dev, internals->port, mtu) ? -EINVAL : 0;
+}
+
+/*
+ * ethdev op: set the MTU of a port via the EPP.
+ *
+ * The exception-path queue is always programmed to MAX_MTU; the requested
+ * MTU is applied to the virtual-port queues (virtual ports) or to the
+ * physical port itself. Returns 0 on success, a nthw_epp_set_mtu() error,
+ * or -EINVAL for an out-of-range MTU / unsupported port type.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* exception queue towards OVS stays at MAX_MTU */
+		int retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+					      internals->rxq_scg[0].queue.hw_id,
+					      MAX_MTU, internals->type);
+		if (retval)
+			return retval;
+
+		/* apply the requested MTU to each virtual-port tx queue */
+		for (uint i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+						  internals->vpq[i].hw_id,
+						  mtu, internals->type);
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+		return retval;
+	}
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* exception queue towards OVS stays at MAX_MTU */
+		int retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+					      internals->rxq_scg[0].queue.hw_id,
+					      MAX_MTU, PORT_TYPE_VIRTUAL);
+		if (retval)
+			return retval;
+
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+					  internals->port, mtu,
+					  internals->type);
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+		return retval;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+	       internals->port, internals->type);
+	return -EINVAL;
+}
+
+/* ethdev op: mark an Rx queue started (state bookkeeping only). */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* ethdev op: mark an Rx queue stopped (state bookkeeping only). */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* ethdev op: mark a Tx queue started (state bookkeeping only). */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* ethdev op: mark a Tx queue stopped (state bookkeeping only). */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* ethdev op: clear the MAC address table entry at the given index. */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *addr_table = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&addr_table[index], 0, sizeof(addr_table[index]));
+		return;
+	}
+
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+}
+
+/*
+ * ethdev op: store a MAC address at the given table index.
+ * Returns 0 on success, -1 for an out-of-range index.
+ */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *addr_table = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		addr_table[index] = *mac_addr;
+		return 0;
+	}
+
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+	return -1;
+}
+
+/* ethdev op: set the port's primary (index 0) MAC address. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * ethdev op: replace the port's multicast address list.
+ *
+ * Copies nb_mc_addr entries into the per-port table and zeroes the unused
+ * tail entries. Returns 0 on success, -1 if the list is too long.
+ *
+ * Fix vs. previous revision: the table holds exactly
+ * NUM_MULTICAST_ADDRS_PER_PORT entries, so a list of that exact size is
+ * valid; the old ">=" check rejected it (off-by-one).
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	if (nb_mc_addr > NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (> %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	/* Copy the new entries and clear any stale tail entries */
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * ethdev op: configure hook. Marks probing finished and forces promiscuous
+ * mode on, which this device always runs in.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode. The previous
+	 * "x ^= ~x" trick always produces all-ones; say "= 1" directly.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * ethdev op: start the port.
+ *
+ * Virtual/override ports are simply reported link-up. Physical ports wait up
+ * to 5 seconds for the FPGA link (sending before the port is ready produces
+ * garbage) and then apply any requested loopback modes.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type != PORT_TYPE_VIRTUAL &&
+			internals->type != PORT_TYPE_OVERRIDE) {
+		/* Poll the link every 100ms, giving up after 5 seconds */
+		int attempts = 0;
+
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			if (++attempts >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+
+		if (internals->lpbk_mode & (1 << 0)) {
+			nt4ga_port_set_loopback_mode(p_adapter_info,
+						     n_intf_no,
+						     NT_LINK_LOOPBACK_HOST);
+		}
+		if (internals->lpbk_mode & (1 << 1)) {
+			nt4ga_port_set_loopback_mode(p_adapter_info,
+						     n_intf_no,
+						     NT_LINK_LOOPBACK_LINE);
+		}
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	}
+	return 0;
+}
+
/*
 * rte_eth dev_stop callback.
 *
 * For non-virtual ports: releases the managed RX/TX virt queues, then
 * takes the port down (admin state, link status, link speed, loopback all
 * cleared).  All port types finish by reporting DPDK link-down.  Always
 * returns 0.
 */
static int eth_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
	       internals->type);

	if (internals->type != PORT_TYPE_VIRTUAL) {
		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
		struct ntnic_tx_queue *tx_q = internals->txq_scg;

		uint q;

		/* Release the scatter-gather virt queues backing each DPDK queue */
		for (q = 0; q < internals->nb_rx_queues; q++)
			nthw_release_managed_rx_virt_queue(rx_q[q].vq);

		for (q = 0; q < internals->nb_tx_queues; q++)
			nthw_release_managed_tx_virt_queue(tx_q[q].vq);

		/* Take the physical port fully down */
		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
					  NT_LINK_SPEED_UNKNOWN);
		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
					     NT_LINK_LOOPBACK_OFF);
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * DPDK ethdev operations table for this PMD.
 * Deliberately NOT const: .mtu_set is patched in at probe time when the
 * FPGA exposes an EPP module (see nthw_pci_dev_init).
 * .mtu_set and .promiscuous_disable start out NULL (unsupported until/
 * unless assigned).
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL,
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL,
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
/*
 * File-scope state for virtual-port representor setup (filled in during
 * PCI probe; see the "parse representor args" step in nthw_pci_dev_init).
 * NOTE(review): unsynchronized - presumably probing is single-threaded;
 * confirm if multiple adapters can probe concurrently.
 */
static struct {
	struct rte_pci_device *vpf_dev;	/* backing PF PCI device */
	struct rte_eth_devargs eth_da;	/* parsed representor devargs */
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED]; /* per virtual port */
	uint16_t pf_backer_port_id;	/* port id of the backing PF eth dev */
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Release all SCG virtqueue resources of every port instance and the
+ * globally tracked VF/vDPA virtqueues, then detach the exception-path
+ * VFIO mapping. The eth_dev argument is unused; teardown is driver-global.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Tear down Tx then Rx queues for each port in the global list. */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release any VF/vDPA virtqueues registered for deferred cleanup. */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler. Any other signal is forwarded: the previous handler is
+ * restored and the signal re-raised. SIGINT itself only sets the kill_pmd
+ * flag, which the shutdown watcher thread polls.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig != SIGINT) {
+		signal(sig, previous_handler);
+		raise(sig);
+		return;
+	}
+	/* NOTE(review): for async-signal safety kill_pmd should be declared
+	 * volatile sig_atomic_t - confirm its declaration elsewhere.
+	 */
+	kill_pmd = 1;
+}
+
+/*
+ * Watcher thread installed by init_shutdown(): waits for the SIGINT flag,
+ * tears the PMD down, then restores the previous handler and re-raises
+ * SIGINT so default/user handling proceeds.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	/* nthw_pci_dev_deinit() ignores its argument, so a dummy suffices. */
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT shutdown machinery: a flag-setting signal handler plus
+ * a watcher thread performing the actual teardown. Also makes a one-time,
+ * rough TSC-frequency estimate (cycles/second, measured over 10 ms) used to
+ * rate-limit statistics polling.
+ *
+ * Returns 0 on success, -1 if the watcher thread could not be created.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		/* No watcher thread will run: restore the original handler so
+		 * SIGINT is not silently swallowed by our flag-only handler.
+		 */
+		signal(SIGINT, previous_handler);
+		NT_LOG(ERR, ETHDEV, "%s: shutdown thread create failed\n",
+		       __func__);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point. Validates the EAL environment (PCI support, VFIO
+ * IOMMU, hugepages, IOVA mode), filters out PCIe-bifurcated secondary
+ * endpoints, then initializes the adapter and installs the SIGINT shutdown
+ * handler. Returns the adapter init result (0 on success, negative on error).
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* The PMD requires a functional PCI subsystem. */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	/* VFIO no-IOMMU mode is unsupported; a real IOMMU is required. */
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	/* IOVA-as-PA is preferred for performance but only warned about. */
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/* On bifurcated NT200A01/NT50B01 boards only the primary endpoint
+	 * (subsystem id 0x01) is driven; secondary endpoints are rejected.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	/* NOTE(review): the shutdown handler is installed even when
+	 * nthw_pci_dev_init() failed (res != 0) - confirm this is intended.
+	 */
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove entry point: delegate per-device teardown to nthw_pci_dev_deinit(). */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+}
+
+/*
+ * Register one DPDK logtype per NT log module and record the assigned ids
+ * in nt_log_module_logtype[]. Default level is INFO; the global level is
+ * raised to DEBUG so per-type levels take effect.
+ */
+static int nt_log_init_impl(void)
+{
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (int i = NT_LOG_MODULE_GENERAL; i < NT_LOG_MODULE_END; ++i) {
+		int index = NT_LOG_MODULE_INDEX(i);
+
+		nt_log_module_logtype[index] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[index],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Forward an NT log message to the DPDK rte_log backend, translating the
+ * NT severity and module id into their DPDK counterparts.
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	/* Map NT severity onto the corresponding DPDK level. */
+	uint32_t rte_level;
+
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	/* Registered modules use their EAL logtype; others pass through. */
+	uint32_t rte_module = module;
+
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END)
+		rte_module = (uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/*
+ * Return 1 when DEBUG logging is enabled for the given NT module,
+ * 0 when it is not, and -1 for a module id outside the valid range.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+
+	const int idx = NT_LOG_MODULE_INDEX(module);
+	const int lvl = rte_log_get_level(nt_log_module_logtype[idx]);
+
+	return lvl == RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* Constructor: hook the NT logging abstraction up to the rte_log backend. */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor and PMD registration for net_ntnic (needs vfio-pci). */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record vq in the first free slot of the global release-tracking table so
+ * it can be torn down at deinit. Returns 0 on success, -1 when full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	for (int idx = 0; idx < MAX_REL_VQS; idx++) {
+		if (rel_virt_queue[idx].vq != NULL)
+			continue;
+		rel_virt_queue[idx].vq = vq;
+		rel_virt_queue[idx].rx = rx;
+		rel_virt_queue[idx].managed = managed;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Remove vq from the global release-tracking table.
+ * Returns 0 on success, -1 when vq is not registered.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	for (int idx = 0; idx < MAX_REL_VQS; idx++) {
+		if (rel_virt_queue[idx].vq == vq) {
+			rel_virt_queue[idx].vq = NULL;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Look up the port instance bound to a vhost id.
+ * Returns the matching pmd_internals, or NULL when none exists.
+ */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	for (struct pmd_internals *p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Look up the port instance bound to a vhost socket path (exact match).
+ * Returns the matching pmd_internals, or NULL when none exists.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	for (struct pmd_internals *p = pmd_intern_base; p != NULL; p = p->next) {
+		printf("Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       p->vhost_path, path,
+		       strcmp(p->vhost_path, path));
+		if (strcmp(p->vhost_path, path) == 0)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues=[port:nvq,port:nvq;...]" devargs fragment into
+ * portq[], indexed by port. The input string is modified in place.
+ * Entries with an out-of-range port or queue count are ignored; a missing
+ * or unterminated bracket list makes the whole call a no-op.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Locate the opening bracket of the mapping list. */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Scan backwards for the matching closing bracket. */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/*
+	 * Entries are "port:nvq" separated by ',' or ';'. Use the reentrant
+	 * strtok_r() instead of strtok(): this driver spawns threads and
+	 * strtok()'s hidden static state is not thread-safe.
+	 */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			/* Accept only in-range ports and queue counts. */
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor/portqueues devargs for a PF device and record them in
+ * the global 'rep' state. Returns the number of representor ports parsed
+ * (0 when none), or -1 on a devargs parse error.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		/* NOTE(review): rte_errno = -err assumes err is a negative
+		 * errno value - confirm against the DPDK version in use.
+		 */
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual port and size its private data.
+ * The representor id comes from the PF's parsed devargs when available,
+ * otherwise from the VFIO VF number. *n_vq is set from the port->queues
+ * devargs mapping, defaulting to 1.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	/* NOTE(review): file-scope mutable counter - not thread-safe;
+	 * confirm probing is single-threaded.
+	 */
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Prefer representor ids declared via PF devargs; fall back to the
+	 * VFIO VF number when none remain.
+	 */
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the underlying rte_device name, or NULL when dev/name is absent. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev && dev->device.name) ? dev->device.name : NULL;
+}
+
+/* Devargs accepted by the virtual-port PMD: "vlan=<id>" and "sep=<0|1>"
+ * (use a separate vhost socket directory per port).
+ */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create a virtual-port backend ethdev on top of the physical adapter:
+ * parse "vlan"/"sep" devargs, allocate the ethdev and its private data,
+ * allocate HW queues, attach to the flow device (directly for the vswitch
+ * profile, via the in-port's flow device otherwise) and start the vDPA
+ * relay. Returns 0 on success, -1 on failure.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		/* NOTE(review): kvlist is never released with
+		 * rte_kvargs_free() on any path - confirm leak.
+		 */
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live in [MAX_NTNIC_PORTS, VIRTUAL_TUNNEL_PORT_OFFSET).
+	 * NOTE(review): this early return skips the error-label cleanup,
+	 * leaking 'data' and the allocated ethdev - confirm.
+	 */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	/* vswitch profile gets its own flow device; otherwise the queues are
+	 * attached to the flow device of the paired physical in-port.
+	 */
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	/* NOTE(review): fixed 128-byte buffer filled with unbounded
+	 * sprintf() - should be snprintf(); confirm DVIO_VHOST_DIR_NAME
+	 * length cannot overflow it.
+	 */
+	char path[128];
+
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Replace the ethdev's data area with our privately allocated copy. */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append to the driver-global internals list. */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	/* NOTE(review): once 'internals' aliases (*eth_dev)->data->dev_private,
+	 * freeing it here leaves a dangling pointer in the still-allocated
+	 * ethdev; the ethdev itself is never released - confirm.
+	 */
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+/* Rx burst stub: always reports zero packets received. */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+/* Tx burst stub: always reports zero packets transmitted. */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * Create the virtual-function DPDK ethdev for a PCI device and select burst
+ * handlers: dummy stubs for PORT_TYPE_OVERRIDE (traffic flows via VF/vDPA),
+ * real SCG handlers otherwise. Returns 0 on success, -1 on failure.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual-function ethdev for a PCI device: close all vDPA
+ * devices, free the privately allocated data area and release the port.
+ * Returns 0 on success, -1 when pci_dev is NULL or no ethdev is found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	/* NOTE(review): rte_eth_dev_release_port() may also free
+	 * data->dev_private depending on DPDK version - confirm this does
+	 * not double free.
+	 */
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Hash-distribution percentage assigned to port 0 in a two-port bond. */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* Entry indices/values used with lag_set_config(FLOW_LAG_SET_ALL, ...). */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* No-op transition action: link state unchanged, nothing to reconfigure. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links up: distribute Tx 50/50 between the two ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 is up: steer 100% of Tx to port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 is up: steer 100% of Tx to port 1 (0% to port 0). */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/*
+ * Both links down: disable Tx on the port-0 and hash distribution entries.
+ * Returns 0 on success (the sum of both lag_set_config() results).
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	int status = lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX,
+				    LAG_NO_TX);
+
+	status += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX,
+				 LAG_NO_TX);
+	return status;
+}
+
+/* Query the adapter for the link state of @port; true when the link is up. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/*
+ * Encode both link states as a 2-bit value matching enum lag_state_e:
+ * bit 0 = port 0 up, bit 1 = port 1 up.
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t p0_up = lag_get_link_status(config, 0) ? 1 : 0;
+	const uint8_t p1_up = lag_get_link_status(config, 1) ? 1 : 0;
+
+	return (p1_up << 1) | p0_up;
+}
+
+/*
+ * Active/backup mode: route all Tx to the primary port and block Rx on the
+ * backup port. Returns 0 on success (sum of HW config call results).
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->primary_port == 0) {
+		/* If port 0 is the active primary, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active primary, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup mode: route all Tx to the backup port and block Rx on the
+ * (failed) primary port. Returns 0 on success (sum of HW config results).
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->backup_port == 0) {
+		/* If port 0 is the active backup, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active backup, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup monitor loop: poll link state every 500 ms and fail over to
+ * the backup port when the primary goes down, failing back when it returns.
+ * Runs until config->lag_thread_active is cleared.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Action callback invoked on a LAG link-state transition. */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One row of the transition table: the action to run when the observed
+ * link state moves from src_state to dst_state.
+ */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/* Full transition table for active/active mode: all 16 (src, dst) pairs of
+ * the four link states are covered, including the four no-op self loops.
+ */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Find the action for the (current -> new) link-state transition in the
+ * actions[] table. Returns NULL when no matching entry exists.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const size_t n_actions = sizeof(actions) / sizeof(actions[0]);
+
+	for (size_t idx = 0; idx < n_actions; idx++) {
+		if (actions[idx].src_state == current_state &&
+				actions[idx].dst_state == new_state)
+			return actions[idx].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active monitor loop: start balanced 50/50 with no ports blocked,
+ * then poll link state every 500 ms and apply the transition action from
+ * the actions[] table. Runs until config->lag_thread_active is cleared.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/*
+		 * lookup_action() returns NULL for a transition missing from
+		 * the table; calling through a NULL pointer would crash the
+		 * LAG thread, so guard it.
+		 */
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry point: dispatch to the monitor loop matching
+ * the configured bonding mode (active/backup or 802.3ad active/active).
+ * Unsupported modes terminate the thread with a message on stderr.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+#define SG_HDR_SIZE 12
+
+/*
+ * VQ buffers needed to fit all data in packet + header.
+ * Evaluates to ceil((_data_size_ + SG_HDR_SIZE) / SG_HW_TX_PKT_BUFFER_SIZE),
+ * with a minimum of one segment.  Implemented as a GCC/Clang statement
+ * expression so the argument is evaluated exactly once.
+ */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+/*
+ * Ring-index helpers for the Tx virt-queue descriptor ring.
+ * NOTE: VIRTQ_DESCR_IDX expands to an expression referencing a variable
+ * named `first_vq_descr_idx`, which must be in scope at the expansion site.
+ */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+/* Next index in the ring, wrapping at SG_NB_HW_TX_DESCRIPTORS. */
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/*
+ * SG_HDR_SIZE (12) is defined once earlier in this header; the duplicate
+ * #define that used to sit here has been removed.
+ */
+
+/*
+ * Rx scatter-gather metadata header prepended to received packets.
+ * The bit-fields total 96 bits (12 bytes == SG_HDR_SIZE); this is enforced
+ * at compile time by the check_sg_pkt_rx_hdr_size typedef further down.
+ * Field semantics follow the FPGA descriptor layout — confirm against the
+ * FPGA documentation; the per-field notes below are name-derived.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14; /* capture length */
+	uint32_t fid : 10; /* filter id */
+	uint32_t ofs1 : 8; /* offset — presumably a layer offset; confirm */
+	uint32_t ip_prot : 8; /* IP protocol number */
+	uint32_t port : 13; /* receiving port */
+	uint32_t descr : 8; /* descriptor type */
+	uint32_t descr_12b : 1; /* 12-byte descriptor flag */
+	uint32_t color_type : 2; /* how to interpret `color` */
+	uint32_t color : 32; /* flow color/mark value */
+};
+
+/*
+ * Tx scatter-gather metadata header prepended to transmitted packets.
+ * Same 96-bit (SG_HDR_SIZE) layout discipline as _pkt_hdr_rx; enforced by
+ * check_sg_pkt_tx_hdr_size.  Per-field notes are name-derived — confirm
+ * against the FPGA documentation.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14; /* frame length */
+	uint32_t lso_cso0 : 9; /* LSO/checksum-offload control word 0 */
+	uint32_t lso_cso1 : 9; /* LSO/checksum-offload control word 1 */
+	uint32_t lso_cso2 : 8; /* LSO/checksum-offload control word 2 */
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8; /* descriptor type */
+	uint32_t descr_12b : 1; /* 12-byte descriptor flag */
+	uint32_t color_type : 2; /* how to interpret `color` */
+	uint32_t color : 32; /* flow color/mark value */
+};
+
+/*
+ * Compile time verification of scatter gather header size: the array size
+ * evaluates to -1 (a compile error) if either header struct is not exactly
+ * SG_HDR_SIZE bytes.
+ */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+typedef void *handle_t; /* opaque handle */
+
+/* Hardware queue backing memory: control area plus per-packet buffers. */
+struct hwq_s {
+	int vf_num; /* virtual function number — presumably the owning VF */
+	struct nthw_memory_descriptor virt_queues_ctrl; /* virt-queue control area */
+	struct nthw_memory_descriptor *pkt_buffers; /* array of packet buffers */
+};
+
+/* Per-queue Rx state; cache-line aligned. */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* backing hardware queue memory */
+	struct nthw_virt_queue *vq; /* associated virt-queue handle */
+	int nb_hw_rx_descr; /* number of HW Rx descriptors */
+	nt_meta_port_type_t type; /* meta port type of this queue */
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* Per-queue Tx state; cache-line aligned. */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* backing hardware queue memory */
+	struct nthw_virt_queue *vq; /* associated virt-queue handle */
+	int nb_hw_tx_descr; /* number of HW Tx descriptors */
+	/* Used for bypass in NTDVIO0 header on Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type; /* meta port type of this queue */
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+/* Fixed-capacity list of uint32_t values with an element count. */
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count; /* number of valid entries in value[] */
+};
+
+/* Configurations related to LAG management */
+/* Runtime configuration and thread state for LAG management. */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port; /* primary port index */
+	int8_t backup_port; /* backup port index */
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* management thread id */
+	uint8_t lag_thread_active; /* non-zero while the management loop should run */
+
+	struct pmd_internals *internals; /* owning PMD instance */
+} lag_config_t;
+
+/* Bonding modes accepted by lag_management(); values presumably chosen to
+ * match the DPDK bonding mode numbering — confirm. */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* Meter profile list node. */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* Meter instance list node, referencing its profile. */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared; /* non-zero when shared between flows */
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtio transport layout for a virtual port. */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE, /* nothing negotiated yet */
+	VIRT_PORT_NEGOTIATED_SPLIT, /* split virtqueue layout */
+	VIRT_PORT_NEGOTIATED_PACKED, /* packed virtqueue layout */
+	VIRT_PORT_USE_RELAY /* traffic goes through a relay */
+};
+
+#define MAX_PATH_LEN 128
+
+/* Per-port driver private data; instances are chained via `next`. */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev; /* underlying PCI device */
+
+	struct flow_eth_dev *flw_dev; /* flow-engine device handle */
+
+	char name[20]; /* interface name */
+	char vhost_path[MAX_PATH_LEN]; /* vhost socket path (virtual ports) */
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode; /* loopback mode */
+
+	uint8_t nb_ports_on_adapter; /* total ports on this adapter */
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq; /* number of valid entries in vpq[] */
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config; /* non-NULL when this port is part of a LAG */
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles; /* meter profiles */
+	LIST_HEAD(_mtrs, nt_mtr) mtrs; /* meter instances */
+
+	uint64_t last_stat_rtc; /* timestamp of last statistics poll */
+	uint64_t rx_missed;
+
+	struct pmd_internals *next; /* next port in the global list */
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* Storage for match elements produced by rte_flow item conversion. */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus the match elements it was parsed into. */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted attributes: match elements plus generic flow attributes. */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted actions plus the storage backing their configuration data. */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into the NTAPI version of the driver.
+ * Therefore, this is only a good idea when running on a temporary NTAPI.
+ * The query() functionality must move to the flow engine when this is
+ * moved to the Open Source driver.
+ */
+
+struct rte_flow {
+	void *flw_hdl; /* underlying flow-engine handle */
+	int used; /* non-zero while this slot is in use */
+	uint32_t flow_stat_id; /* stat id assigned via create_flow_stat_id() */
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/*
+ * Napatech-private rte_flow item types.  Values start at INT_MIN so they
+ * can never collide with the public enum rte_flow_item_type values.
+ */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/* Napatech-private rte_flow action types; same INT_MIN scheme as above. */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+static int convert_tables_initialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/*
+ * Map an rte_flow item type to the internal flow element type, or -1 when
+ * the value is outside the lookup-table range.  Statement expression: the
+ * argument is evaluated exactly once.
+ */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+/* As CNV_TO_ELEM, but maps rte_flow action types via action_list[]. */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal flow_error into an rte_flow_error.
+ * Success-type internal errors map to RTE_FLOW_ERROR_TYPE_NONE; anything
+ * else maps to RTE_FLOW_ERROR_TYPE_UNSPECIFIED.  A NULL destination is
+ * ignored.  Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	const int is_success = (flow_error->type == FLOW_ERROR_NONE ||
+				flow_error->type == FLOW_ERROR_SUCCESS);
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+	error->type = is_success ? RTE_FLOW_ERROR_TYPE_NONE :
+		      RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map a flow MARK value to a free flow stat id; caller must hold flow_lock.
+ * Starts at (mark % MAX_COLOR_FLOW_STATS) and linearly probes for a free
+ * slot, recording mark + 1 there (non-zero marks the slot as used).
+ * The original implementation spun forever — with the spinlock held — when
+ * every slot was occupied; the probe is now bounded and falls back to
+ * reusing the initial slot when the table is exhausted (stats for the two
+ * flows then alias, which is preferable to a deadlock).
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+	uint32_t probes = 0;
+
+	while (flow_stat_id_map[flow_stat_id] &&
+			probes++ < MAX_COLOR_FLOW_STATS)
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow stat id under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return stat_id;
+}
+
+/* Release a flow stat id; caller holds flow_lock.  Out-of-range ids are
+ * silently ignored. */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time initialization of the rte_flow -> internal-type lookup tables
+ * (elem_list / action_list, plus debug-name tables when RTE_FLOW_DEBUG).
+ * Unmapped entries stay -1, which CNV_TO_ELEM / CNV_TO_ACTION report as
+ * "unknown".  Idempotent via convert_tables_initialized; NOTE(review):
+ * the flag check is not lock-protected — presumably first use happens
+ * single-threaded, confirm.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* memset with -1 sets every byte to 0xFF, i.e. every int to -1. */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* Action-type mapping. */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Interpret a raw packet byte array as a chain of flow elements:
+ * Ethernet [VLANs] IPv4/IPv6 L4 [GTP-U [+extensions]].
+ *
+ * data:     raw packet bytes, Ethernet header first.
+ * preserve: optional mask bytes of the same length as data, or NULL.
+ * size:     number of valid bytes in data/preserve.
+ * out:      receives the elements plus a terminating FLOW_ELEM_TYPE_END;
+ *           NOTE(review): capacity is not passed in — callers are assumed
+ *           to provide MAX_ELEMENTS entries, confirm.
+ * Returns the number of elements written (including END), or -1 when the
+ * data is truncated or uses an unsupported protocol.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN (single, QinQ and legacy QinQ TPIDs) */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3: both the EtherType and the IP version nibble must agree */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field lives at byte offset 9 */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field lives at byte offset 6 */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U is recognized by its well-known UDP destination port */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* extension length field counts 4-byte units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero length byte would neither advance
+				 * pkti nor terminate the loop (and would read
+				 * next_ext from data[pkti - 1]); reject the
+				 * malformed extension instead of spinning.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* last byte of the extension names the next one */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* trailing bytes that match no recognized header are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Fill the flow_attr part of a cnv_attr_s from an rte_flow attribute
+ * block.  Only group and priority are carried over; all other fields are
+ * zeroed.  A NULL attr leaves the attributes all-zero.  Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	struct flow_attr *dst = &attribute->attr;
+
+	memset(dst, 0x0, sizeof(*dst));
+
+	if (attr != NULL) {
+		dst->group = attr->group;
+		dst->priority = attr->priority;
+	}
+
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1:
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE flow action array into the filter-layer action list used by
+ * the inline profile.
+ *
+ * action:       output; converted actions plus per-flow scratch storage
+ *               (flow_rss, decap, encap, queue) referenced by conf pointers.
+ * actions:      RTE action array terminated by RTE_FLOW_ACTION_TYPE_END.
+ * max_elem:     capacity of action->flow_actions[].
+ * queue_offset: added to every QUEUE action index so VF/vDPA ports map onto
+ *               their own queue range (see convert_flow()).
+ *
+ * Returns 0 on success, negative on unknown action, raw-data parse error or
+ * overflow. Unlike the vswitch variant, aidx indexes both the input and the
+ * output arrays (the lists stay in one-to-one correspondence).
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				/* NOTE(review): rss/decap/encap/queue confs
+				 * below are dereferenced without NULL checks -
+				 * confirm validation happens upstream.
+				 */
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw header bytes into flow items */
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw header bytes into flow items,
+				 * honouring the preserve mask.
+				 */
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Rebase onto this port's queue range */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible action: pass conf through as-is */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when 'flow' is a typecast driver handle, i.e. a pointer
+ * that does NOT lie inside the nt_flows[] slot array. Such handles come from
+ * flow_create() for inline-profile group>0 flows and must not be treated as
+ * struct rte_flow objects.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *first_element = &nt_flows[0];
+	const void *last_element = &nt_flows[MAX_RTE_FLOWS - 1];
+
+	return (void *)flow < first_element || (void *)flow > last_element;
+}
+
+/*
+ * Convert an RTE flow (attr/items/actions) into the filter-layer structures.
+ *
+ * attribute/match/action: outputs filled by the create_* helpers.
+ * flow_stat_id: output; only assigned by the vswitch action conversion.
+ *
+ * Returns 0 on success, -1 with 'error' populated on failure. For the inline
+ * profile with group > 0 the caller creates the flow directly from the
+ * converted data (see eth_flow_create()).
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Validate dev before any dereference. The check must come first:
+	 * previously it was performed after dev had already been used to
+	 * read fpga_info and the port type, so it could never trigger.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback. Handles both slot-managed flows (entries of
+ * nt_flows[]) and typecast driver handles (inline profile, group > 0).
+ *
+ * Returns the filter-layer result (0 on success) with 'error' populated.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/* Destroying a NULL flow is a successful no-op */
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/* Opaque driver handle: pass straight through. Do NOT touch
+		 * any struct rte_flow fields - the pointer does not refer to
+		 * a struct rte_flow, so writing stat fields through it
+		 * (as the code previously did below) corrupts driver memory.
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+	} else {
+		res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+		convert_error(error, &flow_error);
+
+		/* Release the stat id and free the slot */
+		rte_spinlock_lock(&flow_lock);
+		delete_flow_stat_id_locked(flow->flow_stat_id);
+		flow->used = 0;
+		rte_spinlock_unlock(&flow_lock);
+
+		/* Clear the flow statistics if successfully destroyed */
+		if (res == 0) {
+			flow->stat_pkts = 0UL;
+			flow->stat_bytes = 0UL;
+			flow->stat_tcp_flags = 0;
+		}
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: convert the flow and ask the filter layer to
+ * validate it without programming hardware.
+ *
+ * Returns 0 when valid, negative with 'error' populated otherwise.
+ *
+ * NOTE(review): for the vswitch profile convert_flow() may allocate a flow
+ * stat id into flow_stat_id which is not released here - confirm whether
+ * validate-only paths leak stat ids.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+	int res;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	res = flow_validate(dev->flw_dev, match.flow_elem, action.flow_actions,
+			    &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * For the inline profile with group > 0 the filter-layer handle is returned
+ * directly, typecast to struct rte_flow * (see is_flow_handle_typecast()).
+ * Otherwise a free slot in nt_flows[] is claimed under flow_lock and the
+ * driver handle is stored in it.
+ *
+ * Returns the flow handle or NULL with 'error' populated on failure.
+ *
+ * NOTE(review): flow_error is a function-local static shared by concurrent
+ * callers - confirm rte_flow create is serialized at a higher level.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline profile, group > 0: no slot bookkeeping, return raw handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim a free slot; only valid when a stat id was assigned */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Creation failed: release stat id and slot */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+uint64_t last_stat_rtc;
+
+/*
+ * Pull adapter statistics into the per-port software counters, rate-limited
+ * to once per second per port; the global color (per-flow) counters are
+ * additionally updated at most once per second across all ports.
+ *
+ * Locking: hwlock (spinlock) guards the per-port rate limit and counter
+ * update; p_nt_drv->stat_lck (mutex) guards the shared stat structures.
+ *
+ * Returns 0 on success (including rate-limited no-op), -1 on bad arguments.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/* NOTE(review): bound check uses '>' - if the stat arrays hold
+	 * NUM_ADAPTER_PORTS_MAX entries this is off by one; confirm whether
+	 * '>=' was intended.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold per-color deltas into every active flow using that color */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow query callback. Only RTE_FLOW_ACTION_TYPE_COUNT is supported:
+ * refreshes statistics via poll_statistics() and reports the flow's packet
+ * and byte counts, optionally resetting them when qcnt->reset is set.
+ *
+ * Typecast handles are rejected - they carry no software statistics.
+ * Returns 0 on success, -1 with 'err' populated otherwise.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Refresh counters before reading them (rate-limited internally) */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug helper: dump a restore-info tunnel description to the filter log.
+ * inet_ntoa() returns a static buffer, which is safe here because the two
+ * calls are sequential, each consumed before the next.
+ */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/* PMD-private action pair handed back by ntnic_tunnel_decap_set(): the
+ * TUNNEL_SET marker followed by a slot whose type is patched per call
+ * (e.g. VXLAN_DECAP).
+ * NOTE(review): this is mutable shared static state - concurrent
+ * decap_set calls would race on [1].type; confirm callers serialize.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel_decap_set callback: hand the application the PMD-private
+ * actions (TUNNEL_SET + the matching decap) for the given tunnel type.
+ * Only VXLAN is supported; returns -ENOTSUP otherwise.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* PMD-private match item returned by ntnic_tunnel_match(): a single
+ * TUNNEL marker with no spec/last/mask (read-only, unlike _pmd_actions).
+ */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel_match callback: hand the application the PMD-private
+ * tunnel match item regardless of tunnel type. Always succeeds.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow get_restore_info callback: reconstruct the tunnel a packet was
+ * decapped from, using the FDIR mark the FPGA stamped on the mbuf.
+ *
+ * The 32-bit fdir.hi encodes the port id in the top byte and the tunnel id
+ * in the low 24 bits; fdir.lo carries the stat id used to look up the
+ * tunnel definition in the filter layer.
+ *
+ * Returns 0 with 'info' filled in, -EINVAL when the mbuf carries no usable
+ * mark or the lookup fails.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed ttl/tos: the original values are not recoverable */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow tunnel_action_decap_release callback. Nothing to free: the
+ * actions handed out by ntnic_tunnel_decap_set() are static storage.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow tunnel_item_release callback. Nothing to free: the item handed
+ * out by ntnic_tunnel_match() is static storage.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow ops table exported to the ethdev layer. .flush is intentionally
+ * NULL (not implemented); restoration callbacks support hw tunnel decap.
+ */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow from already-converted attribute/match/action structures.
+ * NOTE(review): presumably returns NULL and fills 'error' on failure —
+ * confirm against the definition in ntnic_filter.c.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One (RTE RSS flag, NT RSS flag) translation pair. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Build a translation pair from a common RSS flag suffix. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/* Translation table between DPDK RSS hash-field flags and NT flags. */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/* Linear lookup RTE flag -> NT flag; NULL when the flag is not in the table. */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->first == rte_flag)
+			return &p->second;
+	}
+	return NULL; /* NOT found */
+}
+
+/* Linear lookup NT flag -> RTE flag; NULL when the flag is not in the table. */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->second == nt_flag)
+			return &p->first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Translate a DPDK RSS hash-field bit mask into the NT representation.
+ * Bits with no NT equivalent are skipped with an error log; all supported
+ * bits are OR'ed into the result.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (uint i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				/* 'i' is unsigned, so use %u (was %d). */
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate an NT RSS field set back into the DPDK bit-mask form.
+ * Every NT flag is expected to have an RTE counterpart (NT flags are a
+ * subset of the RTE options), hence the assert instead of a skip.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (uint i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* Mask with only the MSB of a 32-bit word set (0x80000000); the MSB of a
+ * meter/profile id flags an egress object throughout this file.
+ */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Linear search of the per-device profile list; NULL when not found. */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
+/*
+ * Vswitch rte_mtr meter_profile_add callback.
+ * Accepts the profile on virtual ports, or on any port when the id's MSB
+ * marks it as egress; ingress metering on physical ports is rejected.
+ * The profile is copied and stored in the per-device list only — no
+ * hardware programming happens until the meter is enabled.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * Vswitch rte_mtr meter_profile_delete callback.
+ * Unlinks and frees the stored profile; ENODEV when the id is unknown.
+ * NOTE(review): does not check whether a meter still references the
+ * profile — verify callers guarantee this.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Linear search of the per-device meter list; NULL when not found. */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *mtr = NULL;
+
+	LIST_FOREACH(mtr, &dev_priv->mtrs, next)
+	if (mtr->mtr_id == mtr_id)
+		break;
+
+	return mtr;
+}
+
+/* Fixed-point rate: integer part plus fractional part in units of 1/1024. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts byte/s to byte/period in form of integer + 1/1024*fractional.
+ * The period depends on the clock frequency and other parameters which
+ * being combined give a multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps / 10^12
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /*10^12 pico second*/
+
+	res.integer = dividend / divisor;
+	const uint64_t reminder = dividend % divisor;
+
+	/* Scale the remainder to 1/1024 units. */
+	res.fractional = 1024ull * reminder / divisor;
+	return res;
+}
+
+/* Physical port conversion: period of 8 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Virtual port conversion: period of 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * Vswitch rte_mtr meter_enable callback.
+ * Programs the meter's profile into hardware: egress meters go through the
+ * EPP (per-vport or per-txp QoS), ingress meters through the DBS TX QoS.
+ * The ingress global rate is configured once per process (ingress_initial
+ * is function-static, i.e. shared across all devices/ports).
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on RFC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* Never program a zero rate; round up to 1/1024. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* Never program a zero rate; round up to 1/1024. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		/* Clamp burst to the hardware field width. */
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Turn off the ingress DBS TX QoS shaping for this port. */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * Vswitch rte_mtr meter_disable callback.
+ * Zeroes the EPP QoS settings for egress meters (MSB of mtr_id set) or
+ * disables the DBS TX QoS for ingress meters.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * MTR object create (vswitch profile).
+ * Links a new meter object to an existing profile; ingress meters on
+ * physical ports are rejected. When params->meter_enable is set the
+ * meter is programmed into hardware immediately via eth_meter_enable().
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/*
+ * MTR object destroy (vswitch profile).
+ * Zeroes the hardware QoS settings for the meter, then unlinks and frees
+ * the meter object. The referenced profile is left in place.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		/* ENODEV, not EEXIST: the id does NOT exist (matches
+		 * eth_meter_profile_delete's convention).
+		 */
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * Inline-profile rte_mtr capabilities_get callback.
+ * Reports color-blind, byte-mode, RFC 2698 (trTCM) metering only, with
+ * limits taken from the FLM flow module.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_profile_add callback.
+ * Accepts byte-mode RFC 2698 profiles with equal committed/peak rates and
+ * hands them to the FLM flow module.
+ * Fix: 'error' was annotated __rte_unused although the body uses it in
+ * every rte_mtr_error_set() call — annotation dropped.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): profile ids share the policy max bound here —
+	 * confirm this is intended rather than a profile-specific max.
+	 */
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_profile_delete callback.
+ * Clears the FLM profile slot by writing an all-zero profile.
+ * Fix: all three parameters were annotated __rte_unused although each one
+ * is used in the body — annotations dropped.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_policy_add callback.
+ * Only the fixed policy "green: pass, yellow: drop, red: drop" is
+ * accepted (green may be END, VOID+END or PASSTHRU+END).
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr meter_policy_delete callback.
+ * Only validates the id; no state is removed here.
+ * Fix: 'policy_id' and 'error' were annotated __rte_unused although both
+ * are used in the body — annotations dropped ('dev' really is unused).
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr create callback.
+ * Only shared, enabled, color-blind meters with green-only stats are
+ * accepted; the meter is offloaded through the FLM flow module.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* NOTE(review): profile id bounded by the policy max — confirm
+	 * this matches eth_mtr_meter_profile_add_inline's bound on purpose.
+	 */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr destroy callback.
+ * Removes the offloaded meter via the FLM flow module.
+ * Fix: 'error' was annotated __rte_unused although the body uses it in
+ * both rte_mtr_error_set() calls — annotation dropped.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr stats_update callback, repurposed to adjust a
+ * meter bucket: bit 63 of 'adjust_value' must be set as an opt-in marker,
+ * the low 32 bits carry the adjustment passed to the FLM module.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the marker bit before range checking. */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Inline-profile rte_mtr stats_read callback.
+ * Only green packet/byte counters are filled (the only colors the
+ * capabilities advertise); 'clear' is forwarded to the FLM module.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* rte_mtr ops for the vswitch FPGA profile (no capabilities/policy ops). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* rte_mtr ops for the inline FPGA profile (FLM-based metering). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * ethdev mtr_ops_get hook: store the ops table matching the adapter's
+ * FPGA profile into 'ops' (a struct rte_mtr_ops ** in disguise).
+ * Returns -1 for unsupported profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/*
+ * Fill 'ops' (struct rte_mtr_ops **) with the meter ops table matching
+ * the adapter's FPGA profile; returns -1 for unsupported profiles.
+ * Fix: dropped the misleading __rte_unused on 'dev' — the definition in
+ * ntnic_meter.c reads dev->data->dev_private.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port vDPA bookkeeping; entries 0..nb_vpda_devcnt-1 of vport[] are live. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;		/* vhost device id, filled in on new_device */
+	uint32_t index;		/* HW ring index base */
+	uint32_t host_id;	/* VF number */
+	uint32_t rep_port;	/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;		/* rte_vhost driver registration flags */
+	struct rte_pci_addr addr;
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;	/* count of initialized vport[] entries */
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Resolve the HW queue mapping for one queue of a vDPA device.
+ *
+ * @vdpa_dev: device to look up in the vport[] table.
+ * @rx: non-zero for an Rx queue, zero for Tx.
+ * @queue_id: queue index within the chosen direction.
+ * @hw_index/@host_id/@rep_port: outputs, only written on success.
+ *
+ * Returns 0 on success, -1 when the device is unknown or queue_id is out
+ * of range for the configured queue count.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			} else {
+				if (queue_id >= vport[i].txqs) {
+					/* Bug fix: the Tx branch previously logged
+					 * "rxqs" and the Rx queue count.
+					 */
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			}
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register and start a vDPA port for a VF.
+ *
+ * Records the port parameters in the next free vport[] slot, registers the
+ * vhost-user client socket and starts the vhost driver.
+ *
+ * @vdev: PCI device of the VF (used for VF number and vDPA device lookup).
+ * @socket_path: vhost-user socket to register (copied into the port entry).
+ * @index: HW ring index base; @rxqs/@txqs: queue counts; @rep_port: in-port
+ * override on Tx.
+ * @vhid: output, index of the newly used vport[] slot.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/* Bug fix: guard the fixed-size vport[] table - the slot index was
+	 * previously used without any bound check.
+	 */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Cannot register more than %lu vDPA ports\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Tear down one registered vDPA port: detach the vDPA device from the
+ * vhost driver, unregister the socket and clear the vport[] entry.
+ *
+ * NOTE(review): the function returns after tearing down the FIRST port
+ * whose ifname is set, so it closes at most one port per call - confirm
+ * callers invoke it repeatedly when several ports exist.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Mark the slot free and stop after the first port */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Feature-bit-to-name table consumed by dump_virtio_features(). */
+struct {
+	uint64_t id;	/* virtio feature bit number */
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Print the name of every virtio feature bit set in @features. */
+static void dump_virtio_features(uint64_t features)
+{
+	int idx;
+
+	for (idx = 0; idx < NUM_FEATURES; idx++) {
+		const uint64_t bit = 1ULL << virt_features[idx].id;
+
+		if (features & bit)
+			printf("Virtio feature: %s\n", virt_features[idx].name);
+	}
+}
+#endif
+
+/*
+ * vhost "new_device" callback: match the connecting vhost device to a
+ * registered vport[] entry by socket path, wait for the PMD instance to
+ * become ready, and record the negotiated virtio ring mode.
+ *
+ * Returns 0 on success, -1 when the socket is unknown, the PMD instance
+ * does not become ready within ~2 s, or neither IN_ORDER nor RING_PACKED
+ * was negotiated.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the vport slot registered for this socket path */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll up to 2000 x 1 ms for the PMD virtual-port instance */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): "%016lx" assumes 64-bit long for uint64_t - confirm */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost "destroy_device" callback: log the teardown and set the matching
+ * PMD virtual port's negotiation state back to NONE (link down).
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	/* First loop only logs which port is being destroyed */
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user callbacks driving per-port setup/teardown */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register the vhost-user client socket for a port, attach the callback
+ * ops, mask out virtio features the HW does not support, and start the
+ * vhost driver.
+ *
+ * Returns 0 on success, -1 on any registration/start failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Disable offload/control features not handled by the FPGA datapath */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map the PCI vendor/device id pair to a printable adapter name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id != NT_HW_NAPATECH_PCI_VENDOR_ID)
+		return "Unknown";
+
+	switch (pci_dev->id.device_id) {
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:
+		return "NT200A02";
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:
+		return "NT50B01";
+	default:
+		return "Unknown";
+	}
+}
+
+/*
+ * PCI probe for Napatech VFs: first create the vDPA device for the VF,
+ * then create the DPDK VF ethdev interface.
+ *
+ * Returns 0 on success, -1 when vDPA probing fails; otherwise the result
+ * of nthw_create_vf_interface_dpdk().
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/*
+ * PCI remove for Napatech VFs: tear down the vDPA device, then the DPDK
+ * VF interface. Returns -1 when vDPA removal fails.
+ */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI ids served by this VF driver (NT200A02 and NT50B01 VFs) */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+/* Register the driver with the PCI bus; VFs must be bound to vfio-pci */
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow state for one virtqueue; desc/avail/used hold guest physical
+ * addresses resolved via _hva_to_gpa().
+ */
+struct vring_info {
+	uint64_t desc;
+	uint64_t avail;
+	uint64_t used;
+	uint16_t size;
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;	/* 0 = Rx, 1 = Tx (set in ntvf_vdpa_create_vring) */
+	struct nthw_virt_queue *p_vq;	/* HW queue handle, NULL if not created */
+
+	int enable;
+};
+
+/* Negotiated features and per-ring state for one VF */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	/* one Rx and one Tx ring per queue pair */
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device state */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* lifecycle flags, written from vhost callback context */
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/*
+ * Look up the internal_list entry whose per-VF state references @vdev.
+ * Walks the global list under internal_list_lock; returns NULL when no
+ * entry matches.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->vdev == vdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Look up the internal_list entry whose per-VF state references the PCI
+ * device @pdev. Walks the global list under internal_list_lock; returns
+ * NULL when no entry matches.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->pdev == pdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Set up VFIO for the VF and cache the container/group/device fds in the
+ * per-VF state. Returns 0 on success, -1 when nt_vfio_setup() fails (fds
+ * are left at -1 in that case).
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * DMA-map (or unmap) the guest memory regions of the vhost device into
+ * the VF's IOMMU domain.
+ *
+ * @internal: per-VF state (provides vid and the PCI device for VF lookup).
+ * @do_map: non-zero to map all regions, zero to unmap them.
+ *
+ * Returns 0 on success, negative on failure. Mapping when already mapped
+ * (or unmapping when not mapped) fails with -1.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Idempotence guard: refuse double map / double unmap */
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Bug fix: the GPA conversion was "0xllx" (missing '%'), so
+		 * guest_phys_addr was never printed and the remaining
+		 * varargs were shifted against their specifiers.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address to a guest physical address using the
+ * vhost memory table of device @vid. Returns 0 when the lookup fails or
+ * no region contains @hva.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+
+	if (rte_vhost_get_mem_table(vid, &mem) >= 0) {
+		uint32_t i;
+
+		for (i = 0; i < mem->nregions; i++) {
+			const struct rte_vhost_mem_region *r = &mem->regions[i];
+
+			if (hva >= r->host_user_addr &&
+					hva < r->host_user_addr + r->size) {
+				gpa = hva - r->host_user_addr +
+				      r->guest_phys_addr;
+				break;
+			}
+		}
+	}
+
+	free(mem);
+	return gpa;
+}
+
+/*
+ * Create the HW-backed virtqueue for ring index @vring of the vhost
+ * device: resolve the guest ring addresses to GPAs, query the HW queue
+ * mapping, and set up an Rx queue (even index) or Tx queue (odd index)
+ * in packed- or split-ring mode depending on the negotiated features.
+ *
+ * Returns 0 on success (also when neither IN_ORDER nor RING_PACKED was
+ * negotiated, in which case no HW queue is created), negative on lookup
+ * or address-translation failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* Translate desc/avail/used HVAs to GPAs; 0 means "not mapped" */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even ring index = Rx, odd = Tx; queue number is vring/2 */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Rx (even ring index) */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Start the vDPA datapath for a VF: read the negotiated features and ring
+ * count from vhost, then DMA-map guest memory and create/enable the HW
+ * virtqueues.
+ *
+ * INLINE profile: creates and enables rings 0 and 1 here; other profiles
+ * create only ring 0, the rest are enabled later via vring state changes.
+ *
+ * Always returns 0.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the vDPA datapath: store the current ring indexes back into vhost
+ * and release every HW virtqueue that was created for this device.
+ *
+ * Vhost logging (live migration dirty-page tracking) is not supported;
+ * a warning is emitted if the feature was negotiated.
+ *
+ * Always returns 0.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Hand the last avail/used indexes back to vhost for each ring */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		/* Mark the ring as torn down */
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
/*
 * Scratch-buffer size for the VFIO_DEVICE_SET_IRQS ioctl: the vfio_irq_set
 * header plus one eventfd (int) per data queue (Rx + Tx per queue pair)
 * plus one int for the device/config interrupt vector.
 *
 * BUG FIX: the original put the "+ 1" outside the sizeof(int)
 * multiplication, reserving one extra *byte* instead of one extra *int*;
 * with all vectors populated, fd_ptr[nr_vring] could write past the end of
 * the buffer.
 */
#define MSIX_IRQ_SET_BUF_LEN           \
	(sizeof(struct vfio_irq_set) + \
	 sizeof(int) * (NTVF_VDPA_MAX_QUEUES * 2 + 1))
+
+/*
+ * Route the device's MSI-X vectors to the vhost eventfds via VFIO.
+ *
+ * Vector 0 carries the device/config interrupt; vectors 1..nr_vring each
+ * get the callfd of the corresponding vring. Returns 0 on success — also
+ * in the "too many vectors, fall back to polling" case — and -1 only when
+ * the VFIO_DEVICE_SET_IRQS ioctl itself fails. Idempotent: a no-op when
+ * interrupts are already enabled.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	/* Build the variable-length ioctl payload in irq_set_buf. */
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0: device/config interrupt. */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* Fill the ring eventfds pairwise; assumes nr_vring is even
+	 * (Rx/Tx queue pairs) — TODO confirm.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down the MSI-X routing installed by ntvf_vdpa_enable_vfio_intr().
+ * count = 0 with VFIO_IRQ_SET_DATA_NONE disables every vector of the
+ * MSI-X index. Returns 0 on success (or when interrupts were never
+ * enabled), -1 if the ioctl fails.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int len;
+
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* Header only — no eventfd payload is needed to disable. */
+	len = sizeof(struct vfio_irq_set);
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = len;
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			goto err;
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	internal->vid = vid;
+
+	atomic_store(&internal->dev_attached, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+
+	atomic_store(&internal->dev_attached, 0);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	uint32_t i;
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (i = 0; i < hw->nr_vring; i++)
+		hw->vring[i].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost set_features callback. Succeeds unless the negotiated feature set
+ * requires vhost dirty-page logging (live migration), which this driver
+ * cannot provide without a relay core — in that case it returns -1.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	/* No logging requested: nothing more to do. */
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_group_fd;
+}
+
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_dev_fd;
+}
+
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+	*queue_num = list->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = list->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the supported vhost-user protocol feature bits.
+ * NOTE(review): vdev is tagged __rte_unused yet referenced in the log
+ * calls below — harmless (the attribute only suppresses warnings), but
+ * the tag could be dropped.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	int ret = 0;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		printf("ERROR - ENABLE INTERRUPT via VFIO\n");
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (int i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vhost set_vring_state callback: enable or disable one ring.
+ *
+ * A ring that already has a descriptor area and a virt queue is toggled in
+ * place; otherwise enabling maps DMA and creates the ring. For non-inline
+ * FPGA profiles the final queue/interrupt configuration is deferred until
+ * the last ring is enabled; for the inline profile it runs after the
+ * enable flag is set. Returns 0 on success, -1 on bad device/ring index.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two rings (Rx+Tx) per queue pair. */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type 0 = Rx queue, 1 = Tx queue */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* First enable for this ring: map guest memory and
+			 * create the queue from the vhost ring description.
+			 */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA driver ops registered with the vhost library; optional callbacks
+ * (migration_done, get_notify_area) are intentionally left NULL.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		return -1;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove entry: stop the datapath, release VFIO resources, unregister
+ * the vDPA device and free the tracking state. Returns 0 on success,
+ * -1 if the device is unknown.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath stop the device. */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* PCI id table containing only the zero terminator: no vendor/device pairs
+ * are matched automatically. NOTE(review): probing is presumably driven
+ * explicitly from the PF side rather than by PCI id match — confirm.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+/* Register the driver, its id table and its kernel-module dependency. */
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit enter/leave debug traces in callbacks;
+ * undefine it to compile the trace macros away entirely.
+ */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI probe/remove entry points used by the PMD registration. */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	return ((pdev->addr.devid & 0x1f) << 3) + ((pdev->addr.function) & 0x7);
+}
+
+/* Internal API */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;
+};
+
+static struct vfio_dev vfio_list[256];
+
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 || vf_num > 255)
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+			       &iommu_group_num);
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Release the VFIO container associated with a VF.
+ * NOTE(review): for vf_num 0 container_fd is RTE_VFIO_DEFAULT_CONTAINER_FD
+ * (see nt_vfio_setup); destroying the default container here may be
+ * unintended — confirm.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%d\n",
+	       vf_num, virt_addr, rte_malloc_virt2iova(virt_addr), gp_virt_base,
+	       vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a mapping made by nt_vfio_dma_map(). For 1 GiB mappings the virtual
+ * and IOVA addresses are aligned back to the 1 GiB boundary that was
+ * actually mapped. A VF whose container was never set up (-1) is treated
+ * as success. Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* No container: nothing was ever mapped for this VF. */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       res, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* Constructor registration: RTE_INIT here only *declares* nt_vfio_init
+ * with the constructor attribute; the definition follows below. Unusual
+ * but presumably valid use of the macro — TODO confirm against the
+ * RTE_INIT definition.
+ */
+RTE_INIT(nt_vfio_init);
+
+static void nt_vfio_init(void)
+{
+	/* Publish the DMA map/unmap hooks to the common nt_util layer. */
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Set up / tear down VFIO state for one PF/VF device.
+ * nt_vfio_setup returns the VF number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-VF VFIO file descriptors; -1 on bad vf_num. */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* Host-buffer DMA mapping; the chosen IOVA is returned via *iova_addr. */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA variants: the caller supplies the guest-physical IOVA. */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* One extended-statistics descriptor: maps an xstat name to the counter
+ * bank it is read from and the counter's byte offset within that bank.
+ */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source; /* counter bank: 1 = RX port, 2 = TX port, 3 = FLM */
+	unsigned int offset; /* byte offset of the uint64_t counter in the bank struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+/* VSwitch xstats: entry builders for RX (source 1) and TX (source 2)
+ * counters in struct port_counters_vswitch_v1.
+ */
+#define VRXS(nm, fld) \
+	{ nm, 1, offsetof(struct port_counters_vswitch_v1, fld) }
+#define VTXS(nm, fld) \
+	{ nm, 2, offsetof(struct port_counters_vswitch_v1, fld) }
+
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	VRXS("rx_octets", octets),
+	VRXS("rx_packets", pkts),
+	VRXS("rx_dropped_packets", drop_events),
+	VRXS("rx_qos_dropped_bytes", qos_drop_octets),
+	VRXS("rx_qos_dropped_packets", qos_drop_pkts),
+	VTXS("tx_octets", octets),
+	VTXS("tx_packets", pkts),
+	VTXS("tx_dropped_packets", drop_events),
+	VTXS("tx_qos_dropped_bytes", qos_drop_octets),
+	VTXS("tx_qos_dropped_packets", qos_drop_pkts),
+};
+
+#undef VRXS
+#undef VTXS
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values.
+ * NTHW_XSTATS_SIZE is the larger of the VSwitch and capture/inline table
+ * sizes, so one row can hold a snapshot for whichever table is active.
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port counter snapshots taken by nthw_xstats_reset() and subtracted
+ * from the live counters on read; FLM (source 3) snapshots use row 0 only.
+ */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/*
+ * Fill 'stats' with up to 'n' xstats for 'port', reset-adjusted.
+ * Caller must hold the stat mutex.
+ * Returns the number of entries written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *info;
+	uint32_t num_entries;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	/* Select the counter banks and name table for this adapter flavor */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		info = nthw_virt_xstats_names;
+		num_entries = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			info = nthw_cap_xstats_names_v1;
+			num_entries = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			info = nthw_cap_xstats_names_v2;
+			num_entries = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n && idx < num_entries; idx++) {
+		stats[idx].id = idx;
+		switch (info[idx].source) {
+		case 1: /* RX counter */
+			stats[idx].value =
+				*(uint64_t *)(rx_base + info[idx].offset) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 2: /* TX counter */
+			stats[idx].value =
+				*(uint64_t *)(tx_base + info[idx].offset) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 3: /* FLM counter; adapter-global, resets kept at row 0 */
+			stats[idx].value = flm_base ?
+				*(uint64_t *)(flm_base + info[idx].offset) -
+				nthw_xstats_reset_val[0][idx] : 0;
+			break;
+		default:
+			stats[idx].value = 0;
+			break;
+		}
+	}
+
+	return idx;
+}
+
+/*
+ * Fill 'values' for the explicit stat 'ids', reset-adjusted.
+ * Caller must hold the stat mutex.
+ * Returns the number of valid ids resolved; out-of-range ids are skipped.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *info;
+	uint32_t num_entries;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+	int filled = 0;
+
+	/* Select the counter banks and name table for this adapter flavor */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		info = nthw_virt_xstats_names;
+		num_entries = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			info = nthw_cap_xstats_names_v1;
+			num_entries = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			info = nthw_cap_xstats_names_v2;
+			num_entries = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		const uint64_t id = ids[i];
+
+		if (id >= num_entries)
+			continue;
+
+		switch (info[id].source) {
+		case 1: /* RX counter */
+			values[i] = *(uint64_t *)(rx_base + info[id].offset) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 2: /* TX counter */
+			values[i] = *(uint64_t *)(tx_base + info[id].offset) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 3: /* FLM counter; adapter-global, resets kept at row 0 */
+			values[i] = flm_base ?
+				*(uint64_t *)(flm_base + info[id].offset) -
+				nthw_xstats_reset_val[0][id] : 0;
+			break;
+		default:
+			values[i] = 0;
+			break;
+		}
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Snapshot the current counter values for 'port' so subsequent reads
+ * report deltas from this point. Caller must hold the stat mutex.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *info;
+	uint32_t num_entries;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+
+	/* Select the counter banks and name table for this adapter flavor */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		info = nthw_virt_xstats_names;
+		num_entries = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			info = nthw_cap_xstats_names_v1;
+			num_entries = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			info = nthw_cap_xstats_names_v2;
+			num_entries = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < num_entries; i++) {
+		switch (info[i].source) {
+		case 1: /* RX counter */
+			nthw_xstats_reset_val[port][i] =
+				*(uint64_t *)(rx_base + info[i].offset);
+			break;
+		case 2: /* TX counter */
+			nthw_xstats_reset_val[port][i] =
+				*(uint64_t *)(tx_base + info[i].offset);
+			break;
+		case 3:
+			/* FLM: resets kept at row 0; flm_count_current is a
+			 * level, not a running counter, so it is never reset.
+			 */
+			if (flm_base &&
+			    strcmp(info[i].name, "flm_count_current") != 0)
+				nthw_xstats_reset_val[0][i] =
+					*(uint64_t *)(flm_base + info[i].offset);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions does not require stat mutex locked
+ */
+/*
+ * Copy up to 'size' xstat names into 'xstats_names'.
+ * Does not require the stat mutex. When xstats_names is NULL, returns the
+ * total number of stats available; otherwise returns the number copied.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *info;
+	uint32_t num_entries;
+	unsigned int i;
+	int filled = 0;
+
+	/* Select the name table for this adapter flavor */
+	if (is_vswitch) {
+		info = nthw_virt_xstats_names;
+		num_entries = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		info = nthw_cap_xstats_names_v1;
+		num_entries = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		info = nthw_cap_xstats_names_v2;
+		num_entries = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (xstats_names == NULL)
+		return num_entries;
+
+	for (i = 0; i < size && i < num_entries; i++) {
+		strlcpy(xstats_names[i].name, info[i].name,
+			sizeof(xstats_names[i].name));
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Copy the xstat names for an explicit list of stat 'ids'.
+ * Does not require the stat mutex. When xstats_names is NULL, returns the
+ * total number of stats available; otherwise returns the number of names
+ * actually copied (out-of-range ids are skipped, matching the counting
+ * behavior of nthw_xstats_get_by_id()).
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Select the name table for this adapter flavor */
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/* Fix: count only entries actually filled; the
+			 * original incremented unconditionally and so
+			 * over-reported when an id was out of range.
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Name lookups; safe to call without the stat mutex. With a NULL
+ * xstats_names both return the total number of stats available.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Value readers/reset; the implementation requires the stat mutex to be
+ * held by the caller.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v5 8/8] net/ntnic: adds socket connection to PMD
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-21 11:34   ` [PATCH v5 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-21 11:34   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 11:34 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1310 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5354 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 746dc65c4c..5889b4470a 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64') or not dpdk_conf.has('RT
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	const char *param;	/* token this node matches; NULL terminates a list */
+	struct func_s *sub_funcs; /* non-NULL: descend into this sub-table on match */
+	/* leaf handler invoked when there is no sub-table */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply for 'module'.
+ * Reply layout: a 4-byte binary error code followed by the remainder of
+ * "<module>:<error text>" — the "----" prefix written by sprintf is a
+ * placeholder that is overwritten with the 32-bit error code afterwards
+ * (note *len was already computed from the full formatted string).
+ * On malloc failure *data is NULL and *len stays 0.
+ * Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4 placeholder bytes + module + ':' + text + NUL */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal status reply carrying only a 32-bit reply code.
+ * On malloc failure *data is NULL and *len stays 0.
+ * Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (data) {
+		uint32_t *reply = malloc(sizeof(uint32_t));
+
+		*data = (char *)reply;
+		if (reply) {
+			*reply = (uint32_t)code;
+			*len = sizeof(uint32_t);
+		}
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Walk the dispatch tree 'func_list' guided by the comma-separated tokens
+ * of 'function' (modified in place) and invoke the matching leaf handler.
+ * hdr->len is reduced by each consumed token; on a leaf hit, *data is set
+ * to the remaining parameter text for the handler.
+ * Returns the handler's result, or REQUEST_ERR via ntconn_error() when the
+ * token is missing, unrecognized, or the tree node has neither sub-table
+ * nor handler.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	/* strtok_r instead of strtok: keeps tokenizer state local so
+	 * concurrent request parsing cannot corrupt it.
+	 */
+	char *saveptr = NULL;
+	char *tok = strtok_r(function, ",", &saveptr);
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	/* Remainder of the request string follows the consumed token */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain a descriptive error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error descriptor: numeric code paired with a human-readable text.
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code; /* value from enum ntconn_err_e; (uint32_t)-1 marks a table sentinel */
+	const char *err_text; /* static description string (not allocated) */
+} ntconn_err_t;
+
+/* Look up the descriptor for err_code; falls back to a generic entry, never returns NULL */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/* One registered ntconnect module instance (per module, per PCI device) */
+typedef struct ntconn_mod_s {
+	void *hdl; /* module instance context, passed back on every op callback */
+	struct pci_id_s addr; /* PCI address this module instance is bound to */
+	const ntconnapi_t *op; /* operations implemented by the module */
+
+	pthread_mutex_t mutex; /* per-module lock; presumably serializes requests - usage outside this view */
+	struct ntconn_mod_s *next; /* links registered modules into a list (head: ntcmod_base) */
+} ntconn_mod_t;
+
+/* Context of the ntconnect server itself (registered as module "server") */
+struct ntconn_server_s {
+	int serv_fd; /* server socket fd - TODO confirm (accept loop not in view) */
+	int running; /* run flag for the server loop - TODO confirm */
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset; /* CPU affinity applied to server threads */
+};
+
+int ntconn_server_register(void *server);
+
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+/* Header preceding every ntconnect message */
+struct ntconn_header_s {
+	uint16_t tag; /* NTCONN_TAG_REQUEST / NTCONN_TAG_REPLY / NTCONN_TAG_ERROR */
+	uint16_t len; /* length of the zero-terminated command string that follows */
+	uint32_t blob_len; /* length of the binary blob following the command string */
+};
+
+/* PCI address packed into a single 64-bit id; the union overlays the fields */
+struct pci_id_s {
+	union {
+		uint64_t pci_id; /* whole address as one value (0 terminates lists) */
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad; /* padding to fill the 64 bits */
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/* NOTE(review): declared as an array of 32 'char *' pointers; if a
+	 * 32-byte firmware version string was intended this should read
+	 * 'char fw_version[32]' - verify against the producer of this reply.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Handler for "server get,nic_pci_ids".
+ * Fills an allocated struct ntc_nic_pci_ids_s with the textual PCI address
+ * of every registered NIC; ownership of *data passes to the caller and is
+ * released via ntconn_server_free_data().
+ * Returns REQUEST_OK on success, REQUEST_ERR when allocation fails.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/* snprintf instead of sprintf: the formatted address fills the
+		 * NICS_PCI_ID_LEN + 1 buffer exactly for 16-bit domains, so
+		 * bound the write and guarantee NUL termination.
+		 */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/* ntconnapi_t::request hook: dispatch into the server command tree */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	int status = execute_function(this_module_name, hdl, client_id, hdr,
+				      function, server_entry_funcs, data, len,
+				      0 /* top-level recursion depth */);
+	return status;
+}
+
+/* ntconnapi_t::free_data hook: release a reply buffer after it was sent.
+ * free(NULL) would be a no-op, but the guard keeps the debug trace from
+ * firing when nothing was allocated.
+ */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/* Operation table registered for the built-in "server" module */
+static const ntconnapi_t ntconn_server_op = {
+	.module = this_module_name,
+	.version_major = NTCONNECT_SERVER_VERSION_MAJOR,
+	.version_minor = NTCONNECT_SERVER_VERSION_MINOR,
+	.request = ntconn_server_request,
+	.free_data = ntconn_server_free_data,
+	.client_cleanup = NULL, /* server keeps no per-client state */
+};
+
+/* Register the server pseudo-module on the reserved address 0000:00:00.0 */
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr addr = { 0 };
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..22ef7828c7
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/*
+ * Error-code to text lookup table, terminated by {-1, NULL}.
+ * Entry [1] (internal error) doubles as the fallback returned by
+ * get_ntconn_error() for unknown codes - keep it in place.
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+/* Head of the registered-module list (push-front; nodes are never freed) */
+static ntconn_mod_t *ntcmod_base;
+/* Listener thread */
+static pthread_t tid;
+/* Most recently spawned client worker (detached right after creation) */
+static pthread_t ctid;
+/* Singleton server state, also registered as the "server" module handle */
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Map an error code to its table entry. Unknown codes fall back to
+ * entry [1], the generic "Internal error".
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a module handler for a given PCI address + module name.
+ * The node is pushed on the front of the global ntcmod_base list and,
+ * for real adapters, the PCI id is recorded in the server's id list so
+ * "get nic_pci_ids" can enumerate all NICs.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push-front onto the module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* NOTE(review): addr.pci_id presumably aliases the
+	 * domain/bus/devid/function fields through a union, so the
+	 * all-zero server address is skipped here - confirm in the
+	 * ntconnect header.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Insert into the first free slot, unless already present */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill a sockaddr_un for `path`. Returns 0 on success, -1 if either
+ * argument is NULL or the path does not fit in sun_path.
+ *
+ * Fix: the copy size was sizeof(sun_path) - 1, which silently
+ * truncated a path of exactly sizeof(sun_path) - 1 characters even
+ * though the length guard accepts it; rte_strscpy() already reserves
+ * room for the terminator, so pass the full buffer size.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (addr == NULL || path == NULL)
+		return -1;
+	memset(addr, 0, sizeof(struct sockaddr_un));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) < sizeof(addr->sun_path)) {
+		rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path));
+		return 0;
+	}
+	return -1;
+}
+
+/* Internal socket-I/O status codes shared by read_data()/read_all() */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Wait (poll, `timeout` ms, -1 = forever) until `fd` is readable, then
+ * receive at most `len` bytes into `data`. On success *recv_len holds
+ * the byte count and STATUS_OK is returned; otherwise one of the
+ * STATUS_* codes above. STATUS_TRYAGAIN means the caller should simply
+ * call again.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd flags: try the actual recv */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly `length` bytes into `data`, looping over read_data()
+ * until complete. Returns STATUS_OK, or the first non-retryable
+ * STATUS_* error (CLOSED/TIMEOUT are returned silently).
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t pos = 0;
+
+	while (pos < length) {
+		size_t got = 0;
+		int ret = read_data(clfd, length - pos, &data[pos], &got, -1);
+
+		switch (ret) {
+		case STATUS_OK:
+			pos += got;
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return ret;
+		case STATUS_TRYAGAIN:
+			/* Try again */
+			break;
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       ret);
+			return ret;
+		}
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write exactly `size` bytes to `fd`, retrying short writes.
+ * Returns 0 on success or STATUS_INTERNAL_ERROR on a write failure.
+ *
+ * Fix: a write() interrupted by a signal (EINTR) was treated as a
+ * fatal error; it is transient and must simply be retried.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue; /* interrupted - retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += (size_t)res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request message from the client. A first recv of
+ * up to MESSAGE_BUFFER bytes yields the ntconn header, from which the
+ * full message length (len + blob_len) is derived; any remainder is
+ * then read with read_all(). On success *rdata owns the malloc'ed
+ * message (caller frees) and STATUS_OK is returned.
+ *
+ * Fixes: (1) a short first recv was only completed when the message
+ * exceeded MESSAGE_BUFFER, so smaller messages could be returned
+ * truncated; (2) the header was parsed without checking that
+ * NTCMOD_HDR_LEN bytes had actually arrived.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	if (recv_len < NTCMOD_HDR_LEN) {
+		/* Not even a full header - treat as malformed */
+		NT_LOG(ERR, NTCONNECT, "request shorter than header\n");
+		free(data);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+	}
+
+	/* Complete a short read regardless of message size */
+	if (recv_len < length) {
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read and parse one request of the form "<pci_id>;<module>[;<cmd...>]"
+ * (pci_id is "dddd:bb:dd.f"), then look up the registered module that
+ * matches both the address and the module name. Returns the module (or
+ * NULL), sets *status, *get_req (the raw buffer, caller frees) and
+ * *module_cmd (points into that buffer).
+ *
+ * Fixes: strlen(tok) was evaluated BEFORE the !tok check for the first
+ * two tokens, so a request without ';' separators dereferenced NULL;
+ * the dead `if (!hdr)` test after *hdr had already been dereferenced is
+ * removed; rte_strscpy() now gets the true destination sizes.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, sizeof(pci_id));
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, sizeof(module));
+
+			/* Anything after the second ';' is the module command */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Split "dddd:bb:dd.f" into its four components */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Prepend an ntconn header (tag = reply_tag, len = header + payload)
+ * to `data` and write the whole message to the client socket.
+ * Returns 0 on success or a STATUS_* error from allocation/write_all().
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	/* Header followed immediately by the payload */
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	if (res)
+		return res;
+
+	return 0;
+}
+
+/*
+ * Send a reply and then hand the module-allocated payload back to the
+ * owning module's free_data callback (under the module lock). A zero
+ * size means there is nothing to release.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int status = send_reply(clfd, reply_tag, data, size);
+
+	if (size != 0) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return status;
+}
+
+/*
+ * Send an error reply: a 4-byte error code followed by the error text
+ * (the "----connect:" prefix reserves the code's four bytes plus a
+ * separator before the text).
+ *
+ * Fixes: unbounded sprintf() replaced with snprintf() so an oversized
+ * error text cannot overflow err_buf; the code is stored with memcpy()
+ * instead of a *(uint32_t *) cast, which violated strict aliasing.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	uint32_t code = (uint32_t)ntcerr->err_code;
+
+	memcpy(err_buf, &code, sizeof(code));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread. Loops: read a request, dispatch it to the
+ * addressed module (or answer the built-in "version" command), send the
+ * reply; exits when the peer disconnects. On exit every registered
+ * module's client_cleanup callback is invoked for this fd.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* NOTE(review): control reaches this INVALID_REQUEST error
+		 * reply even after the request above was handled and answered
+		 * successfully - this looks unintended; confirm against the
+		 * client-side protocol. Also note `request` is not freed on
+		 * any of the break paths above (leak per failed exchange).
+		 */
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Listener thread: accepts connections on the Unix-domain server
+ * socket and spawns one detached ntconnect_worker per client, pinned
+ * to the configured cpuset. Exits on listen()/accept() failure.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		/* NOTE(review): an EINTR from accept() also terminates the
+		 * whole listener; consider retrying on EINTR. */
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		/* NOTE(review): pthread_create() return value is ignored; on
+		 * failure the affinity/detach calls below act on a stale tid. */
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Create the Unix-domain server socket at `sockname` and start the
+ * listener thread (pinned to `cpuset`). A no-op when no modules have
+ * been registered. Returns 0 on success, -1 on failure.
+ *
+ * Fixes: strdup() result is now checked; the previously ignored
+ * unix_build_address() result is checked (a too-long path would have
+ * bound an incomplete address); pthread_create() failure no longer
+ * leaks the server fd and is reported to the caller.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (!ntcmod_base)
+		return 0; /* no modules registered - nothing to serve */
+
+	/* Make sure the socket directory exists */
+	char *sockname_copy = strdup(sockname);
+
+	if (!sockname_copy) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	char *sockname_dir = dirname(sockname_copy);
+
+	if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Can't create socket directory: %s",
+		       sockname_dir);
+		free(sockname_copy);
+		return -1;
+	}
+	free(sockname_copy);
+
+	/* Add server to module list - cannot work without */
+	ntconn_server_register(&ntconn_serv);
+
+	/* Start named socket server */
+	struct sockaddr_un addr;
+
+	if (unix_build_address(sockname, &addr) < 0)
+		return -1; /* path too long for sun_path */
+
+	ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+	ntconn_serv.cpuset = cpuset;
+	if (ntconn_serv.serv_fd == -1)
+		return -1;
+
+	/* Make sure the node in filesystem is deleted otherwise bind will fail */
+	unlink(sockname);
+
+	if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+			sizeof(struct sockaddr_un)) == -1) {
+		close(ntconn_serv.serv_fd);
+		return -1;
+	}
+
+	/* Run ntconnect service */
+	if (pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv) != 0) {
+		close(ntconn_serv.serv_fd);
+		return -1;
+	}
+	pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+			       &ntconn_serv.cpuset);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+/* Per-adapter module state, indexed by adapter number. */
+static struct adap_hdl_s {
+	struct drv_s *drv; /* owning driver instance */
+} adap_hdl[MAX_ADAPTERS];
+
+/* Forward declarations for the "get" sub-commands */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* "get" command table (NULL-terminated) */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* Forward declarations for the "set" sub-commands */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* "set" command table (NULL-terminated) */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Translate an nt_link_speed_e value into the PORT_LINK_SPEED_*
+ * constant used on the wire; unknown values map to
+ * PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	static const struct {
+		enum nt_link_speed_e nt_speed;
+		int port_speed;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, PORT_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, PORT_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, PORT_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, PORT_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, PORT_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, PORT_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, PORT_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, PORT_LINK_SPEED_100G },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].nt_speed == link_speed)
+			return speed_map[i].port_speed;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Parse a textual link speed ("10M".."100G") into its
+ * nt_link_speed_t value; anything else yields NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces" handler: builds a ntc_interfaces_s reply describing
+ * every port - physical (including link-aggregated pairs) and virtual:
+ * port type, PCI id, link state/speed, admin state, MTU, MAC, attached
+ * HW queues and NIM data. The buffer is malloc'ed here and returned
+ * via *data / *len; it is released later through free_data.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Clamp single-mode length to the 16-bit wire field */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		/* NOTE(review): unlike port 0 above, this eth_dev lookup is
+		 * not NULL-checked - confirm virtual ports always exist here. */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * "get info" handler: returns the FPGA ident string
+ * "<type>-<product>-<version>-<revision>" in a buffer of
+ * sizeof(struct ntc_adap_get_info_s) bytes.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	char *buf;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	buf = malloc(*len);
+	if (buf == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	snprintf(buf, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+	*data = buf;
+
+	return REQUEST_OK;
+}
+
+/*
+ * "get sensors" handler: returns a ntc_sensors_s header followed by
+ * one sensor record per adapter sensor and per NIM sensor on every
+ * physical port.
+ *
+ * Fix: the reply buffer was malloc'ed but NIM sensor names are copied
+ * with strlen() (up to 50 bytes), leaving the tail of each 50-byte
+ * name field uninitialized - heap bytes leaked to the client. Use
+ * calloc() so all padding is zero.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate zeroed memory for header + sensors array */
+	sensors = calloc(1, *len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/* Name is NOT guaranteed NUL-terminated at 50 bytes;
+			 * copy at most 50 - the calloc above zero-fills the rest
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable physical port @port_nr. Always reports OK. */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable physical port @port_nr. Always reports OK. */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, false);
+	return REQUEST_OK;
+}
+
+/* Request link up on @portid unless it is already requested up. */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/* Request link down on @portid unless it is already requested down. */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of @portid from the textual @speed_str.
+ * The port must be administratively disabled; otherwise a
+ * WRONG_LINK_STATE reply is produced. An unknown speed string yields a
+ * MISSING_INVALID_PARAM error reply.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+	nt_link_speed_t speed;
+
+	if (nt4ga_port_get_adm_state(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	speed = convert_link_speed(speed_str);
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+	return REQUEST_OK;
+}
+
+/* Apply loopback @mode (NT_LINK_LOOPBACK_*) to @portid. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(info, portid, mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Toggle TX power on @portid: @disable == true switches it off.
+ * On hardware failure a TX_POWER_FAIL reply is built in *data/*len.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+	int res;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+	res = nt4ga_port_tx_power(info, portid, disable);
+	if (res != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * ntconnect "adapter;set,interface,..." handler.
+ *
+ * On entry *data points at the zero-terminated remainder of the
+ * request, e.g. for "adapter,set,interface,port0,link_speed=10G" it is
+ * "port0,link_speed=10G". The string is parsed (and modified) in place
+ * with strtok_r(). Replies are built into *data/*len.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	/* First token must be "port<n>" */
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only physical ports can be configured here; valid numbers are
+	 * 0 <= port_nr < n_phy_ports. The original check was inverted
+	 * ("<"), rejecting every valid physical port, and did not guard
+	 * against negative atoi() results.
+	 */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	/* Second token selects the attribute to set */
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * ntconnect "adapter;set,adapter,..." handler.
+ * Not implemented yet: logs the request and always reports
+ * NOT_YET_IMPLEMENTED.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data != NULL && *data != NULL)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/* Dispatch an "adapter" request to the matching adapter_entry_funcs entry. */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr,
+				function, adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);	/* free(NULL) is a no-op */
+}
+
+/* Per-client teardown hook; this module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations registered with the ntconnect framework. */
+static const ntconnapi_t ntconn_adap_op = {
+	this_module_name,		/* module name */
+	NTCONN_ADAP_VERSION_MAJOR,	/* interface major version */
+	NTCONN_ADAP_VERSION_MINOR,	/* interface minor version */
+	adap_request,			/* request dispatcher */
+	adap_free_data,			/* reply-buffer release */
+	adap_client_cleanup		/* per-client cleanup */
+};
+
+/*
+ * Register one adapter with the ntconnect framework.
+ * Claims the first free slot in adap_hdl[]; returns -1 when all
+ * MAX_ADAPTERS slots are occupied, otherwise the result of
+ * register_ntconn_mod().
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..31d5dc3edc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+/* ntconnect request-module name handled by this file */
+#define this_module_name "filter"
+
+/* Maximum number of concurrent ntconnect clients of this module */
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client driver handle slots */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Client port number -> flow device binding, populated by "setport" */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/*
+ * Mapping of ntconn_flow_err_e codes to user-visible error strings.
+ * Terminated by an entry with err_code == (uint32_t)-1; get_error_msg()
+ * relies on that sentinel. Fixed typo: "To many" -> "Too many".
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "Too many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Translate an ntconn_flow_err_e code to its user-visible message.
+ * Codes missing from ntconn_err[] fall back to the "Internal error"
+ * entry (index 1).
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	const ntconn_err_t *entry = ntconn_err;
+
+	while (entry->err_code != (uint32_t)-1 && entry->err_code != err_code)
+		entry++;
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry->err_text;
+}
+
+/*
+ * Build a flow_return_s reply carrying @err and the message for @code.
+ * The reply buffer is malloc'ed into *data (freed later by the
+ * framework). Returns REQUEST_OK, or REQUEST_ERR on allocation failure
+ * (with *len = 0).
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * Use strlcpy() like ntconn_flow_err_status(): the previous
+		 * memcpy(RTE_MIN(strlen, ERR_MSG_LEN)) could leave err_msg
+		 * without NUL termination in the malloc'ed buffer.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a generic "internal error" flow_return_s reply carrying @err.
+ * Returns REQUEST_OK, or REQUEST_ERR on allocation failure (*len = 0).
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *reply = malloc(sizeof(struct flow_return_s));
+
+	if (reply == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	reply->status = err;
+	reply->type = FLOW_ERROR_GENERAL;
+	strlcpy(reply->err_msg,
+		get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR), ERR_MSG_LEN);
+	*data = (char *)reply;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Request-name to handler dispatch table for the "filter" module */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Build a flow_return_s reply from a flow API @status/@error pair.
+ * Returns REQUEST_OK, or REQUEST_ERR on allocation failure (*len = 0).
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *reply = malloc(sizeof(struct flow_return_s));
+
+	if (reply == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	reply->status = status;
+	reply->type = error->type;
+	strlcpy(reply->err_msg, error->message, ERR_MSG_LEN);
+	*data = (char *)reply;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * ntconnect "filter;setport" handler.
+ *
+ * Parses "in_port=<n>,vpath=<path>" from *data, binds the client port
+ * number to the flow device of the matching virtual-function instance
+ * and replies with the queue layout (struct flow_setport_return).
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	/* unsigned int (not uint8_t): atoi() values > 255 must not wrap
+	 * back into the valid range
+	 */
+	unsigned int in_port = MAX_PORTS;
+	char vpath[MAX_PATH_LEN] = "";	/* initialized: token may be absent */
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		/*
+		 * "in_port=" is 8 characters; the original compared only 5
+		 * of them and read the number from offset 7 (the '='), so
+		 * the port number always parsed as 0.
+		 */
+		if (length > 8 && memcmp(tok, "in_port=", 8) == 0)
+			in_port = atoi(tok + 8);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vpath=", 6) == 0)
+			strlcpy(vpath, tok + 6, MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted port is a valid port_eth[] index too */
+	if (in_port >= nb_port || in_port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "filter;flush" handler.
+ * Parses "port=<n>" from *data and flushes all flows on that port.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject negative atoi() results too - port indexes port_eth[] */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect "filter;destroy" handler.
+ * A destroy_flow_ntconnect blob follows the textual header in *data; it
+ * carries the port number and the flow handle to destroy.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	/*
+	 * Initialize error like the sibling handlers: error.type and
+	 * error.message are read below and flow_destroy() may not set
+	 * them on every path.
+	 */
+	set_error(&error);
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject negative port numbers too - port indexes port_eth[] */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selector for make_flow_create(): really create the flow, or only
+ * validate the request.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Rebuild a flow_elem/flow_action stream from the wire-copy structures
+ * received over ntconnect and hand it to the flow API.
+ *
+ * @func   FLOW_API_FUNC_CREATE or FLOW_API_FUNC_VALIDATE.
+ * @port   index into port_eth[] (validated by the callers).
+ * Returns the created flow handle as an integer, or 0 on validate or
+ * error; *status and *error carry the detailed result.
+ *
+ * Bounds fixes: the element/action/tunnel/encap/decap overflow checks
+ * used ">", which still allowed writing one entry past the end of the
+ * local arrays; they now use ">=".
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Preset the error reported by the overflow exits below */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "Too many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Rebuild the element stream, terminated by FLOW_ELEM_TYPE_END */
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Rebuild the action stream, terminated by FLOW_ACTION_TYPE_END */
+	idx = -1;
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"Too many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: validate a flow specification against a
+ * port without programming the flow into hardware.
+ *
+ * The request payload (struct create_flow_ntconnect) is located after
+ * the ntconn header in *data.  The reply buffer and *len are produced
+ * by copy_return_status() from the status/error of the validate call.
+ *
+ * Returns REQUEST_OK/REQUEST_ERR as defined by the ntconn framework.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The blob must have exactly the expected size */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	/* Fix: banner previously reported func_flow_create */
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* Fix: was flow_cpy[i].elem[i].type (mis-indexed base) */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			/* Fix: print bytes 0..3, not byte 0 four times */
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/*
+	 * Call filter with data.  The reply buffer (*data/*len) is built
+	 * by copy_return_status().  The duplicated, unreachable validate
+	 * block that followed this return has been removed.
+	 */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * NtConnect request handler: query an existing flow (one action, e.g.
+ * COUNT) and return the queried data to the client.
+ *
+ * The request payload (struct query_flow_ntconnect) follows the ntconn
+ * header in *data.  On return, *data points to a freshly malloc'ed
+ * reply (struct query_flow_return_s plus the queried data) and *len is
+ * its size; the framework frees it via flow_free_data().
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject payloads whose size does not match the expected blob */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/* The client sends back the opaque flow handle it got at create */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply carries the fixed header plus 'length' bytes of query data */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		/* data_out (if any) is owned by us after flow_query() */
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Entry point for all requests to the "flow" module: dispatch the named
+ * function through this module's handler table.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Free a reply buffer previously malloc'ed by this module's handlers.
+ * free(NULL) is a no-op per the C standard, so no null check is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; the flow module keeps no per-client state */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table handed to the NtConnect framework at registration */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register the flow module with the NtConnect framework for one adapter.
+ * Returns -1 when every client slot is in use, otherwise the result of
+ * register_ntconn_mod().
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Locate the first unused client slot */
+	while (slot < MAX_CLIENTS && flow_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One slot per adapter registered with the NtConnect framework */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-specific error codes with their texts; terminated by { -1, NULL } */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+static const char *get_error_msg(uint32_t err_code)
+{
+	int idx;
+
+	/* Codes below the meter-specific range belong to the generic table */
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR)
+		return get_ntconn_error(err_code)->err_text;
+
+	for (idx = 0; ntconn_err[idx].err_code != (uint32_t)-1; idx++) {
+		if (ntconn_err[idx].err_code == err_code)
+			return ntconn_err[idx].err_text;
+	}
+
+	/* Unknown code: fall back to the "Internal error" entry */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Request-name to handler mapping used by execute_function() */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	/* Fill in an rte_mtr_error from one of our module error codes */
+	error->cause = NULL;
+	error->message = get_error_msg(err);
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+}
+
+/*
+ * NtConnect request handler: return the rte_mtr capabilities of the
+ * physical port backing the given virtual port.
+ *
+ * *data is a text request of the form "vport=<n>"; on return it points
+ * to a malloc'ed reply (capabilities struct on success, error struct on
+ * failure) and *len is its size.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse the "vport=<n>" argument from the request text */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Map the virtual port to physical port 0 or 1 */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: add/delete meter profiles, policies and
+ * meters on the port backing a virtual port.
+ *
+ * The request text selects the command ("addpro", "delpro", "addpol",
+ * "delpol", "crtmtr", "delmtr"); the binary payload (struct
+ * meter_setup_s) follows the ntconn header.  Per-vport IDs are remapped
+ * into a global ID space by adding (vport - 4) * max_id.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	/* Valid virtual ports are 4..128 (0-3 are not VF ports here) */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* Decode the 6-character command name from the request text */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		/* Per-vport ID range; remap into the global profile space */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action table from the flat copies */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy IDs are remapped with the policy space size */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	 *data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: read (and optionally clear) the statistics
+ * of one meter on the port backing a virtual port.
+ *
+ * The binary payload (struct meter_get_stat_s) follows the ntconn
+ * header; the meter ID is remapped into the global meter ID space the
+ * same way func_meter_setup() does.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Map vport to physical port and remap the meter ID */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Entry point for all requests to the "meter" module: dispatch the
+ * named function through this module's handler table.
+ */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Free a reply buffer previously malloc'ed by this module's handlers.
+ * free(NULL) is a no-op per the C standard, so no null check is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; the meter module keeps no per-client state */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table handed to the NtConnect framework at registration */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register the meter module with the NtConnect framework for one
+ * adapter.  Returns -1 when every client slot is in use, otherwise the
+ * result of register_ntconn_mod().
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Locate the first unused client slot */
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ *
+ * Each function registers the corresponding NtConnect module for the
+ * given driver instance. Each returns 0 on success, -1 on failure.
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..5c8b8db39e
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+/* Module name used when dispatching ntconnect requests */
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistics section inside a snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One snapshot section: location and size (in 64-bit words) */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot bookkeeping, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;
+	struct snaps_s *next;
+};
+
+/* Single module-wide handle; the stat module serves one adapter */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic categories served by this module */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Sub-commands of "get snapshot" (level 2 dispatch table) */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* Sub-commands of "get" (level 1 dispatch table) */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level commands understood by the stat module */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Fill a stat reply blob with Flow Matcher (FLM) counters.
+ * val points to a buffer laid out as struct ntc_stat_get_data_s with
+ * nbc records of struct flowmatcher_type_fields_s in ->data.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+	/* Silence unused-variable warnings when asserts are compiled out */
+	(void)cnt_names;
+	(void)cnt_values;
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/*
+		 * Advance the output record each iteration; previously the
+		 * same record was overwritten nbc times.
+		 */
+		for (c = 0; c < nbc; c++, flm++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/*
+		 * No FLM stats available: zero all output records. Size by
+		 * the destination records, not the source struct, which may
+		 * have a different layout.
+		 */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply blob with per-color counters.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	int i;
+
+	hd->nb_counters = (uint64_t)nbc;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap adapters */
+	struct color_type_fields_s *out =
+		(struct color_type_fields_s *)hd->data;
+
+	for (i = 0; i < nbc; i++, out++) {
+		out->pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		out->octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		out->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply blob with per-host-buffer (queue) counters.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	int i;
+
+	hd->nb_counters = (uint64_t)nbq;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap adapters */
+	struct queue_type_fields_s *out =
+		(struct queue_type_fields_s *)hd->data;
+
+	for (i = 0; i < nbq; i++, out++) {
+		out->flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		out->drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		out->fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		out->dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		out->flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		out->drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		out->fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		out->dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group, shared by Rx and Tx port statistics,
+ * from a hardware counter record into a reply record.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Fill a stat reply blob with Rx port counters. The record layout
+ * differs between vswitch (virt) and capture (cap) adapters.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	hd->nb_counters = (uint64_t)nbp;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (hd->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)hd->data;
+
+		for (p = 0; p < nbp; p++, out++) {
+			out->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			out->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			out->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			out->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_octets;
+			out->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct rx_type_fields_cap_s *out =
+		(struct rx_type_fields_cap_s *)hd->data;
+
+	for (p = 0; p < nbp; p++, out++) {
+		/* Alias the source record to keep the copies readable */
+		struct port_counters_v2 *cnt =
+			&hwstat->cap.mp_stat_structs_port_rx[p];
+
+		copy_rmon_stat(cnt, &out->rmon);
+
+		/* Rx only port counters */
+		out->mac_drop_events = cnt->mac_drop_events;
+		out->pkts_lr = cnt->pkts_lr;
+		out->duplicate = cnt->duplicate;
+		out->pkts_ip_chksum_error = cnt->pkts_ip_chksum_error;
+		out->pkts_udp_chksum_error = cnt->pkts_udp_chksum_error;
+		out->pkts_tcp_chksum_error = cnt->pkts_tcp_chksum_error;
+		out->pkts_giant_undersize = cnt->pkts_giant_undersize;
+		out->pkts_baby_giant = cnt->pkts_baby_giant;
+		out->pkts_not_isl_vlan_mpls = cnt->pkts_not_isl_vlan_mpls;
+		out->pkts_isl = cnt->pkts_isl;
+		out->pkts_vlan = cnt->pkts_vlan;
+		out->pkts_isl_vlan = cnt->pkts_isl_vlan;
+		out->pkts_mpls = cnt->pkts_mpls;
+		out->pkts_isl_mpls = cnt->pkts_isl_mpls;
+		out->pkts_vlan_mpls = cnt->pkts_vlan_mpls;
+		out->pkts_isl_vlan_mpls = cnt->pkts_isl_vlan_mpls;
+		out->pkts_no_filter = cnt->pkts_no_filter;
+		out->pkts_dedup_drop = cnt->pkts_dedup_drop;
+		out->pkts_filter_drop = cnt->pkts_filter_drop;
+		out->pkts_overflow = cnt->pkts_overflow;
+		out->pkts_dbs_drop = cnt->pkts_dbs_drop;
+		out->octets_no_filter = cnt->octets_no_filter;
+		out->octets_dedup_drop = cnt->octets_dedup_drop;
+		out->octets_filter_drop = cnt->octets_filter_drop;
+		out->octets_overflow = cnt->octets_overflow;
+		out->octets_dbs_drop = cnt->octets_dbs_drop;
+		out->ipft_first_hit = cnt->ipft_first_hit;
+		out->ipft_first_not_hit = cnt->ipft_first_not_hit;
+		out->ipft_mid_hit = cnt->ipft_mid_hit;
+		out->ipft_mid_not_hit = cnt->ipft_mid_not_hit;
+		out->ipft_last_hit = cnt->ipft_last_hit;
+		out->ipft_last_not_hit = cnt->ipft_last_not_hit;
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply blob with Tx port counters. The record layout
+ * differs between vswitch (virt) and capture (cap) adapters.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *hd = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	hd->nb_counters = (uint64_t)nbp;
+	hd->timestamp = hwstat->last_timestamp;
+	hd->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (hd->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)hd->data;
+
+		for (p = 0; p < nbp; p++, out++) {
+			out->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			out->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			out->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			out->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_octets;
+			out->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct tx_type_fields_cap_s *out =
+		(struct tx_type_fields_cap_s *)hd->data;
+
+	for (p = 0; p < nbp; p++, out++) {
+		copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+			       &out->rmon);
+		/* rmon.pkts is overridden with the accumulated Tx total */
+		out->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Reply with the statistics layout version (a single int) so the
+ * client can pick the matching binary reply structures.
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	if (stat->p_nt4ga_stat == NULL || stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	reply = malloc(sizeof(int));
+	if (reply == NULL) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Reply with the FLM record layout version (a single int):
+ * 1 for flm_stat_ver below 18, 2 otherwise.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	if (stat->p_nt4ga_stat == NULL || stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	reply = malloc(sizeof(int));
+	if (reply == NULL) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	/*
+	 * Initialize so an unhandled enum value yields a safe empty result
+	 * instead of reading uninitialized variables (undefined behavior).
+	 */
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for single-type stat requests: allocate a reply
+ * buffer, fill it via read_counters() while holding the stat lock,
+ * and hand it to the caller (who owns and must free *data).
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nrec;
+	int words = get_size(stat, stype, &nrec);
+	int bytes = words * (int)sizeof(uint64_t);
+	uint64_t *buf = malloc(bytes);
+
+	if (buf == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, buf, nrec);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)buf;
+	*len = bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* Reply with the Flow Matcher counters. */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* The stat module must be fully initialized */
+	if (stat->p_nt4ga_stat == NULL || stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER, read_flm);
+}
+
+/* Reply with the per-color counters. */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* The stat module must be fully initialized */
+	if (stat->p_nt4ga_stat == NULL || stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	return do_get_stats(stat, data, len, STAT_TYPE_COLOR, read_colors);
+}
+
+/* Reply with the per-queue (host buffer) counters. */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* The stat module must be fully initialized */
+	if (stat->p_nt4ga_stat == NULL || stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	return do_get_stats(stat, data, len, STAT_TYPE_QUEUE, read_queues);
+}
+
+/* Reply with the Rx port counters. */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* The stat module must be fully initialized */
+	if (stat->p_nt4ga_stat == NULL || stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	return do_get_stats(stat, data, len, STAT_TYPE_RX, read_rx_counters);
+}
+
+/* Reply with the Tx port counters. */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* The stat module must be fully initialized */
+	if (stat->p_nt4ga_stat == NULL || stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	return do_get_stats(stat, data, len, STAT_TYPE_TX, read_tx_counters);
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot entry for client_id. If parent is non-NULL it
+ * receives the preceding list node (or NULL when the match is the head,
+ * or the list tail when no match exists). Returns NULL when not found.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot entry for client_id, creating and linking a new
+ * one at the list head when none exists yet. Returns NULL on
+ * allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *entry = find_client_snap_data(stat, client_id, NULL);
+
+	if (entry)
+		return entry;
+
+	/* First snapshot for this client: allocate and link a new entry */
+	entry = malloc(sizeof(struct snaps_s));
+	if (entry) {
+		entry->client_id = client_id;
+		entry->buffer = NULL;
+		entry->next = stat->snaps_base;
+		stat->snaps_base = entry;
+	}
+	return entry;
+}
+
+/*
+ * Take an atomic snapshot of all statistic sections (colors, queues,
+ * Rx ports, Tx ports) for the client, reading all counters under the
+ * stat lock. The client retrieves the sections afterwards with the
+ * "get snapshot" sub-commands.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps)
+		/*
+		 * Must not jump to err_out here: the stat lock is not held
+		 * yet and unlocking an unowned mutex is undefined behavior.
+		 */
+		goto err_unlocked;
+
+	if (snaps->buffer) {
+		free(snaps->buffer);
+		snaps->buffer = NULL;	/* avoid a dangling pointer on error */
+	}
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+err_unlocked:
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of the client's previously taken snapshot into a
+ * freshly allocated reply buffer. Fails with NO_DATA when the client
+ * has not taken a snapshot yet.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* Was "if (!data)": tested the argument, never the allocation */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* Return the color section of the client's snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* Return the queue section of the client's snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* Return the Rx port section of the client's snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* Return the Tx port section of the client's snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ */
+static int stat_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, stat_entry_funcs, data, len, 0);
+	return res;
+}
+
+/* Free a reply buffer allocated by one of the stat request handlers. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client teardown: unlink and free the client's snapshot entry,
+ * if one exists.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink from the singly linked list */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);	/* free(NULL) is a no-op; no guard needed */
+	free(snaps);
+}
+
+/* Operations table handed to the NtConnect framework for this module. */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for the given driver. Fails (-1) when the
+ * hardware stat module is absent or its layout version is unsupported.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/*
+	 * Guard the dereference below; every request handler checks
+	 * mp_nthw_stat for NULL, so registration must too.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: hardware statistics module not initialized");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+/* Module name used when dispatching ntconnect requests */
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One handle slot per registered adapter */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Top-level commands understood by the test module */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo test handler: validates the request blob and returns a reply
+ * containing the same payload. On validation failure a struct test_s
+ * with status -1 and no payload is returned; REQUEST_ERR is returned
+ * only when the reply itself cannot be allocated.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+
+	/*
+	 * number comes from an untrusted blob; a negative value would wrap
+	 * the unsigned size computation and the later malloc/copy sizes.
+	 */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: negative count");
+		status = -1;
+		number = 0;
+		goto TEST_ERROR;
+	}
+
+	size = sizeof(struct test_s) + sizeof(uint64_t) * (uint32_t)number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Error reply carries only the status code */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Flow API function selectors.
+ * NOTE(review): not referenced anywhere in this file's visible code --
+ * confirm whether these are used elsewhere or are dead placeholders.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Module entry point: dispatch an incoming test request to the
+ * matching handler in adapter_entry_funcs.
+ */
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len, 0);
+	return res;
+}
+
+/*
+ * Release a reply buffer previously handed out by this module.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; the test module keeps no per-client state. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table handed to the NtConnect framework for this module. */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register a test module instance for the given driver with the
+ * NtConnect framework. Returns the result of register_ntconn_mod(),
+ * or -1 when all MAX_CLIENTS handle slots are in use.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int idx = 0;
+
+	/* Find the first unused handle slot */
+	while (idx < MAX_CLIENTS && test_hdl[idx].drv != NULL)
+		idx++;
+
+	if (idx == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[idx].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[idx],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (9 preceding siblings ...)
  2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-21 13:54 ` Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (9 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel-space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   36 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12011 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+enum nt_fpga_bus_type { /* bus a module is addressed through (see bus_id) */
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias: plain "SPI" means SPI0 */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+enum nt_fpga_register_type { /* register access class (see register 'type') */
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW, /* read/write */
+	REGISTER_TYPE_RO, /* read-only */
+	REGISTER_TYPE_WO, /* write-only */
+	REGISTER_TYPE_RC1, /* NOTE(review): presumably read-to-clear - confirm */
+	REGISTER_TYPE_MIXED, /* fields of differing access types */
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+struct nt_fpga_field_init { /* one named bit-field within a register */
+	int id; /* field identifier (generated *_defs.h constant) */
+	uint16_t bw; /* bit width of the field */
+	uint16_t low; /* lowest bit position within the register */
+	uint64_t reset_val; /* field value after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+struct nt_fpga_register_init { /* one register of a module */
+	int id; /* register identifier */
+	uint32_t addr_rel; /* address relative to the module's addr_base */
+	uint16_t bw; /* total bit width of the register */
+	nt_fpga_register_type_t type; /* access class (RW/RO/WO/RC1/MIXED) */
+	uint64_t reset_val; /* register value after reset */
+	int nb_fields; /* number of entries in 'fields' */
+	struct nt_fpga_field_init *fields; /* per-field layout table */
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+struct nt_fpga_module_init { /* one FPGA module instance */
+	int id; /* module identifier */
+	int instance; /* instance index of this module */
+	int def_id; /* module definition identifier */
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id; /* bus the module is accessed through */
+	uint32_t addr_base; /* module base address on 'bus_id' */
+	int nb_registers; /* number of entries in 'registers' */
+	struct nt_fpga_register_init *registers; /* register layout table */
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+struct nt_fpga_prod_param { /* one (id, value) product parameter pair */
+	const int param_id; /* parameter identifier */
+	const int param_value; /* parameter value */
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+struct nt_fpga_prod_init { /* root descriptor of one FPGA product image */
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time; /* NOTE(review): presumably a build timestamp - confirm units */
+	int nb_prod_params; /* number of entries in 'product_params' */
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules; /* number of entries in 'modules' */
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..0e2c2d38cc
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if is_windows
+    build = false
+    reason = 'not supported on Windows'
+    subdir_done()
+endif
+
+if not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')  # reuse tree-wide defaults when available
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (key matcher) module register map: CAM/TCAM lookup tables, recipe
+ * (RCP) configuration and TCAM-color (TCI) / TCAM-qualifier (TCQ) tables.
+ * Field rows are { field id, bit width, lsb offset, reset value };
+ * register rows are { register id, address index, bit width, access type,
+ * reset value, field count, field table }.  NOTE(review): row layout
+ * inferred from the W0..W5 lane pattern below - confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* Six 32-bit key words (W0..W5) plus a 4-bit flow type per word pair. */
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 781-bit recipe word; offsets index into that wide data word. */
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+/* All KM tables are write-only CTRL/DATA pairs; KM_STATUS is the only RO. */
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module register map: 100G PCS/FEC status, GTY transceiver
+ * control and link/debounce configuration.
+ * Field rows are { field id, bit width, lsb offset, reset value };
+ * register rows are { register id, address index, bit width, access type,
+ * reset value, field count, field table }.  NOTE(review): row layout
+ * inferred from the regular patterns in this generated file - confirm
+ * against fpga_model.h.  Names with _0.._3 suffixes address the four
+ * serial lanes of the port.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/* Non-zero resets: 10-cycle latencies, port ctrl mode 2. */
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+/* Dynamic reconfiguration port access (mixed RW control + RO status). */
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+/* Per-lane GTY receive controls: CDR hold, equalizer reset, LPM, polarity,
+ * rate select. */
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+/* TX differential swing control; all four lanes reset to 24. */
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+/* TX post-cursor emphasis; all four lanes reset to 20. */
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+/* Core MAC/PCS config; TX_ENABLE and TX_FCS_REMOVE reset to 1. */
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+/* Max frame length defaults to 10000 bytes (jumbo). */
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+/* Latched (sticky) versions of the STAT_PCS_RX bits, same bit layout. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+/* RX/TX timestamp compensation delays (reset 1451/1440 time units). */
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/* Register reset values (5th column) pack the per-field resets above,
+ * e.g. MAC_PCS_DEBOUNCE_CTRL 264714 == (2<<17)|(10<<8)|10. */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC_RX module register map: read-only 32-bit receive statistics
+ * counters.  Field rows are { field id, bit width, lsb offset, reset
+ * value }; register rows are { register id, address index, bit width,
+ * access type, reset value, field count, field table }.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC_TX module register map: read-only 32-bit transmit statistics
+ * counters, mirroring the MAC_RX layout.  Field rows are { field id, bit
+ * width, lsb offset, reset value }; register rows are { register id,
+ * address index, bit width, access type, reset value, field count,
+ * field table }.
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI_RD_TG module register map: PCIe read traffic generator used for
+ * DMA test/benchmark.  RDDATA0/1 hold the low/high halves of the 64-bit
+ * host physical address; RDDATA2 packs request size, HID and wrap/wait
+ * flags.  Field rows are { field id, bit width, lsb offset, reset
+ * value }; register rows are { register id, address index, bit width,
+ * access type, reset value, field count, field table }.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA module register map: PCIe test analyzer with good/bad packet
+ * and error counters.  Field rows are { field id, bit width, lsb offset,
+ * reset value }; register rows are { register id, address index, bit
+ * width, access type, reset value, field count, field table }.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG module register map: PCIe write traffic generator, the
+ * write-side counterpart of PCI_RD_TG (adds INC_MODE and a SEQ register).
+ * Field rows are { field id, bit width, lsb offset, reset value };
+ * register rows are { register id, address index, bit width, access
+ * type, reset value, field count, field table }.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/*
+ * PDB (packet descriptor builder) module register map: global CONFIG
+ * plus an indirect RCP recipe table (descriptor layout, offsets,
+ * duplication and TX steering).  Field rows are { field id, bit width,
+ * lsb offset, reset value }; register rows are { register id, address
+ * index, bit width, access type, reset value, field count, field table }.
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/*
+ * PDI module register map: a small serial/UART-style interface with
+ * control (CR), status (SR), data RX/TX (DRR/DTR), prescaler (PRE) and
+ * soft reset (SRR) registers.  Field rows are { field id, bit width,
+ * lsb offset, reset value }; register rows are { register id, address
+ * index, bit width, access type, reset value, field count, field table }.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+/* Prescaler resets to 3 (also reflected in the PDI_PRE register reset). */
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/*
+ * PTP1588 module field tables: IEEE 1588 PTP configuration, GPIO,
+ * MAC management-interface access, RX host ring and TX data staging.
+ * Field rows are { field id, bit width, lsb offset, reset value }.
+ * The ptp1588_registers table tying these together follows later in
+ * this file.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+/* Latched-low view resets to all-ones (511 = 9 GPIO bits set). */
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+/* TX staging: first word is 32 bits; LAST1..LAST3 cover 1-3 trailing
+ * bytes of the final (partial) word. */
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/*
+ * QSL (queue selection) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/*
+ * QSPI module register map (auto-generated). The register set (CR/SR/DGIE/
+ * IER/ISR/SRR/SSR/DTR/DRR/FIFO_OCY) matches the Xilinx AXI Quad SPI core --
+ * presumably that IP is instantiated in the FPGA; confirm against the board
+ * design. Field entries appear to be { field_id, bit_width, lsb_offset,
+ * reset_value }; register entries { reg_id, address, bit_width, access_type,
+ * reset_value, field_count, field_list } -- confirm against fpga_model.h.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+/* SSR reset 4294967295 == 0xFFFFFFFF: all slave-selects deasserted. */
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * RAC (register access / RAB DMA) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h. Note the RAC register addresses (4160, 4168, ...) are much
+ * larger than other modules' -- presumably absolute offsets rather than
+ * per-module indices; verify against the bus map.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/*
+ * RFD module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h. Notable defaults: TPID 33024 == 0x8100 (802.1Q VLAN),
+ * port 4789 is the IANA VXLAN UDP port, max frame 9018 bytes (jumbo).
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/*
+ * RMC module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/*
+ * RPL (replacer) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 128-bit replacement data value -- wider than one 32-bit bus word. */
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/*
+ * RPP_LR module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/*
+ * RST9563 (reset/clocking control for FPGA image 9563) register map
+ * (auto-generated). Field entries appear to be { field_id, bit_width,
+ * lsb_offset, reset_value }; register entries { reg_id, address, bit_width,
+ * access_type, reset_value, field_count, field_list } -- layout inferred
+ * from usage, confirm against fpga_model.h.
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* Most sub-blocks come out of configuration held in reset (default 1). */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+/* STICKY mirrors STAT bit-for-bit but latches "unlocked" events (RC1). */
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/*
+ * SLC (slicer) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/*
+ * SPIM (SPI master) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+/* Reset value 6 == RXEMPTY | TXEMPTY (both FIFOs empty at reset). */
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/*
+ * SPIS (SPI slave) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+/* Reset value 6 == RXEMPTY | TXEMPTY (both FIFOs empty at reset). */
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/*
+ * STA (statistics) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/*
+ * TEMPMON (temperature monitor) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/*
+ * TINT (timer interrupt) module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+/* Per-register field counts below match the lengths of the field arrays. */
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/*
+ * TMC module register map (auto-generated).
+ * Field entries appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register entries { reg_id, address, bit_width, access_type, reset_value,
+ * field_count, field_list } -- layout inferred from usage, confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/*
+ * TSM (time sync module) field tables (auto-generated). Connectors 0-2
+ * carry a full config (BLIND/DC_SRC/PORT/PPSIN_2_5V/SAMPLE_EDGE) while
+ * connectors 3-6 use a reduced layout; connector 7 appears to be the host
+ * sample. Field entries appear to be { field_id, bit_width, lsb_offset,
+ * reset_value } -- layout inferred from usage, confirm against
+ * fpga_model.h. The tsm register table follows later in this file.
+ */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+/* NOTE(review): con5 uses _LO_TIME while con0-4/6 use _LO_NS -- generated
+ * naming, verify it is intentional in the register definitions.
+ */
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = { /* entry layout: { reg_id, address, bit width, type, reset value, nb_fields, fields } */
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = { /* entry layout: { module_id, instance, definition_id, ver_major, ver_minor, bus, address, nb_registers, registers } */
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = { /* { param_id, value } pairs, terminated by { 0, -1 } */
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules, /* nb_params, params, nb_modules, modules */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[]; /* table of supported FPGA product initializers */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000; /* FPGA image 9563-55-24-0 description */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_ /* NOTE(review): leading-underscore guard is a reserved identifier (C11 7.1.3) - confirm project convention */
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L) /* note: id 32L is unassigned */
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End-of-list indicator.
+ * Keep MOD_UNKNOWN_MAX as the last real id - only aliases go below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Number of module ids; same value as MOD_UNKNOWN_MAX */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_ /* NOTE(review): leading-underscore guard is a reserved identifier (C11 7.1.3) - confirm project convention */
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L) /* unknown/uninitialized - keep this as the first element */
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+#define FHM_PKT_DROP_CNT (1843L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10: register and field ID defines */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100: register and field ID defines */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G: register and field ID defines */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G: register and field ID defines */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS: register and field ID defines */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV: register and field ID defines */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX: receive-side MAC statistics counters (bad FCS/preamble/SFD, per-size-bucket packet counts, byte/packet totals, VLAN, unicast/multicast/broadcast) */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG: MAC test frame generator (control, frame data/length/gap, frame header, repetition count) */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX: transmit-side MAC statistics counters (bad FCS, frame errors, per-size-bucket packet counts, byte/packet totals, VLAN, unicast/multicast/broadcast) */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU: embedded microcontroller control/status (CSR halt/pause/reset, DRAM and IRAM access windows) */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG: module with buffer-size-offset (BSO), HBM data/debug, per-channel egress/ingress flow-control debug, max bytes/packets and timeout registers — acronym expansion not evident from the defines; confirm against module docs */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: masker recipe registers (per-recipe dyn/enable/length/offset mask fields, 4 lanes each) */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF: serial interconnect interface — GT transceiver power-down/select/loopback, PRBS test, DRP access, QPLL/MMCM lock and per-endpoint link-up status */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3: PCIe gen3 endpoint core — build id/time, TLP/read-size config, DRP access, error reporting, and 32-vector interrupt clear/force/mask */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
/* RXAUI */
#define RXAUI_DEBUG (8268L)
#define RXAUI_DEBUG_MGT_CV_0 (8269L)
#define RXAUI_DEBUG_MGT_CV_1 (8270L)
#define RXAUI_DEBUG_MGT_CV_2 (8271L)
#define RXAUI_DEBUG_MGT_CV_3 (8272L)
#define RXAUI_DEBUG_MGT_CV_4 (8273L)
#define RXAUI_DEBUG_MGT_CV_5 (8274L)
#define RXAUI_DEBUG_MGT_CV_6 (8275L)
#define RXAUI_DEBUG_MGT_CV_7 (8276L)
#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
#define RXAUI_DRP_AD (8282L)
#define RXAUI_DRP_AD_ADDRESS (8283L)
#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
#define RXAUI_DRP_AD_GTX_NO (8285L)
#define RXAUI_DRP_AD_READY (8286L)
#define RXAUI_DRP_AD_RESERVED3 (8287L)
#define RXAUI_DRP_AD_RESERVED4 (8288L)
#define RXAUI_DRP_DA (8289L)
#define RXAUI_DRP_DA_DATA (8290L)
#define RXAUI_GTX_CONFIG (8291L)
#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
#define RXAUI_GTX_STAT (8304L)
#define RXAUI_GTX_STAT_RESERVED10 (8305L)
#define RXAUI_GTX_STAT_RESERVED11 (8306L)
#define RXAUI_GTX_STAT_RESERVED12 (8307L)
#define RXAUI_GTX_STAT_RESERVED13 (8308L)
#define RXAUI_GTX_STAT_RESERVED8 (8309L)
#define RXAUI_GTX_STAT_RESERVED9 (8310L)
#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-21 13:54   ` Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   71 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15441 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+/*
+ * Identifier of a RAB bus instance (presumably "Register Access Bus" --
+ * confirm against the RAC/RAB code added in this series).
+ * NOTE(review): this header uses uint8_t but does not include <stdint.h>
+ * itself; consumers must include <stdint.h> before this header.
+ */
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0e2c2d38cc..6f48af601f 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -17,13 +17,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+/*
+ * NOTE(review): both objects are zero-initialized here (entry count 0,
+ * table pointer NULL); the actual Si5340 register profile is presumably
+ * supplied in a later commit -- confirm before these are dereferenced.
+ */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+#define clk_profile_size_error_msg "size test failed"
+
+/* Clock-profile table entry, format 0: 8-bit address, value and mask. */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Clock-profile table entry, format 1: 16-bit address, 8-bit value. */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Clock-profile table entry, format 2: full-width address, 8-bit value. */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Selects which of the entry layouts above a given profile table uses. */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/* Si5340 clock profile for NT200A02 (U23): entry count and table pointer. */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Read selected product parameters (NIM, PHY port/quad and RX/TX port
+ * counts) from the FPGA model into @p_fpga_info, then classify the FPGA
+ * profile as VSWITCH, CAPTURE or INLINE depending on which product
+ * parameters are present.  Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	/* third argument is presumably the fallback when the parameter is
+	 * absent from the FPGA model -- confirm against fpga_get_product_param()
+	 */
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one register byte from an I2C device behind FPGA IIC controller
+ * instance @n_instance_no (device @n_dev_addr, register @n_reg_addr).
+ * Returns the byte value (0..255) on success, -1 on init or read failure.
+ * NOTE(review): the last nthw_iic_init() argument (8) is presumably a bus
+ * timing/clock parameter -- confirm against nthw_iic_init().
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	/* read a single byte starting at n_reg_addr */
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+/*
+ * Write one register byte @val to an I2C device behind FPGA IIC
+ * controller instance @n_instance_no (device @n_dev_addr, register
+ * @n_reg_addr).  Returns 0 on success, -1 on init or write failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	/* write a single byte starting at n_reg_addr */
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Probe the inclusive range of FPGA IIC controller instances
+ * [@n_instance_no_begin .. @n_instance_no_end], scanning each bus for
+ * responding devices via nthw_iic_scan().  Instances whose init fails
+ * are silently skipped.  Always returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				/* presumably timeouts and retry counts --
+				 * see nthw_iic_set_retry_params()
+				 */
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Identify a Silicon Labs clock chip on IIC instance @n_instance_no.
+ * Selects register page 0 via @n_page_reg_addr, reads 8 identity bytes
+ * starting at register 0x00 and decodes the part number from them.
+ * Returns 5340, 5341 or 5338 for a recognized part, -1 otherwise.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	/* all-ones sentinel for the log line when no identity was read */
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* pack the 8 identity bytes big-endian for logging */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			/*
+			 * NOTE(review): decimal 38 here, while the sibling
+			 * comparisons use hex (0x53/0x40/0x41); possibly
+			 * intended as 0x38 -- verify against the Si5338
+			 * identity registers.
+			 */
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ *
+ * Branch-free byte-wise formulation.  The seed starts at 0, i.e. the
+ * XMODEM convention (the X.25/HDLC variants initialize to 0xFFFF).
+ * Returns the 16-bit CRC of @buffer[0..@length-1]; 0 for empty input.
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t seed = 0;
+
+	while (length--) {
+		/* swap bytes, fold in the next input byte, then mix */
+		seed = (uint16_t)(seed >> 8 | seed << 8);
+		seed = (uint16_t)(seed ^ *buffer++);
+		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
+		seed = (uint16_t)(seed ^ seed << 8 << 4);
+		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1);
+	}
+	return seed;
+}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	nthw_iic_t *p_iic = nthw_iic_new();
+	nthw_si5340_t *p_si5340 = nthw_si5340_new();
+	int n_result;
+
+	assert(p_iic);
+	assert(p_si5340);
+
+	/* I2C cycle time 125Mhz ~ 8ns */
+	nthw_iic_init(p_iic, p_fpga, 0, 8);
+
+	/* Si5340_U23_I2c_Addr_7bit */
+	nthw_si5340_init(p_si5340, p_iic, n_iic_addr);
+
+	/* Program the clock profile, then release the synth handle.
+	 * NOTE(review): p_iic is not explicitly deleted here - presumably
+	 * owned/released via nthw_si5340_delete(); confirm against nthw_si5340.
+	 */
+	n_result = nthw_si5340_config_fmt2(p_si5340, p_clk_profile,
+					   n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_si5340);
+	p_si5340 = NULL;
+
+	return n_result;
+}
+
+/*
+ * Probe and initialize the FPGA: read and decode the ident/build registers,
+ * look up a matching supported FPGA image, bring up register access (RAC),
+ * run the adapter specific reset sequence and start the TSM stat/keep-alive
+ * timers.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	/* Read the FPGA ident and build-time registers and decode them */
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/* assign the function-scope n_fpga_ident directly; the
+		 * original code declared a shadowing local of the same name
+		 */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	/* Look up a supported FPGA model matching the ident just read */
+	p_fpga_mgr = fpga_mgr_new();
+	if (p_fpga_mgr == NULL) {
+		NT_LOG(ERR, NTHW, "%s: FPGA manager allocation failed\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	/* The manager is only needed for the lookup - release it on all
+	 * paths (the original leaked it when the FPGA was unsupported)
+	 */
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	/* Register access bus is mandatory */
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter specific reset/bring-up sequence */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Prefer PCIe3 for host interface sampling; fall back to HIF */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	/* Time stamp module: start stat-toggle and keep-alive timers */
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time = 0, n_ts = 0;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+
+			/* Guard: p_nthw_tsm may be NULL if allocation failed */
+			if (p_nthw_tsm) {
+				nthw_tsm_get_time(p_nthw_tsm, &n_time);
+				nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+				       p_adapter_id_str, n_time, n_ts);
+			}
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down the FPGA by resetting the register access bus (RAB).
+ * Returns the RAB reset result, or -1 if the adapter was never initialized.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	/* original code re-tested p_fpga_info inside the outer check */
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface for FPGA probing, initialization and low-level helpers
+ * (AVR probe, IIC scan/byte access, SiLabs clock synth setup).
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* Probe/identify the FPGA and run the adapter bring-up; 0 on success */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+/* Reset the register access bus; -1 if the adapter was not initialized */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* Populate p_fpga_info with parameter values read from the FPGA model */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* Probe the on-board AVR controller instance (SPI) */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* Scan IIC bus instances in the half-open range [begin, end] */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+/* Single-byte register access on an IIC device */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* Detect a Silicon Labs device behind the given IIC instance/address */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* Program a Si5340 clock synthesizer with a format-2 clock profile */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x bring-up: run the common reset init, then the product specific
+ * reset sequence. Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	if (p_fpga_info->n_fpga_prod_id == 9563) {
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+	} else {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+	}
+
+	if (res != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+/* NT200A0x adapter bring-up entry point; 0 on success */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+/* Product specific reset sequence for the 9563 FPGA image */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve and cache all RST9563 register/field pointers used by the reset
+ * sequence into *p. Fields that do not exist on the 9563 image are set to
+ * NULL. Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	/* original code performed this identical lookup and error check twice */
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	/* original code assigned mp_fld_rst_ptp twice */
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* these two are optional - query, do not require */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the PERIPH reset bit (assert, then deassert).
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_reg_rst;
+	nt_field_t *p_fld_rst_periph;
+
+	if (p_mod_rst == NULL)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+	p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+	p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+	field_set_flush(p_fld_rst_periph);
+	field_clr_flush(p_fld_rst_periph);
+
+	return 0;
+}
+
+/*
+ * Program the clock synthesizer with the NT200A02 U23 v5 profile.
+ * Only the Si5340 model is supported; any other model is rejected.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, p_fpga->m_product_id,
+		       n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * 9563 product reset: pulse the peripheral reset, program the clock
+ * synthesizer, resolve the RST field pointers and run the common
+ * NT200A0x reset sequence. Returns 0 on success.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	const int n_clk_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_clk_synth_i2c_addr =
+		p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int res;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_clk_synth_model,
+						n_clk_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+/*
+ * Wait until the DDR4 PLL reports LOCKED, then wait for the DDR4 MMCM and
+ * (if present) the TSM REF MMCM, and finally clear/check the sticky
+ * MMCM/PLL unlock bits.
+ *
+ * The PLL wait is retried up to 5 times with a DDR reset pulse in between;
+ * the first attempt times out after 5 s, later attempts after 8 s (lock
+ * times of at least 21 s have been observed).
+ * Returns 0 on success, -1 when an MMCM never locks.
+ *
+ * Fix: the loop previously declared a second 'int locked' that shadowed the
+ * outer 'uint32_t locked'; a single int variable is used now (the wait
+ * helper's result is also logged with %d, so int is the matching type).
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	int retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is optional; only wait when the field exists */
+	if ((true) && p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+/*
+ * Wait for the SDRAM controller to finish calibration - on some adapters we
+ * have seen calibration time of 2.3 seconds.
+ *
+ * Each attempt first waits for the DDR4 PLL/MMCMs to lock; on calibration
+ * failure the DDR domain is reset and the attempt is retried (max 5 times).
+ * Returns 0 on success, non-zero on timeout/failure.
+ *
+ * Fixes: n_result_mask is uint64_t, so it is printed with PRIX64 instead of
+ * %08lX (wrong on 32-bit long platforms); the retry-max diagnostic no longer
+ * calls nthw_sdc_get_states() with a NULL handle when no SDC module exists.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				/* Diagnostic dump only possible when an SDC module was found */
+				if (p_nthw_sdc) {
+					uint64_t n_result_mask;
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								    &n_result_mask);
+
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Full NT200A0x reset sequence: assert all domain resets to defaults, bring
+ * the SYS/CORE MMCMs out of reset and wait for lock, re-enable the RAB buses,
+ * de-assert the remaining domain resets (TMC/RPP/DDR4/SDC/MAC), wait for
+ * SDRAM calibration, then run the timesync/PTP reset steps while verifying
+ * the sticky MMCM/PLL unlock bits at each stage.
+ * Returns 0 on success, -1 on any failed lock/calibration/sticky check.
+ * NOTE: the order of the register writes below follows the documented HW
+ * bring-up sequence - do not reorder.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): dead code -- this PTP MMCM branch is compiled out ('if (false)') */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Prepare the NT200A0x reset context: reset/setup the RAB buses, probe the
+ * AVR and I2C buses, then detect which Si-Labs clock synthesizer is fitted
+ * (Si5340 on NT200A02/NT200A01 build-2, Si5338 on old NT200A01 build-1) and
+ * record its model, I2C address and the HW id in *p_rst.
+ * Returns negative on failure to detect a clock synth.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/*
+	 * NOTE(review): the results of the AVR probe and the first I2C scan are
+	 * overwritten without being checked -- confirm these are best-effort.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	/* NOTE(review): 'res' still holds the last iic_scan result here -- verify intent */
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Context for the NT200A0x reset sequence: FPGA identity, detected clock
+ * synthesizer, and handles to the RST/CTRL/STAT/STICKY/POWER register fields.
+ * Optional fields that do not exist on a given FPGA image are left NULL and
+ * must be NULL-checked before use.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synth (e.g. 5340/5338) and its 7-bit I2C address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (per-domain reset bits) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers (MMCM/PLL lock status) */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers (latched unlock events) */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional per-product callbacks for SerDes/PCS RX reset handling */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate and zero-initialize a GPIO PHY instance.
+ * Returns the new object, or NULL on allocation failure; release it with
+ * nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc() zeroes the object in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/* Release a GPIO PHY instance; safe to call with NULL. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the object before handing it back to the allocator */
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Attach to GPIO_PHY module instance 'n_instance' and resolve its CFG/GPIO
+ * register fields for both ports. Optional fields are looked up with
+ * register_query_field() and may be NULL on images without them.
+ * When called with p == NULL the function only probes for the module:
+ * returns 0 if present, -1 if not. Otherwise returns 0 on success, -1 when
+ * the instance does not exist.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	/* probe-only mode: report module presence without initializing */
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* Fetch current CFG register contents from hardware */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Return true when the GPIO LPMODE pin reads non-zero (low power active). */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	/* Reject out-of-range interface indices */
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* Return true when the port interrupt is asserted (INT_B pin is active-low). */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* Return true when the port is held in reset (RESET_B pin is active-low). */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* Return true when a module is present (MODPRS_B pin is active-low). */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/*
+ * Return true when the PLL interrupt pin is asserted. HW without "PLL_INTR"
+ * (INTR from the SyncE jitter attenuator) always reports false.
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	nt_field_t *fld = p->mpa_fields[if_no].gpio_pll_int;
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	if (!fld)
+		return false;
+	return field_get_updated(fld) != 0;
+}
+
+/* Return true when the emulated port RXLOS pin reads non-zero; false when absent. */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	nt_field_t *fld = p->mpa_fields[if_no].gpio_port_rxlos;
+
+	if (!fld)
+		return false;
+	return field_get_updated(fld) != 0;
+}
+
+/* Drive the LPMODE pin (set = low power on) and switch the pin to output. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Drive the RESET_B pin (active-low: enable clears the pin, disable sets it)
+ * and switch the pin to output.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/* Drive the emulated port RXLOS pin; silently ignored when not implemented. */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	nt_field_t *fld = p->mpa_fields[if_no].gpio_port_rxlos;
+
+	if (!fld)
+		return;
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Restore the default pin directions for one interface: all monitored pins
+ * become inputs; the emulated RXLOS pin (when present) becomes an output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *flds = &p->mpa_fields[if_no];
+
+	field_set_flush(flds->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(flds->cfg_int); /* enable input */
+	field_set_flush(flds->cfg_reset); /* enable input */
+	field_set_flush(flds->cfg_mod_prs); /* enable input */
+	if (flds->cfg_port_rxlos)
+		field_clr_flush(flds->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+/* Number of PHY interfaces (ports) served by one GPIO_PHY module instance */
+#define GPIO_PHY_INTERFACES (2)
+
+/*
+ * Register-field handles for one PHY interface.
+ * cfg_* fields select pin direction (set = input, clear = output);
+ * gpio_* fields carry the pin values themselves.
+ * The *_port_rxlos fields are optional and may be NULL on FPGA images
+ * without RXLOS emulation.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* Driver instance for one GPIO_PHY FPGA module */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* All accessors below assert (and return a safe default) on a bad if_no */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate a zero-initialized HIF driver instance.
+ * Returns NULL on allocation failure; release with nthw_hif_delete().
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc zero-fills in one step (the original used malloc+memset) */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/* Scrub and free a HIF instance; a NULL argument is a no-op. */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a HIF (Host InterFace) driver instance to FPGA module instance
+ * n_instance and cache all register/field handles.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 if not. Otherwise returns 0 on success, -1 when
+ * the module instance does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * BUILD_SEED and CORE_SPEED regs/fields are not present on HIF.
+	 * The original kept always-false conditionals here; plain NULL
+	 * assignments are equivalent and clearer.
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* HIF_STATUS is optional; its fields stay NULL when it is absent */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/*
+		 * Fix: the original re-queried HIF_STATUS here and passed the
+		 * resulting NULL register to register_query_field(); simply
+		 * leave all status fields unset instead.
+		 */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+	if (p->mp_reg_config) {
+		p->mp_fld_max_tlp =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+		p->mp_fld_max_read =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+		p->mp_fld_ext_tag =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+	} else {
+		p->mp_fld_max_tlp = NULL;
+		p->mp_fld_max_read = NULL;
+		p->mp_fld_ext_tag = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Tune the PCIe3 link parameters (max read-request size and extended
+ * tags) on adapters that expose the HIF_CONFIG register.
+ * Original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/* The NT40E3 needs no PCIe3 tuning */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT40E3)
+		return 0;
+
+	if (p->mp_fld_max_read) {
+		/*
+		 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+		 * (index=5), but the server crashed. For now we need to limit this value to
+		 * 512 (index=2)
+		 */
+		const uint32_t n_max_read_req_size =
+			field_get_updated(p->mp_fld_max_read);
+
+		if (n_max_read_req_size > 2) {
+			field_set_val_flush32(p->mp_fld_max_read, 2);
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+			       p_adapter_id_str, __func__,
+			       n_max_read_req_size);
+		}
+	}
+
+	if (p->mp_fld_ext_tag)
+		field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+	if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+		NT_LOG(INF, NTHW,
+		       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+		       p_adapter_id_str, __func__,
+		       field_get_updated(p->mp_fld_max_tlp),
+		       field_get_updated(p->mp_fld_max_read),
+		       field_get_updated(p->mp_fld_ext_tag));
+	}
+
+	return 0;
+}
+
+/*
+ * Kick the TSM sample-time mechanism by writing the magic trigger value.
+ * nthw_hif_init() explicitly allows mp_fld_sample_time to be NULL when the
+ * register is absent, so guard against a NULL dereference.
+ * Returns 0 on success, -1 when the field is unavailable.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+	return 0;
+}
+
+/*
+ * Read the raw PCI statistics counters and fixed parameters.
+ * The optional status fields report zero when absent. Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* HIF_STATUS fields are optional - report zero when not present */
+	*p_tags_in_use = p->mp_fld_status_tags_in_use ?
+			field_get_updated(p->mp_fld_status_tags_in_use) : 0;
+	*p_rd_err = p->mp_fld_status_rd_err ?
+		   field_get_updated(p->mp_fld_status_rd_err) : 0;
+	*p_wr_err = p->mp_fld_status_wr_err ?
+		   field_get_updated(p->mp_fld_status_wr_err) : 0;
+
+	return 0;
+}
+
+/*
+ * Convert the raw counters into byte rates relative to the reference
+ * clock. The rd/wr error outputs are cumulative counters maintained by
+ * the caller and are only incremented here. Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx_cnt, n_tx_cnt, n_ref_clk_cnt, n_tg_unit_size, n_tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &n_rx_cnt, &n_tx_cnt, &n_ref_clk_cnt, &n_tg_unit_size,
+			&n_tg_ref_freq, &n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	/* Without reference-clock ticks no rate can be computed */
+	if (!n_ref_clk_cnt) {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk_cnt;
+	*p_pci_rx_rate = ((uint64_t)n_rx_cnt * n_tg_unit_size * n_tg_ref_freq) /
+			(uint64_t)n_ref_clk_cnt;
+	*p_pci_tx_rate = ((uint64_t)n_tx_cnt * n_tg_unit_size * n_tg_ref_freq) /
+			(uint64_t)n_ref_clk_cnt;
+
+	return 0;
+}
+
+/* Enable statistics collection and latch a sample request (REQ flush commits both). */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while latching a final sample request. */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample the PCI statistics over a fixed 100 ms window:
+ * enable + request stats, wait, disable, then convert counters to rates.
+ * Blocks the caller for the duration of the window.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill an end-point counter block from the current statistic rates.
+ *
+ * NOTE(review): cur_tx receives the PCI RX rate and cur_rx the PCI TX
+ * rate here - presumably intentional (endpoint vs. host view of the
+ * direction), but worth confirming against the callers.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Size of one traffic-generator count unit (bytes per counter tick) */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * HIF (Host InterFace) driver instance.
+ * Caches FPGA register/field handles; optional registers/fields that are
+ * absent on a given FPGA image are left NULL by nthw_hif_init().
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* Product identification */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - always NULL (see nthw_hif_init) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt registers - optional, NULL on HIF */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	/* Required to run TSM; field may be NULL when absent */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional status register; fields NULL when absent */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* PCI statistics */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* Scratch/test registers */
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* PCIe3 tuning (optional HIF_CONFIG register) */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* Cached product identification values read at init */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* One PCIe end point's sampled counters and traffic-generator settings */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+/* R/W bit appended to the 7-bit I2C device address */
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvell 88E1111 PHY.
+ * This PHY is used in many Trispeed NIMs.
+ * In order to access this PHY, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns - start condition setup time */
+static const uint32_t susto = 4000; /* ns - stop condition setup time */
+static const uint32_t hdsta = 4000; /* ns - start condition hold time */
+static const uint32_t sudat = 250; /* ns - data setup time */
+static const uint32_t buf = 4700; /* ns - bus free time between transfers */
+static const uint32_t high = 4000; /* ns - SCL high period */
+static const uint32_t low = 4700; /* ns - SCL low period */
+static const uint32_t hddat = 300; /* ns - data hold time */
+
+/* Pulse CR.TXFIFO_RESET (set, flush, clear, flush) to empty the TX FIFO. */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Push one entry into the TX FIFO: stage the START/STOP flags and the
+ * data byte, then flush them to hardware as a single register write.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the RX FIFO into *p_data. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
+/* Soft-reset the I2C controller by writing the reset key (0x0A) to SOFTR. */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Enable the I2C controller by setting CR.EN. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report whether a transfer is in progress on the bus (SR.BB non-zero). */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Report whether the RX FIFO is empty (SR.RXFIFO_EMPTY non-zero). */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * Program the I2C timing registers.
+ * n_iic_cycle_time is the controller clock cycle time in ns
+ * (e.g. 125 MHz -> 8 ns); each ns requirement above is converted into
+ * controller clock ticks.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	uint32_t n_ticks;
+
+	n_ticks = susta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusta, &n_ticks, 1);
+
+	n_ticks = susto / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusto, &n_ticks, 1);
+
+	n_ticks = hdsta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thdsta, &n_ticks, 1);
+
+	n_ticks = sudat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsudat, &n_ticks, 1);
+
+	n_ticks = buf / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tbuf, &n_ticks, 1);
+
+	n_ticks = high / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thigh, &n_ticks, 1);
+
+	n_ticks = low / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tlow, &n_ticks, 1);
+
+	n_ticks = hddat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thddat, &n_ticks, 1);
+
+	return 0;
+}
+
+/*
+ * Allocate a zero-initialized IIC driver instance.
+ * Returns NULL on allocation failure; release with nthw_iic_delete().
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc zero-fills in one step (the original used malloc+memset) */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Bind an IIC driver instance to FPGA module instance n_iic_instance,
+ * cache all register/field handles and bring the controller to an
+ * operational state (soft reset, enable, timing, TX-FIFO reset).
+ *
+ * n_iic_cycle_time is the controller clock cycle time in ns; timing
+ * registers are only programmed when it is non-zero.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 if not. Otherwise returns 0 on success, -1 when
+ * the module instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All-negative arguments select the built-in retry defaults */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Timing registers */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* FIFO registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Scrub and free an IIC instance; a NULL argument is a no-op. */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Configure poll delay (us) and per-operation retry budgets.
+ * Any negative argument selects that parameter's built-in default.
+ * Always returns 0.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay >= 0) ? n_poll_delay : 10;
+	p->mn_bus_ready_retry = (n_bus_ready_retry >= 0) ? n_bus_ready_retry : 1000;
+	p->mn_data_ready_retry = (n_data_ready_retry >= 0) ? n_data_ready_retry : 1000;
+	p->mn_read_data_retry = (n_read_data_retry >= 0) ? n_read_data_retry : 10;
+	p->mn_write_data_retry = (n_write_data_retry >= 0) ? n_write_data_retry : 10;
+	return 0;
+}
+
+/*
+ * Read data_len bytes from device dev_addr starting at register reg_addr
+ * into p_void, retrying the whole transfer up to mn_read_data_retry times.
+ * Returns 0 on success, -1 when the retry budget is exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	/* Retry the full transfer until it succeeds or the budget runs out */
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C read transaction: write the register address, issue a
+ * repeated start with the read bit, then pop data_len bytes from the RX
+ * FIFO into p_byte. Returns 0 on success, -1 on bus/data timeout.
+ *
+ * Restructured with guard clauses; also removes the unreachable trailing
+ * "return 0;" the original had after an if/else where both branches
+ * already returned.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write data_len bytes from p_void to device dev_addr starting at
+ * register reg_addr, retrying the whole transfer up to
+ * mn_write_data_retry times.
+ * Returns 0 on success, -1 when the retry budget is exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	/* Retry the full transfer until it succeeds or the budget runs out */
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C write transaction: send the device address with the
+ * write bit, the register address, then data_len bytes from p_byte
+ * (stop bit on the last byte).
+ * Returns 0 on success, -1 on invalid length or bus-not-ready.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			/*
+			 * NOTE(review): this wait loop has no retry bound and
+			 * will spin forever if the bus never becomes ready -
+			 * presumably acceptable at this point of a transfer,
+			 * but worth confirming.
+			 */
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ *
+ * Polls SR.BB up to mn_bus_ready_retry times, sleeping mn_poll_delay us
+ * between polls. Returns true when the bus became idle, false on timeout.
+ *
+ * Fix: the original returned "count != 0" at the end, which reported
+ * success when the counter ran past zero to -1, and failure when the bus
+ * became ready on the very poll where the counter stood at zero. Report
+ * the actually observed bus state instead.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	return !b_bus_busy;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ *
+ * Polls SR.RXFIFO_EMPTY up to mn_data_ready_retry times, sleeping
+ * mn_poll_delay us between polls. Returns true when data arrived,
+ * false on timeout.
+ *
+ * Fix: same off-by-one as nthw_iic_bus_ready() - return the observed
+ * FIFO state instead of comparing the loop counter against zero.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	return !b_rx_fifo_empty;
+}
+
+/*
+ * Probe a single device address by reading one byte from n_reg_addr.
+ * Returns the readbyte result: 0 when a device answered, non-zero otherwise.
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = -1;
+	const int res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr,
+					 (uint8_t)n_reg_addr, 1, &data_val);
+
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan for the first responding device starting at n_dev_addr_start,
+ * walking upward toward 127 (b_increate) or downward toward 0.
+ * Returns the found address, or -1 when nothing answered.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	const int step = b_increate ? 1 : -1;
+	int res = 0;
+	int i;
+
+	for (i = n_dev_addr_start; b_increate ? (i < 128) : (i >= 0); i += step) {
+		res = nthw_iic_scan_dev_addr(p, i, 0x00);
+		if (res == 0)
+			break;
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit address on the bus; hits are reported via the debug log. */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev_addr;
+
+	for (n_dev_addr = 0; n_dev_addr < 128; n_dev_addr++)
+		(void)nthw_iic_scan_dev_addr(p, n_dev_addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/*
+ * Driver state for one Xilinx AXI IIC controller instance in the FPGA.
+ * Holds the FPGA/module handles, retry/timing parameters and cached
+ * register/field handles resolved at init time.
+ */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_iic;
+	int mn_iic_instance;
+
+	/* I2C bus cycle time and software poll/retry budgets */
+	uint32_t mn_iic_cycle_time;
+	int mn_poll_delay;
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* Bus timing registers (setup/hold/high/low times) */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO data plus start/stop condition flags */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt threshold */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft reset (key-protected) */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Lifecycle: allocate, bind to FPGA instance, free */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Register-level transfers; return 0 on success */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers (diagnostics) */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/* Allocate a zero-initialized MAC/PCS context; returns NULL on OOM. */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc yields the same zeroed state as the former malloc+memset */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Free a context created by nthw_mac_pcs_new(); NULL is accepted. */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+	/* scrub the struct so stale handles are not reused after free */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	/* p == NULL is a pure presence probe: report whether the module exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	/* the port number is stored as uint8_t, so the instance must fit */
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	/*
+	 * Resolve and cache all register/field handles used by this module.
+	 * module_get_register()/register_get_field() are for mandatory
+	 * entries; *_query_* variants may return NULL for optional ones.
+	 */
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		/* product-specific lock masks are filled in below */
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		/* MAC_PCS_CONFIG: datapath resets/enables and test patterns */
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP - optional field, may be NULL */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/*
+		 * GTY_CTL: newer images have a combined register; older ones
+		 * split it into GTY_CTL_RX/GTY_CTL_TX.  Try the combined one
+		 * first and fall back to the split pair.
+		 */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		/* TIMESTAMP_COMP is optional - fields resolved only if present */
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/* wrapper - for ease of use */
+/*
+ * Select the TX data source: host when enable is true, traffic generator
+ * (TFG) otherwise.  The two selects are driven as mutually exclusive.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS RX datapath. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Enable or disable the PCS TX datapath. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set/clear the "TX source is host" select bit. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set/clear the "TX source is traffic generator (TFG)" select bit. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Select end-of-packet (true) vs start-of-packet (false) timestamping.
+ * The field is optional; silently does nothing when absent on this image.
+ */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (!fld)
+		return;
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when every implemented virtual lane reports both block lock and
+ * demuxed lane lock (masks were set per product in nthw_mac_pcs_init()).
+ */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+/* Assert (true) or release (false) the TX path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (true) or release (false) the RX path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Return the current (freshly read) state of the RX path reset bit. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Set/clear the TX "send RFI" (remote fault indication) bit. */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set/clear the RX force-resync control bit. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	/* refresh the shadow register before flushing the new bit value */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when all four GTY RX reset-done fields report done (masked by the
+ * product-specific mask).  Fields are read in order 0..3 and the scan stops
+ * at the first failure, matching the original short-circuit evaluation.
+ */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *const fld[4] = {
+		p->mp_fld_gty_stat_rx_rst_done0, p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2, p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(fld[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * True when all four GTY TX reset-done fields report done (masked by the
+ * product-specific mask).  Fields are read in order 0..3 and the scan stops
+ * at the first failure, matching the original short-circuit evaluation.
+ */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *const fld[4] = {
+		p->mp_fld_gty_stat_tx_rst_done0, p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2, p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(fld[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Enable/disable host-side loopback on all four GTY lanes.
+ * Value 2 selects the loopback mode, 0 is normal operation
+ * (encoding per the FPGA GTY_LOOP register definition).
+ */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_val = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Enable/disable line-side loopback on all four GTY lanes.
+ * Value 4 selects the loopback mode, 0 is normal operation
+ * (encoding per the FPGA GTY_LOOP register definition).
+ */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_val = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP8 error counters for all 20 PCS lanes.
+ * NOTE(review): the name implies the read clears the counters - that is
+ * implied by register_update() reading BIP_ERR, but confirm clear-on-read
+ * semantics against the register specification.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	/* log only the lanes that actually recorded errors */
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Store the LSB of the PCS RX status field (0 or 1) into *status. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* Return the current high bit-error-rate (HI_BER) indication. */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the LINK_SUMMARY register once and fan its fields out to the output
+ * pointers.  Every output parameter is optional; NULL pointers are skipped.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	/* single register read; the field getters below use the cached value */
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	/* "partial" = some but not all lanes locked -> needs a reset */
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+/*
+ * Enable or disable RS-FEC: 0 enables FEC, all five control bits set
+ * selects bypass.  Both datapaths are reset so the new state takes effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t ctrl_val = enable ? 0 : (1 << 5) - 1;
+
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, ctrl_val);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Return the FEC_STAT bypass flag (FEC disabled/bypassed). */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* Return the FEC_STAT valid flag. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* Return the FEC lane alignment flag. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+/* True if at least one of the four FEC alignment-marker locks is set. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True only when all four FEC alignment-marker locks are set. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* Dump all FEC_STAT fields at DBG log level for diagnostics. */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	/* fixed label: the fourth value is AM_LOCK_3, not AM_LOCK_0 */
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read (and log, if non-zero) the FEC corrected/uncorrected codeword
+ * counters.
+ * NOTE(review): no explicit write is performed here, so the "reset"
+ * presumably relies on the counters being clear-on-read in hardware —
+ * confirm against the register specification.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the GTY RX buffer status for all four lanes.
+ * Returns true (and logs the per-lane status) only when at least one
+ * lane both changed since the last read AND currently reports a
+ * non-zero status; otherwise returns false.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Program the GTY transmitter analog tuning (pre-cursor, differential
+ * swing, post-cursor) for one lane. Each value is masked to 5 bits
+ * (0x1F) before being written and flushed.
+ * NOTE(review): 'lane' must be 0-3; any other value silently skips all
+ * three writes but still emits the debug log below.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * Only the lowest bit of 'mode' is used; it is written to all four
+ * lanes, then the equalizer reset is pulsed (1 ms) to latch the change.
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	register_update(p->mp_reg_gty_ctl);
+	/* Lanes 0-2 are staged; the lane-3 write flushes all four at once */
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	/* NOTE(review): c_mac_pcs_receiver_mode_dfe is presumably defined
+	 * earlier in this file; the header exposes
+	 * nthw_mac_pcs_receiver_mode_dfe — confirm they agree.
+	 */
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/*
+ * Set or clear the TX differential-pair polarity swap for one GTY lane.
+ * 'lane' must be 0-3; other values silently write nothing (the log is
+ * still emitted).
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Set or clear the RX differential-pair polarity swap for one GTY lane.
+ * 'lane' must be 0-3; other values silently write nothing (the log is
+ * still emitted).
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set the port LED mode (see enum nthw_mac_pcs_led_mode_e in the header). */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	/* Read-modify-write: refresh the shadow value, then write + flush */
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/* Set the RX timestamp compensation delay; no-op if the field is absent. */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Current block-lock status bits (fresh hardware read). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Mask of valid block-lock bits (cached at init; no hardware access). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Current demuxed virtual-lane lock bits (fresh hardware read). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Mask of valid virtual-lane lock bits (cached at init; no hardware access). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* Port LED behaviour selected via nthw_mac_pcs_set_led_mode() */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* Receiver equalization modes for nthw_mac_pcs_set_receiver_equalization_mode() */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Per-port MAC/PCS context: FPGA module handle plus cached pointers to
+ * the registers and fields the driver accesses at runtime.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+/* Lifecycle */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+/* Reset / enable control and basic status */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+/* Link summaries: any output pointer semantics defined in the .c file */
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+/* FEC control and statistics */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+
+/* GTY transceiver tuning and polarity (lane must be 0-3) */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+/* Lock field/mask accessors */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Refresh the field's shadow value, then set (set==true) or clear it
+ * and flush the write to hardware. Safe no-op when 'f' is NULL, which
+ * lets callers pass optional fields unconditionally.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (f) {
+		field_get_updated(f);
+		if (set)
+			field_set_flush(f);
+		else
+			field_clr_flush(f);
+	}
+}
+
+/*
+ * Allocate a zero-initialized MAC PCS XXV context.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc() replaces malloc()+memset(): zeroes the struct in one call */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Free a context allocated by nthw_mac_pcs_xxv_new(). NULL is accepted.
+ * The struct is zeroed before free() to make stale-pointer reuse fail
+ * loudly (debug aid, not a security wipe).
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+		free(p);
+	}
+}
+
+/*
+ * Map a channel index to a port number.
+ * NOTE(review): for index 0 the module instance number is used instead —
+ * presumably single-channel cores derive the port from the instance;
+ * confirm against nthw_mac_pcs_xxv_init() callers.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read LINK_SUMMARY for channel 'index' and report the requested fields.
+ * Any output pointer may be NULL, in which case that field is skipped.
+ * One register_update() is done so all sampled fields are coherent.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* Bug fix: the assert must run before 'p' is dereferenced */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/*
+ * Thin wrappers: each sets or clears one configuration/reset bit on
+ * the per-channel register set selected by 'index' and flushes it via
+ * nthw_mac_pcs_xxv_field_set_or_clr_flush() (no-op if field absent).
+ */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set LFI and RFI together with a single register flush (atomic write). */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+/* DFE is active when the LPM enable bit is clear (fresh hardware read). */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
+/*
+ * Select the RX equalizer: enable=true -> DFE (LPM=0), false -> LPM.
+ * The equalizer reset is then pulsed 1->0 to latch the new setting.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* GTY lane polarity / inhibit / loopback controls and reset status reads. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Host loopback writes 2 (not 1) when enabled; 0 disables. */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True while the user RX clock-domain reset is asserted (fresh read). */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+/* True while the user TX clock-domain reset is asserted (fresh read). */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* NOTE(review): == 3 requires both lock bits set; presumably
+	 * 10G-only cores still report 3 here — confirm against the FPGA
+	 * register description.
+	 */
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/* Ready when the QPLL(s) are locked and both user resets are released. */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+/* True if clause-73 auto-negotiation is enabled (fresh hardware read). */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Pulse the speed-toggle field to request a 10G/25G speed change. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set the port LED mode (read-modify-write, then flush). */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Read (and log, if non-zero) the RS-FEC corrected/uncorrected codeword
+ * counters for channel 'index'.
+ * NOTE(review): no explicit write occurs, so "reset" presumably relies
+ * on clear-on-read counters — confirm against the register spec.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Program the RX timestamp compensation delay (read-modify-write + flush). */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+/* Program the TX timestamp compensation delay (read-modify-write + flush). */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+/* Select timestamping at end-of-packet (true) or start-of-packet (false). */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* GTY transmitter analog tuning: differential swing, pre- and post-cursor. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+/* Enable/disable clause-72 link training. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Auto-negotiation configuration bits (FEC requests, enable, bypass). */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Configure the port for a direct-attach-copper mode.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: auto-negotiation and
+ * link training are disabled, then the MAC/PCS and GT data paths are
+ * pulsed through reset. Any other mode triggers assert(0).
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Read the latched-low RX FEC74 lock indication for the given channel. */
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(fld);
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Read the latched-low RX RS-FEC lane alignment indication. */
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(fld);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_0
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_0
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * LED mode values accepted by nthw_mac_pcs_xxv_set_led_mode().
+ * Exact hardware semantics are defined by the FPGA LED control field;
+ * names suggest: AUTO = hardware-driven link/activity, ON/OFF = forced,
+ * PORTID = port-identification blink — confirm against FPGA documentation.
+ */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/*
+ * DAC (direct-attach cable) mode values accepted by
+ * nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): CA_25G_N/S/L presumably map to the IEEE 802.3by 25G copper
+ * cable-assembly classes CA-25G-N/-S/-L — confirm with hardware docs.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Context for one MAC_PCS_XXV FPGA module instance: the FPGA/module handles
+ * plus, per sub-module/channel, the resolved register and field handles.
+ * The initializer populates regs[i] from the per-channel register sets
+ * (e.g. MAC_PCS_XXV_LINK_SUMMARY_0..3), so regs[] is indexed by channel.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+/* Max number of sub-modules/channels (register sets _0 .. _3). */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+/*
+ * Lifecycle: allocate, initialize against an FPGA instance (n_channels
+ * selects which per-channel register sets are resolved), and free.
+ * The 'index' parameter on the accessors below selects the sub-module/
+ * channel (0 .. NTHW_MAC_PCS_XXV_NUM_ELEMS - 1).
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+/* Read a snapshot of the LINK_SUMMARY register fields for one channel. */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+/* CORE_CONF controls: enable/reset/fault-insertion per channel. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+/* SUB_RST controls: assert/deassert individual sub-resets. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+/* GTY transceiver controls: equalization, polarity, inhibit, loopback. */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+/* SUB_RST_STATUS queries. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* LINK_SPEED controls (not available on the 8x10G variant). */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+/* GTY signal-conditioning: differential swing, pre/post cursor emphasis. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* Auto-negotiation / link-training configuration. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+/* Timestamp compensation delays (units defined by hardware — see FPGA doc). */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate and zero-initialize a PCI read test generator context.
+ * Returns NULL on allocation failure; release with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/*
+ * Scrub and free a context allocated by nthw_pci_rd_tg_new().
+ * Safe to call with NULL.
+ */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+	/* Clear the struct before release so stale pointers fail fast. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Look up the PCI_RD_TG module instance on the FPGA and resolve the
+ * register/field handles used by this driver.
+ *
+ * Probe mode: when p == NULL the function only checks whether the module
+ * instance exists (returns 0 if present, -1 if not) without touching state.
+ * Otherwise returns 0 on success or -1 (with an error log) if the instance
+ * does not exist.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	/* Probe-only call: report presence of the module, nothing else. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	/* Product parameter: defaults to 1 when not specified by the FPGA. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* TG_RDDATA0/1: low/high halves of the DMA physical address. */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* TG_RDDATA2: request size and wait/wrap control bits. */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id (query: may be NULL when field is absent) */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address into the low/high address fields.
+ * Uses explicit 64-bit constants: the previous mask (1UL << 32) is undefined
+ * behavior on targets where unsigned long is 32 bits wide (shift >= width).
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Write the TG RAM address field and flush it to the hardware register. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	nt_field_t *ram_addr_fld = p->mp_fld_pci_rd_tg_ram_addr;
+
+	field_set_val_flush32(ram_addr_fld, n_ram_addr);
+}
+
+/*
+ * Stage the request size and the wait/wrap control bits, then flush the
+ * shared TG_RDDATA2 register once via the last field written.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	nt_field_t *size_fld = p->mp_fld_pci_rd_tg_req_size;
+	nt_field_t *wait_fld = p->mp_fld_pci_rd_tg_wait;
+	nt_field_t *wrap_fld = p->mp_fld_pci_rd_tg_wrap;
+
+	field_set_val32(size_fld, req_size);
+	field_set_val32(wait_fld, wait);
+	field_set_val32(wrap_fld, wrap);
+	field_flush_register(wrap_fld);
+}
+
+/* Write the iteration count to the RD_RUN register, starting the generator. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	nt_field_t *iter_fld = p->mp_fld_pci_rd_tg_run_iteration;
+
+	field_set_val_flush32(iter_fld, n_iterations);
+}
+
+/* Read back the current TG_CTRL ready flag from hardware. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	nt_field_t *rdy_fld = p->mp_fld_pci_rd_tg_ctrl_rdy;
+
+	return field_get_updated(rdy_fld);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * PCI read test generator (PCI_RD_TG) context: FPGA/module handles plus the
+ * resolved register and field handles populated by nthw_pci_rd_tg_init().
+ * NOTE(review): this header uses nt_fpga_t/nt_field_t/uint32_t/bool but
+ * includes nothing itself — it relies on the including .c file pulling in
+ * the FPGA model and stdint/stdbool headers first; confirm this is intended.
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	/* TG_RDDATA0/1: DMA physical address, low/high 32-bit halves */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	/* TG_RDDATA2: request size, optional VF host id, wait/wrap bits */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* may be NULL (optional field) */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, free. */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Test-generator configuration and control. */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI_TA handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc() zeroes in one step (replaces malloc() + memset()). */
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/* Scrub and release a PCI_TA handle; NULL is accepted and ignored. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the PCI_TA module and cache its register/field handles.
+ * When called with p == NULL this only probes for the module's presence:
+ * it returns 0 if the instance exists and -1 otherwise.
+ * Returns 0 on success, -1 if the instance is not found.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* Product parameter: defaults to 1 when not present in the FPGA image. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* Counter registers read back by the nthw_pci_ta_get_* accessors. */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write the enable bit and flush the control register to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the current good-packet counter from hardware into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the current bad-packet counter from hardware into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the current length-error counter from hardware into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the current payload-error counter from hardware into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * Handle for the PCIe test-analyzer (PCI_TA) FPGA module.
+ * Caches the module/register/field lookups resolved by nthw_pci_ta_init().
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_pci_ta;	/* resolved PCI_TA module */
+	int mn_instance;	/* module instance index */
+
+	int mn_param_pci_ta_tg_present;	/* product parameter: TA/TG present */
+
+	nt_register_t *mp_reg_pci_ta_ctrl;	/* control register + enable bit */
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;	/* good-packet counter */
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;	/* bad-packet counter */
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;	/* length-error counter */
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;	/* payload-error counter */
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+/* Resolve module/registers/fields; returns 0 on success, -1 otherwise. */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_WR_TG handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc() zeroes in one step (replaces malloc() + memset()). */
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/* Scrub and release a PCI_WR_TG handle; NULL is accepted and ignored. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the PCI_WR_TG module and cache its register/field handles.
+ * When called with p == NULL this only probes for the module's presence:
+ * it returns 0 if the instance exists and -1 otherwise.
+ * Returns 0 on success, -1 if the instance is not found.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* Product parameter: defaults to 1 when not present in the FPGA image. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* WRDATA0/WRDATA1: low/high halves of the DMA physical address. */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* WRDATA2: request size plus inc/wait/wrap mode flags. */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id - query (not get): may legitimately be absent */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as two 32-bit register writes.
+ * Mask with a ULL constant: the previous (1UL << 32) is undefined behavior
+ * on targets where unsigned long is 32 bits wide (ILP32).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xFFFFFFFFULL));
+}
+
+/* Program the generator RAM address and flush to hardware. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Set request size and inc/wait/wrap flags, then flush the shared data2
+ * register once (all four fields live in the same register).
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	/* single flush commits all fields of the data2 register */
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the generator for the given number of iterations. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the generator ready flag from hardware. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * Handle for the PCIe write traffic-generator (PCI_WR_TG) FPGA module.
+ * Caches the module/register/field lookups resolved by nthw_pci_wr_tg_init().
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_pci_wr_tg;	/* resolved PCI_WR_TG module */
+	int mn_instance;	/* module instance index */
+
+	int mn_param_pci_ta_tg_present;	/* product parameter: TA/TG present */
+
+	/* data0/data1: low/high halves of the DMA physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* data2: request size, optional host id, and mode flags */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* optional; may be NULL */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	/* RAM address to program */
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	/* run control: number of iterations */
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	/* control/status: ready flag */
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	/* sequence counter */
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+/* Resolve module/registers/fields; returns 0 on success, -1 otherwise. */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate a zero-initialized PCIe3 handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc() zeroes in one step (replaces malloc() + memset()). */
+	return calloc(1, sizeof(nthw_pcie3_t));
+}
+
+/* Scrub and release a PCIe3 handle; NULL is accepted and ignored. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the PCIE3 module, cache its register/field handles and apply the
+ * initial setup (disable marker scheme and bifurcation).
+ * When called with p == NULL this only probes for the module's presence:
+ * it returns 0 if the instance exists and -1 otherwise.
+ * Returns 0 on success, -1 if the instance is not found.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* root-port <-> end-point error signalling */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/*
+	 * NOTE(review): p0 is finally set while p1 is cleared a second time -
+	 * presumably enabling DMA on end-point 0 only; confirm against the
+	 * PCIE3 register documentation.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Write the magic sample-time value to latch the statistics counters. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection and request an update; always returns 0. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	/* both fields share the control register - one flush commits both */
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while still requesting an update. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read all raw PCIe3 statistics counters into the caller's variables.
+ * The TG unit size and reference frequency are compile-time constants.
+ * All output pointers must be non-NULL; always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* constants, not hardware reads */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw PCIe3 counters into byte rates and bus-utilization values
+ * (utilization in parts-per-million of the sampled reference-clock window).
+ * All output pointers must be non-NULL; always returns 0.
+ *
+ * Fix: previously *p_pci_rx_rate, *p_pci_tx_rate and *p_tag_use_cnt were
+ * left uninitialized when ref_clk_cnt was 0; now every output is assigned
+ * on both paths.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	*p_ref_clk_cnt = ref_clk_cnt;
+	*p_tag_use_cnt = tag_use_cnt;
+
+	if (ref_clk_cnt) {
+		/* scale counts to a per-second rate over the sample window */
+		*p_pci_rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+
+		*p_pci_nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+	} else {
+		/* no sample window: report zero rates, avoid division by zero */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/* Pre-sample hook: intentionally a no-op for PCIe3; always returns 0. */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/* Post-sample hook: read current rates/utilization into the counter struct. */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	/*
+	 * NOTE(review): cur_tx receives the p_pci_rx_rate argument and cur_rx
+	 * the p_pci_tx_rate argument - presumably a deliberate host-vs-device
+	 * direction swap; confirm intended.
+	 */
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Handle for the PCIE3 FPGA module: statistics counters, error signalling,
+ * end-point configuration and test registers.
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_pcie3;	/* resolved PCIE3 module */
+	int mn_instance;	/* module instance index */
+
+	/* statistics control (enable + request latch) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port -> end-point error signalling */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* end-point -> root-port error signalling */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* end-point configuration incl. per-EP DMA allow masks */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* marker address (cleared during init) */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never assigned in nthw_pcie3_init() -
+	 * they remain NULL after the zeroing allocation; confirm intended. */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+/* Resolve module/registers/fields; returns 0 on success, -1 otherwise. */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate a zero-initialized SDC handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc() zeroes in one step (replaces malloc() + memset()). */
+	return calloc(1, sizeof(nthw_sdc_t));
+}
+
+/* Scrub and release an SDC handle; NULL is accepted and ignored. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the SDC module and cache its control/status/level field handles.
+ * When called with p == NULL this only probes for the module's presence:
+ * it returns 0 if the instance exists and -1 otherwise.
+ * Returns 0 on success, -1 if the instance is not found.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		/* only field handles are kept; register handles are transient */
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Append one status field to *pn_mask and compare it against its expected
+ * value (all-ones or all-zeros). Returns 1 on mismatch, 0 otherwise.
+ * The field mask is computed in 64-bit arithmetic: the previous
+ * (1 << n_val_width) is undefined behavior for widths >= 31.
+ */
+static int nthw_sdc_check_field(nt_field_t *p_fld, uint64_t *pn_mask,
+			      bool b_expect_ones)
+{
+	const int n_width = field_get_bit_width(p_fld);
+	const uint32_t n_fld_mask = (uint32_t)(((uint64_t)1 << n_width) - 1);
+	const uint32_t n_val = field_get_updated(p_fld);
+
+	*pn_mask = (*pn_mask << n_width) | (n_val & n_fld_mask);
+	if (b_expect_ones)
+		return n_val != n_fld_mask;
+	return n_val != 0;
+}
+
+/*
+ * Read the SDC status fields, pack them into *pn_result_mask (calib,
+ * init_done, mmcm_lock, pll_lock, resetting - most significant first) and
+ * count fields not in their expected state.
+ * Returns -1 on NULL arguments, otherwise the error count (0 = all ok).
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	uint64_t n_mask = 0;
+	int n_err_cnt = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	n_err_cnt += nthw_sdc_check_field(p->mp_fld_stat_calib, &n_mask, true);
+	n_err_cnt += nthw_sdc_check_field(p->mp_fld_stat_init_done, &n_mask, true);
+	n_err_cnt += nthw_sdc_check_field(p->mp_fld_stat_mmcm_lock, &n_mask, true);
+	n_err_cnt += nthw_sdc_check_field(p->mp_fld_stat_pll_lock, &n_mask, true);
+	/* resetting is expected to read as zero */
+	n_err_cnt += nthw_sdc_check_field(p->mp_fld_stat_resetting, &n_mask, false);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until calib/init_done/mmcm_lock/pll_lock are all set and resetting is
+ * cleared, each bounded by n_poll_iterations polls of n_poll_interval.
+ * Returns the number of fields that failed to reach their expected state
+ * (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int res;
+	int n_err_cnt = 0;
+
+	res = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	/* resetting must clear, unlike the lock/done fields above */
+	res = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Handle for the SDC (SDRAM controller) FPGA module.
+ * Caches the field lookups resolved by nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_sdc;	/* resolved SDC module */
+	int mn_instance;	/* module instance index */
+
+	/* control fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* status fields polled by nthw_sdc_wait_states()/get_states() */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* counters and fill levels */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+nthw_sdc_t *nthw_sdc_new(void);
+/* Resolve module/fields; returns 0 on success, -1 otherwise. */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/*
+ * Allocate and zero-initialize a Si5340 context.
+ * Returns NULL on allocation failure; release with nthw_si5340_delete().
+ */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_si5340_t *p = calloc(1, sizeof(nthw_si5340_t));
+
+	return p;
+}
+
+/*
+ * Bind the Si5340 context to an I2C controller and device address, and
+ * select register page 0 on the device.
+ * Always returns 0; the I2C write result is not checked here.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1; /* no clock profile applied yet */
+
+	/* Start on page 0 and mirror that in the cached page number */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Scrub and free a Si5340 context; NULL is accepted and ignored. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_si5340_t));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74.
+ * reg_addr encodes page in the high byte and register offset in the low byte;
+ * a page-select write is issued only when the cached page differs.
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74.
+ * Page selection mirrors nthw_si5340_read(); always returns 0 (I2C result
+ * is not propagated).
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Apply a clock-profile record list to the Si5340.
+ * Each record is (reg_addr, reg_val); two record layouts are supported,
+ * selected by data_format. Every written value is read back and verified,
+ * except the soft-reset register (0x001C).
+ * Returns 0 on success, -1 on unknown format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* Decode the next record according to the profile layout */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and verify the Si5340 locks.
+ * After writing the profile (cfg result is deliberately ignored), polls the
+ * status (0x0c) and sticky-status (0x11) registers up to 5 times at 1 s
+ * intervals until DPLL lock and SYS calibration bits (mask 0x09) are clear,
+ * then reads the 8-byte design id starting at 0x26B for debug logging.
+ * Returns 0 on success, -1 if lock/calibration is not achieved.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		/* clear sticky bits after sampling them */
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Read the NUL-terminated 8-character design id string */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a format-1 clock profile. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a format-2 clock profile. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+/* Result codes (note: the current implementation only returns 0 or -1) */
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Si5340 clock synthesizer context, accessed over I2C */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C controller handle */
+	int mn_clk_cfg;	/* applied clock profile, -1 = none */
+	uint8_t m_si5340_page;	/* cached device register page */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/*
+ * Allocate and zero-initialize a SPI v3 channel context.
+ * Returns NULL on allocation failure; release with nthw_spi_v3_delete().
+ */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_spi_v3_t *p = calloc(1, sizeof(nthw_spi_v3_t));
+
+	return p;
+}
+
+/*
+ * Tear down a SPI v3 channel: release the owned SPIM/SPIS sub-modules,
+ * then scrub and free the context itself. NULL is accepted and ignored.
+ */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (!p)
+		return;
+
+	if (p->mp_spim_mod) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+
+	if (p->mp_spis_mod) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(nthw_spi_v3_t));
+	free(p);
+}
+
+/*
+ * Set the transfer timeout used by the FIFO wait loops.
+ * NOTE(review): units appear to be ticks of NT_OS_GET_TIME_MONOTONIC_COUNTER()
+ * (compared directly against counter deltas) — confirm against the OS layer.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this module (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ * Polls the SPIM Tx-FIFO-empty flag every 1 ms until empty or until the
+ * monotonic-counter delta exceeds time_out.
+ * Returns 0 on success, -1 on timeout, or the underlying read error.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ * Polls the SPIS Rx-FIFO-empty flag every 10 ms until data is present or
+ * until the monotonic-counter delta exceeds time_out.
+ * Returns 0 on success, -1 on timeout, or the underlying read error.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			/* bugfix: message previously named a non-existent
+			 * "nthw_spis_get_rx_empty" function
+			 */
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/*
+ * Debug helper: hex-dump a buffer, 16 bytes per log line.
+ * tmp_str is safely bounded: at most 16 entries * 3 chars = 48 < 128.
+ */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
+		j++;
+
+		/* flush a full line of 16 bytes, or the final partial line */
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0'; /* drop trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Initialize the SPI v3 channel: bring up the SPIM (master) and SPIS (slave)
+ * modules for the given FPGA instance, then reset both.
+ * Errors are logged but initialization continues; the return value reflects
+ * only the final reset operation (0 on success).
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* bugfix: message previously reported nthw_spis_init here */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* bugfix: message previously reported nthw_spim_init here */
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container:
+ * a 32-bit header (opcode/error_code + size) followed by the payload in
+ * big-endian 32-bit words.
+ * On return rx_buf->size holds the number of payload bytes received.
+ * Returns 0 on success, -1 if the peer reported an error, 1 if the caller's
+ * Rx buffer is too small, or an underlying FIFO error/timeout code.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	/* Header goes first, in network byte order */
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Push the payload one 32-bit word at a time */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				/* final (possibly partial) word: zero-pad */
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the on-board AVR over the SPI v3 channel */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-board EEPROM layout of the Vital Product Data block */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER ne overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (GEN2_BNAME_SIZE bytes ascii)
+	 * (e.g. "ntmainb1e2" or "ntfront20b1")
+	 */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identification as reported by the AVR */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Caller-owned buffer descriptor for nthw_spi_v3_transfer() */
+struct tx_rx_buf {
+	uint16_t size;
+	void *p_buf;
+};
+
+/* SPI v3 channel: a SPIM (master) / SPIS (slave) module pair */
+struct nthw__spi__v3 {
+	int m_time_out;
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/*
+ * Allocate and zero-initialize a SPIM context.
+ * Returns NULL on allocation failure; release with nthw_spim_delete().
+ */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_spim_t *p = calloc(1, sizeof(nthw_spim_t));
+
+	return p;
+}
+
+/*
+ * Bind a SPIM context to FPGA module instance n_instance and cache all
+ * register/field handles.
+ * When p is NULL the call only probes for the module's existence:
+ * returns 0 if present, -1 if not.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* Control register */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* Status register */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* Data transmit/receive registers */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	/* Configuration (clock prescaler) register */
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Scrub and free a SPIM context; NULL is accepted and ignored. */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_spim_t));
+	free(p);
+}
+
+/* Soft-reset the SPIM module by writing the magic value to SRR.RST. */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIM module via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+	else
+		field_clr_all(p->mp_fld_cr_en);
+
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM Tx FIFO via the DTR register. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/*
+ * Query whether the SPIM Tx FIFO is empty (SR.TXEMPTY).
+ * Stores the result in *pb_empty; always returns 0.
+ */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* direct bool conversion; the `? true : false` was redundant */
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) != 0;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/*
+ * SPIM (SPI master) module shadow structure.
+ * Caches register/field handles looked up once in nthw_spim_init().
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* Software reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* Control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* Status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* Data transmit/receive registers */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	/* Configuration (clock prescaler) register */
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/*
+ * Allocate and zero-initialize a SPIS context.
+ * Returns NULL on allocation failure; release with nthw_spis_delete().
+ */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_spis_t *p = calloc(1, sizeof(nthw_spis_t));
+
+	return p;
+}
+
+/*
+ * Bind a SPIS context to FPGA module instance n_instance and cache all
+ * register/field handles.
+ * When p is NULL the call only probes for the module's existence:
+ * returns 0 if present, -1 if not.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* Control register */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* Status register (includes frame/read/write error flags) */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* Data transmit/receive registers */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* Sensor RAM access registers (see nthw_spis_read_sensor()) */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/* Scrub and free a SPIS context; NULL is accepted and ignored. */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_spis_t));
+	free(p);
+}
+
+/* Soft-reset the SPIS module by writing the magic value to SRR.RST. */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIS module via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+	else
+		field_clr_all(p->mp_fld_cr_en);
+
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/*
+ * Query whether the SPIS Rx FIFO is empty (SR.RXEMPTY).
+ * Stores the result in *pb_empty; always returns 0.
+ */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* direct bool conversion; the `? true : false` was redundant */
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) != 0;
+
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS Rx FIFO (DRR register) into *p_data. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS sensor RAM.
+ * Selects entry n_result_idx via RAM_CTRL (count = 1), then reads RAM_DATA.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/*
+ * SPIS (SPI slave) module shadow structure.
+ * Caches register/field handles looked up once in nthw_spis_init().
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* Software reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* Control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* Status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* Data transmit/receive registers */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* Sensor RAM access registers */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate a new TSM handle.
+ *
+ * @return Zero-initialized instance, or NULL on allocation failure.
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc() allocates and zeroes in one step - replaces malloc()+memset() */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/* Scrub and free a TSM handle; a NULL handle is accepted and ignored. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TSM (time-stamp module) handle to FPGA module instance
+ * n_instance and cache its register/field handles.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if it
+ * exists, -1 otherwise.  Returns -1 when the instance is absent.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse the handle from the first lookup instead of calling
+		 * module_get_register() twice for the same register.
+		 */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit hardware timestamp (TS_HI:TS_LO).
+ *
+ * @param p_ts Output; must be non-NULL.
+ * @return 0 on success, -1 if p_ts is NULL.
+ *
+ * NOTE(review): lo and hi are two separate register reads; assumes the
+ * FPGA latches a coherent 64-bit value across the pair - confirm.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The early NULL check above makes a second "if (p_ts)" redundant */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit time-of-day value (TIME_HI seconds : TIME_LO ns).
+ *
+ * @param p_time Output; must be non-NULL.
+ * @return 0 on success, -1 if p_time is NULL.
+ *
+ * NOTE(review): lo and hi are two separate register reads; assumes the
+ * FPGA latches a coherent 64-bit value across the pair - confirm.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The early NULL check above makes a second "if (p_time)" redundant */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/* Load the 64-bit TIME register: low 32 bits first, then the high 32 bits. */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	const uint32_t n_time_lo = (uint32_t)(n_time & 0xFFFFFFFF);
+	const uint32_t n_time_hi = (uint32_t)(n_time >> 32);
+
+	field_set_val_flush32(p->mp_fld_time_lo, n_time_lo);
+	field_set_val_flush32(p->mp_fld_time_hi, n_time_hi);
+	return 0;
+}
+
+/* Enable or disable timer T0 via its TIMER_CTRL enable bit; returns 0. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	/* Refresh the shadow before modifying a single bit of the register */
+	field_update_register(p_fld);
+	if (b_enable)
+		field_set_flush(p_fld);
+	else
+		field_clr_flush(p_fld);
+	return 0;
+}
+
+/*
+ * Set the period (max count, in ns) of timer T0, the stat toggle timer.
+ * The register shadow is refreshed before the new value is flushed.
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable or disable timer T1 via its TIMER_CTRL enable bit; returns 0. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	/* Refresh the shadow before modifying a single bit of the register */
+	field_update_register(p_fld);
+	if (b_enable)
+		field_set_flush(p_fld);
+	else
+		field_clr_flush(p_fld);
+	return 0;
+}
+
+/*
+ * Set the period (max count, in ns) of timer T1, the keep-alive timer.
+ * The register shadow is refreshed before the new value is flushed.
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/*
+ * Select the timestamp format.  The only value documented here is
+ * 0x1 (native: 10 ns units, epoch 1970-01-01); other encodings are
+ * FPGA-defined - see the TSM_CONFIG register documentation.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * Register/field handles for one TSM (time-stamp module) instance.
+ * Filled in by nthw_tsm_init(); pointers reference objects owned by
+ * the FPGA register model.
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_tsm;	/* resolved TSM module handle */
+	int mn_instance;	/* module instance number */
+
+	/* CONFIG: timestamp format selector */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL: per-timer enable bits */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	/* TIMER_T0 (stat toggle) period, ns */
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	/* TIMER_T1 (keep alive) period, ns */
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* TS: 64-bit timestamp split across lo/hi registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME: 64-bit time value, lo = ns field, hi = seconds field */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+/* Both spellings name the same handle type */
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* 64-bit TS/TIME accessors; getters return -1 if the output ptr is NULL */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+/* Timer T0 (stat toggle) / T1 (keep alive) control; always return 0 */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+/* Forward declarations: shadow-copy setters and flush helpers for the
+ * DBS memory banks (QoS, QP, DR, UW, AM), defined later in this file.
+ */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate a new DBS handle.
+ *
+ * @return Zero-initialized instance, or NULL on allocation failure.
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc() allocates and zeroes in one step - replaces malloc()+memset() */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/* Scrub and free a DBS handle; a NULL handle is accepted and ignored. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a DBS handle to FPGA module instance n_instance and cache all
+ * register/field handles.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if it
+ * exists, -1 otherwise.  Returns -1 when the instance is absent.
+ * Handles looked up with module_query_register()/register_query_field()
+ * are optional in the FPGA image and may remain NULL - callers must
+ * check before use.
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* Sanity: the product parameter should agree with the module scan */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* RX/TX global control registers */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* Queue init handshake registers (init-val and ptr are optional) */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	/* Optional idle-state registers */
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* Available-monitor (AM) control/data banks */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* Used-writer (UW) control/data banks */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* Descriptor-reader (DR) control/data banks */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* TX queue properties (virtual port mapping) */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Restore the RX control register to defaults: queues and monitors
+ * disabled, AM scan speed 8, used-writer update speed 5.  Always
+ * returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Restore the TX control register to defaults: queues and monitors
+ * disabled, AM scan speed 5, used-writer update speed 8 (note: speeds
+ * are deliberately mirrored relative to the RX defaults).  Always
+ * returns 0.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: restore RX/TX control defaults, then zero and flush
+ * every per-queue memory bank (AM, UW, DR, plus QP and QoS on TX) for
+ * all supported queues.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the RX control register: last active queue, available-monitor
+ * enable/scan speed, used-writer enable/update speed and the global RX
+ * queues enable.  Flushes the register; always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX control fields into the caller's variables.
+ * All output pointers must be non-NULL.  Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the TX control register: last active queue, available-monitor
+ * enable/scan speed, used-writer enable/update speed and the global TX
+ * queues enable.  Flushes the register; always returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Was missing relative to the RX variant: also dump the enable flag */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX control fields into the caller's variables.
+ * All output pointers must be non-NULL.  Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Start RX queue initialization: optionally preload start index/pointer
+ * (when the RX_INIT_VAL register exists in this image), then write the
+ * init command and queue number.  Always returns 0.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/* Read the RX init handshake state (init, queue, busy); always returns 0. */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Start TX queue initialization: optionally preload start index/pointer
+ * (when the TX_INIT_VAL register exists in this image), then write the
+ * init command and queue number.  Always returns 0.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/* Read the TX init handshake state (init, queue, busy); always returns 0. */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Request the idle state for an RX queue.
+ * Returns -ENOTSUP when the optional RX_IDLE register is absent, else 0.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle/busy flags (forcing a register refresh).
+ * Note: *queue is not read back from hardware and is always set to 0.
+ * Returns -ENOTSUP when the optional RX_IDLE register is absent, else 0.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request the idle state for a TX queue.
+ * Returns -ENOTSUP when the optional TX_IDLE register is absent, else 0.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle/busy flags (forcing a register refresh).
+ * Note: *queue is not read back from hardware and is always set to 0.
+ * Returns -ENOTSUP when the optional TX_IDLE register is absent, else 0.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue a subsequent get_rx_ptr() reads.
+ * Returns -ENOTSUP when the optional RX_PTR register is absent, else 0.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX queue pointer and valid flag for the queue previously
+ * selected with set_rx_ptr_queue().  *queue is always set to 0 (not
+ * read back).  Returns -ENOTSUP when RX_PTR is absent, else 0.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue a subsequent get_tx_ptr() reads.
+ * Returns -ENOTSUP when the optional TX_PTR register is absent, else 0.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX pointer for the queue previously selected with
+ * set_tx_ptr_queue(); field_get_updated() re-reads the register.
+ * *queue is reported as 0 - NOTE(review): presumably write-only; confirm.
+ *
+ * Returns 0 on success, -ENOTSUP when the FPGA image lacks TX_PTR.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Point the RX avail-monitor control register at entry @index so the
+ * next data-register flush targets that slot.
+ * NOTE(review): cnt=1 appears to mean a single-entry access - confirm.
+ */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow writers for the RX available-monitor (AM) bank, one slot per queue. */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/*
+ * Renamed from nthw_dbs_set_shadow_rx_am_data_enable for consistency with
+ * the sibling set_shadow_rx_am_data_* helpers and the TX AM path.
+ */
+static void set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Fill every field of one RX AM shadow slot; flush_rx_am_data() pushes it. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write one RX AM shadow slot to hardware: stage all fields of the
+ * data register, select the slot via the control register, then flush.
+ * The packed/int fields are optional and staged only when the FPGA
+ * image exposes them (non-NULL field pointers).
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit guest physical address is staged as two 32-bit words.
+	 * NOTE(review): the uint32_t* cast of a uint64_t relies on
+	 * little-endian layout and aliasing leniency - confirm portability.
+	 */
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Program one RX available-monitor entry: store the values in the
+ * driver shadow first, then flush that slot to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (p->mp_reg_rx_avail_monitor_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX avail-monitor control register at entry @index so the
+ * next data-register flush targets that slot.
+ */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Fill a complete TX AM shadow slot in one designated-initializer store. */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index] = (struct nthw_dbs_tx_am_data_s){
+		.guest_physical_address = guest_physical_address,
+		.enable = enable,
+		.host_id = host_id,
+		.packed = packed,
+		.int_enable = int_enable,
+	};
+}
+
+/*
+ * Write one TX AM shadow slot to hardware: stage all fields of the
+ * data register, select the slot via the control register, then flush.
+ * Optional packed/int fields are staged only when present.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* 64-bit guest physical address is staged as two 32-bit words. */
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Program one TX available-monitor entry: store the values in the
+ * driver shadow first, then flush that slot to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (p->mp_reg_tx_avail_monitor_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the RX used-writer control register at entry @index so the
+ * next data-register flush targets that slot.
+ */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow writers for the RX used-writer (UW) bank, one slot per queue. */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t idx,
+				       uint64_t gpa)
+{
+	p->m_rx_uw_shadow[idx].guest_physical_address = gpa;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[idx].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t idx,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[idx].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[idx].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t idx,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[idx].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t idx, uint32_t vec)
+{
+	p->m_rx_uw_shadow[idx].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t idx, uint32_t istk)
+{
+	p->m_rx_uw_shadow[idx].istk = istk;
+}
+
+/* Fill a complete RX UW shadow slot; flush_rx_uw_data() pushes it to HW. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t idx,
+			       uint64_t gpa, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, idx, gpa);
+	set_shadow_rx_uw_data_host_id(p, idx, host_id);
+	set_shadow_rx_uw_data_queue_size(p, idx, queue_size);
+	set_shadow_rx_uw_data_packed(p, idx, packed);
+	set_shadow_rx_uw_data_int_enable(p, idx, int_enable);
+	set_shadow_rx_uw_data_vec(p, idx, vec);
+	set_shadow_rx_uw_data_istk(p, idx, istk);
+}
+
+/*
+ * Write one RX UW shadow slot to hardware. Staging order: GPA (as two
+ * 32-bit words), host id, queue size, then the optional fields, then
+ * slot select and a single data-register flush.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	/* DBS >= v0.8 encodes queue size as the mask (2^n)-1 instead of n. */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	/* vec/istk only make sense together with the interrupt field. */
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Program one RX used-writer entry: store the values in the driver
+ * shadow first, then flush that slot to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (p->mp_reg_rx_used_writer_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX used-writer control register at entry @index so the
+ * next data-register flush targets that slot.
+ */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow writers for the TX used-writer (UW) bank, one slot per queue. */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t idx,
+				       uint64_t gpa)
+{
+	p->m_tx_uw_shadow[idx].guest_physical_address = gpa;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[idx].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t idx,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[idx].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[idx].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t idx,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[idx].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t idx, uint32_t vec)
+{
+	p->m_tx_uw_shadow[idx].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t idx, uint32_t istk)
+{
+	p->m_tx_uw_shadow[idx].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t idx,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[idx].in_order = in_order;
+}
+
+/* Fill a complete TX UW shadow slot; flush_tx_uw_data() pushes it to HW. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t idx,
+			       uint64_t gpa, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, idx, gpa);
+	set_shadow_tx_uw_data_host_id(p, idx, host_id);
+	set_shadow_tx_uw_data_queue_size(p, idx, queue_size);
+	set_shadow_tx_uw_data_packed(p, idx, packed);
+	set_shadow_tx_uw_data_int_enable(p, idx, int_enable);
+	set_shadow_tx_uw_data_vec(p, idx, vec);
+	set_shadow_tx_uw_data_istk(p, idx, istk);
+	set_shadow_tx_uw_data_in_order(p, idx, in_order);
+}
+
+/*
+ * Write one TX UW shadow slot to hardware. Same staging scheme as
+ * flush_rx_uw_data(), plus the TX-only optional in_order field.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	/* DBS >= v0.8 encodes queue size as the mask (2^n)-1 instead of n. */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	/* vec/istk only make sense together with the interrupt field. */
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Program one TX used-writer entry: store the values in the driver
+ * shadow first, then flush that slot to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (p->mp_reg_tx_used_writer_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the RX descriptor-reader control register at entry @index so
+ * the next data-register flush targets that slot.
+ */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow writers for the RX descriptor-reader (DR) bank, one slot per queue. */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t idx,
+				       uint64_t gpa)
+{
+	p->m_rx_dr_shadow[idx].guest_physical_address = gpa;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[idx].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t idx,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[idx].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[idx].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[idx].packed = packed;
+}
+
+/* Fill a complete RX DR shadow slot; flush_rx_dr_data() pushes it to HW. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t idx,
+			       uint64_t gpa, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, idx, gpa);
+	set_shadow_rx_dr_data_host_id(p, idx, host_id);
+	set_shadow_rx_dr_data_queue_size(p, idx, queue_size);
+	set_shadow_rx_dr_data_header(p, idx, header);
+	set_shadow_rx_dr_data_packed(p, idx, packed);
+}
+
+/*
+ * Write one RX DR shadow slot to hardware: stage GPA (two 32-bit words),
+ * host id, version-dependent queue size, header and optional packed
+ * field, then select the slot and flush the data register.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	/* DBS >= v0.8 encodes queue size as the mask (2^n)-1 instead of n. */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program one RX descriptor-reader entry: store the values in the
+ * driver shadow first, then flush that slot to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (p->mp_reg_rx_descriptor_reader_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX descriptor-reader control register at entry @index so
+ * the next data-register flush targets that slot.
+ */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Shadow writers for the TX descriptor-reader (DR) bank, one slot per queue. */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t idx,
+				       uint64_t gpa)
+{
+	p->m_tx_dr_shadow[idx].guest_physical_address = gpa;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[idx].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t idx,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[idx].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[idx].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t idx, uint32_t port)
+{
+	p->m_tx_dr_shadow[idx].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t idx,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[idx].packed = packed;
+}
+
+/* Fill a complete TX DR shadow slot; flush_tx_dr_data() pushes it to HW. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t idx,
+			       uint64_t gpa, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, idx, gpa);
+	set_shadow_tx_dr_data_host_id(p, idx, host_id);
+	set_shadow_tx_dr_data_queue_size(p, idx, queue_size);
+	set_shadow_tx_dr_data_header(p, idx, header);
+	set_shadow_tx_dr_data_port(p, idx, port);
+	set_shadow_tx_dr_data_packed(p, idx, packed);
+}
+
+/*
+ * Write one TX DR shadow slot to hardware: stage GPA (two 32-bit words),
+ * host id, version-dependent queue size, header, port and optional
+ * packed field, then select the slot and flush the data register.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	/* DBS >= v0.8 encodes queue size as the mask (2^n)-1 instead of n. */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program one TX descriptor-reader entry: store the values in the
+ * driver shadow first, then flush that slot to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (p->mp_reg_tx_descriptor_reader_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX queue-property control register at entry @index so the
+ * next data-register flush targets that slot.
+ */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow writer for one TX queue-property (QP) slot. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t idx,
+		uint32_t vport)
+{
+	p->m_tx_qp_shadow[idx].virtual_port = vport;
+}
+
+/* Fill a complete TX QP shadow slot (currently only the virtual port). */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t idx,
+			       uint32_t vport)
+{
+	set_shadow_tx_qp_data_virtual_port(p, idx, vport);
+}
+
+/*
+ * Write one TX QP shadow slot to hardware: stage the virtual port,
+ * select the slot, then flush the data register.
+ */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Program the virtual port property of one TX queue: store the value in
+ * the driver shadow first, then flush that slot to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (p->mp_reg_tx_queue_property_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX QoS control register at entry @index so the next
+ * data-register flush targets that slot.
+ * NOTE(review): the *_adr/*_cnt members are declared as nt_field_t
+ * pointers but named mp_reg_* (see nthw_dbs.h) - naming looks accidental.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow writers for the TX QoS bank, one slot per queue. */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t idx,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[idx].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t idx, uint32_t rate_ir)
+{
+	p->m_tx_qos_shadow[idx].ir = rate_ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t idx, uint32_t burst_bs)
+{
+	p->m_tx_qos_shadow[idx].bs = burst_bs;
+}
+
+/* Fill a complete TX QoS shadow slot; flush_tx_qos_data() pushes it to HW. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t idx, uint32_t enable,
+				uint32_t rate_ir, uint32_t burst_bs)
+{
+	set_shadow_tx_qos_data_enable(p, idx, enable);
+	set_shadow_tx_qos_data_ir(p, idx, rate_ir);
+	set_shadow_tx_qos_data_bs(p, idx, burst_bs);
+}
+
+/*
+ * Write one TX QoS shadow slot to hardware: stage enable/ir/bs, select
+ * the slot, then flush the data register.
+ * NOTE(review): the field members are named mp_reg_* but hold
+ * nt_field_t pointers (see nthw_dbs.h).
+ */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Program one TX QoS entry (enable flag, information rate, burst size):
+ * store the values in the driver shadow first, then flush to hardware.
+ *
+ * Returns 0 on success, -ENOTSUP when the register bank is absent.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (p->mp_reg_tx_queue_qos_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate as a mul/div fraction and flush it.
+ *
+ * Returns 0 on success, -ENOTSUP when the register is absent.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_H_
+#define NTHW_DBS_H_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA: shadow of one RX avail-monitor data-register slot */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;	/* staged as two 32-bit words on flush */
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;	/* only flushed when the FPGA exposes the field */
+	uint32_t int_enable;	/* only flushed when the FPGA exposes the field */
+};
+
+/* DBS_TX_AM_DATA: shadow of one TX avail-monitor data-register slot */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;	/* staged as two 32-bit words on flush */
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;	/* only flushed when the FPGA exposes the field */
+	uint32_t int_enable;	/* only flushed when the FPGA exposes the field */
+};
+
+/* DBS_RX_UW_DATA: shadow of one RX used-writer data-register slot */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* log2 entries; flushed as (2^n)-1 on DBS >= 0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;	/* interrupt vector; flushed only with int field */
+	uint32_t istk;	/* flushed only with int field */
+};
+
+/* DBS_TX_UW_DATA: shadow of one TX used-writer data-register slot */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* log2 entries; flushed as (2^n)-1 on DBS >= 0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;	/* interrupt vector; flushed only with int field */
+	uint32_t istk;	/* flushed only with int field */
+	uint32_t in_order;	/* TX-only; flushed when the FPGA exposes it */
+};
+
+/* DBS_RX_DR_DATA: shadow of one RX descriptor-reader data-register slot */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* log2 entries; flushed as (2^n)-1 on DBS >= 0.8 */
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: shadow of one TX descriptor-reader data-register slot */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* log2 entries; flushed as (2^n)-1 on DBS >= 0.8 */
+	uint32_t header;
+	uint32_t port;	/* TX-only destination port field */
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: shadow of one TX queue-property data-register slot */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* DBS_TX_QOS_DATA: shadow of one TX QoS data-register slot */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;	/* information rate */
+	uint32_t bs;	/* burst size */
+};
+
+/*
+ * DBS module instance: FPGA handle plus cached register/field pointers
+ * and per-queue software shadows of the indexed register banks.
+ * Optional registers (RX/TX_IDLE, RX/TX_PTR, the data banks, QoS) may be
+ * NULL on FPGA images that lack them; callers test the register pointer
+ * before use and return -ENOTSUP.
+ * NOTE(review): the TX QoS members below are named mp_reg_* although
+ * most of them hold nt_field_t pointers - naming looks accidental.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Indexed banks: a control register selects the slot (adr/cnt),
+	 * a data register carries the staged slot contents.
+	 */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Software shadows of the indexed register banks, one entry per queue */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Lifetime */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/* Global RX/TX control; setters return 0 or -ENOTSUP */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+/* Per-queue init/idle/pointer access */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+/* Indexed bank writers (shadow + flush); return 0 or -ENOTSUP */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_H_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Meta port type: selects which EPP register set applies (see nthw_epp_set_mtu()). */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL, /* programmed via the TXP (physical port) registers */
+	PORT_TYPE_VIRTUAL, /* programmed via the queue (virtual port) registers */
+	PORT_TYPE_OVERRIDE, /* not supported by nthw_epp_set_mtu() */
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Hardware identification info as reported by firmware.
+ * NOTE(review): struct tag "nthwhw_info_s" vs typedef "nthw_hw_info_t" looks
+ * like an inconsistent spelling; the tag is also referenced from fpga_info_t
+ * below, so confirm before renaming.
+ */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	/* Vital product data: MAC address allocation for this adapter */
+	struct vpd_info_s {
+		int mn_mac_addr_count; /* number of MAC addresses assigned */
+		uint64_t mn_mac_addr_value; /* base MAC address as an integer */
+		uint8_t ma_mac_addr_octets[6]; /* base MAC address as octets */
+	} vpd_info;
+} nthw_hw_info_t;
+
+/* Per-adapter FPGA state: identification fields, handles to the core
+ * module instances, and the PCI BAR mapping needed for register access.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident; /* packed FPGA id (product/version/revision codes) */
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Port/NIM topology counts for this FPGA image */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga; /* the instantiated FPGA model */
+
+	/* Core module handles (set during adapter init) */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/* Allocate a zero-initialized EPP instance; returns NULL on failure. */
+nthw_epp_t *nthw_epp_new(void)
+{
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/* Release an EPP instance; NULL is accepted and ignored. */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the object before freeing to catch stale users. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Probe for the presence of an EPP module instance without touching state.
+ * Returns 1 when the instance exists in this FPGA image, 0 otherwise.
+ */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	/* nthw_epp_init(NULL, ...) only performs the module lookup */
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/* Initialize an EPP instance: look up the module and resolve all register
+ * and field handles used by the other nthw_epp_* functions.
+ * When @p is NULL only the module lookup is performed (presence probe).
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory control/data registers */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* TX (physical) port MTU registers */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Queue (virtual port) MTU registers */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* TX (physical) port QoS registers */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual port QoS registers */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue-to-virtual-port mapping registers */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/* Program EPP defaults: clear every recipe category, preload the NRECIPE
+ * size-adjust recipes, set the initial MTU and disable QoS on both the
+ * physical (TXP) and virtual (queue) sides.
+ * Returns 0 (also when @p is NULL, which is treated as a no-op).
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one recipe entry per flush (count = 1) */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Preload the size-adjust recipes (none / VXLAN / VXLAN-IPv6) */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup - loop bound 2 is presumably the physical port count; confirm */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup - QoS disabled by default */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup - loop bound 128 is presumably the queue count; confirm */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup - QoS disabled by default */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ */
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ * Physical ports use the TXP MTU registers, virtual ports the queue MTU
+ * registers. Returns 0 on success (or when @p is NULL), -ENOTSUP for an
+ * unsupported port type (all MTU registers are then reset).
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the port in the TXP MTU control register */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new MTU into the TXP MTU data register */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue in the queue MTU control register */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new MTU into the queue MTU data register */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		/* Unknown port type: reset all MTU registers to defaults */
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/* Program TX-port QoS shaping for @port. QoS is enabled exactly when at
+ * least one of rate/fraction/burst is nonzero; all-zero disables shaping.
+ * Returns 0 (also when @p is NULL).
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the port, then write the QoS data record */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/* Program virtual-port QoS shaping for @port; mirrors nthw_epp_set_txp_qos()
+ * but targets the VPORT QoS registers. Returns 0 (also when @p is NULL).
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the port, then write the QoS data record */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/* Map queue @qid to virtual port @vport in the EPP queue/vport table.
+ * Returns 0 (also when @p is NULL).
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the queue, then write the vport mapping */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6: extra 70 bytes (per the identifier; confirm against HW docs) */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Default MTU programmed by nthw_epp_setup() */
+#define MTUINITVAL 1500
+/* Number of preloaded recipes (none / VXLAN / VXLAN-IPv6) */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup
+ * NOTE(review): static const arrays defined in a header are duplicated in every
+ * translation unit that includes it - consider moving the definitions to a .c file.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/* EPP module state: the module handle plus resolved register/field handles
+ * for every register group used by the nthw_epp_* functions.
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga; /* owning FPGA */
+	nt_module_t *mp_mod_epp; /* EPP module instance */
+	int mn_instance;
+	int mn_epp_categories; /* NT_EPP_CATEGORIES product parameter */
+
+	/* Recipe memory control/data */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* TX (physical) port MTU */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Queue (virtual port) MTU */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* TX (physical) port QoS */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual port QoS */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue-to-virtual-port mapping */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+/* Allocate / free an EPP instance */
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+/* Probe for presence; initialize register handles; program defaults */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+/* Runtime configuration: MTU, QoS shaping and queue/vport mapping */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated list of all FPGA images supported by this driver */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/* Module id -> name map used by nthw_fpga_mod_id_to_str();
+ * terminated by the { 0L, NULL } sentinel entry.
+ */
+static const struct {
+	const int a; /* module id (MOD_*) */
+	const char *b; /* printable module name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum
+ * Index 0 is the error/unknown entry; see get_bus_name().
+ */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/* Map a bus type id to its printable name; "ERR" for out-of-range ids.
+ * Fix: the valid-index test must be strictly less than the array size -
+ * the previous `<=` allowed a read one element past the end of a_bus_type.
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ * Fix: the previous loop bound `<= ARRAY_SIZE` indexed past the end of the
+ * map when no id matched; stop at the final { 0L, NULL } sentinel instead,
+ * which also yields "unknown" for unmatched ids without any OOB access.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map) - 1; i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
+/*
+ * Bus read dispatch: forward a register read to the access method for the
+ * given bus type - direct BAR/PCI access or a RAB channel (0/1/2).
+ * Returns 0 on success, negative on error; asserts on unknown bus types.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* direct access reads a single 32-bit word */
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/* Read with timestamp capture. The TSC outputs are currently not produced;
+ * the call simply delegates to read_data().
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/* Bus write dispatch: mirror of read_data() for register writes.
+ * Returns 0 on success, negative on error; asserts on unknown bus types.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* direct access writes a single 32-bit word */
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/* Allocate an FPGA manager; returns NULL on failure.
+ * Fix: zero-initialize the object (consistent with fpga_new()) so a manager
+ * freed before fpga_mgr_init() does not hold indeterminate pointers.
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = malloc(sizeof(nt_fpga_mgr_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Free an FPGA manager.
+ * Fix: accept NULL (consistent with nthw_epp_delete()) instead of
+ * dereferencing it in memset().
+ */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/* Bind the manager to the generated instance table and count its entries
+ * (the table is NULL-terminated).
+ */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/* Find the product descriptor matching @n_fpga_id and instantiate an FPGA
+ * model for it. Returns the new instance, or NULL when no descriptor
+ * matches or allocation fails (an error is logged in both cases).
+ * Fix: check the fpga_new() result before fpga_init() dereferences it;
+ * also drop the redundant nested braces.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id == n_fpga_prod &&
+				p_init->fpga_version == n_fpga_ver &&
+				p_init->fpga_revision == n_fpga_rev) {
+			nt_fpga_t *p_fpga = fpga_new();
+
+			if (p_fpga == NULL) {
+				NT_LOG(ERR, NTHW,
+				       "FPGA Id 0x%" PRIX64 ": allocation failed\n",
+				       n_fpga_id);
+				return NULL;
+			}
+			fpga_init(p_fpga, p_init, p_fpga_info);
+			return p_fpga;
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/* Print the supported FPGA images to @fh_out; detail_level != 0 adds the
+ * build timestamp.
+ * Fix: %08lX expects unsigned long but was passed a raw time_t, whose
+ * width is platform dependent (format/argument mismatch is undefined
+ * behavior) - cast explicitly.
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/* ctime() output already ends in '\n' */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Log all supported FPGA images at debug level. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused: presumably silences -Wunused when NT_LOG compiles out - confirm */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA instance; returns NULL on failure. */
+nt_fpga_t *fpga_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/* Free a single FPGA instance. Modules are NOT released here; use
+ * fpga_delete_all() to free the modules as well.
+ * Fix: accept NULL (consistent with nthw_epp_delete()) instead of
+ * dereferencing it in memset().
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/* Release every module owned by this FPGA, then the FPGA object itself. */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int n;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		if (p->mpa_modules[n] != NULL)
+			module_delete(p->mpa_modules[n]);
+	}
+
+	fpga_delete(p);
+}
+
+/* Populate an FPGA instance from its static product descriptor: copy the
+ * identification fields, then instantiate the product parameters and the
+ * modules.
+ * Fix: when the pointer-array allocation fails, reset the corresponding
+ * count to 0 - otherwise fpga_get_product_param()/fpga_dump_*() iterate a
+ * nonzero count over a NULL array and crash. calloc() replaces the
+ * malloc+memset pair (zeroed and overflow-checked in one call).
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		p->mpa_params = calloc(p->mn_params, sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		} else {
+			p->mn_params = 0; /* keep count consistent with storage */
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules = calloc(p->mn_modules, sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		} else {
+			p->mn_modules = 0; /* keep count consistent with storage */
+		}
+	}
+	/* NOTE(review): param_new()/module_new() results are not checked; a
+	 * failed allocation would fault inside *_init() - confirm OOM policy.
+	 */
+}
+
+/* Store the debug mode and propagate it to every instantiated module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_module = p->mpa_modules[n];
+
+		if (p_module != NULL)
+			module_set_debug_mode(p_module, n_debug_mode);
+	}
+}
+
+/* Linear search for the instantiated module with the given id/instance.
+ * Returns NULL when not present.
+ */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_module = p->mpa_modules[n];
+
+		if (p_module->m_mod_id == id && p_module->m_instance == instance)
+			return p_module;
+	}
+	return NULL;
+}
+
+/* True when the module id/instance pair exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/* Look up the static module init descriptor for id/instance (searches the
+ * product descriptor, not the live module list). Returns NULL when absent.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mp_init->nb_modules; n++) {
+		nt_fpga_module_init_t *p_descr = &p->mp_init->modules[n];
+
+		if (p_descr->id == id && p_descr->instance == instance)
+			return p_descr;
+	}
+	return NULL;
+}
+
+/* Return the value of product parameter @n_param_id, or @n_default_value
+ * when the parameter is not present in this FPGA image.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int n;
+
+	for (n = 0; n < p->mn_params; n++) {
+		const nt_param_t *p_param = p->mpa_params[n];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Accessor: product id of this FPGA model. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* Accessor: FPGA version number. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* Accessor: FPGA revision number. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the full FPGA ident: item-product-version-revision-patch-build (+time). */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug-dump the FPGA summary, then all its parameters and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Dump every product parameter of this FPGA model to the debug log. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (n = 0; n < p->mn_params; n++)
+		param_dump(p->mpa_params[n]);
+}
+
+/* Dump every module of this FPGA model to the debug log. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (n = 0; n < p->mn_modules; n++)
+		module_dump(p->mpa_modules[n]);
+}
+
+/*
+ * Param
+ */
+
+/* Allocate a new product parameter instance.
+ * Fix: use calloc so the instance is zero-initialized - the malloc'ed
+ * object was returned with indeterminate contents and param_init() does
+ * not write every member deterministically on all paths.
+ */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = calloc(1, sizeof(nt_param_t));
+	return p;
+}
+
+/* Scrub and release a parameter instance; NULL is tolerated. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a parameter to its owner FPGA and copy id/value from the init data. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-dump a single parameter (id and value). */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+
+/* Allocate a new module instance.
+ * Fix: use calloc so pointer members (mpa_registers, mp_owner, ...) start
+ * out NULL - module_delete() walks mpa_registers and would otherwise read
+ * indeterminate pointers if the instance is freed before module_init().
+ */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+/*
+ * Free a module instance and every register it owns.
+ * Fix: the register pointer array itself (mpa_registers, allocated in
+ * module_init()) was leaked - free it before scrubbing the instance.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	free(p->mpa_registers); /* free(NULL) is a no-op */
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialize a module from its static init descriptor: copy identity,
+ * version and bus info, inherit the owner's debug mode, and build the
+ * register instances.
+ * NOTE(review): register_new() results are not NULL-checked before
+ * register_init(); an allocation failure here would fault - confirm policy.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	/* Build the per-register objects from the init table */
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/* Initialize a module found by (mod_id, instance), then override debug mode.
+ * NOTE(review): fpga_lookup_init() may return NULL for an unknown pair;
+ * module_init() would then dereference NULL - confirm callers only pass
+ * known module ids.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump the module summary, then all its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register of the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+/* Accessor: module major version. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Accessor: module minor version. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack major/minor into one 64-bit value: major in the high 32 bits. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/* Despite the name, returns true when the module version is greater than
+ * OR EQUAL to (major_version, minor_version).
+ */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+/* Linear search for a register by id within this module; NULL when absent. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		nt_register_t *p_cand = p->mpa_registers[idx];
+
+		if (p_cand->m_id == id)
+			return p_cand;
+	}
+	return NULL;
+}
+
+/*
+ * Get a register by id; logs an error and returns NULL when the module
+ * context is NULL or the register is not present in the module.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Silent lookup variant: no error logging, NULL when not found. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Accessor: current module debug mode. */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the module debug mode and propagate it to all registers it owns. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		nt_register_t *p_cur = p->mpa_registers[idx];
+
+		if (p_cur != NULL)
+			register_set_debug_mode(p_cur, n_debug_mode);
+	}
+}
+
+/* Accessor: bus type id this module is reached over (see enum nthw_bus_type). */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Accessor: base address of the module on its bus. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/*
+ * Log that a module is not supported by this driver.
+ * NOTE(review): the function name has a typo ("unsuppported") but it is
+ * part of the public interface declared in nthw_fpga_model.h, so it is
+ * kept unchanged here.
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* Fix: log line was missing its terminating newline, unlike every
+	 * other NT_LOG call in this file.
+	 */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+
+/* Allocate a new register instance.
+ * Fix: use calloc so pointer members (mpa_fields, mp_shadow, mp_dirty)
+ * start out NULL - register_delete() frees them and would otherwise act
+ * on indeterminate pointers if the instance is freed before register_init().
+ */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+/*
+ * Free a register instance, its fields and its shadow/dirty buffers.
+ * Fixes: the field pointer array itself (mpa_fields, allocated in
+ * register_init()) was leaked; the redundant NULL guards around free()
+ * are dropped since free(NULL) is a no-op.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+
+	free(p->mpa_fields);
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Initialize a register from its static init descriptor: compute the
+ * absolute address from the module base, derive the word length from the
+ * bit width, and build the field instances plus shadow/dirty buffers.
+ * NOTE(review): mp_shadow/mp_dirty are only allocated when the register
+ * has fields and the field array allocation succeeded - confirm callers
+ * of register_get_val()/register_flush() never see a field-less register.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug-dump the register summary, then all its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Accessor: absolute bus address of the register. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Reset every field of the register to its declared reset value.
+ * This only updates the shadow buffer; register_flush() writes it out.
+ */
+void register_reset(const nt_register_t *p)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_reset(p_field);
+	}
+}
+
+/* Linear search for a field by id; NULL on no match or NULL register. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int idx;
+
+	if (p == NULL)
+		return NULL;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *p_cand = p->mpa_fields[idx];
+
+		if (p_cand->m_id == id)
+			return p_cand;
+	}
+	return NULL;
+}
+
+/*
+ * Get a field by id; logs an error and returns NULL when the register
+ * context is NULL or the field is not present in the register.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Silent lookup variant: no error logging, NULL when not found. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Accessor: register bit width. */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Accessor: register address relative to the module base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* Accessor: debug mode.
+ * NOTE(review): takes nt_module_t despite the register_ prefix - matches
+ * the declaration in nthw_fpga_model.h; confirm this is intentional.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *p_cur = p->mpa_fields[idx];
+
+		if (p_cur != NULL)
+			field_set_debug_mode(p_cur, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register's raw data from hardware into its shadow buffer.
+ * Returns the read_data() result (0 on success).
+ * Fix: 'p' was dereferenced by the initializers (module_get_bus(p->mp_owner)
+ * etc.) BEFORE the "if (p && ...)" NULL test, making that test useless.
+ * All dereferences now happen only after the ownership chain is validated.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	int rc = -1;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	rc = read_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+		       p->m_len, p->mp_shadow);
+	return rc;
+}
+
+/*
+ * Read the register's raw data with surrounding timestamp counters.
+ * Fix: same dead NULL check as register_read_data() - 'p' was dereferenced
+ * by the initializers before the "if (p && ...)" test; dereferences are
+ * now deferred until after the ownership chain is validated.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	int rc = -1;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	rc = read_data_tsc(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			   p->m_len, p->mp_shadow, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+/*
+ * Write cnt copies of the register's shadow buffer to hardware.
+ * Fix: same dead NULL check as register_read_data() - 'p' was dereferenced
+ * by the initializers before the "if (p && ...)" test; dereferences are
+ * now deferred until after the ownership chain is validated.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	int rc = -1;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	rc = write_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			(p->m_len * cnt), p->mp_shadow);
+
+	return rc;
+}
+
+/*
+ * Copy up to 'len' words of the shadow buffer into p_data.
+ * len == (uint32_t)-1 means "whole register"; len is clamped to m_len.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (i = 0; i < len; i++)
+		p_data[i] = p->mp_shadow[i];
+}
+
+/* Return the first 32-bit word of the register shadow. */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Refresh the shadow buffer from hardware, unless the register is
+ * write-only.  When the ON_READ debug bit is set, the read data is also
+ * traced to the debug log.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Re-read the register from hardware, then return its first 32-bit word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t first_word = 0;
+
+	register_update(p);
+	register_get_val(p, &first_word, 1);
+	return first_word;
+}
+
+/* Mark every shadow word of the register as needing a hardware flush. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t w;
+
+	for (w = 0; w < p->m_len; w++)
+		p->mp_dirty[w] = true;
+}
+
+/*
+ * Copy caller data into the register shadow buffer (no hardware write).
+ * len == (uint32_t)-1 means "whole register".
+ * Fix: the length clamp now runs BEFORE the assert - previously
+ * assert(len <= p->m_len) fired for the documented (uint32_t)-1 sentinel
+ * that the clamp on the next line explicitly supports (compare
+ * register_get_val(), which clamps first).
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Convenience: set the shadow value and immediately flush it to hardware. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write cnt copies of the register shadow to hardware, unless the register
+ * is read-only.  With the ON_WRITE debug bit set, the written data is also
+ * traced to the debug log.
+ * NOTE(review): the final loop clears cnt dirty flags, but mp_dirty holds
+ * m_len entries - confirm all call sites keep cnt <= m_len.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a register read and return the surrounding timestamp counters. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole register shadow and mark it dirty (flush still required). */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set all shadow bits and mark the register dirty (flush still required). */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+
+/* Allocate a new field instance.
+ * Fix: use calloc so the instance is zero-initialized instead of carrying
+ * indeterminate contents until field_init() runs.
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = calloc(1, sizeof(nt_field_t));
+	return p;
+}
+
+/* Scrub and release a field instance. */
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize a field from its static init descriptor and precompute the
+ * bit-slicing helpers: first word/bit, front mask (bits in the first
+ * shadow word), body length (whole middle words) and tail mask (bits in
+ * the last word).
+ * Fix: the tail mask used a signed "(1 << bits_remaining) - 1", which is
+ * undefined behavior for bits_remaining == 31 (signed overflow); compute
+ * it in 64-bit unsigned arithmetic like the front mask.
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		p->m_tail_mask = (uint32_t)((1ULL << bits_remaining) - 1);
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/* Accessor: debug mode.
+ * NOTE(review): takes nt_module_t despite the field_ prefix - confirm
+ * against the header declaration.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the field's debug mode (fields are not propagated to). */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* Accessor: field width in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Accessor: lowest bit position of the field within the register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Accessor: highest bit position of the field within the register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/* Accessor: front mask - field bits as positioned in the first shadow word. */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Write the field's declared reset value into the shadow (no flush). */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Accessor: field-relative value mask (front mask shifted down to bit 0). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+/* Accessor: the field's declared reset value. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field value from the owning register's shadow buffer into
+ * p_data (len must equal mn_words).  Uses a 32/64-bit union as a shift
+ * window to realign the field down to bit 0 across word boundaries.
+ * NOTE(review): the union trick assumes w32[0] is the low half of w64,
+ * i.e. a little-endian word order - confirm target platforms.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert the caller's field value (len must equal mn_words) into the
+ * owning register's shadow buffer at the field's bit position, then mark
+ * the register dirty.  Mirrors field_get_val(): a 32/64-bit union is used
+ * as a shift window to realign the value up across word boundaries.
+ * NOTE(review): same little-endian word-order assumption as field_get_val().
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Convenience: set the field value and flush the owning register. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Return the field value as a single 32-bit word (shadow only).
+ * NOTE(review): passes len=1 into field_get_val(), which asserts
+ * len == mn_words - confirm this is never used on multi-word fields.
+ */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+/* Re-read the owning register from hardware, then return the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+/* Trigger a read of the owning register with surrounding timestamps. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Re-read the owning register from hardware into its shadow. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Flush the owning register's shadow to hardware. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set the field from a single 32-bit value (shadow only). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Set the field from a single 32-bit value and flush the register. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear all field bits (shadow only); only valid for single-span fields
+ * (asserts m_body_length == 0).
+ */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+/* Clear all field bits and flush the owning register. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set all field bits (shadow only); only valid for single-span fields. */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set all field bits and flush the owning register. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Matching conditions polled for by field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,	/* all field bits cleared */
+	FIELD_MATCH_SET_ALL,	/* all field bits set */
+	FIELD_MATCH_CLR_ANY,	/* at least one field bit cleared */
+	FIELD_MATCH_SET_ANY,	/* at least one field bit set */
+};
+
+/*
+ * Poll a field until the given match condition is met.
+ * Returns 0 on match, -1 after n_poll_iterations polls (default 10000) at
+ * n_poll_interval usec (default 100) without a match.
+ *
+ * Fixes:
+ *  - masks are computed with 1ULL so a 32-bit-wide field does not shift a
+ *    32-bit 1 by the full type width (undefined behavior);
+ *  - FIELD_MATCH_CLR_ANY now compares against the field-relative value
+ *    mask: field_get_mask() returns the register-positioned front mask,
+ *    which the shifted-down value from field_get_updated() could never
+ *    equal for fields with a nonzero bit offset, so the condition
+ *    matched immediately regardless of the hardware state;
+ *  - removed the unreachable "return 0" after the infinite loop.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(((1ULL << p->mn_bit_width) - 1)
+				   << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0)
+			return 0;
+		if (e_match == FIELD_MATCH_SET_ALL && val == n_mask)
+			return 0;
+		if (e_match == FIELD_MATCH_CLR_ALL && val == 0)
+			return 0;
+		if (e_match == FIELD_MATCH_CLR_ANY && val != n_mask)
+			return 0;
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+}
+
+/* Poll until every field bit is set; 0 on match, -1 on timeout. */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit is cleared; 0 on match, -1 on timeout. */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is set; 0 on match, -1 on timeout. */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is cleared; 0 on match, -1 on timeout. */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll until (field value & mask) equals (n_wait_cond_value & mask).
+ * Returns 0 on match, -1 after n_poll_iterations polls (default 10000) at
+ * n_poll_interval usec (default 100).
+ * Fix: the read value is now masked too - previously the RAW value was
+ * compared against (value & mask), so field bits outside the mask could
+ * keep the condition from ever matching.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if ((val & n_wait_cond_mask) ==
+				(n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Debug-dump a field's static layout (id, bit range, width, words, reset). */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/* Debug-dump the field's current (shadow) value, highest word first.
+ * NOTE(review): the local buffer holds 32 words - assumes mn_words <= 32;
+ * confirm against the widest field in the register maps.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Debug-dump a static field init descriptor. */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Build an FPGA model instance for the given packed ident via a temporary
+ * FPGA manager.  Returns NULL (after logging the decoded ident string)
+ * when no supported FPGA matches.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	/* Decode the packed ident purely for the diagnostic string below */
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	/* The manager is only needed for the lookup; the model outlives it */
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+/* Convenience wrapper: look up a module on the FPGA model. */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+/* Convenience wrapper: look up a register within a module (logs on miss). */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	return module_get_register(p_mod, n_reg);
+}
+
+/* Convenience wrapper: look up a field within a register (logs on miss). */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+#ifndef FPGAID_TO_PRODUCTCODE
+/* Extract the product type / product code / version / revision fields from
+ * a packed 64-bit FPGA ident.
+ * Fix: fully parenthesize the shift before applying the mask - the
+ * version/revision macros relied on ">>" binding tighter than "&", and
+ * were inconsistent with the first two macros.
+ */
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)(((fpga_id) >> 32) & 0xFF))
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)(((fpga_id) >> 16) & 0xFFFF))
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)(((fpga_id) >> 8) & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)(((fpga_id) >> 0) & 0xFF))
+#endif
+
+/* Pack a major/minor version pair into one 64-bit value (major in high 32). */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug trace bits: trace on register reads and/or writes. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module can be attached to. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+/* Forward declarations of the model hierarchy: fpga > module > register > field */
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Manager holding the static init descriptors of all supported FPGAs. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* Runtime model of one FPGA: identity plus owned parameters and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One product parameter (id/value pair) of an FPGA. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* One module instance: identity, version, bus location and its registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers;
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* One register: address/type/length, its fields, and shadow/dirty buffers. */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;
+	uint32_t m_addr;
+	uint32_t m_type;
+	uint32_t m_len;
+
+	int m_debug_mode;
+
+	int mn_fields;
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;
+	bool *mp_dirty;
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* One field: bit position/width plus precomputed bit-slicing helpers
+ * (front mask, body length, tail mask) used by field_get_val/field_set_val.
+ */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word;
+	uint32_t m_first_bit;
+	uint32_t m_front_mask;
+	uint32_t m_body_length;
+	uint32_t mn_words;
+	uint32_t m_tail_mask;
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+/* FPGA manager: lifecycle and lookup of supported FPGA descriptors. */
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+/* FPGA model: lifecycle, accessors and module/parameter lookup. */
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+/* Product parameter: lifecycle and dump. */
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+/* Module: lifecycle, version queries and register lookup. */
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+/* Register: lifecycle, shadow access and hardware synchronization. */
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+/* Field: lifecycle and bit-level access within a register. */
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Common helper header: pulls in the standard headers every nthw
+ * translation unit needs and provides shared utility macros.
+ */
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a statically-sized array.
+ * Only valid on true arrays - NOT on pointers or decayed parameters.
+ */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a PCI device id onto the corresponding adapter id.
+ * Returns NT_HW_ADAPTER_ID_UNKNOWN for any device id not listed.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		/* All NT40 variants map to the NT40E3 adapter id. */
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/* Adapter families recognized by the driver (see
+ * nthw_platform_get_nthw_adapter_id() for the PCI-id mapping).
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* NT40A01 is deliberately an alias of NT40E3 (same numeric value) -
+	 * the subsequent enumerators therefore continue from NT40E3 + 1.
+	 */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA image profile - identifies the feature set of the loaded image. */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate a zero-initialized RAC instance.
+ * Returns NULL on allocation failure (callers must check).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	/* Fixed: the original memset the buffer without checking the
+	 * malloc() result, dereferencing NULL on allocation failure.
+	 */
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+/* Scrub and release a RAC instance; a NULL pointer is a no-op. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Resolve all RAC module registers/fields and initialize the instance.
+ * With p == NULL the call degenerates to a probe: it only reports whether
+ * the RAC module exists (0) or not (-1).
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 * Fixed: this block originally ran before mp_reg_rab_init was
+		 * resolved above, passing a NULL register pointer to
+		 * register_set_debug_mode().
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/* Fixed: copy-paste bug cleared mp_reg_dbg_data (already NULL)
+		 * and left mp_fld_dbg_data uninitialized.
+		 */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw register addresses for the low-level BAR0 accessors. */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional non-memory-mapped bus (NMB) registers. */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces reported by the FPGA product parameters. */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	const int n_interfaces = p->mn_param_rac_rab_interfaces;
+
+	return n_interfaces;
+}
+
+/*
+ * Busy-poll the out-buffer "used" counter until at least word_cnt words
+ * have arrived. Returns 0 on success, -1 on timeout (with error log).
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+
+	for (uint32_t retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/*
+	 * The mask is written twice on purpose: the field write leaves a
+	 * debug trace of the operation, the raw register write performs
+	 * the actual initialization.
+	 */
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/* Perform the RAC RAB bus "flip/flip" reset sequence on all interfaces. */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+	const int n_bus_cnt = nthw_rac_get_rab_interface_count(p);
+	const int n_bus_mask = (1 << n_bus_cnt) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_bus_cnt, n_bus_mask);
+	assert(n_bus_cnt);
+	assert(n_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_bus_mask);
+	nthw_rac_rab_init(p, n_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the combined in/out RAB DMA buffer and program the
+ * adapter with its IOVAs, then sync the software ring pointers with the
+ * hardware's current state.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *p_dma;
+
+		/* FPGA needs Page alignment (4K) */
+		p_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+		if (p_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		/* First half is the inbound buffer, second half outbound. */
+		p->m_dma_in_buf = (uint32_t *)p_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = p_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Start a RAB DMA transaction: takes the instance mutex, which stays held
+ * until nthw_rac_rab_dma_commit(). Fails if a transaction is already open.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int rc = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		pthread_mutex_unlock(&p->m_mutex);
+		NT_LOG(ERR, NTHW,
+		       "%s: DMA begin requested, but a DMA transaction is already active\n",
+		       p_adapter_id_str);
+		rc = -1;
+	} else {
+		p->m_dma_active = true;
+	}
+
+	return rc;
+}
+
+/* Append the completion word and kick off the queued DMA transfer. */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint16_t next_wr;
+
+	/* Terminate the inbound command stream with a completion word. */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	next_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_dma_in_ptr_wr = next_wr;
+
+	/* Clear the outbound completion slot we will later poll on. */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* Publish the new inbound write pointer - this starts the transfer. */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(next_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll the outbound buffer until the FPGA echoes the completion word,
+ * then advance the read pointer and replenish the inbound free count.
+ * Returns 0 on success, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t n_poll = 0;
+
+	while (n_poll < RAB_DMA_WAIT) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+		n_poll++;
+	}
+
+	if (n_poll == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Execute the queued DMA transaction opened by nthw_rac_rab_dma_begin()
+ * and release the instance mutex. Returns the wait result (0 or -1).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret = -1;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return ret;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* Raw 32-bit read from BAR0 at the given byte offset. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	volatile uint32_t *const p_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*p_data = *p_reg;
+}
+
+/* Raw 32-bit write to BAR0 at the given byte offset. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *const p_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*p_reg = p_data;
+}
+
+/*
+ * Queue a RAB write burst (1..256 words) into the inbound DMA ring.
+ * The transfer does not start until nthw_rac_rab_dma_commit() is called.
+ * Returns 0 on success, -1 on bad length or insufficient ring space.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): the check requires word_cnt + 3 free words but only
+	 * word_cnt + 1 are consumed below - presumably the extra headroom is
+	 * reserved for the completion handshake appended later by
+	 * nthw_rac_rab_dma_activate(); confirm against the RAB spec.
+	 */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload words, wrapping at the end of the ring. */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read burst (1..256 words) into the inbound DMA ring and
+ * return (via buf_ptr) where the result will appear in the outbound ring.
+ * The transfer does not start until nthw_rac_rab_dma_commit() is called.
+ * Returns 0 on success, -1 on bad length or insufficient ring space.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fixed: the format string had a trailing ": 0x%08X" with no
+		 * matching argument (undefined behavior per the printf
+		 * contract) and was missing the trailing newline used by all
+		 * sibling messages.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Only the command word occupies the inbound ring for a read. */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand the caller the outbound slot where the data will land. */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write of word_cnt words to address on bus_id.
+ * Pushes a write command plus payload through the inbound register buffer,
+ * then waits for and validates the echoed completion word.
+ * Returns 0 on success, -1 on parameter error, active DMA, or bus timeout.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/* NOTE(review): '>' permits address == (1 << RAB_ADDR_BW), which
+	 * would overflow the 16-bit address field into the bus-id bits;
+	 * '>=' looks intended - confirm against the RAB spec.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	/* NOTE(review): same boundary question as the address check above. */
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Register-based access and DMA access are mutually exclusive. */
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 of the free register flags timeout/overflow. */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Read 'word_cnt' 32-bit words from RAB bus 'bus_id', starting at register
+ * 'address', into 'p_data'. The whole transaction is serialized by
+ * p->m_mutex. Returns 0 on success, -1 on invalid parameters, failed buffer
+ * check, or bus timeout.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/*
+	 * NOTE(review): '>' admits address == (1 << RAB_ADDR_BW) even though
+	 * that value needs RAB_ADDR_BW + 1 bits - should this be '>='? confirm
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* NOTE(review): same '>' vs '>=' question as for 'address' above */
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/*
+	 * NOTE(review): word_cnt == (1 << RAB_CNT_BW) passes this check but
+	 * does not fit in the CNT field below - confirm the hardware encodes
+	 * the maximum count as 0, or tighten this to '>='
+	 */
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	/* Low 16 bits: input-buffer free entries; bits 16..31: output buffer */
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		/*
+		 * NOTE(review): unlike the echo path (and the write path),
+		 * word_cnt is not masked to RAB_CNT_BW bits here - this relies
+		 * entirely on the range check above
+		 */
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Issue the read command on the input buffer */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				/*
+				 * When the OB_UPDATE parameter is set the
+				 * output data register is written back after
+				 * each read - presumably to advance/ack the
+				 * output buffer; confirm against FPGA docs
+				 */
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 of BUF FREE flags timeout/overflow on the bus */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB bus: set the flush bit, clear the BUF FREE register, and
+ * poll until only the flush bit remains set in BUF USED (i.e. IB_USED and
+ * OB_USED have drained to zero). Returns 0 on success, -1 if the flush did
+ * not complete within the retry budget. Serialized by p->m_mutex.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/*
+	 * Busy-wait until OB_USED and IB_USED read back as 0 (only the flush
+	 * bit remains). 'data' is already a 32-bit value, so the previous
+	 * '& 0xFFFFFFFF' mask was a no-op and has been dropped.
+	 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if (data == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
+struct nthw_rac {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rac;
+
+	pthread_mutex_t m_mutex;
+
+	int mn_param_rac_rab_interfaces;
+	int mn_param_rac_rab_ob_update;
+
+	nt_register_t *mp_reg_dummy0;
+	nt_register_t *mp_reg_dummy1;
+	nt_register_t *mp_reg_dummy2;
+
+	nt_register_t *mp_reg_rab_init;
+	nt_field_t *mp_fld_rab_init;
+
+	int mn_fld_rab_init_bw;
+	uint32_t mn_fld_rab_init_mask;
+
+	nt_register_t *mp_reg_dbg_ctrl;
+	nt_field_t *mp_fld_dbg_ctrl;
+
+	nt_register_t *mp_reg_dbg_data;
+	nt_field_t *mp_fld_dbg_data;
+
+	nt_register_t *mp_reg_rab_ib_data;
+	nt_field_t *mp_fld_rab_ib_data;
+
+	nt_register_t *mp_reg_rab_ob_data;
+	nt_field_t *mp_fld_rab_ob_data;
+
+	nt_register_t *mp_reg_rab_buf_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
+	nt_field_t *mp_fld_rab_buf_free_ob_free;
+	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
+	nt_field_t *mp_fld_rab_buf_free_timeout;
+
+	nt_register_t *mp_reg_rab_buf_used;
+	nt_field_t *mp_fld_rab_buf_used_ib_used;
+	nt_field_t *mp_fld_rab_buf_used_ob_used;
+	nt_field_t *mp_fld_rab_buf_used_flush;
+
+	nt_register_t *mp_reg_rab_dma_ib_lo;
+	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_hi;
+	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_hi;
+	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_lo;
+	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_wr;
+	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ib_rd;
+	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ob_wr;
+	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;
+
+	nt_register_t *mp_reg_rab_nmb_rd;
+	nt_register_t *mp_reg_rab_nmb_data;
+	nt_register_t *mp_reg_rab_nmb_wr;
+	nt_register_t *mp_reg_rab_nmb_status;
+
+	uint32_t rac_rab_init_addr;
+	uint32_t rac_rab_ib_data_addr;
+	uint32_t rac_rab_ob_data_addr;
+	uint32_t rac_rab_buf_free_addr;
+	uint32_t rac_rab_buf_used_addr;
+
+	uint32_t rac_rab_dma_ib_lo_addr;
+	uint32_t rac_rab_dma_ib_hi_addr;
+	uint32_t rac_rab_dma_ob_lo_addr;
+	uint32_t rac_rab_dma_ob_hi_addr;
+	uint32_t rac_rab_dma_ib_rd_addr;
+	uint32_t rac_rab_dma_ob_wr_addr;
+	uint32_t rac_rab_dma_ib_wr_addr;
+
+	uint32_t rac_rab_buf_free_ib_free_mask;
+	uint32_t rac_rab_buf_free_ob_free_mask;
+	uint32_t rac_rab_buf_used_ib_used_mask;
+	uint32_t rac_rab_buf_used_ob_used_mask;
+	uint32_t rac_rab_buf_used_flush_mask;
+
+	uint32_t rac_rab_buf_used_ob_used_low;
+
+	uint32_t rac_nmb_rd_adr_addr;
+	uint32_t rac_nmb_data_addr;
+	uint32_t rac_nmb_wr_adr_addr;
+	uint32_t rac_nmb_status_addr;
+
+	bool m_dma_active;
+
+	struct nt_dma_s *m_dma;
+
+	volatile uint32_t *m_dma_in_buf;
+	volatile uint32_t *m_dma_out_buf;
+
+	uint16_t m_dma_out_ptr_rd;
+	uint16_t m_dma_in_ptr_wr;
+	uint32_t m_in_free;
+};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+/*
+ * DMA buffer descriptor handed back by nthw_rac_rab_read32_dma().
+ * NOTE(review): field semantics inferred from names - confirm against the
+ * DMA read implementation.
+ */
+struct dma_buf_ptr {
+	uint32_t size;	/* buffer size (presumably in 32-bit words) */
+	uint32_t index;	/* current position within 'base' */
+	volatile uint32_t *base; /* buffer start; device-written, hence volatile */
+};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+/*
+ * Allocate a zero-initialized STAT context.
+ * Returns NULL on allocation failure; release with nthw_stat_delete().
+ */
+nthw_stat_t *nthw_stat_new(void)
+{
+	/* calloc() provides the zeroed state the old malloc+memset produced */
+	return calloc(1, sizeof(nthw_stat_t));
+}
+
+/* Release a STAT context allocated by nthw_stat_new(); NULL is a no-op. */
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	/* free(NULL) is defined as a no-op, so the 'if (p)' guard is redundant */
+	free(p);
+}
+
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
+struct nthw_stat {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_stat;
+	int mn_instance;
+
+	int mn_stat_layout_version;
+
+	bool mb_is_vswitch;
+	bool mb_has_tx_stats;
+
+	int m_nb_phy_ports;
+	int m_nb_nim_ports;
+
+	int m_nb_rx_ports;
+	int m_nb_tx_ports;
+
+	int m_nb_rx_host_buffers;
+	int m_nb_tx_host_buffers;
+
+	int m_dbs_present;
+
+	int m_rx_port_replicate;
+
+	int m_nb_color_counters;
+
+	int m_nb_rx_hb_counters;
+	int m_nb_tx_hb_counters;
+
+	int m_nb_rx_port_counters;
+	int m_nb_tx_port_counters;
+
+	int m_nb_counters;
+
+	nt_field_t *mp_fld_dma_ena;
+	nt_field_t *mp_fld_cnt_clear;
+
+	nt_field_t *mp_fld_tx_disable;
+
+	nt_field_t *mp_fld_cnt_freeze;
+
+	nt_field_t *mp_fld_stat_toggle_missed;
+
+	nt_field_t *mp_fld_dma_lsb;
+	nt_field_t *mp_fld_dma_msb;
+
+	uint64_t m_stat_dma_physical;
+	uint32_t *mp_stat_dma_virtual;
+
+	uint64_t last_ts;
+
+	uint64_t *mp_timestamp;
+};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
+struct nt_log_impl {
+	int (*init)(void);
+	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
+		   va_list args);
+	int (*is_debug)(uint32_t module);
+};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+/*
+ * Install the user-supplied log implementation and run its init hook.
+ * Returns the implementation's init() result.
+ * NOTE(review): 'impl' is dereferenced unchecked - callers must not pass NULL.
+ */
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
+/*
+ * Return a pointer to the first '\n' of the trailing EOL run in 's'
+ * ("hello\n\n\n" -> pointer to the first of the three '\n'), ignoring
+ * trailing spaces. Returns NULL when the string does not end in a newline.
+ */
+static char *last_trailing_eol(char *s)
+{
+	int i = strlen(s) - 1;
+
+	/* Guard the empty string: i would be -1 and s[i] read out of bounds */
+	if (i < 0)
+		return NULL;
+	/* Skip spaces */
+	while (i > 0 && s[i] == ' ')
+		--i;
+	if (s[i] != '\n')
+		return NULL;
+	/*
+	 * Find the last trailing EOL "hello_world\n\n\n"
+	 *                                         ^
+	 */
+	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
+		--i;
+	return &s[i];
+}
+
+/*
+ * Core log entry point behind the NT_LOG macro. Always terminates the
+ * emitted line with exactly one EOL. Returns the implementation's result,
+ * or -1 when no implementation is installed or allocation fails.
+ */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+
+	/* The helper returns NULL on OOM; bail out instead of dereferencing */
+	if (actual_format == NULL)
+		return rv;
+
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		/*
+		 * strncat() appends up to n chars PLUS a terminating NUL, so
+		 * the bound must be the remaining space minus one; the
+		 * previous bound could overflow the buffer by one byte when
+		 * the string was already full.
+		 */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format) - 1);
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+/*
+ * Returns 1 if debug is enabled for 'module', 0 for a lower log level,
+ * -1 for an incorrect module or when no implementation is installed.
+ */
+int nt_log_is_debug(uint32_t module)
+{
+	/* Be consistent with nt_log(): tolerate a missing implementation */
+	if (user_impl == NULL)
+		return -1;
+	return user_impl->is_debug(module);
+}
+
+/*
+ * Allocate a NTLOG_HELPER_STR_SIZE_MAX-byte helper string, initialized with
+ * 'sinit' (truncated if necessary) or empty when sinit is NULL.
+ * Returns NULL on allocation failure; release with ntlog_helper_str_free().
+ */
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+/*
+ * Reset helper string 's' to 'sinit', or to the empty string when sinit is
+ * NULL. A NULL 's' is silently ignored.
+ */
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (!s)
+		return;
+	if (!sinit) {
+		s[0] = '\0';
+		return;
+	}
+	rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+}
+
+/*
+ * Append printf-style formatted text to helper string 's', truncating at the
+ * NTLOG_HELPER_STR_SIZE_MAX buffer size. A NULL 's' is silently ignored.
+ * The attribute is (2, 3): this is a '...' variadic, not a va_list function,
+ * so the compiler can type-check the arguments ((2, 0) disabled checking).
+ */
+__rte_format_printf(2, 3)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	/* size_t matches strlen()'s return type and the size arithmetic below */
+	size_t len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
+/* Release a string from ntlog_helper_str_alloc(); NULL is a safe no-op. */
+void ntlog_helper_str_free(char *s)
+{
+	free(s);
+}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..3850ccd934
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;
+	uint64_t size;
+};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Register the VFIO map/unmap callbacks used by nt_dma_alloc/nt_dma_free */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb.vfio_dma_map = impl->vfio_dma_map;
+	vfio_cb.vfio_dma_unmap = impl->vfio_dma_unmap;
+}
+
+/*
+ * Allocate a DMA-capable buffer and map it through the registered VFIO
+ * callbacks.
+ *
+ * size:  requested buffer size in bytes
+ * align: required buffer alignment in bytes
+ * numa:  NUMA socket to allocate from
+ *
+ * Returns a descriptor with virtual address, IOVA and mapped size, or
+ * NULL on failure.  Release with nt_dma_free().
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	/*
+	 * NOTE(review): the mapping below covers ALIGN_SIZE(size) bytes,
+	 * which can exceed the 'size' bytes allocated here - confirm that
+	 * callers always pass a power-of-two size.
+	 */
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size and align are uint64_t: use 64-bit conversion specifiers
+	 * ("%u"/"%X" with a 64-bit argument is undefined behavior).
+	 */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and free a buffer obtained from nt_dma_alloc().
+ * Safe to call with NULL.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	if (!vfio_addr)
+		return;
+
+	/* size is uint64_t: "%u" would be undefined behavior */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		/* Free the host memory anyway; the IOMMU mapping may leak */
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-21 13:54   ` Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+/*
+ * Signal handler / teardown helper that stops all monitor threads.
+ * signum == -1 means normal teardown: join each thread that was running.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(monitor_task_is_running); idx++) {
+		const int was_running = monitor_task_is_running[idx];
+
+		/* Request the task to stop */
+		monitor_task_is_running[idx] = 0;
+
+		if (signum == -1 && was_running != 0) {
+			void *thread_ret = NULL;
+
+			pthread_join(monitor_tasks[idx], &thread_ret);
+			memset(&monitor_tasks[idx], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Dump human-readable adapter information (PCI ids, FPGA ids, port counts,
+ * HW platform) to the given stream, then append the statistics dump.
+ * Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Format the packed PCI ident as domain:bus:dev.func */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	/* Append statistics after the static information */
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+/*
+ * Create and initialize the SPI (v3) instance used for sensor setup.
+ * Returns NULL on allocation or initialization failure.
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *spi = nthw_spi_v3_new();
+
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+/*
+ * Create and initialize the SPI instance used for sensor reading.
+ * Returns NULL on allocation or initialization failure.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *spi = nthw_spis_new();
+
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * Build the adapter sensor list: the FPGA temperature sensor first, then
+ * (when the sensor SPI is available) the AVR-managed sensors (fan, PSUs,
+ * PCB temperature).  Updates adapter->adapter_sensors and
+ * adapter->adapter_sensors_cnt.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				/* fixed typo in log message: "starteed" */
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Initialize one NT4GA adapter: derive HW ids from the PCI device id,
+ * instantiate the FPGA model, run the PCI TA/TG self-measurement, set up
+ * sensors, initialize the per-port link code and the statistics module.
+ *
+ * Returns 0 on success, a non-zero error code otherwise.
+ *
+ * NOTE(review): on error returns the ident strings allocated below are
+ * not freed here - confirm nt4ga_adapter_deinit() is always called on
+ * failure paths, otherwise they leak.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: "dddd:bb:dd.f" formatted PCI address */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: "PCI:" prefixed variant, shared with fpga_info */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Per-port ident strings "<adapter>:intf_<i>" */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; fills in fpga_info fields used below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		/* Throughput self-test; its absence is not fatal */
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		/* Dispatch on FPGA product id to the matching link init */
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional: only initialized when present in the FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down one NT4GA adapter: stop monitor threads and statistics, shut
+ * down the FPGA model, and release all ident strings and sensor lists.
+ *
+ * Returns the result of nthw_rac_rab_reset() (the only fallible step whose
+ * status is propagated).
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1 = normal teardown: join the monitor threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors: walk the singly linked list */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors: one linked list per physical port */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id; /* bits 11:4 of pci_device_id */
+	int hw_product_type; /* bits 3:0 of pci_device_id */
+	int hw_reserved1; /* bits 15:12 of pci_device_id */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Top-level per-adapter state aggregating all NT4GA sub-modules */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	uint16_t adapter_sensors_cnt; /* number of entries in adapter_sensors */
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX]; /* per-port NIM sensor counts */
+	struct nt_sensor_group *adapter_sensors; /* linked list, owned */
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX]; /* per-port lists, owned */
+
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX]; /* malloc'ed, freed in deinit */
+	char *mp_adapter_id_str; /* malloc'ed, freed in deinit */
+	char *p_dev_name; /* malloc'ed, freed in deinit */
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+/* See the comment block above for the semantics of each hook */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Filter/flow state shared between the adapter and the flow device */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt; /* number of interfaces */
+	int n_queues_per_intf_cnt; /* queues available per interface */
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port,
+ * so the 'port' argument is deliberately ignored.
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* 'p' is dereferenced here, so it must not carry the unused marker */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+/* port: whether a NIM module is currently detected on the port */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ */
+/* port: set admin state; adm_state==true enables the port */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+/* port: admin state; true when the port is not disabled */
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+/*
+ * port: link status
+ * Setting link state/status is (currently) the same as controlling the
+ * port admin state, so write the admin flag directly.
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	p->nt4ga_link.port_action[port].port_disable = !link_status;
+}
+
+/* port: current link-up status */
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ */
+/* port: request a link speed and record it in the link info */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+/* port: currently recorded link speed */
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code - intentionally a no-op.
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	(void)p;
+	(void)port;
+	(void)autoneg;
+}
+
+/* port: link autoneg - not fully supported; always reports enabled */
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	(void)p;
+	(void)port;
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+/* port: record the requested duplex mode (not fully supported yet) */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+/* port: currently recorded duplex mode */
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+/* port: request a loopback mode (see enum nt_link_loopback_e) */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+/* port: currently requested loopback mode */
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+/* port: copy of the NIM I2C context (returned by value) */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: tx power
+ */
+/*
+ * port: enable/disable the TX laser on QSFP28-class ports.
+ * Returns 0 on success, 1 if the NIM rejected the request, and -1 when
+ * the port type does not support TX power control.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	/* Only QSFP28 variants support laser control */
+	if (link_info->u.nim_ctx[port].port_type != NT_PORT_TYPE_QSFP28_SR4 &&
+			link_info->u.nim_ctx[port].port_type != NT_PORT_TYPE_QSFP28 &&
+			link_info->u.nim_ctx[port].port_type != NT_PORT_TYPE_QSFP28_LR4)
+		return -1;
+
+	nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/* RX-only modules have no TX laser to control */
+	if (!nim_ctx->specific_u.qsfp.rx_only &&
+			nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+		return 1;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Snapshot of a port's link/NIM state as read by the monitor thread
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent; /* latched-high: NIM was absent at some point */
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Current negotiated/configured link parameters for a port */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Requested (administrative) port settings applied by the monitor thread */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* Must be the first field in all adapter variants */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply a loopback mode change to one port.
+ * mode/last_mode: 0 = off, 1 = host loopback, 2 = line loopback.
+ * Host loopback disables the GTY polarity swap; leaving loopback restores it.
+ * NOTE(review): local "swap_polerity" is a typo for "swap_polarity".
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* Loopback off: undo whatever the previous mode enabled */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* Polarity swap is only required on NT200A01 HW id 2 and NT200A02 */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear stale error counters once the RX path is out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ * Fills *state from the MAC/PCS link summary and the GPIO PHY module-present
+ * pin. Logs the raw summary once per distinct value (per adapter/port).
+ * Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/*
+		 * De-duplicate logging: only emit when the summary changed.
+		 * NOTE(review): lsbuf is function-static shared state; assumes
+		 * a single monitor thread per adapter calls this — confirm.
+		 */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present (via the GPIO PHY present pin)
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX in the MAC/PCS. Always returns 0.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX and select the host as the TX data source. Always returns 0.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX in the MAC/PCS. Always returns 0.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX and deselect the host as TX data source. Always returns 0.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX: pulse the RX path reset with 10ms settle time on each edge.
+ * Always returns 0.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity on all four GTY lanes of a port.
+ * swap=true applies the board-specific per-lane pattern; swap=false clears
+ * the swap on every lane. Always returns 0.
+ * NOTE(review): the tables are indexed [port][lane] with only two port rows;
+ * assumes port is 0 or 1 — confirm against callers.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ * Resets the RX path if the MAC/PCS requests a reset, reports a high bit
+ * error rate, or not all FEC alignment markers are locked. Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	bool rst_required;
+	bool ber;
+	bool fec_all_locked;
+
+	rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+
+	ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+
+	fec_all_locked = nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets the module via GPIO, pre-initializes the I2C context, validates the
+ * module type and, depending on `enable`, asserts or de-asserts low power.
+ * Returns 0 on success (or when no module is present), non-zero on error.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* the only supported module type */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	/* When disabling the port, quiesce the MAC/PCS before touching the NIM */
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* The module may have disappeared during the reset sequence */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ * Sequence: state-variable setup, MAC/PCS configuration (LED, equalization,
+ * polarity swap), TX enable + RX reset, GMF enable, NIM creation, FEC and
+ * per-board GTY TX tuning, RX timestamp compensation, RX enable.
+ * Returns 0 on success or the NIM initialization error code.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* First init call probes for the GMF module; only enable if present */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Per-board GTY TX tuning (pre/diff/post emphasis values) */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ *
+ * Monitor loop for all ports of one adapter: reads adapter/NIM sensors,
+ * applies administrative enable/disable and loopback changes, detects NIM
+ * insertion/removal (re-running port_init on insertion), and tracks/reports
+ * link up/down. Runs until monitor_task_is_running[adapter_no] is cleared.
+ * Always returns 0.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/*
+				 * Log this port's context: index with [i] —
+				 * nim_ctx is the base of the per-port array.
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ * pthread entry point: thin wrapper around common_ptp_nim_state_machine().
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ * One-time per adapter: initializes MAC/PCS, NIM I2C and GPIO PHY contexts
+ * for each physical port, then starts the link monitor thread.
+ * Returns 0 on success or the first non-zero init/pthread_create error.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* IIC instance = 2 + adapter port number */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* One-time 100G port bring-up; starts the link monitor thread. Returns 0 on success. */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
/*
 * Locate and initialise the PCI traffic-generator (RD/WR TG) and
 * test-access (TA) FPGA modules for this adapter.
 *
 * Returns the number of modules that could NOT be initialised (0 == all
 * present); a missing module is logged as a warning, not treated as fatal.
 */
int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
{
	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
	int res;
	int n_err_cnt = 0;

	/*
	 * NOTE(review): p is the address of an embedded struct member and can
	 * never be NULL here; the else-branch is effectively dead code.
	 */
	if (p) {
		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
	} else {
		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
		return -1;
	}

	assert(p_fpga);

	/* Each *_new() allocates a module handle; *_init() probes the FPGA for it */
	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
	assert(p->mp_nthw_pci_rd_tg);
	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
	if (res) {
		n_err_cnt++;
		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
		       p_adapter_id_str);
	}

	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
	assert(p->mp_nthw_pci_wr_tg);
	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
	if (res) {
		n_err_cnt++;
		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
		       p_adapter_id_str);
	}

	p->mp_nthw_pci_ta = nthw_pci_ta_new();
	assert(p->mp_nthw_pci_ta);
	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
	if (res) {
		n_err_cnt++;
		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
		       p_adapter_id_str);
	}

	return n_err_cnt;
}
+
/* Enable (non-zero) or disable (0) the PCI test-access checker; always returns 0. */
static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
		uint32_t enable)
{
	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
	return 0;
}

/* Read the TA length-error counter into *p_data; always returns 0. */
static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
{
	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
	return 0;
}

/* Read the TA bad-packet counter into *p_data; always returns 0. */
static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
{
	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
	return 0;
}

/* Read the TA good-packet counter into *p_data; always returns 0. */
static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
{
	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
	return 0;
}

/* Read the TA payload-error counter into *p_data; always returns 0. */
static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
		uint32_t *p_data)
{
	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
	return 0;
}
+
/*
 * Program one RD TG RAM slot: the generator will issue a read request of
 * req_size (presumably bytes - confirm against nthw_pci_rd_tg) at host
 * address iova + slot_addr * req_size.
 *
 * NOTE(review): slot_addr * req_size is evaluated in 32-bit arithmetic
 * before being widened - fine for the small slot counts / packet sizes
 * used by the throughput test, but would wrap for large products.
 */
static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
				    int slot_addr, uint32_t req_size, bool wait,
				    bool wrap)
{
	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));

	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
	return 0;
}
+
/* Start the RD TG for num_iterations runs (0 stops it); always returns 0. */
static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
{
	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
	return 0;
}
+
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
/*
 * Program one WR TG RAM slot: the generator will issue a write of req_size
 * (presumably bytes - confirm against nthw_pci_wr_tg) to host address
 * iova + slot_addr * req_size; inc selects incrementing payload data.
 *
 * NOTE(review): slot_addr * req_size is evaluated in 32-bit arithmetic
 * before being widened - see the RD TG setup counterpart.
 */
static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
				    int slot_addr, uint32_t req_size, bool wait,
				    bool wrap, bool inc)
{
	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));

	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);

	return 0;
}
+
/* Start the WR TG for num_iterations runs (0 stops it); always returns 0. */
static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
{
	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
	return 0;
}
+
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
/*
 * Run one HIF/PCIe bandwidth measurement pass using the FPGA RD/WR traffic
 * generators and the test-access (TA) checker.
 *
 * Test parameters (delay, packet size, packet count, direction, NUMA node)
 * are taken from *pri; measured counters are written back into *pri (and
 * *sla when a slave endpoint exists - currently always NULL here).
 * Returns 0 on success, non-zero when any sub-step or the TA checker
 * reported an error.
 */
int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
				      struct nthw_hif_end_point_counters *pri,
				      struct nthw_hif_end_point_counters *sla)
{
	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;

	const int delay = pri->n_tg_delay;
	const int pkt_size = pri->n_tg_pkt_size;
	const int num_pkts = pri->n_tg_num_pkts;
	const int n_direction = pri->n_tg_direction;
	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
	const int dma_buf_size = (4 * 1024 * 1024);

	const size_t align_size = ALIGN_SIZE(dma_buf_size);
	uint32_t *mem_addr;
	uint64_t iova;

	int bo_error = 0;

	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
	nthw_hif *p_slave_instance = NULL;

	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
	nthw_pcie3 *p_pci_slave = NULL;

	/* Need at least one endpoint (HIF or PCIe3) to sample counters from */
	assert(p_master_instance || p_pci_master);

	struct nt_dma_s *p_dma;
	/* FPGA needs a Page alignment (4K on Intel) */
	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
	if (p_dma == NULL) {
		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
		/*
		 * NOTE(review): returns 0 (success) on allocation failure -
		 * presumably "skip the test" is intended; confirm callers
		 * do not rely on a non-zero error here.
		 */
		return 0;
	}
	mem_addr = (uint32_t *)p_dma->addr;
	iova = p_dma->iova;

	NT_LOG(DBG, NTHW,
	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
	       __func__, n_numa_node);

	bo_error = 0;
	{
		int wrap;

		/* Stop any existing running test */
		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);

		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);

		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);

		/* Prepare the HIF Traffic generator */
		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);

		/*
		 * Ensure that the hostbuffer memory contain data that can be read -
		 * For this we will ask the FPGA to write data to it. The last wrap packet
		 * does not generate any data it only wraps (unlike the PCIe2 TG)
		 */
		{
			int pkt;

			/* Program one RD and one WR slot per packet; last slot wraps */
			for (pkt = 0; pkt < num_pkts; pkt++) {
				if (pkt >= (num_pkts - 1))
					wrap = 1;

				else
					wrap = 0;
				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
									pkt, pkt_size,
									0, wrap, 1);
				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
									pkt, pkt_size,
									0, wrap);
			}
		}

		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);

		/* Start WR TG Write once */
		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
		/* Wait until WR TG ready */
		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);

		/* Verify that we have a packet */
		{
			int pkt;

			/*
			 * The WR TG payload is a 32-bit incrementing pattern
			 * starting after an 8-byte header in each packet slot.
			 */
			for (pkt = 0; pkt < num_pkts; pkt++) {
				uint32_t value = 0;
				int poll;

				for (poll = 8; poll < pkt_size;
						poll += 4, value++) {
					if (*(uint32_t *)((uint8_t *)mem_addr +
							  (pkt * pkt_size) +
							  poll) != value) {
						NT_LOG(ERR, NTHW,
						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
						       pkt, poll,
						       *(uint32_t *)((uint8_t *)
								     mem_addr +
								     (pkt *
								      pkt_size) +
								     poll),
						       value);

						/*
						 * Break out of the verification loop on first
						 * Compare error
						 */
						bo_error |= 1;
						break;
					}
				}
			}
		}

		switch (n_direction) {
		case 1: /* Read only test */
			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
			break;
		case 2: /* Write only test */
			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
			break;
		case 3: /* Combined read/write test */
			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
			break;
		default: /* stop tests */
			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
			break;
		}

		/*
		 * NOTE(review): the unconditional break below means this loop
		 * always executes exactly once; the loop shell is kept for a
		 * future "signalstop" mode (see trailing comment).
		 */
		do {
			/* prep */
			if (p_pci_master) {
				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
								    pri);
			}
			if (p_pci_slave) {
				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
								    sla);
			}

			/* start measure */
			if (p_master_instance)
				nthw_hif_stat_req_enable(p_master_instance);
			if (p_pci_master)
				nthw_pcie3_stat_req_enable(p_pci_master);

			if (p_slave_instance)
				nthw_hif_stat_req_enable(p_slave_instance);
			if (p_pci_slave)
				nthw_pcie3_stat_req_enable(p_pci_slave);

			/* Wait */
			NT_OS_WAIT_USEC(delay);

			/* Stop measure */
			if (p_master_instance)
				nthw_hif_stat_req_disable(p_master_instance);
			if (p_pci_master)
				nthw_pcie3_stat_req_disable(p_pci_master);

			if (p_slave_instance)
				nthw_hif_stat_req_disable(p_slave_instance);
			if (p_pci_slave)
				nthw_pcie3_stat_req_disable(p_pci_slave);

			/* Post process master */
			if (p_master_instance) {
				nthw_hif_end_point_counters_sample(p_master_instance,
							       pri);
			}

			if (p_pci_master) {
				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
								     pri);
			}

			/* Post process slave */
			if (p_slave_instance) {
				nthw_hif_end_point_counters_sample(p_slave_instance,
							       sla);
			}

			if (p_pci_slave) {
				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
								     sla);
			}

			{
				/* Check for TA transmit errors */
				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
					 dw_bad_payload;
				nt4ga_pci_ta_tg_ta_read_packet_good(p,
								 &dw_good_pkts);
				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
				nt4ga_pci_ta_tg_ta_read_length_error(p,
								  &dw_bad_length);
				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);

				NT_LOG(DBG, NTHW,
				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
				       __func__, n_numa_node);
				NT_LOG(DBG, NTHW,
				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				       __func__, n_numa_node, dw_good_pkts,
				       dw_bad_pkts, dw_bad_length, dw_bad_payload);

				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
					bo_error |= 1;
					NT_LOG(ERR, NTHW,
					       "%s: NUMA node %u: HIF: TA: error detected\n",
					       __func__, n_numa_node);
					NT_LOG(ERR, NTHW,
					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
					       __func__, n_numa_node, dw_good_pkts);
					NT_LOG(ERR, NTHW,
					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
					       __func__, n_numa_node, dw_bad_pkts);
					NT_LOG(ERR, NTHW,
					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
					       __func__, n_numa_node,
					       dw_bad_length);
					NT_LOG(ERR, NTHW,
					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
					       __func__, n_numa_node,
					       dw_bad_payload);
				}
			}

			if (bo_error != 0)
				break;

			break; /* for now only loop once */

			/*
			 * Only do "signalstop" looping if a specific numa node and direction is to
			 * be tested.
			 */
		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
				(n_direction != -1));

		/* Stop the test */
		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);

		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);

		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);

		/* PCIe3 sanity checks */
		{
#if defined(DEBUG)
			int do_loop = 1;
#else
			int do_loop = 0;
#endif

			while (do_loop) {
				do_loop = 0;

				if (p_master_instance) {
					nthw_hif_stat_req_enable(p_master_instance);
					NT_OS_WAIT_USEC(100);
					nthw_hif_stat_req_disable(p_master_instance);
				}

				if (do_loop == 0)
					break;

				NT_LOG(DBG, NTHW,
				       "%s: WARNING this is wrong - wait again\n",
				       __func__);
				NT_OS_WAIT_USEC(200 * 1000);
			}
		}
	}

	/* Stop the test */

	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);

	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);

	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);

	nt_dma_free(p_dma);

	return bo_error;
}
+
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg;
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg;
+	struct nthw_pci_ta *mp_nthw_pci_ta;
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
/*
 * Convert an inline pcap-style 32:32 timestamp (seconds in the upper 32
 * bits, nanoseconds in the lower 32 bits) to a flat nanosecond count.
 */
static inline uint64_t timestamp2ns(uint64_t ts)
{
	const uint64_t n_secs = ts >> 32;
	const uint64_t n_nsecs = ts & 0xffffffffULL;

	return n_secs * 1000000000ULL + n_nsecs;
}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
/*
 * Collect the latest FPGA statistics DMA snapshot into p_nt4ga_stat.
 *
 * Updates p_nt4ga_stat->last_timestamp and accumulates the DMA counters via
 * the vswitch (virt) or capture (cap) layout parser, selected by
 * mb_is_vswitch. Always returns 0.
 */
int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
		      nt4ga_stat_t *p_nt4ga_stat)
{
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;

	if (p_nthw_stat->mb_is_vswitch) {
		/*
		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
		 * images may only clear all bits in this memory location. TBV
		 * Consequently, last_timestamp must be constructed via a system call.
		 */
		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
						p_nt4ga_stat->p_stat_dma_virtual);
	} else {
		/* Capture FPGA delivers a 32:32 timestamp in the DMA block */
		p_nt4ga_stat->last_timestamp =
			timestamp2ns(*p_nthw_stat->mp_timestamp);
		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
					       p_nt4ga_stat->p_stat_dma_virtual);
	}
	return 0;
}
+
/*
 * Create and initialise the STA (statistics) and RMC (receive MAC control)
 * FPGA module handles for this adapter and cache the port/host-buffer
 * counts reported by the STA module.
 *
 * Returns 0 on success, -1 on allocation failure (partial allocations are
 * released before returning).
 */
int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
{
	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;

	/* NOTE(review): p_nt4ga_stat is the address of an embedded member and
	 * can never be NULL; the else-branch is effectively dead code.
	 */
	if (p_nt4ga_stat) {
		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
	} else {
		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
		       __func__, __LINE__);
		return -1;
	}

	{
		nthw_stat_t *p_nthw_stat = nthw_stat_new();
		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();

		if (!p_nthw_stat) {
			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
			       __func__, __LINE__);
			return -1;
		}

		if (!p_nthw_rmc) {
			/* Avoid leaking the already-created stat handle */
			nthw_stat_delete(p_nthw_stat);

			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
			       __func__, __LINE__);
			return -1;
		}

		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
		nthw_stat_init(p_nthw_stat, p_fpga, 0);

		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);

		/* Cache geometry reported by the STA module */
		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;

		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
	}

	return 0;
}
+
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %p %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02ld, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX64 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+		free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+	}
+	if (p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+		free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+	}
+
+	if (p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+		free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+	}
+	if (p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+		free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_color) {
+		free(p_nt4ga_stat->mp_stat_structs_color);
+		p_nt4ga_stat->mp_stat_structs_color = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_hb) {
+		free(p_nt4ga_stat->mp_stat_structs_hb);
+		p_nt4ga_stat->mp_stat_structs_hb = NULL;
+	}
+
+	if (p_nt4ga_stat->mp_stat_structs_flm) {
+		free(p_nt4ga_stat->mp_stat_structs_flm);
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	}
+
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/*
+ * Collect capture (cap) v1 statistics from the statistics DMA area and
+ * accumulate them into the software counters in p_nt4ga_stat.
+ * Called with stat mutex locked.
+ * Returns 0 on success, -1 on invalid arguments or unsupported stat layout.
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int c, h, p;
+
+	/*
+	 * Validate pointers before any dereference; previously p_nt4ga_stat
+	 * was dereferenced before the NULL check, making the check useless.
+	 */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two 32-bit words (packets, bytes) per color */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: eight 32-bit words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		struct host_buffer_counters *const p_hb =
+			&p_nt4ga_stat->mp_stat_structs_hb[h];
+		const uint32_t *const p_cnt = &p_stat_dma_virtual[h * 8];
+
+		p_hb->flush_packets += p_cnt[0];
+		p_hb->drop_packets += p_cnt[1];
+		p_hb->fwd_packets += p_cnt[2];
+		p_hb->dbs_drop_packets += p_cnt[3];
+		p_hb->flush_bytes += p_cnt[4];
+		p_hb->drop_bytes += p_cnt[5];
+		p_hb->fwd_bytes += p_cnt[6];
+		p_hb->dbs_drop_bytes += p_cnt[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports: m_nb_rx_port_counters 32-bit words per port */
+	for (p = 0; p < n_rx_ports; p++) {
+		struct port_counters_v2 *const p_rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+		const uint32_t *const p_cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+
+		p_rx->octets += p_cnt[0];
+
+		p_rx->broadcast_pkts += p_cnt[1];
+		p_rx->multicast_pkts += p_cnt[2];
+		p_rx->unicast_pkts += p_cnt[3];
+		p_rx->pkts_alignment += p_cnt[4];
+		p_rx->pkts_code_violation += p_cnt[5];
+		p_rx->pkts_crc += p_cnt[6];
+		p_rx->undersize_pkts += p_cnt[7];
+		p_rx->oversize_pkts += p_cnt[8];
+		p_rx->fragments += p_cnt[9];
+		p_rx->jabbers_not_truncated += p_cnt[10];
+		p_rx->jabbers_truncated += p_cnt[11];
+
+		/* Size-bin counters (offsets 12..21) */
+		p_rx->pkts_64_octets += p_cnt[12];
+		p_rx->pkts_65_to_127_octets += p_cnt[13];
+		p_rx->pkts_128_to_255_octets += p_cnt[14];
+		p_rx->pkts_256_to_511_octets += p_cnt[15];
+		p_rx->pkts_512_to_1023_octets += p_cnt[16];
+		p_rx->pkts_1024_to_1518_octets += p_cnt[17];
+		p_rx->pkts_1519_to_2047_octets += p_cnt[18];
+		p_rx->pkts_2048_to_4095_octets += p_cnt[19];
+		p_rx->pkts_4096_to_8191_octets += p_cnt[20];
+		p_rx->pkts_8192_to_max_octets += p_cnt[21];
+
+		p_rx->mac_drop_events += p_cnt[22];
+		p_rx->pkts_lr += p_cnt[23];
+		p_rx->duplicate += p_cnt[24];
+
+		p_rx->pkts_ip_chksum_error += p_cnt[25];
+		p_rx->pkts_udp_chksum_error += p_cnt[26];
+		p_rx->pkts_tcp_chksum_error += p_cnt[27];
+		p_rx->pkts_giant_undersize += p_cnt[28];
+		p_rx->pkts_baby_giant += p_cnt[29];
+		p_rx->pkts_not_isl_vlan_mpls += p_cnt[30];
+		p_rx->pkts_isl += p_cnt[31];
+		p_rx->pkts_vlan += p_cnt[32];
+		p_rx->pkts_isl_vlan += p_cnt[33];
+		p_rx->pkts_mpls += p_cnt[34];
+		p_rx->pkts_isl_mpls += p_cnt[35];
+		p_rx->pkts_vlan_mpls += p_cnt[36];
+		p_rx->pkts_isl_vlan_mpls += p_cnt[37];
+
+		p_rx->pkts_no_filter += p_cnt[38];
+		p_rx->pkts_dedup_drop += p_cnt[39];
+		p_rx->pkts_filter_drop += p_cnt[40];
+		p_rx->pkts_overflow += p_cnt[41];
+		/* DBS counters only exist when the DBS module is present */
+		p_rx->pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_cnt[42] : 0;
+		p_rx->octets_no_filter += p_cnt[43];
+		p_rx->octets_dedup_drop += p_cnt[44];
+		p_rx->octets_filter_drop += p_cnt[45];
+		p_rx->octets_overflow += p_cnt[46];
+		p_rx->octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_cnt[47] : 0;
+
+		p_rx->ipft_first_hit += p_cnt[48];
+		p_rx->ipft_first_not_hit += p_cnt[49];
+		p_rx->ipft_mid_hit += p_cnt[50];
+		p_rx->ipft_mid_not_hit += p_cnt[51];
+		p_rx->ipft_last_hit += p_cnt[52];
+		p_rx->ipft_last_not_hit += p_cnt[53];
+
+		/*
+		 * Rx totals. Cast to uint64_t before summing so the sum of
+		 * many 32-bit counters cannot wrap in 32-bit arithmetic.
+		 */
+		uint64_t new_drop_events_sum =
+			(uint64_t)p_cnt[22] + p_cnt[38] + p_cnt[39] +
+			p_cnt[40] + p_cnt[41] +
+			(p_nthw_stat->m_dbs_present ? p_cnt[42] : 0);
+
+		uint64_t new_packets_sum =
+			(uint64_t)p_cnt[7] + p_cnt[8] + p_cnt[9] + p_cnt[10] +
+			p_cnt[11] + p_cnt[12] + p_cnt[13] + p_cnt[14] +
+			p_cnt[15] + p_cnt[16] + p_cnt[17] + p_cnt[18] +
+			p_cnt[19] + p_cnt[20] + p_cnt[21];
+
+		p_rx->drop_events += new_drop_events_sum;
+		p_rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += p_cnt[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports: m_nb_tx_port_counters 32-bit words per port */
+	for (p = 0; p < n_tx_ports; p++) {
+		struct port_counters_v2 *const p_tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+		const uint32_t *const p_cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+
+		p_tx->octets += p_cnt[0];
+
+		p_tx->broadcast_pkts += p_cnt[1];
+		p_tx->multicast_pkts += p_cnt[2];
+		p_tx->unicast_pkts += p_cnt[3];
+		p_tx->pkts_alignment += p_cnt[4];
+		p_tx->pkts_code_violation += p_cnt[5];
+		p_tx->pkts_crc += p_cnt[6];
+		p_tx->undersize_pkts += p_cnt[7];
+		p_tx->oversize_pkts += p_cnt[8];
+		p_tx->fragments += p_cnt[9];
+		p_tx->jabbers_not_truncated += p_cnt[10];
+		p_tx->jabbers_truncated += p_cnt[11];
+
+		/* Size-bin counters (offsets 12..21) */
+		p_tx->pkts_64_octets += p_cnt[12];
+		p_tx->pkts_65_to_127_octets += p_cnt[13];
+		p_tx->pkts_128_to_255_octets += p_cnt[14];
+		p_tx->pkts_256_to_511_octets += p_cnt[15];
+		p_tx->pkts_512_to_1023_octets += p_cnt[16];
+		p_tx->pkts_1024_to_1518_octets += p_cnt[17];
+		p_tx->pkts_1519_to_2047_octets += p_cnt[18];
+		p_tx->pkts_2048_to_4095_octets += p_cnt[19];
+		p_tx->pkts_4096_to_8191_octets += p_cnt[20];
+		p_tx->pkts_8192_to_max_octets += p_cnt[21];
+
+		p_tx->mac_drop_events += p_cnt[22];
+		p_tx->pkts_lr += p_cnt[23];
+
+		/*
+		 * Tx totals.
+		 * Bug fix: the drop-event sum previously indexed the DMA area
+		 * with the Rx stride (m_nb_rx_port_counters) although the
+		 * pointer had already been advanced to the Tx area; whenever
+		 * the Rx and Tx strides differ this read the wrong word.
+		 */
+		uint64_t new_drop_events_sum = p_cnt[22];
+
+		uint64_t new_packets_sum =
+			(uint64_t)p_cnt[7] + p_cnt[8] + p_cnt[9] + p_cnt[10] +
+			p_cnt[11] + p_cnt[12] + p_cnt[13] + p_cnt[14] +
+			p_cnt[15] + p_cnt[16] + p_cnt[17] + p_cnt[18] +
+			p_cnt[19] + p_cnt[20] + p_cnt[21];
+
+		p_tx->drop_events += new_drop_events_sum;
+		p_tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += p_cnt[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color flow-statistics counters; one entry per color index. */
+struct color_counters {
+	uint64_t color_packets; /* packets accounted to this color */
+	uint64_t color_bytes; /* bytes accounted to this color */
+	uint8_t tcp_flags; /* TCP flags recorded for this color -- exact semantics not visible here */
+};
+
+/*
+ * Per-host-buffer counters; fields match the DMA layout order
+ * (8 consecutive 32-bit words per host buffer in the stat area).
+ */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets; /* drops attributed to the DBS module */
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Per-port counters for capture (cap) adapters, v2 layout.
+ * Field order largely mirrors the FPGA statistics DMA word order consumed
+ * by the cap stats collector (offsets 0..53 per port).
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events; /* software-derived sum of all drop counters */
+	uint64_t pkts; /* software-derived sum of the size-bin counters */
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	/* RMON-style packet size distribution */
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	/* Encapsulation classification (ISL/VLAN/MPLS combinations) */
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	/* Filter/drop accounting; *_dbs_* only updated when DBS is present */
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	/* IPF-T hit/miss counters -- semantics of first/mid/last not visible here */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Per-port counters for VSWITCH/inline adapters, v1 layout. */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets; /* drops caused by QoS policing */
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * FLM module counters, grouped by the FLM module version that introduced
+ * them (0.17 base set, 0.20 additions). Exact counter semantics are defined
+ * by the FPGA FLM module, not visible in this header.
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current; /* currently learned/active entries */
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/* Aggregated statistics state for one adapter. */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat; /* HW STA module handle */
+	nthw_rmc_t *mp_nthw_rmc; /* RMC module handle (usage not visible here) */
+	struct nt_dma_s *p_stat_dma; /* DMA memory backing the counter area */
+	uint32_t *p_stat_dma_virtual; /* CPU-visible pointer into the DMA area */
+	uint32_t n_stat_size; /* size of the stat DMA area -- presumably bytes; confirm */
+
+	uint64_t last_timestamp; /* timestamp of the last statistics readout */
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Only one of the union members is active, depending on adapter type */
+	union {
+		/* Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		/* Port counters for capture adapters */
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver; /* FLM module version, selects which counters apply */
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+/*
+ * Statistics module lifecycle entry points (implemented in nt4ga_stat.c).
+ * All operate on the statistics state embedded in the adapter info;
+ * the exact split between init and setup is defined by the implementation.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+/* Dump statistics state in textual form to the given stream. */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* Read the statistics DMA area and accumulate into the software counters. */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 6f48af601f..1571111bce 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -16,22 +16,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -41,6 +58,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -56,6 +74,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	/* Copper SFP product numbers known to support 3-speed operation */
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	const size_t n_entries =
+		sizeof(pn_trispeed_list) / sizeof(pn_trispeed_list[0]);
+	size_t idx;
+
+	/* Linear scan is fine: the table is tiny and this is not a hot path */
+	for (idx = 0; idx < n_entries; idx++) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/* True if the NIM type uses page-based addressing above linear address 127. */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP:
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		break;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__, id);
+	return false;
+}
+
+/* Convert the raw NIM id byte cached in the context to the enum type. */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+/*
+ * Perform a raw I2C read or write of seq_cnt bytes at reg_addr on the device
+ * selected by i2c_addr (8-bit bus address). Returns the result of the
+ * underlying nthw_iic access; callers treat 0 as success.
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+	(void)lin_addr; /* Unused */
+
+	if (do_write)
+		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					 seq_cnt, p_data);
+	else
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+/* Returns 0 on success, -1 if reading or writing the page select register fails. */
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Only write new page select value if necessary */
+	if (page_sel != curr_page_sel) {
+		if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+					 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+					 sizeof(page_sel), &page_sel) != 0)
+			return -1;
+	}
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes at linear address 'lin_addr' of a NIM module,
+ * hiding the different access mechanisms behind one linear address space:
+ *  - [0..127]: basic 0xA0 address space (all modules)
+ *  - [128..SFP_PHY_LIN_ADDR): paged 0xA0 space when m_page_addressing is
+ *    set, otherwise flat 0xA2 space (addresses 256..511 map to 0xA2)
+ *  - [SFP_PHY_LIN_ADDR..): 16-bit PHY registers at 0xAC (copper SFPs);
+ *    these are accessed two bytes per register address
+ * Returns 0 on success, -1 on range/parameter errors or I2C failure.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	const int m_port_no = ctx->instance - 2; /* presumably instances 0-1 are not ports -- confirm */
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* PHY accesses consume two bytes per register address */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+/*
+ * Read "length" bytes starting at linear address "lin_addr" into "data".
+ * Page addressing is selected from the module identifier.
+ */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, lin_addr, length,
+				       data, NIM_READ);
+}
+
+/*
+ * Write "length" bytes from "data" starting at linear address "lin_addr".
+ * Page addressing is selected from the module identifier.
+ */
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, lin_addr, length,
+				       data, NIM_WRITE);
+}
+
+/*
+ * Read and return a single byte from linear address "addr".
+ * NOTE(review): the return value of read_data_lin() is ignored, so on an
+ * I2C read failure the returned byte is indeterminate - verify callers
+ * tolerate this (currently only used for optional capability probing).
+ */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+/*
+ * Read the NIM identifier byte into ctx->nim_id.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+	const int rc = nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ);
+
+	return (rc != 0) ? -1 : 0;
+}
+
+/*
+ * Common context initialization for all NIM types: read the module
+ * identifier, then reset all cached identification and state fields.
+ * Returns 0 on success, non-zero when the NIM id cannot be read.
+ */
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	int rc;
+
+	ctx->nim_id = 0;
+	rc = nim_read_id(ctx);
+	if (rc != 0) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return rc;
+	}
+
+	/* Clear all cached identification strings */
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+
+	/* Reset derived state to safe defaults */
+	ctx->content_valid = false;
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1; /* -1 means "all lanes" elsewhere in this file */
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+/* Forward declaration: reads one fixed-length vendor string into p_data. */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+/*
+ * Generates sfp_read_vendor_info() (x empty) and qsfp_read_vendor_info()
+ * (x = q), each filling ctx->vendor_name/prod_no/serial_no/date/rev.
+ * NOTE(review): the address tokens are built as Q##SFP_..., which pastes the
+ * fixed tokens "Q" and "SFP_..." - BOTH expansions therefore read the
+ * QSFP_*_LIN_ADDR addresses, including the SFP variant. Presumably the SFP
+ * variant should use SFP_*_LIN_ADDR addresses instead - verify against the
+ * register header and SFF-8472 before relying on sfp_read_vendor_info().
+ */
+#define XSFP_READ_VENDOR_INFO(x)                                             \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+XSFP_READ_VENDOR_INFO()
+XSFP_READ_VENDOR_INFO(q)
+
+/*
+ * Build the SFP state: zero the state struct and read the nominal bit
+ * rate directly from the module over I2C.
+ */
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	return nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+}
+
+/*
+ * Build the QSFP state. No hardware readings are made; the bit rate code
+ * is derived solely from the module identifier.
+ * Returns 0 on success, -1 when the id is not a QSFP family module.
+ */
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	if (ctx->nim_id == 12U) {
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+	} else if (ctx->nim_id == 13U) {
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+	} else if (ctx->nim_id == 17U) {
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+	} else {
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+/* Dispatch state building to the SFP or QSFP specific implementation. */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	return (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) ?
+	       sfp_nim_state_build(ctx, state) :
+	       qsfp_nim_state_build(ctx, state);
+}
+
+/*
+ * Map a NIM identifier byte to a human readable module name.
+ * Unrecognized identifiers yield "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	static const struct {
+		uint8_t id;
+		const char *text;
+	} id_names[] = {
+		{ 0x00, "UNKNOWN" },     { 0x01, "GBIC" },
+		{ 0x02, "FIXED" },       { 0x03, "SFP/SFP+" },
+		{ 0x04, "300 pin XBI" }, { 0x05, "XEN-PAK" },
+		{ 0x06, "XFP" },         { 0x07, "XFF" },
+		{ 0x08, "XFP-E" },       { 0x09, "XPAK" },
+		{ 0x0A, "X2" },          { 0x0B, "DWDM" },
+		{ 0x0C, "QSFP" },        { 0x0D, "QSFP+" },
+		{ 0x11, "QSFP28" },      { 0x12, "CFP4" },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(id_names) / sizeof(id_names[0]); i++) {
+		if (id_names[i].id == nim_id)
+			return id_names[i].text;
+	}
+	return "ILLEGAL!";
+}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache.
+ * The layout matches the SFF-8472 style base/extended checksums: bytes
+ * [0..62] are summed and checked against byte 63, bytes [64..94] against
+ * byte 95 - TODO confirm against the relevant MSA document.
+ * NOTE(review): the return value of read_data_lin() is ignored; a failed
+ * read validates against stale/uninitialized buffer contents.
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	/* Base section: sum of bytes [0..62] must equal check byte 63 */
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		/* Extended section: sum of bytes [64..94] vs check byte 95 */
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate Select bits (RS0 & RS1).
+ * RS0 (Rx) lives in the control/status register, RS1 (Tx) in the extended
+ * control/status register; both are updated read-modify-write.
+ * NOTE(review): all four nim_nim_read_write_data_lin() return values are
+ * ignored - a failed read makes the subsequent write use indeterminate data.
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	/* Read the Control/Status register and set/reset Soft RS0 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ * Validates the requested speed against the module's speed mask and, for
+ * dual-rate SFP modules, drives the soft rate-select bits.
+ * Returns 0 on success, -1 when the speed is outside the mask.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		/*
+		 * Fix: the format string has three "%s" conversions but only
+		 * two arguments were supplied; pass __func__ for the first
+		 * one (mismatched printf arguments are undefined behavior).
+		 */
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		/* Rate-select goes high when the requested speed is the
+		 * faster of the two rates the module supports.
+		 */
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Enable/disable the SFP TX laser via the soft TX-disable bit, using a
+ * read-modify-write of the control/status register.
+ * Returns 0 on success, non-zero on I2C failure.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t ctrl;
+	int rc;
+
+	rc = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(ctrl), &ctrl, NIM_READ);
+	if (rc != 0)
+		return rc;
+
+	if (disable)
+		ctrl |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		ctrl &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(ctrl), &ctrl, NIM_WRITE);
+}
+
+/*
+ * Enable/disable the QSFP+ TX laser for one lane (lane_idx >= 0) or for
+ * all lanes (lane_idx < 0), read-modify-writing the control/status register.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	const uint8_t mask = (lane_idx < 0) ?
+			     QSFP_SOFT_TX_ALL_DISABLE_BITS :
+			     (uint8_t)(1U << lane_idx);
+	uint8_t ctrl;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_READ) != 0)
+		return -1;
+
+	ctrl = disable ? (uint8_t)(ctrl | mask) : (uint8_t)(ctrl & ~mask);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ * max_len is the size of the p_data buffer including room for the
+ * terminating NUL; at most max_len - 1 data bytes are read.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to
+ * meters, saturating each entry at 65535. A raw value of 255 means
+ * "longer than can be encoded" and also saturates.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++) {
+		uint32_t len;
+
+		if (p_nim_len_info[i] == 255) {
+			ctx->len_info[i] = 65535;
+			continue;
+		}
+
+		len = (uint32_t)p_nim_len_info[i] * p_nim_units[i];
+		ctx->len_info[i] = (len > 65535) ? 65535 : (uint16_t)len;
+	}
+}
+
+/*
+ * Read the basic QSFP+/QSFP28 data: DMI options, vendor strings, supported
+ * fiber lengths and the required power level.
+ * Returns 0 on success, -1 on any I2C failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr]);
+
+	/* Fill vendor_name/prod_no/serial_no/date/rev from the module */
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * Determine whether this QSFP28 module implements explicit rate selection.
+ * If true the user must actively select the desired rate; if false the
+ * module may still support several rates deduced from the product number.
+ * SFF-8636, Rev 2.10a: p40 (6.2.7 Rate Select), p85 (A.2 Rate Select).
+ * Only extended rate select (type 2) is handled here.
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+	uint8_t rate_sel_ena;
+	uint8_t rate_sel_type;
+
+	/* Options register, bit 5: rate select implemented */
+	rate_sel_ena = (uint8_t)((read_byte(ctx, options_reg_addr) >> 5) & 0x01);
+	if (rate_sel_ena == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	rate_sel_type =
+		(uint8_t)((read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03);
+	if (rate_sel_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_sel_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ * Returns 0 on success, -1 when the requested speed cannot be selected.
+ * NOTE(review): the write_data_lin() return values are ignored, so a failed
+ * rate-select write is reported as success.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	/* Rx/Tx rate select registers; one 2-bit field per lane */
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			/* 0xAAAA selects the higher rate on every lane field,
+			 * 0x0000 the lower - TODO confirm against SFF-8636 */
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a link speed to the module. SFP/SFP+ modules use soft rate select;
+ * QSFP28 modules use explicit rate selection only when they implement it.
+ * Returns 0 on success, -1 when the module type does not support it.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return nim_sfp_set_rate_select(ctx, speed);
+
+	if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (!qsfp28_is_speed_selection_enabled(ctx))
+			return 0; /* NIM picks up the speed automatically */
+
+		return qsfp28_set_link_speed(ctx, speed);
+	}
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information.
+ * Validates the module checksums, decodes the DMI and enhanced option
+ * bits, imports supported fiber lengths and determines the required and
+ * current power level. Always returns 0.
+ * NOTE(review): every read_data_lin() return value is ignored here; on an
+ * I2C failure the decoded fields are based on indeterminate data.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	/* All five DMI sensor options come as a package when DMI is present */
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	/* Fill vendor_name/prod_no/serial_no/date/rev from the module */
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	/* Required power level: 2 when the module requests it, otherwise 1 */
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly empirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ *
+ * value_addr:    linear address of the current sensor value(s)
+ * lane_count:    number of per-lane values to read (assumed <= 4, two bytes
+ *                each - data[] holds at most 8 bytes; TODO confirm callers)
+ * limit_addr:    linear address of the 4 big-endian 16-bit limits
+ *                (alarm high/low, warning high/low)
+ * two_compl:     values/limits are signed two's complement (e.g. temperature)
+ * sensor_option: NIM_OPTION_* bit number set in ctx->options when present
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	/* Assemble each big-endian 16-bit value, sign-extending if signed */
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors by probing each sensor's values and limits.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	/* One probe descriptor per potential sensor group */
+	const struct {
+		uint16_t value_addr;
+		uint8_t lane_count;
+		uint16_t limit_addr;
+		bool two_compl;
+		uint32_t option;
+	} sensors[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < sizeof(sensors) / sizeof(sensors[0]); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, sensors[i].value_addr,
+						 sensors[i].lane_count,
+						 sensors[i].limit_addr,
+						 sensors[i].two_compl,
+						 sensors[i].option);
+}
+
+/*
+ * Classify an SFP/SFP+/SFP28 module from its EEPROM (linear address space)
+ * and fill in ctx->port_type plus the SFP specific flags (sfpplus, sfp28,
+ * dual_rate, cu_type, tri_speed, hw/sw_rate_sel).
+ * Register offsets follow SFF-8472; 25G extended compliance codes follow
+ * SFF-8024.
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Byte 12: nominal bit rate in units of 100 Mbit/s */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	/* Static identification fields: connector, compliance codes, DMI options */
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive the set of link speeds supported by an SFP-family module from the
+ * flags collected by sfp_find_port_params() and store it in ctx->speed_mask.
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	uint32_t mask;
+
+	if (ctx->specific_u.sfp.sfp28) {
+		/* SFP28 runs 25G by default; dual-rate parts add 10G */
+		mask = NT_LINK_SPEED_25G;
+		if (ctx->specific_u.sfp.dual_rate)
+			mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* SFP+ runs 10G; dual-rate parts and DAC cables add 1G */
+		mask = NT_LINK_SPEED_10G;
+		if (ctx->specific_u.sfp.dual_rate ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Plain 1G SFP; dual-rate/tri-speed parts add the lower rates */
+		mask = NT_LINK_SPEED_1G;
+		if (ctx->specific_u.sfp.dual_rate ||
+				ctx->specific_u.sfp.tri_speed)
+			mask |= NT_LINK_SPEED_100M;
+		if (ctx->specific_u.sfp.tri_speed)
+			mask |= NT_LINK_SPEED_10M;
+	}
+
+	switch (ctx->port_type) {
+	case NT_PORT_TYPE_SFP_28_CR_CA_L:
+	case NT_PORT_TYPE_SFP_28_CR_CA_S:
+	case NT_PORT_TYPE_SFP_28_CR_CA_N:
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+			NT_LINK_SPEED_1G);
+		break;
+	default:
+		break;
+	}
+
+	ctx->speed_mask = mask;
+}
+
+/*
+ * Classify a QSFP28 module. The default type is the generic
+ * NT_PORT_TYPE_QSFP28; a more specific type is chosen when the extended
+ * specification compliance code is present and recognized.
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	ctx->port_type = NT_PORT_TYPE_QSFP28;
+
+	/* Bit 7: extended compliance code in use */
+	if ((fiber_chan_speed & (1 << 7)) == 0)
+		return;
+
+	/* SFF-8024, Rev 4.7, Table 4-4 */
+	uint8_t ext_code = 0;
+
+	read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1, &ext_code);
+
+	static const struct {
+		uint8_t code;
+		nt_port_type_t type;
+	} code_map[] = {
+		{ 0x02, NT_PORT_TYPE_QSFP28_SR4 },
+		{ 0x03, NT_PORT_TYPE_QSFP28_LR4 },
+		{ 0x0B, NT_PORT_TYPE_QSFP28_CR_CA_L },
+		{ 0x0C, NT_PORT_TYPE_QSFP28_CR_CA_S },
+		{ 0x0D, NT_PORT_TYPE_QSFP28_CR_CA_N },
+		{ 0x25, NT_PORT_TYPE_QSFP28_DR },
+		{ 0x26, NT_PORT_TYPE_QSFP28_FR },
+		{ 0x27, NT_PORT_TYPE_QSFP28_LR },
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(code_map); i++) {
+		if (code_map[i].code == ext_code) {
+			ctx->port_type = code_map[i].type;
+			return;
+		}
+	}
+}
+
+/*
+ * If true, the user must actively select the desired rate. If false, the
+ * module may still support several rates without requiring the user to
+ * select one of them; the supported rates must then be deduced from the
+ * product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	/* Register addresses per the SFF-8636 memory map */
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* Options register: rate select implemented? */
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	/* Enhanced options register: which rate select mechanism is used */
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	/* Extended rate select compliance: only version 2 is handled */
+	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
+				   0x03; /* bit 1..0 */
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive the supported speeds for a QSFP28 module into ctx->speed_mask.
+ * lane_idx < 0 means the context covers all four lanes.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = (ctx->lane_idx < 0);
+	const bool pam4 = (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR);
+
+	if (pam4) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/*
+ * Classify a QSFP+ module as passive DAC, active DAC or optical based on the
+ * transmitter technology nibble (SFF-8636 device technology byte).
+ */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	/* The upper nibble encodes the transmitter technology */
+	const uint8_t tech = (uint8_t)(device_tech & 0xF0);
+
+	if (tech == 0xA0 || tech == 0xB0) {
+		/* Copper cable, unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tech >= 0xC0) {
+		/* Copper cable with active equalizers (limiting or linear) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Anything below 0xA0 is an optical transmitter */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* 40G when the context spans all lanes, 10G when bound to a single lane */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read the basic EEPROM data for an SFP module and, on success, classify the
+ * port and set the supported speed mask. Returns the result of
+ * sfp_read_basic_data() (zero on success).
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	int res = sfp_read_basic_data(ctx);
+
+	if (res)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return 0;
+}
+
+/* Bind the context to one lane (or all lanes when lane_idx is -1) and mark
+ * the module as plain QSFP+ until qsfp28_preinit() says otherwise.
+ */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * Common pre-initialization for QSFP+ (also used as the first stage of
+ * qsfp28_preinit()): read the basic EEPROM data, classify the port, detect
+ * the available sensor options and the TX_DISABLE capability, and set the
+ * supported speed mask. Returns the result of qsfpplus_read_basic_data()
+ * (zero on success).
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Give a QSFP28 module time to become ready after reset. If the module
+ * implements the init-complete flag (SFF-8636 rev compliance >= 7, i.e.
+ * rev 2.5 and later) poll that flag with a timeout; otherwise wait a fixed
+ * 500 ms. Also caches the revision compliance byte in the context.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		/* NOTE(review): 11 iterations of 100 ms, i.e. ~1.1 s worst case */
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		/* Status register: bit 0 clears when initialization completes */
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine which FEC types the module supports and whether they can be
+ * controlled. Modules on the known-product list unconditionally get
+ * controllable media side FEC (enabled by default); for all other modules
+ * the capability registers are consulted.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Match the product number by string content. The previous
+		 * pointer comparison (ctx->prod_no == nim_list[i]) compared
+		 * the array address with a literal and could never be true.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * QSFP28 pre-initialization: run the common QSFP+ pre-init first, then wait
+ * for the module to become ready and collect the QSFP28 specific parameters
+ * (port type, FEC options, speed mask). Returns zero on success.
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (res)
+		return res;
+
+	qsfp28_wait_for_ready_after_reset(ctx);
+	memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+	       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+	ctx->specific_u.qsfp.qsfp28 = true;
+	qsfp28_find_port_params(ctx);
+	qsfp28_get_fec_options(ctx);
+	qsfp28_set_speed_mask(ctx);
+	return 0;
+}
+
+/*
+ * Build the sensor group list for an SFP-family module on port @m_port_no:
+ * temperature, supply voltage, TX bias current, TX power and RX power.
+ * The list head is stored in nim_sensors_ptr[m_port_no] and the number of
+ * successfully allocated groups is returned through @nim_sensors_cnt.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	/* Validate every pointer argument before dereferencing any of them;
+	 * the old code wrote *nim_sensors_cnt before the checks and never
+	 * checked nim_sensors_cnt itself.
+	 */
+	if (ctx == NULL || nim_sensors_ptr == NULL || nim_sensors_cnt == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	*nim_sensors_cnt = 0;
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	if (sensor == NULL) {
+		/* Guard against allocation failure instead of crashing */
+		NT_LOG(ERR, ETHDEV, "%s: sensor group allocation failed\n",
+		       __func__);
+		return;
+	}
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/*
+	 * The level1 sensors (voltage, bias current, tx power, rx power) share
+	 * their descriptions array and differ only in the read callback.
+	 */
+	static void (*const read_funcs[])(struct nim_sensor_group *,
+					  nthw_spis_t *) = {
+		nim_read_sfp_voltage, /* voltage */
+		nim_read_sfp_bias_current, /* bias current */
+		nim_read_sfp_tx_power, /* tx power */
+		nim_read_sfp_rx_power, /* rx power */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(read_funcs); i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no,
+							 ctx,
+							 NT_SENSOR_SOURCE_PORT,
+							 &sfp_sensors_level1[i]);
+		if (sensor->next == NULL) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: sensor group allocation failed\n",
+			       __func__);
+			return;
+		}
+		sensor = sensor->next;
+		sensor->read = read_funcs[i];
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Build the sensor group list for a QSFP+/QSFP28 module on port @m_port_no:
+ * temperature, supply voltage, and per-lane bias current, TX power and RX
+ * power (four lanes each, indices into qsfp_sensor_level1[]).
+ * NOTE(review): unlike sfp_nim_add_all_sensors() this function does not zero
+ * *nim_sensors_cnt first, and the return values of
+ * allocate_nim_sensor_group() are dereferenced without NULL checks --
+ * confirm callers guarantee both.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one sensor group node for @port and attach a sensor built from the
+ * given description. Returns NULL if the node itself cannot be allocated;
+ * the caller owns the returned node.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof *sg);
+
+	if (!sg) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+
+	return sg;
+}
+
+/*
+ * Classify the NIM from its identifier byte, run the type specific pre-init
+ * and register its sensors. @extra optionally points to an int8_t lane index
+ * for QSFP modules (absent means all lanes, -1). Returns zero on success and
+ * non-zero when construction, pre-init or the NIM type itself fails.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+	const int8_t lane_idx = extra ? *(int8_t *)extra : (int8_t)-1;
+	int rc;
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/* Propagate pre-init failures instead of discarding them */
+		rc = sfp_preinit(ctx);
+		if (rc && !res)
+			res = rc;
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		rc = qsfpplus_preinit(ctx, lane_idx);
+		if (rc && !res)
+			res = rc;
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		rc = qsfp28_preinit(ctx, lane_idx);
+		if (rc && !res)
+			res = rc;
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* Minimal NIM state exposed to link handling (filled by nim_state_build()) */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * Per-module I2C context: identification data read from the module EEPROM
+ * plus the classification results produced by the *_preinit() functions.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance; /* NOTE(review): presumably the IIC instance used for hwiic -- confirm */
+	uint8_t devaddr; /* I2C device address (cf. nim_i2c_0xa0 etc.) -- TODO confirm */
+	uint8_t regaddr; /* NOTE(review): looks like a register address -- confirm usage */
+	uint8_t nim_id; /* module identifier (see enum nt_nim_identifier_e) */
+	nt_port_type_t port_type; /* classified port type (set by *_find_port_params()) */
+
+	char vendor_name[17]; /* vendor name from module EEPROM (16 chars + NUL) */
+	char prod_no[17]; /* product number from module EEPROM (16 chars + NUL) */
+	char serial_no[17]; /* serial number from module EEPROM (16 chars + NUL) */
+	char date[9]; /* date code from module EEPROM (8 chars + NUL) */
+	char rev[5]; /* revision from module EEPROM (4 chars + NUL) */
+	bool avg_pwr; /* NOTE(review): presumably RX power is averaged (vs OMA) -- confirm */
+	bool content_valid; /* basic EEPROM content read successfully -- TODO confirm */
+	uint8_t pwr_level_req; /* requested power level -- TODO confirm semantics */
+	uint8_t pwr_level_cur; /* current power level -- TODO confirm semantics */
+	uint16_t len_info[5]; /* supported length info (cf. QSFP_SUP_LEN_INFO_ADDR, 5 bytes) */
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count; /* number of lanes (4 for QSFP modules) */
+	uint32_t options; /* bitmask of (1 << NIM_OPTION_*) capabilities */
+	bool tx_disable; /* TX_DISABLE is implemented by the module */
+	bool dmi_supp; /* diagnostic monitoring supported -- TODO confirm */
+
+	union {
+		struct {
+			bool sfp28; /* module is SFP28 (25G) */
+			bool sfpplus; /* module is SFP+ */
+			bool dual_rate; /* supports two rates (e.g. 25G/10G, 10G/1G) */
+			bool hw_rate_sel; /* hard RATE_SELECT bit implemented */
+			bool sw_rate_sel; /* soft RATE_SELECT bit implemented */
+			bool cu_type; /* copper module */
+			bool tri_speed; /* 10/100/1000M tri-speed copper */
+			bool ext_cal; /* external calibration -- TODO confirm */
+			bool addr_chg; /* NOTE(review): address-change handling flag -- confirm */
+		} sfp;
+
+		struct {
+			bool rx_only; /* RX-only module (from NIM_OPTION_RX_ONLY) */
+			bool qsfp28; /* module is QSFP28 rather than plain QSFP+ */
+			union {
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
+					bool media_side_fec_ctrl; /* media side FEC can be controlled */
+					bool host_side_fec_ctrl; /* host side FEC can be controlled */
+					bool media_side_fec_ena; /* media side FEC enabled */
+					bool host_side_fec_ena; /* host side FEC enabled -- TODO confirm setter */
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* Singly linked list node tying a sensor to its NIM context and read hook */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor; /* adapter-level sensor descriptor */
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi); /* fetches a reading */
+	struct nim_i2c_ctx *ctx; /* NIM the sensor belongs to */
+	struct nim_sensor_group *next; /* next group for the same port, or NULL */
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify a NIM based on its ID and some register
+ * reads, and collects the information into the ctx structure. The @extra
+ * parameter may contain the initialization argument for specific types of
+ * NIMs.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+/* Capability flags; stored in nim_i2c_ctx::options as (1 << NIM_OPTION_*) */
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+/*
+ * Module identifiers matching the identifier byte read from the module
+ * EEPROM (cf. NIM_IDENTIFIER_ADDR).
+ */
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+/* Values are appended over time; do not renumber existing entries */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/*
+ * Map a single nt_link_speed_t value to its short human readable name.
+ * Unknown values trigger a debug assert and return "Unhandled".
+ */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *name;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].name;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/* Convert a link speed enum value to the corresponding rate in bits/second */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	const uint64_t mega = 1000ULL * 1000ULL;
+	const uint64_t giga = 1000ULL * mega;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * mega;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * mega;
+	case NT_LINK_SPEED_1G:
+		return 1ULL * giga;
+	case NT_LINK_SPEED_10G:
+		return 10ULL * giga;
+	case NT_LINK_SPEED_25G:
+		return 25ULL * giga;
+	case NT_LINK_SPEED_40G:
+		return 40ULL * giga;
+	case NT_LINK_SPEED_50G:
+		return 50ULL * giga;
+	case NT_LINK_SPEED_100G:
+		return 100ULL * giga;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render a link speed bitmask as a comma separated list of speed names into
+ * @buffer (capacity @length bytes, always NUL terminated). Returns @buffer.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			/*
+			 * The strncat() bound must be the remaining capacity,
+			 * not the total buffer size; passing the total size
+			 * (as before) could write past the end of the buffer.
+			 */
+			if (len > 0 && (length - len - 1) > 2) {
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
/*
 * Link speed.
 * Note this is a bitmask: values may be OR'ed together into a
 * link-speed mask (see nt_translate_link_speed_mask()).
 * Bit values are not in speed order (50G/25G presumably added after
 * 40G/100G) - do not renumber; the values are part of the interface.
 */
enum nt_link_speed_e {
	NT_LINK_SPEED_UNKNOWN = 0,
	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
};

typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include <arpa/inet.h>
#include <stdbool.h>

#include "qsfp_sensors.h"

#include "ntlog.h"
#include "qsfp_registers.h"
+
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		*p_lane_values = (*p_lane_values); /* Swap to little endian */
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_temperature(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)(res * 10 / 256));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_supply_voltage(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)((res) / 10));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_bias_current(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i] * 2);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_rx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
/* ID-field addresses (duplicate SFP_CU_LINK_LEN_LIN_ADDR define removed) */
#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
/*
 * Return calibrated data from an SFP module.
 * It is first investigated if external calibration is to be used and if it is
 * calibration data is retrieved. The function can only be used when calibration
 * consists of a slope and offset factor (per SFF-8472: value = raw * slope/256
 * + offset). After function return p_value will point to 16bit data in host
 * byte order that can be either signed or unsigned.
 *
 * @param data_addr   linear address of the raw 16-bit measurement
 * @param slope_addr  linear address of the 16-bit calibration slope
 * @param offset_addr linear address of the 16-bit calibration offset
 * @param p_value     in/out: receives the (possibly calibrated) 16-bit value
 * @param signed_data true when the measurement is signed (e.g. temperature)
 * @param ctx         NIM I2C context
 * @return false when the module has no DMI support, true otherwise
 */
static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
			       uint16_t offset_addr, void *p_value,
			       bool signed_data, nim_i2c_ctx_p ctx)
{
	int32_t value;
	uint16_t slope = 1;
	int16_t offset = 0;

	if (!ctx->dmi_supp)
		return false;

	/* Read data in big endian format */
	read_data_lin(ctx, data_addr, 2, p_value);
	*(uint16_t *)p_value =
		htons(*(uint16_t *)p_value); /* Swap to little endian */

	/*
	 * Inject test value which can be both signed and unsigned but handle
	 * here as unsigned
	 */
#ifdef NIM_DMI_TEST_VALUE
	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
#endif

	/* Test builds force the external-calibration path below */
#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
	ctx->specific_u.sfp.ext_cal = true;
#endif

	if (ctx->specific_u.sfp.ext_cal) {
		/* External calibration is needed */
		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);

		/* Swap calibration to little endian */
		slope = htons(slope);
		offset = htons(offset);

#ifdef NIM_DMI_TEST_SLOPE
		slope = NIM_DMI_TEST_SLOPE;
#endif

#ifdef NIM_DMI_TEST_OFFSET
		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
#endif

		/* Apply slope/offset in 32-bit then saturate to 16-bit range */
		if (signed_data) {
			value = *(int16_t *)p_value * slope / 256 + offset;

			if (value > INT16_MAX)
				value = INT16_MAX;
			else if (value < INT16_MIN)
				value = INT16_MIN;

			*(int16_t *)p_value = (int16_t)value;
		} else {
			value = *(uint16_t *)p_value * slope / 256 + offset;

			if (value > UINT16_MAX)
				value = UINT16_MAX;
			else if (value < 0)
				value = 0;

			*(uint16_t *)p_value = (uint16_t)value;
		}
	}

	return true;
}
+
+/*
+ * Read NIM temperature
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM bias current
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
/*
 * Return the SFP received power in units of 0.1uW from DMI data.
 * If external calibration is necessary, the calibration data is retrieved and
 * the calibration is carried out (SFF-8472 4th-order polynomial; the
 * coefficient array is stored highest-order first, so rx_pwr_cal[4] is the
 * constant term Coeff0).
 *
 * @return true on success; false when the calibrated result exceeds 16 bits
 */
static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
		uint16_t *p_value)
{
	float rx_pwr_cal[5];
	float power_raised;
	float rx_power;

	/* Read data in big endian format */
	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
	*(uint16_t *)p_value =
		htons(*(uint16_t *)p_value); /* Swap to little endian */

#ifdef NIM_DMI_RX_PWR_TEST_VALUE
	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
#endif

#ifdef NIM_DMI_RX_PWR_CAL_DATA
	ctx->specific_u.sfp.ext_cal = true;
#endif

	if (ctx->specific_u.sfp.ext_cal) {
		/* Read calibration data in big endian format */
		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
			    rx_pwr_cal);

		for (int i = 0; i < 5; i++) {
			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
			*p_val = ntohl(*p_val); /* 32 bit swap */
		}

#ifdef NIM_DMI_RX_PWR_CAL_DATA
		/* Testdata for verification */
		NIM_DMI_RX_PWR_CAL_DATA
#endif

		/*
		 * If SFP module specifies external calibration - use calibration data
		 * according to the polynomial correction formula
		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
		 */
		power_raised = 1.0;
		rx_power = rx_pwr_cal[4]; /* Coeff0 */

		/* Horner-free accumulation: rx_pwr_cal[3-i] pairs with RxPwr^(4-i) */
		for (int i = 3; i >= 0; i--) {
			power_raised *= (float)*p_value;
			rx_power += rx_pwr_cal[i] * power_raised;
		}

		/* Check out for out of range */
		if (rx_power > 65535)
			return false;

		/*
		 * NOTE(review): overrange fails but negative results clamp to
		 * 0 - asymmetric handling; confirm this is intentional.
		 */
		if (rx_power < 0)
			*p_value = 0;
		else
			*p_value = (uint16_t)rx_power;
	}

	return true;
}
+
+/*
+ * Read RX optical power if it exists
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_temperature(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)(temp * 10 / 256));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor,
+				    (int)(temp / 10)); /* Unit: 100uV -> 1mV */
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)(temp * 2));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_power(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)temp);
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_rx_power(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)temp);
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	nthw_gmf_t *p = malloc(sizeof(nthw_gmf_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_gmf_t));
+	return p;
+}
+
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_gmf_t));
+		free(p);
+	}
+}
+
/*
 * Bind a GMF instance to its FPGA module registers and fields.
 *
 * @param p          instance to initialize; when NULL the call only probes
 *                   for the module's existence
 * @param p_fpga     FPGA handle
 * @param n_instance GMF module instance number
 * @return 0 on success, -1 when the module instance does not exist
 */
int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);

	/* Probe-only mode: report presence without initializing anything */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_gmf = mod;

	/* Mandatory registers/fields: fetched with the non-query accessors */
	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
	p->mp_ctrl_ifg_auto_adjust_enable =
		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);

	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);

	p->mp_ifg_clock_delta =
		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
	p->mp_ifg_clock_delta_delta =
		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);

	p->mp_ifg_max_adjust_slack =
		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
	p->mp_ifg_max_adjust_slack_slack =
		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);

	p->mp_debug_lane_marker =
		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
	p->mp_debug_lane_marker_compensation =
		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);

	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
	p->mp_stat_sticky_data_underflowed =
		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
	p->mp_stat_sticky_ifg_adjusted =
		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);

	p->mn_param_gmf_ifg_speed_mul =
		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
	p->mn_param_gmf_ifg_speed_div =
		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);

	p->m_administrative_block = false;

	/*
	 * Optional registers/fields below: looked up with the query accessors
	 * and left NULL when the FPGA image does not provide them.
	 */
	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
	if (p->mp_stat_next_pkt) {
		p->mp_stat_next_pkt_ns =
			register_query_field(p->mp_stat_next_pkt,
					     GMF_STAT_NEXT_PKT_NS);
	} else {
		p->mp_stat_next_pkt_ns = NULL;
	}
	p->mp_stat_max_delayed_pkt =
		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
	if (p->mp_stat_max_delayed_pkt) {
		p->mp_stat_max_delayed_pkt_ns =
			register_query_field(p->mp_stat_max_delayed_pkt,
					     GMF_STAT_MAX_DELAYED_PKT_NS);
	} else {
		p->mp_stat_max_delayed_pkt_ns = NULL;
	}
	p->mp_ctrl_ifg_tx_now_always =
		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
	p->mp_ctrl_ifg_tx_on_ts_always =
		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);

	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);

	p->mp_ifg_clock_delta_adjust =
		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
	if (p->mp_ifg_clock_delta_adjust) {
		p->mp_ifg_clock_delta_adjust_delta =
			register_query_field(p->mp_ifg_clock_delta_adjust,
					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
	} else {
		p->mp_ifg_clock_delta_adjust_delta = NULL;
	}
	return 0;
}
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->m_administrative_block)
+		field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock) {
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+				    enable ? 1 : 0);
+	}
+}
+
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable);
+}
+
/*
 * Write the raw IFG speed value to the SPEED register.
 * @return 0 on success, -1 when the value is out of range.
 * NOTE(review): the range check admits values up to 2^(bit_width-1), i.e.
 * only half of the field's numeric range - presumably intentional headroom;
 * confirm against the GMF register specification.
 */
int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
{
	if (n_speed_val <=
			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
		/* Write one or two 32-bit words depending on the field width */
		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
			      2));
		field_flush_register(p->mp_speed_ifg_speed);
		return 0;
	}
	return -1;
}
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	assert(n_bit_width >=
+	       22); /* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	return n_bit_width;
+}
+
/*
 * Program the IFG rate limiter from an absolute rate.
 *
 * @param n_rate_limit_bits target rate
 * @param n_link_speed      link speed in the same unit (ratio is what matters)
 * @return 0 on success, -1 when the derived raw value is out of range
 *
 * The ratio is scaled by the product parameters NT_GMF_IFG_SPEED_MUL/DIV and
 * converted to the register's fixed-point format using half the field width
 * as the fractional part.
 */
int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
			    const uint64_t n_link_speed)
{
	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
	const double f_adj_rate =
		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
			  p->mn_param_gmf_ifg_speed_mul) /
		 p->mn_param_gmf_ifg_speed_div);
	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
	uint64_t n_speed_val = (uint64_t)round(f_speed);

	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
}
+
/*
 * Program the IFG rate limiter from a percentage of the link speed.
 * 0.0 and 100.0 both write raw 0 (no limiting); values up to 99 are converted
 * to the fixed-point register format; anything else returns -1.
 * NOTE(review): values strictly between 99 and 100 are rejected, and negative
 * input falls into the <= 99 branch - confirm the intended valid range.
 * @return 0 on success, -1 on rejected input or out-of-range raw value
 */
int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
{
	uint64_t n_speed_val;

	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
		n_speed_val = 0;
	} else if (f_rate_limit_percent <= 99) {
		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
		const double f_adj_rate =
			((double)(f_rate_limit_percent *
				  (double)p->mn_param_gmf_ifg_speed_mul) /
			 p->mn_param_gmf_ifg_speed_div / 100);
		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);

		n_speed_val = (uint64_t)f_speed;
	} else {
		return -1;
	}

	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
}
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
+}
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed))
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted))
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = ULONG_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = ULONG_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/*
+ * Status bits reported by nthw_gmf_get_status_sticky() and accepted by
+ * nthw_gmf_set_status_sticky(). The values are used as bit masks
+ * (1 and, implicitly, 2).
+ */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/*
+ * GMF module instance state: cached register/field handles (populated by
+ * nthw_gmf_init()) plus derived IFG-speed parameters.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga; /* owning FPGA instance */
+	nt_module_t *mp_mod_gmf; /* GMF module handle */
+	int mn_instance; /* module instance number */
+
+	/* CTRL register and fields */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	/* IFG speed (rate limit) register */
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	/* Clock delta and delta-adjust registers (64-bit fields) */
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	/* Sticky status flags (see enum gmf_status_mask) */
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* Optional statistics registers; NULL when absent in the FPGA */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* IFG speed scaling product parameters */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One register write in the Si5340 configuration sequence */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+/*
+ * Si5340 rev D register configuration, applied in order at init time.
+ * Fix: declared 'static' — a non-static const definition in a header
+ * has external linkage in C and would cause duplicate-symbol link
+ * errors if this header were included from more than one translation
+ * unit.
+ */
+static si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC instance.
+ * Returns NULL on allocation failure; free with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc() folds the previous malloc+memset into one call */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/* Release an RMC instance previously returned by nthw_rmc_new(). */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the instance before handing the memory back */
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Look up RMC instance @n_instance in @p_fpga and cache its register and
+ * field handles in @p.
+ *
+ * When @p is NULL the call degenerates into a probe: the return value
+ * only reports whether the module instance exists (0) or not (-1).
+ * The STATUS, DBG and MAC_IF registers are optional; their handles stay
+ * NULL when the FPGA image does not provide them (the getters below
+ * return 0xffffffff in that case).
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params: RX port count falls back to the generic port count */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* Mandatory CTRL register */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* Optional registers: query (not get) so absence is not an error */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Current MAC-port blocking mask (re-reads CTRL from hardware) */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/* STATUS.SF_RAM_OF, or 0xffffffff when the STATUS register is absent */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_sf_ram_of) :
+	       0xffffffff;
+}
+
+/* STATUS.DESCR_FIFO_OF, or 0xffffffff when the STATUS register is absent */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_descr_fifo_of) :
+	       0xffffffff;
+}
+
+/* DBG.MERGE, or 0xffffffff when the DBG register is absent */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	return (p->mp_reg_dbg) ? field_get_updated(p->mp_fld_dbg_merge) : 0xffffffff;
+}
+
+/* MAC_IF.ERR, or 0xffffffff when the MAC_IF register is absent */
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Overwrite the MAC-port blocking mask and flush it to hardware */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block all RX traffic paths (statistics drop, keep-alive and every MAC
+ * port). A no-op while the administrative (license) block is active.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Permanently block all MAC ports (license enforcement). Once the flag
+ * is set, nthw_rmc_block()/nthw_rmc_unblock() become no-ops.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * RMC module instance state: module/register/field handles plus derived
+ * port and NIM counts. Populated by nthw_rmc_init(); the optional
+ * register handles are NULL when absent from the FPGA image.
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports;
+	int mn_nims;
+	bool mb_is_vswitch;
+
+	/* When set, block/unblock become no-ops (license enforcement) */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Next free FPGA result slot per adapter (see get_fpga_idx()).
+ * Fix: declared 'static' — this table is file-local state and must not
+ * leak an external symbol into the global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * This function setups monitoring of AVR sensors.
+ *
+ * Builds a single-entry setup block instructing the AVR to sample the
+ * given device register and deliver results into the next free FPGA
+ * slot for this adapter; returns that slot index.
+ * NOTE(review): the slot index is returned even when
+ * nt_avr_sensor_mon_setup() fails — callers cannot detect the error.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format: b0,1 = endianness, b2,3 = signedness (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Sensor-group read callback: fetch the latest AVR result from the
+ * group's FPGA slot and update the sensor value through the group's
+ * conversion function.
+ * NOTE(review): the sensor_read() return status is ignored, so on a
+ * failed read an indeterminate p_sensor_result would be converted —
+ * confirm the underlying call always writes the output.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t p_sensor_result;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+/*
+ * Create and register a sensor group backed by an AVR-monitored sensor.
+ * Returns the new group, or NULL on allocation failure.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	/*
+	 * Fix: allocate_sensor() can fail; the old code dereferenced the
+	 * result unconditionally (NULL-pointer dereference) and leaked sg.
+	 */
+	if (sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/* Return the adapter's current free FPGA slot index, then advance it. */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	const uint8_t idx = s_fpga_indexes[adapter_no];
+
+	s_fpga_indexes[adapter_no] = (uint8_t)(idx + 1);
+	return idx;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Sensor-group read callback: read the TEMPMON_STAT temperature field
+ * and store the converted value. @t_spi is unused — the FPGA die
+ * temperature comes from a register, not the SPI sensor interface.
+ * The linear conversion constants presumably come from the FPGA
+ * vendor's ADC transfer function — TODO confirm units and coefficients
+ * against the sysmon documentation.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create a sensor group exposing the FPGA die temperature via TEMPMON.
+ * Returns the new group, or NULL when the group cannot be allocated.
+ * NOTE(review): the results of tempmon_new() and allocate_sensor() are
+ * stored without NULL checks; the read callback guards sg->sensor, but
+ * confirm callers tolerate a NULL monitor/sensor in the group.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate an uninitialized temperature monitor; logs and returns NULL
+ * on allocation failure. Fields are populated by tempmon_init().
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *temp =
+		malloc(sizeof(struct nt_fpga_sensor_monitor));
+	if (temp == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return temp;
+}
+
+/*
+ * Bind the monitor to the TEMPMON module of @p_fpga and cache the
+ * TEMPMON_STAT.TEMP field handle in t->fields[0].
+ * On any lookup/allocation failure an error is logged and the monitor
+ * is left with NULL handles.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* Start from a known state so failed lookups leave NULLs behind */
+	t->fpga = p_fpga;
+	t->mod = NULL;
+	t->reg = NULL;
+	t->fields = NULL;
+	/* fetch initialized module */
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		/* Fix: bail out instead of passing a NULL module on */
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return;
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		/* Fix: bail out instead of passing a NULL register on */
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return;
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+/* Level 0: mandatory module temperature (name filled in automatically) */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level 1: per-module supply voltage, TX bias current and optical power */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* Sensors for QSFP/QSFP+/QSFP28; same layout with one entry per lane */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices; trailing comments list the boards using each one */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/* Wire format of one sensor setup entry; packed to match the AVR side */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+#pragma pack()
+/*
+ * NOTE(review): default packing is restored before this struct, so
+ * sensor_mon_setup16 itself is not packed. Since the packed entry type
+ * has alignment 1 there is no padding in practice, but confirm this
+ * matches the AVR wire format.
+ */
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res = 1;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = (uint16_t)*rxsz, .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
/*
 * @internal
 * @brief AVR Device Enum
 *
 * Global names for identifying an AVR device for Generation2 adapters.
 * Values are implicit (NTAVR_MAINBOARD == 0, NTAVR_FRONTBOARD == 1).
 */
enum ntavr_device {
	NTAVR_MAINBOARD, /* Mainboard AVR device */
	NTAVR_FRONTBOARD /* Frontboard AVR device */
};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
/*
 * Generic SFP/SFP+/SFP28 sensors
 *
 * These sensors should be used instead of all adapter specific SFP sensors
 * that have been deprecated.
 */
enum nt_sensors_sfp {
	NT_SENSOR_SFP_TEMP, /* Module temperature */
	NT_SENSOR_SFP_SUPPLY, /* Supply voltage */
	NT_SENSOR_SFP_TX_BIAS, /* TX bias current */
	NT_SENSOR_SFP_TX_POWER, /* TX optical power */
	NT_SENSOR_SFP_RX_POWER, /* RX optical power */
};
+
/*
 * Generic QSFP/QSFP+/QSFP28 sensors
 *
 * These sensors should be used instead of all adapter specific QSFP sensors
 * that have been deprecated.
 */
enum nt_sensors_qsfp {
	NT_SENSOR_QSFP_TEMP, /* Module temperature */
	NT_SENSOR_QSFP_SUPPLY, /* Supply voltage */
	NT_SENSOR_QSFP_TX_BIAS1, /* TX bias, lane 1 */
	NT_SENSOR_QSFP_TX_BIAS2, /* TX bias, lane 2 */
	NT_SENSOR_QSFP_TX_BIAS3, /* TX bias, lane 3 */
	NT_SENSOR_QSFP_TX_BIAS4, /* TX bias, lane 4 */
	NT_SENSOR_QSFP_TX_POWER1, /* TX optical power, lane 1 */
	NT_SENSOR_QSFP_TX_POWER2, /* TX optical power, lane 2 */
	NT_SENSOR_QSFP_TX_POWER3, /* TX optical power, lane 3 */
	NT_SENSOR_QSFP_TX_POWER4, /* TX optical power, lane 4 */
	NT_SENSOR_QSFP_RX_POWER1, /* RX optical power, lane 1 */
	NT_SENSOR_QSFP_RX_POWER2, /* RX optical power, lane 2 */
	NT_SENSOR_QSFP_RX_POWER3, /* RX optical power, lane 3 */
	NT_SENSOR_QSFP_RX_POWER4, /* RX optical power, lane 4 */
};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Master/Slave
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_MASTER, /* Adapter is master in the bonding */
+	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/* Interpret the low 16 bits of the raw sensor word as a signed value. */
int null_signed(uint32_t p_sensor_result)
{
	const int16_t raw = (int16_t)p_sensor_result;
	return raw;
}
+
/* Interpret the low 16 bits of the raw sensor word as an unsigned value. */
int null_unsigned(uint32_t p_sensor_result)
{
	const uint16_t raw = (uint16_t)p_sensor_result;
	return raw;
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vch value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.015 (PRESCALE is accounted for), i.e. 15 mV per LSB
 * ******************************************************************************
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	const uint32_t millivolts = p_sensor_result * 15u;
	return millivolts; /* NT unit: 1mV */
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vin value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.0125, computed as 25/2 to stay in integer arithmetic
 * ******************************************************************************
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	const uint32_t millivolts = (p_sensor_result * 25u) / 2u;
	return millivolts; /* NT unit: 1mV */
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Tj value to Napatech internal representation
 * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
 *                      = ReadVal * 5K
 * ******************************************************************************
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * 2730 is used instead of the more correct 2732 because the
	 * temperature step is 5 degrees, which makes these steps more natural.
	 */
	const uint32_t kelvin_tenths = p_sensor_result * 50u;

	return (int)(kelvin_tenths - 2730u); /* NT unit: 0.1C */
}
+
/*
 * ******************************************************************************
 * Conversion function for the Linear Technology Linear_5s_11s format.
 * Returns Y * 2**N, where N = b[15:11] is a 5-bit two's complement integer
 * and Y = b[10:0] is an 11-bit two's complement integer.
 * The multiplier value is used for scaling to Napatech units.
 * ******************************************************************************
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	/* Sign-extend the 11-bit mantissa Y (b[10:0]) */
	int mantissa = value & 0x07FF;

	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent N (b[15:11]) */
	int exponent = (value >> 11) & 0x1F;

	if (exponent & 0x10)
		exponent -= 0x20;

	/* Scale first, then apply the power-of-two exponent */
	const int scaled = mantissa * multiplier;

	if (exponent > 0)
		return scaled * (1 << exponent);
	if (exponent < 0)
		return scaled / (1 << (-exponent));
	return scaled;
}
+
/*
 * ******************************************************************************
 * Temperature conversion from Linear_5s_11s format.
 * NOTE(review): the uint16_t cast discards negative results from
 * conv5s_11s() - confirm sub-zero temperatures cannot occur here.
 * ******************************************************************************
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	const uint16_t tenths =
		(uint16_t)conv5s_11s((uint16_t)p_sensor_result, 10);
	return tenths; /* NT unit: 0.1C */
}
+
/*
 * ******************************************************************************
 * For MP2886a: Convert a read Tj value to Napatech internal representation
 * READ_TEMPERATURE (register 0x8D) is a 2-byte unsigned integer, so only the
 * low 16 bits of the raw word are significant.
 * ******************************************************************************
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	const uint16_t temperature = (uint16_t)p_sensor_result;

	return temperature; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * ******************************************************************************
 * For DS1775: Convert a read temperature value to Napatech internal
 * representation. NT unit: 0.1 deg, native unit: 1/256 C.
 * ******************************************************************************
 */
int ds1775_t(uint32_t p_sensor_result)
{
	return (int)((p_sensor_result * 10u) / 256u);
}
+
/*
 * ******************************************************************************
 * For FAN: Convert a tick count to RPM
 * NT unit: RPM, Native unit: 2 ticks/revolution
 * ******************************************************************************
 */
int fan(uint32_t p_sensor_result)
{
	const uint32_t rpm = p_sensor_result * 60U / 4;

	return rpm;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/* How alarm conditions reported by a sensor are handled */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM, /* Alarm events are raised */
	NT_SENSOR_LOG_ALARM, /* Alarm events are only logged */
	NT_SENSOR_DISABLE_ALARM, /* Alarm events are ignored */
};
+
+/*
+ * Sensor Class types
+ */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
+};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
/*
 * Runtime state for a single adapter or port sensor.
 */
struct nt_adapter_sensor {
	uint8_t m_adapter_no; /* Adapter index; 0xFF until assigned */
	uint8_t m_intf_no; /* Interface (port) index; 0xFF until assigned */
	uint8_t fpga_idx; /* for AVR sensors */
	enum sensor_mon_sign si; /* Sign interpretation of raw readings */
	struct nt_info_sensor_s info; /* Public info: value, watermarks, name */
	enum nt_sensor_event_alarm_e alarm; /* Alarm handling policy */
	bool m_enable_alarm; /* Alarm reporting enabled for this sensor */
};
+
/*
 * FPGA access path for a sensor: the module, register and fields read to
 * obtain the sensor value.
 */
struct nt_fpga_sensor_monitor {
	nt_fpga_t *fpga; /* FPGA instance the sensor lives on */
	nt_module_t *mod; /* FPGA module containing the register */

	nt_register_t *reg; /* Register holding the sensor reading */
	nt_field_t **fields; /* Fields of interest within the register */
	uint8_t fields_num; /* Number of entries in fields */
};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name. */
+};
+
/*
 * Node in a singly linked list of sensors; couples a sensor with the
 * monitor used to read it and the conversion applied to raw readings.
 * sensor_deinit() frees the sensor and monitor, so the group owns both.
 */
struct nt_sensor_group {
	struct nt_adapter_sensor *sensor; /* The sensor itself (owned) */
	struct nt_fpga_sensor_monitor *monitor; /* FPGA read path (owned) */
	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* Refresh value */

	/* conv params are needed to call current conversion functions */
	int (*conv_func)(uint32_t p_sensor_result); /* Raw reading -> NT units */
	/* i2c interface for NIM sensors */

	struct nt_sensor_group *next; /* Next group in the list, or NULL */
};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+#define NT_INFO_SENSOR_NAME 50
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	/* MCU (Micro Controller Unit) temperature sensor */
	NT_SENSOR_NT200A02_MCU_TEMP,
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	/* Number of NT200A02 level 0,1 board sensors (was mislabelled NT200A01) */
	NT_SENSOR_NT200A02_L1_MAX,
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-21 13:54   ` Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1571111bce..c4f0a912d3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -24,6 +24,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -65,6 +66,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized CAT module shadow object.
+ * Returns NULL on allocation failure; release with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc zeroes the object, so all register/field pointers start NULL */
+	return calloc(1, sizeof(struct cat_nthw));
+}
+
+/*
+ * Release a CAT module shadow object created by cat_nthw_new().
+ * Accepts NULL (no-op).
+ */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub before release so stale pointers are easier to catch. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Forward the debug-mode setting to the underlying CAT FPGA module.
+ * NOTE(review): p and p->m_cat are dereferenced unchecked — callers must
+ * only invoke this after a successful cat_nthw_init().
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+/*
+ * Probe/initialize CAT module instance @n_instance on @p_fpga and resolve
+ * all register and field handles into @p.
+ *
+ * If @p is NULL the call is a pure probe: it returns 0 when the module
+ * instance exists and -1 when it does not.
+ * Returns 0 on success, -1 if the module instance is not present.
+ *
+ * register_get_field() is used for fields that must exist in every
+ * supported FPGA image; register_query_field()/module_query_register()
+ * are used for optional fields/registers and presumably yield NULL when
+ * absent (confirm against nthw_fpga_model) — setters for those fields
+ * assert non-NULL before use.
+ */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without initializing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	/* -1 when the KM_IF_CNT product parameter is absent (single KM interface). */
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	/* Field layout differs between single- and dual-KM-interface images. */
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* Single KM interface: un-suffixed KCE/KCS/FTE registers, bank 0 only. */
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* Dual KM interfaces: suffixed register names, banks 0 and 1. */
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	/* Optional CFN fields; NULL when absent from this FPGA image. */
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	/* Optional KCC/CCE/CCS registers; their fields are only resolved
+	 * when the parent register exists.
+	 */
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN record address for subsequent data accesses (CTRL_ADR). */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN control count field (CAT_CFN_CTRL_CNT).
+ * Renamed from the mangled identifier "r": every other accessor in this
+ * file follows the cat_nthw_<reg>_cnt() pattern (cat_nthw_kce_cnt,
+ * cat_nthw_kcs_cnt, cat_nthw_fte_cnt, cat_nthw_cte_cnt), and the header
+ * flow_nthw_cat.h presumably declares this name — confirm there.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/*
+ * CAT_CFN_DATA field setters. Each writes one field of the CFN data
+ * shadow; values are committed to hardware by cat_nthw_cfn_flush().
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/*
+ * The fields below are optional (resolved via register_query_field() in
+ * cat_nthw_init() and NULL when absent); assert presence before writing.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* PM_CMP is a multi-word field; val must point to mn_words 32-bit words. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR only exists on dual-KM-interface images (see cat_nthw_init). */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Commit the CFN control and data shadow registers to hardware. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE bank accessors; @index selects the KM-interface bank (0 or 1,
+ * bank 1 only on dual-KM images — see cat_nthw_init()).
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+/* Commit the selected KCE bank's control and data shadow to hardware. */
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
/*
 * CTE (category-to-engine enable) accessors. Setters whose target field may
 * be absent on some FPGA variants assert() the handle before use.
 */
void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_addr, val);
}

void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_cnt, val);
}

void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_col, val);
}

void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_cor, val);
}

void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_hsh, val);
}

void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_qsl, val);
}

void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_ipf, val);
}

void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_slc, val);
}

void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_pdb, val);
}

/* Optional field (asserted): not present on all FPGA variants — confirm in cat_nthw_init(). */
void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_msk);
	field_set_val32(p->mp_cte_data_msk, val);
}

/* Optional field (asserted). */
void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_hst);
	field_set_val32(p->mp_cte_data_hst, val);
}

/* Optional field (asserted). */
void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_epp);
	field_set_val32(p->mp_cte_data_epp, val);
}

/* Optional field (asserted). */
void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_tpe);
	field_set_val32(p->mp_cte_data_tpe, val);
}

/* Optional field (asserted). */
void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_rrb);
	field_set_val32(p->mp_cte_data_rrb, val);
}

/* Push the staged CTE CTRL/DATA shadow registers to the FPGA. */
void cat_nthw_cte_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cte_ctrl, 1);
	register_flush(p->mp_cte_data, 1);
}

/* CTS accessors. */
void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_addr, val);
}

void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_cnt, val);
}

void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_data_cat_a, val);
}

void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_data_cat_b, val);
}

void cat_nthw_cts_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cts_ctrl, 1);
	register_flush(p->mp_cts_data, 1);
}

/* COT accessors. */
void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_addr, val);
}

void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_cnt, val);
}

void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_data_color, val);
}

void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_data_km, val);
}

/* Optional field (asserted): not present on all FPGA variants. */
void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cot_data_nfv_sb);
	field_set_val32(p->mp_cot_data_nfv_sb, val);
}

void cat_nthw_cot_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cot_ctrl, 1);
	register_flush(p->mp_cot_data, 1);
}

/* CCT accessors. */
void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_addr, val);
}

void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_cnt, val);
}

void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_data_color, val);
}

void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_data_km, val);
}

void cat_nthw_cct_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cct_ctrl, 1);
	register_flush(p->mp_cct_data, 1);
}

/* EXO accessors. */
void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_exo_addr, val);
}

void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_exo_cnt, val);
}

void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_exo_data_dyn, val);
}

/* Signed offset; implicitly converted to the 32-bit field representation. */
void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
{
	field_set_val32(p->mp_exo_data_ofs, val);
}

void cat_nthw_exo_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_exo_ctrl, 1);
	register_flush(p->mp_exo_data, 1);
}

/* RCK accessors. */
void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rck_addr, val);
}

void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rck_cnt, val);
}

/*
 * Unlike the other setters, RCK DATA is written as a whole register (no named
 * sub-field handle exists in struct cat_nthw), so the register is updated
 * directly and explicitly marked dirty for the next flush.
 */
void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
{
	register_set_val(p->mp_rck_data, &val, 1);
	register_make_dirty(p->mp_rck_data);
}

void cat_nthw_rck_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_rck_ctrl, 1);
	register_flush(p->mp_rck_data, 1);
}
+
/* LEN accessors. */
void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_addr, val);
}

void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_cnt, val);
}

void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_lower, val);
}

void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_upper, val);
}

void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_dyn1, val);
}

void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_dyn2, val);
}

void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_inv, val);
}

void cat_nthw_len_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_len_ctrl, 1);
	register_flush(p->mp_len_data, 1);
}

/*
 * KCC accessors. The whole register group is optional (every handle is
 * asserted), presumably absent on older FPGA variants — confirm against
 * cat_nthw_init().
 */
void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_addr);
	field_set_val32(p->mp_kcc_addr, val);
}

void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_cnt);
	field_set_val32(p->mp_kcc_cnt, val);
}

/*
 * @val must point at two 32-bit words (64-bit key).
 * NOTE(review): the word count is hard-coded to 2 here, while
 * cat_nthw_cfn_pm_cmp() derives it from the field's mn_words — consider the
 * same approach for consistency.
 */
void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
{
	assert(p->mp_kcc_data_key);
	field_set_val(p->mp_kcc_data_key, val, 2);
}

void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_data_category);
	field_set_val32(p->mp_kcc_data_category, val);
}

void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_data_id);
	field_set_val32(p->mp_kcc_data_id, val);
}

void cat_nthw_kcc_flush(const struct cat_nthw *p)
{
	assert(p->mp_kcc_ctrl);
	assert(p->mp_kcc_data);
	register_flush(p->mp_kcc_ctrl, 1);
	register_flush(p->mp_kcc_data, 1);
}

/* CCE accessors (optional register group — handles asserted). */
void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_addr);
	field_set_val32(p->mp_cce_addr, val);
}

void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_cnt);
	field_set_val32(p->mp_cce_cnt, val);
}

void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_data_imm);
	field_set_val32(p->mp_cce_data_imm, val);
}

void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_data_ind);
	field_set_val32(p->mp_cce_data_ind, val);
}

void cat_nthw_cce_flush(const struct cat_nthw *p)
{
	assert(p->mp_cce_ctrl);
	assert(p->mp_cce_data);
	register_flush(p->mp_cce_ctrl, 1);
	register_flush(p->mp_cce_data, 1);
}

/* CCS accessors (optional register group — handles asserted). */
void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_ccs_addr);
	field_set_val32(p->mp_ccs_addr, val);
}

void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_ccs_cnt);
	field_set_val32(p->mp_ccs_cnt, val);
}
+
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en);
+CATNTHW_CCS_SET(cor);
+CATNTHW_CCS_SET(hsh_en);
+CATNTHW_CCS_SET(hsh);
+CATNTHW_CCS_SET(qsl_en);
+CATNTHW_CCS_SET(qsl);
+CATNTHW_CCS_SET(ipf_en);
+CATNTHW_CCS_SET(ipf);
+CATNTHW_CCS_SET(slc_en);
+CATNTHW_CCS_SET(slc);
+CATNTHW_CCS_SET(pdb_en);
+CATNTHW_CCS_SET(pdb);
+CATNTHW_CCS_SET(msk_en);
+CATNTHW_CCS_SET(msk);
+CATNTHW_CCS_SET(hst_en);
+CATNTHW_CCS_SET(hst);
+CATNTHW_CCS_SET(epp_en);
+CATNTHW_CCS_SET(epp);
+CATNTHW_CCS_SET(tpe_en);
+CATNTHW_CCS_SET(tpe);
+CATNTHW_CCS_SET(rrb_en);
+CATNTHW_CCS_SET(rrb);
+CATNTHW_CCS_SET(sb0_type);
+CATNTHW_CCS_SET(sb0_data);
+CATNTHW_CCS_SET(sb1_type);
+CATNTHW_CCS_SET(sb1_data);
+CATNTHW_CCS_SET(sb2_type);
+CATNTHW_CCS_SET(sb2_data);
+
/* Push the staged CCS CTRL/DATA shadow registers to the FPGA (group is optional — asserted). */
void cat_nthw_ccs_flush(const struct cat_nthw *p)
{
	assert(p->mp_ccs_ctrl);
	assert(p->mp_ccs_data);
	register_flush(p->mp_ccs_ctrl, 1);
	register_flush(p->mp_ccs_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __FLOW_NTHW_CAT_H__
/*
 * NOTE(review): guard names beginning with a double underscore are reserved
 * for the implementation (C11 7.1.3); consider renaming to FLOW_NTHW_CAT_H_.
 */
#define __FLOW_NTHW_CAT_H__

#include <stdint.h> /* uint32_t */
#include "nthw_fpga_model.h"

struct cat_nthw;

typedef struct cat_nthw cat_nthw_t;

/* Allocate a CAT context; see flow_nthw_cat.c for allocation/zeroing details. */
struct cat_nthw *cat_nthw_new(void);
/* Release a context obtained from cat_nthw_new(). */
void cat_nthw_delete(struct cat_nthw *p);
/* Bind @p to CAT module instance @n_instance on @p_fpga and resolve register handles. */
int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);

int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
/* CFN — shadow-register setters; cat_nthw_cfn_flush() writes them to hardware. */
void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
/*
 * Fix(review): this prototype was garbled as "void r(...)". By the
 * select/cnt pattern every other register group follows, it declares the CFN
 * count setter — confirm the definition name in flow_nthw_cat.c.
 */
void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cfn_flush(const struct cat_nthw *p);
/*
 * Shadow-register accessors for the remaining CAT register groups.
 * Pattern per group: select (address), cnt (count), field setters, flush
 * (writes CTRL+DATA to hardware). The indexed KCE/KCS/FTE variants take a
 * KM interface index (0 or 1).
 */
/* KCE 0/1 */
void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
/* KCS 0/1 */
void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
/* FTE 0/1 */
void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
/* CTE */
void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cte_flush(const struct cat_nthw *p);
/* CTS */
void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cts_flush(const struct cat_nthw *p);
void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
/* COT */
void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cot_flush(const struct cat_nthw *p);
/* CCT */
void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cct_flush(const struct cat_nthw *p);
/* EXO */
void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
void cat_nthw_exo_flush(const struct cat_nthw *p);
/* RCK */
void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
void cat_nthw_rck_flush(const struct cat_nthw *p);
/* LEN */
void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
void cat_nthw_len_flush(const struct cat_nthw *p);
/* KCC */
void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
/* val must point at two 32-bit words (64-bit key). */
void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
void cat_nthw_kcc_flush(const struct cat_nthw *p);
/* CCE */
void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
void cat_nthw_cce_flush(const struct cat_nthw *p);
/* CCS */
void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
/*
 * Register and field handle cache for one CAT (categorizer) module instance.
 * Handles are resolved once at init time; the per-group *_flush() helpers in
 * flow_nthw_cat.c write the staged CTRL/DATA shadow values to the FPGA.
 * Handles that are asserted in their setters appear to be optional (variant
 * dependent) and may be NULL — confirm against cat_nthw_init().
 */
struct cat_nthw {
	uint8_t m_physical_adapter_no;
	nt_fpga_t *mp_fpga;
	nt_module_t *m_cat;
	/* Number of KM interfaces; presumably sizes the [2] register pairs below. */
	int m_km_if_cnt;

	/* CFN: categorizer function table */
	nt_register_t *mp_cfn_ctrl;
	nt_field_t *mp_cfn_addr;
	nt_field_t *mp_cfn_cnt;
	nt_register_t *mp_cfn_data;
	nt_field_t *mp_cfn_data_enable;
	nt_field_t *mp_cfn_data_inv;
	nt_field_t *mp_cfn_data_ptc_inv;
	nt_field_t *mp_cfn_data_ptc_isl;
	nt_field_t *mp_cfn_data_ptc_cfp;
	nt_field_t *mp_cfn_data_ptc_mac;
	nt_field_t *mp_cfn_data_ptc_l2;
	nt_field_t *mp_cfn_data_ptc_vn_tag;
	nt_field_t *mp_cfn_data_ptc_vlan;
	nt_field_t *mp_cfn_data_ptc_mpls;
	nt_field_t *mp_cfn_data_ptc_l3;
	nt_field_t *mp_cfn_data_ptc_frag;
	nt_field_t *mp_cfn_data_ptc_ip_prot;
	nt_field_t *mp_cfn_data_ptc_l4;
	nt_field_t *mp_cfn_data_ptc_tunnel;
	nt_field_t *mp_cfn_data_ptc_tnl_l2;
	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
	nt_field_t *mp_cfn_data_ptc_tnl_l3;
	nt_field_t *mp_cfn_data_ptc_tnl_frag;
	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
	nt_field_t *mp_cfn_data_ptc_tnl_l4;
	nt_field_t *mp_cfn_data_err_inv;
	nt_field_t *mp_cfn_data_err_cv;
	nt_field_t *mp_cfn_data_err_fcs;
	nt_field_t *mp_cfn_data_err_trunc;
	nt_field_t *mp_cfn_data_err_l3_cs;
	nt_field_t *mp_cfn_data_err_l4_cs;
	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
	nt_field_t *mp_cfn_data_err_ttl_exp;
	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
	nt_field_t *mp_cfn_data_mac_port;
	nt_field_t *mp_cfn_data_pm_cmp;
	nt_field_t *mp_cfn_data_pm_dct;
	nt_field_t *mp_cfn_data_pm_ext_inv;
	nt_field_t *mp_cfn_data_pm_cmb;
	nt_field_t *mp_cfn_data_pm_and_inv;
	nt_field_t *mp_cfn_data_pm_or_inv;
	nt_field_t *mp_cfn_data_pm_inv;
	nt_field_t *mp_cfn_data_lc;
	nt_field_t *mp_cfn_data_lc_inv;
	nt_field_t *mp_cfn_data_km0_or;
	nt_field_t *mp_cfn_data_km1_or;

	/* KCE, per KM interface (index 0/1) */
	nt_register_t *mp_kce_ctrl[2];
	nt_field_t *mp_kce_addr[2];
	nt_field_t *mp_kce_cnt[2];
	nt_register_t *mp_kce_data[2];
	nt_field_t *mp_kce_data_enable[2];

	/* KCS, per KM interface (index 0/1) */
	nt_register_t *mp_kcs_ctrl[2];
	nt_field_t *mp_kcs_addr[2];
	nt_field_t *mp_kcs_cnt[2];
	nt_register_t *mp_kcs_data[2];
	nt_field_t *mp_kcs_data_category[2];

	/* FTE, per KM interface (index 0/1) */
	nt_register_t *mp_fte_ctrl[2];
	nt_field_t *mp_fte_addr[2];
	nt_field_t *mp_fte_cnt[2];
	nt_register_t *mp_fte_data[2];
	nt_field_t *mp_fte_data_enable[2];

	/* CTE */
	nt_register_t *mp_cte_ctrl;
	nt_field_t *mp_cte_addr;
	nt_field_t *mp_cte_cnt;
	nt_register_t *mp_cte_data;
	nt_field_t *mp_cte_data_col;
	nt_field_t *mp_cte_data_cor;
	nt_field_t *mp_cte_data_hsh;
	nt_field_t *mp_cte_data_qsl;
	nt_field_t *mp_cte_data_ipf;
	nt_field_t *mp_cte_data_slc;
	nt_field_t *mp_cte_data_pdb;
	nt_field_t *mp_cte_data_msk;
	nt_field_t *mp_cte_data_hst;
	nt_field_t *mp_cte_data_epp;
	nt_field_t *mp_cte_data_tpe;
	nt_field_t *mp_cte_data_rrb;

	/* CTS */
	nt_register_t *mp_cts_ctrl;
	nt_field_t *mp_cts_addr;
	nt_field_t *mp_cts_cnt;
	nt_register_t *mp_cts_data;
	nt_field_t *mp_cts_data_cat_a;
	nt_field_t *mp_cts_data_cat_b;

	/* COT */
	nt_register_t *mp_cot_ctrl;
	nt_field_t *mp_cot_addr;
	nt_field_t *mp_cot_cnt;
	nt_register_t *mp_cot_data;
	nt_field_t *mp_cot_data_color;
	nt_field_t *mp_cot_data_km;
	nt_field_t *mp_cot_data_nfv_sb;

	/* CCT */
	nt_register_t *mp_cct_ctrl;
	nt_field_t *mp_cct_addr;
	nt_field_t *mp_cct_cnt;
	nt_register_t *mp_cct_data;
	nt_field_t *mp_cct_data_color;
	nt_field_t *mp_cct_data_km;

	/* EXO */
	nt_register_t *mp_exo_ctrl;
	nt_field_t *mp_exo_addr;
	nt_field_t *mp_exo_cnt;
	nt_register_t *mp_exo_data;
	nt_field_t *mp_exo_data_dyn;
	nt_field_t *mp_exo_data_ofs;

	/* RCK: DATA is written whole-register (no sub-field handle) */
	nt_register_t *mp_rck_ctrl;
	nt_field_t *mp_rck_addr;
	nt_field_t *mp_rck_cnt;
	nt_register_t *mp_rck_data;

	/* LEN */
	nt_register_t *mp_len_ctrl;
	nt_field_t *mp_len_addr;
	nt_field_t *mp_len_cnt;
	nt_register_t *mp_len_data;
	nt_field_t *mp_len_data_lower;
	nt_field_t *mp_len_data_upper;
	nt_field_t *mp_len_data_dyn1;
	nt_field_t *mp_len_data_dyn2;
	nt_field_t *mp_len_data_inv;
	/* KCC (optional group — handles asserted in setters) */
	nt_register_t *mp_kcc_ctrl;
	nt_field_t *mp_kcc_addr;
	nt_field_t *mp_kcc_cnt;

	nt_register_t *mp_kcc_data;
	nt_field_t *mp_kcc_data_key;
	nt_field_t *mp_kcc_data_category;
	nt_field_t *mp_kcc_data_id;

	/* CCE (optional group — handles asserted in setters) */
	nt_register_t *mp_cce_ctrl;
	nt_field_t *mp_cce_addr;
	nt_field_t *mp_cce_cnt;

	nt_register_t *mp_cce_data;
	nt_field_t *mp_cce_data_imm;
	nt_field_t *mp_cce_data_ind;

	/* CCS (optional group — handles asserted in setters) */
	nt_register_t *mp_ccs_ctrl;
	nt_field_t *mp_ccs_addr;
	nt_field_t *mp_ccs_cnt;

	nt_register_t *mp_ccs_data;
	nt_field_t *mp_ccs_data_cor_en;
	nt_field_t *mp_ccs_data_cor;

	nt_field_t *mp_ccs_data_hsh_en;
	nt_field_t *mp_ccs_data_hsh;
	nt_field_t *mp_ccs_data_qsl_en;
	nt_field_t *mp_ccs_data_qsl;
	nt_field_t *mp_ccs_data_ipf_en;
	nt_field_t *mp_ccs_data_ipf;
	nt_field_t *mp_ccs_data_slc_en;
	nt_field_t *mp_ccs_data_slc;
	nt_field_t *mp_ccs_data_pdb_en;
	nt_field_t *mp_ccs_data_pdb;
	nt_field_t *mp_ccs_data_msk_en;
	nt_field_t *mp_ccs_data_msk;
	nt_field_t *mp_ccs_data_hst_en;
	nt_field_t *mp_ccs_data_hst;
	nt_field_t *mp_ccs_data_epp_en;
	nt_field_t *mp_ccs_data_epp;
	nt_field_t *mp_ccs_data_tpe_en;
	nt_field_t *mp_ccs_data_tpe;
	nt_field_t *mp_ccs_data_rrb_en;
	nt_field_t *mp_ccs_data_rrb;
	nt_field_t *mp_ccs_data_sb0_type;
	nt_field_t *mp_ccs_data_sb0_data;
	nt_field_t *mp_ccs_data_sb1_type;
	nt_field_t *mp_ccs_data_sb1_data;
	nt_field_t *mp_ccs_data_sb2_type;
	nt_field_t *mp_ccs_data_sb2_data;
};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
/* Forward the debug-mode setting to the underlying CSU module instance. */
void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_csu, n_debug_mode);
}
+
+struct csu_nthw *csu_nthw_new(void)
+{
+	struct csu_nthw *p = malloc(sizeof(struct csu_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/*
 * Bind @p to CSU module instance @n_instance on @p_fpga and resolve the RCP
 * CTRL/DATA register and field handles.
 *
 * Returns 0 on success, -1 if the module instance does not exist.
 * When @p is NULL the call degrades to a pure existence probe for the
 * instance (no state is touched; only the lookup result is reported).
 */
int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);

	assert(n_instance >= 0 && n_instance < 256);

	if (p == NULL)
		return p_mod == NULL ? -1 : 0;

	if (p_mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->m_physical_adapter_no = (uint8_t)n_instance;
	p->m_csu = p_mod;

	/* Resolve RCP register/field handles once; the setters below only stage values. */
	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
	p->mp_rcp_data_ol3_cmd =
		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
	p->mp_rcp_data_ol4_cmd =
		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
	p->mp_rcp_data_il3_cmd =
		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
	p->mp_rcp_data_il4_cmd =
		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);

	return 0;
}
+
/* Stage the RCP record address in the CTRL shadow register. */
void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_adr, val);
}

/* Stage the RCP record count in the CTRL shadow register. */
void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_cnt, val);
}

void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
{
	/*
	 * Select L3 calc method for outer layer3.
	 * 0: Do not touch checksum field.
	 * 1: Check, but do not touch checksum field.
	 * 2: Insert checksum header value for BAD checksum.
	 * 3: Insert checksum header value for GOOD checksum.
	 */
	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
}

void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
{
	/*
	 * Select L4 calc method for outer layer4.
	 * 0: Do not touch checksum field.
	 * 1: Check, but do not touch checksum field.
	 * 2: Insert checksum header value for BAD checksum.
	 * 3: Insert checksum header value for GOOD checksum.
	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
	 *    otherwise GOOD checksum.
	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
	 *    GOOD checksum.
	 */
	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
}

void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
{
	/*
	 * Select L3 calc method for inner layer3 (tunneled).
	 * 0: Do not touch checksum field.
	 * 1: Check, but do not touch checksum field.
	 * 2: Insert checksum header value for BAD checksum.
	 * 3: Insert checksum header value for GOOD checksum.
	 */
	field_set_val32(p->mp_rcp_data_il3_cmd, val);
}

void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
{
	/*
	 * Select L4 calc method for inner layer4 (tunneled).
	 * 0: Do not touch checksum field.
	 * 1: Check, but do not touch checksum field.
	 * 2: Insert checksum header value for BAD checksum.
	 * 3: Insert checksum header value for GOOD checksum.
	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
	 *    otherwise GOOD checksum.
	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
	 *    GOOD checksum.
	 */
	field_set_val32(p->mp_rcp_data_il4_cmd, val);
}

/* Write the staged RCP CTRL/DATA shadow registers to the FPGA. */
void csu_nthw_rcp_flush(const struct csu_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
/*
 * Handle for one CSU flow-filter module instance: caches the FPGA module
 * plus the RCP (recipe) register/field handles used by the accessors below.
 */
struct csu_nthw {
	uint8_t m_physical_adapter_no;	/* adapter instance number */
	nt_fpga_t *mp_fpga;		/* owning FPGA model */

	nt_module_t *m_csu;		/* resolved CSU module handle */

	/* RCP control register: recipe address and count fields */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_ctrl_adr;
	nt_field_t *mp_rcp_ctrl_cnt;
	/* RCP data register: per-layer checksum command fields */
	nt_register_t *mp_rcp_data;
	nt_field_t *mp_rcp_data_ol3_cmd;
	nt_field_t *mp_rcp_data_ol4_cmd;
	nt_field_t *mp_rcp_data_il3_cmd;
	nt_field_t *mp_rcp_data_il4_cmd;
};

/* Lifecycle: allocate, initialize against an FPGA instance, destroy. */
struct csu_nthw *csu_nthw_new(void);
void csu_nthw_delete(struct csu_nthw *p);
int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);

int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);

/*
 * Recipe programming: stage select/count and the four checksum commands,
 * then commit with csu_nthw_rcp_flush().
 */
void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct flm_nthw *flm_nthw_new(void)
+{
+	struct flm_nthw *p = malloc(sizeof(struct flm_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/* Forward the debug-mode setting to the underlying FLM FPGA module. */
void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_flm, n_debug_mode);
}
+
/*
 * Bind an FLM context to FPGA module instance n_instance and resolve all
 * register and field handles used by the accessors in this file.
 *
 * Probe mode: when called with p == NULL, only report whether the module
 * instance exists (0 if present, -1 if not).
 *
 * Returns 0 on success, -1 if the module instance does not exist.
 *
 * NOTE(review): handles looked up with module_query_register() /
 * register_query_field() appear to be optional on some FPGA images and may
 * be NULL; their accessors below assert before use. Handles looked up with
 * the corresponding *_get_* calls are treated as mandatory.
 */
int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);

	assert(n_instance >= 0 && n_instance < 256);

	if (p == NULL)
		return p_mod == NULL ? -1 : 0;

	if (p_mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	/* RAC handle is needed for the DMA-based buffer transfers below. */
	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;

	p->mp_fpga = p_fpga;
	p->m_physical_adapter_no = (uint8_t)n_instance;
	p->m_flm = p_mod;

	/* CONTROL register and its fields */
	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
	p->mp_control_enable =
		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
	p->mp_control_split_sdram_usage =
		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);

	/* STATUS register and its fields */
	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
	p->mp_status_calibdone =
		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
	p->mp_status_initdone =
		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
	p->mp_status_critical =
		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);

	/* TIMEOUT and SCRUB configuration registers */
	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);

	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);

	/* LOAD_* measurement registers */
	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);

	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);

	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);

	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);

	/* PRIO register: four limit/flow-type pairs */
	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);

	/* PST control/data register pair */
	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);

	/* RCP (recipe) control/data register pair */
	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
	p->mp_rcp_data_lookup =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
	p->mp_rcp_data_qw0_dyn =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
	p->mp_rcp_data_qw0_ofs =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
	p->mp_rcp_data_qw0_sel =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
	p->mp_rcp_data_qw4_dyn =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
	p->mp_rcp_data_qw4_ofs =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
	p->mp_rcp_data_sw8_dyn =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
	p->mp_rcp_data_sw8_ofs =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
	p->mp_rcp_data_sw8_sel =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
	p->mp_rcp_data_sw9_dyn =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
	p->mp_rcp_data_sw9_ofs =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
	p->mp_rcp_data_byt_dyn =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
	p->mp_rcp_data_byt_ofs =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
	p->mp_rcp_data_auto_ipv4_mask =
		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);

	/* Buffer-control and data registers accessed via RAB DMA */
	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);

	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);

	/* Statistics counters (some are optional, see NOTE above) */
	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
	p->mp_stat_lrn_done_cnt =
		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);

	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
	p->mp_stat_lrn_ignore_cnt =
		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);

	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
	p->mp_stat_lrn_fail_cnt =
		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);

	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
	p->mp_stat_unl_done_cnt =
		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);

	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
	p->mp_stat_unl_ignore_cnt =
		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);

	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
	p->mp_stat_prb_done_cnt =
		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);

	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
				FLM_STAT_PRB_IGNORE_CNT);

	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
	p->mp_stat_rel_done_cnt =
		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);

	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
	p->mp_stat_rel_ignore_cnt =
		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);

	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
	p->mp_stat_aul_done_cnt =
		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);

	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
	p->mp_stat_aul_ignore_cnt =
		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);

	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
	p->mp_stat_aul_fail_cnt =
		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);

	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
	p->mp_stat_tul_done_cnt =
		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);

	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
	p->mp_stat_flows_cnt =
		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);

	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
	p->mp_stat_sta_done_cnt =
		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);

	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
	p->mp_stat_inf_done_cnt =
		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);

	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
	p->mp_stat_inf_skip_cnt =
		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);

	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
	p->mp_stat_pck_hit_cnt =
		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);

	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
	p->mp_stat_pck_miss_cnt =
		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);

	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
	p->mp_stat_pck_unh_cnt =
		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);

	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
	p->mp_stat_pck_dis_cnt =
		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);

	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
	p->mp_stat_csh_hit_cnt =
		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);

	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
	p->mp_stat_csh_miss_cnt =
		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);

	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
	p->mp_stat_csh_unh_cnt =
		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);

	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
	p->mp_stat_cuc_start_cnt =
		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);

	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
	p->mp_stat_cuc_move_cnt =
		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);

	return 0;
}
+
/*
 * FLM_CONTROL field setters. Each call only stages the field value; nothing
 * reaches hardware until flm_nthw_control_flush() is called.
 */
void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_enable, val);
}

void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_init, val);
}

void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_lds, val);
}

void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_lfs, val);
}

void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_lis, val);
}

void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_uds, val);
}

void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_uis, val);
}

void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_rds, val);
}

void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_ris, val);
}

void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
{
	/* PDS is an optional field (looked up with register_query_field()
	 * in flm_nthw_init()); callers must only use it when present.
	 */
	assert(p->mp_control_pds);
	field_set_val32(p->mp_control_pds, val);
}

void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
{
	/* PIS is optional, same as PDS above. */
	assert(p->mp_control_pis);
	field_set_val32(p->mp_control_pis, val);
}

void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_crcwr, val);
}

void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_crcrd, val);
}

void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_rbl, val);
}

void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_eab, val);
}

void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_control_split_sdram_usage, val);
}

/* Commit all staged FLM_CONTROL field values to hardware. */
void flm_nthw_control_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_control, 1);
}
+
/*
 * FLM_STATUS field accessors. With get != 0 the cached field value is read
 * into *val; call flm_nthw_status_update() first to refresh the cache from
 * hardware. CRITICAL, PANIC and CRCERR additionally support a write path
 * (get == 0), used to clear/acknowledge them; commit writes with
 * flm_nthw_status_flush().
 */
void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_status_calibdone);
}

void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_status_initdone);
}

void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_status_idle);
}

void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_status_critical);

	else
		field_set_val32(p->mp_status_critical, *val);
}

void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_status_panic);

	else
		field_set_val32(p->mp_status_panic, *val);
}

void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_status_crcerr);

	else
		field_set_val32(p->mp_status_crcerr, *val);
}

void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_status_eft_bp);
}

/* Write staged status field values back to hardware. */
void flm_nthw_status_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_status, 1);
}

/* Re-read the status register from hardware into the shadow copy. */
void flm_nthw_status_update(const struct flm_nthw *p)
{
	register_update(p->mp_status);
}
+
/*
 * TIMEOUT / SCRUB / LOAD_* / PRIO configuration. As elsewhere in this file,
 * each setter stages a field value and the matching *_flush() call commits
 * the whole register to hardware.
 */
void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_timeout_t, val);
}

void flm_nthw_timeout_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_timeout, 1);
}

void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_scrub_i, val);
}

void flm_nthw_scrub_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_scrub, 1);
}

void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_load_bin_bin, val);
}

void flm_nthw_load_bin_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_load_bin, 1);
}

void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_load_pps_pps, val);
}

void flm_nthw_load_pps_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_load_pps, 1);
}

void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_load_lps_lps, val);
}

void flm_nthw_load_lps_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_load_lps, 1);
}

void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_load_aps_aps, val);
}

void flm_nthw_load_aps_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_load_aps, 1);
}

/* PRIO: four (limit, flow-type) pairs staged in one register. */
void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_limit0, val);
}

void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_ft0, val);
}

void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_limit1, val);
}

void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_ft1, val);
}

void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_limit2, val);
}

void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_ft2, val);
}

void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_limit3, val);
}

void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_prio_ft3, val);
}

void flm_nthw_prio_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_prio, 1);
}
+
/*
 * PST table programming: select entry (adr/cnt), stage bp/pp/tp data,
 * then commit with flm_nthw_pst_flush().
 */
void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_pst_ctrl_adr, val);
}

void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_pst_ctrl_cnt, val);
}

void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_pst_data_bp, val);
}

void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_pst_data_pp, val);
}

void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_pst_data_tp, val);
}

/* Commit: control register first, then data — order is significant. */
void flm_nthw_pst_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_pst_ctrl, 1);
	register_flush(p->mp_pst_data, 1);
}
+
/*
 * RCP (recipe) table programming: select entry (adr/cnt), stage the recipe
 * fields, then commit with flm_nthw_rcp_flush().
 */
void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_adr, val);
}

void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_cnt, val);
}

void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_lookup, val);
}

void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
}

void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
}

void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_qw0_sel, val);
}

void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
}

void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
}

void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
}

void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
}

void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_sw8_sel, val);
}

void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
}

void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
}

/* Multi-word field: val must point to at least 10 32-bit words. */
void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
{
	field_set_val(p->mp_rcp_data_mask, val, 10);
}

void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_kid, val);
}

void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_opn, val);
}

void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ipn, val);
}

void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_byt_dyn, val);
}

void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_byt_ofs, val);
}

void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_txplm, val);
}

void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
}

/* Commit: control register first, then data — order is significant. */
void flm_nthw_rcp_flush(const struct flm_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
+
/*
 * Read BUF_CTRL over RAB DMA and unpack the three buffer levels:
 * word0[15:0]  -> *lrn_free (free space in the learn queue),
 * word0[31:16] -> *inf_avail (words available in INF_DATA),
 * word1[15:0]  -> *sta_avail (words available in STA_DATA).
 * Returns 0 on success, otherwise the RAB DMA error code; the output
 * parameters are only written on success.
 */
int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
			  uint32_t *inf_avail, uint32_t *sta_avail)
{
	int ret = -1;

	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
	rab_bus_id_t bus_id = 1;
	struct dma_buf_ptr bc_buf;

	ret = nthw_rac_rab_dma_begin(rac);
	if (ret == 0) {
		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
		ret = nthw_rac_rab_dma_commit(rac);
		if (ret != 0)
			return ret;

		/* bc_buf is a ring; size is a power of two, so mask wraps the index. */
		uint32_t bc_mask = bc_buf.size - 1;
		uint32_t bc_index = bc_buf.index;
		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
	}

	return ret;
}
+
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	if (nthw_rac_rab_dma_begin(rac) == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
/*
 * Read word_count words of flow-info records from INF_DATA over RAB DMA
 * into data[], and read back BUF_CTRL for fresh buffer levels (see
 * flm_nthw_buf_ctrl_update() for the lrn_free/inf_avail/sta_avail layout).
 * Returns 0 on success, otherwise the RAB DMA error code; data[] and the
 * level outputs are only written on success.
 */
int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
			  uint32_t word_count, uint32_t *lrn_free,
			  uint32_t *inf_avail, uint32_t *sta_avail)
{
	int ret = -1;

	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
	uint32_t address_infdata = register_get_address(p->mp_inf_data);
	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
	rab_bus_id_t bus_id = 1;
	struct dma_buf_ptr buf;
	struct dma_buf_ptr bc_buf;

	ret = nthw_rac_rab_dma_begin(rac);
	if (ret == 0) {
		/* Announce the number of words to read from INF_DATA */
		uint32_t bufctrl_data[2];

		bufctrl_data[0] = word_count << 16;
		bufctrl_data[1] = 0;
		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
					bufctrl_data);
		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
				       &buf);
		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
		ret = nthw_rac_rab_dma_commit(rac);
		if (ret != 0)
			return ret;

		/* Copy out of the DMA ring; mask wraps the ring index. */
		uint32_t mask = buf.size - 1;
		uint32_t index = buf.index;

		for (uint32_t i = 0; i < word_count; ++index, ++i)
			data[i] = buf.base[index & mask];

		uint32_t bc_mask = bc_buf.size - 1;
		uint32_t bc_index = bc_buf.index;
		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
	}

	return ret;
}
+
/*
 * Read word_count words of status records from STA_DATA over RAB DMA into
 * data[], and read back BUF_CTRL for fresh buffer levels (see
 * flm_nthw_buf_ctrl_update() for the lrn_free/inf_avail/sta_avail layout).
 * Returns 0 on success, otherwise the RAB DMA error code; data[] and the
 * level outputs are only written on success.
 */
int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
			  uint32_t word_count, uint32_t *lrn_free,
			  uint32_t *inf_avail, uint32_t *sta_avail)
{
	int ret = -1;

	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
	uint32_t address_stadata = register_get_address(p->mp_sta_data);
	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
	rab_bus_id_t bus_id = 1;
	struct dma_buf_ptr buf;
	struct dma_buf_ptr bc_buf;

	ret = nthw_rac_rab_dma_begin(rac);
	if (ret == 0) {
		/* Announce the number of words to read from STA_DATA */
		uint32_t bufctrl_data[2];

		bufctrl_data[0] = 0;
		bufctrl_data[1] = word_count;
		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
					bufctrl_data);
		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
				       &buf);
		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
		ret = nthw_rac_rab_dma_commit(rac);
		if (ret != 0)
			return ret;

		/* Copy out of the DMA ring; mask wraps the ring index. */
		uint32_t mask = buf.size - 1;
		uint32_t index = buf.index;

		for (uint32_t i = 0; i < word_count; ++index, ++i)
			data[i] = buf.base[index & mask];

		uint32_t bc_mask = bc_buf.size - 1;
		uint32_t bc_index = bc_buf.index;
		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
	}

	return ret;
}
+
/*
 * Learn/unlearn statistics. Each counter comes as a pair: *_update()
 * refreshes the register shadow from hardware, *_cnt(..., get=1) reads the
 * cached value into *val. These registers are mandatory (resolved with
 * register_get_field() in flm_nthw_init()).
 */
void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
}

void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_lrn_done);
}

void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
}

void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_lrn_ignore);
}

void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
}

void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_lrn_fail);
}

void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_unl_done_cnt);
}

void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_unl_done);
}

void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
}

void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_unl_ignore);
}
+
/*
 * Probe statistics. These registers are optional (resolved with
 * module_query_register()/register_query_field() in flm_nthw_init() and may
 * be NULL), hence the asserts: callers must check availability first.
 */
void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_prb_done_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_prb_done_cnt);
}

void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_prb_done);
	register_update(p->mp_stat_prb_done);
}

void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_prb_ignore_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
}

void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_prb_ignore);
	register_update(p->mp_stat_prb_ignore);
}
+
/*
 * Relearn / auto-unlearn / timeout-unlearn / flow-count statistics.
 * Same update/read pairing as the learn counters above; these registers are
 * mandatory (resolved with register_get_field() in flm_nthw_init()).
 */
void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_rel_done_cnt);
}

void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_rel_done);
}

void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
}

void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_rel_ignore);
}

void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_aul_done_cnt);
}

void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_aul_done);
}

void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
}

void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_aul_ignore);
}

void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
}

void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_aul_fail);
}

void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_tul_done_cnt);
}

void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_tul_done);
}

void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	if (get)
		*val = field_get_val32(p->mp_stat_flows_cnt);
}

void flm_nthw_stat_flows_update(const struct flm_nthw *p)
{
	register_update(p->mp_stat_flows);
}
+
/*
 * STA/INF/PCK/CSH/CUC statistics. All of these registers are optional
 * (resolved with module_query_register()/register_query_field() in
 * flm_nthw_init() and may be NULL), hence the asserts: callers must check
 * availability before use.
 */
void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_sta_done_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_sta_done_cnt);
}

void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_sta_done);
	register_update(p->mp_stat_sta_done);
}

void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_inf_done_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_inf_done_cnt);
}

void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_inf_done);
	register_update(p->mp_stat_inf_done);
}

void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_inf_skip_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
}

void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_inf_skip);
	register_update(p->mp_stat_inf_skip);
}

void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_pck_hit_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
}

void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_pck_hit);
	register_update(p->mp_stat_pck_hit);
}

void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_pck_miss_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
}

void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_pck_miss);
	register_update(p->mp_stat_pck_miss);
}

void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_pck_unh_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
}

void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_pck_unh);
	register_update(p->mp_stat_pck_unh);
}

void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_pck_dis_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
}

void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_pck_dis);
	register_update(p->mp_stat_pck_dis);
}

void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_csh_hit_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
}

void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_csh_hit);
	register_update(p->mp_stat_csh_hit);
}

void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_csh_miss_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
}

void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_csh_miss);
	register_update(p->mp_stat_csh_miss);
}

void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_csh_unh_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
}

void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_csh_unh);
	register_update(p->mp_stat_csh_unh);
}

void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_cuc_start_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
}

void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_cuc_start);
	register_update(p->mp_stat_cuc_start);
}

void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
{
	assert(p->mp_stat_cuc_move_cnt);
	if (get)
		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
}

void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
{
	assert(p->mp_stat_cuc_move);
	register_update(p->mp_stat_cuc_move);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn _done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl _done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb _done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel _done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul _done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul _done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta _done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf _done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Per-instance context for one FLM FPGA module: the module handle plus the
+ * register and field handles resolved by flm_nthw_init() and used by the
+ * accessor functions above.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* LOAD_* rate registers */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/flow-type field pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST control/data register pair */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP (recipe) control/data register pair */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status buffer control */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* STAT_* counter registers, each with a single CNT field */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module handle. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	nt_module_t *const mod = p->m_hfu;
+
+	module_set_debug_mode(mod, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HFU context.
+ * Returns NULL on allocation failure; callers must check the result.
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() yields the same zero-filled block as malloc()+memset(). */
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/* Scrub and free a context from hfu_nthw_new(); a NULL argument is a no-op. */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an HFU context to FPGA module instance @n_instance and resolve all
+ * register/field handles used by the accessors below.
+ * When @p is NULL the call is a pure probe: it only reports whether the
+ * instance exists. Returns 0 on success, -1 if there is no such instance.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup. */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/* RCP_CTRL fields: record address (ADR) and record count (CNT). */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* RCP_DATA LEN_A field group. */
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+/* RCP_DATA LEN_B field group. */
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+/* RCP_DATA LEN_C field group. */
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+/* RCP_DATA TTL field group. */
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+/* RCP_DATA protocol and layer-offset fields. */
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Flush the RCP control and data registers (see register_flush). */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Per-instance context for one HFU FPGA module: the module handle plus the
+ * RCP register/field handles resolved by hfu_nthw_init().
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP control register: record address and count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Pass the debug-mode setting through to the underlying HSH module handle. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/* Allocate a zero-initialized HSH context; returns NULL on allocation failure. */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	struct hsh_nthw *p = malloc(sizeof(struct hsh_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an HSH context; safe to call with NULL.
+ * The struct is scrubbed before free so stale register/field pointers
+ * cannot be reused through a dangling reference.
+ */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HSH context to FPGA module instance @n_instance and reset the
+ * RCP (recipe) registers to their defaults.
+ *
+ * Calling with @p == NULL acts as a pure presence probe: returns 0 when the
+ * module instance exists, -1 otherwise, without touching any state.
+ * Returns 0 on success, -1 when the instance does not exist.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: resolve control/data registers and their fields once up front. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* query (not get): AUTO_IPV4_MASK is optional and may be absent on
+	 * some FPGA images; the setter below NULL-checks it before use.
+	 */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: write recipe 0 with all-zero defaults (hsh_type = 31).
+	 * val[] provides a zero buffer for the multi-word mask fields.
+	 * NOTE(review): mp_rcp_data_p_mask is not given a default here,
+	 * unlike the other fields — confirm this is intentional.
+	 */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP shadow-register setters.
+ *
+ * Each function stages a single field of the HSH recipe; nothing reaches
+ * the hardware until hsh_nthw_rcp_flush() pushes the control and data
+ * registers out. rcp_select() chooses which recipe index subsequent
+ * field writes target; rcp_cnt() sets how many consecutive recipes the
+ * flush covers.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* Multi-word field: caller supplies mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Offsets are signed at the API level; the raw field write is unchanged. */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* Multi-word field: expects a 10-word buffer from the caller. */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional on some FPGA images (see init); silently a
+ * no-op when the field is absent.
+ */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Commit all staged RCP control/data fields to the hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Context for one HSH (hash) FPGA module instance: the module handle plus
+ * cached pointers to the RCP control register, data register, and every
+ * recipe field resolved once by hsh_nthw_init().
+ */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and per-field handles */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;	/* optional; may be NULL */
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Pass the debug-mode setting through to the underlying HST module handle. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/* Allocate a zero-initialized HST context; returns NULL on allocation failure. */
+struct hst_nthw *hst_nthw_new(void)
+{
+	struct hst_nthw *p = malloc(sizeof(struct hst_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an HST context; safe to call with NULL. Scrubs before free. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HST context to FPGA module instance @n_instance and cache the
+ * RCP register/field handles.
+ *
+ * Calling with @p == NULL acts as a pure presence probe: returns 0 when
+ * the module instance exists, -1 otherwise. Returns 0 on success.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default recipe values are
+ * written and no flush is issued here — confirm that is intentional.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: resolve control/data registers and their fields once up front. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/* RCP shadow-register setters: each stages one recipe field; nothing is
+ * written to hardware until hst_nthw_rcp_flush() commits the control and
+ * data registers. rcp_select() picks the recipe index, rcp_cnt() the
+ * number of consecutive recipes the flush covers.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Commit all staged RCP control/data fields to the hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context for one HST (header strip) FPGA module instance: module handle
+ * plus cached RCP control/data register and field pointers resolved by
+ * hst_nthw_init().
+ */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and per-field handles */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+/* Pass the debug-mode setting through to the underlying IFR module handle. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized IFR context; returns NULL on allocation failure.
+ * NOTE(review): malloc/memset/free are used without explicit <stdlib.h> and
+ * <string.h> includes in this file (siblings flow_nthw_hsh.c/hst.c include
+ * them) — presumably pulled in transitively; consider adding them.
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	struct ifr_nthw *p = malloc(sizeof(struct ifr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free an IFR context; safe to call with NULL. Scrubs before free. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IFR context to FPGA module instance @n_instance and cache the
+ * RCP register/field handles.
+ *
+ * Calling with @p == NULL acts as a pure presence probe: returns 0 when
+ * the module instance exists, -1 otherwise. Returns 0 on success.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above instead of querying the
+	 * FPGA a second time (consistent with hsh/hst init).
+	 */
+	p->m_ifr = p_mod;
+
+	/* RCP: resolve control/data registers and their fields once up front. */
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP shadow-register setters; committed by ifr_nthw_rcp_flush().
+ * Each asserts that its field handle was resolved by ifr_nthw_init().
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Stage the RCP MTU field; committed by ifr_nthw_rcp_flush().
+ * Fix: assert the field actually being written (mp_rcp_data_mtu) — the
+ * original asserted mp_rcp_data_en, leaving the dereferenced pointer
+ * unguarded.
+ */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Commit all staged RCP control/data fields to the hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Context for one IFR FPGA module instance: module handle plus cached RCP
+ * control/data register and field pointers resolved by ifr_nthw_init().
+ */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its enable/MTU fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Clamp a product parameter to 0/1 so it can be used as a presence flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val > 1 ? 1 : val;
+}
+
+/* Allocate a zero-initialized INFO context; returns NULL on allocation failure. */
+struct info_nthw *info_nthw_new(void)
+{
+	struct info_nthw *p = malloc(sizeof(struct info_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an INFO context; safe to call with NULL. Scrubs before free. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Populate an INFO context with the FPGA product parameters describing the
+ * capabilities of this image.
+ *
+ * Optional-module capacities are multiplied by a 0/1 presence flag so that
+ * absent modules report capacity 0. TPE capacities are only reported when
+ * all five Tx packet-edit submodules (HFU, TX_CPY, TX_INS, TX_RPL, CSU)
+ * are present. Always returns 0; unlike the other *_nthw_init functions,
+ * @p is not NULL-checked here.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for optional modules */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE needs the full Tx packet-edit pipeline to be usable */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types apply when either KM or FLM is present */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is limited by the smaller of the Rx/Tx queue counts */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): -1 defaults are stored but the getters return
+	 * unsigned int — confirm callers handle the wrapped value.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/* Trivial accessors for the capability values captured by info_nthw_init().
+ * Absent-module capacities read back as 0 (see the presence gating in init).
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): init stores a -1 default for the two values below; the
+ * unsigned return wraps it — confirm callers expect that sentinel form.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Lifetime: allocate, probe the INFO module instance, release. */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Accessors for the cached capability/resource counts (no hardware access). */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Cached view of the FPGA INFO module: one counter per hardware resource
+ * exposed by the flow-filter pipeline, filled in during initialization
+ * and read back through the info_nthw_get_* accessors.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	/* Port topology */
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	/* Categorizer (CAT) */
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	/* Key matcher (KM): CAM/TCAM dimensions */
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	/* Flow matcher (FLM) */
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	/* HST / QSL / PDB / IOA / ROA / DBS modules */
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	/* CAT<->KM interface */
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	/* TX packet editor (TPE) */
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA FPGA module. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize an IOA module context.
+ *
+ * @return Pointer to the new instance, or NULL on allocation failure.
+ *         The caller owns the memory and must release it with
+ *         ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/*
+ * Release an IOA context allocated by ioa_nthw_new().
+ * The structure is cleared before being freed so stale register/field
+ * pointers cannot be used after deletion; NULL is accepted as a no-op.
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IOA context to instance @n_instance of the MOD_IOA FPGA module
+ * and resolve all register/field handles used by the setter functions.
+ *
+ * Probe mode: when @p is NULL the function only reports whether the module
+ * instance exists (0 = present, -1 = absent).
+ *
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control/data register pair.  register_get_field() is
+	 * used for fields that must exist in every supported FPGA image.
+	 */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * The EPP registers are optional, hence the
+		 * module_query_register() lookups and explicit NULL fallbacks;
+		 * the corresponding setters check for NULL before writing.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/* RCP: stage a recipe field value in the shadow register.  Nothing reaches
+ * the hardware until ioa_nthw_rcp_flush() is called.
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Write the staged recipe control/data values to the hardware. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special: stage the two custom TPID values, then flush. */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+/* Write the staged TPID values to the hardware. */
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/* ROA/EPP accessors.  The EPP registers are optional (resolved with
+ * module_query_register() in ioa_nthw_init()), so every setter silently
+ * becomes a no-op when the field does not exist in the loaded FPGA image.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+/* Write the staged EPP control/data values to the hardware (if present). */
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Register/field handle cache for one instance of the IOA FPGA module.
+ * All pointers are resolved once in ioa_nthw_init(); the mp_roa_epp_*
+ * handles are optional and may be NULL.
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	/* RCP: recipe control/data registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	/* Custom VLAN TPID values */
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP control bits hosted in IOA; NULL when absent */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+/* Lifetime: allocate, bind to a module instance, release. */
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage recipe fields, then flush to hardware */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module (optional in hardware; setters are no-ops when absent) */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Set a field's value only when the (optional) field handle is non-NULL.
+ * Used for fields that exist only in some FPGA versions and are resolved
+ * with register_query_field().
+ *
+ * The field expression is evaluated exactly once.  The temporary is given
+ * an unlikely name so it cannot capture an identifier used in the caller's
+ * arguments (the previous temporary was named 'a', which would silently
+ * shadow a caller variable 'a' passed as the value), and both arguments
+ * are parenthesized in the expansion to avoid precedence surprises.
+ */
+#define CHECK_AND_SET_VALUE(_field, _val)                \
+	do {                                             \
+		__typeof__(_field) field_ = (_field);    \
+		if (field_)                              \
+			field_set_val32(field_, (_val)); \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM FPGA module. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a KM module context.
+ *
+ * @return Pointer to the new instance, or NULL on allocation failure.
+ *         The caller owns the memory and must release it with
+ *         km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/*
+ * Release a KM context allocated by km_nthw_new().
+ * The structure is cleared before being freed so stale register/field
+ * pointers cannot be used after deletion; NULL is accepted as a no-op.
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a KM context to instance @n_instance of the MOD_KM FPGA module and
+ * resolve all register/field handles used by the setter functions.
+ *
+ * Probe mode: when @p is NULL the function only reports whether the module
+ * instance exists (0 = present, -1 = absent).
+ *
+ * register_get_field() is used for fields that must exist in every
+ * supported FPGA image; register_query_field() for version-dependent
+ * fields, which are left NULL when absent (their setters then no-op via
+ * CHECK_AND_SET_VALUE).
+ *
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Version-dependent fields: SW8/DW8 presence selects the layout
+	 * handled at the bottom of this function.
+	 */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0)
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines: map the legacy QW/SW*_B field names onto
+		 * the same handles so the setters work on both layouts
+		 */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/* RCP: stage recipe field values in the shadow register; nothing reaches
+ * the hardware until km_nthw_rcp_flush() is called.  Setters that use
+ * CHECK_AND_SET_VALUE target version-dependent fields and silently no-op
+ * when the field is absent from the loaded FPGA image.
+ *
+ * Note: the stray ';' after each function body (an empty external
+ * declaration, rejected by ISO C before C23) has been removed.
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+/* SWX / DW8 / DW10 / mask setters.  Stray post-function semicolons removed
+ * (empty external declarations are not valid ISO C before C23).
+ */
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+/* Multi-word mask fields: field_set_val() copies mn_words 32-bit words */
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+} /* for DW8/DW10 from v6+ */
+
+/* Remaining mandatory RCP fields plus three optional (keyway/flow-set)
+ * fields.  Stray post-function semicolons removed (empty external
+ * declarations are not valid ISO C before C23).
+ */
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+};
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+};
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+};
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+};
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+};
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+};
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+};
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+};
+
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+};
+
+/* CAM: stage CAM entry words/flow types; commit with km_nthw_cam_flush().
+ * (Stray ';' after each function definition removed — not valid ISO C.)
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* Commit the staged CAM control/data registers to hardware. */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM: shadow-register setters; commit with km_nthw_tcam_flush().
+ * (Stray ';' after each function definition removed — not valid ISO C.)
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* Stage a TCAM entry; val must point at 3 uint32_t words. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* BANK_MASK is a version-optional field — set only if present. */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant (val points at 3 uint32_t words); used in v4. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw; /* definition below */
+
+typedef struct km_nthw km_nthw_t;
+
+/* Lifetime: allocate, bind to an FPGA instance, destroy. */
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* All setters below stage values in shadow registers; the matching
+ * *_flush() call commits them to hardware.
+ */
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val); /* 3 words */
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val); /* 3 words */
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/* Handle object caching the KM module's register/field pointers.
+ * Fields resolved with register_query_field() may be NULL when the FPGA
+ * version lacks them; callers must use the guarded setters for those.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	/* RCP: recipe control/data shadow registers */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set; /* optional; NULL if absent */
+	nt_field_t *mp_rcp_data_keyway_a; /* optional; NULL if absent */
+	nt_field_t *mp_rcp_data_keyway_b; /* optional; NULL if absent */
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	/* CAM */
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	/* TCAM */
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	/* TCI */
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	/* TCQ */
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask; /* optional; NULL if absent */
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug mode setting to the underlying PDB module handle. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized PDB handle.
+ * Returns NULL on allocation failure; pair with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() zeroes the object and checks the size for overflow,
+	 * replacing the malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(struct pdb_nthw));
+}
+
+/* Free a handle created by pdb_nthw_new(); NULL is accepted. */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (p) {
+		/* Scrub register pointers so use-after-free faults early. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve register/field handles of PDB instance @n_instance on @p_fpga
+ * into @p.
+ *
+ * When called with p == NULL this only probes for the module: returns 0 if
+ * the instance exists, -1 otherwise. Otherwise returns 0 on success or -1
+ * (with a log message) if the instance does not exist.
+ *
+ * PCAP_KEEP_FCS is looked up with register_query_field() and may stay NULL
+ * on FPGA versions that lack it; the corresponding setter guards for this.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* optional field: query (not get) so absence yields NULL, not error */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP setters: stage values in the RCP_DATA shadow register; committed to
+ * hardware by pdb_nthw_rcp_flush().
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+/* The *_rel setters take a signed offset (int32_t). */
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* PCAP_KEEP_FCS is optional; silently ignored on FPGAs without the field. */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+/* Commit the staged RCP control/data registers to hardware. */
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle object caching the PDB module's register/field pointers. */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional; NULL if absent */
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+/* NOTE: a duplicated pdb_nthw_config_port_ofs prototype was removed here. */
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug mode setting to the underlying QSL module handle. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized QSL handle.
+ * Returns NULL on allocation failure; pair with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() zeroes the object and checks the size for overflow,
+	 * replacing the malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(struct qsl_nthw));
+}
+
+/* Free a handle created by qsl_nthw_new(); NULL is accepted. */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p) {
+		/* Scrub register pointers so use-after-free faults early. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve register/field handles of QSL instance @n_instance on @p_fpga
+ * into @p.
+ *
+ * With p == NULL this only probes for the module: returns 0 if the instance
+ * exists, -1 otherwise. Otherwise returns 0 on success or -1 (with a log
+ * message) if the instance does not exist.
+ *
+ * Version differences handled here:
+ *  - fields looked up via register_query_field() stay NULL when absent;
+ *  - QST_DATA.EN was renamed to QEN in FPGA module v0.7;
+ *  - the LTX registers were removed in v0.7+, so all LTX pointers may be
+ *    NULL and the LTX accessors must guard against that.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* optional fields: NULL when the FPGA version lacks them */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE)
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Placeholder: QSL requires no per-index setup; arguments are unused. */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP setters: stage values in the RCP_DATA shadow register; committed to
+ * hardware by qsl_nthw_rcp_flush(). Optional fields (cao/lr/tsa/vli) are
+ * silently ignored on FPGA versions that lack them.
+ * (Removed a stray ';' after qsl_nthw_rcp_select and stray blank lines
+ * inside the tbl_hi/tbl_idx signatures for consistency with siblings.)
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+/* Commit the staged RCP control/data registers to hardware. */
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX: removed from the FPGA in v0.7+, so every register/field pointer may
+ * be NULL (see qsl_nthw_init); all accessors must therefore guard.
+ */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Fix: guard on the field actually written (was mp_ltx_addr). */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* Fix: LTX registers are NULL on v0.7+; do not flush absent regs. */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST: stage queue-selection table entries; commit with qsl_nthw_qst_flush().
+ * Fields looked up with register_query_field() may be NULL on some FPGA
+ * versions; those setters guard and silently ignore the write.
+ */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+/* No guard needed: init falls back to the v0.7 QEN field name, so
+ * mp_qst_data_en is always resolved.
+ */
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ: unmatched-packet queue mapping */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Cached register/field handles for one QSL FPGA module instance.
+ * All pointers are resolved once at init time; members resolved with
+ * register_query_field() may be NULL when the field is absent in the
+ * loaded FPGA variant (see the NULL checks in the QST setters).
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP: recipe table CTRL/DATA registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX table registers/fields */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST table registers/fields (TX_PORT..VEN are optional) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN table registers/fields */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ table registers/fields */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for the RMC module wrapper. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rmc_nthw shadow struct; NULL on OOM. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	struct rmc_nthw *p = malloc(sizeof(struct rmc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free a struct from rmc_nthw_new(); NULL is accepted. The memory is
+ * cleared first so stale register handles cannot be reused after free.
+ */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an rmc_nthw shadow struct to RMC module instance n_instance.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise. On success all register/field handles
+ * are resolved; BLOCK_RPP_SLICE is queried (optional) and may stay NULL.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* Optional field: query (not get) — may be absent in this FPGA. */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-index setup is needed for RMC; kept for API symmetry with the
+ * other nthw modules.
+ */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL field setters; values reach hardware on rmc_nthw_ctrl_flush(). */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* RPP_SLICE is optional — skipped when absent in this FPGA variant. */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Flush the CTRL register to the hardware. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Cached register/field handles for one RMC FPGA module instance.
+ * mp_ctrl_block_rpp_slice is resolved with register_query_field() and
+ * may be NULL when absent in the loaded FPGA variant.
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	/* CTRL register and its fields */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;	/* optional */
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for the ROA module wrapper. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized roa_nthw shadow struct; NULL on OOM. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	struct roa_nthw *p = malloc(sizeof(struct roa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free a struct from roa_nthw_new(); NULL is accepted. The memory is
+ * cleared first so stale register handles cannot be reused after free.
+ */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a roa_nthw shadow struct to ROA module instance n_instance.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise. All fields below are resolved with
+ * register_get_field(), i.e. they are required in this FPGA layout.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR */
+/* TUN HDR table: select an entry, set the access count, write the tunnel
+ * header (four 32-bit words), then flush CTRL/DATA to hardware.
+ */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* val must point to at least 4 uint32_t words. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG */
+/* TUN CFG table field setters: select an entry, set the access count and
+ * the per-tunnel configuration fields; values reach hardware on
+ * roa_nthw_tun_cfg_flush().
+ */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX LAG index field of the TUNCFG data register.
+ * Note: the stray ';' after the closing brace (invalid ISO C at file
+ * scope, and inconsistent with every other function here) was removed.
+ */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+/* Remaining TUN CFG field setters and the CTRL/DATA flush. */
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Flush the TUN CFG CTRL and DATA registers. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG */
+/* ROA CONFIG register field setters; written out by roa_nthw_config_flush(). */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Flush the CONFIG register to the hardware. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG */
+/* LAG CFG table accessors: select an entry, set the access count and the
+ * TX physical port; flushed by roa_nthw_lag_cfg_flush().
+ */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Cached register/field handles for one ROA FPGA module instance.
+ * All handles are resolved with register_get_field() in roa_nthw_init(),
+ * i.e. they are required in this FPGA layout.
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR table registers/fields */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG table registers/fields */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG register and its fields */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG CFG table registers/fields */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Enable/disable debug tracing for the RPP_LR module wrapper. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rpp_lr_nthw shadow struct; NULL on OOM. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free a struct from rpp_lr_nthw_new(); NULL is accepted. The memory is
+ * cleared first so stale register handles cannot be reused after free.
+ */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an rpp_lr_nthw shadow struct to RPP_LR module instance n_instance.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise. The RCP register/fields are required;
+ * the IFR RCP register/fields are queried and may stay NULL when absent
+ * in the loaded FPGA variant.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a redundant
+	 * second fpga_query_module() lookup (matches rmc/roa init).
+	 */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	/* Optional IFR registers/fields: query (not get) — may be NULL. */
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP table accessors. These fields are required (resolved with
+ * register_get_field at init), so only asserted here.
+ */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Flush the RCP CTRL and DATA registers to the hardware. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * IFR RCP accessors. These registers/fields are resolved with
+ * module_query_register()/register_query_field() at init and may be NULL
+ * when the loaded FPGA variant has no IFR support; the original code only
+ * asserted, which dereferences NULL in NDEBUG builds. Guard each access
+ * instead, matching the handling of the other optional fields in this
+ * patch (e.g. rmc_nthw_ctrl_block_rpp_slice, qsl_nthw_qst_tx_port).
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	if (p->mp_ifr_rcp_addr)
+		field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	if (p->mp_ifr_rcp_cnt)
+		field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	if (p->mp_ifr_rcp_data_en)
+		field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	if (p->mp_ifr_rcp_data_mtu)
+		field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Flush the IFR RCP CTRL and DATA registers when present. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	if (p->mp_ifr_rcp_ctrl)
+		register_flush(p->mp_ifr_rcp_ctrl, 1);
+	if (p->mp_ifr_rcp_data)
+		register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Cached register/field handles for one RPP_LR FPGA module instance.
+ * The RCP members are required; the IFR RCP members are resolved with
+ * query calls and may be NULL when absent in the loaded FPGA variant.
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	/* RCP CTRL register and its fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP DATA register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR RCP CTRL (optional — may be NULL) */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	/* IFR RCP DATA (optional — may be NULL) */
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for the SLC module wrapper. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized slc_nthw shadow struct; NULL on OOM. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	struct slc_nthw *p = malloc(sizeof(struct slc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free a struct from slc_nthw_new(); NULL is accepted. The memory is
+ * cleared first so stale register handles cannot be reused after free.
+ */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an slc_nthw shadow struct to SLC module instance n_instance.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise. All RCP fields are required in this
+ * FPGA layout (resolved with register_get_field).
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a redundant
+	 * second fpga_query_module() lookup (matches rmc/roa init).
+	 */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard names starting with a double underscore are reserved
+ * identifiers (C11 7.1.3) - consider FLOW_NTHW_SLC_H_ instead.
+ */
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context holding register/field handles for one SLC module instance. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, set from n_instance */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_slc;		/* the SLC module of this instance */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-field writers; commit with slc_nthw_rcp_flush(). */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the debug mode setting to the underlying SLC LR module. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zeroed slc_lr_nthw context; returns NULL on allocation failure. */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	struct slc_lr_nthw *p = malloc(sizeof(struct slc_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free the context; a NULL argument is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to SLC_LR module instance n_instance of p_fpga and
+ * resolve all RCP register/field handles.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for module presence (0 if found).
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* NOTE(review): message says "Slc"; "SlcLr" would match this module. */
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* NOTE(review): re-queries the module; p_mod already holds the result. */
+	p->m_slc_lr = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	/* RCP register/field lookups.
+	 * NOTE(review): uses the SLC_RCP_* register defines for the SLC_LR
+	 * module - confirm SLC_LR shares the SLC register layout.
+	 */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP register accessors: each writes one shadow field; values reach the
+ * hardware when slc_lr_nthw_rcp_flush() is called.
+ */
+/* Select the RCP record to access (CTRL.ADR field). */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count (CTRL.CNT field). */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Write the DATA.TAIL_SLC_EN field (tail slice enable). */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Write the DATA.TAIL_DYN field. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Write the DATA.TAIL_OFS field; takes a signed (int32_t) value. */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Write the DATA.PCAP field. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush the RCP CTRL and DATA registers (one entry each). */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard names starting with a double underscore are reserved
+ * identifiers (C11 7.1.3) - consider FLOW_NTHW_SLC_LR_H_ instead.
+ */
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context holding register/field handles for one SLC LR module instance. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, set from n_instance */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_slc_lr;		/* the SLC_LR module of this instance */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-field writers; commit with slc_lr_nthw_rcp_flush(). */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Propagate the debug mode setting to the underlying TX_CPY module. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zeroed tx_cpy_nthw context; returns NULL on allocation failure.
+ * The per-writer array (m_writers) is allocated later in tx_cpy_nthw_init().
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	struct tx_cpy_nthw *p = malloc(sizeof(struct tx_cpy_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free the writer array, scrub and free the context; NULL is a no-op. */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p) {
+		free(p->m_writers);
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to TX_CPY module instance n_instance of p_fpga, read the
+ * writer count and variant product parameters, allocate the writer array and
+ * resolve register/field handles for each writer.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist, the writer
+ * count is < 1, or allocation fails. Calling with p == NULL only probes for
+ * module presence (0 if found).
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* NOTE(review): re-queries the module; p_mod already holds the result. */
+	p->m_tx_cpy = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* A non-zero variant adds the per-writer mask registers below. */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/*
+	 * Resolve writers from the highest index down; each case falls
+	 * through to the next lower writer.
+	 * NOTE(review): default aliases to case 6, so for writers_cnt > 6
+	 * only writers 0-5 are resolved even though m_writers has
+	 * writers_cnt entries - confirm this is intended.
+	 */
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					  CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			/* NOTE(review): odd indentation below (space vs tab). */
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+			 register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+					    CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:
+		/* Unreachable: writers_cnt >= 1 was checked above. */
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Per-writer shadow-field accessors; index must be < m_writers_cnt.
+ * Values are committed by tx_cpy_nthw_writer_flush() /
+ * tx_cpy_nthw_writer_mask_flush().
+ */
+/* Select the writer record to access (CTRL.ADR field). */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set the writer access count (CTRL.CNT field). */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Write the DATA.READER_SELECT field. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Write the DATA.DYN field. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Write the DATA.OFS field. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Write the DATA.LEN field. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/* Write the DATA.MASK_POINTER field; only valid when the mask registers
+ * exist (variant != 0 at init), as enforced by the assert.
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Flush this writer's CTRL and DATA registers (one entry each). */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Select the mask record to access (MASK_CTRL.ADR); mask variant only. */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set the mask access count (MASK_CTRL.CNT); mask variant only. */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Write the MASK_DATA.BYTE_MASK field; mask variant only. */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Flush this writer's MASK_CTRL and MASK_DATA registers; mask variant only. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard names starting with a double underscore are reserved
+ * identifiers (C11 7.1.3) - consider FLOW_NTHW_TX_CPY_H_ instead.
+ */
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one TX_CPY writer. The mask members are only
+ * resolved when the module's variant product parameter is non-zero and are
+ * NULL otherwise.
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer;	/* variant != 0 only */
+
+	nt_register_t *mp_writer_mask_ctrl;		/* variant != 0 only */
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;		/* variant != 0 only */
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Context for one TX_CPY module instance, owning an array of writers. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, set from n_instance */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_tx_cpy;		/* the TX_CPY module of this instance */
+
+	unsigned int m_writers_cnt;	/* entries in m_writers (>= 1 after init) */
+	struct tx_cpy_writers_s *m_writers;	/* heap array, freed in delete */
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+/* Per-writer shadow-field writers; commit with tx_cpy_nthw_writer_flush(). */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+/* Mask-register accessors; valid only when the mask variant is present. */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Propagate the debug mode setting to the underlying TX_INS module. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zeroed tx_ins_nthw context; returns NULL on allocation failure. */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	struct tx_ins_nthw *p = malloc(sizeof(struct tx_ins_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free the context; a NULL argument is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to TX_INS module instance n_instance of p_fpga and
+ * resolve all RCP register/field handles.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for module presence (0 if found).
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* NOTE(review): re-queries the module; p_mod already holds the result. */
+	p->m_tx_ins = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	/* RCP register/field lookups */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* RCP register accessors: each writes one shadow field; values reach the
+ * hardware when tx_ins_nthw_rcp_flush() is called.
+ */
+/* Select the RCP record to access (CTRL.ADR field). */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count (CTRL.CNT field). */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Write the DATA.DYN field. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Write the DATA.OFS field. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Write the DATA.LEN field. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Flush the RCP CTRL and DATA registers (one entry each). */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard names starting with a double underscore are reserved
+ * identifiers (C11 7.1.3) - consider FLOW_NTHW_TX_INS_H_ instead.
+ */
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Context holding register/field handles for one TX_INS module instance. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, set from n_instance */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_tx_ins;		/* the TX_INS module of this instance */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-field writers; commit with tx_ins_nthw_rcp_flush(). */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
/* Forward the debug-mode setting to the underlying TX_RPL module. */
void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
}
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_rpl = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
/*
 * RCP recipe table accessors: stage values in the shadow registers and
 * commit them to hardware with tx_rpl_nthw_rcp_flush().
 */
void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_addr, val);
}

void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_cnt, val);
}

void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_dyn, val);
}

void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ofs, val);
}

void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_len, val);
}

void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
}

void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ext_prio, val);
}

/* Write the staged RCP CTRL and DATA entries to hardware. */
void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}

/* EXT extension table accessors (same select/stage/flush pattern). */
void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_addr, val);
}

void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_cnt, val);
}

void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_data_rpl_ptr, val);
}

void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_ext_ctrl, 1);
	register_flush(p->mp_ext_data, 1);
}

/* RPL replace-data table accessors. */
void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_addr, val);
}

void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_cnt, val);
}

/* @val must point to 4 words; all 4 are staged into the DATA VALUE field. */
void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
{
	field_set_val(p->mp_rpl_data_value, val, 4);
}

void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rpl_ctrl, 1);
	register_flush(p->mp_rpl_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
/*
 * Register/field shadow for one TX replacer (TX_RPL) FPGA module instance.
 * Three select/stage/flush register pairs are exposed: RCP (recipes),
 * EXT (extension table) and RPL (replace data).
 */
struct tx_rpl_nthw {
	uint8_t m_physical_adapter_no;	/* instance number this context is bound to */
	nt_fpga_t *mp_fpga;

	nt_module_t *m_tx_rpl;	/* handle to the TX_RPL FPGA module */

	/* RCP control: ADR selects a recipe, CNT the count */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_ctrl_addr;
	nt_field_t *mp_rcp_ctrl_cnt;

	/* RCP data: fields of the selected recipe */
	nt_register_t *mp_rcp_data;
	nt_field_t *mp_rcp_data_dyn;
	nt_field_t *mp_rcp_data_ofs;
	nt_field_t *mp_rcp_data_len;
	nt_field_t *mp_rcp_data_rpl_ptr;
	nt_field_t *mp_rcp_data_ext_prio;

	/* EXT control/data: extension table entries */
	nt_register_t *mp_ext_ctrl;
	nt_field_t *mp_ext_ctrl_addr;
	nt_field_t *mp_ext_ctrl_cnt;

	nt_register_t *mp_ext_data;
	nt_field_t *mp_ext_data_rpl_ptr;

	/* RPL control/data: replace-data table entries (4 words per entry) */
	nt_register_t *mp_rpl_ctrl;
	nt_field_t *mp_rpl_ctrl_addr;
	nt_field_t *mp_rpl_ctrl_cnt;

	nt_register_t *mp_rpl_data;
	nt_field_t *mp_rpl_data_value;
};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-21 13:54   ` [PATCH v6 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-21 13:54   ` Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index c4f0a912d3..b816021b11 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -24,6 +24,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -66,6 +67,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
/*
 * Program the ROA tunnel-header template at recipe @index.
 *
 * Writes 16 words (64 bytes) for IPv4 or 32 words (128 bytes) for IPv6,
 * in groups of 4.  Within each group the source words are taken in
 * reverse order, and each word is converted from network byte order
 * with ntohl() before being written.
 *
 * Returns 0 on success, the OR of the failing backend return values
 * otherwise (the loop stops at the first failure).
 */
int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
				   struct tunnel_header_s *tun)
{
	int err = 0;
	int num_writes = (tun->ip_version == 4) ? 4 : 8;

	/*
	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
	 */
	for (int i = 0; (i < num_writes) && !err; i++) {
		for (int ii = 0; (ii < 4) && !err; ii++) {
			/* must write each 4 words backwards! */
			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
						     index, i * 4 + ii,
						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
		}
	}

	return err;
}
+
/*
 * Program the ROA tunnel configuration recipe at @index from the packed
 * 64-bit @color_actions word (layout documented above the set_roa_*
 * helpers in flow_api_actions.h).
 *
 * Configures, in order: tunnel push (length/type/IP checksum handling),
 * recirculation (bypass port wins over a plain recirculate port), TX
 * destination, and the companion IOA "EPP" entry.
 *
 * Returns 0 on success, -1 on an unsupported TX port combination.
 */
int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
	/*
	 * If tunnel header specified
	 */
	int tun_len = get_roa_tunhdr_len(color_actions);

	if (tun_len) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
				      tun_len);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
				      roa_get_tun_type(color_actions));

		/* set the total tunnel IP header length */
		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 */
			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
					       sizeof(struct flow_elem_ipv6))) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
				/* tunnel header length excludes the IPv6 header itself */
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   (sizeof(struct flow_elem_eth) +
						    sizeof(struct flow_elem_ipv6))));
			}
		} else {
			/* IPv4 */
			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
						      index, 1);
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   sizeof(struct flow_elem_eth)));
			}
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
				      get_roa_tun_ip_type(color_actions));

		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 - Do not update the IP checksum in the tunnel header */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
					      0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index, 0);
		} else {
			/* IPv4: use the checksum pre-calculated into the action word */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
					      index, 1);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index,
					      get_roa_tun_ip_csum(color_actions));
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
				      index, 1);

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
	}

	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);

	if (recirculate_bypass) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      recirculate_bypass);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);

	} else {
		int32_t recirculate_port = roa_get_recirc_port(color_actions);

		/* 255 disables bypass; see ROA_RECIRC_BYPASS_PHY_OFFSET usage */
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      255);

		if (recirculate_port >= 0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
					      index, recirculate_port);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);
		}
	}

	uint8_t tx = roa_get_tx(color_actions);

	if (tx) {
		if (tx == DESTINATION_TX_PHY0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
		} else if (tx == DESTINATION_TX_PHY1) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY1);
		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
			/* both PHYs: TX on PHY0 and recirculate a copy via PHY1 */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
					      index, 0x81); /* port 1 - only port left */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);

		} else {
			return -1; /* ERR */
		}
	}

	/*
	 * Special IOA memory that contains ROA information - bad FPGA design
	 */
	if (tx || tun_len) {
		if (be->ioa.ver > 3 && tun_len &&
				get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, 2);
		} else {
			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, !!tun_len);
		}
		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
	}

	return 0;
}
+
/*
 * Program the IOA recipe at @index from the packed 64-bit
 * @color_actions word (layout documented above the ioa_* helpers in
 * flow_api_actions.h): tunnel/VLAN pop flags, optional VLAN push with
 * TPID/TCI, and optional destination-queue override.
 *
 * Always returns 0.
 */
int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	if (color_actions & ioa_set_vxlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
	}

	if (color_actions & ioa_set_vlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
	}

	int tpid_sel = ioa_get_tpid_sel(color_actions);

	if (color_actions & ioa_set_vlan_push(0, 0)) {
		uint16_t tci = ioa_get_vlan_tci(color_actions);

		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
		       tpid_sel ? 0x88a8 : 0x8100, tci);
		/* split the 16-bit TCI into its VID/DEI/PCP components */
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
				   tci & 0x0FFF);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
				   (tci >> 12) & 0x1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
				   (tci >> 13) & 0x7);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
	}

	int queue = ioa_get_queue(color_actions);

	if (queue >= 0) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
	}

	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
/*
 * Build the pre-4.1 ("old") color action word; the bit layout is chosen
 * at compile time by MAX_COLOR_FLOW_STATS (see the comment block above).
 */
static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
		uint32_t roa_rcp)
{
#if (MAX_COLOR_FLOW_STATS == 0x400)
	/* 10-bit mark, 10-bit IOA index, 10-bit ROA index, layout id 0 */
	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
				((ioa_rcp & 0x3ff) << 10) |
				((roa_rcp & 0x3ff) << 20) | (0 << 30);
#else
	/* 14-bit mark, 8-bit IOA index, 8-bit ROA index, layout id 1 */
	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
				((ioa_rcp & 0xff) << 14) |
				((roa_rcp & 0xff) << 22) | (1 << 30);
#endif
	return color_action;
}
+
/* Mask covering bit positions b..a inclusive (a >= b, a < 31). */
#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)

/*
 *  9:0   Mark (color) 1024 flow stats
 * 17:10  IOA index     256 entries
 * 25:18  ROA index     256 entries
 * 30:26  QSL and HSH    32 recipes indexable
 * 31:31  CAO               implicitly when color_action is set
 */
#define FLOW_MARK_MASK BITMASK(9, 0)
#define IOA_RCP_MASK BITMASK(17, 10)
#define ROA_RCP_MASK BITMASK(25, 18)
#define QSL_HSH_MASK BITMASK(30, 26)

/* Pack mark + recipe indices into the current-layout color action word. */
static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
					uint32_t roa_rcp, uint32_t qsl_hsh)
{
	/* 1U << 31: shifting a signed 1 into the sign bit is undefined behavior */
	uint32_t color_action = (mark & FLOW_MARK_MASK) |
				((ioa_rcp & IOA_RCP_MASK) << 10) |
				((roa_rcp & ROA_RCP_MASK) << 18) |
				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
	return color_action;
}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
/* Flag that a new recirculate port was allocated (bit 50). */
static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
{
	actions |= 1ULL << 50;
	return actions;
}

static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
{
	return (uint8_t)((actions >> 50) & 1);
}

/* Tunnel IP type, bit 24: 0 = IPv4, 1 = IPv6 (see TUN_IPV4/TUN_IPV6). */
static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
{
	actions |= (uint64_t)(ip_type & 1) << 24;
	return actions;
}

static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
{
	return (uint8_t)((actions >> 24) & 1);
}

/* Pre-calculated tunnel IP header checksum, bits 49:34. */
static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
{
	actions |= (uint64_t)csum << 34;
	return actions;
}

static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
{
	return (uint16_t)((actions >> 34) & 0xffff);
}

/* Tunnel header length in bytes, bits 33:26 (0 means no tunnel). */
static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
{
	actions |= (uint64_t)length << 26;
	return actions;
}

static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
{
	return (uint8_t)((actions >> 26) & 0xff);
}

/* TX destination, bits 23:20; stored as txport + ROA_TX_PHY0 (i.e. +1). */
static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
{
	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
	return actions;
}

static inline uint8_t roa_get_tx(uint64_t actions)
{
	return (actions >> 20) & 0x0f;
}

/* Tunnel type, bits 19:16. */
static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
{
	actions |= (type & 0x0f) << 16;
	return actions;
}

static inline uint8_t roa_get_tun_type(uint64_t actions)
{
	return (actions >> 16) & 0x0f;
}

/* Recirculate via @port: sets enable bit 25 and ORs the port into bits 7:0. */
static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
{
	actions |= (1ULL << 25) | port;
	return actions;
}

/* Returns the recirculate port, or -1 when recirculation is not enabled. */
static inline int32_t roa_get_recirc_port(uint64_t actions)
{
	if (!((1ULL << 25) & actions))
		return -1;
	return (actions & 0xff);
}

/* Recirculate-bypass port, bits 15:8; non-zero overrides the plain port. */
static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
{
	actions |= ((uint64_t)port & 0xff) << 8;
	return actions;
}

static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
{
	return ((actions >> 8) & 0xff);
}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
/*
 * Enable queue override (bit 31) and store the queue id in bits 23:16.
 *
 * Uses 1ULL << 31: with the signed literal (1 << 31), the int result is
 * negative and sign-extends to 0xFFFFFFFF80000000 when OR'ed into the
 * 64-bit action word, corrupting bits 32-63.
 */
static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
{
	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
	return actions;
}
+
/*
 * Return the override queue id (bits 23:16), or -1 when queue override
 * (bit 31) is not enabled.
 *
 * Uses 1ULL << 31: with the signed literal (1 << 31), the mask
 * sign-extends to 0xFFFFFFFF80000000, so any stray bit in 32-63 would
 * falsely enable the override.
 */
static inline int ioa_get_queue(uint64_t actions)
{
	if (!(actions & (1ULL << 31)))
		return -1;
	return ((actions >> 16) & 0xff);
}
+
/* Set the "pop outer VxLAN tunnel" flag (bit 28) in the action word. */
static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
{
	const uint64_t pop_vxlan_flag = 1ULL << 28;

	return actions | pop_vxlan_flag;
}
+
/* Set the "pop outer VLAN" flag (bit 29) in the action word. */
static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
{
	const uint64_t pop_vlan_flag = 1ULL << 29;

	return actions | pop_vlan_flag;
}
+
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
/* Extract the TPID selector nibble (bits 27:24) from the action word. */
static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
{
	const unsigned int tpid_shift = 24;

	return (uint8_t)((actions >> tpid_shift) & 0x0f);
}
+
/* Set the "push VLAN" flag (bit 30) and store the 16-bit TCI in bits 15:0. */
static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
{
	const uint64_t push_vlan_flag = 1ULL << 30;

	return actions | push_vlan_flag | tci;
}
+
/* Set the "push VLAN" flag (bit 30) and the 3-bit PCP field (bits 15:13). */
static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
{
	const uint16_t pcp_bits = (uint16_t)((pcp & 7) << 13);

	return actions | (1ULL << 30) | pcp_bits;
}
+
/* Extract the 16-bit VLAN TCI from bits 15:0 of the action word. */
static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
{
	const uint64_t tci_mask = 0xffff;

	return (uint16_t)(actions & tci_mask);
}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct {
+	const char *name;
+	int (*allocate)(struct flow_api_backend_s *be);
+	void (*free)(struct flow_api_backend_s *be);
+	int (*reset)(struct flow_api_backend_s *be);
+	bool (*present)(struct flow_api_backend_s *be);
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+void zero_module_cache(struct common_func_s *mod)
+{
+	memset(mod->base, 0, mod->allocated_size);
+}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
/* Sentinel returned by index searches (e.g. find_equal_index) when no
 * matching entry exists.
 */
#define NOT_FOUND 0xffffffff

/* Number of hidden elements placed in front of every module array by
 * callocate_mod().  The enum is currently empty, so EXTRA_INDEXES
 * (the first enumerator) evaluates to 0.
 * NOTE(review): EXTRA_INDEX_COPY is not defined anywhere in this file;
 * COPY_INDEX can only compile if another header supplies it — verify.
 */
enum { EXTRA_INDEXES };
#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
/*
 * Move one 32-bit word between the SW cache and the caller:
 * get != 0 copies the cached word into *val, otherwise *val is
 * written into the cache.
 */
static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = *cached_val;
		return;
	}
	*cached_val = *val;
}
+
/*
 * Same as get_set() but the cached word is a signed 32-bit value; the
 * transfer reinterprets the bit pattern between int32_t and uint32_t.
 */
static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = (uint32_t)*cached_val;
		return;
	}
	*cached_val = (int32_t)*val;
}
+
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
/*
 * Compare elements 'idx' and 'cmp_idx' byte-wise.  Returns 1 when they
 * are distinct indexes with identical contents, 0 when they differ (or
 * are the same index), or a negative error for bad mode/range.
 */
static inline int do_compare_indexes(void *be_module_reg,
	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
	unsigned int nb_elements, int get, const char *func)
{
	const uint8_t *base = (const uint8_t *)be_module_reg;

	if (!get)
		return error_unsup_field(func);
	if (cmp_idx >= nb_elements)
		return error_index_too_large(func);
	if (idx == cmp_idx)
		return 0;
	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
		      type_size) == 0 ? 1 : 0;
}
+
/* Return 1 if any of the n bytes at addr is non-zero, else 0. */
static inline int is_non_zero(const void *addr, size_t n)
{
	const uint8_t *p = (const uint8_t *)addr;

	while (n--) {
		if (*p++)
			return 1;
	}
	return 0;
}
+
/* Return 1 if all n bytes at addr are 0xff, else 0 (1 for n == 0). */
static inline int is_all_bits_set(const void *addr, size_t n)
{
	const uint8_t *p = (const uint8_t *)addr;

	while (n--) {
		if (*p++ != 0xff)
			return 0;
	}
	return 1;
}
+
/*
 * CAT color-to-engine (CTE) entry indexes; CT_CNT is the number of
 * entries.  NOTE(review): these presumably serve as bit positions in
 * the CTE enable bitmap (HW_CAT_CTE_ENABLE_BM) — confirm against the
 * CTE users before relying on that.
 */
enum cte_index_e {
	CT_COL = 0,
	CT_COR = 1,
	CT_HSH = 2,
	CT_QSL = 3,
	CT_IPF = 4,
	CT_SLC = 5,
	CT_PDB = 6,
	CT_MSK = 7,
	CT_HST = 8,
	CT_EPP = 9,
	CT_TPE = 10,
	CT_RRB = 11,
	CT_CNT
};
+
/* Sideband info bit indicator: when set in a frame_offs_e value, the
 * selector refers to sideband metadata rather than a frame position.
 */
#define SWX_INFO (1 << 6)

/*
 * Dynamic frame-offset selectors: each value names a protocol-relative
 * starting point within the packet (outer headers, then DYN_TUN_*
 * equivalents inside a tunnel).  The SB_* entries carry SWX_INFO and
 * select sideband data (VNI, MAC port, KCC ID) instead.
 */
enum frame_offs_e {
	DYN_SOF = 0,
	DYN_L2 = 1,
	DYN_FIRST_VLAN = 2,
	DYN_MPLS = 3,
	DYN_L3 = 4,
	DYN_ID_IPV4_6 = 5,
	DYN_FINAL_IP_DST = 6,
	DYN_L4 = 7,
	DYN_L4_PAYLOAD = 8,
	DYN_TUN_PAYLOAD = 9,
	DYN_TUN_L2 = 10,
	DYN_TUN_VLAN = 11,
	DYN_TUN_MPLS = 12,
	DYN_TUN_L3 = 13,
	DYN_TUN_ID_IPV4_6 = 14,
	DYN_TUN_FINAL_IP_DST = 15,
	DYN_TUN_L4 = 16,
	DYN_TUN_L4_PAYLOAD = 17,
	DYN_EOF = 18,
	DYN_L3_PAYLOAD_END = 19,
	DYN_TUN_L3_PAYLOAD_END = 20,
	SB_VNI = SWX_INFO | 1,
	SB_MAC_PORT = SWX_INFO | 2,
	SB_KCC_ID = SWX_INFO | 3
};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
/*
 * CAT (categorizer) module cache: resource counts queried from the
 * backend plus the version-specific register shadow.  Exactly one
 * union member is valid — presumably selected by the 'ver' field from
 * COMMON_FUNC_INFO_S (TODO confirm against the hw_mod_cat code).
 */
struct cat_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_cat_funcs;
	uint32_t nb_flow_types;
	uint32_t nb_pm_ext;
	uint32_t nb_len;
	uint32_t kcc_size;
	uint32_t cts_num;
	uint32_t kcc_banks;
	uint32_t kcc_id_bit_size;
	uint32_t kcc_records;
	uint32_t km_if_count;
	int32_t km_if_m0;
	int32_t km_if_m1;

	/* one shadow layout per supported CAT module version */
	union {
		struct hw_mod_cat_v18_s v18;
		struct hw_mod_cat_v21_s v21;
		struct hw_mod_cat_v22_s v22;
	};
};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
/*
 * KM (key matcher) module cache: CAM/TCAM dimensions read from the
 * backend plus the v7 register shadow.
 */
struct km_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_cam_banks;
	uint32_t nb_cam_record_words;
	uint32_t nb_cam_records;
	uint32_t nb_tcam_banks;
	uint32_t nb_tcam_bank_width;
	/* not read from backend, but rather set using version */
	uint32_t nb_km_rcp_mask_a_word_size;
	uint32_t nb_km_rcp_mask_b_word_size;
	union {
		struct hw_mod_km_v7_s v7;
	};
};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+bool hw_mod_flm_present(struct flow_api_backend_s *be); /* FLM module: lifecycle + register-group accessors */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be); /* CONTROL group; field selects a HW_FLM_CONTROL_* id */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be); /* STATUS group */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be); /* NOTE(review): "update" presumably refreshes shadow from HW -- confirm */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be); /* TIMEOUT group */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be); /* SCRUB group */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be); /* LOAD_BIN group */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be); /* LOAD_PPS group */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be); /* LOAD_LPS group */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be); /* LOAD_APS group */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be); /* PRIO group */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx, /* PST table: indexed; flush covers [start_idx, start_idx+count) */
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* RCP table: indexed; set_mask passes the value by pointer */
+			 int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be); /* BUF_CTRL group: read-side only (update/get) */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be); /* STAT counters: read-side only */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, /* learn / info / status data record accessors */
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+struct hsh_func_s { /* HSH module shadow state; field ids in enum hw_hsh_e */
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of RCP entries */
+	union { /* version-specific register layout */
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be); /* HSH module lifecycle + RCP accessors */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field, /* word_off addresses a word within multi-word fields */
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+struct qsl_func_s { /* QSL module shadow state; field ids in enum hw_qsl_e */
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories */
+	uint32_t nb_qst_entries; /* number of QST table entries */
+	union { /* version-specific register layout */
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be); /* QSL module lifecycle */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* RCP table accessors */
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx, /* QST table accessors */
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx, /* QEN table accessors */
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx, /* UNMQ table accessors */
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+struct slc_func_s { /* SLC module shadow state; field ids in enum hw_slc_e */
+	COMMON_FUNC_INFO_S;
+	union { /* version-specific register layout */
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be); /* SLC module lifecycle + RCP accessors */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+struct slc_lr_func_s { /* SLC LR module shadow state; mirrors SLC with its own enum hw_slc_lr_e */
+	COMMON_FUNC_INFO_S;
+	union { /* version-specific register layout */
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be); /* SLC LR module lifecycle + RCP accessors */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+struct pdb_func_s { /* PDB module shadow state; field ids in enum hw_pdb_e */
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of PDB RCP categories */
+
+	union { /* version-specific register layout */
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be); /* PDB module lifecycle */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* RCP table accessors */
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be); /* CONFIG group: set+flush only, no getter declared */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+struct ioa_func_s { /* IOA module shadow state; field ids in enum hw_ioa_e */
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories */
+	uint32_t nb_roa_epp_entries; /* number of ROA EPP entries */
+	union { /* version-specific register layout */
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be); /* IOA module lifecycle */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* RCP table accessors */
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be); /* CONFIG group: set+flush only, no getter declared */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field, /* ROA EPP table accessors */
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+struct roa_func_s { /* ROA module shadow state; field ids in enum hw_roa_e */
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel categories */
+	uint32_t nb_lag_entries; /* number of LAG config entries */
+	union { /* version-specific register layout */
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be); /* ROA module lifecycle */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx, /* TUNHDR: word-addressed like hsh rcp (word_off) */
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx, /* TUNCFG table accessors */
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be); /* CONFIG group */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx, /* LAGCFG table accessors */
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field, /* IGS / RCC drop counters */
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+struct rmc_func_s { /* RMC module shadow state; field ids in enum hw_rmc_e */
+	COMMON_FUNC_INFO_S;
+	union { /* version-specific register layout */
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+enum hw_rmc_e { /* CTRL fields only; no function ids for this module */
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be); /* RMC module lifecycle + CTRL accessors */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+struct tpe_func_s { /* TPE module shadow state; field ids in enum hw_tpe_e */
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories */
+	uint32_t nb_ifr_categories; /* number of IFR categories */
+	uint32_t nb_cpy_writers; /* number of CPY writers */
+	uint32_t nb_rpl_depth; /* RPL table depth */
+	uint32_t nb_rpl_ext_categories; /* number of RPL extension categories */
+	union { /* version-specific register layouts; two versions supported */
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX, /* field ids start above the function ids */
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be); /* TPE module lifecycle */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* RPP RCP table accessors */
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* RPP IFR RCP table accessors */
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* IFR RCP table accessors */
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* INS RCP table accessors */
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* RPL RCP table accessors */
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, /* RPL EXT table accessors */
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, /* RPL RPL table: set takes value by pointer */
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* CPY RCP table accessors */
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* HFU RCP table accessors */
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, /* CSU RCP table accessors */
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+enum debug_mode_e { /* bit flags for backend debug tracing (see set_debug_mode callback) */
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+struct flow_api_backend_ops { /* device-specific callback table; dev is the opaque backend handle */
+	int version; /* ops table version */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev); /* capability queries: counts/sizes reported by the device */
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id); /* RX queue management */
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev); /* each module section: presence/version query + per-table flush callbacks */
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm, /* data-path callbacks take be_dev + raw word buffers */
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+struct flow_api_backend_s { /* top-level backend context: callbacks plus per-module shadow state */
+	void *be_dev; /* opaque device handle passed to every iface callback */
+	const struct flow_api_backend_ops *iface; /* callback table bound at init */
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+int flow_api_backend_init(struct flow_api_backend_s *dev, /* bind iface/be_dev into dev */
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT,
+	RES_INVALID
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD,
+	KM_USE_EXTRACTOR_SWORD,
+};
+
+struct match_elem_s {
+	enum extractor_e extr;
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4];
+	uint32_t e_mask[4];
+
+	int extr_start_offs_id;
+	int8_t rel_offs;
+	uint32_t word_len;
+};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+struct kcc_key_s { /* 64-bit KCC-CAM key, bit-fields pack into one word */
+	uint64_t sb_data : 32; /* sideband data: VLAN TPID/VID, VXLAN VNI, ... */
+	uint64_t sb_type : 8; /* 0: none, 1: VLAN, 2: VXLAN (see kcc_key_add_*) */
+	uint64_t cat_cfn : 8; /* category CFN index */
+	uint64_t port : 16;
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64;
+		uint32_t key32[2];
+		struct kcc_key_s key;
+	};
+	uint32_t km_category;
+	uint32_t id;
+
+	uint8_t *kcc_unique_ids;
+
+	int flushed_to_target;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d;
+	uint32_t user_port_id;
+	uint8_t len;
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc;
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+enum special_partial_match_e {
+	SPCIAL_MATCH_NONE, /* NOTE(review): typo for SPECIAL_ - rename together with all users */
+	SPECIAL_MATCH_LACP, /* high-priority LACP match for special SW processing */
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions;
+	uint64_t ioa_actions;
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select;
+		uint32_t dyn;
+		uint32_t ofs;
+		uint32_t len;
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable;
+	uint8_t ttl_sub_ipv4;
+	uint8_t ttl_sub_outer;
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
+struct flow_handle {
+	enum flow_handle_type type;
+
+	struct flow_eth_dev *dev;
+	struct flow_handle *next;
+	struct flow_handle *prev;
+
+	union {
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		struct {
+			uint32_t flm_data[10];
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			struct flow_handle *flm_owner;
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+struct group_lookup_entry_s {
+	uint64_t ref_counter;
+	uint32_t *reverse_lookup;
+};
+
+struct group_handle_s {
+	uint32_t group_count;
+
+	uint32_t *translation_table;
+
+	struct group_lookup_entry_s *lookup_entries;
+};
+
+/* Allocate the group translation handle; returns 0 on success, -1 on OOM. */
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle = calloc(1, sizeof(struct group_handle_s));
+
+	*handle = group_handle;
+	if (group_handle == NULL)
+		return -1; /* original dereferenced a possibly-NULL calloc result */
+	group_handle->group_count = group_count;
+	group_handle->translation_table = calloc((uint32_t)(group_count * OWNER_ID_COUNT),
+						 sizeof(uint32_t));
+	group_handle->lookup_entries = /* NOTE(review): member callocs unchecked */
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+	return 0;
+}
+
+int flow_group_handle_destroy(void **handle)
+{	/* free a handle from flow_group_handle_create(); NULL-safe */
+	if (*handle) {
+		struct group_handle_s *group_handle =
+			(struct group_handle_s *)*handle;
+
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+
+		free(*handle);
+		*handle = NULL; /* prevent double free on repeated calls */
+	}
+
+	return 0;
+}
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	/* Rows are owners: stride by group_count. The OWNER_ID_COUNT stride went */
+	/* out of bounds whenever group_count < OWNER_ID_COUNT (owner_id can be 255). */
+	table_ptr = &group_handle->translation_table[(uint32_t)owner_id *
+				 group_handle->group_count + group_in];
+	lookup = *table_ptr;
+	if (lookup == 0) {
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			; /* find first unused translated id (0 is reserved) */
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+			*table_ptr = lookup;
+		} else {
+			return -1; /* no free translated group ids left */
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{	/* drop one reference on an id returned by flow_group_translate_get() */
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *lookup;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	lookup = &group_handle->lookup_entries[translated_group];
+
+	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
+		lookup->ref_counter -= 1;
+		if (lookup->ref_counter == 0) { /* last user: clear the mapping */
+			*lookup->reverse_lookup = 0;
+			lookup->reverse_lookup = NULL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+static uint32_t shuffle(uint32_t x)
+{
+	return (((x & 0x00000002) << 29) | ((x & 0xAAAAAAA8) >> 3) |
+		((x & 0x15555555) << 3) | ((x & 0x40000000) >> 29));
+}
+
+static uint32_t ror_inv(uint32_t x, const int s)
+{	/* rotate right by s, inverting wrapped-in bits; requires 0 < s < 32 */
+	return ((x >> s) | ((~x) << (32 - s)));
+}
+
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	uint32_t x1 = ror_inv(x, 15);
+	uint32_t x2 = ror_inv(x, 13);
+	uint32_t y1 = ror_inv(y, 3);
+	uint32_t y2 = ror_inv(y, 27);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	return shuffle(combine(x, y));
+}
+
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t m = 0xE0000000E0000000ULL;
+
+	return (((x >> 3) | m) ^ ((x << 29) & m));
+}
+
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t m = 0xFFF80000FFF80000ULL;
+
+	return (((x >> 13) | m) ^ ((x << 19) & m));
+}
+
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t m = 0xFFFE0000FFFE0000ULL;
+
+	return (((x >> 15) | m) ^ ((x << 17) & m));
+}
+
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+
+	return (((x >> 27) | m) ^ ((x << 5) & m));
+}
+
+static uint64_t shuffle64(uint64_t x)
+{
+	return (((x & 0x0000000200000002) << 29) |
+		((x & 0xAAAAAAA8AAAAAAA8) >> 3) |
+		((x & 0x1555555515555555) << 3) |
+		((x & 0x4000000040000000) >> 29));
+}
+
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	return (((uint64_t)x << 32) | y);
+}
+
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	uint64_t x1 = ror_inv15(x);
+	uint64_t x2 = ror_inv13(x);
+	uint64_t y1 = ror_inv3(y);
+	uint64_t y2 = ror_inv27(y);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	return shuffle64(combine64(x, y));
+}
+
+static uint32_t calc16(const uint32_t key[16])
+{
+	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
+	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
+	/*   0       1       2       3       4       5       6       7     Layer 1   */
+	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
+	/*       0               1               2               3         Layer 2   */
+	/*        \______.______/                 \______.______/                    */
+	/*               0                               1                 Layer 3   */
+	/*                \______________.______________/                            */
+	/*                               0                                 Layer 4   */
+	/*                              / \                                          */
+	/*                              \./                                          */
+	/*                               0                                 Layer 5   */
+	/*                              / \                                          */
+	/*                              \./                                Layer 6   */
+	/*                             value                                         */
+
+	uint64_t z;
+	uint32_t x;
+
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{	/* hash key into one record index per bank; returns the raw 32-bit hash */
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32) /* need more index bits than hash width: fold */
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw; /* consume bits per bank */
+	}
+	return res;
+}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	/* derive CAM addressing widths; record bw = ceil(log2(nb_records)) */
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+#ifdef TESTING
+	/* fix: "_banks" was undeclared and broke the TESTING build */
+	int res = hash_test(hsh, banks, (int)log2(nb_records - 1) + 1);
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65;
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks; /* number of CAM banks hashed over */
+	int cam_records_bw; /* bits needed to address one bank's records */
+	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
+	int cam_bw; /* banks * cam_records_bw: total index bits consumed */
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner;
+	int ref_cnt;
+};
+
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{	/* attach kcc to the per-ndev shared pool, allocating it on first use */
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s)); /* NOTE(review): result unchecked */
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle; /* pool: cam_dist | cuckoo_moves | unique_ids | hasher */
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{	/* allocate lowest free KCC id from the bitmap; returns id or -1 if full */
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1; /* bitmap exhausted */
+
+	for (ii = 0; ii < 8; ii++) { /* find the free bit within the byte */
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{	/* return kcc->id to the bitmap and mark it invalid; no-op if invalid */
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3; /* byte index in bitmap */
+	uint8_t shft = (uint8_t)(kcc->id & 7); /* bit within the byte */
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{	/* returns 1 when the full 64-bit KCC keys are identical, else 0 */
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller If you change this code in future updates, this may no
+			 * longer be true then!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+static uint32_t kcc_hsh_key[16];
+
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1;
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{	/* program this KCC key into the CAM; 0 on success, negative on failure */
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id); /* NOTE(review): %016lx assumes 64-bit long; PRIx64 is portable */
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Key mask patterns that may be placed in the CAM; a key whose mask
+ * matches none of these entries must go into the TCAM instead.
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	/* IP6_SRC, IP6_DST */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } },
+	/* DMAC,SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000 } },
+	/* DMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffff0000, 0x00000000, 0xffff0000 } },
+	/* SMAC,ethtype */
+	{ 4, { 0x00000000, 0x0000ffff, 0xffffffff, 0xffff0000 } },
+	/* ETH_128 */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } },
+	/* IP4_COMBINED */
+	{ 2, { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 } },
+	/*
+	 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+	 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+	 */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{ 1, { 0xffff0000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* TP_PORT_DST32 */
+	{ 1, { 0x0000ffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv4 TOS mask bits used often by OVS */
+	{ 1, { 0x00030000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv6 TOS mask bits used often by OVS */
+	{ 1, { 0x00300000, 0x00000000, 0x00000000, 0x00000000 } },
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Shadow bookkeeping for one CAM record: which flow definition owns it. */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat cam_dist index of (bank, record); requires 'km' in scope. */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/*
+ * Flat cam_dist index of this flow's hashed record in bank 'bnk'.
+ * GCC statement expression so 'bnk' is evaluated exactly once.
+ */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Maximum recursion depth when cuckoo-moving entries to make room. */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/*
+ * Addresses already claimed during one cuckoo-move search.
+ * NOTE(review): file-scope state makes the move algorithm non-reentrant;
+ * confirm all callers are serialized by a higher-level lock.
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Shadow bookkeeping for one TCAM (bank, record) slot. */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat tcam_dist index of (bank, record); requires 'km' in scope. */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 *
+	 * One allocation holds, in order: CAM shadow, cuckoo-move counter,
+	 * TCAM shadow and the hasher state; '*handle' owns the memory and
+	 * is shared between flow definitions on the same device.
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/*
+			 * Bail out on allocation failure instead of carving
+			 * up a NULL pointer below.
+			 */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* Carve the single allocation into its four sub-regions. */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+void km_free_ndev_resource_management(void **handle)
+{
+	/* Release the combined CAM/TCAM record-manager allocation, if any. */
+	if (*handle)
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+	free(*handle); /* free(NULL) is a no-op */
+	*handle = NULL;
+}
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+	/*
+	 * Record one match element (word/mask pair plus extractor start
+	 * offset) in the flow definition and classify it as CAM- or
+	 * TCAM-eligible. Returns 0 on success, -1 on an invalid word
+	 * length or when the match array is full.
+	 */
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* Guard the fixed-size match array against overflow. */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	/* valid word_len 1,2,4 - a 3-word element is padded up to 4 words */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Mask is CAM friendly - stop searching */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+				break;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	/* Latch (as 0/1) whether the color/info word is part of the key. */
+	km->info_set = on ? 1 : 0;
+}
+
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	/*
+	 * Return the index of the first unmarked, non-sideband match
+	 * element with word length 'size', or -1 when none remains.
+	 */
+	for (int idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Map a frame-offset/sideband id to a readable name for debug logs only. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 * Builds the final key word/mask arrays from the collected match
+	 * elements, decides CAM vs TCAM placement, and - for TCAM - the
+	 * set of legal start banks. Returns 0 on success, -1 when the
+	 * elements cannot be mapped onto the available extractors.
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * Fix: fill all four candidate start offsets (8..11).
+			 * The previous code wrote only index 0 in the loop,
+			 * leaving start_offsets[1..3] uninitialized although
+			 * num_start_offsets was set to 4, so tcam_find_mapping
+			 * would read garbage bank numbers.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	/*
+	 * Compare a new flow definition (km) against an existing one (km1)
+	 * to decide whether km1's KM recipe and flow type can be reused.
+	 * Returns the reusable flow type, 0 when not compatible, or -1 on
+	 * a clash (an identical masked key is already programmed).
+	 */
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/*
+		 * NOTE(review): the first test reads km->match[i] while the
+		 * second reads km->match_map[i] (insertion order vs mapped
+		 * order) - confirm the mixed indexing is intentional.
+		 */
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	/*
+	 * Program the KM recipe at 'index' in the backend from this flow
+	 * definition: extractor selection (QWORD/SWORD/sideband), key
+	 * mask A, key length, flow-type mapping, and either CAM pairing
+	 * or TCAM bank selection. Returns 0 on success, -1 when the
+	 * elements exceed the available extractors or mapping fails.
+	 */
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband words are CAM-only, one allowed */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* plain SWORDs map to DW8 then DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* QWORDs map to QW0 then QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written from the top of the mask word array down */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	/*
+	 * Write this flow's key words and flow type into the CAM record
+	 * reserved in 'bank' (and the following record when paired) and
+	 * flush them to hardware. Returns the OR of all backend statuses.
+	 */
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	/* claim the record in the shadow bookkeeping */
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* key spills into a second record - only legal when paired */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	/*
+	 * Zero this flow's CAM record(s) in 'bank', release the shadow
+	 * ownership and flush the change to hardware. Mirrors
+	 * cam_populate(). Returns the OR of all backend statuses.
+	 */
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* paired entries occupy a second record - clear it too */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	/*
+	 * Try to relocate this flow's CAM entry from its current bank to
+	 * any other bank whose hashed record is free (cuckoo move).
+	 * Returns 1 when the entry was moved, 0 when no bank had room or
+	 * the hardware write failed.
+	 */
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* a paired entry also needs the next record */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	/*
+	 * Recursively try to free the CAM slot at 'bank_idx' by moving its
+	 * current owner elsewhere, descending at most 'levels' steps.
+	 * Already-visited addresses are tracked in the file-scope
+	 * cam_addr_reserved_stack (not reentrant). Returns 1 when the slot
+	 * was freed, 0 otherwise.
+	 */
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address so deeper levels will not revisit it */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	/*
+	 * Hash the key to one candidate record per bank, then place the
+	 * entry in the first free bank - or make room via cuckoo moves.
+	 * Returns 0 on success, -1 when no bank could be freed, or a
+	 * backend error from the final populate.
+	 */
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/*
+	 * NOTE(review): logs the first three banks unconditionally -
+	 * assumes nb_cam_banks >= 3; confirm for all FPGA variants.
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	/*
+	 * Look for a record index that is free across all key_word_size
+	 * consecutive banks starting at 'start_bank'. On success stores
+	 * the record in km->tcam_record and returns 1; returns 0 when no
+	 * such record exists.
+	 */
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			/* same record must be free in every following bank */
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	/*
+	 * Try each legal start bank in turn until a free record spanning
+	 * the whole key is found. Returns 0 on success, -1 otherwise.
+	 */
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		int start_bank = km->start_offsets[bs_idx];
+
+		if (!tcam_find_free_record(km, start_bank))
+			continue;
+
+		km->tcam_start_bank = start_bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	/*
+	 * Program one 32-bit key word into a TCAM bank for 'record': for
+	 * each of the four key bytes, walk all 256 lookup values and set
+	 * this record's bit when the value matches (word & mask), clear it
+	 * otherwise. Flushes the whole bank. Returns OR'ed backend status.
+	 */
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* locate this record's bit within the 96-bit record bitmap */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		/* claim the slot in the shadow bookkeeping */
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	/*
+	 * Program the full key into the TCAM: find a free record if the
+	 * recipe was reused, write the color/flow-type side info (TCI),
+	 * then program one bank per key word. Returns 0 on success.
+	 */
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		/* recipe reused from another flow - record not chosen yet */
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one TCAM bank holds one 32-bit key word */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear a record's bit from every value line of one TCAM bank and
+ * release the record's ownership.
+ *
+ * Fix: the original outer byte loop kept iterating after an inner-loop
+ * error and reassigned 'err' with the next get/set result, so a failure
+ * on an earlier byte plane could be masked by a later success. The outer
+ * loop now stops as soon as an error is seen.
+ *
+ * Returns 0 on success, otherwise the first backend error encountered.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* locate the record's bit within the (up to) 96-bit record vector */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4 && !err; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove a flow's TCAM entry: clear its TCI (color and flow type) and
+ * wipe its record bit from every bank the key occupied.
+ *
+ * Fix: the return values of the hw_mod_km_tci_set()/flush() calls were
+ * silently discarded; they are now or-accumulated into 'err', matching
+ * the error handling of km_write_data_to_tcam(), so a failed TCI clear
+ * is reported to the caller (and skips the pointless bank resets).
+ *
+ * Returns 0 on success, non-zero on backend failure, -1 when no
+ * bank/record was ever assigned.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, 0);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, 0);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	/* silence per-write debug output during the bulk TCAM clearing */
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one bank per 32-bit key word; stop at the first error */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Make 'km' reference an already-programmed match entry 'km1'.
+ *
+ * 'km' is appended to the tail of km1's reference chain and inherits
+ * the target-specific placement state from the chain tail.
+ * Returns 0 on success, -1 for unsupported targets.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	/* root of the chain is km1's root, or km1 itself if it is the root */
+	km->root = km1->root ? km1->root : km1;
+
+	/* append km at the end of the reference chain */
+	while (tail->reference)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = tail->info;
+
+	if (km->target == KM_CAM) {
+		km->cam_paired = tail->cam_paired;
+		km->bank_used = tail->bank_used;
+		km->flushed_to_target = tail->flushed_to_target;
+		return 0;
+	}
+
+	if (km->target == KM_TCAM) {
+		km->tcam_start_bank = tail->tcam_start_bank;
+		km->tcam_record = tail->tcam_record;
+		km->flushed_to_target = tail->flushed_to_target;
+		return 0;
+	}
+
+	/* KM_SYNERGY and anything else is not supported here */
+	return -1;
+}
+
+/*
+ * Program this flow definition into its selected target (CAM or TCAM),
+ * tagging it with the given color.
+ * Returns the target writer's result, or -1 for unsupported targets.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		return km_write_data_to_cam(km);
+
+	if (km->target == KM_TCAM)
+		return km_write_data_to_tcam(km);
+
+	/* KM_SYNERGY and anything else is not supported here */
+	return -1;
+}
+
+/*
+ * Detach or delete a flow's match entry, depending on its role:
+ *  - a referencing entry is simply unlinked from its root's chain;
+ *  - a root entry with references hands its hardware ownership over to
+ *    the first referencing entry (which becomes the new root);
+ *  - a lone, flushed entry is actually erased from CAM/TCAM hardware.
+ *
+ * Returns 0 on success, -1 for unsupported targets or a hardware error.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* km references another entry: unlink it from the chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* km is a root with references: promote the first reference */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			/* transfer CAM bank ownership to the new root */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				/* paired entry spans two banks */
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			/* transfer every TCAM bank of the key to the new root */
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* lone entry that reached hardware: erase it for real */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One known tunnel definition, kept in the global 'tunnels' list. */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;       /* tunnel header field values */
+	struct tunnel_cfg_s cfg_mask;  /* valid-bits mask for 'cfg' */
+	uint32_t flow_stat_id;         /* (uint32_t)-1 until set by flow code */
+	uint8_t vport;                 /* allocated virtual port id */
+	int refcnt;                    /* number of flows using this tunnel */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/* Return non-zero when the port id lies inside the virtual-port window. */
+int is_virtual_port(uint8_t virt_port)
+{
+	return virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+	       virt_port < MAX_HW_VIRT_PORTS;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* Head of the global linked list of known tunnel definitions. */
+static struct tunnel_s *tunnels;
+
+/* In-use flags for virtual ports; index 0 maps to VIRTUAL_TUNNEL_PORT_OFFSET. */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual tunnel port.
+ * Returns the port id, or 255 when the pool is exhausted.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	uint8_t port = VIRTUAL_TUNNEL_PORT_OFFSET;
+
+	while (port < MAX_HW_VIRT_PORTS) {
+		uint8_t *slot = &vport[port - VIRTUAL_TUNNEL_PORT_OFFSET];
+
+		if (*slot == 0) {
+			*slot = 1;
+			return port;
+		}
+		port++;
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ * Returns 0 on success.
+ * NOTE(review): the failure path returns -1 from a uint8_t function,
+ * which the caller sees as 255 — confirm callers test against 255 (or
+ * simply ignore the result) rather than against -1.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * True when two field values agree on every bit covered by BOTH masks.
+ * The statement-expression locals guard against double evaluation of
+ * the macro arguments.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked comparison of two IPv4 tunnel configs (addresses + UDP ports). */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked comparison of two IPv6 tunnel configs (64-bit address halves + ports). */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked comparison of a stored tunnel against a candidate cfg/mask pair.
+ * Returns non-zero only when the tunnel types match and the address/port
+ * fields agree under both masks.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask, tnlcfg,
+					  tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask, tnlcfg,
+				  tnlcfg_mask);
+}
+
+/*
+ * Find (or create) a tunnel definition in the global list.
+ *
+ * tun_set != 0 means this is a "tunnel set" command: an exact duplicate is
+ * searched first, and a matching pre-configured entry may be upgraded to a
+ * full definition. tun_set == 0 performs a masked match search only.
+ * On a hit the reference count is bumped; otherwise a new entry with a
+ * freshly allocated vport is prepended to the list.
+ *
+ * Fix: the calloc() result was dereferenced without a NULL check; on
+ * allocation failure we now release the just-allocated vport and return
+ * NULL (callers already handle NULL for the vport-exhausted case).
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* give the vport back on allocation failure */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel; when the count hits zero the tunnel is
+ * unlinked from the global list, its vport is freed and it is destroyed.
+ * Returns 0 on success, -1 if the tunnel is not in the list.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *cur = tunnels;
+	struct tunnel_s *before = NULL;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the tunnel in the global list, tracking its predecessor */
+	for (; cur && cur != tnl; cur = cur->next)
+		before = cur;
+
+	if (!cur) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	tnl->refcnt--;
+	if (tnl->refcnt == 0) {
+		/* last user: unlink and reclaim resources */
+		if (before)
+			before->next = tnl->next;
+		else
+			tunnels = tnl->next;
+		flow_tunnel_free_virt_port(tnl->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tnl->vport);
+		free(tnl);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a run of flow elements (starting after *idx) describing a tunnel
+ * and look it up / register it in the tunnel database.
+ *
+ * vni == NULL marks a "tunnel set" command; otherwise *vni receives the
+ * VXLAN VNI (or (uint32_t)-1 when none was present). On success *idx is
+ * advanced to the next unconsumed element and the tunnel is returned;
+ * NULL is returned for invalid input.
+ *
+ * Fix: tnlcfg_mask was used uninitialized — only the fields of matched
+ * element types were ever written, yet tunnel_get() memcmp()s/memcpy()s
+ * the whole structure. It is now zeroed alongside tnlcfg.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		/* elements must appear in non-decreasing type order */
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* VNI is a 24-bit big-endian field */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Accessor: the virtual port assigned to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Bind a flow statistics id to the tunnel (marks it as fully defined). */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy the configuration of the tunnel matching (vport, flow_stat_id)
+ * into *tuncfg. flow_stat_id == (uint32_t)-1 acts as a wildcard.
+ * Returns 0 on success, -1 when no tunnel matches.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun; tun = tun->next) {
+		int vport_hit = tun->vport == vport;
+		int stat_hit = flow_stat_id == tun->flow_stat_id ||
+			       flow_stat_id == (uint32_t)-1;
+
+		if (vport_hit && stat_hit) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * One's-complement style running sum over 16-bit words, seeded with
+ * 'seed', with end-around carry folding. Returns the folded sum (not
+ * complemented); the caller is expected to finish/adjust the checksum.
+ * NOTE(review): the odd trailing byte is taken as (unsigned char) of a
+ * be16_t element, i.e. its low byte in host order — behavior depends on
+ * host endianness; confirm callers only pass even sizes or little-endian
+ * assumptions hold.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy 'size' bytes of the element's spec verbatim, ignoring its mask. */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build a VXLAN encapsulation header (ETH / IPv4-or-IPv6 / UDP / VXLAN)
+ * from a flow element list into fd->tun_hdr, normalizing fields along the
+ * way (IPv4 version/IHL, TTL, protocol, DF; IPv6 version, hop limit;
+ * zero UDP checksum; VXLAN I-flag). For IPv4 a partial header checksum
+ * is precomputed into fd->tun_hdr.ip_csum_precalc.
+ *
+ * Fix: the FLOW_DEBUG MAC dumps printed addr_b[5] twice and skipped
+ * addr_b[4] for both destination and source MAC.
+ *
+ * Returns 0 on success, -1 on unsupported element types or too-old ROA.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force a plain 20-byte IPv4 header */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* precompute the IP header checksum over a zeroed csum/length */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Report whether the CAT (categorizer) module exists on this device,
+ * as probed by the backend interface.
+ */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the CAT module version and resource counts, then
+ * allocate the version-specific shadow/cache tables in one contiguous
+ * callocate_mod() allocation (freed via be->cat.base in hw_mod_cat_free()).
+ *
+ * Returns 0 on success, or a negative error code when a mandatory resource
+ * count is invalid, the allocation fails, or the FPGA module version is
+ * unsupported.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory counts below use "nb <= 0" (must be positive); the KCC
+	 * and KM-interface counts further down use "nb < 0" because zero is
+	 * a legal value for them (feature absent).
+	 */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks exist. */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/* Each callocate_mod() call takes (ptr, count, elem_size) triples;
+	 * the leading integer is the number of triples and must match.
+	 * KCE/FTE tables are addressed 8 CFNs per entry, hence the /8.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		/* v21 doubles the FTE key count (2 -> 4); several tables
+		 * still reuse the v18 record layouts.
+		 */
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		/* v22 adds the CCE/CCS tables (14 triples instead of 12). */
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the CAT shadow tables allocated by hw_mod_cat_alloc().
+ * free(NULL) is a no-op, so no NULL guard is needed; the pointer is
+ * cleared afterwards so a repeated call is harmless.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset CFN entry @i to its "accept everything" defaults: preset the whole
+ * record to zero, then open every protocol/error matcher so the entry does
+ * not filter anything until it is explicitly configured.
+ *
+ * NOTE(review): only the status of the initial PRESET_ALL call is captured
+ * in 'err' and returned; failures of the subsequent field writes are not
+ * propagated — confirm this best-effort behavior is intended.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields introduced with module version 21 (second KM interface and
+	 * tunnel/TTL error matchers).
+	 */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Zero the entire CAT shadow cache and flush every table to hardware,
+ * bringing the module to a known-clean state.
+ *
+ * KCE/KCS/FTE flushing is version dependent: v18 has a single KM
+ * interface, while v21+ flushes per configured KM interface (m0, and m1
+ * when a second interface is present).  Returns 0 on success, -1 on the
+ * first flush failure.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC is optional; kcc_size == 0 means the feature is absent. */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist from module version 22 onwards. */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush the cached CFN table to hardware.  ALL_ENTRIES flushes the whole
+ * table and requires start_idx == 0; otherwise the [start_idx, start_idx
+ * + count) range must lie within the table.  Returns the backend status,
+ * or a negative error on a bad range.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES) {
+		/* Full-table flush must begin at entry 0. */
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		count = be->cat.nb_cat_funcs;
+	} else if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs) {
+		return error_index_too_large(__func__);
+	}
+
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Common getter/setter for every CFN field, dispatched first on the FPGA
+ * module version (_VER_) and then on @field.  @get selects read (1) vs
+ * write (0) through @value; @word_off addresses the word within array
+ * fields (e.g. PM_CMP) and doubles as an auxiliary index for the
+ * COMPARE/FIND pseudo-fields.  Returns 0 on success or a negative error
+ * code for bad index, unsupported field, or unsupported version.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		/* Pseudo-field: reset the entry to defaults (write-only). */
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		/* Pseudo-field: fill the whole record with *value (write-only). */
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a 2-word array; bound-check word_off. */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has a single KM interface; field is named km_or. */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21/v22 share the cat_v21_cfn_s layout and add COPY_FROM,
+		 * tunnel/TTL error fields and the second KM interface (KM1_OR).
+		 */
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Pseudo-field: copy entry *value into entry index (write-only). */
+		case HW_CAT_CFN_COPY_FROM:
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a 2-word array; bound-check word_off. */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write @value to CFN @field of entry @index (word @word_off for array fields). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read CFN @field of entry @index (word @word_off) into *@value. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map an (interface selector, KM interface id) pair onto the physical
+ * interface index (0 or 1) used by the versioned register layouts.
+ * Module version 18 has a single KM interface, always index 0.  The
+ * second selector may only match interface m1; the first selector may
+ * match either m0 or m1.  Returns a negative error code when the id does
+ * not correspond to a configured interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num != KM_FLM_IF_SECOND && be->cat.km_if_m0 == km_if_id)
+		return 0;
+
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush the KCE shadow table for one KM interface to hardware.  KCE is
+ * packed 8 CFN enable bits per entry, so indices and counts are in units
+ * of 8 categorizer functions.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;	/* propagate mapping error */
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries for the KM module (interface id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCE entries for the FLM module (interface id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get/set a KCE field for one KM interface.  @index addresses a packed
+ * group of 8 CFN enable bits; v18 stores a single bitmap while v21/v22
+ * keep one bitmap per KM interface.  @get selects read (1) vs write (0).
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCE field for the KM module (interface id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Read a KCE field for the KM module (interface id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Write a KCE field for the FLM module (interface id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Read a KCE field for the FLM module (interface id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/*
+ * Flush the KCS (KM category select) shadow table for one KM interface;
+ * one entry per categorizer function.
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;	/* propagate mapping error */
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries for the KM module (interface id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCS entries for the FLM module (interface id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get/set a KCS field for one KM interface.  v18 stores a single category
+ * value per entry; v21/v22 keep one per KM interface.  @get selects read
+ * (1) vs write (0).
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCS field for the KM module (interface id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Read a KCS field for the KM module (interface id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Write a KCS field for the FLM module (interface id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Read a KCS field for the FLM module (interface id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush the FTE (flow type enable) shadow table for one KM interface.
+ * The table holds (nb_cat_funcs / 8) * nb_flow_types * key_cnt entries,
+ * where key_cnt is 2 before module version 20 and 4 from v20 on.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;	/* propagate mapping error */
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries for the KM module (interface id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush FTE entries for the FLM module (interface id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get/set an FTE field for one KM interface.  The index range depends on
+ * key_cnt (2 keys before v20, 4 from v20).  v18 stores a single enable
+ * bitmap per entry; v21/v22 keep one per KM interface.  @get selects
+ * read (1) vs write (0).
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/* Flush CTE shadow entries to hardware; ALL_ENTRIES flushes the table. */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one CTE (category translate enable) shadow field.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ * @return 0 on success, negative error code otherwise.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 CTE layout, so v18 access is valid. */
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTE field value. */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTE field value into *value. */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/* Flush CTS shadow entries to hardware; ALL_ENTRIES flushes the table. */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	/* Each category function occupies addr_size CTS words. */
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+	const unsigned int capacity = be->cat.nb_cat_funcs * addr_size;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one CTS (category translate slot) shadow field.
+ *
+ * NOTE(review): unlike hw_mod_cat_cts_flush(), the index bound here has no
+ * (_VER_ < 15) fallback; harmless for the supported versions (18/21/22) but
+ * worth confirming the two bounds are meant to differ.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 CTS layout. */
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTS field value. */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTS field value into *value. */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/* Flush COT shadow entries to hardware; ALL_ENTRIES flushes the table. */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one COT (color table) shadow field, plus whole-entry
+ * helper operations (preset / compare / find).
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ * @return 0 on success, negative error code otherwise.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 COT layout. */
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value;
+			 * write-only operation.
+			 */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			/* Presumably compares entry [index] against entry
+			 * [*value] — confirm against do_compare_indexes().
+			 */
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			/* Presumably searches for an entry equal to [index]
+			 * and returns its position in *value — confirm
+			 * against find_equal_index().
+			 */
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one COT field value. */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+/* Read one COT field value into *value. */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/* Flush CCT shadow entries (4 per category function) to hardware. */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = be->cat.nb_cat_funcs * 4;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one CCT (color control table) shadow field.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* Table holds 4 entries per category function, cf. cct_flush(). */
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 CCT layout. */
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCT field value. */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCT field value into *value. */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/* Flush KCC CAM shadow entries to hardware; ALL_ENTRIES flushes the table. */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = be->cat.kcc_size;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one KCC CAM shadow field.
+ *
+ * @param word_off  key word offset, valid 0..1 (key is two 32-bit words).
+ * @param get       non-zero copies the stored value into *value, zero stores
+ *                  *value into the shadow table.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 KCC layout. */
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KCC field value (word_off selects the key word for KCC_KEY). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KCC field value into *value. */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush EXO shadow entries to hardware; ALL_ENTRIES flushes the table. */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = be->cat.nb_pm_ext;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one EXO (extractor offset) shadow field.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 EXO layout. */
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			/* ofs is int32_t, hence the signed accessor. */
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one EXO field value. */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+/* Read one EXO field value into *value. */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/* Flush RCK shadow entries (64 per pattern-matcher extractor) to hardware. */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = be->cat.nb_pm_ext * 64;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one RCK shadow field.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* 64 RCK entries per pattern-matcher extractor, cf. rck_flush(). */
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 RCK layout. */
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCK field value. */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCK field value into *value. */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/* Flush LEN shadow entries to hardware; ALL_ENTRIES flushes the table. */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = be->cat.nb_len;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one LEN (frame length check) shadow field.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		/* v21/v22 reuse the v18 LEN layout. */
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LEN field value. */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+/* Read one LEN field value into *value. */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/* Flush CCE shadow entries to hardware; the table holds 4 entries. */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = 4;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one CCE shadow field (version 22 only).
+ *
+ * Fix: the index bound checked be->cat.nb_len, apparently copy-pasted from
+ * hw_mod_cat_len_mod(); the CCE table holds 4 entries, as enforced by
+ * hw_mod_cat_cce_flush(), so bound against that instead.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* Must match the capacity used by hw_mod_cat_cce_flush(). */
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCE field value. */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cce_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCE field value into *value. */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_cce_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/* Flush CCS shadow entries to hardware; the table holds 1024 entries. */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int capacity = 1024;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Read or write one CCS shadow field (version 22 only).
+ *
+ * Fix: the index bound checked be->cat.nb_len, apparently copy-pasted from
+ * hw_mod_cat_len_mod(); the CCS table holds 1024 entries, as enforced by
+ * hw_mod_cat_ccs_flush(), so bound against that instead.
+ *
+ * @param get  non-zero copies the stored value into *value, zero stores
+ *             *value into the shadow table.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* Must match the capacity used by hw_mod_cat_ccs_flush(). */
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCS field value. */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_ccs_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCS field value into *value. */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_ccs_mod(be, field, index, value, 1);
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Shadow-register layouts for the CAT (categorizer) FPGA module, version 18.
+ * Field widths/meanings mirror the hardware registers — do not reorder.
+ */
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CFN: per-category-function match conditions. */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE: KM category enable bitmap. */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS: KM category select. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE: flow type enable bitmap. */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE: category translate enable — one enable bit per downstream module,
+ * addressable either as a bitmap or as individual bit-fields.
+ */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS: category translate slot (two categories per slot). */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT: color table entry. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT: color control table entry. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO: extractor offset; ofs is signed. */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN: frame length check entry. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC: KM category CAM entry (two 32-bit key words). */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Per-table shadow pointers for CAT v18; allocated by hw_mod_cat_alloc(). */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Shadow-register layouts for the CAT module, version 21.
+ * v21 extends v18 with tunnel/TTL error checks and doubles KCE/KCS/FTE to
+ * cover two KM/FLM interfaces; unchanged tables reuse the v18 structs.
+ */
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/* CFN: per-category-function match conditions (v21 additions noted). */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE: one enable bitmap per KM/FLM interface. */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS: one category select per KM/FLM interface. */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE: one flow-type enable bitmap per KM/FLM interface. */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Per-table shadow pointers for CAT v21. */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Shadow-register layouts for the CAT module, version 22.
+ * v22 extends v21 with an RRB enable bit in CTE and adds the CCE/CCS tables;
+ * unchanged tables reuse the v18/v21 structs.
+ */
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CTE: category translate enable, v22 adds the rrb bit. */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE entry; accessed via hw_mod_cat_cce_set/get(). */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS entry: per-module enable/value pairs plus three soft-blocks. */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Per-table shadow pointers for CAT v22. */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend for presence of the FLM (flow matcher) module. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the FLM module version and resource counts,
+ * then allocate one contiguous shadow-register cache covering every
+ * FLM register group of the detected version (17 or 20).
+ *
+ * Returns 0 on success, -1 if callocate_mod() fails, or a negative
+ * error from error_resource_count()/error_unsup_ver().
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* Report the resource actually queried (was "flm_variant"). */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* v17: 26 register groups, each given as (ptr, count, elem size). */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20: the 26 v17 groups plus 12 v20-only statistics counters. */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM shadow-register cache allocated by
+ * hw_mod_flm_alloc().  free(NULL) is a no-op, so no guard is needed;
+ * the base pointer is cleared to make repeated calls harmless.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM shadow cache to defaults and flush the control,
+ * timeout, scrub and all RCP registers to hardware.  Always returns 0.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	/* NOTE(review): 0x10 is the default SDRAM split; the meaning of
+	 * the value comes from the FPGA spec - confirm against it.
+	 */
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached FLM control register group to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the cached FLM
+ * control group.  Writes only touch the shadow cache; a subsequent
+ * hw_mod_flm_control_flush() pushes them to hardware.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			/* Fill the whole control struct with a byte
+			 * pattern; write-only by design.
+			 */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM control field in the shadow cache. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Get one FLM control field from the shadow cache. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM status group to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached FLM status group from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the cached FLM
+ * status group.
+ */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM status field in the shadow cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Get one FLM status field from the shadow cache. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM timeout register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM timeout field in the shadow cache. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM timeout value in the shadow cache. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Get the FLM timeout value from the shadow cache. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM scrub register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM scrub interval field in the cache. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM scrub interval in the shadow cache. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Get the FLM scrub interval from the shadow cache. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_bin register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the FLM load_bin field in the shadow cache. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_bin value in the shadow cache. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Get the FLM load_bin value from the shadow cache. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_pps register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the FLM load_pps field in the shadow cache. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_pps value in the shadow cache. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Get the FLM load_pps value from the shadow cache. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_lps register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the FLM load_lps field in the shadow cache. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_lps value in the shadow cache. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Get the FLM load_lps value from the shadow cache. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_aps register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the FLM load_aps field in the shadow cache. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_aps value in the shadow cache. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Get the FLM load_aps value from the shadow cache. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM priority registers to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read or write one field of the cached FLM priority group
+ * (four limit/flow-type pairs).
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM priority field in the shadow cache. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Get one FLM priority field from the shadow cache. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) PST profile entries to
+ * hardware.  count == ALL_ENTRIES selects every profile; the range is
+ * bounds-checked against nb_pst_profiles.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read or write one field of the PST profile entry at 'index' in the
+ * shadow cache.  The index is not range-checked here; callers flush
+ * through hw_mod_flm_pst_flush(), which checks bounds.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			/* Byte-fill the whole entry; write-only. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one field of PST profile entry 'index' in the shadow cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Get one field of PST profile entry 'index' from the shadow cache. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP (recipe) entries to
+ * hardware.  count == ALL_ENTRIES selects every category; the range
+ * is bounds-checked against nb_categories.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read or write one field of RCP entry 'index' in the shadow cache.
+ * HW_FLM_RCP_MASK transfers the whole 10-word mask array; all other
+ * fields are single 32-bit values.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			/* Byte-fill the whole entry; write-only. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* 'value' must point at an array of at least 10
+			 * uint32_t words (sizeof rcp->mask).
+			 */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the 10-word RCP mask of entry 'index'.  Only HW_FLM_RCP_MASK is
+ * accepted.  NOTE(review): a field mismatch returns error_unsup_ver;
+ * error_unsup_field would seem more accurate - confirm intent.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Set one scalar field of RCP entry 'index'.  HW_FLM_RCP_MASK is
+ * rejected here; use hw_mod_flm_rcp_set_mask() for the mask array.
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one field of RCP entry 'index' from the shadow cache. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached FLM buffer-control group from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one field of the cached FLM buffer-control group.  The group
+ * is hardware-maintained, so only reads are supported.
+ */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Get one FLM buffer-control field from the shadow cache. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh the cached FLM statistics counters from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one FLM statistics counter from the shadow cache (read-only;
+ * refresh first with hw_mod_flm_stat_update()).  Counters common to
+ * v17 and v20 are handled in the outer switch; the nested default
+ * handles the v20-only counters and rejects them when _VER_ < 18.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters: unsupported before version 18. */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record to hardware.  'value' must point at a
+ * struct flm_v17_lrn_data_s; its size is passed to the backend as a
+ * count of 32-bit words.  Returns the backend's flush result, or a
+ * negative error for an unsupported field/version.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to 'word_cnt' 32-bit words of flow-info records from the
+ * hardware info queue into 'value'.
+ * NOTE(review): the backend call's result is discarded and 0 is
+ * returned on this path - confirm whether the callback reports errors.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record from the hardware status queue into 'value'
+ * (sized as struct flm_v17_sta_data_s, passed in 32-bit words).
+ * NOTE(review): like the inf variant, the backend call's result is
+ * discarded and 0 is returned - confirm whether it can fail.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay mapping two 28-bit member indices onto a 64-bit word.  The
+ * FLM_V17_MBR_IDx macros reinterpret the 14-byte mbr_idx[] buffer of
+ * flm_v17_lrn_data_s through this overlay: IDs 1/2 live in the first
+ * 7 bytes, IDs 3/4 start 7 bytes (56 bits) in.
+ * NOTE(review): the cast relies on unaligned access and type punning
+ * being tolerated by the target compiler - confirm for new platforms.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* Shadow of the FLM control register group (one word per field). */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* Shadow of the read-only FLM status register group. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* Flow timeout register shadow. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* Scrub interval register shadow. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* Load/rate registers (bin, packets/s, lookups/s, accesses/s). */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* Four priority levels, each a limit/flow-type pair. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* One PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/*
+ * One RCP (recipe) entry: key extraction selectors (qw0/qw4/sw8/sw9),
+ * a 10-word key mask, and assorted per-category parameters.
+ */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Learn/info/status queue fill levels (hardware-maintained). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/* Byte-packed wire formats exchanged with hardware; field comments
+ * give the bit range (high:low) and width within the record.
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record read back from the info queue. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Status record read back from the status queue. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* One-word statistics counter shadows, one struct per register. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * Top-level FLM v17 shadow layout; each pointer is carved out of the
+ * single allocation made by callocate_mod() in hw_mod_flm_alloc().
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20 statistics records added on top of the v17 set.  As in v17,
+ * each struct shadows one 32-bit event counter register (cnt).
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Shadow-register cache layout for the FLM module, version 20.
+ * v20 reuses the v17 register structs unchanged and appends the
+ * v20-only statistics counters at the end, so the leading members
+ * stay layout-compatible with struct hw_mod_flm_v17_s.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Report whether the backend exposes an HSH module. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	const bool hsh_present = be->iface->get_hsh_present(be->be_dev);
+
+	return hsh_present;
+}
+
+/*
+ * Query the HSH module version from the backend and allocate the
+ * version-specific register cache (one RCP table for v5).
+ * Returns 0 on success, -1 on allocation failure, or the
+ * unsupported-version error code for unknown versions.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* Single table: nb_rcp entries of hsh_v5_rcp_s */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the HSH register cache allocated by hw_mod_hsh_alloc().
+ * Safe to call multiple times: the pointer is reset so a second call
+ * (or a call before alloc) is a harmless free(NULL).
+ */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Reset the HSH module to a known state: clear the entire cached
+ * register image and flush all RCP entries to the hardware.
+ * Returns the flush result (0 on success).
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to the
+ * hardware.  count == ALL_ENTRIES flushes the whole table.
+ * Returns the backend result, or an index error on a bad range.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	/*
+	 * Reject negative values explicitly; a negative start_idx or
+	 * count could otherwise pass the signed sum check below.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of an HSH RCP entry
+ * in the register cache.  word_off selects the word for array fields
+ * (MAC_PORT_MASK, WORD_MASK).  Returns 0 on success or an error code
+ * for bad index/word_off, unsupported field or unsupported version.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * mac_port_mask[] has HSH_RCP_MAC_PORT_MASK_SIZE
+			 * elements, so word_off == SIZE is already out of
+			 * bounds: use >=, not >.
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* word_mask[] has HSH_RCP_WORD_MASK_SIZE elements */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCP field via the shared modify helper. */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	uint32_t val = value;
+
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &val, 0);
+}
+
+/* Read one RCP field via the shared modify helper. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * Cached image of one HSH v5 RCP (recipe) entry.  Array sizes match
+ * the bounds used by hw_mod_hsh.c (mac_port_mask: 4 words,
+ * word_mask: 10 words); *_ofs fields are signed byte offsets.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Version-5 view of the HSH module cache: one RCP table. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Report whether the backend exposes an HST module. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	const bool hst_present = be->iface->get_hst_present(be->be_dev);
+
+	return hst_present;
+}
+
+/*
+ * Query the HST module version and category count from the backend
+ * and allocate the version-specific register cache (v2: one RCP
+ * table).  Returns 0 on success, -1 on allocation failure, or a
+ * resource-count / unsupported-version error code.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		/* Single table: one RCP entry per category */
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the HST register cache allocated by hw_mod_hst_alloc().
+ * Idempotent: the pointer is cleared to guard against double-free.
+ */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Reset the HST module: clear the cached register image and flush
+ * every RCP entry to the hardware.  Returns the flush result.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to the
+ * hardware.  count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	/*
+	 * A negative start_idx with a small positive sum would pass the
+	 * unsigned comparison below, so reject negatives explicitly.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of an HST RCP entry
+ * in the register cache.  Returns 0 on success or an error code for
+ * bad index, unsupported field or unsupported version.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Bug fix: the result was discarded, leaving the
+			 * rv check below dead.  Assign it as the other
+			 * modules (HSH/IOA) do.
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCP field via the shared modify helper. */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	uint32_t val = value;
+
+	return hw_mod_hst_rcp_mod(be, field, index, &val, 0);
+}
+
+/* Read one RCP field via the shared modify helper. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_hst_rcp_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * Cached image of one HST v2 RCP entry: header-strip window
+ * (start/end dyn+ofs) plus three modifier slots (cmd/dyn/ofs/value).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* Version-2 view of the HST module cache: one RCP table. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Report whether the backend exposes an IOA module. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	const bool ioa_present = be->iface->get_ioa_present(be->be_dev);
+
+	return ioa_present;
+}
+
+/*
+ * Query the IOA module version and table sizes from the backend and
+ * allocate the version-specific register cache (v4: RCP table,
+ * special-TPID record, ROA EPP table).  Returns 0 on success, -1 on
+ * allocation failure, or a resource-count / unsupported-version
+ * error code.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* Three tables: RCP entries, one TPID record, ROA EPP entries */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the IOA register cache allocated by hw_mod_ioa_alloc().
+ * Idempotent: the pointer is cleared to guard against double-free.
+ */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Reset the IOA module: clear the cached register image, program the
+ * default custom TPIDs and flush all tables to the hardware.
+ * Unlike the original, flush errors are propagated to the caller
+ * (matching hw_mod_hsh_reset / hw_mod_hst_reset) instead of being
+ * silently dropped.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	int err;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	err = hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	if (err)
+		return err;
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	err = hw_mod_ioa_config_flush(be);
+	if (err)
+		return err;
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	return hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to the
+ * hardware.  count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	/*
+	 * A negative start_idx with a small positive sum would pass the
+	 * unsigned comparison below, so reject negatives explicitly.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of an IOA RCP entry
+ * in the register cache.  Returns 0 on success or an error code for
+ * bad index, unsupported field or unsupported version.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* Fill the whole entry with the given byte; set-only */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCP field via the shared modify helper. */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t val = value;
+
+	return hw_mod_ioa_rcp_mod(be, field, index, &val, 0);
+}
+
+/* Read one RCP field via the shared modify helper. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_ioa_rcp_mod(be, field, index, value, do_get);
+}
+
+/* Push the cached special-TPID configuration to the hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	const struct flow_api_backend_s *b = be;
+
+	return b->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one cached IOA configuration value (custom TPID 0/1).  The
+ * value reaches the hardware only after hw_mod_ioa_config_flush().
+ * Returns 0 on success or an unsupported field/version error code.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached ROA EPP entries to the
+ * hardware.  count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	/*
+	 * A negative start_idx with a small positive sum would pass the
+	 * unsigned comparison below, so reject negatives explicitly.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a ROA EPP entry
+ * in the register cache.  Returns 0 on success or an error code for
+ * bad index, unsupported field or unsupported version.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* Fill the whole entry with the given byte; set-only */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one ROA EPP field via the shared modify helper. */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	uint32_t val = value;
+
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &val, 0);
+}
+
+/* Read one ROA EPP field via the shared modify helper. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* Cached image of one IOA v4 RCP entry (VLAN/tunnel edit recipe). */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID values programmed via HW_IOA_CONFIG_CUST_TPID_0/1. */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress-path entry (cross-indexed from ROA, see hw_mod_ioa.c). */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* Version-4 view of the IOA module cache. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Report whether the backend exposes a KM module. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	const bool km_present = be->iface->get_km_present(be->be_dev);
+
+	return km_present;
+}
+
+/*
+ * Query the KM module version and table dimensions from the backend
+ * and allocate the version-specific register cache (v7: RCP, CAM,
+ * TCAM, TCI and TCQ tables).  Returns 0 on success, -1 on allocation
+ * failure, or a resource-count / unsupported-version error code.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/*
+		 * Use the named mask sizes instead of the magic numbers
+		 * 12 and 6; these must match the bounds checks in
+		 * hw_mod_km_rcp_mod().
+		 */
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			/* 4 blocks of 256 records per TCAM bank - TODO confirm */
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the KM register cache allocated by hw_mod_km_alloc().
+ * Idempotent: the pointer is cleared to guard against double-free.
+ */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Reset the KM module: clear the cached register image, then flush
+ * RCP, CAM, TCAM, TCI and TCQ so the hardware matches the zeroed
+ * cache.  Always returns 0.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCQ - flush one bank column at a time */
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached KM RCP entries to the
+ * hardware.  count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	/*
+	 * A negative start_idx with a small positive sum would pass the
+	 * unsigned comparison below, so reject negatives explicitly.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a KM RCP entry in
+ * the register cache.  word_off selects the word for the MASK_A /
+ * MASK_B array fields.  Returns 0 on success or an error code for
+ * bad index/word_off, unsupported field or unsupported version.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * word_off is signed and mask_d_a[] has
+			 * KM_RCP_MASK_D_A_SIZE elements, so reject both
+			 * negative offsets and word_off == SIZE (the
+			 * original '>' admitted an out-of-bounds index).
+			 */
+			if (word_off < 0 || word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* mask_b[] has KM_RCP_MASK_B_SIZE elements */
+			if (word_off < 0 || word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field into the shadow cache (thin set-mode wrapper). */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KM RCP field from the shadow cache (thin get-mode wrapper). */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush `count` KM CAM records, starting at start_record in start_bank,
+ * from the shadow cache to the backend.  count == ALL_ENTRIES means
+ * every record of every bank.  Returns backend status, or an error if
+ * the requested range runs past the last bank.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* records are laid out bank-major, so the end index is linear */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get=1) or write (get=0) one field of a KM CAM shadow entry
+ * addressed by (bank, record).  Only module version 7 is supported;
+ * hardware is updated later via hw_mod_km_cam_flush().
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* shadow cache is a flat array: bank-major, record-minor */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* write-only: fill whole entry with the byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM CAM field into the shadow cache (set-mode wrapper). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM CAM field from the shadow cache (get-mode wrapper). */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush TCAM shadow entries to the backend starting at start_bank.
+ * Each bank holds 4 * 256 entries (4 byte-lanes x 256 byte values);
+ * count == ALL_ENTRIES flushes all banks, ALL_BANK_ENTRIES one bank.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Read or write one TCAM shadow entry addressed by (bank, byte-lane,
+ * byte value).  HW_KM_TCAM_BANK_RESET overwrites a whole bank with
+ * value_set[0..2]; HW_KM_TCAM_T accesses a single 3-word entry and
+ * marks it dirty only when its contents actually change, so a later
+ * flush can skip untouched entries.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	/* NOTE(review): index recomputed below for HW_KM_TCAM_T is
+	 * identical to start_index — could be reused; confirm upstream.
+	 */
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			/* write-only: preset every entry of the bank */
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a 3-word TCAM entry into the shadow cache (set-mode wrapper). */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read a 3-word TCAM entry from the shadow cache (get-mode wrapper). */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush `count` TCI records, starting at start_record of start_bank,
+ * from the shadow cache to the backend.  count == ALL_ENTRIES covers
+ * all banks; range overflow returns an error.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one field of a KM TCI shadow record (color or flow
+ * type), addressed by (bank, record).  Version 7 only.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCI field into the shadow cache (set-mode wrapper). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM TCI field from the shadow cache (get-mode wrapper). */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush `count` TCQ records, starting at start_record of start_bank,
+ * from the shadow cache to the backend.  Same range semantics as
+ * hw_mod_km_tci_flush().
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one field of a KM TCQ shadow record (bank mask or
+ * qualifier), addressed by (bank, record).  Version 7 only.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCQ field into the shadow cache (set-mode wrapper). */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one KM TCQ field from the shadow cache (get-mode wrapper). */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * KM v7 recipe (RCP) shadow entry — one per KM category.  Field names
+ * mirror the FPGA register fields accessed in hw_mod_km.c; *_ofs
+ * members are signed byte offsets (written via get_set_signed()).
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* KM v7 CAM shadow entry: six match words and six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* KM v7 TCAM shadow entry: 3-word value plus a dirty flag so flushes
+ * can skip entries that were never modified.
+ */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* KM v7 TCI shadow entry: color and flow-type fields. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* KM v7 TCQ shadow entry: bank mask and qualifier fields. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Aggregated KM v7 shadow tables; arrays are sized at hw_mod_km_alloc()
+ * time from backend-reported resource counts.
+ */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists on this FPGA. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Discover the PDB module version and resource counts from the backend
+ * and allocate the shadow caches (RCP array + single config record).
+ * Returns 0 on success, negative on unsupported version, resource
+ * count error, or allocation failure.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		/* one calloc holds both tables; freed via be->pdb.base */
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB shadow cache allocated by hw_mod_pdb_alloc(). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+/* Zero the PDB shadow caches and flush the cleared state to hardware.
+ * Returns the OR of the two flush results.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush `count` PDB RCP shadow entries starting at start_idx to the
+ * backend; count == ALL_ENTRIES flushes every category.
+ */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Read (get=1) or write (get=0) one field of a PDB RCP shadow entry.
+ * Version 9 only.  PRESET_ALL presets a whole entry; FIND/COMPARE are
+ * helpers that search/compare across entries via the common routines.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			/* write-only: fill whole entry with the byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			/* *_REL fields are signed offsets */
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB RCP field into the shadow cache (set-mode wrapper). */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one PDB RCP field from the shadow cache (get-mode wrapper). */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the single PDB config shadow record to the backend. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/* Write one PDB config field into the shadow record (set-only; no get
+ * counterpart is provided for config).  Version 9 only.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/* PDB v9 recipe shadow entry; ofs*_rel fields are signed offsets. */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* PDB v9 global config shadow (single instance). */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Aggregated PDB v9 shadow tables, allocated in hw_mod_pdb_alloc(). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists on this FPGA. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Discover the QSL module version and resource counts from the backend
+ * and allocate the shadow caches (RCP, QST, QEN and UNMQ tables).
+ * Returns 0 on success, negative on unsupported version, resource
+ * count error, or allocation failure.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* one calloc holds all four tables; freed via be->qsl.base */
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL shadow cache allocated by hw_mod_qsl_alloc(). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+/* Zero the QSL shadow caches and flush the cleared RCP, QST, QEN and
+ * UNMQ tables to hardware.  Always returns 0; individual flush results
+ * are not propagated (matches the other module reset routines that
+ * flush best-effort).
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	/* named constant instead of magic 256 (same value as the UNMQ
+	 * table size used in hw_mod_qsl_alloc/hw_mod_qsl_unmq_flush)
+	 */
+	be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0, QSL_QNMQ_ENTRIES);
+
+	return 0;
+}
+
+/* Flush `count` QSL RCP shadow entries starting at start_idx to the
+ * backend; count == ALL_ENTRIES flushes every category.
+ */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get=1) or write (get=0) one field of a QSL RCP shadow entry.
+ * Version 7 only.  PRESET_ALL presets a whole entry; FIND/COMPARE are
+ * helpers that search/compare across entries via the common routines.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			/* write-only: fill whole entry with the byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL RCP field into the shadow cache (set-mode wrapper). */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL RCP field from the shadow cache (get-mode wrapper). */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush `count` QSL QST shadow entries starting at start_idx to the
+ * backend; count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get=1) or write (get=0) one field of a QSL QST (queue select
+ * table) shadow entry.  Version 7 only.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			/* write-only: fill whole entry with the byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QST field into the shadow cache (set-mode wrapper). */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QST field from the shadow cache (get-mode wrapper). */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush `count` QSL QEN shadow entries (fixed-size table of
+ * QSL_QEN_ENTRIES) starting at start_idx to the backend.
+ */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/* Read or write the enable bit of a QSL QEN shadow entry.  Version 7
+ * only.
+ */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QEN field into the shadow cache (set-mode wrapper). */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QEN field from the shadow cache (get-mode wrapper). */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush `count` QSL UNMQ (unmatched-queue) shadow entries (fixed-size
+ * table of QSL_QNMQ_ENTRIES) starting at start_idx to the backend.
+ */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/* Read or write one field of a QSL UNMQ shadow entry (destination
+ * queue and enable bit).  Version 7 only.
+ */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL UNMQ field into the shadow cache (set-mode wrapper). */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL UNMQ field from the shadow cache (get-mode wrapper). */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* QSL v7 recipe shadow entry; one per QSL category. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* QSL v7 queue select table (QST) shadow entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* QSL v7 queue enable (QEN) shadow entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* QSL v7 unmatched-queue (UNMQ) shadow entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Aggregated QSL v7 shadow tables, allocated in hw_mod_qsl_alloc(). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists on this FPGA. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Discover the RMC module version and allocate the single CTRL shadow
+ * record.  Version is encoded as major<<16 | minor, so 0x10003 is 1.3.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC shadow cache allocated by hw_mod_rmc_alloc(). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+/* Reset RMC to its safe default state: zero the shadow cache, enable
+ * blocking of statistics/keep-alive traffic, block all MAC ports and
+ * all RPP slices, then flush the control record to hardware.
+ * (The original had a second, redundant HW_RMC_BLOCK_STATT set call;
+ * removed — the value written was identical.)
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the single RMC CTRL shadow record to the backend. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/* Read (get=1) or write (get=0) one field of the RMC CTRL shadow
+ * record.  Only version 1.3 (0x10003) is supported.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC CTRL field into the shadow record (set-mode wrapper). */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one RMC CTRL field from the shadow record (get-mode wrapper). */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Shadow of the RMC CTRL register, version 1.3 layout. */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;	/* block statistics traffic (set to 1 at reset) */
+	uint32_t block_keepa;	/* block keep-alive traffic (set to 1 at reset) */
+	uint32_t block_rpp_slice;	/* per-slice block mask -- NOTE(review): 0xf written at init, assume 4-bit mask; confirm */
+	uint32_t block_mac_port;	/* per-port block mask; 0xff blocks all ports */
+	uint32_t lag_phy_odd_even;	/* LAG PHY odd/even setting -- TODO confirm semantics */
+};
+
+/* Top-level RMC v1.3 shadow: pointer into the module cache area. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA module exists in this FPGA image. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query ROA version/resource counts from the backend and allocate the
+ * shadow cache for all ROA tables in one block (callocate_mod).
+ * @return 0 on success, negative on missing resources or unsupported version
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): four flow categories appear to share one tunnel record;
+	 * rationale for the division is not visible in this file -- confirm. */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the ROA shadow cache.  free(NULL) is a no-op, so no guard is
+ * needed; clearing base makes repeated calls safe (no double free).
+ */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset ROA: zero the shadow cache, program default forwarding config
+ * and flush every table to hardware.
+ * All flush/set return codes are collected (previously only the TUNHDR
+ * flush result was returned), matching the hw_mod_tpe_reset() pattern.
+ * @return 0 on success, non-zero if any step failed
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	/* defaults: forward recirculated and normal packets on both TX ports */
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush [start_idx, start_idx + count) TUNHDR shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one 32-bit word of a cached tunnel header.
+ * @param index     TUNHDR entry (tunnel category)
+ * @param word_off  32-bit word offset inside the entry's header buffer
+ * @param get       non-zero: read cached value; zero: update cache
+ * @return 0 on success, negative error on bad index/offset/field/version
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			/* bounds-check the word offset: tunnel_hdr[] holds
+			 * 4 * 4 words; an unchecked offset would read/write
+			 * out of bounds
+			 */
+			if (word_off >= sizeof(be->roa.v6.tunhdr[index].tunnel_hdr) /
+					sizeof(be->roa.v6.tunhdr[index].tunnel_hdr[0]))
+				return error_index_too_large(__func__);
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one tunnel-header word (flush writes the table to HW). */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one cached tunnel-header word. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) TUNCFG shadow entries to hardware. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one field of a cached TUNCFG entry.
+ * Special fields: PRESET_ALL memsets the whole record (set only),
+ * FIND/COMPARE scan the table via the generic index helpers.
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			/* fill the whole record with the low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for a TUNCFG field. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Read the cached value of a TUNCFG field. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Write the single cached ROA CONFIG record to hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Read/write one field of the (single) cached ROA CONFIG record.
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for a CONFIG field. */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Read the cached value of a CONFIG field. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) LAGCFG shadow entries to hardware. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one field of a cached LAGCFG entry.
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for a LAGCFG field. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Read the cached value of a LAGCFG field. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* One cached tunnel header: 4 * 4 32-bit words of raw header data. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration shadow (ROA TUNCFG table, v6). */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;	/* tunnel header length */
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;	/* IP checksum update -- TODO confirm */
+	uint32_t ipcs_precalc;
+	uint32_t iptl_upd;	/* IP total-length update -- TODO confirm */
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Global ROA forwarding configuration shadow (single record). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry: TX PHY port selector. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Top-level ROA v6 shadow: table pointers into the module cache area. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC (slicer) module exists in this FPGA image. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query SLC version and allocate the RCP shadow table
+ * (one record per flow category).
+ * @return 0 on success, negative on unsupported version, -1 on alloc failure
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC shadow cache.  free(NULL) is a no-op, so no guard is
+ * needed; clearing base makes repeated calls safe (no double free).
+ */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Reset SLC: zero the shadow cache and flush the full RCP table to HW. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) SLC RCP shadow entries to hardware. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read/write one field of a cached SLC RCP entry.
+ *
+ * Bug fix: the table element is struct slc_v1_rcp_s (4 x 32-bit fields),
+ * but PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_v1_s) -- the container holding a single
+ * pointer -- as the record size.  That memset only part of the record
+ * and scanned the table with the wrong stride.  Other modules (ROA,
+ * TPE) pass the record struct size; SLC now does the same.
+ *
+ * NOTE(review): memset needs <string.h>; presumably pulled in through
+ * flow_api_backend.h -- confirm.
+ *
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for an SLC RCP field. */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read the cached value of an SLC RCP field. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR module exists in this FPGA image. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query SLC LR version and allocate the RCP shadow table
+ * (one record per flow category).
+ * @return 0 on success, negative on unsupported version, -1 on alloc failure
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC LR shadow cache.  free(NULL) is a no-op, so no guard
+ * is needed; clearing base makes repeated calls safe (no double free).
+ */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Reset SLC LR: zero the shadow cache and flush the full RCP table to HW. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) SLC LR RCP shadow entries to hardware. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one field of a cached SLC LR RCP entry.
+ *
+ * Bug fix: the table element is struct slc_lr_v2_rcp_s (4 x 32-bit
+ * fields), but PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_lr_v2_s) -- the container holding a single
+ * pointer -- as the record size.  That memset only part of the record
+ * and scanned the table with the wrong stride.  Other modules (ROA,
+ * TPE) pass the record struct size; SLC LR now does the same.
+ *
+ * NOTE(review): memset needs <string.h>; presumably pulled in through
+ * flow_api_backend.h -- confirm.
+ *
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for an SLC LR RCP field. */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read the cached value of an SLC LR RCP field. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* One SLC LR slicer recipe: tail-slice enable/anchor/offset plus pcap flag. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;	/* dynamic anchor selector -- TODO confirm */
+	int32_t tail_ofs;	/* signed offset relative to the anchor */
+	uint32_t pcap;
+};
+
+/* Top-level SLC LR v2 shadow: pointer into the module cache area. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* One SLC slicer recipe: tail-slice enable/anchor/offset plus pcap flag. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;	/* dynamic anchor selector -- TODO confirm */
+	int32_t tail_ofs;	/* signed offset relative to the anchor */
+	uint32_t pcap;
+};
+
+/* Top-level SLC v1 shadow: pointer into the module cache area. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE (TX packet editor) module exists. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query TPE version and resource counts from the backend, then allocate
+ * the shadow caches for all TPE sub-tables in one block (callocate_mod).
+ * IFR tables only exist from version 2 onwards.
+ * @return 0 on success, negative on missing resources or unsupported version
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		/* v2 adds the RPP_IFR and IFR recipe tables */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the TPE shadow cache.  free(NULL) is a no-op, so no guard is
+ * needed; clearing base makes repeated calls safe (no double free).
+ */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Reset TPE: zero the shadow cache and flush every sub-table to hardware,
+ * collecting all error codes.  IFR tables are flushed only on version 2.
+ * @return 0 on success, non-zero if any flush failed
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) RPP_IFR RCP shadow entries to hardware. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/*
+ * Read/write one field of a cached RPP_IFR RCP entry (v2 only).
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for an RPP_IFR RCP field. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read the cached value of an RPP_IFR RCP field. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) RPP RCP shadow entries to hardware. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read/write one field of a cached RPP RCP entry.
+ * PRESET_ALL memsets the record (set only); FIND/COMPARE scan the table.
+ * The v1 union view is used for both versions: the RPP RCP layout is
+ * shared between v1 and v2.
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for an RPP RCP field. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read the cached value of an RPP RCP field. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) IFR RCP shadow entries to hardware. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read/write one field of a cached IFR RCP entry (v2 only).
+ * @param get  non-zero: read cached value; zero: update cache
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache a new value for an IFR RCP field. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read the cached value of an IFR RCP field. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	/* Write a single INS recipe field through the shared modifier. */
+	uint32_t tmp = value;
+
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &tmp, 0);
+}
+
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/* Read a single INS recipe field through the shared modifier. */
+	const int get_op = 1;
+
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, get_op);
+}
+
+/*
+ * RPL_RCP
+ */
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	/*
+	 * Flush RPL recipe entries [start_idx, start_idx + count) to
+	 * hardware via the backend. count == ALL_ENTRIES selects the whole
+	 * table.
+	 */
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/*
+	 * Reject negative arguments before the unsigned range check: a
+	 * negative start_idx can yield a non-negative sum that passes it.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	/*
+	 * Shared accessor for one RPL recipe entry: get == 0 writes *value
+	 * into the selected field, get == 1 reads it back. Versions 1 and 2
+	 * share the v1 layout.
+	 */
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	/* Write a single RPL recipe field through the shared modifier. */
+	uint32_t tmp = value;
+
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &tmp, 0);
+}
+
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/* Read a single RPL recipe field through the shared modifier. */
+	const int get_op = 1;
+
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, get_op);
+}
+
+/*
+ * RPL_EXT
+ */
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	/*
+	 * Flush RPL extension entries [start_idx, start_idx + count) to
+	 * hardware via the backend. count == ALL_ENTRIES selects the whole
+	 * table.
+	 */
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	/*
+	 * Reject negative arguments before the unsigned range check: a
+	 * negative start_idx can yield a non-negative sum that passes it.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	/*
+	 * Shared accessor for one RPL extension entry: get == 0 writes
+	 * *value into the selected field, get == 1 reads it back. Versions
+	 * 1 and 2 share the v1 layout.
+	 */
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	/* Write a single RPL extension field through the shared modifier. */
+	uint32_t tmp = value;
+
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &tmp, 0);
+}
+
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/* Read a single RPL extension field through the shared modifier. */
+	const int get_op = 1;
+
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, get_op);
+}
+
+/*
+ * RPL_RPL
+ */
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	/*
+	 * Flush RPL replacement-data entries [start_idx, start_idx + count)
+	 * to hardware via the backend. count == ALL_ENTRIES selects the
+	 * whole table.
+	 */
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	/*
+	 * Reject negative arguments before the unsigned range check: a
+	 * negative start_idx can yield a non-negative sum that passes it.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	/*
+	 * Shared accessor for one RPL replacement-data entry. Unlike the
+	 * scalar recipes, HW_TPE_RPL_RPL_VALUE moves a 4 x 32-bit block, so
+	 * value must point at a 4-word buffer for that field.
+	 */
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* Copy all four 32-bit words in the chosen direction. */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/*
+	 * Write replacement data through the shared modifier. value points
+	 * at the caller's buffer (4 words for HW_TPE_RPL_RPL_VALUE), so it
+	 * is forwarded directly instead of copying a scalar.
+	 */
+	const int set_op = 0;
+
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, set_op);
+}
+
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/* Read replacement data through the shared modifier. */
+	const int get_op = 1;
+
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, get_op);
+}
+
+/*
+ * CPY_RCP
+ */
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	/*
+	 * Flush CPY recipe entries [start_idx, start_idx + count) to
+	 * hardware via the backend. The CPY table holds one recipe set per
+	 * writer, hence the writers * categories sizing.
+	 * count == ALL_ENTRIES selects the whole table.
+	 */
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	/*
+	 * Reject negative arguments before the unsigned range check: a
+	 * negative start_idx can yield a non-negative sum that passes it.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	/*
+	 * Shared accessor for one CPY recipe entry: get == 0 writes *value
+	 * into the selected field, get == 1 reads it back. The table is
+	 * sized writers * categories (one recipe set per copy writer).
+	 */
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	/* Write a single CPY recipe field through the shared modifier. */
+	uint32_t tmp = value;
+
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &tmp, 0);
+}
+
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/* Read a single CPY recipe field through the shared modifier. */
+	const int get_op = 1;
+
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, get_op);
+}
+
+/*
+ * HFU_RCP
+ */
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	/*
+	 * Flush HFU recipe entries [start_idx, start_idx + count) to
+	 * hardware via the backend. count == ALL_ENTRIES selects the whole
+	 * table.
+	 */
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/*
+	 * Reject negative arguments before the unsigned range check: a
+	 * negative start_idx can yield a non-negative sum that passes it.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	/*
+	 * Shared accessor for one HFU recipe entry: get == 0 writes *value
+	 * into the selected field, get == 1 reads it back. The fields fall
+	 * into groups: three length writers (A/B/C), a TTL writer, and the
+	 * protocol/offset information fields.
+	 */
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Length group A (only A has the outer-L4-length flag). */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* Length group B. */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* Length group C. */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL writer. */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* Protocol / offset information fields. */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	/* Write a single HFU recipe field through the shared modifier. */
+	uint32_t tmp = value;
+
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &tmp, 0);
+}
+
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/* Read a single HFU recipe field through the shared modifier. */
+	const int get_op = 1;
+
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, get_op);
+}
+
+/*
+ * CSU_RCP
+ */
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	/*
+	 * Flush CSU recipe entries [start_idx, start_idx + count) to
+	 * hardware via the backend. count == ALL_ENTRIES selects the whole
+	 * table.
+	 */
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/*
+	 * Reject negative arguments before the unsigned range check: a
+	 * negative start_idx can yield a non-negative sum that passes it.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	/*
+	 * Shared accessor for one CSU recipe entry: get == 0 writes *value
+	 * into the selected field, get == 1 reads it back. Fields select
+	 * the checksum command for outer/inner L3/L4.
+	 */
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end versions 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	/* Write a single CSU recipe field through the shared modifier. */
+	uint32_t tmp = value;
+
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &tmp, 0);
+}
+
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	/* Read a single CSU recipe field through the shared modifier. */
+	const int get_op = 1;
+
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, get_op);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/*
+ * SW-side storage for the TPE module recipes, version-1 register model.
+ * The hw_mod_tpe.c accessors write these structures and flush them to
+ * hardware through the backend interface.
+ */
+
+/* RPP recipe, v0 layout. */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS recipe, v1 layout. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL recipe, v2 layout. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL extension entry, v2 layout. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL replacement data: 4 x 32-bit words per entry. */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY recipe, v1 layout. */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/*
+ * HFU recipe, v1 layout: three length writers (A/B/C; only A carries the
+ * outer-L4-length flag), a TTL writer, and protocol/offset info fields.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU recipe, v0 layout: checksum commands for outer/inner L3/L4. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* Per-table recipe arrays for a version-1 TPE module instance. */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/*
+ * Version-2 TPE register model: same tables as v1 (struct layouts are
+ * reused from hw_mod_tpe_v1.h) plus the IFR recipe tables.
+ */
+
+/* RPP IFR recipe, v1 layout. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR recipe, v1 layout. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* Per-table recipe arrays for a version-2 TPE module instance. */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	/* New in v2: IFR recipe tables. */
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian (network byte order) scalar aliases used by the protocol
+ * header structs below.
+ */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+/* Max number of flow_elem entries in a raw encap/decap action. */
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Logical queue id paired with its HW queue id. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Flow match item types (mirror of the RTE_FLOW_ITEM_TYPE_* set in use). */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather a restoration API
+	 * device specific extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Flow action types; the comment names the conf structure each expects. */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather a
+	 * restoration API device specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+#pragma pack(1)
+/* Packed 6-byte Ethernet MAC address. */
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/* Format a MAC address as "XX:XX:XX:XX:XX:XX" (upper-case hex) into buf;
+ * output is truncated to size bytes including the NUL terminator.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl; /* Version (high nibble) + IHL (low nibble). */
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry in a match pattern: a type plus optional spec/mask pair. */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * Mirrors the rte_flow field id set; used to select which packet header
+ * field (or flow metadata) a modify-field action reads or writes.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Describes one side (src or dst) of a modify-field operation: either a
+ * packet/metadata field (level/offset), an immediate value, or a pointer
+ * to caller memory, depending on "field".
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst += src */
+	FLOW_MODIFY_SUB, /* dst -= src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ * Apply "operation" from src to dst over "width" bits.
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation;
+	struct flow_action_modify_data dst;
+	struct flow_action_modify_data src;
+	uint32_t width; /* Number of bits to modify */
+};
+
+/* One entry of a flow action list; conf points to the matching
+ * flow_action_xxx struct for the given type.
+ */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf;
+};
+
+/* Outcome classification carried in struct flow_error. */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Error descriptor filled in by the flow API calls below. */
+struct flow_error {
+	enum flow_error_e type;
+	const char *message; /* Static string; not owned by the caller */
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write one LAG table entry */
+	FLOW_LAG_SET_ALL, /* Write every 4th entry starting at index */
+	FLOW_LAG_SET_BALANCE, /* Distribute entries between two ports */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* 4 or 6 selects which union view is valid */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle; /* opaque handle to one offloaded flow */
+
+/*
+ * Device Management API
+ */
+/* Reset all flow state on the given adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/* Attach/create the flow ethdev for a physical port and allocate its
+ * queues. Returns NULL on failure.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+/* Add one RX queue to an already created flow ethdev. */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+/* Tear down a flow ethdev created by flow_get_eth_dev(). */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+/* Look up the tunnel configuration matching a flow stat id / vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ * Item/action arrays are terminated by an END-typed entry; "error" may
+ * be NULL when the caller does not need a message.
+ */
+/* Check whether a flow could be created, without programming hardware. */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+/* Create and program a flow; returns NULL and fills *error on failure. */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+/* Remove a single flow previously returned by flow_create(). */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+/* Remove every flow on the device. */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+/* Query runtime data for one action of a flow (e.g. counters). */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+/* Non-zero when the device supports FLM metering. */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+/* Maximum number of meter policies supported. */
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+/* Define a two-bucket (a/b) rate profile for later meter creation. */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+/* Define a meter policy; "drop" selects drop vs pass on red. */
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+/* Instantiate a meter from a profile and policy. */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+/* Destroy a meter created by flow_mtr_create_meter(). */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+/* Adjust the accumulated statistics of a meter by adjust_value. */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+/* Number of meters the hardware supports. */
+uint32_t flow_mtr_meters_supported(void);
+
+/* Poll FLM for new meter statistics; returns number of records read. */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+/* Read (and optionally clear) cached stats for one meter id. */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+/* Pair ports for link aggregation; see lag_set_port_group() impl. */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Block RX traffic from the ports in port_mask. */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+/* Program the LAG distribution table; cmd selects entry/all/balance. */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+/* Set the MTU used for inline TX on a port. */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 6/8] net/ntnic: adds flow logic
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-21 13:54   ` [PATCH v6 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-21 13:54   ` Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index b816021b11..0b5ca13e48 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -67,8 +67,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -87,6 +89,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Human-readable names for enum res_type_e values, used by the resource
+ * trace logging below. NOTE(review): entry order must stay in sync with
+ * the enum definition in flow_api.h — verify when adding resource types.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the linked list of known NIC devices; guarded by base_mtx. */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Error message table indexed by enum flow_nic_err_msg_e; see
+ * flow_nic_set_error() below. Keep entries in enum order.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed due to too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+/*
+ * Fill *error (if non-NULL) with the static message for msg and classify
+ * it as success or general failure. msg must be below ERR_MSG_NO_MSG.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (error) {
+		error->message = err_msg[msg].message;
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free resource of res_type, probing indices in steps of
+ * "alignment" (so the returned index is a multiple of alignment).
+ * Returns the allocated index, or -1 when none is free.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+			i += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+			flow_nic_mark_resource_used(ndev, res_type, i);
+			ndev->res[res_type].ref[i] = 1; /* initial refcount */
+			return i;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific resource index idx of res_type.
+ * Returns 0 on success, -1 when the index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1; /* initial refcount */
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Allocate "num" contiguous resources of res_type, with the start index
+ * aligned to "alignment". Returns the first index, or -1 if no large
+ * enough contiguous free run exists.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* First slot is free - check the remaining num-1 */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue; /* run too short, try next start */
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/* Return resource idx of res_type to the free pool (no refcount check). */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Increment the reference count of an already-allocated resource.
+ * Returns 0 on success, -1 if the count would overflow (saturated).
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1; /* refcount saturated - refuse further refs */
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Decrement the reference count of a resource, freeing it when the count
+ * reaches zero. Returns non-zero when the resource is still referenced,
+ * 0 when it has been freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Find the first allocated resource of res_type at or after idx_start.
+ * Returns its index, or -1 when none are in use from idx_start on.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
+			i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number of flow resources and record them in the flow handle.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	if (fh->resource[res_type].index < 0)
+		return -1; /* allocation failed; fh not updated further */
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the specific resource index idx for the flow handle.
+ * Returns 0 on success, -1 when the index is already in use.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	/* NOTE(review): this check is dead code after a successful
+	 * allocation unless idx was negative on entry - confirm intent.
+	 */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program the hash recipe at hsh_idx with a predefined algorithm.
+ * HASH_ALGO_5TUPLE sets up an IPv6-capable 5-tuple hash with the
+ * adaptive IPv4 mask bit enabled; anything else (incl. ROUND_ROBIN)
+ * leaves the recipe cleared, which the hardware treats as round-robin.
+ * Always returns 0.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	/* Start from a cleared recipe */
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Words 0-8 participate in the hash; word 9 is masked out */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the hash recipe at hsh_idx from an RSS field-flag combination.
+ * Only the three flag combinations below are supported; any other
+ * combination is rejected with -1. Every backend write is accumulated
+ * into "res" so a failing register write is reported instead of being
+ * silently dropped (previously only the VLAN case checked all writes).
+ * Returns 0 on success, -1 on backend error or unsupported flags.
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Use the 1st VLAN tag as the C-VLAN; only valid for a
+		 * single-VLAN provider.
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS,
+					  hsh_idx, 0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS,
+					  hsh_idx, 0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		/* Fixed: previous message wrongly said "outer dst IP" here */
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Resolve (adapter_no, port) to its flow_eth_dev by walking the global
+ * device list and then the adapter's eth-dev list. Returns NULL when
+ * either lookup fails. NOTE(review): reads dev_base without taking
+ * base_mtx - confirm callers hold it or accept the race.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev = dev_base;
+
+	while (nic_dev) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+		nic_dev = nic_dev->next;
+	}
+
+	if (!nic_dev)
+		return NULL;
+
+	struct flow_eth_dev *dev = nic_dev->eth_base;
+
+	while (dev) {
+		if (port == dev->port)
+			return dev;
+		dev = dev->next;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the flow_nic_dev for adapter_no in the global list.
+ * Returns NULL when the adapter is unknown.
+ */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = dev_base;
+
+	while (ndev) {
+		if (adapter_no == ndev->adapter_no)
+			break;
+		ndev = ndev->next;
+	}
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure LAG port pairing on the adapter and flush to hardware.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
+	 * and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block incoming traffic from the MAC ports in port_mask and flush to
+ * hardware. Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one ROA LAG table entry (TX phy port) and flush it. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Program the LAG distribution table.
+ *   FLOW_LAG_SET_ENTRY   - write table[index] = value
+ *   FLOW_LAG_SET_ALL     - write value at every 4th entry offset by index&3
+ *   FLOW_LAG_SET_BALANCE - split table 4-entry blocks between port 1 and
+ *                          port 2 according to value (0..100 = %% on port 0)
+ * Returns 0 on success, -1 for an unknown adapter or unknown cmd.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		index &= 3; /* only the offset within a 4-entry block matters */
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow spec without programming hardware. Only the inline
+ * profile is implemented; the vSwitch profile is rejected with -1.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_validate_profile_inline(dev, item, action, error);
+}
+
+/*
+ * Create a flow on @dev from the given attributes, match items and actions.
+ * Only the inline profile is supported; the vSwitch profile is rejected.
+ * Returns the new flow handle, or NULL on error (details in @error).
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/*
+ * Destroy a single flow previously created on @dev.
+ * Only the inline profile is supported; the vSwitch profile is rejected.
+ * Returns 0 on success, -1 on error (details in @error).
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Destroy all flows created on @dev.
+ * Only the inline profile is supported; the vSwitch profile is rejected.
+ * Returns 0 on success, -1 on error (details in @error).
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Query a flow (e.g. a counter action) on @dev; the result buffer is
+ * returned through @data/@length.
+ * Only the inline profile is supported; the vSwitch profile is rejected.
+ * Returns 0 on success, -1 on error (details in @error).
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Push @dev onto the head of the NIC's singly linked eth-port list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	struct flow_eth_dev *old_head = ndev->eth_base;
+
+	ndev->eth_base = dev;
+	dev->next = old_head;
+}
+
+/*
+ * Unlink @eth_dev from the NIC's eth-port list.
+ * Returns 0 when found and removed, -1 when it is not on the list.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	for (; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all flow state owned by @ndev: delete every eth-port device
+ * (which in turn destroys that port's flows), destroy any stray flows that
+ * remain, release the profile's flow management and free the KM/KCC
+ * resource managers.  Returns early (without the releases) on the
+ * unsupported vSwitch profile.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check */
+	while (ndev->flow_base) {
+		/* Should not happen: deleting all ports deletes their flows */
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+#if defined(FLOW_DEBUG)
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+#endif
+		/* report any element still referenced or marked used */
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset all flow state of the adapter identified by @adapter_no and reset
+ * its backend.  Returns 0 on success, -1 when no such adapter is registered.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (ndev == NULL)
+		return -1;
+
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ *
+ * Create (or re-create) the eth-port device for (adapter_no, port_no).
+ * Prepares flow management on the NIC on first use, allocates the requested
+ * RX queues and optionally programs the QSL unmatched-packet queue.
+ * Returns the new device, or NULL on any error.
+ *
+ * Fixes vs. previous revision:
+ *  - calloc failure no longer leaves base_mtx locked (deadlock),
+ *  - the re-open path re-acquires base_mtx so the exit paths never unlock
+ *    a mutex this thread does not hold (undefined behaviour per POSIX).
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * flow_delete_eth_dev() takes ndev->mtx, so drop base_mtx
+		 * around the call; re-acquire it afterwards because both the
+		 * success path and err_exit0 unconditionally unlock base_mtx.
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/*
+		 * base_mtx is still held here; release it before jumping to
+		 * the cleanup path that does not unlock any mutexes.
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		/* caller supplies the queues; just record them */
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			/* report the allocated queue back to the caller */
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* enable each allocated queue in QSL QEN (bits grouped 4/entry) */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	free(eth_dev); /* free(NULL) is a no-op */
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an externally allocated RX queue on @eth_dev and enable it in
+ * the QSL QEN table (enable bits grouped four queues per table entry).
+ * Returns 0 on success, -1 when the per-device queue table is full
+ * (previously this overran rx_queue[]).
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	/* rx_queue[] holds FLOW_MAX_QUEUES + 1 entries (0th is exception) */
+	if (eth_dev->num_queues >= FLOW_MAX_QUEUES + 1) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: too many rx queues on eth device\n");
+		return -1;
+	}
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device and everything created through it: all flows
+ * on this port, the QSL unmatched-queue setup, the QEN enable bits and
+ * (without SCATTER_GATHER) the RX queue resources themselves.
+ * Returns 0 on success, -1 on invalid device or unsupported profile.
+ *
+ * Fix vs. previous revision: the vSwitch error branch no longer returns
+ * while still holding ndev->mtx (mutex leak).
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/* do not leak the device mutex on error */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* clear the QEN enable bit of every queue on this device */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Look up the tunnel definition matching @flow_stat_id and @vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	int ret = tunnel_get_definition(tun, flow_stat_id, vport);
+
+	return ret;
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate the allocation bitmap and per-element reference counters for one
+ * resource type, carved from a single zeroed region (bitmap first, then the
+ * uint32_t ref counters).  Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	struct hw_mod_resource_s *res = &ndev->res[res_type];
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+
+	assert(res->alloc_bm == NULL);
+	/* allocate bitmap and ref counter in one region */
+	res->alloc_bm = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (res->alloc_bm == NULL)
+		return -1;
+
+	res->ref = (uint32_t *)&res->alloc_bm[bm_bytes];
+	res->resource_count = count;
+	return 0;
+}
+
+/*
+ * Release the allocation bitmap for one resource type; the ref-counter
+ * array lives in the same allocation (see init_resource_elements) so a
+ * single free covers both.  Safe for types that were never initialized.
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(ndev->res[res_type].alloc_bm);
+	/* clear stale pointers to guard against use-after-free */
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/* Register @ndev at the head of the global NIC list (base_mtx protected). */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *old_head = dev_base;
+
+	dev_base = ndev;
+	ndev->next = old_head;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Remove @ndev from the global NIC list (base_mtx protected).
+ * Returns 0 when found and removed, -1 when it is not on the list.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	int found = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	for (struct flow_nic_dev **link = &dev_base; *link;
+			link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			found = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return found;
+}
+
+/*
+ * Register a NIC backend and create its flow_nic_dev instance.
+ * @be_if must be a version-1 backend ops table.  Initializes the backend,
+ * allocates bitmap/ref-counter tables for every resource type and links
+ * the new device into the global dev_base list.
+ * Returns the new device, or NULL on failure (partial state is released
+ * through flow_api_done()).
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* only backend ops version 1 is accepted */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* clamp the addressable port count to 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* flow_api_done() releases whatever was set up above */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Tear down a NIC flow device: reset all flow state, release the per-type
+ * resource tables, shut down the backend, unlink it from the global list
+ * and free it.  A NULL @ndev is ignored.  Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev == NULL)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int i = 0; i < RES_COUNT; i++)
+		done_resource_elements(ndev, i);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the opaque backend device handle of @ndev, or NULL if @ndev is NULL. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev == NULL) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	return ndev->be.be_dev;
+}
+
+/*
+ * Number of RX queues allocated on (adapter_no, port_no).
+ * Returns -1 when no eth device exists for that port (previously this
+ * dereferenced a NULL pointer).
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Hardware queue id of queue @queue_no on (adapter_no, port_no).
+ * Returns -1 when no eth device exists for that port (previously this
+ * dereferenced a NULL pointer).
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Read FLM statistics into @data (up to @size entries).
+ * Only available on the inline profile; returns -1 otherwise.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/*
+ * Per-resource-type allocation state kept by a NIC device; the bitmap and
+ * ref array are sized to resource_count elements.
+ */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields; /* bitwise OR of the NT_ETH_RSS_* option bits above */
+};
+
+/* One eth-port device on a NIC; linked from flow_nic_dev::eth_base */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next port device on the same NIC */
+};
+
+/* RSS hash algorithm selector (see flow_nic_set_hasher()) */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+/* Per-adapter flow state; one instance per physical NIC, linked in a global list */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* nonzero once flow management is initialized */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	/* opaque handles owned by the respective resource managers */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* protects this NIC's flow and port state */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/*
+ * Error codes reported through struct flow_error via flow_nic_set_error().
+ * Values are part of the API contract - do not renumber existing entries.
+ */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+/* per-resource-type debug names, indexed by enum res_type_e */
+extern const char *dbg_res_descr[];
+
+/* Set bit x in the byte-array bitmap arr; arguments are evaluated once. */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/* Clear bit x in the byte-array bitmap arr. */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		size_t _temp_x = (x); \
+		arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/*
+ * Nonzero when bit x is set in the byte-array bitmap arr.
+ * Uses a GCC/Clang statement expression.
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		size_t _temp_x = (x); \
+		(arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Mark one resource element as allocated; asserts it was free before. */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/* Mark one resource element as free again. */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* Nonzero (0/1) when the given resource element is currently allocated. */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+/*
+ * Enable or disable the KM flow-type enable bit for one (CFN, flow type,
+ * lookup) combination.  Each FTE bitmap word covers a bank of CFNs
+ * (nb_cat_funcs / 8 per bank), so the CFN index is split into a word index
+ * (fte_index) and a bit position (fte_field).  The register is only written
+ * and flushed when the bitmap actually changes.  Always returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	/* Read-modify-write of the enable bitmap word */
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+/*
+ * FLM counterpart of set_flow_type_km(): enable or disable the FLM
+ * flow-type enable bit for one (CFN, flow type, lookup) combination.
+ * Identical index arithmetic, but targets the FLM FTE bitmap registers.
+ * Only writes and flushes when the bitmap changes.  Always returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	/* Read-modify-write of the enable bitmap word */
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a logical RX queue id into its hardware queue id by linear
+ * search over the device's queue table.  Returns -1 if no queue matches.
+ */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	for (int i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+	}
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Reset the FLM control register and wait for the external DDR4 to finish
+ * calibration, then program the scrubber interval and flow timeout.
+ *
+ * Polls CALIBDONE up to 1,000,000 times with a 1 us sleep between polls
+ * (worst case roughly one second).  Returns 0 on success, -1 if the
+ * calibration-done flag never asserts.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Re-initialize the FLM SDRAM and leave FLM in the requested enable state.
+ *
+ * Sequence: disable FLM and clear every RCP category (except 0), wait for
+ * the engine to report IDLE, trigger SDRAM INIT and wait for INITDONE,
+ * clear the INIT bit in the SW register cache, and finally write 'enable'
+ * to the FLM enable bit.  Both wait loops poll up to 1,000,000 times with
+ * usleep(1).  Returns 0 on success, -1 on either timeout.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Category 0 is intentionally left untouched */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+/* Max FLM recipe (group) slots and flow types tracked per group */
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Compact identity of a flow-type action set, packed into a single uint64_t
+ * so two action sets can be compared with one integer compare (see
+ * flm_flow_learn_prepare()).  The struct is asserted to be exactly 8 bytes.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;      /* slot occupied */
+			uint64_t drop : 1;        /* no destinations -> drop */
+			uint64_t ltx_en : 1;      /* local TX to a phy port */
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;    /* deliver to virt queue */
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;   /* tunnel header length */
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;    /* 0 = IPv4, 1 = IPv6 */
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data; /* whole identity as one comparable word */
+	};
+};
+
+/*
+ * Packed FLM key layout: dyn/ofs selectors for the two quad-words (QW0/QW4)
+ * and two single-words (SW8/SW9) plus outer/inner protocol bits.  Packed
+ * into one uint64_t so key definitions can be compared as plain integers
+ * (see flm_flow_learn_prepare()).
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data; /* whole key definition as one comparable word */
+	};
+};
+
+/*
+ * Build the packed flow-type identity for a flow definition: destination
+ * (drop / phy port / virt queue), encapsulation parameters, decap end and
+ * jump group.  Returned by value; in_use is always set.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	/* The bitfield union must stay exactly one 64-bit word */
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		/* No destination at all means drop */
+		ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* UINT32_MAX marks "no jump" in the flow definition */
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+/*
+ * Store the dyn/ofs selector for quad-word 0 (qw == 0) or quad-word 4
+ * (qw == 1) into the packed key definition.  dyn is masked to 7 bits,
+ * ofs to 8 bits, matching the bitfield widths.
+ */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw == 0) {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+	} else {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Store the dyn/ofs selector for single-word 8 (sw == 0) or single-word 9
+ * (sw == 1) into the packed key definition.  dyn is masked to 7 bits,
+ * ofs to 8 bits, matching the bitfield widths.
+ */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw == 0) {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+	} else {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Per-group FLM state: the group-0 CFN/KM flow type and flow handle this
+ * group was cloned from, the key definition programmed into the RCP, and
+ * the table of flow-type action sets in use.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;               /* CFN owning this group; -1 = unset */
+	int km_ft_group0;             /* KM flow type of the group-0 filter */
+	struct flow_handle *fh_group0; /* miss flow handle for the group */
+
+	struct flm_flow_key_def_s key_def; /* key layout programmed in RCP */
+
+	int miss_enabled;             /* 1 once group-0 FLM miss path is set up */
+
+	/* One entry per flow type; .ident.data == 0 means the slot is free */
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Most recently used ft[] index, checked first on lookup ("cached") */
+	uint32_t cashed_ft_index;
+};
+
+/* Per-NIC FLM bookkeeping: one group slot per FLM recipe */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset, if already allocated) the FLM handle stored in
+ * *handle and mark every group slot unused (cfn_group0 = -1).
+ * NOTE(review): a failed calloc() is not checked here; the following
+ * loop would dereference NULL - verify callers guarantee allocation.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM handle and clear the caller's pointer */
+static void flm_flow_handle_remove(void **handle)
+{
+	free(*handle);
+	*handle = NULL;
+}
+
+/*
+ * Bind a group slot to its group-0 CFN, KM flow type and miss flow handle.
+ * miss_enabled starts at 0; the FLM miss path is wired up lazily by
+ * flm_flow_learn_prepare().  Returns 0, or -1 if group_index is out of
+ * range.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Clear a group slot back to its pristine state (all zero, cfn_group0 = -1).
+ * Touches only the software bookkeeping - hardware deprogramming is done
+ * by flm_flow_destroy_rcp().  Returns 0, or -1 on invalid group_index.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Return (through *fh_miss) the miss flow handle registered for a group.
+ * Returns 0, or -1 on invalid group_index (in which case *fh_miss is left
+ * untouched).
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = flm_handle->groups[group_index].fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program the FLM recipe (RCP) registers for a group: key word selectors,
+ * the 320-bit match mask, the key id, protocol bits and byte-count
+ * accounting offset, followed by a single flush.  Returns 0, or -1 on
+ * invalid group_index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask words into the register's SW9/SW8/QW4/QW0 layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KID = group + 2; KIDs 0/1 are reserved (1 is used by the meter path) */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	/* -20: byte counting starts 20 bytes before the dyn offset (per HW spec) */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Deprogram the FLM recipe for a group and, if the group-0 miss path was
+ * enabled, undo flm_flow_learn_prepare()'s wiring in reverse order:
+ * point the KCS recipe selection back to 0, swap FT MISS back to FT
+ * UNHANDLED, and clear the group-0 KCE enable bit.  Returns 0, or -1 on
+ * invalid group_index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM learning for a flow: on first use of a group, wire the
+ * group-0 CFN to this group's FLM recipe (KCS selection, RCP programming,
+ * FT UNHANDLED -> FT MISS swap, KCE enable), then find or allocate a
+ * flow-type slot matching the flow's action set.
+ *
+ * Outputs: *kid and *ft always on success; *cfn_to_copy/*cfn_to_copy_km_ft
+ * when a new flow-type slot was allocated (caller must clone the CFN);
+ * *fh_existing when an identical action set is already programmed.
+ * Returns 0 on success, -1 on any validation or resource failure.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* First flow in this group: enable the FLM miss path on group 0 */
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in one group must share the same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the most recently used slot often matches */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* Slots 0 and 1 are reserved; scan from 2 for a free or equal slot */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the flow-type slot owned by a flow handle: clear the software
+ * slot and disable both the KM flow type and the flow's own FLM flow type
+ * on the flow's CFN.  Returns the OR of the set_flow_type_flm() results
+ * (0 on success).
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+/* Number of meter profiles supported (2^20) */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Per-NIC meter profile table.  Each profile stores two token buckets,
+ * encoded as 16-bit values: mantissa in bits [11:0], shift-left amount in
+ * bits [15:12] (see flow_mtr_set_profile()).
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires the FLM module, and only FLM variant 2 supports it */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	return hw_mod_flm_present(&dev->ndev->be) &&
+	       dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Max number of meter policies equals the profile table size */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a bucket size to units of 2^40/10^9 bytes, i.e. compute
+ * round_up(value * 10^9 / 2^40) for a 40-bit input.  The multiply is
+ * split into two 20-bit halves so the intermediate products cannot
+ * overflow 64 bits; any discarded low bits force a round-up.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t round_up =
+		(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+	return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+/*
+ * Store a meter profile: convert the two bucket rates (bytes/sec) and
+ * sizes (bytes) into the hardware's 16-bit floating-point-like format -
+ * a 12-bit mantissa in [11:0] and a 4-bit shift in [15:12].  Rates are
+ * first rounded up to 128-byte/sec units; sizes are converted to
+ * 2^40/10^9-byte units.  Values above the representable maximum are
+ * clamped.  Only updates the software profile table; the values reach
+ * hardware when a meter referencing the profile is learned.  Returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round rate down to max rate supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies carry no state in this implementation; accept and ignore */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+/* Number of meter statistic entries supported (2^24) */
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* One FLM info record, measured in 32-bit words */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Top bit of a 64-bit counter; used as an "update in progress" marker */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* Periodic statistics thresholds, expressed as powers of two */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Max number of meters equals the statistics table size */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics.  n_pkt/n_bytes are written by the stats-update
+ * thread and read lock-free by flm_mtr_read_stats(); the MSB of n_pkt is
+ * used as a seqlock-style "update in progress" marker.  stats_mask == 0
+ * marks a deleted meter whose counters must not be updated.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets; /* profile buckets this meter uses */
+
+	volatile atomic_uint_fast64_t n_pkt;
+	volatile atomic_uint_fast64_t n_bytes;
+	uint64_t n_pkt_base;   /* baseline subtracted on read (for clear) */
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask;
+};
+
+/*
+ * NOTE(review): unlike WORDS_PER_INF_DATA above, this is NOT divided by
+ * sizeof(uint32_t), so it is the record size in bytes - verify that the
+ * LRN_FREE counter really counts bytes, or that this is intentional.
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Push one learn record into the FLM learn queue.  If the queue lacks
+ * space, drain pending info records (which frees learn space) and retry
+ * up to FLM_PROG_MAX_RETRY times.  Caller must hold the ndev mutex.
+ * Returns 1 on queue-full timeout, otherwise the flush result.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached counter before concluding the queue is full */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Draining info records frees learn-queue space */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter as an FLM flow (learn op 1) keyed on mtr_id+1 with KID 1
+ * (reserved for the meter path).  Programs bucket A of the profile and
+ * enables volume counting when stats_mask is set.  On success, binds the
+ * meter's stat entry to the profile buckets.  Takes and releases the
+ * ndev mutex.  Returns the flow_flm_apply() result (0 on success).
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Meters are keyed on SW9 = mtr_id + 1 under the reserved KID 1 */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	/* NOTE(review): fill takes only the 12-bit mantissa of size_a,
+	 * dropping the shift bits [15:12] - confirm this matches the HW's
+	 * expected initial fill encoding.
+	 */
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* 72-bit flow id: mtr_id in the low 32 bits, marker bit in id[8] */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter: zero its statistics entry first (stats_mask = 0 stops
+ * any concurrent counter updates), then unlearn the FLM flow (op 0) with
+ * the same key used at creation.  Takes and releases the ndev mutex.
+ * Returns the flow_flm_apply() result (0 on success).
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Relearn a meter with an adjustment value (op 2), reusing the meter's
+ * bucket-A rate/size.  Takes and releases the ndev mutex.  Returns the
+ * flow_flm_apply() result (0 on success).
+ * NOTE(review): mtr_stat->buckets is dereferenced unchecked; it is NULL
+ * after flow_mtr_destroy_meter() - verify callers never adjust a
+ * destroyed meter.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ whole info records from the FLM
+ * info queue into 'data'.  Caller must hold the ndev mutex and provide a
+ * buffer of at least WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ
+ * words.  Returns the number of records read (0 if none available).
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached counter before giving up */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain info records from hardware and fold valid meter statistics into
+ * the lock-free per-meter counters.  Returns the number of records read.
+ *
+ * Writer-side seqlock protocol (paired with flm_mtr_read_stats()): store
+ * n_pkt with the MSB set to mark "update in progress", store n_bytes,
+ * then store the final n_pkt without the MSB.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	/* Only the hardware read needs the mutex; counter updates are atomic */
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/* MSB marks the update window for readers */
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read one meter's green packet/byte counters lock-free.  Reader side of
+ * the seqlock protocol in flm_mtr_update_stats(): spin while n_pkt has
+ * the MSB set (update in progress), then re-read n_pkt and retry if it
+ * changed while n_bytes was sampled.  Counters are reported relative to
+ * the stored base; with 'clear' set, the base is advanced so the next
+ * read starts from zero.  Outputs are untouched when stats_mask is 0
+ * (meter deleted or without statistics).
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipe index for a port is simply port + 1 (recipe 0 is reserved) */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return port + 1;
+}
+
+/*
+ * Walk the NIC's list of ethernet devices and return the physical port
+ * number for a given port id, or UINT8_MAX if no device matches.
+ */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *dev = ndev->eth_base;
+
+	while (dev) {
+		if (dev->port_id == port_id)
+			return dev->port;
+		dev = dev->next;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the front of the NIC's doubly-linked flow list */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	if (ndev->flow_base)
+		ndev->flow_base->prev = fh;
+	fh->next = ndev->flow_base;
+	fh->prev = NULL;
+	ndev->flow_base = fh;
+}
+
+/*
+ * Unlink a flow handle from the NIC's flow list, handling the four cases:
+ * middle of the list, head, tail, and sole element.  Does not free fh.
+ */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		/* fh was the head */
+		ndev->flow_base = next;
+		next->prev = NULL;
+	} else if (prev) {
+		/* fh was the tail */
+		prev->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* fh was the only element */
+		ndev->flow_base = NULL;
+	}
+}
+
+/* Push @fh onto the head of the device's FLM flow list. */
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base_flm;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base_flm = fh;
+}
+
+/*
+ * Unlink @fh_flm from the device's doubly-linked FLM flow list, updating
+ * the list head when @fh_flm is the first element.
+ */
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	if (fh_flm->prev)
+		fh_flm->prev->next = fh_flm->next;
+	if (fh_flm->next)
+		fh_flm->next->prev = fh_flm->prev;
+
+	/* No predecessor: fh_flm was (or should have been) the list head. */
+	if (!fh_flm->prev) {
+		if (fh_flm->next)
+			ndev->flow_base_flm = fh_flm->next;
+		else if (ndev->flow_base_flm == fh_flm)
+			ndev->flow_base_flm = NULL;
+	}
+}
+
+/*
+ * Interpret a VLAN flow element; a TCI match consumes one SW (32-bit)
+ * key entry at DYN_FIRST_VLAN offset 0.  Explicit VLAN matching is
+ * rejected when an implicit VLAN VID is already in force.
+ *
+ * Returns 0 on success.  On failure @error is set, @fd is freed and 1
+ * is returned - the caller must not touch @fd afterwards.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is not visible to the caller - confirm the caller tracks SW key-word
+ * usage itself.
+ */
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL && mask->tci) {
+		if (implicit_vlan_vid > 0) {
+			NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+				"for implicit VLAN patterns.\n");
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+			free(fd);
+			return 1;
+		}
+
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = ntohs(mask->tci);
+		sw_data[0] = ntohs(spec->tci) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+/*
+ * Interpret an IPv4 flow element and add the requested source/destination
+ * address matches to the key matcher.
+ *
+ * Preferred layout is one QW (quad-word) key entry holding both addresses
+ * back-to-back (L3 offset 12); when both QW slots are taken, each
+ * requested address falls back to its own SW (single-word) entry.
+ *
+ * Returns 0 on success.  On failure @error is set, @fd is freed and 1 is
+ * returned - the caller must not touch @fd afterwards.
+ *
+ * NOTE(review): @qw_counter / @sw_counter are passed by value, so the
+ * increments below are not visible to the caller - confirm the caller
+ * tracks key-word usage itself.
+ */
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		/* All-ones frag_offset in both spec and mask is treated as a
+		 * sentinel selecting fragmentation handling 0xfe - presumably
+		 * "match fragmented traffic"; confirm against the recipe code.
+		 */
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		/* Pack src+dst into one free QW entry when possible. */
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			/* Host byte order; data is pre-masked. */
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			/* QW entries exhausted - need one SW entry per
+			 * requested address; fail if not enough remain.
+			 */
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	/* After an outer L3 match (or an ANY element) this counts as the
+	 * inner/tunnelled L3 protocol, otherwise as the outer one.
+	 */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+/*
+ * Interpret an IPv6 flow element: each of the (optionally matched)
+ * source and destination addresses consumes one full QW (128-bit) key
+ * entry at L3 offsets 8 and 24 respectively.
+ *
+ * Returns 0 on success.  On failure @error is set, @fd is freed and 1
+ * is returned - the caller must not touch @fd afterwards.
+ *
+ * NOTE(review): @qw_counter is passed by value, so the increments below
+ * are not visible to the caller - confirm the caller tracks QW key-word
+ * usage itself.
+ */
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL) {
+		if (is_non_zero(spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+			int i;
+
+			memcpy(&qw_data[0], spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], mask->hdr.src_addr, 16);
+
+			/* Convert each word to host order and pre-mask it. */
+			for (i = 0; i < 4; ++i) {
+				qw_data[i] = ntohl(qw_data[i]);
+				qw_mask[i] = ntohl(qw_mask[i]);
+				qw_data[i] &= qw_mask[i];
+			}
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+			int i;
+
+			memcpy(&qw_data[0], spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], mask->hdr.dst_addr, 16);
+
+			/* Convert each word to host order and pre-mask it. */
+			for (i = 0; i < 4; ++i) {
+				qw_data[i] = ntohl(qw_data[i]);
+				qw_mask[i] = ntohl(qw_mask[i]);
+				qw_data[i] &= qw_mask[i];
+			}
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	/* After an outer L3 match (or an ANY element) this counts as the
+	 * inner/tunnelled L3 protocol, otherwise as the outer one.
+	 */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+/*
+ * Interpret a UDP flow element ("upd" in the name is a pre-existing typo
+ * kept so the caller keeps linking); a port match packs src/dst ports
+ * into one SW key entry at L4 offset 0.
+ *
+ * Returns 0 on success.  On failure @error is set, @fd is freed and 1
+ * is returned - the caller must not touch @fd afterwards.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is not visible to the caller - confirm the caller tracks SW key-word
+ * usage itself.
+ */
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL &&
+			(mask->hdr.src_port || mask->hdr.dst_port)) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		/* src port in the high half-word, dst port in the low. */
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	/* Record UDP as inner protocol when tunnelled, outer otherwise. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret an SCTP flow element; a port match packs src/dst ports into
+ * one SW key entry at L4 offset 0.
+ *
+ * Returns 0 on success.  On failure @error is set, @fd is freed and 1
+ * is returned - the caller must not touch @fd afterwards.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is not visible to the caller - confirm the caller tracks SW key-word
+ * usage itself.
+ */
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL &&
+			(mask->hdr.src_port || mask->hdr.dst_port)) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		/* src port in the high half-word, dst port in the low. */
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	/* Record SCTP as inner protocol when tunnelled, outer otherwise. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a TCP flow element; a port match packs src/dst ports into
+ * one SW key entry at L4 offset 0.
+ *
+ * Returns 0 on success.  On failure @error is set, @fd is freed and 1
+ * is returned - the caller must not touch @fd afterwards.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is not visible to the caller - confirm the caller tracks SW key-word
+ * usage itself.
+ */
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL &&
+			(mask->hdr.src_port || mask->hdr.dst_port)) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		/* src port in the high half-word, dst port in the low. */
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	/* Record TCP as inner protocol when tunnelled, outer otherwise. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a GTP flow element; a TEID match consumes one SW key entry
+ * at L4-payload offset 4.  The tunnel protocol is unconditionally set
+ * to GTPv1-U.
+ *
+ * Returns 0 on success.  On failure @error is set, @fd is freed and 1
+ * is returned - the caller must not touch @fd afterwards.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is not visible to the caller - confirm the caller tracks SW key-word
+ * usage itself.
+ */
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL && mask->teid) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = ntohl(mask->teid);
+		sw_data[0] = ntohl(spec->teid) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+		set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+		sw_counter += 1;
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return a CAT function (CFN) and everything attached to it to a clean
+ * state: the CFN preset itself, the KM and FLM lookup enables/categories/
+ * flow types and, when currently enabled, the CTE/CTS action tables.
+ * Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN: preset all fields of the function */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+			   0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/* KM: clear this CFN's enable bit, its category and all flow types */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		enable_bm &= ~(1 << (cfn % 8));
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, enable_bm);
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn, 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			/* Disable this FT for all four key selections */
+			for (int key_sel = 0; key_sel < 4; key_sel++)
+				set_flow_type_km(dev->ndev, cfn, ft, key_sel,
+						 0);
+		}
+	}
+
+	/* FLM: same clean-up as for KM, on the FLM lookup side */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		enable_bm &= ~(1 << (cfn % 8));
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, enable_bm);
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn, 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key_sel = 0; key_sel < 4; key_sel++)
+				set_flow_type_flm(dev->ndev, cfn, ft, key_sel,
+						  0);
+		}
+	}
+
+	/* CTE / CTS: only touched when the CTE enable bitmap is non-zero */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		if (cte) {
+			/* Two CTS entries (A/B) share one table row */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+			int cte_type;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (cte_type = 0; cte_type < cts_offset; ++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill a FLOW_HANDLE_TYPE_FLM handle from a parsed flow definition.
+ *
+ * Copies the 10-word match data, resolves the matched L4 protocol (outer
+ * first, then tunneled) to its IANA protocol number, and latches values
+ * from any modify-field actions (DSCP, RQI/QFI, NAT IPv4/port, TEID)
+ * into the handle for later FLM learn records.
+ *
+ * Returns 0 on success, -1 when fh is not an FLM handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* Outer L4 protocol wins; otherwise fall back to the tunneled one */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		switch (fd->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* One byte: bit 6 is RQI, bits [5:0] are QFI */
+			fh->flm_rqi = (fd->modify_field[idx].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		default:
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build and submit a FLM learn record for an FLM flow handle.
+ *
+ * The record carries the 10-word match data (high word first), the L4
+ * protocol number, up to four meter references, NAT/DSCP/TEID/QFI/RQI
+ * values, and the colour field: RPL EXT PTR in bits [9:0] and the MTU
+ * fragmentation recipe in bits [13:10].
+ *
+ * Returns the result of flow_flm_apply(), or -1 for non-FLM handles.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s lrn;
+	int w;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&lrn, 0x0, sizeof(lrn));
+
+	/* Match data goes into the record high-word-first */
+	for (w = 0; w < 4; w++) {
+		lrn.qw0[w] = fh->flm_data[9 - w];
+		lrn.qw4[w] = fh->flm_data[5 - w];
+	}
+	lrn.sw8 = fh->flm_data[1];
+	lrn.sw9 = fh->flm_data[0];
+	lrn.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		uint8_t mbrs = 0;
+
+		FLM_V17_MBR_ID1(lrn.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(lrn.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(lrn.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(lrn.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		lrn.vol_idx = mbrs;
+	}
+
+	lrn.nat_ip = fh->flm_nat_ipv4;
+	lrn.nat_port = fh->flm_nat_port;
+	lrn.nat_en = (fh->flm_nat_ipv4 || fh->flm_nat_port) ? 1 : 0;
+
+	lrn.dscp = fh->flm_dscp;
+	lrn.teid = fh->flm_teid;
+	lrn.qfi = fh->flm_qfi;
+	lrn.rqi = fh->flm_rqi;
+	/* Bits [9:0]: RPL EXT PTR; bits [13:10]: MTU recipe */
+	lrn.color = (fh->flm_rpl_ext_ptr & 0x3ff) |
+		    ((fh->flm_mtu_fragmentation_recipe & 0xf) << 10);
+
+	lrn.ent = 0;
+	lrn.op = flm_op & 0xf;
+	lrn.prio = fh->flm_prio & 0x3;
+	lrn.ft = flm_ft;
+	lrn.kid = fh->flm_kid;
+	lrn.eor = 1;
+
+	return flow_flm_apply(dev, &lrn);
+}
+
+/*
+ * Set up the KM (key-matcher) part of a new filter.
+ *
+ * When no identical flow exists, reuse or allocate a KM flow type (FT)
+ * and a KM category (RCP), attach the flow definition's KM state to the
+ * device-wide resource management, and write a new match entry. When an
+ * identical flow was found, only take extra references on its KM FT and
+ * category and link this entry to the existing match data.
+ *
+ * On success the three setup_* outputs are filled in and 0 is returned;
+ * on resource exhaustion, 'error' is set, a log line is emitted and 1 is
+ * returned.
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+		/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/*
+			 * Scan from index 1: remember the first matching ident
+			 * and, as a fallback, the first free (zero) slot.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Reuse: just add a reference to the FT */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record the ident */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				/* Table full: neither a match nor a free slot */
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			/* Share the category of the compatible existing flow */
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this flow type to the RCP's FT A mask */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: only take references, no new entry */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		*setup_km = 1;
+		/*
+		 * NOTE(review): reads 'flow' (the caller's search cursor), not
+		 * 'found_flow'. In the identical-flow path the caller breaks
+		 * its search loop with flow == found_flow, so today these are
+		 * the same handle — confirm if the call site ever changes.
+		 */
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * @param fd         Flow definition holding the tunnel header and modify fields.
+ * @param eth_length L2 byte count already covered by the current offset.
+ * @param i          Index of the modify field being adjusted.
+ * @param ofs        In/out: byte offset of the copied field.
+ * @param select     CPY_SELECT_* of the field (chooses the static base).
+ * @param l2_length  Length of the new outer L2 header.
+ * @param l3_length  Length of the new outer L3 header.
+ * @param l4_length  Length of the new outer L4 header.
+ * @param dyn        In/out: set to 1 when the offset is rebased to a
+ *                   static base (presumably DYN base 1 = start of L2 —
+ *                   confirm against the caller's offset encoding).
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Bug fix: the original wrote "ofs += ..." which
+			 * advanced the local pointer instead of the offset
+			 * value it points to, leaving the caller's offset
+			 * unchanged. All other branches use *ofs.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			default:
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* Update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* Update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of a NIC device for the inline flow profile.
+ *
+ * Reserves the resource indexes the profile keeps for itself (index 0 of
+ * most module recipes), writes default recipes to the HW modules,
+ * calibrates/resets the FLM SDRAM and allocates the FLM meter, flow-type
+ * and meter-statistics bookkeeping.  Idempotent: everything is guarded by
+ * ndev->flow_mgnt_prepared.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Clear all learn/unlearn/relearn status reporting, then set
+		 * the burst length (RBL) and write the control word to HW.
+		 */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		/* Per-priority FIFO fill-level drop limits */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	/*
+	 * NOTE(review): done_flow_management_of_ndev_profile_inline() only
+	 * performs its cleanup when ndev->flow_mgnt_prepared is set, which is
+	 * not yet the case on this error path - verify that allocations made
+	 * above (e.g. flm_mtr_handle, ft_res_handle) are not leaked here.
+	 */
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down the flow-management state created by
+ * initialize_flow_management_of_ndev_profile_inline(): reset the FLM
+ * SDRAM, release the reserved resource slots and preset all involved HW
+ * module recipes back to their defaults.  Safe to call when nothing was
+ * prepared (guarded by ndev->flow_mgnt_prepared).  Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/*
+		 * NULL the handles after free: they remain reachable through
+		 * ndev, so leaving them dangling would invite use-after-free
+		 * or double free on a later teardown or re-initialization.
+		 */
+		free(ndev->flm_mtr_handle);
+		ndev->flm_mtr_handle = NULL;
+		free(ndev->ft_res_handle);
+		ndev->ft_res_handle = NULL;
+		free(ndev->mtr_stat_handle);
+		ndev->mtr_stat_handle = NULL;
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Dry-run validation of a flow specification: interpret the element and
+ * action lists into an internal flow definition and discard it again.
+ * Returns 0 when the specification is acceptable, -1 otherwise with
+ * *error describing the problem.
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t dummy_port_id = 0;
+	uint32_t dummy_num_dest_port = 0;
+	uint32_t dummy_num_queues = 0;
+	uint32_t match_data[10];
+	uint32_t match_mask[10];
+	struct flm_flow_key_def_s key_def;
+	struct nic_flow_def *def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* Interpretation needs the device lock, but no NIC resources are
+	 * taken - the produced definition is freed right away.
+	 */
+	pthread_mutex_lock(&dev->ndev->mtx);
+	def = interpret_flow_elements(dev, elem, action, error, 0,
+				      &dummy_port_id, &dummy_num_dest_port,
+				      &dummy_num_queues, match_data,
+				      match_mask, &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (def == NULL)
+		return -1;
+
+	free(def);
+	return 0;
+}
+
+/*
+ * Create a flow from the given attribute/element/action lists and
+ * program it onto the NIC.
+ *
+ * The lists are interpreted into an internal flow definition, jump/group
+ * IDs are translated from caller scope to device-level group resources,
+ * and the result is written to HW by create_flow_filter().
+ *
+ * Returns the new flow handle, or NULL on failure with *error set.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	struct flow_attr attr_local;
+
+	/* Work on a local copy; a forced VLAN only applies to group 0 */
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs from caller scope to device scope */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port: default to this device's port */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	/* Typo fix: "FlOW" -> "flow" in the debug message */
+	NT_LOG(DBG, FILTER,
+	       "New flow: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Remove a single flow from the NIC.  The caller must hold
+ * dev->ndev->mtx (hence "locked").
+ *
+ * FLM flows are unprogrammed from the FLM engine, their replace-extension
+ * entries and group translation are released, and the owner's reference
+ * count is dropped (destroying the owner flow recursively when it reaches
+ * zero).  Non-FLM flows instead walk every resource type they reference
+ * and reset the corresponding HW module entry when the last reference to
+ * that resource is dropped.  fh is freed in all cases.
+ *
+ * Returns 0 on success, non-zero if any HW update failed.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the flow from the FLM engine */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* If this was the last reference to the replace extension,
+		 * clear the extension entry and any replace data it pointed to.
+		 */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Replace data is stored in 16-byte records */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Drop the owner reference; the owner flow itself is torn
+		 * down (recursively) once nothing references it anymore.
+		 */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* For each resource type held by this flow, drop the
+		 * reference on every entry; reset the HW entry when the
+		 * reference count reaches zero.
+		 */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the flow-type identity
+						 * in the software table only -
+						 * no HW entry to reset.
+						 */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* TPE recipe spans all TPE
+						 * sub-modules: reset each one.
+						 */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy one flow, or - when flow is NULL - every flow created through
+ * this eth device.  Takes the device lock and delegates the actual work
+ * to flow_destroy_locked_profile_inline().  Returns 0 on success.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	if (flow != NULL) {
+		/* Delete just this one flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete every flow owned by this eth device: pass 0 walks
+		 * the regular flow list, pass 1 the FLM flow list.  Each
+		 * list head is re-read at the start of its pass.
+		 */
+		for (int pass = 0; pass < 2 && !err; pass++) {
+			struct flow_handle *cur = (pass == 0) ?
+						  dev->ndev->flow_base :
+						  dev->ndev->flow_base_flm;
+
+			while (cur && !err) {
+				struct flow_handle *next = cur->next;
+
+				if (cur->dev == dev)
+					err = flow_destroy_locked_profile_inline(dev,
+										 cur,
+										 NULL);
+
+				cur = next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/* Flush is not implemented for the inline profile: report and fail. */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/* Query is not implemented for the inline profile: clear the output
+ * parameters, report and fail.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	*data = NULL;
+	*length = 0;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into data[].
+ *
+ * data[] must hold at least as many entries as the fields table below.
+ * All counters except HW_FLM_STAT_FLOWS are added on top of the caller's
+ * existing values; HW_FLM_STAT_FLOWS is overwritten with the current
+ * value.  On FLM versions before 18 the table is only read up to and
+ * including HW_FLM_STAT_PRB_IGNORE.
+ *
+ * Returns 0 on success, -1 if the supplied buffer is too small.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	/* Element-count idiom tied to the array itself, not its type */
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(fields[0]);
+
+	if (size < fields_cnt)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the MTU for a port into the RPP and TPE IFR recipes and flush
+ * them to HW.  Returns 0 on success, non-zero on failure.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	/* Recipe space only covers ports below 255 */
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	uint8_t rcp = convert_port_to_ifr_mtu_recipe(port);
+	int res = 0;
+
+	/* Enable the recipes and set the MTU in both IFR modules */
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  rcp, 1);
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  rcp, mtu);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      rcp, 1);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      rcp, mtu);
+
+	/* Only write the recipes to HW when all fields were set cleanly */
+	if (res == 0) {
+		res |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp, 1);
+		res |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp, 1);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the "inline" flow profile implementation. */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down per-device flow management state; always returns 0. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time per-device setup of flow management; 0 on success, -1 on error. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy a flow; caller must already hold the device mutex. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Validate a flow specification without programming anything to HW. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns NULL on failure with *error set. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of the device when flow is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented yet; always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented yet; always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Read the FLM statistics counters into data[]; see implementation. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: collects the NTHW module instances used to
+ * implement the flow-API backend for one physical adapter.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;	/* index into be_devs[] */
+	enum debug_mode_e dmode;	/* current register debug mode */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Enable full register debug tracing for module instance 'inst' when the
+ * backend debug mode has WRITE enabled or the module's own debug flag is
+ * set.  Declares and sets the local __debug__ flag that _CHECK_DEBUG_OFF()
+ * consumes, so both macros must be used within the same function scope.
+ *
+ * NOTE(review): identifiers like _CHECK_DEBUG_ON (leading underscore +
+ * capital) and __debug__ (double underscore) are reserved for the
+ * implementation by the C standard - consider renaming.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+/* Restore normal register access if _CHECK_DEBUG_ON enabled tracing. */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Record the requested register debug mode for this backend device. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	((struct backend_dev_s *)be_dev)->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+/* Query the INFO module for the KM TCAM bank width. */
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	return info_nthw_get_nb_km_tcam_bank_width(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of FLM categories. */
+static int get_nb_flm_categories(void *be_dev)
+{
+	return info_nthw_get_nb_flm_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the FLM memory size in MB. */
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	return info_nthw_get_nb_flm_size_mb(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the FLM entry size. */
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	return info_nthw_get_nb_flm_entry_size(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the FLM variant. */
+static int get_nb_flm_variant(void *be_dev)
+{
+	return info_nthw_get_nb_flm_variant(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of FLM priorities. */
+static int get_nb_flm_prios(void *be_dev)
+{
+	return info_nthw_get_nb_flm_prios(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of FLM PST profiles. */
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	return info_nthw_get_nb_flm_pst_profiles(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of HST categories. */
+static int get_nb_hst_categories(void *be_dev)
+{
+	return info_nthw_get_nb_hst_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of QSL categories. */
+static int get_nb_qsl_categories(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of QSL QST entries. */
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_qst_entries(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of PDB categories. */
+static int get_nb_pdb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_pdb_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of IOA categories. */
+static int get_nb_ioa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_ioa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of ROA categories. */
+static int get_nb_roa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_roa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of TPE categories. */
+static int get_nb_tpe_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of TX copy writers. */
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_writers(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the TX copy mask memory size. */
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_mask_mem(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the TX replacer depth. */
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_depth(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of TX replacer extension categories. */
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_ext_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Query the INFO module for the number of TPE IFR categories. */
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_ifr_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* The CAT module is present when its NTHW instance was created. */
+static bool cat_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_cat_nthw != NULL;
+}
+
+/* CAT module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_cat_nthw->m_cat);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' CFN (categorizer function) records, starting at index
+ * 'cat_func', from the shadow copy in 'cat' to the CAT module.  Each record
+ * is programmed as select -> field writes -> flush.  The record layout
+ * differs between module version 18 and versions 21/22 (the latter add
+ * tunnel error bits and an optional second KM interface).  Unsupported
+ * versions are silently ignored.  Always returns 0.
+ *
+ * Fix: the burst-count register was programmed through an undefined
+ * function "r()"; restored the cat_nthw_cfn_cnt() call used by every
+ * sibling flush routine.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* One record per flush cycle. */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int fn = cat_func; fn < cat_func + cnt; fn++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, fn);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[fn].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[fn].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[fn].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[fn].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[fn].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[fn].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[fn].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[fn].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[fn].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[fn].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[fn].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[fn].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v18.cfn[fn].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[fn].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[fn].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[fn].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[fn].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[fn].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[fn].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[fn].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[fn].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[fn].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[fn].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[fn].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[fn].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[fn].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[fn].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[fn].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[fn].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[fn].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[fn].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[fn].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* One record per flush cycle. */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int fn = cat_func; fn < cat_func + cnt; fn++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, fn);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[fn].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[fn].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[fn].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[fn].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[fn].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[fn].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[fn].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[fn].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[fn].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[fn].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[fn].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[fn].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v21.cfn[fn].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[fn].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[fn].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[fn].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[fn].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[fn].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[fn].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[fn].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[fn].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw, cat->v21.cfn[fn].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw, cat->v21.cfn[fn].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw, cat->v21.cfn[fn].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw, cat->v21.cfn[fn].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[fn].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[fn].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[fn].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[fn].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[fn].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[fn].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[fn].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[fn].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[fn].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[fn].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[fn].km0_or);
+			/* Only program the second KM interface when the FPGA has one. */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						    cat->v21.cfn[fn].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCE (key enable) records starting at 'index' to the CAT
+ * module.  v18 has a single KM interface (index 0); v21/v22 address the
+ * enable bitmap per KM interface via 'km_if_idx'.  Always returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int e = index; e < index + cnt; e++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, e);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[e].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int e = index; e < index + cnt; e++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, e);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[e].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCS (KM category select) records starting at 'cat_func'.
+ * v18 has a single KM interface; v21/v22 store one category per KM
+ * interface and use 'km_if_idx'.  Always returns 0.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int fn = cat_func; fn < cat_func + cnt; fn++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, fn);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[fn].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int fn = cat_func; fn < cat_func + cnt; fn++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, fn);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[fn].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FTE (flow type enable) records starting at 'index'.
+ * v18 has a single KM interface; v21/v22 address the enable bitmap per
+ * KM interface via 'km_if_idx'.  Always returns 0.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int e = index; e < index + cnt; e++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, e);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[e].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int e = index; e < index + cnt; e++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, e);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[e].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE (color table enable) records starting at 'cat_func'.
+ * Versions 18 and 21 share the v18 record layout (accessed through the
+ * v18 view for both); version 22 adds the RRB enable bit.  Always
+ * returns 0.
+ *
+ * Fix: the v22 branch wrote the rrb bit through the TPE accessor (a
+ * duplicated cat_nthw_cte_enable_tpe() call); it now goes through the
+ * RRB accessor.  TODO(review): confirm cat_nthw_cte_enable_rrb() is the
+ * accessor declared for the v22 RRB field.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int fn = cat_func; fn < cat_func + cnt; fn++) {
+			cat_nthw_cte_select(be->p_cat_nthw, fn);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[fn].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[fn].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[fn].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[fn].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[fn].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[fn].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[fn].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[fn].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[fn].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[fn].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[fn].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int fn = cat_func; fn < cat_func + cnt; fn++) {
+			cat_nthw_cte_select(be->p_cat_nthw, fn);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v22.cte[fn].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v22.cte[fn].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v22.cte[fn].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v22.cte[fn].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v22.cte[fn].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v22.cte[fn].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v22.cte[fn].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v22.cte[fn].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v22.cte[fn].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v22.cte[fn].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v22.cte[fn].b.tpe);
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw, cat->v22.cte[fn].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS (category-to-color) records starting at 'index'.
+ * All supported versions (18/21/22) share the v18 record layout, so the
+ * shadow is read through the v18 view.  Always returns 0.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int e = index; e < index + cnt; e++) {
+			cat_nthw_cts_select(be->p_cat_nthw, e);
+			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[e].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[e].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' COT (color override table) records starting at 'cat_func'.
+ * The v18 layout is shared by all supported versions.  Always returns 0.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int fn = cat_func; fn < cat_func + cnt; fn++) {
+			cat_nthw_cot_select(be->p_cat_nthw, fn);
+			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[fn].color);
+			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[fn].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCT (color control table) records starting at 'index'.
+ * The v18 layout is shared by all supported versions.  Always returns 0.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int e = index; e < index + cnt; e++) {
+			cat_nthw_cct_select(be->p_cat_nthw, e);
+			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[e].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[e].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' EXO (extractor offset) records starting at 'ext_index'.
+ * The v18 layout is shared by all supported versions.  Always returns 0.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int e = ext_index; e < ext_index + cnt; e++) {
+			cat_nthw_exo_select(be->p_cat_nthw, e);
+			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[e].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[e].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RCK records starting at 'index'.  The v18 layout is shared
+ * by all supported versions.  Always returns 0.
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int e = index; e < index + cnt; e++) {
+			cat_nthw_rck_select(be->p_cat_nthw, e);
+			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[e].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' LEN (length check) records starting at 'len_index'.
+ * The v18 layout is shared by all supported versions.  Always returns 0.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int e = len_index; e < len_index + cnt; e++) {
+			cat_nthw_len_select(be->p_cat_nthw, e);
+			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[e].lower);
+			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[e].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[e].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[e].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[e].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCC CAM records starting at 'len_index' (note: despite the
+ * parameter name, this is a KCC CAM index).  The v18 layout is shared by
+ * all supported versions.  Always returns 0.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int e = len_index; e < len_index + cnt; e++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, e);
+			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[e].key);
+			cat_nthw_kcc_category(be->p_cat_nthw, cat->v18.kcc_cam[e].category);
+			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[e].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCE records starting at 'len_index'.  CCE exists only in
+ * module version 22; other versions are ignored.  Always returns 0.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int e = len_index; e < len_index + cnt; e++) {
+			cat_nthw_cce_select(be->p_cat_nthw, e);
+			cat_nthw_cce_data_imm(be->p_cat_nthw, cat->v22.cce[e].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw, cat->v22.cce[e].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCS records starting at 'len_index'.  CCS exists only in
+ * module version 22.  Each record carries per-stage enable/value pairs
+ * plus three side-band type/data pairs.  Always returns 0.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int e = len_index; e < len_index + cnt; e++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, e);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw, cat->v22.ccs[e].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw, cat->v22.ccs[e].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw, cat->v22.ccs[e].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw, cat->v22.ccs[e].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw, cat->v22.ccs[e].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw, cat->v22.ccs[e].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw, cat->v22.ccs[e].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw, cat->v22.ccs[e].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw, cat->v22.ccs[e].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw, cat->v22.ccs[e].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw, cat->v22.ccs[e].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw, cat->v22.ccs[e].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw, cat->v22.ccs[e].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw, cat->v22.ccs[e].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw, cat->v22.ccs[e].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw, cat->v22.ccs[e].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw, cat->v22.ccs[e].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw, cat->v22.ccs[e].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw, cat->v22.ccs[e].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw, cat->v22.ccs[e].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw, cat->v22.ccs[e].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw, cat->v22.ccs[e].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw, cat->v22.ccs[e].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw, cat->v22.ccs[e].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw, cat->v22.ccs[e].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw, cat->v22.ccs[e].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw, cat->v22.ccs[e].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw, cat->v22.ccs[e].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* The KM module is present when its NTHW instance was created. */
+static bool km_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_km_nthw != NULL;
+}
+
+/* KM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_km_nthw->m_km);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_km_nthw->m_km);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' KM RCP (recipe) records, starting at 'category', from the
+ * shadow in 'km' to the KM module.  Only module version 7 is supported;
+ * other versions are ignored.  Always returns 0.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int c = category; c < category + cnt; c++) {
+			km_nthw_rcp_select(be->p_km_nthw, c);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[c].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[c].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[c].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[c].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[c].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[c].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[c].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[c].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[c].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[c].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[c].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[c].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[c].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[c].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[c].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[c].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[c].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[c].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[c].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[c].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[c].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[c].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[c].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[c].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[c].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[c].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[c].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[c].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[c].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[c].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[c].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[c].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[c].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[c].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[c].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[c].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[c].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[c].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[c].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[c].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[c].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[c].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[c].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[c].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' KM CAM records to the KM module, starting at 'record'
+ * within 'bank'.  The flat CAM index uses a bank stride of 2048 records
+ * (bank << 11).  Only module version 7 is supported.  Always returns 0.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* Flat index: 2048 records per bank. */
+			const int idx = (bank << 11) + record + i;
+
+			km_nthw_cam_select(be->p_km_nthw, idx);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[idx].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[idx].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[idx].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[idx].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[idx].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[idx].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[idx].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[idx].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[idx].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[idx].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[idx].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[idx].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' KM TCAM entries starting at (bank, byte, value).  The flat
+ * TCAM index uses 4 bytes per bank and 256 values per byte.  Only
+ * entries marked dirty in the shadow are written; their dirty flag is
+ * cleared afterwards (note: this mutates the shadow through the
+ * const-qualified 'km' parameter).  Only version 7 is supported.
+ * Always returns 0.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		const int start = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int idx = start; idx < start + cnt; idx++) {
+			if (!km->v7.tcam[idx].dirty)
+				continue;
+			km_nthw_tcam_select(be->p_km_nthw, idx);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[idx].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[idx].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * km_tci_flush - write 'cnt' TCAM color/flow-type (TCI) entries to hardware.
+ * 'bank' is the TCAM bank, 'index' is the index within the bank (0..71).
+ * KM version 7 only; returns 0.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries (comment said "version 3";
+		 * NOTE(review): code checks ver == 7 - confirm intent)
+		 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * km_tcq_flush - write 'cnt' TCAM qualifier (TCQ) entries to hardware.
+ * 'bank' is the TCAM bank, 'index' is the index within the bank (0..71).
+ * KM version 7 only; returns 0.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* Report whether the FLM module exists on this adapter. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_flm_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* FLM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_flm_nthw->m_flm);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * flm_control_flush - push the FLM control shadow registers to hardware.
+ * Applies to FLM version >= 17; other versions are a no-op.  Returns 0.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_status_flush - write the writable FLM status fields (clears sticky
+ * bits CRITICAL/PANIC/CRCERR).  Read-only fields are skipped here.
+ * FLM version >= 17 only; returns 0.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_status_update - read the FLM status register from hardware into the
+ * shadow state (final argument 1 = get/read direction).  FLM version >= 17
+ * only; returns 0.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM flow timeout register.  Version >= 17 only; returns 0. */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM scrub interval register.  Version >= 17 only; returns 0. */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load-bin register.  Version >= 17 only; returns 0. */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM packets-per-second load register.  Version >= 17 only. */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM lookups-per-second load register.  Version >= 17 only. */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM accesses-per-second load register.  Version >= 17 only. */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_prio_flush - write the four FLM priority limit/flow-type pairs to
+ * hardware.  Version >= 17 only; returns 0.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_pst_flush - write 'cnt' FLM PST records starting at 'index' from the
+ * shadow state to hardware.  Version >= 17 only; returns 0.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_rcp_flush - write 'cnt' FLM recipe records starting at 'index' from
+ * the shadow state to hardware.  Version >= 17 only; returns 0.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_buf_ctrl_update - refresh the learn-free / inform-available /
+ * status-available buffer counters from hardware.  Version >= 17 only.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_stat_update - read all FLM statistics counters from hardware into the
+ * shadow state.  The v17 counter set is read for version >= 17; the extra
+ * v20 counter set is read in addition for version >= 20.  Returns 0.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* latch the live counters first, then read them out */
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * flm_lrn_data_flush - push a block of learn records to the FLM and refresh
+ * the buffer-control counters.  Returns the nthw layer's status code.
+ * NOTE(review): unlike the sibling flush functions there is no flm->ver
+ * guard here; v17.buf_ctrl is dereferenced unconditionally - confirm all
+ * supported FLM versions share this layout.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * flm_inf_data_update - read a block of inform records from the FLM and
+ * refresh the buffer-control counters.  Returns the nthw layer's status.
+ * NOTE(review): no flm->ver guard; see flm_lrn_data_flush.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * flm_sta_data_update - read a block of status records from the FLM and
+ * refresh the buffer-control counters.  Returns the nthw layer's status.
+ * NOTE(review): no flm->ver guard; see flm_lrn_data_flush.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* Report whether the HSH module exists on this adapter. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_hsh_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* HSH module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hsh_nthw->m_hsh);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * hsh_rcp_flush - write 'cnt' HSH recipe records starting at 'category'
+ * from the shadow state to hardware.  HSH version 5 only; returns 0.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* Report whether the HST module exists on this adapter. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_hst_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* HST module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hst_nthw->m_hst);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * hst_rcp_flush - write 'cnt' HST recipe records starting at 'category'
+ * from the shadow state to hardware.  HST version 2 only; returns 0.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* Report whether the QSL module exists on this adapter. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_qsl_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* QSL module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_qsl_nthw->m_qsl);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * qsl_rcp_flush - write 'cnt' QSL recipe records starting at 'category'
+ * from the shadow state to hardware.  QSL version 7 only; returns 0.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * qsl_qst_flush - write 'cnt' QSL queue-selection-table entries starting at
+ * 'entry' from the shadow state to hardware.  QSL version 7 only; returns 0.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * qsl_qen_flush - write 'cnt' QSL queue-enable entries starting at 'entry'
+ * from the shadow state to hardware.  QSL version 7 only; returns 0.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * qsl_unmq_flush - write 'cnt' QSL unmatched-queue entries starting at
+ * 'entry' from the shadow state to hardware.  QSL version 7 only; returns 0.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* Report whether the SLC module exists on this adapter. */
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_slc_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* SLC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_slc_nthw->m_slc);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * slc_rcp_flush - write 'cnt' SLC recipe records starting at 'category'
+ * from the shadow state to hardware.  SLC version 1 only; returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* Report whether the SLC LR module exists on this adapter. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_slc_lr_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* SLC LR module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_lr_nthw->m_slc_lr);
+	uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * slc_lr_rcp_flush - write 'cnt' SLC LR recipe records starting at
+ * 'category' from the shadow state to hardware.  Version 2 only; returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* Report whether the PDB module exists on this adapter. */
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_pdb_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* PDB module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_pdb_nthw->m_pdb);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * pdb_rcp_flush - write 'cnt' PDB recipe records starting at 'category'
+ * from the shadow state to hardware.  PDB version 9 only; returns 0.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Fix: the original called pdb_nthw_rcp_duplicate_bit()
+			 * a second time with the pcap_keep_fcs shadow value,
+			 * clobbering DUPLICATE_BIT and never programming
+			 * PCAP_KEEP_FCS.  Use the dedicated field accessor.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * pdb_config_flush - write the PDB global config (timestamp format and
+ * port offset) to hardware.  PDB version 9 only; returns 0.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* Report whether the IOA module exists on this adapter. */
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	if (be->p_ioa_nthw == NULL)
+		return false;
+	return true;
+}
+
+/* IOA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_ioa_nthw->m_ioa);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' IOA RCP (recipe) records, starting at 'category', from the
+ * shadow copy in 'ioa' to hardware.  Each record is selected, its fields
+ * written, then committed with a per-record flush.  Only IOA version 4
+ * is handled; other versions are a silent no-op returning 0.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		/* One record written per flush. */
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Flush the two custom VLAN TPID values from the shadow copy in 'ioa'
+ * to hardware.  Only IOA version 4 is handled; otherwise no-op, still
+ * returning 0.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' ROA egress-packet-processing (EPP) records, starting at
+ * 'index', from the shadow copy in 'ioa' to hardware.  Only IOA version
+ * 4 is handled; otherwise no-op, still returning 0.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		/* One record written per flush. */
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* A ROA module is present iff its nthw handle was created at init time. */
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	if (be->p_roa_nthw == NULL)
+		return false;
+
+	return true;
+}
+
+/*
+ * Return the ROA module version packed as (major << 16) | minor.
+ * The shift is performed on uint32_t so a major version with bit 15 set
+ * cannot trigger signed-integer-overflow UB (int << 16 would).
+ */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return ((uint32_t)module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+	       ((uint32_t)module_get_minor_version(be->p_roa_nthw->m_roa) &
+		0xffff);
+}
+
+/*
+ * Flush 'cnt' tunnel-header templates, starting at 'index', from the
+ * shadow copy in 'roa' to hardware.  Each template is written as four
+ * hardware records of four 32-bit words each (sixteen words total).
+ * NOTE(review): 'index' appears to be a record index that must be a
+ * multiple of 4 (see the index / 4 shadow lookup) -- confirm with callers.
+ * Only ROA version 6 is handled; otherwise no-op, still returning 0.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* Four words written per flush. */
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' tunnel-configuration records, starting at 'category', from
+ * the shadow copy in 'roa' to hardware.  Only ROA version 6 is handled;
+ * otherwise no-op, still returning 0.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* One record written per flush. */
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush the global ROA forwarding configuration from the shadow copy in
+ * 'roa' to hardware.  Only ROA version 6 is handled; otherwise no-op,
+ * still returning 0.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' LAG configuration records, starting at 'index', from the
+ * shadow copy in 'roa' to hardware.  Only ROA version 6 is handled;
+ * otherwise no-op, still returning 0.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* One record written per flush. */
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* An RMC module is present iff its nthw handle was created at init time. */
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	if (be->p_rmc_nthw == NULL)
+		return false;
+
+	return true;
+}
+
+/*
+ * Return the RMC module version packed as (major << 16) | minor.
+ * The shift is performed on uint32_t so a major version with bit 15 set
+ * cannot trigger signed-integer-overflow UB (int << 16 would).
+ */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return ((uint32_t)module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+	       ((uint32_t)module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+		0xffff);
+}
+
+/*
+ * Flush the RMC control register fields from the shadow copy in 'rmc'
+ * to hardware.  Only RMC version 1.3 (encoded as 0x10003) is handled;
+ * otherwise no-op, still returning 0.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/*
+ * TPE is a composite of six sub-modules; it is present only when every
+ * sub-module handle was created at init time.
+ */
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	if (be->p_csu_nthw == NULL || be->p_hfu_nthw == NULL)
+		return false;
+	if (be->p_rpp_lr_nthw == NULL || be->p_tx_cpy_nthw == NULL)
+		return false;
+	if (be->p_tx_ins_nthw == NULL || be->p_tx_rpl_nthw == NULL)
+		return false;
+
+	return true;
+}
+
+/*
+ * Derive the combined TPE version from the versions of its six
+ * sub-modules (CSU, HFU, RPP_LR, TX_CPY, TX_INS, TX_RPL).  Each
+ * sub-module version is packed as (major << 16) | minor; the shifts are
+ * performed on uint32_t so a major version with bit 15 set cannot
+ * trigger signed-integer-overflow UB.  Only the two known combinations
+ * map to a TPE version (1 or 2); any other combination asserts (and
+ * falls through to return 0 when asserts are compiled out).
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		((uint32_t)module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+		((uint32_t)module_get_minor_version(be->p_csu_nthw->m_csu) &
+		 0xffff);
+
+	const uint32_t hfu_version =
+		((uint32_t)module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+		((uint32_t)module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+		 0xffff);
+
+	const uint32_t rpp_lr_version =
+		((uint32_t)module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+		 << 16) |
+		((uint32_t)module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+		 0xffff);
+
+	const uint32_t tx_cpy_version =
+		((uint32_t)module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+		 << 16) |
+		((uint32_t)module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+		 0xffff);
+
+	const uint32_t tx_ins_version =
+		((uint32_t)module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+		 << 16) |
+		((uint32_t)module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+		 0xffff);
+
+	const uint32_t tx_rpl_version =
+		((uint32_t)module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+		 << 16) |
+		((uint32_t)module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+		 0xffff);
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	/* Unknown combination of sub-module versions. */
+	assert(false);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RPP_LR RCP records, starting at 'index', from the shadow
+ * copy in 'rpp_lr' to hardware.  Handled for TPE version >= 1; otherwise
+ * no-op, still returning 0.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RPP IFR (IP fragmentation) RCP records, starting at
+ * 'index', to hardware.  Requires TPE version >= 2; returns -1 for
+ * older versions (unlike most flush helpers, which silently return 0).
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Flush 'cnt' IFR RCP records, starting at 'index', from the shadow copy
+ * in 'ifr' to hardware.  Requires TPE version >= 2; returns -1 for older
+ * versions.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Flush 'cnt' TX_INS RCP records, starting at 'index', from the shadow
+ * copy in 'tx_ins' to hardware.  Handled for TPE version >= 1; otherwise
+ * no-op, still returning 0.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_RPL RCP records, starting at 'index', from the shadow
+ * copy in 'tx_rpl' to hardware.  Handled for TPE version >= 1; otherwise
+ * no-op, still returning 0.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_RPL extension records (replace pointers), starting at
+ * 'index', to hardware.  Handled for TPE version >= 1; otherwise no-op,
+ * still returning 0.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_RPL replacement-value records, starting at 'index', to
+ * hardware.  Handled for TPE version >= 1; otherwise no-op, still
+ * returning 0.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_CPY RCP records, starting at 'index', to hardware.
+ * Records are spread over several writer instances; the writer for a
+ * record is (index + i) / nb_rcp_categories and the per-writer slot is
+ * the remainder.  The write count for a writer is (re)programmed each
+ * time the loop crosses into a new writer.  Handled for TPE
+ * version >= 1; otherwise no-op, still returning 0.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* Sentinel: -1 wraps to UINT_MAX so the first iteration always
+	 * reprograms the writer count.
+	 */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' HFU (header field update) RCP records, starting at
+ * 'index', from the shadow copy in 'hfu' to hardware.  Each record
+ * programs the three length-update groups (A/B/C), the TTL update, and
+ * the protocol/offset descriptors, then commits with a per-record flush.
+ * Handled for TPE version >= 1; otherwise no-op, still returning 0.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			/* Length group A. */
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			/* Length group B. */
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			/* Length group C. */
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			/* TTL update. */
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			/* Protocol info and header offsets. */
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CSU (checksum update) RCP records, starting at 'index',
+ * to hardware: outer/inner L3 and L4 checksum commands per record.
+ * Handled for TPE version >= 1; otherwise no-op, still returning 0.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Backend stub: Rx queue allocation is not supported by this backend;
+ * always fails with -1.
+ * NOTE(review): uses printf rather than the driver's NT_LOG used in the
+ * other files of this patch -- consider unifying.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Backend stub: Rx queue freeing is not supported by this backend.
+ * NOTE(review): prints an error yet returns 0 (success), whereas
+ * alloc_rx_queue returns -1 -- confirm whether callers rely on the 0
+ * before making the two stubs consistent.
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table handed to the generic flow API layer.
+ * The leading 1 is the first struct member (version/ID per the
+ * flow_api_backend_ops definition elsewhere).
+ * NOTE(review): this is a positional initializer -- every entry must
+ * stay in exactly the order of the struct declaration; designated
+ * initializers would make this robust against reordering.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,
+
+	/* Capability / dimension queries. */
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	/* Rx queue stubs (unsupported in this backend). */
+	alloc_rx_queue,
+	free_rx_queue,
+
+	/* CAT module. */
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	/* KM module. */
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	/* FLM module. */
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	/* HSH module. */
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	/* HST module. */
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	/* QSL module. */
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	/* SLC module. */
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	/* SLC LR module. */
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	/* PDB module. */
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	/* IOA module. */
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	/* ROA module. */
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	/* RMC module. */
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	/* TPE composite module. */
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Initialize the backend device for one adapter and return the backend
+ * operations table.  For each optional FPGA module the pattern is:
+ * probe with a NULL instance (xxx_nthw_init(NULL, ...) == 0 means the
+ * module exists in this FPGA), then allocate a handle and initialize it
+ * for real; absent modules leave a NULL handle that the xxx_get_present
+ * callbacks report.
+ * NOTE(review): the xxx_nthw_new() return values are not checked for
+ * NULL before the second init call -- confirm the allocators cannot
+ * fail, or add checks.
+ * On return, *dev points at the per-adapter backend_dev_s entry.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down a backend device created by bin_flow_backend_init(),
+ * deleting every per-module nthw handle.  Handles for modules absent
+ * from the FPGA are NULL; the xxx_nthw_delete() functions are expected
+ * to tolerate NULL (TODO confirm -- they are called unconditionally).
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Initialize the flow filter API for one adapter.
+ *
+ * Creates the binary flow backend on p_fpga, then a flow NIC device on top
+ * of it. On success *p_flow_device holds the new device and 0 is returned;
+ * on failure *p_flow_device is set to NULL and -1 is returned.
+ *
+ * NOTE(review): if flow_api_create() fails, the backend created by
+ * bin_flow_backend_init() is not released here - confirm whether
+ * bin_flow_backend_done(be_dev) should be called on this error path.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Tear down a flow NIC device created by flow_filter_init().
+ *
+ * The backend handle is fetched first because flow_api_done() destroys the
+ * device; the backend itself is released afterwards (if one exists).
+ * Returns the result of flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard says _HPP_ although this is a C header, and double
+ * underscore prefixed names are reserved - consider FLOW_FILTER_H_. */
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+/* NOTE(review): stray build-option undef; its purpose is not visible from
+ * this file - confirm it is still needed. */
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/* Create the flow filter device for adapter_no on p_fpga; 0 on success,
+ * -1 on failure (then *p_flow_device is NULL). */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Destroy a device from flow_filter_init() and its backend. */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-21 13:54   ` [PATCH v6 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-21 13:54   ` Mykola Kostenok
  2023-08-21 13:54   ` [PATCH v6 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12514 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c3f2c993f..02aca74173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process support.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state shared by the PMD threads. */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;	/* PCI identifier of the adapter - TODO confirm encoding */
+	struct adapter_info_s adapter_info;	/* nt4ga adapter description */
+	char *p_drv_name;	/* driver name string */
+
+	/* Shutdown request flag; presumably polled by the threads below -
+	 * NOTE(review): consider an atomic type for cross-thread use. */
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;	/* protects statistics state */
+	pthread_t stat_thread;	/* statistics polling thread */
+	pthread_t flm_thread;	/* flow manager (FLM) thread */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;	/* adapter index this driver instance handles */
+	struct rte_pci_device *p_dev;	/* backing DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;	/* per-adapter driver state */
+
+	int n_eth_dev_init_count;	/* number of eth devices initialized on this adapter */
+	int probe_finished;	/* nonzero once PCI probe has completed - TODO confirm */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0b5ca13e48..d3c3129a0c 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -33,6 +33,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -119,6 +122,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+/*
+ * Aliases for the virtio little-endian wire types. They map directly onto
+ * host-endian types, which is only correct on little-endian hosts
+ * (the PMD documents x86-64 support only) - NOTE(review): confirm.
+ */
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+/* Split-ring "avail" structure; packed to match the virtio layout. */
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+/* Split-ring "used" structure; packed to match the virtio layout. */
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used/desc sections inside one queue allocation. */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+/* Lifecycle state of an entry in the static rxvq[]/txvq[] tables. */
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+/* 1 for packed-ring queues, 0 for split-ring. */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * State for one DBS virt-queue. The leading anonymous union holds either
+ * split-ring or packed-ring bookkeeping, selected by vq_type.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;	/* split or packed ring */
+	uint16_t in_order;	/* in-order completion (TX) */
+	int irq_vector;	/* MSI-X vector, or negative for polling mode */
+
+	nthw_dbs_t *mp_nthw_dbs;	/* owning DBS module */
+	uint32_t index;	/* DBS queue index; also index into rxvq[]/txvq[] */
+	le16 queue_size;
+	uint32_t am_enable;	/* availability-monitor enable state */
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * The comment below documents the 'header' field above.
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Byte offsets of the event-suppress sections for a packed-ring queue. */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* Static per-queue state tables, indexed by DBS queue index. */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one DBS RX queue: wait until the RX init interface is idle,
+ * write the start index/pointer for `queue`, then wait for completion.
+ * NOTE(review): busy-waits with no timeout - hangs if HW never clears busy.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * TX counterpart of dbs_init_rx_queue(): idle-wait, program start
+ * index/pointer for `queue`, wait for completion.
+ * NOTE(review): same untimed busy-wait as the RX variant.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * One-time DBS/virt-queue initialization for an FPGA: create the DBS
+ * module, reset it, initialize every RX/TX queue, and bring the RX/TX
+ * control registers up through disable -> AM/UW enable -> queue enable.
+ * Returns 0 on success, negative on failure.
+ *
+ * NOTE(review): on error the nthw_dbs_new() result is released with plain
+ * free() - confirm that nthw_dbs_new() is a simple allocation with no
+ * other resources to release.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* All queue-table slots start out unused. */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Staged bring-up: all off, then AM/UW on, then queues on. */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute byte offsets of the used- and descriptor-structures within one
+ * contiguous split-ring allocation. Each section (avail, used) is rounded
+ * up to STRUCT_ALIGNMENT (4 KiB) so the next struct starts on an aligned
+ * boundary.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	size_t avail_mem =
+		sizeof(struct virtq_avail) +
+		queue_size *
+		sizeof(le16); /* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_mem_aligned =
+		((avail_mem % STRUCT_ALIGNMENT) == 0) ?
+		avail_mem :
+		STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_mem =
+		sizeof(struct virtq_used) +
+		queue_size *
+		sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned =
+		((used_mem % STRUCT_ALIGNMENT) == 0) ?
+		used_mem :
+		STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+/*
+ * Fill a split-ring avail structure: interrupts suppressed, idx preset to
+ * initial_avail_idx, and the ring mapped 1:1 (slot i holds descriptor i).
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+/*
+ * Zero-fill a split-ring used structure with notifications suppressed.
+ * NOTE(review): the literal 1 for flags is presumably
+ * VIRTQ_USED_F_NO_NOTIFY - consider using the macro.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = 1;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Point each split-ring descriptor at its packet buffer (physical address
+ * and length from packet_buffer_descriptors) with the given flags.
+ * No-op when packet_buffer_descriptors is NULL.
+ * NOTE(review): parameter type 'ule16' is not defined in this file (only
+ * le16/le32/le64 are) - confirm it is declared in an included header,
+ * otherwise this will not compile.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+/*
+ * Convenience wrapper: initialize the avail, used and descriptor
+ * structures of one split-ring queue in a single call.
+ * (Same 'ule16' type caveat as dbs_initialize_descriptor_struct().)
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * Return floor(log2(qsize)), i.e. the position of the highest set bit,
+ * used to program queue sizes as a power-of-two exponent.
+ * NOTE(review): qsize == 0 underflows (--qs wraps) - callers must pass a
+ * power-of-two size >= 1; non-power-of-two sizes silently round down.
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure a DBS RX virt-queue (split or packed ring) at `index` and
+ * return a handle into the static rxvq[] table. Follows the DSF00094
+ * bring-up sequence; interrupts start disabled and are enabled later via
+ * nthw_enable_rx_virt_queue() after vfio interrupts are set up in the
+ * kernel. Returns NULL if any DBS register write fails.
+ *
+ * NOTE(review): unlike the enable/disable functions, `index` is not
+ * range-checked against MAX_VIRT_QUEUES here - confirm callers guarantee it.
+ * NOTE(review): the NT_LOG below formats the signed irq_vector with %u.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+/* Forward declaration; defined later in this translation unit. Waits for
+ * the FPGA to finish packet processing on a queue (rx selects direction). */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Disable an RX virt-queue: clear the interrupt setup in RX_UW_DATA,
+ * disable availability monitoring (AM), then wait for the FPGA to drain.
+ * Returns 0 on success, -1 on invalid handle/state or register failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-enable an RX virt-queue: program RX_UW_DATA with MSI-X interrupt
+ * data (vector and ISTK) when irq_vector is in range, otherwise with
+ * interrupts off, then re-enable availability monitoring (AM).
+ * Returns 0 on success, -1 on invalid handle/state or register failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable an unmanaged TX virt queue and wait for the FPGA to drain it.
+ *
+ * Mirrors nthw_disable_rx_virt_queue(): clear UW interrupt/ISTK state,
+ * disable the avail-ring monitor, then wait for HW idle.
+ *
+ * tx_vq : queue handle returned by nthw_setup_tx_virt_queue()
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* Only queues set up directly (not via the managed API) may be toggled */
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Enable an unmanaged TX virt queue.
+ *
+ * Programs UW data with interrupt settings (vector + sticky interrupt when a
+ * valid MSI-X vector is configured) and then enables the avail-ring monitor.
+ *
+ * tx_vq : queue handle returned by nthw_setup_tx_virt_queue()
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Redirect a TX virt queue to a new output port and (re-)enable it.
+ *
+ * Reprograms TX_DR_DATA with the new outport, then delegates to
+ * nthw_enable_tx_virt_queue() for the interrupt/AM setup.
+ *
+ * tx_vq   : queue handle returned by nthw_setup_tx_virt_queue()
+ * outport : new physical output port for this queue
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Guard before dereference - consistent with the other enable/disable APIs */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/*
+ * Configure TX QoS shaping for one port.
+ * Thin public wrapper around set_tx_qos_data(); ir/bs semantics are those of
+ * the underlying DBS QoS registers (rate and burst size).
+ * Returns the underlying register-write status (0 on success).
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/*
+ * Set the global TX QoS rate as a multiplier/divider pair.
+ * Thin public wrapper around set_tx_qos_rate().
+ * Returns the underlying register-write status (0 on success).
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read the RX queue pointer sampled by a previous dbs_initialize_get_rx_ptr().
+ * On a successful register read, *p_index receives the sampled pointer, or
+ * INDEX_PTR_NOT_VALID while the hardware has not yet produced a valid sample.
+ * Returns the status of the underlying register read (0 on success).
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+
+	const int rc = get_rx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (rc != 0)
+		return rc;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer sampled by a previous dbs_initialize_get_tx_ptr().
+ * On a successful register read, *p_index receives the sampled pointer, or
+ * INDEX_PTR_NOT_VALID while the hardware has not yet produced a valid sample.
+ * Returns the status of the underlying register read (0 on success).
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+
+	const int rc = get_tx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (rc != 0)
+		return rc;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+/* Request a pointer sample for the given RX queue (read back via dbs_get_rx_ptr) */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Request a pointer sample for the given TX queue (read back via dbs_get_tx_ptr) */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Spin until the RX/TX idle register is no longer busy.
+ *
+ * vq   : queue providing the DBS handle
+ * idle : out - last idle flag read from the register
+ * rx   : non-zero for the RX idle register, zero for TX
+ * Returns the status of the last register read (0 on success).
+ *
+ * NOTE(review): this loop has no iteration bound or timeout; it spins for as
+ * long as the hardware reports busy - confirm the FPGA guarantees that the
+ * busy flag eventually clears.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Wait for the FPGA to finish processing on a queue being shut down.
+ *
+ * Repeatedly requests an idle-check for this queue and polls until the
+ * hardware reports idle. If the idle registers are not supported by this
+ * FPGA (-ENOTSUP), falls back to a fixed 200 ms grace period and reports
+ * success.
+ *
+ * vq : queue to drain
+ * rx : non-zero for an RX queue, zero for TX
+ * Returns 0 on success (or unsupported), -1 on register access failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an RX virt queue: clear UW, disable and clear AM, clear DR,
+ * re-initialize the queue in HW and reset the software queue state.
+ *
+ * rxvq : queue to release (may be NULL; treated as an error)
+ * Returns 0 on success, -1 on error.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* NULL check must precede any dereference of rxvq */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/*
+ * Release an unmanaged RX virt queue (buffers are owned by the caller).
+ * Returns 0 on success, -1 on NULL or non-unmanaged queue.
+ */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Release a managed RX virt queue, including the packet-buffer descriptor
+ * array allocated by the managed setup path.
+ * Returns 0 on success, -1 on NULL or non-managed queue.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a TX virt queue: clear UW, disable and clear AM, clear DR and
+ * QP, re-initialize the queue in HW and reset the software queue state.
+ *
+ * txvq : queue to release (may be NULL; treated as an error)
+ * Returns 0 on success, -1 on error.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* NULL check must precede any dereference of txvq */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/*
+ * Release an unmanaged TX virt queue (buffers are owned by the caller).
+ * Returns 0 on success, -1 on NULL or non-unmanaged queue.
+ */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Release a managed TX virt queue, including the packet-buffer descriptor
+ * array allocated by the managed setup path.
+ * Returns 0 on success, -1 on NULL or non-managed queue.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Set up an unmanaged TX virt queue in the DBS module.
+ *
+ * Follows the DBS init sequence from DSF00094 (DR -> UW -> AM-disabled ->
+ * TX_INIT -> QP -> AM-enabled). The queue handle is an entry in the
+ * file-scope txvq[] table, indexed by 'index'.
+ *
+ * irq_vector < 0 means "no interrupts": AM is enabled immediately.
+ * irq_vector >= 0: the queue is left with AM disabled; it is enabled later
+ * via nthw_enable_tx_virt_queue() once vfio interrupts are set up.
+ *
+ * Returns &txvq[index] on success, NULL on register-write failure.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings use the descriptor struct address in UW; split rings the used ring */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Set up a managed split-ring RX virt queue.
+ *
+ * Carves avail/used/desc rings out of p_virt_struct_area, pre-populates the
+ * ring with p_packet_buffers (device write-only descriptors), copies the
+ * buffer descriptor array for later lookup and programs the HW queue.
+ *
+ * Returns &rxvq[index] on success, NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check the allocation before copying into it */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Set up a managed split-ring TX virt queue.
+ *
+ * Carves avail/used/desc rings out of p_virt_struct_area, copies the
+ * packet-buffer descriptor array for later lookup and programs the HW queue.
+ *
+ * Returns &txvq[index] on success, NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check the allocation before copying into it */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Common packed-ring setup for a managed RX or TX queue.
+ *
+ * Lays out descriptors + device/driver event-suppression structs in
+ * p_virt_struct_area, pre-fills all descriptors with the given flags
+ * (with buffer address/len filled in only for RX) and clones the
+ * packet-buffer descriptor array into vq->p_virtual_addr.
+ *
+ * Relies on the FPGA delivering buffers in order: the buffer ID used is
+ * simply the index into p_packet_buffers.
+ *
+ * flags : initial descriptor flags (e.g. VIRTQ_DESC_F_WRITE|AVAIL for RX)
+ * rx    : non-zero for RX (buffers made available), zero for TX
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	/* Packed-ring wrap counters start at 1 per the virtio spec */
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Set up a managed packed-ring RX virt queue.
+ * Initializes the packed ring in memory and programs the HW queue with the
+ * driver/device event areas and descriptor ring addresses.
+ * Returns &rxvq[index] on success, NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Set up a managed packed-ring TX virt queue.
+ * Initializes the packed ring in memory and programs the HW queue with the
+ * driver/device event areas and descriptor ring addresses.
+ * Returns &txvq[index] on success, NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on ring layout; unknown layouts yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on ring layout; unknown layouts yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Flag value a driver-owned descriptor must carry for the current avail wrap */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse of the used flag for the current avail wrap (marks descr not-used) */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, toggling the avail wrap counter on ring wrap.
+ * Single-evaluation of both arguments via local copies.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, toggling the used wrap counter on ring wrap.
+ * Single-evaluation of both arguments via local copies.
+ */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Collect up to n received packet segments from an RX virt queue.
+ *
+ * Split ring: consumes used-ring entries; a packet whose cap_len exceeds the
+ * (uniform) buffer length occupies multiple consecutive segments, and a
+ * packet is only delivered if all its segments fit within n.
+ * Packed ring: consumes descriptors in ring order (requires in-order FPGA
+ * delivery); one descriptor per packet.
+ *
+ * rp      : out - array of n segment descriptors (addr/len)
+ * nb_pkts : out - number of whole packets delivered
+ * Returns the number of segments written to rp.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* All RX buffers share one size; taken from the first descriptor */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* A descriptor is used when avail == used == used_wrap_count */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n free TX descriptors for the caller to fill.
+ *
+ * Split ring: hands out contiguous descriptor indices starting at
+ * tx_descr_avail_idx, refreshing the cached free count from the used/avail
+ * rings when it runs low (kept in sync because the queue is in-order).
+ * Packed ring: first re-hands out any previously outstanding descriptors
+ * (txvq->outs), then walks the ring collecting descriptors already returned
+ * by the device (avail == used == used_wrap_count); requires in-order FPGA.
+ *
+ * first_idx   : out - index of the first reserved descriptor
+ * cvq         : out - union view (split/packed) of the descriptor ring
+ * p_virt_addr : out - the queue's packet-buffer descriptor array
+ * Returns the number of descriptors actually reserved (0 on unknown type).
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		if (txvq->outs.num) {
+			/* Serve previously collected-but-undelivered descriptors first */
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* desc->id may skip ahead; credit all skipped descriptors */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		if (i > n) {
+			/* Collected more than requested - stash the excess in outs */
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled TX packets (n_segs[i] segments each) to the hardware.
+ *
+ * Split ring: writes one avail-ring entry per packet (pointing at its first
+ * descriptor) and publishes the new avail index after a full barrier.
+ * Packed ring: marks the packets' descriptors available; the first
+ * descriptor's flags are written last so the HW cannot see a partial batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/*
+		 * NOTE(review): rte_rmb() is a read barrier; ordering the descriptor
+		 * writes before publishing first_flags would normally need rte_wmb()
+		 * - confirm intent.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the hardware's current RX queue pointer.
+ *
+ * Requests a pointer sample and polls (10 us interval, ~1 s worth of
+ * attempts) until the hardware reports a valid value.
+ *
+ * index : out - low 16 bits of the sampled pointer
+ * Returns 0 on success, -1 on register failure or timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	/* If the sample request fails, polling below can never succeed */
+	if (dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index) != 0)
+		return -1;
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (rx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the hardware's current TX queue pointer.
+ *
+ * Requests a pointer sample and polls (10 us interval, ~1 s worth of
+ * attempts) until the hardware reports a valid value.
+ *
+ * index : out - low 16 bits of the sampled pointer
+ * Returns 0 on success, -1 on register failure or timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	/* If the sample request fails, polling below can never succeed */
+	if (dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index) != 0)
+		return -1;
+	do {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (tx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/* Describes one DMA-able memory area shared with the FPGA. */
+struct nthw_memory_descriptor {
+	/* NOTE(review): despite the void* type this appears to hold a bus/DMA
+	 * address, not a dereferenceable CPU pointer - confirm with callers.
+	 */
+	void *phys_addr;
+	/* CPU-accessible mapping of the same area */
+	void *virt_addr;
+	/* Size of the area in bytes */
+	uint32_t len;
+};
+
+/*
+ * Little-endian wire types used by the virtqueue structures below.
+ * Typedefs instead of object-like macros: they obey scope, show up in
+ * debuggers, and avoid textual-expansion surprises.
+ */
+typedef uint64_t ule64;
+typedef uint32_t ule32;
+typedef uint16_t ule16;
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+#define SPLIT_RING 0
+#define PACKED_RING 1
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ */
+/* Byte-packed so the layout matches the virtio split-ring descriptor
+ * exactly as the FPGA expects it (no compiler padding).
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+/* NOTE(review): pack(push, 16) only caps member alignment at 16; it does
+ * not by itself guarantee 16-byte alignment of instances - the required
+ * alignment must come from the allocator. Confirm against allocation sites.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Packed-ring driver/device event suppression area (virtio 1.1 sec. 2.7.14).
+ * First word selects the descriptor event, second word the event flags.
+ */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/* NOTE(review): pack(push, 16) in the middle of the struct only
+	 * affects the second union and is popped after the struct; it looks
+	 * like a no-op here - confirm intent before relying on the layout.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ */
+/*
+ * Common virtq descriptor accessors, valid for both ring types.
+ * All arguments are captured once into typed locals so that arguments with
+ * side effects (e.g. i++) are evaluated exactly once - vq_set_next
+ * previously expanded `index`/`nxt` directly, unlike its siblings.
+ */
+#define vq_set_next(_vq, _index, _nxt)                 \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_nxt) (nxt) = (_nxt); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/* OR the given flags into the descriptor's flags field */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/* Overwrite the descriptor's flags field with the given flags */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Address/length prefix shared by split and packed descriptors; 16-byte
+ * aligned so it can overlay either descriptor type via nthw_cvirtq_desc.
+ */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* "Common" descriptor-ring view: one pointer, three typed interpretations.
+ * vq_type selects which union member is the valid view.
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type; /* SPLIT_RING or PACKED_RING */
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..ce07d5a8cd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Debug helper: hex-dump @len bytes of @data, 16 per line, with an
+ * optional @text label in front.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register @p_drv in the global adapter table so it can later be looked up
+ * by PCI address. Logs and bails out on an out-of-range adapter number;
+ * logs a warning if the slot is already occupied (and then overwrites it).
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/* g_p_drv has NUM_ADAPTER_MAX entries, so index NUM_ADAPTER_MAX
+	 * itself is out of bounds - the original ">" check was off by one.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	/* NOTE(review): this read is done outside hwlock; worst case is a
+	 * missed/extra warning, the store below is still serialized.
+	 */
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up a registered driver instance by PCI domain and bus number.
+ * Returns NULL if no adapter in g_p_drv matches. Table access is
+ * serialized with hwlock.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *found = NULL;
+	int slot;
+
+	rte_spinlock_lock(&hwlock);
+	for (slot = 0; slot < NUM_ADAPTER_MAX; slot++) {
+		struct drv_s *cand = g_p_drv[slot];
+
+		if (!cand)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus) {
+			found = cand;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return found;
+}
+
+/*
+ * Unpack a packed 32-bit PCI ident into an rte_pci_addr and reuse the
+ * address-based lookup. Returns NULL if the adapter is not registered.
+ */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter info for the adapter identified by @pciident to @pfh.
+ * Returns the nt4ga_adapter_show_info() result, or -1 if no adapter with
+ * that ident is registered (the original dereferenced a NULL p_drv here).
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1;
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Resolve the DBS (doorbell) HW instance for the adapter at @pci_addr.
+ * Returns NULL (after logging an error) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (p_drv)
+		return p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+
+	NT_LOG(ERR, ETHDEV,
+	       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+	       " is not found\n",
+	       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+	       pci_addr.function);
+	return p_nthw_dbs;
+}
+
+/*
+ * Resolve the FPGA profile of the adapter at @pci_addr. Returns
+ * FPGA_INFO_PROFILE_UNKNOWN (after logging an error) when the adapter is
+ * not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (p_drv)
+		return p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	NT_LOG(ERR, ETHDEV,
+	       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+	       " is not found\n",
+	       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+	return FPGA_INFO_PROFILE_UNKNOWN;
+}
+
+/*
+ * rte_kvargs callback: parse an unsigned 32-bit value into *extra_args.
+ * Base is auto-detected (leading "0x" -> hex, "0" -> octal, else decimal).
+ * Returns 0 on success, -1 on missing input.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	/* strtoul, not strtol: strtol saturates at LONG_MAX, which on 32-bit
+	 * targets rejects valid uint32_t values above INT32_MAX.
+	 */
+	const uint32_t value = strtoul(value_str, NULL, 0);
+	*(uint32_t *)extra_args = value;
+	return 0;
+}
+
+/* One parsed "port:speed" pair from the devargs. */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+/*
+ * rte_kvargs callback. extra_args is a pointer to a cursor into an array of
+ * struct port_link_speed: each successful parse fills the current entry and
+ * advances the caller's cursor (hence the double indirection).
+ * Returns 0 on success, -1 on missing input or missing ':' separator.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* advance the caller's cursor to the next free entry */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill @stats with per-queue and aggregate Rx/Tx counters for @internals.
+ * Pulls fresh numbers from HW via poll_statistics() first, then sums the
+ * per-queue software counters. Returns 0 on success, -1 if the stat engine
+ * is not initialized or the port index is out of range.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* NOTE(review): ">" permits if_index == NUM_ADAPTER_PORTS_MAX; if the
+	 * port arrays have NUM_ADAPTER_PORTS_MAX entries this should be ">=" -
+	 * confirm against the array definitions.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	/* Per-queue Rx counters, capped at what rte_eth_stats can report */
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	/* Per-queue Tx counters, same cap */
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue software counters and the rx_missed counter, and stamp
+ * the reset time. Serialized against the stat polling thread via stat_lck.
+ * Returns 0 on success, -1 if the stat engine is not initialized or the
+ * port index is out of range.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* NOTE(review): same ">" vs ">=" question as dpdk_stats_collect() */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* Translate an NT link-speed enum value into the corresponding DPDK
+ * ETH_SPEED_NUM_xxx value; unknown speeds map to ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex value to the DPDK duplex constant; unknown values
+ * (including NT_LINK_DUPLEX_UNKNOWN) map to 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	if (nt_link_duplex == NT_LINK_DUPLEX_FULL)
+		return ETH_LINK_FULL_DUPLEX;
+	if (nt_link_duplex == NT_LINK_DUPLEX_HALF)
+		return ETH_LINK_HALF_DUPLEX;
+	return 0;
+}
+
+/*
+ * ethdev .link_update callback: refresh dev_link (status/speed/duplex).
+ * Physical ports read the state from the adapter; virtual/override ports
+ * report link-up iff virtio negotiation completed, with no speed. A stopped
+ * device always reports link down. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		/* Virtual ports have no PHY: link state follows negotiation */
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		/* Physical port: query the adapter for live state */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		/* Device not started: report a defined "down" state */
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * ethdev .stats_get callback. Propagates dpdk_stats_collect() failure
+ * (uninitialized stat engine / bad port index) instead of silently
+ * returning success with a zeroed stats struct as before.
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * ethdev .stats_reset callback. Propagates dpdk_stats_reset() failure
+ * instead of silently returning success as before.
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/* Translate an NT link-speed capability bitmask into the equivalent DPDK
+ * ETH_LINK_SPEED_xxx capability bitmask, bit by bit via a lookup table.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} capa_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < sizeof(capa_map) / sizeof(capa_map[0]); i++) {
+		if (nt_link_speed_capa & capa_map[i].nt_bit)
+			eth_speed_capa |= capa_map[i].eth_bit;
+	}
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * ethdev .dev_infos_get callback: report queue counts, MTU bounds, link
+ * speed capabilities and RSS offload capabilities for this port.
+ * Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* Inline profile needs room for the inline headers -> larger minimum */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use a key, so this is just a fake key
+	 * length to fit testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet out of (possibly several) virtqueue segments in
+ * hw_recv[] into @mbuf, chaining extra mbufs from @mb_pool when the packet
+ * does not fit in one. The first virtqueue segment starts with an
+ * SG_HDR_SIZE descriptor header which is skipped; @data_len is the total
+ * captured length including that header.
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on mbuf
+ * allocation failure / segment-count overflow (caller frees the mbuf chain).
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* One iteration per source virtqueue segment */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		if (cpy_size > cpto_size) {
+			/* Current mbuf is too small: fill it, then chain new
+			 * mbufs until this virtqueue segment is consumed.
+			 */
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			/* head mbuf's pkt_len was pre-set above; only chained
+			 * mbufs accumulate pkt_len here
+			 */
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* Ran past the segments actually received: data was truncated */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
/*
 * Copy one packet from an rte_mbuf chain into TX virtqueue buffers.
 *
 * The packet is written starting at descriptor @vq_descr_idx, after a
 * SG_HDR_SIZE header that the caller has already accounted for. Whenever a
 * virtqueue buffer fills up, the next descriptor is chained via
 * VIRTQ_DESC_F_NEXT and the copy continues there.
 *
 * @param cvq_desc     combined virtqueue descriptor table being filled
 * @param vq_descr_idx index of the first descriptor for this packet
 * @param vq_bufs      per-descriptor buffer memory (virt addresses used here)
 * @param max_segs     maximum number of virtqueue segments this packet may use
 * @param mbuf         source packet (possibly multi-segment)
 *
 * @return number of virtqueue segments consumed, or -1 if the packet would
 *         need more than @max_segs segments.
 */
int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
			   uint16_t vq_descr_idx,
			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
			   struct rte_mbuf *mbuf)
{
	/*
	 * 1. mbuf packet may be segmented
	 * 2. the virtqueue buffer size may be too small and may need to be segmented
	 */

	char *data = rte_pktmbuf_mtod(mbuf, char *); /* read cursor in current mbuf seg */
	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE; /* write cursor */

	int remain = mbuf->pkt_len;  /* bytes left of the whole packet */
	int cpy_size = mbuf->data_len; /* bytes left in current mbuf segment */

#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
	printf("src copy size %i\n", cpy_size);
#endif

	struct rte_mbuf *m = mbuf;
	/* first vq buffer loses SG_HDR_SIZE bytes to the packet header */
	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;

	/* length of first descriptor starts at header size; data added below */
	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;

	int cur_seg_num = 0; /* start from 0 */

	while (m) {
		/* Can all data in current src segment be in current dest segment */
		if (cpy_size > cpto_size) {
			/* No: fill the current vq buffer, then chain new descriptors */
			int new_cpy_size = cpto_size;

#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
			       cur_seg_num,
			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
			       (uint64_t)dst -
			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
			       new_cpy_size);
#endif
			rte_memcpy((void *)dst, (void *)data, new_cpy_size);

			cvq_desc->b[vq_descr_idx].len += new_cpy_size;

			remain -= new_cpy_size;
			cpy_size -= new_cpy_size;

			data += new_cpy_size;

			/*
			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
			 * mbuf
			 */
			do {
				/* chain current descriptor to the next one */
				vq_add_flags(cvq_desc, vq_descr_idx,
					     VIRTQ_DESC_F_NEXT);

				int next_vq_descr_idx =
					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);

				vq_set_next(cvq_desc, vq_descr_idx,
					    next_vq_descr_idx);

				vq_descr_idx = next_vq_descr_idx;

				/* fresh descriptor: clear flags/next before use */
				vq_set_flags(cvq_desc, vq_descr_idx, 0);
				vq_set_next(cvq_desc, vq_descr_idx, 0);

				/* give up if the packet needs too many segments */
				if (++cur_seg_num > max_segs)
					break;

				/* subsequent vq buffers carry no header - full size usable */
				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;

				int actual_cpy_size = (cpy_size > cpto_size) ?
						      cpto_size :
						      cpy_size;
#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
				       cur_seg_num, actual_cpy_size,
				       (uint64_t)dst -
				       (uint64_t)vq_bufs[vq_descr_idx]
				       .virt_addr);
#endif
				rte_memcpy((void *)dst, (void *)data,
					   actual_cpy_size);

				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;

				remain -= actual_cpy_size;
				cpy_size -= actual_cpy_size;
				cpto_size -= actual_cpy_size;

				data += actual_cpy_size;

			} while (cpy_size && remain);

		} else {
			/* All data from this segment can fit in current virtqueue buffer */
#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
			       cur_seg_num, cpy_size,
			       (uint64_t)dst -
			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
#endif

			rte_memcpy((void *)dst, (void *)data, cpy_size);

			cvq_desc->b[vq_descr_idx].len += cpy_size;

			remain -= cpy_size;
			cpto_size -= cpy_size;
		}

		/* Packet complete - all segments from current mbuf has been copied */
		if (remain == 0)
			break;
		/* increment dst to data end */
		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
		      cvq_desc->b[vq_descr_idx].len;

		m = m->next;
		if (!m) {
			/* pkt_len promised more data than the segment chain held */
			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
			break;
		}

		/* Prepare for next mbuf segment */
		data = rte_pktmbuf_mtod(m, char *);
		cpy_size = m->data_len;
	};

	/* convert from last-used index to a segment count */
	cur_seg_num++;
	if (cur_seg_num > max_segs) {
		NT_LOG(ERR, ETHDEV,
		       "Did not receive correct number of segment for a whole packet");
		return -1;
	}

	return cur_seg_num;
}
+
/*
 * Scatter-gather TX burst function.
 *
 * Copies up to @nb_pkts mbufs into HW virtqueue buffers, chaining multiple
 * virtqueue descriptors per packet when needed, and releases the consumed
 * descriptors to the hardware. Transmitted mbufs are freed here.
 *
 * @return number of packets actually handed to the hardware.
 */
static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts)
{
	uint16_t pkt;
	uint16_t first_vq_descr_idx = 0;

	struct nthw_cvirtq_desc cvq_desc;

	struct nthw_memory_descriptor *vq_bufs;

	struct ntnic_tx_queue *tx_q = queue;

	int nb_segs = 0, i;
	int pkts_sent = 0;
	uint16_t nb_segs_arr[MAX_TX_PACKETS];

	/* driver is being torn down - refuse to transmit */
	if (kill_pmd)
		return 0;

	/* clamp burst to the size of nb_segs_arr */
	if (nb_pkts > MAX_TX_PACKETS)
		nb_pkts = MAX_TX_PACKETS;

#ifdef TX_CHAINING_DEBUG
	printf("\n---------- DPDK Tx ------------\n");
#endif

	/*
	 * count all segments needed to contain all packets in vq buffers
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* pad runt frames to the 60-byte minimum Ethernet payload */
		if (bufs[i]->pkt_len < 60) {
			bufs[i]->pkt_len = 60;
			bufs[i]->data_len = 60;
		}

		/* build the num segments array for segmentation control and release function */
		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);

		nb_segs_arr[i] = vq_segs;
		nb_segs += vq_segs;
	}
	if (!nb_segs)
		goto exit_out;

#ifdef TX_CHAINING_DEBUG
	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
	       tx_q->port, nb_segs, nb_pkts,
	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
	       rte_pktmbuf_headroom(bufs[0]));
#endif

	int got_nb_segs =
		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
	if (!got_nb_segs) {
#ifdef TX_CHAINING_DEBUG
		printf("Zero segments got - back pressure from HW\n");
#endif
		goto exit_out;
	}

	/*
	 * we may get less vq buffers than we have asked for
	 * calculate last whole packet that can fit into what
	 * we have got
	 */
	while (got_nb_segs < nb_segs) {
		/* drop trailing packets until the remainder fits */
		if (!--nb_pkts)
			goto exit_out;
		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
		if (nb_segs <= 0)
			goto exit_out;
	}

	/*
	 * nb_pkts & nb_segs, got it all, ready to copy
	 */
	int seg_idx = 0;
	int last_seg_idx = seg_idx;

	for (pkt = 0; pkt < nb_pkts; ++pkt) {
		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);

		/* clear flags/next on the packet's first descriptor */
		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
		vq_set_next(&cvq_desc, vq_descr_idx, 0);

		struct _pkt_hdr_tx *hdr_tx =
			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
		/* Set the header to all zeros */
		memset(hdr_tx, 0, SG_HDR_SIZE);

		/*
		 * Set the NT DVIO0 header fields
		 *
		 * Applicable for Vswitch only.
		 * For other product types the header values are "don't care" and we leave them as
		 * all zeros.
		 */
		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
			hdr_tx->bypass_port = tx_q->target_id;

			/* set packet length */
			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
		}

#ifdef TX_CHAINING_DEBUG
		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);

#ifdef TX_SRC_DUMP_PKTS_DEBUG
		{
			struct rte_mbuf *m = bufs[pkt];
			int ii;

			printf("Dump src mbuf:\n");
			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
				printf("  seg %i len %i\n", ii, m->data_len);
				printf("  seg dump:\n");
				dump_packet_seg("mbuf seg:",
						rte_pktmbuf_mtod(m, uint8_t *),
						m->data_len);
				m = m->next;
			}
		}
#endif

#endif

		/* fast path: single mbuf segment fits in a single vq buffer */
		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
#ifdef TX_CHAINING_DEBUG
			printf("Simple copy %i bytes - mbuf -> vq\n",
			       bufs[pkt]->pkt_len);
#endif
			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
				SG_HDR_SIZE),
				rte_pktmbuf_mtod(bufs[pkt], void *),
				bufs[pkt]->pkt_len);

			cvq_desc.b[vq_descr_idx].len =
				bufs[pkt]->pkt_len + SG_HDR_SIZE;

			seg_idx++;
		} else {
			/* general path: segment across multiple vq descriptors */
			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
							      vq_descr_idx, vq_bufs,
							      nb_segs - last_seg_idx, bufs[pkt]);
			if (cpy_segs < 0)
				break;
			seg_idx += cpy_segs;
		}

#ifdef TX_DST_DUMP_PKTS_DEBUG
		int d, tot_size = 0;

		for (d = last_seg_idx; d < seg_idx; d++)
			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
		       tx_q->queue.hw_id);
		for (d = last_seg_idx; d < seg_idx; d++) {
			char str[32];

			sprintf(str, "Vq seg %i:", d - last_seg_idx);
			dump_packet_seg(str,
					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
		}
#endif

		last_seg_idx = seg_idx;
		/* packet copied out - mbuf ownership ends here */
		rte_pktmbuf_free(bufs[pkt]);
		pkts_sent++;
	}

#ifdef TX_CHAINING_DEBUG
	printf("\nTx final vq setup:\n");
	for (int i = 0; i < nb_segs; i++) {
		int idx = VIRTQ_DESCR_IDX(i);

		if (cvq_desc.vq_type == SPLIT_RING) {
			printf("virtq descr %i, len %i, flags %04x, next %i\n",
			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
			       cvq_desc.s[idx].next);
		}
	}
#endif

exit_out:

	if (pkts_sent) {
#ifdef TX_CHAINING_DEBUG
		printf("Release virtq segs %i\n", nb_segs);
#endif
		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
	}
	return pkts_sent;
}
+
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Just allocate 1MB to hold all combined descr rings */
+		size = 0x100000;
+		void *virt = rte_malloc_socket("VirtQDescr", size, 4096,
+					       eth_dev->data->numa_node);
+		if (!virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0)
+			return -1;
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			rte_free(virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0)
+			return -1;
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (!hwq || hwq->vf_num == 0)
+		return;
+	hwq->vf_num = 0;
+}
+
/*
 * Full teardown of a HW virtio queue set: unmap the DMA region, clear the
 * in-use mark and free both the packet-buffer array and the backing memory.
 *
 * NOTE(review): always unmaps with ONE_G_SIZE, which matches the fast-path
 * mapping in allocate_hw_virtio_queues; for the fallback path (two smaller
 * mappings) this looks mismatched - confirm against nt_vfio_dma_unmap.
 *
 * @return 0 on success, -1 if the VFIO unmap fails (memory is then NOT freed).
 */
static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
{
	int vf_num = hwq->vf_num;

	void *virt = hwq->virt_queues_ctrl.virt_addr;

	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
				    ONE_G_SIZE);
	if (res != 0) {
		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
		       res, vf_num);
		return -1;
	}

	release_hw_virtio_queues(hwq);
	rte_free(hwq->pkt_buffers);
	rte_free(virt);
	return 0;
}
+
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[queue_id];
+
+	deallocate_hw_virtio_queues(&tx_q->hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[queue_id];
+
+	deallocate_hw_virtio_queues(&rx_q->hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
/*
 * ethdev .rx_queue_setup callback for the scatter-gather path.
 *
 * For PORT_TYPE_OVERRIDE ports only the mempool/buffer bookkeeping is done.
 * Otherwise a HW virtio queue is allocated and a managed RX virt queue is
 * set up on the exception path host id.
 *
 * @return 0 on success, -1 if HW queue allocation fails.
 */
static int
eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc __rte_unused,
		       unsigned int socket_id __rte_unused,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mb_pool)
{
	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct pmd_internals *internals = eth_dev->data->dev_private;
	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;

	/* Override ports need no HW virt queue - just record pool and buf size */
	if (internals->type == PORT_TYPE_OVERRIDE) {
		rx_q->mb_pool = mb_pool;
		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
					    RTE_PKTMBUF_HEADROOM);
		rx_q->enabled = 1;
		return 0;
	}

	NT_LOG(DBG, ETHDEV,
	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
	       internals->port, rx_queue_id, rx_q->queue.hw_id);

	rx_q->mb_pool = mb_pool;

	eth_dev->data->rx_queues[rx_queue_id] = rx_q;

	/* usable data room per mbuf (excluding headroom) */
	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				    RTE_PKTMBUF_HEADROOM);
	rx_q->enabled = 1;

	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
				      SG_NB_HW_RX_DESCRIPTORS,
				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
		return -1;

	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;

	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;

	/*
	 * NOTE(review): the return value of nthw_setup_managed_rx_virt_queue
	 * is not checked - rx_q->vq may be left NULL on failure; confirm
	 * whether callers tolerate that.
	 */
	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
		rx_q->queue.hw_id, /* index */
		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
		1, /* header NT DVIO header for exception path */
		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);

	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
	       internals->port);

	return 0;
}
+
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	if (tx_queue_id > internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+	int ret = -1;
+
+	if (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE &&
+			mtu <= MAX_MTU)
+		ret = flow_set_mtu_inline(flw_dev, internals->port, mtu);
+	return ret ? -EINVAL : 0;
+}
+
/*
 * ethdev .mtu_set callback (EPP-based, non-inline path).
 *
 * Virtual ports: the exception queue keeps MAX_MTU while every per-VM queue
 * gets the requested mtu. Physical ports: the exception queue keeps MAX_MTU
 * and the port itself gets the requested mtu.
 *
 * NOTE(review): on early range failure this returns -EINVAL, but nthw_epp_set_mtu
 * failures are returned as-is and the initial value is the positive EINVAL -
 * confirm the intended sign convention of the propagated error codes.
 *
 * @return 0 on success, negative errno (or propagated EPP status) on failure.
 */
static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	/*struct ntnic_tx_queue *tx_q = internals->txq; */
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
	int retval = EINVAL;

	if (mtu < MIN_MTU || mtu > MAX_MTU)
		return -EINVAL;

	if (internals->type == PORT_TYPE_VIRTUAL) {
		/* set MTU on exception to MAX_MTU */
		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->rxq_scg[0]
			.queue
			.hw_id, /* exception tx queue hw_id to OVS */
			MAX_MTU, /* max number of bytes allowed for a given port. */
			internals->type); /* port type */

		if (retval)
			return retval;

		uint i;

		/* apply the requested MTU to each virtual-port queue */
		for (i = 0; i < internals->vpq_nb_vq; i++) {
			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
				internals->vpq[i].hw_id, /* tx queue hw_id */
				mtu, /* max number of bytes allowed for a given port. */
				internals->type); /* port type */
			if (retval)
				return retval;

			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
			       mtu, internals->vpq[i].hw_id);
		}
	} else if (internals->type == PORT_TYPE_PHYSICAL) {
		/* set MTU on exception to MAX_MTU */
		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->rxq_scg[0]
			.queue
			.hw_id, /* exception tx queue hw_id to OVS */
			MAX_MTU, /* max number of bytes allowed for a given port. */
			PORT_TYPE_VIRTUAL); /* port type */
		if (retval)
			return retval;

		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->port, /* port number */
			mtu, /* max number of bytes allowed for a given port. */
			internals->type); /* port type */

		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
		       internals->port);
	} else {
		/* PORT_TYPE_OVERRIDE and others: MTU change not supported */
		NT_LOG(DBG, ETHDEV,
		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
		       internals->port, internals->type);
		retval = -EINVAL;
	}
	return retval;
}
+
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return;
+	}
+	(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+}
+
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	eth_addrs[index] = *mac_addr;
+
+	return 0;
+}
+
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	eth_addrs[0U] = *mac_addr;
+
+	return 0;
+}
+
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
/*
 * ethdev .dev_configure callback.
 *
 * Marks the probe as finished and forces promiscuous mode on.
 *
 * NOTE(review): `x ^= ~x` sets every bit of the field; for a 1-bit
 * bitfield this always yields 1, i.e. promiscuous is unconditionally
 * enabled regardless of the previous value - confirm the field width
 * before simplifying to a plain `= 1`.
 *
 * @return always 0.
 */
static int eth_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	struct drv_s *p_drv = internals->p_drv;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
	       __func__, __LINE__, eth_dev);

	p_drv->probe_finished = 1;

	/* The device is ALWAYS running promiscuous mode. */
	eth_dev->data->promiscuous ^= ~eth_dev->data->promiscuous;
	return 0;
}
+
/*
 * ethdev .dev_start callback.
 *
 * Virtual/override ports are reported up immediately. For physical ports
 * the function polls the FPGA link state (up to ~5 s) before returning,
 * because transmitting before the FPGA port is ready produces garbage,
 * and then applies any requested loopback modes.
 *
 * @return always 0 (a link timeout is logged, not treated as an error).
 */
static int eth_dev_start(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
	       __LINE__, internals->n_intf_no, internals->if_index);

	if (internals->type == PORT_TYPE_VIRTUAL ||
			internals->type == PORT_TYPE_OVERRIDE) {
		/* no physical link to wait for */
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
	} else {
		/*
		 * wait for link on port
		 * If application starts sending too soon before FPGA port is ready, garbage is
		 * produced
		 */
		int loop = 0;

		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
				ETH_LINK_DOWN) {
			/* break out after 5 sec */
			if (++loop >= 50) {
				NT_LOG(DBG, ETHDEV,
				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
				       __func__, internals->n_intf_no);
				break;
			}
			usleep(100000);
		}
		assert(internals->n_intf_no ==
		       internals->if_index); /* Sanity check */
		if (internals->lpbk_mode) {
			/* bit 0: host-side loopback, bit 1: line-side loopback */
			if (internals->lpbk_mode & 1 << 0) {
				nt4ga_port_set_loopback_mode(p_adapter_info,
							     n_intf_no,
							     NT_LINK_LOOPBACK_HOST);
			}
			if (internals->lpbk_mode & 1 << 1) {
				nt4ga_port_set_loopback_mode(p_adapter_info,
							     n_intf_no,
							     NT_LINK_LOOPBACK_LINE);
			}
		}
	}
	return 0;
}
+
+/*
+ * DPDK dev_stop callback.
+ * Releases the managed virt queues of a physical port, administratively
+ * disables it and forces the reported link state down.
+ */
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *priv =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct adapter_info_s *adapter =
+			&priv->p_drv->ntdrv.adapter_info;
+	const int port = priv->if_index;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, priv->n_intf_no, priv->if_index,
+	       priv->type);
+
+	if (priv->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rxq = priv->rxq_scg;
+		struct ntnic_tx_queue *txq = priv->txq_scg;
+		uint i;
+
+		/* Hand all managed virt queues back to the queue manager */
+		for (i = 0; i < priv->nb_rx_queues; i++)
+			nthw_release_managed_rx_virt_queue(rxq[i].vq);
+
+		for (i = 0; i < priv->nb_tx_queues; i++)
+			nthw_release_managed_tx_virt_queue(txq[i].vq);
+
+		/* Disable the port and clear link status/speed/loopback */
+		nt4ga_port_set_adm_state(adapter, port, 0);
+		nt4ga_port_set_link_status(adapter, port, 0);
+		nt4ga_port_set_link_speed(adapter, port,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(adapter, port,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+/*
+ * DPDK dev_set_link_up callback.
+ * Administratively enables a physical port; no-op for virtual/override ports.
+ */
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const priv = dev->data->dev_private;
+	struct adapter_info_s *adapter =
+			&priv->p_drv->ntdrv.adapter_info;
+	const int port = priv->if_index;
+
+	if (priv->type == PORT_TYPE_VIRTUAL ||
+			priv->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	/* Sanity checks: valid port index matching the interface number */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == priv->n_intf_no);
+
+	nt4ga_port_set_adm_state(adapter, port, true);
+
+	return 0;
+}
+
+/*
+ * DPDK dev_set_link_down callback.
+ * Forces the link status of a physical port down; no-op for
+ * virtual/override ports.
+ */
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const priv = dev->data->dev_private;
+	struct adapter_info_s *adapter =
+			&priv->p_drv->ntdrv.adapter_info;
+	const int port = priv->if_index;
+
+	if (priv->type == PORT_TYPE_VIRTUAL ||
+			priv->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	/* Sanity checks: valid port index matching the interface number */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == priv->n_intf_no);
+
+	nt4ga_port_set_link_status(adapter, port, false);
+
+	return 0;
+}
+
+/*
+ * DPDK dev_close callback.
+ * Stops the LAG thread (if any), frees the port private data and releases
+ * the eth_dev. When the last eth_dev of the adapter is closed, the stat
+ * (and, for inline profile, FLM) threads are joined and the adapter is
+ * deinitialized.
+ */
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup: stop the LAG thread before freeing its config */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 * Note: p_drv is dereferenced unconditionally above, so the former
+	 * trailing "&& p_drv" null test here was dead code and is dropped.
+	 */
+	if (p_drv->n_eth_dev_init_count == 0) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		/* Join the statistics thread before tearing down the adapter */
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+/*
+ * DPDK fw_version_get callback.
+ * Formats the FPGA image id "type-prod-ver-rev" into fw_version.
+ * Returns 0 on success, the required buffer size (incl. terminator) if
+ * fw_size is too small, or -1 on formatting error.
+ */
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	/* Virtual/override ports carry no FPGA image - report empty string */
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		if (fw_version && fw_size > 0)
+			fw_version[0] = '\0';
+		return 0;
+	}
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	/*
+	 * Guard the snprintf error case: a negative length cast to size_t
+	 * would otherwise wrap to a huge value and fall through incorrectly.
+	 */
+	if (length < 0)
+		return -1;
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+/*
+ * DPDK xstats_get callback.
+ * Reads the extended statistics, serialized against the stat thread via
+ * the driver statistics lock.
+ */
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *priv =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *nt_drv = &priv->p_drv->ntdrv;
+	nt4ga_stat_t *stat = &nt_drv->adapter_info.nt4ga_stat;
+	const int port = priv->if_index;
+	int cnt;
+
+	pthread_mutex_lock(&nt_drv->stat_lck);
+	cnt = nthw_xstats_get(stat, stats, n,
+			      stat->mp_nthw_stat->mb_is_vswitch, port);
+	pthread_mutex_unlock(&nt_drv->stat_lck);
+	return cnt;
+}
+
+/*
+ * DPDK xstats_get_by_id callback.
+ * Reads the selected extended statistics, serialized against the stat
+ * thread via the driver statistics lock.
+ */
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *priv =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *nt_drv = &priv->p_drv->ntdrv;
+	nt4ga_stat_t *stat = &nt_drv->adapter_info.nt4ga_stat;
+	const int port = priv->if_index;
+	int cnt;
+
+	pthread_mutex_lock(&nt_drv->stat_lck);
+	cnt = nthw_xstats_get_by_id(stat, ids, values, n,
+				    stat->mp_nthw_stat->mb_is_vswitch, port);
+	pthread_mutex_unlock(&nt_drv->stat_lck);
+	return cnt;
+}
+
+/*
+ * DPDK xstats_reset callback.
+ * Clears the extended statistics under the statistics lock, then resets
+ * the basic DPDK counters.
+ */
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *priv =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *nt_drv = &priv->p_drv->ntdrv;
+	nt4ga_stat_t *stat = &nt_drv->adapter_info.nt4ga_stat;
+	const int port = priv->if_index;
+
+	pthread_mutex_lock(&nt_drv->stat_lck);
+	nthw_xstats_reset(stat, stat->mp_nthw_stat->mb_is_vswitch, port);
+	pthread_mutex_unlock(&nt_drv->stat_lck);
+	return dpdk_stats_reset(priv, nt_drv, port);
+}
+
+/*
+ * DPDK xstats_get_names callback.
+ * Resolves the names of all extended statistics.
+ * Note: eth_dev was previously annotated __rte_unused although it is
+ * dereferenced below; the bogus attribute is removed.
+ */
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+/*
+ * DPDK xstats_get_names_by_id callback.
+ * Resolves names for the selected extended statistics.
+ */
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *priv =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	nt4ga_stat_t *stat = &priv->p_drv->ntdrv.adapter_info.nt4ga_stat;
+
+	return nthw_xstats_get_names_by_id(stat, xstats_names, ids, size,
+					   stat->mp_nthw_stat->mb_is_vswitch);
+}
+
+/* rte_flow ops getter: exposes the PMD's flow operations table. */
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+/*
+ * DPDK promiscuous_enable callback.
+ * Promiscuous mode is permanently enabled in this device, so the call
+ * only logs and reports success.
+ */
+static int promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+/*
+ * DPDK rss_hash_update callback.
+ * Programs the default hash recipe from the DPDK RSS field selection and
+ * flushes it to the HSH module.
+ */
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *priv =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = priv->flw_dev->ndev;
+	/* hsh index 0 means the default receipt in HSH module */
+	const int hsh_idx = 0;
+	int res;
+
+	res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					 nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+/*
+ * DPDK rss_hash_conf_get callback.
+ * Reports the current RSS configuration; no RSS key is exposed, only the
+ * active hash field selection.
+ */
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	/*
+	 * Assign (not OR-merge): a "get" must report the device state.
+	 * The previous |= let stale bits in the caller-provided struct
+	 * leak into the reported configuration.
+	 */
+	rss_conf->rss_hf =
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
+/*
+ * DPDK eth_dev callback table.
+ * Deliberately non-const: .mtu_set is patched in at probe time when the
+ * FPGA exposes an EPP module (see nthw_pci_dev_init).
+ */
+static struct eth_dev_ops nthw_eth_dev_ops = {
+	.dev_configure = eth_dev_configure,
+	.dev_start = eth_dev_start,
+	.dev_stop = eth_dev_stop,
+	.dev_set_link_up = eth_dev_set_link_up,
+	.dev_set_link_down = eth_dev_set_link_down,
+	.dev_close = eth_dev_close,
+	.link_update = eth_link_update,
+	.stats_get = eth_stats_get,
+	.stats_reset = eth_stats_reset,
+	.dev_infos_get = eth_dev_infos_get,
+	.fw_version_get = eth_fw_version_get,
+	.rx_queue_setup = eth_rx_scg_queue_setup,
+	.rx_queue_start = eth_rx_queue_start,
+	.rx_queue_stop = eth_rx_queue_stop,
+	.rx_queue_release = eth_rx_queue_release,
+	.tx_queue_setup = eth_tx_scg_queue_setup,
+	.tx_queue_start = eth_tx_queue_start,
+	.tx_queue_stop = eth_tx_queue_stop,
+	.tx_queue_release = eth_tx_queue_release,
+	.mac_addr_remove = eth_mac_addr_remove,
+	.mac_addr_add = eth_mac_addr_add,
+	.mac_addr_set = eth_mac_addr_set,
+	.set_mc_addr_list = eth_set_mc_addr_list,
+	.xstats_get = eth_xstats_get,
+	.xstats_get_names = eth_xstats_get_names,
+	.xstats_reset = eth_xstats_reset,
+	.xstats_get_by_id = eth_xstats_get_by_id,
+	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
+	.mtu_set = NULL, /* assigned during probe when mp_nthw_epp is present */
+	.mtr_ops_get = eth_mtr_ops_get,
+	.flow_ops_get = _dev_flow_ops_get,
+	.promiscuous_disable = NULL, /* device always runs promiscuous mode */
+	.promiscuous_enable = promiscuous_enable,
+	.rss_hash_update = eth_dev_rss_hash_update,
+	.rss_hash_conf_get = rss_hash_conf_get,
+};
+
+/*
+ * Converts a link speed given in Mbps to the NT specific definition.
+ * Unrecognized values map to NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	/* Cases listed in ascending Mbps order */
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ * Waits for the first flow eth device to be attached, then pumps FLM
+ * meter statistics until driver shutdown.
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	/* Poll once per second until a flow eth device is attached */
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	/* Back off 10 us whenever an update pass processed nothing */
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ * Every 10 ms triggers a statistics DMA snapshot, waits for it to
+ * complete, and collects the result under the statistics lock.
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		/* One collection cycle every 10 ms */
+		usleep(100 * 100);
+
+		/* Kick off a statistics DMA transfer */
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		/*
+		 * Wait for completion: *mp_timestamp stays (uint64_t)-1
+		 * until the snapshot has been delivered.
+		 */
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			/* With debug logging on, dump RMC status every 0x4000 polls */
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
+/*
+ * State used when setting up virtual PF representor ports.
+ * NOTE(review): field semantics inferred from names - confirm against
+ * setup_virtual_pf_representor_base().
+ */
+static struct {
+	struct rte_pci_device *vpf_dev; /* presumably the backing PF PCI device */
+	struct rte_eth_devargs eth_da; /* parsed representor devargs */
+	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED]; /* queues per virtual port */
+	uint16_t pf_backer_port_id; /* DPDK port id backing the representors */
+} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Release all PMD queue resources ahead of device removal.
+ * The eth_dev argument is unused: teardown walks the global
+ * pmd_intern_base list instead of a single port.
+ * Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Tear down Tx then Rx queues for every port on the global list */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release any virt queues still tracked by the VF/vDPA table */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/* SIGINT handler: request an orderly PMD shutdown; re-deliver other signals */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		/* Picked up by shutdown_thread's polling loop */
+		kill_pmd = 1;
+		return;
+	}
+	/* Not ours: restore the previous handler and raise the signal again */
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/* Background thread: waits for the SIGINT flag, then deinits and re-raises */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	/* Poll until the SIGINT handler requests a shutdown */
+	for (;;) {
+		if (kill_pmd)
+			break;
+		usleep(100000);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	/* Hand SIGINT back to the original handler and re-deliver it */
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT shutdown machinery and calibrate rte_tsc_freq.
+ * Returns 0 on success, -1 if the shutdown thread could not be started
+ * (in which case the previous SIGINT handler is restored).
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* Fix: the pthread_create() result was previously ignored */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Failed to start shutdown thread\n",
+		       __func__);
+		signal(SIGINT, previous_handler);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point: validates the EAL environment (PCI, VFIO IOMMU,
+ * hugepages, IOVA mode), rejects bifurcated secondary endpoints, then
+ * initializes the adapter and installs the shutdown handler.
+ * Returns 0 on success, negative on failure.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		/* Fix: message previously said "needs hugepages" (copy-paste) */
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs PCI support\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		/* VA mode works but is slower; warn only */
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/* On bifurcated adapters only the primary endpoint is driven */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove entry point: delegates teardown to nthw_pci_dev_deinit() */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	const int res = rte_eth_dev_pci_generic_remove(pci_dev,
+						       nthw_pci_dev_deinit);
+
+	return res;
+}
+
+/* Register one EAL logtype per NT log module; default level is INFO */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END;
+			module++) {
+		const int index = NT_LOG_MODULE_INDEX(module);
+		const char *const eal_name = nt_log_module_eal_name[index];
+
+		nt_log_module_logtype[index] =
+			rte_log_register_type_and_pick_level(eal_name,
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+__rte_format_printf(3, 0)
+/* Forward an NT log message to the EAL logger, translating level and module */
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	/* Translate NT severity to the matching EAL severity (default DEBUG) */
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	/* Known NT modules map to their registered logtype; others pass through */
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		const int index = NT_LOG_MODULE_INDEX(module);
+
+		rte_module = (uint32_t)nt_log_module_logtype[index];
+	} else {
+		rte_module = module;
+	}
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/* Return 1 if the module logs at DEBUG, 0 if not, -1 for an unknown module */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+
+	const int index = NT_LOG_MODULE_INDEX(module);
+	const int level = rte_log_get_level(nt_log_module_logtype[index]);
+
+	return level == RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* Constructor: hook the NT logging abstraction up to the EAL logging backend */
+static void ntnic_rte_init(void)
+{
+	/* static: nt_log_init() keeps a reference to this vtable */
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor for the NTNIC PMD */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING, /* BARs must be mapped by EAL */
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record a virt queue in the first free slot of the release-tracking table
+ * so it can be cleaned up at deinit. Returns 0 on success, -1 if full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+
+	/* Tracking table exhausted */
+	return -1;
+}
+
+/* Remove a virt queue from the release-tracking table; -1 if not found */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+	return -1;
+}
+
+/* Find the pmd_internals instance with the given vhost id, or NULL */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *p;
+
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/* Find the pmd_internals instance with the given vhost path, or NULL */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		/* Fix: compute strcmp once (was done twice per node) */
+		const int cmp = strcmp(intern->vhost_path, path);
+
+		/* Fix: leftover raw printf() debug output routed to NT_LOG */
+		NT_LOG(DBG, VDPA, "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path, cmp);
+		if (cmp == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues=[port:queues,port:queues,...]" devargs fragment and
+ * fill portq[] with the requested queue count per virtual port. The string
+ * is modified in place. Out-of-range entries are silently ignored.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Locate the opening bracket of the mapping list */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Locate the matching closing bracket, scanning backwards */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Fix: strtok_r instead of strtok - keeps the parser reentrant */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor/portqueues devargs on the PF device and record them in
+ * the global 'rep' state. Returns the number of representor ports requested
+ * (0 when none), or -1 on a devargs parse error.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	/*
+	 * Fix: zero the whole struct - the original set only
+	 * nb_representor_ports, yet the entire struct is copied into the
+	 * global 'rep' state below.
+	 */
+	struct rte_eth_devargs eth_da = { 0 };
+
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			*ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual port (VF representor or vDPA backend),
+ * attach private data of the requested size and decide its representor id.
+ * On return *n_vq holds the queue count configured for that representor
+ * (from the portqueues devargs mapping, defaulting to 1).
+ * Returns the ethdev or NULL on allocation failure.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	/* Monotonic index into the PF's representor_ports list */
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Prefer ids declared via representor=[...] on the PF; else the VF number */
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* Queue count per representor from the portqueues mapping, default 1 */
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the underlying rte_device name, or NULL when unavailable */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev && dev->device.name) ? dev->device.name : NULL;
+}
+
+/* Devargs keys accepted by the virtual-port backend (NULL-terminated) */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and initialize a virtual-port ethdev (vDPA backend / VF
+ * representor) on top of the given PCI device: parse devargs, allocate the
+ * ethdev and its private data, allocate HW queues, start the vDPA relay and
+ * link the instance into the global pmd_intern_base list.
+ * Returns 0 on success, -1 on failure.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* NOTE(review): vdev was already dereferenced above, so the vdev
+	 * check here is redundant but kept for safety.
+	 */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				rte_kvargs_free(kvlist);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				rte_kvargs_free(kvlist);
+				goto error;
+			}
+		}
+
+		/* Fix: the kvlist was previously leaked on every path */
+		rte_kvargs_free(kvlist);
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		/* Fix: was 'return -1', which leaked 'data' */
+		goto error;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* Inline profile: share the flow device of the physical in-port */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* Fix: bounded snprintf instead of sprintf */
+	if (!separate_socket) {
+		snprintf(path, sizeof(path), "%sstdvio%i", DVIO_VHOST_DIR_NAME,
+			 port);
+	} else {
+		snprintf(path, sizeof(path), "%sstdvio%i/stdvio%i",
+			 DVIO_VHOST_DIR_NAME, port, port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append this instance to the global pmd_internals list */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	/* rte_free(NULL) is a no-op, so no guards needed.
+	 * NOTE(review): 'internals' is also the ethdev's dev_private and the
+	 * ethdev itself is not released here - pre-existing behavior kept.
+	 */
+	rte_free(data);
+	rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0; /* always: no packets are ever received on this path */
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0; /* always: no packets are ever transmitted on this path */
+}
+
+/*
+ * Create a virtual-function DPDK ethdev on the given PCI device and install
+ * the appropriate burst handlers. Returns 0 on success, -1 on failure.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	/* OVERRIDE ports move traffic via VF/vDPA, so SCG burst fns are stubs */
+	const int use_dummy = internals->type == PORT_TYPE_OVERRIDE;
+
+	eth_dev->rx_pkt_burst = use_dummy ? eth_dev_rx_scg_dummy :
+			       eth_dev_rx_scg;
+	eth_dev->tx_pkt_burst = use_dummy ? eth_dev_tx_scg_dummy :
+			       eth_dev_tx_scg;
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual-function ethdev created for the given PCI device:
+ * close all vDPA devices, then free and release the ethdev.
+ * Returns 0 on success, -1 if the device or its ethdev cannot be found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	/* Free the replacement data struct installed by init (and its private
+	 * data) before handing the port back to ethdev
+	 */
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	/* Unchanged link state: no reconfiguration needed */
+	return 0;
+}
+
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	/* Split the hash distribution 50/50 between the two ports */
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	/* 100% of the hash distribution goes to port 0 */
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	/* 0% of the hash distribution to port 0, i.e. all traffic on port 1 */
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/* Disable Tx on both table entries (no usable link on either port) */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	const int rc_port = lag_set_config(0, FLOW_LAG_SET_ALL,
+					   LAG_PORT0_INDEX, LAG_NO_TX);
+	const int rc_hash = lag_set_config(0, FLOW_LAG_SET_ALL,
+					   LAG_HASH_INDEX, LAG_NO_TX);
+
+	return rc_port + rc_hash;
+}
+
+/* Query the adapter for the link state of one LAG member port */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	bool link_up;
+
+	link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/* Pack both link states into a bitmask: port 0 -> bit 0, port 1 -> bit 1 */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t port0 = lag_get_link_status(config, 0);
+	const uint8_t port1 = lag_get_link_status(config, 1);
+
+	return (port1 << 1) | port0;
+}
+
+/*
+ * Route all traffic to the configured primary port: it takes the full hash
+ * distribution and Rx is blocked on the other (backup) port.
+ * Returns the accumulated result of the two configuration calls.
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	const int primary_is_port0 = (config->primary_port == 0);
+	/* Port 0's share of the hash distribution: all or nothing */
+	const uint8_t port_0_distribution = primary_is_port0 ? 100 : 0;
+	const uint8_t blocked_port = primary_is_port0 ? LAG_PORT1_INDEX :
+				    LAG_PORT0_INDEX;
+
+	retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Route all traffic to the configured backup port: it takes the full hash
+ * distribution and Rx is blocked on the (failed) primary port.
+ * Returns the accumulated result of the two configuration calls.
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	const int backup_is_port0 = (config->backup_port == 0);
+	/* Port 0's share of the hash distribution: all or nothing */
+	const uint8_t port_0_distribution = backup_is_port0 ? 100 : 0;
+	const uint8_t blocked_port = backup_is_port0 ? LAG_PORT1_INDEX :
+				    LAG_PORT0_INDEX;
+
+	/* Tx only on the backup port */
+	retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup monitor loop: polls the primary port's link every 500 ms
+ * and fails over to the backup port (and back) as the link changes.
+ * Runs until config->lag_thread_active is cleared; always returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	/* Fix: 0b literals are a GCC extension (standard only from C23);
+	 * use plain decimal values instead.
+	 */
+	P0DOWN_P1DOWN = 0, /* 0b00 */
+	P0UP_P1DOWN = 1,   /* 0b01 */
+	P0DOWN_P1UP = 2,   /* 0b10 */
+	P0UP_P1UP = 3      /* 0b11 */
+};
+
+/* One row of the LAG active/active link-state transition table */
+struct lag_action_s {
+	enum lag_state_e src_state; /* link state before the change */
+	enum lag_state_e dst_state; /* link state after the change */
+	lag_aa_action action; /* reconfiguration applied on this transition */
+};
+
+/*
+ * LAG active/active transition table, covering all 16 src->dst pairs.
+ * Fix: made static const - it is only consulted by lookup_action() and
+ * previously polluted the global namespace as a mutable object.
+ */
+static const struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/* Find the handler registered for a src -> dst transition, or NULL */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const size_t n_actions = sizeof(actions) / sizeof(actions[0]);
+	size_t idx;
+
+	for (idx = 0; idx < n_actions; idx++) {
+		if (actions[idx].src_state == current_state &&
+				actions[idx].dst_state == new_state)
+			return actions[idx].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active monitor loop: polls both links every 500 ms and applies the
+ * transition-table action matching each state change.
+ * Runs until config->lag_thread_active is cleared; always returns 0.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/*
+		 * Fix: lookup_action() may return NULL and calling a NULL
+		 * function pointer is undefined behavior. The table currently
+		 * covers every transition, but guard defensively.
+		 */
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry: dispatches to the monitor loop matching the
+ * configured bonding mode. Returns NULL when the loop ends or the mode is
+ * unsupported.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		/* Fix: use the driver logger instead of raw stderr output */
+		NT_LOG(ERR, ETHDEV, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
/*
 * NOTE(review): SG_HDR_SIZE is already defined identically further up in
 * this header — the redefinition is benign (same token sequence) but one
 * of the two should be removed.
 */
#define SG_HDR_SIZE 12

/*
 * Rx scatter-gather packet descriptor header, prepended to received data.
 * Bit-field layout totals 96 bits = 12 bytes; verified against
 * SG_HDR_SIZE by the compile-time checks below.
 */
struct _pkt_hdr_rx {
	uint32_t cap_len : 14;   /* captured length — TODO confirm units/scope */
	uint32_t fid : 10;
	uint32_t ofs1 : 8;
	uint32_t ip_prot : 8;    /* presumably IP protocol number; verify */
	uint32_t port : 13;
	uint32_t descr : 8;
	uint32_t descr_12b : 1;
	uint32_t color_type : 2;
	uint32_t color : 32;
};
+
/*
 * Tx scatter-gather packet descriptor header, prepended to transmitted
 * data. Bit-field layout totals 96 bits = 12 bytes; verified against
 * SG_HDR_SIZE by the compile-time checks below.
 */
struct _pkt_hdr_tx {
	uint32_t cap_len : 14;
	uint32_t lso_cso0 : 9;
	uint32_t lso_cso1 : 9;
	uint32_t lso_cso2 : 8;
	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
	uint32_t bypass_port : 13;
	uint32_t descr : 8;
	uint32_t descr_12b : 1;
	uint32_t color_type : 2;
	uint32_t color : 32;
};
+
+/* Compile time verification of scatter gather header size. */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+typedef void *handle_t;
+
/* Hardware queue resources tied to a virtual function. */
struct hwq_s {
	int vf_num;                                     /* owning VF number */
	struct nthw_memory_descriptor virt_queues_ctrl; /* control memory for the virt queues */
	struct nthw_memory_descriptor *pkt_buffers;     /* packet buffer descriptors */
};
+
/* Per-queue Rx state: hw queue mapping, mbuf pool and statistics. */
struct ntnic_rx_queue {
	struct flow_queue_id_s
		queue; /* queue info - user id and hw queue index */

	struct rte_mempool *mb_pool; /* mbuf memory pool */
	uint16_t buf_size; /* size of data area in mbuf */
	unsigned long rx_pkts; /* Rx packet statistics */
	unsigned long rx_bytes; /* Rx bytes statistics */
	unsigned long err_pkts; /* Rx error packet statistics */
	int enabled; /* Enabling/disabling of this queue */

	struct hwq_s hwq; /* hardware queue resources */
	struct nthw_virt_queue *vq; /* associated virt queue */
	int nb_hw_rx_descr; /* number of hw Rx descriptors */
	nt_meta_port_type_t type;
	uint32_t port; /* Rx port for this queue */
	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */

} __rte_cache_aligned;
+
/* Per-queue Tx state: hw queue mapping, bypass target and statistics. */
struct ntnic_tx_queue {
	struct flow_queue_id_s
		queue; /* queue info - user id and hw queue index */
	struct hwq_s hwq; /* hardware queue resources */
	struct nthw_virt_queue *vq; /* associated virt queue */
	int nb_hw_tx_descr; /* number of hw Tx descriptors */
	/* Used for bypass in NTDVIO0 header on Tx - pre calculated */
	int target_id;
	nt_meta_port_type_t type;
	/* only used for exception tx queue from OVS SW switching */
	int rss_target_id;

	uint32_t port; /* Tx port for this queue */
	unsigned long tx_pkts; /* Tx packet statistics */
	unsigned long tx_bytes; /* Tx bytes statistics */
	unsigned long err_pkts; /* Tx error packet stat */
	int enabled; /* Enabling/disabling of this queue */
	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
} __rte_cache_aligned;
+
#define MAX_ARRAY_ENTRIES MAX_QUEUES
/* Fixed-capacity array of uint32_t values with an element count. */
struct array_s {
	uint32_t value[MAX_ARRAY_ENTRIES];
	int count; /* number of valid entries in value[] */
};
+
/* Configurations related to LAG management */
typedef struct {
	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */

	int8_t primary_port;
	int8_t backup_port;

	uint32_t ntpl_rx_id;

	pthread_t lag_tid; /* LAG management thread (lag_management()) */
	uint8_t lag_thread_active; /* cleared to stop the monitor loop */

	struct pmd_internals *internals;
} lag_config_t;
+
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
/* Meter profile list entry, keyed by profile_id. */
struct nt_mtr_profile {
	LIST_ENTRY(nt_mtr_profile) next;
	uint32_t profile_id;
	struct rte_mtr_meter_profile profile;
};
+
/* Meter instance list entry, referencing its profile. */
struct nt_mtr {
	LIST_ENTRY(nt_mtr) next;
	uint32_t mtr_id;
	int shared; /* non-zero when the meter is shared between flows */
	struct nt_mtr_profile *profile;
};
+
/* Negotiated communication mode for a virtual port. */
enum virt_port_comm {
	VIRT_PORT_NEGOTIATED_NONE,   /* no format negotiated yet */
	VIRT_PORT_NEGOTIATED_SPLIT,  /* split virtqueue format */
	VIRT_PORT_NEGOTIATED_PACKED, /* packed virtqueue format */
	VIRT_PORT_USE_RELAY          /* traffic goes through a relay */
};
+
+#define MAX_PATH_LEN 128
+
/*
 * Per-port driver-private state for the PMD: identity, queue arrays,
 * MAC address tables, LAG and meter bookkeeping. Linked into a list of
 * ports via the next pointer.
 */
struct pmd_internals {
	const struct rte_pci_device *pci_dev;

	struct flow_eth_dev *flw_dev;

	char name[20];
	char vhost_path[MAX_PATH_LEN];

	int n_intf_no;
	int if_index;

	int lpbk_mode; /* loopback mode */

	uint8_t nb_ports_on_adapter;
	uint8_t ts_multiplier; /* timestamp multiplier */
	uint16_t min_tx_pkt_size;
	uint16_t max_tx_pkt_size;

	unsigned int nb_rx_queues; /* Number of Rx queues configured */
	unsigned int nb_tx_queues; /* Number of Tx queues configured */
	uint32_t port;
	uint8_t port_id;

	nt_meta_port_type_t type;
	struct flow_queue_id_s vpq[MAX_QUEUES];
	unsigned int vpq_nb_vq;
	volatile atomic_int vhid; /* if a virtual port type - the vhid */
	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
	uint32_t vlan;

	lag_config_t *lag_config; /* non-NULL when port participates in LAG */

	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */

	struct drv_s *p_drv;
	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
	/* Multicast ethernet (MAC) addresses. */
	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];

	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
	LIST_HEAD(_mtrs, nt_mtr) mtrs;

	uint64_t last_stat_rtc; /* timestamp of last statistics poll */
	uint64_t rx_missed;

	struct pmd_internals *next; /* next port in the global list */
};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
/* Converted match items: fixed-capacity array of flow elements. */
struct cnv_match_s {
	struct flow_elem flow_elem[MAX_ELEMENTS];
};
+
/* Tunnel definition plus the converted match elements backing it. */
struct tun_def_s {
	struct flow_elem *tun_definition;
	struct cnv_match_s match;
};
+
/* Converted flow attributes together with their match storage. */
struct cnv_attr_s {
	struct cnv_match_s match;
	struct flow_attr attr;
};
+
/*
 * Converted flow actions plus backing storage for action payloads that
 * the flow_actions entries point into (RSS, mark, encap/decap, queue).
 */
struct cnv_action_s {
	struct flow_action flow_actions[MAX_ACTIONS];
	struct tun_def_s tun_def;
	struct flow_action_rss flow_rss;
	struct rte_flow_action_mark mark;
	struct flow_action_raw_encap encap;
	struct flow_action_raw_decap decap;
	struct flow_action_queue queue;
};
+
+/*
+ * This is only needed because it eases the use of statistics through the
+ * NTAPI, allowing faster integration into the NTAPI version of the driver.
+ * It is therefore only appropriate while running on a temporary NTAPI;
+ * the query() functionality must move to the flow engine when this code
+ * is moved to the open-source driver.
+ */
+
/* Driver-side representation of an rte_flow handle with cached stats. */
struct rte_flow {
	void *flw_hdl; /* underlying flow-engine handle */
	int used;      /* non-zero while this slot is in use */
	uint32_t flow_stat_id; /* slot in flow_stat_id_map (see below) */

	uint64_t stat_pkts;
	uint64_t stat_bytes;
	uint8_t stat_tcp_flags;
};
+
/*
 * Driver-private flow item types. Anchored at INT_MIN so they can never
 * collide with the public RTE_FLOW_ITEM_TYPE_* values.
 */
enum nt_rte_flow_item_type {
	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	NT_RTE_FLOW_ITEM_TYPE_TAG,
	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
};
+
/*
 * Driver-private flow action types. Anchored at INT_MIN so they can
 * never collide with the public RTE_FLOW_ACTION_TYPE_* values.
 */
enum nt_rte_flow_action_type {
	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	NT_RTE_FLOW_ACTION_TYPE_TAG,
	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
	NT_RTE_FLOW_ACTION_TYPE_JUMP,
};
+
+static int convert_tables_initialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error) {
+		error->cause = NULL;
+		error->message = flow_error->message;
+
+		if (flow_error->type == FLOW_ERROR_NONE ||
+				flow_error->type == FLOW_ERROR_SUCCESS)
+			error->type = RTE_FLOW_ERROR_TYPE_NONE;
+
+		else
+			error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+	}
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	rte_spinlock_lock(&flow_lock);
+	uint32_t ret = create_flow_stat_id_locked(mark);
+
+	rte_spinlock_unlock(&flow_lock);
+	return ret;
+}
+
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id < MAX_COLOR_FLOW_STATS)
+		flow_stat_id_map[flow_stat_id] = 0;
+}
+
/*
 * One-time initialization of the RTE -> internal conversion tables.
 *
 * Fills elem_list[] (RTE flow item type -> FLOW_ELEM_TYPE_*) and
 * action_list[] (RTE flow action type -> FLOW_ACTION_TYPE_*); unmapped
 * entries are left at -1 so CNV_TO_ELEM/CNV_TO_ACTION report them as
 * unsupported. With RTE_FLOW_DEBUG, parallel string tables are filled
 * for logging. Idempotent via convert_tables_initialized.
 *
 * NOTE(review): not thread-safe on first call — presumably only invoked
 * from a single configuration thread; confirm against callers.
 */
static void initialize_global_cnv_tables(void)
{
	if (convert_tables_initialized)
		return;

	/* Item (match element) mapping table. */
	memset(elem_list, -1, sizeof(elem_list));
	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;

#ifdef RTE_FLOW_DEBUG
	/* Human-readable names for the mapped item types (debug only). */
	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
#endif

	/* Action mapping table. */
	memset(action_list, -1, sizeof(action_list));
	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
		FLOW_ACTION_TYPE_VXLAN_ENCAP;
	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
		FLOW_ACTION_TYPE_VXLAN_DECAP;
	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
		FLOW_ACTION_TYPE_PUSH_VLAN;
	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
		FLOW_ACTION_TYPE_SET_VLAN_VID;
	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
		FLOW_ACTION_TYPE_SET_VLAN_PCP;
	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
		FLOW_ACTION_TYPE_POP_VLAN;
	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
		FLOW_ACTION_TYPE_RAW_ENCAP;
	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
		FLOW_ACTION_TYPE_RAW_DECAP;
	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
		FLOW_ACTION_TYPE_MODIFY_FIELD;

#ifdef RTE_FLOW_DEBUG
	/* Human-readable names for the mapped action types (debug only). */
	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
		"FLOW_ACTION_TYPE_SET_TAG";
	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
		"FLOW_ACTION_TYPE_PORT_ID";
	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
		"FLOW_ACTION_TYPE_VXLAN_DECAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
		"FLOW_ACTION_TYPE_PUSH_VLAN";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
		"FLOW_ACTION_TYPE_SET_VLAN_VID";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
		"FLOW_ACTION_TYPE_POP_VLAN";
	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
		"FLOW_ACTION_TYPE_RAW_ENCAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
		"FLOW_ACTION_TYPE_RAW_DECAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
		"FLOW_ACTION_TYPE_MODIFY_FIELD";
#endif

	convert_tables_initialized = 1;
}
+
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				size_t ext_len = data[pkti] * 4;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(struct flow_attr));
+	if (attr) {
+		attribute->attr.group = attr->group;
+		attribute->attr.priority = attr->priority;
+	}
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE flow action array into the NTNIC filter action list used by
+ * the vSwitch (full offload) profile.
+ *
+ * action       - output: converted action list and per-action config storage
+ * actions[]    - input RTE actions, terminated by RTE_FLOW_ACTION_TYPE_END
+ * max_elem     - capacity of action->flow_actions[]
+ * flow_stat_id - output: color/flow statistics id; a MARK action supplies it,
+ *                otherwise one is allocated when END is reached
+ *
+ * Returns 0 on success, -1 on unknown action, RSS hash function mismatch,
+ * tunnel-definition conversion failure, or output overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	/* Lazily build the RTE->NTNIC conversion tables on first use */
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Sentinel meaning "no stat id assigned yet" */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			/* Only the PMD-private TUNNEL_SET action is accepted
+			 * among the types the table cannot translate.
+			 */
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		/* NOTE(review): if NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET is a
+		 * non-negative value, this indexes action_list_str[] with it,
+		 * which may be out of bounds — TODO confirm table size.
+		 */
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			/* NOTE(review): unreachable — this switch is guarded
+			 * by "type >= 0" above, so case -1 can never match.
+			 */
+			case -1:
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default RSS hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* NOTE(review): PRIX64 expects uint64_t but the
+				 * argument is cast to unsigned long long — TODO
+				 * use PRIX64 with a uint64_t cast (or %llX).
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				/* Point at the PMD-owned copy, not the caller's conf */
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				/* Convert the encap item template into flow elements */
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Map the user mark to a color stat id */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* NOTE(review): no bounds check before
+					 * this write — if aidx has just reached
+					 * max_elem the END entry is written one
+					 * past flow_actions[]; TODO confirm
+					 * callers pass max_elem with headroom.
+					 */
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				/* Compatible action: pass the caller's conf through */
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	/* 0 when the list was terminated by END, -1 on conversion failure */
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE flow action array into the NTNIC filter action list used by
+ * the inline profile.
+ *
+ * action       - output: converted action list and per-action config storage
+ * actions[]    - input RTE actions, terminated by RTE_FLOW_ACTION_TYPE_END
+ *                (NOTE(review): not NULL-checked here, unlike the vswitch
+ *                variant — presumably callers validate; TODO confirm)
+ * max_elem     - capacity of action->flow_actions[]
+ * queue_offset - added to every QUEUE action index to map main-PMD queue
+ *                numbers onto VF/vDPA queues
+ *
+ * Returns 0 on success, negative on unknown action, RSS hash function
+ * mismatch, raw encap/decap parse failure, or output overflow.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		/* Unlike the vswitch variant, aidx doubles as the input index:
+		 * input and output lists advance in lockstep here.
+		 */
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		/* NOTE(review): non-negative custom action values index
+		 * action_list_str[] — TODO confirm table covers them.
+		 */
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default RSS hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* NOTE(review): PRIX64 expects uint64_t but the
+				 * argument is cast to unsigned long long — TODO
+				 * align format and cast.
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw decap template into flow items */
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw encap template into flow items */
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Rebase queue index onto the VF/vDPA queue range */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible action: pass the caller's conf through */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	/* 0 when the list was terminated by END, -1 on conversion failure */
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+/* Capacity of the PMD's static flow-handle pool */
+#define MAX_RTE_FLOWS 8192
+/* NOTE(review): MAX_PORTIDS is not referenced in this file — TODO confirm use */
+#define MAX_PORTIDS 64
+
+/* The color-stat define must match the adapter-layer one exactly */
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Static pool of flow handles; slots are claimed/released under flow_lock */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	/*
+	 * A handle is "typecast" when it does not point into the static
+	 * nt_flows[] pool, i.e. it was returned directly by the lower flow
+	 * layer instead of being allocated from the PMD's own table.
+	 */
+	const void *pool_begin = &nt_flows[0];
+	const void *pool_end = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *hdl = flow;
+
+	return hdl < pool_begin || hdl > pool_end;
+}
+
+/*
+ * Convert RTE attr/items/actions into the NTNIC attribute, match and action
+ * structures for the adapter's active profile (inline or vswitch).
+ *
+ * On any failure, sets a specific rte_flow_error and returns -1; returns 0
+ * on success. flow_stat_id is only produced by the vswitch action path.
+ *
+ * Fix: the original dereferenced 'dev' (dev->p_drv, dev->type) before its
+ * NULL check, making the check dead code; the check now precedes all uses.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/* Validate the private data pointer before any dereference */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback: tear down a flow in the lower layer and, for
+ * pool-managed handles, release the PMD slot and its flow statistics id.
+ *
+ * Fix: the original cleared flow->stat_pkts/stat_bytes/stat_tcp_flags
+ * unconditionally, but when is_flow_handle_typecast(flow) is true the
+ * handle is a raw lower-layer pointer and NOT a struct rte_flow, so those
+ * stores wrote through an invalid struct layout. Stats are now cleared only
+ * for handles that live in the nt_flows[] pool.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/* Raw lower-layer handle: no PMD-side state to clean up */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+	} else {
+		res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+		convert_error(error, &flow_error);
+
+		rte_spinlock_lock(&flow_lock);
+		delete_flow_stat_id_locked(flow->flow_stat_id);
+		flow->used = 0;
+		rte_spinlock_unlock(&flow_lock);
+
+		/* Clear the flow statistics if successfully destroyed */
+		if (res == 0) {
+			flow->stat_pkts = 0UL;
+			flow->stat_bytes = 0UL;
+			flow->stat_tcp_flags = 0;
+		}
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: run the full conversion pipeline and ask the
+ * lower flow layer whether the resulting match/action lists are acceptable,
+ * without programming anything.
+ *
+ * Returns 0 when valid, -EINVAL on conversion failure, or the lower layer's
+ * negative status (error details propagated via 'error').
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	/* Allocated by the vswitch action path; unused by validation itself */
+	uint32_t flow_stat_id = 0;
+	int res;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	res = flow_validate(dev->flw_dev, match.flow_elem, action.flow_actions,
+			    &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * For the inline profile with group > 0 the lower-layer handle is returned
+ * directly (a "typecast" handle, see is_flow_handle_typecast()). Otherwise a
+ * slot is claimed from the static nt_flows[] pool under flow_lock and the
+ * lower-layer handle is stored in it.
+ *
+ * NOTE(review): 'flow_error' is function-static and shared across calls —
+ * looks unsafe if flows are created concurrently; TODO confirm callers
+ * serialize. Also, if no free pool slot is found (flow stays NULL), the
+ * flow_stat_id allocated by convert_flow() for the vswitch profile does not
+ * appear to be released — TODO confirm this is not a stat-id leak.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline profile, non-default group: hand back the raw handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim the first unused pool slot; only valid stat ids get a slot */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Lower layer refused the flow: release slot and stat id */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last *global* statistics refresh (color counters) */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull accumulated adapter statistics into the PMD's per-port counters.
+ *
+ * Rate limited two ways: per port (internals->last_stat_rtc) and globally
+ * (last_stat_rtc), each to at most once per second of TSC time. All Rx/Tx
+ * deltas are folded into queue 0 of the respective scg arrays. The global
+ * pass also distributes color (flow) counters to the nt_flows[] entries.
+ *
+ * Lock order: hwlock spinlock first, then p_nt_drv->stat_lck mutex; every
+ * return path releases what it has taken.
+ *
+ * Returns 0 on success (including rate-limited no-op), -1 on bad arguments.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/* NOTE(review): "> NUM_ADAPTER_PORTS_MAX" looks like an off-by-one —
+	 * if the macro is a count, the check should be ">="; TODO confirm.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold each flow's color counter delta into its nt_flows[] entry */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow query callback. Only RTE_FLOW_ACTION_TYPE_COUNT is supported:
+ * refreshes statistics via poll_statistics() and reports the flow's packet
+ * and byte counts, optionally resetting them when qcnt->reset is set.
+ *
+ * Raw lower-layer ("typecast") handles carry no PMD-side counters and are
+ * rejected with EFAULT. Returns 0 on success, -1 with 'err' set otherwise.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* NOTE: a NULL flow also lands here (NULL is outside the pool) */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Make sure counters are at most one second stale */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug helper: dump the fields of an rte_flow_tunnel to the filter log */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	/* inet_ntoa() returns a static buffer, so print one address per call */
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * PMD action pair handed out by ntnic_tunnel_decap_set(): entry [0] is the
+ * private TUNNEL_SET marker; entry [1] is filled in at runtime with the
+ * tunnel-specific decap action (see ntnic_tunnel_decap_set()).
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel_decap_set callback: return the PMD action list that
+ * implements decap for the given tunnel. Only VXLAN is supported.
+ *
+ * NOTE(review): this writes into the shared static _pmd_actions[] and hands
+ * it to the caller, so concurrent calls (or callers with different tunnel
+ * types) would race on it — TODO confirm the ethdev layer serializes this.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* Single static match item handed out by ntnic_tunnel_match(): the
+ * PMD-private TUNNEL item with no spec/last/mask.
+ */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel_match callback: every tunnel maps to the same single
+ * PMD-private TUNNEL match item, regardless of tunnel contents.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* Hand back the one static tunnel item */
+	*num_of_items = 1;
+	*pmd_items = &_pmd_items;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow get_restore_info callback: reconstruct tunnel metadata for a
+ * partially-offloaded packet from its FDIR mark.
+ *
+ * The mark encodes the ingress port in bits 31..24 and the flow stat id in
+ * the low 24 bits; the full tunnel definition is looked up in the adapter
+ * via flow_get_tunnel_definition(). Returns 0 with 'info' filled on a hit,
+ * -EINVAL when the mbuf carries no usable mark or the lookup fails.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		/* Decode port (top byte) and stat id (low 24 bits) from the mark */
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed TTL/TOS: the adapter does not report these per tunnel */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow tunnel_action_decap_release callback: the actions handed out by
+ * ntnic_tunnel_decap_set() are static, so there is nothing to free.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow tunnel_item_release callback: the item handed out by
+ * ntnic_tunnel_match() is static, so there is nothing to free.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow ops table exported to the ethdev layer; flush is unsupported */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow from pre-converted attribute/match/action structures.
+ * NOTE(review): presumably returns NULL on failure with *error filled in -
+ * confirm against the implementation in ntnic_filter.c.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One (RTE RSS flag, NT RSS flag) entry of the translation table below. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to a { RTE_<name>, NT_<name> } pair for the mapping table. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Translation table between RTE ETH_RSS_* and NT_ETH_RSS_* bit flags.
+ * Only the fields listed here are supported by the NT hash engine.
+ * The table is read-only and used exclusively by the lookup helpers in
+ * this file, hence static const.
+ */
+static const struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/* Map one RTE RSS bit flag to its NT counterpart; NULL when unsupported. */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+
+	return NULL; /* no mapping exists */
+}
+
+/* Reverse lookup: map one NT RSS bit flag back to the RTE flag; NULL if none. */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+
+	return NULL; /* no mapping exists */
+}
+
+/*
+ * Convert a DPDK RSS hash-field bitmask (ETH_RSS_*) to the NT representation.
+ *
+ * Each set bit is translated via rte_eth_rss_to_nt[]; bits with no NT
+ * equivalent are logged and dropped, so the returned field set may be a
+ * subset of what the application requested.
+ *
+ * Fixes vs. v1: use standard 'unsigned int' instead of the nonstandard
+ * 'uint' typedef, and print the unsigned loop index with %u, not %d.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Convert an NT RSS field set back to the DPDK ETH_RSS_* bitmask.
+ *
+ * Every NT flag must have an RTE counterpart (the NT flags are a subset of
+ * the RTE ones); the assert documents and enforces that invariant in debug
+ * builds.
+ *
+ * Fixes vs. v1: use standard 'unsigned int' instead of the nonstandard
+ * 'uint' typedef.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Include guard added: the sibling headers in this series have one and this
+ * header may be included from multiple translation units.
+ */
+#ifndef __NTNIC_HSHCONFIG_H__
+#define __NTNIC_HSHCONFIG_H__
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* __NTNIC_HSHCONFIG_H__ */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Look up a meter profile by id; returns NULL when no such profile exists. */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *prof = NULL;
+
+	LIST_FOREACH(prof, &dev_priv->mtr_profiles, next) {
+		if (prof->profile_id == meter_profile_id)
+			return prof;
+	}
+
+	return NULL;
+}
+
+/*
+ * rte_mtr meter_profile_add for the vswitch FPGA profile.
+ *
+ * Profiles are accepted only for virtual ports or for egress metering
+ * (egress is encoded in the MSB of the profile id); ingress metering on
+ * physical ports is rejected.  The profile parameters are copied, so the
+ * caller keeps ownership of @profile.
+ *
+ * Fix: the log format "[%s:%u]" expects file and line, but __func__ was
+ * passed twice; pass __FILE__ as elsewhere in this series.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	/* The MSB of the profile id selects egress metering. */
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr meter_profile_delete for the vswitch FPGA profile.
+ * Removes the profile from the per-device list and frees it.
+ * NOTE: meters still referencing the profile are not checked here.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) for the "[%s:%u]" log slot.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Look up a meter object by id; returns NULL when not found. */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *m = NULL;
+
+	LIST_FOREACH(m, &dev_priv->mtrs, next) {
+		if (m->mtr_id == mtr_id)
+			return m;
+	}
+
+	return NULL;
+}
+
+/* A rate split into an integer part and a fractional part in 1/1024 units. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Convert a rate in bytes/s to bytes/period, in the form of an integer part
+ * plus a fractional part in units of 1/1024.  The period depends on the
+ * clock frequency and other parameters, which combined give the multiplier.
+ * The resulting formula is:
+ *     f[bytes/period] = x[bytes/s] * period_ps / 10^12
+ *
+ * NOTE(review): byte_per_second * period_ps can overflow uint64_t for very
+ * large rates combined with long periods - confirm the callers' ranges.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /* 10^12 picoseconds per second */
+
+	res.integer = dividend / divisor;
+	const uint64_t remainder = dividend % divisor;
+
+	res.fractional = 1024ull * remainder / divisor;
+	return res;
+}
+
+/* Bytes/s -> bytes/period for physical ports.  The period 8 * 3333 ps is
+ * hardware-specific - assumed to match the FPGA scheduler; TODO confirm.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Bytes/s -> bytes/period for virtual ports.  The period 512 * 3333 ps is
+ * hardware-specific - assumed to match the FPGA scheduler; TODO confirm.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr meter_enable: program the meter identified by @mtr_id into HW.
+ * Egress meters (MSB set in the profile id) are written to the EPP per-port
+ * QoS block; ingress meters are written to the DBS TX QoS block.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) for the "[%s:%u]" log slot.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* NOTE(review): process-wide flag, not per adapter/port and not
+	 * thread-safe - confirm the configuration path is single threaded.
+	 */
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* A zero rate would disable the meter; force minimum. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			/* NOTE(review): clamps the shared profile in place,
+			 * affecting every meter that references it.
+			 */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/*
+ * Disable ingress policing for a port by writing an all-zero DBS TX QoS
+ * configuration.  The return value of nthw_set_tx_qos_config() is ignored:
+ * this helper is void and its callers treat teardown as best effort.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) for the "[%s:%u]" log slot.
+ */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr meter_disable: clear the HW meter configuration for @mtr_id.
+ * Egress meters (MSB set in mtr_id) are zeroed in the EPP block; ingress
+ * meters are disabled via the DBS TX QoS block.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) for the "[%s:%u]" log slot.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * MTR object create (vswitch profile).
+ *
+ * Validates that the meter id is new, that the referenced profile exists,
+ * and that ingress metering is not requested on a physical port, then
+ * allocates the meter object and optionally enables it immediately.
+ * NOTE(review): if eth_meter_enable() fails, the meter stays in the list -
+ * presumably so it can be enabled later; confirm intended.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) for the "[%s:%u]" log slot.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/*
+ * MTR object destroy (vswitch profile).
+ * Clears the HW configuration for the meter, then unlinks and frees it.
+ *
+ * Fixes: "Meter id does not exist" was reported with EEXIST - use ENODEV
+ * as eth_meter_profile_delete() does for the same condition; and pass
+ * __FILE__ (not __func__ twice) for the "[%s:%u]" log slot.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr capabilities_get for the inline FLM profile.
+ * Reports color-blind, byte-mode, RFC 2698 metering only.
+ *
+ * Fix: widen 0xfff to uint64_t BEFORE shifting; the previous
+ * (uint64_t)(0xfff << 0xf) performed the shift in int arithmetic.  The
+ * value is unchanged here, but the corrected form cannot overflow if the
+ * constants grow.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	cap->meter_rate_max = ((uint64_t)0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_add for the inline FLM profile.
+ * Accepts byte-mode RFC 2698 profiles with equal committed/peak rates only.
+ *
+ * Fix: @error was annotated __rte_unused although it is passed to
+ * rte_mtr_error_set() on every failure path - annotation removed.
+ * NOTE(review): the id is range-checked against
+ * flow_mtr_meter_policy_n_max(); presumably profile and policy maxima
+ * coincide - confirm, or use a profile-specific limit.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete for the inline FLM profile: clears the HW
+ * profile slot by writing an all-zero profile.
+ *
+ * Fix: all three parameters were annotated __rte_unused although every one
+ * of them is used in the body - annotations removed.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add for the inline FLM profile.
+ *
+ * Only one policy shape is accepted:
+ *   GREEN:  pass (END, VOID+END or PASSTHRU+END)
+ *   YELLOW: DROP+END
+ *   RED:    DROP+END
+ * Anything else is rejected before the policy is handed to HW.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete for the inline FLM profile.
+ * Only validates the id; no HW state is torn down here.
+ *
+ * Fix: @policy_id and @error were annotated __rte_unused although both are
+ * used - annotations removed.  @dev really is unused and keeps its marker.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr create for the inline FLM profile.
+ * Only shared, enabled, color-blind meters with GREEN-only stats are
+ * accepted; the meter is then offloaded via flow_mtr_create_meter().
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* NOTE(review): profile id is checked against the POLICY maximum;
+	 * presumably the two limits coincide - confirm.
+	 */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy for the inline FLM profile: removes the offloaded meter.
+ *
+ * Fix: @error was annotated __rte_unused although it is used on both
+ * failure paths - annotation removed.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update for the inline FLM profile, repurposed to adjust the
+ * meter bucket: bit 63 of the mask must be set as an opt-in flag, and the
+ * remaining bits carry the 32-bit adjust value.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* Bit 63 is the "this is an adjustment" marker, not part of the value. */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the marker bit, leaving the payload. */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read for the inline FLM profile.
+ * Only GREEN packet/byte counters are maintained (see capabilities);
+ * @clear resets the HW counters after reading.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* Meter ops for the vswitch FPGA profile (no capabilities/policy/stats). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* Meter ops for the inline FLM profile (policy- and stats-capable). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_mtr ops selector: hand back the ops table matching the adapter's
+ * FPGA profile (vswitch or inline); any other profile is rejected with -1.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	if (profile == FPGA_INFO_PROFILE_VSWITCH) {
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		return 0;
+	}
+
+	if (profile == FPGA_INFO_PROFILE_INLINE) {
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		return 0;
+	}
+
+	/* FPGA_INFO_PROFILE_UNKNOWN, FPGA_INFO_PROFILE_CAPTURE and anything
+	 * else are unsupported.
+	 */
+	NT_LOG(ERR, NTHW,
+	       "" PCIIDENT_PRINT_STR
+	       ": fpga profile not supported [%s:%u]\n",
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       __func__, __LINE__);
+	return -1;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/* Forward declaration so the header is self-contained. */
+struct rte_eth_dev;
+
+/*
+ * Fill *ops (a struct rte_mtr_ops **) with the meter ops table matching
+ * the adapter's FPGA profile; returns 0 on success, -1 for unsupported
+ * profiles.  Fix: @dev was wrongly marked __rte_unused - the
+ * implementation dereferences it.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port state for one vDPA-exposed VF. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;	/* registered vDPA device */
+	int vid;			/* vhost connection id (set on connect) */
+	uint32_t index;			/* base HW ring index for this port */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override used on Tx */
+	int rxqs;			/* number of Rx queues configured */
+	int txqs;			/* number of Tx queues configured */
+	uint64_t flags;			/* flags passed to vhost driver register */
+	struct rte_pci_addr addr;	/* PCI address of the backing VF */
+};
+
+/* Static table of configured vDPA ports and the count of slots in use. */
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a vDPA device + queue number into underlying HW queue info.
+ *
+ * @vdpa_dev: device to look up in the static vport[] table.
+ * @rx: non-zero for an Rx queue, zero for Tx.
+ * @queue_id: queue index within the device.
+ * @hw_index: out, HW ring index (port base index + queue_id).
+ * @host_id: out, VF number backing the port.
+ * @rep_port: out, representor port used for the Tx in-port override.
+ *
+ * Returns 0 on success, -1 if the device is unknown or queue_id is out of
+ * range for the configured queue count.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev != vdpa_dev)
+			continue;
+
+		if (rx) {
+			if (queue_id >= vport[i].rxqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].rxqs);
+				return -1;
+			}
+		} else {
+			/* Bug fix: compare/report the Tx queue count (was rxqs) */
+			if (queue_id >= vport[i].txqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].txqs);
+				return -1;
+			}
+		}
+
+		*hw_index = vport[i].index + queue_id;
+		*host_id = vport[i].host_id;
+		*rep_port = vport[i].rep_port;
+		return 0;
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a new vDPA port backed by PCI VF @vdev and start its vhost-user
+ * client on @socket_path.
+ *
+ * @index: base HW ring index for the port.
+ * @rxqs/@txqs: queue counts exposed to the guest.
+ * @rep_port: representor port used for the Tx in-port override.
+ * @vhid: out, slot number assigned in the vport[] table.
+ *
+ * Returns 0 on success, -1 on lookup failure, table exhaustion or vhost
+ * start failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	/* Bug fix: guard the static vport[] table against overflow */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA,
+		       "Cannot create vDPA port %s: all %lu slots in use\n",
+		       vdev->name, MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver for a configured vDPA port and
+ * clear its table slot.
+ *
+ * NOTE(review): the function returns after handling the FIRST slot with a
+ * non-empty ifname, so at most one port is closed per call — confirm whether
+ * callers invoke this repeatedly or whether all ports should be closed here.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* mark slot unused */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Debug-only lookup table: virtio feature bit number -> printable name. */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Debug helper: print the name of every feature bit set in 'features'. */
+static void dump_virtio_features(uint64_t features)
+{
+	int n;
+
+	for (n = 0; n < NUM_FEATURES; n++) {
+		if (features & (1ULL << virt_features[n].id))
+			printf("Virtio feature: %s\n", virt_features[n].name);
+	}
+}
+#endif
+
+/*
+ * vhost "new_device" callback: a guest connected on socket 'vid'.
+ * Locates the matching vport[] slot by socket path, waits (up to ~2 s) for
+ * the PMD instance to become ready, records the negotiated virtio features
+ * and selects packed/split ring mode on the virtual port.
+ *
+ * Returns 0 on success, -1 if the socket is unknown, the instance never
+ * becomes ready, or neither IN_ORDER nor RING_PACKED was negotiated.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* find the port whose socket path matches this connection */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	/* poll (1 ms steps) until the PMD instance for this port is ready */
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* Bug fix: %lx is wrong for uint64_t on 32-bit/LLP64 targets */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016llx\n",
+	       (unsigned long long)negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost "destroy_device" callback: the guest on connection 'vid' went away.
+ * The first loop only locates the port in order to log it; the second loop
+ * sets the virtual port's link state back to "not negotiated".
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user callbacks invoked on guest connect/disconnect. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register the vhost-user driver on the port's socket path, hook up the
+ * connect/disconnect callbacks, mask out virtio features the FPGA path does
+ * not support, and start the driver.
+ *
+ * Returns 0 on success, -1 on any registration/start failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* disable offload/ctrl features not handled by the HW datapath */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF PCI vendor/device id pair to a printable family name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id != NT_HW_NAPATECH_PCI_VENDOR_ID)
+		return "Unknown";
+
+	switch (pci_dev->id.device_id) {
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:
+		return "NT200A02";
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:
+		return "NT50B01";
+	default:
+		return "Unknown";
+	}
+}
+
+/*
+ * PCI probe for a Napatech VF: first create the vDPA device for the VF,
+ * then create the DPDK ethdev interface for it.
+ * Returns 0 on success, -1 if vDPA probing fails, otherwise the result of
+ * nthw_create_vf_interface_dpdk().
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove for a Napatech VF: tear down vDPA first, then the ethdev. */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret = ntvf_vdpa_pci_remove(pci_dev);
+
+	if (ret != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI ids claimed by this VF driver (NT200A02 and NT50B01 VFs). */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* DPDK PCI driver descriptor for the Napatech VF. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+/* Register the driver with the PCI bus; requires the vfio-pci kernel module. */
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow state for one guest virtqueue and its FPGA-backed counterpart. */
+struct vring_info {
+	uint64_t desc;		/* guest-physical address of descriptor ring */
+	uint64_t avail;		/* guest-physical address of avail/driver area */
+	uint64_t used;		/* guest-physical address of used/device area */
+	uint16_t size;		/* number of ring entries */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;		/* 0 = Rx queue, 1 = Tx queue */
+	struct nthw_virt_queue *p_vq;	/* HW queue handle, NULL if not created */
+
+	int enable;
+};
+
+/* Per-device HW view: negotiated features plus one vring_info per queue. */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;	/* number of vrings reported by vhost */
+
+	/* two rings (Rx/Tx) per queue pair */
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Driver-private state for one vDPA-exposed VF. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds obtained from nt_vfio_setup() */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;		/* vhost connection id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* lifecycle flags, accessed atomically from config callbacks */
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* List node linking one ntvf_vdpa_internal into the global device list. */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+/* Global list of probed vDPA VF devices, guarded by internal_list_lock. */
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/*
+ * Find the internal_list entry whose vDPA device equals @vdev.
+ * Returns the entry or NULL; the list lock is held only for the scan.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *it;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next)
+	{
+		if (vdev == it->internal->vdev) {
+			match = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Find the internal_list entry whose PCI device equals @pdev.
+ * Returns the entry or NULL; the list lock is held only for the scan.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *it;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next)
+	{
+		if (pdev == it->internal->pdev) {
+			match = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Set up VFIO for the VF and cache the container/group/device fds in
+ * 'internal'.  Returns 0 on success, -1 if nt_vfio_setup() fails (the fds
+ * are left at -1 in that case).
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (do_map != 0) or unmap every guest memory region of the connected VM
+ * into/from the VF's IOMMU domain and track the state in dma_mapped.
+ *
+ * Returns 0 on success, negative on state mismatch, mem-table retrieval
+ * failure or VFIO map/unmap failure.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* refuse double-map and double-unmap */
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Bug fix: format string had "GPA 0xllx" (missing '%'), so the
+		 * specifier count no longer matched the argument list.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	free(mem);	/* free(NULL) is a no-op */
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address of guest memory into the guest-physical
+ * address, using the VM's memory-region table.  Returns 0 when the address
+ * is not inside any region (0 doubles as the error value).
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+	uint32_t r;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto done;
+
+	for (r = 0; r < mem->nregions; r++) {
+		const struct rte_vhost_mem_region *reg = &mem->regions[r];
+
+		if (hva >= reg->host_user_addr &&
+				hva < reg->host_user_addr + reg->size) {
+			gpa = reg->guest_phys_addr + (hva - reg->host_user_addr);
+			break;
+		}
+	}
+
+done:
+	free(mem);	/* free(NULL) is a no-op */
+
+	return gpa;
+}
+
+/*
+ * Translate the guest ring addresses of virtqueue 'vring' for device
+ * internal->vid and instantiate the matching FPGA-backed virt queue:
+ * even indices become Rx queues, odd indices Tx queues.  A HW queue is
+ * only created when IN_ORDER or RING_PACKED was negotiated; otherwise the
+ * function logs a warning and returns 0 without creating anything.
+ *
+ * Returns 0 on success (including the unsupported-feature no-op case) and
+ * a negative value when address translation or queue-info lookup fails.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* translate desc/avail/used HVAs to guest-physical addresses */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* even vring index = Rx, odd = Tx; queue pair = vring >> 1 */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* receive virt queue */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets have been sent from
+					 * the VM on the LM source side. This pointer points to the
+					 * next free descr and may be pushed ahead by next flag and
+					 * if so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the device's vrings after feature negotiation.  On the INLINE
+ * FPGA profile the first Rx/Tx pair is DMA-mapped, created and enabled
+ * immediately; on other profiles only vring 0 is created here and the rest
+ * are enabled later through the vring-state callback.  Always returns 0.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down all vrings of the device: save the current ring indices back to
+ * vhost (for live migration/restart), then release the HW Rx/Tx queues when
+ * IN_ORDER or RING_PACKED was negotiated.  Logs a warning if vhost requests
+ * dirty-page logging, which is not supported.  Always returns 0.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* hand the last avail/used indices back to vhost */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* IRQ-set buffer: VFIO header plus one eventfd slot per queue and one for
+ * the device/config vector.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Route each vring's call eventfd (plus the device interrupt) onto the VF's
+ * MSI-X vectors through VFIO.
+ *
+ * Returns 0 on success, and also 0 when interrupts are deliberately left
+ * disabled because too many vectors were requested (polling still works);
+ * -1 if the VFIO_DEVICE_SET_IRQS ioctl fails.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	/* Idempotent: nothing to do if interrupts are already wired up */
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0 carries the device interrupt */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/*
+	 * One call eventfd per vring. Iterate one vring at a time so that an
+	 * odd nr_vring can never query an out-of-range vring index nor write
+	 * an fd slot beyond the irq_set->count entries announced above.
+	 */
+	for (i = 0; i < nr_vring; i++) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Detach all MSI-X eventfd triggers from the VF through VFIO.
+ * Returns 0 on success (or if interrupts were never enabled), -1 on
+ * ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char req_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *req = (struct vfio_irq_set *)req_buf;
+	int status;
+
+	/* Nothing to tear down if interrupts were never enabled */
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* A zero-count DATA_NONE/TRIGGER request removes every trigger */
+	req->argsz = sizeof(struct vfio_irq_set);
+	req->count = 0;
+	req->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	req->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	req->start = 0;
+
+	status = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, req);
+	if (status) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the desired state under internal->lock:
+ * start it when the device is both started and attached but not yet
+ * running; stop it (and release interrupts/DMA mappings) when running but
+ * no longer started or attached. Returns 0 on success, negative on error.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		/* Log BEFORE jumping to err - a log after goto is unreachable */
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		/* Drop the vDPA guest-physical DMA mappings */
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vhost "dev_conf" callback: bind the vhost connection id to our device
+ * state and (re)start the datapath. Returns 0 on success, -1 if the vDPA
+ * device is unknown.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	/* Remember the vhost connection and mark the device attached */
+	entry->internal->vid = vid;
+	atomic_store(&entry->internal->dev_attached, 1);
+
+	ntvf_vdpa_update_datapath(entry->internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost "dev_close" callback: detach the device from the datapath and drop
+ * the now-stale virtqueue handles. Returns 0 on success, -1 if the vDPA
+ * device is unknown.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	uint32_t q;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = entry->internal;
+
+	/* Detach first; update_datapath() performs the actual stop */
+	atomic_store(&internal->dev_attached, 0);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	hw = &internal->hw;
+	for (q = 0; q < hw->nr_vring; q++)
+		hw->vring[q].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost "set_features" callback. Succeeds unless dirty-page logging (live
+ * migration) is requested, which this driver cannot support without the
+ * relay core feature. Returns 0 on success, -1 on unknown device or when
+ * live migration is requested.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t feats = 0;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &feats);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, feats);
+
+	/* Without the logging feature there is nothing further to configure */
+	if (!RTE_VHOST_NEED_LOG(feats))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/*
+ * vhost callback: return the VFIO group fd associated with this vhost
+ * connection's device, or -1 if the device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/*
+ * vhost callback: return the VFIO device fd associated with this vhost
+ * connection's device, or -1 if the device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/*
+ * vDPA op: report the number of queues this device supports via *queue_num.
+ * Returns 0 on success, -1 if the device is unknown.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the virtio feature bits this device offers via *features.
+ * Returns 0 on success, -1 on unknown device or NULL output pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the supported vhost-user protocol feature bits.
+ * Returns 0 on success, -1 if the output pointer is NULL.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
+				uint64_t *features)
+{
+	/* vdev IS used (in the logs below); the previous __rte_unused
+	 * annotation on it was misleading and has been dropped.
+	 */
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable MSI-X interrupts and then enable Rx/Tx on every vring.
+ * Even vring indices are Rx queues, odd indices are Tx queues.
+ * Returns 0 on success, or the interrupt-enable error code.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Use the driver's NT_LOG facility, not raw printf */
+		NT_LOG(ERR, VDPA, "%s: failed to enable VFIO interrupts\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+
+/*
+ * vhost "set_vring_state" callback: enable or disable a single vring.
+ * Queues are created lazily on first enable; once the last vring of the
+ * device is enabled, VFIO interrupts and queue enabling are finalized via
+ * ntvf_vdpa_configure_queue(). Returns 0 on success, -1/err otherwise.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx+Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type 0 = Rx queue, otherwise Tx queue */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* Queue already exists - just re-enable it */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* First enable: map guest memory and create the queue */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		/* Inline profile: record state first, then finalize on last vq */
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA driver ops registered with vhost via rte_vdpa_register_device() */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL, /* live migration not supported */
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL, /* no HW notify area exposed */
+};
+
+/*
+ * PCI probe: allocate the per-device state, set up VFIO, register the
+ * device with the vDPA framework and start the datapath state machine.
+ * Returns 0 on success, -1 on failure (allocations are freed on error).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* goto error (not bare return) so list/internal are freed */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, unmap the device, tear down VFIO and
+ * unregister from the vDPA framework, then free the per-device state.
+ * Returns 0 on success, -1 if the device is unknown.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath() stop the device */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* NOTE(review): the id table holds only the zero terminator, so no device
+ * is matched by automatic PCI probing - devices appear to be bound to this
+ * driver explicitly; confirm against the PF driver code.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the NTNIC VF vDPA driver (probe/remove entry points
+ * and function-trace logging helpers).
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit DBG enter/leave logs from every vDPA op */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+/* Reset the vDPA HW state for a vhost connection id */
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+/* DMA mappings are handed out in 1 GiB chunks (see nt_vfio_dma_map) */
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+/* Base of the per-VF IOVA space used for those mappings */
+#define START_VF_IOVA 0x220000000000
+
+/* Derive a unique device number (0..255) from PCI devid[4:0]:function[2:0] */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	/* function & 0x7 never carries into bit 3, so OR equals addition */
+	return (int)(((pdev->addr.devid & 0x1f) << 3) |
+		     (pdev->addr.function & 0x7));
+}
+
+/* Internal API */
+
+/* Per-device VFIO state, indexed by nt_vfio_vf_num() (0..255) */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr; /* next IOVA to hand out; advances in 1G steps */
+};
+
+static struct vfio_dev vfio_list[256];
+
+/* Look up the VFIO state slot for a device number; NULL if out of range. */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	/* Bound by the actual table size rather than a magic 255 */
+	if (vf_num < 0 ||
+			vf_num >= (int)(sizeof(vfio_list) / sizeof(vfio_list[0])))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+/*
+ * Prepare VFIO for a PF/VF: create (or reuse, for device 0) a VFIO
+ * container, bind the device's IOMMU group to it and map the device.
+ * Returns the device number (>= 0) on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/* rte_vfio_get_group_num() returns 1 on success; previously the
+	 * return value was ignored and iommu_group_num could be used
+	 * uninitialized on failure.
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) != 1) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group number.\n");
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Never destroy the shared default container */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/* Destroy the VFIO container associated with a device number.
+ * Returns 0 on success, -1 for an out-of-range device number.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *entry;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	entry = vfio_get(vf_num);
+	if (entry == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(entry->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host buffer into the device's IOVA space. For 1 GiB regions the
+ * mapping is aligned down to a 1 GiB boundary; *iova_addr receives the
+ * IOVA corresponding to virt_addr. Each call consumes one 1 GiB IOVA slot.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Format fixes: virt_addr must be cast for %PRIX64 and a uint64_t
+	 * size must not be printed with %d.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	/* Advance to the next free 1 GiB IOVA slot */
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a mapping made by nt_vfio_dma_map(): recompute the aligned base
+ * and IOVA used at map time and unmap that region.
+ * Returns 0 on success (or if the container was never created), -1 on error.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		/* Mirror the 1 GiB alignment applied in nt_vfio_dma_map() */
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* No container means nothing was ever mapped for this device */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Remove a guest-physical IOVA mapping previously created for vDPA.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *entry = vfio_get(vf_num);
+	int status;
+
+	if (entry == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+	status = rte_vfio_container_dma_unmap(entry->container_fd, virt_addr,
+					      iova_addr, size);
+	if (status != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       status, entry->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for a device number, or -1 if invalid. */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove failed" message */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for a device number, or -1 if invalid. */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove failed" message */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for a device number, or -1 if invalid. */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove failed" message */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+RTE_INIT(nt_vfio_init);
+
+/* Constructor: hand the VFIO DMA map/unmap hooks to the nt_util layer */
+static void nt_vfio_init(void)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* VFIO helper API: per-device container/group/device fds and DMA
+ * (un)mapping, keyed by the device number from nt_vfio_vf_num().
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Setup returns the device number (>= 0) on success, -1 on failure */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* fd accessors; each returns -1 for an invalid device number */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* Host-virtual mapping; *iova_addr receives the assigned IOVA */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA variants: guest-physical addresses are used directly as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* One extended-statistic descriptor: maps an ethdev xstat name to a
+ * counter location inside a driver counter block.
+ */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* xstat name exposed to ethdev */
+	uint8_t source;	/* counter block: 1 = RX port, 2 = TX port, 3 = FLM */
+	unsigned int offset;	/* byte offset of the uint64_t counter in that block */
+};
+
+/*
+ * Extended stat for VSwitch
+ * Offsets resolve into struct port_counters_vswitch_v1
+ * (source 1 = RX, source 2 = TX).
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "rx_dropped_packets", 1, offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "rx_qos_dropped_bytes", 1, offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "rx_qos_dropped_packets", 1, offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "tx_dropped_packets", 2, offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "tx_qos_dropped_bytes", 2, offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "tx_qos_dropped_packets", 2, offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ *
+ * source: 1 = RX port counters, 2 = TX port counters, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3, offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3, offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) },
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ *
+ * Superset of the v1 table: same RMON/FLM 0.17 counters plus FLM 0.20 ones.
+ * source: 1 = RX port counters, 2 = TX port counters, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3, offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3, offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) },
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{ "flm_count_cuc_start", 3, offsetof(struct flm_counters_v1, cuc_start) },
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ * Sized to the larger of the vswitch and capture name tables so one array
+ * can hold a baseline for whichever table is active.
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port baseline snapshot written by nthw_xstats_reset() and subtracted
+ * from live counters by the getters. NOTE(review): non-static — presumably
+ * not referenced outside this file; confirm before narrowing linkage.
+ */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/*
+ * Fill 'stats' with up to 'n' extended statistics for 'port', relative to
+ * the snapshot taken by nthw_xstats_reset(). Returns the number of entries
+ * written. Caller must hold the stat mutex.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n && idx < nb_names; idx++) {
+		uint64_t value = 0;
+
+		switch (names[idx].source) {
+		case 1: /* RX port counter */
+			value = *((uint64_t *)&rx_base[names[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 2: /* TX port counter */
+			value = *((uint64_t *)&tx_base[names[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 3: /* FLM counter; absent (NULL base) on vswitch */
+			if (flm_base) {
+				value = *((uint64_t *)&flm_base[names[idx].offset]) -
+					nthw_xstats_reset_val[0][idx];
+			}
+			break;
+		default:
+			break;
+		}
+
+		stats[idx].id = idx;
+		stats[idx].value = value;
+	}
+
+	return idx;
+}
+
+/*
+ * Fetch the counters selected by 'ids' into 'values', relative to the last
+ * reset snapshot. Entries with out-of-range ids are left untouched.
+ * Returns the number of valid ids processed. Caller must hold the stat mutex.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+	int hits = 0;
+
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		const uint64_t id = ids[i];
+
+		if (id >= nb_names)
+			continue;
+
+		switch (names[id].source) {
+		case 1: /* RX port counter */
+			values[i] = *((uint64_t *)&rx_base[names[id].offset]) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 2: /* TX port counter */
+			values[i] = *((uint64_t *)&tx_base[names[id].offset]) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 3: /* FLM counter; absent (NULL base) on vswitch */
+			if (flm_base) {
+				values[i] = *((uint64_t *)&flm_base[names[id].offset]) -
+					    nthw_xstats_reset_val[0][id];
+			} else {
+				values[i] = 0;
+			}
+			break;
+		default:
+			values[i] = 0;
+			break;
+		}
+		hits++;
+	}
+
+	return hits;
+}
+
+/*
+ * Record the current counter values for 'port' as the baseline used by
+ * nthw_xstats_get()/nthw_xstats_get_by_id(). Caller must hold the stat mutex.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < nb_names; idx++) {
+		switch (names[idx].source) {
+		case 1: /* RX port counter */
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&rx_base[names[idx].offset]);
+			break;
+		case 2: /* TX port counter */
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&tx_base[names[idx].offset]);
+			break;
+		case 3:
+			/* FLM baselines are global (index 0). Resetting the
+			 * instantaneous flm_count_current makes no sense.
+			 */
+			if (flm_base && strcmp(names[idx].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][idx] =
+					*((uint64_t *)&flm_base[names[idx].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions does not require stat mutex locked
+ */
+/*
+ * Copy up to 'size' xstat names into 'xstats_names'. When the destination
+ * is NULL the total number of available names is returned instead.
+ * Does not require the stat mutex.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *names;
+	uint32_t nb_names;
+	unsigned int idx;
+	int copied = 0;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		names = nthw_cap_xstats_names_v1;
+		nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		names = nthw_cap_xstats_names_v2;
+		nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (idx = 0; idx < size && idx < nb_names; idx++) {
+		strlcpy(xstats_names[idx].name, names[idx].name,
+			sizeof(xstats_names[idx].name));
+		copied++;
+	}
+
+	return copied;
+}
+
+/*
+ * Copy the xstat names selected by 'ids' into 'xstats_names'. When the
+ * destination is NULL the total number of available names is returned.
+ * Does not require the stat mutex.
+ *
+ * Returns the number of names actually copied. Out-of-range ids are
+ * skipped; previously they were still counted, so the return value could
+ * exceed the number of entries written and disagreed with the counting in
+ * nthw_xstats_get_by_id().
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Copy up to 'size' xstat names; returns the total name count when
+ * xstats_names is NULL, otherwise the number of names copied.
+ * Does not require the stat mutex.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Fill 'stats' with up to 'n' counters for 'port', relative to the last
+ * reset snapshot. Caller must hold the stat mutex.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Snapshot the current counters for 'port' as the new reset baseline.
+ * Caller must hold the stat mutex.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* By-id variants of the name/value getters above. */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v6 8/8] net/ntnic: adds socket connection to PMD
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-21 13:54   ` [PATCH v6 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-21 13:54   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-21 13:54 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1310 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5354 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index d3c3129a0c..bc34ae80b2 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -13,6 +13,22 @@ if not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -26,6 +42,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -47,6 +64,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+/* One node in a module's command-dispatch tree: either an inner node
+ * (sub_funcs set) or a leaf (func set). Tables are terminated by an entry
+ * with a NULL 'param' (see the loop in execute_function()).
+ */
+struct func_s {
+	const char *param;	/* command token matched against the request */
+	struct func_s *sub_funcs;	/* optional sub-command table, searched recursively */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);	/* leaf handler for this token */
+};
+
+/*
+ * Build an error reply: a 4-byte error code followed by "module:text".
+ * On allocation failure *len stays 0. Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		size_t buf_len = 4 + strlen(module) + 1 + strlen(ntcerr->err_text) + 1;
+
+		*data = malloc(buf_len);
+		if (*data) {
+			uint32_t code = (uint32_t)ntcerr->err_code;
+
+			/* "----" is a placeholder overwritten by the code below */
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			memcpy(*data, &code, sizeof(code));
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a 4-byte status reply carrying 'code'. On allocation failure *len
+ * stays 0. Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	*data = malloc(sizeof(uint32_t));
+	if (*data) {
+		uint32_t reply = (uint32_t)code;
+
+		memcpy(*data, &reply, sizeof(reply));
+		*len = sizeof(uint32_t);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Walk the comma-separated command string 'function' through the nested
+ * func_s tables, recursing one table level per token, and invoke the
+ * matching leaf handler. On any failure an error reply is built via
+ * ntconn_error() and REQUEST_ERR is returned.
+ * Note: strtok() mutates 'function' in place (commas become NULs).
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	/* Consume this token (plus separator) from the remaining length.
+	 * NOTE(review): if 'tok' is the last token, 'sub_funcs' points one
+	 * past its NUL and hdr->len may underflow — presumably callers
+	 * guarantee trailing data after the command string; confirm.
+	 */
+	hdr->len -= strlen(tok) + 1;
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				/* inner node: recurse with the rest of the string */
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				/* entry with neither sub-table nor handler */
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+/* Return values used by ntconnapi_t request handlers */
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blop.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain an describing error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;	/* enum ntconn_err_e value ((uint32_t)-1 terminates the table) */
+	const char *err_text;	/* human readable description */
+} ntconn_err_t;
+
+/* Look up the static error record for 'err_code'. Never returns NULL;
+ * unknown codes map to the "Internal error" entry.
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/* One registered ntconnect module instance; nodes form a singly linked list */
+typedef struct ntconn_mod_s {
+	void *hdl;		/* module context passed to every op callback */
+	struct pci_id_s addr;	/* PCI device this module instance serves */
+	const ntconnapi_t *op;	/* module operations (request/free_data/cleanup) */
+
+	/* NOTE(review): presumably serializes request handling per module —
+	 * confirm against the dispatcher implementation.
+	 */
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd;	/* server socket descriptor (presumably the listening fd) */
+	int running;	/* nonzero while the server loop should keep running */
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;	/* CPU affinity applied to server threads */
+};
+
+/* Register the built-in "server" module (bound to PCI address 0000:00:00.0) */
+int ntconn_server_register(void *server);
+
+/* Add a module instance for 'addr' to the global module list */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+/* Create the UNIX-domain socket server at 'sockname' with thread affinity 'cpuset' */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+/* Well-known UNIX-domain socket path clients connect to */
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+/* Error codes carried in error replies (text in the ntconn_err[] table) */
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+/* Module-specific status codes returned via ntconn_reply_status() */
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+/* Message tags carried in ntconn_header_s.tag */
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+/* Error reply body: code plus a human readable message */
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+/* Wire header preceding every request/reply */
+struct ntconn_header_s {
+	uint16_t tag;		/* one of NTCONN_TAG_* */
+	uint16_t len;		/* length of the command string that follows */
+	uint32_t blob_len;	/* length of the trailing binary blob (after the command) */
+};
+
+/* PCI address, addressable either as one packed 64-bit id or as fields */
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;
+		};
+	};
+};
+
+/* Split a packed 64-bit version value into major/minor halves */
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12	/* strlen("0000:00:00.0") */
+
+/* Reply body listing registered SmartNIC PCI addresses as strings */
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;	/* number of valid entries in nic_pci_id[] */
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+/* Link speeds reported per interface */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+/* Administrative / NIM-presence state of a port */
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+/* Link state of a port */
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+/* Aggregation / virtualization role of a port */
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+/* NIM (Network Interface Module) hardware type identifiers */
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+/* Ethernet MAC address, network byte order */
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+/* Inventory data read from a NIM; fixed-size char arrays leave room
+ * for a terminating NUL (e.g. 16 chars + NUL).
+ */
+struct nim_data_s {
+	uint8_t nim_id;		/* enum nim_identifier_e value */
+	uint8_t port_type;	/* enum port_type_e value */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;	/* requested power level */
+	uint8_t pwr_level_cur;	/* current power level */
+	struct nim_link_length_s link_length;
+};
+
+/* One sensor reading with its historical min/max */
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+/* Sensor inventory summary for an adapter (up to 8 ports) */
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+/* Queue direction relative to the host */
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+/* One queue attached to an interface */
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+/* Full description of one interface in the "adapter get,interfaces" reply */
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;		/* nonzero when the port list is complete */
+	uint8_t nb_ports;	/* number of entries in intf[] */
+	struct ntc_interface_s intf[];	/* flexible array, nb_ports entries */
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/* NOTE(review): this declares 32 char POINTERS, not a 32-byte string
+	 * buffer; likely intended as 'char fw_version[32]' — confirm against
+	 * the code that fills this reply before relying on the layout.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard says FILTER but the file is ntconnect_api_flow.h;
+ * consider renaming for consistency (no collision is visible here).
+ */
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+/* Flow-module specific error codes (0x100+ to avoid ntconn_err_e overlap) */
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+/* Inline (by-value) copy of any flow element spec/mask so it can be
+ * serialized across the ntconnect socket without pointers.
+ */
+struct flow_elem_types_s {
+	int valid;	/* nonzero when the union carries data */
+	union {
+		/* NOTE(review): 'start_addr' appears to be a handle for
+		 * addressing the union's first bytes — confirm usage.
+		 */
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+/* Wire-friendly flow element: type plus by-value spec and mask */
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+/* RSS action with the queue list copied by value */
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+/* Inline (by-value) copy of any flow action configuration */
+struct flow_action_types_s {
+	int valid;	/* nonzero when the union carries data */
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+/* Wire-friendly flow action: type plus by-value configuration */
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+/* Request body: query one action of an existing flow */
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;	/* opaque flow handle returned at creation */
+};
+
+/* Request body: create a flow from fixed-size element/action arrays */
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+/* Request body: destroy one flow on a port */
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;	/* opaque flow handle returned at creation */
+};
+
+#define ERR_MSG_LEN 128LLU
+
+/* Reply: queues assigned to a port */
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+/* Reply: error details plus operation status */
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+/* Reply: handle of a newly created flow */
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+/* Reply: validation result only */
+struct validate_flow_return_s {
+	int status;
+};
+
+/* Reply: query result with trailing variable-length data */
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;	/* bytes valid in data[] */
+	uint8_t data[];		/* flexible array member */
+};
+
+/* Reply: generic flow operation result */
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+/* Error record passed across the ntconnect interface */
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+/* NOTE(review): purpose of this magic value is not visible here — confirm
+ * where FLOW_COOKIE is checked before documenting it further.
+ */
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Identical redefinition of the value in ntconnect_api_flow.h (benign) */
+#define MAX_PATH_LEN 128
+
+/* Meter-module specific error codes (0x100+ to avoid ntconn_err_e overlap) */
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+/* Sub-commands of the meter module */
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+/* Reply: rte_mtr error details plus operation status */
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+/* Request body for profile/policy/meter setup; the union member in use
+ * depends on the ntconn_meter_command_e being executed.
+ */
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+/* Request body: read (and optionally clear) meter statistics */
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;	/* nonzero: reset counters after reading */
+};
+
+/* Reply: meter statistics and the mask of valid fields */
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+/* Pointer-based variant of meter_setup_s for in-process use (not wire-safe) */
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+/* Reply: generic meter operation status */
+struct meter_return_s {
+	int status;
+};
+
+/* Reply: meter capabilities of the device */
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ *
+ * All record structs below must contain uint64_t fields only: the
+ * NUM_STAT_RECORD_* counts are derived as sizeof(struct)/sizeof(uint64_t).
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+/* Per-color counters */
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+/* Flow matcher (FLM) counters; grouped by the FLM version they appeared in */
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+/* Per-queue packet/octet counters */
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+/* Rx port counters for Cap devices: RMON base plus Rx-only extensions */
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+/* Tx port counters for Cap devices: RMON base only */
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+/* Reply header for all stat queries; counters follow in data[] */
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;	/* number of uint64_t entries in data[] */
+	uint64_t timestamp;
+	uint64_t is_virt;	/* nonzero: virt record layout, else cap layout */
+	uint64_t data[];	/* flexible array member */
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Reply body for the test module: status plus variable-length payload */
+struct test_s {
+	int number;	/* number of uint64_t entries in test[] */
+	int status;
+	uint64_t test[];	/* flexible array member */
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+/* Dispatch tree: "get" -> "nic_pci_ids" -> func_get_nic_pci() */
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },	/* table terminator */
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },	/* table terminator */
+};
+
+/*
+ * Handler for "server get,nic_pci_ids".
+ *
+ * Returns the list of registered SmartNIC PCI addresses as a freshly
+ * calloc'ed struct ntc_nic_pci_ids_s in *data (released later by
+ * ntconn_server_free_data()). *len is set to the reply size, or 0 and
+ * REQUEST_ERR is returned on allocation failure.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	/* pci_id_list[] is zero-terminated (see struct ntconn_server_s) */
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/*
+		 * Use snprintf, not sprintf: the nominal "%04x:%02x:%02x.%x"
+		 * expansion exactly fills NICS_PCI_ID_LEN, so the buffer has
+		 * no headroom if a field ever exceeds its expected width
+		 * (e.g. function > 0xf). snprintf bounds the write and
+		 * guarantees NUL-termination.
+		 */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/* ntconnapi_t.request entry point: dispatch 'function' through the
+ * server_entry_funcs tree (recursion depth starts at 0).
+ */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+/* ntconnapi_t.free_data entry point: release the reply buffer allocated
+ * by a request handler after it has been sent to the client.
+ */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (data) {
+#ifdef DEBUG
+		NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+		free(data);
+	}
+}
+
+/* Module ops: no client_cleanup needed (last member NULL) */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/* Register the "server" module on the reserved PCI address 0000:00:00.0,
+ * using the ntconn_server_s state as the module handle.
+ */
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr addr = {
+		.domain = 0, .bus = 0, .devid = 0, .function = 0
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..22ef7828c7
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/*
+ * Error-code to message table, terminated by an err_code of -1.
+ * Looked up by get_ntconn_error().
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+/* Head of the singly-linked list of registered ntconnect modules */
+static ntconn_mod_t *ntcmod_base;
+static pthread_t tid;	/* listener (server) thread */
+static pthread_t ctid;	/* most recently spawned client worker thread */
+static struct ntconn_server_s ntconn_serv;	/* singleton server state */
+
+/*
+ * Translate an error code into its ntconn_err[] entry.  Unknown codes
+ * fall back to entry 1 ("Internal error") so the caller always gets a
+ * printable message.
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+	/* Hit the -1 sentinel without a match */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register an ntconnect module for PCI device @addr with operations
+ * @op and opaque handle @hdl.  The module is pushed onto the global
+ * module list and its PCI id is recorded (once) in the server's id
+ * list, unless it is the server's all-zero fake address.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto the head of the global module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* presumably addr.pci_id aliases the domain/bus/devid/function
+	 * fields via a union, so an all-zero address reads as 0 here -
+	 * TODO confirm against the ntconn_mod_t declaration
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the PCI id once in the bounded server list */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill @addr with an AF_UNIX address for @path.  Returns 0 on success,
+ * -1 when either pointer is NULL or the path does not fit in sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!addr || !path)
+		return -1;
+	memset(addr, 0, sizeof(*addr));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+/* Status codes returned by the socket helper functions below */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll @fd for up to @timeout ms and receive at most @len bytes into
+ * @data; *recv_len receives the number of bytes actually read.
+ *
+ * Returns STATUS_OK on data, STATUS_TIMEOUT when poll() expires,
+ * STATUS_TRYAGAIN on transient conditions (EINTR, spurious wakeup,
+ * drained POLLHUP read), STATUS_CONNECTION_CLOSED when the peer hung
+ * up, STATUS_INVALID_PARAMETER / STATUS_CONNECTION_INVALID /
+ * STATUS_INTERNAL_ERROR otherwise.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and not in an error state: try to pull the data */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly @length bytes into @data, retrying transient
+ * statuses.  Returns STATUS_OK, or the first fatal status from
+ * read_data(); closed/timeout are passed through silently.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t done = 0;
+
+	while (done < length) {
+		size_t chunk = 0;
+		int st = read_data(clfd, length - done, &data[done], &chunk,
+				   -1);
+
+		switch (st) {
+		case STATUS_OK:
+			done += chunk;
+			break;
+		case STATUS_TRYAGAIN:
+			/* Transient condition - retry the read */
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return st;
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       st);
+			return st;
+		}
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write all @size bytes of @data to @fd, continuing across partial
+ * writes.  Interrupted writes (EINTR) are retried instead of being
+ * treated as fatal; any other write error aborts with
+ * STATUS_INTERNAL_ERROR.  Returns 0 on success.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue; /* interrupted by a signal - retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from @clfd into a heap buffer returned in
+ * *rdata (caller frees).  Receives up to MESSAGE_BUFFER bytes first;
+ * if the header announces a larger payload, grows the buffer and reads
+ * the remainder.  Returns STATUS_OK or a STATUS_* error.
+ *
+ * NOTE(review): assumes the first recv() delivers at least
+ * NTCMOD_HDR_LEN bytes, and the remainder is only fetched when the
+ * total length exceeds MESSAGE_BUFFER - confirm both against senders.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		/* Pull the rest of the payload after the initial chunk */
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from @clfd and resolve it to a registered module.
+ *
+ * The request payload is "<pci_id>;<module>;<command...>".  On success
+ * the matching module is returned, *module_cmd points at the command
+ * part inside the request buffer, and hdr->len is reduced by the
+ * consumed prefix.  *get_req always receives the raw request buffer
+ * (caller frees); *status receives the read status.  Returns NULL when
+ * the request cannot be read or parsed.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			/* NULL-check must precede strlen(tok) */
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "<domain>:<bus>:<devid>.<function>" */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Build a message consisting of an ntconn header (tag @reply_tag)
+ * followed by @size bytes of @data and write it to @clfd in one go.
+ * Returns 0 on success or a STATUS_* error.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	uint8_t *msg = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!msg) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(msg, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&msg[NTCMOD_HDR_LEN], data, size);
+
+	int res = write_all(clfd, msg, hdr.len);
+
+	free(msg);
+	return res;
+}
+
+/*
+ * Send a reply and then release the module-owned payload via the
+ * module's free_data callback (taken under the module mutex).  A zero
+ * @size means there is no payload to free.  Returns the send status.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/*
+ * Send an NTCONN_TAG_ERROR reply for @err_code.  The first 4 bytes of
+ * the payload carry the numeric error code, the remainder the message
+ * text ("connect:<text>").
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	/* Bounded format; the 4-byte "----" prefix is overwritten below */
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	uint32_t code = (uint32_t)ntcerr->err_code;
+
+	/* memcpy instead of a (uint32_t *) store: avoids a potentially
+	 * misaligned, type-punned write into the char buffer
+	 */
+	memcpy(err_buf, &code, sizeof(code));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread.  @arg carries the client fd.  Loops:
+ * read a request, dispatch it to the addressed module (or answer the
+ * generic "version" command inline), send the reply, until the peer
+ * disconnects or a send fails.  On exit the fd is closed and every
+ * registered module's client_cleanup callback is invoked for it.
+ *
+ * NOTE(review): after a successfully handled request the loop still
+ * falls through to the unconditional ntconnect_send_error() below,
+ * and the 'break' paths exit without freeing 'request' - both look
+ * unintended; verify against the client protocol before changing.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Listener thread: accept connections on the server socket and spawn
+ * a detached ntconnect_worker per client, pinned to the configured
+ * CPU set.  Exits when listen()/accept() fails or 'running' clears.
+ *
+ * NOTE(review): pthread_create()'s return value is not checked; on
+ * failure pthread_setaffinity_np/pthread_detach would act on an
+ * indeterminate thread id - confirm intended.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Start the ntconnect UNIX-socket service listening on @sockname, with
+ * the listener and worker threads pinned to @cpuset.  Does nothing
+ * (returns 0) when no modules are registered.  Returns 0 on success,
+ * -1 on any setup failure.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		/* dirname(NULL) is undefined - check the strdup result */
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		/* Fail early on an over-long socket path */
+		if (unix_build_address(sockname, &addr) < 0)
+			return -1;
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		if (pthread_create(&tid, NULL, ntconnect_server,
+				   &ntconn_serv) != 0) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+/* Maximum number of adapters this module can serve */
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle: the driver instance backing each request */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+/* "get" sub-command handlers (defined below) */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* Level-1 dispatch table for "get"; NULL-terminated */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* "set" sub-command handlers (defined below) */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* Level-1 dispatch table for "set"; NULL-terminated */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ *
+ * Top-level dispatch table for the "adapter" module: routes "get" and
+ * "set" to their level-1 tables.  NULL-terminated.
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Map an nt_link_speed_e value onto the ntconnect PORT_LINK_SPEED_*
+ * representation; anything unrecognized becomes
+ * PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	int speed = PORT_LINK_SPEED_UNKNOWN;
+
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:
+		speed = PORT_LINK_SPEED_10M;
+		break;
+	case NT_LINK_SPEED_100M:
+		speed = PORT_LINK_SPEED_100M;
+		break;
+	case NT_LINK_SPEED_1G:
+		speed = PORT_LINK_SPEED_1G;
+		break;
+	case NT_LINK_SPEED_10G:
+		speed = PORT_LINK_SPEED_10G;
+		break;
+	case NT_LINK_SPEED_25G:
+		speed = PORT_LINK_SPEED_25G;
+		break;
+	case NT_LINK_SPEED_40G:
+		speed = PORT_LINK_SPEED_40G;
+		break;
+	case NT_LINK_SPEED_50G:
+		speed = PORT_LINK_SPEED_50G;
+		break;
+	case NT_LINK_SPEED_100G:
+		speed = PORT_LINK_SPEED_100G;
+		break;
+	default:
+		break;
+	}
+	return speed;
+}
+
+/*
+ * Parse a textual speed ("10M" .. "100G") into nt_link_speed_t;
+ * unrecognized strings yield NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Handler for "get interfaces": build a ntc_interfaces_s reply
+ * describing every physical and virtual port - type (incl. LAG role),
+ * PCI id, link state/speed, admin/NIM state, MTU, MAC, attached HW
+ * queues and NIM identification data.  On success *data/*len describe
+ * the allocated reply and REQUEST_OK is returned; REQUEST_ERR on
+ * allocation failure.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		/* NOTE(review): unlike port 0 above, this lookup result is
+		 * not NULL-checked before dereference - confirm it cannot
+		 * fail for probed ports
+		 */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Clamp single-mode length to the 16-bit field */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Handler for "get info": return the FPGA image identification string
+ * "TTT-PPPP-VV-RR" (type, product, version, revision) to the client.
+ * Returns REQUEST_OK with *data/*len set, REQUEST_ERR on allocation
+ * failure.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* NOTE(review): the 31-byte bound presumably matches the size of
+	 * struct ntc_adap_get_info_s - confirm against its declaration
+	 */
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handler for "get sensors": serialize a ntc_sensors_s header followed
+ * by one struct sensor per adapter sensor and per NIM sensor of every
+ * physical port.  Returns REQUEST_OK with *data/*len set, REQUEST_ERR
+ * on allocation failure.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/* NOTE(review): copies only strlen bytes into a
+			 * malloc'ed (non-zeroed) buffer, so the name field is
+			 * neither NUL-terminated nor zero-padded here -
+			 * confirm the consumer tolerates that
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable a port (request "port<N>,enable"). */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable a port (request "port<N>,disable"). */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, false);
+	return REQUEST_OK;
+}
+
+/* Force the link state of a port up; no-op (with a log) if already up. */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/* Force the link state of a port down; no-op (with a log) if already down. */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Change the link speed of a port.  Only allowed while the port is
+ * administratively disabled; the speed string must be recognized by
+ * convert_link_speed().  On failure a reply/error blob is written to
+ * *data/*len by the helper that builds it.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	/* An enabled port cannot have its speed changed */
+	if (nt4ga_port_get_adm_state(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+	return REQUEST_OK;
+}
+
+/* Apply a loopback mode (NT_LINK_LOOPBACK_*) to a port. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(info, portid, mode);
+	return REQUEST_OK;
+}
+
+/* Toggle TX power on a port; 'disable' == true switches it off. */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(info, portid, disable) == 0)
+		return REQUEST_OK;
+
+	NT_LOG(DBG, NTCONNECT,
+	       "Port %i: ERROR while changing tx_power\n", portid);
+	return ntconn_reply_status(data, len,
+				   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+}
+
+/*
+ * ntconnect request handler: "adapter;set,interface,port<N>,<cmd>[,<arg>]".
+ * On entry *data points at the zero-terminated remainder of the request
+ * (e.g. "port0,link_speed=10G"); it is parsed destructively via strtok_r.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	/* First token must be "port<N>" with at least one digit */
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports.  The check was inverted ('<'), which
+	 * rejected every valid phy port and accepted out-of-range ones;
+	 * also guard against a negative atoi() result.
+	 */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Unknown sub-command or missing argument */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/* "adapter;set,adapter,..." - not implemented yet; logs and rejects. */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data != NULL && *data != NULL) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+	}
+
+	*len = 0;
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/* Module entry point: dispatch an "adapter" request via the handler table. */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *func_name,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr,
+				func_name, adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a response buffer allocated by a handler in this module. */
+static void adap_free_data(void *hdl _unused, char *buf)
+{
+	free(buf);
+}
+
+/* Per-client cleanup hook; the adapter module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect module ops: name, version major/minor, then the request,
+ * free-data and client-cleanup callbacks (positional initializer; the
+ * ntconnapi_t field names are declared elsewhere).
+ */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register this adapter instance with the NtConnect framework.
+ * Claims the first free slot in adap_hdl[]; returns -1 when the table
+ * is full, otherwise the result of register_ntconn_mod().
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot = -1;
+
+	for (int i = 0; i < MAX_ADAPTERS; i++) {
+		if (adap_hdl[i].drv == NULL) {
+			slot = i;
+			break;
+		}
+	}
+
+	if (slot < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..31d5dc3edc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+/* Name used when registering this module with the ntconnect framework */
+#define this_module_name "filter"
+
+/* Maximum number of driver clients this module can track */
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One driver handle per client slot */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Per-port binding: flow device plus the caller id / forced VLAN used
+ * when building flow attributes (filled in by func_flow_setport).
+ */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/* Error-code-to-text table, terminated by the { -1, NULL } sentinel.
+ * Index 1 ("Internal error") doubles as the fallback for unknown codes.
+ * NOTE(review): several texts spell "Too" as "To" - texts are part of
+ * the wire-visible reply, so left untouched here; candidate typo fix.
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Map an ntconn_flow error code to its text.  Unknown codes fall back
+ * to the "Internal error" entry at index 1.
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	for (int i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Build a flow_return_s reply describing error 'code' with status 'err'.
+ * On success *data owns a malloc'ed reply (freed by the framework) and
+ * *len is its size; on allocation failure *len is 0 and REQUEST_ERR is
+ * returned.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * strlcpy guarantees NUL termination; the previous
+		 * memcpy(RTE_MIN(strlen, ERR_MSG_LEN)) could leave err_msg
+		 * unterminated in the uninitialized malloc'ed buffer.
+		 * Also matches sibling ntconn_flow_err_status().
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg =
+			get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR);
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Dispatch table mapping "filter" sub-command names to their handlers;
+ * terminated by the all-NULL sentinel entry.
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error->type;
+		strlcpy(return_value->err_msg, error->message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Reset a flow_error to the "success" state. */
+static void set_error(struct flow_error *err)
+{
+	err->message = "Operation successfully completed";
+	err->type = FLOW_ERROR_SUCCESS;
+}
+
+/*
+ * ntconnect request handler: "filter;setport,in_port=<N>,vpath=<path>".
+ * Binds DPDK port <N> to the flow device behind the virtio path and
+ * returns the VF queue list (flow_setport_return) to the client.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/* Initialized: used (logged) below even when no vpath= token is sent */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		/*
+		 * "in_port=" is 8 characters.  The original compared only
+		 * 5 bytes and parsed from tok + 7 (the '=' itself), so
+		 * atoi() always returned 0.
+		 */
+		if (length > 8 && memcmp(tok, "in_port=", 8) == 0)
+			in_port = atoi(tok + 8);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vpath=", 6) == 0)
+			strlcpy(vpath, tok + 6, MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	/* Remember caller id / forced VLAN for later flow attribute setup */
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		/* Report each VF queue's id and hardware id to the client */
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: "filter;flush,port=<N>".
+ * Removes every flow installed on the given port's flow device and
+ * returns the flow API status/error to the client.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * atoi() may return a negative value; guard both bounds before
+	 * indexing port_eth[].
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect request handler: "filter;destroy" with a binary
+ * destroy_flow_ntconnect blob appended after the header.
+ * Destroys one flow handle on the given port.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	/* Binary request payload lives right after the text header */
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/*
+	 * Initialize like func_flow_flush() does; otherwise error.message
+	 * is indeterminate if flow_destroy() does not populate it.
+	 */
+	set_error(&error);
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Guard both bounds before indexing port_eth[] */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selects which flow API operation make_flow_create() performs */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Translate a serialized flow request (create_flow_ntconnect) into the
+ * flow_elem[]/flow_action[] arrays expected by the flow API and invoke
+ * flow_validate() or flow_create(), selected by 'func'.
+ *
+ * Returns the created flow handle cast to uint64_t (0 for validate or
+ * on error); *status and *error report the outcome.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	/*
+	 * NOTE(review): vxlan_tun/encap/decap are single stack instances;
+	 * a request containing two actions of the same raw/vxlan type
+	 * would share (and overwrite) them - confirm this cannot occur.
+	 */
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Pessimistic defaults; overwritten on success */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unpack match elements; loop stops at the FLOW_ELEM_TYPE_END entry */
+	do {
+		idx++;
+		/* '>=': elem[] holds MAX_FLOW_STREAM_ELEM entries, so the
+		 * previous '>' allowed an out-of-bounds write at idx == MAX.
+		 */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unpack actions; loop stops at the FLOW_ACTION_TYPE_END entry */
+	idx = -1;
+	do {
+		idx++;
+		/* '>=' for the same bounds reason as the element loop */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					/* '>=': elem_tun[] bound, as above */
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					/* '>=': assumes items[] is sized
+					 * RAW_ENCAP_DECAP_ELEMS_MAX - the old
+					 * '>' permitted one write past that.
+					 */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					/* '>=': same bounds fix as encap */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Validate a flow description without programming it into the hardware.
+ * The request blob (struct create_flow_ntconnect) follows the ntconn
+ * header in *data; on return *data holds a freshly allocated reply
+ * built by copy_return_status() (status plus any flow_error text).
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	/* The flow blob is appended right after the connection header */
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose blob size does not match the expected struct */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data; copy_return_status() builds the reply */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handle a "query" request on an existing flow.
+ * The request blob (struct query_flow_ntconnect) follows the ntconn
+ * header in *data; on success *data is replaced by a freshly allocated
+ * struct query_flow_return_s followed by the query payload, and *len is
+ * set accordingly (the buffer is released later through the module's
+ * free_data callback).
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	/* The query blob is appended right after the connection header */
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose blob size does not match the expected struct */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/* Opaque flow handle previously handed to the client by flow create */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	/* Rebuild the action from the copied type/conf in the request blob */
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data; flow_query allocates data_out on success */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply = fixed-size return struct + variable-length query payload */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			/* Copy the payload into the reply and release the
+			 * buffer allocated by flow_query()
+			 */
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Dispatch an incoming "flow" request to the matching handler table entry. */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int res;
+
+	res = execute_function(this_module_name, hdl, client_id, hdr, function,
+			       adapter_entry_funcs, data, len, 0);
+	return res;
+}
+
+/*
+ * Release a reply buffer previously allocated by this module.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; the flow module keeps no per-client state. */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+}
+
+/* NtConnect module operations table exported for the "flow" module. */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register a driver instance with the NtConnect flow module.
+ * Returns the result of register_ntconn_mod(), or -1 when all
+ * MAX_CLIENTS slots are already in use.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first free handle slot (drv == NULL marks it free) */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL)
+			break;
+	}
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-adapter registration slots; drv == NULL marks a free slot. */
+static struct meter_hdl_s {
+	struct drv_s *drv;	/* owning driver instance */
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-module error table; terminated by an entry with code (uint32_t)-1. */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+/*
+ * Map a meter error code to its human-readable text.
+ * Codes below NTCONN_METER_ERR_INTERNAL_ERROR belong to the generic
+ * ntconn error table; unknown codes fall back to "Internal error"
+ * (index 1 of the local table).
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	int i;
+
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *generic = get_ntconn_error(err_code);
+
+		return generic->err_text;
+	}
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+	/* Not found in the table: report it as an internal error */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Dispatch table mapping request function names to their handlers. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+	error->cause = NULL;
+}
+
+/*
+ * Handle a "capabilities" request: parse "vport=<n>" from the request
+ * string and return the rte_mtr capabilities of the matching port.
+ * On success *data holds a struct meter_capabilities_return_s; on
+ * failure it holds a struct meter_error_return_s (both caller-freed
+ * via the module's free_data callback).
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Request format: "vport=<n>[,...]" */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* NOTE(review): range here is 1..64 while func_meter_setup accepts
+	 * 4..128 — confirm which range is intended
+	 */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is derived from the low bit of the virtual port */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle a "setup" request: add/delete meter profiles, policies and
+ * meters via the rte_mtr API. The command is the first comma-separated
+ * token of the request string ("addpro", "delpro", "addpol", "delpol",
+ * "crtmtr", "delmtr"); the parameters arrive in a struct meter_setup_s
+ * blob after the ntconn header. Per-vport IDs are remapped into a
+ * global ID space partitioned across the virtual ports.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Reject requests whose blob size does not match the expected struct */
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* The command is the first token of the request string */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		/* Each vport owns an equal share of the profile ID space */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		/* Remap the per-vport ID into the global ID space */
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action pointers before the API call */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		/* Meter IDs use their own (larger) partitioned ID space */
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy IDs referenced by the meter must be remapped
+		 * with the same per-vport offset used when they were created
+		 */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	 *data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle a "read" request: fetch (and optionally clear) the statistics
+ * of one meter. The parameters arrive as a struct meter_get_stat_s
+ * blob after the ntconn header; the per-vport meter ID is remapped
+ * into the global ID space before calling rte_mtr_stats_read().
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* Reject requests whose blob size does not match the expected struct */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Physical port from low bit; meter ID remapped per vport partition */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Dispatch an incoming "meter" request to the matching handler table entry. */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	int res;
+
+	res = execute_function(this_module_name, hdl, client_id, hdr, function,
+			       adapter_entry_funcs, data, len, 0);
+	return res;
+}
+
+/*
+ * Release a reply buffer previously allocated by this module.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; the meter module keeps no per-client state. */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+}
+
+/* NtConnect module operations table exported for the "meter" module. */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register a driver instance with the NtConnect meter module.
+ * Returns the result of register_ntconn_mod(), or -1 when all
+ * MAX_CLIENTS slots are already in use.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first free handle slot (drv == NULL marks it free) */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (meter_hdl[slot].drv == NULL)
+			break;
+	}
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ */
+/* Each *_register() attaches a driver instance to one NtConnect module.
+ * All return -1 when registration cannot proceed; otherwise they return
+ * the result of register_ntconn_mod().
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..5c8b8db39e
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+/* FPGA statistics layout versions this module knows how to decode;
+ * checked against mn_stat_layout_version in ntconn_stat_register().
+ */
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each counter group inside a client snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One counter group inside a snapshot: location and length (64-bit words) */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot node; singly linked list headed by stat_hdl.snaps_base */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;
+	struct snaps_s *next;
+};
+
+/* Module-global state for the stat module (one driver instance) */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic groups this module can serialize */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+/* Handlers for "get snapshot <group>": serve data captured by func_snapshot */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Second-level dispatch table under "get snapshot" */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+/* Handlers for "get <item>": read live counters directly */
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* First-level dispatch table under "get" */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level dispatch table for the stat module */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize the FlowMatcher (FLM) counters into the reply buffer.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+	/* silence unused-variable warnings when asserts compile out (NDEBUG) */
+	(void)cnt_names;
+	(void)cnt_values;
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/* was missing flm++: every iteration overwrote record 0 */
+		for (c = 0; c < nbc; c++, flm++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/* zero all requested records; size by the reply record type,
+		 * not by the source struct (the two are distinct types)
+		 */
+		memset(flm, 0, (size_t)nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize the color counters into the reply buffer.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *dst;
+	int i;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	dst = (struct color_type_fields_s *)cdata->data;
+	for (i = 0; i < nbc; i++) {
+		dst[i].pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		dst[i].octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		dst[i].tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize the host-buffer (queue) counters into the reply buffer.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *qdata = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *dst;
+	int i;
+
+	qdata->nb_counters = (uint64_t)nbq;
+	qdata->timestamp = hwstat->last_timestamp;
+	qdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	dst = (struct queue_type_fields_s *)qdata->data;
+	for (i = 0; i < nbq; i++) {
+		dst[i].flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		dst[i].drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		dst[i].fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		dst[i].dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		dst[i].flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		dst[i].drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		dst[i].fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		dst[i].dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/* Copy the RMON counter block, field by field, from the hardware-side
+ * port counters into the wire-format reply structure.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port counters into the reply buffer.  The record layout
+ * depends on whether the adapter runs in vswitch (virt) or capture mode.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch: small fixed counter set per port */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+
+		for (p = 0; p < nbp; p++, rxc++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_octets;
+			rxc->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	/* capture: RMON block followed by the Rx-only counters */
+	struct rx_type_fields_cap_s *rxc =
+		(struct rx_type_fields_cap_s *)rxdata->data;
+
+	for (p = 0; p < nbp; p++, rxc++) {
+		struct port_counters_v2 *cnt =
+			&hwstat->cap.mp_stat_structs_port_rx[p];
+
+		copy_rmon_stat(cnt, &rxc->rmon);
+
+		/* Rx only port counters */
+		rxc->mac_drop_events = cnt->mac_drop_events;
+		rxc->pkts_lr = cnt->pkts_lr;
+		rxc->duplicate = cnt->duplicate;
+		rxc->pkts_ip_chksum_error = cnt->pkts_ip_chksum_error;
+		rxc->pkts_udp_chksum_error = cnt->pkts_udp_chksum_error;
+		rxc->pkts_tcp_chksum_error = cnt->pkts_tcp_chksum_error;
+		rxc->pkts_giant_undersize = cnt->pkts_giant_undersize;
+		rxc->pkts_baby_giant = cnt->pkts_baby_giant;
+		rxc->pkts_not_isl_vlan_mpls = cnt->pkts_not_isl_vlan_mpls;
+		rxc->pkts_isl = cnt->pkts_isl;
+		rxc->pkts_vlan = cnt->pkts_vlan;
+		rxc->pkts_isl_vlan = cnt->pkts_isl_vlan;
+		rxc->pkts_mpls = cnt->pkts_mpls;
+		rxc->pkts_isl_mpls = cnt->pkts_isl_mpls;
+		rxc->pkts_vlan_mpls = cnt->pkts_vlan_mpls;
+		rxc->pkts_isl_vlan_mpls = cnt->pkts_isl_vlan_mpls;
+		rxc->pkts_no_filter = cnt->pkts_no_filter;
+		rxc->pkts_dedup_drop = cnt->pkts_dedup_drop;
+		rxc->pkts_filter_drop = cnt->pkts_filter_drop;
+		rxc->pkts_overflow = cnt->pkts_overflow;
+		rxc->pkts_dbs_drop = cnt->pkts_dbs_drop;
+		rxc->octets_no_filter = cnt->octets_no_filter;
+		rxc->octets_dedup_drop = cnt->octets_dedup_drop;
+		rxc->octets_filter_drop = cnt->octets_filter_drop;
+		rxc->octets_overflow = cnt->octets_overflow;
+		rxc->octets_dbs_drop = cnt->octets_dbs_drop;
+		rxc->ipft_first_hit = cnt->ipft_first_hit;
+		rxc->ipft_first_not_hit = cnt->ipft_first_not_hit;
+		rxc->ipft_mid_hit = cnt->ipft_mid_hit;
+		rxc->ipft_mid_not_hit = cnt->ipft_mid_not_hit;
+		rxc->ipft_last_hit = cnt->ipft_last_hit;
+		rxc->ipft_last_not_hit = cnt->ipft_last_not_hit;
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize Tx port counters into the reply buffer.  The record layout
+ * depends on whether the adapter runs in vswitch (virt) or capture mode.
+ * Returns the number of 64-bit words written (records + info header).
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		/* vswitch: small fixed counter set per port */
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+
+		for (p = 0; p < nbp; p++, txc++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_octets;
+			txc->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct tx_type_fields_cap_s *txc =
+		(struct tx_type_fields_cap_s *)txdata->data;
+
+	for (p = 0; p < nbp; p++, txc++) {
+		copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+			       &txc->rmon);
+		/* overrides rmon.pkts with the adapter-kept Tx packet total */
+		txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/* Reply with the FPGA statistics layout version as a single int. */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(int));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/* Reply with the FLM record layout version (1 or 2) as a single int. */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(int));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* layout switched at FLM stat version 18 */
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	/* Initialize: the original left these uninitialized, so an
+	 * out-of-range enum value caused reads of indeterminate values.
+	 */
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	default:
+		/* unknown type: report an empty record set */
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common read path for all live-counter requests: allocate a reply
+ * buffer sized by get_size() and fill it under the driver stat lock.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	const int n_words = get_size(stat, stype, &nbg);
+	const size_t n_bytes = (size_t)n_words * sizeof(uint64_t);
+	uint64_t *val = malloc(n_bytes);
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* serialize against the driver's statistics updater */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = (int)n_bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": serialize live FlowMatcher counters. */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": serialize live color counters. */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": serialize live host-buffer (queue) counters. */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get rx_counters": serialize live Rx port counters. */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get tx_counters": serialize live Tx port counters. */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/* Look up the snapshot node for client_id.  Returns NULL when absent.
+ * When parent is non-NULL it receives the predecessor node (or NULL if
+ * the match is the list head / the list is empty).
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+		prev = cur;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/* Return the snapshot node for client_id, creating (and linking in) a
+ * fresh one on first use.  Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (snaps)
+		return snaps;
+
+	/* first request from this client: push a new node onto the list */
+	snaps = malloc(sizeof(struct snaps_s));
+	if (!snaps)
+		return NULL;
+
+	snaps->client_id = client_id;
+	snaps->buffer = NULL;
+	snaps->next = stat->snaps_base;
+	stat->snaps_base = snaps;
+	return snaps;
+}
+
+/*
+ * Take an atomic snapshot of all statistic groups for one client.  The
+ * four groups are read under the driver stat lock so they belong to the
+ * same polling interval; the buffer is kept per client and served later
+ * by the "get snapshot ..." functions.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps)
+		goto err_unlocked; /* was: unlocked a mutex never taken */
+
+	if (snaps->buffer) {
+		free(snaps->buffer);
+		snaps->buffer = NULL; /* avoid dangling pointer on failure */
+	}
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_locked;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_locked;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_locked;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_locked;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_locked:
+	/* only reached while holding the stat lock */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+err_unlocked:
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one counter group out of the client's snapshot buffer into a
+ * newly allocated reply.  Fails with NO_DATA if func_snapshot() has not
+ * been called for this client yet.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	if (!*data) {	/* was "!data", which never triggers on OOM */
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": serve the color group from the client snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": serve the queue group from the client snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": serve the Rx group from the client snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": serve the Tx group from the client snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id was annotated _unused although it is forwarded below */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Drop the per-client snapshot state when a client disconnects:
+ * unlink its node from the list and release its buffers.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* unlink from the singly linked list */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);	/* free(NULL) is a no-op; guard removed */
+	free(snaps);
+}
+
+/* NtConnect module descriptor: name, version and callbacks for "stat" */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Attach a driver instance to the NtConnect stat module.  Refuses to
+ * register when the statistics layout version is not supported.
+ * Returns the result of register_ntconn_mod(), or -1 on failure.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Guard the dereference below; every request handler in this
+	 * module already checks mp_nthw_stat for NULL.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: hardware statistics not initialized. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One registration slot per adapter attached to this module */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Dispatch table: maps request function names to handlers */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo-style self test: validates the incoming blob and returns a copy
+ * of the supplied uint64 array.  Validation failures produce a
+ * status-only reply; REQUEST_ERR is reserved for allocation failure.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	/* Reject negative client-supplied counts before the unsigned size
+	 * computation below wraps around (untrusted input).
+	 */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* test_cpy still points into the original request blob */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* status-only reply carrying the validation error */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* NOTE(review): these constants are not referenced in this file —
+ * presumably reserved for a future flow-API test dispatch; confirm.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Module entry point: dispatch an incoming "test" request to the
+ * handler found in adapter_entry_funcs.
+ */
+static int test_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id was annotated _unused although it is forwarded below */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(data);
+}
+
+/* Per-client teardown hook; the test module keeps no per-client state. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect module descriptor: name, version and callbacks for "test" */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Attach a driver instance to the NtConnect test module.
+ * Returns the result of register_ntconn_mod(), or -1 when every
+ * client slot is already occupied.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Claim the first free slot and register it. */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (test_hdl[slot].drv == NULL) {
+			test_hdl[slot].drv = drv;
+			return register_ntconn_mod(&drv->p_dev->addr,
+						   (void *)&test_hdl[slot],
+						   &ntconn_test_op);
+		}
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "Cannot register more adapters into NtConnect framework");
+	return -1;
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (10 preceding siblings ...)
  2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-22 15:41 ` Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (8 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/* Bus types a module instance can be attached to in the FPGA layout. */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias: generic SPI maps to SPI0 */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/* Access semantics of a register. */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,   /* presumably clear-on-read/write-1-to-clear - TODO confirm */
+	REGISTER_TYPE_MIXED, /* fields within the register have differing access types */
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/* Static description of one bit field within a register. */
+struct nt_fpga_field_init {
+	int id;             /* field identifier */
+	uint16_t bw;        /* bit width of the field */
+	uint16_t low;       /* lowest bit position within the register */
+	uint64_t reset_val; /* value of the field after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/* Static description of one register and the fields it contains. */
+struct nt_fpga_register_init {
+	int id;                       /* register identifier */
+	uint32_t addr_rel;            /* address relative to the module base */
+	uint16_t bw;                  /* total bit width of the register */
+	nt_fpga_register_type_t type; /* access type (RW/RO/WO/...) */
+	uint64_t reset_val;           /* register value after reset */
+	int nb_fields;                /* number of entries in fields[] */
+	struct nt_fpga_field_init *fields; /* per-field descriptions */
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/* Static description of one module instance in the FPGA. */
+struct nt_fpga_module_init {
+	int id;            /* module identifier */
+	int instance;      /* instance number of this module */
+	int def_id;        /* module definition identifier */
+	int major_version; /* module major version */
+	int minor_version; /* module minor version */
+	nt_fpga_bus_type_t bus_id; /* bus this module instance is attached to */
+	uint32_t addr_base;        /* base address on that bus */
+	int nb_registers;          /* number of entries in registers[] */
+	struct nt_fpga_register_init *registers; /* per-register descriptions */
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* One (id, value) product parameter pair. */
+struct nt_fpga_prod_param {
+	const int param_id;    /* parameter identifier */
+	const int param_value; /* parameter value */
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/* Top-level static description of one FPGA product. */
+struct nt_fpga_prod_init {
+	int fpga_item_id;    /* item identifier */
+	int fpga_product_id; /* product identifier */
+	int fpga_version;    /* FPGA version number */
+	int fpga_revision;   /* FPGA revision number */
+	int fpga_patch_no;   /* patch number */
+	int fpga_build_no;   /* build number */
+	uint32_t fpga_build_time; /* build timestamp */
+	int nb_prod_params;       /* number of entries in product_params[] */
+	struct nt_fpga_prod_param *product_params; /* product parameters */
+	int nb_modules;                            /* number of entries in modules[] */
+	struct nt_fpga_module_init *modules;       /* per-module descriptions */
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+# The PMD is only supported on Linux/x86_64; skip the build elsewhere.
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+# Prefer the project-wide default cflags when available.
+# (Indentation normalized to the 4-space style used in DPDK meson files.)
+if is_variable('default_cflags')
+    cflags += default_cflags
+else
+    cflags += machine_args
+    cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Convenience header: pulls in the FPGA model types and the generated
+ * module/parameter/register identifier definitions.
+ */
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+/*
+ * CSU module register map (generated data).
+ * Field entries are { id, bit-width, low-bit, reset-value }; register
+ * entries are { id, rel-addr, bit-width, type, reset, nb_fields, fields }.
+ */
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (Flow Matcher) module layout tables (auto-generated for FPGA
+ * image 9563_055_024).  Field initializer tuples appear to be
+ * { field_id, bit_width, lsb_offset, reset_value } and register tuples
+ * { reg_id, address_index, bit_width, access_type, reset_value,
+ *   field_count, field_table } -- NOTE(review): confirm against the
+ * nt_fpga_field_init_t / nt_fpga_register_init_t definitions in
+ * include/fpga_model.h, which are outside this hunk.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	/* default 16 at bit 23 accounts for the register reset 134217728 */
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* 768-bit learn record pushed through the FLM_LRN_DATA register window. */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* Single 32-bit counter fields for the FLM_STAT_* statistics registers. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register table; field_count entries match the array sizes above. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG (generator) module layout tables: two parallel instances (suffix
+ * 0/1), each with burst size, control, run, size mask and stream-id
+ * registers.  Tuple meaning as elsewhere in this file -- presumably
+ * { id, bit_width, lsb_offset, reset_value }; confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+/* Register resets match field defaults (e.g. CTRL* 4194304 = SIZE 64 << 16). */
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF module layout tables: control, inter-frame-gap (IFG) tuning,
+ * timestamp injection and statistics registers.  Tuple layout as for
+ * the other module tables in this file (see fpga_model.h).
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+/* Note the two REGISTER_TYPE_RC1 (clear-on-read) statistics registers. */
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY module: per-port (0/1) transceiver control/status pins
+ * (LPMODE, RESET_B, INT_B, MODPRS_B, RXLOS enables).  The CFG reset
+ * value 170 (0b10101010) sets the four active-low *_B defaults to 1,
+ * and the GPIO reset 17 (0b10001) sets both LPMODE defaults to 1,
+ * matching the per-field reset values below.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU module recipe tables: an address/count control register plus a
+ * 155-bit write-only data record (length A/B/C rewrite rules, TTL and
+ * header-offset fields).  Tuple layout as elsewhere in this file.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface) module: identification, build time, UUID,
+ * PCIe-related config (EXT_TAG / MAX_READ / MAX_TLP) and test/status
+ * registers.  PROD_ID_LSB reset 626734872 equals
+ * (GROUP_ID 9563 << 16) | (VER_ID 55 << 8) | REV_ID 24.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* TEST0/TEST1 resets are the classic 0x11223344 / 0xAABBCCDD patterns. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH (hash) module recipe tables: control register plus a 743-bit
+ * write-only recipe record (word selection, seed, Toeplitz key K and
+ * word mask).  Tuple layout as elsewhere in this file.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST module recipe tables: control register plus a 133-bit write-only
+ * recipe record (strip range and three modifier slots).  Tuple layout
+ * as elsewhere in this file.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR module recipe tables: control register plus a 15-bit write-only
+ * recipe record (enable bit and 14-bit MTU).  Tuple layout as
+ * elsewhere in this file.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC module: I2C master controller register map.  The register names
+ * and layout (CR/SR/ISR/IER, TX/RX FIFOs, TSUSTA..THDDAT timing
+ * registers, GPO, SOFTR) mirror the Xilinx AXI IIC controller --
+ * NOTE(review): presumably an AXI IIC instance; confirm against PG090.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+/* SR reset 192 = both FIFO-empty flags (bits 6 and 7) set at reset. */
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS module recipe tables: control register plus a 23-bit write-only
+ * recipe record (DYN/OFS/LEN).  Tuple layout as elsewhere in this file.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (key matcher) module field tables: CAM entries (six 32-bit words
+ * plus six 4-bit flow types), a 779-bit recipe record describing the
+ * A/B key extraction, the TCAM access window, and the module status
+ * bit.  The km_registers table referencing these arrays lies outside
+ * this hunk.  Tuple layout as elsewhere in this file.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module register map: 100G MAC/PCS status, FEC counters, and
+ * GTY transceiver controls (per-lane _0.._3 variants). Field rows appear
+ * to be { field id, bit width, start bit, reset value } — layout inferred
+ * from data patterns; confirm against fpga_model.h. Non-zero reset values
+ * (e.g. debounce latencies, diff/post-cursor drive levels, timestamp
+ * compensation delays) are hardware defaults generated with the FPGA
+ * image. Do not reorder rows.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/*
+ * MAC_PCS registers. Column 5 appears to hold the register's composite
+ * reset value (e.g. 10000 for MAX_PKT_LEN matches its single field's
+ * reset) — confirm against nt_fpga_register_init_t in fpga_model.h.
+ */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC_RX module register map: read-only receive-side packet/byte
+ * counters, each a single 32-bit count field. Field rows appear to be
+ * { field id, bit width, start bit, reset value } — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC_TX module register map: read-only transmit-side packet/byte
+ * counters, mirroring the MAC_RX layout. Field rows appear to be
+ * { field id, bit width, start bit, reset value } — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI_RD_TG module register map: PCIe read traffic generator used for
+ * DMA/bandwidth testing. Field rows appear to be { field id, bit width,
+ * start bit, reset value } — confirm against fpga_model.h.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA module register map: PCIe test analyzer — enable control plus
+ * read-only good/bad/error packet counters. Field rows appear to be
+ * { field id, bit width, start bit, reset value } — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG module register map: PCIe write traffic generator,
+ * write-side counterpart of PCI_RD_TG. Field rows appear to be
+ * { field id, bit width, start bit, reset value } — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/*
+ * PDB (packet descriptor builder) module register map. Field rows appear
+ * to be { field id, bit width, start bit, reset value } — confirm against
+ * fpga_model.h. Do not reorder rows.
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/*
+ * PDI module register map: serial (UART-like) interface — control,
+ * data rx/tx, prescaler, status and soft reset. Field rows appear to be
+ * { field id, bit width, start bit, reset value } — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/*
+ * PTP1588 module field tables: IEEE 1588 PTP configuration, GPIO,
+ * MAC management interface and host RX/TX buffer access. Field rows
+ * appear to be { field id, bit width, start bit, reset value } —
+ * confirm against fpga_model.h. The _LH/_LL variants presumably latch
+ * high/low values of the corresponding live GP_DATA bits — verify
+ * against the hardware documentation.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/*
+ * QSL module map (queue enable/state/recipe tables; each table is accessed
+ * through a CTRL { ADR, CNT } / DATA register pair). Field rows are
+ * { field_id, bit width, LSB position, reset }; register rows are
+ * { register_id, address index, width in bits, access type, register reset,
+ * number of fields, field table }.
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+/* Note: QSL_RCP_DATA is 56 bits wide (spans two 32-bit bus words). */
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/*
+ * QSPI module map. Field rows are { field_id, bit width, LSB position,
+ * reset }; register rows are { register_id, address index, width in bits,
+ * access type, register reset, number of fields, field table }.
+ * NOTE(review): the register indexes (SRR=16, CR=24, SR=25, DTR=26, DRR=27,
+ * SSR=28) correspond to the Xilinx AXI Quad SPI word offsets
+ * (0x40/0x60/0x64/0x68/0x6C/0x70 divided by 4) — presumably this wraps that
+ * IP core; confirm against the core's product guide.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+/* IER and ISR expose the same interrupt bit layout (enable vs. status). */
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+/* SR reset 37 = RXEMPTY (1 << 0) | TXEMPTY (1 << 2) | SLVMS (1 << 5). */
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+/* SSR resets to all-ones (no slave selected, active-low selects). */
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * RAC (register access / RAB bus) module map: inbound (IB) and outbound (OB)
+ * buffers plus DMA ring pointers and physical-address registers. Field rows
+ * are { field_id, bit width, LSB position, reset }; register rows are
+ * { register_id, address index, width in bits, access type, register reset,
+ * number of fields, field table }.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Reset 33489407 = IB_FREE 511 | OB_FREE (511 << 16): both buffers empty. */
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+/* OB_DATA is REGISTER_TYPE_RC1 — reading clears it (read/clear-on-1). */
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/*
+ * RFD (Rx frame decoder) module map. Field rows are { field_id, bit width,
+ * LSB position, reset }; register rows are { register_id, address index,
+ * width in bits, access type, register reset, number of fields, field table }.
+ * Defaults: max frame 9018 bytes (jumbo), VLAN TPIDs 33024 = 0x8100
+ * (IEEE 802.1Q), VXLAN destination ports 4789 (IANA-assigned VXLAN UDP port).
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+/* Register resets are the packed field defaults, e.g. 2164293888 =
+ * 0x8100 | (0x8100 << 16) and 313856693 = 4789 | (4789 << 16).
+ */
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/*
+ * RMC module map (Rx MAC control: blocking of MAC ports / RPP slices and
+ * merge/overflow status). Field rows are { field_id, bit width, LSB position,
+ * reset }; register rows are { register_id, address index, width in bits,
+ * access type, register reset, number of fields, field table }.
+ * CTRL reset 771 = BLOCK_STATT (1 << 0) | BLOCK_KEEPA (1 << 1) |
+ * BLOCK_MAC_PORT (3 << 8).
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/*
+ * RPL module map (packet-data replacer: RCP recipe table, extension table,
+ * and a 128-bit replacement-data table; each accessed through a CTRL
+ * { ADR, CNT } / DATA pair). Field rows are { field_id, bit width, LSB
+ * position, reset }; register rows are { register_id, address index, width
+ * in bits, access type, register reset, number of fields, field table }.
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/*
+ * RPP_LR module map (per-recipe tables: IFR recipe with enable + 14-bit MTU,
+ * and an LR recipe). Field rows are { field_id, bit width, LSB position,
+ * reset }; register rows are { register_id, address index, width in bits,
+ * access type, register reset, number of fields, field table }.
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/*
+ * RST9563 module map (reset/clock control for the 9563 FPGA image: per-block
+ * reset bits, MMCM/PLL lock status and sticky unlock flags). Field rows are
+ * { field_id, bit width, LSB position, reset }; register rows are
+ * { register_id, address index, width in bits, access type, register reset,
+ * number of fields, field table }.
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* RST reset value 8191 (0x1FFF) asserts all resets in bits 0..12; the MMCM
+ * reset bits (13..17) start deasserted.
+ */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+/* STICKY mirrors STAT bit-for-bit but latches unlock events (RC1 type). */
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/*
+ * SLC module map (packet slicer recipe table: tail-slice enable, dynamic
+ * offset base and signed tail offset). Field rows are { field_id, bit width,
+ * LSB position, reset }; register rows are { register_id, address index,
+ * width in bits, access type, register reset, number of fields, field table }.
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/*
+ * SPIM module map (SPI master: clock prescaler, control, data Tx/Rx and
+ * FIFO status). Field rows are { field_id, bit width, LSB position, reset };
+ * register rows are { register_id, address index, width in bits, access
+ * type, register reset, number of fields, field table }.
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+/* SR reset 6 = TXEMPTY (1 << 1) | RXEMPTY (1 << 2): both FIFOs empty. */
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/*
+ * SPIS module map (SPI slave: control, Tx/Rx data, an addressable RAM
+ * window, and status with frame/read/write error bits). Field rows are
+ * { field_id, bit width, LSB position, reset }; register rows are
+ * { register_id, address index, width in bits, access type, register reset,
+ * number of fields, field table }.
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+/* SR reset 6 = TXEMPTY (1 << 1) | RXEMPTY (1 << 2): both FIFOs empty. */
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/*
+ * STA module map (statistics: byte/packet/CV-error/FCS-error counters, DMA
+ * host address for counter delivery, and a missed-toggle status flag).
+ * Field rows are { field_id, bit width, LSB position, reset }; register rows
+ * are { register_id, address index, width in bits, access type, register
+ * reset, number of fields, field table }.
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/*
+ * TEMPMON module map (die temperature monitor: raw 12-bit reading plus
+ * over-temperature/temperature alarms with override bits). Field rows are
+ * { field_id, bit width, LSB position, reset }; register rows are
+ * { register_id, address index, width in bits, access type, register reset,
+ * number of fields, field table }.
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/*
+ * TINT module map (timer interrupt: programmable interval plus
+ * delayed/skipped counters; STATUS is clear-on-read, RC1). Field rows are
+ * { field_id, bit width, LSB position, reset }; register rows are
+ * { register_id, address index, width in bits, access type, register reset,
+ * number of fields, field table }.
+ */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/*
+ * TMC module map (per-port replace enable; reset 2 enables port 1 only).
+ * Field rows are { field_id, bit width, LSB position, reset }; the register
+ * row is { register_id, address index, width in bits, access type, register
+ * reset, number of fields, field table }.
+ */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/*
+ * TSM module field tables (time sync module: external time connectors
+ * CON0..CON7, NT-TS protocol state, PI controller gains, LEDs, hardset time
+ * and timers). Field rows are { field_id, bit width, LSB position, reset }.
+ * CON0..2 share one config layout (with DC_SRC/PPSIN_2_5V); CON3..6 share a
+ * reduced layout. Each connector has a 64-bit sample split into _HI (sec)
+ * and _LO (ns) halves. The tsm_registers table follows below this chunk.
+ */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+/* NOTE(review): CON5 uses the _TIME suffix where CON0-4/6 use _NS — looks
+ * like a generator naming inconsistency; the field layout is identical.
+ */
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+/* PI controller: integrator value plus Ki/Kp gains and shift amount. */
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+/*
+ * TSM register table.  Each nt_fpga_register_init_t initializer is
+ * positional:
+ *   { register_id, address, bit_width, access_type, reset_value,
+ *     field_count, field_array }
+ * Entries are sorted by register name, not by address.
+ * NOTE(review): member meaning inferred from internal consistency -- e.g.
+ * TSM_LED has width 27 (the sum of its field widths) and reset 16793600
+ * (the OR of its fields' reset values shifted to their lsb positions).
+ * Confirm against the nt_fpga_register_init_t declaration in fpga_model.h.
+ */
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+/*
+ * Module instance table for this FPGA image.  Each nt_fpga_module_init_t
+ * initializer is positional:
+ *   { module_id, instance_no, definition_id, ver_major, ver_minor,
+ *     bus_type, bus_address, register_count, register_table }
+ * The trailing comment on every entry repeats these values in the form
+ * "NAME:instance NAME vMAJOR.MINOR: NAME @ BUS,ADDRESS".  Note that some
+ * instances alias a shared definition (e.g. TX_CPY uses MOD_CPY).
+ */
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+/*
+ * Product parameter table: { NT_* parameter id, value } pairs, sorted by
+ * parameter name and terminated by the { 0, -1 } END sentinel.  Contains
+ * 140 real entries, matching the parameter count passed to
+ * nthw_fpga_9563_055_024_0000 below.
+ */
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+/*
+ * Top-level product descriptor for FPGA image 9563-55-24-0 (type id 200,
+ * build time 1689706895).  The last line supplies the (count, table)
+ * pairs for parameters and modules; 140 and 48 match the sizes of the
+ * product_parameters table (excluding its END sentinel) and the
+ * fpga_modules table defined above.
+ */
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules,
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+/*
+ * Table of all FPGA product descriptors known to this driver.  The
+ * definition lives in an accompanying .c file not shown here --
+ * NOTE(review): presumably NULL-terminated; confirm at the definition
+ * site before iterating.
+ */
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];
+
+/* Descriptor for FPGA image 9563-55-24-0 (product 9563, version 55.24). */
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Numeric ids for every FPGA module type; used as the module_id and
+ * definition_id members of the nt_fpga_module_init_t tables in the
+ * per-image nthw_fpga_*.c files.  The numbering is dense and must stay
+ * stable: only append new ids before the end indicators, and add aliases
+ * after them.
+ * NOTE(review): the guard macro below starts with '_' + uppercase, which
+ * is in the C reserved identifier namespace (C11 7.1.3); consider
+ * renaming when this generated file is next regenerated.
+ */
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+/* NOTE(review): id 32 is unassigned (gap between MOD_MAC and MOD_MAC100);
+ * presumably a retired id -- confirm against the table generator.
+ */
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/* End indicator - keep this as the last element - only aliases go below this point */
+#define MOD_UNKNOWN_MAX (148L)
+/* Number of module ids (one past the highest real id; same as MOD_UNKNOWN_MAX) */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Numeric ids for FPGA product parameters; used as the first member of
+ * the nt_fpga_prod_param_t pairs in the per-image product_parameters
+ * tables.  Ids are assigned alphabetically and must stay stable across
+ * regenerations.
+ * NOTE(review): the guard macro below starts with '_' + uppercase, which
+ * is in the C reserved identifier namespace (C11 7.1.3); consider
+ * renaming when this generated file is next regenerated.
+ */
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG — frame generator: per-stream burst size, PRBS enable, size mask and stream-ID registers */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF — TX MAC feeder: inter-frame-gap (IFG) control, FCS and timestamp-injection registers */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY — GPIO control/status for two PHY ports (LPMODE/MODPRS_B/RESET_B/INT_B — likely QSFP module pins) */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS — per-port (0-7) GPIO for RXLOS/TXDISABLE/TXFAULT module signals */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP — SFP+ module GPIO: ABS (absent), RS (rate select), RXLOS, TXDISABLE, TXFAULT */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU — header field updater recipes: length (A/B/C), TTL and checksum-info rewrite offsets */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF — PCIe host interface: product ID, TLP/read-request config, UUID, status and test registers */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH — hasher recipes: Toeplitz key/seed, QW/word selection offsets and masks for load distribution */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST — header stripper recipes: strip start/end range plus up to three inline modify commands */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G — 10G PHY core: indirect register access, link/lock status, and test-frame generator (TFG) */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR — per-recipe enable and MTU registers (presumably IP fragmentation — confirm against FPGA model) */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC — I2C controller (AXI IIC-style register set: CR/SR, TX/RX FIFOs, bus timing THIGH/TLOW/TSU*/THD*) */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS — inserter recipes: insertion offset, length and dynamic-offset selector */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA — I/O action recipes: VLAN push/pop, queue override, tunnel pop and ROA egress-port mapping */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF — IP fragment handling: unmatched-fragment queues, expiry/timeout and per-recipe settings */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM — key matcher: CAM/TCAM bank data and recipe (RCP) word-selection registers */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO — no registers defined for this module in this FPGA image */
+/* MAC — port MAC: link summary/status, RX/TX configuration, statistics, and test-frame generator */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10: register/field index constants for the 10G MAC module (auto-generated table; values are sequential lookup indices -- do not edit by hand) */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L) /* NOTE(review): MAC10_ADR_0 vs MAC_ADR_0 look like generator-emitted alias variants -- confirm intent */
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L) /* NOTE(review): same apparent alias pattern as MAC_ADR_0 */
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100: register/field index constants for the 100G MAC module (auto-generated table; indices 3155-3330, kept contiguous by the generator -- do not edit by hand) */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G: register/field index constants for the 10G MAC core (auto-generated table; do not edit by hand) */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G: register/field index constants for the 1G MAC core (same layout as MAC10G plus a SPEED field; auto-generated table -- do not edit by hand) */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS: register/field index constants for the MAC PCS block; _0.._3 suffixed names suggest four serdes lanes -- confirm against FPGA model. Auto-generated table; do not edit by hand */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513: intentionally empty - no register/field IDs generated for this module */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520: intentionally empty - no register/field IDs generated for this module */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523: intentionally empty - no register/field IDs generated for this module */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528: intentionally empty - no register/field IDs generated for this module */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-22 15:41   ` Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   71 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15441 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+/*
+ * Identifier of a register-access bus instance on the adapter.
+ * NOTE(review): presumably "RAB" = Register Access Bus - confirm against
+ * the RAC code (nthw_rac.c) added in this series.
+ * NOTE(review): this header uses uint8_t but does not include <stdint.h>
+ * itself; it relies on every includer pulling in <stdint.h> first.
+ */
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+/*
+ * Placeholder definitions: const objects with no initializer are
+ * zero-initialized in C, so the entry count is 0 and the table pointer is
+ * NULL until a real profile table is added in a later commit.
+ */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+/*
+ * Message text for failed size checks of clock profile tables.
+ * NOTE(review): presumably used in static/size assertions by the profile
+ * data sources - confirm once those tables are added.
+ */
+#define clk_profile_size_error_msg "size test failed"
+
+/* Format 0: 8-bit register address with value and read-modify-write mask. */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Format 1: 16-bit register address, full 8-bit value write (no mask). */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Format 2: wide register address, full 8-bit value write (no mask). */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Selects which of the record layouts above a profile table uses. */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/* Si5340 clock profile for NT200A02 (entry count and format-2 table). */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+/*
+ * Umbrella header: pulls in all core NTHW module driver headers so users
+ * only need a single include.
+ */
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Read product parameters from the FPGA model and cache them in
+ * @p_fpga_info, then derive the adapter profile (VSWITCH, CAPTURE or
+ * INLINE) from the product flags.
+ *
+ * NOTE(review): the last argument of fpga_get_product_param() appears to
+ * be the default returned when a parameter is absent (counts default to
+ * -1, flags to 0) - confirm against nthw_fpga_model.c.
+ *
+ * Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	/* Queue manager present implies a capture FPGA */
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one register byte from an I2C device behind IIC instance
+ * @n_instance_no.
+ *
+ * Uses a stack-allocated controller handle; no cleanup call is made, so
+ * nthw_iic_init() presumably acquires no resources that outlive the call.
+ *
+ * Returns the byte value (0..255) on success, or -1 on init/read failure.
+ * The -1 error value cannot collide with data since valid data is <= 255.
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+/*
+ * Write one register byte to an I2C device behind IIC instance
+ * @n_instance_no.
+ *
+ * Returns 0 on success, -1 on init or write failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Scan a range of IIC controller instances (inclusive on both ends) for
+ * attached devices; results are reported by nthw_iic_scan() itself.
+ *
+ * Instances whose handle allocation fails are silently skipped; instances
+ * whose init fails are skipped without scanning.  Always returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				/* retry/timing parameters - see nthw_iic for semantics */
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Probe an I2C-attached Silicon Labs clock chip and identify the part.
+ *
+ * Selects register page 0 via @n_page_reg_addr, reads 8 ident bytes from
+ * register 0x00, and matches the ident against known parts.
+ *
+ * Returns the part number (5338, 5340 or 5341) on success, -1 on I2C
+ * failure or unrecognized ident.  Note that @res is reused: first as the
+ * I2C read status, then as the detected part number.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	/* -1 wraps to all-ones; fully replaced by 8 shifted-in bytes on success */
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* pack the 8 ident bytes MSB-first for logging */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+				/* NOTE(review): ident[3]==0x53 but ident[2] neither
+				 * 0x40 nor 0x41 leaves res == 0 (read status), not -1
+				 * - confirm this is intended.
+				 */
+			} else if (a_silabs_ident[2] == 38) {
+				/* NOTE(review): decimal 38 while siblings use hex
+				 * (0x53/0x40/0x41) - possible typo for 0x38; verify
+				 * against the Si5338 ident register layout.
+				 */
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ *
+ * Seed (initial value) is 0x0000, i.e. the XMODEM parameterization, with
+ * MSB-first (unreflected) processing.  The loop is the standard table-less
+ * per-byte CRC-CCITT update; the split shifts spell out the polynomial
+ * terms: "<< 8 << 4" is << 12 (x^12) and "<< 4 << 1" is << 5 (x^5).
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t seed = 0;
+
+	while (length--) {
+		seed = (uint16_t)(seed >> 8 | seed << 8);	/* byte swap */
+		seed = (uint16_t)(seed ^ *buffer++);	/* fold in next byte */
+		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
+		seed = (uint16_t)(seed ^ seed << 8 << 4);
+		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1);
+	}
+	return seed;
+}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+/*
+ * Program an Si5340 clock synthesizer (fmt2 profile layout) over I2C.
+ *
+ * Creates a temporary IIC bus handle and an Si5340 device handle, writes the
+ * supplied clock profile records, then tears the Si5340 handle down again.
+ *
+ * @param p_fpga                FPGA handle used for the underlying IIC bus
+ * @param n_iic_addr            7-bit I2C address of the Si5340 (U23)
+ * @param p_clk_profile         array of fmt2 register/value records to write
+ * @param n_clk_profile_rec_cnt number of records in p_clk_profile
+ * @return result of nthw_si5340_config_fmt2() (0 on success)
+ *
+ * NOTE(review): p_nthw_iic is allocated with nthw_iic_new() but never
+ * deleted here, while p_nthw_si5340 is. Presumably harmless during one-time
+ * bring-up, but verify ownership — this looks like a small leak.
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+/*
+ * Probe, identify and initialise the FPGA on the adapter.
+ *
+ * Sequence:
+ *  1) read the FPGA ident/build-time registers and decode type/prod/ver/rev,
+ *  2) look up a matching FPGA model through the FPGA manager,
+ *  3) read FPGA parameters and set up the register access (RAC) bus,
+ *  4) run the adapter-specific init (NT200A01/NT200A02 only),
+ *  5) probe for a PCIe3 module, falling back to HIF, for sample triggering,
+ *  6) set up the TSM stat-toggle (T0) and keep-alive (T1) timers.
+ *
+ * @param p_fpga_info adapter/FPGA context; several mp_/n_ members are filled in
+ * @return 0 on success, non-zero on failure
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	/* Read and decode the FPGA identification registers */
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint64_t n_fpga_ident; /* intentionally shadows the outer n_fpga_ident */
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	n_fpga_ident = p_fpga_info->n_fpga_ident;
+
+	/* Look up a supported FPGA model matching the ident word */
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		/* Fix: release the FPGA manager on this error path too */
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+		return -1;
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	/* Set up the register access bus (RAC) */
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-specific reset/bring-up */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Prefer a PCIe3 module for sample-time triggering */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	/* Fall back to the HIF module when no PCIe3 module is present */
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	/* Time Stamp Module: configure TS format and the two periodic timers */
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down FPGA access by resetting the register access bus (RAC).
+ *
+ * Tolerates a NULL context and a partially initialized adapter (no RAC yet).
+ *
+ * @param p_fpga_info adapter/FPGA context; may be NULL
+ * @return result of nthw_rac_rab_reset(), or -1 if there is nothing to reset
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	/* Single combined check (the original nested test was redundant) */
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x adapter init: run the common reset, then the product-specific
+ * reset sequence (only FPGA product 9563 is supported).
+ *
+ * @param p_fpga_info adapter/FPGA context
+ * @return 0 on success, non-zero on failure
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+
+	/* reset common */
+	int rc = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+
+	if (rc != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, rc);
+		return rc;
+	}
+
+	/* reset specific */
+	if (p_fpga_info->n_fpga_prod_id == 9563) {
+		rc = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+	} else {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		rc = -1;
+	}
+
+	if (rc != 0)
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, rc);
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve RST9563 module/register/field handles into the reset context.
+ *
+ * Fields that do not exist on the 9563 FPGA are set to NULL. Optional
+ * fields are looked up with register_query_field(); their absence is only
+ * logged at debug level.
+ *
+ * Fixes vs. original: removed a verbatim duplicate of the MOD_RST9563
+ * module lookup and a duplicated mp_fld_rst_ptp assignment.
+ *
+ * @param p_fpga FPGA handle
+ * @param p      reset context to populate
+ * @return 0 on success, -1 if the RST9563 module is not present
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 peripheral reset bit (assert, then deassert).
+ *
+ * @param p_fpga FPGA handle
+ * @return 0 on success, -1 if the RST9563 module is not present
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+
+	if (!p_mod_rst)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+
+	nt_register_t *p_rst_reg = module_get_register(p_mod_rst, RST9563_RST);
+	nt_field_t *p_periph_fld =
+		register_get_field(p_rst_reg, RST9563_RST_PERIPH);
+
+	/* assert then deassert to generate a reset pulse */
+	field_set_flush(p_periph_fld);
+	field_clr_flush(p_periph_fld);
+
+	return 0;
+}
+
+/*
+ * Program the clock synthesizer on a 9563 FPGA.
+ *
+ * Only the Si5340 model is supported; it is written with the
+ * NT200A02 U23 v5 profile.
+ *
+ * @param p_fpga                        FPGA handle
+ * @param n_si_labs_clock_synth_model   detected synthesizer model number
+ * @param n_si_labs_clock_synth_i2c_addr 7-bit I2C address of the device
+ * @return 0 on success, -1 on unsupported model or programming failure
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, p_fpga->m_product_id,
+		       n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Full reset/bring-up sequence for a 9563 FPGA:
+ *  1) pulse the peripheral reset,
+ *  2) program the clock synthesizer,
+ *  3) resolve reset register/field handles into p_rst,
+ *  4) run the common NT200A0x reset sequence.
+ *
+ * @param p_fpga_info adapter/FPGA context
+ * @param p_rst       reset context (clock synth model/address already set)
+ * @return 0 on success, non-zero on the first failing step
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	const int n_clk_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_clk_synth_i2c_addr =
+		p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int rc;
+
+	/* 1) peripheral reset pulse */
+	rc = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	/* 2) clock synthesizer programming */
+	rc = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_clk_synth_model,
+						n_clk_synth_i2c_addr);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	/* 3) register/field handle setup */
+	rc = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	/* 4) common NT200A0x reset sequence */
+	rc = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ *
+ * Retries the PLL lock wait (resetting the DDR PLL between attempts),
+ * then waits without timeout for the DDR4 MMCM and - when the field is
+ * present - the TSM REF MMCM, and finally clears all sticky MMCM/PLL
+ * unlock bits before testing the DDR4 ones.
+ * Returns 0 on success, -1 when the DDR4 or TSM REF MMCM never locks.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	int retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		/*
+		 * Reuse the outer 'locked' rather than re-declaring it here:
+		 * the previous inner declaration shadowed the outer variable
+		 * (CERT DCL01-C). 'retrycount' is now signed so the
+		 * '<= 0' exhaustion test is meaningful.
+		 */
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is optional on some products; only wait when present */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	/* Probe for the SDC module first; its absence is not an error */
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			/* allocation failed; continue without SDC wait below */
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	/* Retry loop: DDR4 PLL lock, then (when SDC exists) calibration wait */
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					/* NOTE(review): "%08lX" assumes LP64 long for uint64_t; PRIX64 would be portable */
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			/* last attempt: dump final state before reporting timeout */
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Run the full NT200A0x FPGA reset sequence: assert all domain resets,
+ * select the timesync reference clock, release SYS/CORE resets and wait
+ * for their MMCMs to lock, pull the remaining RAB buses, PHYs, DDR4 and
+ * SDC out of reset, verify sticky unlock bits, then release TS/PTP and
+ * stage power. Returns 0 on success, -1 on any lock/calibration failure.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	/* Optional fields (checked for NULL) are absent on some products */
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			/* toggle RX GT data reset with a settle delay each way */
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): intentionally disabled PTP MMCM sequence - kept for reference */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Pre-reset init: reset/setup the RAB buses, probe the AVR and I2C buses,
+ * and detect the Si-Labs clock synthesizer (Si5340 first, then Si5338 for
+ * old NT200A01 build-1 HW). Fills model, I2C address and hw_id into p_rst.
+ * Returns -1 when no synth is detected.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/*
+	 * NOTE(review): the AVR probe result is immediately overwritten and
+	 * both IIC scan results are unchecked - presumably best-effort
+	 * discovery; the final 'res' returned below is the last scan's
+	 * result. Confirm this is intended.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Record detection results for the later reset/init steps */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Field handles and callbacks for the NT200A0x reset controller.
+ * Populated by the product-specific setup (e.g. rst9563) and consumed by
+ * nthw_fpga_rst_nt200a0x_reset(); fields that some products lack are left
+ * NULL and must be checked before use.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synthesizer (e.g. 5340/5338) and 7-bit address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (domain/module resets) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional product-specific serdes/PCS reset callbacks */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate and zero-initialize a GPIO_PHY instance.
+ * Returns NULL on allocation failure; release with nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc() zeroes the object, replacing the malloc+memset pair */
+	nthw_gpio_phy_t *p = calloc(1, sizeof(nthw_gpio_phy_t));
+
+	return p;
+}
+
+/* Scrub and release a GPIO_PHY instance; NULL is accepted and ignored. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY instance to FPGA module instance n_instance and resolve
+ * all CFG/GPIO register fields for both ports. When p is NULL, only probes
+ * for the module: returns 0 if present, -1 if not. Fields looked up with
+ * register_query_field() are optional and may resolve to NULL.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	/* probe-only mode: report module presence without initializing */
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* refresh the shadow copy of the CFG register */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Report whether the GPIO LPMODE pin for port 'if_no' reads as asserted. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* Report whether the port interrupt is active; the "INT_B" pin is active-low. */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* Report whether the port is held in reset; the "RESET_B" pin is active-low. */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* Report module presence; the "MODPRS_B" pin is active-low. */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/* Report the PLL interrupt pin state; always false when the HW lacks it. */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* this HW doesn't support "PLL_INTR" (INTR from SyncE jitter attenuater) */
+	if (p->mpa_fields[if_no].gpio_pll_int == NULL)
+		return false;
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/* Report the emulated RXLOS pin state; always false when the field is absent. */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (p->mpa_fields[if_no].gpio_port_rxlos == NULL)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/* Drive the LPMODE pin for port 'if_no' and enable the pin as an output. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (!enable)
+		field_clr_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+	else
+		field_set_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+	field_clr_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable output */
+}
+
+/* Drive the active-low RESET pin for port 'if_no'; enable the pin as output. */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	/* RESET_B is active-low: asserting reset means clearing the pin */
+	if (!enable)
+		field_set_flush(p->mpa_fields[if_no].gpio_reset);
+	else
+		field_clr_flush(p->mpa_fields[if_no].gpio_reset);
+	field_clr_flush(p->mpa_fields[if_no].cfg_reset); /* enable output */
+}
+
+/* Drive the emulated RXLOS pin when present; silently a no-op otherwise. */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (p->mpa_fields[if_no].gpio_port_rxlos == NULL)
+		return;
+
+	if (enable)
+		field_set_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+	else
+		field_clr_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+}
+
+/*
+ * Restore the CFG defaults for port 'if_no': LPMODE/INT/RESET/MODPRS as
+ * inputs and, when present, the emulated RXLOS as an output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	field_set_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_int); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_reset); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_mod_prs); /* enable input */
+	if (p->mpa_fields[if_no].cfg_port_rxlos)
+		field_clr_flush(p->mpa_fields[if_no].cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+/* Number of PHY interfaces served by one GPIO_PHY module instance */
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-interface field handles: cfg_* select pin direction, gpio_* carry the
+ * pin values. The *_port_rxlos fields are optional and may be NULL.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* GPIO_PHY module instance state */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc yields the same zero-initialized state as malloc+memset */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub state before release so stale pointers fail fast */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all HIF module registers/fields and cache the FPGA product id.
+ * With p == NULL this only probes for the module (0 if present, -1 if not).
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Reg/Fld not present on HIF.
+	 * Fixed: the original guarded these with always-false if/else chains
+	 * whose branches assigned NULL either way - dead code removed.
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* HIF_STATUS is optional; the fields stay NULL when it is absent.
+	 * Fixed: the original else-branch re-queried the missing register and
+	 * handed the resulting NULL pointer to register_query_field().
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+	if (p->mp_reg_config) {
+		p->mp_fld_max_tlp =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+		p->mp_fld_max_read =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+		p->mp_fld_ext_tag =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+	} else {
+		p->mp_fld_max_tlp = NULL;
+		p->mp_fld_max_read = NULL;
+		p->mp_fld_ext_tag = NULL;
+	}
+
+	return 0;
+}
+
+/* Tune PCIe3 read-request size and extended-tag usage where supported */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT40E3)
+		return 0;
+
+	if (p->mp_fld_max_read) {
+		/*
+		 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+		 * (index=5), but the server crashed. For now we need to limit this value to
+		 * 512 (index=2)
+		 */
+		const uint32_t n_max_read_req_size =
+			field_get_updated(p->mp_fld_max_read);
+
+		if (n_max_read_req_size > 2) {
+			field_set_val_flush32(p->mp_fld_max_read, 2);
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+			       p_adapter_id_str, __func__,
+			       n_max_read_req_size);
+		}
+	}
+
+	if (p->mp_fld_ext_tag)
+		field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+	if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+		NT_LOG(INF, NTHW,
+		       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+		       p_adapter_id_str, __func__,
+		       field_get_updated(p->mp_fld_max_tlp),
+		       field_get_updated(p->mp_fld_max_read),
+		       field_get_updated(p->mp_fld_ext_tag));
+	}
+	return 0;
+}
+
+/* Trigger a TSM sample-time capture; returns -1 when the FPGA image has
+ * no sample-time field (nthw_hif_init() leaves it NULL in that case).
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Read the raw HIF statistics counters; optional status fields report 0 */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* HIF_STATUS fields are optional on some images */
+	*p_tags_in_use = 0;
+	*p_rd_err = 0;
+	*p_wr_err = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/* Convert the raw counters to byte rates scaled by the reference clock.
+ * p_rd_err_cnt/p_wr_err_cnt are caller-owned accumulators and must be
+ * initialized by the caller; they are only ever incremented here.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx, n_tx, n_ref_clk, n_unit, n_freq;
+	uint64_t n_tags, n_rd, n_wr;
+
+	nthw_hif_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit, &n_freq,
+			&n_tags, &n_rd, &n_wr);
+
+	*p_tags_in_use = n_tags;
+	if (n_rd)
+		(*p_rd_err_cnt)++;
+	if (n_wr)
+		(*p_wr_err_cnt)++;
+
+	if (n_ref_clk == 0) {
+		/* No reference ticks: a rate cannot be derived */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit * n_freq) / n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit * n_freq) / n_ref_clk;
+
+	return 0;
+}
+
+/* Enable statistics collection and request a sample. ENA and REQ live in
+ * the same STAT_CTRL register; the single flush presumably writes both -
+ * confirm against the register model.
+ */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while still issuing a final sample request */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Sample PCI statistics over a fixed 100 ms window and return rates */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000); /* 100 ms measurement window */
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	/* NOTE(review): cur_tx receives the PCI RX rate and cur_rx the TX rate
+	 * - looks like intentional host-side vs adapter-side naming; confirm
+	 */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Unit size multiplied into the counters when converting to rates */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/* Host InterFace (HIF) module instance state; optional registers/fields
+ * are NULL when the FPGA image does not provide them.
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* FPGA product identification */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - always NULL (see nthw_hif_init()) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional status register - fields NULL when absent */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* PCI statistics */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* PCIe3 tuning (optional) */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* Cached product id, read once in nthw_hif_init() */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Per-endpoint sampling results and traffic-generator settings */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/* Pulse the TX-FIFO reset bit: refresh the CR shadow, set+flush, clear+flush */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/* Stage the data byte plus START/STOP flags and flush them as one write */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Fetch one byte from the RX FIFO into *p_data */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	const uint32_t n_val = field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	*p_data = (uint8_t)n_val;
+
+	return 0;
+}
+
+/* Soft-reset the I2C controller by writing the reset key to SOFTR.
+ * 0x0A is presumably the key value mandated by the IIC IP core - confirm
+ * against the IP documentation.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Refresh the CR shadow, then set the enable bit and flush it */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report whether the I2C bus is currently busy (SR.BB non-zero) */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Report whether the RX FIFO is empty (SR.RXFIFO_EMPTY non-zero) */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	/* Translate each nanosecond requirement into I2C core clock cycles */
+	const struct {
+		uint32_t n_ns;
+		nt_field_t *p_fld;
+	} a_timing[] = {
+		{ susta, p->mp_fld_tsusta }, { susto, p->mp_fld_tsusto },
+		{ hdsta, p->mp_fld_thdsta }, { sudat, p->mp_fld_tsudat },
+		{ buf, p->mp_fld_tbuf }, { high, p->mp_fld_thigh },
+		{ low, p->mp_fld_tlow }, { hddat, p->mp_fld_thddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timing) / sizeof(a_timing[0]); i++) {
+		uint32_t val = a_timing[i].n_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timing[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc yields the same zero-initialized state as malloc+memset */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Resolve all IIC module registers/fields and bring the controller into a
+ * known state: soft reset, enable, optional timing setup, TX-FIFO reset.
+ * n_iic_cycle_time is the I2C core clock cycle time in ns; 0 skips the
+ * timing setup. With p == NULL this only probes for the module instance.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* -1 arguments select the built-in retry/delay defaults */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Timing registers - programmed by nthw_iic_reg_set_timing() */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* FIFO data registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub state before release so stale pointers fail fast */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Configure poll delay and retry budgets; a negative argument selects the
+ * built-in default for that parameter.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry =
+		(n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/* Read data_len bytes from dev_addr/reg_addr into p_void, retrying failed
+ * transfers up to the configured read-retry budget.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	for (;;) {
+		if (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) == 0)
+			break;
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/* Perform a single I2C read transaction: write register address, issue a
+ * repeated-start read of data_len bytes into p_byte. Returns 0 on success,
+ * -1 on bus/data timeout.
+ * Fixed: replaced the if/else (whose branches both return) with guard
+ * clauses and removed the unreachable trailing "return 0;".
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/* Write data_len bytes from p_void to dev_addr/reg_addr, retrying failed
+ * transfers up to the configured write-retry budget.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	for (;;) {
+		if (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) == 0)
+			break;
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/* Perform a single I2C write transaction: device address, register address,
+ * then data_len payload bytes with the stop bit on the last byte.
+ * Returns 0 on success, -1 on empty payload or bus-ready timeout.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	count = data_len - 1;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+	for (i = 0; i < count; i++) {
+		/* Write data byte to TX fifo (stop bit only on the last byte) */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+		p_byte++;
+	}
+
+	/* Write last data byte to TX fifo and set stop bit */
+	nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+		       p_adapter_id_str, __func__, __LINE__);
+		while (true) {
+			/* Fixed: back off between polls instead of hot-spinning
+			 * the register bus while waiting for bus-ready
+			 */
+			if (p->mn_poll_delay != 0)
+				I2C_WAIT_US(p->mn_poll_delay);
+			if (nthw_iic_bus_ready(p)) {
+				NT_LOG(DBG, NTHW,
+				       "%s: info: busReady (%s:%u)\n",
+				       p_adapter_id_str, __func__,
+				       __LINE__);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ */
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ * Returns true when the bus became free, false on retry exhaustion.
+ * Fixed: the original tested "count == 0" after the loop, so with a retry
+ * budget of 0 the counter went negative and a timeout was wrongly reported
+ * as success.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			return true;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+/*
+ * Support function for read function. Waits for data ready.
+ * Returns true when the RX FIFO holds data, false on retry exhaustion.
+ * Fixed: same "count == 0" off-by-one as nthw_iic_bus_ready() - a retry
+ * budget of 0 made the counter go negative and timeout read as success.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			return true;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/* Probe one device address by reading a single byte; logs only on success */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = -1;
+	const int res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr,
+			       (uint8_t)n_reg_addr, 1, &data_val);
+
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/* Scan for the first responding device starting at n_dev_addr_start,
+ * ascending (b_increate) or descending. Returns the address or -1.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	int i = n_dev_addr_start;
+
+	if (b_increate) {
+		while (i < 128) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i++;
+		}
+	} else {
+		while (i >= 0) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i--;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe the full 7-bit I2C address space; results are only logged */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev;
+
+	for (n_dev = 0; n_dev < 128; n_dev++)
+		(void)nthw_iic_scan_dev_addr(p, n_dev, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/*
+ * State for one FPGA IIC (I2C) controller instance.
+ * Register and field handles are resolved once at init time and cached
+ * here so the access routines avoid repeated lookups.
+ */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;	/* owning FPGA (not owned by this struct) */
+	nt_module_t *mp_mod_iic;	/* IIC module handle within the FPGA */
+	int mn_iic_instance;	/* controller instance number */
+
+	uint32_t mn_iic_cycle_time;	/* bus cycle time used for timing setup */
+	int mn_poll_delay;	/* usec between polls; 0 = busy-wait */
+	/* Retry budgets for the various wait loops (negative = use default) */
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* I2C timing registers/fields (setup, hold, high/low periods) */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register: enable, master mode, TX FIFO reset, TX ack */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register: bus busy and FIFO full/empty flags */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data byte plus start/stop condition bits */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt compare value */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data register */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register and its key field */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Lifecycle: allocate, bind to an FPGA IIC instance, destroy. */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Override the poll delay and retry budgets used by the wait loops. */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Register-level read/write; return 0 on success, non-zero on failure. */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Polling helpers: wait for bus idle / RX data within the retry budgets. */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning utilities (probe addresses, log responders). */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20) /* PCS virtual lanes handled by this module */
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+/* Placeholder constant; _unused suppresses the unused-variable warning. */
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate and zero-initialize a MAC/PCS instance.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_delete().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc zeroes the struct in one call (was malloc + memset) */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/*
+ * Scrub and release a MAC/PCS instance. NULL is accepted and ignored.
+ */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	/* p == NULL is a pure presence probe: report module existence only. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	/* Port number is stored as uint8_t, so the instance must fit. */
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		/* Block lock status */
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		/* PCS RX status fields (live and latched variants) */
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		/* Virtual-lane demux lock and GTY TX/RX reset-done status */
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		/* Product-specific lock/reset-done masks, filled in below */
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		/* MAC/PCS configuration fields (resets, enables, test modes) */
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		/* GTY loopback control (one field per transceiver) */
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		/* PHY/MAC misc: TX source selection and timestamp position */
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		/* Optional field: query (not get), may be NULL on some FPGAs */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		/* Link summary status fields */
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		/* BIP error counter and FEC control/status/counters */
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		/* Combined register on older layouts; split RX/TX otherwise */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		/* RX-side fields live in mp_reg_gty_ctl in both layouts */
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		/* Optional register: query, may be absent on some FPGAs */
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/* wrapper - for ease of use: host TX and TFG TX are mutually exclusive */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	const bool use_tfg = !enable;
+
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, use_tfg);
+}
+
+/* Set or clear the PCS RX enable bit and flush it to hardware. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set or clear the PCS TX enable bit and flush it to hardware. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select/deselect the host as TX data source. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select/deselect the traffic generator (TFG) as TX data source. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set EOP (vs SOP) timestamping, if the optional field exists. */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	/* Field is queried (not required) at init and may be NULL. */
+	if (fld == NULL)
+		return;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when every masked block-lock and lane-lock bit is set, i.e. all
+ * configured virtual lanes have achieved lock.
+ */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) &
+		p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) &
+		p->m_fld_vl_demuxed_lock_mask;
+
+	return block_lock == p->m_fld_block_lock_lock_mask &&
+	       lane_lock == p->m_fld_vl_demuxed_lock_mask;
+}
+
+/* Assert (true) or release (false) the TX path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (true) or release (false) the RX path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* True when the RX path is currently held in reset. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst) != 0;
+}
+
+/* Enable/disable transmission of Remote Fault Indication on TX. */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set or clear the RX force-resync control bit. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	field_get_updated(fld); /* refresh shadow value before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when all four GTY RX reset-done fields report done (under the
+ * product-specific mask). Stops reading further fields on first failure,
+ * matching the original short-circuit behavior.
+ */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *const done_flds[] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(done_flds); i++) {
+		if ((field_get_updated(done_flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * True when all four GTY TX reset-done fields report done (under the
+ * product-specific mask). Stops reading further fields on first failure,
+ * matching the original short-circuit behavior.
+ */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *const done_flds[] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(done_flds); i++) {
+		if ((field_get_updated(done_flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Enable/disable host-side loopback on all four GTY transceivers
+ * (loop mode value 2 when enabled, 0 for normal operation).
+ */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_mode = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Enable/disable line-side loopback on all four GTY transceivers
+ * (loop mode value 4 when enabled, 0 for normal operation).
+ */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_mode = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP8 error counters (logging non-zero lanes in debug
+ * builds). NOTE(review): the function name suggests the read clears the
+ * hardware counters — confirm against the BIP_ERR register spec.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t bip_err_per_lane[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err,
+		     (uint32_t *)bip_err_per_lane,
+		     ARRAY_SIZE(bip_err_per_lane));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (bip_err_per_lane[lane] == 0)
+				continue;
+			NT_LOG(DBG, NTHW,
+			       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+			       p->m_port_no, lane, bip_err_per_lane[lane]);
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Read the PCS RX status flag into *status (only the LSB is used). */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	const uint32_t rx_status = field_get_updated(p->mp_fld_stat_pcs_rx_status);
+
+	*status = rx_status & 0x01;
+}
+
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the LINK_SUMMARY register once and copy the requested fields into
+ * the caller's output pointers. Any output pointer may be NULL to skip
+ * that field.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+
+	if (p_abs != NULL)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state != NULL)
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	if (p_lh_abs != NULL)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state != NULL)
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	if (p_link_down_cnt != NULL)
+		*p_link_down_cnt =
+			field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr != NULL)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault != NULL)
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	if (p_lh_remote_fault != NULL)
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	if (p_local_fault != NULL)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault != NULL)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	/* "partial" = some but not all lanes locked -> reset needed */
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+/*
+ * Enable or disable RS-FEC: writing 0 to the control field enables FEC,
+ * writing all five control bits set disables it. Both paths are then
+ * reset so the new FEC state takes effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t fec_ctrl_val = enable ? 0 : ((1 << 5) - 1);
+
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+			    fec_ctrl_val);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* True when the FEC block reports bypass mode. */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass) != 0;
+}
+
+/* True when the FEC block reports valid operation. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid) != 0;
+}
+
+/* True when all FEC lanes report alignment. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn) != 0;
+}
+
+/* True if at least one FEC lane has alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True only if every FEC lane has alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/*
+ * Dump the FEC_STAT register fields to the debug log.
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	/*
+	 * Bug fix: the fourth lock value printed is am_lock3, but the format
+	 * string labeled it "AM_LOCK_0" (a copy/paste duplicate). Corrected
+	 * to "AM_LOCK_3" so the log matches the values.
+	 */
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read (and log when non-zero) the FEC corrected/uncorrected code word counters.
+ * NOTE(review): no explicit clear is written here — presumably the counters are
+ * clear-on-read so register_update() alone resets them; confirm against the
+ * register specification.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the GTY RX buffer status for errors.
+ * Returns true (and logs the four per-lane status values) only when at least
+ * one lane reports a status *change* AND at least one lane has a non-zero
+ * status; otherwise false.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Program the GTY transmitter tuning (pre-cursor, differential swing,
+ * post-cursor) for one lane (0-3). Each value is masked to 5 bits.
+ * An out-of-range lane leaves all three registers unwritten, as before.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	nt_field_t *const pre_csr[4] = {
+		p->mp_field_gty_pre_cursor_tx_pre_csr0,
+		p->mp_field_gty_pre_cursor_tx_pre_csr1,
+		p->mp_field_gty_pre_cursor_tx_pre_csr2,
+		p->mp_field_gty_pre_cursor_tx_pre_csr3,
+	};
+	nt_field_t *const diff_ctl[4] = {
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+	};
+	nt_field_t *const post_csr[4] = {
+		p->mp_field_gty_post_cursor_tx_post_csr0,
+		p->mp_field_gty_post_cursor_tx_post_csr1,
+		p->mp_field_gty_post_cursor_tx_post_csr2,
+		p->mp_field_gty_post_cursor_tx_post_csr3,
+	};
+
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	if (lane < 4)
+		field_set_val_flush32(pre_csr[lane], tx_pre_csr & 0x1F);
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	if (lane < 4)
+		field_set_val_flush32(diff_ctl[lane], tx_diff_ctl & 0x1F);
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	if (lane < 4)
+		field_set_val_flush32(post_csr[lane], tx_post_csr & 0x1F);
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers, www.xilinx.com, page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	/* Write the LPM-enable bit (bit 0 of 'mode') to all four lanes;
+	 * only the last write flushes the register.
+	 */
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset: hold the per-lane equalizer reset for 1 ms so the new
+	 * LPM/DFE selection takes effect.
+	 */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	/* NOTE(review): c_mac_pcs_receiver_mode_dfe is defined earlier in this
+	 * file (outside this hunk); the header exposes the equivalent
+	 * nthw_mac_pcs_receiver_mode_dfe — confirm the two stay in sync.
+	 */
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/* Set or clear the TX polarity-swap bit for one GTY lane (0-3); lane > 3 is a no-op. */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *const tx_pol[4] = {
+		p->mp_field_gty_ctl_tx_pol0, p->mp_field_gty_ctl_tx_pol1,
+		p->mp_field_gty_ctl_tx_pol2, p->mp_field_gty_ctl_tx_pol3,
+	};
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(tx_pol[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set or clear the RX polarity-swap bit for one GTY lane (0-3); lane > 3 is a no-op. */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *const rx_pol[4] = {
+		p->mp_field_gty_ctl_rx_pol0, p->mp_field_gty_ctl_rx_pol1,
+		p->mp_field_gty_ctl_rx_pol2, p->mp_field_gty_ctl_rx_pol3,
+	};
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(rx_pol[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set the port LED mode (see enum nthw_mac_pcs_led_mode_e in the header). */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/* Set the RX timestamp compensation delay; no-op when the FPGA lacks the field. */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Read the current BLOCK_LOCK lock bits from hardware. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Mask of valid BLOCK_LOCK lock bits (cached at init, no HW access). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Read the current per-virtual-lane (demuxed) lock bits from hardware. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Mask of valid lane lock bits (cached at init, no HW access). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* Port LED behavior, written to DEBOUNCE_CTRL by nthw_mac_pcs_set_led_mode(). */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* Receiver equalization modes for nthw_mac_pcs_set_receiver_equalization_mode(). */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Handle for one MAC_PCS FPGA module instance: cached register/field pointers
+ * plus a few masks resolved at init time. Populated by nthw_mac_pcs_init().
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;	/* logical port number, used in log messages */
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+/* Public MAC/PCS API — definitions live in nthw_mac_pcs.c. */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+/* Link-state snapshots from the LINK_SUMMARY register. */
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+/* RS-FEC control/status. */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+/* GTY transceiver tuning and lane control. */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Helper: refresh a single-bit field, then set or clear it and flush.
+ * Silently ignores a NULL field (optional fields on some FPGA variants).
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate a zero-initialized XXV MAC/PCS handle.
+ * Returns NULL on allocation failure (caller must check).
+ * calloc() replaces the malloc()+memset() pair: same behavior, and it also
+ * checks the size multiplication for overflow.
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Free an XXV MAC/PCS handle; NULL is accepted.
+ * The struct is wiped before free so stale register/field pointers are not
+ * left behind in freed memory (debugging aid, not a security scrub).
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+		free(p);
+	}
+}
+
+/*
+ * Map a channel index to a port number.
+ * NOTE(review): index 0 maps to the module instance number, non-zero indices
+ * map to themselves — presumably instance 0 hosts multiple channels; confirm
+ * against the caller.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register for channel 'index' and return the requested
+ * fields; any output pointer may be NULL to skip that field.
+ * Fix: assert(p) previously ran AFTER p was already dereferenced in the
+ * initializer of 'r' — the assert is now the first statement.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r; /* register and fields */
+
+	assert(p);
+	r = &p->regs[index];
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Enable/disable the TX MAC core for channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable the RX MAC core for channel 'index'. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert RX force-resync for channel 'index'. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the RX GT data-path reset for channel 'index'. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX GT data-path reset for channel 'index'. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the auto-negotiation/link-training reset for channel 'index'. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the speed-control reset for channel 'index'. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Start/stop transmitting remote fault indication (RFI) on channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Start/stop transmitting local fault indication (LFI) on channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Update both fault-insertion bits (LFI and RFI) with a single register flush. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *const r = &p->regs[index];
+
+	register_update(r->mp_reg_core_conf);
+	field_set_val32(r->mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(r->mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(r->mp_reg_core_conf, 1);
+}
+
+/* True when DFE equalization is active, i.e. the LPM-enable bit is clear. */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
+/*
+ * Select DFE (enable=true) or LPM (enable=false) receiver equalization, then
+ * pulse the equalizer soft reset so the new setting takes effect.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Invert (or restore) the RX GTY lane polarity for channel 'index'. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Invert (or restore) the TX GTY lane polarity for channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Inhibit (or release) the GTY transmitter output for channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Enable/disable host-side (near-end) loopback.
+ * NOTE(review): writes 2 to GTY_LOOP.GT_LOOP — presumably the GTY near-end PMA
+ * loopback select value; confirm against UG578.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Enable/disable line-side loopback for channel 'index'. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True while the user RX domain of channel 'index' is held in reset. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+/* True while the user TX domain of channel 'index' is held in reset. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there is only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ * NOTE(review): the == 3 comparison requires both lock bits set — presumably
+ * the 10G-only core still reports both bits; confirm on 10G-only FPGAs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/* Ready when the QPLL(s) are locked and neither user domain is in reset. */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+/* True when auto-negotiation is enabled for channel 'index'. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+/* Start/stop transmitting idles on channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable hardware FCS insertion on transmit for channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True when the channel runs at 10G (field value 1); 0 means 25G. */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+/* Select 10G (true) or 25G (false) operation for channel 'index'. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Pulse the link-speed toggle bit to apply a new speed selection. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+/* Enable/disable RS-FEC for channel 'index'. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set the port LED mode for channel 'index' (see nthw_mac_pcs_led_mode_e). */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+/* Assert/deassert the RX MAC/PCS reset for channel 'index'. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX MAC/PCS reset for channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Read (and log when non-zero) the RS-FEC corrected/uncorrected code word
+ * counters for channel 'index'.
+ * NOTE(review): no explicit clear is written — presumably the counters are
+ * clear-on-read; confirm. Also logs p->m_port_no rather than a per-index port
+ * number — verify that is intended for multi-channel instances.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Set the RX timestamp compensation delay for channel 'index'. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+/* Set the TX timestamp compensation delay for channel 'index'. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+/* Select timestamping at end-of-packet (true) vs. the default position. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Program the GTY TX differential swing control for channel 'index'. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+/* Program the GTY TX pre-cursor emphasis for channel 'index'. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+/* Program the GTY TX post-cursor emphasis for channel 'index'. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+/* Enable/disable link training for channel 'index'. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Request Clause 91 FEC (FEC91) during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Request RS-FEC during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Request Clause 74 FEC (FEC74) during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable auto-negotiation for channel 'index'. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Bypass (or re-engage) the auto-negotiation block for channel 'index'. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Configure direct-attach-copper (DAC) mode for channel 'index'.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: it disables AN/LT, bypasses
+ * auto-negotiation, then pulses the MAC/PCS and GT data-path resets.
+ * Any other mode hits assert(0) — extend here before adding new DAC modes.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+/* Return the link-summary LL RX FEC74 lock flag for channel @index. */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+
+	return field_get_updated(rf->mp_fld_link_summary_ll_rx_fec74_lock);
+}
+
+/* Return the link-summary LL RX RS-FEC lane-alignment flag for channel @index. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+
+	return field_get_updated(rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_0
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_0
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Port LED mode encodings, passed to nthw_mac_pcs_xxv_set_led_mode().
+ * Values are raw FPGA register encodings; names suggest hardware-automatic,
+ * forced on, forced off and port-identify modes -- exact LED behavior is
+ * defined by the FPGA, confirm against the register documentation.
+ */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/*
+ * Direct-attach copper (DAC) cable modes, passed to
+ * nthw_mac_pcs_xxv_set_dac_mode().  The N/S/L suffixes presumably map to
+ * the 25G cable assembly classes CA-25G-N/-S/-L (IEEE 802.3by) -- TODO
+ * confirm against the FPGA register documentation.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Context for one XXV MAC/PCS FPGA module instance.
+ * Caches the module handle plus, per sub-module/channel, the register and
+ * field handles looked up during init so runtime accessors need no lookups.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_mac_pcs_xxv;	/* MAC_PCS_XXV module handle */
+	int mn_instance;	/* module instance number */
+
+	uint8_t m_port_no;	/* physical port number of this MAC/PCS */
+
+/* Maximum number of sub-modules/channels whose handles are cached */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	/* Per-channel register/field handle cache, indexed by channel */
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate and zero-initialize a PCI read test generator (PCI_RD_TG)
+ * context.  Returns NULL on allocation failure; release with
+ * nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() allocates and zeroes in one call (malloc()+memset() before) */
+	nthw_pci_rd_tg_t *p = calloc(1, sizeof(nthw_pci_rd_tg_t));
+
+	return p;
+}
+
+/*
+ * Release a context created by nthw_pci_rd_tg_new().
+ * Passing NULL is a no-op.
+ */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the struct before freeing, as elsewhere in this driver */
+	memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+	free(p);
+}
+
+/*
+ * Bind a PCI_RD_TG context to FPGA module instance @n_instance and cache
+ * all register/field handles used by the accessor functions.
+ *
+ * Probe mode: when @p is NULL the function only checks whether the module
+ * instance exists (returns 0 if present, -1 if not) without touching state.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	/* Product parameter: test-generator presence flag (defaults to 1) */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* RDDATA0: low 32 bits of the DMA physical address */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	/* RDDATA1: high 32 bits of the DMA physical address */
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* RDDATA2: request size and WAIT/WRAP flags */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id -- query, not get: may be NULL on older FPGAs */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	/* RDADDR: test generator RAM address */
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	/* RD_RUN: iteration count */
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	/* CTRL: ready status */
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address, split across the RDDATA0 (low)
+ * and RDDATA1 (high) 32-bit register fields.
+ *
+ * Fix: the previous mask `(1UL << 32) - 1` is undefined behavior on
+ * platforms where unsigned long is 32 bits wide (shift count equals the
+ * type width, C11 6.5.7).  Truncating casts on the uint64_t value are
+ * well-defined and equivalent.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Write the test generator RAM address (TG_RDADDR) and flush to hardware. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage the RDDATA2 descriptor fields -- request size plus the WAIT and
+ * WRAP flags -- then flush once via the last field written.  All three
+ * fields belong to the same register, so a single flush at the end
+ * presumably commits them together -- confirm against the field API.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/*
+ * Write the iteration count to TG_RD_RUN; the register name suggests this
+ * also starts the generator -- not verifiable from this file.
+ */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read the TG_CTRL ready flag (TG_RD_RDY), fetching fresh data from hardware. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * Context for one PCI_RD_TG (PCI read test generator) FPGA module instance.
+ * Holds the module handle and the register/field handles cached by
+ * nthw_pci_rd_tg_init().
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_pci_rd_tg;	/* PCI_RD_TG module handle */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* NT_PCI_TA_TG_PRESENT product param */
+
+	/* RDDATA0: low 32 bits of DMA physical address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	/* RDDATA1: high 32 bits of DMA physical address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	/* RDDATA2: request size, optional VF host id, WAIT/WRAP flags */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* optional; NULL if absent */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	/* RDADDR: test generator RAM address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	/* RD_RUN: iteration count */
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	/* CTRL: ready status */
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/* Allocate and zero-initialize a PCI_TA shadow struct; NULL on OOM. */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	nthw_pci_ta_t *p = malloc(sizeof(nthw_pci_ta_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+	return p;
+}
+
+/* Scrub and free a PCI_TA shadow struct; a NULL argument is a no-op. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a PCI_TA (PCIe traffic analyzer) shadow struct to module instance
+ * n_instance of p_fpga, resolving all register and field handles.
+ *
+ * Calling with p == NULL acts as a probe: returns 0 if the module instance
+ * exists, -1 otherwise. With a valid p, returns 0 on success or -1 when the
+ * instance is not present in the FPGA.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* Default to "present" (1) when the product parameter is absent. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* Resolve register/field handles once; the accessors reuse them. */
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write PCI_TA_CONTROL.ENABLE and flush the register to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Fetch the good-packet counter (field_get_updated presumably re-reads
+ * the register from hardware — behavior defined by the field API).
+ */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Fetch the bad-packet counter. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Fetch the length-error counter. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Fetch the payload-error counter. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * Shadow state for one instance of the PCI_TA (PCIe traffic analyzer)
+ * FPGA module: a control register plus four result counters. Handles are
+ * resolved by nthw_pci_ta_init() and cached for the accessors below.
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	/* Product parameter: non-zero if the TA/TG blocks are present. */
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+/* Constructor/destructor and FPGA binding. */
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Enable control and counter readout. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/* Allocate and zero-initialize a PCI_WR_TG shadow struct; NULL on OOM. */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	nthw_pci_wr_tg_t *p = malloc(sizeof(nthw_pci_wr_tg_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+	return p;
+}
+
+/* Scrub and free a PCI_WR_TG shadow struct; a NULL argument is a no-op. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a PCI_WR_TG (PCIe write traffic generator) shadow struct to module
+ * instance n_instance of p_fpga, resolving all register and field handles.
+ *
+ * Calling with p == NULL acts as a probe: returns 0 if the module instance
+ * exists, -1 otherwise. With a valid p, returns 0 on success or -1 when the
+ * instance is not present in the FPGA.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* Default to "present" (1) when the product parameter is absent. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id - query (not get), so the handle may be NULL */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address into the two 32-bit
+ * WRDATA0/WRDATA1 fields (low word first), flushing each write.
+ *
+ * Use 64-bit mask constants: the original (1UL << 32) is undefined
+ * behavior where unsigned long is 32 bits (ILP32 targets).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xffffffffULL));
+}
+
+/* Select the generator RAM address that subsequent data writes target. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one RAM entry: request size plus wait/wrap/increment flags.
+ * The first four writes only update the register shadow; the final
+ * field_flush_register() pushes the whole WRDATA2 register at once.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the write generator for n_iterations iterations. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the TG_WR_RDY status field (refreshed from hardware). */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * Shadow state for one instance of the PCI_WR_TG (PCIe write traffic
+ * generator) FPGA module. Register and field handles are resolved by
+ * nthw_pci_wr_tg_init() and cached for the accessors below.
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	/* Product parameter: non-zero if the TA/TG blocks are present. */
+	int mn_param_pci_ta_tg_present;
+
+	/* WRDATA0: low 32 bits of the DMA physical address. */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	/* WRDATA1: high 32 bits of the DMA physical address. */
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* WRDATA2: request size, flags; req_hid is optional (may be NULL). */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	/* WRADDR: RAM address to load the staged data words into. */
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	/* WR_RUN: iteration count that starts the generator. */
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	/* CTRL: ready status. */
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	/* SEQ: generator sequence number. */
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+/* Constructor/destructor and FPGA binding. */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Programming interface for the write traffic generator. */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/* Allocate and zero-initialize a PCIe3 shadow struct; NULL on OOM. */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	nthw_pcie3_t *p = malloc(sizeof(nthw_pcie3_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pcie3_t));
+	return p;
+}
+
+/* Scrub and free a PCIe3 shadow struct; a NULL argument is a no-op. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pcie3_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a PCIe3 shadow struct to module instance n_instance of p_fpga,
+ * resolve all register/field handles, and apply the initial setup
+ * (clear the marker addresses and program the endpoint DMA allow masks).
+ *
+ * Calling with p == NULL acts as a probe: returns 0 if the module instance
+ * exists, -1 otherwise. With a valid p, returns 0 on success or -1 when
+ * the instance is not present in the FPGA.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	/*
+	 * NOTE(review): p1 allow mask is cleared a second time here while
+	 * p0 ends up set - confirm this asymmetric final state is the
+	 * intended "bifurcation disabled" configuration.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Kick a statistics sample by writing a magic value to SAMPLE_TIME. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection: set STAT_ENA and STAT_REQ, then flush. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection: clear STAT_ENA, set STAT_REQ, flush. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read the raw PCIe statistics counters into the caller's variables.
+ * p_tg_unit_size and p_tg_ref_freq are filled with compile-time constants
+ * (NTHW_TG_CNT_SIZE, NTHW_TG_REF_FREQ) for use in rate calculations.
+ * Always returns 0; all output pointers must be non-NULL.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw counters from nthw_pcie3_get_stat() into rates scaled by
+ * the reference clock: bytes/s for rx/tx, parts-per-million utilization for
+ * the NT and Xilinx bus. Always returns 0.
+ *
+ * Fix vs. the original: when no reference clock ticks were sampled
+ * (ref_clk_cnt == 0) the original left *p_pci_rx_rate, *p_pci_tx_rate and
+ * *p_tag_use_cnt untouched, handing the caller uninitialized values; all
+ * outputs are now zeroed on that path.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		/* No clock ticks sampled: report zeros rather than leaving
+		 * any output parameter uninitialized.
+		 */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		*p_tag_use_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/* Pre-sample hook for endpoint counters; intentionally a no-op here. */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/* Post-sample hook: fill epc with the current PCIe rates and counters. */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	/*
+	 * NOTE(review): cur_tx is passed as the rx-rate output and cur_rx as
+	 * the tx-rate output of nthw_pcie3_get_stat_rate() - confirm the
+	 * swap is intentional (host-tx == device-rx perspective?).
+	 */
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Shadow state for one instance of the PCIE3 FPGA module: statistics
+ * counters, error reporting, sample-time trigger and endpoint setup.
+ * Handles are resolved by nthw_pcie3_init() and cached for the
+ * functions declared below.
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pcie3;
+	int mn_instance;
+
+	/* Statistics control (enable + request strobes). */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference clock tick counter used for rate scaling. */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* Root-port to endpoint error signalling. */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* Endpoint to root-port error signalling. */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Endpoint config: interface id, messaging, DMA allow masks. */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 handles are declared but never resolved
+	 * by nthw_pcie3_init() in this patch - confirm whether they are
+	 * intended for a later commit or are dead fields.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+/* Constructor/destructor and FPGA binding. */
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+/* Raw counters and derived rates. */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/* Allocate and zero-initialize an SDC shadow struct; NULL on OOM. */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	nthw_sdc_t *p = malloc(sizeof(nthw_sdc_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_sdc_t));
+	return p;
+}
+
+/* Scrub and free an SDC shadow struct; a NULL argument is a no-op. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_sdc_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind an SDC shadow struct to module instance n_instance of p_fpga and
+ * resolve the control/status/counter field handles.
+ *
+ * Calling with p == NULL acts as a probe: returns 0 if the module instance
+ * exists, -1 otherwise. With a valid p, returns 0 on success or -1 when the
+ * instance is not present in the FPGA.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		/* Only field handles are kept; the register handle is reused
+		 * locally for each lookup and then discarded.
+		 */
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Check the SDC status fields and pack them into a bitmask.
+ *
+ * For each of calib, init_done, mmcm_lock, pll_lock the expected value is
+ * all-ones; for resetting it is zero. Each field's value is shifted into
+ * *pn_result_mask (calib in the most significant position) and every
+ * deviation from the expected value increments the returned error count.
+ *
+ * Returns the number of failed checks (0 = all OK), or -1 when p or
+ * pn_result_mask is NULL.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+	uint32_t val;
+	uint32_t val_mask;
+	int n_val_width;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	/*
+	 * NOTE(review): (1 << n_val_width) is undefined if a status field is
+	 * ever 32 bits wide - confirm all SDC_STAT fields are < 32 bits.
+	 */
+	val = field_get_updated(p->mp_fld_stat_calib);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_calib);
+	val_mask = ((1 << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_init_done);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_init_done);
+	val_mask = ((1 << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_mmcm_lock);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_mmcm_lock);
+	val_mask = ((1 << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_pll_lock);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_pll_lock);
+	val_mask = ((1 << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	/* resetting is the only field expected to read back as zero */
+	val = field_get_updated(p->mp_fld_stat_resetting);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_resetting);
+	val_mask = ((1 << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != 0)
+		n_err_cnt++;
+
+	/* redundant guard: pn_result_mask was already NULL-checked above */
+	if (pn_result_mask)
+		*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll the SDC status fields until they reach their expected state:
+ * calib/init_done/mmcm_lock/pll_lock must go all-ones, resetting must
+ * clear. Each wait uses n_poll_iterations attempts spaced n_poll_interval
+ * apart (units defined by the field-wait API).
+ *
+ * Returns the number of fields that timed out (0 = all OK).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int res;
+	int n_err_cnt = 0;
+
+	res = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Shadow state for one instance of the SDC FPGA module (control, status
+ * and fill-level/cell counters). Field handles are resolved by
+ * nthw_sdc_init() and cached here; register handles are not kept.
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_sdc;
+	int mn_instance;
+
+	/* SDC_CTRL fields. */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields. */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* Counter and fill-level fields. */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+/* Constructor/destructor and FPGA binding. */
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+/* Status polling and snapshot; both return 0 when all checks pass. */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	nthw_si5340_t *p = malloc(sizeof(nthw_si5340_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_si5340_t));
+	return p;
+}
+
/*
 * Bind the Si5340 state to its I2C master and device address, and force the
 * device's page register to page 0 so the cached page matches the hardware.
 * Always returns 0 (the I2C write result is not checked here).
 */
int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
{
	uint8_t data;

	p->mp_nthw_iic = p_nthw_iic;
	p->mn_iic_addr = n_iic_addr;
	p->mn_clk_cfg = -1; /* presumably "no clock profile selected yet" - only set here */

	/* Synchronize the hardware page register with the cached page (0) */
	p->m_si5340_page = 0;
	data = p->m_si5340_page;
	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
			  &data);

	return 0;
}
+
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_si5340_t));
+		free(p);
+	}
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
/*
 * Write a clock-profile table (array of address/value pairs, in one of two
 * record layouts) to the Si5340 and verify each write by read-back.
 * Returns 0 on success, -1 on unknown format or read-back mismatch.
 */
static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
			  clk_profile_data_fmt_t data_format)
{
	const char *const p_adapter_id_str =
		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
	int i;
	uint16_t addr;
	uint8_t value;
	uint8_t ctrl_value;

	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
	       p_adapter_id_str, __func__, data_cnt, data_format);

	for (i = 0; i < data_cnt; i++) {
		/* Decode the next record; the two formats differ only in the
		 * declared type of reg_addr, so p_data is advanced by the
		 * record size of the active format.
		 */
		if (data_format == CLK_PROFILE_DATA_FMT_1) {
			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
					  ->reg_addr);
			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
		} else {
			NT_LOG(ERR, NTHW,
			       "%s: Unhandled Si5340 data format (%d)\n",
			       p_adapter_id_str, data_format);
			return -1;
		}

		if (addr == 0x0006) {
			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
			NT_OS_WAIT_USEC(300000);
		}

		nthw_si5340_write(p, addr, value);

		if (addr == 0x001C) {
			/* skip readback for "soft reset" register */
			continue;
		}

		ctrl_value = nthw_si5340_read(p, addr);

		if (ctrl_value != value) {
			NT_LOG(ERR, NTHW,
			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
			       p_adapter_id_str, addr, value, ctrl_value);
			return -1;
		}
	}
	return 0;
}
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
/* Convenience wrapper: apply a format-1 clock profile table. */
int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
			  const int data_cnt)
{
	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
}
+
/* Convenience wrapper: apply a format-2 clock profile table. */
int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
			  const int data_cnt)
{
	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_SI5340_H__
#define __NTHW_SI5340_H__

#include "nthw_clock_profiles.h"

#define SI5340_SUCCESS (0)
#define SI5340_FAILED (999)
#define SI5340_TIMEOUT (666)

/* Si5340 clock synthesizer state - accessed over I2C (see nthw_si5340.c) */
struct nthw_si5340 {
	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
	nthw_iic_t *mp_nthw_iic;	/* I2C master used for all accesses */
	int mn_clk_cfg;
	uint8_t m_si5340_page;	/* cached register page to skip redundant page writes */
};

typedef struct nthw_si5340 nthw_si5340_t;

nthw_si5340_t *nthw_si5340_new(void);
int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
void nthw_si5340_delete(nthw_si5340_t *p);

/* Apply a clock profile and verify lock/calibration; 0 on success, -1 on failure */
int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
		      clk_profile_data_fmt_t data_format);
int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
			  const int data_cnt);
int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
			  const int data_cnt);

#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	nthw_spi_v3_t *p = malloc(sizeof(nthw_spi_v3_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+	return p;
+}
+
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p) {
+		if (p->mp_spim_mod) {
+			nthw_spim_delete(p->mp_spim_mod);
+			p->mp_spim_mod = NULL;
+		}
+
+		if (p->mp_spis_mod) {
+			nthw_spis_delete(p->mp_spis_mod);
+			p->mp_spis_mod = NULL;
+		}
+
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+		free(p);
+	}
+}
+
/* Set the transfer timeout (units as used against the monotonic counter in
 * the wait helpers); always returns 0.
 */
int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
{
	p->m_time_out = time_out;
	return 0;
}
+
/* Report the SPI container protocol version implemented here (fixed: 3). */
int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
{
	(void)p; /* version is a property of this implementation, not the instance */
	return 3;
}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
#ifdef SPI_V3_DEBUG_PRINT
/* Debug helper: log 'count' bytes as hex, 16 bytes per output line. */
static void dump_hex(uint8_t *p_data, uint16_t count)
{
	int i;
	int j = 0; /* index within the current output line (0..15) */
	char tmp_str[128];

	for (i = 0; i < count; i++) {
		/* each byte occupies 3 chars: "XX " */
		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
		j++;

		/* flush on a full line or at the final byte */
		if (j == 16 || (i == count - 1)) {
			tmp_str[j * 3 - 1] = '\0'; /* drop trailing space */
			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
			j = 0;
		}
	}
}
#endif
+
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ */
/*
 * Wrap tx_buf in a SPI v3 header (opcode + size), push it out via the SPIM
 * Tx FIFO, then read the response header and payload back from the SPIS Rx
 * FIFO into rx_buf. On return, rx_buf->size holds the number of payload
 * bytes actually received.
 * Returns 0 on success, -1 if the peer reported an error code, 1 if the
 * response payload exceeds the caller-supplied rx_buf capacity, or a
 * non-zero sub-module/timeout result.
 */
int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
{
	/* capture caller capacity - rx_buf->size is reused as "bytes received" */
	const uint16_t max_payload_rx_size = rx_buf->size;
	int result = 0;

#pragma pack(push, 1)
	/* SPI v3 container headers: two 16-bit fields packed into one word */
	union {
		uint32_t raw;

		struct {
			uint16_t opcode;
			uint16_t size;
		};
	} spi_tx_hdr;

	union {
		uint32_t raw;

		struct {
			uint16_t error_code;
			uint16_t size;
		};
	} spi_rx_hdr;
#pragma pack(pop)

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
#endif

	/* Disable transmission from Tx FIFO */
	result = nthw_spim_enable(p->mp_spim_mod, false);
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
		return result;
	}

	/* Enable SPIS module */
	result = nthw_spis_enable(p->mp_spis_mod, true);
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
		return result;
	}

	/* Put data into Tx FIFO */
	spi_tx_hdr.opcode = opcode;
	spi_tx_hdr.size = tx_buf->size;

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
	       opcode, tx_buf->size, rx_buf->size);

#endif /* SPI_V3_DEBUG_PRINT */

	/* words go over the wire in network (big-endian) byte order */
	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
		return result;
	}

	{
		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
		uint16_t tx_size = tx_buf->size;
		uint16_t count;
		uint32_t value;

		/* stage the payload word by word; a trailing partial word is
		 * zero-padded (value pre-cleared before the short memcpy)
		 */
		while (tx_size > 0) {
			if (tx_size > 4) {
				count = 4;
			} else {
				count = tx_size;
				value = 0;
			}

			memcpy(&value, tx_data, count);

			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
							htonl(value));
			if (result != 0) {
				NT_LOG(WRN, NTHW,
				       "nthw_spim_write_tx_fifo failed\n");
				return result;
			}

			tx_size = (uint16_t)(tx_size - count);
			tx_data += count;
		}
	}

	/* Enable Tx FIFO */
	result = nthw_spim_enable(p->mp_spim_mod, true);
	if (result != 0) {
		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
		return result;
	}

	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
	if (result != 0)
		return result;

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
	       __func__);
#endif

	{
		/*
		 * Start receiving data
		 * The first data to read is the header
		 */
		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
		bool rx_hdr_read = false;

		rx_buf->size = 0;

		while (true) {
			uint16_t count;
			uint32_t value;

			if (!rx_hdr_read) { /* Read the header */
				result = wait_for_rx_data_ready(p->mp_spis_mod,
								p->m_time_out);
				if (result != 0)
					return result;

				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
								&spi_rx_hdr.raw);
				if (result != 0) {
					NT_LOG(WRN, NTHW,
					       "nthw_spis_read_rx_fifo failed\n");
					return result;
				}

				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
				rx_size = spi_rx_hdr.size;
				rx_hdr_read = true; /* Next time read payload */

#ifdef SPI_V3_DEBUG_PRINT
				NT_LOG(DBG, NTHW,
				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
				       spi_rx_hdr.error_code, spi_rx_hdr.size);
#endif

				if (spi_rx_hdr.error_code != 0) {
					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
					break;
				}

				if (rx_size > max_payload_rx_size) {
					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
					break;
				}
			} else { /* Read the payload */
				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);

				if (count == 0)
					break;

				result = wait_for_rx_data_ready(p->mp_spis_mod,
							    p->m_time_out);
				if (result != 0)
					return result;

				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
							       &value);
				if (result != 0) {
					NT_LOG(WRN, NTHW,
					       "nthw_spis_read_rx_fifo failed\n");
					return result;
				}

				value = ntohl(value); /* Convert to host endian */
				memcpy(rx_data, &value, count);
				rx_buf->size = (uint16_t)(rx_buf->size + count);
				rx_size = (uint16_t)(rx_size - count);
				rx_data += count;
			}
		}
	}

#ifdef SPI_V3_DEBUG_PRINT
	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
	dump_hex(rx_buf->p_buf, rx_buf->size);
	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
#endif

	return result;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NT4GA_SPI_V3__
#define __NT4GA_SPI_V3__

/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
#define MAX_AVR_CONTAINER_SIZE (248)

/* Opcodes understood by the AVR peer on the other side of the SPI link */
enum avr_opcodes {
	AVR_OP_NOP = 0, /* v2 NOP command */
	/* version handlers */
	AVR_OP_VERSION = 1,
	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
	AVR_OP_SYSINFO = 3,
	/* Ping handlers */
	AVR_OP_PING = 4,
	AVR_OP_PING_DELAY = 5,
	/* i2c handlers */
	AVR_OP_I2C_READ = 9,
	AVR_OP_I2C_WRITE = 10,
	AVR_OP_I2C_RANDOM_READ = 11,
	/* VPD handlers */
	AVR_OP_VPD_READ = 19,
	AVR_OP_VPD_WRITE = 20,
	/* SENSOR handlers */
	AVR_OP_SENSOR_FETCH = 28,
	/* The following command are only relevant to V3 */
	AVR_OP_SENSOR_MON_CONTROL = 42,
	AVR_OP_SENSOR_MON_SETUP = 43,
	/* special version handler */
	AVR_OP_SYSINFO_2 = 62,
};

#define GEN2_AVR_IDENT_SIZE (20)
#define GEN2_AVR_VERSION_SIZE (50)

#define GEN2_PN_SIZE (13)
#define GEN2_PBA_SIZE (16)
#define GEN2_SN_SIZE (10)
#define GEN2_BNAME_SIZE (14)
#define GEN2_PLATFORM_SIZE (72)
#define GEN2_VPD_SIZE_TOTAL                                                  \
	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
	 GEN2_PLATFORM_SIZE + 2)

/* On-EEPROM Vital Product Data layout (see GEN2_VPD_SIZE_TOTAL) */
typedef struct vpd_eeprom_s {
	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
	uint8_t vpd_pn[GEN2_PN_SIZE];
	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
	uint8_t vpd_pba[GEN2_PBA_SIZE];
	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
	uint8_t vpd_sn[GEN2_SN_SIZE];
	/* Vital Product Data: Board Name (10bytes ascii) (e.g. "ntmainb1e2" or "ntfront20b1")
	 * NOTE(review): comment says 10 bytes but GEN2_BNAME_SIZE is 14 - confirm
	 */
	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
	/*
	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
	 * addresses)
	 */
	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
	/* CRC16 checksum of all of above. This field is not included in the checksum */
	uint16_t crc16;
} vpd_eeprom_t;

/* Decoded board identification/info record */
typedef struct {
	uint8_t psu_hw_revision;
	char board_type[GEN2_BNAME_SIZE + 1];
	char product_id[GEN2_PN_SIZE + 1];
	char pba_id[GEN2_PBA_SIZE + 1];
	char serial_number[GEN2_SN_SIZE + 1];
	uint8_t product_family;
	uint32_t feature_mask;
	uint32_t invfeature_mask;
	uint8_t no_of_macs;
	uint8_t mac_address[6];
	uint16_t custom_id;
	uint8_t user_id[8];
} board_info_t;

/* Caller-owned buffer descriptor used by nthw_spi_v3_transfer */
struct tx_rx_buf {
	uint16_t size;	/* in: capacity (rx) / length (tx); out: bytes received */
	void *p_buf;
};

/* SPI v3 transport state: paired SPI master (SPIM) and slave (SPIS) modules */
struct nthw__spi__v3 {
	int m_time_out;
	int mn_instance_no;
	nthw_spim_t *mp_spim_mod;
	nthw_spis_t *mp_spis_mod;
};

typedef struct nthw__spi__v3 nthw_spi_v3_t;
typedef struct nthw__spi__v3 nthw_spi_v3;

nthw_spi_v3_t *nthw_spi_v3_new(void);
int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
void nthw_spi_v3_delete(nthw_spi_v3_t *p);

int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
/* Execute one request/response exchange; see nthw_spi_v3.c for return codes */
int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);

#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+nthw_spim_t *nthw_spim_new(void)
+{
+	nthw_spim_t *p = malloc(sizeof(nthw_spim_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_spim_t));
+	return p;
+}
+
/*
 * Resolve all SPIM register/field handles for FPGA instance n_instance.
 * With p == NULL this only probes for the module's existence (-1 if absent).
 * Returns 0 on success, -1 when the module instance does not exist.
 */
int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);

	/* probe-only mode: report presence without initializing state */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_spim = mod;

	/* SPIM is a primary communication channel - turn off debug by default */
	module_set_debug_mode(p->mp_mod_spim, 0x00);

	/* SRR: software reset register */
	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);

	/* CR: control register */
	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);

	/* SR: status register (FIFO state and levels) */
	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);

	/* DTR/DRR: data transmit/receive registers */
	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);

	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);

	/* CFG: configuration (clock prescaler) */
	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);

	return 0;
}
+
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spim_t));
+		free(p);
+	}
+}
+
/* Issue a SPIM software reset via the SRR register; always returns 0. */
uint32_t nthw_spim_reset(nthw_spim_t *p)
{
	register_update(p->mp_reg_srr);
	field_set_val32(p->mp_fld_srr_rst,
		       0x0A); /* 0x0A hardcoded value - see doc */
	register_flush(p->mp_reg_srr, 1);

	return 0;
}
+
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
/* Push one 32-bit word into the SPIM Tx FIFO (DTR); always returns 0. */
uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
{
	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
	return 0;
}
+
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) ? true : false;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_SPIM_H__
#define __NTHW_SPIM_H__

/* SPI master (SPIM) module: register/field handles resolved at init time */
struct nthw_spim {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_spim;
	int mn_instance;

	/* SRR - software reset */
	nt_register_t *mp_reg_srr;
	nt_field_t *mp_fld_srr_rst;

	/* CR - control */
	nt_register_t *mp_reg_cr;
	nt_field_t *mp_fld_cr_loop;
	nt_field_t *mp_fld_cr_en;
	nt_field_t *mp_fld_cr_txrst;
	nt_field_t *mp_fld_cr_rxrst;

	/* SR - status (FIFO flags and levels) */
	nt_register_t *mp_reg_sr;
	nt_field_t *mp_fld_sr_done;
	nt_field_t *mp_fld_sr_txempty;
	nt_field_t *mp_fld_sr_rxempty;
	nt_field_t *mp_fld_sr_txfull;
	nt_field_t *mp_fld_sr_rxfull;
	nt_field_t *mp_fld_sr_txlvl;
	nt_field_t *mp_fld_sr_rxlvl;

	/* DTR/DRR - data transmit/receive */
	nt_register_t *mp_reg_dtr;
	nt_field_t *mp_fld_dtr_dtr;

	nt_register_t *mp_reg_drr;
	nt_field_t *mp_fld_drr_drr;
	/* CFG - configuration (prescaler) */
	nt_register_t *mp_reg_cfg;
	nt_field_t *mp_fld_cfg_pre;
};

typedef struct nthw_spim nthw_spim_t;
typedef struct nthw_spim nthw_spim;

nthw_spim_t *nthw_spim_new(void);
int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_spim_delete(nthw_spim_t *p);

uint32_t nthw_spim_reset(nthw_spim_t *p);
uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);

#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+nthw_spis_t *nthw_spis_new(void)
+{
+	nthw_spis_t *p = malloc(sizeof(nthw_spis_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_spis_t));
+	return p;
+}
+
/*
 * Resolve all SPIS register/field handles for FPGA instance n_instance.
 * With p == NULL this only probes for the module's existence (-1 if absent).
 * Returns 0 on success, -1 when the module instance does not exist.
 */
int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);

	/* probe-only mode: report presence without initializing state */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_spis = mod;

	/* SPIS is a primary communication channel - turn off debug by default */
	module_set_debug_mode(p->mp_mod_spis, 0x00);

	/* SRR: software reset register */
	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);

	/* CR: control register */
	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);

	/* SR: status register (FIFO state, levels and error flags) */
	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
	p->mp_fld_sr_frame_err =
		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
	p->mp_fld_sr_write_err =
		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);

	/* DTR/DRR: data transmit/receive registers */
	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);

	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);

	/* RAM_CTRL/RAM_DATA: sensor result RAM access (see nthw_spis_read_sensor) */
	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
	p->mp_fld_ram_ctrl_adr =
		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
	p->mp_fld_ram_ctrl_cnt =
		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);

	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
	p->mp_fld_ram_data_data =
		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);

	return 0;
}
+
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spis_t));
+		free(p);
+	}
+}
+
/* Issue a SPIS software reset via the SRR register; always returns 0. */
uint32_t nthw_spis_reset(nthw_spis_t *p)
{
	register_update(p->mp_reg_srr);
	field_set_val32(p->mp_fld_srr_rst,
		       0x0A); /* 0x0A hardcoded value - see doc */
	register_flush(p->mp_reg_srr, 1);

	return 0;
}
+
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) ? true : false;
+
+	return 0;
+}
+
/* Pop one 32-bit word from the SPIS Rx FIFO (DRR) into *p_data; returns 0. */
uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
{
	assert(p_data);

	*p_data = field_get_updated(p->mp_fld_drr_drr);

	return 0;
}
+
/*
 * Read one sensor result word from the SPIS result RAM: program the RAM
 * address/count, then fetch the data register. Always returns 0.
 */
uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
			      uint32_t *p_sensor_result)
{
	assert(p_sensor_result);

	/* select the result slot and request a single-word read */
	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
	register_flush(p->mp_reg_ram_ctrl, 1);

	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __NTHW_SPIS_H__
#define __NTHW_SPIS_H__

/* SPI slave (SPIS) module: register/field handles resolved at init time */
struct nthw_spis {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_spis;
	int mn_instance;

	/* SRR - software reset */
	nt_register_t *mp_reg_srr;
	nt_field_t *mp_fld_srr_rst;

	/* CR - control */
	nt_register_t *mp_reg_cr;
	nt_field_t *mp_fld_cr_loop;
	nt_field_t *mp_fld_cr_en;
	nt_field_t *mp_fld_cr_txrst;
	nt_field_t *mp_fld_cr_rxrst;
	nt_field_t *mp_fld_cr_debug;

	/* SR - status (FIFO flags, levels and error bits) */
	nt_register_t *mp_reg_sr;
	nt_field_t *mp_fld_sr_done;
	nt_field_t *mp_fld_sr_txempty;
	nt_field_t *mp_fld_sr_rxempty;
	nt_field_t *mp_fld_sr_txfull;
	nt_field_t *mp_fld_sr_rxfull;
	nt_field_t *mp_fld_sr_txlvl;
	nt_field_t *mp_fld_sr_rxlvl;
	nt_field_t *mp_fld_sr_frame_err;
	nt_field_t *mp_fld_sr_read_err;
	nt_field_t *mp_fld_sr_write_err;

	/* DTR/DRR - data transmit/receive */
	nt_register_t *mp_reg_dtr;
	nt_field_t *mp_fld_dtr_dtr;

	nt_register_t *mp_reg_drr;
	nt_field_t *mp_fld_drr_drr;

	/* RAM_CTRL/RAM_DATA - sensor result RAM access */
	nt_register_t *mp_reg_ram_ctrl;
	nt_field_t *mp_fld_ram_ctrl_adr;
	nt_field_t *mp_fld_ram_ctrl_cnt;

	nt_register_t *mp_reg_ram_data;
	nt_field_t *mp_fld_ram_data_data;
};

typedef struct nthw_spis nthw_spis_t;
typedef struct nthw_spis nthw_spis;

nthw_spis_t *nthw_spis_new(void);
int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_spis_delete(nthw_spis_t *p);

uint32_t nthw_spis_reset(nthw_spis_t *p);
uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
			      uint32_t *p_sensor_result);

#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate and zero-initialize a TSM instance.
+ *
+ * @return pointer to a zeroed nthw_tsm_t, or NULL on allocation failure.
+ *         Release with nthw_tsm_delete().
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc+memset pair. */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/*
+ * Scrub and free a TSM instance; a NULL argument is a no-op.
+ */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+
+	/* Wipe the handle table before releasing the memory. */
+	memset(p, 0, sizeof(nthw_tsm_t));
+	free(p);
+}
+
+/*
+ * Resolve the TSM (time stamp module) register and field handles.
+ *
+ * If p is NULL the call degenerates into a probe: it returns 0 when the
+ * TSM instance exists and -1 otherwise, without touching any state.
+ *
+ * @param p          TSM instance to populate (may be NULL for probing)
+ * @param p_fpga     FPGA model to query
+ * @param n_instance module instance number
+ * @return 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	/* Probe-only mode: report presence without initializing. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* Config: timestamp format selection. */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		/* Timer control: enable bits for timers T0 and T1. */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Wall-clock time, split into low (ns) and high (s) words. */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		/* Latched 64-bit timestamp, low and high words. */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM timestamp (TS_HI:TS_LO).
+ *
+ * @param p    TSM instance
+ * @param p_ts output: combined 64-bit timestamp; must not be NULL
+ * @return 0 on success, -1 if p_ts is NULL
+ *
+ * NOTE(review): the low and high words are fetched in two separate
+ * register reads; a carry from TS_LO into TS_HI between the reads could
+ * yield a torn value - confirm whether the hardware latches both words
+ * on the TS_LO access.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The early return above guarantees p_ts is valid here; the
+	 * original re-checked it redundantly before the store.
+	 */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM wall-clock time (TIME_HI:TIME_LO).
+ *
+ * @param p      TSM instance
+ * @param p_time output: combined 64-bit time; must not be NULL
+ * @return 0 on success, -1 if p_time is NULL
+ *
+ * NOTE(review): as with nthw_tsm_get_ts(), the two words are read in
+ * separate accesses and may be inconsistent across a low-word rollover -
+ * confirm the hardware latching behavior.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The early return above guarantees p_time is valid here; the
+	 * original re-checked it redundantly before the store.
+	 */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/*
+ * Load a 64-bit wall-clock time into the TSM TIME_HI:TIME_LO registers.
+ * Each word is written and flushed individually.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	const uint32_t n_time_lo = (uint32_t)(n_time & 0xFFFFFFFF);
+	const uint32_t n_time_hi = (uint32_t)((n_time >> 32) & 0xFFFFFFFF);
+
+	field_set_val_flush32(p->mp_fld_time_lo, n_time_lo);
+	field_set_val_flush32(p->mp_fld_time_hi, n_time_hi);
+	return 0;
+}
+
+/*
+ * Enable or disable TSM timer T0.
+ * Refreshes the shadow register first, then sets or clears the enable
+ * bit and flushes the result to hardware.
+ */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_flush(p_fld_en);
+	else
+		field_clr_flush(p_fld_en);
+	return 0;
+}
+
+/*
+ * Set the period (max count) of timer T0, in nanoseconds.
+ * T0 drives the statistics toggle; typical value is 50 ms.
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/*
+ * Enable or disable TSM timer T1.
+ * Refreshes the shadow register first, then sets or clears the enable
+ * bit and flushes the result to hardware.
+ */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_flush(p_fld_en);
+	else
+		field_clr_flush(p_fld_en);
+	return 0;
+}
+
+/*
+ * Set the period (max count) of timer T1, in nanoseconds.
+ * T1 drives the keep-alive; typical value is 100 ms.
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/*
+ * Select the TSM timestamp format.
+ *
+ * @param n_val format code written to TSM_CONFIG_TS_FORMAT
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * Register and field handles for one TSM (time stamp module) instance.
+ * All pointers are resolved from the FPGA register model at init time
+ * and are borrowed references - this struct owns none of them.
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance (not owned) */
+	nt_module_t *mp_mod_tsm;	/* TSM module handle */
+	int mn_instance;	/* module instance number */
+
+	nt_field_t *mp_fld_config_ts_format;	/* timestamp format select */
+
+	/* Enable bits for timers T0 (stat toggle) and T1 (keep-alive) */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;	/* T0 period (ns) */
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;	/* T1 period (ns) */
+
+	/* Latched timestamp, low/high 32-bit words */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* Wall-clock time, low (ns) and high (s) 32-bit words */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+/* Allocate a zeroed TSM instance; returns NULL on allocation failure. */
+nthw_tsm_t *nthw_tsm_new(void);
+/* Scrub and free an instance; NULL is accepted. */
+void nthw_tsm_delete(nthw_tsm_t *p);
+/* Resolve register/field handles; 0 on success, -1 if instance absent. */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate and zero-initialize a DBS instance.
+ *
+ * @return pointer to a zeroed nthw_dbs_t, or NULL on allocation failure.
+ *         Release with nthw_dbs_delete().
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc+memset pair. */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/*
+ * Scrub and free a DBS instance; a NULL argument is a no-op.
+ */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+
+	/* Wipe the handle/shadow state before releasing the memory. */
+	memset(p, 0, sizeof(nthw_dbs_t));
+	free(p);
+}
+
+/*
+ * Resolve all DBS (doorbell/queue system) register and field handles.
+ *
+ * If p is NULL the call degenerates into a probe: it returns 0 when the
+ * DBS module instance exists and -1 otherwise, without touching state.
+ *
+ * Handles obtained with module_query_register()/register_query_field()
+ * are optional: they may legitimately be absent on older FPGA images, in
+ * which case the pointers stay NULL and callers must check before use.
+ * Handles obtained with the get_* variants are mandatory.
+ *
+ * @return 0 on success, -1 when the module instance does not exist.
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	/* Probe-only mode: report presence without initializing. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* Sanity: module exists in the FPGA but the product parameter
+	 * claims it is absent - warn, but continue.
+	 */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* RX control register (mandatory). */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	/* TX control register (mandatory). */
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* Queue init registers (mandatory). */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	/* Optional registers below: query_* may return NULL. */
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* Available-monitor (AM) indexed CTRL/DATA register pairs. */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* Used-writer (UW) indexed CTRL/DATA register pairs. */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* Descriptor-reader (DR) indexed CTRL/DATA register pairs. */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* TX queue-property (QP) indexed CTRL/DATA register pair. */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the RX control register to defaults: all queues disabled,
+ * monitors/writers off, scan speed 8 and writer update speed 5.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset the TX control register to defaults: all queues disabled,
+ * monitors/writers off.
+ *
+ * NOTE(review): the scan speed (5) and writer update speed (8) are the
+ * mirror of the RX defaults (8 and 5) - confirm this asymmetry is
+ * intentional and not a transposition.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: restore both control registers to defaults, then zero
+ * every per-queue hardware memory bank together with its driver-side
+ * shadow copy (AM, UW, DR for RX; AM, UW, DR, QP, QoS for TX).
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the RX control register and flush it to hardware.
+ * The printf block is compiled in only with DBS_PRINT_REGS defined.
+ *
+ * @return 0 (always succeeds)
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the cached RX control fields into the caller's variables.
+ * Uses the shadow values (field_get_val32); no fresh bus read is done.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	return 0;
+}
+
+/*
+ * Program the TX control register and flush it to hardware
+ * (TX twin of set_rx_control).
+ *
+ * @return 0 (always succeeds)
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Was missing relative to the RX variant, which prints all args. */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the cached TX control fields into the caller's variables.
+ * Uses the shadow values (field_get_val32); no fresh bus read is done.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	return 0;
+}
+
+/*
+ * Start (or stop) initialization of one RX queue.
+ * The optional INIT_VAL register (start index/pointer) is written first
+ * when present; then the INIT register is flushed.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX init register fields (cached shadow values).
+ */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	uint32_t v_init = field_get_val32(p->mp_fld_rx_init_init);
+	uint32_t v_queue = field_get_val32(p->mp_fld_rx_init_queue);
+	uint32_t v_busy = field_get_val32(p->mp_fld_rx_init_busy);
+
+	*init = v_init;
+	*queue = v_queue;
+	*busy = v_busy;
+	return 0;
+}
+
+/*
+ * Start (or stop) initialization of one TX queue.
+ * The optional INIT_VAL register (start index/pointer) is written first
+ * when present; then the INIT register is flushed.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX init register fields (cached shadow values).
+ */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	uint32_t v_init = field_get_val32(p->mp_fld_tx_init_init);
+	uint32_t v_queue = field_get_val32(p->mp_fld_tx_init_queue);
+	uint32_t v_busy = field_get_val32(p->mp_fld_tx_init_busy);
+
+	*init = v_init;
+	*queue = v_queue;
+	*busy = v_busy;
+	return 0;
+}
+
+/*
+ * Write the RX idle register (idle request + queue) and flush.
+ * Returns -ENOTSUP when the optional register is absent.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX idle register with a fresh register read.
+ * Note: *queue is always set to 0 - the queue field is not read back
+ * here. Returns -ENOTSUP when the optional register is absent.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Write the TX idle register (idle request + queue) and flush.
+ * Returns -ENOTSUP when the optional register is absent.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX idle register with a fresh register read.
+ * Note: *queue is always set to 0 - the queue field is not read back
+ * here. Returns -ENOTSUP when the optional register is absent.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the PTR register reflects, then flush.
+ * Returns -ENOTSUP when the optional register is absent.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (p->mp_reg_rx_ptr == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX queue pointer with a fresh register read.
+ * Note: *queue is always set to 0 - the queue field is not read back
+ * here. Returns -ENOTSUP when the optional register is absent.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the PTR register reflects, then flush.
+ * Returns -ENOTSUP when the optional register is absent.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (p->mp_reg_tx_ptr == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Point the RX available-monitor data bank at entry 'index' (count = 1)
+ * so a following write to RX_AM_DATA lands on that entry.
+ */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Store the guest physical address in the RX_AM_DATA shadow entry only;
+ * hardware is updated later by flush_rx_am_data().
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/*
+ * Per-field shadow setters for DBS_RX_AM_DATA. Values go into the shadow
+ * bank only; flush_rx_am_data() pushes a whole entry to hardware.
+ * NOTE: renamed nthw_dbs_set_shadow_rx_am_data_enable() to
+ * set_shadow_rx_am_data_enable() for consistency with its sibling setters;
+ * the function is static (file-local), so no external callers are affected.
+ */
+static void set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Populate every field of one RX_AM_DATA shadow entry. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Push one RX_AM_DATA shadow entry to hardware: load the data register
+ * fields from the shadow, select the bank index, then flush.
+ * The 64-bit guest physical address is written as two 32-bit words via
+ * field_set_val(..., 2); NOTE(review): the (uint32_t *) cast assumes the
+ * field API tolerates this aliasing and word order - confirm in fpga_model.
+ * PACKED and INT fields are optional and only written when present in
+ * this FPGA image.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: configure one RX available-monitor entry (shadow + flush).
+ * Returns -ENOTSUP when the RX_AM_DATA register is not present.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Point the TX available-monitor data bank at entry 'index' (count = 1). */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/*
+ * Populate one TX_AM_DATA shadow entry. Unlike the RX variant this writes
+ * the struct members directly instead of per-field helpers - behavior is
+ * the same, only the style differs.
+ */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Push one TX_AM_DATA shadow entry to hardware (mirror of
+ * flush_rx_am_data(); optional PACKED/INT fields written only if present).
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: configure one TX available-monitor entry (shadow + flush).
+ * Returns -ENOTSUP when the TX_AM_DATA register is not present.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Point the RX used-writer data bank at entry 'index' (count = 1). */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/*
+ * Per-field shadow setters for DBS_RX_UW_DATA; hardware is updated by
+ * flush_rx_uw_data(). queue_size is kept as a log2 value - see the
+ * (1U << queue_size) - 1U conversion in flush_rx_uw_data().
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Populate every field of one RX_UW_DATA shadow entry. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Push one RX_UW_DATA shadow entry to hardware.
+ * Queue size encoding depends on the DBS module version: newer than 0.8
+ * takes a mask ((1 << log2_size) - 1), older takes the raw (log2) value.
+ * VEC/ISTK are only written when the interrupt field exists in this image.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: configure one RX used-writer entry (shadow + flush).
+ * queue_size is the log2 of the ring size (see flush_rx_uw_data()).
+ * Returns -ENOTSUP when the RX_UW_DATA register is not present.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the TX used-writer data bank at entry 'index' (count = 1). */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/*
+ * Per-field shadow setters for DBS_TX_UW_DATA (TX adds the in_order field
+ * relative to RX); hardware is updated by flush_tx_uw_data().
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Populate every field of one TX_UW_DATA shadow entry. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Push one TX_UW_DATA shadow entry to hardware (mirror of
+ * flush_rx_uw_data(), plus the optional IN_ORDER field).
+ * Queue size: mask encoding for DBS newer than 0.8, raw log2 otherwise.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: configure one TX used-writer entry (shadow + flush).
+ * queue_size is the log2 of the ring size (see flush_tx_uw_data()).
+ * Returns -ENOTSUP when the TX_UW_DATA register is not present.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the RX descriptor-reader data bank at entry 'index' (count = 1). */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/*
+ * Per-field shadow setters for DBS_RX_DR_DATA; hardware is updated by
+ * flush_rx_dr_data().
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Populate every field of one RX_DR_DATA shadow entry. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Push one RX_DR_DATA shadow entry to hardware.
+ * Queue size: mask encoding for DBS newer than 0.8, raw log2 otherwise.
+ * PACKED is optional and only written when present in this FPGA image.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: configure one RX descriptor-reader entry (shadow + flush).
+ * Returns -ENOTSUP when the RX_DR_DATA register is not present.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX descriptor-reader data bank at entry 'index' (count = 1). */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/*
+ * Per-field shadow setters for DBS_TX_DR_DATA (TX adds the port field
+ * relative to RX); hardware is updated by flush_tx_dr_data().
+ */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/* Populate every field of one TX_DR_DATA shadow entry. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Push one TX_DR_DATA shadow entry to hardware (mirror of
+ * flush_rx_dr_data(), plus the PORT field).
+ * Queue size: mask encoding for DBS newer than 0.8, raw log2 otherwise.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: configure one TX descriptor-reader entry (shadow + flush).
+ * Returns -ENOTSUP when the TX_DR_DATA register is not present.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX queue-property data bank at entry 'index' (count = 1). */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow setter for the single TX_QP_DATA field (virtual port). */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Push one TX_QP_DATA shadow entry to hardware. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Public entry: map a TX queue to a virtual port (shadow + flush).
+ * Returns -ENOTSUP when the TX_QP_DATA register is not present.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX QoS data bank at entry 'index' (count = 1).
+ * NOTE(review): the QoS field members are named mp_reg_* in nthw_dbs_s
+ * although they are nt_field_t pointers like the mp_fld_* members used by
+ * every other function here - a naming inconsistency worth fixing together
+ * with the header.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/*
+ * Per-field shadow setters for DBS_TX_QOS_DATA; hardware is updated by
+ * flush_tx_qos_data().
+ */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Populate every field of one TX_QOS_DATA shadow entry. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Push one TX_QOS_DATA shadow entry to hardware. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Public entry: configure TX QoS (enable, information rate, burst size)
+ * for one queue (shadow + flush).
+ * Returns -ENOTSUP when the TX_QOS_DATA register is not present.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate as a mul/div fraction (no shadow bank;
+ * this is a plain register, written and flushed directly).
+ * Returns -ENOTSUP when the TX_QOS_RATE register is not present.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address; /* written to HW as 2 x 32-bit words */
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of entries; converted on flush for DBS > v0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of entries; converted on flush for DBS > v0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order; /* TX only: in-order completion flag */
+};
+
+/* DBS_RX_DR_DATA */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of entries; converted on flush for DBS > v0.8 */
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of entries; converted on flush for DBS > v0.8 */
+	uint32_t header;
+	uint32_t port; /* TX only: destination port */
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* DBS_TX_QOS_DATA: enable, information rate, burst size */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * Driver-side state for the DBS (doorbell/queue setup) FPGA module:
+ * cached register/field handles plus shadow copies of the memory banks.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	/* RX_CONTROL / TX_CONTROL */
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	/* Queue init / pointer / idle registers */
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	/* Optional registers (NULL when absent from the FPGA image) */
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Available-monitor bank control/data */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	/* Used-writer bank control/data */
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	/* Descriptor-reader bank control/data */
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	/* TX queue property bank */
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/* TX QoS bank.
+	 * NOTE(review): the nt_field_t members below use an mp_reg_ prefix
+	 * instead of the mp_fld_ prefix used everywhere else - they are
+	 * field handles, not registers; candidate for a later rename.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Shadow copies of the hardware memory banks, indexed by queue */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
/* Classification of a port when programming per-port tables (see nthw_epp_set_mtu()). */
typedef enum nt_meta_port_type_e {
	PORT_TYPE_PHYSICAL, /* physical (TXP) port */
	PORT_TYPE_VIRTUAL, /* virtual, queue-based port */
	PORT_TYPE_OVERRIDE, /* NOTE(review): semantics not visible in this file - confirm */
} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
/* Hardware identification as reported by firmware and vital product data (VPD). */
typedef struct nthwhw_info_s {
	/* From FW */
	int hw_id; /* hardware id */
	int hw_id_emulated; /* presumably the id presented when emulated - TODO confirm */
	char hw_plat_id_str[32]; /* platform id string (NUL-terminated) */

	/* Vital product data */
	struct vpd_info_s {
		int mn_mac_addr_count; /* number of MAC addresses assigned to the adapter */
		uint64_t mn_mac_addr_value; /* base MAC address packed into an integer */
		uint8_t ma_mac_addr_octets[6]; /* base MAC address as octets */
	} vpd_info;
} nthw_hw_info_t;
+
/*
 * Per-adapter FPGA state: identification codes, handles to the core
 * modules and the resources needed for register access over BAR0.
 */
typedef struct fpga_info_s {
	uint64_t n_fpga_ident; /* raw FPGA ident; decoded via FPGAID_TO_* macros */

	int n_fpga_type_id; /* FPGA type/item id */
	int n_fpga_prod_id; /* product code */
	int n_fpga_ver_id; /* version code */
	int n_fpga_rev_id; /* revision code */

	int n_fpga_build_time; /* build timestamp - presumably seconds since epoch; confirm */

	int n_fpga_debug_mode; /* debug mode propagated to modules (see fpga_set_debug_mode) */

	/* Capability counts read from the FPGA description */
	int n_nims;
	int n_phy_ports;
	int n_phy_quads;
	int n_rx_ports;
	int n_tx_ports;

	enum fpga_info_profile profile; /* product profile (declared in nthw_profile.h) */

	struct nt_fpga_s *mp_fpga; /* FPGA model instance (see nthw_fpga_model.c) */

	/* Core module handles; NULL when the module is absent */
	struct nthw_rac *mp_nthw_rac;
	struct nthw_hif *mp_nthw_hif;
	struct nthw_pcie3 *mp_nthw_pcie3;
	struct nthw_tsm *mp_nthw_tsm;

	nthw_dbs_t *mp_nthw_dbs; /* doorbell module */
	nthw_epp_t *mp_nthw_epp; /* encapsulation post-processor module */

	uint8_t *bar0_addr; /* Needed for register read/write */
	size_t bar0_size;

	int adapter_no; /* Needed for nthw_rac DMA array indexing */
	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */

	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */

	struct nthwhw_info_s nthw_hw_info; /* FW/VPD identification (above) */

	nthw_adapter_id_t n_nthw_adapter_id;

} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+nthw_epp_t *nthw_epp_new(void)
+{
+	nthw_epp_t *p = malloc(sizeof(nthw_epp_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_epp_t));
+	return p;
+}
+
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_epp_t));
+		free(p);
+	}
+}
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
/*
 * Resolve all EPP registers and fields for module instance n_instance.
 * When p is NULL this only probes for the module's presence: returns 0 when
 * found, -1 when absent (used by nthw_epp_present()). With a non-NULL p,
 * fills in every register/field handle and returns 0, or -1 if the module
 * instance does not exist.
 */
int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);

	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_epp = mod;

	/* Number of recipe categories for this product (0 if the param is absent) */
	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);

	/* Recipe (RCP) memory: control selects the entry, data carries its fields */
	p->mp_reg_reciepe_memory_control =
		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
	p->mp_fld_reciepe_memory_control_adr =
		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
	p->mp_fld_reciepe_memory_control_cnt =
		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);

	p->mp_reg_reciepe_memory_data =
		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);

	/* Per-physical-port (TXP) MTU table */
	p->mp_reg_txp_port_mtu_control =
		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
	p->mp_fld_txp_port_mtu_control_adr =
		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
	p->mp_fld_txp_port_mtu_control_cnt =
		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);

	p->mp_reg_txp_port_mtu_data =
		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
	p->mp_fld_txp_port_mtu_data_max_mtu =
		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);

	/* Per-queue MTU table */
	p->mp_reg_queue_mtu_control =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
	p->mp_fld_queue_mtu_control_adr =
		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
	p->mp_fld_queue_mtu_control_cnt =
		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);

	p->mp_reg_queue_mtu_data =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
	p->mp_fld_queue_mtu_data_max_mtu =
		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);

	/* TXP QoS shaper table (enable, information rate, burst size) */
	p->mp_reg_txp_qos_control =
		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
	p->mp_fld_txp_qos_control_adr =
		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
	p->mp_fld_txp_qos_control_cnt =
		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);

	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
	p->mp_fld_txp_qos_data_enable =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
	p->mp_fld_txp_qos_data_information_rate =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
	p->mp_fld_txp_qos_data_information_rate_fractional =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
	p->mp_fld_txp_qos_data_burst_size =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);

	/* Virtual-port QoS shaper table */
	p->mp_reg_vport_qos_control =
		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
	p->mp_fld_vport_qos_control_adr =
		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
	p->mp_fld_vport_qos_control_cnt =
		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);

	p->mp_reg_vport_qos_data =
		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
	p->mp_fld_vport_qos_data_enable =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
	p->mp_fld_vport_qos_data_information_rate =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
	p->mp_fld_vport_qos_data_information_rate_fractional =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
	p->mp_fld_vport_qos_data_burst_size =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);

	/* Queue-to-virtual-port mapping table */
	p->mp_reg_queue_vport_control =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
	p->mp_fld_queue_vport_control_adr =
		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
	p->mp_fld_queue_vport_control_cnt =
		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);

	p->mp_reg_queue_vport_data =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
	p->mp_fld_queue_vport_data_vport =
		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);

	return 0;
}
+
/*
 * Bring the EPP tables to a known default state: clear all recipe
 * categories, program the NRECIPE default recipes (none/VXLAN/VXLAN-IPv6
 * size adjusts), set all MTUs to MTUINITVAL and disable all QoS shapers.
 * A NULL context is a no-op. Always returns 0.
 */
int nthw_epp_setup(nthw_epp_t *p)
{
	if (p == NULL)
		return 0;

	/* Write one recipe entry per control-register flush */
	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);

	/* Zero all categories */
	for (int i = 0; i < p->mn_epp_categories; ++i) {
		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
		register_flush(p->mp_reg_reciepe_memory_control, 1);

		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
			       0);
		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
		register_flush(p->mp_reg_reciepe_memory_data, 1);
	}

	/* Program the default recipes with the static size-adjust tables */
	for (int i = 0; i < NRECIPE; ++i) {
		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
		register_flush(p->mp_reg_reciepe_memory_control, 1);

		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
			       rcp_data_size_adjust_txp[i]);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
			       rcp_data_size_adjust_vport[i]);
		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
		register_flush(p->mp_reg_reciepe_memory_data, 1);
	}
	/* phy mtu setup - NOTE(review): hard-coded 2 physical ports; confirm */
	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
	for (int i = 0; i < 2; ++i) {
		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
		register_flush(p->mp_reg_txp_port_mtu_control, 1);

		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
		register_flush(p->mp_reg_txp_port_mtu_data, 1);
	}
	/* phy QoS setup: shaper disabled on both ports */
	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
	for (int i = 0; i < 2; ++i) {
		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
		register_flush(p->mp_reg_txp_qos_control, 1);

		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
		register_flush(p->mp_reg_txp_qos_data, 1);
	}

	/* virt mtu setup - NOTE(review): hard-coded 128 queues; confirm */
	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
	for (int i = 0; i < 128; ++i) {
		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
		register_flush(p->mp_reg_queue_mtu_control, 1);

		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
		register_flush(p->mp_reg_queue_mtu_data, 1);
	}

	/* virt QoS setup: shaper disabled on all virtual ports */
	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
	for (int i = 0; i < 128; ++i) {
		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
		register_flush(p->mp_reg_vport_qos_control, 1);

		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
		register_flush(p->mp_reg_vport_qos_data, 1);
	}

	return 0;
}
+
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Set the TXP Mtu control register */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Set the TXP Mtu control register */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Set the TXP Mtu control register */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Set the TXP Mtu control register */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/* EPP (encapsulation post-processor) register/field handles and API. */
#ifndef NTHW_EPP_HPP_
#define NTHW_EPP_HPP_

/* VXLAN adds extra 50 bytes */
#define VXLANDATASIZEADJUST 50
#define VXLANDATASIZEADJUSTIPV6 70
#define MTUINITVAL 1500
/* Number of recipe-table entries programmed by nthw_epp_setup() */
#define NRECIPE 3

/* List of size adjust values to put in the recipe memory data register at startup */
/*
 * NOTE(review): static const arrays defined in a header are duplicated in
 * every translation unit that includes it - consider moving to the .c file.
 */
static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
						   VXLANDATASIZEADJUSTIPV6
						 };
static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
						     VXLANDATASIZEADJUSTIPV6
						   };

/* EPP context: one instance per EPP module, populated by nthw_epp_init() */
struct nthw_epp_s {
	nt_fpga_t *mp_fpga; /* owning FPGA model */
	nt_module_t *mp_mod_epp; /* the EPP module instance */
	int mn_instance; /* module instance number */
	int mn_epp_categories; /* NT_EPP_CATEGORIES product parameter */

	/* Recipe (RCP) memory control/data */
	nt_register_t *mp_reg_reciepe_memory_control;
	nt_field_t *mp_fld_reciepe_memory_control_adr;
	nt_field_t *mp_fld_reciepe_memory_control_cnt;

	nt_register_t *mp_reg_reciepe_memory_data;
	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;

	/* Per-physical-port (TXP) MTU table */
	nt_register_t *mp_reg_txp_port_mtu_control;
	nt_field_t *mp_fld_txp_port_mtu_control_adr;
	nt_field_t *mp_fld_txp_port_mtu_control_cnt;

	nt_register_t *mp_reg_txp_port_mtu_data;
	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;

	/* Per-queue MTU table */
	nt_register_t *mp_reg_queue_mtu_control;
	nt_field_t *mp_fld_queue_mtu_control_adr;
	nt_field_t *mp_fld_queue_mtu_control_cnt;

	nt_register_t *mp_reg_queue_mtu_data;
	nt_field_t *mp_fld_queue_mtu_data_max_mtu;

	/* TXP QoS shaper table */
	nt_register_t *mp_reg_txp_qos_control;
	nt_field_t *mp_fld_txp_qos_control_adr;
	nt_field_t *mp_fld_txp_qos_control_cnt;

	nt_register_t *mp_reg_txp_qos_data;
	nt_field_t *mp_fld_txp_qos_data_enable;
	nt_field_t *mp_fld_txp_qos_data_information_rate;
	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
	nt_field_t *mp_fld_txp_qos_data_burst_size;

	/* Virtual-port QoS shaper table */
	nt_register_t *mp_reg_vport_qos_control;
	nt_field_t *mp_fld_vport_qos_control_adr;
	nt_field_t *mp_fld_vport_qos_control_cnt;

	nt_register_t *mp_reg_vport_qos_data;
	nt_field_t *mp_fld_vport_qos_data_enable;
	nt_field_t *mp_fld_vport_qos_data_information_rate;
	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
	nt_field_t *mp_fld_vport_qos_data_burst_size;

	/* Queue-to-virtual-port mapping table */
	nt_register_t *mp_reg_queue_vport_control;
	nt_field_t *mp_fld_queue_vport_control_adr;
	nt_field_t *mp_fld_queue_vport_control_cnt;

	nt_register_t *mp_reg_queue_vport_data;
	nt_field_t *mp_fld_queue_vport_data_vport;
};

typedef struct nthw_epp_s nthw_epp_t;

nthw_epp_t *nthw_epp_new(void);
void nthw_epp_delete(nthw_epp_t *p);

int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
int nthw_epp_setup(nthw_epp_t *p);
int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
		   nt_meta_port_type_t port_type);
int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
		      uint32_t information_rate_fractional, uint32_t burst_size);
int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
			uint32_t information_rate_fractional, uint32_t burst_size);
int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);

#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
/* Generated code */
/* NULL-terminated table of all FPGA descriptions built into this PMD. */
nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
					       NULL
					     };
+
/*
 * Module id -> printable name map, used by nthw_fpga_mod_id_to_str().
 * The table is terminated by a { 0L, NULL } sentinel entry.
 */
static const struct {
	const int a; /* module id (MOD_*) */
	const char *b; /* printable name */
} sa_nthw_fpga_mod_map[] = {
	{ MOD_CAT, "CAT" },
	{ MOD_CB, "CB" },
	{ MOD_CCIP, "CCIP" },
	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
	{ MOD_COR, "COR" },
	{ MOD_CPY, "CPY" },
	{ MOD_CSU, "CSU" },
	{ MOD_DBS, "DBS" },
	{ MOD_DDP, "DDP" },
	{ MOD_EPP, "EPP" },
	{ MOD_EQM, "EQM" },
	{ MOD_FHM, "FHM" },
	{ MOD_FLM, "FLM" },
	{ MOD_GFG, "GFG" },
	{ MOD_GMF, "GMF" },
	{ MOD_GPIO_PHY, "GPIO_PHY" },
	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
	{ MOD_HFU, "HFU" },
	{ MOD_HIF, "HIF" },
	{ MOD_HSH, "HSH" },
	{ MOD_HST, "HST" },
	{ MOD_ICORE_10G, "ICORE_10G" },
	{ MOD_IFR, "IFR" },
	{ MOD_IIC, "IIC" },
	{ MOD_INS, "INS" },
	{ MOD_IOA, "IOA" },
	{ MOD_IPF, "IPF" },
	{ MOD_KM, "KM" },
	{ MOD_LAO, "LAO" },
	{ MOD_MAC, "MAC" },
	{ MOD_MAC10, "MAC10" },
	{ MOD_MAC100, "MAC100" },
	{ MOD_MAC10G, "MAC10G" },
	{ MOD_MAC1G, "MAC1G" },
	{ MOD_MAC_PCS, "MAC_PCS" },
	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
	{ MOD_MAC_RX, "MAC_RX" },
	{ MOD_MAC_TFG, "MAC_TFG" },
	{ MOD_MAC_TX, "MAC_TX" },
	{ MOD_MCU, "MCU" },
	{ MOD_MDG, "MDG" },
	{ MOD_MSK, "MSK" },
	{ MOD_NIF, "NIF" },
	{ MOD_PCIE3, "PCIE3" },
	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
	{ MOD_PCI_TA, "PCI_TA" },
	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
	{ MOD_PCS, "PCS" },
	{ MOD_PCS100, "PCS100" },
	{ MOD_PDB, "PDB" },
	{ MOD_PDI, "PDI" },
	{ MOD_PHY10G, "PHY10G" },
	{ MOD_PHY3S10G, "PHY3S10G" },
	{ MOD_PM, "PM" },
	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
	{ MOD_PTP1588, "PTP1588" },
	{ MOD_QM, "QM" },
	{ MOD_QSL, "QSL" },
	{ MOD_QSPI, "QSPI" },
	{ MOD_R2DRP, "R2DRP" },
	{ MOD_RAC, "RAC" },
	{ MOD_RBH, "RBH" },
	{ MOD_RFD, "RFD" },
	{ MOD_RMC, "RMC" },
	{ MOD_RNTC, "RNTC" },
	{ MOD_ROA, "ROA" },
	{ MOD_RPL, "RPL" },
	{ MOD_RPP_LR, "RPP_LR" },
	{ MOD_RST7000, "RST7000" },
	{ MOD_RST7001, "RST7001" },
	{ MOD_RST9500, "RST9500" },
	{ MOD_RST9501, "RST9501" },
	{ MOD_RST9502, "RST9502" },
	{ MOD_RST9503, "RST9503" },
	{ MOD_RST9504, "RST9504" },
	{ MOD_RST9505, "RST9505" },
	{ MOD_RST9506, "RST9506" },
	{ MOD_RST9507, "RST9507" },
	{ MOD_RST9508, "RST9508" },
	{ MOD_RST9509, "RST9509" },
	{ MOD_RST9510, "RST9510" },
	{ MOD_RST9512, "RST9512" },
	{ MOD_RST9513, "RST9513" },
	{ MOD_RST9515, "RST9515" },
	{ MOD_RST9516, "RST9516" },
	{ MOD_RST9517, "RST9517" },
	{ MOD_RST9519, "RST9519" },
	{ MOD_RST9520, "RST9520" },
	{ MOD_RST9521, "RST9521" },
	{ MOD_RST9522, "RST9522" },
	{ MOD_RST9523, "RST9523" },
	{ MOD_RST9524, "RST9524" },
	{ MOD_RST9525, "RST9525" },
	{ MOD_RST9526, "RST9526" },
	{ MOD_RST9527, "RST9527" },
	{ MOD_RST9528, "RST9528" },
	{ MOD_RST9529, "RST9529" },
	{ MOD_RST9530, "RST9530" },
	{ MOD_RST9531, "RST9531" },
	{ MOD_RST9532, "RST9532" },
	{ MOD_RST9533, "RST9533" },
	{ MOD_RST9534, "RST9534" },
	{ MOD_RST9535, "RST9535" },
	{ MOD_RST9536, "RST9536" },
	{ MOD_RST9537, "RST9537" },
	{ MOD_RST9538, "RST9538" },
	{ MOD_RST9539, "RST9539" },
	{ MOD_RST9540, "RST9540" },
	{ MOD_RST9541, "RST9541" },
	{ MOD_RST9542, "RST9542" },
	{ MOD_RST9543, "RST9543" },
	{ MOD_RST9544, "RST9544" },
	{ MOD_RST9545, "RST9545" },
	{ MOD_RST9546, "RST9546" },
	{ MOD_RST9547, "RST9547" },
	{ MOD_RST9548, "RST9548" },
	{ MOD_RST9549, "RST9549" },
	{ MOD_RST9553, "RST9553" },
	{ MOD_RST9555, "RST9555" },
	{ MOD_RST9559, "RST9559" },
	{ MOD_RST9563, "RST9563" },
	{ MOD_RTD, "RTD" },
	{ MOD_RTD_HMP, "RTD_HMP" },
	{ MOD_RTX, "RTX" },
	{ MOD_SDC, "SDC" },
	{ MOD_SLC, "SLC" },
	{ MOD_SLC_LR, "SLC_LR" },
	{ MOD_SMM, "SMM" },
	{ MOD_SMM_RX, "SMM_RX" },
	{ MOD_SMM_TX, "SMM_TX" },
	{ MOD_SPIM, "SPIM" },
	{ MOD_SPIS, "SPIS" },
	{ MOD_STA, "STA" },
	{ MOD_TBH, "TBH" },
	{ MOD_TEMPMON, "TEMPMON" },
	{ MOD_TINT, "TINT" },
	{ MOD_TMC, "TMC" },
	{ MOD_TSM, "TSM" },
	{ MOD_TX_CPY, "TX_CPY" },
	{ MOD_TX_CSI, "TX_CSI" },
	{ MOD_TX_CSO, "TX_CSO" },
	{ MOD_TX_INS, "TX_INS" },
	{ MOD_TX_RPL, "TX_RPL" },
	{ 0L, NULL }, /* sentinel */
};
+
/* NOTE: this needs to be (manually) synced with enum */
/* Bus names indexed by bus-type id; looked up via get_bus_name(). */
static const char *const a_bus_type[] = {
	"ERR", /* BUS_TYPE_UNKNOWN, */
	"BAR", /* BUS_TYPE_BAR, */
	"PCI", /* BUS_TYPE_PCI, */
	"CCIP", /* BUS_TYPE_CCIP, */
	"RAB0", /* BUS_TYPE_RAB0, */
	"RAB1", /* BUS_TYPE_RAB1, */
	"RAB2", /* BUS_TYPE_RAB2, */
	"NMB", /* BUS_TYPE_NMB, */
	"NDM", /* BUS_TYPE_NDM, */
};
+
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id <= (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	else
+		return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i <= (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
+/*
+ * Force C linkage for xxx_addr_bases and xxx_module_versions
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
/*
 * Same as read_data(), with placeholders for timestamp-counter sampling
 * before/after the access; the TSC outputs are currently unused.
 */
static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
		       uint32_t addr, uint32_t len, uint32_t *p_data,
		       uint64_t *p_tsc1, uint64_t *p_tsc2)
{
	(void)p_tsc1;
	(void)p_tsc2;

	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
}
+
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = malloc(sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t i;
+
+	/* Count fpga instance in array */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	for (i = 0; i < ARRAY_SIZE(nthw_fpga_instances); i++) {
+		if (p->mpa_fpga_prod_init[i] == NULL)
+			break;
+	}
+	p->mn_fpgas = (int)i;
+}
+
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id == n_fpga_prod &&
+				p_init->fpga_version == n_fpga_ver &&
+				p_init->fpga_revision == n_fpga_rev) {
+			{
+				nt_fpga_t *p_fpga = fpga_new();
+
+				fpga_init(p_fpga, p_init, p_fpga_info);
+				return p_fpga;
+			}
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision, fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
/* Dump the registered FPGA descriptions to the debug log. */
void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
{
	int i;

	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
	for (i = 0; i < p->mn_fpgas; i++) {
		/* _unused: presumably silences the unused-variable warning when NT_LOG compiles out - confirm */
		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
		       p_init->fpga_revision);
	}
}
+
+/*
+ * Fpga
+ */
+nt_fpga_t *fpga_new(void)
+{
+	nt_fpga_t *p = malloc(sizeof(nt_fpga_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_fpga_t));
+	return p;
+}
+
+void fpga_delete(nt_fpga_t *p)
+{
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	fpga_delete(p);
+}
+
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod->m_mod_id == id && p_mod->m_instance == instance)
+			return p_mod;
+	}
+	return NULL;
+}
+
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return (fpga_query_module(p, id, instance) != NULL);
+}
+
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mp_init->nb_modules; i++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[i];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/*
+ * Look up a product parameter value by id.
+ * Returns n_default_value when the parameter is not present.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int i;
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_param = p->mpa_params[i];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Product id of this FPGA image. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* Version number of this FPGA image. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* Revision number of this FPGA image. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the full FPGA identification tuple and build time. */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug-dump the FPGA summary followed by all params and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug-dump every product parameter of the FPGA. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_par = p->mpa_params[i];
+
+		param_dump(p_par);
+	}
+}
+
+/* Debug-dump every module of the FPGA (including their registers). */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		module_dump(p_mod);
+	}
+}
+
+/*
+ * Param
+ */
+/* Allocate a parameter object; members are filled in by param_init().
+ * NOTE(review): callers do not check the result for NULL - confirm
+ * allocation-failure policy.
+ */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = malloc(sizeof(nt_param_t));
+	return p;
+}
+
+/* Zero and free a parameter object; tolerates NULL. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a parameter to its owner FPGA and copy id/value from init data. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-log one parameter's id and value. */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/*
+ * Allocate a module object.
+ * Fix: use calloc so all members start out zero/NULL. module_init()
+ * only assigns mpa_registers when nb_registers != 0, and
+ * module_delete() reads that pointer - with plain malloc it could be
+ * indeterminate.
+ * NOTE(review): callers do not check the result for NULL.
+ */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+/*
+ * Free a module object and all of its registers.
+ * Fix: the register pointer array itself (mpa_registers) was never
+ * freed, leaking one allocation per module. Only touch it when
+ * module_init() actually allocated it (mn_registers != 0).
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	if (p->mn_registers)
+		free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialise a module from its static init descriptor: copy identity,
+ * version and bus information, inherit the owner's debug mode, and
+ * instantiate all registers.
+ * Fix: explicitly NULL mpa_registers so it is never left indeterminate
+ * when nb_registers == 0 (module_delete() reads this pointer).
+ * NOTE(review): register_new()/malloc results are not checked; a failed
+ * allocation would make register_init() dereference NULL - confirm
+ * allocation-failure policy.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mpa_registers = NULL;
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Initialise a module by looking up its init descriptor from the FPGA
+ * and then forcing a specific debug mode.
+ * NOTE(review): fpga_lookup_init() may return NULL for an unknown
+ * (mod_id, instance); module_init() would then dereference NULL -
+ * confirm callers only pass known ids.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump a module's identity/version/bus info and its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register of a module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+/* Module major version. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Module minor version. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack (major, minor) into one 64-bit value: major in the high word. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/*
+ * True when the module version is at least (major, minor).
+ * Minor is only compared when the majors are equal.
+ */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+/* Linear search for a register by id; returns NULL when not present. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		nt_register_t *p_cur = p->mpa_registers[n];
+
+		if (p_cur->m_id == id)
+			return p_cur;
+	}
+	return NULL;
+}
+
+/*
+ * Get a register by id, logging an error when the module is NULL or
+ * the register is missing. Returns NULL in both error cases.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Like module_get_register() but silent: NULL without logging. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Current debug mode of the module. */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the module debug mode and propagate it to every register. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_register_t *p_register = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		p_register = p->mpa_registers[i];
+		if (p_register)
+			register_set_debug_mode(p_register, n_debug_mode);
+	}
+}
+
+/* Bus type id this module is attached to (see nthw_bus_type). */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Base address of the module on its bus. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* Log that a module present in the FPGA image is not supported.
+ * NOTE(review): name is misspelled ("unsuppported") but is part of the
+ * public header, so it cannot be renamed here.
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	NT_LOG(ERR, NTHW, "Module %d not supported", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/*
+ * Allocate a register object.
+ * Fix: use calloc so mpa_fields/mp_shadow/mp_dirty start out NULL.
+ * register_init() only assigns them when nb_fields != 0, and
+ * register_delete() reads all three pointers - with plain malloc they
+ * could be indeterminate.
+ * NOTE(review): callers do not check the result for NULL.
+ */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+/*
+ * Free a register object, its fields and its shadow/dirty buffers.
+ * Fixes: the field pointer array itself (mpa_fields) was never freed
+ * (leak) - free it when register_init() allocated it; drop the
+ * redundant NULL guards around free() (free(NULL) is a no-op).
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+
+	if (p->mn_fields)
+		free(p->mpa_fields);
+
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Initialise a register from its static init descriptor: compute the
+ * absolute address, the length in 32-bit words, and allocate the field
+ * objects plus the shadow and dirty-flag buffers.
+ * NOTE(review): if the mpa_fields malloc fails, mn_fields stays set but
+ * mpa_fields/mp_shadow/mp_dirty are not allocated; later accesses
+ * (e.g. register_flush writing mp_dirty) would dereference NULL -
+ * confirm allocation-failure policy.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* one 32-bit word per register word */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			/* one dirty flag per register word */
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug-dump a register's geometry and all of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field of a register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute address of the register on its bus. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Write every field's reset value into the shadow (no HW access). */
+void register_reset(const nt_register_t *p)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_reset(p_field);
+	}
+}
+
+/*
+ * Linear search for a field by id.
+ * Returns NULL when p is NULL or no field matches.
+ */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int n;
+
+	if (!p)
+		return NULL;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		nt_field_t *p_cur = p->mpa_fields[n];
+
+		if (p_cur->m_id == id)
+			return p_cur;
+	}
+	return NULL;
+}
+
+/*
+ * Get a field by id, logging an error when the register is NULL or the
+ * field is missing. Returns NULL in both error cases.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Like register_get_field() but silent: NULL without logging. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Register width in bits ((uint16_t)-1 for legacy registers). */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Register address relative to the module base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* Current debug mode (note: takes a module, mirroring the header). */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+/* Set the register debug mode and propagate it to every field. */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register from HW into its shadow buffer via the module bus.
+ * Returns the bus read status.
+ * Fix: the original dereferenced p and p->mp_owner in the variable
+ * initializers and only null-checked them afterwards; validate first.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	int rc = -1;
+	int n_bus_type_id;
+	uint32_t addr;
+	uint32_t len;
+	uint32_t *p_data;
+	struct fpga_info_s *p_fpga_info;
+
+	assert(p && p->mp_owner && p->mp_owner->mp_owner);
+
+	n_bus_type_id = module_get_bus(p->mp_owner);
+	addr = p->m_addr;
+	len = p->m_len;
+	p_data = p->mp_shadow;
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+	return rc;
+}
+
+/*
+ * Read the register from HW into its shadow buffer, returning the
+ * timestamp counters sampled around the read in *p_tsc1/*p_tsc2.
+ * Fixes: the original dereferenced p and p->mp_owner in the variable
+ * initializers before null-checking them; it also lacked the
+ * p_fpga_info/p_data asserts its siblings have - add them for
+ * consistency.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	int rc = -1;
+	int n_bus_type_id;
+	uint32_t addr;
+	uint32_t len;
+	uint32_t *p_data;
+	struct fpga_info_s *p_fpga_info;
+
+	assert(p && p->mp_owner && p->mp_owner->mp_owner);
+
+	n_bus_type_id = module_get_bus(p->mp_owner);
+	addr = p->m_addr;
+	len = p->m_len;
+	p_data = p->mp_shadow;
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+/*
+ * Write 'cnt' consecutive registers' worth of shadow data to HW.
+ * Returns the bus write status.
+ * Fix: the original dereferenced p and p->mp_owner in the variable
+ * initializers and only null-checked them afterwards; validate first.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	int rc = -1;
+	int n_bus_type_id;
+	uint32_t addr;
+	uint32_t len;
+	uint32_t *p_data;
+	struct fpga_info_s *p_fpga_info;
+
+	assert(p && p->mp_owner && p->mp_owner->mp_owner);
+
+	n_bus_type_id = module_get_bus(p->mp_owner);
+	addr = p->m_addr;
+	len = p->m_len;
+	p_data = p->mp_shadow;
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+
+	return rc;
+}
+
+/*
+ * Copy the register's shadow content into p_data (no HW access).
+ * len == (uint32_t)-1 selects the full register length; larger values
+ * are clamped.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (i = 0; i < len; i++)
+		p_data[i] = p->mp_shadow[i];
+}
+
+/* First 32-bit word of the register's shadow content. */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Refresh the shadow buffer from HW. Write-only registers are skipped.
+ * When ON_READ debug is set, the read data is logged word by word.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Refresh the shadow from HW, then return its first 32-bit word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every shadow word dirty (pending flush to HW). */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+/*
+ * Copy caller data into the register's shadow buffer (no HW access).
+ * len == (uint32_t)-1 selects the full register length; larger values
+ * are clamped. Copy is skipped when the caller passes the shadow
+ * buffer itself.
+ * Fix: clamp len BEFORE asserting, as register_get_val() does - the
+ * old order made the assert fire for the len == (uint32_t)-1
+ * convention the very next line supports.
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Set the shadow value and immediately flush one register to HW. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write 'cnt' registers of shadow data to HW and clear the dirty
+ * flags. Read-only registers are never written. When ON_WRITE debug is
+ * set, the written data is logged word by word.
+ * Fix: the debug-trace block re-declared 'i', shadowing the dirty-flag
+ * loop counter (-Wshadow); the trace now uses its own counter.
+ * NOTE(review): dirty flags are allocated per 32-bit word but cleared
+ * per register count here - confirm intent for cnt > 1.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t n_words = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, n_words);
+			while (n_words--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a HW read and return the surrounding timestamp counters. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow buffer and mark it dirty (no HW access). */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set all shadow bits to one and mark it dirty (no HW access). */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/* Allocate a field object; members are filled in by field_init().
+ * NOTE(review): callers do not check the result for NULL.
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = malloc(sizeof(nt_field_t));
+	return p;
+}
+
+/* Zero and free a field object. */
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
+/*
+ * Initialise a field: bind it to its owner register, copy the geometry
+ * (id, bit width, low bit position, reset value) and precompute the
+ * front/body/tail masks used by field_get_val()/field_set_val().
+ * Fix: compute the tail mask with a 64-bit shift; the former
+ * "(1 << bits_remaining) - 1" is undefined behavior (signed overflow)
+ * when bits_remaining == 31.
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		/* mask of the field bits within the first shadow word */
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* mask of the field bits within the last shadow word */
+		p->m_tail_mask = (uint32_t)((1ULL << bits_remaining) - 1);
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/* Current debug mode (note: takes a module, mirroring the header). */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the debug mode of a single field. */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* Field width in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Lowest bit position of the field within its register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Highest bit position of the field within its register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/* Mask of the field's bits within the first shadow word (positioned). */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Write the field's reset value into the register shadow. */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Front mask shifted down to bit 0 (value-domain mask). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+/* Field reset value from the init data. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field's value from the register shadow into p_data,
+ * right-aligned, one 32-bit word at a time. len must equal mn_words.
+ * Works through a front word, zero or more full body words and an
+ * optional tail word, using a 64-bit sliding window.
+ * NOTE(review): relies on the w32[2]/w64 union overlay, i.e. a
+ * little-endian layout - confirm target endianness.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert a right-aligned value from p_data into the field's position
+ * in the register shadow and mark the register dirty. len must equal
+ * mn_words. Mirrors field_get_val(): front word merged under
+ * m_front_mask, full body words written directly, tail word merged
+ * under m_tail_mask, using a 64-bit sliding window.
+ * NOTE(review): relies on the w32[2]/w64 union overlay, i.e. a
+ * little-endian layout - confirm target endianness.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Set the field value in the shadow, then flush the register to HW. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Field value as a single 32-bit word (shadow only, no HW access). */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+/* Refresh the owning register from HW, then return the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+/* Trigger a HW read of the owning register with timestamp capture. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Refresh the owning register's shadow from HW. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Flush the owning register's shadow to HW. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set the field from a single 32-bit value (shadow only). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Set the field from a 32-bit value and flush the register to HW. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear all field bits (single-word fields only, see assert). */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+/* Clear all field bits and flush the register to HW. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set all field bits (single-word fields only, see assert). */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set all field bits and flush the register to HW. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Poll conditions for field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,	/* wait until every field bit is 0 */
+	FIELD_MATCH_SET_ALL,	/* wait until every field bit is 1 */
+	FIELD_MATCH_CLR_ANY,	/* wait until at least one bit is 0 */
+	FIELD_MATCH_SET_ANY,	/* wait until at least one bit is 1 */
+};
+
+/*
+ * Poll a field (re-reading its register from HW each iteration) until
+ * it matches the requested condition. Returns 0 on match, -1 when the
+ * iteration budget runs out. -1 arguments select the defaults of
+ * 10000 iterations / 100 usec interval.
+ * Fix: compute bit masks with a 64-bit shift; "1 << mn_bit_width" is
+ * undefined behavior for 32-bit wide fields (shift by type width).
+ * NOTE(review): the CLR_ANY branch compares the right-aligned value
+ * against field_get_mask(), which is the bit-positioned front mask -
+ * confirm field_get_val_mask() was not intended.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(((1ULL << p->mn_bit_width) - 1) << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Poll until every field bit is 1; 0 on success, -1 on timeout. */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit is 0; 0 on success, -1 on timeout. */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is 1; 0 on success, -1 on timeout. */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is 0; 0 on success, -1 on timeout. */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until its value equals (n_wait_cond_value &
+ * n_wait_cond_mask). Returns 0 on match, -1 on timeout. -1 arguments
+ * select the defaults of 10000 iterations / 100 usec interval.
+ * NOTE(review): the mask is applied to the expected value only, not to
+ * the value read back - confirm whether "(val & mask) == (value & mask)"
+ * was intended.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Debug-log a field's geometry and reset value. */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/* Debug-log a field's current value, most-significant word first.
+ * NOTE(review): buf holds 32 words (1024 bits); assumes mn_words <= 32
+ * - confirm no wider field exists.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Debug-log a field init descriptor (static data, not runtime state). */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Instantiate the FPGA model matching n_fpga_ident via a temporary
+ * FPGA manager. Logs an error and returns NULL when no model matches.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	/* decode the ident only for the diagnostic string below */
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+/* Thin wrapper: look up a module by (id, instance). */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, n_mod, n_instance);
+	return p_mod;
+}
+
+/* Thin wrapper: look up a register by id (logs when missing). */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	nt_register_t *p_reg = module_get_register(p_mod, n_reg);
+	return p_reg;
+}
+
+/* Thin wrapper: look up a field by id (logs when missing). */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	nt_field_t *p_fld = register_get_field(p_reg, n_fld);
+	return p_fld;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+#ifndef FPGAID_TO_PRODUCTCODE
+/* FPGA ident layout: type [39:32], product code [31:16],
+ * version [15:8], revision [7:0].
+ */
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack (major, minor) into one 64-bit value: major in the high word. */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug tracing selector: log on register reads and/or writes. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module can be attached to. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of all supported FPGA product init descriptors. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* One instantiated FPGA: identity, params and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;		/* number of entries in mpa_params */
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;		/* number of entries in mpa_modules */
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;	/* static product init data */
+
+	int m_debug_mode;	/* see enum debug_mode */
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One product parameter (id/value pair) of an FPGA. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* One module instance of an FPGA with its registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;		/* see enum nthw_bus_type */
+	uint32_t m_addr_base;	/* module base address on the bus */
+
+	int m_debug_mode;	/* see enum debug_mode */
+
+	int mn_registers;	/* number of entries in mpa_registers */
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* One register of a module, with a shadow copy of its HW content. */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;	/* address relative to the module base */
+	uint32_t m_addr;	/* absolute address on the bus */
+	uint32_t m_type;	/* e.g. REGISTER_TYPE_RO/WO */
+	uint32_t m_len;		/* register length in 32-bit words */
+
+	int m_debug_mode;	/* see enum debug_mode */
+
+	int mn_fields;		/* number of entries in mpa_fields */
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;	/* m_len words mirroring HW content */
+	bool *mp_dirty;		/* m_len per-word pending-write flags */
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* One field (bit slice) of a register, with precomputed masks. */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word;	/* index of first shadow word touched */
+	uint32_t m_first_bit;	/* bit offset within that word */
+	uint32_t m_front_mask;	/* field bits within the first word */
+	uint32_t m_body_length;	/* number of full 32-bit middle words */
+	uint32_t mn_words;	/* field width in 32-bit words */
+	uint32_t m_tail_mask;	/* field bits within the last word */
+
+	int m_debug_mode;	/* see enum debug_mode */
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+/* FPGA manager: lifetime, lookup by id, and debug dumps */
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+/* FPGA instance: creation, product parameters, and module lookup */
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+/* Returns default_value when the product parameter is not present */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+/* Product parameter objects */
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+/* FPGA module: versioning, register lookup, and debug control */
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+/* module_get_register: hard lookup; module_query_register: may return NULL */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+/* NOTE(review): name is misspelled ("unsuppported") - it presumably matches
+ * the definition elsewhere, so renaming here would break the build.
+ */
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+/* Register: field lookup, address info, and cached-value access */
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+/* register_get_field: hard lookup; register_query_field: may return NULL */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+/* Field: bit-level access within a register */
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+/* Polling helpers: iterate up to n_poll_iterations with n_poll_interval */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a statically-sized array (invalid on pointer arguments) */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Translate a Napatech PCI device id into the driver-internal adapter id.
+ * Unrecognized device ids map to NT_HW_ADAPTER_ID_UNKNOWN.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	/* All NT40 variants are handled as NT40E3 */
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+/* Napatech PCI vendor id */
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+/* PCI device ids of the supported Napatech adapters */
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/* Driver-internal adapter ids (see nthw_platform_get_nthw_adapter_id()) */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* NT40A01 is an alias: same enum value as NT40E3 */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA product profile - selects which feature set the image provides */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+/* Completion poll budget (polled once per microsecond in dma_wait) */
+#define RAB_DMA_WAIT (1000000)
+/* Words per DMA direction; must be a power of two (used as ring mask - 1) */
+#define RAB_DMA_BUF_CNT (0x4000)
+
+/* RAB command opcodes, placed in bits [RAB_OPR_HI:RAB_OPR_LO] */
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+/*
+ * RAB command-word layout:
+ *   [31:28] opcode, [27:20] word count, [19:16] bus id, [15:0] address
+ */
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ * Returns NULL on allocation failure (the original code passed an
+ * unchecked malloc() result straight to memset(), which is undefined
+ * behavior when the allocation fails).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+/* Scrub and release a RAC instance; a NULL pointer is a no-op. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (p == NULL)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a RAC instance to the RAC module of the given FPGA and cache all
+ * register/field handles and raw BAR0 addresses used by the RAB access
+ * paths.
+ *
+ * When called with p == NULL the function only probes for the module:
+ * returns 0 if MOD_RAC instance 0 exists, -1 otherwise.
+ * Returns 0 on success, -1 if the RAC module instance is not present.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 *
+		 * Fix: this block originally ran before mp_reg_rab_init was
+		 * looked up, so register_set_debug_mode() received a NULL
+		 * (zero-initialized) register pointer. It now runs after the
+		 * lookup above.
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/*
+		 * Fix: the original cleared mp_reg_dbg_data (already NULL in
+		 * this branch) instead of the field pointer.
+		 */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw BAR0 addresses for direct nthw_rac_reg_read32/write32 */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional non-memory-mapped-bus window registers */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB bus interfaces on this FPGA (product param NT_RAC_RAB_INTERFACES) */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-poll the RAB output-buffer "used" counter until at least word_cnt
+ * words are available. Returns 0 on success, -1 (with an error log) when
+ * the retry budget runs out.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	int attempts_left = 100000;
+
+	while (attempts_left-- > 0) {
+		uint32_t reg_val;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &reg_val);
+		used = (reg_val & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+/* Write the RAB init mask (bit n resets RAB interface n - see block comment above) */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/* Perform the official RAB reset sequence over all RAB interfaces. */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* _unused: presumably only referenced when debug logging is compiled in */
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	/* clear mask -> assert all -> assert all except bit 0
+	 * NOTE(review): the final write clears bit 0 only - confirm this is
+	 * the intended end state for RAB interface 0.
+	 */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the RAB DMA ring buffers and program their IOVAs into
+ * the adapter. The single allocation holds both directions:
+ * first RAB_DMA_BUF_CNT words inbound, next RAB_DMA_BUF_CNT words outbound.
+ * Returns 0 on success, -1 on DMA allocation failure.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* Outbound half starts after the inbound RAB_DMA_BUF_CNT words */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Begin a RAB DMA transaction. On success (0) the RAC mutex is held and
+ * remains held until nthw_rac_rab_dma_commit() releases it. Returns -1 and
+ * releases the mutex if a DMA transaction is already in progress.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		pthread_mutex_unlock(&p->m_mutex);
+		NT_LOG(ERR, NTHW,
+		       "%s: DMA begin requested, but a DMA transaction is already active\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	p->m_dma_active = true;
+
+	return 0;
+}
+
+/*
+ * Terminate the queued command stream with a completion word and kick the
+ * hardware by advancing the inbound write pointer. The statement order is
+ * significant: the completion word must be queued and the stale outbound
+ * completion slot cleared before the pointer write starts the transfer.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll (1 us per iteration, up to RAB_DMA_WAIT times) for the completion
+ * word written by the hardware in the outbound buffer, then advance the
+ * outbound read pointer past it and replenish the inbound free budget.
+ * Returns 0 on completion, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Start the queued DMA commands and wait for their completion. Must be
+ * paired with a successful nthw_rac_rab_dma_begin(); releases the RAC
+ * mutex taken there. Returns the wait result (0 on success, -1 on timeout).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int rc;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	rc = nthw_rac_rab_dma_wait(p);
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return rc;
+}
+
+/* Raw 32-bit MMIO read from BAR0 at byte offset reg_addr. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *mmio_addr =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr + reg_addr);
+
+	*p_data = *mmio_addr;
+}
+
+/* Raw 32-bit MMIO write to BAR0 at byte offset reg_addr. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *mmio_addr =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr + reg_addr);
+
+	*mmio_addr = p_data;
+}
+
+/*
+ * Queue a RAB write command plus word_cnt data words in the inbound DMA
+ * ring. The transfer is not started here; nthw_rac_rab_dma_commit() does
+ * that. Returns 0 on success, -1 on bad length or when the ring is full
+ * (caller must commit and retry).
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/* NOTE(review): word_cnt + 3 words of headroom are required but only
+	 * word_cnt + 1 are consumed - presumably the extra two are reserved
+	 * for the completion command added by dma_commit; confirm.
+	 */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy payload into the ring, wrapping at RAB_DMA_BUF_CNT */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read command in the inbound DMA ring and hand back, via
+ * buf_ptr, where the word_cnt result words will appear in the outbound
+ * ring once nthw_rac_rab_dma_commit() has run. Returns 0 on success,
+ * -1 on bad length or when the ring is full.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fix: the original format string ended in ": 0x%08X" with no
+		 * matching argument - a printf-style format/argument mismatch
+		 * and thus undefined behavior. The stray conversion has been
+		 * removed and a terminating newline added.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Tell the caller where the read data will land in the outbound ring */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Register-based (non-DMA) RAB write: pushes a write command plus
+ * word_cnt data words through the RAB inbound buffer register and waits
+ * for the echoed completion word on the outbound buffer register.
+ * Serialized by the RAC mutex; refused while DMA mode is active.
+ * Returns 0 on success, -1 on parameter, state, or bus errors.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/* NOTE(review): these range checks use '>' against (1 << BW); a value
+	 * exactly equal to 1 << BW passes the check but cannot be encoded in
+	 * BW bits - confirm whether '>=' was intended (address, bus_id and
+	 * word_cnt alike).
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	/* Low 16 bits: inbound free count; next 16 bits: outbound free count */
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write data to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			/* Verify the echoed write command matches what was sent */
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 set: timeout/overflow flagged by the hardware */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
/*
 * RAC (Register Access Controller) context.
 *
 * Holds the FPGA-model handles (module/register/field) for the RAC block
 * plus pre-resolved register addresses and field masks so the hot RAB
 * access paths avoid repeated model lookups.  m_mutex serializes RAB
 * transactions: each command/data/completion sequence must not
 * interleave with another caller's.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_rac;

	/* Serializes all RAB bus transactions and DMA bookkeeping */
	pthread_mutex_t m_mutex;

	/* FPGA product parameters */
	int mn_param_rac_rab_interfaces;
	/* When non-zero, every OB data read is acknowledged by writing 0 back */
	int mn_param_rac_rab_ob_update;

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	/* Register/field handles resolved from the FPGA model */
	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	int mn_fld_rab_init_bw;
	uint32_t mn_fld_rab_init_mask;

	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Pre-resolved register addresses used by the fast path */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Field masks extracted from the model for raw register parsing */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* DMA-mode state (valid while a DMA transaction is active) */
	bool m_dma_active;

	struct nt_dma_s *m_dma;

	/* CPU-visible views of the DMA input/output rings */
	volatile uint32_t *m_dma_in_buf;
	volatile uint32_t *m_dma_out_buf;

	uint16_t m_dma_out_ptr_rd;
	uint16_t m_dma_in_ptr_wr;
	uint32_t m_in_free;
};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
/*
 * Cursor into a DMA ring buffer, used by nthw_rac_rab_read32_dma().
 * NOTE(review): field semantics inferred from names only — presumably
 * @index wraps at @size and @base is the CPU-visible ring start; confirm
 * against the DMA read/write implementation.
 */
struct dma_buf_ptr {
	uint32_t size;
	uint32_t index;
	volatile uint32_t *base;
};
+
/* Lifecycle: allocate, initialize against an FPGA model, and free */
nthw_rac_t *nthw_rac_new(void);
void nthw_rac_delete(nthw_rac_t *p);
int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);

int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);

int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);

int nthw_rac_rab_setup(nthw_rac_t *p);

int nthw_rac_rab_reset(nthw_rac_t *p);

/*
 * RAB bus burst accessors; all return 0 on success, -1 on failure.
 * The non-DMA variants serialize internally on the RAC mutex.
 */
int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			uint32_t word_cnt, const uint32_t *p_data);
int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			    uint32_t word_cnt, const uint32_t *p_data);
int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
		       uint32_t word_cnt, uint32_t *p_data);
int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);

/* Flush both RAB buffers and wait for them to drain */
int nthw_rac_rab_flush(nthw_rac_t *p);

int nthw_rac_rab_dma_begin(nthw_rac_t *p);
int nthw_rac_rab_dma_commit(nthw_rac_t *p);

/* Raw BAR register access helpers */
void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
		       uint32_t *p_data);
void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
/*
 * STA (statistics) module context.
 *
 * Counter-count fields are derived once in nthw_stat_init() from the
 * FPGA product parameters and the STA module version; the DMA fields
 * are set by nthw_stat_set_dma_address().
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_stat;
	int mn_instance;

	/* DMA layout revision (1..6), derived from the module version */
	int mn_stat_layout_version;

	bool mb_is_vswitch;	/* FPGA profile is FPGA_INFO_PROFILE_VSWITCH */
	bool mb_has_tx_stats;	/* STA >= 0.3 or at least one TX port */

	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	int m_dbs_present;

	int m_rx_port_replicate;

	/* Counter group sizes (see nthw_stat_init() for the derivation) */
	int m_nb_color_counters;

	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	/* Total number of 32-bit counters in the DMA area */
	int m_nb_counters;

	/* Field handles; the "query" fields may be NULL on newer modules */
	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	nt_field_t *mp_fld_tx_disable;

	nt_field_t *mp_fld_cnt_freeze;

	nt_field_t *mp_fld_stat_toggle_missed;

	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	/* Counter DMA area: bus address and CPU mapping */
	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;

	/* Timestamp slot right after the counters in the DMA area */
	uint64_t *mp_timestamp;
};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
/*
 * NT_LOG(level, module, fmt, ...): compile-time filtered logging.
 * Expands to nt_log(NT_LOG_<level>, NT_LOG_MODULE_<module>,
 * "<module>: <level>: " fmt, ...) via the per-level NT_LOG_NT_LOG_*
 * macros, or to nothing when NT_LOG_ENABLE_<level> is 0.
 */
#define NT_LOG(level, module, ...)                                          \
	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
			      #module ": " #level ": " __VA_ARGS__)
+
/* Log severities; bit-mask values, most to least severe top to bottom */
enum nt_log_level {
	NT_LOG_ERR = 0x001,
	NT_LOG_WRN = 0x002,
	NT_LOG_INF = 0x004,
	NT_LOG_DBG = 0x008,
	NT_LOG_DB1 = 0x010,	/* extra-verbose debug, off by default */
	NT_LOG_DB2 = 0x020,	/* extra-verbose debug, off by default */
};
+
/* Pluggable logger backend, registered once via nt_log_init() */
struct nt_log_impl {
	int (*init)(void);	/* one-time backend setup */
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);	/* emit one formatted line */
	int (*is_debug)(uint32_t module);	/* see nt_log_is_debug() */
};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Return a pointer to the first '\n' of the trailing newline run in @s,
 * ignoring trailing spaces, e.g. for "hello_world\n\n\n" the pointer
 * addresses the first of the three newlines.  Returns NULL when @s is
 * empty or does not end in a newline.
 */
static char *last_trailing_eol(char *s)
{
	size_t len = strlen(s);

	/* Guard the empty string: the original read s[-1] (out of bounds) */
	if (len == 0)
		return NULL;

	int i = (int)len - 1;

	/* Skip spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	if (s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
/*
 * Append printf-formatted text to helper string @s, truncating at the
 * fixed NTLOG_HELPER_STR_SIZE_MAX capacity.  A NULL @s is ignored.
 * NOTE(review): __rte_format_printf(2, 0) disables compile-time checking
 * of the variadic arguments; (2, 3) would enable it — confirm intent.
 */
__rte_format_printf(2, 0)
void ntlog_helper_str_add(char *s, const char *format, ...)
{
	if (!s)
		return;
	va_list args;

	va_start(args, format);
	int len = strlen(s);

	/* vsnprintf's size includes the NUL, so this leaves one spare byte */
	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
	va_end(args);
}
+
/* Release a string from ntlog_helper_str_alloc(); NULL is a no-op. */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..3850ccd934
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
#define _unused __rte_unused

/*
 * Packed 32-bit PCI identifier helpers.
 * Layout (see BDF_TO_PCIIDENT): dom[31:16] bus[15:8] dev[7:3] fnc[2:0].
 */
#define PCIIDENT_TO_DOMAIN(pci_ident) \
	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
#define PCIIDENT_TO_BUSNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
#define PCIIDENT_TO_DEVNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
#define PCIIDENT_TO_FUNCNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))

#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))

/* ALIGN: Align x to a boundary (a must be a power of two) */
#define ALIGN(x, a)                           \
	({                                    \
		__typeof__(x) _a = (a);       \
		((x) + (_a - 1)) & ~(_a - 1); \
	})

/* PALIGN: Align pointer p to a boundary */
#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))

/* Allocation size matching minimum alignment of specified size */
/* NOTE(review): rounds DOWN to a power of two for non-power-of-two sizes
 * (1 << log2) — confirm callers always pass power-of-two sizes.
 */
#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))

#define NT_OS_WAIT_USEC(x)    \
	rte_delay_us_sleep( \
		x) /* uses usleep which schedules out the calling thread */
/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
#define NT_OS_WAIT_USEC_POLL(x) \
	rte_delay_us(        \
		x)

/* Timer-cycle based wall-clock helpers (resolution depends on timer hz) */
#define NT_OS_GET_TIME_US() \
	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
#define NT_OS_GET_TIME_NS() \
	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
/* One VFIO-mapped DMA allocation (see nt_dma_alloc()/nt_dma_free()) */
struct nt_dma_s {
	uint64_t iova;	/* IO virtual address from the VFIO map callback */
	uint64_t addr;	/* CPU virtual address (cast of the rte_malloc pointer) */
	uint64_t size;	/* mapped size in bytes (ALIGN_SIZE of request) */
};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
/* VFIO DMA map/unmap callbacks, registered via nt_util_vfio_init() */
struct nt_util_vfio_impl {
	/* map virt_addr for device DMA; returns 0 and fills *iova_addr */
	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
			    uint64_t size);
	/* undo a previous map; returns 0 on success */
	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
			      uint64_t size);
};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/*
+ * Register the VFIO DMA map/unmap callbacks used by nt_dma_alloc() and
+ * nt_dma_free().  Must be called before any DMA allocation is attempted.
+ * A NULL 'impl' is ignored (previously it would dereference NULL).
+ */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	if (impl == NULL)
+		return;
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate a DMA-able memory region and map it for VFIO DMA.
+ *
+ * Returns a descriptor holding the host virtual address (addr), the IOVA
+ * (iova) and the mapped size, or NULL on any failure.  The caller owns the
+ * returned descriptor and must release it with nt_dma_free().
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	/*
+	 * NOTE(review): ALIGN_SIZE() rounds up to a power of two, which can
+	 * exceed the rte_malloc_socket() allocation above - confirm the whole
+	 * mapped range is backed by the allocation.
+	 */
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size and align are 64-bit: use PRIu64/PRIX64 (were %u / %X - UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and release a DMA region previously returned by nt_dma_alloc().
+ * The unmap failure is logged but not fatal: the host memory and the
+ * descriptor are freed regardless.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* size is uint64_t: use PRIu64 (was %u - undefined behavior) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-22 15:41   ` Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+/* Maximum number of adapters handled simultaneously by this driver */
+#define NUM_ADAPTER_MAX (8)
+/* Maximum number of physical ports across a single adapter */
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads.
+ * Clears every task's run flag; when invoked programmatically with
+ * signum == -1, previously-running tasks are additionally joined and
+ * their thread handles zeroed.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(monitor_task_is_running); idx++) {
+		const int was_running = monitor_task_is_running[idx];
+
+		monitor_task_is_running[idx] = 0;
+
+		if (signum != -1 || was_running == 0)
+			continue;
+
+		void *thread_ret = NULL;
+
+		pthread_join(monitor_tasks[idx], &thread_ret);
+		memset(&monitor_tasks[idx], 0, sizeof(monitor_tasks[0]));
+	}
+}
+
+/*
+ * Print a human-readable summary of the adapter (PCI identity, FPGA image
+ * identity, port/NIM counts, HW platform) to the stream 'pfh', then append
+ * the statistics dump.  Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Render the packed pciident as "domain:bus:device.function" */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	/* FPGA image identity: type-product-version-revision [ident] (build time) */
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization.
+ * Allocates and initializes an SPI v3 instance; returns NULL on either
+ * allocation or initialization failure (the instance is released on the
+ * latter).
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *spi_dev = nthw_spi_v3_new();
+
+	if (!spi_dev) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(spi_dev, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(spi_dev);
+		return NULL;
+	}
+
+	return spi_dev;
+}
+
+/*
+ * SPI for sensors reading.
+ * Allocates and initializes an SPIS instance used by the sensor read path;
+ * returns NULL on allocation or initialization failure.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *spis_dev = nthw_spis_new();
+
+	/* init SPI for sensor initialization process */
+	if (!spis_dev) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(spis_dev, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(spis_dev);
+		return NULL;
+	}
+
+	return spis_dev;
+}
+
+/*
+ * Build the adapter's sensor list: the FPGA temperature sensor first, then -
+ * if the setup SPI is available - four AVR-monitored sensors (FAN0, PSU0,
+ * PSU1, PCB).  Updates adapter->adapter_sensors / adapter_sensors_cnt and
+ * restarts AVR monitoring when done.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	/*
+	 * NOTE(review): sensors_list_ptr is dereferenced below without a NULL
+	 * check - confirm fpga_temperature_sensor_init() cannot fail.
+	 */
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			/* Fan tachometer, big-endian unsigned reading */
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF),
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			/* PSU0 junction temperature (Exar 7724, register 0x15) */
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			/* PSU1 junction temperature (MP2886A, register 0x8d) */
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			/* PCB temperature (DS1775, signed little-endian) */
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				/* Fixed log-message typo: "starteed" -> "started" */
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * One-time adapter bring-up: decode the PCI identity, build ident strings,
+ * initialize the FPGA model, run the PCI TA/TG self-measurement, set up
+ * sensors, initialize the per-product link code, EPP (if present) and the
+ * statistics module.  Returns 0 on success or a non-zero error code; the
+ * ident strings allocated here are released by nt4ga_adapter_deinit().
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: "dddd:bb:dd.f" rendering of the PCI address */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: same address prefixed with "PCI:", shared with fpga_info */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Build one "adapter:intf_N" ident string per possible port */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; populates fpga_info fields used below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			/* TA/TG failure is non-fatal: continue without it */
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Product-specific link initialization; ports start disabled */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional: only created/initialized when present in the FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter: stop monitor threads, stop statistics, shut down
+ * the FPGA model, reset the RAC RAB flip-flop, then free all ident strings
+ * and the adapter/NIM sensor lists built during init.
+ * Returns the result of the RAC RAB reset.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1 = programmatic stop: also joins the monitor threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors: walk the singly-linked list built in init */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors: one list per physical port */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+/* Hardware identity decoded from the PCI config space (ref: DN-0060 section 9) */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;
+	int hw_product_type;
+	int hw_reserved1;
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Top-level per-adapter state aggregating all sub-module contexts */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	/* Sensor lists built by adapter init; freed in nt4ga_adapter_deinit() */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* Heap-allocated ident strings, owned by this structure */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);  /* device brought up */
+	int (*mto_stop)(int adapter, int port);  /* device taken down */
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Flow-filter context: interface/queue counts plus the flow device handle */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;            /* number of interfaces */
+	int n_queues_per_intf_cnt; /* queues available per interface */
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* 'p' IS dereferenced here, so it must not carry the unused marker */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ * The administrative state is stored inverted as a per-port disable flag.
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ * Recorded both as the requested action and as the reported link info.
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/* Intentional no-op until autonegotiation control is implemented
+	 * (removed pointless dead local that only aliased &p->nt4ga_link) */
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	/* Autonegotiation is always reported as enabled */
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ * Returned by value - a copy of the port's NIM I2C context.
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: tx power
+ * Returns 0 on success, 1 if disabling the TX laser failed, and -1 when the
+ * port type does not support TX power control.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	switch (link_info->u.nim_ctx[port].port_type) {
+	case NT_PORT_TYPE_QSFP28_SR4:
+	case NT_PORT_TYPE_QSFP28:
+	case NT_PORT_TYPE_QSFP28_LR4: {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx,
+								   disable,
+								   -1) != 0)
+				return 1;
+		}
+		return 0;
+	}
+	default:
+		return -1;
+	}
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state as observed by the monitoring thread.
+ */
+typedef struct link_state_s {
+	bool link_disabled; /* Port administratively disabled */
+	bool nim_present; /* A NIM module is currently detected */
+	bool lh_nim_absent; /* Latched-high: NIM was absent at some point */
+	bool link_up; /* Current PHY link status */
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Per-port link parameters as read back from HW. */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/*
+ * Per-port requested (administrative) settings; written by the port API,
+ * read and applied by the monitoring thread.
+ */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+/* HW contexts for the 100G adapter variant. */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Union over adapter variants. Every variant must keep nim_ctx[] as its
+ * first field so that u.nim_ctx[] aliases the variant's NIM contexts
+ * (e.g. nt4ga_port_tx_power() relies on this layout).
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized; /* Set once HW contexts are initialized */
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply a loopback mode change on one port.
+ * mode/last_mode: 0 = off, 1 = host loopback, 2 = line loopback.
+ * Entering host loopback also enables FEC and skips the polarity swap;
+ * leaving a mode only undoes the corresponding loopback setting.
+ * Always finishes with an RX path reset and counter reset, since the
+ * loopback change invalidates the receiver state.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		/* In host loopback the GTY lanes are looped internally,
+		 * so the board-level polarity swap must not be applied.
+		 */
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* mode == off: undo whatever loopback was previously set */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* NT200A01 HW build 2 and NT200A02 boards need the GTY TX/RX
+	 * polarity swap (same condition as in port_init()).
+	 */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear stale BIP/FEC counters once the RX path is out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ *
+ * Fills *state from the MAC/PCS link summary and the GPIO module-present
+ * pin. Also logs the raw summary, but only when it differs from the last
+ * logged summary for this adapter/port (cached in a static buffer) to
+ * avoid flooding the log from the 0.5 s polling loop.
+ * Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Static cache of the last logged summary per adapter/port;
+		 * NOTE(review): not thread-safe if multiple monitor threads
+		 * ever build state for the same adapter/port concurrently.
+		 */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			/* NOTE(review): rte_strscpy already NUL-terminates,
+			 * so the explicit terminator below is redundant
+			 * (harmless) — confirm and consider simplifying.
+			 */
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present
+ * (thin wrapper over the GPIO module-present pin; asserts a valid port).
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX
+ * Returns 0 unconditionally; 'drv' kept for signature symmetry with the
+ * other enable/disable helpers.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX
+ * Also selects the host as the TX data source. Returns 0 unconditionally.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX
+ * Returns 0 unconditionally.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX
+ * Also deselects the host as TX data source. Returns 0 unconditionally.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ * Pulses the RX path reset: assert, wait 10 ms, de-assert, wait 10 ms.
+ * Returns 0 unconditionally.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ *
+ * Applies (swap == true) or clears (swap == false) the board-specific GTY
+ * lane polarity inversions. The tables are indexed [port][lane] and cover
+ * 2 ports x 4 lanes; callers must pass port in 0..1 — presumably guaranteed
+ * on the NT200A0x boards this targets (TODO confirm no >2-port caller).
+ * Returns 0 unconditionally.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ * Resets the RX path if the MAC/PCS flags a needed reset, reports a high
+ * bit-error rate, or not all FEC alignment markers are locked.
+ * Returns 0 unconditionally.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	bool rst_required;
+	bool ber;
+	bool fec_all_locked;
+
+	rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+
+	ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+
+	fec_all_locked = nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets and probes the NIM module on 'port'. With enable == false the
+ * port's MAC RX/TX are first disabled and the module is left in low-power
+ * mode; with enable == true low power is de-asserted at the end.
+ * Returns 0 on success or when no module is present, negative/non-zero on
+ * probe failure or unsupported module type.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* the only NIM type this driver supports */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* Re-check presence: the module may have been pulled during the wait.
+	 * NOTE(review): the log text looks inverted — this branch means the
+	 * module is now ABSENT, not "no longer absent"; confirm intended wording.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer absent!\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ *
+ * Sequence: set state variables and PCS defaults, apply board-specific GTY
+ * polarity swaps and TX tuning, enable TX, create/probe the NIM, enable
+ * FEC, set timestamp compensation, then enable RX.
+ * Returns 0 on success, non-zero if the NIM could not be initialized.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* Tolerate a missing FPGA info block; -1 falls through to the
+	 * "Unhandled AdapterId/HwId" assert in the tuning section below.
+	 */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* First init call (NULL) only probes for the GMF module; the second
+	 * call initializes the real handle before enabling it.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Board-specific GTY TX tuning: per-lane {pre, diff, post} values */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ *
+ * Per-adapter monitor loop: every ~0.5 s it reads adapter and NIM sensors,
+ * applies administrative port enable/disable and loopback-mode changes,
+ * detects NIM insertion/removal (re-initializing the port on insertion),
+ * reports link up/down transitions, and resets the RX path when the PCS
+ * flags trouble. Runs until monitor_task_is_running[adapter_no] is cleared
+ * (defined elsewhere in this driver). Always returns 0.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	/* Convenience aliases into the per-adapter 100G HW context arrays */
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	/* NOTE(review): always true here — the flag was just set above */
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		/* Last link-up state reported to the log, per port */
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				/* Host loopback (mode 1) implies link up */
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* NOTE(review): the nim_ctx-> accesses below read
+				 * element 0, not nim_ctx[i] — looks like a bug for
+				 * ports > 0; confirm intended indexing.
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx->nim_id,
+				       nim_id_to_text(nim_ctx->nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx->vendor_name, nim_ctx->prod_no,
+				       nim_ctx->serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ * pthread entry point: adapts common_ptp_nim_state_machine() to the
+ * pthread start-routine signature; 'data' is the adapter_info_t pointer.
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ *
+ * One-time initialization of the MAC/PCS, NIM I2C and GPIO PHY contexts
+ * for every physical port (guarded by variables_initialized), then starts
+ * the per-adapter link monitoring thread if it is not already running.
+ * Returns 0 on success, or the first failing sub-init / pthread_create
+ * error code.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* IIC FPGA instances 0-1 are used elsewhere; port
+			 * NIM buses start at instance 2.
+			 */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* Initialize all 100G ports and start the link monitoring thread. */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Initialize the PCI test-accelerator (TA) and read/write traffic
+ * generator (RD/WR TG) module handles for one adapter.
+ * Each FPGA module that cannot be initialized is logged as a warning and
+ * counted; the number of missing modules is returned (0 == all present).
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int n_err_cnt = 0;
+	int res;
+
+	if (!p) {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+	memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+
+	assert(p_fpga);
+
+	/* PCI read traffic generator */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI write traffic generator */
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI test accelerator (packet checker) */
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (non-zero) or disable (0) the PCI test-accelerator checker. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	struct nthw_pci_ta *ta = p->mp_nthw_pci_ta;
+
+	nthw_pci_ta_set_control_enable(ta, enable);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	struct nthw_pci_ta *ta = p->mp_nthw_pci_ta;
+
+	nthw_pci_ta_get_packet_bad(ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	struct nthw_pci_ta *ta = p->mp_nthw_pci_ta;
+
+	nthw_pci_ta_get_packet_good(ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RD TG RAM slot: its DMA address (iova + slot * size),
+ * the request size and the wait/wrap control flags.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	/*
+	 * Widen before multiplying: "slot_addr * req_size" would otherwise be
+	 * evaluated in 32-bit arithmetic and could wrap before the cast.
+	 */
+	const uint64_t n_phys_addr = iova + (uint64_t)slot_addr * req_size;
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start (num_iterations > 0) or stop (0) the PCI read traffic generator. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	struct nthw_pci_rd_tg *rd_tg = p->mp_nthw_pci_rd_tg;
+
+	nthw_pci_rd_tg_set_run(rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the RD TG ready flag, sleeping 1 ms per attempt, for at most 1000
+ * attempts (~1 s).  Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll;
+
+	for (poll = 0; poll < 1000; poll++) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg) != 0)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+	       __func__, poll);
+	return -1;
+}
+
+/*
+ * Program one WR TG RAM slot: its DMA address (iova + slot * size),
+ * the request size and the wait/wrap/increment control flags.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	/*
+	 * Widen before multiplying: "slot_addr * req_size" would otherwise be
+	 * evaluated in 32-bit arithmetic and could wrap before the cast.
+	 */
+	const uint64_t n_phys_addr = iova + (uint64_t)slot_addr * req_size;
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start (num_iterations > 0) or stop (0) the PCI write traffic generator. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	struct nthw_pci_wr_tg *wr_tg = p->mp_nthw_pci_wr_tg;
+
+	nthw_pci_wr_tg_set_run(wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the WR TG ready flag, sleeping 1 ms per attempt, for at most 1000
+ * attempts (~1 s).  Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll;
+
+	for (poll = 0; poll < 1000; poll++) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg) != 0)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+	       __func__, poll);
+	return -1;
+}
+
+/*
+ * Run one HIF bandwidth/integrity measurement pass using the FPGA PCI
+ * read/write traffic generators (RD/WR TG) and the test accelerator (TA).
+ * Measurement parameters (direction, packet size/count, delay, NUMA node)
+ * are taken from @pri; counters are sampled into @pri (master endpoint)
+ * and @sla (slave endpoint - both slave pointers below are NULL here, so
+ * the slave branches are currently inactive).
+ * Returns the accumulated error indication (0 == no error observed).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		/* NOTE(review): returns 0 (no error) on allocation failure - confirm intended */
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			/* Program one TG RAM slot per packet; last slot wraps */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			/*
+			 * The FPGA writes an incrementing 32-bit pattern after an
+			 * 8-byte header; verify it word by word.
+			 */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		/* 0xffff iterations == run "forever"; 0 == stop */
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Top-level HIF throughput measurement driver.
+ * Iterates over the requested NUMA node(s) (UINT8_MAX == all, reduced to
+ * one pass here) and traffic direction(s) (direction <= 0 == directions
+ * 1..3), filling in primary/slave endpoint parameter blocks and running
+ * one measurement per combination via
+ * nt4ga_pci_ta_tg_measure_throughput_run().
+ * Returns -1 for an invalid (zero) delay; otherwise 0 - errors are
+ * logged and counted internally but not returned (kept for caller
+ * compatibility).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	/* A zero measurement delay would measure nothing */
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary (master) endpoint parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave endpoint parameters */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/*
+					 * Fix copy/paste bug: reset the slave
+					 * endpoint's ref clock count here, not
+					 * pri->n_ref_clk_cnt a second time.
+					 */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Per-adapter handles for the PCI test-accelerator / traffic-generator modules */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* PCI read traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* PCI write traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta; /* PCI test accelerator (packet checker) */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Convert an inline pcap-style 32:32 timestamp (seconds in the upper
+ * 32 bits, nanoseconds in the lower 32 bits) to a nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	const uint64_t n_sec = ts >> 32;
+	const uint64_t n_nsec = ts & 0xffffffff;
+
+	return n_sec * 1000000000 + n_nsec;
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Snapshot the FPGA statistics DMA area into the adapter's counter
+ * structures (the per-mode collectors below note they expect the stat
+ * mutex to be held).
+ * On vswitch FPGAs the DMA timestamp word is written back as all-ones
+ * (the FPGA may only clear it), so last_timestamp is taken from the OS
+ * clock instead of the DMA block.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/* Capture FPGA: the DMA timestamp word is valid - convert it */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Allocate and initialize the statistics (STA) and RMC module handles and
+ * cache the FPGA-reported counter/port dimensions in the adapter state.
+ * Returns 0 on success, -1 on failure.
+ * Fix: allocate and check the modules one at a time - the original code
+ * created both before checking either, leaking the RMC instance when
+ * nthw_stat_new() failed.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc;
+
+	if (!p_nt4ga_stat) {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+	memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+
+	p_nthw_stat = nthw_stat_new();
+	if (!p_nthw_stat) {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	p_nthw_rmc = nthw_rmc_new();
+	if (!p_nthw_rmc) {
+		nthw_stat_delete(p_nthw_stat);
+
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+	nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+	p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+	nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+	/* Cache counter/port dimensions reported by the STA module */
+	p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+	p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+	p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+	p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+
+	return 0;
+}
+
+/*
+ * Allocate the statistics DMA area, hand it to the STA module, and
+ * allocate the host-side counter structures (color, host-buffer and
+ * per-port, plus FLM for inline FPGAs).  RMC traffic is blocked while
+ * the DMA address is being changed.
+ * Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): on a late allocation failure the earlier allocations are
+ * not released here - presumably nt4ga_stat_stop() is expected to clean
+ * up; verify against the callers.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Block RMC while the statistics DMA address is being (re)programmed */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		/* Counter words plus one trailing timestamp word */
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/* NOTE(review): format has 6 conversions but 5 arguments - verify */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %p %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/*
+		 * NOTE(review): "%02ld" is paired with an int and the last
+		 * PRIX64 with a uint32_t - format/argument mismatches; verify
+		 */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02ld, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX64 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison the area so un-written counters are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* Vswitch FPGAs have no FLM counters */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick off the first FPGA statistics DMA transfer */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release every resource acquired by nt4ga_stat_init()/nt4ga_stat_setup().
+ * free(NULL) is a no-op, so each pointer is released unconditionally and
+ * cleared afterwards, making a repeated stop harmless.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Print the accumulated per-port RX/TX totals (packets, octets, drops)
+ * for every physical port to the supplied stream.  Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *stat = &p_adapter_info->nt4ga_stat;
+	int intf_no;
+
+	for (intf_no = 0; intf_no < fpga_info->n_phy_ports; intf_no++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, intf_no,
+			stat->a_port_rx_packets_total[intf_no],
+			stat->a_port_rx_octets_total[intf_no],
+			stat->a_port_rx_drops_total[intf_no],
+			stat->a_port_tx_packets_total[intf_no],
+			stat->a_port_tx_octets_total[intf_no],
+			stat->a_port_tx_drops_total[intf_no]);
+	}
+
+	return 0;
+}
+
+/*
+ * Accumulate vswitch (virt v1) statistics from the DMA snapshot into the
+ * adapter counter structures.  Called with stat mutex locked.
+ * The snapshot is walked in layout order: color counters, RX host-buffer
+ * counters, RX port counters, TX port counters.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	/* NOTE(review): p_nt4ga_stat is dereferenced above before this null check */
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/*
+	 * Color counters: word pairs of (packets, bytes); the top 6 bits of
+	 * the packet word carry accumulated TCP flags.
+	 */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: 8 words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/*
+ * Collect capture-adapter statistics (HW stat layout version 6).
+ *
+ * Reads one DMA snapshot of the FPGA statistics counters and accumulates
+ * the color, host-buffer, Rx-port and Tx-port values into the running
+ * totals held in p_nt4ga_stat.
+ *
+ * Called with stat mutex locked.
+ *
+ * @param p_nt4ga_stat        adapter statistics context; updated in place
+ * @param p_stat_dma_virtual  virtual address of the statistics DMA buffer
+ * @return 0 on success, -1 on invalid argument or unsupported layout version
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	/* Validate the context before the first dereference */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat || !p_stat_dma_virtual)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two 32-bit words (packets, bytes) per color */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters: eight words per host buffer */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		struct host_buffer_counters *p_hb =
+			&p_nt4ga_stat->mp_stat_structs_hb[h];
+		const uint32_t *p_src = &p_stat_dma_virtual[h * 8];
+
+		p_hb->flush_packets += p_src[0];
+		p_hb->drop_packets += p_src[1];
+		p_hb->fwd_packets += p_src[2];
+		p_hb->dbs_drop_packets += p_src[3];
+		p_hb->flush_bytes += p_src[4];
+		p_hb->drop_bytes += p_src[5];
+		p_hb->fwd_bytes += p_src[6];
+		p_hb->dbs_drop_bytes += p_src[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		struct port_counters_v2 *p_rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+		/* Hoist the loop-invariant per-port base address */
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+
+		p_rx->octets += p_src[0];
+
+		p_rx->broadcast_pkts += p_src[1];
+		p_rx->multicast_pkts += p_src[2];
+		p_rx->unicast_pkts += p_src[3];
+		p_rx->pkts_alignment += p_src[4];
+		p_rx->pkts_code_violation += p_src[5];
+		p_rx->pkts_crc += p_src[6];
+		p_rx->undersize_pkts += p_src[7];
+		p_rx->oversize_pkts += p_src[8];
+		p_rx->fragments += p_src[9];
+		p_rx->jabbers_not_truncated += p_src[10];
+		p_rx->jabbers_truncated += p_src[11];
+
+		p_rx->pkts_64_octets += p_src[12];
+		p_rx->pkts_65_to_127_octets += p_src[13];
+		p_rx->pkts_128_to_255_octets += p_src[14];
+		p_rx->pkts_256_to_511_octets += p_src[15];
+		p_rx->pkts_512_to_1023_octets += p_src[16];
+		p_rx->pkts_1024_to_1518_octets += p_src[17];
+		p_rx->pkts_1519_to_2047_octets += p_src[18];
+		p_rx->pkts_2048_to_4095_octets += p_src[19];
+		p_rx->pkts_4096_to_8191_octets += p_src[20];
+		p_rx->pkts_8192_to_max_octets += p_src[21];
+
+		p_rx->mac_drop_events += p_src[22];
+		p_rx->pkts_lr += p_src[23];
+		p_rx->duplicate += p_src[24];
+
+		p_rx->pkts_ip_chksum_error += p_src[25];
+		p_rx->pkts_udp_chksum_error += p_src[26];
+		p_rx->pkts_tcp_chksum_error += p_src[27];
+		p_rx->pkts_giant_undersize += p_src[28];
+		p_rx->pkts_baby_giant += p_src[29];
+		p_rx->pkts_not_isl_vlan_mpls += p_src[30];
+		p_rx->pkts_isl += p_src[31];
+		p_rx->pkts_vlan += p_src[32];
+		p_rx->pkts_isl_vlan += p_src[33];
+		p_rx->pkts_mpls += p_src[34];
+		p_rx->pkts_isl_mpls += p_src[35];
+		p_rx->pkts_vlan_mpls += p_src[36];
+		p_rx->pkts_isl_vlan_mpls += p_src[37];
+
+		p_rx->pkts_no_filter += p_src[38];
+		p_rx->pkts_dedup_drop += p_src[39];
+		p_rx->pkts_filter_drop += p_src[40];
+		p_rx->pkts_overflow += p_src[41];
+		/* DBS counters are only valid when the DBS module is present */
+		p_rx->pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[42] : 0;
+		p_rx->octets_no_filter += p_src[43];
+		p_rx->octets_dedup_drop += p_src[44];
+		p_rx->octets_filter_drop += p_src[45];
+		p_rx->octets_overflow += p_src[46];
+		p_rx->octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[47] : 0;
+
+		p_rx->ipft_first_hit += p_src[48];
+		p_rx->ipft_first_not_hit += p_src[49];
+		p_rx->ipft_mid_hit += p_src[50];
+		p_rx->ipft_mid_not_hit += p_src[51];
+		p_rx->ipft_last_hit += p_src[52];
+		p_rx->ipft_last_not_hit += p_src[53];
+
+		/* Rx totals: widen first operand so the sum is done in 64 bit */
+		uint64_t new_drop_events_sum =
+			(uint64_t)p_src[22] + p_src[38] + p_src[39] +
+			p_src[40] + p_src[41] +
+			(p_nthw_stat->m_dbs_present ? p_src[42] : 0);
+
+		uint64_t new_packets_sum =
+			(uint64_t)p_src[7] + p_src[8] + p_src[9] + p_src[10] +
+			p_src[11] + p_src[12] + p_src[13] + p_src[14] +
+			p_src[15] + p_src[16] + p_src[17] + p_src[18] +
+			p_src[19] + p_src[20] + p_src[21];
+
+		p_rx->drop_events += new_drop_events_sum;
+		p_rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		struct port_counters_v2 *p_tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+
+		p_tx->octets += p_src[0];
+
+		p_tx->broadcast_pkts += p_src[1];
+		p_tx->multicast_pkts += p_src[2];
+		p_tx->unicast_pkts += p_src[3];
+		p_tx->pkts_alignment += p_src[4];
+		p_tx->pkts_code_violation += p_src[5];
+		p_tx->pkts_crc += p_src[6];
+		p_tx->undersize_pkts += p_src[7];
+		p_tx->oversize_pkts += p_src[8];
+		p_tx->fragments += p_src[9];
+		p_tx->jabbers_not_truncated += p_src[10];
+		p_tx->jabbers_truncated += p_src[11];
+
+		p_tx->pkts_64_octets += p_src[12];
+		p_tx->pkts_65_to_127_octets += p_src[13];
+		p_tx->pkts_128_to_255_octets += p_src[14];
+		p_tx->pkts_256_to_511_octets += p_src[15];
+		p_tx->pkts_512_to_1023_octets += p_src[16];
+		p_tx->pkts_1024_to_1518_octets += p_src[17];
+		p_tx->pkts_1519_to_2047_octets += p_src[18];
+		p_tx->pkts_2048_to_4095_octets += p_src[19];
+		p_tx->pkts_4096_to_8191_octets += p_src[20];
+		p_tx->pkts_8192_to_max_octets += p_src[21];
+
+		p_tx->mac_drop_events += p_src[22];
+		p_tx->pkts_lr += p_src[23];
+
+		/*
+		 * Tx totals. Indexed with the Tx counter stride via p_src;
+		 * the drop sum previously (and wrongly) used the Rx stride.
+		 */
+		uint64_t new_drop_events_sum = p_src[22];
+
+		uint64_t new_packets_sum =
+			(uint64_t)p_src[7] + p_src[8] + p_src[9] + p_src[10] +
+			p_src[11] + p_src[12] + p_src[13] + p_src[14] +
+			p_src[15] + p_src[16] + p_src[17] + p_src[18] +
+			p_src[19] + p_src[20] + p_src[21];
+
+		p_tx->drop_events += new_drop_events_sum;
+		p_tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color flow statistics accumulated from the stat DMA snapshot */
+struct color_counters {
+	uint64_t color_packets; /* packets counted for this color */
+	uint64_t color_bytes; /* bytes counted for this color */
+	uint8_t tcp_flags; /* NOTE(review): not written by the collect functions in this file; verify producer */
+};
+
+/* Per-Rx-host-buffer counters; filled from 8 consecutive DMA words each */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets; /* packets forwarded to the host buffer */
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/* Capture/inline adapter port counters (filled by the cap_v1 collector) */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events; /* derived: sum of drop-related HW counters */
+	uint64_t pkts; /* derived: sum of the per-size-bucket counters */
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets; /* frame-size distribution buckets follow */
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop; /* only counted when the DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop; /* only counted when the DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Port counters for VSWITCH/inline adapters (filled by the virt_v1 collector) */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/* Flow Matcher (FLM) module counters; grouped by the FLM version that added them */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current; /* currently learned flows */
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/* Aggregated statistics state for one adapter */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat; /* HW STA module handle */
+	nthw_rmc_t *mp_nthw_rmc; /* RMC module handle */
+	struct nt_dma_s *p_stat_dma; /* DMA buffer the FPGA writes counters into */
+	uint32_t *p_stat_dma_virtual; /* CPU-visible mapping of p_stat_dma */
+	uint32_t n_stat_size;
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Which member is valid depends on the adapter profile (virt vs cap) */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver;
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/* Return true when @prod_no matches a copper SFP known to support tri-speed */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	/* Whitelist of supported 10/100/1000 copper SFP product numbers */
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	size_t idx;
+
+	/* Exact string match against each whitelisted product number */
+	for (idx = 0; idx < ARRAY_SIZE(pn_trispeed_list); idx++) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Return true when the NIM type uses paged (table-select) addressing,
+ * false for plain SFP/SFP+ addressing or unknown identifiers.
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	bool use_paging;
+
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		use_paging = false;
+		break;
+	case NT_NIM_XFP:
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		use_paging = true;
+		break;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		use_paging = false;
+		break;
+	}
+
+	return use_paging;
+}
+
+/* Map the raw identifier byte read from the module into the NIM type enum */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+/*
+ * Perform one raw I2C read or write transfer against the NIM.
+ * @return result of the underlying nthw_iic access (0 on success)
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t dev_addr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Unused */
+
+	if (!do_write)
+		return nthw_iic_read_data(&ctx->hwiic, dev_addr, reg_addr,
+					seq_cnt, p_data);
+
+	return nthw_iic_write_data(&ctx->hwiic, dev_addr, reg_addr,
+				 seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Fetch the currently selected page */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Page switching is slow; skip the write when already on the page */
+	if (page_sel == curr_page_sel)
+		return 0;
+
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write @length bytes at linear address @lin_addr in the NIM's
+ * combined address space, splitting the transfer into chunks that respect
+ * 128-byte MSA page boundaries and mapping each chunk to the right I2C
+ * device address (0xA0 base, 0xA2 extension, 0xAC PHY) and register.
+ *
+ * @param ctx               NIM I2C context
+ * @param m_page_addressing true when the module uses page (table-select)
+ *                          addressing above linear address 127
+ * @param lin_addr          linear start address
+ * @param length            number of bytes to transfer
+ * @param p_data            source (write) or destination (read) buffer
+ * @param do_write          NIM_WRITE to write, NIM_READ to read
+ * @return 0 on success, -1 on range violation or I2C failure
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	/* NOTE(review): assumes hwiic instance numbering starts at 2 — confirm */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	/* Transfer loop: each pass handles one page/block-bounded chunk */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* Linear address advances by bytes-per-register-sized steps */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+/*
+ * Public entry point: read 'length' bytes starting at linear address
+ * 'lin_addr' into 'data'. Resolves the addressing mode from the NIM id
+ * and delegates to the common linear access helper.
+ */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, lin_addr, length,
+					   data, NIM_READ);
+}
+
+/*
+ * Write 'length' bytes from 'data' starting at linear address 'lin_addr'.
+ * Resolves the addressing mode from the NIM id and delegates to the
+ * common linear access helper.
+ */
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, lin_addr, length,
+					   data, NIM_WRITE);
+}
+
+/*
+ * Read and return a single byte at linear address 'addr'.
+ * The I2C status is not propagated by this convenience helper; on a read
+ * failure 0 is returned instead of uninitialized stack data (the original
+ * left 'data' uninitialized, which is undefined behavior on failure).
+ */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data = 0; /* defined fallback value if the read fails */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+/*
+ * Read the one-byte module identifier (NIM_IDENTIFIER_ADDR) into
+ * ctx->nim_id. Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* Only the very first byte is read, so page setup is irrelevant */
+	const bool use_page_addressing = false;
+	int res = nim_nim_read_write_data_lin(ctx, use_page_addressing,
+					      NIM_IDENTIFIER_ADDR,
+					      sizeof(ctx->nim_id),
+					      &ctx->nim_id, NIM_READ);
+
+	return res == 0 ? 0 : -1;
+}
+
+/*
+ * Common NIM context construction: read the module identifier and reset
+ * every cached field to a known default.
+ * Returns 0 on success or the error code from the identifier read.
+ */
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	int status;
+
+	ctx->nim_id = 0;
+	status = nim_read_id(ctx);
+	if (status != 0) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return status;
+	}
+
+	/* Clear all identification strings */
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	/* Reset cached capability/state fields to their defaults */
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1; /* -1: no specific lane selected */
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+/*
+ * Generate {sfp,qsfp}_read_vendor_info(): read vendor name, product no,
+ * serial no, date and revision strings into the context.
+ * 'fn' is pasted into the generated function name ("" for SFP, "q" for
+ * QSFP) and 'pfx' selects the address constant family (SFP / QSFP).
+ * Bug fix: the previous one-parameter version pasted the literal
+ * Q##SFP_* for both instantiations, so the plain SFP variant read its
+ * strings from the QSFP linear addresses.
+ */
+#define XSFP_READ_VENDOR_INFO(fn, pfx)                                       \
+	static void fn##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)             \
+	{                                                                    \
+		nim_read_vendor_info(ctx, pfx##_VENDOR_NAME_LIN_ADDR,       \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, pfx##_VENDOR_PN_LIN_ADDR,         \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, pfx##_VENDOR_SN_LIN_ADDR,         \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, pfx##_VENDOR_DATE_LIN_ADDR,       \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, pfx##_VENDOR_REV_LIN_ADDR,        \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+XSFP_READ_VENDOR_INFO(, SFP)
+XSFP_READ_VENDOR_INFO(q, QSFP)
+
+/*
+ * Fill out 'state' for an SFP/SFP+ module: zero it and read the nominal
+ * bit rate field at SFP_BIT_RATE_ADDR over I2C.
+ * Returns the status of the I2C read.
+ */
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	memset(state, 0, sizeof(*state));
+
+	return nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+				  sizeof(state->br), &state->br);
+}
+
+/*
+ * Fill out 'state' for a QSFP family module. No HW access is performed:
+ * the nominal rate code is derived from the module identifier alone.
+ * Returns 0 on success, -1 for a non-QSFP identifier.
+ */
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 0x0CU: /* QSFP: 4 x 1G = 4G */
+		state->br = 10U;
+		break;
+	case 0x0DU: /* QSFP+: 4 x 10G = 40G */
+		state->br = 103U;
+		break;
+	case 0x11U: /* QSFP28: 4 x 25G = 100G */
+		state->br = 255U;
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+		break;
+	}
+
+	return res;
+}
+
+/*
+ * Dispatch state construction on module family: SFP/SFP+ modules read
+ * the rate from HW, QSFP family modules derive it from the identifier.
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	return (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) ?
+	       sfp_nim_state_build(ctx, state) :
+	       qsfp_nim_state_build(ctx, state);
+}
+
+/*
+ * Translate an SFF-8024 module identifier value to a printable name.
+ * Unknown or reserved values map to "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	/* Sparse table; gaps (e.g. 0x0E..0x10) stay NULL */
+	static const char * const id_names[] = {
+		[0x00] = "UNKNOWN",
+		[0x01] = "GBIC",
+		[0x02] = "FIXED",
+		[0x03] = "SFP/SFP+",
+		[0x04] = "300 pin XBI",
+		[0x05] = "XEN-PAK",
+		[0x06] = "XFP",
+		[0x07] = "XFF",
+		[0x08] = "XFP-E",
+		[0x09] = "XPAK",
+		[0x0A] = "X2",
+		[0x0B] = "DWDM",
+		[0x0C] = "QSFP",
+		[0x0D] = "QSFP+",
+		[0x11] = "QSFP28",
+		[0x12] = "CFP4",
+	};
+
+	if (nim_id < sizeof(id_names) / sizeof(id_names[0]) &&
+			id_names[nim_id] != NULL)
+		return id_names[nim_id];
+	return "ILLEGAL!";
+}
+
+/*
+ * Validate the NIM's static data via the two SFF-8472 checksums and
+ * preload the first 96 bytes in one linear read.
+ * CC_BASE (byte 63) must equal the low byte of the sum of bytes 0..62,
+ * and CC_EXT (byte 95) the low byte of the sum of bytes 64..94.
+ * Sets ctx->content_valid accordingly; the read status itself is not
+ * checked here - a failed read shows up as an invalid checksum.
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	/* CC_BASE: sum of bytes 0..62 stored in byte 63 */
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		/* CC_EXT: sum of bytes 64..94 stored in byte 95 */
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset the soft rate-select bits RS0 (Control/Status register) and
+ * RS1 (Extended Control/Status register) via read-modify-write cycles.
+ * I2C errors are ignored (best effort), as in the rest of this module.
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	const struct {
+		uint16_t addr;  /* linear address of the register */
+		uint8_t bit;    /* soft rate-select bit within it */
+		bool set_high;  /* requested state of the bit */
+	} rate_sel[2] = {
+		/* RS0 lives in Control/Status */
+		{ SFP_CONTROL_STATUS_LIN_ADDR, SFP_SOFT_RATE0_BIT,
+		  rx_rate_high },
+		/* RS1 lives in Extended Control/Status */
+		{ SFP_EXT_CTRL_STAT0_LIN_ADDR, SFP_SOFT_RATE1_BIT,
+		  tx_rate_high },
+	};
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		uint8_t data;
+
+		nim_nim_read_write_data_lin(ctx, m_page_addressing,
+					    rate_sel[i].addr, sizeof(data),
+					    &data, NIM_READ);
+
+		if (rate_sel[i].set_high)
+			data |= rate_sel[i].bit;
+		else
+			data &= (uint8_t)~rate_sel[i].bit;
+
+		nim_nim_read_write_data_lin(ctx, m_page_addressing,
+					    rate_sel[i].addr, sizeof(data),
+					    &data, NIM_WRITE);
+	}
+}
+
+/*
+ * Apply the rate-select setting some SFP modules require for a given
+ * link speed. Fails when the requested speed is outside the module's
+ * speed mask; dual-rate modules get RS0/RS1 set when the requested rate
+ * is the higher of the two supported rates.
+ * Returns 0 on success, -1 when the speed is not supported.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		/*
+		 * Bug fix: the format string has three %s conversions but
+		 * the original call passed only two variadic arguments
+		 * (missing __func__), which is undefined behavior.
+		 */
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Enable/disable the SFP Tx laser via the soft Tx-disable bit in the
+ * Control/Status register (read-modify-write).
+ * Returns 0 on success or the failing I2C status.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t ctrl;
+	int res = nim_nim_read_write_data_lin(ctx, pg_addr,
+					      SFP_CONTROL_STATUS_LIN_ADDR,
+					      sizeof(ctrl), &ctrl, NIM_READ);
+
+	if (res != 0)
+		return res;
+
+	if (disable)
+		ctrl |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		ctrl &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr,
+					   SFP_CONTROL_STATUS_LIN_ADDR,
+					   sizeof(ctrl), &ctrl, NIM_WRITE);
+}
+
+/*
+ * Enable/disable the QSFP+ Tx laser for one lane (lane_idx >= 0) or for
+ * all lanes (lane_idx < 0) via the soft Tx-disable bits in the
+ * Control/Status register (read-modify-write).
+ * Returns 0 on success, -1 on I2C failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	const uint8_t mask = (lane_idx < 0) ?
+			     QSFP_SOFT_TX_ALL_DISABLE_BITS :
+			     (uint8_t)(1U << lane_idx);
+	uint8_t ctrl;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		ctrl |= mask;
+	else
+		ctrl &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read a vendor info string of at most (max_len - 1) bytes into p_data
+ * and normalize it: the string is cut at the first whitespace character
+ * and is always NUL-terminated, since the strings in NIM data are space
+ * padded and not necessarily terminated.
+ * Returns 0 on success, -1 on I2C failure.
+ * NOTE(review): assumes max_len >= 1; max_len == 0 would underflow the
+ * (uint8_t)(max_len - 1) read length - confirm against callers, which
+ * currently all pass sizeof() of a context buffer.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import the supported-length values from NIM module data, converting
+ * each entry from its MSA unit (p_nim_units, meters per count) to meters
+ * in ctx->len_info. A raw value of 255 means "longer than encodable" and
+ * any result above 65535 m saturates to 65535.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++) {
+		uint8_t raw = p_nim_len_info[i];
+		uint32_t meters;
+
+		if (raw == 255) {
+			ctx->len_info[i] = 65535;
+			continue;
+		}
+
+		meters = (uint32_t)raw * p_nim_units[i];
+		ctx->len_info[i] = (meters > 65535) ? 65535 : (uint16_t)meters;
+	}
+}
+
+/*
+ * Read the basic static QSFP/QSFP+ data into the context: DMI options,
+ * vendor info strings, the supported-length table and the required power
+ * class. Returns 0 on success, -1 on any I2C failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr]);
+
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	/* Supported-length table, converted to meters below */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * Tell whether the QSFP28 module requires explicit rate selection
+ * (SFF-8636 Rev 2.10a: 6.2.7 "Rate Select" p40 and A.2 p85).
+ * True only when the rate-select option bit (byte 195, bit 5) is set and
+ * the enhanced options (byte 221, bits 3..2) report extended rate
+ * selection, version 2 - the only variant handled here; for that case
+ * qsfp28_set_link_speed() can be used.
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+	uint8_t rate_select_ena;
+	uint8_t rate_select_type;
+
+	rate_select_ena =
+		(uint8_t)((read_byte(ctx, options_reg_addr) >> 5) & 0x01);
+	if (rate_select_ena == 0)
+		return false;
+
+	rate_select_type =
+		(uint8_t)((read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03);
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ * Returns 0 on success, -1 for an unsupported speed/lane combination.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	/* SFF-8636 Rx/Tx rate-select registers, 2 bits per lane */
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/*
+			 * Set speed for Rx and Tx on all lanes.
+			 * NOTE(review): 'data' is written as two raw bytes,
+			 * so the on-wire byte order follows host endianness;
+			 * the two values used (0xAAAA/0x0000) are symmetric,
+			 * but confirm the intended order before adding
+			 * asymmetric patterns.
+			 */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a link-speed selection to the module. SFP/SFP+ modules use soft
+ * rate select; QSFP28 modules are configured only when they expose rate
+ * selection, otherwise they adapt automatically. Any other module type
+ * is rejected with -1.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return nim_sfp_set_rate_select(ctx, speed);
+	case NT_NIM_QSFP28:
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+		return 0; /* NIM picks up the speed automatically */
+	default:
+		NT_LOG(ERR, ETHDEV,
+		       "%s nim is not supported for adjustable link speed.",
+		       nim_id_to_text(ctx->nim_id));
+		return -1;
+	}
+}
+
+/*
+ * Read the basic static SFP data into the context: checksum validation,
+ * DMI/enhanced options, vendor info strings, supported-length table and
+ * power level (SFF-8472 layout). Always returns 0; individual read
+ * failures are not propagated by this best-effort sequence.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	/* With DMI implemented all five monitor values are available */
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	/* Required power level: level 2 modules must be enabled explicitly */
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Determine the QSFP+/QSFP28 DMI options from the vendor product number.
+ * This list also covers QSFP28 modules and should be used when automatic
+ * detection does not work.
+ * Returns true when the product number is known; otherwise only the
+ * mandatory temperature sensor option is set and false is returned.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	/* The full DMI set implemented by most of the modules below */
+	const uint32_t all_sensors =
+		(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+		(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+		(1 << NIM_OPTION_TX_POWER);
+	const struct {
+		const char *prod_no;
+		uint32_t options;
+	} known[] = {
+		/* FINISAR QSFP+ */
+		{ "FTL410QE1C", (1 << NIM_OPTION_TEMP) |
+				(1 << NIM_OPTION_SUPPLY) |
+				(1 << NIM_OPTION_TX_BIAS) |
+				(1 << NIM_OPTION_TX_POWER) },
+		{ "FTL410QE2C", (1 << NIM_OPTION_TEMP) |
+				(1 << NIM_OPTION_SUPPLY) },
+		{ "FTL4C1QE1C", all_sensors },
+		/*
+		 * AFBR-79E4Z: the digital diagnostic accuracy is not
+		 * guaranteed so only the mandatory (but equally inaccurate)
+		 * temperature sensor is made available
+		 */
+		{ "AFBR-79E4Z", (1 << NIM_OPTION_TEMP) },
+		{ "AFBR-79E4Z-D", all_sensors },
+		{ "AFBR-79EQDZ", all_sensors },
+		/*
+		 * Avago BiDi NIMs: no sensors available, not even the
+		 * normally mandatory temp sensor, which is allowed for
+		 * active optical modules (SFF-8436_rev4.1, p67)
+		 */
+		{ "AFBR-79EBRZ", (1 << NIM_OPTION_RX_ONLY) }, /* Rx only */
+		{ "AFBR-79EBPZ-NU1", 0 },
+		{ "AFBR-79EBPZ", 0 },
+		/* AVAGO QSFP28 */
+		{ "AFBR-89CDDZ", all_sensors },
+		{ "AFBR-89BDDZ", all_sensors }, /* BiDi */
+		/*
+		 * AFBR-89BRDZ: per vendor correspondence an RxOnly version
+		 * of AFBR-89BDDZ with lasers default off; the lasers can be
+		 * turned on but probably should not be since the receivers
+		 * might be degraded (the reason they are sold as RxOnly),
+		 * hence no Tx sensors are exposed.
+		 */
+		{ "AFBR-89BRDZ", (1 << NIM_OPTION_TEMP) |
+				 (1 << NIM_OPTION_SUPPLY) |
+				 (1 << NIM_OPTION_RX_POWER) |
+				 (1 << NIM_OPTION_RX_ONLY) },
+		/* Sumitomo QSFP28 */
+		{ "SQF1000L4LNGG01P", all_sensors },
+		{ "SQF1000L4LNGG01B", all_sensors },
+		{ "SQF1001L4LNGG01P", all_sensors },
+		{ "SQF1001L4LNGG01B", all_sensors },
+		{ "SQF1002L4LNGG01B", all_sensors },
+		/* Fujitsu QSFP28 */
+		{ "FIM37700/171", all_sensors },
+		{ "FIM37700/172", all_sensors },
+		/* InnoLight QSFP28 */
+		{ "TR-FC85S-NVS", all_sensors },
+		{ "TR-FC13L-NVS", all_sensors },
+		/* Finisar QSFP28 */
+		{ "FTLC9551REPM", all_sensors },
+		{ "FTLC9558REPM", all_sensors },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
+		if (strcmp(ctx->prod_no, known[i].prod_no) == 0) {
+			ctx->options = known[i].options;
+			return true;
+		}
+	}
+
+	/*
+	 * Unknown product number.
+	 * DO NOTE: The temperature sensor is not mandatory on active/passive
+	 * copper and active optical modules
+	 */
+	ctx->options = (1 << NIM_OPTION_TEMP);
+	return false;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its
+ * limits. This is a highly empirical heuristic that cannot be guaranteed to
+ * give the correct result, but it avoids depending on a PN-table solution.
+ * Reads 'lane_count' 16-bit big-endian values at value_addr and the four
+ * 16-bit warning/alarm thresholds at limit_addr; when the evidence says the
+ * sensor exists, bit 'sensor_option' is OR'ed into ctx->options.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s): two bytes (MSB first) per lane */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		/* Sign-extend 16-bit two's complement sensors (e.g. temp) */
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find the active QSFP sensors by probing each candidate sensor's values
+ * and thresholds (see qsfpplus_find_qsfp_sensor_option()).
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	const struct {
+		uint16_t value_addr; /* current value register(s) */
+		uint8_t lane_count;  /* 1 shared value or 4 per-lane values */
+		uint16_t limit_addr; /* warning/alarm threshold block */
+		bool two_compl;      /* values are two's complement */
+		uint32_t option;     /* NIM_OPTION_* bit to set if present */
+	} probes[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, probes[i].value_addr,
+						 probes[i].lane_count,
+						 probes[i].limit_addr,
+						 probes[i].two_compl,
+						 probes[i].option);
+}
+
+/*
+ * Classify an SFP/SFP+/SFP28 module and set ctx->port_type plus the
+ * SFP-specific flags (sfpplus/sfp28/dual_rate/cu_type/tri_speed/...).
+ *
+ * All reads are from the module EEPROM lower page (I2C address 0xA0);
+ * field offsets follow SFF-8472. Offset 36 is the extended specification
+ * compliance code defined in SFF-8024, used for SFP28 classification.
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Byte 12: nominal bit rate in units of 100 Mbit/s */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	/* Identification fields from the lower EEPROM page (SFF-8472) */
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	/* 10 Gbit/s or faster implies SFP+ */
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive the supported link-speed bitmask from the detected SFP variant.
+ * Expects sfp_find_port_params() to have classified the module first.
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool dual = ctx->specific_u.sfp.dual_rate;
+	const bool tri = ctx->specific_u.sfp.tri_speed;
+
+	if (ctx->specific_u.sfp.sfp28) {
+		/* SFP28 runs 25G by default; dual-rate modules add 10G */
+		ctx->speed_mask = NT_LINK_SPEED_25G;
+
+		if (dual)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* SFP+ runs 10G by default */
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+
+		/* Dual-rate modules and DAC cables can also run 1G */
+		if (dual ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		ctx->speed_mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Plain SFP defaults to 1G */
+		ctx->speed_mask = NT_LINK_SPEED_1G;
+
+		if (dual || tri)
+			ctx->speed_mask |= NT_LINK_SPEED_100M;
+
+		if (tri)
+			ctx->speed_mask |= NT_LINK_SPEED_10M;
+	}
+
+	switch (ctx->port_type) {
+	case NT_PORT_TYPE_SFP_28_CR_CA_L:
+	case NT_PORT_TYPE_SFP_28_CR_CA_S:
+	case NT_PORT_TYPE_SFP_28_CR_CA_N:
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Classify a QSFP28 module from its SFF-8024 extended compliance code */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	/* Generic QSFP28 unless the extended code identifies a sub-type */
+	ctx->port_type = NT_PORT_TYPE_QSFP28;
+
+	if (!(fiber_chan_speed & (1 << 7)))
+		return;
+
+	/* SFF-8024, Rev 4.7, Table 4-4 */
+	uint8_t ext_compliance_code = 0;
+
+	read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+		    &ext_compliance_code);
+
+	static const struct {
+		uint8_t code;
+		nt_port_type_t type;
+	} code_map[] = {
+		{ 0x02, NT_PORT_TYPE_QSFP28_SR4 },
+		{ 0x03, NT_PORT_TYPE_QSFP28_LR4 },
+		{ 0x0B, NT_PORT_TYPE_QSFP28_CR_CA_L },
+		{ 0x0C, NT_PORT_TYPE_QSFP28_CR_CA_S },
+		{ 0x0D, NT_PORT_TYPE_QSFP28_CR_CA_N },
+		{ 0x25, NT_PORT_TYPE_QSFP28_DR },
+		{ 0x26, NT_PORT_TYPE_QSFP28_FR },
+		{ 0x27, NT_PORT_TYPE_QSFP28_LR },
+	};
+
+	for (size_t i = 0; i < sizeof(code_map) / sizeof(code_map[0]); i++) {
+		if (code_map[i].code == ext_compliance_code) {
+			ctx->port_type = code_map[i].type;
+			return;
+		}
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	/* Register addresses per SFF-8636 */
+	const uint8_t options_addr = 195;
+	const uint8_t enh_options_addr = 221;
+	const uint8_t ext_rate_compl_addr = 141;
+
+	/* Bit 5: is rate select implemented at all? */
+	if (((read_byte(ctx, options_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Bits 3..2: rate select type; only extended rate selection handled */
+	uint8_t sel_type = (read_byte(ctx, enh_options_addr) >> 2) & 0x03;
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	/* Bits 1..0: extended rate select version must be 2 */
+	uint8_t ext_ver = read_byte(ctx, ext_rate_compl_addr) & 0x03;
+
+	if (ext_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/* Derive the supported link-speed bitmask from the detected QSFP28 sub-type */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = (ctx->lane_idx < 0);
+	const bool pam4 = (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR);
+
+	if (pam4) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/* Classify a QSFP+ module from its transmitter technology nibble (SFF-8636) */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	/* Only the upper nibble encodes the transmitter technology */
+	const uint8_t tech = device_tech & 0xF0;
+
+	if (tech == 0xA0 || tech == 0xB0) {
+		/* Copper cable, unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tech >= 0xC0) {
+		/* Copper cable with limiting or linear active equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Everything below 0xA0 is an optical transmitter */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* All four lanes together give 40G; a single lane runs 10G */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read the basic SFP EEPROM data and, on success, classify the module and
+ * derive its speed mask. Returns the result of sfp_read_basic_data().
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	const int res = sfp_read_basic_data(ctx);
+
+	if (res != 0)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return 0;
+}
+
+/* Initialize the QSFP+ part of the context for @lane_idx (-1 = all lanes) */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * Construct and pre-initialize a QSFP+ context: read the basic EEPROM data,
+ * classify the port, detect sensor options (from the PN list or from module
+ * data) and derive the speed mask. Returns the result of
+ * qsfpplus_read_basic_data(); nothing further is done on failure.
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Wait until a QSFP28 module is ready after reset so that its upper pages
+ * can be read reliably.
+ *
+ * Reads the SFF-8636 revision compliance byte (address 1). For revision
+ * code >= 7 it checks whether the init-complete flag is implemented
+ * (byte 221 bit 4); if implemented, byte 6 bit 0 is polled with a 1 s
+ * timeout, otherwise a fixed 500 ms wait is used. The wait only happens
+ * for lane_idx == -1 (all lanes) or lane_idx == 0 (first lane).
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine which FEC options the module supports and whether FEC is
+ * controllable: first via a whitelist of known product numbers, otherwise
+ * from the SFF-8636 FEC control bits on page 03h.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare product number strings by content; the previous
+		 * pointer comparison (ctx->prod_no == nim_list[i]) could
+		 * never be true.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 on page 03h in linear addressing */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * Pre-initialize a QSFP28 module: run the QSFP+ preinit first, then wait
+ * for post-reset readiness and fill in the QSFP28-specific state.
+ * Returns the result of qsfpplus_preinit().
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	const int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (res != 0)
+		return res;
+
+	qsfp28_wait_for_ready_after_reset(ctx);
+	memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+	       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+	ctx->specific_u.qsfp.qsfp28 = true;
+	qsfp28_find_port_params(ctx);
+	qsfp28_get_fec_options(ctx);
+	qsfp28_set_speed_mask(ctx);
+	return res;
+}
+
+/*
+ * Allocate and link the sensor groups (temperature, voltage, bias current,
+ * TX power, RX power) for the SFP module on port @m_port_no.
+ *
+ * The head of the list is stored in nim_sensors_ptr[m_port_no] and
+ * *nim_sensors_cnt is set to the number of groups created. An allocation
+ * failure now terminates the list early; previously the NULL result of
+ * allocate_nim_sensor_group() was dereferenced unconditionally.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* Sensor descriptor / read-function pairs, in list order */
+	const struct {
+		struct nt_adapter_sensor_description *sd;
+		void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	} sensors[] = {
+		{ &sfp_sensors_level0[0], &nim_read_sfp_temp },
+		{ &sfp_sensors_level1[0], &nim_read_sfp_voltage },
+		{ &sfp_sensors_level1[1], &nim_read_sfp_bias_current },
+		{ &sfp_sensors_level1[2], &nim_read_sfp_tx_power },
+		{ &sfp_sensors_level1[3], &nim_read_sfp_rx_power },
+	};
+
+	struct nim_sensor_group **link = &nim_sensors_ptr[m_port_no];
+
+	for (size_t i = 0; i < ARRAY_SIZE(sensors); i++) {
+		*link = allocate_nim_sensor_group(m_port_no, ctx,
+						  NT_SENSOR_SOURCE_PORT,
+						  sensors[i].sd);
+		/* Stop early on allocation failure instead of dereferencing NULL */
+		if (*link == NULL)
+			return;
+
+		(*link)->read = sensors[i].read;
+		(*nim_sensors_cnt)++;
+		link = &(*link)->next;
+	}
+}
+
+/*
+ * Allocate and link the sensor groups for a QSFP+/QSFP28 module on port
+ * @m_port_no: temperature (level 0), then voltage, 4 x bias current,
+ * 4 x TX power and 4 x RX power (level 1). *nim_sensors_cnt is incremented
+ * once per group created. An allocation failure now terminates the list
+ * early; previously the NULL result of allocate_nim_sensor_group() was
+ * dereferenced unconditionally.
+ *
+ * NOTE(review): unlike sfp_nim_add_all_sensors() this function does not
+ * reset *nim_sensors_cnt to 0 - confirm that callers pre-initialize it.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* Sensor descriptor / source / read-function triples, in list order */
+	const struct {
+		struct nt_adapter_sensor_description *sd;
+		enum nt_sensor_source_e ssrc;
+		void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	} sensors[] = {
+		{ &qsfp_sensor_level0[0], NT_SENSOR_SOURCE_PORT, &nim_read_qsfp_temp },
+		{ &qsfp_sensor_level1[0], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_voltage },
+		{ &qsfp_sensor_level1[1], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_bias_current },
+		{ &qsfp_sensor_level1[2], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_bias_current },
+		{ &qsfp_sensor_level1[3], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_bias_current },
+		{ &qsfp_sensor_level1[4], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_bias_current },
+		{ &qsfp_sensor_level1[5], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_tx_power },
+		{ &qsfp_sensor_level1[6], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_tx_power },
+		{ &qsfp_sensor_level1[7], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_tx_power },
+		{ &qsfp_sensor_level1[8], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_tx_power },
+		{ &qsfp_sensor_level1[9], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_rx_power },
+		{ &qsfp_sensor_level1[10], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_rx_power },
+		{ &qsfp_sensor_level1[11], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_rx_power },
+		{ &qsfp_sensor_level1[12], NT_SENSOR_SOURCE_LEVEL1_PORT, &nim_read_qsfp_rx_power },
+	};
+
+	struct nim_sensor_group **link = &nim_sensors_ptr[m_port_no];
+
+	for (size_t i = 0; i < ARRAY_SIZE(sensors); i++) {
+		*link = allocate_nim_sensor_group(m_port_no, ctx,
+						  sensors[i].ssrc,
+						  sensors[i].sd);
+		/* Stop early on allocation failure instead of dereferencing NULL */
+		if (*link == NULL)
+			return;
+
+		(*link)->read = sensors[i].read;
+		(*nim_sensors_cnt)++;
+		link = &(*link)->next;
+	}
+}
+
+/*
+ * Allocate one sensor-group list node for @port and bind it to @ctx.
+ * Returns NULL (after logging) if the allocation fails; the node's
+ * next pointer is always initialized to NULL.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(*sg));
+
+	if (!sg) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+
+	sg->ctx = ctx;
+	sg->next = NULL;
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	return sg;
+}
+
+/*
+ * Classify the NIM from its identifier, run the type-specific
+ * pre-initialization and register all sensors for @port.
+ *
+ * @extra optionally points to an int8_t lane index for QSFP modules
+ * (NULL means all lanes, -1). Returns zero on success, non-zero if the
+ * common construction or the type-specific preinit fails, or if the NIM
+ * type is unsupported.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+	int pre_res = 0;
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		pre_res = sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		pre_res = qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		pre_res = qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	/* Preinit failures were previously discarded; propagate them */
+	if (res == 0)
+		res = pre_res;
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/* Per-NIM context: I2C access state plus data parsed from the module EEPROM */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance; /* I2C controller instance - TODO confirm */
+	uint8_t devaddr; /* I2C device address of the NIM - TODO confirm */
+	uint8_t regaddr; /* Register address within the device - TODO confirm */
+	uint8_t nim_id; /* NIM identifier byte (see enum nt_nim_identifier_e) */
+	nt_port_type_t port_type; /* Detected NIM/port type */
+
+	/* Identification strings read from the module EEPROM */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr; /* presumably average vs OMA RX power measurement - TODO confirm */
+	bool content_valid; /* presumably EEPROM content read successfully - TODO confirm */
+	uint8_t pwr_level_req; /* Requested power level - TODO confirm */
+	uint8_t pwr_level_cur; /* Current power level - TODO confirm */
+	uint16_t len_info[5]; /* Supported link-length information fields */
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count; /* Number of lanes (set to 4 for QSFP) */
+	uint32_t options; /* Bitmask of (1 << nim_option_t) capabilities */
+	bool tx_disable; /* TX_DISABLE is implemented by the module */
+	bool dmi_supp; /* Diagnostic monitoring interface supported */
+
+	union {
+		/* Valid for SFP/SFP+/SFP28 modules */
+		struct {
+			bool sfp28; /* Module is SFP28 (25G) */
+			bool sfpplus; /* Module is SFP+ (10G) */
+			bool dual_rate; /* Module supports two rates */
+			bool hw_rate_sel; /* Hard RATE_SELECT bit implemented */
+			bool sw_rate_sel; /* Soft RATE_SELECT bit implemented */
+			bool cu_type; /* Copper module */
+			bool tri_speed; /* 10/100/1000 tri-speed copper module */
+			bool ext_cal; /* presumably externally calibrated DMI - TODO confirm */
+			bool addr_chg; /* presumably address change sequence required - TODO confirm */
+		} sfp;
+
+		/* Valid for QSFP+/QSFP28 modules */
+		struct {
+			bool rx_only; /* RX-only module (determined from PN) */
+			bool qsfp28; /* Module is QSFP28 */
+			union {
+				/* Valid when qsfp28 is true */
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
+					bool media_side_fec_ctrl; /* Media-side FEC is controllable */
+					bool host_side_fec_ctrl; /* Host-side FEC is controllable */
+					bool media_side_fec_ena; /* Media-side FEC enabled */
+					bool host_side_fec_ena; /* Host-side FEC enabled */
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* Node in the per-port singly linked list of NIM sensors */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor; /* underlying adapter sensor */
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi); /* poll function */
+	struct nim_i2c_ctx *ctx; /* NIM this sensor belongs to */
+	struct nim_sensor_group *next; /* next group in the list, or NULL */
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify NIM based on it's ID and some register reads
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific type of NIMS.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/* Map a single link-speed enum value to its display name */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *name;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].name;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/* Convert a link-speed enum value to its rate in bits per second */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		uint64_t bps;
+	} speed_tbl[] = {
+		{ NT_LINK_SPEED_UNKNOWN, 0ULL },
+		{ NT_LINK_SPEED_10M, 10ULL * 1000ULL * 1000ULL },
+		{ NT_LINK_SPEED_100M, 100ULL * 1000ULL * 1000ULL },
+		{ NT_LINK_SPEED_1G, 1ULL * 1000ULL * 1000ULL * 1000ULL },
+		{ NT_LINK_SPEED_10G, 10ULL * 1000ULL * 1000ULL * 1000ULL },
+		{ NT_LINK_SPEED_25G, 25ULL * 1000ULL * 1000ULL * 1000ULL },
+		{ NT_LINK_SPEED_40G, 40ULL * 1000ULL * 1000ULL * 1000ULL },
+		{ NT_LINK_SPEED_50G, 50ULL * 1000ULL * 1000ULL * 1000ULL },
+		{ NT_LINK_SPEED_100G, 100ULL * 1000ULL * 1000ULL * 1000ULL },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_tbl) / sizeof(speed_tbl[0]); i++) {
+		if (speed_tbl[i].speed == e_link_speed)
+			return speed_tbl[i].bps;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return 0ULL;
+}
+
+/*
+ * Render the names of all speeds set in @link_speed_mask into @buffer
+ * (capacity @length bytes) as a comma-separated list. Returns @buffer.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			if (len > 0 && (length - len - 1) > 2) {
+				/*
+				 * strncat()'s bound is the number of characters
+				 * to append, not the total buffer size; passing
+				 * @length here (as before) could overflow the
+				 * buffer once it was partially filled.
+				 */
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+/* Short human-readable name ("10G", ...) for a single speed flag. */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+/* Comma-separated names for every flag in link_speed_mask; returns buffer. */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+/* Bit rate in bit/s for one speed flag; 0 for NT_LINK_SPEED_UNKNOWN. */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ * Linear byte addresses into the QSFP management memory map; offsets of the
+ * form (off + 3 * 128) address upper page 3.
+ * NOTE(review): layout matches SFF-8636 conventions - confirm against spec.
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read 'count' consecutive 16-bit DMI sensor values (one per lane) starting at
+ * linear address 'addr' into p_lane_values. nim_option is only consulted by the
+ * NIM_DMI_TEST_VALUE debug build. Always returns true.
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * NOTE(review): this assignment is a no-op although the comment
+		 * claims a byte swap; the SFP path (sfp_nim_get_dmi_data) uses
+		 * htons() at the same point. Confirm whether QSFP data really
+		 * needs no swap here.
+		 */
+		*p_lane_values = (*p_lane_values); /* Swap to little endian */
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (raw register value; caller scales, see
+ * nim_read_qsfp_temp's "* 10 / 256" conversion).
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage (single raw 16-bit value; caller converts to mV).
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM TX bias current for four lanes; p_value must hold 4 uint16_t.
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes; p_value must hold 4 uint16_t.
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes; p_value must hold 4 uint16_t.
+ * BUGFIX: read from QSFP_RX_PWR_LIN_ADDR; the original reused the TX power
+ * address (copy/paste), so this function returned TX power instead of RX.
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/*
+ * Sample QSFP module temperature and publish it via the sensor object.
+ * Publishes -1 when the module read fails.
+ */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_temperature(sg->ctx, &raw_temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw_temp * 10 / 256));
+}
+
+/*
+ * Sample QSFP supply voltage and publish it via the sensor object.
+ * Publishes -1 when the module read fails.
+ */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_supply_voltage(sg->ctx, &raw_volt)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw_volt / 10));
+}
+
+/*
+ * Sample QSFP TX bias current per active lane and publish each value.
+ * Publishes a single -1 when the module read fails.
+ */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i] * 2);
+}
+
+/*
+ * Sample QSFP TX optical power per active lane and publish each value.
+ * Publishes a single -1 when the module read fails.
+ */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
+
+/*
+ * Sample QSFP RX optical power per active lane and publish each value.
+ * Publishes a single -1 when the module read fails.
+ */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/*
+ * Read functions: each samples one QSFP quantity over I2C and publishes it
+ * through update_sensor_value() (-1 is published on read failure).
+ */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ * Linear byte addresses; offsets of the form (off + 256) address the 0xA2
+ * diagnostics page. (Duplicate SFP_CU_LINK_LEN_LIN_ADDR definition removed.)
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+/*
+ * NOTE(review): the two bits below have no register address define above them;
+ * presumably they belong to the 0xA2 control/status register - confirm.
+ */
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ * Returns false when the module reports no DMI support.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	/*
+	 * NOTE(review): htons() is used as a 16-bit byte swap; this only
+	 * converts the big-endian wire value on little-endian hosts (it is a
+	 * no-op on big-endian targets) - presumably only little-endian hosts
+	 * are supported; confirm.
+	 */
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to little endian */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/* Apply raw * slope/256 + offset, saturating to the 16-bit range */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (raw register value; caller scales via "* 10 / 256").
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage (raw unit 100uV; caller converts to mV).
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM TX bias current (single lane on SFP; caller doubles the raw value).
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power (raw DMI value, externally calibrated if needed).
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ * Returns false only when the calibrated result exceeds the 16-bit range.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		for (int i = 0; i < 5; i++) {
+			/*
+			 * NOTE(review): in-place 32-bit swap through a
+			 * uint32_t* alias of a float violates strict aliasing;
+			 * a memcpy-based swap would be safer.
+			 */
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Horner-style accumulation: coefficients stored high-to-low */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Reject results outside the 16-bit range */
+		if (rx_power > 65535)
+			return false;
+
+		/* Clamp negative results to zero */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists (externally calibrated when required).
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+/*
+ * Sample SFP module temperature and publish it via the sensor object.
+ * Publishes -1 when the module read fails.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_temperature(sg->ctx, &raw_temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw_temp * 10 / 256));
+}
+
+/*
+ * Sample SFP supply voltage and publish it via the sensor object.
+ * Publishes -1 when the module read fails.
+ */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_supply_voltage(sg->ctx, &raw_volt)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw_volt / 10)); /* Unit: 100uV -> 1mV */
+}
+
+/*
+ * Sample SFP TX bias current and publish it via the sensor object.
+ * Publishes -1 when the module read fails.
+ */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_bias;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_bias_current(sg->ctx, &raw_bias)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw_bias * 2));
+}
+
+/*
+ * Sample SFP TX optical power and publish it via the sensor object.
+ * Publishes -1 when the module read fails.
+ */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_power(sg->ctx, &raw_pwr)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)raw_pwr);
+}
+
+/*
+ * Sample SFP RX optical power and publish it via the sensor object.
+ * Publishes -1 when the module read fails.
+ */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_rx_power(sg->ctx, &raw_pwr)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)raw_pwr);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/* Allocate a zero-initialized GMF instance; returns NULL on allocation failure. */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/* Release a GMF instance; scrubs the memory first. NULL is a no-op. */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a GMF instance to FPGA module MOD_GMF/n_instance and resolve all
+ * register/field handles. With p == NULL this only probes for the instance
+ * (returns 0 if present, -1 otherwise). Returns -1 when the module is absent.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields - *_get_* variants fail hard if missing */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields - *_query_* returns NULL when absent */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF block; ignored once administratively blocked. */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->m_administrative_block)
+		field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame gap (IFG) handling. */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+/* Optional field (absent on older FPGA images); silently ignored when missing. */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Optional field (absent on older FPGA images); silently ignored when missing. */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Optional field (absent on older FPGA images); silently ignored when missing. */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock) {
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+				    enable ? 1 : 0);
+	}
+}
+
+/* Enable/disable automatic IFG adjustment. */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write the raw IFG speed value. Returns 0 on success, -1 when the value does
+ * not fit the SPEED field.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		/*
+		 * NOTE(review): viewing &n_speed_val as uint32_t[2] assumes
+		 * little-endian word order - confirm for big-endian targets.
+		 */
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Bit width of the SPEED field in this FPGA image (22 in GMF 1.2, 64 in 1.3). */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	assert(n_bit_width >=
+	       22); /* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	return n_bit_width;
+}
+
+/*
+ * Program the IFG speed from an absolute rate limit (bits/s) relative to the
+ * link speed, applying the product's speed mul/div scaling parameters.
+ * Returns 0 on success, -1 if the computed value does not fit the field.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	/* speed = (1/rate - 1) * 2^(width/2), i.e. a fixed-point gap factor */
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed as a percentage of line rate. 0%% and 100%% disable
+ * rate limiting (raw value 0); values above 99%% (but not 100) are rejected
+ * with -1.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit clock delta (two 32-bit words; assumes LE word order). */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the 64-bit clock delta adjust; no-op when the register is absent. */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+/* Write the 64-bit maximum IFG adjust slack (two 32-bit words). */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value. */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
+}
+
+/* Collect the GMF sticky status flags into a GMF_STATUS_MASK_* bit mask. */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	status |= field_get_val32(p->mp_stat_sticky_data_underflowed) ?
+		  GMF_STATUS_MASK_DATA_UNDERFLOWED : 0;
+	status |= field_get_val32(p->mp_stat_sticky_ifg_adjusted) ?
+		  GMF_STATUS_MASK_IFG_ADJUSTED : 0;
+
+	return status;
+}
+
+/*
+ * Write back the selected sticky bits (presumably write-1-to-clear semantics,
+ * as sticky registers usually are - confirm against register documentation).
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Next-packet timestamp (ns); UINT64_MAX when the optional register is absent.
+ * BUGFIX: sentinel was ULONG_MAX, which is only 32 bits on ILP32 targets; use
+ * UINT64_MAX to match the uint64_t return type.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Max delayed-packet time (ns); UINT64_MAX when the optional register is
+ * absent. BUGFIX: sentinel was ULONG_MAX (32-bit on ILP32); use UINT64_MAX to
+ * match the uint64_t return type.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Disable the GMF and latch the administrative block flag so later
+ * nthw_gmf_set_enable() calls are ignored (used to enforce license expiry).
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/* Bit masks used by nthw_gmf_get_status_sticky()/nthw_gmf_set_status_sticky() */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/* State for one GMF FPGA module instance: cached register/field handles */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	/* CTRL register and its enable/IFG control fields */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	/* Optional register: may be NULL on older FPGA images */
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* Optional registers: checked for NULL before use */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One SI5340 rev-D initialization entry: register address and value to write */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+/*
+ * SI5340 rev-D register initialization sequence (ClockBuilder-style export).
+ * Declared static: this table is *defined* in a header, so without static
+ * every translation unit including it would emit a duplicate external symbol.
+ */
+static si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/* Scrub and release an RMC instance; NULL is ignored. */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Initialize an RMC instance from the FPGA model.
+ *
+ * When p is NULL the call only probes for the module: it returns 0 if
+ * MOD_RMC instance n_instance exists, -1 otherwise, without touching state.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the generic port count parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL register is mandatory */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* STATUS/DBG/MAC_IF registers are optional (query, may be NULL) */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Return the current MAC-port block mask (forces a register re-read). */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/* SF RAM overflow counter; 0xffffffff when the STATUS register is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (p->mp_reg_status == NULL)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/* Descriptor FIFO overflow counter; 0xffffffff when STATUS is absent. */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (p->mp_reg_status == NULL)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/* Debug merge value; 0xffffffff when the DBG register is absent. */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (p->mp_reg_dbg == NULL)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Write a new MAC-port block mask and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block reception: BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0.
+ * A no-op while the administrative (license) block is in force.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	if (p->mb_administrative_block)
+		return;
+
+	field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+	field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+}
+
+/*
+ * Unblock reception for the adapter's ports (NIMs when b_is_slave):
+ * clear the stat-drop and keep-alive blocks and leave only the mask
+ * bits of non-existing ports set. No-op under administrative block.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	const unsigned int n_shift =
+		(unsigned int)(b_is_slave ? p->mn_nims : p->mn_ports);
+	/* Guard: shifting a 32-bit value by >= 32 is undefined behavior */
+	uint32_t n_block_mask = (n_shift < 32) ? (~0U << n_shift) : 0;
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Block all MAC ports and latch the administrative flag so later
+ * block/unblock calls become no-ops (license expiry enforcement).
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/* State for one RMC FPGA module instance: cached register/field handles */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports;
+	int mn_nims;
+	bool mb_is_vswitch;
+
+	/* Once set, block/unblock become no-ops (license expiry enforcement) */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional; NULL when absent) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional; NULL when absent) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+/* Passing p == NULL only probes for the module's existence */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+/* Getters below return 0xffffffff when the backing register is absent */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Per-adapter counter handing out FPGA sensor result slots.
+ * Made static: it is file-private mutable state, accessed only through
+ * get_fpga_idx(), and must not leak into the global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * This function setups monitoring of AVR sensors
+ *
+ * Configures a single sensor in the AVR, directing its result into the
+ * next free FPGA result slot of this adapter, and returns that slot index.
+ * Alarm limits are set to NaN so no limit checking happens in the AVR yet.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format packs endianness in bits 0-1 and signedness in bits 2-3 */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/* Read one AVR-monitored raw value from the FPGA and update the sensor group. */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t n_raw_result;
+
+	if (sg == NULL)
+		return;
+	if (sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &n_raw_result);
+	update_sensor_value(sg->sensor, sg->conv_func(n_raw_result));
+}
+
+/*
+ * Allocate and register one AVR-backed sensor group.
+ * Returns the new group, or NULL when group or sensor allocation fails.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* was dereferenced unchecked below -> NULL deref on OOM */
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/* Hand out the next free FPGA result slot index for this adapter. */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	return s_fpga_indexes[adapter_no]++;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+/*
+ * Allocate and register one AVR-backed sensor group; conv_func translates
+ * the raw AVR reading into the sensor's reported value.
+ * Returns NULL on allocation failure.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Read the FPGA die temperature from the TEMPMON field and update the sensor.
+ * The SPI handle is unused for this on-chip sensor.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Linear raw-to-temperature conversion: (val * 20159 - 44752896) / 16384.
+	 * NOTE(review): constants presumably match the FPGA sensor transfer
+	 * function (offset ~2731.5 suggests a Kelvin-based scale) - confirm
+	 * against the FPGA/ADC datasheet.
+	 */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the on-chip FPGA temperature sensor group.
+ * Returns NULL when group or monitor allocation fails.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	if (sg->monitor == NULL) {
+		/* the read callback dereferences monitor->fields[0];
+		 * do not hand out a group that would crash on first read
+		 */
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+/*
+ * Create the on-chip FPGA temperature sensor group backed by the TEMPMON
+ * module. Returns NULL on allocation failure.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate a TEMPMON monitor. Zero-initialized (calloc) so a subsequent
+ * partially-failed tempmon_init() cannot leave indeterminate pointers
+ * behind (fpga/mod/reg/fields all start as NULL).
+ * Returns NULL on allocation failure (logged).
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *temp =
+		calloc(1, sizeof(struct nt_fpga_sensor_monitor));
+	if (temp == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return temp;
+}
+
+/*
+ * Bind a TEMPMON monitor to the FPGA's TEMPMON module/register/field.
+ * Aborts (with an error log) at the first lookup failure instead of
+ * passing NULL on to the next lookup.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return; /* was falling through and dereferencing a NULL module */
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return; /* was falling through and dereferencing a NULL register */
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+/* Allocate a TEMPMON monitor object; returns NULL on allocation failure */
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+/* Bind the monitor to the FPGA's TEMPMON module, STAT register and TEMP field */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+/* Shorthand aliases keeping the sensor tables below readable */
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* QSFP level 0: module temperature (name auto-assigned, as for SFP) */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* QSFP level 1: supply voltage plus per-lane (1-4) bias/Tx/Rx readings */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+/* Static NIM sensor description tables defined in nim_sensors.c */
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/* Packed: this struct is sent over SPI to the AVR as-is */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/*
+ * NOTE(review): pack() is restored BEFORE sensor_mon_setup16, so that struct
+ * is NOT packed and padding may follow setup_cnt - confirm the AVR wire
+ * format really expects an unpacked header before changing anything.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res = 1;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = (uint16_t)*rxsz, .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Read the current result word of sensor @fpga_idx through the sensor
+ * SPI interface; thin wrapper around nthw_spis_read_sensor().
+ *
+ * @param t_spi           Sensor SPI handle.
+ * @param fpga_idx        Index of the sensor result to read.
+ * @param p_sensor_result Out: raw sensor result.
+ * @return Status code propagated from nthw_spis_read_sensor().
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/**
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0, /* Unknown/unspecified sensor type */
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7, /* Count of defined types — not a sensor type */
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated.
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP, /* Module temperature */
+	NT_SENSOR_SFP_SUPPLY, /* Supply voltage */
+	NT_SENSOR_SFP_TX_BIAS, /* TX bias current */
+	NT_SENSOR_SFP_TX_POWER, /* TX optical power */
+	NT_SENSOR_SFP_RX_POWER, /* RX optical power */
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated.
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP, /* Module temperature */
+	NT_SENSOR_QSFP_SUPPLY, /* Supply voltage */
+	NT_SENSOR_QSFP_TX_BIAS1, /* TX bias current, lane 1 */
+	NT_SENSOR_QSFP_TX_BIAS2, /* TX bias current, lane 2 */
+	NT_SENSOR_QSFP_TX_BIAS3, /* TX bias current, lane 3 */
+	NT_SENSOR_QSFP_TX_BIAS4, /* TX bias current, lane 4 */
+	NT_SENSOR_QSFP_TX_POWER1, /* TX optical power, lane 1 */
+	NT_SENSOR_QSFP_TX_POWER2, /* TX optical power, lane 2 */
+	NT_SENSOR_QSFP_TX_POWER3, /* TX optical power, lane 3 */
+	NT_SENSOR_QSFP_TX_POWER4, /* TX optical power, lane 4 */
+	NT_SENSOR_QSFP_RX_POWER1, /* RX optical power, lane 1 */
+	NT_SENSOR_QSFP_RX_POWER2, /* RX optical power, lane 2 */
+	NT_SENSOR_QSFP_RX_POWER3, /* RX optical power, lane 3 */
+	NT_SENSOR_QSFP_RX_POWER4, /* RX optical power, lane 4 */
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0, /* No subtype applies */
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+/* Values are distinct bits so sources can be combined into a bitmask */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+/* Life-cycle state reported in nt_info_sensor_s.state */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Master/Slave
+ */
+/* Role of an adapter within a bonded pair */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_MASTER, /* Adapter is master in the bonding */
+	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+/* Generic adapter-level sensor ids */
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+/*
+ * Adapter model identifiers.  Note the two high-bit entries at the end:
+ * they are group-marker flags (single bits), not sequential model ids.
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+/* Ids are consecutive; NT_SENSOR_NT200E3_L1_MAX relies on this ordering */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+/* Ids are consecutive; NT_SENSOR_NT200E3_PORT_MAX relies on this ordering */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/*
+ * Reset a sensor-group node to a known empty state before it is
+ * populated; clears every pointer member of nt_sensor_group.
+ */
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
+/* Conversion functions */
/*
 * Identity conversion for signed readings: reinterpret the low 16 bits
 * of the raw result as a two's complement value.
 */
int null_signed(uint32_t p_sensor_result)
{
	int16_t raw = (int16_t)p_sensor_result;

	return raw;
}
+
/*
 * Identity conversion for unsigned readings: keep only the low 16 bits
 * of the raw result.
 */
int null_unsigned(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vch value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.015 (PRESCALE is accounted for)
+ * ******************************************************************************
+ */
/*
 * EXAR7724 Vch conversion: Vout = ReadVal * 0.015 V, i.e. 15 mV per
 * LSB, which is already the NT unit of 1 mV.
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	uint32_t millivolts = p_sensor_result * 15u;

	return (int)millivolts; /* NT unit: 1mV */
}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vin value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.0125
+ * ******************************************************************************
+ */
/*
 * EXAR7724 Vin conversion: Vout = ReadVal * 0.0125 V.  Computed as
 * *25/2 to stay in integer arithmetic (12.5 mV per LSB).
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	uint32_t millivolts = (p_sensor_result * 25u) / 2u;

	return (int)millivolts; /* NT unit: 1mV */
}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Tj value to Napatech internal representation
+ * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
+ *                      = ReadVal * 5K
+ * ******************************************************************************
+ */
/*
 * EXAR7724 Tj conversion: 5 K per LSB => 50 deci-degrees per LSB.
 * Kelvin-to-Celsius offset of 2730 (0.1 C units) is used instead of the
 * more exact 2732 because the 5-degree step makes round values natural.
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	int deci_celsius = (int)(p_sensor_result * 50u) - 2730;

	return deci_celsius; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * Conversion function for Linear Tecnology Linear_5s_11s format.
+ * The functions returns Y * 2**N, where N = b[15:11] is a 5-bit two's complement
+ * integer and Y = b[10:0] is an 11-bit two's complement integer.
+ * The multiplier value is used for scaling to Napatech units.
+ * ******************************************************************************
+ */
/*
 * Decode the Linear Technology Linear_5s_11s format: the result is
 * Y * 2**N, where N = b[15:11] is a 5-bit two's complement exponent and
 * Y = b[10:0] is an 11-bit two's complement mantissa.  'multiplier'
 * scales the decoded value into Napatech units.
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	/* Sign-extend the 11-bit mantissa (bit 10 is the sign bit) */
	int mantissa = value & 0x07FF;

	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent (bit 4 is the sign bit) */
	int exponent = (value >> 11) & 0x1F;

	if (exponent & 0x10)
		exponent -= 0x20;

	int scaled = mantissa * multiplier;

	/* Apply 2**exponent via multiply/divide (shifting a negative
	 * value would be undefined) */
	if (exponent > 0)
		scaled *= (1 << exponent);
	else if (exponent < 0)
		scaled /= (1 << (-exponent));

	return scaled;
}
+
+/*
+ * ******************************************************************************
+ * Temperature conversion from Linear_5s_11s format.
+ * ******************************************************************************
+ */
+/*
+ * LTM4676 junction temperature: decode the raw Linear_5s_11s word and
+ * scale by 10 into 0.1 C units.
+ * NOTE(review): the uint16_t cast discards the sign from conv5s_11s(),
+ * so a below-zero temperature would wrap to a large positive value —
+ * presumably acceptable for this on-board regulator, but confirm.
+ */
+int ltm4676_tj(uint32_t p_sensor_result)
+{
+	return (uint16_t)conv5s_11s(p_sensor_result, 10); /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For MP2886a: Convert a read Tj value to Napatech internal representation
+ * ******************************************************************************
+ */
/*
 * MP2886a junction temperature.
 * MPS-2886p: READ_TEMPERATURE (register 0x8Dh) is a 2-byte, unsigned
 * integer; only the low 16 bits of the raw result are significant.
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	uint16_t temperature = (uint16_t)p_sensor_result;

	return temperature; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For DS1775: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
/*
 * DS1775 temperature conversion: native unit is 1/256 C, NT unit is
 * 0.1 C, hence *10/256.
 */
int ds1775_t(uint32_t p_sensor_result)
{
	uint32_t deci_celsius = (p_sensor_result * 10u) / 256u;

	return (int)deci_celsius;
}
+
+/*
+ * ******************************************************************************
+ * For FAN: Convert a tick count to RPM
+ * NT unit: RPM, Native unit: 2 ticks/revolution
+ * ******************************************************************************
+ */
/*
 * Convert a FAN tick count to RPM: multiply by 60 seconds, divide by
 * ticks per revolution.
 * NOTE(review): the banner above says 2 ticks/revolution while the code
 * divides by 4 — presumably the counter spans two sample intervals;
 * confirm against the hardware documentation.
 */
int fan(uint32_t p_sensor_result)
{
	uint32_t rpm = p_sensor_result * 60U / 4;

	return (int)rpm;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
+/* Alarm reporting policy for a sensor (semantics follow the names;
+ * no further usage is visible in this file) */
+enum nt_sensor_event_alarm_e {
+	NT_SENSOR_ENABLE_ALARM, /* Alarm events enabled */
+	NT_SENSOR_LOG_ALARM, /* Alarm conditions are logged */
+	NT_SENSOR_DISABLE_ALARM, /* Alarm events disabled */
+};
+
+/*
+ * Sensor Class types
+ */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Wildcard class: matches any sensor class */
+};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ */
+/* Runtime state of a single adapter/port sensor */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no; /* Adapter number; 0xFF set at allocation as "not assigned" */
+	uint8_t m_intf_no; /* Interface (port) number; 0xFF set at allocation as "not assigned" */
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si; /* Signedness of the raw readings */
+	struct nt_info_sensor_s info; /* Info block returned by the getters */
+	enum nt_sensor_event_alarm_e alarm; /* Alarm reporting policy */
+	bool m_enable_alarm; /* Alarm generation enabled */
+};
+
+/* FPGA access handles used to read a sensor directly from registers */
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga; /* FPGA handle */
+	nt_module_t *mod; /* Module containing the sensor register */
+
+	nt_register_t *reg; /* Register to read */
+	nt_field_t **fields; /* Fields of interest within 'reg' */
+	uint8_t fields_num; /* Number of entries in 'fields' */
+};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name (NUL-terminated; read via strlen()). */
+};
+
+/*
+ * Linked-list node binding a sensor to its FPGA monitor, read callback
+ * and raw-to-NT-unit conversion function.  sensor_deinit() frees the
+ * owned 'sensor' and 'monitor' members along with the node itself.
+ */
+struct nt_sensor_group {
+	struct nt_adapter_sensor *sensor; /* Owned; freed by sensor_deinit() */
+	struct nt_fpga_sensor_monitor *monitor; /* Owned; freed by sensor_deinit() */
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* Read callback (implemented elsewhere) */
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result);
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next; /* Next group in the list (NULL-terminated) */
+};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+/* Max sensor name length in characters, excluding the NUL terminator */
+#define NT_INFO_SENSOR_NAME 50
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (NUL-terminated) */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
+/* The NT200A02 adapter sensor id's */
+enum nt_sensors_adapter_nt200a02_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */
+
+	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU (Micro Controller Unit) temperature sensor */
+	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NT200A02_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NIM_POWER,
+
+	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
+};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-22 15:41   ` Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized CAT module context.
+ * Returns NULL on allocation failure; release with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc allocates and zeroes in one step (idiomatic for malloc+memset) */
+	return calloc(1, sizeof(struct cat_nthw));
+}
+
+/* Scrub and release a CAT module context; a NULL argument is a no-op. */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode); /* forward debug level to the underlying CAT FPGA module */
+}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance) /* Bind all CAT module registers/fields; returns 0 on success, -1 if the instance does not exist. */
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256); /* instance number must fit in uint8_t m_physical_adapter_no */
+
+	if (p == NULL) /* probe-only call: report module presence without binding */
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1); /* -1 when product has no KM_IF_CNT parameter (legacy layout) */
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) { /* legacy layout: single unnumbered KM OR field */
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR); /* query: may be NULL if only one KM interface */
+	}
+
+	if (p->m_km_if_cnt < 0) { /* same condition as == -1 above: unnumbered KCE/KCS/FTE registers */
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else { /* numbered layout: KCE0/KCS0/FTE0 and KCE1/KCS1/FTE1 register pairs */
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE); /* query_field from here on: fields optional per FPGA build, may be NULL */
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP); /* optional CFN fields: NULL on FPGA builds lacking them */
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL); /* KCC/CCE/CCS register groups are optional as well */
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN table access: select the table address to operate on */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val); /* stage CFN_CTRL.ADR (written on flush) */
+}
+
+/*
+ * Stage the CFN_CTRL.CNT field value.
+ * Renamed from the meaningless "r": every other CTRL.CNT setter in this
+ * module follows the cat_nthw_<table>_cnt naming pattern (kce/kcs/fte/cte...).
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val) /* CFN_DATA field setters: each stages one shadow-register field; written to HW by cat_nthw_cfn_flush() */
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val) /* optional field: asserted because register_query_field() may have returned NULL in init */
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val) /* multi-word field: copies mn_words 32-bit words from val */
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val) /* only bound when the product has two KM interfaces, hence the assert */
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+void cat_nthw_cfn_flush(const struct cat_nthw *p) /* commit the staged CFN_CTRL and CFN_DATA shadow registers to hardware */
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val) /* KCE/KCS/FTE setters: index selects KM interface 0 or 1 (only 0 valid on single-KM products) */
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index) /* commit staged KCE ctrl+data shadow registers to hardware */
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE accessors: per-destination enable bits in the CTE_DATA register.
+ * Fields guarded by assert() (msk, hst, epp, tpe, rrb) may be NULL on
+ * FPGA variants that lack the corresponding block — TODO confirm which
+ * FPGA versions populate them in cat_nthw_init().
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Flush the staged CTE CTRL and DATA registers (one entry each). */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS accessors: CAT_A/CAT_B data fields, selected by addr/cnt. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+/* Flush the staged CTS CTRL and DATA registers (one entry each). */
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT accessors: color/km data fields; NFV_SB is optional (assert-guarded). */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	/* Optional field; NULL when the FPGA lacks it — assert guards misuse. */
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+/* Flush the staged COT CTRL and DATA registers (one entry each). */
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT accessors: color/km data fields, selected by addr/cnt. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+/* Flush the staged CCT CTRL and DATA registers (one entry each). */
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO accessors: dyn selector plus a signed byte offset (hence int32_t). */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	/* Signed offset stored through the unsigned field API; the bit
+	 * pattern is what is written to hardware.
+	 */
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+/* Flush the staged EXO CTRL and DATA registers (one entry each). */
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	/* Unlike the field setters above, RCK_DATA is written as a whole
+	 * register (no sub-field handle exists in struct cat_nthw), so it
+	 * must be marked dirty explicitly for the flush to pick it up.
+	 */
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+/* Flush the staged RCK CTRL and DATA registers (one entry each). */
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN accessors: lower/upper bounds, dyn selectors and invert flag. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+/* Flush the staged LEN CTRL and DATA registers (one entry each). */
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC / CCE / CCS accessors.  All handles are assert-guarded: these
+ * register sets are presumably only present on newer FPGA variants, so
+ * the pointers may be NULL — TODO confirm against cat_nthw_init().
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	/* The key field is two 32-bit words wide; val must point at >= 2 words. */
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+/* Flush the staged KCC CTRL and DATA registers (one entry each). */
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+/* Flush the staged CCE CTRL and DATA registers (one entry each). */
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generate a one-line, assert-guarded setter for a CCS DATA field.
+ * The expansion is a complete function definition, so the invocations
+ * below must NOT be followed by a semicolon: an extra ';' at file scope
+ * is not valid ISO C (rejected pre-C23; GCC/Clang warn with -Wpedantic:
+ * "ISO C does not allow extra ';' outside of a function").
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/* Flush the staged CCS CTRL and DATA registers (one entry each). */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/*
+ * NOTE(review): renamed from the clearly mangled prototype "void r(...)".
+ * Every other register set in this header pairs a _select() setter with a
+ * _cnt() setter, so this declares the CFN count setter — confirm the
+ * definition in flow_nthw_cat.c uses this name.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Per-instance context for the CAT (categorizer) FPGA module: the FPGA
+ * handle plus cached register and field handles for every register set
+ * the accessors above operate on.  Handles for optional register sets
+ * (COT nfv_sb, KCC, CCE, CCS, CTE msk/hst/epp/tpe/rrb) may be NULL —
+ * the accessors assert on them before use.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN: categorizer function table */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE 0/1: one bank per index */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	/* KCS 0/1 */
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	/* FTE 0/1 */
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE: per-destination enable bits */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK: data written as whole register (no field handle) */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC (optional register set) */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE (optional register set) */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS (optional register set) */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU FPGA module. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized CSU context.
+ *
+ * Returns the new context, or NULL on allocation failure; the caller owns
+ * it and releases it with csu_nthw_delete().
+ *
+ * calloc() replaces the original malloc()+memset() pair: same result
+ * (zeroed storage), one call, and no unchecked-pointer window.
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/*
+ * Release a CSU context previously returned by csu_nthw_new().
+ * Accepts NULL.  The struct is cleared before being freed, matching the
+ * original behavior (scrubs stale handle pointers in the freed storage).
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a CSU context to FPGA module instance n_instance and cache its
+ * register/field handles.
+ *
+ * p:          context to populate; may be NULL to only probe whether the
+ *             module instance exists.
+ * p_fpga:     FPGA model to query.
+ * n_instance: module instance number (asserted to be in 0..255, since it
+ *             is stored in a uint8_t).
+ *
+ * Returns 0 on success (or when probing finds the instance), -1 when the
+ * instance does not exist.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report existence without touching a context. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Cache RCP control (address/count) and data (per-layer checksum
+	 * command) handles for the accessors below.
+	 */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Stage the RCP table address (record index) to operate on. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* Stage the RCP count field. */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Stage the outer-L3 checksum command (values documented below). */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/* Stage the outer-L4 checksum command (values documented below). */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/* Stage the inner (tunneled) L3 checksum command (values documented below). */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/* Stage the inner (tunneled) L4 checksum command (values documented below). */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Flush the staged RCP CTRL and DATA registers (one entry each). */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one CSU (checksum update) FPGA module instance. */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;	/* resolved CSU module handle */
+
+	/* RCP recipe table: CTRL selects entry/count, DATA holds the fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;	/* recipe index */
+	nt_field_t *mp_rcp_ctrl_cnt;	/* entry count */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;	/* outer L3 checksum command */
+	nt_field_t *mp_rcp_data_ol4_cmd;	/* outer L4 checksum command */
+	nt_field_t *mp_rcp_data_il3_cmd;	/* inner (tunneled) L3 command */
+	nt_field_t *mp_rcp_data_il4_cmd;	/* inner (tunneled) L4 command */
+};
+
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field setters stage values; csu_nthw_rcp_flush() writes them out. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized FLM module handle.
+ * Returns NULL on allocation failure; the caller owns the handle and
+ * releases it with flm_nthw_delete().
+ */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc() zero-initializes in one call, replacing the original
+	 * malloc()+memset() pair and removing the need for a NULL check
+	 * before clearing.
+	 */
+	struct flm_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Release a handle obtained from flm_nthw_new().  The structure is
+ * cleared before being freed; passing NULL is a no-op.
+ */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug mode setting to the underlying FLM module handle. */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+/*
+ * Resolve the FLM module instance and all register/field handles used by
+ * the accessors in this file.
+ *
+ * Probe mode: when p is NULL the call only checks for the module and
+ * returns 0 if instance n_instance exists, -1 otherwise.  Otherwise it
+ * fills in *p and returns 0 on success or -1 when the instance is absent.
+ */
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	/* Cache the RAC handle used by the DMA helpers further below. */
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	/* PDS/PIS are optional fields (query, not get): may be NULL, and
+	 * their setters assert before use.
+	 */
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Registers accessed by address through RAB DMA (see helpers below). */
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	/* The remaining statistics registers are optional: resolved with
+	 * module_query_register()/register_query_field() and may be NULL on
+	 * some FPGA variants.  Their accessors assert non-NULL before use.
+	 */
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM_CONTROL field setters.  Each setter only stages the field value
+ * (field_set_val32()); flm_nthw_control_flush() writes the CONTROL
+ * register to the device.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/* PDS is an optional field (register_query_field() in init), hence the
+ * assert: callers must only use it when the FPGA provides it.
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+/* PIS is optional as well; see note on flm_nthw_control_pds(). */
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write the staged CONTROL register value to the device. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM_STATUS accessors.  'get' selects direction: non-zero copies the
+ * field into *val.  For critical/panic/crcerr a zero 'get' instead
+ * writes *val to the field; calibdone/initdone/idle/eft_bp are
+ * read-only here (the write direction is a no-op).
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write staged STATUS fields to the device. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the STATUS register shadow before reading fields. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT / SCRUB / LOAD_* register accessors.  Each register has a
+ * single field; the setter stages the value and the matching *_flush()
+ * writes the register to the device.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * FLM_PRIO field setters (four limit/flow-type pairs).  Values are
+ * staged; flm_nthw_prio_flush() writes the register to the device.
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table access: select the entry/count via PST_CTRL, stage the
+ * bp/pp/tp fields in PST_DATA, then flush both registers.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP recipe table access: select the entry/count via RCP_CTRL, stage
+ * the recipe fields in RCP_DATA, then flush both registers with
+ * flm_nthw_rcp_flush().
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: takes an array of 10 32-bit words. */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the two BUF_CTRL words over RAB DMA and unpack them:
+ * word0[15:0]  -> *lrn_free   (learn queue space)
+ * word0[31:16] -> *inf_avail  (info words available)
+ * word1[15:0]  -> *sta_avail  (status words available)
+ *
+ * Returns 0 on success; otherwise the non-zero status from the RAB DMA
+ * begin/commit calls, in which case the output parameters are left
+ * untouched.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA buffer is a ring; mask the index into range. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write word_count words of learn data to LRN_DATA over RAB DMA and
+ * read back the BUF_CTRL word pair into *lrn_free / *inf_avail /
+ * *sta_avail (same unpacking as flm_nthw_buf_ctrl_update()).
+ *
+ * Returns 0 on success, otherwise the non-zero status from the RAB DMA
+ * layer.  The begin status is now propagated instead of a bare -1,
+ * matching the other three DMA helpers in this file.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count words from INF_DATA over RAB DMA into data[], then
+ * read back BUF_CTRL into *lrn_free / *inf_avail / *sta_avail.
+ *
+ * Returns 0 on success, otherwise the non-zero status from the RAB DMA
+ * layer; on failure data[] and the counters are left untouched.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer, wrapping via the mask. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count words from STA_DATA over RAB DMA into data[], then
+ * read back BUF_CTRL into *lrn_free / *inf_avail / *sta_avail.
+ * Mirrors flm_nthw_inf_data_update() but announces the word count in
+ * BUF_CTRL word 1 instead of word 0.
+ *
+ * Returns 0 on success, otherwise the non-zero status from the RAB DMA
+ * layer; on failure data[] and the counters are left untouched.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer, wrapping via the mask. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM statistics accessors.  For each counter the *_update() helper
+ * refreshes the register (register_update()) and the *_cnt() helper
+ * copies the field value into *val when 'get' is non-zero.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+/* PRB counters are optional (query-resolved in init), hence the asserts. */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+/*
+ * Optional FLM statistics accessors.  These registers/fields are
+ * resolved with module_query_register()/register_query_field() in
+ * flm_nthw_init() and may be NULL on FPGA variants without them,
+ * hence the asserts: callers must check availability first.
+ */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn _done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl _done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb _done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel _done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul _done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul _done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta _done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf _done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Shadow handles for one FLM (flow matcher) module instance: the FPGA
+ * module pointer plus every register and field handle that the flm_nthw_*
+ * accessors operate on.  All handles are resolved once at init time.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* Load indicator registers (BIN/PPS/LPS/APS) */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/flow-type pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST table: ctrl (address/count) + data registers */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP table: ctrl (address/count) + data registers */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/inform/status buffer control and data registers */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counter registers, one register + one cnt field each */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU FPGA module. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HFU accessor object.
+ * Returns NULL on allocation failure; release with hfu_nthw_delete().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() zeroes the object in one step (was malloc + memset). */
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/*
+ * Zero and free an object from hfu_nthw_new(); NULL is accepted and ignored.
+ * The memset before free() invalidates the cached handles, making stale use
+ * of the freed pointer easier to detect.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HFU module instance: resolve the module and cache all RCP
+ * register/field handles on @p.
+ *
+ * With p == NULL the call only probes for the instance: returns 0 when the
+ * instance exists, -1 otherwise.  With p != NULL returns 0 on success or -1
+ * (with a log message) when the instance does not exist.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of querying again
+	 * (consistent with hsh_nthw_init).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * HFU RCP shadow-register setters.
+ *
+ * Each function writes one 32-bit field of the RCP ctrl/data shadow
+ * registers; nothing reaches hardware until hfu_nthw_rcp_flush() is called.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/*
+ * Commit the RCP ctrl and data shadow registers to hardware.
+ * NOTE(review): the '1' argument is presumably a flush count of one record —
+ * confirm against the register_flush() contract.
+ */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Shadow handles for one HFU module instance: the FPGA module pointer plus
+ * the RCP ctrl/data registers and every data field the hfu_nthw_* setters
+ * write.  All handles are resolved once by hfu_nthw_init().
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP ctrl register: table address and count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable register-access debug tracing for the HSH module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HSH shadow structure.
+ * Returns NULL on allocation failure; callers must check.
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	struct hsh_nthw *p = malloc(sizeof(struct hsh_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an HSH shadow structure; the memset scrubs stale pointers first. NULL is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HSH shadow structure to FPGA module instance n_instance and
+ * write default values into the first RCP record.
+ *
+ * p          - structure to initialize; may be NULL to merely probe whether
+ *              the module instance exists
+ * p_fpga     - FPGA handle used for the module lookup
+ * n_instance - module instance number (asserted to be 0..255)
+ *
+ * Returns 0 on success, -1 if the module instance is not present.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache register/field handles for the recipe control and data registers. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* query (not get): AUTO_IPV4_MASK is optional and may be absent; NULL is tolerated. */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: stage default values for RCP record 0, then flush to hardware. */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* NOTE(review): word count hard-coded to 10 here (and in hsh_nthw_rcp_word_mask)
+	 * while mac_port_mask uses mn_words; presumably WORD_MASK is always 10 words —
+	 * confirm against the register definition.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* NOTE(review): HSH_TYPE defaulted to 31 (all other fields default to 0) and
+	 * P_MASK is left at whatever the shadow register held — verify both are intended.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field writers.
+ * Each helper stages one value in the shadow copy of the HSH RCP_CTRL /
+ * RCP_DATA registers; nothing reaches hardware until hsh_nthw_rcp_flush().
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* Multi-word field: val must point to at least mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Offsets are signed; field_set_val32 stores the raw 32-bit pattern. */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* Multi-word field; caller must supply 10 words (width assumed fixed). */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional in some FPGA images; silently ignored if absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Push the staged CTRL and DATA shadow registers to hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Shadow state for one HSH (hash) FPGA module instance: cached register and
+ * field handles used by the hsh_nthw_* accessors.
+ */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter/instance number (0..255) */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_hsh;		/* MOD_HSH module handle */
+
+	/* RCP_CTRL register and its fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP_DATA register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;	/* optional; NULL if absent */
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable register-access debug tracing for the HST module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HST shadow structure.
+ * Returns NULL on allocation failure; callers must check.
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	struct hst_nthw *p = malloc(sizeof(struct hst_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an HST shadow structure; the memset scrubs stale pointers first. NULL is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HST shadow structure to FPGA module instance n_instance.
+ *
+ * p          - structure to initialize; may be NULL to merely probe whether
+ *              the module instance exists
+ * p_fpga     - FPGA handle used for the module lookup
+ * n_instance - module instance number (asserted to be 0..255)
+ *
+ * Returns 0 on success, -1 if the module instance is not present.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), this function writes no default
+ * values and performs no register_flush() — presumably intentional; confirm.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache register/field handles for the recipe control and data registers. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP field writers.
+ * Each helper stages one value in the shadow copy of the HST RCP_CTRL /
+ * RCP_DATA registers; nothing reaches hardware until hst_nthw_rcp_flush().
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Push the staged CTRL and DATA shadow registers to hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Shadow state for one HST FPGA module instance: cached register and field
+ * handles used by the hst_nthw_* accessors.
+ */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter/instance number (0..255) */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_hst;		/* MOD_HST module handle */
+
+	/* RCP_CTRL register and its fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP_DATA register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+/* Enable/disable register-access debug tracing for the IFR module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IFR shadow structure.
+ * Returns NULL on allocation failure; callers must check.
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	struct ifr_nthw *p = malloc(sizeof(struct ifr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free an IFR shadow structure; the memset scrubs stale pointers first. NULL is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IFR shadow structure to FPGA module instance n_instance.
+ *
+ * p          - structure to initialize; may be NULL to merely probe whether
+ *              the module instance exists
+ * p_fpga     - FPGA handle used for the module lookup
+ * n_instance - module instance number (asserted to be 0..255)
+ *
+ * Returns 0 on success, -1 if the module instance is not present.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of performing a
+	 * redundant second fpga_query_module() call (matches hsh/hst init).
+	 */
+	p->m_ifr = p_mod;
+
+	/* RCP: cache register/field handles for the recipe control and data registers. */
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Select which RCP record the shadow ADR field points at (staged until flush). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Stage the RCP record count field (written to hardware on flush). */
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Stage the enable field of the currently selected IFR RCP record. */
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Stage the MTU field of the currently selected IFR RCP record. */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix copy-paste bug: assert the field actually written (was mp_rcp_data_en). */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Push the staged CTRL and DATA shadow registers to hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Shadow state for one IFR FPGA module instance: cached register and field
+ * handles used by the ifr_nthw_* accessors.
+ */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter/instance number (0..255) */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_ifr;		/* MOD_IFR module handle */
+
+	/* RCP_CTRL register and its fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP_DATA register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Clamp a value to {0, 1}: any non-zero becomes 1 (presence flag helper). */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val > 1 ? 1 : val;
+}
+
+/*
+ * Allocate a zero-initialized INFO shadow structure.
+ * Returns NULL on allocation failure; callers must check.
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	struct info_nthw *p = malloc(sizeof(struct info_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an INFO shadow structure; the memset scrubs stale contents first. NULL is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Snapshot all flow-related FPGA product parameters into the INFO structure.
+ *
+ * Capabilities gated on a *_PRESENT parameter are multiplied by a 0/1
+ * presence flag so absent modules report size 0.
+ *
+ * Always returns 0.
+ *
+ * NOTE(review): unlike the other *_nthw_init functions, p is dereferenced
+ * unconditionally (no NULL probe path) and no module lookup can fail here.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for optional modules (clamped from raw params). */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is usable only when all five Tx-edit sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types exist if either KM or FLM is present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is bounded by the smaller of the Rx and Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* -1 default signals "interface not mapped"; stored value is parameter-defined. */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Read-only accessors for the capability snapshot taken by info_nthw_init().
+ * Values gated on an absent module read back as 0.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): m_cat_km_if_m0/m1 default to -1 in info_nthw_init() but are
+ * returned here as unsigned int — callers see a large positive value for
+ * "unmapped"; confirm callers test accordingly.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Cached FPGA capability/dimension values, exposed one-by-one through the
+ * info_nthw_get_*() accessors above.  Presumably filled in by
+ * info_nthw_init()/info_nthw_setup() from FPGA parameters — not visible here,
+ * TODO confirm against flow_nthw_info.c.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	/* Port topology */
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	/* CAT / generic dimensions */
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	/* KM (key matcher) dimensions */
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	/* FLM (flow matcher) dimensions */
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	/* Other module dimensions */
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	/* TPE (TX packet editor) dimensions */
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA FPGA module handle. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ioa_nthw instance.
+ *
+ * Returns the new instance, or NULL on allocation failure.  The caller owns
+ * the memory and releases it with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (malloc+memset equivalent). */
+	struct ioa_nthw *p = calloc(1, sizeof(struct ioa_nthw));
+
+	return p;
+}
+
+/*
+ * Release an ioa_nthw instance previously returned by ioa_nthw_new().
+ * Accepts NULL.  The memset scrubs the object before freeing — presumably a
+ * defensive measure to make use-after-free easier to detect; note a plain
+ * memset before free() may be elided by the optimizer (review note).
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an ioa_nthw instance to IOA module instance @n_instance of @p_fpga
+ * and resolve all register/field handles used by the accessors below.
+ *
+ * If @p is NULL the call only probes for the module's existence.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module without binding. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way the FPGA has chosen to
+		 * place control bits for the EPP module inside IOA.  It is
+		 * accepted because the IOA and ROA modules are expected to be
+		 * exchanged later for higher scalability.
+		 * The registers are optional, hence module_query_register()
+		 * and the NULL fallbacks below.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * RCP: shadow-register field writers.  Each setter only stages a value;
+ * nothing reaches the FPGA until ioa_nthw_rcp_flush() is called.
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Commit the staged RCP control and data registers to the FPGA. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special: custom TPID values; staged, committed by the flush. */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * ROA/EPP accessors.  The EPP registers are optional (see ioa_nthw_init),
+ * so every accessor NULL-checks its field/register handle and silently
+ * does nothing when the FPGA does not expose it.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Handle for one IOA FPGA module instance: cached register and field
+ * pointers resolved once by ioa_nthw_init().
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	/* RCP control (address/count) and data registers */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	/* Custom VLAN TPID register */
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional ROA/EPP registers; NULL when absent from the FPGA */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write @_val to field @_a only when the field handle is non-NULL
+ * (optional fields are NULL on FPGA versions that lack them).
+ * The field expression is evaluated exactly once; the temporary uses a
+ * trailing underscore so an argument named 'a' cannot be captured, and
+ * both macro arguments are parenthesized in the expansion.
+ */
+#define CHECK_AND_SET_VALUE(_a, _val)            \
+	do {                                    \
+		__typeof__(_a) field_ = (_a);   \
+		if (field_) {                   \
+			field_set_val32(field_, (_val)); \
+		}                               \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM FPGA module handle. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized km_nthw instance.
+ *
+ * Returns the new instance, or NULL on allocation failure.  The caller owns
+ * the memory and releases it with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (malloc+memset equivalent). */
+	struct km_nthw *p = calloc(1, sizeof(struct km_nthw));
+
+	return p;
+}
+
+/*
+ * Release a km_nthw instance previously returned by km_nthw_new().
+ * Accepts NULL.  The memset scrubs the object before freeing — presumably a
+ * defensive measure against use-after-free (review note: may be elided by
+ * the optimizer).
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a km_nthw instance to KM module instance @n_instance of @p_fpga and
+ * resolve all register/field handles used by the accessors below.
+ *
+ * Mandatory fields use register_get_field(); version-dependent fields use
+ * register_query_field(), whose NULL result is checked afterwards to select
+ * between old and new field layouts.
+ *
+ * If @p is NULL the call only probes for the module's existence.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module without binding. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Optional: presence of SW8/DW8 distinguishes FPGA versions (see below). */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL)
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/*
+ * RCP: shadow-register field writers for the mandatory QW fields.
+ * Values are staged only; km_nthw_rcp_flush() commits them.
+ * (Fix: removed stray ';' after each function body — invalid ISO C at
+ * file scope and flagged by checkpatch.)
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+/*
+ * RCP: writers for optional (version-dependent) fields; each uses
+ * CHECK_AND_SET_VALUE, which is a no-op when the field is absent (NULL).
+ * (Fix: removed stray ';' after each function body — invalid ISO C at
+ * file scope and flagged by checkpatch.)
+ */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+/*
+ * RCP: SWX selector and multi-word mask writers.  The mask writers use the
+ * field's own word count (mn_words) as the value length.
+ * (Fix: removed stray ';' after each function body — invalid ISO C at
+ * file scope and flagged by checkpatch.)
+ */
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+} /* for DW8/DW10 from v6+ */
+
+/*
+ * RCP: remaining field writers and the flush that commits the staged RCP
+ * control/data registers to the FPGA.
+ * (Fix: removed stray ';' after each function body — invalid ISO C at
+ * file scope and flagged by checkpatch.)
+ */
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * CAM bank accessors: stage address/count and the six word/flow-type
+ * pairs of a CAM record, then commit with km_nthw_cam_flush().
+ * (Stray ';' after each function body removed - not valid strict ISO C.)
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* Commit the staged CAM control and data shadows to the FPGA. */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/*
+ * TCAM bank accessors. km_nthw_tcam_t() writes a 3-word (96-bit) entry.
+ * (Stray ';' after each function body removed - not valid strict ISO C.)
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* val must point at 3 consecutive uint32_t words. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+/* Commit the staged TCAM control and data shadows to the FPGA. */
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/*
+ * TCI table accessors: per-entry color and flow-type values.
+ * (Stray ';' after each function body removed - not valid strict ISO C.)
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+/* Commit the staged TCI control and data shadows to the FPGA. */
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/*
+ * TCQ table accessors. BANK_MASK exists only on newer FPGA revisions
+ * (CHECK_AND_SET_VALUE is a no-op when the field is absent).
+ * (Stray ';' after each function body removed - not valid strict ISO C.)
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant: val must point at 3 uint32_t words. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+} /* to use in v4 */
+
+/* Commit the staged TCQ control and data shadows to the FPGA. */
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+/* NOTE(review): struct km_nthw below has no mp_rcp_data_mask_d_a field;
+ * presumably this writes through mp_rcp_data_mask_a on D_A-layout FPGAs -
+ * confirm against the implementation in flow_nthw_km.c.
+ */
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+/* val points at 3 consecutive uint32_t words (96-bit entry). */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+/* val points at 3 consecutive uint32_t words (72-bit qualifier, v4). */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/*
+ * Handle for one KM (Key Matcher) module instance. Holds cached pointers
+ * to the module's register/field model objects; field pointers looked up
+ * with a "query" accessor may be NULL on FPGA revisions lacking them.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	/* KM_RCP control register (ADR/CNT) and its data shadow. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	/* KM_CAM control register and data shadow (6 word/ft pairs). */
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	/* KM_TCAM control register and data shadow. */
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	/* KM_TCI control register and data shadow. */
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	/* KM_TCQ control register and data shadow. */
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB module model. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized PDB handle.
+ * Returns NULL on allocation failure; free with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc+memset pair. */
+	struct pdb_nthw *p = calloc(1, sizeof *p);
+
+	return p;
+}
+
+/* Scrub and release a PDB handle; NULL is accepted and ignored. */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PDB handle to FPGA module instance n_instance and cache all
+ * register/field pointers.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL degenerates to a pure probe for the instance.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe mode: only report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* Optional field: register_query_field() yields NULL when absent. */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/*
+ * RCP shadow-field setters: each stages one field of the recipe data
+ * register; pdb_nthw_rcp_flush() commits control and data to the FPGA.
+ */
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+/* Relative offsets are signed (int32_t). */
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* PCAP_KEEP_FCS is optional; silently ignored when the field is absent. */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+/* Commit the staged RCP control and data shadows to the FPGA. */
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG register accessors: timestamp format and port offset. */
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+/* Commit the staged CONFIG register to the FPGA. */
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one PDB (Packet Descriptor Builder) module instance.
+ * Field pointers looked up with a "query" accessor (pcap_keep_fcs) may
+ * be NULL on FPGA revisions that lack the field.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	/* PDB_RCP control register (ADR/CNT) and its data shadow. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional; may be NULL */
+
+	/* PDB_CONFIG register and its fields. */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL module model. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized QSL handle.
+ * Returns NULL on allocation failure; free with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc+memset pair. */
+	struct qsl_nthw *p = calloc(1, sizeof *p);
+
+	return p;
+}
+
+/* Scrub and release a QSL handle; NULL is accepted and ignored. */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a QSL handle to FPGA module instance n_instance and cache all
+ * register/field pointers. Pointers obtained via "query" accessors are
+ * NULL on FPGA revisions lacking the register/field (LTX, several RCP
+ * and QST fields).
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL degenerates to a pure probe for the instance.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe mode: only report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* Optional fields: NULL when not present in this FPGA revision. */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Placeholder: no per-index setup required for QSL; always succeeds. */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/*
+ * RCP shadow-field setters; qsl_nthw_rcp_flush() commits to the FPGA.
+ * CAO/LR/TSA/VLI are optional fields and are ignored when absent.
+ * (Stray ';' after a function body and stray blank lines between
+ * signature and '{' removed.)
+ */
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+/* Commit the staged RCP control and data shadows to the FPGA. */
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * LTX accessors. The LTX registers were removed in FPGA v0.7+, so all
+ * cached pointers may be NULL and every accessor must NULL-guard.
+ */
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Guard the field actually written (was mp_ltx_addr: copy-paste). */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* LTX registers are optional (module_query_register); skip when
+	 * absent to avoid dereferencing NULL on v0.7+ FPGAs.
+	 */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/*
+ * QST table accessors; qsl_nthw_qst_flush() commits to the FPGA.
+ * TX_PORT/LRE/TCI/VEN are optional fields and are ignored when absent.
+ */
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+/* mp_qst_data_en is resolved to either EN or QEN (v0.7+) in init. */
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+/* Commit the staged QST control and data shadows to the FPGA. */
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN table accessors: per-entry queue enable bits. */
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+/* Commit the staged QEN control and data shadows to the FPGA. */
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ table accessors: unmatched-packet destination queue and enable. */
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+/* Commit the staged UNMQ control and data shadows to the FPGA. */
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Cached register/field handles for one QSL FPGA module instance.
+ * Handles are resolved once by qsl_nthw_init(); the accessors below write
+ * through them. Optional fields (see the NULL checks in the .c file) may
+ * remain NULL on FPGA variants that lack them.
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP ctrl/data register pair and data fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX ctrl/data register pair and data fields */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST ctrl/data register pair and data fields */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN ctrl/data register pair and data field */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ ctrl/data register pair and data fields */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+/* Lifecycle: allocate, resolve handles, tear down. */
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RMC context; returns NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc + memset). */
+	return calloc(1, sizeof(struct rmc_nthw));
+}
+
+/* Scrub and free the context; a NULL pointer is ignored. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the RMC module instance and its CTRL register/field handles.
+ *
+ * Probe mode: when p is NULL the function only reports whether the module
+ * instance exists (0 = present, -1 = absent) without touching any state.
+ * The BLOCK_RPP_SLICE field is looked up with register_query_field(), so
+ * its handle may be NULL on FPGAs that do not expose it.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-index setup is required for RMC; parameters are intentionally unused. */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/*
+ * CTRL field setters. RPP_SLICE is an optional field (query-resolved at
+ * init) and the write is skipped when the handle is NULL.
+ */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Push the staged CTRL value to the hardware register. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Cached register/field handles for one RMC FPGA module instance.
+ * Resolved once by rmc_nthw_init(); mp_ctrl_block_rpp_slice is optional
+ * and may be NULL on FPGAs without that field.
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+/* Lifecycle: allocate, resolve handles, tear down. */
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ROA context; returns NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc + memset). */
+	return calloc(1, sizeof(struct roa_nthw));
+}
+
+/* Scrub and free the context; a NULL pointer is ignored. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the ROA module instance and all of its register/field handles
+ * (TUN HDR, TUN CFG, CONFIG and LAG register groups).
+ *
+ * Probe mode: when p is NULL the function only reports whether the module
+ * instance exists (0 = present, -1 = absent) without touching any state.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* val must point to 4 x 32-bit words (field_set_val is called with count 4). */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Push the staged TUN HDR ctrl/data values to the hardware registers. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX LAG index field of the staged TUN CFG data. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Push the staged TUN CFG ctrl/data values to the hardware registers. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Push the staged CONFIG value to the hardware register. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Push the staged LAG CFG ctrl/data values to the hardware registers. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+/* Lifecycle: allocate, resolve handles, tear down. */
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+/* val must point to 4 x 32-bit words of tunnel header data. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Cached register/field handles for one ROA FPGA module instance.
+ * Resolved once by roa_nthw_init().
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR ctrl/data register pair and data field */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG ctrl/data register pair and data fields */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG register and its fields */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG CFG ctrl/data register pair and data field */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RPP_LR context; returns NULL on allocation failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc + memset). */
+	return calloc(1, sizeof(struct rpp_lr_nthw));
+}
+
+/* Scrub and free the context; a NULL pointer is ignored. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the RPP_LR module instance and its register/field handles.
+ *
+ * Probe mode: when p is NULL the function only reports whether the module
+ * instance exists (0 = present, -1 = absent) without touching any state.
+ * The IFR RCP registers/fields are looked up with the query variants, so
+ * those handles may be NULL on FPGAs without IFR support.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup. */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP field setters; asserts document that init must have resolved the handle. */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Push the staged RCP ctrl/data values to the hardware registers. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * IFR RCP setters. The IFR handles are query-resolved and may be NULL on
+ * FPGAs without IFR support; the asserts imply callers must check for IFR
+ * support before calling these. NOTE(review): confirm callers do so.
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Push the staged IFR RCP ctrl/data values to the hardware registers. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Cached register/field handles for one RPP_LR FPGA module instance.
+ * The IFR handles are query-resolved and may be NULL on FPGAs without
+ * IFR support.
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* Optional IFR registers/fields (may be NULL). */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+/* Lifecycle: allocate, resolve handles, tear down. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC context; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc + memset). */
+	return calloc(1, sizeof(struct slc_nthw));
+}
+
+/* Scrub and free the context; a NULL pointer is ignored. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the SLC module instance and its RCP register/field handles.
+ *
+ * Probe mode: when p is NULL the function only reports whether the module
+ * instance exists (0 = present, -1 = absent) without touching any state.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup. */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* val is signed (offset); stored via the same 32-bit field write as the rest. */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Push the staged RCP ctrl/data values to the hardware registers. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one SLC (slicer) FPGA module instance.
+ * Register/field pointers are resolved once by slc_nthw_init().
+ */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* set from n_instance in init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_slc;		/* MOD_SLC module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* CTRL ADR field (record select) */
+	nt_field_t *mp_rcp_cnt;		/* CTRL CNT field */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* DATA TAIL_SLC_EN field */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* DATA TAIL_DYN field */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* DATA TAIL_OFS field (signed) */
+	nt_field_t *mp_rcp_data_pcap;	/* DATA PCAP field */
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC_LR module. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	nt_module_t *const mod = p->m_slc_lr;
+
+	module_set_debug_mode(mod, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized SLC_LR handle.
+ * Returns NULL on allocation failure.
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as the old malloc+memset pair. */
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Scrub and free 'p'.  Passing NULL is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the SLC_LR module instance 'n_instance' and its RCP register/field
+ * pointers into 'p'.
+ *
+ * When 'p' is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  Returns -1 when the module is missing,
+ * 0 on success.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Fixed copy-paste: this is the SLC_LR module, not SLC. */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup. */
+	p->m_slc_lr = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Write 'val' to the RCP CTRL address field, selecting the RCP record. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_addr = p->mp_rcp_addr;
+
+	field_set_val32(f_addr, val);
+}
+
+/* Write 'val' to the RCP CTRL count field. */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_cnt = p->mp_rcp_cnt;
+
+	field_set_val32(f_cnt, val);
+}
+
+/* Write 'val' to the RCP DATA tail-slice-enable field. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_en = p->mp_rcp_data_tail_slc_en;
+
+	field_set_val32(f_en, val);
+}
+
+/* Write 'val' to the RCP DATA tail-dyn field. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_dyn = p->mp_rcp_data_tail_dyn;
+
+	field_set_val32(f_dyn, val);
+}
+
+/* Write signed offset 'val' to the RCP DATA tail-offset field. */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	nt_field_t *const f_ofs = p->mp_rcp_data_tail_ofs;
+
+	field_set_val32(f_ofs, val);
+}
+
+/* Write 'val' to the RCP DATA PCAP field. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_pcap = p->mp_rcp_data_pcap;
+
+	field_set_val32(f_pcap, val);
+}
+
+/* Flush the RCP CTRL and DATA registers to the hardware. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	const int n_cnt = 1;
+
+	register_flush(p->mp_rcp_ctrl, n_cnt);
+	register_flush(p->mp_rcp_data, n_cnt);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one SLC_LR FPGA module instance.
+ * Register/field pointers are resolved once by slc_lr_nthw_init().
+ */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* set from n_instance in init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_slc_lr;		/* MOD_SLC_LR module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* CTRL ADR field (record select) */
+	nt_field_t *mp_rcp_cnt;		/* CTRL CNT field */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* DATA TAIL_SLC_EN field */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* DATA TAIL_DYN field */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* DATA TAIL_OFS field (signed) */
+	nt_field_t *mp_rcp_data_pcap;	/* DATA PCAP field */
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	nt_module_t *const mod = p->m_tx_cpy;
+
+	module_set_debug_mode(mod, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_CPY handle.
+ * Returns NULL on allocation failure.
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as the old malloc+memset pair. */
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/* Release the writer array, scrub and free 'p'.  NULL is a no-op. */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	free(p->m_writers);
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Register and field ids describing one CPY writer (CPY_WRITERn_*). */
+struct tx_cpy_writer_regs {
+	int ctrl;
+	int ctrl_adr;
+	int ctrl_cnt;
+	int data;
+	int data_reader_select;
+	int data_dyn;
+	int data_ofs;
+	int data_len;
+	int data_mask_pointer;
+	int mask_ctrl;
+	int mask_ctrl_adr;
+	int mask_ctrl_cnt;
+	int mask_data;
+	int mask_data_byte_mask;
+};
+
+/* Id table for the six writers the register map defines. */
+static const struct tx_cpy_writer_regs tx_cpy_writer_reg_tbl[] = {
+	{ CPY_WRITER0_CTRL, CPY_WRITER0_CTRL_ADR, CPY_WRITER0_CTRL_CNT,
+	  CPY_WRITER0_DATA, CPY_WRITER0_DATA_READER_SELECT,
+	  CPY_WRITER0_DATA_DYN, CPY_WRITER0_DATA_OFS, CPY_WRITER0_DATA_LEN,
+	  CPY_WRITER0_DATA_MASK_POINTER, CPY_WRITER0_MASK_CTRL,
+	  CPY_WRITER0_MASK_CTRL_ADR, CPY_WRITER0_MASK_CTRL_CNT,
+	  CPY_WRITER0_MASK_DATA, CPY_WRITER0_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER1_CTRL, CPY_WRITER1_CTRL_ADR, CPY_WRITER1_CTRL_CNT,
+	  CPY_WRITER1_DATA, CPY_WRITER1_DATA_READER_SELECT,
+	  CPY_WRITER1_DATA_DYN, CPY_WRITER1_DATA_OFS, CPY_WRITER1_DATA_LEN,
+	  CPY_WRITER1_DATA_MASK_POINTER, CPY_WRITER1_MASK_CTRL,
+	  CPY_WRITER1_MASK_CTRL_ADR, CPY_WRITER1_MASK_CTRL_CNT,
+	  CPY_WRITER1_MASK_DATA, CPY_WRITER1_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER2_CTRL, CPY_WRITER2_CTRL_ADR, CPY_WRITER2_CTRL_CNT,
+	  CPY_WRITER2_DATA, CPY_WRITER2_DATA_READER_SELECT,
+	  CPY_WRITER2_DATA_DYN, CPY_WRITER2_DATA_OFS, CPY_WRITER2_DATA_LEN,
+	  CPY_WRITER2_DATA_MASK_POINTER, CPY_WRITER2_MASK_CTRL,
+	  CPY_WRITER2_MASK_CTRL_ADR, CPY_WRITER2_MASK_CTRL_CNT,
+	  CPY_WRITER2_MASK_DATA, CPY_WRITER2_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER3_CTRL, CPY_WRITER3_CTRL_ADR, CPY_WRITER3_CTRL_CNT,
+	  CPY_WRITER3_DATA, CPY_WRITER3_DATA_READER_SELECT,
+	  CPY_WRITER3_DATA_DYN, CPY_WRITER3_DATA_OFS, CPY_WRITER3_DATA_LEN,
+	  CPY_WRITER3_DATA_MASK_POINTER, CPY_WRITER3_MASK_CTRL,
+	  CPY_WRITER3_MASK_CTRL_ADR, CPY_WRITER3_MASK_CTRL_CNT,
+	  CPY_WRITER3_MASK_DATA, CPY_WRITER3_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER4_CTRL, CPY_WRITER4_CTRL_ADR, CPY_WRITER4_CTRL_CNT,
+	  CPY_WRITER4_DATA, CPY_WRITER4_DATA_READER_SELECT,
+	  CPY_WRITER4_DATA_DYN, CPY_WRITER4_DATA_OFS, CPY_WRITER4_DATA_LEN,
+	  CPY_WRITER4_DATA_MASK_POINTER, CPY_WRITER4_MASK_CTRL,
+	  CPY_WRITER4_MASK_CTRL_ADR, CPY_WRITER4_MASK_CTRL_CNT,
+	  CPY_WRITER4_MASK_DATA, CPY_WRITER4_MASK_DATA_BYTE_MASK },
+	{ CPY_WRITER5_CTRL, CPY_WRITER5_CTRL_ADR, CPY_WRITER5_CTRL_CNT,
+	  CPY_WRITER5_DATA, CPY_WRITER5_DATA_READER_SELECT,
+	  CPY_WRITER5_DATA_DYN, CPY_WRITER5_DATA_OFS, CPY_WRITER5_DATA_LEN,
+	  CPY_WRITER5_DATA_MASK_POINTER, CPY_WRITER5_MASK_CTRL,
+	  CPY_WRITER5_MASK_CTRL_ADR, CPY_WRITER5_MASK_CTRL_CNT,
+	  CPY_WRITER5_MASK_DATA, CPY_WRITER5_MASK_DATA_BYTE_MASK },
+};
+
+/*
+ * Resolve register/field pointers for writer 'i' from id table entry 'r'.
+ * The mask registers are looked up only when 'variant' != 0; otherwise the
+ * mask pointers keep the zero value from the calloc'ed writer array.
+ */
+static void tx_cpy_nthw_init_writer(struct tx_cpy_nthw *p, unsigned int i,
+				    int variant,
+				    const struct tx_cpy_writer_regs *r)
+{
+	struct tx_cpy_writers_s *w = &p->m_writers[i];
+
+	w->mp_writer_ctrl = module_get_register(p->m_tx_cpy, r->ctrl);
+	w->mp_writer_ctrl_addr =
+		register_get_field(w->mp_writer_ctrl, r->ctrl_adr);
+	w->mp_writer_ctrl_cnt =
+		register_get_field(w->mp_writer_ctrl, r->ctrl_cnt);
+	w->mp_writer_data = module_get_register(p->m_tx_cpy, r->data);
+	w->mp_writer_data_reader_select =
+		register_get_field(w->mp_writer_data, r->data_reader_select);
+	w->mp_writer_data_dyn =
+		register_get_field(w->mp_writer_data, r->data_dyn);
+	w->mp_writer_data_ofs =
+		register_get_field(w->mp_writer_data, r->data_ofs);
+	w->mp_writer_data_len =
+		register_get_field(w->mp_writer_data, r->data_len);
+
+	if (variant != 0) {
+		w->mp_writer_data_mask_pointer =
+			register_get_field(w->mp_writer_data,
+					   r->data_mask_pointer);
+		w->mp_writer_mask_ctrl =
+			module_get_register(p->m_tx_cpy, r->mask_ctrl);
+		w->mp_writer_mask_ctrl_addr =
+			register_get_field(w->mp_writer_mask_ctrl,
+					   r->mask_ctrl_adr);
+		w->mp_writer_mask_ctrl_cnt =
+			register_get_field(w->mp_writer_mask_ctrl,
+					   r->mask_ctrl_cnt);
+		w->mp_writer_mask_data =
+			module_get_register(p->m_tx_cpy, r->mask_data);
+		w->mp_writer_mask_data_byte_mask =
+			register_get_field(w->mp_writer_mask_data,
+					   r->mask_data_byte_mask);
+	}
+}
+
+/*
+ * Resolve the TX_CPY module instance 'n_instance' and all of its writers'
+ * register/field pointers into 'p'.
+ *
+ * When 'p' is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  Returns -1 on missing module, missing
+ * NT_TX_CPY_WRITERS parameter (< 1) or allocation failure; 0 on success.
+ *
+ * Replaces the former sixfold copy-pasted fallthrough switch with one
+ * table-driven loop; behavior is unchanged (writers beyond the six ids in
+ * the register map were and are left unresolved).
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+	const unsigned int max_writers = sizeof(tx_cpy_writer_reg_tbl) /
+					 sizeof(tx_cpy_writer_reg_tbl[0]);
+	unsigned int i;
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup. */
+	p->m_tx_cpy = p_mod;
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/* Only the first 'max_writers' writers have register ids defined. */
+	for (i = 0; i < p->m_writers_cnt && i < max_writers; i++)
+		tx_cpy_nthw_init_writer(p, i, variant,
+					&tx_cpy_writer_reg_tbl[i]);
+
+	return 0;
+}
+
+/* Write 'val' to writer 'index''s CTRL address field. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_ctrl_addr, val);
+}
+
+/* Write 'val' to writer 'index''s CTRL count field. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_ctrl_cnt, val);
+}
+
+/* Write 'val' to writer 'index''s DATA reader-select field. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_reader_select, val);
+}
+
+/* Write 'val' to writer 'index''s DATA dyn field. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_dyn, val);
+}
+
+/* Write 'val' to writer 'index''s DATA offset field. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_ofs, val);
+}
+
+/* Write 'val' to writer 'index''s DATA length field. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_len, val);
+}
+
+/*
+ * Write 'val' to writer 'index''s DATA mask-pointer field.
+ * Only valid when the field was resolved at init (mask-capable variant).
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_data_mask_pointer);
+	field_set_val32(w->mp_writer_data_mask_pointer, val);
+}
+
+/* Flush writer 'index''s CTRL and DATA registers to the hardware. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	register_flush(w->mp_writer_ctrl, 1);
+	register_flush(w->mp_writer_data, 1);
+}
+
+/*
+ * Write 'val' to writer 'index''s MASK_CTRL address field.
+ * Only valid when the field was resolved at init (mask-capable variant).
+ */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_ctrl_addr);
+	field_set_val32(w->mp_writer_mask_ctrl_addr, val);
+}
+
+/*
+ * Write 'val' to writer 'index''s MASK_CTRL count field.
+ * Only valid when the field was resolved at init (mask-capable variant).
+ */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_ctrl_cnt);
+	field_set_val32(w->mp_writer_mask_ctrl_cnt, val);
+}
+
+/*
+ * Write 'val' to writer 'index''s MASK_DATA byte-mask field.
+ * Only valid when the field was resolved at init (mask-capable variant).
+ */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_data_byte_mask);
+	field_set_val32(w->mp_writer_mask_data_byte_mask, val);
+}
+
+/*
+ * Flush writer 'index''s MASK_CTRL and MASK_DATA registers to the hardware.
+ * Only valid when the registers were resolved at init (mask-capable variant).
+ */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_ctrl);
+	assert(w->mp_writer_mask_data);
+	register_flush(w->mp_writer_mask_ctrl, 1);
+	register_flush(w->mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Resolved register/field pointers for one CPY writer.
+ * The mask members are only populated by tx_cpy_nthw_init() when the FPGA
+ * reports a non-zero NT_TX_CPY_VARIANT; otherwise they stay NULL.
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;	/* CPY_WRITERn_CTRL register */
+	nt_field_t *mp_writer_ctrl_addr;	/* CTRL ADR field */
+	nt_field_t *mp_writer_ctrl_cnt;	/* CTRL CNT field */
+
+	nt_register_t *mp_writer_data;	/* CPY_WRITERn_DATA register */
+	nt_field_t *mp_writer_data_reader_select;	/* DATA READER_SELECT field */
+	nt_field_t *mp_writer_data_dyn;	/* DATA DYN field */
+	nt_field_t *mp_writer_data_ofs;	/* DATA OFS field */
+	nt_field_t *mp_writer_data_len;	/* DATA LEN field */
+	nt_field_t *mp_writer_data_mask_pointer;	/* DATA MASK_POINTER (variant only) */
+
+	nt_register_t *mp_writer_mask_ctrl;	/* MASK_CTRL register (variant only) */
+	nt_field_t *mp_writer_mask_ctrl_addr;	/* MASK_CTRL ADR field */
+	nt_field_t *mp_writer_mask_ctrl_cnt;	/* MASK_CTRL CNT field */
+
+	nt_register_t *mp_writer_mask_data;	/* MASK_DATA register (variant only) */
+	nt_field_t *mp_writer_mask_data_byte_mask;	/* MASK_DATA BYTE_MASK field */
+};
+
+/*
+ * Handle for one TX_CPY FPGA module instance.
+ * The writer array is allocated by tx_cpy_nthw_init() (size from the
+ * NT_TX_CPY_WRITERS product parameter) and freed by tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* set from n_instance in init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_tx_cpy;		/* MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt;	/* number of entries in m_writers */
+	struct tx_cpy_writers_s *m_writers;	/* per-writer pointers (owned) */
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	nt_module_t *const mod = p->m_tx_ins;
+
+	module_set_debug_mode(mod, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_INS handle.
+ * Returns NULL on allocation failure.
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	/* calloc yields the same zeroed state as the old malloc+memset pair. */
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Scrub and free 'p'.  Passing NULL is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the TX_INS module instance 'n_instance' and its RCP register/field
+ * pointers into 'p'.
+ *
+ * When 'p' is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  Returns -1 when the module is missing,
+ * 0 on success.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup. */
+	p->m_tx_ins = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Write 'val' to the RCP CTRL address field, selecting the RCP record. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_addr = p->mp_rcp_addr;
+
+	field_set_val32(f_addr, val);
+}
+
+/* Write 'val' to the RCP CTRL count field. */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_cnt = p->mp_rcp_cnt;
+
+	field_set_val32(f_cnt, val);
+}
+
+/* Write 'val' to the RCP DATA dyn field. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_dyn = p->mp_rcp_data_dyn;
+
+	field_set_val32(f_dyn, val);
+}
+
+/* Write 'val' to the RCP DATA offset field. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_ofs = p->mp_rcp_data_ofs;
+
+	field_set_val32(f_ofs, val);
+}
+
+/* Write 'val' to the RCP DATA length field. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f_len = p->mp_rcp_data_len;
+
+	field_set_val32(f_len, val);
+}
+
+/* Flush the RCP CTRL and DATA registers to the hardware. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	const int n_cnt = 1;
+
+	register_flush(p->mp_rcp_ctrl, n_cnt);
+	register_flush(p->mp_rcp_data, n_cnt);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one TX_INS (insert) FPGA module instance.
+ * Register/field pointers are resolved once by tx_ins_nthw_init().
+ */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* set from n_instance in init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_tx_ins;		/* MOD_TX_INS module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* INS_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* CTRL ADR field (record select) */
+	nt_field_t *mp_rcp_cnt;		/* CTRL CNT field */
+
+	nt_register_t *mp_rcp_data;	/* INS_RCP_DATA register */
+	nt_field_t *mp_rcp_data_dyn;	/* DATA DYN field */
+	nt_field_t *mp_rcp_data_ofs;	/* DATA OFS field */
+	nt_field_t *mp_rcp_data_len;	/* DATA LEN field */
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized tx_rpl_nthw context.
+ *
+ * @return new context, or NULL on allocation failure.
+ */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc zero-initializes in one call (replaces malloc + memset) */
+	return calloc(1, sizeof(struct tx_rpl_nthw));
+}
+
+/*
+ * Free a tx_rpl_nthw context; NULL is accepted and ignored.
+ * The struct is cleared before free as a defensive measure so stale
+ * register/field pointers cannot be reused after release.
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind @p to TX_RPL module instance @n_instance on @p_fpga and resolve all
+ * register/field handles (RCP, EXT and RPL tables).
+ *
+ * When @p is NULL the call only probes for the module's presence.
+ *
+ * @return 0 on success (or module present when probing), -1 if the module
+ *         instance does not exist.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the lookup result instead of querying the FPGA a second time */
+	p->m_tx_rpl = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP: select the recipe index that subsequent data writes target. */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* RCP: set the control CNT field. */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* RCP: commit staged control/data values to the hardware registers. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table: same select/stage/flush pattern as the RCP accessors above. */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL table: replacement data, staged four 32-bit words at a time. */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* @val must point to at least 4 words; all 4 are staged into RPL_DATA_VALUE. */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Driver-side handle for one TX_RPL (TX replacer) FPGA module instance.
+ * Caches register/field handles for the module's three tables:
+ * RCP (recipes), EXT (extension pointers) and RPL (replacement data).
+ */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this handle is bound to */
+	nt_fpga_t *mp_fpga;		/* owning FPGA; not owned by this struct */
+
+	nt_module_t *m_tx_rpl;		/* TX_RPL module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* recipe table control (addr + cnt) */
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;	/* recipe table data fields */
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	nt_register_t *mp_ext_ctrl;	/* extension table control */
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;	/* extension table data */
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	nt_register_t *mp_rpl_ctrl;	/* replacement data table control */
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;	/* replacement data (4 words per entry) */
+	nt_field_t *mp_rpl_data_value;
+};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+/* Bind @p to TX_RPL instance @n_instance; returns 0 on success, -1 if absent. */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage values with the setters, then flush to write them to HW. */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+/* @val points to 4 words of replacement data. */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-22 15:41   ` [PATCH v7 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-22 15:41   ` Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a tunnel header into ROA TUNHDR for recipe @index.
+ * 4 writes of 4 words (64 bytes) for IPv4, 8 writes of 4 words (128 bytes)
+ * for IPv6.  Within each 4-word group the words are written in reverse
+ * order, each converted from network byte order with ntohl().
+ *
+ * @return 0 on success, non-zero if any hw_mod_roa_tunhdr_set() call failed
+ *         (error codes are OR-accumulated across all writes).
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program ROA TUNCFG (and the companion IOA ROA-EPP entries) for recipe
+ * @index from the packed @color_actions bitmask (layout in the header).
+ * Covers: tunnel length/type, IP header total-length and checksum
+ * pre-calculation (IPv4 only), recirculation (bypass wins over port),
+ * and TX destination selection.
+ *
+ * @return 0 on success, -1 on an invalid TX destination combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255: no bypass port configured */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* replicate to both PHYs: TX on PHY0, recirc-bypass to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program one IOA RCP entry at @index from the packed @color_actions
+ * bitmask: tunnel (VxLAN) pop, VLAN pop, VLAN push (TPID select + TCI
+ * split into VID/DEI/PCP) and queue override.
+ *
+ * @return always 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* TCI layout: PCP 15:13, DEI bit 12, VID 11:0 */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/*
+ * Pack color/IOA/ROA indices into the legacy (pre-QSL v6) 32-bit color
+ * action word.  The field layout (and the layout-indicator bits 31:30)
+ * is chosen at compile time by MAX_COLOR_FLOW_STATS, per the table above.
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/*
+ * Pack mark/IOA/ROA/QSL-HSH indices into the 32-bit color action word
+ * (field layout documented above).  Bit 31 (CAO) is always set.
+ */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	/* 1U << 31: left-shifting signed 1 into the sign bit is undefined */
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Mark that a new recirculate port was allocated (bit 50). */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Tunnel IP type, bit 24: 0 = IPv4, 1 = IPv6 (see TUN_IPV4/TUN_IPV6). */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Pre-calculated tunnel IP header checksum, bits 49:34. */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Tunnel header length, bits 33:26; 0 means no tunnel header. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* TX destination, bits 23:20; stored as txport + ROA_TX_PHY0 (i.e. +1). */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Tunnel type, bits 19:16. */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Enable recirculation (bit 25) and store the port in bits 7:0. */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Return the recirculate port, or -1 if recirculation is not enabled. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Recirculate-bypass port, bits 15:8; non-zero overrides the recirc port. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
+/* Stage a queue override: enable flag (bit 31) plus queue id in bits 23:16. */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	/*
+	 * 1ULL << 31: the original (1 << 31) overflows signed int (undefined
+	 * behavior) and, once converted to uint64_t, sign-extends into bits
+	 * 63:32, corrupting the upper half of the 64-bit action mask.
+	 */
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/* Return the override queue id (bits 23:16), or -1 if bit 31 is not set. */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* Request pop of the outer VxLAN tunnel (bit 28). */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Request pop of the outer VLAN tag (bit 29). */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1ad (QinQ) TPID for a pushed VLAN, bits 27:24. */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Request a VLAN push (bit 30) with the full TCI in bits 15:0. */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Request a VLAN push (bit 30) setting only the PCP part of the TCI. */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Per-module operations table: a name plus allocate/free/reset/present
+ * callbacks for each versioned HW module whose state the flow API caches.
+ * Iterated by flow_api_backend_init(), flow_api_backend_reset() and
+ * flow_api_backend_done() below.
+ */
+static const struct {
+	const char *name;
+	int (*allocate)(struct flow_api_backend_s *be);
+	void (*free)(struct flow_api_backend_s *be);
+	int (*reset)(struct flow_api_backend_s *be);
+	bool (*present)(struct flow_api_backend_s *be);
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one contiguous zeroed region holding @sets arrays and hand the
+ * per-set base pointers back through the supplied void ** locations.
+ *
+ * Variadic arguments come in triplets, one per set:
+ *   (void **plist, int count, int elem_size)
+ * EXTRA_INDEXES elements are reserved in front of each set; *plist is set
+ * to point just past that reservation.
+ *
+ * The base pointer and total size are recorded in @mod (the caller frees
+ * via mod->base).  Returns the base, or NULL on allocation failure or an
+ * out-of-range @sets.
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+	/* Runtime guard: if asserts are compiled out (NDEBUG), a bad @sets
+	 * would otherwise overrun the fixed-size arrays above.
+	 */
+	if (sets <= 0 || sets > MAX_SETS)
+		return NULL;
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		/* The caller passes a void **; fetch it with that type. */
+		plist[i] = va_arg(args, void **);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		/* Carve the region into consecutive per-set slices. */
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)(p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+/* Clear the whole cached register image of a module back to all-zero. */
+void zero_module_cache(struct common_func_s *mod)
+{
+	void *cache = mod->base;
+
+	memset(cache, 0, mod->allocated_size);
+}
+
+/*
+ * Bind the version-specific backend @iface/@be_dev to @dev, query the
+ * basic device limits, and build the version-independent cache for every
+ * HW module the backend reports as present.  Returns 0 on success; on any
+ * module failure all modules are freed again and -1 is returned.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	unsigned int i;
+
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (i = 0; i < MOD_COUNT; i++) {
+		if (!module[i].present(dev))
+			continue;
+		/* Short-circuit keeps reset() from running if allocate() failed. */
+		if (module[i].allocate(dev) != 0 || module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[i].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Reset every cached NIC module back to its defaults.
+ * Returns 0 on success, -1 on the first module that fails.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	unsigned int i;
+
+	assert(dev);
+
+	for (i = 0; i < MOD_COUNT; i++) {
+		if (module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[i].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Release the cache memory of every module; always returns 0. */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < MOD_COUNT; i++)
+		module[i].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/*
+ * Move a value between a cached register field and a caller variable:
+ * when @get is non-zero the cache is read into *val, otherwise *val is
+ * written into the cache.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = *cached_val;
+		return;
+	}
+	*cached_val = *val;
+}
+
+/* As get_set(), but the cached field is signed; values pass through casts. */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)*cached_val;
+		return;
+	}
+	*cached_val = (int32_t)*val;
+}
+
+/*
+ * Scan elements [start, nb_elements) of the register array for one whose
+ * bytes equal element @idx (skipping @idx itself) and report the first
+ * match through *value (NOT_FOUND if none).  Only valid as a "get"
+ * operation; returns 0 on success or a negative error code.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	/* Validate idx as well: the original only checked start, so an
+	 * out-of-range idx made memcmp() read past the register array.
+	 */
+	if (start >= nb_elements || idx >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Compare register array elements @idx and @cmp_idx byte-for-byte.
+ * Returns 1 if they are distinct indices with equal contents, 0 if not,
+ * or a negative error code.  Only valid as a "get" operation.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	if (!get)
+		return error_unsup_field(func);
+	/* Validate idx as well: the original only checked cmp_idx, so an
+	 * out-of-range idx made memcmp() read past the register array.
+	 */
+	if (cmp_idx >= nb_elements || idx >= nb_elements)
+		return error_index_too_large(func);
+	if (idx != cmp_idx &&
+	    (memcmp((uint8_t *)be_module_reg + idx * type_size,
+		    (uint8_t *)be_module_reg + cmp_idx * type_size,
+		    type_size) == 0))
+		return 1;
+	return 0;
+}
+
+/* Return 1 if any of the @n bytes at @addr is non-zero, else 0. */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+	const uint8_t *end = p + n;
+
+	while (p < end) {
+		if (*p++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 if all @n bytes at @addr are 0xff, else 0. */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+	const uint8_t *end = p + n;
+
+	while (p < end) {
+		if (*p++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+/*
+ * Category test engine slots: bit positions used in the CTE/CCS enable
+ * bitmasks.  Values are HW encodings - do not renumber.
+ */
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+/*
+ * Packet offset selectors: DYN_* pick a dynamic frame offset computed by
+ * the parser, SB_* (tagged with SWX_INFO) select sideband data instead.
+ * Values are HW encodings - do not renumber.
+ */
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+/* Which of the two KM/FLM interfaces a CAT operation targets. */
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+/*
+ * KM recipe extractor selectors: how much of each quad/single/double word
+ * group enters the lookup key.  Values are HW encodings - do not renumber.
+ */
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+/* Sideband word selector. */
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+/*
+ * Protocol classification codes (outer and tunneled layers) as reported
+ * by the HW parser.  Values are HW encodings - do not renumber.
+ */
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+/* IP fragmentation state of a frame. */
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+/* RSS/hash algorithm selectors.  Values are HW encodings - do not renumber. */
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+/* Header field copy selectors (e.g. for TX modification). */
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+/* RCK comparator control bits: 4 bits per comparator @_comp_. */
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+/* Enum values below this are functions; at/above are register fields. */
+#define FIELD_START_INDEX 100
+
+/* Fields shared by every per-module cache struct (version, cache base/size). */
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+/*
+ * CAT (categorizer) module cache: capability counts read from the backend
+ * plus the version-specific register image (union keyed on ver).
+ */
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+/*
+ * CAT accessor selectors passed to the hw_mod_cat_* get/set functions:
+ * values below FIELD_START_INDEX name cache operations, values at or
+ * above it name individual register fields.
+ */
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+/*
+ * KM (key matcher) module cache: CAM/TCAM geometry read from the backend
+ * plus the version-specific register image.
+ */
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+/*
+ * KM accessor selectors for the hw_mod_km_* get/set functions: cache
+ * operations first, register fields from FIELD_START_INDEX.
+ */
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+/* HST (header stripper) module cache: recipe count plus register image. */
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+/* HST accessor selectors for the hw_mod_hst_* get/set functions. */
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+/*
+ * FLM (flow matcher) module cache: capability counts read from the
+ * backend plus the version-specific register image.
+ */
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+/*
+ * FLM accessor selectors for the hw_mod_flm_* get/set functions: cache
+ * operations first, register fields from FIELD_START_INDEX.
+ */
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM module API.
+ * *_set()/*_get() access a single shadow-copy field selected by 'field';
+ * *_flush() push the shadow state to the backend (see struct
+ * flow_api_backend_ops); *_update() presumably refresh the shadow state
+ * from the backend — confirm against the backend implementation.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* Load registers: BIN, PPS, LPS and APS (cf. HW_FLM_LOAD_* fields). */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST and RCP are indexed tables; flush covers [start_idx, start_idx+count). */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* set_mask takes a word array for the multi-word HW_FLM_RCP_MASK field */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn (lrn), info (inf) and status (sta) record transfer helpers. */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/*
+ * HSH module: receive hashing recipes (seed, word selection, hash type —
+ * see the HW_HSH_RCP_* fields below).
+ */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of RCP (recipe) entries */
+	union {
+		struct hw_mod_hsh_v5_s v5; /* register shadow, module version 5 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* word_off addresses a word inside multi-word fields (e.g. seed/mask) */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/*
+ * QSL module: queue selection. Sub-blocks: RCP (recipes), QST
+ * (queue-selection table), QEN (queue enable), UNMQ (unmatched-packet
+ * queue).
+ */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP entries */
+	uint32_t nb_qst_entries;    /* number of QST entries */
+	union {
+		struct hw_mod_qsl_v7_s v7; /* register shadow, module version 7 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/*
+ * SLC module recipes (enable, dynamic offset selection, pcap) —
+ * presumably packet slicing; confirm against the module implementation.
+ */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1; /* register shadow, module version 1 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/*
+ * SLC-LR module: same recipe field set as enum hw_slc_e, for the LR
+ * variant of the SLC module (version 2 register layout).
+ */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2; /* register shadow, module version 2 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/*
+ * PDB module: packet descriptor configuration — per-recipe descriptor
+ * layout, TX port handling and offsets, plus a global CONFIG register.
+ */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of RCP entries */
+
+	union {
+		struct hw_mod_pdb_v9_s v9; /* register shadow, module version 9 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+/* note: CONFIG has no matching _get accessor in this API */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/*
+ * IOA module: per-recipe ingress actions (tunnel pop, VLAN pop/push
+ * with VID/DEI/PCP/TPID, queue override), custom TPID configuration,
+ * and the ROA EPP table accessed through the IOA API.
+ */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;   /* number of RCP entries */
+	uint32_t nb_roa_epp_entries;  /* number of ROA EPP entries */
+	union {
+		struct hw_mod_ioa_v4_s v4; /* register shadow, module version 4 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/*
+ * ROA module: tunnel handling — tunnel header templates (TUNHDR),
+ * per-category tunnel configuration (TUNCFG) including recirculation,
+ * forwarding CONFIG, LAG configuration, and IGS/RCC drop counters.
+ */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel categories */
+	uint32_t nb_lag_entries;    /* number of LAG table entries */
+	union {
+		struct hw_mod_roa_v6_s v6; /* register shadow, module version 6 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+/* word_off addresses a word within the multi-word tunnel header template */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* IGS/RCC packet and byte drop counter access */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/*
+ * RMC module: single CTRL register. Field names suggest blocking of
+ * statistics/keep-alive/RPP-slice/MAC-port traffic and LAG odd/even
+ * phy selection — confirm against the register documentation.
+ */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3; /* register shadow, module version 1.3 */
+	};
+};
+
+/* All values are fields (no function opcodes), hence FIELD_START_INDEX. */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/*
+ * TPE module: TX packet editing. Sub-blocks (by field/function prefix):
+ * RPP, IFR, INS (insert), RPL (replace, with EXT extension table),
+ * CPY (copy writers), HFU (header field update) and CSU — CSU fields
+ * presumably control checksum update commands; confirm against the
+ * register documentation.
+ */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;     /* number of RCP entries */
+	uint32_t nb_ifr_categories;     /* number of IFR entries */
+	uint32_t nb_cpy_writers;        /* number of CPY writer instances */
+	uint32_t nb_rpl_depth;          /* RPL replace-table depth */
+	uint32_t nb_rpl_ext_categories; /* number of RPL EXT entries */
+	union {
+		struct hw_mod_tpe_v1_s v1; /* register shadow, module version 1 */
+		struct hw_mod_tpe_v2_s v2; /* register shadow, module version 2 */
+	};
+};
+
+/*
+ * Values below FIELD_START_INDEX are operations on whole entries;
+ * values from FIELD_START_INDEX select individual register fields.
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+/* Per sub-block accessors; flush covers [start_idx, start_idx+count). */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* rpl_rpl_set takes a word array (multi-word replace value) */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Debug-mode flags for flow_api_backend_ops::set_debug_mode(). */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001 /* presumably log register writes — confirm */
+};
+
+/*
+ * Backend operations vtable implemented by the concrete backend.
+ * 'dev'/'be_dev' is the opaque backend device handle
+ * (flow_api_backend_s::be_dev). get_nb_* callbacks report capability
+ * counts; *_present/*_version report module availability and version;
+ * *_flush callbacks write the module shadow state (the *_func_s
+ * structures above) for the given index range to the device; *_update
+ * callbacks presumably read device state back — confirm against a
+ * backend implementation.
+ */
+struct flow_api_backend_ops {
+	int version; /* backend ops interface version */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	/* learn/info/status record transfer; sizes are in 32-bit words */
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level flow API backend context: the opaque device handle, the
+ * backend operations vtable, and a shadow state structure per flow
+ * filter FPGA module.
+ */
+struct flow_api_backend_s {
+	void *be_dev; /* opaque backend device, passed to all iface callbacks */
+	const struct flow_api_backend_ops *iface; /* backend operations vtable */
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Initialize/reset/tear down the backend context with the given vtable. */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
/*
 * One enum value per FPGA resource pool managed by the flow API;
 * the prefixes match the owning FPGA module (CAT, KM, HSH, QSL, ...).
 */
enum res_type_e {
	RES_QUEUE,
	RES_CAT_CFN,
	RES_CAT_COT,
	RES_CAT_EXO,
	RES_CAT_LEN,
	RES_KM_FLOW_TYPE,
	RES_KM_CATEGORY,
	RES_HSH_RCP,
	RES_PDB_RCP,
	RES_QSL_RCP,
	RES_QSL_QST,
	RES_SLC_RCP,
	RES_IOA_RCP,
	RES_ROA_RCP,
	RES_FLM_FLOW_TYPE,
	RES_FLM_RCP,
	RES_HST_RCP,
	RES_TPE_RCP,
	RES_TPE_EXT,
	RES_TPE_RPL,
	RES_COUNT, /* number of managed resource types (array size) */
	RES_INVALID
};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
/* Extractor kind used for a match word (see diagram above). */
enum extractor_e {
	KM_USE_EXTRACTOR_UNDEF,
	KM_USE_EXTRACTOR_QWORD, /* 128-bit quad-word extractor (QW0/QW4) */
	KM_USE_EXTRACTOR_SWORD, /* 32-bit single-word extractor (SW8/SW9/SWX) */
};
+
/* One collected match field, before it is folded into the final KM key. */
struct match_elem_s {
	enum extractor_e extr; /* extractor type this field needs */
	int masked_for_tcam; /* if potentially selected for TCAM */
	uint32_t e_word[4]; /* match value, up to 128 bits */
	uint32_t e_mask[4]; /* match mask, up to 128 bits */

	int extr_start_offs_id; /* start-offset id, see km_add_match_elem() */
	int8_t rel_offs; /* offset relative to that start */
	uint32_t word_len; /* number of 32-bit words used */
};
+
enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY }; /* KM lookup target technology */
+
+#define MAX_MATCH_FIELDS 16
+
/* Key Matcher (KM) flow definition: collected match fields and the
 * resulting CAM/TCAM entry plus its bank bookkeeping.
 */
struct km_flow_def_s {
	struct flow_api_backend_s *be;

	/* For keeping track of identical entries */
	struct km_flow_def_s *reference;
	struct km_flow_def_s *root;

	/* For collect flow elements and sorting */
	struct match_elem_s match[MAX_MATCH_FIELDS];
	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
	int num_ftype_elem;

	/* Finally formatted CAM/TCAM entry */
	enum cam_tech_use_e target;
	uint32_t entry_word[MAX_WORD_NUM];
	uint32_t entry_mask[MAX_WORD_NUM];
	int key_word_size;

	/* TCAM calculated possible bank start offsets */
	int start_offsets[MAX_TCAM_START_OFFSETS];
	int num_start_offsets;

	/* Flow information */

	/*
	 * HW input port ID needed for compare. In port must be identical on flow
	 * types
	 */
	uint32_t port_id;
	uint32_t info; /* used for color (actions) */
	int info_set;
	int flow_type; /* 0 is illegal and used as unset */
	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */

	/* CAM specific bank management */
	int cam_paired;
	int record_indexes[MAX_BANKS];
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct cam_distrib_s *cam_dist;
	struct hasher_s *hsh;

	/* TCAM specific bank management */
	struct tcam_distrib_s *tcam_dist;
	int tcam_start_bank;
	int tcam_record;
};
+
/*
 * KCC-CAM
 */
/* 64-bit KCC CAM key; the bit-fields total exactly 64 bits so the key can
 * also be accessed as key64/key32[2] via the union in kcc_flow_def_s.
 */
struct kcc_key_s {
	uint64_t sb_data : 32; /* sideband data: VLAN TPID/VID or VXLAN VNI */
	uint64_t sb_type : 8; /* sideband type: 0 none, 1 VLAN, 2 VXLAN */
	uint64_t cat_cfn : 8; /* CAT CFN index */
	uint64_t port : 16; /* port number */
};
+
+#define KCC_ID_INVALID 0xffffffff
+
/* KCC (KM Category CAM) flow definition and its CAM bank bookkeeping. */
struct kcc_flow_def_s {
	struct flow_api_backend_s *be;
	union {
		uint64_t key64; /* whole key for fast compare */
		uint32_t key32[2]; /* key halves as written to HW */
		struct kcc_key_s key; /* field-wise view */
	};
	uint32_t km_category;
	uint32_t id; /* allocated unique id, KCC_ID_INVALID when unset */

	uint8_t *kcc_unique_ids; /* allocation bitmap for unique ids */

	int flushed_to_target; /* entry has been programmed into NIC hw */
	int record_indexes[MAX_BANKS]; /* hash-derived record index per bank */
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct kcc_cam_distrib_s *cam_dist;
	struct hasher_s *hsh;
};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
/* Pre-built tunnel encapsulation header pushed by the TX path. */
struct tunnel_header_s {
	union {
		uint8_t hdr8[MAX_TUN_HDR_SIZE];
		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4]; /* word view of same bytes */
	} d;
	uint32_t user_port_id;
	uint8_t len; /* used length of d (bytes) -- TODO confirm against producer */

	uint8_t nb_vlans;

	uint8_t ip_version; /* 4: v4, 6: v6 */
	uint16_t ip_csum_precalc; /* precalculated IP header checksum */

	uint8_t new_outer;
	uint8_t l2_len;
	uint8_t l3_len;
	uint8_t l4_len;
};
+
/* Kind of destination an output_s refers to. */
enum port_type_e {
	PORT_NONE, /* not defined or drop */
	PORT_INTERNAL, /* no queues attached */
	PORT_PHY, /* MAC phy output queue */
	PORT_VIRT, /* Memory queues to Host */
};

/* NOTE(review): SPCIAL_MATCH_NONE looks like a typo for SPECIAL_...;
 * kept as-is since the identifier is part of the interface.
 */
enum special_partial_match_e {
	SPCIAL_MATCH_NONE,
	SPECIAL_MATCH_LACP,
};

#define PORT_ID_NONE 0xffffffff

/* One resolved output destination of a flow. */
struct output_s {
	uint32_t owning_port_id; /* the port who owns this output destination */
	enum port_type_e type;
	int id; /* depending on port type: queue ID or physical port id or not used */
	int active; /* activated */
};
+
/*
 * Fully decoded flow definition: match info plus actions, produced by the
 * first (validation/conversion) step of flow creation.
 */
struct nic_flow_def {
	/*
	 * Frame Decoder match info collected
	 */
	int l2_prot;
	int l3_prot;
	int l4_prot;
	int tunnel_prot;
	int tunnel_l3_prot;
	int tunnel_l4_prot;
	int vlans;
	int fragmentation;
	/*
	 * Additional meta data for various functions
	 */
	int in_port_override;
	int l4_dst_port;
	/*
	 * Output destination info collection
	 */
	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
	int dst_num_avail;

	/*
	 * To identify high priority match with mark for special SW processing (non-OVS)
	 */
	enum special_partial_match_e special_match;

	/*
	 * Mark or Action info collection
	 */
	uint32_t mark;
	uint64_t roa_actions;
	uint64_t ioa_actions;

	uint32_t jump_to_group;

	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];

	int full_offload;
	/*
	 * Action push tunnel
	 */
	struct tunnel_header_s tun_hdr;

	/*
	 * If DPDK RTE tunnel helper API used
	 * this holds the tunnel if used in flow
	 */
	struct tunnel_s *tnl;

	/*
	 * Header Stripper
	 */
	int header_strip_start_dyn;
	int header_strip_start_ofs;
	int header_strip_end_dyn;
	int header_strip_end_ofs;
	int header_strip_removed_outer_ip;

	/*
	 * Modify field
	 */
	struct {
		uint32_t select;
		uint32_t dyn;
		uint32_t ofs;
		uint32_t len;
		uint32_t level;
		union {
			uint8_t value8[16];
			uint16_t value16[8];
			uint32_t value32[4];
		};
	} modify_field[MAX_CPY_WRITERS_SUPPORTED];

	uint32_t modify_field_count;
	uint8_t ttl_sub_enable;
	uint8_t ttl_sub_ipv4;
	uint8_t ttl_sub_outer;

	/*
	 * Key Matcher flow definitions
	 */
	struct km_flow_def_s km;

	/*
	 * Key Matcher Category CAM
	 */
	struct kcc_flow_def_s *kcc;
	int kcc_referenced;

	/*
	 * TX fragmentation IFR/RPP_LR MTU recipe
	 */
	uint8_t flm_mtu_fragmentation_recipe;
};
+
/* Discriminator for the union inside struct flow_handle. */
enum flow_handle_type {
	FLOW_HANDLE_TYPE_FLOW,
	FLOW_HANDLE_TYPE_FLM,
};

/*
 * Per-flow handle, kept in a doubly linked list on the owning device.
 * The union arm in use is selected by 'type' (FLOW vs FLM).
 */
struct flow_handle {
	enum flow_handle_type type;

	struct flow_eth_dev *dev;
	struct flow_handle *next;
	struct flow_handle *prev;

	union {
		/* FLOW_HANDLE_TYPE_FLOW */
		struct {
			/*
			 * 1st step conversion and validation of flow
			 * verified and converted flow match + actions structure
			 */
			struct nic_flow_def *fd;
			/*
			 * 2nd step NIC HW resource allocation and configuration
			 * NIC resource management structures
			 */
			struct {
				int index; /* allocation index into NIC raw resource table */
				/* number of contiguous allocations needed for this resource */
				int count;
				/*
				 * This resource if not initially created by this flow, but reused
				 * by it
				 */
				int referenced;
			} resource[RES_COUNT];
			int flushed;

			uint32_t flow_stat_id;
			uint32_t color;
			int cao_enabled;
			uint32_t cte;

			uint32_t port_id; /* MAC port ID or override of virtual in_port */
			uint32_t flm_ref_count;
			uint8_t flm_group_index;
			uint8_t flm_ft_index;
		};

		/* FLOW_HANDLE_TYPE_FLM */
		struct {
			uint32_t flm_data[10];
			uint8_t flm_prot;
			uint8_t flm_kid;
			uint8_t flm_prio;

			uint16_t flm_rpl_ext_ptr;
			uint32_t flm_nat_ipv4;
			uint16_t flm_nat_port;
			uint8_t flm_dscp;
			uint32_t flm_teid;
			uint8_t flm_rqi;
			uint8_t flm_qfi;

			uint8_t flm_mtu_fragmentation_recipe;

			struct flow_handle *flm_owner;
		};
	};
};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
#define OWNER_ID_COUNT 256

/* Per translated-group entry: reference count plus a pointer back into the
 * translation table so the slot can be cleared on last release.
 */
struct group_lookup_entry_s {
	uint64_t ref_counter;
	uint32_t *reverse_lookup;
};

struct group_handle_s {
	uint32_t group_count;

	/* (owner_id, group_in) -> translated group; 0 means unassigned */
	uint32_t *translation_table;

	/* one entry per possible translated group */
	struct group_lookup_entry_s *lookup_entries;
};

/*
 * Allocate a group translation handle for 'group_count' groups.
 *
 * Returns 0 on success; -1 on allocation failure, in which case *handle is
 * left NULL and no memory is leaked. (The original dereferenced the result
 * of calloc() unchecked and could report success with NULL tables.)
 */
int flow_group_handle_create(void **handle, uint32_t group_count)
{
	struct group_handle_s *group_handle =
		calloc(1, sizeof(struct group_handle_s));

	if (group_handle == NULL)
		goto err;

	group_handle->group_count = group_count;
	/* size_t arithmetic avoids 32-bit overflow of group_count * 256 */
	group_handle->translation_table =
		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
	group_handle->lookup_entries =
		calloc(group_count, sizeof(struct group_lookup_entry_s));

	if (group_handle->translation_table == NULL ||
			group_handle->lookup_entries == NULL)
		goto err;

	*handle = group_handle;
	return 0;

err:
	if (group_handle != NULL) {
		free(group_handle->translation_table);
		free(group_handle->lookup_entries);
		free(group_handle);
	}
	*handle = NULL;
	return -1;
}
+
+int flow_group_handle_destroy(void **handle)
+{
+	if (*handle) {
+		struct group_handle_s *group_handle =
+			(struct group_handle_s *)*handle;
+
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+
+		free(*handle);
+		*handle = NULL;
+	}
+
+	return 0;
+}
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	table_ptr = &group_handle->translation_table[owner_id * OWNER_ID_COUNT +
+				 group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			return -1;
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *lookup;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	lookup = &group_handle->lookup_entries[translated_group];
+
+	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
+		lookup->ref_counter -= 1;
+		if (lookup->ref_counter == 0) {
+			*lookup->reverse_lookup = 0;
+			lookup->reverse_lookup = NULL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
/* Fixed bit permutation used between hash mixing rounds. */
static uint32_t shuffle(uint32_t x)
{
	uint32_t swap_pair = ((x & 0x00000002) << 29) | ((x & 0x40000000) >> 29);
	uint32_t rot_down = (x & 0xAAAAAAA8) >> 3;
	uint32_t rot_up = (x & 0x15555555) << 3;

	return swap_pair | rot_down | rot_up;
}

/* Rotate right by 's', inverting the bits that wrap around the top. */
static uint32_t ror_inv(uint32_t x, const int s)
{
	uint32_t low_part = x >> s;
	uint32_t wrapped_inverted = (~x) << (32 - s);

	return low_part | wrapped_inverted;
}

/*
 * Non-linear combination of two words: x ^ y ^ sel, where sel has a bit
 * set wherever exactly two of the four rotated/inverted variants do.
 */
static uint32_t combine(uint32_t x, uint32_t y)
{
	uint32_t x1 = ror_inv(x, 15);
	uint32_t x2 = ror_inv(x, 13);
	uint32_t y1 = ror_inv(y, 3);
	uint32_t y2 = ror_inv(y, 27);
	uint32_t sel = (x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
		       (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
		       (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2);

	return x ^ y ^ sel;
}

/* One full mixing round: combine, then permute. */
static uint32_t mix(uint32_t x, uint32_t y)
{
	return shuffle(combine(x, y));
}
+
/*
 * 64-bit variants of the 32-bit hash primitives. Each 64-bit word is two
 * independent 32-bit lanes; the masks stop bits crossing the lane boundary,
 * so each lane behaves exactly like ror_inv(lane, s) / shuffle(lane).
 */

/* Per-lane ror_inv(x, 3). */
static uint64_t ror_inv3(uint64_t x)
{
	const uint64_t m = 0xE0000000E0000000ULL; /* top 3 bits of each lane */

	return ((x >> 3) | m) ^ ((x << 29) & m);
}

/* Per-lane ror_inv(x, 13). */
static uint64_t ror_inv13(uint64_t x)
{
	const uint64_t m = 0xFFF80000FFF80000ULL; /* top 13 bits of each lane */

	return ((x >> 13) | m) ^ ((x << 19) & m);
}

/* Per-lane ror_inv(x, 15). */
static uint64_t ror_inv15(uint64_t x)
{
	const uint64_t m = 0xFFFE0000FFFE0000ULL; /* top 15 bits of each lane */

	return ((x >> 15) | m) ^ ((x << 17) & m);
}

/* Per-lane ror_inv(x, 27). */
static uint64_t ror_inv27(uint64_t x)
{
	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL; /* top 27 bits of each lane */

	return ((x >> 27) | m) ^ ((x << 5) & m);
}

/* Per-lane shuffle(). */
static uint64_t shuffle64(uint64_t x)
{
	uint64_t swap_pairs = ((x & 0x0000000200000002) << 29) |
			      ((x & 0x4000000040000000) >> 29);
	uint64_t rot_down = (x & 0xAAAAAAA8AAAAAAA8) >> 3;
	uint64_t rot_up = (x & 0x1555555515555555) << 3;

	return swap_pairs | rot_down | rot_up;
}

/* Pack two 32-bit words into one 64-bit word (x in the high lane). */
static uint64_t pair(uint32_t x, uint32_t y)
{
	return ((uint64_t)x << 32) | y;
}

/* Per-lane combine(). */
static uint64_t combine64(uint64_t x, uint64_t y)
{
	uint64_t x1 = ror_inv15(x);
	uint64_t x2 = ror_inv13(x);
	uint64_t y1 = ror_inv3(y);
	uint64_t y2 = ror_inv27(y);
	uint64_t sel = (x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
		       (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
		       (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2);

	return x ^ y ^ sel;
}

/* One full 64-bit mixing round: combine, then permute. */
static uint64_t mix64(uint64_t x, uint64_t y)
{
	return shuffle64(combine64(x, y));
}
+
/*
 * Fold a 16-word (512-bit) key into a single 32-bit hash using the
 * mix/mix64 primitives, following the reduction tree drawn below.
 */
static uint32_t calc16(const uint32_t key[16])
{
	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
	/*   0       1       2       3       4       5       6       7     Layer 1   */
	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
	/*       0               1               2               3         Layer 2   */
	/*        \______.______/                 \______.______/                    */
	/*               0                               1                 Layer 3   */
	/*                \______________.______________/                            */
	/*                               0                                 Layer 4   */
	/*                              / \                                          */
	/*                              \./                                          */
	/*                               0                                 Layer 5   */
	/*                              / \                                          */
	/*                              \./                                Layer 6   */
	/*                             value                                         */

	uint64_t z;
	uint32_t x;

	/* layers 0-4: pairwise 64-bit mixing of key[i] with key[i+8] */
	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));

	/* layers 5-6: collapse to 32 bits and finalize */
	x = mix((uint32_t)(z >> 32), (uint32_t)z);
	x = mix(x, ror_inv(x, 17));
	x = combine(x, ror_inv(x, 17));

	return x;
}
+
/*
 * Hash a 16-word key. Returns the raw 32-bit hash and writes one record
 * index per bank into result[0..hsh->banks-1] (the caller must provide
 * room for hsh->banks entries).
 */
uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
{
	uint64_t val;
	uint32_t res;

	val = calc16(key);
	res = (uint32_t)val;

	/* widen the hash when the combined bank index needs more than 32 bits */
	if (hsh->cam_bw > 32)
		val = (val << (hsh->cam_bw - 32)) ^ val;

	/* slice off cam_records_bw bits per bank */
	for (int i = 0; i < hsh->banks; i++) {
		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
		val = val >> hsh->cam_records_bw;
	}
	return res;
}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
#ifdef TESTING
/*
 * Self-test for gethash(): hashes a fixed key and re-derives the expected
 * per-bank indices from the returned 32-bit hash. Returns the number of
 * mismatches (0 = pass).
 *
 * NOTE(review): only 3 banks are checked although val/resval hold 10;
 * the expected constant 0xACECAE65 presumably matches this fixed key --
 * verify if the primitives are ever changed.
 */
int hash_test(struct hasher_s *hsh, int banks, int record_bw)
{
	int res = 0;
	int val[10], resval[10];
	uint32_t bits = 0;

	/* only words 0-3 are non-zero; words 4-15 implicitly zero */
	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
	const uint32_t result = 0xACECAE65;

	for (int i = 0; i < 16; i++)
		printf("%08x,", inval[i]);
	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);

	uint32_t ret = gethash(hsh, inval, val);

	printf("Return VAL = %08X  ==  %08X\n", ret, result);
	res += (ret != result) ? 1 : 0;

	/* mirror gethash()'s widening step when banks*record_bw > 32 */
	int shft = (banks * record_bw) - 32;
	int mask = (1 << record_bw) - 1;

	if (shft > 0) {
		bits = (ret >> (32 - shft));
		ret ^= ret << shft;
	}

	resval[0] = ret & mask;
	ret >>= record_bw;
	resval[1] = ret & mask;
	ret >>= record_bw;
	resval[2] = ret & mask;
	/* re-attach the bits lost to the widening shift; bits == 0 when shft <= 0 */
	resval[2] |= (bits << (record_bw - shft));

	for (int i = 0; i < 3; i++) {
		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
		res += (val[i] != resval[i]) ? 1 : 0;
	}

	return res;
}
#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
/* Hash-based CAM bank index generator state (set up by init_hasher()). */
struct hasher_s {
	int banks; /* number of CAM banks hashed into */
	int cam_records_bw; /* bits needed to index one bank's records */
	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
	int cam_bw; /* combined index width: banks * cam_records_bw */
};

/* Initialize 'hsh' for 'banks' banks of 'nb_records' records each. */
int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
/* Hash 'key'; writes one index per bank into result[], returns raw hash. */
uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "flow_api_backend.h"
#include "flow_api_engine.h"
#include "flow_hasher.h"
+
/*
 * KCC-CAM structures and defines
 */
/* Shadow bookkeeping for one KCC CAM record. */
struct kcc_cam_distrib_s {
	struct kcc_flow_def_s *kcc_owner; /* owning flow, NULL when the record is free */
	int ref_cnt; /* number of flows sharing the record */
};

/* NOTE: this macro implicitly uses a local variable named 'kcc'
 * (struct kcc_flow_def_s *) that must be in scope at the expansion site.
 */
#define BE_CAM_KCC_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
	})


/* Sizes of the sub-buffers carved out of the single handle allocation
 * in kcc_attach_ndev_resource_management(); both also capture 'kcc'.
 */
#define BE_CAM_ENTRIES \
	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)

#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
/* Scratch list of CAM addresses already reserved while a recursive cuckoo
 * move is in progress (see kcc_move_cuckoo_index_level()).
 */
static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
/*
 * Key for KCC CAM
 */
/* No sideband match: sb_type 0, sb_data all-ones sentinel. */
int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
{
	kcc->key.sb_data = 0xffffffff;
	kcc->key.sb_type = 0;
	return 0;
}

/* VLAN sideband: TPID in the upper 16 bits, 12-bit VID in the lower. */
int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
{
	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
	kcc->key.sb_type = 1;
	return 0;
}

/* VXLAN sideband: 24-bit VNI with marker bit 25 set. */
int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
{
	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
	kcc->key.sb_type = 2;
	return 0;
}

/* Set the port field of the key. */
int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
{
	kcc->key.port = port;
	return 0;
}

/* Set the CAT CFN field of the key. */
int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
{
	kcc->key.cat_cfn = cat_cfn;
	return 0;
}

/* Read back the CAT CFN field of the key. */
uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
{
	return kcc->key.cat_cfn;
}

/*
 * other settings for KCC CAM
 */
/* Set the KM category this KCC entry maps to. */
int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
{
	kcc->km_category = category;
	return 0;
}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
/*
 * Write this entry (both key words, KM category, unique id) into the CAM
 * record selected by 'bank' and the pre-computed record index, flush the
 * record to HW and take ownership of the slot in the shadow table.
 * Returns 0 on success, -1 on any backend write failure.
 */
static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
{
	int res;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
				 kcc->key32[0]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
				 kcc->key32[1]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
				 kcc->km_category);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	/* record ownership in the shadow distribution table */
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
	return res;
}
+
/*
 * Zero the CAM record owned by this entry in 'bank', flush it to HW and
 * release the shadow-table slot. The software key/category are cleared
 * too, but the allocated unique id is kept (freed separately).
 * Returns 0 on success, -1 on any backend write failure.
 */
static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
{
	int res = 0;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	/* release the shadow-table slot */
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;

	kcc->key64 = 0UL;
	kcc->km_category = 0;
	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
	return res;
}
+
/*
 * Try to move this entry out of its current bank into any bank whose slot
 * (for this entry's hash indices) is free, to make room for a new entry.
 * Returns 1 when the entry was moved, 0 when no free slot exists or the
 * HW write failed.
 * NOTE(review): the error message says "KM CAM" but this is the KCC CAM
 * path -- presumably copied from flow_km.c; confirm before renaming.
 */
static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
{
	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);

	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
		/* It will not select itself */
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
				NULL) {
			/*
			 * Populate in new position
			 */
			int res = kcc_cam_populate(kcc, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank
			 * HW flushes are really not needed, the old addresses are always taken over
			 * by the caller If you change this code in future updates, this may no
			 * longer be true then!
			 */
			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
			.kcc_owner = NULL;
			NT_LOG(DBG, FILTER,
			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       kcc->bank_used, bank,
			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
			       BE_CAM_KCC_DIST_IDX(bank));

			kcc->bank_used = bank;
			(*kcc->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Recursive cuckoo move: try to relocate the entry occupying CAM address
 * 'bank_idx', first directly, then (up to 'levels' deep) by recursively
 * moving the entries blocking it. Addresses already reserved higher up the
 * recursion are tracked in kcc_cam_addr_reserved_stack so they are not
 * re-entered. Returns 1 when the address was freed, 0 otherwise.
 */
static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
				       int bank_idx, int levels,
				       int cam_adr_list_len)
{
	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;

	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* direct move if any bank has a free slot for this entry */
	if (kcc_move_cuckoo_index(kcc))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* reserve this address so deeper levels do not try to reuse it */
	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
		int reserved = 0;
		int new_idx = BE_CAM_KCC_DIST_IDX(i);

		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (kcc_cam_addr_reserved_stack[i_reserved] ==
					new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		/* free the blocking entry one level down, then retry the move */
		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
						      cam_adr_list_len);
		if (res) {
			if (kcc_move_cuckoo_index(kcc))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/* Scratch hash key; only words 0-1 are ever written, words 2-15 stay zero.
 * NOTE(review): file-scope mutable state -- assumes single-threaded flow
 * programming; confirm before calling from multiple threads.
 */
static uint32_t kcc_hsh_key[16];

/*
 * Place this entry in the KCC CAM: hash the key into one record index per
 * bank, use a free bank if any, otherwise cuckoo-move existing entries
 * (depth 4) to make room, then program the CAM.
 * Returns 0 on success, -1 when no slot could be freed or the write failed.
 */
static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
{
	int res = 0;
	int val[MAX_BANKS];

	/* key words are swapped into the hash input */
	kcc_hsh_key[0] = kcc->key32[1];
	kcc_hsh_key[1] = kcc->key32[0];
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");

	/* 2-15 never changed - remains zero */

	gethash(kcc->hsh, kcc_hsh_key, val);

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
		kcc->record_indexes[i] = val[i];
	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
	       kcc->record_indexes[0], kcc->record_indexes[1],
	       kcc->record_indexes[2]);

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
				NULL) {
			bank = i_bank;
			break;
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
				i_bank++) {
			if (kcc_move_cuckoo_index_level(kcc,
							BE_CAM_KCC_DIST_IDX(i_bank),
							4, 0)) {
				bank = i_bank;
				break;
			}
		}

		if (bank < 0)
			return -1;
	}

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
	       BE_CAM_KCC_DIST_IDX(bank));
	res = kcc_cam_populate(kcc, bank);
	if (res == 0) {
		kcc->flushed_to_target = 1;
		kcc->bank_used = bank;
	} else {
		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
	}
	return res;
}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Mask patterns that are eligible for KM CAM storage.
+ * A match element whose mask equals none of these entries is flagged
+ * for TCAM placement instead.
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	/* IP6_SRC, IP6_DST */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } },
+	/* DMAC,SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000 } },
+	/* DMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffff0000, 0x00000000, 0xffff0000 } },
+	/* SMAC,ethtype */
+	{ 4, { 0x00000000, 0x0000ffff, 0xffffffff, 0xffff0000 } },
+	/* ETH_128 */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } },
+	/* IP4_COMBINED */
+	{ 2, { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 } },
+	/*
+	 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+	 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+	 */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{ 1, { 0xffff0000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* TP_PORT_DST32 */
+	{ 1, { 0x0000ffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv4 TOS mask bits used often by OVS */
+	{ 1, { 0x00030000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv6 TOS mask bits used often by OVS */
+	{ 1, { 0x00300000, 0x00000000, 0x00000000, 0x00000000 } },
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+struct cam_distrib_s {
+	/* Flow definition currently owning this CAM record (NULL = free) */
+	struct km_flow_def_s *km_owner;
+};
+
+/*
+ * Flat index of (bank, record) into the cam_dist shadow array.
+ * NOTE: both CAM index macros rely on a local variable named 'km'
+ * being in scope at the use site.
+ */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/*
+ * Index of km's currently chosen record within bank 'bnk'.
+ * Uses a GCC/Clang statement expression so 'bnk' is evaluated once.
+ */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Maximum depth of the recursive cuckoo relocation search */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/*
+ * Addresses already visited during one cuckoo search.
+ * NOTE(review): file-scope scratch state - assumes flow programming is
+ * serialized by the caller; confirm there is no concurrent use.
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+struct tcam_distrib_s {
+	/* Flow definition currently owning this TCAM position (NULL = free) */
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index of (bank, record) into the tcam_dist shadow array */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach this flow definition to the per-NIC CAM/TCAM record manager,
+ * allocating it on first use. The single allocation holds, in order:
+ * the CAM ownership shadow, the cuckoo move counter (uint32_t), the
+ * TCAM ownership shadow and the hasher state.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/* Fixed: check calloc result - previously the NULL
+			 * pointer was carved up and dereferenced below
+			 */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the per-NIC CAM/TCAM record manager allocated by
+ * km_attach_ndev_resource_management() and clear the handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle != NULL) {
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+		free(*handle);
+	}
+	*handle = NULL;
+}
+
+/*
+ * Add one match element (up to 4 words of value/mask) to the KM flow
+ * definition. A word_len of 3 is promoted to 4 with a zeroed top word.
+ * The element's mask pattern is checked against cam_masks[] to decide
+ * whether the flow can stay CAM-eligible or must go to the TCAM.
+ * Returns 0 on success, -1 on invalid word length or full match table.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* Fixed: reject overflow of the fixed-size match table instead of
+	 * writing past km->match[MAX_MATCH_FIELDS - 1]
+	 */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len != cam_masks[msk].word_len)
+			continue;
+
+		int match = 1;
+
+		for (unsigned int wd = 0; wd < word_len; wd++) {
+			if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+				match = 0;
+				break;
+			}
+		}
+		if (match) {
+			/* Can go into CAM - no need to scan further masks */
+			km->match[km->num_ftype_elem].masked_for_tcam = 0;
+			break;
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable carrying the color info word as part of the KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first unmarked, non-sideband match element with the given
+ * word length. Returns its index, or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (marked[i])
+			continue;
+		if (km->match[i].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[i].word_len == size)
+			return i;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Translate a dynamic/sideband offset id into a name for debug logs. */
+static const char *get_prot_offset_descr(int idx)
+{
+	const char *descr = "<unknown>";
+
+	switch (idx) {
+	case DYN_SOF:
+		descr = "SOF";
+		break;
+	case DYN_L2:
+		descr = "L2 header";
+		break;
+	case DYN_FIRST_VLAN:
+		descr = "First VLAN";
+		break;
+	case DYN_MPLS:
+		descr = "First MPLS";
+		break;
+	case DYN_L3:
+		descr = "L3 header";
+		break;
+	case DYN_ID_IPV4_6:
+		descr = "ID field IPv4/6";
+		break;
+	case DYN_FINAL_IP_DST:
+		descr = "Final IP dest";
+		break;
+	case DYN_L4:
+		descr = "L4 header";
+		break;
+	case DYN_L4_PAYLOAD:
+		descr = "L4 payload";
+		break;
+	case DYN_TUN_PAYLOAD:
+		descr = "Tunnel payload";
+		break;
+	case DYN_TUN_L2:
+		descr = "Tunnel L2 header";
+		break;
+	case DYN_TUN_VLAN:
+		descr = "First tunneled VLAN";
+		break;
+	case DYN_TUN_MPLS:
+		descr = "First tunneled MPLS";
+		break;
+	case DYN_TUN_L3:
+		descr = "Tunnel L3 header";
+		break;
+	case DYN_TUN_ID_IPV4_6:
+		descr = "Tunnel ID field IPv4/6";
+		break;
+	case DYN_TUN_FINAL_IP_DST:
+		descr = "Tunnel final IP dest";
+		break;
+	case DYN_TUN_L4:
+		descr = "Tunnel L4 header";
+		break;
+	case DYN_TUN_L4_PAYLOAD:
+		descr = "Tunnel L4 payload";
+		break;
+	case SB_VNI:
+		descr = "VNI";
+		break;
+	case SB_MAC_PORT:
+		descr = "In Port";
+		break;
+	case SB_KCC_ID:
+		descr = "KCC ID";
+		break;
+	default:
+		break;
+	}
+	return descr;
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow: map match elements onto
+ * QWORD/SWORD extractors, assemble the entry word/mask arrays, and
+ * decide CAM vs TCAM placement (including TCAM start-bank candidates).
+ * Returns 0 on success, -1 when the key cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/* Fixed: populate all four candidate start offsets -
+			 * the original wrote every value into index 0, leaving
+			 * start_offsets[1..3] uninitialized while
+			 * num_start_offsets claimed 4 entries
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare this flow's key layout with an existing one (km1, the
+ * original creator of the KM recipe) to decide whether the recipe and
+ * flow type can be reused.
+ * Returns km1's flow type on reuse, 0 when incompatible, and -1 when
+ * the masked key values are identical (flow clash - already programmed).
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/*
+	 * NOTE(review): the first branch scans km->match[i] while the
+	 * second scans km->match_map[i]; also only the first sideband hit
+	 * is recorded because of the break. Confirm this asymmetry is
+	 * intentional.
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program KM recipe 'index' in hardware from this flow definition:
+ * assign the QWORD/SWORD/SWX extractors per mapped match element, set
+ * the key mask, and configure either the CAM (key length, flow type
+ * mask, pairing) or the TCAM (bank bitmap, key length, record mapping).
+ * Returns 0 on success, -1 when the elements cannot be mapped onto the
+ * available extractors or no TCAM space is found.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* Sideband words only supported once, and only
+				 * for CAM targets
+				 */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* Plain single words use the DW8/DW10
+				 * extractors (max two SWORDs)
+				 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* Up to two QWORD extractors (QW0/QW4); the select
+			 * value depends on the element's word length
+			 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into CAM 'bank' at the
+ * record chosen for that bank, claim the record(s) in the shadow
+ * array, and flush to hardware. Paired keys occupy two consecutive
+ * records. Returns 0 on success (OR of backend results).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	/* Total words to write: key plus optional info (color) word */
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	/* Remaining words spill into the paired (next) record */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record(s) in 'bank', release ownership in the
+ * shadow array and flush to hardware. Mirrors cam_populate().
+ * Returns 0 on success (OR of backend results).
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	/* Same word count as was written: key plus optional info word */
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	/* Clear the paired (next) record if the key spilled into it */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank to any
+ * other bank that has its hashed record position free (cuckoo move).
+ * Returns 1 when the entry was moved, 0 when no free slot was found or
+ * the re-population write failed.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			/* A paired key also needs the next record free */
+			if (km->cam_paired) {
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo relocation: try to move the flow occupying
+ * cam_dist[bank_idx] elsewhere, first directly, then by recursively
+ * displacing other occupants up to 'levels' deep. Visited addresses
+ * are tracked in cam_addr_reserved_stack to avoid cycles.
+ * Returns 1 when bank_idx was freed, 0 otherwise.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* Reserve this address so deeper levels do not revisit it */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		/* Candidate address of 'km' in bank i (uses km's records) */
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			/* The slot was freed below us; this move must work */
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program this flow's key into the KM CAM: hash the key to one record
+ * index per bank, pick a free bank (or free one via cuckoo moves up to
+ * 4 levels deep) and populate it.
+ * Returns 0 on success, -1 when no bank could be made available.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): logs indexes 0..2 - assumes at least 3 CAM banks;
+	 * confirm for all FPGA variants
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Search for a record index that is free across all key_word_size
+ * consecutive banks starting at start_bank. On success stores the
+ * record in km->tcam_record and returns 1, otherwise returns 0.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		int usable = 1;
+
+		for (int ii = 0; ii < km->key_word_size; ii++) {
+			if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii, rec)]
+					.km_owner != NULL) {
+				usable = 0;
+				break;
+			}
+		}
+
+		if (usable) {
+			km->tcam_record = rec;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Try each allowed TCAM start bank for this key length until a free
+ * record is found. On success sets km->tcam_start_bank/tcam_record and
+ * returns 0; returns -1 when the TCAM has no room.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		int bank = km->start_offsets[bs_idx];
+
+		if (!tcam_find_free_record(km, bank))
+			continue;
+
+		km->tcam_start_bank = bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word (value + mask) for 'record' into TCAM
+ * 'bank'. For each of the 4 bytes, every possible byte value's match
+ * vector is updated: the record's bit is set for values matching the
+ * masked key byte and cleared otherwise. Claims the shadow ownership
+ * on success. Returns 0 on success (OR of backend results).
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* Position of this record's bit in the 96-bit match vector */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program this flow into the TCAM: locate (or reuse) a record, write
+ * the per-record color/flow-type (TCI), then program each key word
+ * into its bank. Sets km->flushed_to_target on success.
+ * Returns 0 on success, -1 when no record is available.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	/* tcam_record < 0 means the recipe was reused - find a record now */
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record from a TCAM bank.
+ *
+ * The record's bit is cleared in every (byte, value) cell of the bank,
+ * the bank is flushed to hardware, and the record's ownership is
+ * released.  Returns 0 on success, non-zero on backend error.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* locate this record's bit inside the record bitmap */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove a complete flow entry from the TCAM.
+ *
+ * Zeroes the KM_TCI color/flow-type for the record and clears the record
+ * in each bank the key occupied (one bank per key word).
+ * Returns 0 on success, -1 when no record was allocated, or a backend
+ * error code.  Note: TCI set/flush results are intentionally not checked
+ * here; only the bank resets feed into the return value.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	/* silence per-register debug output during the bulk TCAM writes */
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Make 'km' reference an already-programmed, identical match entry.
+ *
+ * 'km' is appended to the end of the reference chain rooted in 'km1'
+ * and inherits the location (CAM bank or TCAM bank/record) and info of
+ * the chain's current tail entry.
+ * Returns 0 on success, -1 for an unsupported match target.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	km->root = (km1->root != NULL) ? km1->root : km1;
+
+	/* append km at the end of the reference chain */
+	while (tail->reference != NULL)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = tail->info;
+
+	if (km->target == KM_CAM) {
+		km->cam_paired = tail->cam_paired;
+		km->bank_used = tail->bank_used;
+		km->flushed_to_target = tail->flushed_to_target;
+		return 0;
+	}
+
+	if (km->target == KM_TCAM) {
+		km->tcam_start_bank = tail->tcam_start_bank;
+		km->tcam_record = tail->tcam_record;
+		km->flushed_to_target = tail->flushed_to_target;
+		return 0;
+	}
+
+	/* KM_SYNERGY and anything else is unsupported */
+	return -1;
+}
+
+/*
+ * Program this flow's match data into hardware, tagging it with the
+ * given color, dispatching on the configured match target.
+ * Returns the target writer's result, or -1 for an unsupported target.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		return km_write_data_to_cam(km);
+
+	if (km->target == KM_TCAM)
+		return km_write_data_to_tcam(km);
+
+	/* KM_SYNERGY and unknown targets are unsupported */
+	return -1;
+}
+
+/*
+ * Remove this flow's match entry, honoring the reference-sharing scheme.
+ *
+ * Three cases:
+ *  1. km references another entry (km->root set): just unlink km from
+ *     the reference chain; hardware is untouched.
+ *  2. km is a root with references (km->reference set): transfer CAM or
+ *     TCAM record ownership to the first reference; hardware untouched.
+ *  3. km is the sole owner and was flushed: reset the hardware entry.
+ * Returns 0 on success, -1 for an unsupported target or reset failure.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* case 1: unlink km from its root's reference chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* case 2: hand ownership over to the first reference */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			/* a paired entry owns two consecutive CAM slots */
+			if (km->key_word_size + !!km->info_set > 1) {
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			/* one TCAM bank per key word shares the record */
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* case 3: last owner - clear the hardware entry */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One tunnel definition tracked in the tunnel database. */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;      /* tunnel match values */
+	struct tunnel_cfg_s cfg_mask; /* mask applied to cfg fields */
+	uint32_t flow_stat_id;        /* (uint32_t)-1 until set by flow code */
+	uint8_t vport;                /* allocated virtual tunnel port */
+	int refcnt;                   /* number of users of this tunnel */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/*
+ * Report whether virt_port lies in the virtual tunnel port range
+ * [VIRTUAL_TUNNEL_PORT_OFFSET, MAX_HW_VIRT_PORTS). Returns 1 or 0.
+ */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET)
+		return 0;
+	return (virt_port < MAX_HW_VIRT_PORTS) ? 1 : 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* Head of the linked list of known tunnel definitions. */
+static struct tunnel_s *tunnels;
+
+/* Allocation map for virtual tunnel ports; slot i corresponds to port
+ * VIRTUAL_TUNNEL_PORT_OFFSET + i (non-zero = in use).
+ */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual tunnel port number.
+ * Returns the port id, or 255 when the range is exhausted.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	const unsigned int nb_slots =
+		MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET;
+
+	for (unsigned int slot = 0; slot < nb_slots; slot++) {
+		if (vport[slot] == 0) {
+			vport[slot] = 1;
+			return (uint8_t)(slot + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ * Returns 0 on success.
+ * NOTE(review): returns -1 for an out-of-range port, but the return
+ * type is uint8_t, so callers actually observe 255 - confirm callers
+ * test for != 0 rather than < 0.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * True when _v1 and _v2 agree on every bit that is set in BOTH masks.
+ * Arguments are expanded once into typed locals to avoid the usual
+ * multiple-evaluation hazard of function-like macros.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/*
+ * True when two IPv4 tunnel configurations match on src/dst IP and
+ * src/dst port under the intersection of both masks.
+ */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * True when two IPv6 tunnel configurations match on src/dst address
+ * (stored as two 64-bit halves) and src/dst port under both masks.
+ */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Compare a stored tunnel against a candidate configuration/mask pair.
+ * Tunnel types must be identical; addresses and ports are compared with
+ * the IP-version-specific masked-equality macro.
+ * Returns non-zero on match, 0 otherwise.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up - or create - a tunnel matching tnlcfg/tnlcfg_mask in the
+ * tunnel database.
+ *
+ * tun_set != 0: this is a tunnel-set command; an exact duplicate is
+ * searched for first, otherwise a matching pre-configured tunnel is
+ * promoted to a full definition.  tun_set == 0: a plain match search.
+ * On miss a new tunnel with a freshly allocated virtual port is added.
+ * Returns the tunnel (refcnt incremented on reuse), or NULL on virtual
+ * port exhaustion or allocation failure.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		/* renamed from 'vport' to avoid shadowing the file-scope
+		 * vport[] allocation map
+		 */
+		uint8_t new_vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       new_vport);
+
+		if (new_vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			/* bugfix: the allocation result was dereferenced
+			 * unchecked - fail cleanly and return the port
+			 */
+			if (!tun) {
+				flow_tunnel_free_virt_port(new_vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = new_vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel; on the last reference the tunnel is
+ * unlinked from the database, its virtual port is released and the
+ * memory freed.  Returns 0 on success, -1 if tnl is not in the list.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s **link = &tunnels;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the link that points at tnl */
+	while (*link != NULL && *link != tnl)
+		link = &(*link)->next;
+
+	if (*link == NULL) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, unlink and destroy */
+	if (--tnl->refcnt == 0) {
+		*link = tnl->next;
+		flow_tunnel_free_virt_port(tnl->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tnl->vport);
+		free(tnl);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel description from a flow element list starting at *idx.
+ *
+ * Collects IPv4/IPv6 addresses, UDP ports and (optionally) the VXLAN
+ * VNI into a tunnel configuration, then looks the tunnel up in / adds
+ * it to the database.  vni == NULL marks a tunnel-set command; with a
+ * non-NULL vni this is a match lookup and *vni receives the parsed VNI
+ * or (uint32_t)-1.  On success *idx is advanced past the consumed
+ * elements.  Returns the tunnel or NULL on an invalid definition.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/*
+		 * bugfix: tnlcfg_mask was left uninitialized; its fields are
+		 * only assigned for elements actually present, yet the whole
+		 * struct is later read in tunnel_get() via memcmp() and the
+		 * masked-compare macros.
+		 */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		/* elements must appear in non-decreasing type order */
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* 24-bit VNI, network byte order */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port allocated to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Attach the flow-statistics id, marking this tunnel as fully defined. */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy the configuration of the tunnel on the given virtual port into
+ * *tuncfg.  flow_stat_id == (uint32_t)-1 acts as a wildcard; otherwise
+ * it must match as well.  Returns 0 on success, -1 when not found.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun != NULL; tun = tun->next) {
+		int stat_id_ok = (flow_stat_id == tun->flow_stat_id ||
+				  flow_stat_id == (uint32_t)-1);
+
+		if (tun->vport == vport && stat_id_ok) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * One's-complement sum of 'size' bytes viewed as 16-bit words, seeded
+ * with 'seed' and folded back to 16 bits (RFC 1071 style, without the
+ * final complement).
+ * NOTE(review): for odd 'size' the tail byte is obtained by truncating
+ * data[size / 2] to unsigned char - that indexes a full 16-bit word of
+ * which only one byte belongs to the buffer; the current caller always
+ * passes an even size (sizeof(struct ipv4_hdr_s)) - confirm before
+ * reusing with odd sizes.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/*
+ * Copy 'size' bytes of the element's spec data into 'result',
+ * deliberately ignoring the element's mask.
+ */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build the raw VXLAN encapsulation header (Ethernet / IPv4-or-IPv6 /
+ * UDP / VXLAN) described by 'elem' into fd->tun_hdr, normalizing
+ * mandatory fields (IP version, protocol=UDP, TTL/hop limit, VXLAN
+ * I-flag, zero UDP checksum) and precalculating the IPv4 header
+ * checksum seed for hardware.
+ * Returns 0 on success, -1 on an unsupported element or when IPv6
+ * encapsulation is requested without ROA version >= 6.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* bugfix: byte 4 was printed as byte 5 twice
+				 * in both MAC dumps
+				 */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* enforce IPv4, no options */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* checksum/length are filled by hardware; precalculate the
+		 * checksum seed over the IPv4 header (starts after the
+		 * 14-byte Ethernet header)
+		 */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Ask the backend whether the CAT (categorizer) module exists on this device. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Read the CAT module version and capability counts from the backend,
+ * then allocate the version-specific (v18/v21/v22) register shadow
+ * tables in one contiguous area via callocate_mod().
+ * Returns 0 on success, a negative error for a bad resource count,
+ * allocation failure or an unsupported module version.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* The following counts must be strictly positive ("<= 0" checks) */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC size/banks and KM interface count may legitimately be zero,
+	 * hence the "< 0" checks below.
+	 */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	/* Module ids of the two possible KM/FLM interfaces (not validated here) */
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	/* KCC ID field width in bits; set identically for all supported versions */
+	be->cat.kcc_id_bit_size = 10;
+
+	/* Allocate all per-version register shadow tables in one contiguous area */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single allocation backing all CAT shadow tables and clear
+ * the base pointer so a double free is harmless.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	if (be->cat.base) {
+		free(be->cat.base);
+		be->cat.base = NULL;
+	}
+}
+
+/*
+ * Reset category function (CFN) entry 'i' to accept-all defaults:
+ * zero the whole entry, then open every protocol/error match field.
+ * NOTE(review): only the status of the initial PRESET_ALL call is
+ * propagated; failures of the subsequent field writes are ignored.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields below exist only on module version 21 and later */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Zero the cached CAT state and flush every table to hardware.
+ * For versions > 18 the KCE/KCS/FTE tables are flushed per KM/FLM
+ * interface; the second interface is flushed only when configured
+ * (km_if_count > 1).  Returns 0 on success, -1 on any flush failure.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		/* v18: single KM interface, module id 0 */
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC CAM is optional; size 0 means not present */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist only from version 22 */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Write 'count' cached CFN entries starting at 'start_idx' to hardware.
+ * ALL_ENTRIES flushes the whole table and then requires start_idx == 0.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	switch (count) {
+	case ALL_ENTRIES:
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						be->cat.nb_cat_funcs);
+
+	default:
+		if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						count);
+	}
+}
+
+/*
+ * Get or set one CFN field in the version-specific shadow structure.
+ * 'get' selects the direction; 'word_off' selects the word of multi-word
+ * fields (PM_CMP) and doubles as an auxiliary argument for the
+ * COMPARE/FIND pseudo-fields.  Returns 0 or a negative error code.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a two-word field; word_off is 0 or 1 */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has a single KM interface, hence one km_or field */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21/v22 share the cat_v21_cfn_s layout */
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* Copy entire entry *value into entry 'index' */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Set one CFN field; thin wrapper around hw_mod_cat_cfn_mod(get=0). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Get one CFN field; thin wrapper around hw_mod_cat_cfn_mod(get=1). */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map an (interface selector, KM module id) pair to the 0/1 interface
+ * index used by the v21+ register caches.  Version 18 has only one
+ * interface (index 0).  For KM_FLM_IF_SECOND the id must match m1;
+ * otherwise it may match either m0 or m1.  Returns a negative error
+ * code when the id matches no configured interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	int km_if_idx;
+
+	if (_VER_ == 18) {
+		km_if_idx = 0;
+	} else {
+		if (if_num == KM_FLM_IF_SECOND) {
+			if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		} else {
+			if (be->cat.km_if_m0 == km_if_id)
+				km_if_idx = 0;
+			else if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		}
+	}
+	return km_if_idx;
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush cached KCE entries to hardware for the given KM/FLM interface.
+ * Each KCE entry covers 8 CFNs, so the table has nb_cat_funcs/8 rows.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* KCE flush for the KM side: shared flush with km_if_id 0. */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* KCE flush for the FLM side: shared flush with km_if_id 1. */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the KCE enable bitmap for one 8-CFN group ('index').
+ * In v21/v22 the bitmap is kept per KM/FLM interface.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCE field on the KM interface (km_if_id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCE field on the KM interface (km_if_id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCE field on the FLM interface (km_if_id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCE field on the FLM interface (km_if_id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/*
+ * Flush cached KCS (KM category select) entries to hardware for the
+ * given KM/FLM interface; one entry per category function.
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* KCS flush for the KM side: shared flush with km_if_id 0. */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* KCS flush for the FLM side: shared flush with km_if_id 1. */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the KCS category value for CFN 'index'.
+ * In v21/v22 the category is kept per KM/FLM interface.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCS field on the KM interface (km_if_id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCS field on the KM interface (km_if_id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCS field on the FLM interface (km_if_id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCS field on the FLM interface (km_if_id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush cached FTE (flow type enable) entries to hardware for the given
+ * KM/FLM interface.  Table size is nb_cat_funcs/8 * nb_flow_types *
+ * key_cnt, where key_cnt is 4 from version 20 onward and 2 before.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* FTE flush for the KM side: shared flush with km_if_id 0. */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* FTE flush for the FLM side: shared flush with km_if_id 1. */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the FTE enable bitmap at 'index'.  The valid index range
+ * scales with key_cnt (4 from version 20, else 2); in v21/v22 the
+ * bitmap is kept per KM/FLM interface.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* FTE accessors: KM side uses km_if_id 0, FLM side uses km_if_id 1. */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/* Flush CTE entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write the CTE enable bitmap of one entry.
+ * The v18 layout is shared by versions 18/21/22 here.
+ * Returns 0 or a negative error (bad index/field/version).
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public CTE set/get accessors; thin wrappers around hw_mod_cat_cte_mod(). */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/* Flush CTS entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size;
+
+	/* FPGA versions below 15 used a fixed address size of 8. */
+	if (_VER_ < 15)
+		addr_size = 8;
+	else
+		addr_size = (be->cat.cts_num + 1) / 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write the CAT_A/CAT_B word of one CTS entry.
+ * NOTE(review): unlike hw_mod_cat_cts_flush(), addr_size here has no
+ * _VER_ < 15 special case - presumably fine since only 18/21/22 are
+ * accepted below, but worth confirming.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public CTS set/get accessors; thin wrappers around hw_mod_cat_cts_mod(). */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/* Flush COT entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a COT entry, plus three pseudo
+ * fields: PRESET_ALL fills the whole entry with the low byte of *value
+ * (set only), COMPARE/FIND delegate entry comparison/search to helpers
+ * defined elsewhere in this module.
+ * Returns 0 or a negative error (bad index/field/version, helper errors).
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			/* write-only pseudo field */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			/* on success *value receives the found index */
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public COT set/get accessors; thin wrappers around hw_mod_cat_cot_mod(). */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/* Flush CCT entries [start_idx, start_idx + count); 4 entries per cat func. */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->cat.nb_cat_funcs * 4;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write the color/km field of one CCT entry.
+ * Returns 0 or a negative error (bad index/field/version).
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public CCT set/get accessors; thin wrappers around hw_mod_cat_cct_mod(). */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/* Flush KCC CAM entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->cat.kcc_size;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a KCC CAM entry.  For the KEY
+ * field, word_off selects one of the two 32-bit key words.
+ * Returns 0 or a negative error (bad index/word_off/field/version).
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			/*
+			 * key[] has exactly two words; reject negative
+			 * offsets too (word_off is a plain int, so the old
+			 * "> 1" test alone let word_off < 0 index out of
+			 * bounds).
+			 */
+			if (word_off < 0 || word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public KCC set/get accessors; thin wrappers around hw_mod_cat_kcc_mod(). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush EXO entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->cat.nb_pm_ext;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of an EXO entry.  OFS is a signed
+ * offset (int32_t in cat_v18_exo_s), hence get_set_signed().
+ * Returns 0 or a negative error (bad index/field/version).
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public EXO set/get accessors; thin wrappers around hw_mod_cat_exo_mod(). */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/* Flush RCK entries [start_idx, start_idx + count); 64 words per PM ext. */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->cat.nb_pm_ext * 64;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write the data word of one RCK entry.
+ * Returns 0 or a negative error (bad index/field/version).
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public RCK set/get accessors; thin wrappers around hw_mod_cat_rck_mod(). */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/* Flush LEN entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->cat.nb_len;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a LEN (length check) entry.
+ * Returns 0 or a negative error (bad index/field/version).
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public LEN set/get accessors; thin wrappers around hw_mod_cat_len_mod(). */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/* Flush CCE entries [start_idx, start_idx + count); the table has 4 rows. */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = 4;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a CCE entry (v22 only).
+ * Returns 0 or a negative error (bad index/field/version).
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/*
+	 * Bound the index by the CCE table size (4 rows, matching
+	 * hw_mod_cat_cce_flush()); the old check against be->cat.nb_len
+	 * was copied from the LEN accessor and used the wrong limit.
+	 */
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public CCE set/get accessors; thin wrappers around hw_mod_cat_cce_mod(). */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+/* Flush CCS entries [start_idx, start_idx + count); the table has 1024 rows. */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = 1024;
+
+	if (count == ALL_ENTRIES)
+		count = n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a CCS entry (v22 only).
+ * Returns 0 or a negative error (bad index/field/version).
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/*
+	 * Bound the index by the CCS table size (1024 rows, matching
+	 * hw_mod_cat_ccs_flush()); the old check against be->cat.nb_len
+	 * was copied from the LEN accessor and used the wrong limit.
+	 */
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public CCS set/get accessors; thin wrappers around hw_mod_cat_ccs_mod(). */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/*
+ * Host-side shadow of one CAT categorizer function (CFN) entry, FPGA
+ * module version 18: enable/invert controls, protocol checks, error
+ * checks, ingress port, and pattern-matcher combination logic.
+ */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE: KM category enable bitmap (one word per entry in v18). */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS: KM category select. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE: flow type enable bitmap (single bitmap in v18; see v21 variant). */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/*
+ * CTE: color/table enable bitmap; the union exposes the same word either
+ * as a raw value or as per-module enable bits.
+ */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS: category translation, A/B pair. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT: color override table entry. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT: color control table entry. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO: extractor offset; ofs is signed (see get_set_signed() in the .c). */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK: one data word per entry. */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN: frame length check - bounds, two dyn selectors, invert flag. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC: KM category CAM entry - two key words, category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/*
+ * Per-table pointers for the version 18 CAT module shadow; the arrays
+ * are allocated elsewhere (see callocate_mod usage in this patch).
+ */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/*
+ * CFN entry for CAT version 21: extends v18 with tunnel checksum / TTL
+ * error checks and splits km_or into per-interface km0_or/km1_or.
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* v21 KCE/KCS/FTE: one word per KM/FLM interface (index 0 = KM, 1 = FLM). */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/*
+ * Per-table pointers for the version 21 CAT module shadow; tables that
+ * are unchanged since v18 reuse the v18 entry layouts.
+ */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/*
+ * v22 CTE enable bitmap: same layout as the v18 variant plus an extra
+ * rrb enable bit.
+ */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE entry (new in v22). */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/*
+ * CCS entry (new in v22): per-module enable/value pairs plus three
+ * type/data side-band word pairs; mirrors the HW_CAT_CCS_* accessors.
+ */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/*
+ * Per-table pointers for the version 22 CAT module shadow: v21 key
+ * tables, a widened CTE, and the new CCE/CCS tables.
+ */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend whether the FLM module exists in this FPGA image. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Detect the FLM register-map version, read the module's resource
+ * counts from the backend and allocate one contiguous shadow cache
+ * (callocate_mod) laid out for that version.
+ *
+ * v20 is a strict superset of v17: it shares every v17 register block
+ * and adds twelve statistics counters, hence the duplicated v17 stanza
+ * in the v20 case with twelve extra entries (26 vs 38 allocations).
+ *
+ * Returns 0 on success, -1 if callocate_mod() fails, or the value of
+ * error_resource_count()/error_unsup_ver() on bad backend data or an
+ * unknown version.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	/* Fix: the error path below reported "flm_variant" (copy-paste from
+	 * the variant stanza above); report the resource actually queried.
+	 */
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the FLM shadow cache allocated by hw_mod_flm_alloc(). */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	if (be->flm.base) {
+		free(be->flm.base);
+		be->flm.base = NULL;
+	}
+}
+
+/*
+ * Reset the FLM shadow cache to defaults and flush the static
+ * configuration (control, timeout, scrub and all RCP entries) to the
+ * backend.  Always returns 0; flush errors are not propagated here.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Push the cached CONTROL register image to the backend. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set one CONTROL field in the shadow cache.
+ * HW_FLM_CONTROL_PRESET_ALL memsets the whole control struct with the
+ * low byte of *value and is set-only.  Changes touch only the cache;
+ * use hw_mod_flm_control_flush() to reach the device.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one cached CONTROL field. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one cached CONTROL field into *value. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Push the cached STATUS register image to the backend. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached STATUS register image from the backend. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set one STATUS field in the shadow cache. */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one cached STATUS field. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one cached STATUS field into *value. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Push the cached TIMEOUT register to the backend. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the single TIMEOUT field in the shadow cache. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the cached TIMEOUT value. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the cached TIMEOUT value into *value. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Push the cached SCRUB register to the backend. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the single SCRUB field in the shadow cache. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the cached SCRUB interval field. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the cached SCRUB interval field into *value. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_BIN register to the backend. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_BIN field in the shadow cache. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the cached LOAD_BIN value. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the cached LOAD_BIN value into *value. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_PPS register to the backend. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_PPS field in the shadow cache. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the cached LOAD_PPS value. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the cached LOAD_PPS value into *value. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_LPS register to the backend. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_LPS field in the shadow cache. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the cached LOAD_LPS value. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the cached LOAD_LPS value into *value. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_APS register to the backend. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Get (get != 0) or set the LOAD_APS field in the shadow cache. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the cached LOAD_APS value. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the cached LOAD_APS value into *value. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Push the cached PRIO register image to the backend. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set one PRIO field (four limit/flow-type pairs)
+ * in the shadow cache.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one cached PRIO field. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one cached PRIO field into *value. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Push [start_idx, start_idx + count) of the cached PST table to the
+ * backend.  count == ALL_ENTRIES flushes the whole table; the range is
+ * bounds-checked against nb_pst_profiles.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get (get != 0) or set one field of PST entry [index] in the shadow
+ * cache.  HW_FLM_PST_PRESET_ALL memsets the entry and is set-only.
+ * NOTE(review): index is not range-checked here — callers are presumably
+ * expected to stay below nb_pst_profiles; confirm.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one field of cached PST entry [index]. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached PST entry [index] into *value. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Push [start_idx, start_idx + count) of the cached RCP (recipe) table
+ * to the backend.  count == ALL_ENTRIES flushes all categories; the
+ * range is bounds-checked against nb_categories.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get (get != 0) or set one field of RCP entry [index] in the shadow
+ * cache.  HW_FLM_RCP_PRESET_ALL memsets the entry (set-only);
+ * HW_FLM_RCP_MASK copies the full 10-word mask array in either
+ * direction instead of a single word.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the 10-word RCP mask of entry [index]; only HW_FLM_RCP_MASK is
+ * accepted.  NOTE(review): a wrong field is reported via
+ * error_unsup_ver() rather than error_unsup_field() — confirm this is
+ * intentional.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Set one scalar field of cached RCP entry [index]; HW_FLM_RCP_MASK
+ * must go through hw_mod_flm_rcp_set_mask() instead.
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached RCP entry [index] into *value. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached BUF_CTRL register image from the backend. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field from the shadow cache (read-only register). */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Read one cached BUF_CTRL field into *value. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh all cached FLM statistics counters from the backend. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one cached statistics counter into *value.  The v17 counters
+ * are handled in the outer switch; the nested default handles counters
+ * that only exist from v20 (guarded by the _VER_ < 18 check so a v17
+ * build reports them as unsupported fields).
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters; reject them on older versions */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write a learn record (flm_v17_lrn_data_s, passed as 32-bit words)
+ * to the backend learn queue and return its flush status.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to word_cnt 32-bit words of flow-info records from the
+ * backend into value.  The backend call's own result is not
+ * propagated; the return value only reflects field/version checks.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (flm_v17_sta_data_s, as 32-bit words) from
+ * the backend into value.  As above, only field/version errors are
+ * reported.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay for packing/unpacking two 28-bit member indices out of a
+ * 64-bit span.  The FLM_V17_MBR_IDx macros cast a byte pointer into
+ * the learn record's mbr_idx[14] area to this overlay; ID3/ID4 start
+ * at byte offset 7 (bit 56).  NOTE(review): this relies on the
+ * compiler's bitfield layout and on unaligned, type-punned access
+ * being tolerated — confirm for non-x86 targets (C11 strict aliasing).
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* Shadow image of the FLM v17 CONTROL register fields. */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* Shadow image of the FLM v17 STATUS register fields. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* FLM v17 TIMEOUT register. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* FLM v17 SCRUB register. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* FLM v17 LOAD_BIN register. */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+/* FLM v17 LOAD_PPS register. */
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+/* FLM v17 LOAD_LPS register. */
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+/* FLM v17 LOAD_APS register. */
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* FLM v17 PRIO register: four limit/flow-type pairs. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* FLM v17 PST (profile) table entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* FLM v17 RCP (recipe) table entry, one per flow category. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10]; /* copied en bloc by HW_FLM_RCP_MASK */
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* FLM v17 BUF_CTRL register (read-only queue fill levels). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * Wire-format records exchanged with the FLM learn/info/status queues.
+ * Packed to byte granularity; field comments give the record bit range
+ * as high:low plus the width in bits.
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record read back from the info queue. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Status record read back from the status queue. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* FLM v17 statistics counters — one 32-bit counter register each. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v17 shadow-register layout: pointers into the single cache area
+ * allocated by hw_mod_flm_alloc()/callocate_mod().
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+struct flm_v20_stat_sta_done_s {	/* new-in-v20 counter shadows; semantics per FLM v20 register spec — confirm */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {	/* STAT_INF_DONE counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {	/* STAT_INF_SKIP counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {	/* STAT_PCK_HIT counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {	/* STAT_PCK_MISS counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {	/* STAT_PCK_UNH counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {	/* STAT_PCK_DIS counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {	/* STAT_CSH_HIT counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {	/* STAT_CSH_MISS counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {	/* STAT_CSH_UNH counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {	/* STAT_CUC_START counter shadow */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {	/* STAT_CUC_MOVE counter shadow */
+	uint32_t cnt;
+};
+
+struct hw_mod_flm_v20_s {	/* v20 cache layout: reuses all v17 member struct types, then appends v20-only counters */
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;	/* counters shared with v17 */
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;	/* counters introduced in v20 */
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);	/* delegate to backend: is the HSH module in this FPGA image? */
+}
+
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);	/* caches version in be->hsh.ver */
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;	/* v5 has 32 recipe entries */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;	/* cache allocation failed */
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);	/* only v5 supported */
+	}
+	return 0;
+}
+
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	if (be->hsh.base) {	/* base presumably spans all tables from callocate_mod — one free covers rcp too */
+		free(be->hsh.base);
+		be->hsh.base = NULL;	/* guard against double free / stale pointer */
+	}
+}
+
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);	/* push zeros to HW; nb_rcp == ALL_ENTRIES effect here */
+}
+
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;	/* sentinel expands to whole table */
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)	/* NOTE(review): negative start_idx is not rejected */
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t *value, int get)	/* get!=0: read cache into *value; get==0: write *value to cache */
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);	/* write-only pseudo field */
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)	/* fix: was '>' — mac_port_mask[4], valid 0..3 */
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)	/* fix: was '>' — word_mask[10], valid 0..9 */
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);	/* get=0: write into cache */
+}
+
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);	/* get=1: read from cache */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+struct hsh_v5_rcp_s {	/* cache image of one HSH v5 recipe; field order mirrors register fields */
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];	/* size must match HSH_RCP_MAC_PORT_MASK_SIZE in hw_mod_hsh.c */
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;	/* signed offsets: accessed via get_set_signed() */
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];	/* size must match HSH_RCP_WORD_MASK_SIZE in hw_mod_hsh.c */
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+struct hw_mod_hsh_v5_s {	/* top-level HSH v5 cache: recipe table only */
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);	/* delegate to backend: is the HST module in this FPGA image? */
+}
+
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);	/* caches version in be->hst.ver */
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)	/* zero/negative count means misprobed FPGA */
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;	/* cache allocation failed */
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);	/* only v2 supported */
+	}
+
+	return 0;
+}
+
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	if (be->hst.base) {	/* one free covers all tables packed by callocate_mod */
+		free(be->hst.base);
+		be->hst.base = NULL;	/* guard against double free / stale pointer */
+	}
+}
+
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);	/* push zeroed cache to HW */
+}
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;	/* sentinel expands to whole table */
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)	/* unsigned cast also rejects negative sums */
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)	/* get!=0: read cache into *value; get==0: write *value to cache */
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);	/* write-only pseudo field; memset needs <string.h> */
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			rv = find_equal_index(be->hst.v2.rcp,	/* fix: result was discarded, making the rv check dead */
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);	/* get=0: write; negative index wraps to huge uint32_t and is rejected by bounds check */
+}
+
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);	/* get=1: read */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+struct hst_v2_rcp_s {	/* cache image of one HST v2 recipe: header strip window plus three modifier slots */
+	uint32_t strip_mode;
+	uint32_t start_dyn;	/* strip window start: dynamic anchor + offset */
+	uint32_t start_ofs;
+	uint32_t end_dyn;	/* strip window end: dynamic anchor + offset */
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;	/* modifier slot 0: command/anchor/offset/value */
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;	/* modifier slot 1 */
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;	/* modifier slot 2 */
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+struct hw_mod_hst_v2_s {	/* top-level HST v2 cache: recipe table only */
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);	/* delegate to backend: is the IOA module in this FPGA image? */
+}
+
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);	/* caches version in be->ioa.ver */
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)	/* zero/negative count means misprobed FPGA */
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,	/* 3 tables in one packed allocation */
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;	/* cache allocation failed */
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);	/* only v4 supported */
+	}
+	return 0;
+}
+
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	if (be->ioa.base) {	/* one free covers rcp/tpid/roa_epp packed by callocate_mod */
+		free(be->ioa.base);
+		be->ioa.base = NULL;	/* guard against double free / stale pointer */
+	}
+}
+
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);	/* NOTE(review): flush return values are ignored throughout reset */
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);	/* default custom TPID values */
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;
+}
+
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;	/* sentinel expands to whole table */
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)	/* unsigned cast also rejects negative sums */
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)	/* get!=0: read cache into *value; get==0: write *value to cache */
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);	/* write-only pseudo field */
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);	/* get=0: write into cache */
+}
+
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);	/* get=1: read from cache */
+}
+
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);	/* push cached custom TPIDs to HW */
+}
+
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)	/* set-only: no matching config_get exists in this file */
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;	/* sentinel expands to whole table */
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)	/* get!=0: read cache into *value; get==0: write *value to cache */
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);	/* write-only pseudo field */
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);	/* get=0: write into cache */
+}
+
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);	/* get=1: read from cache */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+struct ioa_v4_rcp_s {	/* cache image of one IOA v4 recipe: tunnel/VLAN pop-push plus RX queue override */
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;	/* VLAN tag fields used when vlan_push is set — confirm vs register spec */
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+struct ioa_v4_special_tpid_s {	/* two configurable custom TPID values (defaults 0x8200/0x8300 set in reset) */
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+struct ioa_v4_roa_epp_s {	/* ROA egress path parameters, cross-indexed from IOA (see NOTE in hw_mod_ioa_alloc) */
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+struct hw_mod_ioa_v4_s {	/* top-level IOA v4 cache: pointers into one callocate_mod() area */
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);	/* delegate to backend: is the KM module in this FPGA image? */
+}
+
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);	/* caches version in be->km.ver */
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)	/* zero/negative count means misprobed FPGA */
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;	/* use named constant (was magic 12) */
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;	/* use named constant (was magic 6) */
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,	/* 5 tables in one packed allocation */
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;	/* cache allocation failed */
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);	/* only v7 supported */
+	}
+
+	return 0;
+}
+
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	if (be->km.base) {	/* one free covers all five tables packed by callocate_mod */
+		free(be->km.base);
+		be->km.base = NULL;	/* guard against double free / stale pointer */
+	}
+}
+
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };	/* all-zero TCAM value triple */
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);	/* NOTE(review): flush return values are ignored throughout reset */
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)	/* flush all banks per TCQ word position */
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;	/* sentinel expands to whole table */
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)	/* unsigned cast also rejects negative sums */
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)	/* get!=0: read cache; get==0: write cache */
+{
+	if ((unsigned int)index >= be->km.nb_categories)	/* unsigned cast also rejects negative index */
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);	/* write-only pseudo field */
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)	/* fix: was '>' — off-by-one; valid 0..SIZE-1 (mask_d_a[] presumed sized by define — confirm in hw_mod_km_v7.h) */
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			if (word_off >= KM_RCP_MASK_B_SIZE)	/* fix: was '>' — off-by-one; valid 0..SIZE-1 */
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0); /* 0 = write into shadow cache */
+}
+
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1); /* 1 = read back from shadow cache */
+}
+
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	unsigned int total = be->km.nb_cam_banks * be->km.nb_cam_records;
+
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+	/* the last record touched must stay inside the CAM shadow table */
+	if ((unsigned int)(start_bank * be->km.nb_cam_records + start_record +
+			   count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field, /* read/write one CAM cell field */
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	unsigned int index = bank * be->km.nb_cam_records + record; /* flat index into the shadow CAM */
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL: /* write-only: fill the whole record with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0); /* 0 = set */
+}
+
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1); /* 1 = get */
+}
+
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256; /* 4 * 256 cells per bank (byte position x byte value) */
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256; /* flush exactly one bank */
+
+	unsigned int end = start_bank * 4 * 256 + count; /* one past last cell flushed */
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field, /* read/write one TCAM cell */
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val; /* one cell per (byte, byte value) */
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET: /* write-only: preset a whole bank and mark it dirty */
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			int index = (int)start_index; /* same cell as above; already bounds-checked */
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only mark dirty if any bits have to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0); /* 0 = set; value_set carries 3 words */
+}
+
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1); /* 1 = get; value_set receives 3 words */
+}
+
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count; /* one past the last record touched */
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field, /* read/write one TCI record */
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record; /* flat index into TCI shadow */
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0); /* 0 = set */
+}
+
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1); /* 1 = get */
+}
+
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count; /* one past the last record touched */
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field, /* read/write one TCQ record */
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record; /* flat index into TCQ shadow */
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0); /* 0 = set; note: value passed by pointer */
+}
+
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1); /* 1 = get */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+struct km_v7_rcp_s { /* shadow of one KM RCP (recipe) record, flushed via km_rcp_flush */
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs; /* offsets are signed; accessed through get_set_signed() */
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12]; /* indexed by word_off for HW_KM_RCP_MASK_A */
+	uint32_t mask_b[6]; /* indexed by word_off for HW_KM_RCP_MASK_B */
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+struct km_v7_cam_s { /* one CAM record: six match words plus six flow-type fields */
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+struct km_v7_tcam_s { /* one TCAM cell: 3 data words; dirty set on change (see hw_mod_km_tcam_mod) */
+	uint32_t t[3];
+	uint32_t dirty; /* presumably cleared by the flush path -- TODO confirm */
+};
+
+struct km_v7_tci_s { /* TCAM category info per record */
+	uint32_t color;
+	uint32_t ft;
+};
+
+struct km_v7_tcq_s { /* TCAM qualifier per record */
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+struct hw_mod_km_v7_s { /* per-table shadow pointers for KM module version 7 */
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev); /* does the FPGA expose a PDB module? */
+}
+
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be) /* query version/counts and allocate the shadow cache */
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2, /* one contiguous allocation for rcp[] + config */
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->pdb.base);
+	be->pdb.base = NULL;
+	/* hw_mod_pdb_alloc() must run again before further use */
+}
+
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0; /* accumulates flush failures */
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err; /* non-zero if any flush failed */
+}
+
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count) /* push [start_idx, start_idx+count) shadow RCPs to hardware */
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be, /* read/write one PDB RCP field in the shadow */
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL: /* write-only: fill the whole record with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND: /* locate a record equal to rcp[index]; result via *value */
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE: /* compare rcp[index] against rcp[*value] */
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL: /* relative offsets are signed */
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0); /* 0 = set */
+}
+
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1); /* 1 = get */
+}
+
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb); /* push shadow config to hardware */
+}
+
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value) /* set-only: no matching config getter in this module */
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+struct pdb_v9_rcp_s { /* shadow of one PDB RCP record, flushed via pdb_rcp_flush */
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel; /* relative offsets are signed (get_set_signed) */
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+struct pdb_v9_config_s { /* module-wide config, flushed via pdb_config_flush */
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+struct hw_mod_pdb_v9_s { /* shadow pointers for PDB module version 9 */
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev); /* does the FPGA expose a QSL module? */
+}
+
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be) /* query version/counts and allocate the shadow cache */
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4, /* one contiguous allocation for all four tables */
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->qsl.base);
+	be->qsl.base = NULL;
+	/* hw_mod_qsl_alloc() must run again before further use */
+}
+
+int hw_mod_qsl_reset(struct flow_api_backend_s *be) /* clear shadow cache and flush every QSL table */
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	hw_mod_qsl_unmq_flush(be, 0, ALL_ENTRIES); /* was a raw iface call hardcoding 256 */
+
+	return 0;
+}
+
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count) /* push [start_idx, start_idx+count) shadow RCPs to hardware */
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be, /* read/write one QSL RCP field in the shadow */
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL: /* write-only: fill the whole record with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND: /* locate a record equal to rcp[index]; result via *value */
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE: /* compare rcp[index] against rcp[*value] */
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0); /* 0 = set */
+}
+
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1); /* 1 = get */
+}
+
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count) /* push [start_idx, start_idx+count) shadow QST entries to hardware */
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be, /* read/write one QST field in the shadow */
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL: /* write-only: fill the whole entry with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0); /* 0 = set */
+}
+
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1); /* 1 = get */
+}
+
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count) /* push [start_idx, start_idx+count) shadow QEN entries to hardware */
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be, /* read/write one QEN field in the shadow */
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0); /* 0 = set */
+}
+
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1); /* 1 = get */
+}
+
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count) /* push [start_idx, start_idx+count) shadow UNMQ entries to hardware */
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be, /* read/write one UNMQ field in the shadow */
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0); /* 0 = set */
+}
+
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1); /* 1 = get */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+struct qsl_v7_rcp_s { /* shadow of one QSL RCP record, flushed via qsl_rcp_flush */
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+struct qsl_v7_qst_s { /* one queue selection table entry */
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+struct qsl_v7_qen_s { /* queue enable entry (QSL_QEN_ENTRIES of these) */
+	uint32_t en;
+};
+
+struct qsl_v7_unmq_s { /* unmatched-traffic queue entry (QSL_QNMQ_ENTRIES of these) */
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+struct hw_mod_qsl_v7_s { /* per-table shadow pointers for QSL module version 7 */
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev); /* does the FPGA expose an RMC module? */
+}
+
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be) /* query version and allocate the ctrl shadow */
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003: /* version 1.3 encoded as major<<16 | minor */
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->rmc.base);
+	be->rmc.base = NULL;
+	/* hw_mod_rmc_alloc() must run again before further use */
+}
+
+int hw_mod_rmc_reset(struct flow_api_backend_s *be) /* set safe defaults in the ctrl shadow and flush */
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	/* duplicate HW_RMC_BLOCK_STATT call removed (already set above) */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc); /* push shadow ctrl to hardware */
+}
+
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be, /* read/write one ctrl field in the shadow */
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003: /* version 1.3 */
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0); /* 0 = set */
+}
+
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1); /* 1 = get */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Shadow copy of the RMC v1.3 control register fields. */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;		/* block statistics traffic */
+	uint32_t block_keepa;		/* block keep-alive traffic */
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;	/* 0xff blocks all MAC ports (see reset) */
+	uint32_t lag_phy_odd_even;
+};
+
+/* Top-level RMC v1.3 cache layout: one control record. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Ask the backend whether the ROA module exists in this FPGA. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_roa_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the backend for the ROA module version and resource counts and
+ * allocate the shadow cache for all ROA banks in one block (be->roa.base).
+ * Returns 0 on success, negative on unsupported version, bad resource
+ * count, or allocation failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): tunnel categories are scaled down by 4 here —
+	 * presumably 4 flow categories share one tunnel record; confirm
+	 * against the ROA FPGA module spec.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		/* one contiguous allocation covering all four banks */
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the ROA shadow cache allocated by hw_mod_roa_alloc(). */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset the ROA module: zero the shadow cache, program default config
+ * values and flush every bank to hardware.
+ *
+ * Fix: the original captured only the TUNHDR flush result and silently
+ * discarded the return values of the TUNCFG/CONFIG/LAGCFG operations.
+ * All results are now accumulated with |=, the same pattern used by
+ * hw_mod_tpe_reset(), so a failing flush is reported to the caller.
+ * Returns 0 on success, non-zero if any step failed.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush [start_idx, start_idx + count) tunnel-header records to hardware. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const uint32_t nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one 32-bit word of a cached tunnel header.
+ * @index:    tunnel category, must be < be->roa.nb_tun_categories.
+ * @word_off: word offset inside the tunnel_hdr[] array.
+ * @value:    input when get == 0, output when get != 0.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			/* compare cached entries; word_off is the word index */
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one tunnel-header word (write access). */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Fetch one cached tunnel-header word into *value. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, do_get);
+}
+
+/* Flush [start_idx, start_idx + count) tunnel-config records to hardware. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const uint32_t nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one field of a cached tunnel-config record.
+ * @index: tunnel category, must be < be->roa.nb_tun_categories.
+ * @value: input when get == 0, output when get != 0.
+ * PRESET_ALL fills the whole record with a byte pattern (set only);
+ * FIND/COMPARE operate across all records with this record size.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one tunnel-config field (write access). */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_tuncfg_mod(be, field, index, &v, 0);
+}
+
+/* Fetch one cached tunnel-config field into *value. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, do_get);
+}
+
+/* Write the cached ROA config record through to the hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev,
+					   &be->roa);
+}
+
+/*
+ * Read or write one field of the single cached ROA config record.
+ * @value: input when get == 0, output when get != 0.
+ * Returns 0, or a negative error for an unsupported field or version.
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one ROA config field (write access). */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_config_mod(be, field, &v, 0);
+}
+
+/* Fetch one cached ROA config field into *value. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_config_mod(be, field, value, do_get);
+}
+
+/* Flush [start_idx, start_idx + count) LAG-config entries to hardware. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const uint32_t nb = be->roa.nb_lag_entries;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one field of a cached LAG-config entry.
+ * @index: LAG entry, must be < be->roa.nb_lag_entries.
+ * @value: input when get == 0, output when get != 0.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one LAG-config field (write access). */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_lagcfg_mod(be, field, index, &v, 0);
+}
+
+/* Fetch one cached LAG-config field into *value. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* Raw tunnel header: 4 * 4 32-bit words = 64 bytes per category. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration shadow record. */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;		/* IP checksum update */
+	uint32_t ipcs_precalc;		/* precalculated IP checksum */
+	uint32_t iptl_upd;		/* IP total-length update */
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Single global ROA forwarding configuration record. */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* Per-LAG-entry TX physical port selection. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Top-level ROA v6 cache layout: four banks in one allocation. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Ask the backend whether the SLC module exists in this FPGA. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_slc_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the backend for the SLC module version and allocate the RCP
+ * shadow cache (one record per category, be->max_categories in total).
+ * Returns 0 on success, negative on unsupported version or allocation
+ * failure.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC shadow cache allocated by hw_mod_slc_alloc(). */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Reset SLC: clear the shadow cache and flush every RCP entry. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) SLC RCP records to hardware. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const uint32_t nb = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read or write one field of a cached SLC RCP record.
+ * @index: category, must be < be->max_categories.
+ * @value: input when get == 0, output when get != 0.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ *
+ * Fix: PRESET_ALL, FIND and COMPARE previously used
+ * sizeof(struct hw_mod_slc_v1_s) — the container holding only the rcp
+ * *pointer* — as the record size, so memset covered just a pointer's
+ * worth of bytes and the index helpers walked the array with the wrong
+ * stride. The correct record size is sizeof(struct slc_v1_rcp_s),
+ * matching how ROA/TPE pass their element struct sizes.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is a signed offset */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one SLC RCP field (write access). */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_slc_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Fetch one cached SLC RCP field into *value. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_slc_rcp_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Ask the backend whether the SLC LR module exists in this FPGA. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_slc_lr_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the backend for the SLC LR module version and allocate the RCP
+ * shadow cache (one record per category, be->max_categories in total).
+ * Returns 0 on success, negative on unsupported version or allocation
+ * failure.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC LR shadow cache allocated by hw_mod_slc_lr_alloc(). */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Reset SLC LR: clear the shadow cache and flush every RCP entry. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) SLC LR RCP records to hardware. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const uint32_t nb = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one field of a cached SLC LR RCP record.
+ * @index: category, must be < be->max_categories.
+ * @value: input when get == 0, output when get != 0.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ *
+ * Fix: PRESET_ALL, FIND and COMPARE previously used
+ * sizeof(struct hw_mod_slc_lr_v2_s) — the container holding only the
+ * rcp *pointer* — as the record size, so memset covered just a
+ * pointer's worth of bytes and the index helpers walked the array with
+ * the wrong stride. The correct record size is
+ * sizeof(struct slc_lr_v2_rcp_s), matching ROA/TPE usage.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is a signed offset */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one SLC LR RCP field (write access). */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Fetch one cached SLC LR RCP field into *value. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* Per-category SLC LR slicer recipe shadow record. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Top-level SLC LR v2 cache layout: one RCP table. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* Per-category SLC slicer recipe shadow record. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Top-level SLC v1 cache layout: one RCP table. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Ask the backend whether the TPE module exists in this FPGA. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_tpe_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the backend for the TPE module version and all resource counts,
+ * then allocate the shadow cache for every TPE sub-bank in one block
+ * (be->tpe.base). IFR categories exist only for version > 1.
+ * Returns 0 on success, negative on unsupported version, bad resource
+ * count, or allocation failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	/* IFR (fragmentation) recipes were introduced after version 1 */
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		/* v1: 8 banks — no IFR tables */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		/* v2: 10 banks — adds RPP IFR and IFR recipe tables */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the TPE shadow cache allocated by hw_mod_tpe_alloc(). */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Reset TPE: zero the shadow cache and flush every bank to hardware.
+ * Each flush result is OR-ed into err so a single failure is reported.
+ * Returns 0 on success, non-zero if any flush failed.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* IFR tables only exist in version 2 */
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) RPP IFR RCP records to hardware. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	const uint32_t nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/*
+ * Read or write one field of a cached RPP IFR RCP record (v2 only).
+ * @index: IFR category, must be < be->tpe.nb_ifr_categories.
+ * @value: input when get == 0, output when get != 0.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one RPP IFR RCP field (write access). */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Fetch one cached RPP IFR RCP field into *value. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, do_get);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) RPP RCP records to hardware. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one field of a cached RPP RCP record.
+ * @index: RCP category, must be < be->tpe.nb_rcp_categories.
+ * @value: input when get == 0, output when get != 0.
+ * PRESET_ALL fills the record with a byte pattern (set only);
+ * FIND/COMPARE operate across all records.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		/* v1 and v2 share the rpp_rcp layout (union aliasing) */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one RPP RCP field (write access). */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Fetch one cached RPP RCP field into *value. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, do_get);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) IFR RCP records to hardware. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one field of a cached IFR RCP record (v2 only).
+ * @index: IFR category, must be < be->tpe.nb_ifr_categories.
+ * @value: input when get == 0, output when get != 0.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one IFR RCP field (write access). */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Fetch one cached IFR RCP field into *value. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, do_get);
+}
+
+/*
+ * INS_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) INS RCP records to hardware. */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one field of a cached INS (header insert) RCP record.
+ * @index: RCP category, must be < be->tpe.nb_rcp_categories.
+ * @value: input when get == 0, output when get != 0.
+ * PRESET_ALL fills the record with a byte pattern (set only);
+ * FIND/COMPARE operate across all records.
+ * Returns 0, or a negative error (bad index / unsupported field / version).
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		/* v1 and v2 share the ins_rcp layout (union aliasing) */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache one INS RCP field (write access). */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Write RPL_RCP shadow entries [start_idx, start_idx + count) to hardware.
+ * ALL_ENTRIES selects the whole table (be->tpe.nb_rcp_categories entries).
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one RPL_RCP shadow entry.
+ * get != 0 reads the selected field into *value; get == 0 writes *value.
+ * HW_TPE_PRESET_ALL memsets the whole record to (uint8_t)*value (set only).
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL_RCP field at index. */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL_RCP field at index into *value. */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Write RPL_EXT shadow entries [start_idx, start_idx + count) to hardware.
+ * This table is sized by be->tpe.nb_rpl_ext_categories, not the RCP count.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one RPL_EXT shadow entry.
+ * get != 0 reads the selected field into *value; get == 0 writes *value.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			/* SW-only bookkeeping field (see tpe_v1_rpl_v2_ext_s) */
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL_EXT field at index. */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL_EXT field at index into *value. */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Write RPL_RPL (replacement data) shadow entries [start_idx,
+ * start_idx + count) to hardware.  Sized by be->tpe.nb_rpl_depth.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one RPL_RPL shadow entry.
+ * HW_TPE_RPL_RPL_VALUE moves the full 4 x 32-bit replacement words, so
+ * *value must point at an array of at least four uint32_t.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* 128-bit payload: copy all four words in one go */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set one RPL_RPL field at index.  Unlike the other *_set helpers this one
+ * takes a pointer, because HW_TPE_RPL_RPL_VALUE writes four words.
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Get one RPL_RPL field at index into *value (four words for VALUE). */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Write CPY_RCP shadow entries [start_idx, start_idx + count) to hardware.
+ * The CPY table is flattened over all copy writers, so its size is
+ * nb_cpy_writers * nb_rcp_categories.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one CPY_RCP shadow entry.  `index` addresses the
+ * flattened (writer, category) table; see cpy_size above.
+ * get != 0 reads the selected field into *value; get == 0 writes *value.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CPY_RCP field at (flattened) index. */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CPY_RCP field at (flattened) index into *value. */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Write HFU_RCP shadow entries [start_idx, start_idx + count) to hardware.
+ * ALL_ENTRIES selects the whole table (be->tpe.nb_rcp_categories entries).
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one HFU_RCP shadow entry.
+ * get != 0 reads the selected field into *value; get == 0 writes *value.
+ * Field names mirror struct tpe_v1_hfu_v1_rcp_s; the cases are grouped
+ * LEN_A / LEN_B / LEN_C / TTL / checksum-and-offset info, in that order.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HFU_RCP field at index. */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one HFU_RCP field at index into *value. */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Write CSU_RCP shadow entries [start_idx, start_idx + count) to hardware.
+ * ALL_ENTRIES selects the whole table (be->tpe.nb_rcp_categories entries).
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one CSU_RCP shadow entry (outer/inner L3/L4
+ * checksum commands).  get != 0 reads into *value; get == 0 writes *value.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CSU_RCP field at index. */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CSU_RCP field at index into *value. */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/*
+ * Shadow-copy record layouts for the TPE (TX packet editor) sub-modules,
+ * version 1.  Field names mirror the FPGA register fields read/written by
+ * the hw_mod_tpe accessors; exact field semantics follow the FPGA register
+ * documentation (not visible here) — confirm against the register defines.
+ */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS recipe: insert at ofs/len with dynamic-offset selector dyn. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL recipe: replace region plus pointer into the replacement table. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* One 128-bit replacement-data entry (4 x 32-bit words). */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* HFU recipe: three length updaters (A/B/C), TTL update and L3/L4 info. */
+struct tpe_v1_hfu_v1_rcp_s {
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU recipe: checksum commands for outer/inner L3 and L4 headers. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* Version-1 collection of TPE shadow tables (one array per sub-module). */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP IFR recipe (v2 addition): enable flag plus MTU value. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR recipe (v2 addition): enable flag plus MTU value. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/*
+ * Version-2 collection of TPE shadow tables: the full v1 set plus the two
+ * IFR tables introduced in TPE v2.
+ */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian (network byte order) integer aliases used by the header structs. */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Pair of logical queue id and its hardware queue id. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Match-item types, mirroring the RTE_FLOW_ITEM_TYPE_* set this PMD supports. */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather a restoration API
+	 * device specific extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Action types; the trailing comment names the conf structure each one takes. */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather a
+	 * restoration API device specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+#pragma pack(1)
+/* MAC address as raw bytes; packed to match on-wire layout. */
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format a MAC address as "XX:XX:XX:XX:XX:XX" (upper-case hex) into buf.
+ * snprintf guarantees NUL-termination when size > 0; output is truncated
+ * if size < 18.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header
+ * All multi-byte fields are big-endian (be16_t/be32_t); structs are packed
+ * to match on-wire layout exactly.
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry in a match-pattern list: spec plus optional bitmask of same type. */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+/* RSS configuration; layout parallels struct rte_flow_action_rss. */
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /**< VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ * Raw header data plus the parsed elements it decomposes into (at most
+ * RAW_ENCAP_DECAP_ELEMS_MAX).
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ */
+/* Field IDs for MODIFY_FIELD; mirrors enum rte_flow_field_id. */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * The union arm in use is selected by `field`: level/offset for packet
+ * fields, value[] for FLOW_FIELD_VALUE, pvalue for FLOW_FIELD_POINTER.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0,
+	FLOW_MODIFY_ADD,
+	FLOW_MODIFY_SUB,
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ * Copy/modify `width` bits from src into dst using `operation`.
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation;
+	struct flow_action_modify_data dst;
+	struct flow_action_modify_data src;
+	uint32_t width;
+};
+
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf;
+};
+
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+struct flow_error {
+	enum flow_error_e type;
+	const char *message;
+};
+
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY,
+	FLOW_LAG_SET_ALL,
+	FLOW_LAG_SET_BALANCE,
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion;
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 6/8] net/ntnic: adds flow logic
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-22 15:41   ` [PATCH v7 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-22 15:41   ` Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-22 15:41   ` [PATCH v7 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* Update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. To too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (error) {
+		error->message = err_msg[msg].message;
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+			i += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+			flow_nic_mark_resource_used(ndev, res_type, i);
+			ndev->res[res_type].ref[i] = 1;
+			return i;
+		}
+	}
+	return -1;
+}
+
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return 0;
+	}
+	return -1;
+}
+
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
+			i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number of flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here the 1st VLAN is used to refer to the C-VLAN, which is only
+		 * valid for a single-VLAN provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The cardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_OUTER_DST_IP);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The cardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_TUN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_INNER_SRC_IP);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The cardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev = dev_base;
+
+	while (nic_dev) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+		nic_dev = nic_dev->next;
+	}
+
+	if (!nic_dev)
+		return NULL;
+
+	struct flow_eth_dev *dev = nic_dev->eth_base;
+
+	while (dev) {
+		if (port == dev->port)
+			return dev;
+		dev = dev->next;
+	}
+
+	return NULL;
+}
+
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = dev_base;
+
+	while (ndev) {
+		if (adapter_no == ndev->adapter_no)
+			break;
+		ndev = ndev->next;
+	}
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each pair of ports for each bit N as a LAG. Ports N*2 and N*2+1
+	 * are merged together and reported as incoming port N*2
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow description (items + actions) without creating it.
+ * The vSwitch profile is rejected as unsupported; all other profiles
+ * delegate to the inline-profile implementation.
+ * Returns 0 on success, -1 on error.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Create a flow on the eth device. The vSwitch profile is rejected as
+ * unsupported; all other profiles delegate to the inline-profile
+ * implementation. Returns the new flow handle, or NULL on error.
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/*
+ * Destroy a single flow on the eth device. The vSwitch profile is
+ * rejected as unsupported. Returns 0 on success, -1 on error.
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Remove all flows created on the eth device. The vSwitch profile is
+ * rejected as unsupported. Returns 0 on success, -1 on error.
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Query information about a flow through one of its actions; the result
+ * is returned via *data / *length. The vSwitch profile is rejected as
+ * unsupported. Returns 0 on success, -1 on error.
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Prepend an eth-port device to the owning NIC's device list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	struct flow_eth_dev *old_head = ndev->eth_base;
+
+	dev->next = old_head;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink an eth-port device from the owning NIC's device list.
+ * Returns 0 when found and removed, -1 when the device is not in the list.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	for (; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on a NIC device: delete every eth-port
+ * device (which deletes the flows it owns), release flow-management
+ * resources and, in debug builds, verify that all resource elements
+ * have been returned to the allocator.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check - no flow may remain once all eth-ports are gone */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+		/*
+		 * The former nested "#if defined(FLOW_DEBUG)" guard was
+		 * redundant - this code is already inside #ifdef FLOW_DEBUG.
+		 */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset the NIC device identified by adapter number: remove all flows
+ * and eth-port devices, then reset the hardware backend state.
+ * Returns 0 on success, -1 if no such adapter is registered.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (ndev == NULL)
+		return -1;
+
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * Create (or re-create) the eth-port device for an adapter/port pair and
+ * allocate its rx queues.
+ *
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ *
+ * Returns the new eth-port device, or NULL on error.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		/*
+		 * BUGFIX: re-acquire base_mtx here. It was previously left
+		 * unlocked, so the unlock on the success/error paths below
+		 * operated on a mutex this thread did not own (undefined
+		 * behavior per POSIX).
+		 */
+		pthread_mutex_lock(&base_mtx);
+		eth_dev = NULL;
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/*
+		 * BUGFIX: base_mtx is held here and err_exit1 does not
+		 * release it; unlock before jumping to avoid leaving the
+		 * global lock taken forever.
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* enable each allocated queue in the QSL QEN table */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	if (eth_dev)
+		free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an additional rx queue on an eth device and enable it in the
+ * QSL queue-enable (QEN) table. Always returns 0.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	struct flow_api_backend_s *be = &eth_dev->ndev->be;
+	const int hw = queue_id->hw_id;
+	uint32_t enable_bits = 0;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	/* QEN entries pack four queues per word; set this queue's bit */
+	hw_mod_qsl_qen_get(be, HW_QSL_QEN_EN, hw / 4, &enable_bits);
+	enable_bits |= 1 << (hw % 4);
+	hw_mod_qsl_qen_set(be, HW_QSL_QEN_EN, hw / 4, enable_bits);
+	hw_mod_qsl_qen_flush(be, hw / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device and everything it owns: its flows, its
+ * unmatched-packet (QSL UNM) setup and its rx queues, then unlink it
+ * from the NIC device and free it. Returns 0 on success, -1 on error.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/*
+				 * BUGFIX: release the NIC mutex before the
+				 * error return; it was previously left held,
+				 * deadlocking later operations on this NIC.
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* clear this device's queue bits in the QSL QEN table */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/*
+ * Thin wrapper: look up a tunnel definition by flow statistics id and
+ * virtual port; forwards the underlying return code unchanged.
+ */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	int res = tunnel_get_definition(tun, flow_stat_id, vport);
+
+	return res;
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPGA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate the allocation bitmap and per-element reference counters for
+ * one resource type in a single calloc'ed region (released as one block
+ * by done_resource_elements()). Returns 0 on success, -1 on failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	struct hw_mod_resource_s *res = &ndev->res[res_type];
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+
+	assert(res->alloc_bm == NULL);
+	/* one region: bitmap first, then the uint32_t ref counters */
+	res->alloc_bm = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (res->alloc_bm == NULL)
+		return -1;
+
+	res->ref = (uint32_t *)&res->alloc_bm[bm_bytes];
+	res->resource_count = count;
+	return 0;
+}
+
+/*
+ * Release the combined bitmap/ref-counter allocation for one resource
+ * type (allocated by init_resource_elements()).
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so the former NULL guard was redundant */
+	free(ndev->res[res_type].alloc_bm);
+}
+
+/* Insert a NIC device at the head of the global device list (locked). */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *old_head = dev_base;
+
+	ndev->next = old_head;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Remove a NIC device from the global device list (locked).
+ * Returns 0 when found and removed, -1 otherwise.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	int res = -1;
+	struct flow_nic_dev **link;
+
+	pthread_mutex_lock(&base_mtx);
+	for (link = &dev_base; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			res = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return res;
+}
+
+/*
+ * Create and register a flow NIC device on top of a hardware backend.
+ * Initializes the backend, sizes all resource allocators from the
+ * backend's capabilities and inserts the device in the global list.
+ * Returns the new device, or NULL on error.
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* only backend interface version 1 is accepted */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* clamp the addressable in-port count to 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module.
+	 * Table of per-type allocator sizes reported by the backend.
+	 */
+	{
+		const struct {
+			enum res_type_e type;
+			uint32_t count;
+		} res_init[] = {
+			{ RES_QUEUE, ndev->be.max_queues },
+			{ RES_CAT_CFN, ndev->be.cat.nb_cat_funcs },
+			{ RES_CAT_COT, ndev->be.max_categories },
+			{ RES_CAT_EXO, ndev->be.cat.nb_pm_ext },
+			{ RES_CAT_LEN, ndev->be.cat.nb_len },
+			{ RES_KM_FLOW_TYPE, ndev->be.cat.nb_flow_types },
+			{ RES_KM_CATEGORY, ndev->be.km.nb_categories },
+			{ RES_HSH_RCP, ndev->be.hsh.nb_rcp },
+			{ RES_PDB_RCP, ndev->be.pdb.nb_pdb_rcp_categories },
+			{ RES_QSL_RCP, ndev->be.qsl.nb_rcp_categories },
+			{ RES_QSL_QST, ndev->be.qsl.nb_qst_entries },
+			{ RES_SLC_RCP, ndev->be.max_categories },
+			{ RES_IOA_RCP, ndev->be.ioa.nb_rcp_categories },
+			{ RES_ROA_RCP, ndev->be.roa.nb_tun_categories },
+			{ RES_FLM_FLOW_TYPE, ndev->be.cat.nb_flow_types },
+			{ RES_FLM_RCP, ndev->be.flm.nb_categories },
+			{ RES_HST_RCP, ndev->be.hst.nb_hst_rcp_categories },
+			{ RES_TPE_RCP, ndev->be.tpe.nb_rcp_categories },
+			{ RES_TPE_EXT, ndev->be.tpe.nb_rpl_ext_categories },
+			{ RES_TPE_RPL, ndev->be.tpe.nb_rpl_depth },
+		};
+
+		for (size_t i = 0;
+				i < sizeof(res_init) / sizeof(res_init[0]);
+				i++) {
+			if (init_resource_elements(ndev, res_init[i].type,
+						   res_init[i].count))
+				goto err_exit;
+		}
+	}
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Tear down a flow NIC device: delete all flows and eth-port devices,
+ * release the resource allocators, shut down the backend and unlink the
+ * device from the global list. NULL is a no-op. Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev == NULL)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int res = 0; res < RES_COUNT; res++)
+		done_resource_elements(ndev, res);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the backend-private device handle, or NULL for a NULL ndev. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev)
+		return ndev->be.be_dev;
+
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Return the number of rx queues allocated on the given adapter/port,
+ * or -1 when no eth device is registered for that pair (previously
+ * this dereferenced the NULL lookup result).
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no eth device on adapter %u port %u\n",
+		       adapter_no, port_no);
+		return -1;
+	}
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the hardware queue id of a given rx queue on an adapter/port,
+ * or -1 when no eth device is registered for that pair (previously
+ * this dereferenced the NULL lookup result).
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no eth device on adapter %u port %u\n",
+		       adapter_no, port_no);
+		return -1;
+	}
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Fetch FLM statistics into data[]; implemented for the inline profile
+ * only. Returns -1 for any other profile.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/*
+ * Allocator state for one NIC resource type: an allocation bitmap plus a
+ * per-element reference counter, both sized by resource_count (the two
+ * live in one allocation - see init_resource_elements()).
+ */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+/* Bit positions match the corresponding DPDK RTE_ETH_RSS_* flags (see
+ * the comment above) so values can be passed through unchanged.
+ */
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields;
+};
+
+/*
+ * Per-port eth device created by flow_get_eth_dev(); at most one exists
+ * per (adapter, port) pair. Linked into the owning NIC's eth_base list.
+ */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next;
+};
+
+/* Hash algorithm selection passed to flow_nic_set_hasher(). */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	/* nonzero once flow management has been prepared; checked on the
+	 * first flow_get_eth_dev() for the NIC
+	 */
+	int flow_mgnt_prepared;
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	/* opaque per-module resource-management handles */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* serializes flow/eth-dev operations on this NIC */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/*
+ * Error codes reported to callers through flow_nic_set_error(), which
+ * fills in a struct flow_error. Values are part of the API contract;
+ * append new codes before ERR_MSG_NO_MSG rather than renumbering.
+ */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+/* Set bit x in the byte-array bitmap arr. */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+/*
+ * Clear bit x in the byte-array bitmap arr.
+ * FIX: the arr argument is now captured in a parenthesized temporary,
+ * like flow_nic_set_bit; the previous expansion used it unparenthesized.
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/*
+ * Test bit x in the byte-array bitmap arr; evaluates nonzero when set.
+ * FIX: arr is captured in a parenthesized temporary (was unparenthesized).
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/*
+ * Mark one element of a resource type as allocated in the NIC's bitmap;
+ * asserts the element was previously free.
+ */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/* Mark one element of a resource type as free in the NIC's bitmap. */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* Nonzero (1) when the given resource element is marked allocated. */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+/*
+ * Resource allocator API. NOTE(review): return conventions (index vs 0,
+ * negative on failure) are assumed from usage in flow_api.c - confirm
+ * against the implementations.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+/* Reference counting of individual resource elements. */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+/* Allocate resources and attach them to a flow handle. */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ *
+ * Eight CFNs share one FTE enable bitmap, so cfn_index is split into an
+ * FTE entry index (fte_index) and a bit position within that entry
+ * (fte_field).  The entry is only written and flushed to hardware when
+ * the bitmap actually changes.  Always returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	/* Read the cached enable bitmap for this FTE entry */
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Skip the hardware flush when the bit is already in the wanted state */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ *
+ * FLM counterpart of set_flow_type_km(): same FTE entry/bit split,
+ * same write-only-on-change behavior.  Always returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	/* Read the cached enable bitmap for this FTE entry */
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Skip the hardware flush when the bit is already in the wanted state */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a logical RX queue id into its hardware queue id by
+ * scanning the device's RX queue table.
+ * Returns the hw_id on a match, -1 when the id is unknown.
+ */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int i;
+
+	for (i = 0; i < dev->num_queues; i++) {
+		if (dev->rx_queue[i].id != id)
+			continue;
+		return dev->rx_queue[i].hw_id;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Wait for the FLM SDRAM (DDR4) calibration to finish, then apply the
+ * initial flow scrubber and timeout settings.
+ * Returns 0 on success, -1 when calibration never completes.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	/* Poll CALIBDONE: up to 10^6 iterations with usleep(1) (~1 second) */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reset and re-initialize the FLM SDRAM, then set the FLM enable state
+ * to 'enable'.  Sequence: disable FLM and clear all RCPs, wait for the
+ * FLM to go idle, run SDRAM init, and finally re-enable.
+ * Returns 0 on success, -1 when the FLM never goes idle or the SDRAM
+ * initialization never completes.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Clear all RCP entries except index 0 (starts at i = 1) */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Poll INITDONE: up to 10^6 iterations with usleep(1) (~1 second) */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Identity of a flow-type (FT) action set.  The bit-fields summarize
+ * the actions of a flow definition so that flows with identical actions
+ * can share one FT entry; 'data' allows the whole identity to be
+ * compared or cleared as a single 64-bit value (see
+ * flow_def_to_ft_ident() and flm_flow_learn_prepare()).
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;	/* entry is allocated */
+			uint64_t drop : 1;	/* no destinations: drop */
+			uint64_t ltx_en : 1;
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data;	/* whole identity as one word */
+	};
+};
+
+/*
+ * FLM lookup-key layout: DYN/OFS selection for the two quad-words
+ * (QW0/QW4) and two single-words (SW8/SW9) of the key, plus outer and
+ * inner protocol flags.  Mirrors the HW_FLM_RCP_*_DYN/_OFS fields
+ * programmed in flm_flow_setup_rcp(); 'data' allows one-shot 64-bit
+ * comparison of two key definitions.
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data;	/* whole definition as one word */
+	};
+};
+
+/*
+ * Build the FT identity for a flow definition: destinations (drop,
+ * physical port, virtual queue), tunnel encapsulation parameters,
+ * decap end point, and jump group.  Returned by value.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	/* The identity must pack exactly into its 64-bit union view */
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		/* No destination means the flow drops traffic */
+		ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* UINT32_MAX marks "no jump" in the flow definition */
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+/*
+ * Store the DYN/OFS selection for quad-word qw of the FLM key
+ * definition; qw == 0 selects QW0, qw == 1 selects QW4.
+ */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw != 0) {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->qw0_dyn = dyn & 0x7f;
+	key_def->qw0_ofs = ofs & 0xff;
+}
+
+/*
+ * Store the DYN/OFS selection for single-word sw of the FLM key
+ * definition; sw == 0 selects SW8, sw == 1 selects SW9.
+ */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw != 0) {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->sw8_dyn = dyn & 0x7f;
+	key_def->sw8_ofs = ofs & 0xff;
+}
+
+/*
+ * Per-group FLM state.  Each group owns the group-0 CFN/KM flow type
+ * it was created from, the key definition shared by all flows in the
+ * group, and a table of flow-type action sets (index 0/1 are reserved;
+ * allocation starts at 2, see flm_flow_learn_prepare()).
+ */
+struct flm_flow_group_s {
+	int cfn_group0;		/* CFN of the group-0 filter, -1 when unused */
+	int km_ft_group0;	/* KM flow type of the group-0 filter */
+	struct flow_handle *fh_group0;	/* flow handle of the group-0 (miss) flow */
+
+	struct flm_flow_key_def_s key_def;	/* key layout shared by the group */
+
+	int miss_enabled;	/* group-0 CFN has been switched to FLM miss */
+
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;	/* action-set identity, 0 == free */
+		struct flow_handle *fh;	/* owner flow of this FT entry */
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Most recently matched FT index ("cashed" is a typo for "cached") */
+	uint32_t cashed_ft_index;
+};
+
+/* Top-level FLM handle: one group slot per FLM RCP */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (first call) or reset (subsequent calls) the FLM flow
+ * handle, then mark every group slot as unused (cfn_group0 == -1,
+ * no group-0 flow handle).
+ *
+ * On allocation failure *handle is left NULL so the caller can detect
+ * out-of-memory instead of this function dereferencing NULL.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle) {
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+		if (!*handle)
+			return;	/* out of memory - nothing to initialize */
+	} else {
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+	}
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Release the FLM flow handle created by flm_flow_handle_create(). */
+static void flm_flow_handle_remove(void **handle)
+{
+	void *flm_handle = *handle;
+
+	*handle = NULL;
+	free(flm_handle);
+}
+
+/*
+ * Register the group-0 filter (CFN, KM flow type and flow handle) that
+ * backs FLM group 'group_index'.  FLM miss handling stays disabled
+ * until the first learn (see flm_flow_learn_prepare()).
+ * Returns 0 on success, -1 for an out-of-range group index.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Clear all software state of FLM group 'group_index' and mark it
+ * unused (cfn_group0 == -1).  Hardware teardown is done separately in
+ * flm_flow_destroy_rcp().
+ * Returns 0 on success, -1 for an out-of-range group index.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Fetch the flow handle of the group-0 (miss) flow for 'group_index'
+ * into *fh_miss.  Returns 0 on success, -1 for an out-of-range group
+ * index; *fh_miss may be NULL when the group has no miss flow.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = flm_handle->groups[group_index].fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program FLM RCP 'group_index' from the key definition and packet
+ * mask: key word selection (QW0/QW4/SW8/SW9 DYN/OFS), the 10-word
+ * lookup mask, the KID (group_index + 2; KID 1 is used by meters, see
+ * flow_mtr_create_meter()), protocol flags, and byte counter position,
+ * then flushes the RCP to hardware.  Returns 0 on success, -1 for an
+ * out-of-range group index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask into the word order the FLM expects */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	/* Byte counting starts at dyn 0, offset -20 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down FLM RCP 'group_index' in hardware.  When the group has FLM
+ * miss handling enabled, the group-0 CFN is also restored: its RCP
+ * selection is pointed back to 0, FT MISS is swapped back to FT
+ * UNHANDLED, and the FLM enable bit for the CFN is cleared.
+ * Returns 0 on success, -1 for an out-of-range group index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		/* KCE holds one enable bit per CFN, eight CFNs per entry */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare an FLM learn operation for flow 'fh' in 'group_index'.
+ *
+ * On the first learn of a group this also arms the group: the FLM RCP
+ * is allocated and programmed, the group-0 CFN is pointed at it, FT
+ * UNHANDLED is swapped for FT MISS, and the CFN's FLM enable bit is
+ * set.  All flows of a group must share the same key definition.
+ *
+ * Outputs: *kid (group_index + 2) and *ft (the FT entry used).  When a
+ * new FT entry is allocated, *cfn_to_copy/*cfn_to_copy_km_ft tell the
+ * caller which group-0 CFN/KM flow type to copy; when the action set
+ * already exists, *fh_existing is set to the owning flow instead.
+ * Returns 0 on success, -1 on invalid group, unset CFN, resource
+ * shortage, or key definition mismatch.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows of one group must share a single key definition */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the last used FT entry matches this action set */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT entries 0 and 1 are reserved; search from index 2 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the FT entry owned by flow 'fh' and disable the flow's KM and
+ * FLM flow types on its CFN.  Returns 0 on success, non-zero when any
+ * of the flow-type updates failed (OR of their return values).
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	/* Free the FT entry (ident.data == 0 marks it unused) */
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Meter-profile storage: the two token buckets of each profile,
+ * pre-encoded by flow_mtr_set_profile() as a 12-bit mantissa in bits
+ * [11:0] and a 4-bit left-shift in bits [15:12].
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires the FLM module to be present in HW variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Maximum number of meter policies, equal to the number of profiles. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a byte count into bucket size units of 2^40/10^9 bytes,
+ * i.e. compute ceil(value * 10^9 / 2^40).  The multiplication is done
+ * on the two 20-bit halves of the 40-bit input separately so the
+ * intermediate products cannot overflow 64 bits.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	/* Round up when any bits below the final unit would be discarded */
+	uint64_t round_up =
+		(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+	return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+/*
+ * Pre-compute the two token buckets of meter profile 'profile_id'.
+ * Rates are converted to units of 128 bytes/sec and sizes to units of
+ * 2^40/10^9 bytes; both are then stored as a 12-bit mantissa in bits
+ * [11:0] with a 4-bit left-shift in bits [15:12], the encoding used in
+ * FLM learn records (see flow_mtr_create_meter()).  Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/*
+ * Meter policy setup is a no-op for this profile: the policy effect is
+ * applied when the meter is created.  Always returns 0.
+ */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Number of meter instances (and statistics slots) supported. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics.  n_pkt/n_bytes are updated by
+ * flm_mtr_update_stats() using the MSB of n_pkt as an
+ * update-in-progress marker (see UINT64_MSB), and read with a retry
+ * loop in flm_mtr_read_stats().  The *_base values implement
+ * clear-on-read.  stats_mask == 0 means the meter is deleted/inactive.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;
+
+	volatile atomic_uint_fast64_t n_pkt;
+	volatile atomic_uint_fast64_t n_bytes;
+	uint64_t n_pkt_base;
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask;
+};
+
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Write one FLM learn record, waiting for space in the learn buffer.
+ * While waiting, informational records are drained (they share the
+ * FLM buffer interface).  Caller must hold the ndev mutex.
+ * Returns 1 when no learn space appears within FLM_PROG_MAX_RETRY
+ * polls, otherwise the result of the learn-data flush.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached free count before starting to poll */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain inf records to make progress on the buffers */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create meter 'mtr_id' from profile 'profile_id' by writing an FLM
+ * learn record (op == 1).  Meter flows use KID 1, distinct from the
+ * KID group + 2 used by match flows, with mtr_id + 1 as the SW9 key
+ * value.  On success the meter's bucket pointer and stats_mask are
+ * recorded for the statistics path.
+ * Returns the result of flow_flm_apply() (0 on success).
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	/* Start the bucket filled with the mantissa part of the size */
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* Record id: mtr_id in little-endian order, id[8] bit 7 as marker */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy meter 'mtr_id' via an FLM learn record with op == 0.  The
+ * local statistics are cleared first so that stats_mask == 0 stops any
+ * concurrent statistics update for this meter.
+ * Returns the result of flow_flm_apply() (0 on success).
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust meter 'mtr_id' by 'adjust_value' via an FLM learn record with
+ * op == 2, reusing the meter's stored bucket encoding.
+ * Returns the result of flow_flm_apply() (0 on success).
+ *
+ * NOTE(review): mtr_stat->buckets is dereferenced without a NULL
+ * check; calling this for a never-created or destroyed meter (buckets
+ * == NULL, see flow_mtr_destroy_meter()) would crash - confirm callers
+ * guarantee the meter exists.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ informational records from
+ * the FLM into 'data'.  Caller must hold the ndev mutex (hence the
+ * "_locked" suffix) and provide a buffer of at least
+ * WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ words.
+ * Returns the number of whole records read (0 when none available).
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached availability count before giving up */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain FLM informational records and fold the meter statistics they
+ * carry into the per-meter counters.  Returns the number of records
+ * processed.
+ *
+ * The counters are published with a seqlock-like protocol: n_pkt is
+ * first stored with UINT64_MSB set (update in progress), then n_bytes
+ * is stored, then n_pkt is stored again with the MSB clear.  Readers
+ * in flm_mtr_read_stats() spin on the MSB and re-read until they see a
+ * consistent pair.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				/* Bytes in words 0/1, packets in words 2/3 */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/* Mark update in progress, write, then unmark */
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read the green packet/byte counters of meter 'id' relative to the
+ * last clear.  *stats_mask receives the meter's statistics mask; the
+ * counters are only filled in when it is non-zero.  When 'clear' is
+ * set, the current values become the new base (clear-on-read).
+ *
+ * The retry loop pairs with the writer in flm_mtr_update_stats(): it
+ * waits while UINT64_MSB flags an update in progress and re-reads
+ * until n_pkt is stable, so n_pkt and n_bytes belong to the same
+ * update.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* Map a physical port number to its IFR MTU recipe index (port + 1). */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/*
+ * Resolve a port_id to its physical port number by walking the NIC's
+ * list of ethernet devices.  Returns UINT8_MAX when no device matches.
+ */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *dev;
+
+	for (dev = ndev->eth_base; dev != NULL; dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the front of the NIC's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/*
+ * Unlink a flow handle from the NIC's doubly linked flow list,
+ * moving the list head forward when the first element is removed.
+ */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *nxt = fh->next;
+	struct flow_handle *prv = fh->prev;
+
+	if (prv) {
+		prv->next = nxt;
+		if (nxt)
+			nxt->prev = prv;
+	} else if (nxt) {
+		ndev->flow_base = nxt;
+		nxt->prev = NULL;
+	} else if (ndev->flow_base == fh) {
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
/*
 * Interpret a VLAN flow element.  When both spec and mask are present and
 * the mask selects the TCI, one 32-bit SW match word is added to the key
 * at DYN_FIRST_VLAN offset 0.  fd->vlans is counted up unconditionally so
 * the matcher knows how many VLAN tags precede the payload.
 *
 * Returns 0 on success.  On failure the flow error is set, @fd is freed
 * (the caller must not use it afterwards) and 1 is returned.
 *
 * NOTE(review): sw_counter is passed by value, so the "sw_counter += 1"
 * below is invisible to the caller - verify the caller tracks the SW-word
 * usage itself (e.g. via key_def), otherwise a later element would reuse
 * the same packet_data/packet_mask slot.
 */
static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
{
	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;

	if (vlan_spec != NULL && vlan_mask != NULL) {
		if (vlan_mask->tci) {
			/* An implicit (port-configured) VLAN already occupies
			 * the first-VLAN key slot, so an explicit TCI match
			 * cannot be combined with it.
			 */
			if (implicit_vlan_vid > 0) {
				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
					"for implicit VLAN patterns.\n");
				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
				free(fd);
				return 1;
			}

			/* Only two SW (single-word) key slots exist. */
			if (sw_counter >= 2) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			/* SW words fill packet_data[1] first, then [0]. */
			uint32_t *sw_data = &packet_data[1 - sw_counter];
			uint32_t *sw_mask = &packet_mask[1 - sw_counter];

			sw_mask[0] = ntohs(vlan_mask->tci);
			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];

			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
			sw_counter += 1;
		}
	}

	fd->vlans += 1;
	return 0;
}
+
/*
 * Interpret an IPv4 flow element and add its src/dst address matches to
 * the key.  When a QW (quad-word) key slot is free and at least one
 * address is masked, both addresses go into one QW entry at DYN_L3
 * offset 12 (src at 12, dst at 16 in the IPv4 header).  Otherwise each
 * masked address falls back to its own SW (single-word) slot.
 *
 * The element also classifies the L3 protocol: if an ANY element or an
 * earlier L3 match was seen, this IPv4 header is the inner (tunnelled)
 * one, otherwise the outer one.
 *
 * Returns 0 on success.  On failure the flow error is set, @fd is freed
 * (the caller must not use it afterwards) and 1 is returned.
 *
 * NOTE(review): qw_counter/sw_counter are passed by value, so the
 * increments below do not propagate to the caller - confirm the caller
 * derives slot usage independently (e.g. from key_def).
 */
static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
	uint32_t any_count)
{
	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;

	if (ipv4_spec != NULL && ipv4_mask != NULL) {
		/* spec/mask both all-ones on frag_offset marks a
		 * fragment match; 0xfe is the driver's internal
		 * fragmentation code - TODO confirm against the
		 * FLM recipe definition.
		 */
		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
			fd->fragmentation = 0xfe;

		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
			/* QW entries occupy 4 words each, filling from
			 * packet_data[6] down to packet_data[2].
			 */
			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);

			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];

			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
			qw_counter += 1;
		} else {
			/* No QW slot left: each masked address needs its
			 * own SW slot; fail early if both would not fit.
			 */
			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			if (ipv4_mask->hdr.src_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
				sw_counter += 1;
			}

			if (ipv4_mask->hdr.dst_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
				sw_counter += 1;
			}
		}
	}

	/* Classify as inner L3 when tunnelled (ANY seen or outer L3 set). */
	if (any_count > 0 || fd->l3_prot != -1)
		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
	else
		fd->l3_prot = PROT_L3_IPV4;
	return 0;
}
+
/*
 * Interpret an IPv6 flow element.  Each non-zero 128-bit address (src at
 * DYN_L3 offset 8, dst at offset 24 in the IPv6 header) consumes one
 * full QW (4-word) key slot; at most two QW slots exist.
 *
 * The 16 network-order address bytes are memcpy'd into the key words and
 * each 32-bit word is then converted to host byte order with ntohl before
 * the mask is applied - the order matters, as the swap must happen on the
 * raw copied words.
 *
 * Returns 0 on success.  On failure the flow error is set, @fd is freed
 * (the caller must not use it afterwards) and 1 is returned.
 *
 * NOTE(review): the check uses ipv6_spec->hdr.*_addr (not the mask) to
 * decide whether to add a match, and qw_counter is passed by value so its
 * increments are local - verify both against the caller's expectations.
 */
static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
{
	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;

	if (ipv6_spec != NULL && ipv6_mask != NULL) {
		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
			if (qw_counter >= 2) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			/* QW entries fill packet_data[6..9] first, then [2..5]. */
			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);

			/* Network-order words -> host order. */
			qw_data[0] = ntohl(qw_data[0]);
			qw_data[1] = ntohl(qw_data[1]);
			qw_data[2] = ntohl(qw_data[2]);
			qw_data[3] = ntohl(qw_data[3]);

			qw_mask[0] = ntohl(qw_mask[0]);
			qw_mask[1] = ntohl(qw_mask[1]);
			qw_mask[2] = ntohl(qw_mask[2]);
			qw_mask[3] = ntohl(qw_mask[3]);

			/* Keep only the bits the mask selects. */
			qw_data[0] &= qw_mask[0];
			qw_data[1] &= qw_mask[1];
			qw_data[2] &= qw_mask[2];
			qw_data[3] &= qw_mask[3];

			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
			qw_counter += 1;
		}

		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
			if (qw_counter >= 2) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);

			/* Network-order words -> host order, then apply mask. */
			qw_data[0] = ntohl(qw_data[0]);
			qw_data[1] = ntohl(qw_data[1]);
			qw_data[2] = ntohl(qw_data[2]);
			qw_data[3] = ntohl(qw_data[3]);

			qw_mask[0] = ntohl(qw_mask[0]);
			qw_mask[1] = ntohl(qw_mask[1]);
			qw_mask[2] = ntohl(qw_mask[2]);
			qw_mask[3] = ntohl(qw_mask[3]);
			qw_data[0] &= qw_mask[0];
			qw_data[1] &= qw_mask[1];
			qw_data[2] &= qw_mask[2];
			qw_data[3] &= qw_mask[3];

			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
			qw_counter += 1;
		}
	}

	/* Classify as inner L3 when tunnelled (ANY seen or outer L3 set). */
	if (any_count > 0 || fd->l3_prot != -1)
		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
	else
		fd->l3_prot = PROT_L3_IPV6;
	return 0;
}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Clear all hardware configuration tied to one CAT function (CFN) slot:
+ * the CFN preset itself, its KM and FLM key-select/enable state, and -
+ * when the CTE enable bitmap is non-zero - the associated CTE/CTS
+ * entries.  Always returns 0; the hw_mod_* results are not checked here.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN */
+	{
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+				   0, 0);
+		hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+	}
+
+	/* KM */
+	{
+		uint32_t bm = 0;
+
+		/* Clear this CFN's bit in the enable bitmap (8 CFNs/entry) */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every KM flow type for this CFN on all four keys */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* FLM - same clearing sequence as the KM block above */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* CTE / CTS */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		if (cte) {
+			/*
+			 * cts_offset: number of CTS rows per CFN; presumably
+			 * cts_num counts A/B halves (two per row, hence the
+			 * rounded halving) - TODO confirm against HW docs.
+			 */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			/* Zero both color words of every CTS row of this CFN */
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Populate an FLM-type flow handle from a parsed flow definition:
+ * derive the IP protocol number from the (outer, else tunnel) L4
+ * protocol, copy the 10-word match key, and pull NAT/DSCP/TEID/QFI
+ * values out of any modify-field actions.
+ *
+ * Returns 0 on success, -1 if @fh is not an FLM handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/*
+	 * Map to the IANA IP protocol number: TCP=6, UDP=17, SCTP=132,
+	 * ICMP=1.  Outer L4 takes precedence; otherwise use the tunnel
+	 * L4 protocol; 0 means "no L4 protocol".
+	 */
+	switch (fd->l4_prot) {
+	case PROT_L4_TCP:
+		fh->flm_prot = 6;
+		break;
+	case PROT_L4_UDP:
+		fh->flm_prot = 17;
+		break;
+	case PROT_L4_SCTP:
+		fh->flm_prot = 132;
+		break;
+	case PROT_L4_ICMP:
+		fh->flm_prot = 1;
+		break;
+	default:
+		switch (fd->tunnel_l4_prot) {
+		case PROT_TUN_L4_TCP:
+			fh->flm_prot = 6;
+			break;
+		case PROT_TUN_L4_UDP:
+			fh->flm_prot = 17;
+			break;
+		case PROT_TUN_L4_SCTP:
+			fh->flm_prot = 132;
+			break;
+		case PROT_TUN_L4_ICMP:
+			fh->flm_prot = 1;
+			break;
+		default:
+			fh->flm_prot = 0;
+			break;
+		}
+		break;
+	}
+
+	/* 10 x 32-bit FLM match key words */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+		switch (fd->modify_field[i].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[i].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* Packed byte: bit 6 = RQI, bits 5:0 = QFI */
+			fh->flm_rqi = (fd->modify_field[i].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[i].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[i].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[i].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[i].value32[0]);
+			break;
+		default:
+			/* Other copy selects carry no FLM handle field */
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build an FLM v17 learn record from an FLM flow handle and submit it
+ * to the hardware learn interface via flow_flm_apply().
+ *
+ * @mtr_ids: optional array of up to MAX_FLM_MTRS_SUPPORTED meter IDs
+ *           (0-terminated); NULL when the flow carries no meters.
+ * @flm_ft:  FLM flow type to program.
+ * @flm_op:  learn operation code (lower 4 bits used).
+ *
+ * Returns the flow_flm_apply() result, or -1 if @fh is not an FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Match key words are copied in reverse order into qw0/qw4/sw8/sw9 */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		FLM_V17_MBR_ID1(learn_record.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(learn_record.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(learn_record.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(learn_record.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1; /* end-of-record marker */
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Set up KM flow-type (FT) and KM category (RCP) resources for a new
+ * flow handle @fh.
+ *
+ * If no identical flow exists, an FT is reused (by identity match) or
+ * allocated from a free slot, and an RCP is either referenced from a
+ * compatible existing flow (@found_flow) or freshly allocated; the KM
+ * RCP and match entry are then written/flushed.  If an identical flow
+ * was found, both its FT and RCP resources are reference-counted and
+ * its match entry is reused.
+ *
+ * On success *setup_km, *setup_km_ft and *setup_km_rcp are filled in
+ * and 0 is returned; on resource exhaustion the flow error is set and
+ * 1 is returned (fh's partially acquired resources are left for the
+ * caller's cleanup path).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+				/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Scan for a matching ident; remember first free slot */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Reuse: bump refcount on the existing FT */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Allocate the free slot and claim its ident */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* OR our FT bit into the shared RCP's FT mask A */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference its FT and RCP resources */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		/*
+		 * NOTE(review): these use @flow while the branch above and
+		 * the refcounting here use @found_flow.  Presumably the
+		 * caller only passes identical_flow_found=1 when its search
+		 * loop broke with flow == found_flow, making them equal -
+		 * confirm, or this reads a stale/unrelated handle.
+		 */
+		*setup_km = 1;
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * @ofs is adjusted in place; when the copy target lies in a newly
+ * pushed outer header (level <= 1), *ofs becomes an absolute offset
+ * from L2 built from the given header lengths and *dyn is forced to 1
+ * (static/L2-relative addressing).
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Bug fix: the original did "ofs += ..." which only
+			 * advanced the local pointer and never changed the
+			 * caller's offset value.  Dereference, as the other
+			 * branches do, so the tunnel-header delta is applied.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* Update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* Update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+ * Once CAT CFN has been programmed, traffic will start matching the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * Prepare a NIC device for inline-profile flow management.
+ *
+ * One-time setup guarded by ndev->flow_mgnt_prepared: reserves the resource
+ * indices that have special hardware meaning (index 0 in every pool, plus
+ * FLM flow type 1), programs default recipes for the CAT/QSL/PDB/HSH/COT
+ * modules, unblocks the RMC datapath, calibrates and resets the FLM SDRAM,
+ * and allocates the meter / flow-type / meter-statistics bookkeeping tables.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Mask off all FLM status interrupts/flags */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		/* Priority/FIFO-fill drop thresholds per flow-type priority */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Host-side bookkeeping tables for meters/flow types/stats */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	/*
+	 * NOTE(review): ndev->flow_mgnt_prepared is still 0 on this path, and
+	 * done_flow_management_of_ndev_profile_inline() only releases
+	 * resources when that flag is set — the FLM handle and the calloc'ed
+	 * tables above appear to leak on failure; confirm and free them
+	 * explicitly here if so.
+	 */
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Release everything set up by
+ * initialize_flow_management_of_ndev_profile_inline().
+ *
+ * No-op unless ndev->flow_mgnt_prepared is set: resets each module's
+ * recipe 0 back to defaults, frees the reserved resource indices and the
+ * host-side bookkeeping tables, then clears the prepared flag so the
+ * device can be initialized again. Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		/* Stop the FLM SDRAM and drop the FLM bookkeeping first */
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/* Host-side tables allocated by the initialize function */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		/* Reset recipe/entry 0 of each module back to defaults */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow pattern/action list without programming any hardware.
+ *
+ * Runs the element/action interpreter (under the device lock) purely for
+ * its side effect of filling @error, then discards the resulting flow
+ * definition. Returns 0 when the flow would be accepted, -1 otherwise.
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t unused_port_id = 0;
+	uint32_t unused_dest_ports = 0;
+	uint32_t unused_queues = 0;
+	uint32_t data_buf[10];
+	uint32_t mask_buf[10];
+	struct flm_flow_key_def_s key_def;
+	struct nic_flow_def *def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	def = interpret_flow_elements(dev, elem, action, error, 0,
+				      &unused_port_id, &unused_dest_ports,
+				      &unused_queues, data_buf, mask_buf,
+				      &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (def == NULL)
+		return -1;
+
+	free(def);
+	return 0;
+}
+
+/*
+ * Create a flow from the given pattern (elem[]) and actions and program it
+ * to the NIC.
+ *
+ * Works on a local copy of @attr; for groups other than 0 the forced VLAN
+ * ID is cleared and the caller's group ID is translated through the group
+ * handle. Returns the new flow handle, or NULL with @error filled in.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	struct flow_attr attr_local;
+
+	/* Local copy so group translation cannot modify the caller's attr */
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port in the actions - use our own */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FLOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	/*
+	 * NOTE(review): fd is not freed here. flow_validate_profile_inline()
+	 * shows the caller of interpret_flow_elements() owns fd, so the two
+	 * group-translation failure paths appear to leak it; whether
+	 * create_flow_filter() frees fd on its own failure is not visible
+	 * here — confirm ownership before adding a free(fd).
+	 */
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy one flow handle. Caller must hold dev->ndev->mtx (see
+ * flow_destroy_profile_inline()).
+ *
+ * FLM flows are unlearned and their replace-extension / replacer entries
+ * are dereferenced and cleared when the last reference goes away; when the
+ * owning template flow's reference count drops to zero it is destroyed as
+ * well (recursively through this function). Non-FLM flows walk every
+ * resource slot they hold and reset the corresponding hardware recipe when
+ * the last reference to that slot is released.
+ *
+ * Returns 0 on success, non-zero if any hardware teardown step failed.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* FLM flow: unlearn it and release its TPE replace resources */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			/* Look up the replacer area before clearing the ext entry */
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Zero each 16-byte replacer record this entry used */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owning template flow once it has no users left */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Release every hardware resource slot this handle holds */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Host-side table only - clear the ident slot */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* Reset all TPE sub-module recipes for this index */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy a single flow, or — when @flow is NULL — every flow created on
+ * this eth device (regular flows first, then FLM flows). Takes the device
+ * lock, stops at the first failing destroy and returns its error code.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	if (flow) {
+		/* A specific flow was requested by the caller */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/*
+		 * Pass 0 walks the regular flow list, pass 1 the FLM flow
+		 * list. The list head is re-read at the start of each pass,
+		 * since destroying a flow may unlink entries.
+		 */
+		for (int pass = 0; pass < 2; pass++) {
+			struct flow_handle *cur = (pass == 0) ?
+						  dev->ndev->flow_base :
+						  dev->ndev->flow_base_flm;
+
+			while (cur && !err) {
+				struct flow_handle *next = cur->next;
+
+				if (cur->dev == dev)
+					err = flow_destroy_locked_profile_inline(dev,
+										 cur,
+										 NULL);
+
+				cur = next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * rte_flow flush entry point — not implemented for the inline profile.
+ * Reports a general error and always returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * rte_flow query entry point — not implemented for the inline profile.
+ * Clears the output buffer description, reports a general error and
+ * always returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	*data = NULL;
+	*length = 0;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into @data.
+ *
+ * data[] is indexed in the order of the fields[] table below. Every entry
+ * except HW_FLM_STAT_FLOWS is accumulated on top of the caller's previous
+ * value; FLOWS is a gauge and is overwritten with the current snapshot.
+ *
+ * Returns 0 on success, -1 when @size is smaller than the field table.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		/* FLOWS is a gauge; everything else accumulates */
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		/* Fields past PRB_IGNORE are presumably absent before FLM v18 */
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the TPE IFR (MTU) recipes for @port with @mtu.
+ *
+ * Writes both the RPP-side and TPE-side IFR recipes and flushes them only
+ * when all of the set operations succeeded. Ports >= 255 are rejected
+ * (the recipe index derived from the port is held in a uint8_t).
+ * Returns 0 on success, non-zero otherwise.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	int err = 0;
+	uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+	struct flow_nic_dev *ndev = dev->ndev;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  ifr_mtu_recipe, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      ifr_mtu_recipe, mtu);
+
+	/* Only commit to hardware when every register write staged cleanly */
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+						    1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with '_' + uppercase are reserved in C;
+ * consider a guard like FLOW_API_PROFILE_INLINE_H_.
+ */
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down flow-management state for @ndev; no-op when not prepared. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time preparation of @ndev for inline-profile flow handling. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy one flow; caller must already hold the ndev mutex. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Dry-run the flow interpreter; fills @error without programming hardware. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns NULL and fills @error on failure. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of @dev when @flow is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Read/accumulate the FLM counters into data[]; -1 if @size is too small. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation for the ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: the current debug mode plus one handle for
+ * every FPGA module this backend drives. Indexed by adapter number.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode;
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * _CHECK_DEBUG_ON() declares a local __debug__ flag and, when register
+ * write-debugging is requested for the backend or the module, enables the
+ * module's debug mode. It must be used at most once per scope (it declares
+ * a variable) and always paired with _CHECK_DEBUG_OFF(), which relies on
+ * __debug__ being in scope.
+ *
+ * NOTE(review): the expansion is an unbraced `if` whose body is the
+ * do/while(0); correct as invoked, but an `else` following an invocation
+ * would bind to this hidden `if`. Also, `__debug__` (double leading
+ * underscore) is a reserved identifier in C — consider renaming.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Backend callback: record the requested debug mode on the device. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	dev->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * The functions below are thin backend-callback wrappers: each one casts
+ * the opaque be_dev pointer to struct backend_dev_s and forwards the FPGA
+ * capability query to the matching info_nthw_get_*() accessor.
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(bdev->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(bdev->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(bdev->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(bdev->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(bdev->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(bdev->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(bdev->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(bdev->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(bdev->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(bdev->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(bdev->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	/* Forward the query to the INFO shadow module. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(bdev->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+static bool cat_get_present(void *be_dev)
+{
+	/* The CAT module is present iff its nthw handle was created. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return bdev->p_cat_nthw != NULL;
+}
+
+/*
+ * Pack the CAT module version as (major << 16) | (minor & 0xffff).
+ * NOTE(review): assumes p_cat_nthw is non-NULL; callers should have
+ * checked cat_get_present() first — confirm at the call sites.
+ */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
+			  (module_get_minor_version(be->p_cat_nthw->m_cat) &
+			   0xffff));
+}
+
+/*
+ * Flush 'cnt' consecutive CFN (categorizer function) shadow records to
+ * hardware, starting at record 'cat_func'.  The register field set written
+ * depends on the CAT module version (v18 vs v21/v22); other versions are
+ * silently ignored.  Always returns 0.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* Fix: was garbled "r(...)" — set burst count to 1 record per flush,
+		 * matching every other *_cnt() preamble in this file.
+		 */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* Fix: was garbled "r(...)" — see comment in the v18 branch. */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			/* v21/v22 add tunnel checksum and TTL-expiry error bits. */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			/* Second KM interface is optional hardware. */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCE (KM category enable) records to hardware starting at
+ * 'index'.  v18 has a single KM interface (hard-coded 0); v21/v22 index the
+ * per-interface shadow arrays with 'km_if_idx'.  Always returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCS (KM category select) records to hardware starting at
+ * 'cat_func'.  v18 uses KM interface 0 only; v21/v22 use 'km_if_idx'.
+ * Always returns 0.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FTE (flow type enable) records to hardware starting at
+ * 'index'.  v18 uses KM interface 0 only; v21/v22 use 'km_if_idx'.
+ * Always returns 0.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE (color-table enable) records to hardware starting at
+ * 'cat_func'.  v18/v21 share the same field layout; v22 adds the RRB
+ * enable bit.  Always returns 0.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			/* Fix: was a duplicated _enable_tpe() call fed the rrb bit —
+			 * the RRB enable must go through its own accessor.
+			 */
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS (color-table select) records starting at 'index'.
+ * NOTE(review): all supported versions read the v18 shadow view — presumably
+ * the cts layout is identical across v18/v21/v22 (union overlay); confirm
+ * against the struct definition.  Always returns 0.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' COT (color override table) records starting at 'cat_func'.
+ * Layout is shared by v18/v21/v22 (v18 shadow view is used for all).
+ * Always returns 0.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCT (color control table) records starting at 'index'.
+ * Layout is shared by v18/v21/v22.  Always returns 0.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' EXO (extractor offset) records starting at 'ext_index'.
+ * Writes the dyn selector and byte offset per record.  Always returns 0.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RCK records starting at 'index'; one data word per record.
+ * Always returns 0.
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' LEN (frame length match) records starting at 'len_index':
+ * lower/upper bounds, two dyn selectors and the invert flag per record.
+ * Always returns 0.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCC CAM records starting at 'len_index': key, category and id
+ * per record.  Always returns 0.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCE records starting at 'len_index' (immediate and indirect
+ * data words).  Only present on CAT v22; other versions are no-ops.
+ * Always returns 0.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCS records starting at 'len_index'.  Each record carries an
+ * enable flag + value pair per downstream module (cor/hsh/qsl/ipf/slc/pdb/
+ * msk/hst/epp/tpe/rrb) plus three side-band type/data pairs.  Only present
+ * on CAT v22.  Always returns 0.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+static bool km_get_present(void *be_dev)
+{
+	/* The KM module is present iff its nthw handle was created. */
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	return bdev->p_km_nthw != NULL;
+}
+
+/*
+ * Pack the KM module version as (major << 16) | (minor & 0xffff).
+ * NOTE(review): assumes p_km_nthw is non-NULL; callers should have
+ * checked km_get_present() first — confirm at the call sites.
+ */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_km_nthw->m_km) << 16) |
+			  (module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
+}
+
+/*
+ * Flush 'cnt' KM RCP (recipe) shadow records to hardware, starting at
+ * 'category'.  Each record programs the QW/DW/SW extractor selectors,
+ * masks, bank/keyway routing and A/B key-lane setup.  Only KM v7 is
+ * handled; other versions are silently ignored.  Always returns 0.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_rcp_select(be->p_km_nthw, category + i);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw,
+					 km->v7.rcp[category + i].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw,
+					 km->v7.rcp[category + i].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw,
+					km->v7.rcp[category + i].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw,
+				       km->v7.rcp[category + i].dual);
+			km_nthw_rcp_paired(be->p_km_nthw,
+					 km->v7.rcp[category + i].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw,
+					km->v7.rcp[category + i].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw,
+					km->v7.rcp[category + i].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw,
+						 km->v7.rcp[category + i].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM CAM records to hardware.  The flat shadow index is
+ * (bank << 11) + record, i.e. 2048 records per bank; each record carries
+ * six key words (w0..w5) and six flow-type fields (ft0..ft5).
+ * Only KM v7 is handled.  Always returns 0.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_cam_select(be->p_km_nthw,
+					 (bank << 11) + record + i);
+			km_nthw_cam_w0(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w0);
+			km_nthw_cam_w1(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w1);
+			km_nthw_cam_w2(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w2);
+			km_nthw_cam_w3(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w3);
+			km_nthw_cam_w4(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w4);
+			km_nthw_cam_w5(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w5);
+			km_nthw_cam_ft0(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM TCAM records to hardware.  The flat shadow index is
+ * bank * 4 * 256 + byte * 256 + value.  Only records whose 'dirty' flag is
+ * set are written, and the flag is cleared afterwards (note: this mutates
+ * the shadow even though 'km' is const-qualified at the pointer level).
+ * Only KM v7 is handled.  Always returns 0.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int start_idx = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			if (km->v7.tcam[start_idx + i].dirty) {
+				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
+				km_nthw_tcam_t(be->p_km_nthw,
+					     km->v7.tcam[start_idx + i].t);
+				km_nthw_tcam_flush(be->p_km_nthw);
+				km->v7.tcam[start_idx + i].dirty = 0;
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Flush 'cnt' KM TCI (TCAM category information) entries, starting at
+ * 'index' within TCAM 'bank', from the shadow copy to hardware.  Linear
+ * address is bank * 72 + index.  v7 layout only.  Returns 0.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/*
+		 * TCAM bank width = 72 entries.
+		 * NOTE(review): comment originally said "version 3" while the
+		 * code handles ver == 7 — confirm the width still applies.
+		 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Flush 'cnt' KM TCQ (TCAM qualifier) entries, starting at 'index' within
+ * TCAM 'bank', from the shadow copy to hardware.  v7 layout only.
+ * Returns 0.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/*
+		 * TCAM bank width = 72 entries.
+		 * NOTE(review): comment originally said "version 3" while the
+		 * code handles ver == 7 — confirm the width still applies.
+		 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* Report whether the FLM module exists on this adapter. */
+static bool flm_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_flm_nthw ? true : false;
+}
+
+/* Return the FLM module version packed as (major << 16) | minor. */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_flm_nthw->m_flm);
+	uint32_t minor = module_get_minor_version(dev->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the shadow FLM CONTROL register (enable/init flags, load/unload/
+ * relearn/probe selectors, CRC and RBL/EAB settings) to hardware and commit
+ * it with a flush.  Applies to register layout v17 and newer.  Returns 0.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM STATUS fields (critical, panic, crcerr — typically
+ * to clear latched error flags) to hardware and flush.  v17+ only.
+ * Returns 0.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read back the FLM STATUS register from hardware and copy every field
+ * (calibdone, initdone, idle, critical, panic, crcerr, eft_bp) into the
+ * v17 shadow structure (the trailing '1' argument selects the "get"
+ * direction of the accessor).  v17+ only.  Returns 0.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the shadow FLM TIMEOUT value to hardware and flush (v17+). */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the shadow FLM SCRUB interval to hardware and flush (v17+). */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the shadow FLM LOAD_BIN value to hardware and flush (v17+). */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the shadow FLM LOAD_PPS value to hardware and flush (v17+). */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the shadow FLM LOAD_LPS value to hardware and flush (v17+). */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the shadow FLM LOAD_APS value to hardware and flush (v17+). */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the shadow FLM PRIO register — four (limit, flow-type) priority
+ * pairs — to hardware and flush.  v17+ only.  Returns 0.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FLM PST (packet/statistics-profile) records starting at
+ * 'index' from the shadow copy to hardware, one record per flush.
+ * v17+ only.  Returns 0.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FLM RCP (recipe) records starting at 'index' from the shadow
+ * copy to hardware.  Each recipe is written field by field (lookup enable,
+ * QW/SW extractor dyn/ofs/sel selectors, mask, key id, opn/ipn, byte
+ * counter selectors, txplm, auto IPv4 mask) and committed with a flush.
+ * v17+ only.  Returns 0.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the shadow FLM buffer-control counters (learn-queue free space,
+ * info and status records available) from hardware.  v17+ only.  Returns 0.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh all FLM statistics counters from hardware into the shadow
+ * structures: for each counter group, trigger a hardware update and then
+ * read the value back (the trailing '1' selects the "get" direction).
+ * The v17 block covers learn/unlearn/relearn/age/timeout/probe counters;
+ * the additional v20 block covers status/info records, packet and cache
+ * hit/miss counters and CUC start/move.  Returns 0.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push a block of learn records ('size' 32-bit words in 'lrn_data') to the
+ * FLM learn queue; the hardware helper also refreshes the shadow
+ * buffer-control counters.  Returns the helper's status code.
+ * NOTE(review): unlike the other FLM ops, v17 fields are accessed without a
+ * 'flm->ver >= 17' guard — confirm all supported versions provide them.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read up to 'size' 32-bit words of flow-info records from the FLM info
+ * queue into 'inf_data'; also refreshes the shadow buffer-control counters.
+ * Returns the helper's status code.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read up to 'size' 32-bit words of status records from the FLM status
+ * queue into 'sta_data'; also refreshes the shadow buffer-control counters.
+ * Returns the helper's status code.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* Report whether the HSH module exists on this adapter. */
+static bool hsh_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_hsh_nthw ? true : false;
+}
+
+/* Return the HSH module version packed as (major << 16) | minor. */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_hsh_nthw->m_hsh);
+	uint32_t minor = module_get_minor_version(dev->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' HSH (hash) recipes starting at 'category' from the shadow
+ * copy to hardware: per recipe, select, write all hash configuration fields
+ * (distribution type, port mask, word extractors, seed, masks, tunnel and
+ * validity settings), then flush.  Only recipe layout v5 is handled.
+ * Returns 0.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* Report whether the HST module exists on this adapter. */
+static bool hst_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_hst_nthw ? true : false;
+}
+
+/* Return the HST module version packed as (major << 16) | minor. */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_hst_nthw->m_hst);
+	uint32_t minor = module_get_minor_version(dev->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' HST (header stripper) recipes starting at 'category' from the
+ * shadow copy to hardware: per recipe, select, write strip mode, start/end
+ * dyn+ofs and the three modifier command sets, then flush.  Only recipe
+ * layout v2 is handled.  Returns 0.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* Report whether the QSL module exists on this adapter. */
+static bool qsl_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_qsl_nthw ? true : false;
+}
+
+/* Return the QSL module version packed as (major << 16) | minor. */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_qsl_nthw->m_qsl);
+	uint32_t minor = module_get_minor_version(dev->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' QSL (queue selector) recipes starting at 'category' from the
+ * shadow copy to hardware: per recipe, select, write discard/drop flags,
+ * table lookup bounds and index/mask, LR/TSA/VLI settings, then flush.
+ * Only recipe layout v7 is handled.  Returns 0.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL QST (queue selection table) entries starting at 'entry'
+ * from the shadow copy to hardware: per entry, select, write destination
+ * queue, enable, TX port and LRE/TCI/VEN fields, then flush.  v7 only.
+ * Returns 0.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL QEN (queue enable) entries starting at 'entry' from the
+ * shadow copy to hardware, one entry per flush.  v7 only.  Returns 0.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' QSL UNMQ (unmatched-packet queue) entries starting at 'entry'
+ * from the shadow copy to hardware: per entry, select, write destination
+ * queue and enable, then flush.  v7 only.  Returns 0.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* Report whether the SLC module exists on this adapter. */
+static bool slc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_slc_nthw ? true : false;
+}
+
+/* Return the SLC module version packed as (major << 16) | minor. */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_slc_nthw->m_slc);
+	uint32_t minor = module_get_minor_version(dev->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' SLC (slicer) recipes starting at 'category' from the shadow
+ * copy to hardware: per recipe, select, write tail-slice enable/dyn/ofs and
+ * pcap flag, then flush.  Only recipe layout v1 is handled.  Returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* Report whether the SLC-LR module exists on this adapter. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_slc_lr_nthw ? true : false;
+}
+
+/* Return the SLC-LR module version packed as (major << 16) | minor. */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_slc_lr_nthw->m_slc_lr);
+	uint32_t minor = module_get_minor_version(dev->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' SLC-LR (slicer, local retransmit) recipes starting at
+ * 'category' from the shadow copy to hardware: per recipe, select, write
+ * tail-slice enable/dyn/ofs and pcap flag, then flush.  Only recipe layout
+ * v2 is handled.  Returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* Report whether the PDB module exists on this adapter. */
+static bool pdb_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_pdb_nthw ? true : false;
+}
+
+/* Return the PDB module version packed as (major << 16) | minor. */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_pdb_nthw->m_pdb);
+	uint32_t minor = module_get_minor_version(dev->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' PDB (packet descriptor builder) recipes starting at
+ * 'category' from the shadow copy to hardware: per recipe, select, write
+ * descriptor layout, TX control, offset selectors, duplication and
+ * PCAP-FCS settings, then flush.  Only recipe layout v9 is handled.
+ * Returns 0.
+ *
+ * Fix: the original wrote the pcap_keep_fcs shadow value through
+ * pdb_nthw_rcp_duplicate_bit() (copy-paste), clobbering DUPLICATE_BIT and
+ * never programming PCAP_KEEP_FCS; use pdb_nthw_rcp_pcap_keep_fcs().
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the shadow PDB CONFIG register (timestamp format and port offset)
+ * to hardware and flush.  Only layout v9 is handled.  Returns 0.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* Report whether the IOA module exists on this adapter. */
+static bool ioa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_ioa_nthw ? true : false;
+}
+
+/* Return the IOA module version packed as (major << 16) | minor. */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = module_get_major_version(dev->p_ioa_nthw->m_ioa);
+	uint32_t minor = module_get_minor_version(dev->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
/*
 * Flush @cnt IOA recipe (RCP) entries, starting at @category, from the
 * shadow state in @ioa to hardware. Only register version 4 is handled;
 * other versions return success without writing anything.
 */
static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
			 int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);

	if (ioa->ver == 4) {
		/* One record per flush; each entry is selected then written. */
		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
						ioa->v4.rcp[category + i].tunnel_pop);
			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_pop);
			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
					       ioa->v4.rcp[category + i].vlan_push);
			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_vid);
			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_dei);
			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_pcp);
			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
						   ioa->v4.rcp[category + i].vlan_tpid_sel);
			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
						       ioa->v4.rcp[category + i].queue_override_en);
			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].queue_id);
			ioa_nthw_rcp_flush(be->p_ioa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
	return 0;
}
+
/*
 * Program the two custom VLAN TPID values into the IOA "special" TPID
 * registers and flush them. Version 4 only; otherwise a successful no-op.
 */
static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);

	if (ioa->ver == 4) {
		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
						 ioa->v4.tpid->cust_tpid_0);
		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
						 ioa->v4.tpid->cust_tpid_1);
		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
	}

	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
	return 0;
}
+
/*
 * Flush @cnt ROA egress-packet-processing (EPP) entries, starting at
 * @index, to hardware via the IOA module. Version 4 only.
 */
static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);

	if (ioa->ver == 4) {
		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
						     ioa->v4.roa_epp[index + i].push_tunnel);
			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
						 ioa->v4.roa_epp[index + i].tx_port);
			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
	return 0;
}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw != NULL;
+}
+
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+			  (module_get_minor_version(be->p_roa_nthw->m_roa) &
+			   0xffff));
+}
+
/*
 * Flush tunnel-header data to the ROA module. Each logical tunnel header
 * occupies 4 hardware records of 4 words each; the hardware select runs
 * over @index + i*4 + ii while the shadow array is indexed by
 * index / 4 + i.
 * NOTE(review): this indexing only lines up when @index is a multiple
 * of 4 — confirm the caller contract.
 */
static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
			    int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		/* 4 words are written per flush. */
		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
		for (int i = 0; i < cnt; i++) {
			for (int ii = 0; ii < 4; ii++) {
				roa_nthw_tun_hdr_select(be->p_roa_nthw,
						     index + (i * 4) + ii);
				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
							    &roa->v6.tunhdr[index / 4 + i]
							    .tunnel_hdr[ii * 4]);
				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
			}
		}
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
/*
 * Flush @cnt ROA tunnel-configuration entries, starting at @category,
 * from the shadow state to hardware. Version 6 only.
 */
static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
			    int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
						 roa->v6.tuncfg[category + i].tun_len);
			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].tun_type);
			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].tun_vlan);
			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
						 roa->v6.tuncfg[category + i].ip_type);
			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].ipcs_upd);
			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
						      roa->v6.tuncfg[category + i].ipcs_precalc);
			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].iptl_upd);
			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
						      roa->v6.tuncfg[category + i].iptl_precalc);
			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
						   roa->v6.tuncfg[category + i].tx_lag_ix);
			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
						     roa->v6.tuncfg[category + i].recirculate);
			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
						     roa->v6.tuncfg[category + i].push_tunnel);
			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
						     roa->v6.tuncfg[category + i].recirc_port);
			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
						       roa->v6.tuncfg[category + i].recirc_bypass);
			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
/*
 * Write the ROA global forwarding configuration (recirculate/normal/
 * cell-builder/non-normal packet forwarding and the two TX ports) to
 * hardware and flush it. Version 6 only.
 */
static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
					     roa->v6.config->fwd_recirculate);
		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
					    roa->v6.config->fwd_normal_pcks);
		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
					 roa->v6.config->fwd_txport0);
		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
					 roa->v6.config->fwd_txport1);
		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
						      roa->v6.config->fwd_cellbuilder_pcks);
		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
						    roa->v6.config->fwd_non_normal_pcks);
		roa_nthw_config_flush(be->p_roa_nthw);
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
/*
 * Flush @cnt link-aggregation configuration entries (TX physical port
 * mapping), starting at @index, to the ROA module. Version 6 only.
 */
static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
			    int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
						     roa->v6.lagcfg[index + i].txphy_port);
			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw != NULL;
+}
+
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+			  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+			   0xffff));
+}
+
/*
 * Write the RMC control block (blocking flags and LAG odd/even setting)
 * to hardware and flush it. Only version 1.3 (0x10003 = major 1 in the
 * high 16 bits, minor 3 in the low 16 bits) is handled.
 */
static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);

	if (rmc->ver == 0x10003) {
		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
				       rmc->v1_3.ctrl->block_statt);
		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
				       rmc->v1_3.ctrl->block_keepa);
		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
					  rmc->v1_3.ctrl->block_rpp_slice);
		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
					 rmc->v1_3.ctrl->block_mac_port);
		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
					  rmc->v1_3.ctrl->lag_phy_odd_even);
		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
	}

	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
	return 0;
}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+	       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+	       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
/*
 * Map the version tuple of the six TPE sub-modules onto a single TPE
 * version number. Only two combinations are known; any other combination
 * is a configuration error.
 * NOTE(review): with NDEBUG defined the assert() compiles away and an
 * unknown combination silently returns 0 — confirm callers treat 0 as
 * "unsupported".
 */
static uint32_t tpe_get_version(void *be_dev)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	const uint32_t csu_version =
		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
			    0xffff));

	const uint32_t hfu_version =
		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
			    0xffff));

	const uint32_t rpp_lr_version =
		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
			    << 16) |
			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
			    0xffff));

	const uint32_t tx_cpy_version =
		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
			    << 16) |
			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
			    0xffff));

	const uint32_t tx_ins_version =
		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
			    << 16) |
			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
			    0xffff));

	const uint32_t tx_rpl_version =
		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
			    << 16) |
			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
			    0xffff));

	/* TPE v1: RPP_LR 0.x; TPE v2: RPP_LR 1.x (all others identical). */
	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
		return 1;

	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
		return 2;

	assert(false);
	return 0;
}
+
/*
 * Flush @cnt RPP_LR recipe entries (exp field), starting at @index,
 * to hardware. Available from TPE version 1.
 */
static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);

	if (rpp_lr->ver >= 1) {
		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
					 rpp_lr->v1.rpp_rcp[index + i].exp);
			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
		}
	}

	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
	return 0;
}
+
/*
 * Flush @cnt RPP IFR (IP fragmentation) recipe entries, starting at
 * @index, to hardware. Requires TPE version >= 2; returns -1 on older
 * versions (unlike most siblings, which silently succeed).
 */
static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
				 int index, int cnt)
{
	int res = 0;
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);

	if (rpp_lr->ver >= 2) {
		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
		}
	} else {
		res = -1;
	}
	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
	return res;
}
+
/*
 * Flush @cnt IFR recipe entries (enable flag and MTU), starting at
 * @index, to the stand-alone IFR module. Requires TPE version >= 2;
 * returns -1 on older versions.
 */
static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
			     int index, int cnt)
{
	int res = 0;
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);

	if (ifr->ver >= 2) {
		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
			ifr_nthw_rcp_en(be->p_ifr_nthw,
				      ifr->v2.ifr_rcp[index + i].en);
			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
				       ifr->v2.ifr_rcp[index + i].mtu);
			ifr_nthw_rcp_flush(be->p_ifr_nthw);
		}
	} else {
		res = -1;
	}
	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
	return res;
}
+
/*
 * Flush @cnt TX_INS recipe entries (dyn/ofs/len insert descriptors),
 * starting at @index, to hardware. Available from TPE version 1.
 */
static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);

	if (tx_ins->ver >= 1) {
		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
					 tx_ins->v1.ins_rcp[index + i].dyn);
			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
					 tx_ins->v1.ins_rcp[index + i].ofs);
			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
					 tx_ins->v1.ins_rcp[index + i].len);
			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
	return 0;
}
+
/*
 * Flush @cnt TX_RPL recipe entries (replace descriptors: position,
 * length, replacement pointer, extension priority), starting at @index.
 * Available from TPE version 1.
 */
static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);

	if (tx_rpl->ver >= 1) {
		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
					 tx_rpl->v1.rpl_rcp[index + i].dyn);
			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
					 tx_rpl->v1.rpl_rcp[index + i].ofs);
			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
					 tx_rpl->v1.rpl_rcp[index + i].len);
			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
	return 0;
}
+
/*
 * Flush @cnt TX_RPL extension entries (replacement pointers), starting
 * at @index. Available from TPE version 1.
 */
static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);

	if (tx_rpl->ver >= 1) {
		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
	return 0;
}
+
/*
 * Flush @cnt TX_RPL replacement-data words, starting at @index.
 * Available from TPE version 1.
 */
static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);

	if (tx_rpl->ver >= 1) {
		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
					   tx_rpl->v1.rpl_rpl[index + i].value);
			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
	return 0;
}
+
/*
 * Flush @cnt TX_CPY recipe entries, starting at @index. Recipes are
 * spread across several writers; the flat index is split into a writer
 * number (index / nb_rcp_categories) and a per-writer slot
 * (index % nb_rcp_categories). The writer cnt register is re-programmed
 * only when the writer changes.
 */
static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	/* Sentinel: -1 wraps to UINT_MAX, which never matches a real
	 * writer index, forcing the cnt write on the first iteration.
	 * NOTE(review): the != comparison mixes unsigned and a signed
	 * division result; fine while (index + i) is non-negative.
	 */
	unsigned int wr_index = -1;

	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);

	if (tx_cpy->ver >= 1) {
		for (int i = 0; i < cnt; i++) {
			if (wr_index !=
					(index + i) / tx_cpy->nb_rcp_categories) {
				wr_index =
					(index + i) / tx_cpy->nb_rcp_categories;
				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
						    1);
			}

			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
						  (index + i) % tx_cpy->nb_rcp_categories);
			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
				tx_cpy->v1.cpy_rcp[index + i].reader_select);
			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
					    tx_cpy->v1.cpy_rcp[index + i].dyn);
			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
					    tx_cpy->v1.cpy_rcp[index + i].ofs);
			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
					    tx_cpy->v1.cpy_rcp[index + i].len);
			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
		}
	}

	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
	return 0;
}
+
/*
 * Flush @cnt HFU (header field update) recipe entries, starting at
 * @index. Each recipe programs three length-update lanes (A/B/C), the
 * TTL update, and the protocol/offset fields. Available from TPE v1.
 */
static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);

	if (hfu->ver >= 1) {
		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
			/* Length lane A (outer L4 length aware). */
			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
					  hfu->v1.hfu_rcp[index + i].len_a_wr);
			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
			/* Length lane B. */
			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
					      hfu->v1.hfu_rcp[index + i].len_b_wr);
			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
			/* Length lane C. */
			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
					      hfu->v1.hfu_rcp[index + i].len_c_wr);
			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
			/* TTL update and protocol/offset fields. */
			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].ttl_wr);
			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].cs_inf);
			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].l3_prt);
			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].l3_frag);
			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].tunnel);
			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].l4_prt);
			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
			hfu_nthw_rcp_flush(be->p_hfu_nthw);
		}
	}

	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
	return 0;
}
+
/*
 * Flush @cnt CSU (checksum update) recipe entries, starting at @index:
 * one command per outer/inner L3/L4 header. Available from TPE v1.
 */
static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);

	if (csu->ver >= 1) {
		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].ol3_cmd);
			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].ol4_cmd);
			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].il3_cmd);
			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].il4_cmd);
			csu_nthw_rcp_flush(be->p_csu_nthw);
		}
	}

	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
	return 0;
}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
/*
 * Rx queue allocation is not supported by this backend; always fails.
 *
 * @param be_dev   backend device handle (unused)
 * @param queue_id requested queue id (unused)
 * @return -1 always
 */
static int alloc_rx_queue(void *be_dev, int queue_id)
{
	(void)be_dev;
	(void)queue_id;
	/* Diagnostics belong on stderr, not stdout. */
	fprintf(stderr, "ERROR alloc Rx queue\n");
	return -1;
}
+
/*
 * Rx queue release is not supported by this backend; logs and returns 0.
 *
 * @param be_dev   backend device handle (unused)
 * @param hw_queue hardware queue id (unused)
 * @return 0 always
 *
 * NOTE(review): the return value 0 is inconsistent with alloc_rx_queue's
 * -1; kept unchanged because callers may depend on it — confirm intent.
 */
static int free_rx_queue(void *be_dev, int hw_queue)
{
	(void)be_dev;
	(void)hw_queue;
	/* Diagnostics belong on stderr, not stdout. */
	fprintf(stderr, "ERROR free Rx queue\n");
	return 0;
}
+
/*
 * Backend operations table exported to the flow API layer.
 *
 * This is a POSITIONAL initializer: every entry must appear in exactly
 * the order the fields are declared in struct flow_api_backend_ops.
 * Adding, removing or reordering a field in that struct requires a
 * matching change here. The first entry (1) is the table version.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,

	/* Generic capability / parameter getters. */
	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_hst_categories,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_ioa_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,

	/* Queue management (unsupported stubs in this backend). */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module. */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,
	cat_cce_flush,
	cat_ccs_flush,

	/* KM module. */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM module. */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_timeout_flush,
	flm_scrub_flush,
	flm_load_bin_flush,
	flm_load_pps_flush,
	flm_load_lps_flush,
	flm_load_aps_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_data_update,
	flm_sta_data_update,

	/* HSH module. */
	hsh_get_present,
	hsh_get_version,
	hsh_rcp_flush,

	/* HST module. */
	hst_get_present,
	hst_get_version,
	hst_rcp_flush,

	/* QSL module. */
	qsl_get_present,
	qsl_get_version,
	qsl_rcp_flush,
	qsl_qst_flush,
	qsl_qen_flush,
	qsl_unmq_flush,

	/* SLC module. */
	slc_get_present,
	slc_get_version,
	slc_rcp_flush,

	/* SLC LR module. */
	slc_lr_get_present,
	slc_lr_get_version,
	slc_lr_rcp_flush,

	/* PDB module. */
	pdb_get_present,
	pdb_get_version,
	pdb_rcp_flush,
	pdb_config_flush,

	/* IOA module. */
	ioa_get_present,
	ioa_get_version,
	ioa_rcp_flush,
	ioa_special_tpid_flush,
	ioa_roa_epp_flush,

	/* ROA module. */
	roa_get_present,
	roa_get_version,
	roa_tunhdr_flush,
	roa_tuncfg_flush,
	roa_config_flush,
	roa_lagcfg_flush,

	/* RMC module. */
	rmc_get_present,
	rmc_get_version,
	rmc_ctrl_flush,

	/* TPE composite module. */
	tpe_get_present,
	tpe_get_version,
	tpe_rpp_rcp_flush,
	tpe_rpp_ifr_rcp_flush,
	tpe_ifr_rcp_flush,
	tpe_ins_rcp_flush,
	tpe_rpl_rcp_flush,
	tpe_rpl_ext_flush,
	tpe_rpl_rpl_flush,
	tpe_cpy_rcp_flush,
	tpe_hfu_rcp_flush,
	tpe_csu_rcp_flush,
};
+
/*
 * Probe and instantiate every nthw sub-module for the adapter behind
 * @p_fpga, storing the handles in the per-adapter be_devs[] slot, and
 * hand back the backend ops table.
 *
 * Each module follows the same pattern: a first xxx_nthw_init(NULL, ...)
 * call appears to act as a presence probe (0 == present — TODO confirm
 * against the nthw init implementations); on success a real instance is
 * allocated and initialized, otherwise the handle is left NULL and the
 * corresponding xxx_get_present() will report absence.
 *
 * @param p_fpga FPGA instance to probe
 * @param dev    out: opaque backend device (points into be_devs[])
 * @return pointer to the static flow_be_iface ops table
 */
const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
		void **dev)
{
	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;

	/* INFO is mandatory and always created (no probe). */
	struct info_nthw *pinfonthw = info_nthw_new();

	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;

	/* Init nthw CAT */
	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct cat_nthw *pcatnthw = cat_nthw_new();

		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
	} else {
		be_devs[physical_adapter_no].p_cat_nthw = NULL;
	}
	/* Init nthw KM */
	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct km_nthw *pkmnthw = km_nthw_new();

		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
	} else {
		be_devs[physical_adapter_no].p_km_nthw = NULL;
	}
	/* Init nthw FLM */
	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct flm_nthw *pflmnthw = flm_nthw_new();

		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
	} else {
		be_devs[physical_adapter_no].p_flm_nthw = NULL;
	}
	/* Init nthw IFR */
	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct ifr_nthw *ifrnthw = ifr_nthw_new();

		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
	} else {
		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
	}
	/* Init nthw HSH */
	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct hsh_nthw *phshnthw = hsh_nthw_new();

		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
	} else {
		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
	}
	/* Init nthw HST */
	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct hst_nthw *phstnthw = hst_nthw_new();

		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
	} else {
		be_devs[physical_adapter_no].p_hst_nthw = NULL;
	}
	/* Init nthw QSL */
	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct qsl_nthw *pqslnthw = qsl_nthw_new();

		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
	} else {
		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
	}
	/* Init nthw SLC */
	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct slc_nthw *pslcnthw = slc_nthw_new();

		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
	} else {
		be_devs[physical_adapter_no].p_slc_nthw = NULL;
	}
	/* Init nthw SLC LR */
	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();

		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
	} else {
		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
	}
	/* Init nthw PDB */
	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct pdb_nthw *ppdbnthw = pdb_nthw_new();

		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
	} else {
		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
	}
	/* Init nthw IOA */
	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct ioa_nthw *pioanthw = ioa_nthw_new();

		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
	} else {
		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
	}
	/* Init nthw ROA */
	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct roa_nthw *proanthw = roa_nthw_new();

		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
	} else {
		be_devs[physical_adapter_no].p_roa_nthw = NULL;
	}
	/* Init nthw RMC */
	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct rmc_nthw *prmcnthw = rmc_nthw_new();

		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
	} else {
		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
	}
	/* Init nthw HFU */
	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct hfu_nthw *ptr = hfu_nthw_new();

		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
	}
	/* Init nthw RPP_LR */
	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();

		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
	}
	/* Init nthw TX_CPY */
	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();

		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
	}
	/* Init nthw CSU */
	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct csu_nthw *ptr = csu_nthw_new();

		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_csu_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_csu_nthw = NULL;
	}
	/* Init nthw TX_INS */
	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct tx_ins_nthw *ptr = tx_ins_nthw_new();

		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
	}
	/* Init nthw TX_RPL */
	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();

		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
	} else {
		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
	}
	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
	*dev = (void *)&be_devs[physical_adapter_no];

	return &flow_be_iface;
}
+
/*
 * Tear down every nthw module handle created by bin_flow_backend_init().
 * NOTE(review): handles for absent modules are NULL here; assumes each
 * xxx_nthw_delete() tolerates a NULL argument — confirm in the nthw
 * implementations.
 */
void bin_flow_backend_done(void *dev)
{
	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;

	info_nthw_delete(be_dev->p_info_nthw);
	cat_nthw_delete(be_dev->p_cat_nthw);
	km_nthw_delete(be_dev->p_km_nthw);
	flm_nthw_delete(be_dev->p_flm_nthw);
	hsh_nthw_delete(be_dev->p_hsh_nthw);
	hst_nthw_delete(be_dev->p_hst_nthw);
	qsl_nthw_delete(be_dev->p_qsl_nthw);
	slc_nthw_delete(be_dev->p_slc_nthw);
	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
	pdb_nthw_delete(be_dev->p_pdb_nthw);
	ioa_nthw_delete(be_dev->p_ioa_nthw);
	roa_nthw_delete(be_dev->p_roa_nthw);
	rmc_nthw_delete(be_dev->p_rmc_nthw);
	csu_nthw_delete(be_dev->p_csu_nthw);
	hfu_nthw_delete(be_dev->p_hfu_nthw);
	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
/* NOTE(review): identifiers starting with a double underscore are
 * reserved for the implementation (C11 7.1.3); consider FLOW_BACKEND_H.
 */
#ifndef __FLOW_BACKEND_H__
#define __FLOW_BACKEND_H__

#include <stdint.h> /* uint8_t */
#include "nthw_fpga_model.h"

/* Probe all nthw modules on @p_fpga and return the backend ops table;
 * *be_dev receives the opaque per-adapter backend handle.
 */
const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
		void **be_dev);
/* Release all nthw module handles owned by @be_dev. */
void bin_flow_backend_done(void *be_dev);

#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-22 15:41   ` [PATCH v7 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-22 15:41   ` Mykola Kostenok
  2023-08-29  8:13     ` David Marchand
  2023-08-22 15:41   ` [PATCH v7 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 1 reply; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12514 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c3f2c993f..02aca74173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multi-process support.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by a ‘,’ (comma).
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;
+
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;
+	pthread_t stat_thread;
+	pthread_t flm_thread;
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;
+	struct rte_pci_device *p_dev;
+	struct ntdrv_4ga_s ntdrv;
+
+	int n_eth_dev_init_count;
+	int probe_finished;
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	size_t avail_mem =
+		sizeof(struct virtq_avail) +
+		queue_size *
+		sizeof(le16); /* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_mem_aligned =
+		((avail_mem % STRUCT_ALIGNMENT) == 0) ?
+		avail_mem :
+		STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_mem =
+		sizeof(struct virtq_used) +
+		queue_size *
+		sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned =
+		((used_mem % STRUCT_ALIGNMENT) == 0) ?
+		used_mem :
+		STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = 1;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Enable an unmanaged Rx virt queue.
+ *
+ * Programs the queue's UW register data (enabling the MSI-X vector and the
+ * sticky interrupt flag when irq_vector is a valid vector index), then
+ * enables the availability monitor (AM).
+ *
+ * rx_vq - queue previously created with nthw_setup_rx_virt_queue()
+ *
+ * Returns 0 on success, -1 on NULL/out-of-range/non-UNMANAGED queue or on a
+ * register write failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	/* irq_vector is a signed int (may be negative) - log with %d, not %u */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%d\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable an unmanaged Tx virt queue.
+ *
+ * Clears the interrupt/ISTK fields in the queue's UW register data, disables
+ * the availability monitor (AM), then waits for the FPGA to finish in-flight
+ * packet processing for this queue. Mirrors nthw_disable_rx_virt_queue().
+ *
+ * Returns 0 on success, -1 on NULL/out-of-range/non-UNMANAGED queue or on a
+ * register write failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* Only queues in unmanaged mode may be toggled through this API */
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Enable an unmanaged Tx virt queue.
+ *
+ * Programs the queue's UW register data (enabling the MSI-X vector and the
+ * sticky interrupt flag when irq_vector is a valid vector index), then
+ * enables the availability monitor (AM). Mirrors nthw_enable_rx_virt_queue().
+ *
+ * Returns 0 on success, -1 on NULL/out-of-range/non-UNMANAGED queue or on a
+ * register write failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-point a Tx virt queue at a new output port and (re-)enable it.
+ *
+ * Reprograms the queue's DR register data with the new outport, then enables
+ * the queue via nthw_enable_tx_virt_queue().
+ *
+ * Returns 0 on success, -1 on NULL queue or register write failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Guard against NULL before dereferencing, consistent with the other
+	 * public enable/disable entry points in this file.
+	 */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/*
+ * Configure Tx QoS for a port: enable flag, information rate (ir) and burst
+ * size (bs). Thin public wrapper around set_tx_qos_data().
+ * Returns the underlying register-write status (0 on success).
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/*
+ * Set the global Tx QoS rate scaling as multiplier/divider.
+ * Thin public wrapper around set_tx_qos_rate().
+ * Returns the underlying register-write status (0 on success).
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read the Rx queue pointer previously requested via
+ * dbs_initialize_get_rx_ptr(). On success *p_index is the hardware pointer,
+ * or INDEX_PTR_NOT_VALID if the hardware has not produced a valid value yet
+ * (callers poll until valid). Returns get_rx_ptr()'s status.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0) {
+		if (valid)
+			*p_index = ptr;
+		else
+			*p_index = INDEX_PTR_NOT_VALID;
+	}
+	return status;
+}
+
+/*
+ * Read the Tx queue pointer previously requested via
+ * dbs_initialize_get_tx_ptr(). On success *p_index is the hardware pointer,
+ * or INDEX_PTR_NOT_VALID if the hardware has not produced a valid value yet
+ * (callers poll until valid). Returns get_tx_ptr()'s status.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+
+	const int status = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0) {
+		if (valid)
+			*p_index = ptr;
+		else
+			*p_index = INDEX_PTR_NOT_VALID;
+	}
+	return status;
+}
+
+/* Request the hardware Rx pointer for 'queue'; read back with dbs_get_rx_ptr(). */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Request the hardware Tx pointer for 'queue'; read back with dbs_get_tx_ptr(). */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Spin until the DBS idle register is no longer busy, then report the idle
+ * state in *idle. 'rx' selects the Rx (non-zero) or Tx (zero) register set.
+ * Returns 0 on success or the error from get_rx_idle()/get_tx_idle()
+ * (e.g. -ENOTSUP when the idle register is unavailable).
+ * NOTE(review): busy-waits with no timeout - relies on HW clearing busy.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Wait until the FPGA reports the queue idle after a disable/release.
+ * 'rx' selects the Rx (non-zero) or Tx (zero) register set.
+ *
+ * If the idle register probe returns -ENOTSUP (presumably an FPGA without
+ * idle support - confirm), fall back to a fixed 200 ms wait and report
+ * success. Otherwise repeatedly request the idle state for this queue and
+ * poll until idle is reported.
+ *
+ * Returns 0 when idle (or after the fallback wait), -1 on register errors.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an Rx virt queue in hardware and reset its software state:
+ * clear UW, disable AM, wait for the FPGA to drain, clear the rest of AM,
+ * clear DR, re-initialize the queue and mark it UNUSED.
+ *
+ * Returns 0 on success, -1 on NULL queue or register write failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* Validate before dereferencing: the previous version read
+	 * rxvq->mp_nthw_dbs before this NULL check, which is undefined
+	 * behavior for a NULL argument.
+	 */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/*
+ * Release an unmanaged Rx virt queue (caller owns the packet buffers).
+ * Returns 0 on success, -1 if the queue is NULL or not UNMANAGED.
+ */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Release a managed Rx virt queue: free the driver-owned copy of the packet
+ * buffer descriptor array, then tear down the hardware queue.
+ * Returns 0 on success, -1 if the queue is NULL or not MANAGED.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	if (rxvq->p_virtual_addr) {
+		free(rxvq->p_virtual_addr);
+		rxvq->p_virtual_addr = NULL;
+	}
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a Tx virt queue in hardware and reset its software state:
+ * clear UW, disable AM, wait for the FPGA to drain, clear the rest of AM,
+ * clear DR and QP, re-initialize the queue and mark it UNUSED.
+ *
+ * Returns 0 on success, -1 on NULL queue or register write failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* Validate before dereferencing: the previous version read
+	 * txvq->mp_nthw_dbs before this NULL check, which is undefined
+	 * behavior for a NULL argument.
+	 */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/*
+ * Release an unmanaged Tx virt queue (caller owns the packet buffers).
+ * Returns 0 on success, -1 if the queue is NULL or not UNMANAGED.
+ */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Release a managed Tx virt queue: free the driver-owned copy of the packet
+ * buffer descriptor array, then tear down the hardware queue.
+ * Returns 0 on success, -1 if the queue is NULL or not MANAGED.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	if (txvq->p_virtual_addr) {
+		free(txvq->p_virtual_addr);
+		txvq->p_virtual_addr = NULL;
+	}
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Set up an unmanaged Tx virt queue at slot 'index' in the global txvq table.
+ *
+ * Programs DR, UW, AM and QP register data per the DBS setup sequence
+ * (DSF00094), initializes the hardware queue, and records the queue state.
+ * Interrupts are always programmed disabled here; AM is enabled immediately
+ * only when irq_vector < 0 (interrupt-driven queues get AM enabled later via
+ * nthw_enable_tx_virt_queue(), after vfio interrupts are set up).
+ *
+ * For PACKED_RING the UW data carries the descriptor struct address;
+ * for SPLIT_RING it carries the used struct address.
+ *
+ * Returns the queue handle, or NULL on any register write failure.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a managed split-ring Rx virt queue at slot 'index'.
+ *
+ * Lays out avail/used/desc structures inside p_virt_struct_area, pre-fills
+ * the ring with p_packet_buffers (device-writable Rx buffers), keeps a
+ * driver-owned copy of the buffer descriptor array, then programs the
+ * hardware via nthw_setup_rx_virt_queue() and marks the queue MANAGED.
+ *
+ * Returns the queue handle, or NULL if the descriptor-copy allocation fails.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check allocation before memcpy - previously an OOM here
+		 * caused a NULL-pointer write.
+		 */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a managed split-ring Tx virt queue at slot 'index'.
+ *
+ * Lays out avail/used/desc structures inside p_virt_struct_area (Tx buffers
+ * are not pre-filled into the avail ring), keeps a driver-owned copy of the
+ * buffer descriptor array, then programs the hardware via
+ * nthw_setup_tx_virt_queue() and marks the queue MANAGED.
+ *
+ * Returns the queue handle, or NULL if the descriptor-copy allocation fails.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check allocation before memcpy - previously an OOM here
+		 * caused a NULL-pointer write.
+		 */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Common packed-ring setup for managed Rx/Tx queues.
+ *
+ * Lays out descriptor table + device/driver event-suppression structs in
+ * p_virt_struct_area, pre-fills descriptors (addr/len only for Rx), sets the
+ * wrap counters, and keeps a driver-owned copy of the buffer descriptors.
+ * Buffer IDs equal the index into p_packet_buffers, which requires in-order
+ * completion from the FPGA.
+ *
+ * flags - initial descriptor flags (e.g. VIRTQ_DESC_F_WRITE|AVAIL for Rx)
+ * rx    - non-zero for Rx (fills buffer addr/len, flips avail wrap count),
+ *         zero for Tx (flips used wrap count instead)
+ *
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	/* device event suppression follows the descriptor table,
+	 * driver event suppression follows the device one
+	 */
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a managed packed-ring Rx virt queue at slot 'index'.
+ * Sets up the packed ring structures, then programs the hardware with the
+ * driver/device event areas and descriptor table addresses.
+ * Returns the queue handle, or NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a managed packed-ring Tx virt queue at slot 'index'.
+ * Sets up the packed ring structures (no initial flags; Tx descriptors are
+ * filled at send time), then programs the hardware with the driver/device
+ * event areas and descriptor table addresses.
+ * Returns the queue handle, or NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+/*
+ * Public entry point: dispatch managed Rx queue creation to the split-ring
+ * or packed-ring implementation based on vq_type.
+ * Returns NULL for unknown vq_type values or on setup failure.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	switch (vq_type) {
+	case SPLIT_RING:
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+	case PACKED_RING:
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+	default:
+		break;
+	}
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+/*
+ * Public entry point: dispatch managed Tx queue creation to the split-ring
+ * or packed-ring implementation based on vq_type.
+ * Returns NULL for unknown vq_type values or on setup failure.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	switch (vq_type) {
+	case SPLIT_RING:
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+	case PACKED_RING:
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+	default:
+		break;
+	}
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Descriptor flag value matching the current avail wrap counter */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse used flag relative to the avail wrap counter (avail != used
+ * marks a descriptor available per the packed-ring convention)
+ */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by _num, toggling the avail wrap counter on ring wrap.
+ * typeof temporaries avoid evaluating the macro arguments more than once.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by _num, toggling the used wrap counter on ring wrap */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Fetch up to n received packet segments from an Rx virt queue.
+ *
+ * rxvq    - queue to read from
+ * n       - maximum number of descriptor entries to consume
+ * rp      - output array (at least n entries) of address/length pairs
+ * nb_pkts - out: number of whole packets delivered
+ *
+ * Returns the number of descriptor segments consumed; this exceeds *nb_pkts
+ * when split-ring packets span multiple buffers. Only whole packets are
+ * delivered - a packet whose segments don't all fit within n is left for
+ * the next call. Packed-ring mode assumes in-order delivery from the FPGA.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* All Rx buffers are the same size; use desc[0]'s length to
+		 * detect segmented (jumbo) packets
+		 */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* descriptor is device-used only when both flag bits
+			 * match the current used wrap counter
+			 */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+/*
+ * Return n consumed Rx buffers to the device (avail ring).
+ *
+ * Split ring: bump the cached avail index and publish it.
+ * Packed ring: rewrite n descriptors with buffer address/length; the first
+ * descriptor's flags are written last (after a barrier) so the device never
+ * observes a partially republished batch.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n free Tx descriptors for the caller to fill.
+ *
+ * txvq        - queue to reserve from
+ * n           - number of descriptors requested
+ * first_idx   - out: ring index of the first reserved descriptor
+ * cvq         - out: descriptor table pointer + ring type for the caller
+ * p_virt_addr - out: the queue's packet buffer descriptor array
+ *
+ * Returns the number of descriptors actually reserved (may be < n).
+ * Split ring: availability is computed from the avail/used indices, kept
+ * synchronous by in-order completion. Packed ring: previously outstanding
+ * descriptors (txvq->outs) are handed out first, then the ring is scanned
+ * for device-used descriptors; requires in-order behavior from the FPGA.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Hand out previously reclaimed-but-undelivered descriptors first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* desc->id may be ahead of next_used; reclaim the
+			 * whole span it covers
+			 */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Reclaimed past the request - park the excess in txvq->outs */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Publish n filled Tx packets to the device.
+ *
+ * txvq   - queue to publish on
+ * n      - number of packets
+ * n_segs - per-packet segment counts (split ring advances the descriptor
+ *          index by n_segs[i] per packet)
+ *
+ * Split ring: fill the avail ring, then publish the avail index after a full
+ * barrier. Packed ring: set avail/used flags on each descriptor, writing the
+ * first descriptor's flags last (after a barrier) so the device never sees a
+ * partially published batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the hardware Rx pointer for a queue, polling (10 us steps, up to
+ * 100000 iterations) until the hardware reports a valid value.
+ * On success *index holds the low 16 bits of the pointer.
+ * Returns 0 on success, -1 on register error or poll timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (rx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the hardware Tx pointer for a queue, polling (10 us steps, up to
+ * 100000 iterations) until the hardware reports a valid value.
+ * On success *index holds the low 16 bits of the pointer.
+ * Returns 0 on success, -1 on register error or poll timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	do {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (tx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
/*
 * Describes one DMA-capable memory region by both of its addresses.
 * Used to hand packet buffers and ring structures to the queue setup
 * functions below.
 */
struct nthw_memory_descriptor {
	void *phys_addr; /* physical/IOVA address of the region */
	void *virt_addr; /* host virtual address of the same region */
	uint32_t len;    /* region length in bytes */
};
+
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+#define SPLIT_RING 0
+#define PACKED_RING 1
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
/*
 * Split Ring virtq Descriptor
 * Byte-packed so the in-memory layout matches the descriptor table the
 * device reads; do not add or reorder members.
 */
#pragma pack(1)
struct virtq_desc {
	/* Address (guest-physical). */
	ule64 addr;
	/* Length. */
	ule32 len;
	/* The flags as indicated above. */
	ule16 flags;
	/* Next field if flags & NEXT */
	ule16 next;
};

#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
/* descr phys address must be 16 byte aligned */
/*
 * Packed Ring virtq descriptor: one 16-byte entry carrying address,
 * length, buffer id and the avail/used flag bits.
 */
#pragma pack(push, 16)
struct pvirtq_desc {
	/* Buffer Address. */
	ule64 addr;
	/* Buffer Length. */
	ule32 len;
	/* Buffer ID. */
	ule16 id;
	/* The flags depending on descriptor type. */
	ule16 flags;
};

#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
/*
 * Packed-ring driver/device event suppression area: a descriptor event
 * offset/wrap word followed by an event-flags word (RING_EVENT_FLAGS_*).
 */
struct pvirtq_event_suppress {
	union {
		struct {
			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
		};
		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
	};

	/* phys address must be 4 byte aligned */
	/* NOTE(review): this mid-struct #pragma pack(push, 16) controls member
	 * packing of the following union, not the 4-byte alignment the comment
	 * above asks for — confirm the resulting layout matches the intended ABI.
	 */
#pragma pack(push, 16)
	union {
		struct {
			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
			ule16 reserved : 14; /* Reserved, set to 0 */
		};
		ule16 flags;
	};
};

#pragma pack(pop)
+
/*
 * Common virtq descr
 * Helper macros that hide the split/packed ring layout difference.
 * The _vq argument is expanded once into a shadow local to avoid
 * double evaluation of a side-effecting expression.
 */
/* Set the 'next' chaining field of descriptor 'index'; packed rings have
 * no next field, so this is a no-op for them.
 */
#define vq_set_next(_vq, index, nxt)                \
	do {                                       \
		__typeof__(_vq) (vq) = (_vq); \
		if ((vq)->vq_type == SPLIT_RING)   \
			(vq)->s[index].next = nxt; \
	} while (0)
/* OR additional VIRTQ_DESC_F_* bits into descriptor 'index' flags. */
#define vq_add_flags(_vq, _index, _flgs)                  \
	do {                                           \
		__typeof__(_vq) (vq) = (_vq); \
		__typeof__(_index) (index) = (_index); \
		__typeof__(_flgs) (flgs) = (_flgs); \
		if ((vq)->vq_type == SPLIT_RING)       \
			(vq)->s[index].flags |= flgs;  \
		else if ((vq)->vq_type == PACKED_RING) \
			(vq)->p[index].flags |= flgs;  \
	} while (0)
/* Overwrite descriptor 'index' flags with exactly 'flgs'. */
#define vq_set_flags(_vq, _index, _flgs)                  \
	do {                                           \
		__typeof__(_vq) (vq) = (_vq); \
		__typeof__(_index) (index) = (_index); \
		__typeof__(_flgs) (flgs) = (_flgs); \
		if ((vq)->vq_type == SPLIT_RING)       \
			(vq)->s[index].flags = flgs;   \
		else if ((vq)->vq_type == PACKED_RING) \
			(vq)->p[index].flags = flgs;   \
	} while (0)
+
/*
 * Address+length prefix shared by split and packed descriptors; 16-byte
 * aligned so a pointer to it can alias either ring's descriptor table.
 */
struct nthw_virtq_desc_buf {
	/* Address (guest-physical). */
	ule64 addr;
	/* Length. */
	ule32 len;
} __rte_aligned(16);

/*
 * Ring-type-tagged view of a descriptor table: access through 'b' for the
 * common address/length fields, or 's'/'p' according to vq_type
 * (SPLIT_RING / PACKED_RING).
 */
struct nthw_cvirtq_desc {
	union {
		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
		struct virtq_desc *s; /* SPLIT */
		struct pvirtq_desc *p; /* PACKED */
	};
	uint16_t vq_type; /* SPLIT_RING or PACKED_RING */
};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..ce07d5a8cd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
/*
 * Hex-dump one packet segment to stdout: an optional heading line with
 * pointer and length, then the bytes in rows of 16 with an offset prefix.
 */
static void dump_packet_seg(const char *text, uint8_t *data, int len)
{
	int pos;

	if (text)
		printf("%s (%p, len %i)", text, data, len);

	for (pos = 0; pos < len; pos++) {
		if ((pos % 16) == 0)
			printf("\n%04X:", pos);
		printf(" %02X", data[pos]);
	}
	printf("\n");
}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+static void store_pdrv(struct drv_s *p_drv)
+{
+	if (p_drv->adapter_no > NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	int i;
+	struct drv_s *p_drv = NULL;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		if (g_p_drv[i]) {
+			if (PCIIDENT_TO_DOMAIN(g_p_drv[i]->ntdrv.pciident) ==
+					addr.domain &&
+					PCIIDENT_TO_BUSNR(g_p_drv[i]->ntdrv.pciident) ==
+					addr.bus) {
+				p_drv = g_p_drv[i];
+				break;
+			}
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_drv;
+}
+
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr;
+
+	addr.domain = PCIIDENT_TO_DOMAIN(pciident);
+	addr.bus = PCIIDENT_TO_BUSNR(pciident);
+	addr.devid = PCIIDENT_TO_DEVNR(pciident);
+	addr.function = PCIIDENT_TO_FUNCNR(pciident);
+	return get_pdrv_from_pci(addr);
+}
+
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	}	else {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+	}
+	return p_nthw_dbs;
+}
+
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	enum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		fpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+	}
+	return fpga_profile;
+}
+
/*
 * rte_kvargs handler: parse value_str as an unsigned 32-bit number
 * (decimal, octal or hex, per strtoul() base 0) into *extra_args.
 *
 * @return 0 on success, -1 when value_str or extra_args is NULL.
 *
 * Fix vs. original: strtoul() instead of strtol() — on ILP32 platforms
 * strtol() clamps values above LONG_MAX (e.g. a portmask of 0xffffffff)
 * before the uint32_t conversion.
 */
static int string_to_u32(const char *key_str, const char *value_str,
			 void *extra_args)
{
	(void)key_str; /* unused; signature dictated by the kvargs callback */

	if (!value_str || !extra_args)
		return -1;
	const uint32_t value = (uint32_t)strtoul(value_str, NULL, 0);
	*(uint32_t *)extra_args = value;
	return 0;
}
+
/* One parsed "<port>:<speed>" devarg entry. */
struct port_link_speed {
	int port_id;    /* physical port number */
	int link_speed; /* requested link speed in Mbps */
};

/*
 * rte_kvargs handler: parse "<port>:<link speed Mbps>", e.g. "1:10000".
 * *extra_args is a struct port_link_speed ** cursor; on success the entry
 * is filled in and the cursor advanced to the next slot.
 *
 * @return 0 on success, -1 on malformed input or NULL arguments.
 *
 * Fix vs. original: inputs with no digits before the ':' (e.g. ":10000")
 * were silently accepted as port 0; they are now rejected.
 */
static int string_to_port_link_speed(const char *key_str, const char *value_str,
				     void *extra_args)
{
	(void)key_str; /* unused; signature dictated by the kvargs callback */

	if (!value_str || !extra_args)
		return -1;
	char *sep;
	const uint32_t pid = strtol(value_str, &sep, 10);

	/* require at least one digit, immediately followed by ':' */
	if (sep == value_str || *sep != ':')
		return -1;
	const uint32_t lspeed = strtol(++sep, NULL, 10);
	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;

	pls->port_id = pid;
	pls->link_speed = lspeed;
	++(*((struct port_link_speed **)(extra_args)));
	return 0;
}
+
/*
 * Collect per-queue and aggregate RX/TX statistics for one port into a
 * DPDK rte_eth_stats structure.
 *
 * Pulls fresh counters from the adapter via poll_statistics(), then sums
 * the per-queue software counters kept in internals->rxq_scg[] and
 * internals->txq_scg[].
 *
 * @return 0 on success, -1 when the statistics context is missing or
 *         if_index is out of range.
 */
static int dpdk_stats_collect(struct pmd_internals *internals,
			      struct rte_eth_stats *stats)
{
	unsigned int i;
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
	const int if_index = internals->if_index;
	uint64_t rx_total = 0;
	uint64_t rx_total_b = 0;
	uint64_t tx_total = 0;
	uint64_t tx_total_b = 0;
	uint64_t tx_err_total = 0;

	/* NOTE(review): the bound check uses '>' — if if_index is a zero-based
	 * index into arrays sized NUM_ADAPTER_PORTS_MAX elsewhere, '>=' may be
	 * intended; confirm against those array definitions.
	 */
	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
			if_index > NUM_ADAPTER_PORTS_MAX) {
		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
		return -1;
	}

	/*
	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
	 */
	poll_statistics(internals);

	memset(stats, 0, sizeof(*stats));
	/* Per-queue RX counters, capped at DPDK's per-queue stats array size */
	for (i = 0;
			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
			i++) {
		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
		rx_total += stats->q_ipackets[i];
		rx_total_b += stats->q_ibytes[i];
	}

	/* Per-queue TX counters, including per-queue error counts */
	for (i = 0;
			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
			i++) {
		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
		tx_total += stats->q_opackets[i];
		tx_total_b += stats->q_obytes[i];
		tx_err_total += stats->q_errors[i];
	}

	stats->imissed = internals->rx_missed;
	stats->ipackets = rx_total;
	stats->ibytes = rx_total_b;
	stats->opackets = tx_total;
	stats->obytes = tx_total_b;
	stats->oerrors = tx_err_total;

	return 0;
}
+
/*
 * Zero the per-queue software RX/TX counters of one port and stamp the
 * reset time. The counters are cleared under the driver's statistics
 * mutex so a concurrent poll_statistics() cannot interleave.
 *
 * @return 0 on success, -1 when the statistics context is missing or
 *         n_intf_no is out of range.
 */
static int dpdk_stats_reset(struct pmd_internals *internals,
			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
{
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
	unsigned int i;

	/* NOTE(review): '>' vs '>=' — same range-check question as in
	 * dpdk_stats_collect(); confirm against the port array sizes.
	 */
	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
			n_intf_no > NUM_ADAPTER_PORTS_MAX)
		return -1;

	pthread_mutex_lock(&p_nt_drv->stat_lck);

	/* Rx */
	for (i = 0; i < internals->nb_rx_queues; i++) {
		internals->rxq_scg[i].rx_pkts = 0;
		internals->rxq_scg[i].rx_bytes = 0;
		internals->rxq_scg[i].err_pkts = 0;
	}

	internals->rx_missed = 0;

	/* Tx */
	for (i = 0; i < internals->nb_tx_queues; i++) {
		internals->txq_scg[i].tx_pkts = 0;
		internals->txq_scg[i].tx_bytes = 0;
		internals->txq_scg[i].err_pkts = 0;
	}

	/* remember when totals were last reset */
	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);

	pthread_mutex_unlock(&p_nt_drv->stat_lck);

	return 0;
}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	int eth_speed_num = ETH_SPEED_NUM_NONE;
+
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		eth_speed_num = ETH_SPEED_NUM_10M;
+		break;
+	case NT_LINK_SPEED_100M:
+		eth_speed_num = ETH_SPEED_NUM_100M;
+		break;
+	case NT_LINK_SPEED_1G:
+		eth_speed_num = ETH_SPEED_NUM_1G;
+		break;
+	case NT_LINK_SPEED_10G:
+		eth_speed_num = ETH_SPEED_NUM_10G;
+		break;
+	case NT_LINK_SPEED_25G:
+		eth_speed_num = ETH_SPEED_NUM_25G;
+		break;
+	case NT_LINK_SPEED_40G:
+		eth_speed_num = ETH_SPEED_NUM_40G;
+		break;
+	case NT_LINK_SPEED_50G:
+		eth_speed_num = ETH_SPEED_NUM_50G;
+		break;
+	case NT_LINK_SPEED_100G:
+		eth_speed_num = ETH_SPEED_NUM_100G;
+		break;
+	default:
+		eth_speed_num = ETH_SPEED_NUM_NONE;
+		break;
+	}
+
+	return eth_speed_num;
+}
+
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	int eth_link_duplex = 0;
+
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		break;
+	case NT_LINK_DUPLEX_HALF:
+		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		break;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		break;
+	}
+	return eth_link_duplex;
+}
+
/*
 * DPDK .link_update callback: refresh eth_dev->data->dev_link.
 *
 * Virtual/override ports report link-up purely from the virtio
 * negotiation state; physical ports query the adapter for status,
 * speed and duplex. A stopped device always reports link down.
 *
 * @return 0 always (wait_to_complete is ignored).
 */
static int eth_link_update(struct rte_eth_dev *eth_dev,
			   int wait_to_complete __rte_unused)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	if (eth_dev->data->dev_started) {
		/* Virtual ports have no PHY: link state mirrors virtio negotiation */
		if (internals->type == PORT_TYPE_VIRTUAL ||
				internals->type == PORT_TYPE_OVERRIDE) {
			eth_dev->data->dev_link.link_status =
				((internals->vport_comm ==
				  VIRT_PORT_NEGOTIATED_NONE) ?
				 ETH_LINK_DOWN :
				 ETH_LINK_UP);
			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
			eth_dev->data->dev_link.link_duplex =
				ETH_LINK_FULL_DUPLEX;
			return 0;
		}

		/* Physical port: read live status from the adapter */
		const bool port_link_status =
			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
		eth_dev->data->dev_link.link_status =
			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;

		nt_link_speed_t port_link_speed =
			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
		eth_dev->data->dev_link.link_speed =
			nt_link_speed_to_eth_speed_num(port_link_speed);

		nt_link_duplex_t nt_link_duplex =
			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
		eth_dev->data->dev_link.link_duplex =
			nt_link_duplex_to_eth_duplex(nt_link_duplex);
	} else {
		/* Device stopped: report a down, speed-less link */
		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	}
	return 0;
}
+
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	dpdk_stats_collect(internals, stats);
+	return 0;
+}
+
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	dpdk_stats_reset(internals, p_nt_drv, if_index);
+	return 0;
+}
+
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	uint32_t eth_speed_capa = 0;
+
+	if (nt_link_speed_capa & NT_LINK_SPEED_10M)
+		eth_speed_capa |= ETH_LINK_SPEED_10M;
+	if (nt_link_speed_capa & NT_LINK_SPEED_100M)
+		eth_speed_capa |= ETH_LINK_SPEED_100M;
+	if (nt_link_speed_capa & NT_LINK_SPEED_1G)
+		eth_speed_capa |= ETH_LINK_SPEED_1G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_10G)
+		eth_speed_capa |= ETH_LINK_SPEED_10G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_25G)
+		eth_speed_capa |= ETH_LINK_SPEED_25G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_40G)
+		eth_speed_capa |= ETH_LINK_SPEED_40G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_50G)
+		eth_speed_capa |= ETH_LINK_SPEED_50G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_100G)
+		eth_speed_capa |= ETH_LINK_SPEED_100G;
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
/*
 * DPDK .dev_infos_get callback: report this port's capabilities.
 *
 * Fills in MTU limits (inline FPGA profile enforces a larger minimum),
 * queue counts, link-speed capabilities and RSS offload capabilities.
 *
 * @return 0 always.
 */
static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	dev_info->if_index = internals->if_index;
	dev_info->driver_name = internals->name;
	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
	dev_info->max_mtu = MAX_MTU;
	/* inline profile needs headroom for inserted tunnel headers */
	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
		dev_info->min_mtu = MIN_MTU_INLINE;

	else
		dev_info->min_mtu = MIN_MTU;

	if (internals->p_drv) {
		dev_info->max_rx_queues = internals->nb_rx_queues;
		dev_info->max_tx_queues = internals->nb_tx_queues;

		dev_info->min_rx_bufsize = 64;

		const uint32_t nt_port_speed_capa =
			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
							       n_intf_no);
		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
	}

	dev_info->flow_type_rss_offloads =
		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
	/*
	 * NT hashing algorithm doesn't use key, so it is just a fake key length to
	 * fit testpmd requirements.
	 */
	dev_info->hash_key_size = 1;

	return 0;
}
+
/*
 * Copy one received (possibly multi-segment) virtqueue packet into an
 * mbuf chain, allocating extra chained mbufs from @mb_pool as needed.
 *
 * @param mbuf     First mbuf of the destination chain (already allocated).
 * @param mb_pool  Pool for additional chained mbufs.
 * @param hw_recv  Array of received virtqueue segments; the first entry's
 *                 payload starts after the SG_HDR_SIZE packet header,
 *                 subsequent entries are pure payload.
 * @param max_segs Number of virtqueue segments available in hw_recv.
 * @param data_len Total captured length including the SG_HDR_SIZE header.
 *
 * @return number of virtqueue segments consumed, or -1 on mbuf allocation
 *         failure or a truncated segment list.
 *         NOTE(review): on -1 the caller is expected to free @mbuf (which
 *         frees any chained mbufs with it) — confirm at call sites.
 */
static __rte_always_inline int
copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
		       struct nthw_received_packets *hw_recv, int max_segs,
		       uint16_t data_len)
{
	int src_pkt = 0;
	/*
	 * 1. virtqueue packets may be segmented
	 * 2. the mbuf size may be too small and may need to be segmented
	 */
	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;

	/* set packet length */
	mbuf->pkt_len = data_len - SG_HDR_SIZE;

#ifdef RX_MERGE_SEGMENT_DEBUG
	void *dbg_src_start = hw_recv->addr;
	void *dbg_dst_start = dst;
#endif

	int remain = mbuf->pkt_len;
	/* First cpy_size is without header */
	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
		       remain;

	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */

	/* one iteration per consumed virtqueue segment */
	while (++src_pkt <= max_segs) {
		/* keep track of space in dst */
		int cpto_size = rte_pktmbuf_tailroom(m);

#ifdef RX_MERGE_SEGMENT_DEBUG
		printf("src copy size %i\n", cpy_size);
#endif

		if (cpy_size > cpto_size) {
			/* current mbuf cannot hold the whole segment: fill it,
			 * then chain new mbufs for the remainder
			 */
			int new_cpy_size = cpto_size;

#ifdef RX_MERGE_SEGMENT_DEBUG
			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
			       mbuf->nb_segs - 1,
			       (uint64_t)data - (uint64_t)dbg_src_start,
			       (uint64_t)dst - (uint64_t)dbg_dst_start,
			       new_cpy_size);
#endif
			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
			m->data_len += new_cpy_size;
			remain -= new_cpy_size;
			cpy_size -= new_cpy_size;

			data += new_cpy_size;

			/*
			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
			 * mbuf
			 */
			do {
				m->next = rte_pktmbuf_alloc(mb_pool);
				if (unlikely(!m->next))
					return -1;
				m = m->next;

				/* Headroom is not needed in chained mbufs */
				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
				dst = (char *)m->buf_addr;
				m->data_len = 0;
				m->pkt_len = 0;

#ifdef RX_MERGE_SEGMENT_DEBUG
				dbg_dst_start = dst;
#endif
				cpto_size = rte_pktmbuf_tailroom(m);

				int actual_cpy_size = (cpy_size > cpto_size) ?
						      cpto_size :
						      cpy_size;
#ifdef RX_MERGE_SEGMENT_DEBUG
				printf("new dst mbuf seg - size %i\n",
				       cpto_size);
				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
				       mbuf->nb_segs,
				       (uint64_t)data - (uint64_t)dbg_src_start,
				       (uint64_t)dst - (uint64_t)dbg_dst_start,
				       actual_cpy_size);
#endif

				rte_memcpy((void *)dst, (void *)data,
					   actual_cpy_size);
				m->pkt_len += actual_cpy_size;
				m->data_len += actual_cpy_size;

				remain -= actual_cpy_size;
				cpy_size -= actual_cpy_size;

				data += actual_cpy_size;

				mbuf->nb_segs++;

			} while (cpy_size && remain);

		} else {
			/* all data from this virtqueue segment can fit in current mbuf */
#ifdef RX_MERGE_SEGMENT_DEBUG
			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
			       ", dst off 0x%" PRIX64 "\n",
			       mbuf->nb_segs - 1, cpy_size,
			       (uint64_t)data - (uint64_t)dbg_src_start,
			       (uint64_t)dst - (uint64_t)dbg_dst_start);
#endif
			rte_memcpy((void *)dst, (void *)data, cpy_size);
			m->data_len += cpy_size;
			/* head mbuf's pkt_len was pre-set; only chained mbufs grow it */
			if (mbuf->nb_segs > 1)
				m->pkt_len += cpy_size;
			remain -= cpy_size;
		}

		/* packet complete - all data from current virtqueue packet has been copied */
		if (remain == 0)
			break;
		/* increment dst to data end */
		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
		/* prepare for next virtqueue segment */
		data = (char *)hw_recv[src_pkt]
		       .addr; /* following packets are full data */

#ifdef RX_MERGE_SEGMENT_DEBUG
		dbg_src_start = data;
#endif
		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
			   SG_HW_RX_PKT_BUFFER_SIZE :
			   remain;
#ifdef RX_MERGE_SEGMENT_DEBUG
		printf("next src buf\n");
#endif
	};

	/* ran out of virtqueue segments before the packet was complete */
	if (src_pkt > max_segs) {
		NT_LOG(ERR, ETHDEV,
		       "Did not receive correct number of segment for a whole packet");
		return -1;
	}

	return src_pkt;
}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one (possibly segmented) mbuf chain into a chain of virtqueue
+ * buffers, starting at vq_descr_idx.
+ *
+ * 1. mbuf packet may be segmented
+ * 2. the virtqueue buffer size may be too small and may need to be segmented
+ *
+ * Returns the number of virtqueue segments consumed, or -1 if the packet
+ * did not fit into max_segs virtqueue buffers.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	/* First descriptor starts out holding only the packet header */
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			/* fix: offsets are 64-bit, use PRIX64 (plain %u was UB) */
+			printf("Seg %i: virtq buf first cpy src offs 0x%" PRIX64 ", dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)(data - rte_pktmbuf_mtod(m, char *)),
+			       (uint64_t)(dst -
+			       (char *)vq_bufs[vq_descr_idx].virt_addr),
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				if (++cur_seg_num > max_segs)
+					break;
+
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)(dst -
+				       (char *)vq_bufs[vq_descr_idx]
+				       .virt_addr));
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+			/*
+			 * fix: ran out of segments - bail out now instead of
+			 * copying further into descriptors that were never
+			 * handed to us. The caller sees the same -1 result.
+			 */
+			if (cur_seg_num > max_segs) {
+				NT_LOG(ERR, ETHDEV,
+				       "Did not receive correct number of segment for a whole packet");
+				return -1;
+			}
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			/* fix: offset is a 64-bit value, use PRIX64 (plain %u was UB) */
+			printf("Tx vq buf seg %i: Copy %i bytes - offset 0x%" PRIX64 "\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)(dst -
+			       (char *)vq_bufs[vq_descr_idx].virt_addr));
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	}
+
+	/* Account for the first segment, which the loop above never counted */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK tx_pkt_burst handler for the scatter-gather (SCG) path.
+ * Copies up to MAX_TX_PACKETS mbuf chains into HW virtqueue buffers,
+ * frees the copied mbufs, and returns the number of packets handed
+ * to the virtqueue (0 on back-pressure or shutdown).
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	/* per-packet virtqueue segment counts, needed by the release call */
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* PMD is being torn down - drop everything */
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad runt frames to the 60-byte minimum.
+		 * NOTE(review): data_len is overwritten too - assumes short
+		 * frames are always single-segment; confirm.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	/* Reserve virtqueue descriptors/buffers for all needed segments */
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		/* start a fresh descriptor chain for this packet */
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* Fast path: single mbuf segment fitting in one vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* General path: chain across multiple vq buffers */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		/* data has been copied out - the mbuf can be returned now */
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		/* hand the filled descriptors to HW */
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and IOMMU-map the memory backing one HW virtio queue:
+ * a 1MB control area for the combined descriptor rings followed by
+ * num_descr packet buffers of buf_size bytes each.
+ *
+ * Fast path: one contiguous allocation mapped with identical offset in
+ * HPA and IOVA, never crossing a 1G boundary. If that cannot be
+ * guaranteed, fall back to two separate allocations with explicit VFIO
+ * mappings.
+ *
+ * Returns 0 on success, -1 on failure (all allocations are unwound).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Just allocate 1MB to hold all combined descr rings */
+		size = 0x100000;
+		/* renamed from 'virt' to avoid shadowing the outer variable */
+		void *ctrl_virt = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!ctrl_virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, ctrl_virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			rte_free(ctrl_virt); /* fix: was leaked on this path */
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = ctrl_virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       ctrl_virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* fix: unwind everything allocated on this path */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt); /* fix: don't leak on map failure */
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	/* Packet buffers start right after the 1MB control area */
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/*
+ * Mark a HW virtio queue set as no longer owned by any VF.
+ * Memory release is handled separately (see deallocate_hw_virtio_queues).
+ */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq && hwq->vf_num != 0)
+		hwq->vf_num = 0;
+}
+
+/*
+ * Unmap the VFIO DMA region of a HW virtio queue and free its memory.
+ * Returns 0 on success; -1 if the VFIO unmap fails (memory is then kept).
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	const int vf_num = hwq->vf_num;
+	void *base = hwq->virt_queues_ctrl.virt_addr;
+
+	int res = nt_vfio_dma_unmap(vf_num, base,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(base);
+	return 0;
+}
+
+/* dev_ops tx_queue_release: tear down the HW queue behind one TX queue. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* dev_ops rx_queue_release: tear down the HW queue behind one RX queue. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Running count of HW queues handed out; queues are never returned. */
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	const int first_free = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num > MAX_TOTAL_QUEUES - num_queues_allocated)
+		return -1;
+	num_queues_allocated += num;
+	return first_free;
+}
+
+/*
+ * dev_ops rx_queue_setup: configure one RX queue.
+ * For PORT_TYPE_OVERRIDE only the mempool/buffer size is recorded; for
+ * other port types HW virtio buffers are allocated and IOMMU-mapped and
+ * a managed RX virtqueue is created.
+ * Returns 0 on success, -1 if HW queue allocation fails.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports only record the pool; no HW queue is created */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* Usable mbuf payload = data room minus the reserved headroom */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	/*
+	 * NOTE(review): the result of nthw_setup_managed_rx_virt_queue() is
+	 * not checked - presumably NULL on failure; confirm and propagate an
+	 * error in that case.
+	 */
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * dev_ops tx_queue_setup: configure one TX queue.
+ * Validates the queue id, resolves the HW bypass target id, allocates
+ * and IOMMU-maps HW virtio buffers and creates the managed TX virtqueue.
+ * Returns 0 on success, -1 on invalid id or allocation failure.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * fix: validate BEFORE indexing txq_scg[] (the original checked
+	 * after the first use), and use >= since ids are 0-based - the
+	 * original '>' allowed tx_queue_id == nb_tx_queues.
+	 */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else if (internals->vpq[tx_queue_id].hw_id > -1) {
+		/* virtual port - queue index */
+		tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+	} else if (lag_active) {
+		/* If in LAG mode use bypass 0x90 mode */
+		tx_q->target_id = 0x90;
+	} else {
+		/* Phy port - output/bypass to MAC */
+		tx_q->target_id = (int)(tx_q->port + 0x80);
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* Bind all virtual-port queues to this TX port in the EPP module */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Set the inline-profile MTU on a physical port via the flow engine.
+ * Returns 0 on success, -EINVAL on bad port type/MTU or engine failure.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+
+	if (internals->type != PORT_TYPE_PHYSICAL || mtu < MIN_MTU_INLINE ||
+			mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (flow_set_mtu_inline(flw_dev, internals->port, mtu) != 0)
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * dev_ops mtu_set: program MTU through the EPP module.
+ * Virtual ports: the exception-path queue keeps MAX_MTU while each
+ * virtual-port queue gets the requested MTU. Physical ports: the
+ * exception queue keeps MAX_MTU and the port itself is programmed.
+ * NOTE(review): error convention is mixed - nthw_epp_set_mtu() results
+ * are returned as-is while validation failures return -EINVAL; confirm
+ * callers accept both signs.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int retval = EINVAL;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* Apply the requested MTU to every virtual-port queue */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		/* Then program the physical port itself */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* dev_ops rx_queue_start: HW queues run continuously; only update state. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* dev_ops rx_queue_stop: bookkeeping only; HW is not touched. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* dev_ops tx_queue_start: bookkeeping only; HW is not touched. */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* dev_ops tx_queue_stop: bookkeeping only; HW is not touched. */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* dev_ops mac_addr_remove: zero one slot of the per-port MAC table. */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+		return;
+	}
+
+	const struct pmd_internals *const internals =
+			dev->data->dev_private;
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+}
+
+/* dev_ops mac_addr_add: store a MAC in one slot of the per-port table. */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		eth_addrs[index] = *mac_addr;
+		return 0;
+	}
+
+	const struct pmd_internals *const internals =
+			dev->data->dev_private;
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+	return -1;
+}
+
+/* dev_ops mac_addr_set: the primary MAC lives at index 0 of the table. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * dev_ops set_mc_addr_list: record the multicast filter list in the
+ * per-port table; unused trailing slots are cleared.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < nb_mc_addr; i++)
+		mc_addrs[i] = mc_addr_set[i];
+	for (; i < NUM_MULTICAST_ADDRS_PER_PORT; i++)
+		(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+
+	return 0;
+}
+
+/*
+ * dev_ops dev_configure: no HW programming needed here; record that
+ * probing finished and reflect the always-on promiscuous mode.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode.
+	 * fix: was `promiscuous ^= ~promiscuous`, which sets the field to
+	 * 0xFF - DPDK treats data->promiscuous as a plain 0/1 flag.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * dev_ops dev_start: for physical ports, wait (up to ~5 s) for the FPGA
+ * port to report link before allowing traffic, then apply any requested
+ * loopback mode. Virtual/override ports are simply reported link-up.
+ * Always returns 0, even on link timeout.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		/* poll every 100 ms; 50 iterations gives the 5 s timeout */
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		/* lpbk_mode is a bit mask: bit 0 = host-side, bit 1 = line-side */
+		if (internals->lpbk_mode) {
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
+		struct ntnic_tx_queue *tx_q = internals->txq_scg;
+
+		uint q;
+
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(rx_q[q].vq);
+
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(tx_q[q].vq);
+
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
/* flow_ops_get callback: expose the driver's rte_flow ops table; always 0. */
static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_ops **ops)
{
	*ops = &_dev_flow_ops;
	return 0;
}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * Ethdev callback table shared by all ntnic ports.  Deliberately not
 * 'const': .mtu_set is patched in at probe time when the FPGA exposes an
 * EPP module (see the fpga_info->mp_nthw_epp check in nthw_pci_dev_init).
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL, /* replaced with dev_set_mtu at probe when EPP is present */
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL, /* device is always promiscuous; cannot disable */
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
/*
 * Representor bookkeeping shared across probes.  NOTE(review): field
 * meanings inferred from names/usage in nthw_pci_dev_init - confirm:
 * the backing PF PCI device, the parsed representor devargs, per-virtual-
 * port queue counts, and the ethdev port id of the backing PF.
 */
static struct {
	struct rte_pci_device *vpf_dev;
	struct rte_eth_devargs eth_da;
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
	uint16_t pf_backer_port_id;
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Tear down all queue resources of every port instance and detach the
+ * exception-path VFIO mapping.  The eth_dev argument is unused: teardown
+ * walks the global pmd_intern_base list, so a single call releases
+ * everything (also invoked from the SIGINT shutdown thread with a dummy
+ * ethdev).  Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	while (internals) {
+		/* Release Tx then Rx virt-queues in reverse creation order */
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release virt-queues registered separately by the VF/vDPA path */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: only SIGINT is trapped; any other signal is forwarded to
+ * the previously installed handler and re-raised.  On SIGINT it merely sets
+ * the flag polled by shutdown_thread().
+ * NOTE(review): kill_pmd is written from signal context — it should
+ * presumably be declared volatile sig_atomic_t; confirm its declaration.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig != SIGINT) {
+		signal(sig, previous_handler);
+		raise(sig);
+		return;
+	}
+	kill_pmd = 1;
+}
+
+/*
+ * Background thread: waits for ctrl+C, performs full PMD teardown, then
+ * restores the original SIGINT handler and re-raises the signal so the
+ * application terminates through its normal path.  The dummy ethdev is
+ * acceptable because nthw_pci_dev_deinit() ignores its argument.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/* Install the SIGINT hook and shutdown thread, and estimate the TSC
+ * frequency once for the 1-second statistics-update interval.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL);
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	/* cycles elapsed over ~10 ms, scaled by 100 => approx cycles/second */
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point.  Validates the EAL environment (PCI support,
+ * IOMMU-backed VFIO, hugepages), rejects secondary endpoints of a
+ * bifurcated PCIe device, then delegates to nthw_pci_dev_init() and
+ * installs the SIGINT shutdown handler.  Returns 0 on success, negative
+ * on failure.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		/* NOTE(review): message says "needs hugepages" but this is the
+		 * PCI-support check — looks copy-pasted from the hugepage check
+		 * below; confirm intended wording.
+		 */
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	/* IOVA as VA is allowed but PA is recommended; warn only */
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/* NT200A01/NT50B01 expose two PCIe endpoints when bifurcated; only
+	 * the primary endpoint (subsystem device id 0x01) is driven here.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove entry point: generic ethdev removal with our deinit hook. */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+}
+
+/*
+ * nt_log backend "init": registers one EAL logtype per NT log module at
+ * INFO level so NT_LOG output flows through the DPDK logging framework.
+ * NOTE(review): rte_log_set_global_level(RTE_LOG_DEBUG) overrides the
+ * application's global log level from inside the PMD — confirm intended.
+ */
+static int nt_log_init_impl(void)
+{
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (int i = NT_LOG_MODULE_GENERAL; i < NT_LOG_MODULE_END; ++i) {
+		int index = NT_LOG_MODULE_INDEX(i);
+
+		nt_log_module_logtype[index] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[index],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * nt_log backend "log": maps an NT log level/module pair onto the
+ * corresponding rte_log level/logtype and forwards the message.
+ * Unknown NT levels fall back to RTE_LOG_DEBUG; out-of-range modules are
+ * passed through as a raw rte logtype id.
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level = 0;
+	uint32_t rte_module = 0;
+
+	switch (level) {
+	case NT_LOG_ERR:
+		rte_level = RTE_LOG_ERR;
+		break;
+	case NT_LOG_WRN:
+		rte_level = RTE_LOG_WARNING;
+		break;
+	case NT_LOG_INF:
+		rte_level = RTE_LOG_INFO;
+		break;
+	default:
+		rte_level = RTE_LOG_DEBUG;
+	}
+
+	rte_module =
+		(module >= NT_LOG_MODULE_GENERAL &&
+		 module < NT_LOG_MODULE_END) ?
+		(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)] : module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/* nt_log backend "is_debug": 1 if the module's logtype is at DEBUG level,
+ * 0 if not, -1 for an out-of-range module id.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+	int index = NT_LOG_MODULE_INDEX(module);
+
+	return rte_log_get_level(nt_log_module_logtype[index]) == RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* Constructor: installs the DPDK-backed logger implementation into nt_log
+ * before any PMD code runs.
+ */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver registration: requires BAR mapping; bound via vfio-pci. */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record a virt-queue in the global release table so nthw_pci_dev_deinit()
+ * can free it with the matching release call (rx vs tx, managed vs not).
+ * Returns 0 on success, -1 if the table (MAX_REL_VQS entries) is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int i;
+
+	/* Claim the first free slot */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq == NULL) {
+			rel_virt_queue[i].vq = vq;
+			rel_virt_queue[i].rx = rx;
+			rel_virt_queue[i].managed = managed;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/* Remove a virt-queue from the release table; -1 if it was not found. */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int i;
+
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq == vq) {
+			rel_virt_queue[i].vq = NULL;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/* Find the port instance bound to the given vhost id, or NULL. */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		if (intern->vhid == vhid)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/* Find the port instance bound to the given vhost socket path, or NULL.
+ * NOTE(review): the printf here looks like leftover debug output — consider
+ * converting it to NT_LOG(DBG, ...).
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		printf("Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path,
+		       strcmp(intern->vhost_path, path));
+		if (strcmp(intern->vhost_path, path) == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues" devargs value of the form "...[p0:n0,p1:n1;...]"
+ * and fill portq[] with the queue count per port.  The input string is
+ * modified in place (the '[' .. ']' span is NUL-terminated and cut up by
+ * strtok).  Entries outside 0 <= port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED
+ * or 0 < nvq < MAX_QUEUES are silently ignored.
+ * NOTE(review): strtok() is not reentrant — fine if parsing only happens
+ * at single-threaded probe time; confirm.  Also confirm the nvq upper
+ * bound is meant to exclude MAX_QUEUES itself.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+
+	/* Find the opening bracket; nothing to do if absent */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Scan backwards for the matching closing bracket */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Tokens are "port:nvq" pairs separated by ',' or ';' */
+	tok = strtok(str, ",;");
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok(NULL, ",;");
+	}
+}
+
+/*
+ * Parse the PF's devargs for representor configuration.  Extracts and
+ * strips the non-standard "portqueues[...]" extension first (DPDK's
+ * devargs parser cannot handle it after "representor=[x]"), then lets
+ * rte_eth_devargs_parse() read the representor list.  On success the
+ * global 'rep' state records the owning PF device.  Returns the number of
+ * representor ports requested, or -1 on parse failure.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			/* rte_eth_devargs_parse returns a negative errno */
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual (representor) port.  Assigns the
+ * representor id either from the PF's parsed representor list (in
+ * declaration order, tracked by the static next_rep_p counter) or from the
+ * VF number.  *n_vq is set to the queue count configured for that
+ * representor via "portqueues", defaulting to 1.  Returns NULL on
+ * allocation failure.
+ * NOTE(review): next_rep_p is static non-atomic state — assumes probe-time,
+ * single-threaded calls; confirm.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		/* Representor declared on the PF: take the next id in order */
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		/* Otherwise derive the id from the VF number */
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* Per-representor queue count from "portqueues", default 1 */
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Device name accessor that tolerates a NULL device. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	if (dev && dev->device.name)
+		return dev->device.name;
+	return NULL;
+}
+
+/* Devargs keys accepted when creating a virtual port */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and wire up one virtual-port (representor/vDPA backend) ethdev:
+ * parse per-port devargs, allocate the ethdev and its private data,
+ * allocate HW queues, attach to the flow device (creating it for the
+ * VSWITCH profile, or sharing the physical port's for inline), start the
+ * vDPA relay on the vhost socket path, and link the instance into the
+ * global pmd_intern_base list.  Returns 0 on success, -1 on failure.
+ * NOTE(review): some failure paths below return -1 directly instead of
+ * 'goto error', leaking already-allocated resources; and the error path
+ * frees 'internals' (the ethdev's dev_private) without releasing the
+ * ethdev itself — confirm intended cleanup semantics.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* Optional per-port devargs: "vlan=<id>" and "sep=<0|1>" */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live between the physical ports and the tunnel range */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		/* NOTE(review): direct return here leaks 'data' (and the
+		 * allocated ethdev) — should this be 'goto error'?
+		 */
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* VSWITCH: each virtual port gets its own flow device */
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* Inline: share the flow device of the underlying physical
+		 * port (in_port) and just add our queues to it
+		 */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* vhost socket: shared dir by default, per-port subdir with "sep" */
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Replace the ethdev's data area with our own copy */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append to the global instance list */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Always "receives" zero packets */
+	return 0;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Always "transmits" zero packets */
+	return 0;
+}
+
+/*
+ * Create the virtual-function DPDK ethdev for a VF PCI device and select
+ * burst functions: dummy no-ops for PORT_TYPE_OVERRIDE (traffic flows via
+ * VF/vDPA, not SCG), real SCG bursts otherwise.  Returns 0 on success.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Remove the virtual-function ethdev: shut down all vDPA devices, free the
+ * port's private data and data area, and release the ethdev port.
+ * Returns 0 on success, -1 if no device/ethdev is found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* look up the ethdev entry allocated for this device */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Hash-distribution percentages assigned to port 0 */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* Port-group indices used with lag_set_config/lag_set_port_block */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* State-machine action: nothing to do (state unchanged) */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Distribute Tx 50/50 between both ports */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Send all Tx on port 0 */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Send all Tx on port 1 */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/* Both links down: disable Tx on both port and hash groups */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	int retval = 0;
+
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
+	return retval;
+}
+
+/* Query the physical link state of one LAG member port */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/* Combined link state: bit 0 = port 0 up, bit 1 = port 1 up
+ * (matches enum lag_state_e encoding)
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	uint8_t port0 = lag_get_link_status(config, 0);
+
+	uint8_t port1 = lag_get_link_status(config, 1);
+
+	uint8_t status = (port1 << 1 | port0);
+	return status;
+}
+
+/*
+ * Active/backup mode: route all Tx to the primary port and block Rx on the
+ * backup port.  Returns the accumulated status of the two config calls.
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->primary_port == 0) {
+		/* If port 0 is the active primary, it takes 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active primary, port 0 takes 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup mode: fail over — route all Tx to the backup port and
+ * block Rx on the (down) primary port.
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->backup_port == 0) {
+		/* If port 0 is the active backup, it takes 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active backup, port 0 takes 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup monitor loop (runs on the LAG management thread).  Polls
+ * the primary link every 500 ms; fails over to the backup port when the
+ * primary drops and fails back when it recovers.  Exits when
+ * config->lag_thread_active is cleared.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One transition of the active/active link state machine */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/* Full transition table: all 16 (src, dst) combinations are covered, so
+ * lookup_action() cannot miss for valid states.
+ */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/* Find the action for a state transition; NULL if no table entry matches. */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	uint32_t i;
+
+	for (i = 0; i < sizeof(actions) / sizeof(struct lag_action_s); i++) {
+		if (actions[i].src_state == current_state &&
+				actions[i].dst_state == new_state)
+			return actions[i].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active (802.3ad-like) monitor loop.  Starts balanced 50/50 with
+ * no blocked ports, then every 500 ms maps the (old, new) link state pair
+ * through the transition table and applies the resulting action.
+ * NOTE(review): the lookup_action() result is called without a NULL check.
+ * The table currently covers all 16 transitions so this cannot fire, but a
+ * defensive check would make the loop robust against table edits.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry point (spawned per lag_config in
+ * nthw_pci_dev_init).  Dispatches to the monitor loop matching the
+ * configured bonding mode; unsupported modes terminate the thread.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+#define SG_HDR_SIZE 12
+
+/*
+ * Rx scatter-gather packet descriptor header.  Must be exactly SG_HDR_SIZE
+ * (12) bytes; verified by the compile-time check below.
+ * Field semantics follow the FPGA descriptor layout - names suggest:
+ * cap_len = captured length, fid = flow id, ip_prot = IP protocol,
+ * port = Rx port (TODO confirm against the FPGA register spec).
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/*
+ * Tx scatter-gather packet descriptor header.  Must be exactly SG_HDR_SIZE
+ * (12) bytes; verified by the compile-time check below.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size. */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+typedef void *handle_t;
+
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_rx_descr;
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_tx_descr;
+	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count;
+};
+
+/* Configuration related to LAG (link aggregation / bonding) management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* lag_management() thread */
+	uint8_t lag_thread_active; /* cleared to request thread shutdown */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev;
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode;
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config;
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	struct pmd_internals *next;
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+struct rte_flow {
+	void *flw_hdl;
+	int used;
+	uint32_t flow_stat_id;
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+static int convert_tables_initialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal flow_error into a DPDK rte_flow_error.
+ * A NULL error pointer is silently accepted.  Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+	error->type =
+		(flow_error->type == FLOW_ERROR_NONE ||
+		 flow_error->type == FLOW_ERROR_SUCCESS) ?
+		RTE_FLOW_ERROR_TYPE_NONE :
+		RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map a flow MARK to a flow stat id.
+ *
+ * Open-addressing scheme: the low bits of the mark pick a starting slot in
+ * flow_stat_id_map[] and the search probes linearly (with wrap-around)
+ * until a free slot is found.  The slot stores mark + 1 so that 0 can
+ * denote "free".  Caller must hold flow_lock.
+ *
+ * NOTE(review): if all MAX_COLOR_FLOW_STATS slots are occupied this loop
+ * never terminates - callers must bound the number of live flow stat ids.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	/* MAX_COLOR_FLOW_STATS is a power of two, so the mask wraps the index. */
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow stat id for @mark under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/*
+ * Release a flow stat id slot.  Out-of-range ids are ignored.
+ * Caller must hold flow_lock.
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Interpret a raw packet byte buffer as a chain of flow elements.
+ *
+ * Recognizes Ethernet, stacked VLAN (incl. QinQ), IPv4/IPv6,
+ * ICMP/TCP/UDP/SCTP and GTPv1-U (incl. extension headers).  Each
+ * recognized header is appended to @out with spec pointing into @data and,
+ * when @preserve is non-NULL, mask pointing at the same offset in
+ * @preserve.  The list is terminated with FLOW_ELEM_TYPE_END.
+ *
+ * Returns the number of elements written (incl. the END element), or -1
+ * when the buffer is truncated, has trailing bytes, or contains an
+ * unsupported protocol.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - possibly stacked (QinQ) */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - the IP version nibble must agree with the ethertype */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field (byte offset 9) */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field (byte offset 6) */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U is carried over a well-known UDP destination port */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/*
+				 * The extension length byte itself must lie
+				 * inside the buffer, and a zero length would
+				 * make this loop read data[pkti - 1] and spin
+				 * without advancing; reject both as malformed.
+				 */
+				if (size - pkti < 1)
+					return -1;
+
+				size_t ext_len = data[pkti] * 4;
+
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Any unconsumed trailing bytes mean an unrecognized layout */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Copy the supported rte_flow attributes (group, priority) into the
+ * converter's flow_attr; all other fields are zeroed.  A NULL @attr
+ * yields an all-zero attribute.  Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	struct flow_attr *dst = &attribute->attr;
+
+	memset(dst, 0x0, sizeof(*dst));
+
+	if (attr != NULL) {
+		dst->group = attr->group;
+		dst->priority = attr->priority;
+	}
+
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an rte_flow action list into the filter-API representation used by
+ * the vSwitch (full offload) profile.
+ *
+ * action:       receives the converted action array plus side tables
+ *               (RSS config, tunnel definition, mark).
+ * actions:      rte_flow actions, terminated by RTE_FLOW_ACTION_TYPE_END.
+ * max_elem:     capacity of action->flow_actions[].
+ * flow_stat_id: out; flow/color statistics id taken from the MARK action,
+ *               or allocated here when the caller supplied none.
+ *
+ * Returns 0 on success, -1 on unknown action type or table overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Sentinel meaning "no MARK action seen yet" */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1: /* unreachable: guarded by type >= 0 above */
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* Fix: PRIX64 requires a uint64_t argument, not
+				 * unsigned long long (distinct type on LP64).
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an rte_flow action list into the filter-API representation used by
+ * the inline profile. Queue indices are shifted by 'queue_offset' so that
+ * VF/vDPA ports address the queues actually allocated to them.
+ *
+ * Returns 0 on success, a negative value on conversion error or overflow.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	/* Consistency with create_action_elements_vswitch(): guard against a
+	 * NULL action list before the first dereference.
+	 */
+	if (!actions)
+		return -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* Fix: PRIX64 requires a uint64_t argument, not
+				 * unsigned long long (distinct type on LP64).
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Shift into the queue range owned by this port */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+/* Capacity of the static rte_flow handle table below */
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+/* The PMD-local color-stat count must track the filter API's definition */
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Statically allocated pool of flow handles handed out to applications */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/* True when 'flow' does not point into the static nt_flows[] pool, i.e. it is
+ * really a driver flow handle that was typecast to struct rte_flow *.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *lo = &nt_flows[0];
+	const void *hi = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *p = flow;
+
+	return !(p >= lo && p <= hi);
+}
+
+/*
+ * Convert RTE flow attr/items/actions into the internal filter representation.
+ * Fills 'attribute', 'match' and 'action'; 'flow_stat_id' is produced by the
+ * vSwitch-profile action conversion. Returns 0 on success, -1 with 'error'
+ * populated on failure.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/* Bug fix: validate 'dev' before any dereference; it was previously
+	 * dereferenced for fpga_info and vpq before the NULL test.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy op. Handles both table-backed flows (entries of
+ * nt_flows[]) and typecast driver handles returned for inline group > 0.
+ * Returns the filter-API result; 0 on success.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/* Typecast handle: not a struct rte_flow, so no stats to clear */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+	} else {
+		res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+		convert_error(error, &flow_error);
+
+		rte_spinlock_lock(&flow_lock);
+		delete_flow_stat_id_locked(flow->flow_stat_id);
+		flow->used = 0;
+		rte_spinlock_unlock(&flow_lock);
+
+		/* Clear the flow statistics if successfully destroyed.
+		 * Bug fix: only done for table-backed flows - writing these
+		 * fields through a typecast handle would scribble over a
+		 * foreign driver object.
+		 */
+		if (res == 0) {
+			flow->stat_pkts = 0UL;
+			flow->stat_bytes = 0UL;
+			flow->stat_tcp_flags = 0;
+		}
+	}
+
+	return res;
+}
+
+/* rte_flow validate op: convert the RTE flow specification and ask the
+ * filter API whether it could be programmed, without creating anything.
+ * Returns 0 when valid, -EINVAL on conversion failure, or the (negative)
+ * filter-API result.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	int ret = flow_validate(internals->flw_dev, match.flow_elem,
+				action.flow_actions, &flow_error);
+	if (ret < 0)
+		convert_error(error, &flow_error);
+
+	return ret;
+}
+
+/*
+ * rte_flow create op.
+ *
+ * Inline profile with group > 0 returns the filter-API handle typecast to
+ * struct rte_flow * (no nt_flows[] entry, no per-flow stats). All other
+ * flows are backed by a free slot of the static nt_flows[] pool.
+ * Returns NULL on failure with 'error' populated.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline, group > 0: hand the raw driver handle back to the caller */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Reserve a free slot in the pool under the lock; the (slow) driver
+	 * call below is then made outside the lock.
+	 */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Driver rejected the flow: release slot and stat id */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (all-port) statistics refresh */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull RX/TX/color statistics increments from the adapter into the PMD
+ * counters. Rate limited: per port at most once per second, and the global
+ * color-counter pass at most once per second across all ports.
+ * Returns 0 on success (including "too early"), -1 on bad arguments.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/* NOTE(review): upper bound uses '>' - if the per-port arrays hold
+	 * NUM_ADAPTER_PORTS_MAX entries this admits an out-of-range index;
+	 * confirm whether '>=' was intended.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold color-counter deltas into the per-flow statistics */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* rte_flow query op: only RTE_FLOW_ACTION_TYPE_COUNT is supported, returning
+ * the accumulated packet/byte counters (optionally resetting them).
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* Typecast handles carry no PMD-side statistics */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Refresh the counters before reporting them */
+	poll_statistics(internals);
+
+	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+
+	struct rte_flow_query_count *qcnt = data;
+
+	if (qcnt) {
+		if (flow) {
+			qcnt->hits = flow->stat_pkts;
+			qcnt->hits_set = 1;
+			qcnt->bytes = flow->stat_bytes;
+			qcnt->bytes_set = 1;
+
+			if (qcnt->reset) {
+				flow->stat_pkts = 0UL;
+				flow->stat_bytes = 0UL;
+				flow->stat_tcp_flags = 0;
+			}
+		} else {
+			qcnt->hits_set = 0;
+			qcnt->bytes_set = 0;
+		}
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug helper: dump every field of a restore-info tunnel descriptor.
+ * Pure sequential logging; the statement order is the output format.
+ */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/* Action template returned by tunnel_decap_set; slot [1] is patched to the
+ * matching decap action per call (shared static, not per-flow).
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/* rte_flow tunnel_decap_set op: hand back the PMD action pair (TUNNEL_SET +
+ * decap) for the given tunnel type. Only VXLAN is supported.
+ *
+ * NOTE(review): mutates and returns the shared static _pmd_actions[] array,
+ * so concurrent callers with different tunnel types would race - confirm the
+ * call context is single-threaded.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* Single match item returned by tunnel_match: the PMD-private TUNNEL item */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/* rte_flow tunnel_match op: expose the PMD-private TUNNEL match item that
+ * applications must prepend to flows on tunnel-offloaded traffic.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/* rte_flow get_restore_info op: reconstruct the outer-tunnel description for
+ * a partially offloaded packet from the FDIR mark the FPGA placed in the
+ * mbuf (port id in bits 31:24, stat/tunnel id in bits 23:0).
+ * Returns 0 with 'info' filled, -EINVAL when the mbuf carries no usable mark
+ * or no tunnel definition is found.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed values: actual outer TTL/TOS are not recovered here */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/* rte_flow tunnel_action_decap_release op: nothing to free - the actions
+ * handed out by tunnel_decap_set are statically allocated.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow tunnel_item_release op: nothing to free - the item handed out by
+ * tunnel_match is statically allocated.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow driver ops exported by this PMD; flush is not implemented */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow from pre-converted attribute/match/action structures
+ * (the cnv_* types presumably hold the converted rte_flow request —
+ * confirm against the implementation).
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One (RTE flag, NT flag) pair used by the RSS translation table below. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to a { RTE_<name>, NT_<name> } pair for identically-named flags. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * RTE_ETH_RSS_* <-> NT_ETH_RSS_* flag translation table.
+ * NOTE(review): only read within this file; looks like it could be
+ * static const — verify no other translation unit references it.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Look up the NT RSS flag corresponding to a DPDK RSS flag.
+ * Returns a pointer to the NT value, or NULL when there is no mapping.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+	return NULL; /* no mapping found */
+}
+
+/*
+ * Reverse lookup: the DPDK RSS flag corresponding to an NT RSS flag.
+ * Returns a pointer to the RTE value, or NULL when there is no mapping.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+	return NULL; /* no mapping found */
+}
+
+/*
+ * Translate a DPDK RSS hash-field bit mask into the NT representation.
+ * Bits with no NT counterpart are logged and skipped, so the result is
+ * the supported subset of the requested fields.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	/* unsigned int instead of non-standard 'uint'; %u matches the type */
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate an NT RSS field set back into DPDK RSS hash defines.
+ * Every NT bit is expected to have an RTE mapping (NT flags are a
+ * subset of the RTE options), hence the assert.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	/* unsigned int instead of non-standard 'uint' for portability */
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_HSHCONFIG_H__
+#define __NTNIC_HSHCONFIG_H__
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* __NTNIC_HSHCONFIG_H__ */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* Bit 31 set, all others clear; used to tag egress profile/meter ids. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/*
+ * Linear search of the per-device meter-profile list.
+ * Returns the matching profile or NULL when absent.
+ */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *p;
+
+	LIST_FOREACH(p, &dev_priv->mtr_profiles, next) {
+		if (p->profile_id == meter_profile_id)
+			return p;
+	}
+
+	return NULL;
+}
+
+/*
+ * rte_mtr meter_profile_add (vswitch profile).
+ * Profiles are accepted on virtual ports and for egress on any port;
+ * ingress metering on physical ports is rejected.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* [%s:%u] is file:line elsewhere in this patch; was __func__ twice */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr meter_profile_delete (vswitch profile).
+ * Removes and frees the profile; ENODEV when the id is unknown.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	/* [%s:%u] is file:line elsewhere in this patch; was __func__ twice */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/*
+ * Linear search of the per-device meter list.
+ * Returns the matching meter or NULL when absent.
+ */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *m;
+
+	LIST_FOREACH(m, &dev_priv->mtrs, next) {
+		if (m->mtr_id == mtr_id)
+			return m;
+	}
+
+	return NULL;
+}
+
+/* A rate split into an integer part plus a fractional part in 1/1024 units. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts byte/s to bytes/period in the form integer + fractional/1024.
+ * The period depends on the clock frequency and other parameters which
+ * being combined give a multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps * 10^-12
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /* 10^12 picoseconds per second */
+
+	res.integer = dividend / divisor;
+	const uint64_t remainder = dividend % divisor;
+
+	res.fractional = 1024ull * remainder / divisor;
+	return res;
+}
+
+/* bytes/s -> bytes/period for physical ports; period is 8 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* bytes/s -> bytes/period for virtual ports; period is 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr meter_enable (vswitch profile).
+ * Programs the meter's profile into hardware: EPP QoS for egress
+ * (virtual or physical port variant), DBS TX QoS for ingress.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/*
+	 * One-shot flag for the global ingress QoS rate setup.
+	 * NOTE(review): file-lifetime static shared by all devices and not
+	 * thread-safe — confirm single-threaded configuration is guaranteed.
+	 */
+	static int ingress_initial;
+
+	/* NOTE(review): second __func__ looks like it should be __FILE__,
+	 * cf. the "[%s:%u]" convention used in ntnic_filter.c.
+	 */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* a zero rate would disable; clamp to smallest step */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* a zero rate would disable; clamp to smallest step */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Disable the ingress DBS TX QoS shaping for this port (rate and burst 0). */
+static void disable(struct pmd_internals *dev_priv)
+{
+	/* [%s:%u] is file:line elsewhere in this patch; was __func__ twice */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr meter_disable (vswitch profile).
+ * Clears the hardware QoS programming for the meter: EPP for egress,
+ * DBS for ingress.  The meter object itself is kept.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	/* NOTE(review): second __func__ looks like it should be __FILE__ */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * MTR object create (vswitch profile).
+ * Links a new meter object to an existing profile and, when
+ * params->meter_enable is set, programs it into hardware immediately.
+ * Ingress meters on physical ports are rejected.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	/* NOTE(review): second __func__ looks like it should be __FILE__ */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/*
+ * MTR object destroy (vswitch profile).
+ * Clears the hardware QoS programming for the meter, then unlinks
+ * and frees the object.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	/* [%s:%u] is file:line elsewhere in this patch; was __func__ twice */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		/* was EEXIST; ENODEV matches "does not exist" as in
+		 * eth_meter_profile_delete
+		 */
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr capabilities_get (inline/FLM profile).
+ * Reports color-blind, byte-mode, RFC 2698 metering limits backed by
+ * the FLM; fails when the flow device does not support metering.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_add (inline/FLM profile).
+ * Only RFC 2698 byte mode with equal committed/peak rates is accepted;
+ * the profile is offloaded via flow_mtr_set_profile().
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	/* 'error' is used throughout, so the __rte_unused marker was wrong */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete (inline/FLM profile).
+ * Zeroes the profile slot in hardware; id is only range-checked.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	/* all three parameters are used, so __rte_unused markers were wrong */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add (inline/FLM profile).
+ * Only the fixed policy green=pass, yellow=drop, red=drop is accepted
+ * (green may be END, VOID+END or PASSTHRU+END).
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete (inline/FLM profile).
+ * Range-check only; nothing is torn down in hardware here.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	/* policy_id and error are both used; only dev is genuinely unused */
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr create (inline/FLM profile).
+ * Accepts only shared, enabled, color-blind meters with green-only
+ * stats, then offloads via flow_mtr_create_meter().
+ * NOTE(review): the profile id is range-checked against
+ * flow_mtr_meter_policy_n_max() — confirm profiles and policies
+ * really share the same maximum.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy (inline/FLM profile).
+ * Removes the offloaded meter via flow_mtr_destroy_meter().
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	/* 'error' is used below, so the __rte_unused marker was wrong */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update (inline/FLM profile), repurposed as a bucket
+ * adjustment: bit 63 of the mask must be set as an opt-in flag and the
+ * low 32 bits carry the adjust value handed to flm_mtr_adjust_stats().
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* strip the flag bit, keep the value bits */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read (inline/FLM profile).
+ * Only green packet/byte counters are filled in; 'clear' resets the
+ * hardware counters after reading.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* rte_mtr ops for the vswitch FPGA profile (no capabilities/policy ops). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* rte_mtr ops for the inline (FLM) FPGA profile. */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_eth mtr_ops_get callback: select the rte_mtr ops table matching
+ * the adapter's FPGA profile.  Returns -1 (and logs) for unsupported
+ * profiles; 'ops' receives a const struct rte_mtr_ops pointer.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/*
+ * rte_eth mtr_ops_get callback; stores a const struct rte_mtr_ops
+ * pointer into 'ops'.  Returns 0 on success, -1 for unsupported
+ * FPGA profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Book-keeping for one exposed vDPA port (one VF). */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;	/* vhost device id once a guest attaches */
+	uint32_t index;	/* base HW ring index */
+	uint32_t host_id;	/* VF number */
+	uint32_t rep_port;	/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;	/* rte_vhost driver flags (RTE_VHOST_USER_CLIENT) */
+	struct rte_pci_addr addr;
+};
+
+/* All registered ports; nb_vpda_devcnt counts the used leading entries. */
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate (@vdpa_dev, @queue_id) into the HW ring index, host id (VF
+ * number) and representor port recorded for that port at init time.
+ * @rx selects which queue count (rxqs/txqs) bounds @queue_id.
+ * Returns 0 on success, -1 if the device or queue is unknown.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+			} else {
+				/* Fixed: the Tx branch previously logged the
+				 * Rx queue count (rxqs) instead of txqs.
+				 */
+				if (queue_id >= vport[i].txqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+			}
+			/* Rx and Tx share the same base index mapping. */
+			*hw_index = vport[i].index + queue_id;
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a vDPA port backed by VF @vdev and start its vhost-user client
+ * on @socket_path.
+ * @index:    base HW ring index for this port's queues
+ * @rep_port: in-port override used on Tx
+ * @vhid:     out: slot number assigned to this port
+ * Returns the result of nthw_vdpa_start() (0 on success), or -1 when the
+ * vDPA device cannot be found or the port table is full.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/* Bounds check added: without it registering more than
+	 * MAX_VDPA_PORTS ports would write past the fixed-size vport[].
+	 */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Cannot register more than %lu vDPA ports\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Tear down the vhost-user driver for a registered port and free its slot.
+ *
+ * NOTE(review): the function returns after handling the first slot with a
+ * non-empty ifname, so each call closes at most one port — confirm callers
+ * invoke it once per registered port if every port must be released.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* mark the slot free */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Debug aid: print the name of every virtio feature bit set in @features. */
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		/* equality holds exactly when this entry's bit is set */
+		if ((1ULL << virt_features[i].id) ==
+				(features & (1ULL << virt_features[i].id)))
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost "new_device" callback: a guest connected on @vid.
+ * Matches the socket path to a registered port, waits (up to ~2 s) for the
+ * PMD instance to become ready, then records the negotiated ring layout
+ * (packed/split) in the instance. Returns 0 on success, -1 on failure.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;	/* wraps to UINT_MAX: "not found" sentinel */
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* find the port registered with this socket path */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* poll up to 2000 x 1 ms for the PMD instance to come up */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): %016lx assumes 64-bit long; PRIx64 would be portable */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost "destroy_device" callback: the guest on @vid disconnected.
+ * Logs the event and sets the matching virtual port's link state back to
+ * "not negotiated".
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	/* first loop only logs which port is going away */
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user session callbacks: invoked on guest connect/disconnect. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register @vport's socket as a vhost-user client, attach the session
+ * callbacks, mask out virtio features the HW path does not support, and
+ * start the vhost driver. Returns 0 on success, -1 on any error.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* offloads, control queue and event-idx are not supported by the HW */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/*
+ * Map a PCI vendor/device id pair to a printable Napatech adapter name.
+ * Returns "Unknown" for ids this driver does not recognize.
+ */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+			return "NT200A02";
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+			return "NT50B01";
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe callback for Napatech VFs: first create the vDPA device for the
+ * virtual function, then register it as a DPDK ethdev interface.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove callback: undo nt_vf_pci_probe (vDPA first, then ethdev). */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI ids served by this driver: NT200A02 and NT50B01 virtual functions. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* VF driver descriptor; the device must be bound to vfio-pci (see below). */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow state for one guest virtqueue (addresses are guest-physical). */
+struct vring_info {
+	uint64_t desc;
+	uint64_t avail;
+	uint64_t used;
+	uint16_t size;
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;	/* 0 = Rx, 1 = Tx (set in ntvf_vdpa_create_vring) */
+	struct nthw_virt_queue *p_vq;	/* HW queue handle, NULL if not created */
+
+	int enable;
+};
+
+/* Negotiated virtio state for all rings of one VF. */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	/* Rx/Tx rings interleaved: even index = Rx, odd = Tx */
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device instance, linked into internal_list. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds obtained through nt_vfio_setup() */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id of the attached guest */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* lifecycle flags (0/1), touched from vhost callbacks */
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/* Look up the internal_list entry owning @vdev; NULL when not registered. */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *it;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next)
+	{
+		if (vdev == it->internal->vdev) {
+			match = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/* Look up the internal_list entry owning PCI device @pdev; NULL if absent. */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *it;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next)
+	{
+		if (pdev == it->internal->pdev) {
+			match = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Open a VFIO session for the VF and cache its container/group/device fds
+ * in @internal. Returns 0 on success, -1 if VFIO setup fails.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	/* start from a known "not open" state */
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (@do_map != 0) or unmap the guest's memory regions for VFIO DMA on
+ * this VF. The dma_mapped flag makes the call idempotent: mapping an
+ * already-mapped device (or unmapping an unmapped one) fails early.
+ * Returns 0 on success, negative on error.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Fixed format string: the GPA conversion was "0xllx"
+		 * (missing %/PRIX64), leaving guest_phys_addr unconsumed
+		 * and shifting the remaining arguments.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address to a guest-physical address using the
+ * vhost memory table of device @vid.
+ * Returns 0 when @hva falls in no region; callers treat 0 as failure, so a
+ * genuine GPA of 0 cannot be distinguished from an error.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	struct rte_vhost_mem_region *reg;
+	uint64_t gpa = 0;
+	uint32_t i;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (i = 0; i < mem->nregions; i++) {
+		reg = &mem->regions[i];
+		if (hva >= reg->host_user_addr &&
+				hva < reg->host_user_addr + reg->size) {
+			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets have been sent from the VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the datapath when a guest attaches: DMA-map guest memory and
+ * create HW virtqueues. On the inline FPGA profile the first Rx/Tx pair is
+ * created and enabled here; on other profiles only vring 0 is created (the
+ * remaining rings are enabled later via the vring-state callback).
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					/* vq_type: 0 = Rx, 1 = Tx */
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Quiesce the datapath when a guest detaches: hand the current ring indexes
+ * back to vhost and release the HW virtqueues. Only in-order/packed layouts
+ * have HW queues to release; other layouts just log a warning.
+ * Always returns 0.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* save ring positions so the guest can resume (live migration) */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type: 0 = Rx, 1 = Tx */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		/* dirty-page logging for live migration is not implemented */
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Wire the vhost per-ring callfd eventfds into the device's MSI-X vectors
+ * via the VFIO_DEVICE_SET_IRQS ioctl.  Vector 0 carries config-change
+ * interrupts; ring vectors follow at RTE_INTR_VEC_RXTX_OFFSET.
+ * Returns 0 on success (including the "too many vectors" soft-fail), -1
+ * on ioctl failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	/* Idempotent: skip if interrupts are already enabled */
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/*
+	 * Walk the rings one at a time.  The previous loop stepped by two and
+	 * also read ring i + 1, which overran the valid ring range whenever
+	 * nr_vring was odd.
+	 */
+	for (i = 0; i < nr_vring; i++) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Release all MSI-X vectors previously claimed for this device via the
+ * VFIO_DEVICE_SET_IRQS ioctl.  Returns 0 on success, -1 on ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irqs = (struct vfio_irq_set *)buf;
+	int rc;
+
+	/* Idempotent: nothing to do if interrupts were never enabled */
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* A zero-count DATA_NONE trigger releases every MSI-X vector */
+	irqs->argsz = sizeof(struct vfio_irq_set);
+	irqs->count = 0;
+	irqs->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irqs->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irqs->start = 0;
+
+	rc = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irqs);
+	if (rc) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the desired state under internal->lock:
+ * start it when the device is started+attached but not running, stop and
+ * unmap it when running but no longer started/attached.
+ * Returns 0 on success or the failing step's error code.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/*
+			 * Log before jumping; previously the NT_LOG sat after
+			 * "goto err" and was unreachable dead code.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vhost dev_conf callback: record the vid, mark the device attached and
+ * kick the datapath state machine.  Returns 0 on success, -1 when the
+ * vDPA device is unknown.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	LOG_FUNC_ENTER();
+
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *list = find_internal_resource_by_vdev(vdev);
+
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	struct ntvf_vdpa_internal *internal = list->internal;
+
+	internal->vid = vid;
+	atomic_store(&internal->dev_attached, 1);
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost dev_close callback: detach the device, let the datapath wind
+ * down, then drop the now-stale virt queue pointers.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	LOG_FUNC_ENTER();
+
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *list = find_internal_resource_by_vdev(vdev);
+
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	struct ntvf_vdpa_internal *internal = list->internal;
+
+	atomic_store(&internal->dev_attached, 0);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (uint32_t qi = 0; qi < hw->nr_vring; qi++)
+		hw->vring[qi].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost set_features callback.  Succeeds unless the guest negotiated
+ * dirty-page logging (live migration), which this driver cannot provide
+ * without a relay core.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	LOG_FUNC_ENTER();
+
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *list = find_internal_resource_by_vdev(vdev);
+
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	uint64_t features;
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	/* Nothing more to do unless dirty-page logging was requested */
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/* vhost callback: return the VFIO group fd for the device behind vid. */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	LOG_FUNC_ENTER();
+
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *list = find_internal_resource_by_vdev(vdev);
+
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_group_fd;
+}
+
+/* vhost callback: return the VFIO device fd for the device behind vid. */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	LOG_FUNC_ENTER();
+
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *list = find_internal_resource_by_vdev(vdev);
+
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_dev_fd;
+}
+
+/* vhost callback: report the maximum number of queue pairs we support. */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	LOG_FUNC_ENTER();
+
+	struct internal_list *list = find_internal_resource_by_vdev(vdev);
+
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = list->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vhost callback: report the virtio feature bits this device offers. */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	struct internal_list *list = find_internal_resource_by_vdev(vdev);
+
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = list->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost callback: report the supported vhost-user protocol features.
+ * The __rte_unused annotation was dropped from vdev: the parameter is
+ * used in both NT_LOG calls below, so the annotation was misleading.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable VFIO MSI-X interrupts and then turn on every configured vring
+ * in hardware.  Even ring indexes are Rx, odd are Tx.
+ * Returns 0 on success or the interrupt-enable error code.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Was a raw printf(); use the driver logger like the rest of the file */
+		NT_LOG(ERR, VDPA, "%s: enabling interrupts via VFIO failed\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vhost set_vring_state callback: enable or disable one ring.
+ * Rings are created lazily here on first enable.  The enable flag and the
+ * "configure after the last ring" step are sequenced differently for the
+ * INLINE FPGA profile versus other profiles, so the two paths below must
+ * not be merged.  Returns 0 on success, -1 on bad device/ring, or the
+ * configure error code.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two rings (Rx+Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type 0 = Rx, otherwise Tx */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* Ring exists already: just re-enable it in HW */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* First enable: map guest memory and create the ring */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		/* INLINE: record state first, then configure after last ring */
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA driver callbacks registered with the vhost library.  Entries left
+ * NULL (migration_done, get_notify_area) are features this driver does
+ * not implement.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * Probe one PCI device as a vDPA endpoint: allocate driver state, set up
+ * VFIO, register with the vhost vDPA framework and queue the device on
+ * the internal list.  Returns 0 on success, -1 on failure.
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Was "return -1", which leaked both allocations above */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	/* rte_free(NULL) is a no-op, so both branches are safe here */
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * Remove callback: stop the datapath, tear down PCI/VFIO resources,
+ * unregister from the vDPA framework and free the driver state.
+ * Teardown order matters: the datapath must be stopped before the
+ * device mapping and VFIO container are released.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath stop the device */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * PCI id table with only the zero terminator (vendor_id == 0): no device
+ * ids are advertised, so the PCI bus will not auto-probe this driver.
+ * NOTE(review): probe/remove appear to be driven explicitly by the ntnic
+ * PMD instead - confirm this is intentional.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive a unique PF/VF index from the PCI address: 5 devid bits
+ * followed by 3 function bits (range 0..255).
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	unsigned int devid_bits = pdev->addr.devid & 0x1f;
+	unsigned int func_bits = pdev->addr.function & 0x7;
+
+	return (int)((devid_bits << 3) | func_bits);
+}
+
+/* Internal API */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;
+};
+
+static struct vfio_dev vfio_list[256];
+
+/*
+ * Look up the per-VF VFIO bookkeeping slot; NULL for out-of-range ids.
+ * Bound is derived from the table itself instead of the former magic
+ * constant 255, so it cannot drift if vfio_list is resized.
+ */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 ||
+			vf_num >= (int)(sizeof(vfio_list) / sizeof(vfio_list[0])))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+/*
+ * Prepare VFIO resources (container, group binding, device fd) for one
+ * PF/VF.  PF0 shares DPDK's default container; every other function gets
+ * its own.  Returns the VF number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/*
+	 * Check the result; the old code ignored it and could then read an
+	 * uninitialized iommu_group_num.  rte_vfio_get_group_num() returns
+	 * 1 on success, 0 when the device is not managed by VFIO and <0 on
+	 * error.
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) <= 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group number.\n");
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Only destroy containers created here, never the shared default */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Release the VFIO container created for this VF.
+ * NOTE(review): for vf_num 0 container_fd is RTE_VFIO_DEFAULT_CONTAINER_FD
+ * (see nt_vfio_setup) and is passed to rte_vfio_container_destroy()
+ * unconditionally here - confirm destroying the default container is
+ * intended.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host buffer into the VF's IOMMU at the next free IOVA window and
+ * return the resulting IOVA via *iova_addr.  1G-sized buffers are mapped
+ * on 1G alignment; each call consumes a full 1G IOVA window.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Arguments now match the format string: %PRIX64 takes a uint64_t
+	 * (virt_addr is a void *) and %d takes an int (size is a uint64_t).
+	 * The old mismatched varargs were undefined behavior.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%d\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	/* Advance the simple bump allocator by one 1G window per mapping */
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a mapping created by nt_vfio_dma_map(): recover the 1G-aligned
+ * base for 1G-sized buffers and unmap it from the VF's container.
+ * A container fd of -1 (never set up) is treated as success.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t base = (uint64_t)virt_addr;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t off = (uint64_t)virt_addr & ONE_G_MASK;
+
+		base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		iova_addr -= off;
+	}
+
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int rc = rte_vfio_container_dma_unmap(vfio->container_fd, base,
+					      iova_addr, size);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       rc, vfio->container_fd, vf_num, base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * vDPA mapping with Guest Phy addresses as IOVA: map the region 1:1 at
+ * the caller-supplied IOVA in the VF's container.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* size cast to int to match %d - passing uint64_t to %d is UB */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo a vDPA guest-physical mapping created by nt_vfio_dma_map_vdpa().
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	int rc = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					      iova_addr, size);
+
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       rc, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for a VF, or -1 for an invalid id. */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: it was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for a VF, or -1 for an invalid id. */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: it was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for a VF, or -1 for an invalid id. */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: it was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/*
+ * RTE_INIT registers nt_vfio_init as an EAL init-time constructor; the
+ * definition follows below.
+ */
+RTE_INIT(nt_vfio_init);
+
+static void nt_vfio_init(void)
+{
+	/* Hand the DMA map/unmap callbacks to the nt_util helper layer */
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* Descriptor for one extended statistic entry. */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* xstat name exposed to ethdev */
+	uint8_t source;	/* counter bank: 1 = RX port, 2 = TX port, 3 = FLM */
+	unsigned int offset;	/* byte offset of the counter within its bank struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+/* 'source' legend: 1 = RX port counter, 2 = TX port counter. */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+/* 'source' legend: 1 = RX port counter, 2 = TX port counter, 3 = FLM counter. */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+/*
+ * 'source' legend: 1 = RX port counter, 2 = TX port counter, 3 = FLM counter.
+ * Superset of the v1 table: identical entries plus the FLM 0.20 counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ *
+ * Baseline snapshot captured by nthw_xstats_reset(); the get functions
+ * subtract it so xstats are reported relative to the last reset.
+ * NTHW_XSTATS_SIZE is the larger of the vswitch and capture tables so
+ * one array fits either mode.
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/*
+ * Fill 'stats' with up to 'n' extended statistics for 'port'.
+ * Values are reported relative to the snapshot taken by nthw_xstats_reset().
+ * Must only be called with the stat mutex locked.
+ * Returns the number of entries written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *p_flm;
+	uint8_t *p_rx;
+	uint8_t *p_tx;
+	unsigned int idx;
+
+	if (is_vswitch) {
+		p_flm = NULL;
+		p_rx = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		p_tx = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		p_flm = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		p_rx = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		p_tx = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n && idx < tbl_size; idx++) {
+		stats[idx].id = idx;
+		if (tbl[idx].source == 1) {
+			/* RX port counter */
+			stats[idx].value =
+				*((uint64_t *)&p_rx[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+		} else if (tbl[idx].source == 2) {
+			/* TX port counter */
+			stats[idx].value =
+				*((uint64_t *)&p_tx[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+		} else if (tbl[idx].source == 3 && p_flm) {
+			/* FLM counter; baseline stored at port index 0 */
+			stats[idx].value =
+				*((uint64_t *)&p_flm[tbl[idx].offset]) -
+				nthw_xstats_reset_val[0][idx];
+		} else {
+			stats[idx].value = 0;
+		}
+	}
+
+	return idx;
+}
+
+/*
+ * Fill 'values' for the requested xstat 'ids'.
+ * Out-of-range ids are skipped. Must only be called with the stat
+ * mutex locked. Returns the number of valid ids serviced.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *p_flm;
+	uint8_t *p_rx;
+	uint8_t *p_tx;
+	unsigned int idx;
+	int count = 0;
+
+	if (is_vswitch) {
+		p_flm = NULL;
+		p_rx = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		p_tx = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		p_flm = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		p_rx = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		p_tx = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n; idx++) {
+		const uint64_t id = ids[idx];
+
+		if (id >= tbl_size)
+			continue;
+
+		if (tbl[id].source == 1) {
+			/* RX port counter */
+			values[idx] = *((uint64_t *)&p_rx[tbl[id].offset]) -
+				      nthw_xstats_reset_val[port][id];
+		} else if (tbl[id].source == 2) {
+			/* TX port counter */
+			values[idx] = *((uint64_t *)&p_tx[tbl[id].offset]) -
+				      nthw_xstats_reset_val[port][id];
+		} else if (tbl[id].source == 3 && p_flm) {
+			/* FLM counter; baseline stored at port index 0 */
+			values[idx] = *((uint64_t *)&p_flm[tbl[id].offset]) -
+				      nthw_xstats_reset_val[0][id];
+		} else {
+			values[idx] = 0;
+		}
+		count++;
+	}
+
+	return count;
+}
+
+/*
+ * Snapshot the current counters into nthw_xstats_reset_val so that
+ * subsequent reads report values relative to this point.
+ * Must only be called with the stat mutex locked.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *p_flm;
+	uint8_t *p_rx;
+	uint8_t *p_tx;
+	unsigned int idx;
+
+	if (is_vswitch) {
+		p_flm = NULL;
+		p_rx = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		p_tx = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		p_flm = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		p_rx = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		p_tx = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < tbl_size; idx++) {
+		if (tbl[idx].source == 1) {
+			/* RX port counter */
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&p_rx[tbl[idx].offset]);
+		} else if (tbl[idx].source == 2) {
+			/* TX port counter */
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&p_tx[tbl[idx].offset]);
+		} else if (tbl[idx].source == 3) {
+			/*
+			 * FLM counter
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (p_flm && strcmp(tbl[idx].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][idx] =
+					*((uint64_t *)&p_flm[tbl[idx].offset]);
+			}
+		}
+	}
+}
+
+/*
+ * These functions does not require stat mutex locked
+ */
+/*
+ * Copy up to 'size' xstat names into 'xstats_names'.
+ * With a NULL destination, returns the total number of names instead.
+ * Does not require the stat mutex.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	unsigned int idx;
+	int count = 0;
+
+	if (is_vswitch) {
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		tbl = nthw_cap_xstats_names_v1;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		tbl = nthw_cap_xstats_names_v2;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (xstats_names == NULL)
+		return tbl_size;
+
+	for (idx = 0; idx < size && idx < tbl_size; idx++) {
+		strlcpy(xstats_names[idx].name, tbl[idx].name,
+			sizeof(xstats_names[idx].name));
+		count++;
+	}
+
+	return count;
+}
+
+/*
+ * Copy the names of the requested xstat 'ids' into 'xstats_names'.
+ * With a NULL destination, returns the total number of names instead.
+ * Does not require the stat mutex.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	unsigned int idx;
+	int count = 0;
+
+	if (is_vswitch) {
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		tbl = nthw_cap_xstats_names_v1;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		tbl = nthw_cap_xstats_names_v2;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (xstats_names == NULL)
+		return tbl_size;
+
+	for (idx = 0; idx < size; idx++) {
+		if (ids[idx] < tbl_size) {
+			strlcpy(xstats_names[idx].name, tbl[ids[idx]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+		}
+		/*
+		 * NOTE(review): count is incremented for every requested id,
+		 * including out-of-range ones — unlike nthw_xstats_get_by_id(),
+		 * which counts only valid ids. Confirm this asymmetry is
+		 * intended.
+		 */
+		count++;
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v7 8/8] net/ntnic: adds socket connection to PMD
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-22 15:41   ` [PATCH v7 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-22 15:41   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-22 15:41 UTC (permalink / raw)
  To: dev, mko-plv; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1310 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5354 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	const char *param;	/* command token this entry matches */
+	struct func_s *sub_funcs;	/* optional nested table for sub-commands */
+	/* leaf handler; receives remaining command text via *data */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply for 'module'.
+ *
+ * The reply buffer holds a 32-bit binary error code followed by a
+ * "<module>:<error text>" string: sprintf() first writes "----" as a
+ * placeholder, and the placeholder is then overwritten with the code.
+ * The malloc size (4 + module + ':' + text + NUL) exactly matches what
+ * sprintf writes. *len is computed before the overwrite, while the
+ * buffer is still a plain C string. On allocation failure (or NULL
+ * 'data') *len stays 0. Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal status reply containing only a 32-bit reply code.
+ * On allocation failure *len stays 0. Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (data == NULL)
+		return REQUEST_OK;
+
+	char *buf = malloc(sizeof(uint32_t));
+
+	if (buf != NULL) {
+		*(uint32_t *)buf = (uint32_t)code;
+		*len = sizeof(uint32_t);
+	}
+	*data = buf;
+	return REQUEST_OK;
+}
+
+/*
+ * Walk a comma-separated command string through the module's function
+ * tree and dispatch to the matching handler.
+ *
+ * 'function' is consumed destructively (tokenised in place); hdr->len
+ * is reduced by the consumed token. On a leaf match, *data is pointed
+ * at the remaining command text and the handler's result is returned.
+ * Returns REQUEST_OK/REQUEST_ERR via the handler or ntconn_error().
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *saveptr = NULL;
+	/* strtok_r instead of strtok: reentrant, no hidden global state */
+	char *tok = strtok_r(function, ",", &saveptr);
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	/*
+	 * NOTE(review): if 'function' held no ',' separator this points one
+	 * past the NUL terminator; it is only dereferenced by a matched
+	 * handler — confirm callers always supply trailing data in that case.
+	 */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+/*
+ * Operations table each ntconnect module registers with the server.
+ */
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blop.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain a text describing the error
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+typedef struct ntconn_mod_s {
+	void *hdl;
+	struct pci_id_s addr;
+	const ntconnapi_t *op;
+
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd;
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;
+};
+
+int ntconn_server_register(void *server);
+
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+/* System header: use the angle-bracket form, not "stdint.h" */
+#include <stdint.h>
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+/* Message framing tags carried in ntconn_header_s::tag */
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+struct ntconn_header_s {
+	uint16_t tag;
+	uint16_t len;
+	uint32_t blob_len;
+};
+
+/* PCI address, accessible either as one 64-bit id or as its components */
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type is not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/* NOTE(review): this declares an array of 32 char POINTERS. If the
+	 * intent is a 32-byte firmware-version string buffer, it should be
+	 * `char fw_version[32]` - confirm against the users of this struct.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Handler for "server get,nic_pci_ids": returns the list of registered
+ * SmartNIC PCI addresses as a ntc_nic_pci_ids_s blob. The reply buffer is
+ * heap-allocated and later released by ntconn_server_free_data().
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/* "dddd:bb:dd.f" needs 13 chars plus NUL when the function
+		 * number takes two hex digits (function is a uint8_t), which
+		 * would overflow the NICS_PCI_ID_LEN + 1 buffer with
+		 * sprintf(); use snprintf() to bound the write.
+		 */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Request entry point for the "server" module: dispatches the remaining
+ * request string through the function tree rooted at server_entry_funcs.
+ */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously allocated by a server request handler.
+ * Called by the ntconnect core after the reply has been sent.
+ */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/*
+ * Register the server module itself as an ntconnect module on the
+ * reserved PCI address 0000:00:00.0 (no physical device backs it).
+ * Returns the result of register_ntconn_mod().
+ */
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr addr = {
+		.domain = 0, .bus = 0, .devid = 0, .function = 0
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..22ef7828c7
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error code -> text table; terminated by the {-1, NULL} sentinel. */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;
+static pthread_t tid;
+static pthread_t ctid;
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Look up the table entry for 'err_code'. Unknown codes map to the
+ * generic "Internal error" entry (index 1).
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+
+	/* Sentinel reached without a match: fall back to "Internal error" */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a module under its PCI address with the ntconnect core.
+ * The module is prepended to the global ntcmod_base list, and its PCI id
+ * is added to the server's pci_id_list if not already present.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Prepend to the global module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* NOTE(review): pci_id presumably aliases the domain/bus/devid/function
+	 * fields as one integer (union) - confirm against the addr type.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the id in the first free slot, unless already listed */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill in a unix-domain socket address for 'path'.
+ * Returns 0 on success, -1 on NULL arguments or when the path does not
+ * fit in sun_path (including its NUL terminator).
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (addr == NULL || path == NULL)
+		return -1;
+	memset(addr, 0, sizeof(struct sockaddr_un));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) < sizeof(addr->sun_path)) {
+		/*
+		 * The length check above guarantees the copy fits, so pass the
+		 * full buffer size. The previous "sizeof - 1" bound silently
+		 * truncated paths of exactly sizeof(sun_path) - 1 characters.
+		 */
+		rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path));
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Internal status codes shared by the socket helpers below.
+ * Negative values are parenthesized so the macros expand safely
+ * inside arbitrary expressions.
+ */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR (-1)
+#define STATUS_TRYAGAIN (-2)
+#define STATUS_INVALID_PARAMETER (-3)
+#define STATUS_CONNECTION_CLOSED (-4)
+#define STATUS_CONNECTION_INVALID (-5)
+#define STATUS_TIMEOUT (-6)
+
+/*
+ * Wait up to 'timeout' ms (-1 = forever, poll() semantics) for data on
+ * 'fd' and receive at most 'len' bytes into 'data'. On STATUS_OK,
+ * *recv_len holds the number of bytes actually received (may be a short
+ * read). Returns one of the STATUS_* codes defined above.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd flags: try to receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno; /* NT_LOG below may clobber errno */
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	/* POLLERR/POLLNVAL (or POLLIN with error bits): give up on the fd */
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly 'length' bytes into 'data', looping over partial
+ * reads and transparently retrying STATUS_TRYAGAIN. Returns STATUS_OK,
+ * or the first terminal STATUS_* error encountered.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t done = 0;
+
+	while (done < length) {
+		size_t chunk = 0;
+		int status = read_data(clfd, length - done, &data[done],
+				       &chunk, -1);
+
+		switch (status) {
+		case STATUS_OK:
+			done += chunk;
+			break;
+		case STATUS_TRYAGAIN:
+			/* Transient condition: retry */
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return status;
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       status);
+			return status;
+		}
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write the full 'size' bytes to 'fd', looping over partial writes.
+ * Returns 0 on success or STATUS_INTERNAL_ERROR on a write failure.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			/* Interrupted by a signal: retry, matching the
+			 * read side's TRYAGAIN handling.
+			 */
+			if (errno == EINTR)
+				continue;
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += (size_t)res;
+	}
+	return 0;
+}
+
+/*
+ * Read one request frame from 'clfd'. A frame starts with a
+ * struct ntconn_header_s announcing the total payload size
+ * (len + blob_len). On success *rdata points to a malloc'ed buffer
+ * holding the complete frame (caller frees); otherwise a STATUS_*
+ * error is returned and nothing is allocated.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	/* A frame shorter than the header cannot be interpreted */
+	if (recv_len < NTCMOD_HDR_LEN) {
+		NT_LOG(ERR, NTCONNECT, "request shorter than header\n");
+		free(data);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+	}
+
+	/*
+	 * Fetch whatever part of the frame the first read did not deliver.
+	 * Previously this was only done for frames larger than
+	 * MESSAGE_BUFFER, so the tail of a partially-received smaller frame
+	 * was silently dropped.
+	 */
+	if (length > recv_len) {
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read a request from the client and parse its
+ * "<pci_id>;<module>[;<command>]" preamble. On success the matching
+ * registered module is returned, *module_cmd points into the request
+ * buffer at the command part and *get_req holds the buffer for the
+ * caller to free. Returns NULL on parse/lookup failure, with *status
+ * carrying the read status.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/*
+			 * Tokens must be NULL-checked BEFORE strlen(); the
+			 * original order crashed on malformed requests.
+			 */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			/* Optional trailing command part */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Split "dddd:bb:dd.f" into its address parts */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Prepend an ntconn header to 'data' and write the whole message to the
+ * client socket. Returns 0 on success or a STATUS_* error.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	/* memcpy with a NULL source is undefined even for size 0 */
+	if (size)
+		memcpy(&message[NTCMOD_HDR_LEN], data, size);
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	return res;
+}
+
+/*
+ * Send a reply, then hand the payload buffer back to the owning
+ * module's free_data() callback under the module lock.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size == 0)
+		return res;
+
+	pthread_mutex_lock(&cmod->mutex);
+	cmod->op->free_data(cmod->hdl, data);
+	pthread_mutex_unlock(&cmod->mutex);
+
+	return res;
+}
+
+/*
+ * Send an error reply: a 32-bit error code followed by the error text
+ * (the text's first 4 bytes are placeholder padding overwritten by the
+ * code).
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	/* Bounded format; err_text length is not under our control */
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	uint32_t code = (uint32_t)ntcerr->err_code;
+
+	/* memcpy avoids the aliasing/alignment issues of *(uint32_t *)buf */
+	memcpy(err_buf, &code, sizeof(code));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client thread: loop reading requests, dispatching them to the
+ * addressed module and writing back replies until the client goes away.
+ * On exit, every module's client_cleanup callback is invoked for this fd.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;	/* fd smuggled through the void* arg */
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		/* NOTE(review): this error reply is also reached after the
+		 * successful dispatch path above (there is no continue), so
+		 * every handled request is followed by an INVALID_REQUEST
+		 * frame - confirm against the client protocol.
+		 */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		/* NOTE(review): the break statements above skip this free(),
+		 * leaking the request buffer once per connection teardown.
+		 */
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Listener thread: accept clients on the unix socket and spawn one
+ * detached ntconnect_worker thread per connection, pinned to the
+ * configured CPU set.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		/*
+		 * Check thread creation; previously a failed create was
+		 * followed by affinity/detach calls on a stale thread id
+		 * and leaked the client fd.
+		 */
+		if (pthread_create(&ctid, NULL, ntconnect_worker,
+				   (void *)(uint64_t)clfd) != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create worker thread for client\n");
+			close(clfd);
+			continue;
+		}
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Start the ntconnect unix-socket service if at least one module has
+ * been registered. Returns 0 on success (or when there is nothing to
+ * serve) and -1 on setup failure.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		/* A too-long path must fail here, not bind a zeroed address */
+		if (unix_build_address(sockname, &addr) != 0)
+			return -1;
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+/* Per-adapter connection state; one slot per probed adapter. */
+static struct adap_hdl_s {
+	struct drv_s *drv;	/* owning driver instance */
+} adap_hdl[MAX_ADAPTERS];
+
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* "get" sub-commands of the adapter module. */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },	/* sentinel */
+};
+
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* "set" sub-commands of the adapter module. */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },	/* sentinel */
+};
+
+/*
+ * Entry level
+ * Top-level command table: "get"/"set" dispatch into the level-1 tables.
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },	/* sentinel */
+};
+
+/* Map an nt_link_speed_e value onto its ntconnect PORT_LINK_SPEED_* code. */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:  return PORT_LINK_SPEED_10M;
+	case NT_LINK_SPEED_100M: return PORT_LINK_SPEED_100M;
+	case NT_LINK_SPEED_1G:   return PORT_LINK_SPEED_1G;
+	case NT_LINK_SPEED_10G:  return PORT_LINK_SPEED_10G;
+	case NT_LINK_SPEED_25G:  return PORT_LINK_SPEED_25G;
+	case NT_LINK_SPEED_40G:  return PORT_LINK_SPEED_40G;
+	case NT_LINK_SPEED_50G:  return PORT_LINK_SPEED_50G;
+	case NT_LINK_SPEED_100G: return PORT_LINK_SPEED_100G;
+	default:                 return PORT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/* Translate a textual speed ("10M".."100G") into nt_link_speed_t. */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *text;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].text) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces": allocate and fill an ntc_interfaces_s blob describing
+ * every DPDK port (physical first, then virtual), including LAG role,
+ * link/admin state, MTU, MAC, attached HW queues and NIM data. The blob
+ * is returned through *data (caller frees via the module free callback).
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			/* NOTE(review): unlike the virtual-port path below, this
+			 * loop does not clamp input_num + vpq_nb_vq against
+			 * MAX_RSS_QUEUES - confirm the queue[] array cannot
+			 * overflow here.
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode length is saturated to the 16-bit wire field */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * "get info": return the FPGA identification string
+ * "type-prod-ver-rev" in an allocated buffer (caller frees via the
+ * module free callback).
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga = &adap->drv->ntdrv.adapter_info.fpga_info;
+	char *buf;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	buf = malloc(*len);
+	if (!buf) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = buf;
+
+	snprintf(buf, 31, "%03d-%04d-%02d-%02d", fpga->n_fpga_type_id,
+		 fpga->n_fpga_prod_id, fpga->n_fpga_ver_id,
+		 fpga->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * "get sensors": return an ntc_sensors_s header followed by one
+ * struct sensor per adapter sensor and per NIM sensor (all ports).
+ * The blob is handed back via *data (caller frees through the module
+ * free callback).
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	/* NOTE(review): fixed 24-byte copy - confirm p_dev_name is always
+	 * at least 24 bytes and that the receiver tolerates a missing NUL.
+	 */
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	/* Sensor records are packed directly after the header */
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/* NOTE(review): copies min(strlen, 50) bytes without a
+			 * NUL terminator when the name is 50+ chars; the
+			 * remainder of name[] is whatever malloc returned -
+			 * confirm the receiver treats name as fixed-width.
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable port @port_nr on the adapter behind @adap. */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable port @port_nr on the adapter behind @adap. */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, false);
+	return REQUEST_OK;
+}
+
+/* Request link up on @portid; a no-op (beyond logging) if already up. */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+
+	return REQUEST_OK;
+}
+
+/* Request link down on @portid; a no-op (beyond logging) if already down. */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of @portid from the textual value in @speed_str.
+ * The port must be administratively down; otherwise an error reply is
+ * placed in @data/@len.  An unparsable speed string also yields an
+ * error reply.  Returns REQUEST_OK on success.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	/* Speed may only be changed while the port is disabled */
+	if (nt4ga_port_get_adm_state(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+
+	return REQUEST_OK;
+}
+
+/* Apply loopback @mode (an NT_LINK_LOOPBACK_* value) to @portid. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(info, portid, mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Switch TX power for @portid: @disable == true turns it off, false on.
+ * On failure an NTCONN_ADAPTER_ERR_TX_POWER_FAIL reply is placed in
+ * @data/@len; REQUEST_OK is returned on success.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(info, portid, disable) != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+
+	return REQUEST_OK;
+}
+
+/*
+ * ntconnect "adapter,set,interface,port<N>,<attr>[=<value>]" handler.
+ *
+ * Parses the remainder of the request ("port0,link_speed=10G" etc.) and
+ * dispatches to the matching set_* helper.  Only physical ports may be
+ * configured.  On any parse failure an error reply is placed in
+ * @data/@len.  Returns REQUEST_OK from the dispatched helper on success.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	/* First token must be "port<N>" */
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports.
+	 * Fix: the original check rejected port_nr < n_phy_ports, i.e. it
+	 * refused every valid physical port and let out-of-range port
+	 * numbers through.  Also reject negative numbers from atoi().
+	 */
+	if (port_nr < 0 ||
+	    port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	/* Second token is the attribute name */
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Unrecognized attribute or value */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * ntconnect "adapter,set,adapter,..." handler.
+ * Not implemented yet: the request is logged and a
+ * NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED error reply is returned.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/* ntconnapi request entry point: dispatch @function through the
+ * adapter_entry_funcs table.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by one of the handlers. */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; this module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations handed to the NtConnect framework; the positional
+ * initializers must stay in ntconnapi_t declaration order.
+ */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Attach @drv to a free slot in adap_hdl[] and register the "adapter"
+ * module with the NtConnect framework.
+ * Returns the result of register_ntconn_mod(), or -1 when all
+ * MAX_ADAPTERS slots are occupied.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused slot */
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..31d5dc3edc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client driver handle, one slot per connected ntconnect client. */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Mapping from the client-supplied in_port number to the flow device of
+ * the corresponding virtual port, plus the attributes (caller id and
+ * forced VLAN vid) that flows created on that port must carry.
+ */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Look up the message text for @err_code in ntconn_err[].
+ * Unknown codes fall back to entry 1 ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	const ntconn_err_t *entry = &ntconn_err[0];
+
+	while (entry->err_code != (uint32_t)-1 &&
+			entry->err_code != err_code)
+		entry++;
+
+	/* Hit the sentinel: report the generic internal error instead */
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry->err_text;
+}
+
+/*
+ * Build a flow_return_s error reply in @data/@len with status @err and
+ * the message text for @code.  Returns REQUEST_OK, or REQUEST_ERR when
+ * the reply buffer cannot be allocated (*len = 0).
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * Fix: use strlcpy instead of memcpy/RTE_MIN so err_msg is
+		 * always NUL-terminated, even when the message fills the
+		 * buffer; matches ntconn_flow_err_status() below.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a generic internal-error flow_return_s reply in @data/@len with
+ * status @err.  Returns REQUEST_OK, or REQUEST_ERR when the reply buffer
+ * cannot be allocated (*len = 0).
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	struct flow_return_s *return_value = (struct flow_return_s *)*data;
+	const char *err_msg = get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR);
+
+	return_value->status = err;
+	return_value->type = FLOW_ERROR_GENERAL;
+	strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* NULL-terminated dispatch table mapping request function names to
+ * their handlers for this module.
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Package @status and @error into a freshly allocated flow_return_s
+ * reply in @data/@len.  Returns REQUEST_OK, or REQUEST_ERR when the
+ * reply buffer cannot be allocated (*len = 0).
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	struct flow_return_s *return_value = (struct flow_return_s *)*data;
+
+	return_value->status = status;
+	return_value->type = error->type;
+	strlcpy(return_value->err_msg, error->message, ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * ntconnect "filter,setport,in_port=<N>,vpath=<path>" handler.
+ *
+ * Binds the client-visible port number to the flow device of the virtio
+ * path instance and returns the queue layout (flow_setport_return) to
+ * the client through @data/@len.  Error replies are returned for invalid
+ * ports, missing VF devices or unsupported adapters.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/* Fix: initialize so vpath is never read uninitialized when the
+	 * "vpath=" token is missing from the request.
+	 */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		/*
+		 * Fix: "in_port=" is 8 characters.  The original compared
+		 * only 5 bytes and converted from offset 7 (the '='), so
+		 * atoi() always returned 0 regardless of the request.
+		 */
+		if (length > 8 && memcmp(tok, "in_port=", 8) == 0)
+			in_port = atoi(tok + 8);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vpath=", 6) == 0)
+			strlcpy(vpath, tok + 6, MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	/* Remember flow attributes future flows on this port must carry */
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "filter,flush,port=<N>" handler: flush all flows on the
+ * given port's flow device and return the flow API status in @data/@len.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * Fix: also reject negative port numbers from atoi(); a negative
+	 * value passed the original ">= MAX_PORTS" check and indexed
+	 * port_eth[] out of bounds.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect "filter,destroy" handler: destroy the flow handle carried in
+ * the binary blob (struct destroy_flow_ntconnect) appended to the header
+ * and return the flow API status in @data/@len.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Fix: initialize error (as func_flow_flush does) so the reply
+	 * never reads an indeterminate type/message if the flow API leaves
+	 * it untouched.
+	 */
+	set_error(&error);
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Fix: also reject negative port values to keep port_eth[] access
+	 * in bounds.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selector for make_flow_create(): validate only, or actually create. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Reconstruct flow_elem/flow_action arrays from the flattened copies
+ * received over the ntconnect socket (struct create_flow_ntconnect) and
+ * pass them to flow_validate() (func == FLOW_API_FUNC_VALIDATE) or
+ * flow_create().
+ *
+ * Valid spec/mask/conf payloads are referenced in place inside the
+ * received blob; VXLAN-encap, RSS, meter and raw encap/decap actions
+ * need extra fix-up of embedded pointers before the flow API can use
+ * them.
+ *
+ * Returns the new flow handle as a uint64_t (0 for validate-only and on
+ * error); *status and *error carry the detailed result.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Preset the "too many" failure; overwritten on success */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	/* Stamp the attributes recorded at setport time for this port */
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Rebuild the pattern-element array up to FLOW_ELEM_TYPE_END */
+	do {
+		idx++;
+		/* NOTE(review): '>' permits idx == MAX_FLOW_STREAM_ELEM;
+		 * off-by-one unless elem[] has MAX+1 entries - confirm
+		 * array sizing (same pattern repeats below).
+		 */
+		if (idx > MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Rebuild the action array up to FLOW_ACTION_TYPE_END */
+	idx = -1;
+	do {
+		idx++;
+		if (idx > MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				/* Re-point the copied encap items into the blob */
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					/* NOTE(review): '>' vs '>=' bound -
+					 * see note on the elem loop above.
+					 */
+					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				/* Re-point the copied decap items into the blob */
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: validate a flow specification without
+ * creating it.
+ *
+ * The request carries a struct create_flow_ntconnect blob appended at
+ * offset hdr->len.  The blob size and the port index are validated, and
+ * the filter layer is then invoked in validate-only mode
+ * (FLOW_API_FUNC_VALIDATE); the resulting status/error pair is
+ * serialized for the client by copy_return_status().
+ *
+ * Returns REQUEST_OK with a reply buffer in *data/*len, or an ntconn
+ * error reply on malformed input.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The blob must be exactly the expected structure size */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	/* Fix: the original logged the wrong function name here */
+	NT_LOG(DBG, NTCONNECT, "%s\n", __func__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* Fix: the original read flow_cpy[i].elem[i].type here */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			/* Fix: print address bytes 0..3, not byte 0 four times */
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/*
+	 * Call the filter layer in validate-only mode and serialize the
+	 * result.  (The original function continued with an unreachable
+	 * duplicate of this call after the return - removed.)
+	 */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect request handler: query a flow (e.g. a count action).
+ *
+ * The request carries a struct query_flow_ntconnect blob at offset
+ * hdr->len.  After size and port validation, the action embedded in the
+ * request is rebuilt and flow_query() is invoked; its status, error
+ * information and any returned data are serialized into one reply
+ * buffer allocated in *data.
+ *
+ * Returns REQUEST_OK with the reply in *data/*len, REQUEST_ERR when the
+ * reply buffer cannot be allocated, or an ntconn error reply on
+ * malformed input.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The blob must be exactly the expected structure size */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/*
+	 * NOTE(review): the flow handle is taken verbatim from the client
+	 * request and later dereferenced by flow_query() - this assumes
+	 * the ntconnect client is trusted; confirm against the security
+	 * model of the interface.
+	 */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	/* Rebuild the action from its serialized copy */
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply carries the fixed header plus the variable query payload */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			/* Append the payload and release the source buffer */
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnapi request entry point for the flow module: dispatch the named
+ * request function through this module's dispatch table.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * ntconnapi hook: release a reply buffer previously allocated by this
+ * module.  free(NULL) is defined as a no-op, so the redundant NULL
+ * check in the original has been dropped.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * ntconnapi hook invoked when a client disconnects; the flow module
+ * keeps no per-client state, so there is nothing to release.
+ */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnapi callback table for the flow module, handed to the framework
+ * by ntconn_flow_register(): name, version, request dispatcher, reply
+ * buffer release and client cleanup hooks.
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register one driver instance with the NtConnect framework for the
+ * flow module.  A free slot in flow_hdl[] is claimed for the driver;
+ * returns the result of register_ntconn_mod(), or -1 when all
+ * MAX_CLIENTS slots are taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Locate the first unused handle slot */
+	while (slot < MAX_CLIENTS && flow_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-registration state for the meter module; one slot per adapter,
+ * claimed by ntconn_meter_register().
+ */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter module error-code to message table, terminated by code -1;
+ * consumed by get_error_msg().
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+/*
+ * Map a meter error code to its human-readable message.  Codes below
+ * NTCONN_METER_ERR_INTERNAL_ERROR belong to the generic ntconn range
+ * and are resolved through get_ntconn_error(); codes not found in the
+ * module table fall back to the "Internal error" entry (index 1).
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	int i;
+
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *generic = get_ntconn_error(err_code);
+
+		return generic->err_text;
+	}
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+
+	/* Unknown code: report "Internal error" */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Dispatch table mapping request function names to their handlers;
+ * walked by execute_function() from meter_request().
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+/*
+ * Populate an rte_mtr_error with the message matching the given error
+ * code.  The type is always RTE_MTR_ERROR_TYPE_UNSPECIFIED and no cause
+ * object is attached.
+ */
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->cause = NULL;
+	error->message = get_error_msg(err);
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+}
+
+/*
+ * ntconnect request handler: fetch rte_mtr capabilities.
+ *
+ * The request is a comma-separated text string in *data carrying a
+ * "vport=N" token.  The virtual port (1..64) selects the physical port
+ * via its low bit, and rte_mtr_capabilities_get() is invoked; either
+ * the capabilities or the error details are serialized into a freshly
+ * allocated reply in *data/*len.
+ *
+ * Returns REQUEST_OK with a reply buffer, or REQUEST_ERR when the
+ * reply cannot be allocated.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse the "vport=N" token from the request string */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* Valid virtual port range is 1..64 here */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is derived from the low bit of the virtual port */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Range-check an ID received from the client against the per-virtual-port
+ * maximum.  Fills @error and returns -EINVAL when @id exceeds @max_id;
+ * returns 0 otherwise.  @kind names the object ("Profile"/"Policy"/
+ * "Meter") for the log message.
+ */
+static int meter_check_id(uint32_t id, uint32_t max_id, const char *kind,
+			  uint32_t err_code, struct rte_mtr_error *error)
+{
+	if (id > max_id) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: %s ID %u out of range. Max value is %u", kind,
+		       id, max_id);
+		copy_mtr_error(error, err_code);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * ntconnect request handler: meter setup.
+ *
+ * The first comma-separated token of the request string selects the
+ * sub-command (addpro/delpro/addpol/delpol/crtmtr/delmtr) and the
+ * binary blob at offset hdr->len (struct meter_setup_s) carries its
+ * arguments.  Client-supplied IDs are range-checked and remapped into
+ * the virtual port's private ID window before calling the rte_mtr API.
+ * (The six duplicated inline range checks of the original are now one
+ * helper, meter_check_id().)
+ *
+ * Returns REQUEST_OK with either a success or an error reply in
+ * *data/*len, or REQUEST_ERR when the reply cannot be allocated.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	/* Virtual ports 4..128 are valid for meter setup */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* The first comma-separated token selects the sub-command */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_check_id(cpy_data->id, max_id, "Profile",
+					NTCONN_METER_ERR_PROFILE_ID, &error);
+		if (status != 0)
+			goto error_meter_setup;
+		/* Remap the ID into this virtual port's window */
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_check_id(cpy_data->id, max_id, "Profile",
+					NTCONN_METER_ERR_PROFILE_ID, &error);
+		if (status != 0)
+			goto error_meter_setup;
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_check_id(cpy_data->id, max_id, "Policy",
+					NTCONN_METER_ERR_POLICY_ID, &error);
+		if (status != 0)
+			goto error_meter_setup;
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Expand the per-color actions into the policy structure */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_check_id(cpy_data->id, max_id, "Policy",
+					NTCONN_METER_ERR_POLICY_ID, &error);
+		if (status != 0)
+			goto error_meter_setup;
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_check_id(cpy_data->id, max_id, "Meter",
+					NTCONN_METER_ERR_METER_ID, &error);
+		if (status != 0)
+			goto error_meter_setup;
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy references use the policy ID window size */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_check_id(cpy_data->id, max_id, "Meter",
+					NTCONN_METER_ERR_METER_ID, &error);
+		if (status != 0)
+			goto error_meter_setup;
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	default:
+		/* Unreachable: command was validated above */
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: read (and optionally clear) meter stats.
+ *
+ * The blob at offset hdr->len (struct meter_get_stat_s) selects the
+ * virtual port and meter.  The meter ID is remapped into the virtual
+ * port's private window and rte_mtr_stats_read() is invoked; either
+ * the statistics or the error details are serialized into a fresh
+ * reply buffer in *data/*len.
+ *
+ * Returns REQUEST_OK with a reply buffer, or REQUEST_ERR when the
+ * reply cannot be allocated.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The blob must be exactly the expected structure size */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Virtual ports 4..128 are valid for meter statistics */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Physical port is the low bit of the virtual port; remap the
+	 * meter ID into this virtual port's private ID window.
+	 */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnapi request entry point for the meter module: dispatch the
+ * named request function through this module's dispatch table.
+ */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * ntconnapi hook: release a reply buffer previously allocated by this
+ * module.  free(NULL) is defined as a no-op, so the redundant NULL
+ * check in the original has been dropped.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * ntconnapi hook invoked when a client disconnects; the meter module
+ * keeps no per-client state, so there is nothing to release.
+ */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnapi callback table for the meter module, handed to the
+ * framework by ntconn_meter_register().
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register one driver instance with the NtConnect framework for the
+ * meter module.  A free slot in meter_hdl[] is claimed for the driver;
+ * returns the result of register_ntconn_mod(), or -1 when all
+ * MAX_CLIENTS slots are taken.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Locate the first unused handle slot */
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * Registration entry points for all defined NT connection modules.
+ * Each hooks its module into the NtConnect framework for the given
+ * driver instance; each returns -1 on failure (see the individual
+ * module sources for success semantics).
+ */
+int ntconn_adap_register(struct drv_s *drv);	/* adapter module */
+int ntconn_stat_register(struct drv_s *drv);	/* statistics module */
+int ntconn_flow_register(struct drv_s *drv);	/* flow module */
+int ntconn_meter_register(struct drv_s *drv);	/* metering module */
+int ntconn_test_register(struct drv_s *drv);	/* self-test module */
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..5c8b8db39e
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add other supported layout versions here.
+	 * When more versions are added, add the new version-dependent binary
+	 * reply structures in the ntconnect_api.h file so clients can select
+	 * the right one after reading layout_version.
+	 */
+};
+
+/* Index into snaps_s::snap_addr selecting one snapshot section. */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT	/* number of sections; not a valid selector */
+};
+
+/* One section inside a client's snapshot buffer. */
+struct snap_addr_s {
+	const uint64_t *ptr;	/* start of the section in snaps_s::buffer */
+	unsigned int size;	/* section size in 64-bit counters */
+};
+
+/* Per-client snapshot state, kept in a singly linked list. */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* one allocation holding all sections */
+	struct snaps_s *next;
+};
+
+/* Module-wide handle; a single instance is shared by all clients. */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;	/* head of the per-client snapshot list */
+} stat_hdl;
+
+/* Statistic categories served by this module. */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* "get snapshot <name>" sub-commands: read sections of a stored snapshot. */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* "get <name>" sub-commands for the stat module. */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level commands: the "get ..." subtree and "snapshot". */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize the flowmatcher counters into val as a ntc_stat_get_data_s
+ * header followed by nbc flowmatcher_type_fields_s records.
+ * Returns the number of 64-bit words written (records + header).
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+	/* keep NDEBUG builds free of set-but-unused warnings */
+	(void)cnt_names;
+	(void)cnt_values;
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+			/*
+			 * Advance to the next output record, as the other
+			 * read_*() helpers do; previously every iteration
+			 * overwrote record 0.
+			 */
+			flm++;
+		}
+	} else {
+		/* No flowmatcher stats: zero every requested output record */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize the color counters into val as a ntc_stat_get_data_s header
+ * followed by nbc color_type_fields_s records.  Returns the number of
+ * 64-bit words written (records + header).
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *out =
+		(struct color_type_fields_s *)cdata->data;
+	int i;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap layouts are identical for colors */
+	for (i = 0; i < nbc; i++, out++) {
+		out->pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		out->octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		out->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize the host-buffer (queue) counters into val as a
+ * ntc_stat_get_data_s header followed by nbq queue_type_fields_s
+ * records.  Returns the number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *qdata = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *out =
+		(struct queue_type_fields_s *)qdata->data;
+	int i;
+
+	qdata->nb_counters = (uint64_t)nbq;
+	qdata->timestamp = hwstat->last_timestamp;
+	qdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap layouts are identical for queues */
+	for (i = 0; i < nbq; i++, out++) {
+		out->flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		out->drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		out->fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		out->dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		out->flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		out->drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		out->fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		out->dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter subset shared by Rx and Tx port statistics from
+ * the driver's port counter struct into the wire-format rmon struct.
+ * Fields map one-to-one by name; only the struct layout differs.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize the Rx port counters into val: a ntc_stat_get_data_s header
+ * plus nbp records whose layout depends on whether the adapter runs in
+ * vswitch (virt) mode.  Returns the number of 64-bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+
+		for (p = 0; p < nbp; p++, out++) {
+			out->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			out->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			out->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			out->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			out->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct rx_type_fields_cap_s *out =
+		(struct rx_type_fields_cap_s *)rxdata->data;
+
+	for (p = 0; p < nbp; p++, out++) {
+		struct port_counters_v2 *src =
+			&hwstat->cap.mp_stat_structs_port_rx[p];
+
+		copy_rmon_stat(src, &out->rmon);
+
+		/* Rx only port counters */
+		out->mac_drop_events = src->mac_drop_events;
+		out->pkts_lr = src->pkts_lr;
+		out->duplicate = src->duplicate;
+		out->pkts_ip_chksum_error = src->pkts_ip_chksum_error;
+		out->pkts_udp_chksum_error = src->pkts_udp_chksum_error;
+		out->pkts_tcp_chksum_error = src->pkts_tcp_chksum_error;
+		out->pkts_giant_undersize = src->pkts_giant_undersize;
+		out->pkts_baby_giant = src->pkts_baby_giant;
+		out->pkts_not_isl_vlan_mpls = src->pkts_not_isl_vlan_mpls;
+		out->pkts_isl = src->pkts_isl;
+		out->pkts_vlan = src->pkts_vlan;
+		out->pkts_isl_vlan = src->pkts_isl_vlan;
+		out->pkts_mpls = src->pkts_mpls;
+		out->pkts_isl_mpls = src->pkts_isl_mpls;
+		out->pkts_vlan_mpls = src->pkts_vlan_mpls;
+		out->pkts_isl_vlan_mpls = src->pkts_isl_vlan_mpls;
+		out->pkts_no_filter = src->pkts_no_filter;
+		out->pkts_dedup_drop = src->pkts_dedup_drop;
+		out->pkts_filter_drop = src->pkts_filter_drop;
+		out->pkts_overflow = src->pkts_overflow;
+		out->pkts_dbs_drop = src->pkts_dbs_drop;
+		out->octets_no_filter = src->octets_no_filter;
+		out->octets_dedup_drop = src->octets_dedup_drop;
+		out->octets_filter_drop = src->octets_filter_drop;
+		out->octets_overflow = src->octets_overflow;
+		out->octets_dbs_drop = src->octets_dbs_drop;
+		out->ipft_first_hit = src->ipft_first_hit;
+		out->ipft_first_not_hit = src->ipft_first_not_hit;
+		out->ipft_mid_hit = src->ipft_mid_hit;
+		out->ipft_mid_not_hit = src->ipft_mid_not_hit;
+		out->ipft_last_hit = src->ipft_last_hit;
+		out->ipft_last_not_hit = src->ipft_last_not_hit;
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize the Tx port counters into val: a ntc_stat_get_data_s header
+ * plus nbp records whose layout depends on whether the adapter runs in
+ * vswitch (virt) mode.  Returns the number of 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+
+		for (p = 0; p < nbp; p++, out++) {
+			out->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			out->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			out->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			out->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			out->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct tx_type_fields_cap_s *out =
+		(struct tx_type_fields_cap_s *)txdata->data;
+
+	for (p = 0; p < nbp; p++, out++) {
+		copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+			       &out->rmon);
+		/* pkts is overridden with the accumulated per-port total */
+		out->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/* "get layout_version": reply with the statistics layout version as an int. */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(*reply));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/* "get flm_layout_version": 1 for flm_stat_ver < 18, otherwise 2. */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(*reply));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * Return the total number of 64-bit counters occupied by one statistics
+ * type, including the STAT_INFO_ELEMENTS header.  *num_records receives
+ * the number of records of that type (colors, queues, ports, ...).
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	/*
+	 * Initialize both values: without this, an out-of-range enum value
+	 * would leave nrec/size uninitialized (undefined behavior) since the
+	 * switch has no default branch.
+	 */
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for the "get <stats>" requests: allocate a reply buffer
+ * sized for stype, fill it via read_counters() under the statistics
+ * lock and hand the buffer to the caller through *data / *len.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int nb_bytes = get_size(stat, stype, &nbg) * sizeof(uint64_t);
+	uint64_t *buf = malloc(nb_bytes);
+
+	if (!buf) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, buf, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)buf;
+	*len = nb_bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": return the flowmatcher statistics. */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": return the color statistics. */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": return the host-buffer (queue) statistics. */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get rx_counters": return the Rx port statistics. */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get tx_counters": return the Tx port statistics. */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot state for client_id.  If parent is non-NULL it
+ * receives the list element preceding the match (NULL when the match is
+ * the list head, or the list tail when nothing matched), which the
+ * caller can use for unlinking.  Returns NULL when not found.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot state for client_id, creating and linking a new
+ * (buffer-less) entry at the list head if none exists yet.  Returns
+ * NULL when allocating a new entry fails.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *entry = find_client_snap_data(stat, client_id, NULL);
+
+	if (entry)
+		return entry;
+
+	entry = malloc(sizeof(*entry));
+	if (entry) {
+		entry->client_id = client_id;
+		entry->buffer = NULL;
+		entry->next = stat->snaps_base;
+		stat->snaps_base = entry;
+	}
+	return entry;
+}
+
+/*
+ * "snapshot": read all statistics (colors, queues, Rx and Tx ports)
+ * into a per-client buffer under the statistics lock so the set is
+ * consistent.  Sections are later retrieved via "get snapshot <name>".
+ * This call itself returns no payload on success.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps)
+		goto err_out;	/* lock not taken yet; must not unlock */
+
+	if (snaps->buffer)
+		free(snaps->buffer);
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	/* Collect all sections under one lock so the snapshot is atomic */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_unlock;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_unlock:
+	/*
+	 * Unlock only on paths that actually acquired the lock; the old
+	 * single err_out label also unlocked when get_client_snap_data()
+	 * failed before the lock was taken, which is undefined behavior.
+	 */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+err_out:
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of the client's stored snapshot into a freshly
+ * allocated reply buffer.  Fails with NO_DATA if the client has not
+ * taken a snapshot yet.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* was "if (!data)": that tested the out-pointer, not the allocation */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": return the color section of the stored snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": return the queue section of the stored snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": return the Rx port section of the snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": return the Tx port section of the snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ */
+/*
+ * Module entry point: dispatch an incoming "stat" request to the
+ * matching handler in stat_entry_funcs.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id is forwarded below, so it must not be tagged _unused */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously returned by a stat request handler. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client teardown: unlink the client's snapshot state from the
+ * module's linked list and release it.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink (parent is NULL when snaps is the list head) */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);	/* free(NULL) is a no-op; no guard needed */
+	free(snaps);
+}
+
+/* Operations table used to register this module with the NtConnect framework. */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Attach a driver instance to the stat module and register it with the
+ * NtConnect framework.  The module is only activated when the adapter
+ * reports a statistics layout version this module can serialize.
+ * Returns the result of register_ntconn_mod(), or -1 on failure.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/*
+	 * Guard the dereference below: the request handlers all check
+	 * mp_nthw_stat for NULL, so registration must do the same.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: no statistics context. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-slot registration state; one entry per registered adapter. */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Top-level commands exposed by the test module. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * "test": echo request.  The request blob holds a struct test_s with
+ * 'number' trailing uint64_t values, which are copied back verbatim.
+ * On malformed input a struct test_s with a negative status and zero
+ * values is returned instead.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	/* Reject a negative count before it poisons the size arithmetic */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: negative count");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Error reply: same struct, negative status, no values */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* Flow API function selectors. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Module entry point: dispatch an incoming "test" request to the
+ * matching handler in adapter_entry_funcs.
+ */
+static int test_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id is forwarded below, so it must not be tagged _unused */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer handed out by a test request handler. */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op, so no NULL guard is needed */
+	free(data);
+}
+
+/*
+ * Per-client teardown hook required by the ntconnapi_t interface.
+ * The test module keeps no per-client state, so nothing is released.
+ */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table used to register this module with the NtConnect framework. */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Attach a driver instance to the test module by claiming a free slot
+ * in test_hdl[] and registering it with the NtConnect framework.
+ * Returns the result of register_ntconn_mod(), or -1 if every slot is
+ * already taken.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_CLIENTS && test_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[slot],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* RE: [PATCH 8/8] net/ntnic: adds socket connection to PMD
  2023-08-16 14:46   ` Stephen Hemminger
@ 2023-08-25 13:52     ` Christian Koue Muf
  0 siblings, 0 replies; 142+ messages in thread
From: Christian Koue Muf @ 2023-08-25 13:52 UTC (permalink / raw)
  To: Stephen Hemminger, Mykola Kostenok; +Cc: dev

On Wednesday, August 16, 2023 4:46 PM
Stephen Hemminger <stephen@networkplumber.org> wrote:

> On Wed, 16 Aug 2023 15:25:52 +0200
> Mykola Kostenok <mko-plv@napatech.com> wrote:
>
> > From: Christian Koue Muf <ckm@napatech.com>
> > 
> > The socket connection is used by Napatech's tools for monitoring and 
> > rte_flow programming from other processes.
> > 
> > Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> > Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>
> I would prefer that this be general and work with other PMD's.
> Why is existing telemetry model not good enough?

The existing telemetry is good enough in many cases. The problems arise in multi-container environments. The design of Napatech's adapters is that they only have 1 PF, which is owned by a single process in a single container. Other containers will only have access to VFs, which do not provide any metrics. The ntconnect socket will allow remote applications to access data from the application that owns the PF.

I understand your concerns about security. My suggestion would be to have this code disabled by default via a meson build option.

^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v7 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-22 15:41   ` [PATCH v7 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-29  8:13     ` David Marchand
  0 siblings, 0 replies; 142+ messages in thread
From: David Marchand @ 2023-08-29  8:13 UTC (permalink / raw)
  To: Mykola Kostenok; +Cc: dev, ckm

On Tue, Aug 22, 2023 at 5:42 PM Mykola Kostenok <mko-plv@napatech.com> wrote:
>
> From: Christian Koue Muf <ckm@napatech.com>
>
> Hooks into the DPDK API, and make the PMD available to use.
> Also adds documentation as .rst and .ini files.
>
> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>

*Disclaimer* I did not review this series.

I noticed inclusion of linux/pci_regs.h in this patch, and afaics this
header does not seem needed, please remove it.

For the context of this request, I have this cleanup series pending:
https://patchwork.dpdk.org/project/dpdk/list/?series=29292&state=%2A&archive=both
If you do need to include pci_regs.h and my series does not cover your
needs, feel free to describe them.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 142+ messages in thread

* [PATCH v8 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (11 preceding siblings ...)
  2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-29  8:15 ` Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (7 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/* Bus/interface types through which an FPGA module can be addressed.
+ * NOTE(review): the semantics of RAB/NMB/NDM buses are Napatech-internal
+ * and not derivable from this header — confirm against hardware docs. */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias: the default SPI bus is SPI0 */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/* Access semantics of a register or of its fields. */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,   /* presumably read/clear-on-1 — confirm with HW docs */
+	REGISTER_TYPE_MIXED, /* fields within the register differ in access type */
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/* Static description of one bit-field within a register; instances are
+ * generated into the nthw_fpga_*.c tables. */
+struct nt_fpga_field_init {
+	int id;             /* field identifier (nthw_fpga_registers_defs.h) */
+	uint16_t bw;        /* bit width of the field — consistent with table usage */
+	uint16_t low;       /* lowest bit position of the field in the register */
+	uint64_t reset_val; /* field value after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/* Static description of one register and its constituent fields. */
+struct nt_fpga_register_init {
+	int id;                       /* register identifier */
+	uint32_t addr_rel;            /* address relative to the module base */
+	uint16_t bw;                  /* total bit width of the register */
+	nt_fpga_register_type_t type; /* access semantics */
+	uint64_t reset_val;           /* register value after reset */
+	int nb_fields;                /* number of entries in 'fields' */
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/* Static description of one module instance within an FPGA image. */
+struct nt_fpga_module_init {
+	int id;                   /* module identifier (nthw_fpga_modules_defs.h) */
+	int instance;             /* instance number when a module occurs multiple times */
+	int def_id;               /* definition id — presumably links to module defs; confirm */
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id; /* bus the module is reached through */
+	uint32_t addr_base;       /* base address of the module on that bus */
+	int nb_registers;         /* number of entries in 'registers' */
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* One (id, value) production parameter of an FPGA image. */
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/* Top-level static description of a supported FPGA image: identity,
+ * build info, production parameters and the module list. */
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time; /* build timestamp — units/epoch not shown here */
+	int nb_prod_params;       /* number of entries in 'product_params' */
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;           /* number of entries in 'modules' */
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+# The PMD is only supported on x86_64 Linux; skip the build elsewhere.
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+# Prefer the project-wide default cflags when defined; otherwise fall back
+# to the machine args and allow use of internal DPDK APIs.
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+/*
+ * DBS (descriptor-based scheduling / doorbell) module tables.
+ * Each nt_fpga_field_init_t entry appears to be
+ *   { field id, bit width, bit offset within register, reset value }
+ * and each nt_fpga_register_init_t entry
+ *   { register id, register address/index, total bit width, access type,
+ *     register reset value, field count, field table }
+ * NOTE(review): layout presumed from usage — confirm against fpga_model.h.
+ * The field-count column matches the element count of the referenced
+ * *_fields array for every entry visible here.
+ */
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+/* Non-zero reset: DIV=2, MUL=1 (packed reset value 131073 = 2<<16 | 1). */
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/* DBS register map: ids, addresses, widths, access types and field tables. */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (flow matcher / learn module) tables.
+ * Field entries are { id, bit width, bit offset, reset value } and register
+ * entries { id, address, bit width, access type, reset value, field count,
+ * field table } -- NOTE(review): presumed layout, confirm against
+ * fpga_model.h. Field counts visible here all match the referenced arrays.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	/* Reset 16 at offset 23 gives the register reset 134217728 (16<<23). */
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+/* 288-bit flow-info read record. */
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* 768-bit learn record written to FLM_LRN_DATA. */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 403-bit FLM recipe record. */
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* The following FLM_STAT_* arrays are all single 32-bit event counters. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+/* 96-bit status read record. */
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register map. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG (generator/frame generator) tables, duplicated per instance 0/1.
+ * Field entries: { id, bit width, bit offset, reset value }; register
+ * entries: { id, address, bit width, access type, reset value, field count,
+ * field table } -- NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+/* SIZE resets to 64; 64<<16 = 4194304, the GFG_CTRL0 register reset. */
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* Instance 1 stream id resets to 1 (instance 0 resets to 0). */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+/* GFG register map: instance 0 at addresses 0-4, instance 1 at 5-9. */
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF (TX MAC feeder / inter-frame-gap control) tables.
+ * Field entries: { id, bit width, bit offset, reset value }; register
+ * entries: { id, address, bit width, access type, reset value, field count,
+ * field table } -- NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+/* Compensation resets to 16384, matching the register reset below. */
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+/* GMF register map; sticky/max-delayed stats use clear-on-read (RC1). */
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY tables: per-port PHY/QSFP control and status pins.
+ * Field entries: { id, bit width, bit offset, reset value }.
+ * Register resets are the packed field resets: CFG reset 170 = 0xAA
+ * (INT_B/MODPRS_B bits 1,3,5,7 set); GPIO reset 17 = 0x11 (both LPMODE
+ * bits 0 and 4 set).
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU (header field update) recipe tables.
+ * Field entries: { id, bit width, bit offset, reset value }; register
+ * entries: { id, address, bit width, access type, reset value, field count,
+ * field table } -- NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 155-bit HFU recipe record (three length rewriters A/B/C plus TTL). */
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface / PCIe) tables: product identification, build
+ * stamp, test patterns, UUID and traffic statistics.
+ * Field entries: { id, bit width, bit offset, reset value }; register
+ * entries: { id, address, bit width, access type, reset value, field count,
+ * field table } -- NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+/* Build timestamp constant (seconds since epoch). */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+/*
+ * Product id 9563.055.024: group 9563, version 55, revision 24.
+ * Packed: 9563<<16 | 55<<8 | 24 = 626734872, the register reset below.
+ */
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* Read/write test patterns: 0x11223344 and 0xAABBCCDD. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+/* 128-bit bitstream UUID split across four 32-bit words. */
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH (hashing / RSS) recipe tables.
+ * Field entries: { id, bit width, bit offset, reset value } --
+ * NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 743-bit hash recipe record (incl. 320-bit key K and 320-bit word mask). */
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST (header stripper) recipe tables.
+ * Field entries: { id, bit width, bit offset, reset value } --
+ * NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 133-bit strip recipe: start/end window plus three modifier slots. */
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR (IP fragmentation) recipe tables: per-recipe enable bit and
+ * 14-bit MTU. Field entries: { id, bit width, bit offset, reset value } --
+ * NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC tables: I2C controller register layout (Xilinx AXI IIC style:
+ * control, status, interrupt, FIFOs and bus-timing registers).
+ * Field entries: { id, bit width, bit offset, reset value } --
+ * NOTE(review): presumed layout, confirm in fpga_model.h.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+/* Status resets with both FIFO-empty flags set (bits 6 and 7 -> 192). */
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/* Auto-generated register map for the INS (header inserter) module.
+ * Field entries are { id, bit width, lsb, reset }; register entries are
+ * { id, index, width, access type, reset, field count, field table }.
+ * The CTRL/DATA pair is the indirect-access idiom used by the flow
+ * modules in this file: CTRL carries address (ADR) and count (CNT),
+ * DATA carries the record written at that address.  */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/* Auto-generated register map for the KM (key matcher) module.  Field
+ * entries are { id, bit width, lsb, reset }; register entries are { id,
+ * index, width, access type, reset, field count, field table }.  Wide
+ * records (e.g. KM_CAM_DATA spanning 216 bits, KM_RCP_DATA spanning 781
+ * bits) are described as one register with fields at lsb offsets beyond
+ * 32, to be shuttled through the indirect CTRL/DATA access registers.  */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* CAM entry: six 32-bit key words W0..W5 followed by six 4-bit flow
+ * types FT0..FT5 (lsb 192..212). */
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* KM recipe record (781 bits); entries are sorted by field name, not by
+ * bit position. */
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/* Auto-generated field tables for the MAC_PCS (100G MAC / PCS) module.
+ * Each entry is { field id, bit width, lsb position, reset value }.
+ * Per-lane GTY transceiver controls repeat a field four times with a
+ * _0.._3 suffix, one per serdes lane.  NOTE(review): generated data --
+ * do not hand-edit; regenerate from the FPGA model.  */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/* Non-zero resets: debounce/linkup latency of 10 and port control of 2. */
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+/* TX differential swing control, reset 24 per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+/* TX post-cursor emphasis, reset 20 per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+/* Defaults enable TX and FCS removal (TX_ENABLE=1, TX_FCS_REMOVE=1). */
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+/* Default maximum packet length: 10000 bytes (jumbo frames). */
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+/* Latched (sticky) copies of the STAT_PCS_RX bits, same bit layout. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+/* RX/TX timestamp compensation delays; defaults 1451/1440 (units per
+ * the timestamping design -- not visible here). */
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/* MAC_PCS register table: { id, index, bit width, access type, reset
+ * value, field count, field table }.  Register resets are the field
+ * resets OR'ed into place, e.g. DEBOUNCE_CTRL 264714 = 10 + (10 << 8) +
+ * (2 << 17) and GTY_DIFF_CTL 811800 = 24 packed into four 5-bit lanes.
+ * NOTE(review): generated data -- regenerate rather than hand-edit.  */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/* Auto-generated register map for the MAC_RX statistics module: each
+ * register is a single read-only 32-bit counter with layout { id, index,
+ * width, type, reset, field count, fields }; field entries are { id, bit
+ * width, lsb, reset }.  */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/* Auto-generated register map for the MAC_TX statistics module: five
+ * read-only 32-bit counters, same { id, index, width, type, reset,
+ * field count, fields } layout as the MAC_RX table.  */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/* Auto-generated register map for the PCI_RD_TG (PCIe read traffic
+ * generator, test/debug) module.  Field entries are { id, bit width,
+ * lsb, reset }; register entries are { id, index, width, type, reset,
+ * field count, fields }.  RDDATA0/1 hold the low/high halves of a
+ * 64-bit physical address for the generated reads.  */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/* Auto-generated register map for the PCI_TA (PCIe test analyzer)
+ * module: a write-only enable bit plus four read-only 32-bit result
+ * counters.  Entry layouts match the rest of this file: fields are
+ * { id, bit width, lsb, reset }, registers { id, index, width, type,
+ * reset, field count, fields }.  */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/* Auto-generated register map for the PCI_WR_TG (PCIe write traffic
+ * generator, test/debug) module; mirrors the PCI_RD_TG layout with the
+ * addition of a sequence register and an INC_MODE bit in WRDATA2.
+ * Field entries are { id, bit width, lsb, reset }; register entries
+ * are { id, index, width, type, reset, field count, fields }.  */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/* Auto-generated register map for the PDB (packet descriptor builder)
+ * module.  Field entries are { id, bit width, lsb, reset }; register
+ * entries are { id, index, width, type, reset, field count, fields }.
+ * The RCP_CTRL/RCP_DATA pair is the usual indirect recipe-table access
+ * idiom used by the flow modules in this file.  */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/* PDI module tables — register names (CR/SR/DTR/DRR, PRE prescaler, parity/
+ * frame/overrun errors) suggest a UART-like serial interface; confirm.
+ * Field rows: { id, width, lsb, reset }; register rows: { id, index, width,
+ * type, reset, nfields, fields } — inferred layout. */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/* PTP1588 module tables — IEEE 1588 precision-time-protocol block (PHY
+ * resets, TX/RX timestamping, MAC management interface, GPIO). Generated
+ * data. Field rows: { id, width, lsb, reset }; register rows: { id, index,
+ * width, type, reset, nfields, fields } — inferred layout.
+ * Register reset values equal the OR of their fields' default bits, e.g.
+ * PTP1588_CONF reset 3072 = PHY_RST1 (bit 10) | PHY_RST2 (bit 11). */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/* QSL module tables — queue selection logic (queue enable, queue state,
+ * recipes, unmatched-queue mapping); generated data. Field rows: { id,
+ * width, lsb, reset }; register rows: { id, index, width, type, reset,
+ * nfields, fields } — inferred layout. */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/* QSPI module tables — register names mirror the Xilinx AXI Quad SPI layout
+ * (CR/SR/DTR/DRR/IER/ISR/SSR, FIFO occupancy); confirm against the core's
+ * datasheet. Field rows: { id, width, lsb, reset }; register rows: { id,
+ * index, width, type, reset, nfields, fields } — inferred layout.
+ * e.g. QSPI_CR reset 384 = MSSAE (bit 7) | MTI (bit 8). */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/* RAC module tables — register access (RAB in/out buffers, DMA pointers,
+ * debug window); generated data. Field rows: { id, width, lsb, reset };
+ * register rows: { id, index, width, type, reset, nfields, fields } —
+ * inferred layout. Note RAC register indices are large (4160..4480),
+ * unlike the small per-module indices elsewhere in this file.
+ * RAC_RAB_BUF_FREE reset 33489407 = 0x01FF01FF = IB_FREE=511 | OB_FREE=511. */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/* RFD module tables — frame decoding defaults; generated data. Field rows:
+ * { id, width, lsb, reset }; register rows: { id, index, width, type, reset,
+ * nfields, fields } — inferred layout. Defaults encode well-known protocol
+ * constants: TPID 33024 = 0x8100 (802.1Q), VXLAN port 4789 (RFC 7348),
+ * max frame 9018 (jumbo). RFD_VLAN reset 2164293888 = 0x81008100. */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/* RMC module tables — RX MAC control/blocking and overflow status; generated
+ * data. Field rows: { id, width, lsb, reset }; register rows: { id, index,
+ * width, type, reset, nfields, fields } — inferred layout.
+ * RMC_CTRL reset 771 = 0x303 = STATT|KEEPA (bits 0-1) | MAC_PORT=3 (bits 8-9). */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/* RPL module tables — packet replacer (recipes, extension table and 128-bit
+ * replacement data); generated data. Field rows: { id, width, lsb, reset };
+ * register rows: { id, index, width, type, reset, nfields, fields } —
+ * inferred layout. */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/* RPP_LR module tables — recipe tables with per-recipe MTU (IFR) and
+ * expansion settings; generated data. Field rows: { id, width, lsb, reset };
+ * register rows: { id, index, width, type, reset, nfields, fields } —
+ * inferred layout. */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/* RST9563 module tables — reset/clock control for the 9563 FPGA image (see
+ * file name nthw_fpga_9563_055_024_0000.c); generated data. Field rows:
+ * { id, width, lsb, reset }; register rows: { id, index, width, type, reset,
+ * nfields, fields } — inferred layout. RST9563_RST reset 8191 = 0x1FFF =
+ * exactly the OR of all default-1 reset field bits (bits 0-12). */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+/* STICKY is REGISTER_TYPE_RC1 below — presumably read-clear; confirm. */
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/* SLC module tables — slicer recipes (tail slicing / pcap); generated data.
+ * Field rows: { id, width, lsb, reset }; register rows: { id, index, width,
+ * type, reset, nfields, fields } — inferred layout. */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/* SPIM module tables — SPI (presumably master-side, cf. SPIS below) with
+ * control, data and FIFO-level status registers; generated data. Field rows:
+ * { id, width, lsb, reset }; register rows: { id, index, width, type, reset,
+ * nfields, fields } — inferred layout. SPIM_SR reset 6 = TXEMPTY|RXEMPTY. */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/* SPIS module tables — SPI (presumably slave-side, cf. SPIM above) adding a
+ * RAM window and frame/read/write error status bits; generated data. Field
+ * rows: { id, width, lsb, reset }; register rows: { id, index, width, type,
+ * reset, nfields, fields } — inferred layout. */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/* STA module tables — statistics counters (bytes, packets, CV/FCS errors)
+ * with a DMA host address for counter delivery; generated data. Field rows:
+ * { id, width, lsb, reset }; register rows: { id, index, width, type, reset,
+ * nfields, fields } — inferred layout. */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/* TEMPMON module tables — temperature monitor with over-temperature alarm
+ * and override bits; generated data. Field rows: { id, width, lsb, reset };
+ * register rows: { id, index, width, type, reset, nfields, fields } —
+ * inferred layout. */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/* TINT module tables — timer interrupt interval plus delayed/skipped
+ * counters; generated data. Field rows: { id, width, lsb, reset }; register
+ * rows: { id, index, width, type, reset, nfields, fields } — inferred
+ * layout. */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/* TMC module tables — per-port replace enable (P1 defaults on, P0 off;
+ * register reset 2 matches); generated data. Field rows: { id, width, lsb,
+ * reset }; register rows: { id, index, width, type, reset, nfields,
+ * fields } — inferred layout. */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = { /* field init appears to be { field id, bit width, lsb position, reset value } — confirm against fpga_model.h */
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = { /* connectors 0-2: full config (BLIND/DC_SRC/PORT/PPSIN/EDGE) */
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = { /* connectors 3-6: reduced config (BLIND/PORT/EDGE only) */
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 }, /* NOTE(review): suffix is _TIME while CON0-4/6 use _NS — presumably a generator quirk; verify against the registers defs header */
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = { /* connector 7 is the host sample pair (no config register in view) */
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = { /* TSM core config; entries are { field id, bit width, lsb, reset } */
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = { /* CAUSE mirrors the 19-bit MASK layout of TSM_INT_CONFIG */
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = { /* three LEDs, 9 bits each (MODE/COLOR/BG_COLOR/SRC) at offsets 0/9/18 */
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 }, /* reset value 100000 matches the register-level reset below */
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = { /* PI controller: integrator value plus KI/KP gains and shift */
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = { /* link status per connector 0-5 plus sync/PTP flags */
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = { /* entries appear to be { reg id, index, bit width, type, reset value, nb fields, fields } — confirm against fpga_model.h */
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = { /* entries appear to be { module id, instance, def id, ver major, ver minor, bus, address, nb registers, registers } — per the trailing comments; confirm against fpga_model.h */
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = { /* { parameter id, value } pairs; 140 entries, terminated by the { 0, -1 } sentinel */
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 }, /* build time as a Unix epoch value; matches fpga_build_time in the prod init below */
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 }, /* 6388 == 0x18F4 (Napatech PCI vendor id) */
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+/* Product descriptor for FPGA image 9563-055-024-0000; exported via nthw_fpga_instances.h. */
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules, /* nb params, params, nb modules, modules — counts match the two tables above */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[]; /* table of supported FPGA product descriptors — presumably NULL-terminated; verify in its definition */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000; /* defined in nthw_fpga_9563_055_024_0000.c */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L) /* NOTE(review): id 32 is unassigned — gap presumably intentional in the generated id space */
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End-of-list sentinel: must stay one past the last real module id above;
+ * only aliases may be added below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Number of distinct module ids (same sentinel value as MOD_UNKNOWN_MAX). */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_PARAMETERS_DEFS_H_
+#define NTHW_FPGA_PARAMETERS_DEFS_H_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* NTHW_FPGA_PARAMETERS_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO module: no registers defined in this FPGA image */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV module: 10G/25G Ethernet MAC/PCS register and field index constants */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: masking recipe registers (per-recipe DYN/EN/LEN/OFS mask fields) */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF: serial link interface registers (GT/QPLL control, DRP access, per-endpoint link status) */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3: PCIe gen3 endpoint registers (config, interrupt clear/force/mask, latency and traffic statistics, product ID) */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG: PCIe read traffic generator (test) registers */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA: PCIe traffic test counters (good/bad packets, length/payload errors) */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG: PCIe write traffic generator (test) registers */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01: NT100A01 board clock management (PTP/TS clock select, MMCM reset/lock, Si5328 GPIO) */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01: NT50B01 board clock management (TS clock select, MMCM reset/lock) */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS: Ethernet Physical Coding Sublayer registers (block/lane lock, BER/BIP counters, GTH config/tuning) */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100: variant of the PCS register set (adds QPLL lock/refclk status) -- presumably the 100G flavor */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB: packet descriptor recipe registers (descriptor length/offsets, duplication, TX port/now) */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI: UART-style serial interface (control, RX/TX data, status with parity/frame/overrun errors) */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G: 10G Ethernet PHY core (PCS/PMA config and status, GT control/status, GPIO, link summary) */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G: dual-mode PHY core: 1G (SGMII/BASE-X, autoneg) and 10G PCS/PMA register sets */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v8 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-29  8:15   ` Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   71 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15441 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+typedef uint8_t rab_bus_id_t;	/* RAB bus instance id; NOTE(review): uses uint8_t but no <stdint.h> include here -- relies on the consumer including it first, confirm */
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+const int n_data_si5340_nt200a02_u23_v5;	/* entry count; zero-initialized placeholder definition */
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;	/* profile table; NULL here -- presumably populated in a later commit, confirm */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+#define clk_profile_size_error_msg "size test failed"	/* shared diagnostic string; presumably used by profile-size sanity checks -- confirm */
+
+typedef struct {	/* fmt0: 8-bit register address, value and mask */
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;	/* presumably selects which reg_val bits to write -- confirm */
+} clk_profile_data_fmt0_t;
+
+typedef struct {	/* fmt1: 16-bit register address, 8-bit value */
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+typedef struct {	/* fmt2: full-width register address, 8-bit value */
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+typedef enum {	/* selects which clk_profile_data_fmtN_t layout a profile table uses */
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+extern const int n_data_si5340_nt200a02_u23_v5;	/* number of entries in the table below */
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;	/* Si5340 profile for NT200A02 (U23, v5) */
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__	/* umbrella header: pulls in all nthw core module headers in one include */
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"	/* SPI master/slave pair -- naming-based, confirm */
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"	/* Si5340 clock device support (see nthw_clock_profiles) */
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Cache the FPGA product capability parameters in the adapter info struct
+ * and derive the adapter profile from which feature modules are present.
+ * Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	/* Each parameter defaults to -1 when absent from the FPGA model */
+	p_fpga_info->n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	p_fpga_info->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	p_fpga_info->n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	p_fpga_info->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	p_fpga_info->n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	/* VSWITCH FPGA: current (NFV/OVS) or legacy (IOA) detection */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0 ||
+			fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0) {
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	} else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0) {
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+	} else {
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+	}
+
+	return 0;
+}
+
+/*
+ * Read a single byte from an I2C device behind the given IIC core instance.
+ *
+ * Returns the byte value (0..255) on success, or -1 on failure; the error
+ * value cannot collide with data since valid bytes are non-negative.
+ * Fix: removed the unnecessary `else` after `return` so both error paths
+ * read uniformly (error-path-first style).
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return val;
+}
+
+/*
+ * Write a single byte to an I2C device behind the given IIC core instance.
+ * Returns 0 on success, -1 on failure (init or write error).
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+	int res = nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8);
+
+	if (res == 0)
+		res = nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1,
+					 &val);
+
+	return res == 0 ? 0 : -1;
+}
+
+/*
+ * Probe a range of IIC core instances and scan each attached I2C bus,
+ * logging any devices found. Instances that fail allocation or init are
+ * skipped. Always returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int n_instance;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (n_instance = n_instance_no_begin; n_instance <= n_instance_no_end;
+			n_instance++) {
+		nthw_iic_t *p_iic = nthw_iic_new();
+
+		if (!p_iic)
+			continue;
+
+		if (nthw_iic_init(p_iic, p_fpga, n_instance, 8) == 0) {
+			nthw_iic_set_retry_params(p_iic, -1, 100, 100, 3, 3);
+			nthw_iic_scan(p_iic);
+		}
+		nthw_iic_delete(p_iic);
+		p_iic = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Detect which SiLabs clock-synth part sits at the given I2C address by
+ * switching it to page 0 and reading its 8-byte identification block.
+ * Returns the part number (5338/5340/5341) on success, -1 on failure.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* Pack all 8 ident bytes into one big-endian uint64
+			 * (the initial -1 is fully shifted out after 8 rounds)
+			 */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			/*
+			 * NOTE(review): the 5338 branch compares against
+			 * decimal 38 while the others use hex (0x53/0x40/0x41)
+			 * - confirm 38 vs 0x38. Also, an 0x53xx part other
+			 * than 0x40/0x41 leaves res == 0 (the read's success
+			 * code) rather than -1 - confirm intended.
+			 */
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of the passed data buffer.
+ * Polynomial 0x1021 (x^16 + x^12 + x^5 + 1), initial value 0x0000 -
+ * the XModem variant, as used by X.25, HDLC, Bluetooth, SD and others.
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t crc = 0;
+	size_t i;
+
+	for (i = 0; i < length; i++) {
+		/* byte-swap, fold in the data byte, then apply the
+		 * shift/xor reduction for polynomial 0x1021
+		 */
+		crc = (uint16_t)(crc >> 8 | crc << 8);
+		crc = (uint16_t)(crc ^ buffer[i]);
+		crc = (uint16_t)(crc ^ (crc & 0xff) >> 4);
+		crc = (uint16_t)(crc ^ crc << 8 << 4);
+		crc = (uint16_t)(crc ^ (crc & 0xff) << 4 << 1);
+	}
+	return crc;
+}
+
+/*
+ * Probe the board-management AVR over SPI: query its SPI protocol and
+ * firmware versions, fetch system info (SYSINFO_2 with fallback to the
+ * older SYSINFO), and read the CRC-protected VPD EEPROM block (part/serial
+ * numbers, board name, feature mask, MAC address pool). Parsed values are
+ * stored into p_fpga->p_fpga_info->nthw_hw_info. Returns the result of the
+ * last SPI transfer (0 on success, negative on failure).
+ */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		/* Local scratch layout for everything parsed from the AVR */
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/*
+		 * NOTE(review): res from this and the previous transfer is
+		 * not checked before the replies are parsed - confirm the
+		 * transfers cannot leave rx_data/u32 unset on failure.
+		 */
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		/*
+		 * NOTE(review): "%c.%c.%c" prints the version bytes as
+		 * characters - confirm they are ASCII digits, not numbers.
+		 */
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		/*
+		 * NOTE(review): rx_buf.size is presumably updated to the
+		 * actual reply length by nthw_spi_v3_transfer - confirm.
+		 */
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* Fallback for older firmware: AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			/* The last two bytes of the VPD block hold its CRC-16 */
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					/* The inverted mask doubles as an integrity check */
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					/*
+					 * Fold the 6 MAC octets into a uint64;
+					 * the ntohl round-trip swaps the two
+					 * 32-bit halves on little-endian hosts.
+					 */
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	/*
+	 * NOTE(review): p_avr_spi from nthw_spi_v3_new() is not released
+	 * here - verify whether a matching delete/free API exists.
+	 */
+	return res;
+}
+
+/*
+ * Configure the Si5340 clock synthesizer with a format-2 profile table.
+ * Used on NT50B01, NT200A02 and NT200A01-HWbuild2.
+ *
+ * Returns the result of nthw_si5340_config_fmt2() (0 on success).
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	/* Fix: the IIC instance was previously leaked; release it too
+	 * (safe here, the si5340 wrapper holding it was deleted above).
+	 */
+	nthw_iic_delete(p_nthw_iic);
+	p_nthw_iic = NULL;
+
+	return res;
+}
+
+/*
+ * Identify and bring up the FPGA on an adapter.
+ *
+ * Reads the ident/build-time registers, looks up a matching FPGA model,
+ * caches product parameters, initializes the RAC, runs the board-specific
+ * reset sequence, probes the PCIe3 module (falling back to HIF) and starts
+ * the TSM timers. Returns 0 on success, non-zero on failure.
+ *
+ * Fixes vs. original: the fpga manager is now freed on the unsupported-FPGA
+ * error path (was leaked), and the inner shadowing n_fpga_ident declaration
+ * (-Wshadow) is removed.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/* Assign the function-scope n_fpga_ident directly (the
+		 * original declared a shadowing local here and copied it
+		 * back through p_fpga_info afterwards).
+		 */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	/* The manager is only needed for the query above; delete it on all
+	 * paths (it was leaked when the FPGA was unsupported).
+	 */
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Board-specific bring-up */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Probe for the PCIe3 module; fall back to HIF when it is absent */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Quiesce the adapter by resetting the register access bus (RAB).
+ * Returns the reset result, or -1 when no RAC instance is available.
+ * Fix: collapsed the redundant nested p_fpga_info null check.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * Bring up an NT200A0x adapter: run the common reset sequence first, then
+ * the product-specific reset for the detected FPGA product code.
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+
+	if (res == 0) {
+		/* reset specific */
+		switch (p_fpga_info->n_fpga_prod_id) {
+		case 9563:
+			res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+			break;
+		default:
+			NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+			       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+	}
+
+	if (res)
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+/* Top-level FPGA init entry point for NT200A0X-based adapters:
+ * runs the common reset stage and then a product-specific stage.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+/* Product-specific reset stage for the 9563 FPGA image. */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve and cache all RST9563 register and field handles into @p.
+ *
+ * Fields that do not exist on the 9563 FPGA image are recorded as NULL so
+ * the common NT200A0X reset sequence can skip them; optional fields are
+ * looked up with register_query_field() and logged if missing.
+ *
+ * Returns 0 on success, -1 if the RST9563 module instance is not present.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	/* Look up the (single) RST9563 module instance once. */
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* Optional fields: query instead of get, they may be absent. */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 peripheral reset: assert, then immediately de-assert.
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_rst_reg;
+	nt_field_t *p_fld_periph;
+
+	if (p_mod_rst == NULL)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+
+	p_rst_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p_fld_periph = register_get_field(p_rst_reg, RST9563_RST_PERIPH);
+	field_set_flush(p_fld_periph);
+	field_clr_flush(p_fld_periph);
+
+	return 0;
+}
+
+/*
+ * Program the clock synthesizer for a 9563-based adapter.
+ * Only the Si5340 model is supported; anything else is rejected.
+ * Returns 0 on success, -1 on unsupported model or programming failure.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, p_fpga->m_product_id,
+		       n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	/* Load the NT200A02 U23 v5 profile into the Si5340 */
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * 9563 product-specific reset stage: peripheral reset, clock synthesizer
+ * programming, register/field setup and finally the common NT200A0X reset
+ * sequence. Returns 0 on success, the failing step's result otherwise.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	int res;
+
+	/* 1) Pulse the peripheral reset */
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res != 0)
+		goto err_out;
+
+	/* 2) Program the clock synthesizer detected by the common init */
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga,
+						p_rst->mn_si_labs_clock_synth_model,
+						p_rst->mn_si_labs_clock_synth_i2c_addr);
+	if (res != 0)
+		goto err_out;
+
+	/* 3) Resolve all RST9563 register/field handles */
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res != 0)
+		goto err_out;
+
+	/* 4) Run the common NT200A0X reset sequence */
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res != 0)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+	       res, __func__, __LINE__);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until the DDR4 PLL and the DDR4/TSM-REF MMCMs have locked.
+ *
+ * The DDR4 PLL is retried up to 5 times with a DDR4 reset pulse between
+ * attempts; the initial wait is 5 sec and each retry waits 8 sec, giving a
+ * total timeout of 1 * 5 + 5 * 8 = 45 sec (at least 21 sec has been
+ * observed to be necessary). On success all sticky MMCM/PLL unlock bits
+ * are cleared and verified.
+ *
+ * Returns 0 on success, -1 if an MMCM fails to lock.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;	/* single variable; the original shadowed it in the loop */
+	int retrycount = 5;	/* signed: "<= 0" check below is meaningful */
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* Only images that expose the TSM REF MMCM lock status are checked */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until the DDR4 PLL has locked and the SDRAM controller (SDC, if the
+ * module is present) has calibrated. On some adapters a calibration time
+ * of 2.3 seconds has been seen; up to 5 retries are made, each preceded by
+ * a DDR4 reset pulse. Returns 0 on success, non-zero on timeout/failure.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	/* Probe first with a NULL instance; only allocate if an SDC exists */
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			/* Allocation failed; continue without an SDC handle */
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	/* Retry loop: lock the DDR4 PLL, then wait for SDC calibration */
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				/* NOTE(review): p_nthw_sdc may be NULL here when no
+				 * SDC module was found - confirm nthw_sdc_get_states()
+				 * tolerates a NULL instance.
+				 */
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	/* free(NULL)-style: delete tolerates the no-SDC case */
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Run the full NT200A0X reset sequence: put every FPGA domain except the
+ * peripherals into reset, release SYS/CORE clocking and the RAB buses,
+ * wait for the DDR4 PLL/SDC calibration, then release the timesync/PTP
+ * domains and stage power. The numbered comments mirror the bring-up
+ * procedure steps. Returns 0 on success, -1 on any lock or calibration
+ * failure.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* NOTE(review): the "(true) &&" here and below reads like a leftover
+	 * compile-time feature flag - candidate for cleanup.
+	 */
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): PTP MMCM switch-over below is deliberately disabled
+	 * with "if (false)" - confirm whether this is dead code to remove or
+	 * a sequence awaiting enablement.
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Common NT200A0X pre-reset init: reset and set up the RAB buses, probe
+ * the AVR and I2C buses, then detect the Si-Labs clock synthesizer model
+ * (Si5340 first, falling back to Si5338 for old NT200A01 build-1 HW) and
+ * record model/address/hw-id in @p_rst.
+ *
+ * Returns -1 if no supported clock synthesizer is detected; otherwise the
+ * result of the last I2C scan (held in 'res').
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): probe/scan results are each overwritten by the next
+	 * call - looks like deliberate best-effort discovery; confirm that
+	 * the individual results are intentionally ignored.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Record the detection result for the product-specific stage */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset context for NT200A0x adapters: FPGA image identification, the
+ * detected Si-Labs clock synthesizer, pointers to the reset-related
+ * register fields, and per-board SerDes/PCS reset callbacks.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Clock synth detected by nthw_fpga_rst_nt200a0x_init() */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Per-board SerDes/PCS reset hook functions */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate a zero-initialized GPIO_PHY context.
+ * Returns NULL on allocation failure; callers must check.
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc zero-initializes the object, replacing malloc+memset */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/* Scrub and release a GPIO_PHY context; NULL is accepted and ignored. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY module instance to its CFG/GPIO registers and per-port
+ * fields (ports 0 and 1).
+ *
+ * When p is NULL, only probe for the module: returns 0 if the instance
+ * exists, -1 otherwise.  Otherwise returns 0 on success, -1 when the
+ * instance does not exist.
+ *
+ * Mandatory fields use register_get_field(); optional ones (PLL_INTR and
+ * the emulated RXLOS) use register_query_field() and may be left NULL.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	/* probe-only mode: report presence of the module instance */
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* refresh the shadow copy of the CFG register */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Report whether the LPMODE pin is currently driven high for port if_no. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* Report whether the port interrupt is asserted for port if_no. */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* INT_B is active-low: pin reading 0 means interrupt asserted */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* Report whether the port is held in reset for port if_no. */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* RESET_B is active-low: pin reading 0 means reset asserted */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* Report whether a transceiver module is present in port if_no. */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* MODPRS_B is active-low: pin reading 0 means module present */
+	if (field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U)
+		return true;
+	return false;
+}
+
+/*
+ * Report whether the (optional) PLL interrupt pin is asserted for port
+ * if_no.  Returns false when the HW has no PLL_INTR field (no INTR from a
+ * SyncE jitter attenuator).
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_pll_int;
+	if (!fld)
+		return false;
+
+	/* PLL_INTR is a normal (active-high) GPIO pin */
+	return field_get_updated(fld) != 0;
+}
+
+/*
+ * Report whether the (optional) emulated RXLOS pin is set for port if_no.
+ * Returns false when the HW has no RXLOS field.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_port_rxlos;
+	if (!fld)
+		return false;
+
+	return field_get_updated(fld) != 0;
+}
+
+/* Drive the LPMODE pin of port if_no high (enable) or low (disable). */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Assert (enable=true) or deassert the port reset for port if_no.
+ * RESET_B is active-low, hence enable clears the pin.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/*
+ * Set or clear the emulated RXLOS pin for port if_no.
+ * Silently does nothing when the HW has no RXLOS field.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_port_rxlos;
+	if (!fld)
+		return;
+
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Restore the default pin directions for port if_no: all mandatory pins
+ * become inputs; the optional RXLOS emulation pin becomes an output when
+ * present.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	field_set_flush(flds->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(flds->cfg_int); /* enable input */
+	field_set_flush(flds->cfg_reset); /* enable input */
+	field_set_flush(flds->cfg_mod_prs); /* enable input */
+
+	if (flds->cfg_port_rxlos)
+		field_clr_flush(flds->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+/* Number of PHY ports handled per GPIO_PHY module instance */
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-port field pointers; cfg_pll_int/cfg_port_rxlos (and their gpio
+ * counterparts) are optional and may be NULL on HW without those pins.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* Context for one GPIO_PHY module instance */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate a zero-initialized HIF context.
+ * Returns NULL on allocation failure; callers must check.
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc zero-initializes the object, replacing malloc+memset */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/* Scrub and release a HIF context; NULL is accepted and ignored. */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_hif_t));
+	free(p);
+}
+
+/*
+ * Bind the HIF (host interface) module instance to its FPGA registers and
+ * fields, and read out the FPGA product identification.
+ *
+ * When p is NULL, only probe for the module: returns 0 if the instance
+ * exists, -1 otherwise.  Otherwise returns 0 on success, -1 when the
+ * instance does not exist.
+ *
+ * Registers/fields that are not present on HIF are left NULL; the
+ * accessors below must check for NULL before use.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/* Build seed Reg/Fld not present on HIF */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+
+	/* Core/DDR3 speed Reg/Fld not present on HIF */
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* PCIe statistics registers */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* Optional HIF_STATUS register */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/* No HIF_STATUS: leave all status fields absent (the original
+		 * code pointlessly re-queried the NULL register here)
+		 */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Tune the PCIe3 configuration fields (when present): cap the max
+ * read-request size and enable extended tags.  Skipped entirely on the
+ * NT40E3 adapter.  Always returns 0.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		/* enable PCIe extended tags when the field exists */
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		/* log the resulting configuration when fully available */
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+/*
+ * Trigger a HW sample-time event by writing the magic key to the
+ * HIF_SAMPLE_TIME register.  nthw_hif_init() leaves mp_fld_sample_time
+ * NULL when the register is absent, so guard against that here.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (p->mp_fld_sample_time)
+		field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw PCIe statistics counters plus the constants needed to
+ * convert them to rates.  Optional status fields report 0 when absent.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	*p_tags_in_use = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+
+	*p_rd_err = 0;
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+
+	*p_wr_err = 0;
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/*
+ * Convert the raw HIF statistics counters into PCIe RX/TX rates.
+ * p_rd_err_cnt/p_wr_err_cnt are cumulative and incremented (not
+ * assigned) when an error was observed.  When no reference-clock ticks
+ * were counted, all rates are reported as 0.  Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx, n_tx, n_ref_clk, n_unit_size, n_ref_freq;
+	uint64_t n_tags, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size, &n_ref_freq,
+			&n_tags, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (n_ref_clk == 0) {
+		/* no time base available - report zero rates */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) /
+		       (uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) /
+		       (uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/*
+ * Enable statistics collection and request a sample; the ENA and REQ
+ * bits are flushed to HW in one register write.  Always returns 0.
+ */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Disable statistics collection while still issuing a (final) sample
+ * request; flushed to HW in one register write.  Always returns 0.
+ */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample the HIF statistics over a fixed 100 ms window: enable counting,
+ * wait, disable, then convert the counters to rates.  Always returns 0.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000); /* 100 ms measurement window */
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill an end-point counter snapshot from the current HIF statistics.
+ * Always returns 0.
+ *
+ * NOTE(review): the PCI RX rate is written into epc->cur_tx and the TX
+ * rate into epc->cur_rx - possibly an endpoint-vs-host perspective swap;
+ * confirm the argument order is intentional.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Unit size used to scale the HIF statistics counters to byte rates */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * Context for one HIF (host interface) module instance.  Register/field
+ * pointers that are not implemented on a given FPGA image are left NULL
+ * by nthw_hif_init().
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA product identification read out at init time */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	/* HIF reference clock: period in ps and derived frequency in Hz */
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe end-point error counters */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Snapshot of end-point traffic statistics and test-generator settings */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
/* R/W direction bit OR'ed into the shifted 7-bit I2C device address */
#define I2C_TRANSMIT_WR (0x00)
#define I2C_TRANSMIT_RD (0x01)

#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)

/*
 * Minimum timing values for I2C for a Marvell 88E1111 PHY.
 * This PHY is used in many Trispeed NIMs.
 * In order to access this PHY, the I2C clock speed needs to be set to 100 kHz.
 * Names follow the standard I2C timing parameters (matching the IIC_T* registers).
 */
static const uint32_t susta = 4700; /* ns - START setup time (tSU;STA) */
static const uint32_t susto = 4000; /* ns - STOP setup time (tSU;STO) */
static const uint32_t hdsta = 4000; /* ns - START hold time (tHD;STA) */
static const uint32_t sudat = 250; /* ns - data setup time (tSU;DAT) */
static const uint32_t buf = 4700; /* ns - bus free time between transfers (tBUF) */
static const uint32_t high = 4000; /* ns - SCL high period */
static const uint32_t low = 4700; /* ns - SCL low period */
static const uint32_t hddat = 300; /* ns - data hold time (tHD;DAT) */
+
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_bb) ? true : false;
+
+	return 0;
+}
+
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_rxfifo_empty) ? true : false;
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	uint32_t val;
+
+	val = susta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusta, &val, 1);
+
+	val = susto / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusto, &val, 1);
+
+	val = hdsta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thdsta, &val, 1);
+
+	val = sudat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsudat, &val, 1);
+
+	val = buf / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tbuf, &val, 1);
+
+	val = high / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thigh, &val, 1);
+
+	val = low / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tlow, &val, 1);
+
+	val = hddat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thddat, &val, 1);
+
+	return 0;
+}
+
+nthw_iic_t *nthw_iic_new(void)
+{
+	nthw_iic_t *p = malloc(sizeof(nthw_iic_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_iic_t));
+	return p;
+}
+
/*
 * Initialize IIC controller instance n_iic_instance on p_fpga.
 *
 * When p == NULL this is a pure presence probe: returns 0 if the FPGA module
 * exists, -1 otherwise, without touching any state.  When p != NULL the handle
 * is wired to all IIC registers/fields, the controller is soft-reset, enabled,
 * its timing programmed (if n_iic_cycle_time != 0) and the TX FIFO reset.
 *
 * n_iic_cycle_time: I2C module clock cycle time in ns; 0 skips timing setup.
 * Returns 0 on success, -1 if the module instance does not exist.
 */
int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
		 uint32_t n_iic_cycle_time)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);

	/* Presence probe: no handle to initialize */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
		       p_adapter_id_str, n_iic_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_iic_instance = n_iic_instance;

	p->mn_iic_cycle_time = n_iic_cycle_time;

	/* All -1 arguments select the default retry/delay parameters */
	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);

	p->mp_mod_iic = mod;

	/* I2C is a primary communication channel - turn off debug by default */
	module_set_debug_mode(p->mp_mod_iic, 0x00);

	/* Timing registers (tSU;STA, tSU;STO, tHD;STA, tSU;DAT, tBUF, tHIGH, tLOW, tHD;DAT) */
	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
	p->mp_fld_tsusta =
		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);

	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
	p->mp_fld_tsusto =
		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);

	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
	p->mp_fld_thdsta =
		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);

	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
	p->mp_fld_tsudat =
		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);

	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);

	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);

	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);

	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
	p->mp_fld_thddat =
		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);

	/* Control register and its individual bits */
	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
	p->mp_fld_cr_txfifo_reset =
		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);

	/* Status register and its individual bits */
	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
	p->mp_fld_sr_rxfifo_full =
		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
	p->mp_fld_sr_rxfifo_empty =
		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
	p->mp_fld_sr_txfifo_full =
		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
	p->mp_fld_sr_txfifo_empty =
		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);

	/* TX FIFO: data byte plus START/STOP condition flags */
	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
	p->mp_fld_tx_fifo_txdata =
		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
	p->mp_fld_tx_fifo_start =
		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
	p->mp_fld_tx_fifo_stop =
		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);

	/* RX FIFO programmable-interrupt threshold and data register */
	p->mp_reg_rx_fifo_pirq =
		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
	p->mp_fld_rx_fifo_pirq_cmp_val =
		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);

	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
	p->mp_fld_rx_fifo_rxdata =
		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);

	/* Soft-reset register (write reset key to reset the controller) */
	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);

	/*
	 * Initialize I2C controller by applying soft reset and enable the controller
	 */
	nthw_iic_reg_softr(p);
	/* Enable the controller */
	nthw_iic_reg_enable(p);

	/* Setup controller timing */
	if (p->mn_iic_cycle_time) {
		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
		       p->mn_iic_instance, p->mn_iic_cycle_time);
		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
	}

	/* Reset TX fifo - must be after enable */
	nthw_iic_reg_control_txfifo_reset(p);
	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);

	return 0;
}
+
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_iic_t));
+		free(p);
+	}
+}
+
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = n_poll_delay >= 0 ? n_poll_delay : 10;
+
+	p->mn_bus_ready_retry = n_bus_ready_retry >= 0 ? n_bus_ready_retry : 1000;
+	p->mn_data_ready_retry = n_data_ready_retry >= 0 ? n_data_ready_retry : 1000;
+
+	p->mn_read_data_retry = n_read_data_retry >= 0 ? n_read_data_retry : 10;
+	p->mn_write_data_retry = n_write_data_retry >= 0 ? n_write_data_retry : 10;
+
+	return 0;
+}
+
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/* Write device address + RD bit to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write DataLen to TX_FIFO and set stop bit!! */
+		nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+		for (i = 0; i < data_len; i++) {
+			/* Wait for RX FIFO not empty */
+			if (!nthw_iic_data_ready(p))
+				return -1;
+
+			/* Read DataLen bytes from RX_FIFO */
+			nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+			p_byte++;
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+	return 0;
+}
+
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
/*
 * Single-shot write of data_len bytes from p_byte to register reg_addr of I2C
 * device dev_addr.  Returns 0 on success, -1 on empty payload or bus-not-ready.
 * Callers wanting retries should use nthw_iic_write_data().
 */
int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		      uint8_t data_len, uint8_t *p_byte)
{
	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
	uint32_t value;
	int count;
	int i;

	/* Nothing to write */
	if (data_len == 0)
		return -1;

	/* All bytes except the last are written without the stop bit */
	count = data_len - 1;
	if (nthw_iic_bus_ready(p)) {
		/* Reset TX fifo */
		nthw_iic_reg_control_txfifo_reset(p);

		/* Write device address to TX_FIFO and set start bit!! */
		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);

		/* Write reg_addr to TX FIFO */
		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);

		for (i = 0; i < count; i++) {
			/* Write intermediate data byte to TX fifo (no stop bit) */
			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
			p_byte++;
		}

		/* Write data byte to TX fifo and set stop bit */
		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);

		if (!nthw_iic_bus_ready(p)) {
			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
			       p_adapter_id_str, __func__, __LINE__);
			/*
			 * NOTE(review): this wait loop is unbounded — if the bus
			 * never becomes ready the thread spins forever.  Presumably
			 * intentional after a committed write, but confirm whether
			 * a bounded timeout would be safer here.
			 */
			while (true) {
				if (nthw_iic_bus_ready(p)) {
					NT_LOG(DBG, NTHW,
					       "%s: info: busReady (%s:%u)\n",
					       p_adapter_id_str, __func__,
					       __LINE__);
					break;
				}
			}
		}

		return 0;

	} else {
		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
		       __LINE__);
		return -1;
	}
}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	if (count == 0)
+		return false;
+
+	return true;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	if (count == 0)
+		return false;
+
+	return true;
+}
+
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res;
+	uint8_t data_val = -1;
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	int res = 0;
+	int i = 0;
+
+	if (b_increate) {
+		for (i = n_dev_addr_start; i < 128; i++) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	} else {
+		for (i = n_dev_addr_start; i >= 0; i--) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int i;
+
+	for (i = 0; i < 128; i++)
+		(void)nthw_iic_scan_dev_addr(p, i, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
/* Handle for one FPGA IIC controller instance: register/field pointers plus
 * the retry/timing parameters used by the polling helpers in nthw_iic.c.
 */
struct nthw_iic {
	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
	nt_module_t *mp_mod_iic;	/* the MOD_IIC module instance */
	int mn_iic_instance;		/* instance number within the FPGA */

	uint32_t mn_iic_cycle_time;	/* I2C module clock cycle time in ns; 0 = unset */
	int mn_poll_delay;		/* us between status polls */
	int mn_bus_ready_retry;		/* max polls waiting for bus-free */
	int mn_data_ready_retry;	/* max polls waiting for RX data */
	int mn_read_data_retry;		/* max read transaction retries */
	int mn_write_data_retry;	/* max write transaction retries */

	/* Timing registers: START/STOP setup, START hold, data setup/hold,
	 * bus free time and SCL high/low periods.
	 */
	nt_register_t *mp_reg_tsusta;
	nt_field_t *mp_fld_tsusta;

	nt_register_t *mp_reg_tsusto;
	nt_field_t *mp_fld_tsusto;

	nt_register_t *mp_reg_thdsta;
	nt_field_t *mp_fld_thdsta;

	nt_register_t *mp_reg_tsudat;
	nt_field_t *mp_fld_tsudat;

	nt_register_t *mp_reg_tbuf;
	nt_field_t *mp_fld_tbuf;

	nt_register_t *mp_reg_thigh;
	nt_field_t *mp_fld_thigh;

	nt_register_t *mp_reg_tlow;
	nt_field_t *mp_fld_tlow;

	nt_register_t *mp_reg_thddat;
	nt_field_t *mp_fld_thddat;

	/* Control register bits: enable, master/slave, TX FIFO reset, TX ACK */
	nt_register_t *mp_reg_cr;
	nt_field_t *mp_fld_cr_en;
	nt_field_t *mp_fld_cr_msms;
	nt_field_t *mp_fld_cr_txfifo_reset;
	nt_field_t *mp_fld_cr_txak;

	/* Status register bits: bus busy and FIFO full/empty flags */
	nt_register_t *mp_reg_sr;
	nt_field_t *mp_fld_sr_bb;
	nt_field_t *mp_fld_sr_rxfifo_full;
	nt_field_t *mp_fld_sr_rxfifo_empty;
	nt_field_t *mp_fld_sr_txfifo_full;
	nt_field_t *mp_fld_sr_txfifo_empty;

	/* TX FIFO: data byte plus START/STOP condition flags */
	nt_register_t *mp_reg_tx_fifo;
	nt_field_t *mp_fld_tx_fifo_txdata;
	nt_field_t *mp_fld_tx_fifo_start;
	nt_field_t *mp_fld_tx_fifo_stop;

	/* RX FIFO programmable-interrupt threshold */
	nt_register_t *mp_reg_rx_fifo_pirq;
	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;

	/* RX FIFO data register */
	nt_register_t *mp_reg_rx_fifo;
	nt_field_t *mp_fld_rx_fifo_rxdata;

	/* Soft reset register; see nthw_iic_reg_softr() for the reset key */
	nt_register_t *mp_reg_softr;
	nt_field_t *mp_fld_softr_rkey;
};
+
typedef struct nthw_iic nthw_iic_t;
typedef struct nthw_iic nthw_iic;

/* Construction / initialization / destruction of an IIC handle */
nthw_iic_t *nthw_iic_new(void);
int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
		 uint32_t n_iic_cycle_time);
void nthw_iic_delete(nthw_iic_t *p);

/* Configure poll delay and retry budgets; negative values select defaults */
int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
			   const int n_bus_ready_retry, const int n_data_ready_retry,
			   const int n_read_data_retry, const int n_write_data_retry);

/* Data transfer: *_data variants retry, *byte variants are single-shot.
 * All return 0 on success, -1 on failure.
 */
int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		     uint8_t data_len, void *p_void);
int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		     uint8_t data_len, uint8_t *p_byte);
int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		      uint8_t data_len, void *p_void);
int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		      uint8_t data_len, uint8_t *p_byte);

/* Polling helpers: true when the bus is free / RX data is available */
bool nthw_iic_bus_ready(nthw_iic_t *p);
bool nthw_iic_data_ready(nthw_iic_t *p);

/* Bus scanning utilities; scan_find_dev returns the first hit or -1 */
int nthw_iic_scan(nthw_iic_t *p);
int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
/* Number of PCS virtual lanes implemented by this MAC/PCS core */
#define NTHW_MAC_PCS_LANES (20)

static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES; /* lane count as a byte constant */
/* Marked _unused: presumably referenced only in some build configs — confirm */
static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	nthw_mac_pcs_t *p = malloc(sizeof(nthw_mac_pcs_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_mac_pcs_t));
+	return p;
+}
+
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_t));
+		free(p);
+	}
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_rx_enable);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_rx_enable);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_rx_enable);
+}
+
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_tx_enable);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_tx_enable);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_tx_enable);
+}
+
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_phymac_misc_tx_sel_host);
+	if (enable)
+		field_set_flush(p->mp_fld_phymac_misc_tx_sel_host);
+
+	else
+		field_clr_flush(p->mp_fld_phymac_misc_tx_sel_host);
+}
+
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_phymac_misc_tx_sel_tfg);
+	if (enable)
+		field_set_flush(p->mp_fld_phymac_misc_tx_sel_tfg);
+
+	else
+		field_clr_flush(p->mp_fld_phymac_misc_tx_sel_tfg);
+}
+
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	if (p->mp_fld_phymac_misc_ts_eop) {
+		field_get_updated(p->mp_fld_phymac_misc_ts_eop);
+		if (enable)
+			field_set_flush(p->mp_fld_phymac_misc_ts_eop);
+
+		else
+			field_clr_flush(p->mp_fld_phymac_misc_ts_eop);
+	}
+}
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	uint32_t block_lock;
+	uint32_t lane_lock;
+
+	block_lock = field_get_updated(p->mp_fld_block_lock_lock) &
+		    p->m_fld_block_lock_lock_mask;
+	lane_lock = field_get_updated(p->mp_fld_vl_demuxed_lock) &
+		   p->m_fld_vl_demuxed_lock_mask;
+	if (block_lock == p->m_fld_block_lock_lock_mask &&
+			lane_lock == p->m_fld_vl_demuxed_lock_mask)
+		return true;
+
+	return false;
+}
+
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_tx_path_rst);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_tx_path_rst);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_tx_path_rst);
+}
+
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_rx_path_rst);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_rx_path_rst);
+}
+
/*
 * Report whether the Rx path is currently held in reset.
 * Reads the freshly updated RX_PATH_RST field from hardware.
 */
bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
{
	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
}
+
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_tx_send_rfi);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_tx_send_rfi);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_tx_send_rfi);
+}
+
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	field_get_updated(p->mp_fld_pcs_config_rx_force_resync);
+	if (enable)
+		field_set_flush(p->mp_fld_pcs_config_rx_force_resync);
+
+	else
+		field_clr_flush(p->mp_fld_pcs_config_rx_force_resync);
+}
+
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	if ((field_get_updated(p->mp_fld_gty_stat_rx_rst_done0) &
+			p->m_fld_gty_stat_rx_rst_done_mask) == p->m_fld_gty_stat_rx_rst_done_mask &&
+			(field_get_updated(p->mp_fld_gty_stat_rx_rst_done1) &
+			p->m_fld_gty_stat_rx_rst_done_mask) == p->m_fld_gty_stat_rx_rst_done_mask &&
+			(field_get_updated(p->mp_fld_gty_stat_rx_rst_done2) &
+			p->m_fld_gty_stat_rx_rst_done_mask) == p->m_fld_gty_stat_rx_rst_done_mask &&
+			(field_get_updated(p->mp_fld_gty_stat_rx_rst_done3) &
+			p->m_fld_gty_stat_rx_rst_done_mask) == p->m_fld_gty_stat_rx_rst_done_mask)
+		return true;
+
+	else
+		return false;
+}
+
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	if ((field_get_updated(p->mp_fld_gty_stat_tx_rst_done0) &
+			p->m_fld_gty_stat_tx_rst_done_mask) == p->m_fld_gty_stat_tx_rst_done_mask &&
+			(field_get_updated(p->mp_fld_gty_stat_tx_rst_done1) &
+			p->m_fld_gty_stat_tx_rst_done_mask) == p->m_fld_gty_stat_tx_rst_done_mask &&
+			(field_get_updated(p->mp_fld_gty_stat_tx_rst_done2) &
+			p->m_fld_gty_stat_tx_rst_done_mask) == p->m_fld_gty_stat_tx_rst_done_mask &&
+			(field_get_updated(p->mp_fld_gty_stat_tx_rst_done3) &
+			p->m_fld_gty_stat_tx_rst_done_mask) == p->m_fld_gty_stat_tx_rst_done_mask)
+		return true;
+
+	else
+		return false;
+}
+
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	register_update(p->mp_reg_gty_loop);
+	if (enable) {
+		field_set_val32(p->mp_fld_gty_loop_gt_loop0, 2);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop1, 2);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop2, 2);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop3, 2);
+	} else {
+		field_set_val32(p->mp_fld_gty_loop_gt_loop0, 0);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop1, 0);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop2, 0);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop3, 0);
+	}
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	register_update(p->mp_reg_gty_loop);
+	if (enable) {
+		field_set_val32(p->mp_fld_gty_loop_gt_loop0, 4);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop1, 4);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop2, 4);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop3, 4);
+	} else {
+		field_set_val32(p->mp_fld_gty_loop_gt_loop0, 0);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop1, 0);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop2, 0);
+		field_set_val32(p->mp_fld_gty_loop_gt_loop3, 0);
+	}
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
/*
 * Read out the per-lane BIP-8 error counters (and log them in DEBUG builds).
 * NOTE(review): named "reset" — presumably BIP_ERR is clear-on-read so the
 * readout itself resets the counters; confirm against the register spec.
 */
void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
{
	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];

	register_update(p->mp_reg_bip_err);
	/* Fetch all lane counters in a single bulk read */
	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
		     ARRAY_SIZE(lane_bit_errors));

#if defined(DEBUG)
	{
		uint8_t lane;

		for (lane = 0; lane < c_pcs_lanes; lane++) {
			if (lane_bit_errors[lane]) {
				NT_LOG(DBG, NTHW,
				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
				       p->m_port_no, lane, lane_bit_errors[lane]);
			}
		}
	}
#else
	(void)c_pcs_lanes; /* unused - kill warning */
#endif
}
+
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
/* Report the high bit-error-rate (HI_BER) status flag from hardware. */
bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
{
	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
}
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	uint32_t block_lock_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	uint32_t lane_lock_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+
+	return ((block_lock != 0) && (block_lock != block_lock_mask)) ||
+	       ((lane_lock != 0) && (lane_lock != lane_lock_mask));
+}
+
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	if (enable) {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, 0);
+	} else {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+				    (1 << 5) - 1);
+	}
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
/* Report whether RS-FEC is currently bypassed (FEC_STAT BYPASS bit). */
bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
{
	return field_get_updated(p->mp_field_fec_stat_bypass);
}
+
/* Report whether RS-FEC operation is valid (FEC_STAT VALID bit). */
bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
{
	return field_get_updated(p->mp_field_fec_stat_valid);
}
+
/* Report whether the FEC lanes are aligned (FEC_STAT FEC_LANE_ALGN bit). */
bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
{
	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
}
+
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	if ((field_get_val32(p->mp_field_fec_stat_am_lock0)) ||
+			(field_get_val32(p->mp_field_fec_stat_am_lock1)) ||
+			(field_get_val32(p->mp_field_fec_stat_am_lock2)) ||
+			(field_get_val32(p->mp_field_fec_stat_am_lock3)))
+		return true;
+	return false;
+}
+
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	if ((field_get_val32(p->mp_field_fec_stat_am_lock0)) &&
+			(field_get_val32(p->mp_field_fec_stat_am_lock1)) &&
+			(field_get_val32(p->mp_field_fec_stat_am_lock2)) &&
+			(field_get_val32(p->mp_field_fec_stat_am_lock3)))
+		return true;
+	return false;
+}
+
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_0: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
/*
 * Read out (and log, if non-zero) the corrected and uncorrected FEC
 * codeword counters.
 * NOTE(review): named "reset" — presumably these counters are
 * clear-on-read so the register_update itself resets them; confirm
 * against the FPGA register description.
 */
void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
{
	register_update(p->mp_reg_fec_cw_cnt);
	register_update(p->mp_reg_fec_ucw_cnt);

	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
	}
	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
	}
}
+
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
/*
 * Select the port LED mode (see enum nthw_mac_pcs_led_mode_e) via the
 * DEBOUNCE_CTRL NT_PORT_CTRL field; reads back before writing-and-flushing.
 */
void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
{
	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
}
+
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
/* Record the logical port number used in log messages for this instance. */
void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
{
	p->m_port_no = port_no;
}
+
/* Read the current (updated) BLOCK_LOCK lock bits from hardware. */
uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
{
	return field_get_updated(p->mp_fld_block_lock_lock);
}
+
/* Return the cached mask of valid BLOCK_LOCK lock bits (set at init). */
uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
{
	return p->m_fld_block_lock_lock_mask;
}
+
/* Read the current (updated) virtual-lane demux lock bits from hardware. */
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
{
	return field_get_updated(p->mp_fld_vl_demuxed_lock);
}
+
/* Return the cached mask of valid virtual-lane lock bits (set at init). */
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
{
	return p->m_fld_vl_demuxed_lock_mask;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
/* Port LED behavior, written to DEBOUNCE_CTRL NT_PORT_CTRL by
 * nthw_mac_pcs_set_led_mode(). Value semantics inferred from the names —
 * confirm against the FPGA register description.
 */
enum nthw_mac_pcs_led_mode_e {
	NTHW_MAC_PCS_LED_AUTO = 0x00,
	NTHW_MAC_PCS_LED_ON = 0x01,
	NTHW_MAC_PCS_LED_OFF = 0x02,
	NTHW_MAC_PCS_LED_PORTID = 0x03,
};
+
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
/*
 * Per-port MAC/PCS hardware context.
 *
 * Holds the FPGA module handle plus cached register (mp_reg_*) and field
 * (mp_fld_*/mp_field_*) pointers resolved once at init time, so the
 * accessor functions avoid repeated lookups. The m_*_mask members cache
 * the set of valid bits for multi-lane status fields.
 */
struct nthw_mac_pcs {
	uint8_t m_port_no;

	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_mac_pcs;
	int mn_instance;

	/* Block lock status */
	nt_field_t *mp_fld_block_lock_lock;
	uint32_t m_fld_block_lock_lock_mask;

	/* Lane lock status */
	nt_field_t *mp_fld_vl_demuxed_lock;
	uint32_t m_fld_vl_demuxed_lock_mask;

	/* GTY_STAT */
	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
	uint32_t m_fld_gty_stat_rx_rst_done_mask;
	uint32_t m_fld_gty_stat_tx_rst_done_mask;

	/* GTY_LOOP */
	nt_register_t *mp_reg_gty_loop;
	nt_field_t *mp_fld_gty_loop_gt_loop0;
	nt_field_t *mp_fld_gty_loop_gt_loop1;
	nt_field_t *mp_fld_gty_loop_gt_loop2;
	nt_field_t *mp_fld_gty_loop_gt_loop3;

	/* MAC_PCS_CONFIG */
	nt_field_t *mp_fld_pcs_config_tx_path_rst;
	nt_field_t *mp_fld_pcs_config_rx_path_rst;
	nt_field_t *mp_fld_pcs_config_rx_enable;
	nt_field_t *mp_fld_pcs_config_rx_force_resync;
	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
	nt_field_t *mp_fld_pcs_config_tx_enable;
	nt_field_t *mp_fld_pcs_config_tx_send_idle;
	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
	nt_field_t *mp_fld_pcs_config_tx_test_pattern;

	/* STAT PCS */
	nt_field_t *mp_fld_stat_pcs_rx_status;
	nt_field_t *mp_fld_stat_pcs_rx_aligned;
	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;

	/* STAT_PCS_RX_LATCH */
	nt_field_t *mp_fld_stat_pcs_rx_latch_status;

	/* PHYMAC_MISC */
	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
	nt_field_t *mp_fld_phymac_misc_ts_eop;

	/* LINK_SUMMARY */
	nt_register_t *mp_reg_link_summary;
	nt_field_t *mp_fld_link_summary_abs;
	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
	nt_field_t *mp_fld_link_summary_lh_abs;
	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
	nt_field_t *mp_fld_link_summary_link_down_cnt;
	nt_field_t *mp_fld_link_summary_nim_interr;
	nt_field_t *mp_fld_link_summary_lh_local_fault;
	nt_field_t *mp_fld_link_summary_lh_remote_fault;
	nt_field_t *mp_fld_link_summary_local_fault;
	nt_field_t *mp_fld_link_summary_remote_fault;

	/* BIP_ERR */
	nt_register_t *mp_reg_bip_err;
	nt_field_t *mp_fld_reg_bip_err_bip_err;

	/* FEC_CTRL */
	nt_register_t *mp_reg_fec_ctrl;
	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;

	/* FEC_STAT */
	nt_register_t *mp_reg_fec_stat;
	nt_field_t *mp_field_fec_stat_bypass;
	nt_field_t *mp_field_fec_stat_valid;
	nt_field_t *mp_field_fec_stat_am_lock0;
	nt_field_t *mp_field_fec_stat_am_lock1;
	nt_field_t *mp_field_fec_stat_am_lock2;
	nt_field_t *mp_field_fec_stat_am_lock3;
	nt_field_t *mp_field_fec_stat_fec_lane_algn;

	/* FEC Corrected code word count */
	nt_register_t *mp_reg_fec_cw_cnt;
	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;

	/* FEC Uncorrected code word count */
	nt_register_t *mp_reg_fec_ucw_cnt;
	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;

	/* GTY_RX_BUF_STAT */
	nt_register_t *mp_reg_gty_rx_buf_stat;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;

	/* GTY_PRE_CURSOR */
	nt_register_t *mp_reg_gty_pre_cursor;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;

	/* GTY_DIFF_CTL */
	nt_register_t *mp_reg_gty_diff_ctl;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;

	/* GTY_POST_CURSOR */
	nt_register_t *mp_reg_gty_post_cursor;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;

	/* GTY_CTL */
	nt_register_t *mp_reg_gty_ctl;
	nt_register_t *mp_reg_gty_ctl_tx;
	nt_field_t *mp_field_gty_ctl_tx_pol0;
	nt_field_t *mp_field_gty_ctl_tx_pol1;
	nt_field_t *mp_field_gty_ctl_tx_pol2;
	nt_field_t *mp_field_gty_ctl_tx_pol3;
	nt_field_t *mp_field_gty_ctl_rx_pol0;
	nt_field_t *mp_field_gty_ctl_rx_pol1;
	nt_field_t *mp_field_gty_ctl_rx_pol2;
	nt_field_t *mp_field_gty_ctl_rx_pol3;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;

	/* DEBOUNCE_CTRL */
	nt_register_t *mp_reg_debounce_ctrl;
	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;

	/* TIMESTAMP_COMP */
	nt_register_t *mp_reg_time_stamp_comp;
	nt_field_t *mp_field_time_stamp_comp_rx_dly;
	nt_field_t *mp_field_time_stamp_comp_tx_dly;

	/* STAT_PCS_RX */
	nt_register_t *mp_reg_stat_pcs_rx;

	/* STAT_PCS_RX */
	nt_register_t *mp_reg_stat_pcs_rx_latch;

	/* PHYMAC_MISC */
	nt_register_t *mp_reg_phymac_misc;

	/* BLOCK_LOCK */
	nt_register_t *mp_reg_block_lock;
};
+
typedef struct nthw_mac_pcs nthw_mac_pcs_t;
typedef struct nthw_mac_pcs nthw_mac_pcs;

/* Lifecycle */
nthw_mac_pcs_t *nthw_mac_pcs_new(void);
int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);

/* Reset and lock status control */
bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
			     bool enable); /* wrapper - for ease of use */
void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);

/* Link summary readout; any output pointer may be NULL */
void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
				uint32_t *p_status_latch, uint32_t *p_aligned,
				uint32_t *p_local_fault, uint32_t *p_remote_fault);

void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
			       uint32_t *p_ll_nt_phy_link_state,
			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
			       uint32_t *p_lh_local_fault,
			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
			       uint32_t *p_remote_fault);

/* FEC control and statistics */
bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);

/* GTY transceiver tuning and status */
bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);

/* Raw lock-field accessors */
uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/* Helper: refresh the field's shadow value, then write 1 (set) or 0 (clr)
+ * and flush. A NULL field is silently ignored.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/* Allocate a zero-initialized instance; returns NULL on allocation failure. */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/* Scrub and release an instance; a NULL pointer is accepted and ignored. */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Map a caller-supplied index to a port number: index 0 falls back to the
+ * module instance number; any other index is returned as-is.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
+/* Record the port number (used by logging, e.g. the FEC counter dump). */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register of channel 'index' and copy the requested
+ * status fields to the caller's out-parameters. Any out-pointer may be NULL,
+ * in which case that particular field is skipped.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* BUGFIX: validate the handle BEFORE dereferencing it; the assert
+	 * previously ran after &p->regs[index] was already evaluated.
+	 */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Enable/disable the TX datapath of channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_enable,
+						enable);
+}
+
+/* Enable/disable the RX datapath of channel 'index'. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_enable,
+						enable);
+}
+
+/* Set/clear the RX force-resync control bit. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_force_resync,
+						enable);
+}
+
+/* Assert/de-assert reset of the RX GT data path. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_gt_data,
+						enable);
+}
+
+/* Assert/de-assert reset of the TX GT data path. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_gt_data,
+						enable);
+}
+
+/* Assert/de-assert reset of the autoneg / link-training block. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_an_lt,
+						enable);
+}
+
+/* Assert/de-assert reset of the speed-control block. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_speed_ctrl,
+						enable);
+}
+
+/* Start/stop transmission of remote fault indication (RFI). */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_rfi,
+						enable);
+}
+
+/* Start/stop transmission of local fault indication (LFI). */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_lfi,
+						enable);
+}
+
+/* Write LFI and RFI together, under a single register update/flush cycle. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+
+	register_update(rf->mp_reg_core_conf);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(rf->mp_reg_core_conf, 1);
+}
+
+/* Report whether DFE is active. The hardware exposes an LPM-enable bit and
+ * DFE is active exactly when LPM is off, so the bit is inverted here.
+ */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_gty_ctl_rx_lpm_en) == 0;
+}
+
+/*
+ * Enable/disable DFE (decision feedback equalization) on the RX GTY.
+ * The hardware control is the LPM_EN bit, which is the inverse of DFE, so
+ * the written value is negated. The equalizer is then soft-reset by pulsing
+ * EQUA_RST 1 -> 0 so the new LPM/DFE setting takes effect; the read-backs
+ * between the writes refresh the shadow register state.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Set/clear RX GTY polarity inversion. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_rx_polarity,
+						enable);
+}
+
+/* Set/clear TX GTY polarity inversion. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_polarity,
+						enable);
+}
+
+/* Set/clear TX GTY output inhibit. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_inhibit,
+						enable);
+}
+
+/* Enable/disable host-side (GT) loopback. */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_fld_gty_loop_gt_loop;
+	/* 2U selects a GT loopback mode -- presumably near-end; TODO confirm
+	 * against the GTY transceiver register spec.
+	 */
+	const uint32_t mode = enable ? 2U : 0U;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, mode);
+}
+
+/* Enable/disable line-side loopback. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_line_loopback,
+						enable);
+}
+
+/* True while the user RX reset status bit is asserted. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_fld_sub_rst_status_user_rx_rst;
+
+	return field_get_updated(fld) != 0;
+}
+
+/* True while the user TX reset status bit is asserted. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_fld_sub_rst_status_user_tx_rst;
+
+	return field_get_updated(fld) != 0;
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* == 3 requires both lock bits of the 2-bit status field to be set.
+	 * NOTE(review): for 10G-only cores with a single QPLL this presumably
+	 * still reads 3 -- confirm against the register spec.
+	 */
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/* Ready for sub-reset release: QPLL(s) locked and neither user RX nor user
+ * TX reset status is still asserted.
+ */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+/* Return true when auto-negotiation is enabled for channel 'index'. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable) != 0;
+}
+
+/* Start/stop transmission of idle sequences. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_idle,
+						enable);
+}
+
+/* Enable/disable FCS insertion on transmit. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_ins_fcs,
+						enable);
+}
+
+/* Return the speed-select bit: 0 = 25G, 1 = 10G. */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_link_speed10_g) != 0;
+}
+
+/* Select 10G (true) or 25G (false) link speed. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_link_speed10_g,
+						enable);
+}
+
+/* Pulse the link-speed toggle bit: refresh the shadow value, then set+flush. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(fld);
+	field_set_flush(fld);
+}
+
+/* Enable/disable RS-FEC for channel 'index'. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable,
+						enable);
+}
+
+/* Write the LED / port-control mode value for channel 'index'. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, mode);
+}
+
+/* Assert/de-assert RX MAC/PCS reset. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_mac_pcs,
+						enable);
+}
+
+/* Assert/de-assert TX MAC/PCS reset. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_mac_pcs,
+						enable);
+}
+
+/*
+ * Read (and log, when non-zero) the RS-FEC corrected (CCW) and uncorrected
+ * (UCW) codeword counters.
+ * NOTE(review): despite the "reset" name there is no explicit write-to-clear;
+ * the counters are presumably clear-on-read via register_update() -- confirm
+ * against the register specification.
+ * NOTE(review): other NT_LOG calls in this file end in '\n' while these do
+ * not -- confirm whether NT_LOG appends a newline itself.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Write the RX timestamp compensation delay for channel 'index'. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_field_timestamp_comp_rx_dly;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, rx_dly);
+}
+
+/* Write the TX timestamp compensation delay for channel 'index'. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_field_timestamp_comp_tx_dly;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, tx_dly);
+}
+
+/* Enable/disable timestamping at end-of-packet. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_ts_at_eop,
+						enable);
+}
+
+/* Write the GTY TX differential control value. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, value);
+}
+
+/* Write the GTY TX pre-cursor value. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, value);
+}
+
+/* Write the GTY TX post-cursor value. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_field_gty_post_cursor_tx_post_csr;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, value);
+}
+
+/* Enable/disable link training. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_lt_conf_enable,
+						enable);
+}
+
+/* Advertise/clear the FEC91 request in the autoneg configuration. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec91_request,
+						enable);
+}
+
+/* Advertise/clear the RS-FEC request in the autoneg configuration. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_rs_fec_request,
+						enable);
+}
+
+/* Advertise/clear the FEC74 request in the autoneg configuration. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec74_request,
+						enable);
+}
+
+/* Enable/disable auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_enable,
+						enable);
+}
+
+/* Enable/disable auto-negotiation bypass. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_bypass,
+						enable);
+}
+
+/*
+ * Configure direct-attach-copper (DAC) mode.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: autoneg is disabled and
+ * bypassed, link training is disabled, and the MAC/PCS plus GT datapaths
+ * are pulsed through a reset sequence (assert all, then de-assert all --
+ * the ordering below is intentional). Any other mode trips assert(0) as a
+ * reminder that it is not yet implemented.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+/* Read the LL_RX_FEC74_LOCK bit from the link summary. */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(fld) != 0;
+}
+
+/* Read the LL_RX_RSFEC_LANE_ALIGNMENT bit from the link summary. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(fld) != 0;
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* LED mode values passed as 'mode' to nthw_mac_pcs_xxv_set_led_mode(). */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/*
+ * DAC mode values passed as 'dac_mode' to nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): CA_25G_N/S/L presumably select 25G direct-attach-copper
+ * cable classes (short/medium/long reach) -- confirm against FPGA docs.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Handle for the XXV MAC/PCS FPGA module.
+ * 'regs' holds one register/field shadow per sub-module/channel (up to
+ * NTHW_MAC_PCS_XXV_NUM_ELEMS); the init code binds each entry to the
+ * channel-suffixed register set (e.g. DEBOUNCE_CTRL_2, DEBOUNCE_CTRL_3).
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+/* Convenience typedefs for the module handle. */
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+/*
+ * Lifetime: allocate a zeroed handle, bind it to FPGA instance
+ * n_instance with n_channels sub-modules, and free it again.
+ * The 'index' argument of the accessors below selects the
+ * sub-module/channel entry in 'regs' (0..n_channels-1).
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+/* Read a snapshot of the LINK_SUMMARY register fields into the out-params. */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+/* 'mode' is one of enum nthw_mac_pcs_xxv_led_mode_e. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+/* 'dac_mode' is one of enum nthw_mac_pcs_xxv_dac_mode_e. */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_RD_TG handle.
+ * Returns NULL on allocation failure. calloc replaces the previous
+ * malloc+memset pair: one call, same zeroed result.
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/*
+ * Release a handle obtained from nthw_pci_rd_tg_new(); NULL is a no-op.
+ * The struct is scrubbed before free so dangling users fault early.
+ */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the PCI_RD_TG register/field handles for FPGA instance n_instance.
+ *
+ * Probe mode: when p is NULL the function only reports module presence
+ * (0 = present, -1 = absent) and touches no state.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	/* Product parameter: whether the TA/TG test blocks are present. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id: query (not get) -- presumably returns NULL
+	 * when the field is absent from this FPGA image; TODO confirm in
+	 * nthw_fpga_model and check before use.
+	 */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address, split into low/high 32-bit
+ * register fields. The masks use ULL: on ILP32 targets "1UL << 32"
+ * shifts by the full width of unsigned long, which is undefined
+ * behavior (C11 6.5.7).
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xffffffffULL));
+}
+
+/* Select the TG RAM entry (TG_RDADDR.RAM_ADDR) and flush immediately. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Load req_size/wait/wrap into the TG_RDDATA2 shadow, then flush once.
+ * All three fields were bound to the same register in init, so a single
+ * flush via the last field writes the whole register in one access.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Write the iteration count to TG_RD_RUN, starting the read generator. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Return the freshly-read TG_CTRL.TG_RD_RDY value (re-reads hardware). */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTHW_PCI_RD_TG_H__: identifiers starting with a
+ * double underscore are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef NTHW_PCI_RD_TG_H_
+#define NTHW_PCI_RD_TG_H_
+
+/*
+ * Make the header self-contained: it uses uint32_t/uint64_t, bool and
+ * the nt_fpga_t/nt_module_t/nt_register_t/nt_field_t types, mirroring
+ * the includes of nthw_mac_pcs_xxv.h.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "nthw_fpga_model.h"
+
+/* PCIe read traffic generator (PCI_RD_TG) register/field handle. */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* NTHW_PCI_RD_TG_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI_TA handle.
+ * Returns NULL on allocation failure. calloc replaces the previous
+ * malloc+memset pair: one call, same zeroed result.
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/*
+ * Release a handle obtained from nthw_pci_ta_new(); NULL is a no-op.
+ * The struct is scrubbed before free so dangling users fault early.
+ */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the PCI_TA (test accessor) register/field handles for FPGA
+ * instance n_instance.
+ *
+ * Probe mode: when p is NULL the function only reports module presence
+ * (0 = present, -1 = absent) and touches no state.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* Product parameter: whether the TA/TG test blocks are present. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* Counter registers read back by the nthw_pci_ta_get_* accessors. */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write PCI_TA_CONTROL.ENABLE and flush to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the updated good-packet counter (PCI_TA_PACKET_GOOD) into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the updated bad-packet counter (PCI_TA_PACKET_BAD) into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the updated length-error counter (PCI_TA_LENGTH_ERROR) into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the updated payload-error counter (PCI_TA_PAYLOAD_ERROR) into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTHW_PCI_TA_H__: identifiers starting with a
+ * double underscore are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef NTHW_PCI_TA_H_
+#define NTHW_PCI_TA_H_
+
+/*
+ * Make the header self-contained: it uses uint32_t and the
+ * nt_fpga_t/nt_module_t/nt_register_t/nt_field_t types, mirroring
+ * the includes of nthw_mac_pcs_xxv.h.
+ */
+#include <stdint.h>
+
+#include "nthw_fpga_model.h"
+
+/* PCIe test accessor (PCI_TA) packet/error counter handle. */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* NTHW_PCI_TA_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_WR_TG handle.
+ * Returns NULL on allocation failure. calloc replaces the previous
+ * malloc+memset pair: one call, same zeroed result.
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/*
+ * Release a handle obtained from nthw_pci_wr_tg_new(); NULL is a no-op.
+ * The struct is scrubbed before free so dangling users fault early.
+ */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the PCI_WR_TG register/field handles for FPGA instance n_instance.
+ *
+ * Probe mode: when p is NULL the function only reports module presence
+ * (0 = present, -1 = absent) and touches no state.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* Product parameter: whether the TA/TG test blocks are present. */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id: query (not get) -- presumably returns NULL
+	 * when the field is absent from this FPGA image; TODO confirm in
+	 * nthw_fpga_model and check before use.
+	 */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address, split into low/high 32-bit
+ * register fields. The masks use ULL: on ILP32 targets "1UL << 32"
+ * shifts by the full width of unsigned long, which is undefined
+ * behavior (C11 6.5.7).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xffffffffULL));
+}
+
+/* Select the TG RAM entry (TG_WRADDR.RAM_ADDR) and flush immediately. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Load req_size/wait/wrap/inc_mode into the TG_WRDATA2 shadow, then
+ * flush once. All four fields were bound to the same register in init,
+ * so a single flush via the last field writes the whole register in
+ * one access.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Write the iteration count to TG_WR_RUN, starting the write generator. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Return the freshly-read TG_CTRL.TG_WR_RDY value (re-reads hardware). */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTHW_PCI_WR_TG_H__: identifiers starting with a
+ * double underscore are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef NTHW_PCI_WR_TG_H_
+#define NTHW_PCI_WR_TG_H_
+
+/*
+ * Make the header self-contained: it uses uint32_t/uint64_t, bool and
+ * the nt_fpga_t/nt_module_t/nt_register_t/nt_field_t types, mirroring
+ * the includes of nthw_mac_pcs_xxv.h.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "nthw_fpga_model.h"
+
+/* PCIe write traffic generator (PCI_WR_TG) register/field handle. */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* NTHW_PCI_WR_TG_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/* Allocate a zeroed PCIe3 handle; returns NULL on allocation failure. */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc gives the same zero-initialized result as malloc+memset. */
+	nthw_pcie3_t *p = calloc(1, sizeof(nthw_pcie3_t));
+
+	return p;
+}
+
+/* Release a PCIe3 handle; safe to call with NULL. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the handle before releasing it so stale pointers are not
+	 * accidentally reused through a dangling reference.
+	 */
+	memset(p, 0, sizeof(nthw_pcie3_t));
+	free(p);
+}
+
+/*
+ * Resolve all PCIE3 module registers/fields into the handle and apply the
+ * initial configuration (clear DMA endpoint allow masks, zero marker
+ * addresses).
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL it only probes for the module's presence.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3: statistics control and counter registers */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* Root-port <-> endpoint error signalling registers */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	/* Endpoint/message interface and DMA allow masks */
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/*
+	 * NOTE(review): p0 allow mask is set here after being cleared above,
+	 * while p1 is cleared a second time - presumably p0 stays enabled and
+	 * p1 disabled (no bifurcation); confirm against HW documentation.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/*
+ * Trigger a sample of the PCIe3 statistics counters by writing to the
+ * SAMPLE_TIME register. The value 0xfee1dead appears to be a magic trigger
+ * token - TODO confirm against the register documentation.
+ */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection: set both ENA and REQ, then flush once. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	/* Single register flush writes both fields together. */
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection: clear ENA but keep REQ set, flush once. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	/* Single register flush writes both fields together. */
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read the raw PCIe3 statistics counters into the caller-supplied locations.
+ * All output pointers must be non-NULL. The TG unit size and reference
+ * frequency are compile-time constants, not read from hardware.
+ * Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw statistics counters into rates normalized over the
+ * reference-clock interval. When no reference clock ticks were sampled, the
+ * clock count and bus-utilization outputs are zeroed and the rate/tag
+ * outputs are left untouched (matching the raw-counter semantics).
+ * Always returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t n_rx, n_tx, n_ref_clk;
+	uint32_t n_unit_size, n_ref_freq;
+	uint32_t n_tags, n_rq_rdy, n_rq_vld;
+
+	nthw_pcie3_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size,
+			  &n_ref_freq, &n_tags, &n_rq_rdy, &n_rq_vld);
+
+	if (n_ref_clk == 0) {
+		/* No time base sampled - report idle. */
+		*p_ref_clk_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+		return 0;
+	}
+
+	/* Rates are scaled to the TG reference frequency per clock tick. */
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) /
+			(uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) /
+			(uint64_t)n_ref_clk;
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_tag_use_cnt = n_tags;
+
+	/* Utilization in parts-per-million of the sampled interval. */
+	*p_pci_nt_bus_util =
+		((uint64_t)n_rq_vld * 1000000ULL) / (uint64_t)n_ref_clk;
+	*p_pci_xil_bus_util =
+		((uint64_t)n_rq_rdy * 1000000ULL) / (uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/* Pre-sample hook: intentionally a no-op, kept for interface symmetry
+ * with nthw_pcie3_end_point_counters_sample_post().
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	(void)p;
+	(void)epc;
+
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the endpoint counter struct from the current rates.
+ * NOTE(review): the RX-rate output is stored in epc->cur_tx and the TX-rate
+ * in epc->cur_rx - possibly a deliberate host-vs-NIC direction swap; confirm
+ * against the callers of this hook.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Handle cache for one PCIE3 FPGA module instance; register/field pointers
+ * are resolved by nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
+	nt_module_t *mp_mod_pcie3;	/* resolved PCIE3 module instance */
+	int mn_instance;	/* module instance number */
+
+	/* Statistics control (enable/request) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	/* RX/TX byte or transaction counters */
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	/* Request-ready / request-valid cycle counters (bus utilization) */
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* Reference clock tick counter (time base for rate calculations) */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* Root-port -> endpoint error signalling */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* Endpoint -> root-port error signalling */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	/* Statistics sample trigger */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Endpoint message interface and DMA endpoint allow masks */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* DMA marker address (LSB/MSB halves) */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	/* Scratch/test registers */
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are not populated by nthw_pcie3_init()
+	 * in this patch - presumably reserved for future use; confirm.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/* Allocate a zeroed SDC handle; returns NULL on allocation failure. */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc gives the same zero-initialized result as malloc+memset. */
+	nthw_sdc_t *p = calloc(1, sizeof(nthw_sdc_t));
+
+	return p;
+}
+
+/* Release an SDC handle; safe to call with NULL. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub before freeing to catch use-after-free via stale pointers. */
+	memset(p, 0, sizeof(nthw_sdc_t));
+	free(p);
+}
+
+/*
+ * Resolve all SDC module registers/fields into the handle.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL it only probes for the module's presence.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* Control register fields */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* Status register fields (polled by get_states/wait_states) */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* Cell count and fill-level monitoring fields */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Sample one SDC status field, append its (masked) value to the packed
+ * result mask and report whether the field is in the expected state.
+ *
+ * Returns 0 when the field matches the expectation, 1 otherwise.
+ * The mask is computed with a 64-bit shift so a full 32-bit-wide field does
+ * not trigger undefined behavior ((1 << 32) is UB for a 32-bit int).
+ */
+static int nthw_sdc_sample_field(nt_field_t *p_fld, uint64_t *pn_mask,
+			       int expect_all_set)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	const uint32_t val_mask =
+		(uint32_t)(((uint64_t)1 << n_val_width) - 1);
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	if (expect_all_set)
+		return val != val_mask;
+	return val != 0;
+}
+
+/*
+ * Collect the SDC status fields (calib, init_done, mmcm_lock, pll_lock,
+ * resetting - in that bit order, MSB first) into *pn_result_mask and count
+ * how many of them are in an unexpected state. All fields except
+ * "resetting" are expected to be all-ones; "resetting" must be zero.
+ *
+ * Returns the number of failing fields (0 = all ok), or -1 on NULL args.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	n_err_cnt += nthw_sdc_sample_field(p->mp_fld_stat_calib, &n_mask, 1);
+	n_err_cnt += nthw_sdc_sample_field(p->mp_fld_stat_init_done, &n_mask, 1);
+	n_err_cnt += nthw_sdc_sample_field(p->mp_fld_stat_mmcm_lock, &n_mask, 1);
+	n_err_cnt += nthw_sdc_sample_field(p->mp_fld_stat_pll_lock, &n_mask, 1);
+	n_err_cnt += nthw_sdc_sample_field(p->mp_fld_stat_resetting, &n_mask, 0);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reports a healthy state: calib/init_done/mmcm_lock/
+ * pll_lock all set and resetting cleared, each within the given poll budget.
+ * Returns the number of fields that timed out (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int n_err_cnt = 0;
+	int rc;
+
+	rc = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	n_err_cnt += (rc != 0);
+
+	rc = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	n_err_cnt += (rc != 0);
+
+	rc = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	n_err_cnt += (rc != 0);
+
+	rc = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	n_err_cnt += (rc != 0);
+
+	/* "resetting" is the one field that must end up cleared. */
+	rc = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	n_err_cnt += (rc != 0);
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Handle cache for one SDC (SDRAM controller) FPGA module instance;
+ * field pointers are resolved by nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
+	nt_module_t *mp_mod_sdc;	/* resolved SDC module instance */
+	int mn_instance;	/* module instance number */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields (polled during bring-up) */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* Cell count / fill level monitoring fields */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/* Allocate a zeroed Si5340 handle; returns NULL on allocation failure. */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc gives the same zero-initialized result as malloc+memset. */
+	nthw_si5340_t *p = calloc(1, sizeof(nthw_si5340_t));
+
+	return p;
+}
+
+/*
+ * Bind the Si5340 handle to an I2C controller/address and select register
+ * page 0 on the device so subsequent paged accesses start from a known state.
+ * Always returns 0; the I2C write result is not checked here.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1;	/* no clock profile applied yet */
+
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Release an Si5340 handle; safe to call with NULL. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub before freeing to catch use-after-free via stale pointers. */
+	memset(p, 0, sizeof(nthw_si5340_t));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+/*
+ * Read one Si5340 register. The 16-bit register address encodes the page in
+ * the high byte and the in-page offset in the low byte; the page register is
+ * only rewritten when the cached page differs (saves an I2C transaction).
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+/*
+ * Write one Si5340 register (paged addressing, mirroring nthw_si5340_read):
+ * switch the cached page only when needed, then write the byte.
+ * Always returns 0; I2C errors are not propagated here.
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Apply a clock-profile table to the Si5340: iterate the (addr, value) pairs
+ * in either fmt1 or fmt2 layout, write each register and read it back to
+ * verify. Register 0x0006 needs a settling delay and 0x001C (soft reset) is
+ * excluded from readback verification.
+ *
+ * Returns 0 on success, -1 on unknown format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* Decode the current entry according to the table format and
+		 * advance the cursor by one entry of that format's size.
+		 */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Program a clock profile into the Si5340 and wait (up to ~5s) for the DPLL
+ * to lock and SYS calibration to complete, then read out the 8-character
+ * design id for debug logging.
+ *
+ * NOTE(review): the return value of nthw_si5340_cfg() is deliberately
+ * discarded - a failed write is expected to surface as a missing lock in the
+ * status poll below; confirm this is the intended policy.
+ *
+ * Returns 0 on success, -1 when lock/calibration is not reached.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		/* Registers 0x0c (status) / 0x11 (sticky status); bits 0x09
+		 * are presumed to be the lock/calibration flags - confirm
+		 * against the Si5340 reference manual.
+		 */
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Read the NUL-terminated design id string from 0x26B.. */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a format-1 clock profile table. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a format-2 clock profile table. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* State for one Si5340 clock synthesizer accessed over I2C. */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C controller handle (not owned) */
+	int mn_clk_cfg;	/* applied clock profile id; -1 = none */
+	uint8_t m_si5340_page;	/* cached device register page */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/* Allocate a zeroed SPI v3 handle; returns NULL on allocation failure. */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc gives the same zero-initialized result as malloc+memset. */
+	nthw_spi_v3_t *p = calloc(1, sizeof(nthw_spi_v3_t));
+
+	return p;
+}
+
+/*
+ * Release a SPI v3 handle and its owned SPIM/SPIS sub-modules.
+ * Safe to call with NULL or with partially-initialized handles.
+ */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p) {
+		/* Tear down owned sub-modules before freeing the handle. */
+		if (p->mp_spim_mod) {
+			nthw_spim_delete(p->mp_spim_mod);
+			p->mp_spim_mod = NULL;
+		}
+
+		if (p->mp_spis_mod) {
+			nthw_spis_delete(p->mp_spis_mod);
+			p->mp_spis_mod = NULL;
+		}
+
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+		free(p);
+	}
+}
+
+/* Set the transfer timeout (units of the monotonic counter used by the
+ * wait helpers - presumably seconds, see nthw_spi_v3_init; confirm).
+ * Always returns 0.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this driver (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Poll the Tx FIFO every 1ms until it drains or time_out (in
+	 * monotonic-counter units) elapses.
+	 */
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	/* Poll every 10ms until the FIFO is non-empty or time_out (in
+	 * monotonic-counter units) elapses.
+	 */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Debug helper: log 'count' bytes as hex, 16 bytes per log line.
+ * tmp_str holds at most 16*3 = 48 chars plus NUL, well within 128.
+ */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;	/* column within the current output line (0..15) */
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
+		j++;
+
+		/* Flush a full line of 16 bytes, or the final partial line. */
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0';
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Set up the SPI v3 channel: create and initialize the SPIM (master) and
+ * SPIS (slave) FPGA modules for the given instance, then reset both.
+ *
+ * All four steps are attempted regardless of earlier failures (so the
+ * hardware is left in a consistent state), but the first failing step's
+ * code is returned instead of being masked by later successes.
+ * Returns 0 when every step succeeded.
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+	uint32_t first_error = 0;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Fixed: message previously said "nthw_spis_init failed" */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+		if (first_error == 0)
+			first_error = result;
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Fixed: message previously said "nthw_spim_init failed" */
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+		if (first_error == 0)
+			first_error = result;
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+		if (first_error == 0)
+			first_error = result;
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+		if (first_error == 0)
+			first_error = result;
+	}
+
+	return first_error != 0 ? first_error : result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ */
+/*
+ * Perform one SPI v3 request/response exchange: wrap the Tx payload in a
+ * 32-bit header (opcode + size), push it through the SPIM Tx FIFO, then
+ * read the response header and payload from the SPIS Rx FIFO.
+ *
+ * On entry rx_buf->size is the payload capacity; on return it holds the
+ * number of payload bytes received. All FIFO words travel in network byte
+ * order (htonl/ntohl).
+ *
+ * Returns 0 on success, -1 when the device reports an error code,
+ * 1 when the response exceeds the Rx buffer, or a helper's error code.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+	/* Packed header layouts; the 16-bit member order assumes the same
+	 * host layout the device expects after htonl - presumably
+	 * little-endian hosts only; confirm for big-endian targets.
+	 */
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Stream the payload into the FIFO, 4 bytes per word; the
+		 * final partial word is zero-padded via the value = 0 reset.
+		 */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the on-board AVR microcontroller, sent as the
+ * command field of the SPI v3 transfer header.
+ */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following commands are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM layout of the Vital Product Data block; field order and sizes
+ * must match the EEPROM image exactly.
+ */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (GEN2_BNAME_SIZE bytes ascii)
+	 * (e.g. "ntmainb1e2" or "ntfront20b1")
+	 */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded, NUL-terminated view of the board identification data (the raw
+ * on-EEPROM form is vpd_eeprom_t).
+ */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];   /* +1 for NUL terminator */
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6]; /* base MAC address */
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Generic sized buffer descriptor used for SPI v3 payload transfer. */
+struct tx_rx_buf {
+	uint16_t size;  /* valid byte count in p_buf */
+	void *p_buf;    /* caller-owned payload storage */
+};
+
+/* SPI v3 channel state: one SPI master + one SPI slave module pair. */
+struct nthw__spi__v3 {
+	int m_time_out;         /* transfer timeout used by the wait helpers */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod; /* SPI master side */
+	nthw_spis_t *mp_spis_mod; /* SPI slave side */
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/*
+ * Allocate and zero-initialize a SPIM instance.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * object and must release it with nthw_spim_delete().
+ */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc zeroes the struct in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Bind a SPIM (SPI master) instance to FPGA module instance n_instance and
+ * resolve every register/field handle used by the SPIM API.
+ * When p is NULL the call degenerates to an existence probe for the module.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	/* Probe-only mode: report module presence without touching state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* Control register: loopback, enable, TX/RX FIFO reset */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* Status register: FIFO level/empty/full flags */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* Data transmit register (TX FIFO entry point) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	/* Data receive register (RX FIFO exit point) */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	/* Clock prescaler configuration */
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/*
+ * Destroy a SPIM instance created with nthw_spim_new().
+ * The struct is scrubbed before free so stale handles cannot be reused.
+ * NULL is accepted and ignored.
+ */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spim_t));
+		free(p);
+	}
+}
+
+/*
+ * Soft-reset the SPIM core by writing the documented magic value to the
+ * software reset register. Always returns 0.
+ */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	/* 0x0A hardcoded value - see doc */
+	field_set_val32(p->mp_fld_srr_rst, 0x0A);
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIM core via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_cr_en;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_all(p_fld_en);
+	else
+		field_clr_all(p_fld_en);
+	field_flush_register(p_fld_en);
+
+	return 0;
+}
+
+/*
+ * Push one 32-bit word into the SPIM TX FIFO (write plus immediate flush).
+ * Always returns 0.
+ */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/*
+ * Query the SR.TXEMPTY flag; *pb_empty is set to true when the TX FIFO
+ * holds no data. Always returns 0.
+ */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) != 0;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/* SPI master (SPIM) module context: module handle plus cached
+ * register/field handles resolved once by nthw_spim_init().
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control (loopback, enable, FIFO resets) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR: status (FIFO flags and levels) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR: data transmit (TX FIFO) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: data receive (RX FIFO); CFG: clock prescaler */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/*
+ * Allocate and zero-initialize a SPIS instance.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * object and must release it with nthw_spis_delete().
+ */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc zeroes the struct in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Bind a SPIS (SPI slave) instance to FPGA module instance n_instance and
+ * resolve every register/field handle used by the SPIS API.
+ * When p is NULL the call degenerates to an existence probe for the module.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	/* Probe-only mode: report module presence without touching state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* Control register: loopback, enable, FIFO resets, debug */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* Status register: FIFO flags, levels and error bits */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* Data transmit register (TX FIFO entry point) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	/* Data receive register (RX FIFO exit point) */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* Sensor result RAM access: address/count control + data window */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/*
+ * Destroy a SPIS instance created with nthw_spis_new().
+ * The struct is scrubbed before free so stale handles cannot be reused.
+ * NULL is accepted and ignored.
+ */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spis_t));
+		free(p);
+	}
+}
+
+/*
+ * Soft-reset the SPIS core by writing the documented magic value to the
+ * software reset register. Always returns 0.
+ */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	/* 0x0A hardcoded value - see doc */
+	field_set_val32(p->mp_fld_srr_rst, 0x0A);
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIS core via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_cr_en;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_all(p_fld_en);
+	else
+		field_clr_all(p_fld_en);
+	field_flush_register(p_fld_en);
+
+	return 0;
+}
+
+/*
+ * Query the SR.RXEMPTY flag; *pb_empty is set to true when the RX FIFO
+ * holds no data. Always returns 0.
+ */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) != 0;
+
+	return 0;
+}
+
+/*
+ * Pop one 32-bit word from the SPIS RX FIFO into *p_data.
+ * Caller must ensure data is available first (see
+ * nthw_spis_get_rx_fifo_empty()). Always returns 0.
+ */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM.
+ * Programs the RAM address to n_result_idx with a burst count of 1, then
+ * reads the data window into *p_sensor_result.
+ * NOTE(review): there is no completion poll between the control flush and
+ * the data read - presumably the RAM access is synchronous over the
+ * register bus; confirm against the SPIS hardware documentation.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/* SPI slave (SPIS) module context: module handle plus cached
+ * register/field handles resolved once by nthw_spis_init().
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control (loopback, enable, FIFO resets, debug) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR: status (FIFO flags, levels, error bits) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR: data transmit (TX FIFO) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: data receive (RX FIFO) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* RAM_CTRL/RAM_DATA: sensor result RAM access */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate and zero-initialize a TSM instance.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * object and must release it with nthw_tsm_delete().
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc zeroes the struct in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/*
+ * Destroy a TSM instance created with nthw_tsm_new().
+ * The struct is scrubbed before free so stale handles cannot be reused.
+ * NULL is accepted and ignored.
+ */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_tsm_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TSM (time stamp module) instance to FPGA module instance
+ * n_instance and resolve all register/field handles used by the TSM API.
+ * When p is NULL the call degenerates to an existence probe for the module.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	/* Probe-only mode: report module presence without touching state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse the looked-up handle instead of querying the same
+		 * register twice (original did a redundant duplicate lookup
+		 * for each of TIME_LO/TIME_HI/TS_LO/TS_HI).
+		 */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the current 64-bit timestamp (TS_HI:TS_LO) into *p_ts.
+ * Returns 0 on success, -1 if p_ts is NULL.
+ * NOTE(review): the two 32-bit halves are fetched with separate register
+ * updates; the value may tear across a TS_LO wrap unless the hardware
+ * latches both halves on read - confirm against the TSM documentation.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* p_ts already validated above - removed the dead re-check */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the current 64-bit time (TIME_HI seconds : TIME_LO ns) into *p_time.
+ * Returns 0 on success, -1 if p_time is NULL.
+ * NOTE(review): the two 32-bit halves are fetched with separate register
+ * updates; the value may tear across a TIME_LO wrap unless the hardware
+ * latches both halves on read - confirm against the TSM documentation.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* p_time already validated above - removed the dead re-check */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/*
+ * Set the current time: low 32 bits then high 32 bits, each flushed
+ * separately. Always returns 0.
+ * NOTE(review): the two halves are written in separate flushes, so the
+ * hardware can transiently observe a mixed time unless it latches on the
+ * HI write - confirm against the TSM documentation.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	/* explicit cast for consistency with the high-word write below */
+	field_set_val_flush32(p->mp_fld_time_lo,
+			    (uint32_t)(n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/* Enable or disable timer T0. Always returns 0. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(p_fld);
+	if (b_enable)
+		field_set_flush(p_fld);
+	else
+		field_clr_flush(p_fld);
+	return 0;
+}
+
+/*
+ * Program the T0 period in nanoseconds (e.g. 50*1000*1000 for 50 ms).
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable or disable timer T1. Always returns 0. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(p_fld);
+	if (b_enable)
+		field_set_flush(p_fld);
+	else
+		field_clr_flush(p_fld);
+	return 0;
+}
+
+/*
+ * Program the T1 period in nanoseconds (e.g. 100*1000*1000 for 100 ms).
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/*
+ * Select the timestamp format used by the TSM. Always returns 0.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/* Time stamp module (TSM) context: module handle plus cached
+ * register/field handles resolved once by nthw_tsm_init().
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	/* CONFIG.TS_FORMAT */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL enables for timers T0 and T1 */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	/* T0 period (ns) */
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	/* T1 period (ns) */
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* Timestamp counter, low/high halves */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* Wall-clock time, low (ns) / high (s) halves */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate and zero-initialize a DBS instance.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * object and must release it with nthw_dbs_delete().
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc zeroes the struct in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/*
+ * Destroy a DBS instance created with nthw_dbs_new().
+ * The struct is scrubbed before free so stale handles cannot be reused.
+ * NULL is accepted and ignored.
+ */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_dbs_t));
+		free(p);
+	}
+}
+
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the DBS RX control register to power-on defaults: all RX queues and
+ * both monitor/writer engines disabled.  The scan/update speed values (8, 5)
+ * are fixed defaults; their unit is a HW encoding not visible here -- TODO
+ * confirm against the DBS register documentation.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	/* Write the shadowed field values out to the device */
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset the DBS TX control register to power-on defaults: all TX queues and
+ * both monitor/writer engines disabled.
+ * NOTE(review): the default speeds (scan=5, update=8) are mirrored relative
+ * to the RX reset (scan=8, update=5) -- confirm this asymmetry is intended.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	/* Write the shadowed field values out to the device */
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: disable RX/TX control, then clear every per-queue entry in
+ * the AM (available monitor), UW (used writer), DR (descriptor reader), QP
+ * (queue property) and QoS tables, zeroing both the local shadow copies and
+ * the HW tables (each set_shadow_* is followed by a flush_* that writes HW).
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the DBS RX control register in one shot: highest active queue
+ * index, available-monitor enable/speed, used-writer enable/speed, and the
+ * global RX queue enable.  Always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Compile-time debug tracing of every value written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all RX control settings into the caller-supplied pointers.
+ * NOTE(review): field_get_val32 is used (not field_get_updated as in the
+ * idle/ptr getters), so this presumably returns the cached shadow values
+ * rather than forcing a fresh register read -- confirm.
+ * Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the DBS TX control register in one shot: highest active queue
+ * index, available-monitor enable/speed, used-writer enable/speed, and the
+ * global TX queue enable.  Always returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Compile-time debug tracing of every value written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Fix: tx_queue_enable was the only parameter not traced (the RX
+	 * counterpart prints all six of its parameters).
+	 */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all TX control settings into the caller-supplied pointers.
+ * NOTE(review): like the RX getter, this uses field_get_val32, so it
+ * presumably returns cached shadow values -- confirm.  Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Initialize one RX queue.  If the optional INIT_VAL register exists (its
+ * pointer is non-NULL), the start index/pointer are programmed first; then
+ * the init command and target queue are written and flushed.
+ * Always returns 0.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX init command state (init value, queue, busy flag).
+ * NOTE(review): uses field_get_val32 (cached values) -- presumably a caller
+ * polls after triggering a register update elsewhere; confirm.
+ */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Initialize one TX queue.  Mirrors set_rx_init(): optional INIT_VAL
+ * register first (when present), then the init command/queue, each flushed.
+ * Always returns 0.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX init command state (init value, queue, busy flag).
+ * NOTE(review): uses field_get_val32 (cached values) like get_rx_init --
+ * confirm the register is refreshed before calling.
+ */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for an RX queue.  The RX_IDLE register is optional;
+ * return -ENOTSUP when this FPGA image does not provide it.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle/busy status; -ENOTSUP when the register is absent.
+ * field_get_updated is used here (unlike the _val32 getters), so the value
+ * comes from a fresh register read.  The queue cannot be read back and is
+ * always reported as 0.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for a TX queue.  The TX_IDLE register is optional;
+ * return -ENOTSUP when this FPGA image does not provide it.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle/busy status; -ENOTSUP when the register is absent.
+ * Values come from a fresh register read (field_get_updated); the queue
+ * cannot be read back and is always reported as 0.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue a subsequent get_rx_ptr() refers to.
+ * The RX_PTR register is optional; -ENOTSUP when absent.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX queue pointer previously selected with set_rx_ptr_queue().
+ * -ENOTSUP when the optional register is absent; the queue cannot be read
+ * back and is always reported as 0.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue a subsequent get_tx_ptr() refers to.
+ * The TX_PTR register is optional; -ENOTSUP when absent.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer previously selected with set_tx_ptr_queue().
+ * -ENOTSUP when the optional register is absent; the queue cannot be read
+ * back and is always reported as 0.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Point the RX available-monitor table at one entry (address = index,
+ * count = 1) so the following DATA register flush targets that entry.
+ */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/*
+ * Per-field writers for the local shadow copy of one RX available-monitor
+ * (AM) table entry.  These only update host memory; HW is written later by
+ * flush_rx_am_data().
+ * NOTE(review): one setter carries an inconsistent "nthw_dbs_" prefix --
+ * consider renaming for uniformity in a follow-up.
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Populate every field of one RX AM shadow entry (host memory only). */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write one RX AM shadow entry to HW: load the DATA register fields from the
+ * shadow, select the table index via the CONTROL register, then flush DATA.
+ * PCKED/INT fields are optional (NULL when the FPGA lacks them) and skipped.
+ * NOTE(review): the 64-bit GPA is passed as two 32-bit words via pointer
+ * cast -- assumes field_set_val consumes them in the HW's expected word
+ * order (little-endian host); confirm.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update one RX available-monitor table entry (shadow + HW).
+ * -ENOTSUP when the DATA register is absent on this FPGA image.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX available-monitor table at one entry (address = index,
+ * count = 1) so the following DATA register flush targets that entry.
+ */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/*
+ * Populate every field of one TX AM shadow entry (host memory only).
+ * Unlike the RX variant this assigns the struct members directly instead of
+ * going through per-field helpers.
+ */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Write one TX AM shadow entry to HW.  Same pattern as flush_rx_am_data():
+ * load DATA fields, select the index via CONTROL, flush DATA.  Optional
+ * PCKED/INT fields are skipped when absent.  The 64-bit GPA is passed as
+ * two 32-bit words via pointer cast -- word order assumption as in the RX
+ * variant; confirm.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update one TX available-monitor table entry (shadow + HW).
+ * -ENOTSUP when the DATA register is absent on this FPGA image.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the RX used-writer (UW) table at one entry (address = index,
+ * count = 1) so the following DATA register flush targets that entry.
+ */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/*
+ * Per-field writers for the local shadow copy of one RX used-writer (UW)
+ * table entry.  Host memory only; HW is written by flush_rx_uw_data().
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+/* vec/istk: interrupt vector and interrupt-stick fields -- semantics defined
+ * by the DBS register map, not visible here.
+ */
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Populate every field of one RX UW shadow entry (host memory only). */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write one RX UW shadow entry to HW.  The queue-size encoding depends on
+ * the DBS module version: >= 0.8 takes a mask (2^qs - 1), older images take
+ * the raw (log2) value.  Optional packed/interrupt fields are skipped when
+ * absent; vec/istk are only written together with the INT field.  The
+ * 64-bit GPA is passed as two 32-bit words via pointer cast -- word order
+ * assumption; confirm.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update one RX used-writer table entry (shadow + HW).
+ * -ENOTSUP when the DATA register is absent on this FPGA image.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX used-writer (UW) table at one entry (address = index,
+ * count = 1) so the following DATA register flush targets that entry.
+ */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/*
+ * Per-field writers for the local shadow copy of one TX used-writer (UW)
+ * table entry.  Host memory only; HW is written by flush_tx_uw_data().
+ * The TX table adds an in_order field on top of the RX UW layout.
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Populate every field of one TX UW shadow entry (host memory only). */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write one TX UW shadow entry to HW.  Same pattern and version-dependent
+ * queue-size encoding as flush_rx_uw_data() (>= 0.8: mask 2^qs - 1; older:
+ * raw value).  Optional packed/interrupt/in-order fields are skipped when
+ * absent.  64-bit GPA passed as two 32-bit words via pointer cast -- word
+ * order assumption; confirm.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update one TX used-writer table entry (shadow + HW).
+ * -ENOTSUP when the DATA register is absent on this FPGA image.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the RX descriptor-reader (DR) table at one entry (address = index,
+ * count = 1) so the following DATA register flush targets that entry.
+ */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/*
+ * Per-field writers for the local shadow copy of one RX descriptor-reader
+ * (DR) table entry.  Host memory only; HW is written by flush_rx_dr_data().
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Populate every field of one RX DR shadow entry (host memory only). */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write one RX DR shadow entry to HW.  Version-dependent queue-size
+ * encoding as in the UW flushes (>= 0.8: mask 2^qs - 1; older: raw value).
+ * Optional packed field is skipped when absent.  64-bit GPA passed as two
+ * 32-bit words via pointer cast -- word order assumption; confirm.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: update one RX descriptor-reader table entry (shadow + HW).
+ * -ENOTSUP when the DATA register is absent on this FPGA image.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX descriptor-reader (DR) table at one entry (address = index,
+ * count = 1) so the following DATA register flush targets that entry.
+ */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/*
+ * Per-field writers for the local shadow copy of one TX descriptor-reader
+ * (DR) table entry.  Host memory only; HW is written by flush_tx_dr_data().
+ * The TX table adds a port field on top of the RX DR layout.
+ */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/* Refresh every field of the TX DR shadow entry @index in one call. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write the shadow TX DR entry @index to hardware: load all data fields,
+ * select the target bank address, then flush the data register.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	/* The 64-bit guest physical address is written as two 32-bit words */
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	/* Newer DBS modules expect the queue size encoded as a (2^n)-1 mask */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	/* The PACKED field is only present on some FPGA variants */
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program the TX descriptor reader (DR) data for one queue.  The shadow
+ * entry is refreshed first and then written out to the DBS TX DR bank.
+ * Returns 0 on success, or -ENOTSUP when this FPGA has no TX DR data register.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (p->mp_reg_tx_descriptor_reader_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id,
+			      queue_size, port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Select which TX queue-property (QP) bank entry the next flush targets. */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Record the virtual port in the TX QP shadow entry; flushed separately. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+/* Refresh the full TX QP shadow entry (currently just the virtual port). */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write the shadow TX QP entry @index to hardware: data, address, flush. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Bind TX queue-property entry @index to @virtual_port in the DBS module.
+ * Returns 0 on success, or -ENOTSUP when this FPGA lacks the QP data register.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (p->mp_reg_tx_queue_property_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Select which TX QoS bank entry the next data flush targets.
+ * NOTE(review): the mp_reg_tx_queue_qos_control_adr/cnt handles are
+ * nt_field_t pointers despite the mp_reg_ prefix (see nthw_dbs.h).
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/*
+ * Per-field shadow setters for the TX QoS bank (enable / information rate /
+ * burst size).  Host-side only; flushed by flush_tx_qos_data().
+ */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Refresh all three fields of the TX QoS shadow entry @index. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/*
+ * Write the shadow TX QoS entry @index to hardware: data fields first,
+ * then address selection, then flush of the data register.
+ * NOTE(review): the mp_reg_tx_queue_qos_data_* handles are nt_field_t
+ * pointers despite the mp_reg_ prefix (see nthw_dbs.h).
+ */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Configure TX QoS shaping for queue @index: enable flag, information rate
+ * and burst size.  The shadow entry is updated first, then flushed.
+ * Returns 0 on success, or -ENOTSUP when this FPGA lacks the QoS data register.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (p->mp_reg_tx_queue_qos_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate scaler from a multiplier/divisor pair.
+ * NOTE(review): exact MUL/DIV semantics assumed from field names —
+ * confirm against the DBS register specification.
+ * Returns 0 on success, or -ENOTSUP when the rate register is absent.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA: RX available-monitor bank entry */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address; /* written to HW as two 32-bit words */
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA: TX available-monitor bank entry */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA: RX used-writer bank entry */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* stored raw; encoding depends on DBS version at flush */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA: TX used-writer bank entry */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA: RX descriptor-reader bank entry */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: TX descriptor-reader bank entry */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: TX queue-property bank entry */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* TX QoS bank entry: enable, information rate, burst size */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * DBS (doorbell/queue supervisor) module context: FPGA handle, register and
+ * field handles resolved at init, plus host-side shadow copies of every
+ * indexed memory bank (the banks are write-only via control/data registers).
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/* NOTE(review): the nt_field_t handles below use an mp_reg_ prefix;
+	 * mp_fld_ would match the naming of every other field handle here.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Host-side shadows of the write-only memory banks */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Lifecycle */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/* Control registers; getters read back current values */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+/* Queue init/idle/pointer handshakes */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+/* Memory-bank programming (shadow + flush); return -ENOTSUP when absent */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+/* NOTE(review): guard uses an _HPP_ suffix although this is a C header;
+ * comment fixed to match the actual guard macro.
+ */
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Meta port classification; selects physical vs. virtual register paths
+ * (e.g. in nthw_epp_set_mtu()).
+ */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Hardware identity reported by firmware plus VPD (vital product data).
+ * NOTE(review): struct tag "nthwhw_info_s" vs typedef "nthw_hw_info_t"
+ * are inconsistently spelled — consider aligning.
+ */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/* Per-adapter FPGA description: identity, port counts, module handles and
+ * the PCIe mapping details needed for register access and DMA setup.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident; /* packed type/prod/ver/rev identity */
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Counts discovered from the FPGA product parameters */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	/* Core module instances; NULL when absent in this FPGA image */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/*
+ * Allocate a zero-initialized EPP instance.
+ * Returns NULL on allocation failure; release with nthw_epp_delete().
+ */
+nthw_epp_t *nthw_epp_new(void)
+{
+	/* calloc() allocates and zeroes in one call (was malloc()+memset()) */
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/*
+ * Scrub and release an EPP instance created by nthw_epp_new().
+ * A NULL argument is a no-op.
+ */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Clear the instance before handing the memory back */
+	memset(p, 0, sizeof(nthw_epp_t));
+	free(p);
+}
+
+/* Probe-only check: returns non-zero when EPP instance @n_instance exists
+ * (nthw_epp_init() with a NULL context performs no initialization).
+ */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/*
+ * Resolve all EPP (egress packet processing) registers and fields from the
+ * FPGA model into @p.  When @p is NULL the call only probes for the module:
+ * it returns 0 if instance @n_instance exists, -1 otherwise.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory (control/data pair) */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical TX port MTU */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Per-queue MTU */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Physical TX port QoS */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual port QoS */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue -> virtual port mapping */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Bring the EPP module to a known default state: clear every recipe entry,
+ * program the NRECIPE default recipes, set initial MTUs and disable QoS
+ * shaping on all ports and queues.  A NULL context is a no-op (returns 0).
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* All recipe-memory writes below target one entry at a time */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Program the NRECIPE default recipes (size adjusts from the
+	 * rcp_data_size_adjust_* tables: none / VXLAN / VXLAN-IPv6)
+	 */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* Physical port MTU setup (2 ports assumed here — TODO confirm) */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* Physical port QoS: shaping disabled by default */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* Virtual queue MTU setup (128 queues assumed here — TODO confirm) */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* Virtual port QoS: shaping disabled by default */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the max-MTU register for @port according to its type (physical or
+ * virtual).  Unsupported port types reset the MTU registers and return
+ * -ENOTSUP.  A NULL context is a no-op (returns 0).
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the TXP MTU entry for this port */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new max MTU to the data register */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue MTU entry for this port */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new max MTU to the data register */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Restore MTU registers to their reset defaults */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Program TX-port QoS shaping for @port.  Shaping is enabled whenever any
+ * of rate, fractional rate or burst size is non-zero, disabled otherwise.
+ * A NULL context is a no-op (returns 0).
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the QoS entry for this port */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	/* Enable only when at least one shaping parameter is non-zero */
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Program virtual-port QoS shaping for @port; mirrors nthw_epp_set_txp_qos()
+ * for the vport register set.  A NULL context is a no-op (returns 0).
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the QoS entry for this virtual port */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	/* Enable only when at least one shaping parameter is non-zero */
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Map queue @qid to virtual port @vport in the EPP queue/vport table.
+ * A NULL context is a no-op (returns 0).
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	/* Select the table entry for this queue */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6 adds extra 70 bytes */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* MTU value programmed at init (per the name; usage is outside this view) */
+#define MTUINITVAL 1500
+/* Number of EPP recipe entries (sizes the tables below) */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+/* NOTE(review): 'static const' arrays in a header give every including
+ * translation unit its own copy - confirm this duplication is intended. */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * Handle for one EPP FPGA module instance: the module pointer plus all
+ * register/field shadow pointers resolved from the FPGA model at init time.
+ * (The "reciepe" spelling is kept as-is; it mirrors the identifiers used by
+ * the implementation and cannot change without breaking callers.)
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	/* Recipe memory: control (entry address/count) + data record */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Per-TX-port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Per-queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* TX-port QoS shaping table */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual-port QoS shaping table */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue -> virtual-port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+/* Allocate an EPP handle (free with nthw_epp_delete). */
+nthw_epp_t *nthw_epp_new(void);
+/* Release an EPP handle. */
+void nthw_epp_delete(nthw_epp_t *p);
+
+/* Query whether EPP instance @n_instance exists in @p_fpga. */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+/* Resolve all EPP registers/fields for @n_instance from the FPGA model. */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+/* Program initial recipe/MTU state. */
+int nthw_epp_setup(nthw_epp_t *p);
+/* Set the max MTU for a port of the given meta type. */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+/* Program TX-port QoS shaping (enable iff any parameter non-zero). */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+/* Program virtual-port QoS shaping (enable iff any parameter non-zero). */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+/* Map a queue id to a virtual port. */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated table of supported FPGA images; counted by fpga_mgr_init()
+ * and searched by fpga_mgr_query_fpga(). */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/* Module-id to printable-name map ('a' = module id, 'b' = name), sorted by
+ * name and terminated by a { 0, NULL } sentinel.  Consumed by
+ * nthw_fpga_mod_id_to_str() via linear search. */
+static const struct {
+	const int a;
+	const char *b;
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+/* Index 0 doubles as the error name; see get_bus_name(). */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Map a bus type id to its display name.
+ * Valid ids are 1..ARRAY_SIZE(a_bus_type)-1; anything else yields "ERR".
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	/* Fix: the original used '<=', permitting an out-of-bounds read at
+	 * index ARRAY_SIZE(a_bus_type). */
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ * Returns "unknown" when the id is not present in the table.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	size_t i;
+
+	/* Fix: the original loop condition 'i <= ARRAY_SIZE(...)' read one
+	 * element past the end of the table, and a miss then indexed the
+	 * table out of bounds again when forming the return value. */
+	for (i = 0; i < ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id) {
+			return (sa_nthw_fpga_mod_map[i].b ?
+				sa_nthw_fpga_mod_map[i].b : "unknown");
+		}
+	}
+	return "unknown";
+}
+
+/*
+ * Force C linkage for xxx_addr_bases and xxx_module_versions
+ */
+/*
+ * Dispatch a register read to the access path selected by the bus type.
+ * BAR/PCI reads are single-dword; the RAB busses take a dword count.
+ * Returns 0 on success, negative on failure.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		return 0;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0,
+					   len, p_data);
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1,
+					   len, p_data);
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2,
+					   len, p_data);
+	default:
+		assert(false);
+		return -1;
+	}
+}
+
+/*
+ * Read wrapper that also accepts two timestamp outputs.  This
+ * implementation does not produce timestamps; it simply forwards to
+ * read_data() and leaves *p_tsc1/*p_tsc2 untouched.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Dispatch a register write to the access path selected by the bus type.
+ * BAR/PCI writes are single-dword; the RAB busses take a dword count.
+ * Returns 0 on success, negative on failure.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		return 0;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0,
+					    len, p_data);
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1,
+					    len, p_data);
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2,
+					    len, p_data);
+	default:
+		assert(false);
+		return -1;
+	}
+}
+
+/*
+ * FpgaMgr
+ */
+/* Allocate an FPGA manager.  Fix: use calloc so the object is
+ * zero-initialized - fpga_mgr_show()/fpga_mgr_log_dump() read mn_fpgas and
+ * mpa_fpga_prod_init, which were indeterminate before fpga_mgr_init()
+ * with the previous plain malloc(). */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = calloc(1, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Scrub and release an FPGA manager (p must be non-NULL). */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Attach the generated instance table and count its NULL-terminated
+ * entries. */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Find the product description matching n_fpga_id (product/version/revision)
+ * and build an initialized nt_fpga_t from it.
+ * Returns NULL (with an error log) when no image matches or allocation fails.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id == n_fpga_prod &&
+				p_init->fpga_version == n_fpga_ver &&
+				p_init->fpga_revision == n_fpga_rev) {
+			nt_fpga_t *p_fpga = fpga_new();
+
+			/* Fix: fpga_new() can return NULL on allocation
+			 * failure; the original passed it straight to
+			 * fpga_init(), which dereferences it. */
+			if (p_fpga == NULL) {
+				NT_LOG(ERR, NTHW,
+				       "FPGA Id 0x%" PRIX64 ": allocation failed\n",
+				       n_fpga_id);
+				return NULL;
+			}
+			fpga_init(p_fpga, p_init, p_fpga_info);
+			return p_fpga;
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print one line per known FPGA image to fh_out.
+ * detail_level 0 prints product-version-revision only; any other value
+ * also prints the build time, raw and via ctime().
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/* Fix: cast for %lX - time_t is not necessarily
+			 * 'long', and a mismatched printf argument type is
+			 * undefined behavior. */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Emit a debug-log summary of all known FPGA images.  p_init is tagged
+ * _unused because NT_LOG may compile to nothing in some builds. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (idx = 0; idx < p->mn_fpgas; idx++) {
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[idx];
+
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       idx, p->mn_fpgas, p_init->fpga_product_id,
+		       p_init->fpga_version, p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA object (populate with fpga_init()). */
+nt_fpga_t *fpga_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/* Scrub and release the FPGA object itself (not its modules - see
+ * fpga_delete_all()). */
+void fpga_delete(nt_fpga_t *p)
+{
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Delete an FPGA object together with everything fpga_init() allocated.
+ * Fix: the original deleted the module objects but leaked the
+ * mpa_modules pointer array, and never released the params or mpa_params.
+ */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	if (p->mpa_modules) {
+		for (i = 0; i < p->mn_modules; i++) {
+			nt_module_t *p_mod = p->mpa_modules[i];
+
+			if (p_mod)
+				module_delete(p_mod);
+		}
+	}
+	free(p->mpa_modules);
+
+	if (p->mpa_params) {
+		for (i = 0; i < p->mn_params; i++) {
+			nt_param_t *p_param = p->mpa_params[i];
+
+			if (p_param)
+				param_delete(p_param);
+		}
+	}
+	free(p->mpa_params);
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate an nt_fpga_t from its static product description plus runtime
+ * adapter info, building the parameter and module object arrays.
+ * Fix: when an array allocation fails the matching count is reset to zero,
+ * so iterating code (dumps, queries, delete) never dereferences a NULL
+ * array.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	/* Identity/build metadata copied verbatim from the product init */
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+	if (p->mn_params) {
+		p->mpa_params = calloc(p->mn_params, sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		} else {
+			/* Keep the count consistent with the missing array */
+			p->mn_params = 0;
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+	if (p->mn_modules) {
+		p->mpa_modules = calloc(p->mn_modules, sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		} else {
+			p->mn_modules = 0;
+		}
+	}
+}
+
+/* Set the FPGA-level debug mode and propagate it to every module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *mod = p->mpa_modules[idx];
+
+		if (mod != NULL)
+			module_set_debug_mode(mod, n_debug_mode);
+	}
+}
+
+/* Linear search for the module object with the given id and instance;
+ * NULL when absent. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *mod = p->mpa_modules[idx];
+
+		if (mod->m_mod_id == id && mod->m_instance == instance)
+			return mod;
+	}
+	return NULL;
+}
+
+/* True when the module id/instance exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/* Find the static module description for id/instance in the product init
+ * data; NULL when absent. */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mp_init->nb_modules; idx++) {
+		nt_fpga_module_init_t *mod_init = &p->mp_init->modules[idx];
+
+		if (mod_init->id == id && mod_init->instance == instance)
+			return mod_init;
+	}
+	return NULL;
+}
+
+/* Look up a product parameter value by id; returns n_default_value when
+ * the parameter is not present. */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_params; idx++) {
+		const nt_param_t *par = p->mpa_params[idx];
+
+		if (par->param_id == n_param_id)
+			return par->param_value;
+	}
+	return n_default_value;
+}
+
+/* Accessor: FPGA product code. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* Accessor: FPGA version code. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* Accessor: FPGA revision code. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the FPGA identity and build metadata at INFO level.
+ * NOTE(review): m_fpga_build_time is printed with %08X, which assumes the
+ * field is 32-bit - confirm against the nt_fpga_t declaration. */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug-dump the FPGA summary, then all parameters and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug-dump every product parameter. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (idx = 0; idx < p->mn_params; idx++)
+		param_dump(p->mpa_params[idx]);
+}
+
+/* Debug-dump every module (each module dumps its registers in turn). */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (idx = 0; idx < p->mn_modules; idx++)
+		module_dump(p->mpa_modules[idx]);
+}
+
+/*
+ * Param
+ */
+/* Allocate a parameter object.  Fix: zero-initialize (calloc) for parity
+ * with fpga_new() - param_dump()/param_delete() touch fields that were
+ * indeterminate before param_init() with plain malloc(). */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = calloc(1, sizeof(nt_param_t));
+	return p;
+}
+
+/* Scrub and release a parameter object; NULL is accepted. */
+void param_delete(nt_param_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Bind a parameter object to its owner and copy the id/value pair from
+ * the static product description. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-dump one parameter id/value pair. */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/* Allocate a module object.  Fix: zero-initialize (calloc) - with plain
+ * malloc(), module_delete() on a never-initialized module iterated a
+ * garbage mn_registers count over a garbage mpa_registers pointer. */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+/*
+ * Tear down a module: delete its register objects, then release the
+ * register-pointer array (fix: the array itself was previously leaked)
+ * and the module.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	if (p->mpa_registers) {
+		for (i = 0; i < p->mn_registers; i++) {
+			nt_register_t *p_reg = p->mpa_registers[i];
+
+			if (p_reg)
+				register_delete(p_reg);
+		}
+	}
+	free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Populate a module object from the FPGA model and build its register
+ * objects.  Fix: when the register-array allocation fails, mn_registers is
+ * reset to zero so iterating code (dump, delete, lookup) never walks a
+ * NULL array.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	p->m_debug_mode = p->mp_owner ? p->mp_owner->m_debug_mode : 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			calloc(p->mn_registers, sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		} else {
+			/* Keep the count consistent with the missing array */
+			p->mn_registers = 0;
+		}
+	}
+}
+
+/* Convenience init: look up the static description by id/instance, then
+ * initialize the module with an explicit debug mode.
+ * NOTE(review): fpga_lookup_init() returns NULL for an unknown
+ * id/instance, and module_init() dereferences p_init unconditionally -
+ * confirm all callers pass known module ids. */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump the module summary followed by all its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register of the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (idx = 0; idx < p->mn_registers; idx++)
+		register_dump(p->mpa_registers[idx]);
+}
+
+/* Accessor: module major version from the FPGA model. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Accessor: module minor version from the FPGA model. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack the version as major in the upper 32 bits, minor in the lower
+ * 32 bits. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/* True when the module's version is >= the requested major.minor. */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version != p->m_major_version)
+		return p->m_major_version >= major_version;
+
+	return p->m_minor_version >= minor_version;
+}
+
+/* Linear search for a register by model id; NULL when not found. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		if (p->mpa_registers[idx]->m_id == id)
+			return p->mpa_registers[idx];
+	}
+	return NULL;
+}
+
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Silent register lookup (no error log); NULL when not found. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Accessor: the module's current debug mode. */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the module debug mode and propagate it to every register. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		nt_register_t *reg = p->mpa_registers[idx];
+
+		if (reg != NULL)
+			register_set_debug_mode(reg, n_debug_mode);
+	}
+}
+
+/* Accessor: the bus id this module is reached through (BUS_TYPE_*). */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Accessor: the module's base address on its bus. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* Log that a module present in the FPGA model is not handled by this
+ * driver.  (The misspelled name "unsuppported" is kept: it is part of the
+ * exported interface.) */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* Fix: add the trailing newline every other NT_LOG in this file has */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/* Allocate a register object.  Fix: zero-initialize (calloc) - with plain
+ * malloc(), mp_shadow/mp_dirty/mpa_fields stayed indeterminate whenever
+ * register_init() was given a field-less register, and register_delete()
+ * then freed garbage pointers. */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+/*
+ * Tear down a register: delete its field objects, then release the
+ * field-pointer array (fix: previously leaked) and the shadow/dirty
+ * buffers.  free(NULL) is a no-op, so the old pointer guards are gone.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	if (p->mpa_fields) {
+		for (i = 0; i < p->mn_fields; i++) {
+			nt_field_t *p_field = p->mpa_fields[i];
+
+			if (p_field)
+				field_delete(p_field);
+		}
+	}
+	free(p->mpa_fields);
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Populate a register object from the FPGA model and allocate its shadow
+ * state.  Fix: the shadow and dirty buffers are now allocated
+ * unconditionally - the original allocated them only when the register
+ * had fields, yet register_get_val()/register_flush()/register_clr() use
+ * them for every register.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	/* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1);
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mp_shadow = calloc(p->m_len, sizeof(uint32_t));
+	p->mp_dirty = calloc(p->m_len, sizeof(bool));
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = calloc(p->mn_fields, sizeof(nt_field_t *));
+		if (p->mpa_fields) {
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+		} else {
+			/* Keep the count consistent with the missing array */
+			p->mn_fields = 0;
+		}
+	}
+}
+
+/* Debug-dump the register summary followed by all its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field of the register, then a blank log line. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (idx = 0; idx < p->mn_fields; idx++)
+		field_dump(p->mpa_fields[idx]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Accessor: the register's absolute bus address (module base + offset). */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Delegate a reset to every field of the register. */
+void register_reset(const nt_register_t *p)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *fld = p->mpa_fields[idx];
+
+		if (fld != NULL)
+			field_reset(fld);
+	}
+}
+
+/* Linear search for a field by model id; NULL on a NULL register or when
+ * the id is not present. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int idx;
+
+	if (p == NULL)
+		return NULL;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		if (p->mpa_fields[idx]->m_id == id)
+			return p->mpa_fields[idx];
+	}
+	return NULL;
+}
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Silent field lookup (no error log); NULL when not found. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Accessor: register bit width as declared in the FPGA model. */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Accessor: register address relative to the owning module's base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* NOTE(review): despite the register_ prefix this takes a *module*
+ * pointer and returns the module's debug mode - confirm whether an
+ * nt_register_t variant was intended.  The signature cannot change
+ * without breaking callers. */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+/* Set the register debug mode and propagate to every field.
+ * (Field-level debug is normally left off - register operation dumps are
+ * typically enough.) */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *fld = p->mpa_fields[idx];
+
+		if (fld != NULL)
+			field_set_debug_mode(fld, n_debug_mode);
+	}
+}
+
+/*
+ * Read this register from hardware into its shadow buffer.
+ * Returns the bus status from read_data() (0 on success).
+ * Fix: the original tested 'p' only after dereferencing it in the
+ * initializers, so the guard was dead code; it is replaced by asserts on
+ * the owner chain.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+	struct fpga_info_s *p_fpga_info;
+
+	assert(p->mp_owner && p->mp_owner->mp_owner);
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Timestamped variant of register_read_data(); the timestamps are
+ * forwarded to read_data_tsc() (which currently does not fill them).
+ * Fix: same dead 'p' guard as register_read_data() - p was already
+ * dereferenced by the initializers - replaced by asserts.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+	struct fpga_info_s *p_fpga_info;
+
+	assert(p->mp_owner && p->mp_owner->mp_owner);
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	return read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data,
+			     p_tsc1, p_tsc2);
+}
+
+/*
+ * Write cnt consecutive records (len dwords each) from the shadow buffer
+ * to hardware.  Returns the bus status from write_data() (0 on success).
+ * Fix: same dead 'p' guard as register_read_data() - replaced by asserts.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+	struct fpga_info_s *p_fpga_info;
+
+	assert(p->mp_owner && p->mp_owner->mp_owner);
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	return write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+}
+
+/* Copy up to len dwords out of the shadow buffer (no hardware access);
+ * len == (uint32_t)-1 means "whole register". */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	memcpy(p_data, p->mp_shadow, len * sizeof(uint32_t));
+}
+
+/* Return the first shadow dword (no hardware access). */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Re-read the register from hardware into its shadow buffer.
+ * Skipped for write-only registers.  With ON_READ debug set, the data
+ * read is logged.  The _unused tags keep builds clean when NT_LOG
+ * compiles to nothing.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		/* NOTE(review): the read status is discarded here; a failed
+		 * bus read leaves stale shadow data behind. */
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Re-read the register from hardware, then return the first shadow dword. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every shadow word as needing a flush to hardware. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t w = p->m_len;
+
+	while (w--)
+		p->mp_dirty[w] = true;
+}
+
+/*
+ * Copy caller data into the shadow buffer (no hardware access);
+ * len == (uint32_t)-1 means "whole register".
+ * Fix: clamp before asserting - the original asserted len <= m_len first,
+ * so the supported len == -1 convention tripped the assert in debug
+ * builds (register_get_val() clamps first, as here).
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Convenience: set the shadow value and immediately flush one record to
+ * hardware. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write cnt consecutive records (m_len dwords each) from the shadow
+ * buffer to hardware; skipped for read-only registers.  With ON_WRITE
+ * debug set, the written data is logged.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		/* NOTE(review): presumably the bus burst limit - confirm */
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			/* NOTE(review): this inner 'i' shadows the outer one */
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/* NOTE(review): clears cnt dirty flags although len * cnt
+		 * dwords were written - confirm the intended dirty-tracking
+		 * granularity. */
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Perform a triggered read, reporting TSC values sampled around the access. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow and mark it dirty so the clear reaches hardware on flush. */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, sizeof(uint32_t) * p->m_len);
+	register_make_dirty(p);
+}
+
+/* Set every shadow bit and mark the register dirty for the next flush. */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, sizeof(uint32_t) * p->m_len);
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/* Allocate an uninitialized field object; field_init() must be called next. */
+nt_field_t *field_new(void)
+{
+	return malloc(sizeof(nt_field_t));
+}
+
+/*
+ * Scrub and free a field object; NULL is ignored.
+ * Fix: the unconditional memset() dereferenced a NULL argument; guard it,
+ * matching the NULL-safe pattern used by nthw_rac_delete().
+ */
+void field_delete(nt_field_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_field_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a field to its owning register and precompute the word/bit layout
+ * (front mask, body word count, tail mask) consumed by
+ * field_get_val()/field_set_val().
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	/* fields inherit the owning register's debug mode */
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	/* number of 32-bit words needed to hold bw bits (round up) */
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* bits available in the first, possibly partial, word */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		/* full 32-bit words between front and tail */
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* tail is always < 32 bits here, so 1 << bits_remaining is safe */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/*
+ * Return the debug mode of a module.
+ * NOTE(review): despite the field_ prefix this takes nt_module_t, matching
+ * the prototype in nthw_fpga_model.h — confirm a field-level accessor was
+ * not intended.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set this field's debug trace mode. */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* Width of the field in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Lowest bit position of the field within its register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Highest (inclusive) bit position of the field within its register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	const int n_high = p->mn_bit_pos_low + p->mn_bit_width - 1;
+
+	return n_high;
+}
+
+/* In-register bit mask covering the field's front word. */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Write the field's configured reset value into the shadow. */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Field mask shifted down to bit 0, i.e. the mask of the field's value range. */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return p->m_front_mask >> p->mn_bit_pos_low;
+}
+
+/* The field's configured reset value. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field value from the owning register's shadow into p_data
+ * (len must equal mn_words; only asserted in debug builds).
+ * A 64-bit union window shifts bits that straddle 32-bit word boundaries.
+ * NOTE(review): treating w32[0] as the low half of w64 assumes a
+ * little-endian union layout — confirm for all supported targets.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Merge len words of p_data into the owning register's shadow at the
+ * field's bit position (len must equal mn_words; only asserted in debug
+ * builds), then mark the register dirty. Bits outside the field are
+ * preserved via front/tail masks. Mirrors the 64-bit windowing of
+ * field_get_val(); same little-endian union-layout assumption applies.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Write a multi-word field value and flush the owning register to hardware. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Read the field value from the shadow as a single 32-bit word. */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t n_value;
+
+	field_get_val(p, &n_value, 1);
+	return n_value;
+}
+
+/* Refresh the owning register from hardware, then read the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t n_value;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &n_value, 1);
+	return n_value;
+}
+
+/* Triggered read of the owning register with TSC timestamps around the access. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Refresh the field's owning register shadow from hardware. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Flush the field's owning register shadow to hardware. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Store a 32-bit field value into the shadow (no hardware write). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Store a 32-bit field value and flush the owning register to hardware. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val32(p, val);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear every bit of a single-word field (asserts no body words exist). */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+/* Clear the field and flush the owning register to hardware. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set every bit of a single-word field (asserts no body words exist). */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set the field and flush the owning register to hardware. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Conditions polled for by field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,
+	FIELD_MATCH_SET_ALL,
+	FIELD_MATCH_CLR_ANY,
+	FIELD_MATCH_SET_ANY,
+};
+
+/*
+ * Poll a field until it matches the requested condition (all/any bits
+ * set/cleared). -1 for iterations/interval selects defaults: 10000
+ * iterations, 100 usec interval. Returns 0 on match, -1 on timeout.
+ * Fix: mask computations used (1 << mn_bit_width), which is undefined
+ * behavior for 32-bit-wide fields; compute in 64 bits instead.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused = (uint32_t)
+			(((1ULL << p->mn_bit_width) - 1) << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+}
+
+/* Poll until every field bit is set; see field_wait_cond32() for semantics. */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations, int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit is clear; see field_wait_cond32() for semantics. */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations, int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until any field bit is set; see field_wait_cond32() for semantics. */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations, int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until any field bit is clear; see field_wait_cond32() for semantics. */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations, int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until its value equals (n_wait_cond_value & n_wait_cond_mask).
+ * -1 for iterations/interval selects defaults (10000 iterations, 100 usec).
+ * Returns 0 on match, -1 on timeout.
+ * NOTE(review): the value read back is compared UNMASKED; if the field can
+ * carry bits outside n_wait_cond_mask the condition may never hold —
+ * confirm whether (val & n_wait_cond_mask) was intended on the left side.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Debug-log the field's static layout (id, bit range, width, words, reset). */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Debug-log the field's current shadow value, most-significant word first.
+ * NOTE(review): buf holds 32 words; assumes mn_words <= 32 — confirm no
+ * wider field exists, otherwise field_get_val() overruns the buffer.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Debug-log a field initializer record (id, bit range, width, reset value). */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Look up the FPGA model matching n_fpga_ident using a temporary FPGA
+ * manager. Logs and returns NULL when no matching model is known.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	const int n_type = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	const int n_prod = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	const int n_ver = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	const int n_rev = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+	char s_fpga_prod_ver_rev_str[32];
+	nt_fpga_mgr_t *p_fpga_mgr;
+	nt_fpga_t *p_fpga;
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_type, n_prod, n_ver, n_rev);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* fall through: clean up the manager and return NULL */
+	}
+
+	if (p_fpga_mgr)
+		fpga_mgr_delete(p_fpga_mgr);
+
+	return p_fpga;
+}
+
+/* Convenience lookup of a module instance within an FPGA model. */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+/* Convenience lookup of a register within a module. */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	return module_get_register(p_mod, n_reg);
+}
+
+/* Convenience lookup of a field within a register. */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+/*
+ * Unpack a 64-bit FPGA ident:
+ * type bits [39:32], product code [31:16], version [15:8], revision [7:0].
+ */
+#ifndef FPGAID_TO_PRODUCTCODE
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack major/minor into one 64-bit version value (major in the high word). */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/*
+ * Debug trace points; tested as bit flags (m_debug_mode & ON_READ /
+ * ON_WRITE) in register_update()/register_flush().
+ */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module may be attached to. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of the FPGA product initializers known to the driver. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;	/* number of entries in mpa_fpga_prod_init */
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* In-memory model of one FPGA: identity, parameters and module tree. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;			/* number of entries in mpa_params */
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;			/* number of entries in mpa_modules */
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;	/* static initializer this model was built from */
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One FPGA product parameter (id/value pair) owned by an FPGA model. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* One hardware module instance: bus location, version, and its registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;		/* one of enum nthw_bus_type */
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers;	/* number of entries in mpa_registers */
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* One hardware register: address/type, its fields and a local shadow copy. */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;	/* address relative to the module base */
+	uint32_t m_addr;
+	uint32_t m_type;	/* e.g. REGISTER_TYPE_RO; RO registers are never flushed */
+	uint32_t m_len;		/* register length in 32-bit words */
+
+	int m_debug_mode;
+
+	int mn_fields;		/* number of entries in mpa_fields */
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;	/* local copy of the register contents */
+	bool *mp_dirty;		/* dirty flags cleared by register_flush() */
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/*
+ * One bit field of a register, with the precomputed layout (front mask,
+ * body words, tail mask) set up by field_init().
+ */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word;	/* shadow word index of the field's first bit */
+	uint32_t m_first_bit;	/* bit offset within that word */
+	uint32_t m_front_mask;	/* mask of field bits in the first word */
+	uint32_t m_body_length;	/* number of fully covered words */
+	uint32_t mn_words;	/* total words needed to hold the field value */
+	uint32_t m_tail_mask;	/* mask of field bits in the last word */
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a statically sized array (not valid on pointers). */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a Napatech PCI device id to the internal adapter id.
+ * Unknown device ids yield NT_HW_ADAPTER_ID_UNKNOWN.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		/* all NT40 variants resolve to the NT40E3 adapter id */
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/*
+ * Internal adapter family ids. Note NT_HW_ADAPTER_ID_NT40A01 is an alias
+ * of NT_HW_ADAPTER_ID_NT40E3, so the enumerators after it are shifted
+ * relative to their source order.
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA feature profile an image was built for. */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ * Returns the new instance or NULL on allocation failure.
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	/*
+	 * Fix: the original did malloc() + memset() without a NULL check,
+	 * dereferencing a NULL pointer on allocation failure. calloc()
+	 * zero-initializes and lets us simply propagate NULL to the caller.
+	 */
+	nthw_rac_t *p = calloc(1, sizeof(nthw_rac_t));
+
+	return p;
+}
+
+/*
+ * Destroy a RAC instance previously created with nthw_rac_new().
+ * Safe to call with NULL.
+ */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the instance contents before releasing the memory */
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Initialize a RAC (Register Access Control) instance.
+ *
+ * Resolves the RAC module, caches register/field handles plus their bus
+ * addresses and bit masks, and initializes the DMA state and access mutex.
+ *
+ * If p is NULL the function only probes for the RAC module: returns 0 when
+ * present, -1 otherwise. With a valid p it returns 0 on success or -1 if
+ * the module instance does not exist.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	{
+		/*
+		 * RAC is a primary communication channel:
+		 * turn off debug by default, except for rac_rab_init.
+		 * Fix: this block must run after mp_reg_rab_init has been
+		 * resolved above - the original code passed mp_reg_rab_init
+		 * to register_set_debug_mode() while still uninitialized.
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/*
+		 * Fix: the original cleared mp_reg_dbg_data (already NULL in
+		 * this branch) and left mp_fld_dbg_data uninitialized.
+		 */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw bus addresses for the fast register access helpers */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* NMB registers are optional; cache addresses only when present */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Return the number of RAB interfaces read from the FPGA product params. */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	const int n_interfaces = p->mn_param_rac_rab_interfaces;
+
+	return n_interfaces;
+}
+
+/*
+ * Busy-poll the RAB output-buffer "used" counter until at least word_cnt
+ * reply words are available. Returns 0 when the words arrived, -1 after
+ * the poll budget is exhausted.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_used_words = 0;
+
+	for (uint32_t n_poll = 0; n_poll < 100000; n_poll++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr,
+				    &n_used_words);
+		/* Extract the OB_USED field from the raw register value */
+		n_used_words = (n_used_words & p->rac_rab_buf_used_ob_used_mask) >>
+			       p->rac_rab_buf_used_ob_used_low;
+		if (n_used_words >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_fpga_info->mp_adapter_id_str, address, n_used_words, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access
+ * Buses (RAB). Software performs this as part of the driver load procedure:
+ * bit n of the mask resets the registers on RAB interface n, so a bit must
+ * be written as one and then cleared again.
+ *
+ * All RAB module registers return to their defaults, including the product
+ * specific RESET module (eg RST9xyz). As a consequence the official reset
+ * sequence must be exercised afterwards - all RAB modules are held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/*
+	 * The write is performed twice on purpose: the field API call first
+	 * (leaves a debug trace of the operation), then the raw register
+	 * write to make sure the value is applied.
+	 */
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Reset all RAB buses with the "flip/flip" sequence: all interfaces out of
+ * reset, all into reset, then all except interface 0 out again.
+ * Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* Build an all-interfaces bit mask from the interface count */
+	const int n_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rab_bus_mask = (1 << n_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rab_bus_count, n_rab_bus_mask);
+	assert(n_rab_bus_count);
+	assert(n_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Set up the RAB DMA channel: allocate the in/out DMA ring buffer once,
+ * program its IOVA into the adapter's IB/OB address registers, and sync
+ * the driver-side ring pointers with the hardware pointers.
+ * Returns 0 on success, -1 if the DMA buffer allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* One contiguous area: inbound ring followed by outbound ring */
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* Outbound ring starts right after the inbound ring */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers: byte offsets -> word indices */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Begin a RAB DMA transaction: take the instance mutex and mark the DMA
+ * channel active. The mutex stays held until nthw_rac_rab_dma_commit().
+ * Returns 0 on success, -1 if a transaction is already in progress.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	/* Already active: drop the lock and report the conflict */
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_fpga_info->mp_adapter_id_str);
+	return -1;
+}
+
+/*
+ * Kick off the queued DMA commands: append the completion marker to the
+ * inbound ring, clear the slot where the outbound completion will land,
+ * and write the new inbound write pointer to start the transfer.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word (ring index wraps at RAB_DMA_BUF_CNT) */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word so dma_wait can poll for it */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* Update DMA pointer (as a byte offset) and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll the outbound ring until the DMA completion marker appears, then
+ * advance the outbound read pointer past it and refill the inbound free
+ * count. Returns 0 on success, -1 on timeout (RAB_DMA_WAIT microsecond
+ * polls exhausted).
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word and reset the inbound budget */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Commit a RAB DMA transaction started with nthw_rac_rab_dma_begin():
+ * push the completion marker, wait for the transfer to finish, then
+ * release the instance mutex. Returns the wait result (0 ok, -1 timeout).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	if (!p->m_dma_active) {
+		/* No matching dma_begin - the mutex is not held here */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+
+	const int n_result = nthw_rac_rab_dma_wait(p);
+
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return n_result;
+}
+
+/* Volatile 32-bit MMIO read from BAR0 at the given byte offset. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *const p_reg =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					    reg_addr);
+
+	*p_data = *p_reg;
+}
+
+/* Volatile 32-bit MMIO write to BAR0 at the given byte offset. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *const p_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*p_reg = p_data;
+}
+
+/*
+ * Queue a RAB write command plus word_cnt data words in the DMA inbound
+ * ring. The transfer is only started later by nthw_rac_rab_dma_commit().
+ * word_cnt must be 1..256. Returns 0 on success, -1 on invalid length or
+ * when the inbound ring cannot hold the command, data and completion word.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/* +3 reserves room for the command word and the final completion pair */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* This call consumes the command word plus word_cnt data words */
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload words into the ring, wrapping the index */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read command of word_cnt words in the DMA inbound ring and
+ * report (via buf_ptr) where the reply will appear in the outbound ring.
+ * The transfer itself is started by nthw_rac_rab_dma_commit().
+ * word_cnt must be 1..256. Returns 0 on success, -1 on an invalid length
+ * or when the inbound ring is full.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fix: the original format string had a fifth conversion
+		 * specifier ("%08X") with no matching argument - undefined
+		 * behavior per C11 7.21.6.1 - and was missing the newline.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* A read consumes one inbound command word */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Tell the caller where the reply data will land in the outbound ring */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write of word_cnt words to 'address' on
+ * 'bus_id' via the IB/OB data registers: push a write command and the
+ * payload, push a completion command, then poll for and verify the echoed
+ * completion. Mutually exclusive with DMA mode.
+ * Returns 0 on success, -1 on parameter error, busy DMA, buffer-state
+ * mismatch, completion mismatch or RAB timeout.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/* Range-check the command fields before composing the command word */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Register-based access cannot be mixed with an active DMA transaction */
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write data to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			/* Some FPGAs require a write to advance the OB pointer */
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register (bit 31 = timeout/overflow flag) */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Synchronous (non-DMA) RAB read of word_cnt words from 'address' on
+ * 'bus_id' via the IB/OB data registers: push a read command, wait for the
+ * reply, then drain the data words from the output buffer into p_data.
+ * Returns 0 on success, -1 on parameter error, buffer-state mismatch or
+ * RAB timeout.
+ * NOTE(review): unlike nthw_rac_rab_write32, this does not reject an
+ * active DMA transaction - presumably an oversight; confirm with callers.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Range-check the command fields before composing the command word */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+		/* Compose the read command word */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		/* Some FPGAs require a write to advance the OB pointer */
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register (bit 31 = timeout/overflow flag) */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB input/output buffers: assert the FLUSH bit, clear the
+ * BUF_FREE status, poll until only the FLUSH bit remains set in BUF_USED
+ * (i.e. IB_USED and OB_USED are 0), then deassert FLUSH.
+ * Returns 0 on success, -1 if the buffers did not drain in time.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_RAC_H__
#define __NTHW_RAC_H__

#include "nt_util.h"
#include "nthw_bus.h"

#include <pthread.h>

/*
 * Register Access Controller (RAC) context.
 *
 * The RAC fronts the RAB (Register Access Bus) through which FPGA module
 * registers are read and written, either word-by-word through the IB/OB
 * (input/output buffer) data registers or in bulk through a DMA buffer.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_rac;

	/* Serializes all RAB transactions issued through this RAC instance */
	pthread_mutex_t m_mutex;

	/* Product parameters queried from the FPGA model */
	int mn_param_rac_rab_interfaces;
	int mn_param_rac_rab_ob_update;

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	/* Bit width and all-ones mask of the RAB INIT field */
	int mn_fld_rab_init_bw;
	uint32_t mn_fld_rab_init_mask;

	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	/* Input-buffer (request) and output-buffer (response) data registers */
	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	/* Free-space bookkeeping, incl. overflow and timeout status flags */
	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	/* Used-space bookkeeping and the buffer flush control bit */
	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	/* DMA buffer physical addresses (lo/hi halves, IB and OB sides) */
	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	/* DMA read/write pointer registers */
	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Cached register addresses (resolved once at init time) */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Cached field masks (resolved once at init time) */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* True while a DMA transaction sequence (begin..commit) is open */
	bool m_dma_active;

	struct nt_dma_s *m_dma;

	/* Virtual mappings of the DMA input/output buffers */
	volatile uint32_t *m_dma_in_buf;
	volatile uint32_t *m_dma_out_buf;

	/* Software-side buffer cursors and remaining input space */
	uint16_t m_dma_out_ptr_rd;
	uint16_t m_dma_in_ptr_wr;
	uint32_t m_in_free;
};

typedef struct nthw_rac nthw_rac_t;
typedef struct nthw_rac nthw_rac;

/* Cursor into a DMA buffer of 32-bit words, returned by the DMA read API */
struct dma_buf_ptr {
	uint32_t size;
	uint32_t index;
	volatile uint32_t *base;
};

/* Lifecycle: allocate, initialize against an FPGA instance, destroy */
nthw_rac_t *nthw_rac_new(void);
void nthw_rac_delete(nthw_rac_t *p);
int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);

int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);

int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);

int nthw_rac_rab_setup(nthw_rac_t *p);

int nthw_rac_rab_reset(nthw_rac_t *p);

/*
 * Multi-word RAB register access; the *_dma variants go through the DMA
 * buffer instead of the IB/OB data registers. All return 0 on success,
 * -1 on error.
 */
int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			uint32_t word_cnt, const uint32_t *p_data);
int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			    uint32_t word_cnt, const uint32_t *p_data);
int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
		       uint32_t word_cnt, uint32_t *p_data);
int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);

int nthw_rac_rab_flush(nthw_rac_t *p);

/* Bracket a batch of *_dma accesses */
int nthw_rac_rab_dma_begin(nthw_rac_t *p);
int nthw_rac_rab_dma_commit(nthw_rac_t *p);

/* Raw BAR register access helpers (no RAB involved) */
void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
		       uint32_t *p_data);
void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
			uint32_t p_data);

#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_STAT_H__
#define __NTHW_STAT_H__

/*
 * STA (statistics) module context.
 *
 * NOTE(review): this header references nt_fpga_t/nt_module_t/nt_field_t
 * but includes nothing itself - it relies on the includer pulling in the
 * FPGA model headers first; confirm include order at all call sites.
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_stat;
	int mn_instance;

	/* DMA statistics area layout version (1..6), derived in init */
	int mn_stat_layout_version;

	/* Set when the FPGA profile is FPGA_INFO_PROFILE_VSWITCH */
	bool mb_is_vswitch;
	bool mb_has_tx_stats;

	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	/* NT_DBS_PRESENT product parameter (adds counters from STA v0.6) */
	int m_dbs_present;

	int m_rx_port_replicate;

	/* Two counters (octets+packets) per color/CAT function */
	int m_nb_color_counters;

	/* Per-host-buffer counter counts */
	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	/* Per-port counter counts (version dependent) */
	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	/* Total 32-bit counter words in the DMA area */
	int m_nb_counters;

	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	/* Only present before STA module v0.3 (queried, may be NULL) */
	nt_field_t *mp_fld_tx_disable;

	nt_field_t *mp_fld_cnt_freeze;

	nt_field_t *mp_fld_stat_toggle_missed;

	/* Host address registers for the statistics DMA area */
	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	/* DMA area: physical/IOVA base and its virtual mapping */
	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;

	/* Timestamp slot placed right after the counters in the DMA area */
	uint64_t *mp_timestamp;
};

typedef struct nthw_stat nthw_stat_t;
typedef struct nthw_stat nthw_stat;

nthw_stat_t *nthw_stat_new(void);
int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
void nthw_stat_delete(nthw_stat_t *p);

int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
			   uint32_t *p_stat_dma_virtual);
int nthw_stat_trigger(nthw_stat_t *p);

#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef NTOSS_SYSTEM_NTLOG_H
#define NTOSS_SYSTEM_NTLOG_H

#include <stdarg.h>
#include <stdint.h>

#ifndef NT_LOG_MODULE_PREFIX

/* DPDK modules */
#define NT_LOG_MODULE_EAL 0
#define NT_LOG_MODULE_MALLOC 1
#define NT_LOG_MODULE_RING 2
#define NT_LOG_MODULE_MEMPOOL 3
#define NT_LOG_MODULE_TIMER 4
#define NT_LOG_MODULE_PMD 5
#define NT_LOG_MODULE_HASH 6
#define NT_LOG_MODULE_LPM 7
#define NT_LOG_MODULE_KNI 8
#define NT_LOG_MODULE_ACL 9
#define NT_LOG_MODULE_POWER 10
#define NT_LOG_MODULE_METER 11
#define NT_LOG_MODULE_SCHED 12
#define NT_LOG_MODULE_PORT 13
#define NT_LOG_MODULE_TABLE 14
#define NT_LOG_MODULE_PIPELINE 15
#define NT_LOG_MODULE_MBUF 16
#define NT_LOG_MODULE_CRYPTODEV 17
#define NT_LOG_MODULE_EFD 18
#define NT_LOG_MODULE_EVENTDEV 19
#define NT_LOG_MODULE_GSO 20
#define NT_LOG_MODULE_USER1 24
#define NT_LOG_MODULE_USER2 25
#define NT_LOG_MODULE_USER3 26
#define NT_LOG_MODULE_USER4 27
#define NT_LOG_MODULE_USER5 28
#define NT_LOG_MODULE_USER6 29
#define NT_LOG_MODULE_USER7 30
#define NT_LOG_MODULE_USER8 31

/* NT modules */
#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
#define NT_LOG_MODULE_NTHW 10001
#define NT_LOG_MODULE_FILTER 10002
#define NT_LOG_MODULE_VDPA 10003
#define NT_LOG_MODULE_FPGA 10004
#define NT_LOG_MODULE_NTCONNECT 10005
#define NT_LOG_MODULE_ETHDEV 10006
#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */

/* Number of NT modules and mapping from module id to a zero-based index */
#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
/* Token-paste helper: maps a bare token, e.g. NTHW -> NT_LOG_MODULE_NTHW */
#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type

#endif

/* Master compile-time switch for the NT_LOG facility */
#ifndef NT_LOG_ENABLE
#define NT_LOG_ENABLE 1
#endif

/*
 * Per-level compile-time switches; ERR/WRN/INF/DBG default to on, the
 * verbose DB1/DB2 levels default to off.
 */
#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
#ifndef NT_LOG_ENABLE_ERR
#define NT_LOG_ENABLE_ERR 1
#endif
#ifndef NT_LOG_ENABLE_WRN
#define NT_LOG_ENABLE_WRN 1
#endif
#ifndef NT_LOG_ENABLE_INF
#define NT_LOG_ENABLE_INF 1
#endif
#ifndef NT_LOG_ENABLE_DBG
#define NT_LOG_ENABLE_DBG 1
#endif
#ifndef NT_LOG_ENABLE_DB1
#define NT_LOG_ENABLE_DB1 0
#endif
#ifndef NT_LOG_ENABLE_DB2
#define NT_LOG_ENABLE_DB2 0
#endif
#endif

/*
 * Per-level dispatch macros: expand to a nt_log() call when the level is
 * compiled in, otherwise to nothing (the whole statement compiles away).
 */
#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_ERR(...)
#endif

#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_WRN(...)
#endif

#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_INF(...)
#endif

#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_DBG(...)
#endif

#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_DB1(...)
#endif

#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
#else
#define NT_LOG_NT_LOG_DB2(...)
#endif

/*
 * Main logging entry point, e.g. NT_LOG(ERR, NTHW, "fmt", args...).
 * 'level' and 'module' are bare tokens: the level selects one of the
 * dispatch macros above (so disabled levels cost nothing), the module is
 * expanded through NT_LOG_MODULE_PREFIX(), and "module: level: " is
 * prepended to the format string by string-literal concatenation.
 */
#define NT_LOG(level, module, ...)                                          \
	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
			      #module ": " #level ": " __VA_ARGS__)

/* Severity levels as distinct bits (usable as a mask) */
enum nt_log_level {
	NT_LOG_ERR = 0x001,
	NT_LOG_WRN = 0x002,
	NT_LOG_INF = 0x004,
	NT_LOG_DBG = 0x008,
	NT_LOG_DB1 = 0x010,
	NT_LOG_DB2 = 0x020,
};

/* Pluggable logger backend; installed once via nt_log_init() */
struct nt_log_impl {
	int (*init)(void);
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);
	int (*is_debug)(uint32_t module);
};

int nt_log_init(struct nt_log_impl *impl);

int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);

/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
int nt_log_is_debug(uint32_t module);

/*
 * nt log helper functions
 * to create a string for NT_LOG usage to output a one-liner log
 * to use when one single function call to NT_LOG is not optimal - that is
 * you do not know the number of parameters at programming time or it is variable
 */
char *ntlog_helper_str_alloc(const char *sinit);

void ntlog_helper_str_reset(char *s, const char *sinit);

void ntlog_helper_str_add(char *s, const char *format, ...);

void ntlog_helper_str_free(char *s);

#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Find the first newline of the trailing newline run in s, ignoring
 * trailing spaces, e.g. "hello\n\n\n" -> pointer to the first '\n'.
 *
 * @param s  NUL-terminated string (may be empty)
 * @return pointer into s, or NULL when s does not end with a newline
 */
static char *last_trailing_eol(char *s)
{
	int i = (int)strlen(s) - 1;

	/* Empty string: nothing to scan (the old code read s[-1] here) */
	if (i < 0)
		return NULL;
	/* Skip spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	if (s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+__rte_format_printf(2, 0)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
/*
 * Release a helper string obtained from ntlog_helper_str_alloc().
 * Passing NULL is harmless (free(NULL) is a no-op).
 */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..3850ccd934
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef NTOSS_SYSTEM_NT_UTIL_H
#define NTOSS_SYSTEM_NT_UTIL_H

#include <rte_cycles.h>
#include <rte_string_fns.h>

#define _unused __rte_unused

/* Decompose a packed PCI identifier into domain/bus/device/function */
#define PCIIDENT_TO_DOMAIN(pci_ident) \
	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
#define PCIIDENT_TO_BUSNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
#define PCIIDENT_TO_DEVNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
#define PCIIDENT_TO_FUNCNR(pci_ident) \
	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))

/* printf format matching the dddd:bb:dd.f BDF notation */
#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))

/* ALIGN: Align x to a boundary (the mask trick requires a to be a power of two) */
#define ALIGN(x, a)                           \
	({                                    \
		__typeof__(x) _a = (a);       \
		((x) + (_a - 1)) & ~(_a - 1); \
	})

/* PALIGN: Align pointer p to a boundary */
#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))

/* Allocation size matching minimum alignment of specified size */
/* NOTE(review): assumes rte_log2_u64() rounds up to the next power of two - confirm */
#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))

#define NT_OS_WAIT_USEC(x)    \
	rte_delay_us_sleep( \
		x) /* uses usleep which schedules out the calling thread */
/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
#define NT_OS_WAIT_USEC_POLL(x) \
	rte_delay_us(        \
		x)

/* Timer-cycle based wall-clock helpers (us/ns resolution) */
#define NT_OS_GET_TIME_US() \
	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
#define NT_OS_GET_TIME_NS() \
	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())

/*
 * A VFIO DMA mapping: device-visible IOVA, process virtual address
 * (stored as uint64_t), and the mapped size in bytes.
 */
struct nt_dma_s {
	uint64_t iova;
	uint64_t addr;
	uint64_t size;
};

struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
void nt_dma_free(struct nt_dma_s *vfio_addr);

/* VFIO map/unmap callbacks, installed once via nt_util_vfio_init() */
struct nt_util_vfio_impl {
	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
			    uint64_t size);
	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
			      uint64_t size);
};

void nt_util_vfio_init(struct nt_util_vfio_impl *impl);

#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%u, align=0x%X\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64 ", size=%u\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%u\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v8 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-29  8:15   ` Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+/* Maximum number of adapters handled by the driver */
+#define NUM_ADAPTER_MAX (8)
+/* Sizes the per-adapter port arrays (port id strings, sensors, link state) */
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		/* Ask the task to stop by clearing its run flag */
+		monitor_task_is_running[i] = 0;
+		/*
+		 * signum == -1 is the programmatic (non-signal) invocation
+		 * used by nt4ga_adapter_deinit(); only in that context is it
+		 * safe to join the thread and clear its handle.
+		 */
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Print adapter identification (device name, PCI ids, FPGA image details,
+ * hardware revision) followed by a statistics dump to the stream pfh.
+ * Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Render the PCI ident (domain/bus/device/function) for display */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	/* FPGA image identification and port/queue capabilities */
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	/* Append the statistics module dump */
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	/* Allocate the SPI v3 instance used for sensor setup */
+	nthw_spi_v3_t *p_spi = nthw_spi_v3_new();
+
+	if (!p_spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	/* Bind the SPI instance to the FPGA; release it again on failure */
+	if (nthw_spi_v3_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(p_spi);
+		return NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	/* Allocate the SPI instance used for periodic sensor reads */
+	nthw_spis_t *p_spi = nthw_spis_new();
+
+	if (!p_spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	/* Bind the SPI instance to the FPGA; release it again on failure */
+	if (nthw_spis_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(p_spi);
+		return NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * Append an initialized sensor group to the adapter sensor list and return
+ * the new list tail.  A NULL sensor (failed initialization) is skipped and
+ * the current tail is returned unchanged, so one failing sensor neither
+ * dereferences NULL nor breaks the chain for the remaining sensors.
+ */
+static struct nt_sensor_group *
+adapter_sensor_append(struct adapter_info_s *adapter,
+		      struct nt_sensor_group *tail,
+		      struct nt_sensor_group *sensor)
+{
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "Adapter sensor initialization failed\n");
+		return tail;
+	}
+
+	if (tail == NULL)
+		adapter->adapter_sensors = sensor; /* first list element */
+	else
+		tail->next = sensor;
+
+	adapter->adapter_sensors_cnt++;
+	return sensor;
+}
+
+/*
+ * Register the adapter sensors: the FPGA temperature sensor and, when a
+ * SPI channel to the AVR is available, the AVR-managed board sensors
+ * (fan, PSU temperatures, PCB temperature).
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors = NULL;
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor */
+	sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+		fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+					     NT_SENSOR_FPGA_TEMP, p_fpga));
+
+	/* AVR board sensors - require a working SPI channel */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"FAN0",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_FAN,
+						NT_SENSOR_NT200E3_FAN_SPEED,
+						SENSOR_MON_FAN, 0,
+						SENSOR_MON_BIG_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&fan, 0xFFFF));
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PSU0",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200E3_PSU0_TEMP,
+						SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+						SENSOR_MON_LITTLE_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&exar7724_tj, 0xFFFF));
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PSU1",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200A02_PSU1_TEMP,
+						SENSOR_MON_MP2886A, 0x8d,
+						SENSOR_MON_BIG_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&mp2886a_tj, 0xFFFF));
+
+			sensors_list_ptr = adapter_sensor_append(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PCB",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200E3_PCB_TEMP,
+						SENSOR_MON_DS1775, 0,
+						SENSOR_MON_LITTLE_ENDIAN,
+						SENSOR_MON_SIGNED,
+						&ds1775_t, 0xFFFF));
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		/* The setup SPI instance is only needed during init */
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * One-time bring-up of an adapter: decode PCI ids, build ident strings,
+ * instantiate the FPGA model, run the optional PCI TA/TG throughput check,
+ * register sensors, initialize the per-port link code, set up EPP when the
+ * FPGA image provides it, and start the statistics module.
+ * Returns 0 on success or the non-zero error code of the failing step.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	/*
+	 * NOTE(review): allocation failures below are tolerated (pointers are
+	 * NULL-checked before use), but the error returns later in this
+	 * function do not free these strings - presumably
+	 * nt4ga_adapter_deinit() is expected to run even after a failed
+	 * init; confirm callers guarantee that, otherwise they leak.
+	 */
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	/* Decode adapter id and hardware fields from the PCI device id */
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Build one ident string per possible port interface entry */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model - fills in the fpga_info fields used below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 * Throughput measurement is best-effort: absence of TA/TG only warns.
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	/* Register adapter sensors (FPGA temperature, AVR board sensors) */
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Per-port link initialization, selected by FPGA product id */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional - only set up when present in the FPGA image */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter: stop the monitoring threads and statistics, shut
+ * the FPGA model down, reset the RAC/RAB flip-flop, and free the ident
+ * strings and sensor lists allocated during init.
+ * Returns the result of the RAC/RAB reset.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1 = programmatic stop: clears the run flags and joins the threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/*
+	 * Free adapter port ident strings.
+	 * nt4ga_adapter_init() allocates one string per array entry (it loops
+	 * over ARRAY_SIZE(mp_port_id_str), not over n_phy_ports), so free the
+	 * whole array to avoid leaking the entries beyond n_phy_ports.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str); i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors (singly linked list built by adapter_sensor_setup) */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors - one list per physical port */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id; /* (pci_device_id >> 4) & 0x00ff */
+	int hw_product_type; /* pci_device_id & 0x000f */
+	int hw_reserved1; /* (pci_device_id >> 12) & 0x000f */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Top-level state for one adapter instance */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg; /* PCI TA/TG (throughput test) state */
+	struct nt4ga_stat_s nt4ga_stat; /* Statistics module state */
+	struct nt4ga_filter_s nt4ga_filter; /* Filter module state */
+	struct nt4ga_link_s nt4ga_link; /* Link module state */
+
+	struct hw_info_s hw_info; /* PCI-derived hardware identification */
+	struct fpga_info_s fpga_info; /* FPGA model state */
+
+	uint16_t adapter_sensors_cnt; /* Number of entries in adapter_sensors */
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX]; /* Per-port NIM sensor counts */
+	struct nt_sensor_group *adapter_sensors; /* Linked list of adapter sensors */
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX]; /* Per-port NIM sensor lists */
+
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX]; /* malloc'ed per-port ident strings */
+	char *mp_adapter_id_str; /* malloc'ed adapter ident string ("PCI:...") */
+	char *p_dev_name; /* malloc'ed PCI ident string */
+	volatile bool *pb_shutdown; /* Shutdown request flag - presumably set by owner; confirm */
+
+	int adapter_no; /* Adapter index */
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port); /* Device up transition; optional */
+	int (*mto_stop)(int adapter, int port); /* Device down transition; optional */
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt; /* Number of interfaces */
+	int n_queues_per_intf_cnt; /* Number of queues per interface */
+
+	struct flow_nic_dev *mp_flow_device; /* Flow device handle - ownership TBD; confirm */
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* p is dereferenced below, so it must not carry the _unused marker */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	/* Report the cached NIM presence state for this port */
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	/* Administratively UP means "not disabled" */
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	/* The adm state is the inverse of the pending disable action */
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	/* Delegates; no separate link-status state is stored */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	/* Report the last sampled link-up state for this port */
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	/* Record the speed both as a pending action and as current info */
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	/* Report the recorded link speed */
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/*
+	 * Auto-negotiation is not currently supported by the link code;
+	 * intentional no-op (the unused local pointer was removed).
+	 */
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	/*
+	 * Auto-negotiation is not currently supported by the link code;
+	 * always report it as enabled (the unused local pointer was removed).
+	 */
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	/* Record the requested duplex mode as a pending port action */
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	/* Report the recorded duplex mode */
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	/* Record the requested loopback mode as a pending port action */
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	/* Report the recorded loopback mode */
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	/*
+	 * Returns a by-value copy of the cached NIM I2C context.
+	 * NOTE(review): reads the 100G-specific union member (u.var100g) -
+	 * confirm this is only called for 100G adapter variants.
+	 */
+	nim_i2c_ctx_t nim_ctx = p_link->u.var100g.nim_ctx[port];
+	return nim_ctx;
+}
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	/* Only QSFP28-based port types support TX laser control here */
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		/* rx_only modules have no TX laser to control */
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			/* -1: lane index - presumably "all lanes"; confirm
+			 * against nim_qsfp_plus_nim_set_tx_laser_disable()
+			 */
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1; /* NIM access failed */
+		}
+	} else {
+		return -1; /* unsupported port type */
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode.
+ * OFF/ON are aliases of MANUAL/AUTO respectively.
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state, info and pending-action records
+ */
+typedef struct link_state_s {
+	bool link_disabled; /* Port/link is administratively disabled */
+	bool nim_present; /* A NIM module is detected in the port */
+	bool lh_nim_absent; /* presumably "latched high: NIM was absent" - confirm */
+	bool link_up; /* Current link-up state */
+	enum nt_link_state_e link_state; /* Current link state */
+	enum nt_link_state_e link_state_latched; /* Latched link state */
+} link_state_t;
+
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed; /* Recorded link speed */
+	enum nt_link_duplex_e link_duplex; /* Recorded duplex mode */
+	enum nt_link_auto_neg_e link_auto_neg; /* Recorded auto-negotiation mode */
+} link_info_t;
+
+typedef struct port_action_s {
+	bool port_disable; /* true: port must be/stay disabled */
+	enum nt_link_speed_e port_speed; /* Requested port speed */
+	enum nt_link_duplex_e port_duplex; /* Requested duplex mode */
+	uint32_t port_lpbk_mode; /* Requested loopback mode */
+} port_action_t;
+
+/* Variant data for 100G adapters */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX]; /* Per-port MAC/PCS */
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX]; /* Per-port GPIO/PHY */
+} adapter_100g_t;
+
+/*
+ * Adapter-variant union: nim_ctx overlays the first field of every variant,
+ * so generic code can access the NIM contexts without knowing the variant.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+/* Aggregated link state for one adapter */
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX]; /* Sampled per-port state */
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX]; /* Recorded per-port parameters */
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX]; /* Pending per-port actions */
+	uint32_t speed_capa; /* Adapter-wide speed capability mask */
+	/* */
+	bool variables_initialized; /* Set once the fields above are valid */
+	adapter_var_u u; /* Variant-specific data (e.g. 100G) */
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port: link adm state
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+/* Sub-states of the link-up sequence driven by the link monitoring thread. */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+/*
+ * Per-port link monitoring state.
+ * NOTE(review): this type is not referenced anywhere in the code visible in
+ * this file -- confirm it is used elsewhere or remove it.
+ */
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback mode on one port's MAC/PCS.
+ *
+ * mode: 1 = host loopback, 2 = line loopback, anything else = remove the
+ * loopback that `last_mode` had applied (if any).
+ * After any change the RX path is reset and, once the path is out of reset,
+ * the BIP/FEC counters are cleared so stale errors are not reported.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polarity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		/* Host loopback stays inside the FPGA; no GTY lane swap wanted */
+		swap_polarity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* Undo whatever the previous mode configured */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* NT200A01 (HW build 2) and NT200A02 boards need GTY polarity swap */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polarity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear error counters once the RX path is out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ *
+ * Fills *state from the MAC/PCS link summary registers and the GPIO
+ * "module present" pin. The raw summary is logged only when it differs
+ * from the previously logged one for this adapter/port, so the debug log
+ * is not flooded while the link is stable. Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Deduplicated debug logging: one static buffer per adapter/port */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			/* Copy and force NUL-termination at the last byte */
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Query the GPIO PHY "module present" pin for one interface.
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	bool present;
+
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	present = nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+	return present;
+}
+
+/*
+ * Enable the RX data path on one port's MAC/PCS.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable the TX data path and select host as the TX source.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable the RX data path on one port's MAC/PCS.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable the TX data path and deselect host as the TX source.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Pulse the RX path reset: assert, settle, de-assert, settle.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ *
+ * Applies the per-board GTY lane polarity tables when `swap` is true,
+ * otherwise clears the polarity swap on all four lanes. Always returns 0.
+ * NOTE(review): the tables are dimensioned [2][4], so `port` is assumed to
+ * be 0 or 1 here -- confirm callers never pass a higher port index.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ * Resets the RX path if the hardware flags a reset requirement, a high
+ * bit-error rate, or not all FEC alignment markers locked.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool needs_reset = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_locked = nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (needs_reset || hi_ber || !fec_locked)
+		reset_rx(drv, mac_pcs);
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets the NIM module on `port` via GPIO, verifies it still responds and
+ * is of a supported type, and de-asserts (enable) or asserts (!enable) its
+ * low-power mode. Returns 0 on success (also when no module is present at
+ * entry), non-zero on failure.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U;
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		/* Port is being taken down: quiesce the data path first */
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		/*
+		 * The module was present before the reset but is gone now.
+		 * (Message fixed: it previously read "no longer absent".)
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ *
+ * Sequence: set default link state variables, apply board-specific GTY
+ * polarity/tuning, enable TX, reset RX, create/initialize the NIM, set the
+ * RX timestamp compensation, then enable RX. Returns 0 on success or the
+ * NIM initialization error code.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		/* Unknown board: the tuning switch below will hit its assert */
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/*
+	 * NOTE(review): the first nthw_gmf_init(NULL, ...) call appears to
+	 * probe for GMF module presence before constructing it -- confirm
+	 * against the nthw_gmf API.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		/*
+		 * Per-port, per-lane {pre, diff, post} GTY TX tuning values.
+		 * NOTE(review): table is dimensioned [2][4][3], so `port` is
+		 * assumed to be 0 or 1 here -- confirm for this board.
+		 */
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		/* These boards use one fixed tuning for all lanes */
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ *
+ * Monitoring loop for all ports of one adapter. Every ~0.5s it reads the
+ * adapter and NIM sensors, handles administrative enable/disable and
+ * loopback-mode changes, (re-)initializes ports on NIM insertion, and
+ * tracks/reports link state changes. Runs until
+ * monitor_task_is_running[adapter_no] is cleared. Always returns 0.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Fixed: log this port's context, not nim_ctx[0]'s */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ *
+ * pthread entry point: runs the shared state machine until it is asked to
+ * stop, then returns NULL.
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ *
+ * One-time per adapter: constructs the MAC/PCS, NIM I2C and GPIO PHY
+ * contexts for every physical port, then starts the link monitoring thread
+ * if it is not already running. Returns 0 on success, otherwise the first
+ * failing sub-initializer's (or pthread_create's) error code.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* I2C bus instance for this port's NIM */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		/* Mark done only when every port initialized cleanly */
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Construct and initialize the PCIe test-accelerator (TA) and the read/write
+ * traffic-generator (TG) modules for this adapter.
+ *
+ * Returns the number of modules that could NOT be found (0 = all present),
+ * or -1 on an internal error. Missing modules are logged as warnings, not
+ * treated as fatal.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	/*
+	 * NOTE(review): `p` is the address of a struct member and can never be
+	 * NULL here; the else-branch is dead defensive code.
+	 */
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable/disable the test accelerator's control register. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_out. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_out)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_out);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_out. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_out)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_out);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCIe read traffic generator.
+ * The DMA address for the slot is the buffer base `iova` plus the slot's
+ * byte offset slot_addr * req_size.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	/* Widen before multiplying so the offset cannot wrap in 32-bit math */
+	const uint64_t n_phys_addr = iova + ((uint64_t)slot_addr * req_size);
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the read TG for `num_iterations` runs (0 stops it). Always returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll (1 ms per attempt, up to 1000 attempts) until the read TG reports
+ * ready. Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	uint32_t rdy = 0;
+	int poll = 0;
+
+	do {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		rdy = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	} while (rdy == 0);
+
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCIe write traffic generator.
+ * The DMA address for the slot is the buffer base `iova` plus the slot's
+ * byte offset slot_addr * req_size.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	/* Widen before multiplying so the offset cannot wrap in 32-bit math */
+	const uint64_t n_phys_addr = iova + ((uint64_t)slot_addr * req_size);
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the write TG for `num_iterations` runs (0 stops it). Always returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll (1 ms per attempt, up to 1000 attempts) until the write TG reports
+ * ready. Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	uint32_t rdy = 0;
+	int poll = 0;
+
+	do {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		rdy = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	} while (rdy == 0);
+
+	return 0;
+}
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Run the HIF traffic-generator throughput measurement over the requested
+ * NUMA node(s) and TG direction(s), delegating each run to
+ * nt4ga_pci_ta_tg_measure_throughput_run().
+ *
+ * numa_node == UINT8_MAX selects "all" NUMA nodes (note: numa_end is
+ * currently clamped to numa_begin, so only a single node is visited).
+ * direction <= 0 sweeps directions 1..3 (read-only, write-only, combined);
+ * a positive value runs only that direction.
+ * n_pkt_size, n_batch_count and n_delay fall back to the TG_* defaults when
+ * <= 0; n_delay == 0 is rejected with -1.
+ *
+ * Returns 0 on completion; measurement errors are logged but deliberately
+ * do not change the return value.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary endpoint test parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave endpoint test parameters */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/* Fix: clear the slave (not primary) ref clock counter */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* PCIe Test Access (TA) / Traffic Generator (TG) support for nt4ga adapters. */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+/* Enable logging of TA/TG test summary information */
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator batch: 8 packets of 2 KiB each */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+/* Default measurement interval */
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles for the PCIe TA/TG FPGA modules of one adapter */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg;	/* read-direction traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg;	/* write-direction traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta;	/* test-access (verification) module */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Create the TA/TG module handles for the adapter */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one throughput measurement; results go into pri/sla counter structs */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Sweep NUMA nodes/directions and run measurements; 0 on completion */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/* Inline timestamp format s pcap 32:32 bits. Convert to nsecs */
/* Convert a 32:32 (seconds:nanoseconds) split timestamp into nanoseconds. */
static inline uint64_t timestamp2ns(uint64_t ts)
{
	const uint64_t secs = ts >> 32;
	const uint64_t nsecs = ts & 0xffffffff;

	return secs * 1000000000ULL + nsecs;
}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Harvest the statistics DMA area into the driver-side counter structures
+ * and refresh last_timestamp. Dispatches to the capture or vswitch decoder
+ * depending on the FPGA flavor.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	uint32_t *const p_dma_buf = p_nt4ga_stat->p_stat_dma_virtual;
+
+	if (!p_nthw_stat->mb_is_vswitch) {
+		/* Capture FPGA: the DMA block carries a usable timestamp */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat, p_dma_buf);
+	} else {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and
+		 * other Vswitch FPGA images may only clear all bits in this
+		 * memory location. TBV.
+		 * Consequently, last_timestamp must be constructed via a
+		 * system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat, p_dma_buf);
+	}
+
+	return 0;
+}
+
+/*
+ * First-stage statistics init: zero the per-adapter stat state and create
+ * the STA and RMC module handles for FPGA instance 0.
+ *
+ * Fix: the original created both module handles before checking either, so
+ * a failing nthw_stat_new() leaked the already-created RMC handle. The RMC
+ * handle is now only created after the STA handle succeeds.
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc;
+
+	if (!p_nt4ga_stat) {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+
+	p_nthw_stat = nthw_stat_new();
+	if (!p_nthw_stat) {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	p_nthw_rmc = nthw_rmc_new();
+	if (!p_nthw_rmc) {
+		/* Undo the STA handle before bailing out */
+		nthw_stat_delete(p_nthw_stat);
+
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+	nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+	p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+	nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+	/* Cache the host-buffer and port counts for the collect routines */
+	p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+	p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+	p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+	p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+
+	return 0;
+}
+
+/*
+ * Second-stage statistics setup: allocate the 16K-aligned statistics DMA
+ * area, hand its address to the STA module, and allocate the driver-side
+ * counter structures (color, host-buffer, per-port and - for inline
+ * adapters - FLM). The RMC is blocked while the DMA address is configured.
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Block the RMC while the statistics DMA is being (re)configured */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		/* One 32-bit word per counter plus the trailing DMA timestamp */
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/* NOTE(review): six conversions but five arguments — verify format string */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %p %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/* NOTE(review): %02ld used with int n_physical_adapter_no — verify specifier */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02ld, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX64 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison-fill (0xaa) so never-written counter words are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	/*
+	 * NOTE(review): the error paths below do not free earlier allocations;
+	 * presumably nt4ga_stat_stop() is expected to clean up — confirm.
+	 */
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* One entry per Rx and Tx host buffer, contiguously */
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		/* Vswitch: per-host-buffer port counters, no FLM counters */
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		/* Inline: fixed-size per-physical-port counters plus FLM */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Presumably starts the STA statistics delivery — see nthw_stat_trigger() */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources allocated by nt4ga_stat_setup().
+ *
+ * free(NULL) is a no-op per the C standard, so the per-pointer guards of
+ * the original are redundant and removed. Each pointer is reset to NULL
+ * immediately after being freed, so a repeated call is harmless and any
+ * aliasing between the virt/cap views cannot cause a double free.
+ *
+ * Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() is not documented NULL-safe; keep the guard */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/* Write the per-port Rx/Tx total counters for every physical port to pfh. */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int intf;
+
+	for (intf = 0; intf < fpga_info->n_phy_ports; intf++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, intf,
+			p_nt4ga_stat->a_port_rx_packets_total[intf],
+			p_nt4ga_stat->a_port_rx_octets_total[intf],
+			p_nt4ga_stat->a_port_rx_drops_total[intf],
+			p_nt4ga_stat->a_port_tx_packets_total[intf],
+			p_nt4ga_stat->a_port_tx_octets_total[intf],
+			p_nt4ga_stat->a_port_tx_drops_total[intf]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  42] :
+			0;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  47] :
+			0;
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53];
+
+		/* Rx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] +
+			(p_nthw_stat->m_dbs_present ?
+			 p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					   42] :
+			 0);
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23];
+
+		/* Tx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
/* Per-color counters for flow statistics (one entry per color id). */
struct color_counters {
	uint64_t color_packets; /* packets accounted to this color */
	uint64_t color_bytes; /* bytes accounted to this color */
	uint8_t tcp_flags; /* accumulated TCP flags - TODO confirm OR vs last-seen semantics */
};
+
/* Per host-buffer packet/byte counters. */
struct host_buffer_counters {
	uint64_t flush_packets;
	uint64_t drop_packets;
	uint64_t fwd_packets;
	uint64_t dbs_drop_packets; /* presumably drops attributed to the DBS module - confirm */
	uint64_t flush_bytes;
	uint64_t drop_bytes;
	uint64_t fwd_bytes;
	uint64_t dbs_drop_bytes;
};
+
/*
 * Per-port counters for the capability ("cap") profile, v2.
 * The fields mirror the FPGA statistics DMA layout: the collect routine in
 * nt4ga_stat.c accumulates each field from a fixed word index in the
 * per-port DMA record.
 */
struct port_counters_v2 {
	/* Rx/Tx common port counters */
	uint64_t drop_events;
	uint64_t pkts;
	/* FPGA counters */
	uint64_t octets;
	uint64_t broadcast_pkts;
	uint64_t multicast_pkts;
	uint64_t unicast_pkts;
	uint64_t pkts_alignment;
	uint64_t pkts_code_violation;
	uint64_t pkts_crc;
	uint64_t undersize_pkts;
	uint64_t oversize_pkts;
	uint64_t fragments;
	uint64_t jabbers_not_truncated;
	uint64_t jabbers_truncated;
	uint64_t pkts_64_octets;
	uint64_t pkts_65_to_127_octets;
	uint64_t pkts_128_to_255_octets;
	uint64_t pkts_256_to_511_octets;
	uint64_t pkts_512_to_1023_octets;
	uint64_t pkts_1024_to_1518_octets;
	uint64_t pkts_1519_to_2047_octets;
	uint64_t pkts_2048_to_4095_octets;
	uint64_t pkts_4096_to_8191_octets;
	uint64_t pkts_8192_to_max_octets;
	uint64_t mac_drop_events;
	uint64_t pkts_lr;
	/* Rx only port counters */
	uint64_t duplicate;
	uint64_t pkts_ip_chksum_error;
	uint64_t pkts_udp_chksum_error;
	uint64_t pkts_tcp_chksum_error;
	uint64_t pkts_giant_undersize;
	uint64_t pkts_baby_giant;
	uint64_t pkts_not_isl_vlan_mpls;
	uint64_t pkts_isl;
	uint64_t pkts_vlan;
	uint64_t pkts_isl_vlan;
	uint64_t pkts_mpls;
	uint64_t pkts_isl_mpls;
	uint64_t pkts_vlan_mpls;
	uint64_t pkts_isl_vlan_mpls;
	uint64_t pkts_no_filter;
	uint64_t pkts_dedup_drop;
	uint64_t pkts_filter_drop;
	uint64_t pkts_overflow;
	uint64_t pkts_dbs_drop; /* only accumulated when the DBS module is present */
	uint64_t octets_no_filter;
	uint64_t octets_dedup_drop;
	uint64_t octets_filter_drop;
	uint64_t octets_overflow;
	uint64_t octets_dbs_drop; /* only accumulated when the DBS module is present */
	uint64_t ipft_first_hit;
	uint64_t ipft_first_not_hit;
	uint64_t ipft_mid_hit;
	uint64_t ipft_mid_not_hit;
	uint64_t ipft_last_hit;
	uint64_t ipft_last_not_hit;
};
+
/* Per-port counters for the VSWITCH/inline profile, v1. */
struct port_counters_vswitch_v1 {
	/* Rx/Tx common port counters */
	uint64_t octets;
	uint64_t pkts;
	uint64_t drop_events;
	uint64_t qos_drop_octets;
	uint64_t qos_drop_pkts;
};
+
/*
 * Flow Matcher (FLM) statistics counters. The two groups below correspond
 * to the FPGA FLM module versions that introduced them.
 */
struct flm_counters_v1 {
	/* FLM 0.17 */
	uint64_t current;
	uint64_t learn_done;
	uint64_t learn_ignore;
	uint64_t learn_fail;
	uint64_t unlearn_done;
	uint64_t unlearn_ignore;
	uint64_t auto_unlearn_done;
	uint64_t auto_unlearn_ignore;
	uint64_t auto_unlearn_fail;
	uint64_t timeout_unlearn_done;
	uint64_t rel_done;
	uint64_t rel_ignore;
	/* FLM 0.20 */
	uint64_t prb_done;
	uint64_t prb_ignore;
	uint64_t sta_done;
	uint64_t inf_done;
	uint64_t inf_skip;
	uint64_t pck_hit;
	uint64_t pck_miss;
	uint64_t pck_unh;
	uint64_t pck_dis;
	uint64_t csh_hit;
	uint64_t csh_miss;
	uint64_t csh_unh;
	uint64_t cuc_start;
	uint64_t cuc_move;
};
+
/*
 * Aggregated statistics state for one adapter: FPGA module handles, the
 * DMA area the FPGA writes counters into, and accumulated totals.
 */
struct nt4ga_stat_s {
	nthw_stat_t *mp_nthw_stat;
	nthw_rmc_t *mp_nthw_rmc;
	struct nt_dma_s *p_stat_dma;
	uint32_t *p_stat_dma_virtual; /* CPU-visible view of the stats DMA area */
	uint32_t n_stat_size;

	uint64_t last_timestamp;

	int mn_rx_host_buffers;
	int mn_tx_host_buffers;

	int mn_rx_ports;
	int mn_tx_ports;

	struct color_counters *mp_stat_structs_color;
	/* For calculating increments between stats polls */
	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];

	/* Active member depends on the adapter profile (VSWITCH vs capability) */
	union {
		/* Port counters for VSWITCH/inline */
		struct {
			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
		} virt;
		struct {
			struct port_counters_v2 *mp_stat_structs_port_rx;
			struct port_counters_v2 *mp_stat_structs_port_tx;
		} cap;
	};

	struct host_buffer_counters *mp_stat_structs_hb;

	int flm_stat_ver;
	struct flm_counters_v1 *mp_stat_structs_flm;

	/* Rx/Tx totals: */
	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */

	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
	/* Base is for calculating increments between statistics reads */
	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
/*
 * Return true when the product number identifies a copper SFP known to
 * support tri-speed (10/100/1000) operation.
 */
static bool sfp_is_supported_tri_speed_pn(char *prod_no)
{
	static const char *const known_pns[] = {
		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
	};
	const size_t n_pns = sizeof(known_pns) / sizeof(known_pns[0]);
	size_t idx = 0;

	/* Linear scan of the known tri-speed product numbers */
	while (idx < n_pns) {
		if (strcmp(known_pns[idx], prod_no) == 0)
			return true;
		idx++;
	}

	return false;
}
+
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP:
+		return true;
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
/* Map the raw NIM id byte cached in the context to the identifier enum. */
nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
{
	return (nt_nim_identifier_t)ctx->nim_id;
}
+
/*
 * Thin wrapper around nthw_iic_read_data()/nthw_iic_write_data() selecting
 * the direction via do_write. The 8-bit bus address is halved because the
 * nthw layer multiplies it by 2 again. lin_addr is accepted for interface
 * symmetry with callers but unused here. Returns the nthw result (0 = OK).
 */
static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
				uint16_t lin_addr, uint8_t i2c_addr,
				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
{
	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
	const uint8_t i2c_devaddr = i2c_addr / 2U;
	(void)lin_addr; /* Unused */

	if (do_write)
		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
					 seq_cnt, p_data);
	else
		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
					seq_cnt, p_data);
}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Only write new page select value if necessary */
+	if (page_sel != curr_page_sel) {
+		if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+					 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+					 sizeof(page_sel), &page_sel) != 0)
+			return -1;
+	}
+	return 0;
+}
+
/*
 * Read or write "length" bytes at a "linear" NIM address, hiding the split
 * between the underlying I2C devices and (for paged modules) the
 * page-select mechanism:
 *   [0..127]              lower 0xA0 window (all modules)
 *   [128..]               paged: mapped to the 0xA0 upper window via
 *                         page select; non-paged: [256..511] maps to 0xA2
 *   [SFP_PHY_LIN_ADDR..]  0xAC PHY registers; 16-bit entities accessed as
 *                         two bytes per register, MSByte first
 * Returns 0 on success, -1 on range-validation or I2C errors.
 */
static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
				   uint16_t lin_addr, uint16_t length,
				   uint8_t *p_data, bool do_write)
{
	uint16_t i;
	uint8_t reg_addr; /* The actual register address in I2C device */
	uint8_t i2c_addr;
	int block_size = 128; /* Equal to size of MSA pages */
	int seq_cnt;
	int max_seq_cnt = 1;
	int multi_byte = 1; /* One byte per I2C register is default */
	const int m_port_no = ctx->instance - 2;

	if (lin_addr >= SFP_PHY_LIN_ADDR) {
		/*
		 * This represents an address space at I2C address 0xAC for SFP modules
		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
		 * accessed MSByte first and this reading latches the LSByte that is
		 * subsequently read from the same address.
		 */
		multi_byte = 2;
		max_seq_cnt = 2;

		/* Test for correct multibyte access */
		if ((length % multi_byte) != 0) {
			NT_LOG(ERR, ETHDEV,
			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
			return -1;
		}

		if (lin_addr + (length / 2) >
				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
			NT_LOG(ERR, ETHDEV,
			       "Port %d: %s: Access above address range [0x%X..0x%X].",
			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
			return -1;
		}
	} else if (lin_addr + length > 128) {
		/*
		 * Page addressing could be relevant since the last byte is outside the
		 * basic range so check if it is enabled
		 */
		if (m_page_addressing) {
			/* Crossing into the PHY address range is not allowed */
			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
				NT_LOG(ERR, ETHDEV,
				       "Port %d: %s: Access above paged address range [0..0x%X].",
				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
				return -1;
			}
		} else {
			/* Access outside 0xA2 address range not allowed */
			if (lin_addr + length > 512) {
				NT_LOG(ERR, ETHDEV,
				       "Port %d: %s: Access above address range [0..511].",
				       m_port_no, __func__);
				return -1;
			}
		}
	}
	/* No missing else here - all devices supports access to address [0..127] */

	/* Transfer in chunks; each iteration maps lin_addr to (i2c_addr, reg_addr) */
	for (i = 0; i < length;) {
		bool use_page_select = false;

		/*
		 * Find out how much can be read from the current block in case of
		 * single byte access
		 */
		if (multi_byte == 1)
			max_seq_cnt = block_size - (lin_addr % block_size);

		if (m_page_addressing) {
			if (lin_addr >= 128) { /* Only page setup above this address */
				use_page_select = true;

				/* Map to [128..255] of 0xA0 device */
				reg_addr = (uint8_t)(block_size +
						    (lin_addr % block_size));
			} else {
				reg_addr = (uint8_t)lin_addr;
			}
			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
		} else {
			if (lin_addr >= SFP_PHY_LIN_ADDR) {
				/* Map to address [0..31] of 0xAC device */
				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
				i2c_addr = nim_i2c_0xac;
			} else if (lin_addr >= 256) {
				/* Map to address [0..255] of 0xA2 device */
				reg_addr = (uint8_t)(lin_addr - 256);
				i2c_addr = nim_i2c_0xa2;
			} else {
				reg_addr = (uint8_t)lin_addr;
				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
			}
		}

		/* Now actually do the reading/writing */
		seq_cnt = length - i; /* Number of remaining bytes */

		if (seq_cnt > max_seq_cnt)
			seq_cnt = max_seq_cnt;

		/*
		 * Read a number of bytes without explicitly specifying a new address.
		 * This can speed up I2C access since automatic incrementation of the
		 * I2C device internal address counter can be used. It also allows
		 * a HW implementation, that can deal with block access.
		 * Furthermore it also allows for access to data that must be accessed
		 * as 16bit words reading two bytes at each address eg PHYs.
		 */
		if (use_page_select) {
			if (nim_setup_page(ctx,
					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
				NT_LOG(ERR, ETHDEV,
				       "%s: Cannot set up page for linear address %u\n",
				       __func__, lin_addr);
				return -1;
			}
		}
		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
					    reg_addr, (uint8_t)seq_cnt,
					    p_data) != 0) {
			NT_LOG(ERR, ETHDEV,
			       "%s: Call to NIM_ReadWriteI2cData failed\n",
			       __func__);
			return -1;
		}

		p_data += seq_cnt;
		i = (uint16_t)(i + seq_cnt);
		/* PHY accesses consume two bytes per register address */
		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
	}
	return 0;
}
+
/*
 * Read "length" bytes at linear NIM address "lin_addr" into "data",
 * honouring the module's page-addressing capability.
 * Returns 0 on success, -1 on failure.
 */
int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
		void *data)
{
	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
				       lin_addr, length, data, NIM_READ);
}
+
/*
 * Write "length" bytes from "data" to linear NIM address "lin_addr",
 * honouring the module's page-addressing capability.
 * Returns 0 on success, -1 on failure.
 */
static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
			void *data)
{
	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
				       lin_addr, length, data, NIM_WRITE);
}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
/*
 * Read the one-byte module identifier into ctx->nim_id.
 * Page addressing is irrelevant because only the first byte of the lower
 * window is read. Returns 0 on success, -1 on failure.
 */
static int nim_read_id(nim_i2c_ctx_t *ctx)
{
	/* We are only reading the first byte so we don't care about pages here. */
	const bool use_page_addressing = false;

	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
				    &ctx->nim_id, NIM_READ) != 0)
		return -1;
	return 0;
}
+
/*
 * Common NIM context initialisation: read the module identifier byte and
 * reset all cached/derived fields to their defaults.
 * Returns 0 on success, or the non-zero result of nim_read_id().
 */
static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
{
	ctx->nim_id = 0;
	int res = nim_read_id(ctx);

	if (res) {
		NT_LOG(ERR, PMD, "Can't read NIM id.");
		return res;
	}
	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
	memset(ctx->date, 0, sizeof(ctx->date));
	memset(ctx->rev, 0, sizeof(ctx->rev));

	ctx->content_valid = false;
	memset(ctx->len_info, 0, sizeof(ctx->len_info));
	ctx->pwr_level_req = 0;
	ctx->pwr_level_cur = 0;
	ctx->avg_pwr = false;
	ctx->tx_disable = false;
	ctx->lane_idx = -1; /* negative lane index means "all lanes" elsewhere */
	ctx->lane_count = 1;
	ctx->options = 0;
	return 0;
}
+
static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
				 uint8_t max_len, char *p_data);

/*
 * Generate {x}sfp_read_vendor_info(): fills the context's vendor name,
 * product number, serial number, date and (two-byte) revision.
 * NOTE(review): the address constants are built with the literal paste
 * Q##SFP_*, which expands to the QSFP_* constants for BOTH generated
 * variants (sfp_read_vendor_info and qsfp_read_vendor_info) - verify the
 * SFP variant is intended to use the QSFP linear addresses.
 */
#define XSFP_READ_VENDOR_INFO(x)                                             \
	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
	{                                                                    \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
				      sizeof(ctx->vendor_name),               \
				      ctx->vendor_name);                      \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
				      sizeof(ctx->prod_no), ctx->prod_no);     \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
				      sizeof(ctx->serial_no), ctx->serial_no); \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
				      sizeof(ctx->date), ctx->date);         \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
				      (uint8_t)(sizeof(ctx->rev) - 2),       \
				      ctx->rev); /*OBS Only two bytes*/      \
	}

XSFP_READ_VENDOR_INFO()
XSFP_READ_VENDOR_INFO(q)
+
/*
 * Build the SFP module state: zero the state and read the nominal bit
 * rate byte from the module. Returns the I2C read result (0 on success).
 */
static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
{
	int res;

	assert(ctx && state);
	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");

	(void)memset(state, 0, sizeof(*state));

	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
			       sizeof(state->br), &state->br);
	return res;
}
+
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
/*
 * Return a human-readable name for a NIM identifier byte.
 * Unknown ids (including the gaps 0x0E..0x10 and anything above 0x12)
 * yield "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id)
{
	static const char *const id_names[] = {
		[0x00] = "UNKNOWN",
		[0x01] = "GBIC",
		[0x02] = "FIXED",
		[0x03] = "SFP/SFP+",
		[0x04] = "300 pin XBI",
		[0x05] = "XEN-PAK",
		[0x06] = "XFP",
		[0x07] = "XFF",
		[0x08] = "XFP-E",
		[0x09] = "XPAK",
		[0x0A] = "X2",
		[0x0B] = "DWDM",
		[0x0C] = "QSFP",
		[0x0D] = "QSFP+",
		[0x11] = "QSFP28",
		[0x12] = "CFP4",
	};

	if (nim_id < sizeof(id_names) / sizeof(id_names[0]) &&
			id_names[nim_id] != NULL)
		return id_names[nim_id];

	return "ILLEGAL!";
}
+
/*
 * Read and check the validity of the NIM basic data.
 * This will also preload the cache.
 *
 * The layout matches the SFF-style checksum scheme (cf. SFF-8472
 * CC_BASE/CC_EXT): the modulo-256 sum of bytes [0..62] must equal byte 63,
 * and the sum of bytes [64..94] must equal byte 95, relative to
 * "start_addr". The result is recorded in ctx->content_valid.
 */
static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
{
	uint32_t sum = 0;
	uint8_t buf[96];

	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);

	/* First checksum: bytes [0..62] vs. byte 63 */
	for (int i = 0; i < 63; i++)
		sum += buf[i];

	if ((sum & 0xFF) != buf[63]) {
		ctx->content_valid = false;
	} else {
		sum = 0;

		/* Second checksum: bytes [64..94] vs. byte 95 */
		for (int i = 64; i < 95; i++)
			sum += buf[i];

		ctx->content_valid = ((sum & 0xFF) == buf[95]);
	}
	if (ctx->content_valid)
		NT_LOG(DBG, NTHW, "NIM content validation passed");
	else
		NT_LOG(WRN, NTHW, "NIM content validation failed");
}
+
/*
 * Set/reset the Soft Rate_Select bits: RS0 (Rx) lives in the
 * control/status register and RS1 (Tx) in the extended control/status
 * register. Each register is read-modified-written; I2C errors from the
 * helper are not propagated.
 */
static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
				  bool tx_rate_high)
{
	const bool m_page_addressing = page_addressing(ctx->nim_id);
	uint8_t data;

	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
				&data, NIM_READ);

	if (rx_rate_high)
		data |= SFP_SOFT_RATE0_BIT;
	else
		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);

	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
				&data, NIM_WRITE);

	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
				&data, NIM_READ);

	if (tx_rate_high)
		data |= SFP_SOFT_RATE1_BIT;
	else
		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);

	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
				&data, NIM_WRITE);
}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Disable TX laser.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	int res;
+	uint8_t value;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_WRITE);
+
+	return res;
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	uint8_t value;
+	uint8_t mask;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	if (lane_idx < 0) /* If no lane is specified then all lanes */
+		mask = QSFP_SOFT_TX_ALL_DISABLE_BITS;
+	else
+		mask = (uint8_t)(1U << lane_idx);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		value |= mask;
+	else
+		value &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++)
+		if (*(p_nim_len_info + i) == 255) {
+			ctx->len_info[i] = 65535;
+		} else {
+			uint32_t len = *(p_nim_len_info + i) * *(p_nim_units + i);
+
+			if (len > 65535)
+				ctx->len_info[i] = 65535;
+			else
+				ctx->len_info[i] = (uint16_t)len;
+		}
+}
+
/*
 * Read the basic QSFP+ module data: DMI options, vendor info, supported
 * fiber lengths and the required power class.
 * Returns 0 on success, -1 on an I2C read failure.
 */
static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
{
	const bool pg_addr = page_addressing(ctx->nim_id);
	uint8_t options;
	uint8_t value;
	uint8_t nim_len_info[5];
	uint16_t nim_units[5] = { 1000, 2, 1, 1,
				 1
			       }; /* QSFP MSA units in meters */
	const char *yes_no[2] _unused = { "No", "Yes" };

	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
	       nim_id_to_text(ctx->nim_id), ctx->nim_id);

	/* Read DMI options */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
				    sizeof(options), &options, NIM_READ) != 0)
		return -1;
	/* Remember whether the module reports average power measurements */
	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
	NT_LOG(DBG, ETHDEV,
	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
	       ctx->instance, yes_no[ctx->avg_pwr]);

	qsfp_read_vendor_info(ctx);
	NT_LOG(DBG, PMD,
	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
	       ctx->date, ctx->rev);

	/* Read the 5-entry supported-length block (one byte per fiber type) */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
				    sizeof(nim_len_info), nim_len_info,
				    NIM_READ) != 0)
		return -1;

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	nim_import_len_info(ctx, nim_len_info, nim_units);

	/* Read required power level */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
				    sizeof(value), &value, NIM_READ) != 0)
		return -1;

	/*
	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
	 * If power class >= 5 setHighPower must be called for the module to be fully
	 * functional
	 */
	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
		/* NIM in power class 1 - 4 */
		ctx->pwr_level_req =
			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
				  1);
	} else {
		/* NIM in power class 5 - 7 */
		ctx->pwr_level_req =
			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
	}

	return 0;
}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) {
+		return nim_sfp_set_rate_select(ctx, speed);
+	} else if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	}
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
/*
 * Reads basic vendor and DMI information.
 * Populates the DMI/option flags, vendor strings, supported lengths and the
 * power-level requirement in *ctx. Always returns 0.
 */
static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
{
	const char *yes_no[2] _unused = { "No", "Yes" };

	check_content_valid(ctx, 0);
	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
	       ctx->nim_id);

	/* Read DMI options */
	uint8_t options;

	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;

	/* With DMI support all five standard sensor options are made available */
	if (ctx->dmi_supp) {
		ctx->options |=
			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
			(1 << NIM_OPTION_TX_POWER);
	}

	if (ctx->dmi_supp) {
		NT_LOG(DBG, PMD,
		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
		       yes_no[ctx->specific_u.sfp.ext_cal],
		       yes_no[ctx->specific_u.sfp.addr_chg]);
	} else {
		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
	}
	/* Read enhanced options */
	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
		    &options);
	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;

	if (ctx->tx_disable)
		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);

	sfp_read_vendor_info(ctx);

	uint8_t nim_len_info[5];

	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
		    nim_len_info);

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	uint16_t nim_units[5] = { 1000, 100, 10, 10,
				 1
			       }; /* SFP MSA units in meters */
	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);

	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
		/*
		 * Make sure that for SFP modules the supported length for SM fibers,
		 * which is given in both km and 100m units, is equal to the greatest
		 * value.
		 * The following test will also be valid if NIM_LEN_MAX has been set!
		 */
		if (ctx->len_info[1] > ctx->len_info[0])
			ctx->len_info[0] = ctx->len_info[1];

		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
	}

	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);

	/* Power level 2 modules must be explicitly enabled before full operation */
	if (options & SFP_POWER_LEVEL2_REQ_BIT)
		ctx->pwr_level_req = 2;
	else
		ctx->pwr_level_req = 1;

	ctx->pwr_level_cur = 1;

	if (ctx->pwr_level_req == 2) {
		/* Read the current power level status */
		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
			    &options);

		if (options & SFP_POWER_LEVEL2_GET_BIT)
			ctx->pwr_level_cur = 2;
		else
			ctx->pwr_level_cur = 1;
	}
	return 0;
}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
/*
 * Try to figure out if a sensor is present by reading its value(s) and its limits.
 * This is a highly empirical approach that cannot be guaranteed to give the
 * correct result but it was a wish not to be dependent on a PN table based
 * solution.
 *
 * value_addr:    linear address of the measurement(s), two bytes per lane
 * lane_count:    number of lanes (and thus 16-bit values) to read
 * limit_addr:    linear address of the 8-byte limit block
 *                (alarm high/low followed by warning high/low)
 * two_compl:     true when values and limits are two's complement encoded
 * sensor_option: NIM_OPTION_* bit to set in ctx->options when the sensor
 *                is deemed present
 */
static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
		uint16_t value_addr,
		uint8_t lane_count,
		uint16_t limit_addr, bool two_compl,
		uint32_t sensor_option)
{
	uint8_t data[8];
	int i, j;
	int value;
	int value_list[4];
	int limit;
	int limit_list[4];
	bool present;

	/* Read current value(s) */
	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);

	/* Assemble big-endian 16-bit values, sign-extending when required */
	for (j = 0; j < lane_count; j++) {
		value = 0;

		for (i = 0; i < 2; i++) {
			value = value << 8;
			value += data[2 * j + i];
		}

		if (two_compl && value >= 0x8000)
			value = value - 0x10000;

		value_list[j] = value;
	}

	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
	read_data_lin(ctx, limit_addr, 8, data);

	/* Assemble the four big-endian 16-bit limits the same way */
	for (j = 0; j < 4; j++) {
		limit = 0;

		for (i = 0; i < 2; i++) {
			limit = limit << 8;
			limit += data[2 * j + i];
		}

		if (two_compl && limit >= 0x8000)
			limit = limit - 0x10000;

		limit_list[j] = limit;
	}

	/* Find out if limits contradicts each other */
	int alarm_high = limit_list[0];
	int alarm_low = limit_list[1];
	int warn_high = limit_list[2];
	int warn_low = limit_list[3];

	bool alarm_limits = false; /* Are they present - that is both not zero */
	bool warn_limits = false;
	bool limit_conflict = false;

	if (alarm_high != 0 || alarm_low != 0) {
		alarm_limits = true;

		if (alarm_high <= alarm_low)
			limit_conflict = true;
	}

	if (warn_high != 0 || warn_low != 0) {
		warn_limits = true;

		/* Warning limits must be least restrictive */
		if (warn_high <= warn_low)
			limit_conflict = true;
		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
			limit_conflict = true;
	}

	/* Try to deduce if the sensor is present or not */
	present = false;

	if (limit_conflict) {
		present = false;
	} else if (warn_limits ||
		 alarm_limits) { /* Is one or both present and not contradictory */
		present = true;
	} else {
		/*
		 * All limits are zero - look at the sensor value
		 * If one sensor is non-zero the sensor is set to be present
		 */
		for (j = 0; j < lane_count; j++) {
			if (value_list[j] != 0) {
				present = true;
				break;
			}
		}

		/*
		 * If all limits and values are zero then present will be false here. In this
		 * case it is assumed that the sensor is not present:
		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
		 * with no optical input.
		 * For all investigated modules it was found that if RxPwr is implemented then
		 * the limits are also set. This is not always the case with TxBias and TxPwr
		 * but here the measured values will be non-zero when the laser is on what it
		 * will be just after initialization since it has no external hardware disable.
		 */
	}

	if (present)
		ctx->options |= (1U << sensor_option);
}
+
+/*
+ * Find active QSFP sensors.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	ctx->options = 0;
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TEMP_LIN_ADDR, 1,
+					 QSFP_TEMP_THRESH_LIN_ADDR, true,
+					 NIM_OPTION_TEMP);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_VOLT_LIN_ADDR, 1,
+					 QSFP_VOLT_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_SUPPLY);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_RX_PWR_LIN_ADDR, 4,
+					 QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_RX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_PWR_LIN_ADDR, 4,
+					 QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_BIAS_LIN_ADDR, 4,
+					 QSFP_BIAS_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_BIAS);
+}
+
/*
 * Determine the SFP/SFP+/SFP28 port type and capability flags from the
 * module EEPROM: nominal bit rate, connector type, compliance codes and
 * the supported fiber lengths.
 */
static void sfp_find_port_params(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	uint16_t bit_rate_nom;
	uint8_t connector;
	uint8_t gig_eth_comp;
	uint8_t dmi_opt;
	uint8_t fiber_chan_tx_tech;
	unsigned int len_sm;
	unsigned int len_mm_50um;
	unsigned int len_mm_62_5um;

	ctx->specific_u.sfp.sfp28 = false;

	/* gigEthComp: */
	static const uint8_t eth_1000_b_t = 1 << 3;
	static const uint8_t eth_1000_b_sx = 1 << 0;
	static const uint8_t eth_1000_b_lx = 1 << 1;

	/* fiberChanTxTech: */
	static const uint8_t cu_passive = 1 << 2;
	static const uint8_t cu_active = 1 << 3;

	/* dmiOpt: */
	static const uint8_t dd_present = 1 << 6;

	/* connector: */
	static const uint8_t cu_pig_tail = 0x21;

	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;

	/* Nominal bit rate at address 12, unit is 100 Mbps */
	read_data_lin(ctx, 12, sizeof(data), &data);
	bit_rate_nom = (uint16_t)(data * 100);

	read_data_lin(ctx, 2, sizeof(connector), &connector);
	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);

	read_data_lin(ctx, 15, sizeof(data), &data);
	len_sm = (unsigned int)data * 100; /* Unit is 100m */

	read_data_lin(ctx, 16, sizeof(data), &data);
	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */

	read_data_lin(ctx, 17, sizeof(data), &data);
	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */

	/* First find out if it is a SFP or a SFP+ NIM */
	if (bit_rate_nom == 0) {
		/*
		 * A Nominal bit rate of zero indicates that it has not been defined and must
		 * be deduced from transceiver technology
		 */
		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
	} else if (bit_rate_nom == 25500) {
		/* SFF-8024 - 4.4 Extended Specification Compliance References */
		read_data_lin(ctx, 36, sizeof(data), &data);

		if (data == 0x02)
			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
		else if (data == 0x03)
			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
		else if (data == 0x0B)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
		else if (data == 0x0C)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
		else if (data == 0x0D)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
		else
			ctx->port_type = NT_PORT_TYPE_SFP_28;

		ctx->specific_u.sfp.sfp28 = true;
		ctx->specific_u.sfp.sfpplus = true;

		/*
		 * Whitelist of 25G transceivers known to also support 10G.
		 * There is no way to inquire about this capability.
		 */
		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
			ctx->specific_u.sfp.dual_rate = true;

			/* Change the port type for dual rate modules */
			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
		}

		return;
	}
	/* 10 Gbps nominal or faster is treated as SFP+ */
	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
	/* Then find sub-types of each */
	if (ctx->specific_u.sfp.sfpplus) {
		if (fiber_chan_tx_tech & cu_active) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
		} else if (fiber_chan_tx_tech & cu_passive) {
			if (connector == cu_pig_tail)
				ctx->port_type =
					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
			else
				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
		} else {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
		}
		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
			ctx->specific_u.sfp.dual_rate = true;
		}

		read_data_lin(ctx, 65, sizeof(data), &data);
		/* Test hard RATE_SELECT bit */
		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);

		read_data_lin(ctx, 93, sizeof(data), &data);
		/* Test soft RATE_SELECT bit */
		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
	} else { /* SFP */
		/* 100M */
		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
			ctx->port_type = NT_PORT_TYPE_SFP_FX;
		/* 1G */
		} else {
			ctx->specific_u.sfp.cu_type = false;
			if (gig_eth_comp & eth_1000_b_sx) {
				ctx->port_type = NT_PORT_TYPE_SFP_SX;
			} else if (gig_eth_comp & eth_1000_b_lx) {
				ctx->port_type = NT_PORT_TYPE_SFP_LX;
			} else if (gig_eth_comp & eth_1000_b_t) {
				ctx->specific_u.sfp.tri_speed =
					sfp_is_supported_tri_speed_pn(ctx->prod_no);

				if (ctx->specific_u.sfp.tri_speed) {
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
				} else {
					ctx->port_type = NT_PORT_TYPE_SFP_CU;
				}
				ctx->specific_u.sfp.cu_type = true;
			} else {
				/*
				 * Not all modules report their ethernet compliance correctly so use
				 * length indicators
				 */
				if (len_sm > 0)
					ctx->port_type = NT_PORT_TYPE_SFP_LX;
				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
					ctx->port_type = NT_PORT_TYPE_SFP_SX;
			}

			/* Add Diagnostic Data suffix if necessary */
			if (dmi_opt & dd_present) {
				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
				else if (ctx->port_type ==
						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
			}
		}
	}
}
+
+
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		ctx->speed_mask = NT_LINK_SPEED_25G; /* Default for SFP28 */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		ctx->speed_mask = NT_LINK_SPEED_10G; /* Default for SFP+ */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else { /* SFP */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+			ctx->speed_mask = NT_LINK_SPEED_100M;
+		} else {
+			ctx->speed_mask = NT_LINK_SPEED_1G; /* Default for SFP */
+			if (ctx->specific_u.sfp.dual_rate ||
+					ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_100M;
+			if (ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_10M;
+		}
+	}
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	if (fiber_chan_speed & (1 << 7)) {
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t extended_specification_compliance_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &extended_specification_compliance_code);
+
+		switch (extended_specification_compliance_code) {
+		case 0x02:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+			break;
+		case 0x03:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+			break;
+		case 0x0B:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+			break;
+		case 0x0C:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+			break;
+		case 0x0D:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+			break;
+		case 0x25:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+			break;
+		case 0x26:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+			break;
+		case 0x27:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+			break;
+		default:
+			ctx->port_type = NT_PORT_TYPE_QSFP28;
+		}
+	} else {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
+				   0x03; /* bit 1..0 */
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive the supported speed mask from the detected QSFP28 port type and
+ * whether the context covers all lanes (lane_idx < 0) or a single lane.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = ctx->lane_idx < 0;
+	const bool pam4_type = ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_LR;
+
+	if (pam4_type) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/*
+ * Classify a QSFP+ port as passive DAC, active DAC or optical from the
+ * transmitter-technology nibble (SFF-8636 device technology byte).
+ */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	/* The upper nibble encodes the transmitter technology */
+	const uint8_t tech = device_tech & 0xF0;
+
+	if (tech == 0xA0 || tech == 0xB0) {
+		/* Copper cable: unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tech >= 0xC0) {
+		/* Copper cable with limiting/linear active equalizers (0xC0-0xF0) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Everything below 0xA0 is an optical transmitter */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* All four lanes together run 40G; a single QSFP+ lane runs 10G */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Pre-initialize an SFP context: read the module's basic data and, on
+ * success, derive port type and speed mask. Returns the read result (0 = OK).
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	const int res = sfp_read_basic_data(ctx);
+
+	if (res != 0)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return res;
+}
+
+/*
+ * Initialize the QSFP+ specific context fields.
+ * lane_idx: -1 = context covers all lanes, 0..3 = a single lane.
+ */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * Pre-initialize a QSFP+ context: construct the lane bookkeeping, read the
+ * module's basic data and, on success, detect port type, sensor options,
+ * TX_DISABLE support and the supported speed mask.
+ * Returns the result of qsfpplus_read_basic_data() (0 = success).
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		/* Register 195 bit 4: TX_DISABLE implemented */
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * After module reset, wait until the QSFP28 is ready for register access.
+ * If the module announces the init-complete flag (rev compliance >= 7,
+ * byte 221 bit 4) the flag in byte 6 bit 0 is polled with a 1 s timeout;
+ * otherwise a fixed 500 ms delay is used. Only the "all lanes" (-1) or
+ * first-lane context performs the wait - other lanes return immediately.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		/* Byte 6 bit 0: init complete (SFF-8636 status register) */
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine which FEC types (media and/or host side) the module supports and
+ * whether they can be controlled. Known modules are matched on product number
+ * first; otherwise the SFF-8636 option bits on page 3 are consulted.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare the product number by content; the original pointer
+		 * comparison (==) against a string literal could never match.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * Pre-initialize a QSFP28 context. A QSFP28 is handled as a QSFP+ with
+ * additional properties (FEC options, PAM-4 port types, 100G speeds).
+ * Returns the result of qsfpplus_preinit() (0 = success).
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	const int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (res != 0)
+		return res;
+
+	qsfp28_wait_for_ready_after_reset(ctx);
+	memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+	       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+	ctx->specific_u.qsfp.qsfp28 = true;
+	qsfp28_find_port_params(ctx);
+	qsfp28_get_fec_options(ctx);
+	qsfp28_set_speed_mask(ctx);
+	return res;
+}
+
+/*
+ * Build the sensor-group list for an SFP port: temperature, voltage, bias
+ * current, TX power and RX power, in that order. The list head is stored in
+ * nim_sensors_ptr[m_port_no] and *nim_sensors_cnt is incremented per sensor.
+ *
+ * NOTE(review): *nim_sensors_cnt is cleared before the argument check and
+ * nim_sensors_cnt itself is never NULL-checked; allocate_nim_sensor_group()
+ * can return NULL which would be dereferenced below - confirm callers
+ * guarantee non-NULL arguments and that allocation failure aborts elsewhere.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
+/*
+ * Build the sensor-group list for a QSFP+/QSFP28 port: temperature, voltage,
+ * then per-lane bias current (4), TX power (4) and RX power (4). The list
+ * head is stored in nim_sensors_ptr[m_port_no].
+ *
+ * NOTE(review): unlike sfp_nim_add_all_sensors() this function does NOT zero
+ * *nim_sensors_cnt first, and allocate_nim_sensor_group() results are used
+ * without NULL checks - confirm the caller initializes the count and that
+ * allocation failures cannot occur silently.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors: qsfp_sensor_level1[1..4], one per lane */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power: qsfp_sensor_level1[5..8], one per lane */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power: qsfp_sensor_level1[9..12], one per lane */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one sensor-group list node for the given port and attach the
+ * sensor built from the description `sd`. Returns NULL if the node
+ * allocation fails.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(struct nim_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+
+	sg->next = NULL;
+	sg->ctx = ctx;
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	return sg;
+}
+
+/*
+ * Classify the NIM from its identifier, run the type-specific pre-init and
+ * register all sensors for the port. @extra optionally points to an int8_t
+ * lane index for QSFP/QSFP28 modules; NULL means "all lanes" (-1).
+ * Returns 0 on success, non-zero on failure or unsupported NIM type.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	if (res != 0)
+		return res; /* Basic identification failed - do not add sensors */
+
+	const int8_t lane_idx = extra ? *(int8_t *)extra : (int8_t)-1;
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/* Propagate the preinit result (it was previously discarded) */
+		res = sfp_preinit(ctx);
+		if (res == 0)
+			sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+						nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		res = qsfpplus_preinit(ctx, lane_idx);
+		if (res == 0)
+			qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+						      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		res = qsfp28_preinit(ctx, lane_idx);
+		if (res == 0)
+			qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+						      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* Minimal NIM link state: the nominal bit rate reported by the module */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * Per-port NIM (Network Interface Module) context: I2C access state plus the
+ * identification data and capabilities read from the module's EEPROM.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr; /* I2C device address - presumably 0xA0 base; TODO confirm */
+	uint8_t regaddr;
+	uint8_t nim_id; /* raw NIM identifier byte (see enum nt_nim_identifier_e) */
+	nt_port_type_t port_type; /* classified port type (see nim_defines.h) */
+
+	/* Strings read from the module EEPROM (NUL-terminated) */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr; /* power measurements are averaged - TODO confirm semantics */
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5];
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options; /* bitmask of (1 << NIM_OPTION_*) flags */
+	bool tx_disable; /* TX_DISABLE implemented by the module */
+	bool dmi_supp; /* diagnostic monitoring supported - TODO confirm */
+
+	/* Type-specific data; which member is valid follows from nim_id */
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel;
+			bool sw_rate_sel;
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only;
+			bool qsfp28; /* true: QSFP28, false: QSFP+ */
+			union {
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 byte 1 */
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* A node in the per-port singly linked list of NIM sensors */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor; /* sensor descriptor/state */
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi); /* read callback */
+	struct nim_i2c_ctx *ctx; /* owning NIM context (not owned by this node) */
+	struct nim_sensor_group *next; /* next sensor in the list, or NULL */
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify a NIM based on its ID and some register
+ * reads, and collects the information into the ctx structure. The @extra
+ * parameter may contain an initialization argument for specific types of NIMs.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+/* Bit positions used in nim_i2c_ctx::options to flag supported NIM features */
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/* Map a single link-speed flag to its human-readable name */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	const char *name;
+
+	switch (link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		name = "NotAvail";
+		break;
+	case NT_LINK_SPEED_10M:
+		name = "10M";
+		break;
+	case NT_LINK_SPEED_100M:
+		name = "100M";
+		break;
+	case NT_LINK_SPEED_1G:
+		name = "1G";
+		break;
+	case NT_LINK_SPEED_10G:
+		name = "10G";
+		break;
+	case NT_LINK_SPEED_25G:
+		name = "25G";
+		break;
+	case NT_LINK_SPEED_40G:
+		name = "40G";
+		break;
+	case NT_LINK_SPEED_50G:
+		name = "50G";
+		break;
+	case NT_LINK_SPEED_100G:
+		name = "100G";
+		break;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		name = "Unhandled";
+		break;
+	}
+	return name;
+}
+
+/* Convert a single link-speed flag to its value in bits per second */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_1G:
+		return 1ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_10G:
+		return 10ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_25G:
+		return 25ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_40G:
+		return 40ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_50G:
+		return 50ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_100G:
+		return 100ULL * 1000ULL * 1000ULL * 1000ULL;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render a link-speed bitmask as a comma-separated list of speed names into
+ * `buffer` (capacity `length` bytes, always NUL-terminated). Returns `buffer`.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			/*
+			 * Bound strncat() by the REMAINING space; passing the
+			 * total buffer size (as the original did) can write
+			 * past the end of `buffer` once it is nearly full.
+			 */
+			if (len > 0 && (length - len - 1) > 2) {
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read `count` consecutive 16-bit monitor values starting at linear address
+ * `addr` and convert each from the module's big-endian byte order to host
+ * order. Always returns true (read_data_lin reports no error here).
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * The original assignment was a no-op although the comment
+		 * said "Swap to little endian". The registers are big-endian,
+		 * so decode the value from its raw bytes; this is correct on
+		 * any host endianness.
+		 */
+		const uint8_t *raw = (const uint8_t *)p_lane_values;
+
+		*p_lane_values = (uint16_t)(((uint16_t)raw[0] << 8) | raw[1]);
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read the NIM temperature register (single raw 16-bit value; scaling is
+ * applied by the caller)
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	bool ok = qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+	return ok;
+}
+
+/*
+ * Read the NIM supply voltage register (single raw 16-bit value)
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	bool ok = qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+	return ok;
+}
+
+/*
+ * Read the NIM TX bias current registers, one raw 16-bit value per lane (4)
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	bool ok = qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+	return ok;
+}
+
+/*
+ * Read the NIM TX optical power registers, one raw 16-bit value per lane (4)
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	bool ok = qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+	return ok;
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/*
+	 * Fix copy-paste bug: read from the RX power registers (addr 34),
+	 * not the TX power registers the original used.
+	 */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/* Sensor read callback: NIM temperature */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	int16_t raw;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_temperature(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* raw * 10 / 256 - presumably 1/256 degC per LSB scaled to 0.1 degC units */
+	update_sensor_value(sg->sensor, (int)(raw * 10 / 256));
+}
+
+/* Sensor read callback: NIM supply voltage */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t raw;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_supply_voltage(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* raw / 10 - presumably converts 100 uV units to mV; TODO confirm */
+	update_sensor_value(sg->sensor, (int)((raw) / 10));
+}
+
+/* Sensor read callback: per-lane TX bias current */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t lanes[4] = { 0 };
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* factor 2 - presumably 2 uA per LSB; TODO confirm against SFF-8636 */
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i] * 2);
+}
+
+/* Sensor read callback: per-lane TX optical power (raw register units) */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t lanes[4] = { 0 };
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
+
+/* Sensor read callback: per-lane RX optical power (raw register units) */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t lanes[4] = { 0 };
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Sensor read callbacks for QSFP/QSFP+ NIMs.
+ * Each function reads one quantity through the NIM I2C context held in
+ * the sensor group and pushes the result with update_sensor_value()
+ * (-1 on read failure).
+ */
+
+/* Guard renamed from _QSFP_H: identifiers starting with an underscore
+ * followed by an uppercase letter are reserved (C std 7.1.3), and the
+ * old name was generic enough to collide with other headers.
+ */
+#ifndef QSFP_SENSORS_H
+#define QSFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* QSFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+/* Removed duplicate SFP_CU_LINK_LEN_LIN_ADDR definition and ordered the
+ * identification fields by linear address.
+ */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_value points
+ * to host-order 16bit data that can be either signed or unsigned depending on
+ * signed_data. Returns false when the module has no DMI support.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	/* No diagnostics support - nothing to read */
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Big endian to host order (htons == ntohs for 16 bit) */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Calibration constants are stored big endian as well */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/*
+		 * Apply slope/offset in 32-bit arithmetic, then saturate to the
+		 * 16-bit result range. The division by 256 suggests slope is an
+		 * 8.8 fixed-point factor (SFF-8472) - confirm against the spec.
+		 */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (signed raw DMI value into *p_value).
+ * Returns false when the module has no DMI support.
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage (unsigned raw DMI value into *p_value).
+ * Returns false when the module has no DMI support.
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM TX bias current (unsigned raw DMI value into *p_value).
+ * Returns false when the module has no DMI support.
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power (unsigned raw DMI value into *p_value).
+ * Returns false when the module has no DMI support.
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out (4th-order polynomial correction).
+ * Returns false when the corrected value exceeds the 16-bit range.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Big endian to host order (htons == ntohs for 16 bit) */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/*
+		 * Byte-swap each 32-bit coefficient in place.
+		 * NOTE(review): float punning via uint32_t* breaks strict
+		 * aliasing; a memcpy round-trip would be the portable form.
+		 */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 * Coefficients are stored highest order first: rx_pwr_cal[0] is
+		 * Coeff4 and rx_pwr_cal[4] is Coeff0.
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Each pass adds the RxPwr^1..RxPwr^4 term */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Fail when the result cannot be represented in 16 bits */
+		if (rx_power > 65535)
+			return false;
+
+		/* Clamp negative corrections to zero */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists (calibrated value into *p_value).
+ * Returns false when the corrected value is out of range.
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+/*
+ * Poll the SFP temperature and push it to the sensor.
+ * Raw value is converted as raw * 10 / 256; -1 is reported on failure.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp = 0;
+	int reported = -1;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_temperature(sg->ctx, &raw_temp))
+		reported = (int)(raw_temp * 10 / 256);
+
+	update_sensor_value(sg->sensor, reported);
+}
+
+/*
+ * Poll the SFP supply voltage and push it to the sensor in mV.
+ * -1 is reported when the module cannot be read.
+ */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_volt = 0;
+	int reported = -1;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &raw_volt))
+		reported = (int)(raw_volt / 10); /* Unit: 100uV -> 1mV */
+
+	update_sensor_value(sg->sensor, reported);
+}
+
+/*
+ * Poll the SFP TX bias current and push it to the sensor (raw * 2).
+ * -1 is reported when the module cannot be read.
+ */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_bias = 0;
+	int reported = -1;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &raw_bias))
+		reported = (int)(raw_bias * 2);
+
+	update_sensor_value(sg->sensor, reported);
+}
+
+/*
+ * Poll the SFP TX optical power and push the raw reading to the sensor.
+ * -1 is reported when the module cannot be read.
+ */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr = 0;
+	int reported = -1;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_power(sg->ctx, &raw_pwr))
+		reported = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, reported);
+}
+
+/*
+ * Poll the SFP RX optical power and push the calibrated reading to the
+ * sensor. -1 is reported when the module cannot be read.
+ */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr = 0;
+	int reported = -1;
+
+	(void)t_spi;
+
+	if (!sg || !sg->ctx) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_rx_power(sg->ctx, &raw_pwr))
+		reported = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, reported);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Sensor read callbacks for SFP/SFP+ NIMs.
+ * Each function reads one quantity through the NIM I2C context held in
+ * the sensor group and pushes the result with update_sensor_value()
+ * (-1 on read failure).
+ */
+
+/* Guard renamed from _SFP_H: identifiers starting with an underscore
+ * followed by an uppercase letter are reserved (C std 7.1.3), and the
+ * old name was generic enough to collide with other headers.
+ */
+#ifndef SFP_SENSORS_H
+#define SFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* SFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+/*
+ * Instantiate the generated Si5340 register table under profile-specific
+ * names: the generic identifiers in the generated header are renamed via
+ * macros before inclusion so several clock profiles can coexist in one
+ * translation unit.
+ */
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /* Disable the include-once protection
+				      * so another profile can re-include
+				      * the header with different names
+				      */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate and zero-initialize a GMF instance.
+ * Returns NULL on allocation failure; release with nthw_gmf_delete().
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/* calloc gives a zeroed struct directly - replaces malloc + memset */
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/*
+ * Release a GMF instance from nthw_gmf_new(). The struct is scrubbed
+ * before free as a defensive measure against stale-pointer use
+ * (NOTE(review): a plain memset before free may be elided by the
+ * optimizer; not suitable for secrets).
+ */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_gmf_t));
+		free(p);
+	}
+}
+
+/*
+ * Resolve all GMF registers and fields for instance n_instance on p_fpga.
+ * When p is NULL the call only probes for the module's presence and
+ * returns 0 (present) or -1 (absent).
+ * Returns 0 on success, -1 when the instance does not exist.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* Probe-only mode */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields (get = must exist) */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters; fall back to 1 when unset for this FPGA */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields (query = NULL when absent from FPGA) */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF. Ignored while administratively blocked
+ * (license enforcement - see nthw_gmf_administrative_block()).
+ */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->m_administrative_block)
+		field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable IFG (inter-frame gap) handling */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+/* Optional CTRL bit: silently a no-op when IFG_TX_NOW_ALWAYS is absent
+ * from this FPGA version.
+ */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Optional CTRL bit: silently a no-op when IFG_TX_ON_TS_ALWAYS is absent
+ * from this FPGA version.
+ */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Optional CTRL bit: silently a no-op when IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK
+ * is absent from this FPGA version.
+ */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock) {
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+				    enable ? 1 : 0);
+	}
+}
+
+/* Enable/disable automatic IFG adjustment */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Normalized to 0/1 for consistency with the other CTRL bit setters
+	 * (bool already promotes to 0/1, so behavior is unchanged).
+	 */
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write the raw IFG speed register value.
+ * Values above 2^(width-1), where width is the SPEED_IFG_SPEED field
+ * width, are rejected. Returns 0 on success, -1 when out of range.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		/* Written as one or two 32-bit words depending on field width.
+		 * NOTE(review): the uint64_t-to-uint32_t* view assumes
+		 * little-endian word order - confirm on big-endian targets.
+		 */
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Width in bits of the SPEED_IFG_SPEED field for this FPGA version */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	assert(n_bit_width >=
+	       22); /* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	return n_bit_width;
+}
+
+/*
+ * Program the IFG speed from a desired rate (in bits) relative to the
+ * link speed. The ratio is scaled by the product parameters
+ * IFG_SPEED_MUL/DIV before conversion to the raw register value.
+ * NOTE(review): exp2() of half the field width suggests the low half of
+ * the register is a fixed-point fraction - confirm against HW docs.
+ * Returns 0 on success, -1 when the computed value is out of range.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed from a rate-limit percentage.
+ * Exactly 0 or 100 percent maps to raw value 0 (no limiting); values
+ * above 99 (other than exactly 100) are rejected.
+ * Returns 0 on success, -1 on invalid input.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	/* Exact float compares are intentional sentinel checks */
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit clock delta as two 32-bit words.
+ * NOTE(review): the uint64_t-to-uint32_t* view assumes little-endian
+ * word order - confirm on big-endian targets.
+ */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the 64-bit clock delta adjust value; no-op when the optional
+ * IFG_SET_CLOCK_DELTA_ADJUST register is absent from this FPGA version.
+ */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+/* Write the 64-bit max-adjust slack as two 32-bit words */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
+}
+
+/*
+ * Read the sticky status register and decode it into a bitmap of
+ * GMF_STATUS_MASK_* values.
+ */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t n_status_mask = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	n_status_mask |= field_get_val32(p->mp_stat_sticky_data_underflowed) ?
+			 GMF_STATUS_MASK_DATA_UNDERFLOWED : 0;
+	n_status_mask |= field_get_val32(p->mp_stat_sticky_ifg_adjusted) ?
+			 GMF_STATUS_MASK_IFG_ADJUSTED : 0;
+
+	return n_status_mask;
+}
+
+/* Acknowledge the selected sticky status bits by writing them back
+ * (presumably write-1-to-clear semantics - verify against HW docs).
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the "next packet" timestamp statistic (ns).
+ * Returns UINT64_MAX when the optional STAT_NEXT_PKT register is absent.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/* Sentinel fixed: ULONG_MAX is only 2^32-1 on 32-bit ABIs, which
+	 * is a valid timestamp; UINT64_MAX matches the return type.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the "max delayed packet" statistic (ns).
+ * Returns UINT64_MAX when the optional STAT_MAX_DELAYED_PKT register is
+ * absent.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/* Sentinel fixed: ULONG_MAX is only 2^32-1 on 32-bit ABIs, which
+	 * is a valid timestamp; UINT64_MAX matches the return type.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Permanently disable the GMF for license enforcement: clears ENABLE and
+ * latches the administrative block so later nthw_gmf_set_enable() calls
+ * become no-ops.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/* Bit masks for the value returned by nthw_gmf_get_status_sticky() */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/*
+ * State for one GMF FPGA module instance, populated by nthw_gmf_init().
+ * Register/field pointers marked "optional" are NULL when this FPGA
+ * version does not implement them.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/* Resolved registers and fields */
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always; /* optional */
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always; /* optional */
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock; /* optional */
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust; /* optional */
+	nt_field_t *mp_ifg_clock_delta_adjust_delta; /* optional */
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	nt_register_t *mp_stat_next_pkt; /* optional */
+	nt_field_t *mp_stat_next_pkt_ns; /* optional */
+
+	nt_register_t *mp_stat_max_delayed_pkt; /* optional */
+	nt_field_t *mp_stat_max_delayed_pkt_ns; /* optional */
+
+	/* Product parameters NT_GMF_IFG_SPEED_MUL/DIV (fallback 1 at init) */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+/* Pass p == NULL to probe for module presence only */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC (Receive MAC Controller) context.
+ * Returns NULL on allocation failure; release with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/* Scrub and release an RMC context; a NULL pointer is accepted and ignored. */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Bind an RMC context to RMC module instance @n_instance of @p_fpga and
+ * cache the register/field handles used by the other nthw_rmc_* calls.
+ *
+ * If @p is NULL the call degenerates into a pure probe: it returns 0 when
+ * the module instance exists and -1 when it does not, without touching
+ * any state.
+ *
+ * Returns 0 on success, -1 when the module instance is not present.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	/* Probe-only mode: report presence of the module instance */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count; falls back to the total port count parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL is mandatory: module_get_register() is used (no NULL expected) */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* STATUS/DBG/MAC_IF are optional: module_query_register() may return
+	 * NULL on older FPGA images; the getters below return ~0 in that case
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read back the CTRL BLOCK_MAC_PORT field (re-reads the register first). */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	uint32_t mac_block_mask = field_get_updated(p->mp_fld_ctrl_block_mac_port);
+
+	return mac_block_mask;
+}
+
+/* STATUS SF_RAM_OF counter, or ~0 when the STATUS register is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (p->mp_reg_status == NULL)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/* STATUS DESCR_FIFO_OF counter, or ~0 when the STATUS register is absent. */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (p->mp_reg_status == NULL)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/* DBG MERGE value, or ~0 when the DBG register is absent. */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (p->mp_reg_dbg == NULL)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+/* MAC_IF ERR value, or ~0 when the MAC_IF register is absent. */
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	if (p->mp_reg_mac_if == NULL)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_mac_if_err);
+}
+
+/* Write @mask to CTRL BLOCK_MAC_PORT and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block all RMC traffic classes: statistics/drop, keep-alive and every
+ * MAC port. A no-op while an administrative block is in force, so the
+ * administrative state cannot be accidentally re-armed by normal paths.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Unblock RMC traffic, leaving blocked only the ports above the active
+ * port/NIM count (and, on vSwitch profiles, the ROA recirculate port).
+ * No-op while an administrative block is in force.
+ *
+ * NOTE(review): the shift assumes mn_ports/mn_nims < 32 (shift by >= the
+ * type width is UB) - presumably always true for these small port counts,
+ * but confirm against the supported adapters.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	/* Set the bits for all lanes beyond the populated ports/NIMs */
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Block all MAC ports and latch the administrative flag so that later
+ * nthw_rmc_block()/nthw_rmc_unblock() calls become no-ops. There is no
+ * visible API here to clear the flag again.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/* Driver-side state for one RMC (Receive MAC Controller) FPGA module. */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
+	nt_module_t *mp_mod_rmc;	/* bound RMC module instance */
+	int mn_instance;	/* instance number passed to nthw_rmc_init() */
+
+	int mn_ports;	/* RX port count (product parameter) */
+	int mn_nims;	/* NIM count (product parameter) */
+	bool mb_is_vswitch;	/* FPGA profile is vSwitch */
+
+	/* When set, nthw_rmc_block()/nthw_rmc_unblock() are no-ops */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent from the image) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+/* Lifecycle: new -> init -> ... -> delete. init with p==NULL only probes. */
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+/* Optional-register getters: return 0xffffffff when the register is absent */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/* Next free FPGA result slot per adapter; bumped by get_fpga_idx().
+ * static: this table is private to this file and must not leak linkage.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * Set up AVR-side monitoring of one sensor.
+ * The AVR periodically samples @avr_dev_reg on @avr_dev and writes the
+ * result into the FPGA slot whose index this function returns.
+ * Note: the slot index is returned even when the SPI setup call fails;
+ * the failure is only logged.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format: b0,1 = endianness, b2,3 = signedness (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Sensor-group read callback: fetch the raw AVR result from the FPGA and
+ * push the converted value into the sensor.
+ * NOTE(review): sensor_read()'s status is not checked here (original
+ * behavior kept); the result is now zero-initialized so a failed read can
+ * no longer feed an indeterminate value into conv_func().
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t raw_value = 0; /* defined value even if the SPI read fails */
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &raw_value);
+	update_sensor_value(sg->sensor, sg->conv_func(raw_value));
+}
+
+/*
+ * Create a sensor group backed by an AVR-monitored sensor.
+ * Allocates the group and sensor, programs the AVR monitoring via
+ * _avr_sensor_init() and installs avr_read() as read callback.
+ * Returns NULL on allocation failure (previously a NULL result from
+ * allocate_sensor() was dereferenced unchecked).
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* allocate_sensor() has already logged the failure */
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Hand out the next free FPGA sensor-result slot for @adapter_no.
+ * NOTE(review): adapter_no is not bounds-checked against MAX_ADAPTERS -
+ * presumably callers guarantee it; confirm.
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	return s_fpga_indexes[adapter_no]++;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+/*
+ * Create a sensor group for one AVR-monitored sensor on adapter
+ * @m_adapter_no and program the AVR via SPI. @conv_func converts the raw
+ * AVR reading to the sensor unit. Returns NULL on allocation failure.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Sensor-group read callback for the on-die FPGA temperature.
+ * Converts the raw TEMPMON_STAT.TEMP ADC code to 0.1 degC steps.
+ * The constants look like a fixed-point form of the Xilinx SYSMON
+ * transfer function - TODO confirm against the FPGA documentation.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	/* also guard sg->monitor: tempmon_new() may have returned NULL */
+	if (sg == NULL || sg->sensor == NULL || sg->monitor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/* widen to 64-bit signed: for low ADC codes (val < ~2220) the old
+	 * uint32_t expression wrapped around instead of going negative
+	 */
+	temp = (int)(((int64_t)val * 20159 - 44752896) / 16384);
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the sensor group for the on-die FPGA temperature sensor.
+ * Returns NULL when any allocation fails (previously NULL results from
+ * tempmon_new()/allocate_sensor() were stored unchecked and crashed on
+ * first read).
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	if (sg->monitor == NULL) {
+		/* tempmon_new() already logged the failure */
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	if (sg->sensor == NULL) {
+		/* allocate_sensor() already logged; monitor->fields (see
+		 * tempmon_init) is knowingly leaked on this one-shot path
+		 */
+		free(sg->monitor);
+		free(sg);
+		return NULL;
+	}
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/* Allocate an (uninitialized) FPGA sensor monitor; NULL on failure. */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *monitor;
+
+	monitor = malloc(sizeof(struct nt_fpga_sensor_monitor));
+	if (monitor == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return monitor;
+}
+
+/*
+ * Bind a sensor monitor to the TEMPMON module of @p_fpga and cache the
+ * TEMPMON_STAT.TEMP field handle in t->fields[0].
+ * Errors are logged and the function returns early - previously a NULL
+ * module/register was passed straight into the next lookup call.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return; /* do not pass a NULL module to module_get_register() */
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return; /* do not pass a NULL register to register_get_field() */
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+/* Allocate an uninitialized monitor (NULL on failure) ... */
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+/* ... then bind it to the TEMPMON module of @p_fpga. Errors are logged. */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+/* Shorthand for the sensor-description initializers below */
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level 1 (diagnostic) SFP sensors: supply voltage, TX bias, TX/RX power */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* QSFP/QSFP+/QSFP28: one level 0 temperature sensor (name auto-set) */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* Level 1 QSFP sensors: supply voltage plus per-lane (1-4) bias and power */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices; the trailing comments list the boards using each */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/* Wire format of one monitoring entry sent to the AVR: packed, align 1 */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/* NOTE(review): packing is reset before sensor_mon_setup16, so it is not
+ * formally packed; with all members of alignment 1 the layout is the same,
+ * but confirm this placement is intentional for the AVR wire protocol.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Transfer one AVR opcode over SPI.
+ * @param rxsz  in: capacity of @rx in bytes; out: bytes actually received.
+ *              May be NULL when no reply payload is expected.
+ * Returns 0 on success, otherwise the nthw_spi_v3_transfer() error code.
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	/* guard the dereference: the old code read *rxsz unconditionally and
+	 * only NULL-checked rxsz afterwards (also drops the dead res=1 init)
+	 */
+	struct tx_rx_buf m_rx = { .size = rxsz ? (uint16_t)*rxsz : 0,
+				  .p_buf = rx };
+
+	int res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Read one AVR monitoring result slot from the FPGA.
+ * Thin wrapper; the return value is whatever nthw_spis_read_sensor()
+ * returns (presumably a status code - confirm against its definition).
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+/*
+ * Send a sensor monitoring setup to the AVR.
+ * Only the header plus the setup_cnt populated entries of @p_setup are
+ * transmitted. The command expects no reply payload; any returned data
+ * is treated as a protocol error.
+ * Returns 0 on success, non-zero on SPI or protocol error.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	/* header size (struct minus the full array) + the used entries */
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Start/stop/clear sensor monitoring on the AVR.
+ * The command expects no reply payload; returned data is a protocol error.
+ * NOTE(review): sizeof(ctrl) is the size of the enum (typically 4 bytes) -
+ * confirm this matches what the AVR protocol expects for this opcode.
+ * Returns 0 on success, non-zero on SPI or protocol error.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+/* Program sensor monitoring entries into the AVR; 0 on success */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+/* Start/stop/clear AVR sensor monitoring; 0 on success */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+/* Fetch one monitoring result slot from the FPGA via SPI */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,	/* module temperature (level 0) */
+	NT_SENSOR_SFP_SUPPLY,	/* supply voltage */
+	NT_SENSOR_SFP_TX_BIAS,	/* TX bias current */
+	NT_SENSOR_SFP_TX_POWER,	/* TX optical power (average) */
+	NT_SENSOR_SFP_RX_POWER,	/* RX optical power (average) */
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,	/* module temperature (level 0) */
+	NT_SENSOR_QSFP_SUPPLY,	/* supply voltage */
+	NT_SENSOR_QSFP_TX_BIAS1,	/* TX bias current, lane 1 */
+	NT_SENSOR_QSFP_TX_BIAS2,	/* TX bias current, lane 2 */
+	NT_SENSOR_QSFP_TX_BIAS3,	/* TX bias current, lane 3 */
+	NT_SENSOR_QSFP_TX_BIAS4,	/* TX bias current, lane 4 */
+	NT_SENSOR_QSFP_TX_POWER1,	/* TX optical power, lane 1 */
+	NT_SENSOR_QSFP_TX_POWER2,	/* TX optical power, lane 2 */
+	NT_SENSOR_QSFP_TX_POWER3,	/* TX optical power, lane 3 */
+	NT_SENSOR_QSFP_TX_POWER4,	/* TX optical power, lane 4 */
+	NT_SENSOR_QSFP_RX_POWER1,	/* RX optical power, lane 1 */
+	NT_SENSOR_QSFP_RX_POWER2,	/* RX optical power, lane 2 */
+	NT_SENSOR_QSFP_RX_POWER3,	/* RX optical power, lane 3 */
+	NT_SENSOR_QSFP_RX_POWER4,	/* RX optical power, lane 4 */
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Master/Slave
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_MASTER, /* Adapter is master in the bonding */
+	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+/*
+ * Release a sensor group and the sensor/monitor objects it owns.
+ * Safe to call with NULL.
+ */
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg == NULL)
+		return;
+
+	/* free(NULL) is a no-op, so the members need no NULL guards */
+	free(sg->sensor);
+	free(sg->monitor);
+	free(sg);
+}
+
+/*
+ * Allocate and initialize a sensor object.
+ *
+ * adapter_or_port_index: adapter no. for adapter sensors, port no. for
+ *                        port sensors (stored in info.source_index)
+ * p_name: sensor name; copied and truncated to NT_INFO_SENSOR_NAME chars
+ *
+ * Returns the new sensor, or NULL on allocation failure. The caller owns
+ * the returned object (it is released via sensor_deinit()).
+ */
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	/* No cast of malloc() in C; sizeof *ptr keeps size and type in sync */
+	struct nt_adapter_sensor *sensor = malloc(sizeof(*sensor));
+	size_t name_len;
+
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+
+	/* Copy the name once (single strlen), clamp and NUL-terminate */
+	name_len = strlen(p_name);
+	if (name_len > NT_INFO_SENSOR_NAME)
+		name_len = NT_INFO_SENSOR_NAME;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name, name_len);
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/*
+ * Record a new reading and track the running min/max.
+ * The (unsigned int) casts compare against the NT_SENSOR_NAN sentinel that
+ * allocate_sensor() stores in value_lowest/value_highest, so the first real
+ * reading always replaces the sentinel. NULL sensors are ignored.
+ */
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+/*
+ * Allocate and initialize a sensor object from a static description.
+ *
+ * Initializes sub_type from the description and sets state/value/min/max
+ * to their start values; malloc() does not zero the object, so leaving
+ * these fields unset would hand uninitialized heap memory to
+ * update_sensor_value()'s min/max comparisons.
+ *
+ * Returns NULL on allocation failure; the caller owns the returned object
+ * (released via sensor_deinit()).
+ */
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor = malloc(sizeof(*sensor));
+	size_t name_len;
+
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sub_type = descr->sub_type;
+	sensor->info.sensor_index = descr->index;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+
+	/* Copy the name once (single strlen), clamp and NUL-terminate */
+	name_len = strlen(descr->name);
+	if (name_len > NT_INFO_SENSOR_NAME)
+		name_len = NT_INFO_SENSOR_NAME;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name, name_len);
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/* Reset a sensor group: every pointer and callback member becomes NULL */
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	*sg = (struct nt_sensor_group){ 0 };
+}
+
+/* Getters */
+/* Current reading of the group's sensor */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+}
+
+/* Lowest reading registered for the group's sensor */
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+}
+
+/* Highest reading registered for the group's sensor */
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+}
+
+/* Name of the group's sensor (NUL-terminated, owned by the sensor) */
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+}
+
+/* Conversion functions */
+/* Identity conversion: reinterpret the low 16 bits as a signed value */
+int null_signed(uint32_t p_sensor_result)
+{
+	const int16_t raw = (int16_t)p_sensor_result;
+
+	return raw;
+}
+
+/* Identity conversion: keep only the low 16 bits, never negative */
+int null_unsigned(uint32_t p_sensor_result)
+{
+	const uint16_t raw = (uint16_t)p_sensor_result;
+
+	return raw;
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vch value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.015 (PRESCALE is accounted for)
+ * ******************************************************************************
+ */
+/* EXAR7724 Vch: Vout = ReadVal * 15 mV (PRESCALE accounted for) */
+int exar7724_vch(uint32_t p_sensor_result)
+{
+	const uint32_t millivolts = p_sensor_result * 15; /* NT unit: 1mV */
+
+	return millivolts;
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vin value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.0125
+ * ******************************************************************************
+ */
+/* EXAR7724 Vin: Vout = ReadVal * 12.5 mV, done as (val * 25) / 2 */
+int exar7724_vin(uint32_t p_sensor_result)
+{
+	const uint32_t millivolts = (p_sensor_result * 25) / 2; /* NT unit: 1mV */
+
+	return millivolts;
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Tj value to Napatech internal representation
+ * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
+ *                      = ReadVal * 5K
+ * ******************************************************************************
+ */
+int exar7724_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * A value of 2730 is used instead of 2732 which is more correct but since
+	 * the temperature step is 5 degrees it is more natural to show these steps
+	 *
+	 * Cast to int before subtracting: for raw readings below 55 the unsigned
+	 * expression would otherwise wrap around, and the conversion of the huge
+	 * unsigned result back to int is implementation-defined.
+	 */
+	return (int)(p_sensor_result * 50) - 2730; /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * Conversion function for Linear Technology Linear_5s_11s format.
+ * The functions returns Y * 2**N, where N = b[15:11] is a 5-bit two's complement
+ * integer and Y = b[10:0] is an 11-bit two's complement integer.
+ * The multiplier value is used for scaling to Napatech units.
+ * ******************************************************************************
+ */
+/* Decode L11: value = Y * 2^N, then scale by multiplier (see comment above) */
+static int conv5s_11s(uint16_t value, int multiplier)
+{
+	int n, y;
+
+	/* Y: mantissa, 11-bit two's complement in b[10:0] */
+	y = value & 0x07FF;
+
+	if (value & 0x0400)
+		y -= 0x0800; /* The MSBit is a sign bit */
+
+	/* N: exponent, 5-bit two's complement in b[15:11] */
+	n = (value >> 11) & 0x1F;
+
+	if (n & 0x10)
+		n -= 0x20; /* The MSBit is a sign bit */
+
+	/* Scale to Napatech units before applying the power of two */
+	y *= multiplier;
+
+	if (n > 0)
+		y *= (1 << n);
+
+	else if (n < 0)
+		y /= (1 << (-n));
+
+	return y;
+}
+
+/*
+ * ******************************************************************************
+ * Temperature conversion from Linear_5s_11s format.
+ * ******************************************************************************
+ */
+/*
+ * NOTE(review): the (uint16_t) cast discards the sign of the decoded value,
+ * so a sub-zero temperature would read back as a large positive number -
+ * presumably readings are always above 0 C here; confirm against hardware.
+ */
+int ltm4676_tj(uint32_t p_sensor_result)
+{
+	return (uint16_t)conv5s_11s(p_sensor_result, 10); /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For MP2886a: Convert a read Tj value to Napatech internal representation
+ * ******************************************************************************
+ */
+int mp2886a_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * MPS-2886p: READ_TEMPERATURE (register 0x8D)
+	 * READ_TEMPERATURE is a 2-byte, unsigned integer - pass the low
+	 * 16 bits straight through.
+	 */
+	const uint16_t raw = (uint16_t)p_sensor_result;
+
+	return raw; /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	/* 0xFF in the high byte marks an invalid reading */
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* Drop the six unused low bits, then scale by 2.5:
+	 * NT unit: 0.25 deg, Native unit: 0.1C
+	 */
+	const uint32_t raw = p_sensor_result >> 6;
+
+	return (int)((raw * 5) / 2);
+}
+
+/*
+ * ******************************************************************************
+ * For DS1775: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int ds1775_t(uint32_t p_sensor_result)
+{
+	/* NT unit: 0.1 deg, Native unit: 1/256 C */
+	const uint32_t tenths = (p_sensor_result * 10) / 256;
+
+	return tenths;
+}
+
+/*
+ * ******************************************************************************
+ * For FAN: Convert a tick count to RPM
+ * NT unit: RPM, Native unit: 2 ticks/revolution
+ * ******************************************************************************
+ */
+int fan(uint32_t p_sensor_result)
+{
+	/*
+	 * NOTE(review): the header says 2 ticks/revolution, yet the scaling
+	 * divides by 4 - presumably the tick count is accumulated over a
+	 * 2-second window; confirm against the AVR firmware.
+	 */
+	return (p_sensor_result * 60U / 4);
+}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
+/* Alarm handling policy attached to a sensor (see nt_adapter_sensor.alarm) */
+enum nt_sensor_event_alarm_e {
+	NT_SENSOR_ENABLE_ALARM,
+	NT_SENSOR_LOG_ALARM,
+	NT_SENSOR_DISABLE_ALARM,
+};
+
+/*
+ * Sensor Class types
+ */
+/* Hardware domain a sensor belongs to */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
+};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no; /* adapter number; 0xFF until assigned */
+	uint8_t m_intf_no; /* interface (port) number; 0xFF until assigned */
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si; /* signedness of the raw readout */
+	struct nt_info_sensor_s info; /* public info: value, min/max, name, state */
+	enum nt_sensor_event_alarm_e alarm; /* alarm handling policy */
+	bool m_enable_alarm; /* alarm reporting enabled (set at allocation) */
+};
+
+/* FPGA handles (module/register/fields) backing a sensor readout */
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga;
+	nt_module_t *mod;
+
+	nt_register_t *reg;
+	nt_field_t **fields; /* array of field handles; length in fields_num */
+	uint8_t fields_num; /* number of entries in fields */
+};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name; copied (truncated) into the sensor info */
+};
+
+/*
+ * Linked-list node tying a sensor to its FPGA monitor, its read callback
+ * and the raw-to-NT-unit conversion function.
+ */
+struct nt_sensor_group {
+	struct nt_adapter_sensor *sensor; /* owned; freed by sensor_deinit() */
+	struct nt_fpga_sensor_monitor *monitor; /* owned; freed by sensor_deinit() */
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi);
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result);
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next; /* next group in the list; NULL at tail */
+};
+
+/* Clear all members of a sensor group */
+void init_sensor_group(struct nt_sensor_group *sg);
+
+/* Store a new reading and update the running min/max */
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+/* Free sg together with its sensor and monitor members */
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+/* allocators - the returned object is owned by the caller */
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions: raw sensor readout -> NT internal units */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+#define NT_INFO_SENSOR_NAME 50
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (NUL-terminated) */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
+/* The NT200A02 adapter sensor id's */
+enum nt_sensors_adapter_nt200a02_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */
+
+	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
+	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NT200A02_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NIM_POWER,
+
+	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
+};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v8 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-29  8:15   ` Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Allocate a zero-initialized cat_nthw instance; returns NULL on failure */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc() allocates and zeroes in one step (replaces malloc+memset) */
+	struct cat_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a cat_nthw instance; NULL is accepted */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug-mode setting to the underlying CAT FPGA module */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
/* CFN */
/* Select the CFN table entry (ADR field) for subsequent CFN data accesses. */
void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_addr, val);
}
+
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
/*
 * CFN data-field setters. Each stages one field of the CFN_DATA register
 * via field_set_val32(); the staged values reach hardware only when
 * cat_nthw_cfn_flush() is called.
 */
void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_enable, val);
}

void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_inv, val);
}

void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_inv, val);
}

void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_isl, val);
}

void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_mac, val);
}

void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_l2, val);
}

void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
}

void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
}

void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
}

void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_l3, val);
}

void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_frag, val);
}

void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
}

void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_l4, val);
}

void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
}

void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
}

void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
}

void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
}

void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
}

void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
}

void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
}

void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
}

/*
 * The fields below are optional: they are resolved with
 * register_query_field() during init and may be NULL on FPGA images that
 * lack them, hence the asserts before use.
 */
void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_ptc_cfp);
	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
}

void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_err_l3_cs);
	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
}

void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_err_l4_cs);
	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
}

void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_err_tnl_l3_cs);
	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
}

void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_err_tnl_l4_cs);
	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
}

void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_err_ttl_exp);
	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
}

void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_err_tnl_ttl_exp);
	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
}

void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_err_inv, val);
}

void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_err_cv, val);
}

void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_err_fcs, val);
}

void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_err_trunc, val);
}

void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_mac_port, val);
}

/* PM_CMP is a multi-word field; val must hold mn_words 32-bit words. */
void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
{
	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
}

void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_pm_dct, val);
}

void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
}

void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_pm_cmb, val);
}

void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
}

void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
}

void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_pm_inv, val);
}

void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_lc, val);
}

void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_lc_inv, val);
}

void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cfn_data_km0_or, val);
}

/* KM1_OR exists only on some FPGA variants (queried during init). */
void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cfn_data_km1_or);
	field_set_val32(p->mp_cfn_data_km1_or, val);
}

/* Write the staged CFN control and data registers to hardware. */
void cat_nthw_cfn_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cfn_ctrl, 1);
	register_flush(p->mp_cfn_data, 1);
}
+
/*
 * KCE accessors. 'index' selects the KCE bank (0 or 1; bank 1 only
 * exists on FPGA variants that expose KCE0/KCE1, see init).
 */
void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_kce_addr[index], val);
}

void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_kce_cnt[index], val);
}

void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_kce_data_enable[index], val);
}

/* Write the staged KCE control and data registers for the given bank. */
void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
{
	register_flush(p->mp_kce_ctrl[index], 1);
	register_flush(p->mp_kce_data[index], 1);
}
+
/* KCS accessors; 'index' selects the KCS bank (0 or 1) as for KCE. */
void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_kcs_addr[index], val);
}

void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_kcs_cnt[index], val);
}

void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_kcs_data_category[index], val);
}

/* Write the staged KCS control and data registers for the given bank. */
void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
{
	register_flush(p->mp_kcs_ctrl[index], 1);
	register_flush(p->mp_kcs_data[index], 1);
}
+
/* FTE accessors; 'index' selects the FTE bank (0 or 1) as for KCE/KCS. */
void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_fte_addr[index], val);
}

void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_fte_cnt[index], val);
}

void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
{
	field_set_val32(p->mp_fte_data_enable[index], val);
}

/* Write the staged FTE control and data registers for the given bank. */
void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
{
	register_flush(p->mp_fte_ctrl[index], 1);
	register_flush(p->mp_fte_data[index], 1);
}
+
/*
 * CTE accessors: per-engine enable bits of CTE_DATA. The *_msk/_hst/_epp/
 * _tpe/_rrb fields are optional (register_query_field() during init) and
 * therefore assert-guarded.
 */
void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_addr, val);
}

void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_cnt, val);
}

void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_col, val);
}

void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_cor, val);
}

void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_hsh, val);
}

void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_qsl, val);
}

void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_ipf, val);
}

void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_slc, val);
}

void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cte_data_pdb, val);
}

void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_msk);
	field_set_val32(p->mp_cte_data_msk, val);
}

void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_hst);
	field_set_val32(p->mp_cte_data_hst, val);
}

void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_epp);
	field_set_val32(p->mp_cte_data_epp, val);
}

void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_tpe);
	field_set_val32(p->mp_cte_data_tpe, val);
}

void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cte_data_rrb);
	field_set_val32(p->mp_cte_data_rrb, val);
}

/* Write the staged CTE control and data registers to hardware. */
void cat_nthw_cte_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cte_ctrl, 1);
	register_flush(p->mp_cte_data, 1);
}
+
/* CTS accessors: stage CTS_DATA fields, written out by cat_nthw_cts_flush(). */
void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_addr, val);
}

void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_cnt, val);
}

void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_data_cat_a, val);
}

void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cts_data_cat_b, val);
}

/* Write the staged CTS control and data registers to hardware. */
void cat_nthw_cts_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cts_ctrl, 1);
	register_flush(p->mp_cts_data, 1);
}
+
/* COT accessors; NFV_SB is optional (queried during init), hence the assert. */
void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_addr, val);
}

void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_cnt, val);
}

void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_data_color, val);
}

void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cot_data_km, val);
}

void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cot_data_nfv_sb);
	field_set_val32(p->mp_cot_data_nfv_sb, val);
}

/* Write the staged COT control and data registers to hardware. */
void cat_nthw_cot_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cot_ctrl, 1);
	register_flush(p->mp_cot_data, 1);
}
+
/* CCT accessors: stage CCT_DATA fields, written out by cat_nthw_cct_flush(). */
void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_addr, val);
}

void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_cnt, val);
}

void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_data_color, val);
}

void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_cct_data_km, val);
}

/* Write the staged CCT control and data registers to hardware. */
void cat_nthw_cct_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_cct_ctrl, 1);
	register_flush(p->mp_cct_data, 1);
}
+
/* EXO accessors. Note: the OFS field takes a signed offset (int32_t). */
void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_exo_addr, val);
}

void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_exo_cnt, val);
}

void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_exo_data_dyn, val);
}

void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
{
	field_set_val32(p->mp_exo_data_ofs, val);
}

/* Write the staged EXO control and data registers to hardware. */
void cat_nthw_exo_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_exo_ctrl, 1);
	register_flush(p->mp_exo_data, 1);
}
+
/* RCK accessors. RCK_DATA has no named fields, so the whole register
 * value is staged with register_set_val() and marked dirty for flush.
 */
void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rck_addr, val);
}

void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rck_cnt, val);
}

void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
{
	register_set_val(p->mp_rck_data, &val, 1);
	register_make_dirty(p->mp_rck_data);
}

/* Write the staged RCK control and data registers to hardware. */
void cat_nthw_rck_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_rck_ctrl, 1);
	register_flush(p->mp_rck_data, 1);
}
+
/* LEN accessors: stage LEN_DATA fields, written out by cat_nthw_len_flush(). */
void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_addr, val);
}

void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_cnt, val);
}

void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_lower, val);
}

void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_upper, val);
}

void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_dyn1, val);
}

void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_dyn2, val);
}

void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_len_data_inv, val);
}

/* Write the staged LEN control and data registers to hardware. */
void cat_nthw_len_flush(const struct cat_nthw *p)
{
	register_flush(p->mp_len_ctrl, 1);
	register_flush(p->mp_len_data, 1);
}
+
/*
 * KCC accessors. The whole KCC register pair is optional (resolved with
 * module_query_register()/register_query_field() during init), so every
 * accessor asserts its handle before use.
 */
void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_addr);
	field_set_val32(p->mp_kcc_addr, val);
}

void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_cnt);
	field_set_val32(p->mp_kcc_cnt, val);
}

/* KEY is a two-word field; val must point at two 32-bit words. */
void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
{
	assert(p->mp_kcc_data_key);
	field_set_val(p->mp_kcc_data_key, val, 2);
}

void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_data_category);
	field_set_val32(p->mp_kcc_data_category, val);
}

void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_kcc_data_id);
	field_set_val32(p->mp_kcc_data_id, val);
}

/* Write the staged KCC control and data registers to hardware. */
void cat_nthw_kcc_flush(const struct cat_nthw *p)
{
	assert(p->mp_kcc_ctrl);
	assert(p->mp_kcc_data);
	register_flush(p->mp_kcc_ctrl, 1);
	register_flush(p->mp_kcc_data, 1);
}
+
/*
 * CCE accessors. Like KCC, the CCE registers are optional on some FPGA
 * images, hence the asserts on every handle.
 */
void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_addr);
	field_set_val32(p->mp_cce_addr, val);
}

void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_cnt);
	field_set_val32(p->mp_cce_cnt, val);
}

void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_data_imm);
	field_set_val32(p->mp_cce_data_imm, val);
}

void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_cce_data_ind);
	field_set_val32(p->mp_cce_data_ind, val);
}

/* Write the staged CCE control and data registers to hardware. */
void cat_nthw_cce_flush(const struct cat_nthw *p)
{
	assert(p->mp_cce_ctrl);
	assert(p->mp_cce_data);
	register_flush(p->mp_cce_ctrl, 1);
	register_flush(p->mp_cce_data, 1);
}
+
/* CCS accessors; CCS registers are optional, hence the asserts. */
void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_ccs_addr);
	field_set_val32(p->mp_ccs_addr, val);
}

void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
{
	assert(p->mp_ccs_cnt);
	field_set_val32(p->mp_ccs_cnt, val);
}
+
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en);
+CATNTHW_CCS_SET(cor);
+CATNTHW_CCS_SET(hsh_en);
+CATNTHW_CCS_SET(hsh);
+CATNTHW_CCS_SET(qsl_en);
+CATNTHW_CCS_SET(qsl);
+CATNTHW_CCS_SET(ipf_en);
+CATNTHW_CCS_SET(ipf);
+CATNTHW_CCS_SET(slc_en);
+CATNTHW_CCS_SET(slc);
+CATNTHW_CCS_SET(pdb_en);
+CATNTHW_CCS_SET(pdb);
+CATNTHW_CCS_SET(msk_en);
+CATNTHW_CCS_SET(msk);
+CATNTHW_CCS_SET(hst_en);
+CATNTHW_CCS_SET(hst);
+CATNTHW_CCS_SET(epp_en);
+CATNTHW_CCS_SET(epp);
+CATNTHW_CCS_SET(tpe_en);
+CATNTHW_CCS_SET(tpe);
+CATNTHW_CCS_SET(rrb_en);
+CATNTHW_CCS_SET(rrb);
+CATNTHW_CCS_SET(sb0_type);
+CATNTHW_CCS_SET(sb0_data);
+CATNTHW_CCS_SET(sb1_type);
+CATNTHW_CCS_SET(sb1_data);
+CATNTHW_CCS_SET(sb2_type);
+CATNTHW_CCS_SET(sb2_data);
+
/* Write the staged (optional) CCS control and data registers to hardware. */
void cat_nthw_ccs_flush(const struct cat_nthw *p)
{
	assert(p->mp_ccs_ctrl);
	assert(p->mp_ccs_data);
	register_flush(p->mp_ccs_ctrl, 1);
	register_flush(p->mp_ccs_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+struct csu_nthw *csu_nthw_new(void)
+{
+	struct csu_nthw *p = malloc(sizeof(struct csu_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct flm_nthw *flm_nthw_new(void)
+{
+	struct flm_nthw *p = malloc(sizeof(struct flm_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM CONTROL register field setters.
+ *
+ * Each helper only stages @val in the register shadow of the corresponding
+ * CONTROL field; nothing reaches the hardware until
+ * flm_nthw_control_flush() writes the register out.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/*
+ * PDS/PIS are assert-guarded: the field pointers may be NULL — presumably
+ * they are resolved with a query lookup and are absent on some FPGA
+ * variants (TODO confirm against the init code).
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write all staged CONTROL fields to the hardware register. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM STATUS register field accessors.
+ *
+ * Common convention: when @get is non-zero the shadow value of the field is
+ * read into *@val. CALIBDONE/INITDONE/IDLE/EFT_BP are read-only here;
+ * CRITICAL/PANIC/CRCERR additionally support writing *@val back when
+ * @get == 0 (presumably to acknowledge/clear the flag — confirm with the
+ * register documentation). Reads return the shadow; call
+ * flm_nthw_status_update() first to refresh it from hardware, and
+ * flm_nthw_status_flush() to push a written value out.
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Push staged STATUS field writes to the hardware register. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the STATUS register shadow from hardware. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT, SCRUB and LOAD_* register setters. Each *_flush() writes the
+ * staged value of the corresponding single register to hardware.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * PRIO register setters: four (limit, ft) pairs staged in the shadow and
+ * written out together by flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table access: PST_CTRL selects the entry (address + count) and
+ * PST_DATA carries the entry payload; flm_nthw_pst_flush() writes both
+ * registers, CTRL first.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table access: RCP_CTRL selects the recipe (address + count)
+ * and the RCP_DATA fields carry the recipe contents; flm_nthw_rcp_flush()
+ * writes both registers, CTRL first.
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a multi-word field: @val must point to 10 x 32-bit words. */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the FLM BUF_CTRL register (two 32-bit words) via a RAB DMA
+ * transaction and decode the buffer counters it reports.
+ *
+ * @param lrn_free  Out: free word count of the learn (LRN) buffer.
+ * @param inf_avail Out: available word count of the info (INF) buffer.
+ * @param sta_avail Out: available word count of the status (STA) buffer.
+ *
+ * @return 0 on success, otherwise the error code of the failing RAB DMA
+ *         begin/commit call. Out-parameters are written only on success.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+	int ret;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA result buffer is a ring; mask indices into range. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+
+		/* Word 0: LRN free (low 16) and INF avail (high 16); word 1: STA avail. */
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write learn records to the FLM LRN_DATA register in one RAB DMA
+ * transaction: announce @word_count in BUF_CTRL, write the records, then
+ * read BUF_CTRL back to refresh the buffer counters.
+ *
+ * @param data       Learn record payload.
+ * @param word_count Number of 32-bit words in @data.
+ * @param lrn_free   Out: free word count of the learn (LRN) buffer.
+ * @param inf_avail  Out: available word count of the info (INF) buffer.
+ * @param sta_avail  Out: available word count of the status (STA) buffer.
+ *
+ * @return 0 on success, otherwise the error code of the failing RAB DMA
+ *         begin/commit call. A begin failure is now propagated instead of
+ *         being collapsed to -1, matching the sibling update functions.
+ *         Out-parameters are written only on success.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+	int ret;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA result buffer is a ring; mask indices into range. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read @word_count words from the FLM INF_DATA register in one RAB DMA
+ * transaction: announce the count in BUF_CTRL (high 16 bits of word 0),
+ * read the records, then read BUF_CTRL back to refresh the counters.
+ *
+ * @param data       Out: buffer for @word_count 32-bit words.
+ * @param word_count Number of 32-bit words to read.
+ * @param lrn_free   Out: free word count of the learn (LRN) buffer.
+ * @param inf_avail  Out: available word count of the info (INF) buffer.
+ * @param sta_avail  Out: available word count of the status (STA) buffer.
+ *
+ * @return 0 on success, otherwise the error code of the failing RAB DMA
+ *         begin/commit call. Out-parameters are written only on success.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+	int ret;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy the records out of the DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read @word_count words from the FLM STA_DATA register in one RAB DMA
+ * transaction: announce the count in BUF_CTRL (word 1), read the records,
+ * then read BUF_CTRL back to refresh the counters.
+ *
+ * @param data       Out: buffer for @word_count 32-bit words.
+ * @param word_count Number of 32-bit words to read.
+ * @param lrn_free   Out: free word count of the learn (LRN) buffer.
+ * @param inf_avail  Out: available word count of the info (INF) buffer.
+ * @param sta_avail  Out: available word count of the status (STA) buffer.
+ *
+ * @return 0 on success, otherwise the error code of the failing RAB DMA
+ *         begin/commit call. Out-parameters are written only on success.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+	int ret;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy the records out of the DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM STAT_* counter accessors.
+ *
+ * Pattern: each *_cnt(p, val, get) copies the register shadow into *@val
+ * when @get is non-zero (counters are read-only here), and each *_update()
+ * refreshes the shadow from hardware. Call *_update() before *_cnt() to
+ * get a current value.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+/*
+ * The PRB (probe) registers are resolved via query lookups during init and
+ * may be absent on some FPGA images; hence the asserts.
+ */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+/*
+ * Optional STAT_* counter accessors. These registers/fields are resolved
+ * with query lookups during init and may be absent on some FPGA images,
+ * so every accessor asserts the pointer before use. Same get/update
+ * pattern as the core STAT accessors above.
+ */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn _done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl _done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb _done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel _done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul _done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul _done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta _done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf _done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Driver context for one FLM (Flow Matcher) FPGA module instance.
+ * Holds the resolved register and field handles used by the flm_nthw_*
+ * accessor functions above; populated by flm_nthw_init() and released
+ * by flm_nthw_delete().
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* Load BIN/PPS/LPS/APS registers */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/flow-type pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST table: indexed via ctrl (adr/cnt), written via data */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP (recipe) table: indexed via ctrl (adr/cnt), written via data */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status data stream buffers and their control */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counters (register + single counter field each) */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module model. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a HFU module context.
+ *
+ * Returns NULL on allocation failure.  The caller owns the returned
+ * object and must release it with hfu_nthw_delete() after (optionally)
+ * binding it to an FPGA instance with hfu_nthw_init().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() zero-initializes in a single call, replacing the
+	 * original malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/*
+ * Release a HFU context created by hfu_nthw_new(); NULL is accepted.
+ * The struct is scrubbed before freeing so stale register/field handles
+ * cannot be reused through a dangling pointer.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a HFU context to FPGA module instance 'n_instance'.
+ *
+ * When 'p' is NULL the call only probes for the module: it returns 0 if
+ * the instance exists and -1 otherwise, without touching any state.
+ * With a non-NULL 'p' it resolves and caches every HFU RCP control and
+ * data register field handle for later use by the hfu_nthw_rcp_*
+ * accessors.  Returns 0 on success, -1 if the instance does not exist.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hfu = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/* Select the RCP table index (address) for subsequent data writes. */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP control count field. */
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/*
+ * RCP_DATA length-field setters for the A/B/C length groups.
+ * Each writes a single shadow field; nothing reaches hardware until
+ * hfu_nthw_rcp_flush() is called.
+ */
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+/*
+ * RCP_DATA TTL, protocol-info and header-offset field setters.
+ * Each writes a single shadow field; nothing reaches hardware until
+ * hfu_nthw_rcp_flush() is called.
+ */
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Commit the staged RCP control and data shadow registers to hardware. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Driver context for one HFU FPGA module instance.
+ * Holds the resolved RCP control/data register and field handles used
+ * by the hfu_nthw_rcp_* accessors; populated by hfu_nthw_init().
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP control register: table address and count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the requested debug mode to the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HSH context.
+ *
+ * Returns the new context, or NULL on allocation failure.  The caller owns
+ * the memory and must release it with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset). */
+	return calloc(1, sizeof(struct hsh_nthw));
+}
+
+/* Scrub and free a context created by hsh_nthw_new(); NULL is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to HSH module instance @n_instance of @p_fpga, cache all RCP
+ * register/field handles, and write a zeroed default recipe 0 to hardware.
+ *
+ * If @p is NULL the call is a probe only: returns 0 when the instance
+ * exists and -1 when it does not.  Returns -1 (with a log line) when the
+ * instance is missing.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache control (ADR/CNT) and data register field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* AUTO_IPV4_MASK is optional: query (may yield NULL) rather than get. */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: stage zeroed defaults for recipe 0, then flush to hardware. */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* NOTE(review): 31 appears to be the default HSH_TYPE encoding —
+	 * confirm against the HSH register documentation.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field writers: each helper stages one value in the shadow copy of the
+ * HSH RCP control/data register.  Nothing reaches the FPGA until
+ * hsh_nthw_rcp_flush() is called.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* Multi-word field: copies mn_words 32-bit words from @val. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Signed offset; passed through to the 32-bit field writer unchanged. */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* WORD_MASK spans 10 32-bit words. */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* Optional field: silently ignored when absent on this FPGA image. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Commit the staged control and data shadow registers to the hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+/* Lifetime: allocate with hsh_nthw_new(), release with hsh_nthw_delete(). */
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage recipe fields, then commit with hsh_nthw_rcp_flush(). */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Register/field shadow handles for one HSH module instance. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register (address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask; /* optional; may be NULL */
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the requested debug mode to the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HST context.
+ *
+ * Returns the new context, or NULL on allocation failure.  The caller owns
+ * the memory and must release it with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset). */
+	return calloc(1, sizeof(struct hst_nthw));
+}
+
+/* Scrub and free a context created by hst_nthw_new(); NULL is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to HST module instance @n_instance of @p_fpga and cache the RCP
+ * register/field handles.  Unlike hsh_nthw_init(), no default recipe is
+ * written and nothing is flushed to hardware here.
+ *
+ * If @p is NULL the call is a probe only: returns 0 when the instance
+ * exists and -1 when it does not.  Returns -1 (with a log line) when the
+ * instance is missing.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache control (ADR/CNT) and data register field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP field writers: each helper stages one value in the shadow copy of the
+ * HST RCP control/data register.  Nothing reaches the FPGA until
+ * hst_nthw_rcp_flush() is called.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Commit the staged control and data shadow registers to the hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handles for one HST module instance. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register (address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+/* Lifetime: allocate with hst_nthw_new(), release with hst_nthw_delete(). */
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage recipe fields, then commit with hst_nthw_rcp_flush(). */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the requested debug mode to the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IFR context.
+ *
+ * Returns the new context, or NULL on allocation failure.  The caller owns
+ * the memory and must release it with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset). */
+	return calloc(1, sizeof(struct ifr_nthw));
+}
+
+/* Scrub and free a context created by ifr_nthw_new(); NULL is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to IFR module instance @n_instance of @p_fpga and cache the RCP
+ * control/data register and field handles.
+ *
+ * If @p is NULL the call is a probe only: returns 0 when the instance
+ * exists and -1 when it does not.  Returns -1 (with a log line) when the
+ * instance is missing.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above (was a redundant second
+	 * fpga_query_module() call); consistent with hsh/hst init.
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/*
+ * RCP field writers: stage values in the shadow registers; commit with
+ * ifr_nthw_rcp_flush().
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Stage the MTU recipe field; commit with ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix: assert the field actually written (was mp_rcp_data_en). */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Commit the staged control and data shadow registers to the hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handles for one IFR module instance. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register (address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+/* Lifetime: allocate with ifr_nthw_new(), release with ifr_nthw_delete(). */
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR: stage recipe fields, then commit with ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Saturate a presence/count product parameter to the range [0, 1]. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val != 0 ? 1 : 0;
+}
+
+/*
+ * Allocate a zero-initialized INFO context.
+ *
+ * Returns the new context, or NULL on allocation failure.  The caller owns
+ * the memory and must release it with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset). */
+	return calloc(1, sizeof(struct info_nthw));
+}
+
+/* Scrub and free a context created by info_nthw_new(); NULL is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Snapshot FPGA product parameters (capabilities) into @p for instance
+ * @n_instance.  Presence flags (KM, KCC, IOA, ROA, DBS, FLM, HST, TPE
+ * submodules) are clamped to 0/1 and used to zero out capability counts of
+ * absent modules.  Always returns 0.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is usable only when all five of its submodules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types exist when either KM or FLM is present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS usable queue count is limited by the smaller direction. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): -1 is the "parameter absent" default here, but the
+	 * accessors return it as unsigned — presumably a sentinel; confirm.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Read-only accessors for the capability snapshot taken by info_nthw_init().
+ * Each returns the cached value; none touches the FPGA.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): backing field defaults to -1 when the FPGA parameter is
+ * absent; returned here as unsigned — callers presumably treat it as a
+ * sentinel.  Confirm.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Snapshot of capability/parameter values for one FPGA instance,
+ * populated by info_nthw_init() and exposed through the
+ * info_nthw_get_*() accessors.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	/* Port topology */
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	/* Categorizer (CAT) capabilities */
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	/* Key matcher (KM) CAM/TCAM dimensions */
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	/* Flow matcher (FLM) capabilities */
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	/* Per-module category counts */
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	/* CAT-to-KM interface configuration */
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	/* TX packet editor (TPE) capabilities */
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA register module. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IOA context.
+ * Returns NULL on allocation failure; release with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zero-fills in one call, replacing malloc() + memset() */
+	struct ioa_nthw *p = calloc(1, sizeof *p);
+
+	return p;
+}
+
+/*
+ * Release an IOA context created by ioa_nthw_new(); NULL is accepted.
+ * The struct is scrubbed before free so stale users fault fast
+ * (not a security wipe — a compiler may elide this memset).
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IOA context to FPGA module instance @n_instance and cache the
+ * register/field handles used by the write wrappers.
+ *
+ * May be called with @p == NULL as a pure probe: the return value then
+ * only reports whether the module instance exists (0) or not (-1).
+ * Returns 0 on success, -1 if the instance is absent.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report instance presence without initializing */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * The EPP registers are optional, so module_query_register()
+		 * is used and NULL handles are kept when they are absent;
+		 * the EPP wrappers check for NULL before writing.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Register-field write wrappers. Each setter only updates the shadow
+ * value of a single field via field_set_val32(); the *_flush() helpers
+ * then hand the shadowed CTRL/DATA registers to register_flush()
+ * (presumably committing them to hardware — see nthw_fpga_model).
+ */
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * EPP wrappers: the EPP registers are optional in the FPGA image, so all
+ * handles may be NULL (see ioa_nthw_init) and every write is guarded.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Cached register/field handles for one IOA module instance.
+ * Filled in by ioa_nthw_init(); the mp_roa_epp_* members are NULL when
+ * the optional EPP registers are absent from the FPGA image.
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	/* RCP control/data registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	/* Special VLAN TPID register */
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP control bits (may be NULL) */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write a field value only when the (optional) field exists; the
+ * register_query_field() probes in km_nthw_init() leave absent fields
+ * as NULL. The field expression is captured once into a local so it is
+ * evaluated a single time, both arguments are parenthesized in the
+ * expansion (the original left "val" bare — an operator-precedence
+ * hazard), and do { } while (0) keeps the macro statement-safe.
+ */
+#define CHECK_AND_SET_VALUE(_a, _val)                     \
+	do {                                              \
+		__typeof__(_a) _fld = (_a);               \
+		if (_fld)                                 \
+			field_set_val32(_fld, (_val));    \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM register module. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized KM context.
+ * Returns NULL on allocation failure; release with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zero-fills in one call, replacing malloc() + memset() */
+	struct km_nthw *p = calloc(1, sizeof *p);
+
+	return p;
+}
+
+/*
+ * Release a KM context created by km_nthw_new(); NULL is accepted.
+ * The struct is scrubbed before free so stale users fault fast
+ * (not a security wipe — a compiler may elide this memset).
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a KM context to FPGA module instance @n_instance and cache all
+ * register/field handles (RCP, CAM, TCAM, TCI, TCQ).
+ *
+ * Fields that vary between FPGA versions are looked up with
+ * register_query_field() and may remain NULL; the setters guard for
+ * that via CHECK_AND_SET_VALUE.
+ *
+ * May be called with @p == NULL as a pure probe: the return value then
+ * only reports whether the module instance exists (0) or not (-1).
+ * Returns 0 on success, -1 if the instance is absent.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report instance presence without initializing */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Optional fields: presence selects the DW8/SW8 layout below */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines: fall back to legacy QW/SW field names,
+		 * reusing the same handle members for either layout
+		 */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/*
+ * RCP register-field write wrappers. Plain setters write mandatory
+ * fields directly; CHECK_AND_SET_VALUE guards the optional fields that
+ * may be NULL for this FPGA version. km_nthw_rcp_flush() hands the
+ * shadowed CTRL/DATA registers to register_flush().
+ *
+ * Fix: dropped the stray ';' after each function body — an extra
+ * top-level semicolon is invalid ISO C before C23 and warns under
+ * -Wpedantic.
+ */
+/* RCP */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+} /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3); /* 3 x 32-bit words */
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val); /* optional field - skipped when absent */
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+} /* to use in v4 */
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	struct pdb_nthw *p = malloc(sizeof(struct pdb_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	struct qsl_nthw *p = malloc(sizeof(struct qsl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_cnt) /* guard the field actually written; LTX absent from v0.7+ */
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_ltx_ctrl, 1);
+	register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handle for one QSL (queue selection) FPGA module. */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP: recipe table */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX: local retransmit table */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST: queue state table (some fields optional; may be NULL) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN: queue enable table */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ: unmatched-packet queue table */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying RMC module model. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zeroed RMC handle; returns NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	struct rmc_nthw *p = malloc(sizeof(struct rmc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an RMC handle; scrubbed first so stale pointers are not reused. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind RMC module instance @n_instance and resolve its CTRL register fields.
+ * Calling with p == NULL only probes for the module's presence.
+ * Returns 0 on success, -1 if the FPGA has no such RMC instance.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* RPP_SLICE is optional in the register map: query may yield NULL */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-instance setup needed for RMC; kept for interface symmetry. */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL: RMC control register field accessors. */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* RPP_SLICE is an optional field; silently ignored when absent. */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Write the shadowed CTRL register to hardware. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handle for one RMC (RX MAC control) FPGA module. */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	/* CTRL register and its fields (rpp_slice may be NULL: optional) */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying ROA module model. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zeroed ROA handle; returns NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	struct roa_nthw *p = malloc(sizeof(struct roa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free a ROA handle; scrubbed first so stale pointers are not reused. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind ROA module instance @n_instance and resolve all of its register
+ * fields (TUN HDR, TUN CFG, CONFIG, LAG).
+ * Calling with p == NULL only probes for the module's presence.
+ * Returns 0 on success, -1 if the FPGA has no such ROA instance.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR: tunnel-header table accessors. */
+
+/* TUN HDR: select table index for subsequent data writes. */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+/* TUN HDR: set auto-increment count for batched writes. */
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* TUN HDR: write 4 x 32-bit words of tunnel header data. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* TUN HDR: write the shadowed CTRL/DATA registers to hardware. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG: tunnel-configuration table field accessors. */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+/* IP checksum update enable for the selected recipe. */
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+/* IP total-length update enable for the selected recipe. */
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* TUN CFG: set TX LAG index for the selected recipe. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* TUN CFG: write the shadowed CTRL/DATA registers to hardware. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG: global forwarding configuration field accessors. */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* CONFIG: write the shadowed register to hardware. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG: link-aggregation configuration table accessors. */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* LAG: write the shadowed CTRL/DATA registers to hardware. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/* Register/field shadow handle for one ROA (re-order/assist) FPGA module. */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR: tunnel header table */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG: tunnel configuration table */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG: global forwarding configuration */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG: link aggregation configuration table */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying RPP_LR module model. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zeroed RPP_LR handle; returns NULL on allocation failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free an RPP_LR handle; scrubbed first so stale pointers are not reused. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind RPP_LR module instance @n_instance and resolve its register fields.
+ * Calling with p == NULL only probes for the module's presence.
+ * Returns 0 on success, -1 if the FPGA has no such instance.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the module looked up above instead of querying it again */
+	p->m_rpp_lr = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	/* IFR RCP is optional: query_* calls may yield NULL on older images */
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP: select recipe index for subsequent data writes. */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* RCP: set auto-increment count for batched writes. */
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* RCP: set header-expansion value for the selected recipe. */
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* RCP: write the shadowed CTRL/DATA registers to hardware. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * IFR RCP accessors. These registers/fields are optional in the FPGA
+ * layout (resolved with query_* in init, so the pointers may be NULL).
+ * NOTE(review): the asserts only guard debug builds; with NDEBUG a
+ * missing IFR register would dereference NULL here - confirm callers
+ * gate these on module capability before use.
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* IFR RCP: write the shadowed CTRL/DATA registers to hardware. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handle for one RPP_LR FPGA module. */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	/* RCP: recipe table */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR RCP: optional; pointers are NULL when the FPGA lacks IFR */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC module model. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zeroed SLC handle; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	struct slc_nthw *p = malloc(sizeof(struct slc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free an SLC handle; scrubbed first so stale pointers are not reused. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind SLC module instance @n_instance and resolve its RCP register fields.
+ * Calling with p == NULL only probes for the module's presence.
+ * Returns 0 on success, -1 if the FPGA has no such SLC instance.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the module looked up above instead of querying it again */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP: slicer recipe table field accessors. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Tail offset is signed; converted to the 32-bit field representation. */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* RCP: write the shadowed CTRL/DATA registers to hardware. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one SLC (slicer) FPGA module instance. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number given to _init() */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc;	/* the SLC module this context is bound to */
+
+	/* RCP control register: record address (ADR) and burst count (CNT) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* Fields of the RCP data register */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+/* Allocate a zeroed context; release with slc_nthw_delete(). */
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+/* Bind the context to FPGA instance n_instance; returns 0 on success, -1 on error. */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage fields for the selected record, then commit with _flush(). */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the requested debug level to the underlying SLC LR module. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	nt_module_t *const mod = p->m_slc_lr;
+
+	module_set_debug_mode(mod, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized SLC LR context.
+ * Returns NULL on allocation failure; release with slc_lr_nthw_delete().
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	/* calloc() zero-fills in one step, replacing malloc()+memset(). */
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Scrub and release a context from slc_lr_nthw_new(); NULL is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an SLC LR context to FPGA instance n_instance and resolve all RCP
+ * register/field handles.
+ * When p is NULL the call degenerates to a probe: it only reports whether
+ * the module instance exists (0) or not (-1).
+ * Returns 0 on success, -1 if the module instance is not present.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Fixed module name in message: this is SLC LR, not SLC. */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle already queried above instead of a second lookup. */
+	p->m_slc_lr = p_mod;
+
+	/* RCP control register: record address and burst count. */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	/* RCP data register: slice-enable, dynamic part, offset and pcap flag. */
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Select which SLC LR RCP record subsequent data-register writes will target. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const adr = p->mp_rcp_addr;
+
+	field_set_val32(adr, val);
+}
+
+/* Set the RCP burst count used by the next flush. */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const cnt = p->mp_rcp_cnt;
+
+	field_set_val32(cnt, val);
+}
+
+/* Stage the TAIL_SLC_EN field of the currently selected RCP record. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_tail_slc_en;
+
+	field_set_val32(f, val);
+}
+
+/* Stage the TAIL_DYN field of the currently selected RCP record. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_tail_dyn;
+
+	field_set_val32(f, val);
+}
+
+/*
+ * Stage the signed TAIL_OFS field of the selected RCP record.
+ * The int32_t value is handed to the 32-bit field setter unchanged.
+ */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_tail_ofs;
+
+	field_set_val32(f, val);
+}
+
+/* Stage the PCAP field of the currently selected RCP record. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_pcap;
+
+	field_set_val32(f, val);
+}
+
+/* Commit the staged RCP control and data registers to the hardware. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	nt_register_t *const ctrl = p->mp_rcp_ctrl;
+	nt_register_t *const data = p->mp_rcp_data;
+
+	register_flush(ctrl, 1);
+	register_flush(data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one SLC LR FPGA module instance. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number given to _init() */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc_lr;	/* the SLC LR module this context is bound to */
+
+	/* RCP control register: record address (ADR) and burst count (CNT) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* Fields of the RCP data register */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+/* Allocate a zeroed context; release with slc_lr_nthw_delete(). */
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+/* Bind the context to FPGA instance n_instance; returns 0 on success, -1 on error. */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage fields for the selected record, then commit with _flush(). */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the requested debug level to the underlying TX_CPY module. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	nt_module_t *const mod = p->m_tx_cpy;
+
+	module_set_debug_mode(mod, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_CPY context.
+ * Returns NULL on allocation failure; release with tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	/* calloc() zero-fills in one step, replacing malloc()+memset(). */
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/*
+ * Release a context from tx_cpy_nthw_new(), including its writer table.
+ * NULL is a no-op.
+ */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	free(p->m_writers);
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TX_CPY context to FPGA instance n_instance and resolve the
+ * register/field handles for every writer present in the FPGA product.
+ * When p is NULL the call degenerates to a probe: it only reports whether
+ * the module instance exists (0) or not (-1).
+ * Returns 0 on success, -1 on missing module, bad writer count or OOM.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup. */
+	p->m_tx_cpy = p_mod;
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* A non-zero variant means the optional byte-mask registers exist. */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/*
+	 * The cases deliberately fall through: a product with N writers
+	 * resolves writers N-1 down to 0. At most six writers are supported;
+	 * larger counts resolve only the first six.
+	 */
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:
+		/* Unreachable: writers_cnt >= 1 was verified above. */
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Select which entry of writer `index`'s table the next data write targets. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_ctrl_addr, val);
+}
+
+/* Set the burst count for writer `index`'s next flush. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_ctrl_cnt, val);
+}
+
+/* Stage the READER_SELECT field of writer `index`'s data register. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_reader_select, val);
+}
+
+/* Stage the DYN field of writer `index`'s data register. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_dyn, val);
+}
+
+/* Stage the OFS field of writer `index`'s data register. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_ofs, val);
+}
+
+/* Stage the LEN field of writer `index`'s data register. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	field_set_val32(w->mp_writer_data_len, val);
+}
+
+/*
+ * Stage the MASK_POINTER field of writer `index`'s data register.
+ * Asserts the field handle exists (only resolved for variant != 0 products).
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_data_mask_pointer);
+	field_set_val32(w->mp_writer_data_mask_pointer, val);
+}
+
+/* Commit writer `index`'s staged control and data registers to hardware. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	register_flush(w->mp_writer_ctrl, 1);
+	register_flush(w->mp_writer_data, 1);
+}
+
+/* Select the mask-table entry for writer `index` (mask-capable products only). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_ctrl_addr);
+	field_set_val32(w->mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set the mask-table burst count for writer `index` (mask-capable products only). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_ctrl_cnt);
+	field_set_val32(w->mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Stage the byte-mask word for writer `index` (mask-capable products only). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_data_byte_mask);
+	field_set_val32(w->mp_writer_mask_data_byte_mask, val);
+}
+
+/* Commit writer `index`'s staged mask control/data registers to hardware. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+
+	const struct tx_cpy_writers_s *const w = &p->m_writers[index];
+
+	assert(w->mp_writer_mask_ctrl);
+	assert(w->mp_writer_mask_data);
+	register_flush(w->mp_writer_mask_ctrl, 1);
+	register_flush(w->mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one TX_CPY writer. */
+struct tx_cpy_writers_s {
+	/* control register: entry address (ADR) and burst count (CNT) */
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	/* data register and its fields */
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer;	/* NULL unless mask-capable variant */
+
+	/* mask registers; NULL unless the product variant supports masks */
+	nt_register_t *mp_writer_mask_ctrl;
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Context for one TX_CPY FPGA module instance and its writer table. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number given to _init() */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_cpy;
+
+	unsigned int m_writers_cnt;	/* number of entries in m_writers */
+	struct tx_cpy_writers_s *m_writers;	/* allocated by _init(), freed by _delete() */
+};
+
+/* Allocate a zeroed context; release with tx_cpy_nthw_delete(). */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+/* Bind the context to FPGA instance n_instance; returns 0 on success, -1 on error. */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+/* Stage per-writer fields, then commit with the matching _flush(). */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+/* Mask accessors are valid only on mask-capable product variants. */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the requested debug level to the underlying TX_INS module. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	nt_module_t *const mod = p->m_tx_ins;
+
+	module_set_debug_mode(mod, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_INS context.
+ * Returns NULL on allocation failure; release with tx_ins_nthw_delete().
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	/* calloc() zero-fills in one step, replacing malloc()+memset(). */
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Scrub and release a context from tx_ins_nthw_new(); NULL is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TX_INS context to FPGA instance n_instance and resolve the RCP
+ * register/field handles.
+ * When p is NULL the call degenerates to a probe: it only reports whether
+ * the module instance exists (0) or not (-1).
+ * Returns 0 on success, -1 if the module instance is not present.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle already queried above instead of a second lookup. */
+	p->m_tx_ins = p_mod;
+
+	/* RCP control register plus the fields of the RCP data register. */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Select which TX_INS RCP record subsequent data-register writes will target. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const adr = p->mp_rcp_addr;
+
+	field_set_val32(adr, val);
+}
+
+/* Set the RCP burst count used by the next flush. */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const cnt = p->mp_rcp_cnt;
+
+	field_set_val32(cnt, val);
+}
+
+/* Stage the DYN field of the currently selected RCP record. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_dyn;
+
+	field_set_val32(f, val);
+}
+
+/* Stage the OFS field of the currently selected RCP record. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_ofs;
+
+	field_set_val32(f, val);
+}
+
+/* Stage the LEN field of the currently selected RCP record. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_len;
+
+	field_set_val32(f, val);
+}
+
+/* Commit the staged RCP control and data registers to the hardware. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	nt_register_t *const ctrl = p->mp_rcp_ctrl;
+	nt_register_t *const data = p->mp_rcp_data;
+
+	register_flush(ctrl, 1);
+	register_flush(data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one TX_INS FPGA module instance. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number given to _init() */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_ins;
+
+	/* RCP control register: record address (ADR) and burst count (CNT) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+/* Allocate a zeroed context; release with tx_ins_nthw_delete(). */
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+/* Bind the context to FPGA instance n_instance; returns 0 on success, -1 on error. */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage fields for the selected record, then commit with _flush(). */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
/* Forward the debug-mode setting to the underlying TX_RPL module. */
void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
}
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_rpl = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
/* Stage the RCP record address (ADR) to access. */
void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_addr, val);
}

/* Stage the RCP record count (CNT). */
void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_cnt, val);
}

/* Stage the RCP DYN field value. */
void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_dyn, val);
}

/* Stage the RCP OFS field value. */
void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ofs, val);
}

/* Stage the RCP LEN field value. */
void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_len, val);
}

/* Stage the RCP RPL_PTR field value. */
void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
}

/* Stage the RCP EXT_PRIO field value. */
void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ext_prio, val);
}

/* Commit the staged RCP control and data registers (one entry each). */
void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
+
/* Stage the EXT table address (ADR) to access. */
void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_addr, val);
}

/* Stage the EXT entry count (CNT). */
void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_cnt, val);
}

/* Stage the EXT RPL_PTR field value. */
void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_data_rpl_ptr, val);
}

/* Commit the staged EXT control and data registers (one entry each). */
void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_ext_ctrl, 1);
	register_flush(p->mp_ext_data, 1);
}
+
/* Stage the RPL table address (ADR) to access. */
void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_addr, val);
}

/* Stage the RPL entry count (CNT). */
void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_cnt, val);
}

/* Stage a 4 x 32-bit word RPL data VALUE from @val. */
void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
{
	field_set_val(p->mp_rpl_data_value, val, 4);
}

/* Commit the staged RPL control and data registers (one entry each). */
void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rpl_ctrl, 1);
	register_flush(p->mp_rpl_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef __FLOW_NTHW_TX_RPL_H__
#define __FLOW_NTHW_TX_RPL_H__

#include <stdint.h>
#include "nthw_fpga_model.h"

/*
 * Register/field handles for one TX_RPL FPGA module instance.
 * Populated by tx_rpl_nthw_init() and used by the accessor functions below.
 */
struct tx_rpl_nthw {
	uint8_t m_physical_adapter_no;	/* adapter/instance number */
	nt_fpga_t *mp_fpga;		/* owning FPGA handle */

	nt_module_t *m_tx_rpl;		/* TX_RPL module handle */

	/* RCP (recipe) control register and its ADR/CNT fields */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_ctrl_addr;
	nt_field_t *mp_rcp_ctrl_cnt;

	/* RCP data register and its fields */
	nt_register_t *mp_rcp_data;
	nt_field_t *mp_rcp_data_dyn;
	nt_field_t *mp_rcp_data_ofs;
	nt_field_t *mp_rcp_data_len;
	nt_field_t *mp_rcp_data_rpl_ptr;
	nt_field_t *mp_rcp_data_ext_prio;

	/* EXT table control/data registers and their fields */
	nt_register_t *mp_ext_ctrl;
	nt_field_t *mp_ext_ctrl_addr;
	nt_field_t *mp_ext_ctrl_cnt;

	nt_register_t *mp_ext_data;
	nt_field_t *mp_ext_data_rpl_ptr;

	/* RPL table control/data registers and their fields */
	nt_register_t *mp_rpl_ctrl;
	nt_field_t *mp_rpl_ctrl_addr;
	nt_field_t *mp_rpl_ctrl_cnt;

	nt_register_t *mp_rpl_data;
	nt_field_t *mp_rpl_data_value;
};

/* Allocate a zeroed context; returns NULL on allocation failure. */
struct tx_rpl_nthw *tx_rpl_nthw_new(void);
/* Zero and free a context allocated by tx_rpl_nthw_new(). */
void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
/* Resolve module/register/field handles; 0 on success, -1 if absent. */
int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);

int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);

/* RCP: stage field values with the setters, then commit with flush */
void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);

/* EXT: stage field values, then commit with flush */
void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);

/* RPL: stage field values (VALUE is 4 x 32-bit words), then flush */
void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);

#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v8 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-29  8:15   ` [PATCH v8 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-29  8:15   ` Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
/*
 * Write a prebuilt tunnel header into the ROA TUNHDR table at @index.
 *
 * Writes num_writes * 4 32-bit words: 4 * 4 = 64 bytes for IPv4 or
 * 8 * 4 = 128 bytes otherwise (IPv6).  Within each group of 4 words the
 * words are written in reverse order, converted from network to host
 * byte order with ntohl().
 *
 * Returns 0 on success; non-zero if any backend write failed (backend
 * return codes are OR-combined).
 */
int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
				   struct tunnel_header_s *tun)
{
	int err = 0;
	int num_writes = (tun->ip_version == 4) ? 4 : 8;

	/*
	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
	 */
	for (int i = 0; (i < num_writes) && !err; i++) {
		for (int ii = 0; (ii < 4) && !err; ii++) {
			/* must write each 4 words backwards! */
			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
						     index, i * 4 + ii,
						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
		}
	}

	return err;
}
+
/*
 * Program the ROA TUNCFG record at @index from the packed 64-bit
 * @color_actions word (built with the set_roa_*() helpers).
 *
 * Configures tunnel push (length, type, IP header length and checksum
 * pre-calculation), recirculation (an explicit bypass port overrides a
 * recirculate port), the TX destination port, and the companion IOA
 * "EPP" recipe that mirrors the ROA settings.
 *
 * Returns 0 on success, -1 on an unsupported TX port combination.
 */
int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
	/*
	 * If tunnel header specified
	 */
	int tun_len = get_roa_tunhdr_len(color_actions);

	if (tun_len) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
				      tun_len);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
				      roa_get_tun_type(color_actions));

		/* set the total tunnel IP header length */
		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 */
			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
					       sizeof(struct flow_elem_ipv6))) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
				/* tunnel header length excludes the IPv6 header itself */
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   (sizeof(struct flow_elem_eth) +
						    sizeof(struct flow_elem_ipv6))));
			}
		} else {
			/* IPv4 */
			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
						      index, 1);
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   sizeof(struct flow_elem_eth)));
			}
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
				      get_roa_tun_ip_type(color_actions));

		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 - Do not update the IP checksum in the tunnel header */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
					      0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index, 0);
		} else {
			/* IPv4 */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
					      index, 1);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index,
					      get_roa_tun_ip_csum(color_actions));
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
				      index, 1);

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
	}

	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);

	if (recirculate_bypass) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      recirculate_bypass);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);

	} else {
		int32_t recirculate_port = roa_get_recirc_port(color_actions);

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      255);

		if (recirculate_port >= 0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
					      index, recirculate_port);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);
		}
	}

	uint8_t tx = roa_get_tx(color_actions);

	if (tx) {
		if (tx == DESTINATION_TX_PHY0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
		} else if (tx == DESTINATION_TX_PHY1) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY1);
		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
			/* both PHYs requested: TX via PHY0 and recirculate
			 * to cover the remaining port
			 */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
					      index, 0x81); /* port 1 - only port left */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);

		} else {
			return -1; /* ERR */
		}
	}

	/*
	 * Special IOA memory that contains ROA information - bad FPGA design
	 */
	if (tx || tun_len) {
		if (be->ioa.ver > 3 && tun_len &&
				get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, 2);
		} else {
			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, !!tun_len);
		}
		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
	}

	return 0;
}
+
/*
 * Program the IOA RCP record at @index from the packed @color_actions
 * word (built with the ioa_*() helpers): outer tunnel (VXLAN) pop,
 * outer VLAN pop, VLAN push with TPID/TCI, and an optional queue
 * override.  Always returns 0.
 */
int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	if (color_actions & ioa_set_vxlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
	}

	if (color_actions & ioa_set_vlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
	}

	int tpid_sel = ioa_get_tpid_sel(color_actions);

	if (color_actions & ioa_set_vlan_push(0, 0)) {
		uint16_t tci = ioa_get_vlan_tci(color_actions);

		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
		       tpid_sel ? 0x88a8 : 0x8100, tci);
		/* split the TCI into its VID / DEI / PCP sub-fields */
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
				   tci & 0x0FFF);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
				   (tci >> 12) & 0x1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
				   (tci >> 13) & 0x7);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
	}

	int queue = ioa_get_queue(color_actions);

	if (queue >= 0) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
	}

	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
#define MAX_COLOR_FLOW_STATS 0x400

#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
#define MAX_REPLICATION_PORTS 2

enum {
	DESTINATION_TX_NONE = 0,
	DESTINATION_TX_PHY0 = 1,
	DESTINATION_TX_PHY1 = 2
};

enum { TUN_IPV4 = 0, TUN_IPV6 };

enum {
	VLAN_TPID_802_1Q = 0,
	VLAN_TPID_802_1AD,
	VLAN_TPID_CUSTOM_0,
	VLAN_TPID_CUSTOM_1
};

enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };

/*
 * before version 6 of QSL
 */
#if (MAX_COLOR_FLOW_STATS == 0x4000)
#define MAX_HW_FLOW_STATS_OLD 0x3fff

#else
#if (MAX_COLOR_FLOW_STATS == 0x400)
#define MAX_HW_FLOW_STATS_OLD 0x03ff
#else
#error *** Unsupported number of color statistics counter ***
#endif
#endif

/*
 * Legacy color action layout (substituted from 4.1+).  Two variants,
 * selected by MAX_COLOR_FLOW_STATS:
 *
 * 13:0   Mark (color) 16384 flow stats
 * 21:14  IOA index      256 entries
 * 29:22  ROA index      256 entries
 * 31:30  1 to indicate this layout
 * or
 *  9:0   Mark (color) 1024 flow stats
 * 19:10  IOA index    1024 entries
 * 29:20  ROA index    1024 entries
 * 31:30  0 to indicate this layout
 */
static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
		uint32_t roa_rcp)
{
	const uint32_t mark = color & MAX_HW_FLOW_STATS_OLD;

#if (MAX_COLOR_FLOW_STATS == 0x400)
	/* 10-bit IOA/ROA indices; layout flag 0 in bits 31:30 */
	return mark | ((ioa_rcp & 0x3ff) << 10) | ((roa_rcp & 0x3ff) << 20);
#else
	/* 8-bit IOA/ROA indices; layout flag 1 in bits 31:30 */
	return mark | ((ioa_rcp & 0xff) << 14) | ((roa_rcp & 0xff) << 22) |
	       (1 << 30);
#endif
}
+
#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)

/*
 *  9:0   Mark (color) 1024 flow stats
 * 17:10  IOA index     256 entries
 * 25:18  ROA index     256 entries
 * 30:26  QSL and HSH    32 recipes indexable
 * 31:31  CAO               implicitly when color_action is set
 */
#define FLOW_MARK_MASK BITMASK(9, 0)
#define IOA_RCP_MASK BITMASK(17, 10)
#define ROA_RCP_MASK BITMASK(25, 18)
#define QSL_HSH_MASK BITMASK(30, 26)

/*
 * Pack mark, IOA/ROA recipe indices and QSL/HSH recipe into one 32-bit
 * color action word; bit 31 (CAO) is always set.
 */
static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
					uint32_t roa_rcp, uint32_t qsl_hsh)
{
	/* 1U << 31: left-shifting signed 1 into the sign bit is UB */
	uint32_t color_action = (mark & FLOW_MARK_MASK) |
				((ioa_rcp & IOA_RCP_MASK) << 10) |
				((roa_rcp & ROA_RCP_MASK) << 18) |
				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
	return color_action;
}
+
/*
 * SW bitmask encoding of the ROA config settings, packed into a single
 * 64-bit word for cheap cache matching.
 *
 *  ROA config      bit offs  bits
 *  ----------------------------
 *  recirc port      7:0    8   -> uses hbx > 0
 *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
 *  tunnel type     19:16   4
 *  tx port         23:20   4   -> txport + 1
 *  tun_ip_type     24:24   1
 *  recirculate     25:25   1   -> recirculate port set
 *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
 *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
 *  new_recirc_port 50:50   1   -> indication of a new port for recirculate
 *                                 has been allocated.  Needs default queue
 */

/* Set the flag (bit 50) that a new recirculate port was allocated. */
static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
{
	return actions | (1ULL << 50);
}

/* Read back the new-recirc-port flag (bit 50). */
static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
{
	return (uint8_t)((actions >> 50) & 1);
}

/* Store the tunnel IP type in bit 24 (0 = IPv4, 1 = IPv6). */
static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
{
	return actions | ((uint64_t)(ip_type & 1) << 24);
}

/* Read the tunnel IP type from bit 24. */
static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
{
	return (uint8_t)((actions >> 24) & 1);
}

/* Store the pre-calculated tunnel IP checksum in bits 49:34. */
static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
{
	return actions | ((uint64_t)csum << 34);
}

/* Read the pre-calculated tunnel IP checksum from bits 49:34. */
static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
{
	return (uint16_t)(actions >> 34);
}

/* Store the tunnel header length in bits 33:26 (0 = no tunnel header). */
static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
{
	return actions | ((uint64_t)length << 26);
}

/* Read the tunnel header length from bits 33:26. */
static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
{
	return (uint8_t)(actions >> 26);
}
+
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
/* Store the tunnel type in bits 19:16. */
static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
{
	return actions | ((uint64_t)(type & 0x0f) << 16);
}

/* Read the tunnel type from bits 19:16. */
static inline uint8_t roa_get_tun_type(uint64_t actions)
{
	return (uint8_t)((actions >> 16) & 0x0f);
}

/* Enable recirculation (bit 25) and store the recirc port in bits 7:0. */
static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
{
	return actions | (1ULL << 25) | port;
}

/* Return the recirc port (bits 7:0), or -1 if bit 25 is not set. */
static inline int32_t roa_get_recirc_port(uint64_t actions)
{
	return (actions & (1ULL << 25)) ? (int32_t)(actions & 0xff) : -1;
}

/* Store the recirc bypass port in bits 15:8. */
static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
{
	return actions | (((uint64_t)port & 0xff) << 8);
}

/* Read the recirc bypass port from bits 15:8. */
static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
{
	return (uint8_t)(actions >> 8);
}
+
/*
 *  This is a bitmask representation in SW for
 *  ioa action settings. It is mostly done for
 *  effective cache matching
 *
 *  IOA action      bit offs  bits
 *  --------------------------------
 *  tci               15:0    16
 *  queue             23:16    8   uses hbx
 *  tpid select       27:24    4
 *  pop vxlan         28:28    1
 *  pop vlan          29:29    1
 *  push vlan         30:30    1
 *  queue override    31:31    1
 */

/*
 * Set the queue-override action: bit 31 flags the override and bits
 * 23:16 hold the queue (hb).  Must use 1ULL: the previous (1 << 31) was
 * signed-shift UB and sign-extended to 0xffffffff80000000 when widened
 * to uint64_t, clobbering bits 63:32 of the action word.
 */
static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
{
	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
	return actions;
}

/* Return the queue (bits 23:16) if the override flag is set, else -1. */
static inline int ioa_get_queue(uint64_t actions)
{
	if (!(actions & (1ULL << 31)))
		return -1;
	return ((actions >> 16) & 0xff);
}
+
/* Set the pop-outer-VXLAN action flag (bit 28). */
static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
{
	return actions | (1u << 28);
}

/* Set the pop-outer-VLAN action flag (bit 29). */
static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
{
	return actions | (1u << 29);
}
+
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
/* Set the push-VLAN flag (bit 30) and store the full TCI in bits 15:0. */
static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
{
	return actions | (1u << 30) | tci;
}

/* Set the push-VLAN flag (bit 30) and store only the PCP bits (15:13). */
static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
{
	return actions | (1u << 30) | ((uint32_t)(pcp & 7) << 13);
}

/* Read the VLAN TCI from bits 15:0. */
static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
{
	return (uint16_t)actions;
}
+
/* Program the ROA TUNHDR table entry at @index from @tun. */
int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
				   struct tunnel_header_s *tun);
/* Program the ROA TUNCFG entry at @index from packed @color_actions. */
int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions);
/* Program the IOA RCP entry at @index from packed @color_actions. */
int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions);

#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Per-module operations table: one row for each NIC HW module the flow
+ * API knows about.  flow_api_backend_init()/reset()/done() iterate this
+ * table, so row order fixes the module init/reset/free order.
+ */
+static const struct {
+	const char *name;	/* module name, used in log messages */
+	int (*allocate)(struct flow_api_backend_s *be);	/* alloc SW cache */
+	void (*free)(struct flow_api_backend_s *be);	/* release SW cache */
+	int (*reset)(struct flow_api_backend_s *be);	/* set HW defaults */
+	bool (*present)(struct flow_api_backend_s *be);	/* module in FPGA? */
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one contiguous zeroed backing buffer for 'sets' cache arrays
+ * of a module and point each caller-supplied pointer at its slice.
+ *
+ * Variadic triplets, one per set: (void **list, int count, int elem_size).
+ * Each slice is prefixed with EXTRA_INDEXES reserved elements; the
+ * returned slice pointers skip that prefix.
+ *
+ * Returns the allocation base (owned by 'mod', released via mod->base),
+ * or NULL on failure, in which case the caller pointers are untouched
+ * and mod->allocated_size is 0.
+ *
+ * Fixes: read the variadic pointer arguments with their actual type
+ * (void **, not void *), and do not record allocated_size when the
+ * allocation failed (zero_module_cache() would otherwise memset through
+ * a NULL base with a non-zero length).
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		/* read with the pointer-to-pointer type actually passed */
+		plist[i] = va_arg(args, void **);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)(p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+		total_bytes = 0; /* do not record a size we do not own */
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+/*
+ * Clear a module's cached register images back to all-zero.
+ * Fix: guard against a NULL base (allocation failed or never done) —
+ * memset on a null pointer is undefined behavior.
+ */
+void zero_module_cache(struct common_func_s *mod)
+{
+	if (mod->base && mod->allocated_size)
+		memset(mod->base, 0, mod->allocated_size);
+}
+
+/*
+ * Bind a backend implementation to 'dev', query basic capabilities, and
+ * build the version-independent SW cache for every HW module present in
+ * the FPGA.  On any module failure all modules are torn down again and
+ * -1 is returned; 0 on success.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+
+	/* Allocate the SW cache and reset HW for each module in the FPGA */
+	for (unsigned int i = 0; i < MOD_COUNT; i++) {
+		if (!module[i].present(dev))
+			continue;
+
+		if (module[i].allocate(dev) != 0 ||
+				module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[i].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Re-apply hardware default values through every module's reset handler.
+ * Returns 0 on success, -1 on the first failing module.
+ *
+ * NOTE(review): unlike flow_api_backend_init(), this iterates all table
+ * rows, including modules whose present() is false and whose cache was
+ * never allocated — presumably the hw_mod_*_reset() handlers tolerate
+ * that; confirm against their implementations.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	unsigned int i;
+
+	assert(dev);
+
+	for (i = 0; i < MOD_COUNT; i++) {
+		if (module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[i].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Release every module cache bound to 'dev'.  Always returns 0 so it
+ * can double as a cleanup step in error paths.
+ */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < MOD_COUNT; i++)
+		module[i].free(dev);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+/* Log an out-of-range table index for 'func'; returns error code -2. */
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+/* Log an out-of-range word offset for 'func'; returns error code -3. */
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+/* Log an unsupported module version ('mod' ver from VER_MAJOR/VER_MINOR
+ * of 'ver') for 'func'; returns error code -4.
+ */
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* Log a field not supported by this module version; returns -5. */
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+/* Log that resource 'resource' of module 'mod'/'ver' is exhausted for
+ * 'func'; returns error code -4 (same code as unsupported version).
+ */
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+/* Number of reserved elements prepended to every module cache array by
+ * callocate_mod(); currently 0 (the enum is empty).
+ */
+enum { EXTRA_INDEXES };
+/* NOTE(review): EXTRA_INDEX_COPY is not defined anywhere in this header —
+ * COPY_INDEX only compiles where that name is provided; presumably it is
+ * meant to become a member of the enum above.  Confirm before use.
+ */
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/* Copy between a cached register field and a caller value: read the
+ * cache into *val when 'get' is non-zero, otherwise write *val into it.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	uint32_t *dst = get ? val : cached_val;
+	const uint32_t *src = get ? cached_val : val;
+
+	*dst = *src;
+}
+
+/* As get_set(), but the cached field is signed: the value is cast
+ * between int32_t (cache side) and uint32_t (caller side).
+ */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)*cached_val;
+		return;
+	}
+	*cached_val = (int32_t)*val;
+}
+
+/*
+ * Scan entries [start, nb_elements) of a register cache array for one
+ * whose bytes equal entry 'idx' (the entry itself is skipped).  On a
+ * match *value holds the matching index, otherwise NOT_FOUND.  Only
+ * valid for 'get' accesses; returns 0 or a negative error code.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+	const uint8_t *ref = base + idx * type_size;
+
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+
+	for (unsigned int i = start; i < nb_elements; i++) {
+		if (i == idx)
+			continue;
+		if (memcmp(ref, base + i * type_size, type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Return 1 when cache entries 'idx' and 'cmp_idx' are distinct indexes
+ * with byte-identical contents, 0 when they differ (or are the same
+ * index), or a negative error code for a non-'get' access or an
+ * out-of-range cmp_idx.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	if (cmp_idx >= nb_elements)
+		return error_index_too_large(func);
+	if (idx == cmp_idx)
+		return 0;
+	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
+		      type_size) == 0 ? 1 : 0;
+}
+
+/* Return 1 if any of the n bytes at addr is non-zero, else 0. */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+	const uint8_t *end = p + n;
+
+	while (p < end) {
+		if (*p++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 if all n bytes at addr are 0xff, else 0. */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+	const uint8_t *end = p + n;
+
+	while (p < end) {
+		if (*p++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+/* Bit positions within the CAT CTE enable bitmap, one per downstream
+ * engine a category can activate — presumably paired with
+ * HW_CAT_CTE_ENABLE_BM; confirm against the CTE flush/set users.
+ */
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+/* Dynamic offset selectors identifying positions within a frame (outer
+ * and tunneled headers).  Values with SWX_INFO set select sideband
+ * metadata rather than a frame offset.
+ */
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+/*
+ * Fields shared by every per-module cache struct (spliced in via this
+ * macro): ver = module version (decoded with VER_MAJOR/VER_MINOR),
+ * base/allocated_size = cache memory managed by callocate_mod(), and a
+ * debug flag.
+ */
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+/* Generic view of any module cache; see CAST_COMMON(). */
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+/* CAT (categorizer) module cache: capability counts queried from the
+ * backend plus the register image for the detected module version.
+ */
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	/* version-specific register layout; selected by 'ver' */
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+/* KM (key matcher) module cache: CAM/TCAM dimensions from the backend
+ * plus the register image for the detected module version.
+ */
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	/* version-specific register layout; selected by 'ver' */
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+/* HST (header stripper) module cache: recipe count from the backend
+ * plus the register image for the detected module version.
+ */
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	/* version-specific register layout; selected by 'ver' */
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+/* FLM (flow matcher) module cache: capability counts from the backend
+ * plus the register image for the detected module version.
+ */
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	/* version-specific register layout; selected by 'ver' */
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM (Flow Matcher) module API.
+ * "field" selects one of the enum hw_flm_e values above. NOTE(review):
+ * by naming convention *_flush appears to push cached state toward the
+ * device and *_update to pull state back from it -- confirm against the
+ * backend implementation.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* Load indicators: bin, pps, lps and aps (see HW_FLM_LOAD_* fields). */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST and RCP are tables: "index"/"start_idx" select entries. */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* Array-valued variant: "value" points to the words of a wide field. */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn (lrn), info (inf) and status (sta) record transfer. */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH (hashing) module cached state. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of RCP (recipe) entries */
+	union {
+		struct hw_mod_hsh_v5_s v5; /* module version 5 register layout */
+	};
+};
+
+/*
+ * HSH module functions and register fields. Enumerators below
+ * FIELD_START_INDEX are operations (preset/compare/find); the rest
+ * select individual RCP fields.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK,
+};
+
+/*
+ * HSH module API. "word_off" addresses one 32-bit word inside
+ * multi-word fields (e.g. the word mask or seed).
+ */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL (queue selector) module cached state. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories */
+	uint32_t nb_qst_entries; /* number of QST (queue selection table) entries */
+	union {
+		struct hw_mod_qsl_v7_s v7; /* module version 7 register layout */
+	};
+};
+
+/*
+ * QSL module functions and register fields. Enumerators below
+ * FIELD_START_INDEX are operations; the rest select RCP/QST/QEN/UNMQ
+ * fields.
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+};
+
+/* QSL module API: RCP recipes, QST/QEN queue tables and UNMQ (unmatched). */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC (slicer) module cached state. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1; /* module version 1 register layout */
+	};
+};
+
+/* SLC module functions and RCP fields (fields from FIELD_START_INDEX). */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+/* SLC module API. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC LR (slicer, local retransmit variant) module cached state. */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2; /* module version 2 register layout */
+	};
+};
+
+/* SLC LR module functions and RCP fields; mirrors enum hw_slc_e. */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+/* SLC LR module API. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB (packet descriptor builder) module cached state. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of RCP categories */
+
+	union {
+		struct hw_mod_pdb_v9_s v9; /* module version 9 register layout */
+	};
+};
+
+/* PDB module functions and RCP/CONFIG fields (fields from FIELD_START_INDEX). */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/*
+ * PDB module API.
+ * NOTE(review): unlike most modules there is no hw_mod_pdb_config_get()
+ * counterpart for hw_mod_pdb_config_set() -- confirm this is intentional.
+ */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA (ingress/egress action) module cached state. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories */
+	uint32_t nb_roa_epp_entries; /* number of ROA EPP table entries */
+	union {
+		struct hw_mod_ioa_v4_s v4; /* module version 4 register layout */
+	};
+};
+
+/* IOA module functions and RCP/CONFIG/ROA-EPP fields. */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+/*
+ * IOA module API. The roa_epp trio is declared flush/set/get to match
+ * the declaration order used by every other table accessor in this
+ * header (it was previously set/get/flush).
+ */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+
+/* ROA (re-order and tunnel) module cached state. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel categories */
+	uint32_t nb_lag_entries; /* number of LAG configuration entries */
+	union {
+		struct hw_mod_roa_v6_s v6; /* module version 6 register layout */
+	};
+};
+
+/* ROA module functions and TUNHDR/TUNCFG/CONFIG/LAGCFG/counter fields. */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/*
+ * ROA module API. igs/rcc accessors cover the ingress and recirculate
+ * drop counters listed at the end of enum hw_roa_e.
+ */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC (receive MAC control) module cached state. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3; /* module version 1.3 register layout */
+	};
+};
+
+/*
+ * RMC module fields. This module has no function enumerators, so the
+ * enum starts directly at FIELD_START_INDEX.
+ */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+/* RMC module API: a single ctrl register group. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* TPE (TX packet editor) module cached state. */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories */
+	uint32_t nb_ifr_categories; /* number of IFR categories */
+	uint32_t nb_cpy_writers; /* number of CPY writers */
+	uint32_t nb_rpl_depth; /* replace table depth */
+	uint32_t nb_rpl_ext_categories; /* replace-extension categories */
+	union {
+		struct hw_mod_tpe_v1_s v1; /* module version 1 register layout */
+		struct hw_mod_tpe_v2_s v2; /* module version 2 register layout */
+	};
+};
+
+/*
+ * TPE module functions and control fields, grouped by sub-block:
+ * RPP/IFR/INS/RPL/CPY/HFU/CSU.
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/*
+ * TPE module API: one flush/set/get triple per sub-block
+ * (rpp, rpp_ifr, ifr, ins, rpl, rpl_ext, rpl_rpl, cpy, hfu, csu).
+ */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* rpl_rpl "value" is a word array (replace data), hence the pointer in set. */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug mode flags (bit mask, passed to set_debug_mode). */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operations table: binds the generic flow API to a concrete
+ * device backend. "dev" is the opaque backend handle (be_dev in struct
+ * flow_api_backend_s). The get_nb_* callbacks report resource
+ * capacities; per-module callbacks take that module's cached state
+ * (NOTE(review): by naming, *_flush transfers toward the device and
+ * *_update from it -- confirm in the backend implementation).
+ */
+struct flow_api_backend_ops {
+	int version; /* backend interface version */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level flow API backend state: the opaque device handle, its
+ * operations table, and one cached state block per FPGA module.
+ */
+struct flow_api_backend_s {
+	void *be_dev; /* opaque backend device handle passed to all ops */
+	const struct flow_api_backend_ops *iface; /* backend operations table */
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: init binds "iface"/"be_dev" to "dev"; done tears down. */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
/* Identifier for each free-resource pool managed in the FPGA. */
enum res_type_e {
	RES_QUEUE,
	RES_CAT_CFN,
	RES_CAT_COT,
	RES_CAT_EXO,
	RES_CAT_LEN,
	RES_KM_FLOW_TYPE,
	RES_KM_CATEGORY,
	RES_HSH_RCP,
	RES_PDB_RCP,
	RES_QSL_RCP,
	RES_QSL_QST,
	RES_SLC_RCP,
	RES_IOA_RCP,
	RES_ROA_RCP,
	RES_FLM_FLOW_TYPE,
	RES_FLM_RCP,
	RES_HST_RCP,
	RES_TPE_RCP,
	RES_TPE_EXT,
	RES_TPE_RPL,
	RES_COUNT, /* number of valid resource types; used to size resource tables */
	RES_INVALID
};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
/* Which KM extractor type a match element is assigned to (QW = 128 bit, SW = 32 bit). */
enum extractor_e {
	KM_USE_EXTRACTOR_UNDEF,
	KM_USE_EXTRACTOR_QWORD,
	KM_USE_EXTRACTOR_SWORD,
};

/* One collected match field: extracted value, mask and its frame offset. */
struct match_elem_s {
	enum extractor_e extr;
	int masked_for_tcam; /* if potentially selected for TCAM */
	uint32_t e_word[4]; /* match value, up to 128 bits */
	uint32_t e_mask[4]; /* match mask, same width as e_word */

	int extr_start_offs_id; /* dynamic start-offset id in the frame */
	int8_t rel_offs; /* byte offset relative to the dynamic start */
	uint32_t word_len; /* number of 32-bit words used of e_word/e_mask */
};

/* Target lookup technology for a finally formatted KM entry. */
enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };

#define MAX_MATCH_FIELDS 16
+
/*
 * Key Matcher (KM) flow definition: collects match elements, formats them
 * into a CAM or TCAM entry, and tracks where that entry lives in HW.
 */
struct km_flow_def_s {
	struct flow_api_backend_s *be;

	/* For keeping track of identical entries */
	struct km_flow_def_s *reference;
	struct km_flow_def_s *root;

	/* For collect flow elements and sorting */
	struct match_elem_s match[MAX_MATCH_FIELDS];
	struct match_elem_s *match_map[MAX_MATCH_FIELDS]; /* sorted view of match[] */
	int num_ftype_elem;

	/* Finally formatted CAM/TCAM entry */
	enum cam_tech_use_e target;
	uint32_t entry_word[MAX_WORD_NUM];
	uint32_t entry_mask[MAX_WORD_NUM];
	int key_word_size;

	/* TCAM calculated possible bank start offsets */
	int start_offsets[MAX_TCAM_START_OFFSETS];
	int num_start_offsets;

	/* Flow information */

	/*
	 * HW input port ID needed for compare. In port must be identical on flow
	 * types
	 */
	uint32_t port_id;
	uint32_t info; /* used for color (actions) */
	int info_set;
	int flow_type; /* 0 is illegal and used as unset */
	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */

	/* CAM specific bank management */
	int cam_paired;
	int record_indexes[MAX_BANKS];
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct cam_distrib_s *cam_dist;
	struct hasher_s *hsh;

	/* TCAM specific bank management */
	struct tcam_distrib_s *tcam_dist;
	int tcam_start_bank;
	int tcam_record;
};
+
+/*
+ * KCC-CAM
+ */
/* 64-bit KCC-CAM key, packed as bit-fields (sideband data/type, category, port). */
struct kcc_key_s {
	uint64_t sb_data : 32; /* sideband data, e.g. VLAN TPID/VID or VXLAN VNI */
	uint64_t sb_type : 8; /* 0 = none, 1 = VLAN, 2 = VXLAN (see kcc_key_add_*) */
	uint64_t cat_cfn : 8;
	uint64_t port : 16;
};

#define KCC_ID_INVALID 0xffffffff

/* KCC (KM Category CAM) flow definition and its CAM placement bookkeeping. */
struct kcc_flow_def_s {
	struct flow_api_backend_s *be;
	/* the key is accessible as one 64-bit word, two 32-bit words, or bit-fields */
	union {
		uint64_t key64;
		uint32_t key32[2];
		struct kcc_key_s key;
	};
	uint32_t km_category;
	uint32_t id; /* allocated unique id, or KCC_ID_INVALID */

	uint8_t *kcc_unique_ids; /* allocation bitmap for unique ids */

	int flushed_to_target; /* set when the entry has been written to NIC HW */
	int record_indexes[MAX_BANKS]; /* per-bank candidate record index (hash) */
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct kcc_cam_distrib_s *cam_dist;
	struct hasher_s *hsh;
};
+
+/*
+ * Tunnel encapsulation header definition
+ */
/* Supported tunnel encapsulation types. */
enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };

#define MAX_TUN_HDR_SIZE 128

/* Raw tunnel push header plus the layer lengths needed to patch it at runtime. */
struct tunnel_header_s {
	union {
		uint8_t hdr8[MAX_TUN_HDR_SIZE];
		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
	} d;
	uint32_t user_port_id;
	uint8_t len; /* total header length in bytes */

	uint8_t nb_vlans;

	uint8_t ip_version; /* 4: v4, 6: v6 */
	uint16_t ip_csum_precalc; /* precalculated outer IPv4 checksum */

	uint8_t new_outer;
	uint8_t l2_len;
	uint8_t l3_len;
	uint8_t l4_len;
};
+
enum port_type_e {
	PORT_NONE, /* not defined or drop */
	PORT_INTERNAL, /* no queues attached */
	PORT_PHY, /* MAC phy output queue */
	PORT_VIRT, /* Memory queues to Host */
};

/* High-priority partial-match classification for special SW handling. */
enum special_partial_match_e {
	SPCIAL_MATCH_NONE, /* NOTE(review): misspelled ("SPCIAL") - renaming would break users; verify references before fixing */
	SPECIAL_MATCH_LACP,
};

#define PORT_ID_NONE 0xffffffff

/* One output destination of a flow (queue or physical port). */
struct output_s {
	uint32_t owning_port_id; /* the port who owns this output destination */
	enum port_type_e type;
	int id; /* depending on port type: queue ID or physical port id or not used */
	int active; /* activated */
};
+
/*
 * Fully parsed and validated flow definition: frame-decoder match info,
 * output destinations and all collected actions, ready for HW resource
 * allocation (step 2 of flow creation).
 */
struct nic_flow_def {
	/*
	 * Frame Decoder match info collected
	 */
	int l2_prot;
	int l3_prot;
	int l4_prot;
	int tunnel_prot;
	int tunnel_l3_prot;
	int tunnel_l4_prot;
	int vlans;
	int fragmentation;
	/*
	 * Additional meta data for various functions
	 */
	int in_port_override;
	int l4_dst_port;
	/*
	 * Output destination info collection
	 */
	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
	int dst_num_avail;

	/*
	 * To identify high priority match with mark for special SW processing (non-OVS)
	 */
	enum special_partial_match_e special_match;

	/*
	 * Mark or Action info collection
	 */
	uint32_t mark;
	uint64_t roa_actions; /* bitmask of ROA module actions */
	uint64_t ioa_actions; /* bitmask of IOA module actions */

	uint32_t jump_to_group;

	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED]; /* attached meter ids */

	int full_offload;
	/*
	 * Action push tunnel
	 */
	struct tunnel_header_s tun_hdr;

	/*
	 * If DPDK RTE tunnel helper API used
	 * this holds the tunnel if used in flow
	 */
	struct tunnel_s *tnl;

	/*
	 * Header Stripper
	 */
	int header_strip_start_dyn;
	int header_strip_start_ofs;
	int header_strip_end_dyn;
	int header_strip_end_ofs;
	int header_strip_removed_outer_ip;

	/*
	 * Modify field
	 */
	struct {
		uint32_t select; /* which header field to modify */
		uint32_t dyn; /* dynamic offset id */
		uint32_t ofs; /* byte offset relative to dyn */
		uint32_t len;
		uint32_t level;
		/* replacement value, viewable at 8/16/32-bit granularity */
		union {
			uint8_t value8[16];
			uint16_t value16[8];
			uint32_t value32[4];
		};
	} modify_field[MAX_CPY_WRITERS_SUPPORTED];

	uint32_t modify_field_count;
	uint8_t ttl_sub_enable;
	uint8_t ttl_sub_ipv4;
	uint8_t ttl_sub_outer;

	/*
	 * Key Matcher flow definitions
	 */
	struct km_flow_def_s km;

	/*
	 * Key Matcher Category CAM
	 */
	struct kcc_flow_def_s *kcc;
	int kcc_referenced;

	/*
	 * TX fragmentation IFR/RPP_LR MTU recipe
	 */
	uint8_t flm_mtu_fragmentation_recipe;
};
+
/* Discriminator for the union inside struct flow_handle. */
enum flow_handle_type {
	FLOW_HANDLE_TYPE_FLOW,
	FLOW_HANDLE_TYPE_FLM,
};

/*
 * A created flow. Lives on a doubly linked list per device; the payload
 * depends on type: a fully resourced FLOW entry or a learned FLM entry.
 */
struct flow_handle {
	enum flow_handle_type type; /* selects which union member is valid */

	struct flow_eth_dev *dev;
	struct flow_handle *next;
	struct flow_handle *prev;

	union {
		/* valid when type == FLOW_HANDLE_TYPE_FLOW */
		struct {
			/*
			 * 1st step conversion and validation of flow
			 * verified and converted flow match + actions structure
			 */
			struct nic_flow_def *fd;
			/*
			 * 2nd step NIC HW resource allocation and configuration
			 * NIC resource management structures
			 */
			struct {
				int index; /* allocation index into NIC raw resource table */
				/* number of contiguous allocations needed for this resource */
				int count;
				/*
				 * This resource if not initially created by this flow, but reused
				 * by it
				 */
				int referenced;
			} resource[RES_COUNT];
			int flushed;

			uint32_t flow_stat_id;
			uint32_t color;
			int cao_enabled;
			uint32_t cte;

			uint32_t port_id; /* MAC port ID or override of virtual in_port */
			uint32_t flm_ref_count;
			uint8_t flm_group_index;
			uint8_t flm_ft_index;
		};

		/* valid when type == FLOW_HANDLE_TYPE_FLM */
		struct {
			uint32_t flm_data[10];
			uint8_t flm_prot;
			uint8_t flm_kid;
			uint8_t flm_prio;

			uint16_t flm_rpl_ext_ptr;
			uint32_t flm_nat_ipv4;
			uint16_t flm_nat_port;
			uint8_t flm_dscp;
			uint32_t flm_teid;
			uint8_t flm_rqi;
			uint8_t flm_qfi;

			uint8_t flm_mtu_fragmentation_recipe;

			struct flow_handle *flm_owner;
		};
	};
};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
#define OWNER_ID_COUNT 256

/* Bookkeeping for one translated (hardware) group number. */
struct group_lookup_entry_s {
	uint64_t ref_counter; /* number of flows currently mapped to this entry */
	uint32_t *reverse_lookup; /* back-pointer into translation_table for O(1) teardown */
};

/*
 * Translates (owner_id, caller group) pairs into hardware group numbers in
 * [1 .. group_count - 1]. Group 0 is passed through untranslated.
 */
struct group_handle_s {
	uint32_t group_count;

	/* [OWNER_ID_COUNT x group_count] map of (owner, group) -> translated group */
	uint32_t *translation_table;

	struct group_lookup_entry_s *lookup_entries; /* one per translated group */
};

/**
 * Allocate a group translation handle.
 *
 * @param handle      out: receives the new handle; set to NULL on failure
 * @param group_count number of hardware groups to manage
 * @return 0 on success, -1 on allocation failure
 */
int flow_group_handle_create(void **handle, uint32_t group_count)
{
	struct group_handle_s *group_handle =
		calloc(1, sizeof(struct group_handle_s));

	*handle = NULL;
	if (group_handle == NULL)
		return -1;

	group_handle->group_count = group_count;
	group_handle->translation_table =
		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
	group_handle->lookup_entries =
		calloc(group_count, sizeof(struct group_lookup_entry_s));

	/*
	 * Fail cleanly if a member allocation failed; previously the members
	 * were used unchecked, giving a NULL dereference on OOM.
	 */
	if (group_count != 0 &&
			(group_handle->translation_table == NULL ||
			 group_handle->lookup_entries == NULL)) {
		free(group_handle->translation_table);
		free(group_handle->lookup_entries);
		free(group_handle);
		return -1;
	}

	*handle = group_handle;
	return 0;
}

/**
 * Free a group translation handle and set *handle to NULL. Safe when *handle
 * is already NULL.
 */
int flow_group_handle_destroy(void **handle)
{
	if (*handle) {
		struct group_handle_s *group_handle =
			(struct group_handle_s *)*handle;

		free(group_handle->translation_table);
		free(group_handle->lookup_entries);

		free(*handle);
		*handle = NULL;
	}

	return 0;
}

/**
 * Map (owner_id, group_in) to a translated group, allocating a free one on
 * first use and bumping the reference count on repeats.
 *
 * @return 0 on success (translated value in *group_out), -1 on bad input or
 *         when all translated groups are in use
 */
int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
			     uint32_t *group_out)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	uint32_t *table_ptr;
	uint32_t lookup;

	if (group_handle == NULL || group_in >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (group_in == 0) {
		*group_out = 0;
		return 0;
	}

	/*
	 * Row stride is group_count: the table is OWNER_ID_COUNT rows of
	 * group_count entries. The previous stride of OWNER_ID_COUNT indexed
	 * out of bounds whenever group_count < OWNER_ID_COUNT.
	 */
	table_ptr = &group_handle->translation_table[(size_t)owner_id *
				 group_handle->group_count + group_in];
	lookup = *table_ptr;

	if (lookup == 0) {
		/* find the first unused translated group; 0 is reserved */
		for (lookup = 1;
				lookup < group_handle->group_count &&
				group_handle->lookup_entries[lookup].ref_counter > 0;
				++lookup)
			;

		if (lookup < group_handle->group_count) {
			group_handle->lookup_entries[lookup].reverse_lookup =
				table_ptr;
			group_handle->lookup_entries[lookup].ref_counter += 1;

			*table_ptr = lookup;
		} else {
			return -1; /* all translated groups in use */
		}
	} else {
		group_handle->lookup_entries[lookup].ref_counter += 1;
	}
	*group_out = lookup;
	return 0;
}

/**
 * Drop one reference on a translated group; when the count reaches zero the
 * translation-table slot is cleared so the value can be reused.
 *
 * @return 0 on success (including group 0 and unreferenced groups), -1 on
 *         bad input
 */
int flow_group_translate_release(void *handle, uint32_t translated_group)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	struct group_lookup_entry_s *lookup;

	if (group_handle == NULL ||
			translated_group >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (translated_group == 0)
		return 0;

	lookup = &group_handle->lookup_entries[translated_group];

	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
		lookup->ref_counter -= 1;
		if (lookup->ref_counter == 0) {
			*lookup->reverse_lookup = 0;
			lookup->reverse_lookup = NULL;
		}
	}

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
/* Fixed bit permutation used as the non-linear step of the 32-bit mixer. */
static uint32_t shuffle(uint32_t v)
{
	uint32_t r = (v & 0x00000002) << 29;

	r |= (v & 0xAAAAAAA8) >> 3;
	r |= (v & 0x15555555) << 3;
	r |= (v & 0x40000000) >> 29;
	return r;
}

/* Rotate right by s with the wrapped-around bits inverted. */
static uint32_t ror_inv(uint32_t v, const int s)
{
	return (v >> s) | (~v << (32 - s));
}

/* Combine two words: xor plus a "exactly two of four rotations" term. */
static uint32_t combine(uint32_t a, uint32_t b)
{
	uint32_t a1 = ror_inv(a, 15);
	uint32_t a2 = ror_inv(a, 13);
	uint32_t b1 = ror_inv(b, 3);
	uint32_t b2 = ror_inv(b, 27);
	/*
	 * A bit is set when exactly two of { a1, a2, b1, b2 } have it set:
	 * even parity, at least one set, and not all four set.
	 */
	uint32_t two_of_four = ~(a1 ^ a2 ^ b1 ^ b2) &
			       (a1 | a2 | b1 | b2) &
			       ~(a1 & a2 & b1 & b2);

	return a ^ b ^ two_of_four;
}

/* One full 32-bit mixing round: combine, then permute. */
static uint32_t mix(uint32_t a, uint32_t b)
{
	return shuffle(combine(a, b));
}
+
/*
 * 64-bit variants of the mixer primitives. Each ror_invN applies the 32-bit
 * "rotate right by N with inverted wrap-around bits" to both halves at once;
 * the mask selects the wrapped-in bit positions of each half.
 */
static uint64_t ror_inv3(uint64_t v)
{
	const uint64_t wrap = 0xE0000000E0000000ULL;

	return ((v >> 3) | wrap) ^ ((v << 29) & wrap);
}

static uint64_t ror_inv13(uint64_t v)
{
	const uint64_t wrap = 0xFFF80000FFF80000ULL;

	return ((v >> 13) | wrap) ^ ((v << 19) & wrap);
}

static uint64_t ror_inv15(uint64_t v)
{
	const uint64_t wrap = 0xFFFE0000FFFE0000ULL;

	return ((v >> 15) | wrap) ^ ((v << 17) & wrap);
}

static uint64_t ror_inv27(uint64_t v)
{
	const uint64_t wrap = 0xFFFFFFE0FFFFFFE0ULL;

	return ((v >> 27) | wrap) ^ ((v << 5) & wrap);
}

/* shuffle() applied to both 32-bit halves in parallel. */
static uint64_t shuffle64(uint64_t v)
{
	uint64_t r = (v & 0x0000000200000002ULL) << 29;

	r |= (v & 0xAAAAAAA8AAAAAAA8ULL) >> 3;
	r |= (v & 0x1555555515555555ULL) << 3;
	r |= (v & 0x4000000040000000ULL) >> 29;
	return r;
}

/* Concatenate two 32-bit words into one 64-bit word (x in the high half). */
static uint64_t pair(uint32_t x, uint32_t y)
{
	uint64_t hi = x;

	return (hi << 32) | y;
}

/* combine() applied to both halves: xor plus "exactly two of four" term. */
static uint64_t combine64(uint64_t a, uint64_t b)
{
	uint64_t a1 = ror_inv15(a);
	uint64_t a2 = ror_inv13(a);
	uint64_t b1 = ror_inv3(b);
	uint64_t b2 = ror_inv27(b);
	/* even parity, at least one set, not all four set == exactly two set */
	uint64_t two_of_four = ~(a1 ^ a2 ^ b1 ^ b2) &
			       (a1 | a2 | b1 | b2) &
			       ~(a1 & a2 & b1 & b2);

	return a ^ b ^ two_of_four;
}

/* One full 64-bit mixing round. */
static uint64_t mix64(uint64_t a, uint64_t b)
{
	return shuffle64(combine64(a, b));
}
+
/*
 * Reduce a 16-word key to a 32-bit hash via a binary mixing tree (diagram
 * below): 64-bit mixes pair up the words layer by layer, then the final
 * 64-bit value is folded into 32 bits with three extra mixing steps.
 */
static uint32_t calc16(const uint32_t key[16])
{
	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
	/*   0       1       2       3       4       5       6       7     Layer 1   */
	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
	/*       0               1               2               3         Layer 2   */
	/*        \______.______/                 \______.______/                    */
	/*               0                               1                 Layer 3   */
	/*                \______________.______________/                            */
	/*                               0                                 Layer 4   */
	/*                              / \                                          */
	/*                              \./                                          */
	/*                               0                                 Layer 5   */
	/*                              / \                                          */
	/*                              \./                                Layer 6   */
	/*                             value                                         */

	uint64_t z;
	uint32_t x;

	/* layers 0-4: pair words i and i+8, then mix pairwise down the tree */
	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));

	/* layers 5-6: fold 64 -> 32 bits and finalize */
	x = mix((uint32_t)(z >> 32), (uint32_t)z);
	x = mix(x, ror_inv(x, 17));
	x = combine(x, ror_inv(x, 17));

	return x;
}
+
/*
 * Hash the 16-word key and slice the result into one record index per bank.
 *
 * @param hsh    hasher state set up by init_hasher()
 * @param key    16-word input key
 * @param result out: one record index per bank (hsh->banks entries)
 * @return the raw 32-bit hash of the key
 */
uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
{
	uint64_t val;
	uint32_t res;

	val = calc16(key);
	res = (uint32_t)val;

	/* widen the 32-bit hash when the banks together need more than 32 bits */
	if (hsh->cam_bw > 32)
		val = (val << (hsh->cam_bw - 32)) ^ val;

	/* consume cam_records_bw bits per bank, low bits first */
	for (int i = 0; i < hsh->banks; i++) {
		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
		val = val >> hsh->cam_records_bw;
	}
	return res;
}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
#ifdef TESTING
/*
 * Self-test: hashes a fixed key and compares the return value and the first
 * three per-bank indices against values recomputed here by hand.
 * Returns the number of mismatches (0 == pass).
 */
int hash_test(struct hasher_s *hsh, int banks, int record_bw)
{
	int res = 0;
	int val[10], resval[10];
	uint32_t bits = 0;

	/* only the first 4 of 16 words are given; the rest are zero-initialized */
	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
	const uint32_t result = 0xACECAE65;

	for (int i = 0; i < 16; i++)
		printf("%08x,", inval[i]);
	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);

	uint32_t ret = gethash(hsh, inval, val);

	printf("Return VAL = %08X  ==  %08X\n", ret, result);
	res += (ret != result) ? 1 : 0;

	/* re-derive the bank indices from the raw hash, mirroring gethash() */
	int shft = (banks * record_bw) - 32;
	int mask = (1 << record_bw) - 1;

	if (shft > 0) {
		bits = (ret >> (32 - shft));
		ret ^= ret << shft;
	}

	resval[0] = ret & mask;
	ret >>= record_bw;
	resval[1] = ret & mask;
	ret >>= record_bw;
	resval[2] = ret & mask;
	/* NOTE(review): if shft <= 0 the shift count is >= record_bw and may be
	 * invalid; appears to assume banks * record_bw > 32 - confirm
	 */
	resval[2] |= (bits << (record_bw - shft));

	for (int i = 0; i < 3; i++) {
		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
		res += (val[i] != resval[i]) ? 1 : 0;
	}

	return res;
}
#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
/* Shadow of one KCC-CAM record: owning flow definition and reference count. */
struct kcc_cam_distrib_s {
	struct kcc_flow_def_s *kcc_owner;
	int ref_cnt;
};

/*
 * CAM address of this flow's record in bank "bnk".
 * NOTE: relies on a local "kcc" variable being in scope at the expansion
 * site, and uses a GCC statement expression (non-standard C).
 */
#define BE_CAM_KCC_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
	})


/* byte size of the CAM shadow table (also expects "kcc" in scope) */
#define BE_CAM_ENTRIES \
	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
/* byte size of the unique-id allocation bitmap (one bit per id) */
#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)

#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
/* CAM addresses reserved along the current cuckoo-move recursion path */
static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
/*
 * Attach this flow definition to the per-NIC KCC-CAM manager, allocating it
 * on first use. The single allocation is carved into four regions:
 * CAM shadow table | cuckoo-move counter | unique-id bitmap | hasher state.
 */
void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
		void **handle)
{
	/*
	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
	 *  and manage CAM population and usage
	 */
	if (!*handle) {
		/* NOTE(review): calloc result is not checked; on OOM the pointer
		 * arithmetic below operates on NULL - consider failing here
		 */
		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
				 BE_UNIQUE_IDS_SIZE +
				 sizeof(struct hasher_s));
		NT_LOG(DBG, FILTER,
		       "Allocate NIC DEV KCC-CAM record manager\n");
	}
	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
	kcc->cuckoo_moves =
		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
					  BE_CAM_ENTRIES + sizeof(uint32_t));

	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
				       BE_UNIQUE_IDS_SIZE);
	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
/*
 * Write this flow's key, category and id into the CAM record selected for
 * "bank", flush to HW, and claim the shadow-table slot.
 * Returns 0 on success, -1 on any backend write error.
 */
static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
{
	int res;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
				 kcc->key32[0]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
				 kcc->key32[1]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
				 kcc->km_category);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
	if (res)
		return -1;
	/* flush the assembled record to NIC HW */
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	/* claim the shadow slot so cuckoo moves see this record as occupied */
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
	return res;
}
+
/*
 * Zero this flow's CAM record in "bank", flush to HW, and release the
 * shadow-table slot. Clears the cached key and category but deliberately
 * leaves kcc->id for the caller to free.
 * Returns 0 on success, -1 on any backend write error.
 */
static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
{
	int res = 0;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;

	kcc->key64 = 0UL;
	kcc->km_category = 0;
	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
	return res;
}
+
/*
 * Try to move this flow's CAM entry from its current bank to any bank whose
 * candidate record is free, to make room for another flow.
 * Returns 1 when a move succeeded, 0 otherwise.
 */
static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
{
	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);

	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
		/* It will not select itself */
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
				NULL) {
			/*
			 * Populate in new position
			 */
			int res = kcc_cam_populate(kcc, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank
			 * HW flushes are really not needed, the old addresses are always taken over
			 * by the caller If you change this code in future updates, this may no
			 * longer be true then!
			 */
			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
			.kcc_owner = NULL;
			NT_LOG(DBG, FILTER,
			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       kcc->bank_used, bank,
			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
			       BE_CAM_KCC_DIST_IDX(bank));

			kcc->bank_used = bank;
			(*kcc->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Recursive cuckoo move: try to relocate the flow occupying CAM address
 * "bank_idx", recursing up to "levels" deep. Addresses already reserved on
 * the current path (kcc_cam_addr_reserved_stack, cam_adr_list_len entries)
 * are skipped to avoid cycles.
 * Returns 1 when the occupying flow was moved away, 0 otherwise.
 */
static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
				       int bank_idx, int levels,
				       int cam_adr_list_len)
{
	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;

	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* direct move first; recurse only if no bank is free */
	if (kcc_move_cuckoo_index(kcc))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* reserve this address so deeper levels do not revisit it */
	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
		int reserved = 0;
		int new_idx = BE_CAM_KCC_DIST_IDX(i);

		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (kcc_cam_addr_reserved_stack[i_reserved] ==
					new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		/* free the occupant of new_idx, then retry our own move */
		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
						      cam_adr_list_len);
		if (res) {
			if (kcc_move_cuckoo_index(kcc))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/* scratch 16-word hash input; words 2-15 stay zero.
 * NOTE(review): file-scope scratch buffer - not safe for concurrent callers;
 * confirm single-threaded use.
 */
static uint32_t kcc_hsh_key[16];

/*
 * Hash the key to per-bank record candidates, pick a bank (free one first,
 * otherwise make room via cuckoo moves up to depth 4), and program the CAM.
 * Returns 0 on success, -1 when no bank could be made available.
 */
static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
{
	int res = 0;
	int val[MAX_BANKS];

	/* key words are swapped into the hash input */
	kcc_hsh_key[0] = kcc->key32[1];
	kcc_hsh_key[1] = kcc->key32[0];
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");

	/* 2-15 never changed - remains zero */

	gethash(kcc->hsh, kcc_hsh_key, val);

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
		kcc->record_indexes[i] = val[i];
	/* NOTE(review): log assumes at least 3 banks - verify kcc_banks >= 3 */
	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
	       kcc->record_indexes[0], kcc->record_indexes[1],
	       kcc->record_indexes[2]);

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
				NULL) {
			bank = i_bank;
			break;
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
				i_bank++) {
			if (kcc_move_cuckoo_index_level(kcc,
							BE_CAM_KCC_DIST_IDX(i_bank),
							4, 0)) {
				bank = i_bank;
				break;
			}
		}

		if (bank < 0)
			return -1;
	}

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
	       BE_CAM_KCC_DIST_IDX(bank));
	res = kcc_cam_populate(kcc, bank);
	if (res == 0) {
		kcc->flushed_to_target = 1;
		kcc->bank_used = bank;
	} else {
		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
	}
	return res;
}
+
/*
 * Program this KCC entry into the CAM; returns 0 on success, -1 on failure.
 * NOTE(review): "%016lx" with a uint64_t assumes 64-bit long; on ILP32
 * targets this is a format mismatch - consider PRIx64 from <inttypes.h>.
 */
int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
{
	int res = -1;

	NT_LOG(DBG, FILTER,
	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
	       kcc->key64, kcc->km_category, kcc->id);
	res = kcc_write_data_to_cam(kcc);
	return res;
}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
/*
 * Increment the reference count of this flow's CAM record and return the
 * new count. The entry must already reside in a valid bank.
 */
int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
{
	assert(kcc->bank_used >= 0 &&
	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);

	struct kcc_cam_distrib_s *cam_entry =
		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];

	NT_LOG(DBG, FILTER,
	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
	return ++cam_entry->ref_cnt;
}
+
/*
 * Decrement the reference count of this flow's CAM record; when it reaches
 * zero the CAM entry is cleared from HW. Returns the new count, or -1 when
 * bank_used is out of range.
 */
int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
{
	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
		return -1;

	struct kcc_cam_distrib_s *cam_entry =
		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];

	if (cam_entry->ref_cnt) {
		if (--cam_entry->ref_cnt == 0) {
			kcc_clear_data_match_entry(kcc);
			NT_LOG(DBG, FILTER,
			       "KCC DEC Ref on Key became zero - Delete\n");
		}
	}

	NT_LOG(DBG, FILTER,
	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
	return cam_entry->ref_cnt;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Key mask patterns that allow a match element to be placed in the KM CAM.
+ * Each entry gives the word count of the key and the per-word mask; a
+ * match element qualifies for the CAM only when its mask equals one of
+ * these patterns exactly (see km_add_match_elem()).
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	/* IP6_SRC, IP6_DST */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } },
+	/* DMAC,SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000 } },
+	/* DMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffff0000, 0x00000000, 0xffff0000 } },
+	/* SMAC,ethtype */
+	{ 4, { 0x00000000, 0x0000ffff, 0xffffffff, 0xffff0000 } },
+	/* ETH_128 */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } },
+	/* IP4_COMBINED */
+	{ 2, { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 } },
+	/*
+	 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+	 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+	 */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{ 1, { 0xffff0000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* TP_PORT_DST32 */
+	{ 1, { 0x0000ffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv4 TOS mask bits used often by OVS */
+	{ 1, { 0x00030000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv6 TOS mask bits used often by OVS */
+	{ 1, { 0x00300000, 0x00000000, 0x00000000, 0x00000000 } },
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ *
+ * Software shadow of CAM record ownership: one cam_distrib_s per
+ * (bank, record) cell, pointing at the owning KM flow definition
+ * (NULL when free).
+ */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* NOTE: both macros expand against a local variable named `km` in scope. */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/* Shadow index for `km`'s pre-computed record in bank `bnk`. */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Maximum recursion depth for cuckoo relocation of CAM entries. */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/*
+ * Shared scratch stack of shadow indexes reserved during a cuckoo move.
+ * NOTE(review): file-scope mutable state — presumably all flow programming
+ * is serialized by a caller-held lock; confirm, otherwise this is racy.
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ *
+ * Same ownership shadow as above, for TCAM (bank, record) cells.
+ */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+/* Forward declaration: used by km_rcp_set() before its definition below. */
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach this KM flow definition to the per-NIC-device CAM/TCAM record
+ * manager. The manager is one calloc'ed region shared through *handle:
+ * [CAM ownership shadow][cuckoo move counter][TCAM ownership shadow]
+ * [hasher state]. It is allocated (zeroed) on first use and carved into
+ * the km->cam_dist / km->cuckoo_moves / km->tcam_dist / km->hsh pointers.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/*
+			 * Allocation failure: bail out instead of carving up
+			 * a NULL pointer below (and passing it to init_hasher)
+			 */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* Carve the single allocation into its sub-regions */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the CAM/TCAM record manager allocated by
+ * km_attach_ndev_resource_management() and clear the shared handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle != NULL) {
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+		free(*handle);
+	}
+	*handle = NULL;
+}
+
+/*
+ * Add one match element (up to four 32-bit value/mask words) to the KM
+ * flow definition.
+ *
+ * A word_len of 3 is zero-padded up to 4; only lengths 1, 2 and 4 are
+ * accepted. The element is classified here as well: if its mask equals
+ * one of the cam_masks[] patterns it may live in the CAM, otherwise it
+ * is marked for the TCAM.
+ *
+ * Returns 0 on success, -1 on invalid word length or when the match
+ * array is already full.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* Do not overrun the fixed-size km->match[] array */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	/* valid word_len 1,2,4 - promote 3 to 4 with zero padding */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM - no need to scan further */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+				break;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable inclusion of the info (color) word in the KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first unmarked, non-sideband match element of the given word
+ * size. Returns its index in km->match[], or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Map a protocol offset id to a human readable name for debug logging. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF: return "SOF";
+	case DYN_L2: return "L2 header";
+	case DYN_FIRST_VLAN: return "First VLAN";
+	case DYN_MPLS: return "First MPLS";
+	case DYN_L3: return "L3 header";
+	case DYN_ID_IPV4_6: return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST: return "Final IP dest";
+	case DYN_L4: return "L4 header";
+	case DYN_L4_PAYLOAD: return "L4 payload";
+	case DYN_TUN_PAYLOAD: return "Tunnel payload";
+	case DYN_TUN_L2: return "Tunnel L2 header";
+	case DYN_TUN_VLAN: return "First tunneled VLAN";
+	case DYN_TUN_MPLS: return "First tunneled MPLS";
+	case DYN_TUN_L3: return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6: return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST: return "Tunnel final IP dest";
+	case DYN_TUN_L4: return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD: return "Tunnel L4 payload";
+	case SB_VNI: return "VNI";
+	case SB_MAC_PORT: return "In Port";
+	case SB_KCC_ID: return "KCC ID";
+	default: return "<unknown>";
+	}
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow definition:
+ *  - assign match elements to QWORD extractors (largest first), then to
+ *    SWORD extractors, then append any sideband (SWX) words,
+ *  - build the flat entry_word[]/entry_mask[] arrays,
+ *  - decide the target (CAM or TCAM) from the element masks, and for
+ *    TCAM compute the legal start-bank offsets for the key length.
+ *
+ * Returns 0 on success, -1 when the elements cannot be mapped onto the
+ * available extractors or the key length is unsupported.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* key does not fit in TCAM banks */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * Fix: fill all four start offsets (8..11); the
+			 * original wrote start_offsets[0] four times and
+			 * left [1..3] uninitialized while announcing 4.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare this (new) flow definition `km` with an already programmed one
+ * `km1` to decide whether the existing KM recipe and flow type can be
+ * reused. Returns km1's flow type on a reusable match, 0 when the keys
+ * are incompatible, and -1 when the two flows would clash (identical
+ * masked key already programmed).
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/*
+	 * NOTE(review): this loop reads km->match[i] in the first branch but
+	 * km->match_map[i]-> in the second; verify the mix of insertion order
+	 * vs. mapped order is intentional.
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program KM recipe `index` in hardware from this flow definition:
+ * extractor selection (QW0/QW4 for quad words, DW8/DW10 for single
+ * words, SWX for sideband words), key mask A, and then either the CAM
+ * setup (entry length, flow-type mask, paired mode) or the TCAM setup
+ * (bank bitmap and key length).
+ * Returns 0 on success, -1 on any unsupported extractor combination.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	/* counters for how many of each extractor kind is already used */
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband words only supported in CAM, one at most */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* plain single words go to DW8 first, then DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* quad words go to QW0 first, then QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A */
+	/* mask words are written highest-index-first (reverse order) */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		/* key spills over one CAM record: use two paired records */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		/* one bank bit per key word, MSB-first bank numbering */
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into CAM `bank` at the
+ * record index pre-computed in km->record_indexes[bank], and claim the
+ * record(s) in the software ownership shadow. A paired entry spills the
+ * remaining words into the following record.
+ * Returns 0 on success, non-zero on backend write failure.
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	/* words to write, including the optional info/color word */
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	/* remaining words go into the paired (next) record */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's record(s) in CAM `bank` (key words and flow type)
+ * and release the record(s) in the software ownership shadow.
+ * Returns 0 on success, non-zero on backend write failure.
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	/* words to clear, including the optional info/color word */
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	/* clear the paired (next) record as well */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate the CAM entry currently in km->bank_used to any other
+ * bank that has a free record (pair) at this flow's hash index — one
+ * step of the cuckoo-hashing scheme. On success updates km->bank_used,
+ * bumps the shared move counter and returns 1; returns 0 when no bank
+ * can take the entry (or the hardware write failed).
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo relocation: try to free the CAM cell `bank_idx` owned
+ * by some other flow, first by moving that flow directly, then — up to
+ * `levels` of recursion — by moving the flows that block it.
+ * `cam_adr_list_len` is the current depth of the shared reserved-address
+ * stack, which prevents revisiting cells already in the move chain.
+ * Returns 1 when the cell was freed, 0 otherwise.
+ *
+ * NOTE(review): relies on the file-scope cam_addr_reserved_stack[];
+ * presumably callers serialize all flow programming — confirm.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this cell so deeper levels do not move into it */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		/* free the blocking flow one level down, then retry the move */
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Place this flow's key in the CAM: hash the key words to get one
+ * candidate record per bank, take the first free bank, or — when all
+ * are occupied — try to make room with cuckoo moves (depth 4).
+ * On success sets km->bank_used and km->flushed_to_target.
+ * Returns 0 on success, -1 when no room could be made, or a backend
+ * error code from the CAM write.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): log assumes at least 3 CAM banks — TODO confirm */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Look for a TCAM record that is free in `start_bank` and in the
+ * following key_word_size-1 banks. On success stores the record number
+ * in km->tcam_record and returns 1; returns 0 when no record fits.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		int record_free = 1;
+
+		for (int w = 0; w < km->key_word_size; w++) {
+			if (km->tcam_dist[TCAM_DIST_IDX(start_bank + w, rec)]
+					.km_owner != NULL) {
+				record_free = 0;
+				break;
+			}
+		}
+		if (record_free) {
+			km->tcam_record = rec;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find a TCAM start bank and record for this flow among its allowed
+ * start offsets. Returns 0 when a placement was found (stored in
+ * km->tcam_start_bank / km->tcam_record), otherwise -1.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	/* Search record and start index for this flow */
+	for (int i = 0; i < km->num_start_offsets; i++) {
+		int bank = km->start_offsets[i];
+
+		if (!tcam_find_free_record(km, bank))
+			continue;
+
+		km->tcam_start_bank = bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word of `record` into TCAM `bank`.
+ * The TCAM is value-indexed: for each of the four key bytes and each of
+ * the 256 possible byte values, the record's bit is set in the stored
+ * bitmap when the (masked) value matches, and cleared otherwise — a
+ * read-modify-write over all 256 entries per byte lane.
+ * On success claims the (bank, record) cell in the ownership shadow.
+ * Returns 0 on success, non-zero on backend failure.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* locate this record's bit within the 96-bit record bitmap */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program this flow into the TCAM: find a record if one was not already
+ * assigned (tcam_record < 0 when reusing an existing recipe), write the
+ * TCI side-band data (color and flow type), then write each key word
+ * into its bank. Sets km->flushed_to_target on success.
+ * Returns 0 on success, -1 when no record is free, or a backend error.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	/* avoid tracing the very verbose per-byte TCAM writes */
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one bank per key word, starting at tcam_start_bank */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record's bit in every value entry of all four byte-planes of
+ * a TCAM bank, flush the bank and release the record's ownership.
+ * Returns 0 on success, nonzero on backend error.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* position of this record's bit within the 3 x 32-bit record map */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Clear this flow's TCAM entry: zero the TCI (color + flow type) of the
+ * record and reset the record in every bank used by the key words.
+ * Returns 0 on success, -1 when no bank/record was ever allocated.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* clear the record in each bank used by the key words */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Make 'km' a reference to an existing identical match entry 'km1'
+ * instead of writing a duplicate entry to hardware.  'km' is appended at
+ * the end of km1's reference chain and inherits km1's info word and its
+ * target placement (CAM bank pairing / TCAM bank+record) and flush state.
+ * Returns 0 on success, -1 for unsupported targets.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	int res = 0;
+
+	/* the chain's root is km1 itself when km1 is not a reference */
+	km->root = km1->root ? km1->root : km1;
+	while (km1->reference)
+		km1 = km1->reference;
+	km1->reference = km;
+
+	km->info = km1->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_TCAM:
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_SYNERGY:
+	default:
+		res = -1;
+		break;
+	}
+
+	return res;
+}
+
+/*
+ * Write this flow's key match data to the selected hardware target.
+ * The color (flow info word) is stored in the flow before writing.
+ * Returns 0 on success, -1 on failure or for unsupported targets.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	int status = -1;
+
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		status = km_write_data_to_cam(km);
+	else if (km->target == KM_TCAM)
+		status = km_write_data_to_tcam(km);
+	/* KM_SYNERGY and unknown targets are not supported */
+
+	return status;
+}
+
+/*
+ * Remove this flow's match entry bookkeeping and, when it is the last
+ * user of the entry, clear the entry in hardware.
+ *
+ * Three cases:
+ *  - the flow is a reference into another flow's entry: unlink it from
+ *    the reference chain, hardware untouched;
+ *  - the flow owns an entry that others still reference: hand ownership
+ *    over to the first reference, hardware untouched;
+ *  - the flow owns an entry with no references: reset the CAM/TCAM
+ *    entry in hardware.
+ * Returns 0 on success, -1 for unsupported targets or on reset failure.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* this km references another root entry - unlink it */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* this km owns the entry - transfer ownership to first ref */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				/* paired entry - transfer the second slot too */
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* sole owner, entry is in hardware - clear it there */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One tunnel definition tracked by the driver. */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;      /* tunnel outer-header match values */
+	struct tunnel_cfg_s cfg_mask; /* mask applied to cfg when matching */
+	uint32_t flow_stat_id;        /* (uint32_t)-1 until set by flow code */
+	uint8_t vport;                /* allocated virtual port number */
+	int refcnt;                   /* number of users of this tunnel */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/*
+ * Tell whether a port number lies within the virtual (tunnel) port range.
+ * Returns 1 for a virtual port, 0 otherwise.
+ */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS)
+		return 1;
+	return 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* Head of the linked list of currently defined tunnels. */
+static struct tunnel_s *tunnels;
+
+/* Allocation map for virtual ports; nonzero means the port is in use. */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Reserve the first unused virtual tunnel port.
+ * Returns the allocated port number, or 255 when the range is exhausted.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	for (uint8_t port = VIRTUAL_TUNNEL_PORT_OFFSET;
+			port < MAX_HW_VIRT_PORTS; port++) {
+		uint8_t slot = port - VIRTUAL_TUNNEL_PORT_OFFSET;
+
+		if (vport[slot] == 0) {
+			vport[slot] = 1;
+			return port;
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ * Returns 0 on success.
+ * NOTE(review): the -1 error return is truncated to 255 by the uint8_t
+ * return type, which collides with the alloc-failure value of
+ * flow_tunnel_alloc_virt_port() — confirm callers only test for 0.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/* Compare two values under the intersection of both masks. */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked comparison of two IPv4 tunnel configs (addresses + UDP ports). */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked comparison of two IPv6 tunnel configs (addresses + UDP ports). */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Compare a stored tunnel against a tunnel definition under both masks.
+ * Returns nonzero on match, 0 otherwise.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Find a tunnel in the tunnel database matching the given configuration
+ * and mask, or create and insert a new one.
+ *
+ * tun_set != 0 means a tunnel "set" (full definition) request: search
+ * first for an exact duplicate, otherwise try to claim a matching
+ * pre-configured (unset) tunnel.  tun_set == 0 is a match-only lookup
+ * against already defined tunnels.
+ *
+ * On success the tunnel's reference count is incremented (a newly
+ * created tunnel starts at refcnt 1).  Returns NULL when no virtual
+ * port or memory is available.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* out of memory - give the vport back */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel.  When the last reference goes away
+ * the tunnel is unlinked from the tunnel list, its virtual port is
+ * released and the tunnel object is freed.
+ * Returns 0 on success, -1 if the tunnel is not in the list.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *cur = tunnels;
+	struct tunnel_s *before = NULL;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+	/* find tunnel in list */
+	while (cur && cur != tnl) {
+		before = cur;
+		cur = cur->next;
+	}
+
+	if (!cur) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	cur->refcnt--;
+	if (cur->refcnt > 0)
+		return 0;
+
+	/* last reference gone - unlink from list and destroy */
+	if (before)
+		before->next = cur->next;
+	else
+		tunnels = cur->next;
+	flow_tunnel_free_virt_port(cur->vport);
+
+	NT_LOG(DBG, FILTER,
+	       "tunnel ref count == 0 remove tunnel vport %i\n",
+	       cur->vport);
+	free(cur);
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel definition from a flow element list starting at *idx.
+ * On success *idx is advanced past the tunnel elements, *vni (when
+ * non-NULL) receives the VXLAN VNI, and the tunnel is looked up or
+ * created in the tunnel database.  vni == NULL marks a tunnel "set"
+ * command.  Returns the tunnel, or NULL on an invalid definition.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/*
+		 * Clear the mask as well; it is read by tunnel_get() and
+		 * mask fields not filled in by the parsed elements below
+		 * would otherwise be used uninitialized.
+		 */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port assigned to the given tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	uint8_t port = rtnl->vport;
+
+	return port;
+}
+
+/* Attach a flow statistics id to the given tunnel. */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Look up the tunnel configuration for a virtual port, optionally
+ * matched against a specific flow_stat_id (pass (uint32_t)-1 to match
+ * any).  On success the configuration is copied into tuncfg and 0 is
+ * returned; -1 is returned when no matching tunnel exists.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun; tun = tun->next) {
+		if (tun->vport != vport)
+			continue;
+		if (flow_stat_id != tun->flow_stat_id &&
+				flow_stat_id != (uint32_t)-1)
+			continue;
+		memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * One's-complement sum of 'size' bytes of 16-bit words, folded to
+ * 16 bits and seeded with 'seed'.  Used to precalculate the IPv4
+ * header checksum contribution of the tunnel header.
+ * NOTE(review): the odd-size tail adds only the low byte of data[idx]
+ * via the unsigned char cast, and no byte swapping is done — assumes
+ * data is in network byte order; confirm on the target platform.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy 'size' bytes of the element's spec (the mask is ignored). */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	const uint8_t *src = (const uint8_t *)elem->spec;
+	uint8_t idx = 0;
+
+	while (idx < size) {
+		result[idx] = src[idx];
+		idx++;
+	}
+}
+
+/*
+ * Build the raw VXLAN encapsulation header (outer ETH/IP[v4|v6]/UDP/
+ * VXLAN) for a flow definition from the given flow element list.  The
+ * header bytes are appended to fd->tun_hdr, and for IPv4 the header
+ * checksum contribution is precalculated at the end.
+ * Returns 0 on success, -1 on unsupported elements or missing ROA
+ * support for IPv6.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* note: was addr_b[5] twice - fixed to [4],[5] */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force sane outer header field values */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* precalc checksum over the IPv4 header (csum/len zeroed) */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/*
+ * Ask the backend whether the CAT (categorizer) module is present on the
+ * underlying device.
+ */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Allocate the CAT module shadow cache.
+ *
+ * Reads the module version and resource counts from the backend, stores them
+ * in be->cat, then allocates one contiguous cache area for all tables of the
+ * detected version (18, 21 or 22) via callocate_mod().
+ *
+ * Returns 0 on success, negative error code on bad resource counts, an
+ * unsupported version, or allocation failure.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory counts: must be strictly positive */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* Optional resources: zero is allowed, only negative is an error */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks are present */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * Allocate all version-specific tables in one shot.  The
+	 * (pointer, count, element size) triplets below must stay in this
+	 * exact order; callocate_mod() lays the tables out sequentially in a
+	 * single allocation anchored at be->cat.base.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		/* v21 doubles FTE key width (x4) and reuses several v18 layouts */
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		/* v22 adds the CCE/CCS tables (14 tables instead of 12) */
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT shadow cache.  All per-version tables live inside the
+ * single allocation anchored at be->cat.base, so one free() suffices.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	if (be->cat.base) {
+		free(be->cat.base);
+		be->cat.base = NULL;
+	}
+}
+
+/*
+ * Reset CFN (categorizer function) entry i to permissive defaults: clear the
+ * entry, then open every protocol/error match field so the entry accepts all
+ * traffic until configured otherwise.
+ *
+ * NOTE(review): only the initial PRESET_ALL return value is captured in
+ * 'err'; failures from the subsequent hw_mod_cat_cfn_set() calls are
+ * silently dropped — confirm this is intentional.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields below only exist from module version 21 onwards */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the whole CAT module: zero the shadow cache and flush every table to
+ * hardware.  Version 18 has a single KM interface; version 21+ flushes
+ * KCE/KCS/FTE per KM interface (m0, and m1 when km_if_count > 1).  CCE/CCS
+ * are flushed only for versions above 21 where they exist.
+ *
+ * Returns 0 on success, -1 on the first failing flush.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* Second KM interface, if the FPGA exposes one */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC CAM is optional; only flush when the FPGA has one */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist only in versions above 21 */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached CFN table to hardware.
+ * count == ALL_ENTRIES flushes the full table (start_idx must then be 0).
+ * Returns the backend result, or a negative error for out-of-range indices.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	switch (count) {
+	case ALL_ENTRIES:
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						be->cat.nb_cat_funcs);
+
+	default:
+		if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						count);
+	}
+}
+
+/*
+ * Get or set one field of CFN entry 'index' in the shadow cache.
+ *
+ * field:    which CFN field to access (HW_CAT_CFN_*); also accepts the
+ *           pseudo-fields SET_ALL_DEFAULTS, PRESET_ALL, COMPARE, FIND and
+ *           (v21+) COPY_FROM, which operate on whole entries.
+ * word_off: word offset within multi-word fields (e.g. PM_CMP).
+ * value:    in for set, out for get.
+ * get:      non-zero reads the cache, zero writes it.
+ *
+ * Dispatches on the module version first, then the field.  Returns 0 on
+ * success or a negative error for bad index/field/version.  Cache-only:
+ * hardware is updated separately via hw_mod_cat_cfn_flush().
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a two-word field */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has a single KM interface, hence one km_or field */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* copy whole entry *value into entry 'index' (v21+ only) */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a two-word field */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get wrappers over hw_mod_cat_cfn_mod() (cache only). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map a KM/FLM interface selection (if_num) plus module id (km_if_id) to the
+ * per-entry array index (0 or 1) used by the v21+ cache layouts.
+ *
+ * Version 18 supports only a single interface, so it always maps to 0.
+ * For KM_FLM_IF_SECOND only m1 is valid; otherwise m0 maps to 0 and m1 to 1.
+ * Returns a negative error if km_if_id matches neither configured module.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	int km_if_idx;
+
+	if (_VER_ == 18) {
+		km_if_idx = 0;
+	} else {
+		if (if_num == KM_FLM_IF_SECOND) {
+			if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		} else {
+			if (be->cat.km_if_m0 == km_if_id)
+				km_if_idx = 0;
+			else if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		}
+	}
+	return km_if_idx;
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush cached KCE entries to hardware for the selected KM interface.
+ * The KCE table holds one bit per CFN, packed 8 per entry, so index space
+ * is nb_cat_funcs / 8.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* KCE flush wrappers: km_if_id 0 selects the KM module, 1 the FLM module. */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the enable bitmap of KCE entry 'index' in the shadow cache.
+ * v18 has one bitmap per entry; v21+ keeps one bitmap per KM interface.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* KCE set/get wrappers: km_if_id 0 selects the KM module, 1 the FLM module. */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/*
+ * Flush cached KCS entries (one category per CFN) to hardware for the
+ * selected KM interface.
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* KCS flush wrappers: km_if_id 0 selects the KM module, 1 the FLM module. */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the category of KCS entry 'index' in the shadow cache.
+ * v18 has a single category per entry; v21+ keeps one per KM interface.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* KCS set/get wrappers: km_if_id 0 selects the KM module, 1 the FLM module. */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush cached FTE entries to hardware for the selected KM interface.
+ * Table size is (nb_cat_funcs / 8) * nb_flow_types * key_cnt, where
+ * key_cnt is 2 before version 20 and 4 from version 20 onwards (matching
+ * the allocation in hw_mod_cat_alloc()).
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* FTE flush wrappers: km_if_id 0 selects the KM module, 1 the FLM module. */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the enable bitmap of FTE entry 'index' in the shadow cache.
+ * Index space is sized by key_cnt exactly as in hw_mod_cat_fte_flush().
+ * v18 has one bitmap per entry; v21+ keeps one per KM interface.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/* Flush CTE shadow entries to hardware; one entry per CAT function. */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get/set one field of a CTE shadow entry. The v18 layout is reused for
+ * v21 and v22 here (all versions access be->cat.v18.cte).
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_cte_mod(). */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CTS shadow entries. Each CAT function owns addr_size consecutive
+ * CTS words; before module version 15 the stride is fixed at 8, otherwise
+ * it is derived from cts_num.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get/set one field of a CTS shadow entry.
+ * NOTE(review): unlike cts_flush above, addr_size here has no "_VER_ < 15"
+ * special case — confirm the bound is correct for pre-v15 designs.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_cts_mod(). */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/* Flush COT shadow entries to hardware; one entry per category. */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get/set one field of a COT shadow entry, plus three pseudo-fields:
+ * PRESET_ALL fills the whole entry with the low byte of *value (set only),
+ * COMPARE/FIND delegate entry comparison/search to shared helpers.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_cot_mod(). */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/* Flush CCT shadow entries; the table holds 4 entries per CAT function. */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get/set one field of a CCT shadow entry (v18 layout shared by all). */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_cct_mod(). */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/* Flush KCC CAM shadow entries to hardware (kcc_size entries). */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get/set one field of a KCC CAM shadow entry. For HW_CAT_KCC_KEY,
+ * word_off selects one of the two 32-bit key words.
+ *
+ * Fix: the original only rejected word_off > 1; a negative word_off
+ * indexed key[2] out of bounds. Reject the full invalid range.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			/* key[] has exactly 2 words; bound both ends */
+			if (word_off < 0 || word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_kcc_mod(). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush EXO shadow entries; one entry per pattern-matcher extractor. */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get/set one field of an EXO shadow entry. OFS is a signed offset and
+ * therefore uses get_set_signed().
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_exo_mod(). */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/* Flush RCK shadow entries; 64 entries per pattern-matcher extractor. */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get/set the single data word of an RCK shadow entry. */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_rck_mod(). */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/* Flush LEN shadow entries to hardware (nb_len entries). */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get/set one field of a LEN shadow entry (v18 layout shared by all). */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_len_mod(). */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/* Flush CCE shadow entries to hardware; the CCE table has 4 entries. */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get/set one field of a CCE shadow entry (v22 only).
+ *
+ * Fix: the index was validated against be->cat.nb_len — a copy-paste from
+ * the LEN accessor — while the CCE table holds exactly 4 entries (see
+ * hw_mod_cat_cce_flush). Validate against the real table size so an index
+ * in [4, nb_len) can no longer write past be->cat.v22.cce[].
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_cce_mod(). */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+/* Flush CCS shadow entries to hardware; the CCS table has 1024 entries. */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get/set one field of a CCS shadow entry (v22 only).
+ *
+ * Fix: the index was validated against be->cat.nb_len — a copy-paste from
+ * the LEN accessor — while the CCS table holds 1024 entries (see
+ * hw_mod_cat_ccs_flush). Validate against the real table size so indexes
+ * outside be->cat.v22.ccs[] are always rejected.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_cat_ccs_mod(). */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/*
+ * Shadow-register layouts for the CAT module, FPGA version 18.
+ * Field names mirror the FPGA register fields; each struct below
+ * represents one entry of the corresponding hardware table.
+ * NOTE(review): field semantics are assumed from the register names —
+ * confirm against the FPGA register documentation.
+ */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE: single enable bitmap per entry (v18 has one KM interface). */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS: category select per entry. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE: flow type enable bitmap per entry. */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE: enable bitmap, addressable as a whole or as per-block bits. */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO: extractor config; ofs is a signed byte offset (see get_set_signed). */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC CAM entry: two key words plus category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Top-level v18 shadow: one dynamically allocated table per register. */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/*
+ * CAT module shadow layouts for FPGA version 21. v21 extends v18 with
+ * tunnel/TTL error checks and a second KM interface: KCE/KCS/FTE become
+ * two-element arrays indexed by KM/FLM interface. Unchanged tables reuse
+ * the v18 struct definitions.
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* One bitmap/category/enable per KM interface (index 0 and 1). */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Top-level v21 shadow; v18 layouts are reused where unchanged. */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/*
+ * CAT module shadow layouts for FPGA version 22. v22 extends v21 with an
+ * rrb bit in CTE and adds the CCE and CCS tables; other tables reuse the
+ * v18/v21 struct definitions.
+ */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;	/* new in v22 */
+		} b;
+	};
+};
+
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS entry: per-block enable flag plus value, and three sideband slots. */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Top-level v22 shadow; earlier layouts are reused where unchanged. */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend for presence of the FLM module on this device. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Read the FLM module version and resource counts from the backend and
+ * allocate the version-specific register cache in one backing block.
+ *
+ * Returns 0 on success, -1 if callocate_mod() fails, or the value of an
+ * error_* helper when a resource count is missing or the module version
+ * is unsupported.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* fixed: previously reported "flm_variant" (copy-paste) */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* 26 (ptr, count, elem_size) triples follow the count */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v17 layout (26 triples) plus 12 v20-only counters = 38 */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM register cache.  The whole cache is one backing
+ * allocation rooted at flm.base, so a single free() suffices; the
+ * pointer is cleared to make double-free harmless.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	if (be->flm.base) {
+		free(be->flm.base);
+		be->flm.base = NULL;
+	}
+}
+
+/*
+ * Reset the FLM module: zero the cached register image, apply the
+ * default SDRAM split, and flush control/timeout/scrub/RCP to hardware.
+ * NOTE(review): the return values of the set/flush calls are discarded
+ * and 0 is always returned — confirm failures here are acceptable to
+ * ignore.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached FLM control registers to the hardware backend. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) one FLM control field in the cache.
+ * HW_FLM_CONTROL_PRESET_ALL is set-only and fills the whole control
+ * struct with the low byte of *value.  Versions 17 and 20 share the
+ * v17 control layout.  Returns 0, or an error for unsupported
+ * fields/versions.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM control field in the cache (flush separately). */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one FLM control field from the cache into *value. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM status registers to the hardware backend. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached FLM status registers from the hardware backend. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) one FLM status field in the cache.
+ * Returns 0, or an error for unsupported fields/versions.
+ */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM status field in the cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one FLM status field from the cache into *value. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM timeout register to the hardware backend. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) the FLM timeout value in the cache.
+ * Returns 0, or an error for unsupported fields/versions.
+ */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM timeout value in the cache. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the FLM timeout value from the cache into *value. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM scrub register to the hardware backend. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) the FLM scrub interval in the cache.
+ * Returns 0, or an error for unsupported fields/versions.
+ */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM scrub interval in the cache. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the FLM scrub interval from the cache into *value. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_bin register to the hardware backend. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) the FLM load_bin value in the cache.
+ * Returns 0, or an error for unsupported fields/versions.
+ */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_bin value in the cache. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_bin value from the cache into *value. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_pps register to the hardware backend. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) the FLM load_pps value in the cache.
+ * Returns 0, or an error for unsupported fields/versions.
+ */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_pps value in the cache. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_pps value from the cache into *value. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_lps register to the hardware backend. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) the FLM load_lps value in the cache.
+ * Returns 0, or an error for unsupported fields/versions.
+ */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_lps value in the cache. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_lps value from the cache into *value. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_aps register to the hardware backend. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) the FLM load_aps value in the cache.
+ * Returns 0, or an error for unsupported fields/versions.
+ */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_aps value in the cache. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_aps value from the cache into *value. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM priority registers to the hardware backend. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) one FLM priority field (limit/ft for
+ * priority levels 0..3) in the cache.  Returns 0, or an error for
+ * unsupported fields/versions.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM priority field in the cache. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one FLM priority field from the cache into *value. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached PST profile entries to the
+ * hardware backend.  count == ALL_ENTRIES flushes every profile.
+ * Fails when the range exceeds nb_pst_profiles.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) one field of PST profile 'index' in
+ * the cache.  HW_FLM_PST_PRESET_ALL is set-only and fills the whole
+ * entry with the low byte of *value.  The index is not range-checked
+ * here; callers flush through hw_mod_flm_pst_flush() which is.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one field of PST profile 'index' in the cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of PST profile 'index' from the cache into *value. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached RCP (recipe) entries to
+ * the hardware backend.  count == ALL_ENTRIES flushes every category.
+ * Fails when the range exceeds nb_categories.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) one field of RCP entry 'index' in
+ * the cache.  HW_FLM_RCP_PRESET_ALL is set-only and fills the whole
+ * entry with the low byte of *value.  HW_FLM_RCP_MASK copies the whole
+ * 10-word mask array, so *value must point at at least 10 uint32_t.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the 10-word RCP mask array of entry 'index'.  Only accepts
+ * HW_FLM_RCP_MASK.
+ * NOTE(review): the rejection path returns error_unsup_ver() even
+ * though the problem is the field, not the version — error_unsup_field()
+ * looks intended; confirm before changing since callers may match on
+ * the return code.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Set one scalar RCP field of entry 'index'.  HW_FLM_RCP_MASK must go
+ * through hw_mod_flm_rcp_set_mask() instead (see NOTE above on the
+ * error code used for rejection).
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field (or the mask array) of entry 'index'. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached FLM buffer-control registers from the backend. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one FLM buffer-control field from the cache into *value.
+ * Buffer-control registers are read-only from software, so no setter
+ * exists for this group.
+ */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public getter for a buffer-control field. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh the cached FLM statistics counters from the backend. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one cached FLM statistics counter into *value.  Counters present
+ * since v17 are handled in the outer switch; the nested default handles
+ * the counters added in v20, guarded by '_VER_ < 18' so that a v17
+ * device asking for a v20-only counter gets an unsupported-field error.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: { /* counters introduced after v17 */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn-data record to the backend's learn FIFO.  'value' must
+ * point at a complete struct flm_v17_lrn_data_s laid out as uint32_t
+ * words; the word count is derived from the struct size.
+ * Returns the backend's result, or an error for unsupported
+ * fields/versions.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to 'word_cnt' 32-bit words of flow-info records from the
+ * backend into 'value'.
+ * NOTE(review): unlike hw_mod_flm_lrn_data_set_flush(), the backend
+ * call's return value is discarded and 0 is always returned on the
+ * supported path — confirm whether the backend result should propagate.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record from the backend into 'value'; 'value' must
+ * hold a complete struct flm_v17_sta_data_s (word count derived from
+ * the struct size).
+ * NOTE(review): the backend call's return value is discarded and 0 is
+ * always returned on the supported path — confirm this is intentional.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay used to pack/unpack two 28-bit member indices from the raw
+ * mbr_idx[14] byte array of struct flm_v17_lrn_data_s (4 x 28 bits =
+ * 112 bits).  IDs 1/2 live in bytes 0..6, IDs 3/4 in bytes 7..13 — the
+ * '+ 7' in the ID3/ID4 macros skips the first 56 bits.
+ * NOTE(review): casting a uint8_t pointer to this struct relies on
+ * implementation-defined bit-field layout and may perform unaligned
+ * access; it also sidesteps strict aliasing — verify on all supported
+ * compilers/targets.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* FLM v17 control register fields (one shadow word per register bit/field). */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* FLM v17 status register fields (read from hardware). */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* FLM v17 flow timeout value. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* FLM v17 scrub interval. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* FLM v17 load registers (bin / packets / lookups / accesses per second). */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* FLM v17 priority configuration: limit/ft pair per priority level 0..3. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* FLM v17 PST profile entry (one per nb_pst_profiles). */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* FLM v17 RCP (recipe) entry (one per nb_categories). */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10]; /* copied whole via HW_FLM_RCP_MASK */
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* FLM v17 buffer-control registers (read-only from software). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * Byte-exact wire formats exchanged with the FLM engine.  Comments give
+ * the bit range as high:low plus the width in bits.
+ * NOTE(review): layout correctness depends on pragma pack(1) plus the
+ * compiler packing mixed-width bit-fields without gaps — verify against
+ * the FPGA record layout on every supported compiler.
+ */
+#pragma pack(1)
+/* Learn record pushed to hardware (see hw_mod_flm_lrn_data_set_flush). */
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record read from hardware (see hw_mod_flm_inf_data_update_get). */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Status record read from hardware (see hw_mod_flm_sta_data_update_get). */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* FLM v17 statistics counters, one single-word struct per counter
+ * register (read via hw_mod_flm_stat_get()).
+ */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v17 register-cache pointer table.  All pointers reference slices
+ * of one backing allocation made by hw_mod_flm_alloc(); 'pst' and 'rcp'
+ * are arrays sized by nb_pst_profiles and nb_categories respectively,
+ * the rest are single entries.
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20-only statistics registers; each mirrors one single-word
+ * register holding a 32-bit counter value. These supplement the v17
+ * counters (see hw_mod_flm_v17.h).
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v20 shadow layout. v20 reuses the v17 control/recipe/statistics
+ * register structs unchanged and appends the v20-only counters
+ * (STA/INF, PCK, CSH, CUC) at the end.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Query the backend for presence of the HSH (hashing) module. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Read the HSH module version from the backend and allocate the
+ * shadow RCP table for the supported version (v5: 32 recipes).
+ * Returns 0 on success, -1 on allocation failure, or the
+ * unsupported-version error code.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* One backing allocation for all cache tables */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the HSH cache memory (the single allocation anchored at
+ * be->hsh.base). free(NULL) is a no-op, so no guard is needed;
+ * clearing the pointer makes repeated calls harmless.
+ */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Reset the HSH module: zero the entire shadow cache and flush the
+ * zeroed RCP table to hardware. Returns the flush result.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the shadow RCP table to
+ * hardware. count == ALL_ENTRIES expands to the whole table; the
+ * range is validated against hsh.nb_rcp before the backend call.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Get or set one field of an HSH recipe in the shadow cache
+ * (get != 0 selects read). @word_off indexes into array fields.
+ * Returns 0 on success or an error code for a bad version, field,
+ * index or word offset.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * mac_port_mask[] has HSH_RCP_MAC_PORT_MASK_SIZE
+			 * elements, so the offset must be strictly below
+			 * the size (>=, not >) to avoid an out-of-bounds
+			 * access at word_off == size.
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* word_mask[] has HSH_RCP_WORD_MASK_SIZE elements */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HSH recipe field in the shadow cache (write-by-value). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one HSH recipe field from the shadow cache into *value. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * HSH v5 recipe: shadow of one RCP table entry. Offsets (*_ofs) are
+ * signed; the array sizes here must stay in sync with the
+ * HSH_RCP_MAC_PORT_MASK_SIZE / HSH_RCP_WORD_MASK_SIZE bounds used by
+ * hw_mod_hsh.c.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* HSH v5 shadow layout: only the recipe table. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Query the backend for presence of the HST (header stripper) module. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Read the HST module version and category count from the backend,
+ * then allocate the shadow RCP table for the supported version (v2).
+ * Returns 0 on success, -1 on allocation failure, or an error code
+ * for a bad resource count / unsupported version.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		/* One backing allocation for the RCP cache table */
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the HST cache memory (the single allocation anchored at
+ * be->hst.base). free(NULL) is a no-op, so no guard is needed;
+ * clearing the pointer makes repeated calls harmless.
+ */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Reset the HST module: zero the entire shadow cache and flush the
+ * zeroed RCP table to hardware. Returns the flush result.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the shadow RCP table to
+ * hardware. count == ALL_ENTRIES expands to the whole table; the
+ * range is validated before the backend call.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Get or set one field of an HST recipe in the shadow cache
+ * (get != 0 selects read). Returns 0 on success or an error code for
+ * a bad version, field or index.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Capture the result; previously it was discarded,
+			 * leaving rv == 0 so the error check below could
+			 * never fire (cf. the hsh/ioa equivalents).
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HST recipe field in the shadow cache (write-by-value). */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HST recipe field from the shadow cache into *value. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * HST v2 recipe: shadow of one RCP table entry. Three modifier slots
+ * (modif0..modif2), each with cmd/dyn/ofs/value fields.
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* HST v2 shadow layout: only the recipe table. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Query the backend for presence of the IOA module. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Read the IOA module version and resource counts from the backend,
+ * then allocate the shadow tables (RCP, special TPID, ROA EPP) for
+ * the supported version (v4). Returns 0 on success, -1 on allocation
+ * failure, or an error code for a bad count / unsupported version.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* One backing allocation for all three cache tables */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the IOA cache memory (the single allocation anchored at
+ * be->ioa.base). free(NULL) is a no-op, so no guard is needed;
+ * clearing the pointer makes repeated calls harmless.
+ */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Reset the IOA module: zero the shadow cache, program the two custom
+ * TPID defaults (0x8200/0x8300) and flush all tables to hardware.
+ * NOTE(review): the return values of the individual flush helpers are
+ * ignored, so reset always reports success.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the shadow RCP table to
+ * hardware. count == ALL_ENTRIES expands to the whole table; the
+ * range is validated before the backend call.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Get or set one field of an IOA recipe in the shadow cache
+ * (get != 0 selects read). Returns 0 on success or an error code for
+ * a bad version, field or index.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* Fill the whole entry with the byte in *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one IOA recipe field in the shadow cache (write-by-value). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IOA recipe field from the shadow cache into *value. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Write the cached special TPID configuration to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one of the two custom TPID values in the shadow config
+ * (write-only; flushed separately by hw_mod_ioa_config_flush()).
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the shadow ROA EPP table to
+ * hardware. count == ALL_ENTRIES expands to the whole table; the
+ * range is validated before the backend call.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Get or set one field of a ROA EPP entry in the shadow cache
+ * (get != 0 selects read). Returns 0 on success or an error code for
+ * a bad version, field or index.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* Fill the whole entry with the byte in *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one ROA EPP field in the shadow cache (write-by-value). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one ROA EPP field from the shadow cache into *value. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* IOA v4 recipe: shadow of one RCP table entry (VLAN/tunnel edits). */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Two custom (special) TPID values, set via HW_IOA_CONFIG_CUST_TPID_x. */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress-path entry (cross-indexed with ROA, see hw_mod_ioa.c). */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* IOA v4 shadow layout: RCP table, TPID config and ROA EPP table. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Query the backend for presence of the KM (key matcher) module. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Read the KM module version and resource counts from the backend,
+ * then allocate the shadow tables (RCP, CAM, TCAM, TCI, TCQ) for the
+ * supported version (v7). Returns 0 on success, -1 on allocation
+ * failure, or an error code for a bad count / unsupported version.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/*
+		 * Use the named mask-size constants instead of repeating
+		 * the magic numbers 12 and 6; these must agree with the
+		 * bounds checks in hw_mod_km_rcp_mod().
+		 */
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the KM cache memory (the single allocation anchored at
+ * be->km.base). free(NULL) is a no-op, so no guard is needed;
+ * clearing the pointer makes repeated calls harmless.
+ */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Reset the KM module: zero the shadow cache and flush all tables
+ * (RCP, CAM, TCAM, TCI, TCQ) to hardware. TCAM banks additionally
+ * get an explicit bank reset to resynchronize the cache with HW.
+ * NOTE(review): the return values of the flush helpers are ignored,
+ * so reset always reports success.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the shadow RCP table to
+ * hardware. count == ALL_ENTRIES expands to the whole table; the
+ * range is validated before the backend call.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Get or set one field of a KM recipe in the shadow cache
+ * (get != 0 selects read). @word_off indexes into the mask arrays.
+ * Returns 0 on success or an error code for a bad version, field,
+ * index or word offset.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * mask_d_a[] is sized by KM_RCP_MASK_D_A_SIZE (see
+			 * nb_km_rcp_mask_a_word_size in hw_mod_km_alloc), so
+			 * the offset must be strictly below the size (>=,
+			 * not >) to avoid an out-of-bounds access.
+			 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* mask_b[] is sized by KM_RCP_MASK_B_SIZE */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Public set/get accessors for a KM RCP field; thin wrappers around the
+ * common modify helper (last argument: 0 = write *value, 1 = read back).
+ */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush 'count' KM CAM records, starting at 'start_record' within
+ * 'start_bank', from the shadow cache to hardware via the backend
+ * interface.  ALL_ENTRIES flushes every record of every bank.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* records are laid out bank-major in one flat array */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Modify (get/set) one field of a KM CAM record in the shadow cache.
+ * (bank, record) is converted to a flat index into the v7 cam array.
+ * HW_KM_CAM_PRESET_ALL is set-only and fills the whole entry with the
+ * low byte of *value.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get accessors for KM CAM fields (see hw_mod_km_cam_mod). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush KM TCAM entries to hardware.  A bank holds 4 * 256 entries
+ * (4 byte-lanes of 256 byte-values each).  ALL_ENTRIES flushes all
+ * banks; ALL_BANK_ENTRIES flushes exactly one full bank.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Modify (get/set) one KM TCAM field in the shadow cache.
+ *
+ * A bank is organised as 4 byte-lanes of 256 byte-values; (bank, byte,
+ * byte_val) addresses one entry holding three 32-bit match words.
+ *
+ * HW_KM_TCAM_BANK_RESET: set-only; writes value_set[0..2] into every
+ * entry of 'bank' and marks them all dirty.
+ * HW_KM_TCAM_T: get returns the three words in value_set[0..2]; set
+ * stores them (and sets the dirty flag) only when at least one word
+ * actually differs, to avoid needless hardware writes.
+ *
+ * Note: the original computed the same flat index twice (start_index
+ * for the range check, index again inside HW_KM_TCAM_T); folded into
+ * one computation.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	/* flat index of the addressed entry; also used for range check */
+	unsigned int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+	if (index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T:
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else if (be->km.v7.tcam[index].t[0] != value_set[0] ||
+					be->km.v7.tcam[index].t[1] != value_set[1] ||
+					be->km.v7.tcam[index].t[2] != value_set[2]) {
+				/* only change if any bits has to be changed */
+				be->km.v7.tcam[index].t[0] = value_set[0];
+				be->km.v7.tcam[index].t[1] = value_set[1];
+				be->km.v7.tcam[index].t[2] = value_set[2];
+				be->km.v7.tcam[index].dirty = 1;
+			}
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get accessors for KM TCAM entries (see hw_mod_km_tcam_mod). */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush 'count' KM TCI records (bank width = nb_tcam_bank_width) to
+ * hardware, starting at 'start_record' of 'start_bank'.
+ * ALL_ENTRIES flushes every record of every TCAM bank.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Modify (get/set) one field of a KM TCI record in the shadow cache;
+ * (bank, record) is flattened into the v7 tci array.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get accessors for KM TCI fields (see hw_mod_km_tci_mod). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush 'count' KM TCQ records to hardware, starting at 'start_record'
+ * of 'start_bank'.  ALL_ENTRIES flushes every record of every bank.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Modify (get/set) one field of a KM TCQ record in the shadow cache;
+ * (bank, record) is flattened into the v7 tcq array.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get accessors for KM TCQ fields (see hw_mod_km_tcq_mod). */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * KM v7 recipe (RCP) shadow entry.  One entry per KM category; fields
+ * mirror the hardware register fields of the same names.  *_dyn/*_ofs
+ * pairs select dynamic offsets (offsets are signed), *_sel_* select
+ * word sources, mask_* hold the match masks.
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* KM v7 CAM record: six match words (w0-w5) and six flow types (ft0-ft5). */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* KM v7 TCAM entry: three match words plus a software dirty flag. */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* KM v7 TCAM color/flow-type info record. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* KM v7 TCAM qualifier record. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table pointers into the single allocation made by hw_mod_km_alloc. */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Read the PDB module version and resource counts from the backend and
+ * allocate the shadow cache (RCP table plus one CONFIG record) in a
+ * single allocation.  Returns 0 on success, negative on failure.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the single shadow-cache allocation made by hw_mod_pdb_alloc. */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+/*
+ * Reset the PDB module: zero the shadow cache, then flush the RCP table
+ * and the CONFIG record to hardware.  Returns non-zero if any flush
+ * failed (errors are OR'ed together).
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/*
+ * Flush 'count' PDB RCP entries starting at 'start_idx' to hardware.
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Modify (get/set) one field of a PDB RCP entry in the shadow cache.
+ * PRESET_ALL fills the whole entry with the low byte of *value
+ * (set-only); FIND/COMPARE delegate to the generic index helpers;
+ * the *_rel offsets are signed and use get_set_signed.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get accessors for PDB RCP fields (see hw_mod_pdb_rcp_mod). */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the single PDB CONFIG record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set one field of the PDB CONFIG record in the shadow cache
+ * (set-only; no matching getter is provided for CONFIG).
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * PDB v9 recipe (RCP) shadow entry; one per PDB category.  Fields
+ * mirror the hardware register fields; ofs*_rel are signed relative
+ * offsets paired with their ofs*_dyn selectors.
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* PDB v9 global configuration record (single instance). */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table pointers into the single allocation made by hw_mod_pdb_alloc. */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Read the QSL module version and resource counts from the backend and
+ * allocate the shadow cache (RCP, QST, QEN and UNMQ tables) in a single
+ * allocation.  Returns 0 on success, negative on failure.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the single shadow-cache allocation made by hw_mod_qsl_alloc. */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+/*
+ * Reset the QSL module: zero the shadow cache and flush all tables
+ * (RCP, QST, QEN and UNMQ) to hardware.
+ *
+ * Returns non-zero if any flush fails.  Previously the flush return
+ * values were silently discarded and 0 was always returned; errors are
+ * now OR'ed together, consistent with hw_mod_pdb_reset().  The UNMQ
+ * flush also goes through the bounds-checked wrapper instead of calling
+ * the backend directly with a magic count of 256 (ALL_ENTRIES expands
+ * to QSL_QNMQ_ENTRIES == 256 in hw_mod_qsl_unmq_flush()).
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	err |= hw_mod_qsl_unmq_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush 'count' QSL RCP entries starting at 'start_idx' to hardware.
+ * ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Modify (get/set) one field of a QSL RCP entry in the shadow cache.
+ * PRESET_ALL fills the whole entry with the low byte of *value
+ * (set-only); FIND/COMPARE delegate to the generic index helpers.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get accessors for QSL RCP fields (see hw_mod_qsl_rcp_mod). */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush 'count' QSL QST (queue selection table) entries starting at
+ * 'start_idx' to hardware.  ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Modify (get/set) one field of a QSL QST entry in the shadow cache.
+ * PRESET_ALL fills the whole entry with the low byte of *value (set-only).
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get accessors for QSL QST fields (see hw_mod_qsl_qst_mod). */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush 'count' QSL QEN (queue enable) entries starting at 'start_idx'
+ * to hardware.  ALL_ENTRIES flushes all QSL_QEN_ENTRIES.
+ */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/* Modify (get/set) the enable bits of one QSL QEN entry in the cache. */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get accessors for QSL QEN fields (see hw_mod_qsl_qen_mod). */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush 'count' QSL UNMQ (unmatched-packet queue) entries starting at
+ * 'start_idx' to hardware.  ALL_ENTRIES flushes all QSL_QNMQ_ENTRIES.
+ */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/* Modify (get/set) one field of a QSL UNMQ entry in the shadow cache. */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get accessors for QSL UNMQ fields (see hw_mod_qsl_unmq_mod). */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* QSL v7 recipe (RCP) shadow entry; one per QSL category. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* QSL v7 queue selection table (QST) entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* QSL v7 queue enable (QEN) entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* QSL v7 unmatched-packet queue (UNMQ) entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table pointers into the single allocation made by hw_mod_qsl_alloc. */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Read the RMC module version from the backend and allocate the shadow
+ * cache (a single CTRL record).  Only version 1.3 (0x10003) is
+ * supported.  Returns 0 on success, negative on failure.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single shadow-cache allocation made by hw_mod_rmc_alloc. */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+/*
+ * Reset the RMC module: zero the shadow cache, program a safe default
+ * control word (block statistics and keep-alive traffic, block all MAC
+ * ports and all RPP slices) and flush it to hardware.
+ *
+ * Note: the original set HW_RMC_BLOCK_STATT twice with the same value;
+ * the redundant duplicate call has been removed (no behavior change).
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	/* initially block all ports and all RPP slices */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT, 0xff);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the single RMC CTRL record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/* Modify (get/set) one field of the RMC CTRL record in the shadow cache. */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get accessors for RMC CTRL fields (see hw_mod_rmc_ctrl_mod). */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Shadow copy of the RMC control register fields (module version 1.3). */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;
+	uint32_t block_keepa;
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;
+	uint32_t lag_phy_odd_even;
+};
+
+/* RMC v1.3 cache layout: a single control record. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA module exists in this FPGA image. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query the ROA module version and resource counts from the backend and
+ * allocate the shadow cache for all ROA tables in one contiguous block.
+ * Returns 0 on success, negative on failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): tunnel categories are divided by 4 here — presumably
+	 * 4 flow categories share one tunnel category; confirm with HW spec.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the ROA shadow-cache allocation.
+ * free(NULL) is a defined no-op (C11 7.22.3.3), so no guard is needed;
+ * clearing the pointer keeps repeated calls safe and avoids dangling use.
+ */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset the ROA module: zero the shadow cache, program safe defaults and
+ * flush every table to hardware.  All flush results are accumulated so a
+ * failure in any table is reported, matching hw_mod_tpe_reset(); the
+ * original code silently dropped every error but the first flush's.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	/* Forward both recirculated and normal packets to TX ports 0/1 */
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached TUNHDR entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one 32-bit word (word_off) of a cached tunnel-header entry,
+ * or compare two entries (HW_ROA_TUNHDR_COMPARE).  get != 0 reads.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			/* word_off is reused as the second entry's index here */
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one tunnel-header word in the cache. */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Public getter for one tunnel-header word from the cache. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached TUNCFG entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one field of a cached tunnel-config entry (get != 0 reads).
+ * PRESET_ALL fills the whole entry with a byte pattern (set only);
+ * FIND/COMPARE search and compare entries instead of accessing a field.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one tunnel-config field in the cache. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one tunnel-config field from the cache. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Write the cached ROA config record to hardware via the backend iface. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Read/write one field of the single cached ROA config record
+ * (get != 0 reads).  Changes take effect on the next config_flush.
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one ROA config field in the cache. */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Public getter for one ROA config field from the cache. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached LAGCFG entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/* Read/write one field of a cached LAG-config entry (get != 0 reads). */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one LAG-config field in the cache. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one LAG-config field from the cache. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* Raw tunnel header template: 16 x 32-bit words per category. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration (lengths, checksum/length updates,
+ * recirculation and LAG selection).
+ */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;
+	uint32_t ipcs_precalc;
+	uint32_t iptl_upd;
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Module-wide packet forwarding switches. */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG table entry: physical TX port selection. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* ROA v6 cache layout: pointers into one contiguous allocation. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC module exists in this FPGA image. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query the SLC module version and allocate the RCP shadow cache,
+ * one entry per flow category.  Returns 0 on success, negative on failure.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC shadow-cache allocation.
+ * free(NULL) is a defined no-op (C11 7.22.3.3), so no guard is needed;
+ * clearing the pointer keeps repeated calls safe and avoids dangling use.
+ */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Reset the SLC module: zero the cache and flush every RCP entry to HW. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached SLC RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read/write one field of a cached SLC RCP entry (get != 0 reads).
+ * PRESET_ALL fills the whole entry with a byte pattern (set only);
+ * FIND/COMPARE search and compare entries.
+ *
+ * Fix: the element size passed to memset()/find_equal_index()/
+ * do_compare_indexes() must be sizeof(struct slc_v1_rcp_s) — the type the
+ * rcp array is allocated with in hw_mod_slc_alloc() — not
+ * sizeof(struct hw_mod_slc_v1_s), which only holds a single pointer and
+ * therefore gave a wrong stride and an incomplete memset.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is signed; use the sign-aware accessor */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one SLC RCP field in the cache. */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one SLC RCP field from the cache. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR module exists in this FPGA image. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query the SLC LR module version and allocate the RCP shadow cache,
+ * one entry per flow category.  Returns 0 on success, negative on failure.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC LR shadow-cache allocation.
+ * free(NULL) is a defined no-op (C11 7.22.3.3), so no guard is needed;
+ * clearing the pointer keeps repeated calls safe and avoids dangling use.
+ */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Reset the SLC LR module: zero the cache and flush every RCP entry to HW. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached SLC LR RCP entries to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one field of a cached SLC LR RCP entry (get != 0 reads).
+ * PRESET_ALL fills the whole entry with a byte pattern (set only);
+ * FIND/COMPARE search and compare entries.
+ *
+ * Fix: the element size passed to memset()/find_equal_index()/
+ * do_compare_indexes() must be sizeof(struct slc_lr_v2_rcp_s) — the type
+ * the rcp array is allocated with in hw_mod_slc_lr_alloc() — not
+ * sizeof(struct hw_mod_slc_lr_v2_s), which only holds a single pointer and
+ * therefore gave a wrong stride and an incomplete memset.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is signed; use the sign-aware accessor */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one SLC LR RCP field in the cache. */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one SLC LR RCP field from the cache. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* One SLC LR slicer recipe: enable, dynamic offset base, signed tail
+ * offset and pcap flag.
+ */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;
+	uint32_t pcap;
+};
+
+/* SLC LR v2 cache layout: one RCP table. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* One SLC slicer recipe: enable, dynamic offset base, signed tail offset
+ * and pcap flag.
+ */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;
+	uint32_t pcap;
+};
+
+/* SLC v1 cache layout: one RCP table. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE module exists in this FPGA image. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query the TPE module version and resource counts from the backend and
+ * allocate the shadow cache for every TPE sub-table in one contiguous
+ * block.  v2 adds the IFR (fragmentation) tables on top of the v1 layout.
+ * Returns 0 on success, negative on failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	/* IFR tables only exist from version 2 onwards */
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the TPE shadow-cache allocation.
+ * free(NULL) is a defined no-op (C11 7.22.3.3), so no guard is needed;
+ * clearing the pointer keeps repeated calls safe and avoids dangling use.
+ */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Reset the TPE module: zero the shadow cache and flush every sub-table
+ * to hardware.  Flush errors are OR-ed together so any failure is
+ * reported; the v2-only IFR tables are flushed only on version 2.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) cached RPP IFR RCP entries to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/* Read/write one field of a cached RPP IFR RCP entry (get != 0 reads).
+ * IFR tables exist only in module version 2.
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one RPP IFR RCP field in the cache. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one RPP IFR RCP field from the cache. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) cached RPP RCP entries to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read/write one field of a cached RPP RCP entry (get != 0 reads).
+ * PRESET_ALL fills the whole entry with a byte pattern (set only);
+ * FIND/COMPARE search and compare entries.  The v1 union view is valid
+ * for version 2 as well because the v2 layout starts with the v1 fields.
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one RPP RCP field in the cache. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one RPP RCP field from the cache. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) cached IFR RCP entries to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/* Read/write one field of a cached IFR RCP entry (get != 0 reads).
+ * IFR tables exist only in module version 2.
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one IFR RCP field in the cache. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one IFR RCP field from the cache. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) cached INS RCP entries to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read/write one field of a cached INS RCP entry (get != 0 reads).
+ * PRESET_ALL fills the whole entry with a byte pattern (set only);
+ * FIND/COMPARE search and compare entries.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for one INS RCP field in the cache. */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for one INS RCP field from the cache. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush RPL_RCP shadow entries [start_idx .. start_idx + count - 1] to the
+ * FPGA through the backend callback; ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one field of the RPL_RCP shadow record at `index`.
+ * HW_TPE_PRESET_ALL (write only) memsets the whole record to the low byte of
+ * *value; HW_TPE_FIND / HW_TPE_COMPARE delegate to the generic helpers.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single RPL_RCP field in the shadow copy. */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single RPL_RCP field from the shadow copy. */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush RPL_EXT shadow entries [start_idx .. start_idx + count - 1] to the
+ * FPGA through the backend callback; ALL_ENTRIES selects the whole table.
+ * This table is sized by nb_rpl_ext_categories (not nb_rcp_categories).
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one field of the RPL_EXT shadow record at `index`.
+ * HW_TPE_PRESET_ALL (write only) memsets the whole record to the low byte of
+ * *value; HW_TPE_FIND / HW_TPE_COMPARE delegate to the generic helpers.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single RPL_EXT field in the shadow copy. */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Read a single RPL_EXT field from the shadow copy. */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush RPL_RPL shadow entries [start_idx .. start_idx + count - 1] to the
+ * FPGA through the backend callback; ALL_ENTRIES selects the whole table.
+ * This table is sized by nb_rpl_depth.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one field of the RPL_RPL shadow record at `index`.
+ * HW_TPE_RPL_RPL_VALUE transfers the full 4 x 32-bit replacement data block,
+ * so `value` must point at at least four uint32_t for that field.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write an RPL_RPL field; takes a pointer since VALUE spans four words. */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Read an RPL_RPL field from the shadow copy. */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush CPY_RCP shadow entries [start_idx .. start_idx + count - 1] to the
+ * FPGA through the backend callback; ALL_ENTRIES selects the whole table.
+ * The table holds nb_cpy_writers entries per RCP category.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one field of the CPY_RCP shadow record at `index`.
+ * `index` ranges over nb_cpy_writers * nb_rcp_categories entries.
+ * HW_TPE_PRESET_ALL (write only) memsets the whole record; HW_TPE_FIND /
+ * HW_TPE_COMPARE delegate to the generic helpers.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single CPY_RCP field in the shadow copy. */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single CPY_RCP field from the shadow copy. */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush HFU_RCP shadow entries [start_idx .. start_idx + count - 1] to the
+ * FPGA through the backend callback; ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one field of the HFU_RCP shadow record at `index`.
+ * The record carries three length-update descriptors (LEN_A/B/C), a TTL
+ * descriptor and a set of protocol/offset fields - see
+ * struct tpe_v1_hfu_v1_rcp_s. HW_TPE_PRESET_ALL (write only) memsets the
+ * whole record; HW_TPE_FIND / HW_TPE_COMPARE delegate to the generic helpers.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* LEN_A descriptor fields */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* LEN_B descriptor fields */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* LEN_C descriptor fields */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL descriptor fields */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* Protocol info and header offsets */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single HFU_RCP field in the shadow copy. */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single HFU_RCP field from the shadow copy. */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush CSU_RCP shadow entries [start_idx .. start_idx + count - 1] to the
+ * FPGA through the backend callback; ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one field of the CSU_RCP shadow record at `index`.
+ * Fields select the checksum command for outer/inner L3/L4 headers.
+ * HW_TPE_PRESET_ALL (write only) memsets the whole record; HW_TPE_FIND /
+ * HW_TPE_COMPARE delegate to the generic helpers.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a single CSU_RCP field in the shadow copy. */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read a single CSU_RCP field from the shadow copy. */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/*
+ * Shadow-copy record layouts for the TPE v1 module registers. One struct per
+ * recipe/table; instances are referenced as arrays from struct
+ * hw_mod_tpe_v1_s and written to hardware by the hw_mod_tpe_*_flush
+ * functions.
+ */
+
+/* RPP v0 recipe record. */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS v1 recipe record. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL v2 recipe record. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL v2 extension record. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL v2 replacement data record: 4 x 32-bit data words. */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY v1 recipe record. */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* HFU v1 recipe record: LEN_A/B/C and TTL update descriptors plus
+ * protocol/offset fields.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU v0 recipe record: checksum commands for outer/inner L3/L4. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* All TPE v1 shadow tables; array sizes are held in the enclosing
+ * backend structure (nb_rcp_categories etc.).
+ */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/*
+ * TPE v2 shadow layout: reuses all v1 record types (hw_mod_tpe_v1.h must be
+ * included first) and adds the RPP_IFR and IFR recipe tables.
+ */
+
+/* RPP v1 IFR recipe record: enable flag and MTU. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR v1 recipe record: enable flag and MTU. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* All TPE v2 shadow tables - the v1 tables plus the two IFR tables. */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian storage aliases; plain integer typedefs - byte order is a
+ * convention only, not enforced by the type system.
+ */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Queue identification: API-level id and corresponding hardware id. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/*
+ * Flow match item types - driver-local mirror of the RTE_FLOW_ITEM_TYPE_*
+ * values used by the binary programming interface.
+ */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/*
+ * Flow action types; the comment on each entry names the `conf` structure
+ * that accompanies it in struct flow_action.
+ */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* 6-byte Ethernet MAC address; packed so it can overlay wire data. */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format `eth_addr` into `buf` as "XX:XX:XX:XX:XX:XX" (uppercase hex).
+ * snprintf semantics: output is NUL-terminated and truncated to `size`.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * Wire-format protocol header mirrors. #pragma pack(1) removes padding so
+ * each struct matches the on-the-wire layout; be16_t/be32_t fields hold
+ * big-endian data.
+ */
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry in a flow pattern: item type plus spec/mask pointers. */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * Identifies which packet header field or flow metadata item a
+ * MODIFY_FIELD action reads or writes (mirrors the rte_flow field ids).
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Which union member applies depends on "field": FLOW_FIELD_VALUE uses
+ * "value", FLOW_FIELD_POINTER uses "pvalue", every other id uses the
+ * level/offset pair.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0,
+	FLOW_MODIFY_ADD,
+	FLOW_MODIFY_SUB,
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* How src is combined into dst. */
+	struct flow_action_modify_data dst; /* Field to be modified. */
+	struct flow_action_modify_data src; /* Source field or value. */
+	/* Number of bits to transfer - presumably as in
+	 * rte_flow_action_modify_field; confirm against callers.
+	 */
+	uint32_t width;
+};
+
+/* Generic action: type selects which flow_action_* struct conf points to. */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf; /* Type-specific configuration data. */
+};
+
+/* Status codes reported through struct flow_error. */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Error information returned by the flow API calls. */
+struct flow_error {
+	enum flow_error_e type;
+	const char *message; /* Static, human-readable description. */
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY,
+	FLOW_LAG_SET_ALL,
+	FLOW_LAG_SET_BALANCE,
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	/* presumably 4 or 6, selecting the union member - confirm */
+	int ipversion;
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+
+/* Reset all flow state on the given adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/*
+ * Create/attach the flow eth device for a port, optionally allocating
+ * alloc_queues RX queues from queue_ids. Returns NULL on failure.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+/* Add an RX queue to an existing flow eth device. */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+/* Detach and release a flow eth device created by flow_get_eth_dev(). */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+/* Look up the tunnel definition for flow_stat_id/vport - TODO confirm. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+
+/* Check whether a flow specification could be created; no state changed. */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+/* Create a flow; returns a handle for flow_destroy(), or NULL on error. */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+/* Destroy a single flow previously returned by flow_create(). */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+/* Destroy all flows on the device. */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+/* Query a flow for data related to the given action. */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+/* Configure a meter profile with two bucket rate/size pairs. */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+
+/* Merge port pairs into LAG groups; bit N pairs ports N*2 and N*2+1. */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Block RX traffic from the ports set in port_mask. */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+/* Program the LAG distribution table; see enum flow_lag_cmd. */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v8 6/8] net/ntnic: adds flow logic
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-29  8:15   ` [PATCH v8 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-29  8:15   ` Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/* Debug names for the resource types; order must match enum res_type_e. */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global list of NIC devices managed by this module. */
+static struct flow_nic_dev *dev_base;
+/* Protects dev_base and the adapter lookup/config paths below. */
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. To too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+/*
+ * Translate an internal error code into a flow_error for the caller.
+ * A NULL error pointer is allowed and simply ignored.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (!error)
+		return;
+
+	error->message = err_msg[msg].message;
+	if (msg == ERR_SUCCESS)
+		error->type = FLOW_ERROR_SUCCESS;
+	else
+		error->type = FLOW_ERROR_GENERAL;
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Claim the first free resource of the given type, probing only indices
+ * that are multiples of alignment. Returns the index, or -1 when full.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	unsigned int idx = 0;
+
+	while (idx < ndev->res[res_type].resource_count) {
+		if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+			flow_nic_mark_resource_used(ndev, res_type, idx);
+			ndev->res[res_type].ref[idx] = 1;
+			return idx;
+		}
+		idx += alignment;
+	}
+	return -1;
+}
+
+/*
+ * Claim the resource of the given type at a specific index.
+ * Returns 0 on success, -1 if that index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (flow_nic_is_resource_used(ndev, res_type, idx))
+		return -1;
+
+	flow_nic_mark_resource_used(ndev, res_type, idx);
+	ndev->res[res_type].ref[idx] = 1;
+	return 0;
+}
+
+/*
+ * Claim "num" contiguous resources of the given type. Candidate start
+ * indices are probed at multiples of alignment. Returns the first index
+ * of the claimed run, or -1 when no suitable run exists.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/*
+	 * Guard against unsigned underflow of "resource_count - (num - 1)":
+	 * requesting more elements than exist would otherwise make the loop
+	 * bound wrap and scan far out of bounds.
+	 */
+	if (num == 0 || num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx <= ndev->res[res_type].resource_count - num;
+			res_idx += alignment) {
+		if (flow_nic_is_resource_used(ndev, res_type, res_idx))
+			continue;
+
+		/* Verify the following num - 1 entries are free as well. */
+		for (idx_offs = 1; idx_offs < num; idx_offs++) {
+			if (flow_nic_is_resource_used(ndev, res_type,
+						      res_idx + idx_offs))
+				break;
+		}
+		if (idx_offs < num)
+			continue;
+
+		/* found a contiguous number of "num" res_type elements - allocate them */
+		for (idx_offs = 0; idx_offs < num; idx_offs++) {
+			flow_nic_mark_resource_used(ndev, res_type,
+						    res_idx + idx_offs);
+			ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+		}
+		return res_idx;
+	}
+	return -1;
+}
+
+/* Release one resource of the given type; no reference counting here. */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an extra reference on an already allocated resource.
+ * Returns 0 on success, -1 if the counter is saturated.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	uint32_t refcnt = ndev->res[res_type].ref[index];
+
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, refcnt);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+
+	if (refcnt == (uint32_t)-1)
+		return -1;
+
+	ndev->res[res_type].ref[index] = refcnt + 1;
+	return 0;
+}
+
+/*
+ * Drop one reference on a resource and free it when the count reaches
+ * zero. Returns 1 while still referenced, 0 once the resource is freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+
+	ndev->res[res_type].ref[index]--;
+	if (ndev->res[res_type].ref[index] == 0) {
+		flow_nic_free_resource(ndev, res_type, index);
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * Return the first in-use resource index at or after idx_start,
+ * or -1 when no further resources of this type are in use.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	unsigned int idx = idx_start;
+
+	for (; idx < ndev->res[res_type].resource_count; idx++) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			return idx;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number of flow resources and record them in a flow handle.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	/* A single element can use the cheaper non-contiguous allocator. */
+	if (count <= 1)
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	else
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+
+	if (fh->resource[res_type].index < 0)
+		return -1;
+
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the resource of the given type at a fixed index and record it in
+ * the flow handle. Returns 0 on success, negative on failure.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err != 0)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	if (fh->resource[res_type].index < 0)
+		return -1;
+
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program hasher recipe hsh_idx with a predefined algorithm.
+ * HASH_ALGO_5TUPLE installs an IPv6-capable 5-tuple hash with the
+ * adaptive IPv4 mask bit set; any other value leaves the recipe cleared,
+ * which means round-robin distribution. Always returns 0; the hw_mod_*
+ * return values are not checked here.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	/* Start from a cleared recipe. */
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		/* QW0 = dst-IP field offset -16: for IPv6 this is the src IP. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		/* W8 selects the start of the L4 header (ports). */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Words 0-8 participate in the hash; word 9 is masked out. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program hasher recipe hsh_idx from an RSS field-flag combination.
+ * Only the specific combinations handled below are supported; any other
+ * combination is rejected with -1. Returns 0 on success.
+ *
+ * Fixes: the outer-dst and inner-src cases previously discarded the
+ * hw_mod_hsh_rcp_set() return values, so their "if (res)" error checks
+ * could never trigger; the inner-src case also logged the outer-dst
+ * message, and "cardware" was a typo for "hardware".
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN
+		 * provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE,
+					  hsh_idx, 0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS,
+					  hsh_idx, 0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE,
+					  hsh_idx, 0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS,
+					  hsh_idx, 0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the eth device for (adapter_no, port). Returns NULL when either
+ * the adapter or the port is unknown.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev;
+	struct flow_eth_dev *eth_dev;
+
+	for (nic_dev = dev_base; nic_dev; nic_dev = nic_dev->next) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+	}
+
+	if (!nic_dev)
+		return NULL;
+
+	for (eth_dev = nic_dev->eth_base; eth_dev; eth_dev = eth_dev->next) {
+		if (eth_dev->port == port)
+			return eth_dev;
+	}
+
+	return NULL;
+}
+
+/* Find the NIC device registered for the given adapter number, or NULL. */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev;
+
+	for (ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			break;
+	}
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure which pairs of physical ports act as a LAG group.
+ * Each bit N in port_mask merges ports N*2 and N*2+1, which are then
+ * reported as incoming port N*2. Returns 0 on success, -1 on bad adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block RX traffic from the ports set in port_mask.
+ * Returns 0 on success, -1 when the adapter is unknown.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one LAG distribution table entry and flush it to hardware. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Configure the LAG distribution table.
+ * cmd selects the operation:
+ *   FLOW_LAG_SET_ENTRY   - write "value" to table entry "index"
+ *   FLOW_LAG_SET_ALL     - write "value" to entry "index" (mod 4) of every
+ *                          4-entry hash block
+ *   FLOW_LAG_SET_BALANCE - split whole hash blocks between the two output
+ *                          ports; "value" is the percentage of blocks
+ *                          directed to port 0 (clamped to 100)
+ * Returns 0 on success, -1 on unknown adapter or command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		/* NOTE(review): entries are written as 1 and 2 - presumably
+		 * the hardware's one-based encoding of ports P0/P1; confirm.
+		 */
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow specification without creating it.
+ * Only the inline profile is supported; vSwitch is rejected with -1.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Create a flow on the device.
+ * Delegates to the inline-profile implementation; the vSwitch profile
+ * is rejected with NULL.
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/*
+ * Destroy a previously created flow.
+ * Delegates to the inline-profile implementation; the vSwitch profile
+ * is rejected with -1.
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Remove all flows on the device.
+ * Delegates to the inline-profile implementation; the vSwitch profile
+ * is rejected with -1.
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Query a flow action (e.g. counters) on an existing flow.
+ * Delegates to the inline-profile implementation; the vSwitch profile
+ * is rejected with -1.
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Prepend an eth-port device to the NIC device's singly linked list.
+ * Caller must hold the appropriate locks (called under base_mtx/ndev->mtx
+ * in flow_get_eth_dev()).
+ */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink an eth-port device from the NIC device's list.
+ * Returns 0 when found and removed, -1 when not present.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link;
+
+	/* Walk via a pointer-to-pointer so head and interior removal
+	 * are handled identically.
+	 */
+	for (link = &ndev->eth_base; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on a NIC device: delete every eth-port
+ * device, destroy any flows that (erroneously) survived the port
+ * deletion, and release profile flow management plus KM/KCC resource
+ * handles. In FLOW_DEBUG builds, additionally report any resource
+ * entries still referenced or marked used.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check - flows should already be gone with their eth-ports */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+		/* The redundant nested '#if defined(FLOW_DEBUG)' guard was
+		 * removed - this whole region is already under FLOW_DEBUG.
+		 */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/* Reset the flow state of an adapter: tear down all ports/flows via
+ * flow_ndev_reset() and then reset the backend modules.
+ * Returns 0 on success, -1 if no NIC device is registered for adapter_no.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev)
+		return -1;
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+/*
+ * Create (or re-create) the eth-port device for a NIC port, allocate its
+ * RX queues and, for the inline profile, enable them in QSL.
+ * Returns the eth-port device, or NULL on failure.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * Fix: drop base_mtx only around the delete (which takes
+		 * ndev->mtx), then re-acquire it. The original did not
+		 * re-lock here, so the success/error paths below unlocked
+		 * an already-unlocked base_mtx.
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/*
+		 * Fix: err_exit1 does not release base_mtx (err_exit0
+		 * releases it when falling through), so it must be
+		 * released here. The original leaked the lock on this path.
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Enable each allocated queue in the QSL enable bitmap
+		 * (4 enable bits per register word).
+		 */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an externally allocated RX queue on the eth-port device and
+ * enable it in the QSL enable bitmap (4 enable bits per register word).
+ * Returns 0 on success, -1 if the queue table is full.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	/* rx_queue[] holds FLOW_MAX_QUEUES + 1 entries (0th is exception);
+	 * the original wrote past the array when called too many times.
+	 */
+	if (eth_dev->num_queues >= FLOW_MAX_QUEUES + 1) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		return -1;
+	}
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy all flows created on it, clear its
+ * QSL unmatched-queue setting, disable/free its RX queues, unlink it
+ * from the NIC device list and free it.
+ * Returns 0 on success, -1 on error.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/* Fix: release the lock before bailing out;
+				 * the original returned with ndev->mtx held.
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Clear each queue's enable bit (4 bits per register word) */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper: look up the tunnel configuration matching a flow_stat_id
+ * and virtual port; forwards directly to tunnel_get_definition().
+ */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate the tracking state for one resource type: a single buffer
+ * holding the allocation bitmap followed by a per-element reference
+ * counter array. Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	uint8_t *mem;
+
+	assert(ndev->res[res_type].alloc_bm == NULL);
+	/* one allocation: bitmap first, then the ref-counter array */
+	mem = calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
+	if (mem == NULL)
+		return -1;
+
+	ndev->res[res_type].alloc_bm = mem;
+	ndev->res[res_type].ref = (uint32_t *)&mem[BIT_CONTAINER_8_ALIGN(count)];
+	ndev->res[res_type].resource_count = count;
+	return 0;
+}
+
+/* Release the tracking state for one resource type (bitmap and ref
+ * counters share a single allocation; ref points into alloc_bm).
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(ndev->res[res_type].alloc_bm);
+	/* clear stale pointers defensively */
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/* Prepend a NIC device to the global dev_base list under base_mtx. */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global dev_base list under base_mtx.
+ * Returns 0 when found and removed, -1 when not present.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	struct flow_nic_dev **link;
+	int rc = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	/* pointer-to-pointer walk handles head and interior the same way */
+	for (link = &dev_base; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			rc = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return rc;
+}
+
+/*
+ * Create and register a flow_nic_dev for a physical adapter.
+ *
+ * Initializes the backend, sizes every per-type resource manager from the
+ * backend capabilities, and inserts the device into the global list.
+ * Returns the new device, or NULL on failure (partial initialization is
+ * unwound via flow_api_done()).
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* only backend ops version 1 is supported */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* in-port count is capped at 256 addressable ports */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* flow_api_done() tolerates the partially initialized ndev here
+	 * (freeing NULL bitmaps, removing from a list it was never on)
+	 */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Release a NIC device: reset all eth-port/flow state, free the per-type
+ * resource managers, shut down the backend, unlink it from the global
+ * device list and free it. Safe to call with NULL or with a partially
+ * initialized ndev (used by the flow_api_create() error path).
+ * Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev) {
+		flow_ndev_reset(ndev);
+
+		/* delete resource management allocations for this ndev */
+		for (int i = 0; i < RES_COUNT; i++)
+			done_resource_elements(ndev, i);
+
+		flow_api_backend_done(&ndev->be);
+		list_remove_flow_nic(ndev);
+		/* NOTE(review): ndev->mtx is never pthread_mutex_destroy()'ed,
+		 * and this path can run before pthread_mutex_init() when
+		 * invoked from flow_api_create()'s err_exit - confirm intended.
+		 */
+		free(ndev);
+	}
+	return 0;
+}
+
+/* Return the opaque backend device handle of a NIC device, or NULL when
+ * ndev is NULL.
+ */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (!ndev) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+	return ndev->be.be_dev;
+}
+
+/* Return the number of RX queues on an eth-port device, or -1 when no
+ * eth-port device exists for the adapter/port pair.
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* Fix: nic_and_port_to_eth_dev() returns NULL for an unknown
+	 * adapter/port (see its use in flow_get_eth_dev()); the original
+	 * dereferenced it unconditionally.
+	 */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/* Return the HW queue id of queue_no on an eth-port device, or -1 when
+ * no eth-port device exists for the adapter/port pair or queue_no is out
+ * of range.
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* Fix: guard against NULL device and out-of-range queue index;
+	 * the original dereferenced/indexed unconditionally.
+	 */
+	if (!eth_dev || queue_no >= eth_dev->num_queues)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Fetch FLM statistics into data. Only implemented for the inline
+ * profile; any other profile returns -1.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields;
+};
+
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next;
+};
+
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared;
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx;
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/* Clear bit x in byte-array bitmap arr. Fix: take arr into a local like
+ * flow_nic_set_bit does, so the argument is parenthesized and evaluated
+ * exactly once (the original expanded 'arr' bare, twice).
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/* Test bit x in byte-array bitmap arr (GCC statement expression).
+ * Fix: evaluate arr once into a parenthesized local, consistent with
+ * flow_nic_set_bit (the original expanded 'arr' bare, twice).
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * The FTE table stores one enable bit per CFN, packed 8 CFNs per field;
+ * only flushed to HW when the bitmap actually changes. Returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* 1U: left-shifting a signed 1 by 31 would be undefined behavior */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * Mirrors set_flow_type_km() but programs the FLM FTE table; only
+ * flushed to HW when the bitmap actually changes. Returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* 1U: left-shifting a signed 1 by 31 would be undefined behavior */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/* Translate a logical RX queue id into its HW queue id; -1 if unknown. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int i;
+
+	for (i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id != id)
+			continue;
+		return dev->rx_queue[i].hw_id;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Bring up the FLM SDRAM: preset the control registers, wait for DDR4
+ * calibration to complete, then program scrubber/timeout defaults.
+ * Returns 0 on success, -1 if calibration never signals done.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done (poll up to 1M x 1 us) */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reinitialize the FLM SDRAM contents: disable FLM and all categories,
+ * wait for idle, run SDRAM init, then restore the FLM enable state to
+ * 'enable'. Returns 0 on success, -1 on any timeout.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* RCP 0 is left untouched; clear every other recipe */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Poll for initialization complete (up to 1M x 1 us) */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Identity of a flow-type (FT) action set, packed into one 64-bit word
+ * so two action sets can be compared with a single integer compare.
+ * Flows with equal ident data share the same FT entry.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1; /* Entry is allocated */
+			uint64_t drop : 1; /* Set when there are no destinations */
+			uint64_t ltx_en : 1; /* TX to physical port enabled */
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1; /* RX queue destination enabled */
+			uint64_t queue : 8;
+			uint64_t encap_len : 8; /* Tunnel header length */
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1; /* 0 = IPv4 tunnel, 1 = otherwise */
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data; /* Whole identity as one word for comparison */
+	};
+};
+
+/*
+ * FLM key layout definition: dyn/ofs selectors for the two quad-words
+ * (QW0/QW4) and two single-words (SW8/SW9) of the lookup key, packed
+ * into one 64-bit word so key layouts can be compared directly.
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data; /* Whole definition as one word for comparison */
+	};
+};
+
+/* Build a flow-type identity word from a NIC flow definition. */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ident = { .data = 0 };
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		/* No destinations at all means drop */
+		ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			switch (fd->dst_id[i].type) {
+			case PORT_PHY:
+				ident.ltx_en = 1;
+				ident.ltx_port = fd->dst_id[i].id;
+				break;
+			case PORT_VIRT:
+				ident.queue_en = 1;
+				ident.queue = fd->dst_id[i].id;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	/* Encapsulation parameters, if a tunnel header is present */
+	if (fd->tun_hdr.len > 0) {
+		ident.encap_len = fd->tun_hdr.len;
+		ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ident;
+}
+
+/* Record the dyn/ofs selection for quad-word 0 or 1 (QW0/QW4) of the key. */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	switch (qw) {
+	case 0:
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+		break;
+	default:
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+		break;
+	}
+}
+
+/* Record the dyn/ofs selection for single-word 0 or 1 (SW8/SW9) of the key. */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	switch (sw) {
+	case 0:
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+		break;
+	default:
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+		break;
+	}
+}
+
+/*
+ * Per-group FLM state: the group-0 CFN/KM flow type this group was
+ * cloned from, the key layout programmed into its RCP, and the table
+ * of flow-type action sets currently in use.
+ */
+struct flm_flow_group_s {
+	int cfn_group0; /* -1 while the group is unused */
+	int km_ft_group0;
+	struct flow_handle *fh_group0;
+
+	struct flm_flow_key_def_s key_def; /* Key layout locked in by first flow */
+
+	int miss_enabled; /* Set once group 0 has been switched to FLM miss */
+
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident; /* ident.data == 0 means free */
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Last FT index handed out (sic: "cached"); 0 means no cache */
+	uint32_t cashed_ft_index;
+};
+
+/* Top-level FLM resource handle: one group per FLM recipe (RCP). */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset) the FLM flow handle pointed to by *handle and
+ * mark every group as unused (cfn_group0 == -1).
+ * On allocation failure *handle remains NULL.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	/* calloc() may fail; the original code dereferenced NULL here */
+	if (!flm_handle)
+		return;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Release the FLM flow handle; free(NULL) is a no-op, so no guard needed. */
+static void flm_flow_handle_remove(void **handle)
+{
+	free(*handle);
+	*handle = NULL; /* Prevent use-after-free through the caller's pointer */
+}
+
+/*
+ * Bind an FLM group to its group-0 CFN, KM flow type and flow handle.
+ * Returns 0 on success, -1 if group_index is out of range.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *grp;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	grp = &flm_handle->groups[group_index];
+	grp->cfn_group0 = cfn;
+	grp->km_ft_group0 = km_ft;
+	grp->fh_group0 = fh;
+	grp->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Reset an FLM group to its pristine state (all zero, CFN unset).
+ * Returns 0 on success, -1 if group_index is out of range.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *grp;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	grp = &flm_handle->groups[group_index];
+	memset(grp, 0x0, sizeof(struct flm_flow_group_s));
+	grp->cfn_group0 = -1; /* -1 marks the group as unused */
+
+	return 0;
+}
+
+/*
+ * Look up the group-0 (miss) flow handle of an FLM group.
+ * Returns 0 and stores the handle in *fh_miss, or -1 on a bad index.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *grp = &flm_handle->groups[group_index];
+
+	*fh_miss = grp->fh_group0;
+	return 0;
+}
+
+/*
+ * Program an FLM recipe (RCP) from a key definition and packet mask.
+ * The packet mask words are re-ordered into the HW mask layout
+ * (SW9, SW8, QW4, QW0). Returns 0 on success, -1 on a bad index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	/* Key word extraction points taken from the key definition */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KID 0 and 1 are reserved, hence the +2 offset */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down an FLM recipe: clear the RCP registers and, if the group's
+ * miss path was active, restore group 0 to its non-FLM configuration
+ * (RCP selection 0, FT MISS back to FT UNHANDLED, KCE bit cleared).
+ * Returns 0 on success, -1 on a bad index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 (clear its KCE enable bit) */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare learning a flow into an FLM group.
+ *
+ * On the first flow of a group this activates the group's miss path:
+ * allocates the FLM RCP resource, points group 0's RCP selection at
+ * this group, programs the RCP, swaps FT UNHANDLED for FT MISS and
+ * enables FLM in the KCE bitmap. Subsequent flows must use the same
+ * key definition.
+ *
+ * Output parameters:
+ *   kid            - key id to use for learning (group_index + 2)
+ *   ft             - flow type index chosen for this action set
+ *   cfn_to_copy    - group-0 CFN to clone when a new FT was allocated
+ *   cfn_to_copy_km_ft - KM flow type belonging to that CFN
+ *   fh_existing    - set instead, when an identical action set exists
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows of a group must share one key definition */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		/* Fast path: same action set as the last flow programmed */
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT 0 and 1 are reserved; search from index 2 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the FT entry owned by an FLM flow handle and disable the
+ * KM/FLM flow types that were enabled for its CFN.
+ * Returns 0 on success, non-zero if any set_flow_type_flm() call failed.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	/* Free the FT slot: ident.data == 0 marks it unused again */
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Meter profile table: each profile holds two token buckets encoded in
+ * the 16-bit "[11:0] value, [15:12] shift-left" format produced by
+ * flow_mtr_set_profile().
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires an FLM block of HW variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* One meter policy per FLM profile entry. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return (uint64_t)FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a byte count (assumed to fit in 40 bits) into bucket size
+ * units of 2^40 / 10^9 bytes. The multiply by 10^9 is done on two
+ * 20-bit halves so the intermediate products cannot overflow 64 bits;
+ * any discarded fractional part rounds the result up.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	uint64_t low = (value & 0xfffff) * 1000000000;
+	uint64_t high = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t result = (high >> 20) + (low >> 40);
+
+	if ((high & 0xfffff) != 0 || (low & 0xffffffffff) != 0)
+		result += 1;
+
+	return result;
+}
+
+/*
+ * Encode a meter profile's two token buckets into the HW format.
+ * Rates are converted to 128 bytes/sec units; sizes to 2^40/10^9 byte
+ * units. Both are then compressed to "[11:0] mantissa, [15:12] shift"
+ * and stored in the profile table. Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies need no HW programming in this implementation. */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Fixed by the size of the FLM meter statistics table. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics. n_pkt/n_bytes are written by the stats update
+ * thread and read concurrently by flm_mtr_read_stats(); the MSB of
+ * n_pkt doubles as an "update in progress" marker (see
+ * flm_mtr_update_stats()). The *_base members store the value at the
+ * last clear and are only touched under the reader's control.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;
+
+	volatile atomic_uint_fast64_t n_pkt;
+	volatile atomic_uint_fast64_t n_bytes;
+	uint64_t n_pkt_base;
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask; /* 0 means meter deleted */
+};
+
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Push one learn record into the FLM learn FIFO. If the FIFO lacks
+ * space, drain pending INF records and retry up to FLM_PROG_MAX_RETRY
+ * times. Returns 0 on success, 1 on retry exhaustion, otherwise the
+ * result of the flush. Caller must hold the ndev mutex.
+ *
+ * NOTE(review): WORDS_PER_LEARN_DATA is sizeof(struct
+ * flm_v17_lrn_data_s) WITHOUT dividing by sizeof(uint32_t), unlike
+ * WORDS_PER_INF_DATA — confirm whether LRN_FREE counts bytes or words.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain INF records to make room in the FIFO */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter by learning a flow keyed on mtr_id into the FLM with
+ * the profile's bucket A parameters. On success the meter's bucket
+ * pointer and stats mask are recorded. Returns the flow_flm_apply()
+ * result.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Key: meter id + 1 in SW9 with KID 1 */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1; /* learn */
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter: clear its statistics first (a zero stats_mask stops
+ * further counter updates), then unlearn the flow from the FLM.
+ * Returns the flow_flm_apply() result.
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0; /* unlearn */
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Re-learn a meter (op 2 = update) with an adjustment value while
+ * keeping its bucket A rate/size. Returns the flow_flm_apply() result.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2; /* update */
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ INF records from the FLM
+ * into 'data'. Returns the number of whole records read (0 if none
+ * available). Caller must hold the ndev mutex.
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached count before giving up */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain INF records and fold valid meter statistics into the per-meter
+ * counters. Returns the number of records processed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/*
+				 * Publish with the MSB of n_pkt set as an
+				 * "update in progress" marker, write n_bytes,
+				 * then clear the marker. Readers in
+				 * flm_mtr_read_stats() spin while the MSB is
+				 * set, so they never observe a torn
+				 * pkt/bytes pair.
+				 */
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read a meter's green packet/byte counters relative to the last clear.
+ * Retries until it observes a consistent pkt/bytes pair (see the MSB
+ * marker protocol in flm_mtr_update_stats()). If 'clear' is set, the
+ * current values become the new base. Outputs are only written when
+ * the meter's stats_mask is non-zero.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				/* Spin while an update is in progress (MSB set) */
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2); /* Retry if a writer intervened */
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipes are indexed port + 1 (recipe 0 is not used for ports). */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/* Resolve a port_id to its physical port; UINT8_MAX when unknown. */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	for (struct flow_eth_dev *dev = ndev->eth_base; dev;
+			dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push fh onto the front of the device's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	fh->prev = NULL;
+	fh->next = ndev->flow_base;
+	if (fh->next)
+		fh->next->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/* Unlink fh from the device's flow list, fixing the head if needed. */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *succ = fh->next;
+	struct flow_handle *pred = fh->prev;
+
+	if (succ && pred) {
+		/* Middle of the list */
+		pred->next = succ;
+		succ->prev = pred;
+	} else if (succ) {
+		/* Head of the list */
+		ndev->flow_base = succ;
+		succ->prev = NULL;
+	} else if (pred) {
+		/* Tail of the list */
+		pred->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* Only element */
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
/*
 * Interpret an IPv4 flow element and add the requested source/destination
 * address matches to the flow definition's key.
 *
 * A spec+mask frag_offset of 0xffff selects fragmentation mode 0xfe.
 * When a QW (quad word) key slot is free, src and dst are matched
 * together as one QW entry at L3 offset 12; otherwise each masked
 * address falls back to its own SW (single word) entry (src at L3
 * offset 12, dst at L3 offset 16).
 *
 * Returns 0 on success.  On failure the error is recorded in @error,
 * @fd is freed and 1 is returned.  As a side effect the L3 protocol of
 * @fd is marked as IPv4 - as tunneled protocol if any_count > 0 or an
 * outer L3 protocol was already seen.
 *
 * NOTE(review): qw_counter/sw_counter are passed by value, so the local
 * "+= 1" updates do not reach the caller - presumably the caller tracks
 * the counters itself; confirm against the call site.
 */
static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
	uint32_t any_count)
{
	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;

	if (ipv4_spec != NULL && ipv4_mask != NULL) {
		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
			fd->fragmentation = 0xfe;

		/* Prefer one QW entry holding both addresses (L3 bytes 12..19). */
		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);

			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];

			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
			qw_counter += 1;
		} else {
			/* Fall back to SW entries; check both fit before adding either. */
			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			if (ipv4_mask->hdr.src_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
				sw_counter += 1;
			}

			if (ipv4_mask->hdr.dst_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
				sw_counter += 1;
			}
		}
	}

	if (any_count > 0 || fd->l3_prot != -1)
		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
	else
		fd->l3_prot = PROT_L3_IPV4;
	return 0;
}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return a CAT function (CFN) and all lookup state attached to it to a
+ * clean, disabled configuration: the CFN preset, its KM and FLM key-select
+ * and enable bitmaps, and any CTE/CTS category outputs that were active.
+ *
+ * Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* Reset the CFN entry itself */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+			   0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/* Disable and clear the KM lookup for this CFN */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Clear every KM flow type on all four key lanes */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_km(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* Disable and clear the FLM lookup for this CFN */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		/* Clear every FLM flow type on all four key lanes */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_flm(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* If the CFN had category outputs enabled, clear CTE and its CTS rows */
+	{
+		uint32_t cte_bm = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte_bm);
+
+		if (cte_bm) {
+			/* Two categories (A/B) are packed per CTS row */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int row = 0; row < cts_offset; ++row) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + row,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + row,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Convert an interpreted flow definition (fd) into the FLM fields of an
+ * already-allocated FLM flow handle (fh).
+ *
+ * Returns 0 on success, -1 if fh is not an FLM-type handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/*
+	 * Derive the IP protocol number from the outer L4 protocol; when no
+	 * outer L4 protocol was matched, fall back to the tunneled L4
+	 * protocol. Anything else maps to 0.
+	 */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	/* Keep a copy of the ten matcher words that form the FLM key */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Mirror the relevant modify-field actions into the FLM record */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		switch (fd->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* Byte layout: bit 6 is RQI, bits [5:0] are QFI */
+			fh->flm_rqi = (fd->modify_field[idx].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build a v17 FLM learn record from an FLM flow handle and push it to the
+ * hardware via flow_flm_apply().
+ *
+ * mtr_ids may be NULL; otherwise it points to MAX_FLM_MTRS_SUPPORTED meter
+ * ids, where a zero id terminates the list.
+ *
+ * Returns the result of flow_flm_apply(), or -1 if fh is not an FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s lrn;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&lrn, 0x0, sizeof(lrn));
+
+	/* Key words are stored most-significant first in the learn record */
+	for (int w = 0; w < 4; ++w) {
+		lrn.qw0[w] = fh->flm_data[9 - w];
+		lrn.qw4[w] = fh->flm_data[5 - w];
+	}
+	lrn.sw8 = fh->flm_data[1];
+	lrn.sw9 = fh->flm_data[0];
+	lrn.prot = fh->flm_prot;
+
+	if (mtr_ids != NULL) {
+		FLM_V17_MBR_ID1(lrn.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(lrn.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(lrn.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(lrn.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t used = 0;
+
+		while (used < MAX_FLM_MTRS_SUPPORTED && mtr_ids[used] != 0)
+			++used;
+		lrn.vol_idx = used;
+	}
+
+	lrn.nat_ip = fh->flm_nat_ipv4;
+	lrn.nat_port = fh->flm_nat_port;
+	lrn.nat_en = (fh->flm_nat_ipv4 || fh->flm_nat_port) ? 1 : 0;
+
+	lrn.dscp = fh->flm_dscp;
+	lrn.teid = fh->flm_teid;
+	lrn.qfi = fh->flm_qfi;
+	lrn.rqi = fh->flm_rqi;
+	/* color bits [9:0] hold RPL EXT PTR, bits [13:10] the MTU recipe */
+	lrn.color = fh->flm_rpl_ext_ptr & 0x3ff;
+	lrn.color |= (fh->flm_mtu_fragmentation_recipe & 0xf) << 10;
+
+	lrn.ent = 0;
+	lrn.op = flm_op & 0xf;
+	lrn.prio = fh->flm_prio & 0x3;
+	lrn.ft = flm_ft;
+	lrn.kid = fh->flm_kid;
+	lrn.eor = 1;
+
+	return flow_flm_apply(dev, &lrn);
+}
+
+/*
+ * Allocate or reuse the KM (Key Matcher) flow-type and category (RCP)
+ * resources for a new flow handle.
+ *
+ * When no identical flow exists, a KM flow type is found (reused by
+ * identity, or taken from the first free slot) and a KM RCP is either
+ * referenced from found_flow or freshly allocated, after which the RCP and
+ * the match entry are written to hardware. When an identical flow was found,
+ * both its KM flow type and KM category are reference-counted and its match
+ * entry data is shared instead of re-written.
+ *
+ * On success the chosen flow type/RCP indices are returned through
+ * setup_km_ft/setup_km_rcp and *setup_km is set to 1.
+ *
+ * Returns 0 on success, 1 on resource exhaustion (error is populated).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+		/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Scan for a matching FT identity; remember first free slot */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Identical FT exists: take a reference on it */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* No match: claim the first free FT slot */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				/* Table full and nothing matched */
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			/* Compatible (non-identical) flow exists: share its RCP */
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this flow type to the RCP's FT mask for key A */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference both its FT and category */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* Share the existing flow's match entry instead of writing a new one */
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		*setup_km = 1;
+		/* NOTE(review): 'flow' is the caller's scan cursor; when an
+		 * identical flow was found the cursor equals found_flow, so
+		 * these reads are equivalent to found_flow's indices —
+		 * confirm this invariant holds for all callers.
+		 */
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * fd->modify_field[i] is the modify action being adjusted; *ofs is its byte
+ * offset and *dyn its dynamic-offset selector, both updated in place.
+ * eth_length/l2_length/l3_length/l4_length describe the new outer header.
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Bug fix: the original code did 'ofs += ...', which
+			 * advanced the pointer itself instead of the offset
+			 * value it points to (all other branches use '*ofs').
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Rebase the offset to a static position from L2 start */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			default:
+				/* Other selectors keep their original offset */
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* Update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* Update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of flow management for a NIC device.
+ *
+ * Reserves index 0 (and FLM flow type 1) in every resource pool for
+ * default/catch-all behavior, programs the default recipes for unmatched
+ * traffic (CAT, QSL, PDB, HSH, COT), unblocks the RMC datapath,
+ * calibrates and resets the FLM SDRAM, and allocates the meter and
+ * flow-type bookkeeping tables.  Guarded by ndev->flow_mgnt_prepared,
+ * so repeated calls are no-ops.
+ *
+ * Returns 0 on success.  On failure, partially acquired resources are
+ * released via done_flow_management_of_ndev_profile_inline() and -1 is
+ * returned.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM: calibrate/reset learn memory before any flow can be programmed */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Apply the same periodic-statistics limits to every FLM PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/*
+		 * Allocate bookkeeping for FLM meters, flow-type identifiers
+		 * and per-meter statistics; all zero-initialized by calloc.
+		 */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Reset per-meter packet/byte counters and the stats mask */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down what initialize_flow_management_of_ndev_profile_inline() set
+ * up: resets the FLM SDRAM, clears the default recipes programmed at
+ * index 0, releases the reserved resource indexes and frees the meter /
+ * flow-type / statistics bookkeeping allocations.  A no-op when flow
+ * management was never prepared.  Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/* Free bookkeeping allocated by the initialize function */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow specification without creating it: the element and
+ * action lists are interpreted into a flow definition under the device
+ * mutex, and the result is immediately discarded.
+ *
+ * Returns 0 when the specification can be interpreted, -1 otherwise
+ * (details are placed in *error by interpret_flow_elements()).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,
+							  error, 0, &port_id,
+							  &num_dest_port, &num_queues,
+							  packet_data, packet_mask,
+							  &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (!fd)
+		return -1;
+
+	/* Only validation was requested, so the definition is not kept */
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create a flow from the given attribute/element/action lists.
+ *
+ * The specification is interpreted into a flow definition, caller-scoped
+ * group IDs are translated to NIC-global ones via the group handle, and
+ * the resulting filter is programmed into the NIC by
+ * create_flow_filter().  The device mutex is held for the whole
+ * operation.
+ *
+ * Returns the new flow handle, or NULL on failure with *error set.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	struct flow_attr attr_local;
+
+	/* Work on a local copy; the forced VLAN VID only applies to group 0 */
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs (caller-scoped -> NIC-global) */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* Default to this device's port when no destination port was matched */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FlOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a single flow and release every NIC resource it references.
+ * The caller must already hold dev->ndev->mtx (see
+ * flow_destroy_profile_inline(), which takes the mutex before calling
+ * this function).
+ *
+ * FLM flows are unlearned from hardware and their replace-extension
+ * (RPL) data cleared once the last reference is dropped; non-FLM flows
+ * walk their resource table and reset each hardware recipe whose
+ * reference count reaches zero.
+ *
+ * Returns 0 on success, non-zero if any hardware teardown step failed.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* FLM flow: unlearn in hardware and clean up replace-extension data */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			/* Read back replace data location/length before clearing */
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Zero every 16-byte RPL record the extension used */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owner flow when this was its last FLM reference */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Drop one reference on every resource entry used by this flow */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the flow-type ident bookkeeping entry */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* Reset every TPE sub-module recipe for this index */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy one flow, or — when 'flow' is NULL — every flow (regular and
+ * FLM) created on this eth device.  Takes the device mutex and delegates
+ * the actual teardown to flow_destroy_locked_profile_inline().
+ * Returns 0 on success or the first non-zero teardown error.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		flow = dev->ndev->flow_base;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				/* Save next before the handle is freed */
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+
+		/* Delete all created FLM flows from this eth device */
+		flow = dev->ndev->flow_base_flm;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * Stub: flow flush is not implemented for the inline profile.
+ * Always sets a FLOW_ERROR_GENERAL error and returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	return -1;
+}
+
+/*
+ * Stub: flow query is not implemented for the inline profile.
+ * Clears *data/*length, sets a FLOW_ERROR_GENERAL error and returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	*length = 0;
+	*data = NULL;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	return -1;
+}
+
+/*
+ * Read FLM statistics into data[] (one uint64_t per field, in the order
+ * of the 'fields' table below).  HW_FLM_STAT_FLOWS is stored directly
+ * (a gauge); every other field is accumulated onto the caller's existing
+ * data[i] value, so callers keep running counters across calls.
+ * 'size' must be at least the number of fields; returns -1 otherwise,
+ * 0 on success.
+ *
+ * NOTE(review): for FLM versions < 18 the loop stops after
+ * HW_FLM_STAT_PRB_IGNORE — presumably the remaining fields do not exist
+ * on those versions; confirm against the FLM register map.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the MTU for a port in both the RPP-IFR and IFR recipe tables
+ * and enable the corresponding IFR MTU recipe.  Recipe index is derived
+ * from the port via convert_port_to_ifr_mtu_recipe().  Port numbers
+ * >= 255 are rejected.  Hardware flushes are only issued when all the
+ * set operations succeeded.  Returns 0 on success, non-zero otherwise.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	int err = 0;
+	uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+	struct flow_nic_dev *ndev = dev->ndev;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  ifr_mtu_recipe, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      ifr_mtu_recipe, mtu);
+
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+						    1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management: one-time preparation and teardown of per-NIC flow resources
+ */
+
+/* Release resources acquired by the initialize function; returns 0. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* Prepare flow management for ndev; 0 on success, -1 on failure. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality: validate/create/destroy/flush/query entry points
+ */
+
+/* Destroy one flow; caller must hold the device mutex. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Check that a flow spec can be interpreted; 0 if valid, -1 otherwise. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns the handle or NULL on failure. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of the device when 'flow' is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats: FLM statistics readout
+ */
+
+/* Read FLM counters into data[]; 'size' must cover all fields. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: current debug mode plus a pointer to each
+ * FPGA filter-module driver instance.  One entry per physical adapter.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode;
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Debug-trace guards used around register accesses in this backend.
+ * _CHECK_DEBUG_ON declares a local flag and, when either the backend or
+ * the module has debug enabled, turns on the module's debug mode and
+ * sets the flag; _CHECK_DEBUG_OFF turns debug mode back off when the
+ * flag was set.  The two macros must be used as a pair in the same
+ * scope, since _CHECK_DEBUG_OFF reads the __debug__ local declared by
+ * _CHECK_DEBUG_ON.
+ * NOTE(review): "__debug__" and the leading-underscore macro names fall
+ * in the identifier space reserved for the implementation (C11 7.1.3) —
+ * consider renaming.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Store the requested debug mode on the backend device; always returns 0. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	be->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * Port and CAT capability wrappers: thin pass-throughs exposing the INFO
+ * module's counts through the generic backend API.  Each takes the
+ * opaque backend device pointer installed at init time.
+ */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+/*
+ * Queue, pattern-matcher, KCC and KM capability wrappers: thin
+ * pass-throughs over the INFO module's counts.
+ */
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+/* Backend callback: number of FLM categories from the INFO module. */
+static int get_nb_flm_categories(void *be_dev)
+{
+	return info_nthw_get_nb_flm_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: FLM memory size in MB, from the INFO module. */
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	return info_nthw_get_nb_flm_size_mb(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: FLM entry size from the INFO module. */
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	return info_nthw_get_nb_flm_entry_size(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: FLM variant identifier from the INFO module. */
+static int get_nb_flm_variant(void *be_dev)
+{
+	return info_nthw_get_nb_flm_variant(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of FLM priorities from the INFO module. */
+static int get_nb_flm_prios(void *be_dev)
+{
+	return info_nthw_get_nb_flm_prios(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of FLM PST profiles from the INFO module. */
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	return info_nthw_get_nb_flm_pst_profiles(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of HST categories from the INFO module. */
+static int get_nb_hst_categories(void *be_dev)
+{
+	return info_nthw_get_nb_hst_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of QSL categories from the INFO module. */
+static int get_nb_qsl_categories(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of QSL QST entries from the INFO module. */
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_qst_entries(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of PDB categories from the INFO module. */
+static int get_nb_pdb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_pdb_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of IOA categories from the INFO module. */
+static int get_nb_ioa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_ioa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of ROA categories from the INFO module. */
+static int get_nb_roa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_roa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of TPE categories from the INFO module. */
+static int get_nb_tpe_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of TX copy writers from the INFO module. */
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_writers(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: TX copy mask memory size from the INFO module. */
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_mask_mem(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: TX replacer depth from the INFO module. */
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_depth(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of TX replacer extended categories, from the INFO module. */
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_ext_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* Backend callback: number of TPE IFR categories from the INFO module. */
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_ifr_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* Backend callback: true when the CAT module was instantiated on this FPGA. */
+static bool cat_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_cat_nthw != NULL;
+}
+
+/* Backend callback: CAT module version packed as (major << 16) | minor. */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_cat_nthw->m_cat);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' CFN (categorizer function) records, starting at 'cat_func',
+ * from the shadow copy in 'cat' to the CAT module registers.
+ *
+ * Bug fix: both version branches called a stray identifier r() to set the
+ * register burst count; the intended call is cat_nthw_cfn_cnt(), matching
+ * the cat_nthw_<reg>_cnt() pattern used by every other flush function here.
+ *
+ * Returns 0. Register writes are performed per record in a fixed sequence,
+ * terminated by cat_nthw_cfn_flush().
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
+			/* Protocol type classification bits */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			/* Error condition match bits */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
+
+			/* Pattern matcher combination bits */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
+			/* Protocol type classification bits */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			/* Error condition match bits; v21 adds tunnel checksum/TTL checks */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
+
+			/* Pattern matcher combination bits */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
+			/* km1_or exists only when a second KM interface is present */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCE (KM category enable) records, starting at 'index'. */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18 has a single KM interface; interface index fixed to 0. */
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			int adr = index + i;
+
+			cat_nthw_kce_select(be->p_cat_nthw, 0, adr);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[adr].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			int adr = index + i;
+
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, adr);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[adr].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCS (KM category select) records, starting at 'cat_func'. */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18 has a single KM interface; interface index fixed to 0. */
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					      cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' FTE (flow type enable) records, starting at 'index'. */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* v18 has a single KM interface; interface index fixed to 0. */
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = index + i;
+
+			cat_nthw_fte_select(be->p_cat_nthw, 0, adr);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[adr].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = index + i;
+
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, adr);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[adr].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE (color table enable) records, starting at 'cat_func'.
+ *
+ * Bug fix: the v22 branch wrote the rrb shadow value through
+ * cat_nthw_cte_enable_tpe() (a duplicated call); it must go through
+ * cat_nthw_cte_enable_rrb() — RRB is the enable bit v22 adds, mirroring
+ * the rrb handling in cat_ccs_flush(). With the original code the rrb
+ * setting clobbered the tpe enable and rrb was never programmed.
+ *
+ * NOTE(review): v18 and v21 read the v18 shadow union member — presumably
+ * the cte layout is identical across those versions; verify in cat_func_s.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v22.cte[cat_func].b.tpe);
+			/* Fixed: was a duplicate enable_tpe() call fed with b.rrb */
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw, cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS (color table select) records, starting at 'index'.
+ * All supported versions read the v18 shadow member — presumably the cts
+ * layout is shared across v18/v21/v22; verify against cat_func_s.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = index + i;
+
+			cat_nthw_cts_select(be->p_cat_nthw, adr);
+			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[adr].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[adr].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' COT (color override table) records, starting at 'cat_func'. */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = cat_func + i;
+
+			cat_nthw_cot_select(be->p_cat_nthw, adr);
+			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[adr].color);
+			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[adr].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCT (color control table) records, starting at 'index'. */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = index + i;
+
+			cat_nthw_cct_select(be->p_cat_nthw, adr);
+			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[adr].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[adr].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' EXO (extractor offset) records, starting at 'ext_index'. */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = ext_index + i;
+
+			cat_nthw_exo_select(be->p_cat_nthw, adr);
+			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[adr].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[adr].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' RCK records, starting at 'index'. */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = index + i;
+
+			cat_nthw_rck_select(be->p_cat_nthw, adr);
+			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[adr].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' LEN (frame length match) records, starting at 'len_index'. */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = len_index + i;
+
+			cat_nthw_len_select(be->p_cat_nthw, adr);
+			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[adr].lower);
+			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[adr].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[adr].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[adr].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[adr].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' KCC CAM records, starting at 'len_index'. */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = len_index + i;
+
+			cat_nthw_kcc_select(be->p_cat_nthw, adr);
+			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[adr].key);
+			cat_nthw_kcc_category(be->p_cat_nthw, cat->v18.kcc_cam[adr].category);
+			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[adr].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCE records, starting at 'len_index'. Only present in v22. */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = len_index + i;
+
+			cat_nthw_cce_select(be->p_cat_nthw, adr);
+			cat_nthw_cce_data_imm(be->p_cat_nthw, cat->v22.cce[adr].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw, cat->v22.cce[adr].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' CCS records, starting at 'len_index'. Only present in v22. */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = len_index + i;
+
+			cat_nthw_ccs_select(be->p_cat_nthw, adr);
+			/* Per-module enable bit followed by its data value */
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw, cat->v22.ccs[adr].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw, cat->v22.ccs[adr].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw, cat->v22.ccs[adr].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw, cat->v22.ccs[adr].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw, cat->v22.ccs[adr].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw, cat->v22.ccs[adr].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw, cat->v22.ccs[adr].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw, cat->v22.ccs[adr].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw, cat->v22.ccs[adr].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw, cat->v22.ccs[adr].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw, cat->v22.ccs[adr].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw, cat->v22.ccs[adr].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw, cat->v22.ccs[adr].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw, cat->v22.ccs[adr].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw, cat->v22.ccs[adr].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw, cat->v22.ccs[adr].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw, cat->v22.ccs[adr].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw, cat->v22.ccs[adr].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw, cat->v22.ccs[adr].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw, cat->v22.ccs[adr].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw, cat->v22.ccs[adr].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw, cat->v22.ccs[adr].rrb);
+			/* Sideband data slots */
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw, cat->v22.ccs[adr].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw, cat->v22.ccs[adr].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw, cat->v22.ccs[adr].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw, cat->v22.ccs[adr].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw, cat->v22.ccs[adr].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw, cat->v22.ccs[adr].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* Backend callback: true when the KM module was instantiated on this FPGA. */
+static bool km_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_km_nthw != NULL;
+}
+
+/* Backend callback: KM module version packed as (major << 16) | minor. */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_km_nthw->m_km);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_km_nthw->m_km);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Flush 'cnt' KM RCP (recipe) records, starting at 'category'. */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = category + i;
+
+			km_nthw_rcp_select(be->p_km_nthw, adr);
+			/* Key word/double-word extractor configuration */
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[adr].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[adr].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[adr].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[adr].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[adr].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[adr].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[adr].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[adr].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[adr].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[adr].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[adr].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[adr].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[adr].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[adr].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[adr].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[adr].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[adr].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[adr].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[adr].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[adr].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[adr].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[adr].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[adr].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[adr].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[adr].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[adr].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[adr].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[adr].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[adr].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[adr].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[adr].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[adr].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[adr].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[adr].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[adr].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[adr].synergy_mode);
+			/* B-side word extractor configuration */
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[adr].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[adr].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[adr].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[adr].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[adr].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[adr].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[adr].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[adr].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KM CAM records within 'bank', starting at 'record'.
+ * The flat CAM address is (bank << 11) + record, i.e. 2048 records per bank.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = (bank << 11) + record + i;
+
+			km_nthw_cam_select(be->p_km_nthw, adr);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[adr].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[adr].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[adr].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[adr].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[adr].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[adr].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[adr].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[adr].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[adr].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[adr].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[adr].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[adr].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' dirty KM TCAM records; the flat address is
+ * bank * 4 * 256 + byte * 256 + value. Only records whose dirty flag is set
+ * are written, and the flag is cleared afterwards.
+ *
+ * NOTE(review): clearing 'dirty' writes through the const-qualified 'km' —
+ * legal only if v7.tcam is a pointer member; confirm in km_func_s.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int base = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			int adr = base + i;
+
+			if (!km->v7.tcam[adr].dirty)
+				continue;
+			km_nthw_tcam_select(be->p_km_nthw, adr);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[adr].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[adr].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Write KM TCI (color/flow-type) entries [index .. index+cnt-1] of @bank
+ * to hardware; TCI address = bank * 72 + index.
+ * Only module version 7 is handled. Always returns 0.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/*
+		 * TCAM bank width = 72 entries.
+		 * NOTE(review): the original comment said "version 3" while
+		 * the gate above is ver == 7 -- confirm 72 still holds.
+		 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Write KM TCQ (bank-mask/qualifier) entries to hardware.
+ * TCQ address = bank + (index << 4): bank in the lower 4 bits, index in
+ * the upper bits. Only module version 7 is handled. Always returns 0.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/*
+		 * TCAM bank width = 72.
+		 * NOTE(review): original comment said "version 3" while the
+		 * gate above is ver == 7 -- confirm.
+		 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* The FLM module is present iff its NTHW handle was instantiated. */
+static bool flm_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_flm_nthw != NULL;
+}
+
+/* FLM version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_flm_nthw->m_flm);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush the FLM CONTROL shadow register (enable/init, load/unload/relearn/
+ * probe strobes, CRC and RBL/EAB settings) to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the writable FLM STATUS fields (critical/panic/crcerr, typically
+ * to clear them) to hardware. The trailing 0 presumably selects the
+ * "set" direction of the accessor -- TODO confirm against the nthw API.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP are read-only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read back all FLM STATUS fields from hardware into the shadow struct.
+ * The trailing 1 presumably selects the "get" direction of the accessor
+ * (opposite of flm_status_flush) -- TODO confirm against the nthw API.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the FLM flow TIMEOUT value to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the FLM SCRUB interval value to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the FLM LOAD_BIN register to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the FLM LOAD_PPS (packets-per-second) register to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the FLM LOAD_LPS (lookups-per-second) register to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the FLM LOAD_APS (accesses-per-second) register to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush the FLM PRIO register (four limit/flow-type pairs) to hardware.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush FLM PST entries [index .. index+cnt-1] (bp/pp/tp fields) to
+ * hardware, one entry per flush cycle.
+ * Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Flush FLM RCP (recipe) records [index .. index+cnt-1] to hardware:
+ * select a record, write all its shadow fields, then flush, one record
+ * at a time. Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the FLM buffer control shadow (lrn_free/inf_avail/sta_avail)
+ * from hardware. The values are written back through pointers into
+ * flm->v17.buf_ctrl. Applies to module versions >= 17. Always returns 0.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read all FLM statistics counters from hardware into the shadow
+ * structures: first trigger the update strobes, then fetch each counter
+ * (trailing 1 presumably selects the "get" direction -- TODO confirm).
+ * v17 counters are read for ver >= 17; the v20 block adds the sta/inf,
+ * pck, csh and cuc counters for ver >= 20. Always returns 0.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push a block of learn records to the FLM and refresh the buffer
+ * control shadow. Returns the result of the nthw-level flush.
+ * NOTE(review): unlike the other FLM callbacks this accesses flm->v17
+ * without a version gate -- presumably callers guarantee ver >= 17;
+ * confirm.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of info records from the FLM into @inf_data and refresh
+ * the buffer control shadow. Returns the result of the nthw-level call.
+ * NOTE(review): flm->v17 is accessed without a version gate -- see
+ * flm_lrn_data_flush.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of status records from the FLM into @sta_data and refresh
+ * the buffer control shadow. Returns the result of the nthw-level call.
+ * NOTE(review): flm->v17 is accessed without a version gate -- see
+ * flm_lrn_data_flush.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* The HSH module is present iff its NTHW handle was instantiated. */
+static bool hsh_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_hsh_nthw != NULL;
+}
+
+/* HSH version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hsh_nthw->m_hsh);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush HSH RCP (hash recipe) records [category .. category+cnt-1] to
+ * hardware, one record per flush cycle.
+ * Only module version 5 is handled. Always returns 0.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* The HST module is present iff its NTHW handle was instantiated. */
+static bool hst_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_hst_nthw != NULL;
+}
+
+/* HST version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hst_nthw->m_hst);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush HST RCP (header-strip recipe) records [category .. category+cnt-1]
+ * to hardware: strip window plus three modifier command sets per record.
+ * Only module version 2 is handled. Always returns 0.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* The QSL module is present iff its NTHW handle was instantiated. */
+static bool qsl_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_qsl_nthw != NULL;
+}
+
+/* QSL version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_qsl_nthw->m_qsl);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush QSL RCP (queue-selection recipe) records
+ * [category .. category+cnt-1] to hardware, one record per flush cycle.
+ * Only module version 7 is handled. Always returns 0.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush QSL QST (queue table) entries [entry .. entry+cnt-1] to hardware.
+ * Only module version 7 is handled. Always returns 0.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush QSL QEN (queue enable) entries [entry .. entry+cnt-1] to hardware.
+ * Only module version 7 is handled. Always returns 0.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Flush QSL UNMQ (unmatched-packet queue) entries [entry .. entry+cnt-1]
+ * to hardware. Only module version 7 is handled. Always returns 0.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* The SLC module is present iff its NTHW handle was instantiated. */
+static bool slc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_slc_nthw != NULL;
+}
+
+/* SLC version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_slc_nthw->m_slc);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush SLC RCP (slicer recipe) records [category .. category+cnt-1] to
+ * hardware. Only module version 1 is handled. Always returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* The SLC LR module is present iff its NTHW handle was instantiated. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_slc_lr_nthw != NULL;
+}
+
+/* SLC LR version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_lr_nthw->m_slc_lr);
+	uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush SLC LR RCP (slicer recipe) records [category .. category+cnt-1]
+ * to hardware. Only module version 2 is handled. Always returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* The PDB module is present iff its NTHW handle was instantiated. */
+static bool pdb_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_pdb_nthw != NULL;
+}
+
+/* PDB version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_pdb_nthw->m_pdb);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush PDB RCP (packet descriptor builder recipe) records
+ * [category .. category+cnt-1] to hardware, one record per flush cycle.
+ * Only module version 9 is handled. Always returns 0.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Bug fix: this used to call
+			 * pdb_nthw_rcp_duplicate_bit() a second time, which
+			 * clobbered DUPLICATE_BIT with the pcap_keep_fcs
+			 * value and never wrote PCAP_KEEP_FCS at all.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Flush the PDB CONFIG register (timestamp format and port offset) to
+ * hardware. Only module version 9 is handled. Always returns 0.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* The IOA module is present iff its NTHW handle was instantiated. */
+static bool ioa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_ioa_nthw != NULL;
+}
+
+/* IOA version word: major version in bits 31:16, minor in bits 15:0. */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_ioa_nthw->m_ioa);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush @cnt IOA recipe records starting at @category to hardware.
+ * For each record: select it, program all v4 recipe fields, then
+ * flush.  Register write order follows the hardware access pattern
+ * (select before field writes before flush) and must not be reordered.
+ * Only recipe version 4 is handled; always returns 0.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		/* One record is written per flush cycle. */
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Program the two custom VLAN TPID values into the IOA special-TPID
+ * registers and flush.  Only recipe version 4 is handled; always
+ * returns 0.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt ROA egress-packet-processing (EPP) records starting at
+ * @index through the IOA block: select, program push_tunnel/tx_port,
+ * flush — one record per cycle.  Only recipe version 4 is handled;
+ * always returns 0.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* Report whether the ROA module was detected on this adapter. */
+static bool roa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_roa_nthw ? true : false;
+}
+
+/* Compose the ROA module version word as (major << 16) | minor. */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	const uint32_t major = module_get_major_version(be->p_roa_nthw->m_roa);
+	const uint32_t minor = module_get_minor_version(be->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush @cnt ROA tunnel-header records starting at hardware index
+ * @index.  Each logical tunnel header is written as four consecutive
+ * hardware entries (cnt register set to 4), each taking a 4-word slice
+ * of tunnel_hdr[].  The `index / 4 + i` mapping implies @index is a
+ * hardware entry index that is a multiple of 4 — TODO confirm with the
+ * callers.  Only version 6 is handled; always returns 0.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt ROA tunnel-configuration records starting at @category:
+ * select each record, program all v6 tuncfg fields, flush.  The
+ * select/field/flush ordering is the hardware access protocol and must
+ * not be reordered.  Only version 6 is handled; always returns 0.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the ROA global forwarding configuration (recirculate, normal,
+ * tx-port0/1, cell-builder and non-normal packet forwarding) and flush.
+ * Only version 6 is handled; always returns 0.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt ROA LAG configuration records starting at @index (one TX
+ * PHY port mapping per record).  Only version 6 is handled; always
+ * returns 0.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* Report whether the RMC module was detected on this adapter. */
+static bool rmc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_rmc_nthw ? true : false;
+}
+
+/* Compose the RMC module version word as (major << 16) | minor. */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+			  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+			   0xffff));
+}
+
+/*
+ * Write the RMC control register fields and flush.  0x10003 encodes
+ * version 1.3 (major in the upper 16 bits, minor in the lower),
+ * matching the v1_3 member used below.  Always returns 0.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/*
+ * The TPE function is a composite: it is only "present" when all six
+ * of its sub-modules (CSU, HFU, RPP_LR, TX_CPY, TX_INS, TX_RPL) were
+ * detected during backend init.
+ */
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+	       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+	       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Derive a combined TPE version from the individual sub-module
+ * versions (each encoded as (major << 16) | minor).  Only two known
+ * combinations are mapped (to 1 and 2, differing only in the RPP_LR
+ * version).  An unknown combination trips assert() in debug builds
+ * and falls through to return 0 when NDEBUG is defined — callers
+ * presumably treat 0 as "unsupported"; TODO confirm.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	/* Unknown sub-module version combination. */
+	assert(false);
+	return 0;
+}
+
+/*
+ * Flush @cnt RPP_LR recipe records starting at @index (only the `exp`
+ * field per record).  Handled for version >= 1; always returns 0.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt RPP_LR IFR recipe records (enable + MTU) starting at
+ * @index.  Unlike most flush callbacks here, this one reports an
+ * error: returns 0 on success, -1 when the module version is below 2
+ * (IFR recipes not supported).
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Flush @cnt IFR recipe records (enable + MTU) starting at @index to
+ * the standalone IFR module.  Returns 0 on success, -1 when the module
+ * version is below 2.  Mirrors tpe_rpp_ifr_rcp_flush() but targets
+ * p_ifr_nthw instead of p_rpp_lr_nthw.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Flush @cnt TX_INS recipe records (dyn/ofs/len insertion parameters)
+ * starting at @index.  Handled for version >= 1; always returns 0.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_RPL recipe records (dyn/ofs/len plus replacement
+ * pointer and extension priority) starting at @index.  Handled for
+ * version >= 1; always returns 0.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_RPL extension records (replacement pointer only)
+ * starting at @index.  Handled for version >= 1; always returns 0.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_RPL replacement-data records (raw value words)
+ * starting at @index.  Handled for version >= 1; always returns 0.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt TX_CPY recipe records starting at @index.  Recipes are
+ * spread across multiple writers: the writer for a record is
+ * (index + i) / nb_rcp_categories, and the record slot within that
+ * writer is (index + i) % nb_rcp_categories.  The writer's cnt
+ * register is only reprogrammed when the writer changes between
+ * iterations.  Handled for version >= 1; always returns 0.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* UINT_MAX sentinel (-1 wrapped) so the first iteration always
+	 * mismatches and programs the writer cnt register.
+	 */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt HFU (header field update) recipe records starting at
+ * @index.  Each record programs three length-update field groups
+ * (len_a, len_b, len_c), the TTL update, checksum/protocol info and
+ * outer/inner L3/L4 offsets, then flushes.  The select/field/flush
+ * ordering is the hardware access protocol and must not be reordered.
+ * Handled for version >= 1; always returns 0.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Flush @cnt CSU (checksum update) recipe records starting at @index:
+ * outer/inner L3 and L4 checksum commands per record.  Handled for
+ * version >= 1; always returns 0.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Stub: this backend does not support allocating Rx queues; always
+ * fails with -1.
+ * NOTE(review): logs via printf() while the rest of the driver uses
+ * NT_LOG — consider switching for consistent log routing.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Stub: this backend does not support freeing Rx queues.
+ * NOTE(review): prints an ERROR message but returns 0, whereas the
+ * alloc_rx_queue stub returns -1 — confirm the asymmetry is intended.
+ * Also uses printf() instead of NT_LOG (see alloc_rx_queue).
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table handed to the flow API layer.
+ * NOTE: this uses positional initialization, so the entry order must
+ * match the member order of struct flow_api_backend_ops exactly; a
+ * designated initializer would make this robust against reordering.
+ * The leading `1` is presumably the backend interface version — TODO
+ * confirm against the struct declaration.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	alloc_rx_queue,
+	free_rx_queue,
+
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe all FPGA flow modules for this adapter and populate the
+ * per-adapter backend_dev_s slot.  Each module follows the same
+ * pattern: its *_nthw_init() is first called with a NULL instance —
+ * a return of 0 appears to indicate the module exists in this FPGA
+ * image — and only then is a real instance allocated and initialized;
+ * absent modules leave a NULL pointer, which the *_get_present()
+ * callbacks report.
+ * NOTE(review): the *_nthw_new() allocation results are used without
+ * NULL checks — confirm the allocators abort on OOM or add checks.
+ * Returns the shared backend ops table; *dev receives the per-adapter
+ * backend device used as the be_dev argument of every callback.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down every module instance created by bin_flow_backend_init().
+ * Pointers for modules that were not present are NULL — this assumes
+ * each *_nthw_delete() tolerates a NULL argument; TODO confirm.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/* Probe the FPGA flow modules and return the backend ops table;
+ * *be_dev receives the per-adapter backend handle passed to each op.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+/* Release all module instances owned by the backend handle. */
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Initialize the flow filter API for one adapter: create the binary
+ * backend on the FPGA and hand its ops table to the generic flow API.
+ * Returns 0 on success with *p_flow_device set; returns -1 and sets
+ * *p_flow_device to NULL if the flow device could not be created.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Shut down a flow device created by flow_filter_init().
+ * The backend handle is fetched up front because flow_api_done()
+ * presumably invalidates dev; the backend itself is released last.
+ * Returns the result of flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): the include guard says _HPP_ although this is a C header,
+ * and identifiers starting with double underscore are reserved by the C
+ * standard -- consider renaming the guard (kept unchanged here).
+ */
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/* Create the flow filter API instance for one adapter (see flow_filter.c). */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Tear down a flow device created by flow_filter_init(). */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v8 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-29  8:15   ` [PATCH v8 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-29  8:15   ` Mykola Kostenok
  2023-08-29  8:15   ` [PATCH v8 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12514 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c3f2c993f..02aca74173 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state (4GA). */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;              /* PCI identifier of the adapter */
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;               /* driver name string */
+
+	/* presumably set to request worker-thread shutdown -- confirm */
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;       /* serializes statistics access */
+	pthread_t stat_thread;          /* statistics polling thread */
+	pthread_t flm_thread;           /* flow manager (FLM) thread */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;                 /* index of this adapter */
+	struct rte_pci_device *p_dev;   /* underlying DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;       /* per-adapter driver state */
+
+	int n_eth_dev_init_count;       /* count of initialized eth devs */
+	int probe_finished;             /* presumably set after probe -- confirm */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+/* Split-ring "available" ring; packed so the in-memory layout matches
+ * what the device expects (no compiler padding).
+ */
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+/* Split-ring "used" ring, written by the device side. */
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used and descriptor areas within one virtqueue
+ * allocation; the avail area starts at offset 0 (see dbs_calc_struct_layout).
+ */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+/* Queue slot state: UNUSED = slot free, otherwise in use. */
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/* Driver-side state for one DBS virtqueue; the anonymous union holds
+ * either split-ring or packed-ring bookkeeping depending on vq_type.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Packed-ring layout: byte offsets of the two event-suppression areas. */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* File-scope queue tables indexed by DBS queue number. */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Program the DBS RX init register for one queue and wait for completion.
+ * Busy-waits on the hardware "busy" flag both before issuing the init and
+ * after (no timeout -- assumes the FPGA always clears busy).
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Program the DBS TX init register for one queue and wait for completion.
+ * Same unbounded busy-wait pattern as dbs_init_rx_queue().
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Create and initialize the DBS module for this FPGA and bring all RX/TX
+ * queues to a known state. Returns 0 on success, -1 on allocation failure,
+ * or the dbs_init() error code otherwise. On success the module is owned
+ * by p_fpga_info->mp_nthw_dbs.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* Mark all software queue slots free before touching hardware. */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Step the RX/TX control state up: everything off, then AM/UW on
+	 * with queues still off, then queues on.
+	 */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the byte offsets of the used and descriptor areas for a split
+ * virtqueue of the given size. Each area is rounded up to the next
+ * STRUCT_ALIGNMENT (4 KiB) boundary; the avail area starts at offset 0.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	size_t avail_mem =
+		sizeof(struct virtq_avail) +
+		queue_size *
+		sizeof(le16); /* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_mem_aligned =
+		((avail_mem % STRUCT_ALIGNMENT) == 0) ?
+		avail_mem :
+		STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_mem =
+		sizeof(struct virtq_used) +
+		queue_size *
+		sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned =
+		((used_mem % STRUCT_ALIGNMENT) == 0) ?
+		used_mem :
+		STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+/*
+ * Initialize a split-ring avail structure: interrupts suppressed, idx set
+ * to the caller's starting value, and an identity mapping in the ring
+ * (entry i refers to descriptor i).
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+/*
+ * Initialize a split-ring used structure: idx reset and all entries zeroed.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = 1; /* 1 == VIRTQ_USED_F_NO_NOTIFY; consider using the named constant */
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Fill the descriptor table with the physical address and length of each
+ * packet buffer, applying the same flags to all entries. No-op when
+ * packet_buffer_descriptors is NULL.
+ * NOTE(review): the 'ule16' parameter type is not defined in this file
+ * (only le16/le32/le64 are) -- verify it comes from an included header,
+ * or whether 'le16' was intended.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+/*
+ * Convenience wrapper: initialize the avail, used and descriptor areas of
+ * one split virtqueue in a single call.
+ * NOTE(review): same 'ule16' type concern as dbs_initialize_descriptor_struct().
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * Return floor(log2(qsize)), i.e. the position of the highest set bit;
+ * queue sizes are presumably powers of two so this is the exact log2.
+ * NOTE(review): qsize == 0 makes '--qs' underflow and returns a large
+ * value -- callers must pass a nonzero queue size.
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure one DBS RX virtqueue (split or packed) and return a handle
+ * into the static rxvq[] table, or NULL if any DBS register write fails.
+ * Interrupts always start disabled; for queues with irq_vector >= 0 the
+ * avail-monitor is enabled later (after vfio interrupts are set up in
+ * the kernel) -- see nthw_enable_rx_virt_queue().
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	/* Packed rings report progress via the descriptor area itself. */
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Disable an RX virtqueue: clear the interrupt/ISTK state, disable the
+ * avail-monitor (AM), then wait for the FPGA to finish outstanding
+ * packet processing. Returns 0 on success, -1 on an invalid handle or a
+ * failed register write. Only valid for UNMANAGED queues.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re-)enable an RX virtqueue: program interrupt/ISTK state when the
+ * queue has a valid MSI-X vector, then re-enable the avail-monitor (AM).
+ * Returns 0 on success, -1 on an invalid handle or a failed register
+ * write. Only valid for UNMANAGED queues.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable a Tx virt queue: clear the interrupt setup in the UW register
+ * data, disable the avail monitor (AM) and wait for the FPGA to drain the
+ * queue. Only valid for UNMANAGED queues.
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re-)enable a Tx virt queue: program the UW register data - optionally
+ * with an MSI-X vector and sticky interrupt (ISTK) - then re-enable the
+ * avail monitor (AM). Only valid for UNMANAGED queues.
+ * Returns 0 on success, -1 on invalid queue or register-write failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-program the Tx DR register data with a new output port, then enable
+ * the queue. Returns 0 on success, -1 on invalid queue or register failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Guard against NULL like the other enable/disable entry points */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/*
+ * Configure per-port Tx QoS: enable flag, information rate (ir) and burst
+ * size (bs). Thin wrapper around set_tx_qos_data(); returns its status.
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	const int status = set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+
+	return status;
+}
+
+/*
+ * Set the global Tx QoS rate as multiplier/divider.
+ * Thin wrapper around set_tx_qos_rate(); returns its status.
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	const int status = set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+
+	return status;
+}
+
+/* Sentinel returned while the HW pointer sample is not yet valid */
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Sample the Rx queue pointer previously selected with
+ * dbs_initialize_get_rx_ptr(). On success *p_index holds the pointer, or
+ * INDEX_PTR_NOT_VALID if the HW has not produced a valid sample yet.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0) {
+		if (valid)
+			*p_index = ptr;
+		else
+			*p_index = INDEX_PTR_NOT_VALID;
+	}
+	return status;
+}
+
+/*
+ * Sample the Tx queue pointer previously selected with
+ * dbs_initialize_get_tx_ptr(). On success *p_index holds the pointer, or
+ * INDEX_PTR_NOT_VALID if the HW has not produced a valid sample yet.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+	const int status = get_tx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/* Select which Rx queue the subsequent pointer sampling reads from */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Select which Tx queue the subsequent pointer sampling reads from */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Busy-poll the Rx (rx != 0) or Tx idle register until the module no
+ * longer reports busy, or the register read fails. The last sampled idle
+ * flag is left in *idle. Returns the register-access status.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	nthw_dbs_t *const p_nthw_dbs = vq->mp_nthw_dbs;
+	uint32_t busy;
+	uint32_t queue;
+	int err;
+
+	for (;;) {
+		err = rx ? get_rx_idle(p_nthw_dbs, idle, &queue, &busy) :
+			   get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+		if (err != 0 || !busy)
+			return err;
+	}
+}
+
+/*
+ * Ask the FPGA to idle the given queue and wait until it reports idle.
+ * If the FPGA does not support the idle register (-ENOTSUP), fall back to
+ * a fixed 200 ms grace period and report success.
+ * Returns 0 on success, -1 on register-access failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	/* Request idle, then re-poll until the HW confirms it */
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an Rx virt queue: clear UW, disable AM, wait for the FPGA to
+ * drain, clear the remaining AM/DR register data, re-init the HW queue and
+ * reset the software queue state.
+ * Returns 0 on success, -1 on NULL queue or register-write failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+
+	/* Read the DBS handle only after the NULL check; the previous code
+	 * dereferenced rxvq before validating it.
+	 */
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an UNMANAGED Rx queue (caller owns the packet buffers) */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Release a MANAGED Rx queue: free the driver-owned shadow array of packet
+ * buffer descriptors, then release the HW queue.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a Tx virt queue: clear UW, disable AM, wait for the FPGA to
+ * drain, clear the remaining AM/DR/QP register data, re-init the HW queue
+ * and reset the software queue state.
+ * Returns 0 on success, -1 on NULL queue or register-write failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+
+	/* Read the DBS handle only after the NULL check; the previous code
+	 * dereferenced txvq before validating it.
+	 */
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an UNMANAGED Tx queue (caller owns the packet buffers) */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Release a MANAGED Tx queue: free the driver-owned shadow array of packet
+ * buffer descriptors, then release the HW queue.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Program an UNMANAGED Tx virt queue into the DBS module (sequence per
+ * DSF00094): DR data, UW data, AM data (disabled), queue init, QP data,
+ * then AM enable - the latter only when no interrupt vector is requested,
+ * since interrupt-driven queues are enabled later via
+ * nthw_enable_tx_virt_queue().
+ * Returns a handle into the file-scope txvq[] table, or NULL on failure.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings hand the descriptor area to UW; split rings the used ring */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Set up a MANAGED Rx virt queue using the split-ring layout.
+ * Lays out the avail/used/desc structures inside p_virt_struct_area, keeps
+ * a malloc'ed shadow copy of the packet-buffer descriptors and programs the
+ * queue via nthw_setup_rx_virt_queue().
+ * Returns the queue handle, or NULL if the shadow allocation fails.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check the allocation before copying into it */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Set up a MANAGED Tx virt queue using the split-ring layout.
+ * Lays out the avail/used/desc structures inside p_virt_struct_area, keeps
+ * a malloc'ed shadow copy of the packet-buffer descriptors and programs the
+ * queue via nthw_setup_tx_virt_queue().
+ * Returns the queue handle, or NULL if the shadow allocation fails.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check the allocation before copying into it */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Common packed-ring setup for managed Rx/Tx queues.
+ * Zeroes the struct area, computes the device/driver event-suppression
+ * offsets, pre-fills all descriptors (Rx also gets buffer address/len),
+ * toggles the relevant wrap counter to mark the ring pre-filled and
+ * allocates a shadow copy of the packet-buffer descriptors.
+ * Requires the FPGA to complete descriptors in order (buffer id == index).
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Set up a MANAGED Rx virt queue using the packed-ring layout and program
+ * it into the DBS module. Returns the queue handle or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Set up a MANAGED Tx virt queue using the packed-ring layout and program
+ * it into the DBS module. Returns the queue handle or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on the ring layout; unknown layouts yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on the ring layout; unknown layouts yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Flag value to publish a descriptor as available for the current wrap */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse used flag for the current wrap (avail != used marks "available") */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, toggling the avail wrap counter on wrap-around */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, toggling the used wrap counter on wrap-around */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Harvest up to n completed Rx entries into rp[].
+ * Returns the number of ring entries consumed (segments); *nb_pkts receives
+ * the number of whole packets. In the split-ring case a jumbo packet spans
+ * several consecutive entries and is only delivered if all its segments fit
+ * within n. The packed-ring path assumes in-order completion by the FPGA.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* All Rx buffers share the size of descriptor 0 */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Descriptor is completed when both bits match the wrap count */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+/*
+ * Hand n consumed Rx buffers back to the device.
+ * Split ring: bump the avail index. Packed ring: refill descriptors and
+ * publish the first descriptor's flags last, behind a barrier, so the HW
+ * never observes a partially written batch.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/* NOTE(review): rte_rmb() guards a store here; a write barrier
+		 * (rte_wmb) would be the conventional choice - confirm intent
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Compiled-out verbose queue logging hook */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n free Tx descriptors.
+ * *first_idx receives the index of the first reserved descriptor and *cvq
+ * the ring to fill (split or packed view). Returns the number of
+ * descriptors actually available (may be less than n, or 0 for unknown
+ * ring types). The packed path tracks out-of-band completions in
+ * txvq->outs and assumes in-order completion by the FPGA.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* First serve descriptors already reclaimed on a previous call */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Reclaimed past what was asked for - stash the excess in outs */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Publish n filled Tx packets to the device; n_segs[i] gives the number of
+ * descriptors packet i occupies. Split ring: link packets into the avail
+ * ring and bump the avail index behind a full barrier. Packed ring: set
+ * descriptor flags, publishing the first descriptor last behind a barrier
+ * so the HW never observes a partially written batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* NOTE(review): comment says "read barrier" but this orders
+		 * stores before publishing flags - rte_wmb() would be the
+		 * conventional choice; confirm intent
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the current HW Rx queue pointer into *index.
+ * Selects the queue, then polls (10 us per iteration, up to 100000 tries)
+ * until the sample is valid. Returns 0 on success, -1 on error/timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	/* Fail early if selecting the queue in HW fails (was ignored) */
+	if (dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index) != 0)
+		return -1;
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (rx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the current HW Tx queue pointer into *index.
+ * Selects the queue, then polls (10 us per iteration, up to 100000 tries)
+ * until the sample is valid. Returns 0 on success, -1 on error/timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	/* Fail early if selecting the queue in HW fails (was ignored) */
+	if (dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index) != 0)
+		return -1;
+	do {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (tx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/*
+ * Descriptor for one DMA-able memory area: the physical and virtual
+ * address of the same buffer plus its length in bytes.
+ * NOTE(review): phys_addr is declared void * although it carries a
+ * physical/IOVA address (the TX path casts it to ule64).
+ */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/*
+ * Little-endian fixed-width aliases used throughout the virtqueue
+ * structures below.
+ * NOTE(review): these would be safer as typedefs than as object-like
+ * macros (scope rules, debugger visibility).
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+#define SPLIT_RING 0
+#define PACKED_RING 1
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/*
+ * Packed-ring event suppression structure (driver/device notification
+ * area), mirroring struct pvirtq_event_suppress from the VirtIO 1.1
+ * specification.
+ */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/*
+	 * NOTE(review): the comment above says 4-byte alignment but the
+	 * pragma below pushes a 16-byte packing value in the middle of
+	 * the struct - confirm the intended layout.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ */
+/*
+ * Set the 'next' link of descriptor 'index'. Only meaningful for split
+ * rings; a packed-ring pvirtq_desc has no next field (chaining is by
+ * contiguity), so this is a no-op for packed rings.
+ * The __typeof__ local guards against evaluating the _vq argument twice.
+ */
+#define vq_set_next(_vq, index, nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/*
+ * OR extra flag bits into descriptor 'index' of either ring layout.
+ * Arguments are captured into locals to avoid multiple evaluation.
+ */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/*
+ * Overwrite the flags of descriptor 'index' of either ring layout.
+ * Arguments are captured into locals to avoid multiple evaluation.
+ */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..ce07d5a8cd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/*
+ * Debug helper: hex-dump 'len' bytes at 'data', 16 bytes per line,
+ * preceded by an optional heading line when 'text' is non-NULL.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (int i = 0; i < len; i++) {
+		if (i % 16 == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter driver instance in the global g_p_drv[] table.
+ *
+ * Validates the adapter number, warns (diagnostic only) if the slot is
+ * already occupied - it will be overwritten - and stores the pointer
+ * under the global hwlock.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/*
+	 * g_p_drv has NUM_ADAPTER_MAX entries, so valid indices are
+	 * 0..NUM_ADAPTER_MAX-1. The original '>' comparison was off by
+	 * one and allowed an out-of-bounds write at index NUM_ADAPTER_MAX.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	/* Best-effort check done outside the lock; purely diagnostic */
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up a registered adapter by PCI address.
+ *
+ * Matches on PCI domain and bus number only (devid/function of 'addr'
+ * are ignored). The scan of g_p_drv[] is done under hwlock.
+ *
+ * @return the matching adapter instance, or NULL when none is found
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	int i;
+	struct drv_s *p_drv = NULL;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		if (g_p_drv[i]) {
+			if (PCIIDENT_TO_DOMAIN(g_p_drv[i]->ntdrv.pciident) ==
+					addr.domain &&
+					PCIIDENT_TO_BUSNR(g_p_drv[i]->ntdrv.pciident) ==
+					addr.bus) {
+				p_drv = g_p_drv[i];
+				break;
+			}
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_drv;
+}
+
+/*
+ * Look up a registered adapter by its packed PCI identifier.
+ * Unpacks the identifier into an rte_pci_addr and delegates to
+ * get_pdrv_from_pci().
+ */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr pci_addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(pci_addr);
+}
+
+/*
+ * Dump adapter information for the adapter matching 'pciident' to 'pfh'.
+ *
+ * @return the result of nt4ga_adapter_show_info(), or -1 when no
+ *         adapter with that PCI identifier is registered
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	/*
+	 * get_pdrv_from_pciident() returns NULL for unknown adapters;
+	 * the original dereferenced the pointer unconditionally.
+	 */
+	if (!p_drv)
+		return -1;
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Resolve the DBS (virt-queue control) instance belonging to the
+ * adapter at 'pci_addr'. Logs an error and returns NULL when no such
+ * adapter is registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (p_drv)
+		return p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+
+	NT_LOG(ERR, ETHDEV,
+	       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+	       " is not found\n",
+	       (nthw_dbs_t *)NULL, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+	       pci_addr.function);
+	return NULL;
+}
+
+/*
+ * Return the FPGA profile of the adapter at 'pci_addr'.
+ * Logs an error and returns FPGA_INFO_PROFILE_UNKNOWN when no adapter
+ * with that address is registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	enum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		fpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+	}
+	return fpga_profile;
+}
+
+/*
+ * rte_kvargs handler: parse 'value_str' as an unsigned 32-bit number
+ * (decimal, octal or hex - base auto-detected) into *extra_args.
+ *
+ * @return 0 on success, -1 on missing arguments or malformed input
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+
+	/*
+	 * strtoul with endptr/errno checking instead of the original
+	 * unchecked strtol: reject empty strings, trailing junk and
+	 * out-of-range values instead of silently truncating them.
+	 */
+	char *end = NULL;
+
+	errno = 0;
+	const unsigned long value = strtoul(value_str, &end, 0);
+
+	if (end == value_str || *end != '\0' || errno == ERANGE ||
+			value > UINT32_MAX)
+		return -1;
+	*(uint32_t *)extra_args = (uint32_t)value;
+	return 0;
+}
+
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+/*
+ * rte_kvargs handler storing the parsed pair through the caller's
+ * cursor (*extra_args points into an array of struct port_link_speed)
+ * and then advancing that cursor by one element.
+ * NOTE(review): the strtol results are not validated beyond the ':'
+ * separator check - malformed numbers silently parse as 0.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* advance the caller's cursor to the next array element */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Collect DPDK port statistics for 'internals' into 'stats'.
+ *
+ * Pulls fresh numbers from the adapter via poll_statistics() and then
+ * fills the per-queue counters plus the Rx/Tx packet, byte and error
+ * totals.
+ *
+ * @return 0 on success, -1 on invalid state (missing stat objects or
+ *         interface index out of range)
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/*
+	 * Valid port indices are 0..NUM_ADAPTER_PORTS_MAX-1; the
+	 * original '>' comparison was off by one and accepted
+	 * NUM_ADAPTER_PORTS_MAX itself.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index >= NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero the per-queue Rx/Tx software counters and record the reset
+ * timestamp, all under the driver's statistics lock.
+ *
+ * @return 0 on success, -1 on invalid state (missing stat objects or
+ *         interface index out of range)
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/*
+	 * Valid port indices are 0..NUM_ADAPTER_PORTS_MAX-1; the
+	 * original '>' comparison was off by one (same fix as in
+	 * dpdk_stats_collect()).
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no >= NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * Map an NT link speed enum onto the matching ETH_SPEED_NUM_xxx value.
+ * NOTE: please note the difference between ETH_SPEED_NUM_xxx and
+ * ETH_LINK_SPEED_xxx (capability bits). Unknown values map to
+ * ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/*
+ * Map an NT duplex enum onto the matching ETH_LINK_xxx_DUPLEX value;
+ * unknown or unrecognized values yield 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * rte_ethdev .link_update callback.
+ *
+ * For virtual/override ports the link state is derived from the vport
+ * negotiation state and the speed is reported as NONE. For physical
+ * ports, status, speed and duplex are read from the adapter. When the
+ * device is not started the link is reported down. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			/* virtual ports: link follows vport negotiation only */
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		/* physical port: query the adapter for live link state */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		/* device stopped: report link down */
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/* rte_ethdev .stats_get callback - delegates to dpdk_stats_collect(). */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *pmd = eth_dev->data->dev_private;
+
+	dpdk_stats_collect(pmd, stats);
+	return 0;
+}
+
+/* rte_ethdev .stats_reset callback - delegates to dpdk_stats_reset(). */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *pmd = eth_dev->data->dev_private;
+	struct ntdrv_4ga_s *nt_drv = &pmd->p_drv->ntdrv;
+
+	dpdk_stats_reset(pmd, nt_drv, pmd->if_index);
+	return 0;
+}
+
+/*
+ * Translate a bitmask of NT_LINK_SPEED_xxx capability bits into the
+ * equivalent ETH_LINK_SPEED_xxx capability bitmask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt;
+		uint32_t eth;
+	} capa_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < RTE_DIM(capa_map); i++)
+		if (nt_link_speed_capa & capa_map[i].nt)
+			eth_speed_capa |= capa_map[i].eth;
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * rte_ethdev .dev_infos_get callback: report MTU limits, queue counts,
+ * link-speed capabilities and RSS offload capabilities for this port.
+ * Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* inline profile requires a larger minimum MTU */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use a key, so this is just a fake key
+	 * length to meet testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet out of the virtqueue segment list 'hw_recv'
+ * into 'mbuf', chaining extra mbufs from 'mb_pool' when the packet does
+ * not fit into the first mbuf.
+ *
+ * @param mbuf     first (already allocated) destination mbuf
+ * @param mb_pool  pool used to allocate chained mbufs as needed
+ * @param hw_recv  array of virtqueue segments; the first segment starts
+ *                 with an SG_HDR_SIZE packet header that is stripped
+ * @param max_segs number of segments available in hw_recv
+ * @param data_len total captured length including the header
+ * @return number of virtqueue segments consumed, or -1 on mbuf
+ *         allocation failure or segment-count mismatch
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		if (cpy_size > cpto_size) {
+			/* current mbuf cannot hold this whole vq segment */
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one packet from an mbuf chain into virtqueue buffers.
+ *
+ * Both sides may be segmented: the mbuf packet can consist of several
+ * segments, and each virtqueue buffer holds at most
+ * SG_HW_TX_PKT_BUFFER_SIZE bytes.  The first virtqueue buffer also
+ * carries an SG_HDR_SIZE packet header that the caller has already
+ * written; the copy starts right after it.
+ *
+ * cvq_desc     - combined virtqueue descriptor table to fill in
+ * vq_descr_idx - index of the first descriptor to use
+ * vq_bufs      - per-descriptor buffer memory
+ * max_segs     - number of virtqueue segments available for this packet
+ * mbuf         - source packet (possibly multi-segment)
+ *
+ * Returns the number of virtqueue segments consumed, or -1 if the packet
+ * did not fit within max_segs segments.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	/* remain tracks bytes left of the whole packet, cpy_size of the current mbuf seg */
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	/* First descriptor already accounts for the packet header */
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* Chain a fresh descriptor onto the current one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/* Out of segments; the error is reported after the loop */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				/* Continuation buffers carry no header - full size usable */
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len promised more data than the segments delivered */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* Convert the last used index into a segment count */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK Tx burst handler for the scatter-gather (SCG) queue path.
+ *
+ * Copies up to nb_pkts mbufs into virtqueue buffers (each packet prefixed
+ * by an SG_HDR_SIZE header, filled in for the Vswitch profile only),
+ * releases the buffers to hardware and frees the consumed mbufs.
+ *
+ * Returns the number of packets handed to hardware; may be less than
+ * nb_pkts when the virtqueue has fewer free buffers than requested.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* PMD is shutting down - refuse new work */
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad runt frames to the 60-byte minimum Ethernet size.
+		 * NOTE(review): the padding bytes are not zeroed, so stale
+		 * mbuf contents are transmitted - confirm this is acceptable.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* Fast path: single mbuf segment fits into a single vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* Segmented on either side - use the chaining copy helper */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* Hand the filled segments to hardware (if any were prepared) */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and IOMMU-map the memory backing one hardware virtio queue:
+ * a 1MB control area for the combined descriptor rings followed by
+ * num_descr packet buffers of buf_size bytes each.
+ *
+ * The fast path requires the combined allocation not to cross a 1G
+ * boundary and HPA/IOVA to share the same offset within the 1G page.
+ * If that cannot be satisfied, a fallback path maps the control area
+ * and the packet buffers as two separate VFIO DMA mappings.
+ *
+ * Returns 0 on success, -1 on failure.  All error paths release the
+ * memory they allocated (previously several of them leaked).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Just allocate 1MB to hold all combined descr rings */
+		size = 0x100000;
+		void *virt = rte_malloc_socket("VirtQDescr", size, 4096,
+					       eth_dev->data->numa_node);
+		if (!virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			/* Fix: do not leak the control area on map failure */
+			rte_free(virt);
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/*
+			 * Fix: release everything allocated above instead of
+			 * leaking it.  NOTE(review): the control area is still
+			 * VFIO-mapped here; confirm whether an unmap should
+			 * precede the free in nt_vfio semantics.
+			 */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		/* Fix: do not leak the combined allocation on map failure */
+		rte_free(virt);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	/* Packet buffers start right after the 1MB control area */
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Release the VF ownership of a HW virtio queue (clears the VF number). */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	/* Nothing to do for a NULL queue or one never assigned to a VF */
+	if (hwq != NULL && hwq->vf_num != 0)
+		hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the VFIO DMA region, release
+ * the VF association and free the host memory.
+ * Returns 0 on success, -1 if the VFIO unmap fails (memory is kept).
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	const int vf = hwq->vf_num;
+	void *ctrl_virt = hwq->virt_queues_ctrl.virt_addr;
+
+	int rc = nt_vfio_dma_unmap(vf, ctrl_virt,
+				   (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				   ONE_G_SIZE);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       rc, vf);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(ctrl_virt);
+	return 0;
+}
+
+/* DPDK tx_queue_release callback - tear down the queue's HW virtio memory. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *pmd = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&pmd->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* DPDK rx_queue_release callback - tear down the queue's HW virtio memory. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *pmd = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&pmd->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Running count of HW queues handed out so far (file scope). */
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+/*
+ * Reserve @num consecutive queue indices from the global pool.
+ * NOTE(review): not thread-safe - assumes queue setup runs single-threaded.
+ */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	/* Fix: arguments are signed ints - log with %i, not %u */
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%i, New queues=%i, Max queues=%i\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+/*
+ * DPDK rx_queue_setup callback for the SCG path.
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool bookkeeping is done.
+ * For all other port types a HW virtio queue is allocated and a managed
+ * Rx virt-queue is set up on it.  Returns 0 on success, -1 on failure.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports use the mbuf pool directly - no HW queue needed */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* Usable buffer size = mempool data room minus the DPDK headroom */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	/* NOTE(review): the returned vq handle is not checked for NULL here -
+	 * confirm whether nthw_setup_managed_rx_virt_queue() can fail. */
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_setup callback for the SCG path.
+ *
+ * Computes the HW bypass target for the queue, allocates and maps the HW
+ * virtio queue memory, sets up a managed Tx virt-queue and binds the
+ * virtual-port queues to this Tx port.  Returns 0 on success, -1 on error.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * Fix: validate the queue id BEFORE using it to index txq_scg[].
+	 * Valid ids are 0..nb_tx_queues-1, hence ">=" (the original used
+	 * ">" and checked only after the array had been indexed).
+	 */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	/* Override ports need no HW queue */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Set the inline-path MTU.  Only supported on physical ports and only for
+ * MTUs in [MIN_MTU_INLINE, MAX_MTU]; anything else yields -EINVAL.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+
+	if (internals->type != PORT_TYPE_PHYSICAL || mtu < MIN_MTU_INLINE ||
+			mtu > MAX_MTU)
+		return -EINVAL;
+
+	return flow_set_mtu_inline(internals->flw_dev, internals->port, mtu) ?
+	       -EINVAL : 0;
+}
+
+/*
+ * DPDK mtu_set callback.
+ *
+ * Virtual ports: the exception-path queue keeps MAX_MTU while every
+ * virtual-port queue gets the requested MTU.  Physical ports: the
+ * exception path keeps MAX_MTU and the port itself gets the requested
+ * MTU.  Returns 0 on success, -EINVAL for a bad MTU or port type,
+ * otherwise the error code from nthw_epp_set_mtu().
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	/* NOTE(review): initialized to positive EINVAL, while the failure
+	 * branch below returns -EINVAL; every path overwrites this value,
+	 * but confirm the intended sign convention. */
+	int retval = EINVAL;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* Apply the requested MTU to every virtual-port queue */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* DPDK rx_queue_start callback - only updates the DPDK queue state. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* DPDK rx_queue_stop callback - only updates the DPDK queue state. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_start callback - only updates the DPDK queue state.
+ * Fix: the parameter was copy-pasted as "rx_queue_id" in a Tx function.
+ */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_stop callback - only updates the DPDK queue state.
+ * Fix: the parameter was copy-pasted as "rx_queue_id" in a Tx function.
+ */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* DPDK mac_addr_remove callback - clear the MAC entry at @index. */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+		return;
+	}
+
+	/* Out of range: log and leave the table untouched (release builds) */
+	const struct pmd_internals *const internals =
+			dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+}
+
+/*
+ * DPDK mac_addr_add callback - store @mac_addr at @index in the port's
+ * address table.  Returns 0 on success, -1 for an out-of-range index.
+ */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		eth_addrs[index] = *mac_addr;
+		return 0;
+	}
+
+	/* Out of range: log and reject (release builds) */
+	const struct pmd_internals *const internals =
+			dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+	return -1;
+}
+
+/* DPDK mac_addr_set callback - the primary MAC occupies table slot 0. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * DPDK set_mc_addr_list callback - replace the port's multicast address
+ * table; unused trailing slots are cleared.  Returns 0 on success, -1
+ * when more addresses are supplied than the table can hold.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	/*
+	 * Fix off-by-one: the table holds NUM_MULTICAST_ADDRS_PER_PORT
+	 * entries, so exactly that many addresses must be accepted.
+	 */
+	if (nb_mc_addr > NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (> %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * DPDK dev_configure callback - marks the probe as finished and forces
+ * promiscuous mode on (the device always runs promiscuous).
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode.
+	 * Fix: the original "x ^= ~x" set the field to all-ones (0xFF);
+	 * DPDK expects the canonical 0/1 value.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * DPDK dev_start callback.
+ *
+ * Virtual/override ports simply report link up.  Physical ports wait up
+ * to 5 seconds for the FPGA port link to come up and then apply any
+ * requested host/line loopback modes.  Always returns 0 (the link-wait
+ * timeout is logged but not treated as an error).
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		/* NOTE(review): ETH_LINK_UP is the legacy macro spelling;
+		 * newer DPDK uses RTE_ETH_LINK_UP - confirm target version. */
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		if (internals->lpbk_mode) {
+			/* bit 0 requests host-side loopback */
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			/* bit 1 requests line-side loopback */
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
+		struct ntnic_tx_queue *tx_q = internals->txq_scg;
+
+		uint q;
+
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(rx_q[q].vq);
+
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(tx_q[q].vq);
+
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
/* ethdev .flow_ops_get callback: hand back the PMD's rte_flow ops table. */
static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_ops **ops)
{
	*ops = &_dev_flow_ops;
	return 0;
}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * DPDK ethdev callback table for the NTNIC PMD.
 *
 * Deliberately non-const: .mtu_set is patched in at probe time when the
 * FPGA exposes an EPP module (see nthw_pci_dev_init).  Entries left NULL
 * (.mtu_set initially, .promiscuous_disable) are unsupported operations.
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL,	/* assigned later if FPGA has an EPP module */
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL,	/* device cannot leave promiscuous mode */
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
/*
 * Adapter flm stat thread
 *
 * Per-adapter control thread: repeatedly calls flm_mtr_update_stats()
 * until p_drv->ntdrv.b_shutdown is set (done in eth_dev_close).
 */
static void *adapter_flm_thread_fn(void *context)
{
	struct drv_s *p_drv = context;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;

	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
	       p_adapter_info->mp_adapter_id_str, __func__);

	/* Poll once per second until another thread publishes eth_base.
	 * NOTE(review): plain (non-atomic) read of a shared pointer; appears
	 * to rely on it being written once — confirm memory-ordering safety.
	 */
	while (p_flow_nic_dev->eth_base == NULL)
		usleep(1000000);
	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;

	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
	       __func__);

	/* Main loop: back off 10 us whenever the update call returns 0. */
	while (!p_drv->ntdrv.b_shutdown) {
		if (flm_mtr_update_stats(dev) == 0)
			usleep(10);
	}

	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
	       __func__);

	return NULL;
}
+
/*
 * Adapter stat thread
 *
 * Per-adapter control thread: every 10 ms it triggers a statistics
 * snapshot in hardware, waits for its completion, and collects the result
 * under stat_lck.  Runs until p_drv->ntdrv.b_shutdown is set (done in
 * eth_dev_close).
 */
static void *adapter_stat_thread_fn(void *context)
{
	struct drv_s *p_drv = context;
	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;

	const char *const p_adapter_id_str _unused =
		p_nt_drv->adapter_info.mp_adapter_id_str;

	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);

	assert(p_nthw_stat);

	while (!p_drv->ntdrv.b_shutdown) {
		/* 10 ms pause between statistics snapshots */
		usleep(100 * 100);

		/* Kick off a new statistics transfer in hardware. */
		nthw_stat_trigger(p_nthw_stat);

		uint32_t loop = 0;

		/* Wait (100 us steps) until the (uint64_t)-1 sentinel in
		 * mp_timestamp is overwritten, i.e. the snapshot is complete.
		 */
		while ((!p_drv->ntdrv.b_shutdown) &&
				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
			usleep(1 * 100);

			/* With debug logging enabled, every 0x4000 iterations
			 * dump RMC status registers to diagnose a frozen
			 * statistics DMA.
			 */
			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
					(++loop & 0x3fff) == 0) {
				uint32_t sf_ram_of =
					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t descr_fifo_of =
				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);

				uint32_t dbg_merge =
					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t mac_if_err =
					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);

				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
				NT_LOG(ERR, ETHDEV,
				       "SF RAM Overflow     : %08x\n",
				       sf_ram_of);
				NT_LOG(ERR, ETHDEV,
				       "Descr Fifo Overflow : %08x\n",
				       descr_fifo_of);
				NT_LOG(ERR, ETHDEV,
				       "DBG Merge           : %08x\n",
				       dbg_merge);
				NT_LOG(ERR, ETHDEV,
				       "MAC If Errors       : %08x\n",
				       mac_if_err);
			}
		}

		/* Check then collect */
		{
			pthread_mutex_lock(&p_nt_drv->stat_lck);
			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
			pthread_mutex_unlock(&p_nt_drv->stat_lck);
		}
	}

	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);

	return NULL;
}
+
/* File-global state for virtual-port representors.
 * NOTE(review): appears to be filled by setup_virtual_pf_representor_base()
 * during probe — confirm, as the writer is outside this view.
 */
static struct {
	struct rte_pci_device *vpf_dev;		/* backing PF PCI device */
	struct rte_eth_devargs eth_da;		/* parsed representor devargs */
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];	/* per virtual port */
	uint16_t pf_backer_port_id;		/* ethdev port id of the backing PF */
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * PMD teardown: release the managed Rx/Tx virtqueues of every port instance
+ * on the global list, release any virtqueues registered via
+ * register_release_virtqueue_info(), and remove the exception-path VFIO
+ * mapping.  Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	/* NOTE(review): fixed 1 s delay is a heuristic, not a synchronization */
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Walk the global list of port instances and release their queues */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release VF/vDPA virtqueues recorded for deferred release */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: flag the shutdown thread via kill_pmd.  Any other signal
+ * is forwarded by restoring the previously installed handler and re-raising.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		kill_pmd = 1;
+		return;
+	}
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/*
+ * Monitor thread started by init_shutdown(): waits for the SIGINT handler to
+ * set kill_pmd, runs the PMD teardown, then restores the previous SIGINT
+ * handler and re-raises the signal so the application terminates normally.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	/* nthw_pci_dev_deinit() ignores its argument; a dummy is sufficient */
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT handler and start the shutdown monitor thread.
+ * Also performs a one-time rough estimate of the TSC frequency
+ * (cycles per second) used to rate-limit statistics polling.
+ *
+ * Returns 0 on success, -1 if the shutdown thread could not be created.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* Fix: the pthread_create() result was silently ignored */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		/* No monitor thread exists; restore the original handler */
+		signal(SIGINT, previous_handler);
+		NT_LOG(ERR, ETHDEV, "%s: Failed to start shutdown handler\n",
+		       __func__);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point.  Validates the EAL environment (PCI support, VFIO
+ * IOMMU, hugepages, IOVA mode), rejects secondary bifurcation endpoints,
+ * then initializes the device and installs the shutdown handler.
+ * Returns 0 on success, negative on failure.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		/* Fix: error message said "hugepages" (copy-paste from below) */
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs PCI support\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		/* Non-fatal: VA mode works but PA is preferred */
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/* On bifurcated NT200A01/NT50B01 boards only the primary endpoint
+	 * (subsystem id 0x01) is driven by this PMD.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove callback: tear the device down via the generic ethdev helper. */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+}
+
+/*
+ * One-time logging setup: register an EAL log type for each NTNIC log
+ * module and record its id for later level lookups.  Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END; ++module) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+		const char *eal_name = nt_log_module_eal_name[idx];
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(eal_name, RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Logging backend: map an NTNIC log level/module pair onto the
+ * corresponding rte_log level and registered log type, then forward
+ * the message to rte_vlog().
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level = RTE_LOG_DEBUG;	/* default for unknown levels */
+	uint32_t rte_module;
+
+	switch (level) {
+	case NT_LOG_ERR:
+		rte_level = RTE_LOG_ERR;
+		break;
+	case NT_LOG_WRN:
+		rte_level = RTE_LOG_WARNING;
+		break;
+	case NT_LOG_INF:
+		rte_level = RTE_LOG_INFO;
+		break;
+	default:
+		break;
+	}
+
+	/* Known modules use their registered EAL log type; others pass through */
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END)
+		rte_module = (uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	else
+		rte_module = module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/* Return 1 when the module logs at DEBUG, 0 when not, -1 on bad module id. */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		return rte_log_get_level(nt_log_module_logtype[idx]) == RTE_LOG_DEBUG;
+	}
+	return -1;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* Constructor: plug the rte_log-based backend into the NTNIC logging API. */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver registration for the NTNIC PMD (requires a vfio-pci binding). */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record a virtqueue so it can be released at PMD teardown.
+ * Returns 0 on success, -1 when the bookkeeping table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	/* Claim the first free slot in the release table */
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Remove a previously registered virtqueue from the release table.
+ * Returns 0 when found and cleared, -1 when not present.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+	return -1;
+}
+
+/* Find the port instance with the given vhost id; NULL when none matches. */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *it;
+
+	for (it = pmd_intern_base; it != NULL; it = it->next) {
+		if (it->vhid == vhid)
+			return it;
+	}
+	return NULL;
+}
+
+/*
+ * Find the port instance whose vhost socket path matches 'path'.
+ * Returns the matching pmd_internals, or NULL when none matches.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		/* Fix: use driver logging instead of a stray debug printf() */
+		NT_LOG(DBG, VDPA, "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path,
+		       strcmp(intern->vhost_path, path));
+		if (strcmp(intern->vhost_path, path) == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues" devargs mapping of the form "...[p:q,p:q;...]" and
+ * fill portq[] with the queue count per virtual port.  The input string is
+ * modified in place (the terminating ']' is overwritten with '\0').
+ * Entries with out-of-range port or queue numbers are silently skipped.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	size_t len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Locate the opening bracket of the mapping list */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Locate the matching closing bracket, scanning backwards */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Fix: use reentrant strtok_r() instead of non-reentrant strtok() */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse the device's devargs for representor ports and an optional
+ * "portqueues" mapping.  Registers the PF device in 'rep' when representors
+ * are requested.  Returns the number of representor ports (0 when none),
+ * or -1 on devargs parse failure.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual port.  If PF representor devargs were
+ * registered (rep.vpf_dev), representor ids are handed out in order from
+ * that list and the PF becomes the backer port; otherwise the VFIO VF
+ * number is used.  *n_vq receives the queue count configured for the
+ * representor id via "portqueues" (default 1).  Returns NULL on failure.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	/* Index of the next unassigned representor id; persists across calls */
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* Queue count for this representor id, when a mapping was given */
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the devargs name of a PCI device, or NULL when unavailable. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev != NULL && dev->device.name != NULL) ?
+	       dev->device.name : NULL;
+}
+
+/* Devargs keys recognized for virtual ports: VLAN id and socket layout */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and initialize the private data for one virtual-port (representor)
+ * ethdev backed by 'vdev'.  Parses the "vlan" and "sep" devargs, allocates
+ * HW queues, connects the port to its flow device (directly for the vswitch
+ * profile, via the physical in-port otherwise) and starts the vDPA relay.
+ * On success *eth_dev points at the allocated device and 0 is returned;
+ * on failure -1 is returned and the partially allocated data is freed.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* Parse the optional "vlan" and "sep" devargs */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live between the physical range and the tunnel range */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* Inline profile: attach the queues to the physical in-port's
+		 * flow device instead of creating a new one
+		 */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* NOTE(review): sprintf() assumes DVIO_VHOST_DIR_NAME plus the port
+	 * number fits in 128 bytes - verify, or switch to snprintf()
+	 */
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Replace the allocated ethdev data with our private copy */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append this instance to the global list of port instances */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	/* NOTE(review): an allocated *eth_dev is not released on this path -
+	 * confirm whether callers clean it up
+	 */
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA, so the Rx burst handler is a no-op that
+ * always reports zero received packets.
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA, so the Tx burst handler is a no-op that
+ * always reports zero transmitted packets.
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * Create the DPDK ethdev for one virtual function / representor port and
+ * attach the proper Rx/Tx burst handlers: dummies for PORT_TYPE_OVERRIDE
+ * (traffic flows through VF/vDPA, not SCG), real SCG handlers otherwise.
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual-port ethdev for 'pci_dev': close all vDPA devices,
+ * free the private data and ethdev data, and release the port.
+ * Returns 0 on success, -1 when the device or its ethdev cannot be found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	/* NOTE(review): rte_eth_dev_release_port() may itself free
+	 * dev_private/data depending on DPDK version - confirm this
+	 * sequence cannot double free
+	 */
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Hash-distribution percentage assigned to port 0 (remainder goes to port 1) */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* Index values used with lag_set_config()/lag_set_port_block() */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* State transition requiring no configuration change. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links up: distribute Tx 50/50 between the two ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 link is up: route all Tx to port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 link is up: route all Tx to port 1. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/* Both links down: disable Tx on both port indices and the hash index. */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	int retval = 0;
+
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	retval += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX,
+				 LAG_NO_TX);
+	retval += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX,
+				 LAG_NO_TX);
+	return retval;
+}
+
+/* Read the physical link state of 'port' from the adapter; true when up. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/* Link-state bitmap: bit 0 = port 0 up, bit 1 = port 1 up. */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t port0_up = lag_get_link_status(config, 0);
+	const uint8_t port1_up = lag_get_link_status(config, 1);
+
+	return (port1_up << 1) | port0_up;
+}
+
+/*
+ * Switch traffic to the configured primary port: give it the full hash
+ * distribution and block Rx on the other (backup) port.  Returns the
+ * accumulated result of the configuration calls (0 on success).
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->primary_port == 0) {
+		/* If port 0 is the active primary, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active primary, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Switch traffic to the configured backup port: give it the full hash
+ * distribution and block Rx on the (failed) primary port.  Returns the
+ * accumulated result of the configuration calls (0 on success).
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->backup_port == 0) {
+		/* If port 0 is the active backup, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active backup, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup management loop: keep Tx on the primary port while its link
+ * is up; fail over to the backup port when the primary goes down and fall
+ * back when it recovers.  Polls the link state every 500 ms until
+ * config->lag_thread_active is cleared.  Always returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Handler invoked for one active/active link-state transition */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One row of the transition table: (src, dst) -> handler */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/*
+ * Full transition table for the active/active loop: every (src, dst)
+ * combination of the four link states has a handler.
+ * NOTE(review): consider making this 'static' - the generic name 'actions'
+ * is exported into the global namespace.
+ */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/* Find the handler for (current_state -> new_state); NULL when absent. */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const size_t n_actions = sizeof(actions) / sizeof(actions[0]);
+	size_t idx;
+
+	for (idx = 0; idx < n_actions; idx++) {
+		if (actions[idx].src_state == current_state &&
+				actions[idx].dst_state == new_state)
+			return actions[idx].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active (802.3AD-style) management loop: poll both link states every
+ * 500 ms and apply the matching transition handler from actions[].
+ * Runs until config->lag_thread_active is cleared.  Always returns 0.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/* Fix: lookup_action() can return NULL; guard before calling */
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/* LAG management thread entry: dispatch to the loop for the configured mode. */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		/* Unsupported mode: report and terminate the thread */
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+/* Unicast MAC addresses per port; element 0 is the default address */
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+/* Scatter-gather HW descriptor ring sizes and per-packet buffer sizes */
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+/* Size in bytes of the SG header prepended to each packet
+ * (matches sizeof(struct _pkt_hdr_rx/_pkt_hdr_tx), checked below)
+ */
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header
+ * (ceiling division of data + header over the Tx buffer size; 1 when
+ * everything fits in a single buffer)
+ */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+/* Map a Tx packet index to a virtqueue descriptor index.
+ * NOTE(review): expands a `first_vq_descr_idx` variable that must exist
+ * at the expansion site -- confirm at each use.
+ */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+/* Next descriptor index, wrapping at the ring size */
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+/* Lookup of PMD instances by vhost id / vhost socket path */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+/* Create/remove the DPDK ethdev for a VF; return 0 on success -- TODO confirm */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+/* Resolve adapter handles from a PCI address */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+/* (De)register a virtqueue for deferred release; rx/managed are flags */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+/* Copy one mbuf chain into virtqueue buffers starting at vq_descr_idx */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+/* Set non-zero while a LAG/bonding configuration is active */
+extern int lag_active;
+/* TSC frequency; presumably used to convert stat timestamps -- TODO confirm */
+extern uint64_t rte_tsc_freq;
+/* Global spinlock serializing hardware access */
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/* (duplicate "#define SG_HDR_SIZE 12" removed; defined once above) */
+
+/* Rx scatter-gather packet header, 96 bits (12 bytes) total.
+ * Bit layout is hardware-defined -- do not reorder or resize fields.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;	/* captured length */
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Tx scatter-gather packet header, 96 bits (12 bytes) total.
+ * Bit layout is hardware-defined -- do not reorder or resize fields.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size. */
+/* (negative array size forces a compile error when sizeof != SG_HDR_SIZE) */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+/* Opaque handle type */
+typedef void *handle_t;
+
+/* Per-queue hardware memory: control area plus packet buffer array */
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* State of one Rx queue of the PMD */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* backing hardware memory */
+	struct nthw_virt_queue *vq; /* associated virtqueue */
+	int nb_hw_rx_descr;
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* State of one Tx queue of the PMD */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* backing hardware memory */
+	struct nthw_virt_queue *vq; /* associated virtqueue */
+	int nb_hw_tx_descr;
+	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+/* Fixed-capacity list of uint32_t values (capacity = MAX_QUEUES) */
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count; /* number of valid entries in value[] */
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* LAG management thread */
+	uint8_t lag_thread_active; /* cleared to request thread shutdown */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+/* Supported bonding modes (subset of the DPDK bonding mode numbering) */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* Meter profile, kept in a per-port linked list */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* Meter instance referencing a profile, kept in a per-port linked list */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtio transport layout for a virtual port */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+/* Per-port private data of the PMD (rte_eth_dev private area) */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev; /* flow engine device handle */
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode;
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config; /* NULL when not part of a LAG */
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	struct pmd_internals *next; /* singly linked list of all instances */
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+/* Dump adapter debug info for the given PCI ident to pfh */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+/* Max match elements / actions in one converted flow */
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+/* Size of the MARK -> flow stat id map (must be a power of two) */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* Converted match-element list (FLOW_ELEM_TYPE_END terminated) */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus the match elements interpreted from it */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted flow attributes together with the match elements */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted actions plus storage for action payloads they reference */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/* PMD-side representation of an rte_flow handle */
+struct rte_flow {
+	void *flw_hdl; /* flow engine handle */
+	int used; /* slot in use */
+	uint32_t flow_stat_id; /* index into flow_stat_id_map */
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/* Private item types; INT_MIN keeps them clear of rte_flow item values */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/* Private action types; INT_MIN keeps them clear of rte_flow action values */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/* NOTE(review): these statics live in a header, so every including
+ * translation unit gets its own copy of the tables and lock -- confirm
+ * the header is included from a single .c file.
+ */
+static int convert_tables_initialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+/* rte_flow item/action type -> internal FLOW_* type (-1 = unsupported) */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/* Translate an rte_flow item type; -1 for out-of-range or unsupported */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+/* Translate an rte_flow action type; -1 for out-of-range or unsupported */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+/* MARK -> flow stat id map; slot value is mark + 1, 0 means free */
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+/* Protects flow_stat_id_map */
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal flow_error into an rte_flow_error.
+ * A NULL error pointer is ignored.  Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+
+	switch (flow_error->type) {
+	case FLOW_ERROR_NONE:
+	case FLOW_ERROR_SUCCESS:
+		error->type = RTE_FLOW_ERROR_TYPE_NONE;
+		break;
+	default:
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+/*
+ * Map a flow MARK value to a flow stat id: hash into the map and
+ * linearly probe for a free slot.  The stored value is mark + 1 so
+ * that 0 means "free".  Caller must hold flow_lock.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+	uint32_t probes = 0;
+
+	/*
+	 * Bound the probing: when every slot was occupied the original
+	 * loop spun forever while the caller held flow_lock.  After a
+	 * full sweep, fall back to overwriting the current slot.
+	 */
+	while (flow_stat_id_map[flow_stat_id] &&
+			++probes < MAX_COLOR_FLOW_STATS)
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow stat id under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/*
+ * Release a flow stat id back to the map (caller must hold flow_lock).
+ * Out-of-range ids are silently ignored.
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time population of the rte_flow -> internal FLOW_* translation
+ * tables (elem_list, action_list and their debug-string counterparts).
+ * Idempotent: subsequent calls return immediately.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* byte-fill with 0xFF yields -1 in every int slot (unsupported) */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* byte-fill with 0xFF yields -1 in every int slot (unsupported) */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Interpret a raw packet byte sequence into flow_elem match elements
+ * (Ethernet -> VLAN(s) -> IPv4/IPv6 -> ICMP/TCP/UDP/SCTP -> optional
+ * GTPv1-U with extension headers).
+ *
+ * data:     packet bytes to interpret
+ * preserve: optional per-byte mask laid out like data (may be NULL)
+ * size:     number of valid bytes in data
+ * out:      receives the elements, terminated by FLOW_ELEM_TYPE_END;
+ *           assumes room for MAX_ELEMENTS entries -- TODO confirm
+ *
+ * Returns the number of elements written (including the terminator),
+ * or -1 when the bytes do not parse as a supported packet.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN (possibly stacked: 802.1Q / 802.1ad QinQ) */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3: ethertype must agree with the IP version nibble */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field (offset 9) */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field (offset 6) */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U rides on its well-known UDP destination port */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* extension length is in 4-octet units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero-length extension never advances
+				 * pkti, so next_ext would re-read the same
+				 * nonzero byte and this loop previously
+				 * spun forever on malformed input.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* last octet of the extension is "next" */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* trailing unparsed bytes are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Convert rte_flow attributes into the internal flow_attr form.
+ * A NULL attr leaves the attribute zeroed.  Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	struct flow_attr *dst = &attribute->attr;
+
+	memset(dst, 0x0, sizeof(*dst));
+
+	if (attr == NULL)
+		return 0;
+
+	dst->group = attr->group;
+	dst->priority = attr->priority;
+
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Translate an rte_flow action array into the filter-API action list used
+ * by the vswitch profile.
+ *
+ * A color/flow-stat id is returned through *flow_stat_id: it is taken from
+ * a MARK action when the caller supplied one, otherwise a MARK is
+ * synthesized just before END so the FPGA can keep per-flow (color)
+ * statistics.
+ *
+ * Returns 0 on success, -1 on an unknown/unsupported action or when more
+ * than max_elem entries would be written to action->flow_actions[].
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Sentinel meaning "no flow stat id assigned yet" */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here.
+			 * (A former "case -1:" branch was removed: type is
+			 * guaranteed >= 0 inside this if, so it was dead code.)
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default RSS hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				/* Convert the encap header template into match elements */
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Map the user's mark onto a color stat id */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+					/*
+					 * Bounds check before writing the moved
+					 * END entry (previously written
+					 * unconditionally, overflowing
+					 * flow_actions[] when aidx == max_elem).
+					 */
+					if (aidx == max_elem)
+						return -1;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Translate an rte_flow action array into the filter-API action list used
+ * by the inline profile.
+ *
+ * queue_offset is added to every QUEUE action index so VF/vDPA ports
+ * address their own queue range instead of the main PMD's queues 0..n.
+ *
+ * Returns 0 on success, a negative value on unsupported input or when more
+ * than max_elem entries would be written to action->flow_actions[].
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default RSS hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw header blob into match items */
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				/* decap->size is size_t -> %zu; item_count is int -> %d */
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %zu\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %d\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw header blob into match items */
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				/* encap->size is size_t -> %zu; item_count is int -> %d */
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %zu\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %d\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Rebase queue index into this port's queue range */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Static table of flow handles; a slot is taken by setting .used = 1 */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when 'flow' is a raw driver handle typecast to
+ * struct rte_flow *, i.e. it does not point into the static nt_flows[]
+ * table that holds "real" flow entries.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *table_begin = &nt_flows[0];
+	const void *table_end = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *p = flow;
+
+	return !(p >= table_begin && p <= table_end);
+}
+
+/*
+ * Convert generic rte_flow attr/items/actions into the driver's internal
+ * representation (attribute/match/action). For the vswitch profile a
+ * flow-stat id is returned through *flow_stat_id.
+ *
+ * Returns 0 on success; -1 with 'error' populated on failure.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	/*
+	 * NULL check must come before any use of 'dev'; it was previously
+	 * performed after 'dev' had already been dereferenced.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow_ops.destroy callback.
+ *
+ * Two handle kinds exist: pointers into nt_flows[] (with a stat-id slot to
+ * release) and raw driver handles typecast to struct rte_flow *.
+ *
+ * Returns 0 on success, the flow_destroy() error code otherwise.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/*
+		 * Raw driver handle: it does not point at struct rte_flow
+		 * storage, so no statistics fields may be touched through it.
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+	} else {
+		res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+		convert_error(error, &flow_error);
+
+		rte_spinlock_lock(&flow_lock);
+		delete_flow_stat_id_locked(flow->flow_stat_id);
+		flow->used = 0;
+		rte_spinlock_unlock(&flow_lock);
+
+		/* Clear the flow statistics if successfully destroyed */
+		if (res == 0) {
+			flow->stat_pkts = 0UL;
+			flow->stat_bytes = 0UL;
+			flow->stat_tcp_flags = 0;
+		}
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow_ops.validate callback: convert the generic flow description and
+ * let the filter layer check it without programming hardware.
+ *
+ * Returns 0 when the flow is valid, -EINVAL on conversion failure, or the
+ * negative flow_validate() result (with 'error' filled in).
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	/* static so the error text outlives this call (matches sibling ops) */
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	int res = flow_validate(dev->flw_dev, match.flow_elem,
+				action.flow_actions, &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow_ops.create callback.
+ *
+ * Inline-profile flows in group > 0 return the filter-layer handle
+ * directly (a "typecast" handle, see is_flow_handle_typecast()); all other
+ * flows are tracked in a slot of the static nt_flows[] table.
+ *
+ * Returns the flow handle, or NULL with 'error' populated on failure.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline, non-default group: no local bookkeeping, return raw handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim a free slot in nt_flows[] under the flow lock */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* HW create failed: release stat id and slot again */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (all-port) statistics refresh */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull accumulated RX/TX/color counters from the adapter statistics into
+ * this port's queue counters and the per-flow entries of nt_flows[].
+ *
+ * Rate limited: per-port work runs at most once per second, the global
+ * color-counter pass at most once per second across all ports.
+ * Takes hwlock, then p_nt_drv->stat_lck.
+ *
+ * Returns 0 on success (including rate-limited no-op), -1 on bad input.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/*
+	 * NOTE(review): '>' admits if_index == NUM_ADAPTER_PORTS_MAX as a
+	 * valid index — confirm the a_port_* arrays really have
+	 * NUM_ADAPTER_PORTS_MAX + 1 entries, otherwise this should be '>='.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	/* Unlock in acquisition order's reverse is not used here; keep as-is */
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow_ops.query callback. Only RTE_FLOW_ACTION_TYPE_COUNT is
+ * supported; it reports the per-flow packet/byte counters maintained by
+ * poll_statistics() and optionally resets them.
+ *
+ * NOTE(review): a NULL 'flow' is classified as a typecast handle by
+ * is_flow_handle_typecast() and rejected here, so the !flow branch below
+ * is effectively unreachable — confirm that is the intended behaviour.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Refresh the counters before reporting them */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/*
+ * Dump an rte_flow_tunnel to the debug log (DEBUGGING builds only).
+ */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	/*
+	 * tun_id is 64-bit; PRIx64 is correct on both 32- and 64-bit
+	 * targets (the former "%016lx" was wrong on ILP32).
+	 */
+	NT_LOG(DBG, FILTER, "    tun ID: %016" PRIx64 "\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * PMD-internal action template returned by ntnic_tunnel_decap_set().
+ * Slot [1] is filled in with the concrete decap action per call.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow_ops.tunnel_decap_set callback: hand back the PMD actions that
+ * implement decapsulation for the given tunnel type.
+ *
+ * Only VXLAN is supported; other tunnel types yield -ENOTSUP.
+ * Note: the returned array is the shared static _pmd_actions[] table.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN)
+		return -ENOTSUP;
+
+	_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* PMD-internal match item returned by ntnic_tunnel_match() */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow_ops.tunnel_match callback: hand back the single PMD item that
+ * matches tunneled traffic. Always succeeds; the tunnel argument is unused.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow_ops.get_restore_info callback.
+ *
+ * Decode the FDIR mark written by the FPGA into the mbuf
+ * (hi = port_id:stat_id, lo = stat_id) and reconstruct the VXLAN tunnel
+ * description the packet was decapped from.
+ *
+ * Returns 0 with 'info' filled in, -EINVAL when the mbuf carries no
+ * usable mark or the tunnel definition lookup fails.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		/* Top byte of fdir.hi carries the port, low 24 bits the stat id */
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed TTL/TOS: the original values are not recoverable here */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow_ops.tunnel_action_decap_release callback.
+ * The actions returned by ntnic_tunnel_decap_set() are static, so there
+ * is nothing to free — always succeeds.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow_ops.tunnel_item_release callback.
+ * The item returned by ntnic_tunnel_match() is static, so there is
+ * nothing to free — always succeeds.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow driver operations exposed by this PMD.
+ * .flush is not implemented (NULL); flows must be destroyed individually.
+ */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* A single (RTE flag, NT flag) correspondence used by the mapping table. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to a { RTE_<name>, NT_<name> } table entry. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Bidirectional mapping between DPDK RTE_ETH_RSS_* hash-field bits and
+ * their NT_ETH_RSS_* counterparts; searched linearly in both directions.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Map one RTE_ETH_RSS_* bit to its NT_ETH_RSS_* counterpart.
+ * Returns a pointer to the NT value inside the table, or NULL when the
+ * flag has no mapping.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rte_eth_rss_to_nt); ++i) {
+		if (rte_eth_rss_to_nt[i].first == rte_flag)
+			return &rte_eth_rss_to_nt[i].second;
+	}
+
+	return NULL; /* NOT found */
+}
+
+/*
+ * Reverse mapping: NT_ETH_RSS_* bit back to the RTE_ETH_RSS_* bit.
+ * Returns a pointer to the RTE value inside the table, or NULL when no
+ * entry matches.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rte_eth_rss_to_nt); ++i) {
+		if (rte_eth_rss_to_nt[i].second == nt_flag)
+			return &rte_eth_rss_to_nt[i].first;
+	}
+
+	return NULL; /* NOT found */
+}
+
+/*
+ * Translate a DPDK RSS hash-field bit mask into the NT field set.
+ * Each set bit is looked up in rte_eth_rss_to_nt; bits without a mapping
+ * are logged as unsupported and skipped, so the result only contains
+ * fields the hardware mapping knows about.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	/* 'uint' is non-standard C; use unsigned int for portability */
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				/* %u: the bit index is unsigned */
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate an NT RSS field set back into the DPDK RSS hash-field mask.
+ * Every NT bit is expected to map to an RTE bit (NT flags are a subset of
+ * the RTE options), which the assert documents.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	/* 'uint' is non-standard C; use unsigned int for portability */
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* Bit 31 mask: meter/profile ids with this bit set denote egress objects. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Look up a meter profile by id in the per-device list; NULL if absent. */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *p;
+
+	LIST_FOREACH(p, &dev_priv->mtr_profiles, next) {
+		if (p->profile_id == meter_profile_id)
+			return p;
+	}
+
+	return NULL;
+}
+
+/*
+ * Add a meter profile (vswitch profile).  Ingress metering is only
+ * supported on virtual ports; egress profiles (bit 31 of the id set) are
+ * accepted on any port type.  The profile is copied and kept in the
+ * per-device list.  Returns 0 or a negative rte_mtr error.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* "[%s:%u]" is file:line, so pass __FILE__ (was __func__ twice) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * Delete a meter profile by id.  Removes it from the per-device list and
+ * frees it; ENODEV when the id is unknown.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	/* "[%s:%u]" is file:line, so pass __FILE__ (was __func__ twice) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	/* NOTE(review): no check that the profile is still referenced by a
+	 * meter; presumably callers guarantee this — confirm.
+	 */
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Look up a meter object by id in the per-device list; NULL if absent. */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *m;
+
+	LIST_FOREACH(m, &dev_priv->mtrs, next) {
+		if (m->mtr_id == mtr_id)
+			return m;
+	}
+
+	return NULL;
+}
+
+/* Rate as an integer part plus a fractional part expressed in 1/1024 units. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts bytes/s to bytes/period in the form integer + 1/1024*fractional.
+ * The period depends on the clock frequency and other parameters which,
+ * combined, give the multiplier.  The resulting formula is:
+ *     f[bytes/period] = x[bytes/s] * period_ps / 10^12
+ * (the original comment said 10^-12; the code divides by 10^12)
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /* 10^12 picoseconds per second */
+
+	res.integer = dividend / divisor;
+	const uint64_t remainder = dividend % divisor;
+
+	/* scale the remainder to 1/1024 units */
+	res.fractional = 1024ull * remainder / divisor;
+	return res;
+}
+
+/*
+ * Bytes/s -> bytes/period for a physical port, period = 8 * 3333 ps.
+ * NOTE(review): the 3333 ps factor presumably derives from the FPGA TX
+ * clock — confirm against the hardware documentation.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/*
+ * Bytes/s -> bytes/period for a virtual port, period = 512 * 3333 ps
+ * (64x the physical-port period).  NOTE(review): confirm factor against
+ * the hardware documentation.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * Enable a meter: program the egress shaper (EPP, per vport or physical
+ * TX port) for egress profiles, or the ingress TX QoS (DBS) for ingress
+ * profiles.  The first ingress enable also programs the global QoS
+ * refresh rate.  Returns 0 or a negative rte_mtr error.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	static int ingress_initial;
+
+	/* "[%s:%u]" is file:line, so pass __FILE__ (was __func__ twice) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* a zero rate would disable shaping; force minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* a zero rate would disable shaping; force minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Disable ingress TX QoS (DBS) for this port: rate and burst set to 0. */
+static void disable(struct pmd_internals *dev_priv)
+{
+	/* "[%s:%u]" is file:line, so pass __FILE__ (was __func__ twice) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * Disable a meter: clear the egress shaper (EPP) for egress ids, or the
+ * ingress TX QoS (DBS) otherwise.  EINVAL when the meter id is unknown.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	/* "[%s:%u]" is file:line, so pass __FILE__ (was __func__ twice) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/* MTR object create */
+/*
+ * Create a meter object (vswitch path).  Ingress meters are rejected on
+ * physical ports; the referenced profile must already exist.  When
+ * params->meter_enable is set the meter is enabled immediately.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	/* "[%s:%u]" is file:line, so pass __FILE__ (was __func__ twice) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/* MTR object destroy */
+/*
+ * Destroy a meter object: clear its hardware QoS configuration (egress
+ * EPP or ingress DBS), unlink it from the per-device list and free it.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	/* "[%s:%u]" is file:line, so pass __FILE__ (was __func__ twice) */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		/* ENODEV for "not found", consistent with profile_delete
+		 * (was EEXIST, which means the opposite)
+		 */
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * Report metering capabilities for the inline (FLM) profile.
+ * Only color-blind, byte-mode, RFC 2698 trTCM meters are advertised, and
+ * only green packet/byte statistics are supported.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* NOTE(review): 0xfff << 0xf presumably mirrors the HW mantissa/
+	 * exponent rate encoding — confirm against hardware documentation.
+	 */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * Add an inline (FLM) meter profile.  Only RFC 2698 trTCM in byte mode is
+ * accepted, and committed and peak rates/bursts must be equal; the profile
+ * is then programmed via flow_mtr_set_profile().
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	/* 'error' was tagged __rte_unused but is used on every path below */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete an inline meter profile by zeroing its hardware configuration.
+ * (All three parameters were tagged __rte_unused although all are used.)
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	/* clear the profile: rate/burst all zero */
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Add an inline meter policy.  Only the fixed policy
+ * "green: pass, yellow: drop, red: drop" is accepted; the action lists
+ * are matched positionally against the few allowed encodings.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* green: accepted as END, VOID+END or PASSTHRU+END */
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* yellow: must be exactly DROP+END */
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	/* red: must be exactly DROP+END */
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete an inline meter policy.  Only range-checks the id; no hardware
+ * state is touched here.  'dev' is genuinely unused; 'policy_id' and
+ * 'error' were wrongly tagged __rte_unused although both are used.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * Create an inline (FLM) meter.  Only color-blind, enabled, shared meters
+ * with green-only stats are accepted; ids and referenced profile/policy
+ * ids are range-checked before offloading via flow_mtr_create_meter().
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Destroy an inline meter via flow_mtr_destroy_meter().
+ * ('error' was tagged __rte_unused although it is used on both error
+ * paths.)
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Adjust an offloaded meter's bucket value (exposed via stats_update).
+ * The caller must set bit 63 of 'adjust_value' as a command flag; the
+ * remaining low bits carry the value, which must fit in 32 bits.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* strip the command flag, keeping bits 0..62 */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Read (and optionally clear) meter statistics.  Only the green packet
+ * and byte counters are filled in; all other fields stay zero.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* Metering ops for the vswitch FPGA profile (no capabilities_get/stats). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* Metering ops for the inline (FLM) FPGA profile. */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_mtr ops selector: picks the vswitch or inline ops table based on
+ * the FPGA profile.  Returns -1 (with an error log) for unknown/capture
+ * profiles.  'ops' receives a const struct rte_mtr_ops pointer.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+int eth_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port state for one vDPA-managed VF; indexed by vhid. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;		/* vhost device id, set on new_device */
+	uint32_t index;		/* HW ring base index */
+	uint32_t host_id;	/* VF number */
+	uint32_t rep_port;	/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;		/* rte_vhost_driver_register() flags */
+	struct rte_pci_addr addr;
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;	/* number of entries used in vport[] */
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a vhost queue id on a vDPA device into HW queue info.
+ *
+ * @param vdpa_dev  vDPA device to look up in vport[]
+ * @param rx        non-zero: Rx queue set, zero: Tx queue set
+ * @param queue_id  queue index within the selected queue set
+ * @param hw_index  out: hardware ring index (port base index + queue_id)
+ * @param host_id   out: host (VF) id of the port
+ * @param rep_port  out: representor in-port override used on Tx
+ * @return 0 on success, -1 if the device or queue is not configured
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+			} else {
+				/*
+				 * Bug fix: the Tx path must validate and
+				 * report against txqs; it used rxqs before.
+				 */
+				if (queue_id >= vport[i].txqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+			}
+			/* Identical for Rx and Tx: offset from port base */
+			*hw_index = vport[i].index + queue_id;
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a vDPA port for a VF: record its queue layout in vport[] and
+ * start the vhost-user client for it.
+ *
+ * @param vdev             VF PCI device backing the port
+ * @param backing_devname  name used for logging only
+ * @param socket_path      vhost-user socket path (stored as ifname)
+ * @param index            HW ring base index for this port
+ * @param rxqs/txqs        number of Rx/Tx queues
+ * @param rep_port         in-port override used on Tx
+ * @param vhid             out: index of the new entry in vport[]
+ * @return 0 on success, -1 on failure
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/* Bug fix: guard against overflowing the fixed-size vport[] table */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA,
+		       "Cannot register more than %lu vDPA ports\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver for a registered vDPA port.
+ *
+ * NOTE(review): the function returns after cleaning up the FIRST entry
+ * with a non-empty ifname, so only one port is closed per call even
+ * though the loop scans the whole table — confirm this is intentional.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Mark the slot free so it is skipped next time */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Debug helper: print the name of every feature bit set in @features. */
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		const uint64_t mask = 1ULL << virt_features[i].id;
+
+		if ((features & mask) == mask)
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost new_device callback: bind the new vhost connection (vid) to its
+ * vdpa_port entry (matched by socket path), wait for the PMD instance to
+ * become ready, then record the negotiated virtio ring mode.
+ *
+ * @return 0 on success, -1 if no matching port, the instance never became
+ *         ready, or the negotiated features are unusable.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the vport slot registered for this socket path */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll for up to ~2 seconds (2000 x 1 ms) for the PMD instance */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	/* vDPA requires either IN_ORDER or PACKED ring to be negotiated */
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost destroy_device callback: log the teardown and set the matching
+ * PMD instance's virtual-port state back to NEGOTIATED_NONE (link down).
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	/* First pass: logging only */
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user lifecycle callbacks registered per socket in nthw_vdpa_start() */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register and start the vhost-user client for one vDPA port:
+ * register the socket, install the lifecycle callbacks, disable virtio
+ * features the datapath does not handle, and start the driver.
+ *
+ * @return 0 on success, -1 on any failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Mask out virtio offload features for this port */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		/* Fix: fatal error path (returns -1) — log at ERR, not INF */
+		NT_LOG(ERR, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+/* Look up HW queue index, host (VF) id and representor port for a queue. */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+/* Register a vDPA port for a VF and start its vhost-user client. */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+/* Detach and unregister vhost driver state for a registered vDPA port. */
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a VF's PCI vendor/device id pair to a printable adapter name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id != NT_HW_NAPATECH_PCI_VENDOR_ID)
+		return "Unknown";
+
+	switch (pci_dev->id.device_id) {
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:
+		return "NT200A02";
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:
+		return "NT50B01";
+	default:
+		return "Unknown";
+	}
+}
+
+/*
+ * PCI probe for Napatech VFs: first create the vDPA device for the VF,
+ * then create the DPDK ethdev VF interface.
+ *
+ * @return 0 on success, -1 if vDPA probing fails; otherwise the result
+ *         of nthw_create_vf_interface_dpdk().
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove for Napatech VFs: tear down vDPA first, then the ethdev. */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret = ntvf_vdpa_pci_remove(pci_dev);
+
+	if (ret != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI IDs of the Napatech VFs this driver binds to. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* VF PCI driver: creates a vDPA device plus a DPDK VF ethdev per VF. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+/* PCI probe/remove entry points for Napatech virtual functions. */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+/* VFIO helpers keyed by VF number. */
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow of one virtqueue: guest-physical layout plus runtime state. */
+struct vring_info {
+	uint64_t desc;		/* GPA of descriptor ring */
+	uint64_t avail;		/* GPA of available (driver) ring */
+	uint64_t used;		/* GPA of used (device) ring */
+	uint16_t size;		/* ring size in descriptors */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;		/* 0 = Rx queue, 1 = Tx queue */
+	struct nthw_virt_queue *p_vq;	/* HW queue handle; NULL if not created */
+
+	int enable;
+};
+
+/* Per-device virtqueue state negotiated with the vhost frontend. */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;	/* from rte_vhost_get_vring_num() */
+
+	/* rx/tx interleaved: even index = Rx, odd index = Tx */
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device context, kept on the global internal_list. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds cached from nt_vfio_setup() */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* Lifecycle flags, accessed atomically across threads */
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Node linking one ntvf_vdpa_internal into the global device list. */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+/* Global list of all probed vDPA VF devices, guarded by internal_list_lock */
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/*
+ * Scan the global device list for the entry owning @vdev.
+ * Returns the list node, or NULL when no entry matches.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *node;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(node, &internal_list, next)
+	{
+		if (vdev == node->internal->vdev) {
+			match = node;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Scan the global device list for the entry owning PCI device @pdev.
+ * Returns the list node, or NULL when no entry matches.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *node;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(node, &internal_list, next)
+	{
+		if (pdev == node->internal->pdev) {
+			match = node;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Attach the VF to VFIO and cache the container/group/device fds.
+ * All fds are initialized to -1 so a failed setup leaves them invalid.
+ *
+ * @return 0 on success, -1 if nt_vfio_setup() fails.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (do_map != 0) or unmap (do_map == 0) every guest memory region of
+ * the vhost device into/from the VF's VFIO container for DMA.
+ * The dma_mapped flag guards against double map/unmap.
+ *
+ * @return 0 on success, negative on failure.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Refuse a map when already mapped, or an unmap when not mapped */
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/*
+		 * Bug fix: the GPA conversion was the literal text "0xllx"
+		 * (no '%'), leaving three arguments for two conversions —
+		 * undefined behavior. Use a proper PRIX64 conversion.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address into the guest physical address of
+ * the vhost memory region containing it. Returns 0 when no region
+ * covers @hva or the memory table cannot be fetched.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+	uint32_t i;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+		const uint64_t start = reg->host_user_addr;
+
+		if (hva >= start && hva < start + reg->size) {
+			gpa = hva - start + reg->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Create the HW virtqueue backing vhost vring @vring: translate the
+ * desc/avail/used rings to guest physical addresses, look up the HW
+ * queue placement, and set up an Rx (even index) or Tx (odd index)
+ * queue in packed or split mode depending on negotiated features.
+ *
+ * @return 0 on success (also for unsupported split/no-IN_ORDER combos,
+ *         which are skipped with a warning), -1/negative on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* Translate all three rings to GPAs; 0 means "not found" */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even vring index = Rx queue, odd = Tx queue */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Receive virt queue */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets have been sent from
+					 * the VM on the LM source side. This pointer points to the
+					 * next free descr and may be pushed ahead by next flag and
+					 * if so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Start the vDPA datapath for a device: read the negotiated features and
+ * vring count, then map guest memory and create/enable virtqueues.
+ * INLINE profile enables up to two vrings here; other profiles create
+ * only vring 0 (the rest are enabled later via vring state changes).
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					/* vq_type: 0 = Rx, 1 = Tx */
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the vDPA datapath: save each vring's last avail/used indices back
+ * to vhost (for later restart/migration), then release the HW queues.
+ * Logging-dirty pages (VHOST_F_LOG_ALL) is not supported; only warned.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Hand the ring positions back to vhost before tearing down */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type: 0 = Rx, 1 = Tx */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Route every vring's callfd eventfd plus the device/config interrupt fd
+ * into the VFIO MSI-X vector table so guest notifications are delivered
+ * via interrupts. Returns 0 on success (also when falling back to
+ * poll-mode because too many vectors were requested), -1 on ioctl error.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0 carries the device/config interrupt */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/*
+	 * Fix: iterate one ring at a time. The previous "i += 2" pairing
+	 * also queried ring i + 1, which for an odd nr_vring asked the vhost
+	 * library for a ring past the last valid index.
+	 */
+	for (i = 0; i < nr_vring; i++) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Detach all MSI-X eventfds from the device via VFIO.
+ * No-op when interrupts were never enabled. Returns 0 on success, -1 on
+ * ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)irq_set_buf;
+	int len = sizeof(struct vfio_irq_set);
+	int ret;
+
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* count == 0 with DATA_NONE/ACTION_TRIGGER disables every vector */
+	irq_set->argsz = len;
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the desired state under internal->lock:
+ * start it when 'started' && 'dev_attached' but not yet 'running',
+ * stop it (plus disable interrupts and unmap DMA) in the opposite case.
+ * Returns 0 on success or when no transition applies, negative on error.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Fix: log before the goto - the NT_LOG statement was
+			 * previously placed after it and thus unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vhost dev_conf callback: a guest has attached to this vDPA device.
+ * Records the vhost id, flags the device as attached and kicks the
+ * datapath state machine. Returns 0 on success, -1 for an unknown device.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = entry->internal;
+	internal->vid = vid;
+	atomic_store(&internal->dev_attached, 1);
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost dev_close callback: the guest detached. Tears the datapath down
+ * through the state machine, then forgets the stale virt-queue handles.
+ * Returns 0 on success, -1 for an unknown device.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	uint32_t qidx;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = entry->internal;
+	atomic_store(&internal->dev_attached, 0);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* The queues were released by the stop path; drop the pointers */
+	hw = &internal->hw;
+	for (qidx = 0; qidx < hw->nr_vring; qidx++)
+		hw->vring[qidx].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost set_features callback. Only logs the negotiated feature set;
+ * returns 0 unless the guest negotiated VHOST_F_LOG_ALL (live migration
+ * dirty-page logging), which this driver does not support (-1).
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	/* NOTE(review): "%016lX" assumes 64-bit long for uint64_t; PRIx64
+	 * would be portable to 32-bit builds - confirm target platforms.
+	 */
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/*
+ * vhost callback: hand out the VFIO group fd associated with this vid.
+ * Returns the fd, or -1 when the vDPA device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+	int fd;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	fd = entry->internal->vfio_group_fd;
+	LOG_FUNC_LEAVE();
+	return fd;
+}
+
+/*
+ * vhost callback: hand out the VFIO device fd associated with this vid.
+ * Returns the fd, or -1 when the vDPA device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+	int fd;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	fd = entry->internal->vfio_dev_fd;
+	LOG_FUNC_LEAVE();
+	return fd;
+}
+
+/*
+ * vDPA op: report how many queue pairs this device exposes.
+ * Writes the count into *queue_num; returns 0, or -1 for unknown devices.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the virtio-net feature bits this device supports.
+ * Writes the mask into *features; returns 0, or -1 on an unknown device
+ * or a NULL output pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the supported vhost-user protocol feature bits.
+ * Writes the constant mask into *features; returns 0, or -1 when the
+ * output pointer is NULL.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Final queue bring-up: enable MSI-X delivery through VFIO, then switch
+ * on all Rx/Tx virt-queues. Returns 0 on success, the interrupt-setup
+ * error code otherwise.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Fix: use the driver logger instead of a bare printf */
+		NT_LOG(ERR, VDPA,
+		       "%s: enabling interrupts via VFIO failed\n", __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings: even index = Rx, odd index = Tx */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+
+/*
+ * vhost set_vring_state callback: enable or disable one vring.
+ *
+ * Already-created queues are toggled via the NTHW enable/disable calls;
+ * a first-time enable triggers DMA mapping and vring creation. Interrupt
+ * setup and (for non-inline FPGAs) queue enabling is deferred until the
+ * last vring comes up. Returns 0 on success, -1 on invalid device/index.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx + Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	/* The inline FPGA profile defers queue setup differently below */
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type 0 = Rx, otherwise Tx */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* First enable for this vring: map DMA and create it */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA driver operations registered with the vhost library.
+ * Unimplemented callbacks (migration_done, get_notify_area) are NULL.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO, register with the
+ * vhost/vDPA framework and add the device to the internal list.
+ * Returns 0 on success, -1 on failure (allocations are released).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fix: was "return -1", which leaked 'list' and 'internal' */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, unmap the device, release VFIO
+ * resources, unregister from vhost/vDPA and free the per-device state.
+ * Returns 0 on success, -1 when the device is unknown.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath() take the stop path */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive a unique per-device index from the PCI address:
+ * bits [7:3] come from the device id, bits [2:0] from the function.
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	int devid = pdev->addr.devid & 0x1f;
+	int func = pdev->addr.function & 0x7;
+
+	return (devid << 3) + func;
+}
+
+/* Internal API */
+
+/* Per-VF VFIO state: container/group/device fds plus the IOVA cursor */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;	/* next IOVA to hand out; advanced 1G per map */
+};
+
+/* One slot per possible vf_num (see nt_vfio_vf_num: 8-bit range) */
+static struct vfio_dev vfio_list[256];
+
+/* Look up per-VF state; returns NULL when vf_num is outside [0, 255] */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 || vf_num > 255)
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+			       &iommu_group_num);
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Tear down the VFIO container created for this VF in nt_vfio_setup().
+ * Returns 0 on success, -1 for an invalid vf_num.
+ *
+ * NOTE(review): for vf_num 0 container_fd is RTE_VFIO_DEFAULT_CONTAINER_FD
+ * (the setup error path deliberately avoids destroying it); destroying it
+ * here looks unintended - confirm rte_vfio_container_destroy() semantics.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host-virtual buffer into the VF's VFIO container at the next free
+ * IOVA. Buffers of exactly 1 GB are mapped from their 1 GB-aligned base
+ * (hugepage case) and the caller receives base IOVA + offset in
+ * *iova_addr. The IOVA cursor advances 1 GB per call.
+ * Returns 0 on success, -1 on error.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fix: format/argument mismatches - a void * was passed for PRIX64
+	 * and a uint64_t for %d; cast the pointer and print size as PRIu64.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo an nt_vfio_dma_map(): recompute the 1 GB-aligned base/IOVA for
+ * hugepage-sized buffers and remove the mapping from the VF's container.
+ * A container_fd of -1 (never set up) is treated as success.
+ * Returns 0 on success, -1 on error.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		/* Mirror the map path: align base down, shift IOVA back */
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Remove a guest-physical IOVA mapping from the VF's container.
+ * Returns 0 on success, -1 on error.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+	int res;
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	res = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					   iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       res, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for vf_num, or -1 for an invalid id */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for vf_num, or -1 for an invalid id */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for vf_num, or -1 for an invalid id */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* NOTE(review): RTE_INIT is normally used as "RTE_INIT(fn) { ... }";
+ * confirm this "declaration then separate definition" form expands to a
+ * valid constructor registration.
+ */
+RTE_INIT(nt_vfio_init);
+
+static void nt_vfio_init(void)
+{
+	/* Publish the VFIO DMA map/unmap hooks to the common nt_util layer */
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* One xstats table entry: display name plus where to read the counter */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source;	/* 1 = RX port counters, 2 = TX port counters, 3 = FLM */
+	unsigned int offset;	/* byte offset of the u64 counter in its struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	/* source 1 = RX counters, source 2 = TX counters */
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "rx_dropped_packets", 1,
+	  offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "rx_qos_dropped_bytes", 1,
+	  offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "rx_qos_dropped_packets", 1,
+	  offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "tx_dropped_packets", 2,
+	  offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "tx_qos_dropped_bytes", 2,
+	  offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "tx_qos_dropped_packets", 2,
+	  offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	/* RX RMON counters (source 1 = per-port RX counter block) */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1,
+	  offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1,
+	  offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1,
+	  offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1,
+	  offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1,
+	  offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1,
+	  offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1,
+	  offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1,
+	  offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1,
+	  offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1,
+	  offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1,
+	  offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1,
+	  offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	/* TX RMON counters (source 2 = per-port TX counter block) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2,
+	  offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2,
+	  offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2,
+	  offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2,
+	  offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2,
+	  offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2,
+	  offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2,
+	  offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2,
+	  offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2,
+	  offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 counters (source 3 = FLM block, not indexed per port) */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3,
+	  offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3,
+	  offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3,
+	  offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3,
+	  offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3,
+	  offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3,
+	  offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3,
+	  offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3,
+	  offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3,
+	  offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3,
+	  offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3,
+	  offsetof(struct flm_counters_v1, prb_ignore) },
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	/* RX RMON counters (source 1 = per-port RX counter block) */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1,
+	  offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1,
+	  offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1,
+	  offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1,
+	  offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1,
+	  offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1,
+	  offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1,
+	  offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1,
+	  offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1,
+	  offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1,
+	  offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1,
+	  offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1,
+	  offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1,
+	  offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	/* TX RMON counters (source 2 = per-port TX counter block) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2,
+	  offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2,
+	  offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2,
+	  offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2,
+	  offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2,
+	  offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2,
+	  offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2,
+	  offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2,
+	  offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2,
+	  offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2,
+	  offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 counters (source 3 = FLM block, not indexed per port) */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3,
+	  offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3,
+	  offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3,
+	  offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3,
+	  offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3,
+	  offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3,
+	  offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3,
+	  offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3,
+	  offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3,
+	  offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3,
+	  offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3,
+	  offsetof(struct flm_counters_v1, prb_ignore) },
+
+	/* FLM 0.20 counters */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{ "flm_count_cuc_start", 3,
+	  offsetof(struct flm_counters_v1, cuc_start) },
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ *
+ * NTHW_XSTATS_SIZE is the larger of the virt and cap (v2) table sizes, so
+ * one row holds the baseline for whichever table is active (the v1 table
+ * is a subset of v2).  Baselines are captured by nthw_xstats_reset() and
+ * subtracted in nthw_xstats_get*(); FLM baselines are kept at port index 0.
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with the stat mutex locked
+ */
+/*
+ * Fill 'stats' with up to 'n' extended statistics for 'port', reported
+ * relative to the baselines captured by nthw_xstats_reset().
+ *
+ * Table selection: vswitch mode uses the virt names; capture/inline mode
+ * uses the v1 names when the FLM module version is below 0.18 and the v2
+ * names otherwise.  FLM counters are not per-port: their baseline lives at
+ * port index 0 and they read 0 when no FLM block is present (vswitch).
+ *
+ * Returns the number of entries written to 'stats'.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n && i < nb_names; i++) {
+		stats[i].id = i;
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			stats[i].value =
+				*((uint64_t *)&rx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 2:
+			/* TX stat */
+			stats[i].value =
+				*((uint64_t *)&tx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 3:
+			/* FLM stat: global, baseline kept at port index 0 */
+			if (flm_ptr) {
+				stats[i].value =
+					*((uint64_t *)&flm_ptr[names[i].offset]) -
+					nthw_xstats_reset_val[0][i];
+			} else {
+				stats[i].value = 0;
+			}
+			break;
+		default:
+			stats[i].value = 0;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*
+ * Fill 'values' with the xstats selected by 'ids' for 'port', relative to
+ * the baselines captured by nthw_xstats_reset().  Out-of-range ids are
+ * skipped (their 'values' slot is left untouched).
+ *
+ * Table selection matches nthw_xstats_get().  FLM baselines are kept at
+ * port index 0 since the FLM block is not per-port.
+ *
+ * Returns the number of valid ids processed.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+	int count = 0;
+
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] < nb_names) {
+			switch (names[ids[i]].source) {
+			case 1:
+				/* RX stat */
+				values[i] =
+					*((uint64_t *)&rx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 2:
+				/* TX stat */
+				values[i] =
+					*((uint64_t *)&tx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 3:
+				/* FLM stat: global, baseline at port index 0 */
+				if (flm_ptr) {
+					values[i] =
+						*((uint64_t *)&flm_ptr
+						  [names[ids[i]].offset]) -
+						nthw_xstats_reset_val[0][ids[i]];
+				} else {
+					values[i] = 0;
+				}
+				break;
+			default:
+				values[i] = 0;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * Capture the current counter values as the reset baseline for 'port'.
+ * Subsequent nthw_xstats_get*() calls report values relative to this
+ * snapshot.  FLM baselines are stored at port index 0 (the FLM block is
+ * not per-port), and "flm_count_current" keeps its old baseline since
+ * resetting it makes no sense (see inline comment).
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_ptr[names[i].offset]);
+			break;
+		case 2:
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_ptr[names[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM stat
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (flm_ptr && strcmp(names[i].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_ptr[names[i].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked
+ */
+/*
+ * Copy the active xstats name table into 'xstats_names'.
+ * If 'xstats_names' is NULL, returns the number of available names;
+ * otherwise returns the number of names copied (bounded by 'size').
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *name_table;
+	uint32_t name_count;
+	unsigned int idx;
+	int filled = 0;
+
+	/* Select the table the same way the stat readers do */
+	if (is_vswitch) {
+		name_table = nthw_virt_xstats_names;
+		name_count = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		name_table = nthw_cap_xstats_names_v1;
+		name_count = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		name_table = nthw_cap_xstats_names_v2;
+		name_count = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (!xstats_names)
+		return name_count;
+
+	for (idx = 0; idx < size && idx < name_count; idx++) {
+		strlcpy(xstats_names[idx].name, name_table[idx].name,
+			sizeof(xstats_names[idx].name));
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Copy the names of the xstats selected by 'ids' into 'xstats_names'.
+ * If 'xstats_names' is NULL, returns the total number of available names.
+ * Out-of-range ids leave their 'xstats_names' slot untouched.
+ *
+ * NOTE(review): 'count' is incremented for every requested id, including
+ * out-of-range ones, so the return equals 'size' — unlike
+ * nthw_xstats_get_by_id(), which counts only valid ids.  Confirm which
+ * semantics the ethdev layer expects.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+		}
+		count++;
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v8 8/8] net/ntnic: adds socket connection to PMD
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-29  8:15   ` [PATCH v8 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-29  8:15   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29  8:15 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5356 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	const char *param;	/* command/parameter token this entry matches */
+	struct func_s *sub_funcs;	/* nested parameter table, or NULL */
+	/* handler invoked when this token is the final one (may be NULL) */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply for 'module'/'err_code'.
+ *
+ * Reply layout: the first 4 bytes (written as the "----" placeholder by
+ * the sprintf below) are overwritten with the numeric error code, followed
+ * by "<module>:<error text>" and a terminating NUL.  *len is set to the
+ * full reply length, or left 0 if 'data' is NULL or allocation fails.
+ * Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4 code bytes + module + ':' + error text + NUL */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal status reply carrying only the 32-bit reply code.
+ * *len stays 0 unless the 4-byte reply buffer could be allocated.
+ * Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	uint32_t *reply = malloc(sizeof(*reply));
+
+	*data = (char *)reply;	/* NULL on allocation failure, as before */
+	if (reply) {
+		*reply = (uint32_t)code;
+		*len = sizeof(*reply);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Parse the comma-separated 'function' string and dispatch to the matching
+ * entry in 'func_list'.  Each token selects an entry: entries with
+ * 'sub_funcs' recurse one level deeper on the remaining text, entries with
+ * 'func' are invoked with the remaining text as their parameter string.
+ * Returns the handler's result, or REQUEST_ERR via ntconn_error() when the
+ * token is missing, unrecognized, or the table entry has no handler.
+ *
+ * NOTE(review): strtok() keeps static state and is not reentrant; if
+ * requests can be parsed from more than one thread this should use
+ * strtok_r() — confirm the server threading model.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	/* Consume this token (and its separator) from the command length */
+	hdr->len -= strlen(tok) + 1;
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
/* Return values used by module request handlers */
#define REQUEST_OK 0
#define REQUEST_ERR -1

/*
 * Operations table a module registers with the ntconnect server,
 * see register_ntconn_mod().
 */
typedef struct ntconn_api_s {
	/*
	 * Name specifying this module. This name is used in the request string
	 */
	const char *module;
	/*
	 * The version of this module integration
	 */
	uint32_t version_major;
	uint32_t version_minor;
	/*
	 * The request function:
	 * hdl       : pointer to the context of this instance.
	 * client_id : identifying the client. To be able to manage client specific data/state.
	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
	 *             then be 'get,interface,port0,link_speed'.
	 * hdr       : header for length of command string and length of binary blob.
	 *             The command string will start at "*data" and will have the length hdr->len.
	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
	 *             hdr->blob_len.
	 * data      : pointer to the resulting data. Typically this will be allocated.
	 * len       : length of the data in the reply.
	 *
	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
	 *             can contain an error text describing the failure
	 */
	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
		       char *function, char **data, int *len);
	/*
	 * After each request call, and when 'len' returns > 0, this function is called
	 * after sending reply to client.
	 * hdl       : pointer to the context of this instance.
	 * data      : the data pointer returned in the request function
	 */
	void (*free_data)(void *hdl, char *data);
	/*
	 * Clean up of client specific data allocations. Called when client disconnects from server
	 * hdl       : pointer to the context of this instance.
	 * client_id : identifying the client.
	 */
	void (*client_cleanup)(void *hdl, int client_id);
} ntconnapi_t;
+
/*
 * ntconn error: error code and its human readable text
 */
typedef struct ntconn_err_s {
	uint32_t err_code;	/* enum ntconn_err_e value */
	const char *err_text;
} ntconn_err_t;

/* Look up the table entry for err_code (falls back to "Internal error") */
const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);

/* One registered module instance, linked into the server's module list */
typedef struct ntconn_mod_s {
	void *hdl;		/* module private context, passed to all ops */
	struct pci_id_s addr;	/* PCI address the module is registered under */
	const ntconnapi_t *op;	/* module operations table */

	/* NOTE(review): presumably serializes requests into this module - confirm at call sites */
	pthread_mutex_t mutex;
	struct ntconn_mod_s *next;
} ntconn_mod_t;

/* Global ntconnect server state */
struct ntconn_server_s {
	int serv_fd;	/* server socket file descriptor */
	int running;	/* run flag for the server loop */
	/*
	 * list of different pci_ids registered aka SmartNICs
	 */
	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
	cpu_set_t cpuset;	/* cpu set handed to ntconnect_init() */
};

int ntconn_server_register(void *server);

int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
			const ntconnapi_t *op);
int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
#include <stdint.h>	/* system header: use <>, not "" (local-dir lookup) */
/*
 * NtConnect API
 */

/* Path of the UNIX domain socket the ntconnect server listens on */
#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"

/* Error codes carried in error replies, see get_ntconn_error() */
enum ntconn_err_e {
	NTCONN_ERR_CODE_NONE = 0U,
	NTCONN_ERR_CODE_INTERNAL_ERROR,
	NTCONN_ERR_CODE_INVALID_REQUEST,
	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
	NTCONN_ERR_CODE_NO_DATA,
	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
};
+
/* Module specific status codes returned in reply payloads */
enum ntconn_reply_code_e {
	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
};

/* Message tags identifying the frame type on the socket */
enum {
	NTCONN_TAG_NONE,
	NTCONN_TAG_REQUEST,
	NTCONN_TAG_REPLY,
	NTCONN_TAG_ERROR
};

#define MESSAGE_BUFFER 256
#define MAX_ERR_MESSAGE_LENGTH 256

/* Error reply payload */
struct reply_err_s {
	enum ntconn_err_e err_code;
	char msg[MAX_ERR_MESSAGE_LENGTH];
};

#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
/* Wire header preceding every request/reply */
struct ntconn_header_s {
	uint16_t tag;		/* NTCONN_TAG_* */
	uint16_t len;		/* length of the command string */
	uint32_t blob_len;	/* length of the trailing binary blob */
};
+
/*
 * PCI address packed into a single 64-bit id; the anonymous struct
 * members overlay pci_id, so both views describe the same address.
 */
struct pci_id_s {
	union {
		uint64_t pci_id;
		struct {
			uint32_t domain;
			uint8_t bus;
			uint8_t devid;
			uint8_t function;
			uint8_t pad;	/* keeps the overlay 8 bytes */
		};
	};
};

/* Split a 64-bit version value into 32-bit major/minor halves */
#define VERSION_HI(version) ((unsigned int)((version) >> 32))
#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))

/*
 * Binary interface description for ntconnect module replies
 */

/*
 * server get,nic_pci_ids
 */
#define MAX_PCI_IDS 16
#define NICS_PCI_ID_LEN 12	/* strlen("0000:00:00.0") */

/* Reply: NUL-terminated "dddd:bb:dd.f" strings for each registered NIC */
struct ntc_nic_pci_ids_s {
	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
	int num_nics;	/* number of valid entries in nic_pci_id[] */
};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
/*
 * adapter get,interfaces
 */
/* Link speed reported per interface */
enum port_speed {
	PORT_LINK_SPEED_UNKNOWN,
	PORT_LINK_SPEED_NONE_REPORTED,
	PORT_LINK_SPEED_10M,
	PORT_LINK_SPEED_100M,
	PORT_LINK_SPEED_1G,
	PORT_LINK_SPEED_10G,
	PORT_LINK_SPEED_25G,
	PORT_LINK_SPEED_40G,
	PORT_LINK_SPEED_50G,
	PORT_LINK_SPEED_100G,
};

/* Physical/virtual state of a port */
enum port_states {
	PORT_STATE_DISABLED,
	PORT_STATE_NIM_PRESENT,
	PORT_STATE_NIM_ABSENT,
	PORT_STATE_VIRTUAL_UNATTACHED,
	PORT_STATE_VIRTUAL_SPLIT,
	PORT_STATE_VIRTUAL_PACKED,
	PORT_STATE_VIRTUAL_RELAY,
};

/* Link status */
enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };

/* Role of the port (physical, LAG member or virtual) */
enum port_type {
	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
	/* Link aggregated phy port in active/active LAG configuration */
	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
	PORT_TYPE_VIRT,
	PORT_TYPE_LAST
};
+
/*
 * NIM (transceiver module) identifiers.
 * NOTE(review): values appear to follow the standard SFF transceiver
 * identifier codes - confirm against SFF-8024 before relying on them.
 */
enum nim_identifier_e {
	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
	NIM_GBIC = 0x01, /* Nim type = GBIC */
	NIM_FIXED = 0x02, /* Nim type = FIXED */
	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
	NIM_XFP = 0x06, /* Nim type = XFP */
	NIM_XFF = 0x07, /* Nim type = XFF */
	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
	NIM_XPAK = 0x09, /* Nim type = XPAK */
	NIM_X2 = 0x0A, /* Nim type = X2 */
	NIM_DWDM = 0x0B, /* Nim type = DWDM */
	NIM_QSFP = 0x0C, /* Nim type = QSFP */
	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
};
+
/*
 * Port types
 * NOTE(review): the list looks append-only (deprecated entries are kept
 * and aliased rather than removed) - confirm before reordering, since
 * these values are part of the binary reply interface.
 */
enum port_type_e {
	PORT_TYPE_NOT_AVAILABLE =
		0, /* The NIM/port type is not available (unknown) */
	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
	PORT_TYPE_RJ45, /* RJ45 type */
	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
	PORT_TYPE_SFP_SX, /* SFP SX */
	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
	PORT_TYPE_SFP_LX, /* SFP LX */
	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
	PORT_TYPE_SFP_ZX, /* SFP ZX */
	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
	PORT_TYPE_SFP_CU, /* SFP copper */
	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
	PORT_TYPE_XFP, /* XFP */
	PORT_TYPE_XPAK, /* XPAK */
	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
	PORT_TYPE_SFP_PLUS, /* SFP+ type */
	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
	PORT_TYPE_QSFP_PLUS, /* QSFP type */
	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
	PORT_TYPE_CFP4, /* CFP4 type */
	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
	PORT_TYPE_QSFP28, /* QSFP28 type */
	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	PORT_TYPE_QSFP_PLUS_4X10,
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
	PORT_TYPE_QSFP_PASSIVE_DAC =
		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
	PORT_TYPE_QSFP_ACTIVE_DAC =
		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
	PORT_TYPE_SFP_28, /* SFP28 type */
	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
	PORT_TYPE_SFP_FX, /* SFP FX */
	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
};
+
/* 6-byte MAC address */
struct mac_addr_s {
	uint8_t addr_b[6];
};

/* Link lengths supported by the NIM per fiber/copper medium */
struct nim_link_length_s {
	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
	 * length is >= 65535 m
	 */
	uint16_t sm;
	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
	uint16_t copper; /* NIM link length (in meters) supported copper */
};

/* NIM identification data */
struct nim_data_s {
	uint8_t nim_id;		/* NOTE(review): presumably enum nim_identifier_e */
	uint8_t port_type;	/* NOTE(review): presumably enum port_type_e */
	char vendor_name[17];	/* NUL-terminated (16 chars + NUL) */
	char prod_no[17];
	char serial_no[17];
	char date[9];
	char rev[5];
	uint8_t pwr_level_req;
	uint8_t pwr_level_cur;
	struct nim_link_length_s link_length;
};

/* One sensor reading with its min/max range */
struct sensor {
	uint8_t sign;	/* NOTE(review): presumably marks signed readings - confirm */
	uint8_t type;
	uint32_t current_value;
	uint32_t min_value;
	uint32_t max_value;
	char name[50];
};

/* Reply: sensor counts for the adapter and each of up to 8 ports */
struct ntc_sensors_s {
	uint16_t adapter_sensors_cnt;
	uint16_t ports_cnt;
	uint16_t nim_sensors_cnt[8];
	char adapter_name[24];
};

#define MAX_RSS_QUEUES 128

/* Direction of a queue attached to an interface */
enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };

/* One queue attachment: direction and queue index */
struct queue_s {
	enum queue_dir_e dir;
	int idx;
};
+
/* Per-interface description returned by "adapter get,interfaces" */
struct ntc_interface_s {
	uint8_t port_id;
	enum port_type type;
	enum port_link link;
	enum port_states port_state;
	enum port_speed port_speed;
	struct pci_id_s pci_id;
	struct mac_addr_s mac;
	struct nim_data_s nim_data;
	uint16_t mtu;
	/* attached queues */
	struct {
		struct queue_s queue[MAX_RSS_QUEUES];
		int num_queues;	/* valid entries in queue[] */
	};
};

/*
 * adapter get,interfaces
 */
struct ntc_interfaces_s {
	int final_list;	/* NOTE(review): presumably non-zero once the list is complete - confirm */
	uint8_t nb_ports;	/* number of entries in intf[] */
	struct ntc_interface_s intf[];	/* flexible array member */
};
+
/*
 * adapter get,info
 */
struct ntc_adap_get_info_s {
	/*
	 * NUL-terminated firmware version string.
	 * Fixed from 'char *fw_version[32]' (an array of 32 pointers): this
	 * struct is part of the binary reply carried over the ntconnect
	 * socket, where pointer values are meaningless to the receiver;
	 * a 32-byte character buffer is what is actually exchanged.
	 */
	char fw_version[32];
};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
/*
 * Create structures allocating the space to carry through ntconnect interface
 */
#define MAX_FLOW_STREAM_ELEM 16
#define MAX_FLOW_STREAM_QUERY_DATA 1024
#define MAX_FLOW_STREAM_ERROR_MSG 128
#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
#define MAX_FLOW_STREAM_COUNT_ACTIONS 4

#define MAX_PATH_LEN 128

/* Flow module error codes (module-specific errors start at 0x100) */
enum ntconn_flow_err_e {
	NTCONN_FLOW_ERR_NONE = 0,
	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
	NTCONN_FLOW_ERR_INVALID_PORT,
	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
	NTCONN_FLOW_ERR_TO_MANY_FLOWS,	/* sic: "TO_MANY" means too many flows */
	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
	NTCONN_FLOW_ERR_NO_VF_QUEUES,
};
+
/*
 * Flattened, fixed-size copy of one flow element's spec or mask so it can
 * be carried by value over the ntconnect socket.
 */
struct flow_elem_types_s {
	int valid;	/* NOTE(review): presumably set when the union holds data - confirm */
	union {
		int start_addr;
		struct flow_elem_eth eth;
		struct flow_elem_vlan vlan[2];
		struct flow_elem_ipv4 ipv4;
		struct flow_elem_ipv6 ipv6;
		struct flow_elem_sctp sctp;
		struct flow_elem_tcp tcp;
		struct flow_elem_udp udp;
		struct flow_elem_icmp icmp;
		struct flow_elem_vxlan vxlan;
		struct flow_elem_port_id port_id;
		struct flow_elem_tag tag;
	} u;
};

/* Value-copy of a flow element: type plus spec and mask copies */
struct flow_elem_cpy {
	enum flow_elem_type type; /* element type */
	struct flow_elem_types_s spec_cpy;
	struct flow_elem_types_s mask_cpy;
};
+
/* Value-copy of a VXLAN encap action */
struct flow_action_vxlan_encap_cpy {
	/* Encapsulating vxlan tunnel definition */
	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
};

/* Value-copy of an RSS action with its queue list inlined */
struct flow_action_rss_cpy {
	struct flow_action_rss rss;
	uint16_t cpy_queue[FLOW_MAX_QUEUES];
};

#define MAX_ACTION_ENCAP_DATA 512
/* Value-copy of a raw decap action */
struct flow_action_decap_cpy {
	uint8_t data[MAX_ACTION_ENCAP_DATA];
	size_t size;	/* valid bytes in data[] */
	struct flow_elem_cpy item_cpy
		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
	int item_count;
};

/* Value-copy of a raw encap action */
struct flow_action_encap_cpy {
	uint8_t data[MAX_ACTION_ENCAP_DATA];
	size_t size;	/* valid bytes in data[] */
	struct flow_elem_cpy item_cpy
		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
	int item_count;
};
+
/* Flattened, fixed-size copy of one flow action's configuration */
struct flow_action_types_s {
	int valid;	/* NOTE(review): presumably set when the union holds data - confirm */
	union {
		int start_addr;
		struct flow_action_rss_cpy rss;
		struct flow_action_push_vlan vlan;
		struct flow_action_set_vlan_vid vlan_vid;
		struct flow_action_vxlan_encap_cpy vxlan;
		struct flow_action_count count;
		struct flow_action_mark mark;
		struct flow_action_port_id port_id;
		struct flow_action_tag tag;
		struct flow_action_queue queue;
		struct flow_action_decap_cpy decap;
		struct flow_action_encap_cpy encap;
		struct flow_action_jump jump;
		struct flow_action_meter meter;
	} u;
};

/* Value-copy of a flow action: type plus configuration copy */
struct flow_action_cpy {
	enum flow_action_type type;
	struct flow_action_types_s conf_cpy;
};

/* Request payload: query 'action' of flow handle 'flow' on 'port' */
struct query_flow_ntconnect {
	uint8_t port;
	struct flow_action_cpy action;
	uint64_t flow;
};
+
/* Request payload: create a flow on 'port' */
struct create_flow_ntconnect {
	uint8_t port;
	uint8_t vport;
	struct flow_attr attr;
	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
};

/* Request payload: destroy flow handle 'flow' on 'port' */
struct destroy_flow_ntconnect {
	uint8_t port;
	uint64_t flow;
};

#define ERR_MSG_LEN 128LLU

/* Reply: queues assigned to a port */
struct flow_setport_return {
	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
	uint8_t num_queues;	/* valid entries in queues[] */
};

/* Reply: error type/text plus status */
struct flow_error_return_s {
	enum flow_error_e type;
	char err_msg[ERR_MSG_LEN];
	int status;
};

/* Reply: handle of a newly created flow */
struct create_flow_return_s {
	uint64_t flow;
};

/* Reply: result of flow validation */
struct validate_flow_return_s {
	int status;
};

/* Reply: flow query result with trailing variable-length data */
struct query_flow_return_s {
	enum flow_error_e type;
	char err_msg[ERR_MSG_LEN];
	int status;
	uint32_t data_length;	/* bytes in data[] */
	uint8_t data[];		/* flexible array member */
};

/* Generic flow operation reply */
struct flow_return_s {
	enum flow_error_e type;
	char err_msg[ERR_MSG_LEN];
	int status;
};

/* Flow error copied across the ntconnect socket */
struct flow_error_ntconn {
	enum flow_error_e type;
	char message[ERR_MSG_LEN];
};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
#define FLOW_COOKIE 0x12344321

/*
 * Create structures allocating the space to carry through ntconnect interface
 */

#define MAX_PATH_LEN 128	/* also defined (same value) in ntconnect_api_flow.h */

/* Meter module error codes (module-specific errors start at 0x100) */
enum ntconn_meter_err_e {
	NTCONN_METER_ERR_NONE = 0,
	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
	NTCONN_METER_ERR_INVALID_PORT,
	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
	NTCONN_METER_ERR_PROFILE_ID,
	NTCONN_METER_ERR_POLICY_ID,
	NTCONN_METER_ERR_METER_ID,
};

/* Sub-commands handled by the meter module */
enum ntconn_meter_command_e {
	UNKNOWN_CMD,
	ADD_PROFILE,
	DEL_PROFILE,
	ADD_POLICY,
	DEL_POLICY,
	CREATE_MTR,
	DEL_MTR
};

#define ERR_MSG_LEN 128LLU	/* also defined (same value) in ntconnect_api_flow.h */

/* Reply: rte_mtr error type and status plus message text */
struct meter_error_return_s {
	enum rte_mtr_error_type type;
	int status;
	char err_msg[ERR_MSG_LEN];
};
+
/*
 * Profile/policy/meter setup request; which union member is used depends
 * on the ntconn_meter_command_e being executed.
 */
struct meter_setup_s {
	uint8_t vport;
	uint32_t id;	/* profile, policy or meter id, per command */
	int shared;
	union {
		struct rte_mtr_meter_profile profile;
		struct {
			struct rte_mtr_meter_policy_params policy;
			struct rte_flow_action actions_green[2];
			struct rte_flow_action actions_yellow[2];
			struct rte_flow_action actions_red[2];
		} p;
		struct rte_mtr_params mtr_params;
	};
};

/* Request: read meter statistics */
struct meter_get_stat_s {
	uint8_t vport;
	uint32_t mtr_id;
	int clear;	/* NOTE(review): presumably clears counters after reading - confirm */
};

/* Reply: meter statistics */
struct meter_return_stat_s {
	struct rte_mtr_stats stats;
	uint64_t stats_mask;	/* which fields of 'stats' are valid */
};

/* Pointer-based variant of meter_setup_s for in-process use */
struct meter_setup_ptr_s {
	uint32_t id;
	int shared;
	union {
		struct rte_mtr_meter_profile *profile;
		struct rte_mtr_meter_policy_params *policy;
		struct rte_mtr_params *mtr_params;
	};
};

/* Generic meter operation reply */
struct meter_return_s {
	int status;
};

/* Reply: meter capabilities of the device */
struct meter_capabilities_return_s {
	struct rte_mtr_capabilities cap;
};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
/*
 * Create structures allocating the space to carry through ntconnect interface
 */

/* Generic test reply carrying a variable number of 64-bit values */
struct test_s {
	int number;	/* NOTE(review): presumably the entry count of test[] - confirm */
	int status;
	uint64_t test[];	/* flexible array member */
};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "ntconnect.h"
#include "ntconn_mod_helper.h"
#include "nt_util.h"
#include "ntlog.h"
+
/*
 * Server module always registered on 0000:00:00.0
 */
#define this_module_name "server"

#define NTCONNECT_SERVER_VERSION_MAJOR 0U
#define NTCONNECT_SERVER_VERSION_MINOR 1U

/* Handler for "get,nic_pci_ids": list PCI ids of all registered NICs */
static int func_get_nic_pci(void *hdl, int client_fd,
			    struct ntconn_header_s *hdr, char **data, int *len);
/* Sub-commands of "get" */
static struct func_s funcs_get_level1[] = {
	{ "nic_pci_ids", NULL, func_get_nic_pci },
	{ NULL, NULL, NULL },	/* terminator */
};

/*
 * Entry level
 */
static struct func_s server_entry_funcs[] = {
	{ "get", funcs_get_level1, NULL },
	{ NULL, NULL, NULL },	/* terminator */
};
+
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		sprintf(npci->nic_pci_id[i], "%04x:%02x:%02x.%x",
+			serv->pci_id_list[i].domain & 0xffff,
+			serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (data) {
+#ifdef DEBUG
+		NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+		free(data);
+	}
+}
+
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr addr = {
+		.domain = 0, .bus = 0, .devid = 0, .function = 0
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..22ef7828c7
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error-code -> human-readable text table, terminated by code (uint32_t)-1. */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+/* Head of the singly-linked list of registered ntconnect modules */
+static ntconn_mod_t *ntcmod_base;
+/* Accept-loop thread id, and the most recently spawned client worker id */
+static pthread_t tid;
+static pthread_t ctid;
+/* Singleton server state: socket fd, cpuset and the PCI ID list */
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Look up the table entry describing @err_code.
+ * When the code is not present in ntconn_err[], the "Internal error"
+ * entry (index 1) is returned instead.
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+
+	/* Unknown code - report it as an internal error */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a module instance (@op callbacks with context @hdl) under PCI
+ * address @addr. The module is pushed onto the global module list and,
+ * unless the address is the all-zero server placeholder, its PCI ID is
+ * added to the server's PCI ID list if not already present.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto the head of the global module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/*
+	 * NOTE(review): addr.pci_id appears to alias the domain/bus/devid/
+	 * function fields set above (union overlay) - confirm in the header.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill @addr as an AF_UNIX sockaddr for @path.
+ * Returns 0 on success, -1 when an argument is NULL or @path (with its
+ * terminating NUL) does not fit in sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (addr == NULL || path == NULL)
+		return -1;
+	memset(addr, 0, sizeof(struct sockaddr_un));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) < sizeof(addr->sun_path)) {
+		/*
+		 * Pass the full buffer size: rte_strscpy() copies at most
+		 * size - 1 characters, so passing sizeof - 1 here would
+		 * silently truncate a maximum-length path that the length
+		 * check above explicitly allows.
+		 */
+		rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path));
+		return 0;
+	}
+	return -1;
+}
+
+/* Internal status codes shared by the socket helper functions below */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll @fd for input (up to @timeout ms, -1 waits forever) and receive at
+ * most @len bytes into @data. On STATUS_OK, *recv_len holds the number of
+ * bytes received. Other returns are the STATUS_* codes above;
+ * STATUS_TRYAGAIN means the caller should simply retry the call.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd bits set: do the actual receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly @length bytes into @data, looping over partial reads.
+ * Returns STATUS_OK on success; STATUS_CONNECTION_CLOSED and
+ * STATUS_TIMEOUT are passed through silently, any other fatal status is
+ * logged and returned. STATUS_TRYAGAIN from read_data() simply retries.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t recv_len = 0;
+	size_t left = length;
+	size_t pos = 0;
+
+	while (left > 0) {
+		int ret = read_data(clfd, left, &data[pos], &recv_len, -1);
+
+		if (ret == STATUS_OK) {
+			pos += recv_len;
+			left -= recv_len;
+		} else {
+			if (ret == STATUS_CONNECTION_CLOSED || ret == STATUS_TIMEOUT) {
+				/* Silently return status */
+				return ret;
+			}
+			if (ret != STATUS_TRYAGAIN) {
+				NT_LOG(ERR, NTCONNECT,
+				       "Failed getting packet. Error code: 0x%X\n",
+				       ret);
+				return ret;
+			}
+		}
+		/* Try again */
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write the entire @size byte buffer to @fd, looping over partial writes.
+ * A write interrupted by a signal (EINTR) is retried instead of aborting
+ * the whole transfer. Returns 0 on success or STATUS_INTERNAL_ERROR.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue; /* Interrupted by a signal - retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request message from @clfd into a heap buffer.
+ * The first NTCMOD_HDR_LEN bytes form the ntconn header, whose len and
+ * blob_len fields give the total message size; if that exceeds the
+ * initial MESSAGE_BUFFER read, the buffer is grown and the remainder is
+ * read. On STATUS_OK, *rdata owns the buffer (caller frees).
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	/*
+	 * NOTE(review): the header is parsed without checking that
+	 * recv_len >= NTCMOD_HDR_LEN - confirm a short first read cannot
+	 * happen on this socket.
+	 */
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		/* Fetch the part of the message that did not fit in the first read */
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from @clfd and resolve the target module.
+ *
+ * The request payload is "<pci_id>;<module>[;<command...>]". On return
+ * *get_req owns the raw request buffer (caller frees), *module_cmd points
+ * into that buffer at the module command (if any), *status holds the read
+ * status, and the matching registered module (or NULL) is returned.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		/*
+		 * The original NULL check on hdr after the assignment above
+		 * was dead code (hdr had already been dereferenced); removed.
+		 */
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			/* Check the token before strlen(): strtok_r() may return NULL */
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "<domain>:<bus>:<devid>.<function>" */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Send one reply message to @clfd: an ntconn header carrying @reply_tag
+ * followed by @size bytes of @data.
+ * Returns 0 on success or STATUS_INTERNAL_ERROR.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+	/* Assemble header + payload into one buffer so it is a single write */
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	if (res)
+		return res;
+
+	return 0;
+}
+
+/*
+ * Send a reply and hand @data back to the owning module's free_data
+ * callback (taken under the module mutex). The callback is only invoked
+ * when @size is non-zero.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/*
+ * Send an error reply for @err_code: the message body is the 32-bit error
+ * code followed by the error text (the text's first four bytes - the
+ * "----" placeholder - are overwritten by the binary code).
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	sprintf(err_buf, "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	*(uint32_t *)err_buf = (uint32_t)ntcerr->err_code;
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: repeatedly read a request from @arg (the
+ * client fd), dispatch it to the resolved module and send the reply,
+ * until the connection dies. On exit, every registered module's
+ * client_cleanup callback is invoked for this fd.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/*
+		 * NOTE(review): this error reply is reached even after a
+		 * request was answered successfully above - confirm the
+		 * client side expects/ignores it.
+		 */
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	/* NOTE(review): the break paths above leave the loop without freeing
+	 * 'request' - possible leak.
+	 */
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept-loop thread: listen on the ntconnect unix socket and spawn a
+ * detached worker thread (pinned to the configured cpuset) for each
+ * accepted client connection.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Initialise the ntconnect service: ensure the socket directory exists,
+ * register the built-in server module, bind @sockname and start the
+ * accept-loop thread pinned to @cpuset. A no-op (returning 0) when no
+ * modules have registered. Returns 0 on success, -1 on failure.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		/* strdup() may fail; dirname(NULL) would crash */
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		unix_build_address(sockname, &addr);
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+/* Maximum number of adapters this module can track */
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle: the driver instance backing the adapter */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+/* Forward declarations of the "get" sub-command handlers */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* "get" level-1 table: sub-command name -> handler */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* Forward declarations of the "set" sub-command handlers */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* "set" level-1 table: sub-command name -> handler */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level: top-level verbs ("get"/"set") for the adapter module.
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Translate an nt_link_speed_e value into the ntconnect
+ * PORT_LINK_SPEED_* representation; anything unrecognised maps to
+ * PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	static const struct {
+		enum nt_link_speed_e nt_speed;
+		int port_speed;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, PORT_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, PORT_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, PORT_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, PORT_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, PORT_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, PORT_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, PORT_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, PORT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].nt_speed == link_speed)
+			return speed_map[i].port_speed;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Parse a link-speed string ("10M" ... "100G") into its
+ * nt_link_speed_t value; unrecognised strings yield
+ * NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces" handler: build an ntc_interfaces_s reply describing
+ * every available port - physical ports first (with LAG handling),
+ * followed by virtual ports - including PCI id, link/admin state, speed,
+ * MTU, MAC, attached HW queues and NIM data. On success *data/*len
+ * describe the malloc'ed reply buffer.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		/* Admin state gates whether a NIM state is even reported */
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode length is clamped to the 16-bit field maximum */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		/* Virtual ports report state from the negotiated vport mode */
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * "get info" handler: return the FPGA image identifier string formatted
+ * "%03d-%04d-%02d-%02d" (type-product-version-revision) in a malloc'ed
+ * buffer of sizeof(struct ntc_adap_get_info_s) bytes.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* 31-char cap; assumes sizeof(struct ntc_adap_get_info_s) >= 32 - confirm */
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * "get sensors" handler: return an ntc_sensors_s header followed by one
+ * struct sensor per adapter sensor and per NIM sensor (all ports), in a
+ * single malloc'ed reply buffer described by *data/*len.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	/* Sensor records are laid out directly after the header */
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/*
+			 * NOTE(review): names of length >= 50 are copied
+			 * without a NUL terminator - confirm the receiver
+			 * treats name as a fixed-width field.
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable a physical port ("enable" request helper). */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable a physical port ("disable" request helper). */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/* Request the link on @portid to come up; no-op if already requested up. */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/* Request the link on @portid to go down; no-op if already requested down. */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Parse @speed_str and apply it to @portid.  Refused while the port is
+ * administratively enabled; an unparsable speed string yields an
+ * invalid-parameter error reply in *data/*len.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	/* Speed may only be changed while the port is disabled */
+	if (nt4ga_port_get_adm_state(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n", portid,
+	       speed_str);
+	return REQUEST_OK;
+}
+
+/* Set the loopback mode (NT_LINK_LOOPBACK_*) for @portid. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Enable/disable TX laser power on @portid (@disable==true turns it off).
+ * On hardware failure an error reply is marshalled into *data/*len.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(&adap->drv->ntdrv.adapter_info, portid,
+				disable) != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handle "adapter,set,interface,portN,<cmd>[,<arg>]" requests.
+ * *data points at the zero-terminated remainder after "interface,"
+ * (e.g. "port0,link_speed=10G") and may be modified in place.
+ * Dispatches to the set_* helpers above; returns REQUEST_OK or marshals
+ * an error reply into *data/*len.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports: reject port numbers at or beyond the
+	 * physical port count.  The previous test was inverted
+	 * (port_nr < n_phy_ports), which rejected every valid physical
+	 * port and accepted out-of-range ones.
+	 */
+	if (port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Unknown command or missing sub-argument */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * Handle "adapter,set,adapter,..." requests.  Adapter-level "set" commands
+ * are not implemented yet; the command is logged and a not-yet-implemented
+ * error reply is returned.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT, "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * Request entry point for the "adapter" ntconnect module: look up the
+ * textual function name in adapter_entry_funcs and invoke its handler
+ * via the shared dispatch helper.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Free a response buffer previously handed out by this module.
+ * All responses here are allocated with malloc(), so plain free() suffices.
+ */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; the adapter module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/*
+ * Operations table registered with the ntconnect framework for the
+ * "adapter" module: module name, protocol version, request dispatcher,
+ * response free function and per-client cleanup.
+ * NOTE(review): positional initializer — field order assumed to match the
+ * ntconnapi_t declaration; confirm against ntconnect.h.
+ */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register one adapter with the ntconnect framework.
+ * Claims the first free slot in adap_hdl[]; returns -1 when all
+ * MAX_ADAPTERS slots are in use, otherwise the result of
+ * register_ntconn_mod().
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused adapter slot */
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client driver handle table for the "filter" ntconnect module. */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/*
+ * Mapping from client-visible in-port number to the flow ethernet device
+ * it was bound to by func_flow_setport(), plus the attributes stamped
+ * onto every flow created on that port.
+ */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/*
+ * Table mapping ntconnect flow error codes to user-visible messages.
+ * Terminated by an err_code of (uint32_t)-1; entry 1 ("Internal error")
+ * is the fallback used by get_error_msg() for unknown codes.
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "Too many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Look up the user-visible message for @err_code in ntconn_err[].
+ * Unknown codes fall back to entry 1 ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+
+	/* Not found: report it as an internal error */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Marshal an error reply (status @err, message for @code) into *data/*len.
+ * *data owns a malloc()ed struct flow_return_s on success; on allocation
+ * failure *len is 0 and REQUEST_ERR is returned.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * Use strlcpy() (as ntconn_flow_err_status() does) so the
+		 * message is always NUL-terminated; the previous bounded
+		 * memcpy() copied strlen() bytes without the terminator
+		 * into the uninitialized malloc()ed buffer.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Marshal a generic internal-error reply with status @err into *data/*len.
+ * On allocation failure *len is 0 and REQUEST_ERR is returned.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *reply;
+
+	reply = malloc(sizeof(struct flow_return_s));
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	reply->status = err;
+	reply->type = FLOW_ERROR_GENERAL;
+	strlcpy(reply->err_msg, get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR),
+		ERR_MSG_LEN);
+
+	*data = (char *)reply;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Dispatch table for the "filter" module, consumed by execute_function();
+ * NULL-terminated. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Marshal a flow API result (@status plus @error) into a malloc()ed
+ * struct flow_return_s reply in *data/*len.  On allocation failure *len
+ * is 0 and REQUEST_ERR is returned.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *reply;
+
+	reply = malloc(sizeof(struct flow_return_s));
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	reply->status = status;
+	reply->type = error->type;
+	strlcpy(reply->err_msg, error->message, ERR_MSG_LEN);
+
+	*data = (char *)reply;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * Handle the "setport" request: bind a client in-port number to the flow
+ * ethernet device of the virtio path named in the request
+ * ("in_port=<n>,vpath=<path>").  On success the reply lists the VF's
+ * virtqueues (id and hw_id); on failure an error reply is marshalled.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/*
+	 * Must be initialized: when the "vpath=" token is absent or
+	 * malformed, vpath was previously logged and passed to
+	 * vp_path_instance_ready() while still uninitialized.
+	 */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	/* Parse "in_port=<n>" */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	/* Parse "vpath=<path>" */
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	/* Remember the attributes stamped onto flows created on this port */
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	/* Build the reply: number of queues plus their id/hw_id pairs */
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle the "flush" request ("port=<n>"): destroy all flows on the given
+ * port's flow device and marshal the result into *data/*len.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* Parse "port=<n>" */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * Bounds check both ends: atoi() may return a negative value,
+	 * which would index port_eth[] out of bounds.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handle the "destroy" request: the binary blob appended after the header
+ * (struct destroy_flow_ntconnect) names the port and the flow handle to
+ * destroy.  The result is marshalled into *data/*len.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/*
+	 * Start from a defined success state like the sibling handlers do;
+	 * error.type/error.message are read below and flow_destroy() is not
+	 * guaranteed to fill them on every path.
+	 */
+	set_error(&error);
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Bounds check both ends; a negative port would index out of bounds */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	/* Marshal the reply via the shared helper instead of duplicating it */
+	return copy_return_status(data, len, status, &error);
+}
+
+/* Selector for make_flow_create(): create the flow or only validate it. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Translate the flat create_flow_ntconnect request blob into the
+ * flow_elem[]/flow_action[] arrays expected by the flow API and either
+ * validate (@func == FLOW_API_FUNC_VALIDATE) or create the flow.
+ * Returns the new flow handle cast to uint64_t (0 for validate or on
+ * error); *status and *error carry the outcome.
+ * Copied spec/mask/conf payloads are used in place inside @flow_cpy, so
+ * the blob must stay alive while the returned arrays are in use.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Preset the error state used by every overflow bail-out below */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "Too many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unpack match items; valid spec/mask copies are referenced in place */
+	do {
+		idx++;
+		/* '>=' (was '>'): idx == MAX_FLOW_STREAM_ELEM would write
+		 * one element past the end of elem[] */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unpack actions; some types need their conf pointers rebuilt */
+	idx = -1;
+	do {
+		idx++;
+		/* '>=' (was '>'): same one-past-the-end fix for action[] */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					/* '>=' (was '>'): avoid writing one
+					 * past the end of elem_tun[] */
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"Too many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					/* '>=' (was '>'): avoid writing one
+					 * past the end of encap.items[] */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					/* '>=' (was '>'): avoid writing one
+					 * past the end of decap.items[] */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect "flow validate" request handler.
+ *
+ * The request blob (struct create_flow_ntconnect) is appended directly
+ * after the connection header at (*data)[hdr->len].  The flow is validated
+ * through make_flow_create(FLOW_API_FUNC_VALIDATE, ...) and the outcome is
+ * serialized into a freshly allocated reply placed in *data/*len.
+ *
+ * Returns REQUEST_OK when a reply buffer was produced (even for a failed
+ * validation - the error is carried inside the reply), REQUEST_ERR on
+ * allocation failure inside the reply helpers.
+ *
+ * Fixes relative to the original version:
+ *  - removed the unreachable code after the first return statement
+ *    (duplicate make_flow_create() call and dead malloc paths);
+ *  - DEBUG_PARSING: use flow_cpy->elem[i] (was flow_cpy[i].elem[i]);
+ *  - DEBUG_PARSING: print all four IPv4 address bytes (bytes 1-3 were
+ *    printing byte 0 again);
+ *  - debug banner now names this function instead of func_flow_create.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	/* Request blob follows the connection header */
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	/* Dump the pattern items for debugging */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	/* Dump the actions for debugging */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data and serialize status/error into the reply */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * NtConnect "flow query" request handler.
+ *
+ * Reads a struct query_flow_ntconnect blob placed after the connection
+ * header, runs flow_query() on the referenced flow handle and returns a
+ * freshly allocated struct query_flow_return_s (plus any query payload)
+ * through *data/*len.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	/* Request blob follows the connection header */
+	struct query_flow_ntconnect *req =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = req->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)req->flow);
+#endif
+
+	/* The blob carries the flow handle as an integer round-trip */
+	struct flow_handle *flow = (struct flow_handle *)req->flow;
+
+	const struct flow_action action = {
+		req->action.type,
+		(const void *)&req->action.conf_cpy.u.count
+	};
+
+	/* Run the query; data_out/length are owned by the filter on success */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	struct query_flow_return_s *reply =
+		malloc(sizeof(struct query_flow_return_s) + length);
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	*data = (char *)reply;
+	*len = sizeof(struct query_flow_return_s) + length;
+
+	reply->status = status;
+	reply->type = error.type;
+	strlcpy(reply->err_msg, error.message, ERR_MSG_LEN);
+
+	reply->data_length = 0;
+	if (data_out) {
+		/* Append the query payload and release the filter's buffer */
+		memcpy(reply->data, data_out, length);
+		reply->data_length = length;
+		free(data_out);
+	}
+	return REQUEST_OK;
+}
+
+/* Entry point for incoming flow-module requests: dispatch the named
+ * function through the shared NtConnect helper table.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr,
+				function, adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by this module.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook required by the ntconnapi_t interface;
+ * the flow module keeps no per-client state, so this is a no-op.
+ */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations table registered with the NtConnect framework:
+ * name, version, request dispatcher, reply deallocator, client cleanup.
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register one adapter instance with the NtConnect flow module.
+ * Claims the first free slot in flow_hdl[]; returns the result of
+ * register_ntconn_mod(), or -1 when all MAX_CLIENTS slots are taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused handle slot */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One handle slot per registered adapter; drv == NULL marks a free slot */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-module error code to message table; terminated by code (uint32_t)-1 */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+static const char *get_error_msg(uint32_t err_code)
+{
+	int idx;
+
+	/* Codes below the meter-specific range come from the common table */
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR)
+		return get_ntconn_error(err_code)->err_text;
+
+	/* Scan the module table; (uint32_t)-1 terminates it */
+	for (idx = 0; ntconn_err[idx].err_code != (uint32_t)-1; idx++) {
+		if (ntconn_err[idx].err_code == err_code)
+			return ntconn_err[idx].err_text;
+	}
+
+	/* Unknown code: report it as "Internal error" (entry 1) */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Request-name to handler dispatch table, NULL-terminated */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+	error->cause = NULL;
+}
+
+/*
+ * NtConnect "meter capabilities" request handler.
+ * Parses a text argument of the form "vport=<n>" from *data, maps the
+ * virtual port to a physical port and returns the rte_mtr capabilities in a
+ * freshly allocated reply placed in *data/*len.  Returns REQUEST_OK when a
+ * reply buffer was produced (success or error reply), REQUEST_ERR when the
+ * reply allocation itself failed.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse "vport=<n>" from the comma-separated request arguments;
+	 * strtok_r modifies *data in place
+	 */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* Valid virtual ports are 1..64 here */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is the low bit of the virtual port */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect "meter setup" request handler.
+ *
+ * The command name ("addpro", "delpro", "addpol", "delpol", "crtmtr",
+ * "delmtr") is parsed from the text part of *data, while the parameters
+ * arrive as a struct meter_setup_s blob placed after the connection header.
+ * Profile/policy/meter IDs are partitioned per virtual port: each vport
+ * owns a contiguous ID range of size max_id, so the global ID becomes
+ * (vport - 4) * max_id + id.
+ *
+ * Returns REQUEST_OK when a reply buffer (success or error) was produced,
+ * REQUEST_ERR when the reply allocation failed.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	/* Meter vports are 4..128; 0..3 are reserved for physical ports */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* First token of the text arguments is the 6-letter command name */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		/* Per-vport share of the global profile ID space */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		/* Translate the per-vport ID into the global ID space */
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action pointers inside the blob */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		/* Meter IDs use their own (larger) per-vport partition */
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy references also need the vport offset */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	 *data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect "meter read" request handler.
+ * Reads a struct meter_get_stat_s blob placed after the connection header,
+ * translates the per-vport meter ID into the global ID space and reads
+ * (optionally clearing) the meter statistics via rte_mtr_stats_read().
+ * Returns REQUEST_OK when a reply buffer (stats or error) was produced,
+ * REQUEST_ERR when the reply allocation failed.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Meter vports are 4..128; 0..3 are reserved for physical ports */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Physical port is the low bit; shift the ID into the vport's range */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Entry point for incoming meter-module requests: dispatch the named
+ * function through the shared NtConnect helper table.
+ */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr,
+				function, adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by this module.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook required by the ntconnapi_t interface;
+ * the meter module keeps no per-client state, so this is a no-op.
+ */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations table registered with the NtConnect framework:
+ * name, version, request dispatcher, reply deallocator, client cleanup.
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register one adapter instance with the NtConnect meter module.
+ * Claims the first free slot in meter_hdl[]; returns the result of
+ * register_ntconn_mod(), or -1 when all MAX_CLIENTS slots are taken.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused handle slot */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (meter_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ *
+ * Each function registers one NtConnect module (adapter, statistics, flow,
+ * meter, test) for the given driver instance; each returns -1 on failure,
+ * otherwise the result of register_ntconn_mod() for that module.
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..5c8b8db39e
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ *
+ * ntconn_stat_register() refuses to activate the module unless the FPGA's
+ * reported mn_stat_layout_version appears in this table.
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Selector for the four sections of a client snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One snapshot section: location within the buffer and size in 64-bit words */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot bookkeeping, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;
+	struct snaps_s *next;
+};
+
+/* Module-global state: driver handle, stat pointers and snapshot list head */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic categories served by this module (see get_size()) */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Second-level dispatch: sections readable from a previously taken snapshot */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* First-level dispatch under "get": live reads plus the snapshot subtree */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ *
+ * Top-level verbs understood by the stat module: "get ..." and "snapshot".
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Fill a stat reply buffer with FLM (flowmatcher) counters.
+ * val points to a struct ntc_stat_get_data_s header followed by nbc
+ * flowmatcher records; returns the number of 64-bit words written
+ * (records plus STAT_INFO_ELEMENTS for the header).
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Removed unused nthw_xstats_get_names()/nthw_xstats_get() calls:
+	 * their results were only consulted by an assert that compiles out
+	 * in release builds, leaving dead stack buffers and unused warnings.
+	 */
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+			/* Fix: advance the output record pointer; previously
+			 * every iteration overwrote record 0.
+			 */
+			flm++;
+		}
+	} else {
+		/* Fix: zero exactly the nbc records claimed in the header,
+		 * matching the word count returned below.
+		 */
+		memset(flm, 0,
+		       nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER * sizeof(uint64_t));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply buffer with per-color counters: a ntc_stat_get_data_s
+ * header followed by nbc color records. Returns 64-bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *clr;
+	int idx;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap adapters */
+	clr = (struct color_type_fields_s *)cdata->data;
+	for (idx = 0; idx < nbc; idx++, clr++) {
+		clr->pkts = hwstat->mp_stat_structs_color[idx].color_packets;
+		clr->octets = hwstat->mp_stat_structs_color[idx].color_bytes;
+		clr->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[idx].tcp_flags;
+	}
+
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply buffer with per-queue (host buffer) counters: a
+ * ntc_stat_get_data_s header followed by nbq queue records. Returns the
+ * number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *qdata = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *queue;
+	int idx;
+
+	qdata->nb_counters = (uint64_t)nbq;
+	qdata->timestamp = hwstat->last_timestamp;
+	qdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap adapters */
+	queue = (struct queue_type_fields_s *)qdata->data;
+	for (idx = 0; idx < nbq; idx++, queue++) {
+		queue->flush_pkts = hwstat->mp_stat_structs_hb[idx].flush_packets;
+		queue->drop_pkts = hwstat->mp_stat_structs_hb[idx].drop_packets;
+		queue->fwd_pkts = hwstat->mp_stat_structs_hb[idx].fwd_packets;
+		queue->dbs_drop_pkts = hwstat->mp_stat_structs_hb[idx].dbs_drop_packets;
+		queue->flush_octets = hwstat->mp_stat_structs_hb[idx].flush_bytes;
+		queue->drop_octets = hwstat->mp_stat_structs_hb[idx].drop_bytes;
+		queue->fwd_octets = hwstat->mp_stat_structs_hb[idx].fwd_bytes;
+		queue->dbs_drop_octets = hwstat->mp_stat_structs_hb[idx].dbs_drop_bytes;
+	}
+
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group shared by Rx and Tx capture ports from the
+ * driver's per-port counter structure into the wire-format reply record.
+ * Straight field-for-field copy; no counters are derived or combined.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Fill a stat reply buffer with per-port Rx counters. Virt (vswitch)
+ * adapters expose a reduced record; capture adapters expose the full RMON
+ * group plus Rx-only counters. Returns 64-bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch: reduced per-port record */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+
+		for (p = 0; p < nbp; p++, rxc++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	/* capture: RMON group plus Rx-only counters */
+	struct rx_type_fields_cap_s *rxc =
+		(struct rx_type_fields_cap_s *)rxdata->data;
+
+	for (p = 0; p < nbp; p++, rxc++) {
+		struct port_counters_v2 *pc =
+			&hwstat->cap.mp_stat_structs_port_rx[p];
+
+		copy_rmon_stat(pc, &rxc->rmon);
+
+		/* Rx only port counters */
+		rxc->mac_drop_events = pc->mac_drop_events;
+		rxc->pkts_lr = pc->pkts_lr;
+		rxc->duplicate = pc->duplicate;
+		rxc->pkts_ip_chksum_error = pc->pkts_ip_chksum_error;
+		rxc->pkts_udp_chksum_error = pc->pkts_udp_chksum_error;
+		rxc->pkts_tcp_chksum_error = pc->pkts_tcp_chksum_error;
+		rxc->pkts_giant_undersize = pc->pkts_giant_undersize;
+		rxc->pkts_baby_giant = pc->pkts_baby_giant;
+		rxc->pkts_not_isl_vlan_mpls = pc->pkts_not_isl_vlan_mpls;
+		rxc->pkts_isl = pc->pkts_isl;
+		rxc->pkts_vlan = pc->pkts_vlan;
+		rxc->pkts_isl_vlan = pc->pkts_isl_vlan;
+		rxc->pkts_mpls = pc->pkts_mpls;
+		rxc->pkts_isl_mpls = pc->pkts_isl_mpls;
+		rxc->pkts_vlan_mpls = pc->pkts_vlan_mpls;
+		rxc->pkts_isl_vlan_mpls = pc->pkts_isl_vlan_mpls;
+		rxc->pkts_no_filter = pc->pkts_no_filter;
+		rxc->pkts_dedup_drop = pc->pkts_dedup_drop;
+		rxc->pkts_filter_drop = pc->pkts_filter_drop;
+		rxc->pkts_overflow = pc->pkts_overflow;
+		rxc->pkts_dbs_drop = pc->pkts_dbs_drop;
+		rxc->octets_no_filter = pc->octets_no_filter;
+		rxc->octets_dedup_drop = pc->octets_dedup_drop;
+		rxc->octets_filter_drop = pc->octets_filter_drop;
+		rxc->octets_overflow = pc->octets_overflow;
+		rxc->octets_dbs_drop = pc->octets_dbs_drop;
+		rxc->ipft_first_hit = pc->ipft_first_hit;
+		rxc->ipft_first_not_hit = pc->ipft_first_not_hit;
+		rxc->ipft_mid_hit = pc->ipft_mid_hit;
+		rxc->ipft_mid_not_hit = pc->ipft_mid_not_hit;
+		rxc->ipft_last_hit = pc->ipft_last_hit;
+		rxc->ipft_last_not_hit = pc->ipft_last_not_hit;
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply buffer with per-port Tx counters. Virt (vswitch)
+ * adapters expose a reduced record; capture adapters expose the RMON
+ * group with the packet total taken from the adapter-wide counter.
+ * Returns 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+
+		for (p = 0; p < nbp; p++, txc++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct tx_type_fields_cap_s *txc =
+		(struct tx_type_fields_cap_s *)txdata->data;
+
+	for (p = 0; p < nbp; p++, txc++) {
+		copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+				&txc->rmon);
+		/* Packet total is maintained separately by the adapter */
+		txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/* "get layout_version": reply with the FPGA stat module's layout version. */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(int));
+
+	if (!reply) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * "get flm_layout_version": reply 1 for FLM stat versions below 18,
+ * otherwise 2.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(int));
+
+	if (!reply) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type.
+ * Additionally, returns via num_records the total number of records for
+ * this type (ie number of queues, ports, etc).
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	default:
+		/* Fix: nrec/size were previously read uninitialized for an
+		 * unknown type (undefined behavior); report zero records.
+		 */
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for the "get" handlers: allocate a reply buffer sized for
+ * the stat type and fill it via read_counters under the driver statistics
+ * lock, so the caller gets one consistent view. Ownership of *data passes
+ * to the caller (released by stat_free_data()).
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int size = get_size(stat, stype, &nbg);
+	size_t bytes = (size_t)size * sizeof(uint64_t);
+	uint64_t *val = malloc(bytes);
+
+	if (!val) {
+		*data = NULL;	/* Fix: tell the caller no reply buffer exists */
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = (int)bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+
+/* "get flm": reply with the flowmatcher counters */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	/* Statistics engine not initialized */
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": reply with the per-color counters */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	/* Statistics engine not initialized */
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": reply with the per-queue counters */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	/* Statistics engine not initialized */
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get rx_counters": reply with the per-port Rx counters */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	/* Statistics engine not initialized */
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get tx_counters": reply with the per-port Tx counters */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	/* Statistics engine not initialized */
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot bookkeeping node for a client. When parent is
+ * non-NULL it receives the node's predecessor in the singly linked list
+ * (NULL for the head, or the last node when no match), which lets the
+ * caller unlink the node.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Get (or lazily create) the snapshot bookkeeping node for a client.
+ * New nodes are pushed at the head of the list with an empty snapshot
+ * buffer. Returns NULL when allocation of a new node fails.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *node = find_client_snap_data(stat, client_id, NULL);
+
+	if (node)
+		return node;
+
+	node = malloc(sizeof(struct snaps_s)); /* return NULL on malloc failure */
+	if (node) {
+		node->client_id = client_id;
+		node->buffer = NULL;
+		node->next = stat->snaps_base;
+		stat->snaps_base = node;
+	}
+	return node;
+}
+
+/*
+ * "snapshot": read all statistics sections into one per-client buffer
+ * under the driver statistics lock, so later "get snapshot ..." requests
+ * return one consistent collection. The success reply carries no payload.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/* Fix: previously jumped to the error label, which unlocked
+		 * a mutex that was never taken on this path (undefined
+		 * behavior per POSIX).
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	if (snaps->buffer) {
+		free(snaps->buffer);
+		snaps->buffer = NULL;	/* no dangling pointer if malloc fails */
+	}
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*data = NULL;	/* Fix: *data was left unset on this path */
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	/* All four sections are read under one lock acquisition */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_unlock;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_unlock:
+	/* Only reached while the statistics lock is held */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section (colors/queues/rx/tx) of the client's snapshot buffer
+ * into a newly allocated reply. Fails with NO_DATA when the client has not
+ * taken a snapshot yet.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	if (!*data) {	/* Fix: was "if (!data)", which can never trigger */
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": color section of the client's snapshot */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": queue section of the client's snapshot */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": Rx port section of the client's snapshot */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": Tx port section of the client's snapshot */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function: dispatch the request string through the
+ * stat_entry_funcs table.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* Fix: client_id was tagged _unused although it is forwarded below */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer allocated by one of the stat request handlers */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Unlink and free a disconnecting client's snapshot state, if any. */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *prev;
+	struct snaps_s *node = find_client_snap_data(stat, client_id, &prev);
+
+	if (!node)
+		return;
+
+	/* Unlink node from the singly linked list */
+	if (prev)
+		prev->next = node->next;
+	else
+		stat->snaps_base = node->next;
+
+	if (node->buffer)
+		free(node->buffer);
+	free(node);
+}
+
+/* Operations table registered with the NtConnect framework for "stat" */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for this adapter. The module is only activated
+ * when the FPGA's statistics layout version is in the supported table.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Fix: every request handler guards mp_nthw_stat against NULL, but
+	 * registration dereferenced it unchecked below.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: hardware statistics module not initialized. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+/* Maximum number of adapters this module can be registered for */
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-slot state: one entry per registered adapter (see ntconn_test_register) */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Dispatch table: this module serves a single "test" echo function */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo test handler: validates the received blob (a struct test_s followed
+ * by `number` uint64 values) and replies with a copy of the payload. On
+ * validation failure a status-only reply is returned.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	/* Fix: number comes from an untrusted blob; a negative value made
+	 * the size computation below wrap and the copy loop misbehave.
+	 */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: negative count");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * (uint32_t)number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Status-only reply describing the validation failure */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* NOTE(review): these constants are not referenced anywhere in this file —
+ * presumably reserved for flow API test requests; confirm before removing.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/* Module entry point: dispatch a test request through adapter_entry_funcs. */
+static int test_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* Fix: client_id was tagged _unused although it is forwarded below */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer allocated by func_test(). */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op, so the previous NULL guard was redundant */
+	free(data);
+}
+
+/* This module keeps no per-client state, so disconnects need no cleanup. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table registered with the NtConnect framework for "test" */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register one test module instance for this adapter with the NtConnect
+ * framework. Claims the first free slot in test_hdl; returns -1 when all
+ * slots are taken, otherwise the result of register_ntconn_mod().
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused adapter slot */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (test_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[slot],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (12 preceding siblings ...)
  2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-29 10:17 ` Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (6 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver;
thus, all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0,
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+struct nt_fpga_field_init {
+	int id;
+	uint16_t bw;
+	uint16_t low;
+	uint64_t reset_val;
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+struct nt_fpga_register_init {
+	int id;
+	uint32_t addr_rel;
+	uint16_t bw;
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+struct nt_fpga_module_init {
+	int id;
+	int instance;
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base;
+	int nb_registers;
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params;
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..b8113b40da
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (Flow Matcher) register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset within register, reset value }
+ * Register entry: { register id, index, bit width, access type, register reset,
+ *                   number of fields, field table }
+ * NOTE(review): layout inferred from the data itself — field counts match table
+ * lengths and each register reset equals the OR of its fields' resets shifted by
+ * their offsets (e.g. FLM_CONTROL 134217728 == 16 << 23); confirm against the
+ * nt_fpga_field_init_t / nt_fpga_register_init_t definitions in fpga_model.h.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* FLM_STAT_* below are all single 32-bit read-only counters (one field at offset 0). */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register table; entries are sorted by register id, not by index. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * The module exposes two identical instances (suffix 0/1); GFG_CTRLn reset
+ * 4194304 equals the SIZE field reset 64 shifted to offset 16.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* Instance 1 defaults to stream id 1 (instance 0 defaults to 0). */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * GMF_STAT_STICKY and GMF_STAT_MAX_DELAYED_PKT are REGISTER_TYPE_RC1
+ * (presumably clear-on-read — TODO confirm against fpga_model.h).
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * CFG reset 170 (0b010101010) sets the *_INT_B/*_MODPRS_B bits at offsets
+ * 1/3/5/7; GPIO reset 17 sets the two *_LPMODE bits at offsets 0/4.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * Write-only recipe interface: RCP_CTRL selects address/count, RCP_DATA
+ * carries the 155-bit recipe word (31 fields).
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface) register map — auto-generated FPGA model data; do not
+ * hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * PROD_ID_LSB reset 626734872 decomposes as group 9563 << 16 | ver 55 << 8 |
+ * rev 24, matching this file's 9563_055_024 identity; BUILD_TIME 1689706895
+ * is a 32-bit epoch build stamp.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1689706895 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* TEST0/TEST1 reset to 0x11223344 / 0xAABBCCDD scratch patterns. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 456073826 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 3051597623 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 3265543206 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 599637710 },
+};
+
+/* NOTE(review): HIF register indices step by 8 (byte addresses), unlike the
+ * word-indexed tables of the other modules in this file — confirm intended.
+ */
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1689706895, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 456073826, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 3051597623, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 3265543206, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 599637710, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * Write-only recipe interface: RCP_CTRL selects address/count, RCP_DATA
+ * carries the 743-bit recipe word (23 fields).
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * Write-only recipe interface: RCP_CTRL selects address/count, RCP_DATA
+ * carries the 133-bit recipe word (17 fields).
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * Write-only recipe interface: RCP_DATA packs an enable bit plus a
+ * 14-bit MTU value.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC (I2C controller) register map — auto-generated FPGA model data; do not
+ * hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * NOTE(review): the layout (CR/SR/ISR/IER, TX/RX FIFO and OCY/PIRQ, timing
+ * registers TSUSTA..THDDAT) matches the Xilinx AXI IIC register set —
+ * confirm against PG090 when editing. IIC_SR reset 192 corresponds to the
+ * RXFIFO_EMPTY (bit 6) and TXFIFO_EMPTY (bit 7) fields resetting to 1.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS register map — auto-generated FPGA model data; do not hand-edit.
+ * Field entry:    { field id, bit width, bit offset, reset value }
+ * Register entry: { register id, index, bit width, access type, reset,
+ *                   number of fields, field table }
+ * Write-only recipe interface: RCP_DATA packs DYN/OFS/LEN into a 23-bit word.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (Key Matcher) field tables — auto-generated FPGA model data; do not
+ * hand-edit. Field entry: { field id, bit width, bit offset, reset value }.
+ * The km_registers table binding these field tables to register ids lives
+ * further down in this file.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* CAM line: six 32-bit key words W0..W5 followed by flow types FT0..FT5. */
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module register map: link/PCS status, RS-FEC counters and
+ * controls, and GTY transceiver controls (loopback, polarity, cursor,
+ * PRBS, eye scan).  Field entries appear to be { id, bit-width,
+ * bit-offset, reset } -- TODO confirm against nt_fpga_field_init_t.
+ * Non-zero resets (e.g. debounce latencies, diff/post-cursor levels,
+ * MAX_PKT_LEN = 10000, timestamp compensation delays) come straight
+ * from the generated FPGA description; do not hand-edit.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+/* Per-lane (suffix _0.._3) GTY transceiver RX controls. */
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+/* Latched copy of STAT_PCS_RX; same field layout at the same offsets. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/*
+ * Register entries: { id, index, width, type, reset, field count, fields }.
+ * Multi-field register resets pack the per-field resets into one word
+ * (e.g. DEBOUNCE_CTRL 264714 = 10 | 10<<8 | 2<<17) -- generated values,
+ * do not hand-edit.
+ */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC_RX module register map: read-only 32-bit RX statistics counters,
+ * one field per register.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC_TX module register map: read-only 32-bit TX statistics counters,
+ * one field per register (TX counterpart of the MAC_RX tables above).
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI_RD_TG module register map: PCIe read traffic-generator setup
+ * (address/data RAM programming registers, run-iteration trigger, and a
+ * read-only ready flag).
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA module register map: a write-only enable bit plus read-only
+ * 32-bit counters (good/bad packets, length and payload errors) --
+ * presumably a PCIe test/analyzer companion to the traffic generators;
+ * confirm against the module's driver code.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG module register map: PCIe write traffic-generator setup.
+ * Mirrors the PCI_RD_TG layout, with two additions: an INC_MODE bit in
+ * WRDATA2 and a read/write TG_SEQ sequence register.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/*
+ * PDB (packet descriptor builder) module register map: global config
+ * plus a recipe (RCP) table written through CTRL/DATA register pairs.
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 67-bit recipe word; entries are sorted by field name, not offset. */
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/*
+ * PDI module register map: control (CR), status (SR), TX/RX data
+ * (DTR/DRR), prescaler (PRE) and soft-reset (SRR) registers.  The
+ * FRAME/PARITY/OVERRUN error bits and TX/RX FIFO levels suggest a
+ * UART-like serial interface -- confirm against the PDI driver code.
+ * These field arrays use multi-entry-per-line formatting; keep as-is,
+ * the file is generated.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/*
+ * PTP1588 module field tables: configuration, GPIO data/output-enable,
+ * MAC in-band status, MAC management interface (MI), RX host buffer
+ * setup, statistics access, and TX first/last data words.  The matching
+ * ptp1588 register table follows these arrays (continues past this
+ * hunk).  GP_DATA_LL (latched-low) resets to all-ones while GP_DATA/
+ * GP_DATA_LH reset to zero -- generated values, do not hand-edit.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+/* TX_LAST1..LAST4 hold trailing data of 8/16/24/32 bits respectively. */
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/* QSL module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+/* nb_fields column matches the field array lengths above. */
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/* QSPI module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+/* ISR mirrors the IER bit layout (interrupt status vs enable). */
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+/* 4294967295 == 0xFFFFFFFF: all slave-select lines deasserted at reset. */
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/* RAC module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+/* 511 == full 9-bit free count for the in/out buffers at reset. */
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Note: RAC register addresses are wider-spaced (4160..4480) than other
+ * modules'; presumably byte offsets -- TODO confirm against bus layer. */
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/* RFD module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+/* 9018: default max frame size in bytes (jumbo range). */
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+/* 33024 == 0x8100: IEEE 802.1Q VLAN TPID default. */
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+/* 4789: IANA-assigned VXLAN UDP destination port default. */
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/* RMC module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/* RPL module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 128-bit data field: wider than one 32-bit bus word by design. */
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/* RPP_LR module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/* RST9563 (reset controller) map tables (auto-generated). Field rows appear
+ * to be { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* Most blocks come up held in reset (reset value 1/3/7 per field width). */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+/* STICKY mirrors STAT bit positions; register type RC1 (clear-on-write-1). */
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/* SLC module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/* SPIM (SPI master) map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+/* Reset value 6 for SPIM_SR == RXEMPTY | TXEMPTY set. */
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/* SPIS (SPI slave) map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+/* Reset value 6 for SPIS_SR == RXEMPTY | TXEMPTY set. */
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/* STA (statistics) map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/* TEMPMON module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/* TINT (timer interrupt) map tables (auto-generated). Field rows appear to
+ * be { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/* TMC module map tables (auto-generated). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { reg id, addr, bit width, access type, reset value, nb_fields,
+ * field table } -- layout inferred from data, verify vs fpga_model.h. */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/* TSM (time sync module) map tables (auto-generated). Field rows appear to
+ * be { field id, bit width, lsb position, reset value }; connectors CON0-2
+ * share one config layout, CON3-6 a second -- per-connector macros keep the
+ * tables independent. Verify layout vs fpga_model.h. */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+/* NOTE(review): CON5 uses _LO_TIME where CON0-4/6 use _LO_NS; the macro
+ * name comes from the generated register map, so it is kept as-is. */
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+/*
+ * Register table for the TSM module. Each nt_fpga_register_init_t entry
+ * appears to be
+ *   { register_id, register_index, bit_width, type, reset_value,
+ *     field_count, field_table }
+ * -- the field_count column matches the length of the referenced field
+ * table (e.g. TSM_INT_CONFIG: count 2, tsm_int_config_fields has 2 rows)
+ * and bit_width equals the sum of the table's field widths (1 + 19 = 20).
+ * The meaning of the second column (register index vs. byte offset) is
+ * not visible here -- TODO confirm against fpga_model.h.
+ */
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+/*
+ * Module instance table for this FPGA image. Each nt_fpga_module_init_t
+ * entry is
+ *   { module_id, instance, definition_id, ver_major, ver_minor, bus,
+ *     address, register_count, register_table }
+ * as confirmed by the generated trailing comments (e.g. "CAT v0.21:
+ * CAT @ RAB1,768" for { ..., 0, 21, BUS_TYPE_RAB1, 768, ... }).
+ * register_count matches the referenced table's length (TSM: 66 entries
+ * in tsm_registers above).
+ */
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+/*
+ * Product parameter table: { parameter_id, value } pairs describing the
+ * capabilities of this FPGA image (counts, presence flags, sizes).
+ * 140 real entries, terminated by the { 0, -1 } sentinel; the count is
+ * repeated in nthw_fpga_9563_055_024_0000 below.
+ */
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1689706895 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+/*
+ * Top-level product descriptor for FPGA image 9563-055-024-0000,
+ * referenced from nthw_fpga_instances.h. The two counts on the last
+ * line are the number of product_parameters entries (140, excluding the
+ * END sentinel) and the number of fpga_modules entries (48).
+ */
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1689706895, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules,
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+/* Table of all supported FPGA image descriptors; defined elsewhere. */
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];
+
+/* Descriptor for FPGA image 9563-055-024-0000 (nthw_fpga_9563_055_024_0000.c). */
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/*
+ * Numeric identifiers for every FPGA module type known to the NTHW
+ * model; fpga_modules tables reference these. Values are stable IDs
+ * assigned by the generator -- do not renumber (note 32L is
+ * intentionally unused).
+ */
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicators - keep these as the last elements; only aliases go
+ * below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+/*
+ * Numeric identifiers for FPGA product parameters; the generated
+ * product_parameters tables pair these with per-image values. Values
+ * are stable IDs assigned by the generator -- do not renumber.
+ */
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L) /* FLM register/field IDs, continued from the section banner above */
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG: register/field IDs - BURSTSIZEn/CTRLn/RUNn/SIZEMASKn/STREAMIDn per instance n = 0..7 */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF: register/field IDs - control (IFG/timestamp-inject options), debug, status and sticky bits */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY: register/field IDs - PHY GPIO config (CFG) and pin state (GPIO) for ports 0-1 */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS: register/field IDs - per-port (0-7) RXLOS/TXDISABLE/TXFAULT GPIO config and state */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP: register/field IDs - SFP+ module GPIO config (CFG) and pin state (GPIO) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU: register/field IDs - recipe table (header length A/B/C and TTL rewrite fields); IDs jump from 2298 to 2381 (gap is generator-assigned) */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF: register/field IDs - host interface: build time, config, control, product ID, status, statistics, test and UUID registers */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L) /* NOTE(review): near-duplicate of HIF_PROD_ID_EXT below - confirm both ID variants are required */
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH: register/field IDs - hash recipe table (key selection, seed, Toeplitz and word-mask fields) */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST: register/field IDs - recipe table (start/end offsets, MODIF0-2 commands, strip mode) */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G: register/field IDs - 10G core: indirect access, misc control, PHY status and TFG frame generator */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR: register/field IDs - recipe table (enable and MTU fields) */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC: register/field IDs - I2C controller (address, CR/SR/ISR/IER, RX/TX FIFOs, bus timing THIGH/TLOW/TSU*/THD*) */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS: register/field IDs - recipe table (DYN, LEN, OFS fields) */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA: register/field IDs - recipe (VLAN push/pop, queue override), ROA EPP table and special VLAN TPIDs */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF: register/field IDs - control, debug, expire/timeout, recipe table, statistics and UNMQ/UNM_FEED tables */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM: register/field IDs - CAM/TCAM/TCI/TCQ tables and match recipe (A/B bank key selection fields) */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO: no register IDs defined for this module in this FPGA image */
+/* MAC: register/field IDs - serdes config, link summary, statistics, MDS, raw/TFG generators, RX/TX config (section continues below) */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 module: generated register and field ID defines */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 module: generated register and field ID defines */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G module: generated register and field ID defines */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G module: generated register and field ID defines */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS module: generated register and field ID defines */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV module: generated register and field ID defines */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+/*
+ * NOTE(review): auto-generated register/field index constants for the
+ * MAC_PCS_XXV module (presumably the Xilinx XXV 10G/25G Ethernet MAC/PCS
+ * core — TODO confirm against the Napatech register-map generator), with
+ * four per-port instances (_0 .. _3).  Groups visible in this range:
+ * auto-negotiation (ANEG_*), clock activity, core config, debounce,
+ * FEC74/clause-74 counters and config, GTY transceiver controls
+ * (CTL/DIFF/LOOP/MAIN/PRBS/cursor/status), latency, link-training
+ * (LT_*/LE_LT_*), link speed/summary, and MAC RX frame-length limits.
+ * Values are a dense sequential numbering (3746L .. 4410L here) emitted by
+ * a generator: do not edit or renumber by hand — regenerate instead.  These
+ * lines are part of a diff hunk; if this comment is kept, the patch must be
+ * regenerated so the hunk line counts stay consistent.
+ */
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+/* NOTE(review): LATENCY has only two instances (_0/_1) in this range,
+ * unlike the surrounding four-instance groups — verify against generator. */
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM: queue manager — block sizes, group/queue limit and mapping tables, priority levels, SDRAM block/cell usage status */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL: queue selection — LTX, QEN (queue enable), QST (queue state) and RCP (recipe) tables plus UNMQ (unmatched-queue) mapping */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI: SPI controller — register set mirrors the Xilinx AXI Quad SPI core (CR/SR/DTR/DRR/DGIE/IER/ISR/SRR/SSR, FIFO occupancy) — TODO confirm core variant */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP: bridge to a DRP port (presumably Xilinx Dynamic Reconfiguration Port — TODO confirm) — one CTRL register with address/data, write-enable and done/busy handshake fields */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC: register access — debug regs, NMB read/write window and RAB DMA input/output buffer bookkeeping */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH: RX buffer handler — host buffer segment sizing, write-pointer set memory, flush address and datapath-empty status */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD (expansion unverified): control flags, max frame size, VLAN / tunnel-VLAN TPID values and VXLAN destination-port match values */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC (expansion unverified): per-source blocking control (keep-alive, MAC port, RPP slice, statistics), LAG phy odd/even select, debug merge, MAC-interface error and FIFO/RAM overflow status */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC (expansion unverified): enable control, empty status and TX data word */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA (expansion unverified): tunnel/recirculation engine — forwarding config, LAG port table, tunnel config/header tables and ingress/recirculate drop counters */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL: replacer tables — EXT (pointer), RCP (recipe: DYN/EXT_PRIO/LEN/OFS/pointer) and RPL value memory, each addressed via a CTRL (ADR/CNT) + DATA register pair */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR: RX packet processor — IFR recipe (enable, MTU) and LR recipe (EXP) tables (LR expansion unverified) */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000: reset module — single RST register with a SYS field only */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001: reset module — single RST register with a SYS field only */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500: reset/clock control — NOTE(review): generated ID sequence jumps from 6521 to 6598 here; IDs 6522-6597 appear unused in this image — confirm against generator output */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506: no register/field IDs generated for this module */
+/* RST9507: no register/field IDs generated for this module */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509: no register/field IDs generated for this module */
+/* RST9510: no register/field IDs generated for this module */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513: no register/field IDs generated for this module */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-29 10:17   ` Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v9:
* Add missing header
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_BUS_H__
#define __NTHW_BUS_H__

#include <stdint.h>	/* uint8_t — make the header self-contained */

/*
 * Identifier for a RAB bus instance.
 * NOTE(review): guard name __NTHW_BUS_H__ uses a reserved identifier
 * (leading double underscore); kept for consistency with sibling headers.
 */
typedef uint8_t rab_bus_id_t;

#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#include "nthw_clock_profiles.h"

/* Clock profile for NT200A02 2x40G, 2x100G */
/*
 * NOTE(review): both objects below are tentative definitions with no
 * initializer, so the entry count is 0 and the table pointer is NULL —
 * presumably real profile data is supplied in a later commit; callers
 * must tolerate an empty profile until then (verify against users of
 * p_data_si5340_nt200a02_u23_v5).
 */
const int n_data_si5340_nt200a02_u23_v5;
const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_CLOCK_PROFILES_H__
#define __NTHW_CLOCK_PROFILES_H__

#include <stdint.h>

#include "nthw_helper.h"

/* Message text used by compile-time size checks of clock-profile tables. */
#define clk_profile_size_error_msg "size test failed"

/* Format 0 entry: 8-bit register address, value, and read-modify-write mask. */
typedef struct {
	unsigned char reg_addr;
	unsigned char reg_val;
	unsigned char reg_mask;
} clk_profile_data_fmt0_t;

/* Format 1 entry: 16-bit register address with an 8-bit value (no mask). */
typedef struct {
	uint16_t reg_addr;
	uint8_t reg_val;
} clk_profile_data_fmt1_t;

/* Format 2 entry: full-width register address with an 8-bit value (no mask). */
typedef struct {
	unsigned int reg_addr;
	unsigned char reg_val;
} clk_profile_data_fmt2_t;

/* Selects which of the entry layouts above a given profile table uses. */
typedef enum {
	CLK_PROFILE_DATA_FMT_0,
	CLK_PROFILE_DATA_FMT_1,
	CLK_PROFILE_DATA_FMT_2
} clk_profile_data_fmt_t;

/* Si5340 clock profile for NT200A02 (U23); defined in nthw_clock_profiles.c. */
extern const int n_data_si5340_nt200a02_u23_v5;
extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;

#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
/*
 * Detect which Silicon Labs clock chip sits behind I2C device n_dev_addr on
 * IIC controller instance n_instance_no.  Reads 8 ident bytes from register
 * 0x00 after selecting page 0 via n_page_reg_addr.
 * Returns 5340/5341/5338 when recognized, -1 on error or unknown part,
 * and 0 when the read succeeded but ident[3]==0x53 with an unrecognized
 * ident[2] (res keeps the read's success code — NOTE(review): possibly
 * unintended; verify callers treat 0 as "unknown part").
 */
int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
			   const int n_dev_addr, const int n_page_reg_addr)
{
	const char *const p_adapter_id_str _unused =
		p_fpga->p_fpga_info->mp_adapter_id_str;
	uint64_t ident = -1;	/* all-ones until a successful read */
	int res = -1;

	nthw_iic_t *p_nthw_iic = nthw_iic_new();

	if (p_nthw_iic) {
		uint8_t data;
		uint8_t a_silabs_ident[8];

		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);

		data = 0;
		/* switch to page 0 */
		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
				  (uint8_t)n_page_reg_addr, 1, &data);
		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
				       sizeof(a_silabs_ident), a_silabs_ident);
		if (res == 0) {
			int i;

			/* Fold the 8 ident bytes big-endian into one word for logging. */
			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
				ident <<= 8;
				ident |= a_silabs_ident[i];
			}
		}
		nthw_iic_delete(p_nthw_iic);
		p_nthw_iic = NULL;

		/* Conclude SiLabs part */
		if (res == 0) {
			if (a_silabs_ident[3] == 0x53) {
				if (a_silabs_ident[2] == 0x40)
					res = 5340;
				else if (a_silabs_ident[2] == 0x41)
					res = 5341;
			} else if (a_silabs_ident[2] == 38) {
				/*
				 * NOTE(review): decimal 38 (0x26) compared here while the
				 * 5340/5341 cases use hex (0x40/0x41), and this branch only
				 * runs when ident[3] != 0x53 — looks like it may have been
				 * intended as ident[2] == 0x38 nested under the 0x53 check.
				 * Verify against the Si5338 ident register layout.
				 */
				res = 5338;
			} else {
				res = -1;
			}
		}
	}

	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
	       res);
	return res;
}
+
/*
 * CRC-16-CCITT (polynomial 0x1021, init 0) over `length` bytes of `buffer`.
 * This is the XModem variant (check value for "123456789" is 0x31C3), also
 * used by X.25, HDLC, Bluetooth, and SD.
 */
static uint16_t crc16(uint8_t *buffer, size_t length)
{
	uint16_t crc = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		uint8_t low;

		crc = (uint16_t)((crc << 8) | (crc >> 8));	/* byte swap */
		crc = (uint16_t)(crc ^ buffer[i]);
		low = (uint8_t)(crc & 0xff);
		crc = (uint16_t)(crc ^ (low >> 4));
		crc = (uint16_t)(crc ^ (crc << 12));
		crc = (uint16_t)(crc ^ ((crc & 0xff) << 5));
	}
	return crc;
}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint64_t n_fpga_ident;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	n_fpga_ident = p_fpga_info->n_fpga_ident;
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info) {
+		if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+			res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Public interface of the NTHW FPGA core: probing/initialisation entry
 * points plus helpers for AVR, I2C and SiLabs clock-synth access.
 */

#ifndef __NTHW_FPGA_H__
#define __NTHW_FPGA_H__

#include "nthw_drv.h"

#include "nthw_fpga_model.h"

#include "nthw_rac.h"
#include "nthw_iic.h"

#include "nthw_stat.h"

#include "nthw_fpga_rst.h"

#include "nthw_fpga_nt200a0x.h"

#include "nthw_dbs.h"

/* Probe, identify and initialise the FPGA; 0 on success. */
int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
/* Reset the RAB on shutdown; 0 on success, -1 if no RAC handle. */
int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);

/* Read FPGA build parameters into *p_fpga_info. */
int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);

/* Probe the on-board AVR controller instance over SPI. */
int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);

/* Scan a range of I2C bus instances for responding devices. */
int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
		       const int n_instance_no_end);

/* Single-byte register read/write on an I2C device instance. */
int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
			     uint8_t val);

/* Detect a SiLabs clock-synth device on the given I2C address/page register. */
int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
			    const int n_dev_addr, const int n_page_reg_addr);

/* Program a Si5340 clock synthesizer with a format-2 profile; 0 on success. */
int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
	const uint8_t n_iic_addr,
	const clk_profile_data_fmt2_t *p_clk_profile,
	const int n_clk_profile_rec_cnt);

#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res = -1;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/* NT200A0x adapter-family reset/initialisation entry points. */

#ifndef __NTHW_FPGA_NT200A0X_H__
#define __NTHW_FPGA_NT200A0X_H__

/* Run the common + product-specific NT200A0x reset; 0 on success. */
int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);

/* NT200A02: 9563 */
/* Product-specific reset sequence for FPGA 9563; 0 on success. */
int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
			  struct nthw_fpga_rst_nt200a0x *const p);

#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+
+	if (p_mod_rst) {
+		nt_register_t *p_reg_rst;
+		nt_field_t *p_fld_rst_periph;
+
+		NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+		p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+		p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+		field_set_flush(p_fld_rst_periph);
+		field_clr_flush(p_fld_rst_periph);
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	int res;
+
+	if (n_si_labs_clock_synth_model == 5340) {
+		res = nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+			n_si_labs_clock_synth_i2c_addr,
+			p_data_si5340_nt200a02_u23_v5,
+			n_data_si5340_nt200a02_u23_v5);
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		res = -1;
+	}
+	return res;
+}
+
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model;
+	uint8_t n_si_labs_clock_synth_i2c_addr;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+	n_si_labs_clock_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	n_si_labs_clock_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_si_labs_clock_synth_model,
+						n_si_labs_clock_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until the DDR4 PLL reports LOCKED, resetting the DDR4 domain and
+ * retrying on timeout, then wait for the DDR4 (and optional TSM REF) MMCMs
+ * and clear all sticky MMCM/PLL unlock bits.
+ *
+ * Returns 0 on success, -1 if the DDR4 or TSM REF MMCM never locks.
+ * NOTE: a DDR4 PLL lock failure after all retries is logged but does not
+ * abort the sequence (preserved legacy behavior).
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	int retrycount = 5; /* signed: the "<= 0" exhaustion test below is meaningful */
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		/* assign the function-scope status; do not shadow it */
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Release DDR PLL reset */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is optional; only wait for it when the field exists */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for the SDRAM controller (SDC) to calibrate.
+ *
+ * First waits for the DDR4 PLL to lock; if an SDC module instance exists in
+ * the FPGA, additionally waits for the controller to report a calibrated
+ * state (observed to take up to 2.3 seconds on some adapters).  On failure
+ * the DDR4 domain is reset and the sequence retried up to n_retry_cnt_max
+ * times.  Returns 0 on success, non-zero on timeout/failure.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			} else {
+				/*
+				 * No SDC to calibrate: a locked DDR4 PLL is
+				 * sufficient - do not reset the DDR4 domain again
+				 */
+				break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Full FPGA reset sequence for NT200A0X-family adapters.
+ *
+ * Asserts defaults on every reset domain, de-asserts them in the required
+ * order while waiting for the associated MMCM/PLL locks, then verifies the
+ * sticky unlock bits.  The numbered comments below follow the hardware
+ * bring-up document step numbers.  Statement order is significant
+ * throughout - do not reorder.
+ *
+ * Returns 0 on success, -1 when a required lock or sticky check fails.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): deliberately disabled PTP MMCM bring-up path - confirm intent before removing */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Probe the Si-Labs clock synthesizer and record the NT200A0X reset context.
+ *
+ * Resets/sets up the RAB buses, then tries the Si5340 I2C address first
+ * (NT200A02 / NT200A01 HW-build2) and falls back to the Si5338 address
+ * (old NT200A01 HW-build1).  On success the detected model, I2C address
+ * and HW id are stored in *p_rst.  Returns -1 when no synth is detected.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/*
+	 * NOTE(review): the results of the AVR probe and the two IIC scans are
+	 * each overwritten by the next call, and the final return value is the
+	 * last scan's result - confirm this best-effort behavior is intended.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset context for NT200A0X adapters: FPGA identity, detected clock
+ * synthesizer, and the register-field handles used by the reset sequence.
+ * Optional fields (not present on every FPGA image) may be NULL and are
+ * checked before use by the reset code.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	/* FPGA identity (product/version/revision as probed at init) */
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs synth: model number and 7-bit I2C address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (one per reset domain) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional adapter-specific hooks; may be NULL when unimplemented */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate a zero-initialized GPIO_PHY object.
+ * Returns NULL on allocation failure.
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/*
+ * Release a GPIO_PHY object previously returned by nthw_gpio_phy_new().
+ * NULL is accepted and ignored.
+ */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+	/* scrub the object before handing it back to the allocator */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the GPIO_PHY module's CFG/GPIO register and field pointers for both
+ * front ports.
+ *
+ * When called with p == NULL the function only probes for the module
+ * instance: returns 0 if present, -1 if not.  Otherwise returns 0 on
+ * success or -1 when the instance does not exist.  PLL_INTR and RXLOS
+ * fields are optional on some FPGA images and are looked up with
+ * register_query_field() (may leave NULL pointers).
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* True when the port's low-power-mode GPIO pin reads non-zero. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* True when the port interrupt is asserted ("INT_B" is active-low). */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* True when the port is held in reset ("RESET_B" is active-low). */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* True when a transceiver module is present ("MODPRS_B" is active-low). */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/*
+ * True when the SyncE jitter-attenuator PLL interrupt pin is asserted.
+ * Returns false on HW without a "PLL_INTR" pin (field pointer is NULL).
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* this HW may not support "PLL_INTR" (INTR from SyncE jitter attenuater) */
+	if (p->mpa_fields[if_no].gpio_pll_int == NULL)
+		return false;
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/*
+ * True when the emulated RXLOS GPIO pin is asserted.
+ * Returns false on FPGA images without the RXLOS field (pointer is NULL).
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (p->mpa_fields[if_no].gpio_port_rxlos == NULL)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/*
+ * Drive the port's low-power-mode pin (set = enable) and configure the
+ * pin direction as output.
+ */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Drive the port's reset pin ("RESET_B" is active-low, so enable clears
+ * the pin) and configure the pin direction as output.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/*
+ * Drive the emulated RXLOS pin.  A no-op on FPGA images without the
+ * RXLOS field (pointer is NULL).
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	nt_field_t *const fld = p->mpa_fields[if_no].gpio_port_rxlos;
+
+	if (fld == NULL)
+		return;
+
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Restore the default pin directions for a port: all pins as inputs,
+ * except the optional RXLOS emulation pin which defaults to output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	gpio_phy_fields_t *const flds = &p->mpa_fields[if_no];
+
+	field_set_flush(flds->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(flds->cfg_int); /* enable input */
+	field_set_flush(flds->cfg_reset); /* enable input */
+	field_set_flush(flds->cfg_mod_prs); /* enable input */
+	if (flds->cfg_port_rxlos)
+		field_clr_flush(flds->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-interface GPIO PHY field handles: CONFIG direction bits plus GPIO data bits */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* GPIO PHY module instance: FPGA handle, registers and per-interface fields */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/* Allocate a zeroed HIF handle; all register/field pointers start out NULL */
+nthw_hif_t *nthw_hif_new(void)
+{
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/* Release a HIF handle; scrub contents first so stale use is visible */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize a HIF (host interface) instance: resolve registers/fields,
+ * read the FPGA product identification, and derive the HIF reference
+ * clock frequency from the NT_HIF_PER_PS product parameter.
+ *
+ * When p is NULL the call only probes for module presence.
+ * Returns 0 on success, -1 when the HIF instance does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	/* Cache the FPGA identification read from the product-id registers */
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/* BUILD_SEED and CORE_SPEED Reg/Fld are not present on HIF */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 - Reg/Fld not present on HIF */
+	p->mp_reg_int_mask = NULL;
+	p->mp_reg_int_clr = NULL;
+	p->mp_reg_int_force = NULL;
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* STATUS is optional; older FPGAs may lack it or some of its fields */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/*
+		 * No STATUS register: leave all status fields NULL instead of
+		 * querying fields on a NULL register handle.
+		 */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Apply PCIe gen3 tuning (max read-request size and extended tags) on
+ * adapters that support it; NT40E3 is excluded.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT40E3)
+		return 0;
+
+	if (p->mp_fld_max_read) {
+		/*
+		 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+		 * (index=5), but the server crashed. For now we need to limit this value to
+		 * 512 (index=2)
+		 */
+		const uint32_t n_max_read_req_size =
+			field_get_updated(p->mp_fld_max_read);
+
+		if (n_max_read_req_size > 2) {
+			field_set_val_flush32(p->mp_fld_max_read, 2);
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+			       p_adapter_id_str, __func__, n_max_read_req_size);
+		}
+	}
+
+	if (p->mp_fld_ext_tag)
+		field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+	if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+		NT_LOG(INF, NTHW,
+		       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+		       p_adapter_id_str, __func__,
+		       field_get_updated(p->mp_fld_max_tlp),
+		       field_get_updated(p->mp_fld_max_read),
+		       field_get_updated(p->mp_fld_ext_tag));
+	}
+	return 0;
+}
+
+/*
+ * Kick a sample by writing the magic value to the SAMPLE_TIME field.
+ * NOTE(review): mp_fld_sample_time may be NULL when HIF_SAMPLE_TIME is
+ * absent (see nthw_hif_init) - confirm callers only invoke this when the
+ * register exists.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw HIF statistics counters. Optional STATUS fields (tags in
+ * use, read/write errors) are reported as 0 when absent.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	*p_tags_in_use = 0;
+	*p_rd_err = 0;
+	*p_wr_err = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/*
+ * Convert the raw statistics counters into byte rates relative to the
+ * reference clock. The read/write error outputs are *incremented* (they
+ * accumulate across calls); the rate outputs are overwritten each call.
+ * Rates are 0 when the reference clock counter is 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt, tg_unit_size, tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size, &tg_ref_freq,
+			&n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (ref_clk_cnt) {
+		uint64_t rx_rate;
+		uint64_t tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		/* rate = counter * unit size, scaled by ref-clock frequency */
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+	} else {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+	}
+
+	return 0;
+}
+
+/* Enable statistics collection and request a counter update */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while still requesting a final counter update */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample HIF statistics over a fixed window: enable collection, block for
+ * 100 ms, disable, then convert the counters to rates.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/* Fill an endpoint counter set from the current HIF statistics */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	/*
+	 * NOTE(review): cur_tx receives the PCI rx rate and cur_rx the tx
+	 * rate - presumably endpoint vs host perspective; confirm intended.
+	 */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/* HIF (host interface) module instance: cached register/field handles
+ * plus the FPGA identification read at init time. Pointers for registers
+ * not present on this FPGA are NULL (see nthw_hif_init).
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* FPGA product identification registers */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF; always NULL (see nthw_hif_init) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt registers; not present on HIF, always NULL */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional STATUS register; fields may be NULL */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* Statistics registers */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* Optional PCIe CONFIG register (TLP size, read-request size, ext tags) */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA identification cached at init */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe error counters for one endpoint, by severity */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Sampled counters and traffic-generator settings for one PCIe endpoint */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	/* Error counters before/after a run and their difference */
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+/* Primary and slave endpoint counter sets */
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/* Pulse the CR TXFIFO_RESET bit: read-modify, set+flush, then clear+flush */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/* Queue one entry in the TX FIFO: data byte plus optional start/stop flags */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	nt_field_t *fld_start = p->mp_fld_tx_fifo_start;
+	nt_field_t *fld_stop = p->mp_fld_tx_fifo_stop;
+
+	if (start)
+		field_set_all(fld_start);
+	else
+		field_clr_all(fld_start);
+
+	if (stop)
+		field_set_all(fld_stop);
+	else
+		field_clr_all(fld_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Read one byte from the RX FIFO data register into *p_data */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
+/* Soft-reset the controller by writing the reset key 0x0A to SOFTR */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Set the CR enable bit to turn the controller on */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report via *pb_flag whether the SR "bus busy" flag is currently set */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Report via *pb_flag whether the SR "RX FIFO empty" flag is currently set */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * Program the eight I2C timing registers from the nanosecond constants.
+ * n_iic_cycle_time is the controller clock cycle time in ns, ie 125MHz = 8ns.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		nt_field_t *fld;
+		uint32_t ns;
+	} timings[] = {
+		{ p->mp_fld_tsusta, susta }, { p->mp_fld_tsusto, susto },
+		{ p->mp_fld_thdsta, hdsta }, { p->mp_fld_tsudat, sudat },
+		{ p->mp_fld_tbuf, buf },     { p->mp_fld_thigh, high },
+		{ p->mp_fld_tlow, low },     { p->mp_fld_thddat, hddat },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(timings); i++) {
+		/* Convert the required time in ns to controller clock cycles */
+		uint32_t val = timings[i].ns / n_iic_cycle_time;
+
+		field_set_val_flush(timings[i].fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/* Allocate a zeroed I2C controller handle */
+nthw_iic_t *nthw_iic_new(void)
+{
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Initialize an I2C controller instance: resolve all registers/fields,
+ * soft-reset and enable the controller, program the timing registers
+ * (when n_iic_cycle_time is non-zero) and reset the TX FIFO.
+ *
+ * When p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 when the IIC instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All retry/delay parameters start at their defaults (-1 selects default) */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Timing registers */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* FIFO registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Release an I2C handle; scrub contents first so stale use is visible */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Configure polling delay and retry budgets; negative values select defaults */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry = (n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read data_len bytes from reg_addr on device dev_addr into p_void,
+ * retrying up to mn_read_data_retry times. Returns 0 on success, -1 when
+ * the retries are exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C read transaction: address the device/register, then
+ * read data_len bytes from the RX FIFO into p_byte.
+ * Returns 0 on success, -1 on bus or data timeout.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	/* Guard clause: bail out early when the bus never becomes ready */
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write data_len bytes from p_void to reg_addr on device dev_addr,
+ * retrying up to mn_write_data_retry times. Returns 0 on success, -1 when
+ * the retries are exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C write transaction: address the device/register, then
+ * queue data_len bytes (the last one with a stop bit) in the TX FIFO.
+ * Returns 0 on success, -1 when data_len is 0 or the bus is not ready.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	count = data_len - 1; /* all but the final byte, which carries the stop bit */
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			/*
+			 * NOTE(review): unbounded busy-wait with no timeout or
+			 * delay - hangs forever if the bus never recovers;
+			 * confirm this is intended.
+			 */
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Polls until the bus-busy
+ * flag clears or the retry budget is exhausted.
+ * Returns true when the bus became ready, false on timeout.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	/*
+	 * Return the observed bus state rather than testing "count == 0":
+	 * with a retry budget of 0 the counter ends at -1 on timeout (was
+	 * misreported as ready) and at 0 on immediate success (was
+	 * misreported as timeout).
+	 */
+	return !b_bus_busy;
+}
+
+/*
+ * Support function for read function. Polls until the RX FIFO is non-empty
+ * or the retry budget is exhausted.
+ * Returns true when data became available, false on timeout.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	/*
+	 * Return the observed FIFO state rather than testing "count == 0";
+	 * the counter value is ambiguous when the retry budget is 0 (see
+	 * nthw_iic_bus_ready for the same issue).
+	 */
+	return !b_rx_fifo_empty;
+}
+
+/*
+ * Probe a single device address by attempting a one-byte read of
+ * n_reg_addr. Returns the nthw_iic_readbyte result (0 when a device
+ * responded).
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res;
+	uint8_t data_val = -1; /* sentinel; wraps to 0xFF in a uint8_t */
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan from n_dev_addr_start for the first responding device, upwards
+ * (b_increate) or downwards. Returns the found address, or -1.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	const int step = b_increate ? 1 : -1;
+	int res = 0;
+	int i = n_dev_addr_start;
+
+	/* Probe addresses in the requested direction until one responds */
+	for (; b_increate ? (i < 128) : (i >= 0); i += step) {
+		res = nthw_iic_scan_dev_addr(p, i, 0x00);
+		if (res == 0)
+			break;
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit device address on the bus, logging those that respond */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int dev_addr;
+
+	for (dev_addr = 0; dev_addr < 128; dev_addr++)
+		(void)nthw_iic_scan_dev_addr(p, dev_addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/*
+ * Driver state for one IIC (I2C) controller instance in the FPGA.
+ * Holds the FPGA/module handles, retry and timing tunables, and the
+ * register/field handles resolved once at init time.
+ */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_iic;	/* IIC module for this instance */
+	int mn_iic_instance;	/* instance number (used in log messages) */
+
+	uint32_t mn_iic_cycle_time;
+	/*
+	 * Poll/retry tunables; a negative value makes the driver fall
+	 * back to its built-in default (see nthw_iic_data_ready()).
+	 */
+	int mn_poll_delay;
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/*
+	 * Bus timing registers (setup/hold/high/low times).
+	 * NOTE(review): the names follow the Xilinx AXI IIC timing
+	 * registers (TSUSTA, TSUSTO, THDSTA, ...) - confirm against the
+	 * FPGA register map.
+	 */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register: enable, master mode, TX FIFO reset, TX ack. */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register: bus busy and RX/TX FIFO full/empty flags. */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data byte plus start/stop condition flags. */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable interrupt/compare level. */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data register. */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register and its key field. */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Allocate a zeroed controller context; returns NULL on allocation failure. */
+nthw_iic_t *nthw_iic_new(void);
+/* Bind to IIC instance n_iic_instance on p_fpga and program bus timing. */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Override poll delay and retry budgets (negative keeps the default). */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Register-level read/write; return 0 on success, non-zero on failure. */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Poll helpers: true when the bus / RX data is ready, false on timeout. */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers (see nthw_iic.c for semantics). */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate a zero-initialized MAC/PCS context.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * object and releases it with nthw_mac_pcs_delete().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc zero-initializes, replacing the original malloc+memset pair. */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Release a context from nthw_mac_pcs_new(); NULL is accepted. */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the struct before freeing to catch stale-pointer use. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	/* Resolve every MAC_PCS register and field handle up front. */
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		/* Lock masks are product specific; selected below. */
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		/* Optional field: query (not get) returns NULL when absent;
+		 * nthw_mac_pcs_set_ts_eop() checks for NULL before use.
+		 */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+		/* Only resolved when RXBUFSTAT builds are enabled; the
+		 * pointers stay NULL otherwise.
+		 */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		/* Two FPGA layouts exist: a combined GTY_CTL register with
+		 * both RX and TX polarity fields, or split GTY_CTL_RX /
+		 * GTY_CTL_TX registers. Query the combined form first and
+		 * fall back to the split form when it is absent.
+		 */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		/* Optional register: query returns NULL when absent. */
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/*
+ * Select the host (enable=true) or the traffic generator (enable=false)
+ * as the TX source; the two selections are always set complementarily.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS RX path via the RX_ENABLE config bit. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Enable or disable the PCS TX path via the TX_ENABLE config bit. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select (or deselect) the host as the TX data source. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select (or deselect) the traffic generator as the TX data source. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Select EOP (enable=true) or SOP (enable=false) timestamping.
+ * Silently does nothing when the FPGA lacks the TS_EOP field
+ * (it is resolved with register_query_field() and may be NULL).
+ */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (fld == NULL)
+		return;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* True when every masked block-lock and lane-lock bit is set. */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+/* Assert (enable=true) or deassert the TX path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (enable=true) or deassert the RX path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Return the current state of the RX path reset bit. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Enable or disable transmission of Remote Fault Indication (RFI). */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (enable=true) or deassert the RX force-resync control bit. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	/* Refresh the shadow value, then flush the new bit state. */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when all four GTY RX reset-done fields report done under the
+ * product-specific mask. Fields are read in order 0..3 and the check
+ * stops at the first field that is not done (same short-circuit order
+ * as the original chained && expression).
+ */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *const flds[4] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * True when all four GTY TX reset-done fields report done under the
+ * product-specific mask. Fields are read in order 0..3 and the check
+ * stops at the first field that is not done (same short-circuit order
+ * as the original chained && expression).
+ */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *const flds[4] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Enable or disable near-end (host) loopback on all four GTY lanes.
+ * Loop value 2 selects host loopback, 0 selects normal operation.
+ */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_val = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Enable or disable far-end (line) loopback on all four GTY lanes.
+ * Loop value 4 selects line loopback, 0 selects normal operation.
+ */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_val = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP8 error counters and log any non-zero counts
+ * (debug builds only).
+ * NOTE(review): the body only reads the BIP_ERR register; the "reset"
+ * in the name presumably relies on clear-on-read hardware semantics -
+ * confirm against the FPGA register documentation.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Read the PCS RX status field; only bit 0 is reported to the caller. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* Return the current HI_BER (high bit-error-rate) status flag. */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Snapshot the LINK_SUMMARY register and copy the requested fields.
+ * Every output pointer is optional; pass NULL for values not needed.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	/* One register read; all fields below come from the same snapshot. */
+	register_update(p->mp_reg_link_summary);
+
+	if (p_abs != NULL)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state != NULL)
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	if (p_lh_abs != NULL)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state != NULL)
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	if (p_link_down_cnt != NULL)
+		*p_link_down_cnt =
+			field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr != NULL)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault != NULL)
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	if (p_lh_remote_fault != NULL)
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	if (p_local_fault != NULL)
+		*p_local_fault =
+			field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault != NULL)
+		*p_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	const bool block_partial =
+		(block_lock != 0) && (block_lock != block_mask);
+	const bool lane_partial =
+		(lane_lock != 0) && (lane_lock != lane_mask);
+
+	return block_partial || lane_partial;
+}
+
+/*
+ * Enable or bypass RS-FEC on the port: control value 0 enables FEC,
+ * all five control bits set selects bypass. The RX and TX paths are
+ * reset afterwards to make the new FEC state take effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t fec_ctrl_val = enable ? 0 : ((1 << 5) - 1);
+
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+			    fec_ctrl_val);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Return the FEC_STAT BYPASS flag (FEC currently bypassed). */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* Return the FEC_STAT VALID flag (FEC operating). */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* Return the FEC_STAT FEC_LANE_ALGN flag (FEC lanes aligned). */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+/* True if at least one of the four FEC alignment-marker locks is set. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True only when all four FEC alignment-marker locks are set. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/*
+ * Log all FEC_STAT fields for debugging.
+ * Fix: the fifth value printed is am_lock3, but the format string
+ * labelled it "AM_LOCK_0"; corrected to "AM_LOCK_3".
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read (and thereby refresh) the FEC corrected/uncorrected code word
+ * counters and log any non-zero values.
+ * NOTE(review): no explicit clear is written here; presumably the
+ * counter registers are clear-on-read — confirm against the FPGA
+ * register specification.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the GTY RX buffer status lanes.
+ * Returns true (and logs the four per-lane status values) only when at
+ * least one lane both reports a status *change* and currently has a
+ * non-zero RX_BUF_STAT; otherwise returns false.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Apply GTY transmitter tuning for one lane (0-3): pre-cursor,
+ * differential control and post-cursor, in that order.  Each value is
+ * masked to the 5-bit field width (0x1F).  A lane number outside 0-3
+ * is silently ignored (no default case in the switches).
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers, www.xilinx.com, page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	/* Program LPM_EN (bit 0 of 'mode') on all four lanes; only the
+	 * last write flushes, so the lanes change together.
+	 */
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/*
+ * Set or clear the TX differential-pair polarity swap for one GTY lane
+ * (0-3).  A lane number outside 0-3 is silently ignored.
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Set or clear the RX differential-pair polarity swap for one GTY lane
+ * (0-3).  A lane number outside 0-3 is silently ignored.
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Set the port LED mode (see enum nthw_mac_pcs_led_mode_e).  The field
+ * is read first to refresh the shadow register before the write.
+ */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/*
+ * Program the RX timestamp compensation delay.  The field may be absent
+ * on some FPGA images (NULL handle), in which case this is a no-op.
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Return the hardware-refreshed BLOCK_LOCK lock bits. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Return the bit mask covering the BLOCK_LOCK lock field (set at init). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Return the hardware-refreshed per-lane (VL_DEMUXED) lock bits. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Return the bit mask covering the per-lane lock field (set at init). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* LED control modes accepted by nthw_mac_pcs_set_led_mode(). */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* Receiver equalization modes for nthw_mac_pcs_set_receiver_equalization_mode() */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Per-instance context for one MAC_PCS FPGA module.
+ * Holds register and field handles looked up at init time; the mp_*
+ * pointers reference objects managed by the FPGA model layer — this
+ * struct does not own or free them.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+/* Lifetime: allocate, bind to an FPGA module instance, free. */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+/* Reset, enable and path-selection controls. */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+/* Link status queries; NULL output pointers are not permitted here. */
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+/* FEC control/status and GTY tuning helpers. */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+/* Raw lock-field accessors (values and init-time masks). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Refresh a field's shadow register, then set (true) or clear (false)
+ * the field and flush the change to hardware.  A NULL field handle is
+ * silently ignored so optional registers need no caller-side checks.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate and zero-initialize a MAC_PCS_XXV context.
+ * Returns NULL on allocation failure; release with
+ * nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc() zeroes the allocation, replacing the malloc+memset idiom */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Free a context allocated by nthw_mac_pcs_xxv_new().
+ * The struct is zeroed before free as a use-after-free tripwire;
+ * NULL is accepted and ignored.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+		free(p);
+	}
+}
+
+/*
+ * Map a channel index to a port number: index 0 yields the module
+ * instance number, any other index is returned unchanged.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Refresh the LINK_SUMMARY register for channel 'index' and copy each
+ * requested status field into the corresponding output pointer.
+ * Any output pointer may be NULL, in which case that field is skipped.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* Validate the handle before it is used; the original asserted
+	 * only after computing &p->regs[index].
+	 */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/*
+ * The wrappers below each update one CORE_CONF or SUB_RST field for
+ * channel 'index' and flush it to hardware; a missing (NULL) field
+ * handle is silently ignored by the shared helper.
+ */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Program TX_SEND_LFI and TX_SEND_RFI together so both take effect in
+ * a single CORE_CONF register flush.
+ */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf)
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+/* Report whether DFE equalization is active for the channel. */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
+/*
+ * Enable or disable DFE equalization by programming LPM_EN (inverted),
+ * then pulse the RX equalization reset so the GTY latches the new
+ * setting.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Set/clear RX differential-pair polarity inversion for the channel. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear TX differential-pair polarity inversion for the channel. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the GTY transmitter inhibit bit for the channel. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Enable/disable GT loopback on the channel: writes 2 to GTY_LOOP when
+ * enabled, 0 when disabled (presumably the "near-end PMA loopback"
+ * encoding — confirm against UG578).
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Set/clear the CORE_CONF line-loopback bit for the channel. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True while the user RX reset is asserted for the channel. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+/* True while the user TX reset is asserted for the channel. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* 3 = both bits of the lock field set; presumably 10G-only cores
+	 * report the unused QPLL bit as locked — TODO confirm
+	 */
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/*
+ * True when the QPLL is locked and neither the user RX nor the user TX
+ * reset is asserted — i.e. the sub-reset sequence has completed.
+ */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (!nthw_mac_pcs_xxv_is_qpll_lock(p, index))
+		return false;
+	if (nthw_mac_pcs_xxv_is_user_rx_rst(p, index))
+		return false;
+	return !nthw_mac_pcs_xxv_is_user_tx_rst(p, index);
+}
+
+/* True when auto-negotiation is enabled for the channel. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+/* Set/clear the CORE_CONF "transmit idle" bit for the channel. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear TX FCS insertion for the channel. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True when the channel link speed is 10G (false means 25G). */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+/* Select 10G (true) or 25G (false) link speed for the channel. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Pulse the link-speed toggle bit to make a new speed setting take effect. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+/* Enable/disable RS-FEC for the channel. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set the port LED mode; the read refreshes the shadow register first. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+/* Assert/de-assert the RX MAC-PCS sub-reset for the channel. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/de-assert the TX MAC-PCS sub-reset for the channel. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Read (and thereby refresh) the RS-FEC corrected/uncorrected code word
+ * counters for the channel and log any non-zero values.
+ * NOTE(review): no explicit clear is written; presumably the counters
+ * are clear-on-read — confirm against the FPGA register specification.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	/* Added the trailing "\n" both NT_LOG calls were missing, for
+	 * consistency with every other NT_LOG message in this driver.
+	 */
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Program the RX timestamp compensation delay for the channel. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+/* Program the TX timestamp compensation delay for the channel. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+/* Select timestamping at end-of-packet (true) via CORE_CONF. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Program the GTY TX differential swing control for the channel. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+/* Program the GTY TX pre-cursor emphasis for the channel. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+/* Program the GTY TX post-cursor emphasis for the channel. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+/* Enable/disable link training for the channel. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * The wrappers below each update one auto-negotiation (ANEG_CONFIG)
+ * request/control bit for channel 'index' and flush it to hardware.
+ */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Configure direct-attach-copper (DAC) mode for the channel.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: auto-negotiation and
+ * link training are disabled (AN/LT kept out of reset, ANEG bypassed)
+ * and the RX/TX MAC-PCS and GT data paths are pulsed through reset.
+ * Any other mode hits the assert.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+/* Return the link-summary RX FEC74 lock flag ("ll" prefix — presumably
+ * latched-low; verify against register docs) for channel @index.
+ */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(fld);
+}
+
+/* Return the link-summary RX RS-FEC lane-alignment flag ("ll" prefix —
+ * presumably latched-low; verify against register docs) for channel @index.
+ */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(fld);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Port LED control modes (written via nthw_mac_pcs_xxv_set_led_mode()) */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/*
+ * Direct-attach-copper (DAC) modes (written via nthw_mac_pcs_xxv_set_dac_mode()).
+ * NOTE(review): N/S/L presumably denote the 25G cable-assembly reach classes
+ * (CA-25G-N/S/L) -- confirm against the FPGA register documentation.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Register/field shadow for one XXV (10/25G) MAC/PCS FPGA module instance.
+ * regs[] holds one entry per sub-module/channel; the init code populates up
+ * to NTHW_MAC_PCS_XXV_NUM_ELEMS (4) entries depending on n_channels.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	/* Per-channel register and field handles, indexed by channel number */
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_RD_TG context.
+ * Returns NULL on allocation failure; the caller owns the instance and
+ * releases it with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/*
+ * Release a PCI_RD_TG context obtained from nthw_pci_rd_tg_new().
+ * The structure is scrubbed before being freed; NULL is ignored.
+ */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the PCI_RD_TG (PCIe read test generator) register and field handles.
+ *
+ * Returns 0 on success, -1 when the requested FPGA module instance does not
+ * exist. When p is NULL the call only probes for the module's presence.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	/* Probe-only mode: report presence without initializing any state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id: query variant tolerates an absent field
+	 * (handle may remain NULL), unlike register_get_field above
+	 */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as two 32-bit register fields.
+ * Note: the original "(1UL << 32) - 1" mask is undefined behavior on
+ * platforms where unsigned long is 32 bits wide; use width-safe masks.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the test-generator RAM entry to be accessed next */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage the request descriptor (size, wait and wrap flags) for the currently
+ * selected RAM entry; only the final field_flush_register() call pushes the
+ * staged field values out as a single register write.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/*
+ * Write the iteration count to the RD_RUN register; presumably this also
+ * kicks off the generator -- confirm against the FPGA register documentation.
+ */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read back the current TG_RD_RDY control/status flag from hardware */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * Make the header self-contained: the declarations below use uint32_t,
+ * uint64_t and bool, so pull in the standard headers explicitly instead of
+ * relying on every includer to do so.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Register/field handles for the PCIe read test generator (PCI_RD_TG) */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI_TA context.
+ * Returns NULL on allocation failure; release with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc zero-fills in one step, replacing the malloc+memset pair */
+	nthw_pci_ta_t *p = calloc(1, sizeof(nthw_pci_ta_t));
+
+	return p;
+}
+
+/*
+ * Release a PCI_TA context allocated by nthw_pci_ta_new().
+ * The context is scrubbed before being freed; NULL is accepted.
+ */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_pci_ta_t));
+	free(p);
+}
+
+/*
+ * Bind a PCI_TA context to module instance n_instance of p_fpga and cache
+ * its register/field handles.
+ *
+ * With p == NULL the call only probes for the module: returns 0 if present,
+ * -1 if not. Otherwise returns 0 on success and -1 if the instance does not
+ * exist.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* Product parameter: TA/TG test logic present (defaults to 1) */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* Control register + enable bit */
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* Counter registers and their AMOUNT fields */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write val to CONTROL.ENABLE and flush it to the hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Fetch the updated GOOD-packet counter into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t amount = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+
+	*val = amount;
+}
+
+/* Fetch the updated BAD-packet counter into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t amount = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+
+	*val = amount;
+}
+
+/* Fetch the updated LENGTH_ERROR counter into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t amount = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+
+	*val = amount;
+}
+
+/* Fetch the updated PAYLOAD_ERROR counter into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	const uint32_t amount = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+
+	*val = amount;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * PCI_TA register/field context.
+ * Populated once by nthw_pci_ta_init(); accessors use the cached handles.
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_pci_ta;	/* resolved PCI_TA module */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* product param NT_PCI_TA_TG_PRESENT */
+
+	/* control register and its ENABLE field */
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	/* counter registers and their AMOUNT fields */
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+/* Release a context returned by nthw_pci_ta_new(); NULL is accepted. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+/* Resolve module/register/field handles; 0 on success, -1 on error. */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_WR_TG context.
+ * Returns NULL on allocation failure; release with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc zero-fills in one step, replacing the malloc+memset pair */
+	nthw_pci_wr_tg_t *p = calloc(1, sizeof(nthw_pci_wr_tg_t));
+
+	return p;
+}
+
+/*
+ * Release a PCI_WR_TG context allocated by nthw_pci_wr_tg_new().
+ * The context is scrubbed before being freed; NULL is accepted.
+ */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+	free(p);
+}
+
+/*
+ * Bind a PCI_WR_TG context to module instance n_instance of p_fpga and
+ * cache its register/field handles.
+ *
+ * With p == NULL the call only probes for the module: returns 0 if present,
+ * -1 if not. Otherwise returns 0 on success and -1 if the instance does not
+ * exist.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* Product parameter: TA/TG test logic present (defaults to 1) */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* WRDATA0/WRDATA1: physical address low/high halves */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* WRDATA2: request size and mode flags */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id - query (not get) so a missing field yields NULL */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit physical base address of the write generator.
+ * The address is written as two 32-bit halves (WRDATA0 low, WRDATA1 high),
+ * each flushed separately.
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	/*
+	 * Mask with a 64-bit constant: the previous (1UL << 32) expression is
+	 * undefined behavior on targets where unsigned long is 32 bits.
+	 */
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the generator RAM address to operate on and flush it. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	nt_field_t *const ram_addr_fld = p->mp_fld_pci_wr_tg_ram_addr;
+
+	field_set_val_flush32(ram_addr_fld, n_ram_addr);
+}
+
+/*
+ * Stage the generator RAM word: request size plus WAIT/WRAP/INC_MODE flags.
+ * All four fields belong to the TG_WRDATA2 register (see init), so a single
+ * flush through the last field commits the whole word at once.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the generator for n_iterations iterations (flushed write). */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	nt_field_t *const run_fld = p->mp_fld_pci_wr_tg_run_iteration;
+
+	field_set_val_flush32(run_fld, n_iterations);
+}
+
+/* Read back the generator's TG_WR_RDY status field. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	const uint32_t rdy = field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+
+	return rdy;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * PCI write test generator (PCI_WR_TG) register/field context.
+ * Populated once by nthw_pci_wr_tg_init(); accessors use the cached handles.
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_pci_wr_tg;	/* resolved PCI_WR_TG module */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* product param: TA/TG logic present */
+
+	/* WRDATA0: low 32 bits of the physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	/* WRDATA1: high 32 bits of the physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* WRDATA2: request size, host id (optional, may be NULL), mode flags */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	/* WRADDR: generator RAM address */
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	/* WR_RUN: iteration count that starts the generator */
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	/* CTRL: ready status */
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	/* SEQ: sequence number */
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+/* Release a context returned by nthw_pci_wr_tg_new(); NULL is accepted. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+/* Resolve module/register/field handles; 0 on success, -1 on error. */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate a zero-initialized PCIe3 context.
+ * Returns NULL on allocation failure; release with nthw_pcie3_delete().
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc zero-fills in one step, replacing the malloc+memset pair */
+	nthw_pcie3_t *p = calloc(1, sizeof(nthw_pcie3_t));
+
+	return p;
+}
+
+/*
+ * Release a PCIe3 context allocated by nthw_pcie3_new().
+ * The context is scrubbed before being freed; NULL is accepted.
+ */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_pcie3_t));
+	free(p);
+}
+
+/*
+ * Bind a PCIe3 context to module instance n_instance of p_fpga, cache all
+ * register/field handles and apply the initial endpoint setup (clear the
+ * DMA endpoint allow masks and marker addresses, then re-enable endpoint 0).
+ *
+ * With p == NULL the call only probes for the module: returns 0 if present,
+ * -1 if not. Otherwise returns 0 on success and -1 if the instance does not
+ * exist.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* Root-port <-> endpoint error signalling */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	/* Endpoint control/messaging */
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	/*
+	 * NOTE(review): endpoint 0 is re-enabled above (field_set_flush) while
+	 * endpoint 1 is cleared a second time here - confirm this asymmetry is
+	 * intentional for the non-bifurcated configuration.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Latch the statistics counters by writing the trigger value to SAMPLE_TIME. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	/* 0xfee1dead is the magic trigger value written to the register */
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection: set STAT_ENA and STAT_REQ, flush via REQ. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	nt_field_t *const ena_fld = p->mp_fld_stat_ctrl_ena;
+	nt_field_t *const req_fld = p->mp_fld_stat_ctrl_req;
+
+	field_set_all(ena_fld);
+	field_set_all(req_fld);
+	field_flush_register(req_fld);
+	return 0;
+}
+
+/* Disable statistics collection: clear STAT_ENA, set STAT_REQ, flush via REQ. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	nt_field_t *const ena_fld = p->mp_fld_stat_ctrl_ena;
+	nt_field_t *const req_fld = p->mp_fld_stat_ctrl_req;
+
+	field_clr_all(ena_fld);
+	field_set_all(req_fld);
+	field_flush_register(req_fld);
+	return 0;
+}
+
+/*
+ * Read the raw PCIe3 statistics into the caller-supplied locations.
+ * All eight output pointers must be non-NULL. The TG unit size and reference
+ * frequency are compile-time constants, not hardware reads. Returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* constants: counter unit size and 250 MHz reference frequency */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw PCIe3 counters into rates and bus-utilization figures.
+ * Rates are scaled by the TG reference frequency over the sampled reference
+ * clock count; utilization values are in parts-per-million of ref clocks.
+ * If no reference clocks were counted, every output is set to 0 (avoids
+ * division by zero). Returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		/*
+		 * Zero ALL outputs: previously the rate and tag outputs were
+		 * left unwritten here, so callers read indeterminate values.
+		 */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		*p_tag_use_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Pre-sample hook for endpoint counters. Intentionally a no-op for PCIe3
+ * (only the post-sample hook does work); kept so both hooks exist.
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: convert the raw PCIe3 counters into the rate and
+ * utilization figures stored in *epc.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	/*
+	 * NOTE(review): epc->cur_tx receives the p_pci_rx_rate output and
+	 * epc->cur_rx the p_pci_tx_rate output - verify the tx/rx crossing
+	 * is intentional (host RX == endpoint TX) rather than swapped.
+	 */
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * PCIe3 register/field context.
+ * Populated once by nthw_pcie3_init(); accessors use the cached handles.
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_pcie3;	/* resolved PCIE3 module */
+	int mn_instance;	/* module instance number */
+
+	/* statistics control (enable/request) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	/* raw RX/TX byte-unit counters */
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	/* request-channel ready/valid counters */
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	/* tags-in-use status */
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference clock counter */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port -> endpoint error signalling */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* endpoint -> root-port error signalling */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	/* sample-time trigger register */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* endpoint control/messaging and DMA allow masks */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* marker address (LSB/MSB) */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	/* scratch/test registers */
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* test2/test3 are not populated by nthw_pcie3_init() in this commit */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_pcie3_t *nthw_pcie3_new(void);
+/* Release a context returned by nthw_pcie3_new(); NULL is accepted. */
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+/* Resolve handles and apply initial endpoint setup; 0 on success, -1 on error. */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate a zero-initialized SDC context.
+ * Returns NULL on allocation failure; release with nthw_sdc_delete().
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc zero-fills in one step, replacing the malloc+memset pair */
+	nthw_sdc_t *p = calloc(1, sizeof(nthw_sdc_t));
+
+	return p;
+}
+
+/*
+ * Release an SDC context allocated by nthw_sdc_new().
+ * The context is scrubbed before being freed; NULL is accepted.
+ */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_sdc_t));
+	free(p);
+}
+
+/*
+ * Bind an SDC (SDRAM controller) context to module instance n_instance of
+ * p_fpga and cache its field handles.
+ *
+ * With p == NULL the call only probes for the module: returns 0 if present,
+ * -1 if not. Otherwise returns 0 on success and -1 if the instance does not
+ * exist.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;	/* scratch: only field handles are kept */
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Read the SDRAM controller status fields and pack them into *pn_result_mask.
+ *
+ * The fields are accumulated most-significant first in the order CALIB,
+ * INIT_DONE, MMCM_LOCK, PLL_LOCK, RESETTING. CALIB/INIT_DONE/MMCM_LOCK/
+ * PLL_LOCK are expected to read all-ones, RESETTING to read zero.
+ * Returns the number of fields with an unexpected value (0 = all ok), or -1
+ * if either argument is NULL.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+	uint32_t val;
+	uint32_t val_mask;
+	int n_val_width;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	/* unsigned shift (1U) avoids signed-overflow UB for wide fields */
+	val = field_get_updated(p->mp_fld_stat_calib);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_calib);
+	val_mask = ((1U << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_init_done);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_init_done);
+	val_mask = ((1U << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_mmcm_lock);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_mmcm_lock);
+	val_mask = ((1U << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_pll_lock);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_pll_lock);
+	val_mask = ((1U << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	/* RESETTING must be zero once the controller is operational */
+	val = field_get_updated(p->mp_fld_stat_resetting);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_resetting);
+	val_mask = ((1U << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != 0)
+		n_err_cnt++;
+
+	/* non-NULL guaranteed by the guard above; the old re-check was dead */
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Wait for the SDRAM controller to become operational: CALIB, INIT_DONE,
+ * MMCM_LOCK and PLL_LOCK must all go high and RESETTING must go low, each
+ * polled up to n_poll_iterations times with n_poll_interval between polls.
+ * Returns the number of fields that did not reach the expected state
+ * (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int n_fail_cnt = 0;
+	size_t i;
+
+	/* fields that must read all-ones, polled in this order */
+	nt_field_t *const set_flds[] = {
+		p->mp_fld_stat_calib,
+		p->mp_fld_stat_init_done,
+		p->mp_fld_stat_mmcm_lock,
+		p->mp_fld_stat_pll_lock,
+	};
+
+	for (i = 0; i < sizeof(set_flds) / sizeof(set_flds[0]); i++) {
+		if (field_wait_set_all32(set_flds[i], n_poll_iterations,
+					 n_poll_interval))
+			n_fail_cnt++;
+	}
+
+	/* RESETTING must drop to zero */
+	if (field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval))
+		n_fail_cnt++;
+
+	return n_fail_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * SDC (SDRAM controller) field context.
+ * Populated once by nthw_sdc_init(); accessors use the cached handles.
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_sdc;	/* resolved SDC module */
+	int mn_instance;	/* module instance number */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* counter and fill-level fields */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+/* Allocate a zeroed context; returns NULL on allocation failure. */
+nthw_sdc_t *nthw_sdc_new(void);
+/* Resolve module/field handles; 0 on success, -1 on error. */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+/* Release a context returned by nthw_sdc_new(); NULL is accepted. */
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/*
+ * Allocate and zero-initialize a Si5340 context.
+ * Returns NULL on allocation failure; free with nthw_si5340_delete().
+ */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc() zero-fills, replacing the malloc()+memset() pair */
+	nthw_si5340_t *p = calloc(1, sizeof(nthw_si5340_t));
+
+	return p;
+}
+
+/*
+ * Bind the context to an IIC controller and device address, and select
+ * register page 0 (caching the selection in m_si5340_page).
+ * Always returns 0; the result of the page-select write is not checked.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1; /* presumably "no clock profile selected yet" — confirm */
+
+	/* Start from a known register page (0) and remember it */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Release a Si5340 context; a NULL pointer is accepted and ignored. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the context before releasing it */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ *
+ * reg_addr: high byte selects the register page, low byte the in-page offset.
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page; switch and cache it if not */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ *
+ * reg_addr: high byte selects the register page, low byte the in-page offset.
+ * Always returns 0; I2C errors are not propagated.
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page; switch and cache it if not */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Write a clock-profile table to the device, entry by entry, verifying each
+ * write by reading the register back.
+ *
+ * p_data points to an array of data_cnt entries whose layout is selected by
+ * data_format (fmt1 or fmt2). Returns 0 on success, -1 on unknown format or
+ * readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* Decode the current entry and advance past it */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		/* Verify the write took effect */
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile, then poll (up to 5 times, 1 s apart) until the
+ * device reports lock/calibration, and finally read out the design ID.
+ * Returns 0 on success, -1 if the device never reports a clean status.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	/* Result deliberately ignored; the status poll below decides success */
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		/* NOTE(review): mask 0x09 on regs 0x0c/0x11 presumably covers
+		 * SYSINCAL/LOL bits — verify against the Si5340 reference manual
+		 */
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00); /* clear sticky flags */
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Read the NUL-terminated 8-character design identifier */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: configure from a format-1 profile table */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: configure from a format-2 profile table */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Si5340 context: bound IIC channel and address plus cached register page */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr; /* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;
+	int mn_clk_cfg; /* set to -1 at init — presumably "no profile"; confirm */
+	uint8_t m_si5340_page; /* last selected register page (reg addr high byte) */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/*
+ * Allocate and zero-initialize a SPI v3 context.
+ * Returns NULL on allocation failure; free with nthw_spi_v3_delete().
+ */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc() zero-fills, replacing the malloc()+memset() pair */
+	nthw_spi_v3_t *p = calloc(1, sizeof(nthw_spi_v3_t));
+
+	return p;
+}
+
+/* Release a SPI v3 context and its owned SPIM/SPIS sub-modules; NULL is ignored */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p) {
+		if (p->mp_spim_mod) {
+			nthw_spim_delete(p->mp_spim_mod);
+			p->mp_spim_mod = NULL;
+		}
+
+		if (p->mp_spis_mod) {
+			nthw_spis_delete(p->mp_spis_mod);
+			p->mp_spis_mod = NULL;
+		}
+
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+		free(p);
+	}
+}
+
+/*
+ * Set the transfer timeout. The value is compared against deltas of
+ * NT_OS_GET_TIME_MONOTONIC_COUNTER() — units are those of that counter;
+ * NOTE(review): init passes 1, confirm intended unit. Always returns 0.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this module (always 3) */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ * Polls the SPIM TX-FIFO-empty flag every 1 ms; returns 0 when the FIFO
+ * drains, the underlying error code on query failure, or -1 on timeout
+ * (time_out measured in monotonic-counter units).
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ * Polls the SPIS RX-FIFO-empty flag every 10 ms; returns 0 when data is
+ * available, the underlying error code on query failure, or -1 on timeout
+ * (time_out measured in monotonic-counter units).
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			/* Fixed: message previously named a non-existent
+			 * "nthw_spis_get_rx_empty"; log the actual callee
+			 */
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Log `count` bytes as hex, 16 bytes per log line (debug builds only) */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		/* snprintf bounds the write; 16 entries * 3 chars fits in 128 */
+		snprintf(&tmp_str[j * 3], sizeof(tmp_str) - (size_t)(j * 3),
+			 "%02X ", *(p_data++));
+		j++;
+
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0';
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Create, initialize and reset the SPIM (master) and SPIS (slave) sub-modules
+ * for this SPI v3 channel. Failures are logged but initialization continues;
+ * the return value is the result of the final reset step.
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Fixed: this path previously logged "nthw_spis_init failed" */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Fixed: this path previously logged "nthw_spim_init failed" */
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ *
+ * A 4-byte header (opcode + payload size) precedes the payload in each
+ * direction; all 32-bit words go over the wire big-endian (htonl/ntohl).
+ * On entry rx_buf->size is the receive-buffer capacity; on return it holds
+ * the number of payload bytes actually received. Returns 0 on success,
+ * -1 if the peer reports an error code, 1 if the reply exceeds the buffer,
+ * or the underlying FIFO error/timeout code.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	/* Header first, big-endian on the wire */
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Push the payload into the Tx FIFO in 32-bit words; the final
+		 * word is zero-padded when the payload is not a multiple of 4
+		 */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0; /* becomes the received-byte count below */
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the on-board AVR controller (SPI v3 header field) */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM Vital Product Data layout (packed field-by-field, CRC last) */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (ascii) (e.g. "ntmainb1e2" or "ntfront20b1")
+	 * NOTE(review): original comment said 10 bytes but GEN2_BNAME_SIZE is 14 — confirm
+	 */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identity (NUL-terminated strings, unpacked from the VPD) */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6]; /* base MAC address */
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Buffer descriptor for nthw_spi_v3_transfer(); on Rx, `size` is capacity
+ * on entry and received-byte count on return
+ */
+struct tx_rx_buf {
+	uint16_t size;
+	void *p_buf;
+};
+
+/* SPI v3 channel: owns one SPIM (master) and one SPIS (slave) sub-module */
+struct nthw__spi__v3 {
+	int m_time_out; /* transfer timeout, monotonic-counter units — confirm */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/*
+ * Allocate and zero-initialize a SPIM context.
+ * Returns NULL on allocation failure; free with nthw_spim_delete().
+ */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc() zero-fills, replacing the malloc()+memset() pair */
+	nthw_spim_t *p = calloc(1, sizeof(nthw_spim_t));
+
+	return p;
+}
+
+/*
+ * Bind a SPIM context to FPGA module instance n_instance and resolve all
+ * register/field handles. Returns 0 on success, -1 if the instance does not
+ * exist. Called with p == NULL it only probes for the module's presence.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	/* Probe mode: report presence without initializing a context */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Release a SPIM context; a NULL pointer is accepted and ignored. */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the context before releasing it */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIM module by writing the documented magic value to SRR.RST */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIM enable bit (read-modify-write of CR.EN). Returns 0. */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM Tx FIFO via the DTR register. Returns 0. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/* Report whether the SPIM Tx FIFO is empty (SR.TXEMPTY). Returns 0. */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* Any non-zero TXEMPTY field value maps to true */
+	*pb_empty = (field_get_updated(p->mp_fld_sr_txempty) != 0);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/* Handle for one SPIM (SPI master) FPGA module instance */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR: soft reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR: status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR/DRR: Tx and Rx data registers */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	/* CFG: configuration (prescaler) */
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/*
+ * Allocate and zero-initialize a SPIS context.
+ * Returns NULL on allocation failure; free with nthw_spis_delete().
+ */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc() zero-fills, replacing the malloc()+memset() pair */
+	nthw_spis_t *p = calloc(1, sizeof(nthw_spis_t));
+
+	return p;
+}
+
+/*
+ * Bind a SPIS context to FPGA module instance n_instance and resolve all
+ * register/field handles. Returns 0 on success, -1 if the instance does not
+ * exist. Called with p == NULL it only probes for the module's presence.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	/* Probe mode: report presence without initializing a context */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* RAM_CTRL/RAM_DATA: sensor-result RAM access (see nthw_spis_read_sensor) */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/* Release a SPIS context; a NULL pointer is accepted and ignored. */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the context before releasing it */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIS module by writing the documented magic value to SRR.RST */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIS enable bit (read-modify-write of CR.EN). Returns 0. */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report whether the SPIS Rx FIFO is empty (SR.RXEMPTY). Returns 0. */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	/* Any non-zero RXEMPTY field value maps to true */
+	*pb_empty = (field_get_updated(p->mp_fld_sr_rxempty) != 0);
+
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS Rx FIFO via the DRR register. Returns 0. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM: program the RAM
+ * address/count, then fetch the data register. Returns 0.
+ * NOTE(review): the data register is read immediately after flushing the
+ * control register — assumes the RAM lookup completes in between; confirm.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/* Handle for one SPIS (SPI slave) FPGA module instance */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR: soft reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR: status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR/DRR: Tx and Rx data registers */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* Sensor-result RAM access */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate a zero-initialized TSM context.
+ * Returns NULL on allocation failure; caller owns the object and must
+ * release it with nthw_tsm_delete().
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc == malloc + zero-fill in one step */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/*
+ * Release a TSM context previously obtained from nthw_tsm_new().
+ * Safe to call with NULL. The object is scrubbed before being freed.
+ */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TSM context to FPGA module instance n_instance and resolve all
+ * register/field handles used by the other nthw_tsm_* functions.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL this acts as a probe: it only reports whether
+ * the instance exists (0) or not (-1), without logging.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* Timestamp format configuration */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		/* Timer T0/T1 enable bits */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		/* Timer T0/T1 max-count (period) registers */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Time and timestamp split across LO/HI register pairs.
+		 * NOTE(review): each register is looked up twice (p_reg and
+		 * the stored mp_reg_*) - redundant but harmless.
+		 */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit timestamp (TS_HI:TS_LO) from the TSM module.
+ *
+ * @param p    Initialized TSM context.
+ * @param p_ts Output; receives (hi << 32) | lo. Must not be NULL.
+ * @return 0 on success, -1 if p_ts is NULL.
+ *
+ * NOTE(review): LO and HI are two separate register reads, so the combined
+ * value is not read atomically - a carry between the reads could be seen.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The early return above guarantees p_ts is non-NULL here; the
+	 * original redundant "if (p_ts)" re-check has been removed.
+	 */
+	*p_ts = ((((uint64_t)n_ts_hi) << 32UL) | n_ts_lo);
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit current time (TIME_HI seconds : TIME_LO ns fields) from
+ * the TSM module.
+ *
+ * @param p      Initialized TSM context.
+ * @param p_time Output; receives (hi << 32) | lo. Must not be NULL.
+ * @return 0 on success, -1 if p_time is NULL.
+ *
+ * NOTE(review): LO and HI are two separate register reads; the pair is not
+ * sampled atomically.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The early return above guarantees p_time is non-NULL here; the
+	 * original redundant "if (p_time)" re-check has been removed.
+	 */
+	*p_time = ((((uint64_t)n_time_hi) << 32UL) | n_time_lo);
+
+	return 0;
+}
+
+/*
+ * Program the TSM current time from a 64-bit value: low word to TIME_LO,
+ * high word to TIME_HI. Always returns 0.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	const uint32_t n_lo = (uint32_t)(n_time & 0xFFFFFFFF);
+	const uint32_t n_hi = (uint32_t)((n_time >> 32) & 0xFFFFFFFF);
+
+	field_set_val_flush32(p->mp_fld_time_lo, n_lo);
+	field_set_val_flush32(p->mp_fld_time_hi, n_hi);
+	return 0;
+}
+
+/*
+ * Enable or disable timer T0 via its TIMER_CTRL enable bit.
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	/* Refresh the shadow register before flipping the single bit */
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/*
+ * Set the period of timer T0 (stat toggle timer).
+ * n_timer_val is in ns (typically 50*1000*1000). Always returns 0.
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *const fld = p->mp_fld_timer_timer_t0_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/*
+ * Enable or disable timer T1 via its TIMER_CTRL enable bit.
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	/* Refresh the shadow register before flipping the single bit */
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/*
+ * Set the period of timer T1 (keep alive timer).
+ * n_timer_val is in ns (typically 100*1000*1000). Always returns 0.
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *const fld = p->mp_fld_timer_timer_t1_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/*
+ * Select the timestamp format in TSM_CONFIG.
+ * 0x1: Native - 10ns units, start date: 1970-01-01.
+ * Always returns 0.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	nt_field_t *const fld = p->mp_fld_config_ts_format;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * NTHW handle for the TSM (time stamp module) FPGA module.
+ * Field pointers are resolved by nthw_tsm_init() and used by the
+ * accessor functions declared below.
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_tsm;	/* resolved TSM module instance */
+	int mn_instance;	/* module instance number */
+
+	nt_field_t *mp_fld_config_ts_format;	/* TSM_CONFIG.TS_FORMAT */
+
+	/* Timer enable bits (TSM_TIMER_CTRL) */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;	/* T0 period */
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;	/* T1 period */
+
+	/* 64-bit timestamp, split across LO/HI registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* 64-bit current time, split across LO (ns) / HI (sec) registers */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+/* Lifecycle */
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Time/timestamp access; all return 0 on success, -1 on bad argument */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+/* Timer T0/T1 control */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate a zero-initialized DBS context.
+ * Returns NULL on allocation failure; release with nthw_dbs_delete().
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc == malloc + zero-fill in one step */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/*
+ * Release a DBS context previously obtained from nthw_dbs_new().
+ * Safe to call with NULL. The object is scrubbed before being freed.
+ */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a DBS context to FPGA module instance n_instance and resolve all
+ * register/field handles used by the rest of this file.
+ *
+ * Mandatory registers/fields use module_get_register()/register_get_field();
+ * optional ones (FPGA-revision dependent) use the *_query_* variants and may
+ * legitimately remain NULL - callers check before use.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL this acts as a probe: it only reports whether
+ * the instance exists (0) or not (-1).
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* Sanity: the module was found, so the product parameter should agree */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* Global RX/TX control registers */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* Queue init/ptr/idle registers (init-val, ptr and idle are optional) */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* Avail-monitor (AM) RAM access: control + data window, RX then TX */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* Used-writer (UW) RAM access */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* Descriptor-reader (DR) RAM access */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* TX queue property (QP) RAM access */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Restore DBS_RX_CONTROL to its default state: all queues and monitors
+ * disabled, scan/update speeds set to their defaults (8/5).
+ * Always returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Restore DBS_TX_CONTROL to its default state: all queues and monitors
+ * disabled. Always returns 0.
+ * NOTE(review): the scan/update speeds here (5/8) are the mirror image of
+ * the RX defaults (8/5) - confirm against the FPGA spec that this is
+ * intentional and not a transposition.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: restore RX/TX control registers to defaults, then zero
+ * every per-queue RAM bank (AM, UW, DR, QP, QoS) together with its shadow
+ * copy, for all RX and TX queues.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the global DBS_RX_CONTROL register in one flush.
+ *
+ * @param last_queue           Highest active RX queue index.
+ * @param avail_monitor_enable Enable the avail-monitor scanner (AME).
+ * @param avail_monitor_speed  Avail-monitor scan speed (AMS).
+ * @param used_write_enable    Enable the used-ring writer (UWE).
+ * @param used_write_speed     Used-writer update speed (UWS).
+ * @param rx_queue_enable      Global RX queue enable (QE).
+ * @return always 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the current DBS_RX_CONTROL field values (from the cached
+ * register shadow - no bus read is forced here) into the output pointers.
+ * All pointers must be non-NULL. Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the global DBS_TX_CONTROL register in one flush.
+ *
+ * @param last_queue           Highest active TX queue index.
+ * @param avail_monitor_enable Enable the avail-monitor scanner (AME).
+ * @param avail_monitor_speed  Avail-monitor scan speed (AMS).
+ * @param used_write_enable    Enable the used-ring writer (UWE).
+ * @param used_write_speed     Used-writer update speed (UWS).
+ * @param tx_queue_enable      Global TX queue enable (QE).
+ * @return always 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* was missing relative to the RX twin - print all six parameters */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the current DBS_TX_CONTROL field values (from the cached
+ * register shadow - no bus read is forced here) into the output pointers.
+ * All pointers must be non-NULL. Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Start (or stop) initialization of an RX queue. Optionally presets the
+ * start index/pointer via DBS_RX_INIT_VAL first - that register is only
+ * present on some FPGA revisions, hence the NULL check.
+ * Always returns 0; completion is polled via get_rx_init().
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/*
+ * Read the RX init status fields (init flag, queue, busy) from the cached
+ * register shadow. Always returns 0.
+ */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Start (or stop) initialization of a TX queue. Optionally presets the
+ * start index/pointer via DBS_TX_INIT_VAL first - that register is only
+ * present on some FPGA revisions, hence the NULL check.
+ * Always returns 0; completion is polled via get_tx_init().
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/*
+ * Read the TX init status fields (init flag, queue, busy) from the cached
+ * register shadow. Always returns 0.
+ */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for an RX queue via the optional DBS_RX_IDLE register.
+ * Returns -ENOTSUP when the register is absent on this FPGA, else 0.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle/busy flags (forcing fresh register reads).
+ * Returns -ENOTSUP when the register is absent on this FPGA, else 0.
+ * NOTE(review): *queue is always set to 0 rather than read from the
+ * QUEUE field - presumably the field is write-only; confirm.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for a TX queue via the optional DBS_TX_IDLE register.
+ * Returns -ENOTSUP when the register is absent on this FPGA, else 0.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle/busy flags (forcing fresh register reads).
+ * Returns -ENOTSUP when the register is absent on this FPGA, else 0.
+ * NOTE(review): *queue is always set to 0 rather than read from the
+ * QUEUE field - presumably the field is write-only; confirm.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the optional DBS_RX_PTR register reports on.
+ * Returns -ENOTSUP when the register is absent on this FPGA, else 0.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX queue pointer/valid flags for the queue previously selected
+ * with set_rx_ptr_queue() (forcing fresh register reads).
+ * Returns -ENOTSUP when the register is absent on this FPGA, else 0.
+ * NOTE(review): *queue is always set to 0 - confirm against the spec.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the optional DBS_TX_PTR register reports on.
+ * Returns -ENOTSUP when the register is absent on this FPGA, else 0.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the latched TX ring pointer and its valid flag for the queue last
+ * selected with set_tx_ptr_queue(). The queue output is hard-wired to 0;
+ * the queue field is not read back. Returns -ENOTSUP if TX_PTR is absent.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Program the RX avail-monitor RAM address (and a count of 1), then flush. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/*
+ * Shadow-bank helpers: each set_shadow_rx_am_* call updates only the
+ * driver-side copy of the indexed RX_AM_DATA bank; nothing reaches the
+ * hardware until flush_rx_am_data() is called.
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Update every field of one RX_AM_DATA shadow entry in a single call. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write shadow entry 'index' to the RX_AM_DATA register bank and flush.
+ * The 64-bit guest address is passed as two 32-bit words via a pointer
+ * cast -- presumably assumes host word order matches the field layout;
+ * TODO confirm. The packed/int field handles may be NULL (fields absent
+ * in some FPGA images), hence the guards.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Configure one RX available-monitor entry: update the shadow copy, then
+ * write it through to hardware. Returns -ENOTSUP when the RX_AM_DATA
+ * register bank is not present in this FPGA image.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Program the TX avail-monitor RAM address (and a count of 1), then flush. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Update the driver-side shadow of one TX_AM_DATA entry (no HW access). */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Write shadow entry 'index' to the TX_AM_DATA register bank and flush.
+ * Mirrors flush_rx_am_data(); packed/int fields are optional and guarded.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Configure one TX available-monitor entry: update the shadow copy, then
+ * write it through to hardware. Returns -ENOTSUP when the TX_AM_DATA
+ * register bank is not present in this FPGA image.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Program the RX used-writer RAM address (and a count of 1), then flush. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/*
+ * Shadow-bank helpers: each set_shadow_rx_uw_* call updates only the
+ * driver-side copy of the indexed RX_UW_DATA bank; nothing reaches the
+ * hardware until flush_rx_uw_data() is called.
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Update every field of one RX_UW_DATA shadow entry in a single call. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write shadow entry 'index' to the RX_UW_DATA register bank and flush.
+ * The shadow queue_size holds the log2 of the ring size: on DBS >= 0.8 the
+ * hardware is programmed with (2^n - 1), presumably a ring mask -- TODO
+ * confirm; older versions take the raw log2 value. The interrupt-related
+ * fields (int/vec/istk) exist only on some FPGA images, hence the guard.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Configure one RX used-writer entry: update the shadow copy, then write
+ * it through to hardware. queue_size is the log2 of the ring size (see
+ * flush_rx_uw_data()). Returns -ENOTSUP when RX_UW_DATA is absent.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Program the TX used-writer RAM address (and a count of 1), then flush. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/*
+ * Shadow-bank helpers: each set_shadow_tx_uw_* call updates only the
+ * driver-side copy of the indexed TX_UW_DATA bank; nothing reaches the
+ * hardware until flush_tx_uw_data() is called.
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Update every field of one TX_UW_DATA shadow entry in a single call. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write shadow entry 'index' to the TX_UW_DATA register bank and flush.
+ * Mirrors flush_rx_uw_data(): queue_size is log2 in the shadow; DBS >= 0.8
+ * is programmed with (2^n - 1). Optional fields (packed, int/vec/istk,
+ * in_order) are guarded because they are absent in some FPGA images.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Configure one TX used-writer entry: update the shadow copy, then write
+ * it through to hardware. queue_size is the log2 of the ring size (see
+ * flush_tx_uw_data()). Returns -ENOTSUP when TX_UW_DATA is absent.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Program the RX descriptor-reader RAM address (count 1), then flush. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/*
+ * Shadow-bank helpers: each set_shadow_rx_dr_* call updates only the
+ * driver-side copy of the indexed RX_DR_DATA bank; nothing reaches the
+ * hardware until flush_rx_dr_data() is called.
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Update every field of one RX_DR_DATA shadow entry in a single call. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write shadow entry 'index' to the RX_DR_DATA register bank and flush.
+ * queue_size is log2 in the shadow; DBS >= 0.8 is programmed with
+ * (2^n - 1). The packed field is optional and guarded.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Configure one RX descriptor-reader entry: update the shadow copy, then
+ * write it through to hardware. Returns -ENOTSUP when RX_DR_DATA is absent.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Program the TX descriptor-reader RAM address (count 1), then flush. */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/*
+ * Shadow-bank helpers: each set_shadow_tx_dr_* call updates only the
+ * driver-side copy of the indexed TX_DR_DATA bank; nothing reaches the
+ * hardware until flush_tx_dr_data() is called.
+ */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/* Update every field of one TX_DR_DATA shadow entry in a single call. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write shadow entry 'index' to the TX_DR_DATA register bank and flush.
+ * queue_size is log2 in the shadow; DBS >= 0.8 is programmed with
+ * (2^n - 1). The packed field is optional and guarded; header and port
+ * are always present.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Configure one TX descriptor-reader entry: update the shadow copy, then
+ * write it through to hardware. Returns -ENOTSUP when TX_DR_DATA is absent.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Program the TX queue-property RAM address (count 1), then flush. */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow setter: no hardware access until flush_tx_qp_data(). */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+/* Update the single field of one TX_QP_DATA shadow entry. */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write shadow entry 'index' to the TX_QP_DATA register bank and flush. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Bind a TX queue to a virtual port: update the shadow copy, then write it
+ * through to hardware. Returns -ENOTSUP when TX_QP_DATA is absent.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the TX QoS RAM address (count 1), then flush.
+ * NOTE(review): these adr/cnt handles are nt_field_t pointers but are named
+ * mp_reg_* in struct nthw_dbs_s, unlike the mp_fld_* convention used by the
+ * other banks -- consider renaming for consistency.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/*
+ * Shadow-bank helpers: each set_shadow_tx_qos_* call updates only the
+ * driver-side copy of the indexed TX_QOS_DATA bank; nothing reaches the
+ * hardware until flush_tx_qos_data() is called.
+ */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Update every field of one TX_QOS_DATA shadow entry in a single call. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write shadow entry 'index' to the TX_QOS_DATA register bank and flush. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Configure one TX QoS entry (enable, ir, bs): update the shadow copy,
+ * then write it through to hardware. The exact semantics of ir/bs are not
+ * visible here -- presumably information rate and burst size; TODO confirm.
+ * Returns -ENOTSUP when TX_QOS_DATA is absent.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate as a mul/div pair and flush.
+ * Returns -ENOTSUP when the TX_QOS_RATE register is absent.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of ring size (see flush_rx_uw_data()) */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of ring size (see flush_tx_uw_data()) */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of ring size (see flush_rx_dr_data()) */
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size; /* log2 of ring size (see flush_tx_dr_data()) */
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* DBS_TX_QOS_DATA */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * DBS module instance: FPGA/module handles, register and field handles for
+ * every DBS register, and driver-side shadow copies of the indexed
+ * register banks. Optional field pointers may be NULL when the field is
+ * absent in the loaded FPGA image; accessors guard against that.
+ * NOTE(review): the qos adr/cnt/en/ir/bs/mul/div members are nt_field_t
+ * handles but are named mp_reg_* -- inconsistent with the mp_fld_* naming
+ * used elsewhere in this struct.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Driver-side copies of the indexed register banks (see nthw_dbs.c). */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Lifetime: allocate, probe/init against an FPGA instance, reset, free. */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/*
+ * Register accessors. Setters return 0 on success or -ENOTSUP when the
+ * backing register is absent in the loaded FPGA image; getters that read
+ * optional registers behave the same.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif	/* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Port classification used when selecting which register table to program. */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Hardware identification data plus vital-product-data (MAC assignment). */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	/* VPD: MAC address range assigned to this adapter */
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-device probe results: FPGA identity/counts, instantiated module
+ * handles, and the PCI mapping data needed for register access and DMA.
+ */
+typedef struct fpga_info_s {
+	/* Packed FPGA identity; decomposed into the fields below */
+	uint64_t n_fpga_ident;
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Capability counts read from the FPGA image */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	/* Core module handles created during probe */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/*
+ * Allocate a zero-initialized EPP instance.
+ * Returns NULL on allocation failure; release with nthw_epp_delete().
+ */
+nthw_epp_t *nthw_epp_new(void)
+{
+	/* calloc produces the same zeroed object as malloc+memset in one step */
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/* Release an EPP instance created by nthw_epp_new(); NULL is a no-op. */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the object before handing it back to the allocator */
+	memset(p, 0, sizeof(nthw_epp_t));
+	free(p);
+}
+
+/* Probe for the given EPP module instance without initializing anything. */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	/* A NULL object turns nthw_epp_init() into a pure existence check */
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0 ? 1 : 0;
+}
+
+/*
+ * Resolve the EPP module instance and cache all register/field handles in *p.
+ * When called with p == NULL this only probes for the module's existence
+ * (see nthw_epp_present()).
+ * Returns 0 on success, -1 when the module instance is not in the FPGA.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory: control (address/count) + data (per-recipe settings) */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Per-physical-port (TXP) MTU table */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Per-queue MTU table */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* TX-port QoS shaping table */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual-port QoS shaping table */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue -> virtual-port mapping table */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * One-time EPP initialization: clear every recipe category, program the
+ * NRECIPE default recipes, set the default MTU everywhere and disable QoS
+ * shaping on all ports and queues. A NULL object is accepted and ignored.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one recipe-memory entry per flush */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Program the default recipes (size adjust values from the header tables) */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	/* NOTE(review): loop bound 2 presumably equals the physical port count - confirm */
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup - shaping disabled by default */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	/* NOTE(review): loop bound 128 presumably equals the queue count - confirm */
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup - shaping disabled by default */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ * Physical ports use the TXP MTU table, virtual ports the queue MTU table;
+ * any other port type resets both tables and returns -ENOTSUP.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the TXP MTU table entry for this port */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue MTU table entry for this port */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Return both MTU register pairs to their reset state */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Program TX-port QoS shaping for one physical port.
+ * An all-zero rate/fraction/burst combination disables shaping for the port.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	const uint32_t qos_active =
+		(information_rate != 0 || information_rate_fractional != 0 ||
+		 burst_size != 0) ? 1 : 0;
+
+	/* Select the table entry, then write the shaping parameters */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	field_set_val32(p->mp_fld_txp_qos_data_enable, qos_active);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Program QoS shaping for one virtual port.
+ * An all-zero rate/fraction/burst combination disables shaping for the port.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	const uint32_t qos_active =
+		(information_rate != 0 || information_rate_fractional != 0 ||
+		 burst_size != 0) ? 1 : 0;
+
+	/* Select the table entry, then write the shaping parameters */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	field_set_val32(p->mp_fld_vport_qos_data_enable, qos_active);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/* Map one queue id to a virtual port in the queue->vport table. */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the queue entry */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	/* Write the target virtual port */
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN encapsulation adds 50 extra bytes (70 with an IPv6 underlay) */
+#define VXLANDATASIZEADJUST 50
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Default MTU programmed by nthw_epp_setup() */
+#define MTUINITVAL 1500
+#define NRECIPE 3
+
+/*
+ * List of size adjust values to put in the recipe memory data register at
+ * startup. NOTE(review): static definitions in a header give every including
+ * translation unit its own copy of these tables.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * EPP module state: module handle plus cached register/field handles filled
+ * in by nthw_epp_init(). (The "reciepe" spelling is kept to match existing
+ * field names.)
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	/* Recipe memory */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Physical-port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* Physical-port QoS table */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual-port QoS table */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue -> virtual-port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated table of supported FPGA image descriptions */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/*
+ * Module id -> printable name map, sorted by name.
+ * The { 0L, NULL } entry is the end-of-table sentinel.
+ */
+static const struct {
+	const int a;   /* module id (MOD_*) */
+	const char *b; /* printable name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+/* Index 0 is the "unknown" placeholder; valid indices are 0..ARRAY_SIZE-1. */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Translate a BUS_TYPE_* id to a short printable name.
+ * Returns "ERR" for ids outside the known range.
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	/*
+	 * Valid indices are 0..ARRAY_SIZE-1; the previous "<=" bound allowed
+	 * a one-past-the-end read of a_bus_type[].
+	 */
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array.
+ * Uses naive linear search as performance is not an issue here.
+ * Returns "unknown" when the id is not in the table.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	/*
+	 * Iterate the valid indices only; the previous "<=" bound walked one
+	 * entry past the end of sa_nthw_fpga_mod_map[] for unknown ids.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id &&
+				sa_nthw_fpga_mod_map[i].b != NULL)
+			return sa_nthw_fpga_mod_map[i].b;
+	}
+	return "unknown";
+}
+
+/*
+ * Read "len" 32-bit words starting at "addr" into p_data, dispatching on
+ * bus type: BAR/PCI use the plain register interface (single word only),
+ * RAB0..2 use the register access bus. Returns 0 on success, negative on
+ * failure; asserts on unknown bus types.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* this path reads exactly one word */
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Same as read_data() but with optional timestamp-counter sampling hooks.
+ * Timestamps are currently not captured; both output parameters are ignored.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Write "len" 32-bit words from p_data starting at "addr", dispatching on
+ * bus type exactly like read_data(). Returns 0 on success, negative on
+ * failure; asserts on unknown bus types.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* this path writes exactly one word */
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/*
+ * Allocate an FPGA manager instance.
+ * Zero-initialized so no field is ever read as garbage (the previous
+ * version returned uninitialized memory); returns NULL on failure.
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_mgr_t));
+}
+
+/* Destroy an FPGA manager instance; NULL is accepted and ignored. */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	/* Guard like the other *_delete() helpers; memset(NULL, ...) is UB */
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/* Bind the manager to the generated FPGA instance table and count entries. */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	/* The table is NULL-terminated; count entries up to the sentinel */
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Find the generated FPGA description matching the product/version/revision
+ * encoded in n_fpga_id and build an initialized nt_fpga_t from it.
+ * Returns NULL (after logging) when no description matches.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id != n_fpga_prod ||
+				p_init->fpga_version != n_fpga_ver ||
+				p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print the table of supported FPGA images to fh_out.
+ * detail_level 0 prints one "product-version-revision" line per image;
+ * any other level adds the build time (raw hex and human readable).
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/*
+			 * The build time is cast explicitly so the argument
+			 * matches %lX (time_t's representation varies), and
+			 * the format adds no newline because ctime() already
+			 * terminates its result with one.
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Log every known FPGA image description at debug level. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused presumably silences the warning when NT_LOG is a no-op */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA object; returns NULL on failure. */
+nt_fpga_t *fpga_new(void)
+{
+	nt_fpga_t *p_fpga = malloc(sizeof(nt_fpga_t));
+
+	if (p_fpga != NULL)
+		memset(p_fpga, 0, sizeof(nt_fpga_t));
+
+	return p_fpga;
+}
+
+/* Free an FPGA object; tolerates NULL like the other *_delete helpers. */
+void fpga_delete(nt_fpga_t *p)
+{
+	/* memset(NULL, ...) is UB, so guard first */
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/*
+ * Destroy an FPGA object together with the module objects it owns.
+ * NOTE(review): the mpa_params/mpa_modules arrays and the param objects do
+ * not appear to be freed here - confirm the teardown/ownership path.
+ */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate an FPGA object from a generated product description:
+ * copy the identity fields, then instantiate all product parameters and
+ * module objects.
+ * NOTE(review): on allocation failure mpa_params/mpa_modules stay NULL while
+ * mn_params/mn_modules are non-zero - confirm the intended OOM policy.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	/* Identity fields copied verbatim from the generated description */
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		/* calloc zero-fills and overflow-checks the array size */
+		p->mpa_params = calloc(p->mn_params, sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			calloc(fpga_prod_init->nb_modules, sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+/* Propagate a debug-mode setting to the FPGA object and all its modules. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_module = p->mpa_modules[n];
+
+		if (p_module != NULL)
+			module_set_debug_mode(p_module, n_debug_mode);
+	}
+}
+
+/*
+ * Look up a module object by module id and instance number.
+ * Returns NULL when the FPGA image has no such module instance.
+ */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_module = p->mpa_modules[n];
+
+		if (p_module->m_mod_id != id)
+			continue;
+		if (p_module->m_instance == instance)
+			return p_module;
+	}
+	return NULL;
+}
+
+/* True when the FPGA image contains the given module id/instance. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/*
+ * Find the generated init descriptor for a module id/instance.
+ * Returns NULL when the descriptor table has no such entry.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mp_init->nb_modules; n++) {
+		nt_fpga_module_init_t *p_entry = &p->mp_init->modules[n];
+
+		if (p_entry->id == id && p_entry->instance == instance)
+			return p_entry;
+	}
+	return NULL;
+}
+
+/*
+ * Read a product parameter value by id.
+ * Falls back to n_default_value when the parameter is not present.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int n;
+
+	for (n = 0; n < p->mn_params; n++) {
+		const nt_param_t *p_param = p->mpa_params[n];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
/*
 * Log a one-line identification of the FPGA image:
 * item-product-version-revision-patch-build (build time).
 * The parameter is marked _unused for builds where NT_LOG compiles out.
 */
void fpga_log_info(const nt_fpga_t *p _unused)
{
	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
}
+
/*
 * Debug-dump the complete FPGA model: a summary line followed by all
 * parameters and all modules (which in turn dump registers and fields).
 */
void fpga_dump(const nt_fpga_t *p)
{
	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
	       p->mn_params, p->mn_modules);
	fpga_dump_params(p);
	fpga_dump_modules(p);
}
+
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_par = p->mpa_params[i];
+
+		param_dump(p_par);
+	}
+}
+
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		module_dump(p_mod);
+	}
+}
+
+/*
+ * Param
+ */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = malloc(sizeof(nt_param_t));
+	return p;
+}
+
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
/* Debug-dump one parameter (id and value). */
void param_dump(const nt_param_t *p _unused)
{
	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
	       p->param_value);
}
+
+/*
+ * Module
+ */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = malloc(sizeof(nt_module_t));
+	return p;
+}
+
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
/*
 * Initialize a module by (mod_id, instance) lookup instead of an explicit
 * descriptor, then force the given debug mode.
 *
 * NOTE(review): fpga_lookup_init() may return NULL for an unknown
 * id/instance, and module_init() dereferences p_init unconditionally -
 * confirm all callers pass known modules.
 */
void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
		  int debug_mode)
{
	nt_fpga_module_init_t *p_init = NULL;

	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
	module_init(p, p_fpga, p_init);

	/* set debug mode after regular init... */
	p->m_debug_mode = debug_mode;
}
+
/*
 * Debug-dump one module: identity/version/bus summary line, then all of
 * its registers.
 */
void module_dump(const nt_module_t *p)
{
	NT_LOG(DBG, NTHW,
	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
	       p->mn_registers);
	module_dump_registers(p);
}
+
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int i;
+	nt_register_t *p_register = NULL;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		if (p->mpa_registers[i]->m_id == id) {
+			p_register = p->mpa_registers[i];
+			break;
+		}
+	}
+	return p_register;
+}
+
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_register_t *p_register = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		p_register = p->mpa_registers[i];
+		if (p_register)
+			register_set_debug_mode(p_register, n_debug_mode);
+	}
+}
+
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+void module_unsuppported(const nt_module_t *p)
+{
+	NT_LOG(ERR, NTHW, "Module %d not supported", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = malloc(sizeof(nt_register_t));
+	return p;
+}
+
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+
+	if (p->mp_shadow)
+		free(p->mp_shadow);
+
+	if (p->mp_dirty)
+		free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
/*
 * Debug-dump one register (id, type, absolute/relative address, word
 * length, bit width), then all of its fields.
 */
void register_dump(const nt_register_t *p)
{
	NT_LOG(DBG, NTHW,
	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
	       p->mn_bit_width);
	register_dump_fields(p);
}
+
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+void register_reset(const nt_register_t *p)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_reset(p_field);
+	}
+}
+
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	if (!p)
+		return NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		if (p->mpa_fields[i]->m_id == id) {
+			p_field = p->mpa_fields[i];
+			break;
+		}
+	}
+	return p_field;
+}
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
/*
 * Accessor: debug mode. NOTE(review): the parameter type is nt_module_t
 * (matching the header prototype) although the name says "register" -
 * looks like a copy of module_get_debug_mode; confirm intent before use.
 */
int register_get_debug_mode(const nt_module_t *p)
{
	return p->m_debug_mode;
}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+static int register_read_data(const nt_register_t *p)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+	return rc;
+}
+
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	rc = read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+
+	return rc;
+}
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (i = 0; i < len; i++)
+		p_data[i] = p->mp_shadow[i];
+}
+
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
/*
 * Refresh the shadow buffer from hardware (skipped for write-only
 * registers). With ON_READ debug mode set, also logs the words read.
 */
void register_update(const nt_register_t *p)
{
	if (p && p->m_type != REGISTER_TYPE_WO) {
		const char *const p_dev_name _unused = "NA";
		const int n_bus_type_id = module_get_bus(p->mp_owner);

		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
		const uint32_t addr _unused = p->m_addr;
		const uint32_t len = p->m_len;
		uint32_t *const p_data = p->mp_shadow;

		register_read_data(p);
		/* m_debug_mode is used as a bitmask here (see ON_READ) */
		if (p->m_debug_mode & ON_READ) {
			uint32_t i = len;

			uint32_t *ptr _unused = p_data;
			NT_LOG(DBG, NTHW,
			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
			       p_dev_name, p_bus_name, addr, len);
			while (i--)
				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
			NT_LOG(DBG, NTHW, ")\n");
		}
	}
}
+
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = malloc(sizeof(nt_field_t));
+	return p;
+}
+
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
/*
 * Initialize a field object from its static description and precompute
 * the masks used by field_get_val()/field_set_val():
 *  - m_front_mask: bits of the field inside its first shadow word,
 *  - m_body_length: number of whole 32-bit words fully covered,
 *  - m_tail_mask: bits of the field inside its last (partial) word.
 */
void field_init(nt_field_t *p, nt_register_t *p_reg,
		const nt_fpga_field_init_t *p_init)
{
	p->mp_owner = p_reg;

	p->m_debug_mode = p_reg->m_debug_mode;

	p->m_id = p_init->id;
	p->mn_bit_width = p_init->bw;
	p->mn_bit_pos_low = p_init->low;
	p->m_reset_val = (uint32_t)p_init->reset_val;
	p->m_first_word = p_init->low / 32;	/* shadow word holding bit 0 */
	p->m_first_bit = p_init->low % 32;	/* bit offset within that word */
	p->m_front_mask = 0;
	p->m_body_length = 0;
	p->mn_words = (p_init->bw + 0x1f) / 0x20;	/* 32-bit words spanned */
	p->m_tail_mask = 0;

	{
		int bits_remaining = p_init->bw;
		/* Bits available in the first word, capped at the field width */
		int front_mask_length = 32 - p->m_first_bit;

		if (front_mask_length > bits_remaining)
			front_mask_length = bits_remaining;
		bits_remaining -= front_mask_length;

		/* 1ULL keeps the shift defined when front_mask_length == 32 */
		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
					   << p->m_first_bit);

		p->m_body_length = bits_remaining / 32;
		bits_remaining -= p->m_body_length * 32;
		/* bits_remaining < 32 here, so this shift is defined */
		p->m_tail_mask = (1 << bits_remaining) - 1;

		if (p->m_debug_mode >= 0x100) {
			NT_LOG(DBG, NTHW,
			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
			       __func__, p_init->id, p_init->low,
			       (p_init->low + p_init->bw), p_init->bw,
			       ((p_init->bw + 31) / 32), p->m_first_word,
			       p->m_first_bit, p->m_front_mask, p->m_body_length,
			       p->m_tail_mask);
		}
	}
}
+
/*
 * Accessor: debug mode. NOTE(review): parameter type is nt_module_t
 * (matching nothing field-specific) - looks copied from
 * module_get_debug_mode; confirm intent.
 */
int field_get_debug_mode(const nt_module_t *p)
{
	return p->m_debug_mode;
}
+
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
/*
 * Extract the field's value from the register shadow into p_data
 * (len must equal mn_words). Uses a 64-bit sliding window: two adjacent
 * shadow words are loaded into w32[0]/w32[1] and shifted right by the
 * field's bit offset so each output word lands in w32[0].
 */
void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
{
	uint32_t i;
	uint32_t data_index = 0;
	uint32_t shadow_index = p->m_first_word;

	union {
		uint32_t w32[2];
		uint64_t w64;
	} buf;

	(void)len;
	assert(len == p->mn_words);

	/* handle front */
	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;

	/* handle body */
	for (i = 0; i < p->m_body_length; i++) {
		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
		/* two-step shift keeps each step < 64 even when
		 * m_first_bit == 0 (a single 64-bit shift would be UB)
		 */
		buf.w64 = buf.w64 >> (p->m_first_bit);
		assert(data_index < len);
		p_data[data_index++] = buf.w32[0];
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
	}

	/* handle tail */
	if (p->m_tail_mask)
		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;

	else
		buf.w32[1] = 0;
	buf.w64 = buf.w64 >> (p->m_first_bit);
	p_data[data_index++] = buf.w32[0];
	if (data_index < p->mn_words)
		p_data[data_index++] = buf.w32[1];
}
+
/*
 * Insert p_data (len must equal mn_words) into the field's position in
 * the register shadow, then mark the register dirty. Mirrors
 * field_get_val(): a 64-bit window shifts the input left to the field's
 * bit offset; front/tail words are merged under their masks, body words
 * are stored whole.
 *
 * NOTE(review): the union-based shifting assumes little-endian word
 * order in w64 - true on the supported targets, confirm if ported.
 */
void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
{
	uint32_t i;
	uint32_t data_index = 0;
	uint32_t shadow_index = p->m_first_word;

	union {
		uint32_t w32[2];
		uint64_t w64;
	} buf;

	(void)len;
	assert(len == p->mn_words);

	/* handle front */
	buf.w32[0] = 0;
	buf.w32[1] = p_data[data_index++];
	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
	p->mp_owner->mp_shadow[shadow_index] =
		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
		(buf.w32[0] & p->m_front_mask);
	shadow_index++;

	/* handle body */
	for (i = 0; i < p->m_body_length; i++) {
		buf.w64 = buf.w64 >> (p->m_first_bit);
		assert(data_index < len);
		buf.w32[1] = p_data[data_index++];
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
	}

	/* handle tail */
	if (p->m_tail_mask) {
		buf.w64 = buf.w64 >> (p->m_first_bit);
		if (data_index < len)
			buf.w32[1] = p_data[data_index];
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
		p->mp_owner->mp_shadow[shadow_index] =
			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
			(buf.w32[0] & p->m_tail_mask);
	}

	register_make_dirty(p->mp_owner);
}
+
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
/*
 * Poll conditions for field_wait_cond32(): wait until all bits clear,
 * all bits set, any bit clear, or any bit set.
 */
enum field_match {
	FIELD_MATCH_CLR_ALL,
	FIELD_MATCH_SET_ALL,
	FIELD_MATCH_CLR_ANY,
	FIELD_MATCH_SET_ANY,
};
+
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (1 << p->mn_bit_width) - 1;
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(((1 << p->mn_bit_width) - 1) << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
/*
 * Poll the field until it equals (n_wait_cond_value & n_wait_cond_mask).
 * -1 selects the defaults (10000 iterations, 100 usec). Returns 0 on
 * match, -1 on timeout.
 *
 * NOTE(review): the mask is applied only to the expected value, not to
 * the value read back - for a "wait for masked value" semantic one would
 * expect (val & mask) on the left too; confirm intended behavior.
 */
int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
			uint32_t n_wait_cond_mask, int n_poll_iterations,
			int n_poll_interval)
{
	if (n_poll_iterations == -1)
		n_poll_iterations = 10000;
	if (n_poll_interval == -1)
		n_poll_interval = 100;

	while (true) {
		uint32_t val = field_get_updated(p);

		if (val == (n_wait_cond_value & n_wait_cond_mask))
			break;
		n_poll_iterations--;
		if (n_poll_iterations <= 0)
			return -1;
		NT_OS_WAIT_USEC(n_poll_interval);
	}
	return 0;
}
+
/* Debug-dump one field: id, bit range, width, word span, reset value. */
void field_dump(const nt_field_t *p _unused)
{
	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
	       p->mn_bit_width, p->mn_words, p->m_reset_val);
}
+
/*
 * Debug-dump the field's current shadow value, most significant word
 * first. The stack buffer caps fields at 32 words (1024 bits);
 * assumes mn_words <= 32 - TODO confirm against the register set.
 */
void field_dump_val(const nt_field_t *p)
{
	int i;
	uint32_t buf[32];

	field_get_val(p, buf, p->mn_words);
	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
	for (i = p->mn_words - 1; i > 0; i--)
		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
	NT_LOG(DBG, NTHW, "\n");
}
+
/* Debug-dump a static field descriptor (id, bit range, reset value). */
void field_dump_init(const nt_fpga_field_init_t *p _unused)
{
	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
}
+
+/*
+ * nthw fpga model helpers
+ */
+
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, n_mod, n_instance);
+	return p_mod;
+}
+
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	nt_register_t *p_reg = module_get_register(p_mod, n_reg);
+	return p_reg;
+}
+
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	nt_field_t *p_fld = register_get_field(p_reg, n_fld);
+	return p_fld;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
#ifndef FPGAID_TO_PRODUCTCODE
/* Unpack an FPGA identifier: [39:32]=type, [31:16]=product, [15:8]=version,
 * [7:0]=revision. Parenthesization normalized; semantics unchanged.
 */
#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)(((fpga_id) >> 32) & 0xFF))
#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)(((fpga_id) >> 16) & 0xFFFF))
#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)(((fpga_id) >> 8) & 0xFF))
#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) & 0xFF))
#endif
+
/* Pack a (major, minor) pair into one 64-bit value, major in the high word. */
#define VERSION_PACKED64(_major_, _minor_) \
	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | \
	 ((uint64_t)(_minor_) & 0xFFFFFFFF))
+
/* Debug trace selector; used as a bitmask (m_debug_mode & ON_READ / ON_WRITE). */
enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
/* Bus types a module's registers may be addressed over. */
enum nthw_bus_type {
	NTHW_BUS_UNKNOWN,
	NTHW_BUS_BAR,
	NTHW_BUS_PCI,
	NTHW_BUS_NMB,
	NTHW_BUS_NDM,
	NTHW_BUS_RAB0,
	NTHW_BUS_RAB1,
	NTHW_BUS_RAB2
};
+
/* Forward declarations of the model hierarchy: fpga > module > register > field. */
struct nt_fpga_s;

struct nt_param_s;

struct nt_module_s;

struct nt_register_s;

struct nt_field_s;
+
/* Registry of all supported FPGA product descriptions. */
struct nt_fpga_mgr_s {
	int mn_fpgas;	/* number of entries in mpa_fpga_prod_init */
	struct nt_fpga_prod_init **mpa_fpga_prod_init;
};

typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
/* Root of the FPGA model: identity plus owned parameter and module lists. */
struct nt_fpga_s {
	struct fpga_info_s *p_fpga_info;	/* device context (bus access) */

	int m_item_id;
	int m_product_id;
	int m_fpga_version;
	int m_fpga_revision;
	int m_fpga_patch_no;
	int m_fpga_build_no;
	uint32_t m_fpga_build_time;

	int mn_params;			/* entries in mpa_params */
	struct nt_param_s **mpa_params;

	int mn_modules;			/* entries in mpa_modules */
	struct nt_module_s **mpa_modules;

	nt_fpga_prod_init_t *mp_init;	/* static product description */

	int m_debug_mode;		/* bitmask of enum debug_mode */
};

typedef struct nt_fpga_s nt_fpga_t;
+
/* One build-time product parameter (id/value pair). */
struct nt_param_s {
	nt_fpga_t *mp_owner;

	int param_id;
	int param_value;

	nt_fpga_prod_param_t *mp_init;	/* static descriptor */
};

typedef struct nt_param_s nt_param_t;
+
/* One FPGA module instance and its register list. */
struct nt_module_s {
	nt_fpga_t *mp_owner;

	int m_mod_id;		/* module type id */

	int m_instance;		/* instance number of this module type */

	int m_mod_def_id;
	int m_major_version;
	int m_minor_version;

	int m_bus;		/* enum nthw_bus_type value */
	uint32_t m_addr_base;	/* base address of the register window */

	int m_debug_mode;	/* bitmask of enum debug_mode */

	int mn_registers;	/* entries in mpa_registers */
	struct nt_register_s **mpa_registers;

	nt_fpga_module_init_t *mp_init;	/* static descriptor */
};

typedef struct nt_module_s nt_module_t;
+
/* One hardware register: addressing info, shadow buffer and field list. */
struct nt_register_s {
	nt_module_t *mp_owner;

	uint32_t m_id;

	uint32_t mn_bit_width;
	uint32_t mn_addr_rel;	/* address relative to the module base */
	uint32_t m_addr;	/* absolute address */
	uint32_t m_type;	/* e.g. REGISTER_TYPE_RO/WO */
	uint32_t m_len;		/* register length in 32-bit words */

	int m_debug_mode;	/* bitmask of enum debug_mode */

	int mn_fields;		/* entries in mpa_fields */
	struct nt_field_s **mpa_fields;

	uint32_t *mp_shadow;	/* m_len-word software copy of the register */
	bool *mp_dirty;		/* per-word modified-since-flush flags */

	nt_fpga_register_init_t *mp_init;	/* static descriptor */
};

typedef struct nt_register_s nt_register_t;
+
/*
 * One bit field within a register. The mask members are precomputed by
 * field_init() for the get/set bit-slicing paths.
 */
struct nt_field_s {
	nt_register_t *mp_owner;

	uint32_t m_id;

	uint32_t mn_bit_width;
	uint32_t mn_bit_pos_low;	/* LSB position within the register */
	uint32_t m_reset_val;
	uint32_t m_first_word;	/* shadow word index holding the LSB */
	uint32_t m_first_bit;	/* bit offset of the LSB inside that word */
	uint32_t m_front_mask;	/* field bits within the first word */
	uint32_t m_body_length;	/* number of fully covered 32-bit words */
	uint32_t mn_words;	/* total 32-bit words spanned by the field */
	uint32_t m_tail_mask;	/* field bits within the last partial word */

	int m_debug_mode;	/* bitmask of enum debug_mode */

	nt_fpga_field_init_t *mp_init;	/* static descriptor */
};

typedef struct nt_field_s nt_field_t;
+
/* --- FPGA manager: registry of supported FPGA product descriptions --- */
nt_fpga_mgr_t *fpga_mgr_new(void);
void fpga_mgr_init(nt_fpga_mgr_t *p);
void fpga_mgr_delete(nt_fpga_mgr_t *p);
nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
			     struct fpga_info_s *p_fpga_info);

void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);

/* --- FPGA model: lifecycle, lookup and debug dumping --- */
nt_fpga_t *fpga_new(void);
void fpga_delete(nt_fpga_t *p);
void fpga_delete_all(nt_fpga_t *p);
void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
	       struct fpga_info_s *p_fpga_info);

int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
			 const int default_value);
int fpga_get_product_id(const nt_fpga_t *p);
int fpga_get_fpga_version(const nt_fpga_t *p);
int fpga_get_fpga_revision(const nt_fpga_t *p);
nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
bool fpga_query(nt_fpga_t *p, int id, int instance);
void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);

void fpga_log_info(const nt_fpga_t *p);
void fpga_dump(const nt_fpga_t *p);
void fpga_dump_params(const nt_fpga_t *p);
void fpga_dump_modules(const nt_fpga_t *p);

/* --- Product parameters --- */
nt_param_t *param_new(void);
void param_delete(nt_param_t *p);
void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);

void param_dump(const nt_param_t *p);

/* --- Modules --- */
nt_module_t *module_new(void);
void module_delete(nt_module_t *p);
void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
		 nt_fpga_module_init_t *p_init);
void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
		  int debug_mode);

int module_get_major_version(const nt_module_t *p);
int module_get_minor_version(const nt_module_t *p);
uint64_t module_get_version_packed64(const nt_module_t *p);
bool module_is_version_newer(const nt_module_t *p, int major_version,
			   int minor_version);

int module_get_bus(const nt_module_t *p);
nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
int module_get_debug_mode(const nt_module_t *p);
void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
uint32_t module_get_addr_base(const nt_module_t *p);
void module_unsuppported(const nt_module_t *p);

void module_dump(const nt_module_t *p);
void module_dump_registers(const nt_module_t *p);

/* --- Registers: shadow-buffered hardware register access --- */
nt_register_t *register_new(void);
void register_delete(nt_register_t *p);
void register_init(nt_register_t *p, nt_module_t *p_module,
		   nt_fpga_register_init_t *p_init);

nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);

uint32_t register_get_address(const nt_register_t *p);
uint32_t register_get_addr_rel(const nt_register_t *p);
int register_get_bit_width(const nt_register_t *p);
int register_get_debug_mode(const nt_module_t *p);
void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);

void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
uint32_t register_get_val32(const nt_register_t *p);
uint32_t register_get_val_updated32(const nt_register_t *p);

void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
			  uint32_t len);

void register_make_dirty(nt_register_t *p);
void register_update(const nt_register_t *p);
void register_reset(const nt_register_t *p);
void register_flush(const nt_register_t *p, uint32_t cnt);
void register_clr(nt_register_t *p);
void register_set(nt_register_t *p);

void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
			   uint64_t *tsc2);

void register_dump(const nt_register_t *p);
void register_dump_fields(const nt_register_t *p);

/* --- Fields: bit slices within a register --- */
nt_field_t *field_new(void);
void field_delete(nt_field_t *p);
void field_init(nt_field_t *p, nt_register_t *p_reg,
		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Common helper header for the nthw code: pulls in the standard C headers
+ * used throughout and provides the ARRAY_SIZE() element-count macro.
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a statically sized array; do not use on pointers */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a Napatech PCI device id to the corresponding adapter id.
+ * Returns NT_HW_ADAPTER_ID_UNKNOWN for device ids not listed here.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		/* NT40A00/NT40A01 boards are handled as NT40E3 adapters */
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+/* Napatech PCI vendor id */
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+/* PCI device ids of the supported Napatech adapters */
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/* Adapter families; several PCI device ids may map to one adapter id */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* Deliberate alias: NT40A01 is handled as an NT40E3 adapter */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+/* Map a PCI device id to an adapter id (UNKNOWN if not recognized) */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA product profile identifiers */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ * Returns NULL if the allocation fails; release with nthw_rac_delete().
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	/* Fix: the original memset the pointer unconditionally, which is
+	 * undefined behavior when malloc returns NULL.
+	 */
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+/* Scrub and free a RAC instance allocated with nthw_rac_new(); NULL is a no-op. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Initialize the RAC instance from the FPGA model: resolve the RAC module,
+ * its registers and fields, and cache the register addresses / field masks
+ * used by the raw BAR0 access helpers.
+ * Passing p == NULL only probes for the module's existence.
+ * Returns 0 on success, -1 if the RAC module instance is missing.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	{
+		/*
+		 * RAC is a primary communication channel -
+		 * turn off debug by default, except for rac_rab_init.
+		 * Fix: this block originally ran before mp_reg_rab_init was
+		 * resolved above, passing a NULL register pointer to
+		 * register_set_debug_mode().
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/* Fix: original cleared mp_reg_dbg_data (already NULL) instead
+		 * of the field pointer.
+		 */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw BAR0 addresses for the fast-path access helpers */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional non-memory-mapped bus registers */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces as read from the FPGA product parameters. */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-wait until the RAB output buffer holds at least word_cnt words
+ * (up to 100000 polls of the "buf used" register).
+ * Returns 0 when the words are available, -1 on poll exhaustion.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	uint32_t retry;
+
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* First a traced write through the field layer, then a raw BAR0 write */
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Reset all RAB buses via the "flip/flip" sequence: clear init, assert init
+ * on every interface, then release all interfaces except interface 0
+ * (ref RMT#37020). Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* _unused silences the compiler when NT_LOG is compiled out */
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the combined inbound+outbound RAB DMA ring, program its
+ * IOVA into the adapter and sync the software ring pointers with the
+ * hardware read/write pointers.
+ * Returns 0 on success, -1 if the DMA allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* Two rings of RAB_DMA_BUF_CNT words each: inbound then outbound */
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* The outbound ring starts right after the inbound ring */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Begin a RAB DMA transaction: takes the RAC mutex and marks the
+ * transaction active. Must be paired with nthw_rac_rab_dma_commit(),
+ * which releases the mutex.
+ * Returns 0 on success, -1 if a transaction is already in progress.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_adapter_id_str);
+	return -1;
+}
+
+/*
+ * Append the completion command to the inbound DMA ring and bump the
+ * FPGA's inbound write pointer so the queued commands start executing.
+ * Called with p->m_mutex held (taken by nthw_rac_rab_dma_begin()).
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word so the wait loop sees a fresh value */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll the outbound DMA ring for the completion word written back by the
+ * FPGA, sleeping 1 us per iteration for up to RAB_DMA_WAIT iterations.
+ * On success advances the outbound read pointer past the completion word
+ * and resets the inbound free-space counter.
+ * Returns 0 on completion, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Commit a RAB DMA transaction started with nthw_rac_rab_dma_begin():
+ * kick the transfer, wait for the completion word and release the mutex.
+ * Returns the wait status (0 on success, -1 on timeout or misuse).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int status;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	status = nthw_rac_rab_dma_wait(p);
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return status;
+}
+
+/* Read a 32-bit FPGA register through BAR0 (volatile MMIO access). */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *const mmio_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*p_data = *mmio_reg;
+}
+
+/* Write a 32-bit FPGA register through BAR0 (volatile MMIO access). */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *const mmio_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*mmio_reg = p_data;
+}
+
+/*
+ * Queue a RAB write burst (1..256 words) into the inbound DMA ring.
+ * The transfer does not start until nthw_rac_rab_dma_commit() is called.
+ * Returns 0 on success, -1 on bad length or insufficient ring space.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* NOTE(review): the space check above requires word_cnt + 3 free words
+	 * but only word_cnt + 1 are consumed here - presumably headroom for the
+	 * completion command added by dma_commit; confirm the intent.
+	 */
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload words into the ring, wrapping at RAB_DMA_BUF_CNT */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read burst (1..256 words) into the inbound DMA ring and
+ * report, via buf_ptr, where the data will land in the outbound ring once
+ * nthw_rac_rab_dma_commit() has run.
+ * Returns 0 on success, -1 on bad length or insufficient ring space.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fix: the original format string had four conversions but only
+		 * three arguments (undefined behavior) and no trailing newline.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Only the command word is consumed from the inbound ring */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand the caller the outbound ring slot the read data will occupy */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write: pushes a write command, word_cnt data
+ * words and a completion command through the RAC in-band data register,
+ * then polls until the completion word appears in the outbound buffer.
+ * Mutually exclusive with DMA mode; takes p->m_mutex for the duration.
+ * Returns 0 on success, -1 on parameter, buffer-space or timeout errors.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/* NOTE(review): these bounds checks use '>' against 1 << BW, but the
+	 * largest encodable value is (1 << BW) - 1, so '>=' looks intended;
+	 * confirm against the RAB field widths.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 of "buf free" flags a RAB timeout/overflow condition */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Read 'word_cnt' 32-bit words from RAB bus 'bus_id' starting at 'address'
+ * into 'p_data'. Serialized via p->m_mutex. Returns 0 on success, -1 on any
+ * parameter/buffer/timeout error. The register write/read sequence below is
+ * hardware-mandated; statement order must not be changed.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* NOTE(review): boundary checks use '>' not '>='; confirm whether
+	 * address == (1 << RAB_ADDR_BW) is really a valid address.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	/* Low 16 bits: input-buffer free words; high 16 bits: output-buffer free words */
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		/* Echo mode: the command word itself is echoed back first */
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Issue the read command word */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				/* Some FPGAs require a write-back to advance the OB FIFO */
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 flags timeout/overflow on the bus access */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB input/output buffers and clear the BUF FREE state.
+ * Serialized via p->m_mutex. Returns 0 on success, -1 if the used-counters
+ * never drained.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	/* NOTE(review): tight busy-wait, up to 100000 register reads with no
+	 * delay between polls — confirm this is intentional for this bus.
+	 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * RAC (Register Access Controller) driver state and public API.
+ * Provides synchronous and DMA-based access to FPGA registers over the
+ * RAB (Register Access Bus).
+ */
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
+struct nthw_rac {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rac;
+
+	/* Serializes all RAB bus transactions */
+	pthread_mutex_t m_mutex;
+
+	int mn_param_rac_rab_interfaces;
+	int mn_param_rac_rab_ob_update;
+
+	nt_register_t *mp_reg_dummy0;
+	nt_register_t *mp_reg_dummy1;
+	nt_register_t *mp_reg_dummy2;
+
+	nt_register_t *mp_reg_rab_init;
+	nt_field_t *mp_fld_rab_init;
+
+	int mn_fld_rab_init_bw;
+	uint32_t mn_fld_rab_init_mask;
+
+	nt_register_t *mp_reg_dbg_ctrl;
+	nt_field_t *mp_fld_dbg_ctrl;
+
+	nt_register_t *mp_reg_dbg_data;
+	nt_field_t *mp_fld_dbg_data;
+
+	nt_register_t *mp_reg_rab_ib_data;
+	nt_field_t *mp_fld_rab_ib_data;
+
+	nt_register_t *mp_reg_rab_ob_data;
+	nt_field_t *mp_fld_rab_ob_data;
+
+	nt_register_t *mp_reg_rab_buf_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
+	nt_field_t *mp_fld_rab_buf_free_ob_free;
+	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
+	nt_field_t *mp_fld_rab_buf_free_timeout;
+
+	nt_register_t *mp_reg_rab_buf_used;
+	nt_field_t *mp_fld_rab_buf_used_ib_used;
+	nt_field_t *mp_fld_rab_buf_used_ob_used;
+	nt_field_t *mp_fld_rab_buf_used_flush;
+
+	/* DMA ring physical-address / pointer registers */
+	nt_register_t *mp_reg_rab_dma_ib_lo;
+	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_hi;
+	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_hi;
+	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_lo;
+	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_wr;
+	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ib_rd;
+	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ob_wr;
+	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;
+
+	nt_register_t *mp_reg_rab_nmb_rd;
+	nt_register_t *mp_reg_rab_nmb_data;
+	nt_register_t *mp_reg_rab_nmb_wr;
+	nt_register_t *mp_reg_rab_nmb_status;
+
+	/* Cached absolute register addresses (resolved at init) */
+	uint32_t rac_rab_init_addr;
+	uint32_t rac_rab_ib_data_addr;
+	uint32_t rac_rab_ob_data_addr;
+	uint32_t rac_rab_buf_free_addr;
+	uint32_t rac_rab_buf_used_addr;
+
+	uint32_t rac_rab_dma_ib_lo_addr;
+	uint32_t rac_rab_dma_ib_hi_addr;
+	uint32_t rac_rab_dma_ob_lo_addr;
+	uint32_t rac_rab_dma_ob_hi_addr;
+	uint32_t rac_rab_dma_ib_rd_addr;
+	uint32_t rac_rab_dma_ob_wr_addr;
+	uint32_t rac_rab_dma_ib_wr_addr;
+
+	/* Cached field masks (resolved at init) */
+	uint32_t rac_rab_buf_free_ib_free_mask;
+	uint32_t rac_rab_buf_free_ob_free_mask;
+	uint32_t rac_rab_buf_used_ib_used_mask;
+	uint32_t rac_rab_buf_used_ob_used_mask;
+	uint32_t rac_rab_buf_used_flush_mask;
+
+	uint32_t rac_rab_buf_used_ob_used_low;
+
+	uint32_t rac_nmb_rd_adr_addr;
+	uint32_t rac_nmb_data_addr;
+	uint32_t rac_nmb_wr_adr_addr;
+	uint32_t rac_nmb_status_addr;
+
+	/* DMA transfer state */
+	bool m_dma_active;
+
+	struct nt_dma_s *m_dma;
+
+	volatile uint32_t *m_dma_in_buf;
+	volatile uint32_t *m_dma_out_buf;
+
+	uint16_t m_dma_out_ptr_rd;
+	uint16_t m_dma_in_ptr_wr;
+	uint32_t m_in_free;
+};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+/* Cursor into a DMA ring buffer */
+struct dma_buf_ptr {
+	uint32_t size;
+	uint32_t index;
+	volatile uint32_t *base;
+};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+/* All rab read/write helpers return 0 on success, -1 on error */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+/* Allocate a zero-initialized STAT context; returns NULL on allocation failure. */
+nthw_stat_t *nthw_stat_new(void)
+{
+	/* calloc() gives the same zeroed state as the former malloc+memset */
+	return calloc(1, sizeof(nthw_stat_t));
+}
+
+/* Release a STAT context; safe to call with NULL (free(NULL) is a no-op). */
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	free(p);
+}
+
+/*
+ * Bind a STAT context to STA module instance 'n_instance' of 'p_fpga' and
+ * derive all counter-layout parameters from the module version and product
+ * parameters. Returns 0 on success, -1 if the instance does not exist.
+ *
+ * Fix: uint64_t values were printed with "%lX", which is wrong on ILP32
+ * targets; use PRIX64 (already used elsewhere in this file).
+ */
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	/* p == NULL probes only for module existence */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08" PRIX64 "\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	/* Port count fallback chain: STA_RX_PORTS -> RX_PORTS -> PORTS */
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	/* Two counters (octets+packets) per color */
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	/* STA >= 0.6 adds 2 extra HB counters when DBS is present */
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	/* Map module version to the DMA statistics layout revision */
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08" PRIX64 " layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	/* VSWITCH profile overrides the per-port counter counts entirely */
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	/* Pulse the counter-clear bit */
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	/* Clear any pending toggle-missed status (write-1-to-clear presumably
+	 * — TODO confirm against register spec)
+	 */
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	/* Leave DMA disabled until nthw_stat_set_dma_address() is called */
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+/*
+ * Program the counter DMA area: physical address into the STA module, and
+ * record the virtual mapping. The 64-bit timestamp slot lives directly after
+ * the m_nb_counters 32-bit counters. Always returns 0.
+ *
+ * Fix: pointers were passed for "%" PRIX64 conversions (undefined behavior);
+ * cast them through uintptr_t to a matching integer type.
+ */
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical,
+	       (uint64_t)(uintptr_t)p->mp_stat_dma_virtual,
+	       (uint64_t)(uintptr_t)p->mp_timestamp);
+	/* VSWITCH: seed the timestamp; otherwise mark it "not yet written" */
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+/*
+ * Request a new statistics DMA snapshot from the STA module.
+ * Clears any previous toggle-missed indication, invalidates the old
+ * timestamp, and re-arms the DMA-enable bit. Always returns 0.
+ */
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	/* -1 marks "snapshot not delivered yet"; HW overwrites it on completion */
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
+struct nthw_stat {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_stat;
+	int mn_instance;
+
+	int mn_stat_layout_version;
+
+	bool mb_is_vswitch;
+	bool mb_has_tx_stats;
+
+	int m_nb_phy_ports;
+	int m_nb_nim_ports;
+
+	int m_nb_rx_ports;
+	int m_nb_tx_ports;
+
+	int m_nb_rx_host_buffers;
+	int m_nb_tx_host_buffers;
+
+	int m_dbs_present;
+
+	int m_rx_port_replicate;
+
+	int m_nb_color_counters;
+
+	int m_nb_rx_hb_counters;
+	int m_nb_tx_hb_counters;
+
+	int m_nb_rx_port_counters;
+	int m_nb_tx_port_counters;
+
+	int m_nb_counters;
+
+	nt_field_t *mp_fld_dma_ena;
+	nt_field_t *mp_fld_cnt_clear;
+
+	nt_field_t *mp_fld_tx_disable;
+
+	nt_field_t *mp_fld_cnt_freeze;
+
+	nt_field_t *mp_fld_stat_toggle_missed;
+
+	nt_field_t *mp_fld_dma_lsb;
+	nt_field_t *mp_fld_dma_msb;
+
+	uint64_t m_stat_dma_physical;
+	uint32_t *mp_stat_dma_virtual;
+
+	uint64_t last_ts;
+
+	uint64_t *mp_timestamp;
+};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
+struct nt_log_impl {
+	int (*init)(void);
+	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
+		   va_list args);
+	int (*is_debug)(uint32_t module);
+};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+/*
+ * Install the log sink implementation and run its init hook.
+ * Returns the hook's result, or -1 if impl is NULL (previously crashed).
+ */
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	if (user_impl == NULL)
+		return -1;
+	return user_impl->init();
+}
+
+/*
+ * Return a pointer to the first '\n' of the trailing run of newlines in 's'
+ * (ignoring trailing spaces), or NULL if 's' does not end with a newline.
+ *
+ * Fix: for an empty string the old code computed i == -1 and read s[-1]
+ * (undefined behavior); guard that case explicitly.
+ */
+static char *last_trailing_eol(char *s)
+{
+	int i = (int)strlen(s) - 1;
+
+	if (i < 0)
+		return NULL; /* empty string: nothing to inspect */
+	/* Skip spaces */
+	while (i > 0 && s[i] == ' ')
+		--i;
+	if (s[i] != '\n')
+		return NULL;
+	/*
+	 * Find the last trailing EOL "hello_world\n\n\n"
+	 *                                         ^
+	 */
+	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
+		--i;
+	return &s[i];
+}
+
+/*
+ * Forward a log line to the installed sink, normalized to exactly one
+ * trailing '\n'. Returns the sink's result, or -1 if no sink is installed
+ * or the working buffer cannot be allocated.
+ *
+ * Fixes: (1) the result of ntlog_helper_str_alloc() was used unchecked —
+ * NULL on OOM led to strlen(NULL); (2) strncat's size argument did not
+ * reserve room for the terminator (CERT STR31-C), allowing a one-byte
+ * overflow when the buffer was almost full.
+ */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+
+	if (actual_format == NULL)
+		return rv; /* allocation failed; nothing we can log with */
+
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		size_t len = strlen(actual_format);
+
+		if (len < NTLOG_HELPER_STR_SIZE_MAX - 1)
+			strncat(actual_format, "\n",
+				NTLOG_HELPER_STR_SIZE_MAX - len - 1);
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module
+ * or if no sink has been installed yet (previously crashed on NULL).
+ */
+int nt_log_is_debug(uint32_t module)
+{
+	if (user_impl == NULL)
+		return -1;
+	return user_impl->is_debug(module);
+}
+
+/*
+ * Allocate a NTLOG_HELPER_STR_SIZE_MAX-byte working string, initialized
+ * from 'sinit' (truncated if needed) or empty when sinit is NULL.
+ * Returns NULL on allocation failure; release with ntlog_helper_str_free().
+ */
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *buf = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (buf == NULL)
+		return NULL;
+	if (sinit == NULL)
+		buf[0] = '\0';
+	else
+		rte_strscpy(buf, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	return buf;
+}
+
+/*
+ * Reset helper string 's' to 'sinit' (truncated if needed) or to the empty
+ * string when sinit is NULL. A NULL 's' is ignored.
+ */
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s == NULL)
+		return;
+	if (sinit == NULL)
+		s[0] = '\0';
+	else
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+}
+
+/*
+ * Append printf-formatted text to helper string 's', truncating at the
+ * fixed buffer size. A NULL 's' is ignored.
+ *
+ * Fixes: the format attribute was (2, 0), which is the va_list form and
+ * disables argument checking for this '...' variadic — (2, 3) enables it;
+ * the vsnprintf size also wasted one byte (vsnprintf's size already
+ * includes the terminator).
+ */
+__rte_format_printf(2, 3)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	size_t len = strlen(s);
+
+	vsnprintf(&s[len], NTLOG_HELPER_STR_SIZE_MAX - len, format, args);
+	va_end(args);
+}
+
+/* Release a string obtained from ntlog_helper_str_alloc(); NULL is OK. */
+void ntlog_helper_str_free(char *s)
+{
+	free(s);
+}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;
+	uint64_t size;
+};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Register the VFIO map/unmap callbacks used by nt_dma_alloc/nt_dma_free */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl; /* struct copy, so the caller's object need not persist */
+}
+
+/*
+ * Allocate a DMA-capable buffer and map it for the device via the
+ * registered VFIO callback.
+ *
+ * size:  requested size in bytes (mapping length is rounded up to a
+ *        power of two by ALIGN_SIZE())
+ * align: alignment passed through to rte_malloc_socket()
+ * numa:  NUMA socket to allocate from
+ *
+ * Returns a heap-allocated descriptor holding host address, IOVA and the
+ * mapped size, or NULL on allocation or mapping failure.
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	/*
+	 * NOTE(review): the mapped length ALIGN_SIZE(size) can exceed the
+	 * 'size' bytes actually allocated above - confirm this is intended.
+	 */
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* 'size' and 'align' are 64-bit; "%u"/"%X" would be undefined behavior */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and free a buffer previously obtained from nt_dma_alloc().
+ * An unmap failure is logged as a warning but the memory is freed anyway.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* Fields are uint64_t; use PRIu64 - "%u" with a 64-bit arg is UB */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-29 10:17   ` Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+static void stop_monitor_tasks(int signum)
+{
+	size_t idx;
+
+	/* Clear every task's run flag; on explicit teardown (signum == -1)
+	 * also join the threads that were still running.
+	 */
+	for (idx = 0; idx < ARRAY_SIZE(monitor_task_is_running); idx++) {
+		const int was_running = monitor_task_is_running[idx];
+
+		monitor_task_is_running[idx] = 0;
+		if (signum == -1 && was_running != 0) {
+			void *retval = NULL;
+
+			pthread_join(monitor_tasks[idx], &retval);
+			memset(&monitor_tasks[idx], 0,
+			       sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Print adapter identification, PCI details, FPGA version/build info,
+ * debug mode, port counts and HW platform info to the stream 'pfh',
+ * then append a statistics dump.  Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Format "dddd:bb:dd.f" from the packed PCI identifier */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+/*
+ * Create and initialize the SPI v3 instance used for sensor setup.
+ * Returns NULL if allocation or initialization fails.
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *spi = nthw_spi_v3_new();
+
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+/*
+ * Create and initialize the SPI instance used for reading sensor values.
+ * Returns NULL if allocation or initialization fails.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *spi = nthw_spis_new();
+
+	/* init SPI for sensor initialization process */
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * Register adapter sensors: the FPGA temperature sensor and, when the
+ * sensor SPI is available, the AVR-monitored FAN0/PSU0/PSU1/PCB sensors.
+ * Builds the adapter->adapter_sensors linked list and keeps
+ * adapter_sensors_cnt in sync with the number of registered sensors.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				/* fixed typo: "starteed" -> "started" */
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * One-time initialization of an adapter: derives HW ids from the PCI
+ * device id, allocates identification strings, instantiates the FPGA
+ * model, runs the PCI TA/TG throughput test when available, registers
+ * sensors, initializes the per-product link module, optional EPP, and
+ * the statistics module.
+ * Returns 0 on success or the non-zero error code of the failing step.
+ * NOTE(review): strings allocated here are freed by
+ * nt4ga_adapter_deinit() - confirm deinit runs on all failure paths.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: "dddd:bb:dd.f" formatted PCI address */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: "PCI:dddd:bb:dd.f" */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Per-port identification strings: "<adapter-id>:intf_<n>" */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Product-specific link module setup (ports start disabled) */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional; only set up when present in the FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter: stop monitor threads and statistics, shut down
+ * the FPGA model, reset the RAC RAB flip flop, then free identification
+ * strings and the adapter/NIM sensor lists.
+ * Returns the result of the RAC RAB reset.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* signum -1 also joins the monitor threads (see stop_monitor_tasks) */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors (walk the linked list built in adapter_sensor_setup) */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors (one linked list per physical port) */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+/* PCI identification and fields derived from it (see nt4ga_adapter_init) */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id; /* (pci_device_id >> 4) & 0x00ff, ref DN-0060 sect 9 */
+	int hw_product_type; /* pci_device_id & 0x000f, ref DN-0060 sect 9 */
+	int hw_reserved1; /* (pci_device_id >> 12) & 0x000f, ref DN-0060 sect 9 */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Top-level per-adapter state aggregating the nt4ga sub-modules */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	uint16_t adapter_sensors_cnt; /* entries in the adapter_sensors list */
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors; /* linked list built by adapter_sensor_setup() */
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX]; /* per-port lists */
+
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX]; /* heap strings, freed in deinit */
+	char *mp_adapter_id_str; /* "PCI:dddd:bb:dd.f", heap-allocated */
+	char *p_dev_name; /* "dddd:bb:dd.f", heap-allocated */
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+/* Management hooks for monitor tasks; either may be NULL (see comment above) */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port); /* device transitions to up */
+	int (*mto_stop)(int adapter, int port); /* device transitions to down */
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* State for the flow-filter module */
+typedef struct nt4ga_filter_s {
+	/* NOTE(review): counts presumably describe interfaces and queues
+	 * available to the filter - confirm against the code that sets them.
+	 */
+	int n_intf_cnt;
+	int n_queues_per_intf_cnt;
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* 'p' is dereferenced here, so it must not carry the _unused marker */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	/* Report whether a NIM module was detected on this port */
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	/* The port is administratively up exactly when it is not disabled */
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	/* Administrative state is the inverse of the port-disable flag */
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	/* i.e. a link-up request enables the port, link-down disables it */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	/* Current link-up indication recorded in the link state */
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	/* Record the requested speed both as a pending action and as info */
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	/* Speed last recorded for this port */
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/* No-op: autoneg control is not yet supported by the link code */
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	/* Autoneg state is not tracked yet; report "on" unconditionally */
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	/* Only the requested action is recorded here */
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	/* Duplex mode last recorded for this port */
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	/* Store the requested loopback mode for the port */
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	/* Loopback mode most recently requested for the port */
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	/* Returned by value: a copy of the 100G NIM I2C context for the port */
+	nim_i2c_ctx_t nim_ctx = p_link->u.var100g.nim_ctx[port];
+	return nim_ctx;
+}
+
+/*
+ * port: tx power
+ */
+/*
+ * Enable/disable the TX laser on a QSFP28 port.
+ * Returns 0 on success, 1 if the NIM rejected the laser control, and -1
+ * if the port type is not a supported QSFP28 variant.
+ * NOTE(review): mixed failure codes (1 vs -1) - confirm callers
+ * distinguish them.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		/* RX-only modules have no TX laser to control */
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode.
+ * The numeric values (1 = host, 2 = line) are what set_loopback() in
+ * nt4ga_link_100g.c switches on.
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state as observed by the monitoring thread.
+ */
+typedef struct link_state_s {
+	bool link_disabled; /* port administratively disabled */
+	bool nim_present; /* a NIM module is currently detected */
+	bool lh_nim_absent; /* latched-high: NIM was absent at some point */
+	bool link_up; /* PHY reports link up */
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Per-port negotiated/configured link parameters */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Per-port requested actions, applied by the link monitoring thread */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode; /* see enum nt_link_loopback_e */
+} port_action_t;
+
+/* State for a 100G adapter variant */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Variant union: every member starts with a nim_ctx array, so
+ * u.nim_ctx aliases the variant's NIM contexts regardless of type.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+/* Aggregate link state for one adapter */
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* true once per-port HW objects (MAC/PCS, IIC, GPIO) are initialized */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback mode on one port.
+ * mode/last_mode: 0 = none, 1 = host loopback, 2 = line loopback
+ * (values match enum nt_link_loopback_e).
+ * NOTE(review): 'swap_polerity' is a typo for 'swap_polarity' (local only).
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1: /* enter host loopback */
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		/* host loopback bypasses the GTY lanes, so no polarity swap */
+		swap_polerity = false;
+		break;
+	case 2: /* enter line loopback */
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default: /* leave loopback: undo whatever last_mode enabled */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/*
+	 * Boards needing GTY polarity swap: NT200A01 only at hw_id 2,
+	 * NT200A02 at any hw_id (same condition as in port_init()).
+	 */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* If RX path is out of reset, clear the counters made stale by it */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ */
+/*
+ * Build the current link state for one port from the MAC/PCS link summary
+ * and the GPIO module-present pin. Always returns 0.
+ * Also logs the raw summary (rate-limited: only when it changed since the
+ * previous call for this adapter/port, tracked in a static buffer).
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* last logged summary per adapter/port; NOTE: not thread-safe,
+		 * assumes one monitor thread per adapter — TODO confirm */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		/* only log when the summary differs from the previous one */
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			/* explicit terminator; redundant with rte_strscpy but harmless */
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	/* Query the module-present GPIO pin for this interface */
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn on the MAC/PCS RX datapath; drv is unused */
+	(void)drv;
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn on the TX datapath and select the host as TX source */
+	(void)drv;
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn off the MAC/PCS RX datapath; drv is unused */
+	(void)drv;
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn off the TX datapath and deselect the host as TX source */
+	(void)drv;
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	/* Pulse the RX path reset, allowing 10 ms for each edge to settle */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000);
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000);
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ */
+/*
+ * Apply (swap == true) or clear (swap == false) the per-lane GTY TX/RX
+ * polarity inversions for one port. Always returns 0.
+ * NOTE(review): the swap tables are sized [2][4] — this assumes
+ * port is 0 or 1 (two-port 100G adapters); verify callers never pass more.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	/* Board-layout dependent polarity maps, indexed [port][lane] */
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/*
+	 * Reset the RX path if the MAC/PCS flags a needed reset, reports a
+	 * high bit-error rate, or not all FEC alignment markers are locked.
+	 */
+	const bool rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_locked = nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || hi_ber || !fec_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ */
+/*
+ * (Re)initialize the NIM module of one port: verify presence, pulse the
+ * GPIO PHY reset, read the NIM identity over I2C, validate the module
+ * type and set the low-power pin according to 'enable'.
+ * Returns 0 on success or when no NIM is installed, non-zero on error.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	/* NIM id 17: only module type this driver accepts
+	 * (presumably QSFP28 per SFF-8024 — TODO confirm) */
+	const uint8_t valid_nim_id = 17U;
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	/* When disabling the port, quiesce the datapath first */
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* Bug fix: the module was present above but vanished during reset;
+	 * the old message ("no longer absent") stated the opposite. */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	/* Low-power pin: de-assert to run the module, assert to park it */
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* -1 sentinels ensure the id checks below simply fail when the
+	 * FPGA info is unavailable */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* NOTE(review): the NULL call appears to probe for GMF module
+	 * presence before creating a real instance — confirm against
+	 * nthw_gmf_init() semantics */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* GTY TX driver tuning differs per board revision */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		/* per [port][lane]: { pre, diff, post } emphasis values */
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		/* uniform tuning across all lanes on these boards */
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ */
+/*
+ * Link monitoring loop for 100G ports: polls adapter/NIM sensors, applies
+ * administrative enable/disable and loopback requests, handles NIM
+ * insertion/removal and reports link transitions. Runs until
+ * monitor_task_is_running[adapter_no] is cleared. Always returns 0.
+ *
+ * Bug fix: the "NIM inserted" log used nim_ctx-> (element 0) and thus
+ * always printed port 0's NIM identity; it now indexes nim_ctx[i].
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		/* function-static: remembers the last reported link state
+		 * across loop iterations */
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			/* Apply a pending loopback-mode change */
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				/* host loopback (mode 1) implies link up */
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Fixed: index port i's context, not element 0 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	/* pthread entry point: run the shared state machine, ignore its rc */
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	/* One-time set-up of per-port HW objects (MAC/PCS, NIM I2C, GPIO) */
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* IIC instance for port i's NIM */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			/* standard SFP/QSFP I2C address (7-bit) */
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread (only if not already running) */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* Initialize all 100G ports and start the link monitoring thread;
+ * returns 0 on success, non-zero on error. */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Probe and initialize the PCI test accelerator (TA) and the PCI read/write
+ * traffic generator (RD_TG / WR_TG) FPGA modules for this adapter.
+ *
+ * Returns the number of modules that could NOT be found (0 == all present),
+ * or -1 if the adapter state pointer is NULL. A missing module is logged as
+ * a warning and counted, not treated as fatal.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	/* PCI read traffic generator (instance 0) */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI write traffic generator (instance 0) */
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI test accelerator (instance 0) */
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (1) or disable (0) the TA control register; always returns 0 */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+/* Read the TA length-error counter into *p_data; always returns 0 */
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data; always returns 0 */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data; always returns 0 */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA payload-error counter into *p_data; always returns 0 */
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RD_TG RAM slot: the DMA address it reads from is
+ * iova + slot_addr * req_size (one fixed-size slot per packet).
+ * 'wait'/'wrap' are slot flags forwarded to the hardware. Always returns 0.
+ * NOTE(review): slot_addr * req_size is computed in 32-bit before widening -
+ * fine for the small TG buffers used here, would overflow for >4G offsets.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the read TG for num_iterations passes (0 stops it); always returns 0 */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the read TG ready flag in 1 ms steps, giving up after 1000 polls
+ * (~1 s). Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program one WR_TG RAM slot: the DMA address it writes to is
+ * iova + slot_addr * req_size. 'wait'/'wrap'/'inc' are slot flags forwarded
+ * to the hardware. Always returns 0.
+ * NOTE(review): same 32-bit offset arithmetic caveat as the RD_TG setup.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the write TG for num_iterations passes (0 stops it); always returns 0 */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the write TG ready flag in 1 ms steps, giving up after 1000 polls
+ * (~1 s). Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Run one HIF bandwidth measurement pass using the FPGA PCI traffic
+ * generators (RD_TG / WR_TG) and the test accelerator (TA).
+ *
+ * Test parameters (delay, packet size/count, direction, NUMA node) are taken
+ * from 'pri'; results are sampled into 'pri' (master end point) and 'sla'
+ * (slave end point, unused here - both p_slave_instance and p_pci_slave are
+ * NULL). Sequence: allocate a DMA buffer, stop any running test, let the
+ * WR_TG fill the buffer, verify the pattern, run the requested direction for
+ * 'delay' usecs while sampling HIF/PCIe3 counters, check TA error counters,
+ * then stop everything and free the buffer.
+ *
+ * Returns 0 on success, non-zero if any step or TA counter reported errors.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		/* NOTE(review): returns 0 (success) on allocation failure - confirm intended */
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			/* Program one RAM slot per packet; last slot only wraps */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			/* Each packet payload (after an 8-byte header) is an
+			 * incrementing 32-bit counter starting at 0.
+			 */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		/* Kick off the generators for the requested direction */
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		/* Measurement loop - currently always runs exactly once (see break below) */
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Top-level HIF throughput test driver.
+ *
+ * numa_node == UINT8_MAX selects node 0 as starting point; note that
+ * numa_end == numa_begin, so only a single node is ever visited -
+ * NOTE(review): confirm whether "all nodes" was meant to iterate further.
+ * direction <= 0 runs all three directions (1=read, 2=write, 3=both),
+ * otherwise only the given one. n_pkt_size / n_batch_count <= 0 fall back to
+ * the TG_* defaults; n_delay == 0 is rejected with -1.
+ *
+ * Returns -1 for n_delay == 0, otherwise 0 - errors are only logged.
+ * NOTE(review): callers cannot detect measurement failure from the return
+ * value; consider returning bo_error instead.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary end-point test parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave end-point test parameters */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/* Fixed: was "pri->n_ref_clk_cnt" (copy/paste bug) */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Traffic-generator defaults (used when the caller passes <= 0) */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Per-adapter handles for the PCI test accelerator and traffic generators */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg;	/* PCI read generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg;	/* PCI write generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta;	/* PCI test accelerator */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Probe TA/TG modules; returns the number of modules not found (0 == ok) */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one throughput measurement pass; parameters are taken from 'pri' */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Top-level throughput test driver over NUMA node and direction */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/* Convert the 32:32 (seconds:nanoseconds) pcap-style inline timestamp to
+ * a flat nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	const uint64_t secs = ts >> 32;
+	const uint64_t nsecs = ts & 0xffffffff;
+
+	return secs * 1000000000 + nsecs;
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Harvest the FPGA statistics DMA snapshot into the host-side counters,
+ * dispatching to the vswitch (virt v1) or capture (cap v1) layout.
+ * Also refreshes last_timestamp: vswitch images cannot deliver it through
+ * the DMA block (see comment below), so it is taken from the OS instead.
+ * Always returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * First-stage statistics init: zero the per-adapter stat state, create the
+ * STA and RMC module handles, and cache the host-buffer/port counts reported
+ * by the STA module. DMA buffers and counter arrays are allocated later in
+ * nt4ga_stat_setup(). Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/* On partial failure, release the STA handle created above */
+		if (!p_nthw_rmc) {
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache geometry reported by the STA module */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Second-stage statistics init: allocate and map the statistics DMA area,
+ * hand its address to the STA module, allocate the host-side counter
+ * structures (layout differs between vswitch and inline images), and trigger
+ * the first statistics transfer. Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): on a mid-function allocation failure, earlier allocations
+ * (DMA area, color/hb arrays) are not released here - presumably
+ * nt4ga_stat_stop() is expected to clean up; confirm callers do that.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Hold traffic while the statistics DMA address is being changed */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		/* Counter block plus trailing timestamp field */
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/* NOTE(review): conversion count vs argument count in these two
+		 * NT_LOG calls looks inconsistent (and %02ld for an int) - verify
+		 * against the NT_LOG format contract.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %p %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02ld, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX64 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so un-written counters are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* One entry per RX and per TX host buffer */
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* vswitch images carry no FLM statistics */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick the STA module to perform the first DMA transfer */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources allocated by nt4ga_stat_init()/_setup().
+ *
+ * free(NULL) is a no-op, so the heap pointers are freed unconditionally and
+ * reset to NULL immediately, which keeps repeated calls (and virt/cap
+ * aliasing, if those members share storage) safe. Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() has no documented NULL contract - keep the guard */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Print per-port RX/TX total counters (packets, octets, drops) for each
+ * physical port to the supplied stream. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Accumulate vswitch (virt v1) statistics from the DMA snapshot into the
+ * host-side counters: color counters, then host-buffer counters, then RX
+ * and TX port counters (each section advances p_stat_dma_virtual).
+ * Returns 0 on success, -1 on NULL pointers or an unsupported STA layout.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	/* Validate pointers BEFORE dereferencing (original checked after use) */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters - packet word carries TCP flags in the top 6 bits */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters - 8 words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/*
+ * Accumulate capture-profile (v1) statistics from the stat DMA snapshot into
+ * the adapter's counter structures.
+ *
+ * Called with stat mutex locked.
+ *
+ * @param p_nt4ga_stat      Adapter statistics context (counters are added to,
+ *                          never reset here).
+ * @param p_stat_dma_virtual Virtual address of the DMA'ed 32-bit counter words,
+ *                          laid out as: color counters, host-buffer counters,
+ *                          per-Rx-port counters, per-Tx-port counters.
+ * @return 0 on success, -1 on invalid argument or unsupported layout version.
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	int c, h, p;
+
+	/*
+	 * Validate before use: the original read p_nt4ga_stat->mp_nthw_stat
+	 * prior to NULL-checking p_nt4ga_stat (dereference-before-check).
+	 */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two 32-bit words (packets, bytes) per color */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: eight 32-bit words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		struct host_buffer_counters *p_hb =
+			&p_nt4ga_stat->mp_stat_structs_hb[h];
+		const uint32_t *p_src = &p_stat_dma_virtual[h * 8];
+
+		p_hb->flush_packets += p_src[0];
+		p_hb->drop_packets += p_src[1];
+		p_hb->fwd_packets += p_src[2];
+		p_hb->dbs_drop_packets += p_src[3];
+		p_hb->flush_bytes += p_src[4];
+		p_hb->drop_bytes += p_src[5];
+		p_hb->fwd_bytes += p_src[6];
+		p_hb->dbs_drop_bytes += p_src[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		struct port_counters_v2 *p_rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+		/* Base of this port's counter record in the DMA snapshot */
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+
+		p_rx->octets += p_src[0];
+
+		p_rx->broadcast_pkts += p_src[1];
+		p_rx->multicast_pkts += p_src[2];
+		p_rx->unicast_pkts += p_src[3];
+		p_rx->pkts_alignment += p_src[4];
+		p_rx->pkts_code_violation += p_src[5];
+		p_rx->pkts_crc += p_src[6];
+		p_rx->undersize_pkts += p_src[7];
+		p_rx->oversize_pkts += p_src[8];
+		p_rx->fragments += p_src[9];
+		p_rx->jabbers_not_truncated += p_src[10];
+		p_rx->jabbers_truncated += p_src[11];
+
+		/* Size-distribution buckets */
+		p_rx->pkts_64_octets += p_src[12];
+		p_rx->pkts_65_to_127_octets += p_src[13];
+		p_rx->pkts_128_to_255_octets += p_src[14];
+		p_rx->pkts_256_to_511_octets += p_src[15];
+		p_rx->pkts_512_to_1023_octets += p_src[16];
+		p_rx->pkts_1024_to_1518_octets += p_src[17];
+		p_rx->pkts_1519_to_2047_octets += p_src[18];
+		p_rx->pkts_2048_to_4095_octets += p_src[19];
+		p_rx->pkts_4096_to_8191_octets += p_src[20];
+		p_rx->pkts_8192_to_max_octets += p_src[21];
+
+		p_rx->mac_drop_events += p_src[22];
+		p_rx->pkts_lr += p_src[23];
+		p_rx->duplicate += p_src[24];
+
+		p_rx->pkts_ip_chksum_error += p_src[25];
+		p_rx->pkts_udp_chksum_error += p_src[26];
+		p_rx->pkts_tcp_chksum_error += p_src[27];
+		p_rx->pkts_giant_undersize += p_src[28];
+		p_rx->pkts_baby_giant += p_src[29];
+		p_rx->pkts_not_isl_vlan_mpls += p_src[30];
+		p_rx->pkts_isl += p_src[31];
+		p_rx->pkts_vlan += p_src[32];
+		p_rx->pkts_isl_vlan += p_src[33];
+		p_rx->pkts_mpls += p_src[34];
+		p_rx->pkts_isl_mpls += p_src[35];
+		p_rx->pkts_vlan_mpls += p_src[36];
+		p_rx->pkts_isl_vlan_mpls += p_src[37];
+
+		p_rx->pkts_no_filter += p_src[38];
+		p_rx->pkts_dedup_drop += p_src[39];
+		p_rx->pkts_filter_drop += p_src[40];
+		p_rx->pkts_overflow += p_src[41];
+		/* DBS counters are only valid when the DBS module is present */
+		p_rx->pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[42] : 0;
+		p_rx->octets_no_filter += p_src[43];
+		p_rx->octets_dedup_drop += p_src[44];
+		p_rx->octets_filter_drop += p_src[45];
+		p_rx->octets_overflow += p_src[46];
+		p_rx->octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[47] : 0;
+
+		p_rx->ipft_first_hit += p_src[48];
+		p_rx->ipft_first_not_hit += p_src[49];
+		p_rx->ipft_mid_hit += p_src[50];
+		p_rx->ipft_mid_not_hit += p_src[51];
+		p_rx->ipft_last_hit += p_src[52];
+		p_rx->ipft_last_not_hit += p_src[53];
+
+		/*
+		 * Rx totals. Widen to 64 bits before summing so the sum of
+		 * several 32-bit counters cannot wrap (the original summed in
+		 * 32-bit arithmetic and then assigned to uint64_t).
+		 */
+		uint64_t new_drop_events_sum = (uint64_t)p_src[22] +
+			p_src[38] + p_src[39] + p_src[40] + p_src[41] +
+			(p_nthw_stat->m_dbs_present ? p_src[42] : 0);
+
+		uint64_t new_packets_sum = (uint64_t)p_src[7] + p_src[8] +
+			p_src[9] + p_src[10] + p_src[11] + p_src[12] +
+			p_src[13] + p_src[14] + p_src[15] + p_src[16] +
+			p_src[17] + p_src[18] + p_src[19] + p_src[20] +
+			p_src[21];
+
+		p_rx->drop_events += new_drop_events_sum;
+		p_rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		struct port_counters_v2 *p_tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+
+		p_tx->octets += p_src[0];
+
+		p_tx->broadcast_pkts += p_src[1];
+		p_tx->multicast_pkts += p_src[2];
+		p_tx->unicast_pkts += p_src[3];
+		p_tx->pkts_alignment += p_src[4];
+		p_tx->pkts_code_violation += p_src[5];
+		p_tx->pkts_crc += p_src[6];
+		p_tx->undersize_pkts += p_src[7];
+		p_tx->oversize_pkts += p_src[8];
+		p_tx->fragments += p_src[9];
+		p_tx->jabbers_not_truncated += p_src[10];
+		p_tx->jabbers_truncated += p_src[11];
+
+		/* Size-distribution buckets */
+		p_tx->pkts_64_octets += p_src[12];
+		p_tx->pkts_65_to_127_octets += p_src[13];
+		p_tx->pkts_128_to_255_octets += p_src[14];
+		p_tx->pkts_256_to_511_octets += p_src[15];
+		p_tx->pkts_512_to_1023_octets += p_src[16];
+		p_tx->pkts_1024_to_1518_octets += p_src[17];
+		p_tx->pkts_1519_to_2047_octets += p_src[18];
+		p_tx->pkts_2048_to_4095_octets += p_src[19];
+		p_tx->pkts_4096_to_8191_octets += p_src[20];
+		p_tx->pkts_8192_to_max_octets += p_src[21];
+
+		p_tx->mac_drop_events += p_src[22];
+		p_tx->pkts_lr += p_src[23];
+
+		/*
+		 * Tx totals. The original indexed the drop counter with
+		 * m_nb_rx_port_counters inside the TX loop (copy/paste bug);
+		 * the TX record stride is m_nb_tx_port_counters, which p_src
+		 * already accounts for.
+		 */
+		uint64_t new_drop_events_sum = p_src[22];
+
+		uint64_t new_packets_sum = (uint64_t)p_src[7] + p_src[8] +
+			p_src[9] + p_src[10] + p_src[11] + p_src[12] +
+			p_src[13] + p_src[14] + p_src[15] + p_src[16] +
+			p_src[17] + p_src[18] + p_src[19] + p_src[20] +
+			p_src[21];
+
+		p_tx->drop_events += new_drop_events_sum;
+		p_tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color (flow statistics) counters accumulated from the stat DMA area */
+struct color_counters {
+	uint64_t color_packets; /* Packets accounted to this color */
+	uint64_t color_bytes; /* Bytes accounted to this color */
+	uint8_t tcp_flags; /* TCP flags field; exact semantics not visible here - TODO confirm */
+};
+
+/*
+ * Per host-buffer packet/byte counters. Filled from eight consecutive 32-bit
+ * DMA words per host buffer (see the collect functions in nt4ga_stat.c).
+ */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Per-port counters for the capture profile (stat layout version 6).
+ * All values are monotonically accumulated by the stat collect functions;
+ * "base" snapshots elsewhere are used to compute increments.
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	/* Frame-size distribution buckets */
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop; /* Only updated when the DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop; /* Only updated when the DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Per-port counters for the VSWITCH/inline profile (reduced counter set) */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Flow Matcher (FLM) module counters. The two groups correspond to the FLM
+ * module versions noted below; fields mirror the hardware counter names.
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/* Adapter-wide statistics context: HW handles, DMA area and counter stores */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat; /* Handle to the FPGA STA module driver */
+	nthw_rmc_t *mp_nthw_rmc; /* Handle to the RMC module driver */
+	struct nt_dma_s *p_stat_dma; /* DMA memory the FPGA writes counters to */
+	uint32_t *p_stat_dma_virtual; /* CPU-visible mapping of the stat DMA */
+	uint32_t n_stat_size;
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Only one profile is active per adapter, hence the union */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver;
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/* Tell whether a copper SFP product number is a supported 3-speed type */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	size_t idx;
+
+	/* Linear scan is fine: the list is tiny and this is not a hot path */
+	for (idx = 0; idx < ARRAY_SIZE(pn_trispeed_list); idx++) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Tell whether a NIM type uses page-based addressing for bytes above 127.
+ * Unknown identifiers are logged and treated as non-paged.
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	if (id == NT_NIM_SFP_SFP_PLUS)
+		return false;
+
+	if (id == NT_NIM_XFP || id == NT_NIM_QSFP ||
+			id == NT_NIM_QSFP_PLUS || id == NT_NIM_QSFP28)
+		return true;
+
+	NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__, id);
+	return false;
+}
+
+/* Map the raw identifier byte read from the module to the NIM id enum */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	const nt_nim_identifier_t nim_id = (nt_nim_identifier_t)ctx->nim_id;
+
+	return nim_id;
+}
+
+/*
+ * Perform one raw I2C read or write against the NIM.
+ * Returns the underlying nthw_iic_* result (0 on success).
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/*
+	 * nthw_iic_read_data()/nthw_iic_write_data() multiply the device
+	 * address by two internally, so divide it out here first.
+	 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Unused */
+
+	if (do_write) {
+		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					 seq_cnt, p_data);
+	}
+
+	return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+				seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+/*
+ * Select a new page for page addressing. Page switching can take substantial
+ * time, so the current selection is read first and the write is skipped when
+ * the requested page is already active. Returns 0 on success, -1 on failure.
+ */
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Fetch the page currently selected in the module */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Already on the requested page: nothing to do */
+	if (page_sel == curr_page_sel)
+		return 0;
+
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes at linear address 'lin_addr' of a NIM.
+ *
+ * The linear address space is mapped onto up to three I2C devices:
+ *   [0..127]              -> device 0xA0, direct
+ *   [128..SFP_PHY_LIN_ADDR) -> paged 0xA0 access (if m_page_addressing) or
+ *                              device 0xA2 for addresses >= 256 (non-paged)
+ *   [SFP_PHY_LIN_ADDR..)  -> device 0xAC (SFP PHY), 16-bit registers
+ *
+ * Returns 0 on success, -1 on range violation or I2C failure.
+ * NOTE(review): ctx->instance - 2 is assumed to yield the port number used
+ * only for log messages - confirm against callers.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		/* length/2 because each 16-bit PHY register spans two bytes */
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	/* Transfer loop: each iteration does one bounded I2C sequence */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			/* Page number: (lin_addr / 128) - 1, i.e. page 0 starts at 128 */
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* For 16-bit registers two bytes advance the address by one */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+/*
+ * Read 'length' bytes from the NIM at linear address 'lin_addr' into
+ * 'data', resolving the module's addressing mode (paged vs. flat) first.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	const bool paged = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, paged, lin_addr, length, data,
+				       NIM_READ);
+}
+
+/*
+ * Write 'length' bytes from 'data' to the NIM at linear address
+ * 'lin_addr', resolving the module's addressing mode first.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	const bool paged = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, paged, lin_addr, length, data,
+				       NIM_WRITE);
+}
+
+/*
+ * Read and return a single byte at linear address 'addr'.
+ * Returns 0 when the underlying I2C read fails; previously the returned
+ * value was left uninitialized on failure (undefined behavior).
+ */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data = 0;
+
+	if (read_data_lin(ctx, addr, sizeof(data), &data) != 0)
+		return 0;
+	return data;
+}
+
+/*
+ * Read the one-byte module identifier into ctx->nim_id.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* Only the very first byte is read, so page setup is irrelevant. */
+	const bool use_page_addressing = false;
+	int res = nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ);
+
+	return res != 0 ? -1 : 0;
+}
+
+/*
+ * Common constructor for all NIM types: read the module identifier and
+ * reset every cached context field to a known default state.
+ * Returns 0 on success, or the nim_read_id() error code.
+ */
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	int res;
+
+	ctx->nim_id = 0;
+	res = nim_read_id(ctx);
+	if (res != 0) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+
+	/* Clear all cached vendor strings and length info */
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+
+	/* Reset module state to defaults */
+	ctx->content_valid = false;
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+/*
+ * Generator for the {sfp,qsfp}_read_vendor_info() helpers that fill the
+ * cached vendor name, product number, serial number, date and revision
+ * strings in the context via nim_read_vendor_info().
+ *
+ * NOTE(review): the address tokens are pasted as Q##SFP_*, which always
+ * expands to the QSFP_* linear addresses for BOTH instantiations, so
+ * sfp_read_vendor_info() also reads from QSFP addresses - verify whether
+ * per-variant addresses (x##SFP_*-style) were intended here.
+ */
+#define XSFP_READ_VENDOR_INFO(x)                                             \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+/* Instantiate sfp_read_vendor_info() and qsfp_read_vendor_info() */
+XSFP_READ_VENDOR_INFO()
+XSFP_READ_VENDOR_INFO(q)
+
+/*
+ * Fill out an SFP/SFP+ state struct; the nominal bit rate is read from
+ * the module over I2C at SFP_BIT_RATE_ADDR.
+ * Returns the I2C read status (0 on success).
+ */
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	memset(state, 0, sizeof(*state));
+
+	return nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+}
+
+/*
+ * Fill out a QSFP/QSFP+/QSFP28 state struct. No hardware access is
+ * needed: the nominal bit rate (units of 100 Mbps) follows directly from
+ * the module identifier. Returns 0 on success, -1 for an unknown id.
+ */
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	memset(state, 0, sizeof(*state));
+
+	if (ctx->nim_id == 12U) {
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+	} else if (ctx->nim_id == 13U) {
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+	} else if (ctx->nim_id == 17U) {
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+	} else {
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+/* Dispatch state building on module family (SFP vs. QSFP variants). */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) != NT_NIM_SFP_SFP_PLUS)
+		return qsfp_nim_state_build(ctx, state);
+	return sfp_nim_state_build(ctx, state);
+}
+
+/*
+ * Map an SFF-8024 identifier byte to a human readable module name.
+ * Identifiers without an entry map to "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	static const struct {
+		uint8_t id;
+		const char *text;
+	} id_names[] = {
+		{ 0x00, "UNKNOWN" },     { 0x01, "GBIC" },
+		{ 0x02, "FIXED" },       { 0x03, "SFP/SFP+" },
+		{ 0x04, "300 pin XBI" }, { 0x05, "XEN-PAK" },
+		{ 0x06, "XFP" },         { 0x07, "XFF" },
+		{ 0x08, "XFP-E" },       { 0x09, "XPAK" },
+		{ 0x0A, "X2" },          { 0x0B, "DWDM" },
+		{ 0x0C, "QSFP" },        { 0x0D, "QSFP+" },
+		{ 0x11, "QSFP28" },      { 0x12, "CFP4" },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(id_names) / sizeof(id_names[0]); i++)
+		if (id_names[i].id == nim_id)
+			return id_names[i].text;
+	return "ILLEGAL!";
+}
+
+/*
+ * Validate the NIM base-data checksums (SFF-8472 style) and preload the
+ * cache: byte 63 must equal the low byte of the sum of bytes 0..62, and
+ * byte 95 the low byte of the sum of bytes 64..94. The result is stored
+ * in ctx->content_valid.
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint8_t buf[96];
+	uint32_t sum = 0;
+	int i;
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	/* Base area: bytes 0..62 checksummed in byte 63 */
+	for (i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) == buf[63]) {
+		/* Extended area: bytes 64..94 checksummed in byte 95 */
+		sum = 0;
+		for (i = 64; i < 95; i++)
+			sum += buf[i];
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	} else {
+		ctx->content_valid = false;
+	}
+
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset the Soft Rate Select bits: RS0 in the Control/Status register
+ * (controlled by rx_rate_high) and RS1 in the Extended Control/Status
+ * register (controlled by tx_rate_high), via I2C read-modify-write.
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	/*
+	 * Start from a defined value: the reads below may fail silently,
+	 * which previously left 'data' uninitialized for the bit operations.
+	 */
+	uint8_t data = 0;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Apply a rate selection for SFP modules that require it.
+ * Returns 0 on success, -1 if the requested speed is outside the
+ * module's speed mask.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		/*
+		 * The format string has three conversions; the __func__
+		 * argument was previously missing, leaving the log call with
+		 * one argument short (undefined behavior).
+		 */
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Enable or disable the SFP TX laser via the soft TX-disable bit in the
+ * Control/Status register (read-modify-write over I2C).
+ * Returns 0 on success, otherwise the failing I2C status.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t value;
+	int res = nim_nim_read_write_data_lin(ctx, pg_addr,
+				      SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value = (uint8_t)(value | SFP_SOFT_TX_DISABLE_BIT);
+	else
+		value = (uint8_t)(value & ~SFP_SOFT_TX_DISABLE_BIT);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr,
+				       SFP_CONTROL_STATUS_LIN_ADDR,
+				       sizeof(value), &value, NIM_WRITE);
+}
+
+/*
+ * Enable or disable the TX laser on a QSFP module, either for a single
+ * lane (lane_idx >= 0) or for all lanes (lane_idx < 0), via a
+ * read-modify-write of the soft TX-disable bits.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	const uint8_t mask = (lane_idx < 0) ?
+			     QSFP_SOFT_TX_ALL_DISABLE_BITS :
+			     (uint8_t)(1U << lane_idx);
+	uint8_t value;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	value = disable ? (uint8_t)(value | mask) : (uint8_t)(value & ~mask);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read a vendor string at 'addr' into 'p_data' (max_len includes room for
+ * the terminating NUL). The string is cut at the first whitespace
+ * character and is always NUL-terminated, even when the NIM data was not.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+
+	/* One byte of max_len is reserved for the terminating NUL */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at the first whitespace character found */
+	for (i = 0; i < max_len - 1; i++, p_data++) {
+		switch (*p_data) {
+		case ' ':
+		case '\n':
+		case '\t':
+		case '\v':
+		case '\f':
+		case '\r':
+			*p_data = '\0';
+			return 0;
+		default:
+			break;
+		}
+	}
+
+	/* Force termination in case the NIM data carried none */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Convert the NIM's supported-length table to meters and store it in
+ * ctx->len_info. A raw value of 255 means "greater than" and saturates to
+ * 65535, as does any product that would exceed 16 bits.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++) {
+		uint32_t meters;
+
+		if (p_nim_len_info[i] == 255)
+			meters = 65535;
+		else
+			meters = (uint32_t)p_nim_len_info[i] * p_nim_units[i];
+
+		ctx->len_info[i] = (uint16_t)(meters > 65535 ? 65535 : meters);
+	}
+}
+
+/*
+ * Read the basic (static) QSFP/QSFP+ data into the context cache:
+ * DMI options, vendor strings, supported cable lengths and the power
+ * class the module requires.
+ * Returns 0 on success, -1 on any I2C failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr]);
+
+	/* Cache vendor name, PN, SN, date and revision strings */
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * Tell whether the QSFP28 module requires explicit rate selection.
+ * If true the user must actively select the desired rate; if false the
+ * module may still support several rates without user selection, deduced
+ * from the product number instead.
+ * SFF-8636, Rev 2.10a: 6.2.7 Rate Select (p40), A.2 Rate Select (p85).
+ * Returns true only for extended rate select version 2, in which case
+ * the rate can be programmed via qsfp28_set_link_speed().
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+	uint8_t rate_select_ena;
+	uint8_t rate_select_type;
+
+	/* Options register, bit 5: rate select implemented */
+	rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) & 0x01;
+	if (rate_select_ena == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03;
+	if (rate_select_type == 2)
+		return true; /* When true selectRate() can be used */
+
+	NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+	       rate_select_type);
+	return false;
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25GBd).
+ * The values in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ * Returns 0 on success, -1 if the requested speed cannot be selected.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	/* SFF-8636 Rx/Tx rate select registers, two bits per lane */
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			/* NOTE(review): write_data_lin() results unchecked - confirm intentional */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Set the module link speed where the module family supports it.
+ * Returns 0 on success (including modules that auto-select the speed),
+ * -1 for unsupported module types or a failed selection.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return nim_sfp_set_rate_select(ctx, speed);
+
+	if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (!qsfp28_is_speed_selection_enabled(ctx))
+			return 0; /* NIM picks up the speed automatically */
+		return qsfp28_set_link_speed(ctx, speed);
+	}
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information for an SFP/SFP+ module and
+ * populates the context cache (checksum validity, DMI options, vendor
+ * strings, supported lengths and power level). Always returns 0.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	/* The two consecutive identical dmi_supp tests have been merged */
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	/* Cache vendor name, PN, SN, date and revision strings */
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is set to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	/* Determine required and current power level */
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI
+ * options that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ * Returns true when the product number is recognized; otherwise only the
+ * mandatory temperature sensor option is assumed and false is returned.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	/* Constant-expression sensor masks usable in the static table below */
+	enum {
+		SENSORS_NONE = 0,
+		SENSOR_TEMP = (1 << NIM_OPTION_TEMP),
+		SENSOR_SUPPLY = (1 << NIM_OPTION_SUPPLY),
+		SENSOR_RX_POWER = (1 << NIM_OPTION_RX_POWER),
+		SENSOR_TX_BIAS = (1 << NIM_OPTION_TX_BIAS),
+		SENSOR_TX_POWER = (1 << NIM_OPTION_TX_POWER),
+		SENSOR_RX_ONLY = (1 << NIM_OPTION_RX_ONLY),
+		SENSORS_ALL = SENSOR_TEMP | SENSOR_SUPPLY | SENSOR_RX_POWER |
+			      SENSOR_TX_BIAS | SENSOR_TX_POWER,
+	};
+	/* Known product numbers and the DMI sensors they implement */
+	static const struct {
+		const char *prod_no;
+		uint32_t options;
+	} pn_table[] = {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		{ "FTL410QE1C", SENSOR_TEMP | SENSOR_SUPPLY |
+				SENSOR_TX_BIAS | SENSOR_TX_POWER },
+		/* FINISAR FTL410QE2C, QSFP+ */
+		{ "FTL410QE2C", SENSOR_TEMP | SENSOR_SUPPLY },
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		{ "FTL4C1QE1C", SENSORS_ALL },
+		/*
+		 * AVAGO 79E4Z, QSFP+: The digital diagnostic accuracy is not
+		 * guaranteed so only the mandatory temperature sensor is made
+		 * available (although it will also be inaccurate)
+		 */
+		{ "AFBR-79E4Z", SENSOR_TEMP },
+		/* AVAGO 79E4Z-D, QSFP+ */
+		{ "AFBR-79E4Z-D", SENSORS_ALL },
+		/* AVAGO 79EQDZ, QSFP+ */
+		{ "AFBR-79EQDZ", SENSORS_ALL },
+		/*
+		 * Avago RxOnly BiDi NIM. No sensors available, not even the
+		 * normally mandatory temp sensor, and this is ok since the
+		 * temp sensor is not mandatory on active optical modules
+		 * (SFF-8436_rev4.1, p67)
+		 */
+		{ "AFBR-79EBRZ", SENSOR_RX_ONLY },
+		/*
+		 * Avago RxTx BiDi NIMs. No sensors available, not even the
+		 * normally mandatory temp sensor (ok for active optical
+		 * modules, see above)
+		 */
+		{ "AFBR-79EBPZ-NU1", SENSORS_NONE },
+		{ "AFBR-79EBPZ", SENSORS_NONE },
+		/* AVAGO 89CDDZ, QSFP28 */
+		{ "AFBR-89CDDZ", SENSORS_ALL },
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		{ "AFBR-89BDDZ", SENSORS_ALL },
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly. According to mail
+		 * correspondence this is a RxOnly version of AFBR-89BDDZ with
+		 * lasers default off. The lasers can be turned on but probably
+		 * should not be, because the receivers might be degraded -
+		 * the cause for selling them as RxOnly. Sensors as for
+		 * AFBR-89BDDZ except the Tx sensors.
+		 */
+		{ "AFBR-89BRDZ", SENSOR_TEMP | SENSOR_SUPPLY |
+				 SENSOR_RX_POWER | SENSOR_RX_ONLY },
+		/* Sumitomo QSFP28 modules */
+		{ "SQF1000L4LNGG01P", SENSORS_ALL },
+		{ "SQF1000L4LNGG01B", SENSORS_ALL },
+		{ "SQF1001L4LNGG01P", SENSORS_ALL },
+		{ "SQF1001L4LNGG01B", SENSORS_ALL },
+		{ "SQF1002L4LNGG01B", SENSORS_ALL },
+		/* Fujitsu QSFP28 modules */
+		{ "FIM37700/171", SENSORS_ALL },
+		{ "FIM37700/172", SENSORS_ALL },
+		/* InnoLight QSFP28 modules */
+		{ "TR-FC85S-NVS", SENSORS_ALL },
+		{ "TR-FC13L-NVS", SENSORS_ALL },
+		/* Finisar QSFP28 modules */
+		{ "FTLC9551REPM", SENSORS_ALL },
+		{ "FTLC9558REPM", SENSORS_ALL },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pn_table); i++) {
+		if (strcmp(ctx->prod_no, pn_table[i].prod_no) == 0) {
+			ctx->options = pn_table[i].options;
+			return true;
+		}
+	}
+
+	/*
+	 * Unknown product number.
+	 * DO NOTE: The temperature sensor is not mandatory on active/passive
+	 * copper and active optical modules
+	 */
+	ctx->options = SENSOR_TEMP;
+	return false;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its
+ * limits. This is a highly empirical way that cannot be guaranteed to give
+ * the correct result, but it was a wish not to be dependent on a PN table
+ * based solution.
+ *
+ * value_addr/lane_count: where the current reading(s) live and how many
+ * lanes to read (two bytes per lane).
+ * limit_addr: start of the 8-byte threshold block (alarm high/low,
+ * warning high/low, two bytes each).
+ * two_compl: values/limits are 16-bit two's complement (e.g. temperature).
+ * sensor_option: NIM_OPTION_* bit to set in ctx->options when present.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	/* Assemble big-endian 16-bit readings, one per lane */
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		/* Sign-extend two's complement quantities */
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors by probing each sensor's value and threshold
+ * registers and setting the corresponding option bits in ctx->options.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	static const struct {
+		uint16_t value_addr;
+		uint8_t lane_count;
+		uint16_t limit_addr;
+		bool two_compl;
+		uint32_t option;
+	} sensors[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sensors); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, sensors[i].value_addr,
+						 sensors[i].lane_count,
+						 sensors[i].limit_addr,
+						 sensors[i].two_compl,
+						 sensors[i].option);
+}
+
+/*
+ * Classify a module in an SFP cage (SFP / SFP+ / SFP28) from its EEPROM
+ * content and set ctx->port_type plus the ctx->specific_u.sfp.* flags.
+ * Byte offsets passed to read_data_lin() are linear EEPROM addresses
+ * (presumably SFF-8472 A0h page layout - TODO confirm against spec).
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Byte 12: nominal bit rate in units of 100 Mbit/s */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	/* Supported link length indicators - used as a classification fallback below */
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		/* SFP28 fully classified - skip the SFP/SFP+ handling below */
+		return;
+	}
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		/* 1G Ethernet compliance on a 10G module implies dual rate support */
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive ctx->speed_mask (a bitmask of NT_LINK_SPEED_* values) from the
+ * port type and flags established by sfp_find_port_params().
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		ctx->speed_mask = NT_LINK_SPEED_25G; /* Default for SFP28 */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		ctx->speed_mask = NT_LINK_SPEED_10G; /* Default for SFP+ */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		/* DAC cables can also be driven at 1G */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else { /* SFP */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+			ctx->speed_mask = NT_LINK_SPEED_100M;
+		} else {
+			ctx->speed_mask = NT_LINK_SPEED_1G; /* Default for SFP */
+			if (ctx->specific_u.sfp.dual_rate ||
+					ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_100M;
+			if (ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_10M;
+		}
+	}
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+/*
+ * Classify a QSFP28 module into a specific NT_PORT_TYPE_QSFP28_* sub-type
+ * using the extended specification compliance code when the 100G/extended
+ * bit is set in the compliance codes byte.
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	/* Bit 7: "Extended" - sub-type is given by the extended compliance code */
+	if (fiber_chan_speed & (1 << 7)) {
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t extended_specification_compliance_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &extended_specification_compliance_code);
+
+		switch (extended_specification_compliance_code) {
+		case 0x02:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+			break;
+		case 0x03:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+			break;
+		case 0x0B:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+			break;
+		case 0x0C:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+			break;
+		case 0x0D:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+			break;
+		case 0x25:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+			break;
+		case 0x26:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+			break;
+		case 0x27:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+			break;
+		default:
+			/* Unknown extended code - fall back to the generic type */
+			ctx->port_type = NT_PORT_TYPE_QSFP28;
+		}
+	} else {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * may still support several rates without requiring the user to select one of
+ * them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	/* Register addresses per SFF-8636 upper page 00h */
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	/* Only "extended rate select" (type 2) is handled */
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
+				   0x03; /* bit 1..0 */
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive ctx->speed_mask for a QSFP28 module. lane_idx < 0 means the port
+ * owns all four lanes; otherwise the mask describes a single lane.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR) {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask =
+				0; /* PAM-4 modules can only run on all lanes together */
+	} else {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_25G;
+
+		if (qsfp28_is_rate_selection_enabled(ctx)) {
+			/*
+			 * It is assumed that if the module supports dual rates then the other rate
+			 * is 10G per lane or 40G for all lanes.
+			 */
+			if (ctx->lane_idx < 0)
+				ctx->speed_mask |= NT_LINK_SPEED_40G;
+			else
+				ctx->speed_mask = NT_LINK_SPEED_10G;
+		}
+	}
+}
+
+/*
+ * Classify a QSFP+ module as passive DAC, active DAC or optical based on
+ * the upper nibble of the device technology byte (SFF-8636 transmitter
+ * technology field).
+ */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	switch (device_tech & 0xF0) {
+	case 0xA0: /* Copper cable unequalized */
+	case 0xB0: /* Copper cable passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+		break;
+	case 0xC0: /* Copper cable, near and far end limiting active equalizers */
+	case 0xD0: /* Copper cable, far end limiting active equalizers */
+	case 0xE0: /* Copper cable, near end limiting active equalizers */
+	case 0xF0: /* Copper cable, linear active equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+		break;
+	default: /* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+		break;
+	}
+}
+
+/*
+ * Derive ctx->speed_mask for a QSFP+ module: all four lanes together
+ * (lane_idx < 0) run 40G, a single lane runs 10G.
+ */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read the static EEPROM content of an SFP module and, on success,
+ * classify the port and compute its speed mask. Returns the result of
+ * sfp_read_basic_data() (zero on success).
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	const int res = sfp_read_basic_data(ctx);
+
+	if (res != 0)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return 0;
+}
+
+/*
+ * Initialize the QSFP+ part of the context. A QSFP+ cage exposes four
+ * lanes; lane_idx < 0 means the port owns all lanes.
+ */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * Construct and classify a QSFP+ module: read the basic EEPROM data,
+ * determine the port type, detect which sensors/options the module
+ * implements and compute the speed mask. Returns zero on success.
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Block until a QSFP28 module reports ready after reset (or a timeout
+ * expires). Also reads and stores the SFF-8636 revision compliance byte
+ * into ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		/* Byte 6 bit 0: init complete / status flag */
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine the FEC capabilities of a QSFP28 module: first by a product
+ * number whitelist, then by the controllable-FEC bits in the SFF-8636
+ * page 03h equalizer/options register. Sets the NIM_OPTION_*_FEC bits in
+ * ctx->options and the fec flags in the qsfp28 specific struct.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare the product number by content: the previous pointer
+		 * comparison (==) between the prod_no char array and a string
+		 * literal could never match.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 on page 03h, linear addressing */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * Construct and classify a QSFP28 module on top of the common QSFP+
+ * preinit: wait for the module to become ready, then determine port
+ * type, FEC options and speed mask. Returns zero on success.
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		/*
+		 * Clear the QSFP28 specific state BEFORE the ready-wait:
+		 * qsfp28_wait_for_ready_after_reset() stores rev_compliance
+		 * into this struct, so zeroing it afterwards (as the code
+		 * previously did) discarded the value it had just read.
+		 */
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_wait_for_ready_after_reset(ctx);
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Build the linked list of sensor groups for an SFP port (temperature,
+ * voltage, bias current, TX power, RX power) and store its head in
+ * nim_sensors_ptr[m_port_no]; *nim_sensors_cnt is set to the number of
+ * groups added.
+ *
+ * NOTE(review): allocate_nim_sensor_group() can return NULL on allocation
+ * failure; the unconditional sensor->read assignments below would then
+ * dereference NULL - consider adding checks.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
+/*
+ * Build the linked list of sensor groups for a QSFP+/QSFP28 port
+ * (temperature, voltage, 4x bias current, 4x TX power, 4x RX power) and
+ * store its head in nim_sensors_ptr[m_port_no]; *nim_sensors_cnt is set
+ * to the number of groups added.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * Start from a clean count. sfp_nim_add_all_sensors() zeroes the
+	 * counter before adding; without doing the same here the increments
+	 * below would accumulate on whatever value the caller passed in.
+	 */
+	*nim_sensors_cnt = 0;
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one nim_sensor_group node and its embedded adapter sensor.
+ * Returns NULL if the group cannot be allocated; the caller owns the
+ * returned node.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(*sg));
+
+	if (sg == NULL) {
+		/* Previous message ("sensor group is NULL") hid the real cause */
+		NT_LOG(ERR, ETHDEV, "%s: failed to allocate sensor group",
+		       __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Probe the NIM, classify it from its identifier and attach the port's
+ * sensor groups. @extra optionally points to an int8_t lane index for
+ * QSFP modules. Returns zero on success, non-zero otherwise.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	/* Without a readable NIM id there is nothing to classify */
+	if (res)
+		return res;
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/* Propagate the preinit result; it was previously discarded */
+		res = sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		res = qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		res = qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* Minimal NIM state derived from the EEPROM */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * Per-port NIM context: I2C access handle, identification data read from
+ * the module EEPROM, and the classification result (port type, speed
+ * mask, capability options).
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr; /* I2C device address of the module */
+	uint8_t regaddr;
+	uint8_t nim_id; /* raw identifier byte, see enum nt_nim_identifier_e */
+	nt_port_type_t port_type;
+
+	/* Identification strings copied from the EEPROM (sized for a NUL) */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr; /* presumably: RX power is reported as average - TODO confirm */
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5]; /* supported link lengths from the EEPROM */
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options; /* bitmask of (1 << NIM_OPTION_*) capabilities */
+	bool tx_disable;
+	bool dmi_supp;
+
+	/* Type specific state; which member is valid follows port_type */
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel; /* hard RATE_SELECT implemented */
+			bool sw_rate_sel; /* soft RATE_SELECT implemented */
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only;
+			bool qsfp28;
+			union {
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* One node in the per-port linked list of NIM sensors */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	struct nim_i2c_ctx *ctx;
+	struct nim_sensor_group *next;
+};
+
+/*
+ * Allocate one nim_sensor_group node (and its embedded sensor) for the
+ * given port. Returns NULL on allocation failure; the caller owns the
+ * returned node.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+/* Map the raw identifier byte in ctx to an nt_nim_identifier_t */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+/* Enable/disable the TX laser of an SFP module; returns zero on success */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+/*
+ * Enable/disable the TX laser of a QSFP module for one lane, or for all
+ * lanes when lane_idx < 0; returns zero on success
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+/* Select the module link speed; returns zero on success */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify NIM based on its ID and some register reads
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific types of NIMs.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+/* Read `length` bytes at linear EEPROM address `lin_addr` into `data` */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+/*
+ * EEPROM byte offsets (linear address space: offsets >= 256 address the
+ * diagnostic 0xA2 device, see SFP_CONTROL_STATUS_LIN_ADDR below).
+ */
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+/* Optional NIM capabilities; used as bit positions in nim_i2c_ctx.options */
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+/*
+ * NIM identifier byte values (EEPROM byte 0); values follow the common
+ * transceiver identifier encoding - presumably SFF-8024 Table 4-1, TODO
+ * confirm.
+ */
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ *
+ * NOTE(review): the enumerator values are positional; append new types at
+ * the end to keep existing numeric values stable.
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/* Return the human-readable name of a single nt_link_speed_e value */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *text;
+	} speed_text[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_text) / sizeof(speed_text[0]); i++) {
+		if (speed_text[i].speed == link_speed)
+			return speed_text[i].text;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/* Convert a single nt_link_speed_e value to its rate in bits per second */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	const uint64_t mega = 1000ULL * 1000ULL;
+	const uint64_t giga = 1000ULL * mega;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * mega;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * mega;
+	case NT_LINK_SPEED_1G:
+		return 1ULL * giga;
+	case NT_LINK_SPEED_10G:
+		return 10ULL * giga;
+	case NT_LINK_SPEED_25G:
+		return 25ULL * giga;
+	case NT_LINK_SPEED_40G:
+		return 40ULL * giga;
+	case NT_LINK_SPEED_50G:
+		return 50ULL * giga;
+	case NT_LINK_SPEED_100G:
+		return 100ULL * giga;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render a link-speed bitmask as a comma separated list into 'buffer'
+ * (capacity 'length' bytes including the terminator) and return 'buffer'.
+ * Entries that do not fit are silently dropped.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	if (buffer == NULL || length == 0)
+		return buffer;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			if (len > 0 && (length - len - 1) > 2) {
+				/*
+				 * strncat()'s bound is the number of chars that may be
+				 * APPENDED, not the total buffer size; passing 'length'
+				 * here (as before) could overflow 'buffer'.
+				 */
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	/* NOTE(review): bit values are not in speed order - presumably
+	 * 50G/25G were added after 100G; keep values stable (ABI).
+	 */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	/* Value 0x81: list terminator only - NOT a valid bitmask bit */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ *
+ * Addresses are linear offsets into the module memory map. Offsets written
+ * as (x + n * 128) address upper page n in 128-byte units -- NOTE(review):
+ * confirm against the I2C access layer's paging scheme.
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read @count consecutive 16-bit sensor values starting at linear address
+ * @addr and convert each from the module's big-endian format to host order.
+ * Returns true (the underlying read has no error path here).
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * Compose the host value from the raw big-endian bytes; this
+		 * is endian-correct on any host. The previous self-assignment
+		 * performed no swap although the comment said it should
+		 * (cf. the SFP counterpart, which swaps with htons()).
+		 */
+		const uint8_t *p_raw = (const uint8_t *)p_lane_values;
+
+		*p_lane_values = (uint16_t)(((uint16_t)p_raw[0] << 8) | p_raw[1]);
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	/* Temperature is a single signed 16-bit value */
+	const bool success = qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR,
+						      NIM_OPTION_TEMP, 1,
+						      (uint16_t *)p_value);
+
+	return success;
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Supply voltage is a single unsigned 16-bit value */
+	const bool success = qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+						      NIM_OPTION_SUPPLY, 1,
+						      p_value);
+
+	return success;
+}
+
+/*
+ * Read NIM bias current for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* TX bias current: one 16-bit value per lane, four lanes */
+	const bool success = qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+						      NIM_OPTION_TX_BIAS, 4,
+						      p_value);
+
+	return success;
+}
+
+/*
+ * Read NIM TX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* TX optical power: one 16-bit value per lane, four lanes */
+	const bool success = qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+						      NIM_OPTION_TX_POWER, 4,
+						      p_value);
+
+	return success;
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/*
+	 * Read from the RX power address. The previous code read from
+	 * QSFP_TX_PWR_LIN_ADDR - a copy-paste of the TX variant - which
+	 * left QSFP_RX_PWR_LIN_ADDR unused and reported TX power as RX.
+	 */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t n_raw = 0;
+	int n_value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Raw value scaled by 10/256, as in the original implementation */
+	if (qsfp_plus_nim_get_temperature(sg->ctx, &n_raw))
+		n_value = (int)(n_raw * 10 / 256);
+
+	update_sensor_value(sg->sensor, n_value);
+}
+
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t n_raw = 0;
+	int n_value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Raw value divided by 10, as in the original implementation */
+	if (qsfp_plus_nim_get_supply_voltage(sg->ctx, &n_raw))
+		n_value = (int)(n_raw / 10);
+
+	update_sensor_value(sg->sensor, n_value);
+}
+
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t a_lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, a_lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* One reading per active lane, scaled by 2 as in the original */
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)a_lanes[i] * 2);
+}
+
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t a_lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, a_lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* One unscaled reading per active lane */
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)a_lanes[i]);
+}
+
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t a_lanes[4] = { 0 };
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, a_lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* One unscaled reading per active lane */
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)a_lanes[i]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ *
+ * Addresses are linear offsets: 0..255 map to the 0xA0 area, (x + 256) to
+ * the 0xA2 diagnostics area -- NOTE(review): confirm against the access layer.
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+/* Duplicate SFP_CU_LINK_LEN_LIN_ADDR definition removed (was defined twice) */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+/* NOTE(review): no base-address define precedes these two bits; presumably
+ * they belong to a control/status byte in the 0xA2 area - confirm.
+ */
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	/* Module exposes no diagnostics - nothing to read */
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	/* htons() doubles as ntohs() for a 16-bit swap */
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to little endian */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/*
+		 * Apply calibration: slope is fixed-point with 8 fractional
+		 * bits (hence /256), offset is signed -- NOTE(review): assumed
+		 * per SFF-8472 external calibration; confirm.
+		 */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			/* Saturate to the signed 16-bit range */
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			/* Saturate to the unsigned 16-bit range */
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	/* Signed 16-bit temperature with slope/offset calibration */
+	const bool success = sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR,
+						  SFP_TEMP_SLOPE_LIN_ADDR,
+						  SFP_TEMP_OFFSET_LIN_ADDR,
+						  p_value, true, ctx);
+
+	return success;
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Unsigned 16-bit supply voltage with slope/offset calibration */
+	const bool success = sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR,
+						  SFP_VOLT_SLOPE_LIN_ADDR,
+						  SFP_VOLT_OFFSET_LIN_ADDR,
+						  p_value, false, ctx);
+
+	return success;
+}
+
+/*
+ * Read NIM bias current
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Unsigned 16-bit TX bias current with slope/offset calibration */
+	const bool success = sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+						  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+						  SFP_TX_BIAS_OFFSET_LIN_ADDR,
+						  p_value, false, ctx);
+
+	return success;
+}
+
+/*
+ * Read NIM TX optical power
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Unsigned 16-bit TX optical power with slope/offset calibration */
+	const bool success = sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+						  SFP_TX_PWR_SLOPE_LIN_ADDR,
+						  SFP_TX_PWR_OFFSET_LIN_ADDR,
+						  p_value, false, ctx);
+
+	return success;
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		for (int i = 0; i < 5; i++) {
+			/*
+			 * 32-bit byte swap through memcpy; the previous
+			 * uint32_t* cast of a float object violated strict
+			 * aliasing (undefined behavior).
+			 */
+			uint32_t n_bits;
+
+			memcpy(&n_bits, &rx_pwr_cal[i], sizeof(n_bits));
+			n_bits = ntohl(n_bits); /* 32 bit swap */
+			memcpy(&rx_pwr_cal[i], &n_bits, sizeof(n_bits));
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		if (rx_power > 65535)
+			return false;
+
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* RX power uses polynomial calibration rather than slope/offset */
+	const bool success =
+		sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+
+	return success;
+}
+
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t n_raw = 0;
+	int n_value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Raw value scaled by 10/256, as in the original implementation */
+	if (sfp_nim_get_temperature(sg->ctx, &n_raw))
+		n_value = (int)(n_raw * 10 / 256);
+
+	update_sensor_value(sg->sensor, n_value);
+}
+
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t n_raw = 0;
+	int n_value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &n_raw))
+		n_value = (int)(n_raw / 10); /* Unit: 100uV -> 1mV */
+
+	update_sensor_value(sg->sensor, n_value);
+}
+
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t n_raw = 0;
+	int n_value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Raw value scaled by 2, as in the original implementation */
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &n_raw))
+		n_value = (int)(n_raw * 2);
+
+	update_sensor_value(sg->sensor, n_value);
+}
+
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t n_raw = 0;
+	int n_value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Reported unscaled */
+	if (sfp_nim_get_tx_power(sg->ctx, &n_raw))
+		n_value = (int)n_raw;
+
+	update_sensor_value(sg->sensor, n_value);
+}
+
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t n_raw = 0;
+	int n_value = -1; /* reported when the read fails */
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Reported unscaled */
+	if (sfp_nim_get_rx_power(sg->ctx, &n_raw))
+		n_value = (int)n_raw;
+
+	update_sensor_value(sg->sensor, n_value);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate a zero-initialized GMF instance.
+ * Returns NULL on allocation failure (callers must check).
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/* calloc replaces the previous malloc + memset pair */
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the instance before releasing it */
+	memset(p, 0, sizeof(nthw_gmf_t));
+	free(p);
+}
+
+/*
+ * Look up GMF module instance n_instance in p_fpga and resolve all register
+ * and field handles into *p. Optional registers/fields (looked up with
+ * module_query_register/register_query_field) are left NULL when the FPGA
+ * image does not provide them.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* p == NULL: probe-only call - just report whether the instance exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields - present in every supported GMF version */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 when unset */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields - NULL when absent from this FPGA image */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	/* An administrative (license-expiry) block overrides enable requests */
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Enable/disable inter-frame-gap handling */
+	const uint32_t n_val = enable ? 1 : 0;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, n_val);
+}
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Optional field - only present on some FPGA versions */
+	if (p->mp_ctrl_ifg_tx_now_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Optional field - only present on some FPGA versions */
+	if (p->mp_ctrl_ifg_tx_on_ts_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	/* Optional field - only present on some FPGA versions */
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			    enable ? 1 : 0);
+}
+
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Written as 1/0 for consistency with the sibling setters */
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write the raw IFG speed value if it fits the register field.
+ * Returns 0 on success, -1 if n_speed_val is out of range.
+ * NOTE(review): the bound admits values up to 2^(width-1) inclusive;
+ * confirm whether the full field width (2^width - 1) was intended.
+ * The uint32_t* cast writes the 64-bit value as one or two 32-bit words,
+ * matching the other 64-bit field writes in this file -- assumes the
+ * word order expected by field_set_val(); confirm on big-endian hosts.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(n_bit_width >= 22);
+
+	return n_bit_width;
+}
+
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	/* Translate a rate limit in bits/s into the raw IFG speed value */
+	const int n_half_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_ratio =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_raw = ((1UL / f_ratio) - 1) * exp2(n_half_width);
+
+	return nthw_gmf_set_ifg_speed_raw(p, (uint64_t)round(f_raw));
+}
+
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_raw_speed = 0;
+
+	/* 0% and 100% both map to the raw value 0 (rate limiting off) */
+	if (f_rate_limit_percent != 0.0 && f_rate_limit_percent != 100.0) {
+		/* Rates above 99% (excluding exactly 100%) are not supported */
+		if (!(f_rate_limit_percent <= 99))
+			return -1;
+
+		const int n_half_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_ratio =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_raw = ((1UL / f_ratio) - 1) * exp2(n_half_width);
+
+		n_raw_speed = (uint64_t)f_raw;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_raw_speed);
+}
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	uint64_t n_delta = delta;
+
+	/* The 64-bit value is written as two 32-bit words */
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&n_delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	/* Optional register - only present on some FPGA versions */
+	if (p->mp_ifg_clock_delta_adjust == NULL)
+		return;
+
+	/* The 64-bit value is written as two 32-bit words */
+	field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+		     (uint32_t *)&delta_adjust, 2);
+	field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+}
+
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	uint64_t n_slack = slack;
+
+	/* The 64-bit value is written as two 32-bit words */
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&n_slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	/* Debug lane-marker compensation value */
+	const uint32_t n_val = compensation;
+
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, n_val);
+}
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	/* Collect the sticky bits into a mask of gmf_status_mask values */
+	uint32_t n_status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed))
+		n_status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted))
+		n_status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return n_status;
+}
+
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	/* Set the selected sticky status fields */
+	if ((status & GMF_STATUS_MASK_DATA_UNDERFLOWED) != 0)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if ((status & GMF_STATUS_MASK_IFG_ADJUSTED) != 0)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the next-packet timestamp (ns). Returns UINT64_MAX when the
+ * optional register is absent from this FPGA image.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/* UINT64_MAX sentinel: the previous ULONG_MAX is only 2^32-1 on
+	 * ILP32 platforms, truncating the "not available" marker.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the maximum packet-delay statistic (ns). Returns UINT64_MAX when
+ * the optional register is absent from this FPGA image.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/* UINT64_MAX sentinel: the previous ULONG_MAX is only 2^32-1 on
+	 * ILP32 platforms, truncating the "not available" marker.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	/* Force the module off first (while enable is still permitted),
+	 * then latch the block so later enables are ignored.
+	 */
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/*
+ * Bit masks for the GMF sticky status word returned by
+ * nthw_gmf_get_status_sticky() and accepted by
+ * nthw_gmf_set_status_sticky(). Values are spelled as explicit single
+ * bits (unchanged: 1 and 2) so a future member cannot silently become
+ * a multi-bit value via implicit enum increment.
+ */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1 << 0,
+	GMF_STATUS_MASK_IFG_ADJUSTED = 1 << 1
+};
+
+/*
+ * State for one GMF module instance: the owning FPGA/module handles
+ * plus cached register and field handles resolved at init time.
+ * Optional registers (delta-adjust, statistics) may be NULL when the
+ * FPGA image does not provide them.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/* --- cached register/field handles --- */
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	/* optional: checked for NULL before use */
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* optional: checked for NULL before use */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	/* optional: checked for NULL before use */
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* IFG speed scaling parameters from the FPGA product parameters */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+/* Lifecycle: allocate, initialize against an FPGA instance, free. */
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Control register bit toggles. */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+/* IFG speed / rate limiting. */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+/* Clock-delta / slack / compensation tuning. */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+/* Sticky status (GMF_STATUS_MASK_* bits): read and acknowledge. */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+/* 64-bit ns statistics; UINT64_MAX-style sentinel when unsupported. */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One (address, value) pair of the SI5340 Rev D clock-chip configuration. */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC instance.
+ * Returns NULL on allocation failure. The caller owns the object and
+ * releases it with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/*
+ * Free an RMC instance created by nthw_rmc_new().
+ * The struct is scrubbed before free so stale handles cannot be used
+ * after release. Safe to call with NULL.
+ */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_rmc_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind an RMC instance to module @n_instance of @p_fpga and cache its
+ * register/field handles.
+ *
+ * When @p is NULL the call degenerates into a probe: it returns 0 if
+ * the module exists and -1 if not, without touching any state.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	/* probe-only mode: report module presence */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the total port count parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL is mandatory: module_get_register() is used directly */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* STATUS/DBG/MAC_IF are optional: queried, may stay NULL */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read the current per-MAC-port block mask from the CTRL register. */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/*
+ * Optional-register getters below return 0xffffffff as a sentinel when
+ * the corresponding register is not present in this FPGA image.
+ */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_sf_ram_of) :
+	       0xffffffff;
+}
+
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_descr_fifo_of) :
+	       0xffffffff;
+}
+
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	return (p->mp_reg_dbg) ? field_get_updated(p->mp_fld_dbg_merge) : 0xffffffff;
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Write the per-MAC-port block mask to the CTRL register and flush. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block all RMC traffic: statistics-drop, keep-alive and every MAC port.
+ * No-op while the adapter is administratively blocked (license expiry),
+ * so the administrative block cannot be widened or reordered from here.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Unblock RMC traffic on the first mn_nims (slave) or mn_ports (master)
+ * ports; the remaining upper mask bits stay blocked.
+ * No-op while the adapter is administratively blocked.
+ * NOTE(review): the shift is UB if mn_nims/mn_ports reaches 32 --
+ * presumably port counts are always far smaller; confirm.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Permanently block all MAC ports and latch the administrative flag
+ * (license expiry enforcement). Once set, nthw_rmc_block()/unblock()
+ * become no-ops.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * State for one RMC (receive MAC control) module instance: FPGA/module
+ * handles, product parameters, and cached register/field handles.
+ * STATUS/DBG/MAC_IF registers are optional and may be NULL.
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports;  /* RX port count (NT_RX_PORTS, fallback NT_PORTS) */
+	int mn_nims;   /* NIM count (NT_NIMS) */
+	bool mb_is_vswitch;
+
+	bool mb_administrative_block; /* license-expiry latch; blocks block/unblock */
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional, may be NULL) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional, may be NULL) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional, may be NULL) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, free. */
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Traffic blocking control. */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+/* Status getters; 0xffffffff when the backing register is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Next free FPGA sensor-result slot, per adapter (_NTSD_MAX_NUM_ADAPTERS_).
+ * static: this mutable counter is private to this translation unit and
+ * must not leak into the global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 };
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * This function setups monitoring of AVR sensors
+ *
+ * Configures the AVR to sample one sensor register and deposit the
+ * result in the next free FPGA slot for this adapter.
+ * Returns the FPGA slot index assigned to the sensor (the index is
+ * consumed even if the SPI setup call fails; failure is only logged).
+ * NOTE(review): identifiers starting with '_' at file scope are
+ * reserved by the C standard -- consider renaming.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format: b0,1 = endianness, b2,3 = signedness */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Read callback for AVR-backed sensors: fetch the raw FPGA-held value
+ * for this sensor's slot, convert it via the group's conv_func and
+ * store the result in the sensor. No-op when group/sensor is NULL.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t p_sensor_result;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	/* out-parameter filled by sensor_read() */
+	sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+/*
+ * Create one AVR-backed sensor group: allocate the group and its
+ * sensor, program the AVR via _avr_sensor_init() and wire up the read
+ * and conversion callbacks.
+ * Returns NULL (after logging) when any allocation fails; the caller
+ * owns the returned group.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* guard: the original dereferenced sg->sensor unchecked */
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Hand out the next free FPGA sensor-result slot for @adapter_no and
+ * advance the per-adapter counter.
+ * NOTE(review): @adapter_no is not bounds-checked against MAX_ADAPTERS
+ * and the counter is not capped -- presumably callers guarantee both;
+ * confirm.
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	uint8_t tmp = s_fpga_indexes[adapter_no];
+
+	s_fpga_indexes[adapter_no] = (uint8_t)(tmp + 1);
+
+	return tmp;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+/*
+ * Create one AVR-backed sensor group (allocates; caller owns the
+ * returned group; NULL on failure).
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Read callback for the on-die FPGA temperature sensor: sample the
+ * TEMPMON_STAT temperature field and store the converted value.
+ * The SPI handle is unused for this sensor type.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Raw ADC code -> temperature. The constants suggest tenths of a
+	 * degree Celsius (44752896/16384 = 2731.5 ~= 273.15 K * 10) --
+	 * TODO confirm scale against the sensor framework.
+	 */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the FPGA on-die temperature sensor group: allocate the group,
+ * attach a TEMPMON monitor and register the read callback.
+ * Returns NULL (after logging) when the group allocation fails; the
+ * caller owns the returned group.
+ * NOTE(review): tempmon_new()/allocate_sensor() results are not
+ * NULL-checked here -- presumably failures surface on first read;
+ * confirm.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+/* Create the FPGA on-die temperature sensor group (NULL on failure). */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate a zero-initialized FPGA sensor monitor.
+ * Returns NULL (after logging) on allocation failure.
+ * calloc (was malloc) so that fpga/mod/reg/fields start as NULL and a
+ * partially initialized monitor is detectable by its users.
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *temp =
+		calloc(1, sizeof(struct nt_fpga_sensor_monitor));
+	if (temp == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return temp;
+}
+
+/*
+ * Bind a monitor to the TEMPMON module of @p_fpga and cache the
+ * TEMPMON_STAT temperature field.
+ * On any lookup failure the remaining handles are set to NULL and the
+ * function returns early -- the original code kept going and passed
+ * NULL module/register handles into the next lookup call.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		t->reg = NULL;
+		t->fields = NULL;
+		return;
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		t->fields = NULL;
+		return;
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+/* Allocate / initialize the FPGA TEMPMON sensor monitor. */
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+/* Short-hands for the sensor description tables below. */
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level-1 SFP sensors: supply voltage, TX bias current, TX/RX power. */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* QSFP level-0: module temperature (name auto-assigned as above). */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* QSFP level-1: supply plus per-lane (1-4) bias and TX/RX power. */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+/* Linear-address of the XFP temperature register in the NIM EEPROM map. */
+#define XFP_TEMP_LIN_ADDR 96
+
+/* Sensor description tables defined in nim_sensors.c. */
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/*
+ * Define sensor devices. The order is a wire protocol shared with the
+ * AVR firmware -- do not reorder or insert entries in the middle.
+ */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/* Wire format of one sensor setup entry sent to the AVR over SPI. */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/*
+ * NOTE(review): pack(1) ends here, so sensor_mon_setup16 itself uses
+ * default packing -- presumably harmless since its members all have
+ * alignment 1 after packing of setup_data16; confirm against the AVR
+ * wire protocol.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res = 1;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = (uint16_t)*rxsz, .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
/*
 * @internal
 * @brief AVR Device Enum
 *
 * Identifies which AVR device on a Generation2 adapter is being addressed.
 */
enum ntavr_device {
	NTAVR_MAINBOARD, /* AVR device on the mainboard */
	NTAVR_FRONTBOARD /* AVR device on the frontboard */
};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
/*
 * Sensor measurement types; the comment on each value gives the unit used
 * for readings of that type.
 */
enum nt_sensor_type_e {
	NT_SENSOR_TYPE_UNKNOWN = 0,
	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
	NT_SENSOR_TYPE_NUMBER = 7,
};
+
/*
 * Generic SFP/SFP+/SFP28 sensors.
 *
 * Use these instead of the deprecated adapter-specific SFP sensor ids.
 */
enum nt_sensors_sfp {
	NT_SENSOR_SFP_TEMP,
	NT_SENSOR_SFP_SUPPLY,
	NT_SENSOR_SFP_TX_BIAS,
	NT_SENSOR_SFP_TX_POWER,
	NT_SENSOR_SFP_RX_POWER,
};
+
/*
 * Generic QSFP/QSFP+/QSFP28 sensors (one TX bias / TX power / RX power
 * entry per lane).
 *
 * Use these instead of the deprecated adapter-specific QSFP sensor ids.
 */
enum nt_sensors_qsfp {
	NT_SENSOR_QSFP_TEMP,
	NT_SENSOR_QSFP_SUPPLY,
	NT_SENSOR_QSFP_TX_BIAS1,
	NT_SENSOR_QSFP_TX_BIAS2,
	NT_SENSOR_QSFP_TX_BIAS3,
	NT_SENSOR_QSFP_TX_BIAS4,
	NT_SENSOR_QSFP_TX_POWER1,
	NT_SENSOR_QSFP_TX_POWER2,
	NT_SENSOR_QSFP_TX_POWER3,
	NT_SENSOR_QSFP_TX_POWER4,
	NT_SENSOR_QSFP_RX_POWER1,
	NT_SENSOR_QSFP_RX_POWER2,
	NT_SENSOR_QSFP_RX_POWER3,
	NT_SENSOR_QSFP_RX_POWER4,
};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
/*
 * Sensor subtypes, refining NT_SENSOR_TYPE_POWER / NT_SENSOR_TYPE_HIGH_POWER.
 */
enum nt_sensor_sub_type_e {
	NT_SENSOR_SUBTYPE_NA = 0,
	/*
	 * For NT_SENSOR_TYPE_POWER on optical modules: optical modulation
	 * amplitude measured
	 */
	NT_SENSOR_SUBTYPE_POWER_OMA,
	/* For NT_SENSOR_TYPE_POWER on optical modules: average power measured */
	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
	/* For NT_SENSOR_TYPE_HIGH_POWER on adapters: total power consumption */
	NT_SENSOR_SUBTYPE_POWER_TOTAL
};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
/*
 * Sensor source: where a sensor physically resides (port or adapter) and
 * its detail level. Values are bit flags.
 */
enum nt_sensor_source_e {
	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
	/*
	 * Sensors located in a port. These are primary sensors - usually NIM
	 * temperature. Presence depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_PORT = 0x01,
	/*
	 * Level 1 sensors located in a port. These are secondary sensors -
	 * usually NIM supply voltage, Tx bias and Rx/Tx optical power.
	 * Presence depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_LEVEL1_PORT = 0x02,
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_PORT = 0x04, /* Level 2 sensors located in a port */
#endif
	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
	NT_SENSOR_SOURCE_LEVEL1_ADAPTER = 0x10, /* Level 1 sensors mounted on the adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_ADAPTER = 0x20, /* Level 2 sensors mounted on the adapter */
#endif
};
+
/*
 * Sensor state as reported to monitoring clients.
 */
enum nt_sensor_state_e {
	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
	NT_SENSOR_STATE_NOT_PRESENT =
		4 /* The sensor is not present, for example, SFP without diagnostics */
};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
/*
 * Bonding role of an adapter (master/slave/peer).
 */
enum nt_bonding_type_e {
	NT_BONDING_UNKNOWN, /* Unknown bonding type */
	NT_BONDING_MASTER, /* Adapter is master in the bonding */
	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
};
+
/* Generic public (level 0) sensor ids */
enum nt_sensors_e {
	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
};
+
/*
 * Adapter types.
 *
 * Values are append-only for ABI stability; some names are aliases of an
 * earlier value (deprecated spellings). The two 4GARCH entries are marker
 * bits, not ordinary adapter ids.
 */
enum nt_adapter_type_e {
	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
	/* Alias: NT200A01 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X100 = NT_ADAPTER_TYPE_NT200A01,
	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
	/* Alias: NT200A01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X10_25 = NT_ADAPTER_TYPE_NT200A01_2X25,
	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
	/* Alias: NT200A02 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X10_25 = NT_ADAPTER_TYPE_NT200A02_2X25,
	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
	/* Alias: NT200A02 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_4X10_25 = NT_ADAPTER_TYPE_NT200A02_4X25,
	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
	/* Alias: NT50B01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X10_25 = NT_ADAPTER_TYPE_NT50B01_2X25,
	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
#endif
	/* Bit marking an adapter as a 4GArch Hamoa adapter */
	NT_ADAPTER_TYPE_4GARCH_HAMOA = (1U << 29),
	/* Bit marking an adapter as a 4GArch adapter */
	NT_ADAPTER_TYPE_4GARCH = (1U << 30),
	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
};
+
/* The NT200E3 adapter sensor id's */
typedef enum nt_sensors_adapter_nt200_e3_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
	NT_SENSOR_NT200E3_MCU_TEMP,
	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) - calculated values, no alarms */
	NT_SENSOR_NT200E3_NT200E3_POWER, /* Total power consumption */
	NT_SENSOR_NT200E3_FPGA_POWER, /* FPGA power consumption */
	NT_SENSOR_NT200E3_DDR4_POWER, /* DDR4 RAM power consumption */
	NT_SENSOR_NT200E3_NIM_POWER, /* NIM power consumption */

	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
} nt_sensors_adapter_nt200_e3_t;
+
/*
 * Deprecated port sensors - prefer the generic QSFP ids instead.
 * The NIM temperature sensor must keep the lowest sensor_index (enum value)
 * so the monitoring tool shows it first in port mode.
 */
enum nt_sensors_port_nt200_e3_2_e {
	/* Public sensors */
	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */

	/* Diagnostic sensors (Level 1) */
	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
+/* Conversion functions */
/* Interpret the low 16 bits of the raw result as a signed value. */
int null_signed(uint32_t p_sensor_result)
{
	const int16_t raw = (int16_t)p_sensor_result;

	return raw;
}
+
/* Interpret the low 16 bits of the raw result as an unsigned value. */
int null_unsigned(uint32_t p_sensor_result)
{
	const uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * ******************************************************************************
 * EXAR7724: convert a raw Vch reading to Napatech units.
 * Datasheet: Vout = ReadVal * 0.015 V (PRESCALE accounted for), i.e. 15 mV/step.
 * ******************************************************************************
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	/* NT unit: 1mV */
	return (int)(p_sensor_result * 15);
}
+
/*
 * ******************************************************************************
 * EXAR7724: convert a raw Vin reading to Napatech units.
 * Datasheet: Vout = ReadVal * 0.0125 V, computed as (ReadVal * 25) / 2 mV.
 * ******************************************************************************
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	/* NT unit: 1mV */
	return (int)(p_sensor_result * 25 / 2);
}
+
/*
 * ******************************************************************************
 * EXAR7724: convert a raw Tj reading to Napatech units.
 * Datasheet: Temp (Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K
 *                          = ReadVal * 5K
 * ******************************************************************************
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * Offset 2730 is used instead of the more exact 2732: the sensor steps
	 * in 5-degree increments, so round steps read more naturally.
	 * NT unit: 0.1C
	 */
	return (int)(p_sensor_result * 50) - 2730;
}
+
/*
 * ******************************************************************************
 * Decode Linear Technology "Linear 5s_11s" format: Y * 2**N, where
 * N = b[15:11] is a 5-bit two's complement exponent and Y = b[10:0] is an
 * 11-bit two's complement mantissa. The multiplier scales the result to
 * Napatech units.
 * ******************************************************************************
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	/* Sign-extend the 11-bit mantissa Y */
	int y = value & 0x07FF;

	if (y & 0x0400)
		y -= 0x0800;

	/* Sign-extend the 5-bit exponent N */
	int n = (value >> 11) & 0x1F;

	if (n & 0x10)
		n -= 0x20;

	y *= multiplier;

	if (n > 0)
		y *= (1 << n);
	else if (n < 0)
		y /= (1 << (-n));

	return y;
}
+
/*
 * ******************************************************************************
 * LTM4676 temperature: decode Linear 5s_11s and scale to 0.1C units.
 * ******************************************************************************
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	/* NT unit: 0.1C */
	return (uint16_t)conv5s_11s((uint16_t)p_sensor_result, 10);
}
+
/*
 * ******************************************************************************
 * MP2886a: convert a raw Tj reading to Napatech units.
 * MPS-2886p READ_TEMPERATURE (register 0x8D) is a 2-byte unsigned integer.
 * ******************************************************************************
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	/* NT unit: 0.1C */
	return (uint16_t)p_sensor_result;
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * ******************************************************************************
 * DS1775: convert a raw temperature reading to Napatech units.
 * NT unit: 0.1 deg, native unit: 1/256 C.
 * ******************************************************************************
 */
int ds1775_t(uint32_t p_sensor_result)
{
	return (int)(p_sensor_result * 10 / 256);
}
+
/*
 * ******************************************************************************
 * FAN: convert a tick count to RPM.
 * NOTE(review): the original comment states 2 ticks/revolution while the
 * formula divides by 4 (ticks * 60 / 4) - presumably the count covers a
 * doubled measurement window; confirm against the tachometer configuration.
 * ******************************************************************************
 */
int fan(uint32_t p_sensor_result)
{
	return (int)(p_sensor_result * 60U / 4);
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/* Alarm reporting mode for a sensor */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM,
	NT_SENSOR_LOG_ALARM,
	NT_SENSOR_DISABLE_ALARM,
};
+
/*
 * Sensor class: which physical subsystem a sensor belongs to.
 */
enum nt_sensor_class_e {
	NT_SENSOR_CLASS_FPGA = 0, /* FPGA based sensors e.g FPGA temperature */
	NT_SENSOR_CLASS_MCU = 1, /* MCU based sensors e.g MCU temperature */
	NT_SENSOR_CLASS_PSU = 2, /* PSU based sensors e.g PSU temperature */
	NT_SENSOR_CLASS_PCB = 3, /* PCB based sensors e.g PCB temperature */
	NT_SENSOR_CLASS_NIM = 4, /* NIM based sensors e.g NIM temperature */
	NT_SENSOR_CLASS_ANY = 5, /* Any sensor class */
};

typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no;
+	uint8_t m_intf_no;
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si;
+	struct nt_info_sensor_s info;
+	enum nt_sensor_event_alarm_e alarm;
+	bool m_enable_alarm;
+};
+
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga;
+	nt_module_t *mod;
+
+	nt_register_t *reg;
+	nt_field_t **fields;
+	uint8_t fields_num;
+};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name. */
+};
+
+struct nt_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	struct nt_fpga_sensor_monitor *monitor;
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi);
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result);
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next;
+};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+#define NT_INFO_SENSOR_NAME 50
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	/* Number of NT200A02 level 0,1 board sensors
	 * (original comment said NT200A01 - copy-paste typo)
	 */
	NT_SENSOR_NT200A02_L1_MAX,
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-29 10:17   ` Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized CAT module instance.
+ *
+ * Returns the new instance, or NULL on allocation failure. The caller
+ * owns the memory and must release it with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	struct cat_nthw *p = calloc(1, sizeof *p);
+
+	return p;
+}
+
+/*
+ * Release a CAT module instance returned by cat_nthw_new().
+ * Passing NULL is a no-op. The struct is scrubbed before free()
+ * (defensive; note a plain memset before free may be elided by the
+ * optimizer, so this is not suitable for wiping secrets).
+ */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Forward the debug-mode setting to the underlying CAT register module.
+ * NOTE(review): p is dereferenced unconditionally — unlike cat_nthw_init()
+ * there is no NULL tolerance here; the caller must pass a valid handle.
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+/*
+ * Bind a cat_nthw instance to the CAT module registers of an FPGA.
+ *
+ * p          - instance to initialize; if NULL the call only probes for
+ *              the module: returns 0 when the instance exists, -1 if not
+ * p_fpga     - FPGA handle to look the module up on
+ * n_instance - CAT module instance number (asserted to be 0..255)
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ * Fields looked up with register_query_field()/module_query_register()
+ * may be absent on some FPGA images; those pointers can stay NULL and
+ * the corresponding setters assert before use.
+ */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL handle: existence probe only (see function header) */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	/* -1 is the fallback when the KM_IF_CNT product parameter is absent */
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	/* KM_IF_CNT parameter absent: single KM interface, unnumbered KM_OR
+	 * field; otherwise the per-interface KM0_OR/KM1_OR fields exist.
+	 */
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	/* NOTE(review): "< 0" here vs "== -1" above — presumably equivalent
+	 * since -1 is the parameter default, but the two tests should match.
+	 */
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	/* Optional CFN fields — queried, may stay NULL on older images */
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	/* Optional registers — NULL-checked because the module/register
+	 * may not be present in this FPGA build.
+	 */
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN record address (CTRL.ADR) subsequent writes apply to. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN control count field (CTRL.CNT).
+ * Renamed from the meaningless identifier "r" — an obvious editing
+ * accident — to follow this module's cat_nthw_<reg>_<op> convention
+ * (cf. cat_nthw_kce_cnt(), cat_nthw_fte_cnt(), cat_nthw_cte_cnt()).
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/*
+ * CFN data-field setters: each writes one field of the CAT_CFN_DATA
+ * shadow register; cat_nthw_cfn_flush() then flushes the CTRL and DATA
+ * registers. Setters guarded by assert() touch fields looked up with
+ * register_query_field() in cat_nthw_init() and may be absent (NULL)
+ * on some FPGA images.
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* PM_CMP is a multi-word field: val must point to mn_words uint32_t's */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR only exists when the FPGA exposes two KM interfaces */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Flush the staged CFN CTRL and DATA registers */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE/KCS/FTE accessors: "index" selects the KM interface (0 or 1);
+ * cat_nthw_init() populates index 1 only when the KM_IF_CNT product
+ * parameter is present — callers must not pass index 1 otherwise.
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE accessors: stage one enable bit per downstream block in the CTE data
+ * shadow register.  Values are committed by cat_nthw_cte_flush().
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+/*
+ * The remaining enable fields are assert-guarded: presumably they only
+ * exist on newer FPGA register layouts -- TODO confirm in cat_nthw_init().
+ */
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Write the staged CTE ctrl and data shadow registers to the hardware. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS accessors: category translation slots, staged then flushed. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT accessors. */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	/* Assert-guarded: field is presumably optional -- TODO confirm. */
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT accessors. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO accessors. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+/* Offset is signed; the 32-bit field setter is reused for it. */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK accessors. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+/*
+ * RCK data has no named sub-field handle: write the whole register shadow
+ * and mark it dirty explicitly so the next flush sends it to hardware.
+ */
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN accessors. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC accessors.  Every KCC handle is assert-guarded, so the whole KCC
+ * register group is presumably optional on some FPGA variants -- TODO
+ * confirm in cat_nthw_init().
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* 'val' must point to at least two 32-bit words (64-bit key). */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+/*
+ * CCE accessors.  Like KCC, all handles are assert-guarded -- the group is
+ * presumably optional on some FPGA variants (TODO confirm).
+ */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/* CCS select/count; the data-field setters are macro-generated below. */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+/*
+ * Generate a CCS data-field setter cat_nthw_ccs_data_<name>() that asserts
+ * the field handle exists and stages 'val' in the shadow register.
+ * NOTE(review): the ';' after each instantiation below expands to a stray
+ * top-level semicolon after a function definition -- accepted by common
+ * compilers but not strictly ISO C; consider dropping the semicolons or
+ * restructuring the macro.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en);
+CATNTHW_CCS_SET(cor);
+CATNTHW_CCS_SET(hsh_en);
+CATNTHW_CCS_SET(hsh);
+CATNTHW_CCS_SET(qsl_en);
+CATNTHW_CCS_SET(qsl);
+CATNTHW_CCS_SET(ipf_en);
+CATNTHW_CCS_SET(ipf);
+CATNTHW_CCS_SET(slc_en);
+CATNTHW_CCS_SET(slc);
+CATNTHW_CCS_SET(pdb_en);
+CATNTHW_CCS_SET(pdb);
+CATNTHW_CCS_SET(msk_en);
+CATNTHW_CCS_SET(msk);
+CATNTHW_CCS_SET(hst_en);
+CATNTHW_CCS_SET(hst);
+CATNTHW_CCS_SET(epp_en);
+CATNTHW_CCS_SET(epp);
+CATNTHW_CCS_SET(tpe_en);
+CATNTHW_CCS_SET(tpe);
+CATNTHW_CCS_SET(rrb_en);
+CATNTHW_CCS_SET(rrb);
+CATNTHW_CCS_SET(sb0_type);
+CATNTHW_CCS_SET(sb0_data);
+CATNTHW_CCS_SET(sb1_type);
+CATNTHW_CCS_SET(sb1_data);
+CATNTHW_CCS_SET(sb2_type);
+CATNTHW_CCS_SET(sb2_data);
+
+/* Write the staged CCS ctrl and data shadow registers to the hardware. */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/*
+ * CFN accessors: stage values into the CFN shadow registers; commit with
+ * cat_nthw_cfn_flush().
+ */
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/* Fixed: was mis-declared as 'r()'; matches the cat_nthw_cfn_cnt() definition
+ * and the _cnt accessor every other register group provides.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/*
+ * Per-register-group accessors.  All *_select/_cnt/field setters stage
+ * values in the shadow model; the matching *_flush() commits them to the
+ * hardware.  Groups taking an 'index' exist once per KM interface (0/1).
+ */
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Register and field shadow handles for one CAT module instance.
+ * Presumably resolved during cat_nthw_init() -- TODO confirm.  Handles for
+ * optional groups (km1_or, kcc_*, cce_*, ccs_*, some cte_* fields) may be
+ * NULL; the assert-guarded accessors above check before use.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt; /* number of KM interfaces; the [2] arrays below are sized for the maximum */
+
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* One entry per KM interface. */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK data is written as a whole register (no sub-field handles). */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized CSU context.
+ * Returns NULL on allocation failure; caller must check before use and
+ * release with csu_nthw_delete().
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc() replaces the previous malloc()+memset() pair: same
+	 * zeroed result, with overflow-checked sizing.
+	 */
+	struct csu_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Free a CSU context created by csu_nthw_new().  Safe to call with NULL.
+ * The struct is zeroed before free(), matching the style used elsewhere
+ * in this driver (helps catch use-after-free of stale handles).
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a CSU context to FPGA instance 'n_instance' and resolve the RCP
+ * register/field handles.
+ *
+ * Calling with p == NULL only probes for the module: returns 0 if the
+ * instance exists, -1 if not.  With a context, returns 0 on success or -1
+ * (with an error log) when the instance is missing.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL context: module-presence probe only (see contract above). */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Resolve RCP control and data register handles plus their fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/*
+ * RCP accessors: stage values in the shadow registers; nothing reaches the
+ * hardware until csu_nthw_rcp_flush().
+ */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Write the staged RCP ctrl and data shadow registers to the hardware. */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name starts with '_' + uppercase, which is reserved by
+ * the C standard for the implementation — consider FLOW_NTHW_CSU_H_.
+ */
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one CSU (checksum update) FPGA module instance. All register
+ * and field pointers are resolved from the FPGA model by csu_nthw_init().
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number bound at init */
+	nt_fpga_t *mp_fpga;	/* owning FPGA model */
+
+	nt_module_t *m_csu;	/* CSU module handle */
+
+	nt_register_t *mp_rcp_ctrl;	/* RCP (recipe) control register */
+	nt_field_t *mp_rcp_ctrl_adr;	/* recipe index to address */
+	nt_field_t *mp_rcp_ctrl_cnt;	/* number of consecutive recipes */
+	nt_register_t *mp_rcp_data;	/* RCP data register */
+	nt_field_t *mp_rcp_data_ol3_cmd;	/* outer L3 checksum command */
+	nt_field_t *mp_rcp_data_ol4_cmd;	/* outer L4 checksum command */
+	nt_field_t *mp_rcp_data_il3_cmd;	/* inner (tunneled) L3 command */
+	nt_field_t *mp_rcp_data_il4_cmd;	/* inner (tunneled) L4 command */
+};
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+/* Resolve registers/fields; returns 0 on success, -1 if no such instance. */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field writers; csu_nthw_rcp_flush() commits the shadow values. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized FLM handle.
+ *
+ * @return New handle, or NULL on allocation failure. The caller owns the
+ *         object and releases it with flm_nthw_delete().
+ */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc+memset)
+	 * and checks the size multiplication for overflow.
+	 */
+	struct flm_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	/* NULL-tolerant destructor; scrubs the object before releasing it. */
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	/* Forward the debug level to the underlying FLM module model. */
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+/*
+ * Bind an FLM handle to FPGA module instance n_instance, resolving all
+ * register and field pointers from the FPGA model.
+ *
+ * @param p          Handle to populate. May be NULL to only probe whether
+ *                   the module instance exists.
+ * @param p_fpga     FPGA model to query.
+ * @param n_instance Module instance number (asserted to be 0..255).
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence without binding anything. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	/* CONTROL register. Fields looked up with register_query_field()
+	 * (rather than register_get_field()) are optional and may be NULL
+	 * on FPGA images that lack them; users assert before access.
+	 */
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	/* STATUS register. */
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	/* Simple single-field configuration registers. */
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	/* PRIO register: four limit/flow-type pairs. */
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	/* PST (port state table) control/data registers. */
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR)
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	/* RCP (recipe) control/data registers. */
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* DMA-accessed registers: only addresses are used (no fields). */
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	/* Statistics counters. Registers looked up with
+	 * module_query_register() are optional and may be NULL.
+	 */
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/* CONTROL register shadow writers; flm_nthw_control_flush() commits them
+ * to hardware in one go.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/* PDS/PIS are optional fields (resolved with register_query_field() in
+ * flm_nthw_init()); callers must only use them when present.
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Commit all shadowed CONTROL fields to hardware. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/* STATUS field accessors. With get != 0 the cached field value is read into
+ * *val; call flm_nthw_status_update() first to refresh the cache from
+ * hardware. For critical/panic/crcerr, get == 0 instead writes *val to the
+ * field shadow (presumably to acknowledge/clear the flag — confirm against
+ * the FPGA register description); commit with flm_nthw_status_flush().
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write shadowed STATUS values to hardware. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the cached STATUS value from hardware. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/* Single-field configuration registers: each setter writes the shadow
+ * value and the matching *_flush() commits it to hardware.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/* PRIO register: four limit/flow-type pairs; commit with
+ * flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/* PST (port state table) accessors: select the entry (adr/cnt), set the
+ * data fields, then commit ctrl+data with flm_nthw_pst_flush().
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/* RCP (recipe) accessors: select the recipe (adr/cnt), program the data
+ * fields, then commit ctrl+data with flm_nthw_rcp_flush().
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: val must point to 10 32-bit words. */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read BUF_CTRL over RAB DMA and unpack the three buffer counters.
+ *
+ * @param lrn_free  Out: free words in the learn queue (low 16 bits, word 0).
+ * @param inf_avail Out: words available in INF_DATA (high 16 bits, word 0).
+ * @param sta_avail Out: words available in STA_DATA (low 16 bits, word 1).
+ * @return 0 on success, nonzero RAB DMA error code otherwise.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+	int ret;	/* was dead-initialized to -1; begin() sets it */
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA result lives in a power-of-two ring buffer;
+		 * mask the index to stay inside it.
+		 */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write word_count learn-record words to LRN_DATA over RAB DMA and read
+ * back the BUF_CTRL counters in the same DMA transaction.
+ *
+ * @param data       Learn records to write (word_count 32-bit words).
+ * @param lrn_free   Out: free words remaining in the learn queue.
+ * @param inf_avail  Out: words available in INF_DATA.
+ * @param sta_avail  Out: words available in STA_DATA.
+ * @return 0 on success, nonzero RAB DMA error code otherwise.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+	int ret;
+
+	/* Propagate begin()'s error code instead of a generic -1, matching
+	 * the other flm_nthw_*_update() DMA helpers in this file.
+	 */
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Unpack counters from the power-of-two DMA ring buffer. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count words from INF_DATA over RAB DMA into data[], and read
+ * back the BUF_CTRL counters in the same transaction.
+ *
+ * @param data       Out: buffer receiving word_count 32-bit words.
+ * @param lrn_free   Out: free words remaining in the learn queue.
+ * @param inf_avail  Out: words available in INF_DATA.
+ * @param sta_avail  Out: words available in STA_DATA.
+ * @return 0 on success, nonzero RAB DMA error code otherwise.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+	int ret;	/* was dead-initialized to -1; begin() sets it */
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the power-of-two DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count words from STA_DATA over RAB DMA into data[], and read
+ * back the BUF_CTRL counters in the same transaction.
+ *
+ * @param data       Out: buffer receiving word_count 32-bit words.
+ * @param lrn_free   Out: free words remaining in the learn queue.
+ * @param inf_avail  Out: words available in INF_DATA.
+ * @param sta_avail  Out: words available in STA_DATA.
+ * @return 0 on success, nonzero RAB DMA error code otherwise.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+	int ret;	/* was dead-initialized to -1; begin() sets it */
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the power-of-two DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/* Statistics counter accessors. Each *_update() refreshes the cached
+ * register from hardware; each *_cnt(p, val, get) then copies the cached
+ * value into *val when get != 0. Accessors that assert their pointers
+ * cover optional registers (resolved via module_query_register() in
+ * flm_nthw_init()) that may be absent on some FPGA images.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn Done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Driver-side handle for one FLM (Flow Matcher) FPGA module instance.
+ *
+ * All nt_register_t* / nt_field_t* members are cached lookups into the
+ * FPGA model, resolved once by flm_nthw_init().  Naming convention:
+ * mp_<register> for register handles, mp_<register>_<field> for the
+ * fields inside that register.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	/* Opaque handle -- presumably the register access controller
+	 * (RAC); NOTE(review): confirm against nthw_drv.
+	 */
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* Load indicator registers (BIN/PPS/LPS/APS) */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/flow-type pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST table: CTRL (address/count) + DATA record */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP (recipe) table: CTRL (address/count) + DATA record */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status data stream buffers and their control */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counter registers, one CNT field each */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module model. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize an HFU handle.
+ *
+ * Returns NULL on allocation failure.  The caller owns the handle and
+ * must release it with hfu_nthw_delete().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair. */
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/*
+ * Release an HFU handle previously returned by hfu_nthw_new().
+ * Accepts NULL.  The structure is cleared before being freed so that
+ * stale register/field pointers cannot be reused by mistake.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve register and field handles for HFU instance n_instance.
+ *
+ * Returns 0 on success and -1 when no such module instance exists.
+ * When p is NULL the call only probes for the instance: it returns 0
+ * if the instance exists and -1 otherwise, without touching any state.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/*
+	 * Reuse the module handle queried above instead of issuing a
+	 * second, redundant fpga_query_module() lookup; this also matches
+	 * the pattern used by the other flow_nthw_* init functions.
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * HFU RCP field write helpers.
+ *
+ * Each helper stages one field value with field_set_val32().  The
+ * values presumably only reach hardware when hfu_nthw_rcp_flush()
+ * flushes RCP_CTRL/RCP_DATA -- NOTE(review): confirm the shadow
+ * register semantics in nthw_fpga_model.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/*
+ * Commit the staged RCP_CTRL and RCP_DATA values (the '1' argument is
+ * presumably a word/record count -- NOTE(review): confirm against
+ * register_flush() in nthw_fpga_model).
+ */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Driver-side handle for one HFU FPGA module instance.
+ *
+ * All register/field pointers are cached lookups into the FPGA model,
+ * resolved once by hfu_nthw_init().  The RCP_DATA fields cover packet
+ * length (LEN_A/B/C) and TTL rewrite parameters plus protocol/offset
+ * information -- NOTE(review): exact field semantics come from the
+ * FPGA register definitions; confirm there.
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP table control: address and count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP table data record fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable or disable debug tracing on the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/* Allocate a zero-initialized HSH context; returns NULL on allocation failure. */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() zeroes the block in one call, replacing malloc+memset */
+	struct hsh_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free an HSH context; a NULL pointer is silently ignored. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the HSH (hash) module instance and cache its RCP register and
+ * field handles, then write a zeroed default recipe to entry 0 and flush
+ * it to the FPGA.
+ *
+ * Probe mode: when p is NULL the call only reports module presence
+ * (0 = present, -1 = absent).  Returns -1 when the instance does not exist.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache control (address/count) and data register field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: register_query_field() returns NULL when absent */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: write a benign default recipe to record 0 */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* NOTE(review): mp_rcp_data_p_mask and mp_rcp_data_auto_ipv4_mask are
+	 * resolved above but not written in this default sequence -- confirm
+	 * they are intentionally left at their hardware reset values.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* 31: presumably the "disabled"/default HSH_TYPE encoding -- confirm */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each call updates the register shadow only; nothing
+ * reaches the FPGA until hsh_nthw_rcp_flush() is called.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* val must point to at least mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Offsets are signed; the value is passed through to the 32-bit field setter. */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* val must point to 10 32-bit words. */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional; the write is skipped when the field is absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Commit the shadowed control and data registers to the FPGA. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Register/field shadow handles for one HSH (hash) module instance. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter instance number */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+
+	nt_module_t *m_hsh;	/* HSH module handle */
+
+	/* RCP control register: record address and access count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-field shadows */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;	/* optional; NULL when absent */
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable or disable debug tracing on the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/* Allocate a zeroed HST context; returns NULL on allocation failure. */
+struct hst_nthw *hst_nthw_new(void)
+{
+	struct hst_nthw *p = malloc(sizeof(struct hst_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free an HST context; a NULL pointer is silently ignored. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the HST (header stripper) module instance and cache its RCP
+ * register and field handles.
+ *
+ * Probe mode: when p is NULL the call only reports module presence
+ * (0 = present, -1 = absent).  Returns -1 when the instance does not exist.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default recipe is written and
+ * no flush is issued here -- confirm the hardware reset values suffice.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache control (address/count) and data register field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each call updates the register shadow only; call
+ * hst_nthw_rcp_flush() to commit the record to the FPGA.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Commit the shadowed control and data registers to the FPGA. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handles for one HST (header stripper) module instance. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter instance number */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+
+	nt_module_t *m_hst;	/* HST module handle */
+
+	/* RCP control register: record address and access count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-field shadows */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable or disable debug tracing on the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/* Allocate a zeroed IFR context; returns NULL on allocation failure. */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	struct ifr_nthw *p = malloc(sizeof(struct ifr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free an IFR context; a NULL pointer is silently ignored. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the IFR module instance and cache its RCP register and field
+ * handles.
+ *
+ * Probe mode: when p is NULL the call only reports module presence
+ * (0 = present, -1 = absent).  Returns -1 when the instance does not exist.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle looked up above; the original issued a redundant
+	 * second fpga_query_module() call here (hsh/hst init reuse p_mod).
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Select the RCP record address for subsequent data-register accesses. */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count. */
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Enable/disable the selected RCP record (shadow only; flush to apply). */
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the MTU of the selected RCP record (shadow only; flush to apply). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix copy-paste bug: the assert checked mp_rcp_data_en instead of
+	 * the field actually written below.
+	 */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Commit the shadowed control and data registers to the FPGA. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handles for one IFR module instance. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter instance number */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+
+	nt_module_t *m_ifr;	/* IFR module handle */
+
+	/* RCP control register: record address and access count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;	/* record enable */
+	nt_field_t *mp_rcp_data_mtu;	/* MTU value */
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Collapse any non-zero count into a 0/1 presence flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val != 0;
+}
+
+/* Allocate a zeroed capability-info context; returns NULL on failure. */
+struct info_nthw *info_nthw_new(void)
+{
+	struct info_nthw *p = malloc(sizeof(struct info_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a capability-info context; a NULL pointer is ignored. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Snapshot the FPGA product parameters describing the flow-filter
+ * capabilities into the info context.  Counts belonging to optional
+ * modules are multiplied by a 0/1 presence flag, so absent modules
+ * report zero capacity.  Always returns 0.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is only usable when all five of its sub-modules are present */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is bounded by the smaller of the Rx and Tx queue counts */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): the -1 defaults are exposed through unsigned getters
+	 * below -- confirm callers treat the all-ones value as "unset".
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Trivial accessors for the capability values captured by info_nthw_init().
+ * Values for absent optional modules are zero (see the presence-flag
+ * multiplication in the init function).
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard; consider FLOW_NTHW_INFO_H_ if project style allows.
+ */
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Lifecycle: info_nthw_new() allocates, info_nthw_init() binds the instance
+ * to the INFO FPGA module (returns non-zero on failure), info_nthw_delete()
+ * releases it.
+ */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Capability accessors: return values cached in the instance fields below. */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/* Cached FPGA capability/parameter values; exposed read-only through the
+ * accessors above.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA module handle. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ioa_nthw instance.
+ * Returns NULL on allocation failure; release with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zero-fills in one step (replaces malloc()+memset()). */
+	struct ioa_nthw *p = calloc(1, sizeof(struct ioa_nthw));
+
+	return p;
+}
+
+/*
+ * Free an ioa_nthw instance; NULL is accepted and ignored.
+ * The memset scrubs the struct (stale register/field pointers) before free.
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an ioa_nthw instance to IOA module instance n_instance of p_fpga and
+ * resolve all register/field handles used by the setters below.
+ *
+ * Calling with p == NULL only probes for the module's existence:
+ * returns 0 if present, -1 if not. With a real instance, returns 0 on
+ * success and -1 when the module instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without initializing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control (address/count) and data fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * The EPP registers are optional: module_query_register()
+		 * may return NULL, in which case the field pointers are left
+		 * NULL and the EPP setters become no-ops.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/* RCP */
+/*
+ * Recipe field setters: each writes one field value via field_set_val32();
+ * ioa_nthw_rcp_flush() then flushes the control and data registers
+ * (presumably committing the staged values to hardware — confirm against
+ * the nthw_fpga_model register semantics).
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * EPP setters: the EPP registers are optional on some FPGA versions (see
+ * ioa_nthw_init()), so every accessor NULL-checks its handle and silently
+ * does nothing when the register is absent.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/* Handle for the IOA FPGA module: cached register/field pointers resolved
+ * by ioa_nthw_init(). The mp_roa_epp_* members are optional and may be NULL
+ * (register absent on some FPGA versions).
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+/* Lifecycle; ioa_nthw_init() returns non-zero when the module instance
+ * does not exist (p == NULL probes for existence only).
+ */
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Set a KM field only if it exists on this FPGA version (the field pointer
+ * comes from register_query_field() and may be NULL).
+ *
+ * Hygiene fixes vs. the original: the internal temporary is named _t (the
+ * original declared a local named `a`, which could capture an `a` appearing
+ * in the caller's value expression), the field argument is evaluated
+ * exactly once, and the value argument is parenthesized.
+ */
+#define CHECK_AND_SET_VALUE(_a, val)             \
+	do {                                     \
+		__typeof__(_a) _t = (_a);        \
+		if (_t) {                        \
+			field_set_val32(_t, (val)); \
+		}                                \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM module handle. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized km_nthw instance.
+ * Returns NULL on allocation failure; release with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zero-fills in one step (replaces malloc()+memset()). */
+	struct km_nthw *p = calloc(1, sizeof(struct km_nthw));
+
+	return p;
+}
+
+/*
+ * Free a km_nthw instance; NULL is accepted and ignored.
+ * The memset scrubs the struct (stale register/field pointers) before free.
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a km_nthw instance to KM module instance n_instance of p_fpga and
+ * resolve all register/field handles used by the setters below.
+ *
+ * Calling with p == NULL only probes for the module's existence:
+ * returns 0 if present, -1 if not. With a real instance, returns 0 on
+ * success and -1 when the module instance does not exist.
+ *
+ * Fields resolved with register_query_field() are optional across FPGA
+ * versions and may stay NULL; the CHECK_AND_SET_VALUE-based setters
+ * tolerate that.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without initializing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Optional fields: presence decides the v0.6+ branch below. */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR)
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines: fall back to the pre-rename QW*/SW*
+		 * B-side field names on older FPGA register maps.
+		 */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/* RCP */
+/*
+ * Recipe field setters: each writes one field value; km_nthw_rcp_flush()
+ * flushes the control and data registers. Setters using
+ * CHECK_AND_SET_VALUE() target optional fields and are no-ops when the
+ * field is absent on this FPGA version.
+ *
+ * Fix: removed the stray ';' after each function body — a semicolon at
+ * file scope is not valid ISO C and triggers -pedantic warnings.
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+/*
+ * More recipe field setters (SWX/DW8/DW10 and key masks).
+ * Fix: removed the stray ';' after each function body — a semicolon at
+ * file scope is not valid ISO C and triggers -pedantic warnings.
+ */
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+/* Multi-word mask setters: val must point to at least mn_words uint32_t. */
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+} /* for DW8/DW10 from v6+ */
+
+/*
+ * Remaining recipe field setters and the RCP flush.
+ * Fix: removed the stray ';' after each function body — a semicolon at
+ * file scope is not valid ISO C and triggers -pedantic warnings.
+ *
+ * NOTE(review): the *_b_dyn/*_b_ofs fields below come from
+ * register_query_field() in km_nthw_init() and may be NULL on some FPGA
+ * versions, yet these setters do not guard like the CHECK_AND_SET_VALUE
+ * ones — confirm all supported FPGAs provide them (or that callers check).
+ */
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * CAM shadow-register setters: stage address/count, data words W0-W5 and
+ * flow types FT0-FT5, then commit with km_nthw_cam_flush().
+ * Stray ';' after each function body removed (invalid ISO C at file scope).
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* Commit the staged CAM selection and data to the hardware. */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/*
+ * TCAM shadow-register setters.
+ * Stray ';' after each function body removed (invalid ISO C at file scope).
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* Writes a 3-word (96-bit) value; val must point at least 3 uint32_t. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+/* Commit the staged TCAM selection and data to the hardware. */
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/*
+ * TCI shadow-register setters.
+ * Stray ';' after each function body removed (invalid ISO C at file scope).
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+/* Commit the staged TCI selection and data to the hardware. */
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/*
+ * TCQ shadow-register setters.
+ * Stray ';' after each function body removed (invalid ISO C at file scope).
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* BANK_MASK may be absent on some FPGA versions; macro skips NULL field. */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant (3 words) for the v4 interface. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+/* Commit the staged TCQ selection and data to the hardware. */
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): '__'-prefixed guard names are reserved identifiers in ISO C;
+ * consider FLOW_NTHW_KM_H_ in a follow-up.
+ */
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Low-level accessor for the KM (key matcher) FPGA module: wraps the RCP,
+ * CAM, TCAM, TCI and TCQ register groups behind per-field setters.
+ */
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+/* NOTE(review): no mp_rcp_data_mask_d_a member exists in struct km_nthw
+ * below — verify which field this setter is intended to write.
+ */
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/* Register/field handles resolved once at km_nthw_init() time. */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable register-access debug tracing for the PDB module. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized pdb_nthw handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc zero-initializes, replacing the malloc + memset pair */
+	return calloc(1, sizeof(struct pdb_nthw));
+}
+
+/*
+ * Release a pdb_nthw handle previously returned by pdb_nthw_new().
+ * The object is cleared before being freed; NULL is accepted.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all PDB register and field handles for FPGA instance n_instance.
+ * Called with p == NULL it only probes for the module's existence.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* probe-only mode: report presence of the module without binding */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* query (not get): field is optional and may legitimately be NULL */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP shadow-register setters: values are staged locally and written to
+ * hardware by pdb_nthw_rcp_flush().
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+/* Relative offsets are signed (int32_t). */
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* PCAP_KEEP_FCS is an optional field (queried, may be NULL) — guard it. */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+/* Commit the staged RCP selection and data to the hardware. */
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+/* Commit the staged CONFIG register to the hardware. */
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Low-level accessor for the PDB (packet descriptor builder) FPGA module.
+ * Register/field handles are resolved once at pdb_nthw_init() time.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional field; may be NULL */
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+/* duplicate declaration of pdb_nthw_config_port_ofs removed */
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable register-access debug tracing for the QSL module. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized qsl_nthw handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc zero-initializes, replacing the malloc + memset pair */
+	return calloc(1, sizeof(struct qsl_nthw));
+}
+
+/*
+ * Release a qsl_nthw handle previously returned by qsl_nthw_new().
+ * The object is cleared before being freed; NULL is accepted.
+ */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all QSL register and field handles for FPGA instance n_instance.
+ * Called with p == NULL it only probes for the module's existence.
+ * Optional fields (version-dependent) are looked up with
+ * register_query_field() and left NULL when absent.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* probe-only mode: report presence of the module without binding */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* optional fields: query (not get) — NULL when FPGA lacks them */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* No per-index setup is required for QSL; present for API symmetry.
+ * Always returns 0.
+ */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	/* all parameters intentionally unused */
+	(void)n_idx_cnt;
+	(void)n_idx;
+	(void)p;
+	return 0;
+}
+
+/*
+ * RCP shadow-register setters: values are staged locally and written to
+ * hardware by qsl_nthw_rcp_flush().
+ * Cleanups: removed stray ';' after qsl_nthw_rcp_select (invalid ISO C at
+ * file scope) and stray blank lines between signature and body brace.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+/* CAO/LR/TSA/VLI are optional fields (NULL on FPGAs without them). */
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+/* Commit the staged RCP selection and data to the hardware. */
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * LTX shadow-register setters. The LTX register group was removed from
+ * FPGA v0.7+, so every handle may be NULL and is guarded before use.
+ * Bug fix: qsl_nthw_ltx_cnt previously guarded on mp_ltx_addr while
+ * writing mp_ltx_cnt; it now checks the field it actually writes.
+ * Also removed a stray ';' after qsl_nthw_ltx_tsa (invalid ISO C).
+ */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+/* Commit the staged LTX selection and data to the hardware. */
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_ltx_ctrl, 1);
+	register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST shadow-register setters; commit with qsl_nthw_qst_flush(). */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+/* mp_qst_data_en is unguarded: init() falls back to the QEN field name,
+ * so the handle is always resolved after a successful init.
+ */
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+/* TX_PORT/LRE/TCI/VEN are optional fields (NULL on some versions). */
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+/* Commit the staged QST selection and data to the hardware. */
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN shadow-register setters; commit with qsl_nthw_qen_flush(). */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+/* Commit the staged QEN selection and data to the hardware. */
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ (unmatched-queue) shadow-register setters; commit with
+ * qsl_nthw_unmq_flush().
+ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+/* Commit the staged UNMQ selection and data to the hardware. */
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Shadow-register accessors for the QSL (queue selector) FPGA module.
+ * The setters stage values in the register model; the *_flush() calls
+ * write the staged ctrl/data pair to the hardware.
+ */
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one QSL module instance. */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP: recipe table */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX: local retransmit table */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST: queue state table (tx_port/lre/tci/ven may be NULL) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN: queue enable table */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ: unmatched-packet queue table */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this RMC module instance. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RMC shadow instance; NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rmc_nthw));
+}
+
+/* Scrub and release an instance from rmc_nthw_new(); NULL is a no-op. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to RMC FPGA module instance @n_instance and cache its
+ * register/field handles.
+ *
+ * Probe mode: when @p is NULL, only report module presence
+ * (0 if the instance exists, -1 otherwise).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* Optional field: query variant, handle may stay NULL. */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-index setup required for RMC; kept for interface symmetry. */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL register field setters; rmc_nthw_ctrl_flush() commits them. */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* RPP_SLICE is optional; skip silently when the FPGA lacks the field. */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Write the staged CTRL shadow register to the hardware. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Shadow-register accessors for the RMC (RX MAC control) FPGA module. */
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one RMC module instance. */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	/* Optional field; may be NULL on older FPGA images. */
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this ROA module instance. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ROA shadow instance; NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	return calloc(1, sizeof(struct roa_nthw));
+}
+
+/* Scrub and release an instance from roa_nthw_new(); NULL is a no-op. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to ROA FPGA module instance @n_instance and cache the
+ * TUN HDR / TUN CFG / CONFIG / LAG register and field handles.
+ *
+ * Probe mode: when @p is NULL, only report module presence
+ * (0 if the instance exists, -1 otherwise).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR: tunnel header template table accessors. */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* @val points to 4 x 32-bit words of tunnel header data. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Push the staged TUN HDR ctrl/data shadow registers to the hardware. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG: tunnel configuration table field setters. */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX LAG index field of the staged TUN CFG data register. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Push the staged TUN CFG ctrl/data shadow registers to the hardware. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG: global forwarding configuration field setters. */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Write the staged CONFIG shadow register to the hardware. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG: link-aggregation configuration table accessors. */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Push the staged LAG ctrl/data shadow registers to the hardware. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Shadow-register accessors for the ROA (re-order & tunnel) FPGA module. */
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Cached register/field handles for one ROA module instance.
+ * NOTE(review): unlike the sibling headers this struct is defined after
+ * the prototypes; kept as-is to preserve the layout of this header.
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Enable/disable debug tracing for this RPP_LR module instance. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RPP_LR shadow instance; NULL on allocation failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rpp_lr_nthw));
+}
+
+/* Scrub and release an instance from rpp_lr_nthw_new(); NULL is a no-op. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to RPP_LR FPGA module instance @n_instance and cache its
+ * register/field handles.
+ *
+ * Probe mode: when @p is NULL, only report module presence
+ * (0 if the instance exists, -1 otherwise).
+ * Returns 0 on success, -1 if the instance does not exist.
+ *
+ * The IFR recipe register/fields are optional (query variants); their
+ * handles may remain NULL on FPGA images without them.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of a redundant second lookup. */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP: recipe table accessors (mandatory fields, asserted non-NULL). */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Push the staged RCP ctrl/data shadow registers to the hardware. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * IFR RCP: optional recipe table accessors. Callers must only use these
+ * when the IFR registers exist; the asserts catch misuse in debug builds.
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Push the staged IFR RCP ctrl/data shadow registers to the hardware. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Shadow-register accessors for the RPP_LR FPGA module. */
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one RPP_LR module instance. */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR handles are optional; may be NULL on older FPGA images. */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this SLC module instance. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC shadow instance; NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_nthw));
+}
+
+/* Scrub and release an instance from slc_nthw_new(); NULL is a no-op. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to SLC FPGA module instance @n_instance and cache its
+ * register/field handles.
+ *
+ * Probe mode: when @p is NULL, only report module presence
+ * (0 if the instance exists, -1 otherwise).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of a redundant second lookup. */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP: slicer recipe table field setters. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Tail offset is signed; stored via the 32-bit field setter as-is. */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Push the staged RCP ctrl/data shadow registers to the hardware. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one SLC FPGA module instance.  The register/field pointers are
+ * resolved once by slc_nthw_init() and then used by the accessors below.
+ */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc;
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+/* Lifecycle: allocate, initialize against an FPGA instance, free.
+ * slc_nthw_init() with p == NULL only probes for module presence.
+ */
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC_LR module handle. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC_LR handle; returns NULL on allocation
+ * failure.  Zeroing ensures all register/field pointers start out NULL.
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Free a handle created by slc_lr_nthw_new(); NULL is accepted.
+ * The struct is scrubbed before free as a defensive measure against
+ * use-after-free of stale register pointers.
+ */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an SLC_LR handle to FPGA module instance n_instance and resolve all
+ * RCP register/field pointers.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  With a valid p, returns 0 on success and
+ * -1 when the instance does not exist.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Log the module's own name (was "Slc", a copy-paste slip) */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc_lr = p_mod;	/* reuse the lookup already done above */
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* The following setters only stage values into the CTRL/DATA shadow
+ * registers; nothing reaches hardware until slc_lr_nthw_rcp_flush().
+ */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Signed tail offset, written through the common 32-bit field setter. */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Write the staged CTRL and DATA register values out. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one SLC_LR FPGA module instance; mirrors struct slc_nthw but
+ * targets the MOD_SLC_LR module.  Pointers are resolved by slc_lr_nthw_init().
+ */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc_lr;
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+/* Lifecycle; slc_lr_nthw_init() with p == NULL only probes for presence. */
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module handle. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_CPY handle; returns NULL on allocation
+ * failure.  m_writers stays NULL until tx_cpy_nthw_init() populates it.
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/* Free a handle created by tx_cpy_nthw_new(); NULL is accepted.
+ * Releases the writers array allocated by tx_cpy_nthw_init() first
+ * (free(NULL) is a no-op if init never ran), then scrubs and frees
+ * the handle itself.
+ */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p) {
+		free(p->m_writers);
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TX_CPY handle to FPGA module instance n_instance and resolve the
+ * register/field pointers for every writer the product exposes.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  With a valid p, returns 0 on success and
+ * -1 when the instance is missing, no writers are reported, or allocation
+ * of the writers array fails.
+ *
+ * The original implementation open-coded six near-identical writer blocks
+ * in a reverse-fallthrough switch; this version drives the same lookups
+ * from a table, which removes the copy-paste surface while keeping the
+ * behavior (at most six writers are wired up; the mask registers are only
+ * resolved when the product's VARIANT parameter is non-zero).
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	/* Register/field ids for each of the up to six writer register sets. */
+	static const struct {
+		int ctrl, ctrl_adr, ctrl_cnt;
+		int data, data_reader_select, data_dyn, data_ofs, data_len;
+		int data_mask_pointer;
+		int mask_ctrl, mask_ctrl_adr, mask_ctrl_cnt;
+		int mask_data, mask_data_byte_mask;
+	} writer_ids[] = {
+		{ CPY_WRITER0_CTRL, CPY_WRITER0_CTRL_ADR, CPY_WRITER0_CTRL_CNT,
+		  CPY_WRITER0_DATA, CPY_WRITER0_DATA_READER_SELECT,
+		  CPY_WRITER0_DATA_DYN, CPY_WRITER0_DATA_OFS,
+		  CPY_WRITER0_DATA_LEN, CPY_WRITER0_DATA_MASK_POINTER,
+		  CPY_WRITER0_MASK_CTRL, CPY_WRITER0_MASK_CTRL_ADR,
+		  CPY_WRITER0_MASK_CTRL_CNT,
+		  CPY_WRITER0_MASK_DATA, CPY_WRITER0_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER1_CTRL, CPY_WRITER1_CTRL_ADR, CPY_WRITER1_CTRL_CNT,
+		  CPY_WRITER1_DATA, CPY_WRITER1_DATA_READER_SELECT,
+		  CPY_WRITER1_DATA_DYN, CPY_WRITER1_DATA_OFS,
+		  CPY_WRITER1_DATA_LEN, CPY_WRITER1_DATA_MASK_POINTER,
+		  CPY_WRITER1_MASK_CTRL, CPY_WRITER1_MASK_CTRL_ADR,
+		  CPY_WRITER1_MASK_CTRL_CNT,
+		  CPY_WRITER1_MASK_DATA, CPY_WRITER1_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER2_CTRL, CPY_WRITER2_CTRL_ADR, CPY_WRITER2_CTRL_CNT,
+		  CPY_WRITER2_DATA, CPY_WRITER2_DATA_READER_SELECT,
+		  CPY_WRITER2_DATA_DYN, CPY_WRITER2_DATA_OFS,
+		  CPY_WRITER2_DATA_LEN, CPY_WRITER2_DATA_MASK_POINTER,
+		  CPY_WRITER2_MASK_CTRL, CPY_WRITER2_MASK_CTRL_ADR,
+		  CPY_WRITER2_MASK_CTRL_CNT,
+		  CPY_WRITER2_MASK_DATA, CPY_WRITER2_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER3_CTRL, CPY_WRITER3_CTRL_ADR, CPY_WRITER3_CTRL_CNT,
+		  CPY_WRITER3_DATA, CPY_WRITER3_DATA_READER_SELECT,
+		  CPY_WRITER3_DATA_DYN, CPY_WRITER3_DATA_OFS,
+		  CPY_WRITER3_DATA_LEN, CPY_WRITER3_DATA_MASK_POINTER,
+		  CPY_WRITER3_MASK_CTRL, CPY_WRITER3_MASK_CTRL_ADR,
+		  CPY_WRITER3_MASK_CTRL_CNT,
+		  CPY_WRITER3_MASK_DATA, CPY_WRITER3_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER4_CTRL, CPY_WRITER4_CTRL_ADR, CPY_WRITER4_CTRL_CNT,
+		  CPY_WRITER4_DATA, CPY_WRITER4_DATA_READER_SELECT,
+		  CPY_WRITER4_DATA_DYN, CPY_WRITER4_DATA_OFS,
+		  CPY_WRITER4_DATA_LEN, CPY_WRITER4_DATA_MASK_POINTER,
+		  CPY_WRITER4_MASK_CTRL, CPY_WRITER4_MASK_CTRL_ADR,
+		  CPY_WRITER4_MASK_CTRL_CNT,
+		  CPY_WRITER4_MASK_DATA, CPY_WRITER4_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER5_CTRL, CPY_WRITER5_CTRL_ADR, CPY_WRITER5_CTRL_CNT,
+		  CPY_WRITER5_DATA, CPY_WRITER5_DATA_READER_SELECT,
+		  CPY_WRITER5_DATA_DYN, CPY_WRITER5_DATA_OFS,
+		  CPY_WRITER5_DATA_LEN, CPY_WRITER5_DATA_MASK_POINTER,
+		  CPY_WRITER5_MASK_CTRL, CPY_WRITER5_MASK_CTRL_ADR,
+		  CPY_WRITER5_MASK_CTRL_CNT,
+		  CPY_WRITER5_MASK_DATA, CPY_WRITER5_MASK_DATA_BYTE_MASK },
+	};
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = p_mod;	/* reuse the lookup already done above */
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/* At most six writer register sets exist; larger counts leave the
+	 * extra (calloc-zeroed) entries NULL, matching the original switch
+	 * whose "default" label started at writer 5.
+	 */
+	unsigned int n_wired = p->m_writers_cnt;
+
+	if (n_wired > 6)
+		n_wired = 6;
+
+	for (unsigned int i = 0; i < n_wired; i++) {
+		struct tx_cpy_writers_s *w = &p->m_writers[i];
+
+		w->mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, writer_ids[i].ctrl);
+		w->mp_writer_ctrl_addr =
+			register_get_field(w->mp_writer_ctrl,
+					   writer_ids[i].ctrl_adr);
+		w->mp_writer_ctrl_cnt =
+			register_get_field(w->mp_writer_ctrl,
+					   writer_ids[i].ctrl_cnt);
+		w->mp_writer_data =
+			module_get_register(p->m_tx_cpy, writer_ids[i].data);
+		w->mp_writer_data_reader_select =
+			register_get_field(w->mp_writer_data,
+					   writer_ids[i].data_reader_select);
+		w->mp_writer_data_dyn =
+			register_get_field(w->mp_writer_data,
+					   writer_ids[i].data_dyn);
+		w->mp_writer_data_ofs =
+			register_get_field(w->mp_writer_data,
+					   writer_ids[i].data_ofs);
+		w->mp_writer_data_len =
+			register_get_field(w->mp_writer_data,
+					   writer_ids[i].data_len);
+		if (variant != 0) {
+			/* Mask registers only exist on non-zero variants */
+			w->mp_writer_data_mask_pointer =
+				register_get_field(w->mp_writer_data,
+						   writer_ids[i].data_mask_pointer);
+			w->mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy,
+						    writer_ids[i].mask_ctrl);
+			w->mp_writer_mask_ctrl_addr =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   writer_ids[i].mask_ctrl_adr);
+			w->mp_writer_mask_ctrl_cnt =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   writer_ids[i].mask_ctrl_cnt);
+			w->mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy,
+						    writer_ids[i].mask_data);
+			w->mp_writer_mask_data_byte_mask =
+				register_get_field(w->mp_writer_mask_data,
+						   writer_ids[i].mask_data_byte_mask);
+		}
+	}
+
+	return 0;
+}
+
+/* Per-writer accessors: "index" selects a writer (must be below
+ * m_writers_cnt) and values are staged in shadow registers until the
+ * matching flush function is called.  The mask accessors additionally
+ * assert that the mask registers were resolved, i.e. the FPGA variant
+ * supports masking (see tx_cpy_nthw_init()).
+ */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Write the staged CTRL and DATA register values for one writer. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Write the staged MASK_CTRL and MASK_DATA register values for one writer. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field pointers for one TX_CPY writer.  The mask members are only
+ * resolved when the FPGA variant supports masking (see tx_cpy_nthw_init());
+ * otherwise they stay NULL.
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer;
+
+	nt_register_t *mp_writer_mask_ctrl;
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Handle for one TX_CPY FPGA module instance; m_writers is a heap array of
+ * m_writers_cnt entries allocated by tx_cpy_nthw_init() and released by
+ * tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_cpy;
+
+	unsigned int m_writers_cnt;
+	struct tx_cpy_writers_s *m_writers;
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module handle. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_INS handle; returns NULL on allocation
+ * failure.  Zeroing leaves all register/field pointers NULL until init.
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Free a handle created by tx_ins_nthw_new(); NULL is accepted.
+ * The struct is scrubbed before free as a defensive measure.
+ */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TX_INS handle to FPGA module instance n_instance and resolve the
+ * RCP register/field pointers.
+ *
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  With a valid p, returns 0 on success and
+ * -1 when the instance does not exist.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_ins = p_mod;	/* reuse the lookup already done above */
+
+	/* RCP register/field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* RCP accessors: values are staged in the CTRL/DATA shadow registers and
+ * only reach hardware on tx_ins_nthw_rcp_flush().
+ */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Write the staged CTRL and DATA register values out. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one TX_INS FPGA module instance.  Register/field pointers are
+ * resolved once by tx_ins_nthw_init().
+ */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_ins;
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+/* Lifecycle; tx_ins_nthw_init() with p == NULL only probes for presence. */
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL FPGA module. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX replacer (TX_RPL) handle.
+ *
+ * Returns NULL on allocation failure, otherwise a handle that must be
+ * bound with tx_rpl_nthw_init() and released with tx_rpl_nthw_delete().
+ */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc provides the zero fill directly (was malloc + memset) */
+	return calloc(1, sizeof(struct tx_rpl_nthw));
+}
+
+/*
+ * Release a handle created by tx_rpl_nthw_new().  Safe to call with NULL.
+ * The struct is cleared before free so stale register/field pointers
+ * cannot be reused through a dangling copy of the handle.
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TX replacer (TX_RPL) handle to FPGA module instance n_instance.
+ *
+ * Probe mode: when p is NULL, only the module's presence is checked and
+ * 0 (found) or -1 (absent) is returned.
+ *
+ * On success all RCP/EXT/RPL register and field pointers are resolved
+ * and 0 is returned; -1 if the module instance does not exist.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup */
+	p->m_tx_rpl = p_mod;
+
+	/* Recipe (RCP) control/data register pair */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	/* Extension (EXT) table registers */
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	/* Replace-data (RPL) table registers */
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP (recipe) shadow-register setters.  Each call only updates the
+ * shadow value of one field; nothing reaches hardware until
+ * tx_rpl_nthw_rcp_flush() is called.
+ */
+
+/* Select the RCP record address to operate on. */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* Set the RCP write burst count. */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Push the shadowed RCP CTRL and DATA values to hardware (one word each). */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * EXT (extension) and RPL (replace-data) table accessors.  Same
+ * shadow-then-flush pattern as the RCP accessors above.
+ */
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* Write one 4-word (128-bit) replace-data value into the shadow. */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one TX replacer (TX_RPL) FPGA module instance. */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter instance this handle maps to */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_tx_rpl;		/* TX_RPL module queried from the FPGA */
+
+	/* Recipe (RCP) control register: address + burst count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	/* Recipe (RCP) data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* Extension (EXT) table control/data */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* Replace-data (RPL) table control/data */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-29 10:17   ` [PATCH v9 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-29 10:17   ` Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a prebuilt tunnel header into ROA TUNHDR memory for recipe 'index'.
+ *
+ * IPv4 headers take 4 writes of 4 words (64 bytes), IPv6 takes 8 writes
+ * (128 bytes).  Within each 4-word group the words are written in reverse
+ * order (hardware layout requirement), converted from network to host
+ * byte order with ntohl().
+ *
+ * Returns 0 on success, non-zero if any backend write failed.
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program a ROA tunnel-config (TUNCFG) recipe from the packed
+ * color_actions bitmask (bit layout documented in flow_api_actions.h).
+ *
+ * Configures, in order: tunnel push (length/type/IP-header updates),
+ * recirculation (bypass port wins over a plain recirculate port), TX
+ * destination, and finally the IOA-resident EPP recipe selection that
+ * mirrors the ROA settings.  Return values of the individual
+ * hw_mod_*_set() calls are not checked here.
+ *
+ * Returns 0 on success, -1 on an unsupported TX-port combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 - use the checksum pre-calculated into the action word */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 disables the bypass path */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* replicate: TX on PHY0, recirculate-bypass to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program an IOA recipe from the packed color_actions bitmask.
+ *
+ * Note: the ioa_set_*(0) calls below are used as bit-mask generators -
+ * passing 0 yields just the flag bit each setter would set, which is then
+ * tested against color_actions.
+ *
+ * Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* split the 16-bit TCI into VID (11:0), DEI (12), PCP (15:13) */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/*
+ * Pack color/IOA/ROA indices into the pre-4.1 color_action word.  The
+ * field widths and the discriminator in bits 31:30 are chosen at compile
+ * time by MAX_COLOR_FLOW_STATS (see the layout comment above).
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/*
+ * Pack the 4.1+ color_action word: mark in 9:0, IOA recipe in 17:10,
+ * ROA recipe in 25:18, QSL/HSH recipe in 30:26, and bit 31 set to flag
+ * that a color action is present (CAO).
+ */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	/* 1U << 31: left-shifting a signed 1 into the sign bit is UB */
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Flag that a new recirculate port was allocated (needs a default queue). */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Tunnel IP type, bit 24: 0 = IPv4, 1 = IPv6 (TUN_IPV4/TUN_IPV6). */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Pre-calculated tunnel IP header checksum, bits 49:34. */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Tunnel header length in bytes, bits 33:26; 0 means no tunnel push. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* TX destination, bits 23:20, stored biased by ROA_TX_PHY0 (txport + 1). */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Tunnel type, bits 19:16. */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Recirculate port in bits 7:0, with bit 25 as the "port set" flag. */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Returns the recirculate port, or -1 if none was set. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Recirculate-bypass port, bits 15:8; non-zero overrides recirc port. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
+/*
+ * Record a queue override in the IOA action word: bit 31 = override
+ * enable, bits 23:16 = queue (hbx).  1ULL is required: the original
+ * (1 << 31) left-shifts a signed 1 into the sign bit (undefined
+ * behavior) and its sign extension also polluted bits 63:32 of the
+ * 64-bit action word.
+ */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/* Return the override queue (bits 23:16), or -1 if no override is set. */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return (int)((actions >> 16) & 0xff);
+}
+
+/*
+ * IOA action-word flag setters.  Callers also use them as mask
+ * generators by passing actions = 0 (see flow_actions_create_ioa_config).
+ */
+
+/* Bit 28: pop the outer VXLAN tunnel. */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Bit 29: pop the outer VLAN tag. */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Bits 27:24: select the 802.1ad (QinQ) TPID for the pushed VLAN. */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Bit 30: push a VLAN tag with the given TCI (bits 15:0). */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set only the PCP bits (15:13) of the pushed VLAN's TCI. */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/*
 * Registry of every NIC HW module the backend knows about, together
 * with the per-module lifecycle callbacks used by
 * flow_api_backend_init()/_reset()/_done():
 *   present  - does this FPGA expose the module at all?
 *   allocate - create the SW cache mirroring the module registers
 *   free     - release that cache again
 *   reset    - put module and cache into the default state
 */
static const struct {
	const char *name;
	int (*allocate)(struct flow_api_backend_s *be);
	void (*free)(struct flow_api_backend_s *be);
	int (*reset)(struct flow_api_backend_s *be);
	bool (*present)(struct flow_api_backend_s *be);
} module[] = {
	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
		hw_mod_cat_present
	},
	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
		hw_mod_km_present
	},
	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
		hw_mod_flm_present
	},
	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
		hw_mod_hsh_present
	},
	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
		hw_mod_hst_present
	},
	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
		hw_mod_qsl_present
	},
	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
		hw_mod_slc_present
	},
	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
	},
	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
		hw_mod_pdb_present
	},
	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
		hw_mod_ioa_present
	},
	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
		hw_mod_roa_present
	},
	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
		hw_mod_rmc_present
	},
	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
		hw_mod_tpe_present
	},
};

/* Number of entries in the module registry above */
#define MOD_COUNT (ARRAY_SIZE(module))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+void zero_module_cache(struct common_func_s *mod)
+{
+	memset(mod->base, 0, mod->allocated_size);
+}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Maximum number of physical adapters the backend can track */
#define MAX_PHYS_ADAPTERS 8

/* Split a 32-bit module version word into its major/minor halves */
#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
#define VER_MINOR(ver) ((ver) & 0xffff)

struct flow_api_backend_s;
struct common_func_s;

/* View any *_func_s module cache through its common header */
#define CAST_COMMON(mod) ((struct common_func_s *)(mod))

void *callocate_mod(struct common_func_s *mod, int sets, ...);
void zero_module_cache(struct common_func_s *mod);

#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))

/* Sentinel index values — presumably "operate on all (bank) entries";
 * TODO(review): confirm against the flush/set implementations.
 */
#define ALL_ENTRIES -1000
#define ALL_BANK_ENTRIES -1001
+
/* Log and report an out-of-range table index; returns error code -2. */
static inline int error_index_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
	return -2;
}
+
/* Log and report an out-of-range word offset; returns error code -3. */
static inline int error_word_off_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
	return -3;
}
+
/* Log and report an unsupported module version; returns error code -4. */
static inline int error_unsup_ver(const char *func, const char *mod, int ver)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
/* Log and report a field that this module version lacks; returns -5. */
static inline int error_unsup_field(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
		func);
	return -5;
}
+
/*
 * Log and report that a HW resource ran out for the given module.
 * Returns -4 — note: deliberately or not, the same code as
 * error_unsup_ver(); callers cannot distinguish the two.
 */
static inline int error_resource_count(const char *func, const char *resource,
	const char *mod, int ver)
{
	NT_LOG(INF, FILTER,
	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
	       "%s ver %i.%i\n",
	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
/* Returned through *value by find_equal_index() when nothing matches */
#define NOT_FOUND 0xffffffff

/* Count of hidden elements allocated in front of every register set
 * (see callocate_mod()); the empty enum makes EXTRA_INDEXES == 0.
 */
enum { EXTRA_INDEXES };
/* NOTE(review): EXTRA_INDEX_COPY is not defined in this header — it is
 * presumably provided where extra indexes are enabled; confirm before
 * using COPY_INDEX.
 */
#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
/*
 * Move one 32-bit value between the SW register cache and a caller
 * buffer: get != 0 copies cache -> *val, get == 0 copies *val -> cache.
 */
static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
{
	uint32_t *dst = get ? val : cached_val;
	const uint32_t *src = get ? cached_val : val;

	*dst = *src;
}
+
/*
 * Like get_set(), but the cached value is signed: get != 0 copies
 * cache -> *val (reinterpreted as unsigned), get == 0 copies
 * *val -> cache (reinterpreted as signed).
 */
static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = (uint32_t)*cached_val;
		return;
	}
	*cached_val = (int32_t)*val;
}
+
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
/*
 * Compare two entries of a module register table. Returns 1 when the
 * entries at 'idx' and 'cmp_idx' are distinct indexes with identical
 * contents, 0 when they differ (or are the same index). Only valid as
 * a read (get) operation; returns a negative error code otherwise.
 */
static inline int do_compare_indexes(void *be_module_reg,
	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
	unsigned int nb_elements, int get, const char *func)
{
	const uint8_t *base = (const uint8_t *)be_module_reg;

	if (!get)
		return error_unsup_field(func);
	if (cmp_idx >= nb_elements)
		return error_index_too_large(func);
	if (idx == cmp_idx)
		return 0;
	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
		      type_size) == 0;
}
+
/* Return 1 if any of the n bytes at addr is non-zero, else 0. */
static inline int is_non_zero(const void *addr, size_t n)
{
	const uint8_t *p = (const uint8_t *)addr;
	const uint8_t *end = p + n;

	while (p < end) {
		if (*p++)
			return 1;
	}
	return 0;
}
+
/* Return 1 if every one of the n bytes at addr is 0xff, else 0. */
static inline int is_all_bits_set(const void *addr, size_t n)
{
	const uint8_t *p = (const uint8_t *)addr;
	const uint8_t *end = p + n;

	while (p < end) {
		if (*p++ != 0xff)
			return 0;
	}
	return 1;
}
+
/*
 * Bit indexes into the CAT CTE enable bitmap — one bit per downstream
 * engine a category can enable. NOTE(review): engine mapping inferred
 * from the matching CCS field names (COR/HSH/QSL/...); confirm against
 * the register documentation.
 */
enum cte_index_e {
	CT_COL = 0,
	CT_COR = 1,
	CT_HSH = 2,
	CT_QSL = 3,
	CT_IPF = 4,
	CT_SLC = 5,
	CT_PDB = 6,
	CT_MSK = 7,
	CT_HST = 8,
	CT_EPP = 9,
	CT_TPE = 10,
	CT_RRB = 11,
	CT_CNT
};
+
/* Sideband info bit indicator */
#define SWX_INFO (1 << 6)

/*
 * Dynamic frame offset selectors: each DYN_* value names a position in
 * the packet that the HW resolves at runtime (start-of-frame, L2, L3,
 * tunnel layers, end-of-frame, ...). Values with the SWX_INFO bit set
 * (SB_*) select sideband metadata instead of a frame offset.
 */
enum frame_offs_e {
	DYN_SOF = 0,
	DYN_L2 = 1,
	DYN_FIRST_VLAN = 2,
	DYN_MPLS = 3,
	DYN_L3 = 4,
	DYN_ID_IPV4_6 = 5,
	DYN_FINAL_IP_DST = 6,
	DYN_L4 = 7,
	DYN_L4_PAYLOAD = 8,
	DYN_TUN_PAYLOAD = 9,
	DYN_TUN_L2 = 10,
	DYN_TUN_VLAN = 11,
	DYN_TUN_MPLS = 12,
	DYN_TUN_L3 = 13,
	DYN_TUN_ID_IPV4_6 = 14,
	DYN_TUN_FINAL_IP_DST = 15,
	DYN_TUN_L4 = 16,
	DYN_TUN_L4_PAYLOAD = 17,
	DYN_EOF = 18,
	DYN_L3_PAYLOAD_END = 19,
	DYN_TUN_L3_PAYLOAD_END = 20,
	SB_VNI = SWX_INFO | 1,
	SB_MAC_PORT = SWX_INFO | 2,
	SB_KCC_ID = SWX_INFO | 3
};
+
/* Selects which of the two KM/FLM interfaces an operation targets. */
enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };

/*
 * Extractor word selector encodings (QW0/QW4/SW8/DW8/SW9/DW10/SWX):
 * how much of an extracted quad/single/double word takes part in the
 * match key — nothing, a 16/32/64-bit slice, or the full word.
 */
enum {
	QW0_SEL_EXCLUDE = 0,
	QW0_SEL_FIRST32 = 1,
	QW0_SEL_SECOND32 = 2,
	QW0_SEL_FIRST64 = 3,
	QW0_SEL_ALL128 = 4,
};

enum {
	QW4_SEL_EXCLUDE = 0,
	QW4_SEL_FIRST32 = 1,
	QW4_SEL_FIRST64 = 2,
	QW4_SEL_ALL128 = 3,
};

enum {
	SW8_SEL_EXCLUDE = 0,
	SW8_SEL_FIRST16 = 1,
	SW8_SEL_SECOND16 = 2,
	SW8_SEL_ALL32 = 3,
};

enum {
	DW8_SEL_EXCLUDE = 0,
	DW8_SEL_FIRST16 = 1,
	DW8_SEL_SECOND16 = 2,
	DW8_SEL_FIRST32 = 3,
	DW8_SEL_FIRST32_SWAP16 = 4,
	DW8_SEL_ALL64 = 5,
};

enum {
	SW9_SEL_EXCLUDE = 0,
	SW9_SEL_FIRST16 = 1,
	SW9_SEL_ALL32 = 2,
};

enum {
	DW10_SEL_EXCLUDE = 0,
	DW10_SEL_FIRST16 = 1,
	DW10_SEL_FIRST32 = 2,
	DW10_SEL_ALL64 = 3,
};

enum {
	SWX_SEL_EXCLUDE = 0,
	SWX_SEL_ALL32 = 1,
};
+
/*
 * Protocol type codes per layer (outer L2/L3/L4 and tunnel variants).
 * NOTE(review): producer assumed to be the HW packet decoder — confirm.
 */
enum {
	PROT_OTHER = 0,
	PROT_L2_ETH2 = 1,
	PROT_L2_SNAP = 2,
	PROT_L2_LLC = 3,
	PROT_L2_RAW = 4,
	PROT_L2_PPPOE_D = 5,
	PROT_L2_PPOE_S = 6
};

enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };

enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };

enum {
	PROT_TUN_IP_IN_IP = 1,
	PROT_TUN_ETHER_IP = 2,
	PROT_TUN_GREV0 = 3,
	PROT_TUN_GREV1 = 4,
	PROT_TUN_GTPV0U = 5,
	PROT_TUN_GTPV1U = 6,
	PROT_TUN_GTPV1C = 7,
	PROT_TUN_GTPV2C = 8,
	PROT_TUN_VXLAN = 9,
	PROT_TUN_PSEUDO_WIRE = 10
};

enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };

enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };

enum {
	PROT_TUN_L4_OTHER = 0,
	PROT_TUN_L4_TCP = 1,
	PROT_TUN_L4_UDP = 2,
	PROT_TUN_L4_SCTP = 3,
	PROT_TUN_L4_ICMP = 4
};

/* IP fragmentation state of a frame */
enum {
	IP_FRAG_NOT_A_FRAG = 0,
	IP_FRAG_FIRST = 1,
	IP_FRAG_MIDDLE = 2,
	IP_FRAG_LAST = 3
};
+
/* Hash algorithm selectors (tuple variants, inner/outer, sorted etc.). */
enum {
	HASH_HASH_NONE = 0,
	HASH_USER_DEFINED = 1,
	HASH_LAST_MPLS_LABEL = 2,
	HASH_ALL_MPLS_LABELS = 3,
	HASH_2TUPLE = 4,
	HASH_2TUPLESORTED = 5,
	HASH_LAST_VLAN_ID = 6,
	HASH_ALL_VLAN_IDS = 7,
	HASH_5TUPLE = 8,
	HASH_5TUPLESORTED = 9,
	HASH_3TUPLE_GRE_V0 = 10,
	HASH_3TUPLE_GRE_V0_SORTED = 11,
	HASH_5TUPLE_SCTP = 12,
	HASH_5TUPLE_SCTP_SORTED = 13,
	HASH_3TUPLE_GTP_V0 = 14,
	HASH_3TUPLE_GTP_V0_SORTED = 15,
	HASH_3TUPLE_GTP_V1V2 = 16,
	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
	HASH_HASHINNER_2TUPLE = 18,
	HASH_HASHINNER_2TUPLESORTED = 19,
	HASH_HASHINNER_5TUPLE = 20,
	HASH_HASHINNER_5TUPLESORTED = 21,
	HASH_KM = 30,
	HASH_ROUND_ROBIN = 31,
	HASH_OUTER_DST_IP = 32,
	HASH_INNER_SRC_IP = 33,
};

/* Source-field selectors for copy operations.
 * NOTE(review): consuming module inferred from naming; confirm.
 */
enum {
	CPY_SELECT_DSCP_IPV4 = 0,
	CPY_SELECT_DSCP_IPV6 = 1,
	CPY_SELECT_RQI_QFI = 2,
	CPY_SELECT_IPV4 = 3,
	CPY_SELECT_PORT = 4,
	CPY_SELECT_TEID = 5,
};
+
/*
 * RCK per-component control bits: each component owns a 4-bit nibble
 * holding its CML/CMU/SEL/SEU flags.
 */
#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))

#define RCK_EXT(x) (((uint32_t)(x) << 6))

/* First value used for field identifiers in the hw_*_e enums below;
 * values below this are function/operation identifiers.
 */
#define FIELD_START_INDEX 100

/* Bookkeeping shared by every module cache (module version, cache base
 * pointer and size); laid out first in each *_func_s struct so
 * CAST_COMMON() can view any of them generically.
 */
#define COMMON_FUNC_INFO_S         \
	int ver;                   \
	void *base;                \
	unsigned int allocated_size; \
	int debug

/* Generic module cache view used by callocate_mod()/zero_module_cache() */
struct common_func_s {
	COMMON_FUNC_INFO_S;
};
+
/*
 * CAT (categorizer) module SW cache: capability counts read from the
 * FPGA plus the version-specific register image (v18/v21/v22).
 */
struct cat_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_cat_funcs;
	uint32_t nb_flow_types;
	uint32_t nb_pm_ext;
	uint32_t nb_len;
	uint32_t kcc_size;
	uint32_t cts_num;
	uint32_t kcc_banks;
	uint32_t kcc_id_bit_size;
	uint32_t kcc_records;
	uint32_t km_if_count;
	int32_t km_if_m0;
	int32_t km_if_m1;

	/* exactly one member is active, selected by 'ver' */
	union {
		struct hw_mod_cat_v18_s v18;
		struct hw_mod_cat_v21_s v21;
		struct hw_mod_cat_v22_s v22;
	};
};
+
/*
 * CAT module function and field identifiers, version independent.
 * Values below FIELD_START_INDEX are operations (preset/compare/find/
 * copy); values from FIELD_START_INDEX name individual register
 * fields. The CCE/CCS entries at the end belong to module v22 (see the
 * "added in v22" accessors below).
 */
enum hw_cat_e {
	/*
	 *  functions initial CAT v18
	 */
	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
	/* 01 */ HW_CAT_CFN_PRESET_ALL,
	/* 02 */ HW_CAT_CFN_COMPARE,
	/* 03 */ HW_CAT_CFN_FIND,
	/* 04 */ HW_CAT_CFN_COPY_FROM,
	/* 05 */ HW_CAT_COT_PRESET_ALL,
	/* 06 */ HW_CAT_COT_COMPARE,
	/* 07 */ HW_CAT_COT_FIND,
	/* fields */
	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
	/* 01 */ HW_CAT_CFN_INV,
	/* 02 */ HW_CAT_CFN_PTC_INV,
	/* 03 */ HW_CAT_CFN_PTC_ISL,
	/* 04 */ HW_CAT_CFN_PTC_CFP,
	/* 05 */ HW_CAT_CFN_PTC_MAC,
	/* 06 */ HW_CAT_CFN_PTC_L2,
	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
	/* 08 */ HW_CAT_CFN_PTC_VLAN,
	/* 09 */ HW_CAT_CFN_PTC_MPLS,
	/* 10 */ HW_CAT_CFN_PTC_L3,
	/* 11 */ HW_CAT_CFN_PTC_FRAG,
	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
	/* 13 */ HW_CAT_CFN_PTC_L4,
	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
	/* 22 */ HW_CAT_CFN_ERR_INV,
	/* 23 */ HW_CAT_CFN_ERR_CV,
	/* 24 */ HW_CAT_CFN_ERR_FCS,
	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
	/* 28 */ HW_CAT_CFN_MAC_PORT,
	/* 29 */ HW_CAT_CFN_PM_CMP,
	/* 30 */ HW_CAT_CFN_PM_DCT,
	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
	/* 32 */ HW_CAT_CFN_PM_CMB,
	/* 33 */ HW_CAT_CFN_PM_AND_INV,
	/* 34 */ HW_CAT_CFN_PM_OR_INV,
	/* 35 */ HW_CAT_CFN_PM_INV,
	/* 36 */ HW_CAT_CFN_LC,
	/* 37 */ HW_CAT_CFN_LC_INV,
	/* 38 */ HW_CAT_CFN_KM0_OR,
	/* 39 */ HW_CAT_CFN_KM1_OR,
	/* 40 */ HW_CAT_KCE_ENABLE_BM,
	/* 41 */ HW_CAT_KCS_CATEGORY,
	/* 42 */ HW_CAT_FTE_ENABLE_BM,
	/* 43 */ HW_CAT_CTE_ENABLE_BM,
	/* 44 */ HW_CAT_CTS_CAT_A,
	/* 45 */ HW_CAT_CTS_CAT_B,
	/* 46 */ HW_CAT_COT_COLOR,
	/* 47 */ HW_CAT_COT_KM,
	/* 48 */ HW_CAT_CCT_COLOR,
	/* 49 */ HW_CAT_CCT_KM,
	/* 50 */ HW_CAT_KCC_KEY,
	/* 51 */ HW_CAT_KCC_CATEGORY,
	/* 52 */ HW_CAT_KCC_ID,
	/* 53 */ HW_CAT_EXO_DYN,
	/* 54 */ HW_CAT_EXO_OFS,
	/* 55 */ HW_CAT_RCK_DATA,
	/* 56 */ HW_CAT_LEN_LOWER,
	/* 57 */ HW_CAT_LEN_UPPER,
	/* 58 */ HW_CAT_LEN_DYN1,
	/* 59 */ HW_CAT_LEN_DYN2,
	/* 60 */ HW_CAT_LEN_INV,
	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,

	/* 65 */ HW_CAT_CCE_IMM,
	/* 66 */ HW_CAT_CCE_IND,
	/* 67 */ HW_CAT_CCS_COR_EN,
	/* 68 */ HW_CAT_CCS_COR,
	/* 69 */ HW_CAT_CCS_HSH_EN,
	/* 70 */ HW_CAT_CCS_HSH,
	/* 71 */ HW_CAT_CCS_QSL_EN,
	/* 72 */ HW_CAT_CCS_QSL,
	/* 73 */ HW_CAT_CCS_IPF_EN,
	/* 74 */ HW_CAT_CCS_IPF,
	/* 75 */ HW_CAT_CCS_SLC_EN,
	/* 76 */ HW_CAT_CCS_SLC,
	/* 77 */ HW_CAT_CCS_PDB_EN,
	/* 78 */ HW_CAT_CCS_PDB,
	/* 79 */ HW_CAT_CCS_MSK_EN,
	/* 80 */ HW_CAT_CCS_MSK,
	/* 81 */ HW_CAT_CCS_HST_EN,
	/* 82 */ HW_CAT_CCS_HST,
	/* 83 */ HW_CAT_CCS_EPP_EN,
	/* 84 */ HW_CAT_CCS_EPP,
	/* 85 */ HW_CAT_CCS_TPE_EN,
	/* 86 */ HW_CAT_CCS_TPE,
	/* 87 */ HW_CAT_CCS_RRB_EN,
	/* 88 */ HW_CAT_CCS_RRB,
	/* 89 */ HW_CAT_CCS_SB0_TYPE,
	/* 90 */ HW_CAT_CCS_SB0_DATA,
	/* 91 */ HW_CAT_CCS_SB1_TYPE,
	/* 92 */ HW_CAT_CCS_SB1_DATA,
	/* 93 */ HW_CAT_CCS_SB2_TYPE,
	/* 94 */ HW_CAT_CCS_SB2_DATA,

};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
/*
 * KM (key matcher) module SW cache: CAM/TCAM dimensioning parameters
 * plus the version-specific register image (v7).
 */
struct km_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_cam_banks;
	uint32_t nb_cam_record_words;
	uint32_t nb_cam_records;
	uint32_t nb_tcam_banks;
	uint32_t nb_tcam_bank_width;
	/* not read from backend, but rather set using version */
	uint32_t nb_km_rcp_mask_a_word_size;
	uint32_t nb_km_rcp_mask_b_word_size;
	union {
		struct hw_mod_km_v7_s v7;
	};
};
+
/*
 * KM module function and field identifiers, version independent.
 * Values below FIELD_START_INDEX are operations; the remainder name
 * RCP/CAM/TCAM/TCI/TCQ register fields.
 */
enum hw_km_e {
	/* functions */
	HW_KM_RCP_PRESET_ALL = 0,
	HW_KM_CAM_PRESET_ALL,
	/* to sync and reset hw with cache - force write all entries in a bank */
	HW_KM_TCAM_BANK_RESET,
	/* fields */
	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
	HW_KM_RCP_QW0_OFS,
	HW_KM_RCP_QW0_SEL_A,
	HW_KM_RCP_QW0_SEL_B,
	HW_KM_RCP_QW4_DYN,
	HW_KM_RCP_QW4_OFS,
	HW_KM_RCP_QW4_SEL_A,
	HW_KM_RCP_QW4_SEL_B,
	HW_KM_RCP_DW8_DYN,
	HW_KM_RCP_DW8_OFS,
	HW_KM_RCP_DW8_SEL_A,
	HW_KM_RCP_DW8_SEL_B,
	HW_KM_RCP_DW10_DYN,
	HW_KM_RCP_DW10_OFS,
	HW_KM_RCP_DW10_SEL_A,
	HW_KM_RCP_DW10_SEL_B,
	HW_KM_RCP_SWX_CCH,
	HW_KM_RCP_SWX_SEL_A,
	HW_KM_RCP_SWX_SEL_B,
	HW_KM_RCP_MASK_A,
	HW_KM_RCP_MASK_B,
	HW_KM_RCP_DUAL,
	HW_KM_RCP_PAIRED,
	HW_KM_RCP_EL_A,
	HW_KM_RCP_EL_B,
	HW_KM_RCP_INFO_A,
	HW_KM_RCP_INFO_B,
	HW_KM_RCP_FTM_A,
	HW_KM_RCP_FTM_B,
	HW_KM_RCP_BANK_A,
	HW_KM_RCP_BANK_B,
	HW_KM_RCP_KL_A,
	HW_KM_RCP_KL_B,
	HW_KM_RCP_KEYWAY_A,
	HW_KM_RCP_KEYWAY_B,
	HW_KM_RCP_SYNERGY_MODE,
	HW_KM_RCP_DW0_B_DYN,
	HW_KM_RCP_DW0_B_OFS,
	HW_KM_RCP_DW2_B_DYN,
	HW_KM_RCP_DW2_B_OFS,
	HW_KM_RCP_SW4_B_DYN,
	HW_KM_RCP_SW4_B_OFS,
	HW_KM_RCP_SW5_B_DYN,
	HW_KM_RCP_SW5_B_OFS,
	HW_KM_CAM_W0,
	HW_KM_CAM_W1,
	HW_KM_CAM_W2,
	HW_KM_CAM_W3,
	HW_KM_CAM_W4,
	HW_KM_CAM_W5,
	HW_KM_CAM_FT0,
	HW_KM_CAM_FT1,
	HW_KM_CAM_FT2,
	HW_KM_CAM_FT3,
	HW_KM_CAM_FT4,
	HW_KM_CAM_FT5,
	HW_KM_TCAM_T,
	HW_KM_TCI_COLOR,
	HW_KM_TCI_FT,
	HW_KM_TCQ_BANK_MASK,
	HW_KM_TCQ_QUAL
};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
/*
 * HST module SW cache: recipe count plus the version-specific register
 * image (v2).
 */
struct hst_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_hst_rcp_categories;
	union {
		struct hw_mod_hst_v2_s v2;
	};
};
+
/*
 * HST module function and field identifiers. The RCP fields describe a
 * strip window (start/end dyn+offset) and up to three packet modifiers
 * (cmd/dyn/ofs/value each).
 */
enum hw_hst_e {
	/* functions */
	HW_HST_RCP_PRESET_ALL = 0,
	HW_HST_RCP_FIND,
	HW_HST_RCP_COMPARE,
	/* Control fields */
	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
	HW_HST_RCP_START_DYN,
	HW_HST_RCP_START_OFS,
	HW_HST_RCP_END_DYN,
	HW_HST_RCP_END_OFS,
	HW_HST_RCP_MODIF0_CMD,
	HW_HST_RCP_MODIF0_DYN,
	HW_HST_RCP_MODIF0_OFS,
	HW_HST_RCP_MODIF0_VALUE,
	HW_HST_RCP_MODIF1_CMD,
	HW_HST_RCP_MODIF1_DYN,
	HW_HST_RCP_MODIF1_OFS,
	HW_HST_RCP_MODIF1_VALUE,
	HW_HST_RCP_MODIF2_CMD,
	HW_HST_RCP_MODIF2_DYN,
	HW_HST_RCP_MODIF2_OFS,
	HW_HST_RCP_MODIF2_VALUE,

};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
/*
 * FLM module SW cache: capability parameters plus the version-specific
 * register image (v17/v20).
 */
struct flm_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_size_mb;
	uint32_t nb_entry_size;
	uint32_t nb_variant;
	uint32_t nb_prios;
	uint32_t nb_pst_profiles;
	union {
		struct hw_mod_flm_v17_s v17;
		struct hw_mod_flm_v20_s v20;
	};
};
+
/*
 * FLM module function and field identifiers, version independent:
 * control, status, timeout, scrub, load statistics, priority, PST,
 * RCP, buffer-control and event counters. Entries marked
 * "module ver 0.20" only exist from FLM v0.20 on.
 */
enum hw_flm_e {
	/* functions */
	HW_FLM_CONTROL_PRESET_ALL = 0,
	HW_FLM_RCP_PRESET_ALL,
	HW_FLM_FLOW_LRN_DATA_V17,
	HW_FLM_FLOW_INF_DATA_V17,
	HW_FLM_FLOW_STA_DATA_V17,
	/* Control fields */
	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
	HW_FLM_CONTROL_INIT,
	HW_FLM_CONTROL_LDS,
	HW_FLM_CONTROL_LFS,
	HW_FLM_CONTROL_LIS,
	HW_FLM_CONTROL_UDS,
	HW_FLM_CONTROL_UIS,
	HW_FLM_CONTROL_RDS,
	HW_FLM_CONTROL_RIS,
	HW_FLM_CONTROL_PDS,
	HW_FLM_CONTROL_PIS,
	HW_FLM_CONTROL_CRCWR,
	HW_FLM_CONTROL_CRCRD,
	HW_FLM_CONTROL_RBL,
	HW_FLM_CONTROL_EAB,
	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
	HW_FLM_STATUS_CALIBDONE,
	HW_FLM_STATUS_INITDONE,
	HW_FLM_STATUS_IDLE,
	HW_FLM_STATUS_CRITICAL,
	HW_FLM_STATUS_PANIC,
	HW_FLM_STATUS_CRCERR,
	HW_FLM_STATUS_EFT_BP,
	HW_FLM_TIMEOUT_T,
	HW_FLM_SCRUB_I,
	HW_FLM_LOAD_BIN,
	HW_FLM_LOAD_PPS,
	HW_FLM_LOAD_LPS,
	HW_FLM_LOAD_APS,
	HW_FLM_PRIO_LIMIT0,
	HW_FLM_PRIO_FT0,
	HW_FLM_PRIO_LIMIT1,
	HW_FLM_PRIO_FT1,
	HW_FLM_PRIO_LIMIT2,
	HW_FLM_PRIO_FT2,
	HW_FLM_PRIO_LIMIT3,
	HW_FLM_PRIO_FT3,
	HW_FLM_PST_PRESET_ALL,
	HW_FLM_PST_BP,
	HW_FLM_PST_PP,
	HW_FLM_PST_TP,
	HW_FLM_RCP_LOOKUP,
	HW_FLM_RCP_QW0_DYN,
	HW_FLM_RCP_QW0_OFS,
	HW_FLM_RCP_QW0_SEL,
	HW_FLM_RCP_QW4_DYN,
	HW_FLM_RCP_QW4_OFS,
	HW_FLM_RCP_SW8_DYN,
	HW_FLM_RCP_SW8_OFS,
	HW_FLM_RCP_SW8_SEL,
	HW_FLM_RCP_SW9_DYN,
	HW_FLM_RCP_SW9_OFS,
	HW_FLM_RCP_MASK,
	HW_FLM_RCP_KID,
	HW_FLM_RCP_OPN,
	HW_FLM_RCP_IPN,
	HW_FLM_RCP_BYT_DYN,
	HW_FLM_RCP_BYT_OFS,
	HW_FLM_RCP_TXPLM,
	HW_FLM_RCP_AUTO_IPV4_MASK,
	HW_FLM_BUF_CTRL_LRN_FREE,
	HW_FLM_BUF_CTRL_INF_AVAIL,
	HW_FLM_BUF_CTRL_STA_AVAIL,
	HW_FLM_STAT_LRN_DONE,
	HW_FLM_STAT_LRN_IGNORE,
	HW_FLM_STAT_LRN_FAIL,
	HW_FLM_STAT_UNL_DONE,
	HW_FLM_STAT_UNL_IGNORE,
	HW_FLM_STAT_REL_DONE,
	HW_FLM_STAT_REL_IGNORE,
	HW_FLM_STAT_PRB_DONE,
	HW_FLM_STAT_PRB_IGNORE,
	HW_FLM_STAT_AUL_DONE,
	HW_FLM_STAT_AUL_IGNORE,
	HW_FLM_STAT_AUL_FAIL,
	HW_FLM_STAT_TUL_DONE,
	HW_FLM_STAT_FLOWS,
	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
};
+
+/*
+ * FLM (Flow Matcher) module API.
+ * Presence/alloc/free/reset plus set/get/flush accessors, one group per
+ * FLM register bank, all operating on the backend handle. Field selectors
+ * come from enum hw_flm_e above. Non-zero return presumably signals an
+ * error - TODO confirm against implementation.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+/* CONTROL register group */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* STATUS register group; _update refreshes the local shadow (direction
+ * inferred from the read-style name - confirm against backend ops).
+ */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+/* TIMEOUT register group */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* SCRUB register group */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* LOAD_BIN / LOAD_PPS / LOAD_LPS / LOAD_APS register groups */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+/* PRIO register group */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST (profile setup table): indexed entries, flushed as [start_idx, start_idx+count) */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* RCP (recipe) table: indexed; set_mask takes a multi-word value array */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* Buffer control and statistics counters (read/update only - no set) */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/inform/status data streams; inf takes an explicit word count */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/*
+ * HSH (Hasher) module state: capability counters plus a versioned union
+ * of register shadow layouts (only v5 supported here).
+ */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp;	/* number of HSH recipe entries */
+	union {
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+/*
+ * HSH field/function selectors. Values below FIELD_START_INDEX are
+ * pseudo-functions (preset/compare/find); the rest name RCP fields.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+/* HSH module API; word_off selects a 32-bit word within multi-word fields */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/*
+ * QSL (Queue Select) module state: capability counters plus a versioned
+ * register shadow (only v7 supported here).
+ */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of QSL recipe categories */
+	uint32_t nb_qst_entries;	/* number of queue select table entries */
+	union {
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+/*
+ * QSL selectors: pseudo-functions first, then RCP/QST/QEN/UNMQ fields
+ * starting at FIELD_START_INDEX.
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+/* QSL module API: per-table set/get plus ranged flush */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC (Slicer) module state; versioned register shadow (v1 only). */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+/* SLC selectors: pseudo-functions then RCP fields */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+/* SLC module API */
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/*
+ * SLC LR module state; mirrors the SLC module above with its own
+ * versioned shadow (v2 only).
+ */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+/* SLC LR selectors: same field set as enum hw_slc_e */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+/* SLC LR module API */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB (Packet Descriptor Builder) module state; versioned shadow (v9). */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories;	/* number of PDB recipe categories */
+
+	union {
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+/* PDB selectors: pseudo-functions, then RCP and CONFIG fields */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/* PDB module API; note: config has a set/flush but no get accessor here */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module state; versioned shadow (v4 only). */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of IOA recipe categories */
+	uint32_t nb_roa_epp_entries;	/* number of ROA EPP entries */
+	union {
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+/* IOA selectors: pseudo-functions, then RCP/CONFIG/ROA_EPP fields */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+/* IOA module API */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+/* ROA EPP table shared with the IOA module */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA (Re-Order and re-circulate) module state; versioned shadow (v6). */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories;	/* number of tunnel categories */
+	uint32_t nb_lag_entries;	/* number of LAG config entries */
+	union {
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+/* ROA selectors: pseudo-functions, then TUNHDR/TUNCFG/CONFIG/LAGCFG and
+ * packet/byte drop counter fields.
+ */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/* ROA module API; tunhdr accessors address 32-bit words via word_off */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* ingress / re-circulate drop counters */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module state; versioned shadow (v1.3 only). */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+/* RMC selectors: control fields only (no pseudo-functions) */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+/* RMC module API: single CTRL register group */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/*
+ * TPE (TX Packet Editor) module state: capability counters and a
+ * versioned shadow supporting two layouts (v1 and v2).
+ */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of TPE recipe categories */
+	uint32_t nb_ifr_categories;	/* number of IFR categories */
+	uint32_t nb_cpy_writers;	/* number of TX copy writers */
+	uint32_t nb_rpl_depth;		/* TX replace table depth */
+	uint32_t nb_rpl_ext_categories;	/* TX replace extension categories */
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/*
+ * TPE selectors covering the sub-blocks RPP, IFR, INS, RPL, CPY, HFU
+ * and CSU; pseudo-functions first, fields from FIELD_START_INDEX.
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/* TPE module API: one set/get/flush triple per sub-block table */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* RPL value table: set takes a pointer (multi-word entry) unlike siblings */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug flags; WRITE presumably traces register writes - confirm */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operations table: the interface a concrete backend (real FPGA
+ * or a test/virtual implementation) must provide. It consists of
+ * capability getters (get_nb_*), queue allocation hooks, and per-module
+ * present/version queries plus flush callbacks that take the module's
+ * shadow struct and an index/count range. `dev`/`be_dev` is the opaque
+ * backend device handle passed back unchanged.
+ */
+struct flow_api_backend_ops {
+	int version;	/* backend interface version */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	/* capability getters - resource dimensions of the device */
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	/* RX queue allocation hooks */
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	/* learn/inform/status record streaming; size in 32-bit words -
+	 * TODO confirm unit against implementation
+	 */
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level backend handle: the opaque device pointer, the ops table,
+ * and the per-module shadow state operated on by the hw_mod_* APIs.
+ */
+struct flow_api_backend_s {
+	void *be_dev;		/* opaque backend device, passed to iface ops */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: init binds iface/be_dev; done releases resources */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT, /* number of managed resource types - keep right before RES_INVALID */
+	RES_INVALID /* sentinel: no/unknown resource */
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF, /* no extractor selected yet */
+	KM_USE_EXTRACTOR_QWORD, /* 128-bit quad-word extractor (QW0/QW4) */
+	KM_USE_EXTRACTOR_SWORD, /* 32-bit single-word extractor (SW8/SW9/SWX) */
+};
+
+/* One collected match field before it is merged into the final KM key */
+struct match_elem_s {
+	enum extractor_e extr; /* extractor class this field maps onto */
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4]; /* match value, up to 128 bits */
+	uint32_t e_mask[4]; /* bit mask applied to e_word */
+
+	int extr_start_offs_id; /* start-offset anchor id (frame_offs_e) */
+	int8_t rel_offs; /* signed offset relative to the anchor */
+	uint32_t word_len; /* field length in 32-bit words */
+};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+/*
+ * Key Matcher flow definition: collects the match fields of one flow,
+ * then holds the formatted CAM/TCAM entry and its bank placement state.
+ */
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS]; /* sorted view into match[] */
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS]; /* candidate record per bank (from hasher) */
+	int bank_used; /* bank actually holding the entry */
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+/* KCC CAM key layout; the four bitfields total exactly 64 bits so the
+ * struct overlays key64/key32[] in the union below.
+ */
+struct kcc_key_s {
+	uint64_t sb_data : 32; /* sideband data (VLAN/VXLAN/none) */
+	uint64_t sb_type : 8; /* sideband type discriminator */
+	uint64_t cat_cfn : 8;
+	uint64_t port : 16;
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+/* One KCC (KM category) CAM entry and its bank placement state */
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64;
+		uint32_t key32[2];
+		struct kcc_key_s key;
+	};
+	uint32_t km_category;
+	uint32_t id; /* allocated unique id, or KCC_ID_INVALID */
+
+	uint8_t *kcc_unique_ids; /* allocation bitmap for unique ids */
+
+	int flushed_to_target;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+/* Raw tunnel encapsulation header pushed by the TPE, plus the layer
+ * lengths needed to patch it (checksum, total length) at runtime.
+ */
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4]; /* same bytes, word access */
+	} d;
+	uint32_t user_port_id;
+	uint8_t len; /* number of valid bytes in d */
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc;
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+enum special_partial_match_e {
+	SPCIAL_MATCH_NONE, /* NOTE(review): misspelled (SPECIAL); rename needs a tree-wide fix since callers use this name */
+	SPECIAL_MATCH_LACP,
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+/* One resolved output destination of a flow */
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+/*
+ * Intermediate representation of one flow: the validated/converted
+ * match info and actions, produced in step 1 of flow creation and
+ * consumed when NIC resources are allocated and programmed.
+ */
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions; /* ROA action bit set */
+	uint64_t ioa_actions; /* IOA action bit set */
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select;
+		uint32_t dyn;
+		uint32_t ofs;
+		uint32_t len;
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable;
+	uint8_t ttl_sub_ipv4;
+	uint8_t ttl_sub_outer;
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW, /* fully resource-managed flow */
+	FLOW_HANDLE_TYPE_FLM, /* learned/offloaded FLM flow */
+};
+
+/*
+ * Handle returned for a created flow. 'type' discriminates the union:
+ * the first member is used for FLOW_HANDLE_TYPE_FLOW, the second for
+ * FLOW_HANDLE_TYPE_FLM.
+ */
+struct flow_handle {
+	enum flow_handle_type type;
+
+	struct flow_eth_dev *dev;
+	struct flow_handle *next; /* doubly linked list of handles on dev */
+	struct flow_handle *prev;
+
+	union {
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		struct {
+			uint32_t flm_data[10]; /* raw FLM record words */
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			struct flow_handle *flm_owner; /* owning FLOW-type handle */
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+struct group_lookup_entry_s {
+	uint64_t ref_counter;
+	uint32_t *reverse_lookup;
+};
+
+struct group_handle_s {
+	uint32_t group_count;
+
+	uint32_t *translation_table;
+
+	struct group_lookup_entry_s *lookup_entries;
+};
+
+/*
+ * Allocate and initialize a group-translation handle able to manage
+ * 'group_count' hardware groups.
+ *
+ * On success *handle holds the new handle and 0 is returned; on any
+ * allocation failure *handle is NULL and -1 is returned.
+ *
+ * Fix vs. original: the calloc() result was dereferenced without a
+ * NULL check, and a failed member allocation left a half-initialized
+ * handle behind while still reporting success.
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	group_handle = *handle;
+	if (!group_handle)
+		return -1;
+
+	group_handle->group_count = group_count;
+	group_handle->translation_table = calloc((uint32_t)(group_count * OWNER_ID_COUNT),
+						 sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	if (!group_handle->translation_table || !group_handle->lookup_entries) {
+		/* roll back partial allocations and clear *handle */
+		flow_group_handle_destroy(handle);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Free a group-translation handle and its owned tables. A NULL *handle
+ * is tolerated, and *handle is reset to NULL so repeated destruction is
+ * safe. Always returns 0.
+ */
+int flow_group_handle_destroy(void **handle)
+{
+	if (*handle) {
+		struct group_handle_s *group_handle =
+			(struct group_handle_s *)*handle;
+
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+
+		free(*handle);
+		*handle = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a caller-scope group id ('group_in' belonging to 'owner_id')
+ * into a hardware group id. Group 0 passes through untranslated. The
+ * first lookup of an (owner, group) pair allocates a free hardware group
+ * and records a reverse pointer for release; subsequent lookups bump the
+ * reference count. Returns 0 with the result in *group_out, or -1 on
+ * invalid arguments / exhaustion.
+ */
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	/*
+	 * translation_table is allocated with group_count * OWNER_ID_COUNT
+	 * entries, i.e. OWNER_ID_COUNT rows of group_count entries, so the
+	 * row stride must be group_count. (The original indexed with an
+	 * OWNER_ID_COUNT stride, which reads/writes out of bounds whenever
+	 * group_count < OWNER_ID_COUNT and aliases entries of different
+	 * owners when group ids exceed OWNER_ID_COUNT.)
+	 */
+	table_ptr = &group_handle->translation_table[(uint32_t)owner_id *
+				 group_handle->group_count + group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		/* unused so far - find first free hardware group (0 is reserved) */
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			return -1; /* all hardware groups in use */
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+/*
+ * Drop one reference on a previously translated hardware group. When the
+ * count reaches zero the translation-table slot is cleared through the
+ * stored reverse pointer so the hardware group becomes reusable. Group 0
+ * is never managed. Returns 0, or -1 on invalid arguments.
+ */
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *lookup;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	lookup = &group_handle->lookup_entries[translated_group];
+
+	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
+		lookup->ref_counter -= 1;
+		if (lookup->ref_counter == 0) {
+			/* last user gone: free the slot in the owner's table */
+			*lookup->reverse_lookup = 0;
+			lookup->reverse_lookup = NULL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+/* Fixed bit permutation of a 32-bit word (hash diffusion step) */
+static uint32_t shuffle(uint32_t x)
+{
+	return (((x & 0x00000002) << 29) | ((x & 0xAAAAAAA8) >> 3) |
+		((x & 0x15555555) << 3) | ((x & 0x40000000) >> 29));
+}
+
+/* Rotate right by s bits, with the rotated-in bits inverted */
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+	return ((x >> s) | ((~x) << (32 - s)));
+}
+
+/* Non-linear combination of two 32-bit words */
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	uint32_t x1 = ror_inv(x, 15);
+	uint32_t x2 = ror_inv(x, 13);
+	uint32_t y1 = ror_inv(y, 3);
+	uint32_t y2 = ror_inv(y, 27);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	return shuffle(combine(x, y));
+}
+
+/* The 64-bit ror_invN/shuffle64/combine64 variants below apply the
+ * corresponding 32-bit operation to both 32-bit halves in parallel;
+ * the masks keep the lanes independent despite the 64-bit shifts.
+ */
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t m = 0xE0000000E0000000ULL;
+
+	return (((x >> 3) | m) ^ ((x << 29) & m));
+}
+
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t m = 0xFFF80000FFF80000ULL;
+
+	return (((x >> 13) | m) ^ ((x << 19) & m));
+}
+
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t m = 0xFFFE0000FFFE0000ULL;
+
+	return (((x >> 15) | m) ^ ((x << 17) & m));
+}
+
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+
+	return (((x >> 27) | m) ^ ((x << 5) & m));
+}
+
+static uint64_t shuffle64(uint64_t x)
+{
+	return (((x & 0x0000000200000002) << 29) |
+		((x & 0xAAAAAAA8AAAAAAA8) >> 3) |
+		((x & 0x1555555515555555) << 3) |
+		((x & 0x4000000040000000) >> 29));
+}
+
+/* Concatenate two 32-bit words into one 64-bit value (x = high half) */
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	return (((uint64_t)x << 32) | y);
+}
+
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	uint64_t x1 = ror_inv15(x);
+	uint64_t x2 = ror_inv13(x);
+	uint64_t y1 = ror_inv3(y);
+	uint64_t y2 = ror_inv27(y);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	return shuffle64(combine64(x, y));
+}
+
+/*
+ * Reduce a 512-bit key (16 x 32-bit words) to one 32-bit hash by the
+ * binary mix tree drawn below, followed by three finalization rounds.
+ */
+static uint32_t calc16(const uint32_t key[16])
+{
+	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
+	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
+	/*   0       1       2       3       4       5       6       7     Layer 1   */
+	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
+	/*       0               1               2               3         Layer 2   */
+	/*        \______.______/                 \______.______/                    */
+	/*               0                               1                 Layer 3   */
+	/*                \______________.______________/                            */
+	/*                               0                                 Layer 4   */
+	/*                              / \                                          */
+	/*                              \./                                          */
+	/*                               0                                 Layer 5   */
+	/*                              / \                                          */
+	/*                              \./                                Layer 6   */
+	/*                             value                                         */
+
+	uint64_t z;
+	uint32_t x;
+
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	/* fold the 64-bit intermediate to 32 bits and finalize */
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+/*
+ * Hash a 16-word key and split the hash into one record index per CAM
+ * bank: result[i] receives cam_records_bw bits for bank i. When the
+ * total bank width exceeds 32 bits the hash is widened by folding.
+ * Returns the raw 32-bit hash value.
+ */
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val; /* widen by folding */
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+/*
+ * Initialize the hasher for 'banks' CAM banks of 'nb_records' records
+ * each: derives the per-bank index bit-width and the total hash width
+ * consumed by gethash(). Always returns 0.
+ * NOTE(review): assumes nb_records >= 2; log2(0) for nb_records == 1 is
+ * undefined - confirm against callers.
+ */
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	/* fix: original passed the undeclared name '_banks' here, which
+	 * broke the build whenever TESTING was defined
+	 */
+	int res = hash_test(hsh, banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+/*
+ * Self-test: hashes a fixed vector and compares the raw hash and the
+ * reconstructed per-bank indexes against precomputed expectations.
+ * Returns the number of mismatches (0 == pass).
+ */
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	/* words 4-15 are implicitly zero - part of the fixed test vector */
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65;
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	/* re-derive the expected 3 bank indexes from the raw hash,
+	 * mirroring gethash()'s folding when the width exceeds 32 bits
+	 */
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+/* Shadow bookkeeping for one KCC CAM record: which flow owns it and
+ * how many flows reference it.
+ */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner;
+	int ref_cnt;
+};
+
+/* Index into the cam_dist shadow array for bank 'bnk' using the
+ * record index computed for that bank; expects a 'kcc' in scope.
+ */
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+/* byte size of the cam_dist shadow array */
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+/* byte size of the unique-id allocation bitmap (one bit per id) */
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+/* scratch list of CAM addresses reserved during a recursive cuckoo move */
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * Attach this flow definition to the per-NIC KCC-CAM manager. The first
+ * caller allocates one shared slab laid out as: cam_dist shadow array,
+ * cuckoo-move counter (uint32_t), unique-id bitmap, hasher state; later
+ * callers just map their pointers into the existing slab.
+ * NOTE(review): the calloc() result is not checked - a failed allocation
+ * leads to NULL derefs below; needs an error path (void return today).
+ */
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+/* Free the shared KCC-CAM manager slab and NULL the handle */
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ * Each setter fills one field of the 64-bit CAM key; sb_type selects
+ * which sideband interpretation applies (0: none, 1: VLAN, 2: VXLAN).
+ * All setters return 0.
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	/* TPID in the upper 16 bits, 12-bit VID in the lower */
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	/* 24-bit VNI tagged with marker bit 25 */
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+/*
+ * Allocate the lowest free unique KCC id from the shared allocation
+ * bitmap (one bit per id), store it in kcc->id and return it; returns
+ * -1 when every id is taken.
+ */
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	/* find the first clear bit in the byte and claim it */
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Return kcc->id to the allocation bitmap and mark the flow id invalid.
+ * Safe to call when no id is held (KCC_ID_INVALID is a no-op).
+ */
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3; /* byte index in the bitmap */
+	uint8_t shft = (uint8_t)(kcc->id & 7); /* bit within the byte */
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+/* Return 1 when both flows carry the identical 64-bit CAM key, else 0 */
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
+/*
+ * Write this flow's key, KM category and id into the KCC CAM record of
+ * 'bank' (at the record index pre-computed for that bank), flush it to
+ * hardware, and claim the record in the cam_dist shadow with ref_cnt 1.
+ * Returns 0 on success, -1 on any backend write failure.
+ */
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+/*
+ * Zero the CAM record this flow occupies in 'bank', flush the zeroed
+ * record to hardware, release the cam_dist shadow slot and clear the
+ * flow's cached key/category. Returns 0 on success, -1 on write failure.
+ */
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank into any
+ * other bank whose candidate record is free (single cuckoo step).
+ * Returns 1 when the entry was moved, 0 when no free slot exists or the
+ * hardware write failed.
+ */
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller If you change this code in future updates, this may no
+			 * longer be true then!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++; /* statistics only */
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to move the flow occupying CAM
+ * address 'bank_idx' (a cam_dist index, despite the name) somewhere
+ * else, recursing up to 'levels' deep. Addresses already reserved along
+ * the current displacement chain are tracked in the module-level
+ * kcc_cam_addr_reserved_stack and skipped to avoid cycles.
+ * Returns 1 when a slot was freed up, 0 otherwise.
+ */
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address while we displace entries below it */
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			/* a deeper move freed a slot - our own move must now succeed */
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/* 16-word hash input; only words 0-1 carry the key, 2-15 stay zero.
+ * NOTE(review): file-scope buffer makes this path non-reentrant -
+ * confirm single-threaded use by callers.
+ */
+static uint32_t kcc_hsh_key[16];
+
+/*
+ * Place this flow's key into the KCC CAM: hash the key to get one
+ * candidate record per bank, take a free bank if any, otherwise try to
+ * make room via bounded cuckoo displacement, then program the record.
+ * Returns 0 on success, -1 when no slot could be found or written.
+ */
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	/* words swapped into the hasher input */
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	/* NOTE(review): log assumes at least 3 banks - verify kcc_banks >= 3 */
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1; /* CAM full, displacement failed */
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+/*
+ * Public entry: program a brand-new KCC key into the CAM.
+ * Returns 0 on success, -1 on failure.
+ * NOTE(review): "%016lx" for the uint64_t key is wrong on ILP32 -
+ * should use PRIx64 from <inttypes.h> (applies to the logs below too).
+ */
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+/*
+ * Remove this flow's entry from the CAM if it was ever flushed to
+ * hardware; otherwise a no-op. Returns the reset result (0 = ok).
+ */
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
+/*
+ * Add one reference to the CAM record this flow occupies (used when a
+ * new flow matches an already-programmed key). Returns the new count.
+ */
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+/*
+ * Drop one reference from the CAM record; when the count hits zero the
+ * hardware entry is cleared. Returns the remaining count, or -1 when
+ * the flow holds no valid bank.
+ */
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Key mask patterns that the KM CAM supports directly.
+ * A match element whose (word_len, mask) pair equals one of these rows
+ * may be stored in CAM; any other mask pattern forces the flow into
+ * TCAM (see km_add_match_elem(), which sets masked_for_tcam from this
+ * table).
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+/* Number of rows in the cam_masks table above */
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Per-record CAM ownership: which KM flow definition occupies a record */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index into cam_dist for (bank, record); relies on a local 'km' */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/* Index of this flow's own record in the given bank (GNU statement expr) */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Max recursion depth for the cuckoo-move search in the CAM */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/*
+ * Addresses already claimed along the current cuckoo-move path.
+ * NOTE(review): file-scope scratch state shared by the recursive move;
+ * presumably callers are serialized — confirm locking at a higher level.
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Per-record TCAM ownership: which KM flow definition occupies a record */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index into tcam_dist for (bank, record); relies on a local 'km' */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach this flow definition to the per-NIC-device CAM/TCAM record
+ * manager. On first use (*handle == NULL) one zeroed allocation is made
+ * holding, in order: the CAM ownership table, the cuckoo-move counter,
+ * the TCAM ownership table and the hasher state; subsequent callers
+ * share it. The km_* pointers are then aimed at the respective slices.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		/* NOTE(review): calloc result is not checked; an OOM here
+		 * leads to NULL derefs in the pointer setup below.
+		 */
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Free the shared CAM/TCAM record manager allocation (if present) and
+ * clear the caller's handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle != NULL) {
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+		free(*handle);
+	}
+	*handle = NULL;
+}
+
+/*
+ * Append one match element (value + mask words, extractor start id and
+ * relative offset) to this KM flow definition.
+ *
+ * A 3-word element is widened to 4 words with a zero fourth word/mask;
+ * any other length outside 1/2/4 is rejected with -1. The element is
+ * tagged masked_for_tcam unless its mask pattern matches a row in the
+ * cam_masks table, in which case it may be placed in CAM.
+ *
+ * Returns 0 on success, -1 on invalid word_len.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	/* copy value and mask words into the next free match slot */
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable (non-zero) or disable (0) the color info word in the KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first match element that is not yet marked as consumed, is
+ * not a sideband (SWX) element, and has exactly the requested word
+ * length. Returns its index, or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/*
+ * Debug-only: human-readable name for a frame/sideband offset id used
+ * when tracing extractor setup. Unknown ids map to "<unknown>".
+ */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow definition.
+ *
+ * Packs the collected match elements into the QWORD and SWORD extractor
+ * slots, appends any sideband (SWX) words, decides whether the key goes
+ * to CAM or TCAM (TCAM only when no SWX word is used, the key is small
+ * enough and at least one element was masked_for_tcam), and for TCAM
+ * pads the key to a supported length and computes the legal start-bank
+ * offsets.
+ *
+ * Returns 0 on success, -1 when the match elements cannot be mapped
+ * onto the available extractors or the final key is too large.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;	/* next free slot in entry_word/entry_mask */
+	int next = 0;	/* next free slot in match_map */
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords - prefer the largest remaining elements first */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially - they are appended last and force CAM
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * A 1-word key may start in bank 8, 9, 10 or 11.
+			 * Fix: the original wrote start_offsets[0] on every
+			 * iteration, leaving elements 1..3 uninitialized
+			 * while num_start_offsets claimed 4 valid entries.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare a new KM flow definition (km) against an existing one (km1,
+ * the original creator of the KM recipe) to decide whether the recipe
+ * and flow type can be reused.
+ *
+ * Returns:
+ *   0  - not compatible, cannot reuse
+ *  -1  - identical masked key values: a flow clash, already programmed
+ *  >0  - km1's flow type, which km should reuse
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/*
+	 * NOTE(review): the first test reads km->match[i] while the second
+	 * reads km->match_map[i]->; presumably intentional (match[] is the
+	 * insertion order, match_map[] the final key order) - confirm.
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program the KM recipe at 'index' in hardware from this flow
+ * definition: extractor selection (QW0/QW4, DW8/DW10, SWX), key mask A,
+ * and the CAM- or TCAM-specific recipe fields (key length, flow type
+ * mask, pairing / bank bitmap).
+ *
+ * Returns 0 on success, -1 when the elements exceed the available
+ * extractors or TCAM mapping fails. Statement order matters here; the
+ * code is left byte-identical.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;	/* QWORD extractors used so far (max 2: QW0, QW4) */
+	int sw = 0;	/* SWORD extractors used so far (max 2: DW8, DW10) */
+	int swx = 0;	/* sideband extractors used so far (max 1) */
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written MSW-first into the recipe mask registers */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into the CAM at the given
+ * bank (record index taken from km->record_indexes[bank]) and claim the
+ * record(s) in the ownership table. A key longer than one CAM record
+ * spills into the next record (cam_paired). Returns OR'ed backend
+ * status (0 on success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	/* remaining words go into the paired (next) record */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's key words and flow type in the CAM at the given bank
+ * and release the record(s) in the ownership table - the inverse of
+ * cam_populate(). Returns OR'ed backend status (0 on success).
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	/* also clear the paired (next) record, if used */
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank to any
+ * other bank with a free record (free pair when cam_paired). On success
+ * the old record ownership is released, bank_used is updated and the
+ * shared cuckoo-move counter incremented. Returns 1 on success, 0 if no
+ * free slot was found or the hardware write failed.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to move the flow occupying
+ * cam_dist[bank_idx] (directly, or by first displacing flows blocking
+ * it, up to 'levels' deep). Addresses already claimed along the current
+ * path are tracked in the file-scope cam_addr_reserved_stack to avoid
+ * cycles. Returns 1 when the occupying flow was moved, 0 otherwise.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address so deeper levels do not revisit it */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		/* a deeper move freed space; now this flow must be movable */
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program this flow's key into the KM CAM: hash the key words to get a
+ * candidate record per bank, take the first bank with a free record
+ * (free pair when cam_paired), or cuckoo-displace existing flows up to
+ * 4 levels deep to make room. Returns 0 on success, -1 when no bank
+ * could be freed, otherwise the backend write status.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): this log reads indexes [0..2] - presumably
+	 * nb_cam_banks >= 3 on all supported FPGAs; confirm.
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Search the TCAM for a record index that is free across all
+ * key_word_size consecutive banks starting at start_bank. On success
+ * stores the record in km->tcam_record and returns 1; returns 0 when no
+ * such record exists.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			/* the same record must be free in every used bank */
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find a start bank / record combination in the TCAM for this flow by
+ * probing each legal start offset in turn. On success sets
+ * tcam_start_bank (and, via tcam_find_free_record, tcam_record) and
+ * returns 0; returns -1 when no space is available.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int off = 0; off < km->num_start_offsets; off++) {
+		int start_bank = km->start_offsets[off];
+
+		if (!tcam_find_free_record(km, start_bank))
+			continue;
+
+		km->tcam_start_bank = start_bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word of a TCAM record: for each of the four
+ * key bytes, set this record's bit in the match vector of every byte
+ * value that matches (value & mask), and clear it in all others. On
+ * success the record is claimed in the ownership table. Returns OR'ed
+ * backend status (0 on success).
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* position of this record's bit within the 96-bit match vector */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program this flow into the TCAM: locate a free record if one was not
+ * inherited from a reused recipe, write the TCI (color + flow type),
+ * then program one bank per key word. Sets flushed_to_target on
+ * success. Returns 0 on success, -1 when no record is free, otherwise
+ * the backend status.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one TCAM bank per key word, starting at tcam_start_bank */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record's bit in every (byte, value) cell of a TCAM bank, flush
+ * the bank, and release ownership of the record.
+ *
+ * @param km     flow definition that owned the entry
+ * @param bank   TCAM bank to clear
+ * @param record record index within the bank
+ * @return 0 on success, non-zero on backend failure
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	/* Bug fix: use an unsigned constant - "1 << 31" on a signed int is
+	 * undefined behavior when rec_bit_shft is 31.
+	 */
+	uint32_t rec_bit = (1U << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Clear this flow's TCAM entry: zero its TCI (color and flow type) and
+ * remove its record bit from every bank the key occupies.
+ *
+ * Bug fix: the return values of the TCI set/flush calls were previously
+ * discarded; they are now accumulated and reported, consistent with
+ * km_write_data_to_tcam().
+ *
+ * @return 0 on success, -1 if no entry was allocated, non-zero on backend
+ *         failure
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+	int tci_err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Clear KM_TCI */
+	tci_err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR,
+				     km->tcam_start_bank, km->tcam_record, 0);
+	tci_err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT,
+				     km->tcam_start_bank, km->tcam_record, 0);
+	tci_err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank,
+				       km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* still attempt the bank resets even if the TCI clear failed */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* bank reset errors take precedence; otherwise report TCI errors */
+	return err ? err : tci_err;
+}
+
+/*
+ * Make km a follower of km1's already-programmed match entry.
+ *
+ * km is appended to the tail of km1's reference chain and inherits the
+ * target-specific placement (CAM bank / TCAM bank+record) and flush state.
+ * Returns 0 on success, -1 for an unsupported target.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	/* km's root is km1's root, or km1 itself if km1 is the root */
+	km->root = (km1->root != NULL) ? km1->root : km1;
+
+	/* append km at the end of the reference chain */
+	while (tail->reference != NULL)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = km1->info;
+
+	if (km->target == KM_CAM) {
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+
+	if (km->target == KM_TCAM) {
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+
+	/* KM_SYNERGY and anything else is not supported */
+	return -1;
+}
+
+/*
+ * Flush this flow's match data to the selected target (CAM or TCAM),
+ * tagging it with the given color.
+ * Returns 0 on success, -1 on failure or unsupported target.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		return km_write_data_to_cam(km);
+
+	if (km->target == KM_TCAM)
+		return km_write_data_to_tcam(km);
+
+	/* KM_SYNERGY and unknown targets are not supported */
+	return -1;
+}
+
+/*
+ * Detach this flow from its match entry, clearing hardware only when needed.
+ *
+ * Three cases:
+ *  - km is a follower (has a root): just unlink it from the reference
+ *    chain; the root still owns the hardware entry.
+ *  - km is a root with followers: hand hardware ownership over to the
+ *    first follower instead of clearing the entry.
+ *  - km is the sole owner and was flushed: clear the entry in hardware.
+ *
+ * @return 0 on success, -1 on unsupported target or hardware error
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* follower: unlink km from the root's reference chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* root with followers: first follower becomes the new root */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			/* transfer CAM bank ownership (both banks if paired) */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			/* transfer ownership of every TCAM bank the key uses */
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* sole owner: actually clear the entry in hardware */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/* Virtual tunnel ports occupy ids [VIRTUAL_TUNNEL_PORT_OFFSET, MAX_HW_VIRT_PORTS);
+ * 255 doubles as the "no port available" sentinel.
+ */
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One tunnel definition tracked in the driver's tunnel database */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg; /* tunnel header fields (addresses, ports) */
+	struct tunnel_cfg_s cfg_mask; /* valid-bit mask for cfg */
+	uint32_t flow_stat_id; /* (uint32_t)-1 until assigned by flow code */
+	uint8_t vport; /* virtual port allocated for this tunnel */
+	int refcnt; /* number of users; freed when it drops to 0 */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/*
+ * Return 1 when the port id falls inside the virtual tunnel port window,
+ * 0 otherwise.
+ */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET)
+		return 0;
+	return (virt_port < MAX_HW_VIRT_PORTS) ? 1 : 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* Head of the linked list of currently defined tunnels */
+static struct tunnel_s *tunnels;
+
+/* Allocation map for virtual tunnel ports; entry i covers port
+ * VIRTUAL_TUNNEL_PORT_OFFSET + i (1 = in use, 0 = free)
+ */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual tunnel port.
+ * Returns the port id, or 255 when the pool is exhausted.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	/* scan the vport map for the first free slot */
+	for (unsigned int slot = 0;
+			slot < MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET;
+			slot++) {
+		if (vport[slot] == 0) {
+			vport[slot] = 1;
+			return (uint8_t)(slot + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ * Returns 0 on success.
+ * NOTE(review): returns -1 for an out-of-range port, but the return type is
+ * uint8_t, so callers actually observe 255 - the same value as the
+ * "pool exhausted" sentinel of flow_tunnel_alloc_virt_port(); consider an
+ * int return type in a follow-up.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/* True when v1 and v2 agree on every bit that is valid in BOTH masks.
+ * Uses a GCC/Clang statement expression to evaluate each argument once.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked compare of two IPv4 tunnel configs: src/dst address and UDP ports */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked compare of two IPv6 tunnel configs: 2x64-bit src/dst halves and
+ * UDP ports
+ */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked compare of a db tunnel against a candidate config.
+ * Returns non-zero on a match, 0 otherwise.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	/* tunnel types must agree before comparing addresses/ports */
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up a tunnel in the database, or create it if not found.
+ *
+ * @param tnlcfg      tunnel config to find/register
+ * @param tnlcfg_mask valid-bit mask for tnlcfg
+ * @param tun_set     non-zero for a "tunnel set" (full definition); zero for
+ *                    a match search
+ * @return the (possibly new) tunnel with its refcount incremented, or NULL
+ *         when no virtual port or memory is available
+ *
+ * Bug fixes: the calloc() result is now checked (it was dereferenced
+ * unconditionally before), and the local variable no longer shadows the
+ * file-scope vport[] allocation map.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t new_vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       new_vport);
+
+		if (new_vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* give the port back on allocation failure */
+				flow_tunnel_free_virt_port(new_vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = new_vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel; on the last reference, unlink it from the
+ * database, release its virtual port and free it.
+ * Returns 0 on success, -1 if the tunnel is not in the database.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *cur = tunnels;
+	struct tunnel_s *prev = NULL;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the tunnel, remembering its predecessor for unlinking */
+	while (cur != NULL && cur != tnl) {
+		prev = cur;
+		cur = cur->next;
+	}
+
+	if (cur == NULL) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	cur->refcnt--;
+	if (cur->refcnt != 0)
+		return 0;
+
+	/* last reference gone - unlink and destroy */
+	if (prev == NULL)
+		tunnels = cur->next;
+	else
+		prev->next = cur->next;
+	flow_tunnel_free_virt_port(cur->vport);
+
+	NT_LOG(DBG, FILTER,
+	       "tunnel ref count == 0 remove tunnel vport %i\n",
+	       cur->vport);
+	free(cur);
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel description from a flow element list and look it up in /
+ * add it to the tunnel database.
+ *
+ * @param elem flow element array; parsing starts at *idx + 1 and stops at
+ *             FLOW_ELEM_TYPE_END or a non-monotonic element type
+ * @param idx  in: index of the current element; out: index of the next
+ *             unparsed element (or END)
+ * @param vni  out: VXLAN VNI if present; a NULL vni means this is a
+ *             "tunnel set" command
+ * @return the tunnel db entry, or NULL on invalid input
+ *
+ * Bug fix: tnlcfg_mask was previously left uninitialized; tunnel_get()
+ * memcmp()s and copies the whole mask struct, so fields not provided by any
+ * element were read as garbage. It is now zero-initialized like tnlcfg.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* 24-bit VNI, network order bytes */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port allocated for this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Bind a flow statistics id to this tunnel (set later from flow code). */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy the config of the tunnel matching (vport, flow_stat_id) into tuncfg.
+ * A flow_stat_id of (uint32_t)-1 acts as a wildcard.
+ * Returns 0 on success, -1 when no tunnel matches.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun != NULL; tun = tun->next) {
+		int id_match = (flow_stat_id == tun->flow_stat_id ||
+				flow_stat_id == (uint32_t)-1);
+
+		if (tun->vport == vport && id_match) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * Accumulate a 16-bit one's-complement style checksum over "size" bytes,
+ * starting from "seed", folding carries back into the low 16 bits.
+ *
+ * NOTE(review): for an odd "size" the trailing byte is taken as
+ * (unsigned char)data[idx], i.e. the low-order byte of a full 16-bit read
+ * one element past the even part - this reads one byte past "size" and is
+ * byte-order dependent; confirm callers only pass even sizes (the visible
+ * caller passes sizeof(struct ipv4_hdr_s)).
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy the raw (unmasked) spec bytes of a flow element into result. */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	const uint8_t *src = (const uint8_t *)elem->spec;
+
+	memcpy(result, src, size);
+}
+
+/*
+ * Build the raw VXLAN encapsulation header (ETH/IPv4-or-IPv6/UDP/VXLAN)
+ * from a flow element list into fd->tun_hdr.
+ *
+ * Elements are consumed in order; each case normalizes mandatory fields
+ * (IP version, TTL/hop limit, UDP protocol, DF flag, VXLAN I-flag, zero UDP
+ * checksum) and appends the header bytes to fd->tun_hdr.d.hdr8, advancing
+ * fd->tun_hdr.len. Finally, a partial IPv4 header checksum is precomputed
+ * with length/checksum fields zeroed, seeded with the payload length, so
+ * hardware only has to add the final length at transmit time.
+ *
+ * @return 0 on success, -1 on an unsupported element or missing backend
+ *         capability (IPv6 requires ROA version >= 6)
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* NOTE(review): addr_b[4] is never printed;
+				 * addr_b[5] appears twice in both the dmac and
+				 * smac debug lines below
+				 */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[5],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[5],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force version 4, IHL 5 (20-byte header) */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				/* only the wire header part is appended */
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				/* remember position for checksum precalc below */
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				/* only the wire header part is appended */
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				/* only the wire header part is appended */
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				/* not part of the wire header - kept aside */
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* precompute the IPv4 header checksum with length/csum zeroed,
+		 * seeded with the post-ETH payload length; hardware finishes it
+		 */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+/*
+ * Forward declarations: the KCE/KCS/FTE flush helpers take the internal
+ * KM/FLM interface id (km_if_id) in addition to the public flush arguments,
+ * so they are kept file-local and wrapped by the public *_km_/_flm_ variants.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Return true when the backend reports the CAT module as present. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Query the backend for CAT resource counts and allocate the version-specific
+ * shadow cache (cfn/kce/kcs/fte/cte/cts/cot/cct/exo/rck/len/kcc, plus cce/ccs
+ * for v22) in one contiguous area via callocate_mod().
+ *
+ * Returns 0 on success, a negative error code on an unsupported version or a
+ * bad resource count, and -1 when callocate_mod() fails.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory counts: zero or negative is an error (checked with <= 0). */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* Optional counts: zero is valid (checked with < 0 only). */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks exist. */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * Allocate the per-version shadow tables. Arguments to callocate_mod()
+	 * come in (pointer, element count, element size) triples; the second
+	 * argument is the number of triples.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		/* v21 doubles the FTE key count (x4 vs x2) vs v18. */
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		/* v22 adds CCE (4 entries) and CCS (1024 entries) tables. */
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT shadow cache. All per-version tables live in the single
+ * allocation rooted at be->cat.base (set up by callocate_mod()), so one
+ * free() covers them all; the pointer is cleared to prevent double free.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	if (be->cat.base) {
+		free(be->cat.base);
+		be->cat.base = NULL;
+	}
+}
+
+/*
+ * Reset CFN entry @i to permissive defaults: zero the entry, then open all
+ * protocol/error match fields so the category function accepts everything.
+ *
+ * NOTE(review): only the return value of the initial PRESET_ALL call is
+ * propagated; errors from the subsequent field writes are ignored —
+ * presumably they cannot fail once the preset succeeded, but confirm.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields introduced in CAT v21 and later. */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the whole CAT module: zero the shadow cache and flush every table to
+ * hardware. KCE/KCS/FTE are flushed per KM/FLM interface: v18 has a single
+ * implicit interface, later versions flush interface m0 and, when present,
+ * m1. Returns 0 on success, -1 on the first failing flush.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* Second KM/FLM interface, when the FPGA exposes one. */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC is optional; skip when the FPGA reports a zero-sized table. */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist from CAT v22 onward. */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush @count CFN shadow entries starting at @start_idx to hardware.
+ * ALL_ENTRIES flushes the whole table (start_idx must then be 0).
+ * Returns the backend flush result, or an error when the range exceeds
+ * the table size.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	switch (count) {
+	case ALL_ENTRIES:
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						be->cat.nb_cat_funcs);
+
+	default:
+		if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						count);
+	}
+}
+
+/*
+ * Get or set a single CFN field in the shadow cache.
+ *
+ * @field:    which CFN field to access (also carries pseudo-ops such as
+ *            PRESET_ALL, COMPARE, FIND and COPY_FROM, which are set-only)
+ * @index:    CFN entry index (bounds-checked against nb_cat_funcs)
+ * @word_off: word offset for multi-word fields (only PM_CMP uses it here)
+ * @value:    in for set, out for get
+ * @get:      non-zero reads the field, zero writes it
+ *
+ * Dispatches on the module version: v18 uses the v18 layout, v21 and v22
+ * share the v21 CFN layout. Returns 0 on success or a negative error for
+ * unsupported fields/versions and out-of-range indices.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a 2-word field. */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has a single KM interface; field is km_or. */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* Copy entire entry *value into entry @index (v21+ only). */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a CFN field; thin wrapper around hw_mod_cat_cfn_mod() (get = 0). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read a CFN field; thin wrapper around hw_mod_cat_cfn_mod() (get = 1). */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map (if_num, km_if_id) to the array index (0 or 1) used by the v21+
+ * per-interface shadow tables. v18 has a single interface, always index 0.
+ * KM_FLM_IF_SECOND only matches interface m1; otherwise m0 maps to 0 and
+ * m1 to 1. Returns a negative error when the id matches neither interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	int km_if_idx;
+
+	if (_VER_ == 18) {
+		km_if_idx = 0;
+	} else {
+		if (if_num == KM_FLM_IF_SECOND) {
+			if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		} else {
+			if (be->cat.km_if_m0 == km_if_id)
+				km_if_idx = 0;
+			else if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		}
+	}
+	return km_if_idx;
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush KCE shadow entries to hardware for the selected KM/FLM interface.
+ * Each KCE entry covers 8 CFNs, so the table has nb_cat_funcs / 8 entries
+ * and indices are in that reduced range.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Public KCE flush for the KM interface (internal interface id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Public KCE flush for the FLM interface (internal interface id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the KCE enable bitmap for one entry (8 CFNs per entry).
+ * v18 has a single bitmap; v21/v22 keep one bitmap per KM/FLM interface,
+ * selected via find_km_flm_module_interface_index().
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public KCE accessors: KM uses interface id 0, FLM uses id 1. */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/*
+ * Flush KCS shadow entries to hardware for the selected KM/FLM interface.
+ * The KCS table has one entry per CFN (nb_cat_funcs entries).
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Public KCS flush for the KM interface (internal interface id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Public KCS flush for the FLM interface (internal interface id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the KCS category field for one CFN entry. v18 stores a single
+ * category; v21/v22 store one per KM/FLM interface, selected via
+ * find_km_flm_module_interface_index().
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public KCS accessors: KM uses interface id 0, FLM uses id 1. */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE shadow entries to hardware for the selected KM/FLM interface.
+ * The FTE table size depends on the module version: (nb_cat_funcs / 8) *
+ * nb_flow_types * key_cnt, where key_cnt is 4 from version 20 onward and
+ * 2 before that.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Public FTE flush for the KM interface (internal interface id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Public FTE flush for the FLM interface (internal interface id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the FTE enable bitmap for one entry. The index range matches
+ * the FTE table size ((nb_cat_funcs / 8) * nb_flow_types * key_cnt, with
+ * key_cnt 4 from v20, else 2). v18 has a single bitmap; v21/v22 keep one
+ * per KM/FLM interface.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one FTE field on the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read one FTE field from the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+/* Write one FTE field on the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read one FTE field from the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/* Flush CTE entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one CTE shadow-register field.
+ * 'get' != 0 reads into *value, otherwise writes *value.
+ * The v18 layout is shared by versions 18/21/22 (see hw_mod_cat_v21/v22
+ * headers, which reuse cat_v18_cte_s).
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTE field. */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cte_mod(be, field, index, &v, 0);
+}
+
+/* Read one CTE field. */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cte_mod(be, field, index, value, do_get);
+}
+
+/*
+ * Flush CTS entries [start_idx, start_idx + count) to hardware.
+ * Each CAT function occupies 'addr_size' CTS words; before module version 15
+ * a fixed 8 is used, afterwards it is derived from cts_num.
+ * NOTE(review): hw_mod_cat_cts_mod below computes addr_size without the
+ * _VER_ < 15 fallback — presumably fine since only versions 18/21/22 are
+ * supported there, but worth confirming the two stay in sync.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one CTS shadow-register field (cat_a / cat_b word).
+ * 'get' != 0 reads into *value, otherwise writes *value.
+ * The v18 layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* words per CAT function; rounded up when cts_num is odd */
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTS field. */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cts_mod(be, field, index, &v, 0);
+}
+
+/* Read one CTS field. */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cts_mod(be, field, index, value, do_get);
+}
+
+/* Flush COT entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one COT shadow-register field, or perform one of the pseudo
+ * operations:
+ *  - HW_CAT_COT_PRESET_ALL: memset the whole entry to the low byte of *value
+ *    (set only);
+ *  - HW_CAT_COT_COMPARE: compare entry 'index' against entry '*value';
+ *  - HW_CAT_COT_FIND: find an entry equal to entry 'index', result in *value.
+ * The v18 layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one COT field. */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cot_mod(be, field, index, &v, 0);
+}
+
+/* Read one COT field. */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cot_mod(be, field, index, value, do_get);
+}
+
+/* Flush CCT entries [start_idx, start_idx + count); 4 entries per CAT function. */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = be->cat.nb_cat_funcs * 4;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one CCT shadow-register field (color / km word).
+ * 'get' != 0 reads into *value, otherwise writes *value.
+ * The v18 layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* 4 CCT entries per CAT function, matching hw_mod_cat_cct_flush */
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCT field. */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cct_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCT field. */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cct_mod(be, field, index, value, do_get);
+}
+
+/* Flush KCC CAM entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = be->cat.kcc_size;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one KCC CAM shadow-register field.
+ * 'word_off' selects the key word and is only used for HW_CAT_KCC_KEY;
+ * the key is two 32-bit words, so word_off must be 0 or 1.
+ * The v18 layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KCC CAM field (word_off selects the key word for HW_CAT_KCC_KEY). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one KCC CAM field. */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, do_get);
+}
+
+/* Flush EXO entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = be->cat.nb_pm_ext;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one EXO shadow-register field.
+ * 'ofs' is a signed offset (int32_t in cat_v18_exo_s), hence get_set_signed.
+ * The v18 layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one EXO field. */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_exo_mod(be, field, index, &v, 0);
+}
+
+/* Read one EXO field. */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_exo_mod(be, field, index, value, do_get);
+}
+
+/* Flush RCK entries [start_idx, start_idx + count); 64 entries per PM extractor. */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = be->cat.nb_pm_ext * 64;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one RCK shadow-register data word.
+ * The v18 layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* 64 RCK entries per PM extractor, matching hw_mod_cat_rck_flush */
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCK data word. */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_rck_mod(be, field, index, &v, 0);
+}
+
+/* Read one RCK data word. */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_rck_mod(be, field, index, value, do_get);
+}
+
+/* Flush LEN entries [start_idx, start_idx + count) to hardware. */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = be->cat.nb_len;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one LEN shadow-register field (length-check bounds and flags).
+ * 'get' != 0 reads into *value, otherwise writes *value.
+ * The v18 layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LEN field. */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_len_mod(be, field, index, &v, 0);
+}
+
+/* Read one LEN field. */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_len_mod(be, field, index, value, do_get);
+}
+
+/* Flush CCE entries [start_idx, start_idx + count); the CCE table holds 4 entries. */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = 4;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one CCE shadow-register field (version 22 only).
+ * 'get' != 0 reads into *value, otherwise writes *value.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/*
+	 * The CCE table has a fixed 4 entries (see hw_mod_cat_cce_flush).
+	 * The previous bound used be->cat.nb_len, a copy-paste from the LEN
+	 * table, which accepted out-of-range indexes whenever nb_len > 4 and
+	 * rejected valid ones if nb_len < 4.
+	 */
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCE field. */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cce_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCE field. */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cce_mod(be, field, index, value, do_get);
+}
+
+/* Flush CCS entries [start_idx, start_idx + count); the CCS table holds 1024 entries. */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int table_size = 1024;
+
+	if (count == ALL_ENTRIES)
+		count = table_size;
+
+	if ((unsigned int)(start_idx + count) > table_size)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Get or set one CCS shadow-register field (version 22 only).
+ * 'get' != 0 reads into *value, otherwise writes *value.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/*
+	 * The CCS table has a fixed 1024 entries (see hw_mod_cat_ccs_flush).
+	 * The previous bound used be->cat.nb_len, a copy-paste from the LEN
+	 * table, so the check did not match the actual table size.
+	 */
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCS field. */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_ccs_mod(be, field, index, &v, 0);
+}
+
+/* Read one CCS field. */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_ccs_mod(be, field, index, value, do_get);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CAT categorizer function (CFN) shadow registers, FPGA module version 18. */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;		/* invert the overall match result */
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;	/* ptc_tnl_*: inner (tunneled) header checks */
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;		/* single KM interface in v18 (cf. km0/km1 in v21) */
+};
+
+/* KCE: KM category enable bitmap, one word per entry (v18). */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS: KM category select (v18). */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE: flow type enable bitmap (v18, single KM interface). */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/*
+ * CTE: per-category enable bits for the downstream modules, accessible both
+ * as one 32-bit word and as individual flag bits.
+ */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS: two category words per entry (see hw_mod_cat_cts_mod). */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT: color/KM output per category. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT: color/KM output, 4 entries per CAT function (see cct_flush). */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO: pattern-matcher extractor offset; ofs is signed (see get_set_signed). */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK: 64 data words per PM extractor (see rck_flush). */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN: length-check bounds and flags. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC: CAM entry with a two-word key (see hw_mod_cat_kcc_mod). */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Collected shadow-register tables for the CAT module, version 18. */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/*
+ * CAT categorizer function (CFN) shadow registers, version 21.
+ * Extends v18 with tunnel checksum / TTL error checks and a second KM
+ * interface (km0_or / km1_or instead of the single km_or).
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;		/* new in v21 */
+	uint32_t err_tnl_l4_cs;		/* new in v21 */
+	uint32_t err_ttl_exp;		/* new in v21 */
+	uint32_t err_tnl_ttl_exp;	/* new in v21 */
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE (v21): one enable bitmap per KM/FLM interface. */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS (v21): one category select per KM/FLM interface. */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE (v21): one flow type enable bitmap per KM/FLM interface. */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/*
+ * Collected shadow-register tables for the CAT module, version 21.
+ * CFN/KCE/KCS/FTE are v21-specific; the remaining tables reuse the v18
+ * layout unchanged.
+ */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/*
+ * CTE (v22): per-category enable bits; extends the v18 layout with an
+ * additional 'rrb' flag.
+ */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;	/* new in v22 */
+		} b;
+	};
+};
+
+/* CCE (v22): fixed table of 4 entries (see hw_mod_cat_cce_flush). */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/*
+ * CCS (v22): per-entry enable/value pairs for each downstream module plus
+ * three side-band type/data pairs.  Fixed table of 1024 entries (see
+ * hw_mod_cat_ccs_flush).
+ */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/*
+ * Collected shadow-register tables for the CAT module, version 22.
+ * Adds the CCE and CCS tables on top of the v21 layout; CTE gains the
+ * v22 variant, other tables reuse v18/v21 layouts.
+ */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Ask the backend whether the FLM module exists in the running FPGA. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the FLM module version and resource counts,
+ * then allocate the version-specific register cache in one contiguous
+ * area (anchored at be->flm.base; released by hw_mod_flm_free()).
+ *
+ * Returns 0 on success, the error_resource_count()/error_unsup_ver()
+ * result for bad backend data, or -1 if the cache allocation fails.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* Report the resource actually queried (was "flm_variant"). */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	/*
+	 * v20 keeps the full v17 layout and appends 12 extra counters,
+	 * hence the 26 vs 38 entry counts below.
+	 */
+	switch (_VER_) {
+	case 17:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM register cache allocated by hw_mod_flm_alloc().
+ * All per-version tables live in the single area anchored at base.
+ * free(NULL) is a no-op, so no guard is needed; base is reset so a
+ * double call is harmless.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM module: clear the cached register images, set the
+ * default SDRAM split, and flush the control/timeout/scrub/RCP
+ * registers to hardware.  Always returns 0.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	/* NOTE(review): return value ignored; the field/version are known
+	 * valid here, but confirm this cannot fail silently.
+	 */
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Push the cached CONTROL register image to hardware via the backend. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CONTROL field in the cached
+ * register image.  HW_FLM_CONTROL_PRESET_ALL is write-only and fills
+ * the whole control struct with the low byte of *value.
+ * Returns 0, or the error_unsup_field()/error_unsup_ver() result.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:	/* v20 reuses the v17 control layout */
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CONTROL field into the cache (flush separately). */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one CONTROL field from the cache. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Push the cached STATUS image to hardware via the backend. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached STATUS image from hardware via the backend. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one STATUS field in the cache.
+ * Returns 0, or the error_unsup_field()/error_unsup_ver() result.
+ */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:	/* v20 reuses the v17 status layout */
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one STATUS field into the cache (flush separately). */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one STATUS field from the cache (update first to see hardware). */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Push the cached TIMEOUT register to hardware via the backend. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Read/write the single TIMEOUT field (t) in the cache. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the TIMEOUT field into the cache (flush separately). */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the TIMEOUT field from the cache. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Push the cached SCRUB register to hardware via the backend. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Read/write the single SCRUB field (i) in the cache. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the SCRUB field into the cache (flush separately). */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the SCRUB field from the cache. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_BIN register to hardware via the backend. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Read/write the single LOAD_BIN field (bin) in the cache. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_BIN field into the cache (flush separately). */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_BIN field from the cache. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_PPS register to hardware via the backend. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Read/write the single LOAD_PPS field (pps) in the cache. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_PPS field into the cache (flush separately). */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_PPS field from the cache. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_LPS register to hardware via the backend. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Read/write the single LOAD_LPS field (lps) in the cache. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_LPS field into the cache (flush separately). */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_LPS field from the cache. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Push the cached LOAD_APS register to hardware via the backend. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Read/write the single LOAD_APS field (aps) in the cache. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_APS field into the cache (flush separately). */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_APS field from the cache. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Push the cached PRIO registers to hardware via the backend. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read/write one PRIO field (limit/flow-type pair for each of the
+ * four priority levels) in the cache.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PRIO field into the cache (flush separately). */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one PRIO field from the cache. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Push [start_idx, start_idx + count) of the cached PST table to
+ * hardware.  count == ALL_ENTRIES flushes the whole table.
+ * NOTE(review): a negative start_idx is not rejected here — the cast
+ * to unsigned catches most cases, but confirm callers never pass one.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read/write one field of PST entry @index in the cache.
+ * HW_FLM_PST_PRESET_ALL is write-only and fills the whole entry with
+ * the low byte of *value.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of PST entry @index into the cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of PST entry @index from the cache. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Push [start_idx, start_idx + count) of the cached RCP (recipe)
+ * table to hardware.  count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read/write one field of RCP entry @index in the cache.
+ * HW_FLM_RCP_PRESET_ALL is write-only; HW_FLM_RCP_MASK copies the
+ * whole 10-word mask array and so expects *value to point at an
+ * array of that size (use hw_mod_flm_rcp_set_mask()).
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write the 10-word RCP mask array of entry @index.
+ * NOTE(review): the guard reports error_unsup_ver() for a wrong
+ * field; error_unsup_field() looks like the intended error here.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Write one scalar RCP field; the mask array must go through
+ * hw_mod_flm_rcp_set_mask() instead (see NOTE above on the error code).
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field (including the mask array) from the cache. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached BUF_CTRL registers from hardware via the backend. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field from the cache; writing is not supported. */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public getter for a BUF_CTRL field (update first to see hardware). */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh all cached statistics counters from hardware via the backend. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one statistics counter from the cache.  Counters present since
+ * v17 are handled in the outer switch; counters added in v20 fall
+ * through to the nested switch, which the _VER_ < 18 check keeps
+ * unreachable on v17 hardware.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters; reject them on older versions. */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, passed as a word array)
+ * to hardware via the backend.  Returns the backend's result, or an
+ * error for unknown fields/versions.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Fetch up to @word_cnt words of flow-info records into *value.
+ * NOTE(review): the backend's return value is discarded here (unlike
+ * the learn-data path above) — confirm that is intentional.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Fetch one status record (flm_v17_sta_data_s worth of words) into
+ * *value.  Backend return value is discarded, as in inf_data above.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay mapping two packed 28-bit member indices onto 64 bits.
+ * The FLM_V17_MBR_IDx macros apply it at byte offsets 0 and 7 of the
+ * 14-byte mbr_idx array in flm_v17_lrn_data_s (4 x 28 bits total).
+ * NOTE(review): casting a byte pointer to this struct relies on the
+ * enclosing data being packed and on unaligned access being tolerated
+ * — confirm on the target platforms.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* CONTROL register image: enable/init plus per-event select flags. */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* STATUS register image (read from hardware). */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* TIMEOUT register image. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* SCRUB register image. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* LOAD_BIN register image. */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+/* LOAD_PPS register image. */
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+/* LOAD_LPS register image. */
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+/* LOAD_APS register image. */
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* PRIO register image: limit/flow-type pair per priority level 0-3. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* PST table entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* RCP (recipe) table entry; one per FLM category. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* BUF_CTRL register image: record-buffer availability counters. */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * Wire-format records exchanged with the FLM engine.  Packed to 1-byte
+ * alignment: the bit-position comments (hi:lo (width)) document the
+ * exact hardware layout and must match the declared field widths.
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record read back from the engine. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Status record read back from the engine. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* Single-word statistics counter images, one struct per register. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v17 register map: pointers into the contiguous cache allocated
+ * by hw_mod_flm_alloc() (see callocate_mod() entry order there).
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20 statistics counter cache entries, added on top of the v17
+ * set: sta/inf record counters, packet (pck) hit/miss/unhandled/
+ * discard counters, cache (csh) counters and CUC counters.
+ * Each struct caches one 32-bit counter value.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM module v20 register cache layout.
+ * v20 is a superset of v17: all shared registers reuse the v17 cache
+ * structs (so v17 code paths can operate on them unchanged), with the
+ * new v20-only statistics counters appended at the end.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Query the backend whether the HSH (hash) module is present in the FPGA. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Read the HSH module version from the backend and allocate the RCP
+ * record cache matching that version.
+ *
+ * Returns 0 on success, -1 on allocation failure, or the
+ * error_unsup_ver() result for an unknown module version.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* One contiguous allocation; released via be->hsh.base. */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the HSH module cache.
+ * All version-specific tables live in the single 'base' allocation made
+ * by callocate_mod() in hw_mod_hsh_alloc(), so one free() releases
+ * everything. free(NULL) is a no-op, so no guard is needed; the pointer
+ * is cleared to make repeated calls safe (no double-free).
+ */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Zero the whole HSH cache and write it through to HW by flushing
+ * every RCP record. Returns the flush result.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write cached RCP records [start_idx, start_idx + count) to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ * NOTE(review): a negative start_idx is not rejected here — presumably
+ * callers only pass non-negative indices; confirm.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Get or set one field of an HSH RCP cache record.
+ *
+ * @param field    field selector (HW_HSH_RCP_*)
+ * @param index    RCP record index
+ * @param word_off word offset into array-valued fields (masks)
+ * @param value    input (set) or output (get) value
+ * @param get      non-zero reads the cached value, zero writes it
+ *
+ * Only the cache is touched; hw_mod_hsh_rcp_flush() writes to HW.
+ * Returns 0 on success or an error_* code.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * mac_port_mask[] has HSH_RCP_MAC_PORT_MASK_SIZE
+			 * elements, so word_off == SIZE is already out of
+			 * bounds: use '>=' (was '>', an off-by-one that
+			 * allowed a one-past-the-end access).
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* Same off-by-one fix: word_mask[] has
+			 * HSH_RCP_WORD_MASK_SIZE elements.
+			 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of HSH RCP record 'index' into the cache (no HW I/O). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one field of HSH RCP record 'index' from the cache into *value. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * HSH v5 RCP (recipe) cache record: hash key extraction layout
+ * (QW0/QW4/W8/W9 extractor positions and offsets), hash word masks,
+ * seed and hash type selection.
+ * The array sizes must match HSH_RCP_MAC_PORT_MASK_SIZE (4) and
+ * HSH_RCP_WORD_MASK_SIZE (10) used for bounds checks in hw_mod_hsh.c.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* HSH v5 module cache: one RCP table (be->hsh.nb_rcp entries). */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Query the backend whether the HST (header stripping) module is present. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Read the HST module version and category count from the backend and
+ * allocate the RCP record cache accordingly.
+ *
+ * Returns 0 on success, -1 on allocation failure, or an error_* code
+ * for a bad resource count / unsupported version.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		/* One contiguous allocation; released via be->hst.base. */
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the HST module cache (single 'base' allocation from
+ * hw_mod_hst_alloc()). free(NULL) is a no-op, so the former NULL guard
+ * was redundant; the pointer is cleared to make repeated calls safe.
+ */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Zero the whole HST cache and write it through to HW by flushing all
+ * RCP records. Returns the flush result.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write cached HST RCP records [start_idx, start_idx + count) to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Get or set one field of an HST RCP cache record.
+ *
+ * @param field field selector (HW_HST_RCP_*)
+ * @param index RCP record index
+ * @param value input (set) or output (get) value; for FIND/COMPARE it
+ *              doubles as the word offset argument
+ * @param get   non-zero reads the cached value, zero writes it
+ *
+ * Only the cache is touched; hw_mod_hst_rcp_flush() writes to HW.
+ * Returns 0 on success or an error_* code.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Bug fix: the result of find_equal_index() was
+			 * discarded, so the rv check below was dead and
+			 * lookup errors were silently ignored.
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of HST RCP record 'index' into the cache (no HW I/O). */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of HST RCP record 'index' from the cache into *value. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * HST v2 RCP (recipe) cache record: header strip range (start/end
+ * dynamic-offset selectors plus byte offsets) and three header
+ * modifier slots (command, position, value).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* HST v2 module cache: one RCP table (nb_hst_rcp_categories entries). */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Query the backend whether the IOA module is present in the FPGA. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Read the IOA module version and resource counts from the backend and
+ * allocate the RCP, special-TPID and ROA-EPP caches in one base
+ * allocation.
+ *
+ * Returns 0 on success, -1 on allocation failure, or an error_* code
+ * for a bad resource count / unsupported version.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the IOA module cache (single 'base' allocation from
+ * hw_mod_ioa_alloc() covering rcp, tpid and roa_epp). free(NULL) is a
+ * no-op, so the former NULL guard was redundant; the pointer is cleared
+ * to make repeated calls safe.
+ */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Zero the whole IOA cache, program default custom TPIDs (0x8200 /
+ * 0x8300) and write everything through to HW.
+ * NOTE(review): the flush return values are ignored and 0 is always
+ * returned, unlike hw_mod_hsh_reset() which propagates the flush
+ * result — confirm whether errors should be propagated here too.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;
+}
+
+/*
+ * Write cached IOA RCP records [start_idx, start_idx + count) to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Get or set one field of an IOA RCP cache record.
+ *
+ * @param field field selector (HW_IOA_RCP_*)
+ * @param index RCP record index
+ * @param value input (set) or output (get) value; for FIND/COMPARE it
+ *              doubles as the word offset argument
+ * @param get   non-zero reads the cached value, zero writes it
+ *
+ * Only the cache is touched; hw_mod_ioa_rcp_flush() writes to HW.
+ * Returns 0 on success or an error_* code.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* set-only: fill the whole record with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of IOA RCP record 'index' into the cache (no HW I/O). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of IOA RCP record 'index' from the cache into *value. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Write the cached special-TPID configuration to HW. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one of the cached custom TPID values (set-only; there is no
+ * matching getter). hw_mod_ioa_config_flush() writes the cache to HW.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write cached ROA EPP entries [start_idx, start_idx + count) to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Get or set one field of a ROA EPP cache entry.
+ *
+ * @param field field selector (HW_IOA_ROA_EPP_*)
+ * @param index EPP entry index
+ * @param value input (set) or output (get) value; for FIND/COMPARE it
+ *              doubles as the word offset argument
+ * @param get   non-zero reads the cached value, zero writes it
+ *
+ * Only the cache is touched; hw_mod_ioa_roa_epp_flush() writes to HW.
+ * Returns 0 on success or an error_* code.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* set-only: fill the whole entry with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of ROA EPP entry 'index' into the cache (no HW I/O). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of ROA EPP entry 'index' from the cache into *value. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/*
+ * IOA v4 RCP (recipe) cache record: tunnel/VLAN pop-push actions,
+ * VLAN tag fields and optional RX queue override.
+ */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID values programmed via hw_mod_ioa_config_set(). */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress path entry: tunnel push selection and TX port. */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* IOA v4 module cache: RCP table, single TPID config, ROA EPP table. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Query the backend whether the KM (key matcher) module is present. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Read the KM module version and resource counts from the backend and
+ * allocate the RCP/CAM/TCAM/TCI/TCQ caches in one base allocation.
+ *
+ * Returns 0 on success, -1 on allocation failure, or an error_* code
+ * for a bad resource count / unsupported version.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/*
+		 * Use the named mask-size macros instead of duplicating the
+		 * magic values 12/6 — these must stay in sync with the
+		 * bounds checks in hw_mod_km_rcp_mod(). mask_a uses the
+		 * double-word extractor layout, hence the D_A size.
+		 */
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the KM module cache (single 'base' allocation from
+ * hw_mod_km_alloc() covering rcp/cam/tcam/tci/tcq). free(NULL) is a
+ * no-op, so the former NULL guard was redundant; the pointer is cleared
+ * to make repeated calls safe.
+ */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Zero the whole KM cache and write it through to HW: flush RCP, CAM,
+ * TCAM (after a per-bank hard reset), TCI and TCQ.
+ * NOTE(review): the individual flush return values are ignored and 0
+ * is always returned — confirm whether errors should be propagated.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write cached KM RCP records [start_idx, start_idx + count) to HW.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Get or set one field of a KM RCP cache record.
+ *
+ * @param field    field selector (HW_KM_RCP_*)
+ * @param index    RCP record index
+ * @param word_off word offset into array-valued fields (masks)
+ * @param value    input (set) or output (get) value
+ * @param get      non-zero reads the cached value, zero writes it
+ *
+ * Only the cache is touched; hw_mod_km_rcp_flush() writes to HW.
+ * Returns 0 on success or an error_* code.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * mask_d_a[] has KM_RCP_MASK_D_A_SIZE elements, so
+			 * word_off == SIZE is out of bounds: use '>=' (was
+			 * '>', allowing a one-past-the-end access — same
+			 * off-by-one pattern fixed in hw_mod_hsh.c).
+			 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* mask_b[] has KM_RCP_MASK_B_SIZE elements. */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one KM RCP (recipe) field into the shadow cache.
+ * Thin wrapper around hw_mod_km_rcp_mod() with get == 0.
+ * Returns 0 on success or a negative error code.
+ */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/*
+ * Read one KM RCP (recipe) field from the shadow cache.
+ * Thin wrapper around hw_mod_km_rcp_mod() with get == 1.
+ * Returns 0 on success or a negative error code.
+ */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush KM CAM shadow entries to hardware via the backend interface.
+ * start_bank/start_record select the first entry; count == ALL_ENTRIES
+ * expands to every record in every bank.
+ * Returns a negative error code if the range exceeds the CAM geometry.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* entries are laid out bank-major in the flat shadow array */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one field of a KM CAM shadow entry selected by
+ * bank/record.  value is the source (set) or destination (get).
+ * Only FPGA module version 7 is handled; other versions are rejected.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* bank-major layout of the flat shadow array */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* write-only: byte-fill the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Write one KM CAM field into the shadow cache (get == 0).
+ * Returns 0 on success or a negative error code.
+ */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/*
+ * Read one KM CAM field from the shadow cache (get == 1).
+ * Returns 0 on success or a negative error code.
+ */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush KM TCAM shadow entries to hardware.  Each bank holds 4 * 256
+ * entries (4 byte lanes x 256 byte values).  count == ALL_ENTRIES
+ * flushes all banks; ALL_BANK_ENTRIES flushes exactly one bank.
+ * NOTE(review): the iface call always starts at byte 0 / byte_val 0 of
+ * start_bank (the two literal 0 arguments) — confirm callers never need
+ * a mid-bank start.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Read or write a KM TCAM shadow entry addressed by (bank, byte lane,
+ * byte value).  value_set points to the three 32-bit words of one TCAM
+ * line.  Writes mark the entry dirty only when any word actually
+ * changes, so a later flush can skip untouched lines.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			/* write-only: preset every line of the bank */
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			/* NOTE(review): recomputes the same value as
+			 * start_index above (modulo signedness) — could
+			 * reuse it.
+			 */
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Write one TCAM line (three words) into the shadow cache (get == 0).
+ */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/*
+ * Read one TCAM line (three words) from the shadow cache (get == 1).
+ */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush KM TCI shadow entries to hardware.  TCI entries share the TCAM
+ * geometry (nb_tcam_banks x nb_tcam_bank_width).
+ * count == ALL_ENTRIES expands to the whole table.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one KM TCI shadow field (color or flow type) selected
+ * by bank/record.  Only FPGA module version 7 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Write one KM TCI field into the shadow cache (get == 0).
+ */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/*
+ * Read one KM TCI field from the shadow cache (get == 1).
+ */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush KM TCQ shadow entries to hardware.  TCQ entries share the TCAM
+ * geometry (nb_tcam_banks x nb_tcam_bank_width).
+ * count == ALL_ENTRIES expands to the whole table.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read or write one KM TCQ shadow field (bank mask or qualifier)
+ * selected by bank/record.  Only FPGA module version 7 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Write one KM TCQ field into the shadow cache (get == 0).
+ * Unlike the other *_set helpers, value is passed by pointer.
+ */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/*
+ * Read one KM TCQ field from the shadow cache (get == 1).
+ */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/* KM v7 recipe (RCP): per-category key-extraction and lookup setup.
+ * Word/dword selectors (qw*, dw*, sw*) pick key material; *_dyn fields
+ * select a dynamic offset base and *_ofs are signed byte offsets.
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* KM v7 CAM entry: six key words (w0..w5) and six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* KM v7 TCAM line: three data words plus a software-only dirty flag
+ * used to skip unchanged lines at flush time.
+ */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* KM v7 TCI entry: color tag and flow-type for a TCAM match. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* KM v7 TCQ entry: bank mask and qualifier for a TCAM match. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Shadow-cache pointers for all KM v7 tables; storage is allocated as
+ * one block by callocate_mod() in hw_mod_km_alloc().
+ */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB FPGA module is present. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query PDB module version and resource counts from the backend and
+ * allocate the shadow cache (RCP table plus one config record) in a
+ * single callocate_mod() block.
+ * Returns 0 on success, -1 on allocation failure, or a negative error
+ * code for unsupported versions/counts.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the PDB shadow cache allocated by hw_mod_pdb_alloc().
+ * free(NULL) is a no-op per the C standard, so no guard is needed;
+ * the pointer is cleared to make repeated calls safe (no double-free).
+ */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	free(be->pdb.base);
+	be->pdb.base = NULL;
+}
+
+/*
+ * Reset the PDB module: zero the shadow cache, then flush the cleared
+ * RCP table and config record to hardware.  Error codes from the two
+ * flushes are OR-combined into the return value.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/*
+ * Flush PDB RCP shadow entries [start_idx, start_idx + count) to
+ * hardware.  count == ALL_ENTRIES expands to the whole table.
+ */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Read or write one PDB RCP shadow field.  Besides plain field access
+ * via get_set()/get_set_signed(), FIND locates an entry equal to the
+ * one at *value, and COMPARE checks two entries for equality.
+ * Only FPGA module version 9 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			/* write-only: byte-fill the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			/* *_REL offsets are signed relative offsets */
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one PDB RCP field into the shadow cache (get == 0).
+ */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/*
+ * Read one PDB RCP field from the shadow cache (get == 1).
+ */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the single PDB config record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set one field of the PDB config shadow record (set-only; there is no
+ * matching getter).  Only FPGA module version 9 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/* PDB v9 recipe: packet descriptor builder setup per category.
+ * ofs*_rel fields are signed relative offsets paired with the *_dyn
+ * dynamic offset selectors.
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* PDB v9 module-wide configuration (single record). */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Shadow-cache pointers for PDB v9; storage is allocated as one block
+ * by callocate_mod() in hw_mod_pdb_alloc().
+ */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL FPGA module is present. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query QSL module version and resource counts from the backend and
+ * allocate the shadow cache (RCP, QST, QEN and UNMQ tables) in a
+ * single callocate_mod() block.  QEN and UNMQ have fixed sizes.
+ * Returns 0 on success, -1 on allocation failure, or a negative error
+ * code for unsupported versions/counts.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the QSL shadow cache allocated by hw_mod_qsl_alloc().
+ * free(NULL) is a no-op per the C standard, so no guard is needed;
+ * the pointer is cleared to make repeated calls safe (no double-free).
+ */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	free(be->qsl.base);
+	be->qsl.base = NULL;
+}
+
+/*
+ * Reset the QSL module: zero the shadow cache, then flush all cleared
+ * tables to hardware.
+ *
+ * Fixes vs. original: flush return codes were silently discarded and 0
+ * was always returned, inconsistent with hw_mod_pdb_reset() which
+ * OR-combines them; the UNMQ flush bypassed the bounds-checked
+ * hw_mod_qsl_unmq_flush() wrapper with a magic 256 instead of
+ * ALL_ENTRIES (the wrapper expands ALL_ENTRIES to QSL_QNMQ_ENTRIES,
+ * which is the same 256 entries).
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	err |= hw_mod_qsl_unmq_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush QSL RCP shadow entries [start_idx, start_idx + count) to
+ * hardware.  count == ALL_ENTRIES expands to the whole table.
+ */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write one QSL RCP shadow field.  Besides plain field access
+ * via get_set(), FIND locates an entry equal to the one at *value and
+ * COMPARE checks two entries for equality.
+ * Only FPGA module version 7 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			/* write-only: byte-fill the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one QSL RCP field into the shadow cache (get == 0).
+ */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/*
+ * Read one QSL RCP field from the shadow cache (get == 1).
+ */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush QSL QST shadow entries [start_idx, start_idx + count) to
+ * hardware.  count == ALL_ENTRIES expands to the whole table.
+ */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write one QSL QST (queue selection table) shadow field.
+ * Only FPGA module version 7 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			/* write-only: byte-fill the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one QSL QST field into the shadow cache (get == 0).
+ */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/*
+ * Read one QSL QST field from the shadow cache (get == 1).
+ */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush QSL QEN (queue enable) shadow entries to hardware.
+ * count == ALL_ENTRIES expands to the whole fixed-size table.
+ *
+ * Fix vs. original: the range check was done in signed arithmetic
+ * ((start_idx + count) > QSL_QEN_ENTRIES), so a negative start_idx
+ * could slip past it.  Reject negatives explicitly and compare in
+ * unsigned, matching the other flush helpers in this file.
+ */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if (start_idx < 0 ||
+	    (unsigned int)(start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read or write one QSL QEN (queue enable) shadow field.
+ * Only FPGA module version 7 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one QSL QEN field into the shadow cache (get == 0).
+ */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/*
+ * Read one QSL QEN field from the shadow cache (get == 1).
+ */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush QSL UNMQ (unmatched-queue) shadow entries to hardware.
+ * count == ALL_ENTRIES expands to the whole fixed-size table.
+ *
+ * Fix vs. original: the range check was done in signed arithmetic
+ * ((start_idx + count) > QSL_QNMQ_ENTRIES), so a negative start_idx
+ * could slip past it.  Reject negatives explicitly and compare in
+ * unsigned, matching the other flush helpers in this file.
+ */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if (start_idx < 0 ||
+	    (unsigned int)(start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/*
+ * Read or write one QSL UNMQ (unmatched-queue) shadow field.
+ * Only FPGA module version 7 is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one QSL UNMQ field into the shadow cache (get == 0).
+ */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/*
+ * Read one QSL UNMQ field from the shadow cache (get == 1).
+ */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* QSL v7 recipe: per-category queue selection setup. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* QSL v7 queue selection table entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* QSL v7 queue enable entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* QSL v7 unmatched-packet queue entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Shadow-cache pointers for all QSL v7 tables; storage is allocated as
+ * one block by callocate_mod() in hw_mod_qsl_alloc().
+ */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC FPGA module is present. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query RMC module version from the backend and allocate the shadow
+ * cache (a single ctrl record).  Version is encoded as
+ * major << 16 | minor, so 0x10003 is v1.3.
+ * Returns 0 on success, -1 on allocation failure, or a negative error
+ * code for unsupported versions.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the RMC shadow cache allocated by hw_mod_rmc_alloc().
+ * free(NULL) is a no-op per the C standard, so no guard is needed;
+ * the pointer is cleared to make repeated calls safe (no double-free).
+ */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	free(be->rmc.base);
+	be->rmc.base = NULL;
+}
+
+/*
+ * Reset the RMC module: zero the shadow cache, preset the control
+ * record to a safe blocking state, then flush it to hardware.
+ *
+ * Fix vs. original: HW_RMC_BLOCK_STATT was set twice (identical calls
+ * before and after HW_RMC_BLOCK_MAC_PORT); the duplicate is removed.
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the single RMC ctrl shadow record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Read or write one field of the single RMC ctrl shadow record.
+ * Only FPGA module version 1.3 (0x10003) is handled.
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one RMC ctrl field into the shadow cache (get == 0).
+ */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/*
+ * Read one RMC ctrl field from the shadow cache (get == 1).
+ */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/*
+ * Shadow of the RMC v1.3 control register; one 32-bit cache word per field.
+ * Field semantics follow the register names (blocking of statistics frames,
+ * keep-alive frames, RPP slices and MAC ports) -- confirm against FPGA docs.
+ */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;
+	uint32_t block_keepa;
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;
+	uint32_t lag_phy_odd_even;
+};
+
+/* Per-version view of the RMC shadow cache (v1.3 has only the ctrl record). */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Ask the backend whether the ROA module exists in this FPGA image. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query the backend for ROA version/resource counts and allocate the shadow
+ * cache for all ROA tables in one contiguous block (callocate_mod).
+ * Returns 0 on success, negative error code otherwise.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): tunnel tables hold one entry per 4 categories --
+	 * presumably 4 flow categories share a tunnel entry; confirm with HW doc.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		/* One allocation, 4 tables: tunhdr, tuncfg, config, lagcfg. */
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the ROA shadow cache (single block owned by be->roa.base).
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to prevent double-free / dangling use.
+ */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset the ROA module: zero the shadow cache, set the default forwarding
+ * configuration and flush every table to hardware.
+ * Fix: previously only the TUNHDR flush result was returned; failures of the
+ * TUNCFG/CONFIG/LAGCFG writes were silently dropped.  All results are now
+ * accumulated (error codes are negative), matching hw_mod_tpe_reset().
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush [start_idx, start_idx + count) TUNHDR cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Access one 32-bit word (word_off) of a cached tunnel-header entry.
+ * HW_ROA_TUNHDR_COMPARE compares entry `index` word-wise against entry
+ * `word_off` (reused as the second index); get != 0 reads, get == 0 writes.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write word word_off of TUNHDR entry index into the shadow cache. */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read word word_off of TUNHDR entry index from the shadow cache. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) TUNCFG cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Access one field of cached TUNCFG entry `index`.
+ * Pseudo-fields: PRESET_ALL byte-fills the whole entry (set only), FIND
+ * locates an entry equal to entry *value, COMPARE compares two entries.
+ * get != 0 reads the field into *value, get == 0 writes *value to it.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one TUNCFG field of entry index into the shadow cache. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one TUNCFG field of entry index from the shadow cache. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Push the cached ROA CONFIG record to hardware through the backend iface. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Access one ROA CONFIG field in the shadow cache.
+ * get != 0: copy the cached field into *value; get == 0: store *value.
+ * Only module version 6 is supported.
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	uint32_t *reg;
+
+	if (_VER_ != 6)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	/* Resolve the field to its cache location, then do one get/set. */
+	switch (field) {
+	case HW_ROA_CONFIG_FWD_RECIRCULATE:
+		reg = &be->roa.v6.config->fwd_recirculate;
+		break;
+	case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+		reg = &be->roa.v6.config->fwd_normal_pcks;
+		break;
+	case HW_ROA_CONFIG_FWD_TXPORT0:
+		reg = &be->roa.v6.config->fwd_txport0;
+		break;
+	case HW_ROA_CONFIG_FWD_TXPORT1:
+		reg = &be->roa.v6.config->fwd_txport1;
+		break;
+	case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+		reg = &be->roa.v6.config->fwd_cellbuilder_pcks;
+		break;
+	case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+		reg = &be->roa.v6.config->fwd_non_normal_pcks;
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	get_set(reg, value, get);
+	return 0;
+}
+
+/* Write one CONFIG field into the shadow cache (flush separately). */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Read one CONFIG field from the shadow cache into *value. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) LAGCFG cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Access the single LAGCFG field (TX PHY port) of cached entry `index`.
+ * get != 0 reads the field into *value, get == 0 writes *value to it.
+ * Only module version 6 is supported.
+ */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	if (_VER_ != 6)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	if (field != HW_ROA_LAGCFG_TXPHY_PORT)
+		return error_unsup_field(__func__);
+
+	get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+	return 0;
+}
+
+/* Write one LAGCFG field of entry index into the shadow cache. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one LAGCFG field of entry index from the shadow cache. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* One tunnel-header template: 16 x 32-bit words of raw header data. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/*
+ * Tunnel configuration entry (ROA v6); one 32-bit cache word per register
+ * field.  Field meanings follow the register names (checksum/length update
+ * flags, LAG index, recirculation control) -- confirm against FPGA docs.
+ */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;
+	uint32_t ipcs_precalc;
+	uint32_t iptl_upd;
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Global ROA forwarding configuration (single record, see hw_mod_roa_reset). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry: the TX PHY port for this LAG index. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/*
+ * Per-version view of the ROA shadow cache; pointers index into the single
+ * contiguous allocation made by hw_mod_roa_alloc() (callocate_mod).
+ */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Ask the backend whether the SLC module exists in this FPGA image. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the SLC version and allocate the shadow cache for
+ * the RCP table (one entry per flow category).  Returns 0 on success.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC shadow cache.  free(NULL) is a no-op, so no guard is
+ * needed; the pointer is cleared to prevent double-free / dangling use.
+ */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Reset SLC: zero the shadow cache and flush the whole RCP table to HW. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) SLC RCP cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Access one field of cached SLC RCP entry `index`.
+ * Pseudo-fields: PRESET_ALL byte-fills the whole entry (set only), FIND
+ * locates an entry equal to entry *value, COMPARE compares two entries.
+ *
+ * Fix: the entry size is sizeof(struct slc_v1_rcp_s).  The code previously
+ * used sizeof(struct hw_mod_slc_v1_s) -- the pointer-holder struct, which is
+ * smaller -- so PRESET_ALL filled only part of the entry and FIND/COMPARE
+ * walked the array with the wrong stride.  Sibling modules (ROA, TPE) use
+ * the entry struct, as done here.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC RCP field of entry index into the shadow cache. */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC RCP field of entry index from the shadow cache. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Ask the backend whether the SLC LR module exists in this FPGA image. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the SLC LR version and allocate the shadow cache for
+ * the RCP table (one entry per flow category).  Returns 0 on success.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC LR shadow cache.  free(NULL) is a no-op, so no guard is
+ * needed; the pointer is cleared to prevent double-free / dangling use.
+ */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Reset SLC LR: zero the shadow cache and flush the whole RCP table to HW. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) SLC LR RCP cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Access one field of cached SLC LR RCP entry `index`.
+ * Pseudo-fields: PRESET_ALL byte-fills the whole entry (set only), FIND
+ * locates an entry equal to entry *value, COMPARE compares two entries.
+ *
+ * Fix: the entry size is sizeof(struct slc_lr_v2_rcp_s).  The code
+ * previously used sizeof(struct hw_mod_slc_lr_v2_s) -- the pointer-holder
+ * struct, which is smaller -- so PRESET_ALL filled only part of the entry
+ * and FIND/COMPARE walked the array with the wrong stride.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC LR RCP field of entry index into the shadow cache. */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC LR RCP field of entry index from the shadow cache. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/*
+ * SLC LR recipe entry; one cache word per register field.
+ * tail_ofs is signed (accessed via get_set_signed in hw_mod_slc_lr.c).
+ */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;
+	uint32_t pcap;
+};
+
+/* Per-version view of the SLC LR shadow cache (v2 has only the RCP table). */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/*
+ * SLC recipe entry; one cache word per register field.
+ * tail_ofs is signed (accessed via get_set_signed in hw_mod_slc.c).
+ */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;
+	uint32_t pcap;
+};
+
+/* Per-version view of the SLC shadow cache (v1 has only the RCP table). */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Ask the backend whether the TPE module exists in this FPGA image. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query the backend for TPE version/resource counts and allocate the shadow
+ * cache for all TPE sub-module tables in one contiguous block.
+ * v2 adds the IFR (fragmentation) tables; their count is only queried when
+ * the version supports them.  Returns 0 on success, negative otherwise.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		/* 8 tables: rpp/ins/rpl(rcp,ext,rpl)/cpy/hfu/csu */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		/* 10 tables: v1 set plus rpp_ifr_rcp and ifr_rcp */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the TPE shadow cache.  free(NULL) is a no-op, so no guard is
+ * needed; the pointer is cleared to prevent double-free / dangling use.
+ */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Reset TPE: zero the shadow cache and flush every table to hardware.
+ * Error codes (negative) are OR-accumulated, so the result is nonzero if
+ * any flush failed.  IFR tables exist only from version 2.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) RPP_IFR RCP cache entries to HW.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/*
+ * Access one field (EN or MTU) of cached RPP_IFR RCP entry `index`.
+ * get != 0 reads the field into *value, get == 0 writes *value to it.
+ * Only module version 2 carries the IFR tables.
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	uint32_t *reg;
+
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	if (_VER_ != 2)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	switch (field) {
+	case HW_TPE_IFR_RCP_EN:
+		reg = &be->tpe.v2.rpp_ifr_rcp[index].en;
+		break;
+	case HW_TPE_IFR_RCP_MTU:
+		reg = &be->tpe.v2.rpp_ifr_rcp[index].mtu;
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	get_set(reg, value, get);
+	return 0;
+}
+
+/* Write one RPP_IFR RCP field of entry index into the shadow cache. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP_IFR RCP field of entry index from the shadow cache. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) RPP RCP cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field of cached RPP RCP entry `index` (v1 and v2 share the
+ * layout, so both versions use the v1 pointer).  Pseudo-fields PRESET_ALL /
+ * FIND / COMPARE behave as in the other hw_mod dispatchers.
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP RCP field of entry index into the shadow cache. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP RCP field of entry index from the shadow cache. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) IFR RCP cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field (EN or MTU) of cached IFR RCP entry `index`.
+ * get != 0 reads the field into *value, get == 0 writes *value to it.
+ * Only module version 2 carries the IFR tables.
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IFR RCP field of entry index into the shadow cache. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IFR RCP field of entry index from the shadow cache. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+/*
+ * Flush [start_idx, start_idx + count) INS RCP cache entries to hardware.
+ * count == ALL_ENTRIES expands to the full table size.
+ */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field of cached INS RCP entry `index` (v1 and v2 share the
+ * layout, so both versions use the v1 pointer).  Pseudo-fields PRESET_ALL /
+ * FIND / COMPARE behave as in the other hw_mod dispatchers.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one INS RCP field of entry index into the shadow cache. */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) RPL recipes to the backend. */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t capacity = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/* Read (get != 0) or write one RPL recipe field in the shadow table. */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	struct tpe_v1_rpl_v2_rcp_s *rcp;
+	int res;
+
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	/* Versions 1 and 2 share the v1 record layout. */
+	if (_VER_ != 1 && _VER_ != 2)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	rcp = &be->tpe.v1.rpl_rcp[index];
+	switch (field) {
+	case HW_TPE_PRESET_ALL:
+		/* Byte-fill the whole record; write-only pseudo field. */
+		if (get)
+			return error_unsup_field(__func__);
+		memset(rcp, (uint8_t)*value, sizeof(*rcp));
+		break;
+	case HW_TPE_FIND:
+		res = find_equal_index(be->tpe.v1.rpl_rcp,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+			be->tpe.nb_rcp_categories, value, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_COMPARE:
+		res = do_compare_indexes(be->tpe.v1.rpl_rcp,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+			be->tpe.nb_rcp_categories, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_RPL_RCP_DYN:
+		get_set(&rcp->dyn, value, get);
+		break;
+	case HW_TPE_RPL_RCP_OFS:
+		get_set(&rcp->ofs, value, get);
+		break;
+	case HW_TPE_RPL_RCP_LEN:
+		get_set(&rcp->len, value, get);
+		break;
+	case HW_TPE_RPL_RCP_RPL_PTR:
+		get_set(&rcp->rpl_ptr, value, get);
+		break;
+	case HW_TPE_RPL_RCP_EXT_PRIO:
+		get_set(&rcp->ext_prio, value, get);
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/* Flush [start_idx, start_idx + count) RPL extension entries to the backend. */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t capacity = be->tpe.nb_rpl_ext_categories;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/* Read (get != 0) or write one RPL extension field in the shadow table. */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	struct tpe_v1_rpl_v2_ext_s *ext;
+	int res;
+
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	/* Versions 1 and 2 share the v1 record layout. */
+	if (_VER_ != 1 && _VER_ != 2)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	ext = &be->tpe.v1.rpl_ext[index];
+	switch (field) {
+	case HW_TPE_PRESET_ALL:
+		/* Byte-fill the whole record; write-only pseudo field. */
+		if (get)
+			return error_unsup_field(__func__);
+		memset(ext, (uint8_t)*value, sizeof(*ext));
+		break;
+	case HW_TPE_FIND:
+		res = find_equal_index(be->tpe.v1.rpl_ext,
+			sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+			be->tpe.nb_rpl_ext_categories, value, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_COMPARE:
+		res = do_compare_indexes(be->tpe.v1.rpl_ext,
+			sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+			be->tpe.nb_rpl_ext_categories, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_RPL_EXT_RPL_PTR:
+		get_set(&ext->rpl_ptr, value, get);
+		break;
+	case HW_TPE_RPL_EXT_META_RPL_LEN:
+		get_set(&ext->meta_rpl_len, value, get);
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/* Flush [start_idx, start_idx + count) RPL replace-data entries. */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t depth = be->tpe.nb_rpl_depth;
+
+	if (count == ALL_ENTRIES)
+		count = depth;
+	if ((unsigned int)(start_idx + count) > depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/* Read (get != 0) or write one RPL replace-data entry in the shadow table. */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	struct tpe_v1_rpl_v2_rpl_s *rpl;
+	int res;
+
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	/* Versions 1 and 2 share the v1 record layout. */
+	if (_VER_ != 1 && _VER_ != 2)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	rpl = &be->tpe.v1.rpl_rpl[index];
+	switch (field) {
+	case HW_TPE_PRESET_ALL:
+		/* Byte-fill the whole record; write-only pseudo field. */
+		if (get)
+			return error_unsup_field(__func__);
+		memset(rpl, (uint8_t)*value, sizeof(*rpl));
+		break;
+	case HW_TPE_FIND:
+		res = find_equal_index(be->tpe.v1.rpl_rpl,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+			be->tpe.nb_rpl_depth, value, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_COMPARE:
+		res = do_compare_indexes(be->tpe.v1.rpl_rpl,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+			be->tpe.nb_rpl_depth, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_RPL_RPL_VALUE:
+		/* The entry is a 4-word (16-byte) chunk; copy it whole. */
+		if (get)
+			memcpy(value, rpl->value, sizeof(rpl->value));
+		else
+			memcpy(rpl->value, value, sizeof(rpl->value));
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+/* Note: takes a pointer, since VALUE is wider than a single word. */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) CPY recipes (one table per writer). */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/* Read (get != 0) or write one CPY recipe field in the shadow table. */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	struct tpe_v1_cpy_v1_rcp_s *rcp;
+	int res;
+
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	/* Versions 1 and 2 share the v1 record layout. */
+	if (_VER_ != 1 && _VER_ != 2)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	rcp = &be->tpe.v1.cpy_rcp[index];
+	switch (field) {
+	case HW_TPE_PRESET_ALL:
+		/* Byte-fill the whole record; write-only pseudo field. */
+		if (get)
+			return error_unsup_field(__func__);
+		memset(rcp, (uint8_t)*value, sizeof(*rcp));
+		break;
+	case HW_TPE_FIND:
+		res = find_equal_index(be->tpe.v1.cpy_rcp,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+			cpy_size, value, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_COMPARE:
+		res = do_compare_indexes(be->tpe.v1.cpy_rcp,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+			cpy_size, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_CPY_RCP_READER_SELECT:
+		get_set(&rcp->reader_select, value, get);
+		break;
+	case HW_TPE_CPY_RCP_DYN:
+		get_set(&rcp->dyn, value, get);
+		break;
+	case HW_TPE_CPY_RCP_OFS:
+		get_set(&rcp->ofs, value, get);
+		break;
+	case HW_TPE_CPY_RCP_LEN:
+		get_set(&rcp->len, value, get);
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) HFU recipes to the backend. */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t capacity = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write a single HFU recipe field in the shadow table.
+ * Field ids mirror the HW_TPE_HFU_RCP_* enumeration; versions 1 and 2
+ * share the v1 record layout. Returns 0 on success, or an error from the
+ * error_* helpers on bad index / unsupported field or version.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Byte-fill the whole record; write-only pseudo field. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Length field A — the only one with an outer-L4 option. */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* Length field B. */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* Length field C. */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL field. */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* Protocol descriptors and header offsets. */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) CSU recipes to the backend. */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t capacity = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = capacity;
+	if ((unsigned int)(start_idx + count) > capacity)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/* Read (get != 0) or write one CSU recipe field in the shadow table. */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	struct tpe_v1_csu_v0_rcp_s *rcp;
+	int res;
+
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	/* Versions 1 and 2 share the v1 record layout. */
+	if (_VER_ != 1 && _VER_ != 2)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	rcp = &be->tpe.v1.csu_rcp[index];
+	switch (field) {
+	case HW_TPE_PRESET_ALL:
+		/* Byte-fill the whole record; write-only pseudo field. */
+		if (get)
+			return error_unsup_field(__func__);
+		memset(rcp, (uint8_t)*value, sizeof(*rcp));
+		break;
+	case HW_TPE_FIND:
+		res = find_equal_index(be->tpe.v1.csu_rcp,
+			sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+			be->tpe.nb_rcp_categories, value, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_COMPARE:
+		res = do_compare_indexes(be->tpe.v1.csu_rcp,
+			sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+			be->tpe.nb_rcp_categories, get, __func__);
+		if (res)
+			return res;
+		break;
+	case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+		get_set(&rcp->ol3_cmd, value, get);
+		break;
+	case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+		get_set(&rcp->ol4_cmd, value, get);
+		break;
+	case HW_TPE_CSU_RCP_INNER_L3_CMD:
+		get_set(&rcp->il3_cmd, value, get);
+		break;
+	case HW_TPE_CSU_RCP_INNER_L4_CMD:
+		get_set(&rcp->il4_cmd, value, get);
+		break;
+	default:
+		return error_unsup_field(__func__);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &v, 0);
+}
+
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/* RPP v0 recipe record. */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS v1 recipe record; fields map 1:1 to the HW_TPE_INS_RCP_* accessors. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL v2 recipe record; fields map 1:1 to the HW_TPE_RPL_RCP_* accessors. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL v2 extension-table entry (HW_TPE_RPL_EXT_*). */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL v2 replace-data entry: one 4-word chunk (HW_TPE_RPL_RPL_VALUE). */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY v1 recipe record (HW_TPE_CPY_RCP_*). */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* HFU v1 recipe record (HW_TPE_HFU_RCP_*). */
+struct tpe_v1_hfu_v1_rcp_s {
+	/* Length field A — the only one with an outer-L4 option. */
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	/* Length field B. */
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	/* Length field C. */
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	/* TTL field. */
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	/* Protocol descriptors and header offsets. */
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU v0 recipe record (HW_TPE_CSU_RCP_*): per-layer checksum commands. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* Shadow tables for all TPE v1 sub-modules (one pointer per table). */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP v1 IFR recipe record: enable flag plus MTU. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR v1 recipe record: enable flag plus MTU (same layout as RPP IFR). */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* Shadow tables for TPE v2: the v1 tables plus the two IFR tables. */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+/* Maximum number of queues the flow API can address. */
+#define FLOW_MAX_QUEUES 128
+
+/* Max flow_elem items carried by a raw encap/decap action. */
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Pairing of a logical queue id with the underlying HW queue id. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Pattern element types; mirror a subset of RTE_ITEM_... flow item types. */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* Packed 6-byte Ethernet MAC address. */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/* Render a MAC address into buf as "AA:BB:CC:DD:EE:FF" (NUL-terminated). */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	const uint8_t *b = eth_addr->addr_b;
+
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 b[0], b[1], b[2], b[3], b[4], b[5]);
+}
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0,
+	FLOW_MODIFY_ADD,
+	FLOW_MODIFY_SUB,
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* How @src is applied to @dst. */
+	struct flow_action_modify_data dst; /* Destination field descriptor. */
+	struct flow_action_modify_data src; /* Source field/value descriptor. */
+	uint32_t width; /* Number of bits to modify. */
+};
+
+/* Generic action: @conf points at the flow_action_* struct matching @type
+ * (e.g. struct flow_action_jump for FLOW_ACTION_TYPE_JUMP).
+ */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf;
+};
+
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+struct flow_error {
+	enum flow_error_e type;
+	const char *message;
+};
+
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY,
+	FLOW_LAG_SET_ALL,
+	FLOW_LAG_SET_BALANCE,
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		/* Same storage as v6, viewed as two 64-bit words per address. */
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Presumably 4 or 6, selecting the valid union view — confirm against callers. */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type; /* Tunnel type; value semantics defined elsewhere. */
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 6/8] net/ntnic: adds flow logic
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-29 10:17   ` [PATCH v9 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-29 10:17   ` Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug names for resource types; indexed by enum res_type_e (see the
+ * per-entry comments), so the order here must match the enum declaration.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/*
+ * Head of the global NIC device list. The LAG/config entry points below
+ * take base_mtx before traversing it; nic_and_port_to_eth_dev() reads it
+ * unlocked (NOTE(review): confirm that is intentional).
+ */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Error message table, indexed by enum flow_nic_err_msg_e in
+ * flow_nic_set_error(); entry order must match the enum values
+ * (the numeric comments track the index).
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+/*
+ * Translate an internal error code into a flow_error record.
+ * A NULL @error pointer is silently ignored.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+
+	if (!error)
+		return;
+
+	error->message = err_msg[msg].message;
+	if (msg == ERR_SUCCESS)
+		error->type = FLOW_ERROR_SUCCESS;
+	else
+		error->type = FLOW_ERROR_GENERAL;
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free resource of @res_type, probing indices 0, alignment,
+ * 2*alignment, ... The chosen entry is marked used with ref count 1.
+ * Returns the allocated index, or -1 when no free aligned entry exists.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+			i += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+			flow_nic_mark_resource_used(ndev, res_type, i);
+			ndev->res[res_type].ref[i] = 1;
+			return i;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific index @idx of @res_type (ref count set to 1).
+ * Returns 0 on success, -1 if the index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Allocate @num contiguous resources of @res_type; the first index is
+ * aligned to @alignment. All claimed entries get ref count 1.
+ * Returns the first index, or -1 when no contiguous run was found.
+ * NOTE(review): @num == 0 would make resource_count - (num - 1) wrap
+ * (unsigned) — callers only use num > 1 via flow_nic_allocate_fh_resource;
+ * confirm no other caller passes 0.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* Probe the remaining num-1 slots after the free head. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/* Return index @idx of @res_type to the free pool (no ref-count handling;
+ * callers use flow_nic_deref_resource() for ref-counted release).
+ */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an additional reference on an already-allocated resource.
+ * Returns 0 on success, -1 if the ref counter is saturated (UINT32_MAX).
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Drop one reference on an allocated resource; frees it when the count
+ * hits zero. Returns 0 when the resource was freed, 1 while it remains
+ * referenced.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Scan forward from @idx_start for the next in-use resource of @res_type.
+ * Returns its index, or -1 when no used entry remains.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	const unsigned int cnt = ndev->res[res_type].resource_count;
+
+	for (unsigned int idx = idx_start; idx < cnt; idx++) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			return idx;
+	}
+
+	return -1;
+}
+
+/*
+ * Allocate a number flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+/* See the block comment above: allocates @count resources of @res_type for
+ * flow handle @fh — contiguously when count > 1, a single aligned entry
+ * otherwise. Returns 0 on success, -1 on exhaustion.
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the specific index @idx of @res_type on behalf of flow handle @fh.
+ * Returns 0 on success, non-zero if the index is already in use.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	/* NOTE(review): allocation above succeeded, so this only triggers for
+	 * a negative @idx argument — confirm callers never pass one.
+	 */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program HSH recipe @hsh_idx with a predefined hashing algorithm.
+ * HASH_ALGO_5TUPLE configures an IPv6-sized 5-tuple hash with the adaptive
+ * IPv4 mask bit set; anything else (incl. HASH_ALGO_ROUND_ROBIN) leaves the
+ * recipe cleared, which the hardware treats as round-robin.
+ * Always returns 0 (backend write results are not checked here).
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Word masks 0-8 fully enabled; word 9 excluded from the hash. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program HSH recipe @hsh_idx from the RSS field selection @f.
+ * Only three field combinations are supported: C-VLAN, outermost L3
+ * destination IP, and innermost (depth = 1) L3 source IP.
+ * Returns 0 on success, -1 on an unsupported combination or when any
+ * backend write fails.
+ *
+ * Fix vs. the original: the per-case hw_mod_hsh_rcp_set() results are now
+ * accumulated into @res, so the `if (res)` checks actually detect backend
+ * failures (previously only the two preset writes were checked); the
+ * "cardware" typos in the error strings are corrected; and the inner-src
+ * case now logs "inner src" instead of "outer dst".
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN
+		 * provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx,
+					  0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx,
+					  0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner (depth = 1) src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the eth device for (@adapter_no, @port).
+ * Returns NULL when either the adapter or the port is unknown.
+ * NOTE(review): traverses dev_base without taking base_mtx — confirm
+ * callers guarantee the list is stable here.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *ndev;
+	struct flow_eth_dev *edev;
+
+	for (ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			break;
+	}
+
+	if (!ndev)
+		return NULL;
+
+	for (edev = ndev->eth_base; edev; edev = edev->next) {
+		if (edev->port == port)
+			return edev;
+	}
+
+	return NULL;
+}
+
+/* Find the NIC device with the given adapter number; NULL when absent. */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *it;
+
+	for (it = dev_base; it; it = it->next) {
+		if (it->adapter_no == adapter_no)
+			return it;
+	}
+
+	return NULL;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure pairs of physical ports as LAG groups per @port_mask and flush
+ * the RMC control register. Serialized on base_mtx.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
+	 * and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block ingress traffic from the MAC ports selected by @port_mask and flush
+ * the RMC control register. Serialized on base_mtx.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one ROA LAG table entry (TX phy port @value at @index) and flush it. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Apply a LAG configuration command to the adapter's ROA LAG table.
+ * FLOW_LAG_SET_ENTRY writes one entry, FLOW_LAG_SET_ALL writes every 4th
+ * entry starting at (index & 3), FLOW_LAG_SET_BALANCE distributes 4-entry
+ * hash blocks between the two ports according to @value (0..100 = percent
+ * of blocks on port 0). Serialized on base_mtx.
+ * Returns 0 on success, -1 for an unknown adapter or command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* NOTE(review): entry values 1/2 appear to encode ports
+		 * P0/P1 in hardware — confirm against the ROA register spec.
+		 */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow description. Only the inline profile is implemented;
+ * vSwitch-profile devices are rejected with -1.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return NULL;
+	}
+	return flow_create_profile_inline(dev, attr, item, action, error);
+}
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_destroy_profile_inline(dev, flow, error);
+}
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_flush_profile_inline(dev, error);
+}
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_query_profile_inline(dev, flow, action, data, length,
+					 error);
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;
+
+	while (dev) {
+		if (dev == eth_dev) {
+			if (prev)
+				prev->next = dev->next;
+
+			else
+				ndev->eth_base = dev->next;
+			return 0;
+		}
+		prev = dev;
+		dev = dev->next;
+	}
+	return -1;
+}
+
/*
 * Tear down all state created on a NIC device: every eth-port device (which
 * in turn destroys its flows), any stray flows still on the flow list, the
 * profile's flow-management state, and the KM/KCC resource handles.
 * Teardown order matters: ports first, then leftover flows, then management.
 */
static void flow_ndev_reset(struct flow_nic_dev *ndev)
{
	/* Delete all eth-port devices created on this NIC device */
	while (ndev->eth_base)
		flow_delete_eth_dev(ndev->eth_base);

	/* Error check */
	while (ndev->flow_base) {
		/*
		 * Flows surviving port deletion indicates a bookkeeping error;
		 * log each and destroy it anyway so teardown can proceed.
		 */
		NT_LOG(ERR, FILTER,
		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
		       ndev->flow_base);

		/* vSwitch teardown is unimplemented; bail out of the loop */
		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
			return;
		}
		flow_destroy_profile_inline(ndev->flow_base->dev,
					    ndev->flow_base, NULL);
	}

	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
		return;
	}
	done_flow_management_of_ndev_profile_inline(ndev);

	km_free_ndev_resource_management(&ndev->km_res_handle);
	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);

#ifdef FLOW_DEBUG
	/*
	 * free all resources default allocated, initially for this NIC DEV
	 * Is not really needed since the bitmap will be freed in a sec. Therefore
	 * only in debug mode
	 */

	/* Check if all resources has been released */
	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
	for (unsigned int i = 0; i < RES_COUNT; i++) {
		int err = 0;
/* NOTE(review): this nested guard is redundant - we are already inside #ifdef FLOW_DEBUG */
#if defined(FLOW_DEBUG)
		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
#endif
		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
				ii++) {
			/* A live refcount or set allocation bit means a leak */
			int ref = ndev->res[i].ref[ii];
			int used = flow_nic_is_resource_used(ndev, i, ii);

			if (ref || used) {
				NT_LOG(DBG, FILTER,
				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
				       used);
				err = 1;
			}
		}
		if (err)
			NT_LOG(DBG, FILTER,
			       "ERROR - some resources not freed\n");
	}
#endif
}
+
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev)
+		return -1;
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	if (eth_dev)
+		free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
/*
 * Thin wrapper: look up the tunnel definition for a flow stat id / virtual
 * port via the tunnel engine.
 */
int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
			       uint8_t vport)
{
	int res = tunnel_get_definition(tun, flow_stat_id, vport);

	return res;
}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
/*
 * Allocate the allocation bitmap and per-element reference counters for one
 * resource type in a single zeroed allocation.
 * Layout: [BIT_CONTAINER_8_ALIGN(count) bitmap bytes][count uint32_t refs].
 * Returns 0 on success, -1 on allocation failure.
 *
 * NOTE(review): 'ref' is carved out of a uint8_t allocation at offset
 * BIT_CONTAINER_8_ALIGN(count); this assumes that offset is suitably aligned
 * for uint32_t access - confirm the macro rounds to a 4-byte multiple.
 */
static int init_resource_elements(struct flow_nic_dev *ndev,
				  enum res_type_e res_type, uint32_t count)
{
	/* Must not be initialized twice for the same resource type */
	assert(ndev->res[res_type].alloc_bm == NULL);
	/* allocate bitmap and ref counter */
	ndev->res[res_type].alloc_bm =
		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
	if (ndev->res[res_type].alloc_bm) {
		ndev->res[res_type].ref =
			(uint32_t *)&ndev->res[res_type]
			.alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
		ndev->res[res_type].resource_count = count;
		return 0;
	}
	return -1;
}
+
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	if (ndev->res[res_type].alloc_bm)
+		free(ndev->res[res_type].alloc_bm);
+}
+
/* Insert ndev at the head of the global NIC list; guarded by base_mtx. */
static void list_insert_flow_nic(struct flow_nic_dev *ndev)
{
	pthread_mutex_lock(&base_mtx);
	ndev->next = dev_base;
	dev_base = ndev;
	pthread_mutex_unlock(&base_mtx);
}
+
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;
+
+	while (nic_dev) {
+		if (nic_dev == ndev) {
+			if (prev)
+				prev->next = nic_dev->next;
+			else
+				dev_base = nic_dev->next;
+			pthread_mutex_unlock(&base_mtx);
+			return 0;
+		}
+		prev = nic_dev;
+		nic_dev = nic_dev->next;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return -1;
+}
+
/*
 * Create and register a flow NIC device for one physical adapter.
 * Validates the backend interface (only version 1 accepted), initializes the
 * backend, sizes and allocates the per-resource-type management tables from
 * the backend's reported capabilities, then links the device onto the global
 * NIC list. Returns the new device, or NULL on error (everything allocated
 * so far is released via flow_api_done()).
 */
struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
				     const struct flow_api_backend_ops *be_if,
				     void *be_dev)
{
	if (!be_if || be_if->version != 1) {
		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
		return NULL;
	}

	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));

	if (!ndev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
		return NULL;
	}

	/*
	 * To dump module initialization writes use
	 * FLOW_BACKEND_DEBUG_MODE_WRITE
	 * then remember to set it ...NONE afterwards again
	 */
	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);

	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
		goto err_exit;
	ndev->adapter_no = adapter_no;

	/* Cap the addressable in-ports at 256 (port no is carried in 8 bits) */
	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
				 256 :
				 ndev->be.num_rx_ports);

	/*
	 * Free resources in NIC must be managed by this module
	 * Get resource sizes and create resource manager elements
	 */
	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_CFN,
				   ndev->be.cat.nb_cat_funcs))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
		goto err_exit;
	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
				   ndev->be.cat.nb_flow_types))
		goto err_exit;
	if (init_resource_elements(ndev, RES_KM_CATEGORY,
				   ndev->be.km.nb_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
		goto err_exit;
	if (init_resource_elements(ndev, RES_PDB_RCP,
				   ndev->be.pdb.nb_pdb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_QSL_RCP,
				   ndev->be.qsl.nb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_QSL_QST,
				   ndev->be.qsl.nb_qst_entries))
		goto err_exit;
	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_IOA_RCP,
				   ndev->be.ioa.nb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_ROA_RCP,
				   ndev->be.roa.nb_tun_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
				   ndev->be.cat.nb_flow_types))
		goto err_exit;
	if (init_resource_elements(ndev, RES_FLM_RCP,
				   ndev->be.flm.nb_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_HST_RCP,
				   ndev->be.hst.nb_hst_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_TPE_RCP,
				   ndev->be.tpe.nb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_TPE_EXT,
				   ndev->be.tpe.nb_rpl_ext_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_TPE_RPL,
				   ndev->be.tpe.nb_rpl_depth))
		goto err_exit;

	/* may need IPF, COR */

	/* check all defined has been initialized */
	for (int i = 0; i < RES_COUNT; i++)
		assert(ndev->res[i].alloc_bm);

	/* Device mutex must exist before the NIC becomes globally visible */
	pthread_mutex_init(&ndev->mtx, NULL);
	list_insert_flow_nic(ndev);

	return ndev;

err_exit:
	/* flow_api_done() tolerates a partially initialized device */
	if (ndev)
		flow_api_done(ndev);
	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
	return NULL;
}
+
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev) {
+		flow_ndev_reset(ndev);
+
+		/* delete resource management allocations for this ndev */
+		for (int i = 0; i < RES_COUNT; i++)
+			done_resource_elements(ndev, i);
+
+		flow_api_backend_done(&ndev->be);
+		list_remove_flow_nic(ndev);
+		free(ndev);
+	}
+	return 0;
+}
+
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (!ndev) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+	return ndev->be.be_dev;
+}
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	return eth_dev->num_queues;
+}
+
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
+		return flow_get_flm_stats_profile_inline(ndev, data, size);
+	return -1;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
/*
 * Per-resource-type allocation state; one instance per res_type_e entry in
 * flow_nic_dev::res. bm and ref share one allocation made by
 * init_resource_elements().
 */
struct hw_mod_resource_s {
	uint8_t *alloc_bm; /* allocation bitmap, one bit per element */
	uint32_t *ref; /* reference counter for each resource element */
	uint32_t resource_count; /* number of total available entries */
};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
/* Per-protocol hash-field selection bits (bit positions mirror DPDK's
 * RTE_ETH_RSS_* flags - see the note above; verify against the DPDK release
 * in use when mapping).
 */
#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)

/* Convenience mask: hash on any IP header variant */
#define NT_ETH_RSS_IP                                           \
	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
	 NT_ETH_RSS_IPV6_EX)

/*
 * level 1, requests RSS to be performed on the outermost packet
 * encapsulation level.
 */
#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)

/*
 * level 2, requests RSS to be performed on the specified inner packet
 * encapsulation level, from outermost to innermost (lower to higher values).
 */
#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)

/*
 * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
 * to avoid occasional incorrect usage interfacing with higher level
 * framework (e.g. DPDK)
 */
struct nt_eth_rss {
	uint64_t fields; /* OR of the NT_ETH_RSS_* bits above */
};
+
/*
 * One eth-port device on a NIC; created by flow_get_eth_dev() and kept on
 * the owning flow_nic_dev's singly linked eth_base list.
 */
struct flow_eth_dev {
	struct flow_nic_dev *ndev; /* NIC that owns this port device */
	uint8_t port; /* NIC port id */
	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */

	struct flow_queue_id_s
		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */

	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
	struct flow_eth_dev *next; /* next port device on the same NIC */
};
+
/* Hash algorithm selector for flow_nic_set_hasher() */
enum flow_nic_hash_e {
	HASH_ALGO_ROUND_ROBIN = 0,
	HASH_ALGO_5TUPLE,
};
+
+/* registered NIC backends */
struct flow_nic_dev {
	uint8_t adapter_no; /* physical adapter no in the host system */
	uint16_t ports; /* number of in-ports addressable on this NIC */
	enum flow_eth_dev_profile
	flow_profile; /* flow profile this NIC is initially prepared for */
	int flow_mgnt_prepared; /* non-zero once profile modules are set up */

	struct hw_mod_resource_s
		res[RES_COUNT]; /* raw NIC resource allocation table */
	/* Opaque per-engine resource handles owned by the profile code */
	void *flm_res_handle;
	void *km_res_handle;
	void *kcc_res_handle;

	void *flm_mtr_handle;
	void *ft_res_handle;
	void *mtr_stat_handle;
	void *group_handle;

	/* statistics */
	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];

	struct flow_handle
		*flow_base; /* linked list of all flows created on this NIC */
	struct flow_handle *
		flow_base_flm; /* linked list of all FLM flows created on this NIC */

	struct flow_api_backend_s be; /* NIC backend API */
	struct flow_eth_dev *
		eth_base; /* linked list of created eth-port devices on this NIC */
	pthread_mutex_t mtx; /* protects this device's lists and resources */

	int default_qsl_drop_index; /* pre allocated default QSL Drop */
	int default_qsl_discard_index; /* pre allocated default QSL Discard */
	/* RSS hash function settings bitfields correspond to data used for hashing */
	struct nt_eth_rss
		rss_hash_config;
	struct flow_nic_dev *next; /* next NIC linked list */
};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
/*
 * Error codes reported through flow_nic_set_error(). Values are written out
 * explicitly to keep the numbering stable when entries are added.
 */
enum flow_nic_err_msg_e {
	ERR_SUCCESS = 0,
	ERR_FAILED = 1,
	ERR_MEMORY = 2,
	ERR_OUTPUT_TOO_MANY = 3,
	ERR_RSS_TOO_MANY_QUEUES = 4,
	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
	ERR_MATCH_VLAN_TOO_MANY = 9,
	ERR_MATCH_INVALID_IPV6_HDR = 10,
	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
	ERR_ACTION_REPLICATION_FAILED = 16,
	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
	ERR_MATCH_ENTROPY_FAILED = 25,
	ERR_MATCH_CAM_EXHAUSTED = 26,
	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
	ERR_ACTION_UNSUPPORTED = 28,
	ERR_REMOVE_FLOW_FAILED = 29,
	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
	ERR_OUTPUT_INVALID = 33,
	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
	ERR_MATCH_KCC_KEY_CLASH = 36,
	ERR_MATCH_CAT_CAM_FAILED = 37,
	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
	ERR_MSG_NO_MSG /* sentinel: number of defined messages */
};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
/*
 * Set bit x in the byte-addressed bitmap arr.
 * Arguments are captured in temporaries so each is evaluated exactly once.
 */
#define flow_nic_set_bit(arr, x) \
	do { \
		uint8_t *_temp_arr = (arr); \
		size_t _temp_x = (x); \
		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
		(uint8_t)(1 << (_temp_x % 8))); \
	} while (0)
+
+
+
/*
 * Clear bit x in the byte-addressed bitmap arr.
 * FIX: the arr argument was used unparenthesized and without the temporary
 * capture the sibling flow_nic_set_bit uses; both macro-hygiene issues are
 * corrected here so each argument is evaluated exactly once.
 */
#define flow_nic_unset_bit(arr, x) \
	do { \
		uint8_t *_temp_arr = (arr); \
		size_t _temp_x = (x); \
		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
	} while (0)
+
/*
 * Test bit x in the byte-addressed bitmap arr; evaluates to the masked byte
 * (non-zero when set, not normalized to 0/1 - callers use !! for that).
 * FIX: the arr argument was used unparenthesized; it is now parenthesized
 * and captured once, matching flow_nic_set_bit's hygiene.
 */
#define flow_nic_is_bit_set(arr, x) \
	({ \
		const uint8_t *_temp_arr = (arr); \
		size_t _temp_x = (x); \
		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
	})
+
/*
 * Mark resource element 'index' of 'res_type' as allocated in the device's
 * bitmap. Asserts the element was previously free (double-allocation guard).
 * Arguments are captured once via temporaries.
 */
#define flow_nic_mark_resource_used(_ndev, res_type, index) \
	do { \
		struct flow_nic_dev *_temp_ndev = (_ndev); \
		__typeof__(res_type) _temp_res_type = (res_type); \
		size_t _temp_index = (index); \
		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
		dbg_res_descr[_temp_res_type], _temp_index); \
		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
		== 0); \
		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
	} while (0)
+
+
+
/*
 * Mark resource element 'index' of 'res_type' as free in the device's
 * bitmap. No double-free assertion (unlike flow_nic_mark_resource_used).
 */
#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
	do { \
		__typeof__(res_type) _temp_res_type = (res_type); \
		size_t _temp_index = (index); \
		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
		dbg_res_descr[_temp_res_type], _temp_index); \
		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
	} while (0)
+
+
/* 1 when resource element 'index' of 'res_type' is allocated, else 0. */
#define flow_nic_is_resource_used(_ndev, res_type, index) \
	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable/disable a KM flow type bit for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int num_lookups = 4;
+	const int cfn_per_word = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* FTE table index: one 8-bit bitmap word per (flow_type, CFN-word, lookup) */
+	int index = (8 * flow_type + cfn_index / cfn_per_word) * num_lookups +
+		    lookup;
+	int bit_pos = cfn_index % cfn_per_word;
+
+	uint32_t bit = 1 << bit_pos;
+	uint32_t old_bm = 0;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      index, &old_bm);
+
+	uint32_t new_bm = enable ? (old_bm | bit) : (old_bm & ~bit);
+
+	/* Only touch hardware when the bitmap actually changes */
+	if (new_bm != old_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, index, new_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, index, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable/disable an FLM flow type bit for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int num_lookups = 4;
+	const int cfn_per_word = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* FTE table index: one 8-bit bitmap word per (flow_type, CFN-word, lookup) */
+	int index = (8 * flow_type + cfn_index / cfn_per_word) * num_lookups +
+		    lookup;
+	int bit_pos = cfn_index % cfn_per_word;
+
+	uint32_t bit = 1 << bit_pos;
+	uint32_t old_bm = 0;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       index, &old_bm);
+
+	uint32_t new_bm = enable ? (old_bm | bit) : (old_bm & ~bit);
+
+	/* Only touch hardware when the bitmap actually changes */
+	if (new_bm != old_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, index, new_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, index, 1);
+	}
+
+	return 0;
+}
+
+/* Map a logical RX queue id on this device to its hardware queue id; -1 if unknown. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int i;
+
+	for (i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id != id)
+			continue;
+		return dev->rx_queue[i].hw_id;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Reset FLM control and wait for the FPGA's DDR4/SDRAM calibration to
+ * complete, then program the initial timeout and scrubber settings.
+ * Returns 0 on success, -1 if calibration never signals done.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	/* Clear all control fields, then set the SDRAM usage split */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Re-initialize the FLM SDRAM: quiesce the FLM, wait for it to go idle,
+ * run the SDRAM init sequence, then re-enable the FLM according to
+ * 'enable'.  Returns 0 on success, -1 on timeout waiting for idle or
+ * init-done.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Clear every FLM recipe except recipe 0 */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Poll until the hardware reports SDRAM init complete */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+/* Max FLM recipe groups and flow-type (FT) entries handled by this profile */
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Identity of an FLM flow-type action set.  Two flows whose identities
+ * compare equal (via .data) can share the same FT entry.  The bit-field
+ * view and the 64-bit .data view alias the same storage.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1; /* slot is allocated */
+			uint64_t drop : 1; /* no destination: drop traffic */
+			uint64_t ltx_en : 1; /* local TX to a physical port */
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1; /* forward to a virtual queue */
+			uint64_t queue : 8;
+			uint64_t encap_len : 8; /* tunnel header length, bytes */
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1; /* 0 = IPv4 tunnel, 1 = IPv6 */
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data; /* whole identity for fast compare */
+	};
+};
+
+/*
+ * FLM key layout: dyn/ofs selectors for two quad-words (QW0/QW4) and two
+ * single words (SW8/SW9) of the lookup key, plus outer/inner protocol
+ * flags.  The .data view allows whole-key comparison (see
+ * flm_flow_learn_prepare, which rejects mixed key layouts in one group).
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data; /* whole key definition for fast compare */
+	};
+};
+
+/*
+ * Build the flow-type identity descriptor from a flow definition; used to
+ * decide whether two flows can share the same FT action-set entry.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ident;
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		/* No destination at all means drop */
+		ident.drop = 1;
+	} else {
+		int i;
+
+		for (i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ident.ltx_en = 1;
+				ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ident.queue_en = 1;
+				ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Record encapsulation parameters when a tunnel header is present */
+	if (fd->tun_hdr.len > 0) {
+		ident.encap_len = fd->tun_hdr.len;
+		ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ident;
+}
+
+/* Record the dyn/ofs selector for quad-word 0 or 1 (QW0/QW4) of the key. */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw != 0) {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->qw0_dyn = dyn & 0x7f;
+	key_def->qw0_ofs = ofs & 0xff;
+}
+
+/* Record the dyn/ofs selector for single word 0 or 1 (SW8/SW9) of the key. */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw != 0) {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->sw8_dyn = dyn & 0x7f;
+	key_def->sw8_ofs = ofs & 0xff;
+}
+
+/*
+ * Per-group FLM state: the backing group-0 CFN/KM flow type, the key
+ * layout programmed into the recipe, and the FT action-set table shared
+ * by flows in the group.
+ */
+struct flm_flow_group_s {
+	int cfn_group0; /* group 0 CFN backing this group, -1 when unset */
+	int km_ft_group0; /* KM flow type used by the group 0 filter */
+	struct flow_handle *fh_group0; /* miss flow handle for the group */
+
+	struct flm_flow_key_def_s key_def; /* key layout; must match all flows */
+
+	int miss_enabled; /* nonzero once group 0 FLM miss path is active */
+
+	/* FT action-set table; entries shared between identical flows */
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh; /* owner of this FT entry */
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* most recently used FT index (sic: "cached") for fast re-lookup */
+	uint32_t cashed_ft_index;
+};
+
+/* Top-level FLM resource handle: one group slot per FLM recipe. */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (first call) or reset (subsequent calls) the FLM flow handle
+ * and mark every group slot as unused.  On allocation failure *handle is
+ * left NULL and no initialization is attempted.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	/* calloc may fail; the original code dereferenced NULL here */
+	if (!flm_handle)
+		return;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM flow handle and clear the caller's pointer. */
+static void flm_flow_handle_remove(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle = *handle;
+
+	*handle = NULL;
+	free(flm_handle);
+}
+
+/* Record the group-0 CFN / KM flow type pair that backs an FLM group. */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	group = &handle->groups[group_index];
+	group->cfn_group0 = cfn;
+	group->km_ft_group0 = km_ft;
+	group->fh_group0 = fh;
+	group->miss_enabled = 0;
+
+	return 0;
+}
+
+/* Clear all state for an FLM group and mark its CFN slot as unused. */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	group = &handle->groups[group_index];
+	memset(group, 0x0, sizeof(struct flm_flow_group_s));
+	group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/* Return the flow handle installed as the group-0 (miss) flow of a group. */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = handle->groups[group_index].fh_group0;
+	return 0;
+}
+
+/*
+ * Program the FLM recipe for a group: key word selectors, mask, KID and
+ * protocol flags, then flush the recipe to hardware.  Returns 0 on
+ * success, -1 on an out-of-range group index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask words into the FLM mask register layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	/* Key word selectors taken from the flow's key definition */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KID 0 and 1 are reserved; groups use group_index + 2 as key id */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	/* NOTE(review): byte-count offset -20 presumably skips the frame
+	 * preamble/overhead - confirm against FLM documentation
+	 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down the FLM recipe of a group and, if the group-0 miss path was
+ * enabled, restore the group-0 filter to its non-FLM configuration.
+ * The ordering (RCP select, FT swap, KCE disable) mirrors the enable
+ * sequence in flm_flow_learn_prepare in reverse.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	/* Clear the hardware recipe for this group */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare a flow for FLM learning: on first use of a group, enable the
+ * group-0 miss path (RCP select, recipe programming, FT swap, KCE enable
+ * - in that exact order; the filter is transiently invalid between the FT
+ * swap and the KCE flush).  Then find or allocate an FT action-set entry
+ * matching the flow definition.
+ *
+ * Outputs: *kid and *ft for the learn record; *cfn_to_copy and
+ * *cfn_to_copy_km_ft when a new FT entry requires CFN duplication;
+ * *fh_existing when an identical flow already owns an FT entry.
+ * Returns 0 on success, -1 on error.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	/* The group must have been set up via flm_flow_setup_group first */
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in one group must share the same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: re-use the most recently matched FT entry */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT 0 and 1 are reserved; scan 2..FLM_FLOW_FT_MAX-1 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				/* Free slot: claim it for this flow handle */
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				/* Identical action set already programmed */
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the FT entry owned by a flow handle and disable the KM and FLM
+ * flow types on its CFN.  Returns 0 on success, nonzero on error.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *group =
+			&handle->groups[fh->flm_group_index];
+	int err = 0;
+
+	/* Drop ownership of the FT slot before touching hardware */
+	memset(&group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	err |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 group->km_ft_group0, 0, 0);
+	err |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 (int)fh->flm_ft_index, 2, 0);
+
+	return err;
+}
+
+/* Number of meter profiles supported */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Meter profile storage: one pre-encoded dual token bucket per profile.
+ * rate/size values are stored in the FLM learn-record format -
+ * 12-bit mantissa in [11:0] and shift-left amount in [15:12]
+ * (see flow_mtr_set_profile).
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires an FLM module of hardware variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Maximum number of meter policies equals the number of meter profiles. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return (uint64_t)FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a byte count into bucket size units of 2^40 / 10^9 bytes,
+ * i.e. compute ceil(value * 10^9 / 2^40) without 128-bit arithmetic.
+ * The product is split into two partial products of 20 input bits each.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	/*
+	 * Sum the below-2^40 fractions of both partial products so that a
+	 * carry out of bit 40 is kept (the previous code collapsed any
+	 * nonzero fraction into a single +1 and could undercount by one).
+	 */
+	uint64_t frac = (lo_bits & 0xffffffffff) + ((hi_bits & 0xfffff) << 20);
+	uint64_t result = (hi_bits >> 20) + (lo_bits >> 40) + (frac >> 40);
+
+	/* Round up when fractional bits remain */
+	if (frac & 0xffffffffff)
+		result += 1;
+
+	return result;
+}
+
+/*
+ * Encode a meter profile's two token buckets into the FLM learn-record
+ * format: rates in 128 bytes/sec units, sizes in 2^40/10^9 byte units,
+ * each stored as 12-bit mantissa [11:0] plus 4-bit shift [15:12].
+ * Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies carry no state in this implementation; accept and ignore. */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Number of meters supported, bounded by the meter statistics table size. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return (uint32_t)FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics.  n_pkt/n_bytes are updated by
+ * flm_mtr_update_stats and read by flm_mtr_read_stats using the MSB of
+ * n_pkt as a write-in-progress marker; stats_mask == 0 marks a destroyed
+ * meter whose counters must not be updated.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets; /* profile buckets; NULL after destroy */
+
+	volatile atomic_uint_fast64_t n_pkt; /* MSB set while an update is in flight */
+	volatile atomic_uint_fast64_t n_bytes;
+	uint64_t n_pkt_base; /* baseline subtracted on read (for clear-on-read) */
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask;
+};
+
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Write one learn record to the FLM learn FIFO.  If the FIFO lacks space,
+ * drain pending INF records (which frees learn space) and retry up to
+ * FLM_PROG_MAX_RETRY times.  Returns 0 on success, 1 when the FIFO never
+ * freed up, or the status of the final flush otherwise.
+ * Caller must hold the ndev mutex (uses flm_read_inf_rec_locked).
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached FIFO level before spinning */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Draining INF records lets the hardware make progress */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter by writing an FLM learn record (op = 1) keyed on
+ * mtr_id, carrying bucket A of the given profile.  On success the meter's
+ * statistics entry is bound to the profile buckets.  Returns the status
+ * of flow_flm_apply.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Meters are keyed on SW9 = mtr_id + 1 under reserved KID 1 */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	/* Start with a full bucket */
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* 32-bit meter id in id[0..3]; id[8] MSB flags the record as a meter */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter by writing an FLM learn record with op = 0 (delete)
+ * for the meter's key.  The statistics entry is cleared first so a
+ * zeroed stats_mask blocks further counter updates.  Returns the status
+ * of flow_flm_apply.
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Same key encoding as flow_mtr_create_meter */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust a live meter's bucket level by writing an FLM learn record with
+ * op = 2 (update), re-supplying the profile's bucket A rate/size.
+ * Returns the status of flow_flm_apply.
+ *
+ * NOTE(review): mtr_stat->buckets is dereferenced without a NULL check;
+ * it is NULL after flow_mtr_destroy_meter - confirm callers never adjust
+ * a destroyed meter.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Same key encoding as flow_mtr_create_meter */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ whole INF records from the
+ * FLM info FIFO into 'data'.  Returns the number of records read (0 if
+ * fewer than one full record is available).  Caller must hold the ndev
+ * mutex.
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached FIFO level before giving up */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	/* Only read whole records */
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain INF records from the FLM and fold meter statistics into the
+ * per-meter counters.  Returns the number of records processed.
+ *
+ * Writer protocol paired with flm_mtr_read_stats: n_pkt is first stored
+ * with its MSB set (update in progress), then n_bytes is stored, then
+ * n_pkt is stored with the MSB clear.  Readers spin while the MSB is set
+ * and re-read until they observe a stable n_pkt.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				/* 64-bit byte/packet counts split over two words */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/* MSB marks the update window for readers */
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read one meter's green packet/byte counters.  Values are reported
+ * relative to the last clear; with 'clear' set the current values become
+ * the new baseline.  Outputs are only written when *stats_mask is
+ * nonzero (meter exists and has stats enabled).
+ *
+ * Reader side of the protocol in flm_mtr_update_stats: spin while the
+ * MSB of n_pkt is set, then re-read until n_pkt is unchanged around the
+ * n_bytes load, guaranteeing a consistent (n_pkt, n_bytes) pair.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipe 0 is not used for ports; port N maps to recipe N + 1. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/* Resolve an ethdev port_id to the physical port number; UINT8_MAX if unknown. */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *eth_dev;
+
+	for (eth_dev = ndev->eth_base; eth_dev; eth_dev = eth_dev->next) {
+		if (eth_dev->port_id == port_id)
+			return eth_dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the front of the device's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/* Unlink a flow handle from the device's doubly linked flow list. */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *n = fh->next;
+	struct flow_handle *p = fh->prev;
+
+	if (p) {
+		/* Interior or tail node */
+		p->next = n;
+		if (n)
+			n->prev = p;
+	} else if (n) {
+		/* Head node with a successor */
+		ndev->flow_base = n;
+		n->prev = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* Sole node in the list */
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	/* Push fh onto the head of the device's doubly linked FLM flow list. */
+	struct flow_handle *head = ndev->flow_base_flm;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	/* Unlink fh_flm from the device's doubly linked FLM flow list. */
+	struct flow_handle *nxt = fh_flm->next;
+	struct flow_handle *prv = fh_flm->prev;
+
+	if (nxt == NULL && prv == NULL) {
+		/* Possibly the only element in the list. */
+		if (ndev->flow_base_flm == fh_flm)
+			ndev->flow_base_flm = NULL;
+	} else if (nxt == NULL) {
+		/* Tail element. */
+		prv->next = NULL;
+	} else if (prv == NULL) {
+		/* Head element. */
+		ndev->flow_base_flm = nxt;
+		nxt->prev = NULL;
+	} else {
+		/* Interior element. */
+		prv->next = nxt;
+		nxt->prev = prv;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	/*
+	 * Interpret a VLAN match element. When a TCI mask is given, one SW
+	 * key word is consumed: the masked TCI is added as a KM match on the
+	 * first VLAN tag and recorded in key_def. fd->vlans is always bumped.
+	 * Returns 0 on success; 1 on error (fd is freed, 'error' is set).
+	 *
+	 * NOTE(review): sw_counter is passed by value, so the increment at
+	 * the end is not visible to the caller — confirm the caller tracks
+	 * SW key-word consumption itself.
+	 */
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			/* An implicit VLAN cannot combine with an explicit TCI match. */
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			/* Only two SW key words exist (indices 0 and 1). */
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words are allocated from the top: index 1 first, then 0. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Count the tag even when no TCI match was requested. */
+	fd->vlans += 1;
+	return 0;
+}
+
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	/*
+	 * Interpret an IPv4 match element. src/dst addresses are placed in a
+	 * QW (quad-word) key pair when one is free, otherwise each masked
+	 * address falls back to a single SW key word. Sets the outer or
+	 * tunneled L3 protocol depending on whether an outer L3 (or an ANY
+	 * element) was already seen.
+	 * Returns 0 on success; 1 on error (fd is freed, 'error' is set).
+	 *
+	 * NOTE(review): qw_counter/sw_counter are passed by value, so the
+	 * increments below do not propagate to the caller — confirm the
+	 * caller tracks key-word consumption itself.
+	 */
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		/*
+		 * Fully-masked frag_offset of 0xffff selects fragmentation
+		 * handling mode 0xfe — presumably a hardware encoding; confirm
+		 * against the FLM specification.
+		 */
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			/* QW banks start at word 2; each QW holds 4 words. */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			/* Both addresses matched at L3 header offset 12 (src, then dst). */
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			/*
+			 * No QW free: check the remaining SW capacity covers
+			 * every requested address match. (Unsigned arithmetic;
+			 * relies on sw_counter <= 2 at this point.)
+			 */
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	/* Tunneled if an outer L3 (or an ANY element) was already seen. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	/*
+	 * Interpret an IPv6 match element. Each non-zero 128-bit address
+	 * (src and/or dst) consumes one full QW (four key words). Sets the
+	 * outer or tunneled L3 protocol depending on whether an outer L3 (or
+	 * an ANY element) was already seen.
+	 * Returns 0 on success; 1 on error (fd is freed, 'error' is set).
+	 *
+	 * NOTE(review): qw_counter is passed by value, so the increments
+	 * below do not propagate to the caller — confirm the caller tracks
+	 * QW consumption itself.
+	 */
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* QW banks start at word 2; each QW holds 4 words. */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			/* Convert each network-order word to host order. */
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			/* Pre-mask the data words, as done for every key element. */
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			/* IPv6 source address lives at L3 header offset 8. */
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			/* IPv6 destination address lives at L3 header offset 24. */
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	/* Tunneled if an outer L3 (or an ANY element) was already seen. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	/*
+	 * Interpret a UDP match element: src/dst ports share one SW key word
+	 * (src in the upper 16 bits, dst in the lower). Sets the outer or
+	 * tunneled L4 protocol depending on whether an outer L4 (or an ANY
+	 * element) was already seen.
+	 * Returns 0 on success; 1 on error (fd is freed, 'error' is set).
+	 *
+	 * NOTE(review): name is spelled "upd" rather than "udp" — renaming
+	 * would break callers elsewhere in this file, so only noting it.
+	 * NOTE(review): sw_counter is passed by value, so the increment below
+	 * does not propagate to the caller.
+	 */
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	/*
+	 * Interpret an SCTP match element: src/dst ports share one SW key
+	 * word (src in the upper 16 bits, dst in the lower).
+	 * Returns 0 on success; 1 on error (fd is freed, 'error' is set).
+	 */
+	const struct flow_elem_sctp *spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL &&
+			(mask->hdr.src_port || mask->hdr.dst_port)) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	/* Tunneled if an outer L4 (or an ANY element) was already seen. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	/*
+	 * Interpret a TCP match element: src/dst ports share one SW key word
+	 * (src in the upper 16 bits, dst in the lower).
+	 * Returns 0 on success; 1 on error (fd is freed, 'error' is set).
+	 */
+	const struct flow_elem_tcp *spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL &&
+			(mask->hdr.src_port || mask->hdr.dst_port)) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	/* Tunneled if an outer L4 (or an ANY element) was already seen. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	/*
+	 * Interpret a GTP match element: a masked TEID consumes one SW key
+	 * word at L4 payload offset 4. Always marks the flow as GTPv1-U
+	 * tunneled.
+	 * Returns 0 on success; 1 on error (fd is freed, 'error' is set).
+	 */
+	const struct flow_elem_gtp *spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL && mask->teid) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = ntohl(mask->teid);
+		sw_data[0] = ntohl(spec->teid) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+		set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+		sw_counter += 1;
+	}
+
+	/* GTPv1-U tunnel detected whether or not the TEID was matched. */
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return a CAT function (CFN) and everything attached to it to reset
+ * state: the CFN preset itself, the KM and FLM category/enable entries
+ * and their per-key flow types, and any CTE/CTS color table entries
+ * that were enabled for this CFN.
+ *
+ * Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN: preset the category function entry and flush it */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn, 0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/* KM: drop this CFN's enable bit, clear the category selection and
+	 * disable all flow types on all four keys.
+	 */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		enable_bm &= ~(1 << (cfn % 8));
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, enable_bm);
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn, 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_km(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* FLM: same reset sequence as for KM above */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		enable_bm &= ~(1 << (cfn % 8));
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, enable_bm);
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn, 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_flm(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* CTE / CTS: clear the color table entries if any were enabled */
+	{
+		uint32_t cte_bm = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte_bm);
+
+		if (cte_bm) {
+			/* Each CFN owns cts_offset consecutive CTS entries */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int idx = 0; idx < cts_offset; ++idx) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + idx, 0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + idx, 0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill the FLM-specific fields of a flow handle from an interpreted flow
+ * definition: the L4 protocol number, the 10-word key data, KID / RPL
+ * pointer / priority, the NAT, DSCP and GTP values picked out of the
+ * modify-field actions, and the MTU fragmentation recipe.
+ *
+ * Returns 0 on success, -1 if fh is not an FLM handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* IANA protocol number; an outer L4 match wins over a tunneled one */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Harvest NAT / DSCP / GTP values from the modify-field actions */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		const int sel = (int)fd->modify_field[idx].select;
+
+		if (sel == CPY_SELECT_DSCP_IPV4 || sel == CPY_SELECT_DSCP_IPV6) {
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+		} else if (sel == CPY_SELECT_RQI_QFI) {
+			/* One byte carries RQI (bit 6) and QFI (bits 5:0) */
+			fh->flm_rqi = (fd->modify_field[idx].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+		} else if (sel == CPY_SELECT_IPV4) {
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+		} else if (sel == CPY_SELECT_PORT) {
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+		} else if (sel == CPY_SELECT_TEID) {
+			fh->flm_teid = ntohl(fd->modify_field[idx].value32[0]);
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build an FLM v17 learn record from a flow handle and hand it to the
+ * backend with flow_flm_apply.  mtr_ids may be NULL; otherwise up to
+ * MAX_FLM_MTRS_SUPPORTED meter ids are attached and the count of leading
+ * non-zero ids selects the member used for statistics.
+ *
+ * Returns the result of flow_flm_apply, or -1 if fh is not an FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s lrn;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* memset (not an initializer) so padding is zeroed as well */
+	memset(&lrn, 0x0, sizeof(lrn));
+
+	/* Key words: QW0/QW4 take flm_data[9..6]/[5..2] in reverse order */
+	for (int w = 0; w < 4; w++) {
+		lrn.qw0[w] = fh->flm_data[9 - w];
+		lrn.qw4[w] = fh->flm_data[5 - w];
+	}
+	lrn.sw8 = fh->flm_data[1];
+	lrn.sw9 = fh->flm_data[0];
+	lrn.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		FLM_V17_MBR_ID1(lrn.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(lrn.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(lrn.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(lrn.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t cnt = 0;
+
+		for (; cnt < MAX_FLM_MTRS_SUPPORTED && mtr_ids[cnt] != 0; ++cnt)
+			;
+		lrn.vol_idx = cnt;
+	}
+
+	lrn.nat_ip = fh->flm_nat_ipv4;
+	lrn.nat_port = fh->flm_nat_port;
+	lrn.nat_en = (fh->flm_nat_ipv4 || fh->flm_nat_port) ? 1 : 0;
+
+	lrn.dscp = fh->flm_dscp;
+	lrn.teid = fh->flm_teid;
+	lrn.qfi = fh->flm_qfi;
+	lrn.rqi = fh->flm_rqi;
+
+	/* color: bits [9:0] = RPL EXT PTR, bits [13:10] = MTU recipe */
+	lrn.color = fh->flm_rpl_ext_ptr & 0x3ff;
+	lrn.color |= (fh->flm_mtu_fragmentation_recipe & 0xf) << 10;
+
+	lrn.ent = 0;
+	lrn.op = flm_op & 0xf;
+	lrn.prio = fh->flm_prio & 0x3;
+	lrn.ft = flm_ft;
+	lrn.kid = fh->flm_kid;
+	lrn.eor = 1;
+
+	return flow_flm_apply(dev, &lrn);
+}
+
+/*
+ * Set up KM (key matcher) flow-type and category (RCP) resources for a
+ * new flow handle fh.
+ *
+ * Two paths:
+ *  - identical_flow_found == 0: look up a KM flow type whose identity
+ *    matches fd (or claim a free slot), then either extend the KM RCP of
+ *    a compatible existing flow (found_flow != NULL) or allocate and
+ *    program a new RCP, and finally write the KM match entry.
+ *  - identical_flow_found != 0: an existing flow with an identical KM key
+ *    was found; reference-count and reuse its flow type and RCP, and
+ *    share its match entry.
+ *
+ * On success the out-parameters *setup_km, *setup_km_ft and *setup_km_rcp
+ * are filled in for the caller's subsequent CAT programming.
+ *
+ * Returns 0 on success, 1 on resource exhaustion (error already set via
+ * flow_nic_set_error).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+		/* Find an existing KM flow type that can be reused, or the
+		 * first free (all-zero) slot if no identity matches.
+		 */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Slot 0 is skipped; remember the first free slot
+			 * while scanning for an identity match.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Reuse: bump the refcount on the matching FT */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record fd's identity in it */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				/* Neither a match nor a free slot left */
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* Update an existing KM RCP (compatible, non-referenced flow
+		 * found by the caller) or allocate a new RCP.
+		 */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this flow type to the RCP's FT mask for key A */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* Flush KM RCP and write the match entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical KM key exists: reference and reuse found_flow's
+		 * flow type, category and match entry.
+		 */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		*setup_km = 1;
+		/* NOTE(review): in this branch the caller's loop broke on the
+		 * identical match, so 'flow' and 'found_flow' refer to the
+		 * same handle; 'found_flow' would be clearer — confirm before
+		 * changing.
+		 */
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at the beginning of L2.
+ *
+ * fd      - interpreted flow definition (tunnel header and modify fields)
+ * i       - index of the modify field being adjusted
+ * ofs     - in/out: byte offset of the field, adjusted in place
+ * select  - CPY_SELECT_* kind of the field, decides which headers precede it
+ * dyn     - out: set to 1 (static-from-L2) when the offset is rebased
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/* Bug fix: adjust the pointed-to offset value.  The
+			 * original "ofs += ..." incremented the local pointer
+			 * itself, so the adjustment was silently lost.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Rebase onto the new outer headers */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
/*
 * Prepare all NIC flow resources needed by the inline profile.
 *
 * Reserves index 0 of every resource class (plus FLM flow type 1) for
 * default / catch-all use, programs the default recipes (QSL discard,
 * QST, PDB virtio scatter-gather descriptor, 5-tuple hasher, COT color 0),
 * unblocks the RMC paths, calibrates and resets the FLM SDRAM, presets the
 * FLM control / priority / periodic-statistics machinery and allocates the
 * meter and flow-type bookkeeping tables.
 *
 * Idempotent: guarded by ndev->flow_mgnt_prepared.
 *
 * @param ndev NIC device to prepare
 * @return 0 on success; -1 on failure, after rolling everything back via
 *         done_flow_management_of_ndev_profile_inline()
 */
int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
{
	if (!ndev->flow_mgnt_prepared) {
		/* Check static arrays are big enough */
		assert(ndev->be.tpe.nb_cpy_writers <=
		       MAX_CPY_WRITERS_SUPPORTED);

		/* KM Flow Type 0 is reserved */
		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);

		/* FLM Flow Type 0 and 1 is reserved */
		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);

		/* CAT CFN 0 is reserved as a low priority catch all filter */
		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
				   0, 0, 0);
		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);

		/* Initialize QSL with unmatched recipe index 0 - discard */
		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
			goto err_exit0;
		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
			goto err_exit0;

		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);

		/* Initialize QST with default index 0 */
		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
				       0x0) < 0)
			goto err_exit0;
		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
			goto err_exit0;

		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);

		/* HST & TPE index 0 is reserved */
		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);

		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
				0)
			goto err_exit0;
		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
				0)
			goto err_exit0;

		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
			goto err_exit0;

		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);

		/* Set default hasher recipe to 5-tuple */
		/* NOTE(review): unlike the QSL/PDB programming above, the
		 * results of the hasher set/flush are not checked - confirm
		 * these calls cannot fail here.
		 */
		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);

		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);

		/*
		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
		 * this entry
		 */
		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
			goto err_exit0;

		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);

		/* Unblock MAC and MAC statistics on this NIC */
		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
			goto err_exit0;
		/* block keep alive - not needed */
		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
			goto err_exit0;
		/*
		 * Unblock all MAC ports
		 */
		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
			goto err_exit0;

		/*
		 *  unblock RPP slices
		 */
		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);

		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
			goto err_exit0;

		/* FLM */
		if (flm_sdram_calibrate(ndev) < 0)
			goto err_exit0;
		if (flm_sdram_reset(ndev, 1) < 0)
			goto err_exit0;
		flm_flow_handle_create(&ndev->flm_res_handle);

		/* Clear all FLM learn/unlearn/relearn status reporting */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
				       0); /* Learn done status */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
				       0); /* Learn fail status */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
				       0); /* Learn ignore status */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
				       0); /* Unlearn done status */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
				       0); /* Unlearn ignore status */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
				       0); /* Relearn done status */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
				       0); /* Relearn ignore status */
		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
		hw_mod_flm_control_flush(&ndev->be);

		/* Four FLM priority levels with decreasing FIFO drop limits */
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
				    0); /* Drop at 100% FIFO fill level */
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
				    6); /* Drop at 37,5% FIFO fill level */
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
				    4); /* Drop at 25% FIFO fill level */
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
				    2); /* Drop at 12,5% FIFO fill level */
		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
		hw_mod_flm_prio_flush(&ndev->be);

		/* Periodic statistics limits for every FLM PST profile */
		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
					   FLM_PERIODIC_STATS_BYTE_LIMIT);
			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
					   FLM_PERIODIC_STATS_PKT_LIMIT);
			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
		}
		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);

		hw_mod_flm_stat_update(&ndev->be);

		/* Bookkeeping tables; freed in done_...() on the error path */
		ndev->flm_mtr_handle =
			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
		ndev->ft_res_handle =
			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
		ndev->mtr_stat_handle =
			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));

		if (ndev->flm_mtr_handle == NULL ||
				ndev->ft_res_handle == NULL ||
				ndev->mtr_stat_handle == NULL)
			goto err_exit0;

		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;

		/* Meter statistics are read concurrently - initialize atomics */
		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
			atomic_init(&mtr_stat[i].n_pkt, 0);
			atomic_init(&mtr_stat[i].n_bytes, 0);
			atomic_init(&mtr_stat[i].stats_mask, 0);
		}

		if (flow_group_handle_create(&ndev->group_handle,
					     FLM_FLOW_RCP_MAX))
			goto err_exit0;

		ndev->flow_mgnt_prepared = 1;
	}
	return 0;

err_exit0:
	/* Roll back any partially-initialized state */
	done_flow_management_of_ndev_profile_inline(ndev);
	return -1;
}
+
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,
+							  error, 0, &port_id,
+							  &num_dest_port, &num_queues,
+							  packet_data, packet_mask,
+							  &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (!fd)
+		return -1;
+
+	free(fd);
+	return 0;
+}
+
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FlOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
/*
 * Destroy one flow and release every NIC resource it references.
 * "locked": the caller must already hold dev->ndev->mtx.
 *
 * FLM flows are unprogrammed from the FLM engine, their TPE replace
 * extension / replace data resources are dereferenced and cleared, and the
 * owning template flow is destroyed (recursively through this function)
 * once its reference count drops to zero.
 *
 * Non-FLM flows walk every resource class; for each resource whose
 * reference count reaches zero the corresponding hardware state is reset
 * and flushed.
 *
 * @param dev   owning ethernet device; must be non-NULL
 * @param fh    flow handle to destroy; must be non-NULL; freed on return
 * @param error optional error reporting; may be NULL
 * @return 0 on success; non-zero if any teardown step failed
 */
int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
				       struct flow_handle *fh,
				       struct flow_error *error)
{
	assert(dev);
	assert(fh);

	int err = 0;

	flow_nic_set_error(ERR_SUCCESS, error);

	/* take flow out of ndev list - may not have been put there yet */
	if (fh->type == FLOW_HANDLE_TYPE_FLM)
		nic_remove_flow_flm(dev->ndev, fh);

	else
		nic_remove_flow(dev->ndev, fh);

#ifdef FLOW_DEBUG
	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
					    FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
		/* Unlearn the flow from the FLM engine */
		err |= flm_flow_programming(dev, fh, NULL, 0, 0);

		/* Release the TPE replace extension and, transitively, the
		 * replace-data records it pointed at, once no longer shared.
		 */
		if (fh->flm_rpl_ext_ptr > 0 &&
				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
							(int)fh->flm_rpl_ext_ptr) == 0) {
			uint32_t ptr = 0;
			uint32_t len = 0;

			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
					       HW_TPE_RPL_EXT_RPL_PTR,
					       (int)fh->flm_rpl_ext_ptr, &ptr);
			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
					       HW_TPE_RPL_EXT_META_RPL_LEN,
					       (int)fh->flm_rpl_ext_ptr, &len);

			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
					       HW_TPE_PRESET_ALL,
					       (int)fh->flm_rpl_ext_ptr, 0);
			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
						 (int)fh->flm_rpl_ext_ptr, 1);

			/* One RPL record covers 16 bytes of replace data */
			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
				if (flow_nic_deref_resource(dev->ndev,
							    RES_TPE_RPL,
							    (int)(ptr + ii)) == 0) {
					uint32_t rpl_zero[] = { 0, 0, 0, 0 };

					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
							       HW_TPE_PRESET_ALL,
							       (int)(ptr + ii),
							       rpl_zero);
					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
								 (int)(ptr + ii),
								 1);
				}
			}
		}

		flow_group_translate_release(dev->ndev->group_handle,
					     fh->flm_owner->flm_group_index);

		/* Last FLM flow of the owner template: destroy the owner
		 * itself via the non-FLM branch of this same function.
		 */
		fh->flm_owner->flm_ref_count -= 1;
		if (fh->flm_owner->flm_ref_count == 0) {
			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
			err |= flow_destroy_locked_profile_inline(dev,
								  fh->flm_owner,
								  error);
		}
	} else {
		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);

		if (fh->fd) {
			if (fh->fd->km.num_ftype_elem)
				km_clear_data_match_entry(&fh->fd->km);

			if (fh->fd->jump_to_group != UINT32_MAX) {
				err |= flm_flow_destroy_group(dev,
							      fh->fd->jump_to_group);
				flow_group_translate_release(dev->ndev->group_handle,
							     fh->fd->jump_to_group);
			}
		}

		/* Per resource class: clear NIC state of each resource whose
		 * reference count drops to zero.
		 */
		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
			if (fh->resource[res_type].count < 1)
				continue;

			for (int ii = 0; ii < fh->resource[res_type].count;
					ii++) {
				/* If last ref count of this resource, free it */
				if (flow_nic_deref_resource(dev->ndev,
							    res_type,
							    fh->resource[res_type].index +
							    ii) == 0) {
					/* Free resource up in NIC */
					switch (res_type) {
					case RES_CAT_CFN:
						assert(ii == 0);
						err |= reset_cat_function_setup(dev,
							fh->resource[RES_CAT_CFN]
							.index + ii);
						break;

					case RES_QSL_QST:
						hw_mod_qsl_qst_set(&dev->ndev->be,
								   HW_QSL_QST_PRESET_ALL,
								   fh->resource[RES_QSL_QST]
								   .index + ii,
								   0);
						hw_mod_qsl_qst_flush(&dev->ndev->be,
								     fh->resource[RES_QSL_QST]
								     .index + ii,
								     1);
						break;

					case RES_QSL_RCP:
						hw_mod_qsl_rcp_set(&dev->ndev->be,
								   HW_QSL_RCP_PRESET_ALL,
								   fh->resource[RES_QSL_RCP]
								   .index + ii,
								   0);
						hw_mod_qsl_rcp_flush(&dev->ndev->be,
								     fh->resource[RES_QSL_RCP]
								     .index + ii,
								     1);
						break;

					case RES_CAT_COT:
						hw_mod_cat_cot_set(&dev->ndev->be,
								   HW_CAT_COT_PRESET_ALL,
								   fh->resource[res_type]
								   .index + ii,
								   0);
						hw_mod_cat_cot_flush(&dev->ndev->be,
								     fh->resource[res_type]
								     .index + ii,
								     1);
						break;

					case RES_KM_CATEGORY:
						assert(ii == 0);
						hw_mod_km_rcp_set(&dev->ndev->be,
								  HW_KM_RCP_PRESET_ALL,
								  fh->resource[res_type]
								  .index + ii,
								  0, 0);
						hw_mod_km_rcp_flush(&dev->ndev->be,
								    fh->resource[res_type]
								    .index + ii,
								    1);
						break;

					case RES_KM_FLOW_TYPE: {
						/* Software bookkeeping only:
						 * clear the flow-type ident.
						 */
						struct flm_flow_ft_ident_s *ft_idents =
							(struct flm_flow_ft_ident_s
							 *)dev->ndev
							->ft_res_handle;
						ft_idents[fh->resource[res_type]
							  .index +
							  ii]
						.data = 0;
					}
					break;

					case RES_FLM_RCP:
						assert(ii == 0);
						err |= flm_flow_destroy_rcp(dev,
									    fh->resource[res_type]
									    .index + ii);
						break;

					case RES_FLM_FLOW_TYPE:
						/* Nothing needed */
						break;

					case RES_HSH_RCP:
						hw_mod_hsh_rcp_set(&dev->ndev->be,
								   HW_HSH_RCP_PRESET_ALL,
								   fh->resource[res_type]
								   .index + ii,
								   0, 0);
						hw_mod_hsh_rcp_flush(&dev->ndev->be,
								     fh->resource[res_type]
								     .index +
								     ii,
								     1);
						break;

					case RES_PDB_RCP:
						hw_mod_pdb_rcp_set(&dev->ndev->be,
								   HW_PDB_RCP_PRESET_ALL,
								   fh->resource[res_type]
								   .index + ii,
								   0);
						hw_mod_pdb_rcp_flush(&dev->ndev->be,
								     fh->resource[res_type]
								     .index + ii,
								     1);
						break;

					case RES_HST_RCP:
						hw_mod_hst_rcp_set(&dev->ndev->be,
								   HW_HST_RCP_PRESET_ALL,
								   fh->resource[res_type]
								   .index + ii,
								   0);
						hw_mod_hst_rcp_flush(&dev->ndev->be,
								     fh->resource[res_type]
								     .index + ii,
								     1);
						break;

					case RES_TPE_RCP:
						/* Reset every TPE sub-module
						 * recipe for this index.
						 */
						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
								       HW_TPE_PRESET_ALL,
								       fh->resource[res_type]
								       .index + ii,
								       0);
						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
									 fh->resource[res_type]
									 .index + ii,
									 1);
						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
								       HW_TPE_PRESET_ALL,
								       fh->resource[res_type]
								       .index + ii,
								       0);
						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
									 fh->resource[res_type]
									 .index + ii,
									 1);
						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
								       HW_TPE_PRESET_ALL,
								       fh->resource[res_type]
								       .index + ii,
								       0);
						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
									 fh->resource[res_type]
									 .index + ii,
									 1);
						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
								       HW_TPE_PRESET_ALL,
								       fh->resource[res_type]
								       .index + ii,
								       0);
						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
									 fh->resource[res_type]
									 .index + ii,
									 1);
						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
								       HW_TPE_PRESET_ALL,
								       fh->resource[res_type]
								       .index + ii,
								       0);
						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
									 fh->resource[res_type]
									 .index + ii,
									 1);
						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
								       HW_TPE_PRESET_ALL,
								       fh->resource[res_type]
								       .index + ii,
								       0);
						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
									 fh->resource[res_type]
									 .index + ii,
									 1);
						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
								       HW_TPE_PRESET_ALL,
								       fh->resource[res_type]
								       .index + ii,
								       0);
						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
									 fh->resource[res_type]
									 .index + ii,
									 1);
						break;

					case RES_TPE_EXT:
						/* Nothing needed */
						break;

					case RES_TPE_RPL:
						/* Nothing needed */
						break;

					default:
						err |= -1;
						break;
					}
				}
			}
		}
		free(fh->fd);
	}

	if (err) {
		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
	}

	free(fh);

#ifdef FLOW_DEBUG
	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
					    FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	return err;
}
+
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		flow = dev->ndev->flow_base;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+
+		/* Delete all created FLM flows from this eth device */
+		flow = dev->ndev->flow_base_flm;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	return -1;
+}
+
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	*length = 0;
+	*data = NULL;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	return -1;
+}
+
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	int err = 0;
+	uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+	struct flow_nic_dev *ndev = dev->ndev;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  ifr_mtu_recipe, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      ifr_mtu_recipe, mtu);
+
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+						    1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
/*
 * Per-adapter backend context: one entry per physical adapter, holding the
 * NTHW accessor objects for every FPGA filter module this backend drives.
 */
static struct backend_dev_s {
	uint8_t adapter_no;	/* index of this adapter */
	enum debug_mode_e dmode;	/* current FLOW_BACKEND_DEBUG_MODE_* setting */
	struct info_nthw *p_info_nthw;
	struct cat_nthw *p_cat_nthw;
	struct km_nthw *p_km_nthw;
	struct flm_nthw *p_flm_nthw;
	struct hsh_nthw *p_hsh_nthw;
	struct hst_nthw *p_hst_nthw;
	struct qsl_nthw *p_qsl_nthw;
	struct slc_nthw *p_slc_nthw;
	struct slc_lr_nthw *p_slc_lr_nthw;
	struct pdb_nthw *p_pdb_nthw;
	struct ioa_nthw *p_ioa_nthw;
	struct roa_nthw *p_roa_nthw;
	struct rmc_nthw *p_rmc_nthw;
	struct hfu_nthw *p_hfu_nthw; /* TPE module */
	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
	struct csu_nthw *p_csu_nthw; /* TPE module */
	struct ifr_nthw *p_ifr_nthw; /* TPE module */
} be_devs[MAX_PHYS_ADAPTERS];
+
/*
 * Enable register-write debug tracing on module instance @inst when either
 * the backend device or the module itself has debug enabled.
 *
 * Deliberately NOT wrapped entirely in do/while(0): it must declare the
 * local flag __debug__ in the caller's scope so _CHECK_DEBUG_OFF() can read
 * it. The conditional part is now the usual do { if } while (0) shape
 * instead of the previous `if (...) do { } while (0)`, which was a
 * brace/dangling-else hazard.
 * NOTE(review): the name __debug__ is formally reserved for the
 * implementation (C11 7.1.3); kept unchanged since both macros share it.
 */
#define _CHECK_DEBUG_ON(be, mod, inst)                                     \
	int __debug__ = 0;                                                 \
	do {                                                               \
		if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) ||       \
		    (mod)->debug) {                                        \
			mod##_nthw_set_debug_mode(inst, 0xFF);             \
			__debug__ = 1;                                     \
		}                                                          \
	} while (0)

/* Restore module debug mode if _CHECK_DEBUG_ON() enabled it. */
#define _CHECK_DEBUG_OFF(mod, inst)                      \
	do {                                             \
		if (__debug__)                           \
			mod##_nthw_set_debug_mode(inst, 0); \
	} while (0)
+
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	be->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+static bool cat_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_cat_nthw != NULL;
+}
+
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
+			  (module_get_minor_version(be->p_cat_nthw->m_cat) &
+			   0xffff));
+}
+
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		r(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		r(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+static bool km_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_km_nthw != NULL;
+}
+
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_km_nthw->m_km) << 16) |
+			  (module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
+}
+
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_rcp_select(be->p_km_nthw, category + i);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw,
+					 km->v7.rcp[category + i].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw,
+					 km->v7.rcp[category + i].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw,
+					km->v7.rcp[category + i].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw,
+				       km->v7.rcp[category + i].dual);
+			km_nthw_rcp_paired(be->p_km_nthw,
+					 km->v7.rcp[category + i].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw,
+					km->v7.rcp[category + i].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw,
+					km->v7.rcp[category + i].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw,
+						 km->v7.rcp[category + i].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_cam_select(be->p_km_nthw,
+					 (bank << 11) + record + i);
+			km_nthw_cam_w0(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w0);
+			km_nthw_cam_w1(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w1);
+			km_nthw_cam_w2(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w2);
+			km_nthw_cam_w3(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w3);
+			km_nthw_cam_w4(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w4);
+			km_nthw_cam_w5(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w5);
+			km_nthw_cam_ft0(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Flush dirty KM TCAM entries to hardware.  The linear start index encodes
+ * (bank, byte, value) as bank*4*256 + byte*256 + value; only entries whose
+ * 'dirty' flag is set are written, and the flag is cleared afterwards.
+ * Returns 0.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int start_idx = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			if (km->v7.tcam[start_idx + i].dirty) {
+				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
+				km_nthw_tcam_t(be->p_km_nthw,
+					     km->v7.tcam[start_idx + i].t);
+				km_nthw_tcam_flush(be->p_km_nthw);
+				km->v7.tcam[start_idx + i].dirty = 0;
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width in version 3 = 72 */
+		/*
+		 * NOTE(review): the comment above says "version 3" but this
+		 * path runs for ver == 7; the bank width of 72 used below
+		 * appears to still apply — confirm.
+		 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* linear TCI address: bank * 72 + index */
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width in version 3 = 72 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* The FLM module is optional; it is present when its register map was attached. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_flm_nthw != NULL;
+}
+
+/* FLM module version packed as (major << 16) | minor. */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_flm_nthw->m_flm);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Mirror the FLM CONTROL shadow register (flm->v17.control) to hardware. */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM STATUS fields (critical/panic/crcerr) to hardware.
+ * The trailing 0 presumably selects set (write) direction — confirm against
+ * the flm_nthw_status_*() implementations.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the FLM STATUS shadow from hardware.  The trailing 1 presumably
+ * selects get (read) direction — confirm against flm_nthw_status_*().
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Mirror the FLM TIMEOUT shadow register to hardware. */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Mirror the FLM SCRUB shadow register to hardware. */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Mirror the FLM LOAD_BIN shadow register to hardware. */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Mirror the FLM LOAD_PPS shadow register to hardware. */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Mirror the FLM LOAD_LPS shadow register to hardware. */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Mirror the FLM LOAD_APS shadow register to hardware. */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Mirror the FLM PRIO shadow register (four limit/ft pairs) to hardware. */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' FLM PST table entries starting at 'index' to hardware. */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* write one entry per flush iteration */
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' FLM recipe (RCP) entries starting at 'index' to hardware. */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* write one recipe per flush iteration */
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the learn/inform/status buffer-control counters
+ * (lrn_free, inf_avail, sta_avail) from hardware into the shadow copy.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read all FLM statistics counters from hardware into the shadow copy.
+ * The v17 block covers learn/unlearn/relearn/age/timeout/probe counters;
+ * ver >= 20 additionally reads status, inform, packet, cache and CUC
+ * counters from the v20 layout.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* latch the counters first, then read each into the shadow */
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push 'size' words of learn data to the FLM and refresh the buffer-control
+ * counters.  NOTE(review): unlike the other FLM handlers this does not gate
+ * on flm->ver and assumes the v17 buf_ctrl layout — confirm intended.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read 'size' words of inform data from the FLM into inf_data and refresh
+ * the buffer-control counters.  Returns the helper's status code.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read 'size' words of status data from the FLM into sta_data and refresh
+ * the buffer-control counters.  Returns the helper's status code.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* The HSH module is present when its register map was attached. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_hsh_nthw != NULL;
+}
+
+/* HSH module version packed as (major << 16) | minor. */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_hsh_nthw->m_hsh);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Flush 'cnt' HSH recipe entries starting at 'category' to hardware (v5 only). */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		/* write one recipe per flush iteration */
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* The HST module is present when its register map was attached. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_hst_nthw != NULL;
+}
+
+/* HST module version packed as (major << 16) | minor. */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_hst_nthw->m_hst);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Flush 'cnt' HST recipe entries starting at 'category' to hardware (v2 only). */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		/* write one recipe per flush iteration */
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* The QSL module is present when its register map was attached. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_qsl_nthw != NULL;
+}
+
+/* QSL module version packed as (major << 16) | minor. */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_qsl_nthw->m_qsl);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Flush 'cnt' QSL recipe entries starting at 'category' to hardware (v7 only). */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		/* write one recipe per flush iteration */
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' QSL queue-selection table (QST) entries starting at 'entry'. */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		/* write one entry per flush iteration */
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' QSL queue-enable (QEN) entries starting at 'entry'. */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Flush 'cnt' QSL unmatched-queue (UNMQ) entries starting at 'entry'. */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* The SLC module is present when its register map was attached. */
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_slc_nthw != NULL;
+}
+
+/* SLC module version packed as (major << 16) | minor. */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_slc_nthw->m_slc);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Flush 'cnt' SLC recipe entries starting at 'category' to hardware (v1 only). */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* The SLC-LR module is present when its register map was attached. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_slc_lr_nthw != NULL;
+}
+
+/* SLC-LR module version packed as (major << 16) | minor. */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major =
+		(uint32_t)module_get_major_version(dev->p_slc_lr_nthw->m_slc_lr);
+	uint32_t minor =
+		(uint32_t)module_get_minor_version(dev->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Flush 'cnt' SLC-LR recipe entries starting at 'category' to hardware (v2 only). */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* The PDB module is present when its register map was attached. */
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_pdb_nthw != NULL;
+}
+
+/* PDB module version packed as (major << 16) | minor. */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_pdb_nthw->m_pdb);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' PDB recipe entries starting at 'category' to hardware (v9 only).
+ *
+ * Bug fix: the original wrote the pcap_keep_fcs shadow value through a second
+ * call to pdb_nthw_rcp_duplicate_bit() (copy-paste error), overwriting the
+ * DUPLICATE_BIT field and never programming PCAP_KEEP_FCS.  Use the
+ * field-specific writer, consistent with every other field in this loop.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		/* write one recipe per flush iteration */
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/* Mirror the PDB CONFIG shadow register (ts_format, port_ofs) to hardware. */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* The IOA module is present when its register map was attached. */
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+
+	return dev->p_ioa_nthw != NULL;
+}
+
+/* IOA module version packed as (major << 16) | minor. */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_ioa_nthw->m_ioa);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Program 'cnt' consecutive IOA RCP records starting at 'category' into
+ * hardware, using the usual select/stage-fields/flush register pattern.
+ * Only the v4 layout is handled; unsupported versions still return 0.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		/* One record is committed per flush. */
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write the two custom VLAN TPID values to the IOA module (v4 layout
+ * only) and commit them with a flush.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' ROA egress-packet-processing (EPP) records starting at
+ * 'index' via the IOA module (v4 layout only).
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* True when the ROA module was detected on this FPGA (handle allocated). */
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw != NULL;
+}
+
+/* ROA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+			  (module_get_minor_version(be->p_roa_nthw->m_roa) &
+			   0xffff));
+}
+
+/*
+ * Write 'cnt' tunnel headers to ROA (v6 layout only). Each header spans
+ * 4 hardware words, written one word-group at a time (inner loop).
+ * NOTE(review): 'index' appears to be in hardware-word units while
+ * v6.tunhdr[] is indexed per header ('index / 4 + i') — this assumes
+ * index is a multiple of 4; confirm against callers.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' ROA tunnel-configuration records starting at 'category'
+ * (v6 layout only), one select/stage/flush cycle per record.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the ROA global forwarding configuration (recirculation, TX port
+ * and packet-class forwarding enables) to hardware (v6 layout only).
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' ROA LAG configuration entries (TX PHY port mapping)
+ * starting at 'index' (v6 layout only).
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* True when the RMC module was detected on this FPGA (handle allocated). */
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw != NULL;
+}
+
+/* RMC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+			  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+			   0xffff));
+}
+
+/*
+ * Write the RMC control register fields (block/keep-alive/slice/port
+ * masks and LAG odd/even split) to hardware. The version check uses a
+ * packed major/minor value: 0x10003 == v1.3.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/*
+ * TPE is a composite of six sub-modules; it is "present" only when all
+ * of CSU, HFU, RPP_LR, TX_CPY, TX_INS and TX_RPL were detected.
+ */
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+	       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+	       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Map the six TPE sub-module versions onto a single synthetic TPE
+ * version: only two known combinations are supported (differing in
+ * RPP_LR 0 vs 1), returned as 1 or 2 respectively.
+ * NOTE(review): an unknown combination hits assert(false); with NDEBUG
+ * the assert compiles out and the function silently returns 0 — callers
+ * must treat 0 as "unsupported".
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	assert(false);
+	return 0;
+}
+
+/*
+ * Program 'cnt' RPP_LR RCP records (expansion length) starting at
+ * 'index'. Requires TPE version >= 1; otherwise a silent no-op.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' RPP_LR IFR RCP records (enable + MTU) starting at
+ * 'index'. Unlike most flush helpers here, this one returns -1 when the
+ * TPE version is < 2 instead of silently succeeding.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Program 'cnt' IFR RCP records (enable + MTU) starting at 'index'.
+ * Returns -1 when the TPE version is < 2 (mirrors
+ * tpe_rpp_ifr_rcp_flush above).
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Program 'cnt' TX_INS RCP records (dyn/ofs/len insert descriptors)
+ * starting at 'index'. No-op (returning 0) for TPE version < 1.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' TX_RPL RCP records (replace descriptors: position,
+ * length, replace-data pointer, extension priority) starting at 'index'.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' TX_RPL extension-table entries (replace-data pointers)
+ * starting at 'index'.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' TX_RPL replace-data words starting at 'index'.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' TX_CPY writer records starting at global record 'index'.
+ * Records are partitioned into writers: writer = record / nb_rcp_categories,
+ * slot within the writer = record % nb_rcp_categories.
+ * NOTE(review): wr_index is unsigned but initialized with -1 — this
+ * relies on unsigned wrap-around to produce a sentinel (UINT_MAX) that
+ * can never equal a real writer index, forcing the per-writer cnt setup
+ * on the first iteration.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			/* Entering a new writer: (re)arm its burst count. */
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' HFU (header field update) RCP records starting at
+ * 'index': three length-field rewrite groups (A/B/C), TTL rewrite,
+ * checksum/protocol classification and the four L3/L4 header offsets.
+ * One select/stage/flush cycle per record; no-op for version < 1.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			/* Length group A. */
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			/* Length group B. */
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			/* Length group C. */
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			/* TTL rewrite. */
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			/* Protocol classification and header offsets. */
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Program 'cnt' CSU RCP records (outer/inner L3 and L4 checksum
+ * commands) starting at 'index'. No-op for version < 1.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Unimplemented backend hook: Rx queue allocation is not supported by
+ * this backend; always fails with -1.
+ * NOTE(review): uses printf instead of the driver's NT_LOG facility.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Unimplemented backend hook for freeing an Rx queue.
+ * NOTE(review): prints an ERROR but returns 0 (success), while the
+ * matching alloc_rx_queue returns -1 — the inconsistency looks
+ * unintentional; confirm whether callers rely on the 0 return.
+ * Also uses printf instead of NT_LOG.
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend ops table handed to the generic flow API. The leading 1 is
+ * the first struct member (backend interface version); the remaining
+ * entries are positional function pointers grouped per module (info
+ * getters, queue hooks, then CAT/KM/FLM/HSH/HST/QSL/SLC/SLC-LR/PDB/IOA/
+ * ROA/RMC/TPE present/version/flush callbacks).
+ * NOTE(review): positional initialization is fragile against member
+ * reordering in flow_api_backend_ops — designated initializers
+ * (.member = fn) would make mismatches a compile error.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	alloc_rx_queue,
+	free_rx_queue,
+
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe and initialize every nthw hardware module for the adapter
+ * backing 'p_fpga', populating the per-adapter be_devs[] slot. Each
+ * module is probed by calling <mod>_nthw_init(NULL, ...): a 0 return
+ * means the module exists in this FPGA image, in which case a real
+ * instance is allocated and initialized; otherwise the handle is NULL
+ * and the module is reported absent via the *_get_present callbacks.
+ * Returns the backend ops table and stores the per-adapter device
+ * context in *dev.
+ * NOTE(review): the *_nthw_new() allocation results are used without a
+ * NULL check — confirm the allocators abort on failure.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	/* INFO module is mandatory: created unconditionally. */
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down all nthw module handles created by bin_flow_backend_init.
+ * Handles for modules that were not present are NULL here — presumably
+ * the *_delete() functions tolerate NULL; verify against their
+ * implementations.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public entry points of the binary flow backend: create the backend
+ * ops table / device context for an FPGA, and tear it down again.
+ * NOTE(review): the guard name __FLOW_BACKEND_H__ starts with a double
+ * underscore, which is reserved for the implementation (C11 7.1.3).
+ */
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Create the flow filter API instance for one adapter.
+ *
+ * @p_fpga:         FPGA handle the backend binds to.
+ * @p_flow_device:  out: created flow NIC device, or NULL on failure.
+ * @adapter_no:     physical adapter number (cast to uint8_t).
+ *
+ * @return 0 on success, -1 on failure.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		/*
+		 * Bug fix: release the backend NTHW modules allocated by
+		 * bin_flow_backend_init(); previously they leaked when
+		 * flow_api_create() failed (flow_filter_done() would never
+		 * run for this device).
+		 */
+		if (be_dev)
+			bin_flow_backend_done(be_dev);
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Shut down a flow filter instance created by flow_filter_init().
+ *
+ * The backend handle is fetched before flow_api_done() tears the device
+ * down, then the backend NTHW modules are released afterwards.
+ *
+ * @return result of flow_api_done() (0 on success).
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-29 10:17   ` [PATCH v9 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-29 10:17   ` Mykola Kostenok
  2023-08-29 10:17   ` [PATCH v9 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12514 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..87ac68ee24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VF’s are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather support for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple processes.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;
+
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;
+	pthread_t stat_thread;
+	pthread_t flm_thread;
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;
+	struct rte_pci_device *p_dev;
+	struct ntdrv_4ga_s ntdrv;
+
+	int n_eth_dev_init_count;
+	int probe_finished;
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+/*
+ * Split-ring "available" ring; packed because the device reads this
+ * layout directly from host memory.
+ */
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+/* One entry of the split-ring "used" ring. */
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+/*
+ * Split-ring "used" ring; packed so the device can write it directly
+ * in host memory.
+ */
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/*
+ * Byte offsets of the used ring and descriptor table from the start of
+ * the queue memory region (which begins with the avail ring), as
+ * computed by dbs_calc_struct_layout().
+ */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * Driver-side state for one DBS virt-queue (RX or TX).
+ *
+ * The anonymous union holds either the SPLIT-ring or the PACKED-ring
+ * pointers/bookkeeping; which member is valid is selected by vq_type.
+ * Instances live in the static rxvq[]/txvq[] arrays and are handed to
+ * callers as opaque queue handles.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;	/* < 0 means interrupts disabled */
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * NOTE(review): the comment below describes the 'header' field
+	 * above, not the phys-addr pointers that follow.
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/*
+ * Packed-ring counterpart of virtq_struct_layout_s: byte offsets of the
+ * driver/device event-suppression structures within the queue region.
+ */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one RX queue through the DBS.RX_INIT register:
+ * poll until no previous init is busy, issue the init command for
+ * @queue with the given start index/pointer, then poll again until the
+ * hardware reports completion.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Initialize one TX queue through the DBS.TX_INIT register; same
+ * busy-poll / command / busy-poll sequence as dbs_init_rx_queue().
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * One-time DBS (virt-queue doorbell) bring-up for an FPGA.
+ *
+ * Creates the nthw DBS module, resets it, initializes every RX/TX
+ * queue index, and steps the RX/TX control registers through
+ * disable -> AM/UW enable -> queue enable.
+ *
+ * On success p_fpga_info->mp_nthw_dbs holds the new module and 0 is
+ * returned; on failure the module is freed and a non-zero result (or
+ * -1 for allocation failure) is returned.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* Mark all queue slots free before any setup call can claim one */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Staged enable sequence: all off, then AM/UW on, then queues on */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute where the used ring and descriptor table start inside the
+ * queue memory region, with the avail ring at offset 0 and every
+ * region rounded up to STRUCT_ALIGNMENT.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	struct virtq_struct_layout_s layout;
+
+	/* Avail header plus one le16 ring entry per descriptor
+	 * ("avail->used_event" is not used).
+	 */
+	size_t avail_bytes =
+		sizeof(struct virtq_avail) + queue_size * sizeof(le16);
+
+	/* Used header plus one used-elem per descriptor
+	 * ("used->avail_event" is not used).
+	 */
+	size_t used_bytes =
+		sizeof(struct virtq_used) +
+		queue_size * sizeof(struct virtq_used_elem);
+
+	/* Round each region up to the next STRUCT_ALIGNMENT boundary. */
+	size_t avail_rounded =
+		(avail_bytes + STRUCT_ALIGNMENT - 1) /
+		STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+	size_t used_rounded =
+		(used_bytes + STRUCT_ALIGNMENT - 1) /
+		STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+
+	layout.used_offset = avail_rounded;
+	layout.desc_offset = avail_rounded + used_rounded;
+
+	return layout;
+}
+
+/*
+ * Initialize a split-ring avail structure: interrupts suppressed, the
+ * starting index as supplied, and the ring identity-mapped so entry i
+ * refers to descriptor i.
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = (struct virtq_avail *)addr;
+	uint16_t entry;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+
+	for (entry = 0; entry < queue_size; entry++)
+		avail->ring[entry] = entry;
+}
+
+/*
+ * Initialize a split-ring used structure: notifications suppressed,
+ * index zero, and all ring entries cleared.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	uint16_t i;	/* uint16_t: match queue_size and the avail initializer */
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	/* Use the named constant instead of the magic "1" */
+	p_used->flags = VIRTQ_USED_F_NO_NOTIFY;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Fill the split-ring descriptor table: each descriptor points at one
+ * packet buffer from @packet_buffer_descriptors and carries the
+ * caller-supplied flags. No-op when the buffer array is NULL.
+ *
+ * Bug fix: the flags parameter was declared with the undefined type
+ * "ule16"; this file defines only le64/le32/le16, so use le16.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, le16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+/*
+ * Initialize all three split-ring structures (avail, used, desc) for a
+ * virt-queue in one call.
+ *
+ * Bug fix: the flags parameter was declared with the undefined type
+ * "ule16"; this file defines only le64/le32/le16, so use le16.
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, le16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * floor(log2(qsize)) for a power-of-two queue size: count the bits
+ * needed to shift qsize down to zero, then subtract one.
+ * NOTE: qsize is expected to be non-zero (qsize == 0 wraps to 0xFFFF,
+ * matching the previous behaviour).
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t n_bits = 0;
+
+	for (; qsize != 0; qsize >>= 1)
+		++n_bits;
+
+	return n_bits - 1;
+}
+
+/*
+ * Configure one DBS RX virt-queue and return its handle.
+ *
+ * Programs the DR/UW/AM register memories per the DSF00094 sequence,
+ * initializes the queue, records the state in rxvq[index], and returns
+ * &rxvq[index]. Interrupts always start disabled and are enabled later
+ * via nthw_enable_rx_virt_queue() once vfio interrupts are set up.
+ *
+ * @return queue handle, or NULL on any register write failure.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	/* Bug fix: irq_vector is a signed int (may be -1); log with %d, not %u */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%d\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Quiesce an RX virt-queue: clear the interrupt setup in UW data,
+ * disable the avail-monitor (AM), then wait for the FPGA to drain.
+ * Only valid for UNMANAGED queues created by nthw_setup_rx_virt_queue().
+ *
+ * @return 0 on success, -1 on invalid queue or register failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-arm an RX virt-queue: program interrupt vector/ISTK in UW data
+ * (when a valid irq_vector is set) and re-enable the avail-monitor.
+ * Only valid for UNMANAGED queues created by nthw_setup_rx_virt_queue().
+ *
+ * @return 0 on success, -1 on invalid queue or register failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	/* Bug fix: irq_vector is a signed int (may be -1); log with %d, not %u */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%d\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Stop an unmanaged TX virt-queue.
+ * Mirrors nthw_disable_rx_virt_queue(): clear ISTK in UW_DATA, disable
+ * the available-ring monitor, then wait for the FPGA to drain the queue.
+ * Returns 0 on success, -1 on invalid queue or register-access error.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* Only queues in UNMANAGED state may be disabled through this API */
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re-)enable an unmanaged TX virt-queue.
+ * Programs UW_DATA with interrupt settings derived from irq_vector and
+ * re-enables the available-ring monitor (AM).
+ * Returns 0 on success, -1 on invalid queue or register-access error.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-program the TX descriptor-reader (DR) data with a new output port,
+ * then enable the queue.
+ * Returns 0 on success, -1 on invalid queue or register-access error.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Guard against NULL before dereferencing, consistent with the
+	 * other enable/disable entry points in this file.
+	 */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/*
+ * Configure per-port TX QoS shaping: enable flag, information rate (ir)
+ * and burst size (bs). Thin public wrapper around set_tx_qos_data().
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/*
+ * Set the global TX QoS rate scaling as multiplier/divider.
+ * Thin public wrapper around set_tx_qos_rate().
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read back the RX queue pointer previously requested via
+ * dbs_initialize_get_rx_ptr(). On success *p_index receives the pointer,
+ * or INDEX_PTR_NOT_VALID if the hardware has not produced a value yet.
+ * Returns the register access status (0 = OK); *p_index is untouched on
+ * failure.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+	int status;
+
+	status = get_rx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+	if (status != 0)
+		return status;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+/*
+ * Read back the TX queue pointer previously requested via
+ * dbs_initialize_get_tx_ptr(). On success *p_index receives the pointer,
+ * or INDEX_PTR_NOT_VALID if the hardware has not produced a value yet.
+ * Returns the register access status (0 = OK); *p_index is untouched on
+ * failure.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+	int status;
+
+	status = get_tx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+	if (status != 0)
+		return status;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+/* Request the hardware to latch the RX pointer for 'queue'; the value is
+ * later read back with dbs_get_rx_ptr(). Returns 0 on success.
+ */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Request the hardware to latch the TX pointer for 'queue'; the value is
+ * later read back with dbs_get_tx_ptr(). Returns 0 on success.
+ */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Busy-poll the DBS idle register until the hardware clears 'busy'.
+ * 'rx' selects the RX or TX register set; *idle receives the last idle
+ * status read. Returns the register access status (0 = OK).
+ * NOTE(review): spins with no timeout — relies on HW to clear busy.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Wait for the FPGA to finish processing and report the queue idle.
+ * Repeatedly requests idle status for vq->index and polls until the
+ * hardware reports idle. If the idle register is not supported
+ * (-ENOTSUP), fall back to a fixed 200 ms grace period and report
+ * success. Returns 0 when idle, -1 on any other error.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			/* No idle register support - allow HW time to drain */
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an RX virt-queue: clear UW, disable and clear AM, wait for
+ * the FPGA to drain, clear DR, re-init the queue and reset SW state.
+ * Returns 0 on success, -1 on any register-access error.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* NULL check must precede any dereference of rxvq */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged RX virt-queue; rejects NULL and managed queues. */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq && rxvq->usage == UNMANAGED)
+		return dbs_internal_release_rx_virt_queue(rxvq);
+
+	return -1;
+}
+
+/*
+ * Release a managed RX virt-queue: free the driver-side packet-buffer
+ * descriptor copy, then tear down the hardware queue.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+	if (rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a TX virt-queue: clear UW, disable and clear AM, wait for
+ * the FPGA to drain, clear DR and QP, re-init the queue and reset SW
+ * state. Returns 0 on success, -1 on any register-access error.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* NULL check must precede any dereference of txvq */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged TX virt-queue; rejects NULL and managed queues. */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq && txvq->usage == UNMANAGED)
+		return dbs_internal_release_tx_virt_queue(txvq);
+
+	return -1;
+}
+
+/*
+ * Release a managed TX virt-queue: free the driver-side packet-buffer
+ * descriptor copy, then tear down the hardware queue.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+	if (txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Set up an unmanaged TX virt-queue following the DBS bring-up sequence
+ * (DSF00094): program DR, UW and AM data, initialize the queue and the
+ * QP (virtual port) mapping, then record the SW queue state in the
+ * global txvq[] table. Interrupts are always configured disabled here;
+ * nthw_enable_tx_virt_queue() enables them later if irq_vector >= 0.
+ * Returns a handle into txvq[] on success, NULL on any register error.
+ * NOTE(review): 'index' is not range-checked against MAX_VIRT_QUEUES
+ * here — callers are assumed to pass a valid index.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings use the descriptor area address in UW_DATA */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/* Map the queue to its virtual port (QP data) */
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Set up a managed split-ring RX virt-queue: lay out the avail/used/desc
+ * structures in the provided memory area, pre-fill descriptors with the
+ * packet buffers (marked device-writable for RX), keep a driver-side
+ * copy of the buffer descriptors and program the hardware queue.
+ * Returns a handle into rxvq[] on success, NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check the allocation before copying into it */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Set up a managed split-ring TX virt-queue: lay out the avail/used/desc
+ * structures in the provided memory area, keep a driver-side copy of the
+ * packet-buffer descriptors and program the hardware queue.
+ * Returns a handle into txvq[] on success, NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Check the allocation before copying into it */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Common packed-ring setup for RX and TX: zero the structure area,
+ * compute the device/driver event-suppression offsets, pre-fill all
+ * descriptors (buffer id == index in p_packet_buffers, which requires
+ * in-order completion from the FPGA) and keep a driver-side copy of the
+ * packet-buffer descriptors.
+ * 'flags' is written into every descriptor; 'rx' selects RX (buffers
+ * made available) vs TX (free buffer IDs pre-filled) initialization.
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	/* Event-suppression structs live directly after the descriptor ring */
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Set up a managed packed-ring RX virt-queue: initialize the ring with
+ * all buffers made available to the device, then program the hardware
+ * queue with the driver/device event areas and the descriptor ring.
+ * Returns a handle into rxvq[] on success, NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Set up a managed packed-ring TX virt-queue: initialize the ring with
+ * free buffer IDs pre-filled, then program the hardware queue with the
+ * driver/device event areas and the descriptor ring.
+ * Returns a handle into txvq[] on success, NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+/*
+ * Create a managed RX virt-queue of the requested ring type.
+ * Dispatches to the split-ring or packed-ring implementation; any other
+ * vq_type yields NULL. The queue is created with interrupts disabled —
+ * call nthw_enable_rx_virt_queue() afterwards if interrupts are needed.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+/*
+ * Create a managed TX virt-queue of the requested ring type.
+ * Dispatches to the split-ring or packed-ring implementation; any other
+ * vq_type yields NULL. The queue is created with interrupts disabled —
+ * call nthw_enable_tx_virt_queue() afterwards if interrupts are needed.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Flag value marking a descriptor available under the current avail wrap */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse of the used flag for the current avail wrap (avail != used) */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, toggling avail_wrap_count on ring wrap.
+ * Arguments are captured in locals to avoid double evaluation.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, toggling used_wrap_count on ring wrap.
+ * Arguments are captured in locals to avoid double evaluation.
+ */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Harvest up to n completed RX entries from the used ring into rp[].
+ * For split rings, segmented (jumbo) packets are reassembled so that
+ * only whole packets are delivered; for packed rings, one descriptor is
+ * one entry (requires in-order completion from the FPGA).
+ * *nb_pkts receives the number of whole packets; the return value is
+ * the number of rp[] entries (segments) filled.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* All RX buffers share one size; use desc[0] as reference */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			/* Capture length from the packet header tells the
+			 * full packet size, spanning one or more buffers
+			 */
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Descriptor is completed when both flag bits match
+			 * the current used wrap counter
+			 */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+/*
+ * Return n consumed RX buffers to the device (available ring).
+ * Split ring: bump the avail index. Packed ring: re-arm n descriptors;
+ * the first descriptor's flags are written last so the device does not
+ * see a partially re-armed batch.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/* NOTE(review): rte_rmb() is a read barrier; publishing
+		 * descriptor flags to the device would normally call for a
+		 * write barrier (rte_wmb) — confirm intended semantics.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n free TX descriptors starting at *first_idx.
+ * Split ring: availability is derived from the cached used index
+ * (synchronized via the avail ring because of in-order completion).
+ * Packed ring: first hand out previously outstanding completions
+ * (txvq->outs), then walk the ring for newly completed descriptors;
+ * requires in-order completion from the FPGA.
+ * cvq receives the descriptor table view; *p_virt_addr the buffer table.
+ * Returns the number of descriptors reserved (0 for unknown ring type).
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Serve previously recorded outstanding completions first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* desc->id may be ahead of next_used; skip the gap */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		if (i > n) {
+			/* Collected more than asked for - record the surplus
+			 * as outstanding for the next call
+			 */
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled TX packets to the device; n_segs[i] is the number of
+ * descriptors (segments) packet i occupies.
+ * Split ring: publish one avail-ring entry per packet and bump the
+ * avail index after a full memory barrier. Packed ring: mark the
+ * descriptors available, writing the first descriptor's flags last so
+ * the device does not see a partially published batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* NOTE(review): comment says "read barrier" but publishing
+		 * flags to the device would normally call for a write
+		 * barrier (rte_wmb) — confirm intended semantics.
+		 */
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the hardware RX queue pointer for rxvq into *index.
+ * Requests the pointer, then polls (10 us interval, up to 100000 tries)
+ * until the hardware reports a valid value.
+ * Returns 0 on success, -1 on register error or timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	/* Fail fast if the pointer request itself cannot be issued */
+	if (dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index) != 0)
+		return -1;
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (rx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the hardware TX queue pointer for txvq into *index.
+ * Requests the pointer, then polls (10 us interval, up to 100000 tries)
+ * until the hardware reports a valid value.
+ * Returns 0 on success, -1 on register error or timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	/* Fail fast if the pointer request itself cannot be issued */
+	if (dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index) != 0)
+		return -1;
+	do {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (tx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/*
+ * Describes one host memory area: the address handed to HW descriptors
+ * (phys_addr), the driver-visible mapping (virt_addr) and its length.
+ */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/* Virtio wire-format field types; presumably "unsigned little-endian"
+ * N-bit - TODO confirm naming intent. No byte swapping is implied here.
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Values for the vq_type field (ring layout) and TX ordering mode */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ */
+/* Byte-packed to match the virtio split-ring wire layout exactly */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above (VIRTQ_DESC_F_*). */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned
+ * NOTE(review): #pragma pack(push, 16) only caps member alignment at 16
+ * bytes; it does not force 16-byte alignment of the structure address.
+ * The natural layout (8+4+2+2 = 16 bytes) already matches the packed
+ * virtqueue descriptor format - verify the allocation provides the
+ * required address alignment.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/*
+ * Packed-ring event suppression area: the first 16-bit word selects a
+ * descriptor offset/wrap for per-descriptor events, the second carries
+ * the RING_EVENT_FLAGS_* mode bits.
+ * NOTE(review): the #pragma pack(push, 16) below sits inside the struct
+ * definition; its effect at that point is compiler-specific - confirm
+ * the intended alignment of the second union.
+ */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descriptor helpers.
+ * Each macro dispatches on vq_type: split rings carry a per-descriptor
+ * "next" link, packed rings only carry flags. All arguments are captured
+ * in typeof-locals once so side-effecting argument expressions are not
+ * evaluated multiple times; vq_set_next previously used its index/nxt
+ * arguments directly, inconsistent with its two siblings.
+ */
+#define vq_set_next(_vq, _index, _nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_nxt) (nxt) = (_nxt); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Address/length prefix common to split and packed descriptors; the
+ * 16-byte alignment presumably lets it overlay either descriptor type -
+ * TODO confirm.
+ */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* Tagged view of a descriptor table: vq_type (SPLIT_RING/PACKED_RING)
+ * selects which union member is valid.
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..ce07d5a8cd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/*
+ * Hex-dump a packet segment to stdout, 16 bytes per row with a running
+ * offset column; 'text' (may be NULL) prefixes the dump together with
+ * the buffer address and length. Compiled in only under the
+ * *_DUMP_PKTS_DEBUG macros.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int x;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (x = 0; x < len; x++) {
+		if (!(x % 16))
+			printf("\n%04X:", x);
+		printf(" %02X", *(data + x));
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter's driver struct in the global g_p_drv[] table,
+ * indexed by adapter number. Overwriting an existing slot is logged as a
+ * warning; an out-of-range adapter number is logged and ignored.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/* g_p_drv[] has NUM_ADAPTER_MAX entries, so adapter_no equal to
+	 * NUM_ADAPTER_MAX is already out of bounds (the previous ">" test
+	 * was an off-by-one allowing a one-past-the-end write).
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up a registered adapter by PCI address under the hwlock.
+ * Matches on PCI domain and bus number only - device and function are
+ * ignored. Returns NULL when no registered adapter matches.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	int i;
+	struct drv_s *p_drv = NULL;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		if (g_p_drv[i]) {
+			if (PCIIDENT_TO_DOMAIN(g_p_drv[i]->ntdrv.pciident) ==
+					addr.domain &&
+					PCIIDENT_TO_BUSNR(g_p_drv[i]->ntdrv.pciident) ==
+					addr.bus) {
+				p_drv = g_p_drv[i];
+				break;
+			}
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_drv;
+}
+
+/*
+ * Unpack a 32-bit pciident into an rte_pci_addr and look the adapter up
+ * via get_pdrv_from_pci(). May return NULL when no adapter matches.
+ */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr;
+
+	addr.domain = PCIIDENT_TO_DOMAIN(pciident);
+	addr.bus = PCIIDENT_TO_BUSNR(pciident);
+	addr.devid = PCIIDENT_TO_DEVNR(pciident);
+	addr.function = PCIIDENT_TO_FUNCNR(pciident);
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter debug info for the adapter identified by pciident to pfh.
+ * Returns -1 when no registered adapter matches (get_pdrv_from_pciident()
+ * returns NULL in that case and was previously dereferenced unchecked);
+ * otherwise returns the result of nt4ga_adapter_show_info().
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1;
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS module handle for the adapter at pci_addr, or NULL
+ * (with an error log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	}	else {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+	}
+	return p_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at pci_addr, or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when the adapter is not
+ * registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	enum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		fpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+	}
+	return fpga_profile;
+}
+
+/*
+ * rte_kvargs handler: parse value_str (base auto-detected by strtol, so
+ * "0x..." works) into the uint32_t pointed to by extra_args.
+ * Returns 0 on success, -1 on NULL arguments.
+ * NOTE(review): strtol failure/overflow is not detected - malformed input
+ * silently yields 0 or a truncated value; confirm this is acceptable for
+ * devargs parsing.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	const uint32_t value = strtol(value_str, NULL, 0);
+	*(uint32_t *)extra_args = value;
+	return 0;
+}
+
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+/* Parse <port>:<link speed Mbps>, e.g 1:10000.
+ * rte_kvargs handler: fills the next struct port_link_speed entry pointed
+ * to by *extra_args and advances that pointer, so repeated keys append to
+ * the caller's array. Returns 0 on success, -1 on NULL args or when the
+ * separator (a colon, despite the variable name "semicol") is missing.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill 'stats' with the current per-queue and aggregate Rx/Tx counters
+ * for this port. Refreshes the counters via poll_statistics() first,
+ * then copies per-queue values (capped at RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ * and sums them into the port totals. Returns 0 on success, -1 when the
+ * stat modules are absent or if_index is out of range.
+ * NOTE(review): the bound check uses "> NUM_ADAPTER_PORTS_MAX"; if
+ * if_index is a zero-based array index this should likely be ">=" -
+ * confirm against the per-port array sizes.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue Rx/Tx software counters and the rx_missed counter
+ * for this port, under the driver's stat lock, and record the reset time.
+ * Returns 0 on success, -1 when the stat modules are absent or n_intf_no
+ * is out of range.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/*
+ * Map a Napatech link-speed enum to the corresponding DPDK
+ * ETH_SPEED_NUM_* value; unknown values map to ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	int eth_speed_num = ETH_SPEED_NUM_NONE;
+
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		eth_speed_num = ETH_SPEED_NUM_10M;
+		break;
+	case NT_LINK_SPEED_100M:
+		eth_speed_num = ETH_SPEED_NUM_100M;
+		break;
+	case NT_LINK_SPEED_1G:
+		eth_speed_num = ETH_SPEED_NUM_1G;
+		break;
+	case NT_LINK_SPEED_10G:
+		eth_speed_num = ETH_SPEED_NUM_10G;
+		break;
+	case NT_LINK_SPEED_25G:
+		eth_speed_num = ETH_SPEED_NUM_25G;
+		break;
+	case NT_LINK_SPEED_40G:
+		eth_speed_num = ETH_SPEED_NUM_40G;
+		break;
+	case NT_LINK_SPEED_50G:
+		eth_speed_num = ETH_SPEED_NUM_50G;
+		break;
+	case NT_LINK_SPEED_100G:
+		eth_speed_num = ETH_SPEED_NUM_100G;
+		break;
+	default:
+		eth_speed_num = ETH_SPEED_NUM_NONE;
+		break;
+	}
+
+	return eth_speed_num;
+}
+
+/*
+ * Map a Napatech duplex enum to the DPDK ETH_LINK_*_DUPLEX value;
+ * unknown/UNKNOWN map to 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	int eth_link_duplex = 0;
+
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		break;
+	case NT_LINK_DUPLEX_HALF:
+		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		break;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		break;
+	}
+	return eth_link_duplex;
+}
+
+/*
+ * ethdev .link_update callback. For a stopped port everything is reported
+ * as link-down. For virtual/override ports the link status reflects only
+ * whether virtio negotiation has completed (speed is reported as NONE).
+ * For physical ports, status/speed/duplex are read from the adapter via
+ * the nt4ga_port_* accessors. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * ethdev .stats_get callback - thin wrapper around dpdk_stats_collect().
+ * NOTE(review): the collector's return value (-1 on invalid state) is
+ * discarded and 0 is always returned - confirm whether errors should
+ * propagate to the ethdev layer.
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	dpdk_stats_collect(internals, stats);
+	return 0;
+}
+
+/*
+ * ethdev .stats_reset callback - thin wrapper around dpdk_stats_reset()
+ * for this port's if_index. The reset's return value is discarded and 0
+ * is always returned.
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	dpdk_stats_reset(internals, p_nt_drv, if_index);
+	return 0;
+}
+
+/*
+ * Translate a bitmask of NT_LINK_SPEED_* capability bits into the
+ * corresponding DPDK ETH_LINK_SPEED_* capability bitmask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	uint32_t eth_speed_capa = 0;
+
+	if (nt_link_speed_capa & NT_LINK_SPEED_10M)
+		eth_speed_capa |= ETH_LINK_SPEED_10M;
+	if (nt_link_speed_capa & NT_LINK_SPEED_100M)
+		eth_speed_capa |= ETH_LINK_SPEED_100M;
+	if (nt_link_speed_capa & NT_LINK_SPEED_1G)
+		eth_speed_capa |= ETH_LINK_SPEED_1G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_10G)
+		eth_speed_capa |= ETH_LINK_SPEED_10G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_25G)
+		eth_speed_capa |= ETH_LINK_SPEED_25G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_40G)
+		eth_speed_capa |= ETH_LINK_SPEED_40G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_50G)
+		eth_speed_capa |= ETH_LINK_SPEED_50G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_100G)
+		eth_speed_capa |= ETH_LINK_SPEED_100G;
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * ethdev .dev_infos_get callback: report queue counts, MTU limits
+ * (inline FPGA profile uses a larger minimum MTU), link-speed
+ * capabilities and RSS offload capabilities for this port.
+ * Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use key, so it is just a fake key length to
+	 * fit testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet from a chain of virtqueue segments into an
+ * mbuf, chaining additional mbufs from mb_pool when the packet does not
+ * fit in one. The first virtqueue segment starts with an SG_HDR_SIZE
+ * header which is skipped; following segments are pure data.
+ *
+ * @param mbuf     first (already allocated) destination mbuf
+ * @param mb_pool  pool used to allocate continuation mbufs
+ * @param hw_recv  array of virtqueue segments, first one holds the header
+ * @param max_segs number of virtqueue segments available for this packet
+ * @param data_len total captured length including the SG header
+ * @return number of virtqueue segments consumed, or -1 on allocation
+ *         failure or if the packet needs more than max_segs segments
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* one iteration per consumed virtqueue segment */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		if (cpy_size > cpto_size) {
+			/* current mbuf cannot hold the whole segment: fill it,
+			 * then chain new mbufs for the remainder
+			 */
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one mbuf chain into TX virtqueue buffers starting at descriptor
+ * vq_descr_idx.  Both sides may be segmented: the mbuf may consist of
+ * several segments, and one virtqueue buffer (SG_HW_TX_PKT_BUFFER_SIZE)
+ * may be too small to hold a whole mbuf segment.  Descriptors are chained
+ * with VIRTQ_DESC_F_NEXT as buffers fill up.
+ *
+ * The first buffer is assumed to already contain the SG_HDR_SIZE packet
+ * header written by the caller; copying starts right after it.
+ *
+ * Returns the number of virtqueue segments consumed, or -1 if the packet
+ * would need more than max_segs segments.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	/* remain: bytes left of the whole packet; cpy_size: bytes left of the current mbuf seg */
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	/* First descriptor length starts at the header size; data lengths accumulate onto it */
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			/* Source segment does not fit - fill the current vq buffer ... */
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* ... then chain to the next descriptor and continue there */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/*
+				 * NOTE(review): on overflow this leaves the loops with stale
+				 * dst/cpto_size; correctness relies on the final cur_seg_num
+				 * check below returning -1 - confirm no partial copy escapes.
+				 */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				/* Fresh buffer: no reserved header space past the first segment */
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len promised more data than the segment chain delivered */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* cur_seg_num was a 0-based index; convert to a count before range check */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * TX burst function (scatter/gather path).
+ *
+ * Copies up to nb_pkts mbufs into HW virtqueue buffers and releases them to
+ * the NIC.  Packets are copied (not zero-copy): each packet gets an
+ * SG_HDR_SIZE header in its first virtqueue buffer and is split across as
+ * many SG_HW_TX_PKT_BUFFER_SIZE buffers as NUM_VQ_SEGS() computes.
+ * Transmitted mbufs are freed here.  Returns the number of packets sent,
+ * which may be less than nb_pkts under HW back pressure.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* Driver is shutting down - refuse new work */
+	if (kill_pmd)
+		return 0;
+
+	/* nb_segs_arr is stack-sized; clamp the burst accordingly */
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad runt frames up to 60 bytes (min Ethernet size w/o FCS).
+		 * NOTE(review): the padding bytes are not zeroed, so trailing
+		 * mbuf memory leaks onto the wire - confirm acceptable.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		/* Drop whole packets from the tail until the burst fits */
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* Fast path: single mbuf segment fitting in a single vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* Segmented path - chain as many vq descriptors as required */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			/*
+			 * NOTE(review): on error the current mbuf is neither sent
+			 * nor freed here - confirm the caller retries/frees it.
+			 */
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* Hand the filled descriptors to HW; per-packet seg counts drive the release */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO-map the memory backing one HW virtio queue:
+ * 1 MB for the combined descriptor rings plus num_descr packet buffers of
+ * buf_size bytes each.
+ *
+ * Fast path: one contiguous allocation mapped in a single 1 G VFIO mapping;
+ * requires that the allocation does not cross a 1 G boundary and that HPA
+ * and IOVA share the same offset within the 1 G page.  If that cannot be
+ * satisfied, falls back to two separate page-aligned allocations with
+ * individual mappings.
+ *
+ * Returns 0 on success, -1 on failure.  On failure all partial allocations
+ * made here are freed (fixes leaks in the original on nt_vfio_dma_map
+ * failure paths).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Just allocate 1MB to hold all combined descr rings */
+		size = 0x100000;
+		/* renamed from "virt" to avoid shadowing the (freed) outer pointer */
+		void *virt_fb = rte_malloc_socket("VirtQDescr", size, 4096,
+						  eth_dev->data->numa_node);
+		if (!virt_fb)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt_fb, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			/* fix: do not leak the ring allocation on map failure */
+			rte_free(virt_fb);
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt_fb;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt_fb, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt_fb);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt_fb);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* fix: free every fallback allocation on map failure */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt_fb);
+			return -1;
+		}
+
+		/* Carve the packet-buffer area into fixed-size descriptors */
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		/* fix: do not leak the combined allocation on map failure */
+		rte_free(virt);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	/* Packet buffers start right after the 1 MB ring area */
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Mark the HW virtqueue as no longer owned by a VF (no memory is freed here). */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq && hwq->vf_num != 0)
+		hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): VFIO-unmap the control area, release the
+ * VF ownership mark and free the backing memory.
+ * Returns 0 on success, -1 if the VFIO unmap fails (in which case nothing is
+ * freed).
+ *
+ * NOTE(review): always unmaps ONE_G_SIZE, but the fallback path in
+ * allocate_hw_virtio_queues() maps two smaller regions - confirm the unmap
+ * size and that the second (packet buffer) mapping is not leaked.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	int vf_num = hwq->vf_num;
+
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+
+	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+/* ethdev op: release the HW virtio resources behind one TX queue. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* ethdev op: release the HW virtio resources behind one RX queue. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Number of HW queues handed out so far (monotonic, never returned). */
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	const int first_free = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+
+	if (MAX_TOTAL_QUEUES - num_queues_allocated < num)
+		return -1;
+
+	num_queues_allocated += num;
+	return first_free;
+}
+
+/*
+ * ethdev op: set up one RX queue (scatter/gather path).
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool binding is recorded; for all
+ * other port types a HW virtio queue is allocated/mapped and a managed split
+ * ring is created on top of it.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports bypass HW queue setup - just bind the mempool */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* Usable data room per mbuf = pool data room minus the headroom */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	/* NOTE(review): return value is not checked - rx_q->vq may be NULL; confirm callers tolerate this */
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * ethdev op: set up one TX queue (scatter/gather path).
+ *
+ * For PORT_TYPE_OVERRIDE ports only the queue pointer is recorded.  For all
+ * other port types this computes the HW bypass target id, allocates/maps the
+ * backing HW virtio queue, creates a managed split ring on top of it and
+ * wires exception-path queue-to-vport mappings.  Physical ports are
+ * administratively enabled once their first TX queue is ready.
+ * Returns 0 on success, -1 on invalid queue id or allocation failure.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * fix: validate the queue id BEFORE indexing txq_scg[] and use >=
+	 * (ids are 0-based, so id == nb_tx_queues is already out of range).
+	 */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	/* Override ports bypass HW queue setup */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* Route all virtual-port queues of this port to its exception vport */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ethdev helper: set the inline-path MTU.
+ * Only valid on physical ports with MIN_MTU_INLINE <= mtu <= MAX_MTU;
+ * anything else yields -EINVAL.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+
+	if (internals->type != PORT_TYPE_PHYSICAL || mtu < MIN_MTU_INLINE ||
+			mtu > MAX_MTU)
+		return -EINVAL;
+
+	return flow_set_mtu_inline(flw_dev, internals->port, mtu) ? -EINVAL : 0;
+}
+
+/*
+ * ethdev op: set the MTU of a port via the EPP block.
+ *
+ * Virtual ports: the exception-path queue is pinned to MAX_MTU and the
+ * requested mtu is applied to every virtual-port queue.
+ * Physical ports: the exception-path queue is pinned to MAX_MTU and the
+ * requested mtu is applied to the physical port itself.
+ * Returns 0 on success; -EINVAL for an out-of-range mtu or unsupported port
+ * type; otherwise whatever nthw_epp_set_mtu() returned.
+ *
+ * NOTE(review): error sign convention is mixed - this function returns
+ * -EINVAL directly but propagates nthw_epp_set_mtu() results unmodified;
+ * confirm those are also negative errno values.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int retval = EINVAL;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* Apply the requested MTU to every virtual-port queue */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		/* PORT_TYPE_OVERRIDE and anything else: not supported */
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* ethdev op: flag an RX queue as started (state bookkeeping only). */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* ethdev op: flag an RX queue as stopped (state bookkeeping only). */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * ethdev op: flag a TX queue as started (state bookkeeping only).
+ * fix: parameter renamed from the misleading rx_queue_id to tx_queue_id -
+ * it indexes the TX queue state array.
+ */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/*
+ * ethdev op: flag a TX queue as stopped (state bookkeeping only).
+ * fix: parameter renamed from the misleading rx_queue_id to tx_queue_id -
+ * it indexes the TX queue state array.
+ */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* ethdev op: clear the MAC address slot at 'index' (no HW programming). */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+		return;
+	}
+
+	/* Out-of-range slot in release builds: log and ignore */
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+}
+
+/* ethdev op: store a MAC address in slot 'index' (no HW programming). */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		eth_addrs[index] = *mac_addr;
+		return 0;
+	}
+
+	/* Out-of-range slot in release builds: log and report failure */
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+	return -1;
+}
+
+/* ethdev op: set the primary MAC address (slot 0, no HW programming). */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * ethdev op: replace the multicast address list of the port.
+ * The first nb_mc_addr slots are copied from mc_addr_set; the remaining
+ * slots (up to NUM_MULTICAST_ADDRS_PER_PORT) are zeroed.
+ * Returns 0 on success, -1 if the list is too long.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	/*
+	 * fix: the original used >=, wrongly rejecting a list of exactly
+	 * NUM_MULTICAST_ADDRS_PER_PORT entries even though the mc_addrs
+	 * array has room for all of them.
+	 */
+	if (nb_mc_addr > NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (> %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * ethdev op: device configure hook.
+ * Marks probing finished and forces promiscuous mode on (the HW always
+ * runs promiscuous).
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode.
+	 * fix: the original "promiscuous ^= ~promiscuous" set every bit of the
+	 * field (0xff), not the canonical flag value 1.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * ethdev op: start the port.
+ * Virtual/override ports are simply reported link-up.  Physical ports wait
+ * up to ~5 s for FPGA link before returning (starting traffic earlier
+ * produces garbage on the wire) and then apply any configured loopback mode.
+ * Always returns 0, even when the link never came up.
+ *
+ * NOTE(review): uses the legacy ETH_LINK_UP/DOWN names - confirm against the
+ * DPDK version targeted (renamed RTE_ETH_LINK_* in recent releases).
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		/* lpbk_mode is a bitmask: bit 0 = host loopback, bit 1 = line loopback */
+		if (internals->lpbk_mode) {
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
+		struct ntnic_tx_queue *tx_q = internals->txq_scg;
+
+		uint q;
+
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(rx_q[q].vq);
+
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(tx_q[q].vq);
+
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
/* Hand out this PMD's rte_flow operations table; always succeeds. */
static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_ops **ops)
{
	*ops = &_dev_flow_ops;
	return 0;
}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * DPDK ethdev operations table for the ntnic PMD.
 * Deliberately NOT const: .mtu_set is patched in later during probe when
 * the FPGA exposes an EPP module (see nthw_pci_dev_init).
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL,	/* set at probe time if EPP is present */
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL,	/* promiscuous mode is always on */
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
/*
 * Adapter stat thread
 *
 * Periodically triggers a HW statistics DMA and collects the result under
 * the stat lock.  If the DMA timestamp stays at -1 (DMA frozen), RMC error
 * registers are dumped periodically for diagnosis.  Runs until
 * ntdrv.b_shutdown is set.
 */
static void *adapter_stat_thread_fn(void *context)
{
	struct drv_s *p_drv = context;
	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;

	const char *const p_adapter_id_str _unused =
		p_nt_drv->adapter_info.mp_adapter_id_str;

	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);

	assert(p_nthw_stat);

	while (!p_drv->ntdrv.b_shutdown) {
		/* One statistics cycle every ~10 ms. */
		usleep(100 * 100);

		/* Kick off the HW statistics DMA transfer. */
		nthw_stat_trigger(p_nthw_stat);

		uint32_t loop = 0;

		/*
		 * Busy-wait (100 us steps) until the DMA writes a valid
		 * timestamp; -1 means the transfer has not completed.
		 */
		while ((!p_drv->ntdrv.b_shutdown) &&
				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
			usleep(1 * 100);

			/*
			 * Roughly every 16384 iterations (~1.6 s) in debug
			 * mode, dump RMC status registers to help diagnose a
			 * frozen statistics DMA.
			 */
			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
					(++loop & 0x3fff) == 0) {
				uint32_t sf_ram_of =
					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t descr_fifo_of =
				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);

				uint32_t dbg_merge =
					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t mac_if_err =
					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);

				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
				NT_LOG(ERR, ETHDEV,
				       "SF RAM Overflow     : %08x\n",
				       sf_ram_of);
				NT_LOG(ERR, ETHDEV,
				       "Descr Fifo Overflow : %08x\n",
				       descr_fifo_of);
				NT_LOG(ERR, ETHDEV,
				       "DBG Merge           : %08x\n",
				       dbg_merge);
				NT_LOG(ERR, ETHDEV,
				       "MAC If Errors       : %08x\n",
				       mac_if_err);
			}
		}

		/* Check then collect */
		{
			pthread_mutex_lock(&p_nt_drv->stat_lck);
			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
			pthread_mutex_unlock(&p_nt_drv->stat_lck);
		}
	}

	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);

	return NULL;
}
+
/*
 * Shared state used while creating representor ports.
 * NOTE(review): field roles below are inferred from the names; confirm
 * against setup_virtual_pf_representor_base() and the probe path.
 */
static struct {
	struct rte_pci_device *vpf_dev;		/* backing PF PCI device (presumably) */
	struct rte_eth_devargs eth_da;		/* parsed representor devargs */
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];	/* queue count per virtual port */
	uint16_t pf_backer_port_id;		/* ethdev port id of the backing PF */
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id = start_queue + i;
+			queue_ids[i].hw_id = queue_ids[i].id;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Device deinit callback: release every virt-queue owned by the PMD and
+ * detach the exception-path VFIO mapping.  The eth_dev argument is unused;
+ * teardown walks the global pmd_intern_base list instead, so a single call
+ * cleans up all ports.  Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Per-port SCG queues: release the managed Tx/Rx virt-queues and
+	 * their backing virtio HW resources, highest index first.
+	 */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Queues recorded via register_release_virtqueue_info(): release each
+	 * according to its stored direction (rx) and managed/unmanaged kind,
+	 * then clear the slot so a second deinit is a no-op.
+	 */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: flag the shutdown thread instead of terminating directly.
+ * Any other signal is handed back to the previously installed handler and
+ * re-raised so default behavior is preserved.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		kill_pmd = 1;
+	} else {
+		signal(sig, previous_handler);
+		raise(sig);
+	}
+}
+
+/*
+ * Thread that waits for the SIGINT handler to set kill_pmd, then runs the
+ * full PMD teardown and re-raises SIGINT with the original handler restored
+ * so the application still terminates normally.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	/* Poll the flag every 100 ms; it is set by signal_handler_func_int(). */
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	/* The argument is unused by nthw_pci_dev_deinit(). */
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT-driven shutdown machinery and calibrate a coarse
+ * 1-second TSC interval used to rate-limit statistics polling.
+ * Returns 0 on success, -1 if the shutdown thread could not be created.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		/* No shutdown thread will run: restore the original handler
+		 * so SIGINT behavior is unchanged.
+		 */
+		signal(SIGINT, previous_handler);
+		NT_LOG(ERR, ETHDEV, "%s: failed to create shutdown thread\n",
+		       __func__);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point.  Verifies that the EAL environment can support the
+ * PMD (PCI enabled, VFIO with IOMMU, hugepages; warns when the IOVA mode is
+ * not PA), skips PCIe-bifurcation secondary endpoints, then performs device
+ * init and arms the SIGINT shutdown handler.
+ * Returns the nthw_pci_dev_init() result, or -1 on a failed precondition.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* Environment checks: each failed hard requirement aborts the probe. */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	/* Non-PA IOVA mode is tolerated, only warned about. */
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/* NT200A01/NT50B01: only subsystem id 0x01 (the primary endpoint of
+	 * a PCIe bifurcation pair) is probed; secondary endpoints are skipped.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove callback: generic ethdev removal with the PMD's deinit hook. */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+}
+
+/*
+ * Register one EAL logtype per NTNIC log module and record the returned
+ * type ids in nt_log_module_logtype[].  Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END;
+			++module) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+		const char *eal_name = nt_log_module_eal_name[idx];
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(eal_name,
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Bridge an NT_LOG message into rte_vlog: translate the NTNIC severity to
+ * an RTE log level and the NTNIC module id to its registered EAL logtype
+ * (ids outside the known module range are passed through unchanged).
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END)
+		rte_module =
+			(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	else
+		rte_module = module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/*
+ * Report whether DEBUG level is enabled for an NTNIC log module:
+ * 1 if enabled, 0 if not, -1 when the module id is out of range.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+
+	return rte_log_get_level(nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)]) ==
+	       RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* EAL constructor: plug the rte_log-backed implementation into the NT_LOG
+ * facility before any driver code logs.
+ */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver registration: requires BAR mapping and binds to vfio-pci. */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record @vq in the first free slot of the deferred-release table so
+ * nthw_pci_dev_deinit() can release it later.  @rx and @managed select
+ * which release routine will be used.
+ * Returns 0 on success, -1 when the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Remove @vq from the deferred-release table.
+ * Returns 0 when found and cleared, -1 when @vq is not registered.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+	return -1;
+}
+
+/* Find the pmd_internals instance whose vhost id matches @vhid, or NULL. */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *p;
+
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Find the pmd_internals instance whose vhost socket path equals @path,
+ * or NULL when no instance matches.
+ * Fix: the search trace went straight to stdout via printf() and strcmp()
+ * was evaluated twice per node; log through NT_LOG and compare once.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		int cmp = strcmp(intern->vhost_path, path);
+
+		NT_LOG(DBG, VDPA,
+		       "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path, cmp);
+		if (cmp == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "port:queues" mapping of the form "[p0:n0,p1:n1;...]" and store
+ * the queue counts in portq[], indexed by port.  The input string is
+ * modified in place.  Entries with an out-of-range port or queue count are
+ * ignored, as is the whole string when the brackets are missing.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	char *tok;
+
+	/* Everything of interest lives between the first '[' ... */
+	char *open_br = strchr(str, '[');
+
+	if (open_br == NULL)
+		return;
+	str = open_br + 1;
+
+	/* ... and the last ']', which is replaced by a terminator. */
+	char *close_br = strrchr(str, ']');
+
+	if (close_br == NULL)
+		return;
+	*close_br = '\0';
+
+	for (tok = strtok(str, ",;"); tok != NULL; tok = strtok(NULL, ",;")) {
+		char *sep = strchr(tok, ':');
+
+		if (sep == NULL)
+			continue;
+		*sep = '\0';
+
+		int port = atoi(tok);
+		int nvq = atoi(sep + 1);
+
+		if (port >= 0 &&
+				port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+				nvq > 0 && nvq < MAX_QUEUES)
+			portq[port] = nvq;
+	}
+}
+
+/*
+ * Parse the PF devargs for representor ports and an optional
+ * "portqueues=[port:queues,...]" mapping.  On success the PF device and
+ * parsed devargs are recorded in the global 'rep' state and the number of
+ * requested representor ports is returned (0 when none); -1 on parse error.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			/* rte_eth_devargs_parse() returns a negative errno */
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a VF/representor port.  Marks it as a representor,
+ * binds it either to the PF device (when representor devargs were parsed
+ * into 'rep') or to the VF's own PCI device, and reports via *n_vq how many
+ * virt-queues the port should get ("portqueues" mapping, default 1).
+ * Returns NULL on allocation failure.
+ * NOTE(review): 'next_rep_p' is static mutable state - this presumably
+ * assumes representor ports are probed sequentially from one thread;
+ * confirm.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Representors configured on the PF take their ids from the parsed
+	 * devargs, in order; otherwise the VF number is used directly.
+	 */
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the device name of @dev, or NULL when @dev or the name is unset. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	if (dev == NULL || dev->device.name == NULL)
+		return NULL;
+
+	return dev->device.name;
+}
+
+/* kvargs keys accepted by the virtual-port devargs parser. */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and initialize one virtual-port/representor ethdev on top of the
+ * PF device @vdev: parse the optional "vlan" and "sep" devargs, allocate
+ * the ethdev plus its private pmd_internals, carve out HW queues, attach
+ * the port to the flow device and start the vDPA relay.
+ * On success *eth_dev points at the new port and 0 is returned; on failure
+ * the locally allocated data/internals are freed and -1 is returned.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* NOTE(review): vdev was already dereferenced above, so the NULL
+	 * check here is redundant.
+	 */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+		/* NOTE(review): kvlist is never freed (rte_kvargs_free) on
+		 * any path - a small one-time leak per port; confirm and fix.
+		 */
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* NOTE(review): this early return bypasses the 'error' cleanup and
+	 * leaks the allocated ethdev and 'data'; presumably should be
+	 * 'goto error' - confirm.
+	 */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	/* vSwitch profile: create a fresh flow device for this port.
+	 * Inline/override profile: borrow the flow device of the underlying
+	 * physical in-port and add this port's queues to it.
+	 */
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* NOTE(review): sprintf into path[128] is unbounded - prefer
+	 * snprintf with truncation check.
+	 */
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Replace the ethdev's data area with a private copy that points at
+	 * our internals and exposes a single (exception) Rx/Tx queue pair.
+	 */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append to the global pmd_intern_base list. */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	/* NOTE(review): the ethdev allocated above is not released here
+	 * (no rte_eth_dev_release_port) - verify intended ownership.
+	 */
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * Rx burst stub for PORT_TYPE_OVERRIDE ports: such ports cannot receive
+ * data through SCG as the queues are going to VF/vDPA, so always report
+ * zero packets received.
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * Tx burst stub for PORT_TYPE_OVERRIDE ports: such ports cannot transmit
+ * data through SCG as the queues are coming from VF/vDPA, so always report
+ * zero packets sent.
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * Create the DPDK ethdev for a virtual-function/representor port and
+ * install its Rx/Tx burst handlers.  PORT_TYPE_OVERRIDE ports get no-op
+ * handlers because their traffic flows through VF/vDPA, not SCG.
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev;
+	struct pmd_internals *internals;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	const int use_dummy = (internals->type == PORT_TYPE_OVERRIDE);
+
+	eth_dev->rx_pkt_burst = use_dummy ? eth_dev_rx_scg_dummy :
+			       eth_dev_rx_scg;
+	eth_dev->tx_pkt_burst = use_dummy ? eth_dev_tx_scg_dummy :
+			       eth_dev_tx_scg;
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the ethdev created for a VF/representor port: close all vDPA
+ * devices, free the private data and the data struct allocated in
+ * rte_pmd_vp_init_internals(), and release the port.
+ * Returns 0 on success, -1 when @pci_dev is NULL or no ethdev is found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* look up the ethdev entry allocated for this device */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* No-op LAG transition action: nothing to reconfigure. Always returns 0. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links up: split the Tx hash distribution 50/50 across the ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 up: direct the full Tx distribution to port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 up: direct the full Tx distribution to port 1. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/*
+ * Both links down: stop all Tx by zeroing the weights of both the port-0
+ * and hash outputs.  Returns the sum of the two lag_set_config() results
+ * (0 when both succeed).
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	int retval;
+
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	retval = lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX,
+				LAG_NO_TX);
+	retval += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX,
+				 LAG_NO_TX);
+	return retval;
+}
+
+/* Query and log the link status of @port on the LAG adapter. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *adapter =
+		&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool up = nt4ga_port_get_link_status(adapter, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, up);
+	return up;
+}
+
+/*
+ * Combine both link states into a 2-bit value matching enum lag_state_e:
+ * port 0 in bit 0, port 1 in bit 1.
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t p0 = lag_get_link_status(config, 0);
+	const uint8_t p1 = lag_get_link_status(config, 1);
+
+	return (uint8_t)((p1 << 1) | p0);
+}
+
+/*
+ * Make the configured primary port the active one: give it the full hash
+ * distribution and block Rx on the other (backup) port.
+ * Returns the sum of the underlying lag_set_* results (0 on success).
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	const int primary_is_port0 = (config->primary_port == 0);
+	/* The active primary owns 100% of the hash distribution; when port 1
+	 * is primary, port 0 gets 0%.
+	 */
+	const uint8_t port_0_distribution = primary_is_port0 ? 100 : 0;
+	const uint8_t blocked_port = primary_is_port0 ? LAG_PORT1_INDEX :
+				    LAG_PORT0_INDEX;
+	int retval;
+
+	retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Make the configured backup port the active one: route all Tx through it
+ * via the hash distribution and block Rx on the primary port.
+ * Returns the sum of the underlying lag_set_* results (0 on success).
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	const int backup_is_port0 = (config->backup_port == 0);
+	/* The active backup owns 100% of the hash distribution; when port 1
+	 * is backup, port 0 gets 0%.
+	 */
+	const uint8_t port_0_distribution = backup_is_port0 ? 100 : 0;
+	const uint8_t blocked_port = backup_is_port0 ? LAG_PORT1_INDEX :
+				    LAG_PORT0_INDEX;
+	int retval;
+
+	/* Tx only on the backup port */
+	retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup LAG monitor loop: start with the primary port active, then
+ * every 500 ms fail over to the backup when the primary link drops and fail
+ * back when it returns.  Runs until lag_thread_active is cleared; always
+ * returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	/* Tracks whether the backup port currently carries the traffic. */
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB.
+ * Values are the 2-bit link-state masks produced by lag_get_status();
+ * written in hex for portability ('0b' literals are a GCC extension,
+ * standard only since C23).
+ */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0x0,	/* 0b00 */
+	P0UP_P1DOWN = 0x1,	/* 0b01 */
+	P0DOWN_P1UP = 0x2,	/* 0b10 */
+	P0UP_P1UP = 0x3		/* 0b11 */
+};
+
+/* One entry of the active/active dispatch table: the action to run when
+ * the observed link state changes from src_state to dst_state.
+ */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/*
+ * Active/active state-transition table: one action per (src, dst) pair of
+ * LAG link states.  All 16 combinations are listed, so lookup_action()
+ * never returns NULL for states produced by lag_get_status().
+ * NOTE(review): could be 'static const' - it appears to be file-local and
+ * is never modified; confirm no other translation unit references it.
+ */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Return the action registered for the (current_state -> new_state)
+ * transition, or NULL when the transition is not in the table.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const uint32_t n_actions =
+		sizeof(actions) / sizeof(struct lag_action_s);
+	uint32_t idx;
+
+	for (idx = 0; idx < n_actions; idx++) {
+		const struct lag_action_s *entry = &actions[idx];
+
+		if (entry->src_state == current_state &&
+				entry->dst_state == new_state)
+			return entry->action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active LAG monitor loop: start balanced 50/50 with no ports
+ * blocked, then every 500 ms look up and run the action for the observed
+ * link-state transition.  Runs until lag_thread_active is cleared; always
+ * returns 0.
+ * Fix: guard the action call - lookup_action() returns NULL for unknown
+ * transitions and the result was invoked unconditionally.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/*
+		 * The table covers all 16 transitions, so action should never
+		 * be NULL - but guard the call so a future table edit cannot
+		 * become a NULL-pointer dereference.
+		 */
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry point: dispatch on the configured bonding
+ * mode and run the matching monitor loop until it returns.  Unsupported
+ * modes are reported on stderr and the thread exits immediately.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	if (config->mode == BONDING_MODE_ACTIVE_BACKUP) {
+		lag_active_backup(config);
+	} else if (config->mode == BONDING_MODE_8023AD) {
+		lag_active_active(config);
+	} else {
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+/*
+ * DPDK 22.07 renamed the ethdev ETH_* macros to RTE_ETH_*. Map the legacy
+ * names used throughout this driver onto the new ones when building against
+ * DPDK >= 22.07. Every name is #undef'ed first so deprecated compatibility
+ * aliases still provided by DPDK headers cannot trigger redefinition
+ * warnings.
+ */
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+/*
+ * The ETH_RSS_* names below were previously #defined without a matching
+ * #undef; undefine them too, for consistency with the rest of this block.
+ */
+#undef ETH_RSS_IPV4
+#undef ETH_RSS_FRAG_IPV4
+#undef ETH_RSS_NONFRAG_IPV4_OTHER
+#undef ETH_RSS_IPV6
+#undef ETH_RSS_FRAG_IPV6
+#undef ETH_RSS_NONFRAG_IPV6_OTHER
+#undef ETH_RSS_IPV6_EX
+#undef ETH_RSS_C_VLAN
+#undef ETH_RSS_L3_DST_ONLY
+#undef ETH_RSS_L3_SRC_ONLY
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+/* Unicast and multicast MAC address slots per port */
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+/* Scatter-gather HW ring sizes and per-descriptor packet buffer sizes */
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+/* Size in bytes of the SG packet header (_pkt_hdr_rx / _pkt_hdr_tx below) */
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+/*
+ * NOTE(review): this macro references a variable named first_vq_descr_idx
+ * that must exist in the caller's scope (it is not a macro parameter).
+ */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+/* Next descriptor index, wrapping at the Tx ring size */
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+/* Look up an initialized PMD instance by vhost id / vhost socket path */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+/* Resolve per-PCI-device handles (DBS block, FPGA profile) */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+/* Register/deregister a virtqueue for managed release on teardown */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+/* NOTE(review): rte_tsc_freq is also a DPDK EAL global - confirm this
+ * extern is intentional rather than shadowing the EAL declaration.
+ */
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/*
+ * Scatter-gather Rx packet descriptor header prepended to received packets.
+ * Layout must stay exactly SG_HDR_SIZE (12) bytes; a compile-time check
+ * enforces this. (A duplicate "#define SG_HDR_SIZE 12" previously sat here;
+ * the macro is already defined once earlier in this header, so the
+ * redundant definition was removed.)
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/*
+ * Scatter-gather Tx packet descriptor header prepended to transmitted
+ * packets. Layout must stay exactly SG_HDR_SIZE (12) bytes; a compile-time
+ * check below enforces this.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size.
+ * The array size evaluates to -1 (a compile error) when the struct does not
+ * measure exactly SG_HDR_SIZE bytes. C11 _Static_assert would express this
+ * more directly - consider switching if the minimum toolchain allows.
+ */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+/* Opaque handle type used by this driver */
+typedef void *handle_t;
+
+/* Per-queue HW memory: the virtqueue control area plus its packet buffers */
+struct hwq_s {
+	int vf_num; /* owning virtual function number */
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* State for one Rx queue of a port */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* backing HW memory for this queue */
+	struct nthw_virt_queue *vq; /* associated virtqueue */
+	int nb_hw_rx_descr; /* HW Rx descriptor count */
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* State for one Tx queue of a port */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* backing HW memory for this queue */
+	struct nthw_virt_queue *vq; /* associated virtqueue */
+	int nb_hw_tx_descr; /* HW Tx descriptor count */
+	/* Used for bypass in NTDVIO0 header on Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+/* Fixed-capacity list of uint32_t values; count is the number in use */
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count;
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* LAG management thread (lag_management) */
+	uint8_t lag_thread_active; /* cleared to stop the monitor loop */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+/* Supported NTbond modes (values follow DPDK bonding mode numbering) */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* A registered meter profile, kept in the per-port mtr_profiles list */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* A meter instance referencing a profile, kept in the per-port mtrs list */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtio communication layout for a virtual port */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE, /* nothing negotiated yet */
+	VIRT_PORT_NEGOTIATED_SPLIT, /* split virtqueue format */
+	VIRT_PORT_NEGOTIATED_PACKED, /* packed virtqueue format */
+	VIRT_PORT_USE_RELAY /* relay through a separate path */
+};
+
+#define MAX_PATH_LEN 128
+
+/*
+ * Per-port driver state. One instance exists for each ethdev port created
+ * by this PMD; instances are chained via the next pointer.
+ */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev; /* flow engine device handle */
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN]; /* vhost socket path (virtual ports) */
+
+	int n_intf_no; /* interface number on the adapter */
+	int if_index;
+
+	int lpbk_mode; /* loopback mode configuration */
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier; /* timestamp scaling factor */
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id; /* DPDK ethdev port id */
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES]; /* virtual port queues */
+	unsigned int vpq_nb_vq; /* number of entries used in vpq */
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config; /* non-NULL when port is part of a LAG */
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv; /* owning driver/adapter state */
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc; /* RTC timestamp of last statistics poll */
+	uint64_t rx_missed;
+
+	struct pmd_internals *next; /* next port instance in the global list */
+};
+
+/* Tear down all flows owned by a port instance */
+void cleanup_flows(struct pmd_internals *internals);
+/* Refresh the port's statistics counters from HW */
+int poll_statistics(struct pmd_internals *internals);
+/* Dump adapter debug info for the given PCI identifier to pfh */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+/* Capacity limits for converted match items and actions */
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+/* Number of flow-stat id slots (power of two; used for mask arithmetic) */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* A converted rte_flow item list in internal flow_elem form */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus its converted match items */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted flow attributes together with their match items */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted action list and the storage its entries point into */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI,
+ * allowing faster integration with the NTAPI version of the driver.
+ * It is therefore only appropriate while running on a temporary NTAPI.
+ * The query() functionality must move to the flow engine when this code
+ * is moved to the open-source driver.
+ */
+
+/* Driver-private rte_flow handle with cached statistics */
+struct rte_flow {
+	void *flw_hdl; /* underlying flow engine handle */
+	int used; /* non-zero while this slot is allocated */
+	uint32_t flow_stat_id; /* slot in flow_stat_id_map */
+
+	uint64_t stat_pkts; /* cached packet count */
+	uint64_t stat_bytes; /* cached byte count */
+	uint8_t stat_tcp_flags; /* cached accumulated TCP flags */
+};
+
+/*
+ * Driver-private flow item types. Values start at INT_MIN so they cannot
+ * collide with the non-negative standard rte_flow item enum values.
+ */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/* Driver-private flow action types; same INT_MIN offset scheme as above */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/*
+ * NOTE(review): these are static definitions in a header, so every
+ * translation unit that includes this file gets its own private copy of
+ * the tables and the lock - confirm the header is included from a single
+ * .c file, or move the definitions out of the header.
+ */
+static int convert_tables_initialized; /* set once the tables below are filled */
+
+/* Highest rte_flow enum value the conversion tables can index */
+#define MAX_RTE_ENUM_INDEX 127
+
+/* rte_flow item/action enum -> internal FLOW_* type (-1 = unsupported) */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/* Translate an rte_flow item type; -1 when out of range or unsupported */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+/* Translate an rte_flow action type; -1 when out of range or unsupported */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+/* Occupied flow-stat slots: 0 = free, otherwise the owning mark + 1 */
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal struct flow_error into the caller-supplied
+ * rte_flow_error (when one was given). FLOW_ERROR_NONE/SUCCESS map to
+ * RTE_FLOW_ERROR_TYPE_NONE, anything else to UNSPECIFIED. Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+	error->type = (flow_error->type == FLOW_ERROR_NONE ||
+		       flow_error->type == FLOW_ERROR_SUCCESS) ?
+		      RTE_FLOW_ERROR_TYPE_NONE :
+		      RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id.
+ * Linear probing from (mark % MAX_COLOR_FLOW_STATS) to the first free slot;
+ * the slot is marked occupied with mark + 1 (so 0 always means "free").
+ * Presumably called with flow_lock held - the _locked suffix suggests so;
+ * verify against callers.
+ * NOTE(review): if every slot is occupied this loop never terminates -
+ * confirm callers guarantee fewer than MAX_COLOR_FLOW_STATS live flows.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow-stat slot under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/*
+ * Release a flow-stat slot; out-of-range ids are silently ignored.
+ * Presumably called with flow_lock held (the _locked suffix suggests so).
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time fill of the rte_flow -> internal FLOW_* conversion tables.
+ * Unmapped enum values stay -1 (memset below) and are treated as
+ * unsupported by CNV_TO_ELEM / CNV_TO_ACTION. Not thread-safe by itself;
+ * callers check convert_tables_initialized first.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* Match item conversions */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* Action conversions */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Parse a raw packet byte template into a list of flow_elem match items:
+ * ETH [VLAN...] IPV4|IPV6 ICMP|TCP|UDP|SCTP [GTPv1-U [extensions...]].
+ * Each element's spec points into data[] and its mask into preserve[]
+ * (when preserve is non-NULL) at the same offset. Parsing may stop
+ * cleanly at any layer boundary when the buffer is exhausted.
+ *
+ * Returns the number of elements written to out[] including the
+ * terminating FLOW_ELEM_TYPE_END entry, or -1 for a truncated or
+ * unsupported header.
+ *
+ * NOTE(review): out[] bounds are not checked against the caller's
+ * capacity (callers pass MAX_ELEMENTS-sized arrays); verify callers
+ * constrain the input so many stacked VLAN/GTP headers cannot overrun it.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;	/* next free slot in out[] */
+	int pkti = 0;	/* current parse offset in data[] */
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - consume any number of stacked 802.1Q/802.1ad tags */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - ethertype must agree with the IP version nibble */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field is at byte offset 9 */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field is at byte offset 6 */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 - dispatch on the IP protocol number */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* UDP destination port 2152 signals a GTP-U payload */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is in 4-byte units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero-length extension would leave pkti
+				 * and next_ext unchanged, spinning forever
+				 * while hdri writes past the end of out[];
+				 * treat it as malformed input.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last byte of the extension names the next one */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing bytes after the last recognized header are not allowed */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Initialize the converted attribute block from the (optional) rte_flow
+ * attributes; only group and priority are carried over, everything else
+ * is zeroed. Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	struct flow_attr *dst = &attribute->attr;
+
+	memset(dst, 0x0, sizeof(*dst));
+	if (attr != NULL) {
+		dst->group = attr->group;
+		dst->priority = attr->priority;
+	}
+
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE flow action array into the PMD internal filter action list
+ * (vswitch / full-offload profile).
+ *
+ * @param action       Output; receives converted actions. Private copies of
+ *                     non-compatible confs (RSS, VXLAN encap, MARK) are
+ *                     stored inside it so they outlive the caller's input.
+ * @param actions      Input RTE actions, terminated by an END action.
+ * @param max_elem     Capacity of action->flow_actions[].
+ * @param flow_stat_id Output; color/flow statistics id. Set from a MARK
+ *                     action, or freshly allocated at END when no MARK was
+ *                     present (OVS full offload adds no MARK, but the FPGA
+ *                     needs one to drive color statistics).
+ *
+ * @return 0 on success, -1 on unknown action or table overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Sentinel: "no stat id assigned yet" */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 * (former unreachable "case -1" removed: type >= 0 here)
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				if (!rss)
+					return -1;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Map user mark to an internal stat id */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/*
+					 * Two slots are written below (MARK and the
+					 * moved END); fail instead of overflowing.
+					 */
+					if (aidx + 1 >= max_elem)
+						return -1;
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				/* Compatible conf is passed through unchanged */
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE flow action array into the PMD internal filter action list
+ * (inline profile).
+ *
+ * @param action       Output; receives converted actions. Private copies of
+ *                     non-compatible confs (RSS, RAW decap/encap, QUEUE) are
+ *                     stored inside it.
+ * @param actions      Input RTE actions, terminated by an END action.
+ * @param max_elem     Capacity of action->flow_actions[].
+ * @param queue_offset Added to every QUEUE index; remaps main-PMD queue
+ *                     numbers to the queues allocated for a VF/vDPA port.
+ *
+ * @return 0 on success, negative on error.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				if (!rss)
+					return -1;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				if (!decap)
+					return -1;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %d\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				if (!encap)
+					return -1;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %d\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				if (!queue)
+					return -1;
+				/* Remap to the VF/vDPA queue range */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible conf is passed through unchanged */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Static pool of flow handles handed out by eth_flow_create() */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when 'flow' is NOT an element of the nt_flows[] pool,
+ * i.e. it is a raw lower-layer handle (inline profile, group > 0 flows)
+ * that was typecast to struct rte_flow* and must not be dereferenced.
+ * NOTE(review): relational comparison of pointers into different objects is
+ * unspecified in ISO C; this relies on a flat address space — confirm OK
+ * for all supported targets.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *first_element = &nt_flows[0];
+	const void *last_element = &nt_flows[MAX_RTE_FLOWS - 1];
+
+	return (void *)flow < first_element || (void *)flow > last_element;
+}
+
+/*
+ * Translate an RTE flow specification (attr/items/actions) into the PMD's
+ * internal representation, dispatching action conversion on the adapter
+ * profile (inline vs vswitch).
+ *
+ * @param eth_dev      Ethernet device owning the flow.
+ * @param attr         RTE flow attributes (group, priority, direction).
+ * @param items        RTE pattern items, END-terminated.
+ * @param actions      RTE actions, END-terminated.
+ * @param attribute    Output; converted attributes.
+ * @param match        Output; converted match elements.
+ * @param action       Output; converted actions.
+ * @param error        Output; RTE flow error, always initialized.
+ * @param flow_stat_id Output; stat id (vswitch profile only).
+ *
+ * @return 0 on success, -1 on conversion error (error is populated).
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Validate dev before any dereference (the original checked it only
+	 * after reading dev->p_drv and dev->type).
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		/* group > 0 flows are handed directly to the lower layer */
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow .destroy callback: tear down a flow and release its resources.
+ * Handles both raw lower-layer handles (typecast, inline group > 0) and
+ * pool-allocated nt_flows[] entries. Returns 0 on success.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/* Destroying a NULL flow is a successful no-op */
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/* Raw lower-layer handle: nothing to release in our pool */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+	} else {
+		res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+		convert_error(error, &flow_error);
+
+		/* Return stat id and pool slot under the flow lock */
+		rte_spinlock_lock(&flow_lock);
+		delete_flow_stat_id_locked(flow->flow_stat_id);
+		flow->used = 0;
+		rte_spinlock_unlock(&flow_lock);
+	}
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow .validate callback: convert the flow spec and ask the lower
+ * flow layer whether it could be programmed, without creating it.
+ * Returns 0 when valid, negative errno-style value otherwise.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+	int res;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	res = flow_validate(dev->flw_dev, match.flow_elem, action.flow_actions,
+			    &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow .create callback: convert the flow spec and program it.
+ *
+ * Inline profile with group > 0 returns the lower-layer handle directly
+ * (typecast to struct rte_flow* — see is_flow_handle_typecast()). All other
+ * flows are tracked in a slot of the static nt_flows[] pool so per-flow
+ * color statistics can be polled. Returns NULL on failure (error is set).
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline, group > 0: hand back the raw lower-layer handle, untracked */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Reserve a free pool slot under the flow lock */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			/* Slot is taken only when a valid stat id was assigned */
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Creation failed: give back stat id and pool slot */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (color counter) statistics update */
+uint64_t last_stat_rtc;
+
+/*
+ * Poll and accumulate statistics for one port, rate-limited to once per
+ * second per port (and once per second globally for the shared color/flow
+ * counters). All hardware counters are free-running totals; this function
+ * keeps "base" snapshots and adds the delta since the previous poll.
+ * Takes hwlock (spinlock) and the driver stat mutex. Returns 0 on success,
+ * -1 on invalid state.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold the per-color deltas into every active flow using that color */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow .query callback: report per-flow counters. Only the COUNT
+ * action is supported; statistics are refreshed via poll_statistics()
+ * before being read. Returns 0 on success, -1 on error (err is set).
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* Raw lower-layer handles carry no counters we can read here */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Make sure the flow's accumulated counters are up to date */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				/* Caller asked for a reset after read */
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug helper: dump all fields of an rte_flow_tunnel to the filter log */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	/* Ports are stored in network order; htons() for readable output */
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * PMD-private action pair returned by ntnic_tunnel_decap_set():
+ * [0] = TUNNEL_SET marker, [1] = filled in with the decap action.
+ * NOTE(review): this is shared mutable static data; concurrent calls for
+ * different tunnel types would race on [1].type — confirm callers serialize.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel-offload .tunnel_decap_set callback: hand the application
+ * the PMD actions implementing decap for the given tunnel. Only VXLAN is
+ * supported; returns -ENOTSUP otherwise.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* PMD-private match item returned by ntnic_tunnel_match() (read-only) */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel-offload .tunnel_match callback: hand the application the
+ * single PMD item that matches tunneled traffic. Always succeeds.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow tunnel-offload .get_restore_info callback: reconstruct the outer
+ * tunnel header for a partially-offloaded packet from the FDIR mark the
+ * FPGA placed in the mbuf. The mark encodes: bits 31..24 = port id,
+ * bits 23..0 = flow stat id (used to look up the stored tunnel config).
+ * Returns 0 with 'info' filled in, -EINVAL when no tunnel info is found.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* TTL/TOS are not stored in tuncfg; use fixed defaults */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow tunnel-offload .tunnel_action_decap_release callback.
+ * The actions handed out by ntnic_tunnel_decap_set() are static data,
+ * so there is nothing to free — always succeeds.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow tunnel-offload .tunnel_item_release callback.
+ * The item handed out by ntnic_tunnel_match() is static data, so there is
+ * nothing to free — always succeeds.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow ops table exported to the ethdev layer.
+ * .flush is not implemented; flows must be destroyed individually.
+ */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One (RTE RSS flag, NT RSS flag) translation pair. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to { RTE_<name>, NT_<name> }; both namespaces must define <name>. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Translation table between RTE_ETH_RSS_* bits and NT_ETH_RSS_* bits.
+ * NOTE(review): only referenced from this file as far as visible — could be
+ * `static const`; confirm there are no external users before narrowing.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Look up the NT RSS flag corresponding to a single RTE RSS flag.
+ * Returns a pointer into the translation table, or NULL when the RTE
+ * flag has no NT counterpart.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Reverse lookup: find the RTE RSS flag corresponding to an NT RSS flag.
+ * Returns a pointer into the translation table, or NULL when not mapped.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Translate a DPDK RTE_ETH_RSS_* bit mask into the NT hash-field set.
+ * Unsupported RTE bits are logged and skipped, so the resulting field set
+ * may cover fewer fields than the caller requested.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	/* Scan every bit position of the 64-bit RSS mask. */
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				/* %u: i is unsigned (was %d with an unsigned argument) */
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate an NT hash-field set back into DPDK RTE_ETH_RSS_* bits.
+ * Every NT bit must have an RTE counterpart (the NT flags are a subset of
+ * the RTE flag space); a missing mapping is a programming error (assert).
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	/* unsigned int instead of non-standard `uint` for portability */
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* MSB of a 32-bit id: set => egress meter/profile, clear => ingress. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/*
+ * Find a meter profile by id in the per-device list.
+ * Returns NULL when no profile with that id exists.
+ */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
+/*
+ * rte_mtr .meter_profile_add (vswitch profile).
+ * Egress profiles (MSB of the id set) and any profile on a virtual port are
+ * copied into the per-device list; ingress on physical ports is rejected.
+ * Returns 0 or a negative errno set via rte_mtr_error_set().
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): "%s: [%s:%u]" receives __func__ twice and no __FILE__;
+	 * looks copy-pasted from the __FILE__ variant — confirm intent.
+	 */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		/* Reject duplicate profile ids. */
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		/* Keep a private copy of the caller's profile parameters. */
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr .meter_profile_delete (vswitch profile).
+ * Removes the profile from the per-device list and frees it.
+ * NOTE(review): no check that a meter still references the profile —
+ * a dangling mtr->profile pointer is possible; confirm callers' ordering.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/*
+ * Find a meter object by id in the per-device list.
+ * Returns NULL when no meter with that id exists.
+ */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *mtr = NULL;
+
+	LIST_FOREACH(mtr, &dev_priv->mtrs, next)
+	if (mtr->mtr_id == mtr_id)
+		break;
+
+	return mtr;
+}
+
+/* A rate split into an integer part plus a fraction in 1/1024 units. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Convert a rate in byte/s into bytes per hardware period, expressed as an
+ * integer part plus a fractional part in 1/1024 units.  The period depends
+ * on the clock frequency and other parameters combined into @period_ps:
+ *     f[bytes/period] = x[byte/s] * period_ps * 10^-12
+ * NOTE(review): byte_per_second * period_ps must not exceed 2^64; with the
+ * period values used in this file (< 2^22 ps) rates up to ~2^42 byte/s are
+ * safe — confirm this covers the maximum configurable rate.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /* 10^12 picoseconds per second */
+
+	res.integer = dividend / divisor;
+	const uint64_t remainder = dividend % divisor;
+
+	/* Scale the remainder to 1/1024 units, truncating. */
+	res.fractional = 1024ull * remainder / divisor;
+	return res;
+}
+
+/* Physical-port rate conversion; period = 8 * 3333 ps.
+ * NOTE(review): constant presumably derived from the TX-path clocking —
+ * confirm against hardware documentation.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Virtual-port rate conversion; period = 512 * 3333 ps (64x the physical
+ * period).  NOTE(review): confirm constant against hardware documentation.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr .meter_enable (vswitch): program the meter's profile into HW.
+ * Egress meters program EPP QoS (vport rate for virtual ports, txp rate for
+ * physical ports); ingress meters program DBS TX QoS.  The global ingress
+ * QoS rate is initialized once per process (function-static ingress_initial)
+ * and is therefore shared by all devices in this process.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on RFC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	/* Egress/ingress is encoded in the MSB of the profile id. */
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* A zero rate would disable; round up to the minimum. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250MHz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		/* NOTE: clamping mutates the stored profile, affecting all
+		 * meters that share it.
+		 */
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/*
+ * Disable ingress TX QoS for this port: enable flag, rate and burst all 0.
+ * NOTE(review): very generic name for a file-scope helper; consider a more
+ * specific name (e.g. ingress_qos_disable) if this file grows.
+ */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr .meter_disable (vswitch): zero out the HW rate configuration.
+ * Egress (MSB of mtr_id set) clears EPP QoS; ingress clears DBS TX QoS.
+ * The meter object itself stays in the list (see eth_mtr_destroy).
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/* MTR object create (vswitch profile).
+ * Allocates the meter, links it to its profile and inserts it into the
+ * per-device list; optionally enables it immediately.
+ * NOTE(review): if eth_meter_enable() fails below, the meter remains in the
+ * list — confirm whether rollback is intended.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	/* Ingress metering is only available on virtual ports. */
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/* MTR object destroy (vswitch profile).
+ * Disables the meter's HW QoS configuration, then unlinks and frees it.
+ * Returns -ENODEV (was -EEXIST, which means "already exists" — the wrong
+ * errno for a missing object, and inconsistent with
+ * eth_meter_profile_delete) when the meter id is unknown.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr .capabilities_get (inline/FLM profile).
+ * The limits below mirror the FLM meter implementation: only color-blind
+ * RFC 2698 trTCM in byte mode, green-only statistics.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* NOTE(review): derivation of this HW max rate is not visible here —
+	 * confirm (0xfff << 0xf) * 1099 against hardware documentation.
+	 */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported (RFC 2698 only) */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr .meter_profile_add (inline/FLM profile).
+ * Only RFC 2698 trTCM in byte mode is accepted, and committed must equal
+ * peak (cir==pir, cbs==pbs) since the HW meters a single rate.
+ * NOTE(review): the id is range-checked against
+ * flow_mtr_meter_policy_n_max() — confirm profiles and policies share the
+ * same maximum.
+ * (Dropped the bogus __rte_unused on `error`: it is used in every
+ * rte_mtr_error_set() call below.)
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr .meter_profile_delete (inline/FLM profile): zero out the HW
+ * profile slot.  The flow_mtr_set_profile() return value is deliberately
+ * ignored (best-effort clear).
+ * (Dropped the bogus __rte_unused on all three parameters: dev,
+ * meter_profile_id and error are each used below.)
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr .meter_policy_add (inline/FLM profile).
+ * Only the fixed policy green=pass (END / VOID,END / PASSTHRU,END),
+ * yellow=drop, red=drop is accepted — that is what the FLM HW implements.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* Green: empty action list, or a single VOID/PASSTHRU. */
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* Yellow and red must both be a single DROP. */
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr .meter_policy_delete (inline/FLM profile): only validates the id;
+ * policy slots need no explicit HW teardown.
+ * (Dropped the bogus __rte_unused on policy_id and error — both are used;
+ * dev really is unused and keeps the annotation.)
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr .create (inline/FLM profile).
+ * Accepts only color-blind, shared, enabled meters with green-only stats,
+ * then offloads the meter to hardware.
+ * NOTE(review): meter_profile_id is range-checked against
+ * flow_mtr_meter_policy_n_max() — confirm profiles and policies share the
+ * same maximum.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	/* All validation passed — offload the meter to HW. */
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr .destroy (inline/FLM profile): remove the offloaded meter from HW.
+ * (Dropped the bogus __rte_unused on `error`: it is used in both
+ * rte_mtr_error_set() calls below.)
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr .stats_update (inline/FLM profile), repurposed as a bucket-adjust:
+ * bit 63 of @adjust_value is a mandatory "adjust" marker; the remaining
+ * bits carry the value (must fit in 32 bits) passed to
+ * flm_mtr_adjust_stats().
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the marker bit, keeping the low 63 bits of the value. */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr .stats_read (inline/FLM profile).
+ * Only green packet/byte counters exist in HW; all other fields of @stats
+ * are returned as zero.  @clear resets the HW counters after reading.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* vswitch profile: profiles map directly to EPP/DBS QoS settings, so no
+ * capabilities/policy/stats callbacks are provided.
+ */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* inline (FLM) profile: full capability/policy/stats support. */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_ethdev .mtr_ops_get hook: select the ops table matching the FPGA
+ * profile (vswitch or inline).  @ops is an out-parameter of type
+ * `const struct rte_mtr_ops **`.  Returns -1 for unsupported profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+int eth_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Book-keeping for one registered vDPA port (filled in by nthw_vdpa_init()) */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;			/* vhost device id, set on new_device */
+	uint32_t index;			/* HW ring index base */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override on Tx */
+	int rxqs;			/* number of configured Rx queues */
+	int txqs;			/* number of configured Tx queues */
+	uint64_t flags;			/* vhost driver flags (RTE_VHOST_USER_CLIENT) */
+	struct rte_pci_addr addr;
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate (vdpa_dev, rx/tx, queue_id) into the HW ring index, host id
+ * (VF number) and representor port recorded by nthw_vdpa_init().
+ * Returns 0 on success, -1 if the device is unknown or queue_id is out of
+ * the configured range.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev != vdpa_dev)
+			continue;
+
+		if (rx) {
+			if (queue_id >= vport[i].rxqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].rxqs);
+				return -1;
+			}
+		} else {
+			if (queue_id >= vport[i].txqs) {
+				/* BUGFIX: log the Tx queue count, not rxqs */
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].txqs);
+				return -1;
+			}
+		}
+
+		/* Rx and Tx share the same HW ring index base */
+		*hw_index = vport[i].index + queue_id;
+		*host_id = vport[i].host_id;
+		*rep_port = vport[i].rep_port;
+		return 0;
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a new vDPA port for the given VF PCI device and start its
+ * vhost-user socket. On success *vhid receives the port index.
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/* BUGFIX: guard the fixed-size vport[] table against overflow */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Too many vDPA ports (max %lu)\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver for every registered vDPA port.
+ * BUGFIX: the loop previously returned after handling the first active
+ * port, leaving any remaining ports registered.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		char *socket_path = vport[i].ifname;
+		int ret;
+
+		if (socket_path[0] == '\0')
+			continue;
+
+		ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+		if (ret != 0) {
+			NT_LOG(ERR, VDPA,
+			       "detach vdpa device failed: %s\n",
+			       socket_path);
+		}
+
+		ret = rte_vhost_driver_unregister(socket_path);
+		if (ret != 0) {
+			NT_LOG(ERR, VDPA,
+			       "Fail to unregister vhost driver for %s.\n",
+			       socket_path);
+		}
+
+		/* Mark the slot free */
+		vport[i].ifname[0] = '\0';
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/*
+ * Feature-bit -> name table consumed by dump_virtio_features().
+ * NOTE(review): most name strings carry two leading spaces that end up in
+ * the printed output - presumably unintentional, but they are runtime
+ * strings and therefore left untouched here.
+ */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Print the name of every feature bit set in 'features' (debug aid) */
+static void dump_virtio_features(uint64_t features)
+{
+	int n;
+
+	for (n = 0; n < NUM_FEATURES; n++) {
+		const uint64_t bit = 1ULL << virt_features[n].id;
+
+		if (features & bit)
+			printf("Virtio feature: %s\n", virt_features[n].name);
+	}
+}
+#endif
+
+/*
+ * vhost "new_device" callback: a vhost-user connection on one of the
+ * registered sockets became ready. Maps the socket back to its vdpa_port,
+ * waits for the backing PMD instance, then records the negotiated ring
+ * layout (packed / in-order split) on the virtual port.
+ * Returns 0 on success, -1 on unknown socket, timeout, or an unsupported
+ * feature set.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Map the vhost socket path back to its vdpa_port slot */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	/* Poll up to ~2 s (2000 x 1 ms) for the PMD instance behind vhid */
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): %016lx assumes 64-bit long; PRIx64 would be portable */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	/* The HW path needs either a packed ring or an in-order split ring */
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/* vhost "destroy_device" callback: log the port and set its link down */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t idx;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* First pass: report which port is being destroyed */
+	for (idx = 0; idx < MAX_VDPA_PORTS; idx++) {
+		if (strcmp(ifname, vport[idx].ifname) != 0)
+			continue;
+		NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+		       ifname, vport[idx].vdev->device->name);
+		break;
+	}
+
+	/* Second pass: set link down on the matching virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) != 0)
+			continue;
+
+		struct pmd_internals *inst = vp_vhid_instance_ready(vhid);
+
+		if (inst)
+			inst->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+		break;
+	}
+}
+
+/* vhost-user lifecycle callbacks wired into every registered socket */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register the vhost-user socket for one vDPA port, mask out virtio
+ * features the FPGA datapath cannot offload, and start the vhost driver.
+ * Returns 0 on success, -1 on any registration/start failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Offload/segmentation features not handled by the HW path */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		/* CONSISTENCY FIX: this failure aborts the port (return -1),
+		 * so log it at ERR like the other failure paths, not INF.
+		 */
+		NT_LOG(ERR, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF PCI id to a human-readable adapter name */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		switch (pci_dev->id.device_id) {
+		case NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:
+			return "NT200A02";
+		case NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:
+			return "NT50B01";
+		default:
+			break;
+		}
+	}
+
+	return "Unknown";
+}
+
+/* PCI probe for a Napatech VF: create its vDPA device, then the DPDK
+ * VF interface. Returns 0 on success, -1 on failure.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+	int rc = -1;
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* The vDPA device must exist before the DPDK interface is created */
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) == 0)
+		rc = nthw_create_vf_interface_dpdk(pci_dev);
+
+	return rc;
+}
+
+/* PCI remove: tear down the vDPA device, then the DPDK VF interface */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return (ntvf_vdpa_pci_remove(pci_dev) == 0) ?
+	       nthw_remove_vf_interface_dpdk(pci_dev) : -1;
+}
+
+/* PCI ids of the Napatech VFs this driver binds to */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* PCI driver descriptor registered below via RTE_PMD_REGISTER_PCI */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow state for one guest vring and its backing HW virt-queue */
+struct vring_info {
+	uint64_t desc;		/* guest-physical address of descriptor ring */
+	uint64_t avail;		/* guest-physical address of avail/driver area */
+	uint64_t used;		/* guest-physical address of used/device area */
+	uint16_t size;		/* ring size in descriptors */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;		/* 0 = Rx queue, 1 = Tx queue */
+	struct nthw_virt_queue *p_vq;	/* NULL until the HW queue is created */
+
+	int enable;
+};
+
+/* Negotiated state; vring[] holds Rx/Tx pairs (even index = Rx, odd = Tx) */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;	/* ring count reported by rte_vhost_get_vring_num() */
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Driver-private state for one VF exposed as a vDPA device */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds resolved in ntvf_vdpa_vfio_setup(); -1 until then */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id of the attached guest */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* Lifecycle flags read/written from several threads, hence atomics */
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;
+	volatile atomic_int_fast32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Node of the global list of probed ntvf_vdpa devices */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+/* Global device list; all traversals take internal_list_lock */
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/* Look up the internal_list node owning a given rte_vdpa_device, or NULL */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *it;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_FOREACH(it, &internal_list, next) {
+		if (it->internal->vdev == vdev) {
+			match = it;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/* Look up the internal_list node owning a given PCI device, or NULL */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *it;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_FOREACH(it, &internal_list, next) {
+		if (it->internal->pdev == pdev) {
+			match = it;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/* Attach the VF to VFIO and cache its container/group/device fds.
+ * Returns 0 on success, -1 on failure (fds stay -1).
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio_num;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio_num = nt_vfio_setup(internal->pdev);
+	if (vfio_num == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio_num);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio_num);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio_num);
+	return 0;
+}
+
+/*
+ * Map (do_map=1) or unmap (do_map=0) all guest memory regions of the
+ * attached VM into the VF's IOMMU domain.
+ * Returns 0 on success, negative on failure or on a redundant request.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Refuse double-map and double-unmap requests */
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* BUGFIX: the GPA conversion was "0xllx" (missing '%'), so
+		 * guest_phys_addr was printed in the size slot and reg->size
+		 * had no matching conversion - format/argument mismatch (UB).
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/* Translate a host virtual address to the guest-physical address using
+ * the vhost memory table. Returns 0 when no region contains 'hva'.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+	uint32_t r;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (r = 0; r < mem->nregions; r++) {
+		const struct rte_vhost_mem_region *reg = &mem->regions[r];
+
+		if (hva < reg->host_user_addr ||
+				hva >= reg->host_user_addr + reg->size)
+			continue;
+
+		gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
+		break;
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Resolve guest-physical addresses for one vring and instantiate the
+ * matching HW virt-queue (even ring index = Rx, odd = Tx; queue number
+ * is vring >> 1). Only packed rings or in-order split rings are handled;
+ * for a plain split ring a warning is logged and no HW queue is created.
+ * Returns 0 on success (or unsupported layout), negative on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* A GPA of 0 is treated as a translation failure for all three areas */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* rx flag = even index; queue number = vring >> 1 */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the virt-queues for this device once the guest has negotiated
+ * features. On the INLINE FPGA profile the first Rx/Tx pair is mapped and
+ * enabled immediately; on other profiles only vring 0 is created here (the
+ * rest are handled elsewhere via vring state changes).
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		/* Only the first Rx/Tx pair (vrings 0 and 1) is started here */
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Quiesce the device: hand the current ring indices back to vhost (for a
+ * later restart / live migration) and release every created HW virt-queue.
+ * Always returns 0.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Push last avail/used indices back into vhost */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type: 0 = Rx, 1 = Tx (set in create_vring) */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Buffer large enough for a vfio_irq_set header plus one eventfd per
+ * queue interrupt vector (2 vectors per queue pair) and the config vector.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Route guest callfd eventfds to the device's MSI-X vectors via VFIO.
+ *
+ * Vector 0 carries the device/config interrupt; vectors 1..nr_vring map
+ * one-to-one onto the vhost vring callfds.  Idempotent: returns 0
+ * immediately when interrupts are already enabled.  Also returns 0 when
+ * too many vectors are requested, since poll-mode guests work without
+ * interrupts.  Returns -1 only on ioctl failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0: the device-level interrupt eventfd */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/*
+	 * Fill queue vectors in Rx/Tx pairs.
+	 * NOTE(review): assumes nr_vring is even; with an odd count the
+	 * second lookup targets a nonexistent ring - confirm callers
+	 * always configure full queue pairs.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Release all MSI-X vectors previously routed through VFIO.
+ *
+ * Idempotent: returns 0 immediately when interrupts are not enabled.
+ * Returns -1 when the VFIO_DEVICE_SET_IRQS ioctl fails.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irqs = (struct vfio_irq_set *)buf;
+	int status;
+
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* A zero-count, data-less trigger set tears down every vector */
+	irqs->argsz = sizeof(struct vfio_irq_set);
+	irqs->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irqs->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irqs->count = 0;
+	irqs->start = 0;
+
+	status = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irqs);
+	if (status) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the current device state flags.
+ *
+ * Starts the datapath when it is not running but the device is both
+ * started and attached; stops it (and releases interrupts and DMA maps)
+ * when it is running but no longer started or attached.  Serialized by
+ * internal->lock.  Returns 0 on success, the failing step's non-zero
+ * return value otherwise.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/*
+			 * Log before jumping: the original jumped first,
+			 * which made this error log unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vhost dev_conf callback: attach a vhost session to this vDPA device
+ * and bring the datapath up.  Returns 0 on success, -1 when the vDPA
+ * device is unknown.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *dev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+
+	LOG_FUNC_ENTER();
+
+	dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", dev);
+		return -1;
+	}
+
+	priv = entry->internal;
+	priv->vid = vid;
+
+	atomic_store(&priv->dev_attached, 1);
+
+	ntvf_vdpa_update_datapath(priv);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost dev_close callback: detach the vhost session, tear the datapath
+ * down and drop all cached virtqueue handles.  Returns 0 on success,
+ * -1 when the vDPA device is unknown.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *dev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+
+	LOG_FUNC_ENTER();
+
+	dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", dev);
+		return -1;
+	}
+
+	priv = entry->internal;
+
+	atomic_store(&priv->dev_attached, 0);
+	ntvf_vdpa_update_datapath(priv);
+
+	/* Invalidate the virt queue pointers */
+	struct ntvf_vdpa_hw *hw = &priv->hw;
+
+	for (uint32_t q = 0; q < hw->nr_vring; q++)
+		hw->vring[q].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost set_features callback.
+ *
+ * Logs the negotiated feature set.  When the guest requests dirty-page
+ * logging (live migration) we must refuse: the relay core needed to
+ * implement it is not present.  Returns 0 on success, -1 on unknown
+ * device or when live migration is requested.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	if (!RTE_VHOST_NEED_LOG(features)) {
+		/* Emit the leave trace on the common path too (was missing) */
+		LOG_FUNC_LEAVE();
+		return 0;
+	}
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/*
+ * vhost callback: return the VFIO group fd for the device backing this
+ * vhost session, or -1 when the vDPA device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *dev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", dev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/*
+ * vhost callback: return the VFIO device fd for the device backing this
+ * vhost session, or -1 when the vDPA device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *dev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	dev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", dev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/*
+ * vDPA ops callback: report the maximum queue-pair count supported by
+ * this device.  Returns 0 on success, -1 when the device is unknown.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA ops callback: report the virtio feature bits this device offers.
+ * Returns 0 on success, -1 on unknown device or NULL output pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA ops callback: report the supported vhost-user protocol features.
+ * Returns 0 on success, -1 when the output pointer is NULL.
+ *
+ * Note: vdev was marked __rte_unused although it is passed to both
+ * NT_LOG calls below; the attribute was wrong and has been dropped.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable VFIO MSI-X interrupts, then turn on Rx/Tx for every vring.
+ * Even vring indices are Rx queues, odd indices are Tx queues.
+ * Returns 0 on success, the interrupt-enable error code otherwise.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	int ret = 0;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Use the driver logger instead of raw printf for consistency */
+		NT_LOG(ERR, VDPA, "ERROR - ENABLE INTERRUPT via VFIO\n");
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (uint32_t i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vhost set_vring_state callback: enable or disable one vring.
+ *
+ * Disabling stops HW Rx/Tx on the queue.  Enabling either re-enables an
+ * already-created queue, or DMA-maps the guest memory and creates the
+ * vring; once the last vring comes up, interrupts and (for non-inline
+ * FPGA profiles) HW Rx/Tx are configured in one shot.  Returns 0 on
+ * success, -1 on unknown device or out-of-range vring index.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx + Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type 0 = Rx queue, otherwise Tx */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* Queue already exists in HW - just re-enable it */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* First enable: map guest memory and create the vring */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA device operations registered with the vhost library.
+ * migration_done and get_notify_area are intentionally unimplemented.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO, register the
+ * device with the vDPA framework and insert it into the internal list.
+ * Returns 0 on success, -1 on failure (allocations are released).
+ *
+ * NOTE(review): the error path does not undo ntvf_vdpa_vfio_setup() -
+ * confirm whether VFIO teardown is needed when registration fails.
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* goto error (was "return -1") so list/internal are freed */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, unmap the PCI device, release the VFIO
+ * resources, unregister from the vDPA framework and free all state.
+ * Returns 0 on success, -1 when the device is not in the internal list.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing "started" makes update_datapath stop the active rings */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Empty PCI match table (sentinel only).
+ * NOTE(review): with no vendor/device entries this driver never matches
+ * via the PCI bus scan - presumably devices are hooked up explicitly by
+ * the PF driver; confirm.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver descriptor for the ntnic VF vDPA driver */
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/*
+ * Per-function enter/leave trace logging.
+ * NOTE(review): LOG_FUNC_TRACE is unconditionally defined here, so the
+ * DBG traces are always compiled in - confirm this is intended for
+ * production builds.
+ */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI probe/remove entry points used by the driver descriptor */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive a 0-255 device index from the PCI address:
+ * low 5 bits of devid combined with the 3 function bits.
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	int devid_part = pdev->addr.devid & 0x1f;
+	int func_part = pdev->addr.function & 0x7;
+
+	return (devid_part << 3) + func_part;
+}
+
+/* Internal API */
+
+/* Per-VF VFIO state: container/group/device fds and next free IOVA */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;
+};
+
+static struct vfio_dev vfio_list[256];
+
+/*
+ * Look up the VFIO state slot for a VF number.
+ * Returns NULL when vf_num is outside the table.
+ */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	/* Bound derived from the table itself instead of a magic 255 */
+	if (vf_num < 0 ||
+			(size_t)vf_num >= sizeof(vfio_list) / sizeof(vfio_list[0]))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+
+/*
+ * Set up VFIO for one PF/VF: resolve the IOMMU group, create (or reuse)
+ * a VFIO container, bind the group to it and map the PCI device.
+ * PF0 shares DPDK's default container; every other function gets its
+ * own.  Returns the VF number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/*
+	 * rte_vfio_get_group_num() returns >0 on success; checking it
+	 * prevents use of an uninitialized iommu_group_num below.
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) <= 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group for %s.\n",
+		       devname);
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Never destroy DPDK's shared default container */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Release the VFIO container associated with a VF number.
+ * Returns 0 on success, -1 for an out-of-range VF number.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	/* NOTE(review): for vf 0 container_fd is the DPDK default
+	 * container - confirm destroying it here is intended.
+	 */
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * DMA-map a buffer into a VF's VFIO container at the next free IOVA.
+ *
+ * For 1 GiB requests the mapping is aligned down to a 1 GiB boundary and
+ * the caller's IOVA is offset accordingly.  On success *iova_addr is set
+ * and the per-VF IOVA cursor advances by 1 GiB.  Returns 0 on success,
+ * -1 on bad VF number or mapping failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Cast the pointer and size explicitly: the original passed a
+	 * void * for PRIX64 and a uint64_t for %d (undefined behavior
+	 * in printf-style formatting).
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%d\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo an nt_vfio_dma_map() mapping.
+ *
+ * Mirrors the map path's 1 GiB alignment: for 1 GiB requests the virtual
+ * base is aligned down and the IOVA adjusted by the same offset.
+ * Returns 0 on success (including when the container was never created),
+ * -1 on bad VF number or unmap failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Nothing mapped yet for this VF - treat as success */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+
+/*
+ * DMA-map guest memory into a VF's container using the guest physical
+ * address directly as IOVA.  Returns 0 on success, -1 on bad VF number
+ * or mapping failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Cast size for %d: the original passed a uint64_t (UB in printf) */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo an nt_vfio_dma_map_vdpa() mapping.
+ * Returns 0 on success, -1 on bad VF number or unmap failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+	int rc;
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+	rc = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					  iova_addr, size);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       rc, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the VFIO container fd for a VF number, or -1 if out of range.
+ */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove" wording */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/*
+ * Return the VFIO group fd for a VF number, or -1 if out of range.
+ */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove" wording */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/*
+ * Return the VFIO device fd for a VF number, or -1 if out of range.
+ */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove" wording */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get dev fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* Forward-declare so the constructor registration precedes the body */
+RTE_INIT(nt_vfio_init);
+
+/*
+ * Constructor: publish the DMA map/unmap callbacks to the nt_util
+ * layer so shared code can map memory without linking to this file.
+ */
+static void nt_vfio_init(void)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * NOTE(review): the guard _NTNIC_VFIO_H_ uses a leading underscore +
+ * uppercase, a reserved identifier in C - consider NTNIC_VFIO_H.
+ */
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Set up / tear down per-VF VFIO state (container, group, device fd) */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-VF VFIO file descriptors; -1 on bad vf_num */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* Driver-managed IOVA allocation: *iova_addr receives the mapping */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA variants: guest physical address is used directly as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/*
+ * Descriptor for one extended statistic: its exported name, the counter
+ * block it is read from, and the byte offset of the (uint64_t) counter
+ * within that block's stats struct.
+ */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source;		/* 1 = RX port, 2 = TX port, 3 = FLM */
+	unsigned int offset;	/* offsetof() of the counter field */
+};
+
+/*
+ * Extended stat table for VSwitch (virtualization) adapters.
+ * source: 1 = RX port counters, 2 = TX port counters
+ * (FLM counters do not exist on VSwitch).
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat table for Capture/Inline adapters - implements RMON.
+ * Used when the FLM module version is below 0.18 ("FLM 0.17" variant).
+ * source: 1 = RX port counters, 2 = TX port counters, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat table for Capture/Inline adapters - implements RMON.
+ * Used when the FLM module version is 0.18 or newer; extends the v1 table
+ * with the FLM 0.20 counters at the end.
+ * source: 1 = RX port counters, 2 = TX port counters, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values.
+ * Sized to the larger of the name tables.  A plain ternary is used instead
+ * of RTE_MAX because the result must be an integer constant expression
+ * usable as an array dimension at file scope.
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Counter snapshot taken at last reset; xstats report current - snapshot.
+ * Indexed [port][stat] for RX/TX counters; FLM counters use row 0.
+ */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with the stat mutex locked.
+ */
+
+/*
+ * Fill up to n xstats for the given port, relative to the last reset
+ * snapshot.  Returns the number of entries written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	/* Select counter blocks and the matching name table */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n && idx < tbl_size; idx++) {
+		uint64_t value = 0;
+
+		stats[idx].id = idx;
+		switch (tbl[idx].source) {
+		case 1: /* RX counter */
+			value = *((uint64_t *)&rx_base[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 2: /* TX counter */
+			value = *((uint64_t *)&tx_base[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 3: /* FLM counter; absent (NULL base) on vswitch */
+			if (flm_base)
+				value = *((uint64_t *)&flm_base[tbl[idx].offset]) -
+					nthw_xstats_reset_val[0][idx];
+			break;
+		default:
+			break;
+		}
+		stats[idx].value = value;
+	}
+
+	return idx;
+}
+
+/*
+ * Fill values for the requested stat ids on the given port, relative to
+ * the last reset snapshot.  Ids outside the name table are skipped.
+ * Returns the number of valid ids resolved.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+	int nb_found = 0;
+
+	/* Select counter blocks and the matching name table */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		uint64_t id = ids[i];
+
+		if (id >= tbl_size)
+			continue;
+
+		switch (tbl[id].source) {
+		case 1: /* RX counter */
+			values[i] = *((uint64_t *)&rx_base[tbl[id].offset]) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 2: /* TX counter */
+			values[i] = *((uint64_t *)&tx_base[tbl[id].offset]) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 3: /* FLM counter; absent (NULL base) on vswitch */
+			if (flm_base)
+				values[i] =
+					*((uint64_t *)&flm_base[tbl[id].offset]) -
+					nthw_xstats_reset_val[0][id];
+			else
+				values[i] = 0;
+			break;
+		default:
+			values[i] = 0;
+			break;
+		}
+		nb_found++;
+	}
+
+	return nb_found;
+}
+
+/*
+ * Snapshot the current counters for the given port into
+ * nthw_xstats_reset_val so subsequent reads report deltas.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+
+	/* Select counter blocks and the matching name table */
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < tbl_size; i++) {
+		switch (tbl[i].source) {
+		case 1: /* RX counter */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_base[tbl[i].offset]);
+			break;
+		case 2: /* TX counter */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_base[tbl[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM counter.  flm_count_current is a gauge, not an
+			 * accumulator, so resetting it makes no sense.
+			 */
+			if (flm_base &&
+			    strcmp(tbl[i].name, "flm_count_current") != 0)
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_base[tbl[i].offset]);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked.
+ */
+
+/*
+ * Copy up to size xstats names into xstats_names; return the number
+ * copied, or the full table size when xstats_names is NULL.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	unsigned int i;
+	int nb_copied = 0;
+
+	/* Pick the name table for this adapter flavor / FLM version */
+	if (is_vswitch) {
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		tbl = nthw_cap_xstats_names_v1;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		tbl = nthw_cap_xstats_names_v2;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (!xstats_names)
+		return tbl_size;
+
+	for (i = 0; i < size && i < tbl_size; i++) {
+		strlcpy(xstats_names[i].name, tbl[i].name,
+			sizeof(xstats_names[i].name));
+		nb_copied++;
+	}
+
+	return nb_copied;
+}
+
+/*
+ * Copy the names for the requested stat ids.  Ids outside the table are
+ * skipped and not counted, matching nthw_xstats_get_by_id().  Returns the
+ * number of names written, or the table size when xstats_names is NULL.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Pick the name table for this adapter flavor / FLM version */
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/*
+			 * Count only resolved ids; previously count was
+			 * incremented for invalid ids as well, overstating
+			 * the result vs nthw_xstats_get_by_id().
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Copy xstats names; returns names copied, or table size when
+ * xstats_names is NULL.  Does not require the stat mutex.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Fill xstats values for a port; caller must hold the stat mutex. */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Snapshot current counters as the new zero point; needs the stat mutex. */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* Name/value lookups by stat id; see the .c file for counting semantics. */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v9 8/8] net/ntnic: adds socket connection to PMD
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-29 10:17   ` [PATCH v9 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-29 10:17   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-29 10:17 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  876 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5356 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	const char *param;
+	struct func_s *sub_funcs;
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply buffer for a request.
+ * Reply layout: the first 4 bytes carry the binary error code (written
+ * over the "----" placeholder produced by sprintf), followed by
+ * "<module>:<error text>" and a terminating NUL.  *len is set to the full
+ * reply length, or left 0 when data is NULL or allocation fails.
+ * Always returns REQUEST_ERR so callers can write "return ntconn_error(...)".
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4-byte code + module + ':' + error text + NUL */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			/* Overwrite the "----" placeholder with the code */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal reply holding only a 4-byte status code.
+ * On allocation failure *data is NULL and *len stays 0.
+ * Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	uint32_t *code_buf = malloc(sizeof(*code_buf));
+
+	*data = (char *)code_buf;
+	if (code_buf) {
+		*code_buf = (uint32_t)code;
+		*len = sizeof(uint32_t);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Walk the comma-separated request string through the nested func_list
+ * tables, recursing one level per token, and invoke the matching handler.
+ * The remainder of the request (after the matched token) is passed to the
+ * handler through *data as a zero-terminated string.
+ *
+ * Assumes 'function' is a writable, NUL-terminated buffer (strtok inserts
+ * NULs in place).
+ * NOTE(review): strtok keeps static state and is not thread-safe; this is
+ * presumably only called from the single-threaded connection server -
+ * confirm, or switch to strtok_r.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		/* Distinguish "no parameter at all" from "chain ended early" */
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	/* Consume this token (and its separator) from the header length */
+	hdr->len -= strlen(tok) + 1;
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				/* Descend into the nested parameter table */
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				/* Table entry with neither subtree nor handler */
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain an error text describing the failure
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+typedef struct ntconn_mod_s {
+	void *hdl;
+	struct pci_id_s addr;
+	const ntconnapi_t *op;
+
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd;
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;
+};
+
+int ntconn_server_register(void *server);
+
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+struct ntconn_header_s {
+	uint16_t tag;
+	uint16_t len;
+	uint32_t blob_len;
+};
+
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type is not recognised */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		sprintf(npci->nic_pci_id[i], "%04x:%02x:%02x.%x",
+			serv->pci_id_list[i].domain & 0xffff,
+			serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (data) {
+#ifdef DEBUG
+		NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+		free(data);
+	}
+}
+
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr addr = {
+		.domain = 0, .bus = 0, .devid = 0, .function = 0
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..22ef7828c7
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error-code to error-text table; terminated by the {-1, NULL} sentinel. */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;
+static pthread_t tid;
+static pthread_t ctid;
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Look up the error descriptor for err_code.
+ * Unknown codes fall back to entry 1 ("Internal error").
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+	/* Terminator reached without a match: generic internal error */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a connection module for a PCI address.
+ * Allocates a module node, pushes it onto the global ntcmod_base list and,
+ * for real devices, records the PCI id in the server's id list.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto the singly linked list of registered modules */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* NOTE(review): pci_id presumably aliases domain/bus/devid/function
+	 * through a union (pad is zeroed above for that reason) — confirm.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the id once; stop at the first free or matching slot */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill in a unix-domain sockaddr for 'path'.
+ * Returns 0 on success; -1 on NULL arguments or when the path does not fit
+ * into sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!addr || !path)
+		return -1;
+
+	memset(addr, 0, sizeof(struct sockaddr_un));
+	addr->sun_family = AF_UNIX;
+
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;
+
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll 'fd' for up to 'timeout' ms (-1 = forever) and receive at most
+ * 'len' bytes into 'data'. On success stores the byte count in *recv_len
+ * and returns STATUS_OK; otherwise one of the STATUS_* codes above.
+ * STATUS_TRYAGAIN marks transient conditions (EINTR, spurious wakeup)
+ * that the caller is expected to retry.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd flags set: try to receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly 'length' bytes into 'data' (blocking, infinite timeout),
+ * looping over partial reads. Returns STATUS_OK, or the first
+ * non-retryable status from read_data (closed/timeout pass through
+ * silently, other errors are logged).
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t recv_len = 0;
+	size_t left = length;
+	size_t pos = 0;
+
+	while (left > 0) {
+		int ret = read_data(clfd, left, &data[pos], &recv_len, -1);
+
+		if (ret == STATUS_OK) {
+			pos += recv_len;
+			left -= recv_len;
+		} else {
+			if (ret == STATUS_CONNECTION_CLOSED || ret == STATUS_TIMEOUT) {
+				/* Silently return status */
+				return ret;
+			}
+			if (ret != STATUS_TRYAGAIN) {
+				NT_LOG(ERR, NTCONNECT,
+				       "Failed getting packet. Error code: 0x%X\n",
+				       ret);
+				return ret;
+			}
+		}
+		/* Try again */
+	}
+	return STATUS_OK;
+}
+
+/* Write the entire buffer to fd, looping over short writes. */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t done = 0;
+
+	while (done < size) {
+		ssize_t n = write(fd, (const void *)&data[done], size - done);
+
+		if (n < 0) {
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		done += (size_t)n;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request message from 'clfd' into a malloc'ed buffer.
+ * A message starts with struct ntconn_header_s; the total size is
+ * hdr.len + hdr.blob_len. On STATUS_OK the caller owns *rdata.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	if (recv_len < NTCMOD_HDR_LEN) {
+		/* Too short to even contain the header */
+		NT_LOG(ERR, NTCONNECT, "request too short\n");
+		free(data);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+	}
+
+	/*
+	 * The first recv may return fewer bytes than the full message even
+	 * when the message fits in MESSAGE_BUFFER, so fetch whatever is
+	 * still missing in either case.
+	 */
+	if (length > recv_len) {
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Parse one incoming request from 'clfd'.
+ * The payload is "<pci_id>;<module>[;<command>]" where pci_id is
+ * "dddd:bb:dd.f" in hex. On success returns the registered module that
+ * matches both the PCI address and module name, points *module_cmd into
+ * the request buffer, and hands the buffer to the caller via *get_req.
+ * Returns NULL on parse failure or when no module matches; *status
+ * carries the read status from read_request().
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/* Each token is NULL-checked _before_ strlen() is
+			 * applied to it; a malformed request must not crash
+			 * the server.
+			 */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Split "dddd:bb:dd.f" into its hex components */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Prepend an ntconn header (tag + total length) to 'data' and transmit the
+ * whole message on 'clfd'. Returns 0 or a STATUS_* error.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	uint8_t *msg = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!msg) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	memcpy(msg, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&msg[NTCMOD_HDR_LEN], data, size);
+
+	int res = write_all(clfd, msg, hdr.len);
+
+	free(msg);
+	return res;
+}
+
+/*
+ * Send a reply, then release the module-owned payload through the module's
+ * free_data callback while holding the module mutex. The callback is only
+ * invoked for non-zero sizes, matching how request handlers allocate.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/*
+ * Send an NTCONN_TAG_ERROR reply: the first 4 bytes of the payload carry
+ * the numeric error code (overwriting the "----" placeholder written by
+ * sprintf), followed by "connect:<error text>".
+ * NOTE(review): assumes every err_text fits in MAX_ERR_MESSAGE_LENGTH
+ * minus the "----connect:" prefix — confirm against the error table.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	sprintf(err_buf, "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	*(uint32_t *)err_buf = (uint32_t)ntcerr->err_code;
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: read requests in a loop, dispatch each to the
+ * matching module and send back replies until the peer disconnects or a
+ * send fails. On exit the fd is closed and every module's client_cleanup
+ * callback is invoked for it.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;	/* fd smuggled through the void* arg */
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		/* NOTE(review): this INVALID_REQUEST reply is also reached
+		 * after a successful dispatch above (no continue) — confirm
+		 * the protocol expects a trailing error frame per request.
+		 */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		/* NOTE(review): the break paths above leave 'request'
+		 * unfreed for the final iteration — possible leak; verify.
+		 */
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Listener thread: accept clients on the unix socket and spawn one
+ * detached ntconnect_worker thread per connection, pinned to the
+ * configured cpuset. Exits on listen()/accept() failure.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Create the ntconnect unix-domain socket server.
+ * 'sockname' is the socket path (its directory is created if missing);
+ * 'cpuset' pins the server and worker threads. Does nothing unless at
+ * least one module has registered. Returns 0 on success, -1 on failure.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		/* dirname() may modify its argument, hence the copy */
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		unix_build_address(sockname, &addr);
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle: back-pointer to the owning driver instance. */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* "get" level-1 dispatch table for the adapter module. */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL }, /* sentinel */
+};
+
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* "set" level-1 dispatch table for the adapter module. */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL }, /* sentinel */
+};
+
+/*
+ * Entry level
+ * Top-level command dispatch for the adapter module: "get" and "set".
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL }, /* sentinel */
+};
+
+/* Map the driver link-speed enum to the ntconnect PORT_LINK_SPEED_* value. */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	if (link_speed == NT_LINK_SPEED_10M)
+		return PORT_LINK_SPEED_10M;
+	if (link_speed == NT_LINK_SPEED_100M)
+		return PORT_LINK_SPEED_100M;
+	if (link_speed == NT_LINK_SPEED_1G)
+		return PORT_LINK_SPEED_1G;
+	if (link_speed == NT_LINK_SPEED_10G)
+		return PORT_LINK_SPEED_10G;
+	if (link_speed == NT_LINK_SPEED_25G)
+		return PORT_LINK_SPEED_25G;
+	if (link_speed == NT_LINK_SPEED_40G)
+		return PORT_LINK_SPEED_40G;
+	if (link_speed == NT_LINK_SPEED_50G)
+		return PORT_LINK_SPEED_50G;
+	if (link_speed == NT_LINK_SPEED_100G)
+		return PORT_LINK_SPEED_100G;
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/* Parse a textual speed ("10M".."100G") into the driver link-speed enum. */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces" handler: build a malloc'ed ntc_interfaces_s describing
+ * every DPDK port (physical, link-aggregated and virtual): type, PCI id,
+ * link/admin state, speed, MTU, MAC, attached hw queues and NIM data.
+ * Ownership of *data passes to the caller. Returns REQUEST_OK or
+ * REQUEST_ERR (with *len = 0) on failure.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		/* NOTE(review): unlike port 0 above, this lookup result is
+		 * not NULL-checked before dereference — confirm it cannot
+		 * fail for probed ports.
+		 */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			/* NOTE(review): no MAX_RSS_QUEUES clamp here, unlike
+			 * the virtual-port loop below — confirm the queue
+			 * array cannot overflow for physical ports.
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode link length saturates at 0xFFFF */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * "get info" handler: return the FPGA identification string
+ * "TTT-PPPP-VV-RR" (type, product, version, revision) in a malloc'ed
+ * buffer owned by the caller.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* NOTE(review): the hard-coded 31 assumes ntc_adap_get_info_s holds
+	 * at least 31 bytes — confirm against its definition.
+	 */
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * "get sensors" handler: serialize a ntc_sensors_s header followed by a
+ * flat array of struct sensor entries (adapter sensors first, then NIM
+ * sensors per physical port) into a malloc'ed buffer owned by the caller.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	/* NOTE(review): fixed 24-byte copy assumes p_dev_name is at least
+	 * that long and adapter_name that large — confirm.
+	 */
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	/* Entries are appended directly after the header */
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/* NOTE(review): copies min(strlen, 50) bytes with no
+			 * NUL terminator for 50+ char names — verify the
+			 * consumer treats 'name' as fixed-width.
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable the given physical port. */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable the given physical port. */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/* Request link admin state "up" for a port; no-op if already requested. */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+	} else {
+		nt4ga_port_set_link_status(info, portid, true);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	}
+
+	return REQUEST_OK;
+}
+
+/* Request link admin state "down" for a port; no-op if already requested. */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		nt4ga_port_set_link_status(info, portid, false);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+		       portid);
+	} else {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+	}
+
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of a port. Speed may only be changed while the port
+ * is administratively disabled; speed_str must parse to a known speed.
+ * On failure an error reply is written into *data/*len.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_adm_state(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+
+	return REQUEST_OK;
+}
+
+/* Apply the requested loopback mode (host/line/off) to a port. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Enable or disable transmitter power on a port. Writes an error reply
+ * into *data/*len if the hardware call fails.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+	if (nt4ga_port_tx_power(info, portid, disable) != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handle "adapter,set,interface,portX,<cmd>[,<arg>]" requests.
+ *
+ * On entry *data points at the remainder of the command, e.g.
+ * "port0,link_speed=10G", zero-terminated. The string is parsed in place
+ * with strtok_r and dispatched to the matching set_* helper.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports: valid numbers are 0..n_phy_ports-1.
+	 * (The previous check was inverted and rejected exactly the valid
+	 * phy ports; also reject negative values from atoi.)
+	 */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+	}
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/* Dispatch an incoming "adapter" request to the named entry function. */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out through **data. */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; this module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations registered with the NtConnect framework. */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register an adapter in the NtConnect framework. Claims the first free
+ * slot in adap_hdl[] and registers the module ops for the PCI device.
+ * Returns register_ntconn_mod()'s result, or -1 if all slots are taken.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_ADAPTERS && adap_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client driver handle; one slot per connected NtConnect client. */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Per-port flow device plus the caller id and forced VLAN id that
+ * func_flow_setport() records and make_flow_create() applies.
+ */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/*
+ * Error code to human-readable text mapping; terminated by the {-1, NULL}
+ * sentinel. Entry [1] ("Internal error") doubles as the fallback for
+ * unknown codes in get_error_msg().
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "Too many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Look up the error text for an error code; unknown codes map to the
+ * generic "Internal error" entry (index 1) of the table.
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	const ntconn_err_t *entry = &ntconn_err[0];
+
+	while (entry->err_code != (uint32_t)-1 &&
+			entry->err_code != err_code)
+		entry++;
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry->err_text;
+}
+
+/*
+ * Allocate and fill a flow_return_s reply carrying errno-style "err" and
+ * the text for "code". Returns REQUEST_OK, or REQUEST_ERR on allocation
+ * failure (with *len = 0).
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * strlcpy (as in ntconn_flow_err_status) guarantees
+		 * NUL-termination; the old memcpy could leave err_msg
+		 * unterminated.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Allocate and fill a generic "internal error" flow_return_s reply with
+ * errno-style "err". Returns REQUEST_OK, or REQUEST_ERR on allocation
+ * failure (with *len = 0).
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	struct flow_return_s *reply = (struct flow_return_s *)*data;
+	const char *err_msg = get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR);
+
+	reply->status = err;
+	reply->type = FLOW_ERROR_GENERAL;
+	strlcpy(reply->err_msg, err_msg, ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Dispatch table for "filter,<function>,..." requests; NULL-terminated. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Allocate and fill a flow_return_s reply from a status code and a
+ * flow_error. Returns REQUEST_OK, or REQUEST_ERR on allocation failure
+ * (with *len = 0).
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	struct flow_return_s *reply = (struct flow_return_s *)*data;
+
+	reply->status = status;
+	reply->type = error->type;
+	strlcpy(reply->err_msg, error->message, ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * Handle "filter,setport,in_port=N,vpath=P": bind a client port number to
+ * the flow device of the virtio path P, record its caller id and forced
+ * VLAN, and reply with the VF's queue list.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/* Initialize: vpath stays empty if the "vpath=" token is missing,
+	 * instead of being logged/used uninitialized.
+	 */
+	char vpath[MAX_PATH_LEN] = { 0 };
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle "filter,flush,port=N": remove all flows on the given port and
+ * reply with the operation status.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* atoi() may return a negative value; reject it before indexing
+	 * port_eth[] below.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handle "filter,destroy": destroy one flow on a port. The port number and
+ * flow handle arrive in the binary blob following the request header.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/* Preset error like the other flow handlers, in case the lower
+	 * layer does not fill it in on every path.
+	 */
+	set_error(&error);
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject negative port numbers too; port indexes port_eth[] below */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	/* Same reply format as the other handlers */
+	return copy_return_status(data, len, status, &error);
+}
+
+/* Selector for make_flow_create(): validate only, or actually create. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Translate a received create_flow_ntconnect blob into flow_elem and
+ * flow_action arrays and hand them to flow_validate() or flow_create().
+ *
+ * Returns the new flow handle (cast to uint64_t) for FLOW_API_FUNC_CREATE;
+ * 0 for FLOW_API_FUNC_VALIDATE or on error. *status and *error carry the
+ * detailed result.
+ *
+ * Note: all bounds checks below use ">=", since the local arrays hold
+ * exactly MAX_FLOW_STREAM_ELEM / MAX_FLOW_STREAM_VXLAN_TUN_ELEM /
+ * RAW_ENCAP_DECAP_ELEMS_MAX entries (valid indices 0..MAX-1); the previous
+ * ">" checks allowed a one-element out-of-bounds write.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Preset the error state used by all "goto error" exits */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unpack the flow elements; terminated by FLOW_ELEM_TYPE_END */
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unpack the actions; terminated by FLOW_ACTION_TYPE_END */
+	idx = -1;
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: validate a flow specification without
+ * creating it.  The flow blob sits hdr->len bytes into *data; the reply
+ * buffer is returned through *data/*len via copy_return_status().
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The embedded blob must be exactly one create_flow_ntconnect */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * NtConnect request handler: run a single query action (e.g. COUNT) on
+ * an existing flow and ship the result blob back to the client.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose blob is not exactly one query struct */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action qry_action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *qry_data = NULL;
+	uint32_t qry_length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &qry_action,
+			    &qry_data, &qry_length, &error);
+
+	/* Reply carries the fixed header plus the raw query payload */
+	*data = malloc(sizeof(struct query_flow_return_s) + qry_length);
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	struct query_flow_return_s *reply =
+		(struct query_flow_return_s *)*data;
+	*len = sizeof(struct query_flow_return_s) + qry_length;
+
+	reply->status = status;
+	reply->type = error.type;
+	strlcpy(reply->err_msg, error.message, ERR_MSG_LEN);
+
+	if (qry_data) {
+		memcpy(reply->data, qry_data, qry_length);
+		reply->data_length = qry_length;
+		free(qry_data);
+	} else {
+		reply->data_length = 0;
+	}
+	return REQUEST_OK;
+}
+
+/* ntconnapi_t request hook: dispatch a named flow function to its handler. */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len,
+				   0);
+
+	return res;
+}
+
+/*
+ * ntconnapi_t hook: release a reply buffer handed out by this module.
+ * free(NULL) is a defined no-op, so no guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* ntconnapi_t hook: per-client teardown; this module keeps no client state. */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/*
+ * NtConnect operations table for the "flow" module: name, interface
+ * version and the request/free/cleanup callbacks (positional init must
+ * match the ntconnapi_t field order).
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Hook the flow module into the NtConnect framework for @drv.
+ * Returns the register_ntconn_mod() result, or -1 when every client
+ * slot is already occupied.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Find the first unused client slot */
+	while (slot < MAX_CLIENTS && flow_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client registration state for the meter module */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-specific error code to text mapping; terminated by code -1 */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+static const char *get_error_msg(uint32_t err_code)
+{
+	const ntconn_err_t *entry;
+
+	/* Codes below the module range belong to the common ntconn layer */
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *err_msg = get_ntconn_error(err_code);
+
+		return err_msg->err_text;
+	}
+
+	/* Walk the module table; the sentinel entry has code (uint32_t)-1 */
+	for (entry = ntconn_err; entry->err_code != (uint32_t)-1; entry++) {
+		if (entry->err_code == err_code)
+			return entry->err_text;
+	}
+
+	/* Unknown code: fall back to the "Internal error" entry */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Dispatch table mapping request function names to their handlers */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	/* No cause object is available; only type and message are filled in */
+	error->cause = NULL;
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+}
+
+/*
+ * NtConnect request handler: fetch the rte_mtr capabilities for the
+ * port derived from the "vport=<n>" argument carried in *data.
+ * On success the reply is a meter_capabilities_return_s; on failure a
+ * meter_error_return_s with the status and error text.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse the "vport=<n>" token from the request string */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* vport 0 means "not supplied"; valid range here is 1..64 */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is the low bit of the vport number */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: meter configuration.  The command name
+ * ("addpro", "delpro", "addpol", "delpol", "crtmtr", "delmtr") is the
+ * first token in *data and the meter_setup_s blob sits hdr->len bytes
+ * into *data.  Per-vport IDs are rebased into the device-global ID
+ * space before being handed to the rte_mtr API.
+ * Replies with meter_return_s on success, meter_error_return_s on error.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* Decode the 6-character command token at the front of the request */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		/* Each vport owns an equal slice of the profile ID space */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Assemble the per-color action table expected by rte_mtr */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebase the referenced profile/policy IDs the same way */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect request handler: read (and optionally clear) the statistics
+ * of one meter.  The meter_get_stat_s blob sits hdr->len bytes into
+ * *data; its per-vport meter ID is rebased into the global ID space
+ * before calling rte_mtr_stats_read().
+ * Replies with meter_return_stat_s on success, meter_error_return_s on
+ * error.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Physical port is the low bit; rebase the meter ID per vport */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* ntconnapi_t request hook: dispatch a named meter function to its handler. */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len,
+				   0);
+
+	return res;
+}
+
+/*
+ * ntconnapi_t hook: release a reply buffer handed out by this module.
+ * free(NULL) is a defined no-op, so no guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* ntconnapi_t hook: per-client teardown; this module keeps no client state. */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/*
+ * NtConnect operations table for the "meter" module: name, interface
+ * version and the request/free/cleanup callbacks (positional init must
+ * match the ntconnapi_t field order).
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Hook the meter module into the NtConnect framework for @drv.
+ * Returns the register_ntconn_mod() result, or -1 when every client
+ * slot is already occupied.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Find the first unused client slot */
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..5c8b8db39e
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+/* Module name used for request dispatch and logging */
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistics section within a client snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* Location and size (in 64-bit words) of one snapshot section */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot state, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;
+	struct snaps_s *next;
+};
+
+/* Module-global state: driver handle, hw statistics and the snapshot list */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic group selector used by get_size() and do_get_stats() */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Level-2 dispatch table: read back sections of a client's snapshot */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* Level-1 dispatch table: "get.*" requests (live reads, no snapshot) */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level dispatch table for the stat module */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize Flow Matcher (FLM) statistics into a reply buffer.
+ * val points to a buffer laid out as struct ntc_stat_get_data_s followed by
+ * nbc records of struct flowmatcher_type_fields_s.
+ * Returns the number of 64-bit words written (records plus info header).
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/*
+		 * NOTE(review): flm is not advanced inside this loop, so for
+		 * nbc > 1 every iteration would overwrite the same record.
+		 * get_size() currently always reports one FLM record -
+		 * confirm nbc == 1 is invariant.
+		 */
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/*
+		 * Zero the reply records. Size the clear from the reply record
+		 * type, not from the differently typed internal FLM stat
+		 * struct, so the clear matches the buffer that was allocated
+		 * (nbc records of the reply type).
+		 */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize color statistics into a reply buffer: an info header followed
+ * by nbc records. Returns the number of 64-bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *out;
+	int i;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap record layouts are the same for colors */
+	out = (struct color_type_fields_s *)cdata->data;
+	for (i = 0; i < nbc; i++) {
+		out[i].pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		out[i].octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		out[i].tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize host-buffer (queue) statistics into a reply buffer: an info
+ * header followed by nbq records. Returns the number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *qdata = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *out;
+	int i;
+
+	qdata->nb_counters = (uint64_t)nbq;
+	qdata->timestamp = hwstat->last_timestamp;
+	qdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap record layouts are the same for queues */
+	out = (struct queue_type_fields_s *)qdata->data;
+	for (i = 0; i < nbq; i++) {
+		out[i].flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		out[i].drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		out[i].fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		out[i].dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		out[i].flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		out[i].drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		out[i].fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		out[i].dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the common RMON counter set from an internal port counter block into
+ * the wire-format reply structure. The two structs are distinct types, so
+ * each field is copied explicitly rather than via memcpy.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port statistics into a reply buffer. The record layout
+ * depends on whether the adapter runs in vswitch (virt) or capture mode.
+ * Returns the number of 64-bit words written (records plus info header).
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+
+		for (p = 0; p < nbp; p++) {
+			out[p].octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			out[p].pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			out[p].drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			out[p].qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_octets;
+			out[p].qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct rx_type_fields_cap_s *out =
+		(struct rx_type_fields_cap_s *)rxdata->data;
+
+	for (p = 0; p < nbp; p++) {
+		/* Alias the per-port counter block to keep the copy readable */
+		struct port_counters_v2 *pc =
+			&hwstat->cap.mp_stat_structs_port_rx[p];
+
+		copy_rmon_stat(pc, &out[p].rmon);
+
+		/* Rx only port counters */
+		out[p].mac_drop_events = pc->mac_drop_events;
+		out[p].pkts_lr = pc->pkts_lr;
+		out[p].duplicate = pc->duplicate;
+		out[p].pkts_ip_chksum_error = pc->pkts_ip_chksum_error;
+		out[p].pkts_udp_chksum_error = pc->pkts_udp_chksum_error;
+		out[p].pkts_tcp_chksum_error = pc->pkts_tcp_chksum_error;
+		out[p].pkts_giant_undersize = pc->pkts_giant_undersize;
+		out[p].pkts_baby_giant = pc->pkts_baby_giant;
+		out[p].pkts_not_isl_vlan_mpls = pc->pkts_not_isl_vlan_mpls;
+		out[p].pkts_isl = pc->pkts_isl;
+		out[p].pkts_vlan = pc->pkts_vlan;
+		out[p].pkts_isl_vlan = pc->pkts_isl_vlan;
+		out[p].pkts_mpls = pc->pkts_mpls;
+		out[p].pkts_isl_mpls = pc->pkts_isl_mpls;
+		out[p].pkts_vlan_mpls = pc->pkts_vlan_mpls;
+		out[p].pkts_isl_vlan_mpls = pc->pkts_isl_vlan_mpls;
+		out[p].pkts_no_filter = pc->pkts_no_filter;
+		out[p].pkts_dedup_drop = pc->pkts_dedup_drop;
+		out[p].pkts_filter_drop = pc->pkts_filter_drop;
+		out[p].pkts_overflow = pc->pkts_overflow;
+		out[p].pkts_dbs_drop = pc->pkts_dbs_drop;
+		out[p].octets_no_filter = pc->octets_no_filter;
+		out[p].octets_dedup_drop = pc->octets_dedup_drop;
+		out[p].octets_filter_drop = pc->octets_filter_drop;
+		out[p].octets_overflow = pc->octets_overflow;
+		out[p].octets_dbs_drop = pc->octets_dbs_drop;
+		out[p].ipft_first_hit = pc->ipft_first_hit;
+		out[p].ipft_first_not_hit = pc->ipft_first_not_hit;
+		out[p].ipft_mid_hit = pc->ipft_mid_hit;
+		out[p].ipft_mid_not_hit = pc->ipft_mid_not_hit;
+		out[p].ipft_last_hit = pc->ipft_last_hit;
+		out[p].ipft_last_not_hit = pc->ipft_last_not_hit;
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize Tx port statistics into a reply buffer. The record layout
+ * depends on whether the adapter runs in vswitch (virt) or capture mode.
+ * Returns the number of 64-bit words written (records plus info header).
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+	int p;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		struct rtx_type_fields_virt_s *out =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+
+		for (p = 0; p < nbp; p++) {
+			out[p].octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			out[p].pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			out[p].drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			out[p].qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_octets;
+			out[p].qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	struct tx_type_fields_cap_s *out =
+		(struct tx_type_fields_cap_s *)txdata->data;
+
+	for (p = 0; p < nbp; p++) {
+		copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+			       &out[p].rmon);
+		/* Override the RMON packet count with the per-port Tx total */
+		out[p].rmon.pkts = hwstat->a_port_tx_packets_total[p];
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Reply with the statistics layout version reported by the FPGA.
+ * On success the reply is a single int owned by the caller (freed via
+ * stat_free_data).
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	int *reply = malloc(sizeof(int));
+
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Reply with the FLM record layout version: 1 for FLM stat versions below
+ * 18, otherwise 2. The reply is a single int owned by the caller.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	int *reply = malloc(sizeof(int));
+
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return the total number of 64-bit counters occupied by one statistics
+ * type, including the STAT_INFO_ELEMENTS header words.
+ * *num_records receives the number of records for the type (number of
+ * colors, queues, ports, ...).
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec, size;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	default:
+		/* Defensive: keeps nrec/size defined if the enum ever grows */
+		nrec = 0;
+		size = 0;
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for the single-group stat requests: size and allocate the
+ * reply buffer, then fill it via read_counters while holding the driver
+ * stat lock. The reply buffer is owned by the caller.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int n_records;
+	const int n_words = get_size(stat, stype, &n_records);
+	const size_t n_bytes = (size_t)n_words * sizeof(uint64_t);
+	uint64_t *val = malloc(n_bytes);
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, n_records);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = (int)n_bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat request handlers: one per statistic group, all thin guards around
+ * do_get_stats().
+ */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Request handler: return color statistics */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Request handler: return host-buffer (queue) statistics */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Request handler: return Rx port statistics */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Request handler: return Tx port statistics */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot node for client_id in the singly linked list.
+ * If parent is non-NULL it receives the predecessor node (NULL when the
+ * match is the list head, or the list tail when there is no match), which
+ * the caller can use for unlinking.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot node for client_id, creating and linking a fresh one
+ * at the list head if none exists yet. Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *node = find_client_snap_data(stat, client_id, NULL);
+
+	if (node)
+		return node;
+
+	node = malloc(sizeof(struct snaps_s));	/* NULL on malloc failure */
+	if (node) {
+		node->client_id = client_id;
+		node->buffer = NULL;
+		node->next = stat->snaps_base;
+		stat->snaps_base = node;
+	}
+	return node;
+}
+
+/*
+ * Take a snapshot of all statistic groups (colors, queues, Rx and Tx ports)
+ * for one client, read consistently under the driver stat lock. The
+ * sections are read back individually via the "snapshot.*" get functions.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/*
+		 * Allocation failed. The stat lock is not taken yet, so do
+		 * not jump to err_out, which would unlock an unowned mutex
+		 * (undefined behavior per POSIX).
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	free(snaps->buffer);	/* free(NULL) is a no-op */
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	/* Read all groups under one lock so the snapshot is consistent */
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	/* Reached only with the stat lock held */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section (colors/queues/Rx/Tx) of the client's latest snapshot
+ * into a freshly allocated reply buffer owned by the caller.
+ * Returns an error reply when no snapshot exists for this client.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	if (!*data) {
+		/* was "if (!data)", which can never be true here - the
+		 * allocation result is what must be checked
+		 */
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* Return the colors section of this client's latest snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* Return the queues section of this client's latest snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* Return the Rx port section of this client's latest snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* Return the Tx port section of this client's latest snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat module entry point: dispatch an incoming request string to the
+ * matching handler in stat_entry_funcs.
+ * Note: client_id is forwarded to execute_function, so it must not be
+ * annotated _unused.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Free a reply buffer previously returned by a stat request handler. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Drop the snapshot state kept for a disconnecting client: unlink its node
+ * from the list and release the node and its data buffer.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *prev;
+	struct snaps_s *node = find_client_snap_data(stat, client_id, &prev);
+
+	if (!node)
+		return;
+
+	if (prev)
+		prev->next = node->next;
+	else
+		stat->snaps_base = node->next;
+
+	free(node->buffer);	/* free(NULL) is a no-op */
+	free(node);
+}
+
+/* NtConnect operations table registered for the stat module. */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for the adapter in the NtConnect framework.
+ * Fails when the hw statistics module is not initialized or when the
+ * statistics layout version is not in layout_versions_supported[].
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		/* Every request handler guards against a NULL mp_nthw_stat;
+		 * guard here too instead of dereferencing it below.
+		 */
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: not initialized. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+/* Module name used for request dispatch and logging */
+#define this_module_name "ntconnect_test"
+
+/* Number of registration slots in test_hdl[] */
+#define MAX_CLIENTS 32
+
+/* NOTE(review): UNUSED is not referenced in the code visible here -
+ * candidate for removal
+ */
+#define UNUSED __rte_unused
+
+/* One registration slot per adapter */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Top-level dispatch table for the test module */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Loopback test handler: validates the received blob (struct test_s followed
+ * by 'number' uint64 values) and echoes the values back to the client.
+ * On validation failure a status-only reply is returned instead.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	if (number < 0) {
+		/* A negative count would wrap the unsigned size math below */
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* Success reply: echo all received test values */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Error reply: status only, no payload */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* Flow API sub-function selectors (not referenced in the code visible here) */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Test module entry point: dispatch an incoming request string to the
+ * matching handler in adapter_entry_funcs.
+ * Note: client_id is forwarded to execute_function, so it must not be
+ * annotated _unused.
+ */
+static int test_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Free a reply buffer previously returned by a test request handler.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; the test module keeps no per-client state. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect operations table registered for the test module. */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register the test module for one adapter in the NtConnect framework.
+ * Claims the first free slot in test_hdl[]; fails when all slots are taken.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_CLIENTS && test_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[slot],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (13 preceding siblings ...)
  2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-30 16:51 ` Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (5 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Removed unneeded cflags as suggested in review comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
v10:
* Update FPGA register defines.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0,
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+struct nt_fpga_field_init {
+	int id;
+	uint16_t bw;
+	uint16_t low;
+	uint64_t reset_val;
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+struct nt_fpga_register_init {
+	int id;
+	uint32_t addr_rel;
+	uint16_t bw;
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+struct nt_fpga_module_init {
+	int id;
+	int instance;
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base;
+	int nb_registers;
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params;
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..2d6a31b35f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (Flow Matcher) field layout tables, part 1 of 2.
+ * NOTE(review): each entry appears to be { field id, bit width, lsb offset,
+ * reset value } per nt_fpga_field_init_t (fpga_model.h) -- verify against
+ * that header. Generated register-map data; do not hand-edit values.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* Learn-record layout: bit offsets span a 768-bit wide FLM_LRN_DATA record. */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+/*
+ * FLM field layout tables, part 2 of 2: priority, PST/RCP recipes, status
+ * and the per-event 32-bit statistics counters (one single-field table per
+ * FLM_STAT_* counter register). Entry layout presumably
+ * { field id, bit width, lsb offset, reset value } -- generated data.
+ */
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/*
+ * FLM register table. NOTE(review): each entry appears to be
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } per nt_fpga_register_init_t (fpga_model.h) -- verify there.
+ * The field count must equal the length of the referenced *_fields array
+ * (spot-checked: FLM_CONTROL has 17 fields, FLM_LRN_DATA 32, FLM_RCP_DATA 19).
+ */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG field layout tables. Two parallel instances (suffix 0/1), presumably
+ * one per port -- TODO confirm against the GFG module documentation.
+ * Entry layout: { field id, bit width, lsb offset, reset value }.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* Instance 1 defaults to stream id 1 (instance 0 defaults to 0). */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+/*
+ * GFG register table: { id, index, width, type, reset, field count, fields }.
+ * Instance 0 occupies indices 0-4, instance 1 indices 5-9.
+ */
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF field layout tables (control, IFG/clock adjustment, statistics and
+ * timestamp injection). Entry layout: { id, bit width, lsb, reset value }.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+/*
+ * GMF register table: { id, index, width, type, reset, field count, fields }.
+ * The two REGISTER_TYPE_RC1 entries are the sticky/max statistics; RC1
+ * presumably means clear-on-read -- TODO confirm in fpga_model.h.
+ */
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY tables: per-port PHY control/status pins (LPMODE, RESET_B,
+ * MODPRS_B, INT_B, RXLOS). The _B suffix presumably marks active-low
+ * signals -- TODO confirm. Entry layout: { id, width, lsb, reset value }.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+/* Register resets: CFG 170 == 0b10101010 (INT_B/MODPRS_B high), GPIO 17. */
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU recipe tables. RCP_CTRL is the usual { ADR, CNT } indexed-write
+ * interface; RCP_DATA is a 155-bit recipe record with 31 packed fields.
+ * Entry layout: { id, bit width, lsb offset, reset value }.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (Host Interface) field tables: product id, build time, UUID, PCIe
+ * config and statistics. Build time 1693228548 is a Unix epoch timestamp
+ * baked in by the FPGA build. Entry layout: { id, width, lsb, reset value }.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1693228548 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+/* Product id 9563-055-024 matches this file's FPGA image name. */
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* Scratch test registers: resets are the 0x11223344 / 0xAABBCCDD patterns. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 929302248 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 2904641880 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 55459253 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 4051580681 },
+};
+
+/*
+ * HIF register table: { id, index, width, type, reset, field count, fields }.
+ * Register resets mirror the composed field resets, e.g. HIF_PROD_ID_LSB
+ * 626734872 == (9563 << 16) | (55 << 8) | 24.
+ */
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1693228548, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 929302248, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 2904641880, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 55459253, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 4051580681, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH recipe tables. RCP_DATA is a 743-bit recipe record with 23 packed
+ * fields (including a 320-bit Toeplitz key K and 320-bit WORD_MASK).
+ * Entry layout: { id, bit width, lsb offset, reset value }.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST recipe tables: a 133-bit recipe with start/end positions and three
+ * modifier slots (CMD/DYN/OFS/VALUE each). Entry layout:
+ * { id, bit width, lsb offset, reset value }.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR recipe tables: per-recipe enable bit plus a 14-bit MTU.
+ * Entry layout: { id, bit width, lsb offset, reset value }.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC (I2C controller) tables. The register map (CR/SR/ISR/IER, TX/RX
+ * FIFOs, timing registers TSUSTA..THDDAT) presumably mirrors the Xilinx
+ * AXI IIC core layout -- TODO confirm against that core's data sheet.
+ * Entry layout: { id, bit width, lsb offset, reset value }.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+/* SR reset 192 == 0xC0: both FIFO-empty flags set out of reset. */
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS recipe tables: 23-bit recipe of { DYN, OFS, LEN }.
+ * Entry layout: { id, bit width, lsb offset, reset value }.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (Key Matcher) field tables: CAM entry layout (6x32-bit words plus 6
+ * flow-type nibbles), recipe record, status and TCAM entry. Entry layout:
+ * { id, bit width, lsb offset, reset value }. Generated data.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* KM recipe record: dual A/B lookup configuration packed into one record. */
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/* PTP1588 (IEEE 1588 time-sync block) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/* QSL (queue selection) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/* QSPI (quad SPI flash controller) register map tables; register names
+ * (CR/SR/DTR/DRR/IER/ISR/SRR/SSR) mirror the Xilinx AXI Quad SPI layout —
+ * NOTE(review): presumed, confirm against the IP documentation.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/* RAC (register access/RAB bus bridge) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. Register resets are the OR of the field resets at their
+ * lsb offsets (e.g. RAC_RAB_BUF_FREE: 511 | 511 << 16 == 33489407).
+ * NOTE(review): layout inferred from the data — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Note: RAC register indices are much larger than other modules'
+ * (4160..4480) — presumably byte/word addresses rather than small
+ * register indices; verify against the RAC bus definition. */
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/* RFD (frame decoder) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. Resets encode protocol constants: TPID 33024 == 0x8100
+ * (802.1Q), VXLAN port 4789, max frame size 9018.
+ * NOTE(review): layout inferred from the data — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/* RMC (RX MAC control/merge) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/* RPL (replacer) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 128-bit wide data field — wider than one 32-bit word; presumably
+ * accessed as multiple words by the register abstraction; verify. */
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/* RPP_LR (RX packet processor, local retransmit) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/* RST9563 (reset controller for FPGA image 9563) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* Most reset bits come up asserted (reset value 1 / all-ones). */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/* SLC (slicer) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/* SPIM (SPI master) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/* SPIS (SPI slave) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/* STA (statistics counters + DMA of stats to host) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/* TEMPMON (on-die temperature monitor) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/* TINT (timer interrupt) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/* TMC (TX merge/control) register map tables.
+ * Field entry: { FIELD_ID, bit width, lsb offset, reset value }; register
+ * entry: { REG_ID, index, bit width, access type, reset value, nb fields,
+ * field table }. NOTE(review): layout inferred from the data — confirm
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/* TSM (time sync module) register map tables. One CONFIG/SAMPLE_HI/
+ * SAMPLE_LO group per external time connector (CON0..CON7), then the
+ * global TSM configuration, NT-TS (NTTS) sync, LED, PI controller and
+ * timer tables. Field entry: { FIELD_ID, bit width, lsb offset, reset
+ * value }. NOTE(review): layout inferred from the data — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+/* CON3..CON6 use a different CONFIG layout than CON0..CON2 (no DC_SRC /
+ * PPSIN_2_5V, BLIND at lsb 5). */
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+/* Note: CON5 SAMPLE_LO field is named *_TIME, unlike the *_NS of the
+ * other connectors — presumably generated from the FPGA source as-is. */
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+/*
+ * TSM register table. Each nt_fpga_register_init_t entry appears to be
+ * { register id, register index, bit width, access type, reset value,
+ *   field count, field list } - the field count matches the length of the
+ * corresponding *_fields array above (e.g. TSM_LED has 12 fields).
+ * TODO(review): confirm member order against fpga_model.h.
+ * Generated data - do not edit by hand.
+ */
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+/*
+ * Module instances present in this FPGA image. Each nt_fpga_module_init_t
+ * entry appears to be { module id, instance no, definition id,
+ * version major, version minor, bus type, bus address, register count,
+ * register table } - the generated trailing comment on each entry repeats
+ * the same data ("NAME:instance NAME vMAJ.MIN: NAME @ BUS,ADDR").
+ * TODO(review): confirm member order against fpga_model.h.
+ * Generated data - do not edit by hand.
+ */
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+/*
+ * Product parameter values for this FPGA image, keyed by the NT_* ids
+ * from nthw_fpga_parameters_defs.h and terminated by the { 0, -1 }
+ * sentinel entry. Generated data - do not edit by hand.
+ */
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1693228548 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+/*
+ * Top-level product init descriptor for FPGA image 9563-55-24-0000.
+ * The two counts on the last line match the tables above: 140 entries in
+ * product_parameters[] (excluding the { 0, -1 } terminator) and
+ * 48 entries in fpga_modules[].
+ */
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1693228548, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules, /* param/module counts and tables */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+/*
+ * List of all FPGA images supported by this PMD build.
+ * NOTE(review): termination convention (NULL-terminated?) is not visible
+ * here - confirm at the array definition.
+ */
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];
+
+/* Init data for FPGA image 9563-55-24-0000 (product 9563, version 55, revision 24). */
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/*
+ * Numeric ids for FPGA module definitions, referenced from the per-image
+ * fpga_modules[] tables. Generated - do not renumber existing ids.
+ */
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+/* NOTE(review): id 32L is unused in the generated numbering. */
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicator - keep this as the last module id;
+ * only aliases go below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+/*
+ * Numeric ids for FPGA product parameters, used as keys in the per-image
+ * product_parameters[] tables. Appears generated - do not renumber
+ * existing ids.
+ */
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+/*
+ * NOTE(review): auto-generated identifier constants for the NTNIC FPGA
+ * register map (module/register/field lookup IDs). The values are
+ * sequential indices, presumably emitted by the Napatech register-map
+ * generator -- do not renumber, rename, or reorder by hand; regenerate
+ * instead. Gaps and ordering quirks in the sequence below appear to be
+ * generator artifacts -- TODO confirm against the full generated map.
+ */
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+/* NOTE(review): RST_STAT* break the otherwise-alphabetical order here;
+ * looks like generator output order -- verify, do not re-sort manually. */
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* NOTE(review): IDs 8084-8093 (and similar gaps below, e.g. 8139-8158,
+ * 8251-8267, 8323-8611) are absent -- presumably they belong to register
+ * sets not included in this build; confirm against the generated map. */
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+/* NOTE(review): RXAUI defines follow without a section comment, unlike
+ * every other module here -- likely a generator omission; verify. */
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+/* NOTE(review): CON5 uses field suffix _TIME where CON0-CON4/CON6 use
+ * _NS -- presumably matches the generated register map; confirm before
+ * any attempt to harmonize, since external code may use this name. */
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-30 16:51   ` Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v9:
* Add missing header
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+#include <stdint.h>	/* uint8_t: header must be self-contained */
+
+/* Identifier of a RAB (Register Access Bus) instance. */
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+/*
+ * Placeholder definitions: const objects with no initializer are
+ * zero-initialized, so the table is empty here -- presumably the real
+ * Si5340 register data is added in a later commit; TODO confirm.
+ */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+/* Message emitted by static size checks of the profile tables */
+#define clk_profile_size_error_msg "size test failed"
+
+/* Format 0: 8-bit register address, value, and read-modify-write mask */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Format 1: 16-bit register address, full 8-bit value (no mask) */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Format 2: wide register address, full 8-bit value (no mask) */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Discriminator telling a consumer which table layout it was handed */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/* Si5340 profile table for NT200A02 (defined in nthw_clock_profiles.c) */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+/*
+ * Umbrella header: pulls in all core NTHW module interfaces so a user
+ * needs only this single include.
+ */
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Query product parameters from the FPGA model, cache them in
+ * p_fpga_info, then classify the operational profile (VSWITCH, CAPTURE
+ * or INLINE) from which optional FPGA modules are present.
+ *
+ * Always returns 0; parameters absent from the model default to -1.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	/* Queue manager present implies a capture-profile FPGA */
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	/* Otherwise fall back to the inline profile */
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one register byte from an I2C device behind FPGA IIC controller
+ * instance n_instance_no.
+ *
+ * Returns the byte value (0..255) on success, -1 on init or read
+ * failure -- the error value cannot collide with valid data.
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;	/* stack-local controller handle */
+	uint8_t val = 0;
+
+	/* last arg 8 is presumably a bus timing/clock parameter -- TODO confirm */
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+/*
+ * Write one register byte to an I2C device behind FPGA IIC controller
+ * instance n_instance_no.
+ *
+ * Returns 0 on success, -1 on init or write failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;	/* stack-local controller handle */
+
+	/* last arg 8 is presumably a bus timing/clock parameter -- TODO confirm */
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Scan each FPGA IIC controller instance in the inclusive range
+ * [n_instance_no_begin, n_instance_no_end] for attached devices;
+ * results are reported by nthw_iic_scan itself (presumably via logging
+ * -- TODO confirm).  Instances that fail allocation or init are
+ * silently skipped.  Always returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				/* generous retries: scanning probes addresses that may not ACK */
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Identify the Silicon Labs clock chip attached to IIC instance
+ * n_instance_no: select register page 0 via n_page_reg_addr, read 8
+ * identification bytes from register 0x00 and conclude the part number.
+ *
+ * Returns 5340, 5341 or 5338 on success; -1 on I2C failure or when the
+ * ident bytes match no known part.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;	/* stays all-ones unless the read succeeds */
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* fold the 8 ident bytes into one value for logging */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+				else
+					/* BUGFIX: previously left res == 0 (looks
+					 * like success) for an unknown Si534x part
+					 */
+					res = -1;
+			} else if (a_silabs_ident[2] == 0x38) {
+				/* BUGFIX: was decimal 38 (0x26); sibling checks use
+				 * BCD-style hex (0x40 -> 5340, 0x41 -> 5341), so
+				 * Si5338 must match 0x38 -- verify on hardware
+				 */
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ *
+ * Table-free byte-at-a-time update; initial value 0 (XModem-style seed
+ * -- TODO confirm against the peer that validates this CRC).
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t seed = 0;
+
+	while (length--) {
+		/* swap halves, mix in the next byte, then fold per the
+		 * 0x1021 polynomial -- exact statement order is load-bearing
+		 */
+		seed = (uint16_t)(seed >> 8 | seed << 8);
+		seed = (uint16_t)(seed ^ *buffer++);
+		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
+		seed = (uint16_t)(seed ^ seed << 8 << 4);
+		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1);
+	}
+	return seed;
+}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+/*
+ * Program the Si5340 clock synthesizer (NT50B01, NT200A02, NT200A01-HWbuild2)
+ * over I2C with the supplied fmt2 clock profile.
+ *
+ * p_fpga                : FPGA instance providing the I2C controller
+ * n_iic_addr            : 7-bit I2C address of the Si5340 (e.g. U23)
+ * p_clk_profile         : fmt2 register/value profile records to write
+ * n_clk_profile_rec_cnt : number of records in p_clk_profile
+ *
+ * Returns the result of nthw_si5340_config_fmt2() (0 on success).
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	/* NOTE(review): p_nthw_iic is never deleted here — presumably
+	 * nthw_si5340_delete() takes ownership, or this leaks; confirm.
+	 */
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+/*
+ * Probe and bring up the FPGA described by p_fpga_info:
+ *  - read the FPGA ident and build-time registers over the register bus,
+ *  - look up a matching supported FPGA model via the FPGA manager,
+ *  - read FPGA parameters, init RAC, run the adapter-specific reset,
+ *  - probe PCIe3 (falling back to HIF) and set up the TSM timers.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		/* Read the 64-bit FPGA ident and the build timestamp registers */
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/* fix: use the function-scope n_fpga_ident instead of a
+		 * shadowing local of the same name
+		 */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	/* Look up a supported FPGA model matching the ident just read */
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		/* fix: do not leak the FPGA manager on the error path */
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+		return -1;
+	}
+
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-specific reset sequence */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Prefer PCIe3; fall back to HIF when the PCIe3 module is absent */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down the FPGA by resetting the register access bus (RAB).
+ * Returns the RAB reset result, or -1 when no info/RAC is available.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	/* fix: the inner "p_fpga_info &&" was redundant inside the outer
+	 * NULL check; a single combined condition is equivalent and clearer
+	 */
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the nthw FPGA core: probe/init/shutdown entry points
+ * plus AVR, I2C and clock-synthesizer helper routines.
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* FPGA lifecycle: probe/init all core modules; shutdown resets the RAB */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* Read FPGA build parameters into p_fpga_info */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* Probe the board-management AVR and read its sysinfo/VPD data */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* I2C helpers over the FPGA IIC module instances */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* Detect a SiLabs clock device at the given address/page register */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* Program an Si5340 clock synthesizer with an fmt2 clock profile */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x adapter init: run the common NT200A0x reset preparation, then
+ * dispatch to the FPGA-product-specific reset sequence (currently only
+ * the 9563 product is supported). Returns 0 on success, non-zero on error.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res = -1;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve and cache all RST9563 register/field pointers needed by the
+ * NT200A0x reset sequence, and record the FPGA product/version/revision
+ * in *p. Fields that do not exist on the 9563 product are set to NULL.
+ * Returns 0 on success, -1 when the RST9563 module instance is missing.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	/* fix: the module lookup + NULL check was duplicated verbatim;
+	 * one lookup is sufficient
+	 */
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	/* fix: mp_fld_rst_ptp was assigned twice with the same value */
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 PERIPH reset field (assert, then deassert).
+ * Returns 0 on success, -1 when the RST9563 module instance is missing.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+
+	if (p_mod_rst == NULL)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+
+	nt_register_t *p_rst_reg = module_get_register(p_mod_rst, RST9563_RST);
+	nt_field_t *p_periph_fld =
+		register_get_field(p_rst_reg, RST9563_RST_PERIPH);
+
+	/* assert then deassert the reset bit, flushing each write */
+	field_set_flush(p_periph_fld);
+	field_clr_flush(p_periph_fld);
+
+	return 0;
+}
+
+/*
+ * Initialize the adapter's clock synthesizer. Only the Si5340 (model 5340,
+ * fmt2 profile layout) is supported; any other model is rejected.
+ * Returns 0 on success, -1 on unsupported model or programming failure.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	/* guard: reject anything that is not an Si5340 */
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Full reset/bring-up sequence for the 9563 FPGA product:
+ *  1. peripheral reset pulse,
+ *  2. clock synthesizer programming,
+ *  3. RST9563 field-pointer setup,
+ *  4. common NT200A0x reset.
+ * Stops at the first failing step and returns its result (0 on success).
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model;
+	uint8_t n_si_labs_clock_synth_i2c_addr;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+	n_si_labs_clock_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	n_si_labs_clock_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+
+	/* Step 1: pulse the peripheral reset */
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 2: program the clock synthesizer (Si5340 on this product) */
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_si_labs_clock_synth_model,
+						n_si_labs_clock_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 3: cache RST9563 register/field pointers into p_rst */
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 4: run the common NT200A0x reset using the cached fields */
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED, retrying with a DDR PLL reset between
+ * attempts; then wait for the DDR4 (and optional TSM REF) MMCM locks and
+ * clear/verify the sticky unlock bits.
+ * Returns 0 on success, -1 when an MMCM never locks.
+ *
+ * Fixes vs. original: the loop re-declared a local 'int locked' shadowing
+ * the outer 'uint32_t locked' (-Wshadow); 'retrycount' was unsigned while
+ * tested with '<= 0'. Both are now a single signed int each - behavior of
+ * the retry sequence (6 waits: 1*5s + 5*8s = 45s total) is unchanged.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	int retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			/* Retries exhausted; fall through and try the MMCM waits anyway */
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is optional; field pointer is NULL when not present */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ *
+ * Fix vs. original: the SDRAM state logs printed a uint64_t with "%08lX",
+ * which is undefined on targets where long is 32-bit; PRIX64 is used instead.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				/*
+				 * NOTE(review): p_nthw_sdc may be NULL here when no
+				 * SDC module was probed - confirm that
+				 * nthw_sdc_get_states(NULL, ...) is NULL-safe.
+				 */
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Full NT200A0X FPGA reset sequence: assert all domain resets, release
+ * SYS/CORE, wait for MMCM/PLL locks, calibrate SDRAM, then release the
+ * timesync/PTP domain. Returns 0 on success, -1 on any lock/calibration
+ * failure.
+ *
+ * Cleanups vs. original: dead '(true) &&' conditions dropped; an
+ * unreachable 'if (false)' PTP-MMCM clock-select sequence removed; the
+ * duplicated per-instance XXV MAC/PCS RX reset is now a loop.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if (p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if (p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx; both instances */
+		int i;
+
+		for (i = 0; i < 2; i++) {
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv =
+				nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv, p_fpga, i, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * NOTE(review): an 'if (false)' guarded PTP MMCM / PTP clock select
+	 * sequence (de-assert PTP MMCM reset, select ptp clk via
+	 * mp_fld_ctrl_ptp_mmcm_clk_sel, re-lock TS MMCM) was removed here as
+	 * unreachable dead code.
+	 */
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Probe-time init for the NT200A0X reset logic: resets/sets up the RAB
+ * buses, probes peripherals, detects the Si-Labs clock synth model and
+ * records it (plus its I2C address and HW id) in *p_rst.
+ * Returns the result of the last I2C scan, or -1 if no synth is found.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/*
+	 * NOTE(review): the results of the AVR probe and the first IIC scan
+	 * are overwritten below - presumably best-effort discovery probes,
+	 * but confirm the return values are safe to discard.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Record detection results for the later reset sequence */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset-logic context for NT200A0X adapters: identification data, the
+ * detected clock synth, and pointers to the RST module's register fields.
+ * Field pointers left NULL are treated by the reset code as "not
+ * implemented on this FPGA" (see nthw_fpga_rst_nt200a0x_reset()).
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synth (e.g. 5338/5340) and its 7-bit I2C address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (reset bits per domain/module) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional per-adapter callbacks - NOTE(review): no caller visible in
+	 * this chunk; presumably installed by adapter-specific setup code.
+	 */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/* Allocate a zeroed GPIO_PHY context; NULL on allocation failure. */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc == malloc + memset(0): unprobed field pointers read as NULL */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/* Free a GPIO_PHY context; a NULL argument is a no-op. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the struct before handing the memory back */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY context to FPGA module instance n_instance and resolve
+ * its CFG/GPIO registers and per-port fields. With p == NULL the call only
+ * probes for the module (0 if present, -1 if not). Fields looked up via
+ * register_query_field() are optional and may remain NULL on older FPGAs.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* Prime the shadow copy of the CFG register */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* True when the port's LPMODE GPIO pin reads as asserted. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* True when the port interrupt is pending. */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" - active low */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* True when the port is currently held in reset. */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" - active low */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* True when a transceiver module is present in the port. */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" - active low */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/* True when the PLL interrupt pin is asserted; false when this HW has no
+ * "PLL_INTR" (INTR from SyncE jitter attenuater) pin at all.
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* Optional field - NULL when not implemented on this FPGA */
+	if (!p->mpa_fields[if_no].gpio_pll_int)
+		return false;
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" - active high */
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/* True when the emulated RXLOS pin is asserted; false when the FPGA does
+ * not implement it.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* Optional field - NULL when not implemented on this FPGA */
+	if (!p->mpa_fields[if_no].gpio_port_rxlos)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/* Drive the port's LPMODE pin to 'enable' and switch its CFG to output. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/* Assert (enable) or release the port's active-low RESET_B pin and switch
+ * its CFG to output.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	/* Active low: clear the pin to assert reset */
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/* Drive the emulated RXLOS pin to 'enable'; no-op when the FPGA does not
+ * implement it.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (!flds->gpio_port_rxlos)
+		return;
+
+	if (enable)
+		field_set_flush(flds->gpio_port_rxlos);
+	else
+		field_clr_flush(flds->gpio_port_rxlos);
+}
+
+/* Restore the port's CFG direction bits to their defaults: all pins as
+ * inputs, except the emulated RXLOS (when present) as output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	field_set_flush(flds->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(flds->cfg_int); /* enable input */
+	field_set_flush(flds->cfg_reset); /* enable input */
+	field_set_flush(flds->cfg_mod_prs); /* enable input */
+	if (flds->cfg_port_rxlos)
+		field_clr_flush(flds->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
+/*
+ * Per-interface field handles for one GPIO-PHY port.
+ * CFG fields select pin direction (set = input, clear = output);
+ * GPIO fields carry the pin value itself.  The two *_port_rxlos
+ * handles are optional and may be NULL (see the setters in the .c file).
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* Handle for one GPIO-PHY module instance in the FPGA */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate a zero-initialized HIF instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc yields the same zeroed state as malloc + memset */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/*
+ * Scrub and free a HIF instance.  Passing NULL is a no-op.
+ */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the struct before freeing so stale handles are not reused */
+	memset(p, 0, sizeof(nthw_hif_t));
+	free(p);
+}
+
+/*
+ * Bind HIF (host interface) module instance @n_instance of @p_fpga to @p.
+ *
+ * When @p is NULL the call only probes for the module: it returns 0 when
+ * the instance exists and -1 when it does not.  Otherwise all register and
+ * field handles are resolved, the product-id fields are read back, and the
+ * HIF reference clock frequency is derived from the NT_HIF_PER_PS product
+ * parameter.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	/* Read back the FPGA image identification */
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * BUILD_SEED and CORE_SPEED Reg/Fld are not present on HIF.
+	 * (The original code branched on pointers it had just set to NULL,
+	 * which was dead code - both branches ended in the same state.)
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 - Reg/Fld not present on HIF */
+	p->mp_reg_int_mask = NULL;
+	p->mp_reg_int_clr = NULL;
+	p->mp_reg_int_force = NULL;
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* PCIe traffic statistics */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* HIF_STATUS is optional: probe it, then resolve its (optional) fields */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/*
+		 * Register absent - do not re-query it (the original code did,
+		 * and then queried fields on a NULL register).
+		 */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/*
+	 * Required to run TSM.  The register is optional in the map, so use
+	 * the query variant (the NULL check was meaningless with "get").
+	 */
+	p->mp_reg_sample_time = module_query_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+	if (p->mp_reg_config) {
+		p->mp_fld_max_tlp =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+		p->mp_fld_max_read =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+		p->mp_fld_ext_tag =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+	} else {
+		p->mp_fld_max_tlp = NULL;
+		p->mp_fld_max_read = NULL;
+		p->mp_fld_ext_tag = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Tune the PCIe gen3 link parameters exposed through HIF_CONFIG:
+ * clamp the negotiated MaxReadReqSize to index 2 (512B) and enable
+ * extended tags.  Skipped entirely on NT40E3 adapters.
+ * Always returns 0; the fields may be NULL on FPGAs without HIF_CONFIG,
+ * in which case nothing is written.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		/* Log the resulting link configuration when all fields exist */
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+/*
+ * Latch a time sample by writing the magic pattern to HIF_SAMPLE_TIME.
+ * Returns 0 on success, -1 when the FPGA image has no SAMPLE_TIME
+ * register (mp_fld_sample_time is NULL in that case - see init), which
+ * previously caused a NULL-pointer dereference.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw PCIe traffic statistics.
+ * Outputs: RX/TX word counters, reference-clock tick count, the counter
+ * unit size (NTHW_TG_CNT_SIZE bytes) and reference frequency, plus the
+ * optional status fields (tags-in-use, read/write error flags) which are
+ * reported as 0 when the FPGA image lacks the HIF_STATUS register.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* Optional status fields - 0 when not present in this FPGA image */
+	*p_tags_in_use = (p->mp_fld_status_tags_in_use ?
+		       field_get_updated(p->mp_fld_status_tags_in_use) :
+		       0);
+
+	*p_rd_err = (p->mp_fld_status_rd_err ? field_get_updated(p->mp_fld_status_rd_err) :
+		   0);
+	*p_wr_err = (p->mp_fld_status_wr_err ? field_get_updated(p->mp_fld_status_wr_err) :
+		   0);
+
+	return 0;
+}
+
+/*
+ * Convert the raw statistics into byte rates relative to the reference
+ * clock: rate = count * unit_size * ref_freq / ref_clk_ticks.
+ * All rates are reported as 0 when no reference ticks elapsed.
+ *
+ * NOTE: *p_rd_err_cnt / *p_wr_err_cnt are INCREMENTED (not assigned)
+ * when an error flag is seen - the caller must initialize them.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt, tg_unit_size, tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size, &tg_ref_freq,
+			&n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (ref_clk_cnt) {
+		uint64_t rx_rate;
+		uint64_t tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+	} else {
+		/* No elapsed reference ticks - avoid division by zero */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Start a statistics sampling window: set STAT_ENA and pulse STAT_REQ.
+ * Always returns 0.
+ */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Stop the statistics sampling window: clear STAT_ENA while pulsing
+ * STAT_REQ, which latches the counters.  Always returns 0.
+ */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample the PCIe statistics over a fixed 100 ms window (blocking),
+ * then convert the counters into rates via nthw_hif_get_stat_rate().
+ * Always returns 0.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000); /* 100 ms sampling window */
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill an endpoint counter snapshot from the current statistics rates.
+ *
+ * NOTE(review): the PCI RX-rate output is stored in epc->cur_tx and the
+ * TX-rate in epc->cur_rx - presumably an intentional host-vs-endpoint
+ * direction swap, but confirm against the callers.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * Register/field handle cache for one HIF (host interface / PCIe)
+ * module instance.  Handles marked optional may be NULL depending on
+ * the FPGA image (see nthw_hif_init()).
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* Product identification */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - always NULL (see init) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt control - not present on HIF, always NULL (see init) */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional status register - fields may be NULL */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* PCIe traffic statistics */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* Optional PCIe link configuration - fields may be NULL */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* Decoded product identification (read back in init) */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe endpoint error counters (correctable / non-fatal / fatal) */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Traffic-generator settings and sampled counters for one endpoint */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	/* Sampled rates and clock ticks */
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	/* Error counters before/after a run and their delta, per direction */
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+/* Primary and slave endpoint counter pair */
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/*
+ * Pulse the TX-FIFO reset bit in the control register: set it, flush,
+ * then clear it and flush again.  Always returns 0.
+ */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Push one entry into the controller TX FIFO.
+ * @data is the byte (or, for address entries, address+R/W bit) to queue;
+ * @start/@stop select whether a bus START/STOP condition accompanies it.
+ * All three fields are staged and flushed in a single register write.
+ * Always returns 0.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the controller RX FIFO into *p_data.  Always returns 0. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
+/*
+ * Soft-reset the controller by writing the reset key 0x0A to the SOFTR
+ * register.  Always returns 0.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Set the EN bit in the control register, enabling the controller. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read SR.BB into *pb_flag: true while the I2C bus is busy. */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_bb) ? true : false;
+
+	return 0;
+}
+
+/* Read SR.RXFIFO_EMPTY into *pb_flag: true while no RX data is pending. */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_rxfifo_empty) ? true : false;
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ */
+/*
+ * Program the eight I2C timing registers from the minimum timing
+ * constants (ns), each scaled down by the controller clock cycle time.
+ * n_iic_cycle_time is the I2C controller clock cycle time in ns,
+ * i.e. 125 MHz = 8 ns.  Always returns 0.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		uint32_t n_ns;      /* minimum timing requirement in ns */
+		nt_field_t *p_fld;  /* timing register field to program */
+	} a_timings[] = {
+		{ susta, p->mp_fld_tsusta },
+		{ susto, p->mp_fld_tsusto },
+		{ hdsta, p->mp_fld_thdsta },
+		{ sudat, p->mp_fld_tsudat },
+		{ buf, p->mp_fld_tbuf },
+		{ high, p->mp_fld_thigh },
+		{ low, p->mp_fld_tlow },
+		{ hddat, p->mp_fld_thddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timings) / sizeof(a_timings[0]); i++) {
+		uint32_t val = a_timings[i].n_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timings[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate a zero-initialized IIC controller instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc yields the same zeroed state as malloc + memset */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Bind IIC controller instance @n_iic_instance of @p_fpga to @p and bring
+ * the controller up: resolve all register/field handles, soft-reset and
+ * enable the controller, program the bus timing (when @n_iic_cycle_time,
+ * the controller clock cycle time in ns, is non-zero) and reset the TX FIFO.
+ *
+ * When @p is NULL the call only probes for the module: it returns 0 when
+ * the instance exists and -1 when it does not.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All retry parameters at their defaults (see nthw_iic_set_retry_params) */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Bus timing registers */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* Data path registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Scrub and free an IIC controller instance.  Passing NULL is a no-op.
+ */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the struct before freeing so stale handles are not reused */
+	memset(p, 0, sizeof(nthw_iic_t));
+	free(p);
+}
+
+/*
+ * Configure the polling/retry parameters.  Passing -1 (any negative
+ * value) for a parameter selects its default:
+ *   poll delay 10 us, bus/data-ready polls 1000, read/write retries 10.
+ * Always returns 0.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = n_poll_delay >= 0 ? n_poll_delay : 10;
+
+	p->mn_bus_ready_retry = n_bus_ready_retry >= 0 ? n_bus_ready_retry : 1000;
+	p->mn_data_ready_retry = n_data_ready_retry >= 0 ? n_data_ready_retry : 1000;
+
+	p->mn_read_data_retry = n_read_data_retry >= 0 ? n_read_data_retry : 10;
+	p->mn_write_data_retry = n_write_data_retry >= 0 ? n_write_data_retry : 10;
+
+	return 0;
+}
+
+/*
+ * Read @data_len bytes from register @reg_addr of device @dev_addr into
+ * @p_void, retrying up to mn_read_data_retry times (default 10).
+ * Returns 0 on success, -1 when all retries are exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	/* Debug mode 0xff enables verbose transfer tracing */
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C read transaction: write @reg_addr to device @dev_addr,
+ * then read @data_len bytes back into @p_byte.  Uses the controller's
+ * dynamic TX-FIFO mode (address byte with START, length byte with STOP).
+ * Returns 0 on success, -1 when the bus never becomes ready or RX data
+ * does not arrive in time.
+ *
+ * (Restructured with guard clauses; the original had an unreachable
+ * trailing "return 0;" after an if/else in which both paths returned.)
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write @data_len bytes from @p_void to register @reg_addr of device
+ * @dev_addr, retrying up to mn_write_data_retry times (default 10).
+ * Returns 0 on success, -1 when all retries are exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C write transaction: device address with START, then
+ * @reg_addr, then @data_len bytes from @p_byte, the last with STOP.
+ * Returns 0 on success, -1 when @data_len is 0 or the bus is not ready
+ * before the transfer.
+ *
+ * NOTE(review): if the bus is still busy after queuing the transfer,
+ * this spins in an unbounded while(true) loop until it becomes ready -
+ * a stuck bus would hang here; consider bounding it like
+ * nthw_iic_bus_ready() does.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	/* All bytes except the last go out without a STOP condition */
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ */
+/*
+ * Poll the status register until the bus is no longer busy.
+ * Polls at most mn_bus_ready_retry times (default 1000), waiting
+ * mn_poll_delay microseconds between polls.
+ * Returns true when the bus became ready, false on timeout.
+ *
+ * Fix: the original tested "count == 0" after the loop.  With a retry
+ * budget of 0 the counter went negative and a busy bus was reported as
+ * ready; conversely a bus freeing up on the final poll was reported as
+ * a timeout.  The result is now tied to the last busy-flag reading.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	for (;;) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			return true;
+
+		if (--count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+/*
+ * Poll the status register until the RX FIFO holds data.
+ * Polls at most mn_data_ready_retry times (default 1000), waiting
+ * mn_poll_delay microseconds between polls.
+ * Returns true when data is available, false on timeout.
+ *
+ * Fix: same "count == 0" exit-test defect as nthw_iic_bus_ready() - the
+ * result is now tied to the last FIFO-empty reading.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	for (;;) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			return true;
+
+		if (--count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Probe a single device address by attempting a 1-byte read of
+ * @n_reg_addr.  Logs the value read when the device responds.
+ * Returns the nthw_iic_readbyte() result: 0 when a device answered,
+ * non-zero otherwise.
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res;
+	uint8_t data_val = -1; /* poison value 0xFF (well-defined conversion) */
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan for the first responding device starting at @n_dev_addr_start,
+ * searching upward through address 127 when @b_increate is true
+ * ("b_increate" is presumably a typo for "b_increase" - it also appears
+ * in the header prototype), otherwise downward to address 0.
+ * Returns the address found, or -1 when no device responded.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	int res = 0;
+	int i = 0;
+
+	if (b_increate) {
+		for (i = n_dev_addr_start; i < 128; i++) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	} else {
+		for (i = n_dev_addr_start; i >= 0; i--) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/*
+ * Probe every 7-bit device address on the bus; responding devices are
+ * logged by nthw_iic_scan_dev_addr().  Always returns 0.
+ */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev_addr = 0;
+
+	while (n_dev_addr < 128) {
+		(void)nthw_iic_scan_dev_addr(p, n_dev_addr, 0x00);
+		n_dev_addr++;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/* Driver context for one Xilinx AXI IIC controller instance in the FPGA. */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+	nt_module_t *mp_mod_iic;	/* IIC module within the FPGA model */
+	int mn_iic_instance;		/* instance number (used in logging) */
+
+	uint32_t mn_iic_cycle_time;	/* bus cycle time used to derive timing regs */
+	int mn_poll_delay;		/* us to wait between status polls (0 = busy-wait) */
+	int mn_bus_ready_retry;		/* poll budget for bus-ready */
+	int mn_data_ready_retry;	/* poll budget for RX data-ready */
+	int mn_read_data_retry;		/* retry budget for whole read transactions */
+	int mn_write_data_retry;	/* retry budget for whole write transactions */
+
+	/* I2C timing registers (setup/hold/high/low times) */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO (data byte plus start/stop control bits) */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt threshold */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft reset (written with a magic key) */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Allocate a zeroed controller context; free with nthw_iic_delete(). */
+nthw_iic_t *nthw_iic_new(void);
+/* Bind the context to FPGA instance n_iic_instance and program timing. */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Tune the polling/retry budgets used by the transfer routines. */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Register-level transfers; return 0 on success. */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Status polls: true when the bus is idle / RX data is available. */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers (log-only diagnostics). */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate a zero-initialized MAC/PCS context.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_delete().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc() zero-fills in one step - replaces malloc()+memset() */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Scrub and release a context previously returned by nthw_mac_pcs_new(). */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	/* p == NULL: presence probe only (see contract comment above) */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	/* Port number is stored in a uint8_t */
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		/* Resolve every register/field handle used by this module */
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		/* PCS RX status (live and latched) */
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		/* GTY transceiver reset-done status, one field per lane */
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		/* Per-product lane/lock masks */
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			/*
+			 * NOTE(review): mask is 1 although the comments below say
+			 * 4 GTY - confirm whether each *_RST_DONE_n field is a
+			 * single bit (then mask 1 per field is correct).
+			 */
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		/* PCS configuration (resets, enables, test patterns) */
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		/* Optional field - query (not get) so absence is tolerated */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		/* RS-FEC control and status */
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		/*
+		 * Two FPGA layouts exist: a combined GTY_CTL register, or
+		 * separate GTY_CTL_RX / GTY_CTL_TX registers.
+		 */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		/* RX fields live in mp_reg_gty_ctl in both layouts */
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		/* Optional register - query (not get) so absence is tolerated */
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/*
+ * Select the host (or TFG) as the TX data source; the two selects are
+ * mutually exclusive. NOTE(review): both calls flush bits of the same
+ * PHYMAC_MISC register - the host-before-tfg order is kept deliberately.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS RX path. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_pcs_config_rx_enable);
+	if (!enable)
+		field_clr_flush(p->mp_fld_pcs_config_rx_enable);
+	else
+		field_set_flush(p->mp_fld_pcs_config_rx_enable);
+}
+
+/* Enable or disable the PCS TX path. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_pcs_config_tx_enable);
+	if (!enable)
+		field_clr_flush(p->mp_fld_pcs_config_tx_enable);
+	else
+		field_set_flush(p->mp_fld_pcs_config_tx_enable);
+}
+
+/* Select/deselect the host as TX data source. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_phymac_misc_tx_sel_host);
+	if (!enable)
+		field_clr_flush(p->mp_fld_phymac_misc_tx_sel_host);
+	else
+		field_set_flush(p->mp_fld_phymac_misc_tx_sel_host);
+}
+
+/* Select/deselect the traffic generator (TFG) as TX data source. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_phymac_misc_tx_sel_tfg);
+	if (!enable)
+		field_clr_flush(p->mp_fld_phymac_misc_tx_sel_tfg);
+	else
+		field_set_flush(p->mp_fld_phymac_misc_tx_sel_tfg);
+}
+
+/* Configure SOP/EOP timestamping; no-op when the FPGA lacks the field. */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Optional field (resolved with register_query_field) */
+	if (!p->mp_fld_phymac_misc_ts_eop)
+		return;
+
+	field_get_updated(p->mp_fld_phymac_misc_ts_eop);
+	if (!enable)
+		field_clr_flush(p->mp_fld_phymac_misc_ts_eop);
+	else
+		field_set_flush(p->mp_fld_phymac_misc_ts_eop);
+}
+
+/* True when every masked virtual lane reports both block and lane lock. */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+/* Assert (true) or release (false) the TX path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_pcs_config_tx_path_rst);
+	if (!enable)
+		field_clr_flush(p->mp_fld_pcs_config_tx_path_rst);
+	else
+		field_set_flush(p->mp_fld_pcs_config_tx_path_rst);
+}
+
+/* Assert (true) or release (false) the RX path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+	if (!enable)
+		field_clr_flush(p->mp_fld_pcs_config_rx_path_rst);
+	else
+		field_set_flush(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* True when the RX path reset bit is currently asserted. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Enable/disable transmission of Remote Fault Indication on TX. */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_pcs_config_tx_send_rfi);
+	if (!enable)
+		field_clr_flush(p->mp_fld_pcs_config_tx_send_rfi);
+	else
+		field_set_flush(p->mp_fld_pcs_config_tx_send_rfi);
+}
+
+/* Force (true) or stop forcing (false) an RX resynchronization. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow register, then flush just this bit */
+	field_get_updated(p->mp_fld_pcs_config_rx_force_resync);
+	if (!enable)
+		field_clr_flush(p->mp_fld_pcs_config_rx_force_resync);
+	else
+		field_set_flush(p->mp_fld_pcs_config_rx_force_resync);
+}
+
+/* True when all four GTY lanes report RX reset done (within the mask). */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+
+	return (field_get_updated(p->mp_fld_gty_stat_rx_rst_done0) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done1) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done2) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done3) & mask) == mask;
+}
+
+/* True when all four GTY lanes report TX reset done (within the mask). */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+
+	return (field_get_updated(p->mp_fld_gty_stat_tx_rst_done0) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done1) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done2) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done3) & mask) == mask;
+}
+
+/* Enable (value 2) or disable (value 0) GTY host-side loopback on all lanes. */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_val = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/* Enable (value 4) or disable (value 0) GTY line-side loopback on all lanes. */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_val = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP8 error counters.
+ * NOTE(review): the name suggests the read clears the counters in
+ * hardware (clear-on-read) - confirm against the register spec.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t bip_errs[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)bip_errs,
+		     ARRAY_SIZE(bip_errs));
+
+#if defined(DEBUG)
+	for (uint8_t lane = 0; lane < c_pcs_lanes; lane++) {
+		if (bip_errs[lane]) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+			       p->m_port_no, lane, bip_errs[lane]);
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Read the PCS RX status; only bit 0 is reported through *status. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* True when the PCS reports a high bit-error rate (HI_BER). */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Snapshot the LINK_SUMMARY register once, then copy each requested field
+ * into its out-parameter.  Any out-parameter may be NULL to skip it.
+ * "lh"/"ll" prefixed fields are latched-high/latched-low variants.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	/* Single register read; field_get_val32() below reads the cached value */
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+/* Enable/disable RS-FEC, then reset both paths so the change takes effect. */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	/* 0 enables FEC; all five control bits set bypasses it */
+	const uint32_t rs_fec_ctrl = enable ? 0 : ((1 << 5) - 1);
+
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+			    rs_fec_ctrl);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* True when the RS-FEC block is currently bypassed. */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* True when the RS-FEC block reports valid operation. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* True when the FEC lanes are aligned. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+/* True when at least one FEC lane has alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True when every FEC lane has alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/*
+ * Log every FEC_STAT field for diagnostics.
+ * Fix: the fourth lane value (am_lock3) was labelled "AM_LOCK_0" twice
+ * in the format string; it now reads "AM_LOCK_3".
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read out (and log, when non-zero) the FEC corrected/uncorrected codeword
+ * counters.
+ * NOTE(review): the function only reads the registers; "reset" presumably
+ * relies on the counters being clear-on-read via register_update() - confirm
+ * against the FEC_CW_CNT/FEC_UCW_CNT register definitions.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Report a GTY RX buffer error: true (with a debug log of all four lanes)
+ * only when some lane's buffer status changed since the last poll AND some
+ * lane currently reports a non-zero status.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	/* No change on any lane: nothing new to report */
+	if (!field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) &&
+	    !field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) &&
+	    !field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) &&
+	    !field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3))
+		return false;
+
+	/* All lanes currently report a clean (zero) status */
+	if (!field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) &&
+	    !field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) &&
+	    !field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) &&
+	    !field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3))
+		return false;
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+	       p->m_port_no,
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+	return true;
+}
+
+/*
+ * Program GTY transmitter tuning for one lane: pre-cursor, differential
+ * swing control and post-cursor.  Each value is masked to 5 bits (0x1F) -
+ * presumably the width of the hardware field; confirm in the register defs.
+ * NOTE(review): a lane value outside 0-3 silently programs nothing.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	register_update(p->mp_reg_gty_ctl);
+	/* Apply LPM enable (bit 0 of mode) to all four lanes; flush on last */
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	/* Release the equalizer reset so the new LPM/DFE setting takes effect */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/* Set or clear TX differential-pair polarity inversion for one GTY lane.
+ * NOTE(review): a lane value outside 0-3 silently does nothing (but logs).
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set or clear RX differential-pair polarity inversion for one GTY lane. */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Select the port LED behavior (one of nthw_mac_pcs_led_mode_e). */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	nt_field_t *const port_ctrl = p->mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(port_ctrl);
+	field_set_val_flush32(port_ctrl, mode);
+}
+
+/*
+ * Program the RX timestamp compensation delay.  A no-op when this FPGA does
+ * not expose the TIMESTAMP_COMP RX_DLY field (pointer is NULL).
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	nt_field_t *const fld_rx_dly = p->mp_field_time_stamp_comp_rx_dly;
+
+	if (!fld_rx_dly)
+		return;
+
+	field_get_updated(fld_rx_dly);
+	field_set_val_flush32(fld_rx_dly, rx_dly);
+}
+
+/* Record the logical port number used in this module's log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Return the refreshed BLOCK_LOCK lock field value. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Return the stored block-lock field mask (not re-read from hardware). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Return the refreshed virtual-lane demuxed lock field value. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Return the stored lane-lock field mask (not re-read from hardware). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* Port LED behavior, programmed via nthw_mac_pcs_set_led_mode(). */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* Receiver equalization modes for nthw_mac_pcs_set_receiver_equalization_mode() */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Runtime context for one MAC/PCS module instance: the owning FPGA/module
+ * handles plus cached register and field shadow pointers (mp_reg_*/mp_fld_*),
+ * presumably resolved once at nthw_mac_pcs_init() time - the init code is not
+ * visible here.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Refresh a field's shadow value, then set or clear it and flush to hardware.
+ * Fields not present in this FPGA (NULL pointer) are silently ignored.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate a zero-initialized MAC/PCS XXV context.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	nthw_mac_pcs_xxv_t *p = calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+
+	return p;
+}
+
+/*
+ * Scrub and free a context from nthw_mac_pcs_xxv_new(); NULL is a no-op.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_mac_pcs_xxv_t));
+	free(p);
+}
+
+/*
+ * Map a register-set index to a port number.  Index 0 falls back to the
+ * module instance number; any other index is the port number itself.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (index == 0)
+		return (uint8_t)p->mn_instance;
+	return index;
+}
+
+/* Record the logical port number used in this module's log messages. */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register for register set @index and scatter its
+ * fields into the caller-supplied output pointers.  Any output pointer may
+ * be NULL to skip that field.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* Validate p before it is used; the assert was previously placed
+	 * after p had already been dereferenced.
+	 */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Enable/disable the TX datapath (CORE_CONF.TX_ENABLE). */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable the RX datapath (CORE_CONF.RX_ENABLE). */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert forced RX resynchronization. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the RX GT data sub-reset. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX GT data sub-reset. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the auto-negotiation / link-training sub-reset. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the speed-control sub-reset. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable transmission of Remote Fault Indication. */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable transmission of Local Fault Indication. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set both LFI and RFI in a single CORE_CONF read-modify-write/flush. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+/* True when DFE equalization is active (i.e. the LPM enable bit is clear). */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
+/*
+ * Select DFE (enable=true) or LPM (enable=false) receiver equalization,
+ * then toggle the equalizer reset so the new setting takes effect.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Set/clear GTY RX polarity inversion. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear GTY TX polarity inversion. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear GTY TX inhibit. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable GT host-side loopback (writes loop mode 2, 0 to disable). */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Enable/disable line-side loopback (CORE_CONF.LINE_LOOPBACK). */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True while the user RX reset is still asserted. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+/* True while the user TX reset is still asserted. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there is only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+/* True when the QPLL lock field reads 3 (presumably both lock bits set). */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/* Ready = QPLLs locked and neither user RX nor user TX reset asserted. */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+/* True when auto-negotiation is enabled in ANEG_CONFIG. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+/* Enable/disable continuous transmission of idles. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable FCS insertion on transmitted frames. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True when the link speed field reads 10G (0 = 25G, 1 = 10G). */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+/* Select 10G link speed (true) or 25G (false). */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Pulse the link-speed toggle field. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+/* Enable/disable RS-FEC. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Select the port LED behavior. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+/* Assert/deassert the RX MAC/PCS sub-reset. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX MAC/PCS sub-reset. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Read out (and log, when non-zero) the RS-FEC corrected (CCW) and
+ * uncorrected (UCW) codeword counters for register set @index.
+ * NOTE(review): only reads the registers; "reset" presumably relies on the
+ * counters being clear-on-read - confirm against the register definitions.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		/* Trailing newline added for consistency with all other NT_LOG calls */
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Program the RX timestamp compensation delay. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+/* Program the TX timestamp compensation delay. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+/* Enable/disable timestamping at end-of-packet (CORE_CONF.TS_AT_EOP). */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Program the GTY TX differential swing control value. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+/* Program the GTY TX pre-cursor value. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+/* Program the GTY TX post-cursor value. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+/* Enable/disable link training. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Request clause-91 FEC during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Request RS-FEC during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Request clause-74 FEC during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable auto-negotiation bypass. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	/* Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented so far */
+	if (dac_mode != NTHW_MAC_PCS_XXV_DAC_OFF) {
+		assert(0); /* If you end up here you need to implement other DAC modes */
+		return;
+	}
+
+	nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+	nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+	nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+	nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+
+	/* Pulse the MAC/PCS and GT data resets: assert, then release */
+	nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+	nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+	nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+	nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+	nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+	nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+	nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+	nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Read the latched-low RX FEC74 lock indication for this channel */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f);
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Read the latched-low RX RS-FEC lane-alignment indication */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_0
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_0
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Port LED operating modes, written via nthw_mac_pcs_xxv_set_led_mode() */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,	/* hardware-controlled (default) */
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,	/* forced on */
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,	/* forced off */
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,	/* port-identification pattern */
+};
+
+/*
+ * Direct-attach copper (DAC) cable modes, written via
+ * nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): the N/S/L suffixes presumably map to the 25GBASE-CR
+ * CA-25G-N/S/L cable assembly classes - confirm against FPGA documentation.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Register/field shadow context for one MAC_PCS_XXV FPGA module instance.
+ * One regs[] element holds the handles for one sub-module/channel; the
+ * handles are resolved once by nthw_mac_pcs_xxv_init().
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_mac_pcs_xxv;	/* resolved MAC_PCS_XXV module */
+	int mn_instance;	/* module instance number */
+
+	uint8_t m_port_no;	/* adapter port, see nthw_mac_pcs_xxv_set_port_no() */
+
+/* max number of sub-modules/channels per instance */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];	/* one element per sub-module/channel */
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+/*
+ * Lifetime: allocate, resolve register/field handles for n_channels
+ * sub-modules, free.  The "index" argument of the accessors below selects
+ * the sub-module/channel (0 .. n_channels - 1).
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+/* Read the LINK_SUMMARY register fields of one channel in a single pass */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+/* CORE_CONF controls */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+/* SUB_RST sub-block resets */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+/* GTY transceiver controls */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+/* SUB_RST_STATUS queries */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* LINK_SPEED controls */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+/* GTY analog TX tuning (differential swing, pre/post cursor emphasis) */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* Auto-negotiation / link-training configuration */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+/* Timestamp compensation delays (RX/TX) */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+/* FEC request bits advertised during auto-negotiation */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI read test-generator context.
+ * Returns NULL on allocation failure; release with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-initializes in one step instead of malloc()+memset() */
+	nthw_pci_rd_tg_t *p = calloc(1, sizeof(nthw_pci_rd_tg_t));
+
+	return p;
+}
+
+/* Release a context previously returned by nthw_pci_rd_tg_new(); NULL is OK. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the cached handles before releasing the memory */
+	memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+	free(p);
+}
+
+/*
+ * Resolve the PCI_RD_TG module instance on the FPGA and cache its
+ * register/field handles in *p.
+ *
+ * When called with p == NULL the function acts as a pure probe and only
+ * reports whether the module instance exists.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	/* Probe-only mode: no context to populate */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	/* Product parameter: test-generator presence (defaults to 1) */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* RDDATA0: low 32 bits of the DMA physical address */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	/* RDDATA1: high 32 bits of the DMA physical address */
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* RDDATA2: request size plus WAIT/WRAP control flags */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id - query (not get): handle may be NULL when absent */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	/* RDADDR: RAM entry selector */
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	/* RD_RUN: iteration count that starts the generator */
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	/* CTRL: ready status flag */
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical base address of the read test generator,
+ * split across the low/high 32-bit address registers.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	/*
+	 * Mask on the 64-bit value: the previous "(1UL << 32) - 1" is
+	 * undefined behavior on targets where unsigned long is 32 bits
+	 * (shift count equals the type width).
+	 */
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the test-generator RAM entry to operate on. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	nt_field_t *const addr_fld = p->mp_fld_pci_rd_tg_ram_addr;
+
+	field_set_val_flush32(addr_fld, n_ram_addr);
+}
+
+/*
+ * Write one RAM entry of the read test generator: request size plus the
+ * WAIT and WRAP flags.  The three shadow fields are written first and only
+ * committed to hardware by the single flush on the WRAP field, so the whole
+ * register is updated in one register write - do not reorder these calls.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Start the generator by writing the number of iterations to run. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	nt_field_t *const iter_fld = p->mp_fld_pci_rd_tg_run_iteration;
+
+	field_set_val_flush32(iter_fld, n_iterations);
+}
+
+/* Fetch the current (re-read from hardware) ready flag of the generator. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	nt_field_t *const rdy_fld = p->mp_fld_pci_rd_tg_ctrl_rdy;
+
+	return field_get_updated(rdy_fld);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * Register/field shadow context for one PCI_RD_TG (PCIe read test
+ * generator) FPGA module instance.  Populated by nthw_pci_rd_tg_init().
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_pci_rd_tg;	/* resolved module instance */
+	int mn_instance;	/* module instance number */
+
+	/* product parameter NT_PCI_TA_TG_PRESENT (defaults to 1) */
+	int mn_param_pci_ta_tg_present;
+
+	/* RDDATA0: low 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	/* RDDATA1: high 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	/* RDDATA2: request size and control flags */
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* optional VF host id; may be NULL */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	/* RDADDR: RAM entry selector */
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	/* RD_RUN: iteration count that starts the generator */
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	/* CTRL: ready status flag */
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+/* Lifetime: allocate (zeroed), resolve register/field handles, free */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* Program the generator and start/poll it */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/* Allocate a zero-initialized PCI_TA (test accelerator) context. */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	nthw_pci_ta_t *p = malloc(sizeof(nthw_pci_ta_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+	return p;
+}
+
+/* Scrub and free a PCI_TA context; NULL is a no-op. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a PCI_TA context to FPGA module instance n_instance and cache all
+ * register/field handles. Calling with p == NULL only probes for the
+ * module's presence (returns 0 if present, -1 if not).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write the CONTROL.ENABLE field and flush it to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the current good-packet counter from hardware into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the current bad-packet counter from hardware into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the current length-error counter from hardware into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the current payload-error counter from hardware into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * State for the PCI_TA (PCIe test accelerator) FPGA module: one control
+ * register plus four read-only packet/error counters.
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/* Allocate a zero-initialized PCI_WR_TG (write traffic generator) context. */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	nthw_pci_wr_tg_t *p = malloc(sizeof(nthw_pci_wr_tg_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+	return p;
+}
+
+/* Scrub and free a PCI_WR_TG context; NULL is a no-op. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a PCI_WR_TG context to FPGA module instance n_instance and cache
+ * register/field handles. Calling with p == NULL only probes for the
+ * module's presence (returns 0 if present, -1 if not).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id - query (not get), so absence is tolerated */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address by writing its low and high
+ * 32-bit halves; each field write is flushed to hardware.
+ *
+ * Note: use 64-bit mask constants here - "1UL << 32" is undefined
+ * behavior on targets where unsigned long is 32 bits wide (ILP32).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xffffffffULL));
+}
+
+/* Select the TG RAM entry to program and flush the address register. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Fill the currently addressed TG RAM entry: request size plus the
+ * wait/wrap/increment-mode flags. All fields are staged and flushed in
+ * a single register write via the last field's flush.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the generator for n_iterations runs (0 presumably means stop/free-run
+ * - TODO confirm against the PCI_WR_TG register documentation). */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the TG_CTRL ready flag directly from hardware. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * State for the PCI_WR_TG (PCIe write traffic generator) FPGA module.
+ * Register/field handles are resolved once by nthw_pci_wr_tg_init().
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	/* DMA physical address, split into low/high 32-bit halves */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* request descriptor: size, host id (optional), mode flags */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* may be NULL (queried, not required) */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/* Allocate a zero-initialized PCIe3 context. */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	nthw_pcie3_t *p = malloc(sizeof(nthw_pcie3_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pcie3_t));
+	return p;
+}
+
+/* Scrub and free a PCIe3 context; NULL is a no-op. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pcie3_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a PCIe3 context to FPGA module instance n_instance, cache all
+ * register/field handles, and apply the initial setup (disable marker
+ * scheme and bifurcation). Calling with p == NULL only probes for the
+ * module's presence (returns 0 if present, -1 if not).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* root-port <-> end-point error reporting */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	/*
+	 * NOTE(review): p1 allow mask is cleared a second time here while p0
+	 * is set - looks asymmetric; confirm this is the intended final state
+	 * (p0 enabled, p1 disabled) rather than a copy/paste slip.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Trigger a statistics sample by writing the magic value to SAMPLE_TIME. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection and request an update. Always returns 0. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection (request bit still raised). Always returns 0. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read all raw PCIe3 statistics counters from hardware. The TG unit size
+ * and reference frequency outputs are compile-time constants used by
+ * nthw_pcie3_get_stat_rate() to convert counts to rates.
+ * Always returns 0; all output pointers must be valid.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw PCIe3 counters into byte rates and bus-utilization
+ * figures, scaled by the reference clock. When the reference-clock count
+ * is zero the clock/utilization outputs are zeroed and the rate outputs
+ * are left untouched. Always returns 0.
+ * Utilization outputs are in parts-per-million of the sample window.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		/* counts * unit size scaled to per-second via the ref clock */
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		/* NOTE(review): rx/tx rate and tag outputs are not written in
+		 * this branch - callers must not read them when ref clk is 0;
+		 * confirm this contract or zero them here as well.
+		 */
+		*p_ref_clk_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/* Pre-sample hook - intentionally empty for PCIe3 (kept for interface parity). */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the endpoint counter struct from the PCIe3 stats.
+ * NOTE(review): cur_tx receives the rx-rate out-parameter and cur_rx the
+ * tx-rate - possibly an intentional host-vs-NIC perspective flip, but
+ * verify it is not a swapped-argument bug.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * State for the PCIe3 FPGA module: statistics counters, error reporting,
+ * endpoint configuration and marker-address registers. Handles are
+ * resolved once by nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pcie3;
+	int mn_instance;
+
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never initialized by nthw_pcie3_init()
+	 * in this patch - remove the fields or add the init, as intended.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/* Allocate a zero-initialized SDC context. */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	nthw_sdc_t *p = malloc(sizeof(nthw_sdc_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_sdc_t));
+	return p;
+}
+
+/* Scrub and free an SDC context; NULL is a no-op. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_sdc_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind an SDC context to FPGA module instance n_instance and cache all
+ * control/status/fill-level field handles. Calling with p == NULL only
+ * probes for the module's presence (returns 0 if present, -1 if not).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Sample the SDC status fields, pack their raw values into a single bit
+ * mask (*pn_result_mask, most-significant field first) and count how many
+ * are in an unexpected state: calib/init_done/mmcm_lock/pll_lock must be
+ * all-ones, resetting must be zero.
+ *
+ * Returns the number of failing checks (0 = all ok), or -1 on NULL args.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+	uint32_t val;
+	uint32_t val_mask;
+	int n_val_width;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	val = field_get_updated(p->mp_fld_stat_calib);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_calib);
+	/* shift a 64-bit constant: "1 << width" overflows int for widths >= 31 */
+	val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_init_done);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_init_done);
+	val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_mmcm_lock);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_mmcm_lock);
+	val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_pll_lock);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_pll_lock);
+	val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != val_mask)
+		n_err_cnt++;
+
+	val = field_get_updated(p->mp_fld_stat_resetting);
+	n_val_width = field_get_bit_width(p->mp_fld_stat_resetting);
+	val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != 0)
+		n_err_cnt++;
+
+	if (pn_result_mask)
+		*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reaches its ready state: calib/init_done/mmcm_lock/
+ * pll_lock all set and resetting cleared, each polled up to
+ * n_poll_iterations times with n_poll_interval between attempts.
+ *
+ * Returns the number of fields that timed out (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int res;
+	int n_err_cnt = 0;
+
+	res = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * State for the SDC FPGA module: control, status and fill-level field
+ * handles resolved once by nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_sdc;
+	int mn_instance;
+
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/* Allocate a zero-initialized Si5340 context; NULL on allocation failure. */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset(). */
+	return calloc(1, sizeof(nthw_si5340_t));
+}
+
+/*
+ * Bind the Si5340 context to an IIC bus and device address, and select
+ * register page 0 on the device. Always returns 0; the result of the
+ * initial page write is not checked.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1; /* -1 marks the clock config as not yet applied */
+
+	/* Reset the cached page and mirror it to the device's page register */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Scrub and free a Si5340 context; a NULL pointer is ignored. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff); /* low byte: in-page offset */
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff); /* high byte: register page */
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache the currently selected page */
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff); /* low byte: in-page offset */
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff); /* high byte: register page */
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache the currently selected page */
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0; /* always succeeds; IIC write result is not checked */
+}
+
+/*
+ * Write a clock profile (array of reg_addr/reg_val entries, in format 1 or 2)
+ * to the Si5340 and verify each value by reading it back.
+ * Returns 0 on success, -1 on unknown format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* Decode the next entry and advance p_data by one entry. */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and verify the outcome: poll (up to 5 times, 1 s
+ * apart) until both the status register (0x0c) and the sticky register (0x11)
+ * report the checked bits (mask 0x09) clear, then read out the 8-character
+ * design id. Returns 0 on success, -1 on lock/calibration failure.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00); /* clear sticky flags for next poll */
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Read the NUL-terminated design id from registers 0x26B onward. */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a format-1 clock profile. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a format-2 clock profile. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Si5340 device context: IIC binding plus the cached register page. */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr; /* 7-bit I2C device address (typically 0x74) */
+	nthw_iic_t *mp_nthw_iic;
+	int mn_clk_cfg; /* -1 until a clock configuration is applied */
+	uint8_t m_si5340_page; /* last value written to the page register */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/* Allocate a zero-initialized SPI v3 channel context; NULL on failure. */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset(). */
+	return calloc(1, sizeof(nthw_spi_v3_t));
+}
+
+/* Release the SPIM/SPIS sub-modules, then scrub and free the context. */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (!p)
+		return;
+
+	if (p->mp_spim_mod) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+	if (p->mp_spis_mod) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Set the timeout used by the FIFO wait loops. The value is compared against
+ * NT_OS_GET_TIME_MONOTONIC_COUNTER() deltas; units are counter ticks —
+ * NOTE(review): confirm the intended unit against the OS abstraction layer.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this module (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ * Polls the SPIM Tx FIFO every 1 ms until it reports empty. Returns 0 on
+ * success, the underlying error code on register-access failure, or -1 when
+ * the elapsed monotonic time exceeds time_out.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ * Polls the SPIS Rx FIFO every 10 ms until it is non-empty. Returns 0 when
+ * data is ready, the underlying error code on register-access failure, or -1
+ * when the elapsed monotonic time exceeds time_out.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			/* Fixed: message previously named a non-existent helper */
+			NT_LOG(WRN, NTHW,
+			       "nthw_spis_get_rx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Hex-dump 'count' bytes to the debug log, 16 bytes per output line. */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;
+	char tmp_str[128]; /* 16 entries x 3 chars + NUL fits well within 128 */
+
+	for (i = 0; i < count; i++) {
+		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
+		j++;
+
+		/* Flush a full line of 16 bytes, or the final partial line. */
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0'; /* drop the trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Initialize the SPI v3 channel: create and initialize the SPIM (master) and
+ * SPIS (slave) sub-modules for the given FPGA instance, then reset both.
+ * Errors are logged but initialization continues; the return value is the
+ * result of the final reset (0 on success).
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Fixed: the SPIM and SPIS failure messages were swapped */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container: a
+ * 32-bit header word (opcode + payload size) followed by the payload packed
+ * into big-endian 32-bit words.
+ * Returns 0 on success, the underlying error code on register access or
+ * timeout failure, -1 when the device reports a non-zero error code in the
+ * Rx header, and 1 when the Rx payload exceeds the caller's buffer capacity.
+ * On return, rx_buf->size holds the number of bytes actually received.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Copy the payload into the Tx FIFO, 4 bytes per FIFO word. */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0; /* zero-pad the final partial word */
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0; /* will accumulate the received byte count */
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER ne overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (10bytes ascii) (e.g. "ntmainb1e2" or "ntfront20b1") */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Buffer descriptor for SPI transfers.
+ * Tx: size is the payload length. Rx: size is the capacity on input and the
+ * number of bytes received on output (see nthw_spi_v3_transfer).
+ */
+struct tx_rx_buf {
+	uint16_t size;
+	void *p_buf;
+};
+
+/* SPI v3 channel context: paired SPIM (master) and SPIS (slave) modules. */
+struct nthw__spi__v3 {
+	int m_time_out; /* FIFO wait timeout, in monotonic-counter units */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/* Allocate a zero-initialized SPIM context; NULL on allocation failure. */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset(). */
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Look up the SPIM module instance and cache handles to its SRR, CR, SR,
+ * DTR, DRR and CFG registers and fields. When called with p == NULL this
+ * only probes for the instance (returns 0 if present, -1 if not).
+ * Returns -1 when the instance does not exist.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Scrub and free a SPIM context; a NULL pointer is ignored. */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIM module by writing the documented magic 0x0A to SRR.RST. */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIM control-register enable bit (read-modify-write). */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM Tx FIFO via the DTR register. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/* Read the SR.TXEMPTY flag; *pb_empty is true when the Tx FIFO is empty. */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) ? true : false;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/* SPIM (SPI master) context: cached handles for the module's registers/fields. */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR: reset register (see nthw_spim_reset) */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR: status register (FIFO flags and levels) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR: data transmit register (Tx FIFO input) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: data receive register; CFG: configuration (prescaler) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/* Allocate a zero-initialized SPIS context; NULL on allocation failure. */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset(). */
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Look up the SPIS module instance and cache handles to its SRR, CR, SR,
+ * DTR, DRR and RAM registers and fields. When called with p == NULL this
+ * only probes for the instance (returns 0 if present, -1 if not).
+ * Returns -1 when the instance does not exist.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/* Scrub and free a SPIS context; a NULL pointer is ignored. */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIS module by writing the documented magic 0x0A to SRR.RST. */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIS control-register enable bit (read-modify-write). */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read the SR.RXEMPTY flag; *pb_empty is true when the Rx FIFO is empty. */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) ? true : false;
+
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS Rx FIFO via the DRR register. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS RAM: select the result index via
+ * RAM_CTRL (count = 1), then read the value from RAM_DATA.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/* SPIS (SPI slave) context: cached handles for the module's registers/fields. */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR: reset register (see nthw_spis_reset) */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR: status register (FIFO flags, levels and error bits) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR: data transmit register */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: data receive register (Rx FIFO output) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* RAM access registers (sensor results, see nthw_spis_read_sensor) */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/* Allocate a zero-initialized TSM handle; returns NULL on allocation failure. */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/* Release a TSM handle; safe to call with NULL. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+	/* Clear stale register/field pointers before freeing. */
+	memset(p, 0, sizeof(nthw_tsm_t));
+	free(p);
+}
+
+/*
+ * Bind a TSM handle to FPGA module instance @n_instance.
+ * When @p is NULL this acts as a probe: returns 0 if the module exists, -1
+ * otherwise. On success all register/field pointers are populated.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse the looked-up register handle instead of calling
+		 * module_get_register() a second time for the same register.
+		 */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit timestamp (TS_HI:TS_LO) into @p_ts.
+ * Returns 0 on success, -1 if @p_ts is NULL.
+ * NOTE(review): lo and hi are read as two separate register accesses, so the
+ * combined value may tear if the counter wraps between reads — confirm the
+ * hardware latches TS_HI on the TS_LO read.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The NULL check above makes the old second "if (p_ts)" redundant. */
+	*p_ts = (((uint64_t)n_ts_hi) << 32UL) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit current time (TIME_HI seconds : TIME_LO ns) into @p_time.
+ * Returns 0 on success, -1 if @p_time is NULL.
+ * NOTE(review): two separate register reads — see nthw_tsm_get_ts() about
+ * possible tearing across the lo/hi boundary.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The NULL check above makes a second "if (p_time)" redundant. */
+	*p_time = (((uint64_t)n_time_hi) << 32UL) | n_time_lo;
+
+	return 0;
+}
+
+/* Program the 64-bit time: low word first, then high word; always returns 0. */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	const uint32_t n_lo = (uint32_t)(n_time & 0xFFFFFFFF);
+	const uint32_t n_hi = (uint32_t)((n_time >> 32) & 0xFFFFFFFF);
+
+	field_set_val_flush32(p->mp_fld_time_lo, n_lo);
+	field_set_val_flush32(p->mp_fld_time_hi, n_hi);
+	return 0;
+}
+
+/* Enable/disable timer T0; re-reads the control register before flushing. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *fld = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/* Set the T0 (stat toggle) timer period in ns, e.g. 50*1000*1000. */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *fld = p->mp_fld_timer_timer_t0_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/* Enable/disable timer T1; re-reads the control register before flushing. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *fld = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/* Set the T1 (keep alive) timer period in ns, e.g. 100*1000*1000. */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *fld = p->mp_fld_timer_timer_t1_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/*
+ * Select the timestamp format.
+ * 0x1: Native - 10ns units, start date: 1970-01-01.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	nt_field_t *fld = p->mp_fld_config_ts_format;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/* Register/field handle cache for one TSM (time sync module) FPGA instance. */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_tsm;	/* TSM module within the FPGA model */
+	int mn_instance;	/* module instance number */
+
+	nt_field_t *mp_fld_config_ts_format;	/* TSM_CONFIG.TS_FORMAT */
+
+	/* TSM_TIMER_CTRL: per-timer enable bits */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;	/* T0 period */
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;	/* T1 period */
+
+	/* TS_LO/TS_HI: 64-bit timestamp split across two registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO (ns) / TIME_HI (sec): 64-bit current time */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/* Allocate a zero-initialized DBS handle; returns NULL on allocation failure. */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/* Release a DBS handle; safe to call with NULL. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+	/* Clear stale register/field pointers before freeing. */
+	memset(p, 0, sizeof(nthw_dbs_t));
+	free(p);
+}
+
+/*
+ * Bind a DBS handle to FPGA module instance @n_instance and cache every
+ * register/field handle the driver uses.  When @p is NULL this acts as a
+ * probe: returns 0 if the module exists, -1 otherwise.
+ * Registers fetched with module_get_register()/register_get_field() are
+ * mandatory; those fetched with the *query* variants are optional on this
+ * FPGA image and their pointers may legitimately stay NULL.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* Sanity check: product parameter should agree with module presence. */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* RX/TX global control registers (mandatory). */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* RX/TX queue init registers; the *_VAL variants are optional. */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	/* Optional RX/TX pointer readback registers. */
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	/* Optional RX/TX queue idle registers. */
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* Avail-monitor RAM access (CTRL = address/count, DATA = entry). */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* Used-writer RAM access. */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* Descriptor-reader RAM access. */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* TX queue property RAM (virtual port mapping). */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 (optional). */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Restore the RX control register to its power-on defaults: all queues and
+ * monitors disabled.  The scan/update speeds (8 and 5) are the driver's
+ * default pacing values — presumably hardware clock divider settings; exact
+ * units are not visible here (TODO confirm against register spec).
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Restore the TX control register to its power-on defaults: all queues and
+ * monitors disabled.  NOTE(review): the scan/update speed defaults (5 and 8)
+ * are the mirror of the RX variant's (8 and 5) — confirm this asymmetry is
+ * intentional and not a copy/paste swap.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: disable RX/TX control, then zero every per-queue shadow
+ * entry and flush it to the corresponding hardware RAM bank.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t queue;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Clear all RX memory banks and their shadow copies. */
+	for (queue = 0; queue < NT_DBS_RX_QUEUES_MAX; ++queue) {
+		set_shadow_rx_am_data(p, queue, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, queue);
+
+		set_shadow_rx_uw_data(p, queue, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, queue);
+
+		set_shadow_rx_dr_data(p, queue, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, queue);
+	}
+
+	/* Clear all TX memory banks and their shadow copies. */
+	for (queue = 0; queue < NT_DBS_TX_QUEUES_MAX; ++queue) {
+		set_shadow_tx_am_data(p, queue, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, queue);
+
+		set_shadow_tx_uw_data(p, queue, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, queue);
+
+		set_shadow_tx_dr_data(p, queue, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, queue);
+
+		set_shadow_tx_qp_data(p, queue, 0);
+		flush_tx_qp_data(p, queue);
+
+		set_shadow_tx_qos_data(p, queue, 0, 0, 0);
+		flush_tx_qos_data(p, queue);
+	}
+}
+
+/* Program all six RX control register fields and flush in one shot. */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable,
+		       avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed,
+		       used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/* Read back the six RX control fields into the caller's out-parameters. */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/* Program all six TX control register fields and flush in one shot. */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Was missing from the debug dump; the RX variant prints all six. */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/* Read back the six TX control fields into the caller's out-parameters. */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Kick RX queue initialization.  The optional INIT_VAL register (start
+ * index/pointer) is written first when the FPGA image provides it.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val != NULL) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/* Read back the RX init register fields (init flag, queue, busy bit). */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+
+	return 0;
+}
+
+/*
+ * Kick TX queue initialization.  The optional INIT_VAL register (start
+ * index/pointer) is written first when the FPGA image provides it.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val != NULL) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/* Read back the TX init register fields (init flag, queue, busy bit). */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+
+	return 0;
+}
+
+/*
+ * Request RX idle state for @queue.  Returns -ENOTSUP when this FPGA image
+ * has no RX_IDLE register.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read idle/busy status from the optional RX_IDLE register.
+ * Note: *queue is not read back from hardware — it is always reported as 0.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	*queue = 0;
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request TX idle state for @queue.  Returns -ENOTSUP when this FPGA image
+ * has no TX_IDLE register.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read idle/busy status from the optional TX_IDLE register.
+ * Note: *queue is not read back from hardware — it is always reported as 0.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	*queue = 0;
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the pointer readback register reports.
+ * Returns -ENOTSUP when this FPGA image has no RX_PTR register.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (p->mp_reg_rx_ptr == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX pointer and its valid flag for the previously selected queue.
+ * Note: *queue is not read back from hardware — it is always reported as 0.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (p->mp_reg_rx_ptr == NULL)
+		return -ENOTSUP;
+
+	*queue = 0;
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the pointer readback register reports.
+ * Returns -ENOTSUP when this FPGA image has no TX_PTR register.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (p->mp_reg_tx_ptr == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Point the RX avail-monitor data register at bank entry @index (cnt=1). */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow-only setters: update the host-side copy of the RX_AM_DATA bank. */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/*
+ * NOTE(review): naming is inconsistent with the sibling setters
+ * (set_shadow_rx_am_data_*); consider renaming in a follow-up.
+ */
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Update every field of the RX_AM_DATA shadow entry for @index. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write the shadow RX_AM_DATA entry @index to hardware.
+ * The 64-bit guest physical address is written as two 32-bit words
+ * (NOTE(review): assumes host word layout matches the FPGA expectation —
+ * confirm on big-endian hosts). The packed/int fields are optional and
+ * only written when present in this FPGA image.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: configure RX avail-monitor entry @index (shadow + HW flush).
+ * Returns -ENOTSUP when the RX_AM_DATA register is absent.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Point the TX avail-monitor data register at bank entry @index (cnt=1). */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/*
+ * Update the host-side shadow copy of TX_AM_DATA entry @index.
+ * Host memory only; flush_tx_am_data() pushes it to the FPGA.
+ */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	struct nthw_dbs_tx_am_data_s *const shadow = &p->m_tx_am_shadow[index];
+
+	shadow->guest_physical_address = guest_physical_address;
+	shadow->enable = enable;
+	shadow->host_id = host_id;
+	shadow->packed = packed;
+	shadow->int_enable = int_enable;
+}
+
+/*
+ * Write the shadow TX_AM_DATA entry @index to hardware.
+ * The 64-bit guest physical address is written as two 32-bit words;
+ * the optional packed/int fields are only written when present.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: configure TX avail-monitor entry @index (shadow + HW flush).
+ * Returns -ENOTSUP when the TX_AM_DATA register is absent.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Point the RX used-writer data register at bank entry @index (cnt=1). */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow-only setters: update the host-side copy of the RX_UW_DATA bank. */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored as a log2 exponent; see flush_rx_uw_data(). */
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Update every field of the RX_UW_DATA shadow entry for @index. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write the shadow RX_UW_DATA entry @index to hardware.
+ * For DBS module versions newer than 0.8 the queue size is encoded as a
+ * mask, (1 << queue_size) - 1; older versions take the raw exponent.
+ * The interrupt-related fields (int/vec/istk) are written as a group when
+ * the int field exists in this FPGA image.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: configure RX used-writer entry @index (shadow + HW flush).
+ * Returns -ENOTSUP when the RX_UW_DATA register is absent.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the TX used-writer data register at bank entry @index (cnt=1). */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow-only setters: update the host-side copy of the TX_UW_DATA bank. */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored as a log2 exponent; see flush_tx_uw_data(). */
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Update every field of the TX_UW_DATA shadow entry for @index. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write the shadow TX_UW_DATA entry @index to hardware.
+ * For DBS module versions newer than 0.8 the queue size is encoded as a
+ * mask, (1 << queue_size) - 1; older versions take the raw exponent.
+ * Optional fields (packed, int/vec/istk group, in_order) are only written
+ * when present in this FPGA image.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: configure TX used-writer entry @index (shadow + HW flush).
+ * Returns -ENOTSUP when the TX_UW_DATA register is absent.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the RX descriptor-reader data register at bank entry @index (cnt=1). */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow-only setters: update the host-side copy of the RX_DR_DATA bank. */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored as a log2 exponent; see flush_rx_dr_data(). */
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Update every field of the RX_DR_DATA shadow entry for @index. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write the shadow RX_DR_DATA entry @index to hardware.
+ * For DBS module versions newer than 0.8 the queue size is encoded as a
+ * mask, (1 << queue_size) - 1; older versions take the raw exponent.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: configure RX descriptor-reader entry @index
+ * (shadow + HW flush). Returns -ENOTSUP when the register is absent.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX descriptor-reader data register at bank entry @index (cnt=1). */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Shadow-only setters: update the host-side copy of the TX_DR_DATA bank. */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored as a log2 exponent; see flush_tx_dr_data(). */
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/* Update every field of the TX_DR_DATA shadow entry for @index. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write the shadow TX_DR_DATA entry @index to hardware.
+ * For DBS module versions newer than 0.8 the queue size is encoded as a
+ * mask, (1 << queue_size) - 1; older versions take the raw exponent.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: configure TX descriptor-reader entry @index
+ * (shadow + HW flush). Returns -ENOTSUP when the register is absent.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX queue-property data register at bank entry @index (cnt=1). */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow-only setter for the single TX_QP_DATA field. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write the shadow TX_QP_DATA entry @index to hardware. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Public entry: bind TX queue @index to @virtual_port (shadow + HW flush).
+ * Returns -ENOTSUP when the TX_QP_DATA register is absent.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Point the TX QoS data register at bank entry @index (cnt=1).
+ * NOTE(review): the mp_reg_tx_queue_qos_control_adr/_cnt handles are
+ * nt_field_t despite the mp_reg_ prefix (see nthw_dbs.h) — consider a
+ * rename to mp_fld_* for consistency.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow-only setters: update the host-side copy of the TX QoS bank. */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Update every field of the TX QoS shadow entry for @index. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write the shadow TX QoS entry @index to hardware. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Public entry: configure TX QoS entry @index (shadow + HW flush).
+ * Returns -ENOTSUP when the TX QoS data register is absent.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate as a mul/div fraction.
+ * Returns -ENOTSUP when the TX QoS rate register is absent.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA: host shadow of one RX avail-monitor bank entry. */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;	/* packed (1) vs split (0) virtqueue layout — TODO confirm */
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA: host shadow of one TX avail-monitor bank entry. */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA: host shadow of one RX used-writer bank entry. */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2; encoded as mask for DBS > 0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;	/* interrupt vector */
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA: host shadow of one TX used-writer bank entry. */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2; encoded as mask for DBS > 0.8 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;	/* in-order completion; optional in HW */
+};
+
+/* DBS_RX_DR_DATA: host shadow of one RX descriptor-reader bank entry. */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2; encoded as mask for DBS > 0.8 */
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: host shadow of one TX descriptor-reader bank entry. */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2; encoded as mask for DBS > 0.8 */
+	uint32_t header;
+	uint32_t port;	/* egress port for this TX queue */
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: host shadow of one TX queue-property bank entry. */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* Host shadow of one TX QoS bank entry. */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;	/* information rate */
+	uint32_t bs;	/* burst size */
+};
+
+/*
+ * DBS (doorbell/queue supervisor) module context: FPGA handle, register
+ * and field handles, plus host-side shadows of the indexed register banks.
+ * Optional register/field pointers are NULL when absent from the image.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Indexed register banks: *_control selects an entry, *_data carries it. */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/*
+	 * NOTE(review): the following nt_field_t members carry an mp_reg_
+	 * prefix although they are field handles — consider renaming to
+	 * mp_fld_* in a follow-up.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Host-side shadows of the indexed banks, one entry per queue. */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Lifecycle: allocate, bind to an FPGA instance, reset, free. */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/*
+ * Register accessors. All int-returning functions yield 0 on success and
+ * -ENOTSUP when the corresponding optional register is absent.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif	/* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Meta port category; selects which MTU/QoS table nthw_epp_set_mtu() programs. */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE, /* currently rejected by nthw_epp_set_mtu() (-ENOTSUP) */
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Hardware identification reported by firmware plus VPD-derived MAC data. */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	/* Vital Product Data: MAC address allocation for this adapter */
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-adapter FPGA state shared across the nthw layer: image identification,
+ * port counts, instantiated module handles, and the PCI mapping needed for
+ * register access and DMA setup.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;
+
+	/* Image identification (decoded from the FPGA id) */
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Capability counts of this image */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	/* Live FPGA model (see nthw_fpga_model.c) */
+	struct nt_fpga_s *mp_fpga;
+
+	/* Core module handles, instantiated during adapter bring-up */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/*
+ * Allocate a zeroed EPP handle; bind it with nthw_epp_init().
+ * Returns NULL on allocation failure.  Free with nthw_epp_delete().
+ */
+nthw_epp_t *nthw_epp_new(void)
+{
+	/* calloc zero-initializes in one step (same effect as malloc+memset) */
+	nthw_epp_t *p = calloc(1, sizeof(nthw_epp_t));
+
+	return p;
+}
+
+/* Scrub and release an EPP handle; a NULL handle is ignored. */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_epp_t));
+	free(p);
+}
+
+/* Probe for MOD_EPP instance @n_instance; returns non-zero when present. */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	/* A NULL handle makes nthw_epp_init() probe-only (0 == found) */
+	const int n_rc = nthw_epp_init(NULL, p_fpga, n_instance);
+
+	return n_rc == 0;
+}
+
+/*
+ * Look up all EPP registers and fields from the FPGA model.
+ *
+ * When @p is NULL this only probes for MOD_EPP instance @n_instance and
+ * returns 0 if present, -1 otherwise (used by nthw_epp_present()).
+ * With a valid @p, the handle is bound to the module and every register and
+ * field pointer below is resolved.  Returns 0 on success, -1 if the
+ * instance does not exist.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	/* Category count is a product parameter; defaults to 0 when absent */
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory: control (address/count) and data registers */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical (TX port) MTU table */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Virtual queue MTU table */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Physical port QoS (rate/burst shaping) */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual port QoS */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue to virtual-port mapping */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Program power-on defaults: clear every recipe entry, install the NRECIPE
+ * default recipes (no adjust / VXLAN / VXLAN-IPv6 size adjusts), set all
+ * MTU entries to MTUINITVAL, and disable QoS shaping on all physical and
+ * virtual entries.  Returns 0; a NULL handle is a no-op.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* One table entry is written per data-register flush */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Install the default recipes (see rcp_data_size_adjust_* tables) */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* Physical MTU setup: entries 0..1 to the default MTU */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* Physical QoS setup: shaping disabled on entries 0..1 */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* Virtual queue MTU setup: entries 0..127 to the default MTU */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* Virtual port QoS setup: shaping disabled on entries 0..127 */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ * @port_type selects the physical (TXP) or virtual (queue) MTU table.
+ * Returns 0 on success (a NULL handle is a no-op); -ENOTSUP for an
+ * unsupported port type, after resetting the touched MTU registers.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the TXP MTU entry for this port */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue MTU entry for this port */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Restore both MTU register pairs to reset values */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure QoS shaping for a physical TX port.  Shaping is enabled when
+ * any of rate/fraction/burst is non-zero.  A NULL handle is a no-op.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Address the physical-port QoS entry */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	const uint32_t n_enable = (information_rate != 0 ||
+				   information_rate_fractional != 0 ||
+				   burst_size != 0);
+
+	field_set_val32(p->mp_fld_txp_qos_data_enable, n_enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Configure QoS shaping for a virtual port.  Shaping is enabled when any
+ * of rate/fraction/burst is non-zero.  A NULL handle is a no-op.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Address the virtual-port QoS entry */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	const uint32_t n_enable = (information_rate != 0 ||
+				   information_rate_fractional != 0 ||
+				   burst_size != 0);
+
+	field_set_val32(p->mp_fld_vport_qos_data_enable, n_enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/* Map queue @qid to virtual port @vport.  Returns 0; NULL handle is a no-op. */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Address the mapping entry for this queue */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	/* Write the destination virtual port */
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6: 70 bytes (presumably +20 for the larger IPv6 header) */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Default max MTU programmed by nthw_epp_setup() */
+#define MTUINITVAL 1500
+/* Number of default recipes installed by nthw_epp_setup() */
+#define NRECIPE 3
+
+/*
+ * List of size adjust values to put in the recipe memory data register at
+ * startup.  NOTE(review): static const arrays defined in a header are
+ * duplicated in every including translation unit — consider moving them
+ * to nthw_epp.c.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * EPP module handle: register/field pointers for recipe memory, MTU tables,
+ * QoS shaping and the queue-to-vport mapping, resolved in nthw_epp_init().
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories; /* from product parameter NT_EPP_CATEGORIES */
+
+	/* Recipe memory (note: "reciepe" spelling is part of the field names) */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Physical (TX port) MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Virtual queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* Physical port QoS shaping */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual port QoS shaping */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue to virtual-port mapping */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated table of supported FPGA images; counted by fpga_mgr_init() */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+static const struct {
+	const int a;
+	const char *b;
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/*
+ * Bus type id to name mapping.
+ * NOTE: this needs to be (manually) synced with the bus type enum;
+ * index 0 ("ERR") doubles as the fallback for unknown ids.
+ */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Translate a bus type id to its display name.
+ * Valid indices are 1 .. ARRAY_SIZE-1; index 0 is the "ERR" slot, which is
+ * also returned for out-of-range ids.  The upper bound must be exclusive:
+ * the previous "<= ARRAY_SIZE" comparison read one element past the end of
+ * a_bus_type for id == ARRAY_SIZE.
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array.
+ * Uses naive linear search as performance is not an issue here...
+ *
+ * The table ends in a { 0, NULL } sentinel, so stopping at the last entry
+ * both bounds the scan and yields "unknown" on a miss.  The previous
+ * "<= ARRAY_SIZE" loop bound indexed one element past the end of the table
+ * when no id matched.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map) - 1; i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
+/*
+ * Read @len 32-bit words starting at @addr from the bus selected by
+ * @n_bus_type_id into @p_data.  BAR/PCI accesses are single-register only
+ * (len must be 1); RAB0..2 accesses go through the RAC.
+ * Returns 0 on success, -1 on failure; asserts on unknown bus types.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* register bus: one word per access */
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Same as read_data(), but with TSC capture outputs in the signature.
+ * Timestamp capture is not implemented; @p_tsc1/@p_tsc2 are ignored.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Write @len 32-bit words from @p_data to @addr on the bus selected by
+ * @n_bus_type_id.  BAR/PCI accesses are single-register only (len must be
+ * 1); RAB0..2 accesses go through the RAC.
+ * Returns 0 on success, -1 on failure; asserts on unknown bus types.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* register bus: one word per access */
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/*
+ * Allocate an FPGA manager; returns NULL on allocation failure.
+ * Zero-initialized so the object is in a defined state even before
+ * fpga_mgr_init() runs (fpga_new()/nthw_epp_new() do the same).
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = calloc(1, sizeof(nt_fpga_mgr_t));
+
+	return p;
+}
+
+/*
+ * Scrub and release an FPGA manager.  A NULL pointer is ignored: without
+ * the guard, memset(NULL, ...) would be undefined behavior (the other
+ * *_delete() functions in this driver already tolerate NULL).
+ */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/* Bind the manager to the generated instance table and count its entries. */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	/* The instance table is NULL-terminated; count entries up to it */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Find the product description matching @n_fpga_id and instantiate an
+ * FPGA model for it.  Returns NULL (and logs an error) when no supported
+ * image matches the product/version/revision triple.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		/* All three id components must match */
+		if (p_init->fpga_product_id != n_fpga_prod ||
+				p_init->fpga_version != n_fpga_ver ||
+				p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print the supported FPGA images to @fh_out, one record per line.
+ * detail_level 0 prints "prod-ver-rev" only; any other level adds the
+ * build time (hex and human readable).
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/*
+			 * time_t is not necessarily "long", so cast explicitly
+			 * to keep the "%08lX" specifier and its argument in
+			 * agreement on every platform.  ctime() output already
+			 * ends in '\n'.
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Log all supported FPGA images at DBG level. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused: presumably suppresses the unused warning when NT_LOG
+		 * compiles to nothing in some builds — confirm
+		 */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zeroed FPGA model object; returns NULL on allocation failure. */
+nt_fpga_t *fpga_new(void)
+{
+	nt_fpga_t *p = calloc(1, sizeof(nt_fpga_t));
+
+	return p;
+}
+
+/*
+ * Scrub and release an FPGA model object.  A NULL pointer is ignored:
+ * without the guard, memset(NULL, ...) would be undefined behavior
+ * (param_delete() in this file already tolerates NULL).
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/* Delete every instantiated module, then the FPGA object itself. */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		if (p->mpa_modules[i])
+			module_delete(p->mpa_modules[i]);
+	}
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate @p from the static product description @fpga_prod_init.
+ * Copies the identification fields, then allocates and initializes the
+ * product-parameter and module tables.
+ *
+ * NOTE(review): if a table allocation fails, the corresponding count stays
+ * non-zero while the array pointer is NULL — later lookups (e.g.
+ * fpga_query_module()) would dereference NULL; confirm whether allocation
+ * failure is treated as fatal upstream.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	/* Identification of this image */
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	/* Build the product-parameter table */
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	/* Build the module table */
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+/* Store the debug level and propagate it to every instantiated module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		if (p->mpa_modules[i])
+			module_set_debug_mode(p->mpa_modules[i], n_debug_mode);
+	}
+}
+
+/* Find module @id/@instance in this FPGA; NULL when not instantiated. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	/* Linear search: the module count is small */
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod->m_mod_id != id)
+			continue;
+		if (p_mod->m_instance != instance)
+			continue;
+		return p_mod;
+	}
+	return NULL;
+}
+
+/* True when module @id/@instance exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/*
+ * Find the static init descriptor for module @id/@instance in the product
+ * description (not the live module table); NULL when absent.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mp_init->nb_modules; i++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[i];
+
+		if (p_mod_init->id == id &&
+				p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/* Look up product parameter @n_param_id; @n_default_value when absent. */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int i;
+
+	for (i = 0; i < p->mn_params; i++) {
+		const nt_param_t *p_param = p->mpa_params[i];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Product id of this FPGA model. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* FPGA image version number. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* FPGA image revision number. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the full FPGA identification (item-prod-ver-rev-patch-build + build time). */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug-dump the FPGA summary, then all parameters and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug-dump every product parameter of the FPGA. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (idx = 0; idx < p->mn_params; idx++)
+		param_dump(p->mpa_params[idx]);
+}
+
+/* Debug-dump every module of the FPGA. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (idx = 0; idx < p->mn_modules; idx++)
+		module_dump(p->mpa_modules[idx]);
+}
+
+/*
+ * Param
+ */
+/*
+ * Allocate a parameter object.
+ * Fix: use calloc instead of malloc so the object is zero-initialized;
+ * callers invoke param_init() separately and a partially used object
+ * must never expose uninitialized members.
+ */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = calloc(1, sizeof(nt_param_t));
+	return p;
+}
+
+/* Scrub and free a parameter object; NULL is silently ignored. */
+void param_delete(nt_param_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nt_param_t));
+	free(p);
+}
+
+/* Bind a parameter object to its owning FPGA and its static init data. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-dump a single parameter (id and value). */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/*
+ * Allocate a module object.
+ * Fix: use calloc instead of malloc. module_init() only assigns
+ * mpa_registers when nb_registers is non-zero, so a malloc'ed object
+ * could carry an indeterminate pointer; zero-init makes it NULL.
+ */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+/*
+ * Free a module object and every register object it owns.
+ * Fix: also free the register pointer array allocated by module_init();
+ * the original released the registers but leaked the array itself.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	/* mpa_registers is only assigned when mn_registers != 0 */
+	if (p->mn_registers)
+		free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialize a module object from its static init descriptor:
+ * copy identity/version/bus fields, inherit the owner's debug mode,
+ * and allocate + populate one register object per register.
+ * NOTE(review): if the malloc fails, mn_registers stays non-zero while
+ * mpa_registers is NULL — callers iterating registers should cope; verify.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Convenience initializer: look up the init descriptor by id/instance,
+ * run the regular module_init(), then force the given debug mode.
+ * NOTE(review): fpga_lookup_init() may return NULL for an unknown
+ * id/instance, which module_init() would dereference — confirm callers
+ * only pass known modules.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump the module summary, then each of its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register of the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (idx = 0; idx < p->mn_registers; idx++)
+		register_dump(p->mpa_registers[idx]);
+}
+
+/* Major version of the module implementation. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Minor version of the module implementation. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack major/minor version into one uint64 (major in the high 32 bits). */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/*
+ * True when the module's version is >= the requested major.minor.
+ * Equal majors compare on minor; otherwise the majors decide.
+ */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+/* Linear scan for a register by id; NULL when not present. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		if (p->mpa_registers[idx]->m_id == id)
+			return p->mpa_registers[idx];
+	}
+	return NULL;
+}
+
+/*
+ * Look up a register by id, logging an error when the module context is
+ * NULL or the register is absent. Returns NULL in both error cases.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Like module_get_register() but silent: NULL without error logging. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Current debug mode of the module. */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set module debug mode and propagate it to all of its registers. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		nt_register_t *p_cur = p->mpa_registers[idx];
+
+		if (p_cur != NULL)
+			register_set_debug_mode(p_cur, n_debug_mode);
+	}
+}
+
+/* Bus type id this module is accessed through. */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Base address of the module's register window. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* Log that the module exists in the FPGA but is not supported here. */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* fix: terminate with '\n' like every other NT_LOG call site */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/*
+ * Allocate a register object.
+ * Fix: use calloc instead of malloc. register_init() only assigns
+ * mpa_fields/mp_shadow/mp_dirty when nb_fields is non-zero, so
+ * register_delete() would test/free indeterminate pointers for a
+ * field-less register — undefined behavior. Zero-init makes them NULL.
+ */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+/*
+ * Free a register object, its field objects, and its shadow buffers.
+ * Fix: also free the field pointer array allocated by register_init();
+ * the original released the fields but leaked the array itself.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+	/* mpa_fields is only assigned when mn_fields != 0 */
+	if (p->mn_fields)
+		free(p->mpa_fields);
+
+	if (p->mp_shadow)
+		free(p->mp_shadow);
+
+	if (p->mp_dirty)
+		free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Initialize a register object from its static init descriptor:
+ * compute the absolute address from the module base, derive the word
+ * length from the bit width, inherit the module's debug mode, and
+ * allocate the field objects plus the shadow/dirty buffers.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	/* length in 32-bit words, rounded up from the bit width */
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* shadow: cached copy of the register contents */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			/* dirty: per-word write-back pending flags */
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug-dump the register summary, then each of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (idx = 0; idx < p->mn_fields; idx++)
+		field_dump(p->mpa_fields[idx]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute bus address of the register. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Write each field's reset value into the shadow buffer. */
+void register_reset(const nt_register_t *p)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *p_cur = p->mpa_fields[idx];
+
+		if (p_cur != NULL)
+			field_reset(p_cur);
+	}
+}
+
+/* Linear scan for a field by id; NULL for no match or NULL register. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int idx;
+
+	if (p == NULL)
+		return NULL;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		if (p->mpa_fields[idx]->m_id == id)
+			return p->mpa_fields[idx];
+	}
+	return NULL;
+}
+
+/*
+ * Look up a field by id, logging an error when the register context is
+ * NULL or the field is absent. Returns NULL in both error cases.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Like register_get_field() but silent: NULL without error logging. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Bit width of the register. */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Register address relative to the module base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* Current debug mode (note: parameter is declared as nt_module_t here). */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *p_cur = p->mpa_fields[idx];
+
+		if (p_cur != NULL)
+			field_set_debug_mode(p_cur, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register's raw contents from hardware into the shadow
+ * buffer; returns the status from read_data().
+ *
+ * Fix: the original dereferenced p (module_get_bus(p->mp_owner),
+ * p->m_addr, ...) in the initializers BEFORE its own "if (p && ...)"
+ * NULL test, making that test useless. Validate first, then use.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	return read_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			 p->m_len, p->mp_shadow);
+}
+
+/*
+ * Read the register from hardware into the shadow buffer, capturing
+ * timestamp counters around the access; returns read_data_tsc() status.
+ *
+ * Fix: same check-after-use defect as register_read_data() — p was
+ * dereferenced before its NULL test; validate first, then use.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	return read_data_tsc(p_fpga_info, module_get_bus(p->mp_owner),
+			     p->m_addr, p->m_len, p->mp_shadow, p_tsc1, p_tsc2);
+}
+
+/*
+ * Write cnt copies of the register length from the shadow buffer to
+ * hardware; returns the status from write_data().
+ *
+ * Fix: same check-after-use defect as register_read_data() — p was
+ * dereferenced before its NULL test; validate first, then use.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	return write_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			  (p->m_len * cnt), p->mp_shadow);
+}
+
+/*
+ * Copy up to len 32-bit words from the shadow buffer into p_data.
+ * len == (uint32_t)-1 (or any len > m_len) is clamped to the full
+ * register length.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (i = 0; i < len; i++)
+		p_data[i] = p->mp_shadow[i];
+}
+
+/* First 32-bit word of the shadow buffer (no hardware access). */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Refresh the shadow buffer from hardware; a no-op for write-only
+ * registers. When ON_READ debug is set, trace the read and its data.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Read the register from hardware and return its first 32-bit word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every shadow word as pending a hardware write-back. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t w;
+
+	for (w = 0; w < p->m_len; w++)
+		p->mp_dirty[w] = true;
+}
+
+/*
+ * Copy caller data into the register's shadow buffer.
+ * len == (uint32_t)-1 means "whole register".
+ *
+ * Fix: clamp len BEFORE asserting. The original asserted
+ * len <= m_len first, so the documented -1 sentinel tripped the
+ * assert in debug builds before the clamp could take effect
+ * (register_get_val clamps first — made consistent with it).
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Set the shadow buffer and immediately write it back to hardware. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write the shadow buffer to hardware (cnt back-to-back copies);
+ * a no-op for read-only registers. Optionally traces the write when
+ * ON_WRITE debug is set, then clears the dirty flags.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		/* bus transfer limit */
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			/* NOTE: this inner i intentionally shadows the outer i */
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a hardware read and capture the surrounding timestamps. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow buffer and mark all words dirty. */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0x00, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set every bit of the shadow buffer and mark all words dirty. */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/*
+ * Allocate a field object.
+ * Fix: use calloc instead of malloc — field_init() never assigns
+ * mp_init, so with malloc that member stayed indeterminate.
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = calloc(1, sizeof(nt_field_t));
+	return p;
+}
+
+/* Scrub and free a field object. */
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize a field object from its static init descriptor and
+ * precompute the word/bit decomposition used by field_get_val()/
+ * field_set_val(): a front mask in the first shadow word, a number of
+ * whole 32-bit body words, and a tail mask in the last word.
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* bits available in the first word from m_first_bit upward */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/* Current debug mode (note: parameter is declared as nt_module_t here). */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the field's debug mode. */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* Bit width of the field. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Lowest bit position of the field within the register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Highest (inclusive) bit position of the field within the register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/* Field mask positioned within the first shadow word (not shifted down). */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Write the field's reset value into the shadow buffer (no flush). */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Field mask shifted down to bit 0 (i.e. mask for the field's value). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+/* The field's documented reset value. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field's value from the owning register's shadow buffer
+ * into p_data (len must equal mn_words). Works through a 64-bit
+ * staging union so values spanning a word boundary can be shifted
+ * down by m_first_bit in one step: front word, whole body words,
+ * then the tail-masked final word.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert the field's value from p_data (len must equal mn_words) into
+ * the owning register's shadow buffer and mark the register dirty.
+ * Mirror image of field_get_val(): shifts the data up by m_first_bit
+ * via a 64-bit staging union, merging under the front and tail masks
+ * so neighboring fields' bits are preserved.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Set the field value and flush the owning register to hardware. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Field value as a single 32-bit word, from the shadow buffer. */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+/* Re-read the owning register from hardware, then return the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+/* Trigger a timestamped read of the owning register. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Refresh the owning register's shadow buffer from hardware. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the owning register's shadow buffer back to hardware. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set the field from a single 32-bit value (shadow only, no flush). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Set the field from a 32-bit value and flush the register to hardware. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear all bits of a field that fits in one word (shadow only). */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+/* Clear all bits of the field and flush the register to hardware. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set all bits of a field that fits in one word (shadow only). */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set all bits of the field and flush the register to hardware. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Polling conditions understood by field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,	/* wait until every field bit is 0 */
+	FIELD_MATCH_SET_ALL,	/* wait until every field bit is 1 */
+	FIELD_MATCH_CLR_ANY,	/* wait until at least one bit is 0 */
+	FIELD_MATCH_SET_ANY,	/* wait until at least one bit is 1 */
+};
+
+/*
+ * Poll a <=32-bit field until the given condition holds.
+ * -1 for iterations/interval selects the defaults (10000 x 100 usec).
+ * Returns 0 when the condition is met, -1 on timeout.
+ *
+ * Fix: compute the all-ones mask in 64-bit — the original
+ * (1 << mn_bit_width) is undefined behavior for a 32-bit wide field.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(((1ULL << p->mn_bit_width) - 1)
+				   << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Poll until every field bit reads 1; 0 on success, -1 on timeout. */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit reads 0; 0 on success, -1 on timeout. */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit reads 1; 0 on success, -1 on timeout. */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit reads 0; 0 on success, -1 on timeout. */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until its value matches n_wait_cond_value under
+ * n_wait_cond_mask. -1 for iterations/interval selects the defaults
+ * (10000 x 100 usec). Returns 0 on match, -1 on timeout.
+ *
+ * Fix: mask the readback before comparing. The original compared the
+ * unmasked value against the masked condition, so any set bit outside
+ * n_wait_cond_mask made the condition unsatisfiable.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if ((val & n_wait_cond_mask) ==
+				(n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Debug-dump the field layout (id, bit range, width, words, reset value). */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Debug-dump the field's current shadow value, most significant word
+ * first. NOTE(review): buf holds 32 words — assumes mn_words <= 32
+ * (i.e. fields no wider than 1024 bits); confirm against register defs.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Debug-dump a static field init descriptor. */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Instantiate the FPGA model matching the packed 64-bit ident.
+ * Builds a temporary manager, queries it for a supported model, logs
+ * (but does not fail hard) when no match exists, and returns the model
+ * or NULL. The manager is deleted before returning.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+/* Thin wrapper: look up a module by id/instance (NULL when absent). */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, n_mod, n_instance);
+	return p_mod;
+}
+
+/* Thin wrapper: look up a register by id, logging on failure. */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	nt_register_t *p_reg = module_get_register(p_mod, n_reg);
+	return p_reg;
+}
+
+/* Thin wrapper: look up a field by id, logging on failure. */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	nt_field_t *p_fld = register_get_field(p_reg, n_fld);
+	return p_fld;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+/*
+ * Decode fields of a packed 64-bit FPGA ident:
+ * bits [39:32] product type, [31:16] product code,
+ * [15:8] version, [7:0] revision.
+ */
+#ifndef FPGAID_TO_PRODUCTCODE
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack major/minor into one uint64 (major in the high 32 bits). */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug trace selection: trace register reads, writes, or nothing. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module can be reached through. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of the statically supported FPGA product descriptions. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;	/* number of entries in mpa_fpga_prod_init */
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* Runtime model of one FPGA: identity, parameters, and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;	/* device/bus access context */
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;			/* number of product parameters */
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;			/* number of module objects */
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;	/* static product description */
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One product parameter (id/value pair) of an FPGA model. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;	/* FPGA this parameter belongs to */
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;	/* static init descriptor */
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* Runtime model of one FPGA module and the registers it contains. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;	/* FPGA this module belongs to */
+
+	int m_mod_id;
+
+	int m_instance;		/* instance index for multi-instance modules */
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;		/* bus type used to access the module */
+	uint32_t m_addr_base;	/* base address of the register window */
+
+	int m_debug_mode;
+
+	int mn_registers;	/* number of register objects */
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;	/* static init descriptor */
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* Runtime model of one register: layout, fields, and shadow state. */
+struct nt_register_s {
+	nt_module_t *mp_owner;	/* module this register belongs to */
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;	/* address relative to the module base */
+	uint32_t m_addr;	/* absolute bus address */
+	uint32_t m_type;	/* RO/WO/RW register type */
+	uint32_t m_len;		/* register length in 32-bit words */
+
+	int m_debug_mode;
+
+	int mn_fields;		/* number of field objects */
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;	/* cached copy of the register contents */
+	bool *mp_dirty;		/* per-word write-back pending flags */
+
+	nt_fpga_register_init_t *mp_init;	/* static init descriptor */
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* Runtime model of one bit field within a register, with precomputed
+ * front/body/tail decomposition for word-spanning access. */
+struct nt_field_s {
+	nt_register_t *mp_owner;	/* register this field belongs to */
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;	/* lowest bit position in the register */
+	uint32_t m_reset_val;
+	uint32_t m_first_word;	/* index of the first shadow word touched */
+	uint32_t m_first_bit;	/* bit offset within that first word */
+	uint32_t m_front_mask;	/* field bits within the first word */
+	uint32_t m_body_length;	/* number of whole 32-bit words in between */
+	uint32_t mn_words;	/* field length in 32-bit words */
+	uint32_t m_tail_mask;	/* field bits within the last word */
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;	/* static init descriptor */
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#ifndef ARRAY_SIZE
+/* Element count of a true array - invalid on pointers/decayed parameters */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	/*
+	 * Map a Napatech PCI device id to its adapter id.
+	 * Note: the NT40A00 and NT40A01 boards deliberately map to the
+	 * NT40E3 adapter id (same mapping as the original switch).
+	 */
+	static const struct {
+		uint16_t pci_device_id;
+		nthw_adapter_id_t adapter_id;
+	} id_map[] = {
+		{ NT_HW_PCI_DEVICE_ID_NT40E3, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT100E3, NT_HW_ADAPTER_ID_NT100E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT80E3, NT_HW_ADAPTER_ID_NT80E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT40A00, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT40A01, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200E3, NT_HW_ADAPTER_ID_NT200E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A01, NT_HW_ADAPTER_ID_NT200A01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200D01, NT_HW_ADAPTER_ID_NT200D01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A02, NT_HW_ADAPTER_ID_NT200A02 },
+		{ NT_HW_PCI_DEVICE_ID_NT50B01, NT_HW_ADAPTER_ID_NT50B01 },
+		{ NT_HW_PCI_DEVICE_ID_NT100A01, NT_HW_ADAPTER_ID_NT100A01 },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(id_map) / sizeof(id_map[0]); i++) {
+		if (id_map[i].pci_device_id == n_pci_device_id)
+			return id_map[i].adapter_id;
+	}
+	return NT_HW_ADAPTER_ID_UNKNOWN;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/*
+ * Adapter generation ids derived from the PCI device id
+ * (see nthw_platform_get_nthw_adapter_id()).
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* Deliberate alias: NT40A01 shares the NT40E3 adapter id, so the
+	 * enumerators below are one smaller than a plain listing would give.
+	 */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/*
+ * FPGA product profile. Values are explicit; presumably they mirror an
+ * FPGA-side encoding, so keep them stable - TODO confirm against firmware.
+ */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC context.
+ * Returns NULL on allocation failure. (The original memset() the
+ * unchecked malloc() result, crashing on out-of-memory; nthw_rac_init()
+ * already handles a NULL context, so returning NULL is safe.)
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = calloc(1, sizeof(nthw_rac_t));
+
+	return p;
+}
+
+/*
+ * Release a RAC context previously created by nthw_rac_new().
+ * The structure is scrubbed before being freed; NULL is accepted.
+ */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Initialize the RAC (Register Access Control) context from the FPGA model:
+ * look up the RAC module, cache register/field handles plus their BAR0
+ * addresses and masks, and prepare the RAB DMA state.
+ * When p == NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 if the RAC module instance does not exist.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 *
+		 * NOTE(fix): this block originally ran before mp_reg_rab_init
+		 * was looked up, so register_set_debug_mode() received the
+		 * still-NULL pointer from the zeroed allocation. It now runs
+		 * after the RAC_RAB_INIT lookup above.
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/*
+		 * NOTE(fix): the original re-assigned mp_reg_dbg_data (already
+		 * NULL here) and left mp_fld_dbg_data untouched - copy/paste
+		 * slip mirroring the dbg_ctrl block above.
+		 */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw BAR0 addresses for the direct read/write helpers */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* NMB registers are optional; cache addresses only when present */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/*
+ * Return the RAB bus interface count read from product parameter
+ * NT_RAC_RAB_INTERFACES and cached by nthw_rac_init().
+ */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-poll the RAB output-buffer "used" counter until at least word_cnt
+ * result words are available, i.e. the previously issued RAB operation has
+ * completed. Polls up to a fixed iteration count with no delay between
+ * register reads. Returns 0 on success, -1 on timeout (logged).
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	uint32_t retry;
+
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		/* Extract the OB-used field from the combined register value */
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			break;
+	}
+
+	if (used < word_cnt) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+		       p_adapter_id_str, address, used, word_cnt);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+/*
+ * Pulse the per-interface RAB init bits given in n_rab_intf_mask
+ * (see the block comment above for the hardware semantics).
+ * Always returns 0.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* First write goes through the register model (traced/debuggable) */
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	/* Second write hits BAR0 directly */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Perform the RAB bus "flip/flip" reset: write 0, then all interface bits,
+ * then all bits except bit 0. The final write leaves interfaces 1..n-1
+ * asserted - presumably released later in the product reset sequence
+ * (ref RMT#37020); TODO confirm against the RST module bring-up code.
+ * Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* _unused silences the compiler when NT_LOG compiles the arg away */
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the shared RAB DMA area - an inbound command ring and an
+ * outbound result ring of RAB_DMA_BUF_CNT words each - program its IOVA
+ * into the adapter, and sync the software ring pointers with the
+ * adapter's current hardware pointers.
+ * Returns 0 on success, -1 if the DMA allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		/* In-ring occupies the first half, out-ring the second */
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Begin a RAB DMA transaction: take the RAC mutex and mark the channel
+ * active. The mutex remains held until nthw_rac_rab_dma_commit() runs.
+ * Returns 0 on success, -1 if a transaction is already in progress.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	/* Nested begin is a caller bug - drop the lock and report it */
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_fpga_info->mp_adapter_id_str);
+	return -1;
+}
+
+/*
+ * Finalize the queued inbound command stream and kick off the transfer:
+ * append a completion marker, clear the outbound slot the completion will
+ * land in, and publish the new inbound write pointer to the adapter.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	/* Ring wrap: RAB_DMA_BUF_CNT is a power of two, so mask works */
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Wait for the completion marker written by nthw_rac_rab_dma_activate()
+ * to show up in the outbound ring, polling at 1 us intervals for up to
+ * RAB_DMA_WAIT iterations. On success the outbound read pointer is
+ * advanced past the completion word and the inbound free count is reset.
+ * Returns 0 on success, -1 on timeout (logged).
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Commit a transaction opened with nthw_rac_rab_dma_begin(): start the
+ * transfer, wait for its completion word, then clear the active flag and
+ * release the RAC mutex. Returns the wait status (0 or -1).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int n_status;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	n_status = nthw_rac_rab_dma_wait(p);
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return n_status;
+}
+
+/*
+ * Read one 32-bit register from BAR0 at byte offset reg_addr into *p_data.
+ * volatile access - MMIO must not be cached or elided by the compiler.
+ */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const uint8_t *const p_bar0 = (const uint8_t *)p_fpga_info->bar0_addr;
+
+	*p_data = *(const volatile uint32_t *)(p_bar0 + reg_addr);
+}
+
+/*
+ * Write one 32-bit value to BAR0 at byte offset reg_addr.
+ * volatile access - MMIO must not be reordered or elided by the compiler.
+ */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	uint8_t *const p_bar0 = (uint8_t *)p_fpga_info->bar0_addr;
+
+	*(volatile uint32_t *)(p_bar0 + reg_addr) = p_data;
+}
+
+/*
+ * Queue a RAB write (command word + word_cnt payload words) into the DMA
+ * inbound ring. The transfer itself is started later by
+ * nthw_rac_rab_dma_commit(). Returns 0 on success, -1 on an invalid
+ * length (asserts in debug builds) or when the ring lacks space.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): checks word_cnt + 3 free but consumes word_cnt + 1 -
+	 * presumably the extra two words are headroom for a read command and
+	 * the completion word added at commit; TODO confirm.
+	 */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	/* Echo mode: the adapter reflects the command + payload outbound */
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload into the ring, wrapping as needed */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read command into the DMA inbound ring and report, via
+ * buf_ptr, where the word_cnt result words will appear in the outbound
+ * ring once nthw_rac_rab_dma_commit() has completed the transfer.
+ * Returns 0 on success, -1 on invalid length or insufficient ring space.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * NOTE(fix): the original format string ended in a dangling
+		 * "0x%08X" with no matching argument (undefined behavior per
+		 * the fprintf contract) and lacked a newline; log m_in_free
+		 * like the sibling length-check message above.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Only the command word is consumed inbound; results flow outbound */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	/* Echo mode: the echoed command word occupies one outbound slot */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Tell the caller where the result words will land */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write: push a write command plus word_cnt data
+ * words and a completion command through the inbound register window, then
+ * poll for the completion to appear in the outbound window.
+ * Serialized by the RAC mutex; fails if a DMA transaction is active.
+ * Returns 0 on success, -1 on parameter error, timeout, or bad completion.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/*
+	 * NOTE(review): '>' admits address == (1 << RAB_ADDR_BW), which does
+	 * not fit the 16-bit address field - looks like it should be '>=';
+	 * TODO confirm before changing (same pattern for bus_id below).
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	/* word_cnt == 256 is allowed; presumably the hardware reads the
+	 * truncated count field of 0 as 256 - TODO confirm.
+	 */
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			/* Echo mode: verify the adapter echoed the command */
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 = RAB timeout flag (see RAC_RAB_BUF_FREE_TIMEOUT) */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
/*
 * Read 'word_cnt' 32-bit words from RAB bus 'bus_id' starting at 'address'
 * into p_data.
 *
 * Transaction: validate parameters, check the RAB buffer free/used state,
 * post one read command to the input buffer, wait for completion, then
 * drain the output buffer. The whole sequence is serialized by p->m_mutex.
 *
 * Returns 0 on success, -1 on invalid parameters or a bus/timeout error.
 */
int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
		       uint32_t word_cnt, uint32_t *p_data)
{
	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
	int res = 0;
	uint32_t rab_oper_rd;
	uint32_t word_cnt_expected;
	uint32_t buf_used;
	uint32_t buf_free;
	uint32_t in_buf_free;
	uint32_t out_buf_free;

	pthread_mutex_lock(&p->m_mutex);

	/*
	 * Parameter validation.
	 * NOTE(review): the '>' comparisons accept address == (1 << RAB_ADDR_BW)
	 * and bus_id == (1 << RAB_BUSID_BW), one past the largest encodable
	 * value - confirm whether '>=' was intended.
	 */
	if (address > (1 << RAB_ADDR_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal address: value too large %d - max %d\n",
		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
		res = -1;
		goto exit_unlock_res;
	}

	if (bus_id > (1 << RAB_BUSID_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
		res = -1;
		goto exit_unlock_res;
	}

	if (word_cnt == 0) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal word count: value is zero (%d)\n",
		       p_adapter_id_str, word_cnt);
		res = -1;
		goto exit_unlock_res;
	}

	if (word_cnt > (1 << RAB_CNT_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
		res = -1;
		goto exit_unlock_res;
	}

	/* Read buffer free register */
	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);

	/* Low 16 bits: input-buffer free entries; high 16 bits: output buffer */
	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;

	/* Read buffer used register */
	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);

	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
			     p->rac_rab_buf_used_ob_used_mask);

	/*
	 * Verify that output buffer can hold the number of words to be read,
	 * input buffer can hold one read command
	 * and that the input and output "used" buffer is 0
	 */
	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
		word_cnt_expected = word_cnt;

		/* Compose read command; the echo variant also returns the command word */
#if defined(RAB_DEBUG_ECHO)
		rab_oper_rd =
			(RAB_READ_ECHO << RAB_OPR_LO) |
			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
			(bus_id << RAB_BUSID_LO) | address;
		word_cnt_expected++;
#else
		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
			    (bus_id << RAB_BUSID_LO) | address;
#endif /* RAB_DEBUG_ECHO */

		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
				   rab_oper_rd);

		/* Wait until done */
		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
			res = -1;
			goto exit_unlock_res;
		}

#if defined(RAB_DEBUG_ECHO)
		/* Verify the echoed command matches what was issued */
		uint32_t rab_echo_oper_rd;

		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
				  &rab_echo_oper_rd);
		if (p->mn_param_rac_rab_ob_update) {
			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
					   0);
		}
		if (rab_oper_rd != rab_echo_oper_rd) {
			NT_LOG(ERR, NTHW,
			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
		}
#endif /* RAB_DEBUG_ECHO */

		/* Read data from output buffer */
		{
			uint32_t i;

			for (i = 0; i < word_cnt; i++) {
				nthw_rac_reg_read32(p_fpga_info,
						  p->rac_rab_ob_data_addr,
						  p_data);
				/* Some FPGAs require an explicit OB pointer update */
				if (p->mn_param_rac_rab_ob_update) {
					nthw_rac_reg_write32(p_fpga_info,
							     p->rac_rab_ob_data_addr,
							     0);
				}
				p_data++;
			}
		}

		/* Read buffer free register; MSB flags timeout/overflow */
		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
				  &buf_free);
		if (buf_free & 0x80000000) {
			/* Clear Timeout and overflow bits */
			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
					   0x0);
			NT_LOG(ERR, NTHW,
			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
			       p_adapter_id_str, bus_id, address, in_buf_free,
			       out_buf_free, buf_used);
			res = -1;
			goto exit_unlock_res;
		}

		res = 0;
		goto exit_unlock_res;
	} else {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
		       out_buf_free, buf_used);
		res = -1;
		goto exit_unlock_res;
	}

exit_unlock_res:
	pthread_mutex_unlock(&p->m_mutex);
	return res;
}
+
/*
 * Flush the RAB input/output buffers.
 *
 * Sets the FLUSH bit, clears the buffer-free status, then polls until only
 * the flush bit remains set in the buffer-used register (i.e. IB_USED and
 * OB_USED have drained to zero), and finally clears the flush bit again.
 *
 * NOTE(review): the poll loop spins up to 100000 iterations with no delay
 * between reads - confirm this bounded busy-wait is acceptable here.
 *
 * Returns 0 on success, -1 if the buffers did not drain.
 */
int nthw_rac_rab_flush(nthw_rac_t *p)
{
	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
	uint32_t data = 0;
	uint32_t retry;
	int res = 0;

	pthread_mutex_lock(&p->m_mutex);

	/* Set the flush bit */
	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
			   p->rac_rab_buf_used_flush_mask);

	/* Reset BUF FREE register */
	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);

	/* Wait until OB_USED and IB_USED are 0 */
	for (retry = 0; retry < 100000; retry++) {
		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);

		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
			break;
	}

	/* 'data' holds the last value read; a mismatch means we timed out */
	if (data != p->rac_rab_buf_used_flush_mask) {
		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
		       p_adapter_id_str);
		res = -1;
	}

	/* Clear flush bit when done */
	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);

	pthread_mutex_unlock(&p->m_mutex);
	return res;
}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
/*
 * Register Access Controller (RAC) context.
 *
 * Holds the FPGA module/register/field handles for the RAC block, the cached
 * raw register addresses and field masks used on the fast path, and the DMA
 * ring state for DMA-mode RAB access. All RAB transactions are serialized by
 * m_mutex.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;		/* owning FPGA instance */
	nt_module_t *mp_mod_rac;	/* RAC module handle */

	pthread_mutex_t m_mutex;	/* serializes all RAB transactions */

	/* Product parameters read from the FPGA model */
	int mn_param_rac_rab_interfaces;
	int mn_param_rac_rab_ob_update;	/* non-zero: OB reads need explicit ack */

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	/* RAB interface init/enable */
	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	int mn_fld_rab_init_bw;		/* width of the init field in bits */
	uint32_t mn_fld_rab_init_mask;

	/* Debug access registers */
	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	/* RAB input (command/data) and output (result) buffers */
	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	/* Buffer free/used status and flush control */
	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	/* DMA ring physical-address registers (lo/hi halves) */
	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	/* DMA ring read/write pointers */
	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	/* Non-memory-mapped bus access registers */
	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Cached raw register addresses for direct BAR access on the fast path */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Cached field masks for the buffer free/used registers */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;	/* bit position of OB_USED */

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* DMA-mode state */
	bool m_dma_active;	/* a DMA batch is open (begin..commit) */

	struct nt_dma_s *m_dma;	/* backing DMA allocation for both rings */

	volatile uint32_t *m_dma_in_buf;	/* host view of the IB ring */
	volatile uint32_t *m_dma_out_buf;	/* host view of the OB ring */

	uint16_t m_dma_out_ptr_rd;	/* next OB slot to consume */
	uint16_t m_dma_in_ptr_wr;	/* next IB slot to fill */
	uint32_t m_in_free;		/* free IB slots remaining */
};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
/* Cursor into a DMA ring buffer: 'base' is the ring start, 'size' its length
 * in 32-bit words, and 'index' the current word position.
 */
struct dma_buf_ptr {
	uint32_t size;
	uint32_t index;
	volatile uint32_t *base;
};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
/*
 * Initialize the STA context for instance 'n_instance' of the MOD_STA module.
 *
 * Resolves register/field handles, derives the number of counters from the
 * product parameters and the STA module version, and performs the initial
 * register sequence (disable TX stats, clear counters, clear the
 * toggle-missed flag, disable DMA).
 *
 * @param p          STA context (NULL: only probe for module existence)
 * @param p_fpga     FPGA instance to query
 * @param n_instance module instance index
 * @return 0 on success, -1 if the module instance does not exist
 */
int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	uint64_t n_module_version_packed64 = -1;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);

	/* Probe-only mode: report whether the module exists */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_stat = mod;

	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
	/* NOTE(review): %08lX with a uint64_t assumes LP64; PRIX64 would be
	 * portable - confirm against the project's log conventions.
	 */
	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
	       p->mn_instance, n_module_version_packed64);

	{
		nt_register_t *p_reg;
		/* STA_CFG register */
		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);

		/* CFG: fields NOT available from v. 3 */
		p->mp_fld_tx_disable =
			register_query_field(p_reg, STA_CFG_TX_DISABLE);
		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);

		/* STA_STATUS register */
		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
		p->mp_fld_stat_toggle_missed =
			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);

		/* HOST_ADR registers */
		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);

		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
	}

	/* Params */
	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;

	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);

	/* RX port count: try the VSWITCH parameter first, then fall back */
	p->m_nb_rx_ports =
		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
	if (p->m_nb_rx_ports == -1) {
		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
							  -1); /* non-VSWITCH */
		if (p->m_nb_rx_ports == -1) {
			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
								  NT_PORTS,
								  0); /* non-VSWITCH */
		}
	}

	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
	p->m_rx_port_replicate =
		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);

	/* Two counters (octets + packets) per color / per CAT function */
	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
			      2; /* VSWITCH */
	if (p->m_nb_color_counters == 0) {
		p->m_nb_color_counters =
			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
			2; /* non-VSWITCH */
	}

	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;

	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);

	/* 6 base counters per RX host buffer, +2 per buffer when DBS is
	 * present on STA >= 0.6
	 */
	p->m_nb_rx_hb_counters =
		(p->m_nb_rx_host_buffers *
		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
			   p->m_dbs_present :
			   0)));

	p->m_nb_tx_hb_counters = 0;

	p->m_nb_rx_port_counters =
		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
			  p->m_dbs_present :
			  0);
	p->m_nb_tx_port_counters = 0;

	p->m_nb_counters =
		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;

	/* Map the packed module version to a statistics layout version */
	p->mn_stat_layout_version = 0;
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
		p->mn_stat_layout_version = 6;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
		p->mn_stat_layout_version = 5;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
		p->mn_stat_layout_version = 4;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
		p->mn_stat_layout_version = 3;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
		p->mn_stat_layout_version = 2;
	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
		p->mn_stat_layout_version = 1;
	} else {
		p->mn_stat_layout_version = 0;
		NT_LOG(ERR, NTHW,
		       "%s: unknown module_version 0x%08lX layout=%d\n",
		       p_adapter_id_str, n_module_version_packed64,
		       p->mn_stat_layout_version);
	}
	assert(p->mn_stat_layout_version);

	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
		p->m_nb_rx_port_counters += 6;

	/* STA module 0.3+ adds TX stats */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
			p->m_nb_tx_ports >= 1)
		p->mb_has_tx_stats = true;

	/* STA module 0.3+ adds TX stat counters */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
		p->m_nb_tx_port_counters += 22;

	/* STA module 0.4+ adds TX drop event counter */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
		p->m_nb_tx_port_counters += 1; /* TX drop event counter */

	/*
	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
	 * duplicate counters
	 */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
		p->m_nb_rx_port_counters += 4;
		p->m_nb_tx_port_counters += 1;
	}

	/* VSWITCH profile overrides the per-port counter counts entirely */
	if (p->mb_is_vswitch) {
		p->m_nb_rx_port_counters = 5;
		p->m_nb_tx_port_counters = 5;
	}

	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);

	if (p->mb_has_tx_stats)
		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);

	/* Output params (debug) */
	NT_LOG(DBG, NTHW,
	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
	NT_LOG(DBG, NTHW,
	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
	       p->mn_stat_layout_version);
	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
	       p->m_nb_counters, p->m_nb_counters);
	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);

	/* Init: stop TX stats (when the field exists), clear all counters,
	 * acknowledge any pending toggle-missed flag, and disable DMA.
	 */
	if (p->mp_fld_tx_disable)
		field_set_flush(p->mp_fld_tx_disable);

	field_update_register(p->mp_fld_cnt_clear);
	field_set_flush(p->mp_fld_cnt_clear);
	field_clr_flush(p->mp_fld_cnt_clear);

	field_update_register(p->mp_fld_stat_toggle_missed);
	field_set_flush(p->mp_fld_stat_toggle_missed);

	field_update_register(p->mp_fld_dma_ena);
	field_clr_flush(p->mp_fld_dma_ena);
	field_update_register(p->mp_fld_dma_ena);

	return 0;
}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
/*
 * STA (statistics) module context.
 *
 * Holds the counter geometry derived from FPGA product parameters and the
 * STA module version, plus the register/field handles and the host DMA
 * buffer the FPGA streams counters into.
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;		/* owning FPGA instance */
	nt_module_t *mp_mod_stat;	/* MOD_STA handle */
	int mn_instance;		/* module instance index */

	int mn_stat_layout_version;	/* 1..6, derived from module version */

	bool mb_is_vswitch;		/* FPGA profile is VSWITCH */
	bool mb_has_tx_stats;		/* TX statistics are available */

	/* Port / host-buffer geometry from product parameters */
	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	int m_dbs_present;		/* DBS module present (extra counters) */

	int m_rx_port_replicate;

	/* Counter counts per category; m_nb_counters is the grand total */
	int m_nb_color_counters;

	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	int m_nb_counters;

	/* Register field handles (NULL when absent in this module version) */
	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	nt_field_t *mp_fld_tx_disable;	/* not available from STA v0.3+ */

	nt_field_t *mp_fld_cnt_freeze;	/* not available from STA v0.3+ */

	nt_field_t *mp_fld_stat_toggle_missed;

	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	/* Host DMA buffer: m_nb_counters 32-bit words + trailing timestamp */
	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;		/* last timestamp seen */

	uint64_t *mp_timestamp;		/* points just past the counters */
};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
/* Log severity levels. Values are distinct bits so implementations can use
 * them as a mask when filtering.
 */
enum nt_log_level {
	NT_LOG_ERR = 0x001,	/* error conditions */
	NT_LOG_WRN = 0x002,	/* warnings */
	NT_LOG_INF = 0x004,	/* informational */
	NT_LOG_DBG = 0x008,	/* debug */
	NT_LOG_DB1 = 0x010,	/* verbose debug, level 1 (off by default) */
	NT_LOG_DB2 = 0x020,	/* verbose debug, level 2 (off by default) */
};
+
/* Pluggable logging backend, registered via nt_log_init(). */
struct nt_log_impl {
	int (*init)(void);	/* one-time backend initialization */
	/* Emit one message; 'format' is printf-style with 'args' */
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);
	/* Returns 1 if debug is enabled for 'module', 0 if not, -1 on bad module */
	int (*is_debug)(uint32_t module);
};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Locate the first '\n' of the trailing end-of-line run in 's', ignoring
 * trailing spaces, e.g. for "hello_world\n\n\n" a pointer to the first of
 * the three newlines is returned.
 *
 * @param s NUL-terminated string (may be empty)
 * @return pointer into 's' at the start of the trailing EOL run, or NULL
 *         when the string does not end in a newline
 */
static char *last_trailing_eol(char *s)
{
	int i = (int)strlen(s) - 1;

	/* Empty string: the original code read s[-1] here (out-of-bounds) */
	if (i < 0)
		return NULL;
	/* Skip spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	if (s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+__rte_format_printf(2, 0)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
/* Release a helper string from ntlog_helper_str_alloc(); NULL is accepted. */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
/* A VFIO-mapped DMA allocation. */
struct nt_dma_s {
	uint64_t iova;	/* device (IO virtual) address from the VFIO mapping */
	uint64_t addr;	/* host virtual address of the backing memory */
	uint64_t size;	/* mapped size in bytes (power-of-two aligned) */
};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
/* VFIO map/unmap callbacks, registered via nt_util_vfio_init() and used by
 * nt_dma_alloc()/nt_dma_free().
 */
struct nt_util_vfio_impl {
	/* Map 'size' bytes at virt_addr for device DMA; returns 0 on success
	 * and writes the resulting IOVA to *iova_addr.
	 */
	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
			    uint64_t size);
	/* Undo a previous mapping; returns 0 on success */
	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
			      uint64_t size);
};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/*
+ * Store the VFIO map/unmap callbacks used by nt_dma_alloc()/nt_dma_free().
+ * 'impl' is copied by value and must be non-NULL.
+ */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate a DMA-able buffer of 'size' bytes with the given alignment on
+ * NUMA node 'numa' and map it through the registered VFIO callback.
+ * Returns NULL on allocation or mapping failure; free with nt_dma_free().
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	/* NOTE(review): ALIGN_SIZE(size) may exceed the 'size' bytes actually
+	 * allocated above -- confirm the DMA window cannot map past the buffer.
+	 */
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size/align are uint64_t: use PRIu64/PRIX64, not %u/%X (UB otherwise) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and release a buffer obtained from nt_dma_alloc().
+ * An unmap failure is logged as a warning; the memory is freed regardless.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* 'size' is uint64_t: use PRIu64, not %u (mismatched specifier is UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-30 16:51   ` Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name starts with '_' + uppercase, a reserved
+ * identifier in C -- consider renaming (e.g. COMMON_ADAPTER_DEFS_H_).
+ */
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+/* Upper bounds used to size per-adapter and per-port arrays */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+/* One monitor thread handle and run-flag per adapter slot; joined and
+ * cleared by stop_monitor_tasks().
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+/*
+ * Request every monitor task to stop.  When invoked with the sentinel
+ * value -1 (tear-down rather than a real signal), also join each task
+ * that was still running and clear its thread handle.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(monitor_task_is_running); idx++) {
+		const int was_running = monitor_task_is_running[idx];
+
+		monitor_task_is_running[idx] = 0;
+		if (signum == -1 && was_running != 0) {
+			void *task_result = NULL;
+
+			pthread_join(monitor_tasks[idx], &task_result);
+			memset(&monitor_tasks[idx], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Print a human-readable summary of the adapter (PCI identity, FPGA
+ * version/build, port counts, HW platform) to 'pfh', then append the
+ * statistics dump.  Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Render the dddd:bb:dd.f PCI identity once for reuse below */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * Create and initialize the SPI (v3) handle used for the sensor setup
+ * phase.  Returns NULL on allocation or initialization failure.
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *spi = nthw_spi_v3_new();
+
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * Create and initialize the SPI handle used for reading sensor values.
+ * Returns NULL on allocation or initialization failure.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *spi = nthw_spis_new();
+
+	if (!spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(spi);
+		return NULL;
+	}
+
+	return spi;
+}
+
+/*
+ * Register the adapter-level sensors on adapter->adapter_sensors and keep
+ * adapter_sensors_cnt in sync: first the FPGA temperature sensor, then --
+ * when the setup SPI is available -- the AVR-managed board sensors
+ * (fan, two PSU temperatures, PCB temperature).  AVR monitoring is
+ * stopped before (re)registering sensors and restarted afterwards.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				/* fixed log typo: "starteed" -> "started" */
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Bring up one adapter: derive HW identity from the PCI device id, build
+ * the device/adapter/port name strings (freed in nt4ga_adapter_deinit()),
+ * initialize the FPGA model, run the PCI TA/TG self-measurement when
+ * available, register sensors, initialize the per-product link code, the
+ * optional EPP module and finally the statistics module.
+ * Returns 0 on success, otherwise the failing sub-step's error code.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	/* Ownership of both strings passes to p_adapter_info; freed in deinit */
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	{
+		int i;
+
+		/* Per-port interface name strings; freed in deinit */
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		/* NOTE(review): early returns here and below leave the name
+		 * strings allocated above to be reclaimed by deinit -- confirm
+		 * callers always invoke nt4ga_adapter_deinit() on failure.
+		 */
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional: only initialized when present in the FPGA image */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down one adapter: stop monitor threads, stop statistics, shut down
+ * the FPGA model, reset the RAC/RAB, and release every string and sensor
+ * list allocated by nt4ga_adapter_init()/adapter_sensor_setup().
+ * Returns the result of nthw_rac_rab_reset().
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1 = tear-down: also join the monitor threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors: walk the singly-linked list, saving 'next'
+	 * before each node is released.
+	 */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors: per port, free the embedded sensor then the node */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid (see nt4ga_adapter_init(): bit-fields of
+	 * pci_device_id per DN-0060 section 9)
+	 */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;  /* (pci_device_id >> 4) & 0x00ff */
+	int hw_product_type; /* pci_device_id & 0x000f */
+	int hw_reserved1;    /* (pci_device_id >> 12) & 0x000f */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Aggregate state for one adapter; populated by nt4ga_adapter_init() and
+ * released by nt4ga_adapter_deinit().
+ */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	/* Sensor lists built by adapter_sensor_setup(); freed in deinit */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* malloc'ed identity strings (owned by this struct; freed in deinit) */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown; /* external shutdown request flag */
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port); /* device transitions to up */
+	int (*mto_stop)(int adapter, int port); /* device transitions to down */
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Flow-filter state for one adapter */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;            /* presumably interface count -- TODO confirm */
+	int n_queues_per_intf_cnt; /* presumably queues per interface -- TODO confirm */
+
+	struct flow_nic_dev *mp_flow_device; /* flow engine device handle */
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* 'p' is dereferenced, so the previous _unused marking on it was
+	 * misleading and has been dropped; 'port' really is unused -- the
+	 * capability mask is adapter-wide.
+	 */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: NIM presence as last recorded for this port
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: administrative link mode (enable/disable)
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	/* The stored action flag is the inverse of the requested admin state */
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+/* Report the administrative state (inverse of the stored disable flag) */
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ * Setting the link status is (currently) identical to setting the
+ * administrative port state, so simply forward the request.
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool link_status)
+{
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+/* Report the last observed link-up state for this port */
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ * Record the requested speed and mirror it into the reported link info.
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+/* Report the currently recorded link speed for this port */
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/* Intentionally a no-op: autoneg is not configurable yet.  The
+	 * previous dead local (p_link) has been removed.
+	 */
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	/* Autoneg state is not tracked; unconditionally report "on".
+	 * The previous dead local (p_link) has been removed.
+	 */
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code; only the requested value
+ * is recorded.
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+/* Report the currently recorded duplex mode for this port */
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+/* Report the currently requested loopback mode for this port */
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ * Returned by value: a snapshot of the port's NIM I2C context.
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: tx power
+ * Enable/disable the TX laser on QSFP28 modules.
+ * Returns 0 on success (or when the module is RX-only), 1 when the laser
+ * control call fails, and -1 for unsupported module types.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	/* Only the QSFP28 variants support laser control */
+	if (link_info->u.nim_ctx[port].port_type != NT_PORT_TYPE_QSFP28_SR4 &&
+			link_info->u.nim_ctx[port].port_type != NT_PORT_TYPE_QSFP28 &&
+			link_info->u.nim_ctx[port].port_type != NT_PORT_TYPE_QSFP28_LR4)
+		return -1;
+
+	nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/* RX-only modules have no TX laser to control */
+	if (nim_ctx->specific_u.qsfp.rx_only)
+		return 0;
+
+	if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable, -1) != 0)
+		return 1;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Aggregated state of one link/port, as maintained by the link
+ * monitoring thread.
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Speed/duplex/autoneg as reported for one port */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/*
+ * Requested port configuration: written by the nt4ga_port_set_* API,
+ * consumed by the monitoring thread.
+ */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+/* Per-port HW contexts for the 100G adapter variant */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Variant union: nim_ctx aliases the first field of every adapter type,
+ * so u.nim_ctx[port] is valid regardless of the active variant.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+/* Top-level link state for one adapter */
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+/* States of the link-up state machine used while bringing a link up */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+/* Per-port bookkeeping for the link monitoring thread */
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback mode on one port.
+ * mode/last_mode encoding: 1 = host loopback, 2 = line loopback, other = off.
+ * After any change the RX path is reset and, once out of reset, the BIP
+ * (and FEC, when not bypassed) counters are cleared.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		/* Host loopback must run without the GTY polarity swap */
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* Loopback off: undo whatever the previous mode enabled */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* NT200A01 build 2 and NT200A02 boards need the GTY polarity swap */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ * Fills *state from the MAC/PCS link summary and the GPIO PHY module
+ * presence pin. Also logs the raw summary, but only when it changed since
+ * the previous call (deduplicated via a static per-adapter/port buffer).
+ * Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Log the link summary only when it differs from the last one */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			/* Belt-and-braces termination of the cached copy */
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present
+ * Thin wrapper around the GPIO PHY module-present pin.
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX
+ * Always returns 0.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX
+ * Enables the TX path and selects the host as the TX source.
+ * Always returns 0.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX
+ * Always returns 0.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX
+ * Disables the TX path and deselects the host TX source.
+ * Always returns 0.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ * Pulses the RX path reset with a 10 ms hold and a 10 ms settle time.
+ * Always returns 0.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ * Applies (swap == true) or clears (swap == false) the board-specific GTY
+ * lane polarity swap for all four lanes of one port.
+ * NOTE(review): the polarity tables are dimensioned [2][4] but `port` is
+ * not bounds-checked here -- assumes callers only pass port 0 or 1 on the
+ * boards that need swapping; confirm against callers.
+ * Always returns 0.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ * Resets the RX path if the MAC/PCS flags a needed reset, a high bit
+ * error rate, or incomplete FEC alignment-marker lock.
+ * Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	bool rst_required;
+	bool ber;
+	bool fec_all_locked;
+
+	rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+
+	ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+
+	fec_all_locked = nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ * Resets the module via GPIO, pre-initializes the I2C context, validates
+ * the module type, and (de)asserts low-power mode according to `enable`.
+ * Returns 0 on success or when no module is present, negative on failure.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* only this module type is supported */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		/* Port is being taken down: quiesce the MAC/PCS first */
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* The module may have been pulled during the wait above */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ * Configures MAC/PCS (LED mode, equalization, timestamping, FEC, GTY
+ * tuning), initializes the NIM, and enables the RX/TX paths.
+ * Returns 0 on success, non-zero if NIM initialization fails.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		/* Unknown board: tuning selection below will assert */
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* First call with NULL probes for the GMF module -- TODO confirm */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Board-specific GTY TX tuning (pre/diff/post per lane) */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ * Main loop of the link monitoring thread: polls adapter and NIM sensors,
+ * applies administrative port enable/disable and loopback changes, detects
+ * NIM insertion/removal, (re)initializes ports as needed and reports link
+ * transitions. Runs until monitor_task_is_running[adapter_no] is cleared.
+ * Always returns 0.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/*
+				 * Log this port's NIM context; nim_ctx is a
+				 * per-port array, so index with i (previously
+				 * nim_ctx-> always logged port 0's data).
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ * pthread entry point: adapts common_ptp_nim_state_machine() to the
+ * void *(*)(void *) signature required by pthread_create().
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ * One-time setup of the per-port MAC/PCS, NIM I2C and GPIO PHY contexts,
+ * then starts the link monitoring thread (once per adapter).
+ * Returns 0 on success, or the first non-zero error from a HW init call
+ * or pthread_create().
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* IIC instance is offset by 2 from the port number */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* Initialize all 100G ports and start the link monitoring thread */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Locate and initialize the PCI test accelerator (TA) and the PCI
+ * read/write traffic generator (RD_TG/WR_TG) FPGA modules for one adapter.
+ *
+ * Returns the number of modules that could NOT be initialized (0 means
+ * all three modules were found), or -1 if the state pointer is NULL.
+ * Note: a missing module is logged as a warning, not treated as fatal.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	/* PCI read traffic generator */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI write traffic generator */
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI test accelerator (packet/error counters) */
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (non-zero) or disable (0) the PCI test accelerator. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+/* Read the TA length-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA payload-error counter into *p_data. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI read traffic generator: slot address,
+ * DMA physical address (slot offset within the buffer) and request
+ * size/wait/wrap flags.
+ * NOTE(review): slot_addr * req_size is computed in int before widening -
+ * assumes the product fits in 32 bits; verify for large buffers.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the read TG for num_iterations runs (0 stops it). Always returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll (1 ms per iteration, max ~1 s) until the read TG reports ready.
+ * Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI write traffic generator: slot address,
+ * DMA physical address (slot offset within the buffer), request size and
+ * the wait/wrap/increment flags.
+ * NOTE(review): slot_addr * req_size is computed in int before widening -
+ * assumes the product fits in 32 bits; verify for large buffers.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the write TG for num_iterations runs (0 stops it). Always returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll (1 ms per iteration, max ~1 s) until the write TG reports ready.
+ * Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Run one HIF bandwidth measurement pass using the FPGA PCI traffic
+ * generators and test accelerator.
+ *
+ * Test parameters (delay, packet size/count, direction, NUMA node) are
+ * taken from 'pri'; results are sampled into 'pri' (and 'sla' when a
+ * slave endpoint exists - currently always NULL below).
+ * Returns 0 on success, non-zero if any TG/TA step or data verification
+ * failed. Note: returns 0 (not an error) if DMA allocation fails.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			/* Program one TG slot per packet; last slot wraps */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			/*
+			 * The WR TG writes an incrementing 32-bit pattern after
+			 * an 8-byte header; check each word matches.
+			 */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		/*
+		 * NOTE(review): "Read only" starts the WR TG and "Write only"
+		 * starts the RD TG - direction appears to be named from the
+		 * host's perspective (host reads = FPGA writes); confirm.
+		 */
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Run the HIF throughput measurement over the selected NUMA node(s) and
+ * direction(s). numa_node == UINT8_MAX means "all"; direction <= 0 means
+ * sweep directions 1..3 (read / write / combined). Measurement parameters
+ * fall back to the TG_* defaults when <= 0. A zero delay is rejected.
+ *
+ * Returns -1 if n_delay is 0, otherwise 0 (errors are logged; per-run
+ * failures stop the sweep early but do not change the return value).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary endpoint parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave endpoint parameters */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/* Fixed copy-paste bug: was pri->n_ref_clk_cnt */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator parameters (used when callers pass <= 0) */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Per-adapter handles for the PCI traffic generator / test accelerator modules */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* PCI read traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* PCI write traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta; /* PCI test accelerator (counters) */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Locate and initialize the TA/TG modules; returns count of missing modules. */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Convert an inline PCAP-style 32:32 timestamp (upper 32 bits = seconds,
+ * lower 32 bits = nanoseconds) into a single nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Collect the current FPGA statistics snapshot into p_nt4ga_stat.
+ *
+ * Vswitch FPGAs cannot latch a DMA timestamp (only clear it), so for them
+ * the timestamp is taken from the OS clock; capture FPGAs use the DMA
+ * timestamp converted via timestamp2ns(). Always returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Create and initialize the STA (statistics) and RMC (reset/MAC control)
+ * module handles for this adapter and cache the host-buffer/port counts
+ * reported by the STA module. Returns 0 on success, -1 on allocation
+ * failure or a NULL stat state pointer.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		if (!p_nthw_rmc) {
+			/* Avoid leaking the stat handle when RMC alloc fails */
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache counts reported by the STA module */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and map the DMA area the FPGA writes statistics into, allocate
+ * the host-side counter structures (vswitch or inline/capture layout) and
+ * trigger the first statistics update. RMC traffic is blocked while the
+ * DMA area is being (re)configured. Returns 0 on success, -1 on any
+ * allocation failure.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/* Fixed: had 6 conversion specifiers but only 5 arguments (UB) */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %p %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/*
+		 * Fixed format specifiers: %02d for int adapter number,
+		 * explicit cast for the virtual address, PRIX32 for the
+		 * 32-bit size.
+		 */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova,
+		       (uint64_t)(uintptr_t)p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so un-written counters are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources allocated by nt4ga_stat_setup().
+ * free(NULL) is a no-op, so each pointer is freed and cleared
+ * unconditionally; clearing before the next free also keeps this safe
+ * should the virt/cap members alias. Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() is not documented NULL-safe - keep the guard */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Print per-port Rx/Tx totals (packets, octets, drops) for every physical
+ * port to the supplied stream. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/*
+ * Accumulate a vswitch (virt v1) statistics DMA snapshot into the host
+ * counter structures: color counters (with TCP flags packed into the top
+ * bits of the packet count), host-buffer counters, then per-port Rx/Tx
+ * counters and totals. Called with stat mutex locked.
+ * Returns 0 on success, -1 on NULL state or unsupported layout version.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int c, h, p;
+
+	/* Fixed: validate pointers BEFORE dereferencing (check was after use) */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: packet word carries TCP flags in its top 6 bits */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: 8 words per buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/*
+ * Accumulate the capture-profile (cap) v1 statistics from the DMA counter
+ * area into the adapter totals. Called with stat mutex locked.
+ *
+ * The DMA layout (stat layout version 6) is, in order:
+ *   color counters (2 x u32 per color), host-buffer counters (8 x u32 per
+ *   host buffer), per-Rx-port counter blocks, per-Tx-port counter blocks.
+ *
+ * Returns 0 on success, -1 on bad arguments or unsupported layout version.
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int c, h, p;
+
+	/* Validate arguments before any dereference */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two words (packets, bytes) per color */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: eight words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		struct host_buffer_counters *hb =
+			&p_nt4ga_stat->mp_stat_structs_hb[h];
+		const uint32_t *cnt = &p_stat_dma_virtual[h * 8];
+
+		hb->flush_packets += cnt[0];
+		hb->drop_packets += cnt[1];
+		hb->fwd_packets += cnt[2];
+		hb->dbs_drop_packets += cnt[3];
+		hb->flush_bytes += cnt[4];
+		hb->drop_bytes += cnt[5];
+		hb->fwd_bytes += cnt[6];
+		hb->dbs_drop_bytes += cnt[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports: one counter block of m_nb_rx_port_counters words per port */
+	for (p = 0; p < n_rx_ports; p++) {
+		struct port_counters_v2 *rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+		const uint32_t *cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+
+		rx->octets += cnt[0];
+
+		rx->broadcast_pkts += cnt[1];
+		rx->multicast_pkts += cnt[2];
+		rx->unicast_pkts += cnt[3];
+		rx->pkts_alignment += cnt[4];
+		rx->pkts_code_violation += cnt[5];
+		rx->pkts_crc += cnt[6];
+		rx->undersize_pkts += cnt[7];
+		rx->oversize_pkts += cnt[8];
+		rx->fragments += cnt[9];
+		rx->jabbers_not_truncated += cnt[10];
+		rx->jabbers_truncated += cnt[11];
+
+		rx->pkts_64_octets += cnt[12];
+		rx->pkts_65_to_127_octets += cnt[13];
+		rx->pkts_128_to_255_octets += cnt[14];
+		rx->pkts_256_to_511_octets += cnt[15];
+		rx->pkts_512_to_1023_octets += cnt[16];
+		rx->pkts_1024_to_1518_octets += cnt[17];
+		rx->pkts_1519_to_2047_octets += cnt[18];
+		rx->pkts_2048_to_4095_octets += cnt[19];
+		rx->pkts_4096_to_8191_octets += cnt[20];
+		rx->pkts_8192_to_max_octets += cnt[21];
+
+		rx->mac_drop_events += cnt[22];
+		rx->pkts_lr += cnt[23];
+		rx->duplicate += cnt[24];
+
+		rx->pkts_ip_chksum_error += cnt[25];
+		rx->pkts_udp_chksum_error += cnt[26];
+		rx->pkts_tcp_chksum_error += cnt[27];
+		rx->pkts_giant_undersize += cnt[28];
+		rx->pkts_baby_giant += cnt[29];
+		rx->pkts_not_isl_vlan_mpls += cnt[30];
+		rx->pkts_isl += cnt[31];
+		rx->pkts_vlan += cnt[32];
+		rx->pkts_isl_vlan += cnt[33];
+		rx->pkts_mpls += cnt[34];
+		rx->pkts_isl_mpls += cnt[35];
+		rx->pkts_vlan_mpls += cnt[36];
+		rx->pkts_isl_vlan_mpls += cnt[37];
+
+		rx->pkts_no_filter += cnt[38];
+		rx->pkts_dedup_drop += cnt[39];
+		rx->pkts_filter_drop += cnt[40];
+		rx->pkts_overflow += cnt[41];
+		/* DBS counter words are only valid when the DBS module exists */
+		rx->pkts_dbs_drop += p_nthw_stat->m_dbs_present ? cnt[42] : 0;
+		rx->octets_no_filter += cnt[43];
+		rx->octets_dedup_drop += cnt[44];
+		rx->octets_filter_drop += cnt[45];
+		rx->octets_overflow += cnt[46];
+		rx->octets_dbs_drop += p_nthw_stat->m_dbs_present ? cnt[47] : 0;
+
+		rx->ipft_first_hit += cnt[48];
+		rx->ipft_first_not_hit += cnt[49];
+		rx->ipft_mid_hit += cnt[50];
+		rx->ipft_mid_not_hit += cnt[51];
+		rx->ipft_last_hit += cnt[52];
+		rx->ipft_last_not_hit += cnt[53];
+
+		/* Rx totals; sum in 64-bit to avoid 32-bit wrap */
+		uint64_t new_drop_events_sum =
+			(uint64_t)cnt[22] + cnt[38] + cnt[39] + cnt[40] +
+			cnt[41] +
+			(p_nthw_stat->m_dbs_present ? cnt[42] : 0);
+
+		/* Packet total is the sum of all size-bucket counters */
+		uint64_t new_packets_sum =
+			(uint64_t)cnt[7] + cnt[8] + cnt[9] + cnt[10] +
+			cnt[11] + cnt[12] + cnt[13] + cnt[14] + cnt[15] +
+			cnt[16] + cnt[17] + cnt[18] + cnt[19] + cnt[20] +
+			cnt[21];
+
+		rx->drop_events += new_drop_events_sum;
+		rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += cnt[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports: one counter block of m_nb_tx_port_counters words per port */
+	for (p = 0; p < n_tx_ports; p++) {
+		struct port_counters_v2 *tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+		const uint32_t *cnt =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+
+		tx->octets += cnt[0];
+
+		tx->broadcast_pkts += cnt[1];
+		tx->multicast_pkts += cnt[2];
+		tx->unicast_pkts += cnt[3];
+		tx->pkts_alignment += cnt[4];
+		tx->pkts_code_violation += cnt[5];
+		tx->pkts_crc += cnt[6];
+		tx->undersize_pkts += cnt[7];
+		tx->oversize_pkts += cnt[8];
+		tx->fragments += cnt[9];
+		tx->jabbers_not_truncated += cnt[10];
+		tx->jabbers_truncated += cnt[11];
+
+		tx->pkts_64_octets += cnt[12];
+		tx->pkts_65_to_127_octets += cnt[13];
+		tx->pkts_128_to_255_octets += cnt[14];
+		tx->pkts_256_to_511_octets += cnt[15];
+		tx->pkts_512_to_1023_octets += cnt[16];
+		tx->pkts_1024_to_1518_octets += cnt[17];
+		tx->pkts_1519_to_2047_octets += cnt[18];
+		tx->pkts_2048_to_4095_octets += cnt[19];
+		tx->pkts_4096_to_8191_octets += cnt[20];
+		tx->pkts_8192_to_max_octets += cnt[21];
+
+		tx->mac_drop_events += cnt[22];
+		tx->pkts_lr += cnt[23];
+
+		/*
+		 * Tx totals.
+		 * Fix: the Tx drop sum previously indexed this block with the
+		 * Rx stride (m_nb_rx_port_counters); use the Tx block base.
+		 */
+		uint64_t new_drop_events_sum = cnt[22];
+
+		uint64_t new_packets_sum =
+			(uint64_t)cnt[7] + cnt[8] + cnt[9] + cnt[10] +
+			cnt[11] + cnt[12] + cnt[13] + cnt[14] + cnt[15] +
+			cnt[16] + cnt[17] + cnt[18] + cnt[19] + cnt[20] +
+			cnt[21];
+
+		tx->drop_events += new_drop_events_sum;
+		tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += cnt[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color (flow-class) statistics accumulated from the DMA counter area. */
+struct color_counters {
+	uint64_t color_packets;	/* Packets seen for this color */
+	uint64_t color_bytes;	/* Bytes seen for this color */
+	uint8_t tcp_flags;	/* NOTE(review): presumably accumulated TCP flags - confirm semantics */
+};
+
+/*
+ * Per-host-buffer statistics; mirrors the 8-word blocks the FPGA writes
+ * per Rx host buffer (packets first, then the matching byte counters).
+ */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Per-port counters for the capture (cap) profile, stat layout version 6.
+ * The FPGA section mirrors the per-port DMA counter words in order; the
+ * drop_events/pkts pair is derived in software from sums of those words.
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events;	/* Software sum of drop-related HW counters */
+	uint64_t pkts;		/* Software sum of the size-bucket counters */
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;		/* Only counted when the DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;	/* Only counted when the DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Per-port counters for the VSWITCH/inline (virt) profile. */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * FLM statistics (FLM is presumably the flow-matcher module - confirm).
+ * The two groups below track the HW module versions that introduced them.
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/*
+ * Aggregated statistics state for one adapter. Counter structs are
+ * accumulated from the DMA area by the collect functions; *_total/_base
+ * arrays support computing deltas between statistics reads.
+ */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat;	/* STA HW module handle */
+	nthw_rmc_t *mp_nthw_rmc;	/* RMC HW module handle */
+	struct nt_dma_s *p_stat_dma;		/* DMA area the FPGA writes counters into */
+	uint32_t *p_stat_dma_virtual;	/* CPU-visible mapping of the counter DMA area */
+	uint32_t n_stat_size;		/* Size of the counter DMA area */
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Exactly one member is active, selected by the adapter profile */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver;
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+/* Statistics module lifecycle: init -> setup -> (collect/dump)* -> stop */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+/* Write a human-readable statistics dump to the given stream */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* Poll HW counters and accumulate into p_nt4ga_stat.
+ * NOTE(review): the static collectors are documented as requiring the stat
+ * mutex; presumably this entry point must be called with it held too.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+/* QSFP register map: linear addresses per SFF-8636 - TODO confirm revision */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+/* Direction flags for the shared read/write helpers */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+/* QSFP vendor identification fields */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/* Return true if the copper SFP product number is a known tri-speed type. */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	/* Product numbers of supported 3-speed copper SFPs */
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	size_t idx;
+
+	/* Linear scan is fine - the list is tiny */
+	for (idx = 0; idx < ARRAY_SIZE(pn_trispeed_list); idx++) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Return true when the NIM type uses MSA page addressing (XFP/QSFP
+ * family), false for flat-addressed SFP/SFP+ and unknown types.
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	if (id == NT_NIM_SFP_SFP_PLUS)
+		return false;
+
+	if (id == NT_NIM_XFP || id == NT_NIM_QSFP ||
+			id == NT_NIM_QSFP_PLUS || id == NT_NIM_QSFP28)
+		return true;
+
+	NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+	       id);
+	return false;
+}
+
+/* Map the raw identifier byte read from the module into the NIM id enum. */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+/*
+ * Perform one I2C transfer of seq_cnt bytes at reg_addr on device
+ * i2c_addr. lin_addr is accepted for interface symmetry but unused here.
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Unused */
+
+	if (!do_write)
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+
+	return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+				 seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Already on the requested page - skip the (slow) page switch */
+	if (page_sel == curr_page_sel)
+		return 0;
+
+	/* Write the new page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes starting at linear address 'lin_addr' of
+ * the NIM address map, translating linear addresses into concrete
+ * (I2C device address, register address) pairs and switching MSA pages
+ * when page addressing is enabled.
+ *
+ * Flat (non-paged) layout:  [0..255] -> 0xA0, [256..511] -> 0xA2,
+ * [SFP_PHY_LIN_ADDR..+RNG-1] -> 0xAC (16-bit PHY registers, two bytes
+ * per register). Paged layout: addresses >= 128 select page
+ * (lin_addr/128 - 1) and access bytes [128..255] of device 0xA0.
+ *
+ * Returns 0 on success, -1 on range errors or I2C failure.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	/* NOTE(review): 'instance' appears to be offset by 2 relative to the
+	 * port number - confirm against the attach/probe code.
+	 */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	/* Transfer loop: each pass handles one contiguous run that fits the
+	 * current block/page, then advances the linear address.
+	 */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* For multi-byte (PHY) access two bytes consume one address */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+/*
+ * Read `length` bytes at linear address `lin_addr` into `data`.
+ * Thin wrapper around nim_nim_read_write_data_lin() in read mode;
+ * returns its status (0 on success).
+ */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, lin_addr, length,
+				       data, NIM_READ);
+}
+
+/*
+ * Write `length` bytes from `data` at linear address `lin_addr`.
+ * Thin wrapper around nim_nim_read_write_data_lin() in write mode;
+ * returns its status (0 on success).
+ */
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, lin_addr, length,
+				       data, NIM_WRITE);
+}
+
+/*
+ * Read and return a single byte at linear address `addr`.
+ *
+ * BUGFIX: `data` was uninitialized and the result of read_data_lin() was
+ * ignored, so a failed I2C read returned an indeterminate value (undefined
+ * behavior). Initialize to 0 so a failed read deterministically returns 0.
+ */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data = 0;
+
+	(void)read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+/*
+ * Fetch the transceiver identifier byte into ctx->nim_id.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* Only the very first byte is read, so page selection is irrelevant. */
+	const bool use_page_addressing = false;
+	int res = nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ);
+
+	return res != 0 ? -1 : 0;
+}
+
+/*
+ * Common context initialization for all module types: read the NIM
+ * identifier byte and reset every cached field to a known state.
+ * Returns 0 on success, the nim_read_id() error code otherwise.
+ */
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	int res;
+
+	ctx->nim_id = 0;
+	res = nim_read_id(ctx);
+	if (res != 0) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+
+	/* Clear all cached identification strings. */
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	/* Reset state that is derived later from module data. */
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;	/* -1: whole module, not a single lane */
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+/*
+ * Generate the static helpers sfp_read_vendor_info() and
+ * qsfp_read_vendor_info() that read the vendor strings out of the module.
+ *
+ * BUGFIX: the previous single-parameter macro hard-pasted the tokens
+ * "Q##SFP" in the address macros, so BOTH generated variants read from the
+ * QSFP linear addresses; the SFP variant therefore used the wrong offsets.
+ * The second parameter X now selects the address-macro prefix (empty for
+ * SFP, Q for QSFP) independently of the generated function name.
+ */
+#define XSFP_READ_VENDOR_INFO(x, X)                                          \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, X##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+XSFP_READ_VENDOR_INFO(,)
+XSFP_READ_VENDOR_INFO(q, Q)
+
+/*
+ * Fill in the SFP/SFP+ state. The nominal bit rate is read from the
+ * module over I2C; the rest of `state` is zeroed. Returns the I2C status.
+ */
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	/* Nominal bit rate is stored at SFP_BIT_RATE_ADDR in the EEPROM. */
+	return nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+}
+
+/*
+ * Fill in the QSFP-family state. Nothing is read from the hardware here:
+ * the nominal bit rate is implied by the transceiver identifier.
+ * Returns 0 on success, -1 for an unknown identifier.
+ */
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	if (ctx->nim_id == 12U) {
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+	} else if (ctx->nim_id == 13U) {
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+	} else if (ctx->nim_id == 17U) {
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+	} else {
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+/*
+ * Build the module state, dispatching on module family:
+ * SFP/SFP+ reads the bit rate from HW, QSFP-class derives it from the id.
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) != NT_NIM_SFP_SFP_PLUS)
+		return qsfp_nim_state_build(ctx, state);
+
+	return sfp_nim_state_build(ctx, state);
+}
+
+/*
+ * Map a transceiver identifier byte to a human readable name.
+ * Unknown/reserved identifiers map to "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	static const char * const nim_names[] = {
+		[0x00] = "UNKNOWN",
+		[0x01] = "GBIC",
+		[0x02] = "FIXED",
+		[0x03] = "SFP/SFP+",
+		[0x04] = "300 pin XBI",
+		[0x05] = "XEN-PAK",
+		[0x06] = "XFP",
+		[0x07] = "XFF",
+		[0x08] = "XFP-E",
+		[0x09] = "XPAK",
+		[0x0A] = "X2",
+		[0x0B] = "DWDM",
+		[0x0C] = "QSFP",
+		[0x0D] = "QSFP+",
+		[0x11] = "QSFP28",
+		[0x12] = "CFP4",
+	};
+
+	/* Gaps in the table (0x0E..0x10) are NULL and fall through below. */
+	if (nim_id < sizeof(nim_names) / sizeof(nim_names[0]) &&
+			nim_names[nim_id] != NULL)
+		return nim_names[nim_id];
+
+	return "ILLEGAL!";
+}
+
+/*
+ * Read and check the validity of the NIM basic data (two checksums:
+ * byte 63 covers bytes 0..62, byte 95 covers bytes 64..94 — the SFF-8472
+ * CC_BASE/CC_EXT layout). The verdict is cached in ctx->content_valid.
+ * This will also preload the cache.
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	/*
+	 * BUGFIX: the return value of read_data_lin() was ignored, so a
+	 * failed I2C read left `buf` indeterminate and the checksums were
+	 * computed over garbage. Treat a failed read as invalid content.
+	 */
+	if (read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]) != 0) {
+		ctx->content_valid = false;
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+		return;
+	}
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset the SFP soft rate-select bits: RS0 (Rx side) in the
+ * control/status register and RS1 (Tx side) in the extended
+ * control/status register. Both are read-modify-write sequences.
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t ctrl;
+
+	/* RS0: Soft Rate Select bit in Control/Status */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(ctrl),
+				&ctrl, NIM_READ);
+
+	ctrl = rx_rate_high ? (uint8_t)(ctrl | SFP_SOFT_RATE0_BIT) :
+	      (uint8_t)(ctrl & ~SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(ctrl),
+				&ctrl, NIM_WRITE);
+
+	/* RS1: Soft Rate Select bit in Extended Status/Control */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(ctrl),
+				&ctrl, NIM_READ);
+
+	ctrl = tx_rate_high ? (uint8_t)(ctrl | SFP_SOFT_RATE1_BIT) :
+	      (uint8_t)(ctrl & ~SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(ctrl),
+				&ctrl, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules require a change to the rate setting: for dual-rate
+ * SFP modules set the soft rate-select bits to the higher of the two
+ * supported rates when the requested speed is the higher one.
+ * Returns 0 on success, -1 when `speed` is outside the module's mask.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		/*
+		 * BUGFIX: the format string has three %s conversions but only
+		 * two arguments were passed (undefined behavior); __func__
+		 * was missing as the first argument.
+		 */
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Enable/disable the SFP TX laser via the soft TX-disable bit
+ * (read-modify-write of the control/status register).
+ * Returns 0 on success, the I2C error code otherwise.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t ctrl;
+	int res;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(ctrl), &ctrl, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		ctrl |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		ctrl &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(ctrl), &ctrl, NIM_WRITE);
+}
+
+/*
+ * Enable/disable the TX laser on a QSFP+/QSFP28 module, either for one
+ * lane (lane_idx >= 0) or for all lanes (lane_idx < 0).
+ * Returns 0 on success, -1 on I2C failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	const uint8_t mask = lane_idx < 0 ? QSFP_SOFT_TX_ALL_DISABLE_BITS :
+			   (uint8_t)(1U << lane_idx);
+	uint8_t ctrl;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_READ) != 0)
+		return -1;
+
+	ctrl = disable ? (uint8_t)(ctrl | mask) : (uint8_t)(ctrl & ~mask);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read a vendor information string at `addr` into p_data (max_len bytes
+ * including the terminating NUL). The string is cut at the first
+ * whitespace character and a terminator is always written, handling a
+ * missing string termination in the NIM data.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+
+	/* Read max_len - 1 payload bytes, leaving room for the NUL. */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Cut the string at the first whitespace character found. */
+	for (i = 0; i < max_len - 1; i++, p_data++) {
+		switch (*p_data) {
+		case ' ':
+		case '\n':
+		case '\t':
+		case '\v':
+		case '\f':
+		case '\r':
+			*p_data = '\0';
+			return 0;
+		default:
+			break;
+		}
+	}
+
+	/* No whitespace found: force termination after the last byte read. */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data, convert to
+ * meters and clamp each entry to the 16-bit range of ctx->len_info.
+ * A raw value of 255 means "longer than representable" and saturates.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++) {
+		uint32_t meters;
+
+		if (p_nim_len_info[i] == 255) {
+			ctx->len_info[i] = 65535;
+			continue;
+		}
+
+		meters = (uint32_t)p_nim_len_info[i] * p_nim_units[i];
+		ctx->len_info[i] = (uint16_t)(meters > 65535 ? 65535 : meters);
+	}
+}
+
+/*
+ * Read the basic QSFP/QSFP+ EEPROM data into the context: DMI options,
+ * vendor strings, supported fiber lengths and the required power class.
+ * Returns 0 on success, -1 on any I2C read failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	/* Average-power measurement mode flag — presumably avg vs. OMA; confirm against SFF-8636 */
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr]);
+
+	/* Populate vendor_name/prod_no/serial_no/date/rev from the EEPROM */
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	/* Raw supported-length bytes, converted to meters below */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 (encoded in bits 7..6, zero-based) */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the
+ * module may still support several rates without requiring the user to
+ * pick one; supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ * Returns true when selectRate() can be used.
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+	uint8_t reg;
+
+	/* Options register, bit 5: rate select implemented? */
+	reg = read_byte(ctx, options_reg_addr);
+	if (((reg >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options, bits 3..2: only type 2 (extended) is handled. */
+	reg = read_byte(ctx, enh_options_reg_addr);
+	uint8_t rate_select_type = (reg >> 2) & 0x03;
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible
+ * speed values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and
+ * 100G (25GBd); the values in () are the baud rates for PAM-4 and are
+ * valid for extended rate select, version 2.
+ * Returns 0 on success, -1 for an unsupported speed.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx >= 0) {
+		/*
+		 * Individual lanes.
+		 * Currently we do not support QSFP28 modules that support rate
+		 * selection when running on individual lanes, but that might
+		 * change in the future.
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+		return 0;
+	}
+
+	/* All lanes together */
+	if (ctx->speed_mask != (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+		/* For ordinary modules only this speed is supported */
+		if (speed != NT_LINK_SPEED_100G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+		return 0;
+	}
+
+	/* The speed mask indicates that the module supports rate selection. */
+	uint16_t data;
+
+	if (speed == NT_LINK_SPEED_100G) {
+		data = 0xAAAA;
+	} else if (speed == NT_LINK_SPEED_40G) {
+		data = 0x0000;
+	} else {
+		NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+		       nt_translate_link_speed(speed));
+		return -1;
+	}
+
+	/* Set speed for Rx and Tx on all lanes */
+	write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+	write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+	return 0;
+}
+
+/*
+ * Apply a link speed to the module, dispatching on module family.
+ * Returns 0 on success, -1 when the module type has no adjustable speed.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return nim_sfp_set_rate_select(ctx, speed);
+
+	if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		/* Without rate selection the NIM picks up the speed automatically */
+		if (!qsfp28_is_speed_selection_enabled(ctx))
+			return 0;
+
+		return qsfp28_set_link_speed(ctx, speed);
+	}
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information for an SFP/SFP+/SFP28 module and
+ * caches it in the context: checksum validation, DMI options, vendor
+ * strings, supported lengths and power-level requirement/status.
+ * Always returns 0.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	/*
+	 * CONSISTENCY FIX: the two consecutive `if (ctx->dmi_supp)` blocks
+	 * were merged into a single if/else — same behavior, no duplication.
+	 */
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	/* Populate vendor_name/prod_no/serial_no/date/rev from the EEPROM */
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers,
+		 * which is given in both km and 100m units, is equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI
+ * options that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ * Returns true when the product number was recognized.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	/* Full DMI sensor set; implemented by most modules in the table. */
+	const uint32_t all_dmi =
+		(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+		(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+		(1 << NIM_OPTION_TX_POWER);
+	const struct {
+		const char *prod_no;
+		uint32_t options;
+	} pn_tbl[] = {
+		/* FINISAR QSFP+ */
+		{ "FTL410QE1C", (1 << NIM_OPTION_TEMP) |
+				(1 << NIM_OPTION_SUPPLY) |
+				(1 << NIM_OPTION_TX_BIAS) |
+				(1 << NIM_OPTION_TX_POWER) },
+		{ "FTL410QE2C", (1 << NIM_OPTION_TEMP) |
+				(1 << NIM_OPTION_SUPPLY) },
+		{ "FTL4C1QE1C", all_dmi },
+		/*
+		 * AFBR-79E4Z: the digital diagnostic accuracy is not
+		 * guaranteed, so only the mandatory temperature sensor is
+		 * made available (although it will also be inaccurate).
+		 */
+		{ "AFBR-79E4Z", (1 << NIM_OPTION_TEMP) },
+		{ "AFBR-79E4Z-D", all_dmi },
+		{ "AFBR-79EQDZ", all_dmi },
+		/*
+		 * Avago BiDi NIMs: no sensors available, not even the
+		 * normally mandatory temp sensor, and this is ok since the
+		 * temp sensor is not mandatory on active optical modules
+		 * (SFF-8436_rev4.1, p67).
+		 */
+		{ "AFBR-79EBRZ", (1 << NIM_OPTION_RX_ONLY) }, /* RxOnly */
+		{ "AFBR-79EBPZ-NU1", 0 },
+		{ "AFBR-79EBPZ", 0 },
+		/* AVAGO QSFP28 */
+		{ "AFBR-89CDDZ", all_dmi },
+		{ "AFBR-89BDDZ", all_dmi }, /* BiDi */
+		/*
+		 * AFBR-89BRDZ, BiDi, RxOnly: sensors as AFBR-89BDDZ except Tx.
+		 * According to mail correspondence it is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off. The lasers can be
+		 * turned on but should probably not be, because the receivers
+		 * might be degraded - the cause for selling them as RxOnly.
+		 */
+		{ "AFBR-89BRDZ", (1 << NIM_OPTION_TEMP) |
+				 (1 << NIM_OPTION_SUPPLY) |
+				 (1 << NIM_OPTION_RX_POWER) |
+				 (1 << NIM_OPTION_RX_ONLY) },
+		/* Sumitomo QSFP28 */
+		{ "SQF1000L4LNGG01P", all_dmi },
+		{ "SQF1000L4LNGG01B", all_dmi },
+		{ "SQF1001L4LNGG01P", all_dmi },
+		{ "SQF1001L4LNGG01B", all_dmi },
+		{ "SQF1002L4LNGG01B", all_dmi },
+		/* Fujitsu QSFP28 */
+		{ "FIM37700/171", all_dmi },
+		{ "FIM37700/172", all_dmi },
+		/* InnoLight QSFP28 */
+		{ "TR-FC85S-NVS", all_dmi },
+		{ "TR-FC13L-NVS", all_dmi },
+		/* Finisar QSFP28 */
+		{ "FTLC9551REPM", all_dmi },
+		{ "FTLC9558REPM", all_dmi },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(pn_tbl) / sizeof(pn_tbl[0]); i++) {
+		if (strcmp(ctx->prod_no, pn_tbl[i].prod_no) == 0) {
+			ctx->options = pn_tbl[i].options;
+			return true;
+		}
+	}
+
+	/*
+	 * Unknown product number. DO NOTE: the temperature sensor is not
+	 * mandatory on active/passive copper and active optical modules.
+	 */
+	ctx->options = (1 << NIM_OPTION_TEMP);
+	return false;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly empirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ * When the sensor is deemed present, `sensor_option` is OR'ed into ctx->options.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	/* Assemble one big-endian 16-bit reading per lane. */
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		/* Sign-extend fields that are two's complement (e.g. temperature). */
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors by probing each one empirically via
+ * qsfpplus_find_qsfp_sensor_option() and accumulating the results in
+ * ctx->options.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	const struct {
+		uint16_t value_addr;	/* current reading(s) */
+		uint8_t lane_count;	/* 1 for module-wide, 4 for per-lane */
+		uint16_t limit_addr;	/* warning/alarm thresholds */
+		bool two_compl;		/* reading is two's complement */
+		uint32_t option;	/* NIM_OPTION_* bit to set if found */
+	} probes[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, probes[i].value_addr,
+						 probes[i].lane_count,
+						 probes[i].limit_addr,
+						 probes[i].two_compl,
+						 probes[i].option);
+}
+
/*
 * Classify an SFP/SFP+/SFP28 module from its EEPROM contents and set
 * ctx->port_type plus the SFP specific flags (sfp28, sfpplus, dual_rate,
 * cu_type, tri_speed and the hw/sw rate-select bits).
 * Numeric addresses below are linear offsets into the module EEPROM
 * (presumably the SFF-8472 A0h page layout — TODO confirm).
 */
static void sfp_find_port_params(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	uint16_t bit_rate_nom;
	uint8_t connector;
	uint8_t gig_eth_comp;
	uint8_t dmi_opt;
	uint8_t fiber_chan_tx_tech;
	unsigned int len_sm;
	unsigned int len_mm_50um;
	unsigned int len_mm_62_5um;

	ctx->specific_u.sfp.sfp28 = false;

	/* gigEthComp: */
	static const uint8_t eth_1000_b_t = 1 << 3;
	static const uint8_t eth_1000_b_sx = 1 << 0;
	static const uint8_t eth_1000_b_lx = 1 << 1;

	/* fiberChanTxTech: */
	static const uint8_t cu_passive = 1 << 2;
	static const uint8_t cu_active = 1 << 3;

	/* dmiOpt: */
	static const uint8_t dd_present = 1 << 6;

	/* connector: */
	static const uint8_t cu_pig_tail = 0x21;

	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;

	/* Nominal bit rate is stored in units of 100 MBit/s */
	read_data_lin(ctx, 12, sizeof(data), &data);
	bit_rate_nom = (uint16_t)(data * 100);

	read_data_lin(ctx, 2, sizeof(connector), &connector);
	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);

	/* Supported link lengths, used below as a fallback classifier */
	read_data_lin(ctx, 15, sizeof(data), &data);
	len_sm = (unsigned int)data * 100; /* Unit is 100m */

	read_data_lin(ctx, 16, sizeof(data), &data);
	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */

	read_data_lin(ctx, 17, sizeof(data), &data);
	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */

	/* First find out if it is a SFP or a SFP+ NIM */
	if (bit_rate_nom == 0) {
		/*
		 * A Nominal bit rate of zero indicates that it has not been defined and must
		 * be deduced from transceiver technology
		 */
		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
	} else if (bit_rate_nom == 25500) {
		/* 25.5 GBd nominal rate identifies an SFP28 module */
		/* SFF-8024 - 4.4 Extended Specification Compliance References */
		read_data_lin(ctx, 36, sizeof(data), &data);

		if (data == 0x02)
			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
		else if (data == 0x03)
			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
		else if (data == 0x0B)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
		else if (data == 0x0C)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
		else if (data == 0x0D)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
		else
			ctx->port_type = NT_PORT_TYPE_SFP_28;

		ctx->specific_u.sfp.sfp28 = true;
		ctx->specific_u.sfp.sfpplus = true;

		/*
		 * Whitelist of 25G transceivers known to also support 10G.
		 * There is no way to inquire about this capability.
		 */
		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
			ctx->specific_u.sfp.dual_rate = true;

			/* Change the port type for dual rate modules */
			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
		}

		return;
	}
	/* 10 GBit/s or above is SFP+ territory */
	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
	/* Then find sub-types of each */
	if (ctx->specific_u.sfp.sfpplus) {
		if (fiber_chan_tx_tech & cu_active) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
		} else if (fiber_chan_tx_tech & cu_passive) {
			if (connector == cu_pig_tail)
				ctx->port_type =
					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
			else
				ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
		} else {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
		}
		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
			ctx->specific_u.sfp.dual_rate = true;
		}

		read_data_lin(ctx, 65, sizeof(data), &data);
		/* Test hard RATE_SELECT bit */
		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);

		read_data_lin(ctx, 93, sizeof(data), &data);
		/* Test soft RATE_SELECT bit */
		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
	} else { /* SFP */
		/* 100M */
		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
			ctx->port_type = NT_PORT_TYPE_SFP_FX;
		/* 1G */
		} else {
			ctx->specific_u.sfp.cu_type = false;
			if (gig_eth_comp & eth_1000_b_sx) {
				ctx->port_type = NT_PORT_TYPE_SFP_SX;
			} else if (gig_eth_comp & eth_1000_b_lx) {
				ctx->port_type = NT_PORT_TYPE_SFP_LX;
			} else if (gig_eth_comp & eth_1000_b_t) {
				ctx->specific_u.sfp.tri_speed =
					sfp_is_supported_tri_speed_pn(ctx->prod_no);

				if (ctx->specific_u.sfp.tri_speed) {
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
				} else {
					ctx->port_type = NT_PORT_TYPE_SFP_CU;
				}
				ctx->specific_u.sfp.cu_type = true;
			} else {
				/*
				 * Not all modules report their ethernet compliance correctly so use
				 * length indicators
				 */
				if (len_sm > 0)
					ctx->port_type = NT_PORT_TYPE_SFP_LX;
				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
					ctx->port_type = NT_PORT_TYPE_SFP_SX;
			}

			/* Add Diagnostic Data suffix if necessary */
			if (dmi_opt & dd_present) {
				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
				else if (ctx->port_type ==
						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
			}
		}
	}
}
+
+
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		ctx->speed_mask = NT_LINK_SPEED_25G; /* Default for SFP28 */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		ctx->speed_mask = NT_LINK_SPEED_10G; /* Default for SFP+ */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else { /* SFP */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+			ctx->speed_mask = NT_LINK_SPEED_100M;
+		} else {
+			ctx->speed_mask = NT_LINK_SPEED_1G; /* Default for SFP */
+			if (ctx->specific_u.sfp.dual_rate ||
+					ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_100M;
+			if (ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_10M;
+		}
+	}
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	if (fiber_chan_speed & (1 << 7)) {
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t extended_specification_compliance_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &extended_specification_compliance_code);
+
+		switch (extended_specification_compliance_code) {
+		case 0x02:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+			break;
+		case 0x03:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+			break;
+		case 0x0B:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+			break;
+		case 0x0C:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+			break;
+		case 0x0D:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+			break;
+		case 0x25:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+			break;
+		case 0x26:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+			break;
+		case 0x27:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+			break;
+		default:
+			ctx->port_type = NT_PORT_TYPE_QSFP28;
+		}
+	} else {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
+				   0x03; /* bit 1..0 */
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR) {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask =
+				0; /* PAM-4 modules can only run on all lanes together */
+	} else {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_25G;
+
+		if (qsfp28_is_rate_selection_enabled(ctx)) {
+			/*
+			 * It is assumed that if the module supports dual rates then the other rate
+			 * is 10G per lane or 40G for all lanes.
+			 */
+			if (ctx->lane_idx < 0)
+				ctx->speed_mask |= NT_LINK_SPEED_40G;
+			else
+				ctx->speed_mask = NT_LINK_SPEED_10G;
+		}
+	}
+}
+
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	switch (device_tech & 0xF0) {
+	case 0xA0: /* Copper cable unequalized */
+	case 0xB0: /* Copper cable passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+		break;
+	case 0xC0: /* Copper cable, near and far end limiting active equalizers */
+	case 0xD0: /* Copper cable, far end limiting active equalizers */
+	case 0xE0: /* Copper cable, near end limiting active equalizers */
+	case 0xF0: /* Copper cable, linear active equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+		break;
+	default: /* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+		break;
+	}
+}
+
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	ctx->speed_mask = (ctx->lane_idx < 0) ? NT_LINK_SPEED_40G :
+			 (NT_LINK_SPEED_10G);
+}
+
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	int res = sfp_read_basic_data(ctx);
+
+	if (!res) {
+		sfp_find_port_params(ctx);
+		sfp_set_speed_mask(ctx);
+	}
+	return res;
+}
+
/*
 * Initialize the QSFP+ specific part of the NIM context.
 * @lane_idx: lane this context is bound to, or -1 when it covers all lanes.
 */
static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
{
	/*
	 * NOTE(review): only the upper bound is asserted; values below -1
	 * would pass — confirm callers never supply such values.
	 */
	assert(lane_idx < 4);
	ctx->specific_u.qsfp.qsfp28 = false; /* qsfp28_preinit() upgrades this later */
	ctx->lane_idx = lane_idx;
	ctx->lane_count = 4; /* QSFP+ always has four lanes */
}
+
/*
 * Pre-initialize a QSFP+ module: read the basic EEPROM data, classify the
 * port, detect which optional sensors exist, read the TX_DISABLE and
 * RX-only capabilities and set the supported speed mask.
 * @lane_idx: lane this context covers, or -1 for all lanes.
 * Returns the result of qsfpplus_read_basic_data() (zero on success).
 */
static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
{
	qsfpplus_construct(ctx, lane_idx);
	int res = qsfpplus_read_basic_data(ctx);

	if (!res) {
		qsfpplus_find_port_params(ctx);
		/*
		 * If not on the known modules list try to figure out which sensors that are present
		 */
		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
			NT_LOG(DBG, NTHW,
			       "NIM options not known in advance - trying to detect");
			qsfpplus_get_qsfp_options_from_data(ctx);
		}

		/*
		 * Read if TX_DISABLE has been implemented
		 * For passive optical modules this is required while it for copper and active
		 * optical modules is optional. Under all circumstances register 195.4 will
		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
		 */
		uint8_t value;

		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);

		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;

		if (ctx->tx_disable)
			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);

		/*
		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
		 * module default disabled.
		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
		 * decided that this option should not be detected automatically but from PN
		 */
		ctx->specific_u.qsfp.rx_only =
			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
		qsfpplus_set_speed_mask(ctx);
	}
	return res;
}
+
/*
 * Wait until a freshly reset QSFP28 module is ready for EEPROM access.
 * Uses the module's "init complete" flag when present (revision
 * compliance >= 7 and register 221 bit 4 set); otherwise falls back to a
 * fixed 500 ms delay. Only waits for the context covering all lanes (-1)
 * or the first lane; other lane contexts return immediately.
 */
static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	bool init_complete_flag_present = false;

	/*
	 * Revision compliance
	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
	 */
	read_data_lin(ctx, 1,
		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);

	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
	if (ctx->lane_idx > 0)
		return;

	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
		/* Check if init complete flag is implemented */
		read_data_lin(ctx, 221, sizeof(data), &data);
		init_complete_flag_present = (data & (1 << 4)) != 0;
	}

	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
	       init_complete_flag_present);

	/*
	 * If the init complete flag is not present then wait 500ms that together with 500ms
	 * after reset (in the adapter code) should be enough to read data from upper pages
	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
	 * Probably because access to the paged address space is required.
	 */
	if (!init_complete_flag_present) {
		NT_OS_WAIT_USEC(500000);
		return;
	}

	/* Otherwise wait for the init complete flag to be set */
	int count = 0;

	while (true) {
		if (count > 10) { /* 1 s timeout */
			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
			break;
		}

		/* Register 6 bit 0: init complete — polled every 100 ms */
		read_data_lin(ctx, 6, sizeof(data), &data);

		if (data & 0x01) {
			NT_LOG(DBG, NTHW, "Module ready after %dms",
			       count * 100);
			break;
		}

		NT_OS_WAIT_USEC(100000); /* 100 ms */
		count++;
	}
}
+
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		if (ctx->prod_no == nim_list[i]) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		qsfp28_wait_for_ready_after_reset(ctx);
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(struct nim_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
/* Minimal per-port NIM state */
typedef struct sfp_nim_state {
	uint8_t br; /* bit rate, units of 100 MBits/sec */
} sfp_nim_state_t, *sfp_nim_state_p;

/*
 * Per-module context: I2C access state plus everything learned about the
 * inserted NIM during pre-initialization.
 */
typedef struct nim_i2c_ctx {
	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
	uint8_t instance;
	uint8_t devaddr; /* I2C device address — presumably one of nim_i2c_0xa0/0xa2/0xac; confirm */
	uint8_t regaddr;
	uint8_t nim_id; /* Identifier byte, one of nt_nim_identifier_e */
	nt_port_type_t port_type; /* Detected by the *_find_port_params() helpers */

	/* EEPROM identity strings; sized for 16 chars plus NUL */
	char vendor_name[17];
	char prod_no[17];
	char serial_no[17];
	char date[9];
	char rev[5];
	bool avg_pwr;
	bool content_valid;
	uint8_t pwr_level_req;
	uint8_t pwr_level_cur;
	uint16_t len_info[5];
	uint32_t speed_mask; /* Speeds supported by the NIM */
	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
	uint8_t lane_count;
	uint32_t options; /* Bitmask of (1 << nim_option_t) capability bits */
	bool tx_disable; /* Module implements TX_DISABLE (register 195.4) */
	bool dmi_supp;

	/* Module family specific state; selected by the detected NIM type */
	union {
		struct {
			bool sfp28;
			bool sfpplus;
			bool dual_rate;
			bool hw_rate_sel;
			bool sw_rate_sel;
			bool cu_type;
			bool tri_speed;
			bool ext_cal;
			bool addr_chg;
		} sfp;

		struct {
			bool rx_only;
			bool qsfp28; /* true when the module was promoted to QSFP28 */
			union {
				struct {
					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
					bool media_side_fec_ctrl;
					bool host_side_fec_ctrl;
					bool media_side_fec_ena;
					bool host_side_fec_ena;
				} qsfp28;
			} specific_u;
		} qsfp;

	} specific_u;
} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
/* One node in the per-port singly linked list of NIM sensors */
struct nim_sensor_group {
	struct nt_adapter_sensor *sensor; /* The sensor this node represents */
	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi); /* Callback used to read the sensor */
	struct nim_i2c_ctx *ctx; /* NIM the sensor belongs to */
	struct nim_sensor_group *next; /* Next sensor in the list, or NULL */
};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify NIM based on it's ID and some register reads
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific type of NIMS.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
/*
 * Bit positions used in nim_i2c_ctx.options to record which optional
 * features/sensors a module implements (stored as (1 << NIM_OPTION_*)).
 */
typedef enum {
	NIM_OPTION_TEMP = 0,
	NIM_OPTION_SUPPLY,
	NIM_OPTION_RX_POWER,
	NIM_OPTION_TX_BIAS,
	NIM_OPTION_TX_POWER,
	NIM_OPTION_TX_DISABLE,
	/* Indicates that the module should be checked for the two next FEC types */
	NIM_OPTION_FEC,
	NIM_OPTION_MEDIA_SIDE_FEC,
	NIM_OPTION_HOST_SIDE_FEC,
	NIM_OPTION_RX_ONLY
} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return "NotAvail";
+	case NT_LINK_SPEED_10M:
+		return "10M";
+	case NT_LINK_SPEED_100M:
+		return "100M";
+	case NT_LINK_SPEED_1G:
+		return "1G";
+	case NT_LINK_SPEED_10G:
+		return "10G";
+	case NT_LINK_SPEED_25G:
+		return "25G";
+	case NT_LINK_SPEED_40G:
+		return "40G";
+	case NT_LINK_SPEED_50G:
+		return "50G";
+	case NT_LINK_SPEED_100G:
+		return "100G";
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return "Unhandled";
+	}
+}
+
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	uint64_t n_link_speed = 0ULL;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		n_link_speed = 0UL;
+		break;
+	case NT_LINK_SPEED_10M:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100M:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_1G:
+		n_link_speed = (1ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_10G:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_25G:
+		n_link_speed = (25ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_40G:
+		n_link_speed = (40ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_50G:
+		n_link_speed = (50ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100G:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		n_link_speed = 0UL;
+		break;
+	}
+	return n_link_speed;
+}
+
/*
 * Render a link speed bitmask as a comma separated string into the
 * caller supplied buffer of size @length, truncating when it is full.
 * Returns @buffer.
 *
 * Fixed: strncat()'s third argument is the maximum number of characters
 * to APPEND (excluding the NUL), not the total buffer size — passing
 * @length could write past the end of @buffer. The remaining free space
 * is now passed instead, and a zero-length buffer is rejected up front
 * (the old code wrote buffer[0] unconditionally).
 */
const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
				      uint32_t length)
{
	size_t len = 0;

	if (length == 0)
		return buffer;

	buffer[0] = 0;

	for (int i = 0; i < 32; i++) {
		if (((1U << i) & link_speed_mask) == 0)
			continue;

		len = strlen(buffer);

		/* Separator between entries, only when it fits entirely */
		if (len > 0 && (length - len - 1) > 2) {
			strncat(buffer, ", ", length - len - 1);
			len = strlen(buffer);
		}

		if (len < (length - 1))
			strncat(buffer, nt_translate_link_speed(1 << i),
				length - len - 1);
	}

	return buffer;
}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	/* Read "count" big endian 16 bit values starting at "addr". */
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * Fix: the original statement was a self-assignment no-op.
+		 * Swap from big endian to host order, as the SFP sensor code
+		 * does for the same kind of data.
+		 */
+		*p_lane_values = htons(*p_lane_values);
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	/* Temperature is a single signed 16 bit register value. */
+	uint16_t *p_raw = (uint16_t *)p_value;
+
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR,
+				      NIM_OPTION_TEMP, 1, p_raw);
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Supply voltage is a single unsigned 16 bit register value. */
+	const uint8_t n_values = 1;
+
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, n_values, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* One 16 bit TX bias value per lane (four lanes). */
+	const uint8_t n_lanes = 4;
+
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, n_lanes, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* One 16 bit TX optical power value per lane (four lanes). */
+	const uint8_t n_lanes = 4;
+
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, n_lanes, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/*
+	 * Fix: read the RX power registers (QSFP_RX_PWR_LIN_ADDR), not the
+	 * TX power registers - the original was a copy-paste of the TX
+	 * getter and returned TX power as RX power.
+	 */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_temperature(sg->ctx, &raw_temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Scale by 10/256 - presumably 1/256 degC raw to 0.1 degC; confirm. */
+	update_sensor_value(sg->sensor, (int)(raw_temp * 10 / 256));
+}
+
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_voltage;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_supply_voltage(sg->ctx, &raw_voltage)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Divide by 10 as in the SFP path (100 uV raw -> 1 mV reported). */
+	update_sensor_value(sg->sensor, (int)(raw_voltage / 10));
+}
+
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lane_values[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lane_values)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Scale by 2 - presumably the 2 uA/LSB of SFF-8636; confirm. */
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lane_values[lane] * 2);
+}
+
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lane_values[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lane_values)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Report one raw value per active lane. */
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lane_values[lane]);
+}
+
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lane_values[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lane_values)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Report one raw value per active lane. */
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lane_values[lane]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+/* Fix: SFP_CU_LINK_LEN_LIN_ADDR was defined twice; keep a single define. */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	/* Without DMI support there is no diagnostic data to read. */
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap big endian to host order */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to host order */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/* Apply slope/256 + offset; saturate to the 16 bit range. */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	/* Temperature is signed; slope/offset calibration may apply. */
+	bool b_ok = sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR,
+				       SFP_TEMP_SLOPE_LIN_ADDR,
+				       SFP_TEMP_OFFSET_LIN_ADDR, p_value,
+				       true, ctx);
+
+	return b_ok;
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Supply voltage is unsigned; slope/offset calibration may apply. */
+	bool b_ok = sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR,
+				       SFP_VOLT_SLOPE_LIN_ADDR,
+				       SFP_VOLT_OFFSET_LIN_ADDR, p_value,
+				       false, ctx);
+
+	return b_ok;
+}
+
+/*
+ * Read NIM bias current
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* TX bias is unsigned; slope/offset calibration may apply. */
+	bool b_ok = sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				       SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				       SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value,
+				       false, ctx);
+
+	return b_ok;
+}
+
+/*
+ * Read NIM TX optical power
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* TX optical power is unsigned; slope/offset calibration may apply. */
+	bool b_ok = sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				       SFP_TX_PWR_SLOPE_LIN_ADDR,
+				       SFP_TX_PWR_OFFSET_LIN_ADDR, p_value,
+				       false, ctx);
+
+	return b_ok;
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap big endian to host order */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/*
+		 * Byte swap each 32 bit float in place.
+		 * NOTE(review): punning float via uint32_t* skirts strict
+		 * aliasing - consider memcpy if this ever miscompiles.
+		 */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* rx_pwr_cal[3]..[0] hold Coeff1..Coeff4 (highest index first). */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Reject results that exceed the 16 bit output range */
+		if (rx_power > 65535)
+			return false;
+
+		/* Clamp negative results to zero */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* RX power uses polynomial calibration rather than slope/offset. */
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR,
+					     p_value);
+}
+
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_temperature(sg->ctx, &raw_temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Scale by 10/256 - presumably 1/256 degC raw to 0.1 degC; confirm. */
+	update_sensor_value(sg->sensor, (int)(raw_temp * 10 / 256));
+}
+
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_voltage;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_supply_voltage(sg->ctx, &raw_voltage)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Unit: 100uV -> 1mV */
+	update_sensor_value(sg->sensor, (int)(raw_voltage / 10));
+}
+
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_bias;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_bias_current(sg->ctx, &raw_bias)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Scale by 2 - presumably the 2 uA/LSB of SFF-8472; confirm. */
+	update_sensor_value(sg->sensor, (int)(raw_bias * 2));
+}
+
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_power;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_power(sg->ctx, &raw_power)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Report the (possibly calibrated) raw value unchanged. */
+	update_sensor_value(sg->sensor, (int)raw_power);
+}
+
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_power;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_rx_power(sg->ctx, &raw_power)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Report the (possibly calibrated) raw value unchanged. */
+	update_sensor_value(sg->sensor, (int)raw_power);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/*
+	 * Allocate a zero-initialized GMF instance; calloc() replaces the
+	 * original malloc()+memset() pair. Returns NULL on allocation
+	 * failure, as before.
+	 */
+	nthw_gmf_t *p = calloc(1, sizeof(nthw_gmf_t));
+
+	return p;
+}
+
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	/* Accepts NULL; scrubs the struct before freeing it. */
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	/*
+	 * Bind GMF instance "n_instance" of "p_fpga" to "p" and resolve all
+	 * register/field handles. Returns 0 on success, -1 if the instance
+	 * does not exist. Calling with p == NULL only probes for existence.
+	 */
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields: *_get_* variants fail hard if absent. */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 when not present in the FPGA. */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields: *_query_* variants may return NULL. */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	/* An administratively blocked instance must stay disabled. */
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Turn inter-frame-gap handling on or off. */
+	const uint32_t n_val = enable ? 1 : 0;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, n_val);
+}
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Optional field: only present on some FPGA versions. */
+	if (p->mp_ctrl_ifg_tx_now_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Optional field: only present on some FPGA versions. */
+	if (p->mp_ctrl_ifg_tx_on_ts_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	/* Optional field: only present on some FPGA versions. */
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			    enable ? 1 : 0);
+}
+
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	/*
+	 * Consistency: pass an explicit 0/1 like every sibling setter
+	 * instead of relying on implicit bool-to-uint32_t conversion
+	 * (same resulting value either way).
+	 */
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	/*
+	 * Write the raw IFG speed value. Returns 0 on success, -1 if the
+	 * value does not fit the SPEED field. The field may be wider than
+	 * 32 bits, in which case two 32 bit words are written.
+	 */
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(n_bit_width >= 22);
+
+	return n_bit_width;
+}
+
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	/*
+	 * Program the IFG speed from an absolute bit rate limit relative to
+	 * the link speed. Half the SPEED field width holds the fraction.
+	 * Returns the result of nthw_gmf_set_ifg_speed_raw() (0 or -1).
+	 * NOTE(review): assumes n_link_speed != 0 and n_rate_limit_bits != 0
+	 * (division by the adjusted rate below) - confirm callers guarantee it.
+	 */
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	/*
+	 * Program the IFG speed as a percentage of link speed. 0%% and 100%%
+	 * both write a raw value of 0 (no rate limiting); values above 99%%
+	 * but below 100%% are rejected with -1.
+	 */
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		/* Half the SPEED field width holds the fractional part. */
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	/* The 64 bit delta is written as two 32 bit words. */
+	uint32_t *p_words = (uint32_t *)&delta;
+
+	field_set_val(p->mp_ifg_clock_delta_delta, p_words, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	/* Optional register: only present on some FPGA versions. */
+	if (p->mp_ifg_clock_delta_adjust == NULL)
+		return;
+
+	field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+		     (uint32_t *)&delta_adjust, 2);
+	field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+}
+
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	/* The 64 bit slack value is written as two 32 bit words. */
+	uint32_t *p_words = (uint32_t *)&slack;
+
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, p_words, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	/* Write the debug lane-marker compensation value. */
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation,
+			    compensation);
+}
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	/* Collect the sticky status fields into a GMF_STATUS_MASK_* mask. */
+	uint32_t n_status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed) != 0)
+		n_status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted) != 0)
+		n_status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return n_status;
+}
+
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	/* Flush each sticky field whose GMF_STATUS_MASK_* bit is set. */
+	const bool b_underflowed =
+		(status & GMF_STATUS_MASK_DATA_UNDERFLOWED) != 0;
+	const bool b_adjusted = (status & GMF_STATUS_MASK_IFG_ADJUSTED) != 0;
+
+	if (b_underflowed)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if (b_adjusted)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/*
+	 * Fix: use ULLONG_MAX (>= 2^64 - 1) as the "register not present"
+	 * sentinel. ULONG_MAX is only 2^32 - 1 on ILP32 platforms, which
+	 * would leave the upper 32 bits of the sentinel zero.
+	 */
+	uint64_t value = ULLONG_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/*
+	 * Fix: use ULLONG_MAX (>= 2^64 - 1) as the "register not present"
+	 * sentinel; ULONG_MAX is only 2^32 - 1 on ILP32 platforms.
+	 */
+	uint64_t value = ULLONG_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	/*
+	 * Permanently disable this GMF instance (used to enforce license
+	 * expiry, per the struct comment). Order matters: once
+	 * m_administrative_block is set, nthw_gmf_set_enable() refuses to
+	 * touch the enable field, so disable first, then raise the flag.
+	 */
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/*
+ * NOTE(review): identifiers with a leading double underscore are reserved
+ * for the implementation; consider NTHW_GMF_H_ like nthw_rmc.h uses.
+ */
+
+/* Bitmask values used by nthw_gmf_get/set_status_sticky(). */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/* Handle for one GMF FPGA module instance: cached register/field pointers. */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	/* CTRL register and its fields */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	/* Optional register: may be NULL on some FPGA images */
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* Optional registers: may be NULL on some FPGA images */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+/* Allocation / lifetime */
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+/* IFG speed configuration; non-zero return indicates failure */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+/* Sticky status bits, expressed as GMF_STATUS_MASK_* bitmasks */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+/*
+ * Si5340 Rev D register/value initialization sequence for the NT200A02 U23
+ * clock chip. Declared static: a non-static const object defined in a
+ * header has external linkage in C and causes duplicate-symbol link errors
+ * if this header is ever included from more than one translation unit.
+ */
+static si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/*
+	 * Allocate a zero-initialized RMC handle; calloc replaces the
+	 * original malloc+memset pair. Returns NULL on allocation failure.
+	 */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	/* Scrub the handle before releasing it; NULL is accepted. */
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	/* Probe-only mode: with p == NULL just report whether the instance exists */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the generic port count parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL is mandatory; STATUS/DBG/MAC_IF are optional (query, may be NULL) */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	/* Read back the current BLOCK_MAC_PORT mask from hardware. */
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	/* All-ones is returned when the optional STATUS register is absent. */
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	/* All-ones is returned when the optional STATUS register is absent. */
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	/* All-ones is returned when the optional DBG register is absent. */
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	/* All-ones is returned when the optional MAC_IF register is absent. */
+	if (!p->mp_reg_mac_if)
+		return 0xffffffff;
+
+	return field_get_updated(p->mp_fld_mac_if_err);
+}
+
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	/* Write and flush the BLOCK_MAC_PORT mask (1-bit per port = blocked). */
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (p->mb_administrative_block)
+		return;	/* administratively blocked: leave the hardware as-is */
+
+	field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+	field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+}
+
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	/*
+	 * Build a mask with a 0-bit for each unblocked port: the low
+	 * mn_ports bits (or mn_nims bits when b_is_slave) are cleared,
+	 * everything above stays blocked.
+	 * NOTE(review): `~0U << n` is undefined for n >= 32 — assumes
+	 * mn_ports/mn_nims < 32; confirm against supported FPGA images.
+	 */
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	/* Latch the flag so nthw_rmc_block()/nthw_rmc_unblock() become no-ops */
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/* Handle for one RMC FPGA module instance: cached register/field pointers. */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports;
+	int mn_nims;
+	bool mb_is_vswitch;
+
+	/* When set, block()/unblock() are no-ops (license enforcement) */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional; NULL when absent) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional; NULL when absent) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+/* Allocation / lifetime; init returns 0 on success, -1 on missing instance */
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+/* Status getters return all-ones when the backing register is absent */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Next free FPGA sensor-result slot per adapter (_NTSD_MAX_NUM_ADAPTERS_).
+ * static: the array is private to this file (only touched via
+ * get_fpga_idx()) and must not leak into the driver's global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 };
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format packs endian into b0..1 and sign into b2..3 (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	/* NOTE(review): fpga_idx is returned (and consumed) even if setup failed */
+	return fpga_idx;
+}
+
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/*
+	 * Poll one AVR sensor result slot and push the converted value
+	 * into the sensor group. Initialize the result so a failed/partial
+	 * read cannot feed an indeterminate value into conv_func (the
+	 * original left it uninitialized).
+	 * NOTE(review): sensor_read()'s status is still ignored, as before;
+	 * consider checking it and skipping the update on failure.
+	 */
+	uint32_t sensor_result = 0;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(sensor_result));
+}
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	/*
+	 * Allocate and wire up one AVR-backed sensor group.
+	 * Returns NULL on allocation failure (group or sensor).
+	 */
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* Avoid dereferencing NULL below and do not leak sg */
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	/*
+	 * Hand out the next free FPGA sensor-result slot for this adapter
+	 * and advance the per-adapter counter (post-increment).
+	 */
+	return s_fpga_indexes[adapter_no]++;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+/*
+ * Create one AVR-backed sensor group: configures the AVR to place readings
+ * for (avr_dev, avr_dev_reg) in an FPGA result slot, masked/formatted per
+ * end/si/mask, and converted via conv_func. Returns NULL on failure.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	/*
+	 * Read the FPGA die temperature from the TEMPMON register and push
+	 * it into the sensor group. The SPI handle is unused here.
+	 * sg->monitor is now checked too: the original dereferenced it
+	 * unconditionally and would crash if monitor setup had failed.
+	 */
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL || sg->monitor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/* Raw ADC reading -> temperature; scale/offset per the FPGA sensor
+	 * (TODO confirm against the TEMPMON documentation) */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	/*
+	 * Create the FPGA die-temperature sensor group backed by the
+	 * TEMPMON module. Returns NULL only on group allocation failure;
+	 * monitor/sensor sub-allocations may still be NULL inside the
+	 * returned group (the read callback guards against that).
+	 */
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+/*
+ * Create the FPGA die-temperature sensor group backed by the TEMPMON
+ * FPGA module. Returns NULL on allocation failure.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	/* Allocate an uninitialized monitor; the caller runs tempmon_init(). */
+	struct nt_fpga_sensor_monitor *p_monitor =
+		malloc(sizeof(struct nt_fpga_sensor_monitor));
+
+	if (p_monitor == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return p_monitor;
+}
+
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	/*
+	 * Bind the monitor to the TEMPMON module's STAT.TEMP field.
+	 * Each lookup failure now returns early: the original only logged
+	 * and then passed the NULL module/register to the next lookup,
+	 * which dereferences it.
+	 */
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return;
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return;
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+/* Allocate a monitor handle (uninitialized); returns NULL on failure */
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+/* Bind the monitor to the TEMPMON FPGA module's STAT.TEMP field */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+/* Short-hands for the sensor description tables below */
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* SFP level 1 sensors: supply voltage, TX bias and optical power per lane */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* QSFP level 0: the unnamed temperature sensor (name set automatically) */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* QSFP level 1: supply plus per-lane (1-4) TX bias and TX/RX power */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+/* Static NIM sensor description tables, defined in nim_sensors.c */
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/* One sensor-monitoring setup entry, byte-packed for the AVR wire format */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/*
+ * NOTE(review): packing is restored here, so struct sensor_mon_setup16
+ * below is not itself packed. The layout appears unchanged because the
+ * packed element type has alignment 1, but confirm the #pragma placement
+ * is intentional.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res = 1;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = (uint16_t)*rxsz, .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
/*
 * Start, stop or reset sensor monitoring in the AVR.
 *
 * @param s_spi SPI channel to the AVR.
 * @param ctrl  Control command (see enum sensor_mon_control).
 * @return 0 on success, non-zero on transfer failure or unexpected reply.
 */
int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
{
	int error;
	size_t rx_size = 0;

	/*
	 * NOTE(review): sizeof(ctrl) is the size of the enum type (ABI
	 * dependent, typically 4) and the value is passed through a uint16_t
	 * buffer pointer - confirm the AVR expects this width and the host's
	 * byte order.
	 */
	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
		     (uint16_t *)(&ctrl), &rx_size, NULL);

	if (error != 0)
		return error;

	/* No response payload is expected for a control command */
	if (rx_size != 0) {
		NT_LOG(ERR, ETHDEV,
		       "%s: Returned data: Expected size = 0, Actual = %zu",
		       __func__, rx_size);
		return 1;
	}

	return 0;
}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
/*
 * @internal
 * @brief AVR Device Enum
 *
 * Global names identifying an AVR device on Generation2 adapters.
 */
enum ntavr_device {
	NTAVR_MAINBOARD, /* Mainboard AVR device */
	NTAVR_FRONTBOARD /* Frontboard AVR device */
};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
/*
 * Sensor types
 * The type selects the unit of the value/limit fields in nt_info_sensor_s.
 */
enum nt_sensor_type_e {
	NT_SENSOR_TYPE_UNKNOWN = 0,
	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
	NT_SENSOR_TYPE_NUMBER = 7, /* Count of sensor types, not a type itself */
};
+
/*
 * Generic SFP/SFP+/SFP28 sensors
 *
 * These sensors should be used instead of all adapter specific SFP sensors
 * that have been deprecated.
 */
enum nt_sensors_sfp {
	NT_SENSOR_SFP_TEMP, /* Module temperature */
	NT_SENSOR_SFP_SUPPLY, /* Supply voltage */
	NT_SENSOR_SFP_TX_BIAS, /* TX bias current */
	NT_SENSOR_SFP_TX_POWER, /* TX optical power */
	NT_SENSOR_SFP_RX_POWER, /* RX optical power */
};
+
/*
 * Generic QSFP/QSFP+/QSFP28 sensors
 *
 * These sensors should be used instead of all adapter specific QSFP sensors
 * that have been deprecated.  The numeric suffix is the module lane (1-4).
 */
enum nt_sensors_qsfp {
	NT_SENSOR_QSFP_TEMP,
	NT_SENSOR_QSFP_SUPPLY,
	NT_SENSOR_QSFP_TX_BIAS1,
	NT_SENSOR_QSFP_TX_BIAS2,
	NT_SENSOR_QSFP_TX_BIAS3,
	NT_SENSOR_QSFP_TX_BIAS4,
	NT_SENSOR_QSFP_TX_POWER1,
	NT_SENSOR_QSFP_TX_POWER2,
	NT_SENSOR_QSFP_TX_POWER3,
	NT_SENSOR_QSFP_TX_POWER4,
	NT_SENSOR_QSFP_RX_POWER1,
	NT_SENSOR_QSFP_RX_POWER2,
	NT_SENSOR_QSFP_RX_POWER3,
	NT_SENSOR_QSFP_RX_POWER4,
};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
/*
 * Sensor subtypes
 * Refines the meaning of a sensor's type where one type has several readings.
 */
enum nt_sensor_sub_type_e {
	NT_SENSOR_SUBTYPE_NA = 0, /* No subtype applies */
	/*
	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
	 * amplitude measured)
	 */
	NT_SENSOR_SUBTYPE_POWER_OMA,
	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
	NT_SENSOR_SUBTYPE_POWER_TOTAL
};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
/*
 * Sensor source
 * Values are distinct bits and may be combined into a mask when selecting
 * sources.
 */
enum nt_sensor_source_e {
	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
	/*
	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
	 * depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_PORT =
		0x01,
	/*
	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_LEVEL1_PORT =
		0x02,
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_PORT =
		0x04, /* Level 2 sensors located in a port */
#endif
	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
		0x10, /* Level 1 sensors mounted on the adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
		0x20, /* Level 2 sensors mounted on the adapter */
#endif
};
+
/*
 * Sensor state
 * Reported in nt_info_sensor_s::state.
 */
enum nt_sensor_state_e {
	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
	NT_SENSOR_STATE_NOT_PRESENT =
		4 /* The sensor is not present, for example, SFP without diagnostics */
};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
/*
 * Master/Slave
 * Role of an adapter when two adapters are bonded.
 */
enum nt_bonding_type_e {
	NT_BONDING_UNKNOWN, /* Unknown bonding type */
	NT_BONDING_MASTER, /* Adapter is master in the bonding */
	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
};
+
/* Generic adapter-level sensor indices */
enum nt_sensors_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
};
+
/*
 * Adapter types
 * Values are part of the public API; aliases mark renamed/deprecated entries.
 */
enum nt_adapter_type_e {
	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X100 =
		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
	/*  */
	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
	/*  */
	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
#endif
	NT_ADAPTER_TYPE_4GARCH_HAMOA =
		(1U
		 << 29), /* Bit marking an adapter as a 4GArch Hamoa adapter */
	NT_ADAPTER_TYPE_4GARCH =
		(1U << 30), /* Bit marking an adapter as a 4GArch adapter */
	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
};
+
/* The NT200E3 adapter sensor id's */
typedef enum nt_sensors_adapter_nt200_e3_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
	NT_SENSOR_NT200E3_MCU_TEMP,
	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_NT200E3_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_NIM_POWER,

	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
} nt_sensors_adapter_nt200_e3_t;
+
/*
 * The following sensors are deprecated - generic types should be used instead
 * The NIM temperature sensor must be the one with the lowest sensor_index
 * (enum value) in order to be shown by the monitoring tool in port mode
 */
enum nt_sensors_port_nt200_e3_2_e {
	/* Public sensors */
	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */

	/* Diagnostic sensors (Level 1) */
	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/*
 * Identity conversion for sensors delivering a signed 16-bit raw value:
 * reinterpret the low 16 bits as a signed quantity.
 */
int null_signed(uint32_t p_sensor_result)
{
	int16_t raw = (int16_t)p_sensor_result;

	return raw;
}
+
/*
 * Identity conversion for sensors delivering an unsigned 16-bit raw value:
 * keep only the low 16 bits.
 */
int null_unsigned(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * ******************************************************************************
 * EXAR7724: convert a raw Vch reading to NT units (1 mV).
 * Datasheet: Vout = ReadVal * 0.015 V (PRESCALE is accounted for),
 * i.e. 15 mV per step.
 * ******************************************************************************
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	const uint32_t mv_per_step = 15;

	return p_sensor_result * mv_per_step;
}
+
/*
 * ******************************************************************************
 * EXAR7724: convert a raw Vin reading to NT units (1 mV).
 * Datasheet: Vout = ReadVal * 0.0125 V, i.e. 25/2 mV per step; the value is
 * scaled up before the integer division to preserve precision.
 * ******************************************************************************
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	uint32_t scaled = p_sensor_result * 25;

	return scaled / 2;
}
+
/*
 * ******************************************************************************
 * EXAR7724: convert a raw Tj reading to NT units (0.1 C).
 * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
 *                       = ReadVal * 5K
 * ******************************************************************************
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * Kelvin -> 0.1 C uses 2730 rather than the more exact 2732, so the
	 * reported values stay aligned with the sensor's 5-degree steps.
	 */
	int kelvin_tenths = (int)(p_sensor_result * 50);

	return kelvin_tenths - 2730;
}
+
/*
 * ******************************************************************************
 * Decode the Linear Technology Linear_5s_11s format: the result is
 * Y * 2**N, where N = b[15:11] is a 5-bit two's complement integer and
 * Y = b[10:0] is an 11-bit two's complement integer.  The multiplier scales
 * the decoded value to Napatech units.
 * ******************************************************************************
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	int mantissa = value & 0x07FF;
	int exponent = (value >> 11) & 0x1F;

	/* Sign-extend the 11-bit mantissa */
	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent */
	if (exponent & 0x10)
		exponent -= 0x20;

	mantissa *= multiplier;

	/* Apply 2**exponent via multiply/divide (shifting a negative is UB) */
	if (exponent > 0)
		return mantissa * (1 << exponent);
	if (exponent < 0)
		return mantissa / (1 << (-exponent));
	return mantissa;
}

/*
 * ******************************************************************************
 * Temperature conversion from Linear_5s_11s format; multiplier 10 yields
 * NT units of 0.1 C.
 * NOTE(review): the decoded value is truncated to uint16_t, so negative
 * temperatures wrap to large positive values - confirm this is intended.
 * ******************************************************************************
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	int decoded = conv5s_11s((uint16_t)p_sensor_result, 10);

	return (uint16_t)decoded;
}
+
/*
 * ******************************************************************************
 * MPS MP2886A: convert a raw Tj reading to NT units (0.1 C).
 * READ_TEMPERATURE (register 0x8D) is a 2-byte unsigned integer, so the low
 * 16 bits are returned unchanged.
 * ******************************************************************************
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	uint16_t raw_temperature = (uint16_t)p_sensor_result;

	return raw_temperature;
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * ******************************************************************************
 * DS1775: convert a raw temperature reading to Napatech representation.
 * NT unit: 0.1 C; native unit: 1/256 C per LSB.
 * ******************************************************************************
 */
int ds1775_t(uint32_t p_sensor_result)
{
	uint32_t tenths_of_degree = (p_sensor_result * 10) / 256;

	return tenths_of_degree;
}
+
/*
 * ******************************************************************************
 * FAN: convert a tick count to RPM.
 * NOTE(review): the original comment states 2 ticks/revolution while the
 * code computes ticks * 60 / 4 - confirm the tachometer rate (the /4
 * suggests 4 ticks per revolution or a halved sampling window).
 * ******************************************************************************
 */
int fan(uint32_t p_sensor_result)
{
	const uint32_t seconds_per_minute = 60U;

	return p_sensor_result * seconds_per_minute / 4;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/* Alarm handling mode for a sensor (semantics applied by the alarm consumers) */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM,
	NT_SENSOR_LOG_ALARM,
	NT_SENSOR_DISABLE_ALARM,
};
+
/*
 * Sensor Class types
 * Groups sensors by the physical component being measured.
 */
enum nt_sensor_class_e {
	NT_SENSOR_CLASS_FPGA =
		0, /* Class for FPGA based sensors e.g. FPGA temperature */
	NT_SENSOR_CLASS_MCU =
		1, /* Class for MCU based sensors e.g. MCU temperature */
	NT_SENSOR_CLASS_PSU =
		2, /* Class for PSU based sensors e.g. PSU temperature */
	NT_SENSOR_CLASS_PCB =
		3, /* Class for PCB based sensors e.g. PCB temperature */
	NT_SENSOR_CLASS_NIM =
		4, /* Class for NIM based sensors e.g. NIM temperature */
	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g. any sensors */
};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
/*
 * Port of the sensor class
 * Runtime state for one adapter/port sensor.
 */
struct nt_adapter_sensor {
	uint8_t m_adapter_no; /* Adapter number; 0xFF until assigned */
	uint8_t m_intf_no; /* Interface (port) number; 0xFF until assigned */
	uint8_t fpga_idx; /* for AVR sensors: result slot in the FPGA mirror */
	enum sensor_mon_sign si; /* Signedness of the raw sensor value */
	struct nt_info_sensor_s info; /* Public info: value, min/max, name, state */
	enum nt_sensor_event_alarm_e alarm; /* Alarm reporting mode */
	bool m_enable_alarm; /* Alarm evaluation enabled */
};
+
/* FPGA register handles used to read a sensor directly from the FPGA */
struct nt_fpga_sensor_monitor {
	nt_fpga_t *fpga; /* Owning FPGA instance */
	nt_module_t *mod; /* FPGA module containing the sensor register */

	nt_register_t *reg; /* Register holding the sensor data */
	nt_field_t **fields; /* Fields of interest within the register */
	uint8_t fields_num; /* Number of entries in fields */
};
+
/*
 * Sensor description.
 * Describe the static behavior of the sensor.
 * Used by allocate_sensor_by_description() to instantiate a sensor.
 */
struct nt_adapter_sensor_description {
	enum nt_sensor_type_e type; /* Sensor type. */
	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
	unsigned int index; /* Sensor group index. */
	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
	char name[20]; /* Sensor name (NUL-terminated, max 19 chars). */
};
+
/*
 * Node in a singly linked list of sensors; binds a sensor to the routine
 * that reads it and the function converting its raw value to NT units.
 */
struct nt_sensor_group {
	struct nt_adapter_sensor *sensor; /* Owned; freed by sensor_deinit() */
	struct nt_fpga_sensor_monitor *monitor; /* Owned; freed by sensor_deinit() */
	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* Fetch and update value */

	/* conv params are needed to call current conversion functions */
	int (*conv_func)(uint32_t p_sensor_result); /* Raw value -> NT unit */
	/* i2c interface for NIM sensors */

	struct nt_sensor_group *next; /* Next group in the list (NULL at tail) */
};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
/* Maximum sensor name length, excluding the terminating NUL */
#define NT_INFO_SENSOR_NAME 50
struct nt_info_sensor_s {
	enum nt_sensor_source_e
	source; /* The source of the sensor (port or adapter on which the sensor resides) */
	/*
	 * The source index - the adapter number for adapter sensors and port number for port
	 * sensors
	 */
	uint32_t source_index;
	/*
	 * The sensor index within the source index (sensor number on the adapter or sensor number
	 * on the port)
	 */
	uint32_t sensor_index;
	enum nt_sensor_type_e type; /* The sensor type */
	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
	int32_t value; /* The current value */
	int32_t value_lowest; /* The lowest value registered */
	int32_t value_highest; /* The highest value registered */
	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (NUL-terminated) */
	enum nt_adapter_type_e
	adapter_type; /* The adapter type where the sensor resides */
};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	/* MCU (Micro Controller Unit) temperature sensor */
	NT_SENSOR_NT200A02_MCU_TEMP,
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-30 16:51   ` Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate and zero-initialize a CAT module wrapper.
+ *
+ * Returns the new object, or NULL on allocation failure.
+ * Ownership passes to the caller; release with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc() zeroes the object, replacing the malloc()+memset() pair. */
+	struct cat_nthw *p = calloc(1, sizeof *p);
+
+	return p;
+}
+
+/*
+ * Release a CAT module wrapper allocated by cat_nthw_new().
+ * The object is scrubbed before being returned to the allocator so that
+ * stale register/field handles cannot be reused by accident.
+ * A NULL argument is a no-op.
+ */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Set the debug/trace level for the CAT module.
+ * NOTE(review): p->m_cat is dereferenced unconditionally, so this must
+ * only be called after a successful cat_nthw_init().
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+/*
+ * cat_nthw_init - bind a CAT module wrapper to FPGA module instance n_instance.
+ *
+ * Resolves every register and field handle used by the other cat_nthw_*
+ * helpers.  Handles fetched with register_get_field() are mandatory for the
+ * selected FPGA build; handles fetched with register_query_field() or
+ * module_query_register() are optional and stay NULL when the build lacks
+ * them - the corresponding setters assert on them before use.
+ *
+ * p          - wrapper to initialize, or NULL to probe for module presence
+ * p_fpga     - FPGA handle the module lives on
+ * n_instance - module instance number (asserted to be in 0..255)
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the module exists, touch nothing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	/* -1 (the fallback) means the KM interface count parameter is absent. */
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	/* Single-KM-interface builds expose the unnumbered KM_OR field; */
+	/* multi-interface builds expose KM0_OR/KM1_OR (KM1_OR optional). */
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	/* Likewise KCE/KCS/FTE: unnumbered registers feed bank 0 only on */
+	/* single-interface builds; numbered registers fill banks 0 and 1. */
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	/* Optional CTE fields - NULL when absent from this FPGA build. */
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	/* Optional CFN fields - NULL when absent from this FPGA build; the */
+	/* matching setters assert non-NULL before writing. */
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	/* Optional KCC register set. */
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	/* Optional CCE register set. */
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	/* Optional CCS register set. */
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN table address (ADR) for subsequent data-field writes. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN control CNT field (number of consecutive entries to access).
+ *
+ * NOTE(review): this function was named "r", which looks like an accidental
+ * truncation - every sibling follows the cat_nthw_cfn_* naming scheme.  The
+ * canonical name is provided here and "r" is kept as a thin wrapper so any
+ * existing caller or declaration of either name still links.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/* Deprecated alias for cat_nthw_cfn_cnt(); do not use in new code. */
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	cat_nthw_cfn_cnt(p, val);
+}
+
+/*
+ * CFN data-field setters.  Each writes one field of the CFN data register
+ * image at the currently selected address (see cat_nthw_cfn_select());
+ * values are committed to hardware by cat_nthw_cfn_flush().  Setters for
+ * optional fields assert that cat_nthw_init() resolved the field handle.
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/* Optional field: present only on some FPGA builds (see cat_nthw_init()). */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* Multi-word field: val must point to at least mn_words 32-bit words. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR exists only on multi-KM-interface builds (see cat_nthw_init()). */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Commit the staged CFN control and data register images to the FPGA. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE/KCS/FTE table helpers.  "index" selects the register bank (0 or 1);
+ * bank 1 is populated only on multi-KM-interface builds, and bank 0 maps to
+ * the unnumbered registers otherwise (see cat_nthw_init()).
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+/* Commit the staged KCE registers of the given bank to the FPGA. */
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+/* Commit the staged KCS registers of the given bank to the FPGA. */
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+/* Commit the staged FTE registers of the given bank to the FPGA. */
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE table accessors.  The msk/hst/epp/tpe/rrb fields are asserted before
+ * use - presumably they are only present on some FPGA variants; confirm
+ * against the supported FPGA register definitions.
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Flush the staged CTE control and data registers. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS table accessors: select/cnt plus the CAT_A/CAT_B data fields. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT table accessors; NFV_SB is asserted - presumably optional per FPGA. */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT table accessors. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO table accessors. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	/* Signed offset; the implicit int32_t -> uint32_t conversion keeps the
+	 * two's-complement bit pattern, which is what the field expects. */
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK table accessors. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	/* RCK data is written as a whole register (no individual fields) and
+	 * explicitly marked dirty so a later flush pushes it out. */
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN table accessors. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC/CCE table accessors.  Every pointer is asserted before use -
+ * presumably these tables only exist on some FPGA variants, so the
+ * members may be left NULL by init; confirm against the FPGA definitions.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	/* The key occupies two 32-bit words; val must point at >= 2 words. */
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/* CCS control-register accessors (address and count). */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generate a setter for one CCS data field.  Each expansion is a complete
+ * function definition, so the invocations below must NOT be followed by a
+ * semicolon: a trailing ';' would leave a stray top-level semicolon, which
+ * ISO C does not allow outside of declarations (-Wpedantic flags it).
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/* Flush the staged CCS control and data registers. */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): the guard name __FLOW_NTHW_CAT_H__ starts with a double
+ * underscore and is therefore a reserved identifier (C11 7.1.3); consider
+ * renaming it (e.g. FLOW_NTHW_CAT_H_) in a follow-up. */
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+/* Allocate / free / initialise a CAT module handle. */
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/* Fix: this prototype was garbled as "void r(...)".  The CFN control
+ * register carries an address/count pair like every other table in this
+ * module (cat_nthw_cfn_flush() flushes mp_cfn_ctrl), so the count setter
+ * is declared here to match the definition in flow_nthw_cat.c. */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* Per-table accessors; each table follows a select/cnt/<fields>/flush
+ * pattern.  Tables suffixed "0/1" take an extra KM-interface index. */
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Handle for one CAT FPGA module instance.  The register/field pointers are
+ * presumably resolved once at init time (cat_nthw_init(), earlier in the .c
+ * file) and cached here; [2]-sized arrays hold the per-KM-interface copies.
+ * Pointers that the accessors assert before use may legitimately stay NULL
+ * on FPGA variants lacking the corresponding table - TODO confirm.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN: control (addr/cnt) and data-register fields. */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE/KCS/FTE: one entry per KM interface. */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK data is accessed as a whole register; no per-field handles. */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the requested debug mode to the underlying CSU module handle. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized CSU handle; returns NULL on allocation
+ * failure.  calloc() both allocates and zeroes in one step, replacing the
+ * original malloc()+memset() pair with identical observable behavior.
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/*
+ * Free a CSU handle; NULL is accepted as a no-op.  The object is scrubbed
+ * before free() - presumably so a stale use-after-free dereferences zeroed
+ * pointers rather than valid-looking ones.
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a CSU handle to FPGA module instance n_instance and cache its
+ * register/field handles.
+ *
+ * Probe mode: when p is NULL the function only reports whether the module
+ * instance exists (0 = present, -1 = absent) without touching any state.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	/* n_instance is stored in a uint8_t below, hence the 0..255 range. */
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Resolve the RCP control (addr/cnt) and data (per-layer checksum
+	 * command) register fields once so the setters stay cheap. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Stage the RCP table address (select) and entry count fields. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* RCP data field setters; the valid command encodings are documented per
+ * function below. */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Flush the staged RCP control and data registers. */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* The previous guard `_FLOW_NTHW_CSU_H_` (leading underscore followed by an
+ * uppercase letter) is a reserved identifier per ISO C 7.1.3; use an
+ * application-namespace guard instead.
+ */
+#ifndef FLOW_NTHW_CSU_H_
+#define FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register-shadow handles for one CSU (checksum update) FPGA module instance. */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;	/* CSU module handle queried from the FPGA model */
+
+	/* RCP (recipe) control/data shadow registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;	/* recipe index to address */
+	nt_field_t *mp_rcp_ctrl_cnt;	/* number of consecutive entries */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;	/* outer L3 checksum command */
+	nt_field_t *mp_rcp_data_ol4_cmd;	/* outer L4 checksum command */
+	nt_field_t *mp_rcp_data_il3_cmd;	/* inner (tunneled) L3 checksum command */
+	nt_field_t *mp_rcp_data_il4_cmd;	/* inner (tunneled) L4 checksum command */
+};
+
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* Allocate and zero-initialize a FLM context.
+	 * calloc() yields the same zeroed result as the former malloc()+memset()
+	 * pair, and additionally checks the size multiplication for overflow.
+	 * Returns NULL on allocation failure.
+	 */
+	struct flm_nthw *p = calloc(1, sizeof(struct flm_nthw));
+
+	return p;
+}
+
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	/* Scrub and free the FLM context; a NULL pointer is tolerated. */
+	if (p) {
+		/* NOTE(review): defensive scrubbing only - a plain memset()
+		 * immediately before free() may be optimized away, so do not
+		 * rely on it for clearing sensitive data.
+		 */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	/* Forward the debug level to the underlying FPGA-model FLM module. */
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe mode: a NULL context only reports whether the FLM module
+	 * instance exists (0 = present, -1 = absent).
+	 */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	/* CONTROL register. The query_field() lookups (PDS/PIS) may yield NULL
+	 * on FPGA images lacking those fields; their accessors assert before use.
+	 */
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE)
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	/* STATUS register fields */
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	/* Timeout, scrub interval and load counters */
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	/* Priority limits and flow types */
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	/* PST (port state table) control/data */
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	/* RCP (recipe) control/data */
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Registers accessed through RAB DMA by the *_data_* helpers below */
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	/* Statistics counters; the query_register/query_field() lookups are
+	 * optional registers that may be NULL on some FPGA images, and their
+	 * accessors assert before use.
+	 */
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	/* get != 0: read the CRITICAL status field into *val;
+	 * get == 0: write *val to the CRITICAL status field.
+	 */
+	if (!get) {
+		field_set_val32(p->mp_status_critical, *val);
+		return;
+	}
+	*val = field_get_val32(p->mp_status_critical);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	/* get != 0: read the PANIC status field into *val;
+	 * get == 0: write *val to the PANIC status field.
+	 */
+	if (!get) {
+		field_set_val32(p->mp_status_panic, *val);
+		return;
+	}
+	*val = field_get_val32(p->mp_status_panic);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	/* get != 0: read the CRCERR status field into *val;
+	 * get == 0: write *val to the CRCERR status field.
+	 */
+	if (!get) {
+		field_set_val32(p->mp_status_crcerr, *val);
+		return;
+	}
+	*val = field_get_val32(p->mp_status_crcerr);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	/* Read the two BUF_CTRL words over RAB DMA and unpack them:
+	 * word0[15:0]  -> *lrn_free  (free learn-buffer space),
+	 * word0[31:16] -> *inf_avail (available INF words),
+	 * word1[15:0]  -> *sta_avail (available STA words).
+	 * Returns 0 on success; on failure the out-params are untouched.
+	 */
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA result buffer is treated as a ring; the index is
+		 * wrapped with size - 1 (size assumed to be a power of two -
+		 * TODO confirm against the RAC DMA implementation).
+		 */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	/* Write word_count words of learn records to LRN_DATA over RAB DMA and
+	 * read back BUF_CTRL so the caller gets updated buffer levels:
+	 * word0[15:0] -> *lrn_free, word0[31:16] -> *inf_avail,
+	 * word1[15:0] -> *sta_avail.
+	 * Returns 0 on success; on failure the out-params are untouched.
+	 */
+	int ret;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/* Propagate the status of dma_begin() like the sibling
+	 * flm_nthw_*_data_update() helpers do, instead of collapsing any
+	 * non-zero status to -1.
+	 */
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	/* Read word_count words of flow-info records from INF_DATA over RAB
+	 * DMA into data[], then read back BUF_CTRL to refresh the caller's
+	 * buffer levels (see flm_nthw_buf_ctrl_update() for the word layout).
+	 * Returns 0 on success; on failure the out-params are untouched.
+	 */
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer, wrapping with size - 1 */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	/* Read word_count words of status records from STA_DATA over RAB DMA
+	 * into data[], then read back BUF_CTRL to refresh the caller's buffer
+	 * levels (see flm_nthw_buf_ctrl_update() for the word layout).
+	 * Returns 0 on success; on failure the out-params are untouched.
+	 */
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer, wrapping with size - 1 */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn _done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl _done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb _done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel _done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul _done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul _done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta _done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf _done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Driver handle for one FLM module instance.
+ *
+ * All mp_* members are register/field shadow pointers — presumably resolved
+ * once by flm_nthw_init() (mirrors hfu_nthw_init()/hsh_nthw_init() in this
+ * patch; flm_nthw_init() itself is not in this hunk) — and are only valid
+ * after a successful init.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module model. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/* Allocate a zero-initialized hfu_nthw handle; returns NULL on allocation failure. */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/* Scrub and release a handle from hfu_nthw_new(); NULL is a no-op. */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve register/field handles for HFU instance n_instance on p_fpga.
+ *
+ * If p is NULL the function only probes for the module: it returns 0 when
+ * the instance exists and -1 otherwise.  On success (return 0) all mp_*
+ * members of *p are populated.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; a second lookup is redundant
+	 * (matches hsh_nthw_init()).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * RCP field setters.  Each call stages a value in the shadow of the RCP
+ * CTRL/DATA registers; nothing reaches hardware until hfu_nthw_rcp_flush()
+ * is called.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Write the staged RCP CTRL and DATA shadow registers to hardware. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Driver handle for one HFU module instance.  All mp_* members are
+ * register/field shadow pointers resolved by hfu_nthw_init() and are only
+ * valid after a successful init.
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this HSH module instance. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized hsh_nthw context.
+ * Returns NULL on allocation failure; free with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	struct hsh_nthw *p = malloc(sizeof(struct hsh_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Scrub and free a context from hsh_nthw_new().
+ * The memset defends against stale-pointer reuse; NULL is a no-op.
+ */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HSH context to FPGA module instance @n_instance and cache all
+ * RCP register/field handles, then program safe defaults and flush them.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for the module's presence.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache control (address/count) and data field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: query (not get) — NULL when absent in this FPGA image. */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: program recipe 0 with all-zero defaults and flush. */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* NOTE(review): hard-coded 10-word width (matches val[]) vs. the
+	 * mn_words-driven width used for mac_port_mask above — confirm
+	 * WORD_MASK is always exactly 10 words. p_mask is fetched but not
+	 * given a default here; presumably the memset-zero shadow suffices
+	 * — verify.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field setters. Each writes the shadow copy of the HSH RCP_CTRL /
+ * RCP_DATA register; nothing reaches hardware until hsh_nthw_rcp_flush().
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* @val must hold mn_words 32-bit words (multi-word field). */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Offsets are signed; stored into the field as 32-bit raw value. */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* @val must point to a 10-word array (fixed WORD_MASK width). */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* Silently ignored when the optional AUTO_IPV4_MASK field is absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Commit shadowed RCP control + data values to the hardware registers. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Cached FPGA handles for one HSH (hash) module instance. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register plus its address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its individual recipe fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	/* Optional; NULL when AUTO_IPV4_MASK is absent in the FPGA image. */
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this HST module instance. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized hst_nthw context.
+ * Returns NULL on allocation failure; free with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	struct hst_nthw *p = malloc(sizeof(struct hst_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Scrub and free a context from hst_nthw_new().
+ * The memset defends against stale-pointer reuse; NULL is a no-op.
+ */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HST context to FPGA module instance @n_instance and cache all
+ * RCP register/field handles.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for the module's presence.
+ * NOTE(review): unlike hsh_nthw_init(), no default values are programmed
+ * and no flush is issued here — confirm this asymmetry is intentional.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache control (address/count) and data field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP field setters. Each writes the shadow copy of the HST RCP_CTRL /
+ * RCP_DATA register; nothing reaches hardware until hst_nthw_rcp_flush().
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Commit shadowed RCP control + data values to the hardware registers. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached FPGA handles for one HST (header stripper) module instance. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register plus its address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its individual recipe fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+/* Enable/disable debug tracing for this IFR module instance. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ifr_nthw context.
+ * Returns NULL on allocation failure; free with ifr_nthw_delete().
+ * NOTE(review): this file uses malloc/memset but, unlike its siblings,
+ * does not include <stdlib.h>/<string.h> directly — presumably relies
+ * on transitive includes; confirm.
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	struct ifr_nthw *p = malloc(sizeof(struct ifr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/*
+ * Scrub and free a context from ifr_nthw_new().
+ * The memset defends against stale-pointer reuse; NULL is a no-op.
+ */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IFR context to FPGA module instance @n_instance and cache the
+ * RCP register/field handles.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for the module's presence.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle obtained above instead of a redundant re-query
+	 * (matches hsh_nthw_init()/hst_nthw_init()).
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/*
+ * RCP field setters. Each writes the shadow copy of the IFR RCP_CTRL /
+ * RCP_DATA register; nothing reaches hardware until ifr_nthw_rcp_flush().
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the RCP MTU field (shadow write; committed by ifr_nthw_rcp_flush()). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix copy-paste bug: assert the MTU field handle, not the EN one. */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Commit shadowed RCP control + data values to the hardware registers. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Cached FPGA handles for one IFR (IP fragmenter) module instance. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register plus its address/count fields. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register: enable flag and MTU value. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Clamp a presence count to 0/1 (any non-zero value becomes 1). */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val > 1 ? 1 : val;
+}
+
+/*
+ * Allocate a zero-initialized info_nthw context.
+ * Returns NULL on allocation failure; free with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	struct info_nthw *p = malloc(sizeof(struct info_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Scrub and free a context from info_nthw_new().
+ * The memset defends against stale-pointer reuse; NULL is a no-op.
+ */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Harvest the FPGA product parameters that describe this image's flow
+ * filter capabilities and cache them in @p. Capability counts for a
+ * feature block are multiplied by a 0/1 "present" flag, so absent
+ * modules report zero capacity. Always returns 0; unlike the other
+ * *_nthw_init() functions there is no NULL probe mode here.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for each optional feature module. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE counts as present only when all five sub-modules exist. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types exist if either KM or FLM is present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is bounded by the smaller of the Rx/Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* -1 sentinels mean "interface mapping not defined by this image". */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Trivial accessors for the capability values cached by info_nthw_init().
+ * Values are zero for feature modules absent from the FPGA image.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* May return (unsigned)-1 when the mapping is undefined for this image. */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+/* May return (unsigned)-1 when the mapping is undefined for this image. */
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Cached FPGA capability/configuration values for one adapter instance.
+ * Filled by info_nthw_init() and read back through the accessors above.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	/* Port topology */
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	/* CAT (categorizer) dimensions */
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	/* KM (key matcher) CAM/TCAM dimensions */
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	/* FLM (flow matcher) dimensions */
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	/* Misc per-module category counts */
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	/* TPE (TX packet editor) dimensions */
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward debug-mode setting to the underlying IOA FPGA module. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IOA wrapper object.
+ * Returns NULL on allocation failure; otherwise the caller owns the
+ * object and must release it with ioa_nthw_delete().
+ *
+ * calloc() replaces the original malloc()+memset() pair: one call,
+ * guaranteed zero-fill, identical result for the caller.
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/*
+ * Release an object obtained from ioa_nthw_new(). NULL is accepted.
+ * The memset scrubs stale register/field pointers before freeing so a
+ * dangling user fails fast rather than dereferencing old state.
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IOA wrapper to FPGA module instance n_instance and resolve all
+ * register/field handles it uses.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * With p == NULL the call degrades to a pure existence probe for the
+ * module instance (0 if present, -1 if not).
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control/data registers -- mandatory, hard failure
+	 * inside register_get_field() if missing.
+	 */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * module_query_register() (vs _get_) tolerates absence:
+		 * the EPP registers are optional, so every derived field
+		 * pointer is explicitly NULLed when they are not present,
+		 * and the setters below check before writing.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * RCP: shadow-register field writers. Each call only updates the cached
+ * value; nothing reaches hardware until ioa_nthw_rcp_flush().
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Commit the staged control + data values to the hardware registers. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special: custom TPID values; staged, then flushed below. */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * ROA/EPP writers: these registers are optional in the FPGA image (see
+ * ioa_nthw_init), so every access is guarded by a NULL check and is a
+ * silent no-op when the register is absent.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Handle object for one IOA FPGA module instance: caches the resolved
+ * register/field pointers looked up in ioa_nthw_init(). Pointers in the
+ * ROA/EPP group may be NULL (optional registers).
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	/* RCP control/data shadow registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	/* Custom VLAN TPID register */
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional ROA/EPP extension -- NULL when absent in the FPGA image */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write 'val' to field '_a' only if the field pointer is non-NULL (fields
+ * resolved with register_query_field() may be absent). The __typeof__
+ * temporary evaluates '_a' exactly once; 'val' is also evaluated once,
+ * inside the conditional. do/while(0) makes the macro statement-safe.
+ */
+#define CHECK_AND_SET_VALUE(_a, val)             \
+	do {                                    \
+		__typeof__(_a) (a) = (_a); \
+		if (a) {                        \
+			field_set_val32(a, val); \
+		}                               \
+	} while (0)
+
+/* Forward debug-mode setting to the underlying KM FPGA module. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized KM wrapper object.
+ * Returns NULL on allocation failure; otherwise the caller owns the
+ * object and must release it with km_nthw_delete().
+ *
+ * calloc() replaces the original malloc()+memset() pair: one call,
+ * guaranteed zero-fill, identical result for the caller.
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/*
+ * Release an object obtained from km_nthw_new(). NULL is accepted.
+ * The memset scrubs stale register/field pointers before freeing.
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a KM wrapper to FPGA module instance n_instance and resolve the
+ * RCP/CAM/TCAM/TCI/TCQ register and field handles.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * With p == NULL the call degrades to a pure existence probe.
+ *
+ * register_get_field() is used for mandatory fields;
+ * register_query_field() for fields that depend on the FPGA/KM version
+ * and may legitimately be absent (left NULL; the CHECK_AND_SET_VALUE
+ * setters tolerate that).
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Version-dependent: SW8 (older) vs DW8 (v0.6+) word-8 layout */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL)
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/*
+ * RCP: shadow-register field writers; nothing reaches hardware until
+ * km_nthw_rcp_flush(). CHECK_AND_SET_VALUE-based setters target optional
+ * (version-dependent) fields and are no-ops when the field is absent.
+ *
+ * NOTE(review): every function definition below ends in "};" -- the extra
+ * file-scope semicolon is not valid ISO C before C23 and triggers
+ * -Wpedantic warnings; worth removing in a follow-up.
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+};
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+};
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+};
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+};
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+};
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+};
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+};
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+};
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+};
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+};
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+};
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+};
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+};
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+};
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+};
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+};
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+};
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+};
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+};
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+};
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+};
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+};
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+};
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+};
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+};
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+};
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+};
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+};
+
+/* Multi-word mask writers: width taken from the field's own word count */
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+};
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+};
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}; /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+};
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+};
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+};
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+};
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+};
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+};
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+};
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+};
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+};
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+};
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+};
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+};
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+};
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+};
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+};
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+};
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+};
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+};
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+};
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+};
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+};
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+};
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+};
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+};
+
+/* Commit the staged RCP control + data values to hardware. */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+};
+
+/* CAM: setters stage values; km_nthw_cam_flush() commits them.
+ * Fix: dropped the stray ';' after each function body (invalid ISO C
+ * empty file-scope declaration).
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* Commit staged CAM control + data registers to the FPGA. */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM: setters stage values; km_nthw_tcam_flush() commits them.
+ * Fix: dropped the stray ';' after each function body.
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* val points at 3 x 32-bit words (96-bit TCAM entry). */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI: setters stage values; km_nthw_tci_flush() commits them.
+ * Fix: dropped the stray ';' after each function body.
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ: setters stage values; km_nthw_tcq_flush() commits them.
+ * Fix: dropped the stray ';' after each function body.
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant (3 x 32-bit words); to use in v4. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/* Handle for one KM (key matcher) FPGA module instance: caches the module
+ * plus every register/field pointer the accessors above write through.
+ * Fields looked up with register_query_field() may be NULL on FPGA
+ * versions that lack them.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	/* RCP: recipe control/data registers. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	/* NOTE(review): km_nthw_rcp_mask_d_a() is declared above but there
+	 * is no mp_rcp_data_mask_d_a member - presumably it shares
+	 * mp_rcp_data_mask_a; confirm against the .c implementation.
+	 */
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	/* CAM: exact-match table registers. */
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	/* TCAM: ternary match table registers. */
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	/* TCI registers. */
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	/* TCQ registers. */
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB FPGA module. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/* Allocate a zero-initialized pdb_nthw handle; returns NULL on OOM.
+ * Caller owns the result and releases it with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc gives the same zeroed result as the former malloc +
+	 * memset pair in one overflow-checked call.
+	 */
+	struct pdb_nthw *p = calloc(1, sizeof(struct pdb_nthw));
+
+	return p;
+}
+
+/* Release a handle from pdb_nthw_new(); NULL is a no-op. The memset
+ * scrubs the cached register/field pointers so accidental use after
+ * free fails fast rather than writing through stale pointers.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Bind a pdb_nthw handle to PDB module instance n_instance of p_fpga and
+ * cache all register/field pointers. Returns 0 on success, -1 when the
+ * instance does not exist. Calling with p == NULL only probes for the
+ * module's existence (0 if present, -1 if not).
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the module instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* query (not get): field may be absent; setter checks for NULL. */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP: recipe field setters. Each stages a value in the register shadow;
+ * pdb_nthw_rcp_flush() commits control + data to the FPGA.
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+/* _rel offsets are signed: relative offsets may be negative. */
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* Field is optional (looked up with register_query_field in init);
+ * silently ignored on FPGA versions that lack it.
+ */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG: global PDB configuration; pdb_nthw_config_flush() commits. */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one PDB (packet descriptor builder) FPGA module instance:
+ * caches the module plus the register/field pointers the accessors use.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional; may be NULL */
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+/* Fix: removed duplicated pdb_nthw_config_port_ofs() prototype. */
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL FPGA module. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/* Allocate a zero-initialized qsl_nthw handle; returns NULL on OOM.
+ * Caller owns the result and releases it with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc gives the same zeroed result as the former malloc +
+	 * memset pair in one overflow-checked call.
+	 */
+	struct qsl_nthw *p = calloc(1, sizeof(struct qsl_nthw));
+
+	return p;
+}
+
+/* Release a handle from qsl_nthw_new(); NULL is a no-op. The memset
+ * scrubs cached register/field pointers so use-after-free fails fast.
+ */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Bind a qsl_nthw handle to QSL module instance n_instance of p_fpga and
+ * cache all register/field pointers, handling version differences:
+ * optional fields use register_query_field (NULL when absent), the QST
+ * enable field was renamed EN -> QEN in v0.7, and the LTX registers were
+ * removed in v0.7+ (their pointers stay NULL). Returns 0 on success,
+ * -1 when the instance does not exist. Calling with p == NULL only
+ * probes for the module's existence.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the module instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* query (not get): these fields may be absent on some versions. */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Setup hook kept for interface symmetry with other *_nthw modules;
+ * currently there is nothing to configure, so all parameters are
+ * deliberately ignored and 0 (success) is returned.
+ */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP: recipe field setters; qsl_nthw_rcp_flush() commits to the FPGA.
+ * Fixes: dropped the stray ';' after qsl_nthw_rcp_select's body and the
+ * stray blank lines splitting two signatures from their bodies.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+/* The four setters below guard on NULL: their fields are optional and
+ * looked up with register_query_field() in qsl_nthw_init().
+ */
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX: these registers were removed in FPGA v0.7+; qsl_nthw_init()
+ * leaves all mp_ltx_* pointers NULL when absent, so every accessor
+ * must check before writing.
+ */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Fix: guard on the field actually written (was mp_ltx_addr). */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* Fix: mp_ltx_ctrl/mp_ltx_data are NULL on v0.7+ where LTX was
+	 * removed; flushing unconditionally would dereference NULL.
+	 */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST: queue selection table setters; qsl_nthw_qst_flush() commits.
+ * Guarded setters correspond to optional fields looked up with
+ * register_query_field() in qsl_nthw_init().
+ */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+/* Unguarded: init falls back to the v0.7 QEN field name, so
+ * mp_qst_data_en is expected to be resolved on all versions.
+ */
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN: queue enable table setters; qsl_nthw_qen_flush() commits. */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ: unmatched-packet queue setters; qsl_nthw_unmq_flush() commits. */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
/*
 * Register/field handles for one QSL module instance.  Populated by
 * qsl_nthw_init(); the qsl_nthw_* accessors write through these
 * handles.  Handles for optional fields (e.g. some QST data fields)
 * may be NULL when absent from the FPGA image.
 */
struct qsl_nthw {
	uint8_t m_physical_adapter_no;	/* module instance number */
	nt_fpga_t *mp_fpga;

	nt_module_t *m_qsl;

	/* RCP: recipe table */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_addr;
	nt_field_t *mp_rcp_cnt;
	nt_register_t *mp_rcp_data;
	nt_field_t *mp_rcp_data_discard;
	nt_field_t *mp_rcp_data_drop;
	nt_field_t *mp_rcp_data_tbl_lo;
	nt_field_t *mp_rcp_data_tbl_hi;
	nt_field_t *mp_rcp_data_tbl_idx;
	nt_field_t *mp_rcp_data_tbl_msk;
	nt_field_t *mp_rcp_data_cao;
	nt_field_t *mp_rcp_data_lr;
	nt_field_t *mp_rcp_data_tsa;
	nt_field_t *mp_rcp_data_vli;

	/* LTX table */
	nt_register_t *mp_ltx_ctrl;
	nt_field_t *mp_ltx_addr;
	nt_field_t *mp_ltx_cnt;
	nt_register_t *mp_ltx_data;
	nt_field_t *mp_ltx_data_lr;
	nt_field_t *mp_ltx_data_tx_port;
	nt_field_t *mp_ltx_data_tsa;

	/* QST: queue state table */
	nt_register_t *mp_qst_ctrl;
	nt_field_t *mp_qst_addr;
	nt_field_t *mp_qst_cnt;
	nt_register_t *mp_qst_data;
	nt_field_t *mp_qst_data_queue;
	nt_field_t *mp_qst_data_en;
	nt_field_t *mp_qst_data_tx_port;
	nt_field_t *mp_qst_data_lre;
	nt_field_t *mp_qst_data_tci;
	nt_field_t *mp_qst_data_ven;

	/* QEN: queue enable table */
	nt_register_t *mp_qen_ctrl;
	nt_field_t *mp_qen_addr;
	nt_field_t *mp_qen_cnt;
	nt_register_t *mp_qen_data;
	nt_field_t *mp_qen_data_en;

	/* UNMQ: unmatched-queue table */
	nt_register_t *mp_unmq_ctrl;
	nt_field_t *mp_unmq_addr;
	nt_field_t *mp_unmq_cnt;
	nt_register_t *mp_unmq_data;
	nt_field_t *mp_unmq_data_dest_queue;
	nt_field_t *mp_unmq_data_en;
};

typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
/* Forward the debug-mode setting to the underlying RMC module model. */
void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_rmc, n_debug_mode);
}
+
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	struct rmc_nthw *p = malloc(sizeof(struct rmc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/*
 * Bind the RMC register/field handles for FPGA instance @n_instance.
 *
 * When called with @p == NULL this only probes for the module:
 * returns 0 if the instance exists, -1 otherwise.  With a valid @p it
 * returns 0 on success, -1 if the instance is missing.
 */
int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);

	assert(n_instance >= 0 && n_instance < 256);

	if (p == NULL)
		return p_mod == NULL ? -1 : 0;

	if (p_mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->m_physical_adapter_no = (uint8_t)n_instance;
	p->m_rmc = p_mod;

	/* CTRL */
	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
	p->mp_ctrl_block_statt =
		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
	p->mp_ctrl_block_keep_a =
		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
	/* query_field: optional field, may be NULL in this FPGA image */
	p->mp_ctrl_block_rpp_slice =
		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
	p->mp_ctrl_block_mac_port =
		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
	p->mp_ctrl_lag_phy_odd_even =
		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
	return 0;
}
+
/* Placeholder setup hook; RMC needs no per-index setup.  Always 0. */
int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
{
	(void)p;
	(void)n_idx;
	(void)n_idx_cnt;

	return 0;
}
+
/*
 * CTRL field setters.  Values are staged in the register shadow and
 * written to hardware by rmc_nthw_ctrl_flush().
 */
void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ctrl_block_statt, val);
}

void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ctrl_block_keep_a, val);
}

/* Optional field: handle is NULL when absent, write is then a no-op. */
void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
{
	if (p->mp_ctrl_block_rpp_slice)
		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
}

void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ctrl_block_mac_port, val);
}

void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
}

/* Write the shadowed CTRL register to hardware. */
void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
{
	register_flush(p->mp_ctrl, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
/*
 * Register/field handles for one RMC module instance, populated by
 * rmc_nthw_init().  mp_ctrl_block_rpp_slice is optional and may be
 * NULL when the field is absent from the FPGA image.
 */
struct rmc_nthw {
	uint8_t m_physical_adapter_no;	/* module instance number */
	nt_fpga_t *mp_fpga;

	nt_module_t *m_rmc;

	/* CTRL register and its fields */
	nt_register_t *mp_ctrl;
	nt_field_t *mp_ctrl_block_statt;
	nt_field_t *mp_ctrl_block_keep_a;
	nt_field_t *mp_ctrl_block_rpp_slice;	/* optional, may be NULL */
	nt_field_t *mp_ctrl_block_mac_port;
	nt_field_t *mp_ctrl_lag_phy_odd_even;
};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
/* Forward the debug-mode setting to the underlying ROA module model. */
void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_roa, n_debug_mode);
}
+
+struct roa_nthw *roa_nthw_new(void)
+{
+	struct roa_nthw *p = malloc(sizeof(struct roa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/*
 * Bind the ROA register/field handles for FPGA instance @n_instance.
 *
 * When called with @p == NULL this only probes for the module:
 * returns 0 if the instance exists, -1 otherwise.  With a valid @p it
 * returns 0 on success, -1 if the instance is missing.
 */
int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);

	assert(n_instance >= 0 && n_instance < 256);

	if (p == NULL)
		return p_mod == NULL ? -1 : 0;

	if (p_mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->m_physical_adapter_no = (uint8_t)n_instance;
	p->m_roa = p_mod;

	/* TUN HDR: tunnel header data table */
	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
	p->mp_tun_hdr_addr =
		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
	p->mp_tun_hdr_cnt =
		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
	p->mp_tun_hdr_data_tunnel_hdr =
		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
	/* TUN CFG: tunnel configuration table */
	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
	p->mp_tun_cfg_addr =
		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
	p->mp_tun_cfg_cnt =
		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
	p->mp_tun_cfg_data_tun_len =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
	p->mp_tun_cfg_data_tun_type =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
	p->mp_tun_cfg_data_tun_vlan =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
	p->mp_tun_cfg_data_ip_type =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
	p->mp_tun_cfg_data_ipcs_upd =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
	p->mp_tun_cfg_data_ipcs_precalc =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
	p->mp_tun_cfg_data_iptl_upd =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
	p->mp_tun_cfg_data_iptl_precalc =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
	p->mp_tun_cfg_data_vxlan_udp_len_upd =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
	p->mp_tun_cfg_data_tx_lag_ix =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
	p->mp_tun_cfg_data_recirculate =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
	p->mp_tun_cfg_data_push_tunnel =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
	p->mp_tun_cfg_data_recirc_port =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
	p->mp_tun_cfg_data_recirc_bypass =
		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
	/* CONFIG: global forwarding configuration */
	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
	p->mp_config_fwd_recirculate =
		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
	p->mp_config_fwd_normal_pcks =
		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
	p->mp_config_fwd_tx_port0 =
		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
	p->mp_config_fwd_tx_port1 =
		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
	p->mp_config_fwd_cell_builder_pcks =
		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
	p->mp_config_fwd_non_normal_pcks =
		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
	/* LAG: link-aggregation configuration table */
	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
	p->mp_lag_cfg_addr =
		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
	p->mp_lag_cfg_cnt =
		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
	p->mp_lag_cfg_data_tx_phy_port =
		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);

	return 0;
}
+
/*
 * TUN HDR: tunnel header table accessors.  Select an entry, stage the
 * data, then roa_nthw_tun_hdr_flush() commits both registers.
 */
void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_hdr_addr, val);
}

void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_hdr_cnt, val);
}

/* @val points to 4 x 32-bit words staged into the tunnel header field. */
void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
{
	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
}

void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
{
	register_flush(p->mp_tun_hdr_ctrl, 1);
	register_flush(p->mp_tun_hdr_data, 1);
}
+
/*
 * TUN CFG: tunnel configuration table field setters.  Values are
 * staged and committed by roa_nthw_tun_cfg_flush().
 */
void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_addr, val);
}

void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_cnt, val);
}

void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
}

void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
}

void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
}

void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
}

void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
}

void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
}

void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
}

void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
}

void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
}
+
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+};
+
/* Remaining TUN CFG field setters; committed by roa_nthw_tun_cfg_flush(). */
void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
}

void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
}

void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
}

void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
}

/* Write the shadowed TUN CFG control and data registers to hardware. */
void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
{
	register_flush(p->mp_tun_cfg_ctrl, 1);
	register_flush(p->mp_tun_cfg_data, 1);
}
+
/*
 * ROA CONFIG: global forwarding configuration field setters.
 * Committed by roa_nthw_config_flush().
 */
void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_config_fwd_recirculate, val);
}

void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_config_fwd_normal_pcks, val);
}

void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_config_fwd_tx_port0, val);
}

void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_config_fwd_tx_port1, val);
}

void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
}

void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
}

/* Write the shadowed CONFIG register to hardware. */
void roa_nthw_config_flush(const struct roa_nthw *p)
{
	register_flush(p->mp_config, 1);
}
+
/*
 * LAG: link-aggregation table accessors.  Select an entry, stage the
 * data, then roa_nthw_lag_cfg_flush() commits both registers.
 */
void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_lag_cfg_addr, val);
}

void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_lag_cfg_cnt, val);
}

void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
}

void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
{
	register_flush(p->mp_lag_cfg_ctrl, 1);
	register_flush(p->mp_lag_cfg_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
/*
 * Register/field handles for one ROA module instance, populated by
 * roa_nthw_init().
 */
struct roa_nthw {
	uint8_t m_physical_adapter_no;	/* module instance number */
	nt_fpga_t *mp_fpga;

	nt_module_t *m_roa;

	/* TUN HDR: tunnel header table */
	nt_register_t *mp_tun_hdr_ctrl;
	nt_field_t *mp_tun_hdr_addr;
	nt_field_t *mp_tun_hdr_cnt;
	nt_register_t *mp_tun_hdr_data;
	nt_field_t *mp_tun_hdr_data_tunnel_hdr;

	/* TUN CFG: tunnel configuration table */
	nt_register_t *mp_tun_cfg_ctrl;
	nt_field_t *mp_tun_cfg_addr;
	nt_field_t *mp_tun_cfg_cnt;
	nt_register_t *mp_tun_cfg_data;
	nt_field_t *mp_tun_cfg_data_tun_len;
	nt_field_t *mp_tun_cfg_data_tun_type;
	nt_field_t *mp_tun_cfg_data_tun_vlan;
	nt_field_t *mp_tun_cfg_data_ip_type;
	nt_field_t *mp_tun_cfg_data_ipcs_upd;
	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
	nt_field_t *mp_tun_cfg_data_iptl_upd;
	nt_field_t *mp_tun_cfg_data_iptl_precalc;
	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
	nt_field_t *mp_tun_cfg_data_recirculate;
	nt_field_t *mp_tun_cfg_data_push_tunnel;
	nt_field_t *mp_tun_cfg_data_recirc_port;
	nt_field_t *mp_tun_cfg_data_recirc_bypass;

	/* CONFIG: global forwarding configuration */
	nt_register_t *mp_config;
	nt_field_t *mp_config_fwd_recirculate;
	nt_field_t *mp_config_fwd_normal_pcks;
	nt_field_t *mp_config_fwd_tx_port0;
	nt_field_t *mp_config_fwd_tx_port1;
	nt_field_t *mp_config_fwd_cell_builder_pcks;
	nt_field_t *mp_config_fwd_non_normal_pcks;

	/* LAG: link-aggregation configuration table */
	nt_register_t *mp_lag_cfg_ctrl;
	nt_field_t *mp_lag_cfg_addr;
	nt_field_t *mp_lag_cfg_cnt;
	nt_register_t *mp_lag_cfg_data;
	nt_field_t *mp_lag_cfg_data_tx_phy_port;
};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
/* Forward the debug-mode setting to the underlying RPP_LR module model. */
void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
}
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rpp_lr = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
/*
 * RCP table accessors.  Handles come from register_get_field() and
 * must be present; the asserts guard against use before init.
 */
void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
{
	assert(p->mp_rcp_addr);
	field_set_val32(p->mp_rcp_addr, val);
}

void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
{
	assert(p->mp_rcp_cnt);
	field_set_val32(p->mp_rcp_cnt, val);
}

void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
{
	assert(p->mp_rcp_data_exp);
	field_set_val32(p->mp_rcp_data_exp, val);
}

/* Write the shadowed RCP control and data registers to hardware. */
void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
{
	assert(p->mp_rcp_ctrl);
	assert(p->mp_rcp_data);
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
+
/*
 * IFR RCP table accessors.  These handles are looked up with the
 * query_* variants in init and are NULL when the capability is absent;
 * the asserts mean callers must check availability before use.
 * NOTE(review): presumably a capability check precedes these calls in
 * the flow layer - confirm against callers.
 */
void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
{
	assert(p->mp_ifr_rcp_addr);
	field_set_val32(p->mp_ifr_rcp_addr, val);
}

void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
{
	assert(p->mp_ifr_rcp_cnt);
	field_set_val32(p->mp_ifr_rcp_cnt, val);
}

void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
{
	assert(p->mp_ifr_rcp_data_en);
	field_set_val32(p->mp_ifr_rcp_data_en, val);
}

void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
{
	assert(p->mp_ifr_rcp_data_mtu);
	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
}

/* Write the shadowed IFR RCP control and data registers to hardware. */
void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
{
	assert(p->mp_ifr_rcp_ctrl);
	assert(p->mp_ifr_rcp_data);
	register_flush(p->mp_ifr_rcp_ctrl, 1);
	register_flush(p->mp_ifr_rcp_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
/*
 * Register/field handles for one RPP_LR module instance, populated by
 * rpp_lr_nthw_init().  The IFR handles are optional (queried) and may
 * be NULL when the capability is absent from the FPGA image.
 */
struct rpp_lr_nthw {
	uint8_t m_physical_adapter_no;	/* module instance number */
	nt_fpga_t *mp_fpga;

	nt_module_t *m_rpp_lr;

	/* RCP table */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_addr;
	nt_field_t *mp_rcp_cnt;

	nt_register_t *mp_rcp_data;
	nt_field_t *mp_rcp_data_exp;

	/* IFR RCP table (optional, may be NULL) */
	nt_register_t *mp_ifr_rcp_ctrl;
	nt_field_t *mp_ifr_rcp_addr;
	nt_field_t *mp_ifr_rcp_cnt;

	nt_register_t *mp_ifr_rcp_data;
	nt_field_t *mp_ifr_rcp_data_en;
	nt_field_t *mp_ifr_rcp_data_mtu;
};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
/* Forward the debug-mode setting to the underlying SLC module model. */
void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_slc, n_debug_mode);
}
+
+struct slc_nthw *slc_nthw_new(void)
+{
+	struct slc_nthw *p = malloc(sizeof(struct slc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
/*
 * RCP table accessors.  Select an entry, stage the data fields, then
 * slc_nthw_rcp_flush() commits both registers.
 */
void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_addr, val);
}

void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_cnt, val);
}

void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
}

void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_tail_dyn, val);
}

/* Signed offset; implicitly converted to uint32_t by field_set_val32.
 * NOTE(review): negative values rely on two's-complement field
 * encoding in hardware - confirm against the register spec.
 */
void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
{
	field_set_val32(p->mp_rcp_data_tail_ofs, val);
}

void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_pcap, val);
}

/* Write the shadowed RCP control and data registers to hardware. */
void slc_nthw_rcp_flush(const struct slc_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one SLC (slicer) module instance: caches the FPGA module plus
+ * the RCP CTRL/DATA register and field handles resolved by slc_nthw_init().
+ */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* set from the instance number */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_slc;	/* MOD_SLC module instance */
+
+	/* RCP control register: record address and count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its payload fields */
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;	/* signed offset (see int32_t setter) */
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC LR FPGA module. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized SLC LR handle.
+ * Returns NULL on allocation failure; free with slc_lr_nthw_delete().
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/*
+ * Release a handle obtained from slc_lr_nthw_new().  NULL is a no-op.
+ * The struct is scrubbed before free() to invalidate stale pointers.
+ */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the SLC LR RCP register/field handles for module instance
+ * n_instance.  When p is NULL the function only probes: it returns 0 if
+ * the MOD_SLC_LR instance exists in p_fpga, otherwise -1.  On success (0)
+ * all mp_rcp_* handles in p are populated.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Report the module actually probed (SLC LR, not SLC) */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc_lr = p_mod;	/* reuse the probe result; no second query */
+
+	/* RCP: control (address/count) and data (payload) handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/*
+ * RCP (recipe) accessors for the SLC LR module.  Setters stage values in
+ * the CTRL/DATA register fields; slc_lr_nthw_rcp_flush() writes them out.
+ */
+
+/* Select the RCP record to operate on (CTRL ADR field). */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the number of consecutive RCP records to access (CTRL CNT field). */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Stage the tail-slice enable bit (DATA TAIL_SLC_EN field). */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Stage the tail dynamic-offset selector (DATA TAIL_DYN field). */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Stage the tail offset; note the value is signed (DATA TAIL_OFS field). */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Stage the PCAP bit (DATA PCAP field). */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Write the staged CTRL and DATA register contents to the FPGA. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one SLC LR module instance: caches the FPGA module plus the
+ * RCP CTRL/DATA register and field handles resolved by slc_lr_nthw_init().
+ */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* set from the instance number */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_slc_lr;	/* MOD_SLC_LR module instance */
+
+	/* RCP control register: record address and count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its payload fields */
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;	/* signed offset (see int32_t setter) */
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY FPGA module. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_CPY handle.
+ * Returns NULL on allocation failure; free with tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/*
+ * Release a handle obtained from tx_cpy_nthw_new().  NULL is a no-op.
+ * Frees the writer array allocated by tx_cpy_nthw_init() and scrubs the
+ * struct before free() to invalidate stale pointers.
+ */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p) {
+		free(p->m_writers);
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the TX_CPY writer register/field handles for module instance
+ * n_instance.  When p is NULL the function only probes: it returns 0 if
+ * the MOD_TX_CPY instance exists in p_fpga, otherwise -1.
+ *
+ * The writer count comes from the NT_TX_CPY_WRITERS product parameter (at
+ * most 6 register sets are known to this code); the mask registers/fields
+ * exist only when NT_TX_CPY_VARIANT is non-zero.  Returns 0 on success,
+ * -1 on a missing module, a writer count below 1, or allocation failure.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	/*
+	 * Register/field ids for each of the up to 6 writers; this table
+	 * plus the loop below replaces six near-identical fallthrough
+	 * switch cases with identical behavior.
+	 */
+	static const struct {
+		int ctrl, ctrl_adr, ctrl_cnt;
+		int data, data_reader_select, data_dyn, data_ofs, data_len;
+		int data_mask_pointer;
+		int mask_ctrl, mask_ctrl_adr, mask_ctrl_cnt;
+		int mask_data, mask_data_byte_mask;
+	} writer_regs[] = {
+		{ CPY_WRITER0_CTRL, CPY_WRITER0_CTRL_ADR, CPY_WRITER0_CTRL_CNT,
+		  CPY_WRITER0_DATA, CPY_WRITER0_DATA_READER_SELECT,
+		  CPY_WRITER0_DATA_DYN, CPY_WRITER0_DATA_OFS,
+		  CPY_WRITER0_DATA_LEN, CPY_WRITER0_DATA_MASK_POINTER,
+		  CPY_WRITER0_MASK_CTRL, CPY_WRITER0_MASK_CTRL_ADR,
+		  CPY_WRITER0_MASK_CTRL_CNT,
+		  CPY_WRITER0_MASK_DATA, CPY_WRITER0_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER1_CTRL, CPY_WRITER1_CTRL_ADR, CPY_WRITER1_CTRL_CNT,
+		  CPY_WRITER1_DATA, CPY_WRITER1_DATA_READER_SELECT,
+		  CPY_WRITER1_DATA_DYN, CPY_WRITER1_DATA_OFS,
+		  CPY_WRITER1_DATA_LEN, CPY_WRITER1_DATA_MASK_POINTER,
+		  CPY_WRITER1_MASK_CTRL, CPY_WRITER1_MASK_CTRL_ADR,
+		  CPY_WRITER1_MASK_CTRL_CNT,
+		  CPY_WRITER1_MASK_DATA, CPY_WRITER1_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER2_CTRL, CPY_WRITER2_CTRL_ADR, CPY_WRITER2_CTRL_CNT,
+		  CPY_WRITER2_DATA, CPY_WRITER2_DATA_READER_SELECT,
+		  CPY_WRITER2_DATA_DYN, CPY_WRITER2_DATA_OFS,
+		  CPY_WRITER2_DATA_LEN, CPY_WRITER2_DATA_MASK_POINTER,
+		  CPY_WRITER2_MASK_CTRL, CPY_WRITER2_MASK_CTRL_ADR,
+		  CPY_WRITER2_MASK_CTRL_CNT,
+		  CPY_WRITER2_MASK_DATA, CPY_WRITER2_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER3_CTRL, CPY_WRITER3_CTRL_ADR, CPY_WRITER3_CTRL_CNT,
+		  CPY_WRITER3_DATA, CPY_WRITER3_DATA_READER_SELECT,
+		  CPY_WRITER3_DATA_DYN, CPY_WRITER3_DATA_OFS,
+		  CPY_WRITER3_DATA_LEN, CPY_WRITER3_DATA_MASK_POINTER,
+		  CPY_WRITER3_MASK_CTRL, CPY_WRITER3_MASK_CTRL_ADR,
+		  CPY_WRITER3_MASK_CTRL_CNT,
+		  CPY_WRITER3_MASK_DATA, CPY_WRITER3_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER4_CTRL, CPY_WRITER4_CTRL_ADR, CPY_WRITER4_CTRL_CNT,
+		  CPY_WRITER4_DATA, CPY_WRITER4_DATA_READER_SELECT,
+		  CPY_WRITER4_DATA_DYN, CPY_WRITER4_DATA_OFS,
+		  CPY_WRITER4_DATA_LEN, CPY_WRITER4_DATA_MASK_POINTER,
+		  CPY_WRITER4_MASK_CTRL, CPY_WRITER4_MASK_CTRL_ADR,
+		  CPY_WRITER4_MASK_CTRL_CNT,
+		  CPY_WRITER4_MASK_DATA, CPY_WRITER4_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER5_CTRL, CPY_WRITER5_CTRL_ADR, CPY_WRITER5_CTRL_CNT,
+		  CPY_WRITER5_DATA, CPY_WRITER5_DATA_READER_SELECT,
+		  CPY_WRITER5_DATA_DYN, CPY_WRITER5_DATA_OFS,
+		  CPY_WRITER5_DATA_LEN, CPY_WRITER5_DATA_MASK_POINTER,
+		  CPY_WRITER5_MASK_CTRL, CPY_WRITER5_MASK_CTRL_ADR,
+		  CPY_WRITER5_MASK_CTRL_CNT,
+		  CPY_WRITER5_MASK_DATA, CPY_WRITER5_MASK_DATA_BYTE_MASK },
+	};
+	const unsigned int n_writers_supported =
+		sizeof(writer_regs) / sizeof(writer_regs[0]);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+	unsigned int i;
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = p_mod;	/* reuse the probe result; no second query */
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* Mask registers are only present in variant != 0 FPGA builds */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/*
+	 * Resolve the register/field handles per writer.  Writers beyond
+	 * the known register sets stay zeroed (calloc), matching the old
+	 * switch that only handled up to 6 writers.
+	 */
+	for (i = 0; i < p->m_writers_cnt && i < n_writers_supported; i++) {
+		struct tx_cpy_writers_s *w = &p->m_writers[i];
+
+		w->mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, writer_regs[i].ctrl);
+		w->mp_writer_ctrl_addr =
+			register_get_field(w->mp_writer_ctrl,
+					   writer_regs[i].ctrl_adr);
+		w->mp_writer_ctrl_cnt =
+			register_get_field(w->mp_writer_ctrl,
+					   writer_regs[i].ctrl_cnt);
+		w->mp_writer_data =
+			module_get_register(p->m_tx_cpy, writer_regs[i].data);
+		w->mp_writer_data_reader_select =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_reader_select);
+		w->mp_writer_data_dyn =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_dyn);
+		w->mp_writer_data_ofs =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_ofs);
+		w->mp_writer_data_len =
+			register_get_field(w->mp_writer_data,
+					   writer_regs[i].data_len);
+		if (variant != 0) {
+			w->mp_writer_data_mask_pointer =
+				register_get_field(w->mp_writer_data,
+						   writer_regs[i].data_mask_pointer);
+			w->mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy,
+						    writer_regs[i].mask_ctrl);
+			w->mp_writer_mask_ctrl_addr =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   writer_regs[i].mask_ctrl_adr);
+			w->mp_writer_mask_ctrl_cnt =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   writer_regs[i].mask_ctrl_cnt);
+			w->mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy,
+						    writer_regs[i].mask_data);
+			w->mp_writer_mask_data_byte_mask =
+				register_get_field(w->mp_writer_mask_data,
+						   writer_regs[i].mask_data_byte_mask);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Per-writer accessors.  @index must be below m_writers_cnt; values are
+ * staged in the register fields and written out by
+ * tx_cpy_nthw_writer_flush() / tx_cpy_nthw_writer_mask_flush().  The mask
+ * accessors assert the mask handles were resolved, i.e. the FPGA build
+ * has NT_TX_CPY_VARIANT != 0 (see tx_cpy_nthw_init()).
+ */
+
+/* Select the writer record to operate on (WRITERn CTRL ADR field). */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set the number of consecutive records to access (WRITERn CTRL CNT field). */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Stage the reader-select value (WRITERn DATA READER_SELECT field). */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Stage the dynamic-offset selector (WRITERn DATA DYN field). */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Stage the offset (WRITERn DATA OFS field). */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Stage the length (WRITERn DATA LEN field). */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/* Stage the mask pointer (WRITERn DATA MASK_POINTER field; variant builds). */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Write the staged writer CTRL and DATA register contents to the FPGA. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Select the mask record to operate on (WRITERn MASK_CTRL ADR field). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set the number of consecutive mask records (WRITERn MASK_CTRL CNT field). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Stage the byte mask (WRITERn MASK_DATA BYTE_MASK field). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Write the staged mask CTRL and DATA register contents to the FPGA. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handles for one TX_CPY writer.  The mask members are
+ * resolved only on variant != 0 FPGA builds (see tx_cpy_nthw_init());
+ * they remain NULL otherwise.
+ */
+struct tx_cpy_writers_s {
+	/* Control register: record address and count fields */
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	/* Data register and its payload fields */
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer;	/* variant builds only */
+
+	/* Mask control register and fields (variant builds only) */
+	nt_register_t *mp_writer_mask_ctrl;
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	/* Mask data register and field (variant builds only) */
+	nt_register_t *mp_writer_mask_data;
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/*
+ * Handle for one TX_CPY module instance: the FPGA module plus an array of
+ * per-writer register handles sized from the NT_TX_CPY_WRITERS product
+ * parameter by tx_cpy_nthw_init().
+ */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* set from the instance number */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_tx_cpy;	/* MOD_TX_CPY module instance */
+
+	unsigned int m_writers_cnt;	/* number of entries in m_writers */
+	struct tx_cpy_writers_s *m_writers;	/* owned; freed in tx_cpy_nthw_delete() */
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS FPGA module. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_INS handle.
+ * Returns NULL on allocation failure; free with tx_ins_nthw_delete().
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/*
+ * Release a handle obtained from tx_ins_nthw_new().  NULL is a no-op.
+ * The struct is scrubbed before free() to invalidate stale pointers.
+ */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the TX_INS RCP register/field handles for module instance
+ * n_instance.  When p is NULL the function only probes: it returns 0 if
+ * the MOD_TX_INS instance exists in p_fpga, otherwise -1.  On success (0)
+ * all mp_rcp_* handles in p are populated.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_ins = p_mod;	/* reuse the probe result; no second query */
+
+	/* RCP: control (address/count) and data (dyn/ofs/len) handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/*
+ * RCP (recipe) accessors for the TX_INS module.  Setters stage values in
+ * the CTRL/DATA register fields; tx_ins_nthw_rcp_flush() writes them out.
+ */
+
+/* Select the RCP record to operate on (CTRL ADR field). */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the number of consecutive RCP records to access (CTRL CNT field). */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Stage the dynamic-offset selector (DATA DYN field). */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Stage the offset (DATA OFS field). */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Stage the length (DATA LEN field). */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Write the staged CTRL and DATA register contents to the FPGA. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one TX_INS module instance: caches the FPGA module plus the
+ * RCP CTRL/DATA register and field handles resolved by tx_ins_nthw_init().
+ */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* set from the instance number */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_tx_ins;	/* MOD_TX_INS module instance */
+
+	/* RCP control register: record address and count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its payload fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Set the debug-trace mode for this TX_RPL module instance. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/* Allocate and zero-initialize a TX_RPL handle; returns NULL on allocation failure. */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free a TX_RPL handle. NULL is accepted; the memset scrubs stale pointers first. */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TX_RPL handle to FPGA module instance @n_instance and cache its
+ * register/field pointers.
+ * When @p is NULL the call only probes for the instance: returns 0 if it
+ * exists, -1 if not.  With a handle, returns 0 on success or -1 (with an
+ * error log) when the instance does not exist.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* NOTE(review): repeats the query done above; could reuse p_mod */
+	p->m_tx_rpl = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	/* RCP (recipe) table control/data registers and their fields */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	/* EXT table control/data registers */
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	/* RPL (replace value) table control/data registers */
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP accessors: each setter stages a shadow value; flush commits to hardware. */
+
+/* Select the RCP entry the following data writes address. */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* Set the RCP entry count field. */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Flush the staged RCP CTRL and DATA registers (one entry each) to hardware. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table accessors: stage address/count/pointer, then flush to hardware. */
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+/* Flush the staged EXT CTRL and DATA registers (one entry each) to hardware. */
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL (replace value) table accessors. */
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* Stage a replace value; @val must point at 4 x 32-bit words. */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+/* Flush the staged RPL CTRL and DATA registers (one entry each) to hardware. */
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_rpl;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-30 16:51   ` [PATCH v10 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-30 16:51   ` Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  274 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14385 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a tunnel header into ROA TUNHDR memory at @index.
+ * Writes 4 words per outer iteration: 4 * 4 words = 64 bytes for IPv4,
+ * 8 * 4 words = 128 bytes otherwise (IPv6).  Each group of 4 words is
+ * written in reverse order, converted from network byte order by ntohl().
+ * Returns 0 on success; non-zero if any register write reported an error.
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program the ROA TUNCFG recipe at @index from the packed @color_actions
+ * bitmask (see the ROA bit layout in flow_api_actions.h).
+ * Configures: tunnel push (length/type, IP length and checksum
+ * pre-calculation), recirculation (bypass port wins over recirc port),
+ * TX destination, and the companion IOA "EPP" entry.
+ * Returns 0 on success, -1 on an invalid TX destination combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4: hardware updates checksum from the pre-calculated value */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 = no bypass; only then honor the recirculate port */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* both ports: TX on PHY0 and recirculate-bypass to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program the IOA recipe at @index from the packed @color_actions bitmask
+ * (see the IOA bit layout in flow_api_actions.h): VxLAN/VLAN pop,
+ * VLAN push (TCI split into VID/DEI/PCP), queue override, and TPID select.
+ * Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* TCI layout: 11:0 VID, 12 DEI, 15:13 PCP */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+/* Opaque types defined elsewhere in the flow API */
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+/* Number of color (flow statistics) counters in the chosen layout */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* TX destination bitmask used in the action encodings below */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+/* Tunnel IP version selector */
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID selector values for the IOA recipe */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+/* ROA retransmit destination encoding */
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/* Pack color/IOA/ROA indices into the legacy 32-bit color action word.
+ * The layout (and the discriminator in bits 31:30) is selected at compile
+ * time by MAX_COLOR_FLOW_STATS.
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/*
+ * Pack mark/IOA/ROA/QSL-HSH indices into the 32-bit color action word
+ * using the layout above; bit 31 (CAO) is always set.
+ * Note: 1U << 31 - left-shifting a signed 1 into the sign bit is
+ * undefined behavior in C, so the constant must be unsigned.
+ */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Mark that a new recirculate port was allocated (bit 50). */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Tunnel IP type, bit 24: 0 = IPv4, 1 = IPv6. */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Pre-calculated tunnel IP checksum, bits 49:34. */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Tunnel header length, bits 33:26; 0 means no tunnel header. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* TX port, bits 23:20, stored as txport + ROA_TX_PHY0 (i.e. txport + 1). */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Tunnel type, bits 19:16. */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Recirculate flag (bit 25) plus recirculate port in bits 7:0. */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Returns the recirculate port, or -1 when recirculation is not enabled. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Recirculate bypass port, bits 15:8; overrides the recirc port if set. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action       bit offs  bits
+ *  --------------------------------
+ *  tci                  15:0    16
+ *  queue               23:16     8   -> uses hbx
+ *  tpid select         27:24     4
+ *  pop vxlan           28        1
+ *  pop vlan            29        1
+ *  push vlan           30        1
+ *  queue override      31        1
+ */
+
+/*
+ * Enable queue override (bit 31) and store the queue index in bits 23:16.
+ * 1ULL << 31 is required: a signed (1 << 31) is undefined behavior and,
+ * when converted to uint64_t for the OR, sign-extends to
+ * 0xFFFFFFFF80000000, corrupting bits 63:32 of the action word.
+ */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/* Return the overridden queue index, or -1 if queue override is not set. */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* Set the pop-VxLAN flag (bit 28). Called with 0 to obtain the mask. */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Set the pop-VLAN flag (bit 29). Called with 0 to obtain the mask. */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1ad TPID in the TPID-select field (bits 27:24). */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Set the push-VLAN flag (bit 30) and the full 16-bit TCI (bits 15:0). */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set the push-VLAN flag and only the PCP part (bits 15:13) of the TCI. */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Registry of all HW modules known to the flow API. For each module the
+ * table holds the printable name used in log messages plus the
+ * alloc/free/reset/present operations invoked generically by
+ * flow_api_backend_init/reset/done() below.
+ */
+static const struct {
+	const char *name;                                /* log/display name */
+	int (*allocate)(struct flow_api_backend_s *be);  /* build SW cache */
+	void (*free)(struct flow_api_backend_s *be);     /* release SW cache */
+	int (*reset)(struct flow_api_backend_s *be);     /* set HW defaults */
+	bool (*present)(struct flow_api_backend_s *be);  /* module in FPGA? */
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+/* Number of entries in the module registry above */
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one contiguous, zeroed memory area holding "sets" consecutive
+ * arrays and record it in the module cache descriptor @mod.
+ *
+ * Variadic arguments come in triplets, one per set:
+ *   void **list_ptr, int element_count, int element_size
+ * Each *list_ptr is pointed into the area at its set's offset, skipping
+ * EXTRA_INDEXES hidden elements reserved at the front of every set.
+ *
+ * Returns the base pointer, or NULL on allocation failure; on failure the
+ * output pointers are left untouched and mod->allocated_size is zero, so
+ * zero_module_cache() stays safe to call.
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		/* fetch as void ** - that is the type the callers pass */
+		plist[i] = va_arg(args, void **);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		/* reserve EXTRA_INDEXES hidden elements ahead of each set */
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)(p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+		/* don't advertise a size for memory we don't own */
+		total_bytes = 0;
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+/*
+ * Clear the module's cached register shadow back to all-zero.
+ * A module whose allocation failed (base == NULL) is skipped to avoid
+ * calling memset() on a null pointer.
+ */
+void zero_module_cache(struct common_func_s *mod)
+{
+	if (mod->base)
+		memset(mod->base, 0, mod->allocated_size);
+}
+
+/*
+ * Bind the backend ops and device handle, query basic capabilities, and
+ * build the version-independent SW cache for every HW module present.
+ * Returns 0 on success; on any module failure all modules are freed
+ * again and -1 is returned.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int i = 0; i < MOD_COUNT; i++) {
+		if (!module[i].present(dev))
+			continue;
+		if (module[i].allocate(dev) != 0 || module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[i].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Reset every registered NIC module back to its default state.
+ * Returns 0 on success, -1 as soon as any module's reset fails.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int i = 0; i < MOD_COUNT; i++) {
+		if (module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[i].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Free the SW cache of every registered module. Always returns 0. */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int i = 0; i < MOD_COUNT; i++)
+		module[i].free(dev);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+/* Log an out-of-range entry index for the calling function; returns -2. */
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+/* Log an out-of-range word offset within an entry; returns -3. */
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+/* Log that the NIC module's major.minor version is unsupported; returns -4. */
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* Log that the requested field does not exist in this module version; -5. */
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+/*
+ * Log that a HW resource pool is exhausted for the given module/version;
+ * returns -4 (same code as error_unsup_ver()).
+ */
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	/* space after "module:" matches the error_unsup_ver() message format */
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       " %s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* Sentinel returned by find_equal_index() when no matching entry exists */
+#define NOT_FOUND 0xffffffff
+
+/*
+ * EXTRA_INDEXES (first enumerator, so value 0) is the number of hidden
+ * elements that callocate_mod() reserves in front of every array it hands
+ * out. COPY_INDEX maps the EXTRA_INDEX_COPY slot to the (negative) index
+ * used to address it relative to element 0.
+ * NOTE(review): EXTRA_INDEX_COPY is not defined in this chunk - presumably
+ * provided by a configuration that enables extra copy slots; confirm.
+ */
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/*
+ * Transfer one 32-bit word between the cached register shadow and the
+ * caller's value: get != 0 reads the cache, get == 0 writes it.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	uint32_t *dst = get ? val : cached_val;
+	const uint32_t *src = get ? cached_val : val;
+
+	*dst = *src;
+}
+
+/* Same as get_set(), but the cached side is a signed 32-bit value. */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)*cached_val;
+		return;
+	}
+	*cached_val = (int32_t)*val;
+}
+
+/*
+ * Search a register array for another element whose content is identical
+ * to element "idx". The scan starts at "start" and skips "idx" itself.
+ * On return *value holds the first matching index, or NOT_FOUND if none.
+ * Only valid as a get-operation; a set-operation or an out-of-range
+ * "start" yields a negative error code.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (unsigned int i = start; i < nb_elements; i++) {
+		if (i == idx)
+			continue;
+		if (memcmp(base + idx * type_size, base + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Compare two elements of a register array. Returns 1 when idx and
+ * cmp_idx are different indices with identical content, 0 otherwise,
+ * or a negative error code for a set-operation / out-of-range cmp_idx.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	if (cmp_idx >= nb_elements)
+		return error_index_too_large(func);
+	if (cmp_idx == idx)
+		return 0;
+	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
+		      type_size) == 0 ? 1 : 0;
+}
+
+/* Return 1 when any byte in [addr, addr + n) is non-zero, else 0. */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*p++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 when every byte in [addr, addr + n) is 0xff, else 0. */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*p++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+/*
+ * Fields common to every module cache descriptor: the negotiated module
+ * version, the base pointer and size of the cache memory obtained from
+ * callocate_mod(), and a debug flag. Kept as a macro so each module
+ * struct (struct cat_func_s, struct km_func_s, ...) can embed the fields
+ * at its head and be cast via CAST_COMMON().
+ */
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+/* Generic view of a module descriptor holding only the common fields */
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+struct hsh_func_s { /* HSH (hash) module state; same shape as the other *_func_s blocks */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	uint32_t nb_rcp; /* number of RCP entries (RCP presumably "recipe" -- confirm against FPGA docs) */
+	union { /* versioned register layout; only v5 is supported by this header */
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+enum hw_hsh_e { /* HSH field/function selectors used by hw_mod_hsh_rcp_set/get */
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX, /* fields start above the function range */
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+struct qsl_func_s { /* QSL (queue select) module state */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	uint32_t nb_rcp_categories; /* RCP table depth */
+	uint32_t nb_qst_entries; /* QST table depth */
+	union { /* versioned register layout; only v7 is supported by this header */
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+enum hw_qsl_e { /* QSL field/function selectors used by the hw_mod_qsl_* accessors */
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX, /* fields start above the function range */
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+struct slc_func_s { /* SLC (slicer) module state */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	union { /* versioned register layout; only v1 is supported by this header */
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+enum hw_slc_e { /* SLC field/function selectors; mirrors hw_slc_lr_e below */
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX, /* fields start above the function range */
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+struct slc_lr_func_s { /* SLC LR module state (long-range variant of SLC -- confirm) */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	union { /* versioned register layout; only v2 is supported by this header */
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+enum hw_slc_lr_e { /* SLC LR field/function selectors; same field set as hw_slc_e */
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX, /* fields start above the function range */
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+struct pdb_func_s { /* PDB (packet descriptor builder -- confirm) module state */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	uint32_t nb_pdb_rcp_categories; /* RCP table depth */
+
+	union { /* versioned register layout; only v9 is supported by this header */
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+enum hw_pdb_e { /* PDB field/function selectors used by the hw_mod_pdb_* accessors */
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX, /* fields start above the function range */
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT, /* CONFIG_* entries target hw_mod_pdb_config_set, not the RCP table */
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+struct ioa_func_s { /* IOA module state (ingress/egress header actions -- confirm) */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	uint32_t nb_rcp_categories; /* RCP table depth */
+	uint32_t nb_roa_epp_entries; /* ROA EPP table depth (shared with ROA module -- confirm) */
+	union { /* versioned register layout; only v4 is supported by this header */
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+enum hw_ioa_e { /* IOA field/function selectors used by the hw_mod_ioa_* accessors */
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX, /* fields start above the function range */
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0, /* CONFIG_* entries target hw_mod_ioa_config_set */
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL, /* ROA_EPP_* entries target hw_mod_ioa_roa_epp_set/get */
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+struct roa_func_s { /* ROA module state (re-order/tunnel-push engine -- confirm) */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	uint32_t nb_tun_categories; /* tunnel config table depth */
+	uint32_t nb_lag_entries; /* LAG config table depth */
+	union { /* versioned register layout; only v6 is supported by this header */
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+enum hw_roa_e { /* ROA field/function selectors used by the hw_mod_roa_* accessors */
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX, /* fields start above the function range */
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE, /* CONFIG_* entries target hw_mod_roa_config_set/get */
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP, /* IGS/RCC entries target the drop counter accessors below */
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+struct rmc_func_s { /* RMC module state (Rx MAC control -- confirm) */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	union { /* versioned register layout; only v1.3 is supported by this header */
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+enum hw_rmc_e { /* RMC field selectors; no function entries, unlike the other hw_*_e enums */
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX, /* fields start above the function range */
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+struct tpe_func_s { /* TPE (Tx packet editor -- confirm) module state */
+	COMMON_FUNC_INFO_S; /* shared per-module bookkeeping macro (definition not in view) */
+	uint32_t nb_rcp_categories; /* RCP table depth */
+	uint32_t nb_ifr_categories; /* IFR table depth */
+	uint32_t nb_cpy_writers; /* number of CPY writer instances */
+	uint32_t nb_rpl_depth; /* RPL value table depth */
+	uint32_t nb_rpl_ext_categories; /* RPL EXT table depth */
+	union { /* versioned register layout; only module to carry two versions (v1, v2) */
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+enum hw_tpe_e { /* TPE field/function selectors shared by all hw_mod_tpe_*_set/get accessors */
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX, /* fields start above the function range */
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR, /* HFU: three length groups (A/B/C) plus TTL, same field pattern */
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD, /* CSU: checksum update commands, outer/inner L3/L4 */
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+enum debug_mode_e { /* bit-flag values passed to flow_api_backend_ops.set_debug_mode */
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+struct flow_api_backend_ops { /* backend vtable: capability getters plus per-module present/version/flush hooks */
+	int version; /* interface version of this ops table (semantics not in view -- confirm) */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev); /* get_* callbacks report FPGA capabilities/sizes */
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+struct flow_api_backend_s { /* top-level backend handle tying the ops vtable to per-module shadow state */
+	void *be_dev; /* opaque backend device, passed back through every iface callback */
+	const struct flow_api_backend_ops *iface; /* backend implementation vtable */
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
enum res_type_e {
	RES_QUEUE,
	RES_CAT_CFN,
	RES_CAT_COT,
	RES_CAT_EXO,
	RES_CAT_LEN,
	RES_KM_FLOW_TYPE,
	RES_KM_CATEGORY,
	RES_HSH_RCP,
	RES_PDB_RCP,
	RES_QSL_RCP,
	RES_QSL_QST,
	RES_SLC_RCP,
	RES_IOA_RCP,
	RES_ROA_RCP,
	RES_FLM_FLOW_TYPE,
	RES_FLM_RCP,
	RES_HST_RCP,
	RES_TPE_RCP,
	RES_TPE_EXT,
	RES_TPE_RPL,
	RES_COUNT, /* number of valid resource types; keep last before sentinel */
	RES_INVALID /* sentinel meaning "no resource" */
};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be built
+ *
+ */
/* Which KM extractor a match element is assigned to (see diagram above). */
enum extractor_e {
	KM_USE_EXTRACTOR_UNDEF,
	KM_USE_EXTRACTOR_QWORD, /* 128-bit quad-word extractor */
	KM_USE_EXTRACTOR_SWORD, /* 32-bit single-word extractor */
};
+
/* One collected match field, before sorting/merging into the final KM key. */
struct match_elem_s {
	enum extractor_e extr;
	int masked_for_tcam; /* if potentially selected for TCAM */
	uint32_t e_word[4]; /* extracted match data, up to 128 bits */
	uint32_t e_mask[4]; /* bit mask applied to e_word */

	int extr_start_offs_id; /* start-offset selector for the extractor */
	int8_t rel_offs; /* byte offset relative to the selected start */
	uint32_t word_len; /* number of valid 32-bit words in e_word/e_mask */
};
+
/* Target key-matcher memory technology for a flow's final entry. */
enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
/*
 * Definition of one Key Matcher flow: the collected match elements, the
 * CAM/TCAM entry formatted from them, and bookkeeping for the bank/record
 * the entry occupies in hardware.
 */
struct km_flow_def_s {
	struct flow_api_backend_s *be;

	/* For keeping track of identical entries */
	struct km_flow_def_s *reference;
	struct km_flow_def_s *root;

	/* For collect flow elements and sorting */
	struct match_elem_s match[MAX_MATCH_FIELDS];
	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
	int num_ftype_elem;

	/* Finally formatted CAM/TCAM entry */
	enum cam_tech_use_e target;
	uint32_t entry_word[MAX_WORD_NUM];
	uint32_t entry_mask[MAX_WORD_NUM];
	int key_word_size;

	/* TCAM calculated possible bank start offsets */
	int start_offsets[MAX_TCAM_START_OFFSETS];
	int num_start_offsets;

	/* Flow information */

	/*
	 * HW input port ID needed for compare. In port must be identical on flow
	 * types
	 */
	uint32_t port_id;
	uint32_t info; /* used for color (actions) */
	int info_set;
	int flow_type; /* 0 is illegal and used as unset */
	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */

	/* CAM specific bank management */
	int cam_paired;
	int record_indexes[MAX_BANKS];
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct cam_distrib_s *cam_dist;
	struct hasher_s *hsh;

	/* TCAM specific bank management */
	struct tcam_distrib_s *tcam_dist;
	int tcam_start_bank;
	int tcam_record;
};
+
+/*
+ * KCC-CAM
+ */
/* 64-bit KCC CAM key, packed as bit fields (32+8+8+16 = exactly 64 bits). */
struct kcc_key_s {
	uint64_t sb_data : 32; /* sideband data: VLAN TPID/VID, VXLAN VNI, or 0xffffffff when none */
	uint64_t sb_type : 8; /* sideband type: 0 = none, 1 = VLAN, 2 = VXLAN */
	uint64_t cat_cfn : 8; /* categorizer CFN index */
	uint64_t port : 16; /* port number */
};
+
+#define KCC_ID_INVALID 0xffffffff
+
/* One KCC (KM Category CAM) flow entry and its CAM bookkeeping. */
struct kcc_flow_def_s {
	struct flow_api_backend_s *be;
	/* the 64-bit CAM key, viewable as one word, two words, or fields */
	union {
		uint64_t key64;
		uint32_t key32[2];
		struct kcc_key_s key;
	};
	uint32_t km_category; /* KM category this entry resolves to */
	uint32_t id; /* allocated unique KCC ID, or KCC_ID_INVALID */

	uint8_t *kcc_unique_ids; /* shared allocation bitmap for KCC IDs */

	int flushed_to_target; /* entry has been programmed into NIC hw */
	int record_indexes[MAX_BANKS]; /* hash-derived record index per bank */
	int bank_used; /* bank the entry currently occupies */
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct kcc_cam_distrib_s *cam_dist;
	struct hasher_s *hsh;
};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
/* Raw tunnel-encapsulation header to push, plus its parsed layout info. */
struct tunnel_header_s {
	union {
		uint8_t hdr8[MAX_TUN_HDR_SIZE];
		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
	} d;
	uint32_t user_port_id;
	uint8_t len; /* total header length in bytes */

	uint8_t nb_vlans;

	uint8_t ip_version; /* 4: v4, 6: v6 */
	uint16_t ip_csum_precalc; /* precalculated outer IP checksum */

	uint8_t new_outer;
	uint8_t l2_len;
	uint8_t l3_len;
	uint8_t l4_len;
};
+
/* Destination port classification for a flow output. */
enum port_type_e {
	PORT_NONE, /* not defined or drop */
	PORT_INTERNAL, /* no queues attached */
	PORT_PHY, /* MAC phy output queue */
	PORT_VIRT, /* Memory queues to Host */
};
+
/*
 * High-priority partial matches requiring special SW processing.
 * NOTE(review): "SPCIAL_MATCH_NONE" is misspelled, but it is a public
 * identifier -- renaming it would break code that references it.
 */
enum special_partial_match_e {
	SPCIAL_MATCH_NONE,
	SPECIAL_MATCH_LACP,
};
+
+#define PORT_ID_NONE 0xffffffff
+
/* One resolved output destination of a flow. */
struct output_s {
	uint32_t owning_port_id; /* the port who owns this output destination */
	enum port_type_e type;
	int id; /* depending on port type: queue ID or physical port id or not used */
	int active; /* activated */
};
+
/*
 * Fully parsed and validated definition of one flow: match criteria decoded
 * from the flow elements plus all collected action/output data, ready for
 * NIC resource allocation (step 2).
 */
struct nic_flow_def {
	/*
	 * Frame Decoder match info collected
	 */
	int l2_prot;
	int l3_prot;
	int l4_prot;
	int tunnel_prot;
	int tunnel_l3_prot;
	int tunnel_l4_prot;
	int vlans;
	int fragmentation;
	/*
	 * Additional meta data for various functions
	 */
	int in_port_override;
	int l4_dst_port;
	/*
	 * Output destination info collection
	 */
	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
	int dst_num_avail;

	/*
	 * To identify high priority match with mark for special SW processing (non-OVS)
	 */
	enum special_partial_match_e special_match;

	/*
	 * Mark or Action info collection
	 */
	uint32_t mark;
	uint64_t roa_actions;
	uint64_t ioa_actions;

	uint32_t jump_to_group;

	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];

	int full_offload;
	/*
	 * Action push tunnel
	 */
	struct tunnel_header_s tun_hdr;

	/*
	 * If DPDK RTE tunnel helper API used
	 * this holds the tunnel if used in flow
	 */
	struct tunnel_s *tnl;

	/*
	 * Header Stripper
	 */
	int header_strip_start_dyn;
	int header_strip_start_ofs;
	int header_strip_end_dyn;
	int header_strip_end_ofs;
	int header_strip_removed_outer_ip;

	/*
	 * Modify field
	 */
	struct {
		uint32_t select;
		uint32_t dyn;
		uint32_t ofs;
		uint32_t len;
		uint32_t level;
		union {
			uint8_t value8[16];
			uint16_t value16[8];
			uint32_t value32[4];
		};
	} modify_field[MAX_CPY_WRITERS_SUPPORTED];

	uint32_t modify_field_count;
	uint8_t ttl_sub_enable;
	uint8_t ttl_sub_ipv4;
	uint8_t ttl_sub_outer;

	/*
	 * Key Matcher flow definitions
	 */
	struct km_flow_def_s km;

	/*
	 * Key Matcher Category CAM
	 */
	struct kcc_flow_def_s *kcc;
	int kcc_referenced;

	/*
	 * TX fragmentation IFR/RPP_LR MTU recipe
	 */
	uint8_t flm_mtu_fragmentation_recipe;
};
+
/* Discriminator for the union inside struct flow_handle. */
enum flow_handle_type {
	FLOW_HANDLE_TYPE_FLOW,
	FLOW_HANDLE_TYPE_FLM,
};
+
/*
 * Handle returned for an installed flow. Depending on "type" the union
 * holds either full per-flow NIC resource state (FLOW_HANDLE_TYPE_FLOW)
 * or the compact data of a flow-matcher flow (FLOW_HANDLE_TYPE_FLM).
 */
struct flow_handle {
	enum flow_handle_type type;

	struct flow_eth_dev *dev;
	struct flow_handle *next;
	struct flow_handle *prev;

	union {
		struct {
			/*
			 * 1st step conversion and validation of flow
			 * verified and converted flow match + actions structure
			 */
			struct nic_flow_def *fd;
			/*
			 * 2nd step NIC HW resource allocation and configuration
			 * NIC resource management structures
			 */
			struct {
				int index; /* allocation index into NIC raw resource table */
				/* number of contiguous allocations needed for this resource */
				int count;
				/*
				 * This resource if not initially created by this flow, but reused
				 * by it
				 */
				int referenced;
			} resource[RES_COUNT];
			int flushed;

			uint32_t flow_stat_id;
			uint32_t color;
			int cao_enabled;
			uint32_t cte;

			uint32_t port_id; /* MAC port ID or override of virtual in_port */
			uint32_t flm_ref_count;
			uint8_t flm_group_index;
			uint8_t flm_ft_index;
		};

		struct {
			uint32_t flm_data[10];
			uint8_t flm_prot;
			uint8_t flm_kid;
			uint8_t flm_prio;

			uint16_t flm_rpl_ext_ptr;
			uint32_t flm_nat_ipv4;
			uint16_t flm_nat_port;
			uint8_t flm_dscp;
			uint32_t flm_teid;
			uint8_t flm_rqi;
			uint8_t flm_qfi;

			uint8_t flm_mtu_fragmentation_recipe;

			struct flow_handle *flm_owner;
		};
	};
};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
#define OWNER_ID_COUNT 256

/*
 * Per translated-group entry: reference count plus a pointer back into the
 * translation-table slot currently mapping to this group.
 */
struct group_lookup_entry_s {
	uint64_t ref_counter;
	uint32_t *reverse_lookup;
};

struct group_handle_s {
	uint32_t group_count;

	/* [owner_id][group_in] -> translated group, 0 meaning "unassigned" */
	uint32_t *translation_table;

	/* one entry per translated group (index 0 unused: group 0 is fixed) */
	struct group_lookup_entry_s *lookup_entries;
};

/*
 * Allocate a group-translation handle supporting "group_count" translated
 * groups for up to OWNER_ID_COUNT owners.
 * Returns 0 on success, -1 on allocation failure (with *handle = NULL).
 */
int flow_group_handle_create(void **handle, uint32_t group_count)
{
	struct group_handle_s *group_handle;

	*handle = calloc(1, sizeof(struct group_handle_s));
	if (*handle == NULL)
		return -1;
	group_handle = *handle;

	group_handle->group_count = group_count;
	group_handle->translation_table =
		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
	group_handle->lookup_entries =
		calloc(group_count, sizeof(struct group_lookup_entry_s));

	/* fail cleanly if either table could not be allocated */
	if (group_handle->translation_table == NULL ||
			group_handle->lookup_entries == NULL) {
		free(group_handle->translation_table);
		free(group_handle->lookup_entries);
		free(*handle);
		*handle = NULL;
		return -1;
	}

	return 0;
}

/* Free a handle created by flow_group_handle_create(); *handle set to NULL. */
int flow_group_handle_destroy(void **handle)
{
	if (*handle) {
		struct group_handle_s *group_handle =
			(struct group_handle_s *)*handle;

		free(group_handle->translation_table);
		free(group_handle->lookup_entries);

		free(*handle);
		*handle = NULL;
	}

	return 0;
}

/*
 * Translate (owner_id, group_in) to a translated group id, allocating a new
 * one from the free lookup entries on first use and reference-counting reuse.
 * Group 0 always maps to 0. Returns 0 on success, -1 on bad input or when
 * no translated group is free.
 */
int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
			     uint32_t *group_out)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	uint32_t *table_ptr;
	uint32_t lookup;

	if (group_handle == NULL || group_in >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (group_in == 0) {
		*group_out = 0;
		return 0;
	}

	/*
	 * Index as [owner_id][group_in] with a row stride of group_count.
	 * (A stride of OWNER_ID_COUNT would exceed the table, which holds
	 * group_count * OWNER_ID_COUNT entries, whenever owner_id >=
	 * group_count.)
	 */
	table_ptr = &group_handle->translation_table[(size_t)owner_id *
				 group_handle->group_count + group_in];
	lookup = *table_ptr;

	if (lookup == 0) {
		/* unassigned: find the first free translated group (>= 1) */
		for (lookup = 1;
				lookup < group_handle->group_count &&
				group_handle->lookup_entries[lookup].ref_counter > 0;
				++lookup)
			;

		if (lookup < group_handle->group_count) {
			group_handle->lookup_entries[lookup].reverse_lookup =
				table_ptr;
			group_handle->lookup_entries[lookup].ref_counter += 1;

			*table_ptr = lookup;
		} else {
			return -1;
		}
	} else {
		group_handle->lookup_entries[lookup].ref_counter += 1;
	}
	*group_out = lookup;
	return 0;
}

/*
 * Drop one reference on a translated group; when the count reaches zero the
 * owning translation-table slot is cleared so the id can be reused.
 * Returns 0 on success, -1 on bad input.
 */
int flow_group_translate_release(void *handle, uint32_t translated_group)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	struct group_lookup_entry_s *lookup;

	if (group_handle == NULL ||
			translated_group >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (translated_group == 0)
		return 0;

	lookup = &group_handle->lookup_entries[translated_group];

	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
		lookup->ref_counter -= 1;
		if (lookup->ref_counter == 0) {
			/* last user gone: free the table slot for reuse */
			*lookup->reverse_lookup = 0;
			lookup->reverse_lookup = NULL;
		}
	}

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
/*
 * Fixed bit permutation of a 32-bit word: bit 1 -> bit 30, bit 30 -> bit 1,
 * odd bits 3..31 move down 3 positions, even bits 0..28 move up 3.
 */
static uint32_t shuffle(uint32_t x)
{
	uint32_t r = (x & 0x00000002) << 29;

	r |= (x & 0xAAAAAAA8) >> 3;
	r |= (x & 0x15555555) << 3;
	r |= (x & 0x40000000) >> 29;
	return r;
}
+
/* Rotate right by s, inverting the bits that wrap around (valid s: 1..31). */
static uint32_t ror_inv(uint32_t x, const int s)
{
	uint32_t low = x >> s;
	uint32_t wrapped = (~x) << (32 - s);

	return low | wrapped;
}
+
/*
 * Non-linear combine of two words: XOR of x, y and a term that is set
 * where exactly two of the four rotated/inverted views are set.
 */
static uint32_t combine(uint32_t x, uint32_t y)
{
	uint32_t a = ror_inv(x, 15);
	uint32_t b = ror_inv(x, 13);
	uint32_t c = ror_inv(y, 3);
	uint32_t d = ror_inv(y, 27);
	uint32_t two_of_four = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
			       (a & ~c & ~b & d) | (~a & c & b & ~d) |
			       (~a & c & ~b & d) | (~a & ~c & b & d);

	return x ^ y ^ two_of_four;
}
+
/* One 32-bit mixing round: combine the inputs, then permute the bits. */
static uint32_t mix(uint32_t x, uint32_t y)
{
	uint32_t combined = combine(x, y);

	return shuffle(combined);
}
+
/* ror_inv(,3) applied to both 32-bit lanes; m marks each lane's wrapped bits. */
static uint64_t ror_inv3(uint64_t x)
{
	const uint64_t m = 0xE0000000E0000000ULL;
	uint64_t shifted = (x >> 3) | m;
	uint64_t wrapped = (x << 29) & m;

	return shifted ^ wrapped;
}
+
/* ror_inv(,13) applied to both 32-bit lanes of x. */
static uint64_t ror_inv13(uint64_t x)
{
	const uint64_t m = 0xFFF80000FFF80000ULL;
	uint64_t shifted = (x >> 13) | m;
	uint64_t wrapped = (x << 19) & m;

	return shifted ^ wrapped;
}
+
/* ror_inv(,15) applied to both 32-bit lanes of x. */
static uint64_t ror_inv15(uint64_t x)
{
	const uint64_t m = 0xFFFE0000FFFE0000ULL;
	uint64_t shifted = (x >> 15) | m;
	uint64_t wrapped = (x << 17) & m;

	return shifted ^ wrapped;
}
+
/* ror_inv(,27) applied to both 32-bit lanes of x. */
static uint64_t ror_inv27(uint64_t x)
{
	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
	uint64_t shifted = (x >> 27) | m;
	uint64_t wrapped = (x << 5) & m;

	return shifted ^ wrapped;
}
+
/* Same bit permutation as shuffle(), applied to each 32-bit half of x. */
static uint64_t shuffle64(uint64_t x)
{
	uint64_t r = (x & 0x0000000200000002) << 29;

	r |= (x & 0xAAAAAAA8AAAAAAA8) >> 3;
	r |= (x & 0x1555555515555555) << 3;
	r |= (x & 0x4000000040000000) >> 29;
	return r;
}
+
/* Concatenate two 32-bit words into one 64-bit word: x high, y low. */
static uint64_t pair(uint32_t x, uint32_t y)
{
	uint64_t high = (uint64_t)x << 32;

	return high | y;
}
+
/*
 * 64-bit variant of combine(): XOR of x, y and a term set where exactly
 * two of the four rotated/inverted views are set.
 */
static uint64_t combine64(uint64_t x, uint64_t y)
{
	uint64_t a = ror_inv15(x);
	uint64_t b = ror_inv13(x);
	uint64_t c = ror_inv3(y);
	uint64_t d = ror_inv27(y);
	uint64_t two_of_four = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
			       (a & ~c & ~b & d) | (~a & c & b & ~d) |
			       (~a & c & ~b & d) | (~a & ~c & b & d);

	return x ^ y ^ two_of_four;
}
+
/* One 64-bit mixing round: combine the inputs, then permute the bits. */
static uint64_t mix64(uint64_t x, uint64_t y)
{
	uint64_t combined = combine64(x, y);

	return shuffle64(combined);
}
+
/*
 * Reduce a 16-word key to a 32-bit hash with a binary tree of mix rounds,
 * then three final 32-bit finalization rounds.
 */
static uint32_t calc16(const uint32_t key[16])
{
	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
	/*   0       1       2       3       4       5       6       7     Layer 1   */
	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
	/*       0               1               2               3         Layer 2   */
	/*        \______.______/                 \______.______/                    */
	/*               0                               1                 Layer 3   */
	/*                \______________.______________/                            */
	/*                               0                                 Layer 4   */
	/*                              / \                                          */
	/*                              \./                                          */
	/*                               0                                 Layer 5   */
	/*                              / \                                          */
	/*                              \./                                Layer 6   */
	/*                             value                                         */

	uint64_t folded;
	uint32_t h;

	folded = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
			     mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
		       mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
			     mix64(pair(key[6], key[14]), pair(key[7], key[15]))));

	h = mix((uint32_t)(folded >> 32), (uint32_t)folded);
	h = mix(h, ror_inv(h, 17));
	h = combine(h, ror_inv(h, 17));

	return h;
}
+
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
/*
 * Self-test (TESTING builds only): hash a fixed key, compare against a
 * known-good hash value, and re-derive the three per-bank indexes from the
 * raw hash to cross-check gethash()'s splitting. Returns the number of
 * mismatches (0 = pass).
 */
int hash_test(struct hasher_s *hsh, int banks, int record_bw)
{
	int res = 0;
	int val[10], resval[10];
	uint32_t bits = 0;

	/* only the first 4 of the 16 key words are non-zero; the rest are 0 */
	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
	const uint32_t result = 0xACECAE65; /* known-good hash for this key */

	for (int i = 0; i < 16; i++)
		printf("%08x,", inval[i]);
	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);

	uint32_t ret = gethash(hsh, inval, val);

	printf("Return VAL = %08X  ==  %08X\n", ret, result);
	res += (ret != result) ? 1 : 0;

	/* re-derive the per-bank indexes the same way gethash() splits them */
	int shft = (banks * record_bw) - 32;
	int mask = (1 << record_bw) - 1;

	if (shft > 0) {
		bits = (ret >> (32 - shft));
		ret ^= ret << shft;
	}

	resval[0] = ret & mask;
	ret >>= record_bw;
	resval[1] = ret & mask;
	ret >>= record_bw;
	resval[2] = ret & mask;
	/* NOTE(review): assumes banks * record_bw > 32; for shft <= 0 the
	 * shift amount record_bw - shft could reach the type width
	 */
	resval[2] |= (bits << (record_bw - shft));

	for (int i = 0; i < 3; i++) {
		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
		res += (val[i] != resval[i]) ? 1 : 0;
	}

	return res;
}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
/* Derived hash parameters for distributing keys over CAM banks. */
struct hasher_s {
	int banks; /* number of CAM banks */
	int cam_records_bw; /* bits needed to index one bank's records */
	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
	int cam_bw; /* banks * cam_records_bw: total index bits consumed */
};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
/*
 * KCC-CAM structures and defines
 */
/* Shadow entry tracking which flow owns a CAM address and its ref count. */
struct kcc_cam_distrib_s {
	struct kcc_flow_def_s *kcc_owner; /* NULL when the address is free */
	int ref_cnt;
};

/* Linear CAM address of this flow's record in bank "bnk" (GCC statement
 * expression, so "bnk" is evaluated only once)
 */
#define BE_CAM_KCC_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
	})


/* Byte size of the shadow CAM distribution table */
#define BE_CAM_ENTRIES \
	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
/* Byte size of the unique-ID allocation bitmap (one bit per ID) */
#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)

#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
/* Shared scratch stack of CAM addresses reserved during recursive cuckoo
 * moves -- file-scope state, so the move operation is not reentrant
 */
static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
/*
 * Attach (and on first use allocate) the shared KCC-CAM record manager.
 * A single allocation is partitioned into four consecutive regions:
 *   [0]                    CAM ownership table (BE_CAM_ENTRIES bytes)
 *   [+BE_CAM_ENTRIES]      cuckoo-move counter (uint32_t)
 *   [+sizeof(uint32_t)]    unique-ID bitmap (BE_UNIQUE_IDS_SIZE bytes)
 *   [+BE_UNIQUE_IDS_SIZE]  hasher state (struct hasher_s)
 * NOTE(review): the calloc result is not checked; on failure the pointer
 * fix-ups below would compute offsets from NULL and crash.
 */
void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
		void **handle)
{
	/*
	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
	 *  and manage CAM population and usage
	 */
	if (!*handle) {
		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
				 BE_UNIQUE_IDS_SIZE +
				 sizeof(struct hasher_s));
		NT_LOG(DBG, FILTER,
		       "Allocate NIC DEV KCC-CAM record manager\n");
	}
	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
	kcc->cuckoo_moves =
		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
					  BE_CAM_ENTRIES + sizeof(uint32_t));

	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
				       BE_UNIQUE_IDS_SIZE);
	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
/*
 * Write this flow's key, KM category and unique ID to the CAM address
 * (bank, record_indexes[bank]) and flush it, then claim the address in the
 * shadow distribution table. Returns 0 on success, -1 on any write error.
 */
static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
{
	int res;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
				 kcc->key32[0]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
				 kcc->key32[1]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
				 kcc->km_category);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	/* mark the CAM address as owned by this flow */
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
	return res;
}
+
/*
 * Zero the CAM entry at (bank, record_indexes[bank]) in hardware, release
 * its shadow ownership, and clear the flow's key/category. The unique ID
 * in kcc->id is intentionally NOT freed here. Returns 0 on success.
 */
static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
{
	int res = 0;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;

	kcc->key64 = 0UL;
	kcc->km_category = 0;
	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
	return res;
}
+
/*
 * Try to move this flow's CAM entry from its current bank to any free bank
 * (one cuckoo step). Returns 1 when the entry was moved, 0 when no free
 * bank exists or the hardware write failed.
 */
static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
{
	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);

	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
		/* It will not select itself */
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
				NULL) {
			/*
			 * Populate in new position
			 */
			int res = kcc_cam_populate(kcc, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank.
			 * HW flushes are really not needed: the old addresses
			 * are always taken over by the caller. If you change
			 * this code in future updates, this may no longer be
			 * true!
			 */
			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
			.kcc_owner = NULL;
			NT_LOG(DBG, FILTER,
			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       kcc->bank_used, bank,
			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
			       BE_CAM_KCC_DIST_IDX(bank));

			kcc->bank_used = bank;
			(*kcc->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Try to free the CAM address bank_idx by recursively cuckoo-moving its
 * current owner elsewhere, descending at most "levels" levels. Addresses
 * claimed along the current chain are recorded on the file-scope
 * kcc_cam_addr_reserved_stack (hence: not reentrant) so the recursion
 * never revisits them. Returns 1 when a move chain succeeded, 0 otherwise.
 */
static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
				       int bank_idx, int levels,
				       int cam_adr_list_len)
{
	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;

	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);

	if (kcc_move_cuckoo_index(kcc))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* reserve this address for the duration of the deeper search */
	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
		int reserved = 0;
		int new_idx = BE_CAM_KCC_DIST_IDX(i);

		/* skip addresses already reserved by this move chain */
		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (kcc_cam_addr_reserved_stack[i_reserved] ==
					new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
						      cam_adr_list_len);
		if (res) {
			/* deeper move freed a slot: this one must now succeed */
			if (kcc_move_cuckoo_index(kcc))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/* Scratch hash-input buffer; words 2-15 are never written and stay zero.
 * File-scope static, so CAM insertion is not thread-safe.
 */
static uint32_t kcc_hsh_key[16];

/*
 * Insert this flow's KCC entry into CAM: hash the 64-bit key to one record
 * index per bank, take any free bank, otherwise cuckoo-move existing
 * entries (up to 4 levels deep) to make room. Returns 0 on success, -1
 * when no room could be made.
 */
static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
{
	int res = 0;
	int val[MAX_BANKS];

	/* the two key words are swapped into the hash input */
	kcc_hsh_key[0] = kcc->key32[1];
	kcc_hsh_key[1] = kcc->key32[0];
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");

	/* 2-15 never changed - remains zero */

	gethash(kcc->hsh, kcc_hsh_key, val);

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
		kcc->record_indexes[i] = val[i];
	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
	       kcc->record_indexes[0], kcc->record_indexes[1],
	       kcc->record_indexes[2]);

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
				NULL) {
			bank = i_bank;
			break;
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
				i_bank++) {
			if (kcc_move_cuckoo_index_level(kcc,
							BE_CAM_KCC_DIST_IDX(i_bank),
							4, 0)) {
				bank = i_bank;
				break;
			}
		}

		if (bank < 0)
			return -1;
	}

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
	       BE_CAM_KCC_DIST_IDX(bank));
	res = kcc_cam_populate(kcc, bank);
	if (res == 0) {
		kcc->flushed_to_target = 1;
		kcc->bank_used = bank;
	} else {
		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
	}
	return res;
}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Table of exact mask patterns a match element must equal (word-for-word)
+ * to be eligible for the CAM; anything else is marked for the TCAM.
+ * Checked in km_add_match_elem().
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* One slot per CAM record; km_owner tracks which flow occupies it. */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/*
+ * NOTE: both macros capture a local `km` from the calling scope.
+ * CAM_KM_DIST_IDX uses a GNU statement expression (GCC/Clang extension).
+ */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/* shared scratch stack for cuckoo moves - NOTE(review): not reentrant,
+ * assumes single-threaded flow programming — confirm locking at callers
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* One slot per TCAM (bank, record); km_owner tracks the occupying flow. */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach this flow definition to the per-NIC-device KM resource manager.
+ * On first use (*handle == NULL) a single allocation is carved into:
+ *   [CAM occupancy table][cuckoo move counter][TCAM occupancy table][hasher]
+ * and the hasher is initialized over the CAM banks/records.
+ * NOTE(review): the calloc() result is not checked; a failed allocation
+ * leads to NULL dereference in init_hasher() — confirm OOM policy.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* slice the shared allocation into the four sub-regions */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Free the per-NIC-device KM resource manager allocated by
+ * km_attach_ndev_resource_management() and clear the caller's handle.
+ * Safe to call with an already-NULL *handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Append one match element (value words + mask words + extractor offset
+ * info) to the flow definition. word_len 3 is normalized to 4 with a zero
+ * fourth word. Each element is checked against the cam_masks table: an
+ * exact mask match makes it CAM-eligible, otherwise it is marked for TCAM.
+ * Returns 0 on success, -1 on invalid word_len.
+ * NOTE(review): num_ftype_elem is not checked against MAX_MATCH_FIELDS
+ * here — confirm callers bound the element count.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable the extra "info" (color) word appended to the KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = !!on;
+}
+
+/*
+ * Find the first unmarked, non-sideband (no SWX_INFO flag) match element
+ * with word_len == size. Returns its index, or -1 when none remains.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (!marked[i] &&
+				!(km->match[i].extr_start_offs_id & SWX_INFO) &&
+				km->match[i].word_len == size)
+			return i;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Debug-only: map a frame_offs_e / sideband id to a human-readable name. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow definition:
+ *  - assign QWORD then SWORD extractors to the collected match elements,
+ *  - append sideband (SWX) elements,
+ *  - build the combined entry word/mask arrays,
+ *  - decide whether the flow targets CAM or TCAM and, for TCAM, compute
+ *    the legal start-bank offsets for the key length.
+ * Returns 0 on success, -1 when the match cannot be mapped onto the
+ * available extractors or the key size is unsupported by the TCAM.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords - prefer the largest remaining element first */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * Fix: populate all four candidate start offsets
+			 * (8..11). The original wrote only start_offsets[0]
+			 * in every loop iteration, leaving entries 1-3
+			 * uninitialized while num_start_offsets claimed 4.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare a new flow key definition (km) against an existing one (km1,
+ * the original creator of the KM recipe).
+ * Returns:
+ *   km1->flow_type  - same recipe layout, reusable (masks compatible);
+ *   0               - not compatible / cannot reuse;
+ *  -1               - exact flow clash: identical masked key already
+ *                     programmed.
+ * Side effects on success: copies cam_paired (CAM) or tcam_start_bank
+ * (TCAM, with tcam_record reset to -1) from km1 into km.
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/*
+	 * NOTE(review): this loop mixes km->match[i] (SB_MAC_PORT check) and
+	 * km->match_map[i] (SB_KCC_ID check). Both arrays cover the same set
+	 * of elements in different order, but the mix looks unintentional —
+	 * confirm whether both checks should use match_map.
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program KM recipe `index` in HW from this flow definition: configure the
+ * SWX/SWORD/QWORD extractor selects and offsets, the key mask (A side),
+ * and the target-specific fields (CAM: info/EL/FTM/paired; TCAM: bank
+ * bitmap and key length after finding a bank mapping).
+ * Returns 0 on success, -1 on any unmappable element or HW constraint.
+ * The exact hw_mod_km_rcp_set() call order mirrors the HW register layout;
+ * kept byte-identical on purpose.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband word: only one, and only for CAM */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* plain SWORD: first goes to DW8, second to DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* first QWORD goes to QW0, second to QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written MSW-first into the recipe mask registers */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words + flow type into CAM `bank` at the bank's
+ * precomputed record index, claim the occupancy slot(s), and flush to HW.
+ * A key longer than one CAM record spills into the next record (paired
+ * entry, asserted). Returns OR'ed hw_mod result codes (0 on success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* remaining words go into the paired (next) record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's key words + flow type in CAM `bank` (and the paired
+ * record if the key spans two), release the occupancy slot(s), and flush.
+ * Mirror of cam_populate(). Returns OR'ed hw_mod result codes.
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate `km` from its current CAM bank to any other bank whose
+ * hashed record slot (and its pair, if paired) is free. On success updates
+ * bank_used and the cuckoo move counter and returns 1; returns 0 when no
+ * free bank exists or HW programming fails.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to move the flow occupying
+ * km_parent->cam_dist[bank_idx] somewhere else, recursing up to `levels`
+ * deep. cam_addr_reserved_stack (file-scope, non-reentrant) records
+ * addresses already claimed along the current displacement path so the
+ * recursion does not revisit them. Returns 1 when a move chain succeeded,
+ * 0 otherwise. Only same-pairedness entries are moved.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address for the remainder of the recursion */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		/* free a deeper slot, then retry the direct move */
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program this flow's key into the KM CAM: hash the key into per-bank
+ * record indexes, pick a bank with a free slot (or free one via cuckoo
+ * moves up to depth 4), then populate and flush. On success sets
+ * flushed_to_target and bank_used. Returns 0 on success, -1 when no bank
+ * can be freed, or the hw_mod error from cam_populate().
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): log assumes at least 3 CAM banks — confirm
+	 * nb_cam_banks >= 3 on all supported FPGA variants
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Scan bank `start_bank` for a record index that is free across all
+ * key_word_size consecutive banks. On success stores it in km->tcam_record
+ * and returns 1; returns 0 when no such record exists.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			/* the same record must be free in every bank the key spans */
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find a (start bank, record) placement for this key by trying each legal
+ * start offset computed in km_key_create(). On success sets
+ * tcam_start_bank/tcam_record and returns 0; returns -1 when the TCAM is
+ * full for this key length.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	/* Search record and start index for this flow */
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		if (tcam_find_free_record(km, km->start_offsets[bs_idx])) {
+			km->tcam_start_bank = km->start_offsets[bs_idx];
+			NT_LOG(DBG, FILTER,
+			       "Found space in TCAM start bank %i, record %i\n",
+			       km->tcam_start_bank, km->tcam_record);
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word into TCAM `bank` for `record`: for each of
+ * the four key bytes, set this record's bit in every 256-entry byte-value
+ * row whose value matches (word & mask) under the mask, clear it elsewhere,
+ * then flush the bank and claim the occupancy slot. A fully-masked-out
+ * byte (a_m == 0) matches all 256 values. Returns OR'ed hw_mod results.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program this flow's key into the KM TCAM: find a record if none was
+ * inherited from a reused recipe, write the TCI (color + flow type), then
+ * write each key word into its bank via tcam_write_word(). Debug mode is
+ * temporarily disabled around the bulk word writes to reduce log volume.
+ * On success sets flushed_to_target. Returns 0 on success, -1 when no
+ * record is free, or the first hw_mod error.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record's hit bit for every (byte, value) combination in a
+ * TCAM bank, flush the bank and drop the record's ownership.
+ * Returns 0 on success, non-zero on backend error.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* location of this record's bit within the 3 x 32-bit record words */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove this flow's entry from the TCAM: zero the TCI (color and flow
+ * type) of the record and clear the record in every bank used by the
+ * key words.  Returns 0 on success, -1 when no entry location is set,
+ * non-zero on backend error.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	/* suppress per-register debug output for the bulk TCAM writes */
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one consecutive bank per 32-bit key word, stop on first error */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Attach km to an already-programmed, identical match entry instead of
+ * writing a new one to hardware.  km is appended at the tail of the
+ * reference chain rooted at km1, and inherits the entry's location
+ * information from the current chain tail.
+ * Returns 0 on success, -1 for an unsupported target.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+	int res = 0;
+
+	/* every member of the chain points at the same root entry */
+	km->root = km1->root ? km1->root : km1;
+
+	/* walk to the end of the chain and append km there */
+	while (tail->reference)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = tail->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->cam_paired = tail->cam_paired;
+		km->bank_used = tail->bank_used;
+		km->flushed_to_target = tail->flushed_to_target;
+		break;
+	case KM_TCAM:
+		km->tcam_start_bank = tail->tcam_start_bank;
+		km->tcam_record = tail->tcam_record;
+		km->flushed_to_target = tail->flushed_to_target;
+		break;
+	case KM_SYNERGY:
+	default:
+		res = -1;
+		break;
+	}
+
+	return res;
+}
+
+/*
+ * Write this match entry to the selected target hardware (CAM or TCAM),
+ * using the given color as the entry data.
+ * Returns 0 on success, -1 on failure or for an unsupported target.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	int res = -1;
+
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		res = km_write_data_to_cam(km);
+	else if (km->target == KM_TCAM)
+		res = km_write_data_to_tcam(km);
+	/* KM_SYNERGY and anything else is unsupported: res stays -1 */
+
+	return res;
+}
+
+/*
+ * Detach this km flow definition from its match entry.
+ *
+ * Three cases:
+ *  - km is a chain member (has a root): unlink it from the reference
+ *    chain; the hardware entry stays, owned by the remaining members.
+ *  - km is a chain head (has references): hand the hardware resource
+ *    ownership over to the next reference in the chain.
+ *  - km owns the entry alone and it was flushed: reset the hardware
+ *    entry itself.
+ * Returns 0 on success, -1 for an unsupported target.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* chain member: unlink km from the root's reference chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* chain head: next reference becomes the new owner/root */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			/* a paired entry occupies two distribution slots */
+			if (km->key_word_size + !!km->info_set > 1) {
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			/* transfer ownership of every bank the key spans */
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* sole owner: clear the entry in hardware */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One entry in the tunnel database (singly linked list). */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg; /* tunnel match values */
+	struct tunnel_cfg_s cfg_mask; /* mask applied to cfg when matching */
+	uint32_t flow_stat_id; /* (uint32_t)-1 until set by the flow code */
+	uint8_t vport; /* virtual port allocated for this tunnel */
+	int refcnt; /* users of this entry; freed when it reaches 0 */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/*
+ * Test whether a port number lies in the virtual tunnel port range
+ * [VIRTUAL_TUNNEL_PORT_OFFSET, MAX_HW_VIRT_PORTS).  Returns 1 or 0.
+ */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET)
+		return 0;
+	return virt_port < MAX_HW_VIRT_PORTS ? 1 : 0;
+}
+
+/*
+ * New functions for use with OVS 2.17.2
+ */
+static struct tunnel_s *tunnels;
+
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual tunnel port.
+ * Returns the port number, or 255 when the range is exhausted.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	const uint8_t range = MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET;
+
+	for (uint8_t idx = 0; idx < range; idx++) {
+		if (vport[idx] == 0) {
+			vport[idx] = 1;
+			return (uint8_t)(idx + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ *
+ * Returns 0 on success.  For a port outside the virtual range the
+ * original code returned -1 through the uint8_t return type, which
+ * implicitly wraps to 255; return 255 explicitly to keep the exact
+ * same value without the confusing signed-to-unsigned conversion.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return 255; /* error: port outside the virtual port range */
+}
+
+/*
+ * Compare two values under the intersection of both masks:
+ * true when the bits covered by msk1 AND msk2 are equal in v1 and v2.
+ * Statement-expression form evaluates each argument exactly once.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/*
+ * Masked equality of two IPv4 tunnel configurations: source/destination
+ * addresses and UDP source/destination ports must all match under the
+ * combined masks.
+ */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked equality of two IPv6 tunnel configurations: both 64-bit halves
+ * of source and destination addresses plus the UDP ports must match
+ * under the combined masks.
+ */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Compare a stored tunnel with a tunnel definition, honoring both sides'
+ * masks.  Tunnel types must agree; the address comparison follows the
+ * stored entry's IP version.  Returns non-zero on match, 0 otherwise.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask, tnlcfg,
+					  tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask, tnlcfg,
+				  tnlcfg_mask);
+}
+
+/*
+ * Find a tunnel in the database matching the given definition, or create
+ * and insert a new one.  With tun_set, an exact (cfg + mask) duplicate is
+ * searched among defined tunnels and pre-configured tunnels may be
+ * promoted to defined; without it, a masked match lookup is done.
+ * Increments the refcount on a found tunnel.  Returns the tunnel, or
+ * NULL when no virtual port or memory is available.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* don't leak the virtual port on alloc failure */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel.  When the last reference goes away the
+ * tunnel is unlinked from the database, its virtual port is released and
+ * the entry is freed.  Returns 0 on success, -1 when the tunnel is not
+ * present in the database.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s **link = &tunnels;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the list link that points at this tunnel */
+	while (*link && *link != tnl)
+		link = &(*link)->next;
+
+	if (*link == NULL) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--tnl->refcnt == 0) {
+		*link = tnl->next;
+		flow_tunnel_free_virt_port(tnl->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tnl->vport);
+		free(tnl);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a flow element list describing a tunnel (eth/IPv4/IPv6/UDP/VXLAN)
+ * into a tunnel configuration and look it up in / add it to the tunnel
+ * database.  A NULL @vni means this is a tunnel "set" command; otherwise
+ * the VXLAN VNI is returned through @vni.  On success *idx is advanced to
+ * the next unparsed element.  Returns the tunnel or NULL on invalid input.
+ *
+ * Fix: tnlcfg_mask is now zero-initialized like tnlcfg; previously its
+ * unassigned fields were read uninitialized by tunnel_get()'s memcmp and
+ * memcpy calls (undefined behavior, nondeterministic matching).
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		/* elements must appear in non-decreasing type order */
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* VNI is a 24-bit big-endian field */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port assigned to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	const uint8_t port = rtnl->vport;
+
+	return port;
+}
+
+/* Bind a flow statistics id to the tunnel, marking it as fully defined. */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy the tunnel definition for a given virtual port into @tuncfg.
+ * A @flow_stat_id of (uint32_t)-1 matches any statistics id.
+ * Returns 0 when found, -1 otherwise.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun; tun = tun->next) {
+		int stat_ok = (flow_stat_id == tun->flow_stat_id ||
+			       flow_stat_id == (uint32_t)-1);
+
+		if (tun->vport == vport && stat_ok) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * Sum 16-bit words for an IP-style checksum and fold carries back into
+ * 16 bits; @seed allows chaining partial sums.  The final one's
+ * complement (~) is left to the caller.
+ * NOTE(review): for odd sizes the trailing byte is taken as the low
+ * byte of the next be16_t via the (unsigned char) cast, which is
+ * host-endian dependent - confirm this matches the intended wire layout.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy @size bytes of the element's spec (its mask is ignored). */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build a VXLAN encapsulation header in fd->tun_hdr from a flow element
+ * list (ETH, IPv4 or IPv6, UDP, VXLAN, optional PORT_ID).  Element specs
+ * are copied unmasked; mandatory protocol fields (IP version, TTL/hop
+ * limit, UDP protocol number, DF flag, VXLAN I-flag) are forced to sane
+ * values.  For IPv4 a partial header checksum is precalculated with the
+ * length field zeroed.  Returns 0 on success, -1 on unsupported input.
+ *
+ * Fixes: debug MAC prints previously printed addr_b[5] twice and never
+ * addr_b[4]; the checksum log used %u for a size_t expression.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force version 4 / IHL 5 */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* precalc checksum with length field zeroed; HW fills length */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       (unsigned int)(fd->tun_hdr.len -
+				      sizeof(struct flow_elem_eth)));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Ask the backend whether the CAT (categorizer) FPGA module exists. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Size the CAT module cache from backend-reported resource counts and
+ * allocate the register shadow tables for the detected module version.
+ *
+ * Returns 0 on success, or a negative error code on unsupported version,
+ * invalid resource count, or allocation failure.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory resources: a count of zero (or less) is an error. */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* Optional resources: zero is allowed (feature absent), only
+	 * negative counts are rejected.
+	 */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	/* Module ids of the (up to two) KM/FLM interfaces. */
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when KCC is absent. */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * Allocate one contiguous block (anchored at cat.base inside the
+	 * common header) holding every shadow table for this version.
+	 * Arguments come in (pointer, element count, element size) triples;
+	 * the leading number is the triple count.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	/* v21: doubled FTE table (4 keys) and per-interface KCE/KCS,
+	 * otherwise reuses the v18 record layouts.
+	 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	/* v22: adds the CCE/CCS tables and a 12th CTS category. */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the single allocation backing all CAT shadow tables.
+ * callocate_mod() places every table in one block anchored at cat.base,
+ * so one free() releases them all.  free(NULL) is a no-op, so no guard
+ * is needed; base is reset to NULL to protect against double-free.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Preset CFN entry i to a permissive default: clear the whole entry,
+ * then open every protocol/error match field so the function accepts
+ * all traffic until configured otherwise.
+ *
+ * NOTE(review): only the return value of the initial PRESET_ALL write is
+ * propagated; the subsequent per-field writes reuse the same (already
+ * validated) index and their return values are ignored.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields below only exist from module version 21 on. */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the CAT module: zero the entire shadow cache, then flush every
+ * table to hardware so device state matches the cleared cache.
+ *
+ * For version 18 there is a single KM interface; from v21 on the KCE,
+ * KCS and FTE tables are flushed once per configured KM/FLM interface.
+ * Returns 0 on success, -1 if any flush fails.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		/* v21+: flush per KM/FLM interface, first interface m0 ... */
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* ... then interface m1, when a second one is present. */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC is optional; only flush when the table was sized > 0. */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist from version 22 on. */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Write CFN cache entries [start_idx .. start_idx + count) to hardware.
+ * ALL_ENTRIES flushes the whole table and requires start_idx == 0.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int entries = count;
+
+	if (entries == ALL_ENTRIES) {
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		entries = be->cat.nb_cat_funcs;
+	} else if ((unsigned int)(start_idx + entries) > be->cat.nb_cat_funcs) {
+		return error_index_too_large(__func__);
+	}
+
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					entries);
+}
+
+/*
+ * Read or write a single CFN (categorizer function) cache field.
+ *
+ * field:    CFN field selector, or a pseudo-op (SET_ALL_DEFAULTS,
+ *           PRESET_ALL, COMPARE, FIND, COPY_FROM) acting on whole entries
+ * index:    CFN entry index, validated against nb_cat_funcs
+ * word_off: word offset for multi-word fields (only PM_CMP here)
+ * value:    input for set, output for get
+ * get:      non-zero reads the field, zero writes it
+ *
+ * Returns 0 on success, or a negative error code for a bad index,
+ * unsupported field, or unsupported module version.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is the only two-word field in this entry */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	/* v21 and v22 share the cat_v21_cfn_s entry layout; v21 adds the
+	 * COPY_FROM pseudo-op, tunnel checksum/TTL error fields and a
+	 * second KM OR field (km1_or).
+	 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* duplicate entry *value into entry index */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field; thin wrapper over hw_mod_cat_cfn_mod(get=0). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one CFN field; thin wrapper over hw_mod_cat_cfn_mod(get=1). */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map a (interface selector, module id) pair onto a cache slot index.
+ *
+ * Version 18 has a single interface, always slot 0.  From v21 on, the
+ * id must match one of the two configured module ids: m0 maps to slot 0
+ * and m1 to slot 1, except that KM_FLM_IF_SECOND only accepts m1.
+ * Returns 0 or 1, or a negative error code for an unknown id.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num != KM_FLM_IF_SECOND && be->cat.km_if_m0 == km_if_id)
+		return 0;
+
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush KCE cache entries to hardware.  Each KCE entry carries the
+ * enable bits for 8 category functions, so the table holds
+ * nb_cat_funcs / 8 entries.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const unsigned int nb_entries = be->cat.nb_cat_funcs / 8;
+	int km_if_idx;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM/FLM interface slot this module id maps to */
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries for the KM module (km_if_id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCE entries for the FLM module (km_if_id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Read or write one KCE field.  index addresses an 8-CFN bundle
+ * (table size nb_cat_funcs / 8).  In v21/v22 each entry holds one
+ * enable bitmap per KM/FLM interface, selected via km_if_idx.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KCE field for the KM module (km_if_id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Read one KCE field for the KM module (km_if_id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Write one KCE field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Read one KCE field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/* Flush KCS cache entries (one per category function) to hardware. */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const unsigned int nb_entries = be->cat.nb_cat_funcs;
+	int km_if_idx;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM/FLM interface slot this module id maps to */
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries for the KM module (km_if_id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCS entries for the FLM module (km_if_id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Read or write one KCS field (category selection per CFN).  In v21/v22
+ * each entry holds one category word per KM/FLM interface, selected via
+ * km_if_idx.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KCS field for the KM module (km_if_id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Read one KCS field for the KM module (km_if_id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Write one KCS field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Read one KCS field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE (flow type enable) cache entries to hardware.  The table
+ * holds nb_cat_funcs / 8 * nb_flow_types * key_cnt entries, where
+ * key_cnt is 2 before module version 20 and 4 from version 20 on
+ * (matching the v18 vs v21 allocation in hw_mod_cat_alloc()).
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries for the KM module (km_if_id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush FTE entries for the FLM module (km_if_id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Read or write one FTE field.  Valid indices span
+ * nb_cat_funcs / 8 * nb_flow_types * key_cnt entries, with key_cnt 2
+ * before version 20 and 4 from version 20 on.  In v21/v22 each entry
+ * holds one enable bitmap per KM/FLM interface, selected via km_if_idx.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one FTE field via the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Read one FTE field via the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Write one FTE field via the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Read one FTE field via the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * Flush CTE entries [start_idx, start_idx + count) to hardware.
+ * A count of ALL_ENTRIES flushes the whole table (nb_cat_funcs entries).
+ */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int tbl_sz = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = tbl_sz;
+	if ((unsigned int)(start_idx + count) > tbl_sz)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CTE field.  Versions 18/21/22 all access the
+ * v18 view here, i.e. the CTE entry layout is shared across these versions.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTE field; set-mode wrapper around hw_mod_cat_cte_mod(). */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTE field; get-mode wrapper around hw_mod_cat_cte_mod(). */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CTS entries; each category function occupies addr_size words.
+ * NOTE(review): the _VER_ < 15 fallback of 8 here has no counterpart in
+ * hw_mod_cat_cts_mod() — confirm versions below 15 are not reachable.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CTS field.  The v18 entry layout (cat_a /
+ * cat_b word pair) is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* words per category function; see also hw_mod_cat_cts_flush() */
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CTS field; set-mode wrapper around hw_mod_cat_cts_mod(). */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTS field; get-mode wrapper around hw_mod_cat_cts_mod(). */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush COT entries [start_idx, start_idx + count) to hardware.
+ * A count of ALL_ENTRIES flushes the whole table (max_categories entries).
+ */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int tbl_sz = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = tbl_sz;
+	if ((unsigned int)(start_idx + count) > tbl_sz)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one COT field.  Besides plain field access this
+ * supports three pseudo-fields: PRESET_ALL memsets the whole entry to the
+ * byte in *value, COMPARE/FIND delegate to the shared index helpers.
+ * The v18 entry layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			/* write-only pseudo-field */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one COT field; set-mode wrapper around hw_mod_cat_cot_mod(). */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+/* Read one COT field; get-mode wrapper around hw_mod_cat_cot_mod(). */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CCT entries [start_idx, start_idx + count) to hardware; the table
+ * holds four entries per category function.  ALL_ENTRIES flushes everything.
+ */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int tbl_sz = be->cat.nb_cat_funcs * 4;
+
+	if (count == ALL_ENTRIES)
+		count = tbl_sz;
+	if ((unsigned int)(start_idx + count) > tbl_sz)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CCT field.  The v18 entry layout is shared
+ * by versions 18/21/22; there are four entries per category function.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCT field; set-mode wrapper around hw_mod_cat_cct_mod(). */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCT field; get-mode wrapper around hw_mod_cat_cct_mod(). */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush KCC CAM entries [start_idx, start_idx + count) to hardware.
+ * A count of ALL_ENTRIES flushes the whole CAM (kcc_size entries).
+ */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int cam_sz = be->cat.kcc_size;
+
+	if (count == ALL_ENTRIES)
+		count = cam_sz;
+	if ((unsigned int)(start_idx + count) > cam_sz)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one KCC CAM field.  The KEY field is two 32-bit
+ * words wide (key[2]), selected by word_off; the other fields ignore
+ * word_off.  The v18 entry layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			/* key[] has exactly two words */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KCC CAM field; set-mode wrapper around hw_mod_cat_kcc_mod(). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KCC CAM field; get-mode wrapper around hw_mod_cat_kcc_mod(). */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush EXO entries [start_idx, start_idx + count) to hardware.
+ * A count of ALL_ENTRIES flushes the whole table (nb_pm_ext entries).
+ */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int tbl_sz = be->cat.nb_pm_ext;
+
+	if (count == ALL_ENTRIES)
+		count = tbl_sz;
+	if ((unsigned int)(start_idx + count) > tbl_sz)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one EXO field.  OFS is a signed offset
+ * (int32_t), hence get_set_signed().  The v18 entry layout is shared by
+ * versions 18/21/22.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one EXO field; set-mode wrapper around hw_mod_cat_exo_mod(). */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+/* Read one EXO field; get-mode wrapper around hw_mod_cat_exo_mod(). */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush RCK entries [start_idx, start_idx + count) to hardware; the table
+ * holds 64 entries per pattern-matcher extractor.  ALL_ENTRIES flushes all.
+ */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int tbl_sz = be->cat.nb_pm_ext * 64;
+
+	if (count == ALL_ENTRIES)
+		count = tbl_sz;
+	if ((unsigned int)(start_idx + count) > tbl_sz)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one RCK data word.  The v18 entry layout is
+ * shared by versions 18/21/22; 64 entries per pattern-matcher extractor.
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCK field; set-mode wrapper around hw_mod_cat_rck_mod(). */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCK field; get-mode wrapper around hw_mod_cat_rck_mod(). */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush LEN entries [start_idx, start_idx + count) to hardware.
+ * A count of ALL_ENTRIES flushes the whole table (nb_len entries).
+ */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int tbl_sz = be->cat.nb_len;
+
+	if (count == ALL_ENTRIES)
+		count = tbl_sz;
+	if ((unsigned int)(start_idx + count) > tbl_sz)
+		return error_index_too_large(__func__);
+
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one LEN (length comparator) field.  The v18
+ * entry layout is shared by versions 18/21/22.
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LEN field; set-mode wrapper around hw_mod_cat_len_mod(). */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+/* Read one LEN field; get-mode wrapper around hw_mod_cat_len_mod(). */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/* Flush CCE entries to hardware; the CCE table has a fixed 4 entries. */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CCE field (version 22 only).
+ *
+ * Bug fix: the index bound must be the fixed CCE table size of 4 entries
+ * (the bound hw_mod_cat_cce_flush() uses), not be->cat.nb_len — that check
+ * was copied from the LEN table and accepts out-of-range indexes (or
+ * rejects valid ones) whenever nb_len != 4.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCE field; set-mode wrapper around hw_mod_cat_cce_mod(). */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCE field; get-mode wrapper around hw_mod_cat_cce_mod(). */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+/* Flush CCS entries to hardware; the CCS table has a fixed 1024 entries. */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CCS field (version 22 only).
+ *
+ * Bug fix: the index bound must be the fixed CCS table size of 1024
+ * entries (the bound hw_mod_cat_ccs_flush() uses), not be->cat.nb_len —
+ * that check was copied from the LEN table and accepts out-of-range
+ * indexes (or rejects valid ones) whenever nb_len != 1024.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CCS field; set-mode wrapper around hw_mod_cat_ccs_mod(). */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCS field; get-mode wrapper around hw_mod_cat_ccs_mod(). */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CAT CFN (categorizer function) shadow entry, module version 18.
+ * One 32-bit shadow word per hardware field.
+ */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE entry: key categorizer enable bitmap. */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS entry: category selector. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE entry: flow type enable bitmap. */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE entry: per-module enable bits, also addressable as one bitmap word. */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS entry: category A/B word pair. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT entry: color and KM words. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT entry: color and KM words. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO entry: dyn selector plus signed offset. */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK entry: single data word. */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN entry: length comparator bounds, dyn selectors and invert flag. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC CAM entry: two-word key, category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Version 18 view: pointers to the per-table shadow arrays. */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/* CAT CFN shadow entry, module version 21: adds tunnel checksum/TTL error
+ * checks and splits km_or into per-interface km0_or/km1_or.
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE entry, v21: one enable bitmap per KM/FLM interface. */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS entry, v21: one category word per KM/FLM interface. */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE entry, v21: one enable bitmap per KM/FLM interface. */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Version 21 view: v21 CFN/KCE/KCS/FTE, remaining tables reuse v18. */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CTE entry, v22: adds the rrb enable bit to the v18 bitmap layout. */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE entry, v22: immediate and indirection words. */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS entry, v22: per-module enable/value word pairs plus SB type/data. */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Version 22 view: v21 CFN/KCE/KCS/FTE, v22 CTE, new CCE/CCS tables,
+ * remaining tables reuse v18.
+ */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Return true when the backend reports an FLM module in the FPGA. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Query FLM capabilities from the backend, record them in be->flm and
+ * allocate the version-specific shadow-register cache as one block.
+ *
+ * Returns 0 on success, an error_resource_count()/error_unsup_ver() code
+ * when a capability is missing or the FLM version is unsupported, or -1
+ * when the cache allocation fails.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* Report the resource actually queried (was "flm_variant"). */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* 26 must match the number of ptr/count/size triplets below */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20 = the full v17 set plus 12 extra statistics counters */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM shadow-register cache allocated by hw_mod_flm_alloc();
+ * all version structs live in the single allocation anchored at
+ * be->flm.base.  free(NULL) is a no-op, so no guard is needed.  The
+ * pointer is reset to NULL so a double free or use-after-free fails fast.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM module: zero the cached register image, set the default
+ * SDRAM split and write the affected register groups back to hardware.
+ * Errors from the individual set/flush steps are propagated to the
+ * caller instead of being silently dropped.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	int err;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	err = hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+				     0x10);
+	if (err)
+		return err;
+
+	err = hw_mod_flm_control_flush(be);
+	if (err)
+		return err;
+	err = hw_mod_flm_timeout_flush(be);
+	if (err)
+		return err;
+	err = hw_mod_flm_scrub_flush(be);
+	if (err)
+		return err;
+	return hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/* Write the cached CONTROL register group to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get (get != 0) or set (get == 0) one CONTROL field in the shadow copy.
+ * Changes only take effect in hardware after hw_mod_flm_control_flush().
+ * Returns 0, or a non-zero error for unknown fields/versions.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			/* Byte-fill the whole CONTROL struct; write-only. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CONTROL field in the shadow copy. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one CONTROL field from the shadow copy. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached STATUS register group to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached STATUS register group from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Get or set one STATUS field in the shadow copy only; call
+ * hw_mod_flm_status_update()/..._flush() to synchronize with hardware.
+ */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one STATUS field in the shadow copy. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one STATUS field from the shadow copy. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached TIMEOUT register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the single TIMEOUT field in the shadow copy. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the TIMEOUT field in the shadow copy. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the TIMEOUT field from the shadow copy. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached SCRUB register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the single SCRUB field in the shadow copy. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the SCRUB field in the shadow copy. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the SCRUB field from the shadow copy. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/*
+ * LOAD_BIN / LOAD_PPS / LOAD_LPS / LOAD_APS: four identical
+ * flush/mod/set/get families, one per single-field load register.
+ */
+
+/* Write the cached LOAD_BIN register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_BIN field in the shadow copy. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_PPS register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_PPS field in the shadow copy. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_LPS register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_LPS field in the shadow copy. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_APS register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_APS field in the shadow copy. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached PRIO register group to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set one PRIO field (limit/ft pairs 0..3) in the shadow copy. */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached PST table to
+ * hardware; count == ALL_ENTRIES means the whole table.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/* Get/set one field of PST table entry `index` in the shadow copy. */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			/* Byte-fill the whole entry; write-only. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Write [start_idx, start_idx + count) of the cached RCP (recipe) table
+ * to hardware; count == ALL_ENTRIES means the whole table.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get/set one field of RCP entry `index` in the shadow copy.
+ * HW_FLM_RCP_MASK is a 10-word array and is copied in full; all other
+ * fields are single 32-bit values.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			/* Byte-fill the whole entry; write-only. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* value must point at 10 uint32_t words */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the 10-word RCP mask of entry `index` (HW_FLM_RCP_MASK only). */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	/* NOTE(review): error_unsup_ver() for a wrong field looks like a
+	 * copy-paste; an unsupported-field error would seem more accurate
+	 * here (same pattern in hw_mod_flm_rcp_set below) — confirm.
+	 */
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/* Set one scalar RCP field; HW_FLM_RCP_MASK must use rcp_set_mask(). */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field of entry `index` from the shadow copy. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached BUF_CTRL register group from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field from the shadow copy (read-only group). */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh the cached statistics counters from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one cached statistics counter.  The common v17 counters are
+ * handled first; the nested default branch handles the counters that
+ * only exist from version 18 on (stored in the v20 struct).
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* Remaining counters require version >= 18 */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, passed as a word array) to
+ * the hardware learn FIFO via the backend.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to word_cnt words of flow-info records into value.
+ * NOTE(review): the backend return value is discarded and 0 is returned
+ * unconditionally on supported fields — confirm this is intentional
+ * (hw_mod_flm_lrn_data_set_flush does propagate it).
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (flm_v17_sta_data_s, as a word array) into
+ * value.  Same discarded-return-value caveat as inf_data above.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..311e39ba36
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay used to pack/unpack two 28-bit member indices from a byte
+ * buffer (see flm_v17_lrn_data_s.mbr_idx): a = bits 27:0, b = 55:28.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+/*
+ * Accessors for the four 28-bit indices inside a 14-byte mbr_idx array;
+ * IDs 3/4 start 7 bytes (56 bits) in, hence the +7 offset.
+ * NOTE(review): casting an arbitrary byte pointer to the overlay struct
+ * relies on unaligned access and type-punning being tolerated by the
+ * target/compiler — confirm for all supported platforms.
+ */
+#define FLM_V17_MBR_ID1(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->a)
+#define FLM_V17_MBR_ID2(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)(mbr_id_ptr))->b)
+#define FLM_V17_MBR_ID3(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->a)
+#define FLM_V17_MBR_ID4(mbr_id_ptr) \
+	(((struct flm_v17_mbr_idx_overlay *)((mbr_id_ptr) + 7))->b)
+
+/* FLM v17 CONTROL register shadow. */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* FLM v17 STATUS register shadow. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* FLM v17 TIMEOUT register shadow. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* FLM v17 SCRUB register shadow. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* FLM v17 LOAD_BIN register shadow. */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+/* FLM v17 LOAD_PPS register shadow. */
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+/* FLM v17 LOAD_LPS register shadow. */
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+/* FLM v17 LOAD_APS register shadow. */
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* FLM v17 PRIO register shadow: four limit/ft pairs. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* FLM v17 PST table entry shadow. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* FLM v17 RCP (recipe) table entry shadow. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10]; /* accessed as a block via HW_FLM_RCP_MASK */
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* FLM v17 BUF_CTRL register shadow (read-only counters of FIFO space). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * Wire-format records exchanged with the FLM hardware FIFOs.  They must
+ * match the hardware bit layout exactly, hence pack(1).  Bit positions
+ * are annotated as high:low (width).
+ */
+#pragma pack(1)
+/* Learn record pushed to hardware (768 bits). */
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow-info record read back from hardware. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1; /* end-of-record marker */
+};
+
+/* Status record read back from hardware. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1; /* end-of-record marker */
+};
+
+#pragma pack()
+/* FLM v17 statistics counters: one single-word shadow per register. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v17 shadow-register container; pointers are carved out of the
+ * single cache allocation set up by hw_mod_flm_alloc().
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst; /* nb_pst_profiles entries */
+	struct flm_v17_rcp_s *rcp; /* nb_categories entries */
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * Statistics registers added in FLM v20 on top of the v17 set.
+ * Each register is a single 32-bit counter (CNT).
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Pointer table for the FLM v20 register shadow cache.  v20 reuses the
+ * v17 record layouts (all members up to prb_ignore are flm_v17_* types)
+ * and appends the v20-only statistics counters below them.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	/* Counters introduced in v20: */
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Return whether the backend FPGA exposes the HSH (hash) module. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Query the HSH module version from the backend and allocate the RCP
+ * record shadow cache for that version.
+ * Returns 0 on success, -1 on allocation failure, or the
+ * unsupported-version error code.
+ * NOTE(review): only version 5 is handled here although
+ * HSH_RCP_ENTRIES_V4 is defined above — confirm whether a v4 case was
+ * intended.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* One variable-size field: nb_rcp RCP records. */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the HSH shadow cache allocated by hw_mod_hsh_alloc().
+ * (The NULL guard is redundant — free(NULL) is a no-op — but harmless.)
+ */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	if (be->hsh.base) {
+		free(be->hsh.base);
+		be->hsh.base = NULL;
+	}
+}
+
+/*
+ * Zero the HSH shadow cache and flush all RCP records to hardware.
+ * Returns the status of the flush.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP records from the shadow
+ * cache to hardware.  count == ALL_ENTRIES flushes every record.
+ * The bound check is done in unsigned arithmetic so that a negative
+ * start_idx or count is rejected as well (a negative sum wraps to a
+ * huge unsigned value), matching the flush helpers of the other
+ * hw_mod modules.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	if ((unsigned int)(start_idx + count) > be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single HSH RCP field in the
+ * shadow cache.  word_off selects the word inside the multi-word
+ * mac_port_mask/word_mask fields.  Bounds checks use >= because
+ * word_off is a zero-based index into arrays of exactly
+ * HSH_RCP_MAC_PORT_MASK_SIZE / HSH_RCP_WORD_MASK_SIZE words (the
+ * original '>' tests allowed a one-past-the-end access).
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/* mac_port_mask[] holds HSH_RCP_MAC_PORT_MASK_SIZE words */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* word_mask[] holds HSH_RCP_WORD_MASK_SIZE words */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HSH RCP field (thin wrapper over hw_mod_hsh_rcp_mod()). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one HSH RCP field into *value. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * Shadow of one HSH v5 RCP (recipe) record.
+ * NOTE(review): the array sizes 4 and 10 must stay in sync with
+ * HSH_RCP_MAC_PORT_MASK_SIZE and HSH_RCP_WORD_MASK_SIZE in
+ * hw_mod_hsh.c — consider sharing the defines.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;	/* signed offsets use int32_t */
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* HSH v5 shadow cache: array of nb_rcp recipe records. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Return whether the backend FPGA exposes the HST module. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Query the HST module version and category count from the backend and
+ * allocate the RCP record shadow cache.  Only version 2 is supported.
+ * Returns 0 on success, -1 on allocation failure, or an error code.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		/* One variable-size field: one RCP record per category. */
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the HST shadow cache allocated by hw_mod_hst_alloc(). */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	if (be->hst.base) {
+		free(be->hst.base);
+		be->hst.base = NULL;
+	}
+}
+
+/*
+ * Zero the HST shadow cache and flush all RCP records to hardware.
+ * Returns the status of the flush.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) HST RCP records from the shadow
+ * cache to hardware.  count == ALL_ENTRIES flushes every record.  The
+ * unsigned comparison also rejects negative start_idx/count values.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const uint32_t nb_entries = be->hst.nb_hst_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single HST RCP field in the
+ * shadow cache.  Fix: HW_HST_RCP_FIND previously discarded the return
+ * value of find_equal_index() and then tested a stale rv (always 0),
+ * so lookup failures were silently ignored; rv is now assigned, as in
+ * the other hw_mod modules.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one HST RCP field.
+ * NOTE(review): index is declared int here but uint32_t in the mod
+ * helper and in the HSH equivalents — consider unifying the signature.
+ */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HST RCP field into *value. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * Shadow of one HST v2 RCP record: header-strip window (start/end) and
+ * three modifier slots (cmd/dyn/ofs/value each).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* HST v2 shadow cache: array of nb_hst_rcp_categories records. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Return whether the backend FPGA exposes the IOA module. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Query the IOA module version and resource counts from the backend and
+ * allocate the shadow cache (RCP records, the single TPID record and
+ * the ROA EPP table).  Only version 4 is supported.
+ * Returns 0 on success, -1 on allocation failure, or an error code.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* Three variable-size fields in one allocation. */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the IOA shadow cache allocated by hw_mod_ioa_alloc(). */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	if (be->ioa.base) {
+		free(be->ioa.base);
+		be->ioa.base = NULL;
+	}
+}
+
+/*
+ * Zero the IOA shadow cache and flush all tables to hardware, also
+ * programming the default custom TPID values (0x8200 / 0x8300).
+ * Failures from the flush/config calls are propagated to the caller,
+ * matching hw_mod_hsh_reset() and hw_mod_hst_reset(), instead of being
+ * silently dropped.  Returns 0 on success.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	int rv;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	rv = hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	if (rv)
+		return rv;
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	rv = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	if (rv)
+		return rv;
+	rv = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	if (rv)
+		return rv;
+	rv = hw_mod_ioa_config_flush(be);
+	if (rv)
+		return rv;
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	return hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) IOA RCP records to hardware.
+ * count == ALL_ENTRIES flushes every record; the unsigned comparison
+ * also rejects negative arguments.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single IOA RCP field in the
+ * shadow cache.  PRESET_ALL fills the whole record with the byte value
+ * of *value; FIND/COMPARE delegate to the generic index helpers.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IOA RCP field (wrapper over hw_mod_ioa_rcp_mod()). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IOA RCP field into *value. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached special-TPID configuration record to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one of the two custom TPID values in the shadow cache
+ * (write-only; flushed separately via hw_mod_ioa_config_flush()).
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Flush [start_idx, start_idx + count) ROA EPP records to hardware.
+ * count == ALL_ENTRIES flushes every record.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single ROA EPP field in the
+ * shadow cache.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one ROA EPP field (wrapper over hw_mod_ioa_roa_epp_mod()). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one ROA EPP field into *value. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* Shadow of one IOA v4 RCP record (VLAN pop/push and queue override). */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Single record holding the two custom TPID values. */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* One ROA EPP (egress path) entry. */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* IOA v4 shadow cache pointer table (see hw_mod_ioa_alloc()). */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Return whether the backend FPGA exposes the KM (key matcher) module. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Query the KM module version and resource counts from the backend and
+ * allocate the shadow caches (RCP, CAM, TCAM, TCI and TCQ tables).
+ * Only version 7 is supported.
+ * Returns 0 on success, -1 on allocation failure, or an error code.
+ * The mask word counts now use the KM_RCP_MASK_D_A_SIZE /
+ * KM_RCP_MASK_B_SIZE defines (previously magic 12 and 6) so they cannot
+ * drift from the bounds checks in hw_mod_km_rcp_mod().
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		/*
+		 * NOTE(review): the TCAM sizing factor "4 * 256" is taken
+		 * from the FPGA layout — confirm against the register map.
+		 */
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the KM shadow cache allocated by hw_mod_km_alloc(). */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	if (be->km.base) {
+		free(be->km.base);
+		be->km.base = NULL;
+	}
+}
+
+/*
+ * Zero the KM shadow cache and flush all tables (RCP, CAM, TCAM, TCI,
+ * TCQ) to hardware.  Always returns 0; individual flush statuses are
+ * not propagated here.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* Flush the TCQ one bank-width column at a time. */
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Flush [start_idx, start_idx + count) KM RCP records to hardware.
+ * count == ALL_ENTRIES flushes every record.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single KM RCP field in the
+ * shadow cache.  word_off selects the word inside the multi-word mask
+ * fields.  Bounds checks use >= because word_off is a zero-based index
+ * into arrays of KM_RCP_MASK_D_A_SIZE / KM_RCP_MASK_B_SIZE words (the
+ * original '>' tests allowed a one-past-the-end access).
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/* mask_d_a[] holds KM_RCP_MASK_D_A_SIZE words */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* mask_b[] holds KM_RCP_MASK_B_SIZE words */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field (word_off selects the word for array fields). */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KM RCP field (word_off selects the word for array fields). */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush KM CAM shadow records to hardware, starting at
+ * (start_bank, start_record) and covering count records.
+ * ALL_ENTRIES flushes the entire CAM.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	unsigned int total = be->km.nb_cam_banks * be->km.nb_cam_records;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+
+	unsigned int last = start_bank * be->km.nb_cam_records +
+			    start_record + count;
+
+	if (last > total)
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a KM CAM record in the
+ * shadow cache; bank/record address the entry.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* records of all banks are stored consecutively in one array */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* write-only: fill the whole record with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one field of a KM CAM record in the shadow cache. */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one field of a KM CAM record from the shadow cache. */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush KM TCAM shadow words to hardware starting at start_bank.
+ * ALL_ENTRIES flushes every bank, ALL_BANK_ENTRIES one full bank.
+ * A bank is addressed as 4 byte-lanes of 256 byte-values each.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	const int bank_words = 4 * 256;
+
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * bank_words;
+	else if (count == ALL_BANK_ENTRIES)
+		count = bank_words;
+
+	unsigned int end = start_bank * bank_words + count;
+
+	if (end > (be->km.nb_tcam_banks * bank_words))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Read (get != 0) or write one KM TCAM word (three 32-bit values) in the
+ * shadow cache, or preset a whole bank (HW_KM_TCAM_BANK_RESET, write-only).
+ * A bank is addressed as 4 byte-lanes of 256 byte-values; (bank, byte,
+ * byte_val) selects one word. value_set points to at least 3 words.
+ *
+ * Cleanup vs. the original: HW_KM_TCAM_T recomputed the element index with
+ * the exact expression already held in start_index; that duplicate (and a
+ * redundant (int) cast) is removed, and a pointer alias is used for the
+ * triple-word copies to avoid repeating the long element expression.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				/* preset every word of the bank */
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					uint32_t *t =
+						be->km.v7.tcam[start_idx + i].t;
+
+					t[0] = value_set[0];
+					t[1] = value_set[1];
+					t[2] = value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			/* start_index already addresses this exact word */
+			uint32_t *t = be->km.v7.tcam[start_index].t;
+
+			if (get) {
+				value_set[0] = t[0];
+				value_set[1] = t[1];
+				value_set[2] = t[2];
+			} else if (t[0] != value_set[0] ||
+				   t[1] != value_set[1] ||
+				   t[2] != value_set[2]) {
+				/* only mark dirty if any bits change */
+				t[0] = value_set[0];
+				t[1] = value_set[1];
+				t[2] = value_set[2];
+				be->km.v7.tcam[start_index].dirty = 1;
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCAM word (value_set holds 3 words) into the shadow cache. */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read one KM TCAM word (3 words written to value_set) from the cache. */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush KM TCI shadow records to hardware from (start_bank, start_record),
+ * covering count records. ALL_ENTRIES flushes everything.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	unsigned int total = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+
+	unsigned int last = start_bank * be->km.nb_tcam_bank_width +
+			    start_record + count;
+
+	if (last > total)
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a KM TCI record in the
+ * shadow cache; bank/record address the entry.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	/* records of all banks are stored consecutively in one array */
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCI field in the shadow cache. */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM TCI field from the shadow cache. */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush KM TCQ shadow records to hardware from (start_bank, start_record),
+ * covering count records. ALL_ENTRIES flushes everything.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	unsigned int total = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+
+	unsigned int last = start_bank * be->km.nb_tcam_bank_width +
+			    start_record + count;
+
+	if (last > total)
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a KM TCQ record in the
+ * shadow cache; bank/record address the entry.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	/* records of all banks are stored consecutively in one array */
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCQ field; value is passed by pointer (backend convention). */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one KM TCQ field from the shadow cache. */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * KM v7 recipe (RCP) shadow record: extractor configuration for one
+ * KM category. *_dyn fields select a dynamic offset base, *_ofs are
+ * signed relative byte offsets, *_sel_a/_sel_b select per-lane sources.
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	/* key masks; mask_d_a spans both D and A words (see HW_KM_RCP_MASK_A) */
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* KM v7 CAM record: six key words (w0-w5) with per-word flow types (ft0-ft5). */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* KM v7 TCAM word: three 32-bit values plus a dirty flag for flushing. */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;	/* set when t[] changed; cleared by the flush path */
+};
+
+/* KM v7 TCAM category information record. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* KM v7 TCAM qualifier record. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table shadow pointers for KM v7 (allocated in one block by alloc). */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA image. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query the PDB module version and resource counts from the backend and
+ * allocate the version-specific shadow tables (one allocation, anchored
+ * at be->pdb.base). Returns 0 on success, negative on failure.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		/* one RCP table plus a single CONFIG record */
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB shadow tables (all live in the one base allocation). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->pdb.base);
+	be->pdb.base = NULL;
+}
+
+/*
+ * Reset the PDB module: zero the shadow cache and flush all RCP and
+ * CONFIG registers to hardware. Returns OR of the flush results.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/*
+ * Flush PDB RCP shadow entries [start_idx, start_idx + count) to
+ * hardware. ALL_ENTRIES flushes every category.
+ */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	unsigned int nb = be->pdb.nb_pdb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a PDB RCP category in the shadow
+ * cache. HW_PDB_RCP_FIND/COMPARE operate across categories via the
+ * generic index helpers; signed offset fields use get_set_signed.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			/* write-only: fill the whole record with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB RCP field in the shadow cache. */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one PDB RCP field from the shadow cache. */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Write the cached PDB CONFIG register to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set one field of the (single) PDB CONFIG record in the shadow cache.
+ * Write-only; the value reaches hardware on hw_mod_pdb_config_flush().
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * PDB v9 recipe (RCP) shadow record: packet descriptor builder settings
+ * for one category. ofsN_rel fields are signed byte offsets relative to
+ * the dynamic base selected by the matching ofsN_dyn.
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* PDB v9 global configuration record (single instance). */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table shadow pointers for PDB v9 (allocated in one block by alloc). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA image. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query the QSL module version and resource counts from the backend and
+ * allocate the version-specific shadow tables (RCP, QST, QEN, UNMQ) in
+ * one block anchored at be->qsl.base. Returns 0 on success, negative on
+ * failure.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* QEN and UNMQ table sizes are fixed for v7 */
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL shadow tables (all live in the one base allocation). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->qsl.base);
+	be->qsl.base = NULL;
+}
+
+/*
+ * Reset the QSL module: zero the shadow cache and flush all tables to
+ * hardware. Returns OR of the flush results (matching hw_mod_pdb_reset;
+ * the original discarded them and always returned 0). The UNMQ table is
+ * now flushed through its bounds-checked wrapper instead of calling the
+ * backend iface directly with a magic 256 (== QSL_QNMQ_ENTRIES).
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	err |= hw_mod_qsl_unmq_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush QSL RCP shadow entries [start_idx, start_idx + count) to
+ * hardware. ALL_ENTRIES flushes every category.
+ */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	unsigned int nb = be->qsl.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a QSL RCP category in the shadow
+ * cache. HW_QSL_RCP_FIND/COMPARE operate across categories via the
+ * generic index helpers.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			/* write-only: fill the whole record with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL RCP field in the shadow cache. */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL RCP field from the shadow cache. */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush QSL QST shadow entries [start_idx, start_idx + count) to
+ * hardware. ALL_ENTRIES flushes the whole queue selection table.
+ */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	unsigned int nb = be->qsl.nb_qst_entries;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one field of a QSL QST entry in the
+ * shadow cache.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			/* write-only: fill the whole record with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QST field in the shadow cache. */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QST field from the shadow cache. */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush QSL queue-enable shadow entries [start_idx, start_idx + count)
+ * to hardware. ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+
+	if (start_idx + count > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one QSL queue-enable entry in the
+ * shadow cache.
+ */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL queue-enable field in the shadow cache. */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL queue-enable field from the shadow cache. */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush QSL unmatched-queue shadow entries [start_idx, start_idx + count)
+ * to hardware. ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+
+	if (start_idx + count > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/*
+ * Read (get != 0) or write one field of a QSL unmatched-queue entry in
+ * the shadow cache.
+ */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL unmatched-queue field in the shadow cache. */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL unmatched-queue field from the shadow cache. */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* QSL v7 recipe (RCP) shadow record: queue selection for one category. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* QSL v7 queue selection table (QST) entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* QSL v7 queue-enable (QEN) entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* QSL v7 unmatched-packet queue (UNMQ) entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table shadow pointers for QSL v7 (allocated in one block by alloc). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA image. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query the RMC module version from the backend and allocate the
+ * version-specific shadow (a single CTRL record for v1.3, version code
+ * 0x10003). Returns 0 on success, negative on failure.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC shadow (lives in the one base allocation). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->rmc.base);
+	be->rmc.base = NULL;
+}
+
+/*
+ * Reset the RMC module: zero the shadow cache, set the CTRL register to a
+ * safe default (statistics and keep-alive blocked, all MAC ports blocked,
+ * all RPP slices blocked) and flush it to hardware.
+ *
+ * Fix: the original set HW_RMC_BLOCK_STATT twice; the duplicate call is
+ * removed (no behavioral change — same value was written both times).
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Write the cached RMC CTRL register to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Read (get != 0) or write one field of the (single) RMC CTRL record in
+ * the shadow cache. Version 0x10003 corresponds to RMC v1.3.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC CTRL field in the shadow cache. */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one RMC CTRL field from the shadow cache. */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Driver-side shadow of the RMC (v1.3) CTRL register fields. */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;		/* block statistics */
+	uint32_t block_keepa;		/* block keep-alive */
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;	/* per-port block bitmask */
+	uint32_t lag_phy_odd_even;
+};
+
+/* RMC v1.3 cache layout: one CTRL record. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA module is present in this FPGA. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the ROA version and resource counts, then
+ * allocate all version-specific shadow tables in one common base block
+ * via callocate_mod().  Returns 0 on success, negative on failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): tunnel entries appear to be shared by groups of
+	 * four categories, hence the division -- confirm against the ROA
+	 * register specification.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the single base allocation backing all ROA shadow tables
+ * (created by callocate_mod() in hw_mod_roa_alloc()).  free(NULL) is a
+ * no-op, so no guard is needed; resetting the pointer makes repeated
+ * calls safe.
+ */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Zero the ROA cache and flush defaults to hardware.
+ * Fix: the original kept only the tunhdr flush result and silently
+ * discarded the return values of the tuncfg/config/lagcfg flushes;
+ * accumulate all of them, matching hw_mod_tpe_reset().
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	/* defaults: forward recirculated and normal packets on both TX ports */
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush tunnel-header cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one 32-bit word (word_off) of the cached tunnel header at
+ * the given index.  Cache only; apply with hw_mod_roa_tunhdr_flush().
+ * NOTE(review): for HW_ROA_TUNHDR_COMPARE the word_off argument is used
+ * as the compare key where sibling helpers pass *value -- confirm this
+ * is intended.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one tunnel-header word (see hw_mod_roa_tunhdr_mod()). */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Cache-read one tunnel-header word (see hw_mod_roa_tunhdr_mod()). */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush tunnel-config cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one tunnel-config field of the
+ * cached entry at index.  PRESET_ALL fills the whole entry with the low
+ * byte of *value; FIND/COMPARE search/compare across entries.  Cache
+ * only; apply with hw_mod_roa_tuncfg_flush().
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one tunnel-config field (see hw_mod_roa_tuncfg_mod()). */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Cache-read one tunnel-config field (see hw_mod_roa_tuncfg_mod()). */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Push the cached (single) ROA CONFIG record to the backend device. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the cached ROA
+ * CONFIG record.  Cache only; apply with hw_mod_roa_config_flush().
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one ROA CONFIG field (see hw_mod_roa_config_mod()). */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Cache-read one ROA CONFIG field (see hw_mod_roa_config_mod()). */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush LAG-config cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the cached LAG-config
+ * entry at index.  Cache only; apply with hw_mod_roa_lagcfg_flush().
+ */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one LAG-config field (see hw_mod_roa_lagcfg_mod()). */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Cache-read one LAG-config field (see hw_mod_roa_lagcfg_mod()). */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* Raw tunnel push-header: 16 x 32-bit words, addressed by word offset. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration shadow (ROA v6 TUNCFG). */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;		/* IP checksum update */
+	uint32_t ipcs_precalc;		/* pre-calculated IP checksum */
+	uint32_t iptl_upd;		/* IP total-length update */
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Global forwarding configuration shadow (ROA v6 CONFIG, one record). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry (ROA v6 LAGCFG). */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* ROA v6 cache layout; tables are carved from one base allocation. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC module is present in this FPGA. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the SLC version and allocate one RCP shadow
+ * entry per flow category.  Returns 0 on success, negative on failure.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC cache base allocation.  free(NULL) is a no-op, so no
+ * guard is needed; resetting the pointer makes repeated calls safe.
+ */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Zero the SLC cache and flush all RCP entries to hardware. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush SLC RCP cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one SLC RCP field of the cached
+ * entry at index.  Cache only; apply with hw_mod_slc_rcp_flush().
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE operate on elements of the rcp[] array,
+ * so the element size is sizeof(struct slc_v1_rcp_s).  The original
+ * passed sizeof(struct hw_mod_slc_v1_s) -- the struct holding the rcp
+ * pointer -- which gave a short memset and a wrong array stride.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one SLC RCP field (see hw_mod_slc_rcp_mod()). */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Cache-read one SLC RCP field (see hw_mod_slc_rcp_mod()). */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR module is present in this FPGA. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the SLC LR version and allocate one RCP shadow
+ * entry per flow category.  Returns 0 on success, negative on failure.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the SLC LR cache base allocation.  free(NULL) is a no-op, so
+ * no guard is needed; resetting the pointer makes repeated calls safe.
+ */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Zero the SLC LR cache and flush all RCP entries to hardware. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush SLC LR RCP cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one SLC LR RCP field of the
+ * cached entry at index.  Cache only; apply with
+ * hw_mod_slc_lr_rcp_flush().
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE operate on elements of the rcp[] array,
+ * so the element size is sizeof(struct slc_lr_v2_rcp_s).  The original
+ * passed sizeof(struct hw_mod_slc_lr_v2_s) -- the struct holding the
+ * rcp pointer -- which gave a short memset and a wrong array stride.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one SLC LR RCP field (see hw_mod_slc_lr_rcp_mod()). */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Cache-read one SLC LR RCP field (see hw_mod_slc_lr_rcp_mod()). */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* Shadow of one SLC LR (v2) RCP table entry. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* SLC LR v2 cache layout: per-category RCP array. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* Shadow of one SLC (v1) RCP table entry. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* SLC v1 cache layout: per-category RCP array. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE module is present in this FPGA. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the TPE version and resource counts, then
+ * allocate the version-specific shadow tables (v1: 8 tables, v2 adds
+ * the two IFR tables) in one common base block.  Returns 0 on success,
+ * negative on failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	/* IFR tables exist only from version 2 onwards */
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the TPE cache base allocation.  free(NULL) is a no-op, so no
+ * guard is needed; resetting the pointer makes repeated calls safe.
+ */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Zero the TPE cache and flush every table to hardware, OR-ing all
+ * flush results into the returned error code.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* IFR tables only exist in version 2 */
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush RPP IFR RCP cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one RPP IFR RCP field of the
+ * cached entry at index (v2 only).  Cache only; apply with
+ * hw_mod_tpe_rpp_ifr_rcp_flush().
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one RPP IFR RCP field (see hw_mod_tpe_rpp_ifr_rcp_mod()). */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Cache-read one RPP IFR RCP field (see hw_mod_tpe_rpp_ifr_rcp_mod()). */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush RPP RCP cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one RPP RCP field of the cached
+ * entry at index.  Cache only; apply with hw_mod_tpe_rpp_rcp_flush().
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one RPP RCP field (see hw_mod_tpe_rpp_rcp_mod()). */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Cache-read one RPP RCP field (see hw_mod_tpe_rpp_rcp_mod()). */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush IFR RCP cache entries [start_idx, start_idx + count) to HW. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one IFR RCP field of the cached
+ * entry at index (v2 only).  Cache only; apply with
+ * hw_mod_tpe_ifr_rcp_flush().
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-write one IFR RCP field (see hw_mod_tpe_ifr_rcp_mod()). */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Cache-read one IFR RCP field (see hw_mod_tpe_ifr_rcp_mod()). */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one INS_RCP field; see hw_mod_tpe_ins_rcp_mod() for field handling. */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one INS_RCP field into *value. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush TPE RPL_RCP shadow entries [start_idx .. start_idx + count - 1]
+ * to hardware.  count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/* The unsigned cast also rejects a negative start_idx/count sum. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one RPL_RCP record ('get' selects read vs. write).
+ * Also implements HW_TPE_PRESET_ALL (set only), HW_TPE_FIND and
+ * HW_TPE_COMPARE.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2: /* v1 and v2 share the RPL_RCP record layout */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1/2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL_RCP field; see hw_mod_tpe_rpl_rcp_mod() for field handling. */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL_RCP field into *value. */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush TPE RPL_EXT shadow entries [start_idx .. start_idx + count - 1]
+ * to hardware.  Note this table is sized by nb_rpl_ext_categories, not by
+ * nb_rcp_categories.  count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one RPL_EXT record ('get' selects read vs. write).
+ * Also implements HW_TPE_PRESET_ALL (set only), HW_TPE_FIND and
+ * HW_TPE_COMPARE.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2: /* v1 and v2 share the RPL_EXT record layout */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			/* SW-only bookkeeping field (see tpe_v1_rpl_v2_ext_s) */
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1/2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL_EXT field; see hw_mod_tpe_rpl_ext_mod() for field handling. */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL_EXT field into *value. */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush TPE RPL_RPL shadow entries [start_idx .. start_idx + count - 1]
+ * to hardware.  This table is sized by nb_rpl_depth.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one RPL_RPL record ('get' selects read vs. write).
+ * The VALUE field is a 16-byte data chunk and is copied as 4 x uint32_t,
+ * so 'value' must point to at least 4 words for that field.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2: /* v1 and v2 share the RPL_RPL record layout */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* whole 16-byte chunk in/out, not a single word */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1/2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one RPL_RPL field.  Unlike the other setters, 'value' is a pointer
+ * because HW_TPE_RPL_RPL_VALUE transfers a 4-word chunk.
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Read one RPL_RPL field; *value receives 4 words for HW_TPE_RPL_RPL_VALUE. */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush TPE CPY_RCP shadow entries [start_idx .. start_idx + count - 1]
+ * to hardware.  The CPY table is two-dimensional in hardware; its flat
+ * size is nb_cpy_writers * nb_rcp_categories entries.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one CPY_RCP record ('get' selects read vs. write).
+ * 'index' is a flat index into nb_cpy_writers * nb_rcp_categories entries.
+ * Also implements HW_TPE_PRESET_ALL (set only), HW_TPE_FIND and
+ * HW_TPE_COMPARE.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2: /* v1 and v2 share the CPY_RCP record layout */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1/2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CPY_RCP field; see hw_mod_tpe_cpy_rcp_mod() for field handling. */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CPY_RCP field into *value. */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush TPE HFU_RCP shadow entries [start_idx .. start_idx + count - 1]
+ * to hardware.  count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one HFU_RCP record ('get' selects read vs. write).
+ * The record carries three length-update groups (LEN_A/B/C), a TTL group
+ * and header/offset descriptors; see struct tpe_v1_hfu_v1_rcp_s.
+ * Also implements HW_TPE_PRESET_ALL (set only), HW_TPE_FIND and
+ * HW_TPE_COMPARE.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2: /* v1 and v2 share the HFU_RCP record layout */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* LEN_A group (only group with an outer-L4-len flag) */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* LEN_B group */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* LEN_C group */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL group */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* packet/header descriptors */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1/2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HFU_RCP field; see hw_mod_tpe_hfu_rcp_mod() for field handling. */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HFU_RCP field into *value. */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush TPE CSU_RCP shadow entries [start_idx .. start_idx + count - 1]
+ * to hardware.  count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Common accessor for one CSU_RCP record ('get' selects read vs. write).
+ * Fields are per-layer commands for outer/inner L3 and L4
+ * (ol3/ol4/il3/il4).  Also implements HW_TPE_PRESET_ALL (set only),
+ * HW_TPE_FIND and HW_TPE_COMPARE.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2: /* v1 and v2 share the CSU_RCP record layout */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1/2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CSU_RCP field; see hw_mod_tpe_csu_rcp_mod() for field handling. */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CSU_RCP field into *value. */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/*
+ * Version 1 TPE register-shadow records.  Each *_rcp_s struct mirrors one
+ * recipe entry of the corresponding FPGA sub-module; hw_mod_tpe.c accesses
+ * the members through get_set()/memcpy.
+ * NOTE(review): field semantics (dyn/ofs/len selectors etc.) are inferred
+ * from the accessor names only -- confirm against the FPGA register
+ * documentation.
+ */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr; /* index into the RPL_RPL data table */
+	uint32_t ext_prio;
+};
+
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4]; /* 16-byte replacement data chunk */
+};
+
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+struct tpe_v1_hfu_v1_rcp_s {
+	/* length-update group A (the only group with an outer-L4-len flag) */
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	/* length-update group B */
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	/* length-update group C */
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	/* TTL rewrite group */
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	/* packet/header descriptors */
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd; /* outer L3 checksum command */
+	uint32_t ol4_cmd; /* outer L4 checksum command */
+	uint32_t il3_cmd; /* inner L3 checksum command */
+	uint32_t il4_cmd; /* inner L4 checksum command */
+};
+
+/* Per-sub-module shadow tables; each pointer is a dynamically sized array. */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/*
+ * Version 2 additions: IFR recipes (presumably IP fragmentation /
+ * MTU handling -- confirm) with an enable flag and an MTU value.
+ */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en; /* presumably enable bit */
+	uint32_t mtu;
+};
+
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en; /* presumably enable bit */
+	uint32_t mtu;
+};
+
+/* v2 shadow tables: superset of hw_mod_tpe_v1_s plus the IFR recipes. */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Pair of logical queue ID and the hardware queue it maps to (assumed). */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Match-item types; mirrors the subset of RTE_FLOW_ITEM_TYPE_* supported. */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Action types; mirrors the subset of RTE_FLOW_ACTION_TYPE_* supported. */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* 6-byte Ethernet MAC address, packed so it can overlay wire data. */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format a MAC address as "XX:XX:XX:XX:XX:XX" into buf.
+ * snprintf guarantees NUL termination as long as size > 0.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header (RFC 791 layout; be*_t fields are big-endian on the wire)
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl; /* version (high nibble) + IHL (low nibble) */
+	uint8_t tos;
+	be16_t length; /* total length incl. header */
+	be16_t id;
+	be16_t frag_offset; /* flags + fragment offset */
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header (RFC 8200 layout)
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off; /* data offset (high nibble) + reserved bits */
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp; /* urgent pointer */
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags; /* version, PT bit, reserved + E/S/PN flags */
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid; /* tunnel endpoint ID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3]; /* 24-bit VXLAN network identifier */
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data; /* tag value to match */
+	uint8_t index; /* which tag register */
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry of a match-item list: type + spec/mask pair (RTE-flow style). */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1; /* reset counters after read */
+	uint32_t hits_set : 1; /* 'hits' field is valid */
+	uint32_t bytes_set : 1; /* 'bytes' field is valid */
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size; /* byte length of 'data' */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size; /* byte length of 'data' */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * NOTE(review): mirrors the rte_flow field-ID set; only FLOW_FIELD_START is
+ * pinned to an explicit value, so keep the order stable.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst := src */
+	FLOW_MODIFY_ADD, /* dst := dst + src */
+	FLOW_MODIFY_SUB, /* dst := dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* Operation to apply. */
+	struct flow_action_modify_data dst; /* Destination field. */
+	struct flow_action_modify_data src; /* Source field or immediate. */
+	uint32_t width; /* Number of bits to transfer. */
+};
+
+/* One entry in a flow's action list; "conf" points at the per-type struct. */
+struct flow_action {
+	enum flow_action_type type; /* Selects the layout of "conf". */
+	const void *conf; /* Type-specific configuration, may be NULL. */
+};
+
+/*
+ * Error classification returned by the flow API.
+ * NOTE(review): FLOW_ERROR_NONE and FLOW_ERROR_SUCCESS are distinct values;
+ * callers should treat only FLOW_ERROR_SUCCESS as "completed OK".
+ */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Error report filled in by API calls; "message" is a static string. */
+struct flow_error {
+	enum flow_error_e type;
+	const char *message;
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write a single LAG table entry. */
+	FLOW_LAG_SET_ALL, /* Write every entry at a given table offset. */
+	FLOW_LAG_SET_BALANCE, /* Distribute entries between two ports. */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Selects the union member: 4 or 6. */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle; /* opaque handle for a programmed flow */
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/*
+ * Attach a port of the adapter as a flow device.
+ * NOTE(review): semantics of rss_target_id and exception_path are defined by
+ * the implementation in flow_api.c — confirm there before relying on them.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ * All calls report details through "error" when non-NULL; integer-returning
+ * functions use 0 for success and negative values for failure.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 6/8] net/ntnic: adds flow logic
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-30 16:51   ` [PATCH v10 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-30 16:51   ` Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10078 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* Update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..2598e1e27b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Human-readable names for the resource types, used only for logging.
+ * The entry order MUST match enum res_type_e exactly — the table is indexed
+ * by that enum in flow_nic_ref_resource()/flow_nic_deref_resource().
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the singly-linked list of all known NIC devices (one per adapter). */
+static struct flow_nic_dev *dev_base;
+/* Serializes access to dev_base and the per-adapter state in the LAG calls. */
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Canned error messages returned through struct flow_error.
+ * The array is indexed by enum flow_nic_err_msg_e (see flow_nic_set_error()),
+ * so entries must stay in enum order.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+};
+
+/*
+ * Fill a flow_error report for internal error code "msg".
+ * ERR_SUCCESS is mapped to FLOW_ERROR_SUCCESS; every other code becomes
+ * FLOW_ERROR_GENERAL with the matching canned message from err_msg[].
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	/* "error" may be NULL when the caller does not want details. */
+	if (error) {
+		error->message = err_msg[msg].message;
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free resource of the given type, scanning candidate indexes
+ * in steps of "alignment". The entry is marked used with a reference count
+ * of 1.
+ * Returns the allocated index, or -1 when nothing is free (or on a zero
+ * alignment, which would otherwise loop forever).
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	/* A zero step would never advance the scan below. */
+	if (alignment == 0)
+		return -1;
+
+	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+			i += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+			flow_nic_mark_resource_used(ndev, res_type, i);
+			ndev->res[res_type].ref[i] = 1;
+			return i;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific index "idx" of the given resource type.
+ * Returns 0 on success, -1 when the index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		/* Fresh allocation starts with a single reference. */
+		ndev->res[res_type].ref[idx] = 1;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Allocate "num" contiguous resources of the given type, with the first
+ * index aligned to "alignment". Every entry in the run is marked used with
+ * a reference count of 1.
+ * Returns the first allocated index, or -1 when no suitable run exists.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/*
+	 * Guard the loop bound: with num > resource_count the unsigned
+	 * expression "resource_count - (num - 1)" would wrap to a huge
+	 * value, and alignment == 0 would never advance the scan.
+	 */
+	if (num == 0 || alignment == 0 ||
+			num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* Check that the following num-1 entries are free too. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Release a single resource index unconditionally — no reference counting
+ * here; use flow_nic_deref_resource() for the ref-counted path.
+ */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an extra reference on an already-allocated resource.
+ * Returns 0 on success, -1 when the 32-bit counter is saturated.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	/* Refuse to wrap the counter around. */
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Drop one reference on a resource; frees the index when the count hits 0.
+ * Returns 1 while the resource is still referenced, 0 once it has been freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Find the first in-use index of "res_type" at or after "idx_start".
+ * Returns the index, or -1 when no further entries are in use.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
+			i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number of flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	/* Both allocators return -1 when nothing suitable was free. */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the specific index "idx" of "res_type" and record it (count 1) in
+ * the flow handle. Returns 0 on success, non-zero / -1 on failure.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program HSH recipe "hsh_idx" with a predefined hashing algorithm.
+ * HASH_ALGO_5TUPLE builds an IPv6-sized 5-tuple key (QW0/QW4 hold the IP
+ * addresses, W8 the L4 ports) and enables the adaptive IPv4 mask bit so
+ * IPv4 traffic is hashed consistently too. Any other value (including
+ * HASH_ALGO_ROUND_ROBIN) leaves the recipe at its reset state, which is
+ * round-robin. Always returns 0.
+ * NOTE(review): register writes are order-sensitive; keep the sequence.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program HSH recipe "hsh_idx" from an RSS field-flag combination.
+ * Only a fixed set of combinations is supported; anything else fails.
+ * Returns 0 on success, -1 on an unsupported combination or when any
+ * backend register write fails.
+ *
+ * Fix vs. original: the outer-dst-IP and inner-src-IP cases discarded the
+ * hw_mod_hsh_rcp_set() return values, so the "if (res)" checks there could
+ * never fire; all calls now accumulate into "res". Also corrects the
+ * "cardware" typos and the copy-pasted "outer dst" debug message in the
+ * inner-src case.
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN
+		 * provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx,
+					  0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx,
+					  0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the eth port device attached to (adapter_no, port).
+ * Returns NULL when either the adapter or the port is unknown.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev;
+	struct flow_eth_dev *eth_dev;
+
+	for (nic_dev = dev_base; nic_dev; nic_dev = nic_dev->next) {
+		if (nic_dev->adapter_no != adapter_no)
+			continue;
+
+		/* Adapter found - scan its port device list. */
+		for (eth_dev = nic_dev->eth_base; eth_dev;
+				eth_dev = eth_dev->next) {
+			if (eth_dev->port == port)
+				return eth_dev;
+		}
+		return NULL;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the NIC device for "adapter_no" in the global device list.
+ * Returns NULL when the adapter is unknown.
+ */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev;
+
+	for (ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			break;
+	}
+
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure LAG port pairing for the adapter.
+ * Returns 0 on success, -1 when the adapter is unknown.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
+	 * and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block RX traffic from the MAC ports selected by "port_mask".
+ * Returns 0 on success, -1 when the adapter is unknown.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one ROA LAG table entry (TX phy port) and flush it to hardware. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Update the adapter's LAG configuration table.
+ * cmd selects the operation (see enum flow_lag_cmd); "index"/"value" meaning
+ * depends on cmd. Returns 0 on success, -1 on unknown adapter or command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		/* Write "value" at offset index (0-3) of every 4-entry block. */
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* NOTE(review): 1/2 appear to be HW encodings for P0/P1 - confirm. */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow specification without programming it.
+ * Only the inline profile is supported; vSwitch devices are rejected.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Create a flow on an eth-port device.
+ * Only the inline profile is implemented; vSwitch devices are rejected.
+ * Returns the new flow handle, or NULL on failure.
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/*
+ * Destroy a single flow previously created with flow_create().
+ * Only the inline profile is implemented; vSwitch devices are rejected.
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Destroy all flows on an eth-port device.
+ * Only the inline profile is implemented; vSwitch devices are rejected.
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Query an action (e.g. counters) on an existing flow; the result buffer
+ * is returned through *data / *length.
+ * Only the inline profile is implemented; vSwitch devices are rejected.
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Prepend an eth-port device to the owning NIC's singly linked list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *eth_dev)
+{
+	eth_dev->next = ndev->eth_base;
+	ndev->eth_base = eth_dev;
+}
+
+/*
+ * Unlink an eth-port device from the owning NIC's list.
+ * Returns 0 when found and removed, -1 if the device is not in the list.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	for (; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Reset a NIC flow device to its initial state: delete every eth-port
+ * device created on it (which destroys their flows), tear down the
+ * inline flow management state and release the KM/KCC resource handles.
+ * In FLOW_DEBUG builds the resource tables are audited for leaked
+ * references before they are freed.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/*
+	 * Error check: deleting all eth-ports should have destroyed every
+	 * flow; destroy any stragglers so nothing leaks.
+	 */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+
+		/*
+		 * The enclosing #ifdef FLOW_DEBUG already guards this code;
+		 * the original nested #if defined(FLOW_DEBUG) was redundant.
+		 */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset the flow device of an adapter: tear down all eth-port devices
+ * and flows, then reset the backend state.
+ * Returns 0 on success, -1 when no flow device exists for adapter_no.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *nic_dev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (nic_dev == NULL)
+		return -1;
+
+	flow_ndev_reset(nic_dev);
+	flow_api_backend_reset(&nic_dev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ *
+ * Create (or re-create) an eth-port device on the given adapter/port,
+ * allocate its RX queues and, for the inline profile, enable them in
+ * QSL. Returns the new device, or NULL on any error.
+ *
+ * Locking fixes vs the original: the re-open path no longer leaves
+ * base_mtx unlocked (which led to a double unlock at the end), and the
+ * calloc-failure path no longer jumps to err_exit1 with base_mtx held.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * Drop the global lock while tearing the old device down,
+		 * then retake it so that base_mtx is held exactly once on
+		 * every path to the final unlock (the original code ended
+		 * up unlocking base_mtx twice on this path).
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/* base_mtx is still held here; release it before exiting */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* enable each allocated queue in the QSL QEN bitmap */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an additional RX queue on an eth-port device and enable it in
+ * the QSL queue-enable (QEN) register bank. Always returns 0.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	const int hw_id = queue_id->hw_id;
+	uint32_t enable_bits = 0;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN, hw_id / 4,
+			   &enable_bits);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN, hw_id / 4,
+			   enable_bits | (1 << (hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy all flows created on it, clear its
+ * unmatched-queue (UNMQ) setup in QSL, disable and free its RX queues,
+ * unlink it from the owning NIC device and free it.
+ * Returns 0 on success, -1 on an invalid device or unsupported profile.
+ *
+ * Fix vs the original: the vSwitch-profile error path no longer returns
+ * with ndev->mtx held.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/* must not return with the device mutex held */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* clear each queue's enable bit in the QSL QEN bitmap */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper around tunnel_get_definition() for the given stat id/vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPGA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Set up the allocation state for one resource type: a single calloc'd
+ * region holds the allocation bitmap followed by the per-element
+ * reference counters. Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	struct hw_mod_resource_s *res = &ndev->res[res_type];
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+
+	assert(res->alloc_bm == NULL);
+	/* one zeroed allocation carries both bitmap and ref counters */
+	res->alloc_bm = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (res->alloc_bm == NULL)
+		return -1;
+
+	res->ref = (uint32_t *)&res->alloc_bm[bm_bytes];
+	res->resource_count = count;
+	return 0;
+}
+
+/*
+ * Release the allocation state of one resource type. The ref array lives
+ * inside the same allocation as the bitmap (see init_resource_elements),
+ * so freeing alloc_bm releases both. Pointers are cleared so a repeated
+ * call is harmless.
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(ndev->res[res_type].alloc_bm);
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/* Prepend a NIC device to the global registered-device list. */
+static void list_insert_flow_nic(struct flow_nic_dev *nic)
+{
+	pthread_mutex_lock(&base_mtx);
+	nic->next = dev_base;
+	dev_base = nic;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global registered-device list.
+ * Returns 0 when found and removed, -1 if the device is not in the list.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	struct flow_nic_dev **link;
+	int status = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	for (link = &dev_base; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			status = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return status;
+}
+
+/*
+ * Create and register a NIC flow device for an adapter.
+ *
+ * be_if/be_dev: backend operations table (must be interface version 1)
+ * and its opaque backend device handle.
+ *
+ * Sizes and allocates every per-resource-type table from the backend
+ * capability fields, initializes the device mutex and inserts the device
+ * into the global NIC list. Returns the new device, or NULL on failure.
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* cap the number of addressable in-ports at 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* NOTE(review): relies on flow_api_done() tolerating a partially
+	 * initialized device - confirm for each init step above
+	 */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Tear down a NIC flow device created by flow_api_create(): reset it
+ * (removing all eth-port devices and flows), release the per-type
+ * resource tables, close the backend, unlink the device from the global
+ * NIC list and free it. Always returns 0; a NULL ndev is ignored.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+
+	if (!ndev)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int res_type = 0; res_type < RES_COUNT; res_type++)
+		done_resource_elements(ndev, res_type);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the opaque backend device handle of a NIC flow device, or NULL. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev == NULL) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	return ndev->be.be_dev;
+}
+
+/*
+ * Return the number of RX queues of the eth-port device on
+ * adapter_no/port_no, or -1 when no such device exists (the original
+ * dereferenced a NULL pointer in that case).
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the HW queue id of queue_no on the eth-port device of
+ * adapter_no/port_no, or -1 when no such device exists (the original
+ * dereferenced a NULL pointer in that case).
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Read FLM statistics into data[0..size-1].
+ * Only implemented for the inline profile; returns -1 otherwise.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/*
+ * Per-resource-type allocation state of a NIC device. The bitmap and the
+ * reference counters are carved from one allocation (see
+ * init_resource_elements() in flow_api.c).
+ */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields;
+};
+
+/* Eth-port device: one per NIC port opened through flow_get_eth_dev(). */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next eth-port device on the same NIC */
+};
+
+/* Hash algorithm selection for flow_nic_set_hasher(). */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+/* registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* nonzero once flow management is initialized
+				 * (checked in flow_get_eth_dev)
+				 */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	/* opaque per-module resource-management handles */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* serializes access to this device's flow and
+			      * eth-port lists
+			      */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/* Error codes reported through flow_nic_set_error(). */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/* Clear bit x in byte-array bitmap arr; arguments evaluated exactly once
+ * (matches the hygiene of flow_nic_set_bit above).
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/* Test bit x in byte-array bitmap arr (nonzero if set); arguments are
+ * evaluated exactly once, matching flow_nic_set_bit.
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fa9240cb7b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdatomic.h>
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for one category function (CFN).
+ *
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * FTE enable bits are packed as bitmasks covering 'cat_funcs' CFNs per
+ * table entry, so the CFN index is split into a table index (fte_index)
+ * and a bit position (fte_field). The register is only written and
+ * flushed when the bitmask actually changes. Always returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* FTE table entry holding the enable bit for this (flow_type, cfn, lookup) */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	/* Bit position of this CFN within the selected entry */
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	/* Read the cached enable bitmask for the entry */
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Write back only on change to avoid redundant hardware flushes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for one category function (CFN).
+ * Identical indexing scheme to set_flow_type_km(), but targets the FLM
+ * FTE enable registers instead of the KM ones.
+ *
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * Always returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* FTE table entry holding the enable bit for this (flow_type, cfn, lookup) */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	/* Bit position of this CFN within the selected entry */
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Write back only on change to avoid redundant hardware flushes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a logical RX queue id into the corresponding hardware queue
+ * id by scanning the device's queue table. Returns -1 when no queue with
+ * the given id exists.
+ */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int queue_no = 0;
+
+	while (queue_no < dev->num_queues) {
+		if (dev->rx_queue[queue_no].id == id)
+			return dev->rx_queue[queue_no].hw_id;
+		++queue_no;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Bring up the FLM SDRAM interface: preset the control register, select
+ * split SDRAM usage, then poll for DDR4 calibration done for up to ~1 s
+ * (1e6 polls with 1 us sleeps). On success the flow timeout and scrubber
+ * interval are programmed. Returns 0 on success, -1 on calibration timeout.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reset and (re)initialize the FLM SDRAM contents.
+ *
+ * Sequence: disable FLM and clear every RCP category (except 0) so no
+ * lookups run during init; wait for the module to report idle; trigger
+ * the SDRAM INIT sequence and wait for completion; clear the INIT bit in
+ * the SW register cache; finally set HW_FLM_CONTROL_ENABLE to 'enable'.
+ * Each wait polls up to ~1 s (1e6 iterations, 1 us sleeps).
+ * Returns 0 on success, -1 if either wait times out.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+/* Number of FLM RCP groups and flow types (action sets) per group */
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Compact identity of an FLM action set (flow type). Packed into one
+ * uint64_t ('data') so two flow definitions can be compared for action
+ * equality with a single integer compare. The all-zero value means the
+ * entry is free; 'in_use' is set on every valid ident.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;
+			uint64_t drop : 1;
+			uint64_t ltx_en : 1;
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data;
+	};
+};
+
+/*
+ * FLM key recipe definition: dynamic-offset selectors (dyn/ofs pairs) for
+ * the two quad-words and two single-words of the lookup key, plus outer/
+ * inner protocol flags. Packed into one uint64_t ('data') so complete
+ * recipes can be compared with a single integer compare.
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data;
+	};
+};
+
+/*
+ * Condense a NIC flow definition into a flow-type identity value.
+ * No destinations means drop; otherwise PHY destinations set the local
+ * TX fields and VIRT destinations set the queue fields (the last entry
+ * of each kind wins). Tunnel-header, decap and jump-to-group information
+ * is folded in as well, so equal idents imply equal action sets.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	/* The bitfield union must stay exactly 64 bits wide */
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* UINT32_MAX marks "no jump"; otherwise store the 8-bit group */
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+/*
+ * Store the dynamic-offset selection for a quad-word of the key recipe:
+ * qw == 0 targets the QW0 fields, qw == 1 targets the QW4 fields.
+ * dyn is truncated to 7 bits and ofs to 8 bits to fit the bitfields.
+ */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+
+	if (qw != 0) {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+		return;
+	}
+
+	key_def->qw0_dyn = dyn & 0x7f;
+	key_def->qw0_ofs = ofs & 0xff;
+}
+
+/*
+ * Store the dynamic-offset selection for a single-word of the key recipe:
+ * sw == 0 targets the SW8 fields, sw == 1 targets the SW9 fields.
+ * dyn is truncated to 7 bits and ofs to 8 bits to fit the bitfields.
+ */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+
+	if (sw != 0) {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+		return;
+	}
+
+	key_def->sw8_dyn = dyn & 0x7f;
+	key_def->sw8_ofs = ofs & 0xff;
+}
+
+/*
+ * Per-group FLM state: the group-0 CFN and KM flow type the group was
+ * created from, the flow handle carrying the group's miss filter, the key
+ * recipe in use, whether FLM miss handling is active, and the table of
+ * flow types (action sets) allocated in this group. 'cashed_ft_index'
+ * (sic) remembers the last matched ft entry to speed up repeated lookups.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;
+	int km_ft_group0;
+	struct flow_handle *fh_group0;
+
+	struct flm_flow_key_def_s key_def;
+
+	int miss_enabled;
+
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	uint32_t cashed_ft_index;
+};
+
+/* Top-level FLM resource handle: one group slot per FLM RCP */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset) the FLM flow handle holding per-group state.
+ * On first use *handle is NULL and a zeroed handle is allocated; on
+ * later calls the existing handle is cleared and reused. All groups are
+ * marked unused (cfn_group0 == -1, no miss flow handle).
+ *
+ * If allocation fails, *handle is left NULL for the caller to detect;
+ * the original code dereferenced the calloc() result unconditionally.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	/* calloc() may have failed; do not dereference a NULL handle */
+	if (!flm_handle)
+		return;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/*
+ * Free the FLM flow handle and clear the caller's pointer so a later
+ * flm_flow_handle_create() allocates a fresh one. free(NULL) is a no-op,
+ * so calling this on an empty handle is safe.
+ */
+static void flm_flow_handle_remove(void **handle)
+{
+	free(*handle);
+	*handle = NULL;
+}
+
+/*
+ * Register a new FLM group: record the group-0 CFN, the KM flow type and
+ * the miss flow handle in the group slot. Miss handling starts disabled
+ * and is enabled lazily by flm_flow_learn_prepare().
+ * Returns 0 on success, -1 if group_index is out of range.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Tear down an FLM group slot: zero all group state and mark the CFN as
+ * unassigned (-1). Hardware deprogramming is handled separately by
+ * flm_flow_destroy_rcp(). Returns 0 on success, -1 on bad index.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+	/* memset left cfn_group0 at 0, which is a valid CFN; restore "unset" */
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Fetch the flow handle that carries a group's miss (default) filter.
+ * Returns 0 and writes *fh_miss on success; -1 on an out-of-range index.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index < FLM_FLOW_RCP_MAX) {
+		*fh_miss = flm_handle->groups[group_index].fh_group0;
+		return 0;
+	}
+
+	NT_LOG(ERR, FILTER,
+	       "FLM: Invalid index for FLM programming: Group=%d\n",
+	       (int)group_index);
+	return -1;
+}
+
+/*
+ * Program an FLM RCP (recipe) register set for a group: key word
+ * selectors from key_def, the per-word match mask, the KM id (KID),
+ * outer/inner protocol flags and byte-count offset, then flush the
+ * recipe to hardware. Returns 0 on success, -1 on bad group index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask words into the FLM register layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	/* Key word selection: dynamic part and byte offset for each QW/SW */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KIDs 0 and 1 are reserved, so groups map to KID group_index + 2 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Deprogram an FLM RCP: clear the recipe registers, and if miss handling
+ * was enabled for the group, unwind it in reverse order of setup —
+ * repoint the group-0 KCS recipe selection to 0, swap FT MISS back to FT
+ * UNHANDLED, and clear the group-0 bit in the KCE enable bitmask.
+ * Returns 0 on success, -1 on bad group index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare the hardware and bookkeeping needed before learning a flow into
+ * an FLM group.
+ *
+ * On the first flow of a group this enables miss handling: it claims the
+ * group's FLM RCP resource, points the group-0 KCS recipe selection at
+ * this group, programs the RCP, swaps FT UNHANDLED for FT MISS, and sets
+ * the group-0 KCE enable bit (the filter is transiently invalid between
+ * the FTE and KCE writes — see inline note). All flows of one group must
+ * share the same key definition.
+ *
+ * It then resolves the flow's action set to a flow-type (FT) entry,
+ * reusing an existing entry with an identical ident when possible.
+ *
+ * Outputs: *kid and *ft always on success; *cfn_to_copy and
+ * *cfn_to_copy_km_ft when a new FT entry was created; *fh_existing when
+ * an identical action set already exists (caller should attach to it).
+ * Returns 0 on success, -1 on error.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in one group must share the same key definition */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the most recently used ft entry matches this ident */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* ft 0 and 1 are reserved; scan for a free or matching entry */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the FT entry owned by a flow handle: clear the group's ft slot
+ * and disable both the KM flow type and the flow's FT in the FLM FTE
+ * tables. Returns 0 on success, non-zero if any disable call failed.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+/* Number of meter profiles; also the size of the dual-bucket table */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Per-profile dual token buckets. Rates and sizes are stored in the
+ * hardware encoding produced by flow_mtr_set_profile():
+ * bits [11:0] mantissa, bits [15:12] left-shift amount.
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires an FLM module of variant 2 to be present. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Maximum number of meter policies equals the number of meter profiles. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a byte count into bucket-size units of 2^40/10^9 bytes, i.e.
+ * compute value * 10^9 / 2^40 without overflowing 64-bit arithmetic:
+ * the input is split into two 20-bit halves that are scaled separately.
+ * The result is rounded up whenever either partial product leaves
+ * remainder bits below the divide.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t round_up =
+		(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+	return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+/*
+ * Encode a meter profile's two token buckets into the 16-bit hardware
+ * format and store them in the profile table.
+ *
+ * Rates (bytes/sec) are rounded up to 128-byte/sec units and clamped;
+ * sizes (bytes) are clamped to 38 bits and converted to 2^40/10^9-byte
+ * units. Both are then reduced to a 12-bit mantissa with a 4-bit shift,
+ * stored as [11:0] mantissa, [15:12] shift. Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies carry no per-policy hardware state here; accept and ignore. */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+/* Number of meter statistics entries (and thus supported meters) */
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* Size of one FLM info record, in 32-bit words */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Top bit of a 64-bit value; used as an "update in progress" marker */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* Periodic statistics trigger thresholds, expressed as powers of two */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Report how many meters this implementation can track. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics. n_pkt/n_bytes are written by the stats updater
+ * and read lock-free by flm_mtr_read_stats(); the MSB of n_pkt marks an
+ * update in progress. The *_base fields let reads be reported relative
+ * to the last clear. stats_mask == 0 means the meter is deleted.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;
+
+	volatile atomic_uint_fast64_t n_pkt;
+	volatile atomic_uint_fast64_t n_bytes;
+	uint64_t n_pkt_base;
+	uint64_t n_bytes_base;
+	volatile atomic_uint_fast64_t stats_mask;
+};
+
+/* Size of one FLM learn record, in 32-bit words */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Push one learn record to the FLM learn queue. If the queue lacks room,
+ * drain pending info records (which frees learn space) and re-check, up
+ * to FLM_PROG_MAX_RETRY times. Caller must hold the ndev mutex.
+ * Returns 0 on success, 1 if the queue never freed up, or the flush
+ * function's error code.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	/* Check cached free space first; only hit hardware when it looks full */
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Draining info records frees space for learn records */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter by learning a dedicated FLM flow entry: the meter id
+ * (+1, since 0 is reserved) is used as key word SW9 under KID 1, and the
+ * profile's A-bucket parameters are loaded with the bucket initially
+ * full. On success the meter's stat slot is bound to the profile buckets
+ * and its stats_mask is armed. Serialized by the ndev mutex.
+ * Returns the result of flow_flm_apply().
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	/* Start with a full bucket (mantissa part of the encoded size) */
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* 32-bit meter id spread over the byte-array id field; bit 7 of
+	 * id[8] marks the id as valid
+	 */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter: clear its statistics first (stats_mask = 0 stops any
+ * further counter updates for this id), then learn a delete record
+ * (op = 0) for the meter's FLM entry. Serialized by the ndev mutex.
+ * Returns the result of flow_flm_apply().
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	atomic_store(&mtr_stat[mtr_id].stats_mask, 0);
+	atomic_store(&mtr_stat[mtr_id].n_bytes, 0);
+	atomic_store(&mtr_stat[mtr_id].n_pkt, 0);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust a meter's bucket fill level by learning an update record
+ * (op = 2) with the given adjust value, reusing the meter's A-bucket
+ * rate/size. Serialized by the ndev mutex.
+ * Returns the result of flow_flm_apply().
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (atomic_load(&mtr_stat->stats_mask))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ whole info records from the
+ * FLM info queue into 'data'. Caller must hold the ndev mutex and supply
+ * a buffer of at least WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ
+ * words. Returns the number of records read (0 if none available).
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	/* Check cached availability first; refresh from HW if it looks empty */
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain pending FLM info records and fold valid meter statistics into
+ * the per-meter counters. Returns the number of records processed.
+ *
+ * Writer protocol (paired with the retry loop in flm_mtr_read_stats):
+ * n_pkt is first stored with UINT64_MSB set to mark an update in
+ * progress, then n_bytes is stored, then n_pkt is stored again without
+ * the marker. Readers spin until the marker is clear and the packet
+ * count is stable across the byte read.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				atomic_load(&mtr_stat[id].stats_mask);
+			if (stats_mask) {
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/* MSB set = update in progress (see note above) */
+				atomic_store(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB);
+				atomic_store(&mtr_stat[id].n_bytes, nb);
+				atomic_store(&mtr_stat[id].n_pkt, np);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read one meter's green packet/byte counters relative to their bases,
+ * optionally resetting the bases ('clear'). If the meter is deleted
+ * (stats_mask == 0) only *stats_mask is written.
+ *
+ * Lock-free read paired with the writer in flm_mtr_update_stats(): spin
+ * while the MSB marker is set on n_pkt, then require the packet count to
+ * be unchanged across the byte read before accepting the snapshot.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = atomic_load(&mtr_stat[id].stats_mask);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = atomic_load(&mtr_stat[id].n_pkt);
+			} while (pkt_1 & UINT64_MSB);
+			nb = atomic_load(&mtr_stat[id].n_bytes);
+			pkt_2 = atomic_load(&mtr_stat[id].n_pkt);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipe index for a port; recipe 0 is reserved, so port N uses N+1. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return port + 1;
+}
+
+/*
+ * Resolve a port_id to its physical port number by walking the NIC's
+ * list of ethernet devices. Returns UINT8_MAX when no device matches.
+ */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *it;
+
+	for (it = ndev->eth_base; it != NULL; it = it->next) {
+		if (it->port_id == port_id)
+			return it->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the front of the NIC's doubly-linked flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	if (ndev->flow_base)
+		ndev->flow_base->prev = fh;
+	fh->next = ndev->flow_base;
+	fh->prev = NULL;
+	ndev->flow_base = fh;
+}
+
+/*
+ * Unlink a flow handle from the NIC's flow list. The four cases cover:
+ * middle of the list, head (has next only), tail (has prev only), and
+ * sole element (neither). fh's own next/prev pointers are left as-is.
+ */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		ndev->flow_base = NULL;
+	}
+}
+
+/* Push @fh onto the front of the FLM flow list (doubly linked). */
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base_flm;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head != NULL)
+		head->prev = fh;
+	ndev->flow_base_flm = fh;
+}
+
+/* Unlink @fh_flm from the FLM flow list, fixing up the list head. */
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *nxt = fh_flm->next;
+	struct flow_handle *prv = fh_flm->prev;
+
+	if (nxt != NULL && prv != NULL) {
+		/* Middle of the list. */
+		prv->next = nxt;
+		nxt->prev = prv;
+		return;
+	}
+	if (nxt != NULL) {
+		/* Head of the list. */
+		nxt->prev = NULL;
+		ndev->flow_base_flm = nxt;
+		return;
+	}
+	if (prv != NULL) {
+		/* Tail of the list. */
+		prv->next = NULL;
+		return;
+	}
+	/* Sole element. */
+	if (ndev->flow_base_flm == fh_flm)
+		ndev->flow_base_flm = NULL;
+}
+
+/*
+ * Interpret a FLOW_ELEM_TYPE_VLAN item: when the mask selects the TCI,
+ * allocate one SW key word matching the first VLAN tag (DYN_FIRST_VLAN).
+ * fd->vlans is bumped unconditionally, even with a NULL spec/mask.
+ * Combining an explicit TCI match with @implicit_vlan_vid is rejected.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment at the
+ * bottom is invisible to the caller - verify the caller tracks SW-word
+ * usage itself, otherwise a later element could reuse the same slot.
+ */
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words live in packet_data[1] and [0], allocated top-down. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/* TCI is big-endian in the element; mask applied to data. */
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+/*
+ * Interpret a FLOW_ELEM_TYPE_IPV4 item and add address matches to the key.
+ * A free QW (quad-word) slot is preferred and holds src+dst together at
+ * L3 offset 12; otherwise one SW word is used per masked address (src at
+ * offset 12, dst at offset 16). Also records the L3 protocol as outer or
+ * tunnel, depending on @any_count and whether an outer L3 was already set.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @qw_counter and @sw_counter are passed by value, so the
+ * increments below are invisible to the caller - confirm the caller
+ * accounts for the key words consumed here.
+ */
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		/*
+		 * frag_offset of 0xffff in both spec and mask appears to be a
+		 * sentinel requesting fragmentation matching; the meaning of
+		 * the 0xfe value is defined elsewhere - TODO confirm.
+		 */
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			/* QW slots occupy packet_data[2..9], allocated top-down. */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			/*
+			 * Unsigned arithmetic: "2 - sw_counter" relies on
+			 * sw_counter <= 2 at this point.
+			 */
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* Source address: one SW word at L3 offset 12. */
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			/* Destination address: one SW word at L3 offset 16. */
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+/*
+ * Interpret a FLOW_ELEM_TYPE_IPV6 item: each non-zero 128-bit address in
+ * the spec consumes one QW (quad-word) key slot - source at L3 offset 8,
+ * destination at L3 offset 24. Also records the L3 protocol as outer or
+ * tunnel, depending on @any_count and whether an outer L3 was already set.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): only the spec is tested with is_non_zero(); an all-zero
+ * spec with a non-zero mask is silently ignored - confirm this is the
+ * intended behavior.
+ * NOTE(review): @qw_counter is passed by value, so the increments below
+ * are invisible to the caller - confirm the caller accounts for them.
+ */
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* QW slots occupy packet_data[2..9], allocated top-down. */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			/*
+			 * Convert the big-endian address to host order one
+			 * 32-bit word at a time, then apply the mask.
+			 */
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			/* Same per-word byte-swap and masking as for src_addr. */
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+/*
+ * Interpret a FLOW_ELEM_TYPE_UDP item ("upd" in the name is apparently a
+ * typo for "udp", kept to avoid breaking callers): when a port mask is
+ * given, pack src/dst ports into one SW key word at L4 offset 0. Also
+ * records the L4 protocol (outer or tunnel) and flags the key direction.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is invisible to the caller - confirm SW-word usage is tracked there.
+ */
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words live in packet_data[1] and [0], allocated top-down. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/*
+			 * Cast before shifting: ntohs() yields a uint16_t that
+			 * is promoted to (signed) int, so "0xffff << 16" would
+			 * be undefined behavior without the cast.
+			 */
+			sw_mask[0] = ((uint32_t)ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = (((uint32_t)ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Inside a tunnel (or after an outer L4) this describes the inner UDP. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a FLOW_ELEM_TYPE_SCTP item: when a port mask is given, pack
+ * src/dst ports into one SW key word at L4 offset 0. Also records the
+ * L4 protocol (outer or tunnel) and flags the key direction.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is invisible to the caller - confirm SW-word usage is tracked there.
+ */
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words live in packet_data[1] and [0], allocated top-down. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/*
+			 * Cast before shifting: ntohs() yields a uint16_t that
+			 * is promoted to (signed) int, so "0xffff << 16" would
+			 * be undefined behavior without the cast.
+			 */
+			sw_mask[0] = ((uint32_t)ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = (((uint32_t)ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Inside a tunnel (or after an outer L4) this describes the inner SCTP. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a FLOW_ELEM_TYPE_TCP item: when a port mask is given, pack
+ * src/dst ports into one SW key word at L4 offset 0. Also records the
+ * L4 protocol (outer or tunnel) and flags the key direction.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is invisible to the caller - confirm SW-word usage is tracked there.
+ */
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words live in packet_data[1] and [0], allocated top-down. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/*
+			 * Cast before shifting: ntohs() yields a uint16_t that
+			 * is promoted to (signed) int, so "0xffff << 16" would
+			 * be undefined behavior without the cast.
+			 */
+			sw_mask[0] = ((uint32_t)ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = (((uint32_t)ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Inside a tunnel (or after an outer L4) this describes the inner TCP. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a FLOW_ELEM_TYPE_GTP item: when the TEID is masked, allocate
+ * one SW key word matching it at L4-payload offset 4. The flow is marked
+ * as GTPv1-U tunnelled unconditionally, even with a NULL spec/mask.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below
+ * is invisible to the caller - confirm SW-word usage is tracked there.
+ */
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words live in packet_data[1] and [0], allocated top-down. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/* TEID is big-endian in the element; mask applied to data. */
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return a CAT function (CFN) slot to its unused state: preset the CFN
+ * record, detach the CFN from both the KM and FLM key matchers, and clear
+ * any CTE/CTS action selections that were attached to it.
+ *
+ * @param dev  flow device owning the backend to program
+ * @param cfn  index of the category function to reset
+ * @return always 0
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN */
+	{
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+				   0, 0);
+		hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+	}
+
+	/* KM */
+	{
+		uint32_t bm = 0;
+
+		/* The KM enable bitmap packs 8 CFNs per entry; clear only
+		 * this CFN's bit, then zero its category selection.
+		 */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every KM flow type for this CFN on all four
+		 * indices (presumably the per-key-set lanes — confirm).
+		 */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* FLM */
+	{
+		uint32_t bm = 0;
+
+		/* Same clear sequence as for KM, on the FLM side. */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* CTE / CTS */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		/* Only touch CTS if this CFN had any CTE enable bits set. */
+		if (cte) {
+			/* Each CTS entry carries two category selections
+			 * (CAT_A/CAT_B), hence the rounded-up divide by 2;
+			 * each CFN owns cts_offset consecutive CTS entries.
+			 */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Populate an FLM-type flow handle from an interpreted flow definition:
+ * derive the IP protocol number from the (outer, then tunneled) L4
+ * protocol, copy the 10-word match key, and latch the NAT/DSCP/TEID/QFI
+ * values captured by modify-field actions.
+ *
+ * @return 0 on success, -1 if fh is not an FLM handle
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* Outer L4 protocol wins; fall back to the tunneled one. */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Pick up values recorded by MODIFY_FIELD actions. */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		const struct nic_flow_def *def = fd;
+
+		switch (def->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = def->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* RQI is bit 6, QFI the low 6 bits of the same byte */
+			fh->flm_rqi = (def->modify_field[idx].value8[0] >> 6) & 0x1;
+			fh->flm_qfi = def->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 = ntohl(def->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port = ntohs(def->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(def->modify_field[idx].value32[0]);
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build an FLM v17 learn record from an FLM flow handle and push it to
+ * hardware via flow_flm_apply().
+ *
+ * @param dev     flow device to program
+ * @param fh      FLM-type flow handle carrying key and action data
+ * @param mtr_ids optional array of MAX_FLM_MTRS_SUPPORTED meter ids (or NULL)
+ * @param flm_ft  FLM flow type to record
+ * @param flm_op  learn operation code (low 4 bits used)
+ * @return result of flow_flm_apply(), or -1 if fh is not an FLM handle
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s lrn;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&lrn, 0x0, sizeof(lrn));
+
+	/* flm_data[] holds the key most-significant word first; the qw0/qw4
+	 * record fields take it in reverse order.
+	 */
+	for (int w = 0; w < 4; ++w) {
+		lrn.qw0[w] = fh->flm_data[9 - w];
+		lrn.qw4[w] = fh->flm_data[5 - w];
+	}
+	lrn.sw8 = fh->flm_data[1];
+	lrn.sw9 = fh->flm_data[0];
+	lrn.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		FLM_V17_MBR_ID1(lrn.mbr_idx) = mtr_ids[0];
+		FLM_V17_MBR_ID2(lrn.mbr_idx) = mtr_ids[1];
+		FLM_V17_MBR_ID3(lrn.mbr_idx) = mtr_ids[2];
+		FLM_V17_MBR_ID4(lrn.mbr_idx) = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t used = 0;
+
+		while (used < MAX_FLM_MTRS_SUPPORTED && mtr_ids[used] != 0)
+			++used;
+		lrn.vol_idx = used;
+	}
+
+	lrn.nat_ip = fh->flm_nat_ipv4;
+	lrn.nat_port = fh->flm_nat_port;
+	lrn.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	lrn.dscp = fh->flm_dscp;
+	lrn.teid = fh->flm_teid;
+	lrn.qfi = fh->flm_qfi;
+	lrn.rqi = fh->flm_rqi;
+	/* color: bits [9:0] = RPL EXT PTR, bits [13:10] = MTU recipe */
+	lrn.color = (fh->flm_rpl_ext_ptr & 0x3ff) |
+		    ((fh->flm_mtu_fragmentation_recipe & 0xf) << 10);
+
+	lrn.ent = 0;
+	lrn.op = flm_op & 0xf;
+	lrn.prio = fh->flm_prio & 0x3;
+	lrn.ft = flm_ft;
+	lrn.kid = fh->flm_kid;
+	lrn.eor = 1;
+
+	return flow_flm_apply(dev, &lrn);
+}
+
+/*
+ * Allocate or reference the KM flow-type (FT) and KM category (RCP)
+ * resources for a new flow handle.
+ *
+ * If no identical flow exists, a KM FT is found by identity match (or a
+ * free slot is claimed) and a KM RCP is either shared with a compatible
+ * existing flow or freshly allocated; the KM entry data is then written.
+ * If an identical flow was found, its FT and RCP are reference-counted
+ * and its match entry is reused.
+ *
+ * On success the three setup_km* outputs are filled in and 0 is returned;
+ * on resource exhaustion, error is set and 1 is returned.
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+				/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Scan FTs 1..MAX: remember the first identity match
+			 * and, as fallback, the first unused (zero) slot.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Reuse matching FT by bumping its refcount */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record our identity */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add our FT to the shared RCP's FT mask A */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference its FT and RCP instead of
+		 * allocating, and reuse its match entry.
+		 */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		*setup_km = 1;
+		/* NOTE(review): these read 'flow', not 'found_flow'; this is
+		 * only correct because the caller breaks its scan loop on an
+		 * identical match, leaving flow == found_flow — confirm.
+		 */
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * @param fd     flow definition (tunnel header and modify-field table)
+ * @param i      index of the modify-field entry being adjusted
+ * @param ofs    in/out copy offset to rebase
+ * @param select CPY_SELECT_* field selector for this entry
+ * @param dyn    out: set to 1 when the offset was rebased to static L2
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	/* Only relevant when a tunnel header longer than plain L2 is pushed */
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/* Fix: the original statement was "ofs += ...", which
+			 * advanced the local pointer instead of the caller's
+			 * offset value, silently dropping the adjustment.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Rebase to a static offset from start of L2,
+			 * depending on which header field is targeted.
+			 */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* Update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* Update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * Prepare 'ndev' for the inline flow profile.
+ *
+ * Reserves the resource indexes the hardware treats as defaults (index 0
+ * everywhere, plus FLM flow type 1), programs default/catch-all recipes for
+ * CAT, QSL, PDB, HSH and COT, unblocks the RMC paths, calibrates and resets
+ * the FLM SDRAM, configures FLM priorities and periodic statistics, and
+ * allocates the FLM meter/flow-type/meter-statistics bookkeeping tables.
+ *
+ * Idempotent: the whole body is guarded by ndev->flow_mgnt_prepared.
+ * Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): on the error path flow_mgnt_prepared is still 0, so the
+ * cleanup body of done_flow_management_of_ndev_profile_inline() (which is
+ * guarded by that flag) is skipped, leaking anything allocated above -
+ * confirm whether this is intended.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Clear all FLM status reporting, then set report burst limit */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Host-side FLM bookkeeping tables; checked together below */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			atomic_init(&mtr_stat[i].n_pkt, 0);
+			atomic_init(&mtr_stat[i].n_bytes, 0);
+			atomic_init(&mtr_stat[i].stats_mask, 0);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down everything initialize_flow_management_of_ndev_profile_inline()
+ * set up: reset the FLM SDRAM, release the reserved default resource
+ * indexes, reset the default recipes in hardware and free the host-side
+ * bookkeeping tables.
+ *
+ * Idempotent: the body is guarded by ndev->flow_mgnt_prepared, which is
+ * cleared on the way out. Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/*
+		 * NULL the handles after freeing them so a stale pointer can
+		 * never be dereferenced or double-freed if teardown/init runs
+		 * again on this ndev.
+		 */
+		free(ndev->flm_mtr_handle);
+		ndev->flm_mtr_handle = NULL;
+		free(ndev->ft_res_handle);
+		ndev->ft_res_handle = NULL;
+		free(ndev->mtr_stat_handle);
+		ndev->mtr_stat_handle = NULL;
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow specification without committing any NIC resources:
+ * run the element/action interpreter under the ndev lock and discard the
+ * resulting flow definition. Returns 0 when the flow is expressible,
+ * -1 otherwise ('error' is filled in by the interpreter).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t unused_port_id = 0;
+	uint32_t unused_dest_ports = 0;
+	uint32_t unused_queues = 0;
+	uint32_t match_data[10];
+	uint32_t match_mask[10];
+	struct flm_flow_key_def_s keydef;
+	struct nic_flow_def *def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	def = interpret_flow_elements(dev, elem, action, error, 0,
+				      &unused_port_id, &unused_dest_ports,
+				      &unused_queues, match_data, match_mask,
+				      &keydef);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (def == NULL)
+		return -1;
+
+	free(def);
+	return 0;
+}
+
+/*
+ * Create a flow for the inline profile.
+ *
+ * Interprets the element/action arrays into a nic_flow_def, translates the
+ * caller's group IDs into NIC group indexes, then programs the filter via
+ * create_flow_filter(). The whole operation runs under the ndev lock.
+ *
+ * Returns the new flow handle, or NULL on failure with 'error' set.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy; forced VLAN only applies to group 0 */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	/*
+	 * Log the pointer values only: create_flow_filter() may already have
+	 * freed fd (its free_fd path), so fd must not be dereferenced here.
+	 */
+	NT_LOG(DBG, FILTER,
+	       "New flow: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy one flow handle. Caller must hold dev->ndev->mtx.
+ *
+ * For FLM flows: unprogram the flow, release any TPE replace-extension and
+ * replace-data resources it referenced, drop the group translation and the
+ * owner's reference count (destroying the owner template flow when it hits
+ * zero). For regular flows: release every NIC resource the handle holds,
+ * resetting the corresponding hardware recipe when the last reference to a
+ * resource index is dropped.
+ *
+ * Returns 0 on success; non-zero if any release step failed (error is then
+ * set to ERR_REMOVE_FLOW_FAILED). The handle is freed in all cases.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the flow from the FLM */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Last reference to the replace extension? Then clear it and
+		 * every 16-byte replace-data chunk it pointed at.
+		 */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owner template when this was its last flow */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Host-side table only; clear the ident slot */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe spans all TPE sub-modules */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Helper: walk one flow list and destroy every flow created on 'dev'.
+ * The walk stops at the first destroy error. Caller holds dev->ndev->mtx.
+ */
+static int destroy_dev_flows_on_list(struct flow_eth_dev *dev,
+				     struct flow_handle *head)
+{
+	int err = 0;
+
+	while (head && !err) {
+		/* Save the link first; destroy frees 'head' */
+		struct flow_handle *next = head->next;
+
+		if (head->dev == dev)
+			err = flow_destroy_locked_profile_inline(dev, head,
+								 NULL);
+		head = next;
+	}
+
+	return err;
+}
+
+/*
+ * Destroy a single flow, or - when 'flow' is NULL - every flow (regular and
+ * FLM) created on this eth device. Takes the ndev lock for the duration.
+ * Returns 0 on success, non-zero on the first failed destroy.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		err = destroy_dev_flows_on_list(dev, dev->ndev->flow_base);
+
+		/* Delete all created FLM flows from this eth device */
+		if (!err)
+			err = destroy_dev_flows_on_list(dev,
+							dev->ndev->flow_base_flm);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * Flush is not implemented for the inline profile: report the condition in
+ * 'error' and fail. Always returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Query is not implemented for the inline profile: clear the output
+ * parameters, report the condition in 'error' and fail. Always returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	*data = NULL;
+	*length = 0;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Read the FLM statistics registers into 'data', one uint64_t per field in
+ * the local 'fields' table (in that order). HW_FLM_STAT_FLOWS is a gauge and
+ * overwrites data[0]; every other field is a counter and is accumulated onto
+ * the caller's previous value. 'size' is the capacity of 'data' in elements.
+ * Returns 0 on success, -1 when 'data' is too small.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	/* Latch the hardware counters before reading them */
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		/* FLOWS is a gauge; all other fields accumulate */
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		/* FLM versions before 18 end at PRB_IGNORE; stop early */
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the TPE IFR (in-flight reassembly/fragmentation) recipes so that
+ * 'mtu' is enforced on 'port'. Port numbers of 255 and above are rejected.
+ * The recipes are flushed to hardware only when all four set operations
+ * succeeded. Returns 0 on success, non-zero otherwise.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	uint8_t rcp_idx = convert_port_to_ifr_mtu_recipe(port);
+	int rc = 0;
+
+	rc |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					 rcp_idx, 1);
+	rc |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					 rcp_idx, mtu);
+	rc |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				     rcp_idx, 1);
+	rc |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				     rcp_idx, mtu);
+
+	if (rc != 0)
+		return rc;
+
+	rc |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+	rc |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the inline flow profile implementation.
+ * NOTE(review): the guard name uses a leading underscore + uppercase, which
+ * is an identifier reserved for the implementation (C11 7.1.3) - consider
+ * renaming if the project's style permits.
+ */
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down the inline-profile state on 'ndev'; idempotent, returns 0 */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time inline-profile setup of 'ndev'; returns 0 on success, -1 on error */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy a single flow; caller must already hold the ndev mutex */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Check that a flow spec is expressible without allocating NIC resources */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns the handle or NULL with 'error' set */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of 'dev' when 'flow' is NULL */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented: always fails with FLOW_ERROR_GENERAL */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented: always fails with FLOW_ERROR_GENERAL */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Copy/accumulate FLM statistics into 'data' ('size' = capacity in elements) */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend context: the current debug mode plus a cached pointer
+ * to each nthw module driver instance this backend drives. One slot per
+ * physical adapter, indexed by adapter number.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode;
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Enable register-write tracing on module instance 'inst' when the backend
+ * debug mode has the WRITE bit set or the module's own debug flag is set.
+ * Expands to a declaration of the local flag '__debug__' (consumed by
+ * _CHECK_DEBUG_OFF below), so it may only appear where declarations are
+ * allowed, once per scope.
+ * NOTE(review): the "if (...) do { ... } while (0)" shape leaves the if
+ * without an else (dangling-else hazard at the use site), and both macro
+ * names and "__debug__" fall in the C reserved-identifier space (C11 7.1.3)
+ * - consider renaming/restructuring; verify no caller depends on the exact
+ * expansion first.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+/* Disable tracing again if (and only if) _CHECK_DEBUG_ON enabled it */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Backend op: record the requested debug mode on this adapter. Returns 0. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	dev->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+/* INFO capability query: KM TCAM bank width. */
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	return info_nthw_get_nb_km_tcam_bank_width(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of FLM categories. */
+static int get_nb_flm_categories(void *be_dev)
+{
+	return info_nthw_get_nb_flm_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: FLM memory size in MB. */
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	return info_nthw_get_nb_flm_size_mb(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: FLM entry size. */
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	return info_nthw_get_nb_flm_entry_size(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: FLM variant. */
+static int get_nb_flm_variant(void *be_dev)
+{
+	return info_nthw_get_nb_flm_variant(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of FLM priorities. */
+static int get_nb_flm_prios(void *be_dev)
+{
+	return info_nthw_get_nb_flm_prios(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of FLM PST profiles. */
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	return info_nthw_get_nb_flm_pst_profiles(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of HST categories. */
+static int get_nb_hst_categories(void *be_dev)
+{
+	return info_nthw_get_nb_hst_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of QSL categories. */
+static int get_nb_qsl_categories(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of QSL QST entries. */
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_qst_entries(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of PDB categories. */
+static int get_nb_pdb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_pdb_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of IOA categories. */
+static int get_nb_ioa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_ioa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of ROA categories. */
+static int get_nb_roa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_roa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of TPE categories. */
+static int get_nb_tpe_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of TX copy writers. */
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_writers(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: TX copy mask memory size. */
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_mask_mem(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: TX replace table depth. */
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_depth(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of TX replace extension categories. */
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_ext_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/* INFO capability query: number of TPE IFR categories. */
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_ifr_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when the CAT module is instantiated in this FPGA image. */
+static bool cat_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_cat_nthw != NULL;
+}
+
+/* CAT module version word: major in the upper 16 bits, minor in the lower. */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t ver = (uint32_t)(module_get_major_version(be->p_cat_nthw->m_cat) << 16);
+
+	ver |= (uint32_t)(module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff);
+	return ver;
+}
+
+/*
+ * Write @cnt consecutive CFN records, starting at @cat_func, from the shadow
+ * configuration @cat into the CAT module.
+ *
+ * Supported register layouts: v18 and v21/v22 (v21/v22 adds tunnel/TTL error
+ * bits and an optional second KM interface).  Returns 0.
+ *
+ * NOTE(review): both branches originally called an undeclared function
+ * "r(be->p_cat_nthw, 1U)"; every sibling *_flush sets a burst count of one
+ * via its *_cnt helper, so this is restored as cat_nthw_cfn_cnt().
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* one record per register flush */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
+			/* protocol test conditions */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			/* error test conditions */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
+
+			/* pattern matcher conditions */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* one record per register flush */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
+			/* protocol test conditions */
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			/* error test conditions (v21+ adds tunnel/TTL checks) */
+			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
+
+			/* pattern matcher conditions */
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
+			/* second KM interface is optional */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						    cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt KCE (KM category enable) records, starting at @index, to the
+ * CAT module for KM interface @km_if_idx.  v18 has only one KM interface,
+ * so the interface index is fixed at 0 there.  Returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = index + rec;
+
+			cat_nthw_kce_select(be->p_cat_nthw, 0, adr);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[adr].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = index + rec;
+
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, adr);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[adr].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt KCS (KM category select) records, starting at @cat_func, to the
+ * CAT module for KM interface @km_if_idx (fixed at 0 for v18).  Returns 0.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int rec = 0; rec < cnt; rec++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					      cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int rec = 0; rec < cnt; rec++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt FTE (flow type enable) records, starting at @index, to the CAT
+ * module for KM interface @km_if_idx (fixed at 0 for v18).  Returns 0.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = index + rec;
+
+			cat_nthw_fte_select(be->p_cat_nthw, 0, adr);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[adr].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = index + rec;
+
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, adr);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[adr].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt CTE (category test enable) records, starting at @cat_func, to
+ * the CAT module.  v18 and v21 share one record layout; v22 adds the RRB
+ * enable bit.  Returns 0.
+ *
+ * Fix: the v22 branch called cat_nthw_cte_enable_tpe() a second time with
+ * the b.rrb field, clobbering the TPE enable with the RRB value instead of
+ * programming RRB — restored to cat_nthw_cte_enable_rrb().
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v22.cte[cat_func].b.tpe);
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw, cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt CTS (category test select) records, starting at @index, to the
+ * CAT module.  The record layout is shared by all supported versions, so the
+ * v18 union view is used throughout.  Returns 0.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = index + rec;
+
+			cat_nthw_cts_select(be->p_cat_nthw, adr);
+			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[adr].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[adr].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt COT (color table) records, starting at @cat_func, to the CAT
+ * module.  Record layout is version-independent (v18 view used).  Returns 0.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = cat_func + rec;
+
+			cat_nthw_cot_select(be->p_cat_nthw, adr);
+			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[adr].color);
+			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[adr].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt CCT (color control table) records, starting at @index, to the
+ * CAT module.  Record layout is version-independent (v18 view).  Returns 0.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = index + rec;
+
+			cat_nthw_cct_select(be->p_cat_nthw, adr);
+			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[adr].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[adr].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt EXO (extractor offset) records, starting at @ext_index, to the
+ * CAT module.  Record layout is version-independent (v18 view).  Returns 0.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = ext_index + rec;
+
+			cat_nthw_exo_select(be->p_cat_nthw, adr);
+			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[adr].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[adr].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt RCK records, starting at @index, to the CAT module.
+ * Record layout is version-independent (v18 view).  Returns 0.
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = index + rec;
+
+			cat_nthw_rck_select(be->p_cat_nthw, adr);
+			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[adr].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt LEN (length test) records, starting at @len_index, to the CAT
+ * module.  Record layout is version-independent (v18 view).  Returns 0.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = len_index + rec;
+
+			cat_nthw_len_select(be->p_cat_nthw, adr);
+			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[adr].lower);
+			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[adr].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[adr].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[adr].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[adr].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt KCC CAM records, starting at @len_index, to the CAT module.
+ * Record layout is version-independent (v18 view).  Returns 0.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = len_index + rec;
+
+			cat_nthw_kcc_select(be->p_cat_nthw, adr);
+			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[adr].key);
+			cat_nthw_kcc_category(be->p_cat_nthw, cat->v18.kcc_cam[adr].category);
+			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[adr].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt CCE records, starting at @len_index, to the CAT module.
+ * Only present in the v22 register layout.  Returns 0.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = len_index + rec;
+
+			cat_nthw_cce_select(be->p_cat_nthw, adr);
+			cat_nthw_cce_data_imm(be->p_cat_nthw, cat->v22.cce[adr].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw, cat->v22.cce[adr].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt CCS records, starting at @len_index, to the CAT module.
+ * Only present in the v22 register layout.  Each record carries per-module
+ * enable/value pairs plus three side-band type/data pairs.  Returns 0.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = len_index + rec;
+
+			cat_nthw_ccs_select(be->p_cat_nthw, adr);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw, cat->v22.ccs[adr].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw, cat->v22.ccs[adr].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw, cat->v22.ccs[adr].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw, cat->v22.ccs[adr].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw, cat->v22.ccs[adr].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw, cat->v22.ccs[adr].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw, cat->v22.ccs[adr].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw, cat->v22.ccs[adr].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw, cat->v22.ccs[adr].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw, cat->v22.ccs[adr].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw, cat->v22.ccs[adr].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw, cat->v22.ccs[adr].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw, cat->v22.ccs[adr].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw, cat->v22.ccs[adr].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw, cat->v22.ccs[adr].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw, cat->v22.ccs[adr].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw, cat->v22.ccs[adr].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw, cat->v22.ccs[adr].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw, cat->v22.ccs[adr].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw, cat->v22.ccs[adr].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw, cat->v22.ccs[adr].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw, cat->v22.ccs[adr].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw, cat->v22.ccs[adr].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw, cat->v22.ccs[adr].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw, cat->v22.ccs[adr].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw, cat->v22.ccs[adr].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw, cat->v22.ccs[adr].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw, cat->v22.ccs[adr].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when the KM module is instantiated in this FPGA image. */
+static bool km_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_km_nthw != NULL;
+}
+
+/* KM module version word: major in the upper 16 bits, minor in the lower. */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t ver = (uint32_t)(module_get_major_version(be->p_km_nthw->m_km) << 16);
+
+	ver |= (uint32_t)(module_get_minor_version(be->p_km_nthw->m_km) & 0xffff);
+	return ver;
+}
+
+/*
+ * Write @cnt KM RCP (recipe) records, starting at @category, to the KM
+ * module.  Only the v7 register layout is supported here.  Returns 0.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = category + rec;
+
+			km_nthw_rcp_select(be->p_km_nthw, adr);
+			/* extractor word/double-word selection */
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[adr].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[adr].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[adr].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[adr].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[adr].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[adr].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[adr].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[adr].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[adr].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[adr].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[adr].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[adr].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[adr].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[adr].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[adr].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[adr].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[adr].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[adr].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[adr].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[adr].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[adr].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[adr].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[adr].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[adr].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[adr].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[adr].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[adr].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[adr].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[adr].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[adr].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[adr].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[adr].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[adr].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[adr].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[adr].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[adr].synergy_mode);
+			/* B-side word extraction */
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[adr].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[adr].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[adr].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[adr].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[adr].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[adr].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[adr].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[adr].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt KM CAM records to the KM module.  The CAM address packs the
+ * bank in the bits above bit 11 and the record number in the low 11 bits.
+ * Only the v7 register layout is supported here.  Returns 0.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = (bank << 11) + record + rec;
+
+			km_nthw_cam_select(be->p_km_nthw, adr);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[adr].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[adr].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[adr].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[adr].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[adr].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[adr].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[adr].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[adr].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[adr].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[adr].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[adr].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[adr].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write @cnt KM TCAM entries to the KM module, skipping entries whose dirty
+ * flag is clear.  The flat TCAM index is bank * 4 * 256 + byte * 256 + value
+ * (4 bytes per bank, 256 values per byte).  Written entries have their dirty
+ * flag cleared.  Only the v7 register layout is supported here.  Returns 0.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		const int start_idx = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int rec = 0; rec < cnt; rec++) {
+			const int adr = start_idx + rec;
+
+			/* only push entries software has modified */
+			if (!km->v7.tcam[adr].dirty)
+				continue;
+			km_nthw_tcam_select(be->p_km_nthw, adr);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[adr].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[adr].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries (NOTE: comment said "version 3" but code checks ver == 7 - verify) */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries (NOTE: comment said "version 3" but code checks ver == 7 - verify) */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_flm_nthw != NULL;
+}
+
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
+			  (module_get_minor_version(be->p_flm_nthw->m_flm) &
+			   0xffff));
+}
+
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hsh_nthw != NULL;
+}
+
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
+			  (module_get_minor_version(be->p_hsh_nthw->m_hsh) &
+			   0xffff));
+}
+
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hst_nthw != NULL;
+}
+
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hst_nthw->m_hst) << 16) |
+			  (module_get_minor_version(be->p_hst_nthw->m_hst) &
+			   0xffff));
+}
+
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_qsl_nthw != NULL;
+}
+
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
+			  (module_get_minor_version(be->p_qsl_nthw->m_qsl) &
+			   0xffff));
+}
+
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_nthw != NULL;
+}
+
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_nthw->m_slc) << 16) |
+			  (module_get_minor_version(be->p_slc_nthw->m_slc) &
+			   0xffff));
+}
+
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_lr_nthw != NULL;
+}
+
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_lr_nthw->m_slc_lr)
+			   << 16) |
+			  (module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) &
+			   0xffff));
+}
+
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_pdb_nthw != NULL;
+}
+
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
+			  (module_get_minor_version(be->p_pdb_nthw->m_pdb) &
+			   0xffff));
+}
+
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/* was duplicate_bit (copy-paste bug): clobbered DUPLICATE_BIT */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_ioa_nthw != NULL;
+}
+
+static uint32_t ioa_get_version(void *be_dev)
+{
+	/* Version word layout: (major << 16) | (minor & 0xffff). */
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(dev->p_ioa_nthw->m_ioa);
+	const uint32_t minor = module_get_minor_version(dev->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' IOA RCP records, starting at record index 'category', to
+ * hardware: each record is selected, its fields written through the nthw
+ * shadow API, then flushed. Only module version 4 is handled; always
+ * returns 0.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		/* Write one record per flush. */
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write the two custom VLAN TPID values to the IOA special-TPID register
+ * and flush. Only module version 4 is handled; always returns 0.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' ROA egress-packet-processing (EPP) records, starting at
+ * 'index', via the IOA module: select, write fields, flush per record.
+ * Only module version 4 is handled; always returns 0.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+static bool roa_get_present(void *be_dev)
+{
+	/* The ROA module is present iff its nthw instance was created. */
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_roa_nthw != NULL;
+}
+
+static uint32_t roa_get_version(void *be_dev)
+{
+	/* Version word layout: (major << 16) | (minor & 0xffff). */
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(dev->p_roa_nthw->m_roa);
+	const uint32_t minor = module_get_minor_version(dev->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush tunnel-header data to the ROA module. Each tunnel header is
+ * written in four chunks of four 32-bit words (cnt is set to 4, and the
+ * inner loop advances the source pointer by 4 words per chunk).
+ * NOTE(review): 'index' is divided by 4 when indexing v6.tunhdr but used
+ * undivided (scaled by 4) for the hardware select — this assumes 'index'
+ * addresses quarter-records; confirm against the callers.
+ * Only module version 6 is handled; always returns 0.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' ROA tunnel-configuration records, starting at 'category',
+ * to hardware: select each record, write all fields, then flush.
+ * Only module version 6 is handled; always returns 0.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the ROA global forwarding configuration (recirculation, normal /
+ * non-normal / cell-builder packet forwarding and tx ports) to hardware.
+ * Only module version 6 is handled; always returns 0.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' ROA link-aggregation configuration records, starting at
+ * 'index', to hardware. Only module version 6 is handled; always
+ * returns 0.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+static bool rmc_get_present(void *be_dev)
+{
+	/* The RMC module is present iff its nthw instance was created. */
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	return dev->p_rmc_nthw != NULL;
+}
+
+static uint32_t rmc_get_version(void *be_dev)
+{
+	/* Version word layout: (major << 16) | (minor & 0xffff). */
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(dev->p_rmc_nthw->m_rmc);
+	const uint32_t minor = module_get_minor_version(dev->p_rmc_nthw->m_rmc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the RMC control register block (statistics / keep-alive / RPP
+ * slice / MAC port blocking and LAG phy odd-even) to hardware.
+ * Only handled for version 0x10003 (i.e. v1.3, matching the v1_3 union
+ * member); always returns 0.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+static bool tpe_get_present(void *be_dev)
+{
+	/* TPE is a composite module: all six sub-modules must exist. */
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	if (dev->p_csu_nthw == NULL || dev->p_hfu_nthw == NULL)
+		return false;
+	if (dev->p_rpp_lr_nthw == NULL || dev->p_tx_cpy_nthw == NULL)
+		return false;
+	return dev->p_tx_ins_nthw != NULL && dev->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Map the six TPE sub-module version words onto a single combined TPE
+ * version: returns 1 or 2 for the two known combinations (they differ
+ * only in the RPP_LR version).
+ * NOTE(review): an unknown combination hits assert(false) and then
+ * returns 0 — under NDEBUG the assert is compiled out, so callers
+ * silently see version 0; confirm that callers treat 0 as invalid.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	assert(false);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RPP_LR RCP records, starting at 'index', to hardware.
+ * Handled for module version >= 1; always returns 0.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RPP_LR IFR RCP records (enable + MTU), starting at
+ * 'index', to hardware. Requires module version >= 2; returns -1 when
+ * the version is too old, otherwise 0.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Flush 'cnt' IFR RCP records (enable + MTU), starting at 'index', to
+ * the IFR module. Requires module version >= 2; returns -1 when the
+ * version is too old, otherwise 0.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Flush 'cnt' TX_INS RCP records (dyn/ofs/len insert descriptors),
+ * starting at 'index', to hardware. Handled for module version >= 1;
+ * always returns 0.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_RPL RCP records (replace descriptors: dyn/ofs/len plus
+ * replace pointer and extension priority), starting at 'index'.
+ * Handled for module version >= 1; always returns 0.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_RPL extension records (replace pointers), starting at
+ * 'index', to hardware. Handled for module version >= 1; always
+ * returns 0.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_RPL replacement-data records (the actual replace
+ * values), starting at 'index'. Handled for module version >= 1;
+ * always returns 0.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' TX_CPY RCP records, starting at 'index'. The flat record
+ * index is split into a writer index (index / nb_rcp_categories) and a
+ * per-writer record (index % nb_rcp_categories); the writer count
+ * register is re-programmed only when the writer changes.
+ * Handled for module version >= 1; always returns 0.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* -1 wraps to UINT_MAX: an impossible writer index, so the first
+	 * iteration always triggers the writer-cnt programming below.
+	 */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' HFU (header field update) RCP records, starting at
+ * 'index': three length-update descriptors (a/b/c), a TTL update,
+ * checksum/protocol info and the four layer-3/4 offsets, written per
+ * record and flushed. Handled for module version >= 1; always
+ * returns 0.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CSU (checksum update) RCP records, starting at 'index':
+ * outer/inner L3 and L4 checksum commands per record. Handled for
+ * module version >= 1; always returns 0.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Rx queue allocation is not supported by this backend stub.
+ * Logs an error and returns -1 (failure).
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	/* Error diagnostics belong on stderr, not stdout. */
+	fprintf(stderr, "ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Rx queue release is not supported by this backend stub.
+ * Logs an error and returns 0.
+ * NOTE(review): returns 0 (success) although it prints ERROR, while
+ * alloc_rx_queue returns -1 — confirm callers rely on this asymmetry.
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	/* Error diagnostics belong on stderr, not stdout. */
+	fprintf(stderr, "ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table handed to the flow API layer.
+ * NOTE(review): this uses positional initialization — the entry order
+ * must exactly match the field order of struct flow_api_backend_ops.
+ * The leading 1 is presumably the backend interface version — confirm
+ * against the struct definition.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	alloc_rx_queue,
+	free_rx_queue,
+
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe the FPGA for every flow-filter hardware module and populate the
+ * per-adapter backend device entry (be_devs[adapter]).
+ *
+ * Pattern used for each optional module: calling *_nthw_init() with a
+ * NULL instance acts as a presence probe (returns 0 when the module
+ * exists in this FPGA build); only then is an instance allocated and
+ * initialized, otherwise the pointer is set to NULL so the *_get_present
+ * callbacks report absence.
+ *
+ * Returns the backend ops table; *dev receives the per-adapter backend
+ * device used as the be_dev argument of every callback.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	/* INFO is mandatory — created unconditionally, no probe. */
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down a backend device created by bin_flow_backend_init(): delete
+ * every module instance.
+ * NOTE(review): pointers for absent modules are NULL — this assumes the
+ * *_nthw_delete() functions tolerate NULL; confirm in their
+ * implementations. The pointers are not reset to NULL afterwards.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the binary flow backend: create/destroy the
+ * per-adapter backend device and obtain the backend ops table.
+ * NOTE(review): identifiers starting with a double underscore are
+ * reserved by the C standard — consider FLOW_BACKEND_H_ as guard name.
+ */
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/* Probes p_fpga for flow modules; *be_dev receives the backend device. */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+/* Deletes all module instances held by be_dev. */
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-30 16:51   ` [PATCH v10 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-30 16:51   ` Mykola Kostenok
  2023-08-30 16:51   ` [PATCH v10 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
v10:
* Fix wrong queue id range.
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  357 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1246 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12514 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..87ac68ee24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by a comma (‘,’).
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;
+
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;
+	pthread_t stat_thread;
+	pthread_t flm_thread;
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;
+	struct rte_pci_device *p_dev;
+	struct ntdrv_4ga_s ntdrv;
+
+	int n_eth_dev_init_count;
+	int probe_finished;
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	size_t avail_mem =
+		sizeof(struct virtq_avail) +
+		queue_size *
+		sizeof(le16); /* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_mem_aligned =
+		((avail_mem % STRUCT_ALIGNMENT) == 0) ?
+		avail_mem :
+		STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_mem =
+		sizeof(struct virtq_used) +
+		queue_size *
+		sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned =
+		((used_mem % STRUCT_ALIGNMENT) == 0) ?
+		used_mem :
+		STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = 1;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Enable an unmanaged Rx virt queue.
+ * Programs interrupt enable/vector/sticky (ISTK) bits in the UW data -
+ * only when irq_vector is a valid MSI-X vector for the VF - and then
+ * re-enables the avail-ring monitor (AM).
+ * Returns 0 on success, -1 on invalid queue or register-access failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if the interrupt vector is valid for this VF */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	/* irq_vector is signed: log with %i (was %u - format/type mismatch) */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%i\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable an unmanaged Tx virt queue.
+ * Mirrors nthw_disable_rx_virt_queue(): clears interrupt/sticky bits in
+ * the UW data, disables the avail-ring monitor (AM) and waits for the
+ * FPGA to drain the queue.
+ * Returns 0 on success, -1 on invalid queue or register-access failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Enable an unmanaged Tx virt queue.
+ * Mirrors nthw_enable_rx_virt_queue(): programs interrupt enable/vector/
+ * sticky bits in the UW data (only for a valid MSI-X vector) and then
+ * re-enables the avail-ring monitor (AM).
+ * Returns 0 on success, -1 on invalid queue or register-access failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-program the Tx descriptor-reader (DR) data with a new output port
+ * and then enable the queue.
+ * Fix: adds the NULL guard used by every other enable/disable helper;
+ * the original dereferenced tx_vq unconditionally.
+ * Returns 0 on success, -1 on invalid queue or register-access failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Configure per-port Tx QoS: enable flag plus rate (ir) and burst (bs)
+ * parameters. Thin wrapper around set_tx_qos_data(); returns its status.
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/* Set the global Tx QoS rate as a multiplier/divider pair.
+ * Thin wrapper around set_tx_qos_rate(); returns its status.
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+/* Sentinel: the hardware has not (yet) latched a valid ring pointer */
+#define INDEX_PTR_NOT_VALID 0x80000000
+/* Read the latched Rx queue pointer from the DBS.
+ * On success stores the pointer in *p_index, or INDEX_PTR_NOT_VALID if
+ * the hardware did not flag the value as valid. Returns the read status.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0) {
+		if (valid)
+			*p_index = ptr;
+		else
+			*p_index = INDEX_PTR_NOT_VALID;
+	}
+	return status;
+}
+
+/* Read the latched Tx queue pointer from the DBS.
+ * On success stores the pointer in *p_index, or INDEX_PTR_NOT_VALID if
+ * the hardware did not flag the value as valid. Returns the read status.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+	int status = get_tx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/* Ask the DBS to latch the ring pointer for the given Rx queue, so a
+ * subsequent dbs_get_rx_ptr() can poll for it.
+ */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Ask the DBS to latch the ring pointer for the given Tx queue, so a
+ * subsequent dbs_get_tx_ptr() can poll for it.
+ */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Poll the DBS Rx/Tx idle register until its busy flag clears.
+ * On success *idle holds the idle status from the last read.
+ * NOTE(review): no iteration limit - relies on the hardware eventually
+ * deasserting busy; confirm this cannot spin forever on a wedged FPGA.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/* Drive a queue through the hardware shutdown handshake: repeatedly set
+ * the idle bit for the queue and poll until the DBS reports it idle.
+ * A -ENOTSUP from the busy poll means this FPGA has no idle register;
+ * in that case wait 200 ms and assume the pipeline has drained.
+ * Returns 0 on success, -1 on any register-access failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Release an Rx virt queue back to the hardware: clear UW, disable and
+ * clear AM, wait for the FPGA to drain, clear DR, re-init the queue and
+ * reset the software queue state.
+ * Fix: the NULL check now precedes the rxvq->mp_nthw_dbs dereference;
+ * the original read through the pointer before checking it.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an Rx queue created as UNMANAGED; anything else is rejected */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq != NULL && rxvq->usage == UNMANAGED)
+		return dbs_internal_release_rx_virt_queue(rxvq);
+
+	return -1;
+}
+
+/* Release a MANAGED Rx queue: free the packet-buffer shadow table that
+ * was allocated at setup time, then release the hardware queue.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+	if (rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Release a Tx virt queue back to the hardware: clear UW, disable and
+ * clear AM, wait for the FPGA to drain, clear DR and QP, re-init the
+ * queue and reset the software queue state.
+ * Fix: the NULL check now precedes the txvq->mp_nthw_dbs dereference;
+ * the original read through the pointer before checking it.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release a Tx queue created as UNMANAGED; anything else is rejected */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq != NULL && txvq->usage == UNMANAGED)
+		return dbs_internal_release_tx_virt_queue(txvq);
+
+	return -1;
+}
+
+/* Release a MANAGED Tx queue: free the packet-buffer shadow table that
+ * was allocated at setup time, then release the hardware queue.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+	if (txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Set up an unmanaged Tx virt queue in the DBS (sequence per DSF00094):
+ * program DR, UW and AM data, initialize the queue and set the QP
+ * (virtual port) mapping. Interrupts always start disabled; queues that
+ * need them are enabled later via nthw_enable_tx_virt_queue(), after
+ * vfio interrupts are set up in the kernel. Likewise AM is only enabled
+ * here for queues that do not use interrupts (irq_vector < 0).
+ * Returns a handle into the static txvq table, or NULL on failure.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings hand the descriptor area to UW; split rings the used ring */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a managed split-ring Rx queue: lay out avail/used/desc rings in
+ * the supplied struct area, pre-fill descriptors with packet buffers
+ * (device write-only), clone the buffer table and program the DBS.
+ * Fix: check the malloc result; the original passed a possible NULL
+ * straight into memcpy on allocation failure.
+ * Returns the queue handle, or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a managed split-ring Tx queue: lay out avail/used/desc rings in
+ * the supplied struct area, clone the buffer table and program the DBS.
+ * Fix: check the malloc result; the original passed a possible NULL
+ * straight into memcpy on allocation failure.
+ * Returns the queue handle, or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Initialize a managed packed-ring virt queue: lay out the descriptor
+ * table and the two event-suppression structs in the (page-aligned)
+ * struct area, pre-fill descriptor ids and flags, and clone the packet
+ * buffer table. Buffer id == descriptor index requires in-order
+ * completion from the FPGA.
+ * NOTE(review): the queue_size == 0 early-out happens only after the
+ * memset and descriptor loop; harmless (the loop doesn't run), but it
+ * looks intended to come first - confirm.
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a managed packed-ring Rx queue: build the ring in the struct
+ * area and program the DBS with the driver/device event areas. The
+ * start index 0x8000 starts the hardware wrap ring counter at 1 (see
+ * inline comment). Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a managed packed-ring Tx queue: build the ring in the struct
+ * area (no buffer pre-fill, flags 0) and program the DBS with the
+ * driver/device event areas. Returns the queue handle, or NULL on
+ * failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on the requested ring layout; others are unsupported */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on the requested ring layout; others are unsupported */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Avail flag value matching the queue's current avail wrap counter */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse used flag for the current avail wrap counter */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, toggling avail_wrap_count on ring wrap */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, toggling used_wrap_count on ring wrap */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Fetch up to n received buffers from the queue into rp[].
+ * Split ring: walks the used ring; a packet whose capture length
+ * exceeds the (uniform) buffer length is returned as multiple segments,
+ * and only whole packets are handed out - iteration stops early if the
+ * remaining entries cannot hold all segments of the next packet.
+ * Packed ring: one buffer per descriptor; relies on in-order delivery
+ * from the FPGA.
+ * Returns the number of ring entries (segments) consumed; *nb_pkts is
+ * set to the number of whole packets.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Descriptor is device-used only when both flag bits
+			 * match the current used wrap counter
+			 */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+/*
+ * Hand n buffers back to the device via the avail ring.
+ * Packed ring: the first descriptor's flags are written last, after a
+ * barrier, so the device never sees a partially rebuilt chain.
+ * NOTE(review): rte_rmb() is used before publishing first_desc->flags;
+ * a write barrier (rte_wmb) looks like the intended ordering here -
+ * confirm against DPDK memory-barrier documentation.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Per-queue debug logging stub - compiled out */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n Tx descriptors for the caller.
+ * Sets *first_idx to the first usable ring index, *p_virt_addr to the
+ * queue's buffer table and cvq to the descriptor area.
+ * Split ring: free space is derived from a cached used index, refreshed
+ * from the avail ring (valid because of in-order completion).
+ * Packed ring: previously outstanding ids are consumed first, then used
+ * descriptors are reclaimed (in-order FPGA behavior assumed).
+ * Returns the number of descriptors granted (<= n), 0 for unknown ring
+ * types.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Serve from the outstanding-id pool first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Reclaimed past what was asked for: remember the surplus */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Publish n packets (n_segs[i] segments each) to the device.
+ * Split ring: fill the avail ring, then update idx after a full memory
+ * barrier so the hardware never reads a stale ring.
+ * Packed ring: the first descriptor's flags are written last, after a
+ * barrier, so the device never sees a half-built chain.
+ * NOTE(review): the packed path uses rte_rmb() before the final flags
+ * store; a write barrier (rte_wmb) looks like the intended ordering -
+ * confirm against DPDK memory-barrier documentation.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Latch and poll the hardware Rx ring pointer for this queue.
+ * Gives up after 100000 polls of 10 us each; returns 0 on success with
+ * the low 16 bits of the pointer in *index, -1 on error or timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	for (;;) {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+		if (raw_ptr != INDEX_PTR_NOT_VALID)
+			break;
+	}
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
+
+/* Latch and poll the hardware Tx ring pointer for this queue.
+ * Gives up after 100000 polls of 10 us each; returns 0 on success with
+ * the low 16 bits of the pointer in *index, -1 on error or timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+		if (raw_ptr != INDEX_PTR_NOT_VALID)
+			break;
+	}
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+/* Opaque queue handle; the definition is private to the .c implementation. */
+struct nthw_virt_queue;
+
+/* One DMA-able memory area: device-visible and CPU-visible address + length. */
+struct nthw_memory_descriptor {
+	void *phys_addr;	/* bus/IOVA address as seen by the FPGA */
+	void *virt_addr;	/* CPU mapping of the same memory */
+	uint32_t len;		/* area length in bytes */
+};
+
+/*
+ * Fixed-width type aliases; presumably "unsigned little-endian".
+ * NOTE(review): plain #define aliases for types - a typedef would be the
+ * conventional form; confirm before changing, the names are used widely.
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Virtqueue layout selectors (values stored in vq_type fields below). */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+/* In-order completion selectors for TX queue setup. */
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ * Packed to 1-byte alignment so the layout matches what the device expects.
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+/* Upper bound on packed-ring size (ring sizes are powers of two). */
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Packed-ring event suppression area (driver/device notification control). */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/*
+	 * NOTE(review): this pack(push, 16) sits in the middle of the struct
+	 * definition; compilers differ on whether a mid-struct pack pragma
+	 * affects the following members - confirm the intended layout.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ * Helper macros that hide the split/packed descriptor layout difference.
+ * vq_add_flags/vq_set_flags copy all arguments into locals first so each
+ * argument is evaluated exactly once.
+ */
+/* Set the 'next' chain index - meaningful only for split rings. */
+#define vq_set_next(_vq, index, nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/* OR flags into descriptor 'index' for either ring layout. */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/* Overwrite flags of descriptor 'index' for either ring layout. */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Address+length prefix shared by both descriptor layouts. */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/*
+ * Layout-agnostic view of a descriptor table: the union members alias the
+ * same table, and vq_type selects which interpretation is valid.
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type; /* SPLIT_RING or PACKED_RING */
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+/* Enable/disable/tear down an RX queue created by nthw_setup_rx_virt_queue(). */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+/* Enable/disable/tear down a TX queue created by nthw_setup_tx_virt_queue(). */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+/*
+ * "Managed" variants: the queue memory is described by the caller-supplied
+ * descriptors rather than guest physical addresses.
+ */
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+/* TX QoS shaping: per-port enable/rate/burst and a global rate scale factor. */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+/* One received buffer segment as handed back by nthw_get_rx_packets(). */
+struct nthw_received_packets {
+	void *addr;	/* segment data pointer */
+	uint32_t len;	/* segment length in bytes */
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+/* One-time DBS/virt-queue subsystem initialization for an FPGA instance. */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..a4efacf6c0
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdatomic.h>
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+/* Largest frame the hardware path handles, and the MTU bounds derived from it. */
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+/* Inline profile requires a larger minimum MTU - see eth_dev_infos_get(). */
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+/* Port numbers at/above this offset denote virtual tunnel ports in RX headers. */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+/* Bookkeeping of created virt-queues so they can be released on teardown. */
+static struct {
+	struct nthw_virt_queue *vq;	/* queue handle, NULL if slot unused */
+	int managed;			/* created via the managed setup variant */
+	int rx;				/* 1 = RX queue, 0 = TX queue */
+} rel_virt_queue[MAX_REL_VQS];
+
+/* Per-burst limits for the scatter-gather RX/TX paths. */
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Debug helper: hex-dump @len bytes at @data, 16 bytes per row,
+ * optionally prefixed by @text.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base; /* linked list of all PMD instances */
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+/* Protects the global adapter table g_p_drv[] below. */
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);	/* saved SIGINT handler */
+static pthread_t shutdown_tid;
+int kill_pmd;	/* set non-zero to make RX/TX burst functions bail out */
+
+/* devargs keys accepted by this PMD (see valid_arguments[] below). */
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+/* MAC addresses assigned to virtual ports. */
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+/* Global adapter table, indexed by adapter_no; guarded by hwlock. */
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter's driver context in the global g_p_drv[] table.
+ * Logs a warning if an existing entry for the same slot is overwritten.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/*
+	 * g_p_drv[] has NUM_ADAPTER_MAX entries, so the last valid index is
+	 * NUM_ADAPTER_MAX - 1. The original used ">" here, which allowed an
+	 * out-of-bounds write at adapter_no == NUM_ADAPTER_MAX.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Find the driver context whose PCI domain and bus match @addr.
+ * Returns NULL when no registered adapter matches.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *found = NULL;
+	int slot;
+
+	rte_spinlock_lock(&hwlock);
+	for (slot = 0; slot < NUM_ADAPTER_MAX; slot++) {
+		struct drv_s *cand = g_p_drv[slot];
+
+		if (!cand)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus) {
+			found = cand;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return found;
+}
+
+/* Decode a packed pciident into an rte_pci_addr and look up its adapter. */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr;
+
+	addr.domain = PCIIDENT_TO_DOMAIN(pciident);
+	addr.bus = PCIIDENT_TO_BUSNR(pciident);
+	addr.devid = PCIIDENT_TO_DEVNR(pciident);
+	addr.function = PCIIDENT_TO_FUNCNR(pciident);
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter info for the adapter identified by @pciident to @pfh.
+ * Returns -1 when no such adapter is registered.
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	/*
+	 * get_pdrv_from_pciident() returns NULL when no adapter matches;
+	 * the original dereferenced the result unconditionally.
+	 */
+	if (!p_drv)
+		return -1;
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS handle of the adapter at @pci_addr, or NULL (with an error
+ * log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+		return NULL;
+	}
+
+	p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	return p_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at @pci_addr, or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return FPGA_INFO_PROFILE_UNKNOWN;
+	}
+
+	return p_drv->ntdrv.adapter_info.fpga_info.profile;
+}
+
+/*
+ * rte_kvargs callback: parse an unsigned 32-bit value into *extra_args.
+ * Base is auto-detected ("0x.." hex, "0.." octal, otherwise decimal).
+ * Returns 0 on success, -1 on missing or malformed input.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *end;
+	errno = 0;
+	const unsigned long value = strtoul(value_str, &end, 0);
+	/*
+	 * Reject empty input, trailing garbage and out-of-range values;
+	 * the original strtol() call silently mapped garbage to 0 and
+	 * truncated overflowing values.
+	 */
+	if (end == value_str || *end != '\0' || errno != 0 || value > UINT32_MAX)
+		return -1;
+	*(uint32_t *)extra_args = (uint32_t)value;
+	return 0;
+}
+
+/* One forced-link-speed request parsed from devargs. */
+struct port_link_speed {
+	int port_id;	/* physical port index */
+	int link_speed;	/* link speed in Mbps */
+};
+
+/*
+ * rte_kvargs callback: parse <port>:<link speed Mbps>, e.g 1:10000, into the
+ * next struct port_link_speed slot and advance the caller's cursor.
+ * Returns 0 on success, -1 on malformed input.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	/* require at least one digit before the ':' separator */
+	if (semicol == value_str || *semicol != ':')
+		return -1;
+	char *end;
+	const uint32_t lspeed = strtol(++semicol, &end, 10);
+
+	/* require digits for the speed and no trailing garbage */
+	if (end == semicol || *end != '\0')
+		return -1;
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill @stats from the per-queue software counters maintained in
+ * internals->rxq_scg[]/txq_scg[], refreshing them first via
+ * poll_statistics(). Returns 0 on success, -1 on invalid state.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/*
+	 * NOTE(review): the bound check uses '>' - if if_index indexes an
+	 * array of NUM_ADAPTER_PORTS_MAX entries elsewhere, '>=' would be
+	 * the safe form; confirm against the array declarations.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	/* Device-level totals are the sums of the per-queue counters. */
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue software counters and record the reset timestamp,
+ * under the driver's statistics lock. Returns 0 on success, -1 on
+ * invalid state.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* NOTE(review): same '>' vs '>=' bound question as dpdk_stats_collect(). */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* Map an NT link-speed enum value to the DPDK ETH_SPEED_NUM_* value (Mbps). */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex enum value to the DPDK ETH_LINK_*_DUPLEX value (0 if unknown). */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * ethdev link_update callback: refresh dev_link (status/speed/duplex).
+ * Virtual/override ports derive status from vport negotiation; physical
+ * ports query the adapter. A stopped device always reports link down.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			/* No PHY behind a virtual port - link is "up" once negotiated. */
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		/* Physical port: read live status from the adapter. */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * ethdev stats_get callback - delegates to dpdk_stats_collect().
+ * Propagates the collection status (the original discarded it and always
+ * returned success, hiding invalid-state errors from callers).
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * ethdev stats_reset callback - delegates to dpdk_stats_reset().
+ * Propagates the reset status (the original discarded it and always
+ * returned success, hiding invalid-state errors from callers).
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/* Translate an NT link-speed capability bitmask into the equivalent
+ * DPDK ETH_LINK_SPEED_* capability bitmask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < RTE_DIM(speed_map); i++)
+		if (nt_link_speed_capa & speed_map[i].nt_bit)
+			eth_speed_capa |= speed_map[i].eth_bit;
+
+	return eth_speed_capa;
+}
+
+/* Convenience mask: the 5-tuple RSS hash types this PMD advertises. */
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * ethdev dev_infos_get callback: report queue counts, MTU bounds, link-speed
+ * capabilities and RSS offload capabilities for this port.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* Inline profile requires a larger minimum MTU. */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use a key, so this is just a fake key
+	 * length to fit testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet - which may span several virtqueue segments in
+ * hw_recv[] - into @mbuf, allocating and chaining extra mbufs from @mb_pool
+ * when the data does not fit in one.
+ * @data_len is the capture length of the first segment including SG_HDR_SIZE.
+ * Returns the number of virtqueue segments consumed, or -1 on mbuf
+ * allocation failure or when more than @max_segs segments would be needed.
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* One iteration per consumed virtqueue segment. */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		if (cpy_size > cpto_size) {
+			/* Segment does not fit in the current mbuf: fill it,
+			 * then chain new mbufs for the rest.
+			 */
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	}; /* NOTE(review): stray ';' after the while block - harmless */
+
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one mbuf (which may itself be a chain of segments) into one or more
+ * chained virtqueue TX buffers, starting at descriptor 'vq_descr_idx'.
+ *
+ * Two segmentations interact here:
+ *   1. the source mbuf may be segmented (m->next chain),
+ *   2. each destination vq buffer holds at most SG_HW_TX_PKT_BUFFER_SIZE
+ *      bytes, so one mbuf segment may spill into several descriptors,
+ *      which are linked with VIRTQ_DESC_F_NEXT.
+ * The first descriptor reserves SG_HDR_SIZE bytes for the packet header,
+ * which the caller fills in separately.
+ *
+ * Returns the number of virtqueue descriptors consumed for this packet,
+ * or -1 when more than 'max_segs' descriptors would be needed.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	/* remain: bytes of the whole packet still to copy; cpy_size: bytes
+	 * left in the current mbuf segment */
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	/* room left in the current destination vq buffer (first one keeps
+	 * SG_HDR_SIZE for the header) */
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	/* first descriptor already accounts for the header area */
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	/* Walk the mbuf segment chain */
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* chain the filled descriptor to a fresh one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/* NOTE(review): when max_segs is exceeded we
+				 * only break here; the final check at the end
+				 * of the function returns -1 — confirm the
+				 * intermediate copies cannot overrun */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				/* subsequent descriptors carry no header */
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len promised more data than the segment chain held */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* count the last (possibly partially filled) descriptor */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK TX burst handler (scatter/gather virtqueue path).
+ *
+ * For up to MAX_TX_PACKETS mbufs: pads short frames to 60 bytes, computes
+ * how many virtqueue buffers each packet needs, reserves that many TX
+ * buffers from the HW queue, copies each packet in (fast path for
+ * single-segment packets, copy_mbuf_to_virtqueue() otherwise), frees the
+ * mbufs and releases the filled buffers to the hardware.
+ *
+ * Returns the number of packets actually handed to the HW.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	/* per-packet vq segment counts, consumed by the release call below */
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/* pad runt frames to the 60-byte minimum */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		/* drop trailing packets until the reserved buffers suffice */
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* fast path: one mbuf segment fitting one vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		/* packet data now lives in the vq buffers; mbuf can go back */
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO/IOMMU-map the memory backing one HW virtio queue:
+ * a 1MB control area for the combined descriptor rings followed by
+ * 'num_descr' packet buffers of 'buf_size' bytes each.
+ *
+ * Fast path: one contiguous allocation that stays inside a single 1G
+ * region and whose HPA offset matches, mapped once with ONE_G_SIZE.
+ * Fallback: two separate allocations (control area and packet buffers),
+ * each mapped individually through VFIO.
+ *
+ * Returns 0 on success, -1 on failure.  All failure paths now free the
+ * memory allocated by this function (previously the nt_vfio_dma_map()
+ * error paths leaked the allocations).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/*
+		 * Fallback: just allocate 1MB to hold all combined descr
+		 * rings (reuse 'virt' instead of shadowing it).
+		 */
+		size = 0x100000;
+		virt = rte_malloc_socket("VirtQDescr", size, 4096,
+					 eth_dev->data->numa_node);
+		if (!virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			rte_free(virt); /* was leaked here before */
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL; /* avoid dangling pointer */
+			rte_free(virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* all three allocations were leaked here before */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt); /* was leaked here before */
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	/* packet buffers start right after the 1MB control area */
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Mark a HW queue as no longer owned by a VF; frees no memory. */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq && hwq->vf_num != 0)
+		hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the 1G VFIO region and free
+ * the descriptor array and the control/buffer memory.
+ * Returns 0 on success, -1 if the VFIO unmap fails (nothing is freed then).
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	const int owner_vf = hwq->vf_num;
+	void *ctrl_mem = hwq->virt_queues_ctrl.virt_addr;
+	int rc;
+
+	rc = nt_vfio_dma_unmap(owner_vf, ctrl_mem,
+			       (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+			       ONE_G_SIZE);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       rc, owner_vf);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(ctrl_mem);
+	return 0;
+}
+
+/* ethdev callback: tear down the HW virtio queue backing TX queue 'queue_id'. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *pmd = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&pmd->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* ethdev callback: tear down the HW virtio queue backing RX queue 'queue_id'. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *pmd = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&pmd->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Total number of HW queues handed out so far (monotonic; never returned). */
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	/* use %i: all three arguments are signed int (were logged with %u) */
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%i, New queues=%i, Max queues=%i\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	/* NOTE(review): not thread-safe — assumed to run only during probe/setup */
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+/*
+ * ethdev callback: set up RX queue 'rx_queue_id'.
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool/buffer bookkeeping is
+ * done.  Otherwise the HW virtio queue memory is allocated and mapped and
+ * a managed split-ring RX virt queue is created on the exception path
+ * host id.  Returns 0 on success, -1 if the HW queue allocation fails.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports need no HW virt queue - just record the mempool */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* usable payload room per mbuf = data room minus headroom */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	/*
+	 * NOTE(review): the return value below is never checked for failure
+	 * before this function reports success — confirm whether a failed
+	 * setup should release the HW queues and return -1 here.
+	 */
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * ethdev callback: set up TX queue 'tx_queue_id'.
+ *
+ * For PORT_TYPE_OVERRIDE ports only the queue pointer is recorded.
+ * Otherwise the HW virtio queue memory is allocated and mapped, the
+ * bypass/target id for the NTDVIO0 header is computed, a managed
+ * split-ring TX virt queue is created, the EPP queue-to-vport mapping is
+ * programmed and (for physical ports) the port is administratively
+ * enabled.  Returns 0 on success, -1 on error.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * Validate the queue id BEFORE it is used to index txq_scg[]
+	 * (previously the entry was read first, and the check used '>'
+	 * which let tx_queue_id == nb_tx_queues slip through).
+	 */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* route every virtual-port queue of this port to the TX vport */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Set the inline-path MTU.  Only valid on physical ports and only for
+ * MIN_MTU_INLINE <= mtu <= MAX_MTU; everything else is -EINVAL.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)dev->data->dev_private;
+
+	if (pmd->type != PORT_TYPE_PHYSICAL)
+		return -EINVAL;
+	if (mtu < MIN_MTU_INLINE || mtu > MAX_MTU)
+		return -EINVAL;
+
+	return flow_set_mtu_inline(pmd->flw_dev, pmd->port, mtu) ? -EINVAL : 0;
+}
+
+/*
+ * ethdev callback: set the port MTU via the EPP block.
+ *
+ * Virtual ports: the exception queue keeps MAX_MTU while every virtual
+ * port queue gets the requested mtu.  Physical ports: the exception queue
+ * keeps MAX_MTU (programmed as a virtual-type entry) and the port itself
+ * gets the requested mtu.  Returns 0 on success, a nonzero EPP error code
+ * or -EINVAL on bad input/port type.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	/*
+	 * NOTE(review): initialized to positive EINVAL while the early-return
+	 * below uses -EINVAL — every path reassigns retval, so the initializer
+	 * is dead, but confirm the intended sign convention.
+	 */
+	int retval = EINVAL;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* apply the requested MTU to every virtual-port queue */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		/* PORT_TYPE_OVERRIDE and anything else: MTU not supported */
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* ethdev callback: mark RX queue 'rx_queue_id' as started. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* ethdev callback: mark RX queue 'rx_queue_id' as stopped. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * ethdev callback: mark TX queue 'tx_queue_id' as started.
+ * (Parameter renamed from the misleading 'rx_queue_id'.)
+ */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/*
+ * ethdev callback: mark TX queue 'tx_queue_id' as stopped.
+ * (Parameter renamed from the misleading 'rx_queue_id'.)
+ */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* ethdev callback: clear MAC address slot 'index' of the port (bounds-checked). */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const pmd = dev->data->dev_private;
+
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, pmd->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return;
+	}
+	(void)memset(&addrs[index], 0, sizeof(addrs[index]));
+}
+
+/*
+ * ethdev callback: store 'mac_addr' in MAC address slot 'index'.
+ * Returns 0 on success, -1 when 'index' is out of range.
+ */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const pmd = dev->data->dev_private;
+
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, pmd->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	addrs[index] = *mac_addr;
+	return 0;
+}
+
+/* ethdev callback: set the port's primary (slot 0) MAC address. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * ethdev callback: replace the port's multicast address list; slots past
+ * nb_mc_addr are zeroed.  Returns 0 on success, -1 when too many
+ * addresses were supplied.
+ *
+ * NOTE(review): the '>=' guard means a caller can never fill all
+ * NUM_MULTICAST_ADDRS_PER_PORT slots — possibly an off-by-one; confirm
+ * the intended capacity before changing it.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const pmd = dev->data->dev_private;
+	struct rte_ether_addr *const dst = pmd->mc_addrs;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, pmd->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (size_t i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			dst[i] = mc_addr_set[i];
+		else
+			(void)memset(&dst[i], 0, sizeof(dst[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * ethdev callback: configure the device.
+ * Marks probing as finished and forces promiscuous mode on (the device
+ * ALWAYS runs promiscuous).
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode.
+	 * (Was 'promiscuous ^= ~promiscuous', which sets the field to
+	 * all-ones; ethdev expects a plain boolean 0/1.)
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * ethdev callback: start the device.
+ *
+ * Virtual/override ports are simply reported link-up.  Physical ports
+ * wait up to 5 seconds for the FPGA to report link before continuing and
+ * then apply any requested loopback modes.  Always returns 0.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		/* poll every 100ms; give up (without failing) after 50 tries */
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		/* lpbk_mode is a bit mask: bit 0 = host side, bit 1 = line side */
+		if (internals->lpbk_mode) {
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
static int eth_dev_stop(struct rte_eth_dev *eth_dev)
{
	/*
	 * ethdev .dev_stop callback: release the managed scatter-gather
	 * virt-queues and take the physical port administratively down,
	 * then report the link as DOWN to the ethdev layer.
	 */
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
	       internals->type);

	/* Virtual ports have no FPGA queues or physical port state to undo. */
	if (internals->type != PORT_TYPE_VIRTUAL) {
		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
		struct ntnic_tx_queue *tx_q = internals->txq_scg;

		uint q;

		/* Release queues before touching the port state. */
		for (q = 0; q < internals->nb_rx_queues; q++)
			nthw_release_managed_rx_virt_queue(rx_q[q].vq);

		for (q = 0; q < internals->nb_tx_queues; q++)
			nthw_release_managed_tx_virt_queue(tx_q[q].vq);

		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
					  NT_LINK_SPEED_UNKNOWN);
		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
					     NT_LINK_LOOPBACK_OFF);
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
static int eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	/*
	 * ethdev .dev_set_link_down callback: take the link down on a
	 * physical port; virtual/override ports are a no-op.
	 */
	struct pmd_internals *const internals = dev->data->dev_private;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;
	const int port = internals->if_index;

	if (internals->type == PORT_TYPE_VIRTUAL ||
			internals->type == PORT_TYPE_OVERRIDE)
		return 0;

	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
	assert(port == internals->n_intf_no);

	/*
	 * NOTE(review): eth_dev_set_link_up toggles the admin state via
	 * nt4ga_port_set_adm_state(), while this function clears the link
	 * status instead - confirm the asymmetry is intentional.
	 */
	nt4ga_port_set_link_status(p_adapter_info, port, false);

	return 0;
}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
/* ethdev .flow_ops_get callback: expose this PMD's rte_flow ops table. */
static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_ops **ops)
{
	*ops = &_dev_flow_ops;
	return 0;
}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			     struct rte_eth_rss_conf *rss_conf)
{
	/*
	 * ethdev .rss_hash_conf_get callback: report the configured hash
	 * fields; no RSS key is exposed by this device.
	 */
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	struct flow_eth_dev *fedev = internals->flw_dev;
	struct flow_nic_dev *ndev = fedev->ndev;

	rss_conf->rss_key = NULL;
	rss_conf->rss_key_len = 0;
	/*
	 * NOTE(review): |= preserves whatever rss_hf bits the caller passed
	 * in rather than overwriting them - confirm this is intentional
	 * (a plain assignment would report only the configured fields).
	 */
	rss_conf->rss_hf |=
		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
	return 0;
}
+
/*
 * ethdev callback table for the NTNIC PMD.
 *
 * Deliberately NOT const: .mtu_set is patched to dev_set_mtu at probe
 * time when the FPGA exposes an EPP module (see nthw_pci_dev_init).
 * .mtu_set and .promiscuous_disable start out NULL (unsupported).
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL,
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL,
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
/*
 * File-scope state used when setting up virtual PF representors:
 * the backing PF PCI device, the parsed representor devargs, a queue
 * count per supported virtual port, and the backing PF's port id.
 * NOTE(review): exact field semantics inferred from names - confirm
 * against setup_virtual_pf_representor_base().
 */
static struct {
	struct rte_pci_device *vpf_dev;
	struct rte_eth_devargs eth_da;
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
	uint16_t pf_backer_port_id;
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id    = i;
+			queue_ids[i].hw_id = start_queue + i;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Tear down PMD queue resources prior to device removal.
+ *
+ * NOTE(review): the eth_dev argument is unused — the function walks the
+ * global pmd_intern_base list and the rel_virt_queue[] table, so it
+ * releases resources for ALL ports at once, not just the given one.
+ *
+ * Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Release the managed SCG queues of every registered port. */
+	while (internals) {
+		/* Tx queues released in reverse allocation order. */
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release any virtqueues still registered by the VF/vDPA path. */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	/* Drop the VFIO mapping used by the exception path. */
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/* SIGINT handler: flag the shutdown thread; forward anything else. */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		/* The shutdown thread polls this flag and does the work. */
+		kill_pmd = 1;
+		return;
+	}
+
+	/* Not ours: reinstate the previous handler and re-deliver. */
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/* Worker thread: waits for the SIGINT flag, then deinits the PMD. */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	/* Poll the flag set by signal_handler_func_int(). */
+	for (;;) {
+		if (kill_pmd)
+			break;
+		usleep(100000);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	/* Restore the original handler and re-raise so the process exits. */
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the ctrl+C shutdown path: a SIGINT handler that sets kill_pmd
+ * plus a worker thread that performs the actual teardown. Also derives a
+ * rough TSC frequency used to throttle stat polling.
+ *
+ * Returns 0 on success, -1 if the shutdown thread could not be created.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* Check thread creation: without the worker, the handler would set
+	 * kill_pmd with nobody to act on it and ctrl+C would be swallowed.
+	 */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		signal(SIGINT, previous_handler);
+		NT_LOG(ERR, ETHDEV, "%s: failed to create shutdown thread\n",
+		       __func__);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	/* 10 ms sample scaled by 100 to approximate cycles per second. */
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point: validate the EAL environment (PCI, VFIO IOMMU,
+ * hugepages), reject secondary PCIe-bifurcation endpoints, then run the
+ * full device init and arm the SIGINT shutdown handler.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* PCI bus support is mandatory for this PMD. */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	/* VFIO in no-IOMMU mode cannot provide the isolation we need. */
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	/* IOVA-as-VA works but PA mode performs better — warn only. */
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/*
+	 * On bifurcated NT200A01/NT50B01 cards only the primary endpoint
+	 * (subsystem device id 0x01) is driven; the secondary is rejected.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	/* NOTE(review): shutdown handler is armed even when init failed —
+	 * confirm this is intentional before changing it.
+	 */
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove entry point: delegate per-port teardown to the generic
+ * ethdev PCI helper, which calls nthw_pci_dev_deinit() for each port.
+ */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	const int rc = rte_eth_dev_pci_generic_remove(pci_dev,
+						      nthw_pci_dev_deinit);
+	return rc;
+}
+
+/* Register one EAL logtype per NT log module (default level INFO) and
+ * raise the global EAL log level to DEBUG. Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int i;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (i = NT_LOG_MODULE_GENERAL; i < NT_LOG_MODULE_END; ++i) {
+		const int index = NT_LOG_MODULE_INDEX(i);
+		const char *mod_name = nt_log_module_eal_name[index];
+
+		nt_log_module_logtype[index] =
+			rte_log_register_type_and_pick_level(mod_name,
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/* Bridge an NT-level log call to rte_vlog(): translate the level, and
+ * map known NT modules to their registered EAL logtype (unknown module
+ * ids are passed through unchanged).
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		rte_module =
+			(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	} else {
+		rte_module = module;
+	}
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/* Return 1 if the module's EAL logtype is at DEBUG level, 0 if not,
+ * or -1 when the module id is out of range.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module >= NT_LOG_MODULE_END || module < NT_LOG_MODULE_GENERAL)
+		return -1;
+
+	const int index = NT_LOG_MODULE_INDEX(module);
+	const int level = rte_log_get_level(nt_log_module_logtype[index]);
+
+	return level == RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* EAL constructor: hook the NT logging abstraction up to the EAL
+ * logger via the three impl callbacks defined above.
+ */
+static void ntnic_rte_init(void)
+{
+	/* static: nt_log_init() keeps the pointer beyond this call */
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor for the ntnic PMD. */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+/* Register the PMD with the PCI bus; requires the vfio-pci kmod. */
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/* Record a virtqueue in the release table so nthw_pci_dev_deinit() can
+ * free it later. Returns 0 on success, -1 when the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	/* Claim the first free slot. */
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+
+	return -1;
+}
+
+/* Remove a virtqueue from the release table. Returns 0 when found and
+ * cleared, -1 when the queue was not registered.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+
+	return -1;
+}
+
+/* Find the pmd_internals instance owning the given vhost id, or NULL. */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *p;
+
+	/* Linear scan of the global internals list. */
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/* Find the pmd_internals instance whose vhost socket path equals the
+ * given path, or NULL when no instance matches.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		/* Use the driver logger; a raw printf to stdout does not
+		 * belong in PMD code and bypasses log-level control.
+		 */
+		NT_LOG(DBG, VDPA, "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path,
+		       strcmp(intern->vhost_path, path));
+		if (strcmp(intern->vhost_path, path) == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues" devargs value of the form "...[p:q,p:q;...]" and
+ * store the queue count q for each virtual port p in portq[].
+ *
+ * The input string is modified in place: the ']' terminator and each
+ * ':' separator are overwritten with NUL. Malformed input (missing
+ * brackets, out-of-range values) is silently ignored.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Skip forward to the opening bracket of the mapping list. */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Scan backwards for the matching closing bracket. */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* strtok_r instead of strtok: strtok keeps hidden global state and
+	 * is unsafe in the multithreaded EAL environment.
+	 */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			/* Accept only in-range ports and queue counts. */
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor-related devargs for a PF device: extract the
+ * optional "portqueues=[p:q,...]" mapping into rep.portqueues and let
+ * DPDK parse any representor=[...] specification.
+ *
+ * Returns the number of representor ports requested (0 when none),
+ * or -1 on devargs parse failure (rte_errno set).
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			/* Remember this PF as the backer of the VFs. */
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual (representor/VF) port, attach
+ * private data of the requested size, and report in *n_vq how many
+ * virtqueues the port should get (from rep.portqueues, default 1).
+ *
+ * NOTE(review): next_rep_p is a static counter — representor ids are
+ * handed out in call order, so this function is not reentrant.
+ *
+ * Returns the ethdev, or NULL on allocation failure.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Prefer an explicitly configured representor id from the PF's
+	 * devargs; otherwise derive it from the VF number.
+	 */
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* Per-port queue count from "portqueues" devargs, default 1. */
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the device's generic name, or NULL for a NULL device/name. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev && dev->device.name) ? dev->device.name : NULL;
+}
+
+/* Devargs keys accepted when creating a virtual port. */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and wire up one virtual (representor) port: parse per-port
+ * devargs, allocate the ethdev and its private internals, allocate and
+ * attach HW queues (either via a fresh flow device for VSWITCH FPGAs or
+ * by adding queues to the backing PF's flow device otherwise), start the
+ * vDPA relay, and link the internals into the global list.
+ *
+ * Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): on the error path only `data` and `internals` are freed;
+ * an already-allocated ethdev is not released — confirm whether
+ * rte_eth_dev_release_port() should be called there too.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* Parse optional "vlan" and "sep" (separate socket) devargs. */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	/* Private copy of dev data; swapped in near the end. */
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live between the physical ports and the tunnel
+	 * port range.
+	 */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id    = i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* VSWITCH: this virtual port gets its own flow device. */
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* Otherwise: attach this port's queues to the flow device
+		 * of the backing physical port (port index = port & 1).
+		 */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	/* vhost socket path; "sep" devargs selects a per-port subdir. */
+	char path[128];
+
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Swap in the private dev-data copy built above. */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append to the global internals list. */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	atomic_store(&internals->vhid, vhid);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0; /* never any packets on the SCG Rx path */
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0; /* nothing is ever sent on the SCG Tx path */
+}
+
+/* Create the DPDK ethdev for a VF/representor port and install the
+ * appropriate Rx/Tx burst functions (dummies for override ports, whose
+ * traffic flows through VF/vDPA instead of the SCG path).
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev;
+	struct pmd_internals *internals;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	const int scg_bypassed = (internals->type == PORT_TYPE_OVERRIDE);
+
+	eth_dev->rx_pkt_burst = scg_bypassed ? eth_dev_rx_scg_dummy :
+			       eth_dev_rx_scg;
+	eth_dev->tx_pkt_burst = scg_bypassed ? eth_dev_tx_scg_dummy :
+			       eth_dev_tx_scg;
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Remove the DPDK ethdev of a VF/representor port: close all vDPA
+ * devices, free the private dev data and release the port.
+ *
+ * Returns 0 on success, -1 when pci_dev is NULL or no ethdev exists
+ * for its name.
+ *
+ * NOTE(review): dev_private and data are freed here before
+ * rte_eth_dev_release_port() runs — verify the release helper does not
+ * touch these freed pointers in this DPDK version.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* No-op transition: LAG configuration is already correct. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links up: distribute Tx 50/50 across the two LAG ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 link is up: send all Tx out on port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 link is up: send all Tx out on port 1. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/* No links up: disable Tx entirely. Returns 0 when both config writes
+ * succeed, non-zero otherwise.
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	/* Clear both the port-0 entry and the hash-distribution entry. */
+	const int rc_port =
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
+	const int rc_hash =
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
+
+	return rc_port + rc_hash;
+}
+
+/* Query the adapter for the link state of one LAG member port.
+ * Returns true when the link is up.
+ */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/* Encode the two link states as a 2-bit lag_state_e value:
+ * port 0 in bit 0 (LSB), port 1 in bit 1 (MSB).
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t port0_up = lag_get_link_status(config, 0);
+	const uint8_t port1_up = lag_get_link_status(config, 1);
+
+	return (port1_up << 1) | port0_up;
+}
+
+/* Make the configured primary port the active one: it receives the
+ * whole hash distribution while Rx on the backup port is blocked.
+ * Returns 0 when both hardware writes succeed.
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+	int retval;
+
+	if (config->primary_port == 0) {
+		/* Primary is port 0: it takes 100% of the distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* Primary is port 1: port 0 gets 0% of the distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/* Make the configured backup port the active one: it receives the
+ * whole hash distribution while Rx on the primary port is blocked.
+ * Returns 0 when both hardware writes succeed.
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+	int retval;
+
+	if (config->backup_port == 0) {
+		/* Backup is port 0: it takes 100% of the distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* Backup is port 1: port 0 gets 0% of the distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup LAG monitor loop: starts with the primary port active
+ * and every 500 ms fails over to the backup port when the primary link
+ * drops, flipping back once the primary recovers. Runs until
+ * config->lag_thread_active is cleared. Always returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Action callback invoked on an active/active LAG state transition. */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One row of the transition table: moving src_state -> dst_state
+ * triggers action.
+ */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/* Transition table for active/active mode — covers all 16 combinations
+ * of (previous state, new state) across the four link states.
+ */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/* Find the action registered for the (current -> new) state transition.
+ * Returns NULL when no matching table row exists.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const uint32_t n_rows = sizeof(actions) / sizeof(struct lag_action_s);
+	uint32_t row;
+
+	for (row = 0; row < n_rows; row++) {
+		if (actions[row].src_state == current_state &&
+				actions[row].dst_state == new_state)
+			return actions[row].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active LAG monitor loop: starts balanced 50/50 with no port
+ * blocked, then every 500 ms reads both link states and runs the
+ * transition action from the actions[] table. Runs until
+ * config->lag_thread_active is cleared. Always returns 0.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/*
+		 * lookup_action() returns NULL for a transition missing
+		 * from the table; guard against it instead of crashing
+		 * the LAG management thread on a NULL call.
+		 */
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/* LAG management thread entry: dispatch to the monitor loop matching
+ * the configured bonding mode; unknown modes are reported and the
+ * thread exits. Always returns NULL.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	if (config->mode == BONDING_MODE_ACTIVE_BACKUP)
+		lag_active_backup(config);
+	else if (config->mode == BONDING_MODE_8023AD)
+		lag_active_active(config);
+	else
+		fprintf(stderr, "Unsupported NTbond mode\n");
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..a82027cbe7
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <stdatomic.h>
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+/*
+ * DPDK 22.07 renamed the ETH_* link/speed/RSS constants to RTE_ETH_*.
+ * On 22.07+ the old names are re-created below as aliases of the new
+ * ones so the rest of the driver can use a single set of names.
+ */
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+/* Drop any legacy-name definitions still provided by the DPDK headers. */
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+/* Re-introduce the legacy names as aliases of the RTE_ETH_* replacements. */
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+/* Unicast MAC address slots per port (slot 0 is the default address). */
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+/* Scatter-gather HW ring sizes and per-packet buffer sizes (2 KiB). */
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+/* Byte size of the SG packet header; must match _pkt_hdr_rx/_pkt_hdr_tx. */
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+/* (i.e. ceiling of (data + header) / buffer size, minimum one segment) */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+/*
+ * NOTE(review): VIRTQ_DESCR_IDX expects a variable named
+ * first_vq_descr_idx to be in scope at the point of use.
+ */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+/* Lookup of an initialized virtual-port instance by vhost id / socket path. */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+/* Resolve adapter-level objects from a PCI address. */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+/* (De)registration of virtqueues whose resources are released later. */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/* Note: SG_HDR_SIZE (12) is defined once near the top of this header. */
+
+/*
+ * 12-byte Rx packet descriptor header prepended to packet data.
+ * Layout is verified against SG_HDR_SIZE by the compile-time checks below.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;    /* captured length */
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;      /* full 32-bit word despite bitfield syntax */
+};
+
+/* 12-byte Tx packet descriptor header; same size contract as the Rx header. */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/*
+ * Compile time verification of scatter gather header size.
+ * C11 _Static_assert (the file already requires C11 via <stdatomic.h>)
+ * replaces the negative-array-size typedef trick: it gives a clear
+ * diagnostic and no unused-typedef warnings.
+ */
+_Static_assert(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE,
+	       "_pkt_hdr_rx size must equal SG_HDR_SIZE");
+_Static_assert(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE,
+	       "_pkt_hdr_tx size must equal SG_HDR_SIZE");
+
+/* Opaque handle type. */
+typedef void *handle_t;
+
+/* Per-queue HW memory resources: VQ control area plus packet buffers. */
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers; /* one descriptor per packet buffer */
+};
+
+/* Per-port Rx queue state, cache-line aligned. */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* HW memory backing this queue */
+	struct nthw_virt_queue *vq;
+	int nb_hw_rx_descr;
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* Per-port Tx queue state, cache-line aligned. */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* HW memory backing this queue */
+	struct nthw_virt_queue *vq;
+	int nb_hw_tx_descr;
+	/* Used for bypass in NTDVIO0 header on Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+/* Fixed-capacity list of uint32 values (capacity tied to MAX_QUEUES). */
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count; /* number of valid entries in value[] */
+};
+
+/* Configuration related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* presumably the lag_management() thread - verify */
+	uint8_t lag_thread_active; /* cleared to stop the management loop */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+/* LAG modes; values presumably mirror DPDK bonding mode numbering - verify. */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* Meter profile list entry, keyed by profile_id. */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* Meter instance list entry referencing its profile. */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtual-port communication layout (split/packed ring or relay). */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+/* Per-port private driver state (linked in a list via the next pointer). */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev;
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode;
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	volatile atomic_int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config; /* non-NULL only when LAG is configured */
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	/* Meter profiles/instances owned by this port. */
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	struct pmd_internals *next;
+};
+
+/* Flow teardown, statistics polling and debug dump entry points. */
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+/* Max flow items / actions accepted per converted rte_flow rule. */
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+/* Number of flow-stat id slots (must stay a power of two - used as a mask). */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* Converted match: rte_flow items translated into flow_elem entries. */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus the match elements parsed from its raw header. */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted attributes (group/priority) plus the converted match. */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted actions plus storage for action payloads they point into. */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/* Driver-private rte_flow handle with cached per-flow statistics. */
+struct rte_flow {
+	void *flw_hdl;
+	int used; /* non-zero while this slot holds a live flow */
+	uint32_t flow_stat_id; /* index into flow_stat_id_map */
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/* NT-private item types; INT_MIN based to avoid clashing with RTE values. */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/* NT-private action types; INT_MIN based to avoid clashing with RTE values. */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/*
+ * NOTE(review): file-scope statics in a header give every including
+ * translation unit its own private copy of these tables and of flow_lock;
+ * verify this header is only included from a single .c file.
+ */
+static int convert_tables_initialized;
+
+/* Highest RTE item/action enum value the translation tables can index. */
+#define MAX_RTE_ENUM_INDEX 127
+
+/* RTE -> NT type translation tables; unmapped entries hold -1. */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/* Translate an RTE item type to the NT flow element type (-1 = unsupported). */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+/* Translate an RTE action type to the NT flow action type (-1 = unsupported). */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+/* Flow-stat slot table (0 = free, mark + 1 = taken) and its lock. */
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal flow_error into the rte_flow error format.
+ * A NULL error pointer is accepted and ignored; always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+
+	if (flow_error->type == FLOW_ERROR_NONE ||
+			flow_error->type == FLOW_ERROR_SUCCESS)
+		error->type = RTE_FLOW_ERROR_TYPE_NONE;
+	else
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+/*
+ * Map a flow MARK value to a flow stat id slot (caller holds flow_lock).
+ *
+ * The slot search is bounded: the original open-ended while-loop spun
+ * forever - with flow_lock held - once all MAX_COLOR_FLOW_STATS slots
+ * were occupied.  After a full unsuccessful scan the probe index has
+ * wrapped back to the initial hash slot, which is then reclaimed.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+	uint32_t probes = 0;
+
+	while (flow_stat_id_map[flow_stat_id] &&
+			probes++ < MAX_COLOR_FLOW_STATS)
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Locked wrapper: serialize stat-id allocation against create/delete. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return stat_id;
+}
+
+/* Release a stat-id slot (caller holds flow_lock); out-of-range ids are ignored. */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * Populate the RTE -> NT flow type translation tables (idempotent).
+ * Unmapped RTE indices stay -1 (memset of 0xFF bytes), so CNV_TO_ELEM /
+ * CNV_TO_ACTION report them as unsupported.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug-only: human-readable names mirroring the tables above. */
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Parse a raw packet byte string (Ethernet first) into flow elements.
+ *
+ * @param data     Raw packet bytes.
+ * @param preserve Optional mask bytes parallel to data (may be NULL); used
+ *                 as the per-element mask pointers.
+ * @param size     Number of valid bytes in data (and preserve).
+ * @param out      Receives the elements, terminated by FLOW_ELEM_TYPE_END.
+ *                 NOTE(review): there is no capacity parameter - the caller
+ *                 must size out[] for the worst case number of headers.
+ *
+ * @return Number of elements written (including the END terminator), or -1
+ *         on truncated/unsupported input.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - one element per tag, single or stacked (QinQ) */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - the version nibble must agree with the ether type */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field lives at byte offset 9 */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field lives at byte offset 6 */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 - dispatch on the IP protocol number */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U rides on the well-known UDP destination port */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is counted in 4-byte units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero-length extension would neither
+				 * advance pkti nor terminate the loop,
+				 * incrementing hdri without bound and
+				 * overrunning out[]; reject it.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last byte of each extension names the next one */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing unparsed bytes are not allowed */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/* Convert rte_flow attributes (group/priority) into the NT flow_attr form. */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	struct flow_attr *dst = &attribute->attr;
+
+	memset(dst, 0x0, sizeof(*dst));
+
+	if (attr != NULL) {
+		dst->group = attr->group;
+		dst->priority = attr->priority;
+	}
+
+	return 0;
+}
+
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Translate an rte_flow action array into the filter-layer action list used
+ * by the vswitch FPGA profile.
+ *
+ * action:       output; flow_actions[] and the auxiliary structs inside
+ *               (flow_rss, tun_def, mark) are filled in here
+ * actions:      rte_flow actions, terminated by RTE_FLOW_ACTION_TYPE_END
+ * max_elem:     capacity of action->flow_actions[]
+ * flow_stat_id: out; color flow-statistics id taken from the MARK action,
+ *               or freshly allocated when the caller supplied no MARK
+ *
+ * Returns 0 on success, -1 on unknown action type or when max_elem would be
+ * exceeded.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* MAX_COLOR_FLOW_STATS acts as the "no stat id assigned yet" marker */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			/*
+			 * Only the PMD-private TUNNEL_SET action is accepted
+			 * outside the conversion table.
+			 */
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1:
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Only the default RSS hash function is supported */
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/*
+				 * NOTE(review): the argument is cast to
+				 * unsigned long long but printed with PRIX64
+				 * (uint64_t) — confirm the widths match on all
+				 * supported targets.
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/* Convert the encap item template into filter elements */
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				/*
+				 * The user mark id is remapped to an internal
+				 * color flow-stat id so the FPGA can track
+				 * per-flow statistics.
+				 */
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			/*
+			 * NOTE(review): when END lands exactly in the last
+			 * slot (after an auto-inserted MARK) this still
+			 * returns -1 — confirm max_elem is sized with one
+			 * spare slot for the inserted MARK.
+			 */
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Translate an rte_flow action array into the filter-layer action list used
+ * by the inline FPGA profile.
+ *
+ * action:       output; flow_actions[] and the auxiliary structs inside
+ *               (flow_rss, decap, encap, queue) are filled in here
+ * actions:      rte_flow actions, terminated by RTE_FLOW_ACTION_TYPE_END
+ * max_elem:     capacity of action->flow_actions[]
+ * queue_offset: added to every QUEUE action index (used to remap queues for
+ *               VF/vDPA ports — see convert_flow())
+ *
+ * Returns 0 on success, a negative value on conversion failure.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		/*
+		 * NOTE(review): unlike the vswitch variant, an unknown action
+		 * (type < 0, not END) silently terminates the loop and the
+		 * function returns -1 without an error log — confirm this is
+		 * intended.
+		 */
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Only the default RSS hash function is supported */
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				/* Parse the raw decap header blob into flow items */
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				/* Parse the raw encap header blob into flow items */
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				/* Remap queue index for VF/vDPA ports */
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible action: pass the conf through unchanged */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when 'flow' does not point into the local nt_flows[]
+ * table, i.e. it is a raw filter-layer handle that was typecast to
+ * struct rte_flow * (inline profile, group > 0 flows).
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *base = &nt_flows[0];
+	const void *top = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *p = flow;
+
+	return p < base || p > top;
+}
+
+/*
+ * Convert rte_flow attr/items/actions into the filter-layer representation
+ * for either the inline or the vswitch FPGA profile.
+ *
+ * attribute/match/action: caller-provided output containers
+ * flow_stat_id:           out; set by the vswitch action conversion
+ *
+ * Returns 0 on success; -1 with 'error' populated on failure. For the
+ * inline profile with attr->group > 0, returns 0 without further handling
+ * (the caller passes the converted flow straight to flow_create()).
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Validate dev before any dereference. The original code read
+	 * dev->p_drv and dev->type first and only then checked for NULL,
+	 * making the check ineffective.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback.
+ *
+ * Two kinds of handles exist: pointers into the local nt_flows[] table, and
+ * raw filter-layer handles returned directly by flow_create() for inline
+ * group > 0 flows (detected via is_flow_handle_typecast()). Only the table
+ * entries carry driver-side statistics.
+ *
+ * Returns 0 on success, otherwise the filter-layer error code with 'error'
+ * populated.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/*
+		 * Raw filter-layer handle: it is NOT a struct rte_flow, so
+		 * never touch stat_* fields through it. (The original code
+		 * fell through to the stat-clearing below and wrote into the
+		 * handle's memory after a successful destroy.)
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	/* Release the stat id and free the table slot */
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: convert the flow and ask the filter layer to
+ * validate it without programming hardware.
+ *
+ * Returns 0 when the flow is valid, a negative value (with 'error' set)
+ * otherwise.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+
+	/* Conversion failure is reported through 'error' by convert_flow() */
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	int res = flow_validate(dev->flw_dev, match.flow_elem,
+				action.flow_actions, &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * For the inline profile with attr->group > 0 the raw filter-layer handle is
+ * returned typecast to struct rte_flow * (see is_flow_handle_typecast()).
+ * Otherwise a slot is allocated in the nt_flows[] table under flow_lock and
+ * its flw_hdl points to the filter-layer flow.
+ *
+ * Returns the flow handle, or NULL with 'error' populated on failure.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline group > 0: no table slot, the raw handle doubles as rte_flow */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim a free slot; only usable when a valid stat id was assigned */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Creation failed: release the stat id and the slot */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (all-ports) statistics refresh. */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull accumulated RX/TX/color statistics from the adapter into the PMD's
+ * per-port counters. Rate-limited to once per second per port (via
+ * internals->last_stat_rtc) and once per second globally for the shared
+ * color counters (via last_stat_rtc).
+ *
+ * Returns 0 on success (including rate-limited no-op calls), -1 on a bad
+ * if_index or missing stat context.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/*
+	 * NOTE(review): the bound check uses '>' so an if_index equal to
+	 * NUM_ADAPTER_PORTS_MAX passes — confirm whether '>=' was intended.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	/*
+	 * NOTE(review): a mutex is acquired while the hwlock spinlock is
+	 * held — verify this cannot stall other lcores spinning on hwlock.
+	 */
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow query callback. Only RTE_FLOW_ACTION_TYPE_COUNT is supported;
+ * counters come from the driver's polled statistics, so poll first.
+ *
+ * Returns 0 on success, -1 (with 'err' set) on bad handle or unsupported
+ * query action.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* Raw filter-layer handles carry no driver-side counters */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	poll_statistics(internals);
+
+	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+
+	struct rte_flow_query_count *qcnt = data;
+
+	if (qcnt) {
+		if (!flow) {
+			qcnt->hits_set = 0;
+			qcnt->bytes_set = 0;
+		} else {
+			qcnt->hits = flow->stat_pkts;
+			qcnt->hits_set = 1;
+			qcnt->bytes = flow->stat_bytes;
+			qcnt->bytes_set = 1;
+
+			if (qcnt->reset) {
+				flow->stat_pkts = 0UL;
+				flow->stat_bytes = 0UL;
+				flow->stat_tcp_flags = 0;
+			}
+		}
+	}
+
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Dump all fields of an rte_flow_tunnel to the debug log (DEBUGGING builds only). */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * Action template handed out by ntnic_tunnel_decap_set(); slot [1] is
+ * patched with the concrete decap action type at call time.
+ * NOTE(review): this is shared mutable static state — concurrent decap_set
+ * calls would race on slot [1]; confirm calls are serialized.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel_decap_set callback: hand back the PMD actions needed to
+ * decap the given tunnel. Only VXLAN is supported; other tunnel types get
+ * -ENOTSUP. The returned array is the shared static _pmd_actions[] with
+ * slot [1] patched to the decap action.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* Single PMD-private TUNNEL match item handed out by ntnic_tunnel_match(). */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel_match callback: expose the single PMD-private TUNNEL
+ * match item regardless of the tunnel argument. Always succeeds.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* One shared static item covers every tunnel match request */
+	*num_of_items = 1;
+	*pmd_items = &_pmd_items;
+
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow get_restore_info callback: reconstruct tunnel metadata for a
+ * partially offloaded packet from its FDIR mark (port id in the top byte of
+ * fdir.hi, stat id in the low 24 bits of fdir.lo).
+ *
+ * Returns 0 with 'info' filled on a hit, -EINVAL when the mbuf carries no
+ * usable mark or the tunnel definition lookup fails.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	/* Supported, but no hit found */
+	if (!(m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) || m->hash.fdir.hi == 0)
+		return -EINVAL;
+
+	uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+	uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+	struct tunnel_cfg_s tuncfg;
+
+	if (flow_get_tunnel_definition(&tuncfg, stat_id, port_id) != 0)
+		return -EINVAL;
+
+	if (tuncfg.ipversion == 4) {
+		info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+		info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+		info->tunnel.is_ipv6 = 0;
+	} else {
+		/* IPv6 */
+		for (int i = 0; i < 16; i++) {
+			info->tunnel.ipv6.src_addr[i] = tuncfg.v6.src_ip[i];
+			info->tunnel.ipv6.dst_addr[i] = tuncfg.v6.dst_ip[i];
+		}
+		info->tunnel.is_ipv6 = 1;
+	}
+
+	info->tunnel.tp_dst = tuncfg.d_port;
+	info->tunnel.tp_src = tuncfg.s_port;
+
+	info->tunnel.ttl = 64;
+	info->tunnel.tos = 0;
+
+	/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+	info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+	info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+	info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+	/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+	info->group_id = 0;
+
+#ifdef DEBUGGING
+	_print_tunnel(&info->tunnel);
+#endif
+
+	return 0;
+}
+
+/*
+ * rte_flow tunnel_action_decap_release callback. The actions handed out by
+ * ntnic_tunnel_decap_set() are static, so there is nothing to free.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow tunnel_item_release callback. The item handed out by
+ * ntnic_tunnel_match() is static, so there is nothing to free.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow driver callback table exposed by this PMD (flush unsupported). */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * for the implementation; consider a guard name like NTNIC_FILTER_H.
+ */
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow from already-converted attribute/match/action structures
+ * on behalf of a client. Returns the new flow handle or NULL on failure
+ * (details in *error).
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* Generic pair of 64-bit values used for flag translation tables. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to { RTE_<name>, NT_<name> } so each table row pairs the DPDK
+ * RSS flag with its NT driver counterpart of the same suffix.
+ */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/* Mapping table: .first = RTE_ETH_RSS_* flag, .second = NT_ETH_RSS_* flag.
+ * Only the RSS fields supported by the NT hardware are listed here.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Look up the NT RSS flag corresponding to a single RTE RSS flag.
+ * Returns a pointer to the NT flag stored in the mapping table, or
+ * NULL when the RTE flag has no NT counterpart.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+
+	return NULL; /* no matching NT flag */
+}
+
+/*
+ * Inverse lookup of rte_to_nt_rss_flag(): map a single NT RSS flag back
+ * to its RTE RSS flag. Returns a pointer into the mapping table, or
+ * NULL when the NT flag is not present.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); ++idx) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+
+	return NULL; /* no matching RTE flag */
+}
+
+/*
+ * Translate a DPDK RSS hash-field bit mask (RTE_ETH_RSS_*) into the
+ * driver's struct nt_eth_rss representation.
+ *
+ * Each set bit is mapped through rte_eth_rss_to_nt; bits without an NT
+ * counterpart are logged as errors and dropped, so the returned field
+ * set may be a subset of what was requested.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (uint i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				/* %u matches the unsigned loop counter */
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate the driver's struct nt_eth_rss field set back into a DPDK
+ * RSS hash-field bit mask (RTE_ETH_RSS_*).
+ *
+ * Every NT flag is expected to have an RTE counterpart (NT flags are a
+ * subset of the RTE options), hence the assert instead of a runtime error.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (uint i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Return the meter profile with the given id, or NULL if not present. */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *prof;
+
+	LIST_FOREACH(prof, &dev_priv->mtr_profiles, next) {
+		if (prof->profile_id == meter_profile_id)
+			return prof;
+	}
+
+	return NULL;
+}
+
+/*
+ * rte_mtr meter_profile_add callback (vswitch profile).
+ *
+ * Profiles are only accepted for virtual ports or for egress (id has the
+ * highest bit set); ingress metering on physical ports is rejected.
+ * The profile data is copied into a driver-owned nt_mtr_profile and
+ * inserted into dev_priv->mtr_profiles.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): the second %s prints __func__ again; elsewhere in the
+	 * driver __FILE__ is passed for the "[%s:%u]" slot - confirm intent.
+	 */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	/* Highest id bit flags an egress profile */
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		/* Keep a private copy; the caller's profile may go away */
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr meter_profile_delete callback (vswitch profile).
+ * Unlinks and frees the profile; fails with ENODEV if the id is unknown.
+ * Note: does not check whether any meter still references the profile.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Return the meter object with the given id, or NULL if not present. */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *m;
+
+	LIST_FOREACH(m, &dev_priv->mtrs, next) {
+		if (m->mtr_id == mtr_id)
+			return m;
+	}
+
+	return NULL;
+}
+
+/* A rate split into an integer part and a fractional part in 1/1024 units. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts byte/s to byte/period if form of integer + 1/1024*fractional
+ * the period depends on the clock friquency and other parameters which
+ * being combined give multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps / 10^-12
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /*10^12 pico second*/
+
+	res.integer = dividend / divisor;
+	const uint64_t reminder = dividend % divisor;
+
+	res.fractional = 1024ull * reminder / divisor;
+	return res;
+}
+
+/* Rate conversion for physical ports; period is 8 * 3333 ps.
+ * (Presumably derived from the FPGA clock - TODO confirm.)
+ */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Rate conversion for virtual ports; period is 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr meter_enable callback (vswitch profile).
+ *
+ * Applies the meter's profile to hardware:
+ *  - egress profiles (highest id bit set) program EPP vport/txp QoS,
+ *  - ingress profiles program DBS tx QoS; the global DBS refresh rate is
+ *    configured once per process (static ingress_initial flag).
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on RFC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* round a zero rate up to the smallest non-zero step */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* round a zero rate up to the smallest non-zero step */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Disable ingress QoS for this port by programming zero rate/burst in DBS. */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	/* NOTE(review): return value of nthw_set_tx_qos_config is ignored here */
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr meter_disable callback (vswitch profile).
+ * Egress meters (highest id bit set) clear the EPP vport/txp QoS settings;
+ * ingress meters clear the DBS tx QoS settings via disable().
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/* MTR object create */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/* MTR object destroy */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr capabilities_get callback (inline FLM profile).
+ * Reports trtcm RFC 2698 byte-mode, color-blind metering only, with green
+ * packet/byte statistics, sized from the FLM meter limits.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported (RFC2698 only) */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_add callback (inline FLM profile).
+ *
+ * Only trtcm RFC 2698 byte-mode profiles with cir==pir and cbs==pbs are
+ * accepted (the hardware meters a single rate); the profile is pushed to
+ * hardware via flow_mtr_set_profile().
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): profile ids are bounded by the policy maximum here
+	 * (flow_mtr_meter_policy_n_max) - confirm profile and policy ranges
+	 * really coincide.
+	 */
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete callback (inline FLM profile).
+ * Clears the hardware profile by programming zero rates/bursts.
+ * All parameters are used, so none is marked __rte_unused.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add callback (inline FLM profile).
+ *
+ * The hardware supports a single fixed policy shape: green passes
+ * (END / VOID+END / PASSTHRU+END), yellow and red drop. Any other action
+ * combination is rejected.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete callback (inline FLM profile).
+ * Only validates the id; there is no hardware state to tear down.
+ * policy_id and error are used, so only dev keeps __rte_unused.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr create callback (inline FLM profile).
+ *
+ * Enforces the hardware restrictions (color-blind, shared, enabled at
+ * creation, green-only stats) and range-checks profile/policy/meter ids
+ * before offloading via flow_mtr_create_meter().
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy callback (inline FLM profile).
+ * Removes the offloaded meter via flow_mtr_destroy_meter().
+ * error is used, so it must not carry __rte_unused.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update callback (inline FLM profile), repurposed to adjust
+ * a MTR bucket: bit 63 of adjust_value must be set as a marker, the low
+ * 32 bits carry the adjustment passed to flm_mtr_adjust_stats().
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* marker bit distinguishing an adjust request from a stats mask */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* strip the marker bit, keep the value */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read callback (inline FLM profile).
+ * Only green packet/byte counters are reported (see capabilities);
+ * 'clear' optionally resets the hardware counters after reading.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* Meter ops for the vswitch FPGA profile (no capabilities/policy support). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* Meter ops for the inline FLM FPGA profile (no enable/disable;
+ * stats_update is used for bucket adjustment).
+ */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_eth mtr_ops_get entry point: select the meter ops table matching
+ * the adapter's FPGA profile (vswitch or inline). Returns -1 and logs an
+ * error for unknown/capture profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/*
+ * rte_eth mtr_ops_get hook: store the profile-specific rte_mtr_ops table
+ * in *ops. Returns 0 on success, -1 for unsupported FPGA profiles.
+ * (__rte_unused removed from the prototype: the parameter is used by the
+ * definition, and the attribute is meaningless in a declaration.)
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Book-keeping for one exposed vDPA port (one VF). */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;	/* registered vDPA device handle */
+	int vid;	/* vhost device id, assigned on guest connect */
+	uint32_t index;	/* base HW ring index for this port */
+	uint32_t host_id;	/* VF number */
+	uint32_t rep_port;	/* in-port override used on Tx */
+	int rxqs;	/* number of Rx queues */
+	int txqs;	/* number of Tx queues */
+	uint64_t flags;	/* rte_vhost driver registration flags */
+	struct rte_pci_addr addr;	/* PCI address of the backing VF */
+};
+
+/* Port table; slots are handed out sequentially by nthw_vdpa_init(). */
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;	/* number of used entries in vport[] */
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a (vDPA device, direction, queue id) triple into the HW ring
+ * index, host id (VF number) and representor port configured for it.
+ *
+ * rx: non-zero for an Rx queue, zero for Tx.
+ * Returns 0 on success; -1 if the device is unknown or queue_id exceeds
+ * the configured queue count for that direction.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev != vdpa_dev)
+			continue;
+
+		if (rx) {
+			if (queue_id >= vport[i].rxqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].rxqs);
+				return -1;
+			}
+		} else {
+			/* Bug fix: the Tx branch used to log "rxqs" and
+			 * vport[i].rxqs; report the Tx queue count instead.
+			 */
+			if (queue_id >= vport[i].txqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].txqs);
+				return -1;
+			}
+		}
+
+		/* Same mapping for both directions: base index + queue. */
+		*hw_index = vport[i].index + queue_id;
+		*host_id = vport[i].host_id;
+		*rep_port = vport[i].rep_port;
+		return 0;
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a new vDPA port backed by VF 'vdev' and start its vhost-user
+ * driver on 'socket_path'. On success *vhid receives the slot index in
+ * vport[]. Returns the result of nthw_vdpa_start(), or -1 on error.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/* Bug fix: guard against overflowing the static vport[] table. */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA,
+		       "Cannot register more than %lu vDPA ports\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver of a registered vDPA port and
+ * clear its slot.
+ *
+ * NOTE(review): the loop returns after handling the FIRST port with a
+ * non-empty ifname, so a single call tears down at most one port even
+ * though the function scans the whole table - confirm whether callers
+ * invoke this repeatedly or whether all ports should be closed here.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Mark the slot free; the slot itself is reused only
+			 * via the ifname sentinel, not nb_vpda_devcnt.
+			 */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+/* Not in linux/virtio_config.h on all kernels; defined locally. */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Debug-only lookup table mapping virtio feature bit numbers to names. */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Debug helper: print the name of every feature bit set in 'features'. */
+static void dump_virtio_features(uint64_t features)
+{
+	for (int i = 0; i < NUM_FEATURES; i++) {
+		const uint64_t mask = 1ULL << virt_features[i].id;
+
+		/* Single-bit mask: set iff (features & mask) is non-zero. */
+		if (features & mask)
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost "new_device" callback: a guest connected on a vhost-user socket.
+ * Matches the socket path to a vport[] slot, waits (up to ~2s) for the
+ * PMD instance to become ready, then records the negotiated ring layout
+ * (packed/split) in the instance. Returns 0 on success, -1 on failure
+ * or when neither IN_ORDER nor RING_PACKED was negotiated.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the port whose socket path matches this connection. */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll for the PMD instance; 2000 x 1ms = ~2s timeout. */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): %016lx assumes 64-bit long for uint64_t - PRIx64
+	 * would be portable; confirm against supported targets.
+	 */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost "destroy_device" callback: the guest disconnected. Logs the
+ * teardown and marks the matching PMD instance's virtual port link down.
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	/* First pass: only logs which port is going away. */
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user lifecycle callbacks registered for every vDPA socket. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register and start the vhost-user driver for one vDPA port: register
+ * the socket, attach our lifecycle callbacks, mask out offload features
+ * the FPGA path does not support, and start the driver.
+ * Returns 0 on success, -1 on any registration/start failure.
+ *
+ * Note: parameter renamed from 'vport' to avoid shadowing the
+ * file-scope vport[] table.
+ */
+static int nthw_vdpa_start(struct vdpa_port *port)
+{
+	char *socket_path = port->ifname;
+	int ret;
+
+	/* Offloads that must not be negotiated with the guest. */
+	const uint64_t disabled_features =
+		(1ULL << VIRTIO_NET_F_HOST_TSO4) |
+		(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+		(1ULL << VIRTIO_NET_F_CSUM) |
+		(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+		(1ULL << VIRTIO_NET_F_HOST_UFO) |
+		(1ULL << VIRTIO_NET_F_HOST_ECN) |
+		(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+		(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+		(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+		(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+		(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+		(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+		(1ULL << VIRTIO_NET_F_CTRL_RX) |
+		(1ULL << VIRTIO_NET_F_GSO) |
+		(1ULL << VIRTIO_NET_F_MTU);
+
+	ret = rte_vhost_driver_register(socket_path, port->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_disable_features(socket_path, disabled_features);
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+/*
+ * Forward declarations so the prototypes below do not require
+ * rte_vdpa.h / rte_pci.h to be included first by every user.
+ */
+struct rte_vdpa_device;
+struct rte_pci_device;
+
+/* Map a (vDPA device, direction, queue id) to HW index / host id / rep port. */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+/* Register a vDPA port for VF 'vdev' and start its vhost-user driver. */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+/* Tear down a registered vDPA port. */
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a VF's PCI vendor/device id pair to a printable adapter name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		switch (pci_dev->id.device_id) {
+		case NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:
+			return "NT200A02";
+		case NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:
+			return "NT50B01";
+		default:
+			break;
+		}
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for Napatech VFs: first create the vDPA device for the VF,
+ * then create the DPDK representor interface. Returns 0 on success.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/*
+ * PCI remove for Napatech VFs: tear down the vDPA device, then the DPDK
+ * representor interface. Returns -1 if vDPA removal fails.
+ */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret = ntvf_vdpa_pci_remove(pci_dev);
+
+	if (ret != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI ids this VF driver binds to; zero vendor_id terminates the list. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* VF PCI driver descriptor; requires the vfio-pci kernel module. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+/* PCI probe/remove entry points for the Napatech VF driver. */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+/* VFIO helpers keyed by VF number. */
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..c520a43c44
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdatomic.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+/* DPDK 23.07 renamed the SLAVE_* vhost protocol flags to BACKEND_*;
+ * pick the spelling matching the DPDK version being built against.
+ */
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+/* Virtio-net feature bits this vDPA backend can offer to guests. */
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow state for one guest vring plus the backing HW virt queue. */
+struct vring_info {
+	uint64_t desc;	/* guest-physical address of the descriptor ring */
+	uint64_t avail;	/* GPA of the avail ring / driver event area */
+	uint64_t used;	/* GPA of the used ring / device event area */
+	uint16_t size;	/* ring size in descriptors */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;	/* 0 = Rx, 1 = Tx */
+	struct nthw_virt_queue *p_vq;	/* HW queue handle, NULL if not created */
+
+	int enable;	/* non-zero once the vring is created and enabled */
+};
+
+/* Per-device vring table mirroring the guest's negotiated state. */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device state shared between probe, vhost callbacks and IRQ
+ * handling; the atomics track lifecycle across threads.
+ */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;	/* backing VF */
+	struct rte_vdpa_device *vdev;	/* registered vDPA device */
+
+	/* VFIO fds resolved during setup; -1 when not open. */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id of the connected guest */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	volatile atomic_int_fast32_t started;
+	volatile atomic_int_fast32_t dev_attached;
+	volatile atomic_int_fast32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile atomic_int_fast32_t dma_mapped;	/* guest memory mapped via VFIO */
+	volatile atomic_int_fast32_t intr_enabled;	/* MSI-X vectors armed */
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+/* Bytes occupied by a split-ring used ring of 'size' entries
+ * (flags + idx + entries + avail_event).
+ */
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Node of the global list of probed vDPA internals. */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+/* Global device list; guarded by internal_list_lock. */
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/*
+ * Look up the tracked internal state for a vDPA device handle.
+ * Takes internal_list_lock for the scan. Returns NULL when unknown.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry = NULL;
+	struct internal_list *it;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next)
+	{
+		if (vdev == it->internal->vdev) {
+			entry = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return entry;
+}
+
+/*
+ * Look up the tracked internal state for a PCI device.
+ * Takes internal_list_lock for the scan. Returns NULL when unknown.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry = NULL;
+	struct internal_list *it;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(it, &internal_list, next)
+	{
+		if (pdev == it->internal->pdev) {
+			entry = it;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return entry;
+}
+
+/*
+ * Open the VFIO container/group/device fds for this VF via the nt_vfio
+ * layer and cache them in 'internal'. All fds are -1 on failure.
+ * Returns 0 on success, -1 when nt_vfio_setup() fails.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (do_map != 0) or unmap every guest memory region of the connected
+ * vhost device into the VF's VFIO container. Idempotent: refuses a
+ * second map or an unmap when nothing is mapped. Returns 0 on success,
+ * negative on error.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Reject redundant transitions (map while mapped / unmap while not). */
+	if ((do_map && atomic_load(&internal->dma_mapped)) ||
+			(!do_map && !atomic_load(&internal->dma_mapped))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Bug fix: the GPA specifier was "0xllx" (missing '%'),
+		 * leaving one argument without a conversion - undefined
+		 * behavior and garbled output.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 1);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			atomic_store(&internal->dma_mapped, 0);
+		}
+	}
+
+exit:
+	/* free(NULL) is a no-op; no guard needed. */
+	free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address into the guest-physical address of
+ * the vhost device 'vid'. Returns 0 when no guest memory region
+ * contains 'hva' or the memory table cannot be fetched.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0) {
+		free(mem); /* free(NULL) is a no-op */
+		return 0;
+	}
+
+	for (uint32_t r = 0; r < mem->nregions; r++) {
+		const struct rte_vhost_mem_region *region = &mem->regions[r];
+
+		if (hva >= region->host_user_addr &&
+				hva < region->host_user_addr + region->size) {
+			gpa = hva - region->host_user_addr +
+			      region->guest_phys_addr;
+			break;
+		}
+	}
+
+	free(mem);
+	return gpa;
+}
+
+/*
+ * Create the HW virt queue backing guest vring 'vring' (even index = Rx,
+ * odd = Tx). Resolves the guest ring addresses to GPAs, queries the HW
+ * ring mapping, then programs the FPGA DBS with either a packed or a
+ * split ring depending on the negotiated features. On success the HW
+ * queue handle is stored in hw->vring[vring].p_vq. Returns 0 on success
+ * (including the unsupported split-without-IN_ORDER case, which only
+ * warns), negative on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* Translate the three ring areas from host-virtual to guest-physical. */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even vring index = Rx queue, odd = Tx queue. */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Receive virt queue. */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring the device's vrings up after a guest connects: map guest memory
+ * and create HW queues. Inline profile creates and enables the first
+ * Rx/Tx pair here; other profiles only pre-create vring 0 and rely on
+ * later vring state changes. Always returns 0.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		/* Only the first Rx/Tx pair (vrings 0 and 1) is handled here. */
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down all HW virt queues when the guest disconnects: push the
+ * current ring indices back to vhost so a reconnect can resume, then
+ * release each Rx/Tx HW queue. Always returns 0.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Report our ring positions back to vhost for live-migration state. */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		/* Clearing desc marks the vring as not created. */
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Worst-case VFIO_DEVICE_SET_IRQS payload: header plus one eventfd per
+ * possible queue (Rx+Tx per queue pair) plus the config vector.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Route each vring's callfd (plus the device config fd on vector 0) to a
+ * VFIO MSI-X vector. Returns 0 on success, when already enabled, or when
+ * too many vectors are needed (polling mode still works); -1 on ioctl
+ * failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	/* Idempotent: nothing to do if interrupts are already routed */
+	if (atomic_load(&internal->intr_enabled))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0 carries device/config interrupts */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* NOTE(review): the loop steps by 2; if nr_vring were odd the second
+	 * lookup would read one ring past nr_vring - confirm rings always
+	 * come in Rx/Tx pairs.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 1);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Release all MSI-X vectors previously routed to VFIO for this device.
+ * No-op when interrupts are not currently enabled.
+ * Returns 0 on success or nothing-to-do, -1 on ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *p_irq_set = (struct vfio_irq_set *)buf;
+	int status;
+
+	if (!atomic_load(&internal->intr_enabled))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* count == 0 with DATA_NONE releases every vector on this index */
+	p_irq_set->argsz = sizeof(struct vfio_irq_set);
+	p_irq_set->count = 0;
+	p_irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	p_irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	p_irq_set->start = 0;
+
+	status = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, p_irq_set);
+	if (status) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	atomic_store(&internal->intr_enabled, 0);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the running datapath with the driver state flags.
+ *
+ * Starts the datapath when the device is attached and started but not yet
+ * running; stops it (releasing interrupts and DMA mappings) on the opposite
+ * transition. Serialized by internal->lock.
+ * Returns 0 on success or when no transition applies; negative on error.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!atomic_load(&internal->running) &&
+			(atomic_load(&internal->started) &&
+			 atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 1);
+	} else if (atomic_load(&internal->running) &&
+			(!atomic_load(&internal->started) ||
+			 !atomic_load(&internal->dev_attached))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Fix: log before jumping; the original placed the
+			 * NT_LOG after "goto err", making it unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		/* Unmap guest memory from the VFIO container */
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		atomic_store(&internal->running, 0);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vhost dev_conf callback: record the vhost id, mark the device attached
+ * and kick the datapath state machine. Returns 0 on success, -1 when the
+ * vDPA device cannot be resolved.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	priv = entry->internal;
+	priv->vid = vid;
+
+	atomic_store(&priv->dev_attached, 1);
+
+	ntvf_vdpa_update_datapath(priv);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost dev_close callback: detach the device, stop the datapath and drop
+ * the cached virt-queue pointers. Returns 0 on success, -1 when the vDPA
+ * device cannot be resolved.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+	struct ntvf_vdpa_hw *hw;
+	uint32_t ring;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	priv = entry->internal;
+
+	atomic_store(&priv->dev_attached, 0);
+	ntvf_vdpa_update_datapath(priv);
+
+	/* Invalidate the virt queue pointers */
+	hw = &priv->hw;
+	for (ring = 0; ring < hw->nr_vring; ring++)
+		hw->vring[ring].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost set_features callback. Succeeds unless dirty-page logging (live
+ * migration) is requested, which this driver cannot provide without a
+ * relay core. Returns 0 on success, -1 on lookup failure or when logging
+ * is required.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+	uint64_t negotiated;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &negotiated);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, negotiated);
+
+	if (!RTE_VHOST_NEED_LOG(negotiated))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/* Return the VFIO group fd for the device behind @vid, or -1 on failure. */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/* Return the VFIO device fd for the device behind @vid, or -1 on failure. */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/*
+ * vDPA get_queue_num callback: report the maximum number of queue pairs
+ * supported by @vdev through @queue_num. Returns 0 on success, -1 when the
+ * device is unknown.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA get_features callback: report the virtio feature bits supported by
+ * @vdev through @features. Returns 0 on success, -1 on bad device or NULL
+ * output pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA get_protocol_features callback: report the vhost-user protocol
+ * features supported by this driver. @vdev is used for logging only.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	const uint64_t supported = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+
+	LOG_FUNC_ENTER();
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = supported;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable VFIO MSI-X interrupts and then enable Rx/Tx on every vring.
+ * Even ring indexes are Rx queues, odd indexes Tx queues.
+ * Returns 0 on success, the interrupt-enable error code otherwise.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Fix: use the driver's NT_LOG facility instead of a bare
+		 * printf to stdout.
+		 */
+		NT_LOG(ERR, VDPA, "%s: Failed to enable VFIO interrupts\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings (unsigned index to match nr_vring) */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vhost set_vring_state callback: enable or disable a single vring.
+ *
+ * Disabling stops the HW queue. Enabling either re-enables an existing
+ * queue or creates it (mapping guest memory first). When the last vring
+ * comes up, VFIO interrupts are wired and all queues enabled; for the
+ * INLINE FPGA profile this happens after updating the enable flag, for
+ * other profiles inside the create path. Returns 0 on success, -1 on
+ * lookup/range errors, or the configure-queue error code.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx+Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type 0 = Rx, anything else treated as Tx here */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			/* First enable: map guest memory, then create the HW queue */
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA callback table registered with rte_vdpa_register_device(). Unset
+ * callbacks (migration_done, get_notify_area) are intentionally NULL.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO, register the vDPA
+ * device with vhost and start the datapath state machine.
+ * Returns 0 on success, -1 on any failure (allocations are released).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fix: free list/internal instead of returning -1 directly
+		 * and leaking both allocations.
+		 */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	atomic_store(&internal->started, 1);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, tear down VFIO/PCI mappings, unregister
+ * from vhost and free the per-device state. Returns 0 on success, -1 when
+ * the device is unknown.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath stop the device */
+	atomic_store(&internal->started, 0);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* PCI id table contains only the zero terminator - presumably devices are
+ * bound explicitly rather than by vendor/device id match; verify against
+ * the probe path. NOTE(review): confirm this is intentional.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver registration for the ntvf vDPA driver */
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+/* Log type id used by the vDPA driver */
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit enter/leave debug traces per function */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI probe/remove entry points for the ntvf vDPA driver */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+/* Reset device HW state for the given vhost id */
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+/* DMA windows are carved in 1 GiB chunks starting at START_VF_IOVA */
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/* Derive the VF index from the PCI address: devid[4:0] << 3 | function[2:0]. */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	unsigned int devid_bits = pdev->addr.devid & 0x1f;
+	unsigned int func_bits = pdev->addr.function & 0x7;
+
+	return (int)((devid_bits << 3) + func_bits);
+}
+
+/* Internal API */
+
+/* Per-VF VFIO state: fds default to -1 until nt_vfio_setup() runs */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;	/* next free IOVA in this VF's window */
+};
+
+/* One slot per possible VF number (see nt_vfio_vf_num: 5+3 bits = 256) */
+static struct vfio_dev vfio_list[256];
+
+/* Bounds-checked lookup of the VFIO state for @vf_num; NULL if out of range */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 || vf_num > 255)
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+
+/*
+ * Set up VFIO for a PCI device: resolve its IOMMU group, bind it to a
+ * container (the default container for PF0, a fresh one per VF otherwise)
+ * and map the device. Returns the VF number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/* Fix: check the result; rte_vfio_get_group_num returns 1 on success */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) != 1) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group number.\n");
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Only destroy containers this function created */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/* Tear down the VFIO container associated with @vf_num. Returns 0/-1. */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *p_dev;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	p_dev = vfio_get(vf_num);
+	if (p_dev == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(p_dev->container_fd);
+	return 0;
+}
+
+/*
+ * Map @virt_addr into the VF's VFIO container at the next free 1 GiB IOVA
+ * window; the resulting IOVA is returned through @iova_addr. A mapping of
+ * exactly 1 GiB is aligned down to its 1 GiB page and the offset is added
+ * to the returned IOVA. Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fix: the original passed a void pointer for a PRIX64 conversion and
+	 * a uint64_t size for %d - both undefined behavior.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	/* Reserve the whole 1 GiB window for the next mapping */
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a nt_vfio_dma_map(): unmap @size bytes at @virt_addr/@iova_addr
+ * from the VF's VFIO container, reversing the 1 GiB alignment done at map
+ * time. Returns 0 on success (or when the container is gone), -1 on error.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	/* Mirror the 1 GiB alignment applied in nt_vfio_dma_map() */
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Container already destroyed - nothing left to unmap */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+/*
+ * Map @virt_addr into the VF's container with the caller-supplied IOVA
+ * (the guest physical address). Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fix: size is uint64_t - %d was a format mismatch */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%" PRIu64 "\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo a nt_vfio_dma_map_vdpa(): unmap @size bytes at the guest-physical
+ * IOVA from the VF's container. Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *p_dev = vfio_get(vf_num);
+	int status;
+
+	if (p_dev == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	status = rte_vfio_container_dma_unmap(p_dev->container_fd, virt_addr,
+					      iova_addr, size);
+	if (status != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       status, p_dev->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for @vf_num, or -1 for an illegal id. */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for @vf_num, or -1 for an illegal id. */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for @vf_num, or -1 for an illegal id. */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* Forward declaration carrying the RTE constructor attribute */
+RTE_INIT(nt_vfio_init);
+
+/* Constructor: publish the DMA map/unmap callbacks to the nt_util layer */
+static void nt_vfio_init(void)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Set up/tear down VFIO for a device; setup returns the VF number or -1 */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-VF VFIO file descriptors (-1 on illegal id) */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* DMA map/unmap using driver-allocated 1 GiB IOVA windows */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* DMA map/unmap using guest physical addresses as IOVA (vDPA path) */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..e034e33c89
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+/* NOTE(review): UNUSED appears unreferenced in this file - confirm before use/removal */
+#define UNUSED __rte_unused
+
+/*
+ * Descriptor for one extended statistic.
+ * 'source' selects which counter block the value is read from
+ * (1 = RX port counters, 2 = TX port counters, 3 = FLM counters; see the
+ * switch statements in nthw_xstats_get below), and 'offset' is the byte
+ * offset of the 64-bit counter within that block.
+ */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* exported xstat name */
+	uint8_t source;		/* counter block selector: 1=RX, 2=TX, 3=FLM */
+	unsigned int offset;	/* byte offset into the selected counter struct */
+};
+
+/*
+ * Extended stats for VSwitch adapters.
+ * Entry layout: { name, source, offset }, where source selects the counter
+ * block (1 = RX port counters, 2 = TX port counters).
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+/* Number of entries in the vswitch name table */
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stats for Capture/Inline adapters - implements RMON counters.
+ * FLM 0.17 counter set; selected when flm_stat_ver < 18 (see
+ * nthw_xstats_get). source: 1 = RX port, 2 = TX port, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stats for Capture/Inline adapters - implements RMON counters.
+ * Selected when flm_stat_ver >= 18 (see nthw_xstats_get); identical to the
+ * v1 table plus the FLM 0.20 counters appended at the end.
+ * source: 1 = RX port, 2 = TX port, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values.
+ * NTHW_XSTATS_SIZE is the largest of the name-table sizes so one array
+ * dimension fits every adapter flavor (v2 is a superset of v1).
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port counter snapshots taken by nthw_xstats_reset(); reads report
+ * the delta against these. FLM snapshots use port index 0 only.
+ * NOTE(review): only referenced in this file - consider making it static.
+ */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 };
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/*
+ * Fill 'stats' with up to 'n' extended statistics for 'port'.
+ * Values are reported relative to the snapshot taken by nthw_xstats_reset().
+ * Must be called with the stat mutex held. Returns the number of entries
+ * written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *name_tbl;
+	uint32_t name_cnt;
+	uint8_t *flm_base = NULL;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	/* Select the counter blocks and name table for this adapter flavor. */
+	if (is_vswitch) {
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		name_tbl = nthw_virt_xstats_names;
+		name_cnt = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			name_tbl = nthw_cap_xstats_names_v1;
+			name_cnt = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			name_tbl = nthw_cap_xstats_names_v2;
+			name_cnt = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n && idx < name_cnt; idx++) {
+		const struct rte_nthw_xstats_names_s *e = &name_tbl[idx];
+
+		stats[idx].id = idx;
+		if (e->source == 1) {
+			/* RX counter, delta against the last reset snapshot */
+			stats[idx].value = *(uint64_t *)&rx_base[e->offset] -
+					   nthw_xstats_reset_val[port][idx];
+		} else if (e->source == 2) {
+			/* TX counter */
+			stats[idx].value = *(uint64_t *)&tx_base[e->offset] -
+					   nthw_xstats_reset_val[port][idx];
+		} else if (e->source == 3 && flm_base) {
+			/* FLM counter; reset snapshot kept at port index 0 */
+			stats[idx].value = *(uint64_t *)&flm_base[e->offset] -
+					   nthw_xstats_reset_val[0][idx];
+		} else {
+			/* FLM on vswitch, or unknown source */
+			stats[idx].value = 0;
+		}
+	}
+
+	return idx;
+}
+
+/*
+ * Look up extended statistics by explicit id list.
+ * Must be called with the stat mutex held. Returns the number of ids that
+ * resolved to a known statistic; values[] entries for unknown ids are left
+ * untouched.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *name_tbl;
+	uint32_t name_cnt;
+	uint8_t *flm_base = NULL;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+	int found = 0;
+
+	/* Select the counter blocks and name table for this adapter flavor. */
+	if (is_vswitch) {
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		name_tbl = nthw_virt_xstats_names;
+		name_cnt = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			name_tbl = nthw_cap_xstats_names_v1;
+			name_cnt = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			name_tbl = nthw_cap_xstats_names_v2;
+			name_cnt = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		uint64_t id = ids[i];
+
+		if (id >= name_cnt)
+			continue;
+
+		switch (name_tbl[id].source) {
+		case 1: /* RX counter */
+			values[i] = *(uint64_t *)&rx_base[name_tbl[id].offset] -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 2: /* TX counter */
+			values[i] = *(uint64_t *)&tx_base[name_tbl[id].offset] -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 3: /* FLM counter (absent on vswitch) */
+			if (flm_base) {
+				values[i] =
+					*(uint64_t *)&flm_base[name_tbl[id].offset] -
+					nthw_xstats_reset_val[0][id];
+			} else {
+				values[i] = 0;
+			}
+			break;
+		default:
+			values[i] = 0;
+			break;
+		}
+		found++;
+	}
+
+	return found;
+}
+
+/*
+ * Snapshot the current counter values for 'port'; subsequent reads via
+ * nthw_xstats_get*() report the delta against this snapshot.
+ * Must be called with the stat mutex held.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *name_tbl;
+	uint32_t name_cnt;
+	uint8_t *flm_base = NULL;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+
+	/* Select the counter blocks and name table for this adapter flavor. */
+	if (is_vswitch) {
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		name_tbl = nthw_virt_xstats_names;
+		name_cnt = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			name_tbl = nthw_cap_xstats_names_v1;
+			name_cnt = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			name_tbl = nthw_cap_xstats_names_v2;
+			name_cnt = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < name_cnt; i++) {
+		const struct rte_nthw_xstats_names_s *e = &name_tbl[i];
+
+		if (e->source == 1) {
+			/* RX counter snapshot */
+			nthw_xstats_reset_val[port][i] =
+				*(uint64_t *)&rx_base[e->offset];
+		} else if (e->source == 2) {
+			/* TX counter snapshot */
+			nthw_xstats_reset_val[port][i] =
+				*(uint64_t *)&tx_base[e->offset];
+		} else if (e->source == 3 && flm_base &&
+			   strcmp(e->name, "flm_count_current") != 0) {
+			/* FLM snapshot; reset makes no sense for the
+			 * flm_count_current gauge.
+			 */
+			nthw_xstats_reset_val[0][i] =
+				*(uint64_t *)&flm_base[e->offset];
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked
+ */
+/*
+ * Copy up to 'size' xstat names into xstats_names.
+ * When xstats_names is NULL, return the number of available statistics
+ * instead. Does not require the stat mutex.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *name_tbl;
+	uint32_t name_cnt;
+	unsigned int i;
+	int filled = 0;
+
+	if (is_vswitch) {
+		name_tbl = nthw_virt_xstats_names;
+		name_cnt = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		name_tbl = nthw_cap_xstats_names_v1;
+		name_cnt = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		name_tbl = nthw_cap_xstats_names_v2;
+		name_cnt = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	/* NULL array is a query for the count only. */
+	if (!xstats_names)
+		return name_cnt;
+
+	for (i = 0; i < size && i < name_cnt; i++) {
+		strlcpy(xstats_names[i].name, name_tbl[i].name,
+			sizeof(xstats_names[i].name));
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Copy the names for an explicit id list into xstats_names.
+ * When xstats_names is NULL, return the number of available statistics.
+ * Does not require the stat mutex. Returns the number of ids that resolved
+ * to a known statistic, matching nthw_xstats_get_by_id().
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/* Count only resolved ids; previously count was
+			 * incremented for out-of-range ids as well, which
+			 * disagreed with nthw_xstats_get_by_id().
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Extended-statistics helpers for the ntnic PMD. Per the comments in
+ * ntnic_xstats.c: the get/reset calls must be made with the stat mutex
+ * locked; the name lookups need no lock.
+ */
+
+/* Copy up to 'size' stat names; returns the table size when xstats_names
+ * is NULL, otherwise the number of names copied.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Fill 'stats' with up to 'n' values for 'port'; returns entries written. */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Snapshot current counters so later reads report deltas. */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* Name/value lookups for explicit id lists. */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v10 8/8] net/ntnic: adds socket connection to PMD
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-30 16:51   ` [PATCH v10 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-30 16:51   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-30 16:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
v10:
* Fix uninitialized variables and build warnings.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  877 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5357 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Node in a module's parameter call tree (walked by execute_function()).
+ * A request token is matched against 'param'; on a hit either the nested
+ * 'sub_funcs' table is descended with the remaining tokens, or the leaf
+ * handler 'func' is invoked with the rest of the request string.
+ */
+struct func_s {
+	const char *param;	/* token this node matches; NULL terminates a table */
+	struct func_s *sub_funcs;	/* nested table, or NULL for a leaf */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);	/* leaf handler */
+};
+
+/*
+ * Build an error reply: allocates "<code><module>:<text>" where the first
+ * 4 bytes are overwritten with the binary error code. On allocation failure
+ * *len stays 0. Always returns REQUEST_ERR.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4 bytes code placeholder + "module" + ':' + text + NUL */
+		size_t size = 4 + strlen(module) + 1 +
+			      strlen(ntcerr->err_text) + 1;
+
+		*data = malloc(size);
+		if (*data) {
+			/* snprintf instead of sprintf: bounded even if the
+			 * size computation ever drifts from the format.
+			 */
+			snprintf(*data, size, "----%s:%s", module,
+				 ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal status reply: a single 32-bit reply code. On allocation
+ * failure *len stays 0. Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	*data = malloc(sizeof(uint32_t));
+	if (*data != NULL) {
+		*(uint32_t *)*data = (uint32_t)code;
+		*len = sizeof(uint32_t);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Walk the comma-separated request string 'function' through the 'func_list'
+ * call tree and invoke the matching leaf handler. 'function' is consumed
+ * destructively (tokens are NUL-terminated in place) and hdr->len is reduced
+ * by each consumed token. Returns the handler's result, or REQUEST_ERR via
+ * ntconn_error() when the request is empty/unrecognized.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *saveptr = NULL;
+	/* strtok_r instead of strtok: the server may handle several client
+	 * requests concurrently and strtok's hidden global state is not
+	 * reentrant.
+	 */
+	char *tok = strtok_r(function, ",", &saveptr);
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	/* Remainder of the request, past the token just consumed */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain a text describing the error
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+typedef struct ntconn_mod_s {
+	void *hdl;
+	struct pci_id_s addr;
+	const ntconnapi_t *op;
+
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd;
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;
+};
+
+int ntconn_server_register(void *server);
+
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+struct ntconn_header_s {
+	uint16_t tag; /* NTCONN_TAG_* message type */
+	uint16_t len; /* length of the zero-terminated command string */
+	uint32_t blob_len; /* length of the binary blob following the command string */
+};
+
+struct pci_id_s {
+	union {
+		uint64_t pci_id; /* packed form; 0 marks end of list (see pci_id_list) */
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad; /* fills the union out to 64 bits */
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_nic_pci(void *hdl, int client_fd _unused, /* handler for "server get,nic_pci_ids" */
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s)); /* reply buffer; freed by ntconn_server_free_data after send */
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) { /* list is 0-terminated */
+		sprintf(npci->nic_pci_id[i], "%04x:%02x:%02x.%x", /* max 12 chars + NUL, fits NICS_PCI_ID_LEN + 1 */
+			serv->pci_id_list[i].domain & 0xffff,
+			serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+static int ntconn_server_request(void *hdl, int client_id, /* ntconnapi_t.request hook for the server module */
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0); /* start dispatch at recursion depth 0 */
+}
+
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{ /* ntconnapi_t.free_data hook: release a reply buffer once it has been sent */
+	if (data) {
+#ifdef DEBUG
+		NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+		free(data); /* NOTE(review): NULL guard above is redundant - free(NULL) is a no-op */
+	}
+}
+
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+int ntconn_server_register(void *server)
+{ /* register the server module itself on the reserved address 0000:00:00.0 */
+	const struct rte_pci_addr addr = {
+		.domain = 0, .bus = 0, .devid = 0, .function = 0
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..697e101a03
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error-code to human-readable text table; terminated by the {-1, NULL}
+ * sentinel entry checked by get_ntconn_error().
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;	/* head of the registered-module list */
+static pthread_t tid;			/* accept-loop server thread */
+static pthread_t ctid;			/* most recently spawned client worker */
+static struct ntconn_server_s ntconn_serv;	/* singleton server state */
+
+/*
+ * Look up the table entry for @err_code. An unknown code resolves to the
+ * "Internal error" entry (index 1) so the caller always gets valid text.
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	const ntconn_err_t *entry = ntconn_err;
+
+	while (entry->err_code != (uint32_t)-1 && entry->err_code != err_code)
+		entry++;
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry;
+}
+
+/*
+ * Register a connection module for PCI device @addr with handle @hdl and
+ * operations @op. The module is pushed on the global ntcmod_base list and,
+ * for real devices (non-zero pci_id), its PCI id is recorded once in the
+ * server's pci_id_list. Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): not thread-safe - assumes registration happens before the
+ * server threads start; confirm against callers.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push on the head of the module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the PCI id in the first free slot, unless present */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill @addr as an AF_UNIX address for @path.
+ * Returns 0 on success, -1 if an argument is NULL or the path does not
+ * fit in sun_path (including its terminating NUL).
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!addr || !path)
+		return -1;
+	memset(addr, 0, sizeof(*addr));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+/* Internal status codes for the socket helpers below:
+ * 0 = success, negative = error or transient condition.
+ */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll @fd for readability (@timeout in ms, -1 = block forever) and read
+ * at most @len bytes into @data. On STATUS_OK, *recv_len holds the number
+ * of bytes actually received. Returns one of the STATUS_* codes above;
+ * STATUS_TRYAGAIN means the caller should simply retry.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			/* Save errno: NT_LOG below may clobber it */
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Read exactly @length bytes from @clfd into @data, retrying on
+ * STATUS_TRYAGAIN. Returns STATUS_OK on success; STATUS_CONNECTION_CLOSED
+ * and STATUS_TIMEOUT are passed through silently, other errors are logged.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t done = 0;
+
+	while (done < length) {
+		size_t chunk = 0;
+		int status = read_data(clfd, length - done, &data[done],
+				       &chunk, -1);
+
+		switch (status) {
+		case STATUS_OK:
+			done += chunk;
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return status;
+		case STATUS_TRYAGAIN:
+			/* Retry the read */
+			break;
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       status);
+			return status;
+		}
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write exactly @size bytes from @data to @fd, looping over short writes.
+ * Interrupted writes (EINTR) are retried instead of being treated as
+ * fatal. Returns 0 on success, STATUS_INTERNAL_ERROR on write failure.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue; /* interrupted by signal - retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from @clfd into a freshly allocated buffer
+ * (*rdata, ownership passes to the caller). First reads up to
+ * MESSAGE_BUFFER bytes, then, if the header announces a larger payload,
+ * grows the buffer and reads the remainder. Returns a STATUS_* code.
+ * NOTE(review): hdr.len/hdr.blob_len come from the peer and are used
+ * unvalidated to size the allocation - confirm peers are trusted.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	/* The message starts with a fixed-size header */
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		/* Grow the buffer and fetch the remaining payload bytes */
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read and parse one request from @clfd. The request format is
+ * "<pci_id>;<module>;<command...>". On success, returns the registered
+ * module matching the PCI address and module name (NULL if none),
+ * stores the raw request buffer in *get_req (caller frees it), points
+ * *module_cmd at the command part, and sets *status to a STATUS_* code.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/* First token: the PCI id string */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			/* Check tok before strlen(): a malformed request
+			 * without ';' separators must not crash the server
+			 */
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			/* Second token: the module name */
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			/* Remainder (if any) is the module command */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "domain:bus:devid.function" from pci_id */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Send one framed reply on @clfd: a ntconn_header_s carrying @reply_tag
+ * followed by @size bytes of @data. Returns 0 on success or a STATUS_*
+ * error code.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+	uint8_t *msg;
+	int res;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	msg = malloc(hdr.len);
+	if (msg == NULL) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(msg, &hdr, NTCMOD_HDR_LEN);
+	memcpy(msg + NTCMOD_HDR_LEN, data, size);
+	res = write_all(clfd, msg, hdr.len);
+	free(msg);
+
+	return res;
+}
+
+/*
+ * Send a reply and then hand @data back to the owning module's free_data
+ * callback (under the module mutex). Zero-size replies carry no module
+ * data, so nothing is freed for those.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size == 0)
+		return res;
+
+	pthread_mutex_lock(&cmod->mutex);
+	cmod->op->free_data(cmod->hdl, data);
+	pthread_mutex_unlock(&cmod->mutex);
+
+	return res;
+}
+
+/*
+ * Send an error reply for @err_code on @clfd. The payload is the 4-byte
+ * binary error code (overwriting the "----" placeholder) followed by
+ * ":connect:<text>".
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	/* snprintf: err_text length is not bounded by this function
+	 * (was sprintf)
+	 */
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	/* First 4 bytes carry the binary error code over the placeholder */
+	memcpy(err_buf, &ntcerr->err_code, sizeof(uint32_t));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: loop reading requests on the client fd
+ * (passed via @arg), dispatching them to the matching module, and sending
+ * replies. On exit, closes the fd and runs each module's client_cleanup
+ * callback for this client.
+ * NOTE(review): after a successfully handled command the loop still falls
+ * through to ntconnect_send_error(INVALID_REQUEST) below - confirm the
+ * client protocol tolerates this trailing error frame.
+ * NOTE(review): 'request' is freed only at the bottom of the loop; every
+ * 'break' above it leaks the buffer for that iteration.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				/* Reply: major in upper 32 bits, minor in lower */
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept-loop thread: listen on the server socket and spawn one detached
+ * ntconnect_worker thread per accepted client, pinned to the configured
+ * CPU set. Exits on listen/accept failure or when 'running' is cleared.
+ * NOTE(review): pthread_create's return value is not checked - on failure
+ * the subsequent pthread_setaffinity_np/detach use an invalid ctid.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Start the ntconnect service on unix socket @sockname with worker
+ * threads pinned to @cpuset. A no-op (returning 0) when no modules have
+ * been registered. Returns 0 on success, -1 on error.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		/* Fail early if the path does not fit in sun_path;
+		 * previously this error was silently ignored and bind()
+		 * would be attempted with a zeroed address
+		 */
+		if (unix_build_address(sockname, &addr) != 0) {
+			NT_LOG(ERR, NTCONNECT, "Invalid socket path: %s",
+			       sockname);
+			return -1;
+		}
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+/* Max number of adapters this module can serve simultaneously */
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle: currently just the owning driver instance */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+/* Handlers for the "get" sub-commands (defined below) */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* Handlers for the "set" sub-commands */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Translate an internal nt_link_speed_e value into the PORT_LINK_SPEED_*
+ * value reported to clients; unrecognized speeds map to UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:
+		return PORT_LINK_SPEED_10M;
+	case NT_LINK_SPEED_100M:
+		return PORT_LINK_SPEED_100M;
+	case NT_LINK_SPEED_1G:
+		return PORT_LINK_SPEED_1G;
+	case NT_LINK_SPEED_10G:
+		return PORT_LINK_SPEED_10G;
+	case NT_LINK_SPEED_25G:
+		return PORT_LINK_SPEED_25G;
+	case NT_LINK_SPEED_40G:
+		return PORT_LINK_SPEED_40G;
+	case NT_LINK_SPEED_50G:
+		return PORT_LINK_SPEED_50G;
+	case NT_LINK_SPEED_100G:
+		return PORT_LINK_SPEED_100G;
+	default:
+		return PORT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Translate a client speed string ("10M" .. "100G") into the internal
+ * nt_link_speed_t value; unknown strings map to NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces" handler: build a ntc_interfaces_s snapshot covering
+ * physical ports (including LAG handling), then virtual ports: type,
+ * PCI id, link state/speed, MTU, MAC, attached HW queues and NIM data.
+ * The buffer is allocated here; ownership passes to the caller via *data.
+ * Returns REQUEST_OK or REQUEST_ERR on allocation/lookup failure.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	/* One header plus one entry per reported port */
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		/* NOTE(review): eth_dev is dereferenced below without a NULL
+		 * check, unlike phy0_eth_dev above - confirm it cannot fail
+		 * for these port ids
+		 */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode length saturates at the 16-bit maximum */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * "get info" handler: return the FPGA image identifier as a
+ * "TTT-PPPP-VV-RR" string in a freshly allocated buffer (ownership
+ * passes to the caller via *data). Returns REQUEST_OK or REQUEST_ERR.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* NOTE(review): snprintf bound is the magic number 31 - assumes
+	 * sizeof(struct ntc_adap_get_info_s) >= 31; confirm and prefer
+	 * the struct size
+	 */
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * "get sensors" handler: serialize a ntc_sensors_s header followed by a
+ * flat array of all adapter sensors and all per-port NIM sensors into a
+ * freshly allocated buffer (ownership passes to the caller via *data).
+ * Returns REQUEST_OK or REQUEST_ERR on allocation failure.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	/* NOTE(review): fixed 24-byte copy of p_dev_name - assumes the
+	 * source holds at least 24 bytes; confirm
+	 */
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		/* NOTE(review): unconditional 50-byte copy of the name,
+		 * unlike the bounded copy in the NIM loop below - possible
+		 * overread if the source name is shorter; confirm
+		 */
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable the given physical port */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable the given physical port */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/* Request link-up on a port; a no-op (besides logging) if already up */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+	} else {
+		nt4ga_port_set_link_status(p_adapter_info, portid, true);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	}
+	return REQUEST_OK;
+}
+
+/* Request link-down on a port; a no-op (besides logging) if already down */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		nt4ga_port_set_link_status(p_adapter_info, portid, false);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+		       portid);
+	} else {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of a port from its textual representation.
+ * The port must be administratively disabled for the change to be allowed.
+ * On failure an error reply is written to *data/*len.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+	nt_link_speed_t speed;
+
+	/* Speed can only be changed while the port is disabled */
+	if (nt4ga_port_get_adm_state(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	speed = convert_link_speed(speed_str);
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+	return REQUEST_OK;
+}
+
+/* Set the loopback mode (host/line/off) of a port */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Enable/disable the transmitter laser of a port.
+ * Replies with NTCONN_ADAPTER_ERR_TX_POWER_FAIL when the hardware call fails.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(&adap->drv->ntdrv.adapter_info, portid,
+				disable) != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Handle "adapter;set,interface,portN,<cmd>[,<arg>]" requests by parsing the
+ * remainder string in *data and dispatching to the appropriate set_* helper.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports, i.e. 0 <= port_nr < n_phy_ports.
+	 * The previous check ("port_nr < n_phy_ports" -> error) was inverted:
+	 * it rejected every valid physical port and accepted out-of-range
+	 * values. Negative values from atoi() are rejected as well.
+	 */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * Handle "adapter;set,adapter,..." requests.
+ * No sub-commands are implemented yet; the command is logged and a
+ * NOT_YET_IMPLEMENTED error reply is returned.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT, "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * Request entry point for the "adapter" ntconnect module: forwards the
+ * parsed function name to the matching handler in adapter_entry_funcs.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a response buffer previously allocated by this module */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; this module keeps no per-client state */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations vector registered for the "adapter" ntconnect module */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register one adapter with the NtConnect framework.
+ * Claims the first free slot in adap_hdl[]; returns -1 when all
+ * MAX_ADAPTERS slots are taken, otherwise the register_ntconn_mod() result.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused adapter slot */
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client driver handle for the "filter" module */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Per-port flow device and caller identification, filled by func_flow_setport() */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/*
+ * Error-code to message table; terminated by the (uint32_t)-1 sentinel.
+ * Entry [1] ("Internal error") doubles as the fallback for unknown codes.
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Translate a flow error code to its human-readable message.
+ * Unknown codes map to the generic "Internal error" entry (index 1).
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	const ntconn_err_t *entry = &ntconn_err[0];
+
+	/* Walk the table until a match or the (uint32_t)-1 sentinel */
+	while (entry->err_code != (uint32_t)-1 &&
+			entry->err_code != err_code)
+		entry++;
+
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry->err_text;
+}
+
+/*
+ * Allocate and fill a struct flow_return_s reply carrying the given status
+ * and the message text for 'code'. Returns REQUEST_ERR (with *len = 0) on
+ * allocation failure.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * Use strlcpy() as ntconn_flow_err_status() does; the previous
+		 * memcpy(..., RTE_MIN(strlen(err_msg), ERR_MSG_LEN)) never
+		 * wrote a NUL terminator into err_msg.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Allocate and fill a struct flow_return_s reply carrying the given status
+ * and the generic "Internal error" message. Returns REQUEST_ERR (with
+ * *len = 0) on allocation failure.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	ret->status = err;
+	ret->type = FLOW_ERROR_GENERAL;
+	strlcpy(ret->err_msg, get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR),
+		ERR_MSG_LEN);
+
+	*data = (char *)ret;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Dispatch table mapping "filter" sub-command names to their handlers */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Allocate a struct flow_return_s reply from a status and a flow_error
+ * descriptor. Returns REQUEST_ERR (with *len = 0) on allocation failure.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	ret->status = status;
+	ret->type = error->type;
+	strlcpy(ret->err_msg, error->message, ERR_MSG_LEN);
+
+	*data = (char *)ret;
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * Handle "filter;setport,in_port=N,vpath=PATH" requests: bind a client port
+ * number to the flow device of the virtio path instance and reply with the
+ * list of its queues. The reply buffer is heap-allocated and released later
+ * via the module's free_data hook.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/*
+	 * Initialize vpath: it was previously left uninitialized and then
+	 * logged with %s and passed to vp_path_instance_ready() even when the
+	 * "vpath=" token was absent or malformed (undefined behavior).
+	 */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	/* Parse the "in_port=N" token */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	/* Parse the "vpath=PATH" token */
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	/* Build the reply: number of queues plus their ids and hw ids */
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle "filter;flush,port=N" requests: flush all flows on the flow device
+ * bound to port N and reply with the operation status.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * atoi() can return a negative value for malformed input; the
+	 * previous check only rejected the upper bound, allowing a negative
+	 * index into port_eth[].
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handle "filter;destroy" requests: destroy the flow handle carried in the
+ * binary blob (struct destroy_flow_ntconnect) appended to the header and
+ * reply with the operation status.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	/*
+	 * Initialize 'error' as func_flow_flush() does, so error.type and
+	 * error.message are defined even if flow_destroy() leaves them
+	 * untouched -- TODO confirm flow_destroy() always fills them in.
+	 */
+	set_error(&error);
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject negative ids as well, to protect the port_eth[] lookup */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selects whether make_flow_create() validates or actually creates a flow */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Unmarshal the serialized flow description in 'flow_cpy' into flow_elem /
+ * flow_action arrays and call flow_validate() or flow_create() depending on
+ * 'func'. Returns the flow handle as uint64_t (0 on validate or on error);
+ * *status and *error report the outcome.
+ *
+ * Bounds fix: the original guards used '>' against the array sizes
+ * (e.g. "if (idx > MAX_FLOW_STREAM_ELEM)"), which still allowed a write to
+ * elem[MAX_FLOW_STREAM_ELEM] -- one element past the end of arrays declared
+ * with that size. All guards now use '>='.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Preset the error to the overflow case; cleared again on success */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unmarshal the match elements up to FLOW_ELEM_TYPE_END */
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unmarshal the actions up to FLOW_ACTION_TYPE_END */
+	idx = -1;
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				/*
+				 * items[] is assumed to hold exactly
+				 * RAW_ENCAP_DECAP_ELEMS_MAX entries -- TODO
+				 * confirm; the '>=' guard keeps the write
+				 * inside that bound.
+				 */
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy[i].elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Validate a flow specification on behalf of an NtConnect client without
+ * actually creating it.  The binary blob appended after the request header
+ * holds a struct create_flow_ntconnect (port, flow items and actions).
+ * The reply blob (built by copy_return_status()) carries the validation
+ * status and any flow_error details.  Returns REQUEST_OK/REQUEST_ERR.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose blob does not match the expected layout */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	/* Fixed copy-paste: this is the validate entry point, not create */
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* Fixed: index the elem array, not the flow_cpy pointer */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			/* Fixed: print bytes [0]..[3], not [0] four times */
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	/*
+	 * copy_return_status() builds the reply blob from status/error.
+	 * The duplicated, unreachable reply-building code that previously
+	 * followed this return has been removed.
+	 */
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Query an existing flow on behalf of an NtConnect client.
+ * The request blob is a struct query_flow_ntconnect holding the port,
+ * an opaque flow handle and a single query action.  The reply blob is a
+ * struct query_flow_return_s followed by any data flow_query() returned.
+ * Returns REQUEST_OK (reply allocated into *data) or REQUEST_ERR on OOM.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	/* Request blob sits immediately after the ntconn header */
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/*
+	 * NOTE(review): the flow handle arrives as a raw value from the
+	 * client and is dereferenced without validation — assumed to be a
+	 * handle previously returned by func_flow_create; confirm.
+	 */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply = fixed return struct + variable-length query data */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		/* Ownership of data_out ends here: copied then freed */
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Entry point for "flow" module requests: dispatch the named function
+ * to its handler via the adapter_entry_funcs table.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by a flow request handler. */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op, so no NULL guard is needed */
+	free(data);
+}
+
+/* Per-client cleanup hook; the flow module keeps no per-client state. */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect operations table exported by the "flow" module. */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register a driver instance as a "flow" NtConnect module.
+ * Takes the first free slot in flow_hdl[]; returns -1 when all
+ * MAX_CLIENTS slots are occupied, otherwise register_ntconn_mod()'s
+ * result.
+ * NOTE(review): the free-slot scan is not locked — presumably only
+ * called during single-threaded driver init; confirm.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (flow_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[i],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+/*
+ * Return the static error text for a meter error code.
+ * Codes below NTCONN_METER_ERR_INTERNAL_ERROR come from the common
+ * ntconn error table; unknown codes fall back to "Internal error".
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	int idx = 0;
+
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *err_msg = get_ntconn_error(err_code);
+
+		return err_msg->err_text;
+	}
+	/* Scan the module table; the { -1, NULL } entry terminates it */
+	while (ntconn_err[idx].err_code != (uint32_t)-1 &&
+			ntconn_err[idx].err_code != err_code)
+		idx++;
+	/* Unknown code: report entry 1 ("Internal error") */
+	if (ntconn_err[idx].err_code == (uint32_t)-1)
+		idx = 1;
+	return ntconn_err[idx].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+/*
+ * Fill an rte_mtr_error from a module error code.  The message points at
+ * a static string from the error table; cause is always NULL.
+ */
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+	error->cause = NULL;
+}
+
+/*
+ * Return rte_mtr capabilities for the port addressed by a "vport=N"
+ * request parameter.  On success the reply is a
+ * struct meter_capabilities_return_s; on failure a
+ * struct meter_error_return_s carrying status and error text.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse the "vport=N" token from the comma-separated request */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is derived from the vport's low bit */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+	/* Non-zero status from rte_mtr_capabilities_get() falls through here */
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Configure metering for a virtual port.  The request carries a command
+ * token (addpro/delpro/addpol/delpol/crtmtr/delmtr) plus a
+ * struct meter_setup_s blob.  Profile/policy/meter IDs are mapped into a
+ * per-vport range before calling the rte_mtr API.  The reply is either a
+ * struct meter_return_s (success) or struct meter_error_return_s.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* The command is the first comma-separated token of the request */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		/* Offset the ID into this vport's private ID range */
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action table from the flat copies */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy IDs must be offset into the same vport range */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	default:
+		/* Unreachable: command was validated against UNKNOWN_CMD
+		 * above; keep status defined in case the enum ever grows.
+		 */
+		copy_mtr_error(&error, NTCONN_METER_ERR_INTERNAL_ERROR);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Read (and optionally clear) statistics for one meter on a virtual
+ * port.  The request blob is a struct meter_get_stat_s; the meter ID is
+ * offset into the vport's private range before calling
+ * rte_mtr_stats_read().  Replies with a struct meter_return_stat_s on
+ * success or a struct meter_error_return_s on failure.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Physical port from the vport's low bit; ID into vport range */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Entry point for "meter" module requests: dispatch the named function
+ * to its handler via the adapter_entry_funcs table.
+ */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by a meter request handler. */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op, so no NULL guard is needed */
+	free(data);
+}
+
+/* Per-client cleanup hook; the meter module keeps no per-client state. */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect operations table exported by the "meter" module. */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register a driver instance as a "meter" NtConnect module.
+ * Takes the first free slot in meter_hdl[]; returns -1 when all
+ * MAX_CLIENTS slots are occupied, otherwise register_ntconn_mod()'s
+ * result.
+ * NOTE(review): the free-slot scan is not locked — presumably only
+ * called during single-threaded driver init; confirm.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (meter_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[i],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ *
+ * Each function below registers the given driver instance with one
+ * NtConnect module. Each returns -1 when registration fails.
+ */
+int ntconn_adap_register(struct drv_s *drv);	/* "adapter" module */
+int ntconn_stat_register(struct drv_s *drv);	/* "stat" module */
+int ntconn_flow_register(struct drv_s *drv);	/* "flow" module */
+int ntconn_meter_register(struct drv_s *drv);	/* "meter" module */
+int ntconn_test_register(struct drv_s *drv);	/* "ntconnect_test" module */
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..437cf9ddad
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,877 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ *
+ * The stat module only activates when the FPGA statistics layout version
+ * matches one of the entries below (checked in ntconn_stat_register()).
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Indexes of the four statistic groups stored in one snapshot buffer. */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* Location and size (in 64-bit words) of one group inside a snapshot. */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot node, kept in a singly linked list headed by
+ * stat_hdl.snaps_base.
+ */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* single allocation holding all four groups */
+	struct snaps_s *next;
+};
+
+/* Module-wide state; a single instance shared by all clients. */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;	/* head of per-client snapshot list */
+} stat_hdl;
+
+/* Statistic group selector used by get_size()/do_get_stats(). */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* "get snapshot <group>" sub-commands: return one group from the buffer
+ * collected by a previous "snapshot" request.
+ */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* "get <item>" sub-commands: live reads plus snapshot access. */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level commands of the stat module. */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize flow-matcher (FLM) statistics into a reply buffer.
+ * The buffer starts with a ntc_stat_get_data_s header, followed by nbc
+ * flowmatcher_type_fields_s records. Returns the number of 64-bit words
+ * written (records plus STAT_INFO_ELEMENTS header words).
+ *
+ * NOTE(review): the xstats name/value reads below appear to exist only to
+ * feed the consistency assert; their results are otherwise unused, and
+ * cnt_names/cnt_values become unused variables under NDEBUG - confirm.
+ * NOTE(review): the loop copies from the same (unindexed) source each
+ * iteration and never advances `flm`; this is only correct because
+ * get_size() reports a single FLM record (nbc == 1) - confirm intent.
+ * NOTE(review): the memset in the else-branch zeroes one source-struct
+ * size, not nbc destination records - harmless only while nbc == 1.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		memset(flm, 0, sizeof(*hwstat->mp_stat_structs_flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-color statistics into a reply buffer.
+ * The buffer starts with a ntc_stat_get_data_s header followed by one
+ * color_type_fields_s record per color counter. Returns the number of
+ * 64-bit words written (records plus STAT_INFO_ELEMENTS header words).
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *hdr_out = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *rec;
+	int idx;
+
+	hdr_out->nb_counters = (uint64_t)nbc;
+	hdr_out->timestamp = hwstat->last_timestamp;
+	hdr_out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virtual and capture mode */
+	rec = (struct color_type_fields_s *)hdr_out->data;
+	for (idx = 0; idx < nbc; idx++, rec++) {
+		rec->pkts = hwstat->mp_stat_structs_color[idx].color_packets;
+		rec->octets = hwstat->mp_stat_structs_color[idx].color_bytes;
+		rec->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[idx].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-host-buffer (queue) statistics into a reply buffer.
+ * The buffer starts with a ntc_stat_get_data_s header followed by one
+ * queue_type_fields_s record per Rx host buffer. Returns the number of
+ * 64-bit words written (records plus STAT_INFO_ELEMENTS header words).
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *hdr_out = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *rec;
+	int idx;
+
+	hdr_out->nb_counters = (uint64_t)nbq;
+	hdr_out->timestamp = hwstat->last_timestamp;
+	hdr_out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virtual and capture mode */
+	rec = (struct queue_type_fields_s *)hdr_out->data;
+	for (idx = 0; idx < nbq; idx++, rec++) {
+		rec->flush_pkts = hwstat->mp_stat_structs_hb[idx].flush_packets;
+		rec->drop_pkts = hwstat->mp_stat_structs_hb[idx].drop_packets;
+		rec->fwd_pkts = hwstat->mp_stat_structs_hb[idx].fwd_packets;
+		rec->dbs_drop_pkts = hwstat->mp_stat_structs_hb[idx].dbs_drop_packets;
+		rec->flush_octets = hwstat->mp_stat_structs_hb[idx].flush_bytes;
+		rec->drop_octets = hwstat->mp_stat_structs_hb[idx].drop_bytes;
+		rec->fwd_octets = hwstat->mp_stat_structs_hb[idx].fwd_bytes;
+		rec->dbs_drop_octets = hwstat->mp_stat_structs_hb[idx].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON subset of a v2 port counter block into the wire-format
+ * rmon struct shared by the Rx and Tx reply records (field-for-field,
+ * no transformation).
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize per-port Rx statistics into a reply buffer.
+ * The buffer starts with a ntc_stat_get_data_s header; the record format
+ * then depends on the adapter mode: rtx_type_fields_virt_s records for
+ * vswitch (virtual) adapters, rx_type_fields_cap_s records (RMON block
+ * plus Rx-only counters) for capture adapters. Returns the number of
+ * 64-bit words written (records plus STAT_INFO_ELEMENTS header words).
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* Virtual (vswitch) adapters expose a reduced counter set */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* Capture adapters: full RMON block plus Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize per-port Tx statistics into a reply buffer.
+ * Same layout scheme as read_rx_counters(): ntc_stat_get_data_s header
+ * followed by virt or cap records depending on adapter mode. Returns the
+ * number of 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		/* Virtual (vswitch) adapters expose a reduced counter set */
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			/* The RMON packet count is deliberately overridden
+			 * with the adapter's total Tx packet counter.
+			 * NOTE(review): confirm this override is intended.
+			 */
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Reply with the FPGA statistics layout version as a single int.
+ * On error *data is NULL and *len is 0.
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(*reply));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * Reply with the FLM record layout version as a single int:
+ * 1 for FLM stat versions below 18, otherwise 2.
+ * On error *data is NULL and *len is 0.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(*reply));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ *
+ * type:        which statistic group to size
+ * num_records: out - record count for the group (colors, queues, ports;
+ *              always 1 for the flow matcher)
+ * Returns record words plus the STAT_INFO_ELEMENTS header words.
+ * All enum values are covered, so no default case is needed; an unknown
+ * value would yield 0 records.
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		/* Hardware exposes two counters per color entry */
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Allocate a reply buffer sized for one statistics group, fill it via
+ * the given reader while holding the driver statistics lock, and hand
+ * the buffer (ownership included) back through *data / *len.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nb_records;
+	int nb_words = get_size(stat, stype, &nb_records);
+	int nb_bytes = nb_words * (int)sizeof(uint64_t);
+	uint64_t *buf = malloc(nb_bytes);
+
+	if (!buf) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Serialize against the driver's statistics updater */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, buf, nb_records);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)buf;
+	*len = nb_bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* Live read of flow-matcher statistics into a freshly allocated reply. */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Live read of per-color statistics into a freshly allocated reply. */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Live read of per-queue statistics into a freshly allocated reply. */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Live read of per-port Rx statistics into a freshly allocated reply. */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Live read of per-port Tx statistics into a freshly allocated reply. */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot node for a client in the singly linked list.
+ * When parent is non-NULL it receives the node visited just before the
+ * result (NULL when the match is the head; the list tail when no match
+ * is found), which callers use for unlinking.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *node;
+
+	for (node = stat->snaps_base; node; prev = node, node = node->next) {
+		if (node->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+
+	return node;
+}
+
+/*
+ * Return the snapshot node for a client, creating and linking a fresh
+ * node at the list head when none exists yet.
+ * Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *node = find_client_snap_data(stat, client_id, NULL);
+
+	if (node)
+		return node;
+
+	node = malloc(sizeof(*node)); /* caller handles a NULL result */
+	if (node) {
+		node->client_id = client_id;
+		node->buffer = NULL;
+		node->next = stat->snaps_base;
+		stat->snaps_base = node;
+	}
+	return node;
+}
+
+/*
+ * Take an atomic snapshot of all statistic groups (colors, queues, Rx
+ * and Tx ports) into a per-client buffer. The whole collection runs
+ * under the driver statistics lock so the groups are mutually
+ * consistent; clients fetch individual groups afterwards via the
+ * get/snapshot/... sub-commands.
+ *
+ * Fix vs. original: the early exit taken when no per-client snapshot
+ * node could be allocated jumped to a path that unlocked the statistics
+ * mutex while it was not held (undefined behavior per POSIX); that exit
+ * now uses a separate label after the unlock.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps)
+		goto err_out_unlocked;	/* mutex not taken yet */
+
+	/* Drop any previous snapshot held for this client */
+	if (snaps->buffer)
+		free(snaps->buffer);
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	/* A snapshot request returns no payload of its own */
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+err_out_unlocked:
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one previously collected snapshot section (colors, queues, Rx or
+ * Tx ports) into a freshly allocated reply buffer owned by the caller.
+ *
+ * Fix vs. original: the allocation-failure check tested the output
+ * pointer argument ("data", always non-NULL) instead of the malloc
+ * result ("*data"), so a failed allocation fell through to a memcpy
+ * into a NULL destination.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	/* The client must have issued a "snapshot" request first */
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* Return the color section of the client's stored snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* Return the queue section of the client's stored snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* Return the Rx port section of the client's stored snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* Return the Tx port section of the client's stored snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ */
+/*
+ * Entry point for all "stat" module requests; dispatches the textual
+ * function name via stat_entry_funcs.
+ * NOTE(review): client_id is marked _unused but is forwarded to
+ * execute_function - the attribute only suppresses a compiler warning.
+ */
+static int stat_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer allocated by a stat request handler. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Unlink and free the snapshot state owned by a disconnecting client.
+ * No-op when the client never took a snapshot.
+ * Idiom fix: free(NULL) is a defined no-op, so the buffer is freed
+ * unconditionally instead of behind a redundant null check.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink the node from the singly linked list */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);
+	free(snaps);
+}
+
+/* Module operations table handed to the NtConnect framework. */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for the given driver instance.
+ * Refuses (-1) when the FPGA statistics layout version is not in
+ * layout_versions_supported[]; otherwise registers with the NtConnect
+ * framework.
+ * NOTE(review): mp_nthw_stat is dereferenced here without the NULL
+ * check the request handlers apply - presumably guaranteed non-NULL at
+ * registration time; confirm against the adapter init order.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client registration slots; one entry per registered adapter. */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Dispatch table mapping request names to handlers for this module. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo-test handler: validates the received blob, then allocates a reply
+ * containing the same test data. Used to exercise the NtConnect
+ * request/reply path end to end.
+ *
+ * Fixes vs. original: rejects a negative element count from the
+ * (untrusted) client before it enters the unsigned size arithmetic, and
+ * corrects the "to small" typo in the error log.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	if (number < 0) {
+		/* Negative counts would wrap the unsigned size below */
+		NT_LOG(ERR, NTCONNECT, "Error in test data: negative count");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* Success reply: echo the received elements back */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Error reply: status only, no payload elements */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* Flow API operation identifiers; not referenced in the visible code -
+ * NOTE(review): possibly left over, confirm before removing.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Entry point for all "ntconnect_test" module requests; dispatches the
+ * textual function name via adapter_entry_funcs.
+ * NOTE(review): client_id is marked _unused but is forwarded to
+ * execute_function - the attribute only suppresses a compiler warning.
+ */
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously allocated by the test handler.
+ * free(NULL) is a defined no-op, so the null check in the original was
+ * redundant and has been removed.
+ */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook for the test module; the module keeps no
+ * per-client state, so there is nothing to release here.
+ */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations table handed to the NtConnect framework. */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register one adapter with the NtConnect test module.
+ * Claims the first free slot in test_hdl[]; fails with -1 when all
+ * MAX_CLIENTS slots are in use, otherwise returns the result of
+ * register_ntconn_mod().
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Scan for the first unoccupied handle slot */
+	while (slot < MAX_CLIENTS && test_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[slot],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (14 preceding siblings ...)
  2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-31 12:23 ` Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (4 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
v10:
* Update FPGA register defines.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/*
+ * Bus interface through which an FPGA module is reached (used as
+ * nt_fpga_module_init.bus_id below).
+ * NOTE(review): names suggest PCIe BAR, CCI-P, register-access (RAB)
+ * and SPI buses — confirm exact semantics against Napatech FPGA docs.
+ */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0,
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/*
+ * Access type of an FPGA register (used as nt_fpga_register_init.type).
+ * NOTE(review): RW/RO/WO are read-write/read-only/write-only; RC1
+ * presumably means clear-on-read or write-1-to-clear, and MIXED a
+ * register whose fields differ in access type — confirm.
+ */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/*
+ * Initialization record for one bit field within a register.
+ * The generated tables initialize these as { id, bw, low, reset_val }.
+ */
+struct nt_fpga_field_init {
+	/* Field identifier (generated constant from nthw_fpga_registers_defs.h) */
+	int id;
+	/* Field width in bits */
+	uint16_t bw;
+	/* Least-significant bit position within the register */
+	uint16_t low;
+	/* Reset (default) value of the field */
+	uint64_t reset_val;
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/*
+ * Initialization record for one register and its constituent fields.
+ */
+struct nt_fpga_register_init {
+	/* Register identifier (generated constant) */
+	int id;
+	/* Address relative to the owning module's addr_base */
+	uint32_t addr_rel;
+	/* Total register width in bits */
+	uint16_t bw;
+	/* Access type (see enum nt_fpga_register_type) */
+	nt_fpga_register_type_t type;
+	/* Reset value of the whole register */
+	uint64_t reset_val;
+	/* Number of entries in 'fields' */
+	int nb_fields;
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/*
+ * Initialization record for one FPGA module instance: identity,
+ * version, the bus it lives on, its base address and its registers.
+ */
+struct nt_fpga_module_init {
+	/* Module identifier (generated constant) */
+	int id;
+	/* Instance number when the module occurs more than once */
+	int instance;
+	int def_id;
+	int major_version;
+	int minor_version;
+	/* Bus the module is attached to (see enum nt_fpga_bus_type) */
+	nt_fpga_bus_type_t bus_id;
+	/* Base address of the module on that bus */
+	uint32_t addr_base;
+	/* Number of entries in 'registers' */
+	int nb_registers;
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* One (id, value) product parameter of an FPGA build. */
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/*
+ * Top-level description of one supported FPGA image: identity and
+ * build info, its product parameters and all of its modules.
+ */
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	/* Build timestamp of the FPGA image */
+	uint32_t fpga_build_time;
+	/* Number of entries in 'product_params' */
+	int nb_prod_params;
+	struct nt_fpga_prod_param *product_params;
+	/* Number of entries in 'modules' */
+	int nb_modules;
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+# The PMD is only supported on 64-bit x86 Linux; skip the build elsewhere.
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+# Use the tree-wide default cflags when available (newer DPDK builds),
+# otherwise fall back to machine args plus internal-API access.
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Umbrella header for the FPGA register model: pulls in the model
+ * structures (fpga_model.h) and the generated module, parameter and
+ * register id definitions used by the supported-FPGA tables.
+ */
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..2d6a31b35f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+/*
+ * Generated register-map tables for the CAT module.
+ * Each nt_fpga_field_init_t entry is { id, bit width, low bit, reset }.
+ */
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+/*
+ * Generated register map for the CSU module.
+ * Field entries are { id, bit width, low bit, reset }; register entries
+ * are { id, relative address, bit width, type, reset, field count, fields }.
+ */
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1693228548 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 929302248 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 2904641880 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 55459253 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 4051580681 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1693228548, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 929302248, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 2904641880, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 55459253, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 4051580681, 1,
+		hif_uuid3_fields
+	},
+};
+
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (key matcher) module register map (generated tables): CAM, TCAM, RCP,
+ * TCI and TCQ register/field layouts. Field rows appear to be
+ * { id, width, offset, reset } -- TODO confirm against fpga_model.h.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* CAM data: six 32-bit words W0-W5 plus six 4-bit flow-type nibbles FT0-FT5. */
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* RCP data: 781-bit wide recipe record (see KM_RCP_DATA register entry). */
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+/* KM register table; only KM_STATUS is read-only, the rest are write-only. */
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module register map (generated tables): PCS/FEC status, GTY
+ * transceiver control and link-summary registers. Field rows appear to be
+ * { id, width, offset, reset } -- TODO confirm against fpga_model.h. The
+ * per-lane *_0..*_3 fields presumably correspond to four serdes lanes;
+ * verify against the FPGA documentation.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+/* Same bit layout as mac_pcs_stat_pcs_rx_fields, latched variant. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/*
+ * MAC_PCS register table. Register-level reset values (e.g. 264714 for
+ * DEBOUNCE_CTRL, 94373291 for TIMESTAMP_COMP) presumably are the packed
+ * combination of the per-field reset values above -- verify when editing.
+ */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC_RX module register map (generated tables): read-only 32-bit RX
+ * statistics counters, one field per register.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC_TX module register map (generated tables): read-only 32-bit TX
+ * statistics counters, one field per register.
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI_RD_TG module register map (generated tables) -- presumably a PCIe
+ * read traffic generator used for DMA testing; verify against the FPGA
+ * documentation.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA module register map (generated tables): one write-only enable
+ * bit plus read-only 32-bit packet/error counters.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG module register map (generated tables) -- presumably the
+ * write-direction counterpart of PCI_RD_TG; layout mirrors it apart from
+ * the extra INC_MODE bit and the RW TG_SEQ register.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/*
+ * PDB module register map (generated tables): config plus RCP
+ * control/data recipe registers (all write-only).
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/*
+ * PDI module register map (generated tables). Field names (RXRST/TXRST,
+ * PARITY, DRR/DTR data regs, FRAME/PARITY/OVERRUN errors, RX/TX FIFO
+ * levels) suggest a UART-style serial interface -- verify against the
+ * FPGA documentation.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/*
+ * PTP1588 module field tables (generated). Covers configuration, GPIO
+ * data/output-enable, MAC inband status, MDIO-style MAC MI access, RX
+ * host DMA address/configuration, statistics and TX last-word data
+ * fields. The register table for this module follows later in the file.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/*
+ * QSL module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ * The CTRL/DATA register pairs (ADR + CNT fields) follow the indirect
+ * table-access pattern used by the other *_CTRL/*_DATA tables in this file.
+ * NOTE(review): member meanings inferred from cross-checks — confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/*
+ * QSPI module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ * NOTE(review): register set (CR/SR/DTR/DRR/SSR/DGIE/IER/ISR) looks like the
+ * Xilinx AXI Quad SPI core layout — confirm against the core's product guide.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+/* 4294967295 = 0xffffffff: all slave-select lines deasserted at reset. */
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * RAC module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ * NOTE(review): RAB IB/OB pairs with DMA_*_HI/LO host addresses suggest
+ * in-band/out-of-band DMA buffers for register-access bus traffic — confirm.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Both FREE counters reset to 511 (9-bit all-ones): buffers fully free. */
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Note: RAC register indices (4160..4480) are much larger than in other
+ * modules — presumably byte offsets rather than word indices; confirm. */
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/*
+ * RFD module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+/* 33024 = 0x8100, the IEEE 802.1Q VLAN TPID. */
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+/* 4789 is the IANA-assigned VXLAN UDP destination port. */
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/*
+ * RMC module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/*
+ * RPL module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ * CTRL/DATA pairs (ADR + CNT fields) follow the indirect table-access pattern
+ * used throughout this file.
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 128-bit data field: wider than one 32-bit word, handled by the model. */
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/*
+ * RPP_LR module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/*
+ * RST9563 module register layout (auto-generated FPGA map) — reset/clock
+ * control for this FPGA image (9563, per the file name).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* Most resets start asserted (reset value 1 / all-ones); MMCM resets start
+ * deasserted. */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/*
+ * SLC module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/*
+ * SPIM (SPI master) module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+/* FIFOs empty at reset (RXEMPTY=1, TXEMPTY=1). */
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/*
+ * SPIS (SPI slave) module register layout (auto-generated FPGA map).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+/* FIFOs empty at reset (RXEMPTY=1, TXEMPTY=1). */
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/*
+ * STA (statistics) module register layout (auto-generated FPGA map) —
+ * byte/packet/error counters plus a host DMA address for stat delivery.
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/*
+ * TEMPMON (temperature monitor) module register layout (auto-generated).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/*
+ * TINT (timer interrupt) module register layout (auto-generated).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/*
+ * TMC module register layout (auto-generated).
+ * Field entry:    { field_define, width_bits, lsb_pos, reset_value }
+ * Register entry: { reg_define, index, width_bits, access_type, reset_value,
+ *                   nb_fields, field_table }
+ */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/*
+ * TSM (time sync module) field tables (auto-generated FPGA map). The
+ * tsm_registers table referencing these follows later in the file.
+ * Field entry: { field_define, width_bits, lsb_pos, reset_value }
+ * CON0..CON6 are per-connector sample/config blocks; NTTS_* relate to the
+ * Napatech time sync protocol — presumably; confirm against fpga_model.h.
+ */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+/* CON3..CON6 use a reduced config layout (no DC_SRC/PPSIN fields). */
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+/* NOTE(review): CON5 uses _LO_TIME where the other connectors use _LO_NS —
+ * generator-produced define name; confirm it is intentional. */
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = {	/* entries: { reg id, index, bit width, access type, reset value, field count, field table } -- presumed layout; the field count matches the length of the referenced field array */
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = {	/* entries: { module id, instance, def id, version major, version minor, bus, bus address, register count, register table } -- layout inferred from the per-entry comments (e.g. "CAT v0.21 @ RAB1,768") */
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = {	/* { parameter id, value } pairs for this FPGA image; terminated by the { 0, -1 } sentinel below */
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1693228548 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {	/* product descriptor for FPGA image 9563-55-24-0 */
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1693228548, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules, /* presumably param count + table, module count + table; counts must track the arrays above -- verify when regenerating */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[]; /* table of supported FPGA image descriptors; length/termination convention not visible here - confirm at the definition site */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000; /* descriptor defined in nthw_fpga_9563_055_024_0000.c */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Include guard: avoid leading-underscore names, which are reserved for the
+ * implementation (C11 7.1.3); naming follows NTHW_FPGA_INSTANCES_H.
+ */
+#ifndef NTHW_FPGA_MODULES_DEFS_H
+#define NTHW_FPGA_MODULES_DEFS_H
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicator - MOD_UNKNOWN_MAX must stay the last real entry;
+ * only aliases may be added below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Upper bound on module IDs - intentionally the same value as MOD_UNKNOWN_MAX */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Include guard: avoid leading-underscore names, which are reserved for the
+ * implementation (C11 7.1.3); naming follows NTHW_FPGA_INSTANCES_H.
+ */
+#ifndef NTHW_FPGA_PARAMETERS_DEFS_H
+#define NTHW_FPGA_PARAMETERS_DEFS_H
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP */
+/*
+ * Identifier constants for the SFP+ cage GPIO module: one CFG register and
+ * one GPIO register, each with the same five fields. The parenthesized
+ * values are sequential register/field indices in the generated FPGA model
+ * tables, not hardware offsets.
+ * NOTE(review): field names (ABS, RS, RXLOS, TXDISABLE, TXFAULT) presumably
+ * mirror the standard SFP+ module control/status pins (MOD_ABS, rate select,
+ * RX_LOS, TX_DISABLE, TX_FAULT) -- confirm against the FPGA register map.
+ */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST */
+/*
+ * Identifier constants for the HST module recipe table: an RCP_CTRL register
+ * (with the ADR/CNT indirect-addressing field pair used throughout this
+ * file) and an RCP_DATA register whose fields describe START/END positions,
+ * three MODIF slots (CMD/DYN/OFS/VALUE each) and a STRIP_MODE.
+ * Values are sequential indices in the generated FPGA model tables.
+ * NOTE(review): HST presumably is a header-stripper block -- the
+ * START/END/STRIP_MODE field names suggest it, but the semantics are not
+ * visible here; verify against the module documentation.
+ */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR */
+/*
+ * Identifier constants for the IFR module recipe table: RCP_CTRL with the
+ * common ADR/CNT addressing pair, and RCP_DATA with an enable (EN) and an
+ * MTU field. Values are sequential indices in the generated FPGA model
+ * tables, not hardware offsets.
+ */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS */
+/*
+ * Identifier constants for the INS module recipe table: RCP_CTRL with the
+ * common ADR/CNT addressing pair, and RCP_DATA with DYN/LEN/OFS fields.
+ * Values are sequential indices in the generated FPGA model tables.
+ * NOTE(review): DYN/OFS presumably select a dynamic packet offset as in the
+ * other recipe modules in this file -- confirm against the register map.
+ */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA */
+/*
+ * Identifier constants for the IOA module: a RECIPE table (CTRL with ADR/CNT
+ * pair; DATA with queue override, tunnel pop and VLAN push/pop fields
+ * including DEI/PCP/VID/TPID_SEL), a ROA_EPP table (CTRL/DATA with
+ * PUSH_TUNNEL and TX_PORT), and a VLAN_TPID_SPECIAL register holding two
+ * custom TPID values. Values are sequential indices in the generated FPGA
+ * model tables, not hardware offsets.
+ */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+/* RXAUI */
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR (no dedicated register IDs in this table; presumably shares the SLC layout above) */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-31 12:23   ` Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v9:
* Add missing header
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+#define clk_profile_size_error_msg "size test failed"
+
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Populate *p_fpga_info with product parameters read from the FPGA model:
+ * NIM count, PHY port/quad counts and RX/TX port counts. Then derive the
+ * adapter profile from product feature parameters:
+ *   - NT_NFV_OVS_PRODUCT set (or legacy NT_IOA_PRESENT) -> VSWITCH
+ *   - NT_QM_PRESENT set                                 -> CAPTURE
+ *   - otherwise                                         -> INLINE
+ * Missing parameters default to -1 (counts) / 0 (feature flags).
+ * Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one byte from I2C device n_dev_addr, register n_reg_addr, via the
+ * IIC instance n_instance_no on the given FPGA.
+ * Uses a stack-local nthw_iic_t initialized ad hoc for this one transfer.
+ * Returns the byte value (0..255) on success, or -1 on init/read failure.
+ * NOTE(review): callers must treat -1 as error; a legitimately read 0xFF
+ * is returned as 255 and is therefore distinguishable.
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
<doc_update>
+/*
+ * Write one byte (val) to I2C device n_dev_addr, register n_reg_addr, via
+ * the IIC instance n_instance_no on the given FPGA.
+ * Mirrors nthw_fpga_iic_read_byte(): ad hoc stack-local IIC context.
+ * Returns 0 on success, -1 on init or write failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Run an I2C bus scan on every IIC controller instance in the inclusive
+ * range [n_instance_no_begin, n_instance_no_end]. For each instance a
+ * heap-allocated IIC context is created, initialized, given retry
+ * parameters, scanned, and then deleted. Instances whose allocation or
+ * init fails are silently skipped (best-effort diagnostic scan).
+ * Always returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				/* retry/timing tuning before the scan */
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Probe for a Silicon Labs clock chip behind IIC instance n_instance_no.
+ * Selects register page 0 via n_page_reg_addr, reads the first 8 bytes of
+ * the device's register space into a_silabs_ident, and concludes the part
+ * number from the ident bytes:
+ *   ident[3]==0x53 && ident[2]==0x40 -> 5340
+ *   ident[3]==0x53 && ident[2]==0x41 -> 5341
+ *   ident[2]==38                     -> 5338
+ *   otherwise                        -> -1
+ * Returns the detected part number (5338/5340/5341) or -1 on failure.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	/* all-ones sentinel until ident bytes are folded in */
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* fold the 8 ident bytes into one big-endian u64 for logging */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+				/* NOTE(review): if ident[2] is neither 0x40 nor
+				 * 0x41 here, res stays 0 (read success), not -1
+				 * — confirm that is intended.
+				 */
+			} else if (a_silabs_ident[2] == 38) {
+				/* NOTE(review): decimal 38 vs hex 0x40/0x41 in
+				 * the sibling branches — confirm this should
+				 * not be 0x38.
+				 */
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ */
+/*
+ * CRC-16-CCITT (poly 0x1021) over `length` bytes of `buffer`, seed 0.
+ * Byte-at-a-time bit-twiddling variant (no lookup table): each iteration
+ * swaps the accumulator bytes, XORs in the next data byte, then folds the
+ * polynomial in with three shift/XOR steps.
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t seed = 0;
+
+	while (length--) {
+		seed = (uint16_t)(seed >> 8 | seed << 8);
+		seed = (uint16_t)(seed ^ *buffer++);
+		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
+		seed = (uint16_t)(seed ^ seed << 8 << 4);
+		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1);
+	}
+	return seed;
+}
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ *
+ * Program the on-board Si5340 clock synthesizer with a format-2 clock
+ * profile, reached through FPGA IIC instance 0.
+ *
+ * Returns the result of the profile download: 0 on success, non-zero on
+ * failure.
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+	/* NOTE(review): p_nthw_iic is never deleted here - confirm whether
+	 * nthw_si5340_delete() takes ownership of the IIC handle, otherwise
+	 * this leaks one nthw_iic_t per call.
+	 */
+
+	return res;
+}
+
+/*
+ * Top-level FPGA bring-up:
+ *  1) read the FPGA identification registers and decode product/version,
+ *  2) instantiate the matching FPGA model via the FPGA manager,
+ *  3) initialize the register access (RAC) module,
+ *  4) run the adapter-specific reset/clock initialization,
+ *  5) probe PCIe3 (falling back to HIF) and set up the TSM timers.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		/* Read the FPGA identification registers via raw RAC access */
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/*
+		 * Assign the function-scope n_fpga_ident directly; a local
+		 * redeclaration here previously shadowed it, forcing a
+		 * redundant re-read from p_fpga_info below.
+		 */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	p_fpga_mgr = fpga_mgr_new();
+	if (p_fpga_mgr == NULL) {
+		/* fpga_mgr_init() would dereference a NULL manager */
+		NT_LOG(ERR, NTHW, "%s: Failed to create FPGA manager\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	/* The manager is only needed for the query above; free it on all paths
+	 * (it was previously leaked on the unsupported-FPGA early return).
+	 */
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-specific reset and clock bring-up */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Probe for a PCIe3 module; fall back to HIF when absent */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	/* Guard on p_nthw_tsm: the TSM reads below would dereference NULL
+	 * if nthw_tsm_new() failed.
+	 */
+	if (p_nthw_tsm) {
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down FPGA access by resetting the register access bus (RAB).
+ *
+ * Returns the RAB reset result, or -1 when no RAC module is available.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	/* Single combined test; the nested p_fpga_info check was redundant */
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x adapter initialization: run the common NT200A0x reset setup,
+ * then dispatch to the FPGA-product-specific reset sequence.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res = -1;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific: dispatch on the FPGA product id */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve and cache all RST9563 register/field handles required by the
+ * common NT200A0x reset sequence. Fields not present on the 9563 FPGA are
+ * explicitly set to NULL so the common code can skip them.
+ *
+ * Returns 0 on success, -1 if the RST9563 module instance is missing.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	/* Look up the reset module once (a duplicated lookup was removed) */
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* query (not get): these fields are optional on this FPGA */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 peripheral reset (assert then deassert).
+ * Returns 0 on success, -1 if the RST9563 module is not present.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_rst_reg;
+	nt_field_t *p_periph_fld;
+
+	if (!p_mod_rst)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+	p_rst_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p_periph_fld = register_get_field(p_rst_reg, RST9563_RST_PERIPH);
+
+	/* assert, then release the peripheral reset */
+	field_set_flush(p_periph_fld);
+	field_clr_flush(p_periph_fld);
+
+	return 0;
+}
+
+/*
+ * Program the clock synthesizer for the 9563 FPGA. Only the Si5340
+ * (NT200A02 U23, profile v5) is supported on this platform.
+ * Returns 0 on success, -1 for an unsupported synthesizer model.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * NT200A02 (FPGA 9563) reset entry point:
+ *  1) pulse the peripheral reset,
+ *  2) program the clock synthesizer,
+ *  3) resolve the reset register/field handles into *p_rst,
+ *  4) run the common NT200A0x reset sequence.
+ *
+ * Returns 0 on success, non-zero on the first failing step.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model;
+	uint8_t n_si_labs_clock_synth_i2c_addr;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+	/* Synth model/address were filled in by the common NT200A0x setup */
+	n_si_labs_clock_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	n_si_labs_clock_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_si_labs_clock_synth_model,
+						n_si_labs_clock_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+/*
+ * Wait until the DDR4 PLL reports LOCKED, retrying with a DDR PLL reset
+ * pulse between attempts, then wait for the DDR4 MMCM (and, when present,
+ * the TSM REF MMCM) and clear/verify the sticky unlock bits.
+ * Returns 0 on success, -1 when an MMCM never locks.
+ *
+ * Fixes vs. original: the loop-local wait result no longer shadows the
+ * outer "locked" variable (it was an int shadowing a uint32_t), and the
+ * retry counter is a signed int so "retrycount <= 0" is a robust test.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t locked;
+	int retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		int n_wait_res = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+						      timeout, 100);
+		if (n_wait_res == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, n_wait_res);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Release DDR PLL reset */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is not present on all FPGA images - wait only if wired */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits (write-one-to-clear) */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+/*
+ * Wait for the SDRAM controller to finish calibration - on some adapters a
+ * calibration time of 2.3 seconds has been observed. Each failed attempt
+ * pulses the DDR4 reset and retries, up to n_retry_cnt_max times.
+ * Returns 0 on success, non-zero on timeout/error.
+ *
+ * Fix vs. original: the SDRAM state logs used "%08lX" for a uint64_t,
+ * which is undefined behavior where long is 32-bit; use PRIX64 instead.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			/* allocation failed - proceed without SDC supervision */
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%016" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%016" PRIX64 " state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Full NT200A0x reset sequence: assert all domain resets to their defaults,
+ * select the timesync reference clock, release SYS/CORE MMCMs and wait for
+ * lock, bring the RAB buses out of reset, release the PHY/MAC/DDR4/SDC
+ * domains, wait for SDRAM calibration, then run the TS/PTP release sequence
+ * and finally apply power staging.
+ *
+ * The statement order mirrors the numbered hardware bring-up steps in the
+ * comments below and must not be reordered.
+ * Returns 0 on success, -1 on any lock/calibration failure.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	/* GT data lane index used for the XXV MAC/PCS RX resets below */
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	/* Optional fields (mp_fld_rst_* that may be NULL) depend on FPGA image */
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* NOTE(review): "(true) &&" is a leftover no-op condition - candidate for removal */
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	/* Lock failures here are logged but do not abort - later sticky checks catch them */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	/* NOTE(review): "(true) &&" is a leftover no-op condition - candidate for removal */
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			/* pulse RX GT data reset: assert, wait, de-assert */
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): intentionally disabled PTP MMCM sequence (dead code) -
+	 * confirm whether it can be removed or is kept for future enablement
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Prepare the NT200A0x reset context: reset/setup the RAB buses, probe
+ * peripherals, and detect which Si-Labs clock synthesizer is fitted.
+ * On success the model, I2C address and hw_id are stored in p_rst.
+ * Returns the last probe/scan result, or -1 if no synth is detected.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): the results of the AVR probe and the two IIC scans are
+	 * each overwritten by the next call - presumably best-effort discovery,
+	 * but confirm that their failures are intentionally ignored.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Record detection results for the later reset/init stages */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset context for NT200A0x adapters: identification of the FPGA image,
+ * the detected clock synthesizer, and cached pointers to the RST module's
+ * register fields. Field pointers that are not present on every FPGA image
+ * may be NULL; users must check before dereferencing.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	/* FPGA image identification (product/version/revision) */
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synthesizer (e.g. 5338/5340) and its I2C address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers (reset bits per domain/module) */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers (MMCM/PLL lock status) */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers (latched unlock events) */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional per-adapter hooks for serdes/PCS RX reset handling */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate a zero-initialized GPIO PHY object.
+ * Returns NULL on allocation failure; free with nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc() zero-initializes in one call, replacing malloc()+memset() */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/*
+ * Release a GPIO PHY object previously returned by nthw_gpio_phy_new().
+ * NULL is accepted and ignored.
+ */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before handing it back to the allocator */
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind a GPIO PHY object to FPGA module instance n_instance and resolve
+ * all CFG/GPIO register fields for both ports. Fields wrapped with
+ * register_query_field() are optional and may resolve to NULL on FPGA
+ * images that lack them (PLL_INTR, emulated RXLOS).
+ *
+ * Probe mode: when p is NULL, only checks whether the module instance
+ * exists (returns 0 if present, -1 if not).
+ * Returns 0 on success, -1 when the module instance is missing.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* Read current CFG register contents into the shadow copy */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/*
+ * Return true when the LPMODE GPIO for port if_no reads non-zero.
+ * Out-of-range if_no asserts (debug) and returns false.
+ */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/*
+ * Return true when the port interrupt is active. The INT_B GPIO pin is
+ * active-low, so a zero readback means the interrupt is set.
+ * Out-of-range if_no asserts (debug) and returns false.
+ */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/*
+ * Return true when the port is held in reset. The RESET_B GPIO pin is
+ * active-low, so a zero readback means reset is asserted.
+ * Out-of-range if_no asserts (debug) and returns false.
+ */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/*
+ * Return true when a transceiver module is present in port if_no.
+ * The MODPRS_B GPIO pin is active-low: zero readback means present.
+ * Out-of-range if_no asserts (debug) and returns false.
+ */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/*
+ * Return true when the PLL interrupt GPIO (active-high) reads non-zero.
+ * HW without the "PLL_INTR" pin (no INTR from the SyncE jitter attenuator)
+ * has a NULL field pointer and always reports false.
+ * Out-of-range if_no asserts (debug) and returns false.
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_pll_int)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/*
+ * Return true when the emulated RXLOS GPIO for port if_no reads non-zero.
+ * FPGA images without the emulated RXLOS field report false.
+ * Out-of-range if_no asserts (debug) and returns false.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (!p->mpa_fields[if_no].gpio_port_rxlos)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/*
+ * Drive the LPMODE pin for port if_no to the requested state and switch
+ * the pin to output mode (CFG bit cleared = output).
+ * Out-of-range if_no asserts (debug) and is ignored.
+ */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (enable)
+		field_set_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+
+	else
+		field_clr_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+	field_clr_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Assert (enable=true) or release (enable=false) the port reset and switch
+ * the pin to output mode. RESET_B is active-low, so asserting reset clears
+ * the GPIO bit and releasing it sets the bit.
+ * Out-of-range if_no asserts (debug) and is ignored.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (enable)
+		field_clr_flush(p->mpa_fields[if_no].gpio_reset);
+
+	else
+		field_set_flush(p->mpa_fields[if_no].gpio_reset);
+	field_clr_flush(p->mpa_fields[if_no].cfg_reset); /* enable output */
+}
+
+/*
+ * Drive the emulated RXLOS signal for port if_no (active-high).
+ * Silently does nothing on FPGA images without the emulated RXLOS field.
+ * Out-of-range if_no asserts (debug) and is ignored.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (p->mpa_fields[if_no].gpio_port_rxlos) {
+		if (enable)
+			field_set_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+
+		else
+			field_clr_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+	}
+}
+
+/*
+ * Restore the default pin directions for port if_no: all monitored pins
+ * (LPMODE/INT_B/RESET_B/MODPRS_B) as inputs (CFG bit set = input) and,
+ * when present, the emulated RXLOS as an output (CFG bit cleared).
+ * Out-of-range if_no asserts (debug) and is ignored.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	field_set_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_int); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_reset); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_mod_prs); /* enable input */
+	if (p->mpa_fields[if_no].cfg_port_rxlos)
+		field_clr_flush(p->mpa_fields[if_no].cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-interface direction (cfg) and data (gpio) fields for one PHY port */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* GPIO_PHY module instance: FPGA handle plus register/field shortcuts */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/* Allocate a zero-initialized HIF instance; returns NULL on allocation failure. */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc zero-initializes, replacing the original malloc+memset pair */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the state before releasing the instance */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize a HIF instance against FPGA module MOD_HIF/n_instance.
+ * Calling with p == NULL only probes for module presence.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	/* Probe-only call: report whether the module instance exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Build seed and core/DDR3 speed registers are not present on HIF.
+	 * The original dead if/else chains tested pointers that had just been
+	 * set to NULL; they are collapsed into plain NULL assignments here.
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* PCI statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* HIF_STATUS is optional; probe once and leave the fields NULL when absent */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/* was: re-query plus field lookup on a NULL register - avoid that */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+	if (p->mp_reg_config) {
+		p->mp_fld_max_tlp =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+		p->mp_fld_max_read =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+		p->mp_fld_ext_tag =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+	} else {
+		p->mp_fld_max_tlp = NULL;
+		p->mp_fld_max_read = NULL;
+		p->mp_fld_ext_tag = NULL;
+	}
+
+	return 0;
+}
+
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT40E3)
+		return 0;
+
+	if (p->mp_fld_max_read) {
+		/*
+		 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+		 * (index=5), but the server crashed. For now we need to limit this value to
+		 * 512 (index=2)
+		 */
+		const uint32_t n_max_read_req_size =
+			field_get_updated(p->mp_fld_max_read);
+
+		if (n_max_read_req_size > 2) {
+			field_set_val_flush32(p->mp_fld_max_read, 2);
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+			       p_adapter_id_str, __func__,
+			       n_max_read_req_size);
+		}
+	}
+
+	if (p->mp_fld_ext_tag)
+		field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+	if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+		NT_LOG(INF, NTHW,
+		       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+		       p_adapter_id_str, __func__,
+		       field_get_updated(p->mp_fld_max_tlp),
+		       field_get_updated(p->mp_fld_max_read),
+		       field_get_updated(p->mp_fld_ext_tag));
+	}
+	return 0;
+}
+
+/*
+ * Trigger a TSM sample by writing the magic pattern to HIF_SAMPLE_TIME.
+ * Returns 0 on success, -1 when the optional sample-time field is absent
+ * (init leaves mp_fld_sample_time NULL in that case).
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	/* Raw PCI statistics counters */
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* Fixed unit size and the HIF reference clock frequency */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* Status fields are optional; report zero when they are not present */
+	*p_tags_in_use = 0;
+	*p_rd_err = 0;
+	*p_wr_err = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt, tg_unit_size, tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			&tg_ref_freq, &n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	/* Error counters accumulate in the caller: bump by one per error seen */
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (!ref_clk_cnt) {
+		/* No reference clock ticks: a rate cannot be derived */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = ref_clk_cnt;
+	*p_pci_rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+		       (uint64_t)ref_clk_cnt;
+	*p_pci_tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+		       (uint64_t)ref_clk_cnt;
+
+	return 0;
+}
+
+/*
+ * Enable statistics and latch a sample request. Both fields live in the
+ * same HIF_STAT_CTRL register (see init), so flushing via the request
+ * field writes both shadow values to hardware in one access.
+ */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Disable statistics while still latching a sample request; the single
+ * register flush writes both the cleared enable and the set request bit.
+ */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	/* Open a 100 ms statistics window, then read the resulting rates back */
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	return nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt,
+				   p_tags_in_use, p_rd_err_cnt, p_wr_err_cnt);
+}
+
+/* Sample the PCI end-point counters into *epc. */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	/*
+	 * NOTE(review): the PCI rx rate lands in epc->cur_tx and the tx rate
+	 * in epc->cur_rx - presumably a deliberate host-vs-NIC direction
+	 * swap, but worth confirming against the callers.
+	 */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/* HIF (host interface) module instance: FPGA handle plus register/field
+ * shortcuts. Optional registers/fields are NULL when absent on the FPGA.
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* Product identification */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt registers (not present on HIF; kept NULL by init) */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* PCI statistics */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* PCIe3 TLP/read-request/extended-tag configuration */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA identification snapshot taken at init time */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Per end-point counter snapshot used for sampling and traffic generation */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvell 88E1111 PHY.
+ * This PHY is used in many Trispeed NIMs.
+ * In order to access this PHY, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	nt_field_t *const reset_fld = p->mp_fld_cr_txfifo_reset;
+
+	field_update_register(reset_fld);
+
+	/* Pulse the TX FIFO reset bit: raise it, then drop it again */
+	field_set_all(reset_fld);
+	field_flush_register(reset_fld);
+	field_clr_all(reset_fld);
+	field_flush_register(reset_fld);
+
+	return 0;
+}
+
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	/* Mirror the requested start/stop flags into the shadow register */
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	/* The data byte goes to hardware together with the flags in one flush */
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	/* Pop a single byte from the controller's RX FIFO */
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+	return 0;
+}
+
+/* Soft-reset the IIC controller by writing the reset key to the SOFTR register. */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A); /* 0x0A = soft-reset key */
+
+	return 0;
+}
+
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	/* Turn the controller on via the CR enable bit */
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+	return 0;
+}
+
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	/* SR.BB is non-zero while a transfer occupies the bus */
+	*pb_flag = field_get_updated(p->mp_fld_sr_bb) != 0;
+	return 0;
+}
+
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	/* SR.RXFIFO_EMPTY is non-zero when no received data is pending */
+	*pb_flag = field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0;
+	return 0;
+}
+
+/*
+ * Program the controller timing registers.
+ * n_iic_cycle_time is the I2C clock cycle time in ns (e.g. 125 MHz = 8 ns).
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		nt_field_t *fld;
+		uint32_t ns;
+	} timings[] = {
+		{ p->mp_fld_tsusta, susta }, { p->mp_fld_tsusto, susto },
+		{ p->mp_fld_thdsta, hdsta }, { p->mp_fld_tsudat, sudat },
+		{ p->mp_fld_tbuf, buf },     { p->mp_fld_thigh, high },
+		{ p->mp_fld_tlow, low },     { p->mp_fld_thddat, hddat },
+	};
+	unsigned int i;
+
+	/* Convert each minimum time in ns to a number of controller cycles */
+	for (i = 0; i < sizeof(timings) / sizeof(timings[0]); i++) {
+		uint32_t val = timings[i].ns / n_iic_cycle_time;
+
+		field_set_val_flush(timings[i].fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/* Allocate a zero-initialized IIC instance; returns NULL on allocation failure. */
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc gives zeroed storage, replacing the malloc+memset pair */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Initialize an IIC controller instance against FPGA module MOD_IIC.
+ * Calling with p == NULL only probes for module presence.
+ * n_iic_cycle_time is the controller clock cycle time in ns; 0 skips the
+ * timing setup. Returns 0 on success, -1 when the instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	/* Probe-only call: report whether the module instance exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All-negative arguments select the built-in retry defaults */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Timing registers */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control and status registers */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* FIFO registers */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the state before releasing the instance */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	/* A negative argument selects the built-in default for that parameter */
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry = (n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+	uint8_t *p_data = (uint8_t *)p_void;
+	int n_attempts_left = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	/* Retry the whole transfer until it succeeds or the budget runs out */
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, p_data) != 0) {
+		n_attempts_left--;
+		if (n_attempts_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_attempts_left, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, n_attempts_left);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform a single I2C read transaction: write dev_addr/reg_addr, then read
+ * data_len bytes into p_byte. Returns 0 on success, -1 on bus/data timeout.
+ * Rewritten with guard clauses; the original's unreachable trailing
+ * "return 0" after the if/else (both branches returned) is removed.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t *p_data = (uint8_t *)p_void;
+	int n_attempts_left = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+
+	/* Retry the whole transfer until it succeeds or the budget runs out */
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, p_data) != 0) {
+		n_attempts_left--;
+		if (n_attempts_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_attempts_left, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Perform a single I2C write transaction: dev_addr, reg_addr, then data_len
+ * payload bytes, with the stop flag set on the final byte only.
+ * Returns 0 on success, -1 when data_len is 0 or the bus never became idle.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	/* All bytes except the last are written without the stop flag */
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			/*
+			 * NOTE(review): this wait has no upper bound - if the
+			 * bus never returns to idle this loop spins forever.
+			 * Consider adding a timeout; behavior left unchanged.
+			 */
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ * Returns true when the bus went idle within the retry budget, false on
+ * timeout. Fixes the original's final "count == 0" test, which reported
+ * the bus as ready when a zero retry budget drove count negative while
+ * the bus was still busy.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			return true; /* bus went idle */
+
+		if (--count <= 0)
+			return false; /* timeout: retry budget exhausted */
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ * Returns true when the RX FIFO became non-empty within the retry budget,
+ * false on timeout. Same "count == 0" timeout-test fix as in
+ * nthw_iic_bus_ready().
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			return true; /* data is waiting in the RX FIFO */
+
+		if (--count <= 0)
+			return false; /* timeout: retry budget exhausted */
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = 0xFF;
+	int res;
+
+	/* A successful 1-byte read means a device answered at this address */
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	int addr = 0;
+
+	if (b_increate) {
+		/* Scan upwards from the start address */
+		for (addr = n_dev_addr_start; addr < 128; addr++) {
+			res = nthw_iic_scan_dev_addr(p, addr, 0x00);
+			if (res == 0)
+				break;
+		}
+	} else {
+		/* Scan downwards from the start address */
+		for (addr = n_dev_addr_start; addr >= 0; addr--) {
+			res = nthw_iic_scan_dev_addr(p, addr, 0x00);
+			if (res == 0)
+				break;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, addr);
+	return (res == 0 ? addr : -1);
+}
+
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int addr;
+
+	/* Probe every 7-bit address on the bus; responders are logged */
+	for (addr = 0; addr < 128; addr++)
+		(void)nthw_iic_scan_dev_addr(p, addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one IIC (I2C) controller instance in the FPGA.
+ * Holds the FPGA/module handles, retry/timing configuration and cached
+ * pointers to the controller's registers and fields.
+ */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_iic;
+	int mn_iic_instance;
+
+	uint32_t mn_iic_cycle_time;
+	/* Delay between polls; passed to I2C_WAIT_US(), 0 = busy-wait */
+	int mn_poll_delay;
+	/* Retry budgets for the corresponding operations; a negative value
+	 * presumably selects a built-in default (see nthw_iic_data_ready()).
+	 */
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* I2C timing parameter registers and their value fields */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register (CR) bits */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register (SR) bits: bus busy and FIFO fill states */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data byte plus START/STOP condition flags */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt compare value */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register and its key field */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Lifecycle: allocate, bind to an FPGA IIC module instance, release. */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Configure poll delay and per-operation retry budgets. */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Register-level read/write transfers against a device/register address. */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Polling predicates: bus idle / RX data available (with retry budgets). */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers; a device "responds" when a 1-byte read succeeds. */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+/* Number of PCS virtual lanes handled by this module. */
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+/* Marked _unused: declared for completeness, not referenced yet. */
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate a zero-initialized MAC PCS instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc replaces the former malloc+memset pair: one call, same result. */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Scrub and release a MAC PCS instance; NULL is accepted as a no-op. */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	/* Presence probe only (see contract in the comment above). */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	/* Instance number doubles as port number; must fit a uint8_t. */
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		/* Block-lock and virtual-lane demux lock status */
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		/* PCS RX status - live ... */
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		/* ... and latched variant */
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		/* GTY transceiver TX/RX reset-done status, 4 lanes each */
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		/* Product-specific lock masks, filled in below per product id */
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		/* Main PCS configuration: path resets, enables, test patterns */
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		/* GTY loopback control, one field per lane */
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		/* PHY/MAC misc: TX data source selection */
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP - optional field (query, may be NULL) */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		/* Link summary: aggregated link/fault state */
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		/* Per-lane BIP error counters */
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		/* RS-FEC control, status and codeword counters */
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL - combined register on newer images, split RX/TX on older */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			/* Fall back to split GTY_CTL_RX / GTY_CTL_TX registers */
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		/* Optional timestamp compensation register (query, may be NULL) */
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Host TX and TFG TX are selected mutually exclusively. */
+	const bool use_tfg = !enable;
+
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, use_tfg);
+}
+
+/* Enable/disable the PCS RX path (read-modify-write of the config field). */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* Enable/disable the PCS TX path (read-modify-write of the config field). */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* Select (or deselect) the host as the TX data source. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* Select (or deselect) the traffic generator (TFG) as the TX data source. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/*
+ * Select EOP (vs SOP) timestamping.
+ * The field is optional on some FPGA images; silently ignored when absent.
+ */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (!fld)
+		return;
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* Locked only when every implemented virtual lane reports both locks. */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) &
+		p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) &
+		p->m_fld_vl_demuxed_lock_mask;
+
+	return block_lock == p->m_fld_block_lock_lock_mask &&
+	       lane_lock == p->m_fld_vl_demuxed_lock_mask;
+}
+
+/* Assert (true) or release (false) the TX path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* Assert (true) or release (false) the RX path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* True while the RX path is held in reset. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst) != 0;
+}
+
+/* Start (true) or stop (false) transmitting remote fault indication (RFI). */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* Assert (true) or release (false) forced RX resynchronization. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	field_get_updated(fld);
+	if (!enable)
+		field_clr_flush(fld);
+	else
+		field_set_flush(fld);
+}
+
+/* True when all four GTY RX reset-done fields report the full mask. */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *const flds[] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(flds); i++) {
+		if ((field_get_updated(flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/* True when all four GTY TX reset-done fields report the full mask. */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *const flds[] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(flds); i++) {
+		if ((field_get_updated(flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/* GT loop mode 2 = near-end (host) loopback, 0 = normal operation. */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t mode = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/* GT loop mode 4 = far-end (line) loopback, 0 = normal operation. */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t mode = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP error counters (presumably read-to-clear -
+ * confirm against the register spec) and, in DEBUG builds, log any
+ * lane with a non-zero error count.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane] == 0)
+				continue;
+			NT_LOG(DBG, NTHW,
+			       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+			       p->m_port_no, lane, lane_bit_errors[lane]);
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Report bit 0 of the PCS RX status field through *status. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	const uint32_t val = field_get_updated(p->mp_fld_stat_pcs_rx_status);
+
+	*status = (uint8_t)(val & 0x01);
+}
+
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the link summary register once and fan the fields out to the
+ * caller's output pointers. Every pointer is optional; pass NULL for
+ * values that are not of interest.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+
+	if (p_abs != NULL)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state != NULL)
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	if (p_lh_abs != NULL)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state != NULL)
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	if (p_link_down_cnt != NULL)
+		*p_link_down_cnt =
+			field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr != NULL)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault != NULL)
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	if (p_lh_remote_fault != NULL)
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	if (p_local_fault != NULL)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault != NULL)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * All-zero means idle and all-set means fully locked; anything in between
+ * (a partial lock) indicates a stuck lane/block needing a reset.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+/*
+ * Enable or bypass RS-FEC: an all-zero control value enables FEC,
+ * all-ones in the 5-bit field bypasses it. Both paths are then reset
+ * so the new FEC state takes effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t ctrl = enable ? 0 : ((1 << 5) - 1);
+
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, ctrl);
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* True when the RS-FEC block reports bypass mode. */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass) != 0;
+}
+
+/* True when the RS-FEC block reports valid operation. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid) != 0;
+}
+
+/* True when the RS-FEC lanes report alignment. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn) != 0;
+}
+
+/* True if at least one FEC alignment-marker lane reports lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True only if every FEC alignment-marker lane reports lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* Dump all RS-FEC status fields for this port to the debug log. */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	/* Fixed log label: fourth lane is AM_LOCK_3, not a second AM_LOCK_0. */
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read (and thereby reset) the FEC corrected/uncorrected code word counters,
+ * logging any non-zero values.
+ * NOTE(review): the function only reads the registers; presumably the
+ * counters are clear-on-read in hardware - confirm against the FPGA spec.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the GTY Rx elastic-buffer status for all four lanes.
+ * Returns true (and logs the per-lane status) only if at least one lane both
+ * changed status since the last read AND currently reports a non-zero status.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Program the GTY Tx driver tuning (pre-cursor, differential swing,
+ * post-cursor) for one lane (0-3). Each value is masked to 5 bits, matching
+ * the width of the GTY TX*_CSR / DIFF_CTL ports. A lane outside 0-3 leaves
+ * the hardware untouched (only the summary log line is emitted).
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	/* Apply LPM_EN to all 4 lanes; only the last write flushes to HW */
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/*
+ * Enable/disable Tx polarity inversion on one GTY lane (0-3).
+ * A lane outside 0-3 leaves the hardware untouched.
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	const nt_field_t *fld = NULL;
+
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		fld = p->mp_field_gty_ctl_tx_pol0;
+		break;
+	case 1:
+		fld = p->mp_field_gty_ctl_tx_pol1;
+		break;
+	case 2:
+		fld = p->mp_field_gty_ctl_tx_pol2;
+		break;
+	case 3:
+		fld = p->mp_field_gty_ctl_tx_pol3;
+		break;
+	}
+	if (fld)
+		field_set_val_flush32(fld, swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Enable/disable Rx polarity inversion on one GTY lane (0-3).
+ * A lane outside 0-3 leaves the hardware untouched.
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	const nt_field_t *fld = NULL;
+
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		fld = p->mp_field_gty_ctl_rx_pol0;
+		break;
+	case 1:
+		fld = p->mp_field_gty_ctl_rx_pol1;
+		break;
+	case 2:
+		fld = p->mp_field_gty_ctl_rx_pol2;
+		break;
+	case 3:
+		fld = p->mp_field_gty_ctl_rx_pol3;
+		break;
+	}
+	if (fld)
+		field_set_val_flush32(fld, swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set the port LED mode (see enum nthw_mac_pcs_led_mode_e in the header). */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/*
+ * Set the Rx timestamp compensation delay.
+ * The field pointer may be NULL when the FPGA image has no TIMESTAMP_COMP
+ * register; in that case the call is a no-op.
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Read the PCS block-lock field, refreshed from hardware. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask ? field_get_updated(p->mp_fld_block_lock_lock) : 0;
+}
+
+/* Mask of valid bits in the block-lock field (set up at init time). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Read the per-lane (virtual-lane demuxed) lock field, refreshed from HW. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Mask of valid bits in the lane-lock field (set up at init time). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* Port LED behavior, written to DEBOUNCE_CTRL.NT_PORT_CTRL. */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* GTY receiver equalization modes (see UG578): 0 = DFE, 1 = LPM */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Driver-side view of one MAC_PCS FPGA module instance: the module handle
+ * plus cached register/field handles for every register this code touches.
+ * All mp_* pointers are resolved once at init time; the m_*_mask members
+ * cache bit masks derived from the corresponding fields.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+/* Lifetime: allocate, bind to an FPGA module instance, free. */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+/* Reset / enable control of the Rx and Tx PCS paths */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+/* Link state snapshots read from the LINK_SUMMARY register */
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+/* FEC (Forward Error Correction) control and status */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+/* GTY transceiver tuning (lane is 0-3) */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+/* Raw lock-field accessors (values and init-time masks) */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Refresh a register field from hardware, then set or clear it and flush.
+ * A NULL field pointer (field absent in this FPGA image) is silently ignored.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate and zero-initialize a MAC/PCS XXV instance.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc replaces the malloc + memset pair and zero-fills in one step */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Release an instance created by nthw_mac_pcs_xxv_new(); NULL is a no-op.
+ * The struct is scrubbed before freeing so stale register/field pointers
+ * cannot be reused by accident.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Map a channel index to a port number: index 0 maps to the module instance
+ * number, any other index maps to itself.
+ * NOTE(review): presumably channel 0 of a multi-channel core carries the
+ * instance's own port number - confirm against callers.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return index == 0 ? (uint8_t)p->mn_instance : index;
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register for sub-port `index` and copy the requested
+ * fields out. Every output pointer is optional: pass NULL to skip a field.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* Fix: assert must run before p is dereferenced, not after */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Set/clear CORE_CONF.TX_ENABLE for sub-port `index` and flush. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear CORE_CONF.RX_ENABLE for sub-port `index` and flush. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear CORE_CONF.RX_FORCE_RESYNC for sub-port `index` and flush. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the Rx GT data reset (SUB_RST.RX_GT_DATA). */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the Tx GT data reset (SUB_RST.TX_GT_DATA). */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the auto-negotiation / link-training reset (SUB_RST.AN_LT). */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the speed-control reset (SUB_RST.SPEED_CTRL). */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear CORE_CONF.TX_SEND_RFI (remote fault indication) and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear CORE_CONF.TX_SEND_LFI (local fault indication) and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Write both LFI and RFI in a single register flush (one bus transaction). */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+/* DFE is active exactly when GTY_CTL_RX.LPM_EN is 0 (the modes are exclusive). */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
+/*
+ * Select the GTY receiver equalizer: enable=true selects DFE (LPM_EN = 0),
+ * enable=false selects LPM. The equalizer reset is then pulsed so the new
+ * setting takes effect.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Set/clear GTY Rx polarity inversion for sub-port `index` and flush. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear GTY Tx polarity inversion for sub-port `index` and flush. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the GTY Tx inhibit bit for sub-port `index` and flush. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Enable/disable host-side (near-end) loopback.
+ * NOTE(review): the value 2 written on enable presumably selects near-end
+ * PMA loopback on the GTY LOOPBACK port - confirm against UG578.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Enable/disable line-side loopback (CORE_CONF.LINE_LOOPBACK). */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True while the user Rx reset (SUB_RST_STATUS.USER_RX_RST) is asserted. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+/* True while the user Tx reset (SUB_RST_STATUS.USER_TX_RST) is asserted. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* 3 == both QPLL lock bits set */
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/* Ready when the QPLLs are locked and neither user Rx nor Tx reset is held. */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (nthw_mac_pcs_xxv_is_qpll_lock(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_rx_rst(p, index) &&
+		!nthw_mac_pcs_xxv_is_user_tx_rst(p, index));
+}
+
+/* True when auto-negotiation is enabled (ANEG_CONFIG.ENABLE). */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+/* Set/clear CORE_CONF.TX_SEND_IDLE and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear CORE_CONF.TX_INS_FCS (hardware FCS insertion) and flush. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* True when the link speed is 10G (field value 1), false for 25G (0). */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+/* Select 10G (enable=true) or 25G (enable=false) link speed. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Pulse the link-speed toggle field to apply a speed change. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+/* Enable/disable RS-FEC (RS_FEC_CONF.RS_FEC_ENABLE) and flush. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set the port LED mode via DEBOUNCE_CTRL.NT_PORT_CTRL. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+/* Assert/deassert the Rx MAC/PCS reset (SUB_RST.RX_MAC_PCS). */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the Tx MAC/PCS reset (SUB_RST.TX_MAC_PCS). */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Read (and thereby reset) the RS-FEC corrected/uncorrected code word
+ * counters for sub-port `index`, logging any non-zero values.
+ * NOTE(review): only reads are performed; presumably the counters are
+ * clear-on-read in hardware - confirm against the FPGA spec.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	/* Fix: add the trailing newline every other NT_LOG in these files uses */
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Set the Rx timestamp compensation delay for sub-port `index`. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+/* Set the Tx timestamp compensation delay for sub-port `index`. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+/* Set/clear CORE_CONF.TS_AT_EOP (timestamp at end-of-packet) and flush. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Program the GTY Tx differential swing control (DIFF_CTL). */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+/* Program the GTY Tx pre-cursor emphasis (PRE_CURSOR). */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+/* Program the GTY Tx post-cursor emphasis (POST_CURSOR). */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+/* Enable/disable link training (LT_CONF.ENABLE) and flush. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the ANEG FEC91 (Clause 91 RS-FEC) request bit and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the ANEG RS-FEC request bit and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the ANEG FEC74 (Clause 74 BASE-R FEC) request bit and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable auto-negotiation (ANEG_CONFIG.ENABLE) and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the auto-negotiation bypass bit (ANEG_CONFIG.BYPASS) and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Configure the direct-attach-cable mode for sub-port `index`.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: it disables AN/LT and pulses
+ * the MAC/PCS and GT data resets (assert all, then deassert all, in the
+ * fixed order below). Any other mode triggers an assert.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Latched-low RX FEC74 lock indication for the given channel. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f);
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Latched-low RX RS-FEC lane alignment indication for the given channel. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_0
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_0
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Port LED modes, used with nthw_mac_pcs_xxv_set_led_mode().
+ * The setter takes a plain uint8_t, so these values are presumably written
+ * verbatim to the LED control field — confirm against the FPGA register spec.
+ */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,	/* automatic (presumably link/activity driven) */
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,	/* force LED on */
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,	/* force LED off */
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,	/* port identification mode — confirm semantics */
+};
+
+/*
+ * Direct-attach copper (DAC) cable modes, used with
+ * nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): CA_25G_N/S/L presumably map to the 25G direct-attach
+ * reach classes CA-N/CA-S/CA-L (IEEE 802.3by) — confirm with FPGA docs.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,	/* DAC handling disabled */
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Context for one MAC_PCS_XXV FPGA module instance.
+ *
+ * regs[] caches the nt_register_t / nt_field_t handles for each
+ * sub-module/channel (up to NTHW_MAC_PCS_XXV_NUM_ELEMS); the handles are
+ * resolved once at init time so the accessor functions avoid repeated
+ * register-model lookups.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;	/* backing FPGA instance */
+	nt_module_t *mp_mod_mac_pcs_xxv;	/* resolved MAC_PCS_XXV module handle */
+	int mn_instance;	/* module instance number */
+
+	uint8_t m_port_no;	/* physical port number (see set/get_port_no) */
+
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		/* NOTE(review): not assigned by the visible init code — verify before use */
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+/*
+ * Public API. Most functions take an 'index' argument selecting the
+ * sub-module/channel (0 .. NTHW_MAC_PCS_XXV_NUM_ELEMS - 1).
+ */
+
+/* Construction / teardown / initialization */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+/* Read the LINK_SUMMARY register fields for one channel (all out-params) */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+/* TX/RX datapath enable and resync */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+/* Sub-block reset control (SUB_RST register fields) */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+/* Local/remote fault-indication transmit control */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* GTY transceiver lane polarity / inhibit / loopback */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+/* Reset/status queries (SUB_RST_STATUS register fields) */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* Link speed (LINK_SPEED register; not available in 8x10G builds) */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+/* mode: one of enum nthw_mac_pcs_xxv_led_mode_e */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+/* GTY TX driver tuning (diff swing, pre-/post-cursor emphasis) */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+/* Auto-negotiation and link-training configuration */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+/* dac_mode: one of enum nthw_mac_pcs_xxv_dac_mode_e */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+/* Timestamp compensation delays (TIMESTAMP_COMP register) */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+/* FEC request bits advertised during auto-negotiation */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/* Allocate a zero-initialized PCI_RD_TG context; NULL on allocation failure. */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	nthw_pci_rd_tg_t *p = malloc(sizeof(nthw_pci_rd_tg_t));
+
+	if (p == NULL)
+		return NULL;
+
+	memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Release a PCI_RD_TG context; safe to call with NULL. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* scrub before freeing to help catch stale-pointer use */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_RD_TG context to an FPGA module instance.
+ *
+ * Resolves the PCI_RD_TG module and caches the register/field handles used
+ * by the accessor functions below.
+ *
+ * Probe mode: when called with p == NULL the function only tests for the
+ * module's presence — it returns 0 if the instance exists, -1 otherwise.
+ * With a non-NULL p it returns 0 on success, -1 if the instance is missing.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* Cache register/field handles: DMA address low/high words */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* Request size and wait/wrap flags share the RDDATA2 register */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id — query_field presumably yields NULL when the
+	 * field is absent (unlike get_field); confirm in the register model
+	 */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit host physical address into the low/high 32-bit
+ * register fields.
+ *
+ * Fix: the previous mask "(1UL << 32) - 1" is undefined behavior on
+ * targets where unsigned long is 32 bits (shift count equals the type
+ * width, CERT INT34-C). Truncating casts on the uint64_t value are
+ * well-defined on every target and produce the same words on LP64.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Write the TG RAM address field and flush it to hardware. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	nt_field_t *const addr_fld = p->mp_fld_pci_rd_tg_ram_addr;
+
+	field_set_val_flush32(addr_fld, n_ram_addr);
+}
+
+/*
+ * Stage the request size and the wait/wrap flags, then flush them to
+ * hardware. All three fields live in the RDDATA2 register (see init),
+ * so one flush via the last field writes them together.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait ? 1U : 0U);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap ? 1U : 0U);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Write the iteration count to the RD_RUN register and flush it. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	nt_field_t *const run_fld = p->mp_fld_pci_rd_tg_run_iteration;
+
+	field_set_val_flush32(run_fld, n_iterations);
+}
+
+/* Read back the current TG_RD_RDY control/status field from hardware. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	nt_field_t *const rdy_fld = p->mp_fld_pci_rd_tg_ctrl_rdy;
+
+	return field_get_updated(rdy_fld);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_PCI_RD_TG_H_
+#define NTHW_PCI_RD_TG_H_
+
+/*
+ * Fixes: the old guard __NTHW_PCI_RD_TG_H__ used a reserved identifier
+ * (leading double underscore, C11 7.1.3); the header also relied on its
+ * includer for uint32_t/uint64_t/bool and the nt_* model types — include
+ * them here so the header is self-contained, matching nthw_mac_pcs_xxv.h.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Context for the PCIe read traffic generator (PCI_RD_TG) module:
+ * cached register/field handles resolved by nthw_pci_rd_tg_init().
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;	/* backing FPGA instance */
+	nt_module_t *mp_mod_pci_rd_tg;	/* resolved PCI_RD_TG module handle */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* NT_PCI_TA_TG_PRESENT product param */
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* optional; may be NULL */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* NTHW_PCI_RD_TG_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate and zero-initialize a PCI_TA context.
+ * Returns NULL on allocation failure; the caller owns the memory and
+ * must release it with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_pci_ta_t *p = calloc(1, sizeof(nthw_pci_ta_t));
+
+	return p;
+}
+
+/*
+ * Release a PCI_TA context obtained from nthw_pci_ta_new().
+ * NULL is accepted and ignored.  The struct is scrubbed before being
+ * freed so stale register handles cannot be reused by accident.
+ */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_TA context to FPGA module instance n_instance and resolve
+ * all register/field handles used by the accessors below.
+ *
+ * Probe idiom: when called with p == NULL the function only reports
+ * whether the module instance exists (0 = present, -1 = absent).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* default 1 when the parameter is absent from the FPGA image */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* counter registers: good/bad packets, length and payload errors */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write the PCI_TA enable bit and flush the register to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the current good-packet counter (refreshes from hardware). */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the current bad-packet counter (refreshes from hardware). */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the current length-error counter (refreshes from hardware). */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the current payload-error counter (refreshes from hardware). */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * NTHW abstraction of the PCI_TA FPGA module (PCIe traffic analyzer).
+ * Holds register/field handles for the enable control and the four
+ * statistics counters, resolved once by nthw_pci_ta_init().
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_pci_ta;	/* PCI_TA module handle */
+	int mn_instance;	/* module instance index */
+
+	/* product parameter NT_PCI_TA_TG_PRESENT read at init */
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+/* Allocate a zeroed context; release with nthw_pci_ta_delete(). */
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+/* Resolve registers/fields for the given instance; returns 0 on success. */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate and zero-initialize a PCI_WR_TG context.
+ * Returns NULL on allocation failure; the caller owns the memory and
+ * must release it with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_pci_wr_tg_t *p = calloc(1, sizeof(nthw_pci_wr_tg_t));
+
+	return p;
+}
+
+/*
+ * Release a PCI_WR_TG context obtained from nthw_pci_wr_tg_new().
+ * NULL is accepted and ignored.  The struct is scrubbed before being
+ * freed so stale register handles cannot be reused by accident.
+ */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_WR_TG context to FPGA module instance n_instance and
+ * resolve all register/field handles used by the accessors below.
+ *
+ * Probe idiom: when called with p == NULL the function only reports
+ * whether the module instance exists (0 = present, -1 = absent).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* default 1 when the parameter is absent from the FPGA image */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* WRDATA0/WRDATA1 together hold the 64-bit DMA physical address */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* WRDATA2: request size, increment mode and wait/wrap flags */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id: query (not get) so a missing field yields NULL
+	 * instead of failing on FPGA images without SR-IOV support
+	 */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address into the write TG's two
+ * 32-bit address registers (low word first, then high word; each write
+ * is flushed to hardware).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	/*
+	 * Mask with a ULL constant: the previous (1UL << 32) is undefined
+	 * behavior on targets where unsigned long is 32 bits wide.
+	 */
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xFFFFFFFFULL));
+}
+
+/* Select the TG RAM address to program (write is flushed immediately). */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one TG RAM entry: request size plus wait/wrap/increment flags.
+ * The fields are set in the register shadow and flushed once at the end
+ * so all four land in hardware as a single register write.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the write generator for n_iterations iterations. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the TG ready flag from hardware. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * NTHW abstraction of the PCI_WR_TG FPGA module (PCIe write traffic
+ * generator).  All register/field handles are resolved once at init
+ * time and cached here for later access.
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_pci_wr_tg;	/* PCI_WR_TG module handle */
+	int mn_instance;	/* module instance index */
+
+	/* product parameter NT_PCI_TA_TG_PRESENT read at init */
+	int mn_param_pci_ta_tg_present;
+
+	/* WRDATA0: low 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	/* WRDATA1: high 32 bits of the DMA physical address */
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	/* WRDATA2: request size/host-id, increment mode, wait/wrap flags.
+	 * req_hid is optional and may be NULL on images without it. */
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	/* WRADDR: RAM address to program */
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	/* WR_RUN: iteration count that starts the generator */
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	/* CTRL: ready status */
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	/* SEQ: sequence number register */
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+/* Allocate a zeroed context; release with nthw_pci_wr_tg_delete(). */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+/* Resolve registers/fields for the given instance; returns 0 on success. */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate and zero-initialize a PCIe3 context.
+ * Returns NULL on allocation failure; the caller owns the memory and
+ * must release it with nthw_pcie3_delete().
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_pcie3_t *p = calloc(1, sizeof(nthw_pcie3_t));
+
+	return p;
+}
+
+/*
+ * Release a PCIe3 context obtained from nthw_pcie3_new().
+ * NULL is accepted and ignored.  The struct is scrubbed before being
+ * freed so stale register handles cannot be reused by accident.
+ */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCIe3 context to FPGA module instance n_instance, resolve all
+ * register/field handles, and apply the initial endpoint setup
+ * (marker addresses zeroed, DMA allow masks programmed).
+ *
+ * Probe idiom: when called with p == NULL the function only reports
+ * whether the module instance exists (0 = present, -1 = absent).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* root-port <-> end-point error reporting */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/* NOTE(review): EP0 allow mask is re-enabled and EP1 cleared a
+	 * second time here, presumably to end with only EP0 DMA enabled
+	 * (no bifurcation) -- confirm against hardware documentation.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/*
+ * Latch the statistics counters by writing the magic trigger value to
+ * the sample-time register.  Always returns 0.
+ */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection (ENA=1) and request an update (REQ=1). */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection (ENA=0) while still issuing REQ=1. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read all raw PCIe statistics counters from hardware in one call.
+ * The TG unit size and reference frequency outputs are compile-time
+ * constants, not hardware reads.  Always returns 0; all output
+ * pointers must be non-NULL.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Derive PCIe RX/TX rates and bus-utilization figures (parts per
+ * million of the sample window) from the raw counters.
+ *
+ * Every output parameter is written: when the reference-clock count is
+ * zero (empty sample window) all outputs are reported as zero instead
+ * of dividing by zero.  Always returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		/* scale the counter over the window to per-second units */
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		/*
+		 * Fix: previously the rate and tag outputs were left
+		 * untouched on this path, handing callers uninitialized
+		 * values; zero all outputs instead.
+		 */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		*p_tag_use_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Pre-sample hook for endpoint counters.  Intentionally a no-op on
+ * PCIe3 hardware; kept so callers can use the same pre/post protocol
+ * across interface types.  Always returns 0.
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the endpoint counter struct from the current
+ * statistics.  Always returns 0.
+ *
+ * NOTE(review): cur_tx receives the RX-rate output and cur_rx the
+ * TX-rate output -- presumably deliberate (device RX = host TX
+ * perspective swap), but confirm against the callers.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * NTHW abstraction of the PCIE3 FPGA module (PCIe gen3 interface with
+ * statistics, error reporting and endpoint control).  Register/field
+ * handles are resolved once by nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_pcie3;	/* PCIE3 module handle */
+	int mn_instance;	/* module instance index */
+
+	/* statistics control (enable/request) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	/* RX/TX statistics counters */
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	/* request-queue ready/valid counters */
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	/* outstanding-tag status */
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference-clock tick counter for rate computation */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port to end-point error signalling */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* end-point to root-port error signalling */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	/* statistics sample-time trigger */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* endpoint control: interface id, mailbox, DMA allow masks */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* marker address (LSB/MSB), zeroed at init */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never populated by
+	 * nthw_pcie3_init() in this patch -- they remain NULL. */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+/* Allocate a zeroed context; release with nthw_pcie3_delete(). */
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+/* Resolve registers/fields for the given instance; returns 0 on success. */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate and zero-initialize an SDC context.
+ * Returns NULL on allocation failure; the caller owns the memory and
+ * must release it with nthw_sdc_delete().
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset) */
+	nthw_sdc_t *p = calloc(1, sizeof(nthw_sdc_t));
+
+	return p;
+}
+
+/*
+ * Release an SDC context obtained from nthw_sdc_new().
+ * NULL is accepted and ignored.  The struct is scrubbed before being
+ * freed so stale register handles cannot be reused by accident.
+ */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an SDC context to FPGA module instance n_instance and resolve
+ * the control, status and fill-level field handles.
+ *
+ * Probe idiom: when called with p == NULL the function only reports
+ * whether the module instance exists (0 = present, -1 = absent).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* control register fields */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* status register fields (polled by get/wait_states) */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* cell counters and fill levels */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Helper: read one status field, fold its raw value into the running
+ * state mask (field-width bits, newest field in the least significant
+ * position) and report whether it matches the expected pattern.
+ * Returns 0 when the field is as expected, 1 otherwise.
+ */
+static int nthw_sdc_accum_state(nt_field_t *p_fld, int expect_all_set,
+			      uint64_t *pn_mask)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	/* 64-bit shift: (1 << width) is UB for field widths of 31+ bits */
+	const uint32_t val_mask = (uint32_t)(((uint64_t)1 << n_val_width) - 1);
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	if (expect_all_set)
+		return val == val_mask ? 0 : 1;
+	return val == 0 ? 0 : 1;
+}
+
+/*
+ * Sample the SDC status fields (calibration, init done, MMCM/PLL lock,
+ * resetting) and pack their raw values into *pn_result_mask.
+ * Returns the number of fields not in their expected state (0 = all
+ * ok), or -1 when p or pn_result_mask is NULL.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	/* the first four must be all-ones; resetting must be zero */
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_calib, 1, &n_mask);
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_init_done, 1, &n_mask);
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_mmcm_lock, 1, &n_mask);
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_pll_lock, 1, &n_mask);
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_resetting, 0, &n_mask);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reports a healthy state: calibration, init-done
+ * and both clock locks must go high, and the resetting flag must go
+ * low.  Each field is polled up to n_poll_iterations times with
+ * n_poll_interval between attempts.
+ * Returns the number of fields that never reached the expected state
+ * (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int n_err_cnt = 0;
+	size_t i;
+	nt_field_t *const a_set_fields[] = {
+		p->mp_fld_stat_calib,
+		p->mp_fld_stat_init_done,
+		p->mp_fld_stat_mmcm_lock,
+		p->mp_fld_stat_pll_lock,
+	};
+
+	/* these four must become all-ones */
+	for (i = 0; i < sizeof(a_set_fields) / sizeof(a_set_fields[0]); i++) {
+		if (field_wait_set_all32(a_set_fields[i], n_poll_iterations,
+					 n_poll_interval) != 0)
+			n_err_cnt++;
+	}
+
+	/* the resetting flag must drop to zero */
+	if (field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * NTHW abstraction of the SDC FPGA module (SDRAM/memory controller
+ * status and test).  Field handles are resolved once by
+ * nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_sdc;	/* SDC module handle */
+	int mn_instance;	/* module instance index */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields, polled by nthw_sdc_get_states/wait_states */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* cell counters and fill levels */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+/* Allocate a zeroed context; release with nthw_sdc_delete(). */
+nthw_sdc_t *nthw_sdc_new(void);
+/* Resolve registers/fields for the given instance; returns 0 on success. */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/* Allocate a zero-initialized Si5340 context; returns NULL on allocation failure */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	return calloc(1, sizeof(nthw_si5340_t));
+}
+
+/*
+ * Bind the context to an I2C controller/address and select device page 0 so
+ * the cached page tracking starts in a known state. Always returns 0.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1;	/* no clock profile applied yet */
+
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Scrub and free the context; safe to call with NULL */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ *
+ * reg_addr encodes the page in the high byte and the register offset in the
+ * low byte; the page register is only rewritten when the page changes.
+ * NOTE(review): I2C status is not checked - on a failed read the returned
+ * 'data' is indeterminate.
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);	/* register offset */
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);	/* register page */
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;	/* remember the selected page */
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ *
+ * reg_addr encodes the page in the high byte and the register offset in the
+ * low byte; the page register is only rewritten when the page changes.
+ * Always returns 0 (I2C status is not propagated).
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);	/* register offset */
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);	/* register page */
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;	/* remember the selected page */
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Apply a clock profile: write each (addr, value) pair from p_data to the
+ * device and read it back for verification.
+ * Supports the two profile record layouts (fmt1/fmt2).
+ * Returns 0 on success, -1 on unknown format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* decode one record and advance p_data by one record size */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		/* verify the write took effect */
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and wait for the device to settle: poll status
+ * (reg 0x0c) and sticky status (reg 0x11) up to 5 times at 1 s intervals
+ * until DPLL lock and SYS calibration bits clear, then read the design id.
+ * Returns 0 on success, -1 if the device never reports locked/calibrated.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00);	/* clear sticky bits */
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* read the NUL-terminated design identifier (registers 0x26B..) */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a format-1 clock profile */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a format-2 clock profile */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+/* Result codes for Si5340 operations */
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* State for one Si5340 clock synthesizer accessed over I2C */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C controller used for access */
+	int mn_clk_cfg;	/* selected clock profile (-1 = none) */
+	uint8_t m_si5340_page;	/* cached register page currently selected */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+/* Apply a clock profile and verify the device locks; see nthw_si5340.c */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/* Allocate a zero-initialized SPI v3 context; returns NULL on allocation failure */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	return calloc(1, sizeof(nthw_spi_v3_t));
+}
+
+/* Release the SPIM/SPIS sub-modules, scrub and free; safe to call with NULL */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (!p)
+		return;
+
+	if (p->mp_spim_mod) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+
+	if (p->mp_spis_mod) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Set the transfer timeout used by the Tx/Rx polling loops; always returns 0 */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this module (always 3) */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ *
+ * Polls the SPIM Tx-FIFO-empty flag every 1 ms. time_out is compared against
+ * the monotonic counter delta; assumes both use the same time base -
+ * TODO confirm the unit of NT_OS_GET_TIME_MONOTONIC_COUNTER().
+ * Returns 0 when the FIFO drained, -1 on timeout, or the module error code.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ *
+ * Polls the SPIS Rx-FIFO-empty flag every 10 ms until data are available.
+ * Returns 0 when data are ready, -1 on timeout, or the module error code.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/*
+ * Debug helper: log 'count' bytes as hex, 16 bytes per line.
+ * tmp_str is sized for 16 entries of "XX " (48 chars) plus NUL, so the
+ * sprintf writes stay in bounds.
+ */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;	/* column within the current output line */
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
+		j++;
+
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0';	/* drop trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Initialize the SPI v3 channel for the given FPGA instance: create and
+ * initialize the SPIM (master) and SPIS (slave) sub-modules, then reset both.
+ * Failures are logged but initialization continues; the return value is the
+ * result of the final reset (0 on success).
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* BUGFIX: message previously named nthw_spis_init here */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* BUGFIX: message previously named nthw_spim_init here */
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ *
+ * Wire format: one 32-bit big-endian header word (opcode + payload size)
+ * followed by the payload in big-endian 32-bit words; the reply leads with a
+ * header word carrying an error code and the reply payload size.
+ * On return rx_buf->size holds the number of payload bytes actually copied.
+ * Returns 0 on success, -1 on device-reported error, 1 if the reply would
+ * not fit in rx_buf, or a sub-module error code.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;	/* caller's capacity */
+	int result = 0;
+
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Stage the payload into the Tx FIFO, 4 bytes per word; the
+		 * final partial word (if any) is zero-padded.
+		 */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;	/* zero-pad the last word */
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;	/* becomes the count of bytes received */
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;	/* whole payload consumed */
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the on-board AVR controller */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+/* Field sizes of the Gen2 Vital Product Data EEPROM layout */
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM Vital Product Data layout (Gen2 boards) */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (10bytes ascii) (e.g. "ntmainb1e2" or "ntfront20b1") */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identification/configuration data */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Caller-owned buffer descriptor for nthw_spi_v3_transfer() */
+struct tx_rx_buf {
+	uint16_t size;	/* in: capacity (rx) / payload length (tx); out: bytes received */
+	void *p_buf;
+};
+
+/* SPI v3 channel: a SPIM (master) / SPIS (slave) module pair */
+struct nthw__spi__v3 {
+	int m_time_out;	/* transfer timeout used by the polling loops */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/* Allocate a zero-initialized SPIM context; returns NULL on allocation failure */
+nthw_spim_t *nthw_spim_new(void)
+{
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Resolve all SPIM register and field handles for the given FPGA instance.
+ * When called with p == NULL the function only probes for the module
+ * (returns 0 if the instance exists, -1 otherwise).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* control register */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* status register */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* Tx data register */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	/* Rx data register */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	/* configuration (prescaler) register */
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Scrub and free the SPIM context; safe to call with NULL */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIM module via the SRR register; always returns 0 */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	const uint32_t n_srr_rst_magic = 0x0A;	/* hardcoded value - see doc */
+
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst, n_srr_rst_magic);
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Enable or disable the SPIM Tx path via the CR.EN field; always returns 0 */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_cr_en;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_all(p_fld_en);
+	else
+		field_clr_all(p_fld_en);
+	field_flush_register(p_fld_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM Tx FIFO; always returns 0 */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/* Report whether the SPIM Tx FIFO is empty via *pb_empty; always returns 0 */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+	*pb_empty = (field_get_updated(p->mp_fld_sr_txempty) != 0);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/*
+ * SPIM (SPI master) module wrapper.
+ * Register-field handles are resolved once in nthw_spim_init().
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_spim;	/* SPIM module handle */
+	int mn_instance;	/* module instance number */
+
+	/* SRR - software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR - status */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR - Tx data */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR - Rx data, CFG - prescaler */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/* Allocate a zero-initialized SPIS context; returns NULL on allocation failure */
+nthw_spis_t *nthw_spis_new(void)
+{
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Resolve all SPIS register and field handles for the given FPGA instance.
+ * When called with p == NULL the function only probes for the module
+ * (returns 0 if the instance exists, -1 otherwise).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* SRR - software reset */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* CR - control */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* SR - status, including error flags */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* DTR - Tx data */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	/* DRR - Rx data */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* RAM_CTRL/RAM_DATA - sensor result RAM access */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/* Scrub and free the SPIS context; safe to call with NULL */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIS module via the SRR register; always returns 0 */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	const uint32_t n_srr_rst_magic = 0x0A;	/* hardcoded value - see doc */
+
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst, n_srr_rst_magic);
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Enable or disable the SPIS module via the CR.EN field; always returns 0 */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_cr_en;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_all(p_fld_en);
+	else
+		field_clr_all(p_fld_en);
+	field_flush_register(p_fld_en);
+
+	return 0;
+}
+
+/* Report whether the SPIS Rx FIFO is empty via *pb_empty; always returns 0 */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+	*pb_empty = (field_get_updated(p->mp_fld_sr_rxempty) != 0);
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS Rx FIFO into *p_data; always returns 0 */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS sensor RAM.
+ * n_result_idx selects the RAM address; the value is returned via
+ * p_sensor_result. Always returns 0.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	/* select the RAM address and request a single-word read */
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/*
+ * SPIS (SPI slave) module wrapper.
+ * Register-field handles are resolved once in nthw_spis_init().
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_spis;	/* SPIS module handle */
+	int mn_instance;	/* module instance number */
+
+	/* SRR - software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR - status, including error flags */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR - Tx data */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR - Rx data */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* RAM_CTRL/RAM_DATA - sensor result RAM access */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate a zero-initialized TSM device struct.
+ * Returns NULL on allocation failure; release with nthw_tsm_delete().
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/* Scrub and free a TSM device struct; NULL is accepted and ignored. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TSM (timestamp module) instance: locate the FPGA module and
+ * cache the register/field handles used by the accessors below.
+ *
+ * When p is NULL the call acts as a probe: returns 0 if the module
+ * instance exists, -1 otherwise. With a valid p, returns 0 on success
+ * or -1 when the instance is not found.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse p_reg for the stored handle instead of a second,
+		 * redundant module_get_register() lookup per register.
+		 */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit timestamp as TS_HI:TS_LO.
+ * NOTE(review): the two 32-bit halves are read with separate register
+ * accesses, so the combined value is not sampled atomically - confirm
+ * the hardware latches HI on LO read (or vice versa).
+ * Returns 0 on success, -1 if p_ts is NULL.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* p_ts already validated above; removed the redundant re-check */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit time as TIME_HI (seconds field) : TIME_LO (ns field).
+ * NOTE(review): halves are read non-atomically - same caveat as
+ * nthw_tsm_get_ts().
+ * Returns 0 on success, -1 if p_time is NULL.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* p_time already validated above; removed the redundant re-check */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/* Program the 64-bit time: low word first, then high word; always 0. */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	const uint32_t n_lo = (uint32_t)(n_time & 0xFFFFFFFF);
+	const uint32_t n_hi = (uint32_t)((n_time >> 32) & 0xFFFFFFFF);
+
+	field_set_val_flush32(p->mp_fld_time_lo, n_lo);
+	field_set_val_flush32(p->mp_fld_time_hi, n_hi);
+	return 0;
+}
+
+/* Enable or disable timer T0 (read-modify-write of the control field). */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/* Set timer T0 (stat toggle timer) period; value in ns, e.g. 50*1000*1000. */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *const fld = p->mp_fld_timer_timer_t0_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/* Enable or disable timer T1 (read-modify-write of the control field). */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/* Set timer T1 (keep-alive timer) period; value in ns, e.g. 100*1000*1000. */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	nt_field_t *const fld = p->mp_fld_timer_timer_t1_max_count;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_timer_val);
+	return 0;
+}
+
+/*
+ * Select the timestamp format.
+ * 0x1: Native - 10ns units, start date: 1970-01-01.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	nt_field_t *const fld = p->mp_fld_config_ts_format;
+
+	field_update_register(fld);
+	field_set_val_flush32(fld, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/* Device handle for one TSM (timestamp module) FPGA instance.
+ * Holds cached register/field handles filled in by nthw_tsm_init().
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_tsm;	/* TSM module handle */
+	int mn_instance;	/* module instance number */
+
+	nt_field_t *mp_fld_config_ts_format;	/* CONFIG.TS_FORMAT */
+
+	/* TIMER_CTRL enables for the two timers */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;	/* T0 period */
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;	/* T1 period */
+
+	/* 64-bit timestamp, split across two 32-bit registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* 64-bit time (ns low word, seconds high word) */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+/* Lifecycle: allocate, destroy, bind to an FPGA module instance. */
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* 64-bit timestamp/time accessors; 0 on success, -1 on NULL out-pointer. */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+/* Timer T0/T1 control (enable and period in ns). */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+/* Forward declarations for the shadow-copy helpers: each set_shadow_*
+ * function updates the driver-side copy of a DBS table entry, and the
+ * matching flush_* function writes that entry to hardware.
+ */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate a zero-initialized DBS device struct.
+ * Returns NULL on allocation failure; release with nthw_dbs_delete().
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/* Scrub and free a DBS device struct; NULL is accepted and ignored. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a DBS (doorbell/queue supervisor) instance: locate the FPGA module
+ * and cache every register/field handle used by the accessors in this file.
+ *
+ * module_get_register/register_get_field are used for mandatory registers;
+ * module_query_register/register_query_field return NULL for optional ones,
+ * and the accessors check those pointers before use.
+ *
+ * When p is NULL the call acts as a probe: returns 0 if the module
+ * instance exists, -1 otherwise. With a valid p, returns 0 on success
+ * or -1 when the instance is not found.
+ */
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	/* Sanity: module found but product parameter says not present */
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	/* Global RX/TX control registers */
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	/* Queue init registers (INIT_VAL/PTR registers are optional) */
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	/* Optional idle registers */
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	/* Avail-monitor (AM) table access registers */
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	/* Used-writer (UW) table access registers */
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	/* Descriptor-reader (DR) table access registers */
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	/* TX queue property (QP) table access registers */
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 (optional) */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/* Put the RX control register in its default/disabled state: all queues
+ * and monitors off. Always returns 0.
+ * NOTE(review): scan speed 8 / update speed 5 mirrors the 5/8 pair in
+ * dbs_reset_tx_control() - confirm these defaults are intentional.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/* Put the TX control register in its default/disabled state: all queues
+ * and monitors off. Always returns 0.
+ * NOTE(review): scan speed 5 / update speed 8 is the mirror image of the
+ * RX defaults - confirm the values were not accidentally swapped.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/* Full DBS reset: disable RX/TX control, then zero every per-queue table
+ * entry (AM, UW, DR, and for TX also QP and QoS) in both the driver-side
+ * shadow copy and the hardware banks.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/* Write all RX control fields in one shot and flush the register. */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable,
+		       avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed,
+		       used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/* Read back the cached RX control fields into the caller's out-params. */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue =
+		field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable =
+		field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable =
+		field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/* Write all TX control fields in one shot and flush the register. */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* was missing: RX counterpart prints its queue-enable too */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/* Read back the cached TX control fields into the caller's out-params. */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue =
+		field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable =
+		field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable =
+		field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/* Start RX queue initialization; the optional INIT_VAL register (start
+ * index/pointer) is written first when the FPGA provides it.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val != NULL) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/* Read the RX init register's init/queue/busy fields (cached values). */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init  = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy  = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/* Start TX queue initialization; the optional INIT_VAL register (start
+ * index/pointer) is written first when the FPGA provides it.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val != NULL) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/* Read the TX init register's init/queue/busy fields (cached values). */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init  = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy  = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/* Request an RX idle check for a queue; -ENOTSUP when the optional
+ * DBS_RX_IDLE register is absent in this FPGA image.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/* Read RX idle status; -ENOTSUP when the register is absent.
+ * NOTE(review): *queue is hard-wired to 0 rather than read back from the
+ * queue field - confirm this is intended.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	*idle  = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy  = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/* Request a TX idle check for a queue; -ENOTSUP when the optional
+ * DBS_TX_IDLE register is absent in this FPGA image.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/* Read TX idle status; -ENOTSUP when the register is absent.
+ * NOTE(review): *queue is hard-wired to 0 - confirm intended (see RX). */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	*idle  = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy  = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/* Select the queue whose RX pointer will be sampled; -ENOTSUP when the
+ * optional DBS_RX_PTR register is absent.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (p->mp_reg_rx_ptr == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/* Read the sampled RX pointer and its valid flag; -ENOTSUP when absent.
+ * NOTE(review): *queue is hard-wired to 0 - confirm intended. */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (p->mp_reg_rx_ptr == NULL)
+		return -ENOTSUP;
+
+	*ptr   = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/* Select the queue whose TX pointer will be sampled; -ENOTSUP when the
+ * optional DBS_TX_PTR register is absent.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (p->mp_reg_tx_ptr == NULL)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/*
+ * RX available-monitor (AM) bank access. The per-queue settings are kept in
+ * a host-side shadow (p->m_rx_am_shadow[]) and written to hardware by
+ * selecting a bank index in the control register and flushing the data
+ * register (write-index-then-flush-data sequence).
+ */
+
+/* Select shadow entry 'index' in the RX AM control register (cnt = 1). */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow-only setter: guest physical address of the avail ring. */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/*
+ * Shadow-only setter: enable flag.
+ * NOTE(review): the nthw_dbs_ prefix is inconsistent with the other
+ * set_shadow_rx_am_data_* siblings -- consider renaming for uniformity.
+ */
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+/* Shadow-only setter: host id. */
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+/* Shadow-only setter: packed (virtio packed-ring) flag. */
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+/* Shadow-only setter: interrupt enable flag. */
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Populate all shadow fields of one RX AM entry. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write one shadow entry to hardware. The 64-bit guest physical address is
+ * passed as two 32-bit words via a uint32_t* cast of the shadow field --
+ * assumes the field API consumes the words in host (little-endian) order;
+ * TODO confirm. The packed/int fields are optional and only written when
+ * present in this FPGA image.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update shadow and write one RX available-monitor entry.
+ * No bounds check on 'index' (caller must keep it < NT_DBS_RX_QUEUES_MAX).
+ * Returns -ENOTSUP when the data register is absent, 0 otherwise.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * TX available-monitor (AM) bank access -- TX counterpart of the RX AM
+ * helpers above, using the same shadow-then-flush sequence. Note the shadow
+ * is populated by a single combined setter here instead of per-field setters.
+ */
+
+/* Select shadow entry 'index' in the TX AM control register (cnt = 1). */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Populate all shadow fields of one TX AM entry. */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Write one shadow entry to hardware. The 64-bit guest physical address is
+ * written as two 32-bit words via a uint32_t* cast -- assumes little-endian
+ * word order; TODO confirm. Optional packed/int fields are only written
+ * when present in this FPGA image.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update shadow and write one TX available-monitor entry.
+ * No bounds check on 'index' (caller must keep it < NT_DBS_TX_QUEUES_MAX).
+ * Returns -ENOTSUP when the data register is absent, 0 otherwise.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/*
+ * RX used-writer (UW) bank access: shadow-then-flush, same pattern as the
+ * available-monitor helpers above.
+ */
+
+/* Select shadow entry 'index' in the RX UW control register (cnt = 1). */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow-only setter: guest physical address of the used ring. */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-only setter: host id. */
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+/*
+ * Shadow-only setter: queue size, stored as the log2 exponent; the encoding
+ * written to hardware depends on the DBS module version (see flush below).
+ */
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-only setter: packed (virtio packed-ring) flag. */
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+/* Shadow-only setter: interrupt enable flag. */
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+/* Shadow-only setter: interrupt vector. */
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+/* Shadow-only setter: istk flag. */
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Populate all shadow fields of one RX UW entry. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write one shadow entry to hardware. The 64-bit guest physical address is
+ * written as two 32-bit words via a uint32_t* cast (assumes little-endian
+ * word order -- TODO confirm). DBS module versions newer than 0.8 take the
+ * queue size as a mask (2^queue_size - 1); older versions take the raw
+ * exponent. NOTE(review): (1U << queue_size) is undefined for
+ * queue_size >= 32 -- caller must keep the exponent in range.
+ * vec/istk are only written when the optional int field exists.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update shadow and write one RX used-writer entry.
+ * No bounds check on 'index'. Returns -ENOTSUP when the data register is
+ * absent, 0 otherwise.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * TX used-writer (UW) bank access -- TX counterpart of the RX UW helpers,
+ * with one extra per-queue attribute: in_order.
+ */
+
+/* Select shadow entry 'index' in the TX UW control register (cnt = 1). */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow-only setter: guest physical address of the used ring. */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-only setter: host id. */
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+/* Shadow-only setter: queue size (log2 exponent; see flush for encoding). */
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-only setter: packed (virtio packed-ring) flag. */
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+/* Shadow-only setter: interrupt enable flag. */
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+/* Shadow-only setter: interrupt vector. */
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+/* Shadow-only setter: istk flag. */
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+/* Shadow-only setter: in-order completion flag (TX only). */
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Populate all shadow fields of one TX UW entry. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write one shadow entry to hardware. Same conventions as flush_rx_uw_data():
+ * 64-bit GPA as two 32-bit words (assumes little-endian word order -- TODO
+ * confirm); queue size encoded as mask (2^qs - 1) for DBS > 0.8, raw
+ * exponent otherwise (shift UB if qs >= 32); optional fields written only
+ * when present in this FPGA image.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update shadow and write one TX used-writer entry.
+ * No bounds check on 'index'. Returns -ENOTSUP when the data register is
+ * absent, 0 otherwise.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/*
+ * RX descriptor-reader (DR) bank access: shadow-then-flush, same pattern as
+ * the AM/UW helpers above.
+ */
+
+/* Select shadow entry 'index' in the RX DR control register (cnt = 1). */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow-only setter: guest physical address of the descriptor table. */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-only setter: host id. */
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+/* Shadow-only setter: queue size (log2 exponent; see flush for encoding). */
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-only setter: header setting. */
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+/* Shadow-only setter: packed (virtio packed-ring) flag. */
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Populate all shadow fields of one RX DR entry. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write one shadow entry to hardware. Same conventions as the UW flush:
+ * 64-bit GPA as two 32-bit words (word-order assumption -- TODO confirm);
+ * queue size as mask (2^qs - 1) for DBS > 0.8, raw exponent otherwise;
+ * the optional packed field is only written when present.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: update shadow and write one RX descriptor-reader entry.
+ * No bounds check on 'index'. Returns -ENOTSUP when the data register is
+ * absent, 0 otherwise.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * TX descriptor-reader (DR) bank access -- TX counterpart of the RX DR
+ * helpers, with one extra per-queue attribute: the egress port.
+ */
+
+/* Select shadow entry 'index' in the TX DR control register (cnt = 1). */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Shadow-only setter: guest physical address of the descriptor table. */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-only setter: host id. */
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+/* Shadow-only setter: queue size (log2 exponent; see flush for encoding). */
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-only setter: header setting. */
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+/* Shadow-only setter: egress port (TX only). */
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+/* Shadow-only setter: packed (virtio packed-ring) flag. */
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/*
+ * Populate all shadow fields of one TX DR entry.
+ * Note the parameter order here is (..., port, header, ...) while the
+ * setters run header before port -- shadow-only writes, so order is benign.
+ */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write one shadow entry to hardware. Same conventions as the other flush
+ * helpers: 64-bit GPA as two 32-bit words (word-order assumption -- TODO
+ * confirm); queue size as mask (2^qs - 1) for DBS > 0.8, raw exponent
+ * otherwise; optional packed field written only when present.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: update shadow and write one TX descriptor-reader entry.
+ * No bounds check on 'index'. Returns -ENOTSUP when the data register is
+ * absent, 0 otherwise.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * TX queue-property (QP) bank access: maps a TX queue to a virtual port.
+ * Same shadow-then-flush pattern as the other banks.
+ */
+
+/* Select shadow entry 'index' in the TX QP control register (cnt = 1). */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow-only setter: virtual port for this TX queue. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+/* Populate the (single-field) shadow entry. */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write one shadow entry to the TX queue-property data register. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Public entry: bind TX queue 'index' to 'virtual_port'. No bounds check
+ * on 'index'. Returns -ENOTSUP when the data register is absent,
+ * 0 otherwise.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * TX QoS bank access: per-queue rate limiting (enable, information rate,
+ * burst size) plus a global rate multiplier/divider.
+ * NOTE(review): the struct members used below are named mp_reg_tx_queue_qos_*
+ * although they are nt_field_t handles -- inconsistent with the mp_fld_
+ * convention used by every other bank in this file.
+ */
+
+/* Select shadow entry 'index' in the TX QoS control register (cnt = 1). */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow-only setter: QoS enable flag. */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+/* Shadow-only setter: information rate (ir). */
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+/* Shadow-only setter: burst size (bs). */
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Populate all shadow fields of one TX QoS entry. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write one shadow entry to the TX QoS data register. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Public entry: program QoS settings for TX queue 'index'. No bounds check
+ * on 'index'. Returns -ENOTSUP when the data register is absent,
+ * 0 otherwise.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Program the global TX QoS rate as a multiplier/divider pair and flush.
+ * No shadow is kept for this register. Returns -ENOTSUP when absent,
+ * 0 otherwise. Note: 'div' is written as-is; a zero divider is not rejected
+ * here -- hardware semantics for div == 0 are not visible in this file.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * nthw_dbs.h - register/field handles and shadow state for the FPGA DBS
+ * (doorbell/queue) module used by the ntnic PMD.
+ * NOTE(review): the include guard uses an HPP_ suffix in a .h file --
+ * consider NTHW_DBS_H_ for consistency with C headers.
+ */
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+/* Upper bounds for the per-queue shadow arrays declared in nthw_dbs_s. */
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA: host-side shadow of one RX available-monitor entry. */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;	/* avail ring address */
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;	/* virtio packed-ring mode flag */
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA: host-side shadow of one TX available-monitor entry. */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA: host-side shadow of one RX used-writer entry. */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;	/* used ring address */
+	uint32_t host_id;
+	uint32_t queue_size;	/* stored as log2 exponent; encoding on flush
+				 * depends on the DBS module version
+				 */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;	/* interrupt vector */
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA: host-side shadow of one TX used-writer entry. */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* log2 exponent; see RX UW note above */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;	/* in-order completion (TX only) */
+};
+
+/* DBS_RX_DR_DATA: host-side shadow of one RX descriptor-reader entry. */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;	/* descriptor table address */
+	uint32_t host_id;
+	uint32_t queue_size;	/* log2 exponent */
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: host-side shadow of one TX descriptor-reader entry. */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* log2 exponent */
+	uint32_t header;
+	uint32_t port;	/* egress port (TX only) */
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: host-side shadow of one TX queue-property entry. */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* Host-side shadow of one TX QoS entry (enable, info rate, burst size). */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * DBS module context: FPGA handle, resolved register/field handles, and the
+ * per-queue shadow arrays mirroring the hardware memory banks. Register and
+ * field pointers that are optional in a given FPGA image may be NULL; the
+ * accessors in nthw_dbs.c check for that and return -ENOTSUP.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	/* RX/TX global control */
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	/* Queue init / pointer / idle status registers */
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Available-monitor bank (control selects index, data holds entry) */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	/* Used-writer bank */
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	/* Descriptor-reader bank */
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	/* TX queue-property bank */
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/*
+	 * TX QoS bank.
+	 * NOTE(review): the nt_field_t members below are named mp_reg_*
+	 * rather than mp_fld_* -- inconsistent with the rest of this struct.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Host-side shadows of the hardware memory banks, indexed by queue. */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/*
+ * DBS API. Unless noted otherwise, the int-returning functions return 0 on
+ * success and -ENOTSUP when the backing register is absent in the FPGA image.
+ * NOTE(review): most of these names lack the nthw_dbs_ prefix and are not
+ * static -- they pollute the global namespace; consider a uniform prefix.
+ */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+/* Bank writers: update host shadow then flush one entry to hardware. */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Port classification used when selecting which MTU/QoS registers to program. */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,	/* physical TX port (TXP registers) */
+	PORT_TYPE_VIRTUAL,	/* virtio/queue-backed port (queue registers) */
+	PORT_TYPE_OVERRIDE,	/* not handled by nthw_epp_set_mtu() - rejected */
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/*
+ * Hardware identification as reported by the adapter.
+ * NOTE(review): the struct tag is spelled "nthwhw" (double hw) while the
+ * typedef is nthw_hw_info_t; fpga_info_t below embeds it by tag name, so
+ * renaming would have to touch both places.
+ */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;		/* hardware platform id */
+	int hw_id_emulated;	/* id presented when emulating another platform */
+	char hw_plat_id_str[32];
+
+	/* Factory-assigned MAC address allocation for this adapter */
+	struct vpd_info_s {
+		int mn_mac_addr_count;		/* number of MACs assigned */
+		uint64_t mn_mac_addr_value;	/* first MAC as integer */
+		uint8_t ma_mac_addr_octets[6];	/* first MAC as octets */
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-adapter FPGA context: identification of the loaded FPGA image, the
+ * port topology it exposes, and pointers to the instantiated low-level
+ * register-access helper objects.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;	/* combined FPGA image identifier */
+
+	/* Image identification fields (decoded from n_fpga_ident) */
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Port topology of this image */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;	/* FPGA register model (nthw_fpga_model) */
+
+	/* Low-level access helpers, created during adapter bring-up */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/* Allocate a zero-initialized EPP context; returns NULL on allocation failure. */
+nthw_epp_t *nthw_epp_new(void)
+{
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/* Scrub and release an EPP context; a NULL pointer is accepted and ignored. */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nthw_epp_t));
+	free(p);
+}
+
+/*
+ * Probe for EPP module presence.
+ * nthw_epp_init() with a NULL context performs only the module lookup and
+ * returns 0 when the module instance exists, so no state is touched here.
+ */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/*
+ * Initialize an EPP context: look up the EPP module instance in the FPGA
+ * model and resolve every register and field handle used by the other
+ * nthw_epp_* functions.
+ *
+ * When p is NULL only the module lookup is performed (used by
+ * nthw_epp_present() to probe for the module).
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	/* Probe-only mode */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	/* Number of recipe (category) entries supported by this FPGA image */
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory: CTRL selects the entry, DATA holds its contents */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical TX port MTU table */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Per-queue (virtual port) MTU table */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Physical TX port QoS shaping table */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual port QoS shaping table */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue-to-virtual-port mapping table */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Program default EPP state: clear all recipe entries, install the NRECIPE
+ * predefined recipes (no adjust / VXLAN / VXLAN-IPv6 size adjustment), set
+ * the default MTU on all ports and queues, and disable all QoS shaping.
+ * A NULL context is accepted and ignored; returns 0.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one recipe entry per address (CNT = 1) */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Install the predefined recipes with MTU/QoS checks enabled */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup (NOTE(review): 2 ports hard-coded - presumably
+	 * matches this FPGA image; confirm against n_phy_ports)
+	 */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup: shaping disabled by default */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup (128 queues hard-coded - see note above) */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup: shaping disabled by default */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the maximum MTU for one port.
+ * Physical ports are programmed through the TXP MTU table, virtual ports
+ * through the per-queue MTU table.  Any other port type resets both tables
+ * and returns -ENOTSUP.  A NULL context is accepted and ignored.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the TXP MTU entry for this port */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new MTU */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue MTU entry for this port */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new MTU */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Restore both MTU tables to their reset state */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Program QoS shaping for one physical TX port.
+ * The entry is enabled whenever any of rate, rate-fraction or burst size is
+ * non-zero; passing all zeros disables shaping for the port.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	uint32_t shaping_on;
+
+	if (p == NULL)
+		return 0;
+
+	/* Select the per-port QoS entry */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	shaping_on = (information_rate != 0 ||
+		      information_rate_fractional != 0 || burst_size != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, shaping_on);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Program QoS shaping for one virtual port.
+ * The entry is enabled whenever any of rate, rate-fraction or burst size is
+ * non-zero; passing all zeros disables shaping for the port.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	uint32_t shaping_on;
+
+	if (p == NULL)
+		return 0;
+
+	/* Select the per-port QoS entry */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	shaping_on = (information_rate != 0 ||
+		      information_rate_fractional != 0 || burst_size != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, shaping_on);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Map a queue id to a virtual port in the queue/vport table.
+ * A NULL context is accepted and ignored; returns 0.
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the entry for this queue id */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN encapsulation adds an extra 50 bytes (70 for IPv6) per frame */
+#define VXLANDATASIZEADJUST 50
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Default MTU programmed for all ports/queues by nthw_epp_setup() */
+#define MTUINITVAL 1500
+/* Number of predefined recipes: none / VXLAN / VXLAN-IPv6 */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * EPP (egress packet processing) module context: the module handle plus
+ * cached register/field handles, resolved once by nthw_epp_init().
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;	/* number of recipe entries in this image */
+
+	/* Recipe memory: control (address/count) and data registers */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Physical TX port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Per-queue (virtual port) MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* Physical TX port QoS shaping table */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual port QoS shaping table */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue-to-virtual-port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code: NULL-terminated list of supported FPGA image definitions */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/* Module-id to printable-name map; terminated by a { 0, NULL } sentinel. */
+static const struct {
+	const int a;	/* module id (MOD_*) */
+	const char *b;	/* printable module name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },	/* sentinel - must remain last */
+};
+
+/* NOTE: this needs to be (manually) synced with enum; indexed by bus type
+ * id, range-checked by get_bus_name()
+ */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Translate a bus type id into a printable name.
+ * Index 0 is the "unknown" slot, so named buses start at id 1; any id
+ * outside the table yields "ERR".
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	/* Upper bound must be exclusive: valid indices are
+	 * [0 .. ARRAY_SIZE-1]; the original "<=" permitted an
+	 * out-of-bounds read one element past the end of a_bus_type.
+	 */
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+	else
+		return "ERR";
+}
+
+/*
+ * Module name lookup by id from array.
+ * Uses naive linear search as performance is not an issue here.
+ * Returns "unknown" when the id is not in the table.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	/* Search up to (but not including) the trailing { 0, NULL } sentinel.
+	 * The original "<=" bound could leave i one past the end of the
+	 * array when the id was not found, causing an out-of-bounds read in
+	 * the return expression below.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map) - 1; i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	/* On miss, i indexes the sentinel whose .b is NULL */
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
+/*
+ * Low-level register read dispatch.
+ * Routes a read of `len` 32-bit words at `addr` to the access path selected
+ * by n_bus_type_id: direct BAR/PCI register access (single word only) or
+ * one of the RAB buses 0/1/2.  Returns 0 on success, non-zero on failure;
+ * asserts on unsupported bus types.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		/* Direct register access handles exactly one word */
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Register read with timestamp out-parameters.
+ * The p_tsc1/p_tsc2 arguments are accepted for interface compatibility but
+ * are not populated by this implementation; the call is forwarded directly
+ * to read_data().
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Low-level register write dispatch (mirror of read_data()).
+ * Routes a write of `len` 32-bit words at `addr` to the access path
+ * selected by n_bus_type_id.  Returns 0 on success, non-zero on failure;
+ * asserts on unsupported bus types.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		/* Direct register access handles exactly one word */
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/* Allocate an FPGA manager object; returns NULL on allocation failure. */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = malloc(sizeof(nt_fpga_mgr_t));
+
+	/* Zero the struct so all fields are in a defined state even before
+	 * fpga_mgr_init() runs; the original handed back uninitialized
+	 * memory, unlike the sibling constructors fpga_new()/param_new's
+	 * peers which zero their objects.
+	 */
+	if (p)
+		memset(p, 0, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Scrub and release an FPGA manager; NULL is accepted and ignored. */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	/* NULL guard: memset(NULL, ...) is undefined behavior, and the
+	 * sibling destructor nthw_epp_delete() already tolerates NULL.
+	 */
+	if (p) {
+		memset(p, 0, sizeof(nt_fpga_mgr_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the generated product-init table to the manager and count its
+ * entries (the table is NULL-terminated).
+ */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Locate the product-init descriptor matching the given FPGA id and build
+ * an initialized nt_fpga_t from it.  Returns NULL (after logging an error)
+ * when the loaded image is not among the supported instances.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id != n_fpga_prod ||
+				p_init->fpga_version != n_fpga_ver ||
+				p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print the list of supported FPGA images to fh_out.
+ * detail_level 0 prints only the product/version/revision triplet; any
+ * other level additionally prints the build time as hex and as text.
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/* Cast for %lX explicitly: time_t is not guaranteed
+			 * to be "long", and a printf specifier/argument
+			 * mismatch is undefined behavior on platforms where
+			 * the widths differ (e.g. 64-bit time_t on 32-bit).
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Debug-log the product id triplet of every supported FPGA image. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused presumably silences unused-variable warnings when
+		 * NT_LOG compiles to nothing - confirm macro definition
+		 */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA object; returns NULL on allocation failure. */
+nt_fpga_t *fpga_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/* Scrub and release an FPGA object; NULL is accepted and ignored. */
+void fpga_delete(nt_fpga_t *p)
+{
+	/* NULL guard for consistency with nthw_epp_delete()/param_delete();
+	 * memset(NULL, ...) would be undefined behavior.
+	 */
+	if (p) {
+		memset(p, 0, sizeof(nt_fpga_t));
+		free(p);
+	}
+}
+
+/*
+ * Release an FPGA object together with all the module objects it owns.
+ * NOTE(review): the parameter objects in mpa_params (and the mpa_params /
+ * mpa_modules arrays themselves) are not freed here - verify ownership.
+ */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate an FPGA object from its generated product-init descriptor:
+ * copy the image identification, then create one nt_param_t per product
+ * parameter and one nt_module_t per module listed in the descriptor.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		} else {
+			/* Keep count and array consistent: a non-zero
+			 * mn_params with a NULL mpa_params would make
+			 * fpga_get_product_param()/fpga_dump_params()
+			 * dereference NULL later.
+			 */
+			p->mn_params = 0;
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		} else {
+			/* Same count/array consistency guarantee as above */
+			p->mn_modules = 0;
+		}
+	}
+}
+
+/* Set the debug mode on the FPGA object and propagate it to every module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+/* Find the module object with the given id and instance; NULL when absent. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_cur = p->mpa_modules[n];
+
+		if (p_cur->m_mod_id == id && p_cur->m_instance == instance)
+			return p_cur;
+	}
+	return NULL;
+}
+
+/* True when the module with the given id and instance exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return fpga_query_module(p, id, instance) != NULL;
+}
+
+/*
+ * Find the generated init descriptor for a module id/instance pair.
+ * Unlike fpga_query_module() this searches the static descriptor table,
+ * not the instantiated module objects.  Returns NULL when not found.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mp_init->nb_modules; i++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[i];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/*
+ * Look up a product parameter value by id.
+ * Returns n_default_value when the parameter is not present in this image.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int n;
+
+	for (n = 0; n < p->mn_params; n++) {
+		const nt_param_t *p_cur = p->mpa_params[n];
+
+		if (p_cur->param_id == n_param_id)
+			return p_cur->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Return the product id of the loaded FPGA image. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* Return the FPGA version number of the loaded image. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* Return the FPGA revision number of the loaded image. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log a one-line identification of the FPGA image (ids, version, build). */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Dump the FPGA summary plus all of its parameters and modules (debug log). */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Dump every product parameter of the FPGA to the debug log. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_par = p->mpa_params[i];
+
+		param_dump(p_par);
+	}
+}
+
+/* Dump every module of the FPGA (and their registers) to the debug log. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		module_dump(p_mod);
+	}
+}
+
+/*
+ * Param
+ */
+/*
+ * Allocate a parameter object.
+ * Uses calloc so all members start zeroed: with malloc the fields were
+ * indeterminate until param_init() ran, and any read before/without init
+ * was undefined behavior. Returns NULL on allocation failure.
+ */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = calloc(1, sizeof(nt_param_t));
+	return p;
+}
+
+/*
+ * Free a parameter object; accepts NULL.
+ * The struct is scrubbed before free as a use-after-free tripwire.
+ */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a parameter object to its owning FPGA and copy id/value from the
+ * static product-init descriptor.
+ */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Dump one parameter (id and value) to the debug log. */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/*
+ * Allocate a module object.
+ * Uses calloc instead of malloc: module_delete() iterates mn_registers
+ * and frees members, so an uninitialized object reaching the delete path
+ * (e.g. when module_init() is skipped on error) was undefined behavior.
+ * Returns NULL on allocation failure.
+ */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+/*
+ * Destroy a module: delete every register it owns, release the register
+ * pointer array, then the module object itself.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	/*
+	 * The pointer array allocated in module_init() was previously
+	 * leaked; mpa_registers is only assigned when mn_registers != 0.
+	 */
+	if (p->mn_registers)
+		free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialize a module from its static init descriptor: copy identity,
+ * version, bus and base address, inherit the owner's debug mode, and
+ * allocate + populate the array of register objects.
+ *
+ * NOTE(review): if the mpa_registers allocation fails, mn_registers stays
+ * non-zero while mpa_registers is NULL — later iteration over the module
+ * would dereference NULL; confirm callers treat allocation failure as fatal.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Initialize a module by looking up its init descriptor from (mod_id,
+ * instance) and then forcing an explicit debug mode.
+ *
+ * NOTE(review): fpga_lookup_init() may return NULL for an unknown module;
+ * module_init() dereferences p_init unconditionally — confirm callers only
+ * pass known (mod_id, instance) pairs.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Dump the module summary and all of its registers to the debug log. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Dump every register of the module to the debug log. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+/* Return the module's major version. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Return the module's minor version. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack major/minor version into one uint64_t: major in the high 32 bits. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/*
+ * True when the module's version is greater than or equal to the given
+ * (major, minor) pair — "newer" here includes "equal".
+ */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+/*
+ * Linear search for a register by id within the module.
+ * Returns the register, or NULL when the id is not present.
+ */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		if (p->mpa_registers[n]->m_id == id)
+			return p->mpa_registers[n];
+	}
+	return NULL;
+}
+
+/*
+ * Get a register by id; logs an error when the module is NULL or the
+ * register is not found. Returns NULL in both error cases.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Like module_get_register() but silent: NULL simply means "not present". */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Return the module's current debug mode. */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the module debug mode and propagate it to all of its registers. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_register_t *p_register = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		p_register = p->mpa_registers[i];
+		if (p_register)
+			register_set_debug_mode(p_register, n_debug_mode);
+	}
+}
+
+/* Return the bus type id (enum nthw_bus_type) this module is accessed on. */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Return the module's base address on its bus. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/*
+ * Log that a module present in the FPGA image is not supported by this
+ * driver build. (Function name typo is kept: it is part of the public API.)
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* Terminate with \n like every other NT_LOG call in this file */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/*
+ * Allocate a register object.
+ * Uses calloc instead of malloc: register_delete() reads mn_fields,
+ * mp_shadow and mp_dirty, and register_init() only assigns those when
+ * the register has fields — with malloc they could be indeterminate and
+ * freeing them was undefined behavior. Returns NULL on failure.
+ */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+/*
+ * Destroy a register: delete every field it owns, release the field
+ * pointer array and the shadow/dirty buffers, then the object itself.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+
+	/*
+	 * The pointer array allocated in register_init() was previously
+	 * leaked; mpa_fields is only assigned when mn_fields != 0.
+	 */
+	if (p->mn_fields)
+		free(p->mpa_fields);
+
+	if (p->mp_shadow)
+		free(p->mp_shadow);
+
+	if (p->mp_dirty)
+		free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Initialize a register from its static init descriptor: compute the
+ * absolute address from the module base, derive the length in 32-bit
+ * words from the bit width, and allocate the field objects plus the
+ * shadow (value) and dirty buffers.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* Shadow buffer holds the last read/pending value */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			/* Dirty flags, one per 32-bit shadow word */
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Dump the register summary and all of its fields to the debug log. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Dump every field of the register to the debug log. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Return the register's absolute address on its module's bus. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/*
+ * Reset every field of the register to its reset value (shadow only;
+ * no flush to hardware is performed here).
+ */
+void register_reset(const nt_register_t *p)
+{
+	int n;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		if (p->mpa_fields[n] != NULL)
+			field_reset(p->mpa_fields[n]);
+	}
+}
+
+/*
+ * Linear search for a field by id within the register.
+ * NULL-tolerant: returns NULL for a NULL register or an unknown id.
+ */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int n;
+
+	if (p == NULL)
+		return NULL;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		if (p->mpa_fields[n]->m_id == id)
+			return p->mpa_fields[n];
+	}
+	return NULL;
+}
+
+/*
+ * Get a field by id; logs an error when the register is NULL or the
+ * field is not found. Returns NULL in both error cases.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Like register_get_field() but silent: NULL simply means "not present". */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Return the register's total width in bits. */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Return the register's address relative to the module base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/*
+ * Return the debug mode.
+ * NOTE(review): the parameter is declared nt_module_t* although this is a
+ * register_ accessor; both structs have an m_debug_mode member — confirm
+ * intended type with the header.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * Set the register debug mode and propagate it to all of its fields.
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register's current hardware value into its shadow buffer.
+ * Returns the read_data() result (0 on success).
+ *
+ * Fix: the original initialized addr/len/p_data from p *before* its own
+ * "if (p && ...)" NULL test, dereferencing a potentially NULL pointer.
+ * All dereferences now happen after the pointer chain is validated.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+	int rc = -1;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	rc = read_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+		       p->m_len, p->mp_shadow);
+	return rc;
+}
+
+/*
+ * Read the register into its shadow buffer and capture timestamp counters
+ * taken before (*p_tsc1) and after (*p_tsc2) the bus access.
+ * Returns the read_data_tsc() result (0 on success).
+ *
+ * Fix: dereference p only after the NULL-chain test (the original read
+ * addr/len/p_data first), and add the p_fpga_info/shadow asserts that the
+ * sibling read/write helpers already had.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+	int rc = -1;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	rc = read_data_tsc(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			   p->m_len, p->mp_shadow, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+/*
+ * Write cnt consecutive register images (len words each) from the shadow
+ * buffer to hardware. Returns the write_data() result (0 on success).
+ *
+ * Fix: the original initialized addr/len/p_data from p *before* its own
+ * "if (p && ...)" NULL test; all dereferences now follow the validation.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+	int rc = -1;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	rc = write_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			(p->m_len * cnt), p->mp_shadow);
+
+	return rc;
+}
+
+/*
+ * Copy up to len words of the register's shadow value into p_data.
+ * len == (uint32_t)-1 means "whole register"; len is clamped to m_len.
+ * Does NOT read hardware — use register_update() first for a fresh value.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (i = 0; i < len; i++)
+		p_data[i] = p->mp_shadow[i];
+}
+
+/* Return the first 32 bits of the register's shadow value (no HW read). */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Refresh the shadow buffer from hardware, unless the register is
+ * write-only (reading a WO register is skipped silently). With ON_READ
+ * debug mode set, the raw words read are dumped to the debug log.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Read the register from hardware and return its first 32 bits. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every shadow word dirty, i.e. pending a flush to hardware. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+/*
+ * Copy len words from p_data into the register's shadow buffer.
+ * len == (uint32_t)-1 means "whole register"; len is clamped to m_len.
+ * Shadow only — call register_flush() to write the value to hardware.
+ *
+ * Fix: clamp before asserting. The original ran assert(len <= p->m_len)
+ * first, so passing the documented -1 sentinel (as register_get_val()
+ * accepts) aborted debug builds.
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	/* Skip the copy when the caller passed the shadow buffer itself */
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Set the shadow value and immediately flush one register image to HW. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write cnt register images from the shadow buffer to hardware, unless
+ * the register is read-only (flushing an RO register is skipped). With
+ * ON_WRITE debug mode set, the words written are dumped to the debug log.
+ * On success the dirty flags are cleared.
+ *
+ * NOTE(review): the dirty array holds m_len entries but the clearing loop
+ * runs over cnt entries — confirm callers never pass cnt > m_len.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		/* Sanity bound on a single bus burst */
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a register read and return before/after timestamp counters. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow value and mark it dirty (no flush). */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set every shadow bit to one and mark it dirty (no flush). */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/*
+ * Allocate a field object.
+ * Uses calloc so all members start zeroed; with malloc the fields were
+ * indeterminate until field_init() ran. Returns NULL on failure.
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = calloc(1, sizeof(nt_field_t));
+	return p;
+}
+
+/* Free a field object (scrubbed first as a use-after-free tripwire). */
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
+/*
+ * Initialize a field from its static init descriptor and precompute the
+ * bit-slicing helpers used by field_get_val()/field_set_val():
+ *  - m_first_word/m_first_bit: where the field starts in the shadow buffer
+ *  - m_front_mask: mask of the field's bits within the first word
+ *  - m_body_length: number of whole 32-bit words after the first word
+ *  - m_tail_mask: mask of the remaining bits in the last word (0 if none)
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* Bits available in the first word, from m_first_bit upward */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		/* 1ULL shift: front_mask_length can be 32 */
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* bits_remaining < 32 here, so a 32-bit shift cannot occur */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/*
+ * Return the debug mode.
+ * NOTE(review): the parameter is declared nt_module_t* although this is a
+ * field_ accessor; both structs have an m_debug_mode member — confirm
+ * intended type with the header.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the field's debug mode. */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* Return the field's width in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Return the field's lowest bit position within the register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Return the field's highest bit position within the register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/* Return the field's mask within the first shadow word (bit-positioned). */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Set the field (shadow only) to its reset value. */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Return the field mask shifted down to bit 0 (value-space mask). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+/* Return the field's reset value. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field's value from the owning register's shadow buffer into
+ * p_data (len must equal mn_words). A 64-bit sliding window (two shadow
+ * words at a time) right-shifts the field down to bit 0 across word
+ * boundaries. Shadow only — no hardware access.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	/* Two-word window viewed as one 64-bit value for cross-word shifts */
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert the field's value from p_data (len must equal mn_words) into the
+ * owning register's shadow buffer, preserving neighbouring bits via the
+ * front/tail masks. Uses the same 64-bit sliding-window technique as
+ * field_get_val(), shifting the value up to its bit position. Marks the
+ * register dirty; no hardware access.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	/* Two-word window viewed as one 64-bit value for cross-word shifts */
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Set the field in the shadow buffer and flush the register to hardware. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Return the field value (first 32 bits) from the shadow buffer. */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+/* Re-read the owning register from hardware, then return the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+/* Trigger a read of the owning register, capturing before/after TSCs. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Refresh the owning register's shadow buffer from hardware. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Flush the owning register's shadow buffer to hardware. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set a (≤32-bit) field value in the shadow buffer (no flush). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Set a (≤32-bit) field value and flush the register to hardware. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear all bits of a single-word field (shadow only). */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+/* Clear all bits of a single-word field and flush to hardware. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set all bits of a single-word field (shadow only). */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set all bits of a single-word field and flush to hardware. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Wait conditions for field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,	/* wait until every field bit is 0 */
+	FIELD_MATCH_SET_ALL,	/* wait until every field bit is 1 */
+	FIELD_MATCH_CLR_ANY,	/* wait until at least one bit is 0 */
+	FIELD_MATCH_SET_ANY,	/* wait until at least one bit is 1 */
+};
+
+/*
+ * Poll a (≤32-bit) field until the given match condition is met.
+ * Re-reads the owning register every n_poll_interval microseconds, at
+ * most n_poll_iterations times (-1 selects the defaults 10000/100).
+ * Returns 0 when the condition is met, -1 on timeout.
+ *
+ * Fix: the mask was computed as (1 << mn_bit_width) - 1, which is
+ * undefined behavior for a full 32-bit field (shift count == type width,
+ * C11 6.5.7). Both mask computations now shift a 64-bit one.
+ *
+ * NOTE(review): the CLR_ANY branch compares the value (at bit 0) against
+ * field_get_mask(), which is bit-positioned — confirm intended semantics
+ * for fields not starting at bit 0.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask =
+		(uint32_t)(((uint64_t)1 << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(uint32_t)((((uint64_t)1 << p->mn_bit_width) - 1)
+				   << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Poll until every field bit is 1; 0 on success, -1 on timeout. */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit is 0; 0 on success, -1 on timeout. */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is 1; 0 on success, -1 on timeout. */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is 0; 0 on success, -1 on timeout. */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll until the field value equals (n_wait_cond_value & n_wait_cond_mask).
+ * -1 selects the default iterations (10000) / interval (100 usec).
+ * Returns 0 on success, -1 on timeout.
+ *
+ * NOTE(review): the read value is compared unmasked — only the expectation
+ * is masked; confirm this is intentional for partially-masked waits.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Dump the field's static layout (id, bit range, words, reset value). */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Dump the field's current shadow value, most significant word first.
+ * NOTE(review): buf holds 32 words — assumes mn_words <= 32 (fields up to
+ * 1024 bits); confirm against the register definitions.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Dump a static field init descriptor (id, bit range, reset value). */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Build an FPGA model instance matching n_fpga_ident via a temporary
+ * FPGA manager. Logs an error (but still returns NULL gracefully) when
+ * no supported FPGA matches the identifier.
+ *
+ * Fix: the fpga_mgr_new() result was passed to fpga_mgr_init() without a
+ * NULL check, although the function's own later "if (p_fpga_mgr)" shows
+ * NULL is considered possible.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	if (p_fpga_mgr == NULL) {
+		NT_LOG(ERR, NTHW, "%s: FPGA manager allocation failed\n",
+		       __func__);
+		return NULL;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	return p_fpga;
+}
+
+/* Convenience wrapper: look up a module by (id, instance); NULL if absent. */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, n_mod, n_instance);
+	return p_mod;
+}
+
+/* Convenience wrapper: get a register by id (logs error if missing). */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	nt_register_t *p_reg = module_get_register(p_mod, n_reg);
+	return p_reg;
+}
+
+/* Convenience wrapper: get a field by id (logs error if missing). */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	nt_field_t *p_fld = register_get_field(p_reg, n_fld);
+	return p_fld;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+/* Extract the product type / code / version / revision fields packed
+ * into a 64-bit FPGA identifier.
+ */
+#ifndef FPGAID_TO_PRODUCTCODE
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack a (major, minor) version pair: major in the high 32 bits. */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug trace points: dump values on register reads and/or writes. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus types a module can be attached to (see module_get_bus()). */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+/* Forward declarations for the FPGA model object hierarchy:
+ * fpga -> params/modules -> registers -> fields.
+ */
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Manager holding the table of supported FPGA product descriptions. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* One instantiated FPGA: identity/build info plus owned params/modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One product parameter (id/value pair) of an FPGA image. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* One module instance: identity, version, bus location and registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers;
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* One register: address/type/length, its fields, and the shadow buffer
+ * (mp_shadow) holding the last read / pending value with per-word dirty
+ * flags (mp_dirty).
+ */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;
+	uint32_t m_addr;
+	uint32_t m_type;
+	uint32_t m_len;
+
+	int m_debug_mode;
+
+	int mn_fields;
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;
+	bool *mp_dirty;
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* One field: bit position/width plus precomputed slicing helpers
+ * (front mask, body length, tail mask) used by field_get_val()/set_val().
+ */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word;
+	uint32_t m_first_bit;
+	uint32_t m_front_mask;
+	uint32_t m_body_length;
+	uint32_t mn_words;
+	uint32_t m_tail_mask;
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+/* --- FPGA manager API --- */
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+/* --- FPGA instance API --- */
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+/* --- Parameter API --- */
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+/* --- Module API --- */
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+/* --- Register API (shadow-buffer based; flush writes to hardware) --- */
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+/* --- Field API --- */
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+/*
+ * Common convenience includes shared by the nthw (Napatech hardware) layer,
+ * so individual nthw sources do not repeat the standard-library boilerplate.
+ */
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a true array (not valid on pointers/decayed parameters). */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a Napatech PCI device id to the internal adapter identifier.
+ * Unrecognized device ids map to NT_HW_ADAPTER_ID_UNKNOWN.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	/* The NT40A00/NT40A01 boards share the NT40E3 adapter id. */
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+/* Napatech PCI vendor id. */
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+/* PCI device ids for the supported Napatech adapters. */
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/*
+ * Internal adapter identifiers. Several PCI device ids can map to the same
+ * adapter id (see nthw_platform_get_nthw_adapter_id()).
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* NT40A01 is deliberately an alias of NT40E3 (same enum value). */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA image profile: selects which feature set the loaded image provides. */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate a zero-initialized RAC context.
+ * Returns NULL on allocation failure.
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	/*
+	 * calloc() zero-initializes like the old malloc()+memset() pair did,
+	 * but is also safe on allocation failure: the previous code passed an
+	 * unchecked malloc() result straight to memset(), dereferencing NULL
+	 * on out-of-memory.
+	 */
+	return calloc(1, sizeof(nthw_rac_t));
+}
+
+/* Release a RAC context previously allocated by nthw_rac_new(). NULL is a no-op. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before releasing it, as the original code did. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the RAC (Register Access Control) context to its FPGA module and
+ * resolve every register/field handle used for RAB bus access.
+ *
+ * When called with p == NULL the function only probes for the RAC module:
+ * it returns 0 if the module exists, -1 otherwise.
+ * Otherwise returns 0 on success, -1 if the RAC instance does not exist.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	{
+		/*
+		 * RAC is a primary communication channel:
+		 * turn off debug by default,
+		 * except for rac_rab_init.
+		 *
+		 * Fix: this block must run after mp_reg_rab_init has been
+		 * resolved above. It previously ran before the lookup and
+		 * handed a NULL register handle (the context is zeroed by
+		 * nthw_rac_new()) to register_set_debug_mode().
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/*
+		 * Fix: clear the field handle. The original copy-paste
+		 * mistakenly re-cleared mp_reg_dbg_data here, leaving
+		 * mp_fld_dbg_data unassigned in this branch.
+		 */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache the BAR0 addresses used by the raw read/write helpers. */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* NMB registers are optional; cache addresses only when present. */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	/* DMA ring is allocated lazily by nthw_rac_rab_setup(). */
+	p->m_dma = NULL;
+
+	/* Serializes register-mode and DMA-mode RAB transactions. */
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB bus interfaces on this FPGA (NT_RAC_RAB_INTERFACES param). */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-poll the RAB out-buffer "used" counter until at least word_cnt reply
+ * words have arrived, or the retry budget is exhausted.
+ * Returns 0 on success, -1 on timeout (logged).
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t n_used = 0;
+	uint32_t n_poll;
+
+	for (n_poll = 0; n_poll < 100000; n_poll++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &n_used);
+		n_used = (n_used & p->rac_rab_buf_used_ob_used_mask) >>
+			 p->rac_rab_buf_used_ob_used_low;
+		if (n_used >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, n_used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* First pass: through the field layer, so the write is traced. */
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	/* Second pass: raw BAR0 write that performs the actual init. */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Reset all RAB buses with a "flip/flip" sequence: all bits low, all bits
+ * high, then release all but bus 0. Returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	/* Keep bus 0 (bit 0) asserted; release the rest. */
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the RAB DMA ring and program its physical addresses into
+ * the adapter. The single allocation is split into two halves of
+ * RAB_DMA_BUF_CNT words: inbound (commands) first, outbound (replies) second.
+ * Returns 0 on success, -1 if the DMA allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* Outbound half starts RAB_DMA_BUF_CNT words into the allocation. */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Open a DMA transaction. On success (return 0) the mutex is held until the
+ * matching nthw_rac_rab_dma_commit(). Returns -1 (mutex released) if a DMA
+ * transaction is already in progress.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		/* Mutex deliberately stays locked until commit. */
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_adapter_id_str);
+	return -1;
+}
+
+/*
+ * Terminate the queued command stream with a completion word and kick the
+ * adapter by advancing the inbound write pointer.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	/* Ring wrap: RAB_DMA_BUF_CNT is a power of two. */
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll (1 us per iteration, up to RAB_DMA_WAIT iterations) for the adapter
+ * to echo the completion word into the outbound buffer.
+ * Returns 0 on completion, -1 on timeout (logged).
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word and reset the inbound budget. */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Close the DMA transaction opened by nthw_rac_rab_dma_begin(): start the
+ * transfer, wait for completion, and release the mutex held since begin.
+ * Returns the wait result, or -1 if no transaction was active.
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int rc;
+
+	/* Commit without a matching begin is a programming error;
+	 * in that case the mutex is not held, so do not unlock. */
+	if (!p->m_dma_active) {
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	rc = nthw_rac_rab_dma_wait(p);
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return rc;
+}
+
+/* Raw 32-bit MMIO read from BAR0 at byte offset reg_addr into *p_data.
+ * The volatile access forces a real bus read on every call. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	*p_data = *(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					reg_addr);
+}
+
+/* Raw 32-bit MMIO write of p_data to BAR0 at byte offset reg_addr.
+ * The volatile access forces a real bus write on every call. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	*(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr + reg_addr) =
+		p_data;
+}
+
+/*
+ * Queue a RAB write of word_cnt words (1..256) at (bus_id, address) into the
+ * inbound DMA ring. The transfer is not started here; it runs when
+ * nthw_rac_rab_dma_commit() is called.
+ * Returns 0 on success, -1 on bad length or insufficient ring space.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/* NOTE(review): requires word_cnt + 3 free but deducts word_cnt + 1 -
+	 * presumably the extra two words are headroom for the completion
+	 * handshake written by commit; confirm against HW spec. */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy payload into the ring, wrapping at RAB_DMA_BUF_CNT. */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read of word_cnt words (1..256) at (bus_id, address) into the
+ * inbound DMA ring and return, via buf_ptr, where in the outbound ring the
+ * reply data will appear after nthw_rac_rab_dma_commit().
+ * Returns 0 on success, -1 on bad length or insufficient ring space.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fix: the original format string had five conversion
+		 * specifiers but only four arguments (undefined behavior) and
+		 * lacked a trailing newline; log inBufFree like the write
+		 * path does.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Only the command word consumes inbound space; replies go outbound. */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand the caller the outbound window where the reply will land. */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write of word_cnt words at (bus_id, address)
+ * through the IB/OB data registers, terminated by a completion command that
+ * is verified against the echoed completion in the out buffer.
+ * Returns 0 on success, -1 on parameter error, buffer-state error, timeout
+ * or completion mismatch. Mutually exclusive with DMA mode.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/* NOTE(review): '>' lets address == (1 << RAB_ADDR_BW) through even
+	 * though it does not fit in RAB_ADDR_BW bits - confirm whether '>='
+	 * was intended. Same pattern below for bus_id and word_cnt. */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Register-mode access is illegal while a DMA transaction is open. */
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	/* OB free count sits in the upper half of the register. */
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 flags a RAB timeout/overflow condition. */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
/*
 * Read 'word_cnt' 32-bit words from a register on the RAB (Register Access Bus).
 *
 * p:        RAC context (holds register addresses, masks and the access mutex)
 * address:  register address on the target bus (must fit in RAB_ADDR_BW bits)
 * bus_id:   RAB bus identifier (must fit in RAB_BUSID_BW bits)
 * word_cnt: number of 32-bit words to read; must be non-zero and fit in
 *           RAB_CNT_BW bits
 * p_data:   output buffer for 'word_cnt' words
 *
 * Returns 0 on success, -1 on parameter error, RAB completion timeout or a
 * RAB timeout/overflow indication. The whole transaction is serialized under
 * p->m_mutex.
 */
int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
		       uint32_t word_cnt, uint32_t *p_data)
{
	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
	int res = 0;
	uint32_t rab_oper_rd;
	uint32_t word_cnt_expected;
	uint32_t buf_used;
	uint32_t buf_free;
	uint32_t in_buf_free;
	uint32_t out_buf_free;

	pthread_mutex_lock(&p->m_mutex);

	/*
	 * Parameter validation.
	 * NOTE(review): these checks use '>' against (1 << BW); a value equal
	 * to (1 << BW) needs BW+1 bits and would still be accepted — confirm
	 * whether '>=' was intended.
	 */
	if (address > (1 << RAB_ADDR_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal address: value too large %d - max %d\n",
		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
		res = -1;
		goto exit_unlock_res;
	}

	if (bus_id > (1 << RAB_BUSID_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
		res = -1;
		goto exit_unlock_res;
	}

	if (word_cnt == 0) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal word count: value is zero (%d)\n",
		       p_adapter_id_str, word_cnt);
		res = -1;
		goto exit_unlock_res;
	}

	if (word_cnt > (1 << RAB_CNT_BW)) {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
		res = -1;
		goto exit_unlock_res;
	}

	/* Read buffer free register: low 16 bits = input buffer free slots,
	 * high 16 bits = output buffer free slots (per the masks below). */
	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);

	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;

	/* Read buffer used register */
	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);

	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
			     p->rac_rab_buf_used_ob_used_mask);

	/*
	 * Verify that output buffer can hold the number of words to be read,
	 * input buffer can hold one read command
	 * and that the input and output "used" buffer is 0
	 */
	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
		word_cnt_expected = word_cnt;

		/* Compose the read command word: opcode | count | bus id | address.
		 * With RAB_DEBUG_ECHO the command itself is echoed back first,
		 * so one extra output word is expected. */
#if defined(RAB_DEBUG_ECHO)
		rab_oper_rd =
			(RAB_READ_ECHO << RAB_OPR_LO) |
			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
			(bus_id << RAB_BUSID_LO) | address;
		word_cnt_expected++;
#else
		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
			    (bus_id << RAB_BUSID_LO) | address;
#endif /* RAB_DEBUG_ECHO */

		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
				   rab_oper_rd);

		/* Wait until done */
		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
			res = -1;
			goto exit_unlock_res;
		}

#if defined(RAB_DEBUG_ECHO)
		/* Consume and verify the echoed command word (debug builds). */
		uint32_t rab_echo_oper_rd;

		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
				  &rab_echo_oper_rd);
		if (p->mn_param_rac_rab_ob_update) {
			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
					   0);
		}
		if (rab_oper_rd != rab_echo_oper_rd) {
			NT_LOG(ERR, NTHW,
			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
		}
#endif /* RAB_DEBUG_ECHO */

		/* Read data from output buffer; when the OB_UPDATE parameter is
		 * set the output register must be written after each read to
		 * advance the FIFO. */
		{
			uint32_t i;

			for (i = 0; i < word_cnt; i++) {
				nthw_rac_reg_read32(p_fpga_info,
						  p->rac_rab_ob_data_addr,
						  p_data);
				if (p->mn_param_rac_rab_ob_update) {
					nthw_rac_reg_write32(p_fpga_info,
							     p->rac_rab_ob_data_addr,
							     0);
				}
				p_data++;
			}
		}

		/* Read buffer free register; bit 31 reports timeout/overflow */
		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
				  &buf_free);
		if (buf_free & 0x80000000) {
			/* Clear Timeout and overflow bits */
			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
					   0x0);
			NT_LOG(ERR, NTHW,
			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
			       p_adapter_id_str, bus_id, address, in_buf_free,
			       out_buf_free, buf_used);
			res = -1;
			goto exit_unlock_res;
		}

		res = 0;
		goto exit_unlock_res;
	} else {
		NT_LOG(ERR, NTHW,
		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
		       out_buf_free, buf_used);
		res = -1;
		goto exit_unlock_res;
	}

exit_unlock_res:
	pthread_mutex_unlock(&p->m_mutex);
	return res;
}
+
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
/*
 * Register Access Controller (RAC) context.
 *
 * Holds the FPGA model handles (registers/fields) for the RAC module plus
 * pre-resolved absolute register addresses and field masks used on the fast
 * path, and the state of the optional DMA-based RAB transfer mode.
 * All RAB bus transactions are serialized under m_mutex.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_rac;

	/* Serializes all RAB bus transactions on this adapter */
	pthread_mutex_t m_mutex;

	/* FPGA product parameters read at init */
	int mn_param_rac_rab_interfaces;
	int mn_param_rac_rab_ob_update;

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	/* RAB interface reset/init */
	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	int mn_fld_rab_init_bw;
	uint32_t mn_fld_rab_init_mask;

	/* Debug access registers */
	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	/* RAB input (command/data) and output (response) buffers */
	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	/* Buffer free/used status */
	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	/* DMA-mode buffer physical addresses and ring pointers */
	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	/* Non-memory-mapped bus access registers */
	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Pre-resolved absolute register addresses (fast path) */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Pre-computed field masks for the buffer free/used registers */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* DMA transfer mode state */
	bool m_dma_active;

	struct nt_dma_s *m_dma;

	/* CPU-visible views of the DMA input/output rings */
	volatile uint32_t *m_dma_in_buf;
	volatile uint32_t *m_dma_out_buf;

	uint16_t m_dma_out_ptr_rd;
	uint16_t m_dma_in_ptr_wr;
	uint32_t m_in_free;
};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
/* Cursor into a DMA ring buffer: 'base' is the ring start, 'size' its length
 * in 32-bit words, and 'index' the current read position. */
struct dma_buf_ptr {
	uint32_t size;
	uint32_t index;
	volatile uint32_t *base;
};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
/*
 * Bind the STA (statistics) module instance and compute the layout of the
 * statistics DMA area.
 *
 * p:          context from nthw_stat_new(); if NULL, only probes whether the
 *             instance exists (returns 0 if present, -1 if not)
 * p_fpga:     FPGA model handle
 * n_instance: STA module instance index
 *
 * Derives counter counts per color/host-buffer/port from FPGA product
 * parameters and the STA module version, then clears and arms the hardware
 * counter logic. Returns 0 on success, -1 if the instance does not exist.
 */
int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	uint64_t n_module_version_packed64 = -1;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);

	/* Probe-only mode: report instance presence without initializing */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_stat = mod;

	/* NOTE(review): %08lX assumes 'long' is 64-bit; on 32-bit targets this
	 * is a format mismatch for uint64_t - PRIX64 would be portable. */
	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
	       p->mn_instance, n_module_version_packed64);

	{
		nt_register_t *p_reg;
		/* STA_CFG register */
		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);

		/* CFG: fields NOT available from v. 3 (may be NULL) */
		p->mp_fld_tx_disable =
			register_query_field(p_reg, STA_CFG_TX_DISABLE);
		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);

		/* STA_STATUS register */
		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
		p->mp_fld_stat_toggle_missed =
			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);

		/* HOST_ADR registers: DMA destination address (LSB/MSB halves) */
		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);

		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
	}

	/* Params */
	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;

	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);

	/* Rx port count: try the VSWITCH-specific parameter first, then fall
	 * back through the non-VSWITCH parameters */
	p->m_nb_rx_ports =
		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
	if (p->m_nb_rx_ports == -1) {
		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
							  -1); /* non-VSWITCH */
		if (p->m_nb_rx_ports == -1) {
			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
								  NT_PORTS,
								  0); /* non-VSWITCH */
		}
	}

	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
	p->m_rx_port_replicate =
		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);

	/* Two counters (packets + octets) per color */
	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
			      2; /* VSWITCH */
	if (p->m_nb_color_counters == 0) {
		p->m_nb_color_counters =
			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
			2; /* non-VSWITCH */
	}

	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;

	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);

	/* 6 base counters per Rx host buffer, plus 2 DBS counters from v0.6 */
	p->m_nb_rx_hb_counters =
		(p->m_nb_rx_host_buffers *
		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
			   p->m_dbs_present :
			   0)));

	p->m_nb_tx_hb_counters = 0;

	p->m_nb_rx_port_counters =
		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
			  p->m_dbs_present :
			  0);
	p->m_nb_tx_port_counters = 0;

	p->m_nb_counters =
		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;

	/* Map the packed module version onto a statistics layout revision */
	p->mn_stat_layout_version = 0;
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
		p->mn_stat_layout_version = 6;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
		p->mn_stat_layout_version = 5;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
		p->mn_stat_layout_version = 4;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
		p->mn_stat_layout_version = 3;
	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
		p->mn_stat_layout_version = 2;
	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
		p->mn_stat_layout_version = 1;
	} else {
		p->mn_stat_layout_version = 0;
		NT_LOG(ERR, NTHW,
		       "%s: unknown module_version 0x%08lX layout=%d\n",
		       p_adapter_id_str, n_module_version_packed64,
		       p->mn_stat_layout_version);
	}
	assert(p->mn_stat_layout_version);

	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
		p->m_nb_rx_port_counters += 6;

	/* STA module 0.3+ adds TX stats */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
			p->m_nb_tx_ports >= 1)
		p->mb_has_tx_stats = true;

	/* STA module 0.3+ adds TX stat counters */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
		p->m_nb_tx_port_counters += 22;

	/* STA module 0.4+ adds TX drop event counter */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
		p->m_nb_tx_port_counters += 1; /* TX drop event counter */

	/*
	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
	 * duplicate counters
	 */
	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
		p->m_nb_rx_port_counters += 4;
		p->m_nb_tx_port_counters += 1;
	}

	/* VSWITCH profile uses a fixed, much smaller per-port counter set */
	if (p->mb_is_vswitch) {
		p->m_nb_rx_port_counters = 5;
		p->m_nb_tx_port_counters = 5;
	}

	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);

	if (p->mb_has_tx_stats)
		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);

	/* Output params (debug) */
	NT_LOG(DBG, NTHW,
	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
	NT_LOG(DBG, NTHW,
	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
	       p->mn_stat_layout_version);
	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
	       p->m_nb_counters, p->m_nb_counters);
	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);

	/* Init: disable TX (where supported), clear counters, acknowledge any
	 * missed toggle, and leave DMA disabled until an address is set */
	if (p->mp_fld_tx_disable)
		field_set_flush(p->mp_fld_tx_disable);

	field_update_register(p->mp_fld_cnt_clear);
	field_set_flush(p->mp_fld_cnt_clear);
	field_clr_flush(p->mp_fld_cnt_clear);

	field_update_register(p->mp_fld_stat_toggle_missed);
	field_set_flush(p->mp_fld_stat_toggle_missed);

	field_update_register(p->mp_fld_dma_ena);
	field_clr_flush(p->mp_fld_dma_ena);
	field_update_register(p->mp_fld_dma_ena);

	return 0;
}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
/*
 * STA (statistics) module context.
 *
 * Counter counts are derived from FPGA product parameters and the STA module
 * version in nthw_stat_init(); the DMA area layout is m_nb_counters 32-bit
 * counters followed by one 64-bit timestamp.
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_stat;
	int mn_instance;

	/* Layout revision derived from the packed module version (1..6) */
	int mn_stat_layout_version;

	bool mb_is_vswitch;
	bool mb_has_tx_stats;

	/* Port/queue topology from FPGA product parameters */
	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	int m_dbs_present;

	int m_rx_port_replicate;

	/* Counter group sizes (see nthw_stat_init for derivation) */
	int m_nb_color_counters;

	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	/* Total number of 32-bit counters in the DMA area */
	int m_nb_counters;

	/* STA_CFG fields */
	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	/* Optional fields, NULL when not present in this module version */
	nt_field_t *mp_fld_tx_disable;

	nt_field_t *mp_fld_cnt_freeze;

	/* STA_STATUS field */
	nt_field_t *mp_fld_stat_toggle_missed;

	/* DMA destination address halves (HOST_ADR LSB/MSB) */
	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	/* Statistics DMA area: device-side and CPU-side views */
	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;

	/* Points just past the counters inside the DMA area */
	uint64_t *mp_timestamp;
};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
/* Log severity levels; encoded one-bit-per-level so they can be combined
 * into masks. */
enum nt_log_level {
	NT_LOG_ERR = 0x001,	/* error */
	NT_LOG_WRN = 0x002,	/* warning */
	NT_LOG_INF = 0x004,	/* informational */
	NT_LOG_DBG = 0x008,	/* debug */
	NT_LOG_DB1 = 0x010,	/* verbose debug, level 1 (off by default) */
	NT_LOG_DB2 = 0x020,	/* verbose debug, level 2 (off by default) */
};
+
/* Backend callbacks supplied by the host application via nt_log_init().
 * 'log' receives an already NUL/EOL-normalized format string; 'is_debug'
 * follows the nt_log_is_debug() return convention. */
struct nt_log_impl {
	int (*init)(void);
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);
	int (*is_debug)(uint32_t module);
};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Find the trailing newline run of @s, ignoring trailing spaces.
 * Returns a pointer to the first '\n' of that trailing run (so the caller
 * can truncate after it to keep exactly one EOL), or NULL when @s does not
 * end in a newline.
 */
static char *last_trailing_eol(char *s)
{
	int i = (int)strlen(s) - 1;

	/* Empty string: nothing to inspect (the original read s[-1] here,
	 * which is out-of-bounds) */
	if (i < 0)
		return NULL;
	/* Skip spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	if (s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+__rte_format_printf(2, 0)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
/* Release a buffer obtained from ntlog_helper_str_alloc(); NULL is allowed. */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
/* Descriptor for a VFIO-mapped DMA buffer: 'addr' is the CPU virtual address
 * (stored as an integer), 'iova' the device-visible address, and 'size' the
 * mapped length (power-of-two, see ALIGN_SIZE). */
struct nt_dma_s {
	uint64_t iova;
	uint64_t addr;
	uint64_t size;
};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
/* VFIO DMA map/unmap callbacks, registered via nt_util_vfio_init() and used
 * by nt_dma_alloc()/nt_dma_free(). Both return 0 on success. */
struct nt_util_vfio_impl {
	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
			    uint64_t size);
	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
			      uint64_t size);
};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+/* VFIO map/unmap callbacks registered by the driver at startup */
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Register the VFIO DMA map/unmap callbacks used by nt_dma_alloc/nt_dma_free.
+ * The struct is copied, so the caller's instance need not stay alive.
+ */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate a DMA-able buffer and map it through the registered VFIO callback.
+ *
+ * size/align/numa are forwarded to rte_malloc_socket(); the VFIO mapping
+ * length is ALIGN_SIZE(size) (size rounded up to a power of two).
+ * NOTE(review): the mapping length can exceed the rte_malloc_socket()
+ * allocation when size is not a power of two - confirm this is intended.
+ *
+ * Returns a descriptor owning both the buffer and the mapping, or NULL on
+ * any failure (everything acquired so far is released).
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size and align are 64-bit: use PRIu64/PRIX64, not %u/%X, so the
+	 * format string matches the variadic arguments.
+	 */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and free a buffer allocated with nt_dma_alloc().
+ * A failed unmap is logged but the memory is released anyway.
+ * Passing NULL is a safe no-op.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	if (!vfio_addr)
+		return;
+
+	/* size is 64-bit: use PRIu64, not %u, to match the variadic args */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-31 12:23   ` Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+/* Request all monitor tasks to stop; when called directly with signum == -1
+ * (from deinit, not from a signal) the threads are also joined and their
+ * handles cleared.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		/* The monitor loop polls this flag and exits when cleared */
+		monitor_task_is_running[i] = 0;
+		/* NOTE(review): pthread_join/memset are not async-signal-safe,
+		 * so this branch must only ever run on the direct (-1) call
+		 * path, never from a real signal handler - confirm callers.
+		 */
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/* Dump adapter identification to the given stream: device name, PCI ids,
+ * FPGA type/version/build, debug mode, port counts and HW platform, then
+ * the statistics dump. Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* "dddd:bb:dd.f" rendition of the PCI identifier */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * Create and initialize the SPI used during sensor setup.
+ * Returns NULL on allocation or initialization failure.
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *p_spi = nthw_spi_v3_new();
+
+	if (p_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(p_spi);
+		p_spi = NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * Create and initialize the SPI used for sensor readout.
+ * Returns NULL on allocation or initialization failure.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *p_spi = nthw_spis_new();
+
+	if (p_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(p_spi);
+		p_spi = NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * Build the adapter sensor list: the FPGA temperature sensor first, then the
+ * AVR-provided sensors (fan, PSU temperatures, PCB temperature) when the
+ * sensor SPI is available.  adapter_sensors_cnt counts only sensors that were
+ * actually created, and a failed sensor init no longer leads to a NULL
+ * pointer dereference when chaining the next sensor.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor heads the list */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	if (sensors_list_ptr)
+		adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (sensors_list_ptr == NULL) {
+			/* No list head to chain AVR sensors onto */
+			NT_LOG(ERR, ETHDEV,
+			       "%s: FPGA sensor init failed - skipping AVR sensors\n",
+			       __func__);
+		} else if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Bring up one adapter: derive HW info from the PCI device id, build the
+ * ident strings, instantiate the FPGA model, run the PCI TA/TG self test,
+ * set up sensors, initialize the per-product link code, the optional EPP
+ * module and finally the statistics module.
+ * Returns 0 on success or a non-zero error code from the failing stage.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24); /* fits "dddd:bb:dd.f" + NUL */
+	char *const p_adapter_id_str = malloc(24); /* fits "PCI:dddd:bb:dd.f" + NUL */
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* One ident string per array slot - nt4ga_adapter_deinit must free
+	 * all of them, not just the first n_phy_ports entries.
+	 */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		/* NOTE(review): the ident strings allocated above remain owned
+		 * by p_adapter_info on this error path - presumably the caller
+		 * runs nt4ga_adapter_deinit() to release them; confirm.
+		 */
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP module is optional; only set up when present in the FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter: stop and join monitor tasks, stop statistics, shut
+ * down the FPGA model, and release every dynamically allocated ident string
+ * and sensor list.
+ * Returns the result of the RAC/RAB reset.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+
+	/* Direct call (-1): also joins the monitor threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings. nt4ga_adapter_init allocates one
+	 * per array slot (not one per physical port), so free every slot to
+	 * avoid leaking the entries beyond n_phy_ports.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str); i++) {
+		free(p_adapter_info->mp_port_id_str[i]);
+		p_adapter_info->mp_port_id_str[i] = NULL;
+	}
+
+	/* Free adapter ident string (free(NULL) is a no-op) */
+	free(p_adapter_info->mp_adapter_id_str);
+	p_adapter_info->mp_adapter_id_str = NULL;
+
+	/* Free devname ident string */
+	free(p_adapter_info->p_dev_name);
+	p_adapter_info->p_dev_name = NULL;
+
+	/* Free adapter sensors */
+	while (p_adapter_info->adapter_sensors != NULL) {
+		cur_adapter_sensor = p_adapter_info->adapter_sensors;
+		p_adapter_info->adapter_sensors = cur_adapter_sensor->next;
+		sensor_deinit(cur_adapter_sensor);
+	}
+
+	/* Free NIM sensors */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		while (p_adapter_info->nim_sensors[i] != NULL) {
+			cur_nim_sensor = p_adapter_info->nim_sensors[i];
+			p_adapter_info->nim_sensors[i] = cur_nim_sensor->next;
+			free(cur_nim_sensor->sensor);
+			free(cur_nim_sensor);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+/* PCI identification and fields derived from it (ref: DN-0060 section 9) */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id; /* bits 4..11 of pci_device_id */
+	int hw_product_type; /* low nibble of pci_device_id */
+	int hw_reserved1; /* bits 12..15 of pci_device_id */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Aggregate state for one adapter instance */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	/* Sensor lists built by adapter_sensor_setup(); counts track the
+	 * number of successfully created sensors
+	 */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* Ident strings; heap-allocated by nt4ga_adapter_init() and freed by
+	 * nt4ga_adapter_deinit()
+	 */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Filter/flow state for one adapter */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt; /* number of interfaces */
+	int n_queues_per_intf_cnt; /* queues available per interface */
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* p is dereferenced below, so it must not carry the unused attribute;
+	 * the port argument is deliberately ignored - the capability is
+	 * adapter wide.
+	 */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	/* Reports whether a NIM module has been detected in the given port */
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	/* The port is disabled exactly when the administrative state is down */
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	/* Administrative state is the inverse of the port-disable action */
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting the link status currently just controls the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	/* True when the link is currently up */
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	/* Record the requested speed both as a pending port action and as
+	 * the currently reported link info.
+	 */
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	/* Currently reported link speed for the port */
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/* Intentionally a no-op: autonegotiation is not yet supported.
+	 * The previous dead local pointer to p->nt4ga_link has been removed.
+	 */
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	/* Autonegotiation is not configurable yet; report it as enabled.
+	 * The previous dead local pointer to p->nt4ga_link has been removed.
+	 */
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	/* Only the pending port action is updated here */
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	/* Currently reported duplex mode for the port */
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	/* Recorded as a pending port action */
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	/* Pending/selected loopback mode for the port */
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	/* Returned by value: a snapshot copy of the port's NIM I2C context */
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: tx power
+ * Enable/disable the TX laser on a QSFP28 port.
+ * Returns 0 on success, 1 if the laser control call failed, and -1 when the
+ * port type does not support TX laser control.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		/* RX-only modules have no TX laser to control */
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state as sampled by the monitoring thread
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Per-port link configuration as reported to callers */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Per-port requests queued for the monitoring thread to act on */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Adapter-variant union; every variant must start with the nim_ctx array
+ * so that u.nim_ctx[] aliases it regardless of the active variant.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+/* Link bring-up state machine states */
+enum link_up_state {
+	RESET, /* Wait until a valid signal is detected (no local faults). */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback mode on one port.
+ * mode: 0 = off, 1 = host loopback, 2 = line loopback; last_mode is the
+ * previously applied mode and is used to undo its settings when switching.
+ * After any change the RX path is reset and counters are cleared.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	/* GTY polarity swap is kept on for line loopback/normal operation,
+	 * but must be off for host loopback. (sic: "polerity")
+	 */
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* Loopback off: undo whatever the previous mode enabled */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* Polarity swap applies only to NT200A01 build 2 HW and NT200A02
+	 * (same condition as in port_init())
+	 */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear counters only when the RX path came out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ */
+/*
+ * Function to retrieve the current state of a link (for one port).
+ * Fills *state from the MAC/PCS link summary and the GPIO PHY presence pin.
+ * Note: state->link_state and state->link_state_latched are left at
+ * NT_LINK_STATE_UNKNOWN (0) by the memset and are not updated here.
+ * Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	/* Log the summary, but only when it changed since the last call
+	 * for this adapter/port (lsbuf caches the previous message).
+	 */
+	{
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present
+ */
+/*
+ * Check whether a NIM module is present in interface if_no
+ * (thin wrapper over the GPIO PHY presence pin).
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX
+ */
+/*
+ * Enable RX on the MAC/PCS. Always returns 0.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX
+ */
+/*
+ * Enable TX on the MAC/PCS and select the host as TX source.
+ * Always returns 0.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX
+ */
+/*
+ * Disable RX on the MAC/PCS. Always returns 0.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX
+ */
+/*
+ * Disable TX on the MAC/PCS and deselect the host as TX source.
+ * Always returns 0.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ */
+/*
+ * Reset RX: pulse the RX path reset with 10 ms settle time on each edge.
+ * Always returns 0.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX: placeholder section - no TX reset routine is implemented here.
+ */
+
+/*
+ * Swap tx/rx polarity
+ */
+/*
+ * Swap tx/rx polarity.
+ * When swap is true, apply the fixed per-port/per-lane polarity tables;
+ * when false, clear the polarity swap on all four lanes.
+ * NOTE(review): the tables are indexed as [port][lane] with 2 ports and
+ * 4 lanes - assumes port is 0 or 1; confirm against callers.
+ * Always returns 0.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ */
+/*
+ * Check link once NIM is installed and link can be expected.
+ * Resets the RX path if the MAC/PCS signals reset-required, a high bit
+ * error rate, or any FEC lane not alignment-marker locked.
+ * Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	bool rst_required;
+	bool ber;
+	bool fec_all_locked;
+
+	rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+
+	ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+
+	fec_all_locked = nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ */
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets the NIM via GPIO, reads and validates its identity over I2C and
+ * asserts/de-asserts low-power mode according to 'enable'.
+ *
+ * Returns 0 on success or when no module is present, -1 if the module
+ * disappears after reset or is of an unsupported type, otherwise the
+ * error from NIM pre-init/state read.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U;
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		/* Quiesce the port before putting the NIM in low power */
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* The module may have been pulled while we waited - re-check */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ */
+/*
+ * Initialize one 100 Gbps port: state variables, polarity, GMF, NIM,
+ * GTY TX tuning and timestamp compensation.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port. Returns 0 on success or the error from create_nim().
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* -1 selects the "unhandled" tuning branch below */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* NOTE(review): nthw_gmf_init() is called twice - first with a NULL
+	 * module (apparently as a presence probe), then for real; confirm
+	 * the NULL call is a supported probe pattern.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* GTY TX tuning: per-adapter/per-lane pre/diff/post emphasis values */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ */
+/*
+ * State machine shared between kernel and userland.
+ * Runs until monitor_task_is_running[adapter_no] is cleared: reads adapter
+ * and NIM sensors, services port enable/disable and loopback requests, and
+ * (re)initializes ports on NIM insertion. Polls every 0.5 s.
+ *
+ * Fix: the "NIM inserted" log below used the array base pointer
+ * (nim_ctx->...) and therefore always printed port 0's NIM identity;
+ * it now indexes the current port (nim_ctx[i]....).
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ */
+/*
+ * Userland NIM state machine: pthread entry point wrapping
+ * common_ptp_nim_state_machine(); 'data' is the adapter_info_t pointer.
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ */
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ * Sets up MAC/PCS, NIM I2C and GPIO PHY contexts per port (once), then
+ * starts the link monitoring thread. Returns 0 on success or the first
+ * nthw_*_init()/pthread_create() error.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* IIC instance = 2 + port; MAC/PCS uses the port
+			 * number itself as instance.
+			 */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	/* NOTE(review): monitor_task_is_running[] is set by the thread
+	 * itself; a second call made before the thread has started could
+	 * spawn a duplicate monitor - confirm callers serialize this.
+	 */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Probe and initialise the PCI test accelerator (TA) and the PCI
+ * read/write traffic generator (TG) modules on the FPGA.
+ *
+ * Returns the number of modules that could not be found (0 on success),
+ * or -1 if the adapter state pointer is NULL.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int n_err_cnt = 0;
+
+	if (!p) {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+	memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+
+	assert(p_fpga);
+
+	/* PCI read traffic generator */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	if (nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0) != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI write traffic generator */
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	if (nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0) != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	/* PCI test accelerator */
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	if (nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0) != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (non-zero) or disable (0) the PCI test-accelerator checker. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p_ta_tg,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p_ta_tg->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA "bad packet" counter into *p_data. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p_ta_tg, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p_ta_tg->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA "good packet" counter into *p_data. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p_ta_tg, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p_ta_tg->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI read traffic generator: the slot
+ * index, the DMA physical address the slot reads from, and the request
+ * descriptor (size / wait / wrap flags).
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p_ta_tg, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	/* Each slot gets its own req_size-sized region of the DMA buffer. */
+	const uint64_t n_phys_addr =
+		iova + (unsigned long)(slot_addr * req_size);
+
+	nthw_pci_rd_tg_set_ram_addr(p_ta_tg->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p_ta_tg->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p_ta_tg->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the PCI read TG for num_iterations runs (0 stops it). */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p_ta_tg, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p_ta_tg->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Wait for the PCI read TG to report ready.
+ * Polls the ready flag once per millisecond, giving up after 1000 polls
+ * (~1 second). Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		/*
+		 * Only time out while still not ready; the previous check also
+		 * failed when ready arrived exactly on the final poll.
+		 */
+		if (data == 0 && ++poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI write traffic generator: the slot
+ * index, the DMA physical address the slot writes to, and the request
+ * descriptor (size / wait / wrap / increment flags).
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p_ta_tg, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	/* Each slot gets its own req_size-sized region of the DMA buffer. */
+	const uint64_t n_phys_addr =
+		iova + (unsigned long)(slot_addr * req_size);
+
+	nthw_pci_wr_tg_set_ram_addr(p_ta_tg->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p_ta_tg->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p_ta_tg->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the PCI write TG for num_iterations runs (0 stops it). */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p_ta_tg, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p_ta_tg->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Wait for the PCI write TG to report ready.
+ * Polls the ready flag once per millisecond, giving up after 1000 polls
+ * (~1 second). Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		/*
+		 * Only time out while still not ready; the previous check also
+		 * failed when ready arrived exactly on the final poll.
+		 */
+		if (data == 0 && ++poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Run one HIF bandwidth measurement pass using the PCI read/write
+ * traffic generators and the test accelerator.
+ *
+ * The test parameters (delay, packet size, packet count, direction and
+ * NUMA node) are taken from @pri; results are sampled into @pri (and
+ * @sla, though no slave instance is set up in this version).
+ * Direction: 1 = read only, 2 = write only, 3 = combined, other = stop.
+ *
+ * Returns 0 on success, non-zero if any step or the TA checker reported
+ * an error.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	/* Slave instances are not populated here - slave branches below are inert. */
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		/* NOTE(review): returns 0 (no error) when the DMA allocation
+		 * fails - confirm callers intend this to count as a skipped,
+		 * not failed, measurement.
+		 */
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			/* Last slot is marked 'wrap' so the TG loops back to slot 0. */
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				/* poll starts at 8: assumes the first 8 bytes of each
+				 * packet are header and the payload is an incrementing
+				 * 32-bit pattern - TODO confirm against the TG data
+				 * format.
+				 */
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		/* A count of 0xffff lets the selected generator(s) free-run. */
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			/* The loop below is deliberately unreachable past this
+			 * point: the measurement is taken exactly once.
+			 */
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Sweep the requested NUMA node(s) and test direction(s), running one
+ * throughput measurement per combination via
+ * nt4ga_pci_ta_tg_measure_throughput_run().
+ *
+ * @numa_node: a specific node, or UINT8_MAX for "all" (collapses to node 0)
+ * @direction: 1/2/3 for a specific direction, <= 0 to sweep 1..3
+ * @n_pkt_size/@n_batch_count/@n_delay: 0 or negative selects the
+ *	TG_PKT_SIZE/TG_NUM_PACKETS/TG_DELAY defaults (n_delay == 0 is
+ *	rejected with -1)
+ *
+ * Returns -1 on bad delay, otherwise 0 (errors are logged only).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			{
+				int by_loop;
+
+				for (by_loop = dir_begin; by_loop <= dir_end;
+						by_loop++) {
+					struct nthw_hif_end_point_counters *pri =
+							&eps.pri;
+					struct nthw_hif_end_point_counters *sla =
+							&eps.sla;
+
+					/* Primary (master) endpoint parameters */
+					pri->n_numa_node = numa;
+					pri->n_tg_direction = by_loop;
+					pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					pri->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					pri->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					pri->cur_rx = 0;
+					pri->cur_tx = 0;
+					pri->n_ref_clk_cnt = -1;
+					pri->bo_error = 0;
+
+					/* Slave endpoint parameters (mirrors pri) */
+					sla->n_numa_node = numa;
+					sla->n_tg_direction = by_loop;
+					sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+							   n_pkt_size :
+							   TG_PKT_SIZE);
+					sla->n_tg_num_pkts =
+						(n_batch_count > 0 ?
+						 n_batch_count :
+						 TG_NUM_PACKETS);
+					sla->n_tg_delay = (n_delay > 0 ? n_delay :
+							 TG_DELAY);
+					sla->cur_rx = 0;
+					sla->cur_tx = 0;
+					/* Fix copy-paste bug: reset the slave ref-clk
+					 * counter here (the original reset pri's twice
+					 * and left sla's uninitialized).
+					 */
+					sla->n_ref_clk_cnt = -1;
+					sla->bo_error = 0;
+
+					bo_error +=
+					nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+									       pri, sla);
+#if defined(DEBUG) && (1)
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, pri->n_numa_node,
+						       pri->n_tg_direction,
+						       pri->n_tg_num_pkts,
+						       pri->n_tg_pkt_size,
+						       pri->n_tg_delay,
+						       pri->cur_rx, pri->cur_tx,
+						       (pri->cur_rx * 8UL /
+							1000000UL),
+						       (pri->cur_tx * 8UL /
+							1000000UL));
+					}
+					{
+						NT_LOG(DBG, NTHW,
+						       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+						       __func__, sla->n_numa_node,
+						       sla->n_tg_direction,
+						       sla->n_tg_num_pkts,
+						       sla->n_tg_pkt_size,
+						       sla->n_tg_delay,
+						       sla->cur_rx, sla->cur_tx,
+						       (sla->cur_rx * 8UL /
+							1000000UL),
+						       (sla->cur_tx * 8UL /
+							1000000UL));
+					}
+#endif
+
+					if (pri->bo_error != 0 || sla->bo_error != 0)
+						bo_error++;
+					if (bo_error)
+						break;
+				}
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+/* Enable summary debug output from the TA/TG measurements. */
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator parameters: 8 packets of 2 KiB each. */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles to the PCI read TG, write TG and test-accelerator modules. */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg;
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg;
+	struct nthw_pci_ta *mp_nthw_pci_ta;
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Probe the TA/TG FPGA modules; returns the number of modules not found. */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one throughput measurement; returns non-zero on error. */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Sweep NUMA node(s)/direction(s) and run measurements; errors are logged. */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Convert an inline pcap-style 32:32 (seconds:nanoseconds) timestamp
+ * into a single nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	const uint64_t n_sec = ts >> 32;
+	const uint64_t n_nsec = ts & 0xffffffff;
+
+	return n_sec * 1000000000 + n_nsec;
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Snapshot the FPGA statistics DMA area into the driver counters and
+ * update last_timestamp.
+ *
+ * Vswitch FPGAs cannot report a usable DMA timestamp (the location can
+ * only be cleared), so the timestamp is taken from the OS instead;
+ * capture FPGAs deliver a 32:32 timestamp that is converted to ns.
+ * Returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Allocate and initialise the statistics (STA) and RMC FPGA module
+ * handles and cache the port/host-buffer dimensions reported by the STA
+ * module. Returns 0 on success, -1 on failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc;
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/*
+		 * Allocate the RMC handle only after the STA handle succeeded;
+		 * allocating both up front leaked the RMC handle whenever the
+		 * STA allocation failed.
+		 */
+		p_nthw_rmc = nthw_rmc_new();
+		if (!p_nthw_rmc) {
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache FPGA-reported dimensions for the collect routines. */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate the statistics DMA area and per-counter host structures, and
+ * arm the STA module. RMC traffic is blocked while the DMA address is
+ * programmed. Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * Five conversions for five arguments (the previous format
+		 * string had a sixth conversion with no matching argument).
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %p %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/* %02d: adapter_no is an int; PRIX32: n_stat_size is uint32_t */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova,
+		       (uint64_t)(uintptr_t)p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so un-written counters are recognizable. */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* No FLM statistics on vswitch images */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources allocated by nt4ga_stat_setup().
+ * free(NULL) is a no-op, so the heap members are freed unconditionally;
+ * every pointer is reset to NULL to make repeated calls safe.
+ * Returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() has no documented NULL behavior - keep the guard. */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/* Print the accumulated RX/TX totals of every physical port to pfh. */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int n_intf_no;
+
+	for (n_intf_no = 0; n_intf_no < fpga_info->n_phy_ports; n_intf_no++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, n_intf_no,
+			p_nt4ga_stat->a_port_rx_packets_total[n_intf_no],
+			p_nt4ga_stat->a_port_rx_octets_total[n_intf_no],
+			p_nt4ga_stat->a_port_rx_drops_total[n_intf_no],
+			p_nt4ga_stat->a_port_tx_packets_total[n_intf_no],
+			p_nt4ga_stat->a_port_tx_octets_total[n_intf_no],
+			p_nt4ga_stat->a_port_tx_drops_total[n_intf_no]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Accumulate one vswitch (virt v1) statistics DMA snapshot into the
+ * driver counter structures: color counters, host-buffer counters, then
+ * per-port RX and TX counters plus the RX/TX totals.
+ * Returns 0 on success, -1 on NULL input or unsupported layout version.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	/*
+	 * Validate the pointers BEFORE dereferencing them: the previous
+	 * version read p_nt4ga_stat->mp_nthw_stat and the port counts
+	 * before its NULL check, making the check ineffective.
+	 */
+	if (!p_nt4ga_stat)
+		return -1;
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	if (!p_nthw_stat)
+		return -1;
+
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters (pkt/byte pairs; TCP flags packed into the top bits) */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters (8 words per host buffer) */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/*
+ * Harvest capture-profile (v1 layout) statistics from the stat DMA buffer
+ * into the accumulated counters in p_nt4ga_stat.
+ * Called with stat mutex locked.
+ * Returns 0 on success, -1 on bad arguments or unsupported layout version.
+ */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int c, h, p;
+
+	/*
+	 * Validate arguments before any dereference (previously
+	 * p_nt4ga_stat->mp_nthw_stat was read before the NULL check).
+	 */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters are laid out as { packets, bytes } pairs */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Eight consecutive 32-bit counters per Rx host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		struct host_buffer_counters *p_hb =
+			&p_nt4ga_stat->mp_stat_structs_hb[h];
+		const uint32_t *p_src = &p_stat_dma_virtual[h * 8];
+
+		p_hb->flush_packets += p_src[0];
+		p_hb->drop_packets += p_src[1];
+		p_hb->fwd_packets += p_src[2];
+		p_hb->dbs_drop_packets += p_src[3];
+		p_hb->flush_bytes += p_src[4];
+		p_hb->drop_bytes += p_src[5];
+		p_hb->fwd_bytes += p_src[6];
+		p_hb->dbs_drop_bytes += p_src[7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		struct port_counters_v2 *p_rx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_rx[p];
+		/* Base of this port's counter slice in the DMA buffer */
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+
+		p_rx->octets += p_src[0];
+
+		p_rx->broadcast_pkts += p_src[1];
+		p_rx->multicast_pkts += p_src[2];
+		p_rx->unicast_pkts += p_src[3];
+		p_rx->pkts_alignment += p_src[4];
+		p_rx->pkts_code_violation += p_src[5];
+		p_rx->pkts_crc += p_src[6];
+		p_rx->undersize_pkts += p_src[7];
+		p_rx->oversize_pkts += p_src[8];
+		p_rx->fragments += p_src[9];
+		p_rx->jabbers_not_truncated += p_src[10];
+		p_rx->jabbers_truncated += p_src[11];
+
+		/* Packet size distribution */
+		p_rx->pkts_64_octets += p_src[12];
+		p_rx->pkts_65_to_127_octets += p_src[13];
+		p_rx->pkts_128_to_255_octets += p_src[14];
+		p_rx->pkts_256_to_511_octets += p_src[15];
+		p_rx->pkts_512_to_1023_octets += p_src[16];
+		p_rx->pkts_1024_to_1518_octets += p_src[17];
+		p_rx->pkts_1519_to_2047_octets += p_src[18];
+		p_rx->pkts_2048_to_4095_octets += p_src[19];
+		p_rx->pkts_4096_to_8191_octets += p_src[20];
+		p_rx->pkts_8192_to_max_octets += p_src[21];
+
+		p_rx->mac_drop_events += p_src[22];
+		p_rx->pkts_lr += p_src[23];
+		p_rx->duplicate += p_src[24];
+
+		p_rx->pkts_ip_chksum_error += p_src[25];
+		p_rx->pkts_udp_chksum_error += p_src[26];
+		p_rx->pkts_tcp_chksum_error += p_src[27];
+		p_rx->pkts_giant_undersize += p_src[28];
+		p_rx->pkts_baby_giant += p_src[29];
+		p_rx->pkts_not_isl_vlan_mpls += p_src[30];
+		p_rx->pkts_isl += p_src[31];
+		p_rx->pkts_vlan += p_src[32];
+		p_rx->pkts_isl_vlan += p_src[33];
+		p_rx->pkts_mpls += p_src[34];
+		p_rx->pkts_isl_mpls += p_src[35];
+		p_rx->pkts_vlan_mpls += p_src[36];
+		p_rx->pkts_isl_vlan_mpls += p_src[37];
+
+		p_rx->pkts_no_filter += p_src[38];
+		p_rx->pkts_dedup_drop += p_src[39];
+		p_rx->pkts_filter_drop += p_src[40];
+		p_rx->pkts_overflow += p_src[41];
+		/* DBS counters are only valid when the DBS module is present */
+		p_rx->pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[42] : 0;
+		p_rx->octets_no_filter += p_src[43];
+		p_rx->octets_dedup_drop += p_src[44];
+		p_rx->octets_filter_drop += p_src[45];
+		p_rx->octets_overflow += p_src[46];
+		p_rx->octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ? p_src[47] : 0;
+
+		p_rx->ipft_first_hit += p_src[48];
+		p_rx->ipft_first_not_hit += p_src[49];
+		p_rx->ipft_mid_hit += p_src[50];
+		p_rx->ipft_mid_not_hit += p_src[51];
+		p_rx->ipft_last_hit += p_src[52];
+		p_rx->ipft_last_not_hit += p_src[53];
+
+		/* Rx totals - sum in 64-bit to avoid 32-bit wrap-around */
+		uint64_t new_drop_events_sum =
+			(uint64_t)p_src[22] + p_src[38] + p_src[39] +
+			p_src[40] + p_src[41] +
+			(p_nthw_stat->m_dbs_present ? p_src[42] : 0);
+
+		uint64_t new_packets_sum =
+			(uint64_t)p_src[7] + p_src[8] + p_src[9] + p_src[10] +
+			p_src[11] + p_src[12] + p_src[13] + p_src[14] +
+			p_src[15] + p_src[16] + p_src[17] + p_src[18] +
+			p_src[19] + p_src[20] + p_src[21];
+
+		p_rx->drop_events += new_drop_events_sum;
+		p_rx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		struct port_counters_v2 *p_tx =
+			&p_nt4ga_stat->cap.mp_stat_structs_port_tx[p];
+		const uint32_t *p_src =
+			&p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+
+		p_tx->octets += p_src[0];
+
+		p_tx->broadcast_pkts += p_src[1];
+		p_tx->multicast_pkts += p_src[2];
+		p_tx->unicast_pkts += p_src[3];
+		p_tx->pkts_alignment += p_src[4];
+		p_tx->pkts_code_violation += p_src[5];
+		p_tx->pkts_crc += p_src[6];
+		p_tx->undersize_pkts += p_src[7];
+		p_tx->oversize_pkts += p_src[8];
+		p_tx->fragments += p_src[9];
+		p_tx->jabbers_not_truncated += p_src[10];
+		p_tx->jabbers_truncated += p_src[11];
+
+		/* Packet size distribution */
+		p_tx->pkts_64_octets += p_src[12];
+		p_tx->pkts_65_to_127_octets += p_src[13];
+		p_tx->pkts_128_to_255_octets += p_src[14];
+		p_tx->pkts_256_to_511_octets += p_src[15];
+		p_tx->pkts_512_to_1023_octets += p_src[16];
+		p_tx->pkts_1024_to_1518_octets += p_src[17];
+		p_tx->pkts_1519_to_2047_octets += p_src[18];
+		p_tx->pkts_2048_to_4095_octets += p_src[19];
+		p_tx->pkts_4096_to_8191_octets += p_src[20];
+		p_tx->pkts_8192_to_max_octets += p_src[21];
+
+		p_tx->mac_drop_events += p_src[22];
+		p_tx->pkts_lr += p_src[23];
+
+		/*
+		 * Tx totals.
+		 * Bug fix: the Tx drop-event sum previously indexed the DMA
+		 * buffer with the Rx counter stride (m_nb_rx_port_counters)
+		 * inside the Tx loop; it must use the Tx stride.
+		 */
+		uint64_t new_drop_events_sum = p_src[22];
+
+		uint64_t new_packets_sum =
+			(uint64_t)p_src[7] + p_src[8] + p_src[9] + p_src[10] +
+			p_src[11] + p_src[12] + p_src[13] + p_src[14] +
+			p_src[15] + p_src[16] + p_src[17] + p_src[18] +
+			p_src[19] + p_src[20] + p_src[21];
+
+		p_tx->drop_events += new_drop_events_sum;
+		p_tx->pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] += p_src[0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/*
+ * Per-color (per-flow-class) statistics.
+ * packets/bytes are accumulated by the stat collectors from the FPGA
+ * stat DMA buffer ({ packets, bytes } pairs per color).
+ */
+struct color_counters {
+	uint64_t color_packets; /* Packets assigned this color */
+	uint64_t color_bytes; /* Bytes assigned this color */
+	uint8_t tcp_flags; /* TCP flags seen; aggregation semantics not shown here - see collector */
+};
+
+/*
+ * Per-host-buffer counters. The stat DMA layout provides eight
+ * consecutive 32-bit counters per host buffer (see the v1 collectors).
+ */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Per-port statistics for capture-profile adapters.
+ * Each field accumulates one 32-bit DMA counter whose index within the
+ * port's counter slice is fixed by the FPGA stat layout (see
+ * nt4ga_stat_collect_cap_v1_stats() for the exact index mapping).
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop; /* Only updated when the DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop; /* Only updated when the DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/*
+ * Per-port statistics for VSWITCH/inline adapters.
+ * Five 32-bit DMA counters per port: octets, pkts, drop_events,
+ * qos_drop_octets, qos_drop_pkts (see the virt v1 collector).
+ */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * FLM module statistics. Counters are grouped by the FPGA FLM module
+ * version that introduced them (0.17 and 0.20, per the comments below);
+ * individual counter semantics are defined by the FLM module itself.
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/* Top-level statistics state for a single adapter */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat; /* HW STA module handle */
+	nthw_rmc_t *mp_nthw_rmc; /* RMC module handle */
+	struct nt_dma_s *p_stat_dma; /* DMA memory backing the stat buffer */
+	uint32_t *p_stat_dma_virtual; /* CPU-visible mapping; read by the collectors */
+	uint32_t n_stat_size; /* Size of the stat buffer - presumably bytes, confirm at alloc site */
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/*
+	 * Port counters. Which union member is valid depends on the adapter
+	 * profile: 'virt' for VSWITCH/inline, 'cap' for capture (see which
+	 * collector is used).
+	 */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver; /* FLM statistics layout version */
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+/* Stat module lifecycle - implementations live in nt4ga_stat.c */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+/* Dump statistics state to 'pfh' (debug aid) */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* Harvest counters from the stat DMA buffer into p_nt4ga_stat */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ *
+ * The *_LIN_ADDR constants are linear offsets into the flat NIM address
+ * space used by nim_nim_read_write_data_lin().
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false /* 'do_write' argument value for reads */
+#define NIM_WRITE true /* 'do_write' argument value for writes */
+#define NIM_PAGE_SEL_REGISTER 127 /* Page-select byte (see nim_setup_page()) */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0 /* presumably masks within byte 129 - confirm vs SFF-8636 */
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/* Return true if 'prod_no' is a copper SFP known to support tri-speed */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	/* Product numbers of supported 3-speed copper SFP modules */
+	static const char *const supported_pns[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(supported_pns); idx++) {
+		if (strcmp(supported_pns[idx], prod_no) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/* Return true if the module type identified by 'id' uses MSA page addressing */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP:
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
+/* Map the raw identifier byte stored in the I2C context to the enum type */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	const uint8_t raw_id = ctx->nim_id;
+
+	return (nt_nim_identifier_t)raw_id;
+}
+
+/*
+ * Perform a raw I2C read or write of 'seq_cnt' bytes at 'reg_addr' on
+ * device 'i2c_addr'. Returns the underlying nthw_iic_* status.
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/*
+	 * nthw_iic_read_data/nthw_iic_write_data multiply the device
+	 * address by 2, so compensate by dividing it by 2 here.
+	 */
+	const uint8_t dev_addr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Unused */
+
+	if (!do_write)
+		return nthw_iic_read_data(&ctx->hwiic, dev_addr, reg_addr,
+					seq_cnt, p_data);
+
+	return nthw_iic_write_data(&ctx->hwiic, dev_addr, reg_addr,
+				 seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+/*
+ * Select MSA page 'page_sel'. The currently selected page is read first
+ * and the (slow) write is skipped when the page is already active.
+ * Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t active_page;
+
+	/* Fetch the page currently selected by the module */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(active_page), &active_page) != 0)
+		return -1;
+
+	/* Already on the requested page - nothing to do */
+	if (page_sel == active_page)
+		return 0;
+
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes at linear address 'lin_addr' in the NIM.
+ * Hides the mapping from the flat linear address space onto the actual
+ * I2C devices and pages: 0xA0 base area [0..127], paged area above 128
+ * (when 'm_page_addressing'), 0xA2 area [256..511] otherwise, and the
+ * 16-bit PHY registers at 0xAC from SFP_PHY_LIN_ADDR upwards.
+ * Returns 0 on success, -1 on range errors or I2C failure.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	/* Transfer loop: move data in chunks that never cross a block/page */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		/* Translate the linear address into (i2c_addr, reg_addr) */
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/* Advance; for 16-bit PHY access two bytes map to one address */
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_READ);
+}
+
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_WRITE);
+}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	ctx->nim_id = 0;
+	int res = nim_read_id(ctx);
+
+	if (res) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+#define XSFP_READ_VENDOR_INFO(x)                                             \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+XSFP_READ_VENDOR_INFO()
+XSFP_READ_VENDOR_INFO(q)
+
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
/*
 * Map a NIM identifier byte to a printable module-type name.
 * Identifiers without a known mapping yield "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id)
{
	static const char *const id_names[] = {
		[0x00] = "UNKNOWN",
		[0x01] = "GBIC",
		[0x02] = "FIXED",
		[0x03] = "SFP/SFP+",
		[0x04] = "300 pin XBI",
		[0x05] = "XEN-PAK",
		[0x06] = "XFP",
		[0x07] = "XFF",
		[0x08] = "XFP-E",
		[0x09] = "XPAK",
		[0x0A] = "X2",
		[0x0B] = "DWDM",
		[0x0C] = "QSFP",
		[0x0D] = "QSFP+",
		[0x11] = "QSFP28",
		[0x12] = "CFP4",
	};
	const size_t id_count = sizeof(id_names) / sizeof(id_names[0]);

	/* Gaps in the table (0x0E..0x10) are NULL and fall through here */
	if (nim_id < id_count && id_names[nim_id] != NULL)
		return id_names[nim_id];

	return "ILLEGAL!";
}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate__select bits (RS0 & RS1)
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Disable TX laser.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	int res;
+	uint8_t value;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_WRITE);
+
+	return res;
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	uint8_t value;
+	uint8_t mask;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	if (lane_idx < 0) /* If no lane is specified then all lanes */
+		mask = QSFP_SOFT_TX_ALL_DISABLE_BITS;
+	else
+		mask = (uint8_t)(1U << lane_idx);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		value |= mask;
+	else
+		value &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++)
+		if (*(p_nim_len_info + i) == 255) {
+			ctx->len_info[i] = 65535;
+		} else {
+			uint32_t len = *(p_nim_len_info + i) * *(p_nim_units + i);
+
+			if (len > 65535)
+				ctx->len_info[i] = 65535;
+			else
+				ctx->len_info[i] = (uint16_t)len;
+		}
+}
+
/*
 * Read the QSFP+/QSFP28 static module data: DMI options, vendor strings,
 * supported fiber lengths and the required power class.
 * Returns 0 on success, -1 if any of the I2C reads fails.
 */
static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
{
	const bool pg_addr = page_addressing(ctx->nim_id);
	uint8_t options;
	uint8_t value;
	uint8_t nim_len_info[5];
	/* Per-entry conversion factors to meters for the 5 length fields */
	uint16_t nim_units[5] = { 1000, 2, 1, 1,
				 1
			       }; /* QSFP MSA units in meters */
	const char *yes_no[2] _unused = { "No", "Yes" };

	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
	       nim_id_to_text(ctx->nim_id), ctx->nim_id);

	/* Read DMI options */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
				    sizeof(options), &options, NIM_READ) != 0)
		return -1;
	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
	NT_LOG(DBG, ETHDEV,
	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
	       ctx->instance, yes_no[ctx->avg_pwr]);

	/* Cache vendor/product/serial/date/revision strings */
	qsfp_read_vendor_info(ctx);
	NT_LOG(DBG, PMD,
	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
	       ctx->date, ctx->rev);

	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
				    sizeof(nim_len_info), nim_len_info,
				    NIM_READ) != 0)
		return -1;

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	nim_import_len_info(ctx, nim_len_info, nim_units);

	/* Read required power level */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
				    sizeof(value), &value, NIM_READ) != 0)
		return -1;

	/*
	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
	 * If power class >= 5 setHighPower must be called for the module to be fully
	 * functional
	 */
	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
		/* NIM in power class 1 - 4 */
		ctx->pwr_level_req =
			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
				  1);
	} else {
		/* NIM in power class 5 - 7 */
		ctx->pwr_level_req =
			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
	}

	return 0;
}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) {
+		return nim_sfp_set_rate_select(ctx, speed);
+	} else if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	}
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly impirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	ctx->options = 0;
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TEMP_LIN_ADDR, 1,
+					 QSFP_TEMP_THRESH_LIN_ADDR, true,
+					 NIM_OPTION_TEMP);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_VOLT_LIN_ADDR, 1,
+					 QSFP_VOLT_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_SUPPLY);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_RX_PWR_LIN_ADDR, 4,
+					 QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_RX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_PWR_LIN_ADDR, 4,
+					 QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_BIAS_LIN_ADDR, 4,
+					 QSFP_BIAS_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_BIAS);
+}
+
+/*
+ * Classify an SFP/SFP+/SFP28 module from its EEPROM contents and set
+ * ctx->port_type together with the ctx->specific_u.sfp capability flags
+ * (sfp28, sfpplus, dual_rate, hw/sw_rate_sel, cu_type, tri_speed).
+ * The linear offsets read below are SFF-8472 A0h fields - TODO confirm
+ * against the read_data_lin() address mapping.
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Nominal bit rate (offset SFP_BIT_RATE_ADDR), stored in units of 100 Mbps */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	/* Supported fiber lengths, used as a fallback classification below */
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	/* A nominal rate of 10 Gbps or above (25.5G handled above) means SFP+ */
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive ctx->speed_mask from the port type and the SFP capability flags
+ * established by sfp_find_port_params().
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	uint32_t mask;
+
+	if (ctx->specific_u.sfp.sfp28) {
+		/* SFP28 runs 25G; dual-rate modules also support 10G */
+		mask = NT_LINK_SPEED_25G;
+
+		if (ctx->specific_u.sfp.dual_rate)
+			mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* SFP+ runs 10G; dual-rate modules and DAC cables also do 1G */
+		mask = NT_LINK_SPEED_10G;
+
+		if (ctx->specific_u.sfp.dual_rate ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Plain SFP defaults to 1G */
+		mask = NT_LINK_SPEED_1G;
+
+		if (ctx->specific_u.sfp.dual_rate ||
+				ctx->specific_u.sfp.tri_speed)
+			mask |= NT_LINK_SPEED_100M;
+
+		if (ctx->specific_u.sfp.tri_speed)
+			mask |= NT_LINK_SPEED_10M;
+	}
+
+	/* Enable multiple speed setting for SFP28 DAC cables */
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N)
+		mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+			NT_LINK_SPEED_1G);
+
+	ctx->speed_mask = mask;
+}
+
+/*
+ * Determine the QSFP28 port type from the specification compliance codes
+ * (SFF-8636 Table 6-17; extended codes per SFF-8024 Rev 4.7 Table 4-4).
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	static const struct {
+		uint8_t code;
+		nt_port_type_t type;
+	} ext_code_map[] = {
+		{ 0x02, NT_PORT_TYPE_QSFP28_SR4 },
+		{ 0x03, NT_PORT_TYPE_QSFP28_LR4 },
+		{ 0x0B, NT_PORT_TYPE_QSFP28_CR_CA_L },
+		{ 0x0C, NT_PORT_TYPE_QSFP28_CR_CA_S },
+		{ 0x0D, NT_PORT_TYPE_QSFP28_CR_CA_N },
+		{ 0x25, NT_PORT_TYPE_QSFP28_DR },
+		{ 0x26, NT_PORT_TYPE_QSFP28_FR },
+		{ 0x27, NT_PORT_TYPE_QSFP28_LR },
+	};
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	/* Generic QSFP28 unless a specific extended code matches below */
+	ctx->port_type = NT_PORT_TYPE_QSFP28;
+
+	if (fiber_chan_speed & (1 << 7)) {
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t ext_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &ext_code);
+
+		for (size_t i = 0; i < ARRAY_SIZE(ext_code_map); i++) {
+			if (ext_code_map[i].code == ext_code) {
+				ctx->port_type = ext_code_map[i].type;
+				break;
+			}
+		}
+	}
+}
+
+/*
+ * Report whether the user must actively select the desired rate. Even when
+ * this returns false the module may still support several rates; supported
+ * rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* Options register bit 5: rate selection implemented at all? */
+	if (((read_byte(ctx, options_reg_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options register bits 3..2: rate select type */
+	uint8_t sel_type = (read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03;
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	/* Extended rate select compliance, bits 1..0 */
+	uint8_t ext_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) & 0x03;
+
+	if (ext_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Set ctx->speed_mask for a QSFP28 module. FR/DR/LR types use PAM-4 and can
+ * only run with all lanes together; other types run 100G (all lanes) or 25G
+ * (single lane), optionally also 40G/10G when rate selection is available.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = (ctx->lane_idx < 0);
+	const bool pam4 = ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_LR;
+
+	if (pam4) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/*
+ * Classify a QSFP+ module as passive DAC, active DAC or optical from the
+ * high nibble of the transmitter-technology byte.
+ */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	const uint8_t tech_class = device_tech & 0xF0;
+
+	if (tech_class == 0xA0 || tech_class == 0xB0) {
+		/* Copper cable: unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tech_class >= 0xC0) {
+		/* Copper cable with active equalizers (limiting or linear) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Everything below 0xA0 is an optical transmitter */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* All four lanes together run 40G; a single lane runs 10G. */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * First-stage initialization for SFP modules: read the basic EEPROM data,
+ * then derive port type and speed mask. Returns the result of
+ * sfp_read_basic_data() (zero on success).
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	const int status = sfp_read_basic_data(ctx);
+
+	if (status != 0)
+		return status;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return status;
+}
+
+/* Record lane association for a QSFP+ context; modules expose four lanes. */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * First-stage initialization for QSFP+ modules: bind the lane association,
+ * read basic EEPROM data, deduce port type, sensor options, TX_DISABLE
+ * support and the speed mask. Returns the qsfpplus_read_basic_data()
+ * result (zero on success); on failure the ctx fields are left untouched.
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Wait for a QSFP28 module to become ready after reset so that subsequent
+ * (paged) EEPROM reads return valid data. Uses the init-complete flag when
+ * the module implements it (SFF-8636 rev >= 2.5), otherwise falls back to a
+ * fixed 500 ms delay. Only the first lane (or the all-lanes context) waits.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	/* Poll bit 0 of byte 6 every 100 ms, giving up after ~1 s */
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Establish the module's FEC capabilities, first from a product-number
+ * whitelist of modules known to run media-side FEC, then - for unlisted
+ * modules - from the FEC control bits in the upper EEPROM pages.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare the strings; the previous pointer comparison
+		 * (ctx->prod_no == nim_list[i]) compared addresses of a char
+		 * array and a string literal and could never match.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 on page 03h */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * First-stage initialization for QSFP28 modules: run the QSFP+ preinit,
+ * then wait for the module to become ready and fill in the QSFP28-specific
+ * state (port type, FEC options, speed mask). Returns zero on success.
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	const int status = qsfpplus_preinit(ctx, lane_idx);
+
+	if (status != 0)
+		return status;
+
+	qsfp28_wait_for_ready_after_reset(ctx);
+	memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+	       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+	ctx->specific_u.qsfp.qsfp28 = true;
+	qsfp28_find_port_params(ctx);
+	qsfp28_get_fec_options(ctx);
+	qsfp28_set_speed_mask(ctx);
+	return status;
+}
+
+/*
+ * Build the linked list of sensor groups (temperature, voltage, bias
+ * current, TX power, RX power) for an SFP/SFP+/SFP28 module and attach it
+ * to nim_sensors_ptr[m_port_no]. *nim_sensors_cnt is set to the number of
+ * groups that could actually be allocated.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/*
+	 * Temperature sensor heads the list. allocate_nim_sensor_group()
+	 * returns NULL when malloc fails; the previous code dereferenced the
+	 * result unconditionally, so guard every allocation.
+	 */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+
+	if (sensor == NULL)
+		return;
+
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* Level-1 sensors: voltage, bias current, TX power, RX power */
+	void (*const readers[])(struct nim_sensor_group *, nthw_spis_t *) = {
+		&nim_read_sfp_voltage,
+		&nim_read_sfp_bias_current,
+		&nim_read_sfp_tx_power,
+		&nim_read_sfp_rx_power,
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(readers); i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no,
+							 ctx,
+							 NT_SENSOR_SOURCE_PORT,
+							 &sfp_sensors_level1[i]);
+
+		if (sensor->next == NULL)
+			return; /* keep the groups added so far */
+
+		sensor = sensor->next;
+		sensor->read = readers[i];
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Build the linked list of sensor groups for a QSFP+/QSFP28 module:
+ * temperature, voltage and per-lane bias current, TX power and RX power.
+ * *nim_sensors_cnt is set to the number of groups actually allocated;
+ * the previous code only incremented it, unlike the SFP variant which
+ * zeroes it first, and it dereferenced allocation results without a
+ * NULL check.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	/* Start from zero for consistency with sfp_nim_add_all_sensors() */
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor heads the list; guard against failed allocation */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+
+	if (sensor == NULL)
+		return;
+
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+
+	if (sensor->next == NULL)
+		return;
+
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors (one per lane) */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+
+		if (sensor->next == NULL)
+			return;
+
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power (one per lane) */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+
+		if (sensor->next == NULL)
+			return;
+
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power (one per lane) */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+
+		if (sensor->next == NULL)
+			return;
+
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one sensor-group list node for the given port and description.
+ * Returns NULL when the allocation fails; ownership passes to the caller.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *grp = malloc(sizeof *grp);
+
+	if (grp == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+
+	grp->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	grp->ctx = ctx;
+	grp->next = NULL;
+	return grp;
+}
+
+/*
+ * Classify the NIM from its ID byte, run the matching type-specific preinit
+ * and register its sensors. Returns zero on success, non-zero otherwise.
+ * Unlike the previous version, preinit failures are now propagated instead
+ * of being silently dropped, and a failing i2c_nim_common_construct() stops
+ * the sequence early.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	if (res != 0)
+		return res; /* basic construction failed - nothing more to do */
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		res = sfp_preinit(ctx);
+		if (res == 0)
+			sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+						nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		res = qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		if (res == 0)
+			qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+						      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		res = qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		if (res == 0)
+			qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+						      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* Minimal state read out of an SFP module; see nim_state_build() */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * Per-module context for a NIM attached via I2C. Filled in by
+ * construct_and_preinit_nim() and the type-specific preinit helpers;
+ * holds identification strings, capabilities and type-specific flags.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr;
+	uint8_t regaddr;
+	uint8_t nim_id; /* Identifier byte; see enum nt_nim_identifier_e */
+	nt_port_type_t port_type;
+
+	/* Identification strings read from the module EEPROM */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr;
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5];
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options; /* Bitmask of (1 << nim_option_t) capability flags */
+	bool tx_disable;
+	bool dmi_supp;
+
+	/* Type-specific state; which member applies follows from port_type */
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel;
+			bool sw_rate_sel;
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only;
+			bool qsfp28;
+			union {
+				struct {
+					uint8_t rev_compliance;
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* Node in the per-port singly-linked list of NIM sensors */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	/* Callback used to read this sensor (set when the group is added) */
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	struct nim_i2c_ctx *ctx;
+	struct nim_sensor_group *next;
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify a NIM based on its ID and some register reads,
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific type of NIMS.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+/*
+ * NIM EEPROM field offsets - presumably SFF-8472 (SFP) and SFF-8636 (QSFP)
+ * layouts; TODO confirm against the specifications.
+ */
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+/* Linear address 110 on the diagnostic (0xA2) device */
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+/* NIM capability options; used as bit positions in nim_i2c_ctx::options */
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+/*
+ * NIM identifier byte read from EEPROM address NIM_IDENTIFIER_ADDR;
+ * values presumably follow SFF-8024 Table 4-1 - TODO confirm.
+ */
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+/* NOTE: values are implicitly numbered - append new entries at the end so
+ * existing numbering stays stable for any external consumers.
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/*
+ * Return a short human-readable name for a single link-speed value,
+ * e.g. "10G"; returns "Unhandled" (after a debug assert) for values
+ * missing from the table below.
+ */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *text;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].text;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/*
+ * Convert a link-speed enum value to the corresponding rate in bits/s.
+ * Returns 0 for NT_LINK_SPEED_UNKNOWN and (after a debug assert) for any
+ * unhandled value.
+ */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	const uint64_t mbps = 1000ULL * 1000ULL;
+	const uint64_t gbps = 1000ULL * mbps;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * mbps;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * mbps;
+	case NT_LINK_SPEED_1G:
+		return 1ULL * gbps;
+	case NT_LINK_SPEED_10G:
+		return 10ULL * gbps;
+	case NT_LINK_SPEED_25G:
+		return 25ULL * gbps;
+	case NT_LINK_SPEED_40G:
+		return 40ULL * gbps;
+	case NT_LINK_SPEED_50G:
+		return 50ULL * gbps;
+	case NT_LINK_SPEED_100G:
+		return 100ULL * gbps;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render link_speed_mask as a comma-separated list of speed names into
+ * buffer (of size length bytes) and return buffer. Output is truncated
+ * when the buffer is too small; the result is always NUL-terminated.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	if (buffer == NULL || length == 0)
+		return buffer;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			/*
+			 * The strncat() size argument must be the REMAINING
+			 * space (length - len - 1), not the total buffer
+			 * size; passing `length` could overflow the buffer.
+			 */
+			if (len > 0 && (length - len - 1) > 2) {
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			/* 1U << i avoids signed-shift UB for bit 31 */
+			if (len < (length - 1))
+				strncat(buffer,
+					nt_translate_link_speed((nt_link_speed_t)(1U << i)),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
/*
 * Link speed.
 * Note this is a bitmask: values can be OR'ed together into a speed mask
 * (see nt_translate_link_speed_mask()).
 */
enum nt_link_speed_e {
	NT_LINK_SPEED_UNKNOWN = 0,
	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
	/* always keep this entry as the last in enum; == 0x81, NOT a valid bit */
	NT_LINK_SPEED_END
};

typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
/*
 * QSFP Registers
 * Linear ("LIN") byte addresses into the QSFP management memory map;
 * page-3 registers are expressed as (offset + 3 * 128).
 * NOTE(review): layout appears to follow SFF-8636 - confirm against the spec.
 */
#define QSFP_INT_STATUS_RX_LOS_ADDR 3
#define QSFP_TEMP_LIN_ADDR 22
#define QSFP_VOLT_LIN_ADDR 26
#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */

#define QSFP_CONTROL_STATUS_LIN_ADDR 86
#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F

#define QSFP_EXTENDED_IDENTIFIER 129
#define QSFP_POWER_CLASS_BITS_1_4 0xC0
#define QSFP_POWER_CLASS_BITS_5_7 0x03

#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */

#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */

#define QSFP_OPTION3_LIN_ADDR 195
#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)

#define QSFP_DMI_OPTION_LIN_ADDR 220
#define QSFP_DMI_AVG_PWR_BIT (1 << 3)

#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include <arpa/inet.h>
#include <stdbool.h>

#include "qsfp_sensors.h"

#include "ntlog.h"
#include "qsfp_registers.h"
+
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		*p_lane_values = (*p_lane_values); /* Swap to little endian */
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_temperature(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)(res * 10 / 256));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_supply_voltage(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)((res) / 10));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_bias_current(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i] * 2);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_rx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
/*
 * SFP/SFP+ Registers
 * Linear byte addresses; the 0xA2 (diagnostics) area is expressed as
 * (offset + 256).
 * Bug fix: SFP_CU_LINK_LEN_LIN_ADDR was defined twice (identically);
 * a single definition is kept.
 */
#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)

#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)

#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)

#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */

/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
#define SFP_OPTION0_LIN_ADDR 64
#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)

#define SFP_DMI_OPTION_LIN_ADDR (92)
#define SFP_DMI_IMPL_BIT (1 << 6)
#define SFP_DMI_EXT_CAL_BIT (1 << 4)
#define SFP_DMI_AVG_PWR_BIT (1 << 3)
#define SFP_DMI_ADDR_CHG_BIT (1 << 2)

#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)

#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94

#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */

/* Calibration data addresses */
#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */

#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)

#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)

#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)

#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)

/* Live data */
#define SFP_TEMP_LIN_ADDR (96 + 256)
#define SFP_VOLT_LIN_ADDR (98 + 256)
#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
#define SFP_TX_PWR_LIN_ADDR (102 + 256)
#define SFP_RX_PWR_LIN_ADDR (104 + 256)

#define SFP_SOFT_RATE0_BIT (1 << 3)
#define SFP_TX_FAULT_SET_BIT (1 << 2)

#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
#define SFP_SOFT_RATE1_BIT (1 << 3)
#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */

/* PHY addresses */
#define SFP_PHY_LIN_ADDR (12 * 128)
#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to little endian */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM bias current
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		if (rx_power > 65535)
+			return false;
+
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_temperature(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)(temp * 10 / 256));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor,
+				    (int)(temp / 10)); /* Unit: 100uV -> 1mV */
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)(temp * 2));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_power(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)temp);
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_rx_power(sg->ctx, &temp))
+		update_sensor_value(sg->sensor, (int)temp);
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include <limits.h>
#include <math.h>
#include <stdint.h>

#include "ntlog.h"

#include "nthw_drv.h"
#include "nthw_register.h"

#include "nthw_gmf.h"
+
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	nthw_gmf_t *p = malloc(sizeof(nthw_gmf_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_gmf_t));
+	return p;
+}
+
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_gmf_t));
+		free(p);
+	}
+}
+
/*
 * Bind a GMF context to FPGA module instance 'n_instance'.
 *
 * Probe mode: when p is NULL only the module's presence is checked
 * (returns 0 if present, -1 if not). Otherwise returns 0 on success or
 * -1 when the instance does not exist.
 */
int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);

	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_gmf = mod;

	/* Mandatory registers/fields - *_get_* variants fail hard if absent */
	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
	p->mp_ctrl_ifg_auto_adjust_enable =
		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);

	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);

	p->mp_ifg_clock_delta =
		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
	p->mp_ifg_clock_delta_delta =
		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);

	p->mp_ifg_max_adjust_slack =
		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
	p->mp_ifg_max_adjust_slack_slack =
		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);

	p->mp_debug_lane_marker =
		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
	p->mp_debug_lane_marker_compensation =
		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);

	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
	p->mp_stat_sticky_data_underflowed =
		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
	p->mp_stat_sticky_ifg_adjusted =
		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);

	/* Product parameters default to 1 when not present in the FPGA build */
	p->mn_param_gmf_ifg_speed_mul =
		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
	p->mn_param_gmf_ifg_speed_div =
		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);

	p->m_administrative_block = false;

	/* Optional registers/fields - *_query_* variants return NULL if absent */
	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
	if (p->mp_stat_next_pkt) {
		p->mp_stat_next_pkt_ns =
			register_query_field(p->mp_stat_next_pkt,
					     GMF_STAT_NEXT_PKT_NS);
	} else {
		p->mp_stat_next_pkt_ns = NULL;
	}
	p->mp_stat_max_delayed_pkt =
		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
	if (p->mp_stat_max_delayed_pkt) {
		p->mp_stat_max_delayed_pkt_ns =
			register_query_field(p->mp_stat_max_delayed_pkt,
					     GMF_STAT_MAX_DELAYED_PKT_NS);
	} else {
		p->mp_stat_max_delayed_pkt_ns = NULL;
	}
	p->mp_ctrl_ifg_tx_now_always =
		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
	p->mp_ctrl_ifg_tx_on_ts_always =
		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);

	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);

	p->mp_ifg_clock_delta_adjust =
		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
	if (p->mp_ifg_clock_delta_adjust) {
		p->mp_ifg_clock_delta_adjust_delta =
			register_query_field(p->mp_ifg_clock_delta_adjust,
					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
	} else {
		p->mp_ifg_clock_delta_adjust_delta = NULL;
	}
	return 0;
}
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->m_administrative_block)
+		field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock) {
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+				    enable ? 1 : 0);
+	}
+}
+
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable);
+}
+
/*
 * Write a raw value to the SPEED.IFG_SPEED field.
 * Returns 0 on success, -1 when the value exceeds the accepted range.
 * NOTE(review): casting &n_speed_val to uint32_t* and writing 1 or 2 words
 * assumes field_set_val() consumes words in host (little-endian) order -
 * confirm against field_set_val().
 */
int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
{
	if (n_speed_val <=
			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
			      2));
		field_flush_register(p->mp_speed_ifg_speed);
		return 0;
	}
	return -1;
}
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	assert(n_bit_width >=
+	       22); /* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	return n_bit_width;
+}
+
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
/*
 * Program GMF_IFG_SET_CLOCK_DELTA (64-bit value written as two 32-bit words).
 * NOTE(review): casting &delta to uint32_t* assumes field_set_val() consumes
 * the words in host (little-endian) order - confirm against field_set_val().
 */
void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
{
	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
	field_flush_register(p->mp_ifg_clock_delta_delta);
}
+
/*
 * Program GMF_IFG_SET_CLOCK_DELTA_ADJUST, if the register exists in this
 * FPGA build (silently a no-op otherwise). 64-bit value written as two
 * 32-bit words; see the word-order note on nthw_gmf_set_delta().
 */
void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
{
	if (p->mp_ifg_clock_delta_adjust) {
		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
			     (uint32_t *)&delta_adjust, 2);
		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
	}
}
+
/*
 * Program GMF_IFG_MAX_ADJUST_SLACK (64-bit value written as two 32-bit
 * words; see the word-order note on nthw_gmf_set_delta()).
 */
void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
{
	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
}
+
/* Program the GMF_DEBUG_LANE_MARKER compensation value. */
void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
{
	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
}
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed))
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted))
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = ULONG_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = ULONG_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
/*
 * Permanently disable this GMF instance (used to enforce license expiry).
 * Order matters: disable first, then latch the block so that subsequent
 * nthw_gmf_set_enable() calls are ignored.
 */
void nthw_gmf_administrative_block(nthw_gmf_t *p)
{
	nthw_gmf_set_enable(p, false);
	p->m_administrative_block = true;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/*
+ * Bit masks for nthw_gmf_get_status_sticky()/nthw_gmf_set_status_sticky().
+ * Values are explicit because these are OR-ed together as a bitmap; relying
+ * on enum auto-increment would silently break if an entry were inserted.
+ */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 0x1,
+	GMF_STATUS_MASK_IFG_ADJUSTED = 0x2
+};
+
+/* State for one GMF (Generic MAC Feeder) module instance. */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_gmf;	/* resolved GMF module */
+	int mn_instance;
+	/* Register/field handles resolved during module init */
+
+	nt_register_t *mp_ctrl;	/* CTRL: enable + IFG mode bits */
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;	/* IFG speed (rate limit) */
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust;	/* optional register */
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;	/* sticky status bits */
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	nt_register_t *mp_stat_next_pkt;	/* optional register */
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;	/* optional register */
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* FPGA product parameters for IFG speed scaling */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One Si5340 Rev-D configuration write: register address/value pair. */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC instance, or NULL on allocation failure.
+ * calloc() replaces the original malloc()+memset() pair — one call, same
+ * zeroed result.
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/* Scrub and release an RMC instance; a NULL pointer is ignored. */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Bind an RMC instance to its FPGA module and resolve all register/field
+ * handles. Returns 0 on success, -1 if module instance n_instance does not
+ * exist. When p is NULL the call only probes for the module's presence.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the total port count parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL is mandatory; module_get_register() is expected to succeed */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* The remaining registers are optional (query, not get) and may be
+	 * absent in some FPGA images; the getters below return all-ones then.
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Return the current BLOCK_MAC_PORT mask (refreshed from hardware). */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/* SF RAM overflow counter; all-ones when the STATUS register is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/* Descriptor FIFO overflow counter; all-ones when STATUS is absent. */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/* DBG merge value; all-ones when the optional DBG register is absent. */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Write an explicit BLOCK_MAC_PORT mask and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block all RX traffic paths: statistics drop, keep-alive, and every MAC
+ * port. A no-op while an administrative (license) block is in force, since
+ * that state already set the blocking bits and must not be disturbed.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Lift RX blocking unless an administrative (license) block is active.
+ * The MAC-port mask keeps unused ports blocked: ~0U shifted left by the
+ * number of usable ports (NIM count when b_is_slave) leaves only the
+ * upper, unused port bits set.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Permanently block all MAC ports and latch the administrative flag, which
+ * makes nthw_rmc_block()/nthw_rmc_unblock() no-ops (license enforcement).
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/* State for one RMC module instance (RX traffic blocking control). */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_rmc;	/* resolved RMC module */
+	int mn_instance;
+
+	int mn_ports;	/* RX port count (product parameter) */
+	int mn_nims;	/* NIM count (product parameter) */
+	bool mb_is_vswitch;	/* FPGA profile is VSWITCH */
+
+	/* When set, block()/unblock() are no-ops (license enforcement) */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; may be NULL) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional; may be NULL) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional; may be NULL) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Per-adapter count of FPGA sensor result slots handed out so far.
+ * Declared static: it is file-local state (the "s_" prefix already implies
+ * it) and must not leak into the driver's global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * This function setups monitoring of AVR sensors
+ *
+ * Configures one AVR-monitored sensor to deposit its readings in the next
+ * free FPGA result slot for this adapter, and returns that slot index.
+ * NOTE(review): the leading-underscore file-scope name is in the C reserved
+ * identifier space; consider renaming together with its caller.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format packs endianness (b0-1) and signedness (b2-3) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	/* Best effort: log but still return the consumed slot index */
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Read one AVR sensor result from the FPGA and push the converted value
+ * into the sensor group. Fix: the raw-result variable is now initialized —
+ * sensor_read()'s status is not checked, so an uninitialized stack value
+ * could otherwise reach conv_func() if the SPI read leaves it untouched.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t p_sensor_result = 0;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	/* NOTE(review): sensor_read()'s return status is ignored — consider
+	 * skipping update_sensor_value() on failure.
+	 */
+	sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+/*
+ * Create and configure one AVR-monitored sensor group.
+ * Returns the new group, or NULL on allocation failure.
+ * Fix: the result of allocate_sensor() is now checked before it is
+ * dereferenced to store the FPGA slot index.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/* Hand out the next free FPGA sensor result slot for this adapter. */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	const uint8_t n_idx = s_fpga_indexes[adapter_no];
+
+	s_fpga_indexes[adapter_no] = (uint8_t)(n_idx + 1);
+	return n_idx;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Read the FPGA die temperature via the TEMPMON field and store the
+ * converted value (the SPI handle is unused for this sensor source).
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/* NOTE(review): looks like a fixed-point form of a Xilinx sysmon
+	 * ADC-to-Celsius transfer function (~ val * 1.2304 - 2731.5, i.e.
+	 * tenths of a degree?) — confirm units against the sensor consumer.
+	 */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the adapter-level FPGA temperature sensor group backed by the
+ * TEMPMON module. Returns the new group, or NULL on allocation failure.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	/* tempmon_new() may return NULL; tempmon_init() tolerates that */
+	sg->monitor = tempmon_new();
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/* Allocate an uninitialized monitor; tempmon_init() fills it in.
+ * Returns NULL (after logging) on allocation failure.
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *p_monitor =
+		malloc(sizeof(struct nt_fpga_sensor_monitor));
+
+	if (p_monitor == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+
+	return p_monitor;
+}
+
+/*
+ * Resolve the TEMPMON module, its STAT register and TEMP field into the
+ * monitor. Fix: the original logged a missing module/register but then
+ * continued, passing NULL into module_get_register()/register_get_field();
+ * now each lookup failure aborts initialization after logging.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return; /* do not pass NULL to module_get_register() */
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return; /* do not pass NULL to register_get_field() */
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+/* Level-0 SFP sensor: die temperature (name auto-filled when empty). */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level-1 SFP sensors: supply voltage, TX bias current, TX/RX power. */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* Level-0 QSFP sensor: die temperature (name auto-filled when empty). */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* Level-1 QSFP sensors: supply plus per-lane (1-4) bias and TX/RX power. */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res = 1;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = (uint16_t)*rxsz, .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
/*
 * @internal
 * @brief AVR Device Enum
 *
 * Global names for identifying an AVR device for Generation2 adapters
 */
enum ntavr_device {
	NTAVR_MAINBOARD, /* Mainboard AVR device */
	NTAVR_FRONTBOARD /* Frontboard AVR device */
};

/* Push a monitoring setup table to the AVR (see struct sensor_mon_setup16). */
int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
			nthw_spi_v3_t *s_spi);
/* Start, stop or clear AVR sensor monitoring (see enum sensor_mon_control). */
int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
/* Read one sensor result via the SPI sensor interface; result in *p_sensor_result. */
uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
/*
 * Sensor types
 * The type determines the unit of value/value_lowest/value_highest and the
 * alarm limits in struct nt_info_sensor_s.
 */
enum nt_sensor_type_e {
	NT_SENSOR_TYPE_UNKNOWN = 0,
	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
	NT_SENSOR_TYPE_NUMBER = 7, /* presumably the count of types — keep last */
};

/*
 * Generic SFP/SFP+/SFP28 sensors
 *
 * These sensors should be used instead of all adapter specific SFP sensors
 * that have been deprecated..
 */
enum nt_sensors_sfp {
	NT_SENSOR_SFP_TEMP,
	NT_SENSOR_SFP_SUPPLY,
	NT_SENSOR_SFP_TX_BIAS,
	NT_SENSOR_SFP_TX_POWER,
	NT_SENSOR_SFP_RX_POWER,
};

/*
 * Generic QSFP/QSFP+/QSFP28 sensors
 *
 * These sensors should be used instead of all adapter specific QSFP sensors
 * that have been deprecated..
 */
enum nt_sensors_qsfp {
	NT_SENSOR_QSFP_TEMP,
	NT_SENSOR_QSFP_SUPPLY,
	NT_SENSOR_QSFP_TX_BIAS1,
	NT_SENSOR_QSFP_TX_BIAS2,
	NT_SENSOR_QSFP_TX_BIAS3,
	NT_SENSOR_QSFP_TX_BIAS4,
	NT_SENSOR_QSFP_TX_POWER1,
	NT_SENSOR_QSFP_TX_POWER2,
	NT_SENSOR_QSFP_TX_POWER3,
	NT_SENSOR_QSFP_TX_POWER4,
	NT_SENSOR_QSFP_RX_POWER1,
	NT_SENSOR_QSFP_RX_POWER2,
	NT_SENSOR_QSFP_RX_POWER3,
	NT_SENSOR_QSFP_RX_POWER4,
};

typedef enum nt_sensor_type_e nt_sensor_type_t;
+
/*
 * Sensor subtypes
 * Refines the meaning of a sensor of a given nt_sensor_type_e.
 */
enum nt_sensor_sub_type_e {
	NT_SENSOR_SUBTYPE_NA = 0,
	/*
	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
	 * amplitude measured)
	 */
	NT_SENSOR_SUBTYPE_POWER_OMA,
	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
	NT_SENSOR_SUBTYPE_POWER_TOTAL
};

typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
/*
 * Sensor source
 * Bit-mask values: where on the adapter/port hierarchy a sensor resides.
 */
enum nt_sensor_source_e {
	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
	/*
	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
	 * depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_PORT =
		0x01,
	/*
	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
	 */
	NT_SENSOR_SOURCE_LEVEL1_PORT =
		0x02,
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_PORT =
		0x04, /* Level 2 sensors located in a port */
#endif
	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
		0x10, /* Level 1 sensors mounted on the adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
		0x20, /* Level 2 sensors mounted on the adapter */
#endif
};
+
/*
 * Sensor state
 */
enum nt_sensor_state_e {
	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
	NT_SENSOR_STATE_NOT_PRESENT =
		4 /* The sensor is not present, for example, SFP without diagnostics */
};

typedef enum nt_sensor_state_e nt_sensor_state_t;

/*
 * Sensor value
 */
#define NT_SENSOR_NAN \
	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */

/*
 * Master/Slave
 */
enum nt_bonding_type_e {
	NT_BONDING_UNKNOWN, /* Unknown bonding type */
	NT_BONDING_MASTER, /* Adapter is master in the bonding */
	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
};
+
/* Generic board sensor ids common to all adapters */
enum nt_sensors_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
};

/*
 * Adapter types
 * Plain enumerators identify a board; the trailing 4GARCH values are bit
 * flags OR'ed onto an adapter type, so no normal values may follow them.
 */
enum nt_adapter_type_e {
	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X100 =
		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
	/*  */
	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
	/*  */
	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
#ifndef DOXYGEN_INTERNAL_ONLY
	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
#endif
	NT_ADAPTER_TYPE_4GARCH_HAMOA =
		(1U
		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
	NT_ADAPTER_TYPE_4GARCH =
		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
};
+
/* The NT200E3 adapter sensor id's */
typedef enum nt_sensors_adapter_nt200_e3_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
	NT_SENSOR_NT200E3_MCU_TEMP,
	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_NT200E3_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200E3_NIM_POWER,

	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
} nt_sensors_adapter_nt200_e3_t;

/*
 * The following sensors are deprecated - generic types should be used instead
 * (see enum nt_sensors_qsfp above).
 * The NIM temperature sensor must be the one with the lowest sensor_index
 * (enum value) in order to be shown by the monitoring tool in port mode
 */
enum nt_sensors_port_nt200_e3_2_e {
	/* Public sensors */
	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */

	/* Diagnostic sensors (Level 1) */
	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/* Identity conversion: the raw word is already a signed 16-bit NT value. */
int null_signed(uint32_t p_sensor_result)
{
	int16_t raw = (int16_t)p_sensor_result;

	return raw;
}

/* Identity conversion: the raw word is already an unsigned 16-bit NT value. */
int null_unsigned(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vch value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.015 V (PRESCALE is accounted for), i.e. 15 mV/LSB
 * ******************************************************************************
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	const uint32_t mv_per_lsb = 15;

	return p_sensor_result * mv_per_lsb; /* NT unit: 1mV */
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vin value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.0125 V, i.e. 12.5 mV/LSB; computed as (x * 25) / 2
 * to stay in integer arithmetic.
 * ******************************************************************************
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	uint32_t scaled = p_sensor_result * 25;

	return scaled / 2; /* NT unit: 1mV */
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Tj value to Napatech internal representation
 * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
 *                      = ReadVal * 5K
 * ******************************************************************************
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * 2730 is used instead of the more exact 2732 (0 degC in 0.1 K):
	 * the sensor steps in 5-degree increments, so the rounder offset
	 * keeps the reported values on natural step boundaries.
	 */
	const int zero_c_in_tenth_kelvin = 2730;

	return p_sensor_result * 50 - zero_c_in_tenth_kelvin; /* NT unit: 0.1C */
}
+
/*
 * ******************************************************************************
 * Decoder for the Linear Technology "Linear_5s_11s" fixed-point format.
 * The encoded word holds N = b[15:11], a 5-bit two's complement exponent,
 * and Y = b[10:0], an 11-bit two's complement mantissa; the decoded value
 * is Y * 2**N.  The multiplier rescales the result to Napatech units.
 * ******************************************************************************
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	int mantissa = value & 0x07FF;
	int expo = (value >> 11) & 0x1F;

	/* Sign-extend the 11-bit mantissa (bit 10 is the sign bit) */
	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent (bit 4 is the sign bit) */
	if (expo & 0x10)
		expo -= 0x20;

	/* Scale to NT units first, then apply the power-of-two exponent */
	mantissa *= multiplier;

	if (expo > 0)
		mantissa *= 1 << expo;
	else if (expo < 0)
		mantissa /= 1 << -expo;

	return mantissa;
}

/*
 * ******************************************************************************
 * LTM4676: junction temperature from Linear_5s_11s format.  NT unit: 0.1C.
 * NOTE(review): the result is truncated through uint16_t exactly as in the
 * original code, which wraps negative values — confirm sub-zero temperatures
 * are not expected here.
 * ******************************************************************************
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	return (uint16_t)conv5s_11s(p_sensor_result, 10);
}
+
/*
 * ******************************************************************************
 * For MP2886a: Convert a read Tj value to Napatech internal representation
 * MPS-2886p: READ_TEMPERATURE (register 0x8Dh) is a 2-byte, unsigned
 * integer, so only the low 16 bits of the raw word are significant.
 * ******************************************************************************
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * ******************************************************************************
 * For DS1775: Convert a read temperature value to Napatech internal
 * representation.  NT unit: 0.1 deg, Native unit: 1/256 C.
 * ******************************************************************************
 */
int ds1775_t(uint32_t p_sensor_result)
{
	uint32_t tenths = (p_sensor_result * 10) / 256;

	return tenths;
}
+
/*
 * ******************************************************************************
 * For FAN: Convert a tick count to RPM.  NT unit: RPM.
 * NOTE(review): the original header said 2 ticks/revolution, but the code
 * divides the per-minute tick count by 4 — presumably both signal edges are
 * counted; confirm against the tachometer hardware.
 * ******************************************************************************
 */
int fan(uint32_t p_sensor_result)
{
	uint32_t ticks_per_minute = p_sensor_result * 60U;

	return ticks_per_minute / 4;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/* Alarm handling mode for a sensor (stored in struct nt_adapter_sensor::alarm) */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM,
	NT_SENSOR_LOG_ALARM,
	NT_SENSOR_DISABLE_ALARM,
};
+
/*
 * Sensor Class types
 * Coarse grouping of sensors by the component being measured.
 */
enum nt_sensor_class_e {
	NT_SENSOR_CLASS_FPGA =
		0, /* Class for FPGA based sensors e.g FPGA temperature */
	NT_SENSOR_CLASS_MCU =
		1, /* Class for MCU based sensors e.g MCU temperature */
	NT_SENSOR_CLASS_PSU =
		2, /* Class for PSU based sensors e.g PSU temperature */
	NT_SENSOR_CLASS_PCB =
		3, /* Class for PCB based sensors e.g PCB temperature */
	NT_SENSOR_CLASS_NIM =
		4, /* Class for NIM based sensors e.g NIM temperature */
	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
};

typedef enum nt_sensor_class_e nt_sensor_class_t;
+
/*
 * Port of the sensor class
 * Runtime state of one sensor instance (allocated by allocate_sensor()).
 */
struct nt_adapter_sensor {
	uint8_t m_adapter_no;
	uint8_t m_intf_no;
	uint8_t fpga_idx; /* for AVR sensors */
	enum sensor_mon_sign si; /* How the raw sensor word is interpreted */
	struct nt_info_sensor_s info; /* Public state: type, value, extremes, name */
	enum nt_sensor_event_alarm_e alarm;
	bool m_enable_alarm;
};

/* FPGA register handles needed to read a sensor directly from the FPGA. */
struct nt_fpga_sensor_monitor {
	nt_fpga_t *fpga;
	nt_module_t *mod;

	nt_register_t *reg;
	nt_field_t **fields;
	uint8_t fields_num; /* Number of entries in fields[] */
};
+
/*
 * Sensor description.
 * Describe the static behavior of the sensor.
 */
struct nt_adapter_sensor_description {
	enum nt_sensor_type_e type; /* Sensor type. */
	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
	unsigned int index; /* Sensor group index. */
	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
	char name[20]; /* Sensor name. */
};

/* One node in a singly-linked list of sensors; owns sensor and monitor. */
struct nt_sensor_group {
	struct nt_adapter_sensor *sensor;
	struct nt_fpga_sensor_monitor *monitor;
	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* Refresh the value */

	/* conv params are needed to call current conversion functions */
	int (*conv_func)(uint32_t p_sensor_result); /* Raw word -> NT units */
	/* i2c interface for NIM sensors */

	struct nt_sensor_group *next; /* Next group in the list, or NULL */
};
+
/* Reset all pointers in a freshly allocated group (no allocation is done). */
void init_sensor_group(struct nt_sensor_group *sg);

/* Store a new reading and track the lowest/highest values seen. */
void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);

/* Free the group and the sensor/monitor objects it owns. */
void sensor_deinit(struct nt_sensor_group *sg);

/* getters */
int32_t get_value(struct nt_sensor_group *sg);
int32_t get_lowest(struct nt_sensor_group *sg);
int32_t get_highest(struct nt_sensor_group *sg);
char *get_name(struct nt_sensor_group *sg);

/*
 * Allocators: both return a heap object owned by the caller (released via
 * sensor_deinit() once attached to a group), or NULL on allocation failure.
 */
struct nt_adapter_sensor *
allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
		enum sensor_mon_sign si);
struct nt_adapter_sensor *
allocate_sensor_by_description(uint8_t adapter_or_port_index,
			       enum nt_sensor_source_e ssrc,
			       struct nt_adapter_sensor_description *descr);

/* conversion functions (raw sensor word -> NT units; implemented in sensors.c) */
int null_signed(uint32_t p_sensor_result);
int null_unsigned(uint32_t p_sensor_result);
int exar7724_tj(uint32_t p_sensor_result);
int max6642_t(uint32_t p_sensor_result);
int ds1775_t(uint32_t p_sensor_result);
int ltm4676_tj(uint32_t p_sensor_result);
int exar7724_vch(uint32_t p_sensor_result);
int exar7724_vin(uint32_t p_sensor_result);
int mp2886a_tj(uint32_t p_sensor_result);
int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
#define NT_INFO_SENSOR_NAME 50 /* Max name length; name[] holds one extra byte for the NUL */
struct nt_info_sensor_s {
	enum nt_sensor_source_e
	source; /* The source of the sensor (port or adapter on which the sensor resides) */
	/*
	 * The source index - the adapter number for adapter sensors and port number for port
	 * sensors
	 */
	uint32_t source_index;
	/*
	 * The sensor index within the source index (sensor number on the adapter or sensor number
	 * on the port)
	 */
	uint32_t sensor_index;
	enum nt_sensor_type_e type; /* The sensor type */
	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
	int32_t value; /* The current value */
	int32_t value_lowest; /* The lowest value registered */
	int32_t value_highest; /* The highest value registered */
	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name */
	enum nt_adapter_type_e
	adapter_type; /* The adapter type where the sensor resides */
};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	/* MCU (Micro Controller Unit) temperature sensor */
	NT_SENSOR_NT200A02_MCU_TEMP,
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A01 level 0,1 board sensors */
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-31 12:23   ` Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate and zero-initialize a CAT module context.
+ * Returns NULL on allocation failure; caller owns the object and must
+ * release it with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step, replacing malloc+memset */
+	struct cat_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Release a CAT module context previously obtained from cat_nthw_new().
+ * Accepts NULL (no-op). The object is scrubbed before being freed so stale
+ * register/field pointers cannot be reused by accident.
+ */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug-mode setting to the underlying CAT FPGA module handle. */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+/*
+ * Resolve all CAT module registers and fields for FPGA instance 'n_instance'
+ * and cache the handles in *p.
+ *
+ * If p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 when the CAT module instance does not exist.
+ * Fields looked up with register_query_field()/module_query_register() are
+ * optional and may legitimately be NULL on FPGA variants lacking them.
+ */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	/* -1 (the default) means the KM_IF_CNT product parameter is absent */
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	/* No KM_IF_CNT parameter: single, unnumbered KM interface field set */
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	/* NOTE(review): same condition as '== -1' above, written differently */
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	/* Optional CFN fields — query may return NULL on older FPGA variants */
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	/* Optional KCC/CCE/CCS registers: NULL-checked before field lookup */
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN table address (CTRL ADR field) for subsequent access. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN CTRL CNT field. The original function name 'r' is clearly a
+ * mangled identifier; renamed to cat_nthw_cfn_cnt to match the naming of
+ * every sibling accessor in this file (select/cnt pairs per table).
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/*
+ * CFN DATA field setters: each call writes one field of the CFN shadow
+ * register; cat_nthw_cfn_flush() below flushes CTRL and DATA (presumably
+ * pushing the shadow to hardware — confirm register_flush() semantics).
+ * Setters that assert their field pointer target optional fields that are
+ * only resolved (non-NULL) on some FPGA variants; see cat_nthw_init().
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/* Optional field (resolved via register_query_field) — assert guards NULL. */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* Multi-word field: writes mn_words 32-bit words from 'val'. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR only exists when the KM_IF_CNT parameter is present; see init. */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Flush the CFN CTRL and DATA shadow registers. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE (key category enable) shadow-register setters; `index` selects one of
+ * the two KM interface banks (see mp_kce_*[2] in struct cat_nthw).
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+/* Push the selected bank's KCE CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+/*
+ * KCS (key category select) shadow-register setters; `index` selects one of
+ * the two KM interface banks.
+ */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+/* Push the selected bank's KCS CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+/*
+ * FTE (flow type enable) shadow-register setters; `index` selects one of
+ * the two KM interface banks.
+ */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+/* Push the selected bank's FTE CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE (category test enable) shadow-register setters. Each enable bit gates
+ * one downstream flow-filter module (COR, HSH, QSL, ...). Setters that
+ * assert their field pointer target fields not present on all FPGA images
+ * (NOTE(review): presumably image/version dependent — confirm against the
+ * FPGA register map).
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Push the CTE CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS (category test select) shadow-register setters. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+/* Push the CTS CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT (color table) shadow-register setters. */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	/* NFV_SB field is not resolved on all FPGA images; assert guards
+	 * calls on images where it is absent.
+	 */
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+/* Push the COT CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT (color control table) shadow-register setters. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+/* Push the CCT CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO (extractor offset) shadow-register setters. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+/* Offset is signed (int32_t); stored through the same 32-bit field setter. */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+/* Push the EXO CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK shadow-register setters. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	/* No named field handle for RCK data (struct has no mp_rck_data_*
+	 * fields): write the whole register and mark its shadow dirty so a
+	 * later flush pushes it to hardware.
+	 */
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+/* Push the RCK CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN (frame length test) shadow-register setters. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+/* Push the LEN CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC (key category cam) shadow-register setters. Every accessor asserts
+ * its handle: the whole KCC register set is not resolved on all FPGA
+ * images (NOTE(review): presumably optional per image — confirm against
+ * the FPGA register map).
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* Key is a 2-word (64-bit) field; `val` must point at two 32-bit words. */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+/* Push the KCC CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+/*
+ * CCE shadow-register setters. Like KCC, the CCE register set is not
+ * resolved on all FPGA images, hence the asserts on every handle.
+ */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+/* Push the CCE CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/*
+ * CCS shadow-register setters. The register set is not resolved on all
+ * FPGA images, hence the asserts.
+ */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generates cat_nthw_ccs_data_<name>(): a guarded setter for one CCS DATA
+ * field, mirroring the hand-written setters above.
+ * NOTE(review): the ';' after each invocation below is a stray file-scope
+ * semicolon after a function definition — accepted by GCC/Clang but not
+ * strict ISO C before C23; consider dropping it or absorbing it in the
+ * macro.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en);
+CATNTHW_CCS_SET(cor);
+CATNTHW_CCS_SET(hsh_en);
+CATNTHW_CCS_SET(hsh);
+CATNTHW_CCS_SET(qsl_en);
+CATNTHW_CCS_SET(qsl);
+CATNTHW_CCS_SET(ipf_en);
+CATNTHW_CCS_SET(ipf);
+CATNTHW_CCS_SET(slc_en);
+CATNTHW_CCS_SET(slc);
+CATNTHW_CCS_SET(pdb_en);
+CATNTHW_CCS_SET(pdb);
+CATNTHW_CCS_SET(msk_en);
+CATNTHW_CCS_SET(msk);
+CATNTHW_CCS_SET(hst_en);
+CATNTHW_CCS_SET(hst);
+CATNTHW_CCS_SET(epp_en);
+CATNTHW_CCS_SET(epp);
+CATNTHW_CCS_SET(tpe_en);
+CATNTHW_CCS_SET(tpe);
+CATNTHW_CCS_SET(rrb_en);
+CATNTHW_CCS_SET(rrb);
+CATNTHW_CCS_SET(sb0_type);
+CATNTHW_CCS_SET(sb0_data);
+CATNTHW_CCS_SET(sb1_type);
+CATNTHW_CCS_SET(sb1_data);
+CATNTHW_CCS_SET(sb2_type);
+CATNTHW_CCS_SET(sb2_data);
+
+/* Push the CCS CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/* Fixed: prototype was mangled to "void r(...)"; every other register group
+ * in this module pairs _select with _cnt (kce/kcs/fte/cte/... below), and
+ * the CFN CTRL register has both ADDR and CNT field handles.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 — key category enable, one bank per KM interface */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 — key category select, one bank per KM interface */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 — flow type enable, one bank per KM interface */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE — category test enable */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS — category test select */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT — color table */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT — color control table */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO — extractor offset */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN — frame length test */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC — optional register set; accessors assert presence */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE — optional register set; accessors assert presence */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS — optional register set; accessors assert presence */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Per-instance handle for one CAT (categorizer) FPGA module: register and
+ * field shadow pointers resolved once at init time and used by the
+ * accessors above. Pointers to optional registers/fields (KCC, CCE, CCS,
+ * some CTE/COT/CFN fields) may be NULL on images that lack them.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN — categorizer function table */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE/KCS/FTE — two banks, one per KM interface */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE — category test enable */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS — category test select */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT — color table */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT — color control table */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO — extractor offset */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK — data register written whole (no field handles) */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN — frame length test */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC — optional register set */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE — optional register set */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS — optional register set */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward debug-mode setting to the underlying CSU module handle. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized CSU handle.
+ * Returns NULL on allocation failure; release with csu_nthw_delete().
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc replaces malloc + memset: zeroed in one call */
+	struct csu_nthw *p = calloc(1, sizeof *p);
+
+	return p;
+}
+
+/*
+ * Free a CSU handle. The struct is wiped before free so stale register
+ * pointers cannot be used after release; NULL is accepted as a no-op.
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a CSU handle to FPGA instance n_instance and resolve its RCP
+ * register/field handles.
+ *
+ * Probe mode: when p is NULL, only checks whether the module instance
+ * exists (returns 0 if present, -1 if not).
+ * Returns 0 on success, -1 if the instance is absent.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* RCP control (address/count) and data (per-layer checksum commands) */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/*
+ * CSU RCP shadow-register setters: stage checksum-update commands per
+ * layer; csu_nthw_rcp_flush() pushes them to hardware.
+ */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Push the RCP CTRL and DATA shadow registers to the FPGA. */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one CSU (checksum update) FPGA module instance, holding the
+ * resolved register and field handles for RCP (recipe) programming.
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	/* RCP control register: recipe address and write count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	/* RCP data register: checksum commands for outer/inner L3 and L4 */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+/* Lifecycle: allocate a handle, bind it to FPGA instance n_instance, free it. */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/*
+ * RCP recipe programming: select/cnt set the control register, the *_cmd
+ * functions set checksum commands in the data register, and rcp_flush()
+ * writes both shadow registers to hardware.
+ */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized FLM handle.
+ * Returns NULL on allocation failure; release with flm_nthw_delete().
+ */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair. */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	/* Scrub the handle before returning it to the heap; NULL is a no-op. */
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	/* Forward the debug-mode setting to the underlying FLM FPGA module. */
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+/*
+ * Look up FLM module instance n_instance in the FPGA and resolve all of its
+ * register and field handles into @p.
+ *
+ * Probe mode: when called with p == NULL the function only reports whether
+ * the module instance exists (0 if present, -1 if not) without touching
+ * any state.  Returns 0 on success, -1 if the instance does not exist.
+ */
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only call: report module presence without initializing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	/* FLM_CONTROL register and fields */
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	/*
+	 * PDS/PIS are resolved with register_query_field() and may be NULL on
+	 * FPGA images that lack them; the accessors assert before use (see
+	 * flm_nthw_control_pds()/flm_nthw_control_pis()).
+	 */
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	/* FLM_STATUS register and fields */
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	/* Timeout, scrub interval and load report registers */
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	/* FLM_PRIO: four limit/flow-type field pairs */
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	/* FLM_PST control/data register pair */
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	/* FLM_RCP recipe control/data register pair */
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Registers accessed via RAB DMA (see the *_data_update/_flush helpers) */
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	/*
+	 * Statistics counters.  Registers resolved with module_query_register()
+	 * and register_query_field() may be NULL on FPGA images without them;
+	 * their accessors assert non-NULL before use.
+	 */
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM_CONTROL field setters.  Each call only updates the shadow value of
+ * its field; flm_nthw_control_flush() writes the register to hardware.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	/* Optional field (register_query_field in flm_nthw_init); must exist. */
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	/* Optional field (register_query_field in flm_nthw_init); must exist. */
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write the FLM_CONTROL shadow register to hardware. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM_STATUS field accessors.  When get != 0 the shadow field value is
+ * copied into *val; the critical/panic/crcerr accessors additionally write
+ * *val to the field when get == 0 (the other fields are read-only here).
+ * flm_nthw_status_update() refreshes the shadows from hardware and
+ * flm_nthw_status_flush() writes the register back.
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * Timeout, scrub-interval and load-report registers: each has a shadow
+ * setter paired with a *_flush() that writes the register to hardware.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * FLM_PRIO shadow setters (four limit/flow-type pairs);
+ * flm_nthw_prio_flush() writes the register to hardware.
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * FLM_PST programming: select/cnt address the control register, bp/pp/tp
+ * set the data fields, and flm_nthw_pst_flush() writes both shadow
+ * registers to hardware.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * FLM_RCP recipe programming: select/cnt address the control register, the
+ * remaining setters fill the data-register shadow fields, and
+ * flm_nthw_rcp_flush() writes both shadow registers to hardware.
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	/* MASK is a wide field: val must point to 10 32-bit words. */
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the two BUF_CTRL words through a RAB DMA transaction and unpack the
+ * buffer counters:
+ *   word 0, bits [15:0]  -> *lrn_free  (free LRN buffer words)
+ *   word 0, bits [31:16] -> *inf_avail (available INF words)
+ *   word 1, bits [15:0]  -> *sta_avail (available STA words)
+ * Returns 0 on success, otherwise the RAB DMA error code; the output
+ * parameters are only written on success.
+ *
+ * NOTE(review): on commit failure the function returns without any explicit
+ * transaction cleanup — confirm nthw_rac_rab_dma_commit() releases the
+ * transaction on error.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* DMA ring buffer: size is a power of two, so mask wraps the index. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write @word_count words of learn data to LRN_DATA through a RAB DMA
+ * transaction, then read back BUF_CTRL to refresh the buffer counters:
+ *   word 0, bits [15:0]  -> *lrn_free, word 0, bits [31:16] -> *inf_avail,
+ *   word 1, bits [15:0]  -> *sta_avail.
+ * Returns 0 on success, otherwise the RAB DMA error code; the output
+ * parameters are only written on success.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/*
+	 * Propagate the begin-transaction error code directly instead of a
+	 * generic -1, matching flm_nthw_buf_ctrl_update() and the other DMA
+	 * helpers in this file.
+	 */
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* DMA ring buffer: size is a power of two, so mask wraps the index. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read @word_count words from INF_DATA through a RAB DMA transaction into
+ * @data, then read back BUF_CTRL to refresh *lrn_free/*inf_avail/*sta_avail
+ * (same word layout as flm_nthw_buf_ctrl_update()).
+ * Returns 0 on success, otherwise the RAB DMA error code; the output
+ * parameters are only written on success.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		/* INF count lives in the upper 16 bits of BUF_CTRL word 0. */
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer, wrapping via the size mask. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read @word_count words from STA_DATA through a RAB DMA transaction into
+ * @data, then read back BUF_CTRL to refresh *lrn_free/*inf_avail/*sta_avail
+ * (same word layout as flm_nthw_buf_ctrl_update()).
+ * Returns 0 on success, otherwise the RAB DMA error code; the output
+ * parameters are only written on success.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		/* STA count lives in BUF_CTRL word 1. */
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring buffer, wrapping via the size mask. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM statistics counter accessors.  Each counter has a *_cnt() function
+ * that copies the shadow field value into *val when get is non-zero, and a
+ * *_update() function that refreshes the shadow register from hardware.
+ * Counters resolved with module_query_register()/register_query_field() in
+ * flm_nthw_init() may be absent on some FPGA images; those accessors
+ * assert non-NULL before use.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn Done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* Allocate and zero-initialize the context in one step;
+	 * calloc() replaces the malloc() + memset() pair.
+	 * Returns NULL on allocation failure.
+	 */
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p == NULL)
+		return;
+	(void)memset(p, 0, sizeof(*p));	/* scrub context before release */
+	free(p);
+}
+
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hfu = p_mod;	/* reuse module queried above; avoids a redundant lookup */
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HSH module handle. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized hsh_nthw context.
+ * Returns NULL on allocation failure; the caller owns the object and
+ * must release it with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc+memset) */
+	struct hsh_nthw *p = calloc(1, sizeof(struct hsh_nthw));
+
+	return p;
+}
+
+/* Scrub and free a context from hsh_nthw_new(); NULL is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the HSH (hash) RCP control/data registers and fields for FPGA
+ * instance @n_instance, then write an all-zero default recipe.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for the module's presence.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: query (not get), so absence yields NULL. */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	/* Write defaults to recipe 0 only (ADR=0, CNT=1). */
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* NOTE(review): HSH_TYPE default is 31 while all other fields are 0 —
+	 * presumably a "disabled/auto" encoding; confirm against register spec.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each writes one field of the recipe selected via
+ * hsh_nthw_rcp_select()/hsh_nthw_rcp_cnt().
+ * NOTE(review): values appear to be staged and only reach hardware on
+ * hsh_nthw_rcp_flush() — inferred from the register_flush() pairing;
+ * confirm against nthw_fpga_model.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* Multi-word field: @val must hold mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* Multi-word field: @val must hold 10 32-bit words. */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional in the FPGA image; silently ignored if absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Push the staged CTRL and DATA register contents to the device. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Register/field handle cache for one HSH (hash) FPGA module instance. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register (recipe address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-parameter fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	/* Optional: NULL when the FPGA image lacks AUTO_IPV4_MASK */
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HST module handle. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/* Allocate a zero-initialized hst_nthw context (NULL on failure). */
+struct hst_nthw *hst_nthw_new(void)
+{
+	struct hst_nthw *p = malloc(sizeof(*p));
+
+	if (p != NULL)
+		memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a context from hst_nthw_new(); NULL is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the HST (header stripper) RCP control/data registers and fields
+ * for FPGA instance @n_instance.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for the module's presence.
+ * NOTE(review): unlike hsh_nthw_init(), no default recipe is written
+ * here — confirm callers always program a recipe before use.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/* RCP */
+/*
+ * RCP field setters: each writes one field of the recipe selected via
+ * hst_nthw_rcp_select()/hst_nthw_rcp_cnt(); hst_nthw_rcp_flush() pushes
+ * the staged values to the device.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Push the staged CTRL and DATA register contents to the device. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field handle cache for one HST (header stripper) module instance. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register (recipe address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-parameter fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IFR module handle. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ifr_nthw context (NULL on failure). */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	struct ifr_nthw *p = malloc(sizeof(*p));
+
+	if (p != NULL)
+		memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free a context from ifr_nthw_new(); NULL is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the IFR (interface recipe) RCP registers and fields for FPGA
+ * instance @n_instance.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * Calling with p == NULL only probes for the module's presence.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle looked up above instead of issuing a second,
+	 * redundant fpga_query_module() call (matches hsh/hst init).
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each writes one field of the recipe selected via
+ * ifr_nthw_rcp_select()/ifr_nthw_rcp_cnt(); ifr_nthw_rcp_flush() pushes
+ * the staged values to the device.
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the MTU field of the currently selected IFR recipe. */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Bug fix: the assert previously checked mp_rcp_data_en instead of
+	 * the field actually written below.
+	 */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Push the staged CTRL and DATA register contents to the device. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Register/field handle cache for one IFR FPGA module instance. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register (recipe address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Collapse a presence/count product parameter to a 0/1 flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return (val != 0) ? 1u : 0u;
+}
+
+/* Allocate a zero-initialized info_nthw context (NULL on failure). */
+struct info_nthw *info_nthw_new(void)
+{
+	struct info_nthw *p = malloc(sizeof(*p));
+
+	if (p != NULL)
+		memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a context from info_nthw_new(); NULL is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Snapshot all FPGA product parameters relevant to the flow filter into
+ * the info_nthw context. Capabilities of absent modules are forced to 0
+ * by multiplying with the corresponding *_present flag.
+ *
+ * Always returns 0.
+ * NOTE(review): unlike the sibling *_nthw_init() functions this one does
+ * not accept p == NULL for probing — confirm callers never pass NULL.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* Presence flags, clamped to 0/1 so they can scale counts below. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is usable only when all five sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types are shared between KM and FLM; scale by either present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is limited by the smaller of the Rx/Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): default -1 is a "not present" sentinel here — the
+	 * struct field types are declared elsewhere; confirm signedness.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Capability getters: each returns one value captured from the FPGA
+ * product parameters during info_nthw_init().
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): m_cat_km_if_m0/m1 are initialized with default -1; the
+ * unsigned return wraps that sentinel — confirm callers handle it.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Guard renamed: identifiers beginning with a double underscore are
+ * reserved for the implementation (ISO C 7.1.3).
+ */
+#ifndef FLOW_NTHW_INFO_H_
+#define FLOW_NTHW_INFO_H_
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Lifetime: allocate, bind to an FPGA module instance, release. */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Read-only accessors for the capability counts cached below. */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/* Cache of FPGA capability/resource counts, filled at init time. */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* FLOW_NTHW_INFO_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Enable/disable debug tracing for this IOA module instance. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized IOA context; returns NULL on failure. */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zeroes the struct, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/* Scrub and release an IOA context; NULL is accepted as a no-op. */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		/* clear stale register/field pointers before freeing */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind all IOA register/field handles for FPGA module instance n_instance.
+ *
+ * Returns 0 on success, -1 when the module instance is not present.
+ * Passing p == NULL turns the call into a pure presence probe of the
+ * module (0 if found, -1 if not) without touching any state.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	/* NOTE(review): no visible <assert.h> include in this file;
+	 * presumably provided via nthw_drv.h - confirm.
+	 */
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control/data registers and their fields (mandatory) */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 *
+		 * module_query_register() (unlike module_get_register()) may
+		 * return NULL when the register is absent, hence the checks;
+		 * the corresponding field pointers are NULLed so the setters
+		 * below can skip the write.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/* RCP: single-field setters for the recipe registers; values take effect
+ * in hardware when ioa_nthw_rcp_flush() is called.
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Push the staged RCP ctrl+data values to hardware. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/* EPP: these registers are optional (see ioa_nthw_init); every accessor
+ * guards against NULL so calls are silent no-ops when the FPGA lacks them.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Guard renamed: identifiers beginning with a double underscore are
+ * reserved for the implementation (ISO C 7.1.3).
+ */
+#ifndef FLOW_NTHW_IOA_H_
+#define FLOW_NTHW_IOA_H_
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/* Register/field handles for one IOA FPGA module instance.
+ * The mp_roa_epp_* members are optional and may stay NULL (older FPGAs).
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+/* Lifetime: allocate, bind to an FPGA module instance, release. */
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module (optional registers; setters are no-ops when absent) */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* FLOW_NTHW_IOA_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write "val" to the optional field "_a" only when the field exists
+ * (non-NULL); fields looked up with register_query_field() may be absent
+ * on some FPGA versions.  "_a" expands exactly once into a local, and
+ * both macro arguments are parenthesized in the expansion.
+ */
+#define CHECK_AND_SET_VALUE(_a, val)                     \
+	do {                                             \
+		__typeof__(_a) field_ = (_a);            \
+		if (field_)                              \
+			field_set_val32(field_, (val));  \
+	} while (0)
+
+/* Enable/disable debug tracing for this KM module instance. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/* Allocate a zero-initialized KM context; returns NULL on failure. */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zeroes the struct, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/* Scrub and release a KM context; NULL is accepted as a no-op. */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		/* clear stale register/field pointers before freeing */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind all KM register/field handles for FPGA module instance n_instance.
+ *
+ * Mandatory fields use register_get_field(); optional/version-dependent
+ * fields use register_query_field(), which may leave the pointer NULL
+ * (callers guard via CHECK_AND_SET_VALUE or explicit checks).
+ *
+ * Returns 0 on success, -1 when the module instance is not present.
+ * Passing p == NULL turns the call into a pure presence probe of the
+ * module (0 if found, -1 if not).
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	/* NOTE(review): no visible <assert.h> include in this file;
+	 * presumably provided via nthw_drv.h - confirm.
+	 */
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA)
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Presence of SW8/DW8 "dyn" decides the layout branch further down. */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines: fall back to the QW/SW "B" naming used
+		 * by earlier FPGA versions when the DW names are absent.
+		 */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ : DW8/DW10 layout; otherwise SW8/SW9 layout when present */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/* RCP */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+};
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+};
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+};
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+};
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+};
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+};
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+};
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+};
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+};
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+};
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+};
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+};
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+};
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+};
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+};
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+};
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+};
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+};
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+};
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+};
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+};
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+};
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+};
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+};
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+};
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+};
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+};
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+};
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+};
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+};
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}; /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+};
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+};
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+};
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+};
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+};
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+};
+
+/*
+ * KM RCP (recipe) data-field setters.
+ *
+ * Each setter only stages a value in the RCP_DATA shadow register; nothing
+ * reaches hardware until km_nthw_rcp_flush() flushes the CTRL/DATA register
+ * pair.  Fields written through CHECK_AND_SET_VALUE() are not present in
+ * every FPGA version -- the macro presumably NULL-checks the field first
+ * (verify against the macro definition).
+ *
+ * Note: the stray ';' after each function body has been dropped; an empty
+ * declaration at file scope is not valid ISO C before C23.
+ */
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+/* FLOW_SET is an optional field in some FPGA versions */
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+/* the *_ofs setters take a signed offset (int32_t) */
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+/* write the staged RCP CTRL and DATA shadow registers to hardware */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM
+ *
+ * Stage CAM entry words (W0..W5) and flow types (FT0..FT5); the values are
+ * written to hardware by km_nthw_cam_flush().  Stray ';' after each function
+ * body removed (not valid ISO C at file scope before C23).
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* write the staged CAM CTRL and DATA shadow registers to hardware */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM (stray ';' after function bodies removed; invalid pre-C23) */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* stage a 3-word TCAM entry value */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+/* write the staged TCAM CTRL and DATA shadow registers to hardware */
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI (stray ';' after function bodies removed; invalid pre-C23) */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+/* write the staged TCI CTRL and DATA shadow registers to hardware */
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ (stray ';' after function bodies removed; invalid pre-C23) */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* BANK_MASK is an optional field in some FPGA versions */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* stage a 3-word qualifier value; to use in v4 */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+/* write the staged TCQ CTRL and DATA shadow registers to hardware */
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized PDB handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct pdb_nthw));
+}
+
+/*
+ * Release a PDB handle previously obtained from pdb_nthw_new().
+ * NULL is accepted and ignored.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* clear the handle before freeing, as the original code did */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PDB handle to FPGA module instance @n_instance and resolve all
+ * RCP and CONFIG register/field handles.
+ *
+ * Calling with @p == NULL only probes for the module's existence.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* probe-only mode: report module presence without binding */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* query (not get): PCAP_KEEP_FCS is optional and may resolve to NULL;
+	 * its setter checks for NULL before writing
+	 */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized QSL handle.
+ * Returns NULL on allocation failure; caller owns the handle and must
+ * release it with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct qsl_nthw));
+}
+
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a QSL handle to FPGA module instance @n_instance and resolve the
+ * RCP, QST, QEN, UNMQ and (when present) LTX register/field handles.
+ *
+ * Calling with @p == NULL only probes for the module's existence.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* probe-only mode: report module presence without binding */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* query (not get): these fields are optional and may be NULL;
+	 * their setters check for NULL before writing
+	 */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		/* LTX absent: leave handles NULL so the LTX accessors no-op */
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Placeholder: no per-index setup is currently required for QSL. */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP */
+/* stray ';' after body removed -- invalid at file scope pre-C23 */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* guard the field actually written (was mp_ltx_addr by mistake);
+	 * LTX handles are NULL on FPGA v0.7+ where the register is gone
+	 */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* LTX registers are absent from FPGA v0.7+ and the handles are NULL;
+	 * guard like every other LTX accessor instead of flushing blindly
+	 */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN: queue-enable table; values are staged and written by
+ * qsl_nthw_qen_flush()
+ */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+/* write the staged QEN CTRL and DATA shadow registers to hardware */
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ: unmatched-packet queue table; values are staged and written by
+ * qsl_nthw_unmq_flush()
+ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+/* write the staged UNMQ CTRL and DATA shadow registers to hardware */
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Shadow state for one QSL (queue selection logic) module instance:
+ * the FPGA handle plus cached register/field handles resolved at
+ * qsl_nthw_init() time. Fields resolved with register_query_field()
+ * may be NULL when the FPGA image does not implement them.
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP: recipe table */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST: queue state table (tx_port/lre/tci/ven are optional) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN: queue enable table */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ: unmatched-queue table */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Set the debug mode of the underlying RMC module. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rmc_nthw; returns NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	struct rmc_nthw *p = malloc(sizeof(struct rmc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free an rmc_nthw; safe to call with NULL. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an RMC module instance and resolve its register/field handles.
+ * Calling with p == NULL only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* RPP_SLICE is optional: query (may yield NULL) rather than get */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-instance setup required for RMC; kept for API symmetry. */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL register field setters. */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* RPP_SLICE is an optional field; skip the write when absent (NULL). */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Flush the CTRL register (write shadow value out). */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Shadow state for one RMC module instance: FPGA handle plus cached
+ * CTRL register/field handles resolved at rmc_nthw_init() time.
+ * mp_ctrl_block_rpp_slice is optional and may be NULL.
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	/* CTRL */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Set the debug mode of the underlying ROA module. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized roa_nthw; returns NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	struct roa_nthw *p = malloc(sizeof(struct roa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free a roa_nthw; safe to call with NULL. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a ROA module instance and resolve its register/field handles.
+ * Calling with p == NULL only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR: tunnel header table accessors. */
+
+/* Select the TUN HDR table index to operate on. */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+/* Set the TUN HDR entry count for the subsequent access. */
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* Write the tunnel header data; val must point to 4 x 32-bit words. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Flush the TUN HDR CTRL and DATA registers. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG: tunnel configuration table field setters. */
+
+/* Select the TUN CFG table index to operate on. */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+/* Set the TUN CFG entry count for the subsequent access. */
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+/* IP checksum update enable. */
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+/* IP checksum precalculated value. */
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+/* IP total-length update enable. */
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+/* IP total-length precalculated value. */
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+/* VXLAN UDP length update enable. */
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX LAG index of the selected TUN CFG entry. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+/* Recirculation enable for the selected TUN CFG entry. */
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Flush the TUN CFG CTRL and DATA registers. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG: global forwarding configuration field setters. */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Flush the CONFIG register. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG: link-aggregation configuration table accessors. */
+
+/* Select the LAG CFG table index to operate on. */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+/* Set the LAG CFG entry count for the subsequent access. */
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+/* Set the TX physical port of the selected LAG CFG entry. */
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Flush the LAG CFG CTRL and DATA registers. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Shadow state for one ROA module instance: FPGA handle plus cached
+ * register/field handles resolved at roa_nthw_init() time.
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR: tunnel header table */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG: tunnel configuration table */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG: global forwarding configuration */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG: link-aggregation configuration table */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Set the debug mode of the underlying RPP_LR module. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rpp_lr_nthw; returns NULL on allocation failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free an rpp_lr_nthw; safe to call with NULL. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an RPP_LR module instance and resolve its register/field handles.
+ * Calling with p == NULL only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.
+ *
+ * The IFR RCP register set is optional; its handles are looked up with
+ * module_query_register()/register_query_field() and may remain NULL.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above; no need to query again. */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP: recipe table accessors. Handles are asserted non-NULL on use. */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Flush the RCP CTRL and DATA registers. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* IFR RCP: optional register set; callers must only use it when present
+ * (the asserts catch misuse in debug builds).
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Flush the IFR RCP CTRL and DATA registers. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Shadow state for one RPP_LR module instance: FPGA handle plus cached
+ * register/field handles resolved at rpp_lr_nthw_init() time.
+ * The IFR RCP handles are optional and may be NULL.
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	/* RCP: recipe table */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR RCP: optional register set (may be NULL) */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Set the debug mode of the underlying SLC module. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized slc_nthw; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	struct slc_nthw *p = malloc(sizeof(struct slc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free an slc_nthw; safe to call with NULL. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an SLC module instance and resolve its register/field handles.
+ * Calling with p == NULL only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above; no need to query again. */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP: slicer recipe table accessors. */
+
+/* Select the RCP table index to operate on. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP entry count for the subsequent access. */
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Tail-slicing enable. */
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Tail offset; signed value stored through the 32-bit field setter. */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush the RCP CTRL and DATA registers. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for the SLC FPGA module: caches the module handle and the RCP
+ * CTRL/DATA registers plus their fields, resolved once in slc_nthw_init().
+ */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* FPGA instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc;	/* MOD_SLC module handle */
+
+	/* RCP control register and its ADR/CNT fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+/* Resolve module/register/field handles; p == NULL probes for existence. */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field accessors; *_flush writes the shadowed CTRL+DATA registers. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC_LR module handle. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized handle; NULL on allocation failure. */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	/* calloc yields the same zeroed block as malloc + memset did */
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Release a handle created by slc_lr_nthw_new(); NULL is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* scrub the handle before releasing it, as before */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the MOD_SLC_LR module and its RCP register/field handles.
+ * Pass p == NULL to merely probe whether the instance exists.
+ * Returns 0 on success, -1 if the instance is not present.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* was "Slc" - use the module's own name like TxCpy/TxIns/TxRpl */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc_lr = p_mod;	/* reuse the module handle queried above */
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Select the RCP record address (CTRL ADR) for subsequent DATA writes. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP CTRL CNT field. */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the RCP DATA TAIL_SLC_EN field. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Set the RCP DATA TAIL_DYN field. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Set the RCP DATA TAIL_OFS field (signed value, written as 32-bit). */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Set the RCP DATA PCAP field. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush the shadowed RCP CTRL and DATA registers. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for the SLC_LR FPGA module: caches the module handle and the RCP
+ * CTRL/DATA registers plus their fields, resolved in slc_lr_nthw_init().
+ */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* FPGA instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc_lr;	/* MOD_SLC_LR module handle */
+
+	/* RCP control register and its ADR/CNT fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	/* RCP data register fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+/* Resolve module/register/field handles; p == NULL probes for existence. */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP field accessors; *_flush writes the shadowed CTRL+DATA registers. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module handle. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zero-initialized handle; NULL on allocation failure. */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	/* calloc yields the same zeroed block as malloc + memset did */
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/* Release a handle and its writers array; NULL is a no-op. */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	free(p->m_writers);
+	/* scrub the handle before releasing it, as before */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the MOD_TX_CPY module and, for each writer exposed by the FPGA
+ * (up to six), its CTRL/DATA register and field handles.  On FPGA variants
+ * with a non-zero NT_TX_CPY_VARIANT the per-writer mask registers are
+ * resolved as well.  Pass p == NULL to merely probe for the instance.
+ * Returns 0 on success, -1 on missing instance, bad writer count, or OOM.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	/*
+	 * Register/field ids for each writer register set, indexed by writer
+	 * number.  Replaces the former copy-pasted fall-through switch with
+	 * a data-driven loop.
+	 */
+	static const struct writer_regs_s {
+		int ctrl;
+		int ctrl_adr;
+		int ctrl_cnt;
+		int data;
+		int data_reader_select;
+		int data_dyn;
+		int data_ofs;
+		int data_len;
+		int data_mask_pointer;
+		int mask_ctrl;
+		int mask_ctrl_adr;
+		int mask_ctrl_cnt;
+		int mask_data;
+		int mask_data_byte_mask;
+	} regs[] = {
+		{ CPY_WRITER0_CTRL, CPY_WRITER0_CTRL_ADR, CPY_WRITER0_CTRL_CNT,
+		  CPY_WRITER0_DATA, CPY_WRITER0_DATA_READER_SELECT,
+		  CPY_WRITER0_DATA_DYN, CPY_WRITER0_DATA_OFS,
+		  CPY_WRITER0_DATA_LEN, CPY_WRITER0_DATA_MASK_POINTER,
+		  CPY_WRITER0_MASK_CTRL, CPY_WRITER0_MASK_CTRL_ADR,
+		  CPY_WRITER0_MASK_CTRL_CNT, CPY_WRITER0_MASK_DATA,
+		  CPY_WRITER0_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER1_CTRL, CPY_WRITER1_CTRL_ADR, CPY_WRITER1_CTRL_CNT,
+		  CPY_WRITER1_DATA, CPY_WRITER1_DATA_READER_SELECT,
+		  CPY_WRITER1_DATA_DYN, CPY_WRITER1_DATA_OFS,
+		  CPY_WRITER1_DATA_LEN, CPY_WRITER1_DATA_MASK_POINTER,
+		  CPY_WRITER1_MASK_CTRL, CPY_WRITER1_MASK_CTRL_ADR,
+		  CPY_WRITER1_MASK_CTRL_CNT, CPY_WRITER1_MASK_DATA,
+		  CPY_WRITER1_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER2_CTRL, CPY_WRITER2_CTRL_ADR, CPY_WRITER2_CTRL_CNT,
+		  CPY_WRITER2_DATA, CPY_WRITER2_DATA_READER_SELECT,
+		  CPY_WRITER2_DATA_DYN, CPY_WRITER2_DATA_OFS,
+		  CPY_WRITER2_DATA_LEN, CPY_WRITER2_DATA_MASK_POINTER,
+		  CPY_WRITER2_MASK_CTRL, CPY_WRITER2_MASK_CTRL_ADR,
+		  CPY_WRITER2_MASK_CTRL_CNT, CPY_WRITER2_MASK_DATA,
+		  CPY_WRITER2_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER3_CTRL, CPY_WRITER3_CTRL_ADR, CPY_WRITER3_CTRL_CNT,
+		  CPY_WRITER3_DATA, CPY_WRITER3_DATA_READER_SELECT,
+		  CPY_WRITER3_DATA_DYN, CPY_WRITER3_DATA_OFS,
+		  CPY_WRITER3_DATA_LEN, CPY_WRITER3_DATA_MASK_POINTER,
+		  CPY_WRITER3_MASK_CTRL, CPY_WRITER3_MASK_CTRL_ADR,
+		  CPY_WRITER3_MASK_CTRL_CNT, CPY_WRITER3_MASK_DATA,
+		  CPY_WRITER3_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER4_CTRL, CPY_WRITER4_CTRL_ADR, CPY_WRITER4_CTRL_CNT,
+		  CPY_WRITER4_DATA, CPY_WRITER4_DATA_READER_SELECT,
+		  CPY_WRITER4_DATA_DYN, CPY_WRITER4_DATA_OFS,
+		  CPY_WRITER4_DATA_LEN, CPY_WRITER4_DATA_MASK_POINTER,
+		  CPY_WRITER4_MASK_CTRL, CPY_WRITER4_MASK_CTRL_ADR,
+		  CPY_WRITER4_MASK_CTRL_CNT, CPY_WRITER4_MASK_DATA,
+		  CPY_WRITER4_MASK_DATA_BYTE_MASK },
+		{ CPY_WRITER5_CTRL, CPY_WRITER5_CTRL_ADR, CPY_WRITER5_CTRL_CNT,
+		  CPY_WRITER5_DATA, CPY_WRITER5_DATA_READER_SELECT,
+		  CPY_WRITER5_DATA_DYN, CPY_WRITER5_DATA_OFS,
+		  CPY_WRITER5_DATA_LEN, CPY_WRITER5_DATA_MASK_POINTER,
+		  CPY_WRITER5_MASK_CTRL, CPY_WRITER5_MASK_CTRL_ADR,
+		  CPY_WRITER5_MASK_CTRL_CNT, CPY_WRITER5_MASK_DATA,
+		  CPY_WRITER5_MASK_DATA_BYTE_MASK },
+	};
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe mode: with no handle to fill in, just report existence. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = p_mod;	/* reuse the module handle queried above */
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* Non-zero variants additionally expose per-writer mask registers. */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/* Resolve at most the six writers the register map defines. */
+	const unsigned int n_regs =
+		(unsigned int)(sizeof(regs) / sizeof(regs[0]));
+	const unsigned int n_writers =
+		p->m_writers_cnt < n_regs ? p->m_writers_cnt : n_regs;
+
+	for (unsigned int i = 0; i < n_writers; i++) {
+		struct tx_cpy_writers_s *w = &p->m_writers[i];
+		const struct writer_regs_s *r = &regs[i];
+
+		w->mp_writer_ctrl = module_get_register(p->m_tx_cpy, r->ctrl);
+		w->mp_writer_ctrl_addr =
+			register_get_field(w->mp_writer_ctrl, r->ctrl_adr);
+		w->mp_writer_ctrl_cnt =
+			register_get_field(w->mp_writer_ctrl, r->ctrl_cnt);
+		w->mp_writer_data = module_get_register(p->m_tx_cpy, r->data);
+		w->mp_writer_data_reader_select =
+			register_get_field(w->mp_writer_data,
+					   r->data_reader_select);
+		w->mp_writer_data_dyn =
+			register_get_field(w->mp_writer_data, r->data_dyn);
+		w->mp_writer_data_ofs =
+			register_get_field(w->mp_writer_data, r->data_ofs);
+		w->mp_writer_data_len =
+			register_get_field(w->mp_writer_data, r->data_len);
+
+		if (variant != 0) {
+			w->mp_writer_data_mask_pointer =
+				register_get_field(w->mp_writer_data,
+						   r->data_mask_pointer);
+			w->mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, r->mask_ctrl);
+			w->mp_writer_mask_ctrl_addr =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   r->mask_ctrl_adr);
+			w->mp_writer_mask_ctrl_cnt =
+				register_get_field(w->mp_writer_mask_ctrl,
+						   r->mask_ctrl_cnt);
+			w->mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, r->mask_data);
+			w->mp_writer_mask_data_byte_mask =
+				register_get_field(w->mp_writer_mask_data,
+						   r->mask_data_byte_mask);
+		}
+	}
+
+	return 0;
+}
+
+/* Set writer 'index' CTRL ADR field; index must be < m_writers_cnt. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set writer 'index' CTRL CNT field. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Set writer 'index' DATA READER_SELECT field. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Set writer 'index' DATA DYN field. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Set writer 'index' DATA OFS field. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Set writer 'index' DATA LEN field. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/* Set writer 'index' DATA MASK_POINTER field (only resolved on FPGA
+ * variants with masking - see tx_cpy_nthw_init).
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Flush writer 'index' CTRL and DATA registers. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Set writer 'index' MASK_CTRL ADR field (variant FPGAs only). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set writer 'index' MASK_CTRL CNT field (variant FPGAs only). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Set writer 'index' MASK_DATA BYTE_MASK field (variant FPGAs only). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Flush writer 'index' MASK_CTRL and MASK_DATA registers (variant only). */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handles for one TX_CPY writer.  The mask members are
+ * resolved only on FPGA variants with masking support (see
+ * tx_cpy_nthw_init) and are NULL otherwise.
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer;
+
+	nt_register_t *mp_writer_mask_ctrl;
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Handle for the TX_CPY FPGA module and its per-writer register sets. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* FPGA instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_cpy;	/* MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt;	/* NT_TX_CPY_WRITERS product param */
+	struct tx_cpy_writers_s *m_writers;	/* array of m_writers_cnt */
+};
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+/* Resolve module/register/field handles; p == NULL probes for existence. */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+/* Per-writer field accessors; 'index' must be below m_writers_cnt. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+/* Mask-register accessors: valid only on variants with masking support. */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module handle. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zero-initialized handle; NULL on allocation failure. */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	/* calloc yields the same zeroed block as malloc + memset did */
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Release a handle created by tx_ins_nthw_new(); NULL is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* scrub the handle before releasing it, as before */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the MOD_TX_INS module and its RCP register/field handles.
+ * Pass p == NULL to merely probe whether the instance exists.
+ * Returns 0 on success, -1 if the instance is not present.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_ins = p_mod;	/* reuse the module handle queried above */
+
+	/* RCP control/data registers and their fields */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Select the RCP record address (CTRL ADR) for subsequent DATA writes. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP CTRL CNT field. */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the RCP DATA DYN field. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Set the RCP DATA OFS field. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Set the RCP DATA LEN field. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Flush the shadowed RCP CTRL and DATA registers. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for the TX_INS FPGA module: caches the module handle and the RCP
+ * CTRL/DATA registers plus their fields, resolved in tx_ins_nthw_init().
+ */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* FPGA instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_ins;	/* MOD_TX_INS module handle */
+
+	/* RCP control register and its ADR/CNT fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+/* Allocate a zeroed handle; returns NULL on allocation failure. */
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+/* Resolve module/register/field handles; p == NULL probes for existence. */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module handle. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized tx_rpl_nthw handle.
+ * Returns NULL on allocation failure; release with tx_rpl_nthw_delete().
+ */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc() zeroes the object in one step, replacing malloc()+memset() */
+	struct tx_rpl_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Release a handle obtained from tx_rpl_nthw_new().
+ * The object is zeroed before free (defensive scrub); NULL is a no-op.
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a tx_rpl_nthw handle to TX_RPL module instance n_instance of p_fpga
+ * and resolve all register/field handles used by the accessors below.
+ *
+ * When p is NULL the call only probes for the module's presence.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the module handle queried above instead of a second lookup */
+	p->m_tx_rpl = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP table accessors: stage field values, then tx_rpl_nthw_rcp_flush() writes them. */
+
+/* Stage the RCP CTRL ADR field (entry selector). */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* Stage the RCP CTRL CNT field. */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Stage the RCP DATA DYN field. */
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Stage the RCP DATA OFS field. */
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Stage the RCP DATA LEN field. */
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Stage the RCP DATA RPL_PTR field. */
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+/* Stage the RCP DATA EXT_PRIO field. */
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Write the staged RCP CTRL and DATA registers to the device. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table accessors: same select/stage/flush pattern as the RCP group. */
+
+/* Stage the EXT CTRL ADR field (entry selector). */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+/* Stage the EXT CTRL CNT field. */
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+/* Stage the EXT DATA RPL_PTR field. */
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+/* Write the staged EXT CTRL and DATA registers to the device. */
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL replace-data table accessors. */
+
+/* Stage the RPL CTRL ADR field (entry selector). */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+/* Stage the RPL CTRL CNT field. */
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* Stage the RPL DATA VALUE field from an array of 4 32-bit words. */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+/* Write the staged RPL CTRL and DATA registers to the device. */
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle to one TX_RPL FPGA module instance of an adapter. */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number passed to init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_tx_rpl;		/* TX_RPL module handle */
+
+	/* RCP CTRL register and its ADR/CNT fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	/* RCP DATA register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* EXT CTRL/DATA registers and fields */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* RPL CTRL/DATA registers and fields */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+/* Lifetime: allocate, bind to FPGA instance, release. */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: select entry, stage fields, flush to hardware. */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-31 12:23   ` [PATCH v11 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-31 12:23   ` Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
v11:
* Fix dereferencing type-punned pointer in macro
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  265 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14376 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Program ROA tunnel-header entry 'index' from the prebuilt header in tun.
+ * Writes 4 groups of 4 words for IPv4 (64 bytes) or 8 groups for IPv6
+ * (128 bytes); within each group the words are written in reverse order,
+ * byte-swapped with ntohl().
+ * Returns 0 on success, non-zero if any hw_mod_roa_tunhdr_set() call failed
+ * (error codes are OR'ed together).
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program ROA tunnel-config entry 'index' from the packed color_actions word
+ * (layout documented in flow_api_actions.h): tunnel push parameters,
+ * recirculation port/bypass, TX destination, and the companion IOA EPP entry.
+ * Returns 0 on success, -1 on an invalid TX destination combination.
+ * NOTE(review): hw_mod_*_set() return values are not checked here — presumed
+ * intentional; confirm against the backend contract.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 - use the checksum precalculated in color_actions */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 = no bypass port */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* replicate to both ports: TX on PHY0, recirculate-bypass to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program IOA recipe entry 'index' from the packed color_actions word:
+ * tunnel/VLAN pop flags, VLAN push (TCI split into VID/DEI/PCP), optional
+ * queue override, and the VLAN TPID selector. Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* split the 16-bit TCI into its VID/DEI/PCP components */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	/* queue is -1 when no queue override was encoded */
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+/* Number of color (flow-stat) counters; must be 0x400 or 0x4000 (see below). */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* TX destination flags encoded in the color action (combinable). */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+/* Tunnel IP version selector. */
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID selector values for HW_IOA_RCP_VLAN_TPID_SEL. */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+/* ROA TX_LAG_IX values. */
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/* Compose a legacy (pre-QSL-v6) color action word in one of the two layouts above. */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+/* Unshifted mask covering inclusive bit range a..b (a >= b, width < 32). */
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/*
+ * Compose a current-layout 32-bit color action word:
+ *  9:0 mark, 17:10 IOA index, 25:18 ROA index, 30:26 QSL/HSH recipe,
+ * bit 31 always set to flag the word as a color action.
+ */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	/* 1U << 31: left-shifting signed 1 into the sign bit is undefined behavior */
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Set bit 50: a new recirculate port was allocated. */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Bit 24: tunnel IP version (TUN_IPV4/TUN_IPV6). */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Bits 49:34: pre-calculated tunnel IP header checksum. */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Bits 33:26: tunnel header length, 0 when no tunnel. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* Bits 23:20: TX port stored as txport + ROA_TX_PHY0. */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Bits 19:16: tunnel type. */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Bit 25 + bits 7:0: enable recirculation and record the port. */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Returns the recirculate port, or -1 when recirculation is not enabled. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Bits 15:8: recirculate bypass port (overrides recirc port when set). */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
+/* Set the queue-override flag (bit 31) and the queue/hbx id (bits 23:16). */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	/*
+	 * 1ULL << 31: the previous (1 << 31) shifted into the sign bit of a
+	 * 32-bit int (undefined behavior) and, widening as INT_MIN, would
+	 * sign-extend to 0xffffffff80000000 and corrupt bits 63:32.
+	 */
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/* Return the queue/hbx id if the override flag (bit 31) is set, else -1. */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	/*
+	 * Test bit 31 only: the previous (1 << 31) mask sign-extended to
+	 * 0xffffffff80000000 and would match any of bits 63:31.
+	 */
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* Bit 28: pop outer VxLAN tunnel. */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Bit 29: pop outer VLAN. */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Bits 27:24: select the 802.1AD TPID for a VLAN push. */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Bit 30 + bits 15:0: push a VLAN tag with the given TCI. */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Bit 30 + bits 15:13: push a VLAN tag carrying only the PCP bits. */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/*
 * Registry of all flow HW module drivers in their version-independent
 * representation.  Each entry bundles the module name with its
 * allocate/free/reset lifecycle callbacks and a presence probe, so
 * flow_api_backend_init/reset/done can iterate this table instead of
 * hard-coding every module.
 */
static const struct {
	const char *name;
	int (*allocate)(struct flow_api_backend_s *be);
	void (*free)(struct flow_api_backend_s *be);
	int (*reset)(struct flow_api_backend_s *be);
	bool (*present)(struct flow_api_backend_s *be);
} module[] = {
	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
		hw_mod_cat_present
	},
	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
		hw_mod_km_present
	},
	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
		hw_mod_flm_present
	},
	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
		hw_mod_hsh_present
	},
	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
		hw_mod_hst_present
	},
	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
		hw_mod_qsl_present
	},
	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
		hw_mod_slc_present
	},
	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
	},
	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
		hw_mod_pdb_present
	},
	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
		hw_mod_ioa_present
	},
	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
		hw_mod_roa_present
	},
	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
		hw_mod_rmc_present
	},
	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
		hw_mod_tpe_present
	},
};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+void zero_module_cache(struct common_func_s *mod)
+{
+	memset(mod->base, 0, mod->allocated_size);
+}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
/* Log an out-of-range table index in @func; returns error code -2. */
static inline int error_index_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
	return -2;
}
+
/* Log an out-of-range word offset in @func; returns error code -3. */
static inline int error_word_off_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
	return -3;
}
+
/*
 * Log that module @mod at version @ver (major.minor packed, see
 * VER_MAJOR/VER_MINOR) is not supported by @func; returns -4.
 */
static inline int error_unsup_ver(const char *func, const char *mod, int ver)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
/* Log an unsupported register field access in @func; returns -5. */
static inline int error_unsup_field(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
		func);
	return -5;
}
+
/*
 * Log that resource @resource of module @mod ver @ver is exhausted in
 * @func; returns -4 (same code as error_unsup_ver).
 * NOTE(review): the concatenated format string prints "NIC module:%s"
 * with no space before the module name — cosmetic, confirm intended.
 */
static inline int error_resource_count(const char *func, const char *resource,
	const char *mod, int ver)
{
	NT_LOG(INF, FILTER,
	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
	       "%s ver %i.%i\n",
	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
/*
 * Move one 32-bit word between the SW register cache and the caller:
 * get != 0 reads the cache into *val, get == 0 writes *val to the cache.
 */
static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
{
	uint32_t *dst = get ? val : cached_val;
	uint32_t *src = get ? cached_val : val;

	*dst = *src;
}
+
/*
 * Same as get_set() but the cached word is signed: the value is
 * round-tripped through a cast between int32_t and uint32_t.
 */
static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = (uint32_t)*cached_val;
		return;
	}
	*cached_val = (int32_t)*val;
}
+
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
/*
 * Compare entry @idx with entry @cmp_idx of a register array.
 * Returns 1 when the two distinct entries are byte-identical, 0 when
 * they differ or are the same index, or a negative error code.  Only
 * valid as a "get" operation.
 */
static inline int do_compare_indexes(void *be_module_reg,
	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
	unsigned int nb_elements, int get, const char *func)
{
	const uint8_t *regs = (const uint8_t *)be_module_reg;

	if (!get)
		return error_unsup_field(func);
	if (cmp_idx >= nb_elements)
		return error_index_too_large(func);
	if (idx == cmp_idx)
		return 0;
	return memcmp(regs + idx * type_size, regs + cmp_idx * type_size,
		      type_size) == 0 ? 1 : 0;
}
+
/* Return 1 when any of the n bytes at addr is non-zero, else 0. */
static inline int is_non_zero(const void *addr, size_t n)
{
	const uint8_t *p = (const uint8_t *)addr;
	const uint8_t *end = p + n;

	while (p < end) {
		if (*p++)
			return 1;
	}
	return 0;
}
+
/* Return 1 when all n bytes at addr are 0xff, else 0 (1 for n == 0). */
static inline int is_all_bits_set(const void *addr, size_t n)
{
	const uint8_t *p = (const uint8_t *)addr;
	const uint8_t *end = p + n;

	while (p < end) {
		if (*p++ != 0xff)
			return 0;
	}
	return 1;
}
+
/*
 * Per-engine bit positions used with the CTE enable bitmaps
 * (HW_CAT_CTE_ENABLE_BM); names mirror the HW modules a category can be
 * dispatched to (COT/COR color, HSH, QSL, ...).  CT_CNT is the count.
 * NOTE(review): engine-name expansions inferred from sibling module
 * names in this header — confirm against the FPGA documentation.
 */
enum cte_index_e {
	CT_COL = 0,
	CT_COR = 1,
	CT_HSH = 2,
	CT_QSL = 3,
	CT_IPF = 4,
	CT_SLC = 5,
	CT_PDB = 6,
	CT_MSK = 7,
	CT_HST = 8,
	CT_EPP = 9,
	CT_TPE = 10,
	CT_RRB = 11,
	CT_CNT
};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
/*
 * Dynamic frame offset selectors (DYN_*): each names a parse point in
 * the packet (outer and tunneled headers) that extractor offsets are
 * taken relative to.  The SB_* entries are sideband values and carry
 * the SWX_INFO flag (bit 6) instead of addressing frame data.
 */
enum frame_offs_e {
	DYN_SOF = 0,
	DYN_L2 = 1,
	DYN_FIRST_VLAN = 2,
	DYN_MPLS = 3,
	DYN_L3 = 4,
	DYN_ID_IPV4_6 = 5,
	DYN_FINAL_IP_DST = 6,
	DYN_L4 = 7,
	DYN_L4_PAYLOAD = 8,
	DYN_TUN_PAYLOAD = 9,
	DYN_TUN_L2 = 10,
	DYN_TUN_VLAN = 11,
	DYN_TUN_MPLS = 12,
	DYN_TUN_L3 = 13,
	DYN_TUN_ID_IPV4_6 = 14,
	DYN_TUN_FINAL_IP_DST = 15,
	DYN_TUN_L4 = 16,
	DYN_TUN_L4_PAYLOAD = 17,
	DYN_EOF = 18,
	DYN_L3_PAYLOAD_END = 19,
	DYN_TUN_L3_PAYLOAD_END = 20,
	SB_VNI = SWX_INFO | 1,
	SB_MAC_PORT = SWX_INFO | 2,
	SB_KCC_ID = SWX_INFO | 3
};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
/*
 * Header common to every HW module cache struct (expanded from
 * COMMON_FUNC_INFO_S): module version, base cache allocation, its size
 * in bytes, and a debug flag.  Module structs are cast to this via
 * CAST_COMMON for the shared alloc/zero helpers.
 */
struct common_func_s {
	COMMON_FUNC_INFO_S;
};
+
/*
 * CAT (categorizer) module cache: resource counts read from the
 * backend, followed by the version-specific register image (one union
 * member per supported module version).
 */
struct cat_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_cat_funcs;
	uint32_t nb_flow_types;
	uint32_t nb_pm_ext;
	uint32_t nb_len;
	uint32_t kcc_size;
	uint32_t cts_num;
	uint32_t kcc_banks;
	uint32_t kcc_id_bit_size;
	uint32_t kcc_records;
	uint32_t km_if_count;
	int32_t km_if_m0;
	int32_t km_if_m1;

	union {
		struct hw_mod_cat_v18_s v18;
		struct hw_mod_cat_v21_s v21;
		struct hw_mod_cat_v22_s v22;
	};
};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
/*
 * KM (key matcher) module cache: CAM/TCAM geometry read from the
 * backend plus mask word sizes derived from the module version, and the
 * version-specific register image.
 */
struct km_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_cam_banks;
	uint32_t nb_cam_record_words;
	uint32_t nb_cam_records;
	uint32_t nb_tcam_banks;
	uint32_t nb_tcam_bank_width;
	/* not read from backend, but rather set using version */
	uint32_t nb_km_rcp_mask_a_word_size;
	uint32_t nb_km_rcp_mask_b_word_size;
	union {
		struct hw_mod_km_v7_s v7;
	};
};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
/*
 * HST (header stripper) module cache: recipe count from the backend
 * plus the version-specific register image.
 */
struct hst_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_hst_rcp_categories;
	union {
		struct hw_mod_hst_v2_s v2;
	};
};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
/*
 * FLM (flow matcher) module cache: flow table geometry read from the
 * backend plus the version-specific register image.
 */
struct flm_func_s {
	COMMON_FUNC_INFO_S;
	uint32_t nb_categories;
	uint32_t nb_size_mb;
	uint32_t nb_entry_size;
	uint32_t nb_variant;
	uint32_t nb_prios;
	uint32_t nb_pst_profiles;
	union {
		struct hw_mod_flm_v17_s v17;
		struct hw_mod_flm_v20_s v20;
	};
};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM (Flow Matcher) module interface.
+ * API pattern used throughout this header: *_set()/*_get() access the
+ * software shadow state for the field selected by the enum value,
+ * *_flush() writes shadow state to hardware through the backend ops,
+ * and *_update() reads state back from hardware into the shadow.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* LOAD_BIN/PPS/LPS/APS registers — load statistics, see HW_FLM_LOAD_* fields */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST and RCP are table resources: 'index'/'start_idx' select the entry */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* set_mask takes a multi-word value (e.g. for HW_FLM_RCP_MASK) */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/info/status record streams exchanged with the FLM engine */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH (Hasher) module: hash/RSS recipe state. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of recipe entries */
+	union {
+		struct hw_mod_hsh_v5_s v5; /* module version 5 layout */
+	};
+};
+
+/* Function selectors (pre-FIELD_START_INDEX) and register field selectors */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* 'word_off' addresses a word inside multi-word fields (e.g. SEED, masks) */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL (Queue Selector) module: RX queue selection state. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP table entries */
+	uint32_t nb_qst_entries;    /* number of QST table entries */
+	union {
+		struct hw_mod_qsl_v7_s v7; /* module version 7 layout */
+	};
+};
+
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC (Slicer) module: packet slicing recipes. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1; /* module version 1 layout */
+	};
+};
+
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC LR module: slicer variant with the same recipe fields as SLC. */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2; /* module version 2 layout */
+	};
+};
+
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB (Packet Descriptor Builder) module. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of RCP table entries */
+
+	union {
+		struct hw_mod_pdb_v9_s v9; /* module version 9 layout */
+	};
+};
+
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+/* note: config has a setter/flush but no matching getter in this API */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module: ingress VLAN/tunnel manipulation and ROA egress port table. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;   /* number of RCP table entries */
+	uint32_t nb_roa_epp_entries;  /* number of ROA EPP table entries */
+	union {
+		struct hw_mod_ioa_v4_s v4; /* module version 4 layout */
+	};
+};
+
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA module: tunnel header/config tables, LAG config and drop counters. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel table entries */
+	uint32_t nb_lag_entries;    /* number of LAG config entries */
+	union {
+		struct hw_mod_roa_v6_s v6; /* module version 6 layout */
+	};
+};
+
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+/* TUNHDR is multi-word per entry: 'word_off' selects the 32-bit word */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* IGS/RCC drop counters (packet and byte) */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module: single control register, so no function selectors here. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3; /* module version 1.3 layout */
+	};
+};
+
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/*
+ * TPE (TX Packet Editor) module: groups the RPP/IFR/INS/RPL/CPY/HFU/CSU
+ * sub-blocks; the union carries two supported module register versions.
+ */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;     /* number of recipe entries */
+	uint32_t nb_ifr_categories;     /* number of IFR recipe entries */
+	uint32_t nb_cpy_writers;        /* number of TX copy writers */
+	uint32_t nb_rpl_depth;          /* replace table depth */
+	uint32_t nb_rpl_ext_categories; /* replace-extension entries */
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+/* One set/get/flush triple per TPE sub-block table */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* rpl_rpl entries take a multi-word value (uint32_t * for set and get) */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug mode bits passed to the 'set_debug_mode' backend op. */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001 /* log/trace register writes */
+};
+
+/*
+ * Backend operations vtable implemented by the concrete FPGA backend.
+ * 'dev'/'be_dev' is the opaque backend handle stored in
+ * struct flow_api_backend_s::be_dev. The get_nb_* callbacks report
+ * resource capacities; get_*_present/get_*_version probe each module;
+ * the *_flush callbacks write a module's shadow state to hardware and
+ * the *_update callbacks read hardware state back.
+ */
+struct flow_api_backend_ops {
+	int version; /* backend interface version for compatibility checks */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level flow API backend context: the opaque device handle, the
+ * backend ops vtable, per-module shadow state and NIC capacities.
+ */
+struct flow_api_backend_s {
+	void *be_dev;                             /* opaque backend device handle */
+	const struct flow_api_backend_ops *iface; /* backend ops vtable */
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Lifecycle: init queries the backend ops, reset clears module state,
+ * done releases all module allocations.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT,
+	RES_INVALID
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled words
+ * building the extracted match data; thus that is also how the match key
+ * must be built
+ *
+ */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD,
+	KM_USE_EXTRACTOR_SWORD,
+};
+
+struct match_elem_s {
+	enum extractor_e extr;
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4];
+	uint32_t e_mask[4];
+
+	int extr_start_offs_id;
+	int8_t rel_offs;
+	uint32_t word_len;
+};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+/*
+ * 64-bit KCC CAM key packed as bit-fields.  It is aliased with key64 and
+ * key32[2] in struct kcc_flow_def_s below, so the exact packing matters.
+ */
+struct kcc_key_s {
+	uint64_t sb_data : 32; /* sideband data (VLAN TPID/VID, VXLAN VNI, ...) */
+	uint64_t sb_type : 8;  /* sideband type: 0 none, 1 VLAN, 2 VXLAN */
+	uint64_t cat_cfn : 8;  /* categorizer CFN index */
+	uint64_t port : 16;    /* port number */
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64;
+		uint32_t key32[2];
+		struct kcc_key_s key;
+	};
+	uint32_t km_category;
+	uint32_t id;
+
+	uint8_t *kcc_unique_ids;
+
+	int flushed_to_target;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d;
+	uint32_t user_port_id;
+	uint8_t len;
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc;
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+/*
+ * High-priority partial-match classes given special SW processing (non-OVS).
+ * NOTE(review): "SPCIAL_MATCH_NONE" is misspelled (missing an 'E'); renaming
+ * would touch every user of the constant, so it is only flagged here.
+ */
+enum special_partial_match_e {
+	SPCIAL_MATCH_NONE,
+	SPECIAL_MATCH_LACP, /* link aggregation control protocol frames */
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions;
+	uint64_t ioa_actions;
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select;
+		uint32_t dyn;
+		uint32_t ofs;
+		uint32_t len;
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable;
+	uint8_t ttl_sub_ipv4;
+	uint8_t ttl_sub_outer;
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
+struct flow_handle {
+	enum flow_handle_type type;
+
+	struct flow_eth_dev *dev;
+	struct flow_handle *next;
+	struct flow_handle *prev;
+
+	union {
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		struct {
+			uint32_t flm_data[10];
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			struct flow_handle *flm_owner;
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+struct group_lookup_entry_s {
+	uint64_t ref_counter;
+	uint32_t *reverse_lookup;
+};
+
+struct group_handle_s {
+	uint32_t group_count;
+
+	uint32_t *translation_table;
+
+	struct group_lookup_entry_s *lookup_entries;
+};
+
+/*
+ * Allocate and initialize a flow group translation handle: the per-
+ * (owner, group) translation table and the reverse-lookup entry array used
+ * by flow_group_translate_get()/_release().
+ *
+ * Returns 0 on success, -1 on allocation failure; on failure *handle is
+ * NULL and no partial allocations are leaked.
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	group_handle = *handle;
+	if (group_handle == NULL)
+		return -1;
+
+	group_handle->group_count = group_count;
+	/* one translation slot per (owner, group) pair */
+	group_handle->translation_table =
+		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	if (group_handle->translation_table == NULL ||
+			group_handle->lookup_entries == NULL) {
+		/* unwind partial allocations so the caller sees a clean failure */
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+		free(group_handle);
+		*handle = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Release a flow group translation handle and all memory owned by it.
+ * Safe to call with an already-cleared handle; always returns 0.
+ */
+int flow_group_handle_destroy(void **handle)
+{
+	struct group_handle_s *grp = (struct group_handle_s *)*handle;
+
+	if (grp == NULL)
+		return 0;
+
+	free(grp->translation_table);
+	free(grp->lookup_entries);
+	free(grp);
+	*handle = NULL;
+
+	return 0;
+}
+
+/*
+ * Translate a caller-supplied group number into a NIC-internal group index.
+ *
+ * Each (owner_id, group_in) pair owns one slot in the translation table; a
+ * slot value of 0 means "not yet translated".  On first use a free internal
+ * group (ref_counter == 0) is claimed and linked both ways; later calls for
+ * the same pair only bump the reference count.  Group 0 is the default
+ * group and is never translated.
+ *
+ * Returns 0 on success (*group_out set), -1 on bad input or when all
+ * internal groups are in use.
+ */
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	/*
+	 * The table holds OWNER_ID_COUNT rows of group_count slots, so the
+	 * per-owner stride is group_count.  (Using OWNER_ID_COUNT as the
+	 * stride would index out of bounds whenever group_count is smaller
+	 * than OWNER_ID_COUNT.)
+	 */
+	table_ptr = &group_handle->translation_table[owner_id *
+				 group_handle->group_count + group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		/* unused slot - claim the first free internal group */
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			return -1;
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+/*
+ * Drop one reference on a previously translated group.  When the last
+ * reference goes away the translation-table slot is cleared so the internal
+ * group can be reused.  Group 0 is never tracked.
+ * Returns 0 on success, -1 on an invalid handle or group index.
+ */
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *grp = (struct group_handle_s *)handle;
+
+	if (grp == NULL || translated_group >= grp->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	struct group_lookup_entry_s *entry =
+		&grp->lookup_entries[translated_group];
+
+	if (entry->reverse_lookup == NULL || entry->ref_counter == 0)
+		return 0;
+
+	entry->ref_counter -= 1;
+	if (entry->ref_counter == 0) {
+		*entry->reverse_lookup = 0;
+		entry->reverse_lookup = NULL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+/*
+ * Fixed bit permutation of a 32-bit word.  The four masks are disjoint and
+ * together cover all 32 bits, so no bit is lost or duplicated.
+ */
+static uint32_t shuffle(uint32_t x)
+{
+	return (((x & 0x00000002) << 29) | ((x & 0xAAAAAAA8) >> 3) |
+		((x & 0x15555555) << 3) | ((x & 0x40000000) >> 29));
+}
+
+/* Rotate x right by s bits, with the bits wrapped back in inverted. */
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+	uint32_t low = x >> s;
+	uint32_t high = (~x) << (32 - s);
+
+	return low | high;
+}
+
+/*
+ * Non-linear combination of two 32-bit words: x ^ y, xor'ed with a term
+ * whose bit is set where exactly two of the four inverted rotations
+ * (two of x, two of y) have that bit set.
+ */
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	uint32_t x1 = ror_inv(x, 15);
+	uint32_t x2 = ror_inv(x, 13);
+	uint32_t y1 = ror_inv(y, 3);
+	uint32_t y2 = ror_inv(y, 27);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+/* One 32-bit mixing round: combine the words, then permute the bits. */
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	uint32_t combined = combine(x, y);
+
+	return shuffle(combined);
+}
+
+/* 64-bit variants of the mixing primitives, operating on each 32-bit half
+ * in parallel (the masks repeat the same pattern in both halves).
+ */
+
+/* Per-half rotate-right by 3 with inverted wrap-in bits. */
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t m = 0xE0000000E0000000ULL;
+
+	return (((x >> 3) | m) ^ ((x << 29) & m));
+}
+
+/* Per-half rotate-right by 13 with inverted wrap-in bits. */
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t m = 0xFFF80000FFF80000ULL;
+
+	return (((x >> 13) | m) ^ ((x << 19) & m));
+}
+
+/* Per-half rotate-right by 15 with inverted wrap-in bits. */
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t m = 0xFFFE0000FFFE0000ULL;
+
+	return (((x >> 15) | m) ^ ((x << 17) & m));
+}
+
+/* Per-half rotate-right by 27 with inverted wrap-in bits. */
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+
+	return (((x >> 27) | m) ^ ((x << 5) & m));
+}
+
+/* Per-half bit permutation; same pattern as the 32-bit shuffle(). */
+static uint64_t shuffle64(uint64_t x)
+{
+	return (((x & 0x0000000200000002) << 29) |
+		((x & 0xAAAAAAA8AAAAAAA8) >> 3) |
+		((x & 0x1555555515555555) << 3) |
+		((x & 0x4000000040000000) >> 29));
+}
+
+/* Pack two 32-bit words into one 64-bit word (x in the high half). */
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	return (((uint64_t)x << 32) | y);
+}
+
+/* 64-bit analogue of combine(): x ^ y plus the exactly-two-of-four term. */
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	uint64_t x1 = ror_inv15(x);
+	uint64_t x2 = ror_inv13(x);
+	uint64_t y1 = ror_inv3(y);
+	uint64_t y2 = ror_inv27(y);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+/* One 64-bit mixing round: combine, then permute. */
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	return shuffle64(combine64(x, y));
+}
+
+/*
+ * Reduce a 16-word (512-bit) key to a 32-bit hash through the binary mixing
+ * tree shown below, then fold the final 64-bit value down to 32 bits with
+ * three extra mixing rounds.
+ */
+static uint32_t calc16(const uint32_t key[16])
+{
+	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
+	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
+	/*   0       1       2       3       4       5       6       7     Layer 1   */
+	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
+	/*       0               1               2               3         Layer 2   */
+	/*        \______.______/                 \______.______/                    */
+	/*               0                               1                 Layer 3   */
+	/*                \______________.______________/                            */
+	/*                               0                                 Layer 4   */
+	/*                              / \                                          */
+	/*                              \./                                          */
+	/*                               0                                 Layer 5   */
+	/*                              / \                                          */
+	/*                              \./                                Layer 6   */
+	/*                             value                                         */
+
+	uint64_t z;
+	uint32_t x;
+
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	/* fold 64 -> 32 bits and run final avalanche rounds */
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+/*
+ * Hash the 16-word key and split the hash into one record index per bank.
+ *
+ * result[i] receives the record index for bank i, masked to
+ * cam_records_bw bits; the raw 32-bit hash is returned as well.  When the
+ * combined bank width exceeds 32 bits the value is widened by xor-folding
+ * a shifted copy onto itself.
+ */
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+/*
+ * Initialize the hasher dimensions: the number of banks and the bit width
+ * needed to index nb_records records per bank (nb_records is expected to
+ * be a power of two greater than 1).  Always returns 0.
+ */
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	/* fixed: previously referenced the undeclared identifier "_banks",
+	 * which broke the build whenever TESTING was defined
+	 */
+	int res = hash_test(hsh, banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+/*
+ * Self-test: hash a fixed key and compare the returned value and the first
+ * three per-bank indexes against an independently computed reference.
+ * Returns the number of mismatches (0 = pass).
+ *
+ * inval[] deliberately initializes only 4 of its 16 words; C aggregate
+ * initialization zero-fills the rest, and the reference value 0xACECAE65
+ * was computed for that zero-padded key.
+ */
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65;
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	/* re-derive the per-bank indexes by hand, mirroring gethash() */
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	/* NOTE(review): when shft <= 0 this shifts by record_bw - shft
+	 * (> record_bw); bits is 0 then, but confirm the shift count can
+	 * never reach 32 for the bank/record-width combinations used.
+	 */
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner;
+	int ref_cnt;
+};
+
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * Attach this flow to the per-NIC KCC-CAM record manager, allocating the
+ * manager on first use.  The manager is a single calloc'ed arena laid out
+ * as:
+ *   [cam_dist records][cuckoo_moves counter][unique-ID bitmap][hasher]
+ * and cam_dist / cuckoo_moves / kcc_unique_ids / hsh are carved out of it
+ * in that order.
+ *
+ * NOTE(review): the calloc result is not checked; on allocation failure
+ * the pointer arithmetic below operates on NULL - confirm this can only
+ * run where allocation failure is fatal anyway, or add a failure path.
+ */
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+/* Free the shared KCC-CAM record manager and clear the owner's handle. */
+void kcc_free_ndev_resource_management(void **handle)
+{
+	void *mgr = *handle;
+
+	*handle = NULL;
+	if (mgr == NULL)
+		return;
+
+	free(mgr);
+	NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+}
+
+/*
+ * Key for KCC CAM
+ */
+/* Mark the key as carrying no sideband data. */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_type = 0;
+	kcc->key.sb_data = 0xffffffff;
+	return 0;
+}
+
+/* Encode an outer VLAN (TPID and 12-bit VID) as the sideband data. */
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	uint32_t sb = (uint32_t)tpid << 16;
+
+	sb |= (uint32_t)(vid & 0x0fff);
+	kcc->key.sb_data = sb;
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+/* Encode a VXLAN VNI (24 bits, tagged with 0x02000000) as sideband data. */
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_type = 2;
+	kcc->key.sb_data = 0x02000000 | (vni & 0x00ffffff);
+	return 0;
+}
+
+/* Set the port field of the KCC key. */
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+/* Set the categorizer CFN field of the KCC key. */
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+/* Read back the categorizer CFN field of the KCC key. */
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+/* Set the KM category written alongside the key (not part of the key). */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+/*
+ * Allocate the lowest free unique KCC ID from the allocation bitmap.
+ * Stores it in kcc->id and returns it, or returns -1 when the bitmap
+ * is full.
+ */
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t byte;
+
+	/* find the first bitmap byte that still has a cleared bit */
+	for (byte = 0; byte < BE_UNIQUE_IDS_SIZE; byte++) {
+		if (kcc->kcc_unique_ids[byte] == 0xff)
+			continue;
+
+		for (uint32_t bit = 0; bit < 8; bit++) {
+			uint8_t bmask = (uint8_t)(1U << bit);
+
+			if (kcc->kcc_unique_ids[byte] & bmask)
+				continue;
+
+			kcc->kcc_unique_ids[byte] =
+				(uint8_t)(kcc->kcc_unique_ids[byte] | bmask);
+			kcc->id = (uint16_t)(byte * 8 + bit);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+		/* unreachable: a byte != 0xff always has a free bit */
+	}
+
+	return -1;
+}
+
+/*
+ * Return the flow's unique KCC ID to the allocation bitmap and invalidate
+ * kcc->id.  A flow without a valid ID (KCC_ID_INVALID) is ignored.
+ */
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t byte = kcc->id / 8;
+	uint8_t bit = (uint8_t)(kcc->id % 8);
+
+	assert(byte < BE_UNIQUE_IDS_SIZE);
+	if (byte >= BE_UNIQUE_IDS_SIZE)
+		return;
+
+	assert(kcc->kcc_unique_ids[byte] & (uint8_t)(1 << bit));
+	kcc->kcc_unique_ids[byte] &= (uint8_t)~(1 << bit);
+	NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+	kcc->id = KCC_ID_INVALID;
+}
+
+/* Return 1 when the two 64-bit KCC keys are identical, 0 otherwise. */
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	return kcc->key64 == kcc1->key64 ? 1 : 0;
+}
+
+/*
+ * Write this flow's KCC entry (both key words, KM category, unique ID)
+ * into the CAM record addressed by (bank, record_indexes[bank]) and flush
+ * it to HW.  On success the shadow entry is marked owned with ref count 1.
+ * Returns 0 on success, non-zero/-1 on any HW write failure.
+ */
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	/* all fields staged - push the record to HW */
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record in HW and release the shadow entry.  Also
+ * clears the local key and KM category; the unique ID is NOT freed here
+ * (see the note at the bottom).  Returns 0 on success.
+ */
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+/*
+ * Try to move this flow's CAM entry from its current bank to any bank
+ * whose record (at this flow's precomputed index) is free.  Returns 1 on
+ * success (bank_used updated, cuckoo counter bumped), 0 when no free bank
+ * exists or the HW write failed.
+ */
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free the entry in the old bank.
+			 * HW flushes are not needed here: the old address is
+			 * always taken over by the caller.  If this code is
+			 * changed in future updates, that may no longer hold!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo move: try to relocate the flow occupying bank_idx, and
+ * if it cannot move directly, recursively make room for it up to `levels`
+ * deep.  kcc_cam_addr_reserved_stack records the CAM addresses already on
+ * the move path so the recursion never revisits them (note: a file-static
+ * array, so this is not reentrant).
+ * Returns 1 when the occupant of bank_idx has been moved away, 0 otherwise.
+ */
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address before recursing below it */
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		/* skip addresses already claimed on this move path */
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			/* room was made below - this flow must now fit */
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/* Scratch hash input; only words 0-1 are ever written (key is 64 bits). */
+static uint32_t kcc_hsh_key[16];
+
+/*
+ * Place this flow's KCC key in the CAM: hash the key into one candidate
+ * record index per bank, take the first bank whose record is free, and if
+ * all are occupied try to make room with cuckoo moves (depth 4).  On
+ * success the entry is populated in HW and the flow marked flushed.
+ * Returns 0 on success, -1 when no room could be made or HW write failed.
+ *
+ * NOTE(review): the "TEMP TEMP TEMP" log lines look like leftover debug
+ * output - consider removing them before upstreaming.
+ */
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	/* the two 32-bit key halves are swapped into the hash input */
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1;
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+/*
+ * Public entry point: write a brand-new KCC key to the CAM.
+ * Returns 0 on success, -1 on failure (no CAM room / HW error).
+ *
+ * NOTE(review): "%016lx" with a uint64_t argument is wrong where long is
+ * 32 bits - should use PRIx64 from <inttypes.h>; same pattern appears in
+ * the ref-count logs below.
+ */
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+/*
+ * Clear this flow's KCC CAM entry in HW, if it was ever flushed there,
+ * and reset the local bookkeeping.  Returns the HW reset result (0 = OK).
+ */
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	if (!kcc->flushed_to_target)
+		return 0;
+
+	int res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+
+	kcc->flushed_to_target = 0;
+	kcc->bank_used = 0;
+	return res;
+}
+
+/*
+ * Add one reference to the CAM entry this flow occupies and return the new
+ * count.  Asserts that bank_used is valid (the dec path below checks it
+ * at runtime instead).
+ */
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+/*
+ * Drop one reference from the CAM entry this flow occupies; when the count
+ * reaches zero the entry is cleared from HW.  Returns the remaining count,
+ * or -1 when bank_used is out of range.
+ */
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Mask patterns the CAM can hold directly. A match element whose mask
+ * equals one of these entries may be placed in CAM; any other mask forces
+ * the flow into TCAM (see km_add_match_elem). word_len is in 32-bit words.
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+/* Number of entries in the cam_masks table above. */
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Per-record bookkeeping: which KM flow owns the CAM record (NULL = free). */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Linear index of (bank, record) into the cam_dist array; relies on a
+ * local variable named 'km' being in scope at the expansion site.
+ */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/* Index of the record currently hashed for 'bnk' (GCC statement
+ * expression, evaluates bnk only once).
+ */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Maximum recursion depth when cuckoo-moving entries to make room. */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/* CAM addresses already reserved during a recursive cuckoo move. */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Per-slot bookkeeping: which KM flow owns the TCAM slot (NULL = free). */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Linear index of (bank, record) into the tcam_dist array; relies on a
+ * local variable named 'km' being in scope at the expansion site.
+ */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach (and lazily allocate) the per-NIC-device CAM/TCAM record manager.
+ * The single allocation behind *handle is carved into four regions:
+ * CAM distribution table, cuckoo-move counter, TCAM distribution table and
+ * the hasher state. On allocation failure *handle stays NULL and the km
+ * pointers are left untouched so the caller can detect the error.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/* Fix: previously the NULL result was used as the
+			 * base for the pointer carving below (undefined
+			 * behavior) and init_hasher dereferenced it.
+			 */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* carve the shared allocation into its regions */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/* Release the combined CAM/TCAM record-manager allocation, if present,
+ * and clear the handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle != NULL) {
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+		free(*handle);
+	}
+	*handle = NULL;
+}
+
+/*
+ * Register one match element (up to four 32-bit words with masks, plus the
+ * extraction start id and relative offset) on the KM flow definition and
+ * classify it as CAM- or TCAM-eligible against the cam_masks table.
+ * Returns 0 on success, -1 on invalid word length or a full element array.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* Fix: guard against overflowing the fixed-size match element array
+	 * (sibling code sizes the related arrays by MAX_MATCH_FIELDS).
+	 */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	/* valid word_len 1,2,4 - promote 3 to 4 with a zeroed fourth word */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM - no need to scan further */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+				break;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable or disable inclusion of the info (color) word in the key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first unmarked, non-sideband match element with the requested
+ * word length. Returns its index, or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	int idx;
+
+	for (idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Debug-only: map a frame offset / sideband id to a human-readable name. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final key layout for this flow: assign QWORD/SWORD extractors,
+ * fill the entry word/mask arrays, decide CAM vs TCAM placement and, for
+ * TCAM, compute the legal start-bank offsets for the key length.
+ * Returns 0 on success, -1 when the match elements cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/* Fix: fill all four candidate start banks (8..11);
+			 * previously start_offsets[0] was overwritten in each
+			 * iteration, leaving entries 1..3 uninitialized while
+			 * num_start_offsets claimed 4.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare this (new) flow definition against an existing one (km1) to see
+ * whether the existing KM recipe and flow type can be reused.
+ * Returns the reusable flow type, 0 when not compatible, or -1 when the
+ * exact same flow is already programmed (clash).
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/* NOTE(review): the first test indexes km->match[i] while the second
+	 * uses km->match_map[i] - presumably both are meant to look at the
+	 * same element set; verify the intended ordering.
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program the KM recipe registers at 'index' from this flow definition:
+ * assign the QWORD (QW0/QW4) and SWORD (DW8/DW10, or SWX for sideband)
+ * extractors in match-map order, write the key mask, and set the
+ * CAM-specific (EL/FTM/PAIRED) or TCAM-specific (BANK/KL) fields.
+ * Returns 0 on success, -1 when the extractors cannot accommodate the key.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	/* start from a clean recipe */
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband words only supported in CAM, and
+				 * only one SWX extractor is available
+				 */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* plain single words use DW8 then DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* quad words use QW0 then QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written from the top of the mask register downwards */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		/* bank bitmap is MSB-first relative to the bank number */
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into the CAM record(s) chosen
+ * for 'bank' and claim them in the distribution table. A key longer than
+ * one record spills into the next (paired) record. Returns OR'ed hw-mod
+ * status (0 = success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	/* total words to write: key plus the optional info (color) word */
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* remaining words go into the second record of the pair */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	/* push the shadow record(s) to hardware */
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record(s) in 'bank' and release them in the
+ * distribution table. Mirrors cam_populate. Returns OR'ed hw-mod status.
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	/* total words to clear: key plus the optional info (color) word */
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* paired entry: clear the second record as well */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank to any other
+ * bank with a free slot (cuckoo hashing step). Returns 1 when moved,
+ * 0 when no free slot exists or the move failed.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* a paired entry also needs the neighbor free */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to move the flow occupying
+ * 'bank_idx' somewhere else, recursing up to 'levels' deep through the
+ * occupants' alternative banks. cam_addr_reserved_stack guards against
+ * revisiting addresses already claimed higher up the recursion.
+ * Returns 1 when a slot was freed, 0 otherwise.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address so deeper levels do not try to reuse it */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		/* free the occupant of new_idx, then retry our own move */
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Hash the key into per-bank record indexes and program the flow into the
+ * first available CAM bank, cuckoo-moving existing entries when all hashed
+ * slots are taken. Returns 0 on success, -1 when no slot could be freed,
+ * or a hw-mod error code.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): logs indexes 0..2 unconditionally - assumes at least
+	 * three CAM banks; confirm against supported FPGA configurations.
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Scan the records of the TCAM bank group starting at 'start_bank' for a
+ * record number that is free across all key_word_size consecutive banks.
+ * On success stores the record in km->tcam_record and returns 1; returns
+ * 0 when no such record exists.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		int slot_free = 1;
+
+		for (int bank = 0; bank < km->key_word_size; bank++) {
+			if (km->tcam_dist[TCAM_DIST_IDX(start_bank + bank,
+							rec)].km_owner !=
+					NULL) {
+				slot_free = 0;
+				break;
+			}
+		}
+
+		if (slot_free) {
+			km->tcam_record = rec;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Choose a TCAM start bank and record for this flow by trying each legal
+ * start offset in order. Returns 0 on success, -1 when the TCAM is full.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int idx = 0; idx < km->num_start_offsets; idx++) {
+		int start_bank = km->start_offsets[idx];
+
+		if (!tcam_find_free_record(km, start_bank))
+			continue;
+
+		km->tcam_start_bank = start_bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word of a TCAM record. For each of the four bytes
+ * and every possible byte value (0..255), set or clear this record's bit in
+ * the bank's match vector depending on whether the value matches
+ * word/mask. Claims the slot in tcam_dist on success. Returns OR'ed
+ * hw-mod status.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* the record bit lives in word record/32, bit record%32 */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program the flow into the TCAM: find a record when reusing an existing
+ * recipe, write the TCI (color + flow type), then write each key word into
+ * its bank. Returns 0 on success, -1 when no free record exists, or a
+ * hw-mod error code.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		/* recipe reused from another flow - record not yet chosen */
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	/* suppress per-register debug output during the bulk TCAM writes */
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record's bit in every (byte, value) cell of a TCAM bank,
+ * flush the bank to hardware and drop the record's ownership.
+ *
+ * Returns 0 on success, non-zero on backend access failure.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+		/*
+		 * Abort the remaining bytes on error; previously only the
+		 * inner value loop stopped and the outer loop kept issuing
+		 * backend accesses with err already set.
+		 */
+		if (err)
+			break;
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove a flow's TCAM entry: clear the record's TCI (color/flow type)
+ * and wipe the record from every bank the key occupied.
+ *
+ * The bank resets are always attempted even if the TCI writes fail, so
+ * hardware state is cleaned up as far as possible; any failure is still
+ * reported to the caller (previously TCI errors were silently dropped).
+ *
+ * Returns 0 on success, -1 when no entry was placed, or a backend error.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+	int tci_err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI - zero color and flow type for this record */
+	tci_err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR,
+				     km->tcam_start_bank, km->tcam_record, 0);
+	tci_err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				     km->tcam_record, 0);
+	tci_err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank,
+				       km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err ? err : tci_err;
+}
+
+/*
+ * Attach km to the share group of an identical, already defined match
+ * entry km1: km joins the reference chain and inherits the existing
+ * hardware placement instead of allocating a new one.
+ *
+ * Returns 0 on success, -1 for unsupported targets.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	/* the group root is km1 itself unless km1 already belongs to one */
+	km->root = km1->root ? km1->root : km1;
+
+	/* append km at the end of the reference chain */
+	while (tail->reference)
+		tail = tail->reference;
+	tail->reference = km;
+
+	/* inherit the info word from the chain tail (as the original did) */
+	km->info = tail->info;
+
+	if (km->target == KM_CAM) {
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+	if (km->target == KM_TCAM) {
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+	/* KM_SYNERGY and anything else is not supported */
+	return -1;
+}
+
+/*
+ * Program this flow's match entry into hardware, dispatching on the
+ * configured target (CAM or TCAM).  The color becomes the entry's info.
+ *
+ * Returns the underlying write result, or -1 for unsupported targets.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		return km_write_data_to_cam(km);
+	if (km->target == KM_TCAM)
+		return km_write_data_to_tcam(km);
+
+	/* KM_SYNERGY and unknown targets are not supported */
+	return -1;
+}
+
+/*
+ * Remove a flow's match entry, honoring the share-group structure.
+ *
+ * Three cases:
+ *  - km is a non-root member of a share group: just unlink it from the
+ *    reference chain; the hardware entry stays owned by the group.
+ *  - km is the group root with live references: hand hardware ownership
+ *    (cam_dist/tcam_dist km_owner slots) over to the first reference;
+ *    nothing is cleared in hardware.
+ *  - km stands alone and was flushed: actually reset the CAM/TCAM entry.
+ *
+ * Returns 0 on success, -1 for unsupported targets or reset failure.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* non-root member: find our predecessor and splice us out */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* root with references: first reference becomes the new root */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			/* transfer CAM ownership (both records if paired) */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			/* transfer ownership of every bank the key spans */
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* sole owner: clear the entry in hardware */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One tunnel definition kept in the file-scope "tunnels" linked list. */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;      /* tunnel match values */
+	struct tunnel_cfg_s cfg_mask; /* mask applied when comparing cfg */
+	uint32_t flow_stat_id;        /* (uint32_t)-1 until set by flow code */
+	uint8_t vport;                /* allocated virtual port number */
+	int refcnt;                   /* number of flows using this tunnel */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/* True when the port number lies inside the tunnel virtual-port window. */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET)
+		return 0;
+	return virt_port < MAX_HW_VIRT_PORTS;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+static struct tunnel_s *tunnels;
+
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the first free virtual port number from the vport table.
+ * Returns the port number, or 255 when all virtual ports are in use.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	const unsigned int n_ports =
+		MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET;
+
+	for (unsigned int slot = 0; slot < n_ports; slot++) {
+		if (vport[slot])
+			continue;
+		vport[slot] = 1;
+		return (uint8_t)(slot + VIRTUAL_TUNNEL_PORT_OFFSET);
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual port back to the pool.
+ * Returns 0 on success.
+ * NOTE(review): the failure path returns -1 from a uint8_t-returning
+ * function, which callers observe as 255 - confirm callers test != 0
+ * rather than == -1.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Compare two field values under the intersection of both masks:
+ * true when every bit covered by (msk1 & msk2) agrees.
+ * Statement-expression form evaluates each argument exactly once.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked equality of the IPv4 tunnel 4-tuple: src/dst IP and ports. */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked equality of the IPv6 tunnel 4-tuple; each 128-bit address is
+ * compared as two 64-bit halves (v6_long.src_ip/dst_ip[0..1]).
+ */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked comparison of a stored tunnel against a candidate config.
+ * Returns non-zero when tunnel type and (masked) 4-tuple match.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up a tunnel in the db, or create it if not found.
+ *
+ * tun_set != 0 means this is a full tunnel definition (set command):
+ * search for an exact duplicate, or claim a matching pre-configured
+ * entry.  tun_set == 0 is a match search against defined tunnels.
+ * A found tunnel gets its refcnt bumped; a new one starts at refcnt 1.
+ *
+ * Returns the tunnel, or NULL on allocation/vport exhaustion.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		/* renamed from "vport": was shadowing the file-scope vport[] */
+		uint8_t new_vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       new_vport);
+
+		if (new_vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* allocation failed - return the vport */
+				flow_tunnel_free_virt_port(new_vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = new_vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel; when the last reference goes, unlink
+ * it from the db, release its virtual port and free it.
+ * Returns 0 on success, -1 if the tunnel is not in the db.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s **link = &tunnels;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the pointer that links to tnl */
+	while (*link && *link != tnl)
+		link = &(*link)->next;
+
+	if (*link == NULL) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--tnl->refcnt == 0) {
+		*link = tnl->next;
+		flow_tunnel_free_virt_port(tnl->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tnl->vport);
+		free(tnl);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a flow-element list describing a tunnel (IPv4/IPv6 + UDP
+ * [+ VXLAN]) starting at *idx, and look up or create the matching
+ * tunnel in the db.  vni == NULL means this is a tunnel "set" command
+ * (full definition); otherwise it is a match search and *vni receives
+ * the parsed VXLAN VNI (or (uint32_t)-1).
+ *
+ * On success *idx is advanced to the next (or END) element and the
+ * tunnel is returned; NULL on an invalid tunnel description.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/*
+		 * Fix: tnlcfg_mask was previously left uninitialized and then
+		 * compared/copied in tunnel_get(), so unmatched fields held
+		 * stack garbage.  Zero it so unset fields are deterministic.
+		 */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port number allocated to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Record the flow statistics id on a tunnel (initially (uint32_t)-1). */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Find the tunnel for a given vport (optionally filtered on a specific
+ * flow_stat_id; pass (uint32_t)-1 as wildcard) and copy its config into
+ * *tuncfg.  Returns 0 when found, -1 otherwise.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	const struct tunnel_s *tun;
+
+	for (tun = tunnels; tun; tun = tun->next) {
+		if (tun->vport != vport)
+			continue;
+		if (flow_stat_id == tun->flow_stat_id ||
+				flow_stat_id == (uint32_t)-1) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * Accumulate 16-bit words into an internet-checksum style sum (with
+ * end-around carry folding), starting from "seed".
+ * NOTE(review): for odd sizes the tail "(unsigned char)data[idx]"
+ * dereferences a full be16_t at the buffer end and keeps its low byte,
+ * which is host-endian dependent; the only caller in this file passes
+ * the even-sized IPv4 header, so that path is unused here - confirm
+ * before reusing with odd sizes.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy the raw (unmasked) spec bytes of a flow element into result. */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build the tunnel push (encap) header for VXLAN from a flow-element
+ * list (ETH, IPv4|IPv6, UDP, VXLAN, ...), appending the raw header
+ * bytes into fd->tun_hdr.  Sanitizes fields along the way (IP version/
+ * proto/TTL, UDP checksum 0, VXLAN I-flag) and, for IPv4, precalculates
+ * a partial header checksum with the length/checksum fields zeroed.
+ *
+ * Returns 0 on success, -1 on an unsupported element or ROA version.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* fixed: byte 4 was printed as addr_b[5] twice */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Report whether the CAT module is present in the FPGA backend. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_cat_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Read the CAT module version and resource counts from the backend and
+ * allocate the version-specific (v18/v21/v22) register cache tables as one
+ * contiguous area (released again by hw_mod_cat_free()).
+ * Returns 0 on success, a negative error on a bad resource count or an
+ * unsupported module version.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* counts that must be strictly positive */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* counts that may legitimately be zero (feature absent) */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* guard the division: kcc_banks may be zero on images without KCC */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * Allocate every table of the detected register layout in a single
+	 * callocate_mod() call: (pointer, element count, element size)
+	 * triples, with the triple count given up front.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT register cache allocated by hw_mod_cat_alloc().
+ * Safe to call repeatedly: free(NULL) is a no-op, so no guard is needed,
+ * and the pointer is cleared to prevent double-free/use-after-free.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Program CFN entry @i to permissive defaults: clear the whole entry
+ * (PRESET_ALL = 0), then open every protocol/error match field so the
+ * categorizer function accepts any traffic until narrowed by a real flow.
+ * NOTE(review): only the status of the initial PRESET_ALL call is
+ * propagated; the return values of the subsequent hw_mod_cat_cfn_set()
+ * calls are ignored — confirm this is intentional.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* fields below only exist from register layout v21 onwards */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the complete CAT module: zero the cached register image, then
+ * flush every table to hardware (CFN; KCE/KCS/FTE once per configured
+ * KM/FLM interface; CTE, CTS, COT, CCT, EXO, RCK, LEN; plus KCC when
+ * present and CCE/CCS on v22+).
+ * Returns 0 on success, -1 as soon as any flush fails.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* v18 has a single, implicit KM interface */
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		/* v21+: flush per configured KM interface (m0, optionally m1) */
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC table only exists when the backend reports a non-zero size */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables were introduced after v21 */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush @count cached CFN entries starting at @start_idx to hardware.
+ * ALL_ENTRIES flushes the whole table and requires @start_idx == 0.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const uint32_t nb_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES) {
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						nb_entries);
+	}
+
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Get or set a single CFN (categorizer function) field in the register
+ * cache. @index selects the CFN entry, @word_off addresses multi-word
+ * fields (e.g. PM_CMP), @get selects read (1) or write (0) through
+ * @value. Pseudo-fields SET_ALL_DEFAULTS, PRESET_ALL, COMPARE, FIND and
+ * COPY_FROM (v21+) operate on whole entries and are write/query only.
+ * Only the cache is touched; callers must flush to reach hardware.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* PM_CMP is the only two-word field in v18 */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* duplicate entry *value into entry index (v21+ only) */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field into the cache (get flag cleared). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;	/* modifier takes the value by address */
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one CFN field from the cache into *value (get flag set). */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, do_get);
+}
+
+/*
+ * Map (if_num, km_if_id) to the per-entry array slot (0 or 1) used by the
+ * v21+ register layouts. v18 has a single implicit interface -> slot 0.
+ * Returns a negative error when the id matches no configured interface;
+ * KM_FLM_IF_SECOND may only select the m1 interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num != KM_FLM_IF_SECOND && be->cat.km_if_m0 == km_if_id)
+		return 0;
+
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* KCE packs one enable bit per CFN, i.e. eight CFNs per entry */
+	const unsigned int nb_entries = be->cat.nb_cat_funcs / 8;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM/FLM interface slot the id selects */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_id = 0;	/* id 0 = KM module side of the interface */
+
+	return hw_mod_cat_kce_flush(be, if_num, km_id, start_idx, count);
+}
+
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_id = 1;	/* id 1 = FLM module side of the interface */
+
+	return hw_mod_cat_kce_flush(be, if_num, flm_id, start_idx, count);
+}
+
+/*
+ * Get or set the KCE enable bitmap for one cache entry (each entry covers
+ * eight CFNs). v21+ keeps one bitmap per KM/FLM interface, selected via
+ * the resolved km_if_idx; v18 has a single bitmap.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;	/* write path: KM interface id 0 */
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	/* read path: KM interface id 0 */
+	const int do_get = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;	/* write path: FLM interface id 1 */
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	/* read path: FLM interface id 1 */
+	const int do_get = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/*
+ * KCS
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* one KCS entry per categorizer function */
+	const unsigned int nb_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM/FLM interface slot the id selects */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_id = 0;	/* id 0 = KM module side of the interface */
+
+	return hw_mod_cat_kcs_flush(be, if_num, km_id, start_idx, count);
+}
+
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_id = 1;	/* id 1 = FLM module side of the interface */
+
+	return hw_mod_cat_kcs_flush(be, if_num, flm_id, start_idx, count);
+}
+
+/*
+ * Get or set the KCS category value for one CFN entry. v21+ keeps one
+ * category per KM/FLM interface, selected via the resolved km_if_idx;
+ * v18 has a single category per entry.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;	/* write path: KM interface id 0 */
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	/* read path: KM interface id 0 */
+	const int do_get = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;	/* write path: FLM interface id 1 */
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	/* read path: FLM interface id 1 */
+	const int do_get = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/*
+ * FTE
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* v20+ layouts carry four key sets per FTE group, older ones two */
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+	const unsigned int nb_entries =
+		be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	/* resolve which KM/FLM interface slot the id selects */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_id = 0;	/* id 0 = KM module side of the interface */
+
+	return hw_mod_cat_fte_flush(be, if_num, km_id, start_idx, count);
+}
+
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_id = 1;	/* id 1 = FLM module side of the interface */
+
+	return hw_mod_cat_fte_flush(be, if_num, flm_id, start_idx, count);
+}
+
+/*
+ * Get or set the FTE enable bitmap for one entry. Table size scales with
+ * the per-group key-set count (4 on v20+, 2 before). v21+ keeps one
+ * bitmap per KM/FLM interface, selected via the resolved km_if_idx.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FTE field for the KM module side (km_if_id = 0). */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Read one FTE field for the KM module side (km_if_id = 0). */
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set one FTE field for the FLM module side (km_if_id = 1). */
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Read one FTE field for the FLM module side (km_if_id = 1). */
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/* Flush @count CTE (color table enable) entries starting at @start_idx;
+ * the table holds one entry per CAT function.
+ */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one CTE field. Versions 18/21/22 are accessed through the
+ * v18 view here — presumably the field layout is shared across these
+ * versions (v21/v22 reuse the v18 CTE struct pointer type).
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CTE field at @index. */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTE field at @index. */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/* Flush @count CTS entries starting at @start_idx. Each CAT function
+ * occupies addr_size consecutive entries; before module version 15 the
+ * address size is fixed at 8, otherwise it is derived from cts_num.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one CTS field (category set A/B words).
+ * NOTE(review): unlike hw_mod_cat_cts_flush() this addr_size has no
+ * (_VER_ < 15) special case — confirm versions below 15 never reach
+ * this accessor (the switch below only supports 18/21/22).
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CTS field at @index. */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+/* Read one CTS field at @index. */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/* Flush @count COT (color override table) entries starting at
+ * @start_idx; the table holds max_categories entries.
+ */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one COT field. Besides plain field access this also
+ * implements the pseudo-fields PRESET_ALL (memset whole entry to the
+ * low byte of *value; set-only), COMPARE (compare entry @index against
+ * entry *value) and FIND (search for an entry equal to @index, result
+ * returned through *value).
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one COT field at @index. */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+/* Read one COT field at @index. */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/* Flush @count CCT entries starting at @start_idx; the table holds
+ * 4 entries per CAT function.
+ */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one CCT field (color / KM word); the v18 field layout is
+ * used for versions 18/21/22 alike.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CCT field at @index. */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCT field at @index. */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/* Flush @count KCC CAM entries starting at @start_idx; the CAM holds
+ * kcc_size entries.
+ */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one KCC CAM field. The key spans two 32-bit words, so
+ * HW_CAT_KCC_KEY additionally takes @word_off (0 or 1) to select the
+ * word; other fields ignore @word_off.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one KCC CAM field at @index / @word_off. */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KCC CAM field at @index / @word_off. */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush @count EXO (extractor offset) entries starting at @start_idx;
+ * the table holds nb_pm_ext entries.
+ */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one EXO field. The offset field is signed (int32_t in
+ * cat_v18_exo_s), hence the get_set_signed() accessor.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one EXO field at @index. */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+/* Read one EXO field at @index. */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/* Flush @count RCK entries starting at @start_idx; the table holds
+ * 64 entries per pattern-matcher extractor.
+ */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one RCK data word; bound matches hw_mod_cat_rck_flush(). */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the RCK data word at @index. */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+/* Read the RCK data word at @index. */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/* Flush @count LEN (length check) entries starting at @start_idx;
+ * the table holds nb_len entries.
+ */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one LEN field (lower/upper bounds, dyn selectors, invert). */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one LEN field at @index. */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+/* Read one LEN field at @index. */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/* Flush @count CCE entries starting at @start_idx; the CCE table is a
+ * fixed 4 entries (v22-only table).
+ */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one CCE field (v22 only).
+ * Bug fix: the index bound previously checked be->cat.nb_len, an
+ * apparent copy/paste from the LEN accessor. The CCE table is a fixed
+ * 4 entries (see hw_mod_cat_cce_flush()), so use that bound here to
+ * keep the accessor consistent with the flush path and avoid indexing
+ * past the 4-entry cce allocation when nb_len is larger.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CCE field at @index. */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCE field at @index. */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+/* Flush @count CCS entries starting at @start_idx; the CCS table is a
+ * fixed 1024 entries (v22-only table).
+ */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Get or set one CCS field (v22 only).
+ * Bug fix: the index bound previously checked be->cat.nb_len, an
+ * apparent copy/paste from the LEN accessor. The CCS table is a fixed
+ * 1024 entries (see hw_mod_cat_ccs_flush()), so use that bound here to
+ * keep the accessor consistent with the flush path; nb_len is typically
+ * smaller and would reject valid CCS indices.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CCS field at @index. */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+/* Read one CCS field at @index. */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CAT v18 categorizer function (CFN) entry: per-category match
+ * conditions — protocol checks, error checks, in-port and pattern
+ * matcher selectors.
+ */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KM category enable bitmap (one word per entry in v18). */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KM category select. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* Flow type enable bitmap (one word per entry in v18). */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* Color table enable: one enable bit per downstream module, accessible
+ * either as a single bitmap word or as named bitfields.
+ */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* Category set: two category words (A/B) per entry. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* Color override table entry. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* Color control table entry. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* Extractor offset: dyn selector plus a signed byte offset. */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK table data word. */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* Length check entry: [lower, upper] bounds, two dyn selectors, invert. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC CAM entry: two-word key, category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Shadow-table pointers for CAT module version 18; allocated per table
+ * by the backend.
+ */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/* CAT v21 CFN entry: extends v18 with tunnel checksum / TTL error
+ * checks and a second KM interface (km0_or / km1_or).
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE with one enable bitmap per KM/FLM interface (0/1). */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS with one category word per KM/FLM interface (0/1). */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE with one enable bitmap per KM/FLM interface (0/1). */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Shadow-table pointers for CAT module version 21; CFN/KCE/KCS/FTE are
+ * v21-specific, the remaining tables reuse the v18 layouts.
+ */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CAT v22 CTE: v18 layout plus an additional rrb enable bit. */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE entry (new in v22). */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS entry (new in v22): per-module enable + value pairs plus three
+ * type/data side-band words.
+ */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Shadow-table pointers for CAT module version 22: reuses v21 and v18
+ * layouts where unchanged, adds the CCE and CCS tables.
+ */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend whether the FLM module exists in this FPGA image. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Read the FLM module version and resource counts from the backend and
+ * allocate the shadow-register cache for the detected version.
+ *
+ * Returns 0 on success, or a negative error when a resource count is
+ * invalid, the version is unsupported, or allocation fails.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* was "flm_variant" — copy-paste from the block above */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* v17: 26 pointer/count/size triplets */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20: the 26 v17 triplets plus 12 v20-only counters (38) */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM shadow-register cache allocated by hw_mod_flm_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the pointer is always
+ * reset to protect against double free.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM shadow cache to defaults and flush all tables to hardware.
+ * NOTE(review): the return values of the set/flush calls below are
+ * discarded and 0 is always returned — confirm this is intentional.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached CONTROL register block to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get or set one field of the cached CONTROL block.
+ * get != 0 reads into *value, get == 0 writes *value to the cache
+ * (hardware is only touched on a later flush).
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:	/* v20 keeps the v17 CONTROL layout */
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter: write one CONTROL field into the shadow cache. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Public getter: read one CONTROL field from the shadow cache. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached STATUS block to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached STATUS block from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Get/set one STATUS field in the shadow cache (get != 0 reads). */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for a STATUS field. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Public getter for a STATUS field. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached TIMEOUT register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the single TIMEOUT field in the shadow cache. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for the TIMEOUT field. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Public getter for the TIMEOUT field. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached SCRUB register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the single SCRUB interval field in the shadow cache. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for the SCRUB field. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Public getter for the SCRUB field. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_BIN register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_BIN field in the shadow cache. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for LOAD_BIN. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Public getter for LOAD_BIN. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_PPS register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_PPS field in the shadow cache. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for LOAD_PPS. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Public getter for LOAD_PPS. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_LPS register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_LPS field in the shadow cache. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for LOAD_LPS. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Public getter for LOAD_LPS. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_APS register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the LOAD_APS field in the shadow cache. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for LOAD_APS. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Public getter for LOAD_APS. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached PRIO block to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set one of the four PRIO limit/flow-type pairs in the shadow cache. */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for a PRIO field. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Public getter for a PRIO field. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) PST profile entries to hardware.
+ * count == ALL_ENTRIES flushes every profile; ranges beyond
+ * nb_pst_profiles are rejected.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/* Get/set one field of PST profile [index] in the shadow cache. */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			/* fill the whole entry with the given byte; set-only */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public setter for a PST profile field. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for a PST profile field. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP (recipe) entries to hardware.
+ * count == ALL_ENTRIES flushes every category.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get/set one field of RCP entry [index] in the shadow cache.
+ * HW_FLM_RCP_MASK transfers the whole 10-word mask array; all other
+ * fields are single 32-bit values.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			/* fill the whole entry with the given byte; set-only */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* array field: copy all 10 words in the chosen direction */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Setter dedicated to the array-valued HW_FLM_RCP_MASK field.
+ * NOTE(review): rejecting a non-MASK field with error_unsup_ver() looks
+ * like it should be error_unsup_field() — confirm intent.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Scalar setter for RCP fields; HW_FLM_RCP_MASK must go through
+ * hw_mod_flm_rcp_set_mask() instead (see NOTE above on the error code).
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Public getter for an RCP field (including MASK). */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached BUF_CTRL block from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field from the shadow cache (read-only block). */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public getter for a BUF_CTRL field. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh all cached statistics counters from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one statistics counter from the shadow cache.
+ * v17 counters are handled in the outer switch; counters that only
+ * exist from v20 on fall into the nested default, which is guarded by
+ * the _VER_ < 18 check so v17 callers get an unsupported-field error.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters: reject on older versions */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, expressed as 32-bit words)
+ * to the hardware learn FIFO.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Pull word_cnt 32-bit words of flow-info records from hardware into
+ * value. NOTE(review): the flm_inf_data_update return value is
+ * discarded — confirm the callback cannot fail.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Pull one status record (flm_v17_sta_data_s worth of 32-bit words)
+ * from hardware into value.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..9b4ee1991e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay for two adjacent 28-bit member indices.
+ * NOTE(review): 28+28+4 = 60 bits inside a 64-bit unit — presumably the
+ * remaining bits are unused; confirm against the FLM register spec.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+/* Shadow of the FLM CONTROL register block (one 32-bit word per field). */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* Shadow of the FLM STATUS register block. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* Flow timeout value. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* Scrub interval. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* Load/rate readouts, one register each. */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* Four priority limit / flow-type pairs. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* One PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* One RCP (recipe) table entry; mask is accessed as a 10-word array. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Learn/info/status buffer occupancy readouts. */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * Byte-exact wire formats exchanged with the FLM learn/info/status
+ * FIFOs; packed so the bit positions in the comments (high:low (width))
+ * hold without compiler padding.
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* One flow-info record read back from the info FIFO. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* One status record read back from the status FIFO. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* v17 statistics counters, one 32-bit counter register each. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v17 shadow-register layout; all pointers are carved out of one
+ * allocation by hw_mod_flm_alloc() and freed via the common base pointer.
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;	/* nb_pst_profiles entries */
+	struct flm_v17_rcp_s *rcp;	/* nb_categories entries */
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+struct flm_v20_stat_sta_done_s {	/* SW cache of the FLM STAT_STA_DONE counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {	/* SW cache of the FLM STAT_INF_DONE counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {	/* SW cache of the FLM STAT_INF_SKIP counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {	/* SW cache of the FLM STAT_PCK_HIT counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {	/* SW cache of the FLM STAT_PCK_MISS counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {	/* SW cache of the FLM STAT_PCK_UNH counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {	/* SW cache of the FLM STAT_PCK_DIS counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {	/* SW cache of the FLM STAT_CSH_HIT counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {	/* SW cache of the FLM STAT_CSH_MISS counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {	/* SW cache of the FLM STAT_CSH_UNH counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {	/* SW cache of the FLM STAT_CUC_START counter */
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {	/* SW cache of the FLM STAT_CUC_MOVE counter */
+	uint32_t cnt;
+};
+
+struct hw_mod_flm_v20_s {	/* FLM v20 cache; groups unchanged since v17 reuse the v17 structs */
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;	/* v17-compatible statistics */
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;	/* statistics new in v20 below */
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)	/* true when the FPGA exposes the HSH module */
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)	/* allocate the HSH register cache; 0 on success */
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);	/* module version selects the cache layout */
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,	/* one array: the RCP table */
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;	/* allocation failure */
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+void hw_mod_hsh_free(struct flow_api_backend_s *be)	/* release the HSH cache allocated by hw_mod_hsh_alloc() */
+{
+	/* free(NULL) is a no-op per the C standard, so no guard is needed */
+	free(be->hsh.base);
+	/* clear the pointer to prevent dangling use / double free */
+	be->hsh.base = NULL;
+}
+
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)	/* zero the cache, then sync it all to HW */
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)	/* write cached RCP entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	/* unsigned cast (as in the other modules) also rejects a negative start_idx */
+	if ((unsigned int)(start_idx + count) > be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/* Read (get != 0) or write one field of HSH recipe [index]; word_off selects the word in array fields */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:	/* fill the whole recipe with a byte pattern (set only) */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/* '>=': valid offsets are 0..SIZE-1; '>' allowed an out-of-bounds access */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* '>=': valid offsets are 0..SIZE-1; '>' allowed an out-of-bounds access */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);	/* get=0: write */
+}
+
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);	/* get=1: read into *value */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+struct hsh_v5_rcp_s {	/* SW cache of one HSH v5 hash recipe */
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];	/* size must match HSH_RCP_MAC_PORT_MASK_SIZE in hw_mod_hsh.c */
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;	/* signed offset (see get_set_signed accessors) */
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];	/* size must match HSH_RCP_WORD_MASK_SIZE in hw_mod_hsh.c */
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+struct hw_mod_hsh_v5_s {	/* top-level HSH v5 cache: array of nb_rcp recipes */
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be)	/* true when the FPGA exposes the HST module */
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)	/* allocate the HST register cache; 0 on success */
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);	/* module version selects the cache layout */
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,	/* one array: the RCP table */
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;	/* allocation failure */
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+void hw_mod_hst_free(struct flow_api_backend_s *be)	/* release the HST cache allocated by hw_mod_hst_alloc() */
+{
+	/* free(NULL) is a no-op per the C standard, so no guard is needed */
+	free(be->hst.base);
+	/* clear the pointer to prevent dangling use / double free */
+	be->hst.base = NULL;
+}
+
+int hw_mod_hst_reset(struct flow_api_backend_s *be)	/* zero the cache, then sync it all to HW */
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)	/* write cached RCP entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);	/* unsigned cast also rejects negative start_idx */
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/* Read (get != 0) or write one field of HST recipe [index] */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:	/* fill the whole recipe with a byte pattern (set only) */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/* bug fix: the result was discarded, so the rv check below could never fire */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);	/* get=0: write */
+}
+
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);	/* get=1: read into *value */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+struct hst_v2_rcp_s {	/* SW cache of one HST v2 recipe: strip window plus three modifier slots */
+	uint32_t strip_mode;
+	uint32_t start_dyn;	/* strip window start (dyn selector + offset) */
+	uint32_t start_ofs;
+	uint32_t end_dyn;	/* strip window end (dyn selector + offset) */
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;	/* modifier 0: command, position, value */
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;	/* modifier 1 */
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;	/* modifier 2 */
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+struct hw_mod_hst_v2_s {	/* top-level HST v2 cache: array of nb_hst_rcp_categories recipes */
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)	/* true when the FPGA exposes the IOA module */
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)	/* allocate the IOA register cache; 0 on success */
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);	/* module version selects the cache layout */
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,	/* three arrays: rcp, tpid, roa_epp */
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;	/* allocation failure */
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+void hw_mod_ioa_free(struct flow_api_backend_s *be)	/* release the IOA cache allocated by hw_mod_ioa_alloc() */
+{
+	/* free(NULL) is a no-op per the C standard, so no guard is needed */
+	free(be->ioa.base);
+	/* clear the pointer to prevent dangling use / double free */
+	be->ioa.base = NULL;
+}
+
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)	/* zero the cache, set TPID defaults, sync all to HW */
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);	/* NOTE(review): flush return values are dropped here */
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);	/* custom TPID defaults */
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;	/* always 0, even if a flush failed */
+}
+
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)	/* write cached RCP entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);	/* unsigned cast also rejects negative start_idx */
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)	/* get!=0: read into *value; else write *value */
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:	/* fill the whole recipe with a byte pattern (set only) */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:	/* find an entry equal to rcp[index] */
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:	/* compare rcp[index] against rcp[*value] */
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);	/* get=0: write */
+}
+
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);	/* get=1: read into *value */
+}
+
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)	/* write the cached custom TPIDs to HW */
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)	/* set a custom TPID in the cache (flush separately) */
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)	/* write cached ROA EPP entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);	/* unsigned cast also rejects negative start_idx */
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)	/* get!=0: read into *value; else write *value */
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:	/* fill the whole entry with a byte pattern (set only) */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:	/* find an entry equal to roa_epp[index] */
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:	/* compare roa_epp[index] against roa_epp[*value] */
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);	/* get=0: write */
+}
+
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);	/* get=1: read into *value */
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+struct ioa_v4_rcp_s {	/* SW cache of one IOA v4 recipe: VLAN pop/push and queue override */
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;	/* VLAN tag fields used when pushing */
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+struct ioa_v4_special_tpid_s {	/* the two configurable custom TPID values */
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+struct ioa_v4_roa_epp_s {	/* ROA egress post-processing entry (cross-indexed from IOA, see alloc) */
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+struct hw_mod_ioa_v4_s {	/* top-level IOA v4 cache */
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;	/* single entry */
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+bool hw_mod_km_present(struct flow_api_backend_s *be)	/* true when the FPGA exposes the KM module */
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+int hw_mod_km_alloc(struct flow_api_backend_s *be)	/* allocate the KM register caches; 0 on success */
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);	/* module version selects the cache layout */
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* use the named mask-size constants instead of magic 12/6 (same values) */
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,	/* five arrays: rcp, cam, tcam, tci, tcq */
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;	/* allocation failure */
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+void hw_mod_km_free(struct flow_api_backend_s *be)	/* release the KM caches allocated by hw_mod_km_alloc() */
+{
+	/* free(NULL) is a no-op per the C standard, so no guard is needed */
+	free(be->km.base);
+	/* clear the pointer to prevent dangling use / double free */
+	be->km.base = NULL;
+}
+
+int hw_mod_km_reset(struct flow_api_backend_s *be)	/* zero all KM caches and sync them to HW */
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);	/* one flush per bank-width word */
+
+	return 0;	/* NOTE(review): flush return values are dropped above */
+}
+
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)	/* sync RCP cache range to HW */
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);	/* unsigned cast also rejects negative start_idx */
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/* Read (get != 0) or write one field of KM recipe [index]; word_off selects the word in array fields */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:	/* fill the whole recipe with a byte pattern (set only) */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/* '>=': valid offsets are 0..SIZE-1; '>' allowed an out-of-bounds access */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* '>=': valid offsets are 0..SIZE-1; '>' allowed an out-of-bounds access */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field into the cache (thin wrapper, get = 0). */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KM RCP field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush cached KM CAM records to hardware via the backend interface.
+ * count == ALL_ENTRIES expands to every record of every bank; the
+ * requested range is validated against the bank-major cache layout.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the KM CAM cache entry
+ * at (bank, record). Only touches the software cache; a separate
+ * hw_mod_km_cam_flush() pushes changes to hardware.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* CAM cache is laid out bank-major */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* write-only: fill the whole entry with the byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM CAM field into the cache (thin wrapper, get = 0). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM CAM field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCAM entries to hardware.
+ * NOTE(review): 4 * 256 is used throughout as entries-per-bank; consider a
+ * named constant -- confirm it matches the TCAM geometry (4 words x 256
+ * byte values per bank).
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Read or write one KM TCAM cache entry addressed by (bank, byte, byte_val).
+ * Writes only mark the entry dirty when the 96-bit value actually changes,
+ * so a later flush can skip untouched entries.
+ * NOTE(review): for HW_KM_TCAM_BANK_RESET the byte/byte_val arguments only
+ * influence the initial bounds check -- confirm callers pass 0 for both.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			/* write-only: preset every entry of the bank */
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a 3-word TCAM value into the cache (thin wrapper, get = 0). */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read a 3-word TCAM value from the cache (thin wrapper, get = 1). */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush cached KM TCI records to hardware; the TCI table is addressed as
+ * nb_tcam_banks x nb_tcam_bank_width records.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the KM TCI cache
+ * entry at (bank, record).
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCI field into the cache (thin wrapper, get = 0). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM TCI field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCQ records to hardware; the TCQ table shares the
+ * nb_tcam_banks x nb_tcam_bank_width addressing with TCI.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the KM TCQ cache
+ * entry at (bank, record).
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Write one KM TCQ field into the cache.
+ * NOTE(review): unlike the other *_set wrappers this takes the value by
+ * pointer -- confirm that is intentional for multi-word TCQ fields.
+ */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one KM TCQ field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * Cache image of one KM v7 recipe (RCP) record; field names mirror the
+ * fields accessed in hw_mod_km.c. int32_t members are signed offsets
+ * (written via get_set_signed).
+ * NOTE(review): hw_mod_km.c bounds-checks the mask word offsets with
+ * 'word_off > KM_RCP_MASK_D_A_SIZE' / '> KM_RCP_MASK_B_SIZE'; verify those
+ * macros denote the maximum index (11 / 5) and not the element counts of
+ * mask_d_a[12] / mask_b[6], otherwise an off-by-one write past the arrays
+ * is possible.
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* Cache image of one KM v7 CAM record: six words (w0-w5) and six ft fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/*
+ * Cache image of one KM v7 TCAM entry: a 3 x 32-bit value plus a dirty
+ * flag set by hw_mod_km_tcam_mod() when the cached value changes.
+ */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* Cache image of one KM v7 TCI record (color and ft fields). */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* Cache image of one KM v7 TCQ record (bank mask and qualifier fields). */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/*
+ * Per-table cache pointers for the KM v7 module; the arrays are carved
+ * out of one allocation owned by the hw_mod KM common struct.
+ */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query PDB version and resource counts from the backend and allocate the
+ * version-specific cache (RCP table + single config record).
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB cache allocated by hw_mod_pdb_alloc() (single base block). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+/*
+ * Reset the PDB module: zero the whole cache and flush RCP and config to
+ * hardware. Returns the OR of the flush results (0 when both succeed).
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush [start_idx, start_idx + count) of the PDB RCP cache to hardware. */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of PDB RCP cache entry
+ * 'index'. FIND/COMPARE pseudo-fields search/compare across entries via
+ * the shared helpers; PRESET_ALL is write-only.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB RCP field into the cache (thin wrapper, get = 0). */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one PDB RCP field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached PDB config record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Write one PDB config field into the cache (set-only; no get variant).
+ * Use hw_mod_pdb_config_flush() to push the change to hardware.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * Cache image of one PDB v9 RCP record; int32_t members are signed
+ * relative offsets (written via get_set_signed in hw_mod_pdb.c).
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* Cache image of the single PDB v9 config record. */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table cache pointers for the PDB v9 module (one shared allocation). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query QSL version and resource counts from the backend and allocate the
+ * version-specific caches (RCP, QST, QEN, UNMQ tables).
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL cache allocated by hw_mod_qsl_alloc() (single base block). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+/*
+ * Reset the QSL module: zero the entire cache area and flush all four
+ * tables (RCP, QST, QEN, UNMQ) to hardware. Always returns 0.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	/* Use the bounds-checked wrapper instead of a direct backend call
+	 * with a magic 256 (== QSL_QNMQ_ENTRIES), matching how the other
+	 * tables are flushed above.
+	 */
+	hw_mod_qsl_unmq_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Flush [start_idx, start_idx + count) of the QSL RCP cache to hardware. */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of QSL RCP cache entry
+ * 'index'. FIND/COMPARE pseudo-fields search/compare across entries via
+ * the shared helpers; PRESET_ALL is write-only.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL RCP field into the cache (thin wrapper, get = 0). */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL RCP field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) of the QSL QST cache to hardware. */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of QSL QST cache entry
+ * 'index'. PRESET_ALL is write-only.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QST field into the cache (thin wrapper, get = 0). */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QST field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) of the fixed-size QEN cache. */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) the enable field of QEN cache entry
+ * 'index' (fixed table of QSL_QEN_ENTRIES).
+ */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QEN field into the cache (thin wrapper, get = 0). */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QEN field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) of the fixed-size UNMQ cache. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of UNMQ cache entry
+ * 'index' (fixed table of QSL_QNMQ_ENTRIES).
+ */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL UNMQ field into the cache (thin wrapper, get = 0). */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL UNMQ field from the cache into *value (thin wrapper, get = 1). */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* Cache image of one QSL v7 RCP record (fields as accessed in hw_mod_qsl.c). */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* Cache image of one QSL v7 QST (queue selection table) record. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* Cache image of one QSL v7 QEN (queue enable) record. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* Cache image of one QSL v7 UNMQ (unmatched-queue) record. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table cache pointers for the QSL v7 module (one shared allocation). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query RMC version from the backend and allocate the version-specific
+ * cache (a single control record for v1.3, encoded as 0x10003).
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC cache allocated by hw_mod_rmc_alloc() (single base block). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+/*
+ * Reset the RMC module: zero the cache, program a safe default control
+ * word (statistics and keep-alive blocked, all MAC ports and RPP slices
+ * blocked) and flush it to hardware.
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the cached RMC control record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the single RMC v1.3
+ * control record in the cache.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC control field into the cache (thin wrapper, get = 0). */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one RMC control field from the cache into *value (thin wrapper). */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/*
+ * Shadow-cache layout of the RMC (Rx MAC controller) CTRL register,
+ * module version 1.3.  One cache field per hardware register field;
+ * uses uint32_t throughout, so this header must be included after
+ * <stdint.h> (done by flow_api_backend.h).
+ */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;      /* block statistics packets */
+	uint32_t block_keepa;      /* block keep-alive packets */
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;   /* bitmask of blocked MAC ports */
+	uint32_t lag_phy_odd_even;
+};
+
+/* Version 1.3 view of the RMC module cache. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Report whether the ROA module exists in the current FPGA image. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query the ROA module version and resource counts from the backend and
+ * allocate the shadow cache for all ROA register groups in one block.
+ * Returns 0 on success, negative on unsupported version, bad resource
+ * count, or allocation failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/*
+	 * NOTE(review): tunnel categories are divided by 4 with no
+	 * explanation - presumably 4 flow categories share one tunnel
+	 * category; confirm against the register documentation.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		/* One calloc covering tunhdr, tuncfg, config and lagcfg */
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the ROA shadow cache allocated by hw_mod_roa_alloc(). */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	if (be->roa.base) {
+		free(be->roa.base);
+		be->roa.base = NULL;
+	}
+}
+
+/*
+ * Reset the ROA module: zero the shadow cache, program sane defaults
+ * and flush every register group to hardware.
+ *
+ * BUGFIX: the original only captured the return value of the TUNHDR
+ * flush and silently discarded errors from the TUNCFG/CONFIG/LAGCFG
+ * operations even though an err accumulator existed.  Accumulate all
+ * of them (same convention as hw_mod_tpe_reset()).
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush [start_idx, start_idx+count) cached TUNHDR entries to hardware. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Get/set accessor for one 32-bit word of a cached tunnel header.
+ * NOTE(review): word_off is not range-checked against the tunnel_hdr
+ * array (16 words) - confirm callers always pass a valid offset.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			/* compare entry [index] with entry [word_off] */
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for a tunnel header word; flush separately. */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read a tunnel header word from the shadow cache into *value. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) cached TUNCFG entries to hardware. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Get/set accessor for one field of a cached TUNCFG record, plus the
+ * generic PRESET_ALL / FIND / COMPARE helper operations.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			/* set-only: fill the whole record with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			/* find an entry equal to entry [*value] */
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			/* compare entry [index] with entry [*value] */
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one TUNCFG field; flush separately. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one TUNCFG field from the shadow cache into *value. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Flush the single cached CONFIG record to hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/* Get/set accessor for the module-wide CONFIG record. */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one CONFIG field; flush separately. */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Read one CONFIG field from the shadow cache into *value. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) cached LAGCFG entries to hardware. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/* Get/set accessor for one cached LAGCFG entry. */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one LAGCFG field; flush separately. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one LAGCFG field from the shadow cache into *value. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/*
+ * Shadow-cache record layouts for the ROA module, version 6.
+ * Must be included after <stdint.h> (done by flow_api_backend.h).
+ */
+
+/* One tunnel push header: 4 x 4 32-bit words (64 bytes). */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration. */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;          /* update IP checksum */
+	uint32_t ipcs_precalc;      /* precalculated IP checksum */
+	uint32_t iptl_upd;          /* update IP total length */
+	uint32_t iptl_precalc;      /* precalculated IP total length */
+	uint32_t vxlan_udp_len_upd; /* update VXLAN UDP length */
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Module-wide forwarding configuration (single record). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Version 6 view of the ROA module cache. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Report whether the SLC module exists in the current FPGA image. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query the SLC module version and allocate one RCP cache entry per
+ * flow category.  Returns 0 on success, negative on unsupported
+ * version or allocation failure.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC shadow cache allocated by hw_mod_slc_alloc(). */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	if (be->slc.base) {
+		free(be->slc.base);
+		be->slc.base = NULL;
+	}
+}
+
+/* Zero the SLC cache and flush all RCP entries to hardware. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx+count) cached RCP entries to hardware. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Get/set accessor for one field of a cached SLC RCP entry, plus the
+ * generic PRESET_ALL / FIND / COMPARE helper operations.
+ *
+ * BUGFIX: the entry size passed to memset()/find_equal_index()/
+ * do_compare_indexes() was sizeof(struct hw_mod_slc_v1_s), which is the
+ * container struct holding a single pointer (8 bytes), not the 16-byte
+ * RCP record itself.  That preset only part of an entry and walked the
+ * RCP array with the wrong stride.  Use sizeof(struct slc_v1_rcp_s),
+ * matching the element size used in hw_mod_slc_alloc().
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			/* set-only: fill the whole record with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			/* find an entry equal to entry [*value] */
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			/* compare entry [index] with entry [*value] */
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one SLC RCP field; flush separately. */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC RCP field from the shadow cache into *value. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Report whether the SLC-LR module exists in the current FPGA image. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query the SLC-LR module version and allocate one RCP cache entry per
+ * flow category.  Returns 0 on success, negative on unsupported
+ * version or allocation failure.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC-LR shadow cache allocated by hw_mod_slc_lr_alloc(). */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	if (be->slc_lr.base) {
+		free(be->slc_lr.base);
+		be->slc_lr.base = NULL;
+	}
+}
+
+/* Zero the SLC-LR cache and flush all RCP entries to hardware. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx+count) cached RCP entries to hardware. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Get/set accessor for one field of a cached SLC-LR RCP entry, plus the
+ * generic PRESET_ALL / FIND / COMPARE helper operations.
+ *
+ * BUGFIX: the entry size passed to memset()/find_equal_index()/
+ * do_compare_indexes() was sizeof(struct hw_mod_slc_lr_v2_s), which is
+ * the container struct holding a single pointer (8 bytes), not the
+ * 16-byte RCP record itself.  That preset only part of an entry and
+ * walked the RCP array with the wrong stride.  Use
+ * sizeof(struct slc_lr_v2_rcp_s), matching hw_mod_slc_lr_alloc().
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			/* set-only: fill the whole record with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			/* find an entry equal to entry [*value] */
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			/* compare entry [index] with entry [*value] */
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is signed; use the signed accessor */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one SLC-LR RCP field; flush separately. */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC-LR RCP field from the shadow cache into *value. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/*
+ * Shadow-cache record layout for the SLC-LR module, version 2.
+ * Must be included after <stdint.h> (done by flow_api_backend.h).
+ */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en; /* enable tail slicing */
+	uint32_t tail_dyn;    /* dynamic offset selector */
+	int32_t tail_ofs;     /* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 2 view of the SLC-LR module cache. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/*
+ * Shadow-cache record layout for the SLC module, version 1.
+ * Must be included after <stdint.h> (done by flow_api_backend.h).
+ */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en; /* enable tail slicing */
+	uint32_t tail_dyn;    /* dynamic offset selector */
+	int32_t tail_ofs;     /* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version 1 view of the SLC module cache. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Report whether the TPE module exists in the current FPGA image. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query the TPE module version and resource counts from the backend and
+ * allocate the shadow cache for all TPE register groups in one block.
+ * IFR categories exist only from version 2 onward.  Returns 0 on
+ * success, negative on unsupported version, bad resource count, or
+ * allocation failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		/* v1: 8 register groups, no IFR */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		/* v2: 10 register groups, adds RPP_IFR and IFR */
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the TPE shadow cache allocated by hw_mod_tpe_alloc(). */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	if (be->tpe.base) {
+		free(be->tpe.base);
+		be->tpe.base = NULL;
+	}
+}
+
+/*
+ * Zero the TPE cache and flush every register group to hardware,
+ * accumulating error codes from all flushes.  The IFR groups only
+ * exist in version 2.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP - IFR recipe in the RPP stage (version 2 only)
+ */
+
+/* Flush [start_idx, start_idx+count) cached RPP_IFR RCP entries. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/* Get/set accessor for one cached RPP_IFR RCP entry. */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one RPP_IFR RCP field; flush separately. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP_IFR RCP field from the shadow cache into *value. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP - Rx packet processor recipe
+ */
+
+/* Flush [start_idx, start_idx+count) cached RPP RCP entries. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Get/set accessor for one cached RPP RCP entry, plus the generic
+ * PRESET_ALL / FIND / COMPARE helper operations.  The v1 cache view is
+ * used for both versions 1 and 2 (layouts are shared).
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* set-only: fill the whole record with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			/* find an entry equal to entry [*value] */
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			/* compare entry [index] with entry [*value] */
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one RPP RCP field; flush separately. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP RCP field from the shadow cache into *value. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP - IP fragmentation recipe (version 2 only)
+ */
+
+/* Flush [start_idx, start_idx+count) cached IFR RCP entries. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/* Get/set accessor for one cached IFR RCP entry. */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one IFR RCP field; flush separately. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IFR RCP field from the shadow cache into *value. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP - header insert recipe
+ */
+
+/* Flush [start_idx, start_idx+count) cached INS RCP entries. */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Get/set accessor for one cached INS RCP entry, plus the generic
+ * PRESET_ALL / FIND / COMPARE helper operations.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* set-only: fill the whole record with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			/* find an entry equal to entry [*value] */
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			/* compare entry [index] with entry [*value] */
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Cache-only setter for one INS RCP field; flush separately. */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one INS RCP field from the shadow cache into *value. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush RPL RCP entries [start_idx .. start_idx + count - 1] to hardware
+ * through the backend callback.  count == ALL_ENTRIES selects the whole
+ * table.
+ * NOTE(review): negative start_idx wraps through the unsigned cast --
+ * presumably validated by callers, TODO confirm.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field of an RPL RCP record: write when get == 0, read when
+ * get != 0 (see the set/get wrappers).  HW_TPE_PRESET_ALL fills the
+ * record with the low byte of *value (write only); FIND/COMPARE use the
+ * generic index helpers.  Versions 1 and 2 only.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Scalar fields, transferred by get_set() in the direction
+		 * selected by 'get'.
+		 */
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL RCP field (thin wrapper over hw_mod_tpe_rpl_rcp_mod). */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL RCP field into *value. */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush RPL EXT entries [start_idx .. start_idx + count - 1] to hardware
+ * through the backend callback.  count == ALL_ENTRIES selects the whole
+ * table (nb_rpl_ext_categories entries).
+ * NOTE(review): negative start_idx wraps through the unsigned cast --
+ * presumably validated by callers, TODO confirm.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field of an RPL EXT record: write when get == 0, read when
+ * get != 0 (see the set/get wrappers).  HW_TPE_PRESET_ALL fills the
+ * record with the low byte of *value (write only); FIND/COMPARE use the
+ * generic index helpers.  Versions 1 and 2 only.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL EXT field (thin wrapper over hw_mod_tpe_rpl_ext_mod). */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL EXT field into *value. */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush RPL RPL (replacement data) entries [start_idx .. start_idx +
+ * count - 1] to hardware through the backend callback.  count ==
+ * ALL_ENTRIES selects all nb_rpl_depth entries.
+ * NOTE(review): negative start_idx wraps through the unsigned cast --
+ * presumably validated by callers, TODO confirm.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access an RPL RPL record: write when get == 0, read when get != 0.
+ * Unlike the other _mod helpers, HW_TPE_RPL_RPL_VALUE transfers the full
+ * 128-bit value[4] array via memcpy, so 'value' must point to at least
+ * four uint32_t for that field.  HW_TPE_PRESET_ALL fills the record with
+ * the low byte of *value (write only).  Versions 1 and 2 only.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write an RPL RPL record field.  Takes a pointer (unlike the other
+ * *_set helpers) because HW_TPE_RPL_RPL_VALUE copies a uint32_t[4].
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Read an RPL RPL record field into *value (uint32_t[4] for VALUE). */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush CPY RCP entries to hardware.  The CPY table is two-dimensional
+ * (one recipe set per copy writer), so its flat size is
+ * nb_cpy_writers * nb_rcp_categories.  count == ALL_ENTRIES selects the
+ * whole table.
+ * NOTE(review): negative start_idx wraps through the unsigned cast --
+ * presumably validated by callers, TODO confirm.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field of a CPY RCP record: write when get == 0, read when
+ * get != 0 (see the set/get wrappers).  'index' addresses the flat
+ * writers x categories table.  HW_TPE_PRESET_ALL fills the record with
+ * the low byte of *value (write only); FIND/COMPARE use the generic
+ * index helpers.  Versions 1 and 2 only.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CPY RCP field (thin wrapper over hw_mod_tpe_cpy_rcp_mod). */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CPY RCP field into *value. */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush HFU RCP entries [start_idx .. start_idx + count - 1] to hardware
+ * through the backend callback.  count == ALL_ENTRIES selects the whole
+ * table.
+ * NOTE(review): negative start_idx wraps through the unsigned cast --
+ * presumably validated by callers, TODO confirm.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field of an HFU RCP record: write when get == 0, read when
+ * get != 0 (see the set/get wrappers).  The field groups mirror struct
+ * tpe_v1_hfu_v1_rcp_s: three length-update groups (A/B/C), a TTL group,
+ * and checksum/protocol/offset fields.  HW_TPE_PRESET_ALL fills the
+ * record with the low byte of *value (write only); FIND/COMPARE use the
+ * generic index helpers.  Versions 1 and 2 only.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Length group A */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* Length group B */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* Length group C */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL group */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* Checksum / protocol info and header offsets */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HFU RCP field (thin wrapper over hw_mod_tpe_hfu_rcp_mod). */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HFU RCP field into *value. */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush CSU RCP entries [start_idx .. start_idx + count - 1] to hardware
+ * through the backend callback.  count == ALL_ENTRIES selects the whole
+ * table.
+ * NOTE(review): negative start_idx wraps through the unsigned cast --
+ * presumably validated by callers, TODO confirm.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Access one field of a CSU RCP record: write when get == 0, read when
+ * get != 0 (see the set/get wrappers).  Fields select the checksum
+ * command for outer/inner L3/L4 headers.  HW_TPE_PRESET_ALL fills the
+ * record with the low byte of *value (write only); FIND/COMPARE use the
+ * generic index helpers.  Versions 1 and 2 only.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CSU RCP field (thin wrapper over hw_mod_tpe_csu_rcp_mod). */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CSU RCP field into *value. */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/*
+ * Shadow records for the TPE v1 recipe tables.  Field names correspond
+ * to the HW_TPE_* field enums handled in hw_mod_tpe.c; exact hardware
+ * semantics follow the FPGA register layout (not visible here).
+ */
+
+/* RPP recipe record (RPP v0). */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS (insert) recipe record (INS v1): dynamic offset base, offset, length. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL (replace) recipe record (RPL v2). */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL extension record. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL replacement data: 128 bits as four 32-bit words
+ * (transferred as a whole by HW_TPE_RPL_RPL_VALUE).
+ */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY (copy) recipe record (CPY v1). */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* HFU (header field update) recipe record (HFU v1): three length-update
+ * groups (A includes an outer-L4-length flag), a TTL group, and
+ * checksum/protocol/offset fields.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU (checksum update) recipe record (CSU v0): checksum commands for
+ * outer/inner L3 and L4 headers.
+ */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* TPE v1 shadow state: one dynamically sized table per sub-module
+ * (allocation/sizing is handled elsewhere, e.g. nb_rcp_categories).
+ */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP IFR recipe record (v2 addition): enable flag and MTU. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR recipe record (v2 addition): enable flag and MTU. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* TPE v2 shadow state: a superset of hw_mod_tpe_v1_s (same leading
+ * members, so v1 code paths can use the v1 view) plus the two IFR
+ * tables introduced in version 2.
+ */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian scalar aliases.  Plain typedefs only -- the compiler does
+ * not enforce the byte order; callers must convert explicitly.
+ */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks.
+ * FLOW_MARK_LACP is the reserved top value; FLOW_MARK_MAX is the
+ * largest ordinary mark (one below it).
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Pairs a logical queue id with the corresponding hardware queue id. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Match-item types, mirroring the corresponding RTE_FLOW_ITEM_TYPE_*
+ * values (plus the device-specific TUNNEL extension noted below).
+ */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Action types; the comment on each entry names the conf structure that
+ * struct flow_action.conf points to for that type.
+ */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* Ethernet MAC address, packed to 6 bytes (wire layout). */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format a MAC address as "XX:XX:XX:XX:XX:XX" into buf.  snprintf
+ * truncates safely if size is too small; a buffer of at least 18 bytes
+ * (17 chars + NUL) holds the full string.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+/*
+ * Per-item specification structures.  A struct flow_elem of a given
+ * type points to the matching spec (and mask of the same type) below.
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry of a match pattern: item type plus spec/mask pointers of
+ * the corresponding flow_elem_* structure.
+ */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+/* RSS configuration (mirrors rte_flow's RSS action conf). */
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+/* Per-action configuration structures (pointed to by
+ * struct flow_action.conf, selected by flow_action_type).
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ * Bit-fields report which of the statistics below are valid.
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ * 'data'/'preserve' are caller-provided buffers of 'size' bytes; the
+ * parsed representation is cached in 'items' (at most
+ * RAW_ENCAP_DECAP_ELEMS_MAX entries).
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * Identifies the packet field, metadata item, or value source referenced
+ * by struct flow_action_modify_data.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer (source is *pvalue). */
+	FLOW_FIELD_VALUE, /* Immediate value (source is value[]). */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Which union member is valid depends on `field`: FLOW_FIELD_VALUE uses
+ * value[], FLOW_FIELD_POINTER uses pvalue, all other ids use level/offset.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ * Apply `operation` to `dst` using `src` as the operand.
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* Set/add/sub - see enum flow_modify_op. */
+	struct flow_action_modify_data dst; /* Field to be modified. */
+	struct flow_action_modify_data src; /* Source field or value. */
+	uint32_t width; /* Number of bits to transfer (presumably; as in rte_flow). */
+};
+
+/* Generic action list entry: an action type plus its type-specific config. */
+struct flow_action {
+	enum flow_action_type type; /* Discriminator selecting the conf layout. */
+	const void *conf; /* Points to the matching flow_action_* struct. */
+};
+
+/* Error classification reported through struct flow_error. */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Out-parameter describing the outcome of a flow API call. */
+struct flow_error {
+	enum flow_error_e type; /* Success or failure class. */
+	const char *message; /* Static description string; not owned by caller. */
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write a single LAG table entry at `index`. */
+	FLOW_LAG_SET_ALL, /* Write entry (index & 3) of every 4-entry block. */
+	FLOW_LAG_SET_BALANCE, /* Distribute output between two ports by percent. */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support.
+ * IP addresses and ports are stored in network (big-endian) byte order.
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			/* Same storage as v6, accessed as two 64-bit words. */
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Selects v4 or v6 union member (presumably 4 or 6). */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type; /* NOTE(review): tunnel kind enum - confirm against callers. */
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 6/8] net/ntnic: adds flow logic
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-31 12:23   ` [PATCH v11 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-31 12:23   ` Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v11:
* Replace stdatomic by compiler build-in atomic
* Fix dereferencing type-punned pointer in macro
* Inner offset must exclude VLAN bytes
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1307 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5128 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10089 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..8cdf15663d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug names for flow resource types, indexed by enum res_type_e.
+ * NOTE(review): order must stay in sync with the enum definition - confirm
+ * against flow_api_engine.h.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global singly-linked list of NIC devices. */
+static struct flow_nic_dev *dev_base;
+/* Protects dev_base and the device-management/LAG entry points below. */
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Error message table, indexed by enum flow_nic_err_msg_e
+ * (see flow_nic_set_error()). Order must match the enum.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+	/* 39 */ { "Invalid priority value" },
+};
+
+/*
+ * Fill in a caller-supplied flow_error from the err_msg table.
+ * A NULL error pointer is silently accepted.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+
+	if (error == NULL)
+		return;
+
+	error->message = err_msg[msg].message;
+	error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+		      FLOW_ERROR_GENERAL;
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free resource of res_type, scanning indices in steps of
+ * `alignment`. Returns the allocated index, or -1 when none is free.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	const unsigned int n_res = ndev->res[res_type].resource_count;
+	unsigned int idx;
+
+	for (idx = 0; idx < n_res; idx += alignment) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			continue;
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return (int)idx;
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific resource index `idx` of res_type.
+ * Returns 0 on success, -1 if the index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (flow_nic_is_resource_used(ndev, res_type, idx))
+		return -1;
+
+	flow_nic_mark_resource_used(ndev, res_type, idx);
+	ndev->res[res_type].ref[idx] = 1;
+	return 0;
+}
+
+/*
+ * Allocate `num` contiguous resources of res_type, with the start index a
+ * multiple of `alignment`. Returns the first index, or -1 on failure.
+ *
+ * Fix: guard degenerate arguments. With the original loop bound
+ * `resource_count - (num - 1)`, num > resource_count underflowed the
+ * unsigned subtraction and scanned far out of bounds; alignment == 0
+ * would loop forever.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	if (num == 0 || alignment == 0 ||
+			num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* Check that the following num-1 indices are free too. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Mark a single resource index as unused again.
+ * Note: does not touch the ref counter; flow_nic_deref_resource() zeroes
+ * the count before calling here.
+ */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an extra reference on an already-allocated resource.
+ * Returns 0 on success, -1 if the counter is saturated at UINT32_MAX.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	uint32_t *refcnt = &ndev->res[res_type].ref[index];
+
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, *refcnt);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+
+	if (*refcnt == (uint32_t)-1)
+		return -1;
+
+	*refcnt += 1;
+	return 0;
+}
+
+/*
+ * Drop one reference on an allocated resource; frees it when the count
+ * reaches zero. Returns 1 if the resource is still referenced, 0 if it
+ * has been freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Find the first in-use resource index at or after idx_start.
+ * Returns the index, or -1 if no used resource remains.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	const unsigned int n_res = ndev->res[res_type].resource_count;
+
+	for (unsigned int idx = idx_start; idx < n_res; idx++)
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			return (int)idx;
+
+	return -1;
+}
+
+/*
+ * Allocate a number of flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+/*
+ * Allocate `count` resources for a flow handle (contiguously when count > 1)
+ * and record the result in fh->resource[res_type].
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	int idx;
+
+	idx = (count > 1) ?
+	      flow_nic_alloc_resource_contig(ndev, res_type, count, alignment) :
+	      flow_nic_alloc_resource(ndev, res_type, alignment);
+
+	fh->resource[res_type].index = idx;
+	if (idx < 0)
+		return -1;
+
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim a specific resource index for a flow handle and record it in
+ * fh->resource[res_type]. Returns 0 on success, non-zero on failure.
+ *
+ * Fix: reject a negative idx up front. Previously a negative index was
+ * passed straight into the resource bitmap lookup (out-of-bounds), and
+ * the post-assignment `index < 0` check was dead because idx had already
+ * been claimed successfully.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err;
+
+	if (idx < 0)
+		return -1;
+
+	err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program the HSH recipe `hsh_idx` for the requested hash algorithm.
+ * The register write sequence is order-dependent; each hw_mod_hsh_rcp_set()
+ * call configures one field of the recipe. Always returns 0.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	/* Clear the whole recipe before configuring it. */
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Word masks 0-8 enabled, word 9 masked out. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program HSH recipe `hsh_idx` from an RSS field-flag combination.
+ * Only three exact flag combinations are supported; anything else fails.
+ * Returns 0 on success, -1 on unsupported flags or backend write failure.
+ *
+ * Fixes: the outer-dst-IP and inner-src-IP cases did not accumulate the
+ * hw_mod_hsh_rcp_set() return codes into `res`, so their `if (res)` error
+ * checks could never fire; the inner-src-IP case logged the outer-dst-IP
+ * message; "cardware" typos corrected to "hardware".
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only valid for
+		 * the single VLAN provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS,
+					  hsh_idx, 0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS,
+					  hsh_idx, 0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the eth device for (adapter_no, port).
+ * Returns NULL when either the adapter or the port is unknown.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *ndev;
+	struct flow_eth_dev *edev;
+
+	for (ndev = dev_base; ndev != NULL; ndev = ndev->next)
+		if (ndev->adapter_no == adapter_no)
+			break;
+
+	if (ndev == NULL)
+		return NULL;
+
+	for (edev = ndev->eth_base; edev != NULL; edev = edev->next)
+		if (edev->port == port)
+			return edev;
+
+	return NULL;
+}
+
+/*
+ * Find the NIC device for an adapter number.
+ * Returns NULL when the adapter is not in the global device list.
+ */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *cur;
+
+	for (cur = dev_base; cur != NULL; cur = cur->next)
+		if (cur->adapter_no == adapter_no)
+			return cur;
+
+	return NULL;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure LAG port pairing: each set bit N merges physical ports
+ * N*2 and N*2+1, reported as incoming port N*2.
+ * Returns 0 on success, -1 on unknown adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block incoming traffic from the MAC ports given in port_mask.
+ * Returns 0 on success, -1 on unknown adapter.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	struct flow_nic_dev *ndev;
+
+	pthread_mutex_lock(&base_mtx);
+	ndev = get_nic_dev_from_adapter_no(adapter_no);
+	if (ndev == NULL) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one ROA LAG table entry (TX physical port) and flush it to HW. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * LAG configuration dispatcher.
+ *   FLOW_LAG_SET_ENTRY:   write entry `index` = value.
+ *   FLOW_LAG_SET_ALL:     write entry (index & 3) of every 4-entry block.
+ *   FLOW_LAG_SET_BALANCE: value is a 0..100 percentage steering whole
+ *                         4-entry hash blocks to port 1 or port 2.
+ * Returns 0 on success, -1 on unknown adapter or unknown command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	/* NOTE: this break exits the switch for FLOW_LAG_SET_BALANCE. */
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ *
+ * Thin wrappers that reject the (unsupported) vSwitch profile and delegate
+ * to the inline-profile implementation.
+ * *****************************************************************************
+ */
+
+/* Validate a flow description without creating it; 0 on success, -1 on error */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_validate_profile_inline(dev, item, action, error);
+}
+
+/* Create a flow from match items and actions; NULL on error */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return NULL;
+	}
+	return flow_create_profile_inline(dev, attr, item, action, error);
+}
+
+/* Destroy a single flow previously returned by flow_create() */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_destroy_profile_inline(dev, flow, error);
+}
+
+/* Destroy all flows on this eth device */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_flush_profile_inline(dev, error);
+}
+
+/* Query a flow action (e.g. counters); result returned via *data/*length */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_query_profile_inline(dev, flow, action, data, length,
+					 error);
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Push an eth-port device onto the NIC's singly linked eth_base list */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink an eth-port device from the NIC's eth_base list.
+ * Returns 0 if found and removed, -1 if not on the list.
+ * Caller is expected to hold the appropriate lock.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;
+
+	while (dev) {
+		if (dev == eth_dev) {
+			if (prev)
+				prev->next = dev->next;
+
+			else
+				ndev->eth_base = dev->next;
+			return 0;
+		}
+		prev = dev;
+		dev = dev->next;
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on a NIC device: delete every eth-port
+ * device (which removes its flows), destroy any orphaned flows, then
+ * release the inline-profile flow management and the KM/KCC resource
+ * handles. In debug builds, verify that all raw resources were freed.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check */
+	while (ndev->flow_base) {
+		/* Flows should already be gone with their eth devices */
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+#if defined(FLOW_DEBUG)
+		/* NOTE(review): redundant - always true inside #ifdef FLOW_DEBUG */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+#endif
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			/* A leaked element has a non-zero refcount or a set bitmap bit */
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset a NIC device to a clean state: tear down all eth-port devices and
+ * flows, then reset the backend. Returns 0 on success, -1 on unknown adapter.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev)
+		return -1;
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+/*
+ * Create (or re-create) an eth-port device on a NIC and allocate its RX
+ * queues. On first use of a NIC the flow management for the requested
+ * profile is initialized; subsequent calls must request the same profile.
+ *
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ *
+ * Returns the new device, or NULL on error.
+ *
+ * Locking: base_mtx is taken first and held until return; ndev->mtx is
+ * taken after the eth_dev allocation. Both are released exactly once on
+ * every path (the original code unlocked base_mtx before re-open teardown
+ * but unlocked it again on exit - UB - and leaked it on calloc failure).
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	/* Sanity: internal queue destination array must hold all RX queues */
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * Keep base_mtx held during teardown; the exit paths below
+		 * unconditionally unlock it, so dropping it here would make
+		 * that unlock operate on a mutex we no longer own (UB).
+		 * flow_delete_eth_dev() only takes ndev->mtx, so the lock
+		 * order (base_mtx, then ndev->mtx) is preserved.
+		 */
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		/* Queues are provided by the caller */
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		/* Allocate a free queue ID and a HW queue for it */
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		/* Report allocated queues back to the caller if requested */
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Enable each allocated queue in the QSL QEN bitmap (4 queues per entry) */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	/* fall through - base_mtx still held */
+
+err_exit1:
+	/* base_mtx is held on every error path; release it exactly once */
+	pthread_mutex_unlock(&base_mtx);
+
+	free(eth_dev); /* free(NULL) is a no-op */
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an externally allocated RX queue on an eth device and enable it
+ * in the QSL QEN bitmap (4 queue-enable bits per table entry).
+ * Always returns 0.
+ *
+ * NOTE(review): num_queues is not bounds-checked against the rx_queue
+ * array size (FLOW_MAX_QUEUES + 1) - confirm callers cannot overflow it.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	/* Read-modify-write the enable bit for this HW queue */
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device and everything created on it: all of its
+ * flows, its QSL unmatched-queue setup, its queue enables (inline
+ * profile) and - without SCATTER_GATHER - its allocated HW RX queues.
+ * Finally the device is unlinked from the NIC and freed.
+ * Returns 0 on success, -1 on invalid device or unsupported profile.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/*
+				 * Fix: the original returned with ndev->mtx
+				 * still held, deadlocking later API calls.
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Clear each queue's enable bit in the QSL QEN bitmap */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Look up a tunnel definition by flow_stat_id/vport; delegates to tunnel module */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate allocation-bitmap and per-element reference counters for one
+ * resource type in a single zeroed buffer:
+ *   [ bitmap: BIT_CONTAINER_8_ALIGN(count) bytes | ref: count * uint32_t ]
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	assert(ndev->res[res_type].alloc_bm == NULL);
+	/* allocate bitmap and ref counter */
+	ndev->res[res_type].alloc_bm =
+		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
+	if (ndev->res[res_type].alloc_bm) {
+		/* ref counters live directly after the bitmap in the same buffer */
+		ndev->res[res_type].ref =
+			(uint32_t *)&ndev->res[res_type]
+			.alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
+		ndev->res[res_type].resource_count = count;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Release the combined bitmap/refcount buffer for one resource type.
+ * (The ref pointer aliases into alloc_bm, so one free releases both.)
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(ndev->res[res_type].alloc_bm);
+}
+
+/* Push a NIC device onto the global dev_base list (base_mtx protected) */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global dev_base list (base_mtx protected).
+ * Returns 0 if found and removed, -1 if not on the list.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;
+
+	while (nic_dev) {
+		if (nic_dev == ndev) {
+			if (prev)
+				prev->next = nic_dev->next;
+			else
+				dev_base = nic_dev->next;
+			pthread_mutex_unlock(&base_mtx);
+			return 0;
+		}
+		prev = nic_dev;
+		nic_dev = nic_dev->next;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return -1;
+}
+
+/*
+ * Create and register a flow NIC device on top of a backend interface.
+ * Initializes the backend, sizes every resource manager from the backend's
+ * reported capabilities, and links the device into the global list.
+ * Returns the new device, or NULL on bad arguments / allocation failure.
+ *
+ * NOTE(review): the err_exit path calls flow_api_done() before
+ * pthread_mutex_init(&ndev->mtx) has run - confirm nothing on that path
+ * uses the (still uninitialized) mutex.
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* Only backend ops version 1 is supported */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* Cap addressable ports at 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a flow NIC device: reset all state, free every resource
+ * manager, tear down the backend, unlink from the global list and free.
+ * Safe to call with ndev == NULL. Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev) {
+		flow_ndev_reset(ndev);
+
+		/* delete resource management allocations for this ndev */
+		for (int i = 0; i < RES_COUNT; i++)
+			done_resource_elements(ndev, i);
+
+		flow_api_backend_done(&ndev->be);
+		list_remove_flow_nic(ndev);
+		free(ndev);
+	}
+	return 0;
+}
+
+/* Return the opaque backend device of a NIC, or NULL if ndev is NULL */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (!ndev) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+	return ndev->be.be_dev;
+}
+
+/*
+ * Return the number of RX queues on an eth-port device, or 0 if no
+ * device exists for the given adapter/port (the lookup can return NULL,
+ * as checked elsewhere in this file - the original dereferenced it blindly).
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return 0;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the HW queue id of queue 'queue_no' on an eth-port device, or -1
+ * if no device exists for the given adapter/port (the lookup can return
+ * NULL, as checked elsewhere in this file - the original dereferenced it
+ * blindly).
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Fetch FLM statistics into 'data' (up to 'size' entries).
+ * Only implemented for the inline profile; returns -1 otherwise.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
+		return flow_get_flm_stats_profile_inline(ndev, data, size);
+	return -1;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/* Per-resource-type allocation state kept in struct flow_nic_dev::res[] */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+/* Individual RSS field bits (bit positions follow the DPDK definitions) */
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+/* Convenience mask covering all IPv4/IPv6 field bits defined above */
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields;
+};
+
+/* One eth-port device created on a NIC by flow_get_eth_dev() */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next port device on the same NIC (eth_base list) */
+};
+
+/* Hash algorithm selector for flow_nic_set_hasher() */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared;
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx;
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/* Error codes reported to callers through flow_nic_set_error() */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+/*
+ * Bit helpers over a resource allocation bitmap (8 bits per byte).
+ * All three now evaluate 'arr' and 'x' exactly once into local
+ * temporaries - the original unset/is_set variants used 'arr'
+ * unparenthesized and could multiply-evaluate it.
+ */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Mark one resource element allocated; asserts it was previously free */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* Mark one resource element free again (no double-free assert) */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* Test whether one resource element is allocated (0 or 1) */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+/* Allocate one free element of res_type at the given alignment; -1 if none */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+/* Allocate the specific element idx of res_type */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+/* Allocate 'num' contiguous elements of res_type at the given alignment */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+/* Release element idx of res_type unconditionally */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+/* Increment / decrement the per-element reference counter */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+/* Find the next allocated element at or after idx_start; -1 if none */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+/* Allocate resources and record them on a flow handle */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+/* Lookups into the global device lists; both return NULL when not found */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+/* Configure the RSS hasher algorithm / hashed fields for QSL_HSH index hsh_idx */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+/* LAG configuration (see flow_api.c for the per-cmd semantics) */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Per eth-port queue queries */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+/* FLM statistics fetch (inline profile only) */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..c6b1c0cc0a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for one category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * Returns 0 (the function itself cannot fail).
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/*
+	 * The FTE table is addressed by (flow type, CFN bank, lookup);
+	 * the CFN selects a single bit within the addressed bitmap.
+	 */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	/* Read the current enable bitmap from the register cache */
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Only write/flush to hardware when the bitmap actually changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for one category function (CFN);
+ * FLM counterpart of set_flow_type_km() with identical index math.
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * Returns 0 (the function itself cannot fail).
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* FTE address: (flow type, CFN bank, lookup); CFN selects one bit */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Only write/flush to hardware when the bitmap actually changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a logical RX queue id into its hardware queue id by scanning
+ * the device's queue table. Returns -1 when the id is unknown.
+ */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	for (int i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+	}
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Preset FLM control registers and busy-wait (up to ~1 s, polling every
+ * microsecond) for the DDR4 calibration-done status flag, then program
+ * the scrubber/timeout registers. Returns 0 on success, -1 on timeout.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reset the FLM SDRAM: disable FLM, clear all RCP categories, wait for
+ * the engine to go idle, run SDRAM init, and finally (re)enable FLM
+ * according to 'enable'. Each wait polls up to ~1 s in 1 us steps.
+ * Returns 0 on success, -1 on either wait timing out.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Category 0 is skipped; clear categories 1..nb_categories-1 */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Packed 64-bit identity of an FLM action set (destination, encap/decap,
+ * jump target). Two flows with equal .data can share one flow type (FT)
+ * entry; .data == 0 marks an unused slot (see flm_flow_learn_prepare()).
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;
+			uint64_t drop : 1;
+			uint64_t ltx_en : 1;
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data;
+	};
+};
+
+/*
+ * Packed 64-bit description of the FLM key layout: dynamic offset
+ * selector (dyn) and byte offset (ofs) for the two quad-words and two
+ * single-words, plus outer/inner protocol flags. Compared via .data to
+ * detect conflicting key definitions within one group.
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data;
+	};
+};
+
+/*
+ * Build the packed FT identity for a flow definition: destinations
+ * (drop when none; otherwise physical port and/or virtual queue),
+ * tunnel-header encap parameters, decap end point and jump group.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	/* The bit-field union must pack exactly into its 64-bit view */
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ft_ident.drop = 1;
+	} else {
+		/* Later destinations of the same type overwrite earlier ones */
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		/* encap_ip: 0 for IPv4, 1 otherwise (i.e. IPv6) */
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* UINT32_MAX means "no jump"; otherwise keep the low 8 bits */
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+/*
+ * Store the dyn/ofs selector for quad-word qw (0 => QW0, 1 => QW4) in
+ * the key definition. dyn is truncated to 7 bits, ofs to 8 bits.
+ */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw == 0) {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+	} else {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Store the dyn/ofs selector for single-word sw (0 => SW8, 1 => SW9) in
+ * the key definition. dyn is truncated to 7 bits, ofs to 8 bits.
+ */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw == 0) {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+	} else {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Per-FLM-group state: the group-0 template filter (CFN and KM flow
+ * type), the key layout programmed into the group's RCP, and the flow
+ * handle owning each flow type (FT) entry.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;      /* -1 when the group is unset */
+	int km_ft_group0;
+	struct flow_handle *fh_group0;
+
+	struct flm_flow_key_def_s key_def;
+
+	/* Non-zero once group 0 redirects misses into this group's RCP */
+	int miss_enabled;
+
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Last FT slot matched; NOTE(review): "cashed" is a typo for "cached" */
+	uint32_t cashed_ft_index;
+};
+
+/* Root FLM bookkeeping kept behind ndev->flm_res_handle */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset, when *handle is already set) the FLM bookkeeping
+ * structure and mark every group as unset (cfn_group0 = -1).
+ * NOTE(review): the calloc() result is not checked; a failed allocation
+ * leads to a NULL dereference in the loop below — TODO confirm/handle.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM bookkeeping structure and clear the caller's pointer */
+static void flm_flow_handle_remove(void **handle)
+{
+	free(*handle);
+	*handle = NULL;
+}
+
+/*
+ * Record the group-0 template (CFN, KM flow type, flow handle) for a
+ * group slot; miss redirection stays disabled until the first learn.
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Clear all bookkeeping for a group slot and mark it unset again
+ * (cfn_group0 = -1). Returns 0 on success, -1 on a bad group index.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Return (via *fh_miss) the group-0 template flow handle for a group;
+ * may be NULL if the group was never set up. Returns 0 on success,
+ * -1 on a bad group index.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = flm_handle->groups[group_index].fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program the FLM RCP (recipe) registers for a group: key word
+ * selectors from key_def, the 10-word key mask (reordered from the
+ * packet mask layout), the KID (group_index + 2) and byte counting
+ * parameters. Flushes the RCP entry to hardware. Returns 0 on success,
+ * -1 on a bad group index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask into the hardware's SW9/SW8/QW4/QW0 layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KIDs 0 and 1 are reserved (meters use KID 1), hence the +2 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	/* Byte counter offset -20; presumably excludes L2 overhead — TODO confirm */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down a group's RCP: clear the recipe registers and, if the group
+ * had miss redirection enabled, undo the group-0 wiring (KCS selection
+ * back to 0, FT MISS back to FT UNHANDLED, clear the KCE enable bit).
+ * Returns 0 on success, -1 on a bad group index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM learning for a flow in a group.
+ *
+ * On the first flow of a group this enables miss redirection: the FLM
+ * RCP is allocated and programmed, group 0's KCS selection is pointed
+ * at this group, FT UNHANDLED is swapped for FT MISS, and the KCE
+ * enable bit for group 0's CFN bank is set. All flows in a group must
+ * then share the same key definition.
+ *
+ * The flow's action set is looked up among the group's FT entries
+ * (slots 0 and 1 are reserved; a per-group cached index is tried
+ * first). A new FT slot claims ownership via *cfn_to_copy /
+ * *cfn_to_copy_km_ft; an existing identical one is reported through
+ * *fh_existing instead.
+ *
+ * Outputs: *kid (group_index + 2) and *ft (the FT slot).
+ * Returns 0 on success, -1 on invalid group, unset CFN, resource
+ * exhaustion or conflicting key definitions.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in one group must share the exact same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the most recently used FT slot matches */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT slots 0 and 1 are reserved; scan 2..FLM_FLOW_FT_MAX-1 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release a flow handle's ownership of its FT slot: clear the group's
+ * FT entry and disable the associated flow types on the flow's CFN.
+ * Returns 0 on success; errors from set_flow_type_flm() are OR-ed.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Meter profile table: per profile, the two token buckets' rate and
+ * size, each encoded as mantissa [11:0] | shift [15:12] (see
+ * flow_mtr_set_profile()).
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires the FLM module to be present in variant 2 */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	return hw_mod_flm_present(&dev->ndev->be) &&
+	       dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Maximum number of meter policies == size of the profile table */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Compute ceil(value * 10^9 / 2^40) without 64-bit overflow by
+ * splitting the 40-bit input into two 20-bit halves, multiplying each
+ * by 10^9 and recombining; round_up is set when any fractional bits
+ * would be discarded.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t round_up =
+		(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+	return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+/*
+ * Encode a dual-token-bucket meter profile into hardware format and
+ * store it in the profile table.
+ *
+ * Rates (bytes/sec) are rounded up to 128-byte/sec units, clamped, and
+ * packed as a 12-bit mantissa with a 4-bit left-shift: [11:0] value,
+ * [15:12] shift. Sizes (bytes) are clamped to 38 bits, converted to
+ * 2^40/10^9-byte units and packed the same way. Always returns 0;
+ * profile_id is not range-checked here — callers must keep it below
+ * FLM_MTR_PROFILE_SIZE.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round rate down to max rate supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies are not configurable on this hardware; accept and ignore */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* Size of one FLM info record in 32-bit words */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* MSB used as an "update in progress" marker on n_pkt (see stats code) */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* Periodic statistics thresholds, expressed as powers of two */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Maximum number of meters == size of the statistics table */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics entry. n_pkt/n_bytes are written by the stats
+ * updater and read lock-free by readers using the UINT64_MSB marker
+ * protocol; *_base hold the values at the last "clear" readout.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;
+
+	uint64_t n_pkt;
+	uint64_t n_bytes;
+	uint64_t n_pkt_base;
+	uint64_t n_bytes_base;
+	uint64_t stats_mask;
+};
+
+/* Size of one FLM learn record in 32-bit words */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Push one learn record into the FLM learn queue. If the queue lacks
+ * space, drain pending info records and re-poll up to
+ * FLM_PROG_MAX_RETRY times. Returns 1 when retries are exhausted,
+ * otherwise the result of the flush. Caller must hold the device mutex.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain info records to make the hardware progress */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter: build a learn record (op = 1, KID 1, SW9 = mtr_id+1)
+ * carrying bucket A of the profile, program it via the FLM learn queue,
+ * and on success attach the buckets and stats mask to the meter's
+ * statistics entry. Serialized by the device mutex.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	/* Initial fill level: mantissa part of the encoded size */
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* Record id: little-endian mtr_id with the top marker bit set */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		__atomic_store_n(&mtr_stat[mtr_id].stats_mask, stats_mask, __ATOMIC_RELAXED);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter: zero its statistics entry first (so in-flight stats
+ * updates are ignored via stats_mask == 0), then program a delete
+ * learn record (op = 0). Serialized by the device mutex.
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	__atomic_store_n(&mtr_stat[mtr_id].stats_mask, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_bytes, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_pkt, 0, __ATOMIC_RELAXED);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust a meter's bucket fill level by adjust_value via an update
+ * learn record (op = 2). Serialized by the device mutex.
+ * NOTE(review): mtr_stat->buckets is dereferenced without a NULL
+ * check; calling this for a destroyed meter (buckets = NULL in
+ * flow_mtr_destroy_meter()) would crash — TODO confirm callers
+ * guarantee the meter exists.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (__atomic_load_n(&mtr_stat->stats_mask, __ATOMIC_RELAXED))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ whole FLM info records into
+ * 'data' and return the number of records read (0 when none are
+ * available). Caller must hold the device mutex (hence "_locked").
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached counter before giving up */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain FLM info records and fold meter statistics into the per-meter
+ * counters. Writer protocol for lock-free readers: n_pkt is first
+ * stored with UINT64_MSB set ("update in progress"), then n_bytes,
+ * then n_pkt without the marker (see flm_mtr_read_stats()).
+ * Returns the number of records processed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				__atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+			if (stats_mask) {
+				/* Words 0-1: byte count, words 2-3: packet count */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				__atomic_store_n(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_bytes, nb, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_pkt, np, __ATOMIC_RELAXED);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read one meter's green packet/byte counters relative to the last
+ * clear point; when 'clear' is set, the current values become the new
+ * base. Lock-free reader: spins while the writer's UINT64_MSB marker
+ * is set on n_pkt and retries until two n_pkt reads agree, so n_bytes
+ * is consistent with the packet count. Outputs nothing when
+ * *stats_mask is 0 (meter deleted or stats disabled).
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+			} while (pkt_1 & UINT64_MSB);
+			nb = __atomic_load_n(&mtr_stat[id].n_bytes, __ATOMIC_RELAXED);
+			pkt_2 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipe index for a port; recipe 0 is reserved, hence port + 1 */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return port + 1;
+}
+
+/*
+ * Walk the NIC's ethernet-device list and return the physical port for
+ * the given port_id, or UINT8_MAX when no device matches.
+ */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *dev = ndev->eth_base;
+
+	while (dev) {
+		if (dev->port_id == port_id)
+			return dev->port;
+		dev = dev->next;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the head of the device's doubly linked flow list */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	if (ndev->flow_base)
+		ndev->flow_base->prev = fh;
+	fh->next = ndev->flow_base;
+	fh->prev = NULL;
+	ndev->flow_base = fh;
+}
+
+/*
+ * Unlink a flow handle from the device's flow list, updating the list
+ * head when fh is the first element. fh's own links are left untouched.
+ */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (next && prev) {
+		/* Middle of the list */
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		/* Head of the list */
+		ndev->flow_base = next;
+		next->prev = NULL;
+	} else if (prev) {
+		/* Tail of the list */
+		prev->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* Sole element */
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return CAT function (CFN) slot @cfn to its power-on state: preset the
+ * CFN entry itself, detach it from both the KM and the FLM key matcher,
+ * and clear any category test (CTE/CTS) entries that were enabled for it.
+ * Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* Preset the CFN entry and push it to hardware */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+			   0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/* Detach the CFN from the KM matcher (enable bits are 8 per word) */
+	{
+		uint32_t enable_mask = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_mask);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      enable_mask & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every flow type on all four key lanes */
+		for (unsigned int flow_type = 0;
+				flow_type < dev->ndev->be.cat.nb_flow_types;
+				flow_type++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_km(dev->ndev, cfn, flow_type,
+						 key, 0);
+		}
+	}
+
+	/* Detach the CFN from the FLM matcher (same pattern as KM above) */
+	{
+		uint32_t enable_mask = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_mask);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       enable_mask & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int flow_type = 0;
+				flow_type < dev->ndev->be.cat.nb_flow_types;
+				flow_type++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_flm(dev->ndev, cfn, flow_type,
+						  key, 0);
+		}
+	}
+
+	/* Clear CTE/CTS only if any category tests were actually enabled */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		if (cte) {
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a parsed flow definition (@fd) plus its raw match data into
+ * the FLM-specific fields of flow handle @fh.
+ *
+ * The L4 protocol is stored as its IANA protocol number (TCP=6, UDP=17,
+ * SCTP=132, ICMP=1); the outer L4 protocol takes precedence over the
+ * tunneled one, and 0 means "no/unknown L4".
+ *
+ * @fh: destination handle; must already be FLOW_HANDLE_TYPE_FLM.
+ * @fd: flow definition holding protocols and modify-field actions.
+ * @packet_data: 10 32-bit words of match data used as the FLM key.
+ * @flm_key_id: FLM key id to program.
+ * @rpl_ext_ptr: replacement-extension pointer.
+ * @priority: flow priority, truncated to 8 bits.
+ *
+ * Returns 0 on success, -1 if @fh is not an FLM handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* Map the L4 protocol to its IANA protocol number */
+	switch (fd->l4_prot) {
+	case PROT_L4_TCP:
+		fh->flm_prot = 6;
+		break;
+	case PROT_L4_UDP:
+		fh->flm_prot = 17;
+		break;
+	case PROT_L4_SCTP:
+		fh->flm_prot = 132;
+		break;
+	case PROT_L4_ICMP:
+		fh->flm_prot = 1;
+		break;
+	default:
+		/* No outer L4 match - fall back to the tunneled L4 */
+		switch (fd->tunnel_l4_prot) {
+		case PROT_TUN_L4_TCP:
+			fh->flm_prot = 6;
+			break;
+		case PROT_TUN_L4_UDP:
+			fh->flm_prot = 17;
+			break;
+		case PROT_TUN_L4_SCTP:
+			fh->flm_prot = 132;
+			break;
+		case PROT_TUN_L4_ICMP:
+			fh->flm_prot = 1;
+			break;
+		default:
+			fh->flm_prot = 0;
+			break;
+		}
+		break;
+	}
+
+	/* The FLM key is the 10 words of raw match data */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Extract NAT/QoS values carried by the modify-field actions */
+	for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+		switch (fd->modify_field[i].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[i].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			fh->flm_rqi = (fd->modify_field[i].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[i].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[i].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[i].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[i].value32[0]);
+			break;
+		default:
+			/* Other selectors carry no FLM-resident state */
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build an FLM learn record from flow handle @fh and push it to hardware.
+ *
+ * @dev: device to program.
+ * @fh: source handle; must be FLOW_HANDLE_TYPE_FLM.
+ * @mtr_ids: optional meter ids (up to MAX_FLM_MTRS_SUPPORTED); may be NULL.
+ * @flm_ft: FLM flow type to encode in the record.
+ * @flm_op: learn operation code (lower 4 bits used).
+ *
+ * Returns the result of flow_flm_apply(), or -1 if @fh is not an FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Key words are stored in reverse order relative to flm_data[] */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+
+	/*
+	 * NOTE(review): mbr_id1/mbr_id2 alias the same overlay element and
+	 * mbr_id3/mbr_id4 the element 7 positions further in; the .a/.b
+	 * fields presumably pack two member indices per overlay element -
+	 * confirm against the flm_v17_mbr_idx_overlay layout.
+	 */
+	struct flm_v17_mbr_idx_overlay *mbr_id1_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id2_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id3_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx + 7;
+	struct flm_v17_mbr_idx_overlay *mbr_id4_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx + 7;
+	if (mtr_ids) {
+		mbr_id1_ptr->a = mtr_ids[0];
+		mbr_id2_ptr->b = mtr_ids[1];
+		mbr_id3_ptr->a = mtr_ids[2];
+		mbr_id4_ptr->b = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	/* NAT is enabled whenever either a NAT address or port is set */
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Set up KM (key matcher) resources for a new flow handle @fh.
+ *
+ * When an identical flow already exists (@identical_flow_found), its KM
+ * flow type and KM category are referenced instead of allocated and the
+ * existing match entry is reused.  Otherwise a KM flow type is reused or
+ * allocated from the device's FT identity table, an existing KM RCP is
+ * referenced (when a compatible @found_flow exists) or a new one is set
+ * up, and the match entry is written.
+ *
+ * On success the out parameters *setup_km, *setup_km_ft and
+ * *setup_km_rcp describe what the caller must program into CAT.
+ * Returns 0 on success, 1 on resource exhaustion (recorded in @error).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+				/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Remember the first free slot in case no match is found */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Matching FT identity exists - take a reference */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record the new identity */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this flow type to the shared RCP's FT mask */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference its FT and category */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		/*
+		 * NOTE(review): reads @flow rather than @found_flow below; at
+		 * the visible call site they are equal when
+		 * identical_flow_found is set, but @found_flow would be the
+		 * safer choice - confirm before relying on other callers.
+		 */
+		*setup_km = 1;
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so rewrite the offset of a
+ * modify-field action to a static offset counted from the beginning of L2.
+ *
+ * @fd: flow definition holding the tunnel header and modify-field entries.
+ * @eth_length: L2 header length already accounted for in *ofs.
+ * @i: index of the modify-field entry being adjusted.
+ * @ofs: in/out byte offset of the field.
+ * @select: CPY_SELECT_* selector deciding which outer headers precede it.
+ * @l2_length/@l3_length/@l4_length: outer header lengths of the new tunnel.
+ * @dyn: out; set to 1 (static from start of L2) when the offset is rebased
+ *       onto the new outer headers.
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Bug fix: the original advanced the local pointer
+			 * ('ofs += ...'), a no-op for the caller; the offset
+			 * value itself must be advanced, matching the
+			 * '*ofs +=' usage in the branch below.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			default:
+				/* Other selectors keep their original offset */
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed, traffic will start to match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of all flow-management resources of an adapter
+ * (ndev) for the inline profile.  Guarded by ndev->flow_mgnt_prepared:
+ * a second call is a no-op returning 0.
+ *
+ * Reserves the resource indices the hardware treats as special (index 0
+ * of most pools; FLM flow types 0 and 1), programs default/catch-all
+ * entries into CAT, QSL, PDB, HSH, COT and RMC, brings up the FLM
+ * (SDRAM calibrate + reset, control/priority/statistics setup) and
+ * allocates the meter / flow-type bookkeeping tables.
+ *
+ * Returns 0 on success.  On any failure returns -1 after tearing down
+ * whatever was already set up via
+ * done_flow_management_of_ndev_profile_inline().
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Identical periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/*
+		 * Bookkeeping tables.  Any calloc failure jumps to err_exit0;
+		 * done_flow_management_of_ndev_profile_inline() frees the
+		 * pointers there, and free(NULL) is a safe no-op.
+		 */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Reset per-meter statistics (fields may be read concurrently) */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			__atomic_store_n(&mtr_stat[i].n_pkt, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].n_bytes, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].stats_mask, 0, __ATOMIC_RELAXED);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down everything set up by
+ * initialize_flow_management_of_ndev_profile_inline(): reset FLM SDRAM,
+ * release the reserved resource indices, preset-clear the hardware
+ * recipe tables and free the bookkeeping allocations.  Safe to call on
+ * a partially initialized ndev (guarded by ndev->flow_mgnt_prepared).
+ * Always returns 0.
+ *
+ * NOTE(review): the freed handles (flm_mtr_handle, ft_res_handle,
+ * mtr_stat_handle) are not set to NULL after free(); reuse is gated
+ * only by flow_mgnt_prepared = 0 at the end.
+ *
+ * NOTE(review): under FLOW_DEBUG, debug mode is switched to
+ * MODE_WRITE unconditionally but restored to MODE_NONE only inside the
+ * flow_mgnt_prepared branch - asymmetric when called on an unprepared
+ * device; confirm intended.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow description without programming any hardware.
+ *
+ * Runs interpret_flow_elements() under the device mutex with
+ * forced_vlan_vid = 0 and immediately frees the resulting flow
+ * definition: only interpretability of elem[]/action[] is checked.
+ * Returns 0 when the flow could be interpreted, -1 otherwise
+ * (error details are presumably filled in by
+ * interpret_flow_elements() - confirm against its implementation).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+
+	/* Scratch outputs required by interpret_flow_elements(); unused here */
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,
+							  error, 0, &port_id,
+							  &num_dest_port, &num_queues,
+							  packet_data, packet_mask,
+							  &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (!fd)
+		return -1;
+
+	/* The interpreted definition is heap-allocated; caller owns it */
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create a flow for the inline profile and program it to the NIC.
+ *
+ * Interprets elem[]/action[] into a flow definition (fd), translates
+ * caller group IDs through the device group handle, then builds and
+ * flushes the filter via create_flow_filter() - all under the device
+ * mutex.  Returns the new flow handle, or NULL on failure with *error
+ * set.
+ *
+ * Ownership: fd is heap-allocated by interpret_flow_elements() and
+ * owned by this function until handed to create_flow_filter() (which
+ * frees it as needed - see its free_fd handling).  It must therefore
+ * be freed explicitly on the failure paths between interpretation and
+ * filter creation; previously those paths leaked fd.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy so the caller's attr is never modified */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		free(fd);	/* fd not yet handed to create_flow_filter() */
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		free(fd);	/* fd not yet handed to create_flow_filter() */
+		goto err_exit;
+	}
+
+	/* No explicit destination port: default to this device's port */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New flow: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy one flow handle.  Caller must hold dev->ndev->mtx (see
+ * flow_destroy_profile_inline() and flow_create_profile_inline()).
+ *
+ * FLM flows: unprogram the FLM entry, release the replace-extension
+ * and replace-data resources when their reference counts hit zero,
+ * drop the group translation, and - when this was the last reference
+ * to the owning template flow - destroy the owner recursively.
+ *
+ * Non-FLM flows: clear KM match data, release the jump group, then
+ * walk every resource type held by the handle and reset the
+ * corresponding hardware tables for indices whose reference count
+ * reaches zero.
+ *
+ * Frees fh (and fh->fd for non-FLM flows) in all cases.  Returns 0 on
+ * success, non-zero if any hardware teardown step failed (error is
+ * also reported through *error when non-NULL).
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the FLM entry (NULL meter list, flow type 0, disable) */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			/* Read back RPL data location before clearing the ext entry */
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Release the RPL data entries (16 bytes each) backing len bytes */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Last FLM flow of this owner: destroy the owner handle too */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the flow-type identity slot (software state only) */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe spans all TPE sub-modules: clear each */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy a single flow (flow != NULL), or every flow and FLM flow
+ * belonging to this eth device (flow == NULL).  Takes the device
+ * mutex and delegates to flow_destroy_locked_profile_inline().
+ *
+ * Returns 0 on success, the first non-zero destroy error otherwise.
+ * NOTE(review): in the destroy-all case the walk stops at the first
+ * error ("while (flow && !err)"), leaving remaining flows intact -
+ * confirm this partial-cleanup behavior is intended.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		flow = dev->ndev->flow_base;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				/* Destroy frees the node: save the next pointer first */
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+
+		/* Delete all created FLM flows from this eth device */
+		flow = dev->ndev->flow_base_flm;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * rte_flow flush entry point - not implemented for the inline
+ * profile.  Always fails with a general error and returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	return -1;
+}
+
+/*
+ * rte_flow query entry point - not implemented for the inline
+ * profile.  Clears *data/*length, sets a general error and returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	*length = 0;
+	*data = NULL;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into data[].
+ *
+ * data must hold at least fields_cnt (26) entries; size is the caller's
+ * capacity and the function returns -1 if it is too small.  All
+ * counters are accumulated into the caller's existing data[i] values,
+ * except HW_FLM_STAT_FLOWS which is a gauge and overwrites data[i].
+ * On FLM versions older than 18 the loop stops after
+ * HW_FLM_STAT_PRB_IGNORE, leaving the remaining entries untouched.
+ * Returns 0 on success.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	/* Order defines the layout of data[]; keep in sync with callers */
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	/* Latch current hardware counters into the backend shadow */
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		/* FLOWS is a gauge (overwrite); everything else accumulates */
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the MTU for one physical port into the TPE IFR (in-flight
+ * fragmentation/MTU) recipes - both the RPP-side and the TX-side
+ * recipe derived from the port number.
+ *
+ * port must be below 255 (the recipe index is 8 bits); returns -1
+ * otherwise.  Register writes are only flushed to hardware if all set
+ * operations succeeded.  Returns 0 on success, non-zero on any
+ * set/flush failure (errors are OR-combined).
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	int err = 0;
+	uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+	struct flow_nic_dev *ndev = dev->ndev;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  ifr_mtu_recipe, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      ifr_mtu_recipe, mtu);
+
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+						    1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Inline-profile implementation of the binary flow API: per-device
+ * flow-management setup/teardown and the flow create/destroy/validate/
+ * query entry points.
+ *
+ * NOTE(review): the guard _FLOW_API_PROFILE_INLINE_H_ uses a leading
+ * underscore + capital, a reserved identifier form in C - consider
+ * renaming.
+ */
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Caller must hold the device mutex (dev->ndev->mtx); frees flow. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* flow == NULL destroys all flows belonging to this eth device. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented: always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented: always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: one slot per physical adapter, holding the
+ * nthw module shadows this backend programs.  A NULL module pointer is
+ * treated as "module not present on this FPGA" by the *_get_present()
+ * callbacks below.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;		/* index into be_devs[] */
+	enum debug_mode_e dmode;	/* debug trace mode, set via set_debug_mode() */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Debug tracing guards used by every flush/read callback below.
+ *
+ * _CHECK_DEBUG_ON declares a local flag and, when either the backend's
+ * write-debug mode or the module's own debug flag is set, switches the
+ * given nthw module instance into full debug mode (0xFF) and remembers
+ * that it did so.  Because the macro declares a variable, it must appear
+ * at the top of the enclosing function body and be paired with
+ * _CHECK_DEBUG_OFF (which restores debug mode 0) in the same scope.
+ *
+ * NOTE(review): "__debug__" is an identifier reserved for the C
+ * implementation (any name containing "__"); consider renaming it.
+ * Also note _CHECK_DEBUG_ON is deliberately NOT a do/while(0) wrapper —
+ * the declaration must leak into the caller's scope.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Record the requested debug mode on the backend device handle.
+ * 'be_dev' is the opaque handle registered with the flow API and is
+ * always a struct backend_dev_s.  Always returns 0.
+ */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	((struct backend_dev_s *)be_dev)->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * INFO module accessors.
+ *
+ * Each getter below is a thin adapter from the flow-API backend callback
+ * signature (opaque 'be_dev') to the corresponding info_nthw_get_*()
+ * query on this adapter's INFO module shadow.  They report FPGA
+ * capability/resource counts used to size the other modules' tables.
+ */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* Report whether the CAT module exists on this adapter: it is present
+ * iff its nthw shadow was instantiated (non-NULL pointer).
+ */
+static bool cat_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_cat_nthw ? true : false;
+}
+
+/* CAT module version word: major version in the high 16 bits, minor
+ * version in the low 16 bits.
+ */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_cat_nthw->m_cat);
+
+	return (major << 16) | (minor & 0xffffU);
+}
+
+/*
+ * Flush 'cnt' consecutive CFN (categorizer function) records to hardware,
+ * starting at record 'cat_func', from the shadow copy held in 'cat'.
+ *
+ * Two record layouts are handled: v18, and v21/v22 which adds the tunnel
+ * error bits and an optional second KM interface (km1_or is only written
+ * when the CAT module exposes more than one KM interface).  Other
+ * versions are silently ignored.  Always returns 0.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18) {
+		/*
+		 * One record per flush cycle.  Fixed: this was a corrupted
+		 * call to an undeclared function "r()"; the per-module
+		 * pattern used by every other flush callback in this file
+		 * is <mod>_nthw_<reg>_cnt(..., 1U) before the loop.
+		 */
+		cat_nthw_cfn_cnt(p, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_cfn_select(p, cat_func);
+
+			cat_nthw_cfn_enable(p, cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(p, cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(p, cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(p, cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(p, cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(p, cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(p, cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(p, cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(p, cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(p, cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(p, cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(p, cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(p, cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(p, cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(p, cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(p, cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(p, cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(p, cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(p, cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(p, cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(p, cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(p, cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(p, cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(p, cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(p, cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(p, cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(p, cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(p, cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(p, cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(p, cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(p, cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(p, cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(p, cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(p, cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(p, cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(p, cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(p, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(p, cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(p, cat->v18.cfn[cat_func].km_or);
+
+			cat_nthw_cfn_flush(p);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* One record per flush cycle (same fix as the v18 branch). */
+		cat_nthw_cfn_cnt(p, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_cfn_select(p, cat_func);
+
+			cat_nthw_cfn_enable(p, cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(p, cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(p, cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(p, cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(p, cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(p, cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(p, cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(p, cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(p, cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(p, cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(p, cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(p, cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(p, cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(p, cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(p, cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(p, cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(p, cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(p, cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(p, cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(p, cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(p, cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(p, cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(p, cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(p, cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(p, cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(p, cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(p, cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(p, cat->v21.cfn[cat_func].err_l4_cs);
+			/* v21+ additions: tunnel checksum and TTL error bits */
+			cat_nthw_cfn_err_tnl_l3_cs(p, cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(p, cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(p, cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(p, cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(p, cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(p, cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(p, cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(p, cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(p, cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(p, cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(p, cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(p, cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(p, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(p, cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(p, cat->v21.cfn[cat_func].km0_or);
+			/* Second KM interface is optional in the FPGA build. */
+			if (p->m_km_if_cnt > 1)
+				cat_nthw_cfn_km1_or(p, cat->v21.cfn[cat_func].km1_or);
+
+			cat_nthw_cfn_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCE records, starting at 'index', from the shadow in 'cat'.
+ * v18 has a single KM interface (always 0); v21/v22 address the interface
+ * selected by 'km_if_idx'.  Always returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(p, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_kce_select(p, 0, idx);
+			cat_nthw_kce_enable(p, 0, cat->v18.kce[idx].enable_bm);
+			cat_nthw_kce_flush(p, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(p, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_kce_select(p, km_if_idx, idx);
+			cat_nthw_kce_enable(p, km_if_idx,
+					    cat->v21.kce[idx].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(p, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCS records, starting at 'cat_func', from the shadow in
+ * 'cat'.  v18 uses KM interface 0; v21/v22 use 'km_if_idx'.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(p, 0, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_kcs_select(p, 0, cat_func);
+			cat_nthw_kcs_category(p, 0, cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(p, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(p, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_kcs_select(p, km_if_idx, cat_func);
+			cat_nthw_kcs_category(p, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(p, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FTE records, starting at 'index', from the shadow in 'cat'.
+ * v18 uses KM interface 0; v21/v22 use 'km_if_idx'.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(p, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_fte_select(p, 0, idx);
+			cat_nthw_fte_enable(p, 0, cat->v18.fte[idx].enable_bm);
+			cat_nthw_fte_flush(p, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(p, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_fte_select(p, km_if_idx, idx);
+			cat_nthw_fte_enable(p, km_if_idx,
+					    cat->v21.fte[idx].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(p, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE records, starting at 'cat_func', from the shadow in
+ * 'cat'.  v18 and v21 share the same record fields and are both accessed
+ * through the v18 view (presumably a union with identical layout for
+ * these fields — confirm against flow_api_backend.h); v22 adds the rrb
+ * enable bit.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(p, 1);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_cte_select(p, cat_func);
+
+			cat_nthw_cte_enable_col(p, cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(p, cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(p, cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(p, cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(p, cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(p, cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(p, cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(p, cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(p, cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(p, cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(p, cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(p);
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(p, 1);
+		for (int i = 0; i < cnt; i++, cat_func++) {
+			cat_nthw_cte_select(p, cat_func);
+
+			cat_nthw_cte_enable_col(p, cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(p, cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(p, cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(p, cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(p, cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(p, cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(p, cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(p, cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(p, cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(p, cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(p, cat->v22.cte[cat_func].b.tpe);
+			/*
+			 * NOTE(review): enable_tpe is invoked a second time
+			 * here but with the b.rrb value — this looks like it
+			 * should be a cat_nthw_cte_enable_rrb() call.
+			 * Confirm against the CAT v22 register map; kept
+			 * as-is to preserve current behavior.
+			 */
+			cat_nthw_cte_enable_tpe(p, cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS records, starting at 'index', from the shadow in 'cat'.
+ * All supported versions (18/21/22) share the v18 record layout here.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_cts_select(p, idx);
+			cat_nthw_cts_cat_a(p, cat->v18.cts[idx].cat_a);
+			cat_nthw_cts_cat_b(p, cat->v18.cts[idx].cat_b);
+			cat_nthw_cts_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' COT records, starting at 'cat_func', from the shadow in
+ * 'cat'.  All supported versions share the v18 record layout here.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = cat_func + i;
+
+			cat_nthw_cot_select(p, idx);
+			cat_nthw_cot_color(p, cat->v18.cot[idx].color);
+			cat_nthw_cot_km(p, cat->v18.cot[idx].km);
+			cat_nthw_cot_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCT records, starting at 'index', from the shadow in 'cat'.
+ * All supported versions share the v18 record layout here.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_cct_select(p, idx);
+			cat_nthw_cct_color(p, cat->v18.cct[idx].color);
+			cat_nthw_cct_km(p, cat->v18.cct[idx].km);
+			cat_nthw_cct_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' EXO records, starting at 'ext_index', from the shadow in
+ * 'cat'.  All supported versions share the v18 record layout here.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = ext_index + i;
+
+			cat_nthw_exo_select(p, idx);
+			cat_nthw_exo_dyn(p, cat->v18.exo[idx].dyn);
+			cat_nthw_exo_ofs(p, cat->v18.exo[idx].ofs);
+			cat_nthw_exo_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RCK records, starting at 'index', from the shadow in 'cat'.
+ * All supported versions share the v18 record layout here.
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			cat_nthw_rck_select(p, idx);
+			cat_nthw_rck_data(p, cat->v18.rck[idx].rck_data);
+			cat_nthw_rck_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' LEN records, starting at 'len_index', from the shadow in
+ * 'cat'.  All supported versions share the v18 record layout here.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_len_select(p, idx);
+			cat_nthw_len_lower(p, cat->v18.len[idx].lower);
+			cat_nthw_len_upper(p, cat->v18.len[idx].upper);
+			cat_nthw_len_dyn1(p, cat->v18.len[idx].dyn1);
+			cat_nthw_len_dyn2(p, cat->v18.len[idx].dyn2);
+			cat_nthw_len_inv(p, cat->v18.len[idx].inv);
+			cat_nthw_len_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCC CAM records, starting at 'len_index', from the shadow
+ * in 'cat'.  All supported versions share the v18 record layout here.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_kcc_select(p, idx);
+			cat_nthw_kcc_key(p, cat->v18.kcc_cam[idx].key);
+			cat_nthw_kcc_category(p, cat->v18.kcc_cam[idx].category);
+			cat_nthw_kcc_id(p, cat->v18.kcc_cam[idx].id);
+			cat_nthw_kcc_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCE records, starting at 'len_index', from the shadow in
+ * 'cat'.  The CCE table only exists from CAT v22 onwards; other versions
+ * are a no-op.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_cce_select(p, idx);
+			cat_nthw_cce_data_imm(p, cat->v22.cce[idx].imm);
+			cat_nthw_cce_data_ind(p, cat->v22.cce[idx].ind);
+			cat_nthw_cce_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCS records, starting at 'len_index', from the shadow in
+ * 'cat'.  The CCS table only exists from CAT v22 onwards; other versions
+ * are a no-op.  Each record is a set of <field>_en/<field> pairs plus
+ * three sideband type/data pairs, written in register order before the
+ * per-record flush.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct cat_nthw *p = be->p_cat_nthw;
+
+	_CHECK_DEBUG_ON(be, cat, p);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = len_index + i;
+
+			cat_nthw_ccs_select(p, idx);
+			cat_nthw_ccs_data_cor_en(p, cat->v22.ccs[idx].cor_en);
+			cat_nthw_ccs_data_cor(p, cat->v22.ccs[idx].cor);
+			cat_nthw_ccs_data_hsh_en(p, cat->v22.ccs[idx].hsh_en);
+			cat_nthw_ccs_data_hsh(p, cat->v22.ccs[idx].hsh);
+			cat_nthw_ccs_data_qsl_en(p, cat->v22.ccs[idx].qsl_en);
+			cat_nthw_ccs_data_qsl(p, cat->v22.ccs[idx].qsl);
+			cat_nthw_ccs_data_ipf_en(p, cat->v22.ccs[idx].ipf_en);
+			cat_nthw_ccs_data_ipf(p, cat->v22.ccs[idx].ipf);
+			cat_nthw_ccs_data_slc_en(p, cat->v22.ccs[idx].slc_en);
+			cat_nthw_ccs_data_slc(p, cat->v22.ccs[idx].slc);
+			cat_nthw_ccs_data_pdb_en(p, cat->v22.ccs[idx].pdb_en);
+			cat_nthw_ccs_data_pdb(p, cat->v22.ccs[idx].pdb);
+			cat_nthw_ccs_data_msk_en(p, cat->v22.ccs[idx].msk_en);
+			cat_nthw_ccs_data_msk(p, cat->v22.ccs[idx].msk);
+			cat_nthw_ccs_data_hst_en(p, cat->v22.ccs[idx].hst_en);
+			cat_nthw_ccs_data_hst(p, cat->v22.ccs[idx].hst);
+			cat_nthw_ccs_data_epp_en(p, cat->v22.ccs[idx].epp_en);
+			cat_nthw_ccs_data_epp(p, cat->v22.ccs[idx].epp);
+			cat_nthw_ccs_data_tpe_en(p, cat->v22.ccs[idx].tpe_en);
+			cat_nthw_ccs_data_tpe(p, cat->v22.ccs[idx].tpe);
+			cat_nthw_ccs_data_rrb_en(p, cat->v22.ccs[idx].rrb_en);
+			cat_nthw_ccs_data_rrb(p, cat->v22.ccs[idx].rrb);
+			cat_nthw_ccs_data_sb0_type(p, cat->v22.ccs[idx].sb0_type);
+			cat_nthw_ccs_data_sb0_data(p, cat->v22.ccs[idx].sb0_data);
+			cat_nthw_ccs_data_sb1_type(p, cat->v22.ccs[idx].sb1_type);
+			cat_nthw_ccs_data_sb1_data(p, cat->v22.ccs[idx].sb1_data);
+			cat_nthw_ccs_data_sb2_type(p, cat->v22.ccs[idx].sb2_type);
+			cat_nthw_ccs_data_sb2_data(p, cat->v22.ccs[idx].sb2_data);
+			cat_nthw_ccs_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, p);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* Report whether the KM module exists on this adapter: present iff its
+ * nthw shadow was instantiated (non-NULL pointer).
+ */
+static bool km_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = be_dev;
+
+	return dev->p_km_nthw ? true : false;
+}
+
+/* KM version word: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = module_get_major_version(be->p_km_nthw->m_km);
+	uint32_t minor = module_get_minor_version(be->p_km_nthw->m_km);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' consecutive KM RCP records, starting at record 'category',
+ * to the hardware, flushing after every record.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* One record per flush cycle */
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			km_nthw_rcp_select(be->p_km_nthw, idx);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[idx].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[idx].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[idx].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[idx].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[idx].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[idx].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[idx].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[idx].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[idx].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[idx].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[idx].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[idx].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[idx].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[idx].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[idx].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[idx].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[idx].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[idx].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[idx].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[idx].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[idx].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[idx].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[idx].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[idx].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[idx].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[idx].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[idx].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[idx].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[idx].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[idx].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[idx].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[idx].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[idx].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[idx].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[idx].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[idx].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[idx].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[idx].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[idx].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[idx].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[idx].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[idx].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[idx].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[idx].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' KM CAM records, starting at 'record' in 'bank', to the
+ * hardware, flushing after every record.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* Each bank spans 2048 (1 << 11) records */
+			const int idx = (bank << 11) + record + i;
+
+			km_nthw_cam_select(be->p_km_nthw, idx);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[idx].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[idx].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[idx].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[idx].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[idx].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[idx].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[idx].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[idx].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[idx].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[idx].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[idx].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[idx].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write dirty KM TCAM entries in the range [start, start + cnt) to the
+ * hardware; entries already in sync (dirty == 0) are skipped.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* Layout: 4 * 256 entries per bank, 256 entries per byte */
+		const int start = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = start + i;
+
+			/* Only write entries changed since the last flush */
+			if (!km->v7.tcam[idx].dirty)
+				continue;
+
+			km_nthw_tcam_select(be->p_km_nthw, idx);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[idx].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[idx].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Write 'cnt' KM TCI entries, starting at 'index' within TCAM 'bank',
+ * to the hardware, flushing after each entry.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width is 72 entries */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = bank * 72 + index + i;
+
+			km_nthw_tci_select(be->p_km_nthw, idx);
+			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[idx].color);
+			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[idx].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Write 'cnt' KM TCQ entries to the hardware, flushing after each entry.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* Address: bank in the lower 4 bits, index above them */
+			const int idx = bank + (index << 4) + i;
+
+			km_nthw_tcq_select(be->p_km_nthw, idx);
+			km_nthw_tcq_bank_mask(be->p_km_nthw, km->v7.tcq[idx].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[idx].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* Return true when the FLM (flow matcher) module exists on this backend. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	return be->p_flm_nthw ? true : false;
+}
+
+/* FLM version word: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = module_get_major_version(be->p_flm_nthw->m_flm);
+	uint32_t minor = module_get_minor_version(be->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Push the cached FLM control register fields to hardware and flush. */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw, flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM status fields to hardware. CALIBDONE, INITDONE,
+ * IDLE and EFT_BP are read-only and therefore never written here.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical, 0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Read the FLM status register and copy all fields into the shadow struct. */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw, &flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone, 1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical, 1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the cached FLM timeout value to hardware and flush. */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the cached FLM scrub interval to hardware and flush. */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the cached FLM load_bin value to hardware and flush. */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the cached FLM load_pps value to hardware and flush. */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the cached FLM load_lps value to hardware and flush. */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the cached FLM load_aps value to hardware and flush. */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the four cached FLM priority limit/ft pairs to hardware and flush. */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' FLM PST records, starting at 'index', to the hardware,
+ * flushing after every record.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			flm_nthw_pst_select(be->p_flm_nthw, idx);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[idx].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[idx].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[idx].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' FLM RCP records, starting at 'index', to the hardware,
+ * flushing after every record.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			flm_nthw_rcp_select(be->p_flm_nthw, idx);
+			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v17.rcp[idx].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v17.rcp[idx].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v17.rcp[idx].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v17.rcp[idx].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v17.rcp[idx].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v17.rcp[idx].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v17.rcp[idx].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v17.rcp[idx].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v17.rcp[idx].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v17.rcp[idx].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v17.rcp[idx].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v17.rcp[idx].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v17.rcp[idx].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v17.rcp[idx].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v17.rcp[idx].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v17.rcp[idx].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v17.rcp[idx].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v17.rcp[idx].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw, flm->v17.rcp[idx].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Refresh the shadow learn/info/status buffer counters from hardware. */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+					 &flm->v17.buf_ctrl->lrn_free,
+					 &flm->v17.buf_ctrl->inf_avail,
+					 &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Latch all FLM statistics counters in hardware and copy them into the
+ * shadow structures; v20 adds an extra group of counters.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* Latch the counter registers */
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		/* Retrieve the latched values into the shadow structs */
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt, 1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt, 1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt, 1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt, 1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt, 1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt, 1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt, 1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt, 1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		/* Counters present from FLM version 20 onwards */
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt, 1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt, 1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt, 1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt, 1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt, 1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt, 1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push 'size' words of learn data to hardware; the shadow buffer counters
+ * are refreshed by the call.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = be_dev;
+	int ret;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read up to 'size' words of info data from hardware; the shadow buffer
+ * counters are refreshed by the call.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = be_dev;
+	int ret;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read up to 'size' words of status data from hardware; the shadow buffer
+ * counters are refreshed by the call.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = be_dev;
+	int ret;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* Return true when the HSH (hasher) module exists on this backend. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	return be->p_hsh_nthw ? true : false;
+}
+
+/* HSH version word: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = module_get_major_version(be->p_hsh_nthw->m_hsh);
+	uint32_t minor = module_get_minor_version(be->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' HSH RCP records, starting at record 'category', to the
+ * hardware, flushing after every record.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			hsh_nthw_rcp_select(be->p_hsh_nthw, idx);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw, hsh->v5.rcp[idx].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw, hsh->v5.rcp[idx].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[idx].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw, hsh->v5.rcp[idx].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[idx].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[idx].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw, hsh->v5.rcp[idx].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[idx].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw, hsh->v5.rcp[idx].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* Return true when the HST (header stripper) module exists on this backend. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	return be->p_hst_nthw ? true : false;
+}
+
+/* HST version word: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = module_get_major_version(be->p_hst_nthw->m_hst);
+	uint32_t minor = module_get_minor_version(be->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' HST RCP records, starting at record 'category', to the
+ * hardware, flushing after every record.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			hst_nthw_rcp_select(be->p_hst_nthw, idx);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw, hst->v2.rcp[idx].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw, hst->v2.rcp[idx].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw, hst->v2.rcp[idx].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw, hst->v2.rcp[idx].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw, hst->v2.rcp[idx].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw, hst->v2.rcp[idx].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw, hst->v2.rcp[idx].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw, hst->v2.rcp[idx].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* Return true when the QSL (queue selector) module exists on this backend. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+
+	return be->p_qsl_nthw ? true : false;
+}
+
+/* QSL version word: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = module_get_major_version(be->p_qsl_nthw->m_qsl);
+	uint32_t minor = module_get_minor_version(be->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' QSL RCP records, starting at record 'category', to the
+ * hardware, flushing after every record.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			qsl_nthw_rcp_select(be->p_qsl_nthw, idx);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[idx].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[idx].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[idx].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[idx].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[idx].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' QSL QST entries, starting at 'entry', to the hardware,
+ * flushing after every entry.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_qst_select(be->p_qsl_nthw, idx);
+			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[idx].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[idx].en);
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[idx].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[idx].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[idx].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[idx].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' QSL QEN entries, starting at 'entry', to the hardware,
+ * flushing after every entry.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_qen_select(be->p_qsl_nthw, idx);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[idx].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' QSL UNMQ entries, starting at 'entry', to the hardware,
+ * flushing after every entry.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_unmq_select(be->p_qsl_nthw, idx);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw, qsl->v7.unmq[idx].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[idx].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_nthw != NULL;
+}
+
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_nthw->m_slc) << 16) |
+			  (module_get_minor_version(be->p_slc_nthw->m_slc) &
+			   0xffff));
+}
+
/*
 * Write SLC RCP (recipe) records [category, category + cnt) to hardware,
 * one record per select/flush pair.  Only SLC version 1 is handled; other
 * versions do nothing and 0 is returned.
 */
static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
			 int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);

	if (slc->ver == 1) {
		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
						 slc->v1.rcp[category + i].tail_slc_en);
			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
					   slc->v1.rcp[category + i].tail_dyn);
			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
					   slc->v1.rcp[category + i].tail_ofs);
			slc_nthw_rcp_pcap(be->p_slc_nthw,
					slc->v1.rcp[category + i].pcap);
			slc_nthw_rcp_flush(be->p_slc_nthw);
		}
	}

	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
	return 0;
}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_lr_nthw != NULL;
+}
+
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_lr_nthw->m_slc_lr)
+			   << 16) |
+			  (module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) &
+			   0xffff));
+}
+
/*
 * Write SLC-LR RCP (recipe) records [category, category + cnt) to hardware,
 * one record per select/flush pair.  Only SLC-LR version 2 is handled;
 * other versions do nothing and 0 is returned.
 */
static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
			    int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);

	if (slc_lr->ver == 2) {
		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
						    slc_lr->v2.rcp[category + i].tail_slc_en);
			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
						 slc_lr->v2.rcp[category + i].tail_dyn);
			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
						 slc_lr->v2.rcp[category + i].tail_ofs);
			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
					  slc_lr->v2.rcp[category + i].pcap);
			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
		}
	}

	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
	return 0;
}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_pdb_nthw != NULL;
+}
+
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
+			  (module_get_minor_version(be->p_pdb_nthw->m_pdb) &
+			   0xffff));
+}
+
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
/*
 * Write the single PDB CONFIG register (timestamp format and port offset)
 * to hardware.  Only PDB version 9 is handled; other versions do nothing
 * and 0 is returned.
 */
static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);

	if (pdb->ver == 9) {
		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
		pdb_nthw_config_flush(be->p_pdb_nthw);
	}

	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
	return 0;
}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_ioa_nthw != NULL;
+}
+
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_ioa_nthw->m_ioa) << 16) |
+			  (module_get_minor_version(be->p_ioa_nthw->m_ioa) &
+			   0xffff));
+}
+
/*
 * Write IOA RCP (recipe) records [category, category + cnt) to hardware,
 * one record per select/flush pair.  Only IOA version 4 is handled; other
 * versions do nothing and 0 is returned.
 */
static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
			 int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);

	if (ioa->ver == 4) {
		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
						ioa->v4.rcp[category + i].tunnel_pop);
			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_pop);
			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
					       ioa->v4.rcp[category + i].vlan_push);
			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_vid);
			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_dei);
			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].vlan_pcp);
			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
						   ioa->v4.rcp[category + i].vlan_tpid_sel);
			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
						       ioa->v4.rcp[category + i].queue_override_en);
			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
					   ioa->v4.rcp[category + i].queue_id);
			ioa_nthw_rcp_flush(be->p_ioa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
	return 0;
}
+
/*
 * Write the two custom VLAN TPID values to the IOA special-TPID register
 * and flush.  Only IOA version 4 is handled; other versions do nothing
 * and 0 is returned.
 */
static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);

	if (ioa->ver == 4) {
		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
						 ioa->v4.tpid->cust_tpid_0);
		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
						 ioa->v4.tpid->cust_tpid_1);
		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
	}

	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
	return 0;
}
+
/*
 * Write IOA ROA-EPP (egress path) records [index, index + cnt) to hardware,
 * one record per select/flush pair.  Only IOA version 4 is handled; other
 * versions do nothing and 0 is returned.
 */
static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);

	if (ioa->ver == 4) {
		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
						     ioa->v4.roa_epp[index + i].push_tunnel);
			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
						 ioa->v4.roa_epp[index + i].tx_port);
			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
	return 0;
}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw != NULL;
+}
+
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+			  (module_get_minor_version(be->p_roa_nthw->m_roa) &
+			   0xffff));
+}
+
/*
 * Write ROA tunnel-header data to hardware.  Each tunnel header record is
 * spread over 4 consecutive hardware words: record i is written via four
 * selects at index + i*4 .. index + i*4 + 3, sourcing 4-entry chunks of
 * tunhdr[index / 4 + i].tunnel_hdr.
 * NOTE(review): 'index' is therefore treated as a hardware-word index
 * (expected to be a multiple of 4) while tunhdr[] is per record -- confirm
 * against callers.  Only ROA version 6 is handled.
 */
static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
			    int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
		for (int i = 0; i < cnt; i++) {
			for (int ii = 0; ii < 4; ii++) {
				roa_nthw_tun_hdr_select(be->p_roa_nthw,
						     index + (i * 4) + ii);
				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
							    &roa->v6.tunhdr[index / 4 + i]
							    .tunnel_hdr[ii * 4]);
				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
			}
		}
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
/*
 * Write ROA tunnel-config records [category, category + cnt) to hardware,
 * one record per select/flush pair.  Only ROA version 6 is handled; other
 * versions do nothing and 0 is returned.
 */
static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
			    int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
						 roa->v6.tuncfg[category + i].tun_len);
			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].tun_type);
			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].tun_vlan);
			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
						 roa->v6.tuncfg[category + i].ip_type);
			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].ipcs_upd);
			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
						      roa->v6.tuncfg[category + i].ipcs_precalc);
			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
						  roa->v6.tuncfg[category + i].iptl_upd);
			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
						      roa->v6.tuncfg[category + i].iptl_precalc);
			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
						   roa->v6.tuncfg[category + i].tx_lag_ix);
			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
						     roa->v6.tuncfg[category + i].recirculate);
			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
						     roa->v6.tuncfg[category + i].push_tunnel);
			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
						     roa->v6.tuncfg[category + i].recirc_port);
			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
						       roa->v6.tuncfg[category + i].recirc_bypass);
			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
/*
 * Write the ROA CONFIG register set (forwarding/recirculation controls)
 * to hardware and flush.  Only ROA version 6 is handled; other versions
 * do nothing and 0 is returned.
 */
static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
					     roa->v6.config->fwd_recirculate);
		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
					    roa->v6.config->fwd_normal_pcks);
		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
					 roa->v6.config->fwd_txport0);
		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
					 roa->v6.config->fwd_txport1);
		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
						      roa->v6.config->fwd_cellbuilder_pcks);
		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
						    roa->v6.config->fwd_non_normal_pcks);
		roa_nthw_config_flush(be->p_roa_nthw);
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
/*
 * Write ROA LAG-config records [index, index + cnt) to hardware, one
 * record per select/flush pair.  Only ROA version 6 is handled; other
 * versions do nothing and 0 is returned.
 */
static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
			    int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);

	if (roa->ver == 6) {
		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
						     roa->v6.lagcfg[index + i].txphy_port);
			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
		}
	}

	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
	return 0;
}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw != NULL;
+}
+
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+			  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+			   0xffff));
+}
+
/*
 * Write the RMC CTRL register (blocking and LAG controls) to hardware and
 * flush.  Only RMC version 1.3 (encoded as 0x10003, major 1 / minor 3) is
 * handled; other versions do nothing and 0 is returned.
 */
static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);

	if (rmc->ver == 0x10003) {
		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
				       rmc->v1_3.ctrl->block_statt);
		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
				       rmc->v1_3.ctrl->block_keepa);
		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
					  rmc->v1_3.ctrl->block_rpp_slice);
		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
					 rmc->v1_3.ctrl->block_mac_port);
		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
					  rmc->v1_3.ctrl->lag_phy_odd_even);
		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
	}

	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
	return 0;
}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+	       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+	       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
/*
 * Derive a synthetic TPE version from the versions of its six sub-modules
 * (each encoded as major << 16 | minor, so the compared literals below are
 * "major 0/1/2, minor 0" values).  Only two known sub-module combinations
 * are recognized, mapped to TPE version 1 and 2; the combinations differ
 * only in the RPP_LR version.
 * NOTE(review): an unrecognized combination hits assert(false) and then
 * returns 0 -- with NDEBUG the assert compiles out and version 0 is
 * returned silently.
 */
static uint32_t tpe_get_version(void *be_dev)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	const uint32_t csu_version =
		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
			    0xffff));

	const uint32_t hfu_version =
		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
			    0xffff));

	const uint32_t rpp_lr_version =
		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
			    << 16) |
			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
			    0xffff));

	const uint32_t tx_cpy_version =
		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
			    << 16) |
			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
			    0xffff));

	const uint32_t tx_ins_version =
		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
			    << 16) |
			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
			    0xffff));

	const uint32_t tx_rpl_version =
		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
			    << 16) |
			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
			    0xffff));

	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
		return 1;

	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
		return 2;

	assert(false);
	return 0;
}
+
/*
 * Write RPP-LR RCP records [index, index + cnt) to hardware, one record
 * per select/flush pair.  Handled for TPE version >= 1; older versions do
 * nothing and 0 is returned.
 */
static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);

	if (rpp_lr->ver >= 1) {
		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
					 rpp_lr->v1.rpp_rcp[index + i].exp);
			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
		}
	}

	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
	return 0;
}
+
/*
 * Write RPP-LR IFR RCP records [index, index + cnt) to hardware.  Requires
 * TPE version >= 2; unlike most flush helpers here, an unsupported version
 * returns -1 instead of silently succeeding.
 */
static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
				 int index, int cnt)
{
	int res = 0;
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);

	if (rpp_lr->ver >= 2) {
		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
		}
	} else {
		res = -1;
	}
	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
	return res;
}
+
/*
 * Write IFR RCP records [index, index + cnt) to hardware.  Requires TPE
 * version >= 2; an unsupported version returns -1 (matching
 * tpe_rpp_ifr_rcp_flush rather than the silent-0 helpers).
 */
static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
			     int index, int cnt)
{
	int res = 0;
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);

	if (ifr->ver >= 2) {
		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
			ifr_nthw_rcp_en(be->p_ifr_nthw,
				      ifr->v2.ifr_rcp[index + i].en);
			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
				       ifr->v2.ifr_rcp[index + i].mtu);
			ifr_nthw_rcp_flush(be->p_ifr_nthw);
		}
	} else {
		res = -1;
	}
	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
	return res;
}
+
/*
 * Write TX_INS RCP records [index, index + cnt) to hardware, one record
 * per select/flush pair.  Handled for TPE version >= 1; older versions do
 * nothing and 0 is returned.
 */
static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);

	if (tx_ins->ver >= 1) {
		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
					 tx_ins->v1.ins_rcp[index + i].dyn);
			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
					 tx_ins->v1.ins_rcp[index + i].ofs);
			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
					 tx_ins->v1.ins_rcp[index + i].len);
			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
	return 0;
}
+
/*
 * Write TX_RPL RCP records [index, index + cnt) to hardware, one record
 * per select/flush pair.  Handled for TPE version >= 1; older versions do
 * nothing and 0 is returned.
 */
static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);

	if (tx_rpl->ver >= 1) {
		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
					 tx_rpl->v1.rpl_rcp[index + i].dyn);
			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
					 tx_rpl->v1.rpl_rcp[index + i].ofs);
			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
					 tx_rpl->v1.rpl_rcp[index + i].len);
			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
	return 0;
}
+
/*
 * Write TX_RPL EXT (extension pointer) records [index, index + cnt) to
 * hardware, one record per select/flush pair.  Handled for TPE version
 * >= 1; older versions do nothing and 0 is returned.
 */
static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);

	if (tx_rpl->ver >= 1) {
		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
	return 0;
}
+
/*
 * Write TX_RPL RPL (replacement value) records [index, index + cnt) to
 * hardware, one record per select/flush pair.  Handled for TPE version
 * >= 1; older versions do nothing and 0 is returned.
 */
static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);

	if (tx_rpl->ver >= 1) {
		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
					   tx_rpl->v1.rpl_rpl[index + i].value);
			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
		}
	}

	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
	return 0;
}
+
/*
 * Write TX_CPY RCP records [index, index + cnt) to hardware.  Records are
 * distributed over several writers: the writer for record r is
 * r / nb_rcp_categories and the slot within that writer is the remainder.
 * The writer's count register is re-programmed only when the loop crosses
 * into a new writer.
 */
static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	/* -1 wraps to UINT_MAX: a sentinel that cannot match a real writer
	 * index, so the first iteration always programs the writer count.
	 */
	unsigned int wr_index = -1;

	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);

	if (tx_cpy->ver >= 1) {
		for (int i = 0; i < cnt; i++) {
			if (wr_index !=
					(index + i) / tx_cpy->nb_rcp_categories) {
				wr_index =
					(index + i) / tx_cpy->nb_rcp_categories;
				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
						    1);
			}

			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
						  (index + i) % tx_cpy->nb_rcp_categories);
			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
				tx_cpy->v1.cpy_rcp[index + i].reader_select);
			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
					    tx_cpy->v1.cpy_rcp[index + i].dyn);
			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
					    tx_cpy->v1.cpy_rcp[index + i].ofs);
			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
					    tx_cpy->v1.cpy_rcp[index + i].len);
			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
		}
	}

	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
	return 0;
}
+
/*
 * Write HFU (header field update) RCP records [index, index + cnt) to
 * hardware, one record per select/flush pair.  Each record programs the
 * three length-update groups (len_a/b/c), TTL update, and the protocol /
 * offset descriptor fields.  Handled for TPE version >= 1; older versions
 * do nothing and 0 is returned.
 */
static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);

	if (hfu->ver >= 1) {
		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
					  hfu->v1.hfu_rcp[index + i].len_a_wr);
			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
					      hfu->v1.hfu_rcp[index + i].len_b_wr);
			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
					      hfu->v1.hfu_rcp[index + i].len_c_wr);
			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].ttl_wr);
			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].cs_inf);
			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].l3_prt);
			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].l3_frag);
			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].tunnel);
			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
					   hfu->v1.hfu_rcp[index + i].l4_prt);
			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
			hfu_nthw_rcp_flush(be->p_hfu_nthw);
		}
	}

	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
	return 0;
}
+
/*
 * Write CSU (checksum update) RCP records [index, index + cnt) to
 * hardware: outer/inner L3 and L4 checksum commands per record.  Handled
 * for TPE version >= 1; older versions do nothing and 0 is returned.
 */
static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
			     int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);

	if (csu->ver >= 1) {
		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
		for (int i = 0; i < cnt; i++) {
			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].ol3_cmd);
			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].ol4_cmd);
			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].il3_cmd);
			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
						  csu->v1.csu_rcp[index + i].il4_cmd);
			csu_nthw_rcp_flush(be->p_csu_nthw);
		}
	}

	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
	return 0;
}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
/*
 * Rx queue allocation is not supported by this backend; always fails.
 * Returns -1.  Fix: the diagnostic previously went to stdout via printf;
 * error messages belong on stderr.
 */
static int alloc_rx_queue(void *be_dev, int queue_id)
{
	(void)be_dev;
	(void)queue_id;
	fprintf(stderr, "ERROR alloc Rx queue\n");
	return -1;
}
+
/*
 * Rx queue release is not supported by this backend; logs and returns 0
 * (return value kept for compatibility with existing callers, even though
 * this is an error path).  Fix: the diagnostic previously went to stdout
 * via printf; error messages belong on stderr.
 */
static int free_rx_queue(void *be_dev, int hw_queue)
{
	(void)be_dev;
	(void)hw_queue;
	fprintf(stderr, "ERROR free Rx queue\n");
	return 0;
}
+
/*
 * Backend operations vtable exported to the generic flow API layer.
 * NOTE(review): the initializer is positional, so the order of entries
 * below must exactly match the member order of struct flow_api_backend_ops
 * -- the leading 1 initializes its first (integer) member; confirm its
 * meaning (presumably an interface version) against the struct definition.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,

	/* Adapter capability / parameter getters. */
	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_hst_categories,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_ioa_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,

	/* Queue management (stubs in this backend). */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module. */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,
	cat_cce_flush,
	cat_ccs_flush,

	/* KM module. */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM module. */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_timeout_flush,
	flm_scrub_flush,
	flm_load_bin_flush,
	flm_load_pps_flush,
	flm_load_lps_flush,
	flm_load_aps_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_data_update,
	flm_sta_data_update,

	/* HSH module. */
	hsh_get_present,
	hsh_get_version,
	hsh_rcp_flush,

	/* HST module. */
	hst_get_present,
	hst_get_version,
	hst_rcp_flush,

	/* QSL module. */
	qsl_get_present,
	qsl_get_version,
	qsl_rcp_flush,
	qsl_qst_flush,
	qsl_qen_flush,
	qsl_unmq_flush,

	/* SLC module. */
	slc_get_present,
	slc_get_version,
	slc_rcp_flush,

	/* SLC LR module. */
	slc_lr_get_present,
	slc_lr_get_version,
	slc_lr_rcp_flush,

	/* PDB module. */
	pdb_get_present,
	pdb_get_version,
	pdb_rcp_flush,
	pdb_config_flush,

	/* IOA module. */
	ioa_get_present,
	ioa_get_version,
	ioa_rcp_flush,
	ioa_special_tpid_flush,
	ioa_roa_epp_flush,

	/* ROA module. */
	roa_get_present,
	roa_get_version,
	roa_tunhdr_flush,
	roa_tuncfg_flush,
	roa_config_flush,
	roa_lagcfg_flush,

	/* RMC module. */
	rmc_get_present,
	rmc_get_version,
	rmc_ctrl_flush,

	/* TPE module. */
	tpe_get_present,
	tpe_get_version,
	tpe_rpp_rcp_flush,
	tpe_rpp_ifr_rcp_flush,
	tpe_ifr_rcp_flush,
	tpe_ins_rcp_flush,
	tpe_rpl_rcp_flush,
	tpe_rpl_ext_flush,
	tpe_rpl_rpl_flush,
	tpe_cpy_rcp_flush,
	tpe_hfu_rcp_flush,
	tpe_csu_rcp_flush,
};
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Release every backend module instance created by bin_flow_backend_init().
+ *
+ * dev: the per-adapter backend device returned through the init function's
+ *      "dev" out-parameter.
+ *
+ * Modules that were absent in the FPGA left NULL pointers in these fields.
+ * NOTE(review): this assumes each *_nthw_delete() tolerates a NULL argument —
+ * confirm in the respective module implementations.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/* Create the binary flow backend for the given FPGA: probes/creates all flow
+ * module instances, stores the per-adapter backend device in *be_dev and
+ * returns the backend operations table.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+/* Release every backend module instance created by bin_flow_backend_init(). */
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Create a flow NIC device on top of the binary flow backend.
+ *
+ * On success *p_flow_device holds the new device and 0 is returned; on
+ * failure *p_flow_device is set to NULL and -1 is returned.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+
+	/* Bring up the backend first; it hands back its ops table. */
+	const struct flow_api_backend_ops *be_ops =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	struct flow_nic_dev *nic_dev =
+		flow_api_create((uint8_t)adapter_no, be_ops, be_dev);
+
+	/* On failure nic_dev is NULL, which is exactly what the caller gets. */
+	*p_flow_device = nic_dev;
+	return nic_dev != NULL ? 0 : -1;
+}
+
+/*
+ * Tear down a flow NIC device and the backend it was created on.
+ * Returns the status of flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *backend = flow_api_get_be_dev(dev);
+	int status = flow_api_done(dev);
+
+	if (backend != NULL)
+		bin_flow_backend_done(backend);
+
+	return status;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-31 12:23   ` [PATCH v11 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-31 12:23   ` Mykola Kostenok
  2023-08-31 12:23   ` [PATCH v11 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
v10:
* Fix wrong queue id range.
v11:
* Replace stdatomic by compiler built-in atomic.
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  355 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1245 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12511 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..87ac68ee24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple processes.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;
+
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;
+	pthread_t stat_thread;
+	pthread_t flm_thread;
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;
+	struct rte_pci_device *p_dev;
+	struct ntdrv_4ga_s ntdrv;
+
+	int n_eth_dev_init_count;
+	int probe_finished;
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * Driver-side state for one virtqueue (RX or TX).
+ *
+ * The anonymous union holds either SPLIT-ring or PACKED-ring bookkeeping,
+ * selected by vq_type (see the PACKED() macro above).
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;
+
+	/* Owning DBS module instance and this queue's index within it. */
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	/* Physical addresses of the three ring structures, as programmed
+	 * into the FPGA.
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one RX queue in the DBS module, waiting for the hardware to be
+ * idle before issuing the command and for completion afterwards.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init_val;
+	uint32_t unused;
+
+	/* Wait for any in-flight queue initialization to finish. */
+	for (;;) {
+		get_rx_init(p_nthw_dbs, &init_val, &unused, &busy);
+		if (busy == 0)
+			break;
+	}
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait until the hardware has completed this initialization. */
+	for (;;) {
+		get_rx_init(p_nthw_dbs, &init_val, &unused, &busy);
+		if (busy == 0)
+			break;
+	}
+}
+
+/*
+ * Initialize one TX queue in the DBS module, waiting for the hardware to be
+ * idle before issuing the command and for completion afterwards.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init_val;
+	uint32_t unused;
+
+	/* Wait for any in-flight queue initialization to finish. */
+	for (;;) {
+		get_tx_init(p_nthw_dbs, &init_val, &unused, &busy);
+		if (busy == 0)
+			break;
+	}
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait until the hardware has completed this initialization. */
+	for (;;) {
+		get_tx_init(p_nthw_dbs, &init_val, &unused, &busy);
+		if (busy == 0)
+			break;
+	}
+}
+
+/*
+ * Probe and set up the DBS FPGA module and reset all virtqueues.
+ *
+ * Allocates the DBS instance, verifies the module exists in the FPGA,
+ * marks all software queue slots unused, resets/initializes the hardware
+ * queues and finally enables RX/TX control in a staged sequence.
+ *
+ * Returns 0 on success, -1 on allocation failure, or the dbs_init() error.
+ * On failure p_fpga_info->mp_nthw_dbs is left NULL.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* Mark every software queue slot as free. */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Staged enable: all off, then AM/UW on, then queue scheduling on.
+	 * NOTE(review): the three-step sequence is presumably required by the
+	 * DBS hardware bring-up protocol — confirm against the DBS spec.
+	 */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the byte offsets of the used and descriptor structures within one
+ * contiguous virtqueue allocation.  Each section is rounded up to
+ * STRUCT_ALIGNMENT; the avail structure sits at offset 0.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	struct virtq_struct_layout_s layout;
+	size_t avail_bytes;
+	size_t used_bytes;
+
+	/* + sizeof(le16); ("avail->used_event" is not used) */
+	avail_bytes = sizeof(struct virtq_avail) + queue_size * sizeof(le16);
+	avail_bytes = (avail_bytes + STRUCT_ALIGNMENT - 1) /
+		      STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	used_bytes = sizeof(struct virtq_used) +
+		     queue_size * sizeof(struct virtq_used_elem);
+	used_bytes = (used_bytes + STRUCT_ALIGNMENT - 1) /
+		     STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+
+	layout.used_offset = avail_bytes;
+	layout.desc_offset = avail_bytes + used_bytes;
+
+	return layout;
+}
+
+/*
+ * Initialize a split-ring avail structure: interrupts suppressed, the given
+ * starting index, and an identity mapping (ring entry n -> descriptor n).
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = (struct virtq_avail *)addr;
+	uint16_t n;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+
+	for (n = 0; n < queue_size; n++)
+		avail->ring[n] = n;
+}
+
+/*
+ * Initialize a split-ring used structure: notifications suppressed, index at
+ * zero, and all ring entries cleared.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	uint16_t i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	/* Use the named flag rather than the magic number 1. */
+	p_used->flags = VIRTQ_USED_F_NO_NOTIFY;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Fill a split-ring descriptor table from an array of preallocated packet
+ * buffers.  Each descriptor receives the buffer's physical address and
+ * length, the supplied flags, and next = 0 (descriptor chaining unused).
+ * A NULL descriptor array leaves the table untouched.
+ *
+ * NOTE(review): "flgs" is typed "ule16" while the rest of this file uses the
+ * "le16" define — presumably a typedef declared elsewhere; confirm.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	/* Populate the three split-ring areas (avail, used, desc) */
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors, queue_size,
+					 flgs);
+}
+
+/*
+ * Return floor(log2(qsize)) for a queue size; queue sizes are expected to
+ * be powers of two.  Guard against qsize == 0: without it the trailing
+ * decrement would wrap qs and the truncated return value would be 0xFFFF.
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	if (qsize == 0)
+		return 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure one RX virtqueue in the DBS FPGA block and record its state in
+ * the driver-side rxvq[] table.  Interrupts are always programmed disabled
+ * here; nthw_enable_rx_virt_queue() turns them on later when irq_vector >= 0.
+ * Returns a handle into rxvq[] on success, NULL on any register write error.
+ * Fix: also save "header" in the queue state - the TX counterpart saves it
+ * and dbs_internal_release_rx_virt_queue() reads rxvq->header, which was
+ * previously left stale/uninitialized.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].header = header; /* read back in dbs_internal_release_rx_virt_queue() */
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Quiesce an RX virtqueue: clear the UW interrupt/ISTK configuration,
+ * disable the AM (avail-monitor) side, then wait for the FPGA to drain.
+ * Only valid for UNMANAGED queues. Returns 0 on success, -1 on error.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re-)enable an RX virtqueue: program UW data (with interrupt vector and
+ * sticky-interrupt flag when a valid irq_vector is set) and re-enable AM.
+ * Only valid for UNMANAGED queues. Returns 0 on success, -1 on error.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		/* No (valid) vector: run the queue with interrupts disabled */
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Quiesce a TX virtqueue: clear the UW interrupt/ISTK configuration,
+ * disable the AM side, then wait for the FPGA to drain.
+ * Only valid for UNMANAGED queues. Returns 0 on success, -1 on error.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re-)enable a TX virtqueue: program UW data (with interrupt vector and
+ * sticky-interrupt flag when a valid irq_vector is set) and re-enable AM.
+ * Only valid for UNMANAGED queues. Returns 0 on success, -1 on error.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		/* No (valid) vector: run the queue with interrupts disabled */
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Redirect a TX virtqueue to a new output port and re-enable it.
+ * Returns 0 on success, -1 on error.
+ * Fix: validate tx_vq before dereferencing it, matching the NULL checks
+ * done by the other enable/disable entry points in this file.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Rewrite DR data with the new port, then run the normal enable path */
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Configure per-port TX QoS (enable flag, information rate, burst size).
+ * Thin wrapper around the DBS register accessor; returns its status.
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/* Set the global TX QoS rate as a multiplier/divider pair.
+ * Thin wrapper around the DBS register accessor; returns its status.
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/* Read the current RX queue pointer from hardware. On success *p_index
+ * holds the pointer, or INDEX_PTR_NOT_VALID when HW reports it invalid.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0)
+		*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+
+	return status;
+}
+
+/* Read the current TX queue pointer from hardware. On success *p_index
+ * holds the pointer, or INDEX_PTR_NOT_VALID when HW reports it invalid.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+	const int status = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status == 0)
+		*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+
+	return status;
+}
+
+/* Select which RX queue subsequent dbs_get_rx_ptr() reads refer to */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Select which TX queue subsequent dbs_get_tx_ptr() reads refer to */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Poll the RX or TX idle register until HW clears the busy flag or the
+ * read itself fails. *idle receives the last idle value read.
+ * NOTE(review): this busy-polls with no timeout - it relies on the FPGA
+ * eventually clearing busy; confirm that is guaranteed by the hardware.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Request queue idle from the FPGA and wait until it reports idle.
+ * rx selects the RX (1) or TX (0) register set.
+ * If the idle registers are unsupported (-ENOTSUP), fall back to a fixed
+ * 200 ms grace delay and report success. Returns 0 on success, -1 on error.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			/* Old FPGA without idle support: just give it time */
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		/* Ask HW to idle this queue, then re-poll until it reports idle */
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an RX virtqueue: clear UW, disable AM, drain the FPGA, clear
+ * the remaining AM/DR state and reset the driver-side bookkeeping.
+ * Returns 0 on success, -1 on error.
+ * Fix: the NULL check previously came AFTER reading rxvq->mp_nthw_dbs,
+ * making it ineffective; validate the handle first.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged RX virtqueue; rejects NULL or managed queues */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq && rxvq->usage == UNMANAGED)
+		return dbs_internal_release_rx_virt_queue(rxvq);
+
+	return -1;
+}
+
+/* Release a managed RX virtqueue, freeing the driver-side copy of the
+ * packet-buffer table first. Rejects NULL or unmanaged queues.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a TX virtqueue: clear UW, disable AM, drain the FPGA, clear
+ * the remaining AM/DR/QP state and reset the driver-side bookkeeping.
+ * Returns 0 on success, -1 on error.
+ * Fix: the NULL check previously came AFTER reading txvq->mp_nthw_dbs,
+ * making it ineffective; validate the handle first.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged TX virtqueue; rejects NULL or managed queues */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq && txvq->usage == UNMANAGED)
+		return dbs_internal_release_tx_virt_queue(txvq);
+
+	return -1;
+}
+
+/* Release a managed TX virtqueue, freeing the driver-side copy of the
+ * packet-buffer table first. Rejects NULL or unmanaged queues.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Configure one TX virtqueue in the DBS FPGA block and record its state in
+ * the driver-side txvq[] table.  Interrupts are always programmed disabled
+ * here; nthw_enable_tx_virt_queue() turns them on later when irq_vector >= 0.
+ * Returns a handle into txvq[] on success, NULL on any register write error.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings point UW at the descriptor area, split rings at "used" */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Build a managed split-ring RX virtqueue: lay out avail/used/desc in the
+ * caller-supplied struct area, keep a driver copy of the packet-buffer
+ * table and program the hardware. Returns the queue handle, or NULL on
+ * allocation failure.
+ * Fix: the malloc() result was previously passed straight to memcpy()
+ * without a NULL check.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		/* Keep a driver-side copy of the buffer descriptors */
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL; /* bail out before touching hardware */
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Build a managed split-ring TX virtqueue: lay out avail/used/desc in the
+ * caller-supplied struct area, keep a driver copy of the packet-buffer
+ * table and program the hardware. Returns the queue handle, or NULL on
+ * allocation failure.
+ * Fix: the malloc() result was previously passed straight to memcpy()
+ * without a NULL check.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		/* Keep a driver-side copy of the buffer descriptors */
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL; /* bail out before touching hardware */
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Common packed-ring initialization for managed RX/TX queues: lay out the
+ * descriptor table plus the two event-suppression areas in the page-aligned
+ * struct area, pre-fill the descriptors from the packet-buffer table and
+ * keep a driver-side copy of that table. vq->queue_size must be set by the
+ * caller before entry. Returns 0 on success, -1 on bad size/allocation.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	/* Layout: descriptor table, then device and driver event areas */
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	/* Packed rings start with both wrap counters at 1 per the virtio spec */
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Build a managed packed-ring RX virtqueue on top of the common packed
+ * initializer, then program the hardware. Returns the handle from rxvq[],
+ * or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	/* 0x8000 in start_idx encodes the initial wrap bit for the HW */
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Build a managed packed-ring TX virtqueue on top of the common packed
+ * initializer, then program the hardware. Returns the handle from txvq[],
+ * or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	/* 0x8000 in start_idx encodes the initial wrap bit for the HW */
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on the requested virtio ring layout */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	/* Unknown ring type */
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on the requested virtio ring layout */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	/* Unknown ring type */
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Avail/used flag values depend on the current avail wrap counter */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance the avail cursor by num, toggling the wrap counter on wrap-around.
+ * Arguments are captured in locals to avoid multiple evaluation.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance the used cursor by num, toggling the wrap counter on wrap-around.
+ * Arguments are captured in locals to avoid multiple evaluation.
+ */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Harvest up to n received buffers from a virtqueue into rp[].
+ * *nb_pkts receives the number of complete packets; the return value is
+ * the total number of ring entries (segments) consumed, which exceeds
+ * *nb_pkts when jumbo frames span multiple buffers (split ring only).
+ * NOTE(review): the "rxvq" parameter shadows the file-scope rxvq[] table.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* All RX buffers share the size of descriptor 0 */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			/* Captured length from the FPGA packet header */
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				/* Gather the remaining segments of this packet */
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Descriptor is ours only when both bits match the wrap count */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		/* Packed path never splits packets: one segment per packet */
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ *
+ * SPLIT_RING: bump the shadow avail index by n and publish it.
+ * PACKED_RING: re-post n descriptors, writing the first descriptor's flags
+ * last so HW never sees a partially filled batch.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/* NOTE(review): ordering descriptor writes before publishing the
+		 * first flags would conventionally require a write barrier
+		 * (rte_wmb); confirm rte_rmb() is intentional here.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Debug logging stub - expands to nothing in production builds */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n free TX descriptors for the caller to fill.
+ * Sets *first_idx to the first reserved descriptor index and *cvq to the
+ * ring view (split or packed); *p_virt_addr points to the queue's buffer
+ * descriptor table. Returns the number of descriptors actually reserved.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Serve descriptors left over from a previous call first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* desc->id may point past next_used when HW completed a batch */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Stash any over-collected descriptors for the next call */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled TX packets (with per-packet segment counts in n_segs[])
+ * over to hardware.
+ * SPLIT_RING: link each packet head into the avail ring and publish the index.
+ * PACKED_RING: mark descriptors available, writing the first descriptor's
+ * flags last so HW never consumes a half-published batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Proper read barrier before FPGA may see first flags */
+		/* NOTE(review): a write barrier (rte_wmb) is what would normally
+		 * order the descriptor stores before the flags store; confirm
+		 * rte_rmb() is intentional here.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the current RX queue pointer from hardware.
+ * Polls until the pointer becomes valid; gives up after 100000 attempts
+ * (10 us apart) or on a register read failure.
+ * Returns 0 and stores the 16-bit pointer in *index on success, -1 on error.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	for (;;) {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+		if (raw_ptr != INDEX_PTR_NOT_VALID)
+			break;
+	}
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the current TX queue pointer from hardware.
+ * Polls until the pointer becomes valid; gives up after 100000 attempts
+ * (10 us apart) or on a register read failure.
+ * Returns 0 and stores the 16-bit pointer in *index on success, -1 on error.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+		if (raw_ptr != INDEX_PTR_NOT_VALID)
+			break;
+	}
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/* One host memory region shared with HW: physical + virtual address and byte length */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/* On-ring field type aliases (presumably "unsigned little-endian"; no byte
+ * swapping is performed by these aliases).
+ * NOTE(review): typedefs would be preferable to #define for type aliases.
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Virtqueue layout selectors: virtio split ring vs packed ring */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+/* Whether in-order descriptor completion is required */
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ * Packed to byte granularity to match the virtio on-ring layout exactly.
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned
+ * NOTE(review): #pragma pack can only lower member alignment, it cannot
+ * raise struct alignment to 16 - the 16-byte placement presumably comes
+ * from the allocator; confirm.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Packed-ring driver/device event suppression area (virtio 1.1) */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned
+	 * NOTE(review): a #pragma pack(push) in the middle of a struct
+	 * definition is unusual and only affects members declared after it;
+	 * confirm the intended layout.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+/*
+ * Common virtq descr
+ * Helpers that work on either ring layout via the vq_type discriminator.
+ */
+/* Set the 'next' link of descriptor [index]; only meaningful for split rings */
+#define vq_set_next(_vq, index, nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/* OR extra flag bits into descriptor [index] for either ring layout */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/* Overwrite the flags of descriptor [index] for either ring layout */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Leading (addr, len) part shared by split and packed descriptors */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* Layout-agnostic view of a descriptor table; vq_type selects which member is valid */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..2c5e47f996
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/*
+ * Debug helper: hex-dump 'len' bytes at 'data', 16 bytes per row with a
+ * 4-digit hex offset column. 'text' (optional) is printed as a prefix.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int pos;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (pos = 0; pos < len; pos++) {
+		if ((pos % 16) == 0)
+			printf("\n%04X:", pos);
+		printf(" %02X", data[pos]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter's driver structure in the global g_p_drv[] table,
+ * keyed by adapter number. Logs a warning if the slot is already occupied.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/* g_p_drv[] has NUM_ADAPTER_MAX entries, so the last valid index is
+	 * NUM_ADAPTER_MAX - 1. The check must be '>=': the original '>'
+	 * allowed adapter_no == NUM_ADAPTER_MAX, an out-of-bounds write.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up the registered driver structure whose PCI domain and bus match
+ * 'addr'. Returns NULL when no adapter matches. Holds hwlock during the scan.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *found = NULL;
+	int idx;
+
+	rte_spinlock_lock(&hwlock);
+	for (idx = 0; idx < NUM_ADAPTER_MAX; idx++) {
+		struct drv_s *cand = g_p_drv[idx];
+
+		if (!cand)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus) {
+			found = cand;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return found;
+}
+
+/* Decode a packed pciident into an rte_pci_addr and look up its driver struct */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter info for the adapter identified by 'pciident' to 'pfh'.
+ * Returns the result of nt4ga_adapter_show_info(), or -1 if no adapter
+ * with that identity is registered.
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	/* get_pdrv_from_pciident() returns NULL for an unknown adapter;
+	 * the original dereferenced it unconditionally (NULL deref).
+	 */
+	if (!p_drv)
+		return -1;
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS (doorbell) handle for the adapter at 'pci_addr', or NULL
+ * (with an error log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		nthw_dbs_t *p_nthw_dbs = NULL;
+
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+		return NULL;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at 'pci_addr', or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when the adapter is not
+ * registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return FPGA_INFO_PROFILE_UNKNOWN;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.profile;
+}
+
+/*
+ * rte_kvargs handler: parse 'value_str' as an unsigned 32-bit number
+ * (base auto-detected: decimal, 0x-hex or 0-octal) into *extra_args.
+ * Returns 0 on success, -1 on missing or non-numeric input.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *end;
+	/* strtoul instead of strtol: target is unsigned 32-bit */
+	const uint32_t value = strtoul(value_str, &end, 0);
+
+	/* Reject input with no digits at all (strtol previously returned 0) */
+	if (end == value_str)
+		return -1;
+	*(uint32_t *)extra_args = value;
+	return 0;
+}
+
+/* One "port:speed" pair parsed from the port.link_speed device argument */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000
+ * rte_kvargs handler; *extra_args is a cursor (struct port_link_speed **)
+ * that is advanced one entry per parsed pair so repeated options fill an
+ * array. Returns 0 on success, -1 on malformed input.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	/* Port number must be followed by ':' and then the speed */
+	if (*semicol != ':')
+		return -1;
+	/* NOTE(review): strtol results are not range/error checked - TODO confirm */
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* Advance the caller's cursor to the next array slot */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill 'stats' with per-queue and aggregate Rx/Tx counters for the port
+ * behind 'internals'. Triggers a fresh poll of HW statistics first.
+ * Returns 0 on success, -1 on invalid state/arguments.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* NOTE(review): bound check uses '>'; if NUM_ADAPTER_PORTS_MAX is the
+	 * array size (not the last valid index), '>=' would be expected - confirm.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue Rx/Tx software counters for the port and record the
+ * reset timestamp. Serialized against the statistics poller via stat_lck.
+ * Returns 0 on success, -1 on invalid state/arguments.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* NOTE(review): same '>' vs '>=' bound question as dpdk_stats_collect - confirm */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* Map an NT link-speed enum value to the corresponding ETH_SPEED_NUM_xxx value */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex enum value to the DPDK ETH_LINK_*_DUPLEX value (0 if unknown) */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * rte_eth_dev link_update callback: refresh link status/speed/duplex in
+ * eth_dev->data->dev_link. Virtual/override ports report status from the
+ * negotiated vport state; physical ports query the adapter. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		/* Virtual ports have no PHY: status tracks vport negotiation */
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		/* Device stopped: report link down with unknown speed */
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * rte_eth_dev stats_get callback.
+ * Returns the result of dpdk_stats_collect() (0 on success, -1 on error);
+ * the original discarded the error and always returned 0.
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * rte_eth_dev stats_reset callback.
+ * Returns the result of dpdk_stats_reset() (0 on success, -1 on error);
+ * the original discarded the error and always returned 0.
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/* Translate an NT link-speed capability bitmask into the DPDK ETH_LINK_SPEED_* bitmask */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	/* One row per supported speed: NT capability bit -> DPDK capability bit */
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < RTE_DIM(speed_map); i++)
+		if (nt_link_speed_capa & speed_map[i].nt_bit)
+			eth_speed_capa |= speed_map[i].eth_bit;
+
+	return eth_speed_capa;
+}
+
+/* 5-tuple RSS: IP addresses plus TCP/UDP ports */
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * rte_eth_dev dev_infos_get callback: report queue counts, MTU limits,
+ * speed capabilities and RSS offload capabilities for this port.
+ * Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* Inline profile requires a larger minimum MTU */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use key, so it is just a fake key length to
+	 * fit testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet from the virtqueue into an mbuf chain.
+ *
+ * The source packet may span several virtqueue segments (each of size
+ * SG_HW_RX_PKT_BUFFER_SIZE, the first one prefixed by an SG_HDR_SIZE
+ * descriptor header), and the destination mbuf may be too small, in which
+ * case additional mbufs are allocated from mb_pool and chained.
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on error
+ * (mbuf allocation failure or more segments needed than max_segs).
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	/* Skip the NT descriptor header in the first virtqueue segment */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* One iteration per source virtqueue segment */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		if (cpy_size > cpto_size) {
+			/* Source segment does not fit in current mbuf: fill it,
+			 * then chain new mbufs for the remainder.
+			 */
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				/* nb_segs is tracked on the head mbuf only */
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			/* pkt_len of chained mbufs is updated individually;
+			 * the head mbuf's pkt_len was set up front.
+			 */
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* Ran out of allowed segments before the packet was complete */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+/*
+ * Rx burst function (scatter-gather path).
+ *
+ * Fetches up to nb_pkts whole packets (possibly multi-segment) from the
+ * queue's virtqueue, copies them into newly allocated mbufs from the
+ * queue's mempool, fills in FDIR metadata from the NT descriptor header,
+ * and releases the consumed virtqueue segments back to hardware.
+ *
+ * Returns the number of complete packets stored in bufs[].
+ */
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	/* Global shutdown flag - presumably set during teardown; drop all work */
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	/* Clamp to the size of the local hw_recv[] array */
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	/* whole_pkts counts complete packets; the return value counts segments */
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		/* Each packet starts with an NT Rx descriptor header */
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		/* A capture length smaller than the header itself is invalid */
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			/* Fast path: single vq segment that fits in one mbuf */
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				/* Slow path: multi-segment copy with mbuf chaining */
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			/* Start from clean FDIR flags; port marked invalid */
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				/* Tunnel path: encode VNI (color) and port in fdir.hi */
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				/* Flow-stat color: expose it as an FDIR id */
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+/* Common exit: reached on both success and error; returns all consumed
+ * virtqueue segments to hardware.
+ */
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one mbuf chain into virtqueue buffers.
+ *
+ * The mbuf packet may be segmented, and each virtqueue buffer is limited to
+ * SG_HW_TX_PKT_BUFFER_SIZE bytes (the first one already holds an SG_HDR_SIZE
+ * descriptor header), so the copy may span several descriptors chained with
+ * VIRTQ_DESC_F_NEXT.
+ *
+ * Returns the number of virtqueue descriptors used, or -1 if more than
+ * max_segs descriptors would be required.
+ *
+ * Fix vs. previous revision: the two debug printf calls passed uint64_t
+ * values for a "%u" conversion specifier, which is undefined behavior for
+ * variadic arguments; both now use PRIX64.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	/* First virtqueue buffer starts after the NT descriptor header */
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	/* One iteration per source mbuf segment */
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs 0x%" PRIX64 ", dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* Chain the current descriptor to the next one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				if (++cur_seg_num > max_segs)
+					break;
+
+				/* Chained buffers carry no NT header */
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset 0x%" PRIX64 "\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len larger than the sum of data_len of all segments */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* Convert from last used index to descriptor count */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * Tx burst function (scatter-gather path).
+ *
+ * Copies up to nb_pkts mbuf packets into virtqueue buffers, writing an NT
+ * descriptor header per packet (populated only for the Vswitch profile),
+ * frees the mbufs, and hands the filled descriptors to hardware.
+ *
+ * Returns the number of packets actually queued for transmission.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* Global shutdown flag - presumably set during teardown; drop all work */
+	if (kill_pmd)
+		return 0;
+
+	/* Clamp to the size of the local nb_segs_arr[] array */
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad short frames to 60 bytes (minimum Ethernet frame size
+		 * excl. FCS). NOTE(review): the padding bytes are not zeroed
+		 * here - confirm the buffer tail cannot leak stale data.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* Fast path: single mbuf segment into a single vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* Slow path: chained copy across several descriptors */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		/* Data has been copied out; the mbuf can be returned already */
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* Hand the filled descriptors to hardware (no-op when nothing copied) */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO-map the memory backing one HW virtio queue: a 1MB
+ * control area for the descriptor rings plus num_descr packet buffers of
+ * buf_size bytes, tracked in hwq->pkt_buffers.
+ *
+ * Preferred layout is one combined allocation mapped as a single 1G IOMMU
+ * region with identical HPA/IOVA offsets; if the allocation crosses a 1G
+ * boundary (or HPA offset differs), it falls back to two separately mapped
+ * allocations.
+ *
+ * Returns 0 on success, -1 on failure (all local allocations freed).
+ *
+ * Fix vs. previous revision: the nt_vfio_dma_map() failure paths returned
+ * without freeing the rte_malloc'ed areas (and the fallback shadowed the
+ * outer 'virt' variable); the leaks are closed and the inner allocation
+ * renamed ctrl_virt.
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Fall back: separate 1MB control area mapped on its own */
+		size = 0x100000;
+		void *ctrl_virt = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!ctrl_virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, ctrl_virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = ctrl_virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       ctrl_virt, iova_addr);
+
+		/* Bookkeeping array describing each packet buffer */
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		/* Separately mapped area holding the actual packet buffers */
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	/* Optimal path: map the whole combined allocation as one 1G region */
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	/* Packet buffers start right after the 1MB control area */
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Mark the hw queue container as no longer owning a VF mapping */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq && hwq->vf_num != 0)
+		hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the VFIO DMA region, then free
+ * the buffer-tracking array and the control/ring allocation.
+ *
+ * Returns 0 on success, -1 if the VFIO unmap fails.
+ * NOTE(review): on unmap failure the rte_malloc'ed areas are intentionally
+ * not freed (memory is still DMA-mapped) - confirm this is the desired
+ * trade-off vs. leaking on teardown.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	int vf_num = hwq->vf_num;
+
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+
+	/* Unmaps ONE_G_SIZE: matches the optimal-path mapping size */
+	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+/* ethdev tx_queue_release callback: tear down the backing HW virtio queue */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *pmd = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&pmd->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* ethdev rx_queue_release callback: tear down the backing HW virtio queue */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *pmd = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&pmd->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Running total of HW queues handed out across all ports */
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+
+	int first_free = num_queues_allocated;
+
+	num_queues_allocated += num;
+	return first_free;
+}
+
+/*
+ * ethdev rx_queue_setup callback (scatter-gather path).
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool/buffer bookkeeping is done;
+ * for other ports the backing HW virtio queue is also allocated, IOMMU-mapped
+ * and registered with the DBS module.
+ *
+ * Returns 0 on success, -1 on HW queue allocation failure.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override-type ports need no HW virtio queue of their own */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* Usable data room per mbuf, headroom excluded */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	/* NOTE(review): return value is not checked - rx_q->vq may be NULL
+	 * here and is only dereferenced later in the rx burst path; confirm
+	 * failure handling is intended to be deferred.
+	 */
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * ethdev tx_queue_setup callback (scatter-gather path).
+ *
+ * Computes the HW bypass target id for the queue, allocates and IOMMU-maps
+ * the backing HW virtio queue, registers it with the DBS module and, for
+ * physical ports, enables the port (plus the hidden second port in LAG mode).
+ *
+ * Returns 0 on success, -1 on invalid queue id or HW allocation failure.
+ *
+ * Fix vs. previous revision: the queue-id bounds check used '>' (allowing
+ * id == nb_tx_queues, one past the end) and txq_scg[] was indexed before
+ * the check; the check is now '>=' and performed first.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Validate before indexing txq_scg[]: queue ids are 0-based */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	/* Override-type ports need no HW virtio queue of their own */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* Route all virtual-port queues of this port to its vport in EPP */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * eth_dev_ops mtu_set callback used for the inline profile.
+ * Only physical ports accept an MTU here; the value must lie in
+ * [MIN_MTU_INLINE, MAX_MTU]. Returns 0 on success, -EINVAL otherwise.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *priv =
+		(struct pmd_internals *)dev->data->dev_private;
+	int status = -1;
+
+	if (priv->type == PORT_TYPE_PHYSICAL &&
+			mtu >= MIN_MTU_INLINE && mtu <= MAX_MTU)
+		status = flow_set_mtu_inline(priv->flw_dev, priv->port, mtu);
+
+	return status == 0 ? 0 : -EINVAL;
+}
+
+/*
+ * eth_dev_ops mtu_set callback (vswitch profile).
+ *
+ * Programs the EPP block with the new MTU. For both virtual and physical
+ * ports the exception path queue (rxq_scg[0]) is always set to MAX_MTU;
+ * the requested @mtu is then applied per virtual queue (virtual ports) or
+ * to the physical port itself.
+ *
+ * NOTE(review): on EPP failure this returns whatever nthw_epp_set_mtu
+ * returned (sign unknown from here) rather than a guaranteed negative
+ * errno as ethdev expects — confirm the callee's convention.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int retval = EINVAL;
+
+	/* Reject MTU outside the supported range up front. */
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* Apply the requested MTU to every virtual port queue. */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		/* Then the physical port itself gets the requested MTU. */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		/* PORT_TYPE_OVERRIDE (or unknown): MTU cannot be set. */
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* eth_dev_ops rx_queue_start: record the RX queue as started in ethdev state. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* eth_dev_ops rx_queue_stop: record the RX queue as stopped in ethdev state. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* eth_dev_ops tx_queue_start: record the TX queue as started in ethdev state. */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* eth_dev_ops tx_queue_stop: record the TX queue as stopped in ethdev state. */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * eth_dev_ops mac_addr_remove: clear the MAC address stored at @index
+ * in the port's address table. Out-of-range indices are logged and ignored.
+ */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+		return;
+	}
+
+	{
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+	}
+}
+
+/*
+ * eth_dev_ops mac_addr_add: store @mac_addr at @index in the port's
+ * address table. Returns 0 on success, -1 when @index is out of range.
+ */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		eth_addrs[index] = *mac_addr;
+		return 0;
+	}
+
+	{
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+	}
+	return -1;
+}
+
+/* eth_dev_ops mac_addr_set: the primary MAC lives in slot 0 of the table. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * eth_dev_ops set_mc_addr_list: replace the port's multicast address list.
+ * Unused table slots are cleared. Returns 0 on success, -1 when the list
+ * exceeds the table capacity.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	/*
+	 * Fix off-by-one: the table holds NUM_MULTICAST_ADDRS_PER_PORT
+	 * entries, so a list of exactly that many addresses is valid and
+	 * must not be rejected (the old check used ">=").
+	 */
+	if (nb_mc_addr > NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (> %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	/* Copy the new list; zero any remaining table slots. */
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * eth_dev_ops dev_configure: no per-configure hardware programming is
+ * needed; record that probing finished and reflect the fixed promiscuous
+ * behavior of the device in the ethdev state.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device ALWAYS runs in promiscuous mode. State that directly
+	 * instead of the obfuscated "x ^= ~x" (which sets every bit of the
+	 * field and only happens to read as true).
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * eth_dev_ops dev_start callback.
+ *
+ * Virtual/override ports are simply marked link-up. Physical ports wait
+ * up to 5 seconds for the FPGA port link (transmitting before the port is
+ * ready produces garbage on the wire), then apply any requested loopback
+ * mode (bit 0 = host loopback, bit 1 = line loopback).
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			/* 50 iterations x 100 ms = 5 s total wait */
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		if (internals->lpbk_mode) {
+			/* lpbk_mode is a bit mask; both modes may be requested. */
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * eth_dev_ops dev_stop callback.
+ *
+ * For non-virtual ports: release every managed RX/TX virt-queue, then take
+ * the port administratively down, clear link status/speed and disable any
+ * loopback. Finally the ethdev link state is marked down for all types.
+ */
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
+		struct ntnic_tx_queue *tx_q = internals->txq_scg;
+
+		uint q;
+
+		/* Release queues before touching port state. */
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(rx_q[q].vq);
+
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(tx_q[q].vq);
+
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+/*
+ * eth_dev_ops dev_set_link_up: administratively enable a physical port.
+ * Virtual/override ports have no link to manage and succeed immediately.
+ */
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	const int port = internals->if_index;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(&internals->p_drv->ntdrv.adapter_info, port,
+				 true);
+
+	return 0;
+}
+
+/*
+ * eth_dev_ops dev_set_link_down callback for physical ports; virtual and
+ * override ports succeed without action.
+ *
+ * NOTE(review): this clears the link *status* while set_link_up sets the
+ * *adm state* — the asymmetry may be intentional (forcing link down without
+ * disabling the port) but should be confirmed against nt4ga_port semantics.
+ */
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+/*
+ * eth_dev_ops dev_close callback.
+ *
+ * Tears down this port's LAG thread/config and frees its private data,
+ * then releases the ethdev port. When the last port of the driver closes
+ * (n_eth_dev_init_count reaches 0), the stat/flm threads are joined, the
+ * adapter is deinitialized and the driver context freed.
+ */
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup: stop and join the LAG thread before freeing config. */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 *
+	 * Fix: the old condition "(!p_drv->n_eth_dev_init_count && p_drv)"
+	 * null-checked p_drv AFTER it had already been dereferenced above;
+	 * the check was dead and misleading, so it is dropped.
+	 */
+	if (p_drv->n_eth_dev_init_count == 0) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+/*
+ * eth_dev_ops fw_version_get: format the FPGA image id as "TTT-PPPP-VV-RR".
+ * Returns 0 when the string fits in @fw_size, otherwise the number of bytes
+ * (incl. NUL) the caller must provide. Virtual/override ports report nothing.
+ */
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int written =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+
+	/* Truncated: report the space the caller must provide. */
+	if ((size_t)written >= fw_size)
+		return written + 1;
+
+	return 0;
+}
+
+/* eth_dev_ops xstats_get: fetch extended statistics under the stat lock. */
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	int count;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	count = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch,
+				internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return count;
+}
+
+/* eth_dev_ops xstats_get_by_id: fetch selected xstats under the stat lock. */
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	int count;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	count = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+				      p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch,
+				      internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return count;
+}
+
+/*
+ * eth_dev_ops xstats_reset: clear HW xstats under the stat lock, then
+ * reset the DPDK-level counters for this interface.
+ */
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat,
+			  p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/*
+ * eth_dev_ops xstats_get_names: fill in the extended-statistics names.
+ * Fix: eth_dev was annotated __rte_unused although the body dereferences
+ * it to reach the adapter's statistics context.
+ */
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+/* eth_dev_ops xstats_get_names_by_id: names for a selected set of xstats. */
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	nt4ga_stat_t *p_nt4ga_stat =
+		&internals->p_drv->ntdrv.adapter_info.nt4ga_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch);
+}
+
+/* eth_dev_ops flow_ops_get: expose this PMD's rte_flow operations table. */
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+/*
+ * eth_dev_ops promiscuous_enable: a no-op success — the hardware is
+ * permanently promiscuous, so there is nothing to program.
+ */
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+/*
+ * eth_dev_ops rss_hash_update: program the requested RSS hash functions
+ * into the default HSH recipe (index 0) and flush it to hardware.
+ * Returns 0 on success; the set/flush results are OR-combined.
+ */
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+/*
+ * eth_dev_ops rss_hash_conf_get: report the active RSS hash functions.
+ * The PMD exposes no RSS key, so only rss_hf is filled in.
+ */
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
+/*
+ * ethdev callback table for the NTNIC PMD.
+ * Non-const on purpose: mtu_set is NULL here and presumably assigned
+ * per profile during init (see dev_set_mtu / dev_set_mtu_inline) — confirm
+ * at the probe site. promiscuous_disable stays NULL: the device is always
+ * promiscuous and the mode cannot be turned off.
+ */
+static struct eth_dev_ops nthw_eth_dev_ops = {
+	.dev_configure = eth_dev_configure,
+	.dev_start = eth_dev_start,
+	.dev_stop = eth_dev_stop,
+	.dev_set_link_up = eth_dev_set_link_up,
+	.dev_set_link_down = eth_dev_set_link_down,
+	.dev_close = eth_dev_close,
+	.link_update = eth_link_update,
+	.stats_get = eth_stats_get,
+	.stats_reset = eth_stats_reset,
+	.dev_infos_get = eth_dev_infos_get,
+	.fw_version_get = eth_fw_version_get,
+	.rx_queue_setup = eth_rx_scg_queue_setup,
+	.rx_queue_start = eth_rx_queue_start,
+	.rx_queue_stop = eth_rx_queue_stop,
+	.rx_queue_release = eth_rx_queue_release,
+	.tx_queue_setup = eth_tx_scg_queue_setup,
+	.tx_queue_start = eth_tx_queue_start,
+	.tx_queue_stop = eth_tx_queue_stop,
+	.tx_queue_release = eth_tx_queue_release,
+	.mac_addr_remove = eth_mac_addr_remove,
+	.mac_addr_add = eth_mac_addr_add,
+	.mac_addr_set = eth_mac_addr_set,
+	.set_mc_addr_list = eth_set_mc_addr_list,
+	.xstats_get = eth_xstats_get,
+	.xstats_get_names = eth_xstats_get_names,
+	.xstats_reset = eth_xstats_reset,
+	.xstats_get_by_id = eth_xstats_get_by_id,
+	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
+	.mtu_set = NULL,
+	.mtr_ops_get = eth_mtr_ops_get,
+	.flow_ops_get = _dev_flow_ops_get,
+	.promiscuous_disable = NULL,
+	.promiscuous_enable = promiscuous_enable,
+	.rss_hash_update = eth_dev_rss_hash_update,
+	.rss_hash_conf_get = rss_hash_conf_get,
+};
+
+/*
+ * Map a link speed given in Mbps to the corresponding NT enum value.
+ * Unrecognized speeds map to NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ *
+ * Polls flow-matcher statistics for the adapter until shutdown. Waits for
+ * port configuration (eth_base becoming non-NULL) before entering the
+ * poll loop.
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	/*
+	 * Fix: also honor b_shutdown while waiting, otherwise this thread
+	 * can never be joined if shutdown is requested before any port is
+	 * configured.
+	 */
+	while (p_flow_nic_dev->eth_base == NULL && !p_drv->ntdrv.b_shutdown)
+		usleep(1000000);
+
+	if (p_flow_nic_dev->eth_base == NULL) {
+		/* Shut down before any port configuration arrived. */
+		return NULL;
+	}
+
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	/* Poll FLM counters; back off briefly when nothing was updated. */
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ *
+ * Every ~10 ms: trigger a statistics DMA, wait for the timestamp word to
+ * change from the (uint64_t)-1 sentinel (i.e. DMA completion), then collect
+ * counters under the stat lock. If the DMA appears frozen, periodically
+ * dump RMC status registers for diagnostics. Runs until b_shutdown.
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		/* 100 * 100 us = 10 ms between statistics snapshots. */
+		usleep(100 * 100);
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		/* Wait for DMA completion: timestamp leaves the -1 sentinel. */
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			/* Rate-limited diagnostics if the DMA looks frozen. */
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
+/*
+ * Representor bookkeeping shared across probed devices.
+ * NOTE(review): field roles below are inferred from their names —
+ * confirm against setup_virtual_pf_representor_base and its callers.
+ */
+static struct {
+	struct rte_pci_device *vpf_dev; /* presumably the backing PF's PCI device */
+	struct rte_eth_devargs eth_da; /* parsed representor devargs */
+	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED]; /* per virtual port */
+	uint16_t pf_backer_port_id; /* presumably ethdev port id of the PF */
+} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = {{ 0 }};
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id    = i;
+			queue_ids[i].hw_id = start_queue + i;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Driver-wide teardown: release all Rx/Tx scatter-gather virtqueues and
+ * detach the VFIO exception-path mapping.
+ *
+ * NOTE(review): eth_dev is unused — teardown walks the global
+ * pmd_intern_base list and the rel_virt_queue[] table, so this releases
+ * the queues of every port, not just the one passed in.
+ *
+ * Returns 0 always.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Release the managed Tx then Rx queues of every registered port,
+	 * in reverse queue order.
+	 */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release queues registered for deferred release (VF/vDPA path);
+	 * the rx/managed flags select the matching release routine.
+	 */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	/* Detach the exception-path device from VFIO */
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: flag the shutdown thread on ctrl+C.  Any other signal
+ * is forwarded to the previously installed handler.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		kill_pmd = 1;
+		return;
+	}
+
+	/* Not ours: restore the old handler and re-deliver the signal */
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/*
+ * Background thread that waits for the SIGINT handler to set kill_pmd,
+ * then runs the driver teardown and re-raises SIGINT with the original
+ * handler restored.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	/* Poll the flag set by signal_handler_func_int() */
+	for (;;) {
+		if (kill_pmd)
+			break;
+		usleep(100000);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	/* Hand control back to the original SIGINT handler */
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the ctrl+C shutdown machinery: hook SIGINT and spawn the
+ * shutdown thread, then estimate the TSC frequency once.
+ *
+ * Returns 0 on success, -1 if the shutdown thread cannot be created
+ * (the original SIGINT handler is restored in that case).
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+
+	/* Fix: the pthread_create() result was previously ignored; without
+	 * the thread, a SIGINT would set kill_pmd but never trigger teardown.
+	 */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot create shutdown thread\n",
+		       __func__);
+		signal(SIGINT, previous_handler);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/* Sample the TSC over 10 ms and scale by 100 to approximate 1 s */
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point: validate the EAL environment (PCI, VFIO IOMMU,
+ * hugepages, IOVA mode), filter out PCIe-bifurcation secondary endpoints,
+ * then run the per-device initialization and install the shutdown hook.
+ *
+ * Returns the result of nthw_pci_dev_init(), or -1 on an unsupported
+ * environment.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* PCI bus support is mandatory */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	/* VFIO must run with a real IOMMU; no-IOMMU mode is rejected */
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	/* IOVA-as-PA is only a performance recommendation, not a hard error */
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	/* Hugepages are mandatory */
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/* On bifurcated NT200A01/NT50B01 adapters only the primary PCIe
+	 * endpoint (subsystem id 0x01) is driven; secondary endpoints are
+	 * skipped.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	/* NOTE(review): init_shutdown() runs even when dev init failed;
+	 * presumably harmless, but verify the teardown path tolerates it.
+	 */
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/*
+ * PCI remove entry point: delegate to the generic ethdev PCI removal
+ * helper with this PMD's deinit callback.
+ */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	res = rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+	return res;
+}
+
+/*
+ * Logging backend init: register one EAL log type per NT log module,
+ * each defaulting to INFO level.  Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END;
+			++module) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[idx],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Logging backend sink: translate an NT severity/module pair to the RTE
+ * equivalents and forward the message to rte_vlog().
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	/* Severity mapping; anything unknown is logged at DEBUG */
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	/* Known NT modules map to their registered EAL log type; any other
+	 * value is passed through to rte_vlog() unchanged.
+	 */
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		rte_module =
+			(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	} else {
+		rte_module = module;
+	}
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/*
+ * Logging backend query: report whether DEBUG is enabled for an NT
+ * module.  Returns 1/0, or -1 when the module id is out of range.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+
+	const int idx = NT_LOG_MODULE_INDEX(module);
+
+	return rte_log_get_level(nt_log_module_logtype[idx]) == RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* Constructor: wire the PMD's logging abstraction to the RTE backend */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = {
+		.init = &nt_log_init_impl,
+		.log = &nt_log_log_impl,
+		.is_debug = &nt_log_is_debug_impl,
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor for the ntnic PMD; matched against the device
+ * table and registered with the PCI bus below.
+ */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+/* Register the driver, its PCI id table, and the kernel module (vfio-pci)
+ * dependency with the EAL.
+ */
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Track a virtqueue for deferred release at deinit time.
+ * Returns 0 when the queue was recorded, -1 when the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	/* Store the queue in the first free tracking slot */
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Remove a virtqueue from the deferred-release table.
+ * Returns 0 when found and cleared, -1 otherwise.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Find the port instance bound to a vhost id.
+ * Returns the matching internals, or NULL when none is registered.
+ */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *it;
+
+	/* Linear scan of the global instance list */
+	for (it = pmd_intern_base; it != NULL; it = it->next) {
+		if (it->vhid == vhid)
+			return it;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the port instance bound to a vhost socket path.
+ * Returns the matching internals, or NULL when none is registered.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		/* Fix: the search trace previously went to stdout via raw
+		 * printf(); route it through the driver log at DEBUG level
+		 * like the rest of the file.
+		 */
+		NT_LOG(DBG, VDPA,
+		       "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path,
+		       strcmp(intern->vhost_path, path));
+		if (strcmp(intern->vhost_path, path) == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues=[port:nvq,port:nvq;...]" devargs fragment and fill
+ * in the per-port virtqueue-count table.  The input string is modified
+ * in place; malformed input (missing brackets) is silently ignored, as
+ * are out-of-range port/queue values.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	size_t len;	/* fix: strlen() returns size_t, was stored in int */
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Locate the opening bracket of the mapping list */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Locate the closing bracket and cut the string there */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Fix: use the reentrant strtok_r() instead of strtok(), whose
+	 * hidden global state is unsafe in a multithreaded PMD.
+	 */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			/* Accept only in-range port and queue counts */
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor-related devargs on a PF device: extract the optional
+ * "portqueues" mapping and the standard "representor=[...]" list, and
+ * record both in the global 'rep' state.
+ *
+ * Returns the number of representor ports found (0 when none), or -1 on
+ * a devargs parse error (rte_errno is set).
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			/* rte_eth_devargs_parse() returns a negative errno */
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		/* Remember which PF carries the representor definitions */
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual (representor) port, attach optional
+ * private data, and assign its representor id — either the next id from
+ * the PF's representor list or the VF number from VFIO.
+ *
+ * On return *n_vq holds the number of virtqueues configured for that
+ * representor (default 1).  Returns NULL on allocation failure.
+ *
+ * NOTE(review): next_rep_p is function-static and unsynchronized —
+ * presumably probing is single-threaded; confirm before concurrent use.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Representors declared on a PF consume ids from its list in order;
+	 * otherwise the VF number identifies the representor.
+	 */
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* Per-port queue count from the "portqueues" devargs, default 1 */
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the generic device name of a PCI device, or NULL when absent */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev && dev->device.name) ? dev->device.name : NULL;
+}
+
+/* Devargs keys accepted by the virtual-port representor path */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and initialize a virtual-port (representor) ethdev on top of a
+ * PF: parse per-port devargs, allocate the ethdev and its private data,
+ * allocate/attach HW queues according to the FPGA profile (vswitch vs.
+ * inline), start the vDPA relay, and link the instance into the global
+ * pmd_intern_base list.
+ *
+ * Returns 0 on success, -1 on failure (allocated data is freed).
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* Parse the optional "vlan" and "sep" devargs for this port */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				rte_kvargs_free(kvlist);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				rte_kvargs_free(kvlist);
+				goto error;
+			}
+		}
+
+		/* Fix: kvlist was never freed (leaked on every call) */
+		rte_kvargs_free(kvlist);
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live in [MAX_NTNIC_PORTS, VIRTUAL_TUNNEL_PORT_OFFSET) */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		goto error;	/* fix: was "return -1", leaking 'data' */
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id    = i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* vswitch: create a dedicated flow eth dev for this port */
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* inline: attach this port's queues to the PF's flow dev */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* Fix: bound the vhost socket path with snprintf (was sprintf) */
+	if (!separate_socket) {
+		snprintf(path, sizeof(path), "%sstdvio%i", DVIO_VHOST_DIR_NAME,
+			 port);
+	} else {
+		snprintf(path, sizeof(path), "%sstdvio%i/stdvio%i",
+			 DVIO_VHOST_DIR_NAME, port, port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append this instance to the global internals list */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	__atomic_store_n(&internals->vhid, vhid, __ATOMIC_RELAXED);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	/* rte_free(NULL) is a no-op, so the former NULL guards are dropped */
+	rte_free(data);
+	rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Queues belong to VF/vDPA for PORT_TYPE_OVERRIDE; never deliver packets. */
+	uint16_t nb_rx = 0;
+
+	return nb_rx;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Queues belong to VF/vDPA for PORT_TYPE_OVERRIDE; never accept packets. */
+	uint16_t nb_tx = 0;
+
+	return nb_tx;
+}
+
+/*
+ * Create the virtual-function DPDK ethdev for a PCI device and install the
+ * Rx/Tx burst handlers; PORT_TYPE_OVERRIDE ports get dummy (no-op) handlers
+ * because their queues are serviced by VF/vDPA instead of SCG.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev;
+	struct pmd_internals *internals;
+	int is_override;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = eth_dev->data->dev_private;
+	is_override = (internals->type == PORT_TYPE_OVERRIDE);
+
+	eth_dev->rx_pkt_burst = is_override ? eth_dev_rx_scg_dummy : eth_dev_rx_scg;
+	eth_dev->tx_pkt_burst = is_override ? eth_dev_tx_scg_dummy : eth_dev_tx_scg;
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual-function ethdev created for a PCI device: close all
+ * vDPA devices, release the ethdev entry and its private data.
+ *
+ * Returns 0 on success, -1 if pci_dev is NULL or no matching ethdev exists.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/*
+	 * Look up the ethdev by device name. The original code called
+	 * rte_vdev_device_name() on a struct rte_pci_device *, which is a
+	 * vdev-bus API and expects struct rte_vdev_device * — use the generic
+	 * device name of the PCI device instead.
+	 */
+	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Hash-distribution weight given to port 0 (port 1 implicitly gets the rest). */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* LAG_NO_TX is a zero weight; the *_INDEX values are port/hash selectors
+ * passed to lag_set_config()/lag_set_port_block().
+ */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* No-op LAG transition handler: nothing to reconfigure, always succeeds. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	const int rc = 0;
+
+	return rc;
+}
+
+/* Both links up: distribute output 50/50 between the two ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 is up: send all output through port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 is up: send all output through port 1. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/* No link on either port: set both the port 0 weight and the hash weight to
+ * zero. Returns the sum of the two lag_set_config() results (0 on success).
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	int rc;
+
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	rc = lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
+	rc += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
+
+	return rc;
+}
+
+/* Read and log the physical link status of @port via the adapter info. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *adapter;
+	bool up;
+
+	adapter = &lag_config->internals->p_drv->ntdrv.adapter_info;
+	up = nt4ga_port_get_link_status(adapter, port);
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, up);
+
+	return up;
+}
+
+/* Combine both link states into a 2-bit value: port 0 is bit 0 (LSB),
+ * port 1 is bit 1 — matches enum lag_state_e.
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t p0_up = lag_get_link_status(config, 0) ? 1 : 0;
+	const uint8_t p1_up = lag_get_link_status(config, 1) ? 1 : 0;
+
+	return (p1_up << 1) | p0_up;
+}
+
+/*
+ * Make the configured primary port the active one: the primary receives
+ * 100% of the hash distribution and Rx is blocked on the backup port.
+ * Returns the sum of the underlying configuration results (0 on success).
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	const int primary_is_port0 = (config->primary_port == 0);
+	/* Port 0 weight is 100 when it is the primary, otherwise 0. */
+	const uint8_t port_0_distribution = primary_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		primary_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+	int rc;
+
+	rc = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	rc += lag_set_port_block(0, blocked_port);
+
+	return rc;
+}
+
+/*
+ * Make the configured backup port the active one: the backup receives
+ * 100% of the hash distribution and Rx is blocked on the primary port.
+ * Returns the sum of the underlying configuration results (0 on success).
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	const int backup_is_port0 = (config->backup_port == 0);
+	/* Port 0 weight is 100 when it is the backup, otherwise 0. */
+	const uint8_t port_0_distribution = backup_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		backup_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+	int rc;
+
+	/* Tx only on the backup port */
+	rc = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	rc += lag_set_port_block(0, blocked_port);
+
+	return rc;
+}
+
+/*
+ * Active/backup LAG monitor loop: starts with the primary port active and
+ * polls link status every 500 ms, failing over to the backup port while the
+ * primary is down and failing back when it recovers. Runs until
+ * config->lag_thread_active is cleared; always returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	/* 1 while traffic has been switched to the backup port. */
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			/* NOTE: the backup status is read (and logged) even
+			 * when the backup is already active.
+			 */
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Handler invoked on a LAG state transition. */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	/* Binary literals (0b..) are a GCC extension before C23; use
+	 * standard constants with the bit pattern in the comment.
+	 */
+	P0DOWN_P1DOWN = 0, /* 0b00 */
+	P0UP_P1DOWN = 1,   /* 0b01 */
+	P0DOWN_P1UP = 2,   /* 0b10 */
+	P0UP_P1UP = 3      /* 0b11 */
+};
+
+/* One row of the state-transition table: (src, dst) -> action. */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/*
+ * Full transition table for the active/active state machine: all 16
+ * (src, dst) pairs over the four link states are listed, so a lookup
+ * can only miss if a state value outside the enum is produced.
+ * NOTE(review): the table is never modified and appears to be file-local;
+ * presumably it should be 'static const' — confirm no other translation
+ * unit references it.
+ */
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Find the handler for the (current_state -> new_state) transition in the
+ * actions[] table. Returns NULL if no entry matches (callers must check).
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	uint32_t i;
+
+	/* Element-based sizeof: stays correct if the element type changes. */
+	for (i = 0; i < sizeof(actions) / sizeof(actions[0]); i++) {
+		if (actions[i].src_state == current_state &&
+				actions[i].dst_state == new_state)
+			return actions[i].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active (802.3ad-style) LAG monitor loop: starts balanced 50/50 with
+ * no ports blocked, then polls both links every 500 ms and runs the handler
+ * from the transition table for each state change. Runs until
+ * config->lag_thread_active is cleared; always returns 0.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		/*
+		 * lookup_action() returns NULL for an unknown transition;
+		 * calling a NULL function pointer is undefined behavior, so
+		 * guard the call (defensive — the table covers all 16 pairs).
+		 */
+		action = lookup_action(current_state, ports_status);
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry point: dispatches to the monitor loop for the
+ * configured bonding mode. Always returns NULL (pthread signature).
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		/* Report through the driver log like the rest of this file
+		 * instead of writing directly to stderr.
+		 */
+		NT_LOG(ERR, ETHDEV, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..ee0d84ce82
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/* NOTE(review): SG_HDR_SIZE is also defined earlier in this header with the
+ * same value (12); the duplicate is harmless but one copy should be removed.
+ */
+#define SG_HDR_SIZE 12
+
+/* Scatter-gather Rx descriptor header, 12 bytes (see size check below). */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Scatter-gather Tx descriptor header, 12 bytes (see size check below). */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size. */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+typedef void *handle_t;
+
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_rx_descr;
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_tx_descr;
+	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count;
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* management thread (lag_management) */
+	uint8_t lag_thread_active; /* cleared to stop the monitor loop */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev;
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode;
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config;
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	struct pmd_internals *next;
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+struct rte_flow {
+	void *flw_hdl;
+	int used;
+	uint32_t flow_stat_id;
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/* NOTE(review): these file-scope statics live in a header, so every .c file
+ * including it gets its own private copy of the tables and this flag —
+ * confirm the header is only included from a single translation unit.
+ */
+static int convert_tables_initialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+/* RTE item/action enum value -> flow-engine enum value; -1 = unsupported. */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate a flow-engine error into an rte_flow_error. NONE/SUCCESS map to
+ * RTE_FLOW_ERROR_TYPE_NONE, everything else to UNSPECIFIED. A NULL @error
+ * is allowed (nothing is written). Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+	error->type = (flow_error->type == FLOW_ERROR_NONE ||
+		       flow_error->type == FLOW_ERROR_SUCCESS) ?
+		      RTE_FLOW_ERROR_TYPE_NONE :
+		      RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+/*
+ * Allocate a free flow-stat slot by linear probing from (mark mod table
+ * size). Stores mark+1 so that 0 means "free". Caller must hold flow_lock.
+ * NOTE(review): if every slot is occupied this loop never terminates —
+ * confirm an upper bound on concurrent flows guarantees a free slot.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	/* MAX_COLOR_FLOW_STATS is a power of two, so & works as modulo. */
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow-stat id under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/* Free a flow-stat slot (caller holds flow_lock); out-of-range ids ignored. */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time population of the RTE -> flow-engine conversion tables used by
+ * CNV_TO_ELEM()/CNV_TO_ACTION(). All unlisted enum values stay -1
+ * (unsupported). Not thread-safe by itself; repeated calls are no-ops.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* Flow item (match element) mappings. */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug names mirroring the item table above. */
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* Flow action mappings. */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug names mirroring the action table above. */
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Interpret a raw packet byte blob (as supplied by RAW_ENCAP/RAW_DECAP
+ * actions) as a stack of protocol headers and emit one flow_elem per
+ * recognized header.
+ *
+ * @param data     Packet bytes, starting with an Ethernet header.
+ * @param preserve Optional mask bytes with the same layout as @data,
+ *                 or NULL when no mask is supplied.
+ * @param size     Number of valid bytes in @data (and @preserve).
+ * @param out      Output array; receives the parsed elements followed by
+ *                 a FLOW_ELEM_TYPE_END terminator.
+ *
+ * @return Number of elements written (including the end marker), or -1
+ *         on truncated input or an unsupported protocol.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN: accept any number of stacked 802.1Q/802.1ad tags */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* Byte 9 of the IPv4 header is the protocol field */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* Byte 6 of the IPv6 header is the next-header field */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* UDP destination port 2152 signals a GTP-U payload */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is in 4-byte units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero-length extension would never advance
+				 * pkti and would read data[pkti - 1] below:
+				 * reject it to avoid an endless loop and an
+				 * out-of-bounds read on malformed input.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last byte of the extension chains the next one */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing bytes that match no known header are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Fill a converted attribute set from the RTE flow attributes.
+ * Only group and priority are carried over; everything else is zeroed.
+ * Always succeeds (returns 0).
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	/* Start from a fully zeroed attribute block. */
+	memset(&attribute->attr, 0x0, sizeof(attribute->attr));
+
+	if (attr == NULL)
+		return 0;
+
+	attribute->attr.group = attr->group;
+	attribute->attr.priority = attr->priority;
+	return 0;
+}
+
+/*
+ * Convert an RTE flow item list into the filter API flow_elem list.
+ *
+ * The raw spec/mask pointers from each rte_flow_item are handed over
+ * unchanged; only the item type is translated via CNV_TO_ELEM (with a
+ * special case mapping NT_RTE_FLOW_ITEM_TYPE_TUNNEL to
+ * FLOW_ELEM_TYPE_TUNNEL). Ranges (item->last) are rejected.
+ *
+ * @param match    Output; match->flow_elem receives the converted list.
+ * @param items    RTE item array, terminated by RTE_FLOW_ITEM_TYPE_END.
+ * @param max_elem Capacity of match->flow_elem.
+ *
+ * @return 0 on success, -1 on unknown item type, range use, or overflow.
+ */
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	/* The conversion tables are built lazily on first use. */
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Walk the item list until the END element (or an error). */
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+			/*
+			 * Debug-only dump of the item being converted.
+			 * NOTE(review): elem_list_str is indexed with the raw
+			 * RTE item type guarded only by >= 0; confirm the
+			 * table covers every positive value that can reach
+			 * here (e.g. NT_RTE_FLOW_ITEM_TYPE_TUNNEL).
+			 */
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			/* Hand the raw spec/mask pointers over unchanged. */
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	/* type < 0 can only happen if the loop exited on an error path. */
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE action list to filter flow actions for the vswitch
+ * (OVS full offload) profile.
+ *
+ * A MARK action is guaranteed to be present in the output so the FPGA
+ * can account flow (color) statistics: an explicit RTE MARK is mapped
+ * through create_flow_stat_id(); otherwise one is inserted in front of
+ * the END terminator.
+ *
+ * @param action       Output; flow_actions/flow_rss/tun_def/mark filled.
+ * @param actions      RTE action array, terminated by
+ *                     RTE_FLOW_ACTION_TYPE_END.
+ * @param max_elem     Capacity of action->flow_actions.
+ * @param flow_stat_id Output; the allocated flow stat id, or
+ *                     MAX_COLOR_FLOW_STATS when none was allocated.
+ *
+ * @return 0 on success, -1 on unknown action, unsupported RSS function,
+ *         or output overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1: /* Unreachable: guarded by type >= 0 above */
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/*
+					 * Inserting the mark consumes an extra
+					 * slot; both the mark and the moved END
+					 * terminator must fit in max_elem.
+					 */
+					if (aidx + 1 == max_elem)
+						return -1;
+
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE action list to filter flow actions for the inline
+ * profile.
+ *
+ * RSS, RAW_ENCAP/RAW_DECAP and QUEUE need translation (raw blobs are
+ * parsed by interpret_raw_data(); queue indexes are shifted by
+ * queue_offset for VF/vDPA ports); all other action confs are passed
+ * through unchanged.
+ *
+ * @param action       Output; flow_actions and helper structs filled.
+ * @param actions      RTE action array, terminated by
+ *                     RTE_FLOW_ACTION_TYPE_END.
+ * @param max_elem     Capacity of action->flow_actions.
+ * @param queue_offset Added to every QUEUE action's index.
+ *
+ * @return 0 on success, negative on unknown action, unsupported RSS
+ *         function, bad raw blob, or output overflow.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Shift into the VF/vDPA queue range */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when the handle does not point into the static
+ * nt_flows table, i.e. it is a lower-layer handle that was typecast
+ * to struct rte_flow *.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *table_start = &nt_flows[0];
+	const void *table_end = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *handle = flow;
+
+	return handle < table_start || handle > table_end;
+}
+
+/*
+ * Convert an RTE flow description (attr/items/actions) into the filter
+ * API representation, dispatching the action conversion on the FPGA
+ * profile (inline vs vswitch).
+ *
+ * On failure, *error is populated via rte_flow_error_set()/convert_error()
+ * and -1 is returned; on success 0 is returned and *flow_stat_id holds
+ * the allocated stat id (vswitch profile only).
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Validate dev before any dereference; previously dev->p_drv and
+	 * dev->type were read before this check, making it dead code.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		/* Inline flows in group > 0 are handled by the lower layer */
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback.
+ *
+ * Handles both kinds of flow handles produced by eth_flow_create():
+ * lower-layer handles (typecast, from inline group > 0 flows) and
+ * entries in the static nt_flows table.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/*
+		 * The handle is owned by the lower layer and is released by
+		 * flow_destroy(); it must not be dereferenced afterwards
+		 * (the previous code cleared stat fields through it,
+		 * a use-after-free).
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: convert the RTE description and let the
+ * filter layer check it without programming anything.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+
+	/* Translate the RTE flow description into the filter API form */
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	int status = flow_validate(dev->flw_dev, match.flow_elem,
+				   action.flow_actions, &flow_error);
+
+	if (status < 0)
+		convert_error(error, &flow_error);
+
+	return status;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * Inline-profile flows in group > 0 return the lower-layer handle
+ * directly (typecast to struct rte_flow *); all other flows get a slot
+ * in the static nt_flows table so the PMD can track the stat id and
+ * per-flow statistics.
+ *
+ * Returns NULL on failure, with *error populated.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline group > 0: the lower-layer handle IS the rte_flow handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim the first free slot in the flow table under the lock */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			/*
+			 * NOTE(review): when flow_stat_id is invalid the slot
+			 * is left unclaimed and flow stays NULL; presumably
+			 * the stat id should also be released here — verify
+			 * against delete_flow_stat_id_locked() semantics.
+			 */
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		/* Roll the slot and stat id back if the lower layer failed */
+		if (!flow->flw_hdl) {
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last *global* statistics poll (color counters are
+ * shared by all ports, so only one PMD instance updates them per second).
+ */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull accumulated hardware statistics into the PMD software counters.
+ *
+ * Rate limited twice: per port (internals->last_stat_rtc) and globally
+ * (last_stat_rtc), each to at most once per second of TSC time. RX counters
+ * are handled identically for physical and virtual ports; TX counters are
+ * taken from port counters (physical, with a LAG special case collecting
+ * ports 0 and 1 into Tx[0]) or from host-buffer queue counters (virtual).
+ * Finally the global per-color flow counters are folded into nt_flows[].
+ *
+ * Returns 0 on success (including the rate-limited early-outs), -1 on an
+ * invalid adapter/port index.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/* NOTE(review): '>' lets if_index == NUM_ADAPTER_PORTS_MAX through;
+	 * if the a_port_* arrays are sized NUM_ADAPTER_PORTS_MAX this is an
+	 * off-by-one — confirm array dimensions and consider '>='.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	/* NOTE(review): hwlock (spinlock) is held across a blocking
+	 * pthread_mutex_lock() below — confirm this ordering cannot stall
+	 * other lcores spinning on hwlock.
+	 */
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold the per-color deltas into every active local flow */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow .query callback.
+ *
+ * Only RTE_FLOW_ACTION_TYPE_COUNT queries are supported: the locally
+ * accumulated packet/byte counters of the flow are reported (after a
+ * poll_statistics() refresh) and optionally reset. Typecast (inline
+ * profile) handles carry no local counters and are rejected.
+ * Returns 0 on success, -1 with 'err' set otherwise.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Make sure the counters reported below are at most 1 s old */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug helper: dump every field of an rte_flow_tunnel to the filter log. */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/* PMD-private action list handed back by .tunnel_decap_set. Slot [0] is the
+ * fixed TUNNEL_SET action; slot [1] is rewritten per call (see below).
+ * NOTE(review): static and mutable, so shared by all ports — assumes no
+ * concurrent tunnel_decap_set calls; confirm.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow .tunnel_decap_set callback: hand the application the PMD
+ * actions implementing decap for the given tunnel. Only VXLAN tunnels are
+ * supported; anything else returns -ENOTSUP.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* PMD-private match item handed back by .tunnel_match (read-only). */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow .tunnel_match callback: return the single PMD item that matches
+ * tunneled traffic. The tunnel description itself is not used.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow .get_restore_info callback.
+ *
+ * Decodes the FDIR mark set by the hardware on a partially offloaded,
+ * tunneled packet and reconstructs the tunnel header info for the
+ * application: hash.fdir.hi bits 31:24 carry the port, hash.fdir.lo bits
+ * 23:0 the flow stat id used to look up the tunnel definition; the low 24
+ * bits of fdir.hi become the tunnel id. TTL (64) and TOS (0) are fixed
+ * values, not recovered from the packet.
+ * Returns 0 with 'info' filled, -EINVAL when there is no mark or no
+ * matching tunnel definition.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed values; the original TTL/TOS are not recovered */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow .tunnel_action_decap_release callback. The PMD action array is
+ * static (see _pmd_actions), so there is nothing to free — always 0.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow .tunnel_item_release callback. The PMD item is static (see
+ * _pmd_items), so there is nothing to free — always 0.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow ops table exported by the NTNIC PMD. .flush is not implemented
+ * (NULL); .destroy is defined elsewhere in this file.
+ */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow on behalf of a client, using the already-converted NTNIC
+ * attribute/match/action representation and the given FPGA profile.
+ * Returns the flow handle, or NULL with 'error' set on failure.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* One (RTE flag, NT flag) correspondence used by the translation tables. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* PAIR_NT(X) expands to { RTE_X, NT_X }, pairing a DPDK RSS define with
+ * its NTNIC counterpart of the same name.
+ */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/* Table of every RTE_ETH_RSS_* flag that has an NT_ETH_RSS_* equivalent;
+ * flags absent here are unsupported by the NT hardware hash.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/* Linear-search the pair table for an RTE RSS flag; returns a pointer to
+ * the matching NT flag, or NULL when the flag has no NT equivalent.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->first == rte_flag)
+			return &p->second;
+	}
+	return NULL; /* NOT found */
+}
+
+/* Reverse lookup of rte_to_nt_rss_flag(): NT RSS flag -> pointer to the
+ * RTE flag, or NULL when not present in the table.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->second == nt_flag)
+			return &p->first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Translate a DPDK RSS hash-field bit mask into the NT representation.
+ *
+ * Each set bit in 'rte_hash_bits' is looked up individually via
+ * rte_to_nt_rss_flag(); bits with no NT equivalent are logged and dropped,
+ * so the result is the best-effort subset of the requested fields.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (uint i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				/* 'i' is unsigned, so %u (was %d); message now
+				 * ends in '\n' like the other NT_LOG calls.
+				 */
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.\n",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate an NT RSS field mask back into the DPDK RTE_ETH_RSS_* mask.
+ * Every NT flag is expected to map (the NT set is a subset of the RTE
+ * set); an unmapped flag trips the assert in debug builds.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (uint i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Include guard added for consistency with ntnic_filter.h and to prevent
+ * double inclusion.
+ */
+#ifndef __NTNIC_HSHCONFIG_H__
+#define __NTNIC_HSHCONFIG_H__
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* __NTNIC_HSHCONFIG_H__ */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* MSB of a 32-bit id (0x80000000); a set MSB in a meter/profile id marks
+ * it as an egress meter throughout this file.
+ */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Look up a meter profile by id in the per-port profile list; returns the
+ * profile, or NULL when no profile with that id exists.
+ */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
+/*
+ * rte_mtr .meter_profile_add callback (vswitch path).
+ *
+ * Stores a copy of the profile in the per-port list. Only virtual ports or
+ * egress profiles (MSB of the id set) are accepted; ingress metering on
+ * physical ports is rejected with EINVAL.
+ * Returns 0 on success or a negative rte_mtr error.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): format "%s: [%s:%u]" is given __func__ twice; the
+	 * flow code passes __func__, __FILE__ — confirm which is intended.
+	 */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr .meter_profile_delete callback (vswitch path).
+ * Removes and frees the stored profile; ENODEV when the id is unknown.
+ * NOTE(review): no check that a meter still references the profile —
+ * confirm callers guarantee this.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Look up a meter object by id in the per-port meter list; returns the
+ * meter, or NULL when not found.
+ */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *mtr = NULL;
+
+	LIST_FOREACH(mtr, &dev_priv->mtrs, next)
+	if (mtr->mtr_id == mtr_id)
+		break;
+
+	return mtr;
+}
+
+/* QoS rate split into an integer part and a fractional part in 1/1024ths. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts byte/s to byte/period in form of integer + 1/1024*fractional.
+ * The period depends on the clock frequency and other parameters which,
+ * combined, give the multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps / 10^12
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /*10^12 pico second*/
+
+	res.integer = dividend / divisor;
+	/* remainder scaled to 1/1024ths for the fractional part */
+	const uint64_t reminder = dividend % divisor;
+
+	res.fractional = 1024ull * reminder / divisor;
+	return res;
+}
+
+/* Physical-port rate conversion: period of 8 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Virtual-port rate conversion: period of 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr .meter_enable callback (vswitch path).
+ *
+ * Programs the meter's profile into hardware. Egress meters (profile id
+ * MSB set) are applied via EPP vport/txp QoS; ingress meters via the DBS
+ * TX QoS, after a one-time global rate setup. Only the RFC2697 cir/cbs
+ * fields of the profile are used (see comment below on RFC4115 mapping).
+ * Returns 0 on success or a negative rte_mtr error.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* One-time global ingress QoS rate setup flag (process lifetime) */
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* A zero rate would disable; clamp to minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* A zero rate would disable; clamp to minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Disable ingress TX QoS for the port by programming a zero/disabled DBS
+ * QoS config. The nthw_set_tx_qos_config() return value is ignored here.
+ */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr .meter_disable callback (vswitch path).
+ *
+ * Clears the hardware QoS programmed by eth_meter_enable(): egress meters
+ * (id MSB set) zero the EPP vport/txp QoS; ingress meters disable the DBS
+ * TX QoS. Returns 0, or -EINVAL when the meter id is unknown.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * rte_mtr .create callback (vswitch path).
+ *
+ * Allocates a meter object referencing an existing profile and inserts it
+ * into the per-port meter list. Ingress meters on physical ports are
+ * rejected. When params->meter_enable is set the meter is immediately
+ * programmed via eth_meter_enable().
+ * Returns 0 on success or a negative rte_mtr error.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/* MTR object destroy */
+/*
+ * rte_mtr .destroy callback (vswitch path).
+ *
+ * Clears the hardware QoS for the meter (EPP for egress, DBS for ingress —
+ * same split as eth_meter_disable()), then unlinks and frees the object.
+ * Returns 0, or a negative rte_mtr error when the id is unknown.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		/* Fixed: was EEXIST; a missing id is ENODEV, matching
+		 * eth_meter_profile_delete().
+		 */
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr .capabilities_get callback (inline FLM path).
+ *
+ * Reports the capabilities of the FLM-based metering: color-blind trTCM
+ * RFC2698 only, byte mode only, up to 4 chained meters per flow (limited
+ * by MBR record ids in an FLM learn record), green packet/byte stats.
+ * Returns 0, or -EINVAL when the device has no metering support.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* 12-bit mantissa shifted by up to 15, in units of 1099 */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_add() for the inline profile.
+ *
+ * Only RFC 2698 trTCM in byte mode is accepted, and the committed and
+ * peak rate/burst pairs must be equal (single HW bucket per meter).
+ *
+ * @param meter_profile_id  Profile slot; must be < flow_mtr_meter_policy_n_max().
+ * @param profile           Requested profile parameters.
+ * @param error             Filled in on failure.
+ * @return 0 on success, negative value via rte_mtr_error_set() on failure.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	/* Bug fix: 'error' was annotated __rte_unused although it is passed
+	 * to rte_mtr_error_set() on every failure path below.
+	 */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete() for the inline profile: resets the
+ * profile slot to all-zero rates.
+ *
+ * @return 0 on success, negative value via rte_mtr_error_set() when the
+ *         id is out of range.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	/* Bug fix: all three parameters were annotated __rte_unused although
+	 * each one is used below.
+	 */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	/* NOTE(review): return value deliberately ignored — clearing a slot
+	 * to zero is best-effort; confirm flow_mtr_set_profile() cannot fail
+	 * for zeroed parameters.
+	 */
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add() for the inline profile.
+ *
+ * Only the fixed policy "green pass, yellow drop, red drop" is accepted:
+ * the green action list must be empty (optionally a single VOID or
+ * PASSTHRU), and yellow/red must each be exactly one DROP action.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	const struct rte_flow_action *act;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* Green: END, or VOID/PASSTHRU followed by END. */
+	act = policy->actions[RTE_COLOR_GREEN];
+	int green_ok = act[0].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (!green_ok &&
+	    (act[0].type == RTE_FLOW_ACTION_TYPE_VOID ||
+	     act[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU))
+		green_ok = act[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	/* Yellow: exactly one DROP action. */
+	act = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_ok = act[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+			act[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	/* Red: exactly one DROP action. */
+	act = policy->actions[RTE_COLOR_RED];
+	int red_ok = act[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		     act[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (!green_ok || !yellow_ok || !red_ok)
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1))
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete() for the inline profile: only validates
+ * the id — there is no per-policy state to tear down here.
+ *
+ * @return 0 on success, negative value via rte_mtr_error_set() when the
+ *         id is out of range.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	/* Bug fix: 'policy_id' and 'error' were annotated __rte_unused
+	 * although both are used below ('dev' really is unused).
+	 */
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr create() for the inline profile: validate the request and
+ * offload the meter to the FLM hardware.
+ *
+ * Constraints enforced below: color-blind only, green stats only,
+ * meter must be enabled and shared, ids must be in range.
+ *
+ * @return 0 on success, negative value via rte_mtr_error_set() otherwise.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	/* Only green packet/byte counters can be offloaded. */
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	/* All checks passed — program the meter into hardware. */
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy() for the inline profile: remove the meter from the
+ * FLM hardware.
+ *
+ * @return 0 on success, negative value via rte_mtr_error_set() on failure.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	/* Bug fix: 'error' was annotated __rte_unused although it is passed
+	 * to rte_mtr_error_set() on both failure paths.
+	 */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update() for the inline profile, used here as a bucket
+ * adjustment: bit 63 of 'adjust_value' acts as an "adjust" command flag,
+ * the low 32 bits carry the value written to the offloaded meter.
+ *
+ * @return 0 on success, negative value via rte_mtr_error_set() on failure.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* Command flag: callers must set bit 63 to request an adjustment. */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the command flag; only the numeric part remains. */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read() for the inline profile: fetch (and optionally
+ * clear) the green packet/byte counters of one offloaded meter.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+
+	/* Only the green counters are maintained; zero everything else. */
+	memset(stats, 0x0, sizeof(*stats));
+
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* rte_mtr ops for the vswitch FPGA profile (EPP-based metering). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* rte_mtr ops for the inline FPGA profile (FLM-based metering). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_eth mtr_ops_get() entry point: select the rte_mtr ops table that
+ * matches the adapter's FPGA profile (vswitch or inline).
+ *
+ * @param ops  Out: receives a 'const struct rte_mtr_ops *'.
+ * @return 0 on success, -1 for unsupported profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+int eth_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port vDPA bookkeeping: vhost socket, queue layout and PCI identity. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;			/* vhost device id, set on connect */
+	uint32_t index;			/* base HW ring index */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;			/* rte_vhost driver flags */
+	struct rte_pci_addr addr;
+};
+
+/* Registered ports and the count of used slots. */
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a vDPA device + queue id into the underlying HW ring index,
+ * VF host id and representor port.
+ *
+ * @param vdpa_dev  vDPA device to look up.
+ * @param rx        Non-zero for an RX queue, zero for TX.
+ * @param queue_id  Queue index within the device.
+ * @param hw_index  Out: HW ring index (port base index + queue_id).
+ * @param host_id   Out: VF number backing the port.
+ * @param rep_port  Out: representor port (in-port override on Tx).
+ * @return 0 on success, -1 if the device or queue is not configured.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			} else {
+				if (queue_id >= vport[i].txqs) {
+					/* Bug fix: the TX branch logged
+					 * "rxqs" and printed the RX queue
+					 * count; report the TX count.
+					 */
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			}
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a new vDPA port backed by 'socket_path' and start its vhost
+ * driver in client mode.
+ *
+ * @param vdev             PCI device of the VF.
+ * @param backing_devname  Backing device name (logging only).
+ * @param socket_path      vhost-user socket path.
+ * @param index            Base HW ring index for this port's queues.
+ * @param rxqs,txqs        Number of RX/TX queues.
+ * @param rep_port         Representor port (in-port override on Tx).
+ * @param vhid             Out: slot index assigned to this port.
+ * @return 0 on success, -1 on failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	/* Bug fix: guard the fixed-size vport[] table — without this check
+	 * the (MAX_VDPA_PORTS+1)-th init writes past the end of the array.
+	 */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Too many vDPA ports (max %lu)\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Tear down a registered vhost-user port: detach its vDPA device and
+ * unregister the vhost driver for its socket path, then clear the slot.
+ *
+ * NOTE(review): the loop returns after the FIRST slot with a non-empty
+ * ifname, so at most one port is closed per call — confirm whether this
+ * is intended or whether all ports should be torn down here.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Mark the slot free by clearing its socket path. */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Debug-only table mapping virtio feature bit numbers to printable names
+ * (used by dump_virtio_features() below).
+ */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Print the name of every feature bit set in 'features' (debug aid). */
+static void dump_virtio_features(uint64_t features)
+{
+	int idx;
+
+	for (idx = 0; idx < NUM_FEATURES; idx++) {
+		const uint64_t bit = 1ULL << virt_features[idx].id;
+
+		if ((features & bit) == bit)
+			printf("Virtio feature: %s\n", virt_features[idx].name);
+	}
+}
+#endif
+
+/*
+ * vhost new_device callback: a frontend connected on one of our sockets.
+ *
+ * Matches the socket path to a vport slot, waits (up to ~2 s) for the PMD
+ * port instance to become ready, then records the negotiated ring layout
+ * (packed vs split). Requires IN_ORDER or RING_PACKED to run vDPA.
+ *
+ * @return 0 on success, -1 if no slot matches, the port never becomes
+ *         ready, or the negotiated features are incompatible.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	/* -1 wraps to UINT_MAX so the post-loop range check fails if no
+	 * slot matched.
+	 */
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the vport slot registered for this socket path. */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll for the PMD port instance: 2000 * 1 ms = ~2 s timeout. */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		/* Packed ring wins over split when both were negotiated. */
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost destroy_device callback: the frontend for 'vid' disconnected.
+ *
+ * Logs the matching port (first loop) and marks the port link down by
+ * resetting its negotiated state (second loop).
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	/* Logging only: find and report the port being destroyed. */
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost lifecycle callbacks registered for each vDPA socket. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register, configure and start the vhost-user driver for one port.
+ *
+ * Registers the socket with the flags stored at init time, installs the
+ * lifecycle callbacks, disables all virtio-net offload features the
+ * hardware does not support, then starts the driver.
+ *
+ * @return 0 on success, -1 on any registration/start failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Mask out offloads (TSO, checksum, ctrl queue, ...) up front so the
+	 * frontend never negotiates features the FPGA cannot honor.
+	 */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+/* Resolve a vDPA device + queue id to HW ring index, VF id and rep port. */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+/* Register a vDPA port on 'socket_path' and start its vhost driver. */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+/* Unregister a port's vhost driver and free its slot. */
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF PCI vendor/device id pair to a printable adapter name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+			return "NT200A02";
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+			return "NT50B01";
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for a Napatech VF: create the vDPA device first, then the
+ * DPDK VF ethdev interface.
+ *
+ * @return 0 on success, -1 or the ethdev-creation result on failure.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/*
+ * PCI remove for a Napatech VF: tear down the vDPA device first, then
+ * the DPDK VF ethdev interface (reverse of probe order).
+ */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI ids matched by this VF driver (NT200A02 and NT50B01 VFs). */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* VF PCI driver: requires the vfio-pci kernel module (see KMOD_DEP). */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+/* Probe/remove entry points for Napatech VF PCI devices (vDPA + ethdev). */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+/* VFIO helpers keyed by VF number. */
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..6fc90939a1
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1245 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow of one virtqueue's layout and runtime state.
+ * desc/avail/used are presumably guest addresses of the vring parts —
+ * TODO confirm against the code that fills them (outside this chunk).
+ */
+struct vring_info {
+	uint64_t desc;
+	uint64_t avail;
+	uint64_t used;
+	uint16_t size;
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;
+	struct nthw_virt_queue *p_vq;	/* backing HW virt queue */
+
+	int enable;
+};
+
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device state shared between the vhost/vDPA callbacks. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds obtained via nt_vfio_setup() in ntvf_vdpa_vfio_setup() */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost device id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	volatile int32_t started;
+	volatile int32_t dev_attached;
+	volatile int32_t running;
+
+	rte_spinlock_t lock;
+
+	/* Guarded with relaxed __atomic ops in ntvf_vdpa_dma_map() */
+	volatile int32_t dma_mapped;
+	volatile int32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/* Locate the bookkeeping entry wrapping 'vdev'; NULL when not tracked.
+ * The list lock is held only for the scan, never across the return.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->vdev == vdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/* Locate the bookkeeping entry wrapping PCI device 'pdev'; NULL when not
+ * tracked. The list lock is held only for the scan.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->pdev == pdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Attach the VF to VFIO and cache the container/group/device fds in
+ * 'internal'. Fds are pre-set to -1 so a failed setup leaves a
+ * recognizable state.
+ *
+ * @return 0 on success, -1 when nt_vfio_setup() fails.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (do_map != 0) or unmap (do_map == 0) all guest memory regions of the
+ * vhost device into the VF's IOMMU domain and track the state in
+ * internal->dma_mapped.
+ *
+ * @param internal  driver state; internal->vid selects the vhost device
+ * @param do_map    non-zero to map, zero to unmap
+ * @return 0 on success; -1 when the requested state is already in effect
+ *         or a vhost/VFIO call fails
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Reject redundant requests: already mapped / already unmapped. */
+	if ((do_map && __atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED)) ||
+			(!do_map && !__atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Bug fix: GPA conversion was the literal "0xllx" (missing
+		 * "%" PRIX64), so guest_phys_addr was never printed and the
+		 * size argument landed in the wrong slot.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 1, __ATOMIC_RELAXED);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 0, __ATOMIC_RELAXED);
+		}
+	}
+
+exit:
+	/* rte_vhost_get_mem_table() allocates the table; caller frees it. */
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/* Translate a host virtual address to the guest physical address via the
+ * vhost memory table. Returns 0 when no region contains the address.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+	uint32_t r;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (r = 0; r < mem->nregions; r++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[r];
+
+		if (hva >= reg->host_user_addr &&
+				hva < reg->host_user_addr + reg->size) {
+			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Create the FPGA-backed virtqueue for one vring of the vhost device:
+ * translate the descriptor/avail/used rings to guest-physical addresses,
+ * query the HW queue mapping, and instantiate an Rx (even index) or Tx
+ * (odd index) queue in packed or split layout. HW queues are only created
+ * when VIRTIO_F_IN_ORDER or VIRTIO_F_RING_PACKED was negotiated.
+ *
+ * Returns 0 on success (also when the unsupported split/non-in-order case
+ * is skipped with a warning), negative on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* The hardware needs guest-physical ring addresses, not host-virtual. */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	/* Resume from the indexes the frontend last saw (live-migration safe). */
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even vring index = Rx, odd = Tx; the queue id is vring/2. */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Receive virt queue. */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				/* Make the flag write visible before HW setup. */
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets have been sent
+					 * from the VM on the LM source side. This pointer points
+					 * to the next free descr and may be pushed ahead by the
+					 * next flag and if so, this pointer calculation is
+					 * incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				/* Unreachable: the two branches above cover all indexes. */
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring the datapath up: fetch the negotiated features and vring count,
+ * DMA-map guest memory and create the vrings. For the inline FPGA profile
+ * at most the first two vrings are created and enabled here; for other
+ * profiles only vring 0 is created now and the remaining vrings come up
+ * later via the vring-state callback.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		/* Inline profile: create/enable only the first Rx/Tx pair. */
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the datapath: push the current avail/used indexes back to vhost so
+ * the frontend can resume later, then release the FPGA virtqueues that
+ * were created for the IN_ORDER/PACKED feature sets. Warns if the frontend
+ * requested dirty-page logging (live migration), which is not supported.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Hand the ring positions back to the vhost library first. */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type 0 = Rx, 1 = Tx (set in create_vring). */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		/* Mark the vring as torn down. */
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vfio_irq_set buffer large enough for one eventfd per queue vector plus
+ * one extra vector (the device/config interrupt at offset 0).
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Register the vhost callfd eventfds as MSI-X vectors with VFIO so queue
+ * interrupts reach the guest. Idempotent: returns 0 immediately if
+ * interrupts are already enabled. Also returns 0 (success) when too many
+ * vectors are requested, because polling frontends still work.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0 carries the device's own interrupt fd. */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* NOTE(review): the loop reads vrings i and i+1 per iteration and so
+	 * assumes nr_vring is even (Rx/Tx pairs) - confirm for odd counts.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 1, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Release all MSI-X vectors previously registered with VFIO. Idempotent:
+ * returns 0 immediately if interrupts are not currently enabled.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)irq_set_buf;
+	int rc;
+
+	if (!__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* A zero-count TRIGGER request drops every MSI-X vector. */
+	irq_set->argsz = sizeof(struct vfio_irq_set);
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	rc = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (rc) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 0, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the started/dev_attached flags under the
+ * device spinlock: start it when both flags are set and it is not running;
+ * stop it (releasing interrupts and DMA mappings) when it is running and
+ * either flag has been cleared; otherwise log and do nothing.
+ *
+ * @return 0 on success or when no transition applies, negative on error
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(__atomic_load_n(&internal->started, __ATOMIC_RELAXED) &&
+			 __atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 1, __ATOMIC_RELAXED);
+	} else if (__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(!__atomic_load_n(&internal->started, __ATOMIC_RELAXED) ||
+			 !__atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Bug fix: the error log used to sit after "goto err"
+			 * and was unreachable; log before jumping.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 0, __ATOMIC_RELAXED);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/* vhost callback: a frontend attached to the device - start the datapath. */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	priv = entry->internal;
+	priv->vid = vid;
+
+	/* Mark attached, then let the state machine bring things up. */
+	__atomic_store_n(&priv->dev_attached, 1, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(priv);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vhost callback: the frontend detached - stop the datapath and drop the
+ * cached virtqueue pointers.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+	struct ntvf_vdpa_hw *hw;
+	uint32_t q;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	priv = entry->internal;
+
+	/* Clear the attached flag first; update_datapath tears queues down. */
+	__atomic_store_n(&priv->dev_attached, 0, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(priv);
+
+	/* Invalidate the virt queue pointers */
+	hw = &priv->hw;
+	for (q = 0; q < hw->nr_vring; q++)
+		hw->vring[q].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vhost callback invoked after feature negotiation. Succeeds unless the
+ * frontend requires dirty-page logging (live migration), which this
+ * driver cannot provide without a relay core.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+	uint64_t feats;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &feats);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, feats);
+
+	if (!RTE_VHOST_NEED_LOG(feats))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/* vhost callback: return the VFIO group fd for the device, -1 if unknown. */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/* vhost callback: return the VFIO device fd for the device, -1 if unknown. */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/* vhost callback: report the number of queue pairs the device supports. */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vhost callback: report the virtio feature bits the device offers. */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vhost callback: report the (static) vhost-user protocol feature set. */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (features == NULL) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	/* The protocol feature set does not depend on the device. */
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Finalize queue setup: enable MSI-X interrupts via VFIO, then switch every
+ * created vring on in the FPGA (odd indexes are Tx, even are Rx).
+ *
+ * @return 0 on success, the ntvf_vdpa_enable_vfio_intr() error otherwise
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Was a bare printf(); use the driver log like the rest of
+		 * the file.
+		 */
+		NT_LOG(ERR, VDPA, "ERROR - ENABLE INTERRUPT via VFIO\n");
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vhost callback: enable or disable a single vring. On disable, an active
+ * HW queue is switched off. On enable, an existing queue is re-enabled or
+ * a new one is created after DMA mapping. When the last vring of the set
+ * comes up, ntvf_vdpa_configure_queue() turns on VFIO interrupts and all
+ * queues; the inline FPGA profile tracks the enable flag separately.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx+Tx) per queue pair. */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Callbacks registered with the vhost vDPA framework. migration_done and
+ * get_notify_area are not implemented (NULL).
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe callback: allocate per-device state, set up the VFIO fds,
+ * register the device with the vhost vDPA framework and link it into the
+ * global device list, then kick the datapath state machine.
+ *
+ * @return 0 on success, -1 on failure (allocations are released)
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Bug fix: returning -1 here leaked 'list' and 'internal';
+		 * take the common error path instead.
+		 */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	__atomic_store_n(&internal->started, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	/* rte_free(NULL) is a no-op, so partial allocation is handled. */
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/* PCI remove callback: stop the datapath, release VFIO and the vDPA
+ * registration, and free the per-device state.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *priv;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	entry = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	priv = entry->internal;
+
+	/* Clear 'started' so update_datapath tears the queues down. */
+	__atomic_store_n(&priv->started, 0, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(priv);
+
+	rte_pci_unmap_device(priv->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(priv->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, entry, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(entry);
+	rte_free(priv);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* NOTE(review): the table has only the vendor_id=0 terminator, so no PCI id
+ * is auto-matched - devices are presumably bound explicitly; confirm.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver glue: probe/remove implemented above. */
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+/* Log type for the vDPA driver (defined in ntnic_vf_vdpa.c). */
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit enter/leave debug lines in each callback. */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI bus glue implemented in ntnic_vf_vdpa.c. */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+/* NOTE(review): not defined in this file chunk - confirm implementation. */
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000	/* 1 GiB mapping granularity */
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000	/* first IOVA handed out per VF */
+
+/* Derive a unique VF index from the PCI device and function numbers. */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	int devid_part = (pdev->addr.devid & 0x1f) << 3;
+	int func_part = pdev->addr.function & 0x7;
+
+	return devid_part + func_part;
+}
+
+/* Internal API */
+
+/* Per-VF VFIO state, indexed by the VF number from nt_vfio_vf_num(). */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;
+};
+
+static struct vfio_dev vfio_list[256];
+
+/* Look up the VFIO state for a VF; NULL when the index is out of range. */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	const int n_devs = (int)(sizeof(vfio_list) / sizeof(vfio_list[0]));
+
+	if (vf_num < 0 || vf_num >= n_devs)
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+/*
+ * Create/attach the VFIO container and IOMMU group for a device.
+ *
+ * PF0 (vf_num == 0) joins the default VFIO container; all other devices
+ * get a private container and are PCI-mapped here.
+ * Returns the VF number on success, -1 on error.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	/* Start from a clean slate; fds are filled in below. */
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	/*
+	 * NOTE(review): the return value of rte_vfio_get_group_num() is not
+	 * checked; on failure iommu_group_num may be unset -- confirm whether
+	 * an error check is needed here.
+	 */
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+			       &iommu_group_num);
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	/* Only non-PF0 devices are mapped here (requires vfio-pci binding). */
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Never destroy the shared default container on the error path. */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Tear down the VFIO container created by nt_vfio_setup() for a VF.
+ * Returns 0 on success, -1 when vf_num is out of range.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): for vf_num == 0 container_fd is
+	 * RTE_VFIO_DEFAULT_CONTAINER_FD (see nt_vfio_setup()); unlike the
+	 * setup error path there is no guard here before destroying it --
+	 * confirm this is intended.
+	 */
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host-virtual buffer into the VF's IOMMU domain.
+ *
+ * 1G-sized buffers are aligned down to a 1G boundary before mapping and
+ * the returned *iova_addr compensates with the offset into that region.
+ * Every mapping consumes one 1G slot of the per-VF IOVA space.
+ * Returns 0 on success, -1 on error.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Cast the arguments explicitly: passing a void * for PRIX64 or a
+	 * uint64_t for %d is a format-string type mismatch (undefined
+	 * behavior in variadic calls).
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%d\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	/* Hand back the IOVA of the caller's buffer, then advance the pool. */
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a mapping created by nt_vfio_dma_map().
+ * Returns 0 on success (or when no container exists), -1 on error.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	struct vfio_dev *vfio;
+	uint64_t base = (uint64_t)virt_addr;
+
+	/*
+	 * 1G buffers were aligned down to the 1G boundary when mapped;
+	 * apply the same adjustment to both addresses before unmapping.
+	 */
+	if (size == ONE_G_SIZE) {
+		uint64_t off = base & ONE_G_MASK;
+
+		base &= ~(uint64_t)ONE_G_MASK;
+		iova_addr -= off;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Nothing to do when no container was ever set up for this VF. */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int rc = rte_vfio_container_dma_unmap(vfio->container_fd, base,
+					      iova_addr, size);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       rc, vfio->container_fd, vf_num, base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * vDPA mapping with Guest Phy addresses as IOVA.
+ * Returns 0 on success, -1 on error.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* size is uint64_t: cast to int to match %d (mismatch would be UB). */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo a vDPA mapping created by nt_vfio_dma_map_vdpa().
+ * Returns 0 on success, -1 on error.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+	int rc;
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	rc = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					  iova_addr, size);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       rc, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for a VF, or -1 when vf_num is invalid. */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message: this is a getter, not remove. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for a VF, or -1 when vf_num is invalid. */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message: this is a getter, not remove. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for a VF, or -1 when vf_num is invalid. */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted message: this is a getter, not remove. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/*
+ * Constructor: register the VFIO map/unmap callbacks with nt_util.
+ * Uses the canonical RTE_INIT(fn) { ... } form instead of the previous
+ * RTE_INIT(fn); declaration followed by a separate static definition,
+ * which relied on the macro's internal expansion.
+ */
+RTE_INIT(nt_vfio_init)
+{
+	struct nt_util_vfio_impl impl = {
+		.vfio_dma_map = nt_vfio_dma_map,
+		.vfio_dma_unmap = nt_vfio_dma_unmap,
+	};
+
+	nt_util_vfio_init(&impl);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Set up / tear down the VFIO container for a device; see ntnic_vfio.c. */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-VF VFIO file descriptors (-1 when vf_num invalid). */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* DMA map/unmap using the driver-managed per-VF IOVA pool. */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* DMA map/unmap using guest physical addresses as IOVA (vDPA). */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..c0e67ba03d
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/*
+ * Descriptor for one extended statistic.
+ * 'source' selects which counter block 'offset' applies to:
+ *   1 = RX port counters, 2 = TX port counters, 3 = FLM counters
+ * (see the switch statements in nthw_xstats_get()/nthw_xstats_reset()).
+ */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* exported xstat name */
+	uint8_t source;				/* counter-block selector */
+	unsigned int offset;			/* byte offset into that block */
+};
+
+/*
+ * Extended stat for VSwitch.
+ * source 1 = RX, 2 = TX; offsets are into struct port_counters_vswitch_v1.
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON.
+ * Used for FPGAs with FLM version < 0.18 (see nthw_xstats_get()).
+ * source 1 = RX, 2 = TX (struct port_counters_v2), 3 = FLM
+ * (struct flm_counters_v1).
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON.
+ * Used for FPGAs with FLM version >= 0.18; superset of the v1 table with
+ * the additional FLM 0.20 counters appended at the end.
+ * source 1 = RX, 2 = TX (struct port_counters_v2), 3 = FLM
+ * (struct flm_counters_v1).
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values.
+ * Sized to the largest of the stat-name tables so it fits any profile.
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port snapshot taken by nthw_xstats_reset(); row 0 also holds the FLM snapshot. */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = {{ 0 }};
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+
+/*
+ * Fill 'stats' with up to 'n' xstat values for 'port'.
+ * Values are reported relative to the snapshot in nthw_xstats_reset_val
+ * taken by nthw_xstats_reset().  Returns the number of entries filled.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Select the counter blocks and name table for the adapter profile. */
+	if (is_vswitch) {
+		flm_ptr = NULL;	/* vswitch profile has no FLM counters */
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	/* Read each 64-bit counter at its table offset, minus the reset snapshot. */
+	for (i = 0; i < n && i < nb_names; i++) {
+		stats[i].id = i;
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			stats[i].value =
+				*((uint64_t *)&rx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 2:
+			/* TX stat */
+			stats[i].value =
+				*((uint64_t *)&tx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 3:
+			/* FLM stat (adapter-global: reset values kept in row 0) */
+			if (flm_ptr) {
+				stats[i].value =
+					*((uint64_t *)&flm_ptr[names[i].offset]) -
+					nthw_xstats_reset_val[0][i];
+			} else {
+				stats[i].value = 0;
+			}
+			break;
+		default:
+			stats[i].value = 0;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*
+ * Fill 'values' for an explicit list of stat 'ids' (same stat-mutex
+ * requirement as nthw_xstats_get()).  Out-of-range ids are skipped.
+ * Returns the number of ids that were resolved.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+	int count = 0;
+
+	/* Select counter blocks and name table, as in nthw_xstats_get(). */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] < nb_names) {
+			switch (names[ids[i]].source) {
+			case 1:
+				/* RX stat */
+				values[i] =
+					*((uint64_t *)&rx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 2:
+				/* TX stat */
+				values[i] =
+					*((uint64_t *)&tx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 3:
+				/* FLM stat (reset values kept in row 0) */
+				if (flm_ptr) {
+					values[i] =
+						*((uint64_t *)&flm_ptr
+						  [names[ids[i]].offset]) -
+						nthw_xstats_reset_val[0][ids[i]];
+				} else {
+					values[i] = 0;
+				}
+				break;
+			default:
+				values[i] = 0;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * Snapshot the current counter values into nthw_xstats_reset_val so that
+ * subsequent nthw_xstats_get*() calls report deltas (stat mutex must be held).
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Select counter blocks and name table, as in nthw_xstats_get(). */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_ptr[names[i].offset]);
+			break;
+		case 2:
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_ptr[names[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM stat
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (flm_ptr && strcmp(names[i].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_ptr[names[i].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked.
+ */
+
+/*
+ * Copy up to 'size' xstat names into 'xstats_names'.
+ * A NULL destination is a query for the total number of stats.
+ * Returns the number of names copied (or the total when queried).
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	unsigned int idx;
+	int filled = 0;
+
+	/* Pick the stat-name table matching the adapter profile. */
+	if (is_vswitch) {
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		tbl = nthw_cap_xstats_names_v1;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		tbl = nthw_cap_xstats_names_v2;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (xstats_names == NULL)
+		return tbl_size;
+
+	for (idx = 0; idx < size && idx < tbl_size; idx++) {
+		strlcpy(xstats_names[idx].name, tbl[idx].name,
+			sizeof(xstats_names[idx].name));
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Resolve xstat names for an explicit list of stat ids.
+ * A NULL destination is a query for the total number of stats.
+ * Out-of-range ids are skipped; returns the number of names filled in.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Pick the stat-name table matching the adapter profile. */
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/*
+			 * Count only ids that were actually resolved,
+			 * consistent with nthw_xstats_get_by_id().
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Name lookup; a NULL xstats_names queries the total number of stats. */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Value readout relative to the last reset; requires stat mutex held. */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Snapshot current counters as the new reset baseline; stat mutex held. */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* By-id variants; out-of-range ids are skipped. */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v11 8/8] net/ntnic: adds socket connection to PMD
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-31 12:23   ` [PATCH v11 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-31 12:23   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 12:23 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
v10:
* Fix uninitialized variables and build warnings.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  877 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5357 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ *
+ * Each node matches one comma-separated token of a request string
+ * (see execute_function()). A matching node either descends into
+ * 'sub_funcs' to match the next token, or invokes the leaf handler
+ * 'func'. Tables are terminated by an all-NULL entry.
+ */
+struct func_s {
+	/* token this node matches; NULL terminates a table */
+	const char *param;
+	/* next level of the tree, or NULL at a leaf */
+	struct func_s *sub_funcs;
+	/* leaf handler, used when 'sub_funcs' is NULL */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply for 'module'.
+ *
+ * On success (*data allocated) the reply layout is:
+ *   bytes 0..3 : 32-bit ntconn error code — written over the "----"
+ *                placeholder that the format string below reserves
+ *   bytes 4..  : "<module>:<error text>" including NUL terminator
+ * *len is the total reply length (terminator included), or 0 when no
+ * buffer could be produced.
+ *
+ * Always returns REQUEST_ERR so callers can 'return ntconn_error(...)'.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* exact fit: 4 placeholder + module + ':' + text + NUL */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal status reply: a single 32-bit reply code.
+ * *len is 0 unless the buffer was allocated. Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (!data)
+		return REQUEST_OK;
+
+	char *buf = malloc(sizeof(uint32_t));
+
+	/* NULL is stored on allocation failure, matching caller expectations */
+	*data = buf;
+	if (buf) {
+		*(uint32_t *)buf = (uint32_t)code;
+		*len = sizeof(uint32_t);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Resolve and run a request by walking the comma-separated 'function'
+ * string down the 'func_list' tree, one token per recursion level.
+ *
+ * 'function' is consumed destructively (tokens are NUL-terminated in
+ * place). On a leaf hit, *data is pointed at the remainder of the
+ * request text and the leaf handler is invoked. Missing or unknown
+ * tokens produce an ntconn error reply.
+ *
+ * Returns the leaf handler's status, or REQUEST_ERR via ntconn_error().
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *saveptr = NULL;
+	/*
+	 * strtok_r() rather than strtok(): strtok keeps hidden static
+	 * state and is unsafe if requests are ever parsed concurrently.
+	 */
+	char *tok = strtok_r(function, ",", &saveptr);
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	/* remainder of the request text, just past the token's NUL */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+/* Return codes used by module request handlers */
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+/*
+ * Operation table a module registers with the ntconnect server
+ * (see register_ntconn_mod()) in order to receive requests
+ * addressed to it.
+ */
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain a descriptive error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+/* Look up the code/text record for an ntconn_err_e value */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/*
+ * One registered module instance, bound to a PCI device.
+ * NOTE(review): pthread_mutex_t is used without a direct <pthread.h>
+ * include here — relies on an indirect include; confirm.
+ */
+typedef struct ntconn_mod_s {
+	void *hdl;
+	struct pci_id_s addr;
+	const ntconnapi_t *op;
+
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+/* Server state shared by ntconn_server.c and ntconnect.c */
+struct ntconn_server_s {
+	int serv_fd;
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;
+};
+
+/* Register the built-in "server" module itself */
+int ntconn_server_register(void *server);
+
+/* Register a module's operation table for the device at 'addr' */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+/* Create the listening socket and start serving on the given CPU set */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+/* NOTE(review): quoted form — the system header <stdint.h> is conventional */
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+/* Unix domain socket path the ntconnect server listens on */
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+/* Error codes carried in the leading 32 bits of an error reply
+ * (see ntconn_error() in ntconn_mod_helper.h)
+ */
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+/* Module-level status codes returned via ntconn_reply_status() */
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+/* Values for ntconn_header_s.tag */
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+/*
+ * Wire header preceding each message: 'len' is the command-string
+ * length and 'blob_len' the trailing binary payload length (see the
+ * request() contract in ntconnect.h).
+ */
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+struct ntconn_header_s {
+	uint16_t tag;
+	uint16_t len;
+	uint32_t blob_len;
+};
+
+/*
+ * PCI DBDF address packed into one 64-bit value.
+ * NOTE(review): the packed 'pci_id' representation depends on host
+ * endianness and struct layout — fine in-process, confirm if it ever
+ * crosses the wire.
+ */
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;
+		};
+	};
+};
+
+/* Split a 64-bit version value into major (high) and minor (low) parts */
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * Binary reply layouts for the ntconnect "adapter" module.
+ */
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+/* Identification data read from a NIM (vendor strings are NUL-terminated) */
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+/* One sensor reading with its observed min/max */
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+/* Per-port description returned for "adapter get,interfaces" */
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/*
+	 * NOTE(review): this declares 32 char *pointers*; a 32-byte
+	 * string buffer (char fw_version[32]) looks intended — confirm
+	 * against the producers/consumers in ntconn_adapter.c.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name says FILTER but the file is ntconnect_api_flow.h
+ * — consider aligning guard and filename.
+ */
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	/* NOTE(review): "TO_MANY" is a typo for "TOO_MANY"; renaming now
+	 * would break users of this identifier — flagging only.
+	 */
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+/* Fixed-size copy of one flow-element spec/mask (union of all element types) */
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+/* Fixed-size copy of one flow-action configuration (union of all action types) */
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+/* NOTE(review): magic marker value; its purpose is not visible in this
+ * header — document at point of use.
+ */
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* NOTE(review): MAX_PATH_LEN and ERR_MSG_LEN are also defined (with the
+ * same values) in ntconnect_api_flow.h; identical re-definitions are
+ * benign, but a shared header would be cleaner.
+ */
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+/* Request payload: which union member is live depends on the command */
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+/* Pointer variant of meter_setup_s for in-process use */
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ *
+ * Each NUM_STAT_RECORD_TYPE_* macro below is the number of uint64_t
+ * counters in the corresponding record struct (sizeof / sizeof(uint64_t)).
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+/* Reply header: presumably 'nb_counters' records follow in data[] and
+ * 'is_virt' selects virt vs cap record layout — confirm in ntconn_stat.c.
+ */
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Reply payload for the "test" module; presumably 'number' entries
+ * follow in test[] — confirm in ntconn_test.c.
+ */
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on the fake PCI address 0000:00:00.0
+ */
+#define this_module_name "server"
+
+/* Protocol version advertised by the "version" module command */
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+/* Handler for the "get,nic_pci_ids" command (defined below). */
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+/* "get" sub-command table; NULL entry terminates the list. */
+static struct func_s funcs_get_level1[] = {
	{ "nic_pci_ids", NULL, func_get_nic_pci },
	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level command table for the server module
+ */
+static struct func_s server_entry_funcs[] = {
	{ "get", funcs_get_level1, NULL },
	{ NULL, NULL, NULL },
+};
+
+/*
+ * "get,nic_pci_ids": return the list of PCI IDs of all registered NICs.
+ * Allocates the reply in *data (caller frees via the module free callback);
+ * *len receives the reply size. Returns REQUEST_OK or REQUEST_ERR on OOM.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/* snprintf: bounded by the entry size, cannot overflow */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Dispatch an incoming "server" module request through the command tree.
+ */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, server_entry_funcs, data, len, 0);
+	return res;
+}
+
+/*
+ * Release a reply buffer previously allocated by a server request handler.
+ */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/* Module operations: name, version, request and free callbacks; no
+ * per-client cleanup callback (last member NULL).
+ */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
					      NTCONNECT_SERVER_VERSION_MAJOR,
					      NTCONNECT_SERVER_VERSION_MINOR,
					      ntconn_server_request,
					      ntconn_server_free_data,
					      NULL
					    };
+
+/*
+ * Register the server module itself, using the fake PCI address
+ * 0000:00:00.0 so it never clashes with a real adapter.
+ */
+int ntconn_server_register(void *server)
+{
+	static const struct rte_pci_addr null_addr = {
+		.domain = 0,
+		.bus = 0,
+		.devid = 0,
+		.function = 0,
+	};
+
+	return register_ntconn_mod(&null_addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..697e101a03
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;	/* head of registered module list */
+static pthread_t tid;			/* accept-loop server thread */
+static pthread_t ctid;			/* most recently spawned client thread */
+static struct ntconn_server_s ntconn_serv;	/* server module state */
+
+/*
+ * Look up the table entry for an error code.
+ * Unknown codes fall back to the generic "Internal error" entry (index 1).
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+	/* Sentinel reached without a match */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a ntconnect module handler for the given PCI address.
+ * The module is prepended to the global module list and its PCI ID is
+ * recorded in the server's pci_id_list (unless it is the server's own
+ * fake 0000:00:00.0 address). Returns 0 on success, -1 on OOM.
+ *
+ * NOTE(review): the list insert and pci_id_list update are not guarded by
+ * any lock here — presumably all registrations happen single-threaded at
+ * init time; confirm against callers.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* NOTE(review): no uniqueness check of the module name is performed */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Prepend to the global module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the PCI ID once in the server's list */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill a sockaddr_un with the given filesystem path.
+ * Returns 0 on success, -1 if a pointer is NULL or the path does not fit.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!addr || !path)
+		return -1;
+	memset(addr, 0, sizeof(*addr));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+/* Internal status codes for the socket reader/writer helpers below */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll @fd for readability (up to @timeout ms, -1 = forever) and receive
+ * at most @len bytes into @data. On STATUS_OK, *recv_len holds the number
+ * of bytes actually received (may be less than @len). Any other STATUS_*
+ * value indicates timeout, retryable interruption, or a dead connection;
+ * *recv_len is untouched in those cases.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and not in an error state: try to receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly @length bytes into @data, retrying transient statuses.
+ * Returns STATUS_OK, or the first terminating status from read_data().
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t done = 0;
+
+	while (done < length) {
+		size_t chunk = 0;
+		int status = read_data(clfd, length - done, &data[done],
+				       &chunk, -1);
+
+		if (status == STATUS_OK) {
+			done += chunk;
+			continue;
+		}
+		if (status == STATUS_CONNECTION_CLOSED ||
+				status == STATUS_TIMEOUT) {
+			/* Silently return status */
+			return status;
+		}
+		if (status != STATUS_TRYAGAIN) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       status);
+			return status;
+		}
+		/* STATUS_TRYAGAIN: loop and retry */
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Send all @size bytes to @fd, continuing across short writes.
+ * A write() interrupted by a signal (EINTR) is retried instead of being
+ * reported as a failure. Returns 0 on success or STATUS_INTERNAL_ERROR.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue; /* interrupted by signal - retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from the client into a freshly allocated
+ * buffer returned via *rdata (caller frees). The first recv obtains at
+ * most MESSAGE_BUFFER bytes including the ntconn header; if the header
+ * announces a larger total (len + blob_len), the buffer is grown and the
+ * remainder is read with read_all().
+ *
+ * NOTE(review): when the total length fits inside MESSAGE_BUFFER but the
+ * first recv returned fewer than that many bytes, no further read is
+ * issued here — presumably requests this small always arrive in a single
+ * datagram-like chunk; confirm against the sender.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		/* Fetch the remainder of the oversized request */
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from @clfd and parse its "<pci_id>;<module>[;<cmd...>]"
+ * envelope. On success returns the registered module matching the PCI
+ * address and module name (NULL if none), sets *get_req to the raw request
+ * buffer (caller frees), *module_cmd to the command part within it, and
+ * *status to the read status. The header is copied into *hdr with its len
+ * reduced to the command payload length.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/* First field: the PCI address string */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			/* Check for NULL before strlen() - malformed input */
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			/* Second field: the module name */
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			/* Optional remainder: the module command */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "domain:bus:devid.function" from pci_id */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Prepend a ntconn header to @data and transmit header plus payload as
+ * one message. Returns 0 on success or a negative STATUS_* code.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	return res;
+}
+
+/*
+ * Send a reply and then return the payload to the owning module's
+ * free_data() callback (only when a payload was present).
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int status = send_reply(clfd, reply_tag, data, size);
+
+	if (size == 0)
+		return status;
+
+	pthread_mutex_lock(&cmod->mutex);
+	cmod->op->free_data(cmod->hdl, data);
+	pthread_mutex_unlock(&cmod->mutex);
+
+	return status;
+}
+
+/*
+ * Send an NTCONN_TAG_ERROR reply: a 32-bit binary error code followed by
+ * the error text. The "----" prefix reserves space for the code, which
+ * the memcpy() then overwrites in place.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	/* snprintf: bounded, cannot overflow err_buf for long error texts */
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+
+	memcpy(err_buf, &ntcerr->err_code, sizeof(uint32_t));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: loop reading requests, dispatching them to the
+ * addressed module, and sending replies until the connection dies. On exit
+ * the socket is closed and every module's client_cleanup callback is run.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		/* NOTE(review): this error reply is sent unconditionally,
+		 * i.e. also after a successful dispatch above — presumably
+		 * the client tolerates/expects the trailing status message;
+		 * confirm against the ntconnect client protocol.
+		 */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		/* NOTE(review): request is only freed here; the break paths
+		 * above leave the last buffer allocated until thread exit.
+		 */
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept loop for the ntconnect UNIX socket. Each accepted client gets a
+ * detached worker thread pinned to the configured CPU set.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)arg;
+
+	serv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       serv->serv_fd);
+#endif
+
+	if (listen(serv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (serv->running) {
+		int client_fd = accept(serv->serv_fd, NULL, NULL);
+
+		if (client_fd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)client_fd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &serv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Start the ntconnect service on the named UNIX socket, provided at least
+ * one module has registered. Creates the socket directory as needed,
+ * registers the built-in server module, binds the socket and spawns the
+ * accept loop pinned to @cpuset. Returns 0 on success or -1 on failure.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		/* Fail early if the path does not fit in sun_path */
+		if (unix_build_address(sockname, &addr) < 0) {
+			NT_LOG(ERR, NTCONNECT, "Invalid socket path: %s",
+			       sockname);
+			return -1;
+		}
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/* Protocol version advertised by the "version" command of this module */
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle passed to every command handler */
+static struct adap_hdl_s {
+	struct drv_s *drv;	/* owning driver instance */
+} adap_hdl[MAX_ADAPTERS];
+
+/* "get" sub-command handlers (defined below) */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* "set" sub-command handlers */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level command table for the adapter module
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Map an internal nt_link_speed_e value to the ntconnect PORT_LINK_SPEED_*
+ * code. Unrecognized values map to PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	static const struct {
+		enum nt_link_speed_e speed;
+		int port_speed;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, PORT_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, PORT_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, PORT_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, PORT_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, PORT_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, PORT_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, PORT_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, PORT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].speed == link_speed)
+			return speed_map[i].port_speed;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Parse a link-speed string ("10M" ... "100G") into a nt_link_speed_t.
+ * Unrecognized strings yield NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} name_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(name_map) / sizeof(name_map[0]); i++) {
+		if (strcmp(speed_str, name_map[i].name) == 0)
+			return name_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get,interfaces": build the full interface list reply — physical ports
+ * first (with LAG handling), then virtual ports. The reply is allocated
+ * into *data (caller frees via the module free callback), *len receives
+ * its size. Returns REQUEST_OK or REQUEST_ERR on allocation/driver error.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		/* NOTE(review): eth_dev is not NULL-checked here, unlike the
+		 * phy0 lookup above — presumably ports < n_phy_ports always
+		 * exist at this point; confirm.
+		 */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Clamp single-mode length to the 16-bit field maximum */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * ntconnect "adapter;get,info" handler.
+ *
+ * Allocates a reply blob of sizeof(struct ntc_adap_get_info_s) and fills it
+ * with the FPGA image identification string "TTT-PPPP-VV-RR" taken from the
+ * adapter's fpga_info. The caller owns *data and releases it through
+ * adap_free_data(). Returns REQUEST_OK, or REQUEST_ERR with *len = 0 when
+ * the allocation fails.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/*
+	 * NOTE(review): the hard-coded bound of 31 assumes
+	 * sizeof(struct ntc_adap_get_info_s) >= 31 -- confirm against the
+	 * struct definition in the ntconnect API header.
+	 */
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * ntconnect "adapter;get,sensors" handler.
+ *
+ * Builds a reply blob laid out as:
+ *   struct ntc_sensors_s header
+ *   followed by (adapter_sensors_cnt + sum(nim_sensors_cnt[])) packed
+ *   struct sensor records: adapter sensors first, then per-port NIM sensors.
+ * The caller owns *data and releases it through adap_free_data().
+ * Returns REQUEST_OK, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	/* NOTE(review): assumes adapter_name and p_dev_name are both at
+	 * least 24 bytes -- confirm against their declarations.
+	 */
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	/* Header first, then the packed sensor records right behind it */
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors (singly linked list walk) */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors, one list per physical port */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/* NOTE(review): copies at most 50 bytes without a
+			 * terminating NUL when the name is 50+ chars --
+			 * confirm the consumer treats name as fixed-width.
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable physical port @port_nr. Always succeeds. */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable physical port @port_nr. Always succeeds. */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/*
+ * Request link up on @portid; only logs when the link is already
+ * requested up. Always returns REQUEST_OK.
+ */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Request link down on @portid; only logs when the link is already
+ * requested down. Always returns REQUEST_OK.
+ */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of @portid from the textual @speed_str.
+ * The port must be administratively disabled first; otherwise a
+ * WRONG_LINK_STATE reply is returned. An unparseable speed string yields
+ * a MISSING_INVALID_PARAM error reply.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	/* Speed can only be changed while the port is disabled */
+	if (nt4ga_port_get_adm_state(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+	return REQUEST_OK;
+}
+
+/* Apply loopback @mode (an NT_LINK_LOOPBACK_* value) to port @portid. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Turn the transmitter laser of @portid on (@disable == false) or off.
+ * Returns a TX_POWER_FAIL reply if the hardware call reports an error.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(&adap->drv->ntdrv.adapter_info, portid,
+				disable) != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * ntconnect "adapter;set,interface,..." handler.
+ *
+ * On entry *data points at the zero-terminated remainder of the request,
+ * e.g. "port0,link_speed=10G"; it is tokenized in place. Supported
+ * sub-commands: link_speed=<speed>, enable, disable, link_state=up|down,
+ * host_loopback=on|off, line_loopback=on|off, tx_power=on|off.
+ * Returns REQUEST_OK or an ntconn error/status reply.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	/* First token must be "port<n>" */
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports (0 .. n_phy_ports-1).
+	 * BUGFIX: the comparison was inverted ('<'), which rejected every
+	 * valid physical port and let out-of-range port numbers through.
+	 */
+	if (port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	/* Second token selects the sub-command */
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Unknown sub-command or missing argument */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * ntconnect "adapter;set,adapter,..." handler.
+ * Placeholder: logs the command and reports NOT_YET_IMPLEMENTED.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * Module request entry point: dispatch @function through the
+ * adapter_entry_funcs table.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply blob allocated by one of the handlers above. */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; the adapter module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module descriptor handed to the NtConnect framework: name, version,
+ * request dispatcher, reply deallocator and client-cleanup callback.
+ */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register adapter @drv with the NtConnect framework.
+ * Claims the first free slot in adap_hdl[]; returns the result of
+ * register_ntconn_mod(), or -1 when all MAX_ADAPTERS slots are in use.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused handle slot */
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+/* Enables verbose request tracing in the flow handlers below */
+#define DEBUG_FLOW 1
+
+/* Protocol version announced for the "filter" ntconnect module */
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+/* Request-string tokens parsed by func_flow_setport() */
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One handle slot per connected ntconnect client */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Binding from an ntconnect port id to its flow device; populated by
+ * func_flow_setport() and consumed by the create/destroy/flush handlers.
+ */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/* Error-code to message table, terminated by the (uint32_t)-1 sentinel.
+ * Entry index 1 ("Internal error") doubles as the fallback for unknown
+ * codes -- see get_error_msg().
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Map @err_code to its message in ntconn_err[].
+ * Unknown codes fall back to entry 1 ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Build an error reply blob: status @err, FLOW_ERROR_GENERAL type and the
+ * message text for @code. Returns REQUEST_OK with *data/*len set, or
+ * REQUEST_ERR with *len = 0 when the allocation fails.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * BUGFIX: use strlcpy (as ntconn_flow_err_status does)
+		 * instead of a length-limited memcpy -- the memcpy never
+		 * copied the NUL terminator into the freshly malloc'd
+		 * buffer, leaving err_msg unterminated.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a generic error reply blob with status @err and the standard
+ * "Internal error" message. Returns REQUEST_OK with *data/*len set, or
+ * REQUEST_ERR with *len = 0 when the allocation fails.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *ret;
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	ret = (struct flow_return_s *)*data;
+	*len = sizeof(struct flow_return_s);
+	ret->status = err;
+	ret->type = FLOW_ERROR_GENERAL;
+	strlcpy(ret->err_msg, get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR),
+		ERR_MSG_LEN);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+/* Request handlers for the "filter" module, dispatched via the table below */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Command-name to handler dispatch table (NULL-terminated) */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Package @status and @error into a freshly allocated flow_return_s reply.
+ * On allocation failure *len is zeroed and REQUEST_ERR returned.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *ret;
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	ret = (struct flow_return_s *)*data;
+	ret->status = status;
+	ret->type = error->type;
+	strlcpy(ret->err_msg, error->message, ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * ntconnect "filter;setport,in_port=<n>,vpath=<path>" handler.
+ *
+ * Binds ntconnect port id <n> to the flow device of the virtio path
+ * instance <path> and replies with the list of hw queues attached to that
+ * VF. Fails (ENODEV reply) when the port is out of range, the VF is not
+ * ready, the adapter is not an inline-profile one, the VF has no queues,
+ * or the negotiated virtual port number does not match.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/*
+	 * BUGFIX: zero-initialize vpath -- when the "vpath=" token is
+	 * missing or malformed it was logged and passed to
+	 * vp_path_instance_ready() uninitialized.
+	 */
+	char vpath[MAX_PATH_LEN] = { 0 };
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "filter;flush,port=<n>" handler: remove all flows installed on
+ * the flow device bound to port <n>. Replies with the flow API status.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * BUGFIX: also reject negative ids -- atoi() may return a negative
+	 * value, and port_eth[port] would then be indexed out of bounds.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect "filter;destroy" handler: destroy a single flow. The binary
+ * request blob (struct destroy_flow_ntconnect) follows the text header at
+ * offset hdr->len and carries the port id and the opaque flow handle.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	/*
+	 * BUGFIX: initialize error like the other handlers do -- error.type
+	 * and error.message were copied into the reply below and could be
+	 * read uninitialized if flow_destroy() does not fill them in.
+	 */
+	set_error(&error);
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* BUGFIX: also reject negative ids to avoid port_eth[-n] access */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selector for make_flow_create(): validate only, or actually create. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Unmarshal a flow request received over ntconnect into the flow_elem /
+ * flow_action arrays expected by the flow API, then validate or create the
+ * flow depending on @func (FLOW_API_FUNC_VALIDATE / FLOW_API_FUNC_CREATE).
+ *
+ * Returns the new flow handle (0 for validate or on error); *status and
+ * *error carry the detailed result. The marshalled copies embed their data
+ * inline; the pointers set up here alias into @flow_cpy and the local
+ * encap/decap/vxlan scratch structures, which the flow API consumes before
+ * this function returns.
+ *
+ * BUGFIX: all array-bound checks below used '>' instead of '>=', allowing
+ * a write at index == array size, i.e. one element past the end of the
+ * stack arrays elem[], action[], elem_tun[] and the encap/decap item lists.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unpack the match elements (terminated by FLOW_ELEM_TYPE_END) */
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unpack the actions (terminated by FLOW_ACTION_TYPE_END) */
+	idx = -1;
+	do {
+		idx++;
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+/*
+ * ntconnect command handler: create a flow through the filter API.
+ *
+ * The request blob starts hdr->len bytes into *data and must be a
+ * struct create_flow_ntconnect.  On success *data is replaced by a
+ * malloc'ed struct create_flow_return_s carrying the new flow handle;
+ * when flow creation fails a struct flow_error_return_s describing the
+ * error is returned instead.  Both outcomes report REQUEST_OK; only an
+ * allocation failure yields REQUEST_ERR.
+ */
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject payloads whose size does not match the expected ABI. */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/* Pre-set a default error, as the sibling handlers do. */
+	set_error(&error);
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/*
+		 * Bug fix: the end-of-list test used flow_cpy[i].elem[i],
+		 * which indexed past the single request struct for i > 0.
+		 */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4: {
+			/* Dump addresses byte-wise in memory order. */
+			const unsigned char *sip = (const unsigned char *)
+				&flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip;
+			const unsigned char *dip = (const unsigned char *)
+				&flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip;
+			const unsigned char *smsk = (const unsigned char *)
+				&flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip;
+			const unsigned char *dmsk = (const unsigned char *)
+				&flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip;
+
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+			       sip[0], sip[1], sip[2], sip[3]);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+			       dip[0], dip[1], dip[2], dip[3]);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+			       smsk[0], smsk[1], smsk[2], smsk[3]);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+			       dmsk[0], dmsk[1], dmsk[2], dmsk[3]);
+			break;
+		}
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		/* Success: return the new flow handle to the client. */
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	/* Creation failed: report the flow_error back to the client. */
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect command handler: validate a flow definition without
+ * creating it.  The request layout is identical to func_flow_create;
+ * the validation status and any flow_error are returned to the client
+ * through copy_return_status().
+ *
+ * Fixes versus the original: the unreachable duplicate
+ * make_flow_create() block after the first return has been removed,
+ * the DEBUG_PARSING IPv4 dump no longer prints byte [0] four times,
+ * the end-of-list test no longer indexes flow_cpy[i] out of bounds,
+ * and the DEBUG_FLOW banner logs the correct function name.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject payloads whose size does not match the expected ABI. */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s\n", __func__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4: {
+			/* Dump addresses byte-wise in memory order. */
+			const unsigned char *sip = (const unsigned char *)
+				&flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip;
+			const unsigned char *dip = (const unsigned char *)
+				&flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip;
+			const unsigned char *smsk = (const unsigned char *)
+				&flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip;
+			const unsigned char *dmsk = (const unsigned char *)
+				&flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip;
+
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+			       sip[0], sip[1], sip[2], sip[3]);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+			       dip[0], dip[1], dip[2], dip[3]);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+			       smsk[0], smsk[1], smsk[2], smsk[3]);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+			       dmsk[0], dmsk[1], dmsk[2], dmsk[3]);
+			break;
+		}
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect command handler: query a flow (e.g. read an action's data
+ * such as counters).
+ *
+ * The request blob at hdr->len into *data must be a
+ * struct query_flow_ntconnect.  On success *data is replaced by a
+ * malloc'ed struct query_flow_return_s followed by `length` bytes of
+ * query output; REQUEST_ERR is returned only on allocation failure.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	/* The request payload follows the ntconnect header in *data. */
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject payloads whose size does not match the expected ABI. */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/* NOTE(review): the client-supplied 64-bit value is reinterpreted
+	 * as a flow_handle pointer here — confirm it is validated
+	 * upstream before being dereferenced by flow_query(). */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	/* Rebuild a flow_action whose conf points at the copied blob. */
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply layout: fixed return struct + `length` bytes of data. */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			/* flow_query transferred ownership of data_out. */
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Module entry point for incoming requests: resolve "function" against
+ * this module's command table and run the matching handler.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int ret = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len, 0);
+
+	return ret;
+}
+
+/*
+ * Release a reply buffer previously handed out by this module's
+ * handlers.  free(NULL) is a no-op, so no explicit guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup callback required by the ntconnapi_t interface.
+ * The flow module keeps no per-client state, so nothing is released. */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module descriptor handed to the NtConnect framework: name, version
+ * and the request/free/cleanup callbacks defined above. */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register one adapter instance with the NtConnect framework.
+ * Claims the first free slot in flow_hdl[]; returns the result of
+ * register_ntconn_mod() on success or -1 when all slots are taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int idx = 0;
+
+	/* Find the first unused slot. */
+	while (idx < MAX_CLIENTS && flow_hdl[idx].drv != NULL)
+		idx++;
+
+	if (idx == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[idx].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[idx],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client registration slots for this module. */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter error code to text mapping; the { -1, NULL } entry terminates
+ * the table for the lookup in get_error_msg(). */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/*
+ * Map an error code to its human readable text.  Codes below
+ * NTCONN_METER_ERR_INTERNAL_ERROR belong to the generic ntconnect
+ * range and are resolved there; anything else is looked up in this
+ * module's table, falling back to "Internal error" when unknown.
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	int idx;
+
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *err_msg = get_ntconn_error(err_code);
+
+		return err_msg->err_text;
+	}
+
+	for (idx = 0; ntconn_err[idx].err_code != (uint32_t)-1; idx++) {
+		if (ntconn_err[idx].err_code == err_code)
+			return ntconn_err[idx].err_text;
+	}
+
+	/* Unknown module code: report "Internal error" (table index 1). */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Command-name to handler table consumed by execute_function();
+ * NULL-terminated. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Fill an rte_mtr_error with the text corresponding to the given
+ * module error code.  The type is always "unspecified" and the cause
+ * is cleared.
+ */
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	const char *msg = get_error_msg(err);
+
+	error->cause = NULL;
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = msg;
+}
+
+/*
+ * ntconnect command handler: report the rte_mtr capabilities of the
+ * port derived from the "vport=<n>" request parameter.
+ *
+ * On success *data is replaced by a malloc'ed
+ * struct meter_capabilities_return_s; on a metering failure a
+ * struct meter_error_return_s is returned instead (both REQUEST_OK).
+ * Only allocation failure yields REQUEST_ERR.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse "vport=<n>" from the comma-separated request string. */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* NOTE(review): this bound (> 64) differs from the 4..128 range
+	 * accepted by func_meter_setup/func_meter_read — confirm which
+	 * limit is intended. */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is the low bit of the virtual port number. */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Range-check @cpy_data->id against @max_id and rebase it from the
+ * per-vport id space into the global id space.  @what names the object
+ * kind ("Profile", "Policy", "Meter") for the log message.  On error
+ * fills @error with @err_code and returns -EINVAL; returns 0 on
+ * success.  The emitted log text is identical to the former inline
+ * checks.
+ */
+static int meter_rebase_id(struct meter_setup_s *cpy_data, uint32_t max_id,
+			   uint32_t err_code, const char *what,
+			   struct rte_mtr_error *error)
+{
+	if (cpy_data->id > max_id) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: %s ID %u out of range. Max value is %u",
+		       what, cpy_data->id, max_id);
+		copy_mtr_error(error, err_code);
+		return -EINVAL;
+	}
+	cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+	return 0;
+}
+
+/*
+ * ntconnect command handler: add/delete meter profiles and policies
+ * and create/destroy meters.  The sub-command is the first token of
+ * the request string ("addpro", "delpro", "addpol", "delpol",
+ * "crtmtr", "delmtr"); the parameters arrive as a struct meter_setup_s
+ * blob at hdr->len into *data.  On success *data is replaced by a
+ * malloc'ed struct meter_return_s, otherwise by a
+ * struct meter_error_return_s; only allocation failure returns
+ * REQUEST_ERR.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Reject payloads whose size does not match the expected ABI. */
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* The first token of the request string selects the sub-command. */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_rebase_id(cpy_data, max_id,
+					 NTCONN_METER_ERR_PROFILE_ID,
+					 "Profile", &error);
+		if (status != 0)
+			goto error_meter_setup;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_rebase_id(cpy_data, max_id,
+					 NTCONN_METER_ERR_PROFILE_ID,
+					 "Profile", &error);
+		if (status != 0)
+			goto error_meter_setup;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_rebase_id(cpy_data, max_id,
+					 NTCONN_METER_ERR_POLICY_ID,
+					 "Policy", &error);
+		if (status != 0)
+			goto error_meter_setup;
+		/* Wire the per-color action lists into the policy. */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_rebase_id(cpy_data, max_id,
+					 NTCONN_METER_ERR_POLICY_ID,
+					 "Policy", &error);
+		if (status != 0)
+			goto error_meter_setup;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_rebase_id(cpy_data, max_id,
+					 NTCONN_METER_ERR_METER_ID,
+					 "Meter", &error);
+		if (status != 0)
+			goto error_meter_setup;
+		/* Rebase the referenced profile and policy ids as well. */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		status = meter_rebase_id(cpy_data, max_id,
+					 NTCONN_METER_ERR_METER_ID,
+					 "Meter", &error);
+		if (status != 0)
+			goto error_meter_setup;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * "meter read" request handler: read (and optionally clear) the statistics
+ * of a single meter via rte_mtr_stats_read().
+ *
+ * On entry *data points at the request; the meter_get_stat_s blob sits at
+ * offset hdr->len. On success *data is replaced with a malloc'ed
+ * meter_return_stat_s; on failure with a meter_error_return_s. The caller
+ * frees the reply through the module's free_data hook.
+ * Returns REQUEST_OK when a reply was built (even an error reply),
+ * REQUEST_ERR only when the reply itself could not be allocated.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The request blob must be exactly one meter_get_stat_s. */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Virtual ports live in the range [4;128] (ports 0-3 are physical). */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* NOTE(review): vport parity appears to select the physical port (0/1)
+	 * and the meter id is remapped into the vport's slice of the global
+	 * meter range - mirrors the mapping used by the setup handler.
+	 */
+	port = cpy_data->vport & 1;
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	/* Could not even allocate the error reply. */
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Entry point for all "meter" requests: dispatch the textual function
+ * name through this module's function table.
+ *
+ * Fix: client_id was annotated _unused although it is forwarded to
+ * execute_function(); the misleading annotation is removed.
+ */
+static int meter_request(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously handed out by this module.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; the meter module keeps no per-client state. */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table registered with the NtConnect framework for "meter". */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register this adapter's meter module with the NtConnect framework.
+ * Binds the driver to the first free local handle slot.
+ * Returns the result of register_ntconn_mod(), or -1 when all slots
+ * are occupied.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Locate the first unused handle slot. */
+	for (slot = 0; slot < MAX_CLIENTS && meter_hdl[slot].drv; slot++)
+		;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules.
+ * Each call registers one module for the given driver instance and
+ * returns 0 on success, -1 on failure.
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..437cf9ddad
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,877 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+/* FPGA statistics layout versions this module is able to decode. */
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Sections of a snapshot buffer, in storage order. */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One snapshot section: its location and size in 64-bit words. */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot record, kept in a singly linked list. */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;
+	struct snaps_s *next;
+};
+
+/* Module-global handle; one stat module instance per driver. */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic groups a request can select. */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Second dispatch level: "get snapshot <section>". */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* First dispatch level: "get <what>". */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Fill a stat reply blob with FlowMatcher (FLM) counters.
+ * val points to a buffer laid out as struct ntc_stat_get_data_s followed
+ * by nbc records. Returns the total payload size in 64-bit words.
+ *
+ * Fixes: the copy loop never advanced the output cursor, so every
+ * iteration overwrote record 0 (sibling read_colors/read_queues advance
+ * theirs); the memset zeroed sizeof the *source* struct instead of the
+ * nbc output records.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Only used to sanity-check that xstats names and values agree. */
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/*
+		 * NOTE(review): the source is a single struct (not indexed
+		 * by c); nbc is 1 in practice (see get_size) - confirm.
+		 */
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+			flm++;	/* advance to the next output record */
+		}
+	} else {
+		/* Zero all nbc output records. */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply blob with per-color counters.
+ * Returns the total payload size in 64-bit words.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *hdr = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *out;
+	int i;
+
+	hdr->nb_counters = (uint64_t)nbc;
+	hdr->timestamp = hwstat->last_timestamp;
+	hdr->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virtual and capture mode. */
+	out = (struct color_type_fields_s *)hdr->data;
+	for (i = 0; i < nbc; i++) {
+		out[i].pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		out[i].octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		out[i].tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Fill a stat reply blob with per-host-buffer (queue) counters.
+ * Returns the total payload size in 64-bit words.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *hdr = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *out;
+	int i;
+
+	hdr->nb_counters = (uint64_t)nbq;
+	hdr->timestamp = hwstat->last_timestamp;
+	hdr->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virtual and capture mode. */
+	out = (struct queue_type_fields_s *)hdr->data;
+	for (i = 0; i < nbq; i++) {
+		out[i].flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		out[i].drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		out[i].fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		out[i].dbs_drop_pkts =
+			hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		out[i].flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		out[i].drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		out[i].fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		out[i].dbs_drop_octets =
+			hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group shared by Rx and Tx port statistics from
+ * the driver's counter struct into a reply record.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Fill a stat reply blob with per-port Rx counters.
+ * The record layout differs between vswitch (virtual) and capture mode;
+ * is_virt in the header tells the client which one to decode.
+ * Returns the total payload size in 64-bit words.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* Virtual ports: small record with drop/QoS counters only. */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* Capture mode: full RMON group plus Rx-only counters. */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Fill a stat reply blob with per-port Tx counters.
+ * Layout depends on vswitch vs capture mode, mirroring read_rx_counters.
+ * Returns the total payload size in 64-bit words.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		/* Virtual ports: small record with drop/QoS counters only. */
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* Capture mode: RMON group with the packet total overridden
+		 * by the driver-maintained running total.
+		 */
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/* "get layout_version": reply with the statistics layout version (one int). */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(*reply));
+
+	if (!reply) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/* "get flm_layout_version": 1 for FLM stat versions below 18, else 2. */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(*reply));
+
+	if (!reply) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		/* NOTE(review): /2 presumably folds paired HW color counters
+		 * into one record - confirm against the FPGA layout.
+		 */
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		/* Record size depends on vswitch vs capture mode. */
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		/* Exactly one FLM record. */
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = nrec;
+	/* Every blob is prefixed by the STAT_INFO_ELEMENTS header words. */
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for all "get <counters>" handlers: size the reply,
+ * allocate it, and fill it via the given reader while holding the
+ * driver's statistics lock.
+ *
+ * Cleanup: removed the needless malloc() cast and moved the byte-size
+ * computation to size_t.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int nb_words = get_size(stat, stype, &nbg);
+	size_t nb_bytes = (size_t)nb_words * sizeof(uint64_t);
+	uint64_t *val = malloc(nb_bytes);
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Serialize against the driver's statistics updater. */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = (int)nb_bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": reply with the FlowMatcher counter blob. */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": reply with the per-color counter blob. */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": reply with the per-host-buffer counter blob. */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get rx_counters": reply with the per-port Rx counter blob. */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get tx_counters": reply with the per-port Tx counter blob. */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot record for client_id in the singly linked list.
+ * When parent is non-NULL it receives the predecessor node (NULL if the
+ * match is the list head or the list is empty). Returns NULL if no
+ * record exists for the client.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot record for client_id, creating one at the list
+ * head if none exists yet. Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *rec = find_client_snap_data(stat, client_id, NULL);
+
+	if (rec)
+		return rec;
+
+	rec = malloc(sizeof(struct snaps_s));
+	if (!rec)
+		return NULL; /* caller treats NULL as allocation failure */
+
+	rec->client_id = client_id;
+	rec->buffer = NULL;
+	rec->next = stat->snaps_base;
+	stat->snaps_base = rec;
+	return rec;
+}
+
+/*
+ * "snapshot": atomically capture all statistic groups for one client
+ * under the driver's statistics lock. The sections are stored back to
+ * back in one buffer and served later by the "get snapshot *" handlers.
+ *
+ * Fixes: the !snaps path jumped to err_out and unlocked a mutex that was
+ * never locked (undefined behavior per POSIX); err_out now also drops
+ * the partially filled buffer so stale data cannot be served afterwards.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/* The stat lock is NOT held here - do not go via err_out. */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	/* Drop any previous snapshot for this client. */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	/* The stat lock is held on every path that jumps here. */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	/* Invalidate the partial snapshot so stale data cannot be read. */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of a client's stored snapshot into a freshly
+ * allocated reply buffer.
+ *
+ * Fix: the allocation check tested the out-parameter pointer (!data)
+ * instead of the allocation result (!*data), so a failed malloc was
+ * never detected and memcpy would write through NULL.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	/* No snapshot has been taken for this client yet. */
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": serve the color section of the saved snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	int res = get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+
+	return res;
+}
+
+/* "get snapshot queues": serve the queue section of the saved snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	int res = get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+
+	return res;
+}
+
+/* "get snapshot rx_counters": serve the Rx port section of the snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	int res = get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+
+	return res;
+}
+
+/* "get snapshot tx_counters": serve the Tx port section of the snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	int res = get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+
+	return res;
+}
+
+/*
+ * Stat main request function: dispatch the textual function name
+ * through this module's function table.
+ *
+ * Fix: client_id was annotated _unused although it is forwarded to
+ * execute_function(); the misleading annotation is removed.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Free a reply buffer allocated by one of the request handlers. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Unlink and free the disconnecting client's snapshot record, if any. */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = hdl;
+	struct snaps_s *prev;
+	struct snaps_s *rec = find_client_snap_data(stat, client_id, &prev);
+
+	if (!rec)
+		return;
+
+	/* Unlink from the list (rec may be the head). */
+	if (prev)
+		prev->next = rec->next;
+	else
+		stat->snaps_base = rec->next;
+
+	free(rec->buffer);
+	free(rec);
+}
+
+/* Operations table registered with the NtConnect framework for "stat". */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for this driver with the NtConnect framework.
+ * Refuses to activate when the FPGA statistics layout version is not in
+ * layout_versions_supported[]. Returns 0 on success, -1 on failure.
+ *
+ * Fix: guard against a missing mp_nthw_stat before dereferencing it -
+ * every request handler in this module performs the same NULL check.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: hardware statistics not initialized. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-adapter handles; one slot per registered adapter (max MAX_CLIENTS). */
+static struct test_hdl_s {
+	struct drv_s *drv; /* owning driver instance; NULL means slot is free */
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Dispatch table for this module; terminated by an all-NULL sentinel. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo-test handler: validates the incoming blob located at
+ * (*data)[hdr->len], then returns a malloc'ed copy of the supplied test
+ * payload in *data / *len (freed later via test_free_data).
+ * On validation failure an error reply carrying 'status' is returned;
+ * REQUEST_ERR is returned only when no reply buffer could be allocated.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+
+	/* Reject negative counts before they enter unsigned size arithmetic */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* Success reply: echo the received test array back */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Error reply: report the failure status with an empty array */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Flow API function selectors.
+ * NOTE(review): not referenced anywhere in this file - possibly copied
+ * from the flow module; confirm before removing.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Entry point for requests addressed to the test module.
+ * Looks up the named function in adapter_entry_funcs and invokes it.
+ */
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int ret;
+
+	ret = execute_function(this_module_name, hdl, client_id, hdr, function,
+			       adapter_entry_funcs, data, len, 0);
+	return ret;
+}
+
+/*
+ * Free a reply buffer previously allocated by func_test.
+ * free(NULL) is a no-op, so no explicit NULL guard is needed.
+ */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* The test module keeps no per-client state, so there is nothing to free. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations table handed to the ntconnect framework on register. */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register this adapter with the NtConnect framework, using the first
+ * free slot in test_hdl.  Returns -1 when all slots are in use.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Locate the first unused adapter slot */
+	while (slot < MAX_CLIENTS && test_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[slot],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (15 preceding siblings ...)
  2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-31 13:51 ` Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (3 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
v10:
* Update FPGA register defines.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/* Bus types through which an FPGA module can be accessed. */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias for the first SPI bus */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/* Access semantics of an FPGA register. */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW, /* read/write */
+	REGISTER_TYPE_RO, /* read-only */
+	REGISTER_TYPE_WO, /* write-only */
+	REGISTER_TYPE_RC1, /* presumably read + clear-on-1 - confirm vs FPGA docs */
+	REGISTER_TYPE_MIXED, /* fields with differing access types */
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/*
+ * Static initialization record for one bit field of an FPGA register.
+ * Used as { field id, bit width, low bit position, reset value } in the
+ * generated nthw_fpga_*.c tables.
+ */
+struct nt_fpga_field_init {
+	int id; /* field identifier (from nthw_fpga_registers_defs.h) */
+	uint16_t bw; /* field width in bits */
+	uint16_t low; /* lowest bit position of the field in the register */
+	uint64_t reset_val; /* value the field takes after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/* Static initialization record for one FPGA register and its fields. */
+struct nt_fpga_register_init {
+	int id; /* register identifier */
+	uint32_t addr_rel; /* address relative to the module base */
+	uint16_t bw; /* register width in bits */
+	nt_fpga_register_type_t type; /* access type (RW/RO/WO/...) */
+	uint64_t reset_val; /* register value after reset */
+	int nb_fields; /* number of entries in 'fields' */
+	struct nt_fpga_register_init *fields; /* NOTE: see generated tables */
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/* Static initialization record for one FPGA module instance. */
+struct nt_fpga_module_init {
+	int id; /* module identifier */
+	int instance; /* instance number of this module */
+	int def_id; /* module definition id */
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id; /* bus the module is accessed through */
+	uint32_t addr_base; /* base address of the module on that bus */
+	int nb_registers; /* number of entries in 'registers' */
+	struct nt_fpga_register_init *registers; /* per-register descriptors */
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* One product parameter: id/value pair. */
+struct nt_fpga_prod_param {
+	const int param_id; /* parameter identifier */
+	const int param_value; /* parameter value */
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/* Top-level description of one supported FPGA image (product). */
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time; /* build timestamp */
+	int nb_prod_params; /* number of entries in 'product_params' */
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules; /* number of entries in 'modules' */
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..2d6a31b35f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+/*
+ * Generated register-layout tables.  Each entry follows
+ * struct nt_fpga_field_init: { field id, bit width, low bit, reset value }.
+ */
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+/*
+ * DBS (doorbell stack) TX-side register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * register rows { reg_id, reg_index, bit_width, access_type, reset_value,
+ * field_count, fields } -- layout inferred from usage (e.g. DBS_TX_CONTROL
+ * reset 66816 == 0x10500 matches AMS=5 at bit 8 plus UWS=8 at bit 13);
+ * confirm against fpga_model.h. Auto-generated for FPGA 9563_055_024 --
+ * do not hand-edit values.
+ */
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/* DBS register index table; RX-side field arrays are defined earlier in
+ * this file. Field counts match the array sizes above.
+ */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (flow matcher / learn module) register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value }
+ * (e.g. FLM_PRIO reset 269488144 == 0x10101010 matches FT0..FT3 = 1 at bits
+ * 4/12/20/28); confirm against fpga_model.h. Auto-generated for FPGA
+ * 9563_055_024 -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* 768-bit learn record layout (32 fields). */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* One 32-bit counter field per FLM_STAT_* statistics register below. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register index table; field counts match the array sizes above. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG (generator function, two instances "0"/"1") register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* NOTE(review): instance 1 resets STREAMID to 1 (instance 0 resets to 0). */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF (MAC feeder / inter-frame-gap control) register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY (QSFP module control/status pins for port 0/1) register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * CFG reset 170 == 0xAA sets the *_INT_B and *_MODPRS_B (active-low) bits.
+ * Confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU (header field update) recipe table register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 155-bit recipe record (31 fields). */
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (PCIe host interface) register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * PROD_ID_LSB reset 626734872 == (9563 << 16) | (55 << 8) | 24, i.e.
+ * group 9563, version 55, revision 24 -- matching the 9563_055_024 image.
+ * Confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1693228548 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* Scratch/test registers; resets are fixed bit patterns (0x11223344 etc.). */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 929302248 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 2904641880 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 55459253 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 4051580681 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1693228548, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 929302248, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 2904641880, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 55459253, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 4051580681, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH (hash/RSS) recipe table register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 743-bit recipe record (23 fields). */
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST (header stripper) recipe table register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 133-bit recipe record (17 fields). */
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR (IP fragmenter) recipe table register map.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * confirm against fpga_model.h. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC (I2C controller) field definitions.
+ * Field rows appear to be { field_id, bit_width, lsb_offset, reset_value };
+ * NOTE(review): bit layout and names resemble the Xilinx AXI IIC core
+ * (CR/SR/ISR/IER, RX/TX FIFO, timing registers) -- confirm against the
+ * controller documentation. Auto-generated -- do not hand-edit values.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1693228548 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpga_type_id */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1693228548, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules,
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicators - keep these as the last numbered entries;
+ * only aliases may be added after this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Number of known module ids - must stay equal to MOD_UNKNOWN_MAX. */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..3948ed3ae3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM: Flow Matcher - learn/unlearn, statistics and scrub registers */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF: Generic MAC Feeder - TX feed with IFG control and TS injection */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP: GPIO control/status for SFP+ module signals (ABS, RS, RXLOS, TXDISABLE, TXFAULT) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU: Header Field Update - length/TTL rewrite recipe registers */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF: Host Interface - PCIe config, product ID and test registers */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH: Hasher - RSS/Toeplitz hash recipe registers */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST: Header Stripper - strip/modify recipe registers */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC: I2C controller (AXI IIC-style register set) */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM: Key Matcher - CAM and key-extraction recipe registers */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO: no registers defined for this module in this FPGA image */
+/* MAC: Ethernet MAC module - link summary, statistics, RAW/TFG generator IDs */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10: 10G MAC variant (XAUI) - configuration and address register IDs */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100: MAC variant with HADP path (100G, per module name) - register and field IDs */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G: 10G MAC - configuration and MAC-address register IDs */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G: 1G MAC - configuration and MAC-address register IDs */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS: Physical Coding Sublayer - PCS/FEC/GTY transceiver register and field IDs */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV: XXV (10G/25G) PCS variant - auto-negotiation and ability register IDs */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_MASTER (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SLAVE (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM — packet/pointer memory manager: host-buffer sizes, RX write / TX read pointer banks (expansion inferred from RXWP/TXRP register names; confirm against Napatech FPGA docs) */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 — IEEE 1588 PTP time-sync block: TX/RX timestamping, packet generator, MAC MDIO host access */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM — queue manager: per-queue/group limits, priority levels, SDRAM block/cell usage status */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI — Xilinx AXI Quad SPI controller (register set matches PG153: SRR, CR, SR, DTR, DRR, SSR, TX/RX_FIFO_OCY, DGIE, IER, ISR) */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP — register-bus to DRP bridge (Xilinx transceiver Dynamic Reconfiguration Port access; inferred from ADR/DATA/WREN/DONE fields — verify) */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 — NOTE(review): generated index values jump from 6521 to 6598 here; gap appears intentional (generator-reserved range) but confirm */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+/*
+ * NOTE(review): auto-generated register/field index constants for the ntnic
+ * FPGA register map. The parenthesized values are sequential lookup indices
+ * into the FPGA model tables, not hardware addresses or bit positions — they
+ * form one unbroken sequence across all RST95xx variants, so never renumber
+ * or reorder by hand; presumably regenerated from the FPGA model sources
+ * (confirm with the generator before editing).
+ */
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506: no register defines in this map (variant placeholder) */
+/* RST9507: no register defines in this map (variant placeholder) */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509: no register defines in this map (variant placeholder) */
+/* RST9510: no register defines in this map (variant placeholder) */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513: no register defines in this map (variant placeholder) */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520: no register defines in this map (variant placeholder) */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523: no register defines in this map (variant placeholder) */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528: no register defines in this map (variant placeholder) */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_MASTER_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_MASTER_ID (9363L)
+#define TSM_NTTS_EXT_STAT_MASTER_REV (9364L)
+#define TSM_NTTS_EXT_STAT_MASTER_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 2/8] net/ntnic: adds core registers and fpga functionality
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-08-31 13:51   ` Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v9:
* Add missing header
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+#include <stdint.h> /* uint8_t - keep this header self-contained */
+
+/* Identifier of a single RAB bus instance (presumably "Register Access
+ * Bus", cf. nthw_rac.c - TODO confirm); 8 bits is sufficient for the
+ * number of bus instances exposed by the FPGA.
+ */
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+/* NOTE(review): placeholder definitions - the count is zero-initialized and
+ * the table pointer is NULL until a real Si5340 register profile is added
+ * (presumably in a later commit of this series - confirm). Consumers must
+ * check n_data_si5340_nt200a02_u23_v5 before dereferencing the table.
+ */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+/* Message text for profile-table size checks (presumably used with a
+ * static/compile-time assert by the profile tables - confirm at use site).
+ */
+#define clk_profile_size_error_msg "size test failed"
+
+/* Format 0 table entry: 8-bit register address, value, and a mask
+ * (mask presumably selects which bits to update - confirm in the writer).
+ */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Format 1 table entry: 16-bit register address, full 8-bit value, no mask. */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Format 2 table entry: wide (int-sized) register address, 8-bit value. */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Tag describing which of the three table layouts a clock profile uses. */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/* Si5340 clock profile for NT200A02 (2x40G / 2x100G); the table and its
+ * entry count are defined in nthw_clock_profiles.c.
+ */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+/* Umbrella header: pulls in every nthw core component driver so users can
+ * include a single header instead of each module individually.
+ */
+
+#include "nthw_helper.h"
+
+/* Platform, FPGA model and host-interface / PCIe access layers. */
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+/* PHY/MAC related modules. */
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+/* SPI master/slave controllers. */
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+/* Time stamp module. */
+#include "nthw_tsm.h"
+
+/* Si5340 clock generator. */
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Read the FPGA product parameters (NIM/port/quad counts) into
+ * p_fpga_info and derive the adapter profile from product flags.
+ * Always returns 0; parameters missing from the FPGA read back as -1.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	/* -1 is the "parameter not present" default */
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one byte from I2C device n_dev_addr / register n_reg_addr via the
+ * IIC FPGA module instance n_instance_no.
+ * Returns the byte value (0..255) on success, -1 on init or read error.
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	/* 8: I2C cycle time in ns (125 MHz ~ 8 ns) -- see sibling callers */
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+/*
+ * Write one byte to I2C device n_dev_addr / register n_reg_addr via the
+ * IIC FPGA module instance n_instance_no.
+ * Returns 0 on success, -1 on init or write error.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	/* 8: I2C cycle time in ns (125 MHz ~ 8 ns) */
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Scan the I2C bus behind every IIC module instance in the inclusive
+ * range [n_instance_no_begin, n_instance_no_end], logging found devices.
+ * A fresh IIC object is created and destroyed per instance. Returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				/* timeout -1 (default), 100/100 poll delays, 3+3 retries */
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Probe an I2C-attached Silicon Labs clock synthesizer and conclude the
+ * part number from its ID registers.
+ * Returns 5340, 5341 or 5338 when recognized, -1 otherwise.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	/* stays all-ones in the log below if the device read fails */
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* pack the 8 ID bytes big-endian into 'ident' for logging */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			/*
+			 * NOTE(review): '38' is decimal here while the sibling
+			 * comparisons use hex (0x53/0x40/0x41) -- confirm the
+			 * intended Si5338 ID byte value (0x38 vs 38).
+			 */
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t seed = 0;	/* XMODEM variant: initial value 0 */
+
+	while (length--) {
+		/*
+		 * Standard table-less per-byte update, equivalent to
+		 * shifting in 8 bits with polynomial 0x1021.
+		 */
+		seed = (uint16_t)(seed >> 8 | seed << 8);
+		seed = (uint16_t)(seed ^ *buffer++);
+		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
+		seed = (uint16_t)(seed ^ seed << 8 << 4);	/* ^= seed << 12 */
+		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1);	/* ^= (seed & 0xff) << 5 */
+	}
+	return seed;
+}
+
+/*
+ * Probe the board-management AVR over SPI V3 (instance n_instance_no)
+ * and populate p_fpga_info->nthw_hw_info with firmware/hardware/VPD data:
+ * SPI and FW versions, platform id string, HW revision (SYSINFO_2 with a
+ * SYSINFO fallback) and the VPD block (PN/PBA/SN, board name, feature
+ * mask and MAC address range, CRC-16 checked).
+ * Returns the result of the last SPI transfer performed (0 on success).
+ */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		/* All data gathered from the AVR during this probe */
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		/* NOTE(review): res is not checked here; u32 stays 0 on failure */
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		/*
+		 * NOTE(review): the version bytes are printed with %c -- confirm
+		 * they are ASCII characters rather than binary numbers.
+		 */
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		/* force NUL-termination of the copied platform id string */
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		/*
+		 * rx_buf.size presumably holds the actual reply length after the
+		 * transfer -- verify against nthw_spi_v3_transfer().
+		 */
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				/* cross-check the two independently reported SPI versions */
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO -- fallback for AVRs without SYSINFO_2 */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				/* legacy SYSINFO: populate both hw_id fields */
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			/* last two bytes of the VPD block hold its CRC-16 */
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					/* mask valid iff it equals the bitwise NOT of its shadow */
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					/*
+					 * Pack the 6 MAC octets into a u64; the ntohl
+					 * comparison swaps the 32-bit halves on
+					 * little-endian hosts only.
+					 */
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+/*
+ * Initialize the Si5340 clock synthesizer via I2C and program it with a
+ * format-2 clock profile (register address/value records).
+ * Returns the result of nthw_si5340_config_fmt2().
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	/*
+	 * NOTE(review): p_nthw_iic is not deleted here -- confirm that
+	 * nthw_si5340_delete() takes ownership of it, otherwise it leaks.
+	 */
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+/*
+ * Top-level FPGA bring-up: read and decode the FPGA identity registers,
+ * look up the matching FPGA model, initialize the RAC, run the
+ * adapter-specific reset sequence, then probe PCIe3/HIF and start the
+ * TSM timers.
+ * Returns 0 on success, non-zero on any failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		/* Read and decode the FPGA identity words at offsets 0x0/0x8/0x10 */
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		/* NOTE(review): shadows the function-scope n_fpga_ident above */
+		uint64_t n_fpga_ident;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	n_fpga_ident = p_fpga_info->n_fpga_ident;
+
+	/* Look up a supported FPGA model matching the identity read above */
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	/* Register access (RAC) module is mandatory */
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Run the adapter-family specific reset/bring-up sequence */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Prefer PCIe3; fall back to HIF when the PCIe3 module is absent */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	/* Time stamp module: set TS format and start the two periodic timers */
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down the FPGA by resetting the register access bus (RAB).
+ *
+ * @param p_fpga_info adapter state; may be NULL
+ * @return RAB reset result (0 on success), or -1 if info/RAC is absent
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	/* single NULL check; the previous nested re-check of p_fpga_info was redundant */
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the nthw FPGA bring-up and probe helpers */
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* FPGA life cycle: bring-up and shutdown */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* Read product parameters and derive the adapter profile */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* Probe the board-management AVR and populate HW/VPD info */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* I2C helpers: bus scan and single-byte access via an IIC module instance */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* Detect a Silicon Labs clock chip; returns e.g. 5340/5341/5338 or -1 */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* Program an Si5340 with a format-2 clock profile */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x family bring-up: run the common reset sequence, then the
+ * FPGA-product specific reset (currently only product 9563).
+ * Returns 0 on success, non-zero error code otherwise.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res = -1;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve all RST9563 register/field handles needed by the NT200A0x reset
+ * sequence and record the FPGA product/version/revision in @p.
+ *
+ * Fields that do not exist on the 9563 FPGA are explicitly set to NULL so
+ * the common reset code can test for their presence before use.
+ *
+ * Returns 0 on success, -1 if the RST9563 module instance is not found.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* Optional fields: query (not get) so absence is not fatal */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	/* NOTE(review): always true here (field set to NULL just above);
+	 * log kept for parity with platforms where the field exists
+	 */
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	/* NOTE(review): always true here (field set to NULL just above) */
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the peripheral reset bit of the RST9563 module (set then clear).
+ * Returns 0 on success, -1 if the RST9563 module instance is not found.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_reg_rst;
+	nt_field_t *p_fld_rst_periph;
+
+	if (!p_mod_rst)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+	p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+	p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+	/* assert, then de-assert the peripheral reset */
+	field_set_flush(p_fld_rst_periph);
+	field_clr_flush(p_fld_rst_periph);
+
+	return 0;
+}
+
+/*
+ * Initialize the clock synthesizer found during probing.
+ * Only the Si5340 (model 5340) is supported on this platform.
+ * Returns 0 on success, -1 for unsupported synth models.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Product-specific (9563) reset entry point for NT200A0x.
+ *
+ * Executes, in order: peripheral reset, clock synth init, RST9563 field
+ * setup, then the common NT200A0x reset sequence. The order matters: the
+ * clock synth must be running before the common reset waits for MMCM locks.
+ *
+ * Expects @p_rst to have been populated by nthw_fpga_rst_nt200a0x_init()
+ * (clock synth model/address are read from it).
+ *
+ * Returns 0 on success, non-zero on the first failing step.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model;
+	uint8_t n_si_labs_clock_synth_i2c_addr;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+	n_si_labs_clock_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	n_si_labs_clock_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+
+	/* Step 1: pulse the peripheral reset */
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 2: program the clock synthesizer detected earlier */
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_si_labs_clock_synth_model,
+						n_si_labs_clock_synth_i2c_addr);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 3: resolve all RST9563 register/field handles into p_rst */
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	/* Step 4: run the common NT200A0x reset sequence */
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+/* 7-bit I2C addresses used to probe for the U23 clock synthesizer model
+ * (Si5338 on old NT200A01 HW-build1, Si5340 on NT200A02 / HW-build2)
+ */
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+/*
+ * Wait until DDR4 PLL LOCKED, then the DDR4 MMCM and (when present) the
+ * TSM REF MMCM; finally clear all sticky MMCM/PLL unlock bits and verify
+ * the DDR4 ones did not re-assert.
+ *
+ * The PLL wait retries with a DDR PLL reset pulse in between; the retry
+ * count gives a total timeout of 1 * 5 + 5 * 8 = 45 sec. It has been
+ * observed that at least 21 sec can be necessary.
+ *
+ * Returns 0 on success, -1 if an MMCM fails to lock.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	int retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* The TSM REF MMCM lock field is not present on all FPGAs */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds.
+ *
+ * The SDC module is optional: if probing fails, only the DDR4 PLL lock is
+ * awaited. On calibration failure the DDR PLL is reset and calibration is
+ * retried up to n_retry_cnt_max times.
+ *
+ * Returns 0 on success, non-zero on timeout/failure.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				/* Only query SDC state if a handle exists */
+				if (p_nthw_sdc) {
+					uint64_t n_result_mask;
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								    &n_result_mask);
+
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08" PRIX64 " state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Common NT200A0x FPGA reset sequence.
+ *
+ * Asserts resets on all domains/modules (except peripherals), selects the
+ * timesync reference clock, then de-asserts resets in dependency order and
+ * waits for the associated MMCMs/PLLs to lock, verifying sticky unlock bits
+ * along the way. @p must have been populated by the product-specific setup
+ * (field pointers may be NULL for fields absent on a given FPGA; each such
+ * field is NULL-checked before use).
+ *
+ * The statement order is significant (hardware bring-up sequencing); do not
+ * reorder steps.
+ *
+ * Returns 0 on success, -1 on lock failure or sticky-unlock detection.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* NOTE(review): "(true) &&" is a constant leftover guard */
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	/* NOTE(review): "(true) &&" is a constant leftover guard */
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): intentionally disabled block (if (false)) -
+	 * PTP MMCM bring-up kept for reference; confirm whether it should
+	 * ever be enabled or be removed
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Prepare the NT200A0x reset context: reset/setup the RAB buses, probe the
+ * AVR and IIC buses, then detect the clock synthesizer model (Si5340 first,
+ * falling back to Si5338 for old NT200A01 HW-build1) and record the model,
+ * I2C address and HW id in @p_rst for the later reset sequence.
+ *
+ * Returns 0-ish (see note below) on success, -1 when no clock synth is
+ * detected.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): the probe result is overwritten by the scans below,
+	 * and the function returns the last scan's result - presumably
+	 * best-effort probing; confirm this is intentional
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Record detection results for the product-specific reset code */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset/clock state for an NT200A0x FPGA: identification of the FPGA
+ * image and clock synth, plus pointers to the RST module's register
+ * fields and optional per-HW-variant reset callbacks.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	/* FPGA image identification */
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Clock synthesizer detected by nthw_fpga_rst_nt200a0x_init() */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional per-HW-variant reset hooks; may be NULL when unsupported */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate and zero-initialize a GPIO_PHY instance.
+ * Returns NULL when allocation fails; free with nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc() zeroes the object and checks the size product for overflow */
+	nthw_gpio_phy_t *p = calloc(1, sizeof(nthw_gpio_phy_t));
+
+	return p;
+}
+
+/*
+ * Release a GPIO_PHY instance previously returned by nthw_gpio_phy_new().
+ * Accepts NULL.
+ */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub stale register/field pointers before freeing */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY instance to FPGA module instance @n_instance and
+ * resolve its CFG/GPIO register fields for both ports.
+ *
+ * When @p is NULL the call only probes for module presence and returns
+ * 0 if the instance exists, -1 otherwise.  Returns 0 on success, -1 if
+ * the module instance does not exist.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	/* Probe-only mode: report presence without touching state */
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) - NULL when absent */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) - NULL when absent */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) - NULL when absent */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) - NULL when absent */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* Refresh the shadow copy of the CFG register */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Report whether the LPMODE GPIO pin of port @if_no reads as asserted. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* Report whether the port interrupt is asserted (INT_B pin is active-low). */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* Report whether the port is held in reset (RESET_B pin is active-low). */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* Report whether a transceiver module is plugged in (MODPRS_B active-low). */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" */
+	return !field_get_updated(p->mpa_fields[if_no].gpio_mod_prs);
+}
+
+/*
+ * Report whether the PLL interrupt pin is asserted.
+ * Returns false on HW without a PLL_INTR pin (no INTR from the SyncE
+ * jitter attenuator).
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	fld = p->mpa_fields[if_no].gpio_pll_int;
+	if (!fld)
+		return false;
+
+	return field_get_updated(fld) != 0;
+}
+
+/*
+ * Report the emulated RXLOS state of port @if_no.
+ * Returns false on HW without the RXLOS field.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_port_rxlos;
+	if (!fld)
+		return false;
+
+	return field_get_updated(fld) != 0;
+}
+
+/* Drive the LPMODE pin of port @if_no to @enable and make it an output. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Drive the RESET_B pin of port @if_no and make it an output.
+ * RESET_B is active-low: enable => clear the pin, disable => set it.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/*
+ * Set/clear the emulated RXLOS pin of port @if_no.
+ * Silently a no-op on HW without the RXLOS field.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_port_rxlos;
+	if (!fld)
+		return;
+
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Restore the default pin directions for port @if_no: all monitored pins
+ * become inputs; the emulated RXLOS pin (when present) becomes an output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	field_set_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_int); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_reset); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_mod_prs); /* enable input */
+	if (p->mpa_fields[if_no].cfg_port_rxlos)
+		field_clr_flush(p->mpa_fields[if_no].cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
+/*
+ * Per-port field handles: CFG_* fields control pin direction,
+ * GPIO_* fields carry the pin values.  Optional fields (PLL interrupt,
+ * RXLOS) are NULL on HW that lacks them.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* One GPIO_PHY FPGA module instance serving up to two transceiver ports. */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields, one entry per port */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate and zero-initialize a HIF instance.
+ * Returns NULL when allocation fails; free with nthw_hif_delete().
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc() zeroes the object and checks the size product for overflow */
+	nthw_hif_t *p = calloc(1, sizeof(nthw_hif_t));
+
+	return p;
+}
+
+/*
+ * Release a HIF instance previously returned by nthw_hif_new().
+ * Accepts NULL.
+ */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub stale register/field pointers before freeing */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a HIF instance to FPGA module instance @n_instance, resolve its
+ * registers/fields and read the FPGA product identification.
+ *
+ * When @p is NULL the call only probes for module presence and returns
+ * 0 if the instance exists, -1 otherwise.  Returns 0 on success, -1 if
+ * the module instance does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	/* Probe-only mode: report presence without touching state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Reg/Fld not present on HIF.
+	 * (The original always-false if/else chains here were dead code -
+	 * all branches assigned NULL.)
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* Statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* Optional status register: WR_ERR/RD_ERR may be absent */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		/* HIF_STATUS absent: re-querying it could only yield NULL again */
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Tune PCIe gen3 settings exposed through HIF_CONFIG: clamp the maximum
+ * read-request size to 512B (index 2) and enable extended tags.
+ * Skipped entirely on NT40E3 adapters.  Always returns 0.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		/* Extended tags allow more outstanding PCIe requests */
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+/*
+ * Trigger the TSM sample-time mechanism by writing the magic value to
+ * the HIF SAMPLE_TIME field.
+ * Returns 0 on success, -1 when the FPGA image has no SAMPLE_TIME field
+ * (nthw_hif_init() leaves mp_fld_sample_time NULL in that case).
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw PCIe statistics counters and the constants needed to
+ * convert them into rates.  Optional status outputs (tags-in-use,
+ * read/write errors) report 0 when the FPGA lacks the fields.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* Conversion constants for the caller's rate computation */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* Optional fields: default to zero when absent */
+	*p_tags_in_use = 0;
+	*p_rd_err = 0;
+	*p_wr_err = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/*
+ * Convert the raw PCIe counters into RX/TX byte rates relative to the
+ * reference-clock tick count.  The read/write error outputs accumulate:
+ * they are incremented (not overwritten) when errors are reported.
+ * Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx_cnt = 0, n_tx_cnt = 0, n_ref_clk = 0;
+	uint32_t n_unit_size = 0, n_ref_freq = 0;
+	uint64_t n_tags = 0, n_rd_err = 0, n_wr_err = 0;
+
+	nthw_hif_get_stat(p, &n_rx_cnt, &n_tx_cnt, &n_ref_clk, &n_unit_size,
+			&n_ref_freq, &n_tags, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (n_ref_clk == 0) {
+		/* No time base available - report idle */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk;
+	/* rate = counter * unit-size * ref-frequency / elapsed ticks */
+	*p_pci_rx_rate = ((uint64_t)n_rx_cnt * n_unit_size * n_ref_freq) /
+			 (uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx_cnt * n_unit_size * n_ref_freq) /
+			 (uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/* Enable statistics collection and request a sample (ENA=1, REQ=1). */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while latching a final sample (ENA=0, REQ=1). */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample PCIe statistics over a fixed 100 ms window and convert the
+ * counters into rates.  NOTE: blocks the caller for the whole window.
+ * Always returns 0.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill @epc with the current PCIe endpoint rates and error state.
+ * Always returns 0.
+ *
+ * NOTE(review): cur_tx is passed in the RX-rate slot and cur_rx in the
+ * TX-rate slot of nthw_hif_get_stat_rate() - verify whether this
+ * host-vs-endpoint perspective swap is intentional.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * One HIF (host interface) FPGA module instance: register/field handles
+ * plus the FPGA product identification read at init time.  Pointers for
+ * registers/fields not present on a given FPGA image are NULL.
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* Product identification */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt registers (not present on HIF - kept NULL) */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	/* TSM sample time trigger */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional status (WR/RD error, tags in use) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* Statistics control and counters */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* PCIe test registers */
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* PCIe gen3 tuning (HIF_CONFIG) */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA id read from PROD_ID registers at init */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	/* HIF reference clock: period in ps and derived frequency in Hz */
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe endpoint error counters by severity class. */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/*
+ * Sampled counters/rates for one PCIe endpoint, including traffic
+ * generator parameters and pre/post/delta error snapshots for both
+ * directions (root complex <-> endpoint).
+ */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	/* Traffic generator parameters */
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	/* Latest sampled rates and time base */
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+/* Counter sets for the primary and slave PCIe endpoints. */
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)	/* R/W bit value: I2C write transfer */
+#define I2C_TRANSMIT_RD (0x01)	/* R/W bit value: I2C read transfer */
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvell 88E1111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns - START setup time */
+static const uint32_t susto = 4000; /* ns - STOP setup time */
+static const uint32_t hdsta = 4000; /* ns - START hold time */
+static const uint32_t sudat = 250; /* ns - data setup time */
+static const uint32_t buf = 4700; /* ns - bus free time between transfers */
+static const uint32_t high = 4000; /* ns - SCL high period */
+static const uint32_t low = 4700; /* ns - SCL low period */
+static const uint32_t hddat = 300; /* ns - data hold time */
+
+/*
+ * Pulse the CR TXFIFO_RESET bit (assert then deassert) to flush any
+ * pending bytes from the transmit FIFO.  Always returns 0.
+ */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	/* Sync the register shadow before modifying it. */
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	/* Assert the reset bit ... */
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	/* ... and release it again. */
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Queue one entry in the TX_FIFO register: the data byte plus optional
+ * START/STOP condition flags.  All three fields are staged in the register
+ * shadow and committed to hardware by the single register_flush() at the
+ * end.  Always returns 0.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the RX FIFO data register into *p_data; returns 0. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	const uint32_t n_val = field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	*p_data = (uint8_t)n_val;
+
+	return 0;
+}
+
+/*
+ * Soft-reset the I2C controller by writing the reset key 0x0A to the
+ * SOFTR register.  Always returns 0.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	/* NOTE(review): this syncs the CR_EN shadow, not SOFTR, before the
+	 * key write - presumably intentional; confirm against the design.
+	 */
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Set the CR EN bit to enable the I2C controller.  Always returns 0. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read the SR BB (bus busy) bit; *pb_flag is true while a transfer runs. */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Read the SR RXFIFO_EMPTY bit; *pb_flag is true when no data waits. */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * Program the eight I2C timing registers.
+ * n_iic_cycle_time is the controller core clock cycle time in ns,
+ * i.e. 125MHz = 8ns.  Each nanosecond constant is converted into a
+ * count of core clock cycles before being written.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		uint32_t n_ns;		/* required minimum time in ns */
+		nt_field_t *p_fld;	/* destination timing field */
+	} a_timings[] = {
+		{ susta, p->mp_fld_tsusta }, { susto, p->mp_fld_tsusto },
+		{ hdsta, p->mp_fld_thdsta }, { sudat, p->mp_fld_tsudat },
+		{ buf, p->mp_fld_tbuf },     { high, p->mp_fld_thigh },
+		{ low, p->mp_fld_tlow },     { hddat, p->mp_fld_thddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timings) / sizeof(a_timings[0]); i++) {
+		uint32_t val = a_timings[i].n_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timings[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/* Allocate a zero-initialized IIC instance; NULL on allocation failure. */
+nthw_iic_t *nthw_iic_new(void)
+{
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Bind an nthw_iic instance to IIC module 'n_iic_instance' of 'p_fpga',
+ * look up all register/field handles, then soft-reset, enable and
+ * (if n_iic_cycle_time is non-zero) time-configure the controller.
+ *
+ * If p is NULL only module presence is probed: returns 0 when the module
+ * exists, -1 otherwise.  With a valid p, returns 0 on success and -1 when
+ * the module instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All-negative arguments select the built-in retry defaults. */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Timing registers (TSUSTA .. THDDAT). */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control register fields. */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	/* Status register fields. */
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* TX/RX FIFO registers. */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Scrub and free an instance; a NULL pointer is tolerated. */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Configure the polling/retry policy.  A negative argument selects the
+ * built-in default for that parameter.  Always returns 0.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry = (n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry = (n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read data_len bytes from device dev_addr / register reg_addr into p_void,
+ * retrying the whole transfer up to mn_read_data_retry times.
+ * Returns 0 on success, -1 when the retry budget is exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+	uint8_t *p_data = (uint8_t *)p_void;
+	int n_retries_left = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	/* Retry the transfer until it succeeds or the budget runs out. */
+	for (;;) {
+		if (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, p_data) == 0)
+			break;
+
+		n_retries_left--;
+		if (n_retries_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_retries_left,
+		       dev_addr, reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len,
+		       n_retries_left);
+	}
+
+	return 0;
+}
+
+/*
+ * Read 'data_len' bytes from register 'reg_addr' of I2C device 'dev_addr'
+ * into p_byte.
+ *
+ * Sequence: address the device for write (START) and send the register
+ * address, then re-address it with the RD bit (repeated START), queue the
+ * byte count with STOP, and drain the RX FIFO.
+ *
+ * Returns 0 on success, -1 when the bus or RX data never became ready.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	/* Guard clause: fail fast when the bus does not become idle. */
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read one byte from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write data_len bytes from p_void to device dev_addr / register reg_addr,
+ * retrying the whole transfer up to mn_write_data_retry times.
+ * Returns 0 on success, -1 when the retry budget is exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t *p_data = (uint8_t *)p_void;
+	int n_retries_left = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+
+	/* Retry the transfer until it succeeds or the budget runs out. */
+	for (;;) {
+		if (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, p_data) == 0)
+			break;
+
+		n_retries_left--;
+		if (n_retries_left <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_retries_left,
+		       dev_addr, reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Write 'data_len' bytes from p_byte to register 'reg_addr' of I2C device
+ * 'dev_addr'.  Returns 0 on success, -1 on zero length or bus not ready.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	/* All bytes but the last are queued without the STOP flag. */
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			/* NOTE(review): unbounded wait - if the bus never
+			 * returns to ready this loop spins forever; consider
+			 * a bounded retry as in nthw_iic_bus_ready().
+			 */
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below.  Polls the bus-busy
+ * status bit until the bus is idle, sleeping mn_poll_delay us between
+ * polls, for at most mn_bus_ready_retry attempts.
+ *
+ * Returns true when the bus became ready, false on timeout.
+ *
+ * Fix vs. original: the timeout was detected with 'count == 0', but the
+ * counter is decremented past zero before the loop exits (and a zero
+ * retry budget entered the loop with count already 0), so a timed-out
+ * wait could report the bus as ready and vice versa.  Returning directly
+ * from the success and timeout branches removes both cases.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			return true;	/* bus is idle - ready for a transfer */
+
+		count--;
+		if (count <= 0)	/* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Support function for read function.  Polls the RX-FIFO-empty status bit
+ * until data is available, sleeping mn_poll_delay us between polls, for at
+ * most mn_data_ready_retry attempts.
+ *
+ * Returns true when data became available, false on timeout.
+ *
+ * Fix vs. original: same 'count == 0' timeout-detection defect as in
+ * nthw_iic_bus_ready() - the counter reaches -1 on timeout (or starts at 0
+ * with a zero retry budget), inverting the result.  Return directly from
+ * the success and timeout branches instead.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			return true;	/* at least one byte is waiting */
+
+		count--;
+		if (count <= 0)	/* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Probe device address n_dev_addr by reading one byte from n_reg_addr.
+ * Returns 0 when a device answered, non-zero otherwise.
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = 0xFF;
+	int res;
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan for the first responding device starting at n_dev_addr_start,
+ * walking upwards (b_increate) or downwards through the 7-bit range.
+ * Returns the responding address, or -1 when none was found.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	int res = 0;
+	int i = n_dev_addr_start;
+
+	if (b_increate) {
+		while (i < 128) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i++;
+		}
+	} else {
+		while (i >= 0) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i--;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit device address on the bus; always returns 0. */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev_addr;
+
+	for (n_dev_addr = 0; n_dev_addr < 128; n_dev_addr++)
+		(void)nthw_iic_scan_dev_addr(p, n_dev_addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/* State for one IIC controller: register/field handles and retry policy. */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_iic;	/* bound IIC module instance */
+	int mn_iic_instance;	/* module instance number */
+
+	uint32_t mn_iic_cycle_time;	/* core clock cycle time in ns; 0 = skip timing setup */
+	/* Polling/retry policy; negative values select built-in defaults. */
+	int mn_poll_delay;	/* us to wait between status polls */
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* Timing registers and their value fields. */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register (enable, master mode, FIFO reset, TX ack). */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register (bus busy, FIFO full/empty flags). */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO entry: data byte plus START/STOP condition flags. */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register and its reset-key field. */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Lifecycle: allocate, bind to an IIC module instance, and release. */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Override the polling/retry policy; negative arguments select defaults. */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Retrying transfers (…_data) and single-shot transfers (…byte). */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Bounded waits for bus idle / RX data; false on timeout. */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers. */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)	/* number of PCS virtual lanes */
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+/* Zero-valued placeholder (static => 0-initialized); marked _unused. */
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/* Allocate a zero-initialized MAC/PCS instance; NULL on allocation failure. */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Scrub and free an instance; a NULL pointer is tolerated. */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/*
+ * Select the Tx data source: host traffic when @enable is true, otherwise
+ * the traffic generator (TFG). The two selects are flushed in this order;
+ * do not reorder without confirming the hardware tolerates both being set.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS Rx path (MAC_PCS_CONFIG.rx_enable). */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_rx_enable;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Enable or disable the PCS Tx path (MAC_PCS_CONFIG.tx_enable). */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_tx_enable;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set or clear the PHYMAC_MISC "Tx select host" bit. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set or clear the PHYMAC_MISC "Tx select TFG" (traffic generator) bit. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Set or clear the end-of-packet timestamping bit. The field is optional
+ * in some FPGA images; when absent (NULL) this is a no-op.
+ */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (!fld)
+		return;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True only when every masked block-lock bit AND every masked virtual-lane
+ * demux lock bit is set, i.e. the PCS is fully locked.
+ */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block = field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane = field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block == block_mask && lane == lane_mask;
+}
+
+/* Assert (enable=true) or de-assert the PCS Tx path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (enable=true) or de-assert the PCS Rx path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Returns the current state of the PCS Rx path reset bit (freshly read). */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Start (enable=true) or stop transmitting remote fault indication (RFI). */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set or clear the Rx force-resync control bit. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	/* Refresh the shadow value, then flush the new bit state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when all four GTY lanes report Rx reset done, i.e. each lane's
+ * masked status equals the full done-mask.
+ */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const nt_field_t *done[4] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(done[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * True when all four GTY lanes report Tx reset done, i.e. each lane's
+ * masked status equals the full done-mask.
+ */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const nt_field_t *done[4] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(done[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Enable or disable host-side loopback on all four GTY lanes.
+ * Loop-mode value 2 when enabled, 0 for normal operation (presumably the
+ * GTY LOOPBACK port encoding - cf. Xilinx UG578; confirm against the FPGA).
+ */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t mode = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Enable or disable line-side loopback on all four GTY lanes.
+ * Loop-mode value 4 when enabled, 0 for normal operation (presumably the
+ * GTY LOOPBACK port encoding - cf. Xilinx UG578; confirm against the FPGA).
+ */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t mode = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Reads the per-lane BIP-8 error counters from BIP_ERR (the function name
+ * suggests reading resets them in hardware - TODO confirm clear-on-read).
+ * In DEBUG builds, non-zero lane counts are logged.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Returns the low bit of the freshly-read PCS Rx status field via *status. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* Returns the PCS Rx high bit-error-rate (hi_ber) flag (freshly read). */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Reads the LINK_SUMMARY register once and extracts the requested fields.
+ * Every output pointer is optional: pass NULL to skip that field.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	/* Single register read; all field extractions below use cached values */
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	uint32_t block_lock_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	uint32_t lane_lock_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+
+	/* Partial lock (some but not all mask bits set) => reset needed */
+	return ((block_lock != 0) && (block_lock != block_lock_mask)) ||
+	       ((lane_lock != 0) && (lane_lock != lane_lock_mask));
+}
+
+/*
+ * Enable or disable RS-FEC, then toggle both Rx and Tx path resets so the
+ * new FEC state takes effect. Writing 0 to rs_fec_ctrl_in enables FEC;
+ * writing all-ones in the low 5 bits ((1 << 5) - 1) bypasses it.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	if (enable) {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, 0);
+	} else {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+				    (1 << 5) - 1);
+	}
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Returns the FEC_STAT bypass flag (freshly read). */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* Returns the FEC_STAT valid flag (freshly read). */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* Returns the FEC lane-alignment flag (freshly read). */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+/* True when at least one FEC lane reports alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True only when every FEC lane reports alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/*
+ * Debug helper: refreshes FEC_STAT and logs all lane-lock and alignment
+ * fields for this port.
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	/* Fixed: fourth lane label previously read "AM_LOCK_0" although the
+	 * value printed is am_lock3.
+	 */
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Reads the FEC corrected (CW) and uncorrected (UCW) codeword counters
+ * (the function name suggests reading resets them in hardware - TODO
+ * confirm clear-on-read) and logs any non-zero values.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Checks the GTY Rx buffer status: returns true (and logs the per-lane
+ * status values) only when some lane reports a status *change* AND some
+ * lane has a non-zero status; otherwise returns false.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Programs the GTY Tx analog tuning for one lane (0-3): pre-cursor,
+ * differential control and post-cursor. Each value is masked to 5 bits
+ * (0x1F) before being flushed; lane values > 3 are silently ignored.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	/* Apply the LPM-enable bit (bit 0 of mode) to all four lanes; the
+	 * final lane uses the flush variant to commit the register write.
+	 */
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	/* Hold the equalizer reset asserted briefly before releasing it */
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/*
+ * Sets (swap=true) or clears the GTY Tx polarity-inversion bit for one
+ * lane (0-3); lane values > 3 are silently ignored.
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_tx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Sets (swap=true) or clears the GTY Rx polarity-inversion bit for one
+ * lane (0-3); lane values > 3 are silently ignored.
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol0, swap);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol1, swap);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol2, swap);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_ctl_rx_pol3, swap);
+		break;
+	}
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Writes the LED mode (see enum nthw_mac_pcs_led_mode_e in the header)
+ * to the DEBOUNCE_CTRL NT_PORT_CTRL field.
+ */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/*
+ * Writes the Rx timestamp compensation delay. No-op when the optional
+ * TIMESTAMP_COMP register is absent in this FPGA image (field is NULL).
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Records the logical port number used in log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Returns the current block-lock status bits (freshly read). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Returns the mask of valid block-lock bits for this FPGA image. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Returns the current virtual-lane demux lock bits (freshly read). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Returns the mask of valid lane-lock bits for this FPGA image. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* LED control modes written via nthw_mac_pcs_set_led_mode()
+ * (DEBOUNCE_CTRL.NT_PORT_CTRL field).
+ */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/* Receiver equalization modes for nthw_mac_pcs_set_receiver_equalization_mode():
+ * 0 selects DFE, 1 selects LPM (cf. Xilinx UG578).
+ */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Register and field shadow handles for one MAC_PCS module instance.
+ * The mp_reg_*/mp_field_*/mp_fld_* pointers are resolved from the FPGA
+ * model during module init; registers looked up with module_query_register
+ * are optional and may be NULL for some FPGA images - callers must check
+ * before use (e.g. TIMESTAMP_COMP, PHYMAC_MISC ts_eop).
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL - mp_reg_gty_ctl_tx is only set when the image splits
+	 * GTY_CTL into separate RX/TX registers (see init code).
+	 */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP - optional; may be NULL for some FPGA images */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Helper: refresh a register field, then set or clear it and flush the
+ * result to hardware.  A NULL field is silently ignored.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate and zero-initialize a MAC/PCS XXV instance.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc() zeroes the object, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Scrub and free an instance previously obtained from
+ * nthw_mac_pcs_xxv_new().  NULL is accepted and ignored.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Map a channel index to a port number: channel 0 reports the module
+ * instance number, any other channel reports its own index.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (index == 0)
+		return (uint8_t)p->mn_instance;
+	return index;
+}
+
+/* Record the port number for this instance (used e.g. in FEC log messages). */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register of channel @index and copy out the
+ * requested status fields.  Every output pointer may be NULL when the
+ * caller is not interested in that field; all values come from the single
+ * register_update() snapshot taken here.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/*
+	 * Validate the handle before using it: the previous code computed
+	 * &p->regs[index] before the assert, defeating the check.
+	 */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Enable/disable the TX path of MAC/PCS channel @index and flush to HW. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_enable,
+						enable);
+}
+
+/* Enable/disable the RX path of MAC/PCS channel @index and flush to HW. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_enable,
+						enable);
+}
+
+/* Assert/deassert the RX force-resync control of channel @index. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_force_resync,
+						enable);
+}
+
+/* Assert/deassert the RX GT data-path reset for channel @index. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_gt_data,
+						enable);
+}
+
+/* Assert/deassert the TX GT data-path reset for channel @index. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_gt_data,
+						enable);
+}
+
+/* Assert/deassert the auto-negotiation/link-training reset of channel @index. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_an_lt,
+						enable);
+}
+
+/* Assert/deassert the speed-control reset of channel @index. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_speed_ctrl,
+						enable);
+}
+
+/* Control transmission of remote fault indication (RFI) on channel @index. */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_rfi,
+						enable);
+}
+
+/* Control transmission of local fault indication (LFI) on channel @index. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_lfi,
+						enable);
+}
+
+/*
+ * Update both LFI and RFI transmit controls of channel @index in a single
+ * register read-modify-write, so the two fields change atomically.
+ */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+
+	register_update(rf->mp_reg_core_conf);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(rf->mp_reg_core_conf, 1);
+}
+
+/* Query whether DFE equalization is active on channel @index. */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* DFE and LPM are mutually exclusive: LPM_EN == 0 means DFE is on */
+	return field_get_updated(p->regs[index].mp_fld_gty_ctl_rx_lpm_en) == 0;
+}
+
+/*
+ * Enable/disable DFE equalization on channel @index.  Enabling DFE clears
+ * the LPM_EN field (DFE and LPM are mutually exclusive), then pulses
+ * EQUA_RST high and low so the transceiver latches the new setting.
+ * The field read/write order below is hardware-mandated; do not reorder.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Set/clear RX GTY lane polarity inversion for channel @index. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_rx_polarity,
+						enable);
+}
+
+/* Set/clear TX GTY lane polarity inversion for channel @index. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_polarity,
+						enable);
+}
+
+/* Set/clear the TX GTY inhibit control for channel @index. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_inhibit,
+						enable);
+}
+
+/*
+ * Enable/disable host-side GT loopback on channel @index.
+ * Writes 2 to the GT loop field when enabling, 0 when disabling.
+ * NOTE(review): value 2 is presumably the transceiver's near-end loopback
+ * encoding - confirm against the GT/transceiver documentation.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Enable/disable line-side loopback on channel @index. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_line_loopback,
+						enable);
+}
+
+/* Return the current user RX reset status bit of channel @index. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_status_user_rx_rst;
+
+	return field_get_updated(f) != 0;
+}
+
+/* Return the current user TX reset status bit of channel @index. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_status_user_tx_rst;
+
+	return field_get_updated(f) != 0;
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+/*
+ * NOTE(review): the comparison against 3 expects both QPLL lock bits set;
+ * on a 10G-only core with a single QPLL this would never read 3 - confirm
+ * how the status field is encoded for such cores.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+/*
+ * Channel @index is out of reset when the QPLLs are locked and neither the
+ * user RX nor the user TX reset is asserted.
+ */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (!nthw_mac_pcs_xxv_is_qpll_lock(p, index))
+		return false;
+	if (nthw_mac_pcs_xxv_is_user_rx_rst(p, index))
+		return false;
+	return !nthw_mac_pcs_xxv_is_user_tx_rst(p, index);
+}
+
+/* Query whether auto-negotiation is enabled on channel @index. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	return field_get_updated(f) != 0;
+}
+
+/* Control transmission of idle sequences on channel @index. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_idle,
+						enable);
+}
+
+/* Enable/disable FCS insertion on transmit for channel @index. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_ins_fcs,
+						enable);
+}
+
+/* Query configured link speed of channel @index: true = 10G, false = 25G. */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Field encoding: 0 = 25G, 1 = 10G */
+	return field_get_updated(p->regs[index].mp_fld_link_speed10_g) != 0;
+}
+
+/* Select 10G (true) or 25G (false) link speed for channel @index. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_link_speed10_g,
+						enable);
+}
+
+/* Pulse the link-speed toggle field of channel @index (refresh, then set). */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_fld_link_speed_toggle);
+	field_set_flush(p->regs[index].mp_fld_link_speed_toggle);
+}
+
+/* Enable/disable RS-FEC on channel @index. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable,
+						enable);
+}
+
+/* Program the port LED control mode for channel @index. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+/* Assert/deassert the RX MAC/PCS reset for channel @index. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_mac_pcs,
+						enable);
+}
+
+/* Assert/deassert the TX MAC/PCS reset for channel @index. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_mac_pcs,
+						enable);
+}
+
+/*
+ * Read the RS-FEC corrected (CCW) and uncorrected (UCW) codeword counters
+ * of channel @index, logging any non-zero values.
+ * NOTE(review): no explicit clear is written here - presumably these
+ * counters are clear-on-read so the register_update() itself resets them;
+ * confirm against the register specification.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt));
+	}
+	if (field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no,
+		       field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt));
+	}
+}
+
+/* Program the RX timestamp compensation delay of channel @index. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_rx_dly;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, rx_dly);
+}
+
+/* Program the TX timestamp compensation delay of channel @index. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_tx_dly;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, tx_dly);
+}
+
+/* Select timestamping at end-of-packet for channel @index. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_ts_at_eop,
+						enable);
+}
+
+/* Program the GTY TX differential swing control of channel @index. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Program the GTY TX pre-cursor emphasis of channel @index. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Program the GTY TX post-cursor emphasis of channel @index. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_post_cursor_tx_post_csr;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Enable/disable link training (LT) on channel @index. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_lt_conf_enable,
+						enable);
+}
+
+/* Set/clear the FEC91 request bit in the auto-negotiation config of @index. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec91_request,
+						enable);
+}
+
+/* Set/clear the RS-FEC request bit in the auto-negotiation config of @index. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_rs_fec_request,
+						enable);
+}
+
+/* Set/clear the FEC74 request bit in the auto-negotiation config of @index. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec74_request,
+						enable);
+}
+
+/* Enable/disable auto-negotiation on channel @index. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_enable,
+						enable);
+}
+
+/* Set/clear the auto-negotiation bypass control of channel @index. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_bypass,
+						enable);
+}
+
+/*
+ * Configure Direct Attach Copper (DAC) mode on channel @index.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: auto-negotiation and link
+ * training are turned off, then the MAC/PCS and GT data paths are pulsed
+ * through reset.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	switch (dac_mode) {
+	case NTHW_MAC_PCS_XXV_DAC_OFF:
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		/* Assert the MAC/PCS and GT data-path resets... */
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		/* ...then release them in the same order */
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+		break;
+	default:
+		assert(0); /* If you end up here you need to implement other DAC modes */
+		break;
+	}
+}
+
+/* Return the latched-low RX FEC74 lock status of channel @index. */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f) != 0;
+}
+
+/* Return the latched-low RX RS-FEC lane alignment status of channel @index. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f) != 0;
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* LED control modes, used with nthw_mac_pcs_xxv_set_led_mode(). */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/* Direct-attach-cable modes, used with nthw_mac_pcs_xxv_set_dac_mode().
+ * NOTE(review): the N/S/L suffixes presumably denote 25G CA cable reach
+ * classes - confirm against the FPGA/hardware documentation.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Register/field shadow for the XXV MAC/PCS core.
+ * The init code fills one regs[] entry per sub-module/channel (e.g. entry 3
+ * is resolved from the *_3 register variants), so each entry holds the
+ * handles for one channel of the core.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+/*
+ * Public API for the XXV MAC/PCS core.
+ * NOTE(review): the 'index' argument presumably selects the regs[] entry
+ * (sub-module/channel), i.e. 0..NTHW_MAC_PCS_XXV_NUM_ELEMS-1 - confirm
+ * against the implementation file.
+ */
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI read traffic-generator context.
+ * Returns NULL on allocation failure; release with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	nthw_pci_rd_tg_t *p = calloc(1, sizeof(nthw_pci_rd_tg_t));
+
+	return p;
+}
+
+/* Release a context allocated by nthw_pci_rd_tg_new(); NULL is a no-op. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (p) {
+		/* scrub the struct before free to invalidate stale handles */
+		memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to instance 'n_instance' of the PCI_RD_TG FPGA module and
+ * resolve all register/field handles used by the other API calls.
+ * When called with p == NULL the function acts as a probe: it returns 0 if
+ * the module instance exists and -1 otherwise. Otherwise returns 0 on
+ * success or -1 (with an error log) when the instance is missing.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	/* probe-only mode: report presence of the module instance */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* TG_RDDATA0/1: 64-bit DMA physical address split low/high */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* TG_RDDATA2: request size and wait/wrap flags */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id - queried, since the field may be absent */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address (low/high halves) into
+ * TG_RDDATA0/TG_RDDATA1.
+ * Fix: the original mask used (1UL << 32), which is undefined behavior on
+ * targets where 'unsigned long' is 32 bits (shift >= type width); use a
+ * 64-bit constant mask instead.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xFFFFFFFFULL));
+}
+
+/* Select the TG RAM entry to be written via nthw_pci_rd_tg_set_ram_data(). */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage request size and wait/wrap flags in TG_RDDATA2, then flush the
+ * register once after all fields are set.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	/* single flush writes all staged fields to hardware */
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Start the read traffic generator for 'n_iterations' iterations. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read back the TG_CTRL ready flag (fresh read from hardware). */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_PCI_RD_TG_H_
+#define NTHW_PCI_RD_TG_H_
+
+/*
+ * Fixes: the previous guard __NTHW_PCI_RD_TG_H__ used a reserved identifier
+ * (leading double underscore); the header also was not self-contained - it
+ * uses uint32_t/uint64_t/bool and the nt_fpga_t/nt_module_t/nt_register_t/
+ * nt_field_t types without any includes. Style now matches
+ * nthw_mac_pcs_xxv.h.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow for the PCI read traffic generator (MOD_PCI_RD_TG). */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* optional; resolved by query */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* NTHW_PCI_RD_TG_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI test accelerator context.
+ * Returns NULL on allocation failure; release with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	nthw_pci_ta_t *p = calloc(1, sizeof(nthw_pci_ta_t));
+
+	return p;
+}
+
+/* Release a context allocated by nthw_pci_ta_new(); NULL is a no-op. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p) {
+		/* scrub the struct before free to invalidate stale handles */
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to instance 'n_instance' of the PCI_TA FPGA module and
+ * resolve the control register and the four statistics counter fields.
+ * When called with p == NULL the function acts as a probe: it returns 0 if
+ * the module instance exists and -1 otherwise. Returns 0 on success or -1
+ * (with an error log) when the instance is missing.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	/* probe-only mode: report presence of the module instance */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Enable/disable the test accelerator via the CONTROL.ENABLE field. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the good-packet counter into *val (fresh read from hardware). */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the bad-packet counter into *val (fresh read from hardware). */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the length-error counter into *val (fresh read from hardware). */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the payload-error counter into *val (fresh read from hardware). */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_PCI_TA_H_
+#define NTHW_PCI_TA_H_
+
+/*
+ * Fixes: the previous guard __NTHW_PCI_TA_H__ used a reserved identifier
+ * (leading double underscore); the header also was not self-contained - it
+ * uses uint32_t and the nt_* model types without any includes. Style now
+ * matches nthw_mac_pcs_xxv.h.
+ */
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow for the PCI test accelerator (MOD_PCI_TA). */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* NTHW_PCI_TA_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI write traffic-generator context.
+ * Returns NULL on allocation failure; release with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	nthw_pci_wr_tg_t *p = calloc(1, sizeof(nthw_pci_wr_tg_t));
+
+	return p;
+}
+
+/* Release a context allocated by nthw_pci_wr_tg_new(); NULL is a no-op. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p) {
+		/* scrub the struct before free to invalidate stale handles */
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to instance 'n_instance' of the PCI_WR_TG FPGA module and
+ * resolve all register/field handles used by the other API calls.
+ * When called with p == NULL the function acts as a probe: it returns 0 if
+ * the module instance exists and -1 otherwise. Returns 0 on success or -1
+ * (with an error log) when the instance is missing.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	/* probe-only mode: report presence of the module instance */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* TG_WRDATA0/1: 64-bit DMA physical address split low/high */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* TG_WRDATA2: request size and inc/wait/wrap flags */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id - queried, since the field may be absent */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address (low/high halves) into
+ * TG_WRDATA0/TG_WRDATA1.
+ * Fix: the original mask used (1UL << 32), which is undefined behavior on
+ * targets where 'unsigned long' is 32 bits (shift >= type width); use a
+ * 64-bit constant mask instead.
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & 0xFFFFFFFFULL));
+}
+
+/* Select the TG RAM entry to be written via nthw_pci_wr_tg_set_ram_data(). */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage request size and wait/wrap/increment flags in TG_WRDATA2, then flush
+ * the register once after all fields are set.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	/* single flush writes all staged fields to hardware */
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the write traffic generator for 'n_iterations' iterations. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read back the TG_CTRL ready flag (fresh read from hardware). */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_PCI_WR_TG_H_
+#define NTHW_PCI_WR_TG_H_
+
+/*
+ * Fixes: the previous guard __NTHW_PCI_WR_TG_H__ used a reserved identifier
+ * (leading double underscore); the header also was not self-contained - it
+ * uses uint32_t/uint64_t/bool and the nt_* model types without any includes.
+ * Style now matches nthw_mac_pcs_xxv.h.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow for the PCI write traffic generator (MOD_PCI_WR_TG). */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* optional; resolved by query */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* NTHW_PCI_WR_TG_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/* Allocate a zero-initialized PCIe3 shadow object; NULL on failure. */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc yields the same zeroed object as malloc+memset */
+	return calloc(1, sizeof(nthw_pcie3_t));
+}
+
+/* Release a PCIe3 shadow object; safe to call with NULL. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (!p)
+		return;
+	/* scrub stale register/field pointers before releasing */
+	memset(p, 0, sizeof(nthw_pcie3_t));
+	free(p);
+}
+
+/*
+ * Resolve all PCIE3 registers/fields for FPGA module instance @n_instance
+ * and apply the initial setup.
+ *
+ * When @p is NULL the call acts as a pure probe: it returns 0 when the
+ * module instance exists and -1 when it does not.
+ * Returns 0 on success, -1 when the instance is absent.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/* NOTE(review): EP0 mask is set while EP1 stays cleared after both were
+	 * cleared above - presumably routing all DMA to endpoint 0; confirm. */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Kick a statistics sampling window by writing a magic token to SAMPLE_TIME. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection: set ENA and REQ, then flush the register. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection: clear ENA but keep REQ set, then flush. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read the raw PCIe3 statistics counters into the caller's out-parameters.
+ * All pointers must be non-NULL. Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	/* Fixed constants: TG burst unit size and 250 MHz reference clock */
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw counters into rates normalized against the reference
+ * clock (bytes/sec for rx/tx, parts-per-million for the bus utilizations).
+ * When the reference-clock count is zero the rate outputs are left
+ * untouched except the three fields explicitly zeroed below.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		/* counter * unit_size scaled to one second of ref clock */
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		/* utilization expressed in ppm of the sampling window */
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		*p_ref_clk_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/* Pre-sample hook; intentionally a no-op for PCIe3 (kept for interface parity). */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/* Post-sample hook: fill @epc with the current PCIe3 rates. */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	/* NOTE(review): the rx rate is stored in epc->cur_tx and the tx rate in
+	 * epc->cur_rx - possibly deliberate (direction seen from the host side),
+	 * but verify against the nthw_hif_end_point_counters consumers. */
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Shadow of the PCIE3 FPGA module: cached register/field handles plus the
+ * statistics accessors implemented in nthw_pcie3.c.
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pcie3;
+	int mn_instance;
+
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never resolved by nthw_pcie3_init() in
+	 * this patch - either dead fields or reserved for a later revision. */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/* Allocate a zero-initialized SDC shadow object; returns NULL on failure. */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	nthw_sdc_t *p = malloc(sizeof(nthw_sdc_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_sdc_t));
+	return p;
+}
+
+/* Release an SDC shadow object; safe to call with NULL. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p) {
+		/* scrub stale field pointers before freeing */
+		memset(p, 0, sizeof(nthw_sdc_t));
+		free(p);
+	}
+}
+
+/*
+ * Resolve all SDC (SDRAM controller) registers/fields for module instance
+ * @n_instance. A NULL @p performs a pure probe: 0 when the instance exists,
+ * -1 otherwise. Returns 0 on success, -1 when the instance is absent.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Fold one status field into the running result mask and report whether it
+ * matches the expected state (all-ones when @expect_all_set, zero otherwise).
+ * Returns 1 on mismatch, 0 on match. Uses a 64-bit shift when building the
+ * field mask so a full 32-bit field does not trigger undefined behavior
+ * (1 << 32 is UB for a 32-bit int).
+ */
+static int nthw_sdc_accum_state(nt_field_t *p_fld, int expect_all_set,
+			      uint64_t *pn_mask)
+{
+	const int n_val_width = field_get_bit_width(p_fld);
+	const uint32_t val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+	const uint32_t val = field_get_updated(p_fld);
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	return val != (expect_all_set ? val_mask : 0);
+}
+
+/*
+ * Snapshot the SDC status fields into *pn_result_mask (fields concatenated
+ * MSB-first in the order checked below) and count how many are not in their
+ * expected state. Returns the error count (0 = all ok) or -1 on NULL args.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	/* calib/init_done/mmcm_lock/pll_lock must be fully set ... */
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_calib, 1, &n_mask);
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_init_done, 1, &n_mask);
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_mmcm_lock, 1, &n_mask);
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_pll_lock, 1, &n_mask);
+	/* ... while resetting must be zero */
+	n_err_cnt += nthw_sdc_accum_state(p->mp_fld_stat_resetting, 0, &n_mask);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reports a healthy state: calib/init_done/mmcm_lock/
+ * pll_lock all set and resetting cleared. Each field is polled for up to
+ * @n_poll_iterations with @n_poll_interval between polls.
+ * Returns the number of fields that timed out (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int res;
+	int n_err_cnt = 0;
+
+	res = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Shadow of the SDC (SDRAM controller) FPGA module: field handles resolved
+ * by nthw_sdc_init() plus state polling helpers.
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_sdc;
+	int mn_instance;
+
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/* Allocate a zero-initialized Si5340 object; returns NULL on failure. */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	nthw_si5340_t *p = malloc(sizeof(nthw_si5340_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_si5340_t));
+	return p;
+}
+
+/*
+ * Bind the Si5340 object to an I2C bus/address and select register page 0.
+ * @n_iic_addr is the device's 7-bit I2C address. Always returns 0; the
+ * initial page write's result is not checked.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1;
+
+	/* force the device's page register to a known value (page 0) */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Release an Si5340 object; safe to call with NULL. Does not touch the device. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_si5340_t));
+		free(p);
+	}
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	/* 16-bit register address: high byte = page, low byte = offset */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	/* 16-bit register address: high byte = page, low byte = offset */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Write a clock-profile table of (reg_addr, value) pairs to the device and
+ * read each register back to verify the write took effect.
+ * @data_format selects which profile record layout @p_data uses.
+ * Returns 0 on success, -1 on unknown format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and wait for the synthesizer to settle: poll up to
+ * 5 x 1s for DPLL lock and SYS calibration (status reg 0x0c and sticky reg
+ * 0x11, bits 0x09), then read out the 8-char design id for debug logging.
+ * Returns 0 on success, -1 when lock/calibration is not reached.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		/* clear the sticky flags so the next poll sees fresh state */
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Design id lives at registers 0x26B..0x272; NUL-terminate locally */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a profile stored in format-1 records. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a profile stored in format-2 records. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Si5340 clock synthesizer accessed over I2C; see nthw_si5340.c. */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;
+	int mn_clk_cfg;
+	uint8_t m_si5340_page;	/* cached device register page */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/* Allocate a zero-initialized SPI v3 object; returns NULL on failure. */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	nthw_spi_v3_t *p = malloc(sizeof(nthw_spi_v3_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+	return p;
+}
+
+/* Release a SPI v3 object and its owned SPIM/SPIS modules; NULL-safe. */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p) {
+		if (p->mp_spim_mod) {
+			nthw_spim_delete(p->mp_spim_mod);
+			p->mp_spim_mod = NULL;
+		}
+
+		if (p->mp_spis_mod) {
+			nthw_spis_delete(p->mp_spis_mod);
+			p->mp_spis_mod = NULL;
+		}
+
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+		free(p);
+	}
+}
+
+/* Set the transfer timeout; compared against the monotonic counter delta in
+ * the wait helpers (assumed seconds - TODO confirm counter unit). */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Protocol version implemented by this driver; constant 3. */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ * Polls every 1ms until the SPIM Tx FIFO reports empty; @time_out is in
+ * monotonic-counter units. Returns 0 on success, -1 on timeout, or the
+ * underlying query's error code.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received.
+ * Polls every 10ms until the SPIS Rx FIFO reports non-empty; @time_out is in
+ * monotonic-counter units. Returns 0 on success, -1 on timeout, or the
+ * underlying query's error code.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Debug helper: log @count bytes as hex, 16 bytes per log line. */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;
+	char tmp_str[128]; /* 16 bytes x 3 chars fits comfortably */
+
+	for (i = 0; i < count; i++) {
+		sprintf(&tmp_str[j * 3], "%02X ", *(p_data++));
+		j++;
+
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0'; /* drop trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Create and initialize the SPIM (master) and SPIS (slave) sub-modules for
+ * FPGA instance @n_instance_no, then reset both. Errors are logged but
+ * initialization continues; the last operation's status is returned
+ * (0 on success).
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ *
+ * Frame layout (big-endian on the wire): a 32-bit header word - (opcode,size)
+ * on Tx, (error_code,size) on Rx - followed by the payload packed into
+ * 32-bit words. On entry rx_buf->size is the payload capacity; on return it
+ * holds the number of payload bytes received.
+ * Returns 0 on success, 1 when the Rx buffer is too small, -1 when the peer
+ * reports an error code, or a wait/FIFO helper's error code.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Push payload 4 bytes at a time; zero-pad the final word */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes accepted by the on-board AVR microcontroller over the SPI channel */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following commands are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+/* Field sizes (bytes) of the Gen2 VPD EEPROM layout below */
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+/* 1 byte hw revision + all VPD fields + 2 bytes CRC16 */
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM Vital Product Data layout for Gen2 boards */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/*
+	 * Vital Product Data: Board Name (ascii) (e.g. "ntmainb1e2" or "ntfront20b1")
+	 * NOTE(review): earlier comment said 10 bytes but GEN2_BNAME_SIZE is 14;
+	 * the array size is authoritative.
+	 */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board information (filled from AVR sysinfo/VPD replies) */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Simple buffer descriptor used for SPI transfer payloads (caller owns p_buf) */
+struct tx_rx_buf {
+	uint16_t size;
+	void *p_buf;
+};
+
+/* SPI v3 channel state: pairs one SPI master (SPIM) with one SPI slave (SPIS) */
+struct nthw__spi__v3 {
+	int m_time_out; /* timeout used when waiting for RX data */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+/* Execute one opcode transaction: send tx_buf, receive the reply into rx_buf */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/* Allocate a zero-initialized SPIM instance; returns NULL on allocation failure. */
+nthw_spim_t *nthw_spim_new(void)
+{
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Bind a SPIM (SPI master) instance to its FPGA module and resolve every
+ * register/field handle used by the other nthw_spim_* functions.
+ *
+ * Probe mode: when p is NULL the call only checks for the module and
+ * returns 0 if instance n_instance exists, -1 otherwise.  With a valid p
+ * it returns 0 on success, -1 when the module instance is not present.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* SRR - software reset */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* CR - control: loopback, enable, TX/RX reset */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* SR - status: done flag plus TX/RX FIFO empty/full/level */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* DTR/DRR - TX and RX data registers */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	/* CFG - configuration (PRE field; semantics per FPGA register doc) */
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Scrub and release an instance from nthw_spim_new(); NULL is a no-op. */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIM core by writing the documented magic value to SRR. */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIM CR enable bit and flush the change to hardware. */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	nt_field_t *const fld_en = p->mp_fld_cr_en;
+
+	field_update_register(fld_en);
+	if (b_enable)
+		field_set_all(fld_en);
+	else
+		field_clr_all(fld_en);
+	field_flush_register(fld_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM TX FIFO (write-through via DTR). */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/* Report whether the SPIM TX FIFO is empty; asserts on a NULL out-pointer. */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+	*pb_empty = (field_get_updated(p->mp_fld_sr_txempty) != 0);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/* Register/field handles for one SPIM (SPI master) FPGA module instance */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR - software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR - status */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR/DRR - TX/RX data */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	/* CFG - configuration */
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/* Allocate a zero-initialized SPIS instance; returns NULL on allocation failure. */
+nthw_spis_t *nthw_spis_new(void)
+{
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Bind a SPIS (SPI slave) instance to its FPGA module and resolve every
+ * register/field handle used by the other nthw_spis_* functions.
+ *
+ * Probe mode: when p is NULL the call only checks for the module and
+ * returns 0 if instance n_instance exists, -1 otherwise.  With a valid p
+ * it returns 0 on success, -1 when the module instance is not present.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* SRR - software reset */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* CR - control */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* SR - status: FIFO state plus frame/read/write error flags */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* DTR/DRR - TX and RX data registers */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* RAM_CTRL/RAM_DATA - sensor result RAM access (see nthw_spis_read_sensor) */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/* Scrub and release an instance from nthw_spis_new(); NULL is a no-op. */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Soft-reset the SPIS core by writing the documented magic value to SRR. */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIS CR enable bit and flush the change to hardware. */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	nt_field_t *const fld_en = p->mp_fld_cr_en;
+
+	field_update_register(fld_en);
+	if (b_enable)
+		field_set_all(fld_en);
+	else
+		field_clr_all(fld_en);
+	field_flush_register(fld_en);
+
+	return 0;
+}
+
+/* Report whether the SPIS RX FIFO is empty; asserts on a NULL out-pointer. */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+	*pb_empty = (field_get_updated(p->mp_fld_sr_rxempty) != 0);
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS RX FIFO (reads DRR). Always returns 0. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM.
+ * A single-element read is programmed via RAM_CTRL (ADR = n_result_idx,
+ * CNT = 1) and the resulting word is fetched from RAM_DATA.
+ * Always returns 0; asserts on a NULL result pointer.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	/* Point the RAM read logic at the requested result entry first */
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/* Register/field handles for one SPIS (SPI slave) FPGA module instance */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR - software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR - status, including frame/read/write error flags */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR/DRR - TX/RX data */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* RAM_CTRL/RAM_DATA - sensor result RAM access */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/* Allocate a zero-initialized TSM instance; returns NULL on allocation failure. */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/* Scrub and release an instance from nthw_tsm_new(); NULL is a no-op. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TSM (time-stamp module) instance to its FPGA module and resolve
+ * the register/field handles used by the other nthw_tsm_* functions.
+ *
+ * Probe mode: when p is NULL the call only checks for the module and
+ * returns 0 if instance n_instance exists, -1 otherwise.  With a valid p
+ * it returns 0 on success, -1 when the module instance is not present.
+ *
+ * Fix: the TIME_LO/TIME_HI/TS_LO/TS_HI registers were each looked up twice
+ * (once into p_reg, once into the mp_reg_* member); the handle is now
+ * resolved once and reused.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Resolve each register once and reuse the handle */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit TS value (TS_HI:TS_LO).
+ * Returns 0 on success, -1 if p_ts is NULL.
+ *
+ * Fix: removed the dead second NULL check and the redundant temporary -
+ * the early return already guarantees p_ts is non-NULL.
+ *
+ * NOTE(review): LO and HI are sampled by two separate register reads;
+ * assumes the FPGA latches the pair on the first read so no rollover
+ * tear occurs - confirm against the TSM register documentation.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	*p_ts = ((((uint64_t)n_ts_hi) << 32UL) | n_ts_lo);
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit TIME value (TIME_HI seconds : TIME_LO ns).
+ * Returns 0 on success, -1 if p_time is NULL.
+ *
+ * Fix: removed the dead second NULL check and the redundant temporary -
+ * the early return already guarantees p_time is non-NULL.
+ *
+ * NOTE(review): LO and HI are sampled by two separate register reads;
+ * assumes the FPGA latches the pair on the first read so no rollover
+ * tear occurs - confirm against the TSM register documentation.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	*p_time = ((((uint64_t)n_time_hi) << 32UL) | n_time_lo);
+
+	return 0;
+}
+
+/*
+ * Write the 64-bit TIME value: low 32 bits to TIME_LO, high 32 bits to
+ * TIME_HI.  Always returns 0.
+ *
+ * Fix: added the explicit (uint32_t) cast on the low word to match the
+ * high word and avoid an implicit uint64_t->uint32_t narrowing warning.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	field_set_val_flush32(p->mp_fld_time_lo,
+			    (uint32_t)(n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/* Enable or disable timer T0 and flush the control bit to hardware. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/* Set the timer T0 (stat toggle timer) max count, in ns. */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable or disable timer T1 and flush the control bit to hardware. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const fld = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(fld);
+	if (b_enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+	return 0;
+}
+
+/* Set the timer T1 (keep alive timer) max count, in ns. */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/* Select the timestamp format (e.g. 0x1 = Native). Always returns 0. */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/* Register/field handles for one TSM (time-stamp module) FPGA instance */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	/* CONFIG - timestamp format selection */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL - T0/T1 enable bits */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* TS_LO/TS_HI - 64-bit TS value split in two 32-bit registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO/TIME_HI - 64-bit TIME value split in two 32-bit registers */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/* Allocate a zero-initialized DBS instance; returns NULL on allocation failure. */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/* Scrub and release an instance from nthw_dbs_new(); NULL is a no-op. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the DBS RX control register to its quiescent state:
+ * all queue/monitor/writer enables cleared, scan/update speeds set to
+ * their default values, then flushed to hardware.  Always returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset the DBS TX control register to its quiescent state.
+ * TX counterpart of dbs_reset_rx_control(); note the default speeds are
+ * swapped relative to RX (scan=5, update=8).  Always returns 0.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: disable RX/TX control, then zero every per-queue
+ * shadow entry (AM/UW/DR for RX; AM/UW/DR/QP/QoS for TX) and flush each
+ * one to hardware so the device-side tables match the cleared shadows.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow state */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow state */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the DBS RX control register from the given parameters and
+ * flush it to hardware in a single register write.  Returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Compile-time debug trace of every value written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the cached RX control register fields into the caller's
+ * out-parameters.  Uses field_get_val32() (cached values), not a fresh
+ * hardware read.  Returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the DBS TX control register from the given parameters and
+ * flush it to hardware in a single register write.  TX counterpart of
+ * set_rx_control().  Returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Compile-time debug trace of every value written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Fix: trace tx_queue_enable too, matching set_rx_control() */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the cached TX control register fields into the caller's
+ * out-parameters.  Mirrors nthw_dbs_get_rx_control().  Returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Start RX queue initialization: optionally program the initial
+ * index/pointer (only when the optional INIT_VAL register exists on
+ * this FPGA), then write the init command and queue number.  Returns 0.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the cached RX init register fields; busy indicates an
+ * initialization still in progress.  Returns 0.
+ */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Start TX queue initialization.  TX counterpart of set_rx_init().
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the cached TX init register fields.  TX counterpart of
+ * get_rx_init().  Returns 0.
+ */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for an RX queue via the optional IDLE register.
+ * Returns -ENOTSUP when this FPGA has no RX idle register, else 0.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle/busy status from hardware (field_get_updated does a
+ * fresh read).  The queue out-parameter is not read back and is always
+ * reported as 0.  Returns -ENOTSUP when unsupported, else 0.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for a TX queue.  TX counterpart of set_rx_idle().
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle/busy status from hardware; queue is always reported
+ * as 0.  TX counterpart of get_rx_idle().
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the optional PTR register reports on.
+ * Returns -ENOTSUP when this FPGA has no RX ptr register, else 0.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX queue pointer and its valid flag from hardware for the
+ * queue previously selected with set_rx_ptr_queue().  The queue
+ * out-parameter is not read back and is always reported as 0.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the optional PTR register reports on.
+ * TX counterpart of set_rx_ptr_queue().
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer and valid flag; queue is always reported
+ * as 0.  TX counterpart of get_rx_ptr().
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Point the RX avail-monitor control register at table entry 'index'
+ * (count 1) so the next data-register flush lands on that entry. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow-copy setter: RX AM guest physical address for entry 'index'. */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-copy setter: RX AM enable flag.
+ * NOTE(review): the "nthw_dbs_" prefix is inconsistent with the sibling
+ * set_shadow_rx_am_data_* helpers — consider renaming file-wide. */
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+/* Shadow-copy setter: RX AM host id. */
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+/* Shadow-copy setter: RX AM packed-ring flag. */
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+/* Shadow-copy setter: RX AM interrupt-enable flag. */
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Populate every field of the RX AM shadow entry 'index' at once. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write RX AM shadow entry 'index' to hardware: load the data-register
+ * fields from the shadow (packed/int only when those optional fields
+ * exist on this FPGA), select the entry, then flush the data register.
+ * The 64-bit GPA is written as two 32-bit words (assumes matching word
+ * order between host and register layout — TODO confirm).
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update RX AM entry 'index' (shadow + hardware).
+ * Returns -ENOTSUP when the AM data register is absent, else 0.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Point the TX avail-monitor control register at table entry 'index'. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Populate every field of the TX AM shadow entry 'index' at once.
+ * Unlike the RX variant this assigns the struct members directly
+ * instead of going through per-field helpers. */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Write TX AM shadow entry 'index' to hardware.  TX counterpart of
+ * flush_rx_am_data(); the 64-bit GPA goes out as two 32-bit words.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update TX AM entry 'index' (shadow + hardware).
+ * Returns -ENOTSUP when the AM data register is absent, else 0.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Point the RX used-writer control register at table entry 'index'. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow-copy setter: RX UW guest physical address. */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-copy setter: RX UW host id. */
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+/* Shadow-copy setter: RX UW queue size (stored as the raw value given
+ * by the caller; encoding for hardware happens in flush_rx_uw_data). */
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-copy setter: RX UW packed-ring flag. */
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+/* Shadow-copy setter: RX UW interrupt-enable flag. */
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+/* Shadow-copy setter: RX UW interrupt vector. */
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+/* Shadow-copy setter: RX UW ISTK flag. */
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Populate every field of the RX UW shadow entry 'index' at once. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write RX UW shadow entry 'index' to hardware.  DBS module v0.8+
+ * encodes the queue size as a mask ((1 << n) - 1); older modules take
+ * the raw value.  vec/istk are only written alongside the optional
+ * interrupt field.  The 64-bit GPA goes out as two 32-bit words.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update RX UW entry 'index' (shadow + hardware).
+ * Returns -ENOTSUP when the UW data register is absent, else 0.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the TX used-writer control register at table entry 'index'. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow-copy setter: TX UW guest physical address. */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-copy setter: TX UW host id. */
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+/* Shadow-copy setter: TX UW queue size (raw value; encoded for hardware
+ * in flush_tx_uw_data). */
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-copy setter: TX UW packed-ring flag. */
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+/* Shadow-copy setter: TX UW interrupt-enable flag. */
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+/* Shadow-copy setter: TX UW interrupt vector. */
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+/* Shadow-copy setter: TX UW ISTK flag. */
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+/* Shadow-copy setter: TX UW in-order flag (TX-only field). */
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Populate every field of the TX UW shadow entry 'index' at once. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write TX UW shadow entry 'index' to hardware.  Same encoding rules as
+ * flush_rx_uw_data() (v0.8+ queue-size mask, optional packed/int/vec/
+ * istk fields) plus the TX-only optional in-order field.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update TX UW entry 'index' (shadow + hardware).
+ * Returns -ENOTSUP when the UW data register is absent, else 0.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Point the RX descriptor-reader control register at entry 'index'. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow-copy setter: RX DR guest physical address. */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-copy setter: RX DR host id. */
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+/* Shadow-copy setter: RX DR queue size (raw value; encoded for hardware
+ * in flush_rx_dr_data). */
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-copy setter: RX DR header setting. */
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+/* Shadow-copy setter: RX DR packed-ring flag. */
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Populate every field of the RX DR shadow entry 'index' at once. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write RX DR shadow entry 'index' to hardware.  Same v0.8+ queue-size
+ * mask encoding as the UW flush helpers; packed is only written when
+ * the optional field exists.  GPA goes out as two 32-bit words.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: update RX DR entry 'index' (shadow + hardware).
+ * Returns -ENOTSUP when the DR data register is absent, else 0.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Point the TX descriptor-reader control register at entry 'index'. */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Shadow-copy setter: TX DR guest physical address. */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* Shadow-copy setter: TX DR host id. */
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+/* Shadow-copy setter: TX DR queue size (raw value; encoded for hardware
+ * in flush_tx_dr_data). */
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+/* Shadow-copy setter: TX DR header setting. */
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+/* Shadow-copy setter: TX DR egress port (TX-only field). */
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+/* Shadow-copy setter: TX DR packed-ring flag. */
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+/* Populate every field of the TX DR shadow entry 'index' at once.
+ * Note the parameter order (port before header) differs from the
+ * assignment order below; both end up in the right struct members. */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write TX DR shadow entry 'index' to hardware.  Same v0.8+ queue-size
+ * mask encoding as the other flush helpers, plus the TX-only port
+ * field.  GPA goes out as two 32-bit words.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Structs implementing host-side shadow copies of the DBS memory banks
+ */
+
+/* DBS_RX_AM_DATA */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+nthw_epp_t *nthw_epp_new(void)
+{
+	nthw_epp_t *p = malloc(sizeof(nthw_epp_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_epp_t));
+	return p;
+}
+
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_epp_t));
+		free(p);
+	}
+}
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Set recipes for the NRECIPE first records */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU registers according to the current set-MTU request.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Set the TXP Mtu control register */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Set the TXP MTU data register */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Set the queue MTU control register */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Set the queue MTU data register */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+#define VXLANDATASIZEADJUSTIPV6 70
+#define MTUINITVAL 1500
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+/* NOTE(review): static const data defined in a header gives every including
+ * translation unit its own private copy - consider moving these tables into
+ * nthw_epp.c and exposing extern declarations instead.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/* Handle for the EPP (egress packet processing) FPGA module: caches the
+ * register/field objects used to program recipes, MTUs, QoS shapers and the
+ * queue-to-vport mapping.  ("reciepe" spelling matches the FPGA register
+ * definitions.)
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+/* Constructor/destructor and module discovery. */
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+/* Configuration entry points; all return 0 and treat p == NULL as a no-op. */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated list of all FPGA product descriptions built into this PMD;
+ * fpga_mgr_init() counts entries up to the terminator.
+ */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/* Module-id -> printable-name map, terminated by a { 0, NULL } sentinel.
+ * Looked up linearly by nthw_fpga_mod_id_to_str(); order is cosmetic.
+ */
+static const struct {
+	const int a; /* module id (MOD_*) */
+	const char *b; /* printable name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+/* Printable bus names indexed directly by bus type id; index 0 is the
+ * unknown/error slot and is never returned by get_bus_name().
+ */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/* Map a bus type id to its printable name; "ERR" for unknown ids.
+ * Valid indices are 1 .. ARRAY_SIZE-1.
+ * Fix: the original test used `<= ARRAY_SIZE`, allowing an out-of-bounds
+ * read one element past the end of a_bus_type.
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	else
+		return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ * Fix: the original iterated with `<= ARRAY_SIZE` and then indexed with the
+ * post-loop value, reading past the end of the table when the id was absent.
+ * The loop is now bounded by `<` and an unmatched id returns "unknown".
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id) {
+			/* The {0, NULL} sentinel maps id 0 to "unknown". */
+			return (sa_nthw_fpga_mod_map[i].b ?
+				sa_nthw_fpga_mod_map[i].b : "unknown");
+		}
+	}
+	return "unknown";
+}
+
+/*
+ * Force C linkage for xxx_addr_bases and xxx_module_versions
+ */
+/* Read len 32-bit words at addr from the given bus into p_data.
+ * BAR/PCI access is single-word only (asserted); RAB0..2 go through the
+ * RAC helper with the matching RAB bus index.
+ * Returns 0 on success, negative on error; unknown bus types assert and
+ * return -1 in release builds.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/* Variant of read_data() that also carries timestamp out-parameters.
+ * Timestamp capture is not implemented yet, so this is a plain read.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/* Write len 32-bit words from p_data to addr on the given bus.
+ * BAR/PCI access is single-word only (asserted); RAB0..2 go through the
+ * RAC helper with the matching RAB bus index.
+ * Returns 0 on success, negative on error; unknown bus types assert and
+ * return -1 in release builds.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/* Allocate an FPGA manager; NULL on allocation failure.
+ * Fix: zero-initialize (like fpga_new()) so a manager freed before
+ * fpga_mgr_init() does not hold indeterminate pointers.
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = calloc(1, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Scrub and free the manager (it does not own the generated tables). */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/* Point the manager at the generated instance list and count the entries
+ * up to the NULL terminator.
+ */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t i;
+
+	/* Count fpga instance in array */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	for (i = 0; i < ARRAY_SIZE(nthw_fpga_instances); i++) {
+		if (p->mpa_fpga_prod_init[i] == NULL)
+			break;
+	}
+	p->mn_fpgas = (int)i;
+}
+
+/* Find the generated product description matching the packed FPGA id and
+ * build an initialized nt_fpga_t from it.  Returns NULL (and logs) when the
+ * id is not known to this PMD build.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id != n_fpga_prod)
+			continue;
+		if (p_init->fpga_version != n_fpga_ver)
+			continue;
+		if (p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/* Print one record per known FPGA image to fh_out.
+ * detail_level 0 prints only "prod-ver-rev"; anything else adds the build
+ * time (ctime() output already ends in '\n').
+ * Fix: the build time is now cast to uint64_t and printed with PRIX64 -
+ * "%08lX" with a raw time_t is undefined where time_t is not long.
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08" PRIX64 ": %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(uint64_t)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Dump all known FPGA image descriptors to the debug log.
+ * NOTE(review): p_init is tagged _unused yet clearly used below -
+ * presumably the attribute only silences builds where NT_LOG compiles
+ * away; confirm against the _unused macro definition.
+ */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA model instance; NULL on OOM. */
+nt_fpga_t *fpga_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/* Scrub and free the FPGA object only - use fpga_delete_all() to also
+ * release the owned module objects.
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/* Delete the FPGA object together with everything it owns.
+ * Fix: also release the parameter objects and the two pointer arrays -
+ * the original freed only the modules and leaked mpa_modules, mpa_params
+ * and every nt_param_t.  NULL arrays (failed allocation) are tolerated.
+ */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	if (p->mpa_modules) {
+		for (i = 0; i < p->mn_modules; i++) {
+			nt_module_t *p_mod = p->mpa_modules[i];
+
+			if (p_mod)
+				module_delete(p_mod);
+		}
+		free(p->mpa_modules);
+	}
+
+	if (p->mpa_params) {
+		for (i = 0; i < p->mn_params; i++) {
+			nt_param_t *p_par = p->mpa_params[i];
+
+			if (p_par)
+				param_delete(p_par);
+		}
+		free(p->mpa_params);
+	}
+
+	fpga_delete(p);
+}
+
+/* Populate an FPGA model instance from its generated product description:
+ * copy the identity fields, then build the parameter and module arrays.
+ * Fix: on allocation failure the matching count is reset to zero - the
+ * original kept mn_params/mn_modules non-zero with a NULL array, making
+ * later iteration (e.g. fpga_get_product_param()) dereference NULL.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		/* calloc zero-initializes the pointer array. */
+		p->mpa_params = calloc(p->mn_params, sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		} else {
+			p->mn_params = 0;
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			calloc(fpga_prod_init->nb_modules, sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		} else {
+			p->mn_modules = 0;
+		}
+	}
+}
+
+/* Record the debug mode on the FPGA and propagate it to every module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *p_module = p->mpa_modules[idx];
+
+		if (p_module != NULL)
+			module_set_debug_mode(p_module, n_debug_mode);
+	}
+}
+
+/* Linear scan for the module object with the given id/instance pair;
+ * NULL when absent.
+ */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_modules; idx++) {
+		nt_module_t *p_candidate = p->mpa_modules[idx];
+
+		if (p_candidate->m_mod_id != id)
+			continue;
+		if (p_candidate->m_instance != instance)
+			continue;
+		return p_candidate;
+	}
+	return NULL;
+}
+
+/* True when the module id/instance pair exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return (fpga_query_module(p, id, instance) != NULL);
+}
+
+/* Search the generated module descriptors for the id/instance pair;
+ * NULL when this FPGA image has no such module.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mp_init->nb_modules; idx++) {
+		nt_fpga_module_init_t *p_cand = &p->mp_init->modules[idx];
+
+		if (p_cand->id == id && p_cand->instance == instance)
+			return p_cand;
+	}
+	return NULL;
+}
+
+/* Return the value of a product parameter, or n_default_value when the
+ * parameter is not present in this FPGA image.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_params; idx++) {
+		const nt_param_t *p_cur = p->mpa_params[idx];
+
+		if (p_cur->param_id == n_param_id)
+			return p_cur->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Trivial identity accessors. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the full FPGA identity at INFO level. */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug dump of the FPGA header plus all parameters and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug-dump every product parameter of this FPGA. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (idx = 0; idx < p->mn_params; idx++)
+		param_dump(p->mpa_params[idx]);
+}
+
+/* Debug-dump every module object of this FPGA. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (idx = 0; idx < p->mn_modules; idx++)
+		module_dump(p->mpa_modules[idx]);
+}
+
+/*
+ * Param
+ */
+/* Allocate a parameter object; NULL on OOM.
+ * Fix: zero-initialize (consistent with fpga_new()) so a parameter freed
+ * before param_init() holds no indeterminate values.
+ */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = calloc(1, sizeof(nt_param_t));
+	return p;
+}
+
+/* Scrub and free a parameter object; NULL is tolerated. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a parameter object to its owner and copy id/value from the
+ * generated descriptor.
+ */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-dump a single parameter. */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/* Allocate a module object; NULL on OOM.
+ * Fix: zero-initialize - module_delete() iterates mn_registers, so a
+ * module freed before module_init() must not contain garbage counts.
+ */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = calloc(1, sizeof(nt_module_t));
+	return p;
+}
+
+/* Delete all owned register objects and the module itself.
+ * Fix: also free the register pointer array (leaked by the original) and
+ * tolerate a NULL array left by a failed allocation in module_init().
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	if (p->mpa_registers) {
+		for (i = 0; i < p->mn_registers; i++) {
+			nt_register_t *p_reg = p->mpa_registers[i];
+
+			if (p_reg)
+				register_delete(p_reg);
+		}
+		free(p->mpa_registers);
+	}
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/* Populate a module object from its generated descriptor and build its
+ * register objects.
+ * Fix: on allocation failure mn_registers is reset to zero - the original
+ * kept the count alongside a NULL array, so module_lookup_register() and
+ * module_set_debug_mode() would dereference NULL.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		/* calloc zero-initializes the pointer array. */
+		p->mpa_registers =
+			calloc(p->mn_registers, sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		} else {
+			p->mn_registers = 0;
+		}
+	}
+}
+
+/* Initialize a module by looking up its generated descriptor, then apply
+ * the requested debug mode.
+ * Fix: fpga_lookup_init() returns NULL for an unknown module/instance
+ * pair; the original passed that NULL straight into module_init(), which
+ * dereferences it.  Such a lookup failure is now logged and aborts the
+ * init instead of crashing.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	if (p_init == NULL) {
+		NT_LOG(ERR, NTHW, "Module %d instance %d not found\n",
+		       mod_id, instance);
+		return;
+	}
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump the module header followed by all of its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register object owned by the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int idx;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (idx = 0; idx < p->mn_registers; idx++)
+		register_dump(p->mpa_registers[idx]);
+}
+
+/* Version accessors. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Major version in the upper 32 bits, minor in the lower 32 bits. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/* True when the module's version is at least major.minor: minor versions
+ * are compared only when the major versions match.
+ */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (p->m_major_version != major_version)
+		return p->m_major_version >= major_version;
+	return p->m_minor_version >= minor_version;
+}
+
+/* Linear search for a register by id; NULL when not present. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int idx;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		nt_register_t *p_reg = p->mpa_registers[idx];
+
+		if (p_reg->m_id == id)
+			return p_reg;
+	}
+	return NULL;
+}
+
+/* Like module_query_register() but logs an error when the module context
+ * is missing or the register id is unknown; returns NULL in both cases.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Silent lookup variant - no logging, NULL when absent. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Record the debug mode on the module and propagate it to all registers. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int idx;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (idx = 0; idx < p->mn_registers; idx++) {
+		nt_register_t *p_reg = p->mpa_registers[idx];
+
+		if (p_reg != NULL)
+			register_set_debug_mode(p_reg, n_debug_mode);
+	}
+}
+
+/* Bus/base-address accessors. */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* Error hook for modules this PMD does not handle.
+ * NOTE(review): "unsuppported" is misspelled but is the public name, so it
+ * must stay until all callers are updated together.
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	NT_LOG(ERR, NTHW, "Module %d not supported", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/* Allocate a register object; NULL on OOM.
+ * Fix: zero-initialize - register_delete() walks mn_fields and frees
+ * mp_shadow/mp_dirty, so a register freed before register_init() must not
+ * contain garbage.
+ */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = calloc(1, sizeof(nt_register_t));
+	return p;
+}
+
+/* Delete all owned field objects, the shadow/dirty buffers and the
+ * register itself.
+ * Fixes: the field pointer array (mpa_fields) was leaked, and the
+ * `if (x) free(x)` guards were redundant - free(NULL) is a no-op.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	if (p->mpa_fields) {
+		for (i = 0; i < p->mn_fields; i++) {
+			nt_field_t *p_field = p->mpa_fields[i];
+
+			if (p_field)
+				field_delete(p_field);
+		}
+		free(p->mpa_fields);
+	}
+
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/* Populate a register object from its generated descriptor: compute the
+ * absolute address, word length, and build the field objects plus the
+ * shadow (value) and dirty (needs-flush) buffers.
+ * NOTE(review): mp_shadow/mp_dirty are only allocated when nb_fields > 0
+ * and the field array allocation succeeded, yet register_read_data() and
+ * register_flush() use them unconditionally - confirm every flushed
+ * register has at least one field, or hoist these allocations.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug-dump the register header followed by all of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute (module base + relative) register address. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Reset every field of the register to its FPGA-defined reset value. */
+void register_reset(const nt_register_t *p)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_reset(p_field);
+	}
+}
+
+/* Linear search for a field by id; NULL when p is NULL or the id is
+ * absent.
+ */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int idx;
+
+	if (p == NULL)
+		return NULL;
+
+	for (idx = 0; idx < p->mn_fields; idx++) {
+		nt_field_t *p_cand = p->mpa_fields[idx];
+
+		if (p_cand->m_id == id)
+			return p_cand;
+	}
+	return NULL;
+}
+
+/* Like register_query_field() but logs an error when the register context
+ * is missing or the field id is unknown; returns NULL in both cases.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Silent lookup variant - no logging, NULL when absent. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Address relative to the owning module's base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* NOTE(review): takes nt_module_t * although it is named register_* -
+ * presumably it should accept nt_register_t *; both structs have an
+ * m_debug_mode member, so this compiles.  Confirm with the callers.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+/* Read the register from hardware into its shadow buffer.
+ * Fix: the ownership chain is validated before it is dereferenced - the
+ * original called module_get_bus(p->mp_owner) before its own NULL checks,
+ * defeating them entirely.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	int rc = -1;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	assert(p_data);
+
+	rc = read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+	return rc;
+}
+
+/* Same as register_read_data() but forwards the timestamp out-parameters
+ * (currently unused by read_data_tsc()).
+ * Fix: validate the ownership chain before dereferencing it - the original
+ * read p->mp_owner in module_get_bus() before its NULL checks and then
+ * passed a possibly-NULL p_fpga_info onward.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	int rc = -1;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	if (p_fpga_info == NULL)
+		return -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	rc = read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+/* Write cnt register-sized chunks of the shadow buffer to hardware.
+ * Fix: the ownership chain is validated before it is dereferenced - the
+ * original called module_get_bus(p->mp_owner) before its own NULL checks.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	int rc = -1;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	assert(p_data);
+
+	rc = write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+
+	return rc;
+}
+
+/* Copy up to len shadow words into p_data; len == (uint32_t)-1 (or any
+ * oversized value) means "the whole register".
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	memcpy(p_data, p->mp_shadow, len * sizeof(uint32_t));
+}
+
+/* Return word 0 of the shadow buffer (no hardware access). */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Refresh the shadow buffer from hardware (skipped for write-only
+ * registers) and optionally trace the read when ON_READ debug is set.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Refresh the shadow from hardware, then return word 0. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	register_update(p);
+	return register_get_val32(p);
+}
+
+/* Flag every word of the register as needing a flush to hardware. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t idx = p->m_len;
+
+	while (idx--)
+		p->mp_dirty[idx] = true;
+}
+
+/* Copy len words from p_data into the shadow buffer; len == (uint32_t)-1
+ * means "the whole register" (mirrors register_get_val()).
+ * Fix: the original asserted len <= m_len BEFORE clamping, so the
+ * documented -1 convention tripped the assert in debug builds.
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Convenience: update the shadow buffer and immediately write it out. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/* Write the shadow copy of the register to hardware as cnt consecutive
+ * register-sized chunks (bounded by the 256-word assert) and clear the
+ * dirty flags.  Read-only registers are never flushed.
+ * Fix: the debug-trace word counter no longer re-declares `i`, which
+ * shadowed the outer loop variable (-Wshadow).
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t n_words = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, n_words);
+			while (n_words--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/* NOTE(review): this clears cnt flags but mp_dirty holds
+		 * m_len entries - confirm cnt can never exceed m_len.
+		 */
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a register read and report the TSC before/after the access. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow and mark it dirty so the zeros reach HW on flush. */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0x00, sizeof(uint32_t) * p->m_len);
+	register_make_dirty(p);
+}
+
+/* Set every shadow bit and mark the register dirty for the next flush. */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, sizeof(uint32_t) * p->m_len);
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/* Allocate an uninitialized field instance; may return NULL on OOM. */
+nt_field_t *field_new(void)
+{
+	return malloc(sizeof(nt_field_t));
+}
+
+/*
+ * Scrub and free a field.  A NULL pointer is tolerated, matching the
+ * other *_delete() helpers in this driver; the original called
+ * memset(NULL, ...) which is undefined behavior.
+ */
+void field_delete(nt_field_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_field_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a field to its owning register and precompute the word/bit
+ * decomposition used by field_get_val()/field_set_val(): a front mask
+ * (bits in the first shadow word), a body (whole middle words) and a
+ * tail mask (bits in the last word).
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	/* fields inherit the owning register's debug setting */
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20; /* words needed for bw bits */
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* bits available in the first word, from m_first_bit upward */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		/* 1ULL keeps the shift defined when front_mask_length == 32 */
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* bits_remaining < 32 here, so the plain shift is safe */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/*
+ * Return the debug mode of a MODULE.  NOTE(review): despite the field_
+ * prefix this takes nt_module_t (matching the header's prototype) -
+ * confirm this asymmetry is intentional.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Select the field's debug trace mode (0 disables tracing). */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = (int)n_debug_mode;
+}
+
+/* Width of the field in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	const int n_bit_width = p->mn_bit_width;
+
+	return n_bit_width;
+}
+
+/* Lowest bit position of the field within its register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	const int n_pos_low = p->mn_bit_pos_low;
+
+	return n_pos_low;
+}
+
+/* Highest (inclusive) bit position of the field within its register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return field_get_bit_pos_low(p) + field_get_bit_width(p) - 1;
+}
+
+/* In-register mask of the field's first-word bits (front mask). */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	const uint32_t n_mask = p->m_front_mask;
+
+	return n_mask;
+}
+
+/* Write the field's HW reset value into the register shadow. */
+void field_reset(const nt_field_t *p)
+{
+	/* m_reset_val is already a uint32_t; no cast needed */
+	field_set_val32(p, p->m_reset_val);
+}
+
+/* Front mask shifted down to bit 0, i.e. a value-space mask. */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	const uint32_t n_val_mask = p->m_front_mask >> p->mn_bit_pos_low;
+
+	return n_val_mask;
+}
+
+/* The field's HW reset value as defined by the FPGA model. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	const uint32_t n_reset_val = p->m_reset_val;
+
+	return n_reset_val;
+}
+
+/*
+ * Extract the field's bits from the owning register's SHADOW (no HW
+ * access) into p_data, least-significant word first.  len must equal
+ * p->mn_words.  A two-word union is used as a 64-bit shifter to move
+ * bits spanning word boundaries.
+ * NOTE(review): the w32[0]/w32[1]-inside-w64 layout assumes a
+ * little-endian host - confirm for big-endian targets.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert the field's bits from p_data into the owning register's
+ * SHADOW and mark the register dirty (no HW access until flush).
+ * len must equal p->mn_words.  The inverse of field_get_val(): the
+ * union shifts incoming words up into field position, and read-
+ * modify-write preserves neighboring bits via the front/tail masks.
+ * NOTE(review): same little-endian union assumption as field_get_val().
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Write the field into the shadow and flush the whole register to HW. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Read a field of at most 32 bits from the register shadow. */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t n_value;
+
+	field_get_val(p, &n_value, 1);
+	return n_value;
+}
+
+/* Refresh the owning register from HW, then read the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t n_value;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &n_value, 1);
+	return n_value;
+}
+
+/* Trigger a read of the owning register with before/after timestamps. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Re-read the field's owning register from HW into the shadow. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the field's owning register shadow out to HW. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Store a <=32-bit value into the field's shadow bits (no HW access). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Store a <=32-bit value into the field and flush the register to HW. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val32(p, val);
+	field_flush_register(p);
+}
+
+/* Clear every bit of a single-word-spanning field in the shadow. */
+void field_clr_all(const nt_field_t *p)
+{
+	/* only valid for fields without a multi-word body */
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0x00000000);
+}
+
+/* Clear the field and push the owning register out to HW. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	field_flush_register(p);
+}
+
+/* Set every bit of a single-word-spanning field in the shadow. */
+void field_set_all(const nt_field_t *p)
+{
+	/* only valid for fields without a multi-word body */
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0xFFFFFFFF);
+}
+
+/* Set the field to all-ones and push the owning register out to HW. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	field_flush_register(p);
+}
+
+/* Termination conditions for field_wait_cond32() polling. */
+enum field_match {
+	FIELD_MATCH_CLR_ALL, /* wait until every field bit reads 0 */
+	FIELD_MATCH_SET_ALL, /* wait until every field bit reads 1 */
+	FIELD_MATCH_CLR_ANY, /* wait until at least one bit reads 0 */
+	FIELD_MATCH_SET_ANY, /* wait until at least one bit reads 1 */
+};
+
+/*
+ * Poll a <=32-bit field until the requested match condition holds.
+ * Returns 0 on match, -1 when n_poll_iterations are exhausted.
+ * -1 for iterations/interval selects the defaults (10000 x 100 usec).
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	/*
+	 * Use 64-bit arithmetic: for a 32-bit wide field the original
+	 * "1 << 32" is undefined behavior (shift >= type width).
+	 */
+	const uint32_t n_mask =
+		(uint32_t)(((uint64_t)1 << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		/* same UB-safe construction as n_mask above */
+		uint32_t n_reg_mask _unused =
+			(uint32_t)((uint64_t)n_mask << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Poll until every field bit reads 1; see field_wait_cond32(). */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit reads 0; see field_wait_cond32(). */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit reads 1; see field_wait_cond32(). */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit reads 0; see field_wait_cond32(). */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until its value equals (n_wait_cond_value & mask).
+ * Returns 0 on match, -1 after n_poll_iterations attempts.
+ * -1 for iterations/interval selects the defaults (10000 x 100 usec).
+ * NOTE(review): the value read back is compared unmasked; if the field
+ * can carry bits outside n_wait_cond_mask the condition never matches -
+ * confirm whether "(val & mask) ==" was intended.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Debug-log the field's static layout (id, bit range, words, reset). */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Debug-log the field's current shadow value, most-significant word
+ * first.  NOTE(review): buf holds 32 words; assumes mn_words <= 32 -
+ * confirm no field exceeds 1024 bits.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Debug-log a field init descriptor (id, bit range, reset value). */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Look up the FPGA model matching n_fpga_ident via a temporary FPGA
+ * manager.  Returns the model instance, or NULL when no supported
+ * image matches (an error is logged in that case).
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	/* decode the packed ident for the diagnostic string */
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	/* the manager is only needed for the lookup */
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+/* Convenience wrapper: look up a module instance on the FPGA model. */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+/* Convenience wrapper: look up a register by id within a module. */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	return module_get_register(p_mod, n_reg);
+}
+
+/* Convenience wrapper: look up a field by id within a register. */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+#ifndef FPGAID_TO_PRODUCTCODE
+/*
+ * Decode the packed 64-bit FPGA ident:
+ * bits [39:32]=product type, [31:16]=product code,
+ * [15:8]=version, [7:0]=revision.
+ */
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack a major/minor pair into one 64-bit comparable version value. */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug trace selector: trace register reads, writes, or neither. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus a module is attached to; RAB0-2 are register access buses. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of the FPGA product definitions known to this build. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas; /* number of entries in mpa_fpga_prod_init */
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* Runtime model of one FPGA image and its parameters and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info; /* adapter info this model belongs to */
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params; /* number of entries in mpa_params */
+	struct nt_param_s **mpa_params;
+
+	int mn_modules; /* number of entries in mpa_modules */
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init; /* static init data this was built from */
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One FPGA product parameter (id/value pair) owned by an nt_fpga_t. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init; /* static init data */
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* Runtime model of one FPGA module instance and its registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance; /* instance index for multi-instance modules */
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus; /* one of enum nthw_bus_type */
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers; /* number of entries in mpa_registers */
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init; /* static init data */
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* Runtime model of one register: shadow copy, dirty flags, fields. */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel; /* address relative to the module base */
+	uint32_t m_addr; /* absolute address */
+	uint32_t m_type; /* e.g. REGISTER_TYPE_RO for read-only */
+	uint32_t m_len; /* register length in 32-bit words */
+
+	int m_debug_mode;
+
+	int mn_fields; /* number of entries in mpa_fields */
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow; /* m_len-word local copy of the register */
+	bool *mp_dirty; /* per-word "needs write-back" flags */
+
+	nt_fpga_register_init_t *mp_init; /* static init data */
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* Runtime model of one bit-field; masks precomputed by field_init(). */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low; /* lowest bit position in the register */
+	uint32_t m_reset_val;
+	uint32_t m_first_word; /* shadow word holding the lowest bit */
+	uint32_t m_first_bit; /* bit offset within that word */
+	uint32_t m_front_mask; /* mask of the field's bits in first word */
+	uint32_t m_body_length; /* whole 32-bit words between front/tail */
+	uint32_t mn_words; /* words needed to hold the field value */
+	uint32_t m_tail_mask; /* mask of the field's bits in last word */
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init; /* static init data */
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#ifndef ARRAY_SIZE
+/* Element count of a true array (not valid on decayed pointers). */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a Napatech PCI device id to the corresponding adapter id.
+ * The NT40E3, NT40A00 and NT40A01 device ids share one adapter id;
+ * unknown device ids map to NT_HW_ADAPTER_ID_UNKNOWN.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/* Adapter families supported by the PMD (several PCI ids may share one). */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* NT40A01 is deliberately an alias of NT40E3, not a new value */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA feature profile the loaded image implements. */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ * Returns NULL if the allocation fails; the original unconditionally
+ * memset() the result, dereferencing NULL on out-of-memory.
+ * (nthw_rac_init() already tolerates a NULL instance pointer.)
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+/* Scrub and release a RAC instance; a NULL pointer is a no-op. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the instance before freeing so stale pointers fail fast. */
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Bind the RAC instance to its FPGA module and resolve every register,
+ * field, address and mask used by the register-access and DMA paths.
+ *
+ * Returns 0 on success, -1 if the RAC module instance does not exist.
+ * With p == NULL only the module presence is probed.
+ *
+ * Fixes vs. original:
+ * - the debug-mode handover to the RAB_INIT register now runs AFTER
+ *   mp_reg_rab_init has been resolved (it was previously still NULL when
+ *   passed to register_set_debug_mode());
+ * - the RAC_DBG_DATA fallback now clears mp_fld_dbg_data instead of
+ *   redundantly re-clearing mp_reg_dbg_data (copy/paste error).
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	{
+		/*
+		 * RAC is a primary communication channel:
+		 * turn off debug by default, except for rac_rab_init.
+		 * Must run after mp_reg_rab_init has been resolved above.
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	/* Optional debug control/data registers (not present on all images) */
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw BAR0 addresses for the hot register-access paths */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional non-memory-mapped bus access registers */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB bus interfaces (cached NT_RAC_RAB_INTERFACES product param). */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-poll the RAB output-buffer "used" counter until at least word_cnt
+ * response words are available. Returns 0 on success, -1 on timeout.
+ * NOTE(review): pure busy-wait with a fixed 100000-iteration budget and no
+ * inter-poll delay - presumably RAB responses are fast enough; confirm
+ * against hardware latency.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	uint32_t retry;
+
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		/* Extract the OB_USED field from the combined buf_used word */
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			break;
+	}
+
+	if (used < word_cnt) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+		       p_adapter_id_str, address, used, word_cnt);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be exercised - as all RAB modules will be held in reset.
+ *
+ * Always returns 0.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Perform the RAB "flip/flip" reset sequence over all RAB interfaces:
+ * release all, assert all, then release all except interface 0
+ * (ref RMT#37020). Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* _unused: NT_LOG may compile to nothing in some build configs */
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the RAB DMA command/response rings, program their IOVA
+ * into the adapter, and synchronize the software ring pointers with the
+ * hardware's current values. Returns 0 on success, -1 if the DMA
+ * allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		/* Inbound ring in the first half, outbound ring in the second */
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers (hw counts bytes, sw words) */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Start a RAB DMA transaction. On success the mutex is HELD until the
+ * matching nthw_rac_rab_dma_commit() releases it; queue commands in
+ * between with nthw_rac_rab_write32_dma()/nthw_rac_rab_read32_dma().
+ * Returns 0 on success, -1 if a transaction is already active.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		pthread_mutex_unlock(&p->m_mutex);
+		NT_LOG(ERR, NTHW,
+		       "%s: DMA begin requested, but a DMA transaction is already active\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	p->m_dma_active = true;
+
+	return 0;
+}
+
+/*
+ * Terminate the queued command stream with a completion word and kick the
+ * adapter by advancing the inbound write pointer register; the hardware
+ * starts processing from here.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	/* Ring indices wrap with a power-of-two mask */
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer (hw pointer is in bytes) */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll the outbound ring (1 us per iteration, up to RAB_DMA_WAIT) until
+ * the completion word written by the adapter appears, then advance past
+ * it and replenish the inbound free count.
+ * Returns 0 on success, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word and reset the inbound budget */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Execute the queued DMA transaction and wait for its completion, then
+ * end the transaction and release the mutex taken by
+ * nthw_rac_rab_dma_begin(). Returns 0 on success, -1 on timeout or if no
+ * transaction was active.
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+
+	p->m_dma_active = false;
+
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* Read the 32-bit BAR0 register at byte offset reg_addr into *p_data. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *p_reg =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					    reg_addr);
+
+	*p_data = *p_reg;
+}
+
+/* Write the 32-bit value p_data to the BAR0 register at byte offset reg_addr. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *p_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*p_reg = p_data;
+}
+
+/*
+ * Queue a RAB write burst of word_cnt words (1..256) to (bus_id, address)
+ * in the inbound DMA ring. Must be called between
+ * nthw_rac_rab_dma_begin() and nthw_rac_rab_dma_commit().
+ * Returns 0 on success, -1 on invalid length or insufficient ring space.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): space check requires word_cnt + 3 free but only
+	 * word_cnt + 1 is consumed - presumably the extra two words are
+	 * headroom for the final completion command; confirm.
+	 */
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy payload words into the ring, wrapping at the ring size */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read burst of word_cnt words (1..256) from (bus_id, address)
+ * in the inbound DMA ring and report via buf_ptr where the response data
+ * will land in the outbound ring once nthw_rac_rab_dma_commit() has run.
+ * Must be called between nthw_rac_rab_dma_begin() and _commit().
+ * Returns 0 on success, -1 on invalid length or insufficient ring space.
+ *
+ * Fix vs. original: the oversize-transfer log message had five format
+ * conversions but only four arguments (undefined behavior per the printf
+ * specification); the stray ": 0x%%08X" was dropped and the missing
+ * newline added.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* A read consumes one inbound command word */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand the caller the outbound-ring window where the data will appear */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Write word_cnt words from p_data to (bus_id, address) over the RAB
+ * register (non-DMA) path: queue a write command plus payload plus a
+ * completion command into the IB data register, wait for the echoed
+ * completion on the OB side, and check for bus timeout/overflow.
+ * Serialized by p->m_mutex; rejected while a DMA transaction is active.
+ * Returns 0 on success, -1 on error.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/*
+	 * NOTE(review): boundary checks use '>' against (1 << BW), so the
+	 * value exactly equal to 1 << BW slips through even though it does
+	 * not fit in a BW-bit field - confirm intended encoding.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register (bit 31 = TIMEOUT flag) */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Read word_cnt words from (bus_id, address) into p_data over the RAB
+ * register (non-DMA) path: queue a read command into the IB data
+ * register, wait for the response words, drain them from the OB data
+ * register, and check for bus timeout/overflow. Serialized by p->m_mutex.
+ * Returns 0 on success, -1 on error.
+ *
+ * NOTE(review): unlike nthw_rac_rab_write32(), the parameter checks here
+ * run after taking the mutex, and there is no m_dma_active guard -
+ * confirm whether the asymmetry is intended.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register (bit 31 = TIMEOUT flag) */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB input/output buffers: assert the FLUSH bit, clear the
+ * free-counter register, busy-poll until only the FLUSH bit remains set
+ * in buf_used (i.e. IB_USED and OB_USED are 0), then deassert FLUSH.
+ * Returns 0 on success, -1 if the buffers never drained.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
+/*
+ * RAC (Register Access Control) instance context.
+ *
+ * Holds the FPGA module handle, the register/field handles resolved at init
+ * time, cached register addresses and field masks used by the fast-path RAB
+ * accessors, and the state of the optional DMA-based register access path.
+ * m_mutex serializes all RAB transactions on this instance.
+ */
+struct nthw_rac {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rac;
+
+	/* Serializes RAB read/write/flush/DMA operations */
+	pthread_mutex_t m_mutex;
+
+	int mn_param_rac_rab_interfaces;
+	int mn_param_rac_rab_ob_update;
+
+	nt_register_t *mp_reg_dummy0;
+	nt_register_t *mp_reg_dummy1;
+	nt_register_t *mp_reg_dummy2;
+
+	nt_register_t *mp_reg_rab_init;
+	nt_field_t *mp_fld_rab_init;
+
+	int mn_fld_rab_init_bw;
+	uint32_t mn_fld_rab_init_mask;
+
+	nt_register_t *mp_reg_dbg_ctrl;
+	nt_field_t *mp_fld_dbg_ctrl;
+
+	nt_register_t *mp_reg_dbg_data;
+	nt_field_t *mp_fld_dbg_data;
+
+	/* In-band (IB) and out-of-band (OB) RAB data registers */
+	nt_register_t *mp_reg_rab_ib_data;
+	nt_field_t *mp_fld_rab_ib_data;
+
+	nt_register_t *mp_reg_rab_ob_data;
+	nt_field_t *mp_fld_rab_ob_data;
+
+	nt_register_t *mp_reg_rab_buf_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
+	nt_field_t *mp_fld_rab_buf_free_ob_free;
+	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
+	nt_field_t *mp_fld_rab_buf_free_timeout;
+
+	nt_register_t *mp_reg_rab_buf_used;
+	nt_field_t *mp_fld_rab_buf_used_ib_used;
+	nt_field_t *mp_fld_rab_buf_used_ob_used;
+	nt_field_t *mp_fld_rab_buf_used_flush;
+
+	/* Registers for the DMA-backed RAB transfer path */
+	nt_register_t *mp_reg_rab_dma_ib_lo;
+	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_hi;
+	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_hi;
+	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_lo;
+	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_wr;
+	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ib_rd;
+	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ob_wr;
+	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;
+
+	nt_register_t *mp_reg_rab_nmb_rd;
+	nt_register_t *mp_reg_rab_nmb_data;
+	nt_register_t *mp_reg_rab_nmb_wr;
+	nt_register_t *mp_reg_rab_nmb_status;
+
+	/* Cached raw register addresses used by the low-level accessors */
+	uint32_t rac_rab_init_addr;
+	uint32_t rac_rab_ib_data_addr;
+	uint32_t rac_rab_ob_data_addr;
+	uint32_t rac_rab_buf_free_addr;
+	uint32_t rac_rab_buf_used_addr;
+
+	uint32_t rac_rab_dma_ib_lo_addr;
+	uint32_t rac_rab_dma_ib_hi_addr;
+	uint32_t rac_rab_dma_ob_lo_addr;
+	uint32_t rac_rab_dma_ob_hi_addr;
+	uint32_t rac_rab_dma_ib_rd_addr;
+	uint32_t rac_rab_dma_ob_wr_addr;
+	uint32_t rac_rab_dma_ib_wr_addr;
+
+	/* Cached field masks for the BUF_FREE/BUF_USED registers */
+	uint32_t rac_rab_buf_free_ib_free_mask;
+	uint32_t rac_rab_buf_free_ob_free_mask;
+	uint32_t rac_rab_buf_used_ib_used_mask;
+	uint32_t rac_rab_buf_used_ob_used_mask;
+	uint32_t rac_rab_buf_used_flush_mask;
+
+	uint32_t rac_rab_buf_used_ob_used_low;
+
+	uint32_t rac_nmb_rd_adr_addr;
+	uint32_t rac_nmb_data_addr;
+	uint32_t rac_nmb_wr_adr_addr;
+	uint32_t rac_nmb_status_addr;
+
+	/* DMA transfer path state (valid when m_dma_active is set) */
+	bool m_dma_active;
+
+	struct nt_dma_s *m_dma;
+
+	volatile uint32_t *m_dma_in_buf;
+	volatile uint32_t *m_dma_out_buf;
+
+	uint16_t m_dma_out_ptr_rd;
+	uint16_t m_dma_in_ptr_wr;
+	uint32_t m_in_free;
+};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+/* Cursor into a DMA result buffer returned by nthw_rac_rab_read32_dma() */
+struct dma_buf_ptr {
+	uint32_t size;
+	uint32_t index;
+	volatile uint32_t *base;
+};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+/* Register-window and DMA variants of multi-word RAB read/write */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+/* Raw 32-bit BAR register access helpers */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+/*
+ * Trigger a statistics DMA snapshot.
+ *
+ * Acknowledges a pending "toggle missed" indication if one is flagged,
+ * invalidates the old timestamp in the DMA buffer, and sets the DMA enable
+ * field so the hardware delivers a fresh snapshot. Always returns 0.
+ */
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	/* Invalidate the previous timestamp so completion can be detected */
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
+/*
+ * STAT module context: counter layout derived from FPGA parameters plus the
+ * field handles and DMA buffer bookkeeping used to collect statistics.
+ *
+ * NOTE(review): this header uses nt_fpga_t, nt_module_t, nt_field_t, bool
+ * and uint64_t but includes nothing itself - it relies on the including
+ * translation unit to have pulled in the right headers first; verify intent.
+ */
+struct nthw_stat {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_stat;
+	int mn_instance;
+
+	/* Layout version derived from the STA module version (1..6) */
+	int mn_stat_layout_version;
+
+	bool mb_is_vswitch;
+	bool mb_has_tx_stats;
+
+	int m_nb_phy_ports;
+	int m_nb_nim_ports;
+
+	int m_nb_rx_ports;
+	int m_nb_tx_ports;
+
+	int m_nb_rx_host_buffers;
+	int m_nb_tx_host_buffers;
+
+	int m_dbs_present;
+
+	int m_rx_port_replicate;
+
+	/* Counter group sizes computed by nthw_stat_init() */
+	int m_nb_color_counters;
+
+	int m_nb_rx_hb_counters;
+	int m_nb_tx_hb_counters;
+
+	int m_nb_rx_port_counters;
+	int m_nb_tx_port_counters;
+
+	/* Total number of 32-bit counters in the DMA buffer */
+	int m_nb_counters;
+
+	nt_field_t *mp_fld_dma_ena;
+	nt_field_t *mp_fld_cnt_clear;
+
+	/* May be NULL: field not available from STA v. 3 */
+	nt_field_t *mp_fld_tx_disable;
+
+	nt_field_t *mp_fld_cnt_freeze;
+
+	nt_field_t *mp_fld_stat_toggle_missed;
+
+	nt_field_t *mp_fld_dma_lsb;
+	nt_field_t *mp_fld_dma_msb;
+
+	/* Statistics DMA buffer; timestamp slot follows the counter area */
+	uint64_t m_stat_dma_physical;
+	uint32_t *mp_stat_dma_virtual;
+
+	uint64_t last_ts;
+
+	uint64_t *mp_timestamp;
+};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Must always be first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+/*
+ * Per-level compile-time switches. When NT_LOG_ENABLE is set, ERR/WRN/INF/DBG
+ * default to on and the verbose DB1/DB2 levels default to off; each can be
+ * overridden individually before including this header.
+ */
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+/* Per-level forwarders: expand to a nt_log() call or to nothing */
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+/*
+ * NT_LOG(level, module, fmt, ...) expands to NT_LOG_NT_LOG_<level>(...),
+ * i.e. either a nt_log() call or nothing depending on the switches above.
+ * The "module: level: " prefix is pasted onto the format string, so fmt
+ * must be a string literal.
+ */
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
+/* Pluggable backend: registered once via nt_log_init() */
+struct nt_log_impl {
+	int (*init)(void);
+	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
+		   va_list args);
+	int (*is_debug)(uint32_t module);
+};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Return a pointer to the first newline of the trailing newline run in s,
 * ignoring trailing spaces; e.g. for "hello_world\n\n\n" the first of the
 * three trailing newlines. Returns NULL when s is empty or does not end
 * (after spaces) with '\n'.
 */
static char *last_trailing_eol(char *s)
{
	size_t len = strlen(s);

	/*
	 * Bug fix: for an empty string the old code computed i = -1 and read
	 * s[-1], an out-of-bounds access (undefined behavior).
	 */
	if (len == 0)
		return NULL;

	size_t i = len - 1;

	/* Skip spaces */
	while (i > 0 && s[i] == ' ')
		--i;
	if (s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+__rte_format_printf(2, 0)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
+/* Release a helper string from ntlog_helper_str_alloc(); free(NULL) is a
+ * no-op, so passing NULL is safe.
+ */
+void ntlog_helper_str_free(char *s)
+{
+	free(s);
+}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+/* Unpack the domain/bus/device/function fields of a packed PCI identifier */
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary.
+ * The mask arithmetic only works when a is a power of two.
+ */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size.
+ * NOTE(review): rounds to a power of two via rte_log2_u64(); confirm the
+ * rounding direction matches the allocation sizes callers pass in.
+ */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+/* Timer-cycle based clocks; NS variant scales by 10/100 to limit overflow */
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+/* A VFIO-mapped DMA buffer: virtual address, IOVA and mapped size */
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;
+	uint64_t size;
+};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+/* VFIO map/unmap callbacks registered once via nt_util_vfio_init() */
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+/* VFIO callbacks used by nt_dma_alloc()/nt_dma_free(); set via nt_util_vfio_init() */
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Register the VFIO DMA map/unmap callbacks. The table is copied by value,
+ * so the caller's struct does not need to stay alive afterwards.
+ */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%u, align=0x%X\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64 ", size=%u\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%u\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 3/8] net/ntnic: adds NT200A02 adapter support
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-08-31 13:51   ` Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+/* Maximum number of adapters handled by one driver instance */
+#define NUM_ADAPTER_MAX (8)
+/* Maximum number of ports per adapter (sizes the per-port arrays below) */
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+/* One monitoring thread handle per adapter; joined in stop_monitor_tasks() */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+/* Per-adapter run flag polled by the monitor threads.
+ * NOTE(review): plain volatile int used as a cross-thread flag — consider
+ * an atomic type; volatile alone does not guarantee ordering.
+ */
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+/*
+ * Clear the run flag of every monitor task.  When called directly with
+ * signum == -1 (e.g. from nt4ga_adapter_deinit) the previously-running
+ * threads are additionally joined and their handles cleared; when invoked
+ * as a real signal handler only the flags are cleared, since pthread_join
+ * is not async-signal-safe.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		monitor_task_is_running[i] = 0;
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			/* Direct (non-signal) call: wait for the thread to exit */
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Print a human-readable summary of the adapter (device name, PCI ids,
+ * FPGA version/build, port counts, HW platform) to the stream @pfh and
+ * append the statistics dump.  Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Render the PCI location (domain/bus/device/function) for display */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	/* type-prod-ver-rev [ident] (build time) */
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+/*
+ * Allocate and initialize the SPI v3 channel used for sensor setup.
+ * Returns the ready-to-use handle, or NULL on allocation/init failure
+ * (the handle is released again if init fails).
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *p_spi = nthw_spi_v3_new();
+
+	if (!p_spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(p_spi);
+		return NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+/*
+ * Allocate and initialize the SPI channel used for reading sensors.
+ * Returns the ready-to-use handle, or NULL on allocation/init failure
+ * (the handle is released again if init fails).
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *p_spis = nthw_spis_new();
+
+	if (!p_spis) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(p_spis, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(p_spis);
+		return NULL;
+	}
+
+	return p_spis;
+}
+
+/*
+ * Build the adapter sensor list: the FPGA temperature sensor heads the list,
+ * followed by the AVR-managed board sensors (fan, PSU0/PSU1, PCB temp) when
+ * an AVR SPI channel is available.  Updates adapter->adapter_sensors and
+ * adapter->adapter_sensors_cnt.
+ *
+ * Fixes vs. previous version: a NULL result from fpga_temperature_sensor_init
+ * or avr_sensor_init no longer leads to a NULL-pointer dereference when the
+ * next sensor is linked in, and the count only reflects sensors actually
+ * created.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	struct nt_sensor_group *s = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor - head of the adapter sensor list */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	if (sensors_list_ptr == NULL) {
+		/* Without a list head the AVR sensors below cannot be linked in */
+		NT_LOG(ERR, ETHDEV, "Failed to initialize FPGA temperature sensor\n");
+		if (sensors_s_spi)
+			nthw_spi_v3_delete(sensors_s_spi);
+		return;
+	}
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR-managed board sensors */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			s = avr_sensor_init(sensors_s_spi,
+					    p_hw_info->n_nthw_adapter_id,
+					    "FAN0",
+					    NT_SENSOR_SOURCE_ADAPTER,
+					    NT_SENSOR_TYPE_FAN,
+					    NT_SENSOR_NT200E3_FAN_SPEED,
+					    SENSOR_MON_FAN, 0,
+					    SENSOR_MON_BIG_ENDIAN,
+					    SENSOR_MON_UNSIGNED,
+					    &fan, 0xFFFF);
+			if (s) {
+				/* Only advance the tail on success to avoid a
+				 * NULL dereference when linking the next one
+				 */
+				sensors_list_ptr->next = s;
+				sensors_list_ptr = s;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			s = avr_sensor_init(sensors_s_spi,
+					    p_hw_info->n_nthw_adapter_id,
+					    "PSU0",
+					    NT_SENSOR_SOURCE_ADAPTER,
+					    NT_SENSOR_TYPE_TEMPERATURE,
+					    NT_SENSOR_NT200E3_PSU0_TEMP,
+					    SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+					    SENSOR_MON_LITTLE_ENDIAN,
+					    SENSOR_MON_UNSIGNED,
+					    &exar7724_tj, 0xFFFF);
+			if (s) {
+				sensors_list_ptr->next = s;
+				sensors_list_ptr = s;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			s = avr_sensor_init(sensors_s_spi,
+					    p_hw_info->n_nthw_adapter_id,
+					    "PSU1",
+					    NT_SENSOR_SOURCE_ADAPTER,
+					    NT_SENSOR_TYPE_TEMPERATURE,
+					    NT_SENSOR_NT200A02_PSU1_TEMP,
+					    SENSOR_MON_MP2886A, 0x8d,
+					    SENSOR_MON_BIG_ENDIAN,
+					    SENSOR_MON_UNSIGNED,
+					    &mp2886a_tj, 0xFFFF);
+			if (s) {
+				sensors_list_ptr->next = s;
+				sensors_list_ptr = s;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			s = avr_sensor_init(sensors_s_spi,
+					    p_hw_info->n_nthw_adapter_id,
+					    "PCB",
+					    NT_SENSOR_SOURCE_ADAPTER,
+					    NT_SENSOR_TYPE_TEMPERATURE,
+					    NT_SENSOR_NT200E3_PCB_TEMP,
+					    SENSOR_MON_DS1775, 0,
+					    SENSOR_MON_LITTLE_ENDIAN,
+					    SENSOR_MON_SIGNED,
+					    &ds1775_t, 0xFFFF);
+			if (s) {
+				sensors_list_ptr->next = s;
+				sensors_list_ptr = s;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				/* typo fix: was "starteed" */
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Bring up one adapter: decode PCI ids into hw_info, build the device and
+ * adapter ident strings, initialize the FPGA model, run the PCI TA/TG
+ * throughput check, set up sensors, initialize the link ports for the
+ * detected FPGA product, optionally set up EPP, and finally init/setup the
+ * statistics module.  Returns 0 on success or the error of the failing step.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	/* Fixed 24-byte ident buffers; ownership passes to p_adapter_info and
+	 * they are freed in nt4ga_adapter_deinit().  A failed malloc is
+	 * tolerated (the code below guards each use with a NULL check).
+	 */
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	/* Decode pci_device_id bit fields: [3:0] product type, [11:4] platform
+	 * id, [15:12] reserved
+	 */
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: PCI location string, e.g. domain:bus:dev.func */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: "PCI:" + location, shared with fpga_info */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Allocate one "<adapter>:intf_<i>" name per possible port; freed in
+	 * nt4ga_adapter_deinit()
+	 */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; everything below depends on it */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		/* NOTE(review): "res=x%08X" looks like it was meant to be
+		 * "res=0x%08X" — confirm before changing the log format
+		 */
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			/* Measure PCI throughput with the traffic generator */
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			/* Non-fatal: continue without the TA/TG measurement */
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Port/link bring-up for the supported FPGA products */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional: only set up when the FPGA exposes the module */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down one adapter: stop monitor threads and statistics, shut down the
+ * FPGA model, reset the RAC/RAB, and free all ident strings and sensor
+ * lists.  Returns the result of the RAC/RAB reset.
+ *
+ * Fix vs. previous version: nt4ga_adapter_init() allocates one port ident
+ * string per mp_port_id_str[] entry, but only the first n_phy_ports entries
+ * were freed here, leaking the rest.  All entries are now released.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1: direct call, joins the monitor threads (not a signal context) */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free all adapter port ident strings - init allocated one per array
+	 * entry, not just one per physical port (free(NULL) is a no-op)
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str); i++) {
+		free(p_adapter_info->mp_port_id_str[i]);
+		p_adapter_info->mp_port_id_str[i] = NULL;
+	}
+
+	/* Free adapter ident string */
+	free(p_adapter_info->mp_adapter_id_str);
+	p_adapter_info->mp_adapter_id_str = NULL;
+
+	/* Free devname ident string */
+	free(p_adapter_info->p_dev_name);
+	p_adapter_info->p_dev_name = NULL;
+
+	/* Free adapter sensors */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors (populated per physical port only) */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * PCI identification of the adapter, decoded per DN-0060 section 9.
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;	/* pci_device_id bits 11:4 */
+	int hw_product_type;	/* pci_device_id bits 3:0 */
+	int hw_reserved1;	/* pci_device_id bits 15:12 */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/*
+ * Per-adapter state: sub-module state, HW/FPGA info, sensor lists and
+ * ident strings (owned by this struct; allocated in nt4ga_adapter_init
+ * and freed in nt4ga_adapter_deinit).
+ */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;	/* linked list head */
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];	/* heap-allocated */
+	char *mp_adapter_id_str;	/* heap-allocated */
+	char *p_dev_name;	/* heap-allocated */
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the following variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/*
+ * Per-adapter filter/flow state: interface and queue counts plus the
+ * flow device handle (presumably owned by the flow module — confirm).
+ */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;
+	int n_queues_per_intf_cnt;
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+/*
+ * Return the adapter-wide link speed capability mask; reported identically
+ * for every port.  Fix: @p was annotated _unused although it is dereferenced
+ * below; only @port is genuinely unused.
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+/* Report whether a NIM module is currently detected in the given port */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ */
+/* Set the administrative state; stored inverted as a port-disable flag */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+/* Administrative state is the inverse of the stored port-disable flag */
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+/*
+ * Set the link status.  Currently this maps directly onto the port
+ * administrative state.
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+/* Return the momentary link-up indication for the given port */
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ */
+/* Record the requested speed both as a pending port action and as link info */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+/* Return the currently recorded link speed for the given port */
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+/*
+ * Set link auto-negotiation.  Not yet supported by the link code, so this is
+ * intentionally a no-op.  Fix: removed the dead local p_link that was
+ * computed and immediately discarded.
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+}
+
+/*
+ * Get link auto-negotiation.  Not yet supported by the link code, so this
+ * always reports enabled.  Fix: removed the dead local p_link that was
+ * computed and immediately discarded.
+ */
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+/* Stage the requested duplex mode as a pending port action */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+/* Return the currently recorded duplex mode for the given port */
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+/* Stage the requested loopback mode as a pending port action */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+/* Return the staged loopback mode for the given port */
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+/* Return a by-value snapshot of the port's NIM I2C context */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: tx power
+ */
+/*
+ * Enable or disable the TX laser of a QSFP28-based port.
+ * Returns 0 on success, 1 if the NIM call fails, and -1 for port types
+ * other than QSFP28/QSFP28_SR4/QSFP28_LR4.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	/* Reads u.nim_ctx[] while writing via u.var100g.nim_ctx[] below —
+	 * relies on nim_ctx being the first field of both union variants
+	 * (see adapter_var_u in nt4ga_link.h)
+	 */
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		/* RX-only NIMs have no TX laser to control */
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state as sampled by the monitoring routine
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Per-port link parameters (speed/duplex/autoneg) */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Per-port requested actions, applied by the link handling code */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+/* State specific to 100G adapters */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Union over adapter variants; nim_ctx must be the first field of every
+ * variant so it can be accessed through either union member.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+/* Aggregate link state for one adapter */
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;	/* adapter-wide speed capability mask */
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+/* Stages of the 100G link bring-up sequence.
+ * NOTE(review): not referenced in the visible part of this file - confirm
+ * it is used by the state machine elsewhere.
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit
+		       * before de-asserting the Remote fault indication.
+		       */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+/* Per-port bookkeeping owned by the link monitoring thread.
+ * NOTE(review): not referenced in the visible part of this file - verify
+ * it is still needed.
+ */
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	/*
+	 * Apply or remove a loopback on one port and re-reset its RX path.
+	 * mode: 1 = host loopback, 2 = line loopback, anything else = none.
+	 * last_mode tells us which previously applied loopback to undo when
+	 * mode does not select one.
+	 */
+	bool swap_polarity = true;
+
+	if (mode == 1) {
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polarity = false;
+	} else if (mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+	} else if (last_mode == 1) {
+		NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+	} else if (last_mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+	}
+
+	/* NT200A01 HW build 2 and NT200A02 need the GTY polarity swap. */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polarity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear BIP/FEC counters once the RX path is out of reset. */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	/* Raw values returned by the MAC/PCS link summary readout. */
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	/* Start from a clean slate; only the fields set below are valid. */
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Log the summary only when it differs from the previous
+		 * readout for this adapter/port; lsbuf caches the last
+		 * logged text to suppress repeats.
+		 */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			/* rte_strscpy already NUL-terminates; the explicit
+			 * terminator below is defensive only.
+			 */
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	/* Sample the module-present pin of one interface via the GPIO PHY. */
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn on the MAC/PCS RX path. Always succeeds (returns 0). */
+	(void)drv; /* kept for a uniform helper signature */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn on the MAC/PCS TX path and select the host as TX source. */
+	(void)drv; /* kept for a uniform helper signature */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn off the MAC/PCS RX path. Always succeeds (returns 0). */
+	(void)drv; /* kept for a uniform helper signature */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/* Turn off the MAC/PCS TX path and deselect the host TX source. */
+	(void)drv; /* kept for a uniform helper signature */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/*
+	 * Pulse the RX path reset, allowing the hardware 10 ms to settle
+	 * on each edge of the pulse.
+	 */
+	(void)drv; /* kept for a uniform helper signature */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	/*
+	 * Apply (swap == true) or clear (swap == false) the per-lane GTY
+	 * TX/RX polarity inversions dictated by the board layout.
+	 * NOTE(review): the tables only have entries for ports 0-1 and
+	 * 4 lanes - confirm callers never pass port > 1, otherwise the
+	 * table access is out of bounds.
+	 */
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			/* Clearing uses 'false' for every lane. */
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	/*
+	 * Re-reset the RX path whenever the PCS flags a pending reset, a
+	 * high bit-error rate, or any FEC lane that has lost alignment-
+	 * marker lock.
+	 */
+	const bool rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_all_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || hi_ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	/*
+	 * Reset and (pre)initialize the NIM module of one port.
+	 * When enable is false the MAC RX/TX paths are first disabled and
+	 * the module is finally left in low-power mode.
+	 * Returns 0 on success (including "no module present"), non-zero
+	 * on failure.
+	 */
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* the only supported module type */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* The module vanished during/after reset (message text fixed:
+	 * this branch means the module is no longer PRESENT).
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present!\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	/* Low-power deasserted = laser on; asserted = laser/LED off. */
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* -1 marks "unknown" when FPGA info is unavailable; the tuning
+	 * selection below then falls through to the error branch.
+	 */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* The first call with a NULL handle probes for the presence of the
+	 * GMF module; only on success is a real instance initialized and
+	 * enabled - presumably to keep absence of GMF non-fatal. TODO confirm.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Per-lane GTY TX tuning (pre/diff/post emphasis), selected by
+	 * adapter model and HW build.
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	/*
+	 * Per-adapter link monitoring loop: polls sensors, tracks NIM
+	 * insertion/removal, administrative port state and loopback mode
+	 * changes, and (re)initializes ports as needed.  Runs until
+	 * monitor_task_is_running[adapter_no] is cleared.
+	 */
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		/* NOTE(review): function-static, so shared between the
+		 * monitor threads of ALL adapters - verify this is intended
+		 * for multi-adapter configurations.
+		 */
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Log THIS port's NIM context; previously the
+				 * array base pointer (port 0) was dereferenced.
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	/* pthread entry point: adapt the pthread signature to the common
+	 * state machine and discard its status code.
+	 */
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	/* Accumulates the first failure; all later steps are skipped once
+	 * res becomes non-zero.
+	 */
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	/* One-time per-adapter set-up of MAC/PCS, NIM I2C and GPIO PHY
+	 * handles for every physical port.
+	 */
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	/* pthread_create returns an errno-style code; the thread itself
+	 * sets monitor_task_is_running[adapter_no] once it starts.
+	 */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+/* Initialize all 100G ports of one adapter and start its link
+ * monitoring thread.  Returns 0 on success, non-zero on failure.
+ */
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..07884e9219
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	/*
+	 * Probe and initialize the PCI test-access (TA) and read/write
+	 * traffic-generator (TG) FPGA modules.  A missing module only
+	 * produces a warning; the return value is the number of modules
+	 * that could not be found (0 = all present), or -1 on a null
+	 * context pointer.
+	 */
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int n_err_cnt = 0;
+	int res;
+
+	if (!p) {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+	memset(p, 0, sizeof(*p));
+
+	assert(p_fpga);
+
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res != 0) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	/* Forward the enable flag to the TA control register. */
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	/* Read the TA length-error counter into *p_data. */
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	/* Read the TA bad-packet counter into *p_data. */
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	/* Read the TA good-packet counter into *p_data. */
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	/* Read the TA payload-error counter into *p_data. */
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	/*
+	 * Program one RD TG RAM slot: the slot index, the IOVA it reads
+	 * from, and the request descriptor (size/wait/wrap).
+	 * Widen BEFORE multiplying: the previous expression
+	 * (unsigned long)(slot_addr * req_size) multiplied in 32 bits and
+	 * could wrap for large slot/request sizes ('unsigned long' is also
+	 * only 32 bits on some ABIs).
+	 */
+	const uint64_t n_phys_addr = iova + (uint64_t)slot_addr * req_size;
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	/* Start the RD TG for num_iterations runs (0 stops it). */
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	/*
+	 * Poll the RD TG ready flag up to 1000 times; each probe is
+	 * preceded by a 1 ms sleep so the FPGA pipe has drained before the
+	 * register is sampled.  Returns 0 when ready, -1 on time-out.
+	 */
+	uint32_t rdy = 0;
+	int poll = 0;
+
+	do {
+		NT_OS_WAIT_USEC(1000);
+		rdy = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		if (++poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	} while (rdy == 0);
+
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	/*
+	 * Program one WR TG RAM slot: the slot index, the IOVA it writes
+	 * to, and the request descriptor (size/wait/wrap/inc).
+	 * Widen BEFORE multiplying: the previous expression
+	 * (unsigned long)(slot_addr * req_size) multiplied in 32 bits and
+	 * could wrap for large slot/request sizes ('unsigned long' is also
+	 * only 32 bits on some ABIs).
+	 */
+	const uint64_t n_phys_addr = iova + (uint64_t)slot_addr * req_size;
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	/* Start the WR TG for num_iterations runs (0 stops it). */
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	/*
+	 * Poll the WR TG ready flag up to 1000 times; each probe is
+	 * preceded by a 1 ms sleep so the FPGA pipe has drained before the
+	 * register is sampled.  Returns 0 when ready, -1 on time-out.
+	 */
+	uint32_t rdy = 0;
+	int poll = 0;
+
+	do {
+		NT_OS_WAIT_USEC(1000);
+		rdy = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		if (++poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	} while (rdy == 0);
+
+	return 0;
+}
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_master_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_slave_instance = NULL;
+
+	nthw_pcie3 *p_pci_master = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_slave = NULL;
+
+	assert(p_master_instance || p_pci_master);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_master,
+								    pri);
+			}
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_slave,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_enable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_enable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_enable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_enable(p_pci_slave);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_master_instance)
+				nthw_hif_stat_req_disable(p_master_instance);
+			if (p_pci_master)
+				nthw_pcie3_stat_req_disable(p_pci_master);
+
+			if (p_slave_instance)
+				nthw_hif_stat_req_disable(p_slave_instance);
+			if (p_pci_slave)
+				nthw_pcie3_stat_req_disable(p_pci_slave);
+
+			/* Post process master */
+			if (p_master_instance) {
+				nthw_hif_end_point_counters_sample(p_master_instance,
+							       pri);
+			}
+
+			if (p_pci_master) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_master,
+								     pri);
+			}
+
+			/* Post process slave */
+			if (p_slave_instance) {
+				nthw_hif_end_point_counters_sample(p_slave_instance,
+							       sla);
+			}
+
+			if (p_pci_slave) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_slave,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_master_instance) {
+					nthw_hif_stat_req_enable(p_master_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_master_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Initialize one HIF end-point counter set for a measurement pass.
+ * Non-positive n_pkt_size/n_batch_count/n_delay select the TG_* defaults.
+ */
+static void nt4ga_pci_ta_tg_init_ep_counters(struct nthw_hif_end_point_counters *p_ep,
+					     uint8_t numa, int direction,
+					     int n_pkt_size, int n_batch_count,
+					     int n_delay)
+{
+	p_ep->n_numa_node = numa;
+	p_ep->n_tg_direction = direction;
+	p_ep->n_tg_pkt_size =
+		(n_pkt_size > 0 ? n_pkt_size : TG_PKT_SIZE);
+	p_ep->n_tg_num_pkts =
+		(n_batch_count > 0 ? n_batch_count : TG_NUM_PACKETS);
+	p_ep->n_tg_delay = (n_delay > 0 ? n_delay : TG_DELAY);
+	p_ep->cur_rx = 0;
+	p_ep->cur_tx = 0;
+	p_ep->n_ref_clk_cnt = -1;
+	p_ep->bo_error = 0;
+}
+
+/*
+ * Run HIF throughput measurements over the selected NUMA node(s) and
+ * direction(s).
+ *
+ * numa_node == UINT8_MAX means "all numa nodes"; direction <= 0 runs the
+ * read, write and combined passes (1..3); n_delay == 0 is rejected.
+ *
+ * Fixed: the slave counter set previously reset pri->n_ref_clk_cnt instead
+ * of sla->n_ref_clk_cnt (copy/paste); both end points are now initialized
+ * by the same helper so they cannot diverge.
+ *
+ * Returns -1 on zero delay; otherwise 0 (measurement errors are logged).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			int by_loop;
+
+			for (by_loop = dir_begin; by_loop <= dir_end;
+					by_loop++) {
+				struct nthw_hif_end_point_counters *pri =
+						&eps.pri;
+				struct nthw_hif_end_point_counters *sla =
+						&eps.sla;
+
+				/* Identical setup for primary and slave */
+				nt4ga_pci_ta_tg_init_ep_counters(pri, numa,
+								 by_loop,
+								 n_pkt_size,
+								 n_batch_count,
+								 n_delay);
+				nt4ga_pci_ta_tg_init_ep_counters(sla, numa,
+								 by_loop,
+								 n_pkt_size,
+								 n_batch_count,
+								 n_delay);
+
+				bo_error +=
+				nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+								       pri, sla);
+#if defined(DEBUG) && (1)
+				{
+					NT_LOG(DBG, NTHW,
+					       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+					       __func__, pri->n_numa_node,
+					       pri->n_tg_direction,
+					       pri->n_tg_num_pkts,
+					       pri->n_tg_pkt_size,
+					       pri->n_tg_delay,
+					       pri->cur_rx, pri->cur_tx,
+					       (pri->cur_rx * 8UL /
+						1000000UL),
+					       (pri->cur_tx * 8UL /
+						1000000UL));
+				}
+				{
+					NT_LOG(DBG, NTHW,
+					       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+					       __func__, sla->n_numa_node,
+					       sla->n_tg_direction,
+					       sla->n_tg_num_pkts,
+					       sla->n_tg_pkt_size,
+					       sla->n_tg_delay,
+					       sla->cur_rx, sla->cur_tx,
+					       (sla->cur_rx * 8UL /
+						1000000UL),
+					       (sla->cur_tx * 8UL /
+						1000000UL));
+				}
+#endif
+
+				if (pri->bo_error != 0 || sla->bo_error != 0)
+					bo_error++;
+				if (bo_error)
+					break;
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	/* NOTE(review): returns 0 even when bo_error != 0 — confirm callers rely on this */
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+/* Enable summary output from the PCI test accelerator / traffic generator */
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default TG parameters, used when callers pass non-positive values */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* PCIe test accelerator (TA) plus read/write traffic generator (TG) handles */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* PCIe read TG */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* PCIe write TG */
+	struct nthw_pci_ta *mp_nthw_pci_ta; /* test accelerator */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one measurement pass; results accumulate in the pri/sla counter sets */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/*
+ * Measure throughput; numa_node == UINT8_MAX selects all nodes,
+ * direction <= 0 runs all directions (1..3), non-positive sizes/counts/
+ * delays select the TG_* defaults.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/* Convert an inline pcap-style 32:32 (seconds:nanoseconds) timestamp to nanoseconds */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	const uint64_t secs = ts >> 32;
+	const uint64_t nsecs = ts & 0xffffffff;
+
+	return secs * 1000000000 + nsecs;
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Snapshot adapter statistics into p_nt4ga_stat.
+ *
+ * Chooses the vswitch (virt v1) or capture (cap v1) collector based on the
+ * STA module's mb_is_vswitch flag and refreshes last_timestamp accordingly.
+ * Always returns 0; NOTE(review): collector return codes are ignored.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		/*
+		 * NOTE(review): mp_timestamp appears 64-bit (see timestamp2ns
+		 * below) but only the low 32 bits are set here — confirm intended.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/* Capture path: the FPGA maintains the DMA timestamp itself */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Allocate and initialize the STA (statistics) and RMC modules for the
+ * adapter and cache the FPGA's host-buffer and port dimensioning.
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		/*
+		 * Allocate and check the STA module before creating the RMC
+		 * module: the previous ordering allocated both up front and
+		 * leaked the RMC object when the STA allocation failed.
+		 */
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_rmc) {
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache FPGA statistics dimensioning for the collectors */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare statistics collection: block RMC, allocate the DMA area the FPGA
+ * writes counters into, allocate the host-side counter mirrors, then
+ * unblock RMC and trigger the first statistics delivery.
+ *
+ * Returns 0 on success, -1 on failure. Buffers already attached to
+ * p_nt4ga_stat on a failing path are released by nt4ga_stat_stop().
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * Fixed: the format string had six conversions but only five
+		 * arguments (a stray "%p"), which is undefined behavior.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/*
+		 * Fixed format specifiers: n_physical_adapter_no is an int
+		 * ("%02d", not "%02ld") and n_stat_size is 32-bit (PRIX32).
+		 */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern makes not-yet-written counters recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* Vswitch images have no FLM statistics */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick off the first statistics delivery from the FPGA */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources attached to the adapter.
+ *
+ * Safe on partially initialized state: free(NULL) is a no-op, and each
+ * pointer is cleared immediately after release so repeated calls are
+ * harmless.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() is not known to accept NULL, so keep the guard */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Dump per-port Rx/Tx packet, octet and drop totals to the given stream.
+ * Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int intf;
+
+	/* One line per physical port */
+	for (intf = 0; intf < fpga_info->n_phy_ports; intf++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, intf,
+			p_nt4ga_stat->a_port_rx_packets_total[intf],
+			p_nt4ga_stat->a_port_rx_octets_total[intf],
+			p_nt4ga_stat->a_port_rx_drops_total[intf],
+			p_nt4ga_stat->a_port_tx_packets_total[intf],
+			p_nt4ga_stat->a_port_tx_octets_total[intf],
+			p_nt4ga_stat->a_port_tx_drops_total[intf]);
+	}
+
+	return 0;
+}
+
+/*
+ * Accumulate vswitch (virt v1) statistics from the stat DMA area into
+ * p_nt4ga_stat. Called with stat mutex locked.
+ *
+ * DMA layout walked in order: color counters, host-buffer counters,
+ * Rx port counters, Tx port counters (STA layout version 6 only).
+ *
+ * Returns 0 on success, -1 on NULL objects or unsupported layout version.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int c, h, p;
+
+	/*
+	 * Fixed: validate the pointers before dereferencing them (the check
+	 * previously ran after mp_nthw_stat/mn_rx_ports had been read).
+	 */
+	if (!p_nt4ga_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	if (!p_nthw_stat)
+		return -1;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters (each DMA packet word also carries TCP flag bits) */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  42] :
+			0;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  47] :
+			0;
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53];
+
+		/* Rx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] +
+			(p_nthw_stat->m_dbs_present ?
+			 p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					   42] :
+			 0);
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23];
+
+		/* Tx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color (flow-stat) counters: packet/byte totals and accumulated TCP flags. */
+struct color_counters {
+	uint64_t color_packets;
+	uint64_t color_bytes;
+	uint8_t tcp_flags;	/* OR-accumulated TCP flag bits seen for this color - TODO confirm */
+};
+
+/* Per host-buffer (queue) packet and byte counters. */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Per-port statistics (capability/"cap" profile, v2 layout).
+ * Field order follows the DMA counter layout consumed by the stats collector;
+ * all counters are free-running accumulators maintained by software.
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Per-port statistics for the VSWITCH/inline ("virt") profile, v1 layout. */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Flow Matcher (FLM) module counters. The two groups correspond to the
+ * FLM register-map versions that introduced them (0.17 and 0.20).
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/*
+ * Top-level statistics state for one adapter: handles to the STA/RMC HW
+ * modules, the DMA area the FPGA writes counters into, and the software
+ * accumulators updated on every stats poll.
+ */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat;
+	nthw_rmc_t *mp_nthw_rmc;
+	struct nt_dma_s *p_stat_dma;
+	uint32_t *p_stat_dma_virtual;	/* CPU-visible mapping of the counter DMA area */
+	uint32_t n_stat_size;
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Anonymous union: only one port-counter layout is active, selected by profile */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver;
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/*
+ * Return true when @prod_no exactly matches one of the copper SFP product
+ * numbers known to support tri-speed operation.
+ */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+
+	/* Determine if copper SFP is supported 3-speed type */
+	for (size_t i = 0; i < ARRAY_SIZE(pn_trispeed_list); i++)
+		if (strcmp(pn_trispeed_list[i], prod_no) == 0)
+			return true;
+
+	return false;
+}
+
+/*
+ * Return whether the module type @id uses page-based addressing (page-select
+ * register in the lower 0xA0 space): true for XFP and the QSFP family,
+ * false for SFP/SFP+ and for unknown identifiers (which are also logged).
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP:
+		return true;
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
+/* Map the raw identifier byte cached in @ctx to the NIM identifier enum. */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+/*
+ * Transfer @seq_cnt bytes to/from register @reg_addr of I2C device @i2c_addr
+ * (read when @do_write is false, write when true). @lin_addr is accepted for
+ * interface symmetry but unused. Returns the nthw_iic layer's status code.
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+	(void)lin_addr; /* Unused */
+
+	if (do_write)
+		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					 seq_cnt, p_data);
+	else
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+/*
+ * Select page @page_sel via the page-select register (127). The current
+ * value is read first and only rewritten when it differs, since a page
+ * switch can be slow. Returns 0 on success, -1 on any I2C failure.
+ */
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Only write new page select value if necessary */
+	if (page_sel != curr_page_sel) {
+		if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+					 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+					 sizeof(page_sel), &page_sel) != 0)
+			return -1;
+	}
+	return 0;
+}
+
+/*
+ * Read or write @length bytes at linear address @lin_addr, translating the
+ * linear address space onto the underlying I2C devices/pages:
+ *  - [0..127]            : base page of the 0xA0 device (all modules);
+ *  - [128..255]          : either a selected page of 0xA0 (page addressing)
+ *                          or, without page addressing, still 0xA0 direct;
+ *  - [256..511]          : 0xA2 device (non-paged SFP only);
+ *  - [SFP_PHY_LIN_ADDR..]: 16-bit PHY registers on the 0xAC device, read
+ *                          two bytes per register (MSByte first).
+ * Transfers are chunked so no access crosses a 128-byte block, selecting
+ * pages via nim_setup_page() as needed. Returns 0 on success, -1 on any
+ * range violation or I2C failure (already logged).
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* Advance by registers, not bytes: 16-bit PHY regs consume 2 bytes each */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+/* Read @length bytes at linear address @lin_addr; 0 on success, -1 on error. */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_READ);
+}
+
+/* Write @length bytes at linear address @lin_addr; 0 on success, -1 on error. */
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_WRITE);
+}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	/*
+	 * Zero-initialize: read_data_lin() may fail without touching the
+	 * buffer, and returning an uninitialized byte is undefined behavior.
+	 * NOTE(review): the return status is still discarded, so callers
+	 * cannot distinguish a failed read from a genuine 0x00 byte.
+	 */
+	uint8_t data = 0;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+/*
+ * Read the module identifier byte (address NIM_IDENTIFIER_ADDR) into
+ * ctx->nim_id. Returns 0 on success, -1 on I2C failure.
+ */
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Common NIM context initialization: read the module identifier, then reset
+ * all cached vendor strings and state fields to known defaults. Returns 0 on
+ * success or the nim_read_id() error code.
+ */
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	ctx->nim_id = 0;
+	int res = nim_read_id(ctx);
+
+	if (res) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;	/* -1 means "all lanes" elsewhere in this file */
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+/*
+ * Generate {x}sfp_read_vendor_info(): reads vendor name, product number,
+ * serial number, date and (two-byte) revision into the context cache.
+ * NOTE(review): the address tokens are pasted as Q##SFP_* (not x##...), so
+ * BOTH generated variants - sfp_read_vendor_info() and
+ * qsfp_read_vendor_info() - resolve to the QSFP_*_LIN_ADDR constants; the
+ * plain-SFP variant therefore reads QSFP linear addresses. Verify this is
+ * intended for SFP modules.
+ */
+#define XSFP_READ_VENDOR_INFO(x)                                             \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+XSFP_READ_VENDOR_INFO()
+XSFP_READ_VENDOR_INFO(q)
+
+/*
+ * Populate @state for an SFP/SFP+ module by reading the nominal bit-rate
+ * byte (SFP_BIT_RATE_ADDR) from the module. Returns the I2C status code.
+ */
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
+/*
+ * Populate @state for a QSFP-family module. Nothing is read from hardware;
+ * the bit-rate code is fixed from the identifier (12 = QSFP, 13 = QSFP+,
+ * 17 = QSFP28, matching nim_id_to_text()). Returns 0, or -1 and logs if the
+ * identifier is not a QSFP variant.
+ */
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+/* Dispatch state building to the SFP or QSFP variant based on the module id. */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
+/*
+ * Return a human-readable name for the module identifier byte @nim_id
+ * (values follow the SFF transceiver identifier encoding - TODO confirm),
+ * or "ILLEGAL!" for values outside the known set.
+ */
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	switch (nim_id) {
+	case 0x0:
+		return "UNKNOWN";
+	case 0x1:
+		return "GBIC";
+	case 0x2:
+		return "FIXED";
+	case 0x3:
+		return "SFP/SFP+";
+	case 0x04:
+		return "300 pin XBI";
+	case 0x05:
+		return "XEN-PAK";
+	case 0x06:
+		return "XFP";
+	case 0x07:
+		return "XFF";
+	case 0x08:
+		return "XFP-E";
+	case 0x09:
+		return "XPAK";
+	case 0x0A:
+		return "X2";
+	case 0x0B:
+		return "DWDM";
+	case 0x0C:
+		return "QSFP";
+	case 0x0D:
+		return "QSFP+";
+	case 0x11:
+		return "QSFP28";
+	case 0x12:
+		return "CFP4";
+	default:
+		return "ILLEGAL!";
+	}
+}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	/* First checksum: sum of bytes [0..62] must equal byte 63
+	 * (layout matches SFF-8472 CC_BASE - TODO confirm)
+	 */
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		/* Second checksum: sum of bytes [64..94] must equal byte 95 */
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate__select bits (RS0 & RS1)
+ */
+/*
+ * Set or clear the SFP soft rate-select bits via read-modify-write:
+ * RS0 (Rx, control/status register) from @rx_rate_high and RS1 (Tx,
+ * extended control/status register) from @tx_rate_high.
+ * I2C errors are not propagated to the caller.
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+/*
+ * Validate @speed against the module's speed mask and, for dual-rate SFPs,
+ * drive the soft rate-select bits high when the requested speed is the
+ * faster of the two supported rates. Returns 0 on success, -1 when @speed
+ * is not in the mask.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		/*
+		 * Fix: the format string has three "%s" conversions but only
+		 * two arguments were supplied, which is undefined behavior in
+		 * variadic formatting; __func__ was the missing first argument.
+		 */
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		/* Rate-select high when the requested rate is the faster one */
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Disable TX laser.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	int res;
+	uint8_t value;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	/* Read-modify-write of the soft TX-disable bit in control/status */
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_WRITE);
+
+	return res;
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ * A negative lane_idx addresses every lane at once. Returns 0 on success,
+ * -1 on I2C access failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	const uint8_t mask = (lane_idx < 0) ?
+		QSFP_SOFT_TX_ALL_DISABLE_BITS : /* all lanes */
+		(uint8_t)(1U << lane_idx);      /* a single lane */
+	uint8_t ctrl;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_READ) != 0)
+		return -1;
+
+	ctrl = disable ? (uint8_t)(ctrl | mask) : (uint8_t)(ctrl & ~mask);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ * p_data must point to a buffer of at least max_len bytes; on success it
+ * holds a NUL-terminated string. Returns 0 on success, -1 on read failure.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		/* Walk the buffer; p_data ends at the last byte if no space found */
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters.
+ * Each raw byte is scaled by the matching per-entry unit; 255 is the MSA
+ * "longer than encodable" marker and results are saturated to 16 bits.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++) {
+		const uint8_t raw = p_nim_len_info[i];
+
+		if (raw == 255) {
+			/* Marker for "longer than can be encoded" - saturate */
+			ctx->len_info[i] = 65535;
+			continue;
+		}
+
+		const uint32_t meters = (uint32_t)raw * p_nim_units[i];
+
+		/* Clamp to the 16-bit storage of len_info */
+		ctx->len_info[i] = (meters > 65535) ? 65535 : (uint16_t)meters;
+	}
+}
+
+/*
+ * Read static QSFP+/QSFP28 identification data over I2C and fill in the
+ * context: DMI options, vendor strings, supported fiber lengths and the
+ * required power class. Returns 0 on success, -1 on any I2C read failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	/*
+	 * NOTE(review): yes_no[ctx->avg_pwr] below assumes avg_pwr normalizes
+	 * to 0/1 (e.g. a bool-typed field) - verify, since the masked value
+	 * itself may be a larger power of two.
+	 */
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr]);
+
+	/* Read and log vendor name, product/serial number, date and revision */
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* Options register, bit 5: rate select implemented at all? */
+	if (((read_byte(ctx, options_reg_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	const uint8_t sel_type =
+		(uint8_t)((read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03);
+
+	if (sel_type == 2)
+		return true; /* When true selectRate() can be used */
+
+	NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+	       sel_type);
+	return false;
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ * Returns 0 on success, -1 when the requested speed cannot be selected.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	/* Rx/Tx rate-select register addresses (SFF-8636) */
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			/*
+			 * NOTE(review): 0xAAAA presumably encodes the high-rate
+			 * select code for every lane (two bits per lane) per
+			 * extended rate select v2 - confirm against SFF-8636.
+			 */
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a link speed to the module if its type supports rate adjustment.
+ * Returns 0 on success, -1 for unsupported module types or speeds.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return nim_sfp_set_rate_select(ctx, speed);
+
+	if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		/* Only modules with explicit speed selection need programming */
+		if (!qsfp28_is_speed_selection_enabled(ctx))
+			return 0; /* NIM picks up the speed automatically */
+
+		return qsfp28_set_link_speed(ctx, speed);
+	}
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information.
+ * Fills in DMI capability flags, sensor option bits, vendor strings,
+ * supported fiber lengths and the power level requirement/state.
+ * Always returns 0.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	/*
+	 * NOTE(review): the yes_no[...] indexing below assumes these fields
+	 * normalize the masked values to 0/1 (bool-typed) - verify.
+	 */
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	/* DMI-capable modules expose the full set of sensor options */
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	/* Vendor name, product/serial number, date and revision strings */
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	/* Determine whether the module requires power level 2 */
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ * Returns true when the product number was recognized; otherwise only the
+ * temperature sensor option is assumed and false is returned.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	/* Frequently occurring DMI sensor sets */
+	enum {
+		SENSORS_ALL = (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			      (1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			      (1 << NIM_OPTION_TX_POWER),
+	};
+
+	/* Known product numbers and the DMI sensors they implement */
+	static const struct {
+		const char *prod_no;
+		uint32_t options;
+	} nim_options_by_pn[] = {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		{ "FTL410QE1C", (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+				(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER) },
+		/* FINISAR FTL410QE2C, QSFP+ */
+		{ "FTL410QE2C", (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) },
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		{ "FTL4C1QE1C", SENSORS_ALL },
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		{ "AFBR-79E4Z", 1 << NIM_OPTION_TEMP },
+		/* AVAGO 79E4Z-D, QSFP+ */
+		{ "AFBR-79E4Z-D", SENSORS_ALL },
+		/* AVAGO 79EQDZ, QSFP+ */
+		{ "AFBR-79EQDZ", SENSORS_ALL },
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 * (SFF-8436_rev4.1, p67)
+		 */
+		{ "AFBR-79EBRZ", 1 << NIM_OPTION_RX_ONLY },
+		/*
+		 * Avago RxTx BiDi NIMs
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		{ "AFBR-79EBPZ-NU1", 0 },
+		{ "AFBR-79EBPZ", 0 },
+		/* AVAGO 89CDDZ, QSFP28 */
+		{ "AFBR-89CDDZ", SENSORS_ALL },
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		{ "AFBR-89BDDZ", SENSORS_ALL },
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors.
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+		{ "AFBR-89BRDZ", (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+				 (1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY) },
+		/* Sumitomo QSFP28 modules */
+		{ "SQF1000L4LNGG01P", SENSORS_ALL },
+		{ "SQF1000L4LNGG01B", SENSORS_ALL },
+		{ "SQF1001L4LNGG01P", SENSORS_ALL },
+		{ "SQF1001L4LNGG01B", SENSORS_ALL },
+		{ "SQF1002L4LNGG01B", SENSORS_ALL },
+		/* Fujitsu QSFP28 modules */
+		{ "FIM37700/171", SENSORS_ALL },
+		{ "FIM37700/172", SENSORS_ALL },
+		/* InnoLight QSFP28 modules */
+		{ "TR-FC85S-NVS", SENSORS_ALL },
+		{ "TR-FC13L-NVS", SENSORS_ALL },
+		/* Finisar QSFP28 modules */
+		{ "FTLC9551REPM", SENSORS_ALL },
+		{ "FTLC9558REPM", SENSORS_ALL },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(nim_options_by_pn); i++) {
+		if (strcmp(ctx->prod_no, nim_options_by_pn[i].prod_no) == 0) {
+			ctx->options = nim_options_by_pn[i].options;
+			return true;
+		}
+	}
+
+	/*
+	 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+	 * and active optical modules
+	 */
+	ctx->options = (1 << NIM_OPTION_TEMP);
+	return false;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly empirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ * Sets the given option bit in ctx->options when the sensor is deemed present.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	/* NOTE(review): data[] holds lane_count * 2 bytes - assumes
+	 * lane_count <= 4; verify at call sites. */
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	/* Assemble one big-endian 16-bit reading per lane */
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		/* Sign-extend when the sensor reports two's-complement data */
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors.
+ * Probes each sensor empirically (value plus limit registers) and sets the
+ * corresponding option bits in ctx->options.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	/* Per sensor: value address, lane count, limit address,
+	 * two's-complement flag, option bit to set when present */
+	static const struct {
+		uint16_t value_addr;
+		uint8_t lane_count;
+		uint16_t limit_addr;
+		bool two_compl;
+		uint32_t option;
+	} sensors[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sensors); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, sensors[i].value_addr,
+						 sensors[i].lane_count,
+						 sensors[i].limit_addr,
+						 sensors[i].two_compl,
+						 sensors[i].option);
+}
+
+/*
+ * Classify an SFP/SFP+/SFP28 module from its static EEPROM data and set
+ * ctx->port_type plus the sfp-specific flags (sfp28, sfpplus, dual_rate,
+ * tri_speed, cu_type, hw/sw rate select).
+ * NOTE(review): the raw addresses used below (12, 2, 6, 92, 8, 15-17, 36,
+ * 65, 93) presumably follow the SFF-8472 A0h layout - confirm against spec.
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Nominal bit rate in units of 100 Mb/s */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* 25.5 Gb/s nominal rate identifies an SFP28 module */
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	/* 10 Gb/s or faster (non-SFP28) counts as SFP+ */
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive the set of link speeds the module supports from the flags and port
+ * type established by sfp_find_port_params().
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		/* SFP28 runs 25G by default; dual-rate modules add 10G */
+		ctx->speed_mask = NT_LINK_SPEED_25G;
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* SFP+ runs 10G; dual-rate modules and DAC cables add 1G */
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+		if (ctx->specific_u.sfp.dual_rate ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		/* 100M-only SFP */
+		ctx->speed_mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Plain SFP defaults to 1G */
+		ctx->speed_mask = NT_LINK_SPEED_1G;
+		if (ctx->specific_u.sfp.dual_rate ||
+				ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_100M;
+		if (ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_10M;
+	}
+
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+/*
+ * Determine the QSFP28 port type from the specification compliance codes.
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	/* Fall back to the generic type unless a known code matches below */
+	ctx->port_type = NT_PORT_TYPE_QSFP28;
+
+	/* Bit 7 flags that the extended compliance byte is valid */
+	if ((fiber_chan_speed & (1 << 7)) == 0)
+		return;
+
+	/* SFF-8024, Rev 4.7, Table 4-4 */
+	uint8_t ext_compliance = 0;
+
+	read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+		    &ext_compliance);
+
+	switch (ext_compliance) {
+	case 0x02:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+		break;
+	case 0x03:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+		break;
+	case 0x0B:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+		break;
+	case 0x0C:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+		break;
+	case 0x0D:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+		break;
+	case 0x25:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+		break;
+	case 0x26:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+		break;
+	case 0x27:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+		break;
+	default:
+		/* Keep the generic NT_PORT_TYPE_QSFP28 */
+		break;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* Options register, bit 5: rate select implemented at all? */
+	if (((read_byte(ctx, options_reg_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	const uint8_t sel_type =
+		(uint8_t)((read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03);
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	/* Extended rate select compliance, bits 1..0: must be version 2 */
+	const uint8_t ers_ver =
+		(uint8_t)(read_byte(ctx, ext_rate_select_compl_reg_addr) & 0x03);
+
+	if (ers_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ers_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive the supported speed mask for a QSFP28 module depending on port
+ * type, lane configuration and rate-selection capability.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = (ctx->lane_idx < 0);
+	const bool pam4 = ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR;
+
+	if (pam4) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	switch (device_tech & 0xF0) {
+	case 0xA0: /* Copper cable unequalized */
+	case 0xB0: /* Copper cable passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+		break;
+	case 0xC0: /* Copper cable, near and far end limiting active equalizers */
+	case 0xD0: /* Copper cable, far end limiting active equalizers */
+	case 0xE0: /* Copper cable, near end limiting active equalizers */
+	case 0xF0: /* Copper cable, linear active equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+		break;
+	default: /* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+		break;
+	}
+}
+
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	ctx->speed_mask = (ctx->lane_idx < 0) ? NT_LINK_SPEED_40G :
+			 (NT_LINK_SPEED_10G);
+}
+
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	int res = sfp_read_basic_data(ctx);
+
+	if (!res) {
+		sfp_find_port_params(ctx);
+		sfp_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->specific_u.qsfp.qsfp28 = false;
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+}
+
/*
 * Pre-initialize a QSFP+ module: read identification data, determine port
 * type, sensor options and speed mask.
 * Returns zero on success, non-zero if the basic EEPROM data could not be
 * read (in which case no further classification is attempted).
 */
static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
{
	qsfpplus_construct(ctx, lane_idx);
	int res = qsfpplus_read_basic_data(ctx);

	if (!res) {
		qsfpplus_find_port_params(ctx);
		/*
		 * If not on the known modules list try to figure out which sensors that are present
		 */
		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
			NT_LOG(DBG, NTHW,
			       "NIM options not known in advance - trying to detect");
			qsfpplus_get_qsfp_options_from_data(ctx);
		}

		/*
		 * Read if TX_DISABLE has been implemented
		 * For passive optical modules this is required while it for copper and active
		 * optical modules is optional. Under all circumstances register 195.4 will
		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
		 */
		uint8_t value;

		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);

		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;

		if (ctx->tx_disable)
			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);

		/*
		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
		 * module default disabled.
		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
		 * decided that this option should not be detected automatically but from PN
		 */
		ctx->specific_u.qsfp.rx_only =
			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
		qsfpplus_set_speed_mask(ctx);
	}
	return res;
}
+
/*
 * After reset, wait until a QSFP28 module is ready for register access.
 * If the module advertises the "init complete" flag (rev compliance >= 7
 * and register 221 bit 4 set) the flag is polled with a 1 s timeout;
 * otherwise a fixed 500 ms delay is used. Only waits for lane_idx <= 0 so
 * the wait happens once per module, not once per lane.
 */
static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	bool init_complete_flag_present = false;

	/*
	 * Revision compliance
	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
	 */
	read_data_lin(ctx, 1,
		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);

	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
	if (ctx->lane_idx > 0)
		return;

	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
		/* Check if init complete flag is implemented */
		read_data_lin(ctx, 221, sizeof(data), &data);
		init_complete_flag_present = (data & (1 << 4)) != 0;
	}

	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
	       init_complete_flag_present);

	/*
	 * If the init complete flag is not present then wait 500ms that together with 500ms
	 * after reset (in the adapter code) should be enough to read data from upper pages
	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
	 * Probably because access to the paged address space is required.
	 */
	if (!init_complete_flag_present) {
		NT_OS_WAIT_USEC(500000);
		return;
	}

	/* Otherwise wait for the init complete flag to be set */
	int count = 0;

	while (true) {
		if (count > 10) { /* 1 s timeout */
			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
			break;
		}

		/* presumably byte 6 bit 0 is the init-complete status — confirm vs SFF-8636 */
		read_data_lin(ctx, 6, sizeof(data), &data);

		if (data & 0x01) {
			NT_LOG(DBG, NTHW, "Module ready after %dms",
			       count * 100);
			break;
		}

		NT_OS_WAIT_USEC(100000); /* 100 ms */
		count++;
	}
}
+
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		if (ctx->prod_no == nim_list[i]) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		qsfp28_wait_for_ready_after_reset(ctx);
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(struct nim_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
/* Minimal NIM state snapshot used by link handling code */
typedef struct sfp_nim_state {
	uint8_t br; /* bit rate, units of 100 MBits/sec */
} sfp_nim_state_t, *sfp_nim_state_p;

/*
 * Per-module I2C context: identification strings read from the module
 * EEPROM plus the capabilities deduced during pre-initialization.
 */
typedef struct nim_i2c_ctx {
	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
	uint8_t instance;
	uint8_t devaddr; /* I2C device address of the module */
	uint8_t regaddr;
	uint8_t nim_id; /* raw identifier byte, see enum nt_nim_identifier_e */
	nt_port_type_t port_type;

	/* EEPROM identification fields, sized for the raw field plus NUL */
	char vendor_name[17];
	char prod_no[17];
	char serial_no[17];
	char date[9];
	char rev[5];
	bool avg_pwr; /* presumably: power values are averages, not OMA — confirm */
	bool content_valid; /* basic EEPROM data was read successfully */
	uint8_t pwr_level_req;
	uint8_t pwr_level_cur;
	uint16_t len_info[5];
	uint32_t speed_mask; /* Speeds supported by the NIM */
	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
	uint8_t lane_count;
	uint32_t options; /* bitmask of (1 << nim_option_t) capabilities */
	bool tx_disable; /* module implements TX_DISABLE */
	bool dmi_supp; /* digital diagnostics (DMI) supported */

	union {
		/* SFP/SFP+/SFP28 specific capabilities */
		struct {
			bool sfp28;
			bool sfpplus;
			bool dual_rate;
			bool hw_rate_sel;
			bool sw_rate_sel;
			bool cu_type;
			bool tri_speed;
			bool ext_cal;
			bool addr_chg;
		} sfp;

		/* QSFP+/QSFP28 specific capabilities */
		struct {
			bool rx_only;
			bool qsfp28; /* true when the module is QSFP28 */
			union {
				struct {
					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
					bool media_side_fec_ctrl;
					bool host_side_fec_ctrl;
					bool media_side_fec_ena;
					bool host_side_fec_ena;
				} qsfp28;
			} specific_u;
		} qsfp;

	} specific_u;
} nim_i2c_ctx_t, *nim_i2c_ctx_p;

/* Linked-list node binding a sensor to its NIM context and read callback */
struct nim_sensor_group {
	struct nt_adapter_sensor *sensor;
	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
	struct nim_i2c_ctx *ctx;
	struct nim_sensor_group *next;
};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
/*
 * This function tries to classify the NIM based on its ID and some register
 * reads, and collects information into the ctx structure. The @extra parameter
 * may contain an initialization argument for specific types of NIMs.
 */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
/* Capability/option bits stored in nim_i2c_ctx::options (used as 1 << option) */
typedef enum {
	NIM_OPTION_TEMP = 0,
	NIM_OPTION_SUPPLY,
	NIM_OPTION_RX_POWER,
	NIM_OPTION_TX_BIAS,
	NIM_OPTION_TX_POWER,
	NIM_OPTION_TX_DISABLE,
	/* Indicates that the module should be checked for the two next FEC types */
	NIM_OPTION_FEC,
	NIM_OPTION_MEDIA_SIDE_FEC,
	NIM_OPTION_HOST_SIDE_FEC,
	NIM_OPTION_RX_ONLY
} nim_option_t;
+
/*
 * Module identifier byte (EEPROM address 0); the values follow the
 * SFF-8024 identifier code assignments.
 */
enum nt_nim_identifier_e {
	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
	NT_NIM_XFP = 0x06, /* Nim type = XFP */
	NT_NIM_XFF = 0x07, /* Nim type = XFF */
	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
};
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
enum nt_port_type_e {
	/*
	 * NOTE(review): most values rely on implicit enumeration, so the
	 * numeric codes depend on declaration order; append new entries at
	 * the end — confirm whether these codes are externally visible.
	 */
	NT_PORT_TYPE_NOT_AVAILABLE =
		0, /* The NIM/port type is not available (unknown) */
	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
	NT_PORT_TYPE_RJ45, /* RJ45 type */
	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
	NT_PORT_TYPE_SFP_SX, /* SFP SX */
	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
	NT_PORT_TYPE_SFP_LX, /* SFP LX */
	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
	NT_PORT_TYPE_SFP_CU, /* SFP copper */
	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
	NT_PORT_TYPE_XFP, /* XFP */
	NT_PORT_TYPE_XPAK, /* XPAK */
	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
	NT_PORT_TYPE_CFP4, /* CFP4 type */
	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	NT_PORT_TYPE_QSFP_PLUS_4X10,
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
	NT_PORT_TYPE_SFP_28, /* SFP28 type */
	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
	NT_PORT_TYPE_SFP_FX, /* SFP FX */
	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
	NT_PORT_TYPE_QSFP28_FR,
	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
	NT_PORT_TYPE_QSFP28_DR,
	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
	NT_PORT_TYPE_QSFP28_LR,
};

typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return "NotAvail";
+	case NT_LINK_SPEED_10M:
+		return "10M";
+	case NT_LINK_SPEED_100M:
+		return "100M";
+	case NT_LINK_SPEED_1G:
+		return "1G";
+	case NT_LINK_SPEED_10G:
+		return "10G";
+	case NT_LINK_SPEED_25G:
+		return "25G";
+	case NT_LINK_SPEED_40G:
+		return "40G";
+	case NT_LINK_SPEED_50G:
+		return "50G";
+	case NT_LINK_SPEED_100G:
+		return "100G";
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return "Unhandled";
+	}
+}
+
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	uint64_t n_link_speed = 0ULL;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		n_link_speed = 0UL;
+		break;
+	case NT_LINK_SPEED_10M:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100M:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_1G:
+		n_link_speed = (1ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_10G:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_25G:
+		n_link_speed = (25ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_40G:
+		n_link_speed = (40ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_50G:
+		n_link_speed = (50ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100G:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		n_link_speed = 0UL;
+		break;
+	}
+	return n_link_speed;
+}
+
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			if (len > 0) {
+				if ((length - len - 1) > 2) {
+					strncat(buffer, ", ", length);
+					len = strlen(buffer);
+				}
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
/*
 * Read `count` consecutive 16-bit sensor values starting at linear address
 * `addr` into p_lane_values (one value per lane).
 * Always returns true; the result of read_data_lin() is not checked.
 */
static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
				   nim_option_t nim_option, uint8_t count,
				   uint16_t *p_lane_values)
{
	(void)nim_option;

	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
		    p_lane_values);

	for (int i = 0; i < count; i++) {
		/*
		 * NOTE(review): this self-assignment is a no-op although the
		 * comment claims a byte swap. Either read_data_lin() already
		 * delivers host-endian values (then the comment is stale) or
		 * a big- to little-endian 16-bit swap is missing here —
		 * confirm against hardware before changing.
		 */
		*p_lane_values = (*p_lane_values); /* Swap to little endian */

#ifdef NIM_DMI_TEST_VALUE
		if (nim_option == NIM_OPTION_RX_POWER)
			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
		else
			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
#endif

		p_lane_values++;
	}

	return true;
}
+
+/*
+ * Read NIM temperature
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_temperature(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)(res * 10 / 256));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_supply_voltage(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)((res) / 10));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_bias_current(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i] * 2);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_rx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * QSFP NIM sensor read callbacks: each publishes a value into the sensor
+ * object, or -1 when the module read fails.
+ */
+#ifndef QSFP_SENSORS_H
+#define QSFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* QSFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SFP_P_REGISTERS_H
+#define SFP_P_REGISTERS_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+/*
+ * NOTE(review): the two bits below have no address macro here; they appear to
+ * belong to a status/control byte in the 0xA2 page - confirm against SFF-8472.
+ */
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* SFP_P_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ *
+ * Returns false when the module does not support DMI, true otherwise.
+ * NOTE(review): the htons() "swap to little endian" only byte-swaps on
+ * little-endian hosts; on a big-endian host it is a no-op - presumably only
+ * little-endian hosts are supported, confirm.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to little endian */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/* Apply slope/offset in 32-bit, then saturate to 16-bit range */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ * Result is a signed raw DMI value (calibrated when ext_cal is set).
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage
+ * Result is an unsigned raw DMI value (calibrated when ext_cal is set).
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM bias current
+ * Result is an unsigned raw DMI value (calibrated when ext_cal is set).
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power
+ * Result is an unsigned raw DMI value (calibrated when ext_cal is set).
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ *
+ * Returns false only when the calibrated value exceeds the 16-bit range;
+ * negative results are clamped to zero.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/* In-place 32-bit byte swap of each IEEE-754 coefficient */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Horner-free evaluation: accumulate successive powers of RxPwr */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		if (rx_power > 65535)
+			return false;
+
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists
+ * Thin wrapper selecting the live RX power register address.
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Scale raw 1/256 units by 10/256 on success; -1 signals failure */
+	int value = -1;
+
+	if (sfp_nim_get_temperature(sg->ctx, &raw))
+		value = (int)(raw * 10 / 256);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Unit: 100uV -> 1mV; -1 signals failure */
+	int value = -1;
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &raw))
+		value = (int)(raw / 10);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Scale the raw reading by 2 as in the SFP DMI format; -1 on failure */
+	int value = -1;
+
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &raw))
+		value = (int)(raw * 2);
+
+	update_sensor_value(sg->sensor, value);
+}
+
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Publish the raw TX power reading, or -1 on failure */
+	int value = -1;
+
+	if (sfp_nim_get_tx_power(sg->ctx, &raw))
+		value = (int)raw;
+
+	update_sensor_value(sg->sensor, value);
+}
+
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/* Publish the raw RX power reading, or -1 on failure */
+	int value = -1;
+
+	if (sfp_nim_get_rx_power(sg->ctx, &raw))
+		value = (int)raw;
+
+	update_sensor_value(sg->sensor, value);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+/*
+ * Rename the generic symbols emitted by the Si5340 register dump header so
+ * that several clock profiles can coexist in one binary, then undo the
+ * renaming (and the include guard) so another profile can include it again.
+ */
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+/* NOTE(review): cast assumes clk_profile_data_fmt2_t matches the
+ * { address, value } layout of si5340_revd_register_t - confirm.
+ */
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /* Disable the include-once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate a zero-initialized GMF object; returns NULL on allocation failure.
+ * calloc replaces the malloc+memset pair and checks the size product.
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/* Scrub and release a GMF object; NULL is accepted and ignored */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_gmf_t));
+	free(p);
+}
+
+/*
+ * Bind a GMF object to module instance n_instance of p_fpga and cache its
+ * register/field handles. Returns 0 on success, -1 when the instance does not
+ * exist. Calling with p == NULL only probes for the instance's presence.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* Probe-only mode: report presence of the module instance */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields (get-variants assert on absence) */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 (neutral multiplier/divisor) */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields (query-variants return NULL on absence) */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF; silently ignored once administratively blocked */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Write and flush the IFG-enable control bit */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	const uint32_t val = enable ? 1U : 0U;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, val);
+}
+
+/* Optional field: a no-op when this FPGA image lacks IFG_TX_NOW_ALWAYS */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->mp_ctrl_ifg_tx_now_always)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Optional field: a no-op when this FPGA image lacks IFG_TX_ON_TS_ALWAYS */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->mp_ctrl_ifg_tx_on_ts_always)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Optional field: a no-op when IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK is absent */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (!p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			    enable ? 1 : 0);
+}
+
+/*
+ * Write and flush the IFG auto-adjust enable bit.
+ * Uses the explicit enable ? 1 : 0 form for consistency with the sibling
+ * setters (same resulting value; bool converts to 0/1 either way).
+ */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write the raw IFG speed value; returns 0 on success, -1 when the value does
+ * not fit. NOTE(review): the bound is (1 << (bit_width - 1)), i.e. half the
+ * field's numeric range - confirm whether the top bit is deliberately reserved.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		/* Wide fields are written as two 32-bit words from the uint64_t */
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Width in bits of the SPEED.IFG_SPEED field of this FPGA image */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(width >= 22);
+
+	return width;
+}
+
+/*
+ * Program the IFG speed from an absolute rate limit (bits/s) relative to the
+ * link speed, applying the product's speed multiplier/divisor. The fixed-point
+ * scale uses half the field's bit width as the fractional part.
+ * Returns 0 on success, -1 when the computed value does not fit the field.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	/* speed = (1/rate - 1) * 2^width, rounded to nearest */
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed from a percentage of line rate.
+ * 0%% and 100%% both map to raw value 0 (no throttling); values up to 99%% are
+ * converted with the product multiplier/divisor; anything in (99, 100) or out
+ * of range yields -1. Returns 0 on success.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		/* speed = (1/rate - 1) * 2^width (truncated, unlike the bits variant) */
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit clock delta as two 32-bit words and flush */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the optional 64-bit delta-adjust; a no-op when the register is absent */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+/* Write the 64-bit max-adjust slack as two 32-bit words and flush */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write and flush the debug lane-marker compensation value */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
+}
+
+/* Read the sticky status register and fold it into a GMF_STATUS_MASK_* bitmask */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t mask = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed))
+		mask |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted))
+		mask |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return mask;
+}
+
+/*
+ * Assert the sticky bits selected in 'status'.
+ * NOTE(review): presumably these are write-1-to-clear sticky flags, so this
+ * acknowledges/clears them - confirm against the register description.
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the optional STAT_NEXT_PKT_NS value; returns UINT64_MAX when the
+ * register is absent in this FPGA image.
+ * Fix: the sentinel was ULONG_MAX, which is only 2^32-1 on ILP32/LLP64
+ * platforms and so would not fill the full 64-bit "invalid" value there.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		/* 64-bit field read as two 32-bit words */
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the optional STAT_MAX_DELAYED_PKT_NS value; returns UINT64_MAX when
+ * the register is absent in this FPGA image.
+ * Fix: the sentinel was ULONG_MAX, which is only 2^32-1 on ILP32/LLP64
+ * platforms and so would not fill the full 64-bit "invalid" value there.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		/* 64-bit field read as two 32-bit words */
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Permanently disable the GMF (used to enforce license expiry).
+ * Order matters: disable first, because once m_administrative_block is set
+ * nthw_gmf_set_enable() becomes a no-op.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/* Bitmask values returned/accepted by the sticky-status accessors */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/*
+ * Handle for one GMF FPGA module instance: cached register and field
+ * pointers plus product parameters. Optional fields may be NULL when the
+ * FPGA image does not implement them.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+/* Returns 0 on success, -1 when the module instance does not exist */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+/* Setters for optional fields are no-ops when the field is absent */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+/* Speed setters return 0 on success, -1 when the value does not fit */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..c4c6779ce0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC (Receive MAC Control) context.
+ *
+ * Returns NULL on allocation failure. The caller owns the returned object
+ * and must release it with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc() zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/*
+ * Release an RMC context obtained from nthw_rmc_new().
+ * The object is scrubbed before being freed; passing NULL is a no-op.
+ */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an RMC context to FPGA module instance n_instance and resolve all
+ * register/field handles used by the other nthw_rmc_* functions.
+ *
+ * When p is NULL the call only probes for the module's existence:
+ * returns 0 if the instance exists, -1 otherwise.
+ * Returns 0 on success, -1 if the module instance is not present.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	/* Probe-only mode: report presence without initializing anything */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count; falls back to the generic port count parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL register is mandatory for this module */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/*
+	 * STATUS/DBG/MAC_IF registers are optional (depend on FPGA image);
+	 * the getters below return 0xffffffff when a register is absent.
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read back the current per-port MAC blocking mask from the CTRL register. */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	uint32_t n_mask = field_get_updated(p->mp_fld_ctrl_block_mac_port);
+
+	return n_mask;
+}
+
+/* SF RAM overflow counter; 0xffffffff when the STATUS register is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/* Descriptor FIFO overflow counter; 0xffffffff when STATUS is absent. */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/* Debug merge counter; 0xffffffff when the DBG register is absent. */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/*
+ * Write a new per-port MAC blocking mask to the CTRL register and flush
+ * it to hardware. A set bit blocks the corresponding MAC port.
+ */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block all RMC traffic paths: statistics drop, keep-alive and every MAC
+ * port. Skipped entirely while an administrative block is latched, so an
+ * administrative block cannot be weakened by normal block/unblock cycles.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Re-enable RMC traffic. Ports up to mn_ports (or NIMs up to mn_nims when
+ * b_is_slave) are unblocked; all higher mask bits remain blocked.
+ * No-op while an administrative block is latched.
+ *
+ * NOTE(review): ~0U << shift is undefined for shift >= 32 — assumes
+ * mn_ports/mn_nims < 32; confirm against FPGA product parameters.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave)
+{
+	uint32_t n_block_mask = ~0U << (b_is_slave ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Latch an administrative block: all MAC ports are blocked and subsequent
+ * nthw_rmc_block()/nthw_rmc_unblock() calls become no-ops.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	p->mb_administrative_block = true;
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..b40f0a0994
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * Context for one RMC (Receive MAC Control) FPGA module instance.
+ * Populated by nthw_rmc_init(); register/field pointers for optional
+ * registers (STATUS, DBG, MAC_IF) may be NULL on FPGA images without them.
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
+	nt_module_t *mp_mod_rmc;	/* resolved RMC module instance */
+	int mn_instance;	/* module instance number */
+
+	int mn_ports;	/* number of RX ports (product parameter) */
+	int mn_nims;	/* number of NIMs (product parameter) */
+	bool mb_is_vswitch;	/* true when FPGA profile is VSWITCH */
+
+	/* when true, block/unblock calls are ignored (ports stay blocked) */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+/* Lifecycle: allocate, bind to FPGA module instance, release. */
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* MAC-port blocking control (mask bit set = port blocked). */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_slave);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+/* Status/debug counters; return 0xffffffff when the register is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Per-adapter counter handing out the next free FPGA sensor-result slot.
+ * File-local state — declared static to keep it out of the global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * This function setups monitoring of AVR sensors
+ *
+ * Programs one sensor-monitor entry into the AVR over SPI; the AVR places
+ * results in the FPGA slot returned by get_fpga_idx().
+ *
+ * Returns the allocated FPGA result index. NOTE(review): the index is
+ * returned even when nt_avr_sensor_mon_setup() fails (only an error is
+ * logged) — callers cannot detect the failure; confirm this is intended.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format packs endianness in b0..1 and signedness in b2..3 */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Sensor-group read callback: fetch the raw AVR result from the FPGA slot,
+ * convert it with the group's conversion function and store the value.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t raw_result;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &raw_result);
+	update_sensor_value(sg->sensor, sg->conv_func(raw_result));
+}
+
+/*
+ * Create and register one AVR-monitored sensor group.
+ *
+ * Allocates the group and its sensor, programs the AVR monitoring entry
+ * and wires in the read/convert callbacks. Returns NULL on allocation
+ * failure (previously a NULL result from allocate_sensor() was
+ * dereferenced unchecked).
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* avoid NULL dereference below when sensor allocation fails */
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Hand out the next free FPGA sensor-result slot for the given adapter.
+ * Guards against an out-of-range adapter index, which previously read and
+ * wrote past the end of s_fpga_indexes[MAX_ADAPTERS].
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	uint8_t tmp;
+
+	if (adapter_no >= MAX_ADAPTERS) {
+		NT_LOG(ERR, ETHDEV, "%s: adapter index out of range\n", __func__);
+		return 0;
+	}
+
+	tmp = s_fpga_indexes[adapter_no];
+	s_fpga_indexes[adapter_no] = (uint8_t)(tmp + 1);
+
+	return tmp;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Sensor-group read callback for the on-die FPGA temperature sensor.
+ * Reads the raw TEMPMON field and converts it to the sensor unit.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;	/* SPI handle unused for on-die sensor */
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Linear ADC-code to temperature conversion; constants presumably
+	 * come from the FPGA vendor's sysmon transfer function scaled to
+	 * 0.1 degC units — TODO confirm against the device datasheet.
+	 */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the sensor group for the on-die FPGA temperature sensor.
+ *
+ * Returns NULL when the group or its TEMPMON monitor cannot be allocated
+ * (previously a NULL monitor was stored and later dereferenced by the
+ * read callback).
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	if (sg->monitor == NULL) {
+		/* read callback dereferences sg->monitor — fail early */
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate an uninitialized TEMPMON monitor object.
+ * Logs and returns NULL on allocation failure; initialize the result
+ * with tempmon_init() before use.
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *monitor =
+		malloc(sizeof(*monitor));
+	if (monitor == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return monitor;
+}
+
+/*
+ * Bind a TEMPMON monitor to the FPGA: resolve the module, its STAT
+ * register and the TEMP field used by the temperature read callback.
+ *
+ * Each lookup is now checked before it is used — previously a NULL
+ * module/register was logged but still passed to module_get_register()
+ * or register_get_field(), risking a NULL dereference.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return;
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return;
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/* Wire format of one AVR sensor-monitor setup entry (byte-packed). */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/*
+ * NOTE(review): pack() is restored here, so sensor_mon_setup16 below is
+ * NOT byte-packed even though it is transmitted over SPI (see
+ * nt_avr_sensor_mon_setup) — confirm whether the pragma was meant to
+ * cover both structures.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Perform one SPI v3 transfer to the AVR.
+ *
+ * tx/txsz describe the request payload; on success *rxsz is updated with
+ * the number of response bytes placed in rx. rxsz may be NULL when no
+ * response is expected. Returns 0 on success, non-zero transfer error
+ * otherwise.
+ *
+ * Fix: *rxsz was dereferenced in the m_rx initializer before the
+ * rxsz != NULL check further down — NULL is now handled up front.
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	int res = 1;
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = rxsz ? (uint16_t)*rxsz : 0,
+				  .p_buf = rx };
+
+	res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Fetch one AVR-monitored sensor result from its FPGA slot.
+ * Thin wrapper around nthw_spis_read_sensor(); returns its status code.
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	uint32_t status = nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+
+	return status;
+}
+
+/*
+ * Send a sensor-monitor setup table to the AVR over SPI.
+ * Only the populated setup_data entries (setup_cnt of them) are
+ * transmitted; no response payload is expected.
+ * Returns 0 on success, non-zero on transfer error or unexpected reply.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	size_t rx_size = 0;
+	/* header size plus only the entries actually in use */
+	size_t tx_size = sizeof(struct sensor_mon_setup16) -
+			 sizeof(p_setup->setup_data) +
+			 sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	int error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size,
+			 (uint16_t *)p_setup, &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Issue a sensor-monitor control command (stop/run/remove-all) to the AVR.
+ * No response payload is expected; returns 0 on success, non-zero on
+ * transfer error or unexpected reply.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	size_t rx_size = 0;
+	int error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+			 (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..bac4e925f9
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Master/Slave
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_MASTER, /* Adapter is master in the bonding */
+	NT_BONDING_SLAVE, /* Adapter is slave in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/*
+ * Record a new reading on a sensor and maintain the running min/max.
+ * NULL-safe: ignores a NULL sensor.
+ *
+ * value_lowest/value_highest start out as NT_SENSOR_NAN (see allocate_sensor)
+ * and are replaced by the first real reading; the cast to unsigned int
+ * matches how the NT_SENSOR_NAN sentinel is represented when stored in the
+ * signed int32_t fields.
+ */
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/*
 * Identity conversion for sensors delivering a signed 16-bit raw value:
 * reinterpret the low 16 bits as two's complement and widen to int.
 */
int null_signed(uint32_t p_sensor_result)
{
	int16_t raw = (int16_t)p_sensor_result;

	return raw;
}
+
/*
 * Identity conversion for sensors delivering an unsigned 16-bit raw value:
 * keep only the low 16 bits.
 */
int null_unsigned(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * EXAR7724: convert a raw Vch reading to Napatech internal representation.
 * Datasheet: Vout = ReadVal * 0.015 V (PRESCALE is accounted for),
 * i.e. 15 mV per LSB.
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	const uint32_t mv_per_lsb = 15;

	return p_sensor_result * mv_per_lsb; /* NT unit: 1mV */
}
+
/*
 * EXAR7724: convert a raw Vin reading to Napatech internal representation.
 * Datasheet: Vout = ReadVal * 0.0125, i.e. 12.5 mV per LSB, computed in
 * integer arithmetic as 25/2.
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	uint32_t scaled = p_sensor_result * 25;

	return scaled / 2; /* NT unit: 1mV */
}
+
/*
 * EXAR7724: convert a raw Tj reading to Napatech internal representation.
 * Datasheet: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K
 *                             = ReadVal * 5K
 *
 * Returns the temperature in NT units of 0.1C.
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * A value of 2730 is used instead of 2732 which is more correct but since
	 * the temperature step is 5 degrees it is more natural to show these steps
	 *
	 * Convert to int before subtracting so readings below 0C produce a
	 * well-defined negative result instead of relying on the
	 * implementation-defined unsigned-to-signed conversion of the old
	 * all-unsigned expression.
	 */
	return (int)(p_sensor_result * 50) - 2730; /* NT unit: 0.1C */
}
+
/*
 * Decode the Linear Technology "Linear_5s_11s" format.
 * The encoded word represents Y * 2**N, where N = b[15:11] is a 5-bit two's
 * complement exponent and Y = b[10:0] is an 11-bit two's complement
 * mantissa. The multiplier scales the decoded value to Napatech units.
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	int mantissa = value & 0x07FF;
	int exponent = (value >> 11) & 0x1F;

	/* Sign-extend the 11-bit mantissa */
	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent */
	if (exponent & 0x10)
		exponent -= 0x20;

	int result = mantissa * multiplier;

	/* Apply 2**exponent via multiply/divide (shifting a negative is UB) */
	if (exponent > 0)
		result *= (1 << exponent);
	else if (exponent < 0)
		result /= (1 << (-exponent));

	return result;
}
+
+/*
+ * ******************************************************************************
+ * Temperature conversion from Linear_5s_11s format.
+ * NOTE(review): the decoded value is cast through uint16_t before being
+ * returned as int, so a negative decoded temperature would come back as a
+ * large positive number - presumably readings here are always above 0C;
+ * confirm whether negative temperatures are possible. Also note the
+ * uint32_t argument is implicitly truncated to the uint16_t parameter of
+ * conv5s_11s().
+ * ******************************************************************************
+ */
+int ltm4676_tj(uint32_t p_sensor_result)
+{
+	return (uint16_t)conv5s_11s(p_sensor_result, 10); /* NT unit: 0.1C */
+}
+
/*
 * MPS MP2886A: convert a READ_TEMPERATURE value (register 0x8D) to
 * Napatech internal representation.
 * READ_TEMPERATURE is a 2-byte, unsigned integer, so only the low 16 bits
 * of the raw result are significant.
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * DS1775: convert a raw temperature reading to Napatech internal
 * representation.
 * Native unit is 1/256 C per LSB; the result is in 0.1C units.
 */
int ds1775_t(uint32_t p_sensor_result)
{
	const uint32_t tenths_times_256 = p_sensor_result * 10;

	return tenths_times_256 / 256;
}
+
/*
 * FAN: convert a tick count to RPM.
 * NT unit: RPM; native unit is ticks (2 ticks/revolution per the original
 * comment - NOTE(review): the /4 divisor suggests the counting window or
 * edge detection contributes another factor of 2; confirm against hardware
 * documentation).
 */
int fan(uint32_t p_sensor_result)
{
	/* Keep the multiply-then-divide order of the original expression */
	uint32_t rpm = p_sensor_result * 60U / 4;

	return rpm;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
+/*
+ * Alarm handling mode for a sensor: raise, log-only or disabled. The exact
+ * event semantics are handled by the alarm consumers, not in this module.
+ */
+enum nt_sensor_event_alarm_e {
+	NT_SENSOR_ENABLE_ALARM,
+	NT_SENSOR_LOG_ALARM,
+	NT_SENSOR_DISABLE_ALARM,
+};
+
+/*
+ * Sensor Class types
+ * Classifies a sensor by the component it measures. Values are assigned
+ * explicitly and should be kept stable.
+ */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
+};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ * Runtime state for a single adapter/port sensor instance.
+ */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no; /* Owning adapter number; 0xFF until assigned */
+	uint8_t m_intf_no; /* Owning interface (port) number; 0xFF until assigned */
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si; /* Signed/unsigned interpretation of raw readings */
+	struct nt_info_sensor_s info; /* Public info: type, state, value, min/max, name */
+	enum nt_sensor_event_alarm_e alarm; /* Alarm reporting mode */
+	bool m_enable_alarm; /* Alarm generation enabled for this sensor */
+};
+
+/*
+ * FPGA register handles needed to read one sensor: the owning FPGA/module,
+ * the data register and the individual fields within it.
+ */
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga; /* FPGA the sensor registers live on */
+	nt_module_t *mod; /* Module containing the sensor register */
+
+	nt_register_t *reg; /* Register holding the sensor data */
+	nt_field_t **fields; /* Array of fields_num field handles */
+	uint8_t fields_num; /* Number of entries in fields */
+};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ * Used by allocate_sensor_by_description() to create the runtime object.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name (NUL-terminated; shorter than the info name field). */
+};
+
+/*
+ * Linked-list node pairing a sensor with the FPGA monitor used to read it.
+ * Both sensor and monitor are owned by the group and released together in
+ * sensor_deinit().
+ */
+struct nt_sensor_group {
+	struct nt_adapter_sensor *sensor; /* Owned sensor state */
+	struct nt_fpga_sensor_monitor *monitor; /* Owned FPGA register handles */
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* Fetch a new reading */
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result); /* Raw value to NT-unit conversion */
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next; /* Next group in the list; NULL terminates */
+};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+/* Maximum sensor-name length excluding the NUL terminator */
+#define NT_INFO_SENSOR_NAME 50
+/* Public, per-sensor information record; units of the value fields depend on type. */
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (always NUL-terminated) */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
+/* The NT200A02 adapter sensor id's; values double as sensor indices. */
+enum nt_sensors_adapter_nt200a02_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */
+
+	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
+	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NT200A02_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NIM_POWER,
+
+	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
+};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 4/8] net/ntnic: adds flow related FPGA functionality
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-08-31 13:51   ` Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct cat_nthw *cat_nthw_new(void)
+{
+	struct cat_nthw *p = malloc(sizeof(struct cat_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Scrub and free a CAT module context. NULL-safe.
+ * The memset before free invalidates stale register/field pointers in the
+ * object as a debugging aid (it is not a security wipe and may be elided
+ * by the optimizer).
+ */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Forward the debug-mode setting to the underlying CAT FPGA module.
+ * NOTE(review): p is dereferenced unchecked - callers must pass a context
+ * obtained from a successful cat_nthw_new()/cat_nthw_init().
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN table entry to access: writes the ADR field of CFN_CTRL. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN control-register CNT field (number of entries to operate on).
+ * NOTE(review): this setter was evidently meant to be named
+ * cat_nthw_cfn_cnt() to match every sibling accessor in this file; the
+ * single-letter name "r" looks like an editing accident.  A correctly
+ * named function is provided, and "r" is kept as a thin alias so any
+ * existing callers keep compiling and linking.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	cat_nthw_cfn_cnt(p, val);
+}
+
+/*
+ * CFN DATA field setters.  Each writes one field of the shadow copy of the
+ * CAT CFN_DATA register; nothing reaches hardware until
+ * cat_nthw_cfn_flush() is called.
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+/* PTC_* fields: protocol-tracer match conditions for the filter entry. */
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+/* PTC_TNL_* fields: same match conditions, applied to the inner (tunneled)
+ * headers.
+ */
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/*
+ * Setters for CFN DATA fields that are optional in the FPGA image: the
+ * field pointers are resolved with register_query_field() during init and
+ * may be NULL, hence the asserts.  Callers must only use these when the
+ * loaded FPGA provides the field.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+/* Remaining CFN DATA field setters (error match, pattern-matcher, length
+ * check, KM outputs) plus the flush that commits CFN CTRL/DATA to hardware.
+ */
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* PM_CMP is a multi-word field: write all of its words from val[]. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR only exists when a second KM instance is present; field may be
+ * NULL otherwise, hence the assert.
+ */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Commit the shadow CFN CTRL and DATA registers to the device. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE / KCS / FTE accessors.  'index' selects the table instance; init
+ * only populates instances 0 and 1 (index 1 exists only on FPGAs with a
+ * second KM/FTE bank — callers must respect that).
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE accessors: per-category enable bits routing packets to the various
+ * engines (COL/COR/HSH/QSL/...).  The *_msk/hst/epp/tpe/rrb fields are
+ * optional in the FPGA image (queried, may be NULL), hence the asserts.
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS accessors (category translation table, CAT_A/CAT_B halves). */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT accessors (color table).  NFV_SB is optional (queried during init),
+ * hence the assert.
+ */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT accessors. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO accessors (extractor offsets). */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+/* Note: OFS takes a signed value (offset may be negative). */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK accessors.  RCK DATA has no named fields here, so it is written as a
+ * whole register and explicitly marked dirty for the next flush.
+ */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN accessors (frame-length check table: lower/upper bounds, dynamic
+ * offsets, invert flag).
+ */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC accessors.  The whole KCC module is optional (registers resolved
+ * with module_query_register() during init), so every accessor asserts
+ * the register/field actually exists in the loaded FPGA.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* KEY is a two-word field; val must point to at least 2 uint32_t. */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+/* CCE accessors — likewise optional in the FPGA image, hence the asserts. */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/* CCS address/count — the CCS module is optional, hence the asserts. */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generator for the CCS DATA field setters.  Every CCS field is optional
+ * (resolved with register_query_field() during init and possibly NULL),
+ * so each generated setter asserts the field exists before writing it.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+/*
+ * No trailing semicolons on the invocations: each expansion is already a
+ * complete function definition, and ISO C forbids a stray ';' at file
+ * scope (previously flagged by -pedantic builds).
+ */
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/* Commit the shadow CCS CTRL and DATA registers; CCS is optional, hence
+ * the asserts.
+ */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/* Fixed garbled prototype: was "void r(...)"; the CFN count setter follows
+ * the cat_nthw_<reg>_cnt naming used by every other register group in this
+ * header (ccs_cnt, cce_cnt, kcc_cnt, ...).
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Register and field shadow handles for one CAT FPGA module instance.
+ * All pointers are resolved during cat_nthw_init(); handles that a given
+ * FPGA image does not expose may remain NULL — presumably why the setters
+ * above assert before use (TODO confirm against cat_nthw_init()).
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN register group */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE register group, two instances (indexed 0/1) */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	/* KCS register group, two instances */
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	/* FTE register group, two instances */
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE register group */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS register group */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT register group */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT register group */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO register group */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK register group */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN register group */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC register group */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE register group */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS register group */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Propagate the debug mode setting to the underlying CSU module handle. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a csu_nthw instance.
+ * Returns NULL on allocation failure; release with csu_nthw_delete().
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc() zeroes the allocation, replacing malloc()+memset(). */
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/*
+ * Scrub and free an instance allocated by csu_nthw_new().
+ * The memset is a debugging aid against stale-pointer use; note a plain
+ * memset before free() may be elided by the optimizer.  NULL is accepted.
+ */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve all CSU RCP register/field handles for FPGA module instance
+ * 'n_instance'.
+ * When p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  Returns -1 when the instance is missing.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without initializing. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* RCP control/data register and field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Write 'val' into the RCP address field (selects the RCP entry to access). */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	/* Guard against use before csu_nthw_init(), matching the CAT setters. */
+	assert(p->mp_rcp_ctrl_adr);
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* Write 'val' into the RCP count field of the RCP control register. */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	/* Guard against use before csu_nthw_init(), matching the CAT setters. */
+	assert(p->mp_rcp_ctrl_cnt);
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Set the outer-L3 checksum command field of the RCP data register. */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/* Set the outer-L4 checksum command field of the RCP data register. */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/* Set the inner (tunneled) L3 checksum command field of the RCP data register. */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/* Set the inner (tunneled) L4 checksum command field of the RCP data register. */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/*
+ * Flush the RCP control and data shadow registers (register_flush with a
+ * count of 1 each).
+ */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	/* Guard against use before csu_nthw_init(), matching the CAT flushes. */
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handles for one CSU FPGA module instance (checksum
+ * commands per the setters below); all handles are resolved by
+ * csu_nthw_init().
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	/* RCP control/data register group */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate and zero-initialize a flm_nthw instance.
+ * Returns NULL on allocation failure; release with flm_nthw_delete().
+ */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc() zeroes the allocation, replacing malloc()+memset(). */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+/*
+ * Scrub and free an instance allocated by flm_nthw_new().
+ * The memset is a debugging aid against stale-pointer use; note a plain
+ * memset before free() may be elided by the optimizer.  NULL is accepted.
+ */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Propagate the debug mode setting to the underlying FLM module handle. */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM CONTROL register field setters.
+ *
+ * Each helper stages a 32-bit value in one field of the CONTROL register
+ * through the nthw_fpga_model field layer (field_set_val32()); nothing is
+ * written to the device until flm_nthw_control_flush() below is called.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/*
+ * NOTE(review): PDS/PIS carry an assert on the field pointer, unlike the
+ * setters above -- presumably these fields are optional and may be NULL on
+ * some FPGA variants; confirm against the field lookups in flm_nthw_init().
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write all staged CONTROL fields to the device in one register flush. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM STATUS register accessors.
+ *
+ * All accessors share the (p, val, get) convention: when 'get' is non-zero
+ * the field value is copied into *val via field_get_val32().  The read-only
+ * fields (CALIBDONE, INITDONE, IDLE, EFT_BP) ignore the call when 'get' is
+ * zero; the read/write fields (CRITICAL, PANIC, CRCERR) instead stage *val
+ * into the field, to be written by flm_nthw_status_flush().
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write the staged STATUS fields to the device. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the local STATUS copy from the device (see register_update()). */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT, SCRUB and LOAD_* registers.
+ *
+ * Each register has a single value field; the setter stages the value and
+ * the matching *_flush() helper writes the register to the device.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * PRIO register: four (LIMITn, FTn) field pairs, staged individually and
+ * written to the device together by flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table access (CTRL/DATA register pair): select the record address and
+ * count through PST_CTRL, stage the BP/PP/TP data fields, then write both
+ * registers with flm_nthw_pst_flush().
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+/* CTRL must precede DATA so the address/count select the target record. */
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table access (CTRL/DATA register pair): select the recipe
+ * address/count through RCP_CTRL, stage the individual recipe fields, then
+ * write both registers with flm_nthw_rcp_flush().
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: 'val' must point to 10 32-bit words. */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* CTRL must precede DATA so the address/count select the target recipe. */
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the two-word BUF_CTRL register over a RAB DMA transaction and decode
+ * the buffer counters:
+ *   *lrn_free  - low 16 bits of word 0
+ *   *inf_avail - high 16 bits of word 0
+ *   *sta_avail - low 16 bits of word 1
+ * (Counter meanings inferred from the lrn/inf/sta naming -- confirm against
+ * the FLM register documentation.)
+ *
+ * Returns 0 on success; otherwise the non-zero error code from
+ * nthw_rac_rab_dma_begin()/commit(), in which case the outputs are left
+ * untouched.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA buffer is a power-of-two ring; mask wraps the index. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write 'word_count' 32-bit words of learn data to the LRN_DATA register
+ * over a RAB DMA transaction.  The word count is announced through BUF_CTRL
+ * (word 0) before the data is written; BUF_CTRL is then read back and the
+ * buffer counters decoded exactly as in flm_nthw_buf_ctrl_update():
+ *   *lrn_free  - low 16 bits of word 0
+ *   *inf_avail - high 16 bits of word 0
+ *   *sta_avail - low 16 bits of word 1
+ *
+ * Returns 0 on success; otherwise the non-zero error code from
+ * nthw_rac_rab_dma_begin()/commit(), in which case the outputs are left
+ * untouched.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/*
+	 * Capture and propagate the begin() error code instead of a generic
+	 * -1, matching flm_nthw_buf_ctrl_update(), flm_nthw_inf_data_update()
+	 * and flm_nthw_sta_data_update().
+	 */
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA buffer is a power-of-two ring; mask wraps the index. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words of flow-info records from INF_DATA into
+ * 'data' over a RAB DMA transaction.  The requested word count is announced
+ * through BUF_CTRL (upper 16 bits of word 0) before the data is fetched;
+ * BUF_CTRL is then read back and decoded as in flm_nthw_buf_ctrl_update().
+ *
+ * Returns 0 on success; otherwise the non-zero error code from
+ * nthw_rac_rab_dma_begin()/commit(), in which case the outputs are left
+ * untouched.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the power-of-two DMA ring, wrapping via mask. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words of status records from STA_DATA into
+ * 'data' over a RAB DMA transaction.  The requested word count is announced
+ * through BUF_CTRL (word 1) before the data is fetched; BUF_CTRL is then
+ * read back and decoded as in flm_nthw_buf_ctrl_update().
+ *
+ * Returns 0 on success; otherwise the non-zero error code from
+ * nthw_rac_rab_dma_begin()/commit(), in which case the outputs are left
+ * untouched.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the power-of-two DMA ring, wrapping via mask. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM statistics counters: learn / unlearn.
+ *
+ * Each *_cnt() helper copies the counter field into *val when 'get' is
+ * non-zero (no-op otherwise); the matching *_update() helper refreshes the
+ * counter register via register_update().
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+/*
+ * FLM statistics counters: probe / release / auto-(un)learn / timeout
+ * unlearn / flows.  Same (val, get) convention as the learn counters above.
+ *
+ * The PRB registers are resolved with module_query_register()/
+ * register_query_field() at init, i.e. they are optional and may be NULL on
+ * FPGA variants without them -- hence the asserts before use.
+ */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+/*
+ * FLM statistics counters: STA/INF record handling, packet (PCK) and cache
+ * (CSH) hit/miss/unhandled, and CUC start/move.  Same (val, get) convention
+ * as the counters above.
+ *
+ * All of these registers are resolved with module_query_register()/
+ * register_query_field() at init, i.e. they are optional and may be NULL on
+ * FPGA variants without them -- hence the asserts before use.
+ */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Register access interface for the FLM module of the flow filter
+ * FPGA logic.  All functions operate on the register/field shadow
+ * handles held in struct flm_nthw (declared at the bottom of this file).
+ *
+ * NOTE(review): identifiers starting with a double underscore are
+ * reserved by the C standard (C11 7.1.3); consider renaming the guard
+ * to FLOW_NTHW_FLM_H_.
+ */
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+/* Allocate a zeroed FLM context; free with flm_nthw_delete(). */
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+/* Look up the FLM module instance and resolve all register/field handles.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn Done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Register and field shadow handles for one FLM module instance.
+ * The mp_* members are presumably resolved by flm_nthw_init() (matching
+ * the hfu/hsh init pattern) and are owned by the FPGA model, not by this
+ * struct — TODO confirm against flm_nthw_init()/flm_nthw_delete().
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac; /* NOTE(review): opaque handle, presumably RAC — confirm */
+
+	nt_module_t *m_flm;
+
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counter registers, one register + one counter field each. */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying FPGA module model. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a HFU context.
+ * Returns NULL on allocation failure; release with hfu_nthw_delete().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() zero-fills in one step; replaces malloc()+memset(). */
+	struct hfu_nthw *p = calloc(1, sizeof(struct hfu_nthw));
+
+	return p;
+}
+
+/*
+ * Free a HFU context previously returned by hfu_nthw_new().
+ * The structure is scrubbed before free() so stale pointer use is
+ * easier to detect; NULL is accepted and ignored.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve all HFU register/field handles for FPGA instance 'n_instance'.
+ *
+ * When 'p' is NULL the function only probes for the module: it returns 0
+ * if the instance exists and -1 otherwise.  With a non-NULL 'p' it returns
+ * 0 on success or -1 (with an error log) when the instance is missing.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of querying the FPGA a
+	 * second time (consistent with hsh_nthw_init()).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * The hfu_nthw_rcp_* setters below each write one RCP field via
+ * field_set_val32(); presumably the values are buffered until
+ * hfu_nthw_rcp_flush() pushes them to hardware — confirm against
+ * the nthw_fpga_model shadow-register semantics.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Flush both the RCP control and data registers to the FPGA. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Register access interface for the HFU module of the flow filter FPGA
+ * logic.
+ *
+ * The include guard avoids the leading-double-underscore form: such
+ * identifiers are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef FLOW_NTHW_HFU_H_
+#define FLOW_NTHW_HFU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register and field shadow handles for one HFU module instance;
+ * populated by hfu_nthw_init().
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+/* Allocate a zeroed HFU context; free with hfu_nthw_delete(). */
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+/* Resolve register/field handles; 0 on success, -1 if no such instance. */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+/* NOTE(review): declared but not defined in flow_nthw_hfu.c — confirm an
+ * implementation exists (or drop the prototype) before first use.
+ */
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: setters stage values; hfu_nthw_rcp_flush() writes them out. */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* FLOW_NTHW_HFU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HSH module model. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate one zero-initialized hsh_nthw object.
+ * Returns NULL on allocation failure; release with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() yields the same zero-filled result as malloc()+memset(). */
+	return calloc(1, sizeof(struct hsh_nthw));
+}
+
+/* Scrub and release an hsh_nthw object; a NULL argument is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p) {
+		/* Zero first so dangling users fault on NULL handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the HSH (hash) RCP register/field handles for FPGA instance
+ * @n_instance and program power-on default values.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ * A NULL @p degenerates the call into a pure existence probe.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL object: only report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP control/data register and field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: register_query_field() returns NULL when the
+	 * FPGA image does not provide AUTO_IPV4_MASK.
+	 */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Program recipe 0 with all-zero defaults, then flush to hardware. */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* NOTE(review): word count is hard-coded to 10 here while
+	 * MAC_PORT_MASK above uses mn_words — confirm WORD_MASK is always
+	 * 10 words wide on every supported image.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* NOTE(review): 31 is presumably the "disabled" hash type —
+	 * confirm against the HSH register documentation.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP accessors: each helper stages one HSH RCP field value in the
+ * shadow register; nothing reaches the FPGA until hsh_nthw_rcp_flush().
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* @val must hold mn_words 32-bit words for the MAC_PORT_MASK field. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+/* Offset setters take int32_t; the value is stored via field_set_val32. */
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* @val must hold 10 32-bit words (fixed WORD_MASK width). */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* AUTO_IPV4_MASK is optional; silently ignored when absent on the image. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Push all staged RCP control and data values to the FPGA. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Handle for one HSH (hash) FPGA module instance. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register (address/count cursor). */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	/* Optional field; NULL when the image lacks AUTO_IPV4_MASK. */
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HST module model. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate one zero-initialized hst_nthw object.
+ * Returns NULL on allocation failure; release with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc() yields the same zero-filled result as malloc()+memset(). */
+	return calloc(1, sizeof(struct hst_nthw));
+}
+
+/* Scrub and release an hst_nthw object; a NULL argument is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		/* Zero first so dangling users fault on NULL handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the HST (header stripper) RCP register/field handles for FPGA
+ * instance @n_instance.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ * A NULL @p degenerates the call into a pure existence probe.
+ *
+ * NOTE(review): unlike hsh_nthw_init() this programs no default values
+ * and performs no register_flush() — confirm that is intentional.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL object: only report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP control/data register and field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP accessors: each helper stages one HST RCP field value in the
+ * shadow register; nothing reaches the FPGA until hst_nthw_rcp_flush().
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Push all staged RCP control and data values to the FPGA. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one HST (header stripper) FPGA module instance. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register (address/count cursor). */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+/* Forward the debug-mode setting to the underlying IFR module model. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate one zero-initialized ifr_nthw object.
+ * Returns NULL on allocation failure; release with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc() yields the same zero-filled result as malloc()+memset(). */
+	return calloc(1, sizeof(struct ifr_nthw));
+}
+
+/* Scrub and release an ifr_nthw object; a NULL argument is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		/* Zero first so dangling users fault on NULL handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the IFR (interface recipe) RCP register/field handles for FPGA
+ * instance @n_instance.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ * A NULL @p degenerates the call into a pure existence probe.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* NULL object: only report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of performing a
+	 * second, redundant fpga_query_module() lookup (matches the
+	 * hsh/hst init functions).
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/*
+ * RCP accessors: each helper stages one IFR RCP field value in the
+ * shadow register; nothing reaches the FPGA until ifr_nthw_rcp_flush().
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Stage the MTU value for the selected IFR recipe. */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix copy-paste bug: the guard checked mp_rcp_data_en while the
+	 * write below targets mp_rcp_data_mtu; assert the field we use.
+	 */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Push all staged RCP control and data values to the FPGA. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Handle for one IFR (interface recipe) FPGA module instance. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register (address/count cursor). */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Saturate a capability count to a 0/1 presence flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	if (val != 0)
+		return 1;
+	return 0;
+}
+
+/*
+ * Allocate one zero-initialized info_nthw object.
+ * Returns NULL on allocation failure; release with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc() yields the same zero-filled result as malloc()+memset(). */
+	return calloc(1, sizeof(struct info_nthw));
+}
+
+/* Scrub and release an info_nthw object; a NULL argument is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		/* Zero first so dangling users fault on NULL handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Cache the product parameters (capabilities and table sizes) of the
+ * FPGA image in @p for cheap lookup via the info_nthw_get_* accessors.
+ * Capabilities that depend on an optional module are multiplied by that
+ * module's 0/1 presence flag so they read as 0 when the module is absent.
+ *
+ * Always returns 0.
+ *
+ * NOTE(review): unlike the other *_nthw_init() functions this one does
+ * not tolerate a NULL @p — confirm callers never probe with NULL here.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for the optional modules. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is only usable when all five sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is limited by the smaller of the Rx/Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): -1 defaults flow into fields that are read back
+	 * through unsigned getters — confirm callers treat the resulting
+	 * wrapped value as "not available".
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Read-only accessors for the capability values cached by
+ * info_nthw_init(). Module-dependent values are 0 when the
+ * corresponding FPGA module is absent.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): init stores a -1 default here; returned as unsigned. */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+/* NOTE(review): init stores a -1 default here; returned as unsigned. */
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __FLOW_NTHW_INFO_H__
#define __FLOW_NTHW_INFO_H__

#include <stdint.h> /* uint32_t */
#include "nthw_fpga_model.h"

struct info_nthw;

/* Lifetime: allocate, bind to an FPGA module instance, destroy. */
struct info_nthw *info_nthw_new(void);
void info_nthw_delete(struct info_nthw *p);
int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);

int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);

/* Port topology accessors. */
unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);

/*
 * Capability/capacity accessors; each returns one cached field of the
 * struct defined below.
 */
unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);

/*
 * Snapshot of FPGA module capability/capacity parameters, filled in by
 * info_nthw_init().  Treat the fields as read-only outside this module;
 * access from other code goes through the accessors above.
 */
struct info_nthw {
	uint8_t m_physical_adapter_no;
	nt_fpga_t *mp_fpga;
	/* Port topology. */
	unsigned int n_phy_ports;
	unsigned int n_rx_ports;
	unsigned int n_ltx_avail;
	/* CAT module dimensions. */
	unsigned int nb_cat_func;
	unsigned int nb_categories;
	unsigned int nb_queues;
	unsigned int nb_flow_types;
	unsigned int nb_pm_ext;
	unsigned int nb_len;
	unsigned int nb_kcc_size;
	unsigned int nb_kcc_banks;
	/* KM module dimensions (CAM and TCAM). */
	unsigned int nb_km_categories;
	unsigned int nb_km_cam_banks;
	unsigned int nb_km_cam_record_words;
	unsigned int nb_km_cam_records;
	unsigned int nb_km_tcam_banks;
	unsigned int nb_km_tcam_bank_width;
	/* FLM (flow matcher) dimensions. */
	unsigned int nb_flm_categories;
	unsigned int nb_flm_size_mb;
	unsigned int nb_flm_entry_size;
	unsigned int nb_flm_variant;
	unsigned int nb_flm_prios;
	unsigned int nb_flm_pst_profiles;
	/* Remaining per-module dimensions. */
	unsigned int nb_hst_categories;
	unsigned int nb_qsl_categories;
	unsigned int nb_qsl_qst_entries;
	unsigned int nb_pdb_categories;
	unsigned int nb_ioa_categories;
	unsigned int nb_roa_categories;
	unsigned int nb_dbs_categories;
	unsigned int nb_cat_km_if_cnt;
	unsigned int m_cat_km_if_m0;
	unsigned int m_cat_km_if_m1;
	unsigned int nb_tpe_categories;
	unsigned int nb_tx_cpy_writers;
	unsigned int nb_tx_cpy_mask_mem;
	unsigned int nb_tx_rpl_depth;
	unsigned int nb_tx_rpl_ext_categories;
	unsigned int nb_tpe_ifr_categories;
};

#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
/* Forward the debug-mode setting to the underlying IOA FPGA module. */
void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_ioa, n_debug_mode);
}
+
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	struct ioa_nthw *p = malloc(sizeof(struct ioa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/*
 * Bind an ioa_nthw instance to IOA module <n_instance> of the given
 * FPGA and resolve all register/field handles used by the setters in
 * this file.
 *
 * Probe mode: when called with p == NULL the function only reports
 * whether the module instance exists (0 if present, -1 if not).
 *
 * @return 0 on success, -1 if the module instance does not exist.
 */
int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);

	assert(n_instance >= 0 && n_instance < 256);

	/* Probe mode: existence check only, no handle wiring. */
	if (p == NULL)
		return p_mod == NULL ? -1 : 0;

	if (p_mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->m_physical_adapter_no = (uint8_t)n_instance;
	p->m_ioa = p_mod;

	/* RCP: recipe control/data register pair and its fields. */
	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
	p->mp_rcp_data_tunnel_pop =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
	p->mp_rcp_data_vlan_pop =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
	p->mp_rcp_data_vlan_push =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
	p->mp_rcp_data_vlan_vid =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
	p->mp_rcp_data_vlan_dei =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
	p->mp_rcp_data_vlan_pcp =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
	p->mp_rcp_data_vlan_tpid_sel =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
	p->mp_rcp_data_queue_override_en =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
	p->mp_rcp_data_queue_id =
		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);

	/* Special Vlan Tpid */
	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
	p->mp_special_vlan_tpid_cust_tpid0 =
		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
	p->mp_special_vlan_tpid_cust_tpid1 =
		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
	{
		/*
		 * This extension in IOA is a messy way FPGA have chosen to
		 * put control bits for EPP module in IOA. It is accepted as
		 * we are going towards exchange IOA and ROA modules later
		 * to get higher scalability in future.
		 */
		/*
		 * EPP registers are optional (query_register, not
		 * get_register): absent on FPGA images without the
		 * extension, leaving the handles NULL.  The setters
		 * below NULL-check accordingly.
		 */
		p->mp_roa_epp_ctrl =
			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
		if (p->mp_roa_epp_ctrl) {
			p->mp_roa_epp_addr =
				register_get_field(p->mp_roa_epp_ctrl,
						   IOA_ROA_EPP_CTRL_ADR);
			p->mp_roa_epp_cnt =
				register_get_field(p->mp_roa_epp_ctrl,
						   IOA_ROA_EPP_CTRL_CNT);
		} else {
			p->mp_roa_epp_addr = NULL;
			p->mp_roa_epp_cnt = NULL;
		}

		p->mp_roa_epp_data =
			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
		if (p->mp_roa_epp_data) {
			p->mp_roa_epp_data_push_tunnel =
				register_get_field(p->mp_roa_epp_data,
						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
			p->mp_roa_epp_data_tx_port =
				register_get_field(p->mp_roa_epp_data,
						   IOA_ROA_EPP_DATA_TX_PORT);
		} else {
			p->mp_roa_epp_data_push_tunnel = NULL;
			p->mp_roa_epp_data_tx_port = NULL;
		}
	}
	return 0;
}
+
/* RCP */
/*
 * Recipe (RCP) field setters.  Each writes one field of the RCP shadow
 * registers; nothing reaches the FPGA until ioa_nthw_rcp_flush().
 */
void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_addr, val);
}

void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_cnt, val);
}

void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
}

void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_vlan_pop, val);
}

void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_vlan_push, val);
}

void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_vlan_vid, val);
}

void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_vlan_dei, val);
}

void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
}

void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
}

void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_queue_override_en, val);
}

void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_queue_id, val);
}

/* Push the staged RCP CTRL and DATA shadow registers to the FPGA. */
void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
+
/* Vlan Tpid Special */
/* Custom TPID 0/1 setters for the special VLAN-TPID register. */
void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
}

void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
}

void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
{
	register_flush(p->mp_special, 1);
}

/*
 * ROA EPP setters.  The EPP registers are optional (see ioa_nthw_init);
 * every access is NULL-guarded and silently skipped when the FPGA image
 * lacks them.
 */
void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
{
	if (p->mp_roa_epp_addr)
		field_set_val32(p->mp_roa_epp_addr, val);
}

void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
{
	if (p->mp_roa_epp_cnt)
		field_set_val32(p->mp_roa_epp_cnt, val);
}

void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
{
	if (p->mp_roa_epp_data_push_tunnel)
		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
}

void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
{
	if (p->mp_roa_epp_data_tx_port)
		field_set_val32(p->mp_roa_epp_data_tx_port, val);
}

void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
{
	if (p->mp_roa_epp_ctrl)
		register_flush(p->mp_roa_epp_ctrl, 1);
	if (p->mp_roa_epp_data)
		register_flush(p->mp_roa_epp_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __FLOW_NTHW_IOA_H__
#define __FLOW_NTHW_IOA_H__

#include "nthw_fpga_model.h"

#include <stdint.h> /* uint32_t */

/*
 * Handle for one IOA FPGA module instance: cached register and field
 * pointers resolved once by ioa_nthw_init().  The mp_roa_epp_* members
 * may be NULL when the FPGA image lacks the EPP extension.
 */
struct ioa_nthw {
	uint8_t m_physical_adapter_no;
	nt_fpga_t *mp_fpga;

	nt_module_t *m_ioa;

	/* Recipe (RCP) control/data registers and fields. */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_addr;
	nt_field_t *mp_rcp_cnt;
	nt_register_t *mp_rcp_data;

	nt_field_t *mp_rcp_data_tunnel_pop;
	nt_field_t *mp_rcp_data_vlan_pop;
	nt_field_t *mp_rcp_data_vlan_push;
	nt_field_t *mp_rcp_data_vlan_vid;
	nt_field_t *mp_rcp_data_vlan_dei;
	nt_field_t *mp_rcp_data_vlan_pcp;
	nt_field_t *mp_rcp_data_vlan_tpid_sel;
	nt_field_t *mp_rcp_data_queue_override_en;
	nt_field_t *mp_rcp_data_queue_id;

	/* Special VLAN-TPID register and custom-TPID fields. */
	nt_register_t *mp_special;
	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
	nt_field_t *mp_special_vlan_tpid_cust_tpid1;

	/* Optional ROA EPP extension (NULL when absent). */
	nt_register_t *mp_roa_epp_ctrl;
	nt_field_t *mp_roa_epp_addr;
	nt_field_t *mp_roa_epp_cnt;
	nt_register_t *mp_roa_epp_data;
	nt_field_t *mp_roa_epp_data_push_tunnel;
	nt_field_t *mp_roa_epp_data_tx_port;
};

typedef struct ioa_nthw ioa_nthw_t;

/* Lifetime: allocate, bind to an FPGA module instance, destroy. */
struct ioa_nthw *ioa_nthw_new(void);
void ioa_nthw_delete(struct ioa_nthw *p);
int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);

int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);

/* RCP */
void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_rcp_flush(const struct ioa_nthw *p);

/* Vlan Tpid Special */
void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);

/* EPP module */
void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);

#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
/*
 * Write @val to FPGA field @_a only when the field handle is non-NULL
 * (handles resolved with register_query_field() are NULL on FPGA
 * images that lack the field).  @_a is evaluated exactly once.
 *
 * Fixes vs. original: the macro-local variable was named '(a)', which
 * silently captured any 'a' appearing in the caller's 'val' expression
 * (shadowing bug); 'val' is now also parenthesized at its expansion
 * site (CERT PRE01-C).
 */
#define CHECK_AND_SET_VALUE(_a, val)                     \
	do {                                             \
		__typeof__(_a) _field_ = (_a);           \
		if (_field_) {                           \
			field_set_val32(_field_, (val)); \
		}                                        \
	} while (0)
+
/* Forward the debug-mode setting to the underlying KM FPGA module. */
void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_km, n_debug_mode);
}
+
+struct km_nthw *km_nthw_new(void)
+{
+	struct km_nthw *p = malloc(sizeof(struct km_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/*
 * Bind a km_nthw instance to KM module <n_instance> of the given FPGA
 * and resolve all register/field handles used by the setters in this
 * file.  Handles fetched with register_query_field() are optional and
 * may remain NULL depending on the FPGA image's KM layout version.
 *
 * Probe mode: when called with p == NULL the function only reports
 * whether the module instance exists (0 if present, -1 if not).
 *
 * @return 0 on success, -1 if the module instance does not exist.
 */
int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);

	assert(n_instance >= 0 && n_instance < 256);

	/* Probe mode: existence check only, no handle wiring. */
	if (p == NULL)
		return p_mod == NULL ? -1 : 0;

	if (p_mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
		       p_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->m_physical_adapter_no = (uint8_t)n_instance;
	p->m_km = p_mod;

	/* RCP */
	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
	p->mp_rcp_data_qw0_dyn =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
	p->mp_rcp_data_qw0_ofs =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
	p->mp_rcp_data_qw0_sel_a =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
	p->mp_rcp_data_qw0_sel_b =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
	p->mp_rcp_data_qw4_dyn =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
	p->mp_rcp_data_qw4_ofs =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
	p->mp_rcp_data_qw4_sel_a =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
	p->mp_rcp_data_qw4_sel_b =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);

	/*
	 * Presence of SW8/DW8 fields distinguishes KM layout variants;
	 * the branch near the end of this function keys off these.
	 */
	p->mp_rcp_data_sw8_dyn =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
	p->mp_rcp_data_dw8_dyn =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);

	p->mp_rcp_data_swx_ovs_sb =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
	p->mp_rcp_data_swx_cch =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
	p->mp_rcp_data_swx_sel_a =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
	p->mp_rcp_data_swx_sel_b =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
	p->mp_rcp_data_paired =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
	p->mp_rcp_data_flow_set =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
	p->mp_rcp_data_keyway_a =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
	p->mp_rcp_data_keyway_b =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
	p->mp_rcp_data_synergy_mode =
		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);

	/* CAM */
	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
	/* TCAM */
	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
	/* TCI */
	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
	/* TCQ */
	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
	p->mp_tcq_data_bank_mask =
		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);

	/* B-side fields: try the current define names first. */
	p->mp_rcp_data_dw0_b_dyn =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
	p->mp_rcp_data_dw0_b_ofs =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
	p->mp_rcp_data_dw2_b_dyn =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
	p->mp_rcp_data_dw2_b_ofs =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
	p->mp_rcp_data_sw4_b_dyn =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
	p->mp_rcp_data_sw4_b_ofs =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
	p->mp_rcp_data_sw5_b_dyn =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
	p->mp_rcp_data_sw5_b_ofs =
		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
	if (!p->mp_rcp_data_dw0_b_dyn) {
		/* old field defines */
		p->mp_rcp_data_dw0_b_dyn =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
		p->mp_rcp_data_dw0_b_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
		p->mp_rcp_data_dw2_b_dyn =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
		p->mp_rcp_data_dw2_b_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
		p->mp_rcp_data_sw4_b_dyn =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
		p->mp_rcp_data_sw4_b_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
		p->mp_rcp_data_sw5_b_dyn =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
		p->mp_rcp_data_sw5_b_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
	}

	/* v0.6+ */
	if (p->mp_rcp_data_dw8_dyn) {
		/* DW8/DW10 layout (newer images). */
		p->mp_rcp_data_dw8_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
		p->mp_rcp_data_dw8_sel_a =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
		p->mp_rcp_data_dw8_sel_b =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
		p->mp_rcp_data_dw10_dyn =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
		p->mp_rcp_data_dw10_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
		p->mp_rcp_data_dw10_sel_a =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
		p->mp_rcp_data_dw10_sel_b =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
	} else if (p->mp_rcp_data_sw8_dyn) {
		/* SW8/SW9 layout (older images). */
		p->mp_rcp_data_sw8_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
		p->mp_rcp_data_sw8_sel_a =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
		p->mp_rcp_data_sw8_sel_b =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
		p->mp_rcp_data_sw9_dyn =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
		p->mp_rcp_data_sw9_ofs =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
		p->mp_rcp_data_sw9_sel_a =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
		p->mp_rcp_data_sw9_sel_b =
			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
	}

	return 0;
}
+
+/* RCP */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+};
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+};
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+};
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+};
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+};
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+};
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+};
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+};
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+};
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+};
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+};
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+};
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+};
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+};
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+};
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+};
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+};
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+};
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+};
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+};
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+};
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+};
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+};
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+};
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+};
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+};
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+};
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+};
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+};
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+};
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}; /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+};
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+};
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+};
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+};
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+};
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+};
+
+/*
+ * KM RCP_DATA field staging helpers.
+ * Each call only writes the shadow register field; km_nthw_rcp_flush()
+ * commits the staged values to hardware.
+ * Note: the stray semicolons after the function bodies were removed --
+ * they are not valid ISO C at file scope and trip -Wpedantic/checkpatch.
+ */
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+/*
+ * The following use CHECK_AND_SET_VALUE() -- presumably a NULL-guarded
+ * set for fields absent on some FPGA versions; confirm against the
+ * macro definition.
+ */
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+/* B-side word selectors; the *_ofs variants take a signed offset. */
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+/* Commit the staged RCP CTRL and DATA shadow registers. */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * CAM staging helpers: address/count selection, six key words, six
+ * flow-type fields. Committed by km_nthw_cam_flush().
+ * Stray semicolons after function bodies removed (not valid ISO C at
+ * file scope).
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* Commit the staged CAM CTRL and DATA shadow registers. */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/*
+ * TCAM staging helpers; committed by km_nthw_tcam_flush().
+ * Stray semicolons after function bodies removed.
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* Writes 3 consecutive 32-bit words from val into the TCAM T field. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/*
+ * TCI staging helpers (color / flow-type); committed by
+ * km_nthw_tci_flush(). Stray semicolons after function bodies removed.
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/*
+ * TCQ staging helpers; committed by km_nthw_tcq_flush().
+ * Stray semicolons after function bodies removed.
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* BANK_MASK may be absent on some FPGA versions; macro guards that. */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* Wide qualifier: writes 3 x 32-bit words; to use in v4. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/*
+ * Per-instance handle for one KM flow-matcher module: shadow-register
+ * and field pointers grouped per register (RCP, CAM, TCAM, TCI, TCQ).
+ * Presumably resolved by km_nthw_init(); fields looked up with the
+ * query variants may be NULL depending on FPGA version -- confirm in
+ * the .c file.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	/* RCP: recipe control/data */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	/* CAM */
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	/* TCAM */
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	/* TCI */
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	/* TCQ */
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB module. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/* Allocate a zero-initialised handle; calloc replaces malloc+memset. */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	return calloc(1, sizeof(struct pdb_nthw));
+}
+
+/* Scrub and release a handle; NULL is accepted and ignored. */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all PDB module registers and fields for the given instance.
+ * When p is NULL the call only probes for the module: it returns 0 if
+ * the instance exists and -1 otherwise, without touching any state.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* Optional field: query (not get) -- may be NULL on some FPGAs. */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP */
+/*
+ * PDB RCP_DATA staging helpers: each writes one shadow-register field;
+ * pdb_nthw_rcp_flush() commits CTRL and DATA to hardware.
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* PCAP_KEEP_FCS may be absent on some FPGA versions; skip when NULL. */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+/* Global PDB configuration; committed by pdb_nthw_config_flush(). */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Per-instance handle for one PDB (packet descriptor builder) module:
+ * shadow-register and field pointers for RCP and CONFIG.
+ * Presumably resolved by pdb_nthw_init(); mp_rcp_data_pcap_keep_fcs is
+ * looked up with the query variant and may be NULL -- confirm in the
+ * .c file.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL module. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/* Allocate a zero-initialised handle; calloc replaces malloc+memset. */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	return calloc(1, sizeof(struct qsl_nthw));
+}
+
+/* Scrub and release a handle; NULL is accepted and ignored. */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all QSL module registers and fields for the given instance.
+ * When p is NULL the call only probes for the module: it returns 0 if
+ * the instance exists and -1 otherwise, without touching any state.
+ * Fields and registers looked up with the query variants may be NULL
+ * on some FPGA versions; the corresponding setters are NULL-guarded.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* Optional fields: query (not get) -- may be NULL. */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Placeholder: QSL needs no per-index setup; parameters are ignored. */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP */
+/*
+ * QSL RCP_DATA staging helpers; committed by qsl_nthw_rcp_flush().
+ * CAO/LR/TSA/VLI are resolved with register_query_field() and may be
+ * NULL on some FPGA versions, so those setters are NULL-guarded.
+ * Stray semicolon after qsl_nthw_rcp_select and blank lines between
+ * signature and body removed.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX - not there anymore from v0.7+: all handles may be NULL. */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Guard on the field actually written (was mp_ltx_addr). */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* LTX registers are optional (module_query_register); guard to
+	 * avoid a NULL dereference on v0.7+ FPGAs.
+	 */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST */
+/*
+ * QST staging helpers; committed by qsl_nthw_qst_flush().
+ * TX_PORT/LRE/TCI/VEN are optional (register_query_field) and their
+ * setters are NULL-guarded.
+ */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+/* mp_qst_data_en is resolved to either EN or QEN (v0.7 rename) in init. */
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN */
+/* Queue-enable staging helpers; committed by qsl_nthw_qen_flush(). */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ */
+/* Unmatched-queue staging helpers; committed by qsl_nthw_unmq_flush(). */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one QSL (queue selection) FPGA module instance.  All
+ * register/field pointers are resolved once by qsl_nthw_init(); "mp_"
+ * members are pointers, "m_" members are plain values.
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP: recipe table (CTRL = address/count, DATA = entry fields) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX table */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST table (data fields may be NULL; writers guard against it) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN table */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ (unmatched-packet queue) table */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying RMC module handle. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized RMC handle.
+ * Returns NULL on allocation failure; otherwise the handle must be
+ * initialized with rmc_nthw_init() and released with rmc_nthw_delete().
+ * calloc() replaces the malloc()+memset() pair: same zeroed result,
+ * fewer steps, and overflow-checked size computation.
+ */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rmc_nthw));
+}
+
+/*
+ * Release an RMC handle previously obtained from rmc_nthw_new().
+ * NULL is accepted and ignored.
+ */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the handle before returning it to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve register/field handles for RMC module instance n_instance.
+ *
+ * When p is NULL the call only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.  With a valid p, returns 0 on success or -1
+ * (with an error log) if the instance is absent.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* RPP_SLICE uses register_query_field (may be NULL): presumably
+	 * optional on some FPGA images -- the writer guards for NULL.
+	 */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/*
+ * No per-instance setup is required for RMC; placeholder kept for API
+ * symmetry with the other flow-filter modules.  Always returns 0.
+ */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/*
+ * CTRL field writers.  Values are staged in the shadow register and only
+ * reach hardware on rmc_nthw_ctrl_flush().
+ */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* RPP_SLICE is an optional field (see rmc_nthw_init); ignore if absent. */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Write the staged CTRL register to hardware. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one RMC FPGA module instance; register/field pointers are
+ * resolved once by rmc_nthw_init().
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	/* CTRL register and its fields (rpp_slice may be NULL: optional) */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying ROA module handle. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ROA handle.
+ * Returns NULL on allocation failure; otherwise the handle must be
+ * initialized with roa_nthw_init() and released with roa_nthw_delete().
+ * calloc() replaces the malloc()+memset() pair: same zeroed result,
+ * fewer steps, and overflow-checked size computation.
+ */
+struct roa_nthw *roa_nthw_new(void)
+{
+	return calloc(1, sizeof(struct roa_nthw));
+}
+
+/*
+ * Release a ROA handle previously obtained from roa_nthw_new().
+ * NULL is accepted and ignored.
+ */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the handle before returning it to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve register/field handles for ROA module instance n_instance.
+ *
+ * When p is NULL the call only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.  With a valid p, returns 0 on success or -1
+ * (with an error log) if the instance is absent.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/*
+ * TUN HDR table accessors: select an entry, stage the tunnel header,
+ * then flush to hardware.
+ */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* Stage 4 x 32-bit words of tunnel header data from val. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Write the staged TUN HDR control and data registers to hardware. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/*
+ * TUN CFG table field writers.  Values are staged and only reach
+ * hardware on roa_nthw_tun_cfg_flush().
+ */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Stage the TX LAG index field of the selected TUN CFG entry. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+/* Recirculation-related TUN CFG fields; staged until flush. */
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Write the staged TUN CFG control and data registers to hardware. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/*
+ * ROA CONFIG forwarding field writers; staged until
+ * roa_nthw_config_flush().
+ */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Write the staged CONFIG register to hardware. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/*
+ * LAG CFG table accessors: select an entry, stage the TX PHY port,
+ * then flush to hardware.
+ */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Write the staged LAG CFG control and data registers to hardware. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Handle for one ROA FPGA module instance; register/field pointers are
+ * resolved once by roa_nthw_init().
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR table */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG table */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG register (packet forwarding control) */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG CFG table */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying RPP_LR module handle. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized RPP_LR handle.
+ * Returns NULL on allocation failure; otherwise the handle must be
+ * initialized with rpp_lr_nthw_init() and released with
+ * rpp_lr_nthw_delete().  calloc() replaces the malloc()+memset() pair:
+ * same zeroed result with overflow-checked size computation.
+ */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rpp_lr_nthw));
+}
+
+/*
+ * Release an RPP_LR handle previously obtained from rpp_lr_nthw_new().
+ * NULL is accepted and ignored.
+ */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the handle before returning it to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve register/field handles for RPP_LR module instance n_instance.
+ *
+ * When p is NULL the call only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.  With a valid p, returns 0 on success or -1
+ * (with an error log) if the instance is absent.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; the original issued a
+	 * redundant second fpga_query_module() call here (matches the
+	 * pattern used by the sibling rmc/roa init functions).
+	 */
+	p->m_rpp_lr = p_mod;
+
+	/* RCP: mandatory registers/fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	/* IFR RCP: looked up with the query variants, so these handles may
+	 * be NULL -- presumably absent on older FPGA images (TODO confirm);
+	 * the writers assert on them before use.
+	 */
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP field writers; staged until rpp_lr_nthw_rcp_flush(). */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Write the staged RCP control and data registers to hardware. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * IFR RCP field writers.  The IFR handles come from the query variants
+ * in rpp_lr_nthw_init() and may be NULL on images lacking the feature;
+ * callers must not invoke these unless the feature is present (the
+ * asserts catch misuse in debug builds only).
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Write the staged IFR RCP control and data registers to hardware. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one RPP_LR FPGA module instance; register/field pointers
+ * are resolved once by rpp_lr_nthw_init().  The IFR members may be NULL
+ * (optional feature, queried rather than fetched).
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	/* RCP table */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR RCP table (optional; may be NULL) */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC module handle. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized SLC handle.
+ * Returns NULL on allocation failure; otherwise the handle must be
+ * initialized with slc_nthw_init() and released with slc_nthw_delete().
+ * calloc() replaces the malloc()+memset() pair: same zeroed result,
+ * fewer steps, and overflow-checked size computation.
+ */
+struct slc_nthw *slc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_nthw));
+}
+
+/*
+ * Release an SLC handle previously obtained from slc_nthw_new().
+ * NULL is accepted and ignored.
+ */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the handle before returning it to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve register/field handles for SLC module instance n_instance.
+ *
+ * When p is NULL the call only probes for the instance: returns 0 if it
+ * exists, -1 otherwise.  With a valid p, returns 0 on success or -1
+ * (with an error log) if the instance is absent.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; the original issued a
+	 * redundant second fpga_query_module() call here (matches the
+	 * pattern used by the sibling rmc/roa init functions).
+	 */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP field writers; staged until slc_nthw_rcp_flush(). */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Tail offset is signed (int32_t); the field stores its bit pattern. */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Write the staged RCP control and data registers to hardware. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context for one SLC (slicer) module instance: cached register/field handles. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number bound at init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_slc;		/* MOD_SLC module handle */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;	/* RCP data register */
+
+	/* fields within the RCP data register */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC_LR module. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC LR context; returns NULL on OOM. */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	struct slc_lr_nthw *p = malloc(sizeof(struct slc_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free a context from slc_lr_nthw_new(); zeroed first to catch stale use. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Initialize an SLC LR module context.
+ *
+ * When p is NULL the call only probes for existence: it returns 0 if
+ * instance n_instance of MOD_SLC_LR exists and -1 otherwise.  With a
+ * valid p, the RCP CTRL/DATA register and field handles are resolved
+ * and cached in *p.  Returns 0 on success, -1 if the instance is missing.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Tag fixed: this is the SLC_LR module, not SLC. */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc_lr = p_mod;	/* reuse the module handle queried above */
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Select the RCP record index to operate on (SLC_RCP_CTRL.ADR). */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the number of consecutive RCP records to access (SLC_RCP_CTRL.CNT). */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Enable/disable tail slicing in the selected RCP record. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Set the dynamic-offset selector for tail slicing (SLC_RCP_DATA.TAIL_DYN). */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Set the (signed) tail offset; value is passed through field_set_val32(). */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Set the PCAP field of the selected RCP record (SLC_RCP_DATA.PCAP). */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Write one entry of the RCP CTRL and DATA registers to the device. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Context for one SLC_LR module instance: cached register/field handles. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number bound at init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_slc_lr;		/* MOD_SLC_LR module handle */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;	/* RCP data register */
+
+	/* fields within the RCP data register */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_CPY context; returns NULL on OOM. */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	struct tx_cpy_nthw *p = malloc(sizeof(struct tx_cpy_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free the per-writer array and the context itself; zeroed before free. */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p) {
+		free(p->m_writers);
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Initialize a TX_CPY module context.
+ *
+ * When p is NULL the call only probes: returns 0 if instance n_instance of
+ * MOD_TX_CPY exists, -1 otherwise.  With a valid p, allocates the per-writer
+ * handle array (NT_TX_CPY_WRITERS entries) and resolves the CTRL/DATA
+ * register and field handles per writer.  Returns 0 on success, -1 on
+ * missing instance, zero writer count, or allocation failure.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	/* number of copy writers this FPGA build exposes */
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* variant != 0 means the build also has per-writer mask registers */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/*
+	 * Cases fall through from highest writer index down to 0 so every
+	 * writer below m_writers_cnt gets its handles resolved.
+	 * NOTE(review): `default:` shares the case-6 path, so a count above
+	 * 6 only initializes writers 0..5 -- confirm NT_TX_CPY_WRITERS can
+	 * never exceed 6 on supported FPGAs.
+	 */
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					  CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			/* NOTE(review): misaligned continuation indent below */
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+			 register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+					    CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Select the record index in writer `index`'s CTRL register. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set the record count in writer `index`'s CTRL register. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Set the READER_SELECT field of writer `index`'s DATA register. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Set the DYN field of writer `index`'s DATA register. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Set the OFS field of writer `index`'s DATA register. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Set the LEN field of writer `index`'s DATA register. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/* Set the MASK_POINTER field; only present when the variant has masks. */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Write one entry of writer `index`'s CTRL and DATA registers to the device. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Select the record index in the MASK_CTRL register (mask variant only). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set the record count in the MASK_CTRL register (mask variant only). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Set the BYTE_MASK field of the MASK_DATA register (mask variant only). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Write one entry of the MASK_CTRL and MASK_DATA registers to the device. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one TX_CPY writer channel. */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;		/* CPY_WRITERn_CTRL */
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;		/* CPY_WRITERn_DATA */
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	/* mask handles below are NULL unless the FPGA variant has masks */
+	nt_field_t *mp_writer_data_mask_pointer;
+
+	nt_register_t *mp_writer_mask_ctrl;	/* CPY_WRITERn_MASK_CTRL */
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;	/* CPY_WRITERn_MASK_DATA */
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Context for one TX_CPY module instance with its per-writer handle array. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number bound at init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_tx_cpy;		/* MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt;	/* entries in m_writers (>= 1) */
+	struct tx_cpy_writers_s *m_writers;	/* calloc'ed in init, freed in delete */
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zero-initialized TX_INS context; returns NULL on OOM. */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	struct tx_ins_nthw *p = malloc(sizeof(struct tx_ins_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free a context from tx_ins_nthw_new(); zeroed first to catch stale use. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Initialize a TX_INS module context.
+ *
+ * When p is NULL the call only probes: returns 0 if instance n_instance of
+ * MOD_TX_INS exists, -1 otherwise.  With a valid p, resolves and caches the
+ * RCP CTRL/DATA register and field handles.  Returns 0 on success.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_ins = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	/* RCP control register (address/count) and data register fields */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Select the RCP record index to operate on (INS_RCP_CTRL.ADR). */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the number of consecutive RCP records to access (INS_RCP_CTRL.CNT). */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the DYN field of the selected RCP record. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Set the OFS field of the selected RCP record. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Set the LEN field of the selected RCP record. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Write one entry of the RCP CTRL and DATA registers to the device. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Context for one TX_INS module instance: cached register/field handles. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number bound at init */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned here) */
+
+	nt_module_t *m_tx_ins;		/* MOD_TX_INS module handle */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
/* Forward the debug-mode setting to the underlying TX_RPL module handle. */
void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
}
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_rpl = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
/* Set the RCP record address to access (RCP_CTRL.ADR). */
void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_addr, val);
}

/* Set the RCP access count (RCP_CTRL.CNT). */
void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_ctrl_cnt, val);
}

/* Set the RCP DYN field (RCP_DATA.DYN). */
void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_dyn, val);
}

/* Set the RCP offset field (RCP_DATA.OFS). */
void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ofs, val);
}

/* Set the RCP length field (RCP_DATA.LEN). */
void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_len, val);
}

/* Set the RCP replace-pointer field (RCP_DATA.RPL_PTR). */
void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
}

/* Set the RCP extended-priority field (RCP_DATA.EXT_PRIO). */
void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rcp_data_ext_prio, val);
}

/* Write the RCP control register, then the data register, to hardware. */
void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rcp_ctrl, 1);
	register_flush(p->mp_rcp_data, 1);
}
+
/* Set the EXT table address to access (EXT_CTRL.ADR). */
void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_addr, val);
}

/* Set the EXT access count (EXT_CTRL.CNT). */
void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_ctrl_cnt, val);
}

/* Set the EXT replace-pointer field (EXT_DATA.RPL_PTR). */
void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_ext_data_rpl_ptr, val);
}

/* Write the EXT control register, then the data register, to hardware. */
void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_ext_ctrl, 1);
	register_flush(p->mp_ext_data, 1);
}
+
/* Set the RPL table address to access (RPL_CTRL.ADR). */
void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_addr, val);
}

/* Set the RPL access count (RPL_CTRL.CNT). */
void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
{
	field_set_val32(p->mp_rpl_ctrl_cnt, val);
}

/* Set the RPL data value; val must point to 4 32-bit words. */
void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
{
	field_set_val(p->mp_rpl_data_value, val, 4);
}

/* Write the RPL control register, then the data register, to hardware. */
void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
{
	register_flush(p->mp_rpl_ctrl, 1);
	register_flush(p->mp_rpl_data, 1);
}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
/* Register/field handles for one TX_RPL (TX replace) module instance. */
struct tx_rpl_nthw {
	uint8_t m_physical_adapter_no; /* module instance number (set from n_instance) */
	nt_fpga_t *mp_fpga;            /* owning FPGA handle */

	nt_module_t *m_tx_rpl;         /* MOD_TX_RPL module handle */

	/* RCP (recipe) control/data registers and fields */
	nt_register_t *mp_rcp_ctrl;
	nt_field_t *mp_rcp_ctrl_addr;
	nt_field_t *mp_rcp_ctrl_cnt;

	nt_register_t *mp_rcp_data;
	nt_field_t *mp_rcp_data_dyn;
	nt_field_t *mp_rcp_data_ofs;
	nt_field_t *mp_rcp_data_len;
	nt_field_t *mp_rcp_data_rpl_ptr;
	nt_field_t *mp_rcp_data_ext_prio;

	/* EXT table control/data registers and fields */
	nt_register_t *mp_ext_ctrl;
	nt_field_t *mp_ext_ctrl_addr;
	nt_field_t *mp_ext_ctrl_cnt;

	nt_register_t *mp_ext_data;
	nt_field_t *mp_ext_data_rpl_ptr;

	/* RPL (replace data) control/data registers and fields */
	nt_register_t *mp_rpl_ctrl;
	nt_field_t *mp_rpl_ctrl_addr;
	nt_field_t *mp_rpl_ctrl_cnt;

	nt_register_t *mp_rpl_data;
	nt_field_t *mp_rpl_data_value;
};

/* Lifecycle: allocate, bind to FPGA instance, destroy */
struct tx_rpl_nthw *tx_rpl_nthw_new(void);
void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);

int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);

/* RCP: set shadow fields, then flush to hardware */
void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);

/* EXT table accessors */
void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);

/* RPL table accessors */
void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);

#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 5/8] net/ntnic: adds FPGA abstraction layer
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-08-31 13:51   ` [PATCH v12 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-08-31 13:51   ` Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
v11:
* Fix dereferencing type-punned pointer in macro
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  265 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14376 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
/*
 * Program a tunnel header into the ROA TUNHDR table at the given index.
 * Writes 4 groups of 4 words for IPv4 (64 bytes) or 8 groups for IPv6
 * (128 bytes), converting each word from network to host byte order.
 * Returns 0 on success, or the OR of the backend error codes otherwise.
 */
int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
				   struct tunnel_header_s *tun)
{
	int err = 0;
	int num_writes = (tun->ip_version == 4) ? 4 : 8;

	/*
	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
	 */
	for (int i = 0; (i < num_writes) && !err; i++) {
		for (int ii = 0; (ii < 4) && !err; ii++) {
			/* must write each 4 words backwards! */
			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
						     index, i * 4 + ii,
						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
		}
	}

	return err;
}
+
/*
 * Program the ROA tunnel configuration (TUNCFG) entry at the given index
 * from the packed 64-bit color_actions word (see the ROA bitmask layout
 * in flow_api_actions.h). Configures tunnel push, IP length/checksum
 * pre-calculation, recirculation and TX destination. Returns 0 on
 * success, -1 on an invalid TX destination combination.
 */
int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
	/*
	 * If tunnel header specified
	 */
	int tun_len = get_roa_tunhdr_len(color_actions);

	if (tun_len) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
				      tun_len);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
				      roa_get_tun_type(color_actions));

		/* set the total tunnel IP header length */
		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 */
			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
					       sizeof(struct flow_elem_ipv6))) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
				/* tunnel header length excludes the IPv6 header itself */
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   (sizeof(struct flow_elem_eth) +
						    sizeof(struct flow_elem_ipv6))));
			}
		} else {
			/* IPv4 */
			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
						      index, 1);
				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
					(uint32_t)(tun_len -
						   sizeof(struct flow_elem_eth)));
			}
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
				      get_roa_tun_ip_type(color_actions));

		if (get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 - Do not update the IP checksum in the tunnel header */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
					      0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index, 0);
		} else {
			/* IPv4 - use the pre-calculated checksum from color_actions */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
					      index, 1);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
					      index,
					      get_roa_tun_ip_csum(color_actions));
		}

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
				      index, 1);

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
	}

	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);

	if (recirculate_bypass) {
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      recirculate_bypass);
		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);

	} else {
		int32_t recirculate_port = roa_get_recirc_port(color_actions);

		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
				      255);

		if (recirculate_port >= 0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
					      index, recirculate_port);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);
		}
	}

	uint8_t tx = roa_get_tx(color_actions);

	if (tx) {
		if (tx == DESTINATION_TX_PHY0) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
		} else if (tx == DESTINATION_TX_PHY1) {
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY1);
		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
			/* replicate to both ports: TX on PHY0, recirculate to PHY1 */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
					      index, ROA_TX_PHY0);
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
					      index, 0x81); /* port 1 - only port left */
			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
					      index, 1);

		} else {
			return -1; /* ERR */
		}
	}

	/*
	 * Special IOA memory that contains ROA information - bad FPGA design
	 */
	if (tx || tun_len) {
		if (be->ioa.ver > 3 && tun_len &&
				get_roa_tun_ip_type(color_actions) == 1) {
			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, 2);
		} else {
			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
					       index, !!tun_len);
		}
		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
	}

	return 0;
}
+
/*
 * Program the IOA recipe (RCP) entry at the given index from the packed
 * 64-bit color_actions word (see the IOA bitmask layout in
 * flow_api_actions.h): tunnel/VLAN pop, VLAN push (TPID/TCI) and
 * destination queue override. Always returns 0.
 */
int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
				   uint64_t color_actions)
{
	/* the ioa_set_* helpers called with 0 yield the corresponding bitmask */
	if (color_actions & ioa_set_vxlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
	}

	if (color_actions & ioa_set_vlan_pop(0)) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
	}

	int tpid_sel = ioa_get_tpid_sel(color_actions);

	if (color_actions & ioa_set_vlan_push(0, 0)) {
		uint16_t tci = ioa_get_vlan_tci(color_actions);

		/* split the 16-bit TCI into VID (11:0), DEI (12) and PCP (15:13) */
		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
		       tpid_sel ? 0x88a8 : 0x8100, tci);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
				   tci & 0x0FFF);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
				   (tci >> 12) & 0x1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
				   (tci >> 13) & 0x7);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
	}

	int queue = ioa_get_queue(color_actions);

	if (queue >= 0) {
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
	}

	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#ifndef _FLOW_ACTIONS_H_
#define _FLOW_ACTIONS_H_

/* Forward declarations; a pointer suffices for the APIs declared below */
struct flow_api_backend_s;
struct tunnel_header_s;

/* Number of color (flow statistics) counters in this build */
#define MAX_COLOR_FLOW_STATS 0x400

/* Base value for recirc-bypass port encodings (0x80 + physical port) */
#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
#define MAX_REPLICATION_PORTS 2

/* TX destination selector (bit flags; PHY0|PHY1 means replicate to both) */
enum {
	DESTINATION_TX_NONE = 0,
	DESTINATION_TX_PHY0 = 1,
	DESTINATION_TX_PHY1 = 2
};

/* Tunnel IP version selector */
enum { TUN_IPV4 = 0, TUN_IPV6 };

/* VLAN TPID selector values used in the IOA action word */
enum {
	VLAN_TPID_802_1Q = 0,
	VLAN_TPID_802_1AD,
	VLAN_TPID_CUSTOM_0,
	VLAN_TPID_CUSTOM_1
};

/* ROA retransmit destination (TX_LAG_IX register values) */
enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };

/*
 * before version 6 of QSL
 */
#if (MAX_COLOR_FLOW_STATS == 0x4000)
#define MAX_HW_FLOW_STATS_OLD 0x3fff

#else
#if (MAX_COLOR_FLOW_STATS == 0x400)
#define MAX_HW_FLOW_STATS_OLD 0x03ff
#else
#error *** Unsupported number of color statistics counter ***
#endif
#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
/*
 * Pack color, IOA and ROA indices into the pre-4.1 color-action layout.
 * The field widths depend on MAX_COLOR_FLOW_STATS (see the layout comment
 * above); bits 31:30 tag which layout is in use.
 */
static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
		uint32_t roa_rcp)
{
#if (MAX_COLOR_FLOW_STATS == 0x400)
	/* 10-bit color, 10-bit IOA, 10-bit ROA, layout tag 0 */
	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
				((ioa_rcp & 0x3ff) << 10) |
				((roa_rcp & 0x3ff) << 20) | (0 << 30);
#else
	/* 14-bit color, 8-bit IOA, 8-bit ROA, layout tag 1 */
	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
				((ioa_rcp & 0xff) << 14) |
				((roa_rcp & 0xff) << 22) | (1 << 30);
#endif
	return color_action;
}
+
/*
 * Contiguous bitmask covering bit positions b..a inclusive (a >= b).
 * NOTE(review): undefined for spans of 32 bits or more (1U << 32);
 * all uses below stay well within that limit.
 */
#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)

/*
 *  9:0   Mark (color) 1024 flow stats
 * 17:10  IOA index     256 entries
 * 25:18  ROA index     256 entries
 * 30:26  QSL and HSH    32 recipes indexable
 * 31:31  CAO               implicitly when color_action is set
 */
#define FLOW_MARK_MASK BITMASK(9, 0)
#define IOA_RCP_MASK BITMASK(17, 10)
#define ROA_RCP_MASK BITMASK(25, 18)
#define QSL_HSH_MASK BITMASK(30, 26)
+
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1 << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
/* Mark that a new recirculate port was allocated (bit 50). */
static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
{
	return actions | (1ULL << 50);
}
+
/* Return 1 if a new recirculate port was allocated (bit 50), else 0. */
static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
{
	const uint64_t flag = (actions >> 50) & 1;

	return (uint8_t)flag;
}
+
/* Store the tunnel IP type (0 = IPv4, 1 = IPv6) in bit 24. */
static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
{
	return actions | ((uint64_t)(ip_type & 1) << 24);
}
+
/* Extract the tunnel IP type (bit 24): 0 = IPv4, 1 = IPv6. */
static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
{
	const uint64_t ip_type = (actions >> 24) & 1;

	return (uint8_t)ip_type;
}
+
/* Store the pre-calculated tunnel IP checksum in bits 49:34. */
static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
{
	return actions | ((uint64_t)csum << 34);
}
+
/* Extract the pre-calculated tunnel IP checksum (bits 49:34). */
static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
{
	const uint64_t csum = (actions >> 34) & 0xffff;

	return (uint16_t)csum;
}
+
/* Store the tunnel header length (bits 33:26); 0 means no tunnel. */
static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
{
	return actions | ((uint64_t)length << 26);
}
+
/* Extract the tunnel header length (bits 33:26); 0 means no tunnel. */
static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
{
	const uint64_t len = (actions >> 26) & 0xff;

	return (uint8_t)len;
}
+
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
/* Extract the encoded TX destination (bits 23:20). */
static inline uint8_t roa_get_tx(uint64_t actions)
{
	const uint64_t tx = (actions >> 20) & 0x0f;

	return (uint8_t)tx;
}
+
/* Store the tunnel type in bits 19:16. */
static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
{
	return actions | ((type & 0x0f) << 16);
}
+
/* Extract the tunnel type (bits 19:16). */
static inline uint8_t roa_get_tun_type(uint64_t actions)
{
	const uint64_t type = (actions >> 16) & 0x0f;

	return (uint8_t)type;
}
+
/* Store the recirculate port (bits 7:0) and set the recirculate flag (bit 25). */
static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
{
	return actions | (1ULL << 25) | port;
}
+
/* Return the recirculate port (bits 7:0), or -1 if bit 25 is not set. */
static inline int32_t roa_get_recirc_port(uint64_t actions)
{
	if (((actions >> 25) & 1) == 0)
		return -1;

	return (int32_t)(actions & 0xff);
}
+
/* Store the recirc-bypass port in bits 15:8 (nonzero overrides recirc port). */
static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
{
	return actions | (((uint64_t)port & 0xff) << 8);
}
+
/* Extract the recirc-bypass port (bits 15:8). */
static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
{
	const uint64_t port = (actions >> 8) & 0xff;

	return (uint8_t)port;
}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
/*
 * Set the queue-override action: bit 31 = override enable,
 * bits 23:16 = host buffer (queue) index.
 *
 * Fix: use 1ULL, not 1. A signed `1 << 31` is undefined behavior and in
 * practice yields INT_MIN, which sign-extends to 0xffffffff80000000 when
 * OR'ed into the 64-bit action word, corrupting bits 63:32 (the ROA-style
 * upper bits of the word).
 */
static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
{
	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
	return actions;
}
+
/*
 * Return the override queue index (bits 23:16), or -1 if the override
 * enable bit (31) is not set.
 *
 * Fix: use 1ULL for the bit-31 mask. A signed `1 << 31` is undefined
 * behavior and sign-extends to 0xffffffff80000000 in the 64-bit AND, so
 * the test would also match any of bits 63:32.
 */
static inline int ioa_get_queue(uint64_t actions)
{
	if (!(actions & (1ULL << 31)))
		return -1;
	return ((actions >> 16) & 0xff);
}
+
/* Set the pop-outer-VxLAN action flag (bit 28). */
static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
{
	return actions | (1U << 28);
}
+
/* Set the pop-outer-VLAN action flag (bit 29). */
static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
{
	return actions | (1U << 29);
}
+
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
/* Extract the VLAN TPID selector (bits 27:24). */
static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
{
	const uint64_t tpid_sel = (actions >> 24) & 0x0f;

	return (uint8_t)tpid_sel;
}
+
/* Set the push-VLAN flag (bit 30) and store the TCI in bits 15:0. */
static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
{
	return actions | (1U << 30) | tci;
}
+
/* Set the push-VLAN flag (bit 30) and store the PCP in TCI bits 15:13. */
static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
{
	const uint32_t pcp_bits = (uint32_t)(pcp & 7) << 13;

	return actions | (1U << 30) | pcp_bits;
}
+
/* Extract the VLAN TCI (bits 15:0). */
static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
{
	const uint64_t tci = actions & 0xffff;

	return (uint16_t)tci;
}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/*
 * Registry of all NIC HW modules handled by the flow API backend.
 * Each entry bundles the module's log name with its lifecycle callbacks;
 * flow_api_backend_init()/_reset()/_done() iterate this table in order.
 */
static const struct {
	const char *name;	/* module name used in log messages */
	int (*allocate)(struct flow_api_backend_s *be);	/* alloc SW cache */
	void (*free)(struct flow_api_backend_s *be);	/* release SW cache */
	int (*reset)(struct flow_api_backend_s *be);	/* set defaults */
	bool (*present)(struct flow_api_backend_s *be);	/* module in FPGA? */
} module[] = {
	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
		hw_mod_cat_present
	},
	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
		hw_mod_km_present
	},
	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
		hw_mod_flm_present
	},
	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
		hw_mod_hsh_present
	},
	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
		hw_mod_hst_present
	},
	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
		hw_mod_qsl_present
	},
	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
		hw_mod_slc_present
	},
	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
	},
	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
		hw_mod_pdb_present
	},
	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
		hw_mod_ioa_present
	},
	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
		hw_mod_roa_present
	},
	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
		hw_mod_rmc_present
	},
	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
		hw_mod_tpe_present
	},
};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+void zero_module_cache(struct common_func_s *mod)
+{
+	memset(mod->base, 0, mod->allocated_size);
+}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
/* Log an out-of-range index error for @func; always returns -2. */
static inline int error_index_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
	return -2;
}
+
/* Log an out-of-range word-offset error for @func; always returns -3. */
static inline int error_word_off_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
	return -3;
}
+
/*
 * Log that module @mod at version @ver (major.minor packed, see
 * VER_MAJOR/VER_MINOR) is unsupported; always returns -4.
 */
static inline int error_unsup_ver(const char *func, const char *mod, int ver)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
/* Log that a register field is unsupported in this module version; -5. */
static inline int error_unsup_field(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
		func);
	return -5;
}
+
/*
 * Log that resource @resource is exhausted in module @mod version @ver.
 * Returns -4 (deliberately the same code as error_unsup_ver()).
 * NOTE(review): the adjacent string literals render as "module:%s" with no
 * space between ':' and the name - cosmetic, confirm before changing.
 */
static inline int error_resource_count(const char *func, const char *resource,
	const char *mod, int ver)
{
	NT_LOG(INF, FILTER,
	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
	       "%s ver %i.%i\n",
	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
/*
 * Transfer a 32-bit register field between cache and caller.
 * get != 0: copy the cached value into *val (read);
 * get == 0: copy *val into the cache (write).
 */
static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
{
	uint32_t *dst = get ? val : cached_val;
	const uint32_t *src = get ? cached_val : val;

	*dst = *src;
}
+
/*
 * Like get_set() but the cached value is signed; the 32-bit pattern is
 * transferred with an explicit sign/unsigned reinterpretation.
 */
static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = (uint32_t)*cached_val;
		return;
	}
	*cached_val = (int32_t)*val;
}
+
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
/*
 * Compare two entries of a register array byte-for-byte.
 * Returns 1 when idx and cmp_idx are distinct but hold identical contents,
 * 0 when they differ (or are the same index), and a negative error code
 * for unsupported use / out-of-range cmp_idx.
 */
static inline int do_compare_indexes(void *be_module_reg,
	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
	unsigned int nb_elements, int get, const char *func)
{
	const uint8_t *base = (const uint8_t *)be_module_reg;

	if (!get)
		return error_unsup_field(func);
	if (cmp_idx >= nb_elements)
		return error_index_too_large(func);
	if (idx == cmp_idx)
		return 0;
	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
		      type_size) == 0 ? 1 : 0;
}
+
/* Return 1 if any of the n bytes at addr is non-zero, otherwise 0. */
static inline int is_non_zero(const void *addr, size_t n)
{
	const uint8_t *bytes = (const uint8_t *)addr;

	while (n--) {
		if (*bytes++)
			return 1;
	}
	return 0;
}
+
/* Return 1 if all n bytes at addr are 0xff, otherwise 0. */
static inline int is_all_bits_set(const void *addr, size_t n)
{
	const uint8_t *p = (const uint8_t *)addr;
	const uint8_t *end = p + n;

	for (; p != end; p++) {
		if (*p != 0xff)
			return 0;
	}
	return 1;
}
+
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp;
+	union {
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;
+	uint32_t nb_qst_entries;
+	union {
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories;
+
+	union {
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;
+	uint32_t nb_roa_epp_entries;
+	union {
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories;
+	uint32_t nb_lag_entries;
+	union {
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;
+	uint32_t nb_ifr_categories;
+	uint32_t nb_cpy_writers;
+	uint32_t nb_rpl_depth;
+	uint32_t nb_rpl_ext_categories;
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+struct flow_api_backend_ops {
+	int version;
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+struct flow_api_backend_s {
+	void *be_dev;
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..bee12b71f7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
/*
 * One enumerator per allocatable FPGA resource pool; used as the index
 * into struct flow_handle's resource[RES_COUNT] table.
 */
enum res_type_e {
	RES_QUEUE,
	RES_CAT_CFN,
	RES_CAT_COT,
	RES_CAT_EXO,
	RES_CAT_LEN,
	RES_KM_FLOW_TYPE,
	RES_KM_CATEGORY,
	RES_HSH_RCP,
	RES_PDB_RCP,
	RES_QSL_RCP,
	RES_QSL_QST,
	RES_SLC_RCP,
	RES_IOA_RCP,
	RES_ROA_RCP,
	RES_FLM_FLOW_TYPE,
	RES_FLM_RCP,
	RES_HST_RCP,
	RES_TPE_RCP,
	RES_TPE_EXT,
	RES_TPE_RPL,
	RES_COUNT, /* number of real resource types; sizes resource arrays */
	RES_INVALID
};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
/* Which KM extractor a match word is assigned to (see diagram above). */
enum extractor_e {
	KM_USE_EXTRACTOR_UNDEF, /* not yet assigned */
	KM_USE_EXTRACTOR_QWORD, /* 128-bit quad-word extractor (QW0/QW4) */
	KM_USE_EXTRACTOR_SWORD, /* 32-bit single-word extractor (SW8/SW9/SWX) */
};
+
/* One collected match field before it is packed into the final KM key. */
struct match_elem_s {
	enum extractor_e extr; /* extractor type this word is mapped to */
	int masked_for_tcam; /* if potentially selected for TCAM */
	uint32_t e_word[4]; /* match value, up to 128 bits */
	uint32_t e_mask[4]; /* bit mask applied to e_word */

	int extr_start_offs_id; /* start-offset selector (enum frame_offs_e in km_add_match_elem) */
	int8_t rel_offs; /* byte offset relative to the selected start offset */
	uint32_t word_len; /* number of valid 32-bit words in e_word */
};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
/*
 * Complete KM (Key Matcher) definition of one flow, carried through all
 * build stages: element collection, key formatting and HW programming.
 */
struct km_flow_def_s {
	struct flow_api_backend_s *be;

	/* For keeping track of identical entries */
	struct km_flow_def_s *reference; /* existing entry this one duplicates, if any */
	struct km_flow_def_s *root; /* head of the identical-entry chain */

	/* For collect flow elements and sorting */
	struct match_elem_s match[MAX_MATCH_FIELDS];
	struct match_elem_s *match_map[MAX_MATCH_FIELDS]; /* pointers into match[], used for sorting */
	int num_ftype_elem; /* number of valid entries in match[] */

	/* Finally formatted CAM/TCAM entry */
	enum cam_tech_use_e target; /* KM_CAM, KM_TCAM or KM_SYNERGY */
	uint32_t entry_word[MAX_WORD_NUM];
	uint32_t entry_mask[MAX_WORD_NUM];
	int key_word_size;

	/* TCAM calculated possible bank start offsets */
	int start_offsets[MAX_TCAM_START_OFFSETS];
	int num_start_offsets;

	/* Flow information */

	/*
	 * HW input port ID needed for compare. In port must be identical on flow
	 * types
	 */
	uint32_t port_id;
	uint32_t info; /* used for color (actions) */
	int info_set;
	int flow_type; /* 0 is illegal and used as unset */
	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */

	/* CAM specific bank management */
	int cam_paired;
	int record_indexes[MAX_BANKS]; /* hash-derived record index per bank */
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct cam_distrib_s *cam_dist;
	struct hasher_s *hsh;

	/* TCAM specific bank management */
	struct tcam_distrib_s *tcam_dist;
	int tcam_start_bank;
	int tcam_record;
};
+
+/*
+ * KCC-CAM
+ */
/*
 * 64-bit KCC-CAM lookup key packed as bit fields. Also accessed as raw
 * key64/key32[] through the anonymous union in kcc_flow_def_s.
 */
struct kcc_key_s {
	uint64_t sb_data : 32; /* sideband data: VLAN TPID/VID, VXLAN VNI, or 0xffffffff for none */
	uint64_t sb_type : 8; /* sideband type: 0 = none, 1 = VLAN, 2 = VXLAN */
	uint64_t cat_cfn : 8; /* categorizer function index */
	uint64_t port : 16; /* port number */
};
+
+#define KCC_ID_INVALID 0xffffffff
+
/* One KCC (KM Category CAM) flow entry plus its CAM bookkeeping state. */
struct kcc_flow_def_s {
	struct flow_api_backend_s *be;
	union {
		uint64_t key64; /* whole key, used for fast compare */
		uint32_t key32[2]; /* word view, used when programming HW */
		struct kcc_key_s key; /* field view */
	};
	uint32_t km_category;
	uint32_t id; /* allocated unique id, or KCC_ID_INVALID */

	uint8_t *kcc_unique_ids; /* shared bitmap of unique ids in use */

	int flushed_to_target; /* entry has been programmed into NIC hw */
	int record_indexes[MAX_BANKS]; /* hash-derived record index per bank */
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct kcc_cam_distrib_s *cam_dist;
	struct hasher_s *hsh;
};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
/* Tunnel encapsulation header to push (types: TUN_TYPE_VXLAN/NVGRE above). */
struct tunnel_header_s {
	union {
		uint8_t hdr8[MAX_TUN_HDR_SIZE];
		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4]; /* word view of the same bytes */
	} d;
	uint32_t user_port_id;
	uint8_t len; /* header length in bytes */

	uint8_t nb_vlans;

	uint8_t ip_version; /* 4: v4, 6: v6 */
	uint16_t ip_csum_precalc; /* precalculated IP header checksum */

	uint8_t new_outer;
	uint8_t l2_len; /* outer L2/L3/L4 header lengths in bytes */
	uint8_t l3_len;
	uint8_t l4_len;
};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
/* High-priority partial matches needing special SW processing (non-OVS). */
enum special_partial_match_e {
	/* NOTE(review): "SPCIAL" is a typo for "SPECIAL"; renaming would break
	 * users of this header, so it is left as-is — confirm before fixing. */
	SPCIAL_MATCH_NONE,
	SPECIAL_MATCH_LACP, /* match identifying LACP control frames */
};
+
+#define PORT_ID_NONE 0xffffffff
+
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
/*
 * Fully parsed and validated flow definition: the result of converting a
 * flow match + action list, and the input to NIC resource allocation
 * (see struct flow_handle, where this is the 1st-step "fd" member).
 */
struct nic_flow_def {
	/*
	 * Frame Decoder match info collected
	 */
	int l2_prot;
	int l3_prot;
	int l4_prot;
	int tunnel_prot;
	int tunnel_l3_prot;
	int tunnel_l4_prot;
	int vlans;
	int fragmentation;
	/*
	 * Additional meta data for various functions
	 */
	int in_port_override;
	int l4_dst_port;
	/*
	 * Output destination info collection
	 */
	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
	int dst_num_avail;

	/*
	 * To identify high priority match with mark for special SW processing (non-OVS)
	 */
	enum special_partial_match_e special_match;

	/*
	 * Mark or Action info collection
	 */
	uint32_t mark;
	uint64_t roa_actions;
	uint64_t ioa_actions;

	uint32_t jump_to_group;

	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];

	int full_offload;
	/*
	 * Action push tunnel
	 */
	struct tunnel_header_s tun_hdr;

	/*
	 * If DPDK RTE tunnel helper API used
	 * this holds the tunnel if used in flow
	 */
	struct tunnel_s *tnl;

	/*
	 * Header Stripper
	 */
	int header_strip_start_dyn;
	int header_strip_start_ofs;
	int header_strip_end_dyn;
	int header_strip_end_ofs;
	int header_strip_removed_outer_ip;

	/*
	 * Modify field
	 */
	struct {
		uint32_t select;
		uint32_t dyn;
		uint32_t ofs;
		uint32_t len;
		uint32_t level;
		union { /* modify value, viewed at byte/short/word granularity */
			uint8_t value8[16];
			uint16_t value16[8];
			uint32_t value32[4];
		};
	} modify_field[MAX_CPY_WRITERS_SUPPORTED];

	uint32_t modify_field_count;
	uint8_t ttl_sub_enable;
	uint8_t ttl_sub_ipv4;
	uint8_t ttl_sub_outer;

	/*
	 * Key Matcher flow definitions
	 */
	struct km_flow_def_s km;

	/*
	 * Key Matcher Category CAM
	 */
	struct kcc_flow_def_s *kcc;
	int kcc_referenced;

	/*
	 * TX fragmentation IFR/RPP_LR MTU recipe
	 */
	uint8_t flm_mtu_fragmentation_recipe;
};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
/*
 * Handle returned for an installed flow; kept in a doubly linked list per
 * device. `type` discriminates the union: the first arm appears to serve
 * FLOW_HANDLE_TYPE_FLOW and the second (flm_* fields)
 * FLOW_HANDLE_TYPE_FLM — confirm against the flow creation code.
 */
struct flow_handle {
	enum flow_handle_type type;

	struct flow_eth_dev *dev;
	struct flow_handle *next;
	struct flow_handle *prev;

	union {
		struct {
			/*
			 * 1st step conversion and validation of flow
			 * verified and converted flow match + actions structure
			 */
			struct nic_flow_def *fd;
			/*
			 * 2nd step NIC HW resource allocation and configuration
			 * NIC resource management structures
			 */
			struct {
				int index; /* allocation index into NIC raw resource table */
				/* number of contiguous allocations needed for this resource */
				int count;
				/*
				 * This resource if not initially created by this flow, but reused
				 * by it
				 */
				int referenced;
			} resource[RES_COUNT]; /* indexed by enum res_type_e */
			int flushed;

			uint32_t flow_stat_id;
			uint32_t color;
			int cao_enabled;
			uint32_t cte;

			uint32_t port_id; /* MAC port ID or override of virtual in_port */
			uint32_t flm_ref_count;
			uint8_t flm_group_index;
			uint8_t flm_ft_index;
		};

		struct {
			uint32_t flm_data[10];
			uint8_t flm_prot;
			uint8_t flm_kid;
			uint8_t flm_prio;

			uint16_t flm_rpl_ext_ptr;
			uint32_t flm_nat_ipv4;
			uint16_t flm_nat_port;
			uint8_t flm_dscp;
			uint32_t flm_teid;
			uint8_t flm_rqi;
			uint8_t flm_qfi;

			uint8_t flm_mtu_fragmentation_recipe;

			struct flow_handle *flm_owner; /* owning FLOW-type handle */
		};
	};
};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
#define OWNER_ID_COUNT 256

/* Book-keeping for one translated (hardware) group slot. */
struct group_lookup_entry_s {
	uint64_t ref_counter; /* number of active users of this slot */
	uint32_t *reverse_lookup; /* back-pointer into translation_table for O(1) release */
};

/* Translation state: maps (owner_id, group_in) to a hardware group slot. */
struct group_handle_s {
	uint32_t group_count;

	/* OWNER_ID_COUNT rows of group_count entries; 0 means "no mapping" */
	uint32_t *translation_table;

	/* one entry per hardware group slot; slot 0 is never allocated */
	struct group_lookup_entry_s *lookup_entries;
};

/*
 * Allocate a group translation handle for group_count hardware slots.
 * Returns 0 on success, -1 on allocation failure; *handle is NULL on
 * failure (never a partially built handle).
 */
int flow_group_handle_create(void **handle, uint32_t group_count)
{
	struct group_handle_s *group_handle;

	*handle = NULL;
	group_handle = calloc(1, sizeof(struct group_handle_s));
	if (group_handle == NULL)
		return -1;

	group_handle->group_count = group_count;
	group_handle->translation_table =
		calloc((size_t)OWNER_ID_COUNT * group_count, sizeof(uint32_t));
	group_handle->lookup_entries =
		calloc(group_count, sizeof(struct group_lookup_entry_s));

	/*
	 * Fix: the original dereferenced the handle before checking its
	 * allocation and could return success with NULL tables inside.
	 */
	if (group_handle->translation_table == NULL ||
			group_handle->lookup_entries == NULL) {
		free(group_handle->translation_table);
		free(group_handle->lookup_entries);
		free(group_handle);
		return -1;
	}

	*handle = group_handle;
	return 0;
}

/* Free a handle created by flow_group_handle_create(); safe on NULL. */
int flow_group_handle_destroy(void **handle)
{
	if (*handle) {
		struct group_handle_s *group_handle =
			(struct group_handle_s *)*handle;

		free(group_handle->translation_table);
		free(group_handle->lookup_entries);

		free(*handle);
		*handle = NULL;
	}

	return 0;
}

/*
 * Look up - or create - the hardware group slot for (owner_id, group_in).
 * Repeated lookups bump the slot's reference count. Group 0 is passed
 * through untranslated. Returns 0 with the slot in *group_out, or -1
 * when group_in is out of range or no free slot remains.
 */
int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
			     uint32_t *group_out)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	uint32_t *table_ptr;
	uint32_t lookup;

	if (group_handle == NULL || group_in >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (group_in == 0) {
		*group_out = 0;
		return 0;
	}

	/*
	 * Fix: rows are group_count entries wide, matching the allocation
	 * above; the original stepped rows by OWNER_ID_COUNT, which
	 * reads/writes out of bounds whenever group_count differs from
	 * OWNER_ID_COUNT.
	 */
	table_ptr = &group_handle->translation_table[(size_t)owner_id *
				 group_handle->group_count + group_in];
	lookup = *table_ptr;

	if (lookup == 0) {
		/* No mapping yet: claim the first unused slot (never slot 0). */
		for (lookup = 1;
				lookup < group_handle->group_count &&
				group_handle->lookup_entries[lookup].ref_counter > 0;
				++lookup)
			;

		if (lookup < group_handle->group_count) {
			group_handle->lookup_entries[lookup].reverse_lookup =
				table_ptr;
			group_handle->lookup_entries[lookup].ref_counter += 1;

			*table_ptr = lookup;
		} else {
			return -1; /* all slots in use */
		}
	} else {
		group_handle->lookup_entries[lookup].ref_counter += 1;
	}
	*group_out = lookup;
	return 0;
}

/*
 * Drop one reference on a translated group slot; when the last reference
 * goes away, the forward (owner_id, group_in) mapping is cleared too.
 * Group 0 is never tracked. Returns 0, or -1 on a bad slot index.
 */
int flow_group_translate_release(void *handle, uint32_t translated_group)
{
	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
	struct group_lookup_entry_s *lookup;

	if (group_handle == NULL ||
			translated_group >= group_handle->group_count)
		return -1;

	/* Don't translate group 0 */
	if (translated_group == 0)
		return 0;

	lookup = &group_handle->lookup_entries[translated_group];

	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
		lookup->ref_counter -= 1;
		if (lookup->ref_counter == 0) {
			/* Last user gone: remove the forward mapping as well. */
			*lookup->reverse_lookup = 0;
			lookup->reverse_lookup = NULL;
		}
	}

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
static uint32_t shuffle(uint32_t x)
{
	/* Fixed bit permutation: four masked groups are relocated and OR'ed. */
	uint32_t out = (x & 0x00000002) << 29;

	out |= (x & 0xAAAAAAA8) >> 3;
	out |= (x & 0x15555555) << 3;
	out |= (x & 0x40000000) >> 29;
	return out;
}
+
static uint32_t ror_inv(uint32_t x, const int s)
{
	/*
	 * Rotate right by s where the bits that wrap around come in
	 * inverted. Assumes 0 < s < 32 (s == 0 would shift by 32 - UB).
	 */
	uint32_t kept = x >> s;
	uint32_t wrapped_inverted = (~x) << (32 - s);

	return kept | wrapped_inverted;
}
+
static uint32_t combine(uint32_t x, uint32_t y)
{
	/* Four inverse rotations feed the nonlinear term below. */
	uint32_t x1 = ror_inv(x, 15);
	uint32_t x2 = ror_inv(x, 13);
	uint32_t y1 = ror_inv(y, 3);
	uint32_t y2 = ror_inv(y, 27);

	/* Bit set where exactly two of the four rotated terms are set. */
	uint32_t exactly_two = (x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
			       (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
			       (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2);

	return x ^ y ^ exactly_two;
}
+
static uint32_t mix(uint32_t x, uint32_t y)
{
	/* Nonlinear combine followed by the fixed bit shuffle. */
	uint32_t combined = combine(x, y);

	return shuffle(combined);
}
+
static uint64_t ror_inv3(uint64_t x)
{
	/*
	 * Per-32-bit-lane rotate right by 3 with inverted wrap-around:
	 * the mask covers the top 3 bits of each lane, where the wrapped
	 * (and inverted) low bits land.
	 */
	const uint64_t wrap_mask = 0xE0000000E0000000ULL;
	uint64_t shifted = x >> 3;
	uint64_t wrapped = x << 29;

	return (shifted | wrap_mask) ^ (wrapped & wrap_mask);
}
+
static uint64_t ror_inv13(uint64_t x)
{
	/* Per-lane rotate right by 13 with inverted wrap-around bits. */
	const uint64_t wrap_mask = 0xFFF80000FFF80000ULL;
	uint64_t shifted = x >> 13;
	uint64_t wrapped = x << 19;

	return (shifted | wrap_mask) ^ (wrapped & wrap_mask);
}
+
static uint64_t ror_inv15(uint64_t x)
{
	/* Per-lane rotate right by 15 with inverted wrap-around bits. */
	const uint64_t wrap_mask = 0xFFFE0000FFFE0000ULL;
	uint64_t shifted = x >> 15;
	uint64_t wrapped = x << 17;

	return (shifted | wrap_mask) ^ (wrapped & wrap_mask);
}
+
static uint64_t ror_inv27(uint64_t x)
{
	/* Per-lane rotate right by 27 with inverted wrap-around bits. */
	const uint64_t wrap_mask = 0xFFFFFFE0FFFFFFE0ULL;
	uint64_t shifted = x >> 27;
	uint64_t wrapped = x << 5;

	return (shifted | wrap_mask) ^ (wrapped & wrap_mask);
}
+
static uint64_t shuffle64(uint64_t x)
{
	/* The same bit permutation as shuffle(), applied to both 32-bit lanes. */
	uint64_t out = (x & 0x0000000200000002) << 29;

	out |= (x & 0xAAAAAAA8AAAAAAA8) >> 3;
	out |= (x & 0x1555555515555555) << 3;
	out |= (x & 0x4000000040000000) >> 29;
	return out;
}
+
static uint64_t pair(uint32_t x, uint32_t y)
{
	/* Concatenate two 32-bit words: x is the high half, y the low. */
	uint64_t high = (uint64_t)x << 32;

	return high | y;
}
+
static uint64_t combine64(uint64_t x, uint64_t y)
{
	/* 64-bit analogue of combine(): lane-wise inverse rotations. */
	uint64_t a = ror_inv15(x);
	uint64_t b = ror_inv13(x);
	uint64_t c = ror_inv3(y);
	uint64_t d = ror_inv27(y);

	/* Bit set where exactly two of the four rotated terms are set. */
	uint64_t exactly_two = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
			       (a & ~c & ~b & d) | (~a & c & b & ~d) |
			       (~a & c & ~b & d) | (~a & ~c & b & d);

	return x ^ y ^ exactly_two;
}
+
static uint64_t mix64(uint64_t x, uint64_t y)
{
	/* Nonlinear combine of both lanes followed by the lane shuffle. */
	uint64_t combined = combine64(x, y);

	return shuffle64(combined);
}
+
/*
 * Fold a 16-word (512-bit) key into one 32-bit hash via the binary
 * mixing tree sketched below: pair()/mix64() reduce eight word pairs
 * to a single 64-bit value, then 32-bit mix/combine rounds finish it.
 */
static uint32_t calc16(const uint32_t key[16])
{
	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
	/*   0       1       2       3       4       5       6       7     Layer 1   */
	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
	/*       0               1               2               3         Layer 2   */
	/*        \______.______/                 \______.______/                    */
	/*               0                               1                 Layer 3   */
	/*                \______________.______________/                            */
	/*                               0                                 Layer 4   */
	/*                              / \                                          */
	/*                              \./                                          */
	/*                               0                                 Layer 5   */
	/*                              / \                                          */
	/*                              \./                                Layer 6   */
	/*                             value                                         */

	uint64_t z;
	uint32_t x;

	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));

	/* Final 32-bit avalanche rounds over the collapsed 64-bit value. */
	x = mix((uint32_t)(z >> 32), (uint32_t)z);
	x = mix(x, ror_inv(x, 17));
	x = combine(x, ror_inv(x, 17));

	return x;
}
+
/*
 * Hash a 16-word key and derive one CAM record index per bank:
 * result[i] receives the index for bank i (hsh->banks entries written).
 * Returns the full 32-bit hash of the key.
 */
uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
{
	uint64_t val;
	uint32_t res;

	val = calc16(key);
	res = (uint32_t)val;

	/* Widen the hash when all banks together need more than 32 bits. */
	if (hsh->cam_bw > 32)
		val = (val << (hsh->cam_bw - 32)) ^ val;

	/* Slice cam_records_bw bits off per bank. */
	for (int i = 0; i < hsh->banks; i++) {
		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
		val = val >> hsh->cam_records_bw;
	}
	return res;
}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
#ifdef TESTING
/*
 * Self-test: hashes a fixed key, checks the 32-bit hash against a known
 * value and re-derives the first three per-bank indexes independently.
 * Returns the number of mismatches (0 = pass).
 */
int hash_test(struct hasher_s *hsh, int banks, int record_bw)
{
	int res = 0;
	int val[10], resval[10];
	uint32_t bits = 0;

	/* Only 4 of 16 words are given; the rest are zero per C init rules. */
	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
	const uint32_t result = 0xACECAE65;

	for (int i = 0; i < 16; i++)
		printf("%08x,", inval[i]);
	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);

	uint32_t ret = gethash(hsh, inval, val);

	printf("Return VAL = %08X  ==  %08X\n", ret, result);
	res += (ret != result) ? 1 : 0;

	/* Reproduce gethash()'s widening and per-bank slicing by hand. */
	int shft = (banks * record_bw) - 32;
	int mask = (1 << record_bw) - 1;

	if (shft > 0) {
		bits = (ret >> (32 - shft));
		ret ^= ret << shft;
	}

	resval[0] = ret & mask;
	ret >>= record_bw;
	resval[1] = ret & mask;
	ret >>= record_bw;
	resval[2] = ret & mask;
	resval[2] |= (bits << (record_bw - shft));

	for (int i = 0; i < 3; i++) {
		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
		res += (val[i] != resval[i]) ? 1 : 0;
	}

	return res;
}
#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
/* Hash geometry for one CAM: filled by init_hasher(), used by gethash(). */
struct hasher_s {
	int banks; /* number of CAM banks */
	int cam_records_bw; /* bits needed to index a record within one bank */
	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
	int cam_bw; /* banks * cam_records_bw: total index bits required */
};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
/* SW shadow of one KCC CAM record: which flow owns it and how many users. */
struct kcc_cam_distrib_s {
	struct kcc_flow_def_s *kcc_owner;
	int ref_cnt;
};

/*
 * CAM address of this flow's record in bank `bnk`.
 * NOTE: these macros implicitly use a local variable named `kcc` at the
 * expansion site.
 */
#define BE_CAM_KCC_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
	})


/* Byte size of the CAM shadow table. */
#define BE_CAM_ENTRIES \
	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
/* Byte size of the unique-id allocation bitmap (one bit per id). */
#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)

#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
/* Scratch list of CAM addresses being vacated during a cuckoo move. */
static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
/* Record the port number in the CAM key. Always returns 0. */
int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
{
	kcc->key.port = port;
	return 0;
}
+
/* Record the categorizer function index in the CAM key. Always returns 0. */
int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
{
	kcc->key.cat_cfn = cat_cfn;
	return 0;
}
+
/* Return the categorizer function index stored in the CAM key. */
uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
{
	return kcc->key.cat_cfn;
}
+
+/*
+ * other settings for KCC CAM
+ */
/* Set the KM category this KCC entry maps to. Always returns 0. */
int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
{
	kcc->km_category = category;
	return 0;
}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
/*
 * Program this flow's key words, KM category and id into the CAM record
 * for `bank` (record index from kcc->record_indexes), flush it to HW and
 * take SW ownership of the record. Returns -1 on any write failure,
 * otherwise the flush status (0 on success).
 */
static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
{
	int res;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
				 kcc->key32[0]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
				 kcc->key32[1]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
				 kcc->km_category);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	/* Record SW ownership so lookups and cuckoo moves can find it. */
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
	return res;
}
+
/*
 * Zero this flow's CAM record in `bank`, flush it to HW and release the
 * SW ownership. Also clears the local key/category (but not the unique
 * id - see note at the end). Returns -1 on any write failure, otherwise
 * the flush status.
 */
static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
{
	int res = 0;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;

	kcc->key64 = 0UL;
	kcc->km_category = 0;
	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
	return res;
}
+
/*
 * Try to relocate this flow's CAM entry from its current bank to any
 * other bank whose slot (at the flow's precomputed record index) is
 * free. Returns 1 when moved, 0 when no free slot exists or the HW
 * write failed.
 */
static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
{
	/* The entry being moved must actually be programmed somewhere. */
	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);

	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
		/* It will not select itself */
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
				NULL) {
			/*
			 * Populate in new position
			 */
			int res = kcc_cam_populate(kcc, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank
			 * HW flushes are really not needed, the old addresses are always taken over
			 * by the caller If you change this code in future updates, this may no
			 * longer be true then!
			 */
			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
			.kcc_owner = NULL;
			NT_LOG(DBG, FILTER,
			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       kcc->bank_used, bank,
			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
			       BE_CAM_KCC_DIST_IDX(bank));

			kcc->bank_used = bank;
			(*kcc->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Recursive cuckoo displacement: free the CAM address `bank_idx` by
 * moving the flow that owns it - directly, or by recursively displacing
 * the flows blocking it, up to `levels` deep. The reserved-address stack
 * keeps the recursion from revisiting addresses already being vacated.
 * Returns 1 when the address was freed, 0 otherwise.
 * NOTE(review): uses the file-scope kcc_cam_addr_reserved_stack, so this
 * is not reentrant - confirm callers are serialized.
 */
static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
				       int bank_idx, int levels,
				       int cam_adr_list_len)
{
	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;

	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);

	if (kcc_move_cuckoo_index(kcc))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* Reserve the address we are trying to vacate. */
	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
		int reserved = 0;
		/* Candidate address of the blocking flow in bank i. */
		int new_idx = BE_CAM_KCC_DIST_IDX(i);

		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (kcc_cam_addr_reserved_stack[i_reserved] ==
					new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		/* Free the candidate address one level down, then retry. */
		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
						      cam_adr_list_len);
		if (res) {
			if (kcc_move_cuckoo_index(kcc))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/*
 * Scratch key buffer for hashing; words 2-15 stay zero.
 * NOTE(review): file-scope state makes kcc_write_data_to_cam
 * non-reentrant - confirm single-threaded use.
 */
static uint32_t kcc_hsh_key[16];

/*
 * Hash the flow's key to get per-bank record indexes, pick a bank (a
 * free one, or one freed by cuckoo-moving existing flows up to 4 levels
 * deep) and program the CAM. Returns 0 on success, -1 when no bank could
 * be freed, or the populate status on write failure.
 */
static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
{
	int res = 0;
	int val[MAX_BANKS];

	/* Hash input is the two key words, swapped. */
	kcc_hsh_key[0] = kcc->key32[1];
	kcc_hsh_key[1] = kcc->key32[0];
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");

	/* 2-15 never changed - remains zero */

	gethash(kcc->hsh, kcc_hsh_key, val);

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
		kcc->record_indexes[i] = val[i];
	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
	       kcc->record_indexes[0], kcc->record_indexes[1],
	       kcc->record_indexes[2]);

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
				NULL) {
			bank = i_bank;
			break;
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
				i_bank++) {
			if (kcc_move_cuckoo_index_level(kcc,
							BE_CAM_KCC_DIST_IDX(i_bank),
							4, 0)) {
				bank = i_bank;
				break;
			}
		}

		if (bank < 0)
			return -1;
	}

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
	       BE_CAM_KCC_DIST_IDX(bank));
	res = kcc_cam_populate(kcc, bank);
	if (res == 0) {
		kcc->flushed_to_target = 1;
		kcc->bank_used = bank;
	} else {
		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
	}
	return res;
}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
/*
 * Add one reference to the shared CAM entry this flow points at.
 * Returns the new reference count.
 */
int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
{
	assert(kcc->bank_used >= 0 &&
	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);

	struct kcc_cam_distrib_s *cam_entry =
		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];

	NT_LOG(DBG, FILTER,
	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
	return ++cam_entry->ref_cnt;
}
+
/*
 * Drop one reference on the shared CAM entry; when the count reaches
 * zero the HW entry is cleared. Returns the remaining count, or -1 when
 * the stored bank index is out of range.
 */
int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
{
	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
		return -1;

	struct kcc_cam_distrib_s *cam_entry =
		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];

	if (cam_entry->ref_cnt) {
		if (--cam_entry->ref_cnt == 0) {
			/* Last user: remove the entry from HW as well. */
			kcc_clear_data_match_entry(kcc);
			NT_LOG(DBG, FILTER,
			       "KCC DEC Ref on Key became zero - Delete\n");
		}
	}

	NT_LOG(DBG, FILTER,
	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
	return cam_entry->ref_cnt;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/*
 * Mask patterns that are CAM compatible.
 * Each row gives the word count of a key and the exact per-word mask a
 * flow's element mask must equal for the match to be placed in the CAM
 * (anything else goes to the TCAM).
 */
static const struct cam_match_masks_s {
	uint32_t word_len;
	uint32_t key_mask[4];
} cam_masks[] = {
	/* IP6_SRC, IP6_DST */
	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } },
	/* DMAC,SMAC,ethtype */
	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000 } },
	/* DMAC,ethtype */
	{ 4, { 0xffffffff, 0xffff0000, 0x00000000, 0xffff0000 } },
	/* SMAC,ethtype */
	{ 4, { 0x00000000, 0x0000ffff, 0xffffffff, 0xffff0000 } },
	/* ETH_128 */
	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } },
	/* IP4_COMBINED */
	{ 2, { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 } },
	/*
	 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
	 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
	 */
	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
	/* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
	{ 1, { 0xffff0000, 0x00000000, 0x00000000, 0x00000000 } },
	/* TP_PORT_DST32 */
	{ 1, { 0x0000ffff, 0x00000000, 0x00000000, 0x00000000 } },
	/* IPv4 TOS mask bits used often by OVS */
	{ 1, { 0x00030000, 0x00000000, 0x00000000, 0x00000000 } },
	/* IPv6 TOS mask bits used often by OVS */
	{ 1, { 0x00300000, 0x00000000, 0x00000000, 0x00000000 } },
};

#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
/*
 * CAM structures and defines
 */

/* Book-keeping for one KM CAM record: which flow (if any) owns it. */
struct cam_distrib_s {
	struct km_flow_def_s *km_owner;
};

/* Flat index of CAM (bank, record) into the cam_dist array. */
#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
/*
 * Flat index of the record currently chosen by the hash for a bank.
 * NOTE: both macros expand `km` from the enclosing scope; the body uses a
 * GCC/Clang statement-expression extension to evaluate `bnk` only once.
 */
#define CAM_KM_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
	})

/* Maximum recursion depth when shuffling entries to make room (cuckoo). */
#define CUCKOO_MOVE_MAX_DEPTH 8
/* Dist indexes already claimed along the current cuckoo-move path. */
static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
/*
 * TCAM structures and defines
 */

/* Book-keeping for one KM TCAM slot: which flow (if any) owns it. */
struct tcam_distrib_s {
	struct km_flow_def_s *km_owner;
};

/* Flat index of TCAM (bank, record) into tcam_dist; expands `km` from scope. */
#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))

static int tcam_find_mapping(struct km_flow_def_s *km);
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = !!on;
+}
+
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (!marked[i] &&
+				!(km->match[i].extr_start_offs_id & SWX_INFO) &&
+				km->match[i].word_len == size)
+			return i;
+	}
+	return -1;
+}
+
#ifdef FLOW_DEBUG
/* Human-readable name of a frame offset id - debug logging only. */
static const char *get_prot_offset_descr(int idx)
{
	switch (idx) {
	case DYN_SOF:
		return "SOF";
	case DYN_L2:
		return "L2 header";
	case DYN_FIRST_VLAN:
		return "First VLAN";
	case DYN_MPLS:
		return "First MPLS";
	case DYN_L3:
		return "L3 header";
	case DYN_ID_IPV4_6:
		return "ID field IPv4/6";
	case DYN_FINAL_IP_DST:
		return "Final IP dest";
	case DYN_L4:
		return "L4 header";
	case DYN_L4_PAYLOAD:
		return "L4 payload";
	case DYN_TUN_PAYLOAD:
		return "Tunnel payload";
	case DYN_TUN_L2:
		return "Tunnel L2 header";
	case DYN_TUN_VLAN:
		return "First tunneled VLAN";
	case DYN_TUN_MPLS:
		return "First tunneled MPLS";
	case DYN_TUN_L3:
		return "Tunnel L3 header";
	case DYN_TUN_ID_IPV4_6:
		return "Tunnel ID field IPv4/6";
	case DYN_TUN_FINAL_IP_DST:
		return "Tunnel final IP dest";
	case DYN_TUN_L4:
		return "Tunnel L4 header";
	case DYN_TUN_L4_PAYLOAD:
		return "Tunnel L4 payload";
	case SB_VNI:
		return "VNI";
	case SB_MAC_PORT:
		return "In Port";
	case SB_KCC_ID:
		return "KCC ID";
	default:
		return "<unknown>";
	}
}
#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[0] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
/*
 * Compare a new KM flow definition `km` against an existing one `km1` to
 * decide whether the existing KM recipe / flow type can be reused.
 *
 * Returns km1's flow type (> 0) when compatible, 0 when not compatible,
 * or -1 when the masked keys are identical (flow clash - already
 * programmed). As a side effect, copies cam_paired or tcam_start_bank
 * from km1 into km when the recipe is reusable.
 */
int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
{
	if (km->target != km1->target ||
			km->num_ftype_elem != km1->num_ftype_elem ||
			km->key_word_size != km1->key_word_size ||
			km->info_set != km1->info_set)
		return 0;

	/*
	 *  before KCC-CAM:
	 *  if port is added to match, then we can have different ports in CAT
	 *  that reuses this flow type
	 */
	int port_match_included = 0, kcc_swx_used = 0;

	/*
	 * NOTE(review): the first branch indexes km->match[i] while the
	 * second dereferences km->match_map[i] - presumably both should use
	 * the same view of the elements; confirm the mixed access is
	 * intentional.
	 */
	for (int i = 0; i < km->num_ftype_elem; i++) {
		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
			port_match_included = 1;
			break;
		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
			kcc_swx_used = 1;
			break;
		}
	}

	/*
	 * If not using KCC and if port match is not included in CAM,
	 * we need to have same port_id to reuse
	 */
	if (!kcc_swx_used && !port_match_included &&
			km->port_id != km1->port_id)
		return 0;

	for (int i = 0; i < km->num_ftype_elem; i++) {
		/* using same extractor types in same sequence */
		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
				km->match_map[i]->extr != km1->match_map[i]->extr ||
				km->match_map[i]->word_len != km1->match_map[i]->word_len)
			return 0;
	}

	if (km->target == KM_CAM) {
		/* in CAM must exactly match on all masks */
		for (int i = 0; i < km->key_word_size; i++) {
			if (km->entry_mask[i] != km1->entry_mask[i])
				return 0;
		}

		/* Would be set later if not reusing from km1 */
		km->cam_paired = km1->cam_paired;
	} else if (km->target == KM_TCAM) {
		/*
		 *  If TCAM, we must make sure Recipe Key Mask does not
		 *  mask out enable bits in masks
		 *  Note: it is important that km1 is the original creator
		 *  of the KM Recipe, since it contains its true masks
		 */
		for (int i = 0; i < km->key_word_size; i++) {
			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
					km->entry_mask[i])
				return 0;
		}

		km->tcam_start_bank = km1->tcam_start_bank;
		km->tcam_record = -1; /* needs to be found later */
	} else {
		NT_LOG(DBG, FILTER,
		       "ERROR - KM target not defined or supported\n");
		return 0;
	}

	/*
	 * Check for a flow clash. If already programmed return with -1
	 */
	int double_match = 1;

	for (int i = 0; i < km->key_word_size; i++) {
		if ((km->entry_word[i] & km->entry_mask[i]) !=
				(km1->entry_word[i] & km1->entry_mask[i])) {
			double_match = 0;
			break;
		}
	}

	if (double_match)
		return -1;

	/*
	 * Note that TCAM and CAM may reuse same RCP and flow type
	 * when this happens, CAM entry wins on overlap
	 */

	/* Use same KM Recipe and same flow type - return flow type */
	return km1->flow_type;
}
+
/*
 * Program KM recipe `index` into HW from this flow definition: extractor
 * selection and offsets (QW0/QW4 for qword elements, DW8/DW10 or sideband
 * SWX for sword elements), the key mask A, and the target-specific setup -
 * entry length, flow-type map and CAM pairing for CAM, or bank map and key
 * length for TCAM (including finding a free TCAM record row).
 * Returns 0 on success, -1 on an unsupported extractor combination or a
 * failed TCAM mapping.
 */
int km_rcp_set(struct km_flow_def_s *km, int index)
{
	int qw = 0;   /* qword extractors used so far (max 2: QW0, QW4) */
	int sw = 0;   /* sword extractors used so far (max 2: DW8, DW10) */
	int swx = 0;  /* sideband (SWX) extractors used so far (max 1) */

	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);

	/* set extractor words, offs, contrib */
	for (int i = 0; i < km->num_ftype_elem; i++) {
		switch (km->match_map[i]->extr) {
		case KM_USE_EXTRACTOR_SWORD:
			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
				/* sideband words are only valid for CAM */
				if (km->target == KM_CAM && swx == 0) {
					/* SWX */
					if (km->match_map[i]
							->extr_start_offs_id ==
							SB_VNI) {
						NT_LOG(DBG, FILTER,
						       "Set KM SWX sel A - VNI\n");
						hw_mod_km_rcp_set(km->be,
								  HW_KM_RCP_SWX_CCH,
								  index, 0, 1);
						hw_mod_km_rcp_set(km->be,
								  HW_KM_RCP_SWX_SEL_A,
								  index, 0,
								  SWX_SEL_ALL32);
					} else if (km->match_map[i]
							->extr_start_offs_id ==
							SB_MAC_PORT) {
						NT_LOG(DBG, FILTER,
						       "Set KM SWX sel A - PTC + MAC\n");
						hw_mod_km_rcp_set(km->be,
								  HW_KM_RCP_SWX_SEL_A,
								  index, 0,
								  SWX_SEL_ALL32);
					} else if (km->match_map[i]
							->extr_start_offs_id ==
							SB_KCC_ID) {
						NT_LOG(DBG, FILTER,
						       "Set KM SWX sel A - KCC ID\n");
						hw_mod_km_rcp_set(km->be,
								  HW_KM_RCP_SWX_CCH,
								  index, 0, 1);
						hw_mod_km_rcp_set(km->be,
								  HW_KM_RCP_SWX_SEL_A,
								  index, 0,
								  SWX_SEL_ALL32);
					} else {
						return -1;
					}
				} else {
					return -1;
				}
				swx++;
			} else {
				if (sw == 0) {
					/* DW8 */
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_DW8_DYN,
							  index, 0,
							  km->match_map[i]
							  ->extr_start_offs_id);
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_DW8_OFS,
							  index, 0,
							  km->match_map[i]->rel_offs);
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_DW8_SEL_A,
							  index, 0,
							  DW8_SEL_FIRST32);
					NT_LOG(DBG, FILTER,
					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
					       km->match_map[i]
					       ->extr_start_offs_id,
					       km->match_map[i]->rel_offs);
				} else if (sw == 1) {
					/* DW10 */
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_DW10_DYN,
							  index, 0,
							  km->match_map[i]
							  ->extr_start_offs_id);
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_DW10_OFS,
							  index, 0,
							  km->match_map[i]->rel_offs);
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_DW10_SEL_A,
							  index, 0,
							  DW10_SEL_FIRST32);
					NT_LOG(DBG, FILTER,
					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
					       km->match_map[i]
					       ->extr_start_offs_id,
					       km->match_map[i]->rel_offs);
				} else {
					return -1;
				}
				sw++;
			}
			break;

		case KM_USE_EXTRACTOR_QWORD:
			if (qw == 0) {
				hw_mod_km_rcp_set(km->be,
						  HW_KM_RCP_QW0_DYN,
						  index, 0,
						  km->match_map[i]->extr_start_offs_id);
				hw_mod_km_rcp_set(km->be,
						  HW_KM_RCP_QW0_OFS,
						  index, 0,
						  km->match_map[i]->rel_offs);
				/* select how many of QW0's words contribute */
				switch (km->match_map[i]->word_len) {
				case 1:
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_QW0_SEL_A,
							  index, 0,
							  QW0_SEL_FIRST32);
					break;
				case 2:
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_QW0_SEL_A,
							  index, 0,
							  QW0_SEL_FIRST64);
					break;
				case 4:
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_QW0_SEL_A,
							  index, 0,
							  QW0_SEL_ALL128);
					break;
				default:
					return -1;
				}
				NT_LOG(DBG, FILTER,
				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
				       km->match_map[i]->extr_start_offs_id,
				       km->match_map[i]->rel_offs,
				       km->match_map[i]->word_len);
			} else if (qw == 1) {
				hw_mod_km_rcp_set(km->be,
						  HW_KM_RCP_QW4_DYN,
						  index, 0,
						  km->match_map[i]->extr_start_offs_id);
				hw_mod_km_rcp_set(km->be,
						  HW_KM_RCP_QW4_OFS,
						  index, 0,
						  km->match_map[i]->rel_offs);
				switch (km->match_map[i]->word_len) {
				case 1:
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_QW4_SEL_A,
							  index, 0,
							  QW4_SEL_FIRST32);
					break;
				case 2:
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_QW4_SEL_A,
							  index, 0,
							  QW4_SEL_FIRST64);
					break;
				case 4:
					hw_mod_km_rcp_set(km->be,
							  HW_KM_RCP_QW4_SEL_A,
							  index, 0,
							  QW4_SEL_ALL128);
					break;
				default:
					return -1;
				}
				NT_LOG(DBG, FILTER,
				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
				       km->match_map[i]->extr_start_offs_id,
				       km->match_map[i]->rel_offs,
				       km->match_map[i]->word_len);
			} else {
				return -1;
			}
			qw++;
			break;
		default:
			return -1;
		}
	}

	/* set mask A - written in reverse word order into the mask register */
	for (int i = 0; i < km->key_word_size; i++) {
		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
				  i,
				  km->entry_mask[i]);
		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
	}

	if (km->target == KM_CAM) {
		/* set info - Color */
		if (km->info_set) {
			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
					  1);
			NT_LOG(DBG, FILTER, "Set KM info A\n");
		}
		/* set key length A */
		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
				  km->key_word_size + !!km->info_set -
				  1); /* select id is -1 */
		/* set Flow Type for Key A */
		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
		       km->key_word_size + !!km->info_set - 1);

		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
				  1 << km->flow_type);

		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);

		/* Set Paired - only on the CAM part though... */
		if ((uint32_t)(km->key_word_size + !!km->info_set) >
				km->be->km.nb_cam_record_words) {
			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
					  index, 0, 1);
			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
			km->cam_paired = 1;
		}
	} else if (km->target == KM_TCAM) {
		uint32_t bank_bm = 0;

		if (tcam_find_mapping(km) < 0) {
			/* failed mapping into TCAM */
			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
			return -1;
		}

		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
		       km->be->km.nb_tcam_banks);

		/* one bank bit per key word, MSB-first from the start bank */
		for (int i = 0; i < km->key_word_size; i++) {
			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
					  (km->tcam_start_bank + i)));
		}

		/* Set BANK_A */
		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
		/* Set Kl_A */
		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
				  km->key_word_size - 1);

	} else {
		return -1;
	}
	return 0;
}
+
/*
 * Write this flow's key words and flow type into CAM `bank` at the record
 * previously chosen by the hash, claim the owner slot(s) in cam_dist, and
 * flush the record(s) to HW.
 * A paired key (more words than fit one CAM record) spills its remaining
 * words into the next consecutive record.
 * Returns 0 on success, the OR of HW write results otherwise.
 */
static int cam_populate(struct km_flow_def_s *km, int bank)
{
	int res = 0;
	/* total words to program: key words plus optional info/color word */
	int cnt = km->key_word_size + !!km->info_set;

	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
			i++, cnt--) {
		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
					 km->record_indexes[bank],
					 km->entry_word[i]);
		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
					 km->record_indexes[bank],
					 km->flow_type);
	}
	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;

	if (cnt) {
		/* words left over - must be a paired entry, use next record */
		assert(km->cam_paired);
		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
				i++, cnt--) {
			res |= hw_mod_km_cam_set(km->be,
						 HW_KM_CAM_W0 + i,
						 bank,
						 km->record_indexes[bank] + 1,
						 km->entry_word[km->be->km.nb_cam_record_words +
						 i]);
			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
						 bank,
						 km->record_indexes[bank] + 1,
						 km->flow_type);
		}
		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
	}

	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
				   km->cam_paired ? 2 : 1);

	return res;
}
+
/*
 * Zero this flow's CAM record(s) in `bank`, release the owner slot(s) in
 * cam_dist, and flush to HW - the inverse of cam_populate().
 * Returns 0 on success, the OR of HW write results otherwise.
 */
static int cam_reset_entry(struct km_flow_def_s *km, int bank)
{
	int res = 0;
	/* same word count as was programmed: key words + optional info word */
	int cnt = km->key_word_size + !!km->info_set;

	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
			i++, cnt--) {
		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
					 km->record_indexes[bank], 0);
		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
					 km->record_indexes[bank], 0);
	}
	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;

	if (cnt) {
		/* paired entry - also clear the second consecutive record */
		assert(km->cam_paired);
		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
				i++, cnt--) {
			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
						 km->record_indexes[bank] + 1,
						 0);
			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
						 bank,
						 km->record_indexes[bank] + 1,
						 0);
		}
		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
	}
	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
				   km->cam_paired ? 2 : 1);
	return res;
}
+
/*
 * Try to move the CAM entry currently owned by `km` from its present bank
 * to any other bank whose hash-selected record (and, for paired entries,
 * the record after it) is free.
 * Returns 1 when the entry was moved (and *cuckoo_moves incremented),
 * 0 when no free slot exists or the HW write failed.
 */
static int move_cuckoo_index(struct km_flow_def_s *km)
{
	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);

	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
		/* It will not select itself */
		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
			if (km->cam_paired) {
				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
						.km_owner != NULL)
					continue;
			}

			/*
			 * Populate in new position
			 */
			int res = cam_populate(km, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank
			 * HW flushes are really not needed, the old addresses are always taken over
			 * by the caller
			 * If you change this code in future updates, this may no longer be true
			 * then!
			 */
			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
				NULL;
			if (km->cam_paired)
				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
				.km_owner = NULL;

			NT_LOG(DBG, FILTER,
			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       km->bank_used, bank,
			       CAM_KM_DIST_IDX(km->bank_used),
			       CAM_KM_DIST_IDX(bank));
			km->bank_used = bank;
			(*km->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Recursive cuckoo displacement: try to relocate the flow occupying the
 * CAM slot `bank_idx` (a flat cam_dist index) so the slot becomes free for
 * km_parent, descending at most `levels` steps. The cam_addr_reserved_stack
 * of length `cam_adr_list_len` records slots already claimed along the
 * current path so the recursion never revisits them.
 * Returns 1 when the occupying flow was moved away, 0 otherwise.
 */
static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
				   int bank_idx, int levels,
				   int cam_adr_list_len)
{
	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;

	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);

	/*
	 * Only move if same pairness
	 * Can be extended later to handle both move of paired and single entries
	 */
	if (!km || km_parent->cam_paired != km->cam_paired)
		return 0;

	if (move_cuckoo_index(km))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);

	/* reserve this slot on the path before descending */
	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
		int reserved = 0;
		int new_idx = CAM_KM_DIST_IDX(i);

		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
						  cam_adr_list_len);
		if (res) {
			/* deeper move freed a slot - this flow must now fit */
			if (move_cuckoo_index(km))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/*
 * Program this flow's key into the KM CAM: hash the key to get one
 * candidate record per bank, take the first bank whose record(s) are free,
 * otherwise try cuckoo-moving existing entries (depth 4) to make room.
 * On success sets flushed_to_target and bank_used.
 * Returns 0 on success, -1 when no room could be made, or a HW error code.
 */
static int km_write_data_to_cam(struct km_flow_def_s *km)
{
	int res = 0;
	int val[MAX_BANKS];

	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
	assert(km->cam_dist);

	/* word list without info set */
	gethash(km->hsh, km->entry_word, val);

	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
		/* if paired we start always on an even address - reset bit 0 */
		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
	}
	/* NOTE(review): this log reads record_indexes[0..2] unconditionally -
	 * presumably nb_cam_banks is always >= 3 on supported FPGAs; confirm.
	 */
	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
	       km->record_indexes[0], km->record_indexes[1],
	       km->record_indexes[2]);

	if (km->info_set) {
		km->entry_word[km->key_word_size] =
			km->info; /* finally set info */
	}

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
			if (km->cam_paired == 0 ||
					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
					 .km_owner == NULL)) {
				bank = i_bank;
				break;
			}
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
				i_bank++) {
			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
						    4, 0)) {
				bank = i_bank;
				break;
			}
		}
	}
	if (bank < 0)
		return -1;

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
	       CAM_KM_DIST_IDX(bank));
	res = cam_populate(km, bank);
	if (res == 0) {
		km->flushed_to_target = 1;
		km->bank_used = bank;
	}

	return res;
}
+
+/*
+ * TCAM
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	/* Search record and start index for this flow */
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		if (tcam_find_free_record(km, km->start_offsets[bs_idx])) {
+			km->tcam_start_bank = km->start_offsets[bs_idx];
+			NT_LOG(DBG, FILTER,
+			       "Found space in TCAM start bank %i, record %i\n",
+			       km->tcam_start_bank, km->tcam_record);
+			return 0;
+		}
+	}
+	return -1;
+}
+
/*
 * Program one 32-bit key word of the flow into TCAM `bank` for `record`.
 *
 * The TCAM is organised as per-byte value bitmaps: for each of the word's
 * 4 bytes and each of the 256 possible byte values, the record's bit is set
 * when (value & mask-byte) equals the masked key byte, cleared otherwise.
 * On success the tcam_dist owner slot for (bank, record) is claimed.
 *
 * NOTE(review): `if (err) break;` only leaves the inner value loop - the
 * outer byte loop continues after an error; and a failing _get still leads
 * to all_recs being modified and written back. Verify this is acceptable.
 */
static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
			   uint32_t word, uint32_t mask)
{
	int err = 0;
	uint32_t all_recs[3];

	int rec_val = record / 32;
	int rec_bit_shft = record % 32;
	uint32_t rec_bit = (1 << rec_bit_shft);

	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);

	for (int byte = 0; byte < 4; byte++) {
		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
		/* calculate important value bits */
		a = a & a_m;

#ifdef FLOW_DEBUG
		if (a_m == 0) {
			NT_LOG(DBG, FILTER,
			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
			       bank, byte, rec_val, rec_bit);
		}
#endif

		for (int val = 0; val < 256; val++) {
			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
						  byte, val, all_recs);
			if ((val & a_m) == a) {
				all_recs[rec_val] |= rec_bit;
#ifdef FLOW_DEBUG
				if (a_m) {
					NT_LOG(DBG, FILTER,
					       "bank %i, byte %i, val %i(%02x), "
					       "rec_val %i rec bit %08x\n",
					       bank, byte, val, val, rec_val,
					       rec_bit);
				}
#endif
			} else {
				all_recs[rec_val] &= ~rec_bit;
			}
			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
						  byte, val, all_recs);
			if (err)
				break;
		}
	}
	/* flush bank */
	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
	if (err == 0) {
		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
		       NULL);
		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
	}
	return err;
}
+
/*
 * Program this flow into the TCAM: find a record when reusing an existing
 * recipe (tcam_record < 0), write the TCI (color + flow type) for the start
 * bank/record, then write each key word into its consecutive bank.
 * On success sets flushed_to_target.
 * Returns 0 on success, -1 when no record is free, or a HW error code.
 */
static int km_write_data_to_tcam(struct km_flow_def_s *km)
{
	int err = 0;

	if (km->tcam_record < 0) {
		/* recipe reused from another flow - record not yet chosen */
		tcam_find_free_record(km, km->tcam_start_bank);
		if (km->tcam_record < 0) {
			NT_LOG(DBG, FILTER,
			       "FAILED to find space in TCAM for flow\n");
			return -1;
		}
		NT_LOG(DBG, FILTER,
		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
		       km->tcam_start_bank, km->tcam_record);
	}

	/* Write KM_TCI */
	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
				 km->tcam_record, km->info);
	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
				 km->tcam_record, km->flow_type);
	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
				   1);

#ifdef FLOW_DEBUG
	km->be->iface->set_debug_mode(km->be->be_dev,
				      FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	/* one TCAM bank per key word, starting at tcam_start_bank */
	for (int i = 0; i < km->key_word_size && !err; i++) {
		err = tcam_write_word(km, km->tcam_start_bank + i,
				      km->tcam_record, km->entry_word[i],
				      km->entry_mask[i]);
	}
#ifdef FLOW_DEBUG
	km->be->iface->set_debug_mode(km->be->be_dev,
				      FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	if (err == 0)
		km->flushed_to_target = 1;

	return err;
}
+
+/*
+ * Clear one record's bit across all byte positions and values of a
+ * TCAM bank, flush the bank, and drop the record's ownership.
+ * Returns 0 on success, non-zero on backend error.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	/*
+	 * Use an unsigned literal: rec_bit_shft can be 31 and shifting a
+	 * signed int left by 31 is undefined behavior.
+	 */
+	uint32_t rec_bit = (1U << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove this km flow definition from the TCAM.
+ *
+ * Clears the TCI (color and flow type) for the record and resets the
+ * record's bit in every bank that holds one of the entry's key words.
+ * NOTE(review): the hw_mod_km_tci_set/_flush return values are ignored
+ * here (unlike km_write_data_to_tcam) - confirm that is intentional.
+ * Returns 0 on success, negative on failure.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI */
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+			  km->tcam_record, 0);
+	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* One bank per 32-bit key word, same layout as the write path */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Make km a reference to an already flushed, identical match entry.
+ *
+ * km is appended to the reference chain of km1's root and inherits the
+ * hardware placement state from the current chain tail.
+ * Returns 0 on success, -1 for an unsupported target.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	int rc = 0;
+	struct km_flow_def_s *tail;
+
+	/* Root is km1's root, or km1 itself when km1 is the root. */
+	km->root = km1->root ? km1->root : km1;
+
+	/* Walk to the end of the reference chain and append km. */
+	for (tail = km1; tail->reference; tail = tail->reference)
+		;
+	tail->reference = km;
+
+	km->info = tail->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->cam_paired = tail->cam_paired;
+		km->bank_used = tail->bank_used;
+		km->flushed_to_target = tail->flushed_to_target;
+		break;
+	case KM_TCAM:
+		km->tcam_start_bank = tail->tcam_start_bank;
+		km->tcam_record = tail->tcam_record;
+		km->flushed_to_target = tail->flushed_to_target;
+		break;
+	case KM_SYNERGY:
+	default:
+		rc = -1;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Write a km match entry (with its color) to the selected target.
+ * Dispatches to the CAM or TCAM writer; returns the writer's result,
+ * or -1 for an unsupported target.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	if (km->target == KM_CAM)
+		return km_write_data_to_cam(km);
+	if (km->target == KM_TCAM)
+		return km_write_data_to_tcam(km);
+
+	/* KM_SYNERGY and unknown targets are not supported */
+	return -1;
+}
+
+/*
+ * Remove a km match entry, taking shared ownership into account.
+ *
+ * Three cases:
+ *  - the entry is a reference into another flow's chain: unlink it,
+ *  - the entry owns hardware state but has references: hand ownership
+ *    over to the first reference,
+ *  - the entry is the sole owner and was flushed: reset the hardware
+ *    entry (CAM or TCAM).
+ * Returns 0 on success, -1 for an unsupported target or reset failure.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* This entry references another owner - unlink from chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* Owner with references - first reference becomes the owner */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				/* a paired entry also owns the next slot */
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* Sole owner - clear the hardware entry */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* Tunnel database entry (node in the file-local "tunnels" list) */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;      /* tunnel match values */
+	struct tunnel_cfg_s cfg_mask; /* mask applied to cfg fields */
+	uint32_t flow_stat_id;        /* (uint32_t)-1 until assigned by flow code */
+	uint8_t vport;                /* allocated virtual port number */
+	int refcnt;                   /* users of this entry; freed at zero */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/* A port number is "virtual" when it lies inside the tunnel vport window. */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET)
+		return 0;
+	return virt_port < MAX_HW_VIRT_PORTS ? 1 : 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+static struct tunnel_s *tunnels;
+
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the first free virtual port from the vport window.
+ * Returns the port number, or 255 when all ports are taken.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	const unsigned int slots =
+		MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET;
+
+	/* scan the allocation map for the first free slot */
+	for (unsigned int slot = 0; slot < slots; slot++) {
+		if (vport[slot] == 0) {
+			vport[slot] = 1;
+			return (uint8_t)(slot + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual port.
+ * Returns 0 on success.
+ * NOTE(review): "return -1" is truncated to 255 by the uint8_t return
+ * type, so callers comparing the result against -1 will never match -
+ * confirm whether an int return type was intended.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Compare two values under the intersection of both masks.
+ * GNU statement expression; each argument is evaluated exactly once.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked compare of two IPv4 tunnel configs (addresses and UDP ports). */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked compare of two IPv6 tunnel configs (addresses and UDP ports). */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked comparison of a database tunnel against a candidate config.
+ * Returns non-zero when tunnel type and (v4 or v6) addresses/ports match.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up a tunnel in the database, or create it if not found.
+ *
+ * tun_set != 0 means this is a tunnel definition (exact-duplicate search
+ * and possible upgrade of a pre-configured entry); tun_set == 0 is a
+ * plain match search. A newly created entry keeps
+ * flow_stat_id == (uint32_t)-1 until the flow code assigns one.
+ * Returns the entry with its refcount bumped, or NULL when no virtual
+ * port or memory is available.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* do not leak the virtual port on OOM */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference to a tunnel; on the last reference the entry is
+ * unlinked from the database, its virtual port freed, and the memory
+ * released. Returns 0 on success, -1 if the tunnel is not in the list.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s **link;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the list link that points at this tunnel */
+	for (link = &tunnels; *link; link = &(*link)->next) {
+		if (*link == tnl)
+			break;
+	}
+
+	if (*link == NULL) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, unlink and free */
+	if (--tnl->refcnt == 0) {
+		*link = tnl->next;
+		flow_tunnel_free_virt_port(tnl->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tnl->vport);
+		free(tnl);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel outer header (ETH/IPv4/IPv6/UDP/VXLAN elements) into a
+ * tunnel_cfg_s and look the tunnel up in (or add it to) the database.
+ *
+ * vni == NULL marks a tunnel "set" command; otherwise the VXLAN VNI is
+ * returned through *vni ((uint32_t)-1 when absent). On success *idx is
+ * advanced past the consumed elements and the database entry returned;
+ * NULL on an invalid tunnel definition.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/*
+		 * The mask is compared and copied in tunnel_get(), so it
+		 * must not be left uninitialized.
+		 */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port number assigned to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/*
+ * Attach the flow statistics id to a tunnel. A value other than
+ * (uint32_t)-1 marks the entry as "defined and set" in tunnel_get().
+ */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy out the configuration of the tunnel with the given vport.
+ * flow_stat_id == (uint32_t)-1 acts as a wildcard; otherwise it must
+ * match as well. Returns 0 on success, -1 when no entry matches.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	const struct tunnel_s *tun;
+
+	for (tun = tunnels; tun; tun = tun->next) {
+		if (tun->vport != vport)
+			continue;
+		if (flow_stat_id != tun->flow_stat_id &&
+				flow_stat_id != (uint32_t)-1)
+			continue;
+		memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * 16-bit word sum over a byte region with end-around carry fold
+ * (RFC 1071 style partial checksum), seeded with "seed".
+ * NOTE(review): for odd sizes the final "(unsigned char)data[idx]"
+ * loads a whole 16-bit word at the boundary and keeps only its low
+ * byte - this reads one byte past the region and the byte selected is
+ * endian-dependent; confirm against the intended checksum layout.
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy "size" bytes of the element's spec (no mask applied). */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build the tunnel (VXLAN) encapsulation header from flow elements into
+ * fd->tun_hdr.
+ *
+ * Handles ETH, IPv4 or IPv6, UDP, VXLAN, PORT_ID and VOID elements,
+ * sanitizing header fields as needed (IP version/TTL/protocol, UDP
+ * checksum zeroed, VXLAN I-flag set) and finally precalculating the
+ * outer IPv4 header checksum seed.
+ * Returns 0 on success, -1 on an unsupported element or when IPv6
+ * encapsulation is requested on a too-old ROA module.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* fixed: byte 4 was printed as byte 5 twice */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Report whether the backend device exposes a CAT module. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_cat_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Allocate the CAT module shadow cache.
+ * Reads the module version and resource counts from the backend interface,
+ * validates them, then allocates the version-specific register shadow
+ * tables (v18, v21 or v22) in one callocate_mod() call.
+ * Returns 0 on success, negative on error.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Counts below must be strictly positive */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC sizes/banks and interface count may legitimately be zero,
+	 * so only negative values are rejected here.
+	 */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks are present */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * Allocate the shadow tables. The triplets passed to callocate_mod()
+	 * are (pointer, element count, element size); the leading integer is
+	 * the number of triplets and must match exactly.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		/* v21 doubles the FTE key count (2 -> 4) and reuses several
+		 * v18 structure layouts.
+		 */
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		/* v22 adds the CCE/CCS tables (14 triplets instead of 12) */
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT module shadow cache allocated by hw_mod_cat_alloc().
+ * Safe to call when nothing was allocated: free(NULL) is a no-op, so the
+ * NULL guard is unnecessary. The pointer is always reset to avoid a
+ * dangling reference and make repeated calls harmless.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset one CFN (categorizer function) entry to permissive defaults:
+ * first zero the entry, then open every protocol/error match field so the
+ * entry accepts all traffic until configured otherwise.
+ * NOTE(review): only the status of the initial PRESET_ALL call is captured
+ * in 'err'; the return values of the subsequent field writes are ignored —
+ * confirm this is intentional.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields below only exist from module version 21 on */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the complete CAT module: zero the shadow cache, then flush every
+ * table (CFN, KCE/KCS/FTE per KM interface, CTE, CTS, COT, CCT, EXO, RCK,
+ * LEN, and KCC/CCE/CCS where the version supports them) to hardware.
+ * Returns 0 on success, -1 on the first failing flush.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* v18 has a single KM interface; newer versions address the
+	 * configured interface ids m0/m1 explicitly.
+	 */
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* Second KM interface only when the FPGA exposes one */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC CAM is optional; kcc_size == 0 means not present */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist only in version 22 and later */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush CFN shadow entries [start_idx, start_idx + count) to hardware.
+ * ALL_ENTRIES flushes the whole table and requires start_idx == 0.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int n_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES) {
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		count = (int)n_entries;
+	} else if ((unsigned int)(start_idx + count) > n_entries) {
+		return error_index_too_large(__func__);
+	}
+
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Read or write one CFN field in the shadow cache.
+ * 'get' != 0 reads the field into *value; otherwise *value is written.
+ * 'word_off' selects a word for multi-word fields (e.g. PM_CMP).
+ * Dispatches on the module version, then on the field id. Returns 0 on
+ * success or a negative error for unknown fields/versions or bad indices.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a 2-word field */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		/* v21/v22 share the cat_v21_cfn_s layout and add COPY_FROM,
+		 * tunnel checksum/TTL error fields and a second KM OR field.
+		 */
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* Copy entry *value into entry 'index' */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a 2-word field */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field in the shadow cache. */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one CFN field from the shadow cache into *value. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, do_get);
+}
+
+/*
+ * Map a (interface selector, interface id) pair to the cache array index
+ * (0 or 1). Version 18 has a single interface and always maps to 0.
+ * KM_FLM_IF_SECOND may only resolve through km_if_m1; the first selector
+ * tries km_if_m0 first, then km_if_m1. Returns a negative error when the
+ * id matches neither configured interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num != KM_FLM_IF_SECOND && be->cat.km_if_m0 == km_if_id)
+		return 0;
+
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* Each KCE entry carries one enable bit per CFN, 8 CFNs at a time */
+	const unsigned int n_entries = be->cat.nb_cat_funcs / 8;
+
+	if (count == ALL_ENTRIES)
+		count = (int)n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	/* Resolve which KM/FLM interface instance is addressed */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries for the KM module (interface id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kce_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush KCE entries for the FLM module (interface id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kce_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Read or write one KCE field in the shadow cache.
+ * 'get' != 0 reads into *value; otherwise *value is written.
+ * v21/v22 keep one enable bitmap per KM interface, v18 only one.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCE field through the KM interface (id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read a KCE field through the KM interface (id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+/* Write a KCE field through the FLM interface (id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read a KCE field through the FLM interface (id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/*
+ * KCS
+ */
+/* Flush KCS shadow entries [start_idx, start_idx + count) to hardware. */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const unsigned int n_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = (int)n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	/* Resolve which KM/FLM interface instance is addressed */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries for the KM module (interface id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kcs_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush KCS entries for the FLM module (interface id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kcs_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Read or write one KCS field in the shadow cache.
+ * 'get' != 0 reads into *value; otherwise *value is written.
+ * v21/v22 keep one category value per KM interface, v18 only one.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCS field through the KM interface (id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read a KCS field through the KM interface (id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, do_get);
+}
+
+/* Write a KCS field through the FLM interface (id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read a KCS field through the FLM interface (id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int do_get = 1;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, do_get);
+}
+
+/*
+ * FTE
+ */
+/* Flush FTE shadow entries [start_idx, start_idx + count) to hardware. */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* Module versions from 20 on use 4 FTE keys per entry, older use 2 */
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+	const unsigned int n_entries =
+		be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt;
+
+	if (count == ALL_ENTRIES)
+		count = (int)n_entries;
+	if ((unsigned int)(start_idx + count) > n_entries)
+		return error_index_too_large(__func__);
+
+	/* Resolve which KM/FLM interface instance is addressed */
+	int km_if_idx = find_km_flm_module_interface_index(be, if_num,
+							   km_if_id);
+
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries for the KM module (interface id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_fte_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush FTE entries for the FLM module (interface id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_fte_flush(be, if_num, flm_module_id, start_idx,
+				    count);
+}
+
+/*
+ * Read or write one FTE field in the shadow cache.
+ * 'get' != 0 reads into *value; otherwise *value is written.
+ * v21/v22 keep one enable bitmap per KM interface, v18 only one.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	/* Module versions from 20 on use 4 FTE keys per entry, older use 2 */
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
/*
 * CTE: per category function enable bitmap for the post-processing
 * modules (see the cat_v18_cte_s bit-field union).
 */
int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	if (count == ALL_ENTRIES)
		count = be->cat.nb_cat_funcs;
	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
		return error_index_too_large(__func__);
	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
}

/* Read (get == 1) or write (get == 0) a single CTE field. */
static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_cat_funcs)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		/* v18/v21/v22 share the v18 CTE shadow layout */
		switch (field) {
		case HW_CAT_CTE_ENABLE_BM:
			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}

int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t value)
{
	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
}

int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t *value)
{
	return hw_mod_cat_cte_mod(be, field, index, value, 1);
}
+
/*
 * CTS: category test select table, addressed as
 * nb_cat_funcs * addr_size entries of (cat_a, cat_b) pairs.
 */
int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	/* NOTE(review): the `_VER_ < 15` fallback (fixed addr_size of 8) has
	 * no counterpart in hw_mod_cat_cts_mod() below, and only versions
	 * 18/21/22 are handled there — the branch looks like dead code;
	 * confirm before relying on it.
	 */
	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);

	if (count == ALL_ENTRIES)
		count = be->cat.nb_cat_funcs * addr_size;
	if ((unsigned int)(start_idx + count) >
			(be->cat.nb_cat_funcs * addr_size))
		return error_index_too_large(__func__);
	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
}

/* Read (get == 1) or write (get == 0) a single CTS field. */
static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	/* two categories are packed per address word, rounded up */
	int addr_size = (be->cat.cts_num + 1) / 2;

	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
		return error_index_too_large(__func__);

	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_CTS_CAT_A:
			get_set(&be->cat.v18.cts[index].cat_a, value, get);
			break;
		case HW_CAT_CTS_CAT_B:
			get_set(&be->cat.v18.cts[index].cat_b, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}

int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t value)
{
	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
}

int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t *value)
{
	return hw_mod_cat_cts_mod(be, field, index, value, 1);
}
+
/*
 * COT: color/KM table indexed by category (one entry per category,
 * up to be->max_categories).
 */
int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	if (count == ALL_ENTRIES)
		count = be->max_categories;
	if ((unsigned int)(start_idx + count) > be->max_categories)
		return error_index_too_large(__func__);
	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
}

/* Read (get == 1) or write (get == 0) a single COT field, or run one of
 * the whole-entry helpers (PRESET_ALL / COMPARE / FIND).
 */
static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	int rv = 0;
	if ((unsigned int)index >= be->max_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_COT_PRESET_ALL:
			/* write-only: fill the whole entry with the byte
			 * value in *value
			 */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->cat.v18.cot[index], (uint8_t)*value,
			       sizeof(struct cat_v18_cot_s));
			break;
		case HW_CAT_COT_COMPARE:
			/* compare entry at `index` with entry at `*value` */
			rv = do_compare_indexes(be->cat.v18.cot,
				sizeof(struct cat_v18_cot_s), index, *value,
				be->max_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_CAT_COT_FIND:
			/* find an entry equal to entry `index`; result index
			 * is returned through *value
			 */
			rv = find_equal_index(be->cat.v18.cot,
				sizeof(struct cat_v18_cot_s), index, *value,
				be->max_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_CAT_COT_COLOR:
			get_set(&be->cat.v18.cot[index].color, value, get);
			break;
		case HW_CAT_COT_KM:
			get_set(&be->cat.v18.cot[index].km, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}

int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t value)
{
	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
}

int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t *value)
{
	return hw_mod_cat_cot_mod(be, field, index, value, 1);
}
+
/*
 * CCT: color/KM table with 4 entries per category function
 * (nb_cat_funcs * 4 entries in total).
 */
int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	if (count == ALL_ENTRIES)
		count = be->cat.nb_cat_funcs * 4;
	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
		return error_index_too_large(__func__);
	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
}

/* Read (get == 1) or write (get == 0) a single CCT field. */
static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_CCT_COLOR:
			get_set(&be->cat.v18.cct[index].color, value, get);
			break;
		case HW_CAT_CCT_KM:
			get_set(&be->cat.v18.cct[index].km, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}

int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t value)
{
	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
}

int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t *value)
{
	return hw_mod_cat_cct_mod(be, field, index, value, 1);
}
+
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
/*
 * EXO: per pattern-matcher-extension entry holding a dyn selector and a
 * signed offset (presumably extractor offsets — see cat_v18_exo_s).
 */
int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	if (count == ALL_ENTRIES)
		count = be->cat.nb_pm_ext;
	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
		return error_index_too_large(__func__);
	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
}

/* Read (get == 1) or write (get == 0) a single EXO field. */
static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_pm_ext)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_EXO_DYN:
			get_set(&be->cat.v18.exo[index].dyn, value, get);
			break;
		case HW_CAT_EXO_OFS:
			/* ofs is int32_t, hence the signed variant */
			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}

int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t value)
{
	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
}

int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t *value)
{
	return hw_mod_cat_exo_mod(be, field, index, value, 1);
}
+
/*
 * RCK: table of 64 data words per pattern-matcher extension
 * (nb_pm_ext * 64 entries in total).
 */
int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	if (count == ALL_ENTRIES)
		count = be->cat.nb_pm_ext * 64;
	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
		return error_index_too_large(__func__);
	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
}

/* Read (get == 1) or write (get == 0) a single RCK data word. */
static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_RCK_DATA:
			get_set(&be->cat.v18.rck[index].rck_data, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}

int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t value)
{
	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
}

int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t *value)
{
	return hw_mod_cat_rck_mod(be, field, index, value, 1);
}
+
/*
 * LEN: length-test entries (lower/upper bounds, two dyn selectors and an
 * invert flag — see cat_v18_len_s), be->cat.nb_len entries.
 */
int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	if (count == ALL_ENTRIES)
		count = be->cat.nb_len;
	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
		return error_index_too_large(__func__);
	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
}

/* Read (get == 1) or write (get == 0) a single LEN field. */
static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_len)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_LEN_LOWER:
			get_set(&be->cat.v18.len[index].lower, value, get);
			break;
		case HW_CAT_LEN_UPPER:
			get_set(&be->cat.v18.len[index].upper, value, get);
			break;
		case HW_CAT_LEN_DYN1:
			get_set(&be->cat.v18.len[index].dyn1, value, get);
			break;
		case HW_CAT_LEN_DYN2:
			get_set(&be->cat.v18.len[index].dyn2, value, get);
			break;
		case HW_CAT_LEN_INV:
			get_set(&be->cat.v18.len[index].inv, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}

int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t value)
{
	return hw_mod_cat_len_mod(be, field, index, &value, 0);
}

int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
		       int index, uint32_t *value)
{
	return hw_mod_cat_len_mod(be, field, index, value, 1);
}
+
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
/* CFN: category function filter entry — protocol, error, port and
 * pattern-matcher test configuration for one category function.
 */
struct cat_v18_cfn_s {
	uint32_t enable;
	uint32_t inv;
	/* protocol checks */
	uint32_t ptc_inv;
	uint32_t ptc_isl;
	uint32_t ptc_cfp;
	uint32_t ptc_mac;
	uint32_t ptc_l2;
	uint32_t ptc_vntag;
	uint32_t ptc_vlan;
	uint32_t ptc_mpls;
	uint32_t ptc_l3;
	uint32_t ptc_frag;
	uint32_t ptc_ip_prot;
	uint32_t ptc_l4;
	uint32_t ptc_tunnel;
	uint32_t ptc_tnl_l2;
	uint32_t ptc_tnl_vlan;
	uint32_t ptc_tnl_mpls;
	uint32_t ptc_tnl_l3;
	uint32_t ptc_tnl_frag;
	uint32_t ptc_tnl_ip_prot;
	uint32_t ptc_tnl_l4;
	/* error checks */
	uint32_t err_inv;
	uint32_t err_cv;
	uint32_t err_fcs;
	uint32_t err_trunc;
	uint32_t err_l3_cs;
	uint32_t err_l4_cs;
	/* in port */
	uint32_t mac_port;
	/* pattern matcher */
	uint32_t pm_cmp[2];
	uint32_t pm_dct;
	uint32_t pm_ext_inv;
	uint32_t pm_cmb;
	uint32_t pm_and_inv;
	uint32_t pm_or_inv;
	uint32_t pm_inv;
	uint32_t lc;
	uint32_t lc_inv;
	uint32_t km_or;
};

/* KCE: KM category enable bitmap. */
struct cat_v18_kce_s {
	uint32_t enable_bm;
};

/* KCS: KM category select. */
struct cat_v18_kcs_s {
	uint32_t category;
};

/* FTE: flow type enable bitmap. */
struct cat_v18_fte_s {
	uint32_t enable_bm;
};

/* CTE: per-module enable bits, addressable as one bitmap word or as
 * individual bit-fields.
 */
struct cat_v18_cte_s {
	union {
		uint32_t enable_bm;
		struct {
			uint32_t col : 1;
			uint32_t cor : 1;
			uint32_t hsh : 1;
			uint32_t qsl : 1;
			uint32_t ipf : 1;
			uint32_t slc : 1;
			uint32_t pdb : 1;
			uint32_t msk : 1;
			uint32_t hst : 1;
			uint32_t epp : 1;
			uint32_t tpe : 1;
		} b;
	};
};

/* CTS: category test select pair. */
struct cat_v18_cts_s {
	uint32_t cat_a;
	uint32_t cat_b;
};

/* COT: color/KM output entry. */
struct cat_v18_cot_s {
	uint32_t color;
	uint32_t km;
};

/* CCT: color/KM entry (4 per category function). */
struct cat_v18_cct_s {
	uint32_t color;
	uint32_t km;
};

/* EXO: dyn selector plus signed byte offset. */
struct cat_v18_exo_s {
	uint32_t dyn;
	int32_t ofs;
};

/* RCK: one data word. */
struct cat_v18_rck_s {
	uint32_t rck_data;
};

/* LEN: length test — bounds, dyn selectors and invert flag. */
struct cat_v18_len_s {
	uint32_t lower;
	uint32_t upper;
	uint32_t dyn1;
	uint32_t dyn2;
	uint32_t inv;
};

/* KCC: CAM entry with a two-word key, category and id. */
struct cat_v18_kcc_s {
	uint32_t key[2];
	uint32_t category;
	uint32_t id;
};

/* Shadow-memory pointers for all CAT v18 tables; allocated by
 * hw_mod_cat_alloc() and written to hardware by the flush functions.
 */
struct hw_mod_cat_v18_s {
	struct cat_v18_cfn_s *cfn;
	struct cat_v18_kce_s *kce;
	struct cat_v18_kcs_s *kcs;
	struct cat_v18_fte_s *fte;
	struct cat_v18_cte_s *cte;
	struct cat_v18_cts_s *cts;
	struct cat_v18_cot_s *cot;
	struct cat_v18_cct_s *cct;
	struct cat_v18_exo_s *exo;
	struct cat_v18_rck_s *rck;
	struct cat_v18_len_s *len;
	struct cat_v18_kcc_s *kcc_cam;
};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
/* CFN (v21): extends the v18 layout with tunnel checksum / TTL error
 * checks and a second KM interface OR-term (km0_or/km1_or).
 */
struct cat_v21_cfn_s {
	uint32_t enable;
	uint32_t inv;
	/* protocol checks */
	uint32_t ptc_inv;
	uint32_t ptc_isl;
	uint32_t ptc_cfp;
	uint32_t ptc_mac;
	uint32_t ptc_l2;
	uint32_t ptc_vntag;
	uint32_t ptc_vlan;
	uint32_t ptc_mpls;
	uint32_t ptc_l3;
	uint32_t ptc_frag;
	uint32_t ptc_ip_prot;
	uint32_t ptc_l4;
	uint32_t ptc_tunnel;
	uint32_t ptc_tnl_l2;
	uint32_t ptc_tnl_vlan;
	uint32_t ptc_tnl_mpls;
	uint32_t ptc_tnl_l3;
	uint32_t ptc_tnl_frag;
	uint32_t ptc_tnl_ip_prot;
	uint32_t ptc_tnl_l4;
	/* error checks */
	uint32_t err_inv;
	uint32_t err_cv;
	uint32_t err_fcs;
	uint32_t err_trunc;
	uint32_t err_l3_cs;
	uint32_t err_l4_cs;
	uint32_t err_tnl_l3_cs;
	uint32_t err_tnl_l4_cs;
	uint32_t err_ttl_exp;
	uint32_t err_tnl_ttl_exp;
	/* in port */
	uint32_t mac_port;
	/* pattern matcher */
	uint32_t pm_cmp[2];
	uint32_t pm_dct;
	uint32_t pm_ext_inv;
	uint32_t pm_cmb;
	uint32_t pm_and_inv;
	uint32_t pm_or_inv;
	uint32_t pm_inv;
	uint32_t lc;
	uint32_t lc_inv;
	uint32_t km0_or;
	uint32_t km1_or;
};

/* KCE (v21): one enable bitmap per KM/FLM interface. */
struct cat_v21_kce_s {
	uint32_t enable_bm[2];
};

/* KCS (v21): one category select per KM/FLM interface. */
struct cat_v21_kcs_s {
	uint32_t category[2];
};

/* FTE (v21): one flow type enable bitmap per KM/FLM interface. */
struct cat_v21_fte_s {
	uint32_t enable_bm[2];
};

/* Shadow-memory pointers for CAT v21; CTE through KCC reuse the v18
 * entry layouts.
 */
struct hw_mod_cat_v21_s {
	struct cat_v21_cfn_s *cfn;
	struct cat_v21_kce_s *kce;
	struct cat_v21_kcs_s *kcs;
	struct cat_v21_fte_s *fte;
	struct cat_v18_cte_s *cte;
	struct cat_v18_cts_s *cts;
	struct cat_v18_cot_s *cot;
	struct cat_v18_cct_s *cct;
	struct cat_v18_exo_s *exo;
	struct cat_v18_rck_s *rck;
	struct cat_v18_len_s *len;
	struct cat_v18_kcc_s *kcc_cam;
};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
/* CTE (v22): v18 layout plus an rrb enable bit. */
struct cat_v22_cte_s {
	union {
		uint32_t enable_bm;
		struct {
			uint32_t col : 1;
			uint32_t cor : 1;
			uint32_t hsh : 1;
			uint32_t qsl : 1;
			uint32_t ipf : 1;
			uint32_t slc : 1;
			uint32_t pdb : 1;
			uint32_t msk : 1;
			uint32_t hst : 1;
			uint32_t epp : 1;
			uint32_t tpe : 1;
			uint32_t rrb : 1;
		} b;
	};
};

/* CCE (v22): immediate/indirect value pair. */
struct cat_v22_cce_s {
	uint32_t imm;
	uint32_t ind;
};

/* CCS (v22): per-module enable/value pairs plus three side-band
 * type/data slots.
 */
struct cat_v22_ccs_s {
	uint32_t cor_en;
	uint32_t cor;
	uint32_t hsh_en;
	uint32_t hsh;
	uint32_t qsl_en;
	uint32_t qsl;
	uint32_t ipf_en;
	uint32_t ipf;
	uint32_t slc_en;
	uint32_t slc;
	uint32_t pdb_en;
	uint32_t pdb;
	uint32_t msk_en;
	uint32_t msk;
	uint32_t hst_en;
	uint32_t hst;
	uint32_t epp_en;
	uint32_t epp;
	uint32_t tpe_en;
	uint32_t tpe;
	uint32_t rrb_en;
	uint32_t rrb;
	uint32_t sb0_type;
	uint32_t sb0_data;
	uint32_t sb1_type;
	uint32_t sb1_data;
	uint32_t sb2_type;
	uint32_t sb2_data;
};

/* Shadow-memory pointers for CAT v22; adds CCE and CCS tables on top of
 * the v21 layout.
 */
struct hw_mod_cat_v22_s {
	struct cat_v21_cfn_s *cfn;
	struct cat_v21_kce_s *kce; /* KCE 0/1 */
	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
	struct cat_v21_fte_s *fte; /* FTE 0/1 */
	struct cat_v22_cte_s *cte;
	struct cat_v18_cts_s *cts;
	struct cat_v18_cot_s *cot;
	struct cat_v18_cct_s *cct;
	struct cat_v18_exo_s *exo;
	struct cat_v18_rck_s *rck;
	struct cat_v18_len_s *len;
	struct cat_v18_kcc_s *kcc_cam;
	struct cat_v22_cce_s *cce;
	struct cat_v22_ccs_s *ccs;
};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Return true when the backend device exposes an FLM module. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Query the FLM module version and resource counts from the backend and
+ * allocate the version-specific register shadow cache (v17 or v20 layout).
+ *
+ * Returns 0 on success, a negative error when a resource count is invalid,
+ * the version is unsupported, or the cache allocation fails.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Every count must be strictly positive before sizing the cache */
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* Fixed copy-paste: report the pst_profiles resource, not flm_variant */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* v17: 26 register groups in one contiguous allocation */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20: the 26 v17 groups plus 12 additional statistics counters */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM register shadow cache allocated by hw_mod_flm_alloc().
+ * Safe to call when nothing was allocated.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op per the C standard, so no guard is needed */
+	free(be->flm.base);
+	be->flm.base = NULL; /* prevent double free on repeated calls */
+}
+
+/*
+ * Reset the FLM shadow cache to power-on defaults and push the relevant
+ * tables to hardware.  Always returns 0.
+ *
+ * NOTE(review): the individual flush return codes are discarded — confirm
+ * that a best-effort reset is intended here.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached CONTROL register group to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the cached CONTROL
+ * group.  HW_FLM_CONTROL_PRESET_ALL is write-only and fills the whole
+ * group with the low byte of *value.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CONTROL field into the shadow cache (flush separately). */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one CONTROL field from the shadow cache. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached STATUS group to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached STATUS group from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Read or write one field of the cached STATUS group. */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one STATUS field into the shadow cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one STATUS field from the shadow cache. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached TIMEOUT register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single TIMEOUT field (HW_FLM_TIMEOUT_T). */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the TIMEOUT field into the shadow cache. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the TIMEOUT field from the shadow cache. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached SCRUB register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) the single SCRUB field.
+ * Only HW_FLM_SCRUB_I exists for the supported versions 17 and 20.
+ */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	/* Guard clauses: unsupported version first, then unknown field */
+	if (_VER_ != 17 && _VER_ != 20)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+
+	if (field != HW_FLM_SCRUB_I)
+		return error_unsup_field(__func__);
+
+	get_set(&be->flm.v17.scrub->i, value, get);
+	return 0;
+}
+
+/* Write the SCRUB field into the shadow cache. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the SCRUB field from the shadow cache. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_BIN register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single LOAD_BIN field (HW_FLM_LOAD_BIN). */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_BIN field into the shadow cache. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_BIN field from the shadow cache. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_PPS (packets/s load) register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single LOAD_PPS field (HW_FLM_LOAD_PPS). */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_PPS field into the shadow cache. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_PPS field from the shadow cache. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_LPS (lookups/s load) register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single LOAD_LPS field (HW_FLM_LOAD_LPS). */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_LPS field into the shadow cache. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_LPS field from the shadow cache. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_APS (accesses/s load) register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single LOAD_APS field (HW_FLM_LOAD_APS). */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write the LOAD_APS field into the shadow cache. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the LOAD_APS field from the shadow cache. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached PRIO register group to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read or write one PRIO field: four (limit, flow-type) pairs
+ * selecting priority levels 0..3.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PRIO field into the shadow cache. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one PRIO field from the shadow cache. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush PST profiles [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes every profile.
+ * Returns the backend result, or an error for an out-of-range window.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	/* Reject negative indices explicitly: a negative start_idx whose sum
+	 * with count is small and non-negative would slip past the unsigned
+	 * range check below.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read or write one field of PST profile `index`.
+ * HW_FLM_PST_PRESET_ALL is write-only and memsets the whole entry.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PST field for profile `index` into the shadow cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one PST field for profile `index` from the shadow cache. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush RCP recipes [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes every category recipe.
+ * Returns the backend result, or an error for an out-of-range window.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	/* Reject negative indices explicitly: a negative start_idx whose sum
+	 * with count is small and non-negative would slip past the unsigned
+	 * range check below.
+	 */
+	if (start_idx < 0 || count < 0)
+		return error_index_too_large(__func__);
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read or write one field of RCP recipe `index`.
+ * HW_FLM_RCP_PRESET_ALL is write-only and memsets the whole entry;
+ * HW_FLM_RCP_MASK copies the entire 10-word mask array.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* value must point at 10 words; direction follows `get` */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write the full 10-word RCP mask array for recipe `index`.
+ * Only valid for the HW_FLM_RCP_MASK pseudo-field.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	/* A wrong field is a field error, not a version error */
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Write one scalar RCP field for recipe `index`.  The multi-word mask
+ * must go through hw_mod_flm_rcp_set_mask() instead.
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	/* A wrong field is a field error, not a version error */
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field (or the full mask) for recipe `index`. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached BUF_CTRL group (queue fill levels) from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field; these counters are hardware-owned, so get-only. */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public read accessor for BUF_CTRL fields. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh all cached FLM statistics counters from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one cached statistics counter.  The v17 counters are handled in the
+ * outer switch; fields added in v20 fall through to the inner switch, which
+ * is only reached when _VER_ >= 18 (i.e. the version-20 case of the outer
+ * switch).
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters; reject them on older versions */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, passed as 32-bit words) to
+ * the hardware learn queue.  Returns the backend flush result.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Fetch up to word_cnt 32-bit words of flow-info records into value.
+ *
+ * NOTE(review): unlike hw_mod_flm_lrn_data_set_flush(), the backend call's
+ * result is discarded and 0 is returned — confirm this is intentional.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Fetch one status record (flm_v17_sta_data_s, as 32-bit words) into value.
+ *
+ * NOTE(review): the backend call's result is discarded here as well.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..9b4ee1991e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/*
+ * Overlay for extracting two packed 28-bit member indices from the
+ * lrn_data mbr_idx byte array.
+ * NOTE(review): a + b + pad = 60 bits, not 64 — confirm the intended
+ * overlay width against the FLM_V17_MBR_IDx accessor macros.
+ */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+/* CONTROL register group: module enable/init plus per-event select bits. */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* STATUS register group: calibration/init progress and error flags. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* Flow timeout value. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* Scrub interval. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* Load indicator registers (bin, packets/s, lookups/s, accesses/s). */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* PRIO: four (limit, flow-type) pairs for priority levels 0..3. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* RCP recipe: key extraction selectors plus a 10-word (320-bit) mask. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* BUF_CTRL: hardware queue fill levels (learn free, info/status avail). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/* The following records mirror exact hardware wire layouts — keep packed. */
+#pragma pack(1)
+/* 768-bit learn record; bit positions are noted as high:low (width). */
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow info record read back from hardware. */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Flow status record read back from hardware. */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* One 32-bit hardware statistics counter per struct (v17 counter set). */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/* v17 register cache: pointers into one contiguous allocation (see alloc). */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20 statistics register shadows, added on top of the v17 set.
+ * Each struct mirrors one FPGA STAT_* register holding a single
+ * 32-bit event counter; event semantics per the FPGA spec.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;	/* STAT_STA_DONE counter */
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;	/* STAT_INF_DONE counter */
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;	/* STAT_INF_SKIP counter */
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;	/* STAT_PCK_HIT counter */
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;	/* STAT_PCK_MISS counter */
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;	/* STAT_PCK_UNH counter */
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;	/* STAT_PCK_DIS counter */
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;	/* STAT_CSH_HIT counter */
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;	/* STAT_CSH_MISS counter */
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;	/* STAT_CSH_UNH counter */
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;	/* STAT_CUC_START counter */
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;	/* STAT_CUC_MOVE counter */
+};
+
+/*
+ * Version 20 view of the FLM register cache. v20 keeps the full v17
+ * register layout (hence the flm_v17_* member types) and appends the
+ * new v20 statistics counters at the end. All pointers index into one
+ * shared backing allocation; none are individually owned.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	/* Counters introduced in FLM v20 */
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Return whether the HSH (hashing) module exists in the loaded FPGA image. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Query the FPGA for the HSH module version and allocate the
+ * version-specific register cache.
+ * Returns 0 on success, -1 on allocation failure, or the
+ * error_unsup_ver() code for an unsupported module version.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* Single backing allocation holding the RCP table cache */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the HSH register cache allocated by hw_mod_hsh_alloc(). */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	/* free() accepts NULL, so no guard is required */
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Clear the cached HSH state and push the (now all-zero) RCP table to
+ * hardware. Returns the flush result.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table. Fails when the range
+ * exceeds the table size.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single field of HSH recipe
+ * 'index' in the register cache. 'word_off' selects the word for the
+ * array-valued fields (MAC_PORT_MASK, WORD_MASK). Returns 0 on
+ * success or a negative error code.
+ *
+ * Bug fix: the word_off bounds checks used '>', which admitted an
+ * offset equal to the array size and read/wrote one element past the
+ * end of mac_port_mask[HSH_RCP_MAC_PORT_MASK_SIZE] and
+ * word_mask[HSH_RCP_WORD_MASK_SIZE]. They now use '>='.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/* mac_port_mask[] has exactly
+			 * HSH_RCP_MAC_PORT_MASK_SIZE words, so an offset
+			 * equal to the size is already out of bounds.
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* word_mask[] has exactly HSH_RCP_WORD_MASK_SIZE
+			 * words; reject word_off == size as well (was an
+			 * off-by-one out-of-bounds access).
+			 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of HSH recipe 'index' (array fields use word_off). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one field of HSH recipe 'index' into *value. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * Cache image of one HSH v5 recipe (RCP) register entry. Field names
+ * mirror the FPGA register fields; signed *_ofs members carry byte
+ * offsets that may be negative.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];	/* HSH_RCP_MAC_PORT_MASK_SIZE words */
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];	/* HSH_RCP_WORD_MASK_SIZE words */
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Version 5 view of the HSH register cache (RCP table only). */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Return whether the HST (header stripper) module exists in the FPGA image. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Query the FPGA for the HST module version and category count, then
+ * allocate the version-specific register cache.
+ * Returns 0 on success, -1 on allocation failure, or a module error
+ * code for bad resource counts / unsupported versions.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		/* Single backing allocation holding the RCP table cache */
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the HST register cache allocated by hw_mod_hst_alloc(). */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	/* free() accepts NULL, so no guard is required */
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Clear the cached HST state and push the (now all-zero) RCP table to
+ * hardware. Returns the flush result.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table. Fails when the range
+ * exceeds the table size.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single field of HST recipe
+ * 'index' in the register cache. Returns 0 on success or a negative
+ * error code.
+ *
+ * Bug fix: in the HW_HST_RCP_FIND case the return value of
+ * find_equal_index() was discarded, so the following 'if (rv != 0)'
+ * could never fire and lookup errors were silently swallowed. The
+ * result is now assigned to rv, matching every sibling module
+ * (see e.g. hw_mod_hsh_rcp_mod / hw_mod_ioa_rcp_mod).
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of HST recipe 'index'. Note: 'index' is int here but
+ * uint32_t in the worker - negative values convert to huge unsigned
+ * values and are rejected by the bounds check. */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of HST recipe 'index' into *value. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * Cache image of one HST v2 recipe (RCP) register entry. Field names
+ * mirror the FPGA register fields; the three modifN_* groups describe
+ * up to three header modifications per recipe.
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* Version 2 view of the HST register cache (RCP table only). */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Return whether the IOA module exists in the loaded FPGA image. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Query the FPGA for the IOA module version and resource counts, then
+ * allocate the version-specific register cache (RCP table, special
+ * TPID config and ROA EPP table in one backing allocation).
+ * Returns 0 on success, -1 on allocation failure, or a module error
+ * code for bad resource counts / unsupported versions.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the IOA register cache allocated by hw_mod_ioa_alloc(). */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	/* free() accepts NULL, so no guard is required */
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Clear the cached IOA state, program the default custom TPID values
+ * and push everything to hardware.
+ * NOTE(review): unlike the other module resets, the flush return
+ * codes are discarded and 0 is always returned - errors during reset
+ * go unnoticed; consider propagating them.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	/* Default custom TPID values (0x8200 / 0x8300) */
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	hw_mod_ioa_config_flush(be);
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table. Fails when the range
+ * exceeds the table size.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single field of IOA recipe
+ * 'index' in the register cache. For FIND/COMPARE, *value doubles as
+ * the second index to compare against. Returns 0 on success or a
+ * negative error code.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of IOA recipe 'index'. */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of IOA recipe 'index' into *value. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Push the cached special-TPID configuration to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one of the custom TPID configuration values in the cache.
+ * Cache-only: call hw_mod_ioa_config_flush() to write to hardware.
+ * Returns 0 on success or a negative error code.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached ROA EPP entries to
+ * hardware. count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single field of ROA EPP entry
+ * 'index' in the register cache. Returns 0 on success or a negative
+ * error code.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of ROA EPP entry 'index'. */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of ROA EPP entry 'index' into *value. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/*
+ * Cache image of one IOA v4 recipe (RCP) register entry. Field names
+ * mirror the FPGA register fields.
+ */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID configuration (see hw_mod_ioa_config_set()). */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* One ROA egress-path (EPP) table entry. */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* Version 4 view of the IOA register cache. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Return whether the KM (key matcher) module exists in the FPGA image. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Query the FPGA for the KM module version and all table geometries
+ * (categories, CAM banks/records/words, TCAM banks/width), then
+ * allocate the version-specific register caches in one backing
+ * allocation. Returns 0 on success, -1 on allocation failure, or a
+ * module error code for bad resource counts / unsupported versions.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		be->km.nb_km_rcp_mask_a_word_size = 12;
+		be->km.nb_km_rcp_mask_b_word_size = 6;
+		/* 4 * 256 per TCAM bank presumably reflects the bank
+		 * geometry - confirm against the FPGA spec. */
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the KM register cache allocated by hw_mod_km_alloc(). */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	/* free() accepts NULL, so no guard is required */
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Clear the cached KM state and push all-zero RCP, CAM, TCAM, TCI and
+ * TCQ tables to hardware.
+ * NOTE(review): the individual flush return codes are discarded and 0
+ * is always returned - errors during reset go unnoticed.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single field of KM recipe
+ * 'index' in the register cache. 'word_off' selects the word for the
+ * array-valued mask fields. Returns 0 on success or a negative error
+ * code.
+ *
+ * Bug fix: the word_off bounds checks for MASK_A / MASK_B used '>',
+ * admitting an offset equal to the declared array word size
+ * (KM_RCP_MASK_D_A_SIZE / KM_RCP_MASK_B_SIZE) and thus indexing one
+ * element past the end of mask_d_a[] / mask_b[]. They now use '>=',
+ * matching the constants' role as array sizes - confirm against the
+ * km_v7_rcp_s declaration.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/* mask_d_a[] holds KM_RCP_MASK_D_A_SIZE words, so
+			 * word_off == size is already out of bounds.
+			 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* mask_b[] holds KM_RCP_MASK_B_SIZE words. */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of a KM CAM entry in the
 * shadow cache. Changes only reach hardware on a later hw_mod_km_cam_flush().
 * Returns 0 on success, or a module error code on a bad bank/record,
 * unsupported field, or unsupported module version.
 */
static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
			     int bank, int record, uint32_t *value, int get)
{
	/* negative bank/record wraps to a huge unsigned value and is rejected */
	if ((unsigned int)bank >= be->km.nb_cam_banks)
		return error_index_too_large(__func__);
	if ((unsigned int)record >= be->km.nb_cam_records)
		return error_index_too_large(__func__);

	/* flat cache layout: bank-major, record-minor */
	unsigned int index = bank * be->km.nb_cam_records + record;

	switch (_VER_) {
	case 7:
		switch (field) {
		case HW_KM_CAM_PRESET_ALL:
			/* write-only: fill the whole entry with one byte pattern */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->km.v7.cam[index], (uint8_t)*value,
			       sizeof(struct km_v7_cam_s));
			break;
		case HW_KM_CAM_W0:
			get_set(&be->km.v7.cam[index].w0, value, get);
			break;
		case HW_KM_CAM_W1:
			get_set(&be->km.v7.cam[index].w1, value, get);
			break;
		case HW_KM_CAM_W2:
			get_set(&be->km.v7.cam[index].w2, value, get);
			break;
		case HW_KM_CAM_W3:
			get_set(&be->km.v7.cam[index].w3, value, get);
			break;
		case HW_KM_CAM_W4:
			get_set(&be->km.v7.cam[index].w4, value, get);
			break;
		case HW_KM_CAM_W5:
			get_set(&be->km.v7.cam[index].w5, value, get);
			break;
		case HW_KM_CAM_FT0:
			get_set(&be->km.v7.cam[index].ft0, value, get);
			break;
		case HW_KM_CAM_FT1:
			get_set(&be->km.v7.cam[index].ft1, value, get);
			break;
		case HW_KM_CAM_FT2:
			get_set(&be->km.v7.cam[index].ft2, value, get);
			break;
		case HW_KM_CAM_FT3:
			get_set(&be->km.v7.cam[index].ft3, value, get);
			break;
		case HW_KM_CAM_FT4:
			get_set(&be->km.v7.cam[index].ft4, value, get);
			break;
		case HW_KM_CAM_FT5:
			get_set(&be->km.v7.cam[index].ft5, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 7 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}
	return 0;
}
+
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
/*
 * Read or write one KM TCAM cache entry. Entries are addressed as
 * bank * 4 * 256 + byte * 256 + byte_val — presumably 4 byte positions of
 * 256 possible byte values per bank (TODO confirm against FPGA docs).
 * value_set always carries three 32-bit words (t[0..2]); the dirty flag
 * marks entries that still need a hardware flush.
 */
static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
			      int bank, int byte, int byte_val,
			      uint32_t *value_set, int get)
{
	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;

	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
		return error_index_too_large(__func__);

	switch (_VER_) {
	case 7:
		switch (field) {
		case HW_KM_TCAM_BANK_RESET:
			/* write-only: reset every entry of the bank and mark dirty */
			if (get)
				return error_unsup_field(__func__);
			{
				int start_idx = bank * 4 * 256;

				for (int i = 0; i < 4 * 256; i++) {
					be->km.v7.tcam[start_idx + i].t[0] =
						value_set[0];
					be->km.v7.tcam[start_idx + i].t[1] =
						value_set[1];
					be->km.v7.tcam[start_idx + i].t[2] =
						value_set[2];
					be->km.v7.tcam[start_idx + i].dirty = 1;
				}
			}
			break;
		case HW_KM_TCAM_T: {
			int index = bank * 4 * 256 + byte * 256 + byte_val;

			if (get) {
				value_set[0] = be->km.v7.tcam[index].t[0];
				value_set[1] = be->km.v7.tcam[index].t[1];
				value_set[2] = be->km.v7.tcam[index].t[2];
			} else {
				/* only change if any bits has to be changed */
				if (be->km.v7.tcam[index].t[0] !=
						value_set[0] ||
						be->km.v7.tcam[index].t[1] !=
						value_set[1] ||
						be->km.v7.tcam[index].t[2] !=
						value_set[2]) {
					be->km.v7.tcam[index].t[0] =
						value_set[0];
					be->km.v7.tcam[index].t[1] =
						value_set[1];
					be->km.v7.tcam[index].t[2] =
						value_set[2];
					be->km.v7.tcam[index].dirty = 1;
				}
			}
		}
		break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 7 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}
	return 0;
}
+
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef _HW_MOD_KM_V7_H_
#define _HW_MOD_KM_V7_H_

/*
 * Shadow-cache record layouts for the KM (key matcher) module, FPGA
 * version 7. One struct instance mirrors one hardware record; the
 * hw_mod_km_*_flush() functions write cached values to the device.
 */

/* One KM recipe: key-word extraction selectors plus bank/keyway setup.
 * The *_dyn/*_ofs pairs presumably select the source of each key word;
 * offsets are signed (int32_t) — TODO confirm against register docs. */
struct km_v7_rcp_s {
	uint32_t qw0_dyn;
	int32_t qw0_ofs;
	uint32_t qw0_sel_a;
	uint32_t qw0_sel_b;
	uint32_t qw4_dyn;
	int32_t qw4_ofs;
	uint32_t qw4_sel_a;
	uint32_t qw4_sel_b;
	uint32_t dw8_dyn;
	int32_t dw8_ofs;
	uint32_t dw8_sel_a;
	uint32_t dw8_sel_b;
	uint32_t dw10_dyn;
	int32_t dw10_ofs;
	uint32_t dw10_sel_a;
	uint32_t dw10_sel_b;
	uint32_t swx_cch;
	uint32_t swx_sel_a;
	uint32_t swx_sel_b;
	uint32_t mask_d_a[12];	/* mask words, indexed via HW_KM_RCP_MASK_A */
	uint32_t mask_b[6];	/* mask words, indexed via HW_KM_RCP_MASK_B */
	uint32_t dual;
	uint32_t paired;
	uint32_t el_a;
	uint32_t el_b;
	uint32_t info_a;
	uint32_t info_b;
	uint32_t ftm_a;
	uint32_t ftm_b;
	uint32_t bank_a;
	uint32_t bank_b;
	uint32_t kl_a;
	uint32_t kl_b;
	uint32_t keyway_a;
	uint32_t keyway_b;
	uint32_t synergy_mode;
	uint32_t dw0_b_dyn;
	int32_t dw0_b_ofs;
	uint32_t dw2_b_dyn;
	int32_t dw2_b_ofs;
	uint32_t sw4_b_dyn;
	int32_t sw4_b_ofs;
	uint32_t sw5_b_dyn;
	int32_t sw5_b_ofs;
};

/* One CAM entry: six match words and six flow-type fields. */
struct km_v7_cam_s {
	uint32_t w0;
	uint32_t w1;
	uint32_t w2;
	uint32_t w3;
	uint32_t w4;
	uint32_t w5;
	uint32_t ft0;
	uint32_t ft1;
	uint32_t ft2;
	uint32_t ft3;
	uint32_t ft4;
	uint32_t ft5;
};

/* One TCAM entry; dirty marks entries not yet flushed to hardware. */
struct km_v7_tcam_s {
	uint32_t t[3];
	uint32_t dirty;
};

/* TCAM color/flow-type info per bank-width slot. */
struct km_v7_tci_s {
	uint32_t color;
	uint32_t ft;
};

/* TCAM qualifier per bank-width slot. */
struct km_v7_tcq_s {
	uint32_t bank_mask;
	uint32_t qual;
};

/* Version-7 view of the KM shadow cache; arrays are allocated by
 * hw_mod_km_alloc() in one block. */
struct hw_mod_km_v7_s {
	struct km_v7_rcp_s *rcp;
	struct km_v7_cam_s *cam;
	struct km_v7_tcam_s *tcam;
	struct km_v7_tci_s *tci;
	struct km_v7_tcq_s *tcq;
};

#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
/*
 * Probe the PDB module version and resource counts from the backend and
 * allocate the matching shadow cache. Returns 0 on success, -1 on
 * allocation failure, or a module error code for a bad resource count or
 * unsupported version.
 */
int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
{
	int nb;

	_VER_ = be->iface->get_pdb_version(be->be_dev);
	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	nb = be->iface->get_nb_pdb_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;

	switch (_VER_) {
	case 9:
		/* one allocation for the rcp array plus a single config
		 * struct; presumably anchored at be->pdb.base (freed in
		 * hw_mod_pdb_free) — see callocate_mod */
		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
			&be->pdb.v9.rcp,
			be->pdb.nb_pdb_rcp_categories,
			sizeof(struct pdb_v9_rcp_s),
			&be->pdb.v9.config,
			1,
			sizeof(struct pdb_v9_config_s)))
			return -1;
		break;
	/* end case 9 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}
	return 0;
}
+
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of a PDB recipe in the
 * shadow cache; hw_mod_pdb_rcp_flush() pushes changes to hardware.
 * HW_PDB_RCP_FIND/COMPARE search or compare whole recipes via the shared
 * helpers. Returns 0 on success or a module error code.
 */
static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
			      enum hw_pdb_e field, uint32_t index,
			      uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->pdb.nb_pdb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 9:
		switch (field) {
		case HW_PDB_RCP_PRESET_ALL:
			/* write-only: fill the whole recipe with a byte pattern */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
			       sizeof(struct pdb_v9_rcp_s));
			break;
		case HW_PDB_RCP_FIND:
			rv = find_equal_index(be->pdb.v9.rcp,
				sizeof(struct pdb_v9_rcp_s), index, *value,
				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_PDB_RCP_COMPARE:
			rv = do_compare_indexes(be->pdb.v9.rcp,
				sizeof(struct pdb_v9_rcp_s), index, *value,
				be->pdb.nb_pdb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_PDB_RCP_DESCRIPTOR:
			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
			break;
		case HW_PDB_RCP_DESC_LEN:
			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
			break;
		case HW_PDB_RCP_TX_PORT:
			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
			break;
		case HW_PDB_RCP_TX_IGNORE:
			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
			break;
		case HW_PDB_RCP_TX_NOW:
			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
			break;
		case HW_PDB_RCP_CRC_OVERWRITE:
			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
			break;
		case HW_PDB_RCP_ALIGN:
			get_set(&be->pdb.v9.rcp[index].align, value, get);
			break;
		case HW_PDB_RCP_OFS0_DYN:
			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
			break;
		case HW_PDB_RCP_OFS0_REL:
			/* *_REL offsets are signed in the cache (int32_t) */
			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
			break;
		case HW_PDB_RCP_OFS1_DYN:
			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
			break;
		case HW_PDB_RCP_OFS1_REL:
			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
			break;
		case HW_PDB_RCP_OFS2_DYN:
			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
			break;
		case HW_PDB_RCP_OFS2_REL:
			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
			break;
		case HW_PDB_RCP_IP_PROT_TNL:
			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
			break;
		case HW_PDB_RCP_PPC_HSH:
			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
			break;
		case HW_PDB_RCP_DUPLICATE_EN:
			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
			break;
		case HW_PDB_RCP_DUPLICATE_BIT:
			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
			break;
		case HW_PDB_RCP_PCAP_KEEP_FCS:
			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 9 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef _HW_MOD_PDB_V9_H_
#define _HW_MOD_PDB_V9_H_

/* Shadow-cache record for one PDB (packet descriptor builder) recipe,
 * FPGA module version 9. Fields presumably mirror the hardware register
 * fields of the same names; *_rel offsets are signed, hence int32_t. */
struct pdb_v9_rcp_s {
	uint32_t descriptor;
	uint32_t desc_len;
	uint32_t tx_port;
	uint32_t tx_ignore;
	uint32_t tx_now;
	uint32_t crc_overwrite;
	uint32_t align;
	uint32_t ofs0_dyn;
	int32_t ofs0_rel;
	uint32_t ofs1_dyn;
	int32_t ofs1_rel;
	uint32_t ofs2_dyn;
	int32_t ofs2_rel;
	uint32_t ip_prot_tnl;
	uint32_t ppc_hsh;
	uint32_t duplicate_en;
	uint32_t duplicate_bit;
	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
};

/* Global PDB configuration; a single instance per device. */
struct pdb_v9_config_s {
	uint32_t ts_format;
	uint32_t port_ofs;
};

/* Version-9 view of the PDB shadow cache; pointers index into the block
 * allocated by hw_mod_pdb_alloc(). */
struct hw_mod_pdb_v9_s {
	struct pdb_v9_rcp_s *rcp;
	struct pdb_v9_config_s *config;
};

#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
/*
 * Probe the QSL module version and resource counts from the backend and
 * allocate the matching shadow cache (RCP, QST, QEN and UNMQ tables).
 * Returns 0 on success, -1 on allocation failure, or a module error code
 * for a bad resource count or unsupported version.
 */
int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
{
	int nb;

	_VER_ = be->iface->get_qsl_version(be->be_dev);
	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	nb = be->iface->get_nb_qsl_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
	be->qsl.nb_rcp_categories = (uint32_t)nb;

	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
	be->qsl.nb_qst_entries = (uint32_t)nb;

	switch (_VER_) {
	case 7:
		/* QEN/UNMQ table sizes are fixed by hardware, not probed */
		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
			&be->qsl.v7.rcp,
			be->qsl.nb_rcp_categories,
			sizeof(struct qsl_v7_rcp_s),
			&be->qsl.v7.qst,
			be->qsl.nb_qst_entries,
			sizeof(struct qsl_v7_qst_s),
			&be->qsl.v7.qen,
			QSL_QEN_ENTRIES,
			sizeof(struct qsl_v7_qen_s),
			&be->qsl.v7.unmq,
			QSL_QNMQ_ENTRIES,
			sizeof(struct qsl_v7_unmq_s)))
			return -1;
		break;
	/* end case 7 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}
	return 0;
}
+
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0, 256);
+
+	return 0;
+}
+
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of a QSL recipe in the
 * shadow cache; hw_mod_qsl_rcp_flush() pushes changes to hardware.
 * HW_QSL_RCP_FIND/COMPARE search or compare whole recipes via the shared
 * helpers. Returns 0 on success or a module error code.
 */
static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
			      enum hw_qsl_e field, uint32_t index,
			      uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->qsl.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 7:
		switch (field) {
		case HW_QSL_RCP_PRESET_ALL:
			/* write-only: fill the whole recipe with a byte pattern */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
			       sizeof(struct qsl_v7_rcp_s));
			break;
		case HW_QSL_RCP_FIND:
			rv = find_equal_index(be->qsl.v7.rcp,
				sizeof(struct qsl_v7_rcp_s), index, *value,
				be->qsl.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_QSL_RCP_COMPARE:
			rv = do_compare_indexes(be->qsl.v7.rcp,
				sizeof(struct qsl_v7_rcp_s), index, *value,
				be->qsl.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_QSL_RCP_DISCARD:
			get_set(&be->qsl.v7.rcp[index].discard, value, get);
			break;
		case HW_QSL_RCP_DROP:
			get_set(&be->qsl.v7.rcp[index].drop, value, get);
			break;
		case HW_QSL_RCP_TBL_LO:
			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
			break;
		case HW_QSL_RCP_TBL_HI:
			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
			break;
		case HW_QSL_RCP_TBL_IDX:
			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
			break;
		case HW_QSL_RCP_TBL_MSK:
			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
			break;
		case HW_QSL_RCP_LR:
			get_set(&be->qsl.v7.rcp[index].lr, value, get);
			break;
		case HW_QSL_RCP_TSA:
			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
			break;
		case HW_QSL_RCP_VLI:
			get_set(&be->qsl.v7.rcp[index].vli, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 7 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of a QSL queue-select-table
 * entry in the shadow cache; hw_mod_qsl_qst_flush() pushes changes to
 * hardware. Returns 0 on success or a module error code.
 */
static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
			      enum hw_qsl_e field, uint32_t index,
			      uint32_t *value, int get)
{
	if (index >= be->qsl.nb_qst_entries)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 7:
		switch (field) {
		case HW_QSL_QST_PRESET_ALL:
			/* write-only: fill the whole entry with a byte pattern */
			if (get)
				return error_unsup_field(__func__);
			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
			       sizeof(struct qsl_v7_qst_s));
			break;
		case HW_QSL_QST_QUEUE:
			get_set(&be->qsl.v7.qst[index].queue, value, get);
			break;
		case HW_QSL_QST_EN:
			get_set(&be->qsl.v7.qst[index].en, value, get);
			break;
		case HW_QSL_QST_TX_PORT:
			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
			break;
		case HW_QSL_QST_LRE:
			get_set(&be->qsl.v7.qst[index].lre, value, get);
			break;
		case HW_QSL_QST_TCI:
			get_set(&be->qsl.v7.qst[index].tci, value, get);
			break;
		case HW_QSL_QST_VEN:
			get_set(&be->qsl.v7.qst[index].ven, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 7 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef _HW_MOD_QSL_V7_H_
#define _HW_MOD_QSL_V7_H_

/* Shadow-cache record for one QSL (queue select logic) recipe,
 * FPGA module version 7. Fields presumably mirror the hardware register
 * fields of the same names. */
struct qsl_v7_rcp_s {
	uint32_t discard;
	uint32_t drop;
	uint32_t tbl_lo;
	uint32_t tbl_hi;
	uint32_t tbl_idx;
	uint32_t tbl_msk;
	uint32_t lr;
	uint32_t tsa;
	uint32_t vli;
};

/* One queue-select-table entry. */
struct qsl_v7_qst_s {
	uint32_t queue;
	uint32_t en;
	uint32_t tx_port;
	uint32_t lre;
	uint32_t tci;
	uint32_t ven;
};

/* One queue-enable flag (QSL_QEN_ENTRIES-sized table). */
struct qsl_v7_qen_s {
	uint32_t en;
};

/* Destination queue for unmatched packets (QSL_QNMQ_ENTRIES-sized table). */
struct qsl_v7_unmq_s {
	uint32_t dest_queue;
	uint32_t en;
};

/* Version-7 view of the QSL shadow cache; arrays are allocated by
 * hw_mod_qsl_alloc() in one block. */
struct hw_mod_qsl_v7_s {
	struct qsl_v7_rcp_s *rcp;
	struct qsl_v7_qst_s *qst;
	struct qsl_v7_qen_s *qen;
	struct qsl_v7_unmq_s *unmq;
};

#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
/*
 * Probe the RMC module version from the backend and allocate the shadow
 * cache (a single control record). Returns 0 on success, -1 on allocation
 * failure, or a module error code for unsupported versions.
 */
int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
{
	_VER_ = be->iface->get_rmc_version(be->be_dev);
	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	switch (_VER_) {
	case 0x10003:	/* version 1.3 (major in the upper 16 bits) */
		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
			return -1;
		break;
	/* end case 1_3 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of the single RMC control
 * record in the shadow cache; hw_mod_rmc_ctrl_flush() pushes changes to
 * hardware. Returns 0 on success or a module error code.
 */
static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
			       enum hw_rmc_e field, uint32_t *value, int get)
{
	switch (_VER_) {
	case 0x10003:	/* version 1.3 */
		switch (field) {
		case HW_RMC_BLOCK_STATT:
			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
			break;
		case HW_RMC_BLOCK_KEEPA:
			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
			break;
		case HW_RMC_BLOCK_RPP_SLICE:
			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
			break;
		case HW_RMC_BLOCK_MAC_PORT:
			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
			break;
		case HW_RMC_LAG_PHY_ODD_EVEN:
			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1.3 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Shadow of the RMC CTRL register, module version 1.3. */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;		/* block statistics records */
+	uint32_t block_keepa;		/* block keep-alive records */
+	uint32_t block_rpp_slice;	/* per-RPP-slice block mask */
+	uint32_t block_mac_port;	/* per-MAC-port block mask */
+	uint32_t lag_phy_odd_even;	/* LAG PHY odd/even setting — TODO confirm semantics */
+};
+
+/* Version-1.3 view of the RMC module cache: a single CTRL record. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend for presence of the ROA module in this FPGA. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query ROA module version and resource counts from the backend and
+ * allocate the shadow-cache tables for the detected version.
+ * Returns 0 on success, negative on unsupported version/resource count
+ * or allocation failure.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): one tunnel record appears to span 4 categories
+	 * (tunnel_hdr[] holds 4x4 words) — confirm against HW spec.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the ROA shadow cache; free(NULL) is a no-op, so no guard needed. */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Reset the ROA shadow cache to defaults and flush every table to hardware.
+ * All flush/set results are OR-ed into the return value (matching
+ * hw_mod_tpe_reset) so a failure in any table is reported; previously only
+ * the TUNHDR flush result was checked.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush TUNHDR entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one 32-bit word of a cached tunnel header.
+ * NOTE(review): word_off is not range-checked here for HW_ROA_TUNHDR;
+ * callers must keep it below ARRAY elements of tunnel_hdr[] (16 words).
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one tunnel-header word into the shadow cache (flush separately). */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one tunnel-header word from the shadow cache. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush TUNCFG entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one TUNCFG field of entry `index` in the shadow cache.
+ * PRESET_ALL memsets the whole entry with the low byte of *value;
+ * FIND/COMPARE use the shared index-search helpers over the whole table.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one TUNCFG field into the shadow cache (flush separately). */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one TUNCFG field from the shadow cache. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Write the cached ROA CONFIG record through the backend to hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the single cached
+ * ROA CONFIG record.
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CONFIG field into the shadow cache (flush separately). */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Read one CONFIG field from the shadow cache. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush LAGCFG entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->roa.nb_lag_entries;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/* Read or write the tx PHY port of one cached LAGCFG entry. */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LAGCFG field into the shadow cache (flush separately). */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one LAGCFG field from the shadow cache. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* One cached tunnel header: 4 x 4 32-bit words (64 bytes) of raw header. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel push/recirculate configuration (ROA v6). */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;		/* tunnel header length */
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;		/* update IP checksum */
+	uint32_t ipcs_precalc;		/* precalculated IP checksum */
+	uint32_t iptl_upd;		/* update IP total length */
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;	/* update VXLAN UDP length */
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Global ROA forwarding configuration (single record). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry: target tx PHY port. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Version-6 view of the ROA module shadow cache. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend for presence of the SLC (slicer) module. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query SLC module version and allocate its RCP shadow table
+ * (one entry per category).  Returns 0 on success.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC shadow cache; free(NULL) is a no-op, so no guard needed. */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Zero the SLC shadow cache and flush every RCP entry to hardware. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush SLC RCP entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int nb = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read or write one SLC RCP field in the shadow cache.
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_v1_s) — the container holding only the rcp
+ * pointer — instead of the table-element size sizeof(struct slc_v1_rcp_s).
+ * That memset too few bytes and made the search/compare helpers walk the
+ * table with a wrong stride (cf. the ROA/TPE modules, which use the
+ * element struct).
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is signed */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC RCP field into the shadow cache (flush separately). */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC RCP field from the shadow cache. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend for presence of the SLC LR (slicer, long range) module. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query SLC LR module version and allocate its RCP shadow table
+ * (one entry per category).  Returns 0 on success.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC LR shadow cache; free(NULL) is a no-op, so no guard needed. */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Zero the SLC LR shadow cache and flush every RCP entry to hardware. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush SLC LR RCP entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const unsigned int nb = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one SLC LR RCP field in the shadow cache.
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_lr_v2_s) — the container holding only the rcp
+ * pointer — instead of the table-element size
+ * sizeof(struct slc_lr_v2_rcp_s), so memset covered too few bytes and the
+ * search/compare helpers used a wrong stride (cf. the ROA/TPE modules).
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is signed */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC LR RCP field into the shadow cache (flush separately). */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC LR RCP field from the shadow cache. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* Shadow of one SLC LR RCP table entry, module version 2. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version-2 view of the SLC LR module shadow cache. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* Shadow of one SLC RCP table entry, module version 1. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* Version-1 view of the SLC module shadow cache. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend for presence of the TPE (TX packet editor) module. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query TPE module version and resource counts from the backend and
+ * allocate the shadow-cache tables for the detected version.  Version 2
+ * adds the IFR (interframe) tables.  Returns 0 on success, negative on
+ * unsupported version/resource count or allocation failure.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	/* IFR tables exist only from version 2 on */
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the TPE shadow cache; free(NULL) is a no-op, so no guard needed. */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Zero the TPE shadow cache and flush all tables to hardware.
+ * Flush results are OR-ed so any failure is reported in the return value.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* IFR tables exist only in version 2 */
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush RPP_IFR RCP entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	const unsigned int nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/*
+ * Read or write one RPP_IFR RCP field (enable / MTU) in the shadow cache.
+ * Only available from TPE version 2.
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP_IFR RCP field into the shadow cache (flush separately). */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP_IFR RCP field from the shadow cache. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush RPP RCP entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const unsigned int nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one RPP RCP field in the shadow cache.  The v1 table
+ * layout is shared by versions 1 and 2 (v2 aliases the v1 union member).
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP RCP field into the shadow cache (flush separately). */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP RCP field from the shadow cache. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush IFR RCP entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const unsigned int nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one IFR RCP field (enable / MTU) in the shadow cache.
+ * Same field set as RPP_IFR but targets the ifr_rcp table.  Version 2 only.
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IFR RCP field into the shadow cache (flush separately). */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IFR RCP field from the shadow cache. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+/* Flush INS RCP entries [start_idx, start_idx + count) to the backend. */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const unsigned int nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one INS (inserter) RCP field in the shadow cache.
+ * The v1 table layout is shared by versions 1 and 2.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one INS RCP field into the shadow cache (flush separately). */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one INS RCP field from the shadow cache. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush RPL_RCP records [start_idx, start_idx + count) through the backend
+ * tpe_rpl_rcp_flush callback.  count == ALL_ENTRIES selects every RCP
+ * category.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/* Unsigned compare also rejects a negative start_idx. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one RPL_RCP field of record 'index'.
+ * HW_TPE_PRESET_ALL (set only) byte-fills the record; HW_TPE_FIND /
+ * HW_TPE_COMPARE operate across the record array.  Module versions 1 and 2
+ * only.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL_RCP field of record 'index' to 'value'. */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL_RCP field of record 'index' into *value. */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush RPL_EXT records [start_idx, start_idx + count) through the backend
+ * tpe_rpl_ext_flush callback.  count == ALL_ENTRIES selects every RPL
+ * extension category.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	/* Unsigned compare also rejects a negative start_idx. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one RPL_EXT field of record 'index'.  Bounds are
+ * checked against nb_rpl_ext_categories.  HW_TPE_PRESET_ALL (set only)
+ * byte-fills the record; HW_TPE_FIND / HW_TPE_COMPARE operate across the
+ * record array.  Module versions 1 and 2 only.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			/* meta_rpl_len is SW-only bookkeeping (see struct). */
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL_EXT field of record 'index' to 'value'. */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL_EXT field of record 'index' into *value. */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush RPL replace-data records [start_idx, start_idx + count) through the
+ * backend tpe_rpl_rpl_flush callback.  count == ALL_ENTRIES selects the full
+ * replace-table depth.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	/* Unsigned compare also rejects a negative start_idx. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one RPL_RPL field of record 'index'.  The VALUE
+ * field is 4 x 32-bit words, so 'value' must point to at least 4 words for
+ * HW_TPE_RPL_RPL_VALUE.  Module versions 1 and 2 only.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* 128-bit payload: copy all 4 words in either direction. */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set one RPL_RPL field.  Note: unlike the other TPE setters this takes a
+ * pointer, because HW_TPE_RPL_RPL_VALUE is a 4-word (128-bit) field.
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Get one RPL_RPL field of record 'index' into *value (4 words for VALUE). */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush CPY_RCP records [start_idx, start_idx + count) through the backend
+ * tpe_cpy_rcp_flush callback.  The table is nb_cpy_writers * nb_rcp_categories
+ * entries deep; count == ALL_ENTRIES selects all of it.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	/* Unsigned compare also rejects a negative start_idx. */
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one CPY_RCP field of record 'index'.  The index
+ * space spans all copy writers (nb_cpy_writers * nb_rcp_categories).
+ * Module versions 1 and 2 only.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CPY_RCP field of record 'index' to 'value'. */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CPY_RCP field of record 'index' into *value. */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush HFU_RCP records [start_idx, start_idx + count) through the backend
+ * tpe_hfu_rcp_flush callback.  count == ALL_ENTRIES selects every RCP
+ * category.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/* Unsigned compare also rejects a negative start_idx. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one HFU_RCP field of record 'index'.  Fields are
+ * grouped as length updaters A/B/C, the TTL updater, and protocol/offset
+ * information (see struct tpe_v1_hfu_v1_rcp_s).  Module versions 1 and 2
+ * only.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Length updater A. */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* Length updater B. */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* Length updater C. */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL updater. */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* Protocol info and header offsets. */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HFU_RCP field of record 'index' to 'value'. */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one HFU_RCP field of record 'index' into *value. */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush CSU_RCP records [start_idx, start_idx + count) through the backend
+ * tpe_csu_rcp_flush callback.  count == ALL_ENTRIES selects every RCP
+ * category.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/* Unsigned compare also rejects a negative start_idx. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write one CSU_RCP field of record 'index' — the
+ * checksum commands for outer/inner L3/L4 headers.  Module versions 1 and 2
+ * only.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CSU_RCP field of record 'index' to 'value'. */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CSU_RCP field of record 'index' into *value. */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Shadow-record layouts for the TPE (TX packet editor) module, version 1.
+ * One struct per sub-module recipe; hw_mod_tpe.c keeps arrays of these and
+ * flushes them to the FPGA through the backend interface.
+ */
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/* RPP v0 recipe. */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS v1 recipe: dynamic-offset selector, byte offset and length. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL v2 recipe: position/length plus pointer into the replace table. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL v2 extension record. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL v2 replace data: one 128-bit (4 x 32-bit) entry. */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY v1 recipe: reader select plus copy position/length. */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/*
+ * HFU v1 recipe: three length updaters (A/B/C — only A has the
+ * outer-L4-length option), a TTL updater, and protocol/offset info.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU v0 recipe: checksum commands for outer/inner L3 and L4. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* Per-sub-module record arrays for TPE version 1 (allocated elsewhere). */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * TPE module version 2: all version-1 records plus the IFR (fragmentation)
+ * recipes.  Requires hw_mod_tpe_v1.h for the shared struct definitions.
+ */
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP v1 IFR recipe: enable flag and MTU. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR v1 recipe: enable flag and MTU (same layout as the RPP IFR recipe). */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* Per-sub-module record arrays for TPE version 2 (superset of v1). */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	/* New in v2: IFR recipes. */
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Scalars held in network (big-endian) byte order. */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff /* reserved mark for LACP traffic */
+#define FLOW_MARK_MAX 0x7ffffffe /* highest mark usable by applications */
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Pairing of a logical queue id with its hardware queue id. */
+struct flow_queue_id_s {
+	int id; /* logical queue id */
+	int hw_id; /* corresponding HW queue id */
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/*
+ * Match-item types; each value selects which flow_elem_* spec struct the
+ * flow_elem.spec/mask pointers refer to (mirrors RTE_FLOW_ITEM_TYPE_*).
+ */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Action types; the comment names the conf struct each action expects. */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+#pragma pack(1)
+/* Ethernet MAC address, packed for on-wire layout. */
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format a MAC address into buf as "XX:XX:XX:XX:XX:XX" (upper-case hex),
+ * truncating safely at 'size' bytes (snprintf semantics).
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header (fixed 20-byte part, no options), fields in network order
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header (fixed 40 bytes), fields in network order
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header (common header)
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header (fixed part, no options)
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header (echo-style layout with identifier/sequence)
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification (wraps the raw header struct)
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3]; /* 24-bit VXLAN network identifier */
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data; /* tag value to match */
+	uint8_t index; /* which tag register */
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry of a match pattern: type plus optional spec/mask pointers. */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+/* RSS configuration (mirrors rte_flow RSS action parameters). */
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype; /* TPID of the pushed tag */
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition (flow_elem list, END-terminated) */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1; /* reset counters after query */
+	uint32_t hits_set : 1; /* 'hits' field is valid */
+	uint32_t bytes_set : 1; /* 'bytes' field is valid */
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id; /* destination HW port id */
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index; /* destination queue index */
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group; /* target group to jump to */
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id; /* meter (MBR) record id */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data; /* raw header bytes to prepend */
+	uint8_t *preserve;
+	size_t size; /* number of bytes in 'data' */
+	/* Parsed view of 'data', at most RAW_ENCAP_DECAP_ELEMS_MAX entries. */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data; /* raw header bytes to strip */
+	size_t size; /* number of bytes in 'data' */
+	/* Parsed view of 'data', at most RAW_ENCAP_DECAP_ELEMS_MAX entries. */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * NOTE(review): appears to mirror rte_flow's field-id enumeration — keep
+ * ordering in sync with the rte_flow mapping layer if entries are added.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* How src is applied to dst. */
+	struct flow_action_modify_data dst; /* Field to be modified. */
+	struct flow_action_modify_data src; /* Source field or value. */
+	uint32_t width; /* Number of bits to transfer. */
+};
+
+/* One entry in a flow's action list; conf points to the matching
+ * flow_action_xxx struct for the given type (may be NULL for flag-like
+ * actions). */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf;
+};
+
+/* Outcome classification used by struct flow_error. FLOW_ERROR_NONE means
+ * the error struct has not been filled in. */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+struct flow_error {
+	enum flow_error_e type;
+	const char *message; /* Static string; caller must not free. */
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY,
+	FLOW_LAG_SET_ALL,
+	FLOW_LAG_SET_BALANCE,
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			/* Same bytes as v6 viewed as two 64-bit words. */
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Selects v4 or v6 view of the union. */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type; /* presumably an enum flow_elem_type tunnel id — verify */
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+/* Reset all flow state on the given adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/* Create/attach a flow ethdev for hw_port_no on the adapter; queue_ids
+ * lists the queues to allocate when alloc_queues is set. */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+/* All functions below return 0 on success, non-zero on failure and fill
+ * *error when it is non-NULL. item[]/action[] are END-terminated lists. */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 6/8] net/ntnic: adds flow logic
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-08-31 13:51   ` [PATCH v12 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-08-31 13:51   ` Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v11:
* Replace stdatomic by compiler build-in atomic
* Fix dereferencing type-punned pointer in macro
* Inner offset must exclude VLAN bytes
v12:
* Fix error=array-bounds
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1307 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5128 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10089 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..8cdf15663d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug names for resource types, indexed by enum res_type_e; used for
+ * logging in the ref/deref helpers below. Keep in sync with the enum.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global NIC device list; list access is guarded by base_mtx
+ * (see the lag_* functions below). */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Error messages indexed by enum flow_nic_err_msg_e; consumed by
+ * flow_nic_set_error(). Keep entries in sync with the enum.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+	/* 39 */  {"Invalid priority value"},
+};
+
+/*
+ * Translate a NIC error code into a struct flow_error.
+ * msg must be a valid index into err_msg[] (asserted in debug builds);
+ * error may be NULL, in which case nothing is recorded.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (error) {
+		error->message = err_msg[msg].message;
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate a single resource of res_type, scanning only indexes aligned
+ * to "alignment". Returns the allocated index, or -1 if none is free.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < ndev->res[res_type].resource_count; idx += alignment) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			continue;
+		/* Claim the first free aligned slot. */
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return (int)idx;
+	}
+	return -1;
+}
+
+/*
+ * Allocate the specific resource index idx of res_type.
+ * Returns 0 on success, -1 if that index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (flow_nic_is_resource_used(ndev, res_type, idx))
+		return -1;
+
+	flow_nic_mark_resource_used(ndev, res_type, idx);
+	ndev->res[res_type].ref[idx] = 1;
+	return 0;
+}
+
+/*
+ * Allocate "num" contiguous resources of res_type with the run starting at
+ * an index aligned to "alignment". Returns the first index of the run, or
+ * -1 if no suitable free run exists.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/*
+	 * Guard the loop bound: "resource_count - (num - 1)" is unsigned and
+	 * wraps for num == 0 or num > resource_count, which would scan far
+	 * out of bounds.
+	 */
+	if (num == 0 || num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* Verify the following num - 1 slots are free too. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/* Release a single resource index back to the free pool (no ref counting;
+ * see flow_nic_deref_resource for the ref-counted path). */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Increment the reference count of an already-allocated resource.
+ * Returns 0 on success, -1 if the counter is saturated at UINT32_MAX.
+ * The resource must be marked used (asserted in debug builds).
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	/* Refuse to wrap the counter. */
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Decrement the reference count of a resource, freeing it when the count
+ * reaches zero. Returns 1 if the resource is still referenced, 0 if it
+ * has been freed. The resource must be used with a non-zero ref count
+ * (asserted in debug builds).
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Return the first in-use resource index at or after idx_start, or -1 if
+ * no further resources of res_type are allocated.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	unsigned int idx = idx_start;
+
+	while (idx < ndev->res[res_type].resource_count) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			return (int)idx;
+		idx++;
+	}
+	return -1;
+}
+
+/*
+ * Allocate "count" flow resources of res_type and record them in the flow
+ * handle fh.
+ *
+ * alignment restricts the start index: 1 means any index, 2 means the run
+ * starts at a multiple of 2 (0, 2, 4, ...), 3 at a multiple of 3, etc.
+ * For count > 1 the indexes are contiguous.
+ *
+ * Returns 0 on success, -1 on failure (fh->resource[res_type].index is
+ * then negative and count is left unset).
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	int idx;
+
+	idx = (count > 1) ?
+	      flow_nic_alloc_resource_contig(ndev, res_type, count, alignment) :
+	      flow_nic_alloc_resource(ndev, res_type, alignment);
+
+	fh->resource[res_type].index = idx;
+	if (idx < 0)
+		return -1;
+
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Allocate the specific resource index idx for flow handle fh.
+ * Returns 0 on success, -1 if the index is already in use.
+ * NOTE(review): if a negative idx were passed, the second check returns -1
+ * without undoing the allocation — callers appear to pass valid indexes;
+ * verify.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program HSH recipe hsh_idx with a predefined hashing algorithm.
+ * Always returns 0; hardware write errors are not propagated here.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	/* Clear the recipe before configuring it. */
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		/* QW0 points 16 bytes before the final IP dst — presumably
+		 * the source address; QW4 points at the dst itself. Verify
+		 * against the HSH module spec. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		/* W8 covers the first L4 word (ports); W9 is unused. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Include words 0-8 in the hash; mask out word 9. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program HSH recipe hsh_idx from RSS field flags. Only a fixed set of
+ * flag combinations is supported.
+ * Returns 0 on success, -1 on a backend write failure or an unsupported
+ * flags combination.
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only
+		 * true for the single VLAN provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		/*
+		 * Accumulate every return code into res; the original dropped
+		 * them here, which made the error check below ineffective.
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx,
+					  0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx,
+					  0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		/* Fixed copy-pasted log text (was "outer dst"). */
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the flow ethdev for (adapter_no, port).
+ * Returns NULL when either the adapter or the port is unknown.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *ndev;
+	struct flow_eth_dev *edev;
+
+	/* Find the NIC device for this adapter. */
+	for (ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			break;
+	}
+	if (!ndev)
+		return NULL;
+
+	/* Find the ethdev attached to the requested port. */
+	for (edev = ndev->eth_base; edev; edev = edev->next) {
+		if (edev->port == port)
+			return edev;
+	}
+
+	return NULL;
+}
+
+/* Return the NIC device registered for adapter_no, or NULL if unknown. */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *cur;
+
+	for (cur = dev_base; cur; cur = cur->next) {
+		if (cur->adapter_no == adapter_no)
+			return cur;
+	}
+	return NULL;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure which physical port pairs act as a LAG group.
+ * Returns 0 on success, -1 if the adapter is unknown.
+ * Takes base_mtx while touching the device list and hardware.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
+	 * and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block ingress traffic from the MAC ports selected in port_mask.
+ * Returns 0 on success, -1 if the adapter is unknown.
+ * Takes base_mtx while touching the device list and hardware.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Program one ROA LAG table entry (TX PHY port for "index") and flush it
+ * to hardware. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Configure the LAG distribution table.
+ *   FLOW_LAG_SET_ENTRY:   write "value" to table entry "index".
+ *   FLOW_LAG_SET_ALL:     write "value" to entry (index & 3) of every
+ *                         4-entry hash block.
+ *   FLOW_LAG_SET_BALANCE: distribute whole hash blocks between port 0 and
+ *                         port 1 according to "value" (0..100, percent of
+ *                         traffic on port 0); "index" is unused.
+ * Returns 0 on success, -1 on unknown adapter or command.
+ * Takes base_mtx for the duration of the update.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* Entry values 1/2 select TX PHY port 0/1 respectively. */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow specification without creating it.
+ * Only the inline profile implements validation; the vSwitch profile is
+ * rejected with an error log and -1.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Create a flow on the given eth-port device.
+ * Only the inline profile is supported; the vSwitch profile is rejected
+ * with an error log and NULL.
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/*
+ * Destroy a previously created flow.
+ * Only the inline profile is supported; the vSwitch profile is rejected
+ * with an error log and -1.
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Remove all flows on the given eth-port device.
+ * Only the inline profile is supported; the vSwitch profile is rejected
+ * with an error log and -1.
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Query action data (e.g. counters) of an existing flow; on success
+ * *data/*length describe the result buffer.
+ * Only the inline profile is supported; the vSwitch profile is rejected
+ * with an error log and -1.
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Push an eth-port device onto the NIC's singly linked device list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink an eth-port device from the NIC's device list.
+ * Returns 0 when found and removed, -1 when not present.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	for (; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			/* splice the element out, head or interior alike */
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on a NIC device: delete every eth-port
+ * device (which destroys its flows), destroy any leftover flows as an
+ * error fallback, release profile/KM/KCC resource management, and - in
+ * debug builds - report resource elements still referenced or allocated.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+/* NOTE(review): redundant - this is already inside #ifdef FLOW_DEBUG */
+#if defined(FLOW_DEBUG)
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+#endif
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset a NIC device to its post-create state: tear down all eth-port
+ * devices and flows, then reset the backend module caches.
+ * Returns 0 on success, -1 when the adapter has no registered flow device.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev)
+		return -1;
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * Create (or re-create) an eth-port device on a NIC adapter and allocate
+ * its rx queues.
+ *
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * port_id          application assigned port id (may be DPDK port_id)
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ * queue_ids        queue id array; input when built with SCATTER_GATHER,
+ *                  otherwise filled with the allocated queues
+ * rss_target_id    out: RSS target id of the new device (-1 when unused)
+ * flow_profile     profile to prepare the NIC for; must match the profile
+ *                  of any earlier initialization of the same NIC
+ * exception_path   when set (inline profile), unmatched packets on this
+ *                  port are redirected to rx queue 0 via QSL UNMQ
+ *
+ * Returns the new device, or NULL on failure.
+ *
+ * Locking: base_mtx is held from the adapter lookup until return;
+ * ndev->mtx additionally while the device and its queues are set up.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * Keep base_mtx held while deleting the old device
+		 * (flow_delete_eth_dev takes only ndev->mtx); dropping it
+		 * here previously made later unlocks unbalanced.
+		 */
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		/* report the allocated queues back to the caller */
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* enable each allocated queue in QSL (4 enable bits/word) */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+
+err_exit1:
+	/* base_mtx is held on every path reaching this label */
+	pthread_mutex_unlock(&base_mtx);
+
+	free(eth_dev); /* free(NULL) is a no-op */
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Attach an externally allocated rx queue to an existing eth-port device
+ * and enable it in the QSL queue-enable register (4 enable bits per
+ * register word, hence the /4 and %4). Always returns 0.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	/* read-modify-write the enable bit for this hw queue */
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy all flows created on it, clear its
+ * QSL unmatched-queue setting, disable and free its rx queues, unlink it
+ * from the NIC device list and free it.
+ * Returns 0 on success, -1 on invalid device or unsupported profile.
+ * Takes ndev->mtx for the duration of the teardown.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/* fix: do not leak ndev->mtx on this path */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* clear the QSL enable bit of each queue (4 bits/word) */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin pass-through to the tunnel module's definition lookup. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Create the bookkeeping for one resource type: a single allocation holds
+ * the byte-aligned allocation bitmap followed by a uint32_t reference
+ * counter per element; 'ref' points into the same buffer.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	assert(ndev->res[res_type].alloc_bm == NULL);
+	/* allocate bitmap and ref counter */
+	ndev->res[res_type].alloc_bm =
+		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
+	if (ndev->res[res_type].alloc_bm) {
+		ndev->res[res_type].ref =
+			(uint32_t *)&ndev->res[res_type]
+			.alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
+		ndev->res[res_type].resource_count = count;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Release the bookkeeping buffer of one resource type (bitmap and ref
+ * counters share a single allocation, see init_resource_elements()).
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(ndev->res[res_type].alloc_bm);
+}
+
+/* Push a NIC device onto the global device list, guarded by base_mtx. */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global device list, guarded by base_mtx.
+ * Returns 0 when found and removed, -1 when not present.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	int found = -1;
+
+	pthread_mutex_lock(&base_mtx);
+
+	struct flow_nic_dev **link = &dev_base;
+
+	for (; *link; link = &(*link)->next) {
+		if (*link == ndev) {
+			/* splice the element out, head or interior alike */
+			*link = ndev->next;
+			found = 0;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return found;
+}
+
+/*
+ * Register a NIC backend and create its flow device.
+ *
+ * Initializes the backend binding, sizes every resource manager from the
+ * capabilities the backend reports, and inserts the device into the
+ * global list. Only backend interface version 1 is accepted.
+ * Returns the new device, or NULL on failure (partially built state is
+ * released through flow_api_done()).
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* port count is capped at 256 addressable in-ports */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/*
+	 * flow_api_done() also tries to unlink ndev from the device list;
+	 * at this point it was never inserted, which is harmless since
+	 * list_remove_flow_nic() just returns -1.
+	 */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a flow device created by flow_api_create(): tear down all
+ * eth-ports and flows, free every resource manager, close the backend
+ * binding, unlink from the global list and free the device.
+ * NULL is accepted. Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev) {
+		flow_ndev_reset(ndev);
+
+		/* delete resource management allocations for this ndev */
+		for (int i = 0; i < RES_COUNT; i++)
+			done_resource_elements(ndev, i);
+
+		flow_api_backend_done(&ndev->be);
+		list_remove_flow_nic(ndev);
+		free(ndev);
+	}
+	return 0;
+}
+
+/* Return the opaque backend device handle of a NIC, or NULL. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (!ndev) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+	return ndev->be.be_dev;
+}
+
+/*
+ * Return the number of rx queues of the eth-port device on the given
+ * adapter/port, or -1 when no such device is registered.
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* guard against lookup failure instead of dereferencing NULL */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the hw queue id of rx queue 'queue_no' on the given adapter/port,
+ * or -1 when no eth-port device is registered for it.
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* guard against lookup failure instead of dereferencing NULL */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Fetch FLM statistics into 'data' (up to 'size' entries).
+ * Only implemented for the inline profile; returns -1 otherwise.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
+		return flow_get_flm_stats_profile_inline(ndev, data, size);
+	return -1;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/*
+ * Per resource-type bookkeeping: an allocation bitmap plus one reference
+ * counter per element. Both live in a single calloc'ed buffer (see
+ * init_resource_elements() in flow_api.c).
+ */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields; /* bitwise OR of the NT_ETH_RSS_* flags above */
+};
+
+/* One logical eth-port device created on a NIC via flow_get_eth_dev(). */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next device on the owning NIC's list */
+};
+
+/* Hash algorithm selector for flow_nic_set_hasher() */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/*
+ * Registered NIC backend: per-adapter state holding the resource
+ * allocation tables, the lists of created eth-port devices and flows,
+ * and the backend API binding. Linked into the global device list.
+ */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* set once module initialization is done */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* protects this NIC's device/flow state */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/*
+ * Error codes reported to callers through struct flow_error
+ * (see flow_nic_set_error()).
+ */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+/* Set bit x in the byte-array bitmap arr (single evaluation of args) */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/* Clear bit x in the byte-array bitmap arr (single evaluation of args) */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/* Non-zero when bit x of the byte-array bitmap arr is set */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Mark a resource element as allocated; asserts it was previously free */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/* Mark a resource element as free again */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* Non-zero when the given resource element is currently allocated */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fcda73106a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable/disable a KM flow type for a category function (CFN) in the FTE
+ * enable bitmap, flushing to hardware only when the bitmap changes.
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* Each FTE entry holds one enable bit per CFN in this bank */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned constant: "1 << 31" on a signed int is undefined behavior */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Only touch hardware when the bitmap actually changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable/disable an FLM flow type for a category function (CFN) in the FTE
+ * enable bitmap, flushing to hardware only when the bitmap changes.
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	/* Each FTE entry holds one enable bit per CFN in this bank */
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned constant: "1 << 31" on a signed int is undefined behavior */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Only touch hardware when the bitmap actually changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/* Map a logical RX queue id to its hardware queue id; -1 if not found. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int i;
+
+	for (i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Bring up the FLM SDRAM: reset the control register, select the SDRAM
+ * usage split, then poll until the DDR4 calibration-done flag is set
+ * (up to 1000000 x usleep(1), i.e. roughly one second).
+ * Returns 0 on success, -1 if calibration never completes.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	/* NOTE(review): 0x10 presumably encodes the lookup/stats SDRAM split;
+	 * confirm against the FPGA register documentation.
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reset the FLM and (re)initialize its SDRAM.
+ * Sequence: disable FLM lookups, wait for the idle flag, trigger SDRAM
+ * init, wait for the init-done flag, then set the ENABLE bit to "enable".
+ * Each poll loop gives up after 1000000 x usleep(1) (~1 second).
+ * Returns 0 on success, -1 on timeout.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Recipe 0 is left untouched; clear recipes 1..N-1 */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+/* Max number of FLM recipes (one per flow group) */
+#define FLM_FLOW_RCP_MAX 32
+/* Max number of flow types per group */
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Packed identity of a flow's action set. Fits in 64 bits so two action
+ * sets can be compared with a single integer compare (via .data).
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;	/* slot allocated */
+			uint64_t drop : 1;	/* set when there are no destinations */
+			uint64_t ltx_en : 1;	/* local TX to a physical port */
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;	/* forward to a virtual queue */
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;	/* tunnel header length */
+			uint64_t encap_vlans : 2;	/* VLAN tags in tunnel header */
+			uint64_t encap_ip : 1;	/* 0 = IPv4 tunnel, 1 = IPv6 */
+			uint64_t decap_end : 5;	/* dyn offset where header strip ends */
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data;	/* whole identity as one comparable word */
+	};
+};
+
+/*
+ * FLM key recipe: dyn/ofs selectors for two quad-words (QW0, QW4) and two
+ * single-words (SW8, SW9) of the lookup key, plus outer/inner protocol
+ * flags. Packed into 64 bits so recipes can be compared as plain integers
+ * (via .data).
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;	/* match on outer protocol */
+			uint64_t inner_proto : 1;	/* match on inner protocol */
+			uint64_t pad : 2;
+		};
+		uint64_t data;	/* whole recipe as one comparable word */
+	};
+};
+
+/*
+ * Pack a flow definition's action set into a flm_flow_ft_ident_s.
+ * No destinations means drop; PORT_PHY destinations enable local TX and
+ * PORT_VIRT destinations enable queue forwarding. Tunnel encap and decap
+ * parameters are carried along, plus an optional jump-to-group.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	/* The bit-field union must stay exactly one 64-bit word */
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ft_ident.drop = 1;
+	} else {
+		/* Last PHY/VIRT destination of each kind wins */
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* UINT32_MAX marks "no jump action" */
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+/* Record the dyn/ofs selector for quad-word 0 (qw == 0) or 4 (qw == 1). */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw == 0) {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+		return;
+	}
+
+	key_def->qw4_dyn = dyn & 0x7f;
+	key_def->qw4_ofs = ofs & 0xff;
+}
+
+/* Record the dyn/ofs selector for single-word 8 (sw == 0) or 9 (sw == 1). */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw == 0) {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+		return;
+	}
+
+	key_def->sw9_dyn = dyn & 0x7f;
+	key_def->sw9_ofs = ofs & 0xff;
+}
+
+/*
+ * State for one FLM group (one FLM recipe).
+ * cfn_group0/km_ft_group0/fh_group0 describe the KM flow anchoring this
+ * group in CFN group 0; ft[] maps packed action-set identities to the
+ * flow handles owning each FLM flow type.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;	/* -1 while no CFN is assigned */
+	int km_ft_group0;
+	struct flow_handle *fh_group0;
+
+	struct flm_flow_key_def_s key_def;	/* key layout all flows in the group share */
+
+	int miss_enabled;	/* group 0 CFN switched from KM to FLM handling */
+
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;	/* .data == 0 means slot free */
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Last FT slot matched ("cashed" is a pre-existing typo for "cached") */
+	uint32_t cashed_ft_index;
+};
+
+/* Root FLM handle: one group state per FLM recipe */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset) the FLM flow handle and mark every group's CFN as
+ * unassigned. Leaves *handle NULL if the allocation fails.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle) {
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+		/* Fix: the allocation result was dereferenced without a check */
+		if (!*handle)
+			return;
+	} else {
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+	}
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Release the FLM flow handle and clear the caller's pointer. */
+static void flm_flow_handle_remove(void **handle)
+{
+	void *flm_handle = *handle;
+
+	*handle = NULL;
+	free(flm_handle);	/* free(NULL) is a harmless no-op */
+}
+
+/* Anchor a flow group on a CFN/KM flow type pair. Returns 0, or -1 on a
+ * group index outside the recipe table.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *flm_group = &flm_handle->groups[group_index];
+
+	flm_group->miss_enabled = 0;
+	flm_group->fh_group0 = fh;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->cfn_group0 = cfn;
+
+	return 0;
+}
+
+/* Reset a flow group's state and mark its CFN as unassigned. Returns 0,
+ * or -1 on a group index outside the recipe table.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(*flm_group));
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/* Fetch the group's anchor flow handle, which doubles as its miss flow.
+ * Returns 0, or -1 on a group index outside the recipe table.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *flm_group = &flm_handle->groups[group_index];
+
+	*fh_miss = flm_group->fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program FLM recipe "group_index": key word selectors from key_def, the
+ * packet mask (re-ordered to hardware word order), the key id
+ * (group_index + 2), protocol flags and byte-counter offset.
+ * Returns 0 on success, -1 on invalid group index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Hardware mask word order: SW9, SW8, then QW4 and QW0 big-endian */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KID group_index + 2 matches the KID written by learn records */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	/* NOTE(review): -20 byte-count offset presumably excludes the L2
+	 * header -- confirm against the FLM register documentation.
+	 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Clear FLM recipe "group_index" and, if the group had taken over its
+ * group 0 CFN (miss_enabled), hand the CFN back to KM handling: point its
+ * FLM recipe selection at 0, swap FT MISS back to FT UNHANDLED and clear
+ * the CFN's bit in the FLM KCE enable bitmap.
+ * Returns 0 on success, -1 on invalid group index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM learning of a flow in "group_index".
+ * On the first flow of a group this programs the FLM recipe and switches
+ * the group 0 CFN from KM to FLM handling. Outputs the key id and flow
+ * type to use for the learn record; when an identical action set already
+ * exists in the group, *fh_existing is set and the caller reuses that
+ * handle instead. *cfn_to_copy/*cfn_to_copy_km_ft are set when a new FT
+ * slot was claimed. Returns 0 on success, -1 on error.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* First flow in this group: take over the group 0 CFN for FLM */
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in a group must share the same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the most recently used FT slot matches */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT slots 0 and 1 are reserved; scan 2..FLM_FLOW_FT_MAX-1 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release a flow handle's ownership of an FLM flow-type slot: clear the
+ * group's FT entry, then disable the group's KM flow type in lookup 0 and
+ * this handle's FLM flow type in lookup 2 on the flow's CFN.
+ * Returns 0 on success, non-zero if any set_flow_type_flm() call failed.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+/* Number of meter profiles (and policies) supported */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Per-NIC meter profile table. Each profile holds two token buckets;
+ * rates/sizes are stored packed as a 12-bit mantissa in [11:0] and a
+ * 4-bit shift-left amount in [15:12] (see flow_mtr_set_profile()).
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;	/* bucket A rate, 128 bytes/sec units, packed */
+		uint16_t rate_b;	/* bucket B rate, packed */
+		uint16_t size_a;	/* bucket A size, 2^40/10^9 byte units, packed */
+		uint16_t size_b;	/* bucket B size, packed */
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering is available only on devices with an FLM module of variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2 ? 1 : 0;
+}
+
+/* One meter policy slot exists per profile entry. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return (uint64_t)FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a bucket size (byte count) into hardware units of 2^40 / 10^9
+ * bytes, i.e. compute value * 10^9 / 2^40 with rounding up.
+ * The multiply is split into two 20-bit halves so the 64-bit intermediate
+ * products cannot overflow; round_up fires when either partial product
+ * leaves a discarded remainder below its shift-out point.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t round_up =
+		(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+	return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+/*
+ * Store a meter profile as two hardware "dual bucket" configurations.
+ * Rates are converted to 128 bytes/sec units, sizes to 2^40/10^9 byte
+ * units; both are packed as a 12-bit mantissa [11:0] with a 4-bit
+ * shift-left amount [15:12].
+ * Returns 0 on success, -1 on an out-of-range profile id.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+
+	/* Fix: guard the fixed-size profile table against OOB writes */
+	if (profile_id >= FLM_MTR_PROFILE_SIZE)
+		return -1;
+
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies are not programmed into hardware; accept and ignore. */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+/* Number of meters supported (size of the statistics table) */
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* 32-bit words per FLM info record */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Marks a meter packet counter as "update in progress" */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/*
+ * Periodic statistics thresholds. NOTE(review): the values are exponents
+ * and the annotations imply a hardware-applied base offset (e.g. 8 ->
+ * 2^23 bytes); confirm against the FLM register documentation.
+ */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* One meter exists per FLM statistics record. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return (uint32_t)FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics entry.
+ * n_pkt/n_bytes are written by flm_mtr_update_stats() and read with
+ * relaxed atomics; UINT64_MSB set in n_pkt marks an update in progress so
+ * readers can detect torn pairs. The *_base fields hold the counter
+ * values at the last clear.
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;	/* NULL once the meter is destroyed */
+
+	uint64_t n_pkt;
+	uint64_t n_bytes;
+	uint64_t n_pkt_base;
+	uint64_t n_bytes_base;
+	uint64_t stats_mask;	/* 0 => meter deleted; updates are skipped */
+};
+
+/*
+ * Learn record size used for learn-queue space checks.
+ * NOTE(review): unlike WORDS_PER_INF_DATA this is a plain sizeof() with
+ * no division by sizeof(uint32_t) -- verify whether the
+ * HW_FLM_BUF_CTRL_LRN_FREE register counts bytes or 32-bit words.
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+/* Max polling rounds while waiting for learn-queue space */
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Write one learn record to the FLM learn queue.
+ * If the queue lacks space, pending info records are drained via
+ * flm_read_inf_rec_locked() while re-polling, giving up after
+ * FLM_PROG_MAX_RETRY rounds. Caller must hold ndev->mtx.
+ * Returns 0 on success, 1 on learn-queue timeout, otherwise the flush
+ * status.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached register before entering the retry loop */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain info records to let the FLM make progress */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create (learn) a meter flow in the FLM.
+ * Bucket rate/size come from the given profile; statistics collection is
+ * enabled when stats_mask is non-zero.
+ * Returns 0 on success, non-zero on failure or invalid ids.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	/* Fix: validate ids before they index fixed-size tables */
+	if (mtr_id >= FLM_MTR_STAT_SIZE || profile_id >= FLM_MTR_PROFILE_SIZE)
+		return -1;
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Meters are keyed on SW9 = mtr_id + 1 (zero key never used), KID 1 */
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1; /* op 1 = learn (0 = unlearn, 2 = adjust) */
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		__atomic_store_n(&mtr_stat[mtr_id].stats_mask, stats_mask, __ATOMIC_RELAXED);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy (unlearn) a meter flow.
+ * Statistics are zeroed first so stats_mask == 0 stops late counter
+ * updates on the deleted meter.
+ * Returns 0 on success, non-zero on failure or invalid id.
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	/* Fix: validate the id before it indexes the stats table */
+	if (mtr_id >= FLM_MTR_STAT_SIZE)
+		return -1;
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0; /* op 0 = unlearn */
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	__atomic_store_n(&mtr_stat[mtr_id].stats_mask, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_bytes, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_pkt, 0, __ATOMIC_RELAXED);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust a meter's bucket fill level by "adjust_value".
+ * Returns 0 on success, -1 if the meter does not exist (or the id is out
+ * of range), otherwise the FLM programming status.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	/* Fix: validate the id before it indexes the stats table */
+	if (mtr_id >= FLM_MTR_STAT_SIZE)
+		return -1;
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	/* Fix: a destroyed meter has buckets == NULL; avoid dereferencing it */
+	if (mtr_stat->buckets == NULL) {
+		pthread_mutex_unlock(&dev->ndev->mtx);
+		return -1;
+	}
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2; /* op 2 = adjust */
+	learn_record.eor = 1;
+
+	if (__atomic_load_n(&mtr_stat->stats_mask, __ATOMIC_RELAXED))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/* Read up to MAX_INF_DATA_RECORDS_PER_READ info records into "data".
+ * Caller must hold the device mutex. Returns the number of records read.
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t avail_words = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&avail_words);
+	if (avail_words < WORDS_PER_INF_DATA) {
+		/* Refresh the cached register before giving up */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL,
+					&avail_words);
+	}
+
+	uint32_t n_records = avail_words / WORDS_PER_INF_DATA;
+
+	if (n_records == 0)
+		return 0;
+
+	if (n_records > MAX_INF_DATA_RECORDS_PER_READ)
+		n_records = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data, n_records * WORDS_PER_INF_DATA);
+
+	return n_records;
+}
+
+/*
+ * Drain FLM info records and fold meter statistics into the per-meter
+ * counters. Returns the number of records processed.
+ * Counter updates use a torn-read guard: n_pkt is first stored with
+ * UINT64_MSB set, then n_bytes, then n_pkt without the MSB, so readers
+ * (flm_mtr_read_stats) can retry until the pair is consistent.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		/* NOTE(review): word layout inferred from use -- bytes in
+		 * words 0-1, packets in 2-3, meter id in 6; confirm against
+		 * flm_v17_inf_data_s.
+		 */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				__atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+			if (stats_mask) {
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/* MSB marks the pair as mid-update for readers */
+				__atomic_store_n(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_bytes, nb, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_pkt, np, __ATOMIC_RELAXED);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read a meter's green packet/byte counters (relative to the last clear).
+ * Outputs nothing besides *stats_mask when the meter collects no stats.
+ * The retry loop pairs with flm_mtr_update_stats(): it spins while the
+ * packet counter carries the UINT64_MSB in-progress mark or changes
+ * between reads, so the pkt/byte pair is consistent.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+			} while (pkt_1 & UINT64_MSB);
+			nb = __atomic_load_n(&mtr_stat[id].n_bytes, __ATOMIC_RELAXED);
+			pkt_2 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			/* Clearing only moves the base; raw counters keep running */
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* Map a port number to its IFR MTU recipe; recipe 0 is not used for ports. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/* Find the physical port for a DPDK port id; UINT8_MAX if unknown. */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	for (struct flow_eth_dev *dev = ndev->eth_base; dev; dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the front of the device's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	fh->prev = NULL;
+	fh->next = ndev->flow_base;
+	if (fh->next)
+		fh->next->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/* Unlink a flow handle from the device's doubly-linked flow list. */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *succ = fh->next;
+	struct flow_handle *pred = fh->prev;
+
+	if (succ && pred) {
+		/* Interior node: bridge the neighbours */
+		pred->next = succ;
+		succ->prev = pred;
+	} else if (succ) {
+		/* Head of the list */
+		ndev->flow_base = succ;
+		succ->prev = NULL;
+	} else if (pred) {
+		/* Tail of the list */
+		pred->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* Sole element */
+		ndev->flow_base = NULL;
+	}
+}
+
+/* Push fh onto the head of the device's list of learned (FLM) flows. */
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base_flm;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base_flm = fh;
+}
+
+/*
+ * Unlink fh_flm from the device's learned (FLM) flow list.  Mirrors
+ * nic_remove_flow(): one early-return case per list position; the
+ * removed handle's own links are left untouched.
+ */
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *nxt = fh_flm->next;
+	struct flow_handle *prv = fh_flm->prev;
+
+	if (nxt && prv) {
+		/* Middle of the list: bridge the two neighbours. */
+		prv->next = nxt;
+		nxt->prev = prv;
+		return;
+	}
+	if (nxt) {
+		/* Head: the successor becomes the new list base. */
+		ndev->flow_base_flm = nxt;
+		nxt->prev = NULL;
+		return;
+	}
+	if (prv) {
+		/* Tail: detach from the predecessor. */
+		prv->next = NULL;
+		return;
+	}
+	if (ndev->flow_base_flm == fh_flm)
+		ndev->flow_base_flm = NULL;	/* Sole element. */
+}
+
+/*
+ * Interpret a VLAN pattern element: add the masked TCI to the KM match key
+ * (one SW word at DYN_FIRST_VLAN offset 0) and count the VLAN in fd->vlans.
+ *
+ * Rejects the element when an implicit VLAN VID is already configured
+ * (multiple VLANs unsupported there) or when both SW key words are used.
+ * Returns 0 on success; 1 on error, in which case fd has been freed and
+ * *error set - the caller must not touch fd afterwards.
+ *
+ * NOTE(review): sw_counter is passed by value, so the increment below is
+ * invisible to the caller - confirm the caller re-derives the counter.
+ */
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW key words are allocated top-down: index 1 first, then 0. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* Counted even for spec-less elements: the header is still present. */
+	fd->vlans += 1;
+	return 0;
+}
+
+/*
+ * Interpret an IPV4 pattern element: add masked src/dst addresses to the KM
+ * match key and classify the flow's L3 protocol (outer vs. tunneled,
+ * depending on any_count / an already-set fd->l3_prot).
+ *
+ * Address matching prefers a single QW (quad-word) slot holding src+dst
+ * together (DYN_L3 offset 12); when both QW slots are taken it falls back
+ * to one SW (single-word) slot per address (offsets 12 and 16).  A
+ * frag_offset spec/mask of 0xffff selects fragmentation mode 0xfe.
+ *
+ * Returns 0 on success; 1 on error, in which case fd has been freed and
+ * *error set - the caller must not touch fd afterwards.
+ *
+ * NOTE(review): qw_counter/sw_counter are passed by value, so increments
+ * here are invisible to the caller - confirm the caller compensates.
+ */
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			/* QW slots are allocated top-down from packet_data[6], 4 words each. */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			/* SW fallback: make sure enough single words remain for what is masked. */
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	/* A preceding ANY element or an already-set outer L3 makes this the tunnel L3. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+/*
+ * Interpret an IPV6 pattern element: add masked 128-bit src/dst addresses
+ * to the KM match key (one full QW slot of 4 words each, DYN_L3 offsets 8
+ * and 24) and classify the flow's L3 protocol.
+ *
+ * Note that participation is decided on the *spec* address being non-zero;
+ * the mask is only applied afterwards.  Each address is copied raw, each
+ * 32-bit word converted to host order, then ANDed with its mask.
+ *
+ * Returns 0 on success; 1 on error, in which case fd has been freed and
+ * *error set - the caller must not touch fd afterwards.
+ *
+ * NOTE(review): qw_counter is passed by value, so increments here are
+ * invisible to the caller - confirm the caller compensates.
+ */
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* QW slots are allocated top-down from packet_data[6], 4 words each. */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			/* Network-order address words to host order... */
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			/* ...then keep only the masked bits of the spec. */
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	/* A preceding ANY element or an already-set outer L3 makes this the tunnel L3. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+/*
+ * Interpret a UDP pattern element (function name "upd" looks like a typo
+ * for "udp"; kept as-is since callers use it): pack the masked src/dst
+ * ports into one SW key word (src in the high 16 bits, dst in the low) at
+ * DYN_L4 offset 0, and classify the flow's L4 protocol, marking the
+ * matching inner/outer protocol bit in key_def.
+ *
+ * Returns 0 on success; 1 on error, in which case fd has been freed and
+ * *error set - the caller must not touch fd afterwards.
+ *
+ * NOTE(review): sw_counter is passed by value, so the increment below is
+ * invisible to the caller - confirm the caller compensates.
+ */
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW key words are allocated top-down: index 1 first, then 0. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* A preceding ANY element or an already-set outer L4 makes this the tunnel L4. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret an SCTP pattern element: pack the masked src/dst ports into one
+ * SW key word (src high 16 bits, dst low 16 bits) at DYN_L4 offset 0, and
+ * classify the flow's L4 protocol, marking the matching inner/outer
+ * protocol bit in key_def.
+ *
+ * Returns 0 on success; 1 on error, in which case fd has been freed and
+ * *error set - the caller must not touch fd afterwards.
+ *
+ * NOTE(review): sw_counter is passed by value, so the increment below is
+ * invisible to the caller - confirm the caller compensates.
+ */
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW key words are allocated top-down: index 1 first, then 0. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* A preceding ANY element or an already-set outer L4 makes this the tunnel L4. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a TCP pattern element: pack the masked src/dst ports into one
+ * SW key word (src high 16 bits, dst low 16 bits) at DYN_L4 offset 0, and
+ * classify the flow's L4 protocol, marking the matching inner/outer
+ * protocol bit in key_def.
+ *
+ * Returns 0 on success; 1 on error, in which case fd has been freed and
+ * *error set - the caller must not touch fd afterwards.
+ *
+ * NOTE(review): sw_counter is passed by value, so the increment below is
+ * invisible to the caller - confirm the caller compensates.
+ */
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW key words are allocated top-down: index 1 first, then 0. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	/* A preceding ANY element or an already-set outer L4 makes this the tunnel L4. */
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a GTP pattern element: add the masked TEID to the KM match key
+ * (one SW word at DYN_L4_PAYLOAD offset 4, the TEID's position in a
+ * GTPv1-U header) and mark the flow as GTPv1-U tunneled.
+ *
+ * Returns 0 on success; 1 on error, in which case fd has been freed and
+ * *error set - the caller must not touch fd afterwards.
+ *
+ * NOTE(review): sw_counter is passed by value, so the increment below is
+ * invisible to the caller - confirm the caller compensates.
+ */
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW key words are allocated top-down: index 1 first, then 0. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	/* Set even for spec-less elements: the pattern still implies GTP tunneling. */
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Reset all hardware state tied to CAT function (CFN) slot @cfn so the
+ * slot can be reused by a new filter: clears the CFN record, detaches and
+ * disables the KM and FLM key-match paths for the slot (including the
+ * per-flow-type enable bits), and clears the CTE/CTS category tables when
+ * any CTE enable bits were set.  Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN: preset the whole record to zero and flush it */
+	{
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+				   0, 0);
+		hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+	}
+
+	/* KM */
+	{
+		uint32_t bm = 0;
+
+		/*
+		 * Clear this CFN's bit in the KM enable bitmap (8 CFNs are
+		 * packed per bitmap word, hence cfn / 8 and cfn % 8) and
+		 * zero the KM category selector for the slot.
+		 */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every flow type for this CFN on all four KM keys */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* FLM: same teardown as KM, but for the FLM key-match path */
+	{
+		uint32_t bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+			set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+		}
+	}
+
+	/* CTE / CTS */
+	{
+		uint32_t cte = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte);
+
+		/* Only touch CTS if some CTE enable bits are actually set */
+		if (cte) {
+			/*
+			 * Two categories (A/B) are stored per CTS entry, so
+			 * each CFN owns (cts_num + 1) / 2 consecutive entries.
+			 */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Populate an FLM-type flow handle @fh from the parsed flow definition
+ * @fd: derive the IP protocol number used for matching, copy the 10-word
+ * packet match key, and latch NAT/DSCP/TEID/QFI values recorded by any
+ * modify-field actions.  Returns 0 on success, -1 if @fh is not an FLM
+ * handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/*
+	 * IANA protocol number (TCP=6, UDP=17, SCTP=132, ICMP=1); the outer
+	 * L4 header takes precedence, the tunneled L4 header is the
+	 * fallback, and 0 means "no L4 protocol matched".
+	 */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Harvest values captured by the flow's modify-field actions */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		switch (fd->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* One byte carries RQI in bit 6 and QFI in bits 5:0 */
+			fh->flm_rqi = (fd->modify_field[idx].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build an FLM v17 learn record from flow handle @fh and submit it to the
+ * FLM learn interface via flow_flm_apply().  @mtr_ids optionally supplies
+ * meter ids (statistics use the last non-zero one); @flm_ft selects the
+ * flow type and @flm_op the learn operation.  Returns the result of
+ * flow_flm_apply(), or -1 if @fh is not an FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Match key words are loaded in reverse: flm_data[9..0] -> qw0/qw4/sw */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+
+	/*
+	 * NOTE(review): mbr_id1/mbr_id2 overlay the same base address, and
+	 * mbr_id3/mbr_id4 both overlay mbr_idx + 7 — presumably the overlay
+	 * struct packs two member indices per region via its .a/.b fields;
+	 * confirm against the flm_v17_mbr_idx_overlay layout.
+	 */
+	struct flm_v17_mbr_idx_overlay *mbr_id1_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id2_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id3_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	struct flm_v17_mbr_idx_overlay *mbr_id4_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	if (mtr_ids) {
+		mbr_id1_ptr->a = mtr_ids[0];
+		mbr_id2_ptr->b = mtr_ids[1];
+		mbr_id3_ptr->a = mtr_ids[2];
+		mbr_id4_ptr->b = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	/* NAT is enabled when either a NAT address or a NAT port is set */
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1;	/* end-of-record marker */
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Attach KM flow-type (FT) and KM category (RCP) resources to the new
+ * flow handle @fh.
+ *
+ * When no identical flow exists, a KM FT is found by reuse (matching
+ * ft_ident) or allocated from the first free slot, and the KM RCP is
+ * either referenced from @found_flow (a compatible, unreferenced flow) or
+ * newly allocated; the KM RCP and match entry are then written to HW.
+ * When @identical_flow_found is non-zero, both FT and RCP of @found_flow
+ * are reference-counted and the existing match entry is reused.
+ *
+ * On success the out-parameters @setup_km, @setup_km_ft and @setup_km_rcp
+ * are filled for the caller's filter setup.  Returns 0 on success, 1 on
+ * resource exhaustion (error already recorded in @error).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+		/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/*
+			 * Single pass: remember both the first identical FT
+			 * and the first free (zero) slot as fallback.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Reuse: bump the refcount on the existing FT */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record our ident in it */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add our flow type to the RCP's FT A mask */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference its FT and RCP resources */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* Share the already-programmed match entry */
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		/*
+		 * NOTE(review): @flow is read here rather than @found_flow;
+		 * in the current caller they are the same object when an
+		 * identical flow was found — confirm if callers change.
+		 */
+		*setup_km = 1;
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * @fd:         flow definition holding the tunnel header and modify-field
+ *              entries
+ * @eth_length: length of the outer Ethernet header
+ * @i:          index of the modify-field entry being aligned
+ * @ofs:        in/out byte offset to adjust
+ * @select:     CPY_SELECT_* target of the modify-field entry
+ * @l2_length:  outer L2 header length
+ * @l3_length:  outer L3 header length
+ * @l4_length:  outer L4 header length
+ * @dyn:        out: set to 1 when the offset was rebased to be relative
+ *              to the start of L2 (left untouched otherwise)
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Bug fix: add to the caller's offset value.  The
+			 * original "ofs += ..." advanced the local pointer
+			 * instead, leaving the caller's offset unchanged.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Rebase onto the new outer headers */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			default:
+				/* Other targets keep their offset as-is */
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* Update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* Update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of the flow-management state for an inline-profile NIC
+ * device. Reserves the index-0 (default/catch-all) entries of every resource
+ * pool, programs default recipes (CAT catch-all, QSL discard, PDB descriptor,
+ * 5-tuple hasher, COT color 0), unblocks the RMC MAC/statistics paths, brings
+ * up the FLM (SDRAM calibration + reset, control/priority/periodic-statistics
+ * presets) and allocates the meter/flow-type bookkeeping tables.
+ * Idempotent: guarded by ndev->flow_mgnt_prepared.
+ * Returns 0 on success; on any failure jumps to err_exit0, which tears the
+ * partially prepared state down again via the done_... counterpart, and
+ * returns -1.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Mask off all FLM learn/unlearn/relearn status reporting */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		/* Per-flow-type FIFO fill-level drop thresholds */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Bookkeeping tables; all three are freed by the done_...
+		 * counterpart (reached through err_exit0 on failure).
+		 */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Atomic stores: stats are read concurrently by the stat path */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			__atomic_store_n(&mtr_stat[i].n_pkt, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].n_bytes, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].stats_mask, 0, __ATOMIC_RELAXED);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down everything initialize_flow_management_of_ndev_profile_inline()
+ * set up: resets the FLM SDRAM, releases the reserved index-0 resource
+ * entries and presets the corresponding HW recipes back to defaults, and
+ * frees the meter/flow-type bookkeeping tables. Safe to call on a device
+ * that was never (or only partially) prepared - guarded by
+ * ndev->flow_mgnt_prepared, and free(NULL) is a no-op for the calloc'ed
+ * handles. Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow rule without programming it: run the full element/action
+ * interpretation under the device mutex and discard the resulting flow
+ * definition. Returns 0 when the rule is expressible, -1 otherwise (error
+ * details, if any, are reported through *error by interpret_flow_elements).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+
+	/* Scratch outputs of the interpreter; not used for validation */
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,
+							  error, 0, &port_id,
+							  &num_dest_port, &num_queues,
+							  packet_data, packet_mask,
+							  &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (!fd)
+		return -1;
+
+	/* Validation only - the interpreted definition is not programmed */
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create a flow rule on an inline-profile device: interpret the element and
+ * action arrays into a flow definition, translate rte group IDs into device
+ * group resources, then build and flush the filter to the NIC. The whole
+ * operation runs under the device mutex. Returns the new flow handle, or
+ * NULL with *error set on failure (any partially created handle is destroyed
+ * before returning).
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy so the caller's attr is never modified */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	/* Forced VLAN VID only applies to group 0 (ingress root) rules */
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs (rte group -> device group resource) */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port action - default to this device's port */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FLOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy one flow handle. Caller must hold the device mutex (hence
+ * "_locked"). The flow is first unlinked from the ndev list, then:
+ *  - FLM flows: unprogram the FLM entry, release the replace-extension and
+ *    replace data resources when their refcounts drop to zero, release the
+ *    group translation and - when this was the last reference - destroy the
+ *    owning KM flow recursively.
+ *  - non-FLM flows: walk every resource type held by the handle and, for
+ *    each index whose refcount drops to zero, preset the corresponding HW
+ *    recipe back to defaults and flush it.
+ * Returns 0 on success, non-zero if any HW release step failed (error is
+ * also reported through *error when supplied).
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the FLM entry (flm_ft = 0, group = 0) */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Release TPE replace-extension + replace data when last ref */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			/* Read RPL pointer/length before presetting the entry */
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Replace data occupies (len+15)/16 16-byte RPL records */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Last FLM flow of the owner: destroy the owner handle too */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Release every HW resource index held by this handle */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the FT identity slot; no HW write needed */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe index spans all TPE sub-modules */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		/* free(NULL) is a no-op if fd was already consumed */
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Public destroy entry point (takes the device mutex itself).
+ * With a non-NULL flow, destroys exactly that flow. With flow == NULL,
+ * destroys every flow - first regular then FLM - belonging to this eth
+ * device; iteration saves the next pointer before each destroy because
+ * flow_destroy_locked_profile_inline() frees the node, and stops at the
+ * first failure. Returns 0 on success, the first non-zero error otherwise.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		flow = dev->ndev->flow_base;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+
+		/* Delete all created FLM flows from this eth device */
+		flow = dev->ndev->flow_base_flm;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * rte_flow flush is not implemented for the inline profile yet; always
+ * fails with FLOW_ERROR_GENERAL and an explanatory message.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	return -1;
+}
+
+/*
+ * rte_flow query is not implemented for the inline profile yet; clears the
+ * output length/data, sets FLOW_ERROR_GENERAL and returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	*length = 0;
+	*data = NULL;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	return -1;
+}
+
+/*
+ * Collect FLM statistics counters into the caller-supplied array.
+ * data[] is indexed in the fixed order of the fields[] table below; every
+ * counter is accumulated onto the existing data[] value except
+ * HW_FLM_STAT_FLOWS, which is a gauge and overwrites. FLM versions older
+ * than 18 only expose fields up to HW_FLM_STAT_PRB_IGNORE, so the loop
+ * stops there for those devices (later data[] slots are left untouched).
+ * Returns 0 on success, -1 if the supplied array is too small.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	/* Refresh the HW counter shadow before reading */
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the TPE IFR (IP fragmentation) MTU recipe for one port: enables
+ * the per-port recipe in both the RPP-IFR and IFR modules and sets the MTU.
+ * The recipes are only flushed to HW if all four set operations succeeded.
+ * Ports >= 255 are rejected (recipe index is derived from the 8-bit port).
+ * Returns 0 on success, non-zero on any set/flush failure.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	int err = 0;
+	uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+	struct flow_nic_dev *ndev = dev->ndev;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  ifr_mtu_recipe, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      ifr_mtu_recipe, mtu);
+
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+						    1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the inline flow profile: per-device flow-management
+ * setup/teardown, rte_flow-style validate/create/destroy/flush/query
+ * operations, and FLM statistics retrieval.
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down the flow-management state prepared below; always returns 0 */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time device preparation (idempotent); 0 on success, -1 on failure */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy a single flow; caller must already hold the device mutex */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Validate a rule without programming it; 0 if expressible, -1 otherwise */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns handle or NULL with *error set */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of this eth device when flow == NULL */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented yet; always fails with FLOW_ERROR_GENERAL */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented yet; always fails with FLOW_ERROR_GENERAL */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Fill data[] with FLM counters; -1 if size is too small */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: one pointer per NTHW flow-filter module
+ * instance.  A NULL module pointer presumably means that module is not
+ * present on this FPGA image (the *_get_present() callbacks below test
+ * exactly that).
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode; /* set via set_debug_mode() */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * _CHECK_DEBUG_ON()/_CHECK_DEBUG_OFF() bracket a register-flush helper:
+ * ON declares a local flag and, when write-debugging is requested for
+ * this backend or module, turns on the module's debug mode; OFF turns it
+ * back off if ON enabled it.  They must be used as a pair in the same
+ * scope, ON before any early return.
+ *
+ * Fixes vs. the original: the hidden flag was named "__debug__", a
+ * reserved identifier (C11 7.1.3); and the ON macro expanded to an
+ * unbraced "if (...) do {...} while (0)" which would capture a
+ * following "else" at the call site.  The condition is now inside a
+ * single do/while and the flag uses a non-reserved name.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int _flow_be_debug_on = 0;                                     \
+	do {                                                           \
+		if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) ||   \
+				(mod)->debug) {                        \
+			mod##_nthw_set_debug_mode(inst, 0xFF);         \
+			_flow_be_debug_on = 1;                         \
+		}                                                      \
+	} while (0)
+
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (_flow_be_debug_on)                   \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Backend callback: record the requested debug mode for this adapter. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	dev->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * INFO capability getters: each callback simply forwards to the
+ * matching info_nthw_get_*() accessor on this adapter's INFO module.
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+	return info_nthw_get_nb_phy_ports(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	return info_nthw_get_nb_rx_ports(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	return info_nthw_get_ltx_avail(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	return info_nthw_get_nb_cat_funcs(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	return info_nthw_get_nb_cat_km_if_cnt(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	return info_nthw_get_nb_cat_km_if_m0(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	return info_nthw_get_nb_cat_km_if_m1(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	return info_nthw_get_nb_queues(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	return info_nthw_get_nb_km_flow_types(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	return info_nthw_get_nb_pm_ext(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	return info_nthw_get_nb_len(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	return info_nthw_get_kcc_size(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	return info_nthw_get_kcc_banks(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	return info_nthw_get_nb_km_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	return info_nthw_get_nb_km_cam_banks(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	return info_nthw_get_nb_km_cam_record_words(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	return info_nthw_get_nb_km_cam_records(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	return info_nthw_get_nb_km_tcam_banks(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	return info_nthw_get_nb_km_tcam_bank_width(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	return info_nthw_get_nb_flm_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	return info_nthw_get_nb_flm_size_mb(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	return info_nthw_get_nb_flm_entry_size(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	return info_nthw_get_nb_flm_variant(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	return info_nthw_get_nb_flm_prios(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	return info_nthw_get_nb_flm_pst_profiles(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	return info_nthw_get_nb_hst_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	return info_nthw_get_nb_qsl_qst_entries(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	return info_nthw_get_nb_pdb_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_ioa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	return info_nthw_get_nb_roa_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_writers(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	return info_nthw_get_nb_tx_cpy_mask_mem(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_depth(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tx_rpl_ext_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	return info_nthw_get_nb_tpe_ifr_categories(((struct backend_dev_s *)be_dev)->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when this adapter's CAT module instance exists. */
+static bool cat_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = (const struct backend_dev_s *)be_dev;
+
+	return dev->p_cat_nthw ? true : false;
+}
+
+/* CAT module version, packed as (major << 16) | minor. */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_cat_nthw->m_cat);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' CFN (categorizer function) records to hardware starting
+ * at record 'cat_func': select each record, program every field of the
+ * version-specific layout (v18, or v21/v22 which adds tunnel error bits
+ * and a second KM interface), then flush.  Always returns 0.
+ *
+ * Fix: both version branches previously opened with the corrupted call
+ * "r(be->p_cat_nthw, 1U);".  Restored to cat_nthw_cfn_cnt(), matching
+ * the <reg>_cnt() setup every other cat_*_flush() helper performs.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' KCE records starting at 'index' to hardware for KM
+ * interface 'km_if_idx'.  v18 has a single KM interface (index 0);
+ * v21/v22 index the enable bitmap per KM interface.  Returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' KCS (category) records starting at 'cat_func' for KM
+ * interface 'km_if_idx'.  Returns 0.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' FTE (flow-type enable bitmap) records starting at 'index'
+ * for KM interface 'km_if_idx'.  Returns 0.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CTE (color-table enable) records to hardware starting at
+ * 'cat_func'.  v18 and v21 share the same CTE layout (programmed via
+ * the v18 view); v22 adds the RRB bit.  Returns 0.
+ *
+ * Fix: in the v22 branch the RRB bit was programmed by a second call to
+ * cat_nthw_cte_enable_tpe() (copy/paste from the line above), so the
+ * TPE enable was overwritten with b.rrb and RRB itself never written.
+ * Now uses cat_nthw_cte_enable_rrb().
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' CTS records (category A/B pairs) starting at 'index'.
+ * All versions are programmed through the v18 view — the v18/v21/v22
+ * layouts are presumably identical for this table; confirm against the
+ * union definition in flow_api_backend.h.  Returns 0.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' COT records (color/km) starting at 'cat_func'; all
+ * versions use the v18 view (see note on cat_cts_flush).  Returns 0.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' CCT records (color/km) starting at 'index'; all versions
+ * use the v18 view (see note on cat_cts_flush).  Returns 0.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' EXO records (dyn/ofs extractor offsets) starting at
+ * 'ext_index'; all versions use the v18 view.  Returns 0.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' RCK data words starting at 'index'.  Returns 0. */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' LEN records (lower/upper bounds, dyn1/dyn2, invert)
+ * starting at 'len_index'.  Returns 0.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' KCC CAM entries (key/category/id) starting at
+ * 'len_index'.  Returns 0.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' CCE records (imm/ind data) starting at 'len_index'.
+ * CCE exists only in register layout v22.  Returns 0.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Write 'cnt' CCS records starting at 'len_index': per-engine enable
+ * flag and value pairs (cor/hsh/qsl/ipf/slc/pdb/msk/hst/epp/tpe/rrb)
+ * plus the sideband sb0..sb2 type/data fields.  v22 only.  Returns 0.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when this adapter's KM module instance exists. */
+static bool km_get_present(void *be_dev)
+{
+	const struct backend_dev_s *dev = (const struct backend_dev_s *)be_dev;
+
+	return dev->p_km_nthw ? true : false;
+}
+
+/* Return the KM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_km_nthw->m_km) << 16) |
+			  (module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
+}
+
+/*
+ * Write KM RCP (recipe) shadow entries [category, category + cnt) to the
+ * KM module: each iteration selects one recipe, writes every v7 field,
+ * then flushes that entry. Only the version-7 layout is handled here;
+ * other versions fall through and return 0 without touching hardware.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* burst length of 1: one entry is flushed per loop iteration */
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_rcp_select(be->p_km_nthw, category + i);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw,
+					 km->v7.rcp[category + i].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw,
+					 km->v7.rcp[category + i].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw,
+					km->v7.rcp[category + i].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw,
+				       km->v7.rcp[category + i].dual);
+			km_nthw_rcp_paired(be->p_km_nthw,
+					 km->v7.rcp[category + i].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw,
+					km->v7.rcp[category + i].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw,
+					km->v7.rcp[category + i].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw,
+						 km->v7.rcp[category + i].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write KM CAM shadow entries to hardware, one record per iteration.
+ * The flat CAM address is (bank << 11) + record + i, i.e. the bank occupies
+ * the bits above bit 11 — presumably 2048 records per bank (TODO confirm
+ * against the CAM register layout).
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_cam_select(be->p_km_nthw,
+					 (bank << 11) + record + i);
+			km_nthw_cam_w0(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w0);
+			km_nthw_cam_w1(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w1);
+			km_nthw_cam_w2(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w2);
+			km_nthw_cam_w3(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w3);
+			km_nthw_cam_w4(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w4);
+			km_nthw_cam_w5(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w5);
+			km_nthw_cam_ft0(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write dirty KM TCAM shadow entries to hardware. Entries whose dirty flag
+ * is clear are skipped; flushed entries get their dirty flag reset.
+ * Flat index layout: bank * 4 * 256 + byte * 256 + value.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int start_idx = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			if (km->v7.tcam[start_idx + i].dirty) {
+				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
+				km_nthw_tcam_t(be->p_km_nthw,
+					     km->v7.tcam[start_idx + i].t);
+				km_nthw_tcam_flush(be->p_km_nthw);
+				km->v7.tcam[start_idx + i].dirty = 0;
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/* Write KM TCI (color/flow-type) shadow entries at bank * 72 + index onward. */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 records (NOTE(review): comment said
+		 * "version 3" but this branch handles ver == 7 — confirm)
+		 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write KM TCQ (bank-mask/qualifier) shadow entries to hardware.
+ * NOTE(review): the original header comment ("index within the bank
+ * (0..71)") appears copy-pasted from km_tci_flush; here the address is
+ * built as bank + (index << 4), i.e. bank in the low 4 bits — confirm
+ * against the TCQ register definition.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width in version 3 = 72 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* Report whether the FLM (flow matcher) register module exists on this backend device. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_flm_nthw != NULL;
+}
+
+/* Return the FLM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
+			  (module_get_minor_version(be->p_flm_nthw->m_flm) &
+			   0xffff));
+}
+
+/* Write the FLM CONTROL shadow register fields and flush them to hardware (version >= 17 only). */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the writable FLM STATUS fields (critical/panic/crcerr) and flush them (version >= 17 only). */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read the FLM STATUS register back from hardware into the shadow struct.
+ * The trailing 1 argument (vs 0 in flm_status_flush) presumably selects the
+ * read/get direction of the accessor — TODO confirm in the nthw FLM API.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM TIMEOUT shadow value and flush it to hardware (version >= 17 only). */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM SCRUB shadow value and flush it to hardware (version >= 17 only). */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_BIN shadow value and flush it to hardware (version >= 17 only). */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_PPS shadow value and flush it to hardware (version >= 17 only). */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_LPS shadow value and flush it to hardware (version >= 17 only). */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM LOAD_APS shadow value and flush it to hardware (version >= 17 only). */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the four FLM PRIO limit/flow-type pairs and flush them to hardware (version >= 17 only). */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write FLM PST shadow entries [index, index + cnt) — bp/pp/tp per entry — and flush each (version >= 17 only). */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write FLM RCP (recipe) shadow entries [index, index + cnt) to hardware:
+ * select each recipe, write all its v17 fields, then flush the entry.
+ * Versions below 17 are silently ignored.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Refresh the shadow buffer-control counters (lrn_free, inf_avail, sta_avail) from the FLM module (version >= 17 only). */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read back all FLM statistics counters into the shadow structs: first
+ * trigger an update of each counter register, then fetch its value.
+ * The v17 block covers learn/unlearn/relearn/age/timeout/probe counters;
+ * version >= 20 additionally reads the status, info, packet and cache
+ * counters from the v20 shadow.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push a block of learn records to the FLM module and refresh the shadow
+ * buffer-control counters. Returns flm_nthw_lrn_data_flush()'s result.
+ * NOTE(review): unlike the other FLM entry points, this does not gate on
+ * flm->ver >= 17 before dereferencing flm->v17 — confirm intentional.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/* Read a block of info records from the FLM module into inf_data, refreshing the buffer-control counters; returns the nthw call's result. */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/* Read a block of status records from the FLM module into sta_data, refreshing the buffer-control counters; returns the nthw call's result. */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* Report whether the HSH (hasher) register module exists on this backend device. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hsh_nthw != NULL;
+}
+
+/* Return the HSH module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
+			  (module_get_minor_version(be->p_hsh_nthw->m_hsh) &
+			   0xffff));
+}
+
+/*
+ * Write HSH RCP (hash recipe) shadow entries [category, category + cnt)
+ * to hardware: select each recipe, write all v5 fields, then flush.
+ * Only version 5 is handled; other versions do nothing.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* Report whether the HST register module exists on this backend device. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hst_nthw != NULL;
+}
+
+/* Return the HST module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hst_nthw->m_hst) << 16) |
+			  (module_get_minor_version(be->p_hst_nthw->m_hst) &
+			   0xffff));
+}
+
+/*
+ * Write HST RCP shadow entries [category, category + cnt) to hardware:
+ * strip mode, start/end offsets and the three modifier command sets per
+ * recipe, flushed one entry at a time. Only version 2 is handled.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* Report whether the QSL (queue selector) register module exists on this backend device. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_qsl_nthw != NULL;
+}
+
+/* Return the QSL module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
+			  (module_get_minor_version(be->p_qsl_nthw->m_qsl) &
+			   0xffff));
+}
+
+/* Write QSL RCP shadow entries [category, category + cnt) to hardware, one flushed entry per iteration (version 7 only). */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Write QSL QST (queue selection table) shadow entries [entry, entry + cnt) to hardware (version 7 only). */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Write QSL QEN (queue enable) shadow entries [entry, entry + cnt) to hardware (version 7 only). */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Write QSL UNMQ (unmatched-queue) shadow entries [entry, entry + cnt) to hardware (version 7 only). */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* Report whether the SLC module was instantiated for this adapter. */
+static bool slc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_slc_nthw != NULL;
+}
+
+/* Return the SLC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_slc_nthw->m_slc);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' SLC recipe (RCP) entries starting at 'category' from
+ * slc->v1.rcp[] to the SLC block.  SLC version 1 only; no-op otherwise.
+ * Always returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* Report whether the SLC-LR module was instantiated for this adapter. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_slc_lr_nthw != NULL;
+}
+
+/* Return the SLC-LR module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_lr_nthw->m_slc_lr);
+	uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' SLC-LR recipe (RCP) entries starting at 'category' from
+ * slc_lr->v2.rcp[] to the SLC-LR block.  SLC-LR version 2 only; no-op
+ * otherwise.  Always returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* Report whether the PDB module was instantiated for this adapter. */
+static bool pdb_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_pdb_nthw != NULL;
+}
+
+/* Return the PDB module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_pdb_nthw->m_pdb);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' PDB recipe (RCP) entries starting at 'category' from
+ * pdb->v9.rcp[] to the PDB block, flushing after each entry.  PDB
+ * version 9 only; no-op otherwise.  Always returns 0.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * BUG FIX: the original wrote pcap_keep_fcs through
+			 * pdb_nthw_rcp_duplicate_bit() a second time, which
+			 * clobbered the duplicate_bit value written just
+			 * above and never programmed the PCAP_KEEP_FCS field.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the global PDB configuration (timestamp format and port offset)
+ * from pdb->v9.config to the PDB block.  PDB version 9 only; no-op
+ * otherwise.  Always returns 0.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* Report whether the IOA module was instantiated for this adapter. */
+static bool ioa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_ioa_nthw != NULL;
+}
+
+/* Return the IOA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_ioa_nthw->m_ioa);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' IOA recipe (RCP) entries starting at 'category' from
+ * ioa->v4.rcp[] to the IOA block, flushing after each entry.  IOA
+ * version 4 only; no-op otherwise.  Always returns 0.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write the two custom VLAN TPID values from ioa->v4.tpid to the IOA
+ * block.  IOA version 4 only; no-op otherwise.  Always returns 0.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' ROA egress-packet-processing (EPP) entries starting at
+ * 'index' from ioa->v4.roa_epp[] to the IOA block.  IOA version 4 only;
+ * no-op otherwise.  Always returns 0.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* Report whether the ROA module was instantiated for this adapter. */
+static bool roa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_roa_nthw != NULL;
+}
+
+/* Return the ROA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_roa_nthw->m_roa);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' tunnel headers starting at 'index' from roa->v6.tunhdr[]
+ * to the ROA block.  ROA version 6 only; no-op otherwise.  Always
+ * returns 0.
+ *
+ * NOTE(review): each header is written as 4 chunks (inner loop), and the
+ * shadow entry is addressed as tunhdr[index / 4 + i].tunnel_hdr[ii * 4],
+ * which implies 'index' is in quarter-header units and each chunk covers
+ * 4 words -- TODO confirm against the ROA register layout.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' tunnel-configuration entries starting at 'category' from
+ * roa->v6.tuncfg[] to the ROA block, flushing after each entry.  ROA
+ * version 6 only; no-op otherwise.  Always returns 0.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the global ROA forwarding configuration from roa->v6.config to
+ * the ROA block.  ROA version 6 only; no-op otherwise.  Always returns 0.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' LAG configuration entries starting at 'index' from
+ * roa->v6.lagcfg[] to the ROA block.  ROA version 6 only; no-op
+ * otherwise.  Always returns 0.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* Report whether the RMC module was instantiated for this adapter. */
+static bool rmc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_rmc_nthw != NULL;
+}
+
+/* Return the RMC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_rmc_nthw->m_rmc);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_rmc_nthw->m_rmc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the RMC control register fields from rmc->v1_3.ctrl to the RMC
+ * block.  Only version 0x10003 (i.e. 1.3, matching the v1_3 shadow
+ * layout) is handled; no-op otherwise.  Always returns 0.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/* TPE is present only when every one of its six sub-module handles exists. */
+static bool tpe_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	if (be->p_csu_nthw == NULL || be->p_hfu_nthw == NULL)
+		return false;
+	if (be->p_rpp_lr_nthw == NULL || be->p_tx_cpy_nthw == NULL)
+		return false;
+	return be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Derive a single TPE interface version from the versions of the six
+ * TPE sub-modules (CSU, HFU, RPP-LR, TX-CPY, TX-INS, TX-RPL).  Only two
+ * exact combinations are recognized, differing in the RPP-LR version
+ * (0 -> TPE v1, 1 -> TPE v2).  An unrecognized combination triggers
+ * assert(false) -- which aborts in debug builds but falls through to
+ * return 0 when compiled with NDEBUG.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	assert(false);
+	return 0;
+}
+
+/*
+ * Write 'cnt' RPP-LR recipe entries starting at 'index' from
+ * rpp_lr->v1.rpp_rcp[] to the RPP-LR block.  Requires TPE version >= 1;
+ * no-op otherwise.  Always returns 0.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' RPP-LR IFR recipe entries (en + mtu) starting at 'index'
+ * from rpp_lr->v2.rpp_ifr_rcp[] to the RPP-LR block.  Requires TPE
+ * version >= 2; returns -1 for older versions, 0 on success.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Write 'cnt' IFR recipe entries (en + mtu) starting at 'index' from
+ * ifr->v2.ifr_rcp[] to the IFR block.  Requires TPE version >= 2;
+ * returns -1 for older versions, 0 on success.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Write 'cnt' TX-INS recipe entries (dyn, ofs, len) starting at 'index'
+ * from tx_ins->v1.ins_rcp[] to the TX-INS block.  Requires TPE version
+ * >= 1; no-op otherwise.  Always returns 0.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX-RPL recipe entries starting at 'index' from
+ * tx_rpl->v1.rpl_rcp[] to the TX-RPL block.  Requires TPE version >= 1;
+ * no-op otherwise.  Always returns 0.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX-RPL extension entries (rpl_ptr) starting at 'index'
+ * from tx_rpl->v1.rpl_ext[] to the TX-RPL block.  Requires TPE version
+ * >= 1; no-op otherwise.  Always returns 0.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX-RPL replacement-data entries (value) starting at
+ * 'index' from tx_rpl->v1.rpl_rpl[] to the TX-RPL block.  Requires TPE
+ * version >= 1; no-op otherwise.  Always returns 0.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX-CPY recipe entries starting at 'index'.  The flat
+ * recipe index is split into a writer index ((index + i) /
+ * nb_rcp_categories) and a per-writer entry ((index + i) %
+ * nb_rcp_categories); the per-writer count register is reprogrammed
+ * each time the writer changes.  Requires TPE version >= 1; no-op
+ * otherwise.  Always returns 0.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* -1 wraps to UINT_MAX: a sentinel no real writer index can match,
+	 * so the first loop iteration always programs the writer count.
+	 */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' HFU (header field update) recipe entries starting at
+ * 'index' from hfu->v1.hfu_rcp[] to the HFU block, flushing after each
+ * entry.  Covers the three length-update groups (A/B/C), TTL update,
+ * protocol/offset fields.  Requires TPE version >= 1; no-op otherwise.
+ * Always returns 0.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CSU (checksum update) recipe entries starting at 'index'
+ * from csu->v1.csu_rcp[] -- the outer/inner L3/L4 checksum commands --
+ * to the CSU block.  Requires TPE version >= 1; no-op otherwise.
+ * Always returns 0.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Stub: Rx queue allocation is not supported by this backend.
+ * Logs an error and returns -1.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	/* Report on stderr so the error is not lost in redirected stdout. */
+	fprintf(stderr, "ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Stub: Rx queue release is not supported by this backend.  Logs an
+ * error but still returns 0 (callers apparently do not treat this as
+ * fatal -- keep the return value for compatibility).
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	/* Report on stderr so the error is not lost in redirected stdout. */
+	fprintf(stderr, "ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table exported to the generic flow API layer.
+ * Initialization is positional, so the order of entries below must
+ * match the member order of struct flow_api_backend_ops exactly.  The
+ * leading '1' is presumably an interface/ABI version number -- TODO
+ * confirm against the struct definition.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	alloc_rx_queue,
+	free_rx_queue,
+
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe and instantiate all FPGA flow filter backend modules for the
+ * adapter that owns @p_fpga, caching each handle in the static be_devs[]
+ * slot indexed by the physical adapter number.
+ *
+ * The recurring pattern per module is: xxx_nthw_init(NULL, ...) acts as an
+ * existence probe against the FPGA; only when it returns 0 is a real
+ * instance allocated with xxx_nthw_new() and initialized, otherwise the
+ * corresponding be_devs[] pointer is left NULL (absent module).
+ *
+ * @p_fpga: FPGA instance the backend modules bind to.
+ * @dev:    out; receives a pointer to this adapter's backend_dev_s slot.
+ *
+ * Returns the backend ops vtable (&flow_be_iface); never NULL.
+ *
+ * NOTE(review): the xxx_nthw_new() results are used without NULL checks —
+ * presumably allocation failure is treated as fatal elsewhere; confirm.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	/* INFO module is created unconditionally (no probe step). */
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	/* Publish this adapter's backend slot to the caller. */
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down every backend module instance created by bin_flow_backend_init().
+ * @dev is the backend_dev_s pointer handed out via that function's @dev
+ * out-argument.  Slots for modules absent from the FPGA are NULL, so each
+ * xxx_nthw_delete() is presumably NULL-tolerant — confirm in the nthw code.
+ * The delete order is independent of the init order (modules do not
+ * reference each other here).
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/* Probe/instantiate the FPGA flow filter backend modules for @p_fpga.
+ * Stores the per-adapter backend device context in *be_dev and returns
+ * the backend ops vtable (never NULL). */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+/* Release every backend module instance referenced by @be_dev (a pointer
+ * previously produced by bin_flow_backend_init()). */
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Create the flow filter API instance for one FPGA/adapter.
+ *
+ * Initializes the binary flow backend (FPGA filter modules) and hands the
+ * resulting ops vtable plus backend device context to flow_api_create().
+ *
+ * @p_fpga:        FPGA instance to attach the flow filter to.
+ * @p_flow_device: out; receives the created flow_nic_dev, NULL on failure.
+ * @adapter_no:    adapter index recorded in the flow device.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		/*
+		 * Release the backend module instances allocated by
+		 * bin_flow_backend_init() above; without this they leak
+		 * on the error path.
+		 */
+		if (be_dev)
+			bin_flow_backend_done(be_dev);
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Destroy a flow filter instance created by flow_filter_init().
+ * The backend device pointer is fetched before flow_api_done() tears the
+ * flow device down, then the backend modules are released.
+ *
+ * Returns the result of flow_api_done() (0 on success).
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *be_dev = flow_api_get_be_dev(dev);
+
+	int res = flow_api_done(dev);
+
+	if (be_dev)
+		bin_flow_backend_done(be_dev);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard uses an "_HPP_" suffix in a C header, and the
+ * leading double underscore is an implementation-reserved identifier;
+ * consider renaming (e.g. FLOW_FILTER_H_). */
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+/* Ensure the OPAE code paths stay disabled for this build. */
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/* Create a flow filter instance for @p_fpga; see flow_filter.c. */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Destroy an instance created by flow_filter_init(). */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-08-31 13:51   ` [PATCH v12 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-08-31 13:51   ` Mykola Kostenok
  2023-08-31 13:51   ` [PATCH v12 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and makes the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
v10:
* Fix wrong queue id range.
v11:
* Replace stdatomic with compiler built-in atomics.
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  355 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1245 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12511 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..87ac68ee24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VF’s are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs support sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple processes.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state (clone of the corresponding kernel-driver
+ * structure; see ntos_system.h comment). */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;              /* PCI identity of the adapter — encoding defined elsewhere; confirm */
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;               /* driver name string; ownership not visible here — confirm who frees */
+
+	volatile bool b_shutdown;       /* set to request the worker threads below to exit */
+	pthread_mutex_t stat_lck;       /* protects statistics shared with stat_thread */
+	pthread_t stat_thread;          /* statistics polling thread */
+	pthread_t flm_thread;           /* FLM (flow matcher) worker thread */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;                 /* index of this adapter */
+	struct rte_pci_device *p_dev;   /* backing DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;       /* embedded per-adapter driver state */
+
+	int n_eth_dev_init_count;       /* number of eth devices initialized on this adapter */
+	int probe_finished;             /* nonzero once PCI probe has completed */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+/* Alignment (bytes) for the virtqueue avail/used/desc sections; see
+ * dbs_calc_struct_layout(). */
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+/* Size of the static rxvq[]/txvq[] queue tables. */
+#define MAX_VIRT_QUEUES 128
+
+/* Highest queue index programmed by set_rx_control()/set_tx_control(). */
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+/* RX control knobs: AM = avail monitor, UW = used writer, Q = queue. */
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+/* Poll speed register values — units are FPGA-defined; confirm in the
+ * DBS register documentation. */
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+/* TX control knobs, mirroring the RX set above. */
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+/* Virtio-spec field naming shorthand.  These are plain host-order
+ * integers here — assumes a little-endian host; TODO confirm. */
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+/* Split-ring available ring, packed to match the virtio wire layout. */
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+/* Split-ring used ring, packed to match the virtio wire layout. */
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used/desc sections inside one contiguous split-ring
+ * virtqueue allocation; computed by dbs_calc_struct_layout(). */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+/* Lifecycle state of a rxvq[]/txvq[] slot. */
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+/* 1 when the queue uses the packed virtqueue format, 0 for split. */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * Per-queue state for one DBS-managed virtqueue.  The anonymous union
+ * holds either the split-ring or the packed-ring bookkeeping, selected
+ * by vq_type — only one arm is valid for a given queue.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;       /* MSI-X vector, or -1 when interrupts are disabled */
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;       /* DBS queue number (index into rxvq[]/txvq[]) */
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	/* Physical (bus) addresses of the ring structs, as programmed
+	 * into the DBS registers. */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Byte offsets of the packed-ring event suppression structs inside one
+ * contiguous packed virtqueue allocation. */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* Per-direction queue tables, one slot per DBS queue number; a slot with
+ * usage == UNUSED is free. */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one RX queue via the DBS init register interface.
+ * Busy-waits until the interface is idle, issues the init for @queue with
+ * the given start index/pointer, then busy-waits for completion.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Initialize one TX queue via the DBS init register interface.
+ * Mirrors dbs_init_rx_queue(): wait-idle, issue init, wait-complete.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * One-time bring-up of the DBS (virtqueue) subsystem for an FPGA.
+ * Creates the DBS module, resets it, initializes every RX/TX queue to
+ * index/pointer 0, and enables the control machinery in stages.
+ *
+ * Returns 0 on success, -1 or the dbs_init() error code on failure.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	/* Probe-then-create pattern: a NULL first argument only checks
+	 * for the module's presence in the FPGA. */
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* Mark every software queue slot free before touching hardware. */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Staged enable: all off -> monitors on -> queues on.  The exact
+	 * sequencing presumably matches the DBS hardware requirements
+	 * (DSF00094); confirm against the register documentation. */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the byte offsets of the used ring and descriptor table within
+ * one contiguous split-ring virtqueue allocation of @queue_size entries.
+ * Each section (avail, then used) is rounded up to the next
+ * STRUCT_ALIGNMENT (4 KiB) boundary.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	size_t avail_mem =
+		sizeof(struct virtq_avail) +
+		queue_size *
+		sizeof(le16); /* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_mem_aligned =
+		((avail_mem % STRUCT_ALIGNMENT) == 0) ?
+		avail_mem :
+		STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_mem =
+		sizeof(struct virtq_used) +
+		queue_size *
+		sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned =
+		((used_mem % STRUCT_ALIGNMENT) == 0) ?
+		used_mem :
+		STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+/*
+ * Initialize a split-ring avail struct at @addr: interrupts suppressed
+ * (VIRTQ_AVAIL_F_NO_INTERRUPT), idx preset to @initial_avail_idx, and an
+ * identity ring (entry i refers to descriptor i).
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+/*
+ * Initialize a split-ring used struct at @addr: flags = 1
+ * (VIRTQ_USED_F_NO_NOTIFY), idx = 0, and all ring entries zeroed.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = 1;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Populate a split-ring descriptor table at @addr from the packet buffer
+ * descriptors: one descriptor per buffer with its physical address,
+ * length, the given flags, and no chaining (next = 0).  A NULL
+ * @packet_buffer_descriptors leaves the table untouched.
+ *
+ * NOTE(review): the `ule16` parameter type is not defined in this file
+ * (only le16/le32/le64 above) — presumably it comes from
+ * ntnic_dbsconfig.h; confirm.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	/* Initialize all three split-ring structures (avail, used, desc)
+	 * of one virt-queue in a single call. 'flgs' is applied to every
+	 * descriptor (e.g. VIRTQ_DESC_F_WRITE for Rx); a NULL
+	 * packet_buffer_descriptors leaves the descriptor ring untouched.
+	 */
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	/*
+	 * Return floor(log2(qsize)) — the position of the highest set bit.
+	 * Queue sizes are powers of two, so this is the exact exponent.
+	 *
+	 * Fix: guard qsize == 0. The original decremented the counter
+	 * unconditionally, so a zero input underflowed (0 - 1) and a
+	 * garbage exponent was truncated into the le16 return value.
+	 */
+	uint32_t qs = 0;
+
+	if (qsize == 0)
+		return 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Program an RX virt-queue into the DBS FPGA module (DR, UW and AM
+ * register sets, in the order required by the DSF00094 procedure),
+ * initialize it via RX_INIT, record its software state in rxvq[] and
+ * return a pointer to that entry.
+ * Returns NULL if any register write fails.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	/* Packed rings program the descriptor area into UW; split rings the
+	 * used ring.
+	 */
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Disable an RX virt-queue: clear the interrupt/sticky state in UW,
+ * disable the avail-monitor (AM), then wait for the FPGA to finish
+ * processing in-flight packets.
+ * Only UNMANAGED (directly set up) queues may be disabled here.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Enable an RX virt-queue: re-program UW with the interrupt settings
+ * (sticky interrupt when a valid irq_vector is configured) and turn the
+ * avail-monitor (AM) back on.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable a TX virt-queue: clear the interrupt/sticky state in UW,
+ * disable the avail-monitor (AM), then wait for the FPGA to finish
+ * processing in-flight packets.
+ * Only UNMANAGED (directly set up) queues may be disabled here.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Enable a TX virt-queue: re-program UW with the interrupt settings
+ * (sticky interrupt when a valid irq_vector is configured) and turn the
+ * avail-monitor (AM) back on.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Rewrite the queue's DR entry with a new output port, then
+	 * re-enable the queue through the normal enable path.
+	 */
+	nthw_dbs_t *const dbs = tx_vq->mp_nthw_dbs;
+	const uint32_t qsize_log2 = dbs_qsize_log2(tx_vq->queue_size);
+	const int res = set_tx_dr_data(dbs, tx_vq->index,
+				       (uint64_t)tx_vq->desc_struct_phys_addr,
+				       tx_vq->host_id, qsize_log2, outport, 0,
+				       PACKED(tx_vq->vq_type));
+
+	if (res != 0)
+		return -1;
+
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Program per-port Tx QoS settings; 'ir' and 'bs' are rate/burst values
+ * passed straight through to the DBS QoS register helper.
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/* Set the global Tx QoS rate as a multiplier/divider pair. */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	/* Read the RX queue pointer; *p_index becomes INDEX_PTR_NOT_VALID
+	 * when the hardware reports the pointer as invalid. Returns the
+	 * register-read status (0 on success).
+	 */
+	uint32_t ptr = 0;
+	uint32_t queue = 0;
+	uint32_t valid = 0;
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	/* Read the TX queue pointer; *p_index becomes INDEX_PTR_NOT_VALID
+	 * when the hardware reports the pointer as invalid. Returns the
+	 * register-read status (0 on success).
+	 */
+	uint32_t ptr = 0;
+	uint32_t queue = 0;
+	uint32_t valid = 0;
+	const int status = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return 0;
+}
+
+/* Select which RX queue subsequent dbs_get_rx_ptr() reads refer to. */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Select which TX queue subsequent dbs_get_tx_ptr() reads refer to. */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	/* Spin on the idle register (RX or TX flavor) until the busy flag
+	 * drops or a register read fails; returns the last read status.
+	 */
+	nthw_dbs_t *dbs = vq->mp_nthw_dbs;
+	uint32_t queue;
+	uint32_t busy;
+	int err;
+
+	do {
+		err = rx ? get_rx_idle(dbs, idle, &queue, &busy) :
+		      get_tx_idle(dbs, idle, &queue, &busy);
+	} while (err == 0 && busy);
+
+	return err;
+}
+
+/*
+ * Ask the FPGA to idle the given queue and poll until it reports idle.
+ * If the idle registers are not supported (-ENOTSUP, older FPGA image),
+ * fall back to a fixed 200 ms grace period and report success.
+ * Returns 0 when the queue is idle, -1 on register failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		/* Request idle for this queue, then wait for busy to clear. */
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Full teardown of an RX virt-queue: clear UW, disable and clear AM,
+ * wait for the FPGA to drain, clear DR, re-init the hardware queue and
+ * reset the software state. Returns 0 on success, -1 on failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* Fix: test for NULL before dereferencing. The original read
+	 * rxvq->mp_nthw_dbs first, which made the NULL check dead code
+	 * and a potential NULL-pointer dereference.
+	 */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* Only queues set up directly (UNMANAGED) may be released here;
+	 * managed queues go through nthw_release_managed_rx_virt_queue().
+	 */
+	if (rxvq && rxvq->usage == UNMANAGED)
+		return dbs_internal_release_rx_virt_queue(rxvq);
+
+	return -1;
+}
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* Managed queues own a private copy of the packet-buffer
+	 * descriptor table; free it before the common teardown.
+	 */
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	free(rxvq->p_virtual_addr);	/* free(NULL) is a no-op */
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Full teardown of a TX virt-queue: clear UW, disable and clear AM,
+ * wait for the FPGA to drain, clear DR and QP, re-init the hardware
+ * queue and reset the software state. Returns 0 on success, -1 on
+ * failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* Fix: test for NULL before dereferencing. The original read
+	 * txvq->mp_nthw_dbs first, which made the NULL check dead code
+	 * and a potential NULL-pointer dereference.
+	 */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* Only queues set up directly (UNMANAGED) may be released here;
+	 * managed queues go through nthw_release_managed_tx_virt_queue().
+	 */
+	if (txvq && txvq->usage == UNMANAGED)
+		return dbs_internal_release_tx_virt_queue(txvq);
+
+	return -1;
+}
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* Managed queues own a private copy of the packet-buffer
+	 * descriptor table; free it before the common teardown.
+	 */
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	free(txvq->p_virtual_addr);	/* free(NULL) is a no-op */
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Program a TX virt-queue into the DBS FPGA module (DR, UW, AM and QP
+ * register sets, in the order required by the DSF00094 procedure),
+ * initialize it via TX_INIT, record its software state in txvq[] and
+ * return a pointer to that entry.
+ * Returns NULL if any register write fails.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings program the descriptor area into UW; split rings the
+	 * used ring.
+	 */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a managed split-ring RX queue: lay out avail/used/desc inside
+ * p_virt_struct_area, pre-fill the descriptors with p_packet_buffers,
+ * keep a private copy of the buffer descriptor table and program the
+ * hardware. Returns the queue handle, or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Fix: don't memcpy through a failed allocation. */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a managed split-ring TX queue: lay out avail/used/desc inside
+ * p_virt_struct_area, keep a private copy of the buffer descriptor
+ * table and program the hardware. Returns the queue handle, or NULL on
+ * allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* Fix: don't memcpy through a failed allocation. */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Shared packed-ring setup for managed Rx and Tx queues: carve the
+ * descriptor ring plus the two event-suppression structures out of
+ * p_virt_struct_area, pre-fill the descriptors (buffer addresses for
+ * Rx; free buffer IDs for Tx) and clone the buffer descriptor table.
+ * Returns 0 on success, -1 on allocation failure or zero queue size.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	/* Layout: descriptors first, then device and driver event areas. */
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	/* A full pass over the ring flips the corresponding wrap bit. */
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a managed packed-ring RX queue: build the ring in
+ * p_virt_struct_area, then program the hardware with the driver/device
+ * event areas and the descriptor ring. Returns NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a managed packed-ring TX queue: build the ring in
+ * p_virt_struct_area, then program the hardware with the driver/device
+ * event areas and the descriptor ring. Returns NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on ring layout; any other vq_type yields NULL. */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on ring layout; any other vq_type yields NULL. */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Flag value for a descriptor made available in the current wrap cycle. */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse used flag: marks the descriptor as NOT used by the device yet. */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance the avail cursor by _num, toggling the avail wrap bit when the
+ * cursor passes the end of the ring.
+ */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance the used cursor by _num, toggling the used wrap bit when the
+ * cursor passes the end of the ring.
+ */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Fetch up to 'n' received descriptor entries into 'rp'.
+ * Returns the number of descriptor segments consumed; *nb_pkts is set
+ * to the number of whole packets delivered.
+ * Split ring: a packet may span several descriptors (when cap_len
+ * exceeds the buffer size); a packet is delivered only if all of its
+ * segments fit within 'n'. Packed ring: relies on in-order completion
+ * from the FPGA, so segments == packets.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				/* Collect the remaining segments of this packet. */
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* A descriptor is complete when both flag bits match
+			 * the current used wrap state.
+			 */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ *
+ * SPLIT ring:  descriptors were configured at queue setup; only the avail
+ *              index needs advancing.
+ * PACKED ring: re-publish n descriptors starting at next_avail. The flags
+ *              word of the FIRST descriptor is written last, behind a store
+ *              barrier, because that single write hands the whole batch to
+ *              the device.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/* Store barrier: all descriptor field writes above must be
+		 * visible to the device before the first flags word transfers
+		 * ownership. A read barrier (rte_rmb) cannot order stores and
+		 * was insufficient here.
+		 */
+		rte_wmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Debug logging stub - expands to nothing in production builds */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n TX descriptors for the caller to fill.
+ *
+ * On return, *first_idx is the index of the first reserved descriptor, cvq
+ * describes the ring (split or packed) those indices refer to, and
+ * *p_virt_addr points to the queue's buffer descriptor table.
+ * Returns the number of descriptors actually granted (<= n), or 0 for an
+ * unknown ring type.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		/* One entry is kept free to distinguish full from empty */
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* "outs" appears to stash descriptors the HW completed beyond
+		 * what a previous caller requested; hand those out first.
+		 */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Stop at the first descriptor still owned by HW */
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* desc->id may point past next_used when HW completed
+			 * a multi-descriptor chain; skip the whole chain.
+			 */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Chain skipping may overshoot n; remember the surplus in
+		 * "outs" for the next caller.
+		 */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled TX buffers (with per-packet segment counts in n_segs[])
+ * over to the device.
+ *
+ * SPLIT ring:  publish one avail-ring entry per packet, then update the
+ *              avail index behind a full barrier.
+ * PACKED ring: publish descriptors; the FIRST descriptor's flags word is
+ *              written last, behind a store barrier, since that write
+ *              transfers ownership of the whole batch.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Store barrier: descriptor writes above must reach memory
+		 * before the first flags word makes them visible to the FPGA.
+		 * The previous rte_rmb() was a read barrier and could not
+		 * order these stores.
+		 */
+		rte_wmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the HW RX queue pointer for rxvq.
+ * Polls the DBS register up to 100000 times with a 10 us pause between
+ * attempts (~1 s budget). Returns 0 with the low 16 bits in *index, or -1
+ * on register read failure or timeout.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	for (;;) {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;	/* register read failed */
+		if (rx_ptr != INDEX_PTR_NOT_VALID)
+			break;	/* got a valid pointer */
+		/* Original code slept and consumed a timeout tick even after
+		 * a valid read, and discarded a valid value read on the final
+		 * iteration; only sleep/decrement while still invalid.
+		 */
+		if (--loops == 0)
+			return -1;	/* timed out */
+		usleep(10);
+	}
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the HW TX queue pointer for txvq.
+ * Polls the DBS register up to 100000 times with a 10 us pause between
+ * attempts (~1 s budget). Returns 0 with the low 16 bits in *index, or -1
+ * on register read failure or timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;	/* register read failed */
+		if (tx_ptr != INDEX_PTR_NOT_VALID)
+			break;	/* got a valid pointer */
+		/* As in the RX variant: only sleep and spend timeout budget
+		 * while the pointer is still invalid.
+		 */
+		if (--loops == 0)
+			return -1;	/* timed out */
+		usleep(10);
+	}
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/* One DMA buffer: device (physical) and host (virtual) view plus size. */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/* Little-endian wire-format integer aliases used by the virtqueue layouts.
+ * NOTE(review): defined as macros rather than typedefs, so the compiler
+ * cannot type-check them against plain integers - consider typedefs.
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Virtqueue layout variants (VirtIO 1.0 split / 1.1 packed) */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+/* In-order completion negotiation values */
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor (VirtIO 1.0 layout, byte-packed to match the
+ * wire format exactly)
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags (ownership bits, interpreted relative to the
+ * ring's wrap counters)
+ */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned
+ * NOTE(review): #pragma pack(16) only caps member alignment at 16; it does
+ * not force 16-byte alignment of the struct itself. pvirtq_desc is naturally
+ * 16 bytes, so arrays stay aligned if their base is - confirm allocation
+ * alignment at the call sites.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Packed-ring event suppression area (driver/device event words) */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned
+	 * NOTE(review): this pack(push, 16) sits in the middle of the struct
+	 * and its value (16) does not match the 4-byte claim above - verify
+	 * intent; it likely has no effect on this layout.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ *
+ * Helper macros operating on a struct nthw_cvirtq_desc regardless of ring
+ * type. Arguments are captured in __typeof__ temporaries so each is
+ * evaluated exactly once.
+ */
+/* Set the "next" link of a split-ring descriptor; a no-op for packed rings,
+ * which have no next field.
+ */
+#define vq_set_next(_vq, index, nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/* OR flags into a descriptor of either ring type */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/* Overwrite the flags of a descriptor of either ring type */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Address/length prefix common to both descriptor layouts */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* Type-tagged view of a descriptor table: access via b/s/p depending on
+ * vq_type (SPLIT_RING or PACKED_RING).
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+/* Enable/disable/tear down an RX queue set up above */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+/* TX counterpart of nthw_setup_rx_virt_queue() */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+/* "Managed" variants: the queue structures and packet buffers are supplied
+ * by the caller as memory descriptors.
+ */
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+/* TX QoS shaping: per-port rate (ir) and burst size (bs) configuration */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+/* One received buffer as reported to the upper layer */
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..2c5e47f996
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Debug helper: hex-dump len bytes at data to stdout, 16 bytes per line,
+ * optionally prefixed by a caption line. Compiled only for debug builds.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int x;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (x = 0; x < len; x++) {
+		if (!(x % 16))
+			printf("\n%04X:", x);
+		printf(" %02X", *(data + x));
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+/* NOTE(review): "= { -1 }" sets only element 0 to -1; the remaining entries
+ * are zero-initialized. If -1 means "logtype not registered" for every
+ * module, all entries should be initialized to -1 - confirm against usage.
+ */
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter's driver structure in the global g_p_drv[] table,
+ * keyed by adapter number. Logs a warning (and still overwrites) if the
+ * slot is already occupied.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/* g_p_drv has NUM_ADAPTER_MAX entries, so adapter_no must be
+	 * strictly less than it. The original '>' comparison allowed
+	 * adapter_no == NUM_ADAPTER_MAX, an out-of-bounds write.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/* Find the registered adapter whose PCI identity matches the given
+ * domain/bus address; returns NULL if none is registered. The global table
+ * is scanned under hwlock.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *p_found = NULL;
+	int adapter;
+
+	rte_spinlock_lock(&hwlock);
+	for (adapter = 0; adapter < NUM_ADAPTER_MAX; adapter++) {
+		struct drv_s *p_cur = g_p_drv[adapter];
+
+		if (!p_cur)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(p_cur->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(p_cur->ntdrv.pciident) ==
+				addr.bus) {
+			p_found = p_cur;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_found;
+}
+
+/* Unpack a 32-bit pciident into an rte_pci_addr and look up the adapter. */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter information for the adapter identified by pciident to pfh.
+ * Returns the nt4ga_adapter_show_info() result, or -1 when no adapter with
+ * that identity is registered (the original dereferenced a NULL p_drv in
+ * that case).
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1;
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/* Return the DBS (doorbell/queue) handle for the adapter at pci_addr, or
+ * NULL (with an error log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	}	else {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+	}
+	return p_nthw_dbs;
+}
+
+/* Return the FPGA profile (e.g. inline/capture) of the adapter at pci_addr,
+ * or FPGA_INFO_PROFILE_UNKNOWN (with an error log) when not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	enum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		fpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+	}
+	return fpga_profile;
+}
+
+/*
+ * rte_kvargs handler: parse value_str as an unsigned 32-bit number
+ * (decimal, octal or 0x-prefixed hex, per strtoul base 0) into *extra_args.
+ * Returns 0 on success, -1 on NULL arguments or malformed / out-of-range
+ * input (the original used strtol() with no error checking and silently
+ * truncated).
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	char *end = NULL;
+	unsigned long value;
+
+	if (!value_str || !extra_args)
+		return -1;
+	errno = 0;
+	value = strtoul(value_str, &end, 0);
+	if (end == value_str || *end != '\0' || errno == ERANGE ||
+			value > UINT32_MAX)
+		return -1;
+	*(uint32_t *)extra_args = (uint32_t)value;
+	return 0;
+}
+
+/* One parsed "<port>:<speed>" pair from the devargs */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000
+ * extra_args is a pointer to a cursor into an array of port_link_speed; the
+ * cursor is advanced one element per successful parse so the handler can be
+ * invoked repeatedly by rte_kvargs.
+ * NOTE(review): strtol results are not range-checked - malformed numeric
+ * parts silently become 0; consider strtol error handling.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	/* strtol leaves semicol at the first non-digit; require ':' there */
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* advance the caller's cursor to the next array slot */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill a DPDK rte_eth_stats structure from the driver's per-queue counters.
+ * Pulls fresh HW numbers via poll_statistics() first, then aggregates the
+ * per-queue values into the port totals. Returns 0 on success, -1 when the
+ * driver state is not usable.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* Port arrays hold NUM_ADAPTER_PORTS_MAX entries, so a valid index
+	 * is < NUM_ADAPTER_PORTS_MAX; the original '>' let the boundary
+	 * value through.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index >= NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero the driver's per-queue RX/TX counters and record the reset time.
+ * Serialized against the statistics poller via stat_lck.
+ * Returns 0 on success, -1 when the driver state is not usable.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* Valid port index is < NUM_ADAPTER_PORTS_MAX; the original '>'
+	 * let the boundary value through (same class of bug as in
+	 * dpdk_stats_collect()).
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no >= NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* Map an NT link speed enum value to the corresponding ETH_SPEED_NUM_xxx
+ * value; unknown speeds map to ETH_SPEED_NUM_NONE.
+ * NOTE: ETH_SPEED_NUM_xxx (a plain number) is distinct from
+ * ETH_LINK_SPEED_xxx (a capability bit mask).
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex enum to the DPDK ETH_LINK_*_DUPLEX value; unknown
+ * duplex modes map to 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/* DPDK link_update callback: refresh dev_link (status/speed/duplex).
+ * Virtual/override ports report UP once virtio negotiation has completed;
+ * physical ports query the adapter layer. A stopped device always reports
+ * link DOWN. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			/* No physical link: status reflects whether the
+			 * virtio port has negotiated with its peer.
+			 */
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		/* Physical port: query adapter layer for live state */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/* DPDK stats_get callback: delegate to dpdk_stats_collect() and propagate
+ * its result (the original discarded the return value and always reported
+ * success, hiding invalid-state errors from the ethdev layer).
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	return dpdk_stats_collect(internals, stats);
+}
+
+/* DPDK stats_reset callback: delegate to dpdk_stats_reset() and propagate
+ * its result (the original discarded the return value and always reported
+ * success).
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/* Translate an NT link-speed capability bit mask into the corresponding
+ * DPDK ETH_LINK_SPEED_xxx capability mask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t entry;
+
+	for (entry = 0; entry < sizeof(speed_map) / sizeof(speed_map[0]);
+			entry++) {
+		if (nt_link_speed_capa & speed_map[entry].nt_bit)
+			eth_speed_capa |= speed_map[entry].eth_bit;
+	}
+	return eth_speed_capa;
+}
+
+/* RSS types covering IP/TCP/UDP 5-tuple hashing */
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/* DPDK dev_infos_get callback: report queue counts, MTU limits, link speed
+ * capabilities and RSS offload capabilities for this port.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* Inline profile requires a larger minimum MTU */
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use a key, so this is just a fake key
+	 * length to fit testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet from virtqueue segments into an mbuf chain.
+ *
+ * @mbuf:     destination mbuf (first segment; headroom is preserved)
+ * @mb_pool:  pool used to allocate additional mbufs when the packet does
+ *            not fit in @mbuf
+ * @hw_recv:  virtqueue buffer descriptors holding the packet; the first
+ *            buffer starts with an SG_HDR_SIZE descriptor header which is
+ *            stripped from the copy
+ * @max_segs: number of virtqueue segments available for this packet
+ * @data_len: total captured length, including the SG_HDR_SIZE header
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on failure
+ * (mbuf allocation failure, or more than @max_segs segments needed).
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* One iteration per consumed virtqueue segment */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		/* Source segment larger than the room left in the mbuf? */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			/* Fill the current mbuf segment to the brim first */
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				/* nb_segs is tracked on the head mbuf only */
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			/* Head mbuf pkt_len was already set up front */
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* Ran out of virtqueue segments before the packet was complete */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+/*
+ * DPDK Rx burst function (scatter-gather virtio path).
+ *
+ * Pulls up to @nb_pkts whole packets from the Rx virtqueue, copies each
+ * into freshly allocated mbufs (chaining extra mbufs when a packet does
+ * not fit into one), fills in FDIR metadata from the NT packet header,
+ * and releases the consumed virtqueue segments.
+ *
+ * Returns the number of packets delivered in @bufs.
+ */
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	/* Global kill switch: PMD is shutting down */
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	/* Segment count returned; whole_pkts is the complete-packet count */
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* update debug statistics with new rx packet count */
+#endif
+
+	/* Only fully received packets are delivered to the application */
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		/* NT descriptor header precedes the packet data in the vq buffer */
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		/* Sanity: capture length must at least cover the NT header */
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			/* Fast path: one vq segment that fits in one mbuf */
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				/* Slow path: multi-segment copy with mbuf chaining */
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			/* Reset FDIR flags; port is marked invalid */
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			if (phdr->color_type == 0) {
+				/* Tunnel path: encode VNI and port into fdir.hi */
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				/* Flow-stat color: report as FDIR id */
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	/* Always hand the consumed segments back to the virtqueue */
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one mbuf (possibly chained) into one or more Tx virtqueue buffers.
+ *
+ * @cvq_desc:     combined virtqueue descriptor handle; per-buffer lengths
+ *                and flags/next links are updated as segments are filled
+ * @vq_descr_idx: index of the first descriptor; the SG_HDR_SIZE header
+ *                written by the caller is accounted for in its length
+ * @vq_bufs:      buffer memory backing each descriptor
+ * @max_segs:     maximum number of virtqueue segments this packet may use
+ * @mbuf:         source packet
+ *
+ * Returns the number of virtqueue segments used, or -1 when the packet
+ * needed more than @max_segs segments.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	/* First vq buffer already holds the SG header */
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	/* One iteration per source mbuf segment */
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			/* Fill current vq buffer to the brim */
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* Chain the current descriptor to the next one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/* Segment budget exhausted - reported after the loop */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		/* pkt_len said more data, but the mbuf chain ended */
+		if (!m) {
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* Convert from 0-based counter to the number of segments used */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK Tx burst function (scatter-gather virtio path).
+ *
+ * Computes the number of virtqueue segments each packet needs, reserves
+ * that many Tx descriptors (trimming the burst if the HW grants fewer),
+ * copies every mbuf into the virtqueue buffers behind an NT DVIO0 header,
+ * frees the mbufs and releases the filled descriptors to the HW.
+ *
+ * Returns the number of packets actually sent.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* Global kill switch: PMD is shutting down */
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/* Pad short frames to 60 bytes (presumably minimum Ethernet
+		 * frame size without FCS - confirm)
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* Fast path: single mbuf into a single vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* Slow path: segmented copy across descriptors */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* Hand the filled descriptors to the HW */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO/IOMMU-map the memory backing one hardware virtio
+ * queue: a 1MB control area for the combined descriptor rings followed
+ * by num_descr packet buffers of buf_size bytes each.
+ *
+ * Preferred strategy is one contiguous allocation that keeps the same
+ * offset in HPA and IOVA space and never crosses a 1G boundary.  If that
+ * cannot be obtained, fall back to two independent VFIO mappings
+ * (control area and packet buffers).
+ *
+ * Returns 0 on success, -1 on failure.  All memory allocated by this
+ * function is freed again on failure (fix: the original code leaked the
+ * allocations when nt_vfio_dma_map() failed).
+ *
+ * NOTE(review): the fallback error paths still leave an already
+ * established control-area VFIO mapping in place, as the original code
+ * did for its own error paths - consider unmapping as well.
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/*
+		 * Fallback: two separate mappings.
+		 * Just allocate 1MB to hold all combined descr rings.
+		 * (Renamed from "virt" to avoid shadowing the outer,
+		 * already freed, pointer.)
+		 */
+		size = 0x100000;
+		void *virt_ctrl = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!virt_ctrl)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt_ctrl, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			rte_free(virt_ctrl); /* fix: don't leak on map failure */
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt_ctrl;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt_ctrl, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt_ctrl);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt_ctrl);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* fix: release everything acquired so far */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(virt_ctrl);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt); /* fix: don't leak on map failure */
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	/* Packet buffers start right after the 1MB control area */
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Mark the HW queue's VFIO resources as released (vf_num 0 = unused). */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq == NULL || hwq->vf_num == 0)
+		return;
+
+	hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the VFIO DMA mapping, then free
+ * the packet-buffer descriptor array and the descriptor-ring memory.
+ *
+ * Returns 0 on success, -1 when the VFIO unmap fails (the memory is then
+ * intentionally left allocated).
+ *
+ * NOTE(review): the unmap size is always ONE_G_SIZE, even when the
+ * fallback path of allocate_hw_virtio_queues() mapped smaller separate
+ * regions - confirm this is what nt_vfio_dma_unmap() expects.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	int vf_num = hwq->vf_num;
+
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+
+	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+/* DPDK .tx_queue_release callback: tear down the queue's HW virtio memory. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&dev_priv->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* DPDK .rx_queue_release callback: tear down the queue's HW virtio memory. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&dev_priv->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+/*
+ * DPDK .rx_queue_setup callback (scatter-gather path).
+ *
+ * Stores the mempool and buffer size on the queue, and - for ports other
+ * than PORT_TYPE_OVERRIDE - allocates the IOMMU-mapped HW queue memory
+ * and creates the managed Rx virtqueue.
+ *
+ * nb_rx_desc/socket_id/rx_conf are accepted but unused; descriptor count
+ * is fixed at SG_NB_HW_RX_DESCRIPTORS.  Returns 0 on success, -1 when
+ * the HW queue memory cannot be allocated.
+ *
+ * NOTE(review): the PORT_TYPE_OVERRIDE branch duplicates the mempool /
+ * buf_size setup done below - consider folding them together.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports use the mempool directly - no HW virtqueue */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* Usable data room in a pool mbuf, i.e. excluding the headroom */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * DPDK .tx_queue_setup callback (scatter-gather path).
+ *
+ * For ports other than PORT_TYPE_OVERRIDE: picks the HW bypass target id
+ * for the NT DVIO0 header, allocates the IOMMU-mapped HW queue memory,
+ * creates the managed Tx virtqueue, binds the port's virtual queues to
+ * the vport and enables the physical port when applicable.
+ *
+ * nb_tx_desc/socket_id/tx_conf are accepted but unused; descriptor count
+ * is fixed at SG_NB_HW_TX_DESCRIPTORS.  Returns 0 on success, -1 on
+ * invalid queue id or HW queue allocation failure.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	/* Override ports only record the queue - no HW virtqueue */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	/*
+	 * NOTE(review): tx_q was already indexed and dereferenced above with
+	 * the unvalidated tx_queue_id, and ">" lets tx_queue_id ==
+	 * nb_tx_queues pass - confirm whether this should be ">=" and be
+	 * checked before use.
+	 */
+	if (tx_queue_id > internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* Bind every virtual queue of this port to the vport in the EPP */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/* mtu_set callback used for the inline profile: only physical ports with an
+ * MTU inside [MIN_MTU_INLINE, MAX_MTU] are accepted.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	int ret = -1;
+
+	/* Reject non-physical ports and out-of-range MTUs. */
+	if (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE &&
+			mtu <= MAX_MTU) {
+		ret = flow_set_mtu_inline(internals->flw_dev, internals->port,
+					  mtu);
+	}
+	return ret == 0 ? 0 : -EINVAL;
+}
+
+/*
+ * mtu_set callback used for the vswitch profile.
+ *
+ * Virtual ports: the exception-path TX queue is pinned to MAX_MTU, then the
+ * requested MTU is applied to every virtual-port queue.
+ * Physical ports: the exception path is pinned to MAX_MTU and the requested
+ * MTU is applied to the port itself.
+ * Returns 0 on success, -EINVAL for a bad MTU or port type, otherwise the
+ * nthw_epp_set_mtu() status (assumed 0 on success - TODO confirm).
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	/*struct ntnic_tx_queue *tx_q = internals->txq; */
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int retval = EINVAL;
+
+	/* Range check against the driver-wide MTU bounds. */
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* Apply the requested MTU to each virtual-port TX queue. */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		/* PORT_TYPE_OVERRIDE and anything else cannot take an MTU. */
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* rx_queue_start callback: pure DPDK state bookkeeping - no hardware
+ * interaction is required to start an RX queue.
+ */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* rx_queue_stop callback: pure DPDK state bookkeeping - no hardware
+ * interaction is required to stop an RX queue.
+ */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* tx_queue_start callback: pure DPDK state bookkeeping.
+ * Parameter renamed from the misleading "rx_queue_id" - it indexes the TX
+ * queue state array.
+ */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* tx_queue_stop callback: pure DPDK state bookkeeping.
+ * Parameter renamed from the misleading "rx_queue_id" - it indexes the TX
+ * queue state array.
+ */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* mac_addr_remove callback: zero out the MAC table entry at @index. */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	/* Defensive runtime check mirroring the assert for release builds. */
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return;
+	}
+
+	struct rte_ether_addr *const entry = &dev->data->mac_addrs[index];
+
+	(void)memset(entry, 0, sizeof(*entry));
+}
+
+/* mac_addr_add callback: store @mac_addr at @index (vmdq pool is unused).
+ * Returns 0 on success, -1 on an out-of-range index.
+ */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	/* Defensive runtime check mirroring the assert for release builds. */
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	dev->data->mac_addrs[index] = *mac_addr;
+	return 0;
+}
+
+/* mac_addr_set callback: replace the default (index 0) MAC address. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * set_mc_addr_list callback.
+ *
+ * Copies up to NUM_MULTICAST_ADDRS_PER_PORT multicast addresses into the
+ * per-port shadow table and zeroes the unused tail entries.
+ * Returns 0 on success, -1 if the list does not fit.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	/*
+	 * Off-by-one fix: a list of exactly NUM_MULTICAST_ADDRS_PER_PORT
+	 * entries fits the table, so only strictly larger lists are rejected.
+	 */
+	if (nb_mc_addr > NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (> %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * dev_configure callback.
+ *
+ * Records that probing finished and reflects the always-on promiscuous
+ * mode in the ethdev data.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode. Set the flag
+	 * directly instead of the obscure "x ^= ~x" set-all-bits trick.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * dev_start callback.
+ *
+ * Virtual/override ports are reported link-up immediately. Physical ports
+ * wait up to ~5 s (50 x 100 ms polls) for the FPGA port to report link
+ * before continuing, then apply any requested loopback mode.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		/* lpbk_mode bit 0 selects host loopback, bit 1 line loopback. */
+		if (internals->lpbk_mode) {
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * dev_stop callback.
+ *
+ * For non-virtual ports: release every managed RX/TX virt-queue and take
+ * the physical port down (admin state, link status, speed, loopback).
+ * Always marks the DPDK link state down.
+ */
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
+		struct ntnic_tx_queue *tx_q = internals->txq_scg;
+
+		uint q;
+
+		/* Release all managed virt-queues before touching the port. */
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(rx_q[q].vq);
+
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(tx_q[q].vq);
+
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+/*
+ * dev_set_link_up callback.
+ *
+ * No-op for virtual/override ports. For physical ports the administrative
+ * state is raised in the adapter.
+ */
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
+/*
+ * dev_set_link_down callback.
+ *
+ * No-op for virtual/override ports. For physical ports the link status is
+ * cleared.
+ * NOTE(review): asymmetric with eth_dev_set_link_up(), which toggles the
+ * admin state instead - confirm this is intentional.
+ */
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(p_adapter_info, port, false);
+
+	return 0;
+}
+
+/*
+ * dev_close callback.
+ *
+ * Releases per-port resources (LAG thread/config, private data, the ethdev
+ * port itself) and, once the last port of the adapter is closed, shuts down
+ * the driver threads and deinitializes the adapter.
+ */
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free (p_drv was copied out above and stays valid) */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 *
+	 * Fix: the NULL check must come before the count is read; the
+	 * original "!p_drv->n_eth_dev_init_count && p_drv" ordering made the
+	 * check dead (p_drv was already dereferenced above anyway).
+	 */
+	if (p_drv && p_drv->n_eth_dev_init_count == 0) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+/*
+ * fw_version_get callback.
+ *
+ * Formats the FPGA image identification as "TTT-PPPP-VV-RR". Virtual and
+ * override ports have no FPGA version; an empty string is returned for them.
+ * Returns 0 on success, or the required buffer size (incl. NUL) when
+ * @fw_size is too small.
+ */
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		/*
+		 * Fix: do not return success while leaving the caller's
+		 * buffer uninitialized.
+		 */
+		if (fw_size > 0)
+			fw_version[0] = '\0';
+		return 0;
+	}
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+/* xstats_get callback: read extended statistics under the adapter stat lock. */
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	int cnt;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	cnt = nthw_xstats_get(p_nt4ga_stat, stats, n,
+			      p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch,
+			      internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return cnt;
+}
+
+/* xstats_get_by_id callback: read selected statistics under the stat lock. */
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	int cnt;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	cnt = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+				    p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch,
+				    internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return cnt;
+}
+
+/* xstats_reset callback: clear extended stats, then the basic DPDK stats. */
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat,
+			  p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/*
+ * xstats_get_names callback.
+ *
+ * Fix: eth_dev was tagged __rte_unused although the body dereferences it
+ * to reach the per-adapter statistics context - the annotation was
+ * misleading and is removed.
+ */
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+/* xstats_get_names_by_id callback: resolve names for the requested ids. */
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	nt4ga_stat_t *p_nt4ga_stat =
+		&internals->p_drv->ntdrv.adapter_info.nt4ga_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch);
+}
+
+/* flow_ops_get callback: expose the driver's rte_flow ops table. */
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+/* promiscuous_enable callback: nothing to program - the device always runs
+ * in promiscuous mode, so this only logs and reports success.
+ */
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+/* rss_hash_update callback: program and flush the default HSH receipt. */
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
+	/* hsh index 0 means the default receipt in HSH module */
+	const int hsh_idx = 0;
+	int res;
+
+	res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					 nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+/* rss_hash_conf_get callback: report the active RSS fields (no key export). */
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
+
+	/* The hash key is not readable from the device. */
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
+/*
+ * ethdev callback table shared by all ntnic ports.
+ * Non-const: .mtu_set and .promiscuous_disable are NULL here - presumably
+ * patched at runtime depending on the FPGA profile (see dev_set_mtu() /
+ * dev_set_mtu_inline()); TODO confirm against the probe path.
+ */
+static struct eth_dev_ops nthw_eth_dev_ops = {
+	.dev_configure = eth_dev_configure,
+	.dev_start = eth_dev_start,
+	.dev_stop = eth_dev_stop,
+	.dev_set_link_up = eth_dev_set_link_up,
+	.dev_set_link_down = eth_dev_set_link_down,
+	.dev_close = eth_dev_close,
+	.link_update = eth_link_update,
+	.stats_get = eth_stats_get,
+	.stats_reset = eth_stats_reset,
+	.dev_infos_get = eth_dev_infos_get,
+	.fw_version_get = eth_fw_version_get,
+	.rx_queue_setup = eth_rx_scg_queue_setup,
+	.rx_queue_start = eth_rx_queue_start,
+	.rx_queue_stop = eth_rx_queue_stop,
+	.rx_queue_release = eth_rx_queue_release,
+	.tx_queue_setup = eth_tx_scg_queue_setup,
+	.tx_queue_start = eth_tx_queue_start,
+	.tx_queue_stop = eth_tx_queue_stop,
+	.tx_queue_release = eth_tx_queue_release,
+	.mac_addr_remove = eth_mac_addr_remove,
+	.mac_addr_add = eth_mac_addr_add,
+	.mac_addr_set = eth_mac_addr_set,
+	.set_mc_addr_list = eth_set_mc_addr_list,
+	.xstats_get = eth_xstats_get,
+	.xstats_get_names = eth_xstats_get_names,
+	.xstats_reset = eth_xstats_reset,
+	.xstats_get_by_id = eth_xstats_get_by_id,
+	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
+	.mtu_set = NULL,
+	.mtr_ops_get = eth_mtr_ops_get,
+	.flow_ops_get = _dev_flow_ops_get,
+	.promiscuous_disable = NULL,
+	.promiscuous_enable = promiscuous_enable,
+	.rss_hash_update = eth_dev_rss_hash_update,
+	.rss_hash_conf_get = rss_hash_conf_get,
+};
+
+/* Map a link speed given in Mbps to the NT-specific speed constant;
+ * unrecognized rates map to NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+/*
+ * FLM statistics thread body.
+ *
+ * Polls once per second until the first flow eth-device is attached, then
+ * continuously pulls FLM statistics until driver shutdown, sleeping 10 us
+ * whenever no new records were read.
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	/* eth_base is set elsewhere once a port is configured. */
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ */
+/*
+ * Periodic statistics collection thread body.
+ *
+ * Roughly every 10 ms it triggers a statistics transfer and then waits for
+ * the FPGA to update the timestamp word ((uint64_t)-1 means the transfer is
+ * still in flight). If the transfer stalls and ETHDEV debug logging is on,
+ * RMC overflow/debug registers are dumped about every 0x4000 polls.
+ * Completed snapshots are folded into the driver stats under stat_lck.
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		/* Kick off a statistics DMA transfer. */
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		/* Wait for completion: timestamp stays -1 while in flight. */
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
+/*
+ * State shared while setting up PF representor ports: the backing PF PCI
+ * device, the parsed representor devargs, per-port queue bookkeeping
+ * (presumably queue counts per virtual port - TODO confirm) and the port id
+ * of the backing PF.
+ */
+static struct {
+	struct rte_pci_device *vpf_dev;
+	struct rte_eth_devargs eth_da;
+	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+	uint16_t pf_backer_port_id;
+} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = {{ 0 }};
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id    = i;
+			queue_ids[i].hw_id = start_queue + i;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Driver tear-down, used both by the generic rte_eth PCI remove path and
+ * by the SIGINT shutdown thread.  The eth_dev argument is ignored: the
+ * function walks the global pmd_intern_base list and releases every
+ * scatter-gather queue of every port, then the explicitly registered
+ * virt-queues, and finally the VFIO exception-path mapping.
+ *
+ * Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Release the managed SCG Tx/Rx queues of every registered port */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release queues recorded via register_release_virtqueue_info() */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			/* mark slot free so de-registration cannot double-release */
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: sets the kill_pmd flag that shutdown_thread() polls.
+ * Any other signal is handed back to the previously installed handler
+ * and re-raised.
+ *
+ * NOTE(review): kill_pmd is declared elsewhere in this file; for strict
+ * async-signal safety it should be a volatile sig_atomic_t - confirm.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig != SIGINT) {
+		/* not ours: restore the original handler and re-deliver */
+		signal(sig, previous_handler);
+		raise(sig);
+		return;
+	}
+	kill_pmd = 1;
+}
+
+/*
+ * Control thread started by init_shutdown().  Polls the kill_pmd flag
+ * set by the SIGINT handler, performs the full driver tear-down, then
+ * restores the original SIGINT handler and re-raises the signal so the
+ * application terminates normally.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	/* the argument is unused by nthw_pci_dev_deinit() */
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT-driven shutdown path (handler + polling thread)
+ * and perform a one-time estimate of the TSC frequency, scaled up from
+ * a 10 ms sample, used to rate-limit statistics polling.
+ *
+ * Returns 0 on success, -1 if the shutdown thread could not be created.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* Fix: the pthread_create() result was silently ignored */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot start shutdown handler\n",
+		       __func__);
+		/* undo the handler installation so SIGINT still works */
+		signal(SIGINT, previous_handler);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point.  Logs the device/devargs, validates the EAL
+ * environment (PCI bus present, VFIO IOMMU enabled, hugepages, IOVA
+ * mode), skips PCIe-bifurcated secondary endpoints, then performs the
+ * full device initialization and installs the SIGINT shutdown handler.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* This PMD can only work when the PCI bus is available */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		/* Fix: message wrongly said "needs hugepages" (copy/paste) */
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs the PCI bus\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	/* VFIO no-IOMMU mode is not supported by this PMD */
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		/* non-fatal: VA mode works, but PA is preferred */
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/*
+	 * On bifurcated NT200A01/NT50B01 adapters only the primary endpoint
+	 * (subsystem id 0x01) must be probed
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/*
+ * PCI remove entry point: delegate to the generic ethdev PCI removal
+ * helper, which runs nthw_pci_dev_deinit() for the device.
+ */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	ret = rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+	return ret;
+}
+
+/*
+ * Register one EAL log type per NT log module, storing the resulting
+ * type id in nt_log_module_logtype[]; each type defaults to
+ * RTE_LOG_INFO.  Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END;
+	     module++) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[idx],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Log backend for the NT logging abstraction: map the NT level and
+ * module onto their EAL equivalents and forward to rte_vlog().
+ * Unknown levels fall back to DEBUG; out-of-range modules are passed
+ * through unchanged as the EAL log type.
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t lvl;
+	uint32_t mod;
+
+	if (level == NT_LOG_ERR)
+		lvl = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		lvl = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		lvl = RTE_LOG_INFO;
+	else
+		lvl = RTE_LOG_DEBUG;
+
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END)
+		mod = (uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	else
+		mod = module;
+
+	return (int)rte_vlog(lvl, mod, format, args);
+}
+
+/*
+ * Report whether debug logging is enabled for an NT log module.
+ * Returns 1 when the module's EAL log level is DEBUG, 0 when not,
+ * and -1 for an out-of-range module id.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		return rte_log_get_level(nt_log_module_logtype[idx]) ==
+		       RTE_LOG_DEBUG;
+	}
+	return -1;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/*
+ * EAL constructor: wire the NT logging abstraction to the DPDK rte_log
+ * backend implemented above (init / log / is_debug callbacks).
+ */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor for the ntnic PMD; BAR mapping is required */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+/* Register the PMD, its PCI id table, and its vfio-pci kmod dependency */
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record a virt-queue in the global release table so that it is freed
+ * by nthw_pci_dev_deinit() on driver shutdown.  rx and managed select
+ * which release function will be used.
+ *
+ * Returns 0 on success, -1 when the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Remove a virt-queue from the global release table (e.g. when the
+ * queue is released early by its owner).
+ *
+ * Returns 0 when found and cleared, -1 when the queue is not present.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Look up a port instance by its vhost id in the global internals list.
+ * Returns the matching pmd_internals or NULL when no port has that id.
+ */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *p;
+
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Look up a port instance by its vhost socket path in the global
+ * internals list.  Returns the matching pmd_internals or NULL.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *p;
+
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		const int cmp = strcmp(p->vhost_path, path);
+
+		printf("Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       p->vhost_path, path, cmp);
+		if (cmp == 0)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Parse the "portqueues=[p:q,p:q;...]" devarg extension.  For every
+ * valid "port:queues" pair inside the brackets, set
+ * portq[port] = queues.  Pairs with a port outside
+ * [0, MAX_FPGA_VIRTUAL_PORTS_SUPPORTED) or a queue count outside
+ * (0, MAX_QUEUES) are silently ignored.  The input string is modified
+ * in place (brackets and separators are overwritten with NULs).
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* advance to the opening bracket of the mapping list */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* find the matching closing bracket, scanning backwards */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Fix: use reentrant strtok_r; this PMD runs multiple threads */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor-related devargs for this PF device.  The private
+ * "portqueues=[...]" extension is read first and then stripped from the
+ * argument string, because rte_eth_devargs_parse() cannot handle extra
+ * parameters after "representor=[x]".
+ *
+ * Returns the number of representor ports requested (0 if none), or -1
+ * on a devargs parse error (rte_errno is set).
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			/* parse returns a negative errno code */
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			/* remember which PF backs the representors */
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual-port / representor device and assign
+ * it a representor id.  While representor ids remain from the PF's
+ * "representor=[...]" devargs they are consumed in order (and the PF is
+ * set as backer); afterwards the VF number is used.  *n_vq is set to
+ * the queue count configured for that id via the "portqueues" mapping,
+ * defaulting to 1.
+ *
+ * Returns the ethdev, or NULL on allocation failure.
+ *
+ * NOTE(review): rep.portqueues is indexed by representor_id without a
+ * visible bounds check here - confirm ids are always smaller than the
+ * portqueues array size.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	/* position in the PF's representor id list, across calls */
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/*
+ * Return the device name of a PCI device, or NULL when the device (or
+ * its name) is not available.
+ */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	return (dev != NULL && dev->device.name != NULL) ? dev->device.name :
+	       NULL;
+}
+
+/* Devargs accepted by virtual-port devices: VLAN id and socket layout */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create the ethdev and private state for one virtual port (vDPA
+ * representor).  Parses the per-port devargs (vlan, sep), allocates the
+ * ethdev and private data, allocates the HW queues, attaches to the
+ * flow device (own flow device for VSWITCH profile, the in-port's flow
+ * device otherwise), starts the vDPA relay and links the new instance
+ * into the global pmd_intern_base list.
+ *
+ * Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): on the error path only data/internals are freed; an
+ * ethdev already allocated via rte_eth_vdev_allocate() is not released,
+ * and freeing internals leaves its dev_private pointer dangling -
+ * confirm intended.
+ * NOTE(review): vdev is dereferenced (numa_node, addr) before the
+ * "if (vdev && ...)" null check below - the check appears redundant.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	/* Parse per-port devargs: optional VLAN id and socket layout */
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* Virtual ports live in [MAX_NTNIC_PORTS, VIRTUAL_TUNNEL_PORT_OFFSET) */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id    = i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	/* VSWITCH gets its own flow device; other profiles attach to the
+	 * flow device of the physical in-port
+	 */
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* vhost socket path; "sep" devarg selects a per-port subdirectory */
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Replace the ethdev's data with our own copy pointing at internals */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append the new instance to the global internals list */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	__atomic_store_n(&internals->vhid, vhid, __ATOMIC_RELAXED);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * Rx burst stub for PORT_TYPE_OVERRIDE ports: they cannot receive data
+ * through SCG because their queues are handed over to VF/vDPA, so this
+ * handler never delivers any packets.
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Nothing to receive on the SCG path for this port type. */
+	const uint16_t rx_count = 0;
+
+	return rx_count;
+}
+
+/*
+ * Tx burst stub for PORT_TYPE_OVERRIDE ports: they cannot transmit data
+ * through SCG because their queues come from VF/vDPA, so this handler
+ * accepts nothing.
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	/* Nothing can be sent on the SCG path for this port type. */
+	const uint16_t tx_count = 0;
+
+	return tx_count;
+}
+
+/*
+ * Create the virtual-function ethdev for @pci_dev and install the SCG
+ * burst handlers.  PORT_TYPE_OVERRIDE ports get dummy handlers because
+ * their queues belong to VF/vDPA.
+ * Returns 0 on success, -1 if the internals could not be created.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev;
+	struct pmd_internals *internals;
+	int is_override;
+
+	/* Create virtual function DPDK PCI devices. */
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+	is_override = (internals->type == PORT_TYPE_OVERRIDE);
+
+	eth_dev->rx_pkt_burst =
+		is_override ? eth_dev_rx_scg_dummy : eth_dev_rx_scg;
+	eth_dev->tx_pkt_burst =
+		is_override ? eth_dev_tx_scg_dummy : eth_dev_tx_scg;
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual-function ethdev created for @pci_dev: close all
+ * vDPA devices, free the ethdev's private data and release the port.
+ * Returns 0 on success, -1 on missing device or lookup failure.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/*
+	 * Look up the ethdev entry by device name.  The previous code called
+	 * rte_vdev_device_name() on a PCI device, reading the name through
+	 * the wrong structure type; use the generic rte_device name instead.
+	 */
+	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Hash-distribution percentages expressed as port 0's share of Tx
+ * (the remainder goes to port 1).
+ */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* Index/value arguments passed to lag_set_config()/lag_set_port_block()
+ * below; exact hardware meaning is defined by the flow API.
+ */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* Transition handler used when the LAG state did not change: nothing to do. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Transition handler: both links up -> split Tx 50/50 between the ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+
+	const int ret = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				       LAG_BALANCED_50_50);
+	return ret;
+}
+
+/* Transition handler: only port 0 usable -> send all Tx out of port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+
+	const int ret = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				       LAG_PORT0_ONLY);
+	return ret;
+}
+
+/* Transition handler: only port 1 usable -> send all Tx out of port 1. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+
+	const int ret = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				       LAG_PORT1_ONLY);
+	return ret;
+}
+
+/*
+ * Transition handler: both links down -> disable Tx on both LAG members.
+ * Returns 0 when both lag_set_config() calls succeed (sum of their results).
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	int retval = lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX,
+				    LAG_NO_TX);
+
+	retval += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX,
+				 LAG_NO_TX);
+	return retval;
+}
+
+/* Read and log the current link state of @port on the LAG adapter. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	bool link_up;
+
+	link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+
+	return link_up;
+}
+
+/*
+ * Pack both link states into a two-bit value: port 0 in bit 0, port 1 in
+ * bit 1 (the encoding used by enum lag_state_e).
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t port0_up = lag_get_link_status(config, 0);
+	const uint8_t port1_up = lag_get_link_status(config, 1);
+
+	return (uint8_t)((port1_up << 1) | port0_up);
+}
+
+/*
+ * Make the configured primary port the active one: give it the whole hash
+ * distribution (expressed as port 0's percentage) and block Rx on the
+ * backup port.  Returns 0 when both configuration calls succeed.
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	const int primary_is_port0 = (config->primary_port == 0);
+	/* Port 0's share of the distribution: 100% when it is the primary,
+	 * 0% when port 1 is the primary.
+	 */
+	const uint8_t port_0_distribution = primary_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		primary_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+
+	int retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				    port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Make the configured backup port the active one: give it the whole hash
+ * distribution (expressed as port 0's percentage) and block Rx on the
+ * primary port.  Returns 0 when both configuration calls succeed.
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	const int backup_is_port0 = (config->backup_port == 0);
+	/* Port 0's share of the distribution: 100% when it is the backup,
+	 * 0% when port 1 is the backup.
+	 */
+	const uint8_t port_0_distribution = backup_is_port0 ? 100 : 0;
+	const uint8_t blocked_port =
+		backup_is_port0 ? LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+
+	/* Tx only on the backup port */
+	int retval = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0,
+				    port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup monitor loop: poll the primary port's link every 500 ms
+ * and fail over to the backup port while the primary is down; fail back
+ * when the primary link returns.  Runs until config->lag_thread_active is
+ * cleared by another thread.  Always returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			/* NOTE: the backup link is read (and logged) even when
+			 * the backup is already active and the value is unused.
+			 */
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Handler invoked on a LAG link-state transition. */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One row of the transition table: the action to run when the link state
+ * moves from src_state to dst_state.
+ */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/*
+ * Link-state transition table for active/active LAG management; covers
+ * all 16 (src, dst) combinations of the two link bits.
+ * Declared static const: the table is private to this file and is never
+ * modified at run time (previously it had external linkage).
+ */
+static const struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Find the handler for the current_state -> new_state transition.
+ * Returns NULL when the transition is not present in the table.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const uint32_t n_entries = sizeof(actions) / sizeof(actions[0]);
+	uint32_t i;
+
+	for (i = 0; i < n_entries; i++) {
+		if (actions[i].src_state != current_state)
+			continue;
+		if (actions[i].dst_state == new_state)
+			return actions[i].action;
+	}
+
+	return NULL;
+}
+
+/*
+ * Active/active monitor loop: poll both link states every 500 ms and, on
+ * each tick, run the handler that matches the state transition.  Runs
+ * until config->lag_thread_active is cleared.  Always returns 0.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		/*
+		 * lookup_action() returns NULL for transitions missing from
+		 * the table; calling a NULL function pointer would crash the
+		 * management thread, so guard the call (defensive - the
+		 * current table covers all 16 transitions).
+		 */
+		action = lookup_action(current_state, ports_status);
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry point.  Dispatches to the mode-specific
+ * monitor loop and returns when that loop exits, or immediately for
+ * unsupported bonding modes.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		/* Log through the driver's logging macro instead of bare
+		 * stderr, for consistency with the rest of this file.
+		 */
+		NT_LOG(ERR, ETHDEV, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..ee0d84ce82
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+/*
+ * Compatibility shim: DPDK 22.07 renamed the ETH_* ethdev constants to
+ * RTE_ETH_*.  On new DPDK versions, map the old names used throughout
+ * this driver onto the new ones.  Each name is #undef'ed first in case a
+ * deprecated alias is still provided by the ethdev headers (previously
+ * the ETH_RSS_IPV4..ETH_RSS_L3_SRC_ONLY group was defined without a
+ * matching #undef, unlike every other name here).
+ */
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#undef ETH_RSS_IPV4
+#undef ETH_RSS_FRAG_IPV4
+#undef ETH_RSS_NONFRAG_IPV4_OTHER
+#undef ETH_RSS_IPV6
+#undef ETH_RSS_FRAG_IPV6
+#undef ETH_RSS_NONFRAG_IPV6_OTHER
+#undef ETH_RSS_IPV6_EX
+#undef ETH_RSS_C_VLAN
+#undef ETH_RSS_L3_DST_ONLY
+#undef ETH_RSS_L3_SRC_ONLY
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+/* Scatter-gather ring sizes and per-packet buffer sizes (2 KiB). */
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+/* Size in bytes of the scatter-gather packet header (_pkt_hdr_rx/_pkt_hdr_tx). */
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+/* NOTE: expands using a variable named first_vq_descr_idx, which must be
+ * in scope at the expansion site.
+ */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+
+/* Virtual-port instance lookups by vhost id / vhost socket path. */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+/* Create/remove the VF ethdev interfaces for a PCI device. */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+/* Register/unregister a virtqueue for release bookkeeping. */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+/* Globals defined in the driver sources; rte_tsc_freq is DPDK's TSC rate. */
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/* SG_HDR_SIZE is defined once earlier in this header; the duplicate
+ * definition that used to sit here was removed.
+ */
+
+/* Rx scatter-gather packet header; must be exactly SG_HDR_SIZE (12) bytes
+ * (checked at compile time below).  Field semantics are FPGA-defined.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Tx scatter-gather packet header; must be exactly SG_HDR_SIZE (12) bytes
+ * (checked at compile time below).  Field semantics are FPGA-defined.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size, using C11
+ * _Static_assert instead of the negative-array-size typedef trick.
+ */
+_Static_assert(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE,
+	       "struct _pkt_hdr_rx must be exactly SG_HDR_SIZE bytes");
+_Static_assert(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE,
+	       "struct _pkt_hdr_tx must be exactly SG_HDR_SIZE bytes");
+
+/* Opaque handle type. */
+typedef void *handle_t;
+
+/* Per-queue hardware resources: control area plus packet buffers for one VF. */
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* State of one Rx queue: flow-queue mapping, mbuf pool, statistics and
+ * the backing hardware virtqueue.
+ */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_rx_descr;
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* State of one Tx queue: flow-queue mapping, statistics and the backing
+ * hardware virtqueue.
+ */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_tx_descr;
+	/* Used for bypass in NTDVIO0 header on Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+/* Fixed-capacity list of uint32_t values; count is the number in use. */
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count;
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	/* Management thread; it runs while lag_thread_active is non-zero. */
+	pthread_t lag_tid;
+	uint8_t lag_thread_active;
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+/* Supported NTbond modes (values follow DPDK bonding mode numbering). */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* Meter profile list node: rte_mtr profile registered under profile_id. */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* Meter instance list node referencing its profile. */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtio communication layout for a virtual port. */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev;
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode;
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config;
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	struct pmd_internals *next;
+};
+
+/* Flow cleanup, statistics polling and adapter debug dump entry points. */
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+/* Capacity limits for converted rte_flow patterns/actions. */
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+/* Number of flow stat id slots (power of two; used as a mask below). */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* A converted match pattern: array of flow elements ending with
+ * FLOW_ELEM_TYPE_END.
+ */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus the match pattern derived from it. */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted flow attributes together with the converted match pattern. */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted action list plus storage for the action payloads it references. */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/* Driver-side representation of an rte_flow rule: the flow-engine handle,
+ * the stat id it was mapped to, and cached counters.
+ */
+struct rte_flow {
+	void *flw_hdl;
+	int used;
+	uint32_t flow_stat_id;
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/* Driver-private rte_flow item types; negative values (from INT_MIN) so
+ * they can never collide with the public RTE_FLOW_ITEM_TYPE_* range.
+ */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/* Driver-private rte_flow action types; same negative-range scheme. */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/* NOTE(review): these are file-scope statics in a HEADER, so every .c
+ * file that includes it gets its own private copy of the tables and the
+ * initialized flag - verify this header is included by a single .c only.
+ */
+static int convert_tables_initialized;
+
+/* Highest rte_flow enum value the conversion tables cover. */
+#define MAX_RTE_ENUM_INDEX 127
+
+/* RTE_FLOW_* -> FLOW_* conversion tables; -1 marks unsupported entries. */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/* Convert an rte_flow item type to the internal FLOW_ELEM_TYPE_* value;
+ * evaluates its argument once and yields -1 for out-of-range input.
+ */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+/* Convert an rte_flow action type to the internal FLOW_ACTION_TYPE_*
+ * value; evaluates its argument once and yields -1 for out-of-range input.
+ */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+/* Flow stat id allocation map (0 = free, mark + 1 = taken), guarded by
+ * flow_lock.
+ */
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate an internal struct flow_error into an rte_flow_error.
+ * A NULL @error is tolerated; always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+
+	if (flow_error->type == FLOW_ERROR_NONE ||
+			flow_error->type == FLOW_ERROR_SUCCESS)
+		error->type = RTE_FLOW_ERROR_TYPE_NONE;
+	else
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id.
+ *
+ * Starts at (mark % MAX_COLOR_FLOW_STATS) and probes linearly for a free
+ * slot.  The previous implementation looped forever when every slot was
+ * occupied; the scan is now bounded to one full pass, after which the
+ * index has wrapped back to the starting slot, which is then (re)claimed
+ * so the caller always gets an id.  Caller must hold flow_lock.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+	uint32_t probes = 0;
+
+	while (flow_stat_id_map[flow_stat_id] &&
+			probes++ < MAX_COLOR_FLOW_STATS)
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow stat id under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/* Release a flow stat id; out-of-range ids are silently ignored.
+ * Caller must hold flow_lock.
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time population of the RTE_FLOW_* -> FLOW_* conversion tables used
+ * by CNV_TO_ELEM()/CNV_TO_ACTION().  Entries left at -1 (via memset) are
+ * unsupported types.  Idempotent via convert_tables_initialized; callers
+ * are expected to serialize the first invocation.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* Item (pattern element) conversion table. */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug name table mirroring elem_list. */
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	/* Action conversion table. */
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug name table mirroring action_list. */
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Parse a raw packet byte stream (as carried by RAW_ENCAP/RAW_DECAP
+ * actions) into a FLOW_ELEM_TYPE_END-terminated list of flow elements.
+ *
+ * @data:     packet bytes, starting with the Ethernet header.
+ * @preserve: optional mask buffer of the same length as @data; when
+ *            non-NULL each element's mask points into this buffer at
+ *            the same offset as its spec.
+ * @size:     number of valid bytes in @data.
+ * @out:      receives the parsed elements plus the END marker.
+ *
+ * Returns the number of elements written (including the END marker),
+ * or -1 when the buffer is truncated or a header is unsupported.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - one element per 802.1Q/QinQ tag present */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - EtherType must agree with the IP version nibble */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field lives at byte offset 9 */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field lives at byte offset 6 */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* UDP destination port 2152 marks a GTP-U tunnel */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* The extension length byte itself must be
+				 * inside the buffer before it is read.
+				 */
+				if (size - pkti < 1)
+					return -1;
+
+				/* Extension length is expressed in 4-byte
+				 * units (3GPP TS 29.281).
+				 */
+				size_t ext_len = data[pkti] * 4;
+
+				/* A zero-length extension would never advance
+				 * pkti (infinite loop) and would read
+				 * data[pkti - 1] below; reject it as
+				 * malformed.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last byte of the extension holds the next
+				 * extension-header type (0 terminates).
+				 */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing bytes that match no known header are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Copy the RTE flow attributes into the internal flow_attr form.
+ * Only group and priority are carried over; everything else is zeroed.
+ * A NULL @attr yields an all-zero attribute set.  Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(struct flow_attr));
+	if (attr) {
+		attribute->attr.group = attr->group;
+		attribute->attr.priority = attr->priority;
+	}
+	return 0;
+}
+
+/*
+ * Translate an RTE flow item array into the internal flow_elem list in
+ * @match->flow_elem, stopping at (and including) the END item.
+ *
+ * @match:    destination for the converted elements (spec/mask pointers
+ *            alias the caller's item data; no copies are made).
+ * @items:    RTE items to convert; must be END-terminated.
+ * @max_elem: capacity of match->flow_elem.
+ *
+ * Returns 0 on success, -1 on unknown item type, use of ranges
+ * (item->last), or overflow of @max_elem.  Lazily initializes the
+ * global conversion tables on first use.
+ */
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		/* Map the RTE item type to the internal element type;
+		 * the TUNNEL pseudo-item is outside the table and is
+		 * handled explicitly.
+		 */
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			/* Debug-only dump of the converted item; purely
+			 * informational, no conversion happens here.
+			 */
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				/* Reassemble the 24-bit VNI from its
+				 * three network-order bytes.
+				 */
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			/* Spec/mask are borrowed pointers into the
+			 * caller's item array - no deep copy.
+			 */
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Translate RTE flow actions into internal flow actions for the
+ * vswitch (full-offload) profile.
+ *
+ * @action:       destination for the converted actions and any
+ *                per-action payload (rss/tunnel/mark storage).
+ * @actions:      RTE actions to convert; must be END-terminated.
+ * @max_elem:     capacity of action->flow_actions.
+ * @flow_stat_id: out: color flow-stat id allocated for a MARK action,
+ *                or a newly created id when no MARK was present
+ *                (initialized to MAX_COLOR_FLOW_STATS until assigned).
+ *
+ * Returns 0 on success, -1 on unknown/unsupported action or overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		/* Map the RTE action type; the TUNNEL_SET pseudo-action
+		 * lies outside the table and is handled explicitly.
+		 */
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1:
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is
+				 * supported.
+				 */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* NOTE(review): PRIX64 is paired with an
+				 * unsigned long long cast - formally these
+				 * types may differ; verify the format/arg
+				 * match.
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/* The encap definition is itself an item
+				 * list; convert it recursively into
+				 * tun_def.
+				 */
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				/* MARK ids are remapped to internal color
+				 * flow-stat ids.
+				 */
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					/* NOTE(review): this path writes two
+					 * entries (MARK + moved END) before
+					 * the aidx bound check below runs -
+					 * confirm flow_actions has headroom
+					 * for the extra element.
+					 */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Translate RTE flow actions into internal flow actions for the
+ * inline profile.
+ *
+ * @action:       destination for the converted actions and per-action
+ *                payload (rss/decap/encap/queue storage).
+ * @actions:      RTE actions to convert; must be END-terminated.
+ * @max_elem:     capacity of action->flow_actions.
+ * @queue_offset: added to every QUEUE action's index (maps PMD queue
+ *                numbers onto VF/vDPA queue numbers).
+ *
+ * Returns 0 on success, a negative value on conversion failure or
+ * overflow of @max_elem.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is
+				 * supported.
+				 */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				/* Parse the raw decap bytes into flow
+				 * elements so the driver knows which
+				 * headers are stripped.
+				 */
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				/* Parse the raw encap bytes (with the
+				 * optional preserve mask) into flow
+				 * elements.
+				 */
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				/* Rebase queue index onto the VF/vDPA
+				 * queue range (see queue_offset).
+				 */
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible action: pass the RTE conf
+				 * pointer straight through.
+				 */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..b1cc4d2959
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	/* A handle inside the static nt_flows[] table is a managed
+	 * struct rte_flow; anything outside it is an opaque driver
+	 * handle that was merely type-cast to struct rte_flow *.
+	 */
+	const void *table_begin = &nt_flows[0];
+	const void *table_end = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *handle = (void *)flow;
+
+	return !(handle >= table_begin && handle <= table_end);
+}
+
+/*
+ * Convert an RTE flow description (attr/items/actions) into the
+ * internal attribute, match and action representations, dispatching to
+ * the profile-specific action converter (inline vs vswitch).
+ *
+ * On the inline profile, @flow_stat_id is untouched; on the vswitch
+ * profile it receives the allocated color flow-stat id.
+ *
+ * Returns 0 on success, -1 with @error populated on failure.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Validate dev before any dereference; the previous code read
+	 * dev->p_drv, dev->type and dev->vpq before the NULL check,
+	 * which made the check unreachable in the failing case.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		/* Group > 0 flows are handled directly by the caller */
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback.  Releases the driver flow and, for flows
+ * managed in the nt_flows[] table, frees the table slot and the color
+ * flow-stat id under flow_lock.
+ *
+ * Returns the flow_destroy() result (0 on success); a NULL flow is a
+ * successful no-op.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+
+		/*
+		 * Type-cast handles are opaque driver objects, not
+		 * struct rte_flow - do not touch any rte_flow fields.
+		 */
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: run the full conversion and ask the
+ * driver to validate the resulting match/action lists without
+ * programming anything.
+ *
+ * Returns 0 when the flow would be accepted, -EINVAL when conversion
+ * fails, or the (negative) flow_validate() result with @error set.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	/* Conversion scratch space; populated by convert_flow() below */
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+	int res;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	res = flow_validate(dev->flw_dev, match.flow_elem, action.flow_actions,
+			    &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * Inline-profile flows in group > 0 are returned as the raw driver
+ * handle type-cast to struct rte_flow * (see is_flow_handle_typecast).
+ * All other flows are tracked in the static nt_flows[] table: a free
+ * slot is claimed under flow_lock, the driver flow is created, and the
+ * slot is released again if creation fails.
+ *
+ * Returns the flow handle, or NULL with @error set on failure.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline group > 0: no table slot, hand back the driver handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim a free nt_flows[] slot; only slots with a valid
+	 * flow_stat_id are usable.
+	 */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Creation failed: release the slot and id again */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last *global* statistics refresh (color counters);
+ * used by poll_statistics() to rate-limit the global update to once per
+ * second across all ports.
+ */
+uint64_t last_stat_rtc;
+
+/*
+ * Poll and accumulate RX/TX statistics for one port.
+ *
+ * Rate limited in two stages: the per-port counters are refreshed at most
+ * once per second (internals->last_stat_rtc), and the global color/flow
+ * counters at most once per second across all ports (last_stat_rtc).
+ *
+ * Returns 0 on success (including when skipped by rate limiting),
+ * -1 on invalid state or port index.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/* Fix: also reject if_index == NUM_ADAPTER_PORTS_MAX - the per-port
+	 * stat arrays hold NUM_ADAPTER_PORTS_MAX entries, so valid indices
+	 * are [0 .. NUM_ADAPTER_PORTS_MAX - 1]; '>' allowed a one-past-end
+	 * array access.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index >= NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold the global color counters into each active flow's stats. */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow .query callback.
+ *
+ * Only RTE_FLOW_ACTION_TYPE_COUNT is supported: the per-flow packet/byte
+ * counters accumulated by poll_statistics() are returned, and optionally
+ * reset when qcnt->reset is set.
+ *
+ * Returns 0 on success, -1 on bad flow handle or unsupported action
+ * (with 'err' filled in).
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* Type-cast handles (inline profile, group > 0) carry no local
+	 * statistics and cannot be queried.
+	 */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Refresh counters (rate-limited internally). */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug helper: dump all fields of an rte_flow_tunnel to the filter log.
+ * Note: inet_ntoa() returns a static buffer; each call here overwrites it,
+ * which is fine since the result is consumed immediately by NT_LOG.
+ */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/* PMD-private action list handed back by ntnic_tunnel_decap_set().
+ * Slot [0] is the fixed tunnel-set action; slot [1] is a placeholder that
+ * decap_set() overwrites with the concrete decap action (e.g. VXLAN_DECAP)
+ * at runtime. The list length (2) is reported via *num_of_actions rather
+ * than an END terminator.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow .tunnel_decap_set callback.
+ *
+ * Returns the PMD-private action list (tunnel-set + decap) for the given
+ * tunnel type. Only VXLAN is supported; anything else yields -ENOTSUP.
+ *
+ * NOTE(review): mutates the shared static _pmd_actions[] array - not safe
+ * for concurrent callers with different tunnel types; confirm the flow API
+ * is serialized.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* PMD-private match item returned by ntnic_tunnel_match(): matches packets
+ * belonging to an offloaded tunnel (driver-specific item type).
+ */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow .tunnel_match callback.
+ *
+ * Hands back the single static PMD tunnel match item, regardless of the
+ * tunnel description. Always succeeds.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* One fixed item covers every offloaded tunnel. */
+	*num_of_items = 1;
+	*pmd_items = &_pmd_items;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+
+/*
+ * rte_flow .get_restore_info callback.
+ *
+ * Reconstructs the tunnel a packet was decapsulated from, using the FDIR
+ * mark the hardware placed in the mbuf:
+ *   hash.fdir.hi bits 31..24 = origin port id, bits 23..0 = VNI/tun id
+ *   hash.fdir.lo bits 23..0  = flow stat id (keys the tunnel definition)
+ *
+ * Returns 0 and fills 'info' on a hit; -EINVAL when the mbuf carries no
+ * usable mark or no tunnel definition matches.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* TTL/TOS are not recoverable from hardware; fixed values. */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DONT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/* rte_flow .tunnel_action_decap_release callback: the action list handed
+ * out by ntnic_tunnel_decap_set() is static, so there is nothing to free.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow .tunnel_item_release callback: the item handed out by
+ * ntnic_tunnel_match() is static, so there is nothing to free.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow operations table exported to the ethdev layer.
+ * .flush is intentionally unimplemented (NULL).
+ */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow on behalf of a client/representor port using already
+ * converted attribute/match/action structures.
+ * Returns the created flow handle, or NULL on failure (with 'error' set).
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* A (RTE flag, NT flag) pair used by the RSS translation table below. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Build one table entry from a common suffix: PAIR_NT(ETH_RSS_IPV4)
+ * expands to { RTE_ETH_RSS_IPV4, NT_ETH_RSS_IPV4 }. Requires both the
+ * RTE_- and NT_-prefixed symbols to exist.
+ */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/* Mapping between DPDK RSS hash-field flags and their NT equivalents.
+ * Only the fields listed here are supported by the hardware.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/* Translate one RTE_ETH_RSS_* bit to its NT_ETH_RSS_* counterpart.
+ * Returns a pointer to the NT value inside the mapping table, or NULL
+ * when the RTE bit has no NT equivalent.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rte_eth_rss_to_nt); ++i) {
+		if (rte_eth_rss_to_nt[i].first == rte_flag)
+			return &rte_eth_rss_to_nt[i].second;
+	}
+	return NULL; /* NOT found */
+}
+
+/* Translate one NT_ETH_RSS_* bit back to its RTE_ETH_RSS_* counterpart.
+ * Returns a pointer to the RTE value inside the mapping table, or NULL
+ * when the NT bit has no RTE equivalent.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rte_eth_rss_to_nt); ++i) {
+		if (rte_eth_rss_to_nt[i].second == nt_flag)
+			return &rte_eth_rss_to_nt[i].first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Build the NT RSS field set from a DPDK RTE_ETH_RSS_* bit mask.
+ * Unsupported bits are skipped with an error log so the remaining,
+ * supported fields are still applied.
+ *
+ * Fixes: use standard 'unsigned int' instead of the non-standard 'uint'
+ * typedef, and match the log format specifier to the unsigned index
+ * ('%u' instead of '%d').
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	/* Test every bit position of the 64-bit RTE mask. */
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Build the DPDK RTE_ETH_RSS_* bit mask from an NT RSS field set.
+ * Every NT bit must map back to an RTE bit (NT flags are a subset of the
+ * RTE flags), hence the assert rather than a runtime error path.
+ *
+ * Fix: use standard 'unsigned int' instead of the non-standard 'uint'
+ * typedef.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* Mask with only the MSB (bit 31) set. Meter/profile ids with this bit
+ * set denote egress objects (see the is_egress checks below).
+ */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/* Look up a meter profile by id in the per-device list.
+ * Returns the matching profile, or NULL when none exists.
+ */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *p;
+
+	LIST_FOREACH(p, &dev_priv->mtr_profiles, next) {
+		if (p->profile_id == meter_profile_id)
+			return p;
+	}
+
+	return NULL;
+}
+
+/*
+ * rte_mtr .meter_profile_add callback (vswitch path).
+ *
+ * Profiles are only accepted for virtual ports or for egress metering
+ * (profile id with the MSB set); ingress metering on physical ports is
+ * unsupported. The profile is copied and stored in the per-device list.
+ *
+ * Fix: the "[%s:%u]" log pattern expects __FILE__ as the second argument
+ * (as used elsewhere in this driver); __func__ was passed twice.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		/* Keep a private copy; the caller's profile may go away. */
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr .meter_profile_delete callback (vswitch path).
+ *
+ * Removes the profile from the per-device list and frees it.
+ * Returns -ENODEV (via rte_mtr_error_set) when the id is unknown.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) to the "[%s:%u]" log pattern,
+ * consistent with the rest of the driver.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	/* NOTE(review): meters still referencing this profile keep a
+	 * dangling pointer after the free - confirm callers destroy meters
+	 * before their profile.
+	 */
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/* Look up a meter object by id in the per-device list.
+ * Returns the matching meter, or NULL when none exists.
+ */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *m;
+
+	LIST_FOREACH(m, &dev_priv->mtrs, next) {
+		if (m->mtr_id == mtr_id)
+			return m;
+	}
+
+	return NULL;
+}
+
+/* Fixed-point rate: integer part plus fractional part in 1/1024 units. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Convert byte/s to byte/period in the form integer + fractional/1024.
+ * The period depends on the clock frequency and other parameters which,
+ * combined, give the multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps / 10^12
+ * (fixed the comment: dividing by 10^-12 would multiply; period_ps is in
+ * picoseconds, so the divisor is 10^12).
+ *
+ * Note: dividend = byte_per_second * period_ps is computed in uint64_t;
+ * callers use period_ps values small enough that realistic rates do not
+ * overflow.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /*10^12 pico second*/
+
+	res.integer = dividend / divisor;
+	const uint64_t remainder = dividend % divisor;
+
+	res.fractional = 1024ull * remainder / divisor;
+	return res;
+}
+
+/* Rate conversion for physical ports.
+ * NOTE(review): period 8 * 3333 ps is presumably derived from the FPGA
+ * clock - confirm against the hardware spec.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Rate conversion for virtual ports (64x the physical period).
+ * NOTE(review): period 512 * 3333 ps presumably matches the vport QoS
+ * update interval - confirm against the hardware spec.
+ */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr .meter_enable callback (vswitch path).
+ *
+ * Programs the meter's profile into hardware:
+ *  - egress (MSB set in profile id): EPP vport/txp QoS, rate converted to
+ *    bytes-per-period fixed point (minimum one 1/1024 fraction so the
+ *    rate is never zero);
+ *  - ingress: DBS TX QoS; the global rate interval is programmed once
+ *    (ingress_initial latch), CBS is clamped to the 27-bit hardware field.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) to the "[%s:%u]" log pattern.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* One-shot latch: the global ingress QoS rate is set only once. */
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* Never program a zero rate. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* Never program a zero rate. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/*
+ * Disable ingress QoS for this port: program the DBS TX QoS entry with
+ * enable=0 and zero rate/burst.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) to the "[%s:%u]" log pattern.
+ */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr .meter_disable callback (vswitch path).
+ *
+ * Zeroes the hardware QoS configuration for the meter: EPP vport/txp for
+ * egress meters (MSB set in mtr_id), DBS TX QoS for ingress meters.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) to the "[%s:%u]" log pattern.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/* MTR object create */
+/*
+ * rte_mtr .create callback (vswitch path).
+ *
+ * Rejects ingress meters on physical ports, refuses duplicate meter ids,
+ * requires the referenced profile to exist, then allocates the meter and
+ * links it into the per-device list. When params->meter_enable is set the
+ * meter is also programmed into hardware immediately.
+ *
+ * Fix: pass __FILE__ (not __func__ twice) to the "[%s:%u]" log pattern.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	/* NOTE(review): if enabling fails the meter stays in the list in a
+	 * created-but-disabled state - confirm this is intended.
+	 */
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/* MTR object destroy */
+/*
+ * rte_mtr .destroy callback (vswitch path).
+ *
+ * Zeroes the meter's hardware QoS configuration (egress via EPP, ingress
+ * via DBS), then unlinks and frees the meter object.
+ *
+ * Fixes: report ENODEV (not EEXIST) when the meter id is unknown -
+ * consistent with eth_meter_profile_delete(); and pass __FILE__ (not
+ * __func__ twice) to the "[%s:%u]" log pattern.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __FILE__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr .capabilities_get callback (inline FLM path).
+ *
+ * Reports the metering capabilities of the inline filter hardware:
+ * RFC 2698 trTCM only, color-blind, byte mode, green stats only.
+ * Fails with EINVAL when the device has no metering support at all.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* NOTE(review): 0xfff << 0xf with a 1099 scale factor - presumably
+	 * derived from the hardware rate-field encoding; confirm against
+	 * the FPGA spec.
+	 */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * Add an inline meter profile.
+ * Only RFC 2698 trTCM in byte mode is accepted, and the committed and peak
+ * rate/burst pairs must be equal; the profile is then pushed to hardware
+ * through flow_mtr_set_profile().
+ *
+ * Fix: "error" was annotated __rte_unused although it is passed to
+ * rte_mtr_error_set() on every failure path — the annotation was misleading
+ * and has been removed.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): profile ids are range-checked against the policy
+	 * maximum — presumably the profile and policy tables share a size;
+	 * confirm against the flow API.
+	 */
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete an inline meter profile by resetting it to all-zero in hardware.
+ *
+ * Fix: all three parameters were annotated __rte_unused, yet "dev",
+ * "meter_profile_id" and "error" are all used below — the annotations
+ * were wrong and have been removed.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	/* A zeroed profile marks the slot as free; return code is ignored
+	 * deliberately (best-effort delete, nothing to report to callers).
+	 */
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Validate and install an inline meter policy.
+ * GREEN must be pass-through (empty, VOID or PASSTHRU), while YELLOW and
+ * RED must both drop; anything else is rejected.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* GREEN: empty action list, or a lone VOID/PASSTHRU before END */
+	const struct rte_flow_action *act = policy->actions[RTE_COLOR_GREEN];
+	int green_ok =
+		act[0].type == RTE_FLOW_ACTION_TYPE_END ||
+		((act[0].type == RTE_FLOW_ACTION_TYPE_VOID ||
+		  act[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU) &&
+		 act[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* YELLOW: exactly DROP followed by END */
+	act = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_ok = act[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+			act[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	/* RED: exactly DROP followed by END */
+	act = policy->actions[RTE_COLOR_RED];
+	int red_ok = act[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		     act[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (!green_ok || !yellow_ok || !red_ok)
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+
+	if (flow_mtr_set_policy(internals->flw_dev, policy_id, 1))
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+
+	return 0;
+}
+
+/*
+ * Delete an inline meter policy.  Policies are not tracked per-id in the
+ * PMD, so only the range check is performed here.
+ *
+ * Fix: "policy_id" and "error" were annotated __rte_unused although both
+ * are used in the range check below; only "dev" is genuinely unused.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * Create an inline FLM meter.
+ * Validates the parameters against this PMD's restrictions (color-blind,
+ * green-only stats, enabled, shared) and offloads the meter to hardware
+ * via flow_mtr_create_meter().  Returns 0 on success or a negative value
+ * with "error" filled in.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* Only color-blind operation: no previous color, no DSCP table */
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	/* Hardware only counts green packets/bytes (see capabilities_get) */
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* NOTE(review): the profile id is range-checked against the policy
+	 * maximum — presumably both tables share one size; confirm against
+	 * the flow API before changing.
+	 */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	/* All checks passed — program the meter in hardware */
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Destroy an inline FLM meter and remove it from hardware.
+ *
+ * Fix: "error" was annotated __rte_unused although it is passed to
+ * rte_mtr_error_set() on both failure paths — annotation removed.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Adjust (reset) an inline meter's bucket value.
+ * This op is wired to .stats_update; the caller encodes the adjustment in
+ * "adjust_value": bit 63 must be set as an opt-in flag, and the low 32 bits
+ * carry the actual value handed to flm_mtr_adjust_stats().
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* Bit 63 acts as a "this really is an adjustment" guard */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the guard bit; bits 62..32 must then be zero as well */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Read (and optionally clear) the green counters of an inline meter.
+ * All other colors remain zero, matching the advertised stats_mask.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+
+	/* Start from a clean slate; only the green slots get filled in */
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+
+	flm_mtr_read_stats(internals->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* Meter ops for the vswitch FPGA profile: EPP-based QoS, no policies,
+ * no capability/stats ops (unimplemented callbacks stay NULL).
+ */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* Meter ops for the inline FLM profile: full policy support plus stats;
+ * note .stats_update is repurposed as a bucket-adjust hook (see
+ * eth_mtr_stats_adjust_inline).
+ */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * Select the rte_mtr_ops table matching the adapter's FPGA profile.
+ * Returns 0 and stores the table through "ops", or -1 for profiles
+ * without metering support.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	if (profile == FPGA_INFO_PROFILE_VSWITCH) {
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		return 0;
+	}
+
+	if (profile == FPGA_INFO_PROFILE_INLINE) {
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		return 0;
+	}
+
+	/* UNKNOWN, CAPTURE and anything else: metering is unavailable */
+	NT_LOG(ERR, NTHW,
+	       "" PCIIDENT_PRINT_STR
+	       ": fpga profile not supported [%s:%u]\n",
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       __func__, __LINE__);
+	return -1;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/* Forward declaration keeps this header self-contained; only a pointer is
+ * used here, so the full <rte_ethdev.h> definition is not needed.
+ */
+struct rte_eth_dev;
+
+/*
+ * Store the rte_mtr_ops table matching the adapter's FPGA profile through
+ * "ops".  Returns 0 on success, -1 for unsupported profiles.
+ * Fix: the __rte_unused annotation on "dev" was dropped — the
+ * implementation dereferences it.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port vDPA bookkeeping, indexed by registration order. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;			/* vhost id, set on new_device */
+	uint32_t index;			/* base HW ring index */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;			/* rte_vhost driver flags */
+	struct rte_pci_addr addr;
+};
+
+/* Registered ports and their count; slots are assigned sequentially. */
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a (vdpa_dev, queue_id) pair into the HW ring index, host id
+ * (VF number) and representor port of the owning vDPA port.
+ * Returns 0 on success, -1 if the device or queue is unknown.
+ *
+ * Fix: the tx-queue error path logged "rxqs %i" and printed vport[i].rxqs
+ * even though it had just range-checked against txqs; it now reports the
+ * tx queue count it actually tested.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev != vdpa_dev)
+			continue;
+
+		if (rx) {
+			if (queue_id >= vport[i].rxqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].rxqs);
+				return -1;
+			}
+		} else {
+			if (queue_id >= vport[i].txqs) {
+				NT_LOG(ERR, VDPA,
+				       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+				       __func__, vdpa_dev, queue_id,
+				       vport[i].txqs);
+				return -1;
+			}
+		}
+
+		/* Rx and Tx rings share the same base index offsetting */
+		*hw_index = vport[i].index + queue_id;
+		*host_id = vport[i].host_id;
+		*rep_port = vport[i].rep_port;
+		return 0;
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a new vDPA port backed by the given VF PCI device and start its
+ * vhost-user driver.  On success *vhid receives the port's slot index.
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fix: added a bounds check on nb_vpda_devcnt — the original wrote past
+ * the end of vport[] once MAX_VDPA_PORTS ports had been registered.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA,
+		       "Cannot register more than %lu vDPA ports\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	/* The slot is consumed even if start failed, mirroring the original
+	 * behavior so callers can still map *vhid to this port.
+	 */
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver of a registered vDPA port.
+ *
+ * NOTE(review): the early "return" means only the FIRST port with a
+ * non-empty ifname is torn down per call; callers must invoke this
+ * repeatedly to close all ports.  Confirm whether that is intended —
+ * the name suggests closing everything.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Mark the slot free; the ifname doubles as the
+			 * "in use" flag for this table.
+			 */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+/* Debug-only table mapping virtio feature bit numbers to printable names;
+ * compiled in only when DUMP_VIRTIO_FEATURES is defined.
+ * NOTE(review): many name strings carry two leading spaces — they are
+ * printed verbatim, so the output indentation is inconsistent; confirm
+ * whether that is intentional before normalizing.
+ */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+struct {
+	uint64_t id;	/* feature bit position */
+	const char *name;	/* label printed by dump_virtio_features() */
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Print the name of every feature bit from virt_features[] that is set
+ * in the negotiated feature word (debug aid).
+ */
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		uint64_t mask = 1ULL << virt_features[i].id;
+
+		if (features & mask)
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost "new_device" callback: bind the vhost connection to its vdpa_port
+ * slot (matched by socket path), wait for the PMD instance to come ready,
+ * then record whether packed or split virtqueues were negotiated.
+ * Returns 0 on success, -1 on any failure.
+ *
+ * Fix: the negotiated-features log used "%016lx" for a uint64_t, which is
+ * wrong on ILP32 targets; the value is now cast and printed with "%016llx".
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the port slot registered under this socket path */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll up to ~2 s for the ethdev side of this port to be ready */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016llx\n",
+	       (unsigned long long)negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	/* Either IN_ORDER or RING_PACKED is required to run vDPA directly */
+	if ((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/* vhost "destroy_device" callback: log the departing port and take the
+ * link down on the matching virtual port.
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	struct pmd_internals *intern;
+	uint32_t idx;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Log which port is going away (first exact ifname match) */
+	for (idx = 0; idx < MAX_VDPA_PORTS; idx++) {
+		if (strcmp(ifname, vport[idx].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[idx].vdev->device->name);
+			break;
+		}
+	}
+
+	/* set link down on virtual port */
+	for (idx = 0; idx < MAX_VDPA_PORTS; idx++) {
+		if (strncmp(ifname, vport[idx].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(idx);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user lifecycle callbacks registered for every vDPA socket. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Bring up the vhost-user driver for one vDPA port: register the socket,
+ * attach the lifecycle callbacks, mask out virtio features the hardware
+ * path cannot offload, and start the driver.
+ * Returns 0 on success, -1 on the first failing step.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Disable offloads and control-queue features this datapath does
+	 * not implement, so the guest never negotiates them.
+	 */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+/* Forward declarations keep this header self-contained; only pointers to
+ * these DPDK types are used, so the full definitions are not required.
+ */
+struct rte_vdpa_device;
+struct rte_pci_device;
+
+/* Map (vdpa_dev, queue_id) to HW ring index, host id and representor
+ * port.  Returns 0 on success, -1 if the device/queue is unknown.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+/* Register and start a vDPA port for a VF; *vhid receives its slot. */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+/* Tear down one registered vDPA port (see implementation note). */
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a VF's PCI vendor/device id pair to a printable adapter name;
+ * anything unrecognized yields "Unknown".
+ */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+			return "NT200A02";
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+			return "NT50B01";
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for Napatech VFs: first create the vDPA device for the VF,
+ * then create the DPDK VF interface.  Returns 0 on success, -1 if the
+ * vDPA probe fails (the VF interface is then not created).
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove for Napatech VFs: the vDPA device must be removed before
+ * the VF interface is torn down.
+ */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int rc = ntvf_vdpa_pci_remove(pci_dev);
+
+	if (rc != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI id table: the two Napatech VF device ids handled by this driver. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* VF PCI driver; VFs must be bound to vfio-pci (see KMOD_DEP below). */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+/* Fix: include the DPDK bus header with angle brackets, matching how the
+ * driver's .c files include it (it is an external dependency, not a
+ * project-local header).
+ */
+#include <rte_bus_pci.h>
+
+/* Probe/remove for Napatech VF PCI devices.  The __rte_unused markers
+ * were dropped: the implementation uses both parameters.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..6fc90939a1
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1245 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#if RTE_VERSION_NUM(23, 3, 0, 99) > RTE_VERSION
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES            \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#else
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+#endif
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Guest-visible state of a single virtqueue, mirrored for the HW queue. */
+struct vring_info {
+	uint64_t desc;	/* guest physical addresses of the three rings */
+	uint64_t avail;
+	uint64_t used;
+	uint16_t size;	/* ring size in descriptors */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;
+	struct nthw_virt_queue *p_vq;	/* backing HW virt queue */
+
+	int enable;
+};
+
+/* Negotiated virtio state for one VF: features plus all of its vrings. */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	/* Rx/Tx pairs, hence twice the queue count */
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device context. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds obtained in ntvf_vdpa_vfio_setup(); -1 when unset */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;	/* vhost id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* Lifecycle flags, accessed with __atomic builtins */
+	volatile int32_t started;
+	volatile int32_t dev_attached;
+	volatile int32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile int32_t dma_mapped;
+	volatile int32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+/* Bytes of a used ring with "size" entries (flags + idx + ring + event) */
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Mutex-protected global list of all ntvf_vdpa_internal instances. */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/* Look up the internal_list entry owning the given vDPA device.
+ * Takes the list lock for the scan; returns NULL when not found.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->vdev == vdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/* Look up the internal_list entry owning the given PCI device.
+ * Takes the list lock for the scan; returns NULL when not found.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->pdev == pdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Set up VFIO for the VF and cache its container/group/device fds in the
+ * internal context.  All fds are first reset to -1 so a failed setup
+ * leaves them in a well-defined state.  Returns 0 on success, -1 on error.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (do_map != 0) or unmap (do_map == 0) every guest memory region of the
+ * vhost connection into the VF's IOMMU domain.
+ * Returns 0 on success, negative on failure or on a redundant request.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Refuse to map twice or to unmap when nothing is mapped */
+	if ((do_map && __atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED)) ||
+			(!do_map && !__atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Fixed format string: "GPA 0xllx" was missing the '%', so
+		 * guest_phys_addr had no conversion specifier at all.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 1, __ATOMIC_RELAXED);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 0, __ATOMIC_RELAXED);
+		}
+	}
+
+exit:
+	free(mem);	/* free(NULL) is a no-op */
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address to the guest-physical address of the
+ * vhost connection `vid`. Returns 0 when the HVA is in no guest region.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+	uint32_t r;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (r = 0; r < mem->nregions; r++) {
+		const struct rte_vhost_mem_region *region = &mem->regions[r];
+
+		if (hva >= region->host_user_addr &&
+				hva < region->host_user_addr + region->size) {
+			gpa = hva - region->host_user_addr +
+				region->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Resolve the guest-physical addresses of one vhost vring and create the
+ * matching FPGA virt queue (even vring index = Rx, odd = Tx).
+ * Returns 0 on success (or for unsupported split/non-in-order rings, which
+ * are only warned about), non-zero on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* Translate descriptor, available and used ring HVAs to GPAs */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	/* Restore the ring indices saved by a previous stop (live restart) */
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Map (direction, queue) to the FPGA queue id and host id */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		/* Only IN_ORDER or PACKED rings are offloaded to the FPGA */
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Even index: receive virt queue */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				/* NOTE(review): unreachable - the two branches
+				 * above already cover every index parity.
+				 */
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring the datapath up for a newly attached vhost connection: map guest
+ * memory and create/enable the initial virt queues.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	int vid = internal->vid;
+
+	LOG_FUNC_ENTER();
+
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		/* Inline profile: bring up at most the first Rx/Tx pair */
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (hw->vring[i].enable)
+				continue;
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, i);
+			if (hw->vring[i].desc && hw->vring[i].p_vq) {
+				if (hw->vring[i].vq_type == 0)
+					nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+				else
+					nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+				hw->vring[i].enable = 1;
+			}
+		}
+	} else {
+		/*
+		 * Other profiles: vring 0 must be enabled/created here -
+		 * it is not enabled later through the vring state callback.
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear the datapath down: save the ring indices back to vhost (so a later
+ * restart resumes where we left off) and release every FPGA virt queue.
+ * Always returns 0; per-vring release failures are only logged.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Persist the current indices so vhost can restore them later */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		/* Queues only exist for IN_ORDER or PACKED rings (see
+		 * ntvf_vdpa_create_vring) - mirror that condition here.
+		 */
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		/* Mark the vring as torn down */
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Buffer size for VFIO_DEVICE_SET_IRQS: the vfio_irq_set header plus one
+ * eventfd (int) per queue vector (Rx+Tx per queue) plus one extra vector
+ * for the device/config interrupt.
+ * Fixed precedence: the "+ 1" must be inside the multiplication so the
+ * extra vector gets sizeof(int) bytes, not a single byte.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * (NTVF_VDPA_MAX_QUEUES * 2 + 1))
+
+/*
+ * Route MSI-X vectors through VFIO: vector 0 gets the PCI device's own
+ * interrupt fd, vectors 1..nr_vring get the per-vring vhost callfds.
+ * Returns 0 on success (including the too-many-vectors fallback where only
+ * polling will work), -1 if the VFIO ioctl fails.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	/* Idempotent: skip if interrupts are already routed */
+	if (__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0: the device's own interrupt fd */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* NOTE(review): assumes nr_vring is even (Rx/Tx pairs) - an odd count
+	 * would fetch one vring past the end here; confirm with vhost.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 1, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down all MSI-X vectors previously routed through VFIO.
+ * Idempotent: returns 0 immediately when interrupts are not enabled.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)irq_set_buf;
+	int status;
+
+	if (!__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* A zero-count TRIGGER request with DATA_NONE removes all vectors */
+	irq_set->argsz = sizeof(struct vfio_irq_set);
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	status = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (status) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 0, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the started/dev_attached flags: start it when
+ * both are set and it is not running, stop it when either clears while it
+ * is running. Serialized by internal->lock.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(__atomic_load_n(&internal->started, __ATOMIC_RELAXED) &&
+			 __atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 1, __ATOMIC_RELAXED);
+	} else if (__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(!__atomic_load_n(&internal->started, __ATOMIC_RELAXED) ||
+			 !__atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Fixed: the goto used to precede this log statement,
+			 * leaving the error unreported (dead code).
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 0, __ATOMIC_RELAXED);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/* vhost callback: a guest driver attached - record the vid and start up. */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	/* Remember the vhost connection and bring the datapath up */
+	entry->internal->vid = vid;
+	__atomic_store_n(&entry->internal->dev_attached, 1, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(entry->internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vhost callback: the guest driver detached - stop and invalidate queues. */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	uint32_t i;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = entry->internal;
+
+	/* Detach and let update_datapath tear everything down */
+	__atomic_store_n(&internal->dev_attached, 0, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	hw = &internal->hw;
+	for (i = 0; i < hw->nr_vring; i++)
+		hw->vring[i].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vhost callback: features were (re)negotiated. Succeeds unless the guest
+ * requests dirty-page logging (live migration), which is unsupported.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+	uint64_t features;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	/* Nothing more to do unless the guest asks for dirty-page logging */
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/* vhost callback: VFIO group fd backing this vhost connection. */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/* vhost callback: VFIO device fd backing this vhost connection. */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_dev_fd;
+}
+
+/* vDPA callback: report the number of queue pairs this device supports. */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = entry->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA callback: report the virtio feature set offered by this device. */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	entry = find_internal_resource_by_vdev(vdev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = entry->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA callback: report the supported vhost-user protocol features.
+ * Note: the __rte_unused annotation was removed from vdev - the parameter
+ * is in fact used in both log statements below.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Final queue bring-up: route MSI-X interrupts through VFIO and enable
+ * every FPGA virt queue (even index = Rx, odd index = Tx).
+ * Returns 0 on success or the interrupt-setup error code.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Use the driver's logging facility instead of bare printf */
+		NT_LOG(ERR, VDPA, "%s: Failed to enable interrupts via VFIO\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (int i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+
+/*
+ * vhost callback: enable or disable a single vring. Creates the vring on
+ * first enable; on the last vring of a non-inline profile (and on the last
+ * enabled vring of the inline profile) it also finalizes interrupt routing
+ * and switches all queues on via ntvf_vdpa_configure_queue().
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx+Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* Already created earlier: just re-enable */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA driver callbacks registered with the vhost library */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO, register the vDPA
+ * device with vhost and add it to the global registry.
+ * Returns 0 on success, -1 on failure (all allocations released).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fixed: was "return -1", which leaked list and internal */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	__atomic_store_n(&internal->started, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, release VFIO resources, unregister the
+ * vDPA device and free all per-device state.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct internal_list *entry;
+	struct ntvf_vdpa_internal *internal;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	entry = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (entry == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = entry->internal;
+
+	/* Stop the datapath before tearing anything down */
+	__atomic_store_n(&internal->started, 0, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	/* Drop the bookkeeping entry under the registry lock */
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, entry, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(entry);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Empty ID table: instances are bound explicitly, not matched by PCI id */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+/* Register the driver with the PCI bus and require the vfio-pci kmod */
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/* Per-function enter/leave trace helpers; compiled out when undefined */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive the PF/VF number from the PCI address: the three low bits come
+ * from the function number, the next five from the device id.
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	int devid_bits = pdev->addr.devid & 0x1f;
+	int func_bits = pdev->addr.function & 0x7;
+
+	return (devid_bits << 3) | func_bits;
+}
+
+/* Internal API */
+
+/* Per-device VFIO state, indexed by PF/VF number (see nt_vfio_vf_num()) */
+struct vfio_dev {
+	int container_fd; /* VFIO container fd; -1 or the default container */
+	int group_fd; /* IOMMU group fd bound into the container */
+	int dev_fd; /* device fd taken from the interrupt handle */
+	uint64_t iova_addr; /* next IOVA to hand out; advanced 1G per mapping */
+};
+
+/* One slot per possible PF/VF number (nt_vfio_vf_num() yields 0..255) */
+static struct vfio_dev vfio_list[256];
+
+/* Look up the VFIO state slot; returns NULL for an out-of-range vf_num */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 || vf_num > 255)
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+
+/*
+ * Set up VFIO for a PCI device: select the default container (PF0) or
+ * create a new one, bind the device's IOMMU group into it and, for VFs,
+ * map the device.
+ *
+ * Returns the PF/VF number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/*
+	 * rte_vfio_get_group_num() returns 1 on success; on failure
+	 * iommu_group_num is not valid, so bail out instead of passing an
+	 * uninitialized group number on to the container bind below.
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) != 1) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group number.\n");
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Only destroy a container this function created, not the default */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Tear down the VFIO container associated with a PF/VF number.
+ * Returns 0 on success, -1 when vf_num is out of range.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio_slot;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio_slot = vfio_get(vf_num);
+	if (vfio_slot == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio_slot->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host-virtual region into the device's IOVA space for vf_num.
+ * For 1G-sized requests the base is aligned down to a 1G boundary and
+ * *iova_addr is returned with the offset into that block applied.
+ * Every call consumes one 1G slot of IOVA space.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Format fix: virt_addr must be cast to uint64_t to match PRIX64,
+	 * and the uint64_t size must not be printed with %d (both were
+	 * specifier/argument mismatches, which is undefined behavior).
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	/* Hand back the IOVA corresponding to the caller's virtual address */
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Unmap a region previously mapped with nt_vfio_dma_map().
+ * For 1G-sized requests the virtual base and IOVA are aligned back down
+ * to the 1G boundary, mirroring the adjustment made at map time.
+ * Returns 0 on success (including when no container exists), -1 on failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Nothing to unmap if the container was never set up */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+/*
+ * Map guest-physical memory for vDPA: the supplied iova_addr is used
+ * directly as the IOVA. Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* size is uint64_t: use PRIu64, not %d, to match the argument type */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%" PRIu64 "\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo a vDPA guest-physical mapping created by nt_vfio_dma_map_vdpa().
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio_slot = vfio_get(vf_num);
+	int res;
+
+	if (vfio_slot == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+	res = rte_vfio_container_dma_unmap(vfio_slot->container_fd, virt_addr,
+					   iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       res, vfio_slot->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for vf_num, or -1 for an illegal id */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove failed" message */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the IOMMU group fd for vf_num, or -1 for an illegal id */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove failed" message */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for vf_num, or -1 for an illegal id */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fixed copy-pasted "device remove failed" message */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* RTE_INIT registers nt_vfio_init to run at startup (constructor time) */
+RTE_INIT(nt_vfio_init);
+
+static void nt_vfio_init(void)
+{
+	/* Hand this module's DMA map/unmap callbacks to the nt_util layer */
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Set up VFIO for a device; returns the PF/VF number or -1 on failure */
+int nt_vfio_setup(struct rte_pci_device *dev);
+/* Destroy the VFIO container belonging to vf_num; returns 0 or -1 */
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-device VFIO file descriptors; -1 on illegal id */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* DMA map/unmap with driver-allocated IOVA (one 1G slot per mapping) */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* DMA map/unmap for vDPA: guest-physical addresses used directly as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..c0e67ba03d
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+/* Shorthand for marking unused parameters */
+#define UNUSED __rte_unused
+
+/*
+ * Describes one xstat entry: the exported name, which counter block it
+ * comes from (1 = RX port counters, 2 = TX port counters, 3 = FLM
+ * counters; see the switch in nthw_xstats_get()), and the byte offset of
+ * the uint64_t counter within that block's structure.
+ */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source;
+	unsigned int offset;
+};
+
+/*
+ * Extended stat for VSwitch.
+ * Entry format: { exported name, source (1 = RX, 2 = TX), counter offset }.
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "rx_dropped_packets", 1, offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "rx_qos_dropped_bytes", 1, offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "rx_qos_dropped_packets", 1, offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{ "tx_dropped_packets", 2, offsetof(struct port_counters_vswitch_v1, drop_events) },
+	{ "tx_qos_dropped_bytes", 2, offsetof(struct port_counters_vswitch_v1, qos_drop_octets) },
+	{ "tx_qos_dropped_packets", 2, offsetof(struct port_counters_vswitch_v1, qos_drop_pkts) },
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ * Entry format: { exported name, source (1 = RX, 2 = TX, 3 = FLM), offset }.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3, offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3, offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) },
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ * Same entries as v1 plus the FLM 0.20 counters at the end.
+ * Entry format: { exported name, source (1 = RX, 2 = TX, 3 = FLM), offset }.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{ "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{ "rx_jabbers_not_truncated", 1, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "rx_size_65_to_127_packets", 1, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "rx_size_128_to_255_packets", 1, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "rx_size_256_to_511_packets", 1, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "rx_size_512_to_1023_packets", 1, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "rx_size_1024_to_1518_packets", 1, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "rx_size_1519_to_2047_packets", 1, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "rx_size_2048_to_4095_packets", 1, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "rx_size_4096_to_8191_packets", 1, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "rx_size_8192_to_max_packets", 1, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+	{ "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) },
+	{ "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) },
+	{ "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) },
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{ "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) },
+	{ "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) },
+	{ "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) },
+	{ "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) },
+	{ "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) },
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{ "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) },
+	{ "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) },
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{ "tx_jabbers_not_truncated", 2, offsetof(struct port_counters_v2, jabbers_not_truncated) },
+	{ "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) },
+	{ "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) },
+	{ "tx_size_65_to_127_packets", 2, offsetof(struct port_counters_v2, pkts_65_to_127_octets) },
+	{ "tx_size_128_to_255_packets", 2, offsetof(struct port_counters_v2, pkts_128_to_255_octets) },
+	{ "tx_size_256_to_511_packets", 2, offsetof(struct port_counters_v2, pkts_256_to_511_octets) },
+	{ "tx_size_512_to_1023_packets", 2, offsetof(struct port_counters_v2, pkts_512_to_1023_octets) },
+	{ "tx_size_1024_to_1518_packets", 2, offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) },
+	{ "tx_size_1519_to_2047_packets", 2, offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) },
+	{ "tx_size_2048_to_4095_packets", 2, offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) },
+	{ "tx_size_4096_to_8191_packets", 2, offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) },
+	{ "tx_size_8192_to_max_packets", 2, offsetof(struct port_counters_v2, pkts_8192_to_max_octets) },
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{ "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) },
+	{ "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) },
+	{ "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) },
+	{ "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) },
+	{ "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) },
+	{ "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) },
+	{ "flm_count_auto_unlearn_ignore", 3, offsetof(struct flm_counters_v1, auto_unlearn_ignore) },
+	{ "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) },
+	{ "flm_count_timeout_unlearn_done", 3, offsetof(struct flm_counters_v1, timeout_unlearn_done) },
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{ "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) },
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{ "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) },
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{ "flm_count_cuc_start", 3, offsetof(struct flm_counters_v1, cuc_start) },
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ */
+/* Size of the largest xstats table, so the baseline fits every mode */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port counter snapshot taken at reset; xstats report deltas from it */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = {{ 0 }};
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/*
+ * Read up to n xstats for a port, subtracting the reset baseline.
+ * Returns the number of entries written. Must be called with the stat
+ * mutex locked (see comment above).
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < n && idx < tbl_size; idx++) {
+		uint64_t value = 0;
+
+		stats[idx].id = idx;
+		switch (tbl[idx].source) {
+		case 1: /* RX counter */
+			value = *((uint64_t *)&rx_base[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 2: /* TX counter */
+			value = *((uint64_t *)&tx_base[tbl[idx].offset]) -
+				nthw_xstats_reset_val[port][idx];
+			break;
+		case 3: /* FLM counter; baseline kept at index 0 */
+			if (flm_base != NULL)
+				value = *((uint64_t *)&flm_base[tbl[idx].offset]) -
+					nthw_xstats_reset_val[0][idx];
+			break;
+		default:
+			break;
+		}
+		stats[idx].value = value;
+	}
+
+	return idx;
+}
+
+/*
+ * Read xstat values for the requested ids; invalid ids are skipped.
+ * Returns the number of values actually written. Must be called with the
+ * stat mutex locked (see comment above).
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int i;
+	int filled = 0;
+
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		uint64_t id = ids[i];
+
+		if (id >= tbl_size)
+			continue;
+
+		switch (tbl[id].source) {
+		case 1: /* RX counter */
+			values[i] = *((uint64_t *)&rx_base[tbl[id].offset]) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 2: /* TX counter */
+			values[i] = *((uint64_t *)&tx_base[tbl[id].offset]) -
+				    nthw_xstats_reset_val[port][id];
+			break;
+		case 3: /* FLM counter; baseline kept at index 0 */
+			if (flm_base != NULL)
+				values[i] = *((uint64_t *)&flm_base[tbl[id].offset]) -
+					    nthw_xstats_reset_val[0][id];
+			else
+				values[i] = 0;
+			break;
+		default:
+			values[i] = 0;
+			break;
+		}
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Snapshot the current counters as the new reset baseline for a port.
+ * "flm_count_current" is excluded because resetting it makes no sense.
+ * Must be called with the stat mutex locked (see comment above).
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	uint8_t *flm_base;
+	uint8_t *rx_base;
+	uint8_t *tx_base;
+	unsigned int idx;
+
+	if (is_vswitch) {
+		flm_base = NULL;
+		rx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_base = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_base = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			tbl = nthw_cap_xstats_names_v1;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			tbl = nthw_cap_xstats_names_v2;
+			tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (idx = 0; idx < tbl_size; idx++) {
+		switch (tbl[idx].source) {
+		case 1: /* RX counter */
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&rx_base[tbl[idx].offset]);
+			break;
+		case 2: /* TX counter */
+			nthw_xstats_reset_val[port][idx] =
+				*((uint64_t *)&tx_base[tbl[idx].offset]);
+			break;
+		case 3:
+			/* Reset makes no sense for flm_count_current */
+			if (flm_base != NULL &&
+			    strcmp(tbl[idx].name, "flm_count_current") != 0)
+				nthw_xstats_reset_val[0][idx] =
+					*((uint64_t *)&flm_base[tbl[idx].offset]);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked
+ */
+/*
+ * Fill in xstat names (up to size entries). When xstats_names is NULL,
+ * return the total number of xstats available instead.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_size;
+	unsigned int idx;
+	int filled = 0;
+
+	if (is_vswitch) {
+		tbl = nthw_virt_xstats_names;
+		tbl_size = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		tbl = nthw_cap_xstats_names_v1;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		tbl = nthw_cap_xstats_names_v2;
+		tbl_size = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (xstats_names == NULL)
+		return tbl_size;
+
+	for (idx = 0; idx < size && idx < tbl_size; idx++) {
+		strlcpy(xstats_names[idx].name, tbl[idx].name,
+			sizeof(xstats_names[idx].name));
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Fill in names for the requested xstat ids; invalid ids are skipped.
+ * When xstats_names is NULL, return the total number of xstats available.
+ * Returns the number of entries actually filled, matching the counting
+ * behavior of nthw_xstats_get_by_id() (previously invalid ids were
+ * counted as well, which was inconsistent with that function).
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/* Count only entries that were actually filled */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Fill xstat names; returns the table size when xstats_names is NULL */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Read up to n xstats for a port (call with stat mutex locked) */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Snapshot counters as the new reset baseline (call with stat mutex locked) */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* Fill names for selected ids; returns the table size when xstats_names is NULL */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+/* Read xstat values for selected ids (call with stat mutex locked) */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v12 8/8] net/ntnic: adds socket connection to PMD
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-08-31 13:51   ` [PATCH v12 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-08-31 13:51   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-08-31 13:51 UTC (permalink / raw)
  To: dev, mko-plv, thomas; +Cc: ckm

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
v10:
* Fix uninitialized variables and build warnings.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  877 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5357 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures.
+ * A request path such as "get,nic_pci_ids" is resolved one token at a time
+ * against arrays of these nodes (see execute_function()): a node either
+ * descends into 'sub_funcs' or invokes the leaf handler 'func'.
+ * Tables are terminated by an all-NULL entry.
+ */
+struct func_s {
+	/* parameter keyword this node matches */
+	const char *param;
+	/* next level of the call tree, or NULL for a leaf */
+	struct func_s *sub_funcs;
+	/* leaf handler invoked when this node matches and has no sub_funcs */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply for 'module' with error 'err_code'.
+ * On success *data is an allocated buffer laid out as a 32-bit binary error
+ * code followed by the text "<module>:<err_text>", and *len is the string
+ * length including its 0-terminator.  *len is 0 when 'data' is NULL or the
+ * allocation fails.  Always returns REQUEST_ERR.
+ * The caller owns and must free *data.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4 bytes for the binary error code + "module" + ':' +
+		 * error text + 0-terminator
+		 */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			/* "----" is a placeholder patched with the 32-bit
+			 * error code below; *len is taken before the patch so
+			 * it always covers the full message
+			 */
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a 4-byte status reply carrying 'code'.  *len is 0 when 'data' is
+ * NULL or the allocation fails, otherwise sizeof(uint32_t).  Always returns
+ * REQUEST_OK.  The caller owns and must free *data.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (data) {
+		*data = malloc(sizeof(uint32_t));
+		if (*data) {
+			*len = sizeof(uint32_t);
+			*(uint32_t *)*data = (uint32_t)code;
+		}
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Resolve a comma-separated request path (e.g. "get,nic_pci_ids") against
+ * the 'func_list' call tree and invoke the matching leaf handler.
+ * Each recursion level consumes one token: hdr->len is reduced by the token
+ * plus its separator, and 'sub_funcs' points just past the NUL that strtok()
+ * wrote over the ',' separator, so the remainder is handed on unchanged.
+ * Returns the leaf handler's result, or REQUEST_ERR via ntconn_error() when
+ * the path is empty, incomplete or not recognized.
+ * NOTE(review): strtok() keeps static state and is not reentrant - confirm
+ * all requests are parsed from a single thread (strtok_r would be the
+ * thread-safe alternative).
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		/* at depth 0 the whole parameter is missing; deeper down the
+		 * path ended before reaching a leaf
+		 */
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				/* node has neither children nor a handler */
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+/* Operations a module registers with the ntconnect server (one instance per
+ * module per adapter; see register_ntconn_mod()).
+ */
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain a descriptive error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error: numeric code plus human-readable text, looked up from an
+ * enum ntconn_err_e via get_ntconn_error().
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/* One registered module instance; kept in a singly linked list. */
+typedef struct ntconn_mod_s {
+	void *hdl;		/* module private context, passed to all ops */
+	struct pci_id_s addr;	/* adapter this instance is bound to */
+	const ntconnapi_t *op;	/* module operations (request/free/cleanup) */
+
+	pthread_mutex_t mutex;	/* serializes requests to this instance */
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd;	/* listening socket file descriptor */
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;	/* CPU affinity for server threads */
+};
+
+/* Register the built-in "server" module itself. */
+int ntconn_server_register(void *server);
+
+/* Register a module instance 'op' with context 'hdl' for adapter 'addr'. */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+/* Create the listening socket 'sockname' and start serving requests. */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"	/* NOTE(review): system header - should be <stdint.h> */
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+/* Error codes returned to clients in the binary reply header
+ * (see ntconn_error() / get_ntconn_error()).
+ */
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+/* Status codes carried in a 4-byte reply (see ntconn_reply_status()). */
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+/* Message framing tags used in ntconn_header_s::tag. */
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+/* Wire header: 'len' bytes of command string followed by 'blob_len' bytes
+ * of binary payload (layout documented on ntconn_api_s::request).
+ */
+struct ntconn_header_s {
+	uint16_t tag;	/* NTCONN_TAG_* */
+	uint16_t len;	/* length of the command string */
+	uint32_t blob_len;	/* length of the trailing binary blob */
+};
+
+/* PCI address packed into a single 64-bit id; the union allows both the
+ * compact id and the individual BDF fields to be accessed.
+ */
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12	/* strlen("0000:00:00.0") */
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+/* Physical/virtual role of a port.
+ * NOTE(review): this enum and enum port_type_e below share the PORT_TYPE_
+ * constant prefix but describe different things (role vs. NIM media type) -
+ * easy to confuse; confirm call sites use the intended one.
+ */
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+/* NIM (Network Interface Module) hardware identifiers; values are sparse
+ * and fixed (hardware-defined), so explicit initializers are required.
+ */
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];	/* MAC address, network byte order */
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+/* NIM identification as read from the module EEPROM; the fixed-size char
+ * arrays include room for a 0-terminator.
+ */
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+/* Per-port description returned by "adapter get,interfaces". */
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];	/* nb_ports entries */
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/* NOTE(review): this declares an array of 32 char pointers; for a
+	 * 32-byte firmware version string it should likely be
+	 * "char fw_version[32]" - confirm against the reply producer before
+	 * changing, as this is a binary wire layout.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+/* NOTE(review): MAX_PATH_LEN is also defined (identically) in
+ * ntconnect_api_meter.h - keep the two in sync or hoist to a common header.
+ */
+#define MAX_PATH_LEN 128
+
+/* Flow-module error codes; offset by 0x100 so they do not overlap the
+ * generic ntconn_err_e range.
+ */
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	/* NOTE(review): "TO_MANY" is a typo for "TOO_MANY"; renaming would
+	 * break existing users of the binary/API name
+	 */
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+/* Fixed-size, by-value copy of a flow element's spec/mask so it can be
+ * serialized across the ntconnect socket (the original rte_flow-style
+ * structures contain pointers and cannot be sent as-is).
+ */
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;	/* marks the start of the union for copying */
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+/* RSS action plus an inline copy of its queue list. */
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+/* Fixed-size, by-value copy of a flow action's configuration (same
+ * serialization rationale as flow_elem_types_s above).
+ */
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;	/* marks the start of the union for copying */
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+/* Request payload for querying a single action of an existing flow. */
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;	/* opaque flow handle returned at creation */
+};
+
+/* Request payload for creating a flow: attribute plus up to
+ * MAX_FLOW_STREAM_ELEM match elements and actions.
+ */
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;	/* opaque flow handle returned at creation */
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;	/* opaque handle for destroy/query requests */
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+/* Query reply: status plus 'data_length' bytes of action-specific data. */
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* NOTE(review): MAX_PATH_LEN and ERR_MSG_LEN are also defined (identically)
+ * in ntconnect_api_flow.h - identical redefinition is legal C but keep them
+ * in sync or hoist to a common header.
+ */
+#define MAX_PATH_LEN 128
+
+/* Meter-module error codes; offset by 0x100 so they do not overlap the
+ * generic ntconn_err_e range.
+ */
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+/* Setup request; which union member is valid depends on the command
+ * (profile for ADD_PROFILE, p for ADD_POLICY, mtr_params for CREATE_MTR).
+ */
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;	/* non-zero: clear counters after reading */
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;	/* RTE_MTR_STATS_* bits valid in 'stats' */
+};
+
+/* Pointer-based variant of meter_setup_s for in-process use (pointers must
+ * not be sent across the socket).
+ */
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ *
+ * The NUM_STAT_RECORD_TYPE_* counts are derived as sizeof(struct)/sizeof(uint64_t),
+ * so every field in these record structs must remain a uint64_t (or a struct
+ * composed solely of uint64_t) for the counts to stay correct.
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+/* Reply header followed by nb_counters records of the requested type. */
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;	/* non-zero: records use the virt layout */
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Reply for the test module: 'number' entries in the flexible 'test' array
+ * plus a status code.
+ */
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+/* Module protocol version reported through the generic "version" command. */
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+/* "get" sub-command dispatch table (level 1 below the entry level). */
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Handler for "get,nic_pci_ids": list the PCI ids of all registered NICs.
+ *
+ * Allocates a struct ntc_nic_pci_ids_s (ownership passes to the caller
+ * through *data; released later via the module free_data callback) and
+ * fills it with one "dddd:bb:dd.f" formatted string per registered
+ * device. Returns REQUEST_OK, or REQUEST_ERR on allocation failure
+ * (with *len set to 0).
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/* Bounded formatting: never overrun the id string field */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/* Module request entry point: dispatch `function` through the server
+ * command tables starting at the entry level.
+ */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/* ntconnect module operations of the built-in "server" module.
+ * The final NULL member means no client_cleanup callback is installed.
+ */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/*
+ * Register the server module on the reserved fake PCI address
+ * 0000:00:00.0 (see comment at the top of this file).
+ */
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr server_addr = {
+		.domain = 0,
+		.bus = 0,
+		.devid = 0,
+		.function = 0,
+	};
+
+	return register_ntconn_mod(&server_addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..697e101a03
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Mapping of ntconnect error codes to human-readable error strings.
+ * The table is terminated by an entry with err_code == (uint32_t)-1.
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+/* Head of the singly-linked list of registered ntconnect modules. */
+static ntconn_mod_t *ntcmod_base;
+/* Accept-loop server thread id. */
+static pthread_t tid;
+/* Most recently created (detached) client worker thread id. */
+static pthread_t ctid;
+/* Singleton server state, shared with the "server" module. */
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Look up the error descriptor for err_code in ntconn_err[].
+ * Unknown codes fall back to the "Internal error" entry (index 1).
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	const ntconn_err_t *entry = &ntconn_err[0];
+
+	while (entry->err_code != (uint32_t)-1 && entry->err_code != err_code)
+		entry++;
+
+	/* Hit the terminator: code not in table, report internal error */
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry;
+}
+
+/*
+ * Register a module handler (`op`, with opaque handle `hdl`) for the
+ * given PCI device address. The module is pushed to the front of the
+ * global module list, and its PCI id is recorded in the server's id
+ * list unless it is the server's fake 0000:00:00.0 address.
+ * Returns 0 on success, -1 on allocation failure.
+ * Not thread safe; expected to run during driver probe only.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push-front onto the global module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* NOTE(review): pci_id appears to overlay domain/bus/devid/function
+	 * (union); zero only for the server's fake address - confirm the
+	 * declaration in ntconnect.h.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the id once in the first free slot */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill in a unix-domain socket address for the given filesystem path.
+ * Returns 0 on success, -1 on NULL arguments or a path too long for
+ * sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!addr || !path)
+		return -1;
+
+	memset(addr, 0, sizeof(*addr));
+	addr->sun_family = AF_UNIX;
+
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;
+
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+/* Internal status codes shared by the socket read/write helpers below. */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll `fd` for input and receive up to `len` bytes into `data`.
+ *
+ * `timeout` is in milliseconds (negative = wait forever, as for poll()).
+ * On STATUS_OK, *recv_len holds the number of bytes read (may be short).
+ * Other returns: STATUS_TIMEOUT, STATUS_TRYAGAIN (signal or transient),
+ * STATUS_CONNECTION_CLOSED, STATUS_CONNECTION_INVALID,
+ * STATUS_INVALID_PARAMETER, STATUS_INTERNAL_ERROR.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and not in an error/invalid state: try to receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly `length` bytes into `data`, looping over short reads.
+ * STATUS_CONNECTION_CLOSED and STATUS_TIMEOUT are returned silently;
+ * other failures are logged. STATUS_TRYAGAIN restarts the read.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t done = 0;
+
+	while (done < length) {
+		size_t chunk = 0;
+		int status = read_data(clfd, length - done, &data[done],
+				       &chunk, -1);
+
+		if (status == STATUS_OK) {
+			done += chunk;
+			continue;
+		}
+		if (status == STATUS_CONNECTION_CLOSED ||
+				status == STATUS_TIMEOUT) {
+			/* Silently return status */
+			return status;
+		}
+		if (status != STATUS_TRYAGAIN) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       status);
+			return status;
+		}
+		/* STATUS_TRYAGAIN: retry */
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write all `size` bytes of `data` to `fd`, looping over short writes.
+ * Returns 0 on success, STATUS_INTERNAL_ERROR on any write failure.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t written = 0;
+
+	while (written < size) {
+		ssize_t n = write(fd, (const void *)&data[written],
+				  size - written);
+
+		if (n < 0) {
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		written += n;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from the client socket into a heap buffer.
+ *
+ * Reads an initial chunk of up to MESSAGE_BUFFER bytes, parses the
+ * ntconn header to learn the full message length and, if larger,
+ * grows the buffer and reads the remainder. On STATUS_OK the buffer
+ * is returned through *rdata and is owned by the caller (free()).
+ *
+ * NOTE(review): the header is parsed from the first recv without
+ * checking recv_len >= NTCMOD_HDR_LEN; a short first read would parse
+ * incomplete header bytes - confirm the protocol guarantees at least
+ * a full header per first read.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	/* Message larger than the initial buffer: grow and read the rest */
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read and parse one request from the client socket.
+ *
+ * The request payload is "<pci_id>;<module>[;<module_cmd>]" where
+ * pci_id is "domain:bus:devid.function" in hex. Returns the registered
+ * module matching both the address and the module name, or NULL on
+ * parse/read failure (with *status holding the read status). The raw
+ * request buffer is handed back through *get_req and must be freed by
+ * the caller; *module_cmd points into that same buffer.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			/* Check tok before use: strlen(NULL) is undefined */
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "domain:bus:devid.function" as hex fields */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Send one framed reply: an ntconn header (tag, total length) followed
+ * by `size` payload bytes. Returns 0 on success or a STATUS_* error.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr = {
+		.tag = reply_tag,
+		.len = NTCMOD_HDR_LEN + size,
+		.blob_len = 0,
+	};
+	uint8_t *msg = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!msg) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(msg, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&msg[NTCMOD_HDR_LEN], data, size);
+
+	int res = write_all(clfd, msg, hdr.len);
+
+	free(msg);
+	return res;
+}
+
+/*
+ * Send a reply and release the module-owned payload buffer through the
+ * module's free_data callback (only when a payload was present).
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int status = send_reply(clfd, reply_tag, data, size);
+
+	if (size != 0) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return status;
+}
+
+/*
+ * Send an error reply to the client: 4 bytes of binary error code
+ * followed by the error text (the leading "----connect:" characters
+ * act as placeholder space that the code bytes overwrite).
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	/* Bounded formatting: protect against error texts longer than the buffer */
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	/* First 4 bytes carry the numeric error code */
+	memcpy(err_buf, &ntcerr->err_code, sizeof(uint32_t));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread. Loops reading requests, dispatching them to
+ * the addressed module and sending replies, until the connection drops.
+ * On exit, closes the client fd and runs every module's client_cleanup
+ * callback for this client id.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;	/* client fd smuggled through void* */
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				/* Module version packed as major<<32 | minor */
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* NOTE(review): control reaches here even after a request was
+		 * handled successfully above, sending an INVALID_REQUEST error
+		 * after every valid reply - looks unintended; confirm against
+		 * the client protocol. Also, `request` is only freed at the
+		 * bottom of the loop, so the break paths above leak it.
+		 */
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept loop of the named-socket server. Each accepted client gets a
+ * detached ntconnect_worker thread pinned to the configured cpuset.
+ * Exits on listen()/accept() failure or when `running` is cleared.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		/* Check thread creation: on failure, ctid is stale and the
+		 * client fd would otherwise leak.
+		 */
+		if (pthread_create(&ctid, NULL, ntconnect_worker,
+				   (void *)(uint64_t)clfd) != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create worker thread for client fd %i\n",
+			       clfd);
+			close(clfd);
+			continue;
+		}
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Start the ntconnect unix-domain socket service on `sockname`, serving
+ * all modules registered through register_ntconn_mod(). The server and
+ * worker threads are pinned to `cpuset`. A no-op (returning 0) when no
+ * modules are registered. Returns 0 on success, -1 on failure.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		/* Fails when sockname is too long for sun_path */
+		if (unix_build_address(sockname, &addr) != 0)
+			return -1;
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		if (pthread_create(&tid, NULL, ntconnect_server,
+				   &ntconn_serv) != 0) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/* Module protocol version reported through the generic "version" command. */
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+/* Maximum number of adapters this connect module can expose. */
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle: back-pointer to the owning driver instance. */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+/* Forward declarations of the "get" handlers */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* "get" sub-command dispatch table */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* Forward declarations of the "set" handlers */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* "set" sub-command dispatch table */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Translate an NT link speed enum to the ntconnect PORT_LINK_SPEED_*
+ * value; PORT_LINK_SPEED_UNKNOWN for unrecognized speeds.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	static const struct {
+		enum nt_link_speed_e nt_speed;
+		int port_speed;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, PORT_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, PORT_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, PORT_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, PORT_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, PORT_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, PORT_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, PORT_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, PORT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].nt_speed == link_speed)
+			return speed_map[i].port_speed;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Translate a speed string ("10M" ... "100G") to the NT link speed
+ * enum; NT_LINK_SPEED_UNKNOWN for unrecognized strings.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Handler for "get,interfaces": build a list of all physical and
+ * virtual ports with type, link state, speed, MTU, MAC, attached HW
+ * queues and NIM data. The reply buffer is allocated here; ownership
+ * passes to the caller via *data. Returns REQUEST_OK, REQUEST_ERR on
+ * allocation failure, or an ntconn error on missing port-0 internals.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		/* NOTE(review): unlike port 0 above, eth_dev/dev_private is
+		 * not NULL-checked here - confirm all phy ports are always
+		 * initialized at this point.
+		 */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		/* Port state: disabled, or NIM present/absent when enabled */
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode link length is clamped to the 16-bit field */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		/* Virtual port state/link follow the negotiated vport mode */
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		/* Clamp so input + output queues never exceed MAX_RSS_QUEUES */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Handle "adapter,get,info": return the FPGA identification string
+ * "TTT-PPPP-VV-RR" (type, product, version, revision) in a malloc'ed
+ * buffer. On success *data/*len describe the reply (freed later via
+ * adap_free_data()); on allocation failure *len is 0 and REQUEST_ERR
+ * is returned.
+ *
+ * Fix vs. original: the snprintf() was bounded by a hard-coded 31 while
+ * the buffer is sized by struct ntc_adap_get_info_s — bound it by the
+ * actual allocation (*len) so a smaller struct cannot be overflowed.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	snprintf(*data, *len, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handle "adapter,get,sensors": collect all adapter and NIM sensor
+ * readings into one malloc'ed reply.
+ *
+ * Reply layout: a struct ntc_sensors_s header immediately followed by
+ * (adapter_sensors_cnt + sum of nim_sensors_cnt[]) struct sensor
+ * entries. On success *data/*len describe the buffer (caller frees via
+ * adap_free_data()); on allocation failure *len is 0 and REQUEST_ERR is
+ * returned.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	/* NOTE(review): fixed 24-byte copy — assumes p_dev_name holds at
+	 * least 24 bytes; confirm against the adapter_info definition
+	 */
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Sum up the sensor counters (adapter + per-port NIM) */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		/* NOTE(review): fixed 50-byte copy — assumes info.name is at
+		 * least 50 bytes; confirm
+		 */
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/* NOTE(review): copies strlen bytes only — no NUL
+			 * terminator is written into the malloc'ed buffer;
+			 * confirm the consumer treats name as a fixed-width
+			 * field rather than a C string
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/*
+ * Administratively enable physical port @port_nr via the nt4ga layer.
+ * Always reports REQUEST_OK.
+ */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, true);
+	return REQUEST_OK;
+}
+
+/*
+ * Administratively disable physical port @port_nr via the nt4ga layer.
+ * Always reports REQUEST_OK.
+ */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(info, port_nr, false);
+	return REQUEST_OK;
+}
+
+/*
+ * Request link-up on port @portid; a no-op (logged) if the link is
+ * already requested up. Always reports REQUEST_OK.
+ */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Request link-down on port @portid; a no-op (logged) if the link is
+ * already requested down. Always reports REQUEST_OK.
+ */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Parse @speed_str and apply it as link speed on port @portid.
+ *
+ * Only allowed while the port is administratively down; otherwise a
+ * WRONG_LINK_STATE reply is generated. An unparsable speed string
+ * yields a MISSING_INVALID_PARAM error reply in *data/*len.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	/* Speed may only be changed while the port is disabled */
+	if (nt4ga_port_get_adm_state(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n", portid,
+	       speed_str);
+	return REQUEST_OK;
+}
+
+/*
+ * Apply loopback @mode (an NT_LINK_LOOPBACK_* value) on port @portid.
+ * Always reports REQUEST_OK.
+ */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(info, portid, mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Turn transmitter power on (@disable == false) or off (@disable ==
+ * true) for port @portid. A hardware-layer failure produces a
+ * TX_POWER_FAIL reply in *data/*len.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(info, portid, disable) == 0)
+		return REQUEST_OK;
+
+	NT_LOG(DBG, NTCONNECT,
+	       "Port %i: ERROR while changing tx_power\n", portid);
+	return ntconn_reply_status(data, len,
+				   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+}
+
+/*
+ * Handle "adapter,set,interface,port<N>,<attr>[=<value>]" requests.
+ *
+ * On entry *data points at the zero-terminated remainder after the
+ * fixed prefix, e.g. "port0,link_speed=10G"; it is tokenized in place.
+ * Supported attributes: link_speed=<speed>, enable, disable,
+ * link_state=up|down, host_loopback=on|off, line_loopback=on|off,
+ * tx_power=on|off. Any parse failure produces an error reply.
+ *
+ * Fix vs. original: the physical-port range check was inverted —
+ * 'port_nr < n_phy_ports' rejected every valid physical port while
+ * letting out-of-range ids through to the nt4ga calls. All attributes
+ * here operate on physical ports, so the valid range is
+ * [0, n_phy_ports); also reject negative ids from atoi().
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	/* First token must be "port<N>" with at least one digit */
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/* Only set on phy ports: valid ids are [0, n_phy_ports) */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * Handle "adapter,set,adapter,...": not implemented yet — log the
+ * incoming command (if any) and reply NOT_YET_IMPLEMENTED.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data != NULL && *data != NULL) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+	}
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * ntconnapi request entry point for the "adapter" module: dispatch the
+ * named @function through the module's function table.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously malloc'ed by a request handler */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook — this module keeps no per-client state */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect module descriptor: name, version, and the three callbacks */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register @drv with the NtConnect framework in the first free slot of
+ * adap_hdl[]. Returns register_ntconn_mod()'s result, or -1 when all
+ * MAX_ADAPTERS slots are occupied.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Locate the first unused handle slot */
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client handle passed back to the flow request callbacks */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Per-port binding set up by func_flow_setport(): the flow device to
+ * operate on plus the caller id / forced VLAN used when creating flows
+ */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/*
+ * Error code → human-readable text table, terminated by a -1 entry.
+ * Fix vs. original: corrected the "To many flows" typo.
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "Too many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Look up the text for @err_code in ntconn_err[]. Unknown codes map to
+ * entry 1 ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	const ntconn_err_t *entry = &ntconn_err[0];
+
+	while (entry->err_code != (uint32_t)-1 &&
+			entry->err_code != err_code)
+		entry++;
+
+	/* Fall back to the generic "Internal error" text */
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry->err_text;
+}
+
+/*
+ * Build a flow_return_s error reply: @err as the status and the text
+ * for @code as the message. Returns REQUEST_OK with *data/*len set, or
+ * REQUEST_ERR (*len = 0) when allocation fails.
+ *
+ * Fix vs. original: the message was copied with
+ * memcpy(.., RTE_MIN(strlen(msg), ERR_MSG_LEN)), which never writes a
+ * NUL terminator into the freshly malloc'ed buffer — err_msg could be
+ * read past its end. Use strlcpy() like ntconn_flow_err_status() does;
+ * it always NUL-terminates within ERR_MSG_LEN.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a flow_return_s reply carrying @err and the generic
+ * "Internal error" text. Returns REQUEST_OK with *data/*len set, or
+ * REQUEST_ERR (*len = 0) when allocation fails.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *reply = malloc(sizeof(struct flow_return_s));
+
+	*data = (char *)reply;
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	*len = sizeof(struct flow_return_s);
+	reply->status = err;
+	reply->type = FLOW_ERROR_GENERAL;
+	strlcpy(reply->err_msg,
+		get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR), ERR_MSG_LEN);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+/* Forward declarations of the flow request handlers */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Command-name → handler dispatch table for the "filter" module,
+ * NULL-terminated
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Pack @status and @error into a malloc'ed flow_return_s reply.
+ * Returns REQUEST_OK with *data/*len set, or REQUEST_ERR (*len = 0)
+ * when allocation fails.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *reply = malloc(sizeof(struct flow_return_s));
+
+	*data = (char *)reply;
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	*len = sizeof(struct flow_return_s);
+	reply->status = status;
+	reply->type = error->type;
+	strlcpy(reply->err_msg, error->message, ERR_MSG_LEN);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * Handle "filter,setport,in_port=<N>,vpath=<path>": bind physical port
+ * <N> to the virtio-path VF instance named by <path> and reply with the
+ * VF's queue list (struct flow_setport_return).
+ *
+ * Fix vs. original: vpath was left uninitialized when the "vpath="
+ * token was absent or malformed, yet it was then logged and passed to
+ * vp_path_instance_ready() — a read of indeterminate stack memory.
+ * Zero-initialize it so the lookup deterministically fails instead.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	/* First token: "in_port=<N>" */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	/* Second token: "vpath=<path>" */
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	/* Build the reply: number of queues plus each queue's id/hw_id */
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle "filter,flush,port=<N>": flush all flows on the bound port.
+ *
+ * Fix vs. original: the port id comes from atoi(), so a negative value
+ * slipped past the single 'port >= MAX_PORTS' test and indexed
+ * port_eth[] out of bounds — reject negative ids as well.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Valid port ids are [0, MAX_PORTS) */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handle "filter,destroy": destroy one flow. The request carries a
+ * binary struct destroy_flow_ntconnect blob (port + flow handle)
+ * appended after the header at offset hdr->len.
+ *
+ * Fix vs. original: the blob's port field was only checked against the
+ * upper bound; a negative value indexed port_eth[] out of bounds —
+ * reject negative ids as well.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Valid port ids are [0, MAX_PORTS) */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selector for make_flow_create(): create the flow or only validate it */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Translate a serialized flow request (struct create_flow_ntconnect)
+ * into live flow_elem/flow_action arrays and pass it to the flow API.
+ *
+ * @func   FLOW_API_FUNC_CREATE or FLOW_API_FUNC_VALIDATE.
+ * @port   index into port_eth[]; the caller must have range-checked it.
+ * @status out: NTCONN_FLOW_ERR_NONE on success, otherwise an error code
+ *         (preloaded with TO_MANY_FLOWS so every 'goto error' path
+ *         reports it).
+ * @error  out: flow API error details.
+ *
+ * Returns the new flow handle cast to uint64_t for CREATE (0 on
+ * failure) and always 0 for VALIDATE (result is in *status).
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	/* Preload the failure outcome; overwritten on success below */
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	/* Unpack match items; serialized spec/mask blobs are referenced
+	 * in place. Loop terminates on FLOW_ELEM_TYPE_END.
+	 * NOTE(review): '>' permits idx == MAX_FLOW_STREAM_ELEM, one past
+	 * the last valid index — confirm the intended bound ('>=' ?)
+	 */
+	do {
+		idx++;
+		if (idx > MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	/* Unpack actions; some action types need their embedded pointers
+	 * re-targeted at local storage (vxlan_tun/encap/decap live on
+	 * this stack frame for the duration of the flow API call)
+	 */
+	idx = -1;
+	do {
+		idx++;
+		if (idx > MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				/* NOTE(review): eidx bound uses '>' — allows
+				 * eidx == RAW_ENCAP_DECAP_ELEMS_MAX; confirm
+				 */
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	/* *status/*error still hold the failure preloaded above */
+	return 0;
+}
+
+/*
+ * ntconn "flow create" request handler.
+ *
+ * The request blob at (*data)[hdr->len] must be a complete
+ * struct create_flow_ntconnect.  On success *data is replaced by a
+ * malloc'ed struct create_flow_return_s carrying the new flow handle;
+ * when the filter rejects the flow a struct flow_error_return_s is
+ * returned instead.  *len receives the reply size.  Returns REQUEST_OK
+ * (reply present) or REQUEST_ERR (allocation failure, *len == 0).
+ */
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose blob does not match the expected struct. */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	/* Extract byte 'n' of an IPv4 address field for dotted-quad logging. */
+#define IPV4_BYTE(fld, n) (((const char *)&(fld))[n] & 0xFF)
+
+	/* Dump the pattern elements; the list ends at FLOW_ELEM_TYPE_END. */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/*
+		 * Fixed: the END check previously read flow_cpy[i].elem[i],
+		 * indexing past the single request struct for i > 0; only
+		 * elem[] is an array.
+		 */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 3));
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 3));
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 3));
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 3));
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+#undef IPV4_BYTE
+
+	/* Dump the action list; the list ends at FLOW_ACTION_TYPE_END. */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		/* Success: return the opaque flow handle to the client. */
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	/* Filter rejected the flow: forward type and message to the client. */
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconn "flow validate" request handler.
+ *
+ * Same request blob as func_flow_create(), but the filter is only asked
+ * to validate (FLOW_API_FUNC_VALIDATE) — no flow is installed.  The
+ * reply blob is built by copy_return_status() from status/error.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose blob does not match the expected struct. */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	/* Fixed: previously logged the wrong function name. */
+	NT_LOG(DBG, NTCONNECT, "%s\n", __func__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	/* Extract byte 'n' of an IPv4 address field for dotted-quad logging. */
+#define IPV4_BYTE(fld, n) (((const char *)&(fld))[n] & 0xFF)
+
+	/* Dump the pattern elements; the list ends at FLOW_ELEM_TYPE_END. */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/*
+		 * Fixed: the END check previously read flow_cpy[i].elem[i],
+		 * indexing past the single request struct for i > 0.
+		 */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			/*
+			 * Fixed: all four octets of every address were
+			 * printed from byte [0]; use bytes 0..3.
+			 */
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.src_ip, 3));
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].spec_cpy.u.ipv4.hdr.dst_ip, 3));
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.src_ip, 3));
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 0),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 1),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 2),
+			       IPV4_BYTE(flow_cpy->elem[i].mask_cpy.u.ipv4.hdr.dst_ip, 3));
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+#undef IPV4_BYTE
+
+	/* Dump the action list; the list ends at FLOW_ACTION_TYPE_END. */
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	/*
+	 * copy_return_status() builds the reply blob from status/error.
+	 * The duplicated, unreachable validation tail that used to follow
+	 * this return has been removed.
+	 */
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconn "flow query" request handler.
+ *
+ * The request blob at (*data)[hdr->len] is a struct query_flow_ntconnect
+ * naming a port, a flow handle and one action to query.  On success the
+ * reply is a struct query_flow_return_s followed by 'length' bytes of
+ * query payload; REQUEST_ERR (with *len == 0) is returned only when the
+ * reply buffer cannot be allocated.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose blob does not match the expected struct. */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/*
+	 * NOTE(review): the flow handle is a client-supplied integer cast
+	 * straight to a pointer; nothing here checks that it names a flow
+	 * this client owns — verify validation happens in flow_query().
+	 */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	/* Rebuild a flow_action view over the copied action config. */
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply header plus the variable-length query payload. */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			/* Take over the payload, then release the source. */
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Dispatch an incoming ntconn request to the matching "flow" handler. */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* Table-driven lookup over adapter_entry_funcs. */
+	const int res = execute_function(this_module_name, hdl, client_id,
+					 hdr, function, adapter_entry_funcs,
+					 data, len, 0);
+	return res;
+}
+
+/*
+ * Release a reply buffer previously allocated by a flow request handler.
+ * free(NULL) is a defined no-op, so no guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; the flow module holds no per-client state. */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect module descriptor: name, protocol version and callbacks. */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register a driver instance with the NtConnect framework.
+ * Returns the result of register_ntconn_mod() on success, or -1 when
+ * every client slot is already taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Claim the first unused entry in flow_hdl[]. */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL) {
+			flow_hdl[slot].drv = drv;
+			return register_ntconn_mod(&drv->p_dev->addr,
+						   (void *)&flow_hdl[slot],
+						   &ntconn_flow_op);
+		}
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "Cannot register more adapters into NtConnect framework");
+	return -1;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client registration state for the meter module; one slot per client. */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-module error code to message table; terminated by err_code == -1. */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+static const char *get_error_msg(uint32_t err_code)
+{
+	int i;
+
+	/* Codes below the module range belong to the generic ntconn layer. */
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR)
+		return get_ntconn_error(err_code)->err_text;
+
+	/* Scan the module table up to the -1 sentinel. */
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+
+	/* Unknown code: report it as an internal error (table entry 1). */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Request-name to handler dispatch table; NULL-terminated. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	/* Fill the whole struct in one designated-initializer assignment. */
+	*error = (struct rte_mtr_error){
+		.type = RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+		.message = get_error_msg(err),
+		.cause = NULL,
+	};
+}
+
+/*
+ * ntconn "meter capabilities" request handler.
+ *
+ * Parses "vport=<n>" from the request string, maps the virtual port to
+ * a physical port and returns the rte_mtr capabilities in a malloc'ed
+ * struct meter_capabilities_return_s; on any failure a
+ * struct meter_error_return_s is returned instead.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* First comma-separated token is expected to be "vport=<n>". */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		/* NOTE(review): atoi() silently yields 0 on bad input,
+		 * which is then rejected by the range check below.
+		 */
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* Valid virtual ports are 1..64; 0 also catches a missing token. */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is the low bit of the virtual port number. */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconn "meter setup" request handler.
+ *
+ * The command name (addpro/delpro/addpol/delpol/crtmtr/delmtr) is parsed
+ * from the request string; the parameters come from a
+ * struct meter_setup_s blob at (*data)[hdr->len].  Per-VF profile,
+ * policy and meter IDs are remapped into disjoint global ID ranges
+ * before the rte_mtr call.  Replies with meter_return_s on success or
+ * meter_error_return_s on failure.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Reject requests whose blob does not match the expected struct. */
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	/* Meter-capable virtual ports are 4..128. */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* First comma-separated token selects the sub-command. */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	/*
+	 * Each case range-checks the per-VF ID, then offsets it into that
+	 * VF's slice of the global ID space before calling rte_mtr.
+	 */
+	switch (command) {
+	case ADD_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action pointers inside the policy. */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy IDs are remapped with the policy range size. */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	 *data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "meter" read-stats handler.
+ * Parses a meter_get_stat_s blob from the request, maps the caller's
+ * per-vport meter id to the global meter id space, reads the counters via
+ * rte_mtr_stats_read() and returns them in a freshly malloc'ed reply
+ * buffer stored in *data (released later through the module free_data
+ * callback).  On failure a meter_error_return_s reply is produced instead.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The request blob must be exactly one meter_get_stat_s */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Valid vports are 4..128 — vports below 4 are physical ports */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* NOTE(review): physical port is taken from the vport's LSB —
+	 * presumably vports alternate between the two ports; confirm mapping.
+	 */
+	port = cpy_data->vport & 1;
+	/* Offset the caller's per-vport meter id into the global id space */
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	/* Could not allocate a reply at all — signal transport-level error */
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Module entry point for "meter" requests: dispatch the request path in
+ * @function through the module's function table.
+ */
+static int meter_request(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	/* client_id IS used (forwarded below), so it must not carry _unused */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a well-defined no-op, so no NULL guard is needed */
+	free(data);
+}
+
+/* Per-client teardown hook; the meter module keeps no per-client state. */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect module descriptor: name, version and the three callbacks. */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register the meter module for @drv with the NtConnect framework.
+ * Claims the first free handle slot; fails with -1 when all
+ * MAX_CLIENTS slots are in use.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Locate the first unused handle slot */
+	for (slot = 0; slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL; slot++)
+		;
+
+	if (slot >= MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ *
+ * Each call registers one NtConnect module for the given driver instance.
+ * Returns -1 on failure; otherwise the result of register_ntconn_mod()
+ * (presumably 0 on success — confirm against ntconnect.c).
+ */
+int ntconn_adap_register(struct drv_s *drv);	/* adapter control */
+int ntconn_stat_register(struct drv_s *drv);	/* statistics */
+int ntconn_flow_register(struct drv_s *drv);	/* flow API */
+int ntconn_meter_register(struct drv_s *drv);	/* metering */
+int ntconn_test_register(struct drv_s *drv);	/* loopback test */
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..437cf9ddad
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,877 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each section inside a client snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One section of a snapshot: start pointer and size in 64-bit words */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot record, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* NULL until a snapshot has been taken */
+	struct snaps_s *next;
+};
+
+/* Singleton module handle (one stat module per driver instance) */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;	/* head of per-client snapshot list */
+} stat_hdl;
+
+/* Statistic categories served by this module */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize flow-matcher statistics into @val: an ntc_stat_get_data_s
+ * header followed by @nbc flowmatcher_type_fields_s records.
+ * Returns the number of 64-bit words written.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* NOTE(review): the xstats fetch below only feeds the assert();
+	 * with NDEBUG this is dead work — confirm it has no needed side
+	 * effect (last arg 0 suggests a non-clearing read).
+	 */
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/* NOTE(review): flm is never advanced, so every iteration
+		 * rewrites the same record; harmless only while nbc == 1
+		 * (get_size() currently fixes nrec = 1) — confirm.
+		 */
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/* NOTE(review): zero-fills the destination using the SOURCE
+		 * struct's size — verify sizeof(*mp_stat_structs_flm) ==
+		 * sizeof(struct flowmatcher_type_fields_s).
+		 */
+		memset(flm, 0, sizeof(*hwstat->mp_stat_structs_flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-color statistics into @val: an ntc_stat_get_data_s
+ * header followed by @nbc color_type_fields_s records.
+ * Returns the number of 64-bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *hdr = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *dst;
+	int i;
+
+	hdr->nb_counters = (uint64_t)nbc;
+	hdr->timestamp = hwstat->last_timestamp;
+	hdr->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virtual and capture mode */
+	dst = (struct color_type_fields_s *)hdr->data;
+	for (i = 0; i < nbc; i++, dst++) {
+		dst->pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		dst->octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		dst->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize per-host-buffer (queue) statistics into @val: an
+ * ntc_stat_get_data_s header followed by @nbq queue_type_fields_s
+ * records.  Returns the number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *hdr = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *dst;
+	int i;
+
+	hdr->nb_counters = (uint64_t)nbq;
+	hdr->timestamp = hwstat->last_timestamp;
+	hdr->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virtual and capture mode */
+	dst = (struct queue_type_fields_s *)hdr->data;
+	for (i = 0; i < nbq; i++, dst++) {
+		dst->flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		dst->drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		dst->fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		dst->dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		dst->flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		dst->drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		dst->fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		dst->dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter subset shared by Rx and Tx capture-mode port
+ * statistics from the hardware counter struct into the wire format.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port statistics into @val: an ntc_stat_get_data_s header
+ * followed by @nbp records.  The record format depends on whether the
+ * adapter runs in vswitch (virt) or capture mode.
+ * Returns the number of 64-bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch mode: small record with QoS drop counters */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* capture mode: RMON block plus Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize Tx port statistics into @val: an ntc_stat_get_data_s header
+ * followed by @nbp records (virt or capture format, as for Rx).
+ * Returns the number of 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			/* Tx packet count is taken from the running total,
+			 * overriding the RMON pkts field copied above
+			 */
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * "get layout_version": reply with the statistics layout version as a
+ * single int in a malloc'ed buffer owned by the caller.
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	reply = malloc(sizeof(*reply));
+	*data = (char *)reply;
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * "get flm_layout_version": reply with the flow-matcher record layout
+ * version (1 for flm_stat_ver < 18, otherwise 2) as a single int.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	reply = malloc(sizeof(*reply));
+	*data = (char *)reply;
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		/* hardware exposes two counters per color entry */
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		/* record size differs between vswitch and capture mode */
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		/* a single aggregate record */
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = nrec;
+	/* every serialized reply carries a fixed-size info header */
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for single-type stat requests: allocate a reply buffer
+ * sized via get_size(), fill it under the driver statistics lock using
+ * @read_counters, and hand the buffer to the caller (who owns it and
+ * releases it through the module free_data callback).
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int size = get_size(stat, stype, &nbg);
+
+	size *= sizeof(uint64_t);	/* counter count -> bytes */
+	uint64_t *val = malloc(size);	/* no cast needed in C */
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Serialize under the stat lock so the values are consistent */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = size;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ *
+ * Each wrapper below validates that the statistics subsystem is present
+ * and then delegates to do_get_stats() with the matching type/reader.
+ */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER, read_flm);
+}
+
+/* "get colors": per-color packet/byte/tcp-flag counters */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_COLOR, read_colors);
+}
+
+/* "get queues": per-host-buffer flush/drop/forward counters */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_QUEUE, read_queues);
+}
+
+/* "get rx_counters": per-port Rx statistics */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_RX, read_rx_counters);
+}
+
+/* "get tx_counters": per-port Tx statistics */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_TX, read_tx_counters);
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot record for @client_id in the module's singly
+ * linked list.  When @parent is non-NULL it receives the predecessor
+ * node (NULL when the match is the list head or the list is empty).
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Get (or lazily create) the snapshot record for @client_id.
+ * A new record is pushed onto the head of the list with an empty
+ * buffer.  Returns NULL if allocation fails.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *rec = find_client_snap_data(stat, client_id, NULL);
+
+	if (rec)
+		return rec;
+
+	rec = malloc(sizeof(struct snaps_s));	/* caller handles NULL */
+	if (rec) {
+		rec->client_id = client_id;
+		rec->buffer = NULL;
+		rec->next = stat->snaps_base;
+		stat->snaps_base = rec;
+	}
+	return rec;
+}
+
+/*
+ * "snapshot": atomically capture colors, queues and Rx/Tx port counters
+ * into a per-client buffer; the sections are fetched afterwards via the
+ * get.snapshot.* sub-functions.
+ *
+ * Fixes over the previous flow:
+ *  - the !snaps error path no longer unlocks a mutex that was never
+ *    taken (undefined behavior for pthread mutexes)
+ *  - snaps->buffer is cleared after free so a failed allocation cannot
+ *    leave a dangling pointer behind for get_snap_data()
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps)
+		goto err_out;
+
+	/* Drop any previous snapshot; clear the pointer so an allocation
+	 * failure below cannot leave it dangling.
+	 */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	/* Fill all four sections under one lock so they are consistent */
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_unlock;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_unlock:
+	/* only reached while holding the stat lock */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+err_out:
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of the client's snapshot buffer into a fresh reply
+ * buffer owned by the caller.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	if (!*data) {	/* was "!data": tested the out-param, not the allocation */
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": return the color section of the snapshot */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": return the queue section of the snapshot */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": return the Rx port section of the snapshot */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": return the Tx port section of the snapshot */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ *
+ * Dispatches the request path in @function through the module table.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* client_id IS used (forwarded below), so it must not carry _unused */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client teardown: unlink and release the client's snapshot record,
+ * if one exists.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink from the singly linked list */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);	/* free(NULL) is a no-op; guard removed */
+	free(snaps);
+}
+
+/* NtConnect module descriptor: name, version and the three callbacks. */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for @drv with the NtConnect framework.
+ * Refuses to activate when the adapter's stat layout version is not in
+ * layout_versions_supported[].
+ *
+ * NOTE(review): mp_nthw_stat is dereferenced here without the NULL
+ * check the request handlers perform — confirm it is always set before
+ * registration.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: to small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+static void test_free_data(void *hdl _unused, char *data)
+{
+	if (data)
+		free(data);
+}
+
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+int ntconn_test_register(struct drv_s *drv)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (test_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[i],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (16 preceding siblings ...)
  2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-01 12:18 ` Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (2 subsequent siblings)
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
v10:
* Update FPGA register defines.
v13:
* Fix typo spelling warnings
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4181 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7211 +++++++++++++++++
 9 files changed, 11930 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0,
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+struct nt_fpga_field_init {
+	int id;
+	uint16_t bw;
+	uint16_t low;
+	uint64_t reset_val;
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+struct nt_fpga_register_init {
+	int id;
+	uint32_t addr_rel;
+	uint16_t bw;
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+struct nt_fpga_module_init {
+	int id;
+	int instance;
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base;
+	int nb_registers;
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params;
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..87b921da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+/*
+ * DBS (doorbell/queue supervisor) module: TX-side field tables and the
+ * combined RX/TX register map for FPGA image 9563_055_024.
+ * Auto-generated hardware layout data -- do not hand-edit values.
+ *
+ * Field entry layout appears to be { field id, bit width, lsb offset,
+ * reset value }: e.g. DBS_TX_CONTROL's register reset word 66816 equals
+ * AMS(5)<<8 | UWS(8)<<13 from the entries below. TODO confirm against the
+ * nt_fpga_field_init_t definition in include/fpga_model.h.
+ */
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/*
+ * DBS register map. Entry layout appears to be { register id, register
+ * index, bit width, access type, reset word, field count, field table } --
+ * each field count below matches the length of the referenced *_fields
+ * array, and each width equals the highest (offset + width) of its fields.
+ * TODO confirm against the nt_fpga_register_init_t definition.
+ */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (flow matcher) module: field tables and register map.
+ * Auto-generated hardware layout data -- do not hand-edit values.
+ * Field entries: { field id, bit width, lsb offset, reset value }
+ * (presumed; see nt_fpga_field_init_t).
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+/* Reset word 134217728 (1 << 27) below seems inconsistent with these
+ * per-field resets (SPLIT_SDRAM_USAGE = 16 << 23 alone is 134217728;
+ * the register reset appears to omit nothing) -- values taken verbatim
+ * from the generator output.
+ */
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+/* 288-bit flow-info read record (counters, id, timestamp, cause). */
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* 768-bit learn record written to the FLM learn FIFO. */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+/* Register reset 269488144 == FT0..FT3 nibbles all 1 (1<<4|1<<12|1<<20|1<<28). */
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 403-bit FLM recipe record. */
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* Single 32-bit counter fields for the FLM_STAT_* read-only registers. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+/* 96-bit learn/unlearn status record. */
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register map: { register id, register index, bit width, access type,
+ * reset word, field count, field table } (presumed layout).
+ */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG (generator) module, two instances (suffix 0/1): field tables and
+ * register map. Auto-generated hardware layout data -- do not hand-edit.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+/* Register reset 4194304 == default SIZE (64) << 16. */
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* NOTE: instance 1 defaults to stream id 1 (instance 0 defaults to 0). */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF (TX MAC feeder / inter-frame-gap control) module: field tables and
+ * register map. Auto-generated hardware layout data -- do not hand-edit.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+/* Register map; note the REGISTER_TYPE_RC1 (presumably read-to-clear)
+ * sticky/max-delay statistics registers.
+ */
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY module: QSFP port control/status pins for ports 0 and 1
+ * (LPMODE, RESET_B, INT_B, MODPRS_B, RXLOS). Auto-generated -- do not
+ * hand-edit. CFG reset 170 (0xAA) == the four active-low *_B inputs
+ * defaulting to 1; GPIO reset 17 (0x11) == both LPMODE pins set.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU (header field update) module: recipe control/data tables and
+ * register map. Auto-generated hardware layout data -- do not hand-edit.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 155-bit recipe record: three length-update groups (A/B/C), TTL update,
+ * and inner/outer L3/L4 offsets.
+ */
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host/PCIe interface) module: identification, test and statistics
+ * registers. Auto-generated -- do not hand-edit.
+ */
+/* 1693228548 looks like a Unix-epoch build timestamp (late Aug 2023) --
+ * matches the register's RO reset word below; confirm with the generator.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1693228548 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+/* Encodes the FPGA image id 9563_055_024: register reset 626734872 ==
+ * 9563 << 16 | 55 << 8 | 24.
+ */
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* Fixed test patterns: 287454020 == 0x11223344, 2864434397 == 0xAABBCCDD. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+/* 128-bit image UUID split over four 32-bit RO registers. */
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 929302248 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 2904641880 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 55459253 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 4051580681 },
+};
+
+/* HIF register map; the second column (0..176, step 8/16) looks like a
+ * byte address rather than the small sequential indices used by other
+ * modules -- TODO confirm addressing convention.
+ */
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1693228548, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 929302248, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 2904641880, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 55459253, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 4051580681, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH (hashing / RSS) module: recipe tables and register map.
+ * Auto-generated hardware layout data -- do not hand-edit.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 743-bit hash recipe: word selection, 320-bit word mask, seed, and a
+ * 320-bit key (K) with a Toeplitz-mode flag.
+ */
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST (header strip / modify) module: recipe tables and register map.
+ * Auto-generated hardware layout data -- do not hand-edit.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 133-bit strip/modify recipe: strip window (start/end) plus three
+ * identical modifier slots (cmd, dyn, ofs, 16-bit value).
+ */
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR (presumably IP fragmentation / MTU) module: per-recipe enable and
+ * 14-bit MTU. Auto-generated -- do not hand-edit.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC module: I2C master controller field tables. The register names and
+ * bit layout (CR/SR/ISR/IER, TX/RX FIFOs, TSUSTA/THDSTA/etc. timing
+ * registers) match the Xilinx AXI IIC controller's register set --
+ * presumably that IP; confirm against the core's product guide.
+ * Auto-generated -- do not hand-edit.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+/* Control register: enable, master/slave mode, repeated start, TX/ACK
+ * control, TX-FIFO reset.
+ */
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+/* Status register; both FIFO-empty flags default to 1 (register reset
+ * word 192 == bits 6 and 7 set).
+ */
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+/* TBUF/THDDAT/THDSTA/THIGH/TLOW/TSUDAT/TSUSTA/TSUSTO: I2C bus timing
+ * parameters, one 32-bit value each.
+ */
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+/* TX FIFO entry: data byte plus start/stop condition control bits. */
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS module map: one write-only recipe RAM accessed through a
+ * CTRL (address/count) + DATA register pair.
+ * Field entries appear to be { field_id, bit_width, bit_offset, reset }
+ * -- TODO confirm ordering against fpga_model.h.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+/* Field count (6th value) equals the length of the referenced array. */
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM (key matcher) module map: CAM, TCAM, TCI and TCQ tables plus a wide
+ * (781-bit) recipe RAM, each accessed through CTRL/DATA register pairs.
+ * Field entries appear to be { field_id, bit_width, bit_offset, reset }
+ * -- TODO confirm ordering against fpga_model.h.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* CAM entry: six 32-bit key words W0..W5 followed by 4-bit FT0..FT5. */
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* Recipe word: 781 bits split into A/B lookup banks and extractor setup. */
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+/* Field count (6th value) equals the length of the referenced array. */
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module map: PCS/FEC status and counters, GTY transceiver
+ * controls (polarity, loopback, drive/cursor levels, eye scan, PRBS),
+ * DRP access, link summary and MAC/PCS configuration.
+ * Field entries appear to be { field_id, bit_width, bit_offset, reset }
+ * -- TODO confirm ordering against fpga_model.h.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+/* Per-lane (0..3) GTY RX controls. */
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+/* Per-lane TX differential swing; reset value 24 per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+/* Reset: TX_ENABLE=1 and TX_FCS_REMOVE=1; all resets/test modes off. */
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+/* Default maximum packet length: 10000 (jumbo). */
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+/* Latched copy of STAT_PCS_RX: identical field layout. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+/* Timestamp compensation, RX/TX delay defaults 1451/1440 (units TBD). */
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/*
+ * Register table: { reg_id, index, bit_width, access_type, reset,
+ * field_count, field_table }.  Register reset values pack the individual
+ * field resets (e.g. MAC_PCS_MAX_PKT_LEN reset 10000 = MAX_LEN default).
+ */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC RX statistics: 32-bit read-only counters, one field per register.
+ * Field entries appear to be { field_id, bit_width, bit_offset, reset }
+ * -- TODO confirm ordering against fpga_model.h.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC TX statistics: 32-bit read-only counters, one field per register
+ * (same layout convention as the MAC RX counter tables above).
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI read traffic generator: write-only setup (RAM address, physical
+ * address low/high, request descriptor), iteration count, and a
+ * read-only ready flag.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI test adapter: enable bit plus read-only good/bad/error counters.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI write traffic generator: mirrors the read generator above, with an
+ * extra INC_MODE bit in the request descriptor and a RW sequence register.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/* Packet descriptor builder (PDB) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h. */
static nt_fpga_field_init_t pdb_config_fields[] = {
	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
};

static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
};

static nt_fpga_register_init_t pdb_registers[] = {
	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
};
+
+/* PDI serial-interface module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * Note: PDI_PRE resets to 3, matching its single field's reset below. */
static nt_fpga_field_init_t pdi_cr_fields[] = {
	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
};

static nt_fpga_field_init_t pdi_drr_fields[] = {
	{ PDI_DRR_DRR, 8, 0, 0 },
};

static nt_fpga_field_init_t pdi_dtr_fields[] = {
	{ PDI_DTR_DTR, 8, 0, 0 },
};

static nt_fpga_field_init_t pdi_pre_fields[] = {
	{ PDI_PRE_PRE, 7, 0, 3 },
};

static nt_fpga_field_init_t pdi_sr_fields[] = {
	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
};

static nt_fpga_field_init_t pdi_srr_fields[] = {
	{ PDI_SRR_RST, 4, 0, 0 },
};

static nt_fpga_register_init_t pdi_registers[] = {
	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
};
+
+/* PTP1588 timing module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * Register resets here are the OR of their fields' resets, e.g. PTP1588_CONF
+ * 3072 = bits 10..11 (PHY_RST1/PHY_RST2), PTP1588_MAC_MI_CONF 131072 =
+ * bit 17 (RDY), PTP1588_TX_STATUS 3584 = bits 9..11. */
static nt_fpga_field_init_t ptp1588_conf_fields[] = {
	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
};

static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
};

static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
};

static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
};

static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
};

static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
};

static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
};

static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
};

static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
};

static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
};

static nt_fpga_field_init_t ptp1588_stat_fields[] = {
	{ PTP1588_STAT_DATA, 32, 0, 0 },
};

static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
};

static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
};

static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
};

static nt_fpga_register_init_t ptp1588_registers[] = {
	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
		ptp1588_conf_fields
	},
	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
		ptp1588_gp_data_fields
	},
	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
		ptp1588_gp_data_lh_fields
	},
	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
		ptp1588_gp_data_ll_fields
	},
	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
		ptp1588_mac_inband_stat_fields
	},
	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
		ptp1588_mac_mi_conf_fields
	},
	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
		ptp1588_mac_mi_data_fields
	},
	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
		ptp1588_rx_host_adr_lsb_fields
	},
	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
		ptp1588_rx_host_adr_msb_fields
	},
	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
		ptp1588_rx_host_conf_fields
	},
	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
		ptp1588_stat_conf_fields
	},
	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
		ptp1588_tx_first_dat_fields
	},
	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
		ptp1588_tx_last1_dat_fields
	},
	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
		ptp1588_tx_last2_dat_fields
	},
	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
		ptp1588_tx_last3_dat_fields
	},
	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
		ptp1588_tx_last4_dat_fields
	},
	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
		ptp1588_tx_mid_dat_fields
	},
	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
		ptp1588_tx_packet_state_fields
	},
	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
		ptp1588_tx_status_fields
	},
	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
		ptp1588_tx_timestamp_ns_fields
	},
	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
		ptp1588_tx_timestamp_sec_fields
	},
};
+
+/* Queue selector (QSL) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h. */
static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t qsl_qen_data_fields[] = {
	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
};

static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t qsl_qst_data_fields[] = {
	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
};

static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
};

static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
};

static nt_fpga_register_init_t qsl_registers[] = {
	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
};
+
+/* QSPI flash-controller module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * Register resets are the OR of field resets: QSPI_CR 384 = bits 7..8
+ * (MSSAE, MTI); QSPI_SR 37 = bits 0, 2, 5 (RXEMPTY, TXEMPTY, SLVMS);
+ * QSPI_SSR 4294967295 = 32-bit all-ones SEL_SLV. */
static nt_fpga_field_init_t qspi_cr_fields[] = {
	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
};

static nt_fpga_field_init_t qspi_dgie_fields[] = {
	{ QSPI_DGIE_GIE, 1, 31, 0 },
};

static nt_fpga_field_init_t qspi_drr_fields[] = {
	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t qspi_dtr_fields[] = {
	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
};

static nt_fpga_field_init_t qspi_ier_fields[] = {
	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
};

static nt_fpga_field_init_t qspi_isr_fields[] = {
	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
};

static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
};

static nt_fpga_field_init_t qspi_sr_fields[] = {
	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
	{ QSPI_SR_TXFULL, 1, 3, 0 },
};

static nt_fpga_field_init_t qspi_srr_fields[] = {
	{ QSPI_SRR_RST, 4, 0, 0x0000 },
};

static nt_fpga_field_init_t qspi_ssr_fields[] = {
	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
};

static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
};

static nt_fpga_register_init_t qspi_registers[] = {
	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
		qspi_rx_fifo_ocy_fields
	},
	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
		qspi_tx_fifo_ocy_fields
	},
};
+
+/* Register access controller (RAC) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * RAC_RAB_BUF_FREE resets to 33489407 = 511 | 511 << 16, i.e. both the
+ * inbound and outbound 9-bit FREE counters start at 511. */
static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t rac_dbg_data_fields[] = {
	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
};

static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
};

static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
};

static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
};

static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
};

static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
};

static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
};

static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
};

static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
};

static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t rac_rab_init_fields[] = {
	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
};

static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
};

static nt_fpga_register_init_t rac_registers[] = {
	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
		rac_rab_buf_free_fields
	},
	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
		rac_rab_buf_used_fields
	},
	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
		rac_rab_dma_ib_hi_fields
	},
	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
		rac_rab_dma_ib_lo_fields
	},
	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
		rac_rab_dma_ib_rd_fields
	},
	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
		rac_rab_dma_ib_wr_fields
	},
	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
		rac_rab_dma_ob_hi_fields
	},
	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
		rac_rab_dma_ob_lo_fields
	},
	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
		rac_rab_dma_ob_wr_fields
	},
	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
		rac_rab_ib_data_fields
	},
	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
		rac_rab_ob_data_fields
	},
};
+
+/* Receive frame decoder (RFD) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * Defaults: TPID 33024 = 0x8100 (802.1Q), VXLAN port 4789; register resets
+ * pack both 16-bit fields, e.g. 313856693 = 4789 | 4789 << 16. */
static nt_fpga_field_init_t rfd_ctrl_fields[] = {
	{ RFD_CTRL_CFP, 1, 2, 1 },
	{ RFD_CTRL_ISL, 1, 0, 1 },
	{ RFD_CTRL_PWMCW, 1, 1, 1 },
};

static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
};

static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
};

static nt_fpga_field_init_t rfd_vlan_fields[] = {
	{ RFD_VLAN_TPID0, 16, 0, 33024 },
	{ RFD_VLAN_TPID1, 16, 16, 33024 },
};

static nt_fpga_field_init_t rfd_vxlan_fields[] = {
	{ RFD_VXLAN_DP0, 16, 0, 4789 },
	{ RFD_VXLAN_DP1, 16, 16, 4789 },
};

static nt_fpga_register_init_t rfd_registers[] = {
	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
		rfd_max_frame_size_fields
	},
	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
		rfd_tnl_vlan_fields
	},
	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
};
+
+/* Receive MAC controller (RMC) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * RMC_CTRL resets to 771 = 0x303: BLOCK_STATT, BLOCK_KEEPA and both
+ * BLOCK_MAC_PORT bits set. */
static nt_fpga_field_init_t rmc_ctrl_fields[] = {
	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
};

static nt_fpga_field_init_t rmc_dbg_fields[] = {
	{ RMC_DBG_MERGE, 31, 0, 0 },
};

static nt_fpga_field_init_t rmc_mac_if_fields[] = {
	{ RMC_MAC_IF_ERR, 31, 0, 0 },
};

static nt_fpga_field_init_t rmc_status_fields[] = {
	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
};

static nt_fpga_register_init_t rmc_registers[] = {
	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
};
+
+/* Replacer (RPL) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * RPL_RPL_DATA_VALUE is a 128-bit field; the init type apparently allows
+ * widths beyond 32 bits (see also 67-bit PDB_RCP_DATA elsewhere). */
static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t rpl_ext_data_fields[] = {
	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
};

static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
};

static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
};

static nt_fpga_register_init_t rpl_registers[] = {
	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
};
+
+/* RPP_LR module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h. */
static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
};

static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
};

static nt_fpga_register_init_t rpp_lr_registers[] = {
	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
		rpp_lr_ifr_rcp_ctrl_fields
	},
	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
		rpp_lr_ifr_rcp_data_fields
	},
	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
		rpp_lr_rcp_ctrl_fields
	},
	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
		rpp_lr_rcp_data_fields
	},
};
+
+/* RST9563 reset/clock controller: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * RST9563_RST resets to 8191 = bits 0..12 asserted, matching the per-field
+ * resets below (all reset lines held, MMCM resets released). */
static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
};

static nt_fpga_field_init_t rst9563_power_fields[] = {
	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
};

static nt_fpga_field_init_t rst9563_rst_fields[] = {
	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
};

static nt_fpga_field_init_t rst9563_stat_fields[] = {
	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
};

static nt_fpga_field_init_t rst9563_sticky_fields[] = {
	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
};

static nt_fpga_register_init_t rst9563_registers[] = {
	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
		rst9563_sticky_fields
	},
};
+
+/* Slicer (SLC) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h. */
static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t slc_rcp_data_fields[] = {
	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
};

static nt_fpga_register_init_t slc_registers[] = {
	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
};
+
+/* SPI master (SPIM) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * SPIM_SR resets to 6 = bits 1..2 (TXEMPTY, RXEMPTY). */
static nt_fpga_field_init_t spim_cfg_fields[] = {
	{ SPIM_CFG_PRE, 3, 0, 5 },
};

static nt_fpga_field_init_t spim_cr_fields[] = {
	{ SPIM_CR_EN, 1, 1, 0 },
	{ SPIM_CR_LOOP, 1, 0, 0 },
	{ SPIM_CR_RXRST, 1, 3, 0 },
	{ SPIM_CR_TXRST, 1, 2, 0 },
};

static nt_fpga_field_init_t spim_drr_fields[] = {
	{ SPIM_DRR_DRR, 32, 0, 0 },
};

static nt_fpga_field_init_t spim_dtr_fields[] = {
	{ SPIM_DTR_DTR, 32, 0, 0 },
};

static nt_fpga_field_init_t spim_sr_fields[] = {
	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
	{ SPIM_SR_TXLVL, 8, 8, 0 },
};

static nt_fpga_field_init_t spim_srr_fields[] = {
	{ SPIM_SRR_RST, 4, 0, 0 },
};

static nt_fpga_register_init_t spim_registers[] = {
	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
};
+
+/* SPI slave (SPIS) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * SPIS_SR resets to 6 = bits 1..2 (TXEMPTY, RXEMPTY). */
static nt_fpga_field_init_t spis_cr_fields[] = {
	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
	{ SPIS_CR_TXRST, 1, 2, 0 },
};

static nt_fpga_field_init_t spis_drr_fields[] = {
	{ SPIS_DRR_DRR, 32, 0, 0 },
};

static nt_fpga_field_init_t spis_dtr_fields[] = {
	{ SPIS_DTR_DTR, 32, 0, 0 },
};

static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
};

static nt_fpga_field_init_t spis_ram_data_fields[] = {
	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
};

static nt_fpga_field_init_t spis_sr_fields[] = {
	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
};

static nt_fpga_field_init_t spis_srr_fields[] = {
	{ SPIS_SRR_RST, 4, 0, 0 },
};

static nt_fpga_register_init_t spis_registers[] = {
	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
};
+
+/* Statistics (STA) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h. */
static nt_fpga_field_init_t sta_byte_fields[] = {
	{ STA_BYTE_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t sta_cfg_fields[] = {
	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
	{ STA_CFG_DMA_ENA, 1, 0, 0 },
};

static nt_fpga_field_init_t sta_cv_err_fields[] = {
	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t sta_fcs_err_fields[] = {
	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
};

static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
};

static nt_fpga_field_init_t sta_pckt_fields[] = {
	{ STA_PCKT_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t sta_status_fields[] = {
	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
};

static nt_fpga_register_init_t sta_registers[] = {
	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
		sta_host_adr_lsb_fields
	},
	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
		sta_host_adr_msb_fields
	},
	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
};
+
+/* Temperature monitor (TEMPMON) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h. */
static nt_fpga_field_init_t tempmon_alarms_fields[] = {
	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
};

static nt_fpga_field_init_t tempmon_stat_fields[] = {
	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
};

static nt_fpga_register_init_t tempmon_registers[] = {
	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
		tempmon_alarms_fields
	},
	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
};
+
+/* Timer interrupt (TINT) module: field layouts and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h. */
static nt_fpga_field_init_t tint_ctrl_fields[] = {
	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
};

static nt_fpga_field_init_t tint_status_fields[] = {
	{ TINT_STATUS_DELAYED, 8, 8, 0 },
	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
};

static nt_fpga_register_init_t tint_registers[] = {
	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
};
+
+/* TX MAC controller (TMC) module: field layout and register table.
+ * Field entry layout:    { field id, bit width, lsb position, reset value }.
+ * Register entry layout: { register id, index, bit width, access type,
+ * reset value, field count, field array } -- presumed from the init types;
+ * confirm against include/fpga_model.h.
+ * Register reset 2 = bit 1, matching the P1 field reset below. */
static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
	{ TMC_PORT_RPL_P0, 1, 0, 0 },
	{ TMC_PORT_RPL_P1, 1, 1, 1 },
};

static nt_fpga_register_init_t tmc_registers[] = {
	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
};
+
+/* Time sync module (TSM) connector field layouts, connectors 0..7.
+ * Field entry layout: { field id, bit width, lsb position, reset value }.
+ * Connectors 0..2 carry CONFIG/INTERFACE/SAMPLE registers; 3..6 have a
+ * reduced CONFIG layout; 7 is a host-sample pair.
+ * NOTE(review): TSM_CON5_SAMPLE_LO uses a *_TIME field id where every
+ * sibling connector uses *_NS -- looks like a generator naming outlier;
+ * verify against nthw_fpga_registers_defs.h before relying on the name. */
static nt_fpga_field_init_t tsm_con0_config_fields[] = {
	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
};

static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
};

static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con1_config_fields[] = {
	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
};

static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con2_config_fields[] = {
	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
};

static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con3_config_fields[] = {
	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
};

static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con4_config_fields[] = {
	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
};

static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con5_config_fields[] = {
	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
};

static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con6_config_fields[] = {
	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
};

static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
};
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1693228548 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1693228548, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules,
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicators: MOD_UNKNOWN_MAX and MOD_COUNT_MAX must remain the last
+ * real entries; only aliases may be added below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* End indicator - keep this as the last element - only aliases go below this point */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..54db76b73e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM (Flow Matcher): flow learn/unlearn, info and status record registers */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY: GPIO control/status for PHY ports 0-1 (QSFP LPMODE/MODPRS_B/RESET_B/INT_B pins) */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP: GPIO control/status for SFP+ cages (ABS/RS/RXLOS/TXDISABLE/TXFAULT pins) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU (Header Field Updater): TX header length/TTL rewrite recipe registers */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF: PCIe host interface registers (product ID, TLP/tag config, test and status) */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH (Hasher): receive hash recipe registers (Toeplitz-capable key/seed/word-mask setup) */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR (IP Fragmenter): per-recipe enable/MTU registers */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC: I2C bus controller registers (Xilinx AXI IIC-compatible layout) */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM (Key Matcher): CAM-based key match and recipe registers */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: masker recipe (RCP) table — per-recipe mask enable/offset/length/dyn fields */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* PCIE3: PCIe gen3 endpoint — config/control, interrupt clr/force/mask, latency RAM, product id, stats; NOTE(review): PCIE3_DRP_DATE (field ..._DRP_DATA) looks like a generator typo for DRP_DATA — verify against the generator before renaming, the name is referenced elsewhere */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG: PCI read traffic generator — DMA address/size setup RAM and run-iteration control (role inferred from field names) */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA: PCI traffic analyzer counters — good/bad packet and length/payload error amounts (name expansion presumed) */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG: PCI write traffic generator — write-side counterpart of PCI_RD_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01: clock management for the NT100A01 board — PTP/TS clock select, MMCM resets/lock status, Si5328 clock-chip GPIO */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01: clock management for the NT50B01 board — TS clock select, MMCM reset and lock status */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS: PCS layer — block/lane lock, BER/BIP counters, DRP access, GTH transceiver config/tuning; NOTE(review): ..._TX_USER_RDYU looks like a typo for _RDY — verify against the generator */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100: multi-lane PCS variant (presumably 100G) — lane lock/mapping/offset plus QPLL status on top of the PCS register set */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB: packet descriptor recipe (RCP) table — descriptor layout, dyn/rel offsets, duplicate/CRC and TX port controls */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI: serial/UART-style port interface — control, RX/TX data, prescaler, and status (parity/overrun/frame errors, FIFO levels) */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G: 10G PHY — core config/status, NIM GPIO, GT transceiver control/status, interrupt and link summary */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G: combined tri-speed (SGMII/BASE-X with autoneg) and 10G PHY, with per-mode loopback and GT control */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM: host-buffer pointer management — RX/TX read/write pointer memories, bank tables, and pointer-block intervals */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+/*
+ * Register/field ID constants for the RST9541 reset module (one FPGA build
+ * variant). Values are unique sequential indexes into the generated register
+ * map, not hardware addresses. LATCH/STAT expose DDR4 calibration-complete
+ * and PHY-ready; RST holds the per-subsystem reset bits.
+ */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+/*
+ * Register/field IDs for the RST9542 reset module. Same layout as RST9541
+ * except it has no RST_POWER field.
+ */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+/*
+ * Register/field IDs for the RST9547 reset module (same field set as
+ * RST9542: DDR4/PERIPH/PHY/SYS resets plus DDR4-calibration and PHY-ready
+ * latch/status).
+ */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+/*
+ * Register/field IDs for the RST9553 reset module — minimal variant with no
+ * POWER or PERIPH registers.
+ */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+/*
+ * Register/field IDs for the RST9559 reset module — identical field set to
+ * RST9553, different build variant.
+ */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+/* Register/field IDs for the RTX (retransmit) module. */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+/* RXAUI */
+/*
+ * NOTE(review): the RXAUI section-header comment was missing here — every
+ * other module in this file carries one, and the ID gap (8250L -> 8268L)
+ * marks the boundary between the RTX and RXAUI tables. Presumably a
+ * generator omission; confirm against the generator output.
+ */
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+/*
+ * Register/field IDs for the SDC module (cell counters, fill levels, and a
+ * built-in init/test control with MMCM/PLL lock status).
+ */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+/* Register/field IDs for the SLC (slicer) recipe control/data registers. */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* NOTE(review): SLC_LR section is empty here — no register IDs are emitted
+ * for it in this chunk of the generated map; confirm this is intentional. */
+/* SMM */
+/*
+ * Register/field IDs for the SMM module (segment memory: control/data pairs
+ * with address, count, physical address and size fields, plus ready and
+ * segment-invalid sticky status).
+ */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+/*
+ * Register/field IDs for the STA (statistics) module: byte/packet/error
+ * counters, DMA host-address registers, and a status toggle-missed flag.
+ */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 2/8] net/ntnic: adds core registers and fpga functionality
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-01 12:18   ` Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v9:
* Add missing header
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+#define clk_profile_size_error_msg "size test failed"
+
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ */
+static uint16_t crc16(uint8_t *buffer, size_t length)
+{
+	uint16_t seed = 0;
+
+	while (length--) {
+		seed = (uint16_t)(seed >> 8 | seed << 8);
+		seed = (uint16_t)(seed ^ *buffer++);
+		seed = (uint16_t)(seed ^ (seed & 0xff) >> 4);
+		seed = (uint16_t)(seed ^ seed << 8 << 4);
+		seed = (uint16_t)(seed ^ (seed & 0xff) << 4 << 1);
+	}
+	return seed;
+}
+
+/*
+ * Probe the AVR system controller over SPI (instance n_instance_no) and fill
+ * in p_fpga_info->nthw_hw_info: AVR firmware/SPI versions, platform id
+ * string, hardware revision (SYSINFO_2 or legacy SYSINFO), and VPD data
+ * (PN/PBA/SN/board name, feature mask, MAC address count/value).
+ *
+ * Returns the result of the last attempted SPI transfer (0 on success);
+ * -1 when the SPI handle could not be allocated.
+ */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		/* NOTE(review): res is not checked before the reply is used — confirm intended */
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		/* Reply layout: 3 version bytes, 50-byte FW string, 20-byte platform id */
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		/* Force NUL termination of the copied platform id string */
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO (legacy fallback; same 16-byte layout as SYSINFO_2) */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			/* Last two bytes of the VPD blob hold the stored CRC-16 */
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					/* Stored inverse mask cross-checks the feature mask */
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					/*
+					 * Pack the 6 MAC octets into the low 48 bits of a u64;
+					 * the ntohl compare/swap below appears to normalize the
+					 * value on little-endian hosts.
+					 * NOTE(review): verify on a big-endian host.
+					 */
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			/* ARRAY_SIZE is a byte count here since the octets are uint8_t */
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+/*
+ * Program the Si5340 clock synthesizer (at I2C address n_iic_addr) with the
+ * given fmt2 clock profile, via a freshly created IIC bus handle on this
+ * FPGA.
+ *
+ * Returns the result of nthw_si5340_config_fmt2().
+ *
+ * NOTE(review): p_nthw_iic from nthw_iic_new() is never deleted here while
+ * p_nthw_si5340 is — confirm whether IIC handle ownership is transferred
+ * elsewhere or this leaks.
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+/*
+ * Top-level FPGA bring-up: read and decode the FPGA ident registers, look up
+ * the matching FPGA model, initialize RAC register access, run the
+ * adapter-specific init, probe for PCIe3 (falling back to HIF) for timestamp
+ * sampling, and start the TSM stat/keep-alive timers.
+ *
+ * Returns 0 on success, negative/non-zero on failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		/* NOTE(review): shadows the function-scope n_fpga_ident (re-read below) */
+		uint64_t n_fpga_ident;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		/* Raw register reads of the FPGA ident/build-time words */
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	n_fpga_ident = p_fpga_info->n_fpga_ident;
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		/* NOTE(review): p_fpga_mgr is not deleted on this path — confirm */
+		return -1;
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Prefer PCIe3 for timestamp sampling; fall back to HIF below */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	{
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			/* NOTE(review): p_nthw_tsm may be NULL here if nthw_tsm_new() failed — confirm */
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info) {
+		if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+			res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res = -1;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Look up the RST9563 module and cache pointers to the RST/CTRL/STAT/
+ * STICKY/POWER register fields used by the NT200A0x reset sequence in *p.
+ * Fields that do not exist on the 9563 FPGA are explicitly set to NULL so
+ * the reset code can test for their presence at run time.
+ * Returns 0 on success, -1 if the RST9563 module instance is absent.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* Optional fields: query (not get) so absence is non-fatal */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the peripheral reset bit of the RST9563 module (set, then clear).
+ * Returns 0 on success, -1 if the RST9563 module instance is absent.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_reg_rst;
+	nt_field_t *p_fld_rst_periph;
+
+	if (p_mod_rst == NULL)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+	p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+	p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+	/* Assert and immediately de-assert the peripheral reset */
+	field_set_flush(p_fld_rst_periph);
+	field_clr_flush(p_fld_rst_periph);
+
+	return 0;
+}
+
+/*
+ * Program the on-board clock synthesizer.
+ * Only the Si5340 (NT200A02, U23) is supported by this FPGA image.
+ * Returns the synth init result, or -1 for an unsupported model.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Full RST9563 bring-up: peripheral reset, clock synth programming,
+ * register/field setup, then the common NT200A0x reset sequence.
+ * Returns 0 on success, otherwise the first failing step's result.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	const int n_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int res;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_synth_model,
+						n_synth_i2c_addr);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until the DDR4 PLL and MMCMs are locked.
+ *
+ * Retries up to 5 times with a DDR PLL reset pulse between attempts.
+ * The first wait uses a 5 sec timeout, subsequent waits 8 sec, giving a
+ * total worst case of 1 * 5 + 5 * 8 = 45 sec; at least 21 sec has been
+ * observed to be necessary.
+ * Afterwards all sticky MMCM/PLL unlock bits are cleared and the DDR4
+ * MMCM/PLL sticky bits are verified to have stayed clear (errors logged).
+ * Returns 0 on success, -1 on MMCM lock failure.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked; /* int: matches the return of field_wait_set_any32() */
+	uint32_t retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount == 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Assert DDR PLL reset */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Release DDR PLL reset */
+		retrycount--;
+		timeout = 80000; /* Increase timeout for retries to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* Field only present on some FPGAs - NULL when absent (see setup) */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+					      -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds.
+ *
+ * First probes for the SDC module (optional); if present, an SDC instance is
+ * created and owned by this function for its duration. Then loops up to
+ * n_retry_cnt_max + 1 times: wait for DDR4 PLL lock, then (with an SDC) wait
+ * for calibration, pulsing the DDR4 reset between attempts.
+ * Returns 0 on success, non-zero on failure.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			/* allocation failed; p_nthw_sdc is NULL here -
+			 * presumably nthw_sdc_delete(NULL) is a no-op
+			 */
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		/* No SDC module: only the DDR4 PLL lock below is awaited */
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					/* NOTE(review): %08lX with uint64_t is not
+					 * portable to 32-bit - PRIX64 would be; confirm
+					 */
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			/* Final-attempt diagnostics (calibration still failing) */
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Execute the full NT200A0x reset sequence using the field pointers
+ * resolved earlier (see nthw_fpga_rst9563_setup): assert all domain
+ * resets, select the timesync reference clock, release SYS/CORE MMCMs
+ * and wait for lock, bring remaining RAB buses out of reset, release
+ * PHY/MAC/DDR4/SDC resets, wait for SDRAM calibration, then run the
+ * timesync/PTP reset steps and finally stage power (PHY up, NSEB down).
+ * NULL field pointers mark fields absent on this FPGA and are skipped.
+ * Returns 0 on success, -1 on lock/calibration failure.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* NOTE(review): "(true) &&" here and below looks like a leftover
+	 * compile-time gate - confirm it can be dropped
+	 */
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): dead code - the PTP MMCM clock switch-over below is
+	 * intentionally disabled with "if (false)"; confirm before enabling
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Prepare the NT200A0x reset state: reset/setup the RAB buses, probe the
+ * AVR and scan two I2C buses, then detect the mounted Si Labs clock
+ * synthesizer (Si5340 first - NT200A02/NT200A01 build 2 - then Si5338 for
+ * old NT200A01 build 1) and record model/I2C address/HW id in *p_rst.
+ * Returns -1 if no clock synth was detected; otherwise the value of the
+ * last I2C scan (see NOTE below).
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): res from the AVR probe and the first IIC scan is
+	 * overwritten below, so failures there are effectively ignored and
+	 * the final return value reflects only the last scan - confirm
+	 * this is intentional.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	/* Record detection results for the later reset/synth-init steps */
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset state for NT200A0x adapters: the detected Si-Labs clock synthesizer,
+ * the HW id, and cached handles to the FPGA RST/CTRL/STAT/STICKY/POWER
+ * register fields used by the reset sequence.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Clock synth detected at init (e.g. 5338/5340) and its 7-bit I2C address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+
+	/* Per-interface SerDes/PCS reset hooks (installed by platform code) */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/* Allocate a zero-initialized GPIO-PHY instance; returns NULL on OOM. */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc yields the same zero-filled object as malloc+memset */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/* Release a GPIO-PHY instance; NULL is accepted and ignored. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* scrub before release, as the original code did */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the GPIO_PHY module registers (CFG, GPIO) and per-port field handles.
+ *
+ * A NULL 'p' turns the call into a presence probe: 0 if the module instance
+ * exists, -1 otherwise. The PLL_INTR and RXLOS fields are optional and are
+ * looked up with register_query_field(), so they may end up NULL; callers
+ * must check them before use.
+ *
+ * Returns 0 on success, -1 if the instance is not present in the FPGA.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* Synchronize the CFG register shadow with hardware */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* True when the port's GPIO LPMODE pin reads non-zero (low-power active). */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* True when the port interrupt is asserted. */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* INT_B is an active-low GPIO pin: a zero reading means "interrupt set" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* True when the port's PHY is currently held in reset. */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* RESET_B is an active-low GPIO pin: a zero reading means "in reset" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* True when a transceiver module is present in the port's cage. */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* MODPRS_B is an active-low GPIO pin: a zero reading means "present" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/*
+ * True when the optional PLL interrupt pin is asserted. On hardware without
+ * a PLL_INTR pin (no INTR from the SyncE jitter attenuator) the field handle
+ * is NULL and the answer is always false.
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	nt_field_t *const fld_pll_int = p->mpa_fields[if_no].gpio_pll_int;
+
+	if (fld_pll_int == NULL)
+		return false;
+
+	/* PLL_INTR is a normal (active-high) GPIO pin */
+	return field_get_updated(fld_pll_int) != 0;
+}
+
+/*
+ * True when the optional emulated RXLOS pin is asserted; always false on
+ * hardware where the field does not exist.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	nt_field_t *const fld_rxlos = p->mpa_fields[if_no].gpio_port_rxlos;
+
+	if (fld_rxlos == NULL)
+		return false;
+
+	return field_get_updated(fld_rxlos) != 0;
+}
+
+/* Drive the port's LPMODE pin: set it when enabling, clear it otherwise. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	nt_field_t *const fld_lp_mode = p->mpa_fields[if_no].gpio_fld_lp_mode;
+
+	if (enable)
+		field_set_flush(fld_lp_mode);
+	else
+		field_clr_flush(fld_lp_mode);
+
+	/* clear the CFG bit so the pin is driven as an output */
+	field_clr_flush(p->mpa_fields[if_no].cfg_fld_lp_mode);
+}
+
+/* Assert (enable=true) or release the port's PHY reset. */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	nt_field_t *const fld_reset = p->mpa_fields[if_no].gpio_reset;
+
+	/* RESET_B is active-low: drive low to assert reset, high to release */
+	if (enable)
+		field_clr_flush(fld_reset);
+	else
+		field_set_flush(fld_reset);
+
+	/* clear the CFG bit so the pin is driven as an output */
+	field_clr_flush(p->mpa_fields[if_no].cfg_reset);
+}
+
+/*
+ * Drive the optional emulated RXLOS pin; silently ignored on hardware
+ * where the field does not exist.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	nt_field_t *const fld_rxlos = p->mpa_fields[if_no].gpio_port_rxlos;
+
+	if (fld_rxlos == NULL)
+		return;
+
+	if (enable)
+		field_set_flush(fld_rxlos);
+	else
+		field_clr_flush(fld_rxlos);
+}
+
+/*
+ * Restore CFG defaults for one port: switch the LPMODE/INT/RESET/MODPRS pins
+ * to input mode and, when the optional emulated RXLOS field exists, drive it
+ * as an output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	field_set_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_int); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_reset); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_mod_prs); /* enable input */
+	if (p->mpa_fields[if_no].cfg_port_rxlos)
+		field_clr_flush(p->mpa_fields[if_no].cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+/* Number of front ports managed per GPIO_PHY module instance */
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-port field handles; fields looked up with query may be NULL */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* GPIO_PHY module context: CFG/GPIO register handles plus per-port fields */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/* Allocate a zero-initialized HIF instance; returns NULL on OOM. */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc yields the same zero-filled object as malloc+memset */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/* Release a HIF instance; NULL is accepted and ignored. */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* scrub before release, as the original code did */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the HIF (host interface) module registers/fields and read the FPGA
+ * product identification.
+ *
+ * A NULL 'p' turns the call into a presence probe: 0 if the HIF module
+ * instance exists, -1 otherwise. Registers looked up with "query" are
+ * optional; the corresponding field pointers stay NULL when absent.
+ *
+ * Fixes vs. the original: the always-false dead branches around
+ * mp_reg_build_seed/mp_reg_core_speed are removed (both arms only assigned
+ * NULL), and the HIF_STATUS else-branch no longer passes the NULL register
+ * to register_query_field().
+ *
+ * Returns 0 on success, -1 if the instance is not present in the FPGA.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	/* derive the reference clock frequency (Hz) from the clock period (ps) */
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	/* Cache the FPGA identification read from the registers above */
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/* Build-seed and core-speed Reg/Fld are not present on HIF */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* Statistics control and counter registers */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* Optional status register: all field pointers stay NULL when absent */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+	if (p->mp_reg_config) {
+		p->mp_fld_max_tlp =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+		p->mp_fld_max_read =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+		p->mp_fld_ext_tag =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+	} else {
+		p->mp_fld_max_tlp = NULL;
+		p->mp_fld_max_read = NULL;
+		p->mp_fld_ext_tag = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Tune the PCIe gen3 configuration on adapters other than NT40E3: clamp the
+ * negotiated maximum read-request size to 512B (index 2) and enable extended
+ * tags, then log the resulting settings. The config fields are optional and
+ * may be NULL (see nthw_hif_init).
+ * Always returns 0.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		/* Log only when the whole config register set is present */
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+/*
+ * Trigger a TSM time sample by writing a magic value to the HIF sample-time
+ * register. Always returns 0.
+ * NOTE(review): mp_fld_sample_time can be NULL when the register is absent
+ * (see nthw_hif_init) — confirm callers only invoke this when TSM is present.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw HIF statistics: RX/TX/reference-clock counters, the fixed
+ * statistics unit size, the HIF reference clock frequency, and — when the
+ * optional status fields exist — tags-in-use and read/write error counts
+ * (reported as 0 when absent). Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* Optional fields: report zero when not present on this FPGA */
+	*p_tags_in_use = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+
+	*p_rd_err = 0;
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+
+	*p_wr_err = 0;
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/*
+ * Convert the raw HIF counters into RX/TX rates relative to the reference
+ * clock count captured in the same sample window; all rates are 0 when the
+ * reference clock count is 0. Always returns 0.
+ *
+ * NOTE(review): *p_rd_err_cnt and *p_wr_err_cnt are incremented rather than
+ * assigned — callers must pass pre-initialized accumulators.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt, tg_unit_size, tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size, &tg_ref_freq,
+			&n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (ref_clk_cnt) {
+		uint64_t rx_rate;
+		uint64_t tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		/* rate = counter * unit_size * ref_freq / ref_clk_ticks */
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+	} else {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+	}
+
+	return 0;
+}
+
+/* Enable statistics collection and request a counter sample (single flush). */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while issuing a final sample request. */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample HIF statistics over a fixed 100 ms window: enable counting, wait,
+ * disable, then convert the counters to rates. Always returns 0.
+ * NOTE(review): p_rd_err_cnt/p_wr_err_cnt are incremented by
+ * nthw_hif_get_stat_rate — callers must pass initialized accumulators.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Sample the PCIe end-point rate counters into 'epc'. Always returns 0.
+ * NOTE(review): epc->cur_tx receives the RX rate and epc->cur_rx the TX rate
+ * — presumably the rates are reported from the end-point's point of view;
+ * confirm the intended direction mapping.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Statistics unit size reported as 'tg_unit_size' by nthw_hif_get_stat() */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * HIF (host interface) module context: product-id, interrupt, statistics and
+ * PCIe-config register/field handles plus the cached FPGA identification.
+ * Registers bound via "query" lookups are optional and may be NULL.
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA identification read at init time */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	/* HIF clock period (ps) and the derived reference frequency (Hz) */
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe end-point error counters (correctable / non-fatal / fatal) */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Aggregated end-point sample: TG setup, rates, and error snapshots */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	/* Error counters before/after a run and their delta */
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+/* Primary/slave end-point pair */
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
/*
 * Minimum timing values for I2C for a Marvell 88E1111 PHY.
 * This PHY is used in many Tri-speed NIMs.
 * In order to access this PHY, the I2C clock speed needs to be set to 100 kHz.
 */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
/*
 * Soft-reset the I2C controller by writing the reset key (0x0A) to the
 * SOFTR register.
 * NOTE(review): the read refreshes the CR register shadow (via
 * mp_fld_cr_en) before the reset is issued - presumably so the cached
 * control state matches hardware afterwards; confirm intent.
 */
static int nthw_iic_reg_softr(nthw_iic_t *p)
{
	field_update_register(p->mp_fld_cr_en);
	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);

	return 0;
}
+
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_bb) ? true : false;
+
+	return 0;
+}
+
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_rxfifo_empty) ? true : false;
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	uint32_t val;
+
+	val = susta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusta, &val, 1);
+
+	val = susto / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsusto, &val, 1);
+
+	val = hdsta / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thdsta, &val, 1);
+
+	val = sudat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tsudat, &val, 1);
+
+	val = buf / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tbuf, &val, 1);
+
+	val = high / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thigh, &val, 1);
+
+	val = low / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_tlow, &val, 1);
+
+	val = hddat / n_iic_cycle_time;
+	field_set_val_flush(p->mp_fld_thddat, &val, 1);
+
+	return 0;
+}
+
+nthw_iic_t *nthw_iic_new(void)
+{
+	nthw_iic_t *p = malloc(sizeof(nthw_iic_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_iic_t));
+	return p;
+}
+
/*
 * Initialize an I2C controller context against FPGA module instance
 * n_iic_instance.
 *
 * p == NULL is a pure presence probe: returns 0 if the module instance
 * exists, -1 otherwise, touching no state.
 * n_iic_cycle_time is the controller clock cycle time in ns; when
 * non-zero the bus timing registers are programmed from it.
 *
 * Returns 0 on success, -1 if the module instance does not exist.
 */
int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
		 uint32_t n_iic_cycle_time)
{
	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);

	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
		       p_adapter_id_str, n_iic_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_iic_instance = n_iic_instance;

	p->mn_iic_cycle_time = n_iic_cycle_time;

	/* All -1: select the built-in default poll/retry parameters */
	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);

	p->mp_mod_iic = mod;

	/* I2C is a primary communication channel - turn off debug by default */
	module_set_debug_mode(p->mp_mod_iic, 0x00);

	/* Resolve the bus timing registers and their value fields */
	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
	p->mp_fld_tsusta =
		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);

	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
	p->mp_fld_tsusto =
		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);

	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
	p->mp_fld_thdsta =
		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);

	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
	p->mp_fld_tsudat =
		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);

	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);

	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);

	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);

	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
	p->mp_fld_thddat =
		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);

	/* Control register and its bit fields */
	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
	p->mp_fld_cr_txfifo_reset =
		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);

	/* Status register and its bit fields */
	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
	p->mp_fld_sr_rxfifo_full =
		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
	p->mp_fld_sr_rxfifo_empty =
		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
	p->mp_fld_sr_txfifo_full =
		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
	p->mp_fld_sr_txfifo_empty =
		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);

	/* TX/RX FIFO data registers */
	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
	p->mp_fld_tx_fifo_txdata =
		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
	p->mp_fld_tx_fifo_start =
		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
	p->mp_fld_tx_fifo_stop =
		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);

	p->mp_reg_rx_fifo_pirq =
		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
	p->mp_fld_rx_fifo_pirq_cmp_val =
		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);

	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
	p->mp_fld_rx_fifo_rxdata =
		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);

	/* Soft reset register */
	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);

	/*
	 * Initialize I2C controller by applying soft reset and enable the controller
	 */
	nthw_iic_reg_softr(p);
	/* Enable the controller */
	nthw_iic_reg_enable(p);

	/* Setup controller timing */
	if (p->mn_iic_cycle_time) {
		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
		       p->mn_iic_instance, p->mn_iic_cycle_time);
		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
	}

	/* Reset TX fifo - must be after enable */
	nthw_iic_reg_control_txfifo_reset(p);
	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);

	return 0;
}
+
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_iic_t));
+		free(p);
+	}
+}
+
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = n_poll_delay >= 0 ? n_poll_delay : 10;
+
+	p->mn_bus_ready_retry = n_bus_ready_retry >= 0 ? n_bus_ready_retry : 1000;
+	p->mn_data_ready_retry = n_data_ready_retry >= 0 ? n_data_ready_retry : 1000;
+
+	p->mn_read_data_retry = n_read_data_retry >= 0 ? n_read_data_retry : 10;
+	p->mn_write_data_retry = n_write_data_retry >= 0 ? n_write_data_retry : 10;
+
+	return 0;
+}
+
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/* Write device address + RD bit to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write DataLen to TX_FIFO and set stop bit!! */
+		nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+		for (i = 0; i < data_len; i++) {
+			/* Wait for RX FIFO not empty */
+			if (!nthw_iic_data_ready(p))
+				return -1;
+
+			/* Read DataLen bytes from RX_FIFO */
+			nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+			p_byte++;
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+	return 0;
+}
+
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
/*
 * Perform one I2C write transaction: write <dev_addr|WR> + reg_addr,
 * then data_len bytes from p_byte, setting the stop bit on the final
 * byte only.
 * Returns 0 on success, -1 if data_len is 0 or the bus is not ready
 * at the start.
 *
 * NOTE(review): if the bus does not become ready after the transfer,
 * the "while (true)" recovery loop below spins without any timeout -
 * it can hang forever if the bus never recovers; confirm whether a
 * bounded wait is acceptable here.
 */
int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		      uint8_t data_len, uint8_t *p_byte)
{
	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
	uint32_t value;
	int count;
	int i;

	if (data_len == 0)
		return -1;

	count = data_len - 1;	/* bytes before the final (stop) byte */
	if (nthw_iic_bus_ready(p)) {
		/* Reset TX fifo */
		nthw_iic_reg_control_txfifo_reset(p);

		/* Write device address to TX_FIFO and set start bit!! */
		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);

		/* Write reg_addr to TX FIFO */
		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);

		for (i = 0; i < count; i++) {
			/* Write intermediate data byte to TX fifo (no stop bit) */
			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
			p_byte++;
		}

		/* Write data byte to TX fifo and set stop bit */
		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);

		if (!nthw_iic_bus_ready(p)) {
			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
			       p_adapter_id_str, __func__, __LINE__);
			/* Unbounded wait for bus recovery - see NOTE above */
			while (true) {
				if (nthw_iic_bus_ready(p)) {
					NT_LOG(DBG, NTHW,
					       "%s: info: busReady (%s:%u)\n",
					       p_adapter_id_str, __func__,
					       __LINE__);
					break;
				}
			}
		}

		return 0;

	} else {
		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
		       __LINE__);
		return -1;
	}
}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	if (count == 0)
+		return false;
+
+	return true;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	if (count == 0)
+		return false;
+
+	return true;
+}
+
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res;
+	uint8_t data_val = -1;
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	int res = 0;
+	int i = 0;
+
+	if (b_increate) {
+		for (i = n_dev_addr_start; i < 128; i++) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	} else {
+		for (i = n_dev_addr_start; i >= 0; i--) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int i;
+
+	for (i = 0; i < 128; i++)
+		(void)nthw_iic_scan_dev_addr(p, i, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
/*
 * Software context for one FPGA IIC (I2C) controller instance:
 * module/register/field handles cached at init time plus the
 * poll/retry configuration used by the transfer functions.
 */
struct nthw_iic {
	nt_fpga_t *mp_fpga;		/* owning FPGA */
	nt_module_t *mp_mod_iic;	/* IIC module handle */
	int mn_iic_instance;		/* module instance number */

	uint32_t mn_iic_cycle_time;	/* controller clock cycle time in ns */
	int mn_poll_delay;		/* delay between polls, microseconds */
	int mn_bus_ready_retry;		/* max polls while waiting for bus idle */
	int mn_data_ready_retry;	/* max polls while waiting for RX data */
	int mn_read_data_retry;		/* max read transfer attempts */
	int mn_write_data_retry;	/* max write transfer attempts */

	/* Bus timing registers and their value fields (setup/hold/high/low) */
	nt_register_t *mp_reg_tsusta;
	nt_field_t *mp_fld_tsusta;

	nt_register_t *mp_reg_tsusto;
	nt_field_t *mp_fld_tsusto;

	nt_register_t *mp_reg_thdsta;
	nt_field_t *mp_fld_thdsta;

	nt_register_t *mp_reg_tsudat;
	nt_field_t *mp_fld_tsudat;

	nt_register_t *mp_reg_tbuf;
	nt_field_t *mp_fld_tbuf;

	nt_register_t *mp_reg_thigh;
	nt_field_t *mp_fld_thigh;

	nt_register_t *mp_reg_tlow;
	nt_field_t *mp_fld_tlow;

	nt_register_t *mp_reg_thddat;
	nt_field_t *mp_fld_thddat;

	/* Control register (enable, master mode, TX FIFO reset, TX ACK) */
	nt_register_t *mp_reg_cr;
	nt_field_t *mp_fld_cr_en;
	nt_field_t *mp_fld_cr_msms;
	nt_field_t *mp_fld_cr_txfifo_reset;
	nt_field_t *mp_fld_cr_txak;

	/* Status register (bus busy, FIFO full/empty flags) */
	nt_register_t *mp_reg_sr;
	nt_field_t *mp_fld_sr_bb;
	nt_field_t *mp_fld_sr_rxfifo_full;
	nt_field_t *mp_fld_sr_rxfifo_empty;
	nt_field_t *mp_fld_sr_txfifo_full;
	nt_field_t *mp_fld_sr_txfifo_empty;

	/* TX FIFO data register with start/stop framing bits */
	nt_register_t *mp_reg_tx_fifo;
	nt_field_t *mp_fld_tx_fifo_txdata;
	nt_field_t *mp_fld_tx_fifo_start;
	nt_field_t *mp_fld_tx_fifo_stop;

	/* RX FIFO programmable interrupt threshold */
	nt_register_t *mp_reg_rx_fifo_pirq;
	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;

	/* RX FIFO data register */
	nt_register_t *mp_reg_rx_fifo;
	nt_field_t *mp_fld_rx_fifo_rxdata;

	/* Soft reset register (takes a magic reset key) */
	nt_register_t *mp_reg_softr;
	nt_field_t *mp_fld_softr_rkey;
};

typedef struct nthw_iic nthw_iic_t;
typedef struct nthw_iic nthw_iic;

/* Lifecycle: allocate, initialize against an FPGA instance, release. */
nthw_iic_t *nthw_iic_new(void);
int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
		 uint32_t n_iic_cycle_time);
void nthw_iic_delete(nthw_iic_t *p);

/* Negative arguments select built-in defaults for each parameter. */
int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
			   const int n_bus_ready_retry, const int n_data_ready_retry,
			   const int n_read_data_retry, const int n_write_data_retry);

/* Retrying transfer wrappers and the single-transaction primitives. */
int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		     uint8_t data_len, void *p_void);
int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		     uint8_t data_len, uint8_t *p_byte);
int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		      uint8_t data_len, void *p_void);
int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
		      uint8_t data_len, uint8_t *p_byte);
bool nthw_iic_bus_ready(nthw_iic_t *p);
bool nthw_iic_data_ready(nthw_iic_t *p);

/* Bus scanning helpers (probe for responding device addresses). */
int nthw_iic_scan(nthw_iic_t *p);
int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	nthw_mac_pcs_t *p = malloc(sizeof(nthw_mac_pcs_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_mac_pcs_t));
+	return p;
+}
+
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_mac_pcs_t));
+		free(p);
+	}
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/* wrapper - for ease of use */
+/*
+ * Select the host (or, when disabled, the traffic generator) as the Tx source.
+ * Host Tx and TFG Tx are mutually exclusive; the call order (select new source
+ * before deselecting the other) is preserved deliberately.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS receive path. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_pcs_config_rx_enable;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Enable or disable the PCS transmit path. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_pcs_config_tx_enable;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Route host traffic to the transmitter (PHYMAC_MISC.TX_SEL_HOST). */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Route the traffic generator (TFG) to the transmitter (PHYMAC_MISC.TX_SEL_TFG). */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set end-of-packet timestamping; the field is optional and may be absent. */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (!fld)
+		return;	/* field not present in this FPGA image */
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when every masked block-lock bit AND every masked lane-lock bit is set,
+ * i.e. the PCS is fully locked.
+ */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+/* Assert (true) or release (false) the PCS Tx path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (true) or release (false) the PCS Rx path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Report whether the PCS Rx path is currently held in reset. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	const uint32_t val = field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+
+	return val != 0;
+}
+
+/* Start (true) or stop (false) transmitting Remote Fault Indication. */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Force (true) or release (false) Rx resynchronization. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	const nt_field_t *fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	field_get_updated(fld);	/* refresh shadow before modifying */
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* True when all four GTY lanes report Rx reset done (masked compare). */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const nt_field_t *done[4] = {
+		p->mp_fld_gty_stat_rx_rst_done0, p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2, p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		/* Short-circuits like the original &&-chain did */
+		if ((field_get_updated(done[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/* True when all four GTY lanes report Tx reset done (masked compare). */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const nt_field_t *done[4] = {
+		p->mp_fld_gty_stat_tx_rst_done0, p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2, p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		/* Short-circuits like the original &&-chain did */
+		if ((field_get_updated(done[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Enable or disable host-side (near-end) loopback on all four GTY lanes.
+ * Loop code 2 selects the loopback mode, 0 restores normal operation.
+ */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_code = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_code);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Enable or disable line-side (far-end) loopback on all four GTY lanes.
+ * Loop code 4 selects the loopback mode, 0 restores normal operation.
+ */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_code = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_code);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read out the per-lane BIP8 error counters.
+ * NOTE(review): the function only reads the counters; the "reset" in the name
+ * suggests the hardware clears them on read - confirm against the FPGA
+ * register map.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		/* Report every lane that accumulated BIP8 errors since the last read. */
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Fetch the PCS Rx status; only bit 0 of the field is reported. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	const uint32_t rx_status = field_get_updated(p->mp_fld_stat_pcs_rx_status);
+
+	*status = (uint8_t)(rx_status & 0x01);
+}
+
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the LINK_SUMMARY register once and copy out each requested field.
+ * Every output pointer is optional; pass NULL for values you don't need.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+
+	if (p_abs != NULL)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state != NULL)
+		*p_nt_phy_link_state = field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	if (p_lh_abs != NULL)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state != NULL)
+		*p_ll_nt_phy_link_state = field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	if (p_link_down_cnt != NULL)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr != NULL)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault != NULL)
+		*p_lh_local_fault = field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	if (p_lh_remote_fault != NULL)
+		*p_lh_remote_fault = field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	if (p_local_fault != NULL)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault != NULL)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Decide whether a PCS reset is needed from the lock bits.
+ * A reset is required when block lock or lane lock is partial:
+ * some, but not all, of the masked bits are set.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_lock_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_lock_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+
+	if (block_lock != 0 && block_lock != block_lock_mask)
+		return true;
+	return lane_lock != 0 && lane_lock != lane_lock_mask;
+}
+
+/*
+ * Enable or disable RS-FEC on the port, then cycle both Rx and Tx path
+ * resets so the new FEC state takes effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	if (enable) {
+		/* All control bits cleared: FEC active */
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, 0);
+	} else {
+		/* (1 << 5) - 1 = all five control bits set - presumably the
+		 * bypass encoding; confirm against the FPGA register map.
+		 */
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+				    (1 << 5) - 1);
+	}
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	/* Log the resulting FEC state (valid when enabled, bypass when not). */
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Report whether the FEC block is in bypass mode. */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	const uint32_t bypass = field_get_updated(p->mp_field_fec_stat_bypass);
+
+	return bypass != 0;
+}
+
+/* Report whether the FEC block signals valid operation. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	const uint32_t valid = field_get_updated(p->mp_field_fec_stat_valid);
+
+	return valid != 0;
+}
+
+/* Report whether the FEC lanes are aligned. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	const uint32_t aligned = field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+
+	return aligned != 0;
+}
+
+/* True when at least one FEC lane has alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3) != 0;
+}
+
+/* True when every FEC lane has alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3) != 0;
+}
+
+/* Dump all FEC_STAT fields to the debug log for diagnostics. */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	/* Bug fix: the fourth lane was mislabeled "AM_LOCK_0" although
+	 * am_lock3 is printed there; label corrected to "AM_LOCK_3".
+	 */
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read the FEC corrected/uncorrected code-word counters and log non-zero
+ * values.
+ * NOTE(review): only reads are performed; the "reset" in the name suggests
+ * the counters are clear-on-read - confirm against the FPGA register map.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the GTY Rx buffer status. Returns true (and logs the four lane
+ * status values) only when at least one lane both changed state and reports
+ * a non-zero status.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	bool changed;
+	bool bad_stat;
+
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	changed = field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+		  field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+		  field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+		  field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3);
+	if (!changed)
+		return false;
+
+	bad_stat = field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+		   field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+		   field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+		   field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3);
+	if (!bad_stat)
+		return false;
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+	       p->m_port_no,
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+	       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+	return true;
+}
+
+/*
+ * Apply GTY transmitter analog tuning (pre-cursor, differential control,
+ * post-cursor) for one lane (0-3). Each value is truncated to 5 bits.
+ * Fix: each switch now has an explicit default so an out-of-range lane is
+ * visibly a no-op instead of silently falling out of the switch.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	default:
+		break; /* invalid lane - no register touched */
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	default:
+		break; /* invalid lane - no register touched */
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	default:
+		break; /* invalid lane - no register touched */
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode.
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	register_update(p->mp_reg_gty_ctl);
+	/* Three plain sets followed by one set-and-flush: assumes the four
+	 * LPM_EN fields live in the same register so the final flush writes
+	 * all of them at once - confirm against the FPGA register map.
+	 */
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/*
+ * Invert (or restore) the Tx differential-pair polarity of one GTY lane.
+ * A lane outside 0-3 changes nothing but still emits the log line,
+ * matching the original switch-without-default behavior.
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *pol[4];
+
+	pol[0] = p->mp_field_gty_ctl_tx_pol0;
+	pol[1] = p->mp_field_gty_ctl_tx_pol1;
+	pol[2] = p->mp_field_gty_ctl_tx_pol2;
+	pol[3] = p->mp_field_gty_ctl_tx_pol3;
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(pol[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Invert (or restore) the Rx differential-pair polarity of one GTY lane.
+ * A lane outside 0-3 changes nothing but still emits the log line,
+ * matching the original switch-without-default behavior.
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *pol[4];
+
+	pol[0] = p->mp_field_gty_ctl_rx_pol0;
+	pol[1] = p->mp_field_gty_ctl_rx_pol1;
+	pol[2] = p->mp_field_gty_ctl_rx_pol2;
+	pol[3] = p->mp_field_gty_ctl_rx_pol3;
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(pol[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Write a new LED mode (see nthw_mac_pcs_led_mode_e) to DEBOUNCE_CTRL. */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	nt_field_t *ctrl = p->mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(ctrl);	/* refresh shadow before writing */
+	field_set_val_flush32(ctrl, mode);
+}
+
+/* Set the Rx timestamp compensation delay; TIMESTAMP_COMP is optional. */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (!p->mp_field_time_stamp_comp_rx_dly)
+		return;	/* register not present in this FPGA image */
+
+	field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+	field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+}
+
+/* Record the logical port number used in log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Read the current BLOCK_LOCK lock bits (raw, unmasked). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	const uint32_t lock_bits = field_get_updated(p->mp_fld_block_lock_lock);
+
+	return lock_bits;
+}
+
+/* Mask of block-lock bits that must be set for a full lock. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Read the current lane (VL_DEMUXED) lock bits (raw, unmasked). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	const uint32_t lock_bits = field_get_updated(p->mp_fld_vl_demuxed_lock);
+
+	return lock_bits;
+}
+
+/* Mask of lane-lock bits that must be set for a full lock. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* LED control modes accepted by nthw_mac_pcs_set_led_mode(). */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+/*
+ * Receiver equalization modes for nthw_mac_pcs_set_receiver_equalization_mode():
+ * 0 selects DFE, 1 selects LPM.
+ * NOTE(review): the .c file compares against c_mac_pcs_receiver_mode_dfe -
+ * presumably an alias of these values; verify they stay in sync.
+ */
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Register/field shadow handles for one MAC_PCS module instance.
+ * The mp_* pointers are resolved during nthw_mac_pcs_init(); registers that
+ * are looked up with module_query_register() (e.g. GTY_CTL, TIMESTAMP_COMP)
+ * are optional and may stay NULL - callers must check before use.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;	/* optional; may be NULL */
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;	/* NULL when Rx/Tx share one GTY_CTL register */
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP (optional register; fields may be NULL) */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+/* Lifetime */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+/* Lock / reset state queries and path resets */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+/* NOTE(review): no definition of this function is visible in this file chunk -
+ * verify it is implemented elsewhere or drop the declaration.
+ */
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+/* All output pointers are optional; pass NULL for values not needed. */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+/* FEC control/status */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+/* GTY transceiver tuning (lane is 0-3) */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+/* Raw lock-bit accessors used by nthw_mac_pcs_reset_required() */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Set or clear a single register field and flush it to hardware.
+ * A NULL field is silently ignored so optional fields need no caller-side checks.
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	/* Refresh the shadow value before modifying the field */
+	field_get_updated(f);
+
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate a zero-initialized MAC/PCS XXV context.
+ * Returns NULL on allocation failure; the caller owns the returned object
+ * and releases it with nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc() zero-initializes in one step, replacing malloc()+memset() */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Release a context allocated by nthw_mac_pcs_xxv_new().
+ * The object is scrubbed before being freed; NULL is accepted as a no-op.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Map a channel index to a port number.
+ * Channel 0 reports the module instance number; other channels report
+ * their own index.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (index == 0)
+		return (uint8_t)p->mn_instance;
+	return index;
+}
+
+/* Record the logical port number for this MAC/PCS (used e.g. in log output). */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register of channel @index and report the requested
+ * status fields. Every output pointer is optional; pass NULL to skip a field.
+ *
+ * Fix: the original asserted on @p only AFTER @p had already been used to
+ * look up the channel's register set; the guard now runs first.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	assert(p);
+
+	r = &p->regs[index]; /* register and fields */
+
+	/* Single register read; all fields below decode the same snapshot */
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Enable/disable the TX datapath (CORE_CONF.TX_ENABLE) on channel @index. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_enable,
+						enable);
+}
+
+/* Enable/disable the RX datapath (CORE_CONF.RX_ENABLE) on channel @index. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_enable,
+						enable);
+}
+
+/* Assert/deassert CORE_CONF.RX_FORCE_RESYNC on channel @index. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_force_resync,
+						enable);
+}
+
+/* Assert/deassert the RX GT data sub-reset (SUB_RST.RX_GT_DATA). */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_gt_data,
+						enable);
+}
+
+/* Assert/deassert the TX GT data sub-reset (SUB_RST.TX_GT_DATA). */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_gt_data,
+						enable);
+}
+
+/* Assert/deassert the auto-negotiation/link-training sub-reset (SUB_RST.AN_LT). */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_an_lt,
+						enable);
+}
+
+/* Assert/deassert the speed-control sub-reset (SUB_RST.SPEED_CTRL). */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_speed_ctrl,
+						enable);
+}
+
+/* Enable/disable transmission of remote-fault indication (CORE_CONF.TX_SEND_RFI). */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_rfi,
+						enable);
+}
+
+/* Enable/disable transmission of local-fault indication (CORE_CONF.TX_SEND_LFI). */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_lfi,
+						enable);
+}
+
+/*
+ * Program both fault-indication bits (TX_SEND_LFI and TX_SEND_RFI) in one
+ * register update/flush cycle so they take effect atomically.
+ */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+
+	register_update(rf->mp_reg_core_conf);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(rf->mp_reg_core_conf, 1);
+}
+
+/*
+ * Report whether DFE equalization is active on channel @index.
+ * DFE and LPM are mutually exclusive: DFE is enabled when LPM is disabled.
+ */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const uint32_t lpm_en =
+		field_get_updated(p->regs[index].mp_fld_gty_ctl_rx_lpm_en);
+
+	return lpm_en == 0;
+}
+
+/*
+ * Select the RX equalization mode for channel @index: DFE (enable=true) or
+ * LPM (enable=false). After changing LPM_EN the equalizer is pulsed through
+ * a soft reset so the new setting is latched by the transceiver.
+ * NOTE(review): exact 1-then-0 reset pulse order is hardware-mandated;
+ * do not reorder these flushes.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Enable/disable RX lane polarity inversion (GTY_CTL_RX.POLARITY). */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_rx_polarity,
+						enable);
+}
+
+/* Enable/disable TX lane polarity inversion (GTY_CTL_TX.POLARITY). */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_polarity,
+						enable);
+}
+
+/* Enable/disable TX output inhibit (GTY_CTL_TX.INHIBIT). */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_inhibit,
+						enable);
+}
+
+/*
+ * Enable/disable GT host-side loopback on channel @index.
+ * The field takes 2 for loopback-on and 0 for loopback-off
+ * (loopback-mode encoding per the GT_LOOP field spec - TODO confirm).
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const gt_loop = p->regs[index].mp_fld_gty_loop_gt_loop;
+	const uint32_t mode = enable ? 2U : 0U;
+
+	field_get_updated(gt_loop);
+	field_set_val_flush32(gt_loop, mode);
+}
+
+/* Enable/disable line-side loopback (CORE_CONF.LINE_LOOPBACK). */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_line_loopback,
+						enable);
+}
+
+/* True while the user RX reset is still asserted (SUB_RST_STATUS.USER_RX_RST). */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_fld_sub_rst_status_user_rx_rst;
+
+	return field_get_updated(fld) != 0;
+}
+
+/* True while the user TX reset is still asserted (SUB_RST_STATUS.USER_TX_RST). */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_fld_sub_rst_status_user_tx_rst;
+
+	return field_get_updated(fld) != 0;
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs. Lock is reported only when the status field
+ * reads 3, i.e. both lock bits set (single-QPLL encoding - TODO confirm).
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const uint32_t lock_bits =
+		field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock);
+
+	return lock_bits == 3;
+}
+
+/*
+ * Ready when the QPLL(s) are locked and neither the user RX nor the user TX
+ * reset is still asserted.
+ */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (!nthw_mac_pcs_xxv_is_qpll_lock(p, index))
+		return false;
+	if (nthw_mac_pcs_xxv_is_user_rx_rst(p, index))
+		return false;
+	return !nthw_mac_pcs_xxv_is_user_tx_rst(p, index);
+}
+
+/* True when auto-negotiation is enabled (ANEG_CONFIG.ENABLE). */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_fld_aneg_config_enable;
+
+	return field_get_updated(fld) != 0;
+}
+
+/* Enable/disable transmission of idle sequences (CORE_CONF.TX_SEND_IDLE). */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_idle,
+						enable);
+}
+
+/* Enable/disable FCS insertion on TX frames (CORE_CONF.TX_INS_FCS). */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_ins_fcs,
+						enable);
+}
+
+/* True when channel @index is configured for 10G (field: 0 = 25G, 1 = 10G). */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_link_speed10_g) != 0;
+}
+
+/* Select 10G (enable=true) or 25G (enable=false) link speed for channel @index. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_link_speed10_g,
+						enable);
+}
+
+/* Pulse the link-speed toggle bit: refresh the shadow value, then set+flush. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const toggle = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(toggle);
+	field_set_flush(toggle);
+}
+
+/* Enable/disable RS-FEC (RS_FEC_CONF.RS_FEC_ENABLE) on channel @index. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable,
+						enable);
+}
+
+/*
+ * Write the LED/port-control mode for channel @index
+ * (mode encoding defined by DEBOUNCE_CTRL.NT_PORT_CTRL - TODO confirm).
+ */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const port_ctrl =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(port_ctrl);
+	field_set_val_flush32(port_ctrl, mode);
+}
+
+/* Assert/deassert the RX MAC/PCS sub-reset (SUB_RST.RX_MAC_PCS). */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_mac_pcs,
+						enable);
+}
+
+/* Assert/deassert the TX MAC/PCS sub-reset (SUB_RST.TX_MAC_PCS). */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_mac_pcs,
+						enable);
+}
+
+/*
+ * Re-read the RS-FEC corrected (CCW) and uncorrected (UCW) codeword counter
+ * registers for channel @index, logging any non-zero counts.
+ * NOTE(review): presumably the counters are clear-on-read so the
+ * register_update() itself performs the "reset" - confirm against the
+ * register specification.
+ *
+ * Fix: each counter field was evaluated twice (once in the condition, once in
+ * the log argument); the value is now read once into a local.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	uint32_t ccw_cnt;
+	uint32_t ucw_cnt;
+
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	ccw_cnt = field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt);
+	ucw_cnt = field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt);
+
+	if (ccw_cnt)
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no, ccw_cnt);
+	if (ucw_cnt)
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no, ucw_cnt);
+}
+
+/* Program the RX timestamp compensation delay for channel @index. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_field_timestamp_comp_rx_dly;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, rx_dly);
+}
+
+/* Program the TX timestamp compensation delay for channel @index. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const fld = p->regs[index].mp_field_timestamp_comp_tx_dly;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, tx_dly);
+}
+
+/* Enable/disable timestamping at end-of-packet (CORE_CONF.TS_AT_EOP). */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_ts_at_eop,
+						enable);
+}
+
+/* Program the GTY TX differential swing control (GTY_DIFF_CTL.TX_DIFF_CTL). */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, value);
+}
+
+/* Program the GTY TX pre-cursor emphasis (GTY_PRE_CURSOR.TX_PRE_CSR). */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, value);
+}
+
+/* Program the GTY TX post-cursor emphasis (GTY_POST_CURSOR.TX_POST_CSR). */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_field_gty_post_cursor_tx_post_csr;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, value);
+}
+
+/* Enable/disable link training (LT_CONF.ENABLE) on channel @index. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_lt_conf_enable,
+						enable);
+}
+
+/* Set/clear the FEC91 request bit advertised during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec91_request,
+						enable);
+}
+
+/* Set/clear the RS-FEC request bit advertised during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_rs_fec_request,
+						enable);
+}
+
+/* Set/clear the FEC74 request bit advertised during auto-negotiation. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec74_request,
+						enable);
+}
+
+/* Enable/disable auto-negotiation (ANEG_CONFIG.ENABLE) on channel @index. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_enable,
+						enable);
+}
+
+/* Enable/disable auto-negotiation bypass (ANEG_CONFIG.BYPASS) on channel @index. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_bypass,
+						enable);
+}
+
+/*
+ * Configure the direct-attach-copper (DAC) mode of channel @index.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: auto-negotiation and link
+ * training are disabled/bypassed and the MAC/PCS and GT data paths are
+ * cycled through reset (assert all, then deassert all).
+ * NOTE(review): the assert/deassert ordering below is deliberate; keep the
+ * full assert phase before any deassert.
+ * Any other mode trips the assert - implement additional DAC modes there.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		/* Assert all resets... */
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		/* ...then release them in the same order */
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+/* Latched-low FEC74 lock status from LINK_SUMMARY (LL_RX_FEC74_LOCK). */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(fld) != 0;
+}
+
+/* Latched-low RS-FEC lane alignment status from LINK_SUMMARY. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const fld =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(fld) != 0;
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Front-panel LED behavior selector for a MAC/PCS XXV port. */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,	/* automatic (hardware-driven) */
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,	/* forced on */
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,	/* forced off */
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,	/* port-identification mode */
+};
+
+/*
+ * Direct-attach-copper (DAC) equalization mode.
+ * N/S/L presumably denote 25G copper-cable reach classes (normal/short/long)
+ * -- confirm against the FPGA/hardware documentation.
+ */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,	/* DAC mode disabled */
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Instance state for one MAC_PCS_XXV FPGA module.
+ *
+ * Holds the FPGA/module handles plus, per sub-module (channel), cached
+ * register and field handles so runtime accessors need no repeated lookups.
+ * The per-channel handles are filled in by nthw_mac_pcs_xxv_init().
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned here) */
+	nt_module_t *mp_mod_mac_pcs_xxv;	/* MAC_PCS_XXV module instance */
+	int mn_instance;	/* module instance number */
+
+	uint8_t m_port_no;	/* physical port number served by this instance */
+
+/* Maximum number of sub-modules/channels a single instance can expose. */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_RD_TG instance.
+ * Returns NULL on allocation failure; free with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-initializes in one call (was malloc + memset). */
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/* Release a PCI_RD_TG instance; NULL is accepted and ignored. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before release so stale pointers fail fast. */
+	memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+	free(p);
+}
+
+/*
+ * Bind register/field handles for one PCI_RD_TG (PCIe read traffic
+ * generator) module instance.
+ *
+ * p may be NULL: then this only probes for the module and returns
+ * 0 if present, -1 if not, without touching any state.
+ * Returns 0 on success, -1 if the requested instance does not exist.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	/* Product parameter: whether the TA/TG test logic is present (default 1). */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* RDDATA0: low 32 bits of the DMA physical address. */
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	/* RDDATA1: high 32 bits of the DMA physical address. */
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	/* RDDATA2: request size and wait/wrap control. */
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	/* RDADDR: generator RAM entry selector. */
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	/* RD_RUN: iteration count that starts the generator. */
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	/* CTRL: ready flag. */
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as two 32-bit register writes.
+ *
+ * Note: the previous mask "(1UL << 32) - 1" is undefined behavior on
+ * targets where unsigned long is 32 bits (shift count >= type width);
+ * masking with a 32-bit-safe constant and a plain right shift is exact.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the generator RAM entry that subsequent data writes will target. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one RAM entry (request size, wait, wrap) in the shadow register,
+ * then flush. All three fields live in the same register, so the single
+ * flush on the last field presumably commits them in one write -- this
+ * matches the set-then-flush pattern used throughout the nthw layer.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Write the iteration count, which starts the read traffic generator. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read back (with hardware refresh) the generator's ready flag. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTHW_PCI_RD_TG_H__: identifiers containing "__"
+ * are reserved for the implementation (C11 7.1.3); style now matches
+ * nthw_mac_pcs_xxv.h.
+ */
+#ifndef NTHW_PCI_RD_TG_H_
+#define NTHW_PCI_RD_TG_H_
+
+/*
+ * PCIe read traffic generator (PCI_RD_TG) module wrapper.
+ * NOTE(review): relies on nt_fpga_t/nt_module_t/nt_register_t/nt_field_t
+ * being declared by an earlier include in the translation unit -- confirm
+ * whether nthw_fpga_model.h should be included here directly.
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
+	nt_module_t *mp_mod_pci_rd_tg;	/* bound module instance */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* product param: TA/TG logic present */
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* optional; may be NULL */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* NTHW_PCI_RD_TG_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI_TA instance.
+ * Returns NULL on allocation failure; free with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc() zero-initializes in one call (was malloc + memset). */
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/* Release a PCI_TA instance; NULL is accepted and ignored. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before release so stale pointers fail fast. */
+	memset(p, 0, sizeof(nthw_pci_ta_t));
+	free(p);
+}
+
+/*
+ * Bind register/field handles for one PCI_TA (PCIe test analyzer) module.
+ *
+ * p may be NULL: then this only probes for the module and returns
+ * 0 if present, -1 if not, without touching any state.
+ * Returns 0 on success, -1 if the requested instance does not exist.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	/* Product parameter: whether the TA/TG test logic is present (default 1). */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* CONTROL: analyzer enable. */
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	/* Counters: good packets, bad packets, length errors, payload errors. */
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Enable (non-zero) or disable (0) the PCIe test analyzer. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Fetch the refreshed good-packet counter into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Fetch the refreshed bad-packet counter into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Fetch the refreshed length-error counter into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Fetch the refreshed payload-error counter into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTHW_PCI_TA_H__: identifiers containing "__"
+ * are reserved for the implementation (C11 7.1.3); style now matches
+ * nthw_mac_pcs_xxv.h.
+ */
+#ifndef NTHW_PCI_TA_H_
+#define NTHW_PCI_TA_H_
+
+/*
+ * PCIe test analyzer (PCI_TA) module wrapper: counters for good/bad
+ * packets, length errors and payload errors.
+ * NOTE(review): relies on nt_fpga_t/nt_module_t/nt_register_t/nt_field_t
+ * being declared by an earlier include in the translation unit.
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
+	nt_module_t *mp_mod_pci_ta;	/* bound module instance */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* product param: TA/TG logic present */
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* NTHW_PCI_TA_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI_WR_TG instance.
+ * Returns NULL on allocation failure; free with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc() zero-initializes in one call (was malloc + memset). */
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/* Release a PCI_WR_TG instance; NULL is accepted and ignored. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before release so stale pointers fail fast. */
+	memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+	free(p);
+}
+
+/*
+ * Bind register/field handles for one PCI_WR_TG (PCIe write traffic
+ * generator) module instance.
+ *
+ * p may be NULL: then this only probes for the module and returns
+ * 0 if present, -1 if not, without touching any state.
+ * Returns 0 on success, -1 if the requested instance does not exist.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	/* Product parameter: whether the TA/TG test logic is present (default 1). */
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	/* WRDATA0: low 32 bits of the DMA physical address. */
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	/* WRDATA1: high 32 bits of the DMA physical address. */
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	/* WRDATA2: request size plus increment/wait/wrap control. */
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	/* WRADDR: generator RAM entry selector. */
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	/* WR_RUN: iteration count that starts the generator. */
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	/* CTRL: ready flag. */
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	/* SEQ: sequence value (write-generator specific, no RD_TG analogue). */
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as two 32-bit register writes.
+ *
+ * Note: the previous mask "(1UL << 32) - 1" is undefined behavior on
+ * targets where unsigned long is 32 bits (shift count >= type width);
+ * masking with a 32-bit-safe constant and a plain right shift is exact.
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xFFFFFFFFULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the generator RAM entry that subsequent data writes will target. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage one RAM entry (request size, wait, wrap, increment mode) in the
+ * shadow register, then flush. All four fields live in the same register,
+ * so the single flush on the last field presumably commits them in one
+ * write -- this matches the set-then-flush pattern used throughout nthw.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Write the iteration count, which starts the write traffic generator. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read back (with hardware refresh) the generator's ready flag. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTHW_PCI_WR_TG_H__: identifiers containing "__"
+ * are reserved for the implementation (C11 7.1.3); style now matches
+ * nthw_mac_pcs_xxv.h.
+ */
+#ifndef NTHW_PCI_WR_TG_H_
+#define NTHW_PCI_WR_TG_H_
+
+/*
+ * PCIe write traffic generator (PCI_WR_TG) module wrapper.
+ * NOTE(review): relies on nt_fpga_t/nt_module_t/nt_register_t/nt_field_t
+ * being declared by an earlier include in the translation unit.
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned) */
+	nt_module_t *mp_mod_pci_wr_tg;	/* bound module instance */
+	int mn_instance;	/* module instance number */
+
+	int mn_param_pci_ta_tg_present;	/* product param: TA/TG logic present */
+
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* optional; may be NULL */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* NTHW_PCI_WR_TG_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* Allocate a zero-initialized PCIe3 handle; returns NULL on OOM. */
+	nthw_pcie3_t *p_dev = calloc(1, sizeof(nthw_pcie3_t));
+
+	return p_dev;
+}
+
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the handle before release to help catch use-after-free. */
+	memset(p, 0, sizeof(nthw_pcie3_t));
+	free(p);
+}
+
+/*
+ * Bind a PCIe3 handle to FPGA module instance @n_instance and cache all
+ * register/field handles.  When @p is NULL the call only probes for the
+ * module (returns 0 if present, -1 if not).  Returns -1 if the instance
+ * does not exist, 0 on success.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/*
+	 * NOTE(review): p0 is re-enabled and p1 re-cleared after both were
+	 * cleared above - presumably "DMA on endpoint 0 only"; confirm the
+	 * second clear of p1 is intentional and not a copy/paste of p0's set.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Latch the PCIe statistics counters by writing the SAMPLE_TIME register. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	/*
+	 * 0xfee1dead is a recognizable magic marker; presumably any write
+	 * triggers the sample - TODO confirm against the register spec.
+	 */
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection and request an update; always returns 0. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	/* one flush writes both ENA and REQ (same CTRL register) */
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection while still requesting a final update. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	/* one flush writes both ENA and REQ (same CTRL register) */
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read all raw PCIe statistics counters into the caller-provided out
+ * parameters (all must be non-NULL).  The TG unit size and reference
+ * frequency are compile-time constants.  Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw PCIe statistics counters into rates normalized over the
+ * sample window (ref-clock ticks).  Bus utilization is reported in parts
+ * per million.  When no ref-clock ticks were counted, the clk/util outputs
+ * are zeroed and the rate/tag outputs are left untouched.  Returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t n_rx, n_tx, n_ref_clk;
+	uint32_t n_unit_size, n_ref_freq;
+	uint32_t n_tags, n_rq_rdy, n_rq_vld;
+
+	nthw_pcie3_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size,
+			  &n_ref_freq, &n_tags, &n_rq_rdy, &n_rq_vld);
+
+	if (n_ref_clk == 0) {
+		/* Empty sample window - nothing to normalize against. */
+		*p_ref_clk_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+		return 0;
+	}
+
+	/* bytes/sec = count * unit_size * ref_freq / elapsed ref ticks */
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) /
+			(uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) /
+			(uint64_t)n_ref_clk;
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_tag_use_cnt = n_tags;
+
+	/* utilization in ppm of the sample window */
+	*p_pci_nt_bus_util =
+		((uint64_t)n_rq_vld * 1000000ULL) / (uint64_t)n_ref_clk;
+	*p_pci_xil_bus_util =
+		((uint64_t)n_rq_rdy * 1000000ULL) / (uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/* Pre-sample hook: intentionally a no-op, kept for interface symmetry. */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	(void)p;
+	(void)epc;
+
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	return 0;
+}
+
+/* Post-sample hook: fill @epc with the rates computed from the counters. */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	/*
+	 * NOTE(review): the PCI RX rate lands in epc->cur_tx and the TX rate
+	 * in epc->cur_rx - presumably a deliberate endpoint-vs-host naming
+	 * flip; confirm against the nthw_hif_end_point_counters consumers.
+	 */
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Cached register/field handles for the PCIE3 FPGA module, filled in by
+ * nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+	nt_module_t *mp_mod_pcie3;	/* PCIE3 module handle */
+	int mn_instance;		/* module instance number */
+
+	/* statistics control (enable + update-request) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference-clock tick counter for the sample window */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port to end-point error signalling */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* end-point to root-port error signalling */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* endpoint messaging and per-endpoint DMA allow masks */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* marker address (LSB/MSB), zeroed at init */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/*
+	 * NOTE(review): test2/test3 below are never initialized by
+	 * nthw_pcie3_init() in this patch - they stay NULL (calloc'd);
+	 * confirm whether they are used elsewhere or are dead fields.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* Allocate a zero-initialized SDC handle; returns NULL on OOM. */
+	nthw_sdc_t *p_dev = calloc(1, sizeof(nthw_sdc_t));
+
+	return p_dev;
+}
+
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the handle before release to help catch use-after-free. */
+	memset(p, 0, sizeof(nthw_sdc_t));
+	free(p);
+}
+
+/*
+ * Bind an SDC handle to FPGA module instance @n_instance and cache all
+ * register/field handles.  When @p is NULL the call only probes for the
+ * module (returns 0 if present, -1 if not).  Returns -1 if the instance
+ * does not exist, 0 on success.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* CTRL: init/test/stop control bits */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* STAT: calibration / lock / reset status bits */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* cell counter and fill-level registers */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Append the current value of @p_fld (masked to its bit width) to *pn_mask
+ * and report whether the field matches the expected state (all bits set
+ * when @b_expect_set is non-zero, all bits clear otherwise).
+ * Returns 1 on mismatch, 0 on match.
+ */
+static int nthw_sdc_state_field(nt_field_t *p_fld, uint64_t *pn_mask,
+				int b_expect_set)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	/* 64-bit shift avoids undefined behavior for 32-bit-wide fields */
+	const uint32_t val_mask = (uint32_t)(((uint64_t)1 << n_val_width) - 1);
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	return val != (b_expect_set ? val_mask : 0);
+}
+
+/*
+ * Collect the SDC status fields (calib, init_done, mmcm_lock, pll_lock,
+ * resetting - in that order, MSB first) into *pn_result_mask and count how
+ * many are not in their expected state.
+ * Returns -1 on NULL arguments, otherwise the error count (0 = all ok).
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	n_err_cnt += nthw_sdc_state_field(p->mp_fld_stat_calib, &n_mask, 1);
+	n_err_cnt += nthw_sdc_state_field(p->mp_fld_stat_init_done, &n_mask, 1);
+	n_err_cnt += nthw_sdc_state_field(p->mp_fld_stat_mmcm_lock, &n_mask, 1);
+	n_err_cnt += nthw_sdc_state_field(p->mp_fld_stat_pll_lock, &n_mask, 1);
+	/* resetting is the only field expected to be zero */
+	n_err_cnt += nthw_sdc_state_field(p->mp_fld_stat_resetting, &n_mask, 0);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until calibration, init-done and both clock locks are set and the
+ * resetting flag is clear, each bounded by @n_poll_iterations polls of
+ * @n_poll_interval.  Returns the number of fields that timed out
+ * (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int n_err_cnt = 0;
+
+	if (field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_err_cnt++;
+
+	if (field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_err_cnt++;
+
+	if (field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_err_cnt++;
+
+	if (field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_err_cnt++;
+
+	/* resetting must de-assert, unlike the flags above */
+	if (field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Cached register-field handles for the SDC module, filled in by
+ * nthw_sdc_init().  NOTE(review): "SDC" expansion not stated in this patch
+ * - presumably the SDRAM/memory calibration controller given the calib and
+ * fill-level fields; confirm.
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_sdc;	/* SDC module handle */
+	int mn_instance;	/* module instance number */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* cell counter and fill-level fields */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* Allocate a zero-initialized Si5340 handle; returns NULL on OOM. */
+	nthw_si5340_t *p_dev = calloc(1, sizeof(nthw_si5340_t));
+
+	return p_dev;
+}
+
+/*
+ * Bind a Si5340 handle to the I2C bus controller @p_nthw_iic at 7-bit
+ * address @n_iic_addr and select register page 0 on the device.
+ * Always returns 0.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1; /* no clock profile configured yet */
+
+	/* start on page 0 so the cached page matches the device */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the handle before release to help catch use-after-free. */
+	memset(p, 0, sizeof(nthw_si5340_t));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	/* 16-bit register address = (page << 8) | offset */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache the selected page */
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	/* 16-bit register address = (page << 8) | offset */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache the selected page */
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	/* always succeeds; I2C errors are not propagated here */
+	return 0;
+}
+
+/*
+ * Write a clock-profile table of @data_cnt (address, value) entries to the
+ * device, verifying each write by reading it back.  @data_format selects
+ * between the two supported profile record layouts.
+ * Returns 0 on success, -1 on unknown format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* decode one record and advance p_data by its true size */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and then poll (up to 5 times, 1 s apart) until the
+ * device reports locked/calibrated status with no sticky errors.  Finally
+ * reads back the 8-character design-id string for debug logging.
+ * Returns 0 on success, -1 if lock is not achieved.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	/* NOTE(review): cfg errors are deliberately ignored here - the lock
+	 * poll below is the actual success criterion; confirm intended.
+	 */
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		/* clear the sticky flags after reading them */
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		/* bits 0x09: presumably SYSINCAL | LOL - confirm vs datasheet */
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* read the NUL-terminated design id from registers 0x026B.. */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a clock profile in format-1 record layout. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a clock profile in format-2 record layout. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* State for one Si5340 clock synthesizer attached via I2C. */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C bus controller handle */
+	int mn_clk_cfg;		/* selected clock profile; -1 = none yet */
+	uint8_t m_si5340_page;	/* cached device register page */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* Zero-initialized so delete() is safe on a partially set-up handle. */
+	nthw_spi_v3_t *p_dev = calloc(1, sizeof(nthw_spi_v3_t));
+
+	return p_dev;
+}
+
+/* Tear down the owned SPIM/SPIS sub-modules, then free the handle itself. */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p) {
+		if (p->mp_spim_mod) {
+			nthw_spim_delete(p->mp_spim_mod);
+			p->mp_spim_mod = NULL;
+		}
+
+		if (p->mp_spis_mod) {
+			nthw_spis_delete(p->mp_spis_mod);
+			p->mp_spis_mod = NULL;
+		}
+
+		/* scrub before release to help catch use-after-free */
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+		free(p);
+	}
+}
+
+/*
+ * Set the transfer timeout.  Units are those of
+ * NT_OS_GET_TIME_MONOTONIC_COUNTER() deltas (see the wait helpers) -
+ * presumably seconds given the init default of 1; confirm.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI container protocol version implemented here (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ */
+/*
+ * Poll (1 ms period) until the SPIM Tx FIFO drains, bounded by @time_out
+ * monotonic-counter units.  Returns 0 when empty, -1 on timeout, or the
+ * underlying error code on a failed FIFO query.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	const uint64_t t_start = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	for (;;) {
+		bool b_empty;
+		int rc;
+
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		rc = nthw_spim_get_tx_fifo_empty(p_spim_mod, &b_empty);
+		if (rc != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return rc;
+		}
+
+		if (b_empty)
+			return 0;
+
+		if ((NT_OS_GET_TIME_MONOTONIC_COUNTER() - t_start) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+}
+
+/*
+ * Wait until Rx data have been received.
+ */
+/*
+ * Poll (10 ms period) until the SPIS Rx FIFO has data, bounded by
+ * @time_out monotonic-counter units.  Returns 0 when data is ready, -1 on
+ * timeout, or the underlying error code on a failed FIFO query.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	const uint64_t t_start = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	for (;;) {
+		bool b_empty;
+		int rc;
+
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		rc = nthw_spis_get_rx_fifo_empty(p_spis_mod, &b_empty);
+		if (rc != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return rc;
+		}
+
+		if (!b_empty)
+			return 0;
+
+		if ((NT_OS_GET_TIME_MONOTONIC_COUNTER() - t_start) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Debug helper: log @count bytes as hex, 16 bytes per output line. */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	char line_buf[128];
+	int n_in_line = 0;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		sprintf(&line_buf[n_in_line * 3], "%02X ", *(p_data++));
+		n_in_line++;
+
+		if (n_in_line == 16 || i == count - 1) {
+			/* drop the trailing space before logging the line */
+			line_buf[n_in_line * 3 - 1] = '\0';
+			NT_LOG(DBG, NTHW, "    %s\n", line_buf);
+			n_in_line = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Create, initialize and reset the SPIM (master) and SPIS (slave) modules
+ * backing this SPI v3 channel.  Initialization continues past individual
+ * failures (each is logged); the last failing step's code is returned,
+ * 0 on full success.
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* fixed: previously mis-reported as nthw_spis_init */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* fixed: previously mis-reported as nthw_spim_init */
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ */
+/*
+ * Execute one SPI v3 request/response exchange: wrap @tx_buf in a
+ * (opcode, size) header, push it through the SPIM Tx FIFO, then read the
+ * (error_code, size) response header and payload back from the SPIS Rx
+ * FIFO into @rx_buf.  Words on the wire are big-endian.
+ *
+ * On entry rx_buf->size is the payload capacity; on exit it is the number
+ * of bytes received.  Returns 0 on success, -1 when the peer reports an
+ * error, 1 when the response exceeds the capacity, or a propagated FIFO /
+ * timeout error code.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	/* wire headers: one 32-bit word each, sent/received big-endian */
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* stage the payload, one 32-bit big-endian word at a time */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				/* final partial word: zero-pad the tail */
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0; /* becomes the received byte count */
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes accepted by the on-board AVR over the SPI v3 protocol */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+/* Field sizes of the Gen2 VPD EEPROM layout below */
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+/* 1 byte hw version + all fields + 2 byte CRC16 */
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM Vital Product Data layout (Gen2 boards) */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER ne overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (14bytes ascii) (e.g. "ntmainb1e2" or "ntfront20b1") */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identification, filled from VPD/sysinfo replies */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1]; /* NUL-terminated copies of the raw VPD fields */
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Generic buffer descriptor used for both directions of a transfer */
+struct tx_rx_buf {
+	uint16_t size;
+	void *p_buf;
+};
+
+/* SPI v3 channel state: an SPI master/slave module pair plus a timeout */
+struct nthw__spi__v3 {
+	int m_time_out; /* passed to wait_for_rx_data_ready() during transfers */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/* Allocate a zero-initialized SPIM instance; returns NULL on allocation failure. */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc yields the same zeroed result as malloc+memset */
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Bind a SPIM instance to FPGA module MOD_SPIM #n_instance and cache all
+ * register/field handles. Returns 0 on success, -1 if the module instance
+ * does not exist. With p == NULL the call only probes for module presence.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	/* Probe-only mode: report whether the module exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* Control register: loopback, enable, TX/RX FIFO resets */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* Status register: completion flag and FIFO empty/full/level indicators */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* Data transmit register (TX FIFO write port) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	/* Data receive register (RX FIFO read port) */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	/* Configuration register (prescaler) */
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Release a SPIM instance. The struct is scrubbed before free so stale
+ * handles cannot be reused; NULL is accepted and ignored.
+ */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (p == NULL)
+		return;
+	memset(p, 0, sizeof(nthw_spim_t));
+	free(p);
+}
+
+/*
+ * Issue a software reset of the SPIM core by writing the documented magic
+ * value to the SRR.RST field. Always returns 0.
+ */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIM core via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_cr_en;
+
+	/* Refresh the shadow register, modify EN, then write back */
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_all(p_fld_en);
+	else
+		field_clr_all(p_fld_en);
+	field_flush_register(p_fld_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM TX FIFO. Always returns 0. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/*
+ * Read the SR.TXEMPTY flag into *pb_empty. pb_empty must not be NULL.
+ * Always returns 0.
+ */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	const uint32_t n_txempty = field_get_updated(p->mp_fld_sr_txempty);
+
+	*pb_empty = (n_txempty != 0);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/*
+ * SPI master (SPIM) module context: cached handles for the module's
+ * registers and fields, populated by nthw_spim_init().
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR - software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control (loopback, enable, FIFO resets) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR - status (done flag, FIFO empty/full/levels) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR - data transmit (TX FIFO write port) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR - data receive (RX FIFO read port) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	/* CFG - configuration (prescaler) */
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/* Allocate a zero-initialized SPIS instance; returns NULL on allocation failure. */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc yields the same zeroed result as malloc+memset */
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Bind a SPIS instance to FPGA module MOD_SPIS #n_instance and cache all
+ * register/field handles. Returns 0 on success, -1 if the module instance
+ * does not exist. With p == NULL the call only probes for module presence.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	/* Probe-only mode: report whether the module exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* Control register: loopback, enable, FIFO resets, debug */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* Status register: done flag, FIFO levels and error indicators */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* Data transmit register (TX FIFO write port) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	/* Data receive register (RX FIFO read port) */
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* Sensor RAM access: control (address/count) and data registers */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/*
+ * Release a SPIS instance. The struct is scrubbed before free so stale
+ * handles cannot be reused; NULL is accepted and ignored.
+ */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spis_t));
+		free(p);
+	}
+}
+
+/*
+ * Issue a software reset of the SPIS core by writing the documented magic
+ * value to the SRR.RST field. Always returns 0.
+ */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Enable or disable the SPIS core via the CR.EN field.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_cr_en;
+
+	/* Refresh the shadow register, modify EN, then write back */
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_all(p_fld_en);
+	else
+		field_clr_all(p_fld_en);
+	field_flush_register(p_fld_en);
+
+	return 0;
+}
+
+/*
+ * Read the SR.RXEMPTY flag into *pb_empty. pb_empty must not be NULL.
+ * Always returns 0.
+ */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) ? true : false;
+
+	return 0;
+}
+
+/*
+ * Pop one 32-bit word from the SPIS RX FIFO into *p_data.
+ * p_data must not be NULL. Always returns 0.
+ */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS sensor RAM.
+ * Writes the address (n_result_idx) and a count of 1 to the RAM control
+ * register, then reads the data register into *p_sensor_result.
+ * p_sensor_result must not be NULL. Always returns 0.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/*
+ * SPI slave (SPIS) module context: cached handles for the module's
+ * registers and fields, populated by nthw_spis_init().
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR - software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control (loopback, enable, FIFO resets, debug) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR - status (done flag, FIFO levels, frame/read/write errors) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR - data transmit (TX FIFO write port) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR - data receive (RX FIFO read port) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* RAM_CTRL/RAM_DATA - sensor result RAM access */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/* Allocate a zero-initialized TSM instance; returns NULL on allocation failure. */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc yields the same zeroed result as malloc+memset */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/*
+ * Release a TSM instance. The struct is scrubbed before free so stale
+ * handles cannot be reused; NULL is accepted and ignored.
+ */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_tsm_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TSM instance to FPGA module MOD_TSM #n_instance and cache all
+ * register/field handles. Returns 0 on success, -1 if the module instance
+ * does not exist. With p == NULL the call only probes for module presence.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	/* Probe-only mode: report whether the module exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse each looked-up register handle instead of querying it twice */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit TS value (TS_HI:TS_LO) into *p_ts.
+ * Returns 0 on success, -1 if p_ts is NULL.
+ * NOTE(review): the two 32-bit halves are read separately, so the combined
+ * value is not an atomic snapshot unless the hardware latches HI on the LO
+ * read - confirm against the TSM register documentation.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The early NULL check above makes a second "if (p_ts)" redundant */
+	*p_ts = ((((uint64_t)n_ts_hi) << 32UL) | n_ts_lo);
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit time value (TIME_HI seconds : TIME_LO ns) into *p_time.
+ * Returns 0 on success, -1 if p_time is NULL.
+ * NOTE(review): the two 32-bit halves are read separately, so the combined
+ * value is not an atomic snapshot unless the hardware latches HI on the LO
+ * read - confirm against the TSM register documentation.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The early NULL check above makes a second "if (p_time)" redundant */
+	*p_time = ((((uint64_t)n_time_hi) << 32UL) | n_time_lo);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit time: low 32 bits to TIME_LO, high 32 bits to TIME_HI.
+ * The two halves are flushed separately (LO first). Always returns 0.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	field_set_val_flush32(p->mp_fld_time_lo, (n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/* Enable/disable timer T0 via TIMER_CTRL.TIMER_EN_T0. Always returns 0. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_timer_ctrl_timer_en_t0;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_flush(p_fld_en);
+	else
+		field_clr_flush(p_fld_en);
+
+	return 0;
+}
+
+/*
+ * Set the max-count (period) of timer T0, in nanoseconds.
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable/disable timer T1 via TIMER_CTRL.TIMER_EN_T1. Always returns 0. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	nt_field_t *const p_fld_en = p->mp_fld_timer_ctrl_timer_en_t1;
+
+	field_update_register(p_fld_en);
+	if (b_enable)
+		field_set_flush(p_fld_en);
+	else
+		field_clr_flush(p_fld_en);
+
+	return 0;
+}
+
+/*
+ * Set the max-count (period) of timer T1, in nanoseconds.
+ * Always returns 0.
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/*
+ * Select the timestamp format via CONFIG.TS_FORMAT. Always returns 0.
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * Time Stamp Module (TSM) context: cached handles for the module's
+ * registers and fields, populated by nthw_tsm_init().
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	/* CONFIG.TS_FORMAT - timestamp format selector */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL - per-timer enable bits */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	/* TIMER_T0.MAX_COUNT - timer T0 period */
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	/* TIMER_T1.MAX_COUNT - timer T1 period */
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* TS_LO/TS_HI - 64-bit TS value, split in two 32-bit halves */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO/TIME_HI - 64-bit time value, split in two 32-bit halves */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/* Allocate a zero-initialized DBS instance; returns NULL on allocation failure. */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc yields the same zeroed result as malloc+memset */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/*
+ * Release a DBS instance. The struct is scrubbed before free so stale
+ * handles cannot be reused; NULL is accepted and ignored.
+ */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_dbs_t));
+		free(p);
+	}
+}
+
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the DBS RX control register to its power-on defaults: all RX
+ * queues disabled, last-queue 0, monitors off.  Field writes only reach
+ * hardware at register_flush().
+ * NOTE(review): scan speed 8 / update speed 5 are the mirror image of the
+ * TX defaults below (5 / 8) -- presumably intentional tuning; confirm
+ * against the DBS register specification.
+ * Always returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset the DBS TX control register to its power-on defaults: all TX
+ * queues disabled, last-queue 0, monitors off.  Field writes only reach
+ * hardware at register_flush().
+ * NOTE(review): default speeds (scan 5 / update 8) are swapped relative
+ * to the RX defaults -- presumably intentional; confirm.
+ * Always returns 0.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: reset the RX and TX control registers, then zero every
+ * per-queue shadow entry (AM, UW, DR, QP, QOS) and flush each cleared
+ * entry to hardware so driver state and FPGA state agree.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the RX control register: last active queue, availability-monitor
+ * enable/scan speed, used-writer enable/update speed and the global RX
+ * queues enable.  All field writes take effect at register_flush().
+ * NOTE(review): the DBS_PRINT_REGS debug path uses printf() while the
+ * rest of this file logs through NT_LOG -- consider unifying.
+ * Always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX control settings into the caller's out-parameters.
+ * Values come from field_get_val32() -- presumably the cached field
+ * values rather than a fresh bus read; confirm against the register API.
+ * Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	const uint32_t lq = field_get_val32(p->mp_fld_rx_control_last_queue);
+	const uint32_t am_ena = field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	const uint32_t am_spd = field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	const uint32_t uw_ena = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	const uint32_t uw_spd = field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	const uint32_t q_ena = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+
+	*last_queue = lq;
+	*avail_monitor_enable = am_ena;
+	*avail_monitor_speed = am_spd;
+	*used_write_enable = uw_ena;
+	*used_write_speed = uw_spd;
+	*rx_queue_enable = q_ena;
+	return 0;
+}
+
+/*
+ * Program the TX control register: last active queue, availability-monitor
+ * enable/scan speed, used-writer enable/update speed and the global TX
+ * queues enable.  All field writes take effect at register_flush().
+ * Always returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Fix: tx_queue_enable was missing from the debug dump, unlike the
+	 * RX counterpart which prints rx_queue_enable.
+	 */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX control settings into the caller's out-parameters.
+ * Values come from field_get_val32() -- presumably the cached field
+ * values rather than a fresh bus read; confirm against the register API.
+ * Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	const uint32_t lq = field_get_val32(p->mp_fld_tx_control_last_queue);
+	const uint32_t am_ena = field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	const uint32_t am_spd = field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	const uint32_t uw_ena = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	const uint32_t uw_spd = field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	const uint32_t q_ena = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+
+	*last_queue = lq;
+	*avail_monitor_enable = am_ena;
+	*avail_monitor_speed = am_spd;
+	*used_write_enable = uw_ena;
+	*used_write_speed = uw_spd;
+	*tx_queue_enable = q_ena;
+	return 0;
+}
+
+/*
+ * Start (or stop) initialization of an RX queue.  When the optional
+ * RX_INIT_VAL register exists in this FPGA image, the start index and
+ * pointer are programmed and flushed first; then init/queue are written
+ * and flushed.  Always returns 0.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/* Read back the RX init register fields (init, queue, busy).  Returns 0. */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	const uint32_t init_val = field_get_val32(p->mp_fld_rx_init_init);
+	const uint32_t queue_val = field_get_val32(p->mp_fld_rx_init_queue);
+	const uint32_t busy_val = field_get_val32(p->mp_fld_rx_init_busy);
+
+	*init = init_val;
+	*queue = queue_val;
+	*busy = busy_val;
+	return 0;
+}
+
+/*
+ * Start (or stop) initialization of a TX queue.  When the optional
+ * TX_INIT_VAL register exists in this FPGA image, the start index and
+ * pointer are programmed and flushed first; then init/queue are written
+ * and flushed.  Always returns 0.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/* Read back the TX init register fields (init, queue, busy).  Returns 0. */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	const uint32_t init_val = field_get_val32(p->mp_fld_tx_init_init);
+	const uint32_t queue_val = field_get_val32(p->mp_fld_tx_init_queue);
+	const uint32_t busy_val = field_get_val32(p->mp_fld_tx_init_busy);
+
+	*init = init_val;
+	*queue = queue_val;
+	*busy = busy_val;
+	return 0;
+}
+
+/*
+ * Request idling of an RX queue.  Returns -ENOTSUP when the (optional)
+ * RX idle register is not present in this FPGA image.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle register.  field_get_updated() presumably refreshes
+ * the value from hardware -- confirm against the register API.  The queue
+ * field is not read back; *queue is always reported as 0.
+ * Returns -ENOTSUP when the idle register is absent.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_rx_idle == NULL)
+		return -ENOTSUP;
+
+	const uint32_t idle_val = field_get_updated(p->mp_fld_rx_idle_idle);
+	const uint32_t busy_val = field_get_updated(p->mp_fld_rx_idle_busy);
+
+	*idle = idle_val;
+	*queue = 0;
+	*busy = busy_val;
+	return 0;
+}
+
+/*
+ * Request idling of a TX queue.  Returns -ENOTSUP when the (optional)
+ * TX idle register is not present in this FPGA image.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle register.  field_get_updated() presumably refreshes
+ * the value from hardware -- confirm against the register API.  The queue
+ * field is not read back; *queue is always reported as 0.
+ * Returns -ENOTSUP when the idle register is absent.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (p->mp_reg_tx_idle == NULL)
+		return -ENOTSUP;
+
+	const uint32_t idle_val = field_get_updated(p->mp_fld_tx_idle_idle);
+	const uint32_t busy_val = field_get_updated(p->mp_fld_tx_idle_busy);
+
+	*idle = idle_val;
+	*queue = 0;
+	*busy = busy_val;
+	return 0;
+}
+
+/*
+ * Select the RX queue whose pointer is reported by get_rx_ptr().
+ * Returns -ENOTSUP when the (optional) RX ptr register is absent.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX queue pointer selected by set_rx_ptr_queue().  The queue
+ * field is not read back; *queue is always reported as 0.
+ * Returns -ENOTSUP when the ptr register is absent.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (p->mp_reg_rx_ptr == NULL)
+		return -ENOTSUP;
+
+	const uint32_t ptr_val = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	const uint32_t valid_val = field_get_updated(p->mp_fld_rx_ptr_valid);
+
+	*ptr = ptr_val;
+	*queue = 0;
+	*valid = valid_val;
+	return 0;
+}
+
+/*
+ * Select the TX queue whose pointer is reported by get_tx_ptr().
+ * Returns -ENOTSUP when the (optional) TX ptr register is absent.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer selected by set_tx_ptr_queue().  The queue
+ * field is not read back; *queue is always reported as 0.
+ * Returns -ENOTSUP when the ptr register is absent.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (p->mp_reg_tx_ptr == NULL)
+		return -ENOTSUP;
+
+	const uint32_t ptr_val = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	const uint32_t valid_val = field_get_updated(p->mp_fld_tx_ptr_valid);
+
+	*ptr = ptr_val;
+	*queue = 0;
+	*valid = valid_val;
+	return 0;
+}
+
+/* Select which RX availability-monitor entry the next data write targets. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/*
+ * Shadow setters: update only the driver-side copy of the RX AM entry.
+ * Hardware is written later by flush_rx_am_data().
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* NOTE(review): the nthw_dbs_ prefix is inconsistent with the sibling
+ * set_shadow_rx_am_data_* setters -- consider renaming for uniformity.
+ */
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Populate every field of the RX AM shadow entry 'index' in one call. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write the shadow copy of RX AM entry 'index' to hardware.
+ * The 64-bit guest physical address is passed as two 32-bit words via a
+ * pointer cast -- assumes the field pair matches the host's in-memory
+ * word order (TODO confirm on big-endian hosts).  The packed/int fields
+ * were obtained with register_query_field() and may be NULL on older
+ * FPGA images, hence the guards.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update shadow and hardware for RX AM entry 'index'.
+ * Returns -ENOTSUP when the AM data register is absent, else 0.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Select which TX availability-monitor entry the next data write targets. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/*
+ * Populate the TX AM shadow entry 'index'.  NOTE(review): unlike the RX
+ * counterpart this writes the fields directly instead of going through
+ * per-field setters -- consider making the two sides consistent.
+ */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Write the shadow copy of TX AM entry 'index' to hardware.  The 64-bit
+ * guest physical address is passed as two 32-bit words via a pointer
+ * cast (endianness assumption -- TODO confirm).  Optional packed/int
+ * fields may be NULL, hence the guards.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry: update shadow and hardware for TX AM entry 'index'.
+ * Returns -ENOTSUP when the AM data register is absent, else 0.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Select which RX used-writer entry the next data write targets. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/*
+ * Shadow setters: update only the driver-side copy of the RX UW entry.
+ * Hardware is written later by flush_rx_uw_data().
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored raw here; flush_rx_uw_data() decides the HW encoding. */
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Populate every field of the RX UW shadow entry 'index' in one call. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write the shadow copy of RX UW entry 'index' to hardware.
+ * The 64-bit guest physical address is passed as two 32-bit words via a
+ * pointer cast (endianness assumption -- TODO confirm).
+ * DBS >= 0.8 takes queue size as a mask (2^qs - 1), so the shadow value
+ * is presumably log2 of the ring size; older images take it verbatim.
+ * NOTE(review): (1U << queue_size) is undefined for queue_size >= 32 --
+ * relies on callers passing a sane exponent.
+ * Optional packed/int fields may be NULL, hence the guards; vec and istk
+ * are only meaningful when the int field exists.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update shadow and hardware for RX UW entry 'index'.
+ * Returns -ENOTSUP when the UW data register is absent, else 0.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which TX used-writer entry the next data write targets. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/*
+ * Shadow setters: update only the driver-side copy of the TX UW entry.
+ * Hardware is written later by flush_tx_uw_data().
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored raw here; flush_tx_uw_data() decides the HW encoding. */
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Populate every field of the TX UW shadow entry 'index' in one call. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write the shadow copy of TX UW entry 'index' to hardware.
+ * Same conventions as flush_rx_uw_data(): GPA as two 32-bit words
+ * (endianness assumption -- TODO confirm); DBS >= 0.8 takes queue size
+ * as a mask (2^qs - 1), older images take it verbatim; (1U << qs) is
+ * undefined for qs >= 32.  Optional fields (packed, int, in_order) may
+ * be NULL, hence the guards.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry: update shadow and hardware for TX UW entry 'index'.
+ * Returns -ENOTSUP when the UW data register is absent, else 0.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which RX descriptor-reader entry the next data write targets. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/*
+ * Shadow setters: update only the driver-side copy of the RX DR entry.
+ * Hardware is written later by flush_rx_dr_data().
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored raw here; flush_rx_dr_data() decides the HW encoding. */
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Populate every field of the RX DR shadow entry 'index' in one call. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write the shadow copy of RX DR entry 'index' to hardware.
+ * GPA is passed as two 32-bit words via a pointer cast (endianness
+ * assumption -- TODO confirm).  DBS >= 0.8 takes queue size as a mask
+ * (2^qs - 1); (1U << qs) is undefined for qs >= 32.  The optional packed
+ * field may be NULL, hence the guard.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry: update shadow and hardware for RX DR entry 'index'.
+ * Returns -ENOTSUP when the DR data register is absent, else 0.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
/* Cache the TX port for TX DR entry @index in the shadow bank. */
static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
{
	p->m_tx_dr_shadow[index].port = port;
}
+
/* Cache the packed-virtqueue flag for TX DR entry @index in the shadow bank. */
static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
				     uint32_t packed)
{
	p->m_tx_dr_shadow[index].packed = packed;
}
+
/*
 * Update every field of the shadow copy for TX DR entry @index.
 * The shadow is written to hardware later by flush_tx_dr_data().
 */
static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
			       uint64_t guest_physical_address, uint32_t host_id,
			       uint32_t queue_size, uint32_t port,
			       uint32_t header, uint32_t packed)
{
	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
	set_shadow_tx_dr_data_host_id(p, index, host_id);
	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
	set_shadow_tx_dr_data_header(p, index, header);
	set_shadow_tx_dr_data_port(p, index, port);
	set_shadow_tx_dr_data_packed(p, index, packed);
}
+
/*
 * Write the shadow copy of TX descriptor reader entry @index into the
 * DBS TX DR data registers and flush them to the FPGA.
 */
static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
{
	/* The 64-bit guest physical address is written as two 32-bit words.
	 * NOTE(review): this aliases a uint64_t as uint32_t[2], so the word
	 * order is host-endianness dependent — presumably fine on the
	 * little-endian targets this driver supports; confirm if porting.
	 */
	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
		     2);
	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
		       p->m_tx_dr_shadow[index].host_id);
	/* Newer DBS modules (per module_is_version_newer(0, 8)) take the
	 * queue size encoded as (1 << queue_size) - 1; older ones take the
	 * raw value unchanged.
	 */
	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
	} else {
		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
			       p->m_tx_dr_shadow[index].queue_size);
	}
	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
		       p->m_tx_dr_shadow[index].header);
	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
		       p->m_tx_dr_shadow[index].port);
	/* The PACKED field is optional; skip it when this FPGA lacks it. */
	if (p->mp_fld_tx_descriptor_reader_data_packed) {
		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
			       p->m_tx_dr_shadow[index].packed);
	}

	set_tx_dr_data_index(p, index);
	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
}
+
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
/*
 * Select which TX queue-property entry the next data-register flush
 * targets: address @index, count 1 record.
 */
static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
	register_flush(p->mp_reg_tx_queue_property_control, 1);
}
+
/* Cache the virtual port for TX QP entry @index in the shadow bank. */
static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
		uint32_t virtual_port)
{
	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
}
+
/* Update the shadow copy of TX QP entry @index (currently a single field). */
static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
			       uint32_t virtual_port)
{
	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
}
+
/*
 * Write the shadow copy of TX queue-property entry @index to hardware.
 */
static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
		       p->m_tx_qp_shadow[index].virtual_port);

	set_tx_qp_data_index(p, index);
	register_flush(p->mp_reg_tx_queue_property_data, 1);
}
+
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
/*
 * Select which TX QoS entry the next data-register flush targets:
 * address @index, count 1 record.
 * NOTE(review): despite the mp_reg_ prefix, the _adr/_cnt members are
 * nt_field_t pointers (see nthw_dbs.h).
 */
static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
	register_flush(p->mp_reg_tx_queue_qos_control, 1);
}
+
/* Cache the enable flag for TX QoS entry @index in the shadow bank. */
static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
				      uint32_t enable)
{
	p->m_tx_qos_shadow[index].enable = enable;
}
+
/* Cache the information rate (IR) for TX QoS entry @index in the shadow bank. */
static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
{
	p->m_tx_qos_shadow[index].ir = ir;
}
+
/* Cache the burst size (BS) for TX QoS entry @index in the shadow bank. */
static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
{
	p->m_tx_qos_shadow[index].bs = bs;
}
+
/*
 * Update every field of the shadow copy for TX QoS entry @index.
 * The shadow is written to hardware later by flush_tx_qos_data().
 */
static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
				uint32_t ir, uint32_t bs)
{
	set_shadow_tx_qos_data_enable(p, index, enable);
	set_shadow_tx_qos_data_ir(p, index, ir);
	set_shadow_tx_qos_data_bs(p, index, bs);
}
+
/*
 * Write the shadow copy of TX QoS entry @index to hardware.
 * NOTE(review): the _en/_ir/_bs members carry an mp_reg_ prefix but are
 * nt_field_t pointers (see nthw_dbs.h).
 */
static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);

	set_tx_qos_data_index(p, index);
	register_flush(p->mp_reg_tx_queue_qos_data, 1);
}
+
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
/*
 * Set the global TX QoS rate as the fraction mul/div.
 *
 * @return 0 on success, -ENOTSUP when the FPGA has no TX QoS rate register.
 */
int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
{
	if (!p->mp_reg_tx_queue_qos_rate)
		return -ENOTSUP;

	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
	return 0;
}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
/* DBS_RX_AM_DATA: shadow of one RX available-monitor entry. */
struct nthw_dbs_rx_am_data_s {
	uint64_t guest_physical_address;
	uint32_t enable;
	uint32_t host_id;
	uint32_t packed;
	uint32_t int_enable;
};
+
/* DBS_TX_AM_DATA: shadow of one TX available-monitor entry. */
struct nthw_dbs_tx_am_data_s {
	uint64_t guest_physical_address;
	uint32_t enable;
	uint32_t host_id;
	uint32_t packed;
	uint32_t int_enable;
};
+
/* DBS_RX_UW_DATA: shadow of one RX used-writer entry. */
struct nthw_dbs_rx_uw_data_s {
	uint64_t guest_physical_address;
	uint32_t host_id;
	uint32_t queue_size;
	uint32_t packed;
	uint32_t int_enable;
	uint32_t vec;
	uint32_t istk;
};
+
/* DBS_TX_UW_DATA: shadow of one TX used-writer entry. */
struct nthw_dbs_tx_uw_data_s {
	uint64_t guest_physical_address;
	uint32_t host_id;
	uint32_t queue_size;
	uint32_t packed;
	uint32_t int_enable;
	uint32_t vec;
	uint32_t istk;
	uint32_t in_order;
};
+
/* DBS_RX_DR_DATA: shadow of one RX descriptor-reader entry. */
struct nthw_dbs_rx_dr_data_s {
	uint64_t guest_physical_address;
	uint32_t host_id;
	uint32_t queue_size;
	uint32_t header;
	uint32_t packed;
};
+
/* DBS_TX_DR_DATA: shadow of one TX descriptor-reader entry. */
struct nthw_dbs_tx_dr_data_s {
	uint64_t guest_physical_address;
	uint32_t host_id;
	uint32_t queue_size;
	uint32_t header;
	uint32_t port;
	uint32_t packed;
};
+
/* DBS_TX_QP_DATA: shadow of one TX queue-property entry. */
struct nthw_dbs_tx_qp_data_s {
	uint32_t virtual_port;
};
+
/* Shadow of one TX QoS entry: enable flag, information rate, burst size. */
struct nthw_dbs_tx_qos_data_s {
	uint32_t enable;
	uint32_t ir;
	uint32_t bs;
};
+
/*
 * DBS module context: FPGA handles, cached register/field pointers, and
 * host-side shadow copies of every indexed register bank.  Optional
 * fields (e.g. the *_packed ones) may be NULL on FPGAs that lack them.
 */
struct nthw_dbs_s {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_dbs;
	int mn_instance;

	int mn_param_dbs_present;

	nt_register_t *mp_reg_rx_control;
	nt_field_t *mp_fld_rx_control_last_queue;
	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
	nt_field_t *mp_fld_rx_control_used_write_enable;
	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
	nt_field_t *mp_fld_rx_control_rx_queues_enable;

	nt_register_t *mp_reg_tx_control;
	nt_field_t *mp_fld_tx_control_last_queue;
	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
	nt_field_t *mp_fld_tx_control_used_write_enable;
	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
	nt_field_t *mp_fld_tx_control_tx_queues_enable;

	nt_register_t *mp_reg_rx_init;
	nt_field_t *mp_fld_rx_init_init;
	nt_field_t *mp_fld_rx_init_queue;
	nt_field_t *mp_fld_rx_init_busy;

	nt_register_t *mp_reg_rx_init_val;
	nt_field_t *mp_fld_rx_init_val_idx;
	nt_field_t *mp_fld_rx_init_val_ptr;

	nt_register_t *mp_reg_rx_ptr;
	nt_field_t *mp_fld_rx_ptr_ptr;
	nt_field_t *mp_fld_rx_ptr_queue;
	nt_field_t *mp_fld_rx_ptr_valid;

	nt_register_t *mp_reg_tx_init;
	nt_field_t *mp_fld_tx_init_init;
	nt_field_t *mp_fld_tx_init_queue;
	nt_field_t *mp_fld_tx_init_busy;

	nt_register_t *mp_reg_tx_init_val;
	nt_field_t *mp_fld_tx_init_val_idx;
	nt_field_t *mp_fld_tx_init_val_ptr;

	nt_register_t *mp_reg_tx_ptr;
	nt_field_t *mp_fld_tx_ptr_ptr;
	nt_field_t *mp_fld_tx_ptr_queue;
	nt_field_t *mp_fld_tx_ptr_valid;

	nt_register_t *mp_reg_rx_idle;
	nt_field_t *mp_fld_rx_idle_idle;
	nt_field_t *mp_fld_rx_idle_queue;
	nt_field_t *mp_fld_rx_idle_busy;

	nt_register_t *mp_reg_tx_idle;
	nt_field_t *mp_fld_tx_idle_idle;
	nt_field_t *mp_fld_tx_idle_queue;
	nt_field_t *mp_fld_tx_idle_busy;

	nt_register_t *mp_reg_rx_avail_monitor_control;
	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;

	nt_register_t *mp_reg_rx_avail_monitor_data;
	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
	nt_field_t *mp_fld_rx_avail_monitor_data_int;

	nt_register_t *mp_reg_tx_avail_monitor_control;
	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;

	nt_register_t *mp_reg_tx_avail_monitor_data;
	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
	nt_field_t *mp_fld_tx_avail_monitor_data_int;

	nt_register_t *mp_reg_rx_used_writer_control;
	nt_field_t *mp_fld_rx_used_writer_control_adr;
	nt_field_t *mp_fld_rx_used_writer_control_cnt;

	nt_register_t *mp_reg_rx_used_writer_data;
	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
	nt_field_t *mp_fld_rx_used_writer_data_host_id;
	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
	nt_field_t *mp_fld_rx_used_writer_data_packed;
	nt_field_t *mp_fld_rx_used_writer_data_int;
	nt_field_t *mp_fld_rx_used_writer_data_vec;
	nt_field_t *mp_fld_rx_used_writer_data_istk;

	nt_register_t *mp_reg_tx_used_writer_control;
	nt_field_t *mp_fld_tx_used_writer_control_adr;
	nt_field_t *mp_fld_tx_used_writer_control_cnt;

	nt_register_t *mp_reg_tx_used_writer_data;
	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
	nt_field_t *mp_fld_tx_used_writer_data_host_id;
	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
	nt_field_t *mp_fld_tx_used_writer_data_packed;
	nt_field_t *mp_fld_tx_used_writer_data_int;
	nt_field_t *mp_fld_tx_used_writer_data_vec;
	nt_field_t *mp_fld_tx_used_writer_data_istk;
	nt_field_t *mp_fld_tx_used_writer_data_in_order;

	nt_register_t *mp_reg_rx_descriptor_reader_control;
	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;

	nt_register_t *mp_reg_rx_descriptor_reader_data;
	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;

	nt_register_t *mp_reg_tx_descriptor_reader_control;
	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;

	nt_register_t *mp_reg_tx_descriptor_reader_data;
	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;

	nt_register_t *mp_reg_tx_queue_property_control;
	nt_field_t *mp_fld_tx_queue_property_control_adr;
	nt_field_t *mp_fld_tx_queue_property_control_cnt;

	nt_register_t *mp_reg_tx_queue_property_data;
	nt_field_t *mp_fld_tx_queue_property_data_v_port;

	/* NOTE(review): the members below are nt_field_t pointers but carry
	 * an mp_reg_ prefix; a rename to mp_fld_ would match the rest of the
	 * struct (requires updating nthw_dbs.c in the same change).
	 */
	nt_register_t *mp_reg_tx_queue_qos_control;
	nt_field_t *mp_reg_tx_queue_qos_control_adr;
	nt_field_t *mp_reg_tx_queue_qos_control_cnt;

	nt_register_t *mp_reg_tx_queue_qos_data;
	nt_field_t *mp_reg_tx_queue_qos_data_en;
	nt_field_t *mp_reg_tx_queue_qos_data_ir;
	nt_field_t *mp_reg_tx_queue_qos_data_bs;

	nt_register_t *mp_reg_tx_queue_qos_rate;
	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
	nt_field_t *mp_reg_tx_queue_qos_rate_div;

	/* Host-side shadows of the indexed register banks above. */
	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];

	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
};
+
typedef struct nthw_dbs_s nthw_dbs_t;

/* Lifecycle: allocate, initialize against an FPGA instance, reset, free. */
nthw_dbs_t *nthw_dbs_new(void);
void nthw_dbs_delete(nthw_dbs_t *p);
int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
void dbs_reset(nthw_dbs_t *p);

/* Global RX/TX control.  Setters return 0 or a negative errno value. */
int dbs_reset_rx_control(nthw_dbs_t *p);
int dbs_reset_tx_control(nthw_dbs_t *p);
int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
		   uint32_t used_write_enable, uint32_t used_write_speed,
		   uint32_t rx_queue_enable);
int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
			 uint32_t *avail_monitor_enable,
			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
		   uint32_t used_write_enable, uint32_t used_write_speed,
		   uint32_t tx_queue_enable);
int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
			 uint32_t *avail_monitor_enable,
			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
/* Per-queue init / idle / pointer accessors. */
int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
		uint32_t init, uint32_t queue);
int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
		uint32_t init, uint32_t queue);
int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
/* Indexed register banks: available monitor (AM), used writer (UW),
 * descriptor reader (DR), queue properties (QP), QoS.
 */
int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t enable, uint32_t host_id, uint32_t packed,
		   uint32_t int_enable);
int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t enable, uint32_t host_id, uint32_t packed,
		   uint32_t int_enable);
int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
		   uint32_t int_enable, uint32_t vec, uint32_t istk);
int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
		   uint32_t int_enable, uint32_t vec, uint32_t istk,
		   uint32_t in_order);
int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t host_id, uint32_t queue_size, uint32_t header,
		   uint32_t packed);
int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t host_id, uint32_t queue_size, uint32_t port,
		   uint32_t header, uint32_t packed);
int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
		    uint32_t bs);
int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
#endif	/* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
/* Port classification used when programming per-port hardware tables. */
typedef enum nt_meta_port_type_e {
	PORT_TYPE_PHYSICAL,
	PORT_TYPE_VIRTUAL,
	PORT_TYPE_OVERRIDE,
} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
/* Hardware identity as reported by firmware, plus VPD MAC information. */
typedef struct nthwhw_info_s {
	/* From FW */
	int hw_id;
	int hw_id_emulated;
	char hw_plat_id_str[32];

	struct vpd_info_s {
		int mn_mac_addr_count;
		uint64_t mn_mac_addr_value;
		uint8_t ma_mac_addr_octets[6];
	} vpd_info;
} nthw_hw_info_t;
+
/*
 * Per-adapter FPGA description: identity, capability counts, handles to
 * the core hardware modules, and the BAR0 mapping used for register I/O.
 */
typedef struct fpga_info_s {
	uint64_t n_fpga_ident;

	/* Decomposed FPGA identity (type/product/version/revision). */
	int n_fpga_type_id;
	int n_fpga_prod_id;
	int n_fpga_ver_id;
	int n_fpga_rev_id;

	int n_fpga_build_time;

	int n_fpga_debug_mode;

	/* Capability counts for this FPGA image. */
	int n_nims;
	int n_phy_ports;
	int n_phy_quads;
	int n_rx_ports;
	int n_tx_ports;

	enum fpga_info_profile profile;

	struct nt_fpga_s *mp_fpga;

	/* Core module handles; NULL when a module is absent. */
	struct nthw_rac *mp_nthw_rac;
	struct nthw_hif *mp_nthw_hif;
	struct nthw_pcie3 *mp_nthw_pcie3;
	struct nthw_tsm *mp_nthw_tsm;

	nthw_dbs_t *mp_nthw_dbs;
	nthw_epp_t *mp_nthw_epp;

	uint8_t *bar0_addr; /* Needed for register read/write */
	size_t bar0_size;

	int adapter_no; /* Needed for nthw_rac DMA array indexing */
	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */

	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */

	struct nthwhw_info_s nthw_hw_info;

	nthw_adapter_id_t n_nthw_adapter_id;

} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+nthw_epp_t *nthw_epp_new(void)
+{
+	nthw_epp_t *p = malloc(sizeof(nthw_epp_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_epp_t));
+	return p;
+}
+
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_epp_t));
+		free(p);
+	}
+}
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
/*
 * Bind an EPP context to module instance @n_instance of @p_fpga and cache
 * every register and field handle the driver uses.  When @p is NULL this
 * only probes for the instance.
 *
 * @return 0 on success (or when probing finds the instance),
 *         -1 when the instance does not exist.
 */
int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);

	/* Probe-only mode: report presence without touching any state. */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_epp = mod;

	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);

	/* Recipe memory (control + data registers). */
	p->mp_reg_reciepe_memory_control =
		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
	p->mp_fld_reciepe_memory_control_adr =
		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
	p->mp_fld_reciepe_memory_control_cnt =
		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);

	p->mp_reg_reciepe_memory_data =
		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);

	/* TXP (physical port) MTU table. */
	p->mp_reg_txp_port_mtu_control =
		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
	p->mp_fld_txp_port_mtu_control_adr =
		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
	p->mp_fld_txp_port_mtu_control_cnt =
		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);

	p->mp_reg_txp_port_mtu_data =
		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
	p->mp_fld_txp_port_mtu_data_max_mtu =
		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);

	/* Queue MTU table. */
	p->mp_reg_queue_mtu_control =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
	p->mp_fld_queue_mtu_control_adr =
		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
	p->mp_fld_queue_mtu_control_cnt =
		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);

	p->mp_reg_queue_mtu_data =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
	p->mp_fld_queue_mtu_data_max_mtu =
		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);

	/* TXP QoS table. */
	p->mp_reg_txp_qos_control =
		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
	p->mp_fld_txp_qos_control_adr =
		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
	p->mp_fld_txp_qos_control_cnt =
		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);

	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
	p->mp_fld_txp_qos_data_enable =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
	p->mp_fld_txp_qos_data_information_rate =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
	p->mp_fld_txp_qos_data_information_rate_fractional =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
	p->mp_fld_txp_qos_data_burst_size =
		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);

	/* Virtual port QoS table. */
	p->mp_reg_vport_qos_control =
		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
	p->mp_fld_vport_qos_control_adr =
		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
	p->mp_fld_vport_qos_control_cnt =
		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);

	p->mp_reg_vport_qos_data =
		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
	p->mp_fld_vport_qos_data_enable =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
	p->mp_fld_vport_qos_data_information_rate =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
	p->mp_fld_vport_qos_data_information_rate_fractional =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
	p->mp_fld_vport_qos_data_burst_size =
		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);

	/* Queue-to-virtual-port mapping table. */
	p->mp_reg_queue_vport_control =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
	p->mp_fld_queue_vport_control_adr =
		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
	p->mp_fld_queue_vport_control_cnt =
		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);

	p->mp_reg_queue_vport_data =
		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
	p->mp_fld_queue_vport_data_vport =
		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);

	return 0;
}
+
/*
 * Program EPP defaults: zero every recipe record, install the NRECIPE
 * size-adjust recipes, then reset the 2-entry TXP MTU/QoS tables and the
 * 128-entry queue MTU and vport QoS tables.  A NULL context is a no-op.
 *
 * @return 0 always.
 */
int nthw_epp_setup(nthw_epp_t *p)
{
	if (p == NULL)
		return 0;

	/* Each control flush below addresses a single recipe record. */
	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);

	/* Zero all categories */
	for (int i = 0; i < p->mn_epp_categories; ++i) {
		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
		register_flush(p->mp_reg_reciepe_memory_control, 1);

		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
			       0);
		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
		register_flush(p->mp_reg_reciepe_memory_data, 1);
	}

	/* Program the NRECIPE size-adjust recipes (none / VXLAN / VXLAN IPv6). */
	for (int i = 0; i < NRECIPE; ++i) {
		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
		register_flush(p->mp_reg_reciepe_memory_control, 1);

		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
			       rcp_data_size_adjust_txp[i]);
		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
			       rcp_data_size_adjust_vport[i]);
		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
		register_flush(p->mp_reg_reciepe_memory_data, 1);
	}
	/* phy mtu setup */
	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
	for (int i = 0; i < 2; ++i) {
		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
		register_flush(p->mp_reg_txp_port_mtu_control, 1);

		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
		register_flush(p->mp_reg_txp_port_mtu_data, 1);
	}
	/* phy QoS setup: disabled by default */
	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
	for (int i = 0; i < 2; ++i) {
		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
		register_flush(p->mp_reg_txp_qos_control, 1);

		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
		register_flush(p->mp_reg_txp_qos_data, 1);
	}

	/* virt mtu setup */
	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
	for (int i = 0; i < 128; ++i) {
		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
		register_flush(p->mp_reg_queue_mtu_control, 1);

		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
		register_flush(p->mp_reg_queue_mtu_data, 1);
	}

	/* virt QoS setup: disabled by default */
	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
	for (int i = 0; i < 128; ++i) {
		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
		register_flush(p->mp_reg_vport_qos_control, 1);

		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
		register_flush(p->mp_reg_vport_qos_data, 1);
	}

	return 0;
}
+
/*
 * Set the max MTU for one port in the table matching @port_type
 * (physical: TXP MTU table; virtual: queue MTU table).  Unsupported port
 * types reset both MTU register pairs.
 *
 * @return 0 on success, -ENOTSUP for an unsupported port type.
 */
int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
		   nt_meta_port_type_t port_type)
{
	if (p == NULL)
		return 0;

	if (port_type == PORT_TYPE_PHYSICAL) {
		/* Address entry @port in the TXP MTU control register */
		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
		register_flush(p->mp_reg_txp_port_mtu_control, 1);

		/* Write the new MTU to the TXP MTU data register */
		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
		register_flush(p->mp_reg_txp_port_mtu_data, 1);
	} else if (port_type == PORT_TYPE_VIRTUAL) {
		/* Address entry @port in the queue MTU control register */
		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
		register_flush(p->mp_reg_queue_mtu_control, 1);

		/* Write the new MTU to the queue MTU data register */
		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
		register_flush(p->mp_reg_queue_mtu_data, 1);
	} else {
		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
		       __func__);
		/* Return both MTU register pairs to their reset state. */
		register_reset(p->mp_reg_queue_mtu_control);
		register_flush(p->mp_reg_queue_mtu_control, 1);
		register_reset(p->mp_reg_queue_mtu_data);
		register_flush(p->mp_reg_queue_mtu_data, 1);
		register_reset(p->mp_reg_txp_port_mtu_control);
		register_flush(p->mp_reg_txp_port_mtu_control, 1);
		register_reset(p->mp_reg_txp_port_mtu_data);
		register_flush(p->mp_reg_txp_port_mtu_data, 1);

		return -ENOTSUP;
	}

	return 0;
}
+
/*
 * Program the TXP QoS entry for @port.  The entry is enabled iff any of
 * the rate/burst parameters is nonzero.  A NULL context is a no-op.
 *
 * @return 0 always.
 */
int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
		      uint32_t information_rate_fractional, uint32_t burst_size)
{
	if (p == NULL)
		return 0;

	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
	register_flush(p->mp_reg_txp_qos_control, 1);

	/* Enable shaping only when at least one parameter is nonzero. */
	uint32_t enable = ((information_rate | information_rate_fractional |
			    burst_size) != 0);
	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
		       information_rate_fractional);
	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
	register_flush(p->mp_reg_txp_qos_data, 1);

	return 0;
}
+
/*
 * Program the virtual-port QoS entry for @port.  The entry is enabled iff
 * any of the rate/burst parameters is nonzero.  A NULL context is a no-op.
 *
 * @return 0 always.
 */
int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
			uint32_t information_rate_fractional, uint32_t burst_size)
{
	if (p == NULL)
		return 0;

	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
	register_flush(p->mp_reg_vport_qos_control, 1);

	/* Enable shaping only when at least one parameter is nonzero. */
	uint32_t enable = ((information_rate | information_rate_fractional |
			    burst_size) != 0);
	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
		       information_rate_fractional);
	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
	register_flush(p->mp_reg_vport_qos_data, 1);

	return 0;
}
+
/*
 * Map queue @qid to virtual port @vport in the EPP queue/vport table.
 * A NULL context is a no-op.
 *
 * @return 0 always.
 */
int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
{
	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
	register_flush(p->mp_reg_queue_vport_control, 1);

	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
	register_flush(p->mp_reg_queue_vport_data, 1);
	return 0;
}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+#define VXLANDATASIZEADJUSTIPV6 70
+#define MTUINITVAL 1500
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+static const struct {
+	const int a;
+	const char *b;
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Map a BUS_TYPE_* id to its printable name.
+ * Returns "ERR" for any id outside the table (including id 0, which the
+ * table itself labels "ERR" for BUS_TYPE_UNKNOWN).
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	/* Valid indices are 0..ARRAY_SIZE-1; the previous '<=' bound
+	 * permitted an out-of-bounds read at n_bus_type_id == ARRAY_SIZE.
+	 */
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ */
+/*
+ * Look up the printable name for an FPGA module id.
+ * Returns "unknown" when the id is not in the table.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	/* Iterate strictly inside the array: the previous '<=' bound read
+	 * one element past the end, and a failed search then indexed
+	 * ARRAY_SIZE+1 - both out-of-bounds. The { 0, NULL } sentinel is
+	 * skipped via the NULL-name check.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if (sa_nthw_fpga_mod_map[i].b != NULL &&
+				(uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			return sa_nthw_fpga_mod_map[i].b;
+	}
+	return "unknown";
+}
+
+/*
+ * Force C linkage for xxx_addr_bases and xxx_module_versions
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Read 'len' words like read_data(), nominally capturing TSC timestamps
+ * around the access. Timestamp capture is not implemented; the TSC
+ * out-parameters exist for interface compatibility and are left untouched.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/*
+ * Allocate an FPGA manager. Returns NULL on allocation failure.
+ * Zero-allocated so any field not set by fpga_mgr_init() is never read
+ * uninitialized (fpga_new() zeroes its object too - keep them consistent).
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_mgr_t));
+}
+
+/*
+ * Destroy an FPGA manager. NULL is tolerated (no-op), matching
+ * param_delete(); the struct is scrubbed before release so stale
+ * pointers inside it cannot be reused by mistake.
+ */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_fpga_mgr_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the manager to the generated FPGA instance table and count the
+ * supported images. The table is NULL-terminated.
+ */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Find the generated init record matching the product/version/revision
+ * encoded in n_fpga_id and build an FPGA model instance from it.
+ * Returns NULL (with an error log) when no record matches.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id == n_fpga_prod &&
+				p_init->fpga_version == n_fpga_ver &&
+				p_init->fpga_revision == n_fpga_rev) {
+			nt_fpga_t *p_fpga = fpga_new();
+
+			fpga_init(p_fpga, p_init, p_fpga_info);
+			return p_fpga;
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print the supported FPGA images to fh_out.
+ * detail_level 0 prints only prod-ver-rev; anything else adds the build
+ * timestamp (raw hex and human-readable).
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/* Cast to match %08lX exactly: time_t is not
+			 * guaranteed to be 'long' on every platform, and a
+			 * mismatched printf argument type is undefined
+			 * behavior. Note ctime() output ends in '\n'.
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/*
+ * Allocate a zero-initialized FPGA model object.
+ * Returns NULL on allocation failure.
+ */
+nt_fpga_t *fpga_new(void)
+{
+	/* calloc == malloc followed by memset-to-zero, in one step. */
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/*
+ * Release an FPGA model object. Also frees the parameter and module
+ * pointer arrays allocated in fpga_init(), which were previously leaked
+ * here; free(NULL) is a no-op, so never-allocated arrays need no guard.
+ * The module/param objects themselves are owned by fpga_delete_all().
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	free(p->mpa_modules);
+	free(p->mpa_params);
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/*
+ * Deep teardown: destroy every module and parameter object, then the
+ * FPGA object itself.
+ */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	/* The nt_param_t objects are created in fpga_init() and were
+	 * previously leaked on teardown; param_delete() tolerates NULL.
+	 */
+	for (i = 0; i < p->mn_params; i++)
+		param_delete(p->mpa_params[i]);
+
+	fpga_delete(p);
+}
+
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+/*
+ * Find the module instance with the given id/instance pair.
+ * Returns NULL when the FPGA image does not contain it. Linear scan -
+ * the module list is short and this is not a hot path.
+ */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_cur = p->mpa_modules[n];
+
+		if (p_cur->m_mod_id != id || p_cur->m_instance != instance)
+			continue;
+		return p_cur;
+	}
+	return NULL;
+}
+
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return (fpga_query_module(p, id, instance) != NULL);
+}
+
+/*
+ * Search the generated module-init table for the id/instance pair.
+ * Returns NULL when the pair is absent from this FPGA image.
+ */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mp_init->nb_modules; n++) {
+		nt_fpga_module_init_t *p_cur = &p->mp_init->modules[n];
+
+		if (p_cur->id == id && p_cur->instance == instance)
+			return p_cur;
+	}
+	return NULL;
+}
+
+/*
+ * Return the value of a product parameter if the FPGA image defines it,
+ * otherwise the caller-supplied default.
+ */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int n;
+
+	for (n = 0; n < p->mn_params; n++) {
+		const nt_param_t *p_cur = p->mpa_params[n];
+
+		if (p_cur->param_id == n_param_id)
+			return p_cur->param_value;
+	}
+	return n_default_value;
+}
+
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_par = p->mpa_params[i];
+
+		param_dump(p_par);
+	}
+}
+
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		module_dump(p_mod);
+	}
+}
+
+/*
+ * Param
+ */
+/*
+ * Allocate a parameter object. Returns NULL on allocation failure.
+ * Zero-allocated so fields not set by param_init() are never read
+ * uninitialized - consistent with fpga_new().
+ */
+nt_param_t *param_new(void)
+{
+	return calloc(1, sizeof(nt_param_t));
+}
+
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/*
+ * Allocate a module object. Returns NULL on allocation failure.
+ * Zero-allocated so fields not set by module_init() are never read
+ * uninitialized - consistent with fpga_new().
+ */
+nt_module_t *module_new(void)
+{
+	return calloc(1, sizeof(nt_module_t));
+}
+
+/*
+ * Destroy a module and every register it owns.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	/* The register pointer array from module_init() was previously
+	 * leaked here; free(NULL) is a no-op so no guard is needed.
+	 */
+	free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Convenience init: look up the generated init record by id/instance and
+ * initialize the module from it, then apply the requested debug mode.
+ * NOTE(review): fpga_lookup_init() may return NULL for an unknown
+ * id/instance, which module_init() then dereferences - confirm callers
+ * only pass modules known to be in the image.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/*
+ * Return true when the module's version is at least major.minor -
+ * i.e. "newer or equal", despite the name: an exact version match
+ * returns true via the minor comparison below.
+ */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+/*
+ * Return the first register in the module with a matching id, or NULL
+ * when the module has no such register.
+ */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		if (p->mpa_registers[n]->m_id == id)
+			return p->mpa_registers[n];
+	}
+	return NULL;
+}
+
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * Record the debug mode on the module and propagate it to every
+ * register the module owns.
+ */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		nt_register_t *p_reg = p->mpa_registers[n];
+
+		if (p_reg)
+			register_set_debug_mode(p_reg, n_debug_mode);
+	}
+}
+
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/*
+ * Log that a module present in the FPGA image is not supported by this
+ * driver build.
+ * NOTE(review): the function name carries a typo ("unsuppported") and the
+ * message lacks the trailing '\n' used by other logs in this file; both
+ * are left untouched here since the name is part of the interface.
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	NT_LOG(ERR, NTHW, "Module %d not supported", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/*
+ * Allocate a register object. Returns NULL on allocation failure.
+ * Zero-allocated so pointer members (mpa_fields, mp_shadow, mp_dirty)
+ * are NULL until register_init() populates them - register_delete()
+ * relies on that.
+ */
+nt_register_t *register_new(void)
+{
+	return calloc(1, sizeof(nt_register_t));
+}
+
+/*
+ * Destroy a register, its field objects and its shadow/dirty buffers.
+ * NULL is tolerated (no-op) for symmetry with param_delete().
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	if (!p)
+		return;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+
+	/* free(NULL) is a no-op, so the old 'if' guards were redundant.
+	 * Also release the field pointer array from register_init(),
+	 * which was previously leaked.
+	 */
+	free(p->mpa_fields);
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+void register_reset(const nt_register_t *p)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_reset(p_field);
+	}
+}
+
+/*
+ * Return the first field in the register with a matching id, or NULL
+ * when the register is NULL or has no such field.
+ */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int n;
+
+	if (!p)
+		return NULL;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		if (p->mpa_fields[n]->m_id == id)
+			return p->mpa_fields[n];
+	}
+	return NULL;
+}
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/*
+ * Return the debug mode of the given MODULE.
+ * NOTE(review): despite the register_* name this takes nt_module_t and
+ * reads the module's debug mode - looks like a copy/paste of
+ * module_get_debug_mode(); confirm the intended parameter type before
+ * changing the (caller-visible) signature.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register's hardware contents into its shadow buffer.
+ * Returns the bus read's status (0 on success).
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/* The previous NULL test on 'p' ran only after 'p' had already
+	 * been dereferenced by the initializers above; assert the full
+	 * ownership chain up-front instead.
+	 */
+	assert(p && p->mp_owner && p->mp_owner->mp_owner);
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * As register_read_data(), but through the timestamp-capturing path.
+ * Returns the bus read's status (0 on success).
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/* Match register_read_data(): 'p' is dereferenced by the
+	 * initializers above, so a late NULL test is useless - validate
+	 * the ownership chain before use. This also adds the
+	 * p_fpga_info/p_data asserts the sibling functions already had.
+	 */
+	assert(p && p->mp_owner && p->mp_owner->mp_owner);
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	return read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data,
+			     p_tsc1, p_tsc2);
+}
+
+/*
+ * Write 'cnt' consecutive register-sized entries (len * cnt words) from
+ * the shadow buffer to hardware. Returns the bus write's status.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/* As in register_read_data(): the previous NULL test ran only
+	 * after 'p' had been dereferenced; assert the chain up-front.
+	 */
+	assert(p && p->mp_owner && p->mp_owner->mp_owner);
+	p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	return write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+}
+
+/*
+ * Copy up to 'len' words of the register's shadow buffer into p_data.
+ * len == (uint32_t)-1 means "the whole register"; oversized requests
+ * are clamped. No hardware access - call register_update() first for
+ * fresh data.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	memcpy(p_data, p->mp_shadow, len * sizeof(uint32_t));
+}
+
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+/*
+ * Copy 'len' words into the register's shadow buffer (no hardware
+ * write - use register_flush() to commit). len == (uint32_t)-1 means
+ * "the whole register", as in register_get_val().
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	/* Clamp BEFORE the bounds assert so the (uint32_t)-1 sentinel is
+	 * accepted here exactly as register_get_val() accepts it; the
+	 * previous order asserted on the sentinel first.
+	 */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Commit 'cnt' consecutive register-sized entries from the shadow buffer
+ * to hardware and clear their dirty flags. Read-only registers are
+ * silently skipped. With ON_WRITE debug enabled the written words are
+ * logged first.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			/* Separate counter: the previous code declared a
+			 * second 'i' here, shadowing the outer one
+			 * (-Wshadow).
+			 */
+			uint32_t n = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, n);
+			while (n--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/* Everything just written is clean again. */
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a register read and return the timestamps taken around it. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow and mark it dirty (nothing is written to HW yet). */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0x00, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set every shadow bit to one and mark it dirty (nothing is written yet). */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/*
+ * Allocate a field instance, zero-initialized. field_init() does not assign
+ * every member (e.g. mp_init), so malloc'ed garbage could otherwise leak
+ * through; calloc closes that hole. Returns NULL on allocation failure.
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = calloc(1, sizeof(nt_field_t));
+	return p;
+}
+
+/*
+ * Scrub and free a field instance. NULL is accepted as a no-op, matching
+ * nthw_rac_delete(); the original memset an unchecked pointer and would
+ * crash on NULL.
+ */
+void field_delete(nt_field_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_field_t));
+		free(p);
+	}
+}
+
+/*
+ * Initialize a field descriptor from its static init record and precompute
+ * the word/bit decomposition used by field_get_val()/field_set_val():
+ * a masked partial first word (front), a run of whole 32-bit body words,
+ * and a masked partial last word (tail).
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	/* inherit the owning register's debug verbosity */
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20; /* words spanned, rounded up */
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* bits available in the first word at/above the start bit */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		/* 64-bit shift so a full 32-bit front does not overflow */
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* bits_remaining < 32 here, so this shift is well-defined */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/* Return the debug-mode flags (note: takes a module, per the header decl). */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	const int n_mode = p->m_debug_mode;
+
+	return n_mode;
+}
+
+/* Set the field's debug-trace flags. */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = (int)n_debug_mode;
+}
+
+/* Width of the field in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return (int)p->mn_bit_width;
+}
+
+/* Lowest bit position of the field within its register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return (int)p->mn_bit_pos_low;
+}
+
+/* Highest (inclusive) bit position of the field within its register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	const int n_low = (int)p->mn_bit_pos_low;
+	const int n_width = (int)p->mn_bit_width;
+
+	return n_low + n_width - 1;
+}
+
+/* Mask of the field's bits within the first shadow word (front mask only). */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Load the field's reset value into the register shadow (no flush). */
+void field_reset(const nt_field_t *p)
+{
+	const uint32_t n_reset = (uint32_t)p->m_reset_val;
+
+	field_set_val32(p, n_reset);
+}
+
+/* The front mask shifted down to bit 0, i.e. a value-space mask. */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return p->m_front_mask >> p->mn_bit_pos_low;
+}
+
+/* Return the field's documented reset value. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field's value from the owning register's shadow into p_data;
+ * len must equal the field's word count (mn_words).
+ * The w32[2]/w64 union acts as a 64-bit shift window that realigns the
+ * field's bits to bit 0 across word boundaries.
+ * NOTE(review): the union trick assumes a little-endian host (w32[0] being
+ * the low half of w64) -- confirm if big-endian targets are possible.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front: keep only the field's bits of the first word */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body: whole 32-bit words, realigned through the window */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail: masked partial last word (or zero when none) */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert the caller's value (len == mn_words) into the owning register's
+ * shadow, leaving bits outside the field untouched, then mark the register
+ * dirty. Mirrors field_get_val(): front word via m_front_mask, whole body
+ * words, tail word via m_tail_mask.
+ * NOTE(review): the w32[2]/w64 union shift window assumes a little-endian
+ * host (w32[0] is the low half of w64) -- confirm for big-endian targets.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front: shift the first input word up to the field's start bit */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body: whole words written through the shift window */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail: read-modify-write of the masked partial last word */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Write the value into the shadow, then flush the owning register to HW. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Return the field as a single 32-bit value (shadow only, no HW read). */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t n_value;
+
+	field_get_val(p, &n_value, 1);
+
+	return n_value;
+}
+
+/* Refresh the owning register from hardware, then return the field value. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t n_value;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &n_value, 1);
+
+	return n_value;
+}
+
+/* Trigger a read of the owning register and return the two timestamps. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Re-read the owning register's shadow from hardware. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the owning register's shadow to hardware (single image). */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set the field from a single 32-bit value (shadow only, no flush). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	uint32_t n_value = val;
+
+	field_set_val(p, &n_value, 1);
+}
+
+/* Set the field from a single 32-bit value and flush the owning register. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	/* equivalent to field_set_val() followed by register_flush(owner, 1) */
+	field_set_val_flush(p, &val, 1);
+}
+
+/* Zero every bit of a field in the shadow (single-word fields only). */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0); /* field must fit within one word */
+	field_set_val32(p, 0);
+}
+
+/* Zero the field, then flush the owning register to hardware. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	field_flush_register(p);
+}
+
+/* Set every bit of a field in the shadow (single-word fields only). */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0); /* field must fit within one word */
+	field_set_val32(p, ~0);
+}
+
+/* Set every bit of the field, then flush the owning register to hardware. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	field_flush_register(p);
+}
+
+/* Poll conditions understood by field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,	/* wait until all field bits read 0 */
+	FIELD_MATCH_SET_ALL,	/* wait until all field bits read 1 */
+	FIELD_MATCH_CLR_ANY,	/* wait until at least one bit reads 0 */
+	FIELD_MATCH_SET_ANY,	/* wait until at least one bit reads 1 */
+};
+
+/*
+ * Poll a field (n_poll_iterations times, n_poll_interval usec apart, -1
+ * selecting the defaults 10000/100) until it matches the requested
+ * condition. Returns 0 on match, -1 on timeout.
+ * Fix: the mask was computed as (1 << mn_bit_width) - 1, which is undefined
+ * behavior for a 32-bit-wide field (and a signed overflow at 31); use an
+ * unsigned shift and handle full width explicitly. The unreachable trailing
+ * "return 0" after the endless loop was dropped.
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (p->mn_bit_width >= 32) ?
+		0xFFFFFFFFU : ((1U << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		/* reuse n_mask: same value, same UB fix as above */
+		uint32_t n_reg_mask _unused = (n_mask << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+}
+
+/* Block until every field bit reads 1; see field_wait_cond32(). */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Block until every field bit reads 0; see field_wait_cond32(). */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Block until at least one field bit reads 1; see field_wait_cond32(). */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Block until at least one field bit reads 0; see field_wait_cond32(). */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until its value equals (n_wait_cond_value & n_wait_cond_mask).
+ * Returns 0 on match, -1 when the iteration budget runs out. -1 for either
+ * poll parameter selects the defaults (10000 iterations, 100 usec).
+ * NOTE(review): the mask is applied only to the expected value, not to the
+ * value read back -- verify this is intentional for fields wider than the
+ * mask.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/*
+ * Log the field's static layout: id, bit range, width, word count and reset
+ * value. p is tagged _unused presumably because NT_LOG can compile away in
+ * some configurations, leaving p otherwise unreferenced -- TODO confirm.
+ */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Log the field's current shadow value in hex, most significant word first.
+ * The scratch buffer holds 32 words; assert instead of silently overflowing
+ * the stack for wider fields (field_get_val() writes mn_words entries).
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	assert(p->mn_words <= sizeof(buf) / sizeof(buf[0]));
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Log a static field-init record: id, bit range, width and reset value. */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Look up the FPGA model matching n_fpga_ident via a temporary FPGA manager.
+ * Returns the model, or NULL when the ident is unsupported (logged) or the
+ * manager cannot be allocated. Fix: the original passed the unchecked
+ * fpga_mgr_new() result to fpga_mgr_init() while NULL-checking it only
+ * before deletion; guard the allocation up front instead.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	if (p_fpga_mgr == NULL) {
+		NT_LOG(ERR, NTHW, "%s: FPGA manager allocation failed\n",
+		       __func__);
+		return NULL;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	return p_fpga;
+}
+
+/* Convenience lookup of a module instance (NULL when not present). */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+/* Convenience lookup of a register within a module. */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	return module_get_register(p_mod, n_reg);
+}
+
+/* Convenience lookup of a field within a register. */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+#ifndef FPGAID_TO_PRODUCTCODE
+/*
+ * Field extractors for the packed 64-bit FPGA ident. All four now
+ * parenthesize the shift before masking; VERSIONCODE/REVISIONCODE
+ * previously relied on '>>' binding tighter than '&' (same value, but
+ * inconsistent with the first two and easy to misread).
+ */
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)(((fpga_id) >> 32) & 0xFF))
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)(((fpga_id) >> 16) & 0xFFFF))
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)(((fpga_id) >> 8) & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)(((fpga_id) >> 0) & 0xFF))
+#endif
+
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/*
+ * Debug trace selector. ON_READ (1) and ON_WRITE (2) are also tested as bit
+ * flags (m_debug_mode & ON_READ / & ON_WRITE) by the register code.
+ */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Buses a module's registers can be reached through. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of the FPGA product definitions known to this build. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas; /* number of entries in mpa_fpga_prod_init */
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* Runtime model of one FPGA: identity, product parameters and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params; /* number of entries in mpa_params */
+	struct nt_param_s **mpa_params;
+
+	int mn_modules; /* number of entries in mpa_modules */
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init; /* static product description */
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* A single FPGA product parameter (id/value pair). */
+struct nt_param_s {
+	nt_fpga_t *mp_owner; /* FPGA this parameter belongs to */
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init; /* static init record */
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* Runtime model of one module instance inside the FPGA. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner; /* FPGA this module belongs to */
+
+	int m_mod_id;
+
+	int m_instance; /* instance index when a module occurs multiple times */
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus; /* enum nthw_bus_type the module's registers live on */
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers; /* number of entries in mpa_registers */
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init; /* static init record */
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* Runtime model of a single register, including its software shadow. */
+struct nt_register_s {
+	nt_module_t *mp_owner; /* module this register belongs to */
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel; /* address relative to the module base */
+	uint32_t m_addr; /* absolute bus address */
+	uint32_t m_type; /* REGISTER_TYPE_*; RO registers are never flushed */
+	uint32_t m_len; /* register length in 32-bit words */
+
+	int m_debug_mode;
+
+	int mn_fields; /* number of entries in mpa_fields */
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow; /* m_len-word software copy of the register */
+	bool *mp_dirty; /* per-word "shadow differs from hardware" flags */
+
+	nt_fpga_register_init_t *mp_init; /* static init record */
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/*
+ * Runtime model of a bit field within a register. The word/bit members
+ * (m_first_word .. m_tail_mask) are precomputed by field_init() for the
+ * front/body/tail plumbing in field_get_val()/field_set_val().
+ */
+struct nt_field_s {
+	nt_register_t *mp_owner; /* register this field belongs to */
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word; /* shadow word index holding the field's LSBs */
+	uint32_t m_first_bit; /* bit offset of the field within that word */
+	uint32_t m_front_mask; /* field bits within the first word */
+	uint32_t m_body_length; /* whole 32-bit words between front and tail */
+	uint32_t mn_words; /* field width in words, rounded up */
+	uint32_t m_tail_mask; /* field bits within the last word */
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init; /* NOTE(review): not set by field_init() */
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a Napatech PCI device id to the adapter id; unknown ids map to
+ * NT_HW_ADAPTER_ID_UNKNOWN. The three NT40 device ids share one adapter id,
+ * so their cases are grouped.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/*
+ * Adapter board identifiers. NT_HW_ADAPTER_ID_NT40A01 is deliberately an
+ * alias of NT_HW_ADAPTER_ID_NT40E3 (same numeric value); the enumerators
+ * after it continue the sequence from that shared value.
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* Overall FPGA feature profile; values are fixed as they identify images. */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+#define RAB_DMA_WAIT (1000000)
+#define RAB_DMA_BUF_CNT (0x4000)
+
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate a zero-initialized RAC instance.
+ * Returns NULL on allocation failure (the original code passed an
+ * unchecked malloc() result straight to memset(), which is undefined
+ * behavior when the allocation fails).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = calloc(1, sizeof(nthw_rac_t));
+
+	return p;
+}
+
+/*
+ * Release a RAC instance. NULL-tolerant. The object is scrubbed
+ * before freeing so stale pointers fail fast instead of appearing
+ * to work (use-after-free detection aid).
+ */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_rac_t));
+		free(p);
+	}
+}
+
+/*
+ * Initialize the RAC (Register Access Controller) instance: locate the
+ * RAC module in the FPGA model, resolve all registers/fields used for
+ * PIO and DMA RAB access, and cache their addresses and masks.
+ *
+ * Returns 0 on success, -1 if the RAC module is missing.
+ * When p is NULL the call degenerates to a pure probe for the module.
+ *
+ * Fixes vs. original:
+ *  - the debug-mode block dereferenced p->mp_reg_rab_init before it was
+ *    assigned (NULL from the zeroed allocation); it is now placed after
+ *    the RAC_RAB_INIT register lookup.
+ *  - the RAC_DBG_DATA else-branch assigned mp_reg_dbg_data = NULL
+ *    (copy-paste bug) instead of mp_fld_dbg_data = NULL.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 * (must run after mp_reg_rab_init has been resolved above)
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache BAR0 addresses for the hot-path accessors */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* NMB registers are optional - only cache addresses when present */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces reported by the FPGA product parameters */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-wait until the RAB output buffer holds at least word_cnt words.
+ * Bounded spin (100000 reads) - no sleep, the RAB normally completes
+ * within a few register-read round-trips.
+ * Returns 0 on success, -1 on timeout (logged with the failing address).
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	uint32_t retry;
+
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			break;
+	}
+
+	if (used < word_cnt) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+		       p_adapter_id_str, address, used, word_cnt);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 * (field_set_val_flush32 goes through the traced register model;
+	 * nthw_rac_reg_write32 is the raw BAR0 write that takes effect)
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Reset all RAB buses via the documented "flip/flip" sequence:
+ * deassert all, assert all, then release bus 0 while the remaining
+ * buses stay in reset (ref RMT#37020). Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) and program the RAB DMA rings.
+ * A single DMA area holds two back-to-back rings of RAB_DMA_BUF_CNT
+ * 32-bit words: inbound (commands/data to FPGA) followed by outbound
+ * (responses). The FPGA is given the IOVA of each half, and the
+ * software ring pointers are re-synchronized from the hardware
+ * pointers. Returns 0 on success, -1 on DMA allocation failure.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* outbound ring starts immediately after the inbound ring */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Start a DMA transaction. On success the instance mutex is
+ * INTENTIONALLY left locked - it is released by the matching
+ * nthw_rac_rab_dma_commit(). Returns -1 (mutex released) if a
+ * transaction is already active.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		pthread_mutex_unlock(&p->m_mutex);
+		NT_LOG(ERR, NTHW,
+		       "%s: DMA begin requested, but a DMA transaction is already active\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	p->m_dma_active = true;
+
+	return 0;
+}
+
+/*
+ * Finalize the queued inbound commands and kick the hardware:
+ * append the completion marker, clear the slot where the hardware
+ * will echo it back, then publish the new inbound write pointer
+ * (byte offset) which starts the transfer.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll (1 us per iteration, up to RAB_DMA_WAIT iterations) for the
+ * hardware to echo the completion marker into the outbound ring.
+ * On success the outbound read pointer is advanced past the marker
+ * and the inbound free count is reset. Returns 0 on success, -1 on
+ * timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Commit the transaction opened by nthw_rac_rab_dma_begin(): start
+ * the transfer, wait for completion, then release the mutex taken
+ * in begin. Returns the wait result (0/-1), or -1 immediately if no
+ * transaction is active (programming error - asserts in debug builds).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+
+	p->m_dma_active = false;
+
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* Single volatile 32-bit MMIO read from BAR0 at byte offset reg_addr */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *const p_reg =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					    reg_addr);
+
+	*p_data = *p_reg;
+}
+
+/* Single volatile 32-bit MMIO write to BAR0 at byte offset reg_addr */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *const p_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*p_reg = p_data;
+}
+
+/*
+ * Queue a RAB write of word_cnt words (1..256) into the inbound DMA
+ * ring; the transfer itself happens at nthw_rac_rab_dma_commit().
+ * Reserves word_cnt + 1 slots (command word + payload); the +3 head
+ * room in the check leaves space for the completion handling.
+ * Returns 0 on success, -1 on bad length or insufficient ring space
+ * (caller must commit pending work and retry).
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy payload into the ring, wrapping at RAB_DMA_BUF_CNT */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read of word_cnt words (1..256) via the DMA rings and
+ * return, through buf_ptr, where in the outbound ring the response
+ * will land once nthw_rac_rab_dma_commit() completes. Returns 0 on
+ * success, -1 on bad length or insufficient inbound ring space.
+ *
+ * Fix vs. original: the second length-check NT_LOG carried a stray
+ * ": 0x%%08X" conversion with no matching argument (undefined
+ * behavior in the printf-family formatter) and lacked the trailing
+ * newline used by every other log message in this file.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* a read consumes one inbound slot: the command word */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand the caller the outbound ring window for the response */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (PIO, non-DMA) RAB write of word_cnt words to
+ * bus_id/address via the IB/OB data FIFOs. Serialized by the
+ * instance mutex; refuses to run while a DMA transaction is active.
+ * Protocol: write command word + payload + completion command to the
+ * IB FIFO, then wait for the completion echo in the OB FIFO.
+ * Returns 0 on success, -1 on any validation/timeout error.
+ *
+ * NOTE(review): the range checks use '>' against (1 << BW); the
+ * largest encodable value is (1 << BW) - 1, so the exact boundary
+ * value slips through - confirm against the RAB field widths.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Synchronous (PIO, non-DMA) RAB read of word_cnt words from
+ * bus_id/address via the IB/OB data FIFOs into p_data. Serialized by
+ * the instance mutex. Protocol: write one read command to the IB
+ * FIFO, wait for word_cnt words in the OB FIFO, then drain them.
+ * Returns 0 on success, -1 on any validation/timeout error.
+ *
+ * NOTE(review): as in nthw_rac_rab_write32, the range checks use '>'
+ * against (1 << BW) where (1 << BW) - 1 is the largest encodable
+ * value - confirm against the RAB field widths.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB IB/OB buffers: assert the FLUSH bit, clear the FREE
+ * register, then busy-poll (bounded, 100000 reads) until the USED
+ * register shows only the flush bit (i.e. both used counters are 0).
+ * The flush bit is cleared again on exit regardless of outcome.
+ * Returns 0 on success, -1 if the buffers never drained.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
+/*
+ * Register Access Controller (RAC) instance state: cached register/field
+ * handles, pre-resolved register addresses and masks for the RAB buses,
+ * and the DMA ring bookkeeping used by the *_dma access paths.
+ */
+struct nthw_rac {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rac;
+
+	/* Serializes all RAB bus accesses through this RAC instance */
+	pthread_mutex_t m_mutex;
+
+	int mn_param_rac_rab_interfaces;
+	int mn_param_rac_rab_ob_update;
+
+	nt_register_t *mp_reg_dummy0;
+	nt_register_t *mp_reg_dummy1;
+	nt_register_t *mp_reg_dummy2;
+
+	nt_register_t *mp_reg_rab_init;
+	nt_field_t *mp_fld_rab_init;
+
+	int mn_fld_rab_init_bw;
+	uint32_t mn_fld_rab_init_mask;
+
+	nt_register_t *mp_reg_dbg_ctrl;
+	nt_field_t *mp_fld_dbg_ctrl;
+
+	nt_register_t *mp_reg_dbg_data;
+	nt_field_t *mp_fld_dbg_data;
+
+	/* In-band (host -> FPGA) and out-of-band (FPGA -> host) data regs */
+	nt_register_t *mp_reg_rab_ib_data;
+	nt_field_t *mp_fld_rab_ib_data;
+
+	nt_register_t *mp_reg_rab_ob_data;
+	nt_field_t *mp_fld_rab_ob_data;
+
+	nt_register_t *mp_reg_rab_buf_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
+	nt_field_t *mp_fld_rab_buf_free_ob_free;
+	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
+	nt_field_t *mp_fld_rab_buf_free_timeout;
+
+	nt_register_t *mp_reg_rab_buf_used;
+	nt_field_t *mp_fld_rab_buf_used_ib_used;
+	nt_field_t *mp_fld_rab_buf_used_ob_used;
+	nt_field_t *mp_fld_rab_buf_used_flush;
+
+	/* DMA ring base addresses (lo/hi halves) and ring pointers */
+	nt_register_t *mp_reg_rab_dma_ib_lo;
+	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_hi;
+	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_hi;
+	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_lo;
+	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_wr;
+	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ib_rd;
+	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ob_wr;
+	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;
+
+	nt_register_t *mp_reg_rab_nmb_rd;
+	nt_register_t *mp_reg_rab_nmb_data;
+	nt_register_t *mp_reg_rab_nmb_wr;
+	nt_register_t *mp_reg_rab_nmb_status;
+
+	/* Register addresses resolved once at init for fast raw access */
+	uint32_t rac_rab_init_addr;
+	uint32_t rac_rab_ib_data_addr;
+	uint32_t rac_rab_ob_data_addr;
+	uint32_t rac_rab_buf_free_addr;
+	uint32_t rac_rab_buf_used_addr;
+
+	uint32_t rac_rab_dma_ib_lo_addr;
+	uint32_t rac_rab_dma_ib_hi_addr;
+	uint32_t rac_rab_dma_ob_lo_addr;
+	uint32_t rac_rab_dma_ob_hi_addr;
+	uint32_t rac_rab_dma_ib_rd_addr;
+	uint32_t rac_rab_dma_ob_wr_addr;
+	uint32_t rac_rab_dma_ib_wr_addr;
+
+	/* Field masks resolved once at init */
+	uint32_t rac_rab_buf_free_ib_free_mask;
+	uint32_t rac_rab_buf_free_ob_free_mask;
+	uint32_t rac_rab_buf_used_ib_used_mask;
+	uint32_t rac_rab_buf_used_ob_used_mask;
+	uint32_t rac_rab_buf_used_flush_mask;
+
+	uint32_t rac_rab_buf_used_ob_used_low;
+
+	uint32_t rac_nmb_rd_adr_addr;
+	uint32_t rac_nmb_data_addr;
+	uint32_t rac_nmb_wr_adr_addr;
+	uint32_t rac_nmb_status_addr;
+
+	/* DMA mode state: buffers, ring pointers and free-space accounting */
+	bool m_dma_active;
+
+	struct nt_dma_s *m_dma;
+
+	volatile uint32_t *m_dma_in_buf;
+	volatile uint32_t *m_dma_out_buf;
+
+	uint16_t m_dma_out_ptr_rd;
+	uint16_t m_dma_in_ptr_wr;
+	uint32_t m_in_free;
+};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+/* View into a DMA result buffer returned by nthw_rac_rab_read32_dma() */
+struct dma_buf_ptr {
+	uint32_t size;
+	uint32_t index;
+	volatile uint32_t *base;
+};
+
+/* Lifecycle: allocate, bind to an FPGA instance, and destroy */
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+/*
+ * RAB bus word access, register-based and DMA-based variants.
+ * All return 0 on success, negative on error.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+/* Raw BAR register access helpers (no RAB bus involved) */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+/*
+ * Allocate a zero-initialized statistics module handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_stat_t *nthw_stat_new(void)
+{
+	/* calloc zero-initializes, replacing the malloc + memset pair */
+	return calloc(1, sizeof(nthw_stat_t));
+}
+
+/* Release a handle from nthw_stat_new(); accepts NULL. */
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	free(p); /* free(NULL) is a no-op, no guard needed */
+}
+
+/*
+ * Bind STA module instance n_instance, derive the statistics layout
+ * (counter counts and layout version) from the packed module version and
+ * product parameters, and reset the counter logic.
+ * With p == NULL this is a pure probe: returns 0 if the instance exists,
+ * -1 otherwise. Otherwise returns 0 on success, -1 when not found.
+ */
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	/* NOTE(review): %08lX assumes 64-bit long; PRIX64 would be portable */
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	/* RX port count: try VSWITCH param first, then fall back twice */
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	/* Two counters (e.g. pkts+octets) per color/function */
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	/* From STA v0.6, DBS adds 2 extra counters per host buffer */
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	/* Map the packed module version onto a statistics layout version */
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	/* VSWITCH overrides the per-port counter counts entirely */
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	/* Pulse the counter-clear bit: set then clear */
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	/* Acknowledge any pending toggle-missed status */
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	/* Leave DMA disabled until nthw_stat_set_dma_address() has run */
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+/*
+ * Program the statistics DMA area: zero the counter region, write the
+ * 64-bit physical base address into the HOST_ADR MSB/LSB fields and set
+ * up the timestamp slot that follows the counters. Returns 0.
+ */
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	/* The timestamp lives right after the counter block */
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	/* Cast pointers for PRIX64: passing them raw to varargs is UB */
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical,
+	       (uint64_t)(uintptr_t)p->mp_stat_dma_virtual,
+	       (uint64_t)(uintptr_t)p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+/*
+ * Arm a statistics DMA transfer: acknowledge any toggle-missed flag,
+ * invalidate the previous timestamp and (re)assert DMA enable.
+ * Statement order is hardware-significant. Always returns 0.
+ */
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
+/*
+ * Statistics (STA) module handle: cached register fields plus the counter
+ * layout derived in nthw_stat_init() and the DMA area bookkeeping set in
+ * nthw_stat_set_dma_address().
+ */
+struct nthw_stat {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_stat;
+	int mn_instance;
+
+	/* Layout version derived from the STA module version */
+	int mn_stat_layout_version;
+
+	bool mb_is_vswitch;
+	bool mb_has_tx_stats;
+
+	int m_nb_phy_ports;
+	int m_nb_nim_ports;
+
+	int m_nb_rx_ports;
+	int m_nb_tx_ports;
+
+	int m_nb_rx_host_buffers;
+	int m_nb_tx_host_buffers;
+
+	int m_dbs_present;
+
+	int m_rx_port_replicate;
+
+	/* Counter counts that together define the DMA area layout */
+	int m_nb_color_counters;
+
+	int m_nb_rx_hb_counters;
+	int m_nb_tx_hb_counters;
+
+	int m_nb_rx_port_counters;
+	int m_nb_tx_port_counters;
+
+	/* Total number of 32-bit counters in the DMA area */
+	int m_nb_counters;
+
+	nt_field_t *mp_fld_dma_ena;
+	nt_field_t *mp_fld_cnt_clear;
+
+	/* NULL on STA v0.3+ where these CFG fields no longer exist */
+	nt_field_t *mp_fld_tx_disable;
+
+	nt_field_t *mp_fld_cnt_freeze;
+
+	nt_field_t *mp_fld_stat_toggle_missed;
+
+	nt_field_t *mp_fld_dma_lsb;
+	nt_field_t *mp_fld_dma_msb;
+
+	/* DMA area: physical address for the FPGA, virtual for the host */
+	uint64_t m_stat_dma_physical;
+	uint32_t *mp_stat_dma_virtual;
+
+	uint64_t last_ts;
+
+	/* Points just past the counters inside the DMA area */
+	uint64_t *mp_timestamp;
+};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules: kept in a disjoint id range starting at 10000 */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+/* Token-pastes a short module name (e.g. NTHW) into its module id macro */
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+/* Per-level compile-time gates; DB1/DB2 are off by default */
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+/* Each NT_LOG_NT_LOG_<LVL> expands to nt_log() or to nothing when gated */
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+/*
+ * Main logging entry point. 'level' and 'module' are bare tokens
+ * (e.g. NT_LOG(ERR, NTHW, ...)); they are stringized into the message
+ * prefix and pasted into the level gate and module id macros.
+ */
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+/* Bit-mask style severity levels */
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
+/* Pluggable backend installed via nt_log_init() */
+struct nt_log_impl {
+	int (*init)(void);
+	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
+		   va_list args);
+	int (*is_debug)(uint32_t module);
+};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+/*
+ * Install the user logging backend and run its init hook.
+ * Returns the backend's init() result, or -1 for a NULL/incomplete impl
+ * (the original dereferenced impl->init unconditionally).
+ */
+int nt_log_init(struct nt_log_impl *impl)
+{
+	if (impl == NULL || impl->init == NULL)
+		return -1;
+	user_impl = impl;
+	return user_impl->init();
+}
+
+/*
+ * Return a pointer to the first '\n' of the trailing EOL run in s
+ * (ignoring trailing spaces), or NULL when s does not end in '\n'.
+ * Guards the empty string: the original computed strlen("") - 1 == -1
+ * and read s[-1], an out-of-bounds access.
+ */
+static char *last_trailing_eol(char *s)
+{
+	int i = (int)strlen(s) - 1;
+
+	if (i < 0)
+		return NULL;
+	/* Skip spaces */
+	while (i > 0 && s[i] == ' ')
+		--i;
+	if (s[i] != '\n')
+		return NULL;
+	/*
+	 * Find the last trailing EOL "hello_world\n\n\n"
+	 *                                         ^
+	 */
+	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
+		--i;
+	return &s[i];
+}
+
+/*
+ * Forward one log statement to the installed backend, normalizing the
+ * format so it always ends in exactly one EOL.
+ * Returns the backend's result, or -1 when no backend is installed or
+ * the scratch buffer cannot be allocated.
+ */
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+
+	/* Allocation failure: nothing to format with */
+	if (actual_format == NULL)
+		return rv;
+
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		/* Leave room for the NUL strncat always appends (off-by-one fix) */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format) - 1);
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module)
+{
+	/* Guard use before nt_log_init(), consistent with nt_log() */
+	if (user_impl == NULL)
+		return -1;
+	return user_impl->is_debug(module);
+}
+
+/*
+ * Allocate a fixed-size scratch string, optionally pre-filled with a
+ * (truncated) copy of sinit. Returns NULL on allocation failure; free
+ * with ntlog_helper_str_free().
+ */
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *buf = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (buf == NULL)
+		return NULL;
+
+	buf[0] = '\0';
+	if (sinit != NULL)
+		rte_strscpy(buf, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	return buf;
+}
+
+/*
+ * Reset a helper string to a (truncated) copy of sinit, or to the empty
+ * string when sinit is NULL. A NULL s is ignored.
+ */
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s == NULL)
+		return;
+
+	if (sinit != NULL)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+}
+
+/*
+ * Append printf-style formatted text to a helper string, truncating at
+ * the fixed buffer size. A NULL s is ignored.
+ * Attribute fixed from (2, 0) to (2, 3): this function is variadic, so
+ * the compiler can and should type-check the arguments against format.
+ */
+__rte_format_printf(2, 3)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	/* vsnprintf bounds the write and always NUL-terminates */
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
+/* Release a string from ntlog_helper_str_alloc(); free(NULL) is a no-op. */
+void ntlog_helper_str_free(char *s)
+{
+	free(s);
+}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+/* Unpack the fields of a packed PCI identifier (domain/bus/dev/func) */
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary (a must be a power of two) */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+/* NOTE(review): the *10/.../100 scaling can overflow for very large cycle
+ * counts - confirm acceptable for expected uptimes
+ */
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+/* One VFIO-mapped DMA region: host virtual addr, IOVA and mapped size */
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;
+	uint64_t size;
+};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+/* VFIO map/unmap callbacks registered via nt_util_vfio_init() */
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Register the VFIO map/unmap callbacks used by nt_dma_alloc/nt_dma_free. */
+/* NOTE(review): impl is copied by value and not NULL-checked - confirm callers */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate a DMA-able region of at least 'size' bytes on NUMA node
+ * 'numa' and map it through the registered VFIO callback.
+ * Returns a descriptor to pass to nt_dma_free(), or NULL on failure.
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	/*
+	 * NOTE(review): the mapping covers ALIGN_SIZE(size) bytes while only
+	 * 'size' bytes were allocated - confirm allocator slack covers this.
+	 */
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)(uintptr_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size/align are 64-bit: %u/%X would be a varargs type mismatch (UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and release a region from nt_dma_alloc(), including the
+ * descriptor itself; vfio_addr must not be used afterwards.
+ * An unmap failure is logged but the memory is freed regardless.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* size is 64-bit: %u would be a varargs type mismatch (UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(uintptr_t)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(uintptr_t)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 3/8] net/ntnic: adds NT200A02 adapter support
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-09-01 12:18   ` Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v13:
* Fix typo spelling warnings
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ *
+ * Clears every monitor_task_is_running[] flag so the monitor loops exit.
+ * When invoked directly with signum == -1 (not from a real signal) it also
+ * joins each previously-running thread and zeroes its handle.
+ * NOTE(review): pthread_join()/memset() are not async-signal-safe; from a
+ * real signal context only the flag-clearing path executes, which is fine.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		monitor_task_is_running[i] = 0;
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			/* Direct call: reap the thread and clear its handle */
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Print a human-readable adapter summary to the stream @pfh: device name,
+ * PCI ident/ids, FPGA version and build info, NIM/port counts and HW
+ * platform, followed by the statistics dump (nt4ga_stat_dump()).
+ *
+ * Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Format "domain:bus:dev.func" from the packed pciident value */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	/* Append per-adapter statistics after the static info */
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ *
+ * Allocates and initializes an SPI v3 handle on instance 0 of @p_fpga.
+ * Returns NULL on allocation or init failure (the handle is cleaned up
+ * internally on init failure).  The caller owns the returned handle and
+ * must release it with nthw_spi_v3_delete().
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *sensors_s_spi = nthw_spi_v3_new();
+
+	if (sensors_s_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(sensors_s_spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		/* do not leak the half-initialized handle */
+		nthw_spi_v3_delete(sensors_s_spi);
+		return NULL;
+	}
+
+	return sensors_s_spi;
+}
+
+/*
+ * SPI for sensors reading
+ *
+ * Allocates and initializes a sensor-read SPI handle on instance 0 of
+ * @p_fpga.  Returns NULL on allocation or init failure.  The caller owns
+ * the returned handle - presumably released with nthw_spis_delete()
+ * (mirrors the error path below); confirm at the call sites.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *sensors_t_spi = nthw_spis_new();
+	/* init SPI for sensor initialization process */
+	if (sensors_t_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(sensors_t_spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		/* do not leak the half-initialized handle */
+		nthw_spis_delete(sensors_t_spi);
+		return NULL;
+	}
+
+	return sensors_t_spi;
+}
+
+/*
+ * Append @sensor to the adapter sensor list after @tail.
+ *
+ * Returns the new list tail.  A NULL @sensor (failed probe) is skipped and
+ * the old tail is returned, so a single failed sensor does not break the
+ * list or the sensor count.
+ */
+static struct nt_sensor_group *
+adapter_append_sensor(struct adapter_info_s *adapter,
+		      struct nt_sensor_group *tail,
+		      struct nt_sensor_group *sensor)
+{
+	if (sensor == NULL)
+		return tail;
+	tail->next = sensor;
+	adapter->adapter_sensors_cnt++;
+	return sensor;
+}
+
+/*
+ * Probe and register the adapter-level sensors: the FPGA temperature
+ * sensor (list head) plus the AVR-managed sensors (fan, PSU and PCB
+ * temperatures) reached over SPI, then (re)start AVR sensor monitoring.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor is the list head */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	if (sensors_list_ptr == NULL) {
+		/* Fix: the original dereferenced a NULL list head below */
+		NT_LOG(ERR, ETHDEV, "%s: FPGA temperature sensor init failed\n",
+		       __func__);
+		if (sensors_s_spi)
+			nthw_spi_v3_delete(sensors_s_spi);
+		return;
+	}
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR-managed sensors */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr = adapter_append_sensor(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"FAN0",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_FAN,
+						NT_SENSOR_NT200E3_FAN_SPEED,
+						SENSOR_MON_FAN, 0,
+						SENSOR_MON_BIG_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&fan, 0xFFFF));
+
+			sensors_list_ptr = adapter_append_sensor(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PSU0",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200E3_PSU0_TEMP,
+						SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+						SENSOR_MON_LITTLE_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&exar7724_tj, 0xFFFF));
+
+			sensors_list_ptr = adapter_append_sensor(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PSU1",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200A02_PSU1_TEMP,
+						SENSOR_MON_MP2886A, 0x8d,
+						SENSOR_MON_BIG_ENDIAN,
+						SENSOR_MON_UNSIGNED,
+						&mp2886a_tj, 0xFFFF));
+
+			sensors_list_ptr = adapter_append_sensor(adapter, sensors_list_ptr,
+				avr_sensor_init(sensors_s_spi,
+						p_hw_info->n_nthw_adapter_id,
+						"PCB",
+						NT_SENSOR_SOURCE_ADAPTER,
+						NT_SENSOR_TYPE_TEMPERATURE,
+						NT_SENSOR_NT200E3_PCB_TEMP,
+						SENSOR_MON_DS1775, 0,
+						SENSOR_MON_LITTLE_ENDIAN,
+						SENSOR_MON_SIGNED,
+						&ds1775_t, 0xFFFF));
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				/* Fix: typo "starteed" in original message */
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Bring up one NT4GA adapter: derive HW ids from the PCI device id, build
+ * the device/adapter/port ident strings, initialize the FPGA model, run
+ * the PCI TA/TG throughput check, set up sensors, initialize the per-FPGA
+ * port/link layer, the optional EPP block, and the statistics module.
+ *
+ * Returns 0 on success, otherwise the first failing sub-init's code.
+ *
+ * NOTE(review): the malloc'ed ident strings (and per-port strings) are not
+ * freed on the early error returns below - presumably the caller invokes
+ * nt4ga_adapter_deinit() on failure, which frees them; confirm.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	/* NOTE(review): allocation failure is tolerated (guarded below) but
+	 * silently leaves the ident strings NULL. */
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	/* Decode pci_device_id fields per DN-0060 section 9 */
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Build one "adapter:intf_N" ident string per possible port */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; populates fpga_info fields used below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			/* TA/TG present: run the throughput measurement */
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			/* Non-fatal: TA/TG is optional */
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Port/link layer init, dispatched on the FPGA product id */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional; only initialized when present in the FPGA */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter: stop the monitor tasks and statistics, shut down
+ * the FPGA, reset RAC/RAB, and free all ident strings and sensor lists
+ * allocated by nt4ga_adapter_init()/adapter_sensor_setup().
+ *
+ * Returns the result of nthw_rac_rab_reset() - the only error source
+ * tracked here.
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* signum -1: join and clear the monitor threads, not just flag them */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	/* NOTE(review): presumably mp_nthw_rac remains valid after
+	 * nthw_fpga_shutdown() - confirm */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors (walk and deinit the singly-linked list) */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors (one list per physical port) */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * Hardware identification, decoded per DN-0060 section 9.
+ */
+typedef struct hw_info_s {
+	/* pciids, read from PCI config space */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid (see nt4ga_adapter_init() bit-field decode) */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;	/* bits [11:4] of pci_device_id */
+	int hw_product_type;	/* bits [3:0] of pci_device_id */
+	int hw_reserved1;	/* bits [15:12] of pci_device_id */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/*
+ * Per-adapter state: sub-module contexts, HW/FPGA info, sensor lists and
+ * ident strings.  One instance per physical adapter.
+ */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	/* Sensor lists; heads/counts filled in by adapter_sensor_setup() */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* Heap-allocated ident strings, owned by this struct
+	 * (freed in nt4ga_adapter_deinit()) */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;	/* external shutdown request flag */
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Filter sub-module state: interface/queue counts and the flow device. */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;			/* number of interfaces */
+	int n_queues_per_intf_cnt;	/* queues per interface */
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port,
+ * so the port argument is ignored.
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* Fix: 'p' was tagged _unused although it is dereferenced below */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ * Returns the cached nim_present flag for this port.
+ * NOTE(review): no bounds check on 'port'; callers must pass
+ * 0..NUM_ADAPTER_PORTS_MAX-1.
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const bool nim_present = p_link->link_state[port].nim_present;
+	return nim_present;
+}
+
+/*
+ * port: link mode
+ * Administrative state is stored inverted as port_action[].port_disable.
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_disable = !adm_state;
+}
+
+/* Read back the administrative state (inverse of port_disable). */
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const bool adm_state = !p_link->port_action[port].port_disable;
+	return adm_state;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+/* Return the cached link-up flag for this port. */
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	bool status = p_link->link_state[port].link_up;
+	return status;
+}
+
+/*
+ * port: link speed
+ * Stores the requested speed both as a pending port action and as the
+ * reported link info.
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_speed = speed;
+	p_link->link_info[port].link_speed = speed;
+}
+
+/* Return the cached link speed for this port. */
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nt_link_speed_t speed = p_link->link_info[port].link_speed;
+	return speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code - intentionally a no-op.
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/* Fix: removed a dead local (p_link computed and never used) */
+}
+
+/*
+ * Autoneg state is not tracked yet; always report enabled.
+ * Fix: removed a dead local (p_link computed and never used).
+ */
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code; only the pending port
+ * action is recorded (link_info is not updated here).
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_duplex = duplex;
+}
+
+/* Return the cached link duplex mode for this port. */
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nt_link_duplex_t duplex = p_link->link_info[port].link_duplex;
+	return duplex;
+}
+
+/*
+ * port: loopback mode
+ * Records the requested loopback mode as a pending port action.
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_lpbk_mode = mode;
+}
+
+/* Return the requested loopback mode for this port. */
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	return p_link->port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ * Returns a BY-VALUE copy of the port's NIM I2C context from the 100G
+ * variant union member.
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nim_i2c_ctx_t nim_ctx = p_link->u.var100g.nim_ctx[port];
+	return nim_ctx;
+}
+
+/*
+ * port: tx power
+ * Disable/enable the TX laser on QSFP28 ports.
+ *
+ * Returns 0 on success (or when the NIM is rx-only, in which case the
+ * laser is left untouched), 1 when the laser-disable command fails, and
+ * -1 for port types other than QSFP28/QSFP28_SR4/QSFP28_LR4.
+ * NOTE(review): mixed error conventions (1 vs -1) - callers must check
+ * both.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state, link info and pending port actions, plus the
+ * adapter-variant NIM/MAC-PCS contexts.
+ * (NOTE(review): the original header said "Callback functions to setup
+ * mac, pcs and phy" - these are state structs, not callbacks.)
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Requested (pending) per-port settings, applied by the link code */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+typedef struct adapter_100g_s {
+	/* nim_ctx must be first so it overlays adapter_var_u.nim_ctx */
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/* Union of adapter variants; nim_ctx aliases each variant's first field */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;	/* adapter-wide speed capability mask */
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback mode on one port.
+ * mode/last_mode: 0 = none, 1 = host loopback, 2 = line loopback.
+ * Finishes with an RX-path reset and, once the RX path is out of reset,
+ * clears the BIP (and, if FEC is active, FEC) counters.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	/* Polarity swap is restored unless host loopback is being applied. */
+	bool swap_polarity = true;
+
+	if (mode == 1) {
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polarity = false;
+	} else if (mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+	} else if (last_mode == 1) {
+		NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+	} else if (last_mode == 2) {
+		NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+	}
+	/* Any other mode/last_mode combination needs no loopback change. */
+
+	/* NT200A02, and NT200A01 hw_id 2, require GTY polarity swap. */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polarity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ *
+ * Fills *state from the MAC/PCS link-summary registers and the GPIO PHY
+ * module-present pin. Always returns 0. The raw summary values are logged
+ * at DBG level, but only when they differ from the previously logged
+ * values for this adapter/port, so polling does not flood the log.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	/* Start from a cleared state; only the fields set below are valid. */
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/*
+		 * Last logged summary per adapter/port; static so it persists
+		 * between polls and the DBG line is emitted only on change.
+		 * NOTE(review): no locking around lsbuf - assumes a single
+		 * monitor thread per adapter; confirm.
+		 */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			/* Remember the new text, then log it once. */
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Report whether a NIM module is plugged into port 'if_no', as seen by
+ * the GPIO PHY module-present pin.
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable the MAC/PCS RX path.
+ * Returns 0; 'drv' is unused but kept for a uniform helper signature.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable the MAC/PCS TX path and select the host as TX source
+ * (tx_sel_host).
+ * Returns 0; 'drv' is unused but kept for a uniform helper signature.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable the MAC/PCS RX path.
+ * Returns 0; 'drv' is unused but kept for a uniform helper signature.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable the MAC/PCS TX path and deselect the host as TX source
+ * (tx_sel_host).
+ * Returns 0; 'drv' is unused but kept for a uniform helper signature.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Pulse the RX-path reset: assert, settle, de-assert, settle.
+ * Returns 0; 'drv' is unused but kept for a uniform helper signature.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10 ms settle time */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10 ms settle time */
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ *
+ * Apply (swap == true) or clear (swap == false) the per-lane GTY TX/RX
+ * polarity inversion required by some board layouts. The lookup tables
+ * are indexed [port][lane] and only cover ports 0..1 and lanes 0..3.
+ * Always returns 0.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+
+	/*
+	 * The tables above only describe two ports; callers validate against
+	 * NUM_ADAPTER_PORTS_MAX, which does not guarantee port < 2, so guard
+	 * against an out-of-bounds table read here.
+	 */
+	assert(port >= 0 && port < 2);
+
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ * If the MAC/PCS flags a pending reset, reports a high bit-error rate, or
+ * not all FEC alignment markers are locked, the RX path is reset.
+ * Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool fec_all_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || hi_ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets the NIM through the GPIO PHY, builds the I2C context and basic NIM
+ * state, and validates the module type. With enable == false the port is
+ * first taken down (RX/TX disabled, RX reset) and the module is finally put
+ * in low-power mode instead of being brought up.
+ *
+ * Returns 0 on success (including "no module present" at entry),
+ * non-zero on error.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* only supported module type */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/*
+	 * The module was present before the reset; if it is gone now the
+	 * reset failed or the module was pulled. (Log text fixed: the old
+	 * message said "no longer absent" for this absent case.)
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ *
+ * Sequence: set port state variables, apply board-specific GTY polarity
+ * swap, enable TX and reset RX, enable the GMF if present, initialize the
+ * NIM (create_nim), enable FEC, apply GTY TX tuning for the detected
+ * board/hw revision, set the RX timestamp compensation and finally enable
+ * RX. Returns 0 on success, non-zero if NIM initialization fails.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* -1 marks "unknown" when no FPGA info is available. */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 2.2) Nt4gaPort::setup()
+	 * NOTE(review): the first nthw_gmf_init(NULL, ...) call looks like a
+	 * presence probe (succeeds only if the GMF module exists) before the
+	 * real init with &gmf - confirm against nthw_gmf_init's contract.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		/*
+		 * Short-range GTY TX tuning table, indexed [port][lane] as
+		 * {pre, diff, post}.
+		 * NOTE(review): table covers only ports 0..1 - confirm these
+		 * adapters never expose more than two 100G ports.
+		 */
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		/* These boards use one fixed tuning for all lanes. */
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ *
+ * Link monitor loop for one adapter: every ~0.5 s it reads adapter/NIM
+ * sensors and, per port, handles administrative enable/disable, loopback
+ * mode changes, NIM insertion/removal and link up/down transitions.
+ * Runs until monitor_task_is_running[adapter_no] is cleared. Returns 0.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	/* Per-port arrays; index with the port number below. */
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/*
+				 * Bug fix: log this port's NIM data (nim_ctx[i]);
+				 * nim_ctx->... always read element 0.
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ *
+ * pthread entry point: runs the shared link monitor until it is stopped.
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ *
+ * On the first call per adapter this sets up, for each physical port, the
+ * MAC/PCS, the NIM I2C controller and the GPIO PHY register contexts, then
+ * starts the link monitor thread if it is not already running.
+ * Returns 0 on success, or the first failing sub-initialization's result.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	/* One-time per-adapter setup of the per-port register contexts. */
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* IIC instance numbering starts at 2 for port 0. */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		/* Only mark initialized when every port succeeded. */
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..5cbe7fcae9
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Locate and initialize the PCI test-accelerator (TA) and the read/write
+ * PCI traffic-generator (TG) FPGA modules.
+ *
+ * Returns the number of modules that could NOT be found (0 on full
+ * success); a missing module is logged as a warning and is not fatal.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	/*
+	 * 'p' is the address of a struct member and can never be NULL, so
+	 * the previous NULL check here was dead code; clear the state
+	 * unconditionally.
+	 */
+	memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+
+	assert(p_fpga);
+
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Write the test accelerator's control-enable register. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the test accelerator's bad-packet counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the test accelerator's good-packet counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one read-TG RAM slot: its DMA address within the host buffer and
+ * its request size / wait / wrap flags. Returns 0.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	/*
+	 * Compute the slot offset in 64-bit arithmetic: the previous
+	 * (unsigned long)(slot_addr * req_size) performed a 32-bit multiply
+	 * first, which truncates on ILP32 targets / large offsets.
+	 */
+	const uint64_t n_phys_addr = iova + ((uint64_t)slot_addr * req_size);
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the read TG for num_iterations runs (0 stops it). Returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the read-TG ready flag in 1 ms steps, giving up after 1000 polls
+ * (~1 s). Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+
+	do {
+		/* Sleep first so any in-flight FPGA pipeline work drains. */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg) != 0)
+			return 0;
+		poll++;
+	} while (poll < 1000);
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+	       __func__, poll);
+	return -1;
+}
+
+/*
+ * Program one write-TG RAM slot: its DMA address within the host buffer and
+ * its request size / wait / wrap / increment flags. Returns 0.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	/*
+	 * Compute the slot offset in 64-bit arithmetic: the previous
+	 * (unsigned long)(slot_addr * req_size) performed a 32-bit multiply
+	 * first, which truncates on ILP32 targets / large offsets.
+	 */
+	const uint64_t n_phys_addr = iova + ((uint64_t)slot_addr * req_size);
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the write TG for num_iterations runs (0 stops it). Returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the write-TG ready flag in 1 ms steps, giving up after 1000 polls
+ * (~1 s). Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+
+	do {
+		/* Sleep first so any in-flight FPGA pipeline work drains. */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg) != 0)
+			return 0;
+		poll++;
+	} while (poll < 1000);
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+	       __func__, poll);
+	return -1;
+}
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_root_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_endpoint_instance = NULL;
+
+	nthw_pcie3 *p_pci_root = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_endpoint = NULL;
+
+	assert(p_root_instance || p_pci_root);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_root,
+								    pri);
+			}
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_endpoint,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_enable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_enable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_enable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_enable(p_pci_endpoint);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_disable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_disable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_disable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_disable(p_pci_endpoint);
+
+			/* Post process root */
+			if (p_root_instance) {
+				nthw_hif_end_point_counters_sample(p_root_instance,
+							       pri);
+			}
+
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_root,
+								     pri);
+			}
+
+			/* Post process endpoint */
+			if (p_endpoint_instance) {
+				nthw_hif_end_point_counters_sample(p_endpoint_instance,
+							       sla);
+			}
+
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_endpoint,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_root_instance) {
+					nthw_hif_stat_req_enable(p_root_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_root_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Top-level entry for the HIF throughput measurement.
+ *
+ * numa_node: node to test; UINT8_MAX requests "all nodes" (note: numa_end
+ *            is currently clamped to numa_begin, so only one node is
+ *            iterated - TODO confirm intended behavior).
+ * direction: 1 = read, 2 = write, 3 = combined; <= 0 runs all three.
+ * n_pkt_size / n_batch_count / n_delay: non-positive values select the
+ * TG_PKT_SIZE / TG_NUM_PACKETS / TG_DELAY defaults.
+ *
+ * Returns 0 on success, non-zero if any measurement run reported an error
+ * (or -1 for a zero delay, which would give an empty measurement window).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			int by_loop;
+
+			for (by_loop = dir_begin; by_loop <= dir_end;
+					by_loop++) {
+				struct nthw_hif_end_point_counters *pri =
+						&eps.pri;
+				struct nthw_hif_end_point_counters *sla =
+						&eps.sla;
+
+				/* Primary endpoint test parameters */
+				pri->n_numa_node = numa;
+				pri->n_tg_direction = by_loop;
+				pri->n_tg_pkt_size = (n_pkt_size > 0 ?
+						   n_pkt_size :
+						   TG_PKT_SIZE);
+				pri->n_tg_num_pkts =
+					(n_batch_count > 0 ?
+					 n_batch_count :
+					 TG_NUM_PACKETS);
+				pri->n_tg_delay = (n_delay > 0 ? n_delay :
+						 TG_DELAY);
+				pri->cur_rx = 0;
+				pri->cur_tx = 0;
+				pri->n_ref_clk_cnt = -1;
+				pri->bo_error = 0;
+
+				/* Slave endpoint test parameters - mirror of pri */
+				sla->n_numa_node = numa;
+				sla->n_tg_direction = by_loop;
+				sla->n_tg_pkt_size = (n_pkt_size > 0 ?
+						   n_pkt_size :
+						   TG_PKT_SIZE);
+				sla->n_tg_num_pkts =
+					(n_batch_count > 0 ?
+					 n_batch_count :
+					 TG_NUM_PACKETS);
+				sla->n_tg_delay = (n_delay > 0 ? n_delay :
+						 TG_DELAY);
+				sla->cur_rx = 0;
+				sla->cur_tx = 0;
+				/*
+				 * BUGFIX: this line previously re-initialized
+				 * pri->n_ref_clk_cnt, leaving sla->n_ref_clk_cnt
+				 * uninitialized (copy-paste error).
+				 */
+				sla->n_ref_clk_cnt = -1;
+				sla->bo_error = 0;
+
+				bo_error +=
+				nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+								       pri, sla);
+#if defined(DEBUG) && (1)
+				NT_LOG(DBG, NTHW,
+				       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+				       __func__, pri->n_numa_node,
+				       pri->n_tg_direction,
+				       pri->n_tg_num_pkts,
+				       pri->n_tg_pkt_size,
+				       pri->n_tg_delay,
+				       pri->cur_rx, pri->cur_tx,
+				       (pri->cur_rx * 8UL / 1000000UL),
+				       (pri->cur_tx * 8UL / 1000000UL));
+				NT_LOG(DBG, NTHW,
+				       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+				       __func__, sla->n_numa_node,
+				       sla->n_tg_direction,
+				       sla->n_tg_num_pkts,
+				       sla->n_tg_pkt_size,
+				       sla->n_tg_delay,
+				       sla->cur_rx, sla->cur_tx,
+				       (sla->cur_rx * 8UL / 1000000UL),
+				       (sla->cur_tx * 8UL / 1000000UL));
+#endif
+
+				if (pri->bo_error != 0 || sla->bo_error != 0)
+					bo_error++;
+				if (bo_error)
+					break;
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+		/* BUGFIX: propagate the failure instead of returning 0 */
+		return bo_error;
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface for the PCIe Test Automaton (TA) / Traffic Generator (TG)
+ * throughput test facility of the NT4GA adapter.
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+/* Non-zero: emit a summary of the TA/TG test results in debug logs */
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator parameters; the measurement entry points fall
+ * back to these when called with non-positive overrides.
+ */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles to the TA/TG-related FPGA module drivers */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* PCIe read traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* PCIe write traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta; /* test automaton (good/bad packet counters) */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Set up the TA/TG module handles for the adapter; returns 0 on success */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one throughput measurement for a primary/slave endpoint pair */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Iterate measurements over NUMA node(s) and direction(s); see the .c file */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Convert an inline 32:32 split timestamp (pcap-like layout: upper 32 bits
+ * are seconds, lower 32 bits the sub-second part) to a single nanosecond
+ * count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Harvest the FPGA statistics DMA block into the host-side counter
+ * structures in p_nt4ga_stat.
+ *
+ * Vswitch and capture FPGA images use different counter layouts, so the
+ * DMA area is parsed by separate helpers. Always returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/* Capture image: the FPGA-written DMA timestamp is authoritative */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * First-stage statistics initialization: zero the per-adapter stat state
+ * and instantiate the STA and RMC FPGA module drivers.
+ *
+ * Returns 0 on success, -1 on allocation failure (no partial state is
+ * kept: the STA handle is deleted if the RMC allocation fails).
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* NOTE(review): p_nt4ga_stat is the address of an embedded member and
+	 * can never be NULL here - the else branch is effectively dead code.
+	 */
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		if (!p_nthw_rmc) {
+			/* Avoid leaking the STA handle on partial failure */
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache the FPGA-reported host buffer and port counts */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Second-stage statistics setup: allocate the DMA area the FPGA writes
+ * counters into, allocate the host-side mirror structures, and trigger the
+ * first stat transfer.
+ *
+ * Returns 0 on success, -1 on allocation failure. Structures allocated
+ * before a failure are released by nt4ga_stat_stop().
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* NOTE(review): presumably quiesces FPGA traffic while the stat DMA
+	 * address is (re)programmed - confirm against RMC module docs.
+	 */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * BUGFIX: the previous format string contained six
+		 * conversions ("%p" plus two PRIX64) but only five arguments
+		 * were passed - undefined behavior. Print addr and iova
+		 * consistently as 64-bit hex values.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d 0x%" PRIX64 " 0x%" PRIX64 "\n",
+		       __func__, n_stat_size, numa_node, p_dma->addr,
+		       p_dma->iova);
+
+		/*
+		 * BUGFIX: cast the varargs so they match the conversion
+		 * specifiers: %02ld requires long (adapter_no is int) and
+		 * PRIX64 requires a 64-bit value (n_stat_size is uint32_t).
+		 */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02ld, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX64 "\n",
+		       (long)n_physical_adapter_no, p_dma->iova, p_dma->addr,
+		       (uint64_t)n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so never-written counters are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	/* Host-side mirror structures for the FPGA counters */
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		/* Vswitch images report per-host-buffer port counters */
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* No flow-matcher stats on vswitch images */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick off the first FPGA statistics DMA transfer */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release all statistics resources owned by the adapter: the host-side
+ * counter mirror structures and the statistics DMA area.
+ *
+ * Safe to call on partially initialized state; always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_stat = &p_adapter_info->nt4ga_stat;
+
+	/*
+	 * free(NULL) is a no-op, so each buffer can be released
+	 * unconditionally; clearing the pointer afterwards guards
+	 * against double free on repeated calls.
+	 */
+	free(p_stat->virt.mp_stat_structs_port_rx);
+	p_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_stat->cap.mp_stat_structs_port_rx);
+	p_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_stat->virt.mp_stat_structs_port_tx);
+	p_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_stat->cap.mp_stat_structs_port_tx);
+	p_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_stat->mp_stat_structs_color);
+	p_stat->mp_stat_structs_color = NULL;
+
+	free(p_stat->mp_stat_structs_hb);
+	p_stat->mp_stat_structs_hb = NULL;
+
+	free(p_stat->mp_stat_structs_flm);
+	p_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() is not known to be NULL-safe - keep the guard */
+	if (p_stat->p_stat_dma) {
+		nt_dma_free(p_stat->p_stat_dma);
+		p_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Dump accumulated per-port totals (RX/TX packets, octets, drops) for all
+ * physical ports of the adapter to the given stream. Returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int i;
+
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, i, p_nt4ga_stat->a_port_rx_packets_total[i],
+			p_nt4ga_stat->a_port_rx_octets_total[i],
+			p_nt4ga_stat->a_port_rx_drops_total[i],
+			p_nt4ga_stat->a_port_tx_packets_total[i],
+			p_nt4ga_stat->a_port_tx_octets_total[i],
+			p_nt4ga_stat->a_port_tx_drops_total[i]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Parse the vswitch (virt v1) counter layout from the stat DMA block and
+ * accumulate into the host-side structures.
+ *
+ * DMA layout (stat layout version 6): color counters, then per-host-buffer
+ * counters (8 words each), then per-RX-port and per-TX-port counter groups.
+ * Returns 0 on success, -1 on NULL arguments or unsupported layout version.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	/*
+	 * BUGFIX: p_nt4ga_stat was previously dereferenced to fetch
+	 * mp_nthw_stat before the NULL check - check first, then load.
+	 */
+	if (!p_nt4ga_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	if (!p_nthw_stat)
+		return -1;
+
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: packet word carries TCP flags in its top 6 bits */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: 8 words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports: m_nb_rx_port_counters words per port */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports: m_nb_tx_port_counters words per port */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  42] :
+			0;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  47] :
+			0;
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53];
+
+		/* Rx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] +
+			(p_nthw_stat->m_dbs_present ?
+			 p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					   42] :
+			 0);
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23];
+
+		/* Tx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
/*
 * Per-color (flow class) packet/byte counters.
 * One entry per color id; up to NT_MAX_COLOR_FLOW_STATS entries are kept
 * (see a_stat_structs_color_base in struct nt4ga_stat_s).
 */
struct color_counters {
	uint64_t color_packets; /* packets accounted to this color */
	uint64_t color_bytes; /* bytes accounted to this color */
	uint8_t tcp_flags; /* accumulated TCP flags — presumably OR'ed together; TODO confirm */
};
+
/* Per host-buffer packet/byte counters (see mp_stat_structs_hb). */
struct host_buffer_counters {
	uint64_t flush_packets;
	uint64_t drop_packets;
	uint64_t fwd_packets; /* packets forwarded to the host buffer */
	uint64_t dbs_drop_packets; /* drops attributed to DBS — TODO confirm semantics */
	uint64_t flush_bytes;
	uint64_t drop_bytes;
	uint64_t fwd_bytes;
	uint64_t dbs_drop_bytes;
};
+
/*
 * Per-port RMON-style counters, v2 ("cap" profile).
 * Fields are accumulated from the per-port counter words delivered in the
 * statistics DMA buffer; the grouping below matches the counter ordering
 * used by the collect loop in nt4ga_stat.c.
 */
struct port_counters_v2 {
	/* Rx/Tx common port counters */
	uint64_t drop_events;
	uint64_t pkts;
	/* FPGA counters */
	uint64_t octets;
	uint64_t broadcast_pkts;
	uint64_t multicast_pkts;
	uint64_t unicast_pkts;
	uint64_t pkts_alignment;
	uint64_t pkts_code_violation;
	uint64_t pkts_crc;
	uint64_t undersize_pkts;
	uint64_t oversize_pkts;
	uint64_t fragments;
	uint64_t jabbers_not_truncated;
	uint64_t jabbers_truncated;
	/* Size distribution buckets */
	uint64_t pkts_64_octets;
	uint64_t pkts_65_to_127_octets;
	uint64_t pkts_128_to_255_octets;
	uint64_t pkts_256_to_511_octets;
	uint64_t pkts_512_to_1023_octets;
	uint64_t pkts_1024_to_1518_octets;
	uint64_t pkts_1519_to_2047_octets;
	uint64_t pkts_2048_to_4095_octets;
	uint64_t pkts_4096_to_8191_octets;
	uint64_t pkts_8192_to_max_octets;
	uint64_t mac_drop_events;
	uint64_t pkts_lr; /* "lr" — presumably long-range; TODO confirm meaning */
	/* Rx only port counters */
	uint64_t duplicate;
	uint64_t pkts_ip_chksum_error;
	uint64_t pkts_udp_chksum_error;
	uint64_t pkts_tcp_chksum_error;
	uint64_t pkts_giant_undersize;
	uint64_t pkts_baby_giant;
	/* Encapsulation classification (ISL/VLAN/MPLS combinations) */
	uint64_t pkts_not_isl_vlan_mpls;
	uint64_t pkts_isl;
	uint64_t pkts_vlan;
	uint64_t pkts_isl_vlan;
	uint64_t pkts_mpls;
	uint64_t pkts_isl_mpls;
	uint64_t pkts_vlan_mpls;
	uint64_t pkts_isl_vlan_mpls;
	/* Filter/drop accounting (packets and matching octet counters) */
	uint64_t pkts_no_filter;
	uint64_t pkts_dedup_drop;
	uint64_t pkts_filter_drop;
	uint64_t pkts_overflow;
	uint64_t pkts_dbs_drop; /* only maintained when DBS is present in the FPGA */
	uint64_t octets_no_filter;
	uint64_t octets_dedup_drop;
	uint64_t octets_filter_drop;
	uint64_t octets_overflow;
	uint64_t octets_dbs_drop; /* only maintained when DBS is present in the FPGA */
	/* IP fragment table hit/miss counters */
	uint64_t ipft_first_hit;
	uint64_t ipft_first_not_hit;
	uint64_t ipft_mid_hit;
	uint64_t ipft_mid_not_hit;
	uint64_t ipft_last_hit;
	uint64_t ipft_last_not_hit;
};
+
/*
 * Reduced per-port counter set used for the VSWITCH/inline profile
 * (see the "virt" member of the union in struct nt4ga_stat_s).
 */
struct port_counters_vswitch_v1 {
	/* Rx/Tx common port counters */
	uint64_t octets;
	uint64_t pkts;
	uint64_t drop_events;
	uint64_t qos_drop_octets;
	uint64_t qos_drop_pkts;
};
+
/*
 * Flow Manager (FLM) statistics counters.  The two groups below follow
 * the FLM module versions noted in the comments; which fields are valid
 * depends on flm_stat_ver in struct nt4ga_stat_s.
 */
struct flm_counters_v1 {
	/* FLM 0.17 */
	uint64_t current; /* currently learned flows — TODO confirm */
	uint64_t learn_done;
	uint64_t learn_ignore;
	uint64_t learn_fail;
	uint64_t unlearn_done;
	uint64_t unlearn_ignore;
	uint64_t auto_unlearn_done;
	uint64_t auto_unlearn_ignore;
	uint64_t auto_unlearn_fail;
	uint64_t timeout_unlearn_done;
	uint64_t rel_done;
	uint64_t rel_ignore;
	/* FLM 0.20 */
	uint64_t prb_done;
	uint64_t prb_ignore;
	uint64_t sta_done;
	uint64_t inf_done;
	uint64_t inf_skip;
	uint64_t pck_hit;
	uint64_t pck_miss;
	uint64_t pck_unh;
	uint64_t pck_dis;
	uint64_t csh_hit;
	uint64_t csh_miss;
	uint64_t csh_unh;
	uint64_t cuc_start;
	uint64_t cuc_move;
};
+
/*
 * Top-level state of the adapter statistics (nt4ga_stat) module:
 * HW module handles, the DMA counter buffer, and the accumulated
 * per-color/per-host-buffer/per-port counter structures.
 */
struct nt4ga_stat_s {
	nthw_stat_t *mp_nthw_stat; /* statistics HW module handle */
	nthw_rmc_t *mp_nthw_rmc; /* RMC HW module handle */
	struct nt_dma_s *p_stat_dma; /* DMA allocation backing the counter buffer */
	uint32_t *p_stat_dma_virtual; /* host-visible view of the counter buffer */
	uint32_t n_stat_size; /* size of the counter buffer */

	uint64_t last_timestamp; /* timestamp of the last collected snapshot */

	int mn_rx_host_buffers;
	int mn_tx_host_buffers;

	int mn_rx_ports;
	int mn_tx_ports;

	struct color_counters *mp_stat_structs_color;
	/* For calculating increments between stats polls */
	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];

	/* Exactly one of the union members is active, per adapter profile */
	union {
		/*Port counters for VSWITCH/inline */
		struct {
			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
		} virt;
		struct {
			struct port_counters_v2 *mp_stat_structs_port_rx;
			struct port_counters_v2 *mp_stat_structs_port_tx;
		} cap;
	};

	struct host_buffer_counters *mp_stat_structs_hb;

	int flm_stat_ver; /* FLM module version; selects valid flm_counters_v1 fields */
	struct flm_counters_v1 *mp_stat_structs_flm;

	/* Rx/Tx totals: */
	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */

	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
	/* Base is for calculating increments between statistics reads */
	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
};
+
typedef struct nt4ga_stat_s nt4ga_stat_t;

/* Lifecycle: init -> setup -> (collect/dump)* -> stop; implemented in nt4ga_stat.c */
int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);

/* Debug aid: dump statistics state to pfh */
int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);

/* Accumulate the latest DMA-delivered counters into p_nt4ga_stat */
int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
/*
 * Tell whether a copper SFP product number belongs to one of the known
 * supported tri-speed (10/100/1000) modules.
 */
static bool sfp_is_supported_tri_speed_pn(char *prod_no)
{
	static const char *const pn_trispeed_list[] = {
		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
	};
	const size_t n_entries =
		sizeof(pn_trispeed_list) / sizeof(pn_trispeed_list[0]);
	size_t idx;

	/* Linear scan of the (small) whitelist */
	for (idx = 0; idx < n_entries; idx++) {
		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
			return true;
	}

	return false;
}
+
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP:
+		return true;
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+	(void)lin_addr; /* Unused */
+
+	if (do_write)
+		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					 seq_cnt, p_data);
+	else
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Only write new page select value if necessary */
+	if (page_sel != curr_page_sel) {
+		if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+					 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+					 sizeof(page_sel), &page_sel) != 0)
+			return -1;
+	}
+	return 0;
+}
+
/*
 * Read or write 'length' bytes at NIM linear address 'lin_addr'.
 *
 * The linear address space hides the underlying I2C topology, as seen in
 * the mapping logic below:
 *   [0..127]                 0xA0 device, lower page (all modules)
 *   [128..SFP_PHY_LIN_ADDR)  0xA0 upper pages when page addressing is
 *                            enabled, otherwise the 0xA2 device for
 *                            addresses [256..511] (SFP)
 *   [SFP_PHY_LIN_ADDR..)     0xAC PHY device with 16-bit registers
 *                            (copper SFP); two bytes per linear address
 *
 * Returns 0 on success, -1 on range/validation or I2C errors.
 */
static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
				   uint16_t lin_addr, uint16_t length,
				   uint8_t *p_data, bool do_write)
{
	uint16_t i;
	uint8_t reg_addr; /* The actual register address in I2C device */
	uint8_t i2c_addr;
	int block_size = 128; /* Equal to size of MSA pages */
	int seq_cnt;
	int max_seq_cnt = 1;
	int multi_byte = 1; /* One byte per I2C register is default */
	const int m_port_no = ctx->instance - 2; /* assumes instance 2 == port 0 - TODO confirm */

	if (lin_addr >= SFP_PHY_LIN_ADDR) {
		/*
		 * This represents an address space at I2C address 0xAC for SFP modules
		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
		 * accessed MSByte first and this reading latches the LSByte that is
		 * subsequently read from the same address.
		 */
		multi_byte = 2;
		max_seq_cnt = 2;

		/* Test for correct multibyte access */
		if ((length % multi_byte) != 0) {
			NT_LOG(ERR, ETHDEV,
			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
			return -1;
		}

		if (lin_addr + (length / 2) >
				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
			NT_LOG(ERR, ETHDEV,
			       "Port %d: %s: Access above address range [0x%X..0x%X].",
			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
			return -1;
		}
	} else if (lin_addr + length > 128) {
		/*
		 * Page addressing could be relevant since the last byte is outside the
		 * basic range so check if it is enabled
		 */
		if (m_page_addressing) {
			/* Crossing into the PHY address range is not allowed */
			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
				NT_LOG(ERR, ETHDEV,
				       "Port %d: %s: Access above paged address range [0..0x%X].",
				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
				return -1;
			}
		} else {
			/* Access outside 0xA2 address range not allowed */
			if (lin_addr + length > 512) {
				NT_LOG(ERR, ETHDEV,
				       "Port %d: %s: Access above address range [0..511].",
				       m_port_no, __func__);
				return -1;
			}
		}
	}
	/* No missing else here - all devices supports access to address [0..127] */

	for (i = 0; i < length;) {
		bool use_page_select = false;

		/*
		 * Find out how much can be read from the current block in case of
		 * single byte access
		 */
		if (multi_byte == 1)
			max_seq_cnt = block_size - (lin_addr % block_size);

		if (m_page_addressing) {
			if (lin_addr >= 128) { /* Only page setup above this address */
				use_page_select = true;

				/* Map to [128..255] of 0xA0 device */
				reg_addr = (uint8_t)(block_size +
						    (lin_addr % block_size));
			} else {
				reg_addr = (uint8_t)lin_addr;
			}
			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
		} else {
			if (lin_addr >= SFP_PHY_LIN_ADDR) {
				/* Map to address [0..31] of 0xAC device */
				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
				i2c_addr = nim_i2c_0xac;
			} else if (lin_addr >= 256) {
				/* Map to address [0..255] of 0xA2 device */
				reg_addr = (uint8_t)(lin_addr - 256);
				i2c_addr = nim_i2c_0xa2;
			} else {
				reg_addr = (uint8_t)lin_addr;
				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
			}
		}

		/* Now actually do the reading/writing */
		seq_cnt = length - i; /* Number of remaining bytes */

		if (seq_cnt > max_seq_cnt)
			seq_cnt = max_seq_cnt;

		/*
		 * Read a number of bytes without explicitly specifying a new address.
		 * This can speed up I2C access since automatic incrementation of the
		 * I2C device internal address counter can be used. It also allows
		 * a HW implementation, that can deal with block access.
		 * Furthermore it also allows for access to data that must be accessed
		 * as 16bit words reading two bytes at each address eg PHYs.
		 */
		if (use_page_select) {
			if (nim_setup_page(ctx,
					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
				NT_LOG(ERR, ETHDEV,
				       "%s: Cannot set up page for linear address %u\n",
				       __func__, lin_addr);
				return -1;
			}
		}
		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
					    reg_addr, (uint8_t)seq_cnt,
					    p_data) != 0) {
			NT_LOG(ERR, ETHDEV,
			       "%s: Call to NIM_ReadWriteI2cData failed\n",
			       __func__);
			return -1;
		}

		p_data += seq_cnt;
		i = (uint16_t)(i + seq_cnt);
		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
	}
	return 0;
}
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_READ);
+}
+
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
+				       lin_addr, length, data, NIM_WRITE);
+}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	ctx->nim_id = 0;
+	int res = nim_read_id(ctx);
+
+	if (res) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+
+	ctx->content_valid = false;
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+	return 0;
+}
+
static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
				 uint8_t max_len, char *p_data);

/*
 * Template that generates sfp_read_vendor_info() (empty argument) and
 * qsfp_read_vendor_info() (argument "q"): each reads the vendor name,
 * product number, serial number, date and revision strings into ctx.
 *
 * NOTE(review): the address macros are written as Q##SFP_..., which
 * always pastes to the QSFP_*_LIN_ADDR constants regardless of the
 * macro argument — so both generated functions read the QSFP addresses.
 * Verify this is intended for the SFP variant.
 */
#define XSFP_READ_VENDOR_INFO(x)                                             \
	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
	{                                                                    \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
				      sizeof(ctx->vendor_name),               \
				      ctx->vendor_name);                      \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
				      sizeof(ctx->prod_no), ctx->prod_no);     \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
				      sizeof(ctx->serial_no), ctx->serial_no); \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
				      sizeof(ctx->date), ctx->date);         \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
				      (uint8_t)(sizeof(ctx->rev) - 2),       \
				      ctx->rev); /*OBS Only two bytes*/      \
	}

/* Instantiate sfp_read_vendor_info() and qsfp_read_vendor_info() */
XSFP_READ_VENDOR_INFO()
XSFP_READ_VENDOR_INFO(q)
+
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
/*
 * Map an MSA module identifier byte to a printable name.
 * Unknown or reserved identifiers map to "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id)
{
	static const char *const id_names[] = {
		[0x00] = "UNKNOWN",	[0x01] = "GBIC",
		[0x02] = "FIXED",	[0x03] = "SFP/SFP+",
		[0x04] = "300 pin XBI",	[0x05] = "XEN-PAK",
		[0x06] = "XFP",		[0x07] = "XFF",
		[0x08] = "XFP-E",	[0x09] = "XPAK",
		[0x0A] = "X2",		[0x0B] = "DWDM",
		[0x0C] = "QSFP",	[0x0D] = "QSFP+",
		[0x11] = "QSFP28",	[0x12] = "CFP4",
	};
	const size_t n_ids = sizeof(id_names) / sizeof(id_names[0]);

	/* Gaps in the designated initializer are NULL -> "ILLEGAL!" */
	if (nim_id < n_ids && id_names[nim_id] != NULL)
		return id_names[nim_id];

	return "ILLEGAL!";
}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset Soft Rate__select bits (RS0 & RS1)
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Disable TX laser.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	int res;
+	uint8_t value;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(value), &value, NIM_WRITE);
+
+	return res;
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	uint8_t value;
+	uint8_t mask;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	if (lane_idx < 0) /* If no lane is specified then all lanes */
+		mask = QSFP_SOFT_TX_ALL_DISABLE_BITS;
+	else
+		mask = (uint8_t)(1U << lane_idx);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		value |= mask;
+	else
+		value &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++)
+		if (*(p_nim_len_info + i) == 255) {
+			ctx->len_info[i] = 65535;
+		} else {
+			uint32_t len = *(p_nim_len_info + i) * *(p_nim_units + i);
+
+			if (len > 65535)
+				ctx->len_info[i] = 65535;
+			else
+				ctx->len_info[i] = (uint16_t)len;
+		}
+}
+
/*
 * Read basic QSFP+ module data: DMI options, vendor information, supported
 * fiber lengths and the required power level/class.
 *
 * @return 0 on success, -1 on I2C read failure.
 */
static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
{
	const bool pg_addr = page_addressing(ctx->nim_id);
	uint8_t options;
	uint8_t value;
	uint8_t nim_len_info[5];
	uint16_t nim_units[5] = { 1000, 2, 1, 1,
				 1
			       }; /* QSFP MSA units in meters */
	const char *yes_no[2] _unused = { "No", "Yes" };

	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
	       nim_id_to_text(ctx->nim_id), ctx->nim_id);

	/* Read DMI options */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
				    sizeof(options), &options, NIM_READ) != 0)
		return -1;
	/* NOTE(review): yes_no[] indexing assumes avg_pwr normalizes to 0/1
	 * (e.g. a bool struct field) — verify against the ctx declaration.
	 */
	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
	NT_LOG(DBG, ETHDEV,
	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
	       ctx->instance, yes_no[ctx->avg_pwr]);

	/* Vendor name, product number, serial number, date and revision */
	qsfp_read_vendor_info(ctx);
	NT_LOG(DBG, PMD,
	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
	       ctx->date, ctx->rev);

	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
				    sizeof(nim_len_info), nim_len_info,
				    NIM_READ) != 0)
		return -1;

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	nim_import_len_info(ctx, nim_len_info, nim_units);

	/* Read required power level */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
				    sizeof(value), &value, NIM_READ) != 0)
		return -1;

	/*
	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
	 * If power class >= 5 setHighPower must be called for the module to be fully
	 * functional
	 */
	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
		/* NIM in power class 1 - 4 */
		ctx->pwr_level_req =
			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
				  1);
	} else {
		/* NIM in power class 5 - 7 */
		ctx->pwr_level_req =
			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
	}

	return 0;
}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) {
+		return nim_sfp_set_rate_select(ctx, speed);
+	} else if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	}
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly impirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	ctx->options = 0;
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TEMP_LIN_ADDR, 1,
+					 QSFP_TEMP_THRESH_LIN_ADDR, true,
+					 NIM_OPTION_TEMP);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_VOLT_LIN_ADDR, 1,
+					 QSFP_VOLT_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_SUPPLY);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_RX_PWR_LIN_ADDR, 4,
+					 QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_RX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_PWR_LIN_ADDR, 4,
+					 QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_BIAS_LIN_ADDR, 4,
+					 QSFP_BIAS_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_BIAS);
+}
+
/*
 * Classify an SFP/SFP+/SFP28 module and set ctx->port_type accordingly,
 * based on the nominal bit rate, compliance codes, connector type and
 * supported fiber lengths read from the module EEPROM.
 * Byte addresses presumably follow the SFF-8472 A0h layout — verify
 * against the spec when changing any of them.
 */
static void sfp_find_port_params(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	uint16_t bit_rate_nom;
	uint8_t connector;
	uint8_t gig_eth_comp;
	uint8_t dmi_opt;
	uint8_t fiber_chan_tx_tech;
	unsigned int len_sm;
	unsigned int len_mm_50um;
	unsigned int len_mm_62_5um;

	ctx->specific_u.sfp.sfp28 = false;

	/* gigEthComp: */
	static const uint8_t eth_1000_b_t = 1 << 3;
	static const uint8_t eth_1000_b_sx = 1 << 0;
	static const uint8_t eth_1000_b_lx = 1 << 1;

	/* fiberChanTxTech: */
	static const uint8_t cu_passive = 1 << 2;
	static const uint8_t cu_active = 1 << 3;

	/* dmiOpt: */
	static const uint8_t dd_present = 1 << 6;

	/* connector: */
	static const uint8_t cu_pig_tail = 0x21;

	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;

	/* Byte 12: nominal bit rate in units of 100 Mb/s */
	read_data_lin(ctx, 12, sizeof(data), &data);
	bit_rate_nom = (uint16_t)(data * 100);

	read_data_lin(ctx, 2, sizeof(connector), &connector);
	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);

	read_data_lin(ctx, 15, sizeof(data), &data);
	len_sm = (unsigned int)data * 100; /* Unit is 100m */

	read_data_lin(ctx, 16, sizeof(data), &data);
	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */

	read_data_lin(ctx, 17, sizeof(data), &data);
	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */

	/* First find out if it is a SFP or a SFP+ NIM */
	if (bit_rate_nom == 0) {
		/*
		 * A Nominal bit rate of zero indicates that it has not been defined and must
		 * be deduced from transceiver technology
		 */
		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
	} else if (bit_rate_nom == 25500) {
		/* 25.5 Gb/s nominal rate: this is an SFP28 module */
		/* SFF-8024 - 4.4 Extended Specification Compliance References */
		read_data_lin(ctx, 36, sizeof(data), &data);

		if (data == 0x02)
			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
		else if (data == 0x03)
			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
		else if (data == 0x0B)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
		else if (data == 0x0C)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
		else if (data == 0x0D)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
		else
			ctx->port_type = NT_PORT_TYPE_SFP_28;

		ctx->specific_u.sfp.sfp28 = true;
		ctx->specific_u.sfp.sfpplus = true;

		/*
		 * Whitelist of 25G transceivers known to also support 10G.
		 * There is no way to inquire about this capability.
		 */
		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
			ctx->specific_u.sfp.dual_rate = true;

			/* Change the port type for dual rate modules */
			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
		}

		return;
	}
	/* 10 Gb/s or faster nominal rate means SFP+ */
	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
	/* Then find sub-types of each */
	if (ctx->specific_u.sfp.sfpplus) {
		if (fiber_chan_tx_tech & cu_active) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
		} else if (fiber_chan_tx_tech & cu_passive) {
			if (connector == cu_pig_tail)
				ctx->port_type =
					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
			else
				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
		} else {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
		}
		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
			ctx->specific_u.sfp.dual_rate = true;
		}

		read_data_lin(ctx, 65, sizeof(data), &data);
		/* Test hard RATE_SELECT bit */
		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);

		read_data_lin(ctx, 93, sizeof(data), &data);
		/* Test soft RATE_SELECT bit */
		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
	} else { /* SFP */
		/* 100M */
		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
			ctx->port_type = NT_PORT_TYPE_SFP_FX;
		/* 1G */
		} else {
			ctx->specific_u.sfp.cu_type = false;
			if (gig_eth_comp & eth_1000_b_sx) {
				ctx->port_type = NT_PORT_TYPE_SFP_SX;
			} else if (gig_eth_comp & eth_1000_b_lx) {
				ctx->port_type = NT_PORT_TYPE_SFP_LX;
			} else if (gig_eth_comp & eth_1000_b_t) {
				ctx->specific_u.sfp.tri_speed =
					sfp_is_supported_tri_speed_pn(ctx->prod_no);

				if (ctx->specific_u.sfp.tri_speed) {
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
				} else {
					ctx->port_type = NT_PORT_TYPE_SFP_CU;
				}
				ctx->specific_u.sfp.cu_type = true;
			} else {
				/*
				 * Not all modules report their ethernet compliance correctly so use
				 * length indicators
				 */
				if (len_sm > 0)
					ctx->port_type = NT_PORT_TYPE_SFP_LX;
				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
					ctx->port_type = NT_PORT_TYPE_SFP_SX;
			}

			/* Add Diagnostic Data suffix if necessary */
			if (dmi_opt & dd_present) {
				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
				else if (ctx->port_type ==
						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
			}
		}
	}
}
+
+
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		ctx->speed_mask = NT_LINK_SPEED_25G; /* Default for SFP28 */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		ctx->speed_mask = NT_LINK_SPEED_10G; /* Default for SFP+ */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else { /* SFP */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+			ctx->speed_mask = NT_LINK_SPEED_100M;
+		} else {
+			ctx->speed_mask = NT_LINK_SPEED_1G; /* Default for SFP */
+			if (ctx->specific_u.sfp.dual_rate ||
+					ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_100M;
+			if (ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_10M;
+		}
+	}
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	if (fiber_chan_speed & (1 << 7)) {
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t extended_specification_compliance_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &extended_specification_compliance_code);
+
+		switch (extended_specification_compliance_code) {
+		case 0x02:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+			break;
+		case 0x03:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+			break;
+		case 0x0B:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+			break;
+		case 0x0C:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+			break;
+		case 0x0D:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+			break;
+		case 0x25:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+			break;
+		case 0x26:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+			break;
+		case 0x27:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+			break;
+		default:
+			ctx->port_type = NT_PORT_TYPE_QSFP28;
+		}
+	} else {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
+				   0x03; /* bit 1..0 */
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+			ctx->port_type == NT_PORT_TYPE_QSFP28_LR) {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask =
+				0; /* PAM-4 modules can only run on all lanes together */
+	} else {
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask = NT_LINK_SPEED_100G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_25G;
+
+		if (qsfp28_is_rate_selection_enabled(ctx)) {
+			/*
+			 * It is assumed that if the module supports dual rates then the other rate
+			 * is 10G per lane or 40G for all lanes.
+			 */
+			if (ctx->lane_idx < 0)
+				ctx->speed_mask |= NT_LINK_SPEED_40G;
+			else
+				ctx->speed_mask = NT_LINK_SPEED_10G;
+		}
+	}
+}
+
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	switch (device_tech & 0xF0) {
+	case 0xA0: /* Copper cable unequalized */
+	case 0xB0: /* Copper cable passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+		break;
+	case 0xC0: /* Copper cable, near and far end limiting active equalizers */
+	case 0xD0: /* Copper cable, far end limiting active equalizers */
+	case 0xE0: /* Copper cable, near end limiting active equalizers */
+	case 0xF0: /* Copper cable, linear active equalizers */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+		break;
+	default: /* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+		break;
+	}
+}
+
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	ctx->speed_mask = (ctx->lane_idx < 0) ? NT_LINK_SPEED_40G :
+			 (NT_LINK_SPEED_10G);
+}
+
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	int res = sfp_read_basic_data(ctx);
+
+	if (!res) {
+		sfp_find_port_params(ctx);
+		sfp_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->specific_u.qsfp.qsfp28 = false;
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+}
+
/*
 * Initialize a QSFP+ context: set up lane bookkeeping, read the module's
 * basic data, then derive port type, options (from product number or, as a
 * fallback, from register probing) and the speed mask.
 * Returns zero on success, otherwise the basic-data read result.
 */
static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
{
	qsfpplus_construct(ctx, lane_idx);
	int res = qsfpplus_read_basic_data(ctx);

	if (!res) {
		qsfpplus_find_port_params(ctx);
		/*
		 * If not on the known modules list try to figure out which sensors that are present
		 */
		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
			NT_LOG(DBG, NTHW,
			       "NIM options not known in advance - trying to detect");
			qsfpplus_get_qsfp_options_from_data(ctx);
		}

		/*
		 * Read if TX_DISABLE has been implemented
		 * For passive optical modules this is required while it for copper and active
		 * optical modules is optional. Under all circumstances register 195.4 will
		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
		 */
		uint8_t value;

		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);

		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;

		if (ctx->tx_disable)
			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);

		/*
		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
		 * module default disabled.
		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
		 * decided that this option should not be detected automatically but from PN
		 */
		ctx->specific_u.qsfp.rx_only =
			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
		qsfpplus_set_speed_mask(ctx);
	}
	return res;
}
+
/*
 * Give a freshly reset QSFP28 module time to become ready.
 *
 * Reads the SFF-8636 revision compliance byte into the context, then either
 * waits a fixed 500 ms (modules without the init-complete flag) or polls the
 * init-complete flag (byte 6, bit 0) with a ~1 s timeout. Only performed for
 * lane_idx <= 0 so the wait happens at most once per module.
 */
static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	bool init_complete_flag_present = false;

	/*
	 * Revision compliance
	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
	 */
	read_data_lin(ctx, 1,
		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);

	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
	if (ctx->lane_idx > 0)
		return;

	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
		/* Check if init complete flag is implemented (byte 221, bit 4) */
		read_data_lin(ctx, 221, sizeof(data), &data);
		init_complete_flag_present = (data & (1 << 4)) != 0;
	}

	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
	       init_complete_flag_present);

	/*
	 * If the init complete flag is not present then wait 500ms that together with 500ms
	 * after reset (in the adapter code) should be enough to read data from upper pages
	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
	 * Probably because access to the paged address space is required.
	 */
	if (!init_complete_flag_present) {
		NT_OS_WAIT_USEC(500000);
		return;
	}

	/* Otherwise wait for the init complete flag to be set */
	int count = 0;

	while (true) {
		if (count > 10) { /* 1 s timeout */
			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
			break;
		}

		/* Init-complete flag lives in byte 6, bit 0 */
		read_data_lin(ctx, 6, sizeof(data), &data);

		if (data & 0x01) {
			NT_LOG(DBG, NTHW, "Module ready after %dms",
			       count * 100);
			break;
		}

		NT_OS_WAIT_USEC(100000); /* 100 ms */
		count++;
	}
}
+
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		if (ctx->prod_no == nim_list[i]) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		qsfp28_wait_for_ready_after_reset(ctx);
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(struct nim_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
/* Minimal NIM state built by nim_state_build(); see i2c_nim.h prototypes. */
typedef struct sfp_nim_state {
	uint8_t br; /* bit rate, units of 100 MBits/sec */
} sfp_nim_state_t, *sfp_nim_state_p;
+
/*
 * Per-port NIM context: I2C access handle plus data captured from the
 * module during pre-initialization (construct_and_preinit_nim()).
 */
typedef struct nim_i2c_ctx {
	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
	uint8_t instance;
	uint8_t devaddr; /* I2C device address (e.g. nim_i2c_0xa0) */
	uint8_t regaddr;
	uint8_t nim_id; /* raw identifier byte; see enum nt_nim_identifier_e */
	nt_port_type_t port_type;

	/* Module identification strings; sizes include the NUL terminator */
	char vendor_name[17];
	char prod_no[17];
	char serial_no[17];
	char date[9];
	char rev[5];
	bool avg_pwr; /* NOTE(review): presumably the DMI average-power flag (register 220.3) - confirm */
	bool content_valid; /* NOTE(review): presumably set once basic data was read - confirm */
	uint8_t pwr_level_req;
	uint8_t pwr_level_cur;
	uint16_t len_info[5];
	uint32_t speed_mask; /* Speeds supported by the NIM */
	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
	uint8_t lane_count;
	uint32_t options; /* bitmask of (1 << nim_option_t) capabilities */
	bool tx_disable; /* module implements TX_DISABLE (register 195.4) */
	bool dmi_supp; /* NOTE(review): presumably digital diagnostics supported - confirm */

	union {
		struct {
			bool sfp28;
			bool sfpplus;
			bool dual_rate;
			bool hw_rate_sel;
			bool sw_rate_sel;
			bool cu_type;
			bool tri_speed;
			bool ext_cal;
			bool addr_chg;
		} sfp;

		struct {
			bool rx_only; /* derived from the NIM_OPTION_RX_ONLY bit (PN based) */
			bool qsfp28; /* true for QSFP28, false for QSFP+ */
			union {
				struct {
					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
					bool media_side_fec_ctrl; /* FEC controllable on media side */
					bool host_side_fec_ctrl; /* FEC controllable on host side */
					bool media_side_fec_ena;
					bool host_side_fec_ena;
				} qsfp28;
			} specific_u;
		} qsfp;

	} specific_u;
} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
/*
 * Singly-linked list node tying one adapter sensor to the NIM context it
 * is read from; read() refreshes the sensor value (see qsfp_sensors.c).
 */
struct nim_sensor_group {
	struct nt_adapter_sensor *sensor; /* the sensor being served */
	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi); /* value-update callback */
	struct nim_i2c_ctx *ctx; /* NIM the sensor belongs to */
	struct nim_sensor_group *next; /* next group in the per-port chain */
};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify NIM based on it's ID and some register reads
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific type of NIMS.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
/* Capabilities a NIM can advertise; used as bit positions in ctx->options. */
typedef enum {
	NIM_OPTION_TEMP = 0, /* temperature sensor */
	NIM_OPTION_SUPPLY, /* supply-voltage sensor */
	NIM_OPTION_RX_POWER, /* RX optical power sensor */
	NIM_OPTION_TX_BIAS, /* TX bias-current sensor */
	NIM_OPTION_TX_POWER, /* TX optical power sensor */
	NIM_OPTION_TX_DISABLE, /* TX_DISABLE implemented */
	/* Indicates that the module should be checked for the two next FEC types */
	NIM_OPTION_FEC,
	NIM_OPTION_MEDIA_SIDE_FEC,
	NIM_OPTION_HOST_SIDE_FEC,
	NIM_OPTION_RX_ONLY /* module receives only (PN based) */
} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return "NotAvail";
+	case NT_LINK_SPEED_10M:
+		return "10M";
+	case NT_LINK_SPEED_100M:
+		return "100M";
+	case NT_LINK_SPEED_1G:
+		return "1G";
+	case NT_LINK_SPEED_10G:
+		return "10G";
+	case NT_LINK_SPEED_25G:
+		return "25G";
+	case NT_LINK_SPEED_40G:
+		return "40G";
+	case NT_LINK_SPEED_50G:
+		return "50G";
+	case NT_LINK_SPEED_100G:
+		return "100G";
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return "Unhandled";
+	}
+}
+
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	uint64_t n_link_speed = 0ULL;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		n_link_speed = 0UL;
+		break;
+	case NT_LINK_SPEED_10M:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100M:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_1G:
+		n_link_speed = (1ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_10G:
+		n_link_speed = (10ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_25G:
+		n_link_speed = (25ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_40G:
+		n_link_speed = (40ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_50G:
+		n_link_speed = (50ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	case NT_LINK_SPEED_100G:
+		n_link_speed = (100ULL * 1000ULL * 1000ULL * 1000ULL);
+		break;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		n_link_speed = 0UL;
+		break;
+	}
+	return n_link_speed;
+}
+
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			if (len > 0) {
+				if ((length - len - 1) > 2) {
+					strncat(buffer, ", ", length);
+					len = strlen(buffer);
+				}
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
/*
 * Read @count consecutive 16-bit sensor words starting at linear address
 * @addr into @p_lane_values (one word per lane).
 * Always returns true; errors from read_data_lin() are not propagated here.
 */
static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
				   nim_option_t nim_option, uint8_t count,
				   uint16_t *p_lane_values)
{
	(void)nim_option; /* only consulted when NIM_DMI_TEST_VALUE is defined */

	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
		    p_lane_values);

	for (int i = 0; i < count; i++) {
		/*
		 * NOTE(review): self-assignment is a no-op. The original
		 * comment claimed a big-endian to little-endian swap, but no
		 * byte swap is performed - confirm the intended byte order.
		 */
		*p_lane_values = (*p_lane_values);

#ifdef NIM_DMI_TEST_VALUE
		if (nim_option == NIM_OPTION_RX_POWER)
			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
		else
			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
#endif

		p_lane_values++;
	}

	return true;
}
+
+/*
+ * Read NIM temperature
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_temperature(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)(res * 10 / 256));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t res;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (qsfp_plus_nim_get_supply_voltage(sg->ctx, &res))
+		update_sensor_value(sg->sensor, (int)((res) / 10));
+
+	else
+		update_sensor_value(sg->sensor, -1);
+}
+
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_bias_current(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i] * 2);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_tx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
+
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	bool res = qsfp_plus_nim_get_rx_power(sg->ctx, temp);
+
+	if (res) {
+		for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+			update_sensor_value(sg->sensor, (int)temp[i]);
+	} else {
+		update_sensor_value(sg->sensor, -1);
+	}
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Include guard renamed from _QSFP_H: identifiers starting with an
+ * underscore followed by an uppercase letter are reserved for the
+ * implementation (C11 7.1.3), and _QSFP_H is collision-prone anyway.
+ */
+#ifndef QSFP_SENSORS_H
+#define QSFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/*
+ * QSFP/QSFP+ sensor read functions. Each validates its arguments, reads
+ * one sensor through the NIM I2C context in sg and reports the result
+ * via update_sensor_value() (-1 on read failure).
+ */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* QSFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+/* Base-page ID/length fields (linear addresses; trailing comment = byte size) */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ *
+ * data_addr:   linear address of the 16-bit big-endian raw reading
+ * slope_addr:  linear address of the 16-bit calibration slope
+ * offset_addr: linear address of the 16-bit calibration offset
+ * p_value:     out: receives the (possibly calibrated) 16-bit value
+ * signed_data: true when the reading must be treated as int16_t
+ * Returns false when the module has no DMI support, true otherwise.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* big-endian wire -> host order (htons is its own inverse) */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Calibration words are big-endian too; convert to host order */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/* Apply value = raw * slope / 256 + offset, saturated to 16 bits */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read the NIM temperature (signed DMI reading with slope/offset calibration).
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR,
+				  SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true,
+				  ctx);
+}
+
+/*
+ * Read the NIM supply voltage (unsigned DMI reading with slope/offset
+ * calibration).
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR,
+				  SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read the NIM TX bias current (unsigned DMI reading with slope/offset
+ * calibration).
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR, SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read the NIM TX optical power (unsigned DMI reading with slope/offset
+ * calibration).
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR, SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ *
+ * addr:    linear address of the 16-bit big-endian raw RX power reading
+ * p_value: out: receives the (possibly calibrated) reading
+ * Returns false only when the calibrated result exceeds 16 bits; in that
+ * case *p_value is left holding the raw (uncalibrated) reading.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* big-endian wire -> host order */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/* Byte-swap each 32-bit coefficient in place to host order */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 * Note: Coeff0 lives at rx_pwr_cal[4]; the loop below walks
+		 * rx_pwr_cal[3]..rx_pwr_cal[0] while raising the power term.
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		if (rx_power > 65535)
+			return false;
+
+		/* Clamp negative results to zero before narrowing */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read the RX optical power, applying external calibration when the
+ * module requires it.
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR,
+					       p_value);
+}
+
+/*
+ * Read the SFP module temperature and push it to the sensor.
+ * Pushes -1 when the read fails.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_temperature(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Convert the raw reading to tenths (raw * 10 / 256) */
+	update_sensor_value(sg->sensor, (int)(raw * 10 / 256));
+}
+
+/*
+ * Read the SFP module supply voltage and push it to the sensor.
+ * Pushes -1 when the read fails.
+ */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_supply_voltage(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Unit: 100uV -> 1mV */
+	update_sensor_value(sg->sensor, (int)(raw / 10));
+}
+
+/*
+ * Read the SFP module TX bias current and push it to the sensor.
+ * Pushes -1 when the read fails.
+ */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_bias_current(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Raw reading is scaled by 2 before reporting */
+	update_sensor_value(sg->sensor, (int)(raw * 2));
+}
+
+/*
+ * Read the SFP module TX optical power and push it to the sensor.
+ * Pushes -1 when the read fails.
+ */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_power(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)raw);
+}
+
+/*
+ * Read the SFP module RX optical power and push it to the sensor.
+ * Pushes -1 when the read fails.
+ */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_rx_power(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)raw);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Include guard renamed from _SFP_H: identifiers starting with an
+ * underscore followed by an uppercase letter are reserved for the
+ * implementation (C11 7.1.3), and _SFP_H is collision-prone anyway.
+ */
+#ifndef SFP_SENSORS_H
+#define SFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/*
+ * SFP/SFP+ sensor read functions. Each validates its arguments, reads
+ * one sensor through the NIM I2C context in sg and reports the result
+ * via update_sensor_value() (-1 on read failure).
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* SFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate a zero-initialized GMF handle.
+ * Returns NULL on allocation failure.
+ * calloc() replaces the malloc()+memset() pair: one call, same result.
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/*
+ * Release a GMF handle. Safe to call with NULL.
+ * The structure is scrubbed before free so stale pointers fail fast.
+ */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_gmf_t));
+	free(p);
+}
+
+/*
+ * Bind GMF module instance n_instance of p_fpga to handle p.
+ *
+ * Probe mode: when p is NULL the function only checks for the module's
+ * existence and returns 0 if present, -1 if not.
+ * Returns 0 on success, -1 when the instance does not exist.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields: *_get_* asserts or fails on absence */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 when absent from the FPGA image */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/*
+	 * Optional registers/fields: *_query_* returns NULL when the loaded
+	 * FPGA image does not implement them; users must NULL-check.
+	 */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF unless an administrative block is latched */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame-gap handling */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	const uint32_t val = enable ? 1 : 0;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, val);
+}
+
+/* Set the TX-now-always bit; a no-op when the FPGA lacks the field */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Set the TX-on-timestamp-always bit; a no-op when the FPGA lacks the field */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/*
+ * Set the TX-on-timestamp adjust-on-set-clock bit; a no-op when the
+ * FPGA lacks the field.
+ */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			    enable ? 1 : 0);
+}
+
+/*
+ * Enable/disable automatic IFG adjustment.
+ * Pass an explicit 0/1 like the sibling enable setters instead of relying
+ * on the implicit bool -> uint32_t conversion (same value, consistent style).
+ */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write a raw IFG speed value into the (22- or 64-bit wide) SPEED field.
+ * Returns 0 on success, -1 when the value does not fit.
+ *
+ * NOTE(review): the accepted range is [0, 2^(width-1)] inclusive, not the
+ * field maximum 2^width - 1 -- confirm against the FPGA spec whether the
+ * top bit is intentionally excluded.
+ * The (uint32_t *) cast writes the 64-bit value as one or two 32-bit
+ * words; assumes field_set_val expects little-endian word order -- verify.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Return the bit width of the IFG SPEED field of the loaded FPGA image */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(width >= 22);
+
+	return width;
+}
+
+/*
+ * Program the IFG speed as a rate limit expressed in bits.
+ *
+ * The ratio n_rate_limit_bits/n_link_speed is scaled by the product
+ * parameters IFG_SPEED_MUL/IFG_SPEED_DIV, then mapped to the fixed-point
+ * register encoding (1/rate - 1) * 2^(width/2) and rounded.
+ * Returns 0 on success, -1 when the encoded value does not fit
+ * (see nthw_gmf_set_ifg_speed_raw).
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed as a rate limit in percent of link speed.
+ *
+ * 0.0 and 100.0 are exact sentinels (the float equality compare is
+ * intentional) and write 0, i.e. no limiting. Values up to 99 use the
+ * same fixed-point encoding as nthw_gmf_set_ifg_speed_bits; values in
+ * (99, 100) are rejected.
+ * Returns 0 on success, -1 on rejection or when the value does not fit.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit clock delta as two 32-bit words and flush */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	uint64_t val = delta;
+
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&val, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/*
+ * Write the 64-bit clock delta-adjust value; a no-op when the FPGA image
+ * has no GMF_IFG_SET_CLOCK_DELTA_ADJUST register.
+ */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	uint64_t val = delta_adjust;
+
+	if (p->mp_ifg_clock_delta_adjust == NULL)
+		return;
+
+	field_set_val(p->mp_ifg_clock_delta_adjust_delta, (uint32_t *)&val, 2);
+	field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+}
+
+/* Write the 64-bit max-adjust slack as two 32-bit words and flush */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	uint64_t val = slack;
+
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&val, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write and flush the debug lane-marker compensation value */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation,
+			    compensation);
+}
+
+/*
+ * Read the sticky status register and return it as a combination of
+ * GMF_STATUS_MASK_* bits.
+ */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t mask = 0;
+
+	/* Refresh the register shadow before sampling the fields */
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed))
+		mask |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted))
+		mask |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return mask;
+}
+
+/*
+ * Clear the sticky status bits selected in status (write-to-clear via
+ * field_set_flush).
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Return the next-packet timestamp (ns) from the optional
+ * GMF_STAT_NEXT_PKT register, or UINT64_MAX when the register is absent.
+ *
+ * Fix: the sentinel was ULONG_MAX, which is only 2^32-1 on ILP32/LLP64
+ * ABIs; use UINT64_MAX to match the uint64_t return type everywhere.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Return the maximum packet delay (ns) from the optional
+ * GMF_STAT_MAX_DELAYED_PKT register, or UINT64_MAX when absent.
+ *
+ * Fix: the sentinel was ULONG_MAX, which is only 2^32-1 on ILP32/LLP64
+ * ABIs; use UINT64_MAX to match the uint64_t return type everywhere.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Disable the GMF and latch the administrative block so later
+ * nthw_gmf_set_enable() calls become no-ops (license expiry enforcement).
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/* Bit masks returned/accepted by the sticky-status accessors below */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/* Handle holding the register/field pointers of one GMF module instance */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	/* Control register and its fields; *_query_* fields may be NULL */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	/* Optional on some FPGA images; NULL when absent */
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* Optional statistics registers; NULL when absent */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* IFG speed scaling product parameters (default 1) */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+/* Lifetime: new -> init -> use -> delete */
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+/* No-ops when the corresponding optional field is absent */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+/* Return 0 on success, -1 when the value does not fit / is rejected */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+/* Number of {address, value} entries in the si5340_revd_registers table */
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/*
+ * One clock-generator configuration write. The table below is included
+ * with si5340_revd_register_t/si5340_revd_registers macro-renamed by the
+ * including .c file (see nthw_clock_profiles.c).
+ */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..ec32dd88e6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC context.
+ * Returns NULL on allocation failure; caller owns the object and must
+ * release it with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset() in one call */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/* Scrub and release an RMC context previously obtained from nthw_rmc_new(). */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (!p)
+		return;
+	/* clear stale register/field pointers before returning the memory */
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Bind an RMC context to RMC module instance n_instance of p_fpga.
+ *
+ * Called with p == NULL it only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise. With a real context it caches the module,
+ * product parameters and CTRL/STATUS/DBG/MAC_IF register fields.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	/* probe-only mode: report presence of the module, touch nothing */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the total port count when NT_RX_PORTS is absent */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL register and its blocking fields are mandatory */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* STATUS/DBG/MAC_IF registers are optional; query and wire only if present */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read the current per-port MAC blocking mask from the CTRL register. */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	nt_field_t *p_fld = p->mp_fld_ctrl_block_mac_port;
+
+	return field_get_updated(p_fld);
+}
+
+/* Store-forward RAM overflow counter; 0xffffffff when STATUS is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/* Descriptor FIFO overflow counter; 0xffffffff when STATUS is absent. */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/* DBG merge value; 0xffffffff when the optional DBG register is absent. */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Write an explicit per-port MAC blocking mask and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	nt_field_t *p_fld = p->mp_fld_ctrl_block_mac_port;
+
+	field_set_val_flush32(p_fld, mask);
+}
+
+/*
+ * Block statistics-drop, keep-alive and all MAC ports.
+ * A no-op while the instance is administratively blocked.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (p->mb_administrative_block)
+		return;
+
+	field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+	field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+}
+
+/*
+ * Unblock traffic: clear the statistics-drop and keep-alive blocks and
+ * write a MAC-port mask that keeps only the ports beyond the configured
+ * count (or the NIM count on a secondary adapter) blocked.
+ * A no-op while the instance is administratively blocked.
+ *
+ * NOTE(review): the shift is UB if mn_ports/mn_nims can reach 32
+ * (shift count equals the width of uint32_t) - confirm the parameter range.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary)
+{
+	uint32_t n_block_mask = ~0U << (b_is_secondary ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Block every MAC port and latch the administrative flag so later
+ * nthw_rmc_block()/nthw_rmc_unblock() calls become no-ops.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	nt_field_t *p_fld = p->mp_fld_ctrl_block_mac_port;
+
+	field_set_flush(p_fld); /* block all MAC ports */
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..2df4462287
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * Context for one RMC (MOD_RMC) FPGA module instance: the bound module,
+ * cached product parameters and the register/field handles used by the
+ * nthw_rmc_* accessors. Populated by nthw_rmc_init().
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA, set by nthw_rmc_init() */
+	nt_module_t *mp_mod_rmc;	/* resolved MOD_RMC module instance */
+	int mn_instance;	/* module instance number */
+
+	int mn_ports;	/* RX port count (falls back to NT_PORTS) */
+	int mn_nims;	/* NIM count, used for secondary adapters */
+	bool mb_is_vswitch;	/* FPGA profile is FPGA_INFO_PROFILE_VSWITCH */
+
+	/* when true, block/unblock requests are ignored */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent in the FPGA) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional; NULL when absent in the FPGA) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional; NULL when absent in the FPGA) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Next free FPGA sensor-result slot per adapter, advanced by get_fpga_idx().
+ * Fix: declared static - this mutable state is file-internal and must not
+ * be exported into the global namespace.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * Program the AVR to monitor one sensor and deliver its result into the
+ * next free FPGA slot for this adapter.
+ *
+ * Returns the FPGA result-slot index assigned to the sensor; on a setup
+ * failure the error is only logged and the (already consumed) index is
+ * still returned.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format packs endianness in b0..1 and signedness in b2..3 */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Fetch one AVR-monitored sensor result from its FPGA slot, run the
+ * group's conversion function and record the converted value.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t raw_result;
+
+	if (!sg || !sg->sensor)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &raw_result);
+	update_sensor_value(sg->sensor, sg->conv_func(raw_result));
+}
+
+/*
+ * Create a sensor group backed by an AVR-monitored sensor: allocate the
+ * group, allocate and describe the sensor, and program the AVR monitor.
+ * Returns the group, or NULL on allocation failure (logged).
+ * Caller owns the returned group (release via sensor_deinit()).
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	/* Fix: the original dereferenced sg->sensor without checking for NULL */
+	if (sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Hand out the next free FPGA sensor-result slot for the given adapter
+ * (post-incrementing the per-adapter counter).
+ * Fix: reject out-of-range adapter numbers instead of indexing past the
+ * end of s_fpga_indexes (undefined behavior in the original).
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	uint8_t tmp;
+
+	if (adapter_no >= sizeof(s_fpga_indexes) / sizeof(s_fpga_indexes[0]))
+		return 0; /* no slot tracking for unknown adapters */
+
+	tmp = s_fpga_indexes[adapter_no];
+	s_fpga_indexes[adapter_no] = (uint8_t)(tmp + 1);
+
+	return tmp;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Read the raw TEMPMON value (fields[0] of the group's monitor) and record
+ * the converted temperature on the group's sensor. t_spi is unused - the
+ * value comes from an FPGA register, not the AVR.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Raw ADC value to 0.1 degC: 44752896/16384 = 2731.5, i.e. a
+	 * 273.15 K offset scaled by 10. NOTE(review): the 20159/16384 gain
+	 * looks like a Xilinx XADC-style transfer function - confirm
+	 * against the FPGA's temperature-sensor documentation.
+	 */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Build the FPGA temperature sensor group: a TEMPMON register monitor
+ * plus an adapter-level temperature sensor descriptor.
+ * Returns NULL (logged) when the group cannot be allocated.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+
+	/* attach the TEMPMON register monitor */
+	sg->monitor = tempmon_new();
+	tempmon_init(sg->monitor, p_fpga);
+
+	/* describe the sensor itself */
+	sg->sensor = allocate_sensor(adapter_no, "FPGA",
+				     NT_SENSOR_SOURCE_ADAPTER,
+				     NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				     NT_SENSOR_DISABLE_ALARM,
+				     SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate an uninitialized TEMPMON monitor (populate with tempmon_init()).
+ * Returns NULL (logged) on allocation failure.
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *monitor;
+
+	monitor = malloc(sizeof(struct nt_fpga_sensor_monitor));
+	if (!monitor)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return monitor;
+}
+
+/*
+ * Bind a monitor to the FPGA's TEMPMON module: resolve the module, the
+ * TEMPMON_STAT register and its TEMP field.
+ * Fix: the original only logged a NULL module/register and then passed the
+ * NULL pointer straight into module_get_register()/register_get_field();
+ * we now stop at the first missing piece.
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return;
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return;
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+/* Level-0 (public) SFP sensor: module temperature only. */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level-1 (diagnostic) SFP sensors: supply voltage, TX bias and optical power. */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* Level-0 (public) QSFP sensor: module temperature only. */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* Level-1 (diagnostic) QSFP sensors: supply plus per-lane bias and power. */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/*
+ * Wire format of one sensor-setup entry sent to the AVR; packed so the
+ * layout matches the AVR firmware byte-for-byte.
+ */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/*
+ * NOTE(review): packing is restored here, so sensor_mon_setup16 itself is
+ * NOT packed (only its setup_data elements are). nt_avr_sensor_mon_setup()
+ * computes the transmit size from this struct's layout - confirm the AVR
+ * expects setup_cnt at the default (possibly padded) offset.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Run one SPI v3 transaction against the AVR: send txsz bytes from tx,
+ * receive up to *rxsz bytes into rx, and write the actual receive size
+ * back through rxsz (when non-NULL).
+ * Returns 0 on success, the nthw_spi_v3_transfer() error (logged) otherwise.
+ * Fix: the original dereferenced rxsz unconditionally while also checking
+ * it for NULL afterwards - the NULL case would have crashed before the check.
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = (uint16_t)(rxsz ? *rxsz : 0),
+				  .p_buf = rx };
+	int res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/* Thin wrapper: fetch one sensor result from its FPGA slot over SPI. */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	uint32_t status = nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+
+	return status;
+}
+
+/*
+ * Send a sensor-monitoring setup to the AVR. Only the populated part of
+ * the setup_data table (setup_cnt entries) is transmitted; no reply data
+ * is expected. Returns 0 on success, non-zero (logged) otherwise.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	const size_t header_size =
+		sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	const size_t tx_size =
+		header_size + sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+	size_t rx_size = 0;
+	int error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size,
+			 (uint16_t *)p_setup, &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Send a monitoring control command (start/stop/clear) to the AVR; no
+ * reply data is expected. Returns 0 on success, non-zero otherwise.
+ * Fix: log the transfer failure like nt_avr_sensor_mon_setup() does,
+ * instead of returning silently.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..e944dca5ce
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Primary/Secondary
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_PRIMARY, /* Adapter is primary in the bonding */
+	NT_BONDING_SECONDARY, /* Adapter is secondary in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/* Identity conversion: interpret the low 16 bits as a signed value. */
int null_signed(uint32_t p_sensor_result)
{
	int16_t raw = (int16_t)p_sensor_result;

	return raw;
}
+
/* Identity conversion: interpret the low 16 bits as an unsigned value. */
int null_unsigned(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * For EXAR7724: convert a raw Vch reading to Napatech internal representation.
 * Datasheet: Vout = ReadVal * 0.015 V (PRESCALE is accounted for),
 * i.e. 15 mV per step.
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	const int mv_per_step = 15; /* NT unit: 1 mV */

	return p_sensor_result * mv_per_step;
}
+
/*
 * For EXAR7724: convert a raw Vin reading to Napatech internal representation.
 * Datasheet: Vout = ReadVal * 0.0125 V, i.e. 12.5 mV per step,
 * computed in integer math as (raw * 25) / 2.
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	uint32_t half_mv = p_sensor_result * 25;

	return half_mv / 2; /* NT unit: 1 mV */
}
+
/*
 * For EXAR7724: convert a raw Tj reading to Napatech internal representation.
 * Datasheet: Temp (Kelvin) = (((ReadVal * 10 mV) - 600 mV) / (2 mV/K)) + 300 K
 *                          = ReadVal * 5 K
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * 2730 is used for the Kelvin-to-Celsius offset instead of the more
	 * exact 2732: with 5-degree steps the rounder offset gives more
	 * natural values.
	 */
	uint32_t deci_kelvin = p_sensor_result * 50;

	return deci_kelvin - 2730; /* NT unit: 0.1C */
}
+
/*
 * Conversion for the Linear Technology "Linear_5s_11s" format.
 * Returns Y * 2**N, where N = b[15:11] is a 5-bit two's complement exponent
 * and Y = b[10:0] is an 11-bit two's complement mantissa.
 * The multiplier scales the result to Napatech units.
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	int mantissa = value & 0x07FF; /* Y: low 11 bits */
	int exponent = (value >> 11) & 0x1F; /* N: high 5 bits */

	/* sign-extend the 11-bit mantissa (bit 10 is its sign bit) */
	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* sign-extend the 5-bit exponent (bit 4 is its sign bit) */
	if (exponent & 0x10)
		exponent -= 0x20;

	mantissa *= multiplier;

	/*
	 * Apply 2**N with multiply/divide rather than shifting, since
	 * shifting a negative mantissa would be undefined behavior.
	 */
	if (exponent > 0)
		mantissa *= 1 << exponent;
	else if (exponent < 0)
		mantissa /= 1 << (-exponent);

	return mantissa;
}
+
/*
 * Temperature conversion from the Linear_5s_11s format.
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	int deci_c = conv5s_11s((uint16_t)p_sensor_result, 10);

	/*
	 * NOTE(review): the uint16_t cast discards the sign for readings
	 * below 0C - confirm this matches the intended behavior.
	 */
	return (uint16_t)deci_c; /* NT unit: 0.1C */
}
+
/*
 * For MP2886a: convert a raw Tj reading to Napatech internal representation.
 * MPS-2886p: READ_TEMPERATURE (register 0x8Dh) is a 2-byte unsigned integer,
 * so only the low 16 bits of the raw result are significant.
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw; /* NT unit: 0.1C */
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * For DS1775: convert a raw temperature reading to Napatech internal
 * representation. NT unit: 0.1 deg; native unit: 1/256 C.
 */
int ds1775_t(uint32_t p_sensor_result)
{
	uint32_t deci_c = (p_sensor_result * 10) / 256;

	return deci_c;
}
+
/*
 * For FAN: convert a tick count to RPM.
 * NT unit: RPM; native unit stated as 2 ticks/revolution.
 * NOTE(review): the divisor of 4 does not obviously follow from
 * "2 ticks/revolution" alone (it implies a 2x sample window or similar) -
 * confirm against the tick-counter specification.
 */
int fan(uint32_t p_sensor_result)
{
	uint32_t rpm = p_sensor_result * 60U / 4;

	return rpm;
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/* Alarm policy attached to a sensor. */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM, /* alarms are generated for this sensor */
	NT_SENSOR_LOG_ALARM, /* presumably log-only alarms - confirm against the alarm handling code */
	NT_SENSOR_DISABLE_ALARM, /* no alarms are generated for this sensor */
};
+
/*
 * Sensor Class types: the physical location/domain a sensor belongs to.
 */
enum nt_sensor_class_e {
	NT_SENSOR_CLASS_FPGA =
		0, /* Class for FPGA based sensors e.g FPGA temperature */
	NT_SENSOR_CLASS_MCU =
		1, /* Class for MCU based sensors e.g MCU temperature */
	NT_SENSOR_CLASS_PSU =
		2, /* Class for PSU based sensors e.g PSU temperature */
	NT_SENSOR_CLASS_PCB =
		3, /* Class for PCB based sensors e.g PCB temperature */
	NT_SENSOR_CLASS_NIM =
		4, /* Class for NIM based sensors e.g NIM temperature */
	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
};

/* Convenience typedef for enum nt_sensor_class_e */
typedef enum nt_sensor_class_e nt_sensor_class_t;
+
/*
 * Port of the sensor class.
 * Runtime state for a single adapter/port sensor.
 */
struct nt_adapter_sensor {
	uint8_t m_adapter_no; /* owning adapter number; 0xFF until assigned */
	uint8_t m_intf_no; /* owning interface number; 0xFF until assigned */
	uint8_t fpga_idx; /* for AVR sensors */
	enum sensor_mon_sign si; /* signed/unsigned interpretation of raw readings */
	struct nt_info_sensor_s info; /* public info: value, min/max, name, state */
	enum nt_sensor_event_alarm_e alarm; /* alarm policy for this sensor */
	bool m_enable_alarm; /* alarm generation enabled */
};
+
/* FPGA access handles used to read a sensor from hardware. */
struct nt_fpga_sensor_monitor {
	nt_fpga_t *fpga; /* owning FPGA instance */
	nt_module_t *mod; /* FPGA module hosting the sensor register */

	nt_register_t *reg; /* register holding the sensor reading */
	nt_field_t **fields; /* array of fields_num field handles */
	uint8_t fields_num; /* number of entries in fields */
};
+
/*
 * Sensor description.
 * Describes the static behavior of the sensor; consumed by
 * allocate_sensor_by_description() to create the runtime object.
 */
struct nt_adapter_sensor_description {
	enum nt_sensor_type_e type; /* Sensor type. */
	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
	unsigned int index; /* Sensor group index. */
	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
	char name[20]; /* Sensor name; must be NUL-terminated (strlen() is applied to it) */
};
+
/*
 * A sensor plus its FPGA monitor handles, linked into a singly linked list.
 * Both owned pointers are released by sensor_deinit().
 */
struct nt_sensor_group {
	struct nt_adapter_sensor *sensor; /* owned; freed by sensor_deinit() */
	struct nt_fpga_sensor_monitor *monitor; /* owned; freed by sensor_deinit() */
	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* presumably reads hw and updates sensor - confirm with callers */

	/* conv params are needed to call current conversion functions */
	int (*conv_func)(uint32_t p_sensor_result); /* raw reading -> NT internal units */
	/* i2c interface for NIM sensors */

	struct nt_sensor_group *next; /* next group in the list, NULL at the tail */
};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
/* Max sensor name length; the name buffer holds one extra byte for the NUL */
#define NT_INFO_SENSOR_NAME 50
struct nt_info_sensor_s {
	enum nt_sensor_source_e
	source; /* The source of the sensor (port or adapter on which the sensor resides) */
	/*
	 * The source index - the adapter number for adapter sensors and port number for port
	 * sensors
	 */
	uint32_t source_index;
	/*
	 * The sensor index within the source index (sensor number on the adapter or sensor number
	 * on the port)
	 */
	uint32_t sensor_index;
	enum nt_sensor_type_e type; /* The sensor type */
	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
	int32_t value; /* The current value */
	int32_t value_lowest; /* The lowest value registered */
	int32_t value_highest; /* The highest value registered */
	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name, always NUL-terminated */
	enum nt_adapter_type_e
	adapter_type; /* The adapter type where the sensor resides */
};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 4/8] net/ntnic: adds flow related FPGA functionality
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-09-01 12:18   ` Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct cat_nthw *cat_nthw_new(void)
+{
+	struct cat_nthw *p = malloc(sizeof(struct cat_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/* Forward the debug-mode setting to the underlying CAT module. */
void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_cat, n_debug_mode);
}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/*
+ * Select the CFN entry to operate on by writing the CTRL address shadow
+ * field; the value is written out to the FPGA by cat_nthw_cfn_flush().
+ */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/* Set the CFN CTRL count shadow field. */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/*
+ * Misnamed legacy entry point: this setter was originally published as r()
+ * (see the header), breaking the cat_nthw_cfn_* naming convention used by
+ * every sibling function. Kept as a thin wrapper for backward
+ * compatibility; new callers should use cat_nthw_cfn_cnt() instead.
+ */
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	cat_nthw_cfn_cnt(p, val);
+}
+
+/*
+ * CFN DATA shadow-field setters. Each setter only writes the shadow value
+ * of the corresponding CAT_CFN_DATA_* field; cat_nthw_cfn_flush() pushes
+ * the CTRL and DATA registers to the FPGA afterwards. Fields that are
+ * looked up with register_query_field() at init time may be absent on
+ * some FPGA variants and are guarded with assert() before use.
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/* The PTC_CFP and ERR_* fields below are optional (resolved with
+ * register_query_field() at init and possibly NULL), hence the asserts.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* The PM compare value spans several 32-bit words; *val must point to at
+ * least mn_words words.
+ */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR only exists on FPGAs with a second KM lane (queried at init). */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Write the shadowed CFN CTRL and DATA registers out to the FPGA. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE/KCS/FTE shadow-field setters. @index selects the register instance:
+ * instance 1 is only populated when the FPGA exposes the separate
+ * CAT_KCE1/KCS1/FTE1 registers (see the init code); otherwise only
+ * index 0 is valid.
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+/* Write the shadowed KCE CTRL and DATA registers out to the FPGA. */
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE shadow-field setters. The MSK/HST/EPP/TPE/RRB enable fields are
+ * optional (resolved with register_query_field() at init) and asserted
+ * non-NULL before use; the remaining fields are always present.
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Write the shadowed CTE CTRL and DATA registers out to the FPGA. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS shadow-field setters and flush. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT shadow-field setters and flush. NFV_SB is optional (queried at
+ * init), hence the assert.
+ */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT shadow-field setters and flush. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO shadow-field setters and flush. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+/* NOTE(review): val is a signed offset implicitly converted to the
+ * unsigned field value — presumably the OFS field is interpreted as
+ * two's complement by the hardware; confirm against the register spec.
+ */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK shadow setters and flush. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+/* RCK DATA has no named sub-fields; write the whole register word and
+ * mark it dirty so a subsequent flush writes it out.
+ */
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN shadow-field setters and flush. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+/* Write the shadowed LEN CTRL and DATA registers out to the FPGA. */
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC and CCE accessors. Both register sets are optional — they are
+ * resolved with module_query_register()/register_query_field() at init
+ * and may be NULL on FPGA variants without them — so every accessor
+ * asserts the register/field exists before touching it.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* The key spans two 32-bit words; val must point to at least 2 words.
+ * NOTE(review): the sibling pm_cmp setter sizes the write from
+ * mn_words — confirm the KEY field is always exactly 2 words wide.
+ */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/* CCS is optional (queried at init); assert the fields were found. */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generate a shadow-field setter for each CAT CCS DATA field. The fields
+ * are resolved with register_query_field() at init time and may be NULL
+ * on some FPGA variants, so each generated function asserts the field
+ * exists before writing it.
+ *
+ * The macro expands to a complete function definition, so the invocations
+ * below are deliberately not terminated with ';' — a stray semicolon at
+ * file scope is a constraint violation in ISO C (flagged by -Wpedantic).
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/* Write the shadowed CCS CTRL and DATA registers out to the FPGA; the
+ * registers are optional (queried at init), hence the asserts.
+ */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/* Fixed: was mis-declared as "void r(...)"; follows the _select/_cnt pairing
+ * used by every other table accessor in this driver.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/* Shadow state for one CAT (categorizer) FPGA module instance:
+ * cached register and field handles resolved once at init time.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN: categorizer flow table ctrl/data */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE 0/1: KM category enable, one set per KM interface */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	/* KCS 0/1: KM category select */
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	/* FTE 0/1: flow type enable */
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE: color table enable */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS: color table select */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT: color override table */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT: color control table */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO: extractor offset */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN: frame length checks */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC: KM category conversion */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU module handle. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/* Allocate a zero-initialized CSU shadow context; returns NULL on OOM. */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc gives the same all-zero state as malloc + memset. */
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/* Scrub and release a CSU shadow context; NULL is silently ignored. */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the CSU (checksum update) module instance @n_instance on the FPGA
+ * and cache its RCP ctrl/data register and field handles in @p.
+ *
+ * Returns 0 on success, -1 if the FPGA has no such CSU instance.
+ * When @p is NULL the call only probes whether the module exists.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Cache RCP register/field handles once; the setters below rely on them. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/*
+ * Select the RCP recipe address for the next data access.
+ * Assert added for consistency with the cat_nthw_* setters in this patch,
+ * which all guard the cached field handle before writing.
+ */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_ctrl_adr);
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/*
+ * Set the RCP access count (number of consecutive recipes to transfer).
+ * Assert added for consistency with the cat_nthw_* setters in this patch.
+ */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_ctrl_cnt);
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Program the outer-layer-3 checksum command for the selected RCP recipe. */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/* Program the outer-layer-4 checksum command for the selected RCP recipe. */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/* Program the inner (tunneled) layer-3 checksum command for the selected RCP recipe. */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/* Program the inner (tunneled) layer-4 checksum command for the selected RCP recipe. */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/*
+ * Commit the RCP ctrl/data shadow registers to hardware (ctrl first).
+ * Asserts added for consistency with cat_nthw_cce_flush()/cat_nthw_ccs_flush().
+ */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Shadow state for one CSU (checksum update) FPGA module instance:
+ * cached RCP register/field handles resolved once by csu_nthw_init().
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+/* Lifecycle */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP recipe table accessors */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Allocate a zero-initialized FLM shadow context; returns NULL on OOM. */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc gives the same all-zero state as malloc + memset. */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+/* Scrub and release an FLM shadow context; NULL is silently ignored. */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug-mode setting to the underlying FLM module handle. */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM CONTROL register field setters.
+ *
+ * Each helper updates a single field in the driver's shadow copy of the
+ * CONTROL register; nothing reaches the FPGA until
+ * flm_nthw_control_flush() is called.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/* PDS/PIS field pointers are asserted non-NULL before use — presumably
+ * these fields are optional in some FPGA images (looked up via a query
+ * variant); NOTE(review): confirm against the init code.
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write the shadow CONTROL register (1 register) out to the FPGA. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM STATUS register accessors.
+ *
+ * Convention: when 'get' is non-zero the cached (shadow) field value is
+ * copied into *val.  For the read/write flags (critical, panic, crcerr) a
+ * zero 'get' instead writes *val into the shadow.  calibdone, initdone,
+ * idle and eft_bp are read-only here (a zero 'get' is a no-op).
+ * Use flm_nthw_status_update() to refresh the shadow from hardware and
+ * flm_nthw_status_flush() to write the shadow back.
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write the shadow STATUS register to the FPGA. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the shadow STATUS register from the FPGA. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT, SCRUB and LOAD_* (BIN, PPS, LPS, APS) registers.
+ *
+ * Each register has a single set-shadow-field helper paired with a
+ * *_flush() that writes that register (1 register) to the FPGA.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * PRIO register: four (limit, ft) field pairs staged in the shadow and
+ * written to the FPGA together by flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table access: PST_CTRL carries the entry address (adr) and count
+ * (cnt); PST_DATA carries the bp/pp/tp fields.  flm_nthw_pst_flush()
+ * writes CTRL first, then DATA.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP table access: select an entry with adr/cnt in RCP_CTRL, stage the
+ * entry fields in RCP_DATA, then write both registers with
+ * flm_nthw_rcp_flush().
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: 'val' must point to 10 32-bit words (320 bits). */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the FLM BUF_CTRL register (2 x 32-bit words) over a RAB DMA
+ * transaction and report the buffer counts it contains.
+ *
+ * Out parameters (layout taken from the read-back below):
+ *   lrn_free  - BUF_CTRL word 0, bits 15:0
+ *   inf_avail - BUF_CTRL word 0, bits 31:16
+ *   sta_avail - BUF_CTRL word 1, bits 15:0
+ *
+ * Returns 0 on success, otherwise the non-zero error from DMA begin or
+ * commit (out parameters untouched on failure).
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* The DMA buffer is a ring; mask the index into its size
+		 * (size is assumed to be a power of two).
+		 */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write 'word_count' 32-bit words of learn data to LRN_DATA over one RAB
+ * DMA transaction.  BUF_CTRL is first written with the word count to
+ * announce the transfer, then read back so the caller gets the updated
+ * lrn_free / inf_avail / sta_avail counts (same word layout as
+ * flm_nthw_buf_ctrl_update()).
+ *
+ * Returns 0 on success; -1 when the DMA transaction could not be started,
+ * otherwise the commit error (out parameters untouched on failure).
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	if (nthw_rac_rab_dma_begin(rac) == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Extract counts from the BUF_CTRL read-back (ring buffer). */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words of flow-info data from INF_DATA over one
+ * RAB DMA transaction.  BUF_CTRL word 0 bits 31:16 are first written with
+ * the word count to announce the read, then BUF_CTRL is read back so the
+ * caller gets the updated lrn_free / inf_avail / sta_avail counts.
+ *
+ * Returns 0 on success, otherwise the non-zero error from DMA begin or
+ * commit ('data' and the out parameters untouched on failure).
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy the INF words out of the DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		/* Extract counts from the BUF_CTRL read-back. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words of status records from STA_DATA over one
+ * RAB DMA transaction.  BUF_CTRL word 1 is first written with the word
+ * count to announce the read, then BUF_CTRL is read back so the caller
+ * gets the updated lrn_free / inf_avail / sta_avail counts.
+ *
+ * Returns 0 on success, otherwise the non-zero error from DMA begin or
+ * commit ('data' and the out parameters untouched on failure).
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy the STA words out of the DMA ring buffer. */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		/* Extract counts from the BUF_CTRL read-back. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM statistics counters.
+ *
+ * Each counter has a pair of helpers: *_cnt(p, val, get) copies the cached
+ * (shadow) counter field into *val when 'get' is non-zero, and *_update(p)
+ * refreshes the cached register from hardware.  The registers that were
+ * looked up with the query variants during init (PRB_*, STA_DONE, INF_*,
+ * PCK_*, CSH_*, CUC_*) are optional and asserted non-NULL before use.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn Done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl Done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb Done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel Done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul Done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta Done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Cached register and field handles for one FLM module instance.
+ * All pointers are resolved in flm_nthw_init() from the FPGA model.
+ * NOTE(review): the handles appear to be owned by the FPGA model layer,
+ * not by this struct — confirm that flm_nthw_delete() must not free
+ * them individually.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* LOAD_* rate registers */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register fields (limit/flow-type pairs 0-3) */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST control/data register pair */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP (recipe) control/data register pair */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status buffer control and data streams */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* STAT_* counter registers, one 32-bit CNT field each */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module handle. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize an HFU handle.
+ * Returns NULL on allocation failure; release with hfu_nthw_delete().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair. */
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/*
+ * Release an HFU handle previously returned by hfu_nthw_new().
+ * NULL is accepted and ignored.  The struct is cleared before free()
+ * as a use-after-free debugging aid; note a plain memset() just before
+ * free() may be elided by the optimizer.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HFU handle to FPGA module instance 'n_instance'.
+ *
+ * When 'p' is NULL the call is a pure probe: it only reports whether
+ * the instance exists.  On success all RCP control/data field handles
+ * are resolved from the FPGA model and cached in 'p'.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the lookup above instead of querying the FPGA model again
+	 * (matches hsh_nthw_init()).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each writes one 32-bit value into the cached
+ * HFU_RCP_CTRL / HFU_RCP_DATA field handle.  Presumably the values are
+ * staged in the register shadow until hfu_nthw_rcp_flush() pushes them
+ * to hardware — confirm against the nthw_fpga_model field semantics.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Write the staged CTRL and DATA register shadows to the FPGA. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Cached register and field handles for one HFU module instance,
+ * resolved once in hfu_nthw_init() from the FPGA model.
+ * NOTE(review): handles appear to be owned by the FPGA model layer —
+ * hfu_nthw_delete() does not free them individually.
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* HFU_RCP_CTRL register and fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* HFU_RCP_DATA register and fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the requested debug mode to the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HSH handle.
+ * Returns NULL on allocation failure; release with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()). */
+	return calloc(1, sizeof(struct hsh_nthw));
+}
+
+/* Scrub and release a HSH handle; a NULL argument is ignored. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to HSH module instance @n_instance on @p_fpga, cache every RCP
+ * register/field handle, and write a zeroed default to RCP entry 0.
+ *
+ * If @p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Otherwise returns -1 (with a log line)
+ * when the instance is missing, 0 on success.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module instance. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: register_query_field() yields NULL when absent. */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	/* NOTE(review): mp_rcp_data_p_mask is not given a default here,
+	 * unlike the other fields -- confirm intentional.
+	 */
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* NOTE(review): word count 10 is hard-coded (mac_port_mask above uses
+	 * mn_words) -- confirm WORD_MASK is always 10 words wide.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* NOTE(review): default hsh_type is 31, not 0 -- presumably an
+	 * "invalid/disabled" sentinel; verify against the register spec.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * Per-field RCP write accessors.  Each call stages a value via
+ * field_set_val32()/field_set_val(); hsh_nthw_rcp_flush() writes the
+ * RCP CTRL and DATA registers.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* @val must hold at least mn_words 32-bit words. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* @val must hold at least 10 32-bit words (fixed WORD_MASK width). */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* Silently ignored when the optional AUTO_IPV4_MASK field is absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Commit all staged RCP values to the CTRL and DATA registers. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage per-field values, then commit with hsh_nthw_rcp_flush(). */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Handle caching the HSH module plus its RCP register/field pointers. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask; /* optional; NULL when absent */
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the requested debug mode to the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HST handle.
+ * Returns NULL on allocation failure; release with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()). */
+	return calloc(1, sizeof(struct hst_nthw));
+}
+
+/* Scrub and release a HST handle; a NULL argument is ignored. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to HST module instance @n_instance on @p_fpga and cache all RCP
+ * register/field handles.
+ *
+ * If @p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Otherwise returns -1 (with a log line)
+ * when the instance is missing, 0 on success.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default RCP values are written
+ * and nothing is flushed here -- presumably left to callers; confirm.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module instance. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP: per-field write accessors.  Each call stages a value via
+ * field_set_val32(); hst_nthw_rcp_flush() writes the RCP CTRL and
+ * DATA registers.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Commit all staged RCP values to the CTRL and DATA registers. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle caching the HST module plus its RCP register/field pointers. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage per-field values, then commit with hst_nthw_rcp_flush(). */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memset */
+
+/* Forward the requested debug mode to the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IFR handle.
+ * Returns NULL on allocation failure; release with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()). */
+	return calloc(1, sizeof(struct ifr_nthw));
+}
+
+/* Scrub and release an IFR handle; a NULL argument is ignored. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to IFR module instance @n_instance on @p_fpga and cache its RCP
+ * register/field handles.
+ *
+ * If @p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Otherwise returns -1 (with a log line)
+ * when the instance is missing, 0 on success.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module instance. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle already looked up above instead of
+	 * issuing a second, redundant fpga_query_module() call.
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/*
+ * RCP write accessors: stage values via field_set_val32(), then commit
+ * with ifr_nthw_rcp_flush().
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Stage the per-recipe MTU value; commit with ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix copy/paste bug: assert the field actually written (was
+	 * checking mp_rcp_data_en while writing mp_rcp_data_mtu).
+	 */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Commit all staged RCP values to the CTRL and DATA registers. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Handle caching the IFR module plus its RCP register/field pointers. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release. */
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR: stage per-field values, then commit with ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Normalize a presence/count product parameter to a 0/1 flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return val ? 1u : 0u;
+}
+
+/*
+ * Allocate a zero-initialized INFO handle.
+ * Returns NULL on allocation failure; release with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()). */
+	return calloc(1, sizeof(struct info_nthw));
+}
+
+/* Scrub and release an INFO handle; a NULL argument is ignored. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Harvest all FPGA product parameters relevant to the flow filter into @p.
+ * Capabilities gated on an absent module (km/kcc/ioa/roa/dbs/flm/hst/tpe)
+ * are forced to 0 by multiplying with the corresponding *_present flag.
+ *
+ * Always returns 0.
+ *
+ * NOTE(review): unlike the other *_nthw_init() functions in this patch,
+ * @p is dereferenced without a NULL probe path -- confirm callers never
+ * pass NULL here.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* Presence flags for optional modules, clamped to 0/1. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is available only when all five sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types apply when either KM or FLM is present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS usable queues are bounded by the smaller of Rx and Tx. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): -1 defaults act as "not configured" sentinels; they
+	 * wrap when read back through the unsigned getters -- confirm.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Read-only accessors for the capability values harvested by
+ * info_nthw_init().  Values gated on an absent module read as 0.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): init stores a -1 default here; as unsigned this reads as
+ * UINT_MAX -- presumably a "not configured" sentinel, verify callers.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+/* See the sentinel note on info_nthw_get_nb_cat_km_if_m0(). */
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * INFO module accessor layer: exposes the capability/dimensioning
+ * parameters of the flow-filter FPGA so other modules can query them.
+ *
+ * The include guard avoids a leading double underscore: identifiers
+ * beginning with "__" are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef FLOW_NTHW_INFO_H_
+#define FLOW_NTHW_INFO_H_
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Lifetime management and FPGA binding. */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Accessors returning cached values; no hardware access. */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/* NOTE(review): although an opaque forward declaration exists above,
+ * the full layout is public here; keep the field order stable since
+ * other translation units may embed or copy this struct.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* FLOW_NTHW_INFO_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA module. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ioa_nthw instance.
+ *
+ * @return New zeroed instance, or NULL on allocation failure.  The
+ *         caller owns the memory and releases it with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair. */
+	struct ioa_nthw *p = calloc(1, sizeof(struct ioa_nthw));
+
+	return p;
+}
+
+/* Release an instance created by ioa_nthw_new(); NULL is a no-op. */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object contents before releasing the memory. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an ioa_nthw instance to the IOA module registers of an FPGA.
+ *
+ * @param p          Instance to initialize; may be NULL, in which case the
+ *                   call only probes whether the module instance exists.
+ * @param p_fpga     FPGA handle used to look up the IOA module.
+ * @param n_instance Module instance index (asserted to be 0..255).
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module instance. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control/data registers and their fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 */
+		/* module_query_register() may return NULL when the register
+		 * is absent on this FPGA image; dependent fields are then
+		 * explicitly set to NULL and guarded at the call sites.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/* RCP */
+/*
+ * Recipe field setters: each sets one field value; ioa_nthw_rcp_flush()
+ * then flushes the ctrl and data registers (count = 1).
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Flush the RCP ctrl and data registers (one entry each). */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+/* Setters for the two custom TPID fields, plus the register flush. */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * ROA/EPP accessors.  The EPP registers are optional (resolved with
+ * module_query_register() in ioa_nthw_init()), so every accessor
+ * checks for NULL and becomes a no-op when the register is absent.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * IOA module accessor layer: register/field handles plus setter and
+ * flush helpers for the IOA flow-filter FPGA module.
+ *
+ * The include guard avoids a leading double underscore: identifiers
+ * beginning with "__" are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef FLOW_NTHW_IOA_H_
+#define FLOW_NTHW_IOA_H_
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP registers/fields; NULL when absent on the image. */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+/* Lifetime management and FPGA binding. */
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module (optional; accessors are no-ops when absent) */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* FLOW_NTHW_IOA_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Set a register field only when the field pointer is non-NULL.
+ * Fields resolved with register_query_field() may legitimately be
+ * absent (NULL) on some FPGA images.
+ *
+ * The field expression is expanded exactly once into a local, and both
+ * macro arguments are parenthesized in the expansion (the original left
+ * the value argument unparenthesized).  The temporary uses a suffixed
+ * name to avoid shadowing a caller variable named 'a'.
+ */
+#define CHECK_AND_SET_VALUE(_a, _val)                     \
+	do {                                              \
+		__typeof__(_a) field_ = (_a);             \
+		if (field_) {                             \
+			field_set_val32(field_, (_val));  \
+		}                                         \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM module. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized km_nthw instance.
+ *
+ * @return New zeroed instance, or NULL on allocation failure.  The
+ *         caller owns the memory and releases it with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair. */
+	struct km_nthw *p = calloc(1, sizeof(struct km_nthw));
+
+	return p;
+}
+
+/* Release an instance created by km_nthw_new(); NULL is a no-op. */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object contents before releasing the memory. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a km_nthw instance to the KM module registers of an FPGA.
+ *
+ * @param p          Instance to initialize; may be NULL, in which case the
+ *                   call only probes whether the module instance exists.
+ * @param p_fpga     FPGA handle used to look up the KM module.
+ * @param n_instance Module instance index (asserted to be 0..255).
+ * @return 0 on success, -1 if the module instance does not exist.
+ *
+ * Fields resolved with register_query_field() are optional and may end
+ * up NULL; call sites guard them (see CHECK_AND_SET_VALUE).
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report presence of the module instance. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Optional fields: presence depends on the FPGA image/version. */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL)
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	/* Fallback: older images expose the same fields under QW/SW names. */
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/* RCP */
+/*
+ * Recipe field setters.  Fields resolved with register_get_field() are
+ * set directly; optional fields (register_query_field(), may be NULL)
+ * go through CHECK_AND_SET_VALUE().  km_nthw_rcp_flush() flushes the
+ * ctrl and data registers (count = 1).
+ *
+ * The stray semicolons after the function bodies ("};") have been
+ * removed; an extra semicolon at file scope is not valid ISO C.
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+/* Same as km_nthw_rcp_mask_a(); kept for DW8/DW10 usage from v6+. */
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+/* NOTE(review): the *_b_dyn/*_b_ofs fields below come from
+ * register_query_field() (with an old-define fallback) in
+ * km_nthw_init() and could in principle be NULL; confirm all
+ * supported images expose one of the two variants.
+ */
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+/* Flush the RCP ctrl and data registers (one entry each). */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3); /* 3 x 32-bit words */
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3); /* 3 x 32-bit words */
+} /* to use in v4 */
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__ /* NOTE(review): identifiers starting with "__" are reserved for the implementation */
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val); /* NOTE(review): no matching mp_rcp_data_mask_d_a member in struct below -- presumably shares mask_a storage; confirm in .c */
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val); /* val points to 3 x 32-bit words */
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val); /* val points to 3 x 32-bit words (v4) */
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/* Allocate a zero-initialized pdb_nthw; returns NULL on allocation failure. */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc() + memset() pair. */
+	struct pdb_nthw *p = calloc(1, sizeof(struct pdb_nthw));
+
+	return p;
+}
+
+/* Scrub and free; accepts NULL. */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0; /* NULL p: probe only -- report module presence */
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS); /* query: optional field, may be NULL */
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs) /* optional field -- NULL on FPGA versions without it */
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional; NULL on FPGA versions without the field */
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/* Allocate a zero-initialized qsl_nthw; returns NULL on allocation failure. */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc() + memset() pair. */
+	struct qsl_nthw *p = calloc(1, sizeof(struct qsl_nthw));
+
+	return p;
+}
+
+/* Scrub and free; accepts NULL. */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0; /* NULL p: probe only -- report module presence */
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* query_*: optional fields, NULL when absent in this FPGA version */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	/* Nothing to configure yet; parameters kept for API symmetry. */
+	(void)n_idx_cnt;
+	(void)n_idx;
+	(void)p;
+
+	return 0;
+}
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+/* The following fields are optional (see register_query_field in init). */
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX - optional register pair, absent from v0.7+ (mp_ltx_* may be NULL). */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Guard on the field actually written (was mp_ltx_addr). */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	/* init leaves LTX registers NULL on v0.7+; skip flushing then */
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST - queue selection table accessors. */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_qst_addr, value);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_qst_cnt, value);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_qst_data_queue, value);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_qst_data_en, value);
+}
+
+/* The fields below are optional; silently skip when absent. */
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t value)
+{
+	if (!p->mp_qst_data_tx_port)
+		return;
+	field_set_val32(p->mp_qst_data_tx_port, value);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t value)
+{
+	if (!p->mp_qst_data_lre)
+		return;
+	field_set_val32(p->mp_qst_data_lre, value);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t value)
+{
+	if (!p->mp_qst_data_tci)
+		return;
+	field_set_val32(p->mp_qst_data_tci, value);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t value)
+{
+	if (!p->mp_qst_data_ven)
+		return;
+	field_set_val32(p->mp_qst_data_ven, value);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN - queue enable table accessors. */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_qen_addr, value);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_qen_cnt, value);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_qen_data_en, value);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ - unmatched-queue table accessors. */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_unmq_addr, value);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_unmq_cnt, value);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, value);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t value)
+{
+	field_set_val32(p->mp_unmq_data_en, value);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one QSL FPGA module instance.  Caches the module handle and
+ * the register/field handles resolved by qsl_nthw_init(); field pointers
+ * may be NULL when the loaded FPGA image does not expose the field
+ * (callers such as qsl_nthw_qst_tx_port() check before writing).
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP register group (recipe table) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX register group */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST register group */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN register group */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ register group */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying RMC module handle. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RMC handle; returns NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rmc_nthw));
+}
+
+/* Scrub and release an RMC handle; NULL is accepted and ignored. */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an RMC instance to its FPGA module and resolve register/field handles.
+ *
+ * When p is NULL the function only probes for the module: it returns 0 when
+ * instance n_instance exists and -1 otherwise.  With a valid p it returns 0
+ * on success, or -1 (with a log entry) when the instance is not present.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* BLOCK_RPP_SLICE is looked up with "query" (may be NULL), so it is
+	 * presumably optional in some FPGA images -- see the NULL check in
+	 * rmc_nthw_ctrl_block_rpp_slice().
+	 */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/*
+ * Placeholder setup hook for the RMC module; currently a no-op that
+ * always returns 0.  Kept so all flow-filter modules share the same
+ * init/setup interface.
+ */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	/* Parameters intentionally unused. */
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL */
+
+/* Set CTRL[BLOCK_STATT]. */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_ctrl_block_statt;
+
+	field_set_val32(f, val);
+}
+
+/* Set CTRL[BLOCK_KEEPA]. */
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_ctrl_block_keep_a;
+
+	field_set_val32(f, val);
+}
+
+/* Set CTRL[BLOCK_RPP_SLICE]; skipped when the optional field is absent. */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_ctrl_block_rpp_slice;
+
+	if (f != NULL)
+		field_set_val32(f, val);
+}
+
+/* Set CTRL[BLOCK_MAC_PORT]. */
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_ctrl_block_mac_port;
+
+	field_set_val32(f, val);
+}
+
+/* Set CTRL[LAG_PHY_ODD_EVEN]. */
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_ctrl_lag_phy_odd_even;
+
+	field_set_val32(f, val);
+}
+
+/* Flush the CTRL shadow register (one entry). */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one RMC FPGA module instance.  Caches the module handle and
+ * the CTRL register/field handles resolved by rmc_nthw_init().
+ * mp_ctrl_block_rpp_slice may be NULL (resolved with register_query_field).
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	/* CTRL register and its fields */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying ROA module handle. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ROA handle; returns NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	return calloc(1, sizeof(struct roa_nthw));
+}
+
+/* Scrub and release a ROA handle; NULL is accepted and ignored. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a ROA instance to its FPGA module and resolve register/field handles
+ * for the TUNHDR, TUNCFG, CONFIG and LAGCFG register groups.
+ *
+ * When p is NULL the function only probes for the module: it returns 0 when
+ * instance n_instance exists and -1 otherwise.  With a valid p it returns 0
+ * on success, or -1 (with a log entry) when the instance is not present.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR */
+
+/* Set TUNHDR CTRL[ADR] (record selector for subsequent DATA accesses). */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_hdr_addr;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNHDR CTRL[CNT]. */
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_hdr_cnt;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNHDR DATA[TUNNEL_HDR] from an array of four 32-bit words. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Flush the TUNHDR CTRL and DATA shadow registers (one entry each). */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG */
+
+/* Set TUNCFG CTRL[ADR] (record selector for subsequent DATA accesses). */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_addr;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG CTRL[CNT]. */
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_cnt;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_LEN]. */
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_tun_len;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_TYPE]. */
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_tun_type;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_VLAN]. */
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_tun_vlan;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_IP_TYPE]. */
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_ip_type;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_IPCS_UPD]. */
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_ipcs_upd;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_IPCS_PRECALC]. */
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_ipcs_precalc;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_IPTL_UPD]. */
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_iptl_upd;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_IPTL_PRECALC]. */
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_iptl_precalc;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TUN_VXLAN_UDP_LEN_UPD]. */
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_vxlan_udp_len_upd;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[TX_LAG_IX]. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+/* Set TUNCFG DATA[RECIRCULATE]. */
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_recirculate;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[PUSH_TUNNEL]. */
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_push_tunnel;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[RECIRC_PORT]. */
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_recirc_port;
+
+	field_set_val32(f, val);
+}
+
+/* Set TUNCFG DATA[RECIRC_BYPASS]. */
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_tun_cfg_data_recirc_bypass;
+
+	field_set_val32(f, val);
+}
+
+/* Flush the TUNCFG CTRL and DATA shadow registers (one entry each). */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG */
+
+/* Set CONFIG[FWD_RECIRCULATE]. */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_config_fwd_recirculate;
+
+	field_set_val32(f, val);
+}
+
+/* Set CONFIG[FWD_NORMAL_PCKS]. */
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_config_fwd_normal_pcks;
+
+	field_set_val32(f, val);
+}
+
+/* Set CONFIG[FWD_TXPORT0]. */
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_config_fwd_tx_port0;
+
+	field_set_val32(f, val);
+}
+
+/* Set CONFIG[FWD_TXPORT1]. */
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_config_fwd_tx_port1;
+
+	field_set_val32(f, val);
+}
+
+/* Set CONFIG[FWD_CELLBUILDER_PCKS]. */
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_config_fwd_cell_builder_pcks;
+
+	field_set_val32(f, val);
+}
+
+/* Set CONFIG[FWD_NON_NORMAL_PCKS]. */
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_config_fwd_non_normal_pcks;
+
+	field_set_val32(f, val);
+}
+
+/* Flush the CONFIG shadow register (one entry). */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG */
+
+/* Set LAGCFG CTRL[ADR] (record selector for subsequent DATA accesses). */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_lag_cfg_addr;
+
+	field_set_val32(f, val);
+}
+
+/* Set LAGCFG CTRL[CNT]. */
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_lag_cfg_cnt;
+
+	field_set_val32(f, val);
+}
+
+/* Set LAGCFG DATA[TXPHY_PORT]. */
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_lag_cfg_data_tx_phy_port;
+
+	field_set_val32(f, val);
+}
+
+/* Flush the LAGCFG CTRL and DATA shadow registers (one entry each). */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Handle for one ROA FPGA module instance.  Caches the module handle and
+ * the register/field handles resolved by roa_nthw_init().
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUNHDR register group */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUNCFG register group */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG register and its fields */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAGCFG register group */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying RPP_LR module handle. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RPP_LR handle; returns NULL on failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct rpp_lr_nthw));
+}
+
+/* Scrub and release an RPP_LR handle; NULL is accepted and ignored. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an RPP_LR instance to its FPGA module and resolve register/field
+ * handles.  The IFR group is resolved with the "query" variants, so those
+ * pointers may be NULL when the FPGA image lacks the registers (callers
+ * assert before use).
+ *
+ * When p is NULL the function only probes for the module: it returns 0 when
+ * instance n_instance exists and -1 otherwise.  With a valid p it returns 0
+ * on success, or -1 (with a log entry) when the instance is not present.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of querying again. */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Set RCP CTRL[ADR] (record selector for subsequent DATA accesses). */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set RCP CTRL[CNT]. */
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set RCP DATA[EXP]. */
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Flush the RCP CTRL and DATA shadow registers (one entry each). */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Set IFR RCP CTRL[ADR]; the IFR group is optional, hence the asserts. */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+/* Set IFR RCP CTRL[CNT]. */
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+/* Set IFR RCP DATA[EN]. */
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+/* Set IFR RCP DATA[MTU]. */
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Flush the IFR RCP CTRL and DATA shadow registers (one entry each). */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one RPP_LR FPGA module instance.  Caches the module handle and
+ * the register/field handles resolved by rpp_lr_nthw_init().  The IFR group
+ * is resolved with the "query" variants and its pointers may be NULL when
+ * the FPGA image lacks those registers.
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	/* RCP register group */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* IFR RCP register group (optional; may be NULL) */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC module handle. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC handle; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_nthw));
+}
+
+/* Scrub and release an SLC handle; NULL is accepted and ignored. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an SLC instance to its FPGA module and resolve register/field
+ * handles for the RCP register group.
+ *
+ * When p is NULL the function only probes for the module: it returns 0 when
+ * instance n_instance exists and -1 otherwise.  With a valid p it returns 0
+ * on success, or -1 (with a log entry) when the instance is not present.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of querying again. */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+
+/* Set RCP CTRL[ADR] (record selector for subsequent DATA accesses). */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_addr;
+
+	field_set_val32(f, val);
+}
+
+/* Set RCP CTRL[CNT]. */
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_cnt;
+
+	field_set_val32(f, val);
+}
+
+/* Set RCP DATA[TAIL_SLC_EN]. */
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_tail_slc_en;
+
+	field_set_val32(f, val);
+}
+
+/* Set RCP DATA[TAIL_DYN]. */
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_tail_dyn;
+
+	field_set_val32(f, val);
+}
+
+/* Set RCP DATA[TAIL_OFS]; val is signed and implicitly converted to the
+ * 32-bit field value.
+ */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_tail_ofs;
+
+	field_set_val32(f, val);
+}
+
+/* Set RCP DATA[PCAP]. */
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	nt_field_t *const f = p->mp_rcp_data_pcap;
+
+	field_set_val32(f, val);
+}
+
+/* Flush the RCP CTRL and DATA shadow registers (one entry each). */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one SLC (slicer) FPGA module instance: the module handle
+ * plus cached register/field handles for the RCP CTRL/DATA registers.
+ * Populated by slc_nthw_init().
+ */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* physical adapter / instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc;	/* MOD_SLC module handle */
+
+	/* SLC_RCP_CTRL register and its ADR/CNT fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	/* SLC_RCP_DATA fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC LR module handle. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized slc_lr_nthw handle; returns NULL on OOM.
+ * The handle must be initialized with slc_lr_nthw_init() before use.
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	struct slc_lr_nthw *p = malloc(sizeof(struct slc_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Free a handle created by slc_lr_nthw_new(); NULL is a no-op.
+ * The object is zeroed before freeing (defensive: stale pointers in a
+ * use-after-free fault fast instead of dereferencing freed handles).
+ */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Initialize an SLC LR (slicer, long range) module shadow.
+ *
+ * Looks up MOD_SLC_LR instance @n_instance in @p_fpga and caches the RCP
+ * CTRL/DATA register and field handles in @p.
+ *
+ * If @p is NULL the call degenerates to a probe: returns 0 when the
+ * module instance exists, -1 otherwise.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Say "SlcLr", not "Slc": this is the SLC_LR module and the
+		 * message must be distinguishable from the plain SLC one.
+		 */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc_lr = p_mod;	/* reuse the handle queried above */
+
+	/* RCP: cache CTRL (ADR/CNT) and DATA field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Set the RCP record address field (SLC_RCP_CTRL_ADR). */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count field (SLC_RCP_CTRL_CNT). */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the tail-slice enable field of the selected RCP record. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Set the tail dynamic-offset selector field of the selected RCP record. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Set the tail offset field; the value is signed (int32_t). */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Set the PCAP field of the selected RCP record. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush the staged RCP CTRL and DATA register contents (one entry each). */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one SLC LR (slicer, long range) FPGA module instance: the
+ * module handle plus cached register/field handles for the RCP CTRL/DATA
+ * registers.  Populated by slc_lr_nthw_init().
+ */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* physical adapter / instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_slc_lr;	/* MOD_SLC_LR module handle */
+
+	/* SLC_RCP_CTRL register and its ADR/CNT fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	/* SLC_RCP_DATA fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module handle. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zero-initialized tx_cpy_nthw handle; returns NULL on OOM.
+ * The handle must be initialized with tx_cpy_nthw_init() before use.
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	struct tx_cpy_nthw *p = malloc(sizeof(struct tx_cpy_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free a handle created by tx_cpy_nthw_new(); NULL is a no-op.
+ * Also releases the writers array allocated by tx_cpy_nthw_init().
+ * The object is zeroed before freeing as a use-after-free defense.
+ */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p) {
+		free(p->m_writers);
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					  CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+			 register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+					    CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Select the writer record address (CPY_WRITERx_CTRL.ADR) for writer
+ * @index; @index must be < m_writers_cnt.
+ */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set the writer access count field (CPY_WRITERx_CTRL.CNT). */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Set the reader-select field of the selected writer record. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Set the DYN (dynamic offset selector) field of the writer record. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Set the OFS (offset) field of the writer record. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Set the LEN (length) field of the writer record. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/* Set the mask-pointer field; only valid when the TX_CPY variant
+ * provides mask registers (field handle is NULL otherwise).
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Flush the staged CTRL and DATA register contents of writer @index. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Select the mask record address (CPY_WRITERx_MASK_CTRL.ADR); only
+ * valid for variants with mask registers.
+ */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set the mask access count field (CPY_WRITERx_MASK_CTRL.CNT). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Set the byte-mask field of the selected mask record. */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Flush the staged MASK_CTRL and MASK_DATA register contents of writer
+ * @index; only valid for variants with mask registers.
+ */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one TX_CPY writer (CPY_WRITERx
+ * register set).  The mask members are only populated for FPGA variants
+ * that provide mask registers (NT_TX_CPY_VARIANT != 0); otherwise they
+ * stay NULL.
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;	/* CPY_WRITERx_CTRL */
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;	/* CPY_WRITERx_DATA */
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer;	/* variant != 0 only */
+
+	nt_register_t *mp_writer_mask_ctrl;	/* variant != 0 only */
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;	/* variant != 0 only */
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Handle for one TX_CPY FPGA module instance; m_writers is an array of
+ * m_writers_cnt entries allocated by tx_cpy_nthw_init() and freed by
+ * tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* physical adapter / instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_cpy;	/* MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt;
+	struct tx_cpy_writers_s *m_writers;
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module handle. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zero-initialized tx_ins_nthw handle; returns NULL on OOM.
+ * The handle must be initialized with tx_ins_nthw_init() before use.
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	struct tx_ins_nthw *p = malloc(sizeof(struct tx_ins_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Free a handle created by tx_ins_nthw_new(); NULL is a no-op.
+ * The object is zeroed before freeing as a use-after-free defense.
+ */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Initialize a TX_INS (TX inserter) module shadow.
+ *
+ * Looks up MOD_TX_INS instance @n_instance in @p_fpga and caches the RCP
+ * CTRL/DATA register and field handles in @p.
+ *
+ * If @p is NULL the call degenerates to a probe: returns 0 when the
+ * module instance exists, -1 otherwise.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_ins = p_mod;	/* reuse the handle queried above */
+
+	/* RCP: cache CTRL (ADR/CNT) and DATA field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Set the RCP record address field (INS_RCP_CTRL_ADR). */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count field (INS_RCP_CTRL_CNT). */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the DYN (dynamic offset selector) field of the selected record. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Set the OFS (offset) field of the selected record. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Set the LEN (length) field of the selected record. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Flush the staged RCP CTRL and DATA register contents (one entry each). */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one TX_INS (TX inserter) FPGA module instance: the module
+ * handle plus cached register/field handles for the RCP CTRL/DATA
+ * registers.  Populated by tx_ins_nthw_init().
+ */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* physical adapter / instance number */
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_ins;	/* MOD_TX_INS module handle */
+
+	/* INS_RCP_CTRL register and its ADR/CNT fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* INS_RCP_DATA register and fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/*
+ * Destroy an instance created by tx_rpl_nthw_new().
+ * The object is scrubbed before being freed; NULL is a no-op.
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_rpl = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* Set the RCP record index to access (RCP_CTRL.ADR). */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* Set the RCP access count (RCP_CTRL.CNT). */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Stage the RCP DYN field; committed by tx_rpl_nthw_rcp_flush(). */
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Stage the RCP OFS field. */
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Stage the RCP LEN field. */
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Stage the RCP RPL_PTR field. */
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+/* Stage the RCP EXT_PRIO field. */
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Write the staged RCP control and data registers to the FPGA. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Set the EXT record index to access (EXT_CTRL.ADR). */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+/* Set the EXT access count (EXT_CTRL.CNT). */
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+/* Stage the EXT RPL_PTR field; committed by tx_rpl_nthw_ext_flush(). */
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+/* Write the staged EXT control and data registers to the FPGA. */
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* Set the RPL table index to access (RPL_CTRL.ADR). */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+/* Set the RPL access count (RPL_CTRL.CNT). */
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* Stage one RPL data entry; val must point to at least 4 32-bit words. */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+/* Write the staged RPL control and data registers to the FPGA. */
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* NTHW access wrapper for one instance of the TX_RPL FPGA module. */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number, cast from n_instance */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+
+	nt_module_t *m_tx_rpl;		/* TX_RPL module handle */
+
+	/* RCP control register and fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	/* RCP data register and fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* EXT control register and fields */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	/* EXT data register and field */
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* RPL control register and fields */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	/* RPL data register and field (4-word value) */
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+/* Allocate a zeroed instance; release with tx_rpl_nthw_delete(). */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+/* Bind to TX_RPL instance n_instance on p_fpga; returns 0 on success, -1 if absent. */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: stage field values, then commit with tx_rpl_nthw_rcp_flush() */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+/* EXT: stage field values, then commit with tx_rpl_nthw_ext_flush() */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+/* RPL: stage field values, then commit with tx_rpl_nthw_rpl_flush() */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 5/8] net/ntnic: adds FPGA abstraction layer
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-09-01 12:18   ` [PATCH v13 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-09-01 12:18   ` Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
v11:
* Fix dereferencing type-punned pointer in macro
v13:
* Fix typo spelling warnings
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  265 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14376 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a tunnel header into ROA TUNHDR record 'index'.
+ *
+ * The header is written as 4 * 4 32-bit words (64 bytes) for IPv4 or
+ * 8 * 4 words (128 bytes) for IPv6. Within each group of 4 words the
+ * source words are taken in reverse order, and each word is converted
+ * from network byte order with ntohl().
+ *
+ * Returns 0 on success, or the accumulated non-zero result of
+ * hw_mod_roa_tunhdr_set() (the loops stop at the first error).
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program the ROA TUNCFG recipe at 'index' from the SW roa action bitmask
+ * 'color_actions' (layout documented above the set_roa_* helpers in
+ * flow_api_actions.h): tunnel push, IP header length/checksum updates,
+ * recirculation and TX destination. Also mirrors push-tunnel/TX info into
+ * the IOA ROA_EPP table. Returns 0 on success, -1 on an invalid TX
+ * destination combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 - use the checksum pre-calculated into the action word */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 = no recirc bypass */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	/* TX destination: DESTINATION_TX_* bitmask from the action word */
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* both ports: TX on PHY0, recirc-bypass to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program the IOA recipe at 'index' from the SW ioa action bitmask
+ * 'color_actions' (layout documented above the ioa_set_* helpers in
+ * flow_api_actions.h): tunnel/VLAN pop, VLAN push (TPID/TCI) and queue
+ * override. Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	/* ioa_set_*(0) yields the bare flag mask for that action */
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* split TCI into its VID/DEI/PCP sub-fields */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* TX destination bitmask values used in the roa/ioa action encoding */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+/* Tunnel IP version selector (matches the tun_ip_type action bit) */
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID selector values (IOA tpid_sel field, bits 27:24) */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+/* ROA retransmit destinations; TX_LAG_IX values (txport + 1 encoding) */
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
/*
 * BITMASK(a, b): contiguous bitmask covering bit positions a down to b
 * (inclusive, a >= b; a - b must be < 31).
 */
#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)

/*
 *  9:0   Mark (color) 1024 flow stats
 * 17:10  IOA index     256 entries
 * 25:18  ROA index     256 entries
 * 30:26  QSL and HSH    32 recipes indexable
 * 31:31  CAO               implicitly when color_action is set
 */
#define FLOW_MARK_MASK BITMASK(9, 0)
#define IOA_RCP_MASK BITMASK(17, 10)
#define ROA_RCP_MASK BITMASK(25, 18)
#define QSL_HSH_MASK BITMASK(30, 26)

/*
 * Pack mark, IOA recipe, ROA recipe and QSL/HSH recipe indices into a
 * 32-bit color action word (layout documented above); bit 31 marks that
 * a color action is present.
 *
 * Fix: the presence flag used (1 << 31), which left-shifts into the sign
 * bit of a signed int - undefined behavior in C (C11 6.5.7). Using the
 * unsigned constant (1U << 31) yields the same bit pattern without UB.
 */
static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
					uint32_t roa_rcp, uint32_t qsl_hsh)
{
	uint32_t color_action = (mark & FLOW_MARK_MASK) |
				((ioa_rcp & IOA_RCP_MASK) << 10) |
				((roa_rcp & ROA_RCP_MASK) << 18) |
				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
	return color_action;
}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Flag that a new recirculate port was allocated (bit 50). */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+/* Read the new-recirc-port flag (bit 50). */
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Set the tunnel IP type, bit 24 (0 = IPv4, 1 = IPv6). */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+/* Read the tunnel IP type (bit 24). */
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Store the pre-calculated tunnel IP header checksum (bits 49:34). */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+/* Read the pre-calculated tunnel IP header checksum (bits 49:34). */
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Store the tunnel header length, bits 33:26 (0 = no tunnel). */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+/* Read the tunnel header length (bits 33:26). */
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* Store the TX port as txport + 1 (ROA_TX_PHY0 base), bits 23:20. */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+/* Read the encoded TX port value (bits 23:20). */
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Store the tunnel type (bits 19:16). */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+/* Read the tunnel type (bits 19:16). */
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Enable recirculation (bit 25) and store the recirc port (bits 7:0). */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Return the recirc port (bits 7:0), or -1 if recirculation not enabled. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Store the recirc bypass port (bits 15:8); overrides recirc port if set. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+/* Read the recirc bypass port (bits 15:8). */
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action      bit offs  bits
+ *  --------------------------------
+ *  tci                 15:0   16
+ *  queue              23:16    8   uses hbx
+ *  tpid select        27:24    4
+ *  pop vxlan          28:28    1
+ *  pop vlan           29:29    1
+ *  push vlan          30:30    1
+ *  queue override     31:31    1
+ */
+
/*
 * Encode a queue override into an IOA action word: queue id (hbx) in
 * bits 23:16, queue-override enable flag in bit 31.
 *
 * Fix: the original used (1 << 31), which is undefined behavior for a
 * 32-bit signed int and, once the negative int is converted to uint64_t,
 * sign-extends to set bits 63:32 as well - contradicting the documented
 * 31:31 single-bit layout. 1ULL << 31 touches only bit 31.
 */
static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
{
	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
	return actions;
}

/* Decode the queue override: the queue id, or -1 if override not enabled. */
static inline int ioa_get_queue(uint64_t actions)
{
	if (!(actions & (1ULL << 31)))
		return -1;
	return ((actions >> 16) & 0xff);
}
+
+/* Set the pop-outer-VxLAN flag (bit 28). */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Set the pop-outer-VLAN flag (bit 29). */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1AD TPID for VLAN push (tpid select, bits 27:24). */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+/* Read the TPID selector (bits 27:24). */
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Set the push-VLAN flag (bit 30) and the VLAN TCI (bits 15:0). */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set the push-VLAN flag and the PCP part (bits 15:13) of the TCI. */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+/* Read the VLAN TCI (bits 15:0). */
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Table of all SW-cached HW modules: display name plus the per-module
+ * alloc/free/reset/present handlers. Iterated by flow_api_backend_init(),
+ * flow_api_backend_reset() and flow_api_backend_done() below.
+ */
+static const struct {
+	const char *name;
+	int (*allocate)(struct flow_api_backend_s *be);
+	void (*free)(struct flow_api_backend_s *be);
+	int (*reset)(struct flow_api_backend_s *be);
+	bool (*present)(struct flow_api_backend_s *be);
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one contiguous, zeroed memory area holding 'sets' consecutive
+ * arrays, and hand a pointer to each array back through the variadic
+ * destination pointers.
+ *
+ * Variadic arguments come in triplets, one per set:
+ *   void **list     - receives the pointer to the set's first real entry
+ *   int    cnt      - number of entries in the set
+ *   int    elem_size - size in bytes of one entry
+ *
+ * Each set is preceded by EXTRA_INDEXES hidden entries (scratch slots);
+ * the returned per-set pointer points just past them.
+ *
+ * The base pointer and total size are recorded in 'mod' so that
+ * zero_module_cache()/free can operate on the whole area later.
+ * Returns the base pointer, or NULL on allocation failure.
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		/* The caller passes a void **; fetch it with its real type
+		 * so va_arg stays type-correct.
+		 */
+		plist[i] = va_arg(args, void **);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		/* Carve the single allocation into the requested sets,
+		 * skipping the hidden extra entries in front of each.
+		 */
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)(p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+/* Reset the module's cached register image (allocated by callocate_mod)
+ * to all-zero; only the SW mirror is touched, nothing is written to HW.
+ */
+void zero_module_cache(struct common_func_s *mod)
+{
+	memset(mod->base, 0, mod->allocated_size);
+}
+
+/*
+ * Bind a backend implementation to 'dev', query the basic NIC
+ * capabilities (ports/categories/queues), then allocate and reset the
+ * SW cache of every HW module the backend reports as present.
+ *
+ * Returns 0 on success. On any module failure all modules are released
+ * again via flow_api_backend_done() and -1 is returned.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0)
+			continue;
+		/* Allocation or reset failed: tear everything down again */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+		       module[mod].name);
+		flow_api_backend_done(dev);
+		NT_LOG(ERR, FILTER,
+		       "*************** Failed to create Binary Flow API *******************\n");
+		NT_LOG(ERR, FILTER,
+		       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+		NT_LOG(ERR, FILTER,
+		       "********************************************************************\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the SW cache of every registered HW module back to defaults.
+ * Returns 0 on success, -1 as soon as one module's reset handler fails.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Release the SW cache of every module; always returns 0 */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	unsigned int mod;
+
+	for (mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+/* Log an out-of-range index error from 'func'; always returns -2 */
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+/* Log an out-of-range word-offset error from 'func'; always returns -3 */
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+/* Log that module 'mod' at version 'ver' is unsupported; returns -4 */
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* Log that a field is not supported by the active module version; -5 */
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+/* Log that the module lacks enough of 'resource'; returns -4 (note:
+ * deliberately the same code as error_unsup_ver()).
+ */
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+/* Count of hidden scratch entries allocated in front of each module
+ * array by callocate_mod(). No extra enumerators are currently listed
+ * before it, so EXTRA_INDEXES evaluates to 0.
+ */
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/* Move one 32-bit word between the SW cache and the caller's buffer:
+ * on get, copy cache -> *val; otherwise copy *val -> cache.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = *cached_val;
+		return;
+	}
+	*cached_val = *val;
+}
+
+/* Same as get_set() but the cached value is signed; the caller's buffer
+ * stays uint32_t and values are reinterpreted bit-for-bit on transfer.
+ */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)*cached_val;
+		return;
+	}
+	*cached_val = (int32_t)*val;
+}
+
+/*
+ * Starting at 'start', look for another element in the register array
+ * whose raw image is byte-identical to element 'idx'. The index of the
+ * first match (or NOT_FOUND) is returned through *value.
+ * Only valid for 'get' access; returns 0 on success, an error_* code
+ * otherwise.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (unsigned int i = start; i < nb_elements; i++) {
+		if (i == idx)
+			continue;
+		if (memcmp(base + idx * type_size, base + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Compare element 'idx' with element 'cmp_idx'; returns 1 when they are
+ * distinct indices with byte-identical register images, 0 when they
+ * differ (or are the same index), an error_* code on invalid use.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	if (cmp_idx >= nb_elements)
+		return error_index_too_large(func);
+	if (idx == cmp_idx)
+		return 0;
+	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
+		      type_size) == 0;
+}
+
+/* Return 1 when any of the n bytes at 'addr' is non-zero, else 0 */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*p++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 when every one of the n bytes at 'addr' is 0xff, else 0 */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*p++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+/* Bit positions in the CTE (category-to-engine) enable bit-map; several
+ * of the abbreviations match the HW modules handled elsewhere in this
+ * file (HSH, QSL, SLC, PDB, HST, TPE).
+ */
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT	/* number of entries above */
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+/* Packet offset selectors: DYN_* pick a dynamic offset within the frame
+ * (outer headers first, DYN_TUN_* for the inner/tunneled copy), while
+ * the SB_* values carry the SWX_INFO flag and select sideband metadata
+ * instead of packet data.
+ */
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+/* Generic view of a module cache: every *_func_s below embeds the same
+ * COMMON_FUNC_INFO_S head, so any of them can be cast to this type
+ * (see CAST_COMMON) for callocate_mod()/zero_module_cache().
+ */
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+/* CAT (categorizer) module cache: resource counts queried from the
+ * backend plus the version-specific register image (v18/v21/v22).
+ */
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	/* exactly one member is valid, selected by the 'ver' field */
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+/* KM (key matcher) module cache: CAM/TCAM geometry plus the v7
+ * register image.
+ */
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+/* HST (header stripper) module cache: recipe count plus the v2
+ * register image.
+ */
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+/* FLM (flow matcher) module cache: capacity/variant parameters plus the
+ * version-specific register image (v17/v20).
+ */
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM module API: presence/alloc/free/reset plus accessors grouped per
+ * register group. Accessors follow a set/get/flush pattern where "field"
+ * selects a value via enum hw_flm_e above.
+ * NOTE(review): return-value conventions are not visible in this header;
+ * presumably 0 on success and negative on error - confirm in the sources.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+/* CONTROL register group */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* STATUS register group; *_update presumably re-reads HW state - confirm */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+/* TIMEOUT register group */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+/* SCRUB register group */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* LOAD_BIN / LOAD_PPS / LOAD_LPS / LOAD_APS register groups */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+/* PRIO register group */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST table: indexed entries, flushed as a [start_idx, start_idx+count) range */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* RCP (recipe) table; set_mask takes a multi-word value for HW_FLM_RCP_MASK */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* Buffer control and statistics: read-back only (update + get, no set) */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/inform/status data streams; word_cnt sizes the multi-word transfer */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH (hash) module state. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* presumably number of RCP (recipe) entries - confirm */
+	union {
+		struct hw_mod_hsh_v5_s v5; /* register layout for module v5 */
+	};
+};
+
+/*
+ * Function and field selectors for the HSH accessors below.
+ * Do not reorder: field values start at FIELD_START_INDEX and the
+ * remaining enumerators take consecutive implicit values.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+/* HSH module API; RCP entries are word-addressed via index + word_off */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL (queue selection) module state. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories - from name, confirm */
+	uint32_t nb_qst_entries; /* number of QST table entries - from name, confirm */
+	union {
+		struct hw_mod_qsl_v7_s v7; /* register layout for module v7 */
+	};
+};
+
+/*
+ * Function and field selectors for the QSL accessors below.
+ * Do not reorder: field values start at FIELD_START_INDEX.
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+/* QSL module API: RCP, QST, QEN and UNMQ table accessors */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC (slicer) module state. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1; /* register layout for module v1 */
+	};
+};
+
+/*
+ * Function and field selectors for the SLC accessors below.
+ * Do not reorder: field values start at FIELD_START_INDEX.
+ */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+/* SLC module API */
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC LR module state; mirrors the SLC module above (separate HW instance). */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2; /* register layout for module v2 */
+	};
+};
+
+/*
+ * Function and field selectors for the SLC LR accessors below.
+ * Do not reorder: field values start at FIELD_START_INDEX.
+ */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+/* SLC LR module API */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB (packet descriptor builder) module state - expansion from name, confirm. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of PDB RCP categories - from name, confirm */
+
+	union {
+		struct hw_mod_pdb_v9_s v9; /* register layout for module v9 */
+	};
+};
+
+/*
+ * Function and field selectors for the PDB accessors below.
+ * Do not reorder: field values start at FIELD_START_INDEX.
+ */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/* PDB module API; note CONFIG has set+flush but no get in this interface */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module state. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP categories - from name, confirm */
+	uint32_t nb_roa_epp_entries; /* number of ROA EPP entries - from name, confirm */
+	union {
+		struct hw_mod_ioa_v4_s v4; /* register layout for module v4 */
+	};
+};
+
+/*
+ * Function and field selectors for the IOA accessors below.
+ * Do not reorder: field values start at FIELD_START_INDEX.
+ */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+/* IOA module API: RCP table, CONFIG (custom TPIDs) and ROA EPP table */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA module state. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel categories - from name, confirm */
+	uint32_t nb_lag_entries; /* number of LAG config entries - from name, confirm */
+	union {
+		struct hw_mod_roa_v6_s v6; /* register layout for module v6 */
+	};
+};
+
+/*
+ * Function and field selectors for the ROA accessors below.
+ * Do not reorder: field values start at FIELD_START_INDEX.
+ */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/* ROA module API: TUNHDR (word-addressed), TUNCFG, CONFIG, LAGCFG tables */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* IGS/RCC packet and byte drop counters */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module state. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3; /* register layout for module v1.3 */
+	};
+};
+
+/*
+ * Field selectors for the RMC CTRL accessors below (no function selectors;
+ * values start directly at FIELD_START_INDEX). Do not reorder.
+ */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+/* RMC module API: single CTRL register group */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* TPE (TX packet editor) module state - expansion from name, confirm. */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* counts below are from field names - confirm */
+	uint32_t nb_ifr_categories;
+	uint32_t nb_cpy_writers;
+	uint32_t nb_rpl_depth;
+	uint32_t nb_rpl_ext_categories;
+	union {
+		struct hw_mod_tpe_v1_s v1; /* register layouts: module v1 and v2 */
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/*
+ * Function and field selectors for the TPE accessors below, covering the
+ * RPP/IFR/INS/RPL/CPY/HFU/CSU sub-blocks.
+ * Do not reorder: field values start at FIELD_START_INDEX.
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/* TPE module API; each sub-block exposes a set/get plus a ranged flush */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Note: rpl_rpl_set takes a pointer (multi-word value), unlike its peers */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug mode, selected via flow_api_backend_ops::set_debug_mode. */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000, /* debugging disabled */
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001 /* from name: presumably traces register writes - confirm */
+};
+
+/*
+ * Backend operations table, implemented by the concrete NIC backend.
+ * "dev"/"be_dev" is the backend-private handle stored in
+ * struct flow_api_backend_s::be_dev and passed back on every call.
+ *
+ * Per FPGA module there is a presence query, a version query and one or
+ * more flush callbacks that take the module state struct plus an index
+ * range. NOTE(review): exact callback semantics (e.g. whether flush
+ * writes cached state to hardware) are not visible in this header -
+ * confirm against the backend implementation.
+ */
+struct flow_api_backend_ops {
+	int version; /* interface version of this ops table */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	/* capability / resource-count queries */
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	/* RX queue lifetime management */
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level flow API backend state: the backend-private device handle,
+ * the operations table, per-module state structs, and cached NIC
+ * attributes/capacities.
+ */
+struct flow_api_backend_s {
+	void *be_dev; /* backend-private handle passed to all iface callbacks */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Initialize/reset/tear down a backend instance bound to iface/be_dev. */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..b63730c07e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
struct flow_elem;
/*
 * ****************************************************
 *                Resource management
 * ****************************************************
 */
/* Number of 8-bit containers (bytes) needed to hold x bits, rounded up */
#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
/*
 * Resource management
 * These are free resources in FPGA
 * Other FPGA memory lists are linked to one of these
 * and will implicitly follow them
 */
/*
 * NOTE: RES_COUNT must stay last-but-one; it is used to size per-resource
 * tables (e.g. struct flow_handle::resource[RES_COUNT]).
 */
enum res_type_e {
	RES_QUEUE,
	RES_CAT_CFN,
	RES_CAT_COT,
	RES_CAT_EXO,
	RES_CAT_LEN,
	RES_KM_FLOW_TYPE,
	RES_KM_CATEGORY,
	RES_HSH_RCP,
	RES_PDB_RCP,
	RES_QSL_RCP,
	RES_QSL_QST,
	RES_SLC_RCP,
	RES_IOA_RCP,
	RES_ROA_RCP,
	RES_FLM_FLOW_TYPE,
	RES_FLM_RCP,
	RES_HST_RCP,
	RES_TPE_RCP,
	RES_TPE_EXT,
	RES_TPE_RPL,
	RES_COUNT,
	RES_INVALID
};
+
/*
 * ****************************************************
 *           Flow NIC offload management
 * ****************************************************
 */
#define MAX_OUTPUT_DEST (128)
#define NB_QSL_QEN_ADDR 32

/* sentinel for "no flow statistics id assigned" */
#define INVALID_FLOW_STAT_ID 0xffffffff

/* sizes of the per-flow CAM/TCAM entry buffers below */
#define MAX_WORD_NUM 24
#define MAX_BANKS 6

#define MAX_TCAM_START_OFFSETS 4

#define MAX_TAG_INDEX 8

#define MAX_FLM_MTRS_SUPPORTED 4
#define MAX_CPY_WRITERS_SUPPORTED 8
+
/*
 *          128      128     32     32    32
 * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
 *
 * Each word may start at any offset, though
 * they are combined in chronological order, with all enabled to
 * build the extracted match data, thus that is how the match key
 * must be built
 *
 */
/* Which KM extractor type a match element is assigned to */
enum extractor_e {
	KM_USE_EXTRACTOR_UNDEF,
	KM_USE_EXTRACTOR_QWORD,
	KM_USE_EXTRACTOR_SWORD,
};
+
/* One collected match field before it is packed into a CAM/TCAM key */
struct match_elem_s {
	enum extractor_e extr; /* extractor type this element maps to */
	int masked_for_tcam; /* if potentially selected for TCAM */
	uint32_t e_word[4]; /* match value (up to 128 bits) */
	uint32_t e_mask[4]; /* match mask, parallel to e_word */

	int extr_start_offs_id; /* dynamic start-offset selector id */
	int8_t rel_offs; /* byte offset relative to the selected start */
	uint32_t word_len; /* number of 32-bit words used in e_word/e_mask */
};
+
/* Target lookup technology for a formatted KM entry */
enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };

#define MAX_MATCH_FIELDS 16
+
/*
 * Key Matcher flow definition: collects match elements, the formatted
 * CAM/TCAM entry derived from them, and the bank bookkeeping used when
 * the entry is programmed into hardware.
 */
struct km_flow_def_s {
	struct flow_api_backend_s *be;

	/* For keeping track of identical entries */
	struct km_flow_def_s *reference;
	struct km_flow_def_s *root;

	/* For collect flow elements and sorting */
	struct match_elem_s match[MAX_MATCH_FIELDS];
	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
	int num_ftype_elem;

	/* Finally formatted CAM/TCAM entry */
	enum cam_tech_use_e target;
	uint32_t entry_word[MAX_WORD_NUM];
	uint32_t entry_mask[MAX_WORD_NUM];
	int key_word_size;

	/* TCAM calculated possible bank start offsets */
	int start_offsets[MAX_TCAM_START_OFFSETS];
	int num_start_offsets;

	/* Flow information */

	/*
	 * HW input port ID needed for compare. In port must be identical on flow
	 * types
	 */
	uint32_t port_id;
	uint32_t info; /* used for color (actions) */
	int info_set;
	int flow_type; /* 0 is illegal and used as unset */
	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */

	/* CAM specific bank management */
	int cam_paired;
	int record_indexes[MAX_BANKS];
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct cam_distrib_s *cam_dist;
	struct hasher_s *hsh;

	/* TCAM specific bank management */
	struct tcam_distrib_s *tcam_dist;
	int tcam_start_bank;
	int tcam_record;
};
+
/*
 * KCC-CAM
 */
/* 64-bit KCC CAM key image; also accessible as key64/key32 via the union below */
struct kcc_key_s {
	uint64_t sb_data : 32; /* sideband data (e.g. VLAN TPID/VID or VXLAN VNI) */
	uint64_t sb_type : 8; /* sideband type discriminator for sb_data */
	uint64_t cat_cfn : 8; /* categorizer function index */
	uint64_t port : 16;
};

#define KCC_ID_INVALID 0xffffffff

/* One KCC CAM flow: key, allocated unique id, and CAM bank bookkeeping */
struct kcc_flow_def_s {
	struct flow_api_backend_s *be;
	union {
		uint64_t key64;
		uint32_t key32[2];
		struct kcc_key_s key;
	};
	uint32_t km_category;
	uint32_t id; /* unique id from kcc_alloc_unique_id(), or KCC_ID_INVALID */

	uint8_t *kcc_unique_ids; /* shared allocation bitmap, one bit per id */

	int flushed_to_target;
	int record_indexes[MAX_BANKS];
	int bank_used;
	uint32_t *cuckoo_moves; /* for CAM statistics only */
	struct kcc_cam_distrib_s *cam_dist;
	struct hasher_s *hsh;
};
+
/*
 * Tunnel encapsulation header definition
 */
enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };

#define MAX_TUN_HDR_SIZE 128

/* Raw push-tunnel header plus the layout metadata needed to edit it */
struct tunnel_header_s {
	union {
		uint8_t hdr8[MAX_TUN_HDR_SIZE];
		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
	} d;
	uint32_t user_port_id;
	uint8_t len; /* used bytes in d */

	uint8_t nb_vlans;

	uint8_t ip_version; /* 4: v4, 6: v6 */
	uint16_t ip_csum_precalc;

	uint8_t new_outer;
	uint8_t l2_len;
	uint8_t l3_len;
	uint8_t l4_len;
};
+
enum port_type_e {
	PORT_NONE, /* not defined or drop */
	PORT_INTERNAL, /* no queues attached */
	PORT_PHY, /* MAC phy output queue */
	PORT_VIRT, /* Memory queues to Host */
};

/* High-priority partial-match classification for special SW handling */
enum special_partial_match_e {
	SPECIAL_MATCH_NONE,
	SPECIAL_MATCH_LACP,
};

#define PORT_ID_NONE 0xffffffff

/* One output destination of a flow (queue or physical port) */
struct output_s {
	uint32_t owning_port_id; /* the port who owns this output destination */
	enum port_type_e type;
	int id; /* depending on port type: queue ID or physical port id or not used */
	int active; /* activated */
};
+
/*
 * Result of the first conversion step of a flow: decoded match info plus
 * collected action data, before any NIC resources are allocated.
 */
struct nic_flow_def {
	/*
	 * Frame Decoder match info collected
	 */
	int l2_prot;
	int l3_prot;
	int l4_prot;
	int tunnel_prot;
	int tunnel_l3_prot;
	int tunnel_l4_prot;
	int vlans;
	int fragmentation;
	/*
	 * Additional meta data for various functions
	 */
	int in_port_override;
	int l4_dst_port;
	/*
	 * Output destination info collection
	 */
	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
	int dst_num_avail;

	/*
	 * To identify high priority match with mark for special SW processing (non-OVS)
	 */
	enum special_partial_match_e special_match;

	/*
	 * Mark or Action info collection
	 */
	uint32_t mark;
	uint64_t roa_actions;
	uint64_t ioa_actions;

	uint32_t jump_to_group;

	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];

	int full_offload;
	/*
	 * Action push tunnel
	 */
	struct tunnel_header_s tun_hdr;

	/*
	 * If DPDK RTE tunnel helper API used
	 * this holds the tunnel if used in flow
	 */
	struct tunnel_s *tnl;

	/*
	 * Header Stripper
	 */
	int header_strip_start_dyn;
	int header_strip_start_ofs;
	int header_strip_end_dyn;
	int header_strip_end_ofs;
	int header_strip_removed_outer_ip;

	/*
	 * Modify field
	 */
	struct {
		uint32_t select;
		uint32_t dyn;
		uint32_t ofs;
		uint32_t len;
		uint32_t level;
		union {
			uint8_t value8[16];
			uint16_t value16[8];
			uint32_t value32[4];
		};
	} modify_field[MAX_CPY_WRITERS_SUPPORTED];

	uint32_t modify_field_count;
	uint8_t ttl_sub_enable;
	uint8_t ttl_sub_ipv4;
	uint8_t ttl_sub_outer;

	/*
	 * Key Matcher flow definitions
	 */
	struct km_flow_def_s km;

	/*
	 * Key Matcher Category CAM
	 */
	struct kcc_flow_def_s *kcc;
	int kcc_referenced;

	/*
	 * TX fragmentation IFR/RPP_LR MTU recipe
	 */
	uint8_t flm_mtu_fragmentation_recipe;
};
+
/* Discriminator for the union inside struct flow_handle */
enum flow_handle_type {
	FLOW_HANDLE_TYPE_FLOW,
	FLOW_HANDLE_TYPE_FLM,
};

/*
 * Runtime handle of an installed flow. Which member of the anonymous
 * union is valid is selected by `type` (FLOW vs FLM learned flow).
 */
struct flow_handle {
	enum flow_handle_type type;

	struct flow_eth_dev *dev;
	struct flow_handle *next; /* doubly linked list of handles on dev */
	struct flow_handle *prev;

	union {
		struct {
			/*
			 * 1st step conversion and validation of flow
			 * verified and converted flow match + actions structure
			 */
			struct nic_flow_def *fd;
			/*
			 * 2nd step NIC HW resource allocation and configuration
			 * NIC resource management structures
			 */
			struct {
				int index; /* allocation index into NIC raw resource table */
				/* number of contiguous allocations needed for this resource */
				int count;
				/*
				 * This resource if not initially created by this flow, but reused
				 * by it
				 */
				int referenced;
			} resource[RES_COUNT];
			int flushed;

			uint32_t flow_stat_id;
			uint32_t color;
			int cao_enabled;
			uint32_t cte;

			uint32_t port_id; /* MAC port ID or override of virtual in_port */
			uint32_t flm_ref_count;
			uint8_t flm_group_index;
			uint8_t flm_ft_index;
		};

		struct {
			uint32_t flm_data[10];
			uint8_t flm_prot;
			uint8_t flm_kid;
			uint8_t flm_prio;

			uint16_t flm_rpl_ext_ptr;
			uint32_t flm_nat_ipv4;
			uint16_t flm_nat_port;
			uint8_t flm_dscp;
			uint32_t flm_teid;
			uint8_t flm_rqi;
			uint8_t flm_qfi;

			uint8_t flm_mtu_fragmentation_recipe;

			struct flow_handle *flm_owner;
		};
	};
};
+
/*
 * Key Matcher (KM) entry management
 */
void km_attach_ndev_resource_management(struct km_flow_def_s *km,
					void **handle);
void km_free_ndev_resource_management(void **handle);

int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
				uint32_t *cuckoo_moves);

int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
		      uint32_t e_mask[4], uint32_t word_len,
		      enum frame_offs_e start, int8_t offset);

int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
/*
 * Compares 2 KM key definitions after first collect validate and optimization.
 * km is compared against an existing km1.
 * if identical, km1 flow_type is returned
 */
int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);

void km_set_info(struct km_flow_def_s *km, int on);
int km_rcp_set(struct km_flow_def_s *km, int index);

int km_refer_data_match_entry(struct km_flow_def_s *km,
			      struct km_flow_def_s *km1);
int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
int km_clear_data_match_entry(struct km_flow_def_s *km);

/*
 * KCC-CAM management: unique-id allocation, key construction, and
 * reference-counted CAM entry programming
 */
void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
		void **handle);
void kcc_free_ndev_resource_management(void **handle);
int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);

int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);

int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);

/*
 * Group management
 */
int flow_group_handle_create(void **handle, uint32_t group_count);
int flow_group_handle_destroy(void **handle);

int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
			     uint32_t *group_out);
int flow_group_translate_release(void *handle, uint32_t translated_group);

/*
 * Actions management
 */
uint8_t flow_tunnel_alloc_virt_port(void);
uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
			      uint32_t *vni);
int tunnel_release(struct tunnel_s *tnl);
uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
			  uint8_t vport);

int is_virtual_port(uint8_t virt_port);
int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
				 struct nic_flow_def *fd,
				 const struct flow_elem *elem);

/*
 * statistics
 */
uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
struct flow_nic_dev; /* adapter device */

/*
 * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
 */
struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
				     const struct flow_api_backend_ops *be_if,
				     void *be_dev);
/* Destroy a flow api instance created by flow_api_create(); returns 0 on success. */
int flow_api_done(struct flow_nic_dev *dev);
/* Return the opaque backend device bound to @dev at creation time. */
void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
/* owner_id is a uint8_t, so at most 256 distinct owners */
#define OWNER_ID_COUNT 256

/* Reverse mapping of one translated group back to its translation-table slot */
struct group_lookup_entry_s {
	uint64_t ref_counter; /* how many (owner, group) pairs map here */
	uint32_t *reverse_lookup; /* slot in translation_table that points at us */
};

/* Per-adapter group translation state, created by flow_group_handle_create() */
struct group_handle_s {
	uint32_t group_count;

	/* group_count * OWNER_ID_COUNT entries; 0 means "not translated yet" */
	uint32_t *translation_table;

	struct group_lookup_entry_s *lookup_entries; /* group_count entries */
};
+
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	group_handle = *handle;
+
+	group_handle->group_count = group_count;
+	group_handle->translation_table = calloc((uint32_t)(group_count * OWNER_ID_COUNT),
+						 sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	return *handle != NULL ? 0 : -1;
+}
+
+int flow_group_handle_destroy(void **handle)
+{
+	if (*handle) {
+		struct group_handle_s *group_handle =
+			(struct group_handle_s *)*handle;
+
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+
+		free(*handle);
+		*handle = NULL;
+	}
+
+	return 0;
+}
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	table_ptr = &group_handle->translation_table[owner_id * OWNER_ID_COUNT +
+				 group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			return -1;
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *lookup;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	lookup = &group_handle->lookup_entries[translated_group];
+
+	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
+		lookup->ref_counter -= 1;
+		if (lookup->ref_counter == 0) {
+			*lookup->reverse_lookup = 0;
+			lookup->reverse_lookup = NULL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
/*
 * Fixed bit permutation used by the hash mixers: every input bit is moved
 * to a new position, so the mapping is invertible.
 */
static uint32_t shuffle(uint32_t x)
{
	uint32_t out = 0;

	out |= (x & 0x00000002) << 29;
	out |= (x & 0xAAAAAAA8) >> 3;
	out |= (x & 0x15555555) << 3;
	out |= (x & 0x40000000) >> 29;
	return out;
}
+
/* Rotate right by s bits, with the bits wrapping into the top inverted. */
static uint32_t ror_inv(uint32_t x, const int s)
{
	uint32_t shifted = x >> s;
	uint32_t wrapped = (~x) << (32 - s);

	return shifted | wrapped;
}
+
/*
 * Non-linear combiner: xor of both inputs with a per-bit term that is set
 * when exactly two of the four inverted-rotation taps are set.
 */
static uint32_t combine(uint32_t x, uint32_t y)
{
	uint32_t a = ror_inv(x, 15);
	uint32_t b = ror_inv(x, 13);
	uint32_t c = ror_inv(y, 3);
	uint32_t d = ror_inv(y, 27);
	uint32_t two_of_four;

	two_of_four = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
		      (a & ~c & ~b & d) | (~a & c & b & ~d) |
		      (~a & c & ~b & d) | (~a & ~c & b & d);
	return x ^ y ^ two_of_four;
}
+
/* One 32-bit mixing round: combine the inputs, then permute the bits. */
static uint32_t mix(uint32_t x, uint32_t y)
{
	uint32_t combined = combine(x, y);

	return shuffle(combined);
}
+
/*
 * ror_inv(lane, 3) applied to both 32-bit lanes of a 64-bit word at once;
 * the mask confines the inverted wrap bits to each lane's top 3 bits.
 */
static uint64_t ror_inv3(uint64_t x)
{
	const uint64_t wrap_mask = 0xE0000000E0000000ULL;

	return ((x >> 3) | wrap_mask) ^ ((x << 29) & wrap_mask);
}
+
/* Two-lane ror_inv(lane, 13); mask covers each lane's top 13 bits. */
static uint64_t ror_inv13(uint64_t x)
{
	const uint64_t wrap_mask = 0xFFF80000FFF80000ULL;

	return ((x >> 13) | wrap_mask) ^ ((x << 19) & wrap_mask);
}
+
/* Two-lane ror_inv(lane, 15); mask covers each lane's top 15 bits. */
static uint64_t ror_inv15(uint64_t x)
{
	const uint64_t wrap_mask = 0xFFFE0000FFFE0000ULL;

	return ((x >> 15) | wrap_mask) ^ ((x << 17) & wrap_mask);
}
+
/* Two-lane ror_inv(lane, 27); mask covers each lane's top 27 bits. */
static uint64_t ror_inv27(uint64_t x)
{
	const uint64_t wrap_mask = 0xFFFFFFE0FFFFFFE0ULL;

	return ((x >> 27) | wrap_mask) ^ ((x << 5) & wrap_mask);
}
+
/* The 32-bit shuffle() permutation applied to both lanes of a 64-bit word. */
static uint64_t shuffle64(uint64_t x)
{
	uint64_t out = 0;

	out |= (x & 0x0000000200000002) << 29;
	out |= (x & 0xAAAAAAA8AAAAAAA8) >> 3;
	out |= (x & 0x1555555515555555) << 3;
	out |= (x & 0x4000000040000000) >> 29;
	return out;
}
+
/* Concatenate two 32-bit values into one 64-bit word (x in the high lane). */
static uint64_t pair(uint32_t x, uint32_t y)
{
	uint64_t high = (uint64_t)x << 32;

	return high | y;
}
+
/*
 * Two-lane variant of combine(): same "exactly two of four taps" term,
 * evaluated on both 32-bit lanes at once via the lane-safe rotations.
 */
static uint64_t combine64(uint64_t x, uint64_t y)
{
	uint64_t a = ror_inv15(x);
	uint64_t b = ror_inv13(x);
	uint64_t c = ror_inv3(y);
	uint64_t d = ror_inv27(y);
	uint64_t two_of_four;

	two_of_four = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
		      (a & ~c & ~b & d) | (~a & c & b & ~d) |
		      (~a & c & ~b & d) | (~a & ~c & b & d);
	return x ^ y ^ two_of_four;
}
+
/* One 64-bit mixing round: combine the inputs, then permute both lanes. */
static uint64_t mix64(uint64_t x, uint64_t y)
{
	uint64_t combined = combine64(x, y);

	return shuffle64(combined);
}
+
/*
 * Reduce a 16-word key to a 32-bit hash through the binary mixing tree
 * below. The result depends on the exact pairing order, so the pair()
 * argument order must not be changed.
 */
static uint32_t calc16(const uint32_t key[16])
{
	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
	/*   0       1       2       3       4       5       6       7     Layer 1   */
	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
	/*       0               1               2               3         Layer 2   */
	/*        \______.______/                 \______.______/                    */
	/*               0                               1                 Layer 3   */
	/*                \______________.______________/                            */
	/*                               0                                 Layer 4   */
	/*                              / \                                          */
	/*                              \./                                          */
	/*                               0                                 Layer 5   */
	/*                              / \                                          */
	/*                              \./                                Layer 6   */
	/*                             value                                         */

	uint64_t z;
	uint32_t x;

	/* layers 0-4: 64-bit mixing of word pairs (key[i] paired with key[i+8]) */
	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));

	/* layers 5-6: fold the 64-bit state down to 32 bits */
	x = mix((uint32_t)(z >> 32), (uint32_t)z);
	x = mix(x, ror_inv(x, 17));
	x = combine(x, ror_inv(x, 17));

	return x;
}
+
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
#ifdef TESTING
/*
 * Self-test compiled only with TESTING: hashes a fixed key and compares
 * the hash and the first three bank indexes against reference values.
 * Returns the number of mismatches (0 = pass).
 *
 * NOTE(review): only the first 4 of 16 inval words are initialized (the
 * rest are zero), and val[]/resval[] hold 10 entries while gethash()
 * writes hsh->banks entries — assumes banks <= 10; confirm with callers.
 */
int hash_test(struct hasher_s *hsh, int banks, int record_bw)
{
	int res = 0;
	int val[10], resval[10];
	uint32_t bits = 0;

	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
	const uint32_t result = 0xACECAE65;

	for (int i = 0; i < 16; i++)
		printf("%08x,", inval[i]);
	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);

	uint32_t ret = gethash(hsh, inval, val);

	printf("Return VAL = %08X  ==  %08X\n", ret, result);
	res += (ret != result) ? 1 : 0;

	/* re-derive the expected per-bank indexes from the raw hash */
	int shft = (banks * record_bw) - 32;
	int mask = (1 << record_bw) - 1;

	if (shft > 0) {
		bits = (ret >> (32 - shft));
		ret ^= ret << shft;
	}

	resval[0] = ret & mask;
	ret >>= record_bw;
	resval[1] = ret & mask;
	ret >>= record_bw;
	resval[2] = ret & mask;
	resval[2] |= (bits << (record_bw - shft));

	for (int i = 0; i < 3; i++) {
		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
		res += (val[i] != resval[i]) ? 1 : 0;
	}

	return res;
}
#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
/* Hash configuration derived by init_hasher() from bank/record counts */
struct hasher_s {
	int banks; /* number of CAM banks to produce indexes for */
	int cam_records_bw; /* bits needed to address one bank's records */
	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
	int cam_bw; /* banks * cam_records_bw: total hash bits consumed */
};

/* Initialize @hsh for @banks banks of @nb_records records each; returns 0. */
int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
/* Hash @key, write one record index per bank to @result, return the raw hash. */
uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include "flow_api_backend.h"
#include "flow_api_engine.h"
#include "flow_hasher.h"
+
/*
 * KCC-CAM structures and defines
 */
/* Shadow of one KCC CAM record: owning flow and its reference count */
struct kcc_cam_distrib_s {
	struct kcc_flow_def_s *kcc_owner;
	int ref_cnt;
};

/*
 * CAM address of the record currently selected for bank `bnk`.
 * Uses a GCC statement expression and expects a `kcc` variable in scope.
 */
#define BE_CAM_KCC_DIST_IDX(bnk) \
	({ \
		int _temp_bnk = (bnk); \
		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
	})


/* byte size of the CAM shadow table (expects `kcc` in scope) */
#define BE_CAM_ENTRIES \
	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
/* byte size of the unique-id allocation bitmap (one bit per id) */
#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)

#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
/* shared recursion stack for cuckoo moves — makes the move code non-reentrant */
static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
/*
 * Key for KCC CAM
 */
/* No sideband data: sb_data forced to all-ones with type 0 */
int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
{
	kcc->key.sb_data = 0xffffffff;
	kcc->key.sb_type = 0;
	return 0;
}

/* Sideband type 1: outer VLAN TPID in the top 16 bits, 12-bit VID below */
int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
{
	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
	kcc->key.sb_type = 1;
	return 0;
}

/* Sideband type 2: 24-bit VXLAN VNI tagged with marker bit 0x02000000 */
int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
{
	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
	kcc->key.sb_type = 2;
	return 0;
}

/* Set the port field of the key */
int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
{
	kcc->key.port = port;
	return 0;
}

/* Set the categorizer function index field of the key */
int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
{
	kcc->key.cat_cfn = cat_cfn;
	return 0;
}

/* Read back the categorizer function index field of the key */
uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
{
	return kcc->key.cat_cfn;
}

/*
 * other settings for KCC CAM
 */
/* Record the KM category this KCC entry maps to (not part of the key) */
int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
{
	kcc->km_category = category;
	return 0;
}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
/*
 * Write this flow's key, KM category and id into the CAM record selected
 * for `bank`, flush it to hardware, and claim the shadow entry.
 * Returns the flush result, or -1 if any register write fails.
 */
static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
{
	int res;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
				 kcc->key32[0]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
				 kcc->key32[1]);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
				 kcc->km_category);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	/* record ownership in the shadow table */
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
	return res;
}
+
/*
 * Zero the CAM record selected for `bank`, flush it to hardware, release
 * the shadow entry and clear the flow's key/category. The allocated
 * unique id is intentionally NOT freed here (see comment below).
 */
static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
{
	int res = 0;
	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];

	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
	if (res)
		return -1;
	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);

	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;

	kcc->key64 = 0UL;
	kcc->km_category = 0;
	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
	return res;
}
+
/*
 * Try to relocate this flow's CAM entry from its current bank to any bank
 * whose selected record is free (one cuckoo step). Returns 1 when moved,
 * 0 when no free bank exists or the hardware write failed.
 */
static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
{
	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);

	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
		/* It will not select itself */
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
				NULL) {
			/*
			 * Populate in new position
			 */
			int res = kcc_cam_populate(kcc, bank);

			if (res) {
				NT_LOG(DBG, FILTER,
				       "Error: failed to write to KM CAM in cuckoo move\n");
				return 0;
			}

			/*
			 * Reset/free entry in old bank
			 * HW flushes are really not needed, the old addresses are always taken over
			 * by the caller If you change this code in future updates, this may no
			 * longer be true then!
			 */
			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
			.kcc_owner = NULL;
			NT_LOG(DBG, FILTER,
			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
			       kcc->bank_used, bank,
			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
			       BE_CAM_KCC_DIST_IDX(bank));

			kcc->bank_used = bank;
			(*kcc->cuckoo_moves)++;
			return 1;
		}
	}
	return 0;
}
+
/*
 * Recursive cuckoo move: try to move the flow occupying CAM address
 * `bank_idx`, displacing other flows up to `levels` deep. Addresses
 * already on the move path are tracked in the shared
 * kcc_cam_addr_reserved_stack (not reentrant). Returns 1 on success.
 */
static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
				       int bank_idx, int levels,
				       int cam_adr_list_len)
{
	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;

	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* fast path: a direct single-step move is enough */
	if (kcc_move_cuckoo_index(kcc))
		return 1;
	if (levels <= 1)
		return 0;

	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);

	/* reserve this address so deeper levels will not re-target it */
	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
		int reserved = 0;
		int new_idx = BE_CAM_KCC_DIST_IDX(i);

		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
				i_reserved++) {
			if (kcc_cam_addr_reserved_stack[i_reserved] ==
					new_idx) {
				reserved = 1;
				break;
			}
		}
		if (reserved)
			continue;

		/* displace the occupant of new_idx, then retry our own move */
		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
						      cam_adr_list_len);
		if (res) {
			if (kcc_move_cuckoo_index(kcc))
				return 1;

			else
				assert(0);
		}
	}

	return 0;
}
+
/* Static hash-key buffer (words 2-15 stay zero) — makes this path non-reentrant */
static uint32_t kcc_hsh_key[16];

/*
 * Program the flow's key into the KCC CAM: hash the key to get one record
 * index per bank, take a free bank if any, otherwise cuckoo-move existing
 * entries (depth 4) to make room. Returns 0 on success, -1 when no bank
 * could be freed or the CAM write failed.
 */
static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
{
	int res = 0;
	int val[MAX_BANKS];

	/* hash input is the two key words, swapped */
	kcc_hsh_key[0] = kcc->key32[1];
	kcc_hsh_key[1] = kcc->key32[0];
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");

	/* 2-15 never changed - remains zero */

	gethash(kcc->hsh, kcc_hsh_key, val);

	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
		kcc->record_indexes[i] = val[i];
	/* NOTE(review): logs indexes 0-2 unconditionally — assumes >= 3 banks */
	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
	       kcc->record_indexes[0], kcc->record_indexes[1],
	       kcc->record_indexes[2]);

	int bank = -1;
	/*
	 * first step, see if any of the banks are free
	 */
	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
				NULL) {
			bank = i_bank;
			break;
		}
	}

	if (bank < 0) {
		/*
		 * Second step - cuckoo move existing flows if possible
		 */
		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
				i_bank++) {
			if (kcc_move_cuckoo_index_level(kcc,
							BE_CAM_KCC_DIST_IDX(i_bank),
							4, 0)) {
				bank = i_bank;
				break;
			}
		}

		if (bank < 0)
			return -1;
	}

	/* populate CAM */
	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
	       BE_CAM_KCC_DIST_IDX(bank));
	res = kcc_cam_populate(kcc, bank);
	if (res == 0) {
		kcc->flushed_to_target = 1;
		kcc->bank_used = bank;
	} else {
		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
	}
	return res;
}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Table of key-mask patterns that the KM CAM can match directly.
+ * A match element whose (word_len, mask) pair equals one of these rows
+ * can be placed in CAM; anything else is flagged for TCAM placement
+ * (see km_add_match_elem). Masks are listed most-specific first.
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+/* Number of rows in the CAM-compatible mask table above. */
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Per-CAM-record ownership marker; NULL means the record is free. */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/*
+ * NOTE: both macros below expand references to a local variable named
+ * 'km' and must only be used inside functions that declare one.
+ * CAM_KM_DIST_IDX uses a GCC/Clang statement expression.
+ */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Maximum recursion depth for the cuckoo-move relocation search. */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/* Addresses reserved while a cuckoo move is in progress.
+ * NOTE(review): file-scope state - presumably callers serialize access;
+ * not safe for concurrent use. Confirm locking at call sites.
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Per-TCAM-record ownership marker; NULL means the record is free. */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* See note above: expands a local 'km' variable. */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach this KM flow definition to the shared per-NIC-device CAM/TCAM
+ * record manager. Allocates the manager on first use (one contiguous
+ * block holding the CAM map, the cuckoo-move counter, the TCAM map and
+ * the hasher state) and wires up the km pointers into it.
+ *
+ * Fix: the calloc result was previously used unchecked; on allocation
+ * failure the pointer arithmetic below would operate on NULL (undefined
+ * behavior). Now logs an error and returns early, leaving *handle NULL.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* Carve the single allocation into its four regions. */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the shared per-NIC-device CAM/TCAM record manager and clear
+ * the handle. Safe to call when the manager was never allocated.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	void *mgr = *handle;
+
+	if (mgr) {
+		free(mgr);
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Append one match element (value words + mask words) to the KM flow
+ * definition being built.
+ *
+ * word_len: number of valid 32-bit words (1, 2 or 4; 3 is padded to 4).
+ * start_id: extractor start-offset identifier (frame_offs_e).
+ * offset:   relative byte offset from that start point.
+ *
+ * Also classifies the element for CAM vs TCAM placement by comparing its
+ * mask against the cam_masks table. Returns 0 on success, -1 on invalid
+ * word_len.
+ *
+ * NOTE(review): km->num_ftype_elem is not bounds-checked here against
+ * the match[] array size (MAX_MATCH_FIELDS?) - presumably callers limit
+ * the element count; confirm at call sites.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	/* Copy value and mask words into the next free match slot. */
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable or disable inclusion of the color/info word in the KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first match element that is not yet marked, is not a
+ * sideband (SWX) element, and has exactly 'size' words.
+ * Returns the element index, or -1 when none remains.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int idx = 0; idx < km->num_ftype_elem; idx++) {
+		if (marked[idx])
+			continue;
+		if (km->match[idx].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[idx].word_len == size)
+			return idx;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/*
+ * Debug-only helper: map a frame_offs_e extractor start-offset id to a
+ * human-readable description for trace logging. Unknown ids yield
+ * "<unknown>".
+ */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow definition: map match
+ * elements onto QWORD/SWORD extractors, assemble the entry word/mask
+ * arrays, decide CAM vs TCAM placement and, for TCAM, compute the legal
+ * start-bank offsets for the key length.
+ *
+ * Returns 0 on success, -1 when the elements cannot be mapped (too many
+ * QWords/SWords, or an unsupported TCAM key length).
+ *
+ * Fix: in the TCAM key_word_size==1 case the loop wrote only
+ * start_offsets[0] four times, leaving start_offsets[1..3] uninitialized
+ * while num_start_offsets was set to 4 - tcam_find_mapping would then
+ * probe garbage bank indexes. Now fills start_offsets[i].
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		/* Prefer the widest remaining element: 4, then 2, then 1 words */
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/* four candidate start banks: 8, 9, 10, 11 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare this KM flow definition against an already-programmed one
+ * (km1, the original creator of the KM recipe) to decide whether the
+ * recipe and flow type can be reused.
+ *
+ * Returns:
+ *   km1->flow_type  - compatible; reuse recipe and flow type
+ *   0               - not compatible; a new recipe is needed
+ *   -1              - exact flow clash (already programmed)
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/*
+		 * NOTE(review): the first test reads km->match[i] while the
+		 * second reads km->match_map[i] - asymmetric indexing
+		 * (insertion order vs mapped order). Looks intentional only
+		 * if both orders agree for sideband elements; confirm.
+		 */
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program the KM recipe (RCP) at 'index' in hardware from this flow
+ * definition: extractor selection (QW0/QW4, DW8/DW10, SWX), key mask A,
+ * key length, flow-type mapping and CAM pairing, or - for TCAM targets -
+ * the bank bitmap and key length after finding a free TCAM mapping.
+ *
+ * Returns 0 on success, -1 when the element layout cannot be expressed
+ * in the recipe (too many extractors of a kind, unknown SWX source,
+ * unsupported word length, or no free TCAM mapping).
+ *
+ * Register writes are order-sensitive; the sequence below mirrors the
+ * hardware programming model and must not be reordered.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* Sideband words only supported in CAM, one at most */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* Plain single words use DW8 then DW10; max two */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* Quad words use QW0 then QW4; max two */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A */
+	for (int i = 0; i < km->key_word_size; i++) {
+		/* mask words are written MSW-first into the recipe */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		/* one bank bit per key word, numbered from the top bank down */
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into the CAM record(s) of
+ * the given bank and mark ownership in the software CAM map.
+ * A key longer than one record (cam_paired) spills into the following
+ * record. Flushes the written record(s) to hardware.
+ * Returns OR-ed result of all backend writes (0 on success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	/* first record: up to nb_cam_record_words words */
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* remaining words go into the paired (next) record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero out this flow's CAM record(s) in the given bank, release
+ * ownership in the software CAM map and flush to hardware.
+ * Mirror image of cam_populate(); handles paired records the same way.
+ * Returns OR-ed result of all backend writes (0 on success).
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* paired key - clear the second record as well */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate this flow's CAM entry from its current bank to any
+ * free bank (cuckoo hashing step). On success the entry is written to
+ * the new bank, the old software-map slot is released, bank_used is
+ * updated and the shared cuckoo-move counter incremented.
+ * Returns 1 on successful move, 0 when no free bank or write failure.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* paired entries also need the next record free */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo relocation: try to move the flow occupying
+ * cam_dist[bank_idx] elsewhere, recursing up to 'levels' deep through
+ * the chains of occupying flows. Visited addresses are recorded in
+ * cam_addr_reserved_stack to avoid cycles.
+ * Returns 1 when the slot at bank_idx was freed, 0 otherwise.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address so deeper levels do not revisit it */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			/* deeper level freed a slot; this flow must now move */
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Write this flow's key into the KM CAM: hash the key to get one
+ * candidate record per bank, pick a free bank (or free one via cuckoo
+ * moves up to depth 4), then populate the record(s).
+ * Returns 0 on success, -1 when no bank could be made available,
+ * or the backend write status from cam_populate.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): log reads indexes [0..2] - assumes at least 3 CAM
+	 * banks; confirm nb_cam_banks >= 3 on all supported FPGAs.
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Search for a TCAM record index that is free across all
+ * key_word_size consecutive banks starting at start_bank.
+ * On success stores the record in km->tcam_record and returns 1;
+ * returns 0 when no record is free across the whole span.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			/* same record must be free in every bank the key spans */
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find a (start bank, record) placement for this flow in the TCAM by
+ * trying each legal start offset computed in km_key_create.
+ * Sets km->tcam_start_bank and km->tcam_record on success and returns 0;
+ * returns -1 when the TCAM has no room for this key.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	/* Search record and start index for this flow */
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		if (tcam_find_free_record(km, km->start_offsets[bs_idx])) {
+			km->tcam_start_bank = km->start_offsets[bs_idx];
+			NT_LOG(DBG, FILTER,
+			       "Found space in TCAM start bank %i, record %i\n",
+			       km->tcam_start_bank, km->tcam_record);
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word of this flow into a TCAM bank.
+ * The TCAM is value-indexed: for each of the 4 bytes of the word and
+ * each of its 256 possible values, the record's bit is set when the
+ * value matches (value & mask == key & mask), cleared otherwise.
+ * Flushes the bank and marks record ownership on success.
+ * Returns OR-ed backend status (0 on success).
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* record bitmap position: word index and bit within that word */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Write this flow's key into the TCAM: locate a free record when the
+ * recipe was reused (tcam_record < 0), program the TCI (color and flow
+ * type) for the record, then write each key word into its bank.
+ * Returns 0 on success, -1 when no record is free, or OR-ed backend
+ * status. Sets flushed_to_target on success.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	/* one TCAM bank per key word, stopping at the first error */
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record's bit from every (byte, value) entry of a TCAM bank,
+ * flush the bank to hardware and drop the record's ownership.
+ * Returns 0 on success, non-zero on backend error.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* locate the record's bit inside the 3 x 32-bit record bitmap */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			/* remove this record from the entry */
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove this KM flow definition's entry from the TCAM: clear KM_TCI
+ * (color and flow type) for the record and reset one bank per key word.
+ * Returns 0 on success, non-zero on backend error.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/*
+	 * Write KM_TCI - collect the return codes like
+	 * km_write_data_to_tcam() does instead of silently dropping them
+	 */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, 0);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, 0);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Make 'km' a reference to the already-programmed match entry owned by
+ * 'km1': append km to the end of km1's reference chain and copy the
+ * target-specific location info.  Returns 0 on success, -1 for an
+ * unsupported target.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	/* the chain root is km1 itself unless km1 already has one */
+	km->root = (km1->root != NULL) ? km1->root : km1;
+
+	/* walk to the end of the reference chain and append km */
+	while (tail->reference != NULL)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = km1->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->flushed_to_target = km1->flushed_to_target;
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		return 0;
+	case KM_TCAM:
+		km->flushed_to_target = km1->flushed_to_target;
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		return 0;
+	case KM_SYNERGY:
+	default:
+		return -1;
+	}
+}
+
+/*
+ * Program this KM flow definition's match entry into its target
+ * (CAM or TCAM) with the given color.  Returns the target writer's
+ * result, or -1 for an unsupported target.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	switch (km->target) {
+	case KM_CAM:
+		return km_write_data_to_cam(km);
+	case KM_TCAM:
+		return km_write_data_to_tcam(km);
+	case KM_SYNERGY:
+	default:
+		return -1;
+	}
+}
+
+/*
+ * Remove this KM flow definition from its match entry.  Three cases:
+ *  1) km is a reference in a chain (km->root set): just unlink it from
+ *     the chain - the hardware entry stays, owned by the remaining flows.
+ *  2) km owns the entry but others reference it (km->reference set):
+ *     transfer hardware ownership to the first reference.
+ *  3) km is the sole owner and the entry was flushed to hardware:
+ *     actually reset the CAM/TCAM entry.
+ * Returns 0 on success, non-zero on failure.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* case 1: unlink km from its owner's reference chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* case 2: hand the entry over to the first reference */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				/* paired CAM entry - transfer both halves */
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* case 3: last user - clear the hardware entry */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/*
+ * Tunnel database entry: a tunnel header definition plus the mask of
+ * fields that were actually specified, and its bookkeeping state.
+ */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;	/* tunnel header field values */
+	struct tunnel_cfg_s cfg_mask;	/* which fields in cfg are significant */
+	uint32_t flow_stat_id;	/* (uint32_t)-1 until assigned by flow code */
+	uint8_t vport;	/* virtual port allocated for this tunnel */
+	int refcnt;	/* number of flows currently using this tunnel */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/*
+ * A virtual (tunnel) port number lies in the range
+ * [VIRTUAL_TUNNEL_PORT_OFFSET, MAX_HW_VIRT_PORTS).  Returns 1/0.
+ */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET)
+		return 0;
+	return (virt_port < MAX_HW_VIRT_PORTS) ? 1 : 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* Head of the linked list of all known tunnel definitions. */
+static struct tunnel_s *tunnels;
+
+/* In-use flags for virtual ports, indexed by (port - VIRTUAL_TUNNEL_PORT_OFFSET). */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual port number.
+ * Returns the port number, or 255 when all virtual ports are taken.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	uint8_t port;
+
+	for (port = VIRTUAL_TUNNEL_PORT_OFFSET; port < MAX_HW_VIRT_PORTS;
+			port++) {
+		uint8_t slot = port - VIRTUAL_TUNNEL_PORT_OFFSET;
+
+		if (vport[slot] == 0) {
+			/* claim this slot */
+			vport[slot] = 1;
+			return port;
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual port.
+ * Returns 0 on success; 255 when virt_port is outside the virtual range.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	/*
+	 * Was "return -1", which silently truncates to 255 in this uint8_t
+	 * return type - make the actual returned value explicit.
+	 */
+	return 255;
+}
+
+/*
+ * True when v1 and v2 agree on every bit that is significant in BOTH
+ * masks (bits outside either mask are don't-care).
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked comparison of two IPv4 tunnel configs (addresses and UDP ports). */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked comparison of two IPv6 tunnel configs (addresses as 2x64-bit words). */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked match of a stored tunnel against a candidate config.
+ * Returns non-zero when the tunnel types match and all doubly-masked
+ * address/port fields agree (IPv4 or IPv6 depending on the stored config).
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up a tunnel in the database, or create it when not found.
+ * tun_set != 0 means this is a full tunnel definition ("set" command):
+ * duplicates are matched exactly, and a matching pre-configured entry is
+ * promoted to a defined one.  On a hit the refcount is incremented.
+ * Returns the tunnel entry, or NULL on allocation failure / no free vport.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* out of memory - give the vport back */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel.  When the refcount reaches zero the
+ * tunnel is unlinked from the database, its virtual port is released and
+ * the entry is freed.  Returns 0 on success, -1 if tnl is not in the list.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *prev = NULL;
+	struct tunnel_s *tun;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate tnl in the list, remembering its predecessor */
+	for (tun = tunnels; tun != NULL; prev = tun, tun = tun->next) {
+		if (tun == tnl)
+			break;
+	}
+
+	if (tun == NULL) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	tun->refcnt--;
+	if (tun->refcnt != 0)
+		return 0;
+
+	/* last reference gone - unlink, release vport and free */
+	if (prev != NULL)
+		prev->next = tun->next;
+	else
+		tunnels = tun->next;
+	flow_tunnel_free_virt_port(tun->vport);
+
+	NT_LOG(DBG, FILTER,
+	       "tunnel ref count == 0 remove tunnel vport %i\n",
+	       tun->vport);
+	free(tun);
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel definition from the flow element list starting at *idx.
+ * On success, returns the tunnel DB entry (created or refcounted by
+ * tunnel_get()) and advances *idx past the consumed elements.  *vni (when
+ * non-NULL) receives the VXLAN VNI, or (uint32_t)-1 if absent; vni == NULL
+ * marks this as a tunnel "set" command.  Returns NULL on invalid input.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/*
+		 * Clear the mask as well: only the fields of elements
+		 * actually present below are filled in, and tunnel_get()
+		 * compares the whole mask struct - it must not contain
+		 * uninitialized stack data.
+		 */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		/* elements must appear in non-decreasing type order */
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* assemble the 24-bit VNI from its 3 bytes */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port number assigned to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Assign the flow statistics id to a tunnel (marks it as defined/set). */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy the config of the tunnel matching (vport, flow_stat_id) into
+ * *tuncfg.  A flow_stat_id of (uint32_t)-1 matches any stat id.
+ * Returns 0 when found, -1 otherwise.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun; tun = tun->next) {
+		int stat_match = (flow_stat_id == tun->flow_stat_id ||
+				  flow_stat_id == (uint32_t)-1);
+
+		if (tun->vport == vport && stat_match) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * Internet-checksum style 16-bit one's-complement sum over 'size' bytes
+ * of 'data', starting from 'seed', with the carries folded back in.
+ * NOTE(review): for odd 'size' the tail byte is taken by casting the
+ * 16-bit word at data[idx] to unsigned char - this assumes the read stays
+ * inside the buffer and picks the intended byte for this endianness;
+ * TODO confirm (the caller here passes an even sizeof(struct ipv4_hdr_s)).
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy 'size' bytes of the element's spec into result, ignoring the mask. */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build the VXLAN encapsulation header for a flow definition from the
+ * given flow element list (ETH [IPv4|IPv6] UDP VXLAN, plus optional
+ * PORT_ID/VOID), appending each header into fd->tun_hdr and precomputing
+ * the IPv4 checksum seed.  Returns 0 on success, -1 on an unsupported
+ * element or insufficient ROA version for IPv6.
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* fixed: byte 5 was printed twice instead of bytes 4,5 */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force a plain 20-byte IPv4 header */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* precompute the IPv4 checksum seed over the header at offset 14 */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Report whether the CAT module exists on this backend device. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_cat_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Allocate and size the CAT module shadow cache.
+ *
+ * Queries the backend for the CAT version and the per-table resource
+ * counts, then allocates one contiguous cache area (via callocate_mod)
+ * whose layout depends on the FPGA CAT version (18, 21 or 22).
+ *
+ * @param be backend handle; be->cat is filled in on success
+ * @return 0 on success, negative error code on failure
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* These resource counts must be strictly positive */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC/KM-interface resources may legitimately be absent (0);
+	 * only a negative count is an error.
+	 */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks exist */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/* Version-specific cache layout: callocate_mod takes
+	 * (pointer, count, element-size) triplets for each table.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT cache area allocated by hw_mod_cat_alloc().
+ * free(NULL) is a no-op per the C standard, so no guard is needed;
+ * the pointer is cleared afterwards so a repeated call is harmless.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset one CAT categorizer function (CFN) entry to pass-all defaults:
+ * every protocol and error match field is written as "accept all" so
+ * the entry does not filter anything until explicitly programmed.
+ *
+ * NOTE(review): only the return value of the initial PRESET_ALL call is
+ * propagated; errors from the subsequent field writes are ignored —
+ * presumably they can only fail on a bad index/version, which the first
+ * call already validates. TODO confirm.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* The second KM interface and tunnel error fields exist from v21 on */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the entire CAT module: zero the local cache and flush every table
+ * (CFN, then KCE/KCS/FTE per KM interface, then CTE, CTS, COT, CCT, EXO,
+ * RCK, LEN and, when present, KCC and the post-v21 CCE/CCS tables) so the
+ * hardware matches the zeroed cache.
+ *
+ * @return 0 on success, -1 on any flush failure
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* v18 has a single KM interface; later versions address the KM
+	 * tables through the configured module-interface ids (m0/m1).
+	 */
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* Second KM interface is optional */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC table only exists when the FPGA reports a non-zero size */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables were introduced after v21 */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush CFN cache entries [start_idx, start_idx + count) to hardware.
+ * ALL_ENTRIES flushes the whole table and requires start_idx == 0.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int n = count;
+
+	if (n == ALL_ENTRIES) {
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		n = be->cat.nb_cat_funcs;
+	} else if ((unsigned int)(start_idx + n) > be->cat.nb_cat_funcs) {
+		return error_index_too_large(__func__);
+	}
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx, n);
+}
+
+/*
+ * Get or set a single field of a CAT CFN (categorizer function) entry.
+ *
+ * @param field    field selector (HW_CAT_CFN_*)
+ * @param index    CFN entry index, checked against nb_cat_funcs
+ * @param word_off word offset inside multi-word fields (e.g. PM_CMP)
+ * @param value    source (set) or destination (get) value
+ * @param get      non-zero: read the field into *value; zero: write *value
+ * @return 0 on success, negative error code otherwise
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* Dispatch on FPGA CAT version; v21 and v22 share the CFN layout */
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field (hw_mod_cat_cfn_mod with get == 0). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one CFN field into *value (hw_mod_cat_cfn_mod with get == 1). */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	const int rc = hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+
+	return rc;
+}
+
+/*
+ * Map a (selector, module-interface id) pair onto the internal KM
+ * interface index (0 or 1), or return a negative error when the id
+ * does not match the configured m0/m1 ids.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	/* Version 18 only has a single KM interface */
+	if (_VER_ == 18)
+		return 0;
+
+	/* The first selector may match either interface; the second
+	 * selector only matches m1.
+	 */
+	if (if_num != KM_FLM_IF_SECOND && be->cat.km_if_m0 == km_if_id)
+		return 0;
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush KCE cache entries to hardware for one KM interface.
+ * Each entry packs bits for 8 categorizer functions.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const unsigned int nb_entries = be->cat.nb_cat_funcs / 8;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	const int km_if_idx =
+		find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries on behalf of the KM module (interface id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kce_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush KCE entries on behalf of the FLM module (interface id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kce_flush(be, if_num, flm_module_id, start_idx, count);
+}
+
+/*
+ * Get or set the HW_CAT_KCE_ENABLE_BM field of one KCE cache entry.
+ * v18 has a single enable bitmap; v21/v22 keep one per KM interface.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	const int km_if_idx =
+		find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	if (_VER_ == 18) {
+		if (field != HW_CAT_KCE_ENABLE_BM)
+			return error_unsup_field(__func__);
+		get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+		return 0;
+	}
+	if (_VER_ == 21 || _VER_ == 22) {
+		if (field != HW_CAT_KCE_ENABLE_BM)
+			return error_unsup_field(__func__);
+		get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+			value, get);
+		return 0;
+	}
+	return error_unsup_ver(__func__, _MOD_, _VER_);
+}
+
+/* Write a KCE field via the KM module interface (id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read a KCE field via the KM module interface (id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int rc = hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+
+	return rc;
+}
+
+/* Write a KCE field via the FLM module interface (id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read a KCE field via the FLM module interface (id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int rc = hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * KCS
+ */
+/* Flush KCS cache entries to hardware for one KM interface. */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const unsigned int nb_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	const int km_if_idx =
+		find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries on behalf of the KM module (interface id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_kcs_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush KCS entries on behalf of the FLM module (interface id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_kcs_flush(be, if_num, flm_module_id, start_idx, count);
+}
+
+/*
+ * Get or set the HW_CAT_KCS_CATEGORY field of one KCS cache entry.
+ * v18 keeps a single category value; v21/v22 keep one per KM interface.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	const int km_if_idx =
+		find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	if (_VER_ == 18) {
+		if (field != HW_CAT_KCS_CATEGORY)
+			return error_unsup_field(__func__);
+		get_set(&be->cat.v18.kcs[index].category, value, get);
+		return 0;
+	}
+	if (_VER_ == 21 || _VER_ == 22) {
+		if (field != HW_CAT_KCS_CATEGORY)
+			return error_unsup_field(__func__);
+		get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+			value, get);
+		return 0;
+	}
+	return error_unsup_ver(__func__, _MOD_, _VER_);
+}
+
+/* Write a KCS field via the KM module interface (id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Read a KCS field via the KM module interface (id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	const int rc = hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+
+	return rc;
+}
+
+/* Write a KCS field via the FLM module interface (id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Read a KCS field via the FLM module interface (id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	const int rc = hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE cache entries to hardware for one KM interface.
+ * The table holds nb_cat_funcs/8 * nb_flow_types * key_cnt entries,
+ * where key_cnt is 4 from version 20 on and 2 before that.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+	const unsigned int nb_entries =
+		be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+
+	const int km_if_idx =
+		find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries on behalf of the KM module (interface id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	const int km_module_id = 0;
+
+	return hw_mod_cat_fte_flush(be, if_num, km_module_id, start_idx, count);
+}
+
+/* Flush FTE entries on behalf of the FLM module (interface id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	const int flm_module_id = 1;
+
+	return hw_mod_cat_fte_flush(be, if_num, flm_module_id, start_idx, count);
+}
+
+/*
+ * Get or set the HW_CAT_FTE_ENABLE_BM field of one FTE cache entry.
+ * v18 keeps a single enable bitmap; v21/v22 keep one per KM interface.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	/* v20+ uses 4 key words per entry, earlier versions 2 */
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	const int km_if_idx =
+		find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	if (_VER_ == 18) {
+		if (field != HW_CAT_FTE_ENABLE_BM)
+			return error_unsup_field(__func__);
+		get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+		return 0;
+	}
+	if (_VER_ == 21 || _VER_ == 22) {
+		if (field != HW_CAT_FTE_ENABLE_BM)
+			return error_unsup_field(__func__);
+		get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+			value, get);
+		return 0;
+	}
+	return error_unsup_ver(__func__, _MOD_, _VER_);
+}
+
+/* Set an FTE field on the KM interface (km_if_id 0). */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get an FTE field from the KM interface (km_if_id 0). */
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set an FTE field on the FLM interface (km_if_id 1). */
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get an FTE field from the FLM interface (km_if_id 1). */
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * Flush CTE shadow entries to hardware.
+ * The CTE table holds one entry per CAT function; ALL_ENTRIES expands
+ * to the full table.
+ */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int nb_entries = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single CTE shadow field.
+ * The v18 view is used for all supported versions: the enable_bm word
+ * has the same layout in v18/v21/v22 (v22 only adds a bit — see
+ * cat_v22_cte_s), so accessing through v18 is safe here.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a CTE field. */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+/* Read a CTE field. */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CTS shadow entries to hardware.
+ * Each CAT function owns addr_size CTS words; before module version 15
+ * the stride was fixed at 8, later versions derive it from cts_num
+ * (two values per word, rounded up).
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single CTS shadow field (cat_a/cat_b pair).
+ * Note: unlike hw_mod_cat_cts_flush(), no _VER_ < 15 fallback is
+ * applied to addr_size — all versions handled below are >= 18, where
+ * the cts_num-derived stride is the correct one.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a CTS field. */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+/* Read a CTS field. */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush COT shadow entries to hardware.
+ * The COT table holds one entry per category; ALL_ENTRIES expands to
+ * the full table.
+ */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int nb_entries = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb_entries;
+	if ((unsigned int)(start_idx + count) > nb_entries)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single COT shadow field.
+ * Besides plain field access, three pseudo-fields are supported:
+ *  - HW_CAT_COT_PRESET_ALL: memset the whole entry to (uint8_t)*value
+ *    (write-only),
+ *  - HW_CAT_COT_COMPARE: compare entry at 'index' with entry *value,
+ *  - HW_CAT_COT_FIND: find an entry equal to entry 'index', result in
+ *    *value.
+ * The v18 layout is shared by all supported versions.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a COT field. */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+/* Read a COT field. */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CCT shadow entries to hardware.
+ * The CCT table holds 4 entries per CAT function.
+ */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single CCT shadow field (color/km).
+ * The v18 layout is shared by all supported versions.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a CCT field. */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+/* Read a CCT field. */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush KCC CAM shadow entries to hardware (kcc_size entries).
+ */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single KCC CAM shadow field.
+ * The CAM key is two 32-bit words, so word_off must be 0 or 1 for
+ * HW_CAT_KCC_KEY. The v18 layout is shared by all supported versions.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a KCC field (word_off selects the key word for HW_CAT_KCC_KEY). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read a KCC field. */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush EXO shadow entries to hardware (nb_pm_ext entries).
+ */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single EXO shadow field.
+ * 'ofs' is a signed offset (int32_t in cat_v18_exo_s), hence
+ * get_set_signed. The v18 layout is shared by all supported versions.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write an EXO field. */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+/* Read an EXO field. */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush RCK shadow entries to hardware (64 entries per PM extractor).
+ */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single RCK data word.
+ * The v18 layout is shared by all supported versions.
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write an RCK field. */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+/* Read an RCK field. */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush LEN shadow entries to hardware (nb_len entries).
+ */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single LEN shadow field (lower/upper bound, dyn
+ * selectors, invert flag). The v18 layout is shared by all supported
+ * versions.
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a LEN field. */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+/* Read a LEN field. */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CCE shadow entries to hardware.
+ * The CCE table has a fixed size of 4 entries.
+ */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single CCE shadow field (v22 only).
+ * The CCE table has a fixed size of 4 entries, matching the bound used
+ * by hw_mod_cat_cce_flush(); the LEN-table bound (be->cat.nb_len) used
+ * previously was a copy-paste error and could allow out-of-bounds
+ * access to the 4-entry cce[] array when nb_len > 4.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a CCE field. */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+/* Read a CCE field. */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush CCS shadow entries to hardware.
+ * The CCS table has a fixed size of 1024 entries.
+ */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read or write a single CCS shadow field (v22 only).
+ * The CCS table has a fixed size of 1024 entries, matching the bound
+ * used by hw_mod_cat_ccs_flush(); the LEN-table bound (be->cat.nb_len)
+ * used previously was a copy-paste error and rejected valid indices
+ * (or allowed invalid ones) whenever nb_len != 1024.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write a CCS field. */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+/* Read a CCS field. */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CFN: per-CAT-function match conditions (one shadow entry per function). */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE: KM category enable bitmap. */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS: KM category select. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE: flow type enable bitmap. */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE: enable bits, one per downstream engine (COR, HSH, QSL, ...),
+ * addressable either as one word or as individual bits.
+ */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS: per-function category select pair. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT: color/KM output per category. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT: color/KM output, 4 entries per CAT function. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO: extractor offset; 'ofs' is signed. */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN: frame length test (lower/upper bound, dyn selectors, invert). */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC: CAM entry — two-word key, category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Shadow-register pointer table for CAT module version 18. */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/* CFN v21: extends v18 with tunnel checksum/TTL error checks and a
+ * second KM OR term (km0_or/km1_or instead of km_or).
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE v21: one enable bitmap per KM/FLM interface. */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS v21: one category select per KM/FLM interface. */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE v21: one flow type enable bitmap per KM/FLM interface. */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Shadow-register pointer table for CAT module version 21;
+ * unchanged tables reuse the v18 entry layouts.
+ */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CTE v22: same layout as v18 with one extra enable bit (rrb);
+ * the enable_bm word view remains compatible with v18.
+ */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE: new in v22 (fixed 4-entry table — see hw_mod_cat_cce_flush). */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS: new in v22 (fixed 1024-entry table); per-engine enable/value
+ * pairs plus three type/data side-band words.
+ */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Shadow-register pointer table for CAT module version 22;
+ * unchanged tables reuse v18/v21 entry layouts.
+ */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Ask the backend whether the FLM (Flow Matcher) module exists on this device. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_flm_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Read FLM capability counts from the backend and allocate the cached
+ * register images for the detected module version (v17 or v20).
+ * Returns 0 on success, -1 on allocation failure, or the negative value
+ * from error_resource_count()/error_unsup_ver() on bad counts or an
+ * unsupported version.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	/* Fixed: resource name was "flm_variant" (copy/paste from the block above) */
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20 layout is a superset of v17: same 26 images plus 12 new counters */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM register cache allocated by hw_mod_flm_alloc().
+ * Safe to call when nothing was allocated.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM module: clear the cached register images, set the default
+ * SDRAM split and flush control, timeout, scrub and all RCP entries to HW.
+ * Always returns 0.
+ * NOTE(review): the return values of the flush calls are discarded —
+ * confirm that failures here are intentionally ignored.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached CONTROL register image to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get or set one CONTROL field in the cached image (not flushed to HW).
+ * get != 0 reads the field into *value; otherwise *value is written.
+ * Returns 0, or a negative error for unknown field/version.
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			/* write-only pseudo field: fill every byte of the image */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CONTROL field in the cache; flush separately. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one CONTROL field from the cache into *value. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached STATUS image to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Read the STATUS register from hardware into the cache. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Get/set one STATUS field in the cached image (get != 0 reads). */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one STATUS field in the cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one STATUS field from the cache into *value. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached TIMEOUT register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the single timeout field T in the cached image. */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the timeout field in the cache; flush separately. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the timeout field from the cache into *value. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached SCRUB register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the scrub interval field I in the cached image. */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the scrub interval in the cache; flush separately. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the scrub interval from the cache into *value. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_BIN register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the load-bin field in the cached image. */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the load-bin value in the cache; flush separately. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the load-bin value from the cache into *value. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_PPS (packets per second) register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the pps field in the cached image. */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the pps value in the cache; flush separately. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the pps value from the cache into *value. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_LPS (lookups per second) register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the lps field in the cached image. */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the lps value in the cache; flush separately. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the lps value from the cache into *value. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached LOAD_APS (accesses per second) register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Get/set the aps field in the cached image. */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the aps value in the cache; flush separately. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the aps value from the cache into *value. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached PRIO register image to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get/set one PRIO field (limit/flow-type pair for each of the four
+ * priority levels) in the cached image.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one PRIO field in the cache; flush separately. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one PRIO field from the cache into *value. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) PST profile entries to hardware.
+ * count == ALL_ENTRIES flushes every profile.
+ * NOTE(review): a negative start_idx is only caught indirectly by the
+ * unsigned sum check — confirm callers never pass one.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/* Get/set one field of PST profile entry [index] in the cached image. */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			/* write-only pseudo field: fill the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one PST field in the cache; flush separately. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one PST field from the cache into *value. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) RCP (recipe) entries to hardware.
+ * count == ALL_ENTRIES flushes every category.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get/set one field of RCP entry [index] in the cached image.
+ * HW_FLM_RCP_MASK copies the whole 10-word mask array; all other fields
+ * are single 32-bit values.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			/* write-only pseudo field: fill the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* array field: *value must point at 10 words */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the multi-word MASK field of RCP entry [index]; value must point at
+ * the full 10-word mask array. Only HW_FLM_RCP_MASK is accepted here.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	/* Fixed: wrong field is a field error, not a version error */
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Set one scalar field of RCP entry [index] in the cache.
+ * HW_FLM_RCP_MASK takes an array; use hw_mod_flm_rcp_set_mask() instead.
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	/* Fixed: wrong field is a field error, not a version error */
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field from the cache; for HW_FLM_RCP_MASK, *value gets 10 words. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Read the BUF_CTRL register from hardware into the cache. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one BUF_CTRL field from the cached image; these are read-only. */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public getter for cached BUF_CTRL fields. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Read all FLM statistics counters from hardware into the cache. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one cached statistics counter into *value.
+ * The v17 counters are handled in the outer switch; the nested default
+ * handles the counters that only exist from v18/v20 on, and rejects them
+ * for older versions.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* counters below only exist from version 18 onwards */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, passed as a word array in
+ * *value) to the hardware learn FIFO. Returns the backend result, or a
+ * negative error for unknown field/version.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read up to word_cnt words of flow info records from hardware into *value.
+ * NOTE(review): the backend call's return value is discarded and 0 is
+ * returned on the supported path — confirm this is intentional.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (flm_v17_sta_data_s, as a word array) from
+ * hardware into *value.
+ * NOTE(review): backend return value is discarded here as well.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..9b4ee1991e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/* Overlay for packing two 28-bit flow member indices into 7 bytes. */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+/* CONTROL register image: enable/init plus per-record-type select bits. */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* STATUS register image (read from hardware). */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* TIMEOUT register image: flow timeout value. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* SCRUB register image: scrub interval. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* LOAD_BIN register image. */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+/* LOAD_PPS register image (packets per second). */
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+/* LOAD_LPS register image (lookups per second). */
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+/* LOAD_APS register image (accesses per second). */
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* PRIO register image: limit/flow-type pair per priority level. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* RCP (recipe) entry: key extraction and flow handling configuration. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* BUF_CTRL register image: FIFO space/availability counters. */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * DMA record layouts exchanged with hardware; must be byte-packed so the
+ * field offsets match the wire format exactly. Comments give the bit
+ * range (high:low) and width of each field within the record.
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* Flow info record read back from hardware (counters + flow id). */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* Status record read back from hardware (per-operation result flags). */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* Per-event statistics counter images, one single-word struct each. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/* Register-cache pointer set for FLM module v17 (allocated by callocate_mod). */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v20 statistics counter shadows, added on top of the v17 set.
+ * Each struct caches one 32-bit event counter register.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v20 module cache layout.
+ * v20 reuses all the flm_v17_* group structs unchanged and appends the
+ * v20-only statistics counters at the end, so the leading members mirror
+ * struct hw_mod_flm_v17_s field-for-field.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	/* Counters below exist in FLM v20 only */
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Report whether the HSH (hash) module exists in this FPGA backend. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the HSH module version and allocate the RCP
+ * shadow cache. Only version 5 is supported here.
+ * Returns 0 on success, -1 on allocation failure, or the error code
+ * from error_unsup_ver() for unsupported versions.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* One contiguous allocation holding the RCP table */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the HSH cache area allocated by hw_mod_hsh_alloc().
+ * free(NULL) is a well-defined no-op, so no guard is needed; the base
+ * pointer is cleared afterwards to guard against double-free.
+ */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Reset the HSH module: zero the local shadow cache and push the cleared
+ * RCP table to hardware. Returns the flush result (0 on success).
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ * NOTE(review): start_idx is assumed non-negative; a negative value would
+ * pass the range check below -- confirm callers never pass one.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Read or write a single field of an HSH RCP (recipe) cache entry.
+ *
+ * @param be       backend handle
+ * @param field    field selector (HW_HSH_RCP_*)
+ * @param index    RCP entry index
+ * @param word_off word offset for array fields (mask arrays); ignored by
+ *                 scalar fields
+ * @param value    input on set, output on get
+ * @param get      non-zero reads the field, zero writes it
+ * @return 0 on success, otherwise an error code from the error_* helpers
+ *
+ * Bounds fix: the mask arrays hold HSH_RCP_MAC_PORT_MASK_SIZE (4) and
+ * HSH_RCP_WORD_MASK_SIZE (10) words respectively, so valid offsets are
+ * 0..SIZE-1; the checks must reject word_off == SIZE as well (previously
+ * '>' allowed a one-past-the-end access).
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/* valid offsets are 0..HSH_RCP_MAC_PORT_MASK_SIZE-1 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* valid offsets are 0..HSH_RCP_WORD_MASK_SIZE-1 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCP field; thin wrapper over hw_mod_hsh_rcp_mod() (set path). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one RCP field; thin wrapper over hw_mod_hsh_rcp_mod() (get path). */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * HSH v5 RCP (recipe) shadow entry: one cached hardware hash recipe.
+ * Array lengths must match HSH_RCP_MAC_PORT_MASK_SIZE (4) and
+ * HSH_RCP_WORD_MASK_SIZE (10) used for bounds checks in hw_mod_hsh.c.
+ * The *_ofs fields are signed byte offsets (get_set_signed accessors).
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* HSH v5 module cache: just the RCP table. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Report whether the HST module exists in this FPGA backend. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the HST module version and category count, then
+ * allocate the RCP shadow cache. Only version 2 is supported here.
+ * Returns 0 on success, -1 on allocation failure, or an error_* code.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the HST cache area allocated by hw_mod_hst_alloc().
+ * free(NULL) is a well-defined no-op, so no guard is needed; the base
+ * pointer is cleared afterwards to guard against double-free.
+ */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Reset the HST module: zero the local shadow cache and push the cleared
+ * RCP table to hardware. Returns the flush result (0 on success).
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ * NOTE(review): the unsigned cast makes a negative start_idx wrap to a huge
+ * value and fail the check -- relied-upon but subtle.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Read or write a single field of an HST RCP cache entry.
+ *
+ * @param be    backend handle
+ * @param field field selector (HW_HST_RCP_*)
+ * @param index RCP entry index
+ * @param value input on set, output on get (doubles as the secondary
+ *              index argument for FIND/COMPARE)
+ * @param get   non-zero reads the field, zero writes it
+ * @return 0 on success, otherwise an error code from the error_* helpers
+ *
+ * Bug fix: HW_HST_RCP_FIND previously discarded the return value of
+ * find_equal_index(), so 'rv' stayed 0 and lookup errors were silently
+ * swallowed; it is now assigned and checked like the other modules do.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCP field; thin wrapper over hw_mod_hst_rcp_mod() (set path). */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field; thin wrapper over hw_mod_hst_rcp_mod() (get path). */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * HST v2 RCP shadow entry: one cached header-strip recipe with start/end
+ * positions and three modifier slots (cmd/dyn/ofs/value each).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* HST v2 module cache: just the RCP table. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Report whether the IOA module exists in this FPGA backend. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the IOA module version and resource counts, then
+ * allocate the combined shadow cache (RCP table, special TPID config and
+ * the ROA EPP table). Only version 4 is supported here.
+ * Returns 0 on success, -1 on allocation failure, or an error_* code.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* Single contiguous allocation holding all three tables */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the IOA cache area allocated by hw_mod_ioa_alloc().
+ * free(NULL) is a well-defined no-op, so no guard is needed; the base
+ * pointer is cleared afterwards to guard against double-free.
+ */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Reset the IOA module: zero the local shadow cache, program the default
+ * custom TPID values and push everything to hardware.
+ *
+ * Previously all flush/config return codes were discarded and 0 was
+ * returned unconditionally; the first failure is now propagated, matching
+ * hw_mod_hsh_reset() which returns its flush result.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	int rv;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	rv = hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	if (rv != 0 && err == 0)
+		err = rv;
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	rv = hw_mod_ioa_config_flush(be);
+	if (rv != 0 && err == 0)
+		err = rv;
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	rv = hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+	if (rv != 0 && err == 0)
+		err = rv;
+	return err;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Read or write a single field of an IOA RCP cache entry.
+ * @param value input on set, output on get (doubles as the secondary
+ *              index argument for FIND/COMPARE)
+ * @param get   non-zero reads the field, zero writes it
+ * @return 0 on success, otherwise an error code from the error_* helpers
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RCP field; thin wrapper over hw_mod_ioa_rcp_mod() (set path). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field; thin wrapper over hw_mod_ioa_rcp_mod() (get path). */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Push the cached special-TPID configuration to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Update one cached IOA config field (custom TPID values); call
+ * hw_mod_ioa_config_flush() afterwards to commit to hardware.
+ * Returns 0 on success or an error_* code.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached ROA EPP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Read or write a single field of an IOA ROA EPP cache entry.
+ * @param value input on set, output on get (doubles as the secondary
+ *              index argument for FIND/COMPARE)
+ * @param get   non-zero reads the field, zero writes it
+ * @return 0 on success, otherwise an error code from the error_* helpers
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one ROA EPP field; wrapper over hw_mod_ioa_roa_epp_mod() (set). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one ROA EPP field; wrapper over hw_mod_ioa_roa_epp_mod() (get). */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* IOA v4 RCP shadow entry: VLAN/tunnel manipulation and queue override. */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID values programmed by hw_mod_ioa_config_set(). */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress-path entry (cross-indexed by ROA category, see hw_mod_ioa.c). */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* IOA v4 module cache: RCP table, TPID config and ROA EPP table. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Report whether the KM (key matcher) module exists in this FPGA backend. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Query the backend for the KM module version and resource counts, then
+ * allocate the shadow caches (RCP, CAM, TCAM, TCI and TCQ tables).
+ * Only version 7 is supported here.
+ * Returns 0 on success, -1 on allocation failure, or an error_* code.
+ *
+ * The mask word sizes now use the KM_RCP_MASK_D_A_SIZE / KM_RCP_MASK_B_SIZE
+ * defines instead of repeating the magic numbers 12 and 6, keeping them in
+ * lockstep with the bounds checks in hw_mod_km_rcp_mod().
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the KM cache area allocated by hw_mod_km_alloc().
+ * free(NULL) is a well-defined no-op, so no guard is needed; the base
+ * pointer is cleared afterwards to guard against double-free.
+ */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Reset the KM module: zero the shadow cache and push the cleared RCP,
+ * CAM, TCAM, TCI and TCQ tables to hardware.
+ * NOTE(review): the individual flush return codes are discarded and 0 is
+ * always returned (the commented-out 'err' hints this was intended) --
+ * consider propagating failures like hw_mod_hsh_reset() does.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached RCP entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Read or write a single field of a KM RCP cache entry.
+ *
+ * @param be       backend handle
+ * @param field    field selector (HW_KM_RCP_*)
+ * @param index    RCP entry index
+ * @param word_off word offset for the mask arrays; ignored by scalar fields
+ * @param value    input on set, output on get
+ * @param get      non-zero reads the field, zero writes it
+ * @return 0 on success, otherwise an error code from the error_* helpers
+ *
+ * Bounds fix: KM_RCP_MASK_D_A_SIZE (12) and KM_RCP_MASK_B_SIZE (6) name the
+ * number of words in the mask arrays, so valid offsets are 0..SIZE-1; the
+ * previous '>' comparisons admitted word_off == SIZE, a one-past-the-end
+ * access (same pattern fixed in hw_mod_hsh.c where the array sizes are
+ * visible) -- confirm km_v7_rcp_s array lengths match the defines.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/* valid offsets are 0..KM_RCP_MASK_D_A_SIZE-1 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* valid offsets are 0..KM_RCP_MASK_B_SIZE-1 */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field (word_off selects the word for array fields). */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KM RCP field into *value (word_off selects the word for array fields). */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush KM CAM shadow entries to hardware.
+ * count == ALL_ENTRIES flushes every record of every bank.
+ * Fails if the requested range would run past the end of the CAM.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* banks are laid out back to back, nb_cam_records entries each */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get/set a single KM CAM shadow field addressed by (bank, record).
+ * get != 0 reads into *value, otherwise writes *value.
+ * HW_KM_CAM_PRESET_ALL memsets the whole entry and is write-only.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* flatten (bank, record) into the linear shadow array index */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM CAM field at (bank, record). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM CAM field at (bank, record) into *value. */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush KM TCAM shadow entries to hardware, starting at start_bank.
+ * 4 * 256 is the per-bank entry count (presumably 4 word lanes x 256
+ * byte values -- TODO confirm against the TCAM register layout).
+ * Note the backend is always called with record offsets 0, 0.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Get/set KM TCAM shadow data addressed by (bank, byte, byte_val).
+ * HW_KM_TCAM_BANK_RESET (write-only) presets all 4*256 entries of the
+ * bank from value_set[0..2] and marks them dirty.
+ * HW_KM_TCAM_T reads/writes one entry's three words; on write, the
+ * dirty flag is only raised when the stored words actually change.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	/* linear shadow index; used here only for the bounds check */
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write TCAM data (value_set is a 3-word array for HW_KM_TCAM_T). */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read TCAM data (value_set receives 3 words for HW_KM_TCAM_T). */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush KM TCI shadow entries (nb_tcam_bank_width records per bank).
+ * count == ALL_ENTRIES flushes everything; range is bounds-checked.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/* Get/set one KM TCI shadow field addressed by (bank, record). */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCI field at (bank, record). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM TCI field at (bank, record) into *value. */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush KM TCQ shadow entries (same bank/record geometry as TCI).
+ * count == ALL_ENTRIES flushes everything; range is bounds-checked.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/* Get/set one KM TCQ shadow field addressed by (bank, record). */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCQ field (note: value passed by pointer, unlike TCI). */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one KM TCQ field at (bank, record) into *value. */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * Shadow-cache record layouts for the KM (key matcher) module, FPGA
+ * module version 7. One 32-bit field per hardware register field;
+ * *_ofs fields are signed offsets.
+ */
+
+/* KM recipe (RCP): key extractor/bank selection configuration. */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];	/* D/A mask words; indexed by word_off */
+	uint32_t mask_b[6];	/* B mask words; indexed by word_off */
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* One CAM record: six key words plus six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* One TCAM entry: three data words; dirty marks pending HW write-back. */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* TCAM category information (TCI) record. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* TCAM qualifier (TCQ) record. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Top-level KM v7 shadow cache; arrays allocated in hw_mod_km_alloc(). */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA image. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query PDB module version and resource counts from the backend and
+ * allocate the version-specific shadow cache (v9 only).
+ * Returns 0 on success, negative/error code otherwise.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB shadow cache allocated by hw_mod_pdb_alloc(). */
+/* NOTE(review): free(NULL) is a no-op, so the guard is redundant. */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+/*
+ * Reset PDB: zero the shadow cache, then flush RCP and CONFIG to HW.
+ * Flush errors are OR'ed together and returned.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush [start_idx, start_idx+count) PDB RCP entries to hardware. */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Get/set one PDB RCP shadow field at index.
+ * Pseudo-fields: PRESET_ALL memsets the entry (write-only); FIND and
+ * COMPARE delegate to the generic index search/compare helpers.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB RCP field at index. */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one PDB RCP field at index into *value. */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the single PDB CONFIG record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/* Set a PDB CONFIG field in the shadow cache (write-only; no getter). */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * Shadow-cache record layouts for the PDB (packet descriptor builder)
+ * module, FPGA module version 9. *_rel fields are signed offsets.
+ */
+
+/* PDB recipe: per-category descriptor/TX/offset configuration. */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* Module-wide PDB configuration (single instance). */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Top-level PDB v9 shadow cache; allocated in hw_mod_pdb_alloc(). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA image. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query QSL module version and resource counts from the backend and
+ * allocate the version-specific shadow cache (v7 only).
+ * QEN/UNMQ table sizes are fixed (QSL_QEN_ENTRIES / QSL_QNMQ_ENTRIES).
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL shadow cache allocated by hw_mod_qsl_alloc(). */
+/* NOTE(review): free(NULL) is a no-op, so the guard is redundant. */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+/*
+ * Reset QSL: zero the shadow cache, then flush RCP, QST, QEN and UNMQ
+ * tables to hardware.
+ * Flush errors are OR'ed together and returned (consistent with
+ * hw_mod_pdb_reset); previously they were silently discarded.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	/* use the module wrapper (bounds-checked; ALL_ENTRIES == 256) */
+	err |= hw_mod_qsl_unmq_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush [start_idx, start_idx+count) QSL RCP entries to hardware. */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get/set one QSL RCP shadow field at index.
+ * Pseudo-fields: PRESET_ALL memsets the entry (write-only); FIND and
+ * COMPARE delegate to the generic index search/compare helpers.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL RCP field at index. */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL RCP field at index into *value. */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) QSL QST entries to hardware. */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get/set one QSL QST (queue selection table) shadow field at index.
+ * HW_QSL_QST_PRESET_ALL memsets the entry and is write-only.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QST field at index. */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QST field at index into *value. */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) QSL QEN (queue enable) entries. */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/* Get/set the QSL QEN enable bit-field at index. */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QEN field at index. */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QEN field at index into *value. */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush [start_idx, start_idx+count) QSL UNMQ (unmatched-queue) entries. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/* Get/set one QSL UNMQ shadow field at index. */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL UNMQ field at index. */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL UNMQ field at index into *value. */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/*
+ * Shadow-cache record layouts for the QSL (queue selection) module,
+ * FPGA module version 7.
+ */
+
+/* QSL recipe: per-category queue table and drop/discard settings. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* Queue selection table entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* Queue enable entry (QSL_QEN_ENTRIES of these). */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* Unmatched-packet queue entry (QSL_QNMQ_ENTRIES of these). */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Top-level QSL v7 shadow cache; allocated in hw_mod_qsl_alloc(). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA image. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query RMC module version and allocate the shadow cache.
+ * 0x10003 encodes version 1.3 (major in upper 16 bits, minor in lower).
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC shadow cache allocated by hw_mod_rmc_alloc(). */
+/* NOTE(review): free(NULL) is a no-op, so the guard is redundant. */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+/*
+ * Reset RMC: zero the shadow cache, block statistics and keep-alive
+ * traffic, block all MAC ports and all RPP slices, then flush the
+ * control record to hardware.
+ * Fix: the original set HW_RMC_BLOCK_STATT twice; the duplicate call
+ * is removed.
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the single RMC control record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/* Get/set one field of the single RMC control shadow record. */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC control field. */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one RMC control field into *value. */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/*
+ * Software cache of the RMC CTRL register, register-map v1.3.
+ * Field names mirror the HW_RMC_* accessor enums in hw_mod_rmc.c
+ * (block statistics, block keep-alive, per-port/slice blocking).
+ */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;
+	uint32_t block_keepa;
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;
+	uint32_t lag_phy_odd_even;
+};
+
+/* Per-module pointer bundle used by the callocate_mod() cache scheme. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA FPGA module exists on this device. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
+/*
+ * Query ROA module version and resource counts from the backend and
+ * allocate the shadow caches (tunnel headers, tunnel config, global
+ * config, LAG config) in one contiguous callocate_mod() block.
+ * Returns 0 on success, -1 on allocation failure, or an error for an
+ * unsupported version / bad resource count.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): category count is divided by 4 — presumably one
+	 * tunnel-header record (4 x 4 words) spans four categories; confirm
+	 * against the FPGA register map.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the single callocate_mod() allocation backing all ROA caches. */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	if (be->roa.base) {
+		free(be->roa.base);
+		be->roa.base = NULL;
+	}
+}
+
+/*
+ * Reset the ROA module: zero the shadow caches, set safe config defaults
+ * (forward recirculated/normal packets, enable both TX ports) and flush
+ * every table to hardware.
+ *
+ * All flush results are OR-ed into the return value, matching
+ * hw_mod_tpe_reset(); previously only the TUNHDR flush result was
+ * propagated and TUNCFG/CONFIG/LAGCFG failures were silently dropped.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * Flush @count cached tunnel-header entries starting at @start_idx to
+ * hardware; ALL_ENTRIES flushes the whole table.  Range-checked against
+ * nb_tun_categories.
+ */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one 32-bit word (@word_off) of tunnel-header entry @index
+ * in the cache.  @get selects direction, as in the other *_mod helpers.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			/* NOTE(review): sibling COMPARE cases pass *value as
+			 * the second index; here word_off is used instead —
+			 * confirm this is intentional.
+			 */
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			/* NOTE(review): word_off is not bounds-checked against
+			 * the 16-word tunnel_hdr[] array — callers must keep it
+			 * in range.
+			 */
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one tunnel-header word in the cache. */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one tunnel-header word from the cache. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush cached tunnel-config entries [start_idx, start_idx+count) to HW. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one field of tunnel-config entry @index in the cache.
+ * PRESET_ALL memsets the whole entry (set-only); FIND/COMPARE delegate to
+ * the generic index helpers; the remaining cases access a single member.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one tunnel-config field in the cache. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one tunnel-config field from the cache. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Push the cached ROA global config record to hardware. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+/*
+ * Read/write one field of the single cached ROA global config record
+ * (forwarding enables for recirculated/normal/cell-builder traffic and
+ * the two TX ports).
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one global-config field in the cache. */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+/* Read one global-config field from the cache. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush cached LAG-config entries [start_idx, start_idx+count) to HW. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/* Read/write the TX PHY port of LAG-config entry @index in the cache. */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LAG-config field in the cache. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+/* Read one LAG-config field from the cache. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* One cached tunnel-header record: 16 x 32-bit words. */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Cached tunnel-config record; fields mirror the HW_ROA_TUNCFG_* enums. */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;
+	uint32_t ipcs_precalc;
+	uint32_t iptl_upd;
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Cached global ROA config record (single instance). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* Cached LAG config entry. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* Pointer bundle into the contiguous callocate_mod() cache area. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC FPGA module exists on this device. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
+/*
+ * Query SLC module version and allocate the recipe cache, one entry per
+ * flow category.  Returns 0 on success, -1 on allocation failure, or an
+ * error for an unsupported version.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single callocate_mod() allocation backing the SLC cache. */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	if (be->slc.base) {
+		free(be->slc.base);
+		be->slc.base = NULL;
+	}
+}
+
+/* Zero the SLC recipe cache and flush every entry to hardware. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush cached SLC recipes [start_idx, start_idx+count) to hardware. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+/*
+ * Read/write one field of SLC recipe @index in the cache.
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_v1_s) — the pointer-bundle struct — as the
+ * element size instead of sizeof(struct slc_v1_rcp_s), the actual array
+ * element.  That memset a preset over only part of the entry and made the
+ * find/compare helpers walk the table with the wrong stride.
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			/* tail_ofs is int32_t — use the signed accessor */
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC recipe field in the cache. */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC recipe field from the cache. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR FPGA module exists on this device. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+/*
+ * Query SLC LR module version and allocate the recipe cache, one entry
+ * per flow category.  Returns 0 on success, -1 on allocation failure, or
+ * an error for an unsupported version.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single callocate_mod() allocation backing the SLC LR cache. */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	if (be->slc_lr.base) {
+		free(be->slc_lr.base);
+		be->slc_lr.base = NULL;
+	}
+}
+
+/* Zero the SLC LR recipe cache and flush every entry to hardware. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush cached SLC LR recipes [start_idx, start_idx+count) to hardware. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read/write one field of SLC LR recipe @index in the cache.
+ *
+ * Fix: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_lr_v2_s) — the pointer-bundle struct — as the
+ * element size instead of sizeof(struct slc_lr_v2_rcp_s), the actual
+ * array element, presetting only part of the entry and walking the table
+ * with the wrong stride.
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			/* tail_ofs is int32_t — use the signed accessor */
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC LR recipe field in the cache. */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one SLC LR recipe field from the cache. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;
+	uint32_t pcap;
+};
+
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* Cached SLC recipe entry; fields mirror the HW_SLC_RCP_* enums. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed offset — accessed via get_set_signed() */
+	uint32_t pcap;
+};
+
+/* Pointer bundle into the contiguous callocate_mod() cache area. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE FPGA module exists on this device. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
+/*
+ * Query TPE module version and resource counts from the backend and
+ * allocate every sub-table cache (RPP/INS/RPL/CPY/HFU/CSU recipes, plus
+ * IFR tables for version 2) in one contiguous callocate_mod() block.
+ * Returns 0 on success, -1 on allocation failure, or an error for an
+ * unsupported version / bad resource count.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	/* IFR tables exist from version 2 onward only */
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the single callocate_mod() allocation backing all TPE caches. */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	if (be->tpe.base) {
+		free(be->tpe.base);
+		be->tpe.base = NULL;
+	}
+}
+
+/*
+ * Zero all TPE caches and flush every table to hardware; flush results
+ * are OR-ed together, so a non-zero return means at least one flush
+ * failed.  IFR tables are flushed for version 2 only.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush cached RPP-IFR recipes [start_idx, start_idx+count) to hardware. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/* Read/write one field (enable or MTU) of RPP-IFR recipe @index. */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP-IFR recipe field in the cache. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP-IFR recipe field from the cache. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush cached RPP recipes [start_idx, start_idx+count) to hardware. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read/write one field of RPP recipe @index in the cache.  Versions 1
+ * and 2 share the v1 layout (the v2 struct aliases the same members).
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP recipe field in the cache. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPP recipe field from the cache. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush cached IFR recipes [start_idx, start_idx+count) to hardware. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/* Read/write one field (enable or MTU) of IFR recipe @index. */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IFR recipe field in the cache. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IFR recipe field from the cache. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+/* Flush cached INS recipes [start_idx, start_idx+count) to hardware. */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read/write one field (dyn/ofs/len) of INS recipe @index in the cache;
+ * versions 1 and 2 share the v1 layout.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one INS recipe field in the cache. */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one INS recipe field from the cache. */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of an RPL RCP entry.
 * HW_TPE_PRESET_ALL fills the entry with the byte (uint8_t)*value and is
 * write-only; FIND/COMPARE use the shared index helpers.
 * Returns 0 on success, negative error code otherwise.
 */
static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		switch (field) {
		case HW_TPE_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
			break;
		case HW_TPE_FIND:
			rv = find_equal_index(be->tpe.v1.rpl_rcp,
				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_RPL_RCP_DYN:
			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
			break;
		case HW_TPE_RPL_RCP_OFS:
			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
			break;
		case HW_TPE_RPL_RCP_LEN:
			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
			break;
		case HW_TPE_RPL_RCP_RPL_PTR:
			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
			break;
		case HW_TPE_RPL_RCP_EXT_PRIO:
			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end versions 1-2 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of an RPL EXT entry.
 * HW_TPE_PRESET_ALL fills the entry with the byte (uint8_t)*value and is
 * write-only; FIND/COMPARE use the shared index helpers.
 * Returns 0 on success, negative error code otherwise.
 */
static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rpl_ext_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		switch (field) {
		case HW_TPE_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_rpl_v2_ext_s));
			break;
		case HW_TPE_FIND:
			rv = find_equal_index(be->tpe.v1.rpl_ext,
				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
				be->tpe.nb_rpl_ext_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
				be->tpe.nb_rpl_ext_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_RPL_EXT_RPL_PTR:
			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
			break;
		case HW_TPE_RPL_EXT_META_RPL_LEN:
			/* SW-only bookkeeping field (see tpe_v1_rpl_v2_ext_s) */
			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end versions 1-2 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one RPL replace-data entry.
 * HW_TPE_RPL_RPL_VALUE transfers the full 4 x 32-bit data word; value
 * must therefore point at at least 4 uint32_t.  HW_TPE_PRESET_ALL is
 * write-only.  Returns 0 on success, negative error code otherwise.
 */
static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rpl_depth)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		switch (field) {
		case HW_TPE_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
			break;
		case HW_TPE_FIND:
			rv = find_equal_index(be->tpe.v1.rpl_rpl,
				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
				be->tpe.nb_rpl_depth, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
				be->tpe.nb_rpl_depth, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_RPL_RPL_VALUE:
			/* whole 128-bit replace chunk, not a single word */
			if (get)
				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
				       sizeof(uint32_t) * 4);
			else
				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
				       sizeof(uint32_t) * 4);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end versions 1-2 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of a CPY RCP entry.
 * index addresses the flattened nb_cpy_writers * nb_rcp_categories
 * table.  HW_TPE_PRESET_ALL is write-only; FIND/COMPARE use the shared
 * index helpers.  Returns 0 on success, negative error code otherwise.
 */
static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	const uint32_t cpy_size =
		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
	int rv = 0;
	if (index >= cpy_size)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		switch (field) {
		case HW_TPE_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
			break;
		case HW_TPE_FIND:
			rv = find_equal_index(be->tpe.v1.cpy_rcp,
				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
				cpy_size, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
				cpy_size, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_CPY_RCP_READER_SELECT:
			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
			break;
		case HW_TPE_CPY_RCP_DYN:
			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
			break;
		case HW_TPE_CPY_RCP_OFS:
			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
			break;
		case HW_TPE_CPY_RCP_LEN:
			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end versions 1-2 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Read (get != 0) or write (get == 0) one field of an HFU RCP entry.
 * Fields fall into four groups: three length-update blocks (LEN_A with
 * an extra outer-L4 term, LEN_B, LEN_C), a TTL update block, and
 * checksum/offset info for outer/inner L3/L4 headers.
 * HW_TPE_PRESET_ALL is write-only; FIND/COMPARE use the shared index
 * helpers.  Returns 0 on success, negative error code otherwise.
 */
static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2:
		switch (field) {
		case HW_TPE_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
			break;
		case HW_TPE_FIND:
			rv = find_equal_index(be->tpe.v1.hfu_rcp,
				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		/* length update block A */
		case HW_TPE_HFU_RCP_LEN_A_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
				value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
			break;

		/* length update block B */
		case HW_TPE_HFU_RCP_LEN_B_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
			break;

		/* length update block C */
		case HW_TPE_HFU_RCP_LEN_C_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
			break;

		/* TTL update block */
		case HW_TPE_HFU_RCP_TTL_WR:
			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
			break;
		case HW_TPE_HFU_RCP_TTL_POS_DYN:
			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
			break;
		case HW_TPE_HFU_RCP_TTL_POS_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
			break;

		/* checksum info and header offsets */
		case HW_TPE_HFU_RCP_CS_INF:
			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
			break;
		case HW_TPE_HFU_RCP_L3_PRT:
			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
			break;
		case HW_TPE_HFU_RCP_L3_FRAG:
			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
			break;
		case HW_TPE_HFU_RCP_TUNNEL:
			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
			break;
		case HW_TPE_HFU_RCP_L4_PRT:
			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
			break;
		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_INNER_L3_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
			break;
		case HW_TPE_HFU_RCP_INNER_L4_OFS:
			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end versions 1-2 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
/* TPE v1 register-shadow structures: one struct instance mirrors one
 * hardware recipe/RAM entry; every field is a plain uint32_t shadow of
 * the corresponding register field written by the hw_mod_tpe_* helpers.
 */
struct tpe_v1_rpp_v0_rcp_s {
	uint32_t exp;
};

/* INS recipe: dyn/ofs select the position, len the byte count. */
struct tpe_v1_ins_v1_rcp_s {
	uint32_t dyn;
	uint32_t ofs;
	uint32_t len;
};

/* RPL recipe: position/length plus a pointer into the replace RAM and
 * an extension priority.
 */
struct tpe_v1_rpl_v2_rcp_s {
	uint32_t dyn;
	uint32_t ofs;
	uint32_t len;
	uint32_t rpl_ptr;
	uint32_t ext_prio;
};

struct tpe_v1_rpl_v2_ext_s {
	uint32_t rpl_ptr;
	uint32_t meta_rpl_len; /* SW only */
};

/* One 128-bit replace-RAM entry (4 x 32-bit words). */
struct tpe_v1_rpl_v2_rpl_s {
	uint32_t value[4];
};

struct tpe_v1_cpy_v1_rcp_s {
	uint32_t reader_select;
	uint32_t dyn;
	uint32_t ofs;
	uint32_t len;
};

/* HFU recipe: three length-update blocks (A has an extra outer-L4 term),
 * a TTL update block, and checksum/offset info fields.
 */
struct tpe_v1_hfu_v1_rcp_s {
	uint32_t len_a_wr;
	uint32_t len_a_outer_l4_len;
	uint32_t len_a_pos_dyn;
	uint32_t len_a_pos_ofs;
	uint32_t len_a_add_dyn;
	uint32_t len_a_add_ofs;
	uint32_t len_a_sub_dyn;

	uint32_t len_b_wr;
	uint32_t len_b_pos_dyn;
	uint32_t len_b_pos_ofs;
	uint32_t len_b_add_dyn;
	uint32_t len_b_add_ofs;
	uint32_t len_b_sub_dyn;

	uint32_t len_c_wr;
	uint32_t len_c_pos_dyn;
	uint32_t len_c_pos_ofs;
	uint32_t len_c_add_dyn;
	uint32_t len_c_add_ofs;
	uint32_t len_c_sub_dyn;

	uint32_t ttl_wr;
	uint32_t ttl_pos_dyn;
	uint32_t ttl_pos_ofs;

	uint32_t cs_inf;
	uint32_t l3_prt;
	uint32_t l3_frag;
	uint32_t tunnel;
	uint32_t l4_prt;
	uint32_t outer_l3_ofs;
	uint32_t outer_l4_ofs;
	uint32_t inner_l3_ofs;
	uint32_t inner_l4_ofs;
};

/* CSU recipe: checksum command per outer/inner L3/L4 header. */
struct tpe_v1_csu_v0_rcp_s {
	uint32_t ol3_cmd;
	uint32_t ol4_cmd;
	uint32_t il3_cmd;
	uint32_t il4_cmd;
};

/* Per-module shadow tables; arrays are sized by the flow_api backend
 * (nb_rcp_categories, nb_rpl_ext_categories, nb_rpl_depth, ...).
 */
struct hw_mod_tpe_v1_s {
	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;

	struct tpe_v1_ins_v1_rcp_s *ins_rcp;

	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;

	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;

	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;

	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
/* IFR recipe attached to the RPP module in TPE v2.
 * NOTE(review): en looks like an enable flag and mtu an MTU limit, based
 * on field names only - confirm against the FPGA register documentation.
 */
struct tpe_v2_rpp_v1_ifr_rcp_s {
	uint32_t en;
	uint32_t mtu;
};

/* Stand-alone IFR recipe added in TPE v2; same layout as above. */
struct tpe_v2_ifr_v1_rcp_s {
	uint32_t en;
	uint32_t mtu;
};

/* TPE v2 shadow tables: the full v1 set plus the two IFR recipe tables. */
struct hw_mod_tpe_v2_s {
	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;

	struct tpe_v1_ins_v1_rcp_s *ins_rcp;

	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;

	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;

	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;

	struct tpe_v1_csu_v0_rcp_s *csu_rcp;

	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
enum flow_eth_dev_profile {
	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
	FLOW_ETH_DEV_PROFILE_INLINE = 1,
};

/*
 * Flow rule attributes
 */
struct flow_attr {
	uint32_t group; /* Priority group. */
	uint32_t priority; /* Rule priority level within group. */
	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
	uint8_t caller_id; /* Unique ID of caller application. */
};

/* Pairing of an API-level queue id with an underlying hardware queue id.
 * NOTE(review): exact mapping semantics are not visible here - confirm
 * with the users of struct flow_queue_id_s.
 */
struct flow_queue_id_s {
	int id;
	int hw_id;
};

/* NT Private rte flow items. */

/* NT Private rte flow actions. */

/* Pattern element types; mirrors a subset of RTE_FLOW_ITEM_TYPE_*. */
enum flow_elem_type {
	FLOW_ELEM_TYPE_END,
	FLOW_ELEM_TYPE_ANY,
	FLOW_ELEM_TYPE_ETH,
	FLOW_ELEM_TYPE_VLAN,
	FLOW_ELEM_TYPE_IPV4,
	FLOW_ELEM_TYPE_IPV6,
	FLOW_ELEM_TYPE_SCTP,
	FLOW_ELEM_TYPE_TCP,
	FLOW_ELEM_TYPE_UDP,
	FLOW_ELEM_TYPE_ICMP,
	FLOW_ELEM_TYPE_VXLAN,
	FLOW_ELEM_TYPE_GTP,
	FLOW_ELEM_TYPE_PORT_ID,
	FLOW_ELEM_TYPE_TAG,
	FLOW_ELEM_TYPE_VOID,

	/*
	 * Not associated with a RTE_ITEM..., but rather a restoration API
	 * device specific extension
	 */
	FLOW_ELEM_TYPE_TUNNEL
};
+
/* Action types; the per-entry comment names the conf structure passed in
 * struct flow_action::conf ("-none-" means conf is unused).
 */
enum flow_action_type { /* conf structure */
	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
	FLOW_ACTION_TYPE_VXLAN_DECAP,
	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
	/* struct flow_action_port_id : Destination port ID - HW port ID */
	FLOW_ACTION_TYPE_PORT_ID,
	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
	FLOW_ACTION_TYPE_METER,
	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */

	/*
	 * -none- : not associated with a RTE_ACTION..., but rather a
	 * restoration API device specific extension
	 */
	FLOW_ACTION_TYPE_TUNNEL_SET
};

/* Packed to exactly 6 bytes so it overlays the on-wire Ethernet address. */
#pragma pack(1)
struct ether_addr_s {
	uint8_t addr_b[6];
};

#pragma pack()
+
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
/* Wire-format protocol headers below are byte-packed; be16_t/be32_t
 * fields are stored big-endian (network order), see typedefs above.
 */
/*
 * IPv4 Header
 */
#pragma pack(1)
struct ipv4_hdr_s {
	uint8_t version_ihl;
	uint8_t tos;
	be16_t length;
	be16_t id;
	be16_t frag_offset;
	uint8_t ttl;
	uint8_t next_proto_id;
	be16_t hdr_csum;
	be32_t src_ip;
	be32_t dst_ip;
};

#pragma pack()
/*
 * IPv6 Header
 */
#pragma pack(1)
struct ipv6_hdr_s {
	be32_t vtc_flow; /* IP version, traffic class & flow label */
	be16_t payload_len; /* IP packet length - includes ip header */
	uint8_t proto;
	uint8_t hop_limits;
	uint8_t src_addr[16];
	uint8_t dst_addr[16];
};

#pragma pack()
+
/*
 * SCTP Header
 */
#pragma pack(1)
struct sctp_hdr_s {
	be16_t src_port;
	be16_t dst_port;
	be32_t tag; /* Validation tag */
	be32_t cksum;
};

#pragma pack()

/*
 * TCP Header
 */
#pragma pack(1)
struct tcp_hdr_s {
	be16_t src_port;
	be16_t dst_port;
	be32_t sent_seq;
	be32_t recv_ack;
	uint8_t data_off;
	uint8_t tcp_flags;
	be16_t rx_win;
	be16_t cksum;
	be16_t tcp_urp;
};

#pragma pack()

/*
 * UDP Header
 */
#pragma pack(1)
struct udp_hdr_s {
	be16_t src_port;
	be16_t dst_port;
	be16_t len;
	be16_t cksum;
};

#pragma pack()

/*
 * ICMP Header
 */
#pragma pack(1)
struct icmp_hdr_s {
	uint8_t type;
	uint8_t code;
	be16_t cksum;
	be16_t ident;
	be16_t seq_nb;
};

#pragma pack()
/*
 * FLOW_ELEM_TYPE_ETH specification
 */
#pragma pack(1)
struct flow_elem_eth {
	struct ether_addr_s d_addr; /* DMAC */
	struct ether_addr_s s_addr; /* SMAC */
	be16_t ether_type; /* Frame type */
};

#pragma pack()

/*
 * FLOW_ELEM_TYPE_VLAN specification
 */
#pragma pack(1)
struct flow_elem_vlan {
	be16_t tci; /* Tag control information */
	be16_t inner_type; /* Inner EtherType or TPID */
};

#pragma pack()
+
/*
 * FLOW_ELEM_TYPE_IPV4 specification
 */
struct flow_elem_ipv4 {
	struct ipv4_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_IPV6 specification
 */
struct flow_elem_ipv6 {
	struct ipv6_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_SCTP specification
 */
struct flow_elem_sctp {
	struct sctp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_TCP specification
 */
struct flow_elem_tcp {
	struct tcp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_UDP specification
 */
struct flow_elem_udp {
	struct udp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_ICMP specification
 */
struct flow_elem_icmp {
	struct icmp_hdr_s hdr;
};

/*
 * FLOW_ELEM_TYPE_GTP specification
 */
#pragma pack(1)
struct flow_elem_gtp {
	uint8_t v_pt_rsv_flags;
	uint8_t msg_type;
	be16_t msg_len;
	be32_t teid;
};

#pragma pack()

/*
 * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
 */
#pragma pack(1)
struct flow_elem_vxlan {
	uint8_t flags; /* Normally 0x08 (I flag) */
	uint8_t rsvd0[3];
	uint8_t vni[3]; /* 24-bit VXLAN network identifier */
	uint8_t rsvd1;
};

#pragma pack()
/*
 * FLOW_ELEM_TYPE_PORT_ID specification
 */
struct flow_elem_port_id {
	uint32_t id; /* HW port no */
};

/*
 * FLOW_ELEM_TYPE_TAG specification
 */
struct flow_elem_tag {
	uint32_t data;
	uint8_t index;
};

/*
 * FLOW_ELEM_TYPE_ANY specification
 */
struct flow_elem_any {
	uint32_t num; /**< Number of layers covered. */
};

/* One entry of a match pattern: element type plus spec/mask pointers. */
struct flow_elem {
	enum flow_elem_type type; /* element type */
	const void *spec; /* Pointer to element specification structure */
	const void *mask; /* Bitmask applied to spec - same type */
};
+
/*
 * FLOW_ACTION_TYPE_RSS
 */
enum flow_hash_function {
	FLOW_HASH_FUNCTION_DEFAULT = 0,
	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
};

/* RSS configuration; mirrors struct rte_flow_action_rss. */
struct flow_action_rss {
	enum flow_hash_function func;
	uint32_t level; /* only level 0 supported */
	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
	uint64_t types;
	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
	uint32_t queue_num; /* Number of entries in queue */
	const uint8_t *key; /* Not supported yet - Hash key */
	const uint16_t *queue; /* Queue indices to use */
};
+
/*
 * FLOW_ACTION_TYPE_PUSH_VLAN
 * Push a new vlan TAG
 */
struct flow_action_push_vlan {
	be16_t ethertype;
};

/*
 * FLOW_ACTION_TYPE_SET_VLAN_VID
 */
struct flow_action_set_vlan_vid {
	be16_t vlan_vid;
};

/*
 * FLOW_ACTION_TYPE_SET_VLAN_PCP
 */
struct flow_action_set_vlan_pcp {
	uint8_t vlan_pcp; /* < VLAN priority. */
};

/*
 * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
 * Valid flow definition:
 *
 * - ETH / IPV4 / UDP / VXLAN / END
 * - ETH / IPV6 / UDP / VXLAN / END
 * - ETH / VLAN / IPV4 / UDP / VXLAN / END
 *
 */
struct flow_action_vxlan_encap {
	/* Encapsulating vxlan tunnel definition */
	struct flow_elem *vxlan_tunnel;
};

/*
 * FLOW_ACTION_TYPE_COUNT specification
 */
struct flow_action_count {
	uint32_t id; /* HW port no */
};

/*
 * FLOW_ACTION_TYPE_COUNT specification (query)
 * The *_set bits report which of the 64-bit counters below are valid.
 */
struct flow_query_count {
	uint32_t reset : 1;
	uint32_t hits_set : 1;
	uint32_t bytes_set : 1;

	uint32_t tcp_flags : 9;

	uint32_t reserved : 20;
	uint64_t hits;
	uint64_t bytes;
};
+
/*
 * FLOW_ACTION_TYPE_MARK specification
 */
struct flow_action_mark {
	uint32_t id; /* mark flow with this ID */
};

/*
 * FLOW_ACTION_TYPE_TAG specification
 */
struct flow_action_tag {
	uint32_t data; /* tag flow with this value */
	uint32_t mask; /* bit-mask applied to "data" */
	uint8_t index; /* index of tag to set */
};

/*
 * FLOW_ACTION_TYPE_PORT_ID specification
 */
struct flow_action_port_id {
	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
	uint32_t id;
};

/*
 * FLOW_ACTION_TYPE_QUEUE
 */
struct flow_action_queue {
	uint16_t index;
};

/*
 * FLOW_ACTION_TYPE_JUMP
 */
struct flow_action_jump {
	uint32_t group;
};

/*
 * FLOW_ACTION_TYPE_METER
 */
struct flow_action_meter {
	uint32_t mtr_id;
};

/*
 * FLOW_ACTION_TYPE_RAW_ENCAP
 * data/size give the raw bytes to prepend; items[0..item_count) describe
 * the headers contained in data (at most RAW_ENCAP_DECAP_ELEMS_MAX).
 */
struct flow_action_raw_encap {
	uint8_t *data;
	uint8_t *preserve;
	size_t size;
	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
	int item_count;
};

/*
 * FLOW_ACTION_TYPE_RAW_DECAP
 * data/size give the raw bytes to strip; items mirror raw_encap above.
 */
struct flow_action_raw_decap {
	uint8_t *data;
	size_t size;
	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
	int item_count;
};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * NOTE(review): the value order appears to mirror rte_flow's
+ * enum rte_flow_field_id -- confirm before inserting new entries,
+ * since reordering would break any id-based mapping.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ *
+ * Which union member is valid depends on "field": FLOW_FIELD_VALUE uses
+ * "value", FLOW_FIELD_POINTER uses "pvalue", and packet-field ids use the
+ * anonymous level/offset struct.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* How src is applied to dst (SET/ADD/SUB). */
+	struct flow_action_modify_data dst; /* Destination field descriptor. */
+	struct flow_action_modify_data src; /* Source field descriptor. */
+	uint32_t width; /* Bits affected -- presumably mirrors rte_flow modify_field; confirm. */
+};
+
+/* One entry of a flow action list. */
+struct flow_action {
+	enum flow_action_type type; /* Action type selector. */
+	const void *conf; /* Presumably points to the flow_action_* config matching "type". */
+};
+
+/* Error reporting for the flow API; filled in by flow_nic_set_error(),
+ * which only uses FLOW_ERROR_SUCCESS and FLOW_ERROR_GENERAL.
+ */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+struct flow_error {
+	enum flow_error_e type;
+	const char *message; /* Points into a static table; not owned by the caller. */
+};
+
+/* Commands for lag_set_config(); index/value semantics depend on the command. */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write a single LAG table entry. */
+	FLOW_LAG_SET_ALL, /* Write every 4th entry starting at (index & 3). */
+	FLOW_LAG_SET_BALANCE, /* Distribute output between two ports; value = percent on port 0 (0..100). */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support.
+ * Addresses and ports are stored in network byte order (BE).
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Selects the union view -- presumably 4 or 6; confirm encoding. */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type; /* Tunnel type; semantics defined by the flow layer. */
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 6/8] net/ntnic: adds flow logic
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-09-01 12:18   ` [PATCH v13 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-09-01 12:18   ` Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v11:
* Replace stdatomic by compiler build-in atomic
* Fix dereferencing type-punned pointer in macro
* Inner offset must exclude VLAN bytes
v12:
* Fix error=array-bounds
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1307 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5128 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10089 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..8cdf15663d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug names for flow resources, indexed by enum res_type_e
+ * (see flow_nic_ref_resource()/flow_nic_deref_resource()).
+ * Entry order must stay in sync with that enum.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global linked list of NIC devices. */
+static struct flow_nic_dev *dev_base;
+/* Serializes access to dev_base and adapter-level (LAG) configuration. */
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Error message table, indexed by enum flow_nic_err_msg_e
+ * (see flow_nic_set_error()); entry order must match that enum.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed because of too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+	/* 39 */ { "Invalid priority value" },
+};
+
+/*
+ * Translate an internal error code into a struct flow_error.
+ * ERR_SUCCESS maps to FLOW_ERROR_SUCCESS, everything else to
+ * FLOW_ERROR_GENERAL. "error" may be NULL; then nothing is stored.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (error) {
+		error->message = err_msg[msg].message;
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free element of "res_type". Candidate indices advance in
+ * steps of "alignment", so the returned index is always a multiple of it.
+ * The element's ref count is set to 1.
+ * Returns the allocated index, or -1 when the pool is exhausted.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+			i += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+			flow_nic_mark_resource_used(ndev, res_type, i);
+			ndev->res[res_type].ref[i] = 1;
+			return i;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific element "idx" of "res_type" (ref count set to 1).
+ * Returns 0 on success, -1 if the element is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Allocate "num" contiguous elements of "res_type". Candidate start
+ * indices advance in steps of "alignment". Each allocated element gets
+ * ref count 1.
+ * Returns the first index of the run, or -1 when no free run exists.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* First slot free -- check the rest of the run. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Mark a single resource element unused. No ref-count bookkeeping here;
+ * callers use flow_nic_deref_resource() for ref-counted release.
+ */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Increment the ref count of an already-allocated resource element.
+ * Returns 0 on success, -1 if the count is saturated at UINT32_MAX
+ * (no increment is performed in that case).
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Decrement the ref count of a resource element; when it reaches zero
+ * the element is freed.
+ * Returns non-zero while the element is still referenced, 0 once freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/* Return the first used element index at or after "idx_start", or -1. */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
+			i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	/* NOTE(review): the check below requires fh->resource[].index to be a
+	 * signed type to catch the -1 failure return -- confirm in the handle
+	 * definition.
+	 */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the specific element "idx" for flow handle "fh" (count = 1).
+ * Returns 0 on success, non-zero if the element is already in use.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	/* NOTE(review): "idx" was just assigned above, so this check only
+	 * fires for a negative "idx" argument -- confirm intent.
+	 */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program hash recipe "hsh_idx" with a fixed algorithm.
+ * HASH_ALGO_5TUPLE programs an IPv6-layout 5-tuple hash with the adaptive
+ * IPv4 mask enabled (HW_HSH_RCP_AUTO_IPV4_MASK), so IPv4 traffic hashes
+ * correctly as well. Any other value leaves the recipe cleared, which is
+ * round-robin distribution.
+ * Always returns 0.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		/* -16: presumably selects the 16 bytes preceding the final IP
+		 * dst, i.e. the source address -- TODO confirm against HSH doc.
+		 */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Words 0-8 fully included in the hash; word 9 masked out. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program hash recipe "hsh_idx" according to the RSS field selection "f".
+ * Only a fixed set of flag combinations is supported; anything else is
+ * rejected.
+ *
+ * Returns 0 on success, -1 on unsupported flags or backend write failure.
+ *
+ * Fixes vs. the original: every hw_mod_hsh_rcp_set() result is now folded
+ * into "res" so the failure checks actually fire; the inner-src-IP success
+ * log no longer claims "outer dst"; "cardware" typos corrected.
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Use the first VLAN tag as the C-VLAN; only valid when a
+		 * single VLAN tag is present.
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* Outer destination IP: IPv6 layout with adaptive IPv4 masking. */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx,
+					  0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* Inner (depth 1) source IP: IPv6 layout with adaptive IPv4 masking. */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx,
+					  0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the flow_eth_dev for (adapter_no, port).
+ * Returns NULL if the adapter or the port is unknown.
+ * NOTE(review): walks dev_base without taking base_mtx -- presumably the
+ * caller serializes access; confirm.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev = dev_base;
+
+	while (nic_dev) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+		nic_dev = nic_dev->next;
+	}
+
+	if (!nic_dev)
+		return NULL;
+
+	struct flow_eth_dev *dev = nic_dev->eth_base;
+
+	while (dev) {
+		if (port == dev->port)
+			return dev;
+		dev = dev->next;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the flow_nic_dev for "adapter_no", or NULL.
+ * Callers such as lag_set_* hold base_mtx around this lookup.
+ */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = dev_base;
+
+	while (ndev) {
+		if (adapter_no == ndev->adapter_no)
+			break;
+		ndev = ndev->next;
+	}
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure LAG port pairing: each bit N in port_mask merges physical
+ * ports N*2 and N*2+1, reported as incoming port N*2.
+ * Returns 0 on success, -1 if the adapter is unknown.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
+	 * and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block incoming traffic from the MAC ports selected by port_mask.
+ * Returns 0 on success, -1 if the adapter is unknown.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write a single LAG table entry (TX physical port) and flush it to HW. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Adapter-level LAG configuration entry point; dispatches on "cmd"
+ * (see enum flow_lag_cmd). Index/value semantics depend on the command.
+ * Returns 0 on success, -1 on unknown adapter or command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		/* Write "value" into every 4th entry starting at (index & 3). */
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* NOTE(review): entries are written as 1 or 2 -- presumably
+		 * 1-based physical port ids; confirm against ROA LAGCFG doc.
+		 */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/* Validate a flow spec; only the inline profile implements validation. */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/* Create a flow; only the inline profile implements flow creation. */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/* Destroy a single flow; only the inline profile is implemented. */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/* Remove all flows on a device; only the inline profile is implemented. */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/* Query a flow action; only the inline profile is implemented. */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Push an eth-port device onto the front of the NIC's device list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	struct flow_eth_dev *old_head = ndev->eth_base;
+
+	dev->next = old_head;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink an eth-port device from the NIC's singly linked device list.
+ * Returns 0 when found and removed, -1 when not present.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	for (; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down everything created on this NIC device: all eth-port devices,
+ * any flows that (erroneously) survived them, and the per-NIC resource
+ * managers. In debug builds, additionally report any resource elements
+ * still referenced or allocated.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check - deleting the ports should have removed all flows */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+		/*
+		 * This whole loop is already guarded by FLOW_DEBUG above;
+		 * the previous nested "#if defined(FLOW_DEBUG)" around this
+		 * log line was redundant and has been removed.
+		 */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/* Reset a NIC device: drop all eth-ports/flows, then reset the backend. */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (ndev == NULL)
+		return -1;
+
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/*
+		 * Drop base_mtx while deleting the old device, then take it
+		 * again: the success/error tails of this function unlock
+		 * base_mtx, so continuing without re-locking would unlock a
+		 * mutex that is not held (undefined behavior).
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/* err_exit1 does not release base_mtx - drop it here */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	/* enable each allocated queue in the QSL queue-enable bitmap */
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	if (eth_dev)
+		free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Attach an externally allocated RX queue to an eth-port device and
+ * enable it in the QSL queue-enable bitmap.
+ * Returns 0 on success, -1 when the device's queue table is full.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	/* rx_queue[] holds FLOW_MAX_QUEUES + 1 entries (0th is exception);
+	 * previously this wrote past the array when called too often
+	 */
+	if (eth_dev->num_queues >= FLOW_MAX_QUEUES + 1) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: too many queues on eth device\n");
+		return -1;
+	}
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	/* set the queue's enable bit in the 4-bit-per-register QEN table */
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Destroy an eth-port device: remove its flows, its QSL unmatched-queue
+ * setup and queue enables, free its RX queues, unlink it from the NIC
+ * device list and free the structure. Returns 0 on success, -1 on error.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/* previously returned while still holding
+				 * ndev->mtx, leaving it locked forever
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	/* clear the queue-enable bits of all queues owned by this device */
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper: look up a tunnel definition by flow_stat_id and vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Allocate the bookkeeping for one resource type: an allocation bitmap
+ * plus one reference counter per element, carved from a single zeroed
+ * allocation (bitmap first, counters right behind it).
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+	uint8_t *mem;
+
+	assert(ndev->res[res_type].alloc_bm == NULL);
+
+	mem = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (!mem)
+		return -1;
+
+	ndev->res[res_type].alloc_bm = mem;
+	ndev->res[res_type].ref = (uint32_t *)&mem[bm_bytes];
+	ndev->res[res_type].resource_count = count;
+	return 0;
+}
+
+/*
+ * Release the allocation bitmap of one resource type; the ref counters
+ * live in the same allocation (see init_resource_elements) and are freed
+ * with it. Safe to call when nothing was allocated.
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(ndev->res[res_type].alloc_bm);
+	/* clear stale pointers so a double teardown cannot double-free */
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/* Prepend a NIC device to the global registry, guarded by base_mtx. */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *old_head = dev_base;
+
+	ndev->next = old_head;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global registry under base_mtx.
+ * Returns 0 when found and removed, -1 when not present.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev **link = &dev_base;
+
+	while (*link) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			pthread_mutex_unlock(&base_mtx);
+			return 0;
+		}
+		link = &(*link)->next;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return -1;
+}
+
+/*
+ * Register a NIC backend with the flow API and build its flow_nic_dev.
+ *
+ * adapter_no  physical adapter number
+ * be_if       backend operations table; must be version 1
+ * be_dev      opaque backend device handle passed back to be_if calls
+ *
+ * Allocates the per-resource-type managers sized from the backend's
+ * capability fields, inserts the device into the global registry and
+ * returns it; returns NULL on any failure (partially built state is
+ * torn down via flow_api_done()).
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* clamp to the 256 ports addressable by the uint8_t port number */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* flow_api_done() tears down whatever was built so far */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Tear down a NIC device created by flow_api_create(): reset it, free
+ * all resource managers, shut down the backend, unregister and free it.
+ * NULL is accepted and ignored. Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (!ndev)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int i = 0; i < RES_COUNT; i++)
+		done_resource_elements(ndev, i);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the opaque backend device handle of a NIC, or NULL. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev)
+		return ndev->be.be_dev;
+
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Number of RX queues on the eth device for (adapter, port).
+ * Returns -1 when no such device exists (previously dereferenced NULL).
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * HW queue id of queue_no on the eth device for (adapter, port).
+ * Returns -1 when no such device exists (previously dereferenced NULL).
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/* Fetch FLM statistics; only supported for the inline profile. */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/*
+ * Per resource-type bookkeeping: an allocation bitmap plus one reference
+ * counter per element. Both arrays are presumably carved from a single
+ * allocation by the NIC setup code - verify before freeing separately.
+ */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+/* Bit positions below mirror DPDK's RTE_ETH_RSS_* flags - keep in sync */
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+/* Convenience mask: all IP-based hash inputs */
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields; /* OR of the NT_ETH_RSS_* bits above */
+};
+
+/*
+ * One logical eth-port device attached to a NIC; created by
+ * flow_get_eth_dev() and linked into the owning NIC's eth_base list.
+ */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next port device on the same NIC */
+};
+
+/* Hash algorithm selector for flow_nic_set_hasher() */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/*
+ * One registered NIC backend; created by flow_api_create() and linked
+ * into the global device registry (dev_base, guarded by base_mtx).
+ */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* nonzero once the first eth-dev set it up */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* protects per-NIC state above */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/*
+ * Error codes reported through struct flow_error. Presumably mapped to
+ * message strings by flow_nic_set_error() - confirm its table covers
+ * every value up to ERR_MSG_NO_MSG when adding new codes.
+ */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG
+};
+
+/* Fill 'error' (if non-NULL) with the code and message for 'msg' */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+/* Set bit x in byte-array bitmap arr; each argument expands exactly once */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+/*
+ * Clear bit x in byte-array bitmap arr. The arguments are now captured
+ * in temporaries like flow_nic_set_bit does, so 'arr' is parenthesized
+ * and evaluated exactly once (it previously expanded unparenthesized
+ * and twice).
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/*
+ * Test bit x in byte-array bitmap arr (GCC/Clang statement expression);
+ * 'arr' is captured once for the same macro-hygiene reason as above.
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Mark a resource element allocated; asserts it was previously free */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+/* Mark a resource element free again */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+/* Nonzero (1) when the resource element is currently allocated */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[(res_type)].alloc_bm, (index)))
+
+/* Allocate one free element of res_type at the given index alignment;
+ * returns the index, or negative on exhaustion.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+/* Claim the specific element idx of res_type */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+/* Allocate num contiguous elements of res_type */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+/* Reference counting on allocated elements; deref frees at zero */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+/* As above, but record the allocation on a flow handle for cleanup */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+/* Registry lookups */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+/* RSS hasher configuration */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+/* Link aggregation (ROA LAG table) configuration */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Queue queries per (adapter, port) */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+/* FLM statistics (inline profile only) */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..fcda73106a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for one CAT function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ *
+ * The FTE table is a bitmap where each entry covers a slice of CFNs, so
+ * cfn_index selects both the entry index and a bit within it.  The
+ * register is only flushed when the bitmap actually changes.
+ * Always returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned literal: 1 << 31 on a signed int would be undefined. */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Avoid redundant register writes/flushes when nothing changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for one CAT function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ *
+ * FLM counterpart of set_flow_type_km(): same FTE bitmap layout, but
+ * programmed through the hw_mod_cat_fte_flm_* accessors.
+ * Always returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned literal: 1 << 31 on a signed int would be undefined. */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	/* Avoid redundant register writes/flushes when nothing changes */
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/* Translate a logical RX queue id to its hardware queue id; -1 if unknown. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int queue_no = 0;
+
+	while (queue_no < dev->num_queues) {
+		if (dev->rx_queue[queue_no].id == id)
+			return dev->rx_queue[queue_no].hw_id;
+		++queue_no;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Bring up the FLM SDRAM: reset the FLM control registers, poll until the
+ * DDR4 calibration reports done, then program the initial flow scrubber
+ * and timeout settings.  Returns 0 on success, -1 if CALIBDONE was never
+ * observed within the poll budget (~1M iterations of 1 us sleeps).
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Re-initialize the FLM SDRAM.  Sequence is order-critical:
+ *  1. disable the FLM and clear every RCP so no lookups run during init,
+ *  2. wait for the FLM to report Idle,
+ *  3. trigger SDRAM initialization and wait for INITDONE,
+ *  4. clear the INIT bit in the SW register cache and re-enable the FLM
+ *     according to 'enable'.
+ * Returns 0 on success, -1 if either poll loop times out.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* RCP 0 is left untouched; reset categories 1..N-1 */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+/* FLM bookkeeping limits: number of groups/RCPs and flow types per group */
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Compact identity of a flow's action set (destination, encapsulation,
+ * decap end and jump target).  Exactly 64 bits wide (asserted in
+ * flow_def_to_ft_ident()) so two action sets can be compared through the
+ * 'data' view; flows with equal identifiers can share one FLM flow type.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;	/* entry is allocated */
+			uint64_t drop : 1;	/* no destination: drop */
+			uint64_t ltx_en : 1;	/* local TX to phy port */
+			uint64_t ltx_port : 1;
+			uint64_t queue_en : 1;	/* deliver to virt queue */
+			uint64_t queue : 8;
+			uint64_t encap_len : 8;
+			uint64_t encap_vlans : 2;
+			uint64_t encap_ip : 1;	/* 0: IPv4 tunnel, 1: IPv6 */
+			uint64_t decap_end : 5;
+			uint64_t jump_to_group : 8;
+			uint64_t pad : 27;
+		};
+		uint64_t data;	/* whole-identifier view for comparisons */
+	};
+};
+
+/*
+ * FLM key layout selection: dyn/ofs selector pairs for the two key
+ * quad-words (QW0/QW4) and two single-words (SW8/SW9), plus outer/inner
+ * protocol flags.  Packed into 64 bits so two layouts can be compared
+ * through the 'data' view (see flm_flow_learn_prepare()).
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data;	/* whole-layout view for comparisons */
+	};
+};
+
+/*
+ * Build the flow-type identifier summarizing a flow definition's action
+ * set: destination (drop/phy port/virt queue), tunnel encapsulation,
+ * decap end and jump-to-group.  Identical action sets yield identical
+ * identifiers.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ident = { .data = 0 };
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ident.drop = 1;
+	} else {
+		for (int dst = 0; dst < fd->dst_num_avail; ++dst) {
+			switch (fd->dst_id[dst].type) {
+			case PORT_PHY:
+				ident.ltx_en = 1;
+				ident.ltx_port = fd->dst_id[dst].id;
+				break;
+			case PORT_VIRT:
+				ident.queue_en = 1;
+				ident.queue = fd->dst_id[dst].id;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ident.encap_len = fd->tun_hdr.len;
+		ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ident;
+}
+
+/* Record the dyn/ofs selection for quad-word slot 0 or 1 of an FLM key. */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw != 0) {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->qw0_dyn = dyn & 0x7f;
+	key_def->qw0_ofs = ofs & 0xff;
+}
+
+/* Record the dyn/ofs selection for single-word slot 8 or 9 of an FLM key. */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw != 0) {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+		return;
+	}
+	key_def->sw8_dyn = dyn & 0x7f;
+	key_def->sw8_ofs = ofs & 0xff;
+}
+
+/* Per-group FLM state tracked in software. */
+struct flm_flow_group_s {
+	int cfn_group0;		/* CAT CFN serving group 0; -1 when unset */
+	int km_ft_group0;	/* KM flow type used by the group 0 entry */
+	struct flow_handle *fh_group0;	/* flow handle owning group 0 */
+
+	struct flm_flow_key_def_s key_def;	/* key layout programmed for this group */
+
+	int miss_enabled;	/* nonzero once group 0 was switched to FLM miss handling */
+
+	/* Action-set table indexed by FLM flow type; search starts at index 2 */
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	uint32_t cashed_ft_index;	/* most recently matched FT index ("cashed" = cached) */
+};
+
+/* Top-level FLM bookkeeping handle: one group entry per FLM RCP. */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset, if already allocated) the per-device FLM flow
+ * handle and mark every group as unused (cfn_group0 == -1).
+ * On allocation failure *handle is left NULL for the caller to detect.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	/* calloc() may fail; dereferencing NULL below would crash */
+	if (!flm_handle)
+		return;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM flow handle and clear the caller's pointer. */
+static void flm_flow_handle_remove(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)*handle;
+
+	*handle = NULL;
+	free(flm_handle);
+}
+
+/*
+ * Record the group 0 bookkeeping for 'group_index': the CAT CFN, the KM
+ * flow type and the owning flow handle.  Miss handling starts disabled.
+ * Returns -1 for an out-of-range group, otherwise 0.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Clear all bookkeeping for 'group_index' and mark it unused again
+ * (cfn_group0 == -1).  Returns -1 for an out-of-range group, otherwise 0.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *flm_group = &flm_handle->groups[group_index];
+
+	*flm_group = (struct flm_flow_group_s){ 0 };
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Return (via 'fh_miss') the flow handle that owns group 0 for the given
+ * group.  Returns -1 for an out-of-range group, otherwise 0.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index < FLM_FLOW_RCP_MAX) {
+		*fh_miss = flm_handle->groups[group_index].fh_group0;
+		return 0;
+	}
+
+	NT_LOG(ERR, FILTER,
+	       "FLM: Invalid index for FLM programming: Group=%d\n",
+	       (int)group_index);
+	return -1;
+}
+
+/*
+ * Program FLM RCP 'group_index': key word selection (QW0/QW4/SW8/SW9
+ * dyn/ofs from 'key_def'), the key mask, the KID (group_index + 2 — the
+ * same KID is emitted by flm_flow_learn_prepare()), protocol flags and
+ * byte counter offset.  Note the mask words are reordered from packet
+ * order into SW9, SW8, QW4, QW0 register order.
+ * Returns -1 for an out-of-range group, otherwise 0.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	/* NOTE(review): byte-count offset of -20 — presumably to exclude
+	 * L2 framing from byte counts; confirm against the FLM spec.
+	 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Reset FLM RCP 'group_index'.  If group 0 miss handling was enabled for
+ * this group, also roll group 0 back to pure KM operation: point its RCP
+ * selection at 0, swap FT MISS back to FT UNHANDLED and clear its KCE
+ * enable bit (reverse order of flm_flow_learn_prepare()'s setup).
+ * Returns -1 for an out-of-range group, otherwise 0.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM learning for a flow in 'group_index':
+ *  - on first use of a group: allocate the FLM RCP, program it, switch
+ *    the group 0 CFN from KM-only to FLM miss handling, and latch the
+ *    key layout; subsequent flows must use the same key layout;
+ *  - build the action-set identifier and find or allocate a matching
+ *    flow type (FT) entry.  A match is returned via 'fh_existing'; a new
+ *    entry requests CFN duplication via 'cfn_to_copy'/'cfn_to_copy_km_ft'.
+ * Outputs the KID (group_index + 2, matching flm_flow_setup_rcp()) and
+ * the FT index for learn records.  Returns 0 on success, -1 on error.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	/* (mirrors flow_def_to_ft_ident(), except jump_to_group is not set) */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* FT entries 0 and 1 are reserved; search starts at 2 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Detach an FLM "owner" flow: clear its FT entry in the group and disable
+ * both the KM flow type and the FLM flow type on the flow's CFN.
+ * Returns 0 on success, nonzero if any set_flow_type_flm() call failed.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+/* Number of meter profiles supported */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Per-profile token-bucket parameters, pre-encoded into the FLM register
+ * format (12-bit mantissa in [11:0], 4-bit left-shift in [15:12]) by
+ * flow_mtr_set_profile().
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires an FLM block present and of variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Upper bound on meter policy ids: one per meter profile slot. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return (uint64_t)FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a bucket size in bytes (at most 40 bits) into hardware units of
+ * 2^40 / 10^9 bytes, i.e. compute ceil(value * 10^9 / 2^40).  The value
+ * is split into two 20-bit halves so the multiplication by 10^9 cannot
+ * overflow 64 bits; round_up adds 1 whenever either half leaves a
+ * non-zero remainder after the divide.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t round_up =
+		(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+	return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+/*
+ * Encode a meter profile's dual token buckets into the FLM register
+ * format and cache them in the profile table.  Rates are converted to
+ * 128 bytes/sec units and sizes to 2^40/10^9 byte units; both are then
+ * clamped and stored as a 12-bit mantissa with a 4-bit left-shift
+ * ([11:0] value, [15:12] shift).  Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round rate down to max rate supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/*
+ * Meter policy hook.  NOTE(review): currently a no-op stub — the policy
+ * content (including 'drop') is accepted but ignored; confirm whether
+ * per-policy hardware state is required here.
+ */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+/* Number of FLM meters / statistics entries supported */
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* 32-bit words per FLM INF (info/statistics) record */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Marker bit set on mtr_stat_s::n_pkt while an update is in flight */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/*
+ * Periodic statistics thresholds.  The comments give the resulting
+ * limits; NOTE(review): the raw values appear to be encoded exponents
+ * for the FLM registers — confirm the encoding against the FLM spec.
+ */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Number of individually addressable meters (one statistics slot each). */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return (uint32_t)FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics.  n_pkt/n_bytes are updated lock-free: the writer
+ * (flm_mtr_update_stats()) sets the MSB of n_pkt first and clears it
+ * last, so readers can detect a torn packet/byte pair (see UINT64_MSB).
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;	/* profile buckets; NULL once destroyed */
+
+	uint64_t n_pkt;		/* total packets (MSB set while updating) */
+	uint64_t n_bytes;	/* total bytes */
+	uint64_t n_pkt_base;	/* base subtracted on read (clear-on-read) */
+	uint64_t n_bytes_base;	/* base subtracted on read (clear-on-read) */
+	uint64_t stats_mask;	/* nonzero => statistics enabled for this meter */
+};
+
+/*
+ * NOTE(review): unlike WORDS_PER_INF_DATA this is a BYTE count, not a
+ * 32-bit word count (no division by sizeof(uint32_t)).  The LRN_FREE
+ * comparisons in flow_flm_apply() therefore wait for 4x the space of one
+ * record — harmless if conservative, but confirm the intended unit.
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+/* Forward declaration: drains INF records while waiting for learn space */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Push one learn record into the FLM learn FIFO.  If the FIFO lacks
+ * space, drain pending INF records (freeing buffer space) and retry up
+ * to FLM_PROG_MAX_RETRY times.  Returns 0 on success, nonzero on retry
+ * exhaustion or flush failure.  Callers hold the ndev mutex.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached register value before spinning */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Install a meter as an FLM entry via a learn record (op = 1): the key is
+ * sw9 = mtr_id + 1 under KID 1, bucket A of the profile supplies
+ * rate/size/fill, and vol_idx requests volume counting when a statistics
+ * mask is given.  On success the software stat entry is bound to the
+ * profile's buckets.  Returns the flow_flm_apply() result (0 = success).
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* 32-bit meter id in id[0..3]; MSB of id[8] marks the entry type */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		__atomic_store_n(&mtr_stat[mtr_id].stats_mask, stats_mask, __ATOMIC_RELAXED);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Remove a meter via a learn record with op = 0 (delete), keyed like
+ * flow_mtr_create_meter().  Software statistics are zeroed before the
+ * hardware delete so a concurrent flm_mtr_update_stats() — which checks
+ * stats_mask before writing — cannot resurrect counters of the deleted
+ * meter.  Returns the flow_flm_apply() result (0 = success).
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	__atomic_store_n(&mtr_stat[mtr_id].stats_mask, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_bytes, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_pkt, 0, __ATOMIC_RELAXED);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Send an FLM "adjust" learn record (op = 2) for a meter: re-applies the
+ * bucket A rate/size with an adjustment value.  Returns 0 on success,
+ * nonzero on FLM programming failure, -1 if the meter does not exist.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	/*
+	 * flow_mtr_destroy_meter() sets buckets to NULL; dereferencing it
+	 * below for a destroyed (or never created) meter would crash.
+	 */
+	if (mtr_stat->buckets == NULL) {
+		pthread_mutex_unlock(&dev->ndev->mtx);
+		return -1;
+	}
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (__atomic_load_n(&mtr_stat->stats_mask, __ATOMIC_RELAXED))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ INF records from the FLM into
+ * 'data' (sized accordingly by callers).  Returns the number of whole
+ * records read, possibly 0.  Callers hold the ndev mutex.
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached register value and re-check */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain INF records and fold valid meter statistics into the per-meter
+ * counters.  Record layout (32-bit words): [0..1] byte count, [2..3]
+ * packet count, [6] meter id, [7] must be 0 and bit 31 of [8] must be
+ * set for a valid stats record.  The writer protocol sets the MSB of
+ * n_pkt first and clears it last so flm_mtr_read_stats() can detect a
+ * torn byte/packet pair without a lock.  Returns the number of records
+ * consumed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				__atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+			if (stats_mask) {
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/* MSB flags the update; final store clears it */
+				__atomic_store_n(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_bytes, nb, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_pkt, np, __ATOMIC_RELAXED);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Snapshot the green packet/byte counters of meter 'id'.  Spins until a
+ * consistent pair is observed: waits while the writer's MSB flag is set
+ * on n_pkt, then re-reads n_pkt to confirm no update raced the n_bytes
+ * read.  Bases are subtracted from the totals and, with 'clear', rebased
+ * to emulate clear-on-read.  Outputs are untouched if stats are disabled.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+			} while (pkt_1 & UINT64_MSB);
+			nb = __atomic_load_n(&mtr_stat[id].n_bytes, __ATOMIC_RELAXED);
+			pkt_2 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* Ports map one-to-one onto IFR MTU recipes, offset by one. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/* Find the physical port number for a port id; UINT8_MAX if not found. */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	const struct flow_eth_dev *dev;
+
+	for (dev = ndev->eth_base; dev != NULL; dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push a flow handle onto the front of the device's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/* Unlink a flow handle from the device's flow list. */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (prev)
+		prev->next = next;
+	else if (next || ndev->flow_base == fh)
+		ndev->flow_base = next;	/* fh was the list head */
+
+	if (next)
+		next->prev = prev;
+}
+
+/* Push @fh onto the front of the NIC's doubly-linked list of FLM flows. */
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base_flm;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head != NULL)
+		head->prev = fh;
+	ndev->flow_base_flm = fh;
+}
+
+/* Unlink @fh_flm from the NIC's FLM flow list, updating its head as needed. */
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *after = fh_flm->next;
+	struct flow_handle *before = fh_flm->prev;
+
+	if (before != NULL && after != NULL) {
+		/* Interior node: bridge the two neighbours. */
+		before->next = after;
+		after->prev = before;
+	} else if (after != NULL) {
+		/* Head of the list: advance the list head. */
+		ndev->flow_base_flm = after;
+		after->prev = NULL;
+	} else if (before != NULL) {
+		/* Tail of the list: truncate at the predecessor. */
+		before->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		/* Sole element: the list becomes empty. */
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+/*
+ * Interpret a VLAN flow element: when the TCI is masked, add it as one
+ * 32-bit SW match word on the first VLAN tag, then bump the flow
+ * definition's VLAN count.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @sw_counter is passed by value, so the increment below is
+ * not visible to the caller - confirm the caller re-derives the count.
+ */
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			/*
+			 * A port-level implicit VLAN already occupies the first
+			 * VLAN level; matching another TCI would need a second
+			 * level, which is not supported.
+			 */
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			/* Only two SW match words are available per key. */
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words are filled from index 1 downwards. */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/* TCI to host order; spec is pre-masked before matching. */
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+/*
+ * Interpret an IPv4 flow element.  A fully-masked frag_offset marks the
+ * flow's fragmentation handling; src/dst addresses are matched either as
+ * one QW pair (preferred) or as individual SW words when no QW slot is
+ * free.  The element counts as the inner L3 when preceded by an ANY
+ * element or when an outer L3 protocol was already recorded.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): @qw_counter/@sw_counter are passed by value, so the
+ * increments below are not visible to the caller - confirm the caller
+ * re-derives the counts.
+ */
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		/* Exact match on an all-ones frag_offset selects recipe 0xfe. */
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		/* Prefer one QW (src at L3+12, dst at L3+16) when a slot is free. */
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			/* QW slots are filled from index 6 downwards in steps of 4. */
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			/* Spec is pre-masked before matching. */
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			/* Fall back to SW words; need one per masked address. */
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	/* A preceding ANY element or an existing outer L3 marks this as inner. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+/*
+ * Add one 128-bit IPv6 address as a 4-word QW match at @l3_offset bytes
+ * into the L3 header.  Each 32-bit word is converted to host order and
+ * the spec is pre-masked.  Returns 0 on success; on failure sets @error,
+ * frees @fd and returns 1.
+ */
+static int flow_elem_ipv6_add_addr(struct flow_error *error, struct nic_flow_def *fd,
+	unsigned int *qw_counter, uint32_t *packet_data, uint32_t *packet_mask,
+	struct flm_flow_key_def_s *key_def, const void *addr_spec,
+	const void *addr_mask, uint32_t l3_offset)
+{
+	/* Only two QW slots are available per key. */
+	if (*qw_counter >= 2) {
+		NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+		flow_nic_set_error(ERR_FAILED, error);
+		free(fd);
+		return 1;
+	}
+
+	/* QW slots are filled from index 6 downwards in steps of 4. */
+	uint32_t *qw_data = &packet_data[2 + 4 - *qw_counter * 4];
+	uint32_t *qw_mask = &packet_mask[2 + 4 - *qw_counter * 4];
+
+	memcpy(&qw_data[0], addr_spec, 16);
+	memcpy(&qw_mask[0], addr_mask, 16);
+
+	for (int i = 0; i < 4; ++i) {
+		qw_data[i] = ntohl(qw_data[i]);
+		qw_mask[i] = ntohl(qw_mask[i]);
+		qw_data[i] &= qw_mask[i];
+	}
+
+	km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, l3_offset);
+	set_key_def_qw(key_def, *qw_counter, DYN_L3, l3_offset);
+	*qw_counter += 1;
+	return 0;
+}
+
+/*
+ * Interpret an IPv6 flow element: match the source (L3+8) and/or
+ * destination (L3+24) address as full 128-bit QW matches.  The element
+ * counts as the inner L3 when preceded by an ANY element or when an outer
+ * L3 protocol was already recorded.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ *
+ * NOTE(review): address presence is keyed on the spec, not the mask - an
+ * all-zero spec with a non-zero mask is skipped; confirm this matches the
+ * intended API semantics.  @qw_counter is passed by value, so increments
+ * are not visible to the caller.
+ */
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (flow_elem_ipv6_add_addr(error, fd, &qw_counter,
+					packet_data, packet_mask, key_def,
+					ipv6_spec->hdr.src_addr,
+					ipv6_mask->hdr.src_addr, 8))
+				return 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (flow_elem_ipv6_add_addr(error, fd, &qw_counter,
+					packet_data, packet_mask, key_def,
+					ipv6_spec->hdr.dst_addr,
+					ipv6_mask->hdr.dst_addr, 24))
+				return 1;
+		}
+	}
+
+	/* A preceding ANY element or an existing outer L3 marks this as inner. */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+/*
+ * Interpret a UDP flow element.  ("upd" is the original, likely misspelled,
+ * name - kept so the interface is unchanged.)  Masked ports are packed as
+ * one 32-bit SW word: src_port in the upper 16 bits, dst_port in the lower.
+ * The element counts as the inner L4 when preceded by an ANY element or
+ * when an outer L4 protocol was already recorded.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ */
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *mask = (const struct flow_elem_udp *)elem[eidx].mask;
+	int match_ports = spec != NULL && mask != NULL &&
+		(mask->hdr.src_port || mask->hdr.dst_port);
+
+	if (match_ports) {
+		/* Only two SW match words are available per key. */
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		/* SW words are filled from index 1 downwards. */
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret an SCTP flow element.  Masked ports are packed as one 32-bit
+ * SW word: src_port in the upper 16 bits, dst_port in the lower.  The
+ * element counts as the inner L4 when preceded by an ANY element or when
+ * an outer L4 protocol was already recorded.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ */
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+	int match_ports = spec != NULL && mask != NULL &&
+		(mask->hdr.src_port || mask->hdr.dst_port);
+
+	if (match_ports) {
+		/* Only two SW match words are available per key. */
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		/* SW words are filled from index 1 downwards. */
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a TCP flow element.  Masked ports are packed as one 32-bit SW
+ * word: src_port in the upper 16 bits, dst_port in the lower.  The element
+ * counts as the inner L4 when preceded by an ANY element or when an outer
+ * L4 protocol was already recorded.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ */
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+	int match_ports = spec != NULL && mask != NULL &&
+		(mask->hdr.src_port || mask->hdr.dst_port);
+
+	if (match_ports) {
+		/* Only two SW match words are available per key. */
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		/* SW words are filled from index 1 downwards. */
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		sw_data[0] = ((ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port)) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a GTP flow element: when the TEID is masked, match it as one
+ * 32-bit SW word at offset 4 into the L4 payload, then flag the flow as
+ * GTPv1-U tunnelled.
+ *
+ * Returns 0 on success; on failure sets @error, frees @fd and returns 1.
+ */
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL && mask->teid) {
+		/* Only two SW match words are available per key. */
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		/* SW words are filled from index 1 downwards. */
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = ntohl(mask->teid);
+		sw_data[0] = ntohl(spec->teid) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+		set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+		sw_counter += 1;
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return a CAT function (CFN) entry and everything attached to it to its
+ * power-on state: the CFN itself, the KM and FLM category/enable bits and
+ * flow types, and the CTE/CTS category tables. Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN: clear the CAT function entry itself */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+			   0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/* KM: clear this CFN's enable bit and category, then all flow types */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every KM flow type on all four key banks */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_km(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* FLM: same clearing sequence as KM, on the FLM side */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		/* Disable every FLM flow type on all four key banks */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_flm(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* CTE / CTS: if any enable bit is set, clear it and the CTS slots */
+	{
+		uint32_t cte_bm = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte_bm);
+
+		if (cte_bm) {
+			/* Two categories (A/B) share one CTS entry */
+			const int cts_per_cfn =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int idx = 0; idx < cts_per_cfn; ++idx) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_per_cfn * cfn + idx,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_per_cfn * cfn + idx,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_per_cfn * cfn,
+					     cts_per_cfn);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Populate the FLM-specific fields of a flow handle from a flow definition:
+ * IP protocol number, 10-word match key, key id, replace-extension pointer,
+ * priority, and any modify-field (NAT/DSCP/TEID/QFI) values.
+ *
+ * Returns 0 on success, -1 if @fh is not an FLM-type handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/*
+	 * Resolve the IP protocol number (IANA: TCP=6, UDP=17, SCTP=132,
+	 * ICMP=1). The outer L4 protocol takes precedence; the tunneled L4
+	 * protocol is consulted only when no outer L4 protocol matched.
+	 */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	/* The first 10 words of the packet match data form the FLM key */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Mirror modify-field actions into the FLM record fields */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		if (fd->modify_field[idx].select == CPY_SELECT_DSCP_IPV4 ||
+				fd->modify_field[idx].select ==
+				CPY_SELECT_DSCP_IPV6) {
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+		} else if (fd->modify_field[idx].select ==
+				CPY_SELECT_RQI_QFI) {
+			/* value8[0] packs RQI in bit 6, QFI in bits [5:0] */
+			fh->flm_rqi = (fd->modify_field[idx].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+		} else if (fd->modify_field[idx].select == CPY_SELECT_IPV4) {
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+		} else if (fd->modify_field[idx].select == CPY_SELECT_PORT) {
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+		} else if (fd->modify_field[idx].select == CPY_SELECT_TEID) {
+			fh->flm_teid =
+				ntohl(fd->modify_field[idx].value32[0]);
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build a v17 FLM learn record from the flow handle's FLM state and apply
+ * it to the hardware learn interface.
+ *
+ * Returns the result of flow_flm_apply(), or -1 if @fh is not an FLM-type
+ * handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s lrn;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&lrn, 0x0, sizeof(lrn));
+
+	/* Key words are stored high-to-low: QW0/QW4, then SW8/SW9 */
+	for (int w = 0; w < 4; w++) {
+		lrn.qw0[w] = fh->flm_data[9 - w];
+		lrn.qw4[w] = fh->flm_data[5 - w];
+	}
+	lrn.sw8 = fh->flm_data[1];
+	lrn.sw9 = fh->flm_data[0];
+	lrn.prot = fh->flm_prot;
+
+	if (mtr_ids) {
+		/*
+		 * mbr_idx packs four meter ids; the low overlay covers ids
+		 * 0/1 and the high overlay (7 bytes in) covers ids 2/3.
+		 */
+		struct flm_v17_mbr_idx_overlay *mbr_lo =
+			(struct flm_v17_mbr_idx_overlay *)lrn.mbr_idx;
+		struct flm_v17_mbr_idx_overlay *mbr_hi =
+			(struct flm_v17_mbr_idx_overlay *)(lrn.mbr_idx + 7);
+
+		mbr_lo->a = mtr_ids[0];
+		mbr_lo->b = mtr_ids[1];
+		mbr_hi->a = mtr_ids[2];
+		mbr_hi->b = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		lrn.vol_idx = mbrs;
+	}
+
+	lrn.nat_ip = fh->flm_nat_ipv4;
+	lrn.nat_port = fh->flm_nat_port;
+	lrn.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	lrn.dscp = fh->flm_dscp;
+	lrn.teid = fh->flm_teid;
+	lrn.qfi = fh->flm_qfi;
+	lrn.rqi = fh->flm_rqi;
+	/* Lower 10 bits used for RPL EXT PTR */
+	lrn.color = fh->flm_rpl_ext_ptr & 0x3ff;
+	/* Bit [13:10] used for MTU recipe */
+	lrn.color |= (fh->flm_mtu_fragmentation_recipe & 0xf) << 10;
+
+	lrn.ent = 0;
+	lrn.op = flm_op & 0xf;
+	lrn.prio = fh->flm_prio & 0x3;
+	lrn.ft = flm_ft;
+	lrn.kid = fh->flm_kid;
+	lrn.eor = 1;
+
+	return flow_flm_apply(dev, &lrn);
+}
+
+/*
+ * Set up the KM (exact-match) side of a filter: find or allocate the KM
+ * flow-type and KM category (RCP) resources for @fh and program (or reuse)
+ * the KM match entry.
+ *
+ * When @identical_flow_found is non-zero, @found_flow is an existing flow
+ * with an identical KM key; its flow-type and category resources are
+ * reference-counted and its match entry is reused instead of writing a new
+ * one. Otherwise @found_flow may still point to a flow whose non-referenced
+ * RCP can be updated in place (found by the caller's key scan).
+ *
+ * On success writes *setup_km = 1 and the chosen flow-type / RCP indices to
+ * *setup_km_ft / *setup_km_rcp for the caller's CAT setup stage, and
+ * returns 0. Returns 1 on resource exhaustion with @error set.
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+		/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/*
+			 * Scan for a matching FT identity; remember the first
+			 * free (zero) slot as a fallback. Index 0 is reserved.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Reuse: bump the refcount on the existing FT */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Allocate: claim the free slot for this identity */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				/* Neither a match nor a free slot available */
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add this FT to the shared RCP's key-A FT mask */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/*
+		 * Identical KM key exists: reference the found flow's FT and
+		 * category and point at its match entry instead of writing
+		 * a new one.
+		 */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		/*
+		 * NOTE(review): here @flow is read rather than @found_flow;
+		 * in the caller they are the same object when an identical
+		 * flow was found (the scan loop breaks on it) — confirm.
+		 */
+		*setup_km = 1;
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * @param fd         Flow definition holding the tunnel header state.
+ * @param eth_length Length of the outer Ethernet header.
+ * @param i          Index of the fd->modify_field entry being adjusted.
+ * @param ofs        In/out: copy offset to rebase (written through).
+ * @param select     CPY_SELECT_* field selector for this entry.
+ * @param l2_length  Length of the new outer L2 header.
+ * @param l3_length  Length of the new outer L3 header.
+ * @param l4_length  Length of the new outer L4 header.
+ * @param dyn        Out: set to 1 (offset now static from start of L2)
+ *                   when the new-outer rebase path is taken.
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * BUGFIX: the original did "ofs += ..." which
+			 * advanced the pointer itself instead of adding to
+			 * the offset value it points to (cf. the "*ofs +="
+			 * writes below), leaving the caller's offset
+			 * unadjusted.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Rebase to a static offset measured from L2 start */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != 1)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != l2_length)
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* Update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* Update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of per-NIC flow-management state, run the first
+ * time a flow device is opened on the adapter: reserves index 0 (and FLM
+ * flow type 1) of every resource pool as default/catch-all entries,
+ * programs default recipes into CAT/QSL/PDB/HSH/COT, unblocks the MAC
+ * path in RMC, brings up the FLM engine (SDRAM calibrate/reset, learn
+ * priorities, periodic statistics) and allocates the meter/flow-type
+ * bookkeeping tables.
+ *
+ * Idempotent: guarded by ndev->flow_mgnt_prepared.
+ * Returns 0 on success, -1 on failure.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		/* NOTE(review): the magic values 7 (descriptor type) and 6
+		 * (descriptor length units) are presumed to encode the 12-byte
+		 * virtio SG descriptor mentioned above - confirm against the
+		 * PDB register documentation.
+		 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Disable all FLM status reporting, then set the record
+		 * buffer limit (RBL) and commit the control block.
+		 */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Bookkeeping tables: meters, flow-type idents, meter stats */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			__atomic_store_n(&mtr_stat[i].n_pkt, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].n_bytes, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].stats_mask, 0, __ATOMIC_RELAXED);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	/*
+	 * NOTE(review): done_flow_management_of_ndev_profile_inline() only
+	 * releases resources when ndev->flow_mgnt_prepared is set, which is
+	 * still 0 on this path - verify that the calloc'ed handles above are
+	 * not leaked on a partial initialization failure.
+	 */
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down the per-NIC flow-management state created by
+ * initialize_flow_management_of_ndev_profile_inline(): quiesce the FLM
+ * engine, reset every default recipe back to zero, release the reserved
+ * index-0 resources and free the bookkeeping tables.
+ *
+ * No-op (apart from optional debug-mode toggling) when nothing was
+ * prepared.  Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		/* Stop the FLM engine before releasing its resources */
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/* Free the bookkeeping tables allocated at initialization */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		/* Reset each module's recipe 0 to defaults and flush */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Dry-run validation of a flow specification: parse the match elements
+ * and actions exactly as flow_create_profile_inline() would, but create
+ * no hardware state - the resulting definition is discarded again.
+ *
+ * Returns 0 when the specification parses, -1 otherwise (*error set by
+ * interpret_flow_elements()).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t parsed_port = 0;
+	uint32_t parsed_dest_ports = 0;
+	uint32_t parsed_queues = 0;
+	uint32_t data_words[10];
+	uint32_t mask_words[10];
+	struct flm_flow_key_def_s parsed_key;
+	struct nic_flow_def *def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	def = interpret_flow_elements(dev, elem, action, error, 0,
+				      &parsed_port, &parsed_dest_ports,
+				      &parsed_queues, data_words, mask_words,
+				      &parsed_key);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (def == NULL)
+		return -1;
+
+	/* Parsed fine - throw the definition away again */
+	free(def);
+	return 0;
+}
+
+/*
+ * Create and program a new flow on the device.
+ *
+ * Parses elem[]/action[] into a flow definition, translates caller
+ * group ids into NIC group resources and programs the resulting filter
+ * (regular or FLM) into hardware.
+ *
+ * Returns the new flow handle, or NULL with *error set on failure.
+ *
+ * Fixes over the previous revision:
+ *  - 'fd' was leaked when either group translation failed (err_exit
+ *    only released 'fh'); it is now freed on those paths.
+ *  - "New FlOW" log typo corrected.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	/* Only group 0 may force a VLAN id onto the match */
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		free(fd); /* fd not yet owned by any filter - avoid leak */
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		free(fd); /* fd not yet owned by any filter - avoid leak */
+		goto err_exit;
+	}
+
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC.  From here create_flow_filter()
+	 * owns fd (it frees or stores it) - do not free fd on later paths.
+	 */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New flow: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a single flow handle.  Caller must hold dev->ndev->mtx.
+ *
+ * FLM flows: unprogram the hardware entry, release any TPE
+ * replace-extension data it was the last user of, and drop the owner
+ * template's reference count - destroying the owner itself (recursively)
+ * when the count reaches zero.
+ *
+ * Non-FLM flows: release every resource index the handle holds; when
+ * the last reference to an index is dropped, the corresponding hardware
+ * recipe is reset to defaults and flushed.
+ *
+ * Returns 0 on success, non-zero if any release step failed.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unprogram the FLM entry (NULL mtr_ids, flow type 0, off) */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Clear replace-extension data if we were its last user */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Replace data occupies 16-byte RPL entries, hence
+			 * the round-up; zero each entry we last referenced.
+			 */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owner template when this was its last flow */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Walk every resource class held by this handle */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the flow-type ident
+						 * bookkeeping entry only -
+						 * no hardware write needed.
+						 */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* Reset all seven TPE
+						 * sub-module recipes for
+						 * this index.
+						 */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Remove every flow on 'list' owned by 'dev'.  Caller holds ndev->mtx.
+ * Stops at the first failure and returns its error code, else 0.
+ */
+static int flow_dev_destroy_list_locked(struct flow_eth_dev *dev,
+					struct flow_handle *list)
+{
+	int rc = 0;
+
+	while (list && rc == 0) {
+		struct flow_handle *next_fh = list->next;
+
+		if (list->dev == dev)
+			rc = flow_destroy_locked_profile_inline(dev, list,
+								NULL);
+		list = next_fh;
+	}
+
+	return rc;
+}
+
+/*
+ * Destroy one flow, or - when 'flow' is NULL - every flow (regular and
+ * FLM) created on this eth device.  Takes ndev->mtx for the duration.
+ * Returns 0 on success or the first error encountered.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int rc = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	if (flow) {
+		/* Delete exactly the flow given by the caller */
+		rc = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all regular flows, then all FLM flows, of this dev */
+		rc = flow_dev_destroy_list_locked(dev, dev->ndev->flow_base);
+		if (rc == 0)
+			rc = flow_dev_destroy_list_locked(dev,
+							  dev->ndev->flow_base_flm);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return rc;
+}
+
+/*
+ * rte_flow flush entry point for the inline profile - not implemented.
+ * Reports a general error and always fails.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * rte_flow query entry point for the inline profile - not implemented.
+ * Clears the output parameters, reports a general error and fails.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	*data = NULL;
+	*length = 0;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	return -1;
+}
+
+/*
+ * Collect FLM statistics into the caller-supplied array.
+ *
+ * data[] is indexed in the order of the local fields[] table.  Every
+ * counter except HW_FLM_STAT_FLOWS (a snapshot gauge) is accumulated on
+ * top of the value already present in data[].  'size' must be at least
+ * the number of fields.  On FLM versions older than 18 collection stops
+ * after HW_FLM_STAT_PRB_IGNORE, leaving later entries untouched.
+ *
+ * Returns 0 on success, -1 if the output array is too small.
+ *
+ * Change: element count now derived via sizeof(fields[0]) so the
+ * expression stays correct if the element type ever changes.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(fields[0]);
+
+	if (size < fields_cnt)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		/* FLOWS is a gauge; everything else accumulates */
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the per-port IFR MTU recipe (both the RPP-side and the
+ * TPE-side copy), then flush both recipes to hardware - but only when
+ * every set operation succeeded.
+ *
+ * Returns 0 on success, non-zero when any hw_mod call failed.
+ * Port numbers 255 and above are rejected with -1.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	uint8_t rcp_idx = convert_port_to_ifr_mtu_recipe(port);
+	int res = 0;
+
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  rcp_idx, 1);
+	res |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  rcp_idx, mtu);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      rcp_idx, 1);
+	res |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      rcp_idx, mtu);
+
+	if (res == 0) {
+		/* All writes staged OK - commit both recipes */
+		res |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+		res |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down per-NIC flow-management state; safe when nothing was prepared. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time per-NIC setup: default recipes, reserved resources, FLM engine. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy a single flow handle; caller must already hold ndev->mtx. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Parse-only check of a flow spec; no hardware state is created. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns a handle or NULL with *error set. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of the device when 'flow' is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented yet; always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented yet; always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Read FLM statistics counters into data[]; 'size' is its capacity. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+/* Upper bound on adapters this backend can serve (sizes be_devs[]). */
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend state: one handle per hardware flow module.
+ * A NULL module pointer means the module is absent (see *_get_present()).
+ * A pointer to one of these is handed out as the opaque 'be_dev' argument
+ * of every backend callback below.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode; /* register-write tracing, see set_debug_mode() */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Turn on register-write tracing for module <mod> when the backend debug
+ * mode requests writes or the module has its own debug flag set.  Declares
+ * a local flag (debug_sav) that the paired _CHECK_DEBUG_OFF() consumes, so
+ * both macros must be used in the same scope.  Deliberately NOT wrapped in
+ * a single do/while: the declaration must stay visible to the pair.
+ * Renamed the flag from "__debug__": double-underscore identifiers are
+ * reserved for the implementation (C11 7.1.3).  The _CHECK_* macro names
+ * are technically reserved too but are kept to avoid touching call sites.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                     \
+	int debug_sav = 0;                                                 \
+	do {                                                               \
+		if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) ||       \
+		    (mod)->debug) {                                        \
+			mod##_nthw_set_debug_mode(inst, 0xFF);             \
+			debug_sav = 1;                                     \
+		}                                                          \
+	} while (0)
+
+/* Restore tracing to off iff _CHECK_DEBUG_ON() enabled it. */
+#define _CHECK_DEBUG_OFF(mod, inst)                         \
+	do {                                                \
+		if (debug_sav)                              \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Record the requested trace mode; consulted by the _CHECK_DEBUG_* macros. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	((struct backend_dev_s *)be_dev)->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * Capability/dimension getters.  Each one forwards to the corresponding
+ * info_nthw accessor of the INFO module; 'be_dev' is the opaque backend
+ * handle (a struct backend_dev_s *) registered with the flow API.
+ */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* CAT module is present when its handle was resolved at backend setup. */
+static bool cat_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_cat_nthw != NULL;
+}
+
+/* CAT version word: major number in the high 16 bits, minor in the low 16. */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_cat_nthw->m_cat);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Flush 'cnt' consecutive CFN (categorizer function) records, starting at
+ * 'cat_func', from the shadow copy in 'cat' to hardware.  The field set
+ * written depends on the module version (v18 vs v21/v22).
+ * Fix: the record-count setup call had been garbled to "r(...)"; restored
+ * to cat_nthw_cfn_cnt() to match the kce/kcs/fte/cte flush routines.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			/* Commit this record before moving to the next. */
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			/* v21/v22 additionally carry tunnel checksum/TTL errors. */
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			/* Second KM interface is optional in hardware. */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCE (KM category enable) records starting at 'index'.
+ * v18 has a single KM interface (hard-coded 0); v21/v22 address the
+ * interface given by km_if_idx, with per-interface enable bitmaps.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCS (KM category select) records starting at 'cat_func'.
+ * Same KM-interface addressing scheme as cat_kce_flush().
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' FTE (flow type enable) records starting at 'index'.
+ * Same KM-interface addressing scheme as cat_kce_flush().
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTE (category table enable) records starting at 'cat_func':
+ * per-category enable bits for the downstream modules (COR/HSH/QSL/...).
+ * v22 adds the RRB enable bit; v18 layout is reused for v21.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			/* NOTE(review): the rrb value is written through the
+			 * tpe enable accessor - looks like a copy/paste slip;
+			 * should this be cat_nthw_cte_enable_rrb()?  Confirm
+			 * against flow_nthw_cat.h before changing.
+			 */
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CTS (category-to-color-table select) records from 'index'.
+ * The v18 record view is used for all supported versions - presumably the
+ * layout is unchanged through v22; confirm against flow_api_backend.h.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' COT (color table) records starting at 'cat_func'.
+ * v18 record view reused for all supported versions (see cat_cts_flush).
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCT (color control table) records starting at 'index'.
+ * v18 record view reused for all supported versions (see cat_cts_flush).
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' EXO (extractor offset) records starting at 'ext_index'.
+ * v18 record view reused for all supported versions (see cat_cts_flush).
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' RCK records starting at 'index'.
+ * v18 record view reused for all supported versions (see cat_cts_flush).
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' LEN (frame length match) records starting at 'len_index'.
+ * v18 record view reused for all supported versions (see cat_cts_flush).
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' KCC CAM (key/category/id) records starting at 'len_index'.
+ * v18 record view reused for all supported versions (see cat_cts_flush).
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCE records starting at 'len_index'.  v22-only table;
+ * silently a no-op on earlier module versions.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Flush 'cnt' CCS records starting at 'len_index'.  v22-only table;
+ * each record carries an enable flag plus a value per downstream module
+ * and three side-band type/data pairs.  No-op on earlier versions.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* KM module is present when its handle was resolved at backend setup. */
+static bool km_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_km_nthw != NULL;
+}
+
+/* KM module version, packed as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(bdev->p_km_nthw->m_km);
+	const uint32_t minor = module_get_minor_version(bdev->p_km_nthw->m_km);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write KM RCP records [category .. category + cnt - 1] to hardware.
+ * Only the v7 register layout is handled; other versions are a no-op.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, km, bdev->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* one shadow record per select/flush cycle */
+		km_nthw_rcp_cnt(bdev->p_km_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int entry = category + idx;
+
+			km_nthw_rcp_select(bdev->p_km_nthw, entry);
+			km_nthw_rcp_qw0_dyn(bdev->p_km_nthw, km->v7.rcp[entry].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(bdev->p_km_nthw, km->v7.rcp[entry].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(bdev->p_km_nthw, km->v7.rcp[entry].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(bdev->p_km_nthw, km->v7.rcp[entry].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(bdev->p_km_nthw, km->v7.rcp[entry].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(bdev->p_km_nthw, km->v7.rcp[entry].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(bdev->p_km_nthw, km->v7.rcp[entry].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(bdev->p_km_nthw, km->v7.rcp[entry].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(bdev->p_km_nthw, km->v7.rcp[entry].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(bdev->p_km_nthw, km->v7.rcp[entry].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(bdev->p_km_nthw, km->v7.rcp[entry].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(bdev->p_km_nthw, km->v7.rcp[entry].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(bdev->p_km_nthw, km->v7.rcp[entry].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(bdev->p_km_nthw, km->v7.rcp[entry].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(bdev->p_km_nthw, km->v7.rcp[entry].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(bdev->p_km_nthw, km->v7.rcp[entry].dw10_sel_b);
+			km_nthw_rcp_swx_cch(bdev->p_km_nthw, km->v7.rcp[entry].swx_cch);
+			km_nthw_rcp_swx_sel_a(bdev->p_km_nthw, km->v7.rcp[entry].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(bdev->p_km_nthw, km->v7.rcp[entry].swx_sel_b);
+			km_nthw_rcp_mask_d_a(bdev->p_km_nthw, km->v7.rcp[entry].mask_d_a);
+			km_nthw_rcp_mask_b(bdev->p_km_nthw, km->v7.rcp[entry].mask_b);
+			km_nthw_rcp_dual(bdev->p_km_nthw, km->v7.rcp[entry].dual);
+			km_nthw_rcp_paired(bdev->p_km_nthw, km->v7.rcp[entry].paired);
+			km_nthw_rcp_el_a(bdev->p_km_nthw, km->v7.rcp[entry].el_a);
+			km_nthw_rcp_el_b(bdev->p_km_nthw, km->v7.rcp[entry].el_b);
+			km_nthw_rcp_info_a(bdev->p_km_nthw, km->v7.rcp[entry].info_a);
+			km_nthw_rcp_info_b(bdev->p_km_nthw, km->v7.rcp[entry].info_b);
+			km_nthw_rcp_ftm_a(bdev->p_km_nthw, km->v7.rcp[entry].ftm_a);
+			km_nthw_rcp_ftm_b(bdev->p_km_nthw, km->v7.rcp[entry].ftm_b);
+			km_nthw_rcp_bank_a(bdev->p_km_nthw, km->v7.rcp[entry].bank_a);
+			km_nthw_rcp_bank_b(bdev->p_km_nthw, km->v7.rcp[entry].bank_b);
+			km_nthw_rcp_kl_a(bdev->p_km_nthw, km->v7.rcp[entry].kl_a);
+			km_nthw_rcp_kl_b(bdev->p_km_nthw, km->v7.rcp[entry].kl_b);
+			km_nthw_rcp_keyway_a(bdev->p_km_nthw, km->v7.rcp[entry].keyway_a);
+			km_nthw_rcp_keyway_b(bdev->p_km_nthw, km->v7.rcp[entry].keyway_b);
+			km_nthw_rcp_synergy_mode(bdev->p_km_nthw, km->v7.rcp[entry].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(bdev->p_km_nthw, km->v7.rcp[entry].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(bdev->p_km_nthw, km->v7.rcp[entry].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(bdev->p_km_nthw, km->v7.rcp[entry].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(bdev->p_km_nthw, km->v7.rcp[entry].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(bdev->p_km_nthw, km->v7.rcp[entry].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(bdev->p_km_nthw, km->v7.rcp[entry].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(bdev->p_km_nthw, km->v7.rcp[entry].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(bdev->p_km_nthw, km->v7.rcp[entry].sw5_b_ofs);
+			km_nthw_rcp_flush(bdev->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, bdev->p_km_nthw);
+	return 0;
+}
+
+/* Write KM CAM records starting at (bank, record) to hardware (v7 only). */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, km, bdev->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(bdev->p_km_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			/* address layout: bank << 11 suggests 2048 records/bank — confirm vs HW doc */
+			const int adr = (bank << 11) + record + idx;
+
+			km_nthw_cam_select(bdev->p_km_nthw, adr);
+			km_nthw_cam_w0(bdev->p_km_nthw, km->v7.cam[adr].w0);
+			km_nthw_cam_w1(bdev->p_km_nthw, km->v7.cam[adr].w1);
+			km_nthw_cam_w2(bdev->p_km_nthw, km->v7.cam[adr].w2);
+			km_nthw_cam_w3(bdev->p_km_nthw, km->v7.cam[adr].w3);
+			km_nthw_cam_w4(bdev->p_km_nthw, km->v7.cam[adr].w4);
+			km_nthw_cam_w5(bdev->p_km_nthw, km->v7.cam[adr].w5);
+			km_nthw_cam_ft0(bdev->p_km_nthw, km->v7.cam[adr].ft0);
+			km_nthw_cam_ft1(bdev->p_km_nthw, km->v7.cam[adr].ft1);
+			km_nthw_cam_ft2(bdev->p_km_nthw, km->v7.cam[adr].ft2);
+			km_nthw_cam_ft3(bdev->p_km_nthw, km->v7.cam[adr].ft3);
+			km_nthw_cam_ft4(bdev->p_km_nthw, km->v7.cam[adr].ft4);
+			km_nthw_cam_ft5(bdev->p_km_nthw, km->v7.cam[adr].ft5);
+			km_nthw_cam_flush(bdev->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, bdev->p_km_nthw);
+	return 0;
+}
+
+/* Write dirty KM TCAM entries to hardware and clear their dirty marks. */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, km, bdev->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* 4 byte planes of 256 values per bank */
+		const int base = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(bdev->p_km_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			if (!km->v7.tcam[base + idx].dirty)
+				continue;
+			km_nthw_tcam_select(bdev->p_km_nthw, base + idx);
+			km_nthw_tcam_t(bdev->p_km_nthw, km->v7.tcam[base + idx].t);
+			km_nthw_tcam_flush(bdev->p_km_nthw);
+			km->v7.tcam[base + idx].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, bdev->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, km, bdev->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* 72 entries per TCAM bank */
+		km_nthw_tci_cnt(bdev->p_km_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = bank * 72 + index + idx;
+
+			km_nthw_tci_select(bdev->p_km_nthw, adr);
+			km_nthw_tci_color(bdev->p_km_nthw, km->v7.tci[adr].color);
+			km_nthw_tci_ft(bdev->p_km_nthw, km->v7.tci[adr].ft);
+			km_nthw_tci_flush(bdev->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, bdev->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width in version 3 = 72 */
+		/* NOTE(review): comment says "version 3" but the code checks ver == 7 — confirm */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* An FLM module is present when its hardware handle was instantiated. */
+static bool flm_get_present(void *be_dev)
+{
+	const struct backend_dev_s *bdev = (const struct backend_dev_s *)be_dev;
+
+	return bdev->p_flm_nthw != NULL;
+}
+
+/* FLM module version, packed as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(bdev->p_flm_nthw->m_flm);
+	const uint32_t minor = module_get_minor_version(bdev->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Push the FLM control shadow registers to hardware (v17+ layout only). */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(bdev->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(bdev->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(bdev->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(bdev->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(bdev->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(bdev->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(bdev->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(bdev->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(bdev->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(bdev->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(bdev->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(bdev->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(bdev->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(bdev->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(bdev->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(bdev->p_flm_nthw, flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Write the writable FLM status fields back to hardware (v17+ only). */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE and EFT_BP are read-only, so not written here */
+		flm_nthw_status_critical(bdev->p_flm_nthw, &flm->v17.status->critical, 0);
+		flm_nthw_status_panic(bdev->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(bdev->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Read back all FLM status fields from hardware into the shadow struct. */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(bdev->p_flm_nthw);
+		flm_nthw_status_calibdone(bdev->p_flm_nthw, &flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(bdev->p_flm_nthw, &flm->v17.status->initdone, 1);
+		flm_nthw_status_idle(bdev->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(bdev->p_flm_nthw, &flm->v17.status->critical, 1);
+		flm_nthw_status_panic(bdev->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(bdev->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(bdev->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Push the FLM timeout register to hardware (v17+ only). */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(bdev->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Push the FLM scrub interval register to hardware (v17+ only). */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(bdev->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Push the FLM BIN load register to hardware (v17+ only). */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(bdev->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Push the FLM PPS load register to hardware (v17+ only). */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(bdev->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Push the FLM LPS load register to hardware (v17+ only). */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(bdev->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Push the FLM APS load register to hardware (v17+ only). */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(bdev->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Push the FLM priority limits and flow types to hardware (v17+ only). */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(bdev->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(bdev->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(bdev->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(bdev->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(bdev->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(bdev->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(bdev->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(bdev->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(bdev->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Write FLM PST records [index .. index + cnt - 1] to hardware (v17+ only). */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(bdev->p_flm_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int entry = index + idx;
+
+			flm_nthw_pst_select(bdev->p_flm_nthw, entry);
+			flm_nthw_pst_bp(bdev->p_flm_nthw, flm->v17.pst[entry].bp);
+			flm_nthw_pst_pp(bdev->p_flm_nthw, flm->v17.pst[entry].pp);
+			flm_nthw_pst_tp(bdev->p_flm_nthw, flm->v17.pst[entry].tp);
+			flm_nthw_pst_flush(bdev->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Write FLM RCP records [index .. index + cnt - 1] to hardware (v17+ only). */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(bdev->p_flm_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int entry = index + idx;
+
+			flm_nthw_rcp_select(bdev->p_flm_nthw, entry);
+			flm_nthw_rcp_lookup(bdev->p_flm_nthw, flm->v17.rcp[entry].lookup);
+			flm_nthw_rcp_qw0_dyn(bdev->p_flm_nthw, flm->v17.rcp[entry].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(bdev->p_flm_nthw, flm->v17.rcp[entry].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(bdev->p_flm_nthw, flm->v17.rcp[entry].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(bdev->p_flm_nthw, flm->v17.rcp[entry].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(bdev->p_flm_nthw, flm->v17.rcp[entry].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(bdev->p_flm_nthw, flm->v17.rcp[entry].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(bdev->p_flm_nthw, flm->v17.rcp[entry].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(bdev->p_flm_nthw, flm->v17.rcp[entry].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(bdev->p_flm_nthw, flm->v17.rcp[entry].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(bdev->p_flm_nthw, flm->v17.rcp[entry].sw9_ofs);
+			flm_nthw_rcp_mask(bdev->p_flm_nthw, flm->v17.rcp[entry].mask);
+			flm_nthw_rcp_kid(bdev->p_flm_nthw, flm->v17.rcp[entry].kid);
+			flm_nthw_rcp_opn(bdev->p_flm_nthw, flm->v17.rcp[entry].opn);
+			flm_nthw_rcp_ipn(bdev->p_flm_nthw, flm->v17.rcp[entry].ipn);
+			flm_nthw_rcp_byt_dyn(bdev->p_flm_nthw, flm->v17.rcp[entry].byt_dyn);
+			flm_nthw_rcp_byt_ofs(bdev->p_flm_nthw, flm->v17.rcp[entry].byt_ofs);
+			flm_nthw_rcp_txplm(bdev->p_flm_nthw, flm->v17.rcp[entry].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(bdev->p_flm_nthw, flm->v17.rcp[entry].auto_ipv4_mask);
+			flm_nthw_rcp_flush(bdev->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/* Refresh the FLM buffer-control counters from hardware (v17+ only). */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(bdev->p_flm_nthw,
+					 &flm->v17.buf_ctrl->lrn_free,
+					 &flm->v17.buf_ctrl->inf_avail,
+					 &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read all FLM statistics counters from hardware into the shadow structs.
+ * For each counter the *_update() call latches the hardware value and the
+ * *_cnt(..., 1) call copies it into the corresponding v17/v20 field.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* Latch the v17 counter set */
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		/* Copy latched values into the shadow fields */
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		/* Additional counters introduced with the v20 layout */
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Push a learn-data block to the FLM and refresh the buffer-control counters. */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	int rc;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	rc = flm_nthw_lrn_data_flush(bdev->p_flm_nthw, lrn_data, size,
+				     &flm->v17.buf_ctrl->lrn_free,
+				     &flm->v17.buf_ctrl->inf_avail,
+				     &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return rc;
+}
+
+/* Read an info-data block from the FLM and refresh the buffer-control counters. */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	int rc;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	rc = flm_nthw_inf_data_update(bdev->p_flm_nthw, inf_data, size,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return rc;
+}
+
+/* Read a status-data block from the FLM and refresh the buffer-control counters. */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	int rc;
+
+	_CHECK_DEBUG_ON(bdev, flm, bdev->p_flm_nthw);
+
+	rc = flm_nthw_sta_data_update(bdev->p_flm_nthw, sta_data, size,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, bdev->p_flm_nthw);
+	return rc;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* An HSH module is present when its hardware handle was instantiated. */
+static bool hsh_get_present(void *be_dev)
+{
+	const struct backend_dev_s *bdev = (const struct backend_dev_s *)be_dev;
+
+	return bdev->p_hsh_nthw != NULL;
+}
+
+/* HSH module version, packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(bdev->p_hsh_nthw->m_hsh);
+	const uint32_t minor = module_get_minor_version(bdev->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Write HSH RCP records [category .. category + cnt - 1] to hardware (v5 only). */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, hsh, bdev->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(bdev->p_hsh_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int entry = category + idx;
+
+			hsh_nthw_rcp_select(bdev->p_hsh_nthw, entry);
+			hsh_nthw_rcp_load_dist_type(bdev->p_hsh_nthw, hsh->v5.rcp[entry].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(bdev->p_hsh_nthw, hsh->v5.rcp[entry].mac_port_mask);
+			hsh_nthw_rcp_sort(bdev->p_hsh_nthw, hsh->v5.rcp[entry].sort);
+			hsh_nthw_rcp_qw0_pe(bdev->p_hsh_nthw, hsh->v5.rcp[entry].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(bdev->p_hsh_nthw, hsh->v5.rcp[entry].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(bdev->p_hsh_nthw, hsh->v5.rcp[entry].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(bdev->p_hsh_nthw, hsh->v5.rcp[entry].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(bdev->p_hsh_nthw, hsh->v5.rcp[entry].w8_pe);
+			hsh_nthw_rcp_w8_ofs(bdev->p_hsh_nthw, hsh->v5.rcp[entry].w8_ofs);
+			hsh_nthw_rcp_w8_sort(bdev->p_hsh_nthw, hsh->v5.rcp[entry].w8_sort);
+			hsh_nthw_rcp_w9_pe(bdev->p_hsh_nthw, hsh->v5.rcp[entry].w9_pe);
+			hsh_nthw_rcp_w9_ofs(bdev->p_hsh_nthw, hsh->v5.rcp[entry].w9_ofs);
+			hsh_nthw_rcp_w9_sort(bdev->p_hsh_nthw, hsh->v5.rcp[entry].w9_sort);
+			hsh_nthw_rcp_w9_p(bdev->p_hsh_nthw, hsh->v5.rcp[entry].w9_p);
+			hsh_nthw_rcp_p_mask(bdev->p_hsh_nthw, hsh->v5.rcp[entry].p_mask);
+			hsh_nthw_rcp_word_mask(bdev->p_hsh_nthw, hsh->v5.rcp[entry].word_mask);
+			hsh_nthw_rcp_seed(bdev->p_hsh_nthw, hsh->v5.rcp[entry].seed);
+			hsh_nthw_rcp_tnl_p(bdev->p_hsh_nthw, hsh->v5.rcp[entry].tnl_p);
+			hsh_nthw_rcp_hsh_valid(bdev->p_hsh_nthw, hsh->v5.rcp[entry].hsh_valid);
+			hsh_nthw_rcp_hsh_type(bdev->p_hsh_nthw, hsh->v5.rcp[entry].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(bdev->p_hsh_nthw, hsh->v5.rcp[entry].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(bdev->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, bdev->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* An HST module is present when its hardware handle was instantiated. */
+static bool hst_get_present(void *be_dev)
+{
+	const struct backend_dev_s *bdev = (const struct backend_dev_s *)be_dev;
+
+	return bdev->p_hst_nthw != NULL;
+}
+
+/* HST module version, packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(bdev->p_hst_nthw->m_hst);
+	const uint32_t minor = module_get_minor_version(bdev->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Write HST RCP records [category .. category + cnt - 1] to hardware (v2 only). */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, hst, bdev->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(bdev->p_hst_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int entry = category + idx;
+
+			hst_nthw_rcp_select(bdev->p_hst_nthw, entry);
+			hst_nthw_rcp_strip_mode(bdev->p_hst_nthw, hst->v2.rcp[entry].strip_mode);
+			hst_nthw_rcp_start_dyn(bdev->p_hst_nthw, hst->v2.rcp[entry].start_dyn);
+			hst_nthw_rcp_start_ofs(bdev->p_hst_nthw, hst->v2.rcp[entry].start_ofs);
+			hst_nthw_rcp_end_dyn(bdev->p_hst_nthw, hst->v2.rcp[entry].end_dyn);
+			hst_nthw_rcp_end_ofs(bdev->p_hst_nthw, hst->v2.rcp[entry].end_ofs);
+			hst_nthw_rcp_modif0_cmd(bdev->p_hst_nthw, hst->v2.rcp[entry].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(bdev->p_hst_nthw, hst->v2.rcp[entry].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(bdev->p_hst_nthw, hst->v2.rcp[entry].modif0_ofs);
+			hst_nthw_rcp_modif0_value(bdev->p_hst_nthw, hst->v2.rcp[entry].modif0_value);
+			hst_nthw_rcp_modif1_cmd(bdev->p_hst_nthw, hst->v2.rcp[entry].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(bdev->p_hst_nthw, hst->v2.rcp[entry].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(bdev->p_hst_nthw, hst->v2.rcp[entry].modif1_ofs);
+			hst_nthw_rcp_modif1_value(bdev->p_hst_nthw, hst->v2.rcp[entry].modif1_value);
+			hst_nthw_rcp_modif2_cmd(bdev->p_hst_nthw, hst->v2.rcp[entry].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(bdev->p_hst_nthw, hst->v2.rcp[entry].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(bdev->p_hst_nthw, hst->v2.rcp[entry].modif2_ofs);
+			hst_nthw_rcp_modif2_value(bdev->p_hst_nthw, hst->v2.rcp[entry].modif2_value);
+			hst_nthw_rcp_flush(bdev->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, bdev->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* A QSL module is present when its hardware handle was instantiated. */
+static bool qsl_get_present(void *be_dev)
+{
+	const struct backend_dev_s *bdev = (const struct backend_dev_s *)be_dev;
+
+	return bdev->p_qsl_nthw != NULL;
+}
+
+/* QSL module version, packed as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+	const uint32_t major = module_get_major_version(bdev->p_qsl_nthw->m_qsl);
+	const uint32_t minor = module_get_minor_version(bdev->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Write QSL RCP records [category .. category + cnt - 1] to hardware (v7 only). */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, qsl, bdev->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(bdev->p_qsl_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int entry = category + idx;
+
+			qsl_nthw_rcp_select(bdev->p_qsl_nthw, entry);
+			qsl_nthw_rcp_discard(bdev->p_qsl_nthw, qsl->v7.rcp[entry].discard);
+			qsl_nthw_rcp_drop(bdev->p_qsl_nthw, qsl->v7.rcp[entry].drop);
+			qsl_nthw_rcp_tbl_lo(bdev->p_qsl_nthw, qsl->v7.rcp[entry].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(bdev->p_qsl_nthw, qsl->v7.rcp[entry].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(bdev->p_qsl_nthw, qsl->v7.rcp[entry].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(bdev->p_qsl_nthw, qsl->v7.rcp[entry].tbl_msk);
+			qsl_nthw_rcp_lr(bdev->p_qsl_nthw, qsl->v7.rcp[entry].lr);
+			qsl_nthw_rcp_tsa(bdev->p_qsl_nthw, qsl->v7.rcp[entry].tsa);
+			qsl_nthw_rcp_vli(bdev->p_qsl_nthw, qsl->v7.rcp[entry].vli);
+			qsl_nthw_rcp_flush(bdev->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, bdev->p_qsl_nthw);
+	return 0;
+}
+
+/* Write QSL QST entries [entry .. entry + cnt - 1] to hardware (v7 only). */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, qsl, bdev->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(bdev->p_qsl_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = entry + idx;
+
+			qsl_nthw_qst_select(bdev->p_qsl_nthw, adr);
+			qsl_nthw_qst_queue(bdev->p_qsl_nthw, qsl->v7.qst[adr].queue);
+			qsl_nthw_qst_en(bdev->p_qsl_nthw, qsl->v7.qst[adr].en);
+			qsl_nthw_qst_tx_port(bdev->p_qsl_nthw, qsl->v7.qst[adr].tx_port);
+			qsl_nthw_qst_lre(bdev->p_qsl_nthw, qsl->v7.qst[adr].lre);
+			qsl_nthw_qst_tci(bdev->p_qsl_nthw, qsl->v7.qst[adr].tci);
+			qsl_nthw_qst_ven(bdev->p_qsl_nthw, qsl->v7.qst[adr].ven);
+			qsl_nthw_qst_flush(bdev->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, bdev->p_qsl_nthw);
+	return 0;
+}
+
+/* Write QSL QEN entries [entry .. entry + cnt - 1] to hardware (v7 only). */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, qsl, bdev->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(bdev->p_qsl_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			qsl_nthw_qen_select(bdev->p_qsl_nthw, entry + idx);
+			qsl_nthw_qen_en(bdev->p_qsl_nthw, qsl->v7.qen[entry + idx].en);
+			qsl_nthw_qen_flush(bdev->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, bdev->p_qsl_nthw);
+	return 0;
+}
+
+/* Write QSL UNMQ entries [entry .. entry + cnt - 1] to hardware (v7 only). */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *bdev = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(bdev, qsl, bdev->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(bdev->p_qsl_nthw, 1);
+		for (int idx = 0; idx < cnt; idx++) {
+			const int adr = entry + idx;
+
+			qsl_nthw_unmq_select(bdev->p_qsl_nthw, adr);
+			qsl_nthw_unmq_dest_queue(bdev->p_qsl_nthw, qsl->v7.unmq[adr].dest_queue);
+			qsl_nthw_unmq_en(bdev->p_qsl_nthw, qsl->v7.unmq[adr].en);
+			qsl_nthw_unmq_flush(bdev->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, bdev->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* True when the SLC module was instantiated for this adapter. */
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_nthw ? true : false;
+}
+
+/* SLC module version: major in bits 31..16, minor in bits 15..0. */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_nthw->m_slc);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' SLC recipe (RCP) entries, starting at 'category', to the
+ * slicer module.  Per entry: select, write tail-slice fields, flush.
+ * Only SLC version 1 is handled; other versions are silently skipped.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* True when the SLC-LR module was instantiated for this adapter. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_lr_nthw ? true : false;
+}
+
+/* SLC-LR module version: major in bits 31..16, minor in bits 15..0. */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_lr_nthw->m_slc_lr);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' SLC-LR recipe (RCP) entries, starting at 'category'.
+ * Per entry: select, write tail-slice fields, flush.  Only SLC-LR
+ * version 2 is handled; other versions are silently skipped.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* True when the PDB module was instantiated for this adapter. */
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_pdb_nthw ? true : false;
+}
+
+/* PDB module version: major in bits 31..16, minor in bits 15..0. */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_pdb_nthw->m_pdb);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' PDB (packet descriptor builder) recipe entries, starting
+ * at 'category'.  Per entry: select, write all descriptor fields, flush.
+ * Only PDB version 9 is handled; other versions are silently skipped.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * FIXME(review): pdb_nthw_rcp_duplicate_bit() is called
+			 * a second time here, but with the pcap_keep_fcs value.
+			 * This looks like a copy/paste error - the second call
+			 * overwrites duplicate_bit, and pcap_keep_fcs is never
+			 * written to its own register.  Confirm whether a
+			 * pcap_keep_fcs setter was intended.
+			 */
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the global PDB configuration (timestamp format and port offset)
+ * and flush it to hardware.  Only PDB version 9 is handled.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* True when the IOA module was instantiated for this adapter. */
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_ioa_nthw ? true : false;
+}
+
+/* IOA module version: major in bits 31..16, minor in bits 15..0. */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_ioa_nthw->m_ioa);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' IOA recipe entries (VLAN pop/push and tunnel-pop actions),
+ * starting at 'category'.  Per entry: select, write fields, flush.
+ * Only IOA version 4 is handled; other versions are silently skipped.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write the two custom VLAN TPID values to the IOA module and flush.
+ * Only IOA version 4 is handled.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' ROA egress-packet-processing (EPP) entries, starting at
+ * 'index', via the IOA module.  Per entry: select, write push-tunnel
+ * and tx-port, flush.  Only IOA version 4 is handled.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* True when the ROA module was instantiated for this adapter. */
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw ? true : false;
+}
+
+/* ROA module version: major in bits 31..16, minor in bits 15..0. */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_roa_nthw->m_roa);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' tunnel header templates to the ROA module, starting at
+ * hardware record 'index'.  Each software tunnel header entry is split
+ * across 4 hardware records of 4 words each: the outer loop walks the
+ * entries, the inner loop the 4-word chunks.  Note that 'index' is a
+ * hardware-record index (hence 'index / 4' to locate the source entry)
+ * while 'cnt' counts whole entries.  Only ROA version 6 is handled.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* 4 words are transferred per flush */
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' tunnel configuration entries, starting at 'category', to
+ * the ROA module.  Per entry: select, write all tunnel/checksum/recirc
+ * fields, flush.  Only ROA version 6 is handled.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the global ROA forwarding configuration and flush it to
+ * hardware.  Only ROA version 6 is handled.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' LAG configuration entries (TX physical port mapping),
+ * starting at 'index', to the ROA module.  Only ROA version 6 is
+ * handled.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* True when the RMC module was instantiated for this adapter. */
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw ? true : false;
+}
+
+/* RMC module version: major in bits 31..16, minor in bits 15..0. */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_rmc_nthw->m_rmc);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_rmc_nthw->m_rmc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the RMC control block (blocking masks and LAG setting) and
+ * flush.  The version test 0x10003 matches v1.3 in the major<<16|minor
+ * encoding used by the *_get_version functions (cf. the v1_3 member).
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/*
+ * TPE is only considered present when all six of its sub-modules (CSU,
+ * HFU, RPP_LR, TX_CPY, TX_INS, TX_RPL) were instantiated.
+ */
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	if (be->p_csu_nthw == NULL || be->p_hfu_nthw == NULL)
+		return false;
+	if (be->p_rpp_lr_nthw == NULL || be->p_tx_cpy_nthw == NULL)
+		return false;
+	return be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Derive the combined TPE backend version from the versions of its six
+ * sub-modules (each encoded major<<16|minor).  Only two combinations
+ * are recognized, mapped to 1 and 2; they differ only in the RPP_LR
+ * version.  Any other combination trips the assert below - note that
+ * under NDEBUG the assert is a no-op and 0 is returned instead.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	/* unknown sub-module version combination */
+	assert(false);
+	return 0;
+}
+
+/*
+ * Write 'cnt' RPP_LR recipe entries (expansion field), starting at
+ * 'index'.  Handled for RPP_LR version >= 1; older versions are
+ * silently skipped.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' RPP_LR IFR recipe entries (enable + MTU), starting at
+ * 'index'.  Requires RPP_LR version >= 2; returns -1 for older
+ * versions (unlike most flush functions, which skip silently).
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Write 'cnt' IFR recipe entries (enable + MTU), starting at 'index',
+ * to the standalone IFR module.  Requires version >= 2; returns -1 for
+ * older versions.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Write 'cnt' TX_INS recipe entries (dyn/ofs/len insert parameters),
+ * starting at 'index'.  Handled for TX_INS version >= 1.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_RPL recipe entries (replace parameters and pointer),
+ * starting at 'index'.  Handled for TX_RPL version >= 1.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_RPL extension entries (replace pointers), starting at
+ * 'index'.  Handled for TX_RPL version >= 1.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_RPL replacement-value entries, starting at 'index'.
+ * Handled for TX_RPL version >= 1.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' TX_CPY recipe entries, starting at 'index'.  The flat
+ * recipe array is spread across several hardware writers, each holding
+ * nb_rcp_categories recipes; the writer index and the per-writer slot
+ * are derived from the flat index by division/modulo.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/*
+	 * Sentinel: -1 wraps to UINT_MAX, so the first comparison below
+	 * (int promoted to unsigned) is always unequal and the writer
+	 * count gets programmed on the first iteration.
+	 */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			/* program the count once per writer change */
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' HFU (header field update) recipe entries, starting at
+ * 'index'.  Each entry programs three length-update field groups
+ * (len_a/len_b/len_c), TTL update, checksum/protocol info and the
+ * outer/inner L3/L4 offsets, then flushes.  Handled for HFU version
+ * >= 1.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CSU (checksum update) recipe entries, starting at
+ * 'index'.  Each entry programs the outer/inner L3/L4 checksum
+ * commands, then flushes.  Handled for CSU version >= 1.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Stub: Rx queue allocation is not supported by this backend; always
+ * fails with -1.  NOTE(review): uses printf instead of the driver's
+ * logging facility - consider switching to the PMD log macros.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Stub: Rx queue release is not supported by this backend.
+ * NOTE(review): prints an error yet returns 0 (success), which is
+ * inconsistent with alloc_rx_queue() returning -1 - confirm whether
+ * callers rely on the 0 return before changing it.
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table handed to the generic flow API layer.  This
+ * is a positional initializer: the entry order must match the member
+ * order of struct flow_api_backend_ops exactly - keep the groups below
+ * in sync with the struct declaration when adding entries.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	/* presumably the backend API/interface version - confirm against
+	 * the struct declaration
+	 */
+	1,
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	/* Rx queue management (stubs in this backend) */
+	alloc_rx_queue,
+	free_rx_queue,
+
+	/* CAT module */
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	/* KM module */
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	/* FLM module */
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	/* HSH module */
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	/* HST module */
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	/* QSL module */
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	/* SLC module */
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	/* SLC LR module */
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	/* PDB module */
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	/* IOA module */
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	/* ROA module */
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	/* RMC module */
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	/* TPE module */
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Set up the binary flow backend for one adapter: detect which nthw
+ * modules are present in the FPGA and attach an instance for each.
+ * Returns the backend ops table; *dev receives the per-adapter state.
+ *
+ * Each *_nthw_init() is first invoked with a NULL instance, which acts
+ * as a presence check for the module; only on success (0) is a real
+ * instance created and initialized.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+	struct backend_dev_s *be = &be_devs[adapter_no];
+
+	/* INFO module is always present */
+	struct info_nthw *p_info = info_nthw_new();
+
+	info_nthw_init(p_info, p_fpga, adapter_no);
+	be->p_info_nthw = p_info;
+
+	/* Optional modules: default to absent, attach when detected */
+	/* nthw CAT */
+	be->p_cat_nthw = NULL;
+	if (cat_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct cat_nthw *p = cat_nthw_new();
+
+		cat_nthw_init(p, p_fpga, adapter_no);
+		be->p_cat_nthw = p;
+	}
+	/* nthw KM */
+	be->p_km_nthw = NULL;
+	if (km_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct km_nthw *p = km_nthw_new();
+
+		km_nthw_init(p, p_fpga, adapter_no);
+		be->p_km_nthw = p;
+	}
+	/* nthw FLM */
+	be->p_flm_nthw = NULL;
+	if (flm_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct flm_nthw *p = flm_nthw_new();
+
+		flm_nthw_init(p, p_fpga, adapter_no);
+		be->p_flm_nthw = p;
+	}
+	/* nthw IFR */
+	be->p_ifr_nthw = NULL;
+	if (ifr_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct ifr_nthw *p = ifr_nthw_new();
+
+		ifr_nthw_init(p, p_fpga, adapter_no);
+		be->p_ifr_nthw = p;
+	}
+	/* nthw HSH */
+	be->p_hsh_nthw = NULL;
+	if (hsh_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct hsh_nthw *p = hsh_nthw_new();
+
+		hsh_nthw_init(p, p_fpga, adapter_no);
+		be->p_hsh_nthw = p;
+	}
+	/* nthw HST */
+	be->p_hst_nthw = NULL;
+	if (hst_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct hst_nthw *p = hst_nthw_new();
+
+		hst_nthw_init(p, p_fpga, adapter_no);
+		be->p_hst_nthw = p;
+	}
+	/* nthw QSL */
+	be->p_qsl_nthw = NULL;
+	if (qsl_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct qsl_nthw *p = qsl_nthw_new();
+
+		qsl_nthw_init(p, p_fpga, adapter_no);
+		be->p_qsl_nthw = p;
+	}
+	/* nthw SLC */
+	be->p_slc_nthw = NULL;
+	if (slc_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct slc_nthw *p = slc_nthw_new();
+
+		slc_nthw_init(p, p_fpga, adapter_no);
+		be->p_slc_nthw = p;
+	}
+	/* nthw SLC LR */
+	be->p_slc_lr_nthw = NULL;
+	if (slc_lr_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct slc_lr_nthw *p = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(p, p_fpga, adapter_no);
+		be->p_slc_lr_nthw = p;
+	}
+	/* nthw PDB */
+	be->p_pdb_nthw = NULL;
+	if (pdb_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct pdb_nthw *p = pdb_nthw_new();
+
+		pdb_nthw_init(p, p_fpga, adapter_no);
+		be->p_pdb_nthw = p;
+	}
+	/* nthw IOA */
+	be->p_ioa_nthw = NULL;
+	if (ioa_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct ioa_nthw *p = ioa_nthw_new();
+
+		ioa_nthw_init(p, p_fpga, adapter_no);
+		be->p_ioa_nthw = p;
+	}
+	/* nthw ROA */
+	be->p_roa_nthw = NULL;
+	if (roa_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct roa_nthw *p = roa_nthw_new();
+
+		roa_nthw_init(p, p_fpga, adapter_no);
+		be->p_roa_nthw = p;
+	}
+	/* nthw RMC */
+	be->p_rmc_nthw = NULL;
+	if (rmc_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct rmc_nthw *p = rmc_nthw_new();
+
+		rmc_nthw_init(p, p_fpga, adapter_no);
+		be->p_rmc_nthw = p;
+	}
+	/* nthw HFU */
+	be->p_hfu_nthw = NULL;
+	if (hfu_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct hfu_nthw *p = hfu_nthw_new();
+
+		hfu_nthw_init(p, p_fpga, adapter_no);
+		be->p_hfu_nthw = p;
+	}
+	/* nthw RPP_LR */
+	be->p_rpp_lr_nthw = NULL;
+	if (rpp_lr_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct rpp_lr_nthw *p = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(p, p_fpga, adapter_no);
+		be->p_rpp_lr_nthw = p;
+	}
+	/* nthw TX_CPY */
+	be->p_tx_cpy_nthw = NULL;
+	if (tx_cpy_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct tx_cpy_nthw *p = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(p, p_fpga, adapter_no);
+		be->p_tx_cpy_nthw = p;
+	}
+	/* nthw CSU */
+	be->p_csu_nthw = NULL;
+	if (csu_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct csu_nthw *p = csu_nthw_new();
+
+		csu_nthw_init(p, p_fpga, adapter_no);
+		be->p_csu_nthw = p;
+	}
+	/* nthw TX_INS */
+	be->p_tx_ins_nthw = NULL;
+	if (tx_ins_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct tx_ins_nthw *p = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(p, p_fpga, adapter_no);
+		be->p_tx_ins_nthw = p;
+	}
+	/* nthw TX_RPL */
+	be->p_tx_rpl_nthw = NULL;
+	if (tx_rpl_nthw_init(NULL, p_fpga, adapter_no) == 0) {
+		struct tx_rpl_nthw *p = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(p, p_fpga, adapter_no);
+		be->p_tx_rpl_nthw = p;
+	}
+	be->adapter_no = adapter_no;
+	*dev = (void *)be;
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down all nthw module instances attached by bin_flow_backend_init().
+ * NOTE(review): module pointers may be NULL for modules not present in
+ * the FPGA; the *_delete helpers are assumed to tolerate NULL — confirm.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *p_bdev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(p_bdev->p_info_nthw);
+	cat_nthw_delete(p_bdev->p_cat_nthw);
+	km_nthw_delete(p_bdev->p_km_nthw);
+	flm_nthw_delete(p_bdev->p_flm_nthw);
+	hsh_nthw_delete(p_bdev->p_hsh_nthw);
+	hst_nthw_delete(p_bdev->p_hst_nthw);
+	qsl_nthw_delete(p_bdev->p_qsl_nthw);
+	slc_nthw_delete(p_bdev->p_slc_nthw);
+	slc_lr_nthw_delete(p_bdev->p_slc_lr_nthw);
+	pdb_nthw_delete(p_bdev->p_pdb_nthw);
+	ioa_nthw_delete(p_bdev->p_ioa_nthw);
+	roa_nthw_delete(p_bdev->p_roa_nthw);
+	rmc_nthw_delete(p_bdev->p_rmc_nthw);
+	csu_nthw_delete(p_bdev->p_csu_nthw);
+	hfu_nthw_delete(p_bdev->p_hfu_nthw);
+	rpp_lr_nthw_delete(p_bdev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(p_bdev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(p_bdev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(p_bdev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Initialize the flow filter API for one adapter.
+ * On success returns 0 and stores the new flow device in *p_flow_device;
+ * on failure returns -1 and sets *p_flow_device to NULL.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		/*
+		 * Release the backend instances allocated by
+		 * bin_flow_backend_init() above; without this they
+		 * would leak on the error path.
+		 */
+		if (be_dev)
+			bin_flow_backend_done(be_dev);
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Shut down the flow filter API and release its backend.
+ * Returns the status of flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	/* Grab the backend handle before the device is torn down */
+	void *p_be = flow_api_get_be_dev(dev);
+	int ret = flow_api_done(dev);
+
+	if (p_be)
+		bin_flow_backend_done(p_be);
+	return ret;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_H__
+#define __FLOW_FILTER_H__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-09-01 12:18   ` [PATCH v13 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-09-01 12:18   ` Mykola Kostenok
  2023-09-01 12:18   ` [PATCH v13 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
v10:
* Fix wrong queue id range.
v11:
* Repace stdatomic by compiler build-in atomic.
v13:
* Fix typo spelling warnings
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  355 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1235 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12501 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index a926155f26..87ac68ee24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VF’s are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather support for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process support.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state for the 4GA generation of Napatech hardware. */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;	/* presumably the PCI identity of the device — confirm */
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;	/* driver name string; ownership not visible here */
+
+	/* volatile flag — NOTE(review): looks like a stop signal polled by
+	 * the threads below; confirm against the thread bodies.
+	 */
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;	/* presumably guards statistics state */
+	pthread_t stat_thread;	/* statistics worker thread handle */
+	pthread_t flm_thread;	/* FLM worker thread handle */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;	/* index of this adapter */
+	struct rte_pci_device *p_dev;	/* owning DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;	/* embedded per-adapter driver state */
+
+	int n_eth_dev_init_count;	/* count of initialized eth devices */
+	int probe_finished;	/* nonzero once probing completed */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+/* Split-ring available ring (driver -> device); trailing flexible array
+ * holds one descriptor index per queue entry.
+ */
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+/* Split-ring used ring (device -> driver). */
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/* State for one hardware-backed virtqueue (RX or TX). The anonymous union
+ * holds either the SPLIT or the PACKED ring layout, selected by vq_type.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;	/* UNUSED / UNMANAGED / MANAGED */
+	uint16_t vq_type;	/* split vs packed; see PACKED() macro */
+	uint16_t in_order;
+	int irq_vector;
+
+	nthw_dbs_t *mp_nthw_dbs;	/* owning DBS module instance */
+	uint32_t index;	/* queue index within the DBS module */
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;	/* physical addresses handed to the FPGA */
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/* Initialize one RX queue in the DBS module, busy-waiting for the
+ * hardware init handshake before and after issuing the request.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy_flag;
+	uint32_t init_val;
+	uint32_t unused;
+
+	/* Wait until any previous init operation has completed */
+	do {
+		get_rx_init(p_nthw_dbs, &init_val, &unused, &busy_flag);
+	} while (busy_flag);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait until this init operation has completed */
+	do {
+		get_rx_init(p_nthw_dbs, &init_val, &unused, &busy_flag);
+	} while (busy_flag);
+}
+
+/* Initialize one TX queue in the DBS module, busy-waiting for the
+ * hardware init handshake before and after issuing the request.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy_flag;
+	uint32_t init_val;
+	uint32_t unused;
+
+	/* Wait until any previous init operation has completed */
+	do {
+		get_tx_init(p_nthw_dbs, &init_val, &unused, &busy_flag);
+	} while (busy_flag);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait until this init operation has completed */
+	do {
+		get_tx_init(p_nthw_dbs, &init_val, &unused, &busy_flag);
+	} while (busy_flag);
+}
+
+/*
+ * Create and initialize the DBS module and all virtqueues.
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_dbs;
+	int rc;
+	uint32_t q;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_dbs = nthw_dbs_new();
+	if (!p_dbs)
+		return -1;
+
+	/* First call with a NULL instance checks that DBS exists in FPGA */
+	rc = dbs_init(NULL, p_fpga, 0);
+	if (rc != 0)
+		goto err_free;
+
+	/* Create the DBS module instance */
+	rc = dbs_init(p_dbs, p_fpga, 0);
+	if (rc != 0)
+		goto err_free;
+
+	p_fpga_info->mp_nthw_dbs = p_dbs;
+
+	/* Mark every software queue slot as free */
+	for (q = 0; q < MAX_VIRT_QUEUES; ++q) {
+		rxvq[q].usage = UNUSED;
+		txvq[q].usage = UNUSED;
+	}
+
+	dbs_reset(p_dbs);
+
+	for (q = 0; q < NT_DBS_RX_QUEUES_MAX; ++q)
+		dbs_init_rx_queue(p_dbs, q, 0, 0);
+
+	for (q = 0; q < NT_DBS_TX_QUEUES_MAX; ++q)
+		dbs_init_tx_queue(p_dbs, q, 0, 0);
+
+	/* Enable RX in stages: all off, AM/UW on, then queue on */
+	set_rx_control(p_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	/* Enable TX in the same staged sequence */
+	set_tx_control(p_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+
+err_free:
+	free(p_dbs);
+	return rc;
+}
+
+/*
+ * Compute byte offsets of the used ring and descriptor table when the
+ * three split-ring sections are laid out back to back, each aligned up
+ * to STRUCT_ALIGNMENT.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	/* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_sz = sizeof(struct virtq_avail) +
+			  queue_size * sizeof(le16);
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_sz = sizeof(struct virtq_used) +
+			 queue_size * sizeof(struct virtq_used_elem);
+
+	/* Round each section up to the next STRUCT_ALIGNMENT boundary */
+	size_t avail_sz_al = (avail_sz + STRUCT_ALIGNMENT - 1) /
+			     STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+	size_t used_sz_al = (used_sz + STRUCT_ALIGNMENT - 1) /
+			    STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
+
+	struct virtq_struct_layout_s layout;
+
+	layout.used_offset = avail_sz_al;
+	layout.desc_offset = avail_sz_al + used_sz_al;
+
+	return layout;
+}
+
+/* Fill in the split-ring available structure: interrupts suppressed,
+ * ring entries pre-populated with the identity mapping 0..queue_size-1.
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = (struct virtq_avail *)addr;
+	uint16_t entry;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+	for (entry = 0; entry < queue_size; entry++)
+		avail->ring[entry] = entry;
+}
+
+/* Fill in the split-ring used structure with zeroed entries.
+ * The flags value 1 corresponds to VIRTQ_USED_F_NO_NOTIFY.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	struct virtq_used *used = (struct virtq_used *)addr;
+	uint16_t entry;
+
+	used->flags = 1;
+	used->idx = 0;
+	for (entry = 0; entry < queue_size; entry++) {
+		used->ring[entry].id = 0;
+		used->ring[entry].len = 0;
+	}
+}
+
+/*
+ * Program each split-ring descriptor with its packet buffer's physical
+ * address and length, applying the supplied flags to every entry.
+ * No-op when no packet buffer array is provided.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+	int entry;
+
+	if (!packet_buffer_descriptors)
+		return;
+
+	for (entry = 0; entry < queue_size; ++entry) {
+		p_desc[entry].addr =
+			(uint64_t)packet_buffer_descriptors[entry].phys_addr;
+		p_desc[entry].len = packet_buffer_descriptors[entry].len;
+		p_desc[entry].flags = flgs;
+		p_desc[entry].next = 0;
+	}
+}
+
+/*
+ * Initialize all three split-ring sections (avail, used, desc) of one
+ * virtqueue. "flgs" is applied to every descriptor (e.g. VIRTQ_DESC_F_WRITE
+ * for Rx queues).
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size,
+				    initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr,
+					 packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * Return floor(log2(qsize)) — the exponent form the DBS registers expect
+ * (queue sizes are powers of two).
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	/*
+	 * Guard against qsize == 0: the original "--qs" would underflow the
+	 * unsigned counter to 0xFFFFFFFF and be truncated into the return.
+	 */
+	if (qsize == 0)
+		return 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure and enable one RX virt queue in the DBS module and record its
+ * state in the rxvq[] shadow table. The DR/UW/AM register writes follow the
+ * DSF00094 bring-up order and must not be reordered. Returns a handle into
+ * rxvq[] on success, NULL if any register write fails. Interrupts always
+ * start disabled; queues that need them are enabled later via
+ * nthw_enable_rx_virt_queue().
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	/* Packed rings use the descriptor area as the UW address */
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	/*
+	 * NOTE(review): this write sets RX_AM_ENABLE unconditionally, whereas the
+	 * am_enable recorded below is ENABLE only when irq_vector < 0 — confirm
+	 * the intended behavior for interrupt-driven queues.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+/* Forward declaration: defined after the enable/disable entry points below */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Disable an RX queue: clear its UW interrupt state, disable AM, and wait
+ * for the FPGA to drain in-flight packets. Returns 0 on success, -1 on any
+ * validation or register-write failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re)enable an RX queue: program UW with interrupt state (sticky interrupt
+ * when a valid irq_vector is set) and re-enable AM. Returns 0 on success,
+ * -1 on validation or register-write failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		/* No (valid) interrupt vector: run in polling mode */
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable a TX queue: clear its UW interrupt state, disable AM, and wait
+ * for the FPGA to drain in-flight packets. Mirrors
+ * nthw_disable_rx_virt_queue(). Returns 0 on success, -1 on failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re)enable a TX queue: program UW with interrupt state (sticky interrupt
+ * when a valid irq_vector is set) and re-enable AM. Mirrors
+ * nthw_enable_rx_virt_queue(). Returns 0 on success, -1 on failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		/* No (valid) interrupt vector: run in polling mode */
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Redirect a TX queue to a new output port and (re)enable it.
+ * Returns 0 on success, -1 on error.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* Validate before dereferencing — consistent with the other
+	 * enable/disable entry points, which all reject a NULL queue.
+	 */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Rewrite DR data with the new output port, then re-enable */
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Per-port TX QoS: enable/disable shaping with information rate ir and
+ * burst size bs (thin wrapper around set_tx_qos_data).
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/* Global TX QoS rate as multiplier/divider (thin wrapper around
+ * set_tx_qos_rate).
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read the RX index pointer from HW; *p_index receives the pointer, or
+ * INDEX_PTR_NOT_VALID when HW reports it invalid. On a non-zero status
+ * *p_index is left untouched.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr = 0;
+	uint32_t queue = 0;
+	uint32_t valid = 0;
+	int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/*
+ * Read the TX index pointer from HW; *p_index receives the pointer, or
+ * INDEX_PTR_NOT_VALID when HW reports it invalid. On a non-zero status
+ * *p_index is left untouched.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr = 0;
+	uint32_t queue = 0;
+	uint32_t valid = 0;
+	int status = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/* Select which RX queue subsequent dbs_get_rx_ptr() reads refer to */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Select which TX queue subsequent dbs_get_tx_ptr() reads refer to */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Poll the RX or TX idle register until HW clears the busy flag or the read
+ * itself fails; the last idle value is reported through *idle.
+ * NOTE(review): this spin has no timeout — if HW never clears busy the
+ * caller hangs; consider bounding the loop.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Request queue shutdown in HW and wait until the FPGA reports the queue
+ * idle. When the idle register is unsupported (-ENOTSUP) fall back to a
+ * fixed 200 ms grace period and report success. Returns 0 on success,
+ * -1 on any register error.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			/* No idle register on this FPGA — just give HW time */
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		/* Request idle for this queue, then re-poll until granted */
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down an RX queue: clear UW, disable AM, let the FPGA drain, then
+ * clear the remaining AM/DR state and reset the shadow entry.
+ * Returns 0 on success, -1 on error.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* Validate BEFORE dereferencing; the original read rxvq->mp_nthw_dbs
+	 * ahead of this NULL check.
+	 */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged RX queue (caller owns the packet buffers) */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+	if (rxvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/* Release a managed RX queue, freeing its driver-owned buffer table */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+	if (rxvq->usage != MANAGED)
+		return -1;
+
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down a TX queue: clear UW, disable AM, let the FPGA drain, then
+ * clear the remaining AM/DR/QP state and reset the shadow entry.
+ * Returns 0 on success, -1 on error.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* Validate BEFORE dereferencing; the original read txvq->mp_nthw_dbs
+	 * ahead of this NULL check.
+	 */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged TX queue (caller owns the packet buffers) */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+	if (txvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/* Release a managed TX queue, freeing its driver-owned buffer table */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+	if (txvq->usage != MANAGED)
+		return -1;
+
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Configure and enable one TX virt queue in the DBS module and record its
+ * state in the txvq[] shadow table. The DR/UW/AM/QP register write order
+ * follows the DSF00094 bring-up sequence and must not be reordered.
+ * Returns a handle into txvq[] on success, NULL if any register write
+ * fails. Interrupts always start disabled; queues that need them are
+ * enabled later via nthw_enable_tx_virt_queue().
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings use the descriptor area as the UW address */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/* Map this queue to its virtual output port */
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a managed split-ring RX queue: lay out and initialize the virtq
+ * structures inside p_virt_struct_area, keep a driver-owned copy of the
+ * packet-buffer descriptors, and program the HW. Returns the queue handle,
+ * or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	/* Rx descriptors are device-writable; pre-post all buffers as avail */
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* The original memcpy'ed into an unchecked malloc result */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a managed split-ring TX queue: lay out and initialize the virtq
+ * structures inside p_virt_struct_area, keep a driver-owned copy of the
+ * packet-buffer descriptors, and program the HW. Returns the queue handle,
+ * or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	/* Tx descriptors start device-readable with no buffers posted */
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* The original memcpy'ed into an unchecked malloc result */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Lay out and initialize a packed-ring virtqueue inside
+ * p_virt_struct_area: descriptor table first, then the device and driver
+ * event-suppression structures. Every descriptor is pre-programmed with a
+ * buffer (Rx also sets addr/len), relying on in-order completion from the
+ * FPGA so descriptor id == buffer index. Returns 0 on success, -1 on a
+ * zero-sized queue or allocation failure. vq->queue_size must be set by
+ * the caller before this is called.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	/* Packed-ring wrap counters start at 1 per the virtio spec */
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a managed packed-ring RX queue: initialize the packed virtqueue in
+ * memory, then program the HW with the driver/device event areas and the
+ * descriptor table. Returns the queue handle or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	/* start_idx 0x8000 encodes the initial wrap bit for the HW */
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a managed packed-ring TX queue: initialize the packed virtqueue in
+ * memory, then program the HW with the driver/device event areas and the
+ * descriptor table. Returns the queue handle or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	/* start_idx 0x8000 encodes the initial wrap bit for the HW */
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on ring layout; unknown layouts are rejected */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on ring layout; unknown layouts are rejected */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Descriptor AVAIL/USED flag bits for the current avail wrap state */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num entries, toggling avail_wrap_count on wrap */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num entries, toggling used_wrap_count on wrap */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Fetch up to n received segments into rp[]. Returns the number of ring
+ * entries consumed (segments); *nb_pkts is set to the number of whole
+ * packets delivered. Split ring: a packet larger than one buffer occupies
+ * several consecutive used-ring entries and is only delivered if all its
+ * segments fit within n. Packed ring: relies on in-order completion from
+ * the FPGA; one descriptor per packet.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* All Rx buffers share the size programmed in descriptor 0 */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			/* Capture length comes from the in-buffer packet header */
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Entry is ours only when both bits match our wrap state */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ *
+ * Re-posts n RX buffers to the device. For PACKED_RING the flags of the
+ * first descriptor are written last, behind a write barrier, so the device
+ * cannot observe a partially initialized chain.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/*
+		 * Store barrier: all descriptor field writes above must be
+		 * visible to the device before the first descriptor's flags
+		 * publish the chain (rte_rmb() only orders loads and would
+		 * not provide this guarantee).
+		 */
+		rte_wmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Compile-time no-op logging stub for virt-queue tracing */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n TX descriptors starting at *first_idx.
+ *
+ * Returns the number of descriptors actually available (possibly fewer than
+ * requested). cvq is set up to address the ring in either split or packed
+ * layout; *p_virt_addr points at the queue's buffer descriptor table.
+ * For PACKED_RING, previously reclaimed-but-unused descriptors ("outs") are
+ * handed out first; the ring scan relies on in-order completion by the FPGA.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		/* One slot is always kept free to distinguish full from empty */
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* Serve previously reclaimed descriptors first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* A completion may cover a multi-descriptor chain;
+			 * skip over the whole chain in one step
+			 */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* Chain skipping may overshoot n; stash the surplus in outs */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n packets (each consisting of n_segs[i] descriptors) to the device.
+ * For PACKED_RING the first descriptor's flags are written last, behind a
+ * write barrier, so the device never sees a half-built chain.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/*
+		 * Store barrier before FPGA may see first flags: all descriptor
+		 * writes above must complete first. The previous rte_rmb() only
+		 * ordered loads and gave no such guarantee.
+		 */
+		rte_wmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the hardware RX queue pointer for rxvq into *index.
+ *
+ * Polls until the DBS module reports a valid pointer; gives up after
+ * ~100000 attempts (10 us apart, ~1 s). Returns 0 on success, -1 on a
+ * register read failure or timeout. Unlike the previous do/while form,
+ * no sleep is taken when the pointer is valid on the first read.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	for (;;) {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (rx_ptr != INDEX_PTR_NOT_VALID)
+			break;
+		if (--loops == 0)
+			return -1; /* timed out waiting for a valid pointer */
+		usleep(10);
+	}
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the hardware TX queue pointer for txvq into *index.
+ *
+ * Mirrors nthw_get_rx_queue_ptr(): polls until valid, ~1 s timeout,
+ * returns 0 on success and -1 on read failure or timeout. No sleep is
+ * taken when the pointer is already valid on the first read.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (tx_ptr != INDEX_PTR_NOT_VALID)
+			break;
+		if (--loops == 0)
+			return -1; /* timed out waiting for a valid pointer */
+		usleep(10);
+	}
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/* One DMA-capable buffer: physical address for HW, virtual for SW, size */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/* Little-endian fixed-width aliases used throughout the virtqueue layouts.
+ * NOTE(review): plain #define aliases rather than typedefs — a typedef
+ * would be the conventional choice; confirm no redefinition constraints.
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Virtqueue layout selectors and ordering modes */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Driver/device event suppression area (VirtIO 1.1 packed ring).
+ * NOTE(review): the pack(push, 16) below starts mid-struct and the matching
+ * pop is after the closing brace — confirm the intended packing applies to
+ * the whole struct, not just the second union.
+ */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ *
+ * Accessor macros that work on either ring layout. Each macro caches its
+ * arguments in __typeof__ locals so every argument is evaluated exactly
+ * once (vq_set_next previously evaluated index/nxt directly, inconsistent
+ * with its siblings and unsafe for arguments with side effects).
+ */
+#define vq_set_next(_vq, _index, _nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_nxt) (nxt) = (_nxt); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Buffer part of a descriptor, common to both ring layouts */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* Tagged view of a descriptor ring: interpret via vq_type */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+/* Enable/disable/tear down an RX queue created above */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+/* TX counterpart of nthw_setup_rx_virt_queue() */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+/* "Managed" variants: the queue owns its ring/buffer memory descriptors */
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+/* TX QoS shaping: per-port and global rate configuration */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+/* One received buffer segment as returned by nthw_get_rx_packets() */
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..2c5e47f996
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+/* Largest frame the HW path supports; MTU excludes L2 header and CRC */
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* Non-zero while link aggregation management is active */
+int lag_active;
+
+/* Bookkeeping of virt-queues to release on teardown: whether each entry
+ * was created as managed and whether it is an RX queue
+ */
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Hex-dump a packet segment to stdout, 16 bytes per line (debug builds) */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+/* NOTE(review): `{ -1 }` only sets element 0 to -1; the remaining entries
+ * are zero-initialized — confirm whether all entries were meant to start
+ * at -1 (unregistered).
+ */
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+/* Protects the g_p_drv adapter table below */
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+/* devargs keys accepted by this PMD */
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+/* NULL-terminated list handed to rte_kvargs for devargs validation */
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+/* MAC addresses assigned to virtual ports */
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+/* Adapter table indexed by adapter_no; guarded by hwlock for writes/scans */
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register p_drv in the global adapter table, keyed by adapter_no.
+ * Logs a warning if an entry for that slot is being overwritten.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/* g_p_drv has NUM_ADAPTER_MAX entries, so adapter_no must be
+	 * strictly less than it; the previous `>` check allowed an
+	 * out-of-bounds write at index NUM_ADAPTER_MAX.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up the adapter whose PCI domain and bus match addr.
+ * Returns NULL when no registered adapter matches.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *p_drv = NULL;
+	int i;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		struct drv_s *cand = g_p_drv[i];
+
+		if (!cand)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus) {
+			p_drv = cand;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_drv;
+}
+
+/* Decode a packed PCI identifier and look up the matching adapter */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter info for the adapter identified by pciident to pfh.
+ * Returns -1 when no adapter matches (get_pdrv_from_pciident() returns
+ * NULL in that case; the previous code dereferenced it unconditionally).
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1;
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS handle of the adapter at pci_addr, or NULL (with an
+ * error log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+		return NULL;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at pci_addr, or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return FPGA_INFO_PROFILE_UNKNOWN;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.profile;
+}
+
+/* rte_kvargs callback: parse value_str (any base via 0 prefix rules)
+ * into the uint32_t pointed to by extra_args. Returns 0 on success,
+ * -1 when either pointer is missing.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	uint32_t *p_result = (uint32_t *)extra_args;
+
+	if (value_str == NULL || p_result == NULL)
+		return -1;
+	*p_result = (uint32_t)strtol(value_str, NULL, 0);
+	return 0;
+}
+
+/* One parsed "port:speed" pair from devargs */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	/* extra_args is a pointer to a cursor into an array of entries */
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* advance the caller's cursor to the next free entry */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill *stats with per-queue and aggregate RX/TX counters for this port.
+ * Pulls fresh HW counters via poll_statistics() first.
+ * Returns 0 on success, -1 on invalid state/arguments.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/* NOTE(review): `> NUM_ADAPTER_PORTS_MAX` permits if_index ==
+	 * NUM_ADAPTER_PORTS_MAX — confirm whether `>=` was intended.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue software counters for this port and record the reset
+ * timestamp. Serialized against the stats polling thread via stat_lck.
+ * Returns 0 on success, -1 on invalid state/arguments.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/* NOTE(review): same `>` vs `>=` bound question as dpdk_stats_collect() */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* Map an NT link speed enum to the corresponding DPDK ETH_SPEED_NUM_* value */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex enum to the DPDK ETH_LINK_*_DUPLEX value (0 if unknown) */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * ethdev link_update callback: refresh dev_link (status/speed/duplex).
+ * Virtual/override ports report link-up purely from the virtio negotiation
+ * state; physical ports query the adapter. A stopped device reports
+ * link-down. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * ethdev stats_get callback. Propagates the collector's result instead
+ * of discarding it, so the ethdev layer can report collection failures.
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * ethdev stats_reset callback. Propagates the reset helper's result
+ * instead of discarding it, so failures are visible to the caller.
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/* Translate an NT link-speed capability bitmask to DPDK ETH_LINK_SPEED_* bits */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (nt_link_speed_capa & speed_map[i].nt_bit)
+			eth_speed_capa |= speed_map[i].eth_bit;
+	}
+
+	return eth_speed_capa;
+}
+
+/* RSS types covering the classic 5-tuple (IP addresses + TCP/UDP ports) */
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * ethdev dev_infos_get callback: report device limits and capabilities.
+ * Inline-profile FPGAs require a larger minimum MTU than the default.
+ * Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use key, so it is just a fake key length to
+	 * meet testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
/*
 * Copy one received virtqueue packet into an mbuf, chaining additional
 * mbufs from @mb_pool when the data does not fit in the one supplied.
 *
 * The source packet may span several virtqueue segments (each of size
 * SG_HW_RX_PKT_BUFFER_SIZE); the first segment begins with an SG_HDR_SIZE
 * descriptor header which is stripped from the copied data.
 *
 * @mbuf:     first destination mbuf (already allocated by the caller)
 * @mb_pool:  mempool used to allocate extra chained mbufs on demand
 * @hw_recv:  array of source virtqueue segments for this packet
 * @max_segs: number of source segments available
 * @data_len: total captured length, including the SG_HDR_SIZE header
 *
 * Returns the number of virtqueue segments consumed, or -1 if an mbuf
 * could not be allocated or the packet needed more than @max_segs
 * segments.
 */
static __rte_always_inline int
copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
		       struct nthw_received_packets *hw_recv, int max_segs,
		       uint16_t data_len)
{
	int src_pkt = 0;
	/*
	 * 1. virtqueue packets may be segmented
	 * 2. the mbuf size may be too small and may need to be segmented
	 */
	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;

	/* set packet length */
	mbuf->pkt_len = data_len - SG_HDR_SIZE;

#ifdef RX_MERGE_SEGMENT_DEBUG
	void *dbg_src_start = hw_recv->addr;
	void *dbg_dst_start = dst;
#endif

	int remain = mbuf->pkt_len;
	/* First cpy_size is without header */
	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
		       remain;

	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */

	/* Walk the source virtqueue segments until the packet is complete */
	while (++src_pkt <= max_segs) {
		/* keep track of space in dst */
		int cpto_size = rte_pktmbuf_tailroom(m);

#ifdef RX_MERGE_SEGMENT_DEBUG
		printf("src copy size %i\n", cpy_size);
#endif

		if (cpy_size > cpto_size) {
			/* Source segment does not fit - fill current mbuf,
			 * then chain new mbufs for the rest
			 */
			int new_cpy_size = cpto_size;

#ifdef RX_MERGE_SEGMENT_DEBUG
			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
			       mbuf->nb_segs - 1,
			       (uint64_t)data - (uint64_t)dbg_src_start,
			       (uint64_t)dst - (uint64_t)dbg_dst_start,
			       new_cpy_size);
#endif
			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
			m->data_len += new_cpy_size;
			remain -= new_cpy_size;
			cpy_size -= new_cpy_size;

			data += new_cpy_size;

			/*
			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
			 * mbuf
			 */
			do {
				m->next = rte_pktmbuf_alloc(mb_pool);
				if (unlikely(!m->next))
					return -1;
				m = m->next;

				/* Headroom is not needed in chained mbufs */
				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
				dst = (char *)m->buf_addr;
				m->data_len = 0;
				m->pkt_len = 0;

#ifdef RX_MERGE_SEGMENT_DEBUG
				dbg_dst_start = dst;
#endif
				cpto_size = rte_pktmbuf_tailroom(m);

				int actual_cpy_size = (cpy_size > cpto_size) ?
						      cpto_size :
						      cpy_size;
#ifdef RX_MERGE_SEGMENT_DEBUG
				printf("new dst mbuf seg - size %i\n",
				       cpto_size);
				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
				       mbuf->nb_segs,
				       (uint64_t)data - (uint64_t)dbg_src_start,
				       (uint64_t)dst - (uint64_t)dbg_dst_start,
				       actual_cpy_size);
#endif

				rte_memcpy((void *)dst, (void *)data,
					   actual_cpy_size);
				m->pkt_len += actual_cpy_size;
				m->data_len += actual_cpy_size;

				remain -= actual_cpy_size;
				cpy_size -= actual_cpy_size;

				data += actual_cpy_size;

				/* chain length is tracked on the head mbuf */
				mbuf->nb_segs++;

			} while (cpy_size && remain);

		} else {
			/* all data from this virtqueue segment can fit in current mbuf */
#ifdef RX_MERGE_SEGMENT_DEBUG
			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
			       ", dst off 0x%" PRIX64 "\n",
			       mbuf->nb_segs - 1, cpy_size,
			       (uint64_t)data - (uint64_t)dbg_src_start,
			       (uint64_t)dst - (uint64_t)dbg_dst_start);
#endif
			rte_memcpy((void *)dst, (void *)data, cpy_size);
			m->data_len += cpy_size;
			/* head mbuf's pkt_len was set up front; only chained
			 * mbufs accumulate pkt_len here
			 */
			if (mbuf->nb_segs > 1)
				m->pkt_len += cpy_size;
			remain -= cpy_size;
		}

		/* packet complete - all data from current virtqueue packet has been copied */
		if (remain == 0)
			break;
		/* increment dst to data end */
		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
		/* prepare for next virtqueue segment */
		data = (char *)hw_recv[src_pkt]
		       .addr; /* following packets are full data */

#ifdef RX_MERGE_SEGMENT_DEBUG
		dbg_src_start = data;
#endif
		/* only the first segment carries the SG header */
		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
			   SG_HW_RX_PKT_BUFFER_SIZE :
			   remain;
#ifdef RX_MERGE_SEGMENT_DEBUG
		printf("next src buf\n");
#endif
	};

	if (src_pkt > max_segs) {
		NT_LOG(ERR, ETHDEV,
		       "Did not receive correct number of segment for a whole packet");
		return -1;
	}

	return src_pkt;
}
+
/*
 * Rx burst handler (scatter-gather virtqueue path).
 *
 * Fetches up to @nb_pkts whole packets from the Rx virtqueue, copies each
 * into a freshly allocated mbuf (chaining extra mbufs via
 * copy_virtqueue_to_mbuf() when a packet does not fit in one), and fills
 * in FDIR metadata from the per-packet descriptor header. All consumed
 * virtqueue segments are released before returning, including on the
 * error paths.
 *
 * Returns the number of mbufs written to @bufs.
 */
static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct ntnic_rx_queue *rx_q = queue;
	uint16_t num_rx = 0;

	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];

	if (kill_pmd)
		return 0;

#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
	dbg_print_approx_cpu_load_rx_called(rx_q->port);
#endif

	if (unlikely(nb_pkts == 0))
		return 0;

	/* hw_recv is a fixed-size stack array - clamp the request */
	if (nb_pkts > MAX_RX_PACKETS)
		nb_pkts = MAX_RX_PACKETS;

	uint16_t whole_pkts;
	uint16_t hw_recv_pkt_segs =
		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);

	if (!hw_recv_pkt_segs) {
#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
#endif

		return 0;
	}

#ifdef NT_DEBUG_STAT
	dbg_rx_queue(rx_q,
		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
#endif

	/* only complete packets are delivered; segments may exceed packets */
	nb_pkts = whole_pkts;

#ifdef RX_MERGE_SEGMENT_DEBUG
	printf("\n---------- DPDK Rx ------------\n");
	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
	       "vq buf %i, vq header size %i\n",
	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
#endif

	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */

	for (i = 0; i < nb_pkts; i++) {
		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
		if (!bufs[i]) {
			printf("ERROR - no more buffers mbuf in mempool\n");
			goto err_exit;
		}
		mbuf = bufs[i];

		/* each packet starts with an SG descriptor header */
		struct _pkt_hdr_rx *phdr =
			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;

#ifdef RX_MERGE_SEGMENT_DEBUG
		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
		       i, phdr->cap_len - SG_HDR_SIZE,
		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
		       SG_HW_RX_PKT_BUFFER_SIZE,
		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
#endif

#ifdef RX_SRC_DUMP_PKTS_DEBUG
		{
			int d, _segs = (phdr->cap_len +
					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
				       SG_HW_RX_PKT_BUFFER_SIZE;
			int _size = phdr->cap_len;

			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
			       _segs);
			for (d = 0; d < _segs; d++) {
				printf("Dump seg %i:\n", d);
				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
						SG_HW_RX_PKT_BUFFER_SIZE :
						_size);
				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
			}
		}
#endif

		/* a capture length below the header size means no payload */
		if (phdr->cap_len < SG_HDR_SIZE) {
			printf("Pkt len of zero received. No header!! - dropping packets\n");
			rte_pktmbuf_free(mbuf);
			goto err_exit;
		}

		{
			/* fast path: single segment that fits the mbuf */
			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
					(phdr->cap_len - SG_HDR_SIZE) <=
					rte_pktmbuf_tailroom(mbuf)) {
#ifdef RX_MERGE_SEGMENT_DEBUG
				printf("Simple copy vq -> mbuf %p size %i\n",
				       rte_pktmbuf_mtod(mbuf, void *),
				       phdr->cap_len);
#endif
				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
					   (char *)hw_recv[src_pkt].addr +
					   SG_HDR_SIZE,
					   mbuf->data_len);

				mbuf->pkt_len = mbuf->data_len;
				src_pkt++;
			} else {
				/* multi-segment or oversized: chained copy */
				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
								      &hw_recv[src_pkt],
								      hw_recv_pkt_segs - src_pkt,
								      phdr->cap_len);
				if (cpy_segs < 0) {
					/* Error */
					rte_pktmbuf_free(mbuf);
					goto err_exit;
				}
				src_pkt += cpy_segs;
			}

#ifdef RX_DST_DUMP_PKTS_DEBUG
			{
				struct rte_mbuf *m = mbuf;

				printf("\nRx final mbuf:\n");
				for (int ii = 0; m && ii < m->nb_segs; ii++) {
					printf("  seg %i len %i\n", ii,
					       m->data_len);
					printf("  seg dump:\n");
					dump_packet_seg("mbuf seg:",
							rte_pktmbuf_mtod(m, uint8_t *),
							m->data_len);
					m = m->next;
				}
			}
#endif

			num_rx++;

			/* clear stale FDIR metadata before deciding below */
			mbuf->ol_flags &=
				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
			mbuf->port = (uint16_t)-1;

			if (phdr->color_type == 0) {
				/* NOTE(review): 0x02 in the color's top byte
				 * appears to tag tunneled packets that missed
				 * the inner match - confirm against FPGA docs
				 */
				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
						((phdr->color >> 24) == 0x02)) {
					/* VNI in color of descriptor add port as well */
					mbuf->hash.fdir.hi =
						((uint32_t)phdr->color &
						 0xffffff) |
						((uint32_t)phdr->port
						 << 24);
					mbuf->hash.fdir.lo =
						(uint32_t)phdr->fid;
					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;

					NT_LOG(DBG, ETHDEV,
					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
					       phdr->color, phdr->port,
					       phdr->fid);
				}

			} else {
				/* non-zero color carries a flow-stat id */
				if (phdr->color) {
					mbuf->hash.fdir.hi =
						phdr->color &
						(NT_MAX_COLOR_FLOW_STATS - 1);
					mbuf->ol_flags |=
						RTE_MBUF_F_RX_FDIR_ID |
						RTE_MBUF_F_RX_FDIR;
				}
			}
		}
	}

err_exit:
	/* always hand every fetched segment back to the virtqueue */
	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);

#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
#endif

#ifdef RX_MERGE_SEGMENT_DEBUG
	/*
	 * When the application double frees a mbuf, it will become a doublet in the memory pool
	 * This is obvious a bug in application, but can be verified here to some extend at least
	 */
	uint64_t addr = (uint64_t)bufs[0]->buf_addr;

	for (int i = 1; i < num_rx; i++) {
		if (bufs[i]->buf_addr == addr) {
			printf("Duplicate packet addresses! num_rx %i\n",
			       num_rx);
			for (int ii = 0; ii < num_rx; ii++) {
				printf("bufs[%i]->buf_addr %p\n", ii,
				       bufs[ii]->buf_addr);
			}
		}
	}
#endif

	return num_rx;
}
+
/*
 * Copy one mbuf chain into transmit virtqueue buffers.
 *
 * Walks the mbuf segments and copies them into consecutive virtqueue
 * descriptors starting at @vq_descr_idx, chaining descriptors with
 * VIRTQ_DESC_F_NEXT as needed. The first descriptor reserves SG_HDR_SIZE
 * bytes for the packet header already written by the caller.
 *
 * @cvq_desc:     combined virtqueue descriptor table to fill in
 * @vq_descr_idx: index of the first descriptor to use
 * @vq_bufs:      buffer memory backing each descriptor
 * @max_segs:     number of descriptors available for this packet
 * @mbuf:         source packet (possibly multi-segment)
 *
 * Returns the number of descriptors used, or -1 if the packet needed
 * more than @max_segs descriptors.
 */
int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
			   uint16_t vq_descr_idx,
			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
			   struct rte_mbuf *mbuf)
{
	/*
	 * 1. mbuf packet may be segmented
	 * 2. the virtqueue buffer size may be too small and may need to be segmented
	 */

	char *data = rte_pktmbuf_mtod(mbuf, char *);
	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;

	int remain = mbuf->pkt_len;
	int cpy_size = mbuf->data_len;

#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
	printf("src copy size %i\n", cpy_size);
#endif

	struct rte_mbuf *m = mbuf;
	/* space left in the first descriptor, after the reserved header */
	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;

	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;

	int cur_seg_num = 0; /* start from 0 */

	/* walk the mbuf chain, filling descriptors as they run out of room */
	while (m) {
		/* Can all data in current src segment be in current dest segment */
		if (cpy_size > cpto_size) {
			int new_cpy_size = cpto_size;

#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
			/* NOTE(review): "%u" here is paired with a uint64_t
			 * expression - fix the format if this debug path is
			 * ever enabled
			 */
			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
			       cur_seg_num,
			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
			       (uint64_t)dst -
			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
			       new_cpy_size);
#endif
			rte_memcpy((void *)dst, (void *)data, new_cpy_size);

			cvq_desc->b[vq_descr_idx].len += new_cpy_size;

			remain -= new_cpy_size;
			cpy_size -= new_cpy_size;

			data += new_cpy_size;

			/*
			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
			 * mbuf
			 */
			do {
				/* chain the current descriptor to the next */
				vq_add_flags(cvq_desc, vq_descr_idx,
					     VIRTQ_DESC_F_NEXT);

				int next_vq_descr_idx =
					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);

				vq_set_next(cvq_desc, vq_descr_idx,
					    next_vq_descr_idx);

				vq_descr_idx = next_vq_descr_idx;

				vq_set_flags(cvq_desc, vq_descr_idx, 0);
				vq_set_next(cvq_desc, vq_descr_idx, 0);

				/* ran out of descriptors; error reported below */
				if (++cur_seg_num > max_segs)
					break;

				/* chained descriptors carry no header */
				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;

				int actual_cpy_size = (cpy_size > cpto_size) ?
						      cpto_size :
						      cpy_size;
#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
				       cur_seg_num, actual_cpy_size,
				       (uint64_t)dst -
				       (uint64_t)vq_bufs[vq_descr_idx]
				       .virt_addr);
#endif
				rte_memcpy((void *)dst, (void *)data,
					   actual_cpy_size);

				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;

				remain -= actual_cpy_size;
				cpy_size -= actual_cpy_size;
				cpto_size -= actual_cpy_size;

				data += actual_cpy_size;

			} while (cpy_size && remain);

		} else {
			/* All data from this segment can fit in current virtqueue buffer */
#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
			       cur_seg_num, cpy_size,
			       (uint64_t)dst -
			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
#endif

			rte_memcpy((void *)dst, (void *)data, cpy_size);

			cvq_desc->b[vq_descr_idx].len += cpy_size;

			remain -= cpy_size;
			cpto_size -= cpy_size;
		}

		/* Packet complete - all segments from current mbuf has been copied */
		if (remain == 0)
			break;
		/* increment dst to data end */
		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
		      cvq_desc->b[vq_descr_idx].len;

		m = m->next;
		if (!m) {
			/* pkt_len said more data, but the chain ended early */
			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
			break;
		}

		/* Prepare for next mbuf segment */
		data = rte_pktmbuf_mtod(m, char *);
		cpy_size = m->data_len;
	};

	cur_seg_num++;
	if (cur_seg_num > max_segs) {
		NT_LOG(ERR, ETHDEV,
		       "Did not receive correct number of segment for a whole packet");
		return -1;
	}

	return cur_seg_num;
}
+
/*
 * Tx burst handler (scatter-gather virtqueue path).
 *
 * Counts the virtqueue descriptors needed for the burst, requests that
 * many from the hardware, then copies each mbuf into virtqueue buffers
 * (simple copy when one descriptor suffices, copy_mbuf_to_virtqueue()
 * otherwise), writes the per-packet SG header, frees the mbuf and
 * releases the filled descriptors to the hardware.
 *
 * Returns the number of packets actually queued for transmission.
 */
static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts)
{
	uint16_t pkt;
	uint16_t first_vq_descr_idx = 0;

	struct nthw_cvirtq_desc cvq_desc;

	struct nthw_memory_descriptor *vq_bufs;

	struct ntnic_tx_queue *tx_q = queue;

	int nb_segs = 0, i;
	int pkts_sent = 0;
	uint16_t nb_segs_arr[MAX_TX_PACKETS];

	if (kill_pmd)
		return 0;

	/* nb_segs_arr is a fixed-size stack array - clamp the burst */
	if (nb_pkts > MAX_TX_PACKETS)
		nb_pkts = MAX_TX_PACKETS;

#ifdef TX_CHAINING_DEBUG
	printf("\n---------- DPDK Tx ------------\n");
#endif

	/*
	 * count all segments needed to contain all packets in vq buffers
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* pad short frames to 60 bytes (min Ethernet frame excl. FCS) */
		if (bufs[i]->pkt_len < 60) {
			bufs[i]->pkt_len = 60;
			bufs[i]->data_len = 60;
		}

		/* build the num segments array for segmentation control and release function */
		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);

		nb_segs_arr[i] = vq_segs;
		nb_segs += vq_segs;
	}
	if (!nb_segs)
		goto exit_out;

#ifdef TX_CHAINING_DEBUG
	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
	       tx_q->port, nb_segs, nb_pkts,
	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
	       rte_pktmbuf_headroom(bufs[0]));
#endif

	int got_nb_segs =
		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
	if (!got_nb_segs) {
#ifdef TX_CHAINING_DEBUG
		printf("Zero segments got - back pressure from HW\n");
#endif
		goto exit_out;
	}

	/*
	 * we may get less vq buffers than we have asked for
	 * calculate last whole packet that can fit into what
	 * we have got
	 */
	while (got_nb_segs < nb_segs) {
		if (!--nb_pkts)
			goto exit_out;
		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
		if (nb_segs <= 0)
			goto exit_out;
	}

	/*
	 * nb_pkts & nb_segs, got it all, ready to copy
	 */
	int seg_idx = 0;
	int last_seg_idx = seg_idx;

	for (pkt = 0; pkt < nb_pkts; ++pkt) {
		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);

		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
		vq_set_next(&cvq_desc, vq_descr_idx, 0);

		struct _pkt_hdr_tx *hdr_tx =
			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
		/* Set the header to all zeros */
		memset(hdr_tx, 0, SG_HDR_SIZE);

		/*
		 * Set the NT DVIO0 header fields
		 *
		 * Applicable for Vswitch only.
		 * For other product types the header values are "don't care" and we leave them as
		 * all zeros.
		 */
		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
			hdr_tx->bypass_port = tx_q->target_id;

			/* set packet length */
			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
		}

#ifdef TX_CHAINING_DEBUG
		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);

#ifdef TX_SRC_DUMP_PKTS_DEBUG
		{
			struct rte_mbuf *m = bufs[pkt];
			int ii;

			printf("Dump src mbuf:\n");
			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
				printf("  seg %i len %i\n", ii, m->data_len);
				printf("  seg dump:\n");
				dump_packet_seg("mbuf seg:",
						rte_pktmbuf_mtod(m, uint8_t *),
						m->data_len);
				m = m->next;
			}
		}
#endif

#endif

		/* fast path: one mbuf segment into one descriptor */
		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
#ifdef TX_CHAINING_DEBUG
			printf("Simple copy %i bytes - mbuf -> vq\n",
			       bufs[pkt]->pkt_len);
#endif
			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
				SG_HDR_SIZE),
				rte_pktmbuf_mtod(bufs[pkt], void *),
				bufs[pkt]->pkt_len);

			cvq_desc.b[vq_descr_idx].len =
				bufs[pkt]->pkt_len + SG_HDR_SIZE;

			seg_idx++;
		} else {
			/* multi-segment copy with descriptor chaining */
			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
							      vq_descr_idx, vq_bufs,
							      nb_segs - last_seg_idx, bufs[pkt]);
			if (cpy_segs < 0)
				break;
			seg_idx += cpy_segs;
		}

#ifdef TX_DST_DUMP_PKTS_DEBUG
		int d, tot_size = 0;

		for (d = last_seg_idx; d < seg_idx; d++)
			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
		       tx_q->queue.hw_id);
		for (d = last_seg_idx; d < seg_idx; d++) {
			char str[32];

			sprintf(str, "Vq seg %i:", d - last_seg_idx);
			dump_packet_seg(str,
					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
		}
#endif

		last_seg_idx = seg_idx;
		/* data copied out - the mbuf can be returned to its pool */
		rte_pktmbuf_free(bufs[pkt]);
		pkts_sent++;
	}

#ifdef TX_CHAINING_DEBUG
	printf("\nTx final vq setup:\n");
	for (int i = 0; i < nb_segs; i++) {
		int idx = VIRTQ_DESCR_IDX(i);

		if (cvq_desc.vq_type == SPLIT_RING) {
			printf("virtq descr %i, len %i, flags %04x, next %i\n",
			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
			       cvq_desc.s[idx].next);
		}
	}
#endif

exit_out:

	if (pkts_sent) {
#ifdef TX_CHAINING_DEBUG
		printf("Release virtq segs %i\n", nb_segs);
#endif
		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
	}
	return pkts_sent;
}
+
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/* Just allocate 1MB to hold all combined descr rings */
+		size = 0x100000;
+		void *virt = rte_malloc_socket("VirtQDescr", size, 4096,
+					       eth_dev->data->numa_node);
+		if (!virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0)
+			return -1;
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			rte_free(virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0)
+			return -1;
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (!hwq || hwq->vf_num == 0)
+		return;
+	hwq->vf_num = 0;
+}
+
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	int vf_num = hwq->vf_num;
+
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+
+	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[queue_id];
+
+	deallocate_hw_virtio_queues(&tx_q->hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[queue_id];
+
+	deallocate_hw_virtio_queues(&rx_q->hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	if (tx_queue_id > internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+	int ret = -1;
+
+	if (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE &&
+			mtu <= MAX_MTU)
+		ret = flow_set_mtu_inline(flw_dev, internals->port, mtu);
+	return ret ? -EINVAL : 0;
+}
+
/*
 * Set the MTU for a port by programming the EPP (exception path) module.
 *
 * Virtual port: the exception queue (rxq_scg[0]) is clamped to MAX_MTU and
 * every virtual-port TX queue gets the requested MTU.
 * Physical port: the exception queue is programmed as a virtual target with
 * MAX_MTU and the physical port itself gets the requested MTU.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU or unsupported
 * port type.
 * NOTE(review): EPP failures are propagated exactly as returned by
 * nthw_epp_set_mtu() - confirm its sign convention matches the negative
 * errno style used by the surrounding callbacks.
 */
static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	/*struct ntnic_tx_queue *tx_q = internals->txq; */
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
	int retval = EINVAL;

	/* Reject MTUs outside the supported [MIN_MTU, MAX_MTU] range */
	if (mtu < MIN_MTU || mtu > MAX_MTU)
		return -EINVAL;

	if (internals->type == PORT_TYPE_VIRTUAL) {
		/* set MTU on exception to MAX_MTU */
		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->rxq_scg[0]
			.queue
			.hw_id, /* exception tx queue hw_id to OVS */
			MAX_MTU, /* max number of bytes allowed for a given port. */
			internals->type); /* port type */

		if (retval)
			return retval;

		uint i;

		/* Apply the requested MTU to each virtual-port TX queue */
		for (i = 0; i < internals->vpq_nb_vq; i++) {
			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
				internals->vpq[i].hw_id, /* tx queue hw_id */
				mtu, /* max number of bytes allowed for a given port. */
				internals->type); /* port type */
			if (retval)
				return retval;

			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
			       mtu, internals->vpq[i].hw_id);
		}
	} else if (internals->type == PORT_TYPE_PHYSICAL) {
		/* set MTU on exception to MAX_MTU */
		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->rxq_scg[0]
			.queue
			.hw_id, /* exception tx queue hw_id to OVS */
			MAX_MTU, /* max number of bytes allowed for a given port. */
			PORT_TYPE_VIRTUAL); /* port type */
		if (retval)
			return retval;

		/* Then the physical port itself gets the requested MTU */
		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
			internals->port, /* port number */
			mtu, /* max number of bytes allowed for a given port. */
			internals->type); /* port type */

		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
		       internals->port);
	} else {
		NT_LOG(DBG, ETHDEV,
		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
		       internals->port, internals->type);
		retval = -EINVAL;
	}
	return retval;
}
+
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return;
+	}
+	(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));
+}
+
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	eth_addrs[index] = *mac_addr;
+
+	return 0;
+}
+
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
+
+	eth_addrs[0U] = *mac_addr;
+
+	return 0;
+}
+
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/* The device is ALWAYS running promiscuous mode. */
+	eth_dev->data->promiscuous ^= ~eth_dev->data->promiscuous;
+	return 0;
+}
+
/*
 * ethdev .dev_start callback.
 * Virtual/override ports are reported link-up immediately. Physical ports
 * wait up to 5 seconds (50 x 100 ms polls) for the FPGA to report link
 * before continuing, then apply any requested loopback mode
 * (lpbk_mode bit 0 = host loopback, bit 1 = line loopback).
 */
static int eth_dev_start(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
	       __LINE__, internals->n_intf_no, internals->if_index);

	if (internals->type == PORT_TYPE_VIRTUAL ||
			internals->type == PORT_TYPE_OVERRIDE) {
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
	} else {
		/*
		 * wait for link on port
		 * If application starts sending too soon before FPGA port is ready, garbage is
		 * produced
		 */
		int loop = 0;

		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
				ETH_LINK_DOWN) {
			/* break out after 5 sec */
			if (++loop >= 50) {
				NT_LOG(DBG, ETHDEV,
				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
				       __func__, internals->n_intf_no);
				break;
			}
			usleep(100000);
		}
		assert(internals->n_intf_no ==
		       internals->if_index); /* Sanity check */
		if (internals->lpbk_mode) {
			/* bit 0: loop traffic back at the host side */
			if (internals->lpbk_mode & 1 << 0) {
				nt4ga_port_set_loopback_mode(p_adapter_info,
							     n_intf_no,
							     NT_LINK_LOOPBACK_HOST);
			}
			/* bit 1: loop traffic back at the line side */
			if (internals->lpbk_mode & 1 << 1) {
				nt4ga_port_set_loopback_mode(p_adapter_info,
							     n_intf_no,
							     NT_LINK_LOOPBACK_LINE);
			}
		}
	}
	return 0;
}
+
/*
 * ethdev .dev_stop callback.
 * For non-virtual ports: release every managed RX/TX virt-queue, then
 * force admin state off, link status down, link speed unknown and
 * loopback off. The DPDK link status is reported down for all port types.
 */
static int eth_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
	       internals->type);

	if (internals->type != PORT_TYPE_VIRTUAL) {
		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
		struct ntnic_tx_queue *tx_q = internals->txq_scg;

		uint q;

		/* Hand all managed virt-queues back to the nthw layer */
		for (q = 0; q < internals->nb_rx_queues; q++)
			nthw_release_managed_rx_virt_queue(rx_q[q].vq);

		for (q = 0; q < internals->nb_tx_queues; q++)
			nthw_release_managed_tx_virt_queue(tx_q[q].vq);

		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
					  NT_LINK_SPEED_UNKNOWN);
		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
					     NT_LINK_LOOPBACK_OFF);
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+
+	return 0;
+}
+
/*
 * ethdev .dev_set_link_down callback.
 * Virtual/override ports are ignored; for physical ports the link status
 * is forced down.
 * NOTE(review): asymmetric with eth_dev_set_link_up(), which toggles the
 * admin state (nt4ga_port_set_adm_state) rather than the link status -
 * confirm this difference is intentional.
 */
static int eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *const internals = dev->data->dev_private;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;
	const int port = internals->if_index;

	if (internals->type == PORT_TYPE_VIRTUAL ||
			internals->type == PORT_TYPE_OVERRIDE)
		return 0;

	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
	assert(port == internals->n_intf_no);

	nt4ga_port_set_link_status(p_adapter_info, port, false);

	return 0;
}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 */
+	if (!p_drv->n_eth_dev_init_count && p_drv) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+	if ((size_t)length < fw_size) {
+		/* We have space for the version string */
+		return 0;
+	}
+	/* We do not have space for the version string -return the needed space */
+	return length + 1;
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,
+				    p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+	int nb_xstats;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+					  p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return nb_xstats;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nthw_stat->mb_is_vswitch);
+}
+
/* ethdev .flow_ops_get callback: hand back the PMD's rte_flow ops table. */
static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_ops **ops)
{
	*ops = &_dev_flow_ops;
	return 0;
}
+
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+	const int hsh_idx =
+		0; /* hsh index 0 means the default receipt in HSH module */
+	int res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_eth_dev *fedev = internals->flw_dev;
+	struct flow_nic_dev *ndev = fedev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
/*
 * ethdev callback table for the ntnic PMD.
 * Unimplemented operations are left NULL (mtu_set, promiscuous_disable);
 * the device always runs promiscuous mode, so promiscuous_enable is a
 * log-only stub.
 */
static struct eth_dev_ops nthw_eth_dev_ops = {
	.dev_configure = eth_dev_configure,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.dev_infos_get = eth_dev_infos_get,
	.fw_version_get = eth_fw_version_get,
	.rx_queue_setup = eth_rx_scg_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_setup = eth_tx_scg_queue_setup,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.tx_queue_release = eth_tx_queue_release,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.mac_addr_set = eth_mac_addr_set,
	.set_mc_addr_list = eth_set_mc_addr_list,
	.xstats_get = eth_xstats_get,
	.xstats_get_names = eth_xstats_get_names,
	.xstats_reset = eth_xstats_reset,
	.xstats_get_by_id = eth_xstats_get_by_id,
	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
	.mtu_set = NULL,
	.mtr_ops_get = eth_mtr_ops_get,
	.flow_ops_get = _dev_flow_ops_get,
	.promiscuous_disable = NULL,
	.promiscuous_enable = promiscuous_enable,
	.rss_hash_update = eth_dev_rss_hash_update,
	.rss_hash_conf_get = rss_hash_conf_get,
};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
/*
 * Adapter flm stat thread.
 * Waits (polling once per second) until the flow device publishes its
 * first eth_base device, then loops collecting FLM meter statistics until
 * driver shutdown, sleeping 10 us whenever an update pass reports no work.
 * NOTE(review): p_flow_nic_dev->eth_base is polled without explicit
 * synchronization - confirm the publishing write is safely ordered with
 * respect to this reader.
 */
static void *adapter_flm_thread_fn(void *context)
{
	struct drv_s *p_drv = context;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;

	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
	       p_adapter_info->mp_adapter_id_str, __func__);

	while (p_flow_nic_dev->eth_base == NULL)
		usleep(1000000);
	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;

	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
	       __func__);

	while (!p_drv->ntdrv.b_shutdown) {
		if (flm_mtr_update_stats(dev) == 0)
			usleep(10);
	}

	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
	       __func__);

	return NULL;
}
+
/*
 * Adapter stat thread.
 * Every ~10 ms (usleep(100 * 100)) it triggers a HW statistics transfer,
 * waits for the DMA'ed timestamp to change from the (uint64_t)-1 sentinel,
 * and then collects the counters under the stat lock. If the DMA appears
 * stuck and ETHDEV debug logging is enabled, RMC status registers are
 * dumped once every 0x4000 polls.
 */
static void *adapter_stat_thread_fn(void *context)
{
	struct drv_s *p_drv = context;
	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;

	const char *const p_adapter_id_str _unused =
		p_nt_drv->adapter_info.mp_adapter_id_str;

	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);

	assert(p_nthw_stat);

	while (!p_drv->ntdrv.b_shutdown) {
		usleep(100 * 100);

		/* Ask the HW to DMA a fresh statistics snapshot */
		nthw_stat_trigger(p_nthw_stat);

		uint32_t loop = 0;

		/* Wait for the snapshot: -1 timestamp means "not written yet" */
		while ((!p_drv->ntdrv.b_shutdown) &&
				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
			usleep(1 * 100);

			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
					(++loop & 0x3fff) == 0) {
				uint32_t sf_ram_of =
					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t descr_fifo_of =
				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);

				uint32_t dbg_merge =
					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
				uint32_t mac_if_err =
					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);

				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
				NT_LOG(ERR, ETHDEV,
				       "SF RAM Overflow     : %08x\n",
				       sf_ram_of);
				NT_LOG(ERR, ETHDEV,
				       "Descr Fifo Overflow : %08x\n",
				       descr_fifo_of);
				NT_LOG(ERR, ETHDEV,
				       "DBG Merge           : %08x\n",
				       dbg_merge);
				NT_LOG(ERR, ETHDEV,
				       "MAC If Errors       : %08x\n",
				       mac_if_err);
			}
		}

		/* Check then collect */
		{
			pthread_mutex_lock(&p_nt_drv->stat_lck);
			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
			pthread_mutex_unlock(&p_nt_drv->stat_lck);
		}
	}

	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);

	return NULL;
}
+
/*
 * Shared book-keeping for virtual-port representor setup used by the PCI
 * probe path.
 * NOTE(review): field semantics inferred from names only - confirm against
 * setup_virtual_pf_representor_base().
 */
static struct {
	struct rte_pci_device *vpf_dev;	/* PCI device of the backing PF */
	struct rte_eth_devargs eth_da;	/* parsed representor devargs */
	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];	/* per virtual port */
	uint16_t pf_backer_port_id;	/* ethdev port id of the backing PF */
} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = {{ 0 }};
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id    = i;
+			queue_ids[i].hw_id = start_queue + i;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Tear down all queue resources for every probed port instance.
+ *
+ * @eth_dev is unused: teardown walks the global pmd_intern_base list and
+ * the rel_virt_queue[] bookkeeping table instead, so a single call releases
+ * everything (the ctrl+C shutdown thread calls this with a dummy device).
+ *
+ * Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Release each port's managed Tx/Rx virtqueues and their HW queues */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release queues registered via register_release_virtqueue_info() */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: flag the PMD for shutdown; any other signal is
+ * forwarded to the previously installed handler.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		kill_pmd = 1;
+		return;
+	}
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/*
+ * Watcher thread started by init_shutdown(): polls the kill_pmd flag set
+ * by the SIGINT handler, runs driver teardown, then restores the previous
+ * handler and re-raises SIGINT so the process terminates normally.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	/* nthw_pci_dev_deinit() ignores its argument; a dummy is enough */
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT handler and spawn the shutdown watcher thread.
+ * Also derives a rough TSC cycles-per-second value (rte_tsc_freq) used
+ * to rate-limit statistics polling.
+ *
+ * Returns 0 on success, -1 if the shutdown thread cannot be created.
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* Fix: pthread_create() result was silently ignored */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot create shutdown thread\n",
+		       __func__);
+		signal(SIGINT, previous_handler);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point: validates that the EAL environment meets the
+ * PMD's requirements (PCI bus, VFIO with IOMMU, hugepages), rejects
+ * secondary endpoints of bifurcated adapters, then runs full device
+ * initialization and installs the SIGINT shutdown handler.
+ *
+ * Returns the result of nthw_pci_dev_init(), or -1 if an environment
+ * check fails.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* The PMD requires a PCI-enabled EAL */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	/* VFIO no-IOMMU mode is not supported by this driver */
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	/* Non-PA IOVA mode works but is only warned about (performance) */
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	/* Hugepages are mandatory */
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/*
+	 * On bifurcated NT200A01/NT50B01 adapters only the primary endpoint
+	 * (subsystem device id 0x01) is driven; secondary endpoints bail out.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	/* NOTE(review): shutdown handler is installed even when
+	 * nthw_pci_dev_init() failed - confirm this is intended.
+	 */
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove entry point: deinitializes every port via the generic helper */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	ret = rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+	return ret;
+}
+
+/*
+ * Backend init for the NT logging layer: registers one EAL log type per
+ * NT log module, defaulting each to INFO level. Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int i;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (i = NT_LOG_MODULE_GENERAL; i < NT_LOG_MODULE_END; ++i) {
+		const int idx = NT_LOG_MODULE_INDEX(i);
+		const char *mod_name = nt_log_module_eal_name[idx];
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(mod_name,
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Backend log sink for the NT logging layer: maps an NT level/module pair
+ * onto the corresponding EAL level and registered log type, then forwards
+ * the message to rte_vlog().
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	/* NT level -> EAL level; anything unrecognized logs as DEBUG */
+	switch (level) {
+	case NT_LOG_ERR:
+		rte_level = RTE_LOG_ERR;
+		break;
+	case NT_LOG_WRN:
+		rte_level = RTE_LOG_WARNING;
+		break;
+	case NT_LOG_INF:
+		rte_level = RTE_LOG_INFO;
+		break;
+	default:
+		rte_level = RTE_LOG_DEBUG;
+		break;
+	}
+
+	/* Known modules use their registered EAL log type; unknown ids
+	 * are passed through unchanged.
+	 */
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		rte_module =
+			(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	} else {
+		rte_module = module;
+	}
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/* Return 1 if @module logs at DEBUG level, 0 if not, -1 for an invalid module */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+
+	const int idx = NT_LOG_MODULE_INDEX(module);
+
+	return rte_log_get_level(nt_log_module_logtype[idx]) == RTE_LOG_DEBUG;
+}
+
+/* Constructor: install the NT log backend before any PMD code logs */
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+static void ntnic_rte_init(void)
+{
+	/* Static lifetime: nt_log_init() keeps a pointer to this table */
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor and registration for the ntnic PMD */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Remember a virtqueue so it can be released at driver teardown.
+ *
+ * @vq:      queue to track
+ * @rx:      non-zero for an Rx queue, zero for Tx
+ * @managed: non-zero if the queue is a managed queue
+ *
+ * Returns 0 on success, -1 if the bookkeeping table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+	return -1;
+}
+
+/* Forget a previously registered virtqueue; returns -1 if it was not found */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+	return -1;
+}
+
+/* Look up the pmd_internals instance bound to vhost id @vhid, or NULL */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *it;
+
+	for (it = pmd_intern_base; it != NULL; it = it->next) {
+		if (it->vhid == vhid)
+			return it;
+	}
+	return NULL;
+}
+
+/*
+ * Find the pmd_internals instance whose vhost path equals @path.
+ * Returns NULL if no instance matches.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		int cmp = strcmp(intern->vhost_path, path);
+
+		/* Fix: leftover raw printf() debug output replaced with the
+		 * driver's logging macro; strcmp() evaluated once, not twice.
+		 */
+		NT_LOG(DBG, VDPA, "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path, cmp);
+		if (cmp == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues" mapping of the form "...[p0:q0,p1:q1;...]" and
+ * record, per virtual port, the requested queue count in @portq.
+ *
+ * @str:   devargs substring containing the bracketed list; modified in
+ *         place while parsing.
+ * @portq: array indexed by port number, at least
+ *         MAX_FPGA_VIRTUAL_PORTS_SUPPORTED entries.
+ *
+ * Entries with an out-of-range port or queue count are ignored.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	size_t len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Locate the opening bracket of the mapping list */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	/* Fix: use size_t for strlen() result instead of int */
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Scan backwards for the matching closing bracket */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* Fix: strtok() is not reentrant - use strtok_r() */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor devargs for this PF and stash the result in the
+ * global rep state. A trailing "portqueues" extension is consumed and
+ * stripped before handing the string to rte_eth_devargs_parse().
+ *
+ * Returns the number of representor ports found (0 if none), or -1 on
+ * a devargs parse error.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	/* Fix: zero-initialize the whole struct - it is later copied into
+	 * rep.eth_da, and only one field was previously initialized.
+	 */
+	struct rte_eth_devargs eth_da = { 0 };
+
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual port / representor.
+ *
+ * Assigns representor ids from the PF's parsed devargs in creation order
+ * (via the static next_rep_p counter - not thread-safe; assumed to run
+ * only from single-threaded probe context - TODO confirm), falling back
+ * to the VF number when no representor list applies. On return, *n_vq
+ * holds the per-port queue count from the "portqueues" mapping, or 1.
+ *
+ * Returns the ethdev, or NULL on allocation failure.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			/* Roll back the ethdev allocation on OOM */
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Representors declared on the PF take their ids from its devargs;
+	 * otherwise fall back to the VF number of this device.
+	 */
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the device name of @dev, or NULL if unavailable */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	if (!dev || !dev->device.name)
+		return NULL;
+	return dev->device.name;
+}
+
+/* Recognized devargs keys for virtual-port creation (NULL-terminated) */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id    = i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	__atomic_store_n(&internals->vhid, vhid, __ATOMIC_RELAXED);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
/*
 * LAG
 */

/*
 * Hash-distribution percentage assigned to port 0 (port 1 receives the
 * remainder); passed to lag_set_config(..., FLOW_LAG_SET_BALANCE, ...).
 */
#define LAG_PORT0_ONLY (100)
#define LAG_BALANCED_50_50 (50)
#define LAG_PORT1_ONLY (0)

/* Value and indices used with lag_set_config(..., FLOW_LAG_SET_ALL, index, value). */
#define LAG_NO_TX (0)
#define LAG_PORT0_INDEX (1)
#define LAG_PORT1_INDEX (2)
#define LAG_HASH_INDEX (3)
+
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	int retval = 0;
+
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
+	return retval;
+}
+
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+static int lag_get_status(lag_config_t *config)
+{
+	uint8_t port0 = lag_get_link_status(config, 0);
+
+	uint8_t port1 = lag_get_link_status(config, 1);
+
+	uint8_t status = (port1 << 1 | port0);
+	return status;
+}
+
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->primary_port == 0) {
+		/* If port 0 is the active primary, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active primary, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->backup_port == 0) {
+		/* If port 0 is the active backup, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active backup, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+typedef int (*lag_aa_action)(lag_config_t *config);
+
/*
 * Combined link state of the two LAG ports: port 0 is the LSB (bit 0) and
 * port 1 is the MSB (bit 1), matching the value built by lag_get_status().
 * Plain hex constants replace the previous 0b.. binary literals, which are
 * a GCC extension and only standard from C23.
 */
enum lag_state_e {
	P0DOWN_P1DOWN = 0x0,
	P0UP_P1DOWN = 0x1,
	P0DOWN_P1UP = 0x2,
	P0UP_P1UP = 0x3
};
+
/*
 * One row of the active/active transition table: the handler to invoke
 * when the observed link state changes from src_state to dst_state.
 */
struct lag_action_s {
	enum lag_state_e src_state;
	enum lag_state_e dst_state;
	lag_aa_action action;
};
+
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	uint32_t i;
+
+	for (i = 0; i < sizeof(actions) / sizeof(struct lag_action_s); i++) {
+		if (actions[i].src_state == current_state &&
+				actions[i].dst_state == new_state)
+			return actions[i].action;
+	}
+	return NULL;
+}
+
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..ee0d84ce82
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
/* MAC address table sizes per port (element 0 is the default address). */
#define NUM_MAC_ADDRS_PER_PORT (16U)
#define NUM_MULTICAST_ADDRS_PER_PORT (16U)

#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256

/* Total max ports per NT NFV NIC */
#define MAX_NTNIC_PORTS 2

/* Max RSS queues */
#define MAX_QUEUES 125

/* Scatter-gather descriptor ring sizes and packet buffer sizes (bytes). */
#define SG_NB_HW_RX_DESCRIPTORS 1024
#define SG_NB_HW_TX_DESCRIPTORS 1024
#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)

/* Size in bytes of the scatter-gather packet descriptor header (see _pkt_hdr_rx/_tx). */
#define SG_HDR_SIZE 12
+
/* VQ buffers needed to fit all data in packet + header */
/*
 * Number of SG_HW_TX_PKT_BUFFER_SIZE-sized virtqueue segments needed to
 * hold _data_size_ payload bytes plus the SG_HDR_SIZE header; minimum 1.
 * GCC statement expression: the argument is evaluated exactly once.
 */
#define NUM_VQ_SEGS(_data_size_) \
	({ \
		size_t _size = (_data_size_); \
		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
		_segment_count; \
	})
+
+
/*
 * Map a Tx packet index to its descriptor slot in the ring.  Relies on a
 * local variable named first_vq_descr_idx at the expansion site.
 */
#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)

/* Next descriptor slot in the ring, modulo the ring size. */
#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)

/* NOTE(review): presumably bounds the release-virtqueue bookkeeping used by
 * register_release_virtqueue_info() - confirm against the implementation.
 */
#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
/* Structs: */

/*
 * Note: a duplicate "#define SG_HDR_SIZE 12" was removed here; the macro
 * is already defined earlier in this header.
 */
+
/*
 * Rx scatter-gather packet descriptor header prepended to packet data.
 * The bitfield layout must total exactly SG_HDR_SIZE bytes (verified by
 * the compile-time checks below).
 */
struct _pkt_hdr_rx {
	uint32_t cap_len : 14;
	uint32_t fid : 10;
	uint32_t ofs1 : 8;
	uint32_t ip_prot : 8;
	uint32_t port : 13;
	uint32_t descr : 8;
	uint32_t descr_12b : 1;
	uint32_t color_type : 2;
	uint32_t color : 32;
};

/*
 * Tx scatter-gather packet descriptor header; same SG_HDR_SIZE size
 * contract as the Rx header.
 */
struct _pkt_hdr_tx {
	uint32_t cap_len : 14;
	uint32_t lso_cso0 : 9;
	uint32_t lso_cso1 : 9;
	uint32_t lso_cso2 : 8;
	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
	uint32_t bypass_port : 13;
	uint32_t descr : 8;
	uint32_t descr_12b : 1;
	uint32_t color_type : 2;
	uint32_t color : 32;
};

/* Compile time verification of scatter gather header size. */
typedef char check_sg_pkt_rx_hdr_size
[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
typedef char check_sg_pkt_tx_hdr_size
[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+typedef void *handle_t;
+
/*
 * Host-memory resources backing one hardware queue: a control-area
 * descriptor plus an array of packet-buffer descriptors.
 */
struct hwq_s {
	int vf_num; /* presumably the owning virtual-function number - confirm */
	struct nthw_memory_descriptor virt_queues_ctrl;
	struct nthw_memory_descriptor *pkt_buffers;
};
+
/* Per-queue state for one SCG Rx queue (one entry of pmd_internals.rxq_scg). */
struct ntnic_rx_queue {
	struct flow_queue_id_s
		queue; /* queue info - user id and hw queue index */

	struct rte_mempool *mb_pool; /* mbuf memory pool */
	uint16_t buf_size; /* size of data area in mbuf */
	unsigned long rx_pkts; /* Rx packet statistics */
	unsigned long rx_bytes; /* Rx bytes statistics */
	unsigned long err_pkts; /* Rx error packet statistics */
	int enabled; /* Enabling/disabling of this queue */

	struct hwq_s hwq; /* host-memory queue resources */
	struct nthw_virt_queue *vq;
	int nb_hw_rx_descr;
	nt_meta_port_type_t type;
	uint32_t port; /* Rx port for this queue */
	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */

} __rte_cache_aligned;
+
/* Per-queue state for one SCG Tx queue (one entry of pmd_internals.txq_scg). */
struct ntnic_tx_queue {
	struct flow_queue_id_s
		queue; /* queue info - user id and hw queue index */
	struct hwq_s hwq; /* host-memory queue resources */
	struct nthw_virt_queue *vq;
	int nb_hw_tx_descr;
	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
	int target_id;
	nt_meta_port_type_t type;
	/* only used for exception tx queue from OVS SW switching */
	int rss_target_id;

	uint32_t port; /* Tx port for this queue */
	unsigned long tx_pkts; /* Tx packet statistics */
	unsigned long tx_bytes; /* Tx bytes statistics */
	unsigned long err_pkts; /* Tx error packet stat */
	int enabled; /* Enabling/disabling of this queue */
	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
} __rte_cache_aligned;
+
#define MAX_ARRAY_ENTRIES MAX_QUEUES
/* Fixed-capacity list of uint32_t values with an explicit element count. */
struct array_s {
	uint32_t value[MAX_ARRAY_ENTRIES];
	int count; /* number of valid entries in value[] */
};
+
/* Configuration related to LAG management */
typedef struct {
	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */

	int8_t primary_port;
	int8_t backup_port;

	uint32_t ntpl_rx_id;

	pthread_t lag_tid; /* LAG management thread */
	uint8_t lag_thread_active; /* cleared to stop the management thread */

	struct pmd_internals *internals;
} lag_config_t;
+
/* Bonding modes handled by the LAG management thread. */
#define BONDING_MODE_ACTIVE_BACKUP (1)
#define BONDING_MODE_8023AD (4)
/* A registered meter profile, linked into pmd_internals.mtr_profiles. */
struct nt_mtr_profile {
	LIST_ENTRY(nt_mtr_profile) next;
	uint32_t profile_id;
	struct rte_mtr_meter_profile profile;
};

/* A meter instance referencing one profile, linked into pmd_internals.mtrs. */
struct nt_mtr {
	LIST_ENTRY(nt_mtr) next;
	uint32_t mtr_id;
	int shared;
	struct nt_mtr_profile *profile;
};
+
/* Negotiated virtio transport for a virtual port (split/packed ring or relay). */
enum virt_port_comm {
	VIRT_PORT_NEGOTIATED_NONE,
	VIRT_PORT_NEGOTIATED_SPLIT,
	VIRT_PORT_NEGOTIATED_PACKED,
	VIRT_PORT_USE_RELAY
};
+
#define MAX_PATH_LEN 128

/*
 * Per-port private state of the PMD; stored in rte_eth_dev.data->dev_private
 * and linked into the file-global pmd_intern_base list via .next.
 */
struct pmd_internals {
	const struct rte_pci_device *pci_dev;

	struct flow_eth_dev *flw_dev; /* flow device backing this port */

	char name[20];
	char vhost_path[MAX_PATH_LEN];

	int n_intf_no;
	int if_index;

	int lpbk_mode;

	uint8_t nb_ports_on_adapter;
	uint8_t ts_multiplier;
	uint16_t min_tx_pkt_size;
	uint16_t max_tx_pkt_size;

	unsigned int nb_rx_queues; /* Number of Rx queues configured */
	unsigned int nb_tx_queues; /* Number of Tx queues configured */
	uint32_t port;
	uint8_t port_id;

	nt_meta_port_type_t type;
	struct flow_queue_id_s vpq[MAX_QUEUES]; /* virtual-port queues */
	unsigned int vpq_nb_vq; /* number of valid entries in vpq[] */
	int vhid; /* if a virtual port type - the vhid */
	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
	uint32_t vlan;

	lag_config_t *lag_config;

	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */

	struct drv_s *p_drv;
	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
	/* Multicast ethernet (MAC) addresses. */
	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];

	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
	LIST_HEAD(_mtrs, nt_mtr) mtrs;

	uint64_t last_stat_rtc;
	uint64_t rx_missed;

	struct pmd_internals *next; /* next port in pmd_intern_base list */
};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
/* Upper bounds for a converted rte_flow pattern and its action list. */
#define MAX_ELEMENTS 64
#define MAX_ACTIONS 32

/* Number of flow-stat id slots; must stay a power of two (used as a mask). */
#define MAX_COLOR_FLOW_STATS 0x400
+
/* Converted rte_flow pattern: fixed-size array of flow engine elements. */
struct cnv_match_s {
	struct flow_elem flow_elem[MAX_ELEMENTS];
};

/* Tunnel definition plus the converted match backing it. */
struct tun_def_s {
	struct flow_elem *tun_definition;
	struct cnv_match_s match;
};

/* Converted flow attributes together with the converted match pattern. */
struct cnv_attr_s {
	struct cnv_match_s match;
	struct flow_attr attr;
};

/* Converted action list plus storage for the per-action payloads. */
struct cnv_action_s {
	struct flow_action flow_actions[MAX_ACTIONS];
	struct tun_def_s tun_def;
	struct flow_action_rss flow_rss;
	struct rte_flow_action_mark mark;
	struct flow_action_raw_encap encap;
	struct flow_action_raw_decap decap;
	struct flow_action_queue queue;
};
+
/*
 * Only needed because it eases the use of statistics through NTAPI
 * for faster integration into NTAPI version of driver
 * Therefore, this is only a good idea when running on a temporary NTAPI
 * The query() functionality must go to flow engine, when moved to Open Source driver
 */

/* Driver-side flow handle handed back to rte_flow users. */
struct rte_flow {
	void *flw_hdl; /* underlying flow engine handle */
	int used; /* non-zero while this slot is allocated */
	uint32_t flow_stat_id; /* index into flow_stat_id_map */

	/* Cached statistics for query(). */
	uint64_t stat_pkts;
	uint64_t stat_bytes;
	uint8_t stat_tcp_flags;
};
+
/*
 * Driver-internal rte_flow item types, based at INT_MIN so they can never
 * collide with the public RTE_FLOW_ITEM_TYPE_* values.
 */
enum nt_rte_flow_item_type {
	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	NT_RTE_FLOW_ITEM_TYPE_TAG,
	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
};

/* Driver-internal rte_flow action types; same collision-avoidance scheme. */
enum nt_rte_flow_action_type {
	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	NT_RTE_FLOW_ACTION_TYPE_TAG,
	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
	NT_RTE_FLOW_ACTION_TYPE_JUMP,
};
+
/*
 * NOTE(review): these objects have static storage in a header, so every
 * translation unit that includes this file gets its own private copy of
 * the tables, flag and lock - confirm this header is included from a
 * single .c file only.
 */
/* Set once initialize_global_cnv_tables() has populated the tables. */
static int convert_tables_initialized;

#define MAX_RTE_ENUM_INDEX 127

/* rte_flow enum value -> internal flow engine value; -1 = unsupported. */
static int elem_list[MAX_RTE_ENUM_INDEX + 1];
static int action_list[MAX_RTE_ENUM_INDEX + 1];

#ifdef RTE_FLOW_DEBUG
static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
#endif

/*
 * Bounds-checked table lookups.  GCC statement expressions: the argument
 * is evaluated exactly once; out-of-range values map to -1.
 */
#define CNV_TO_ELEM(item) \
	({ \
		int _temp_item = (item); \
		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
		elem_list[_temp_item] : -1); \
	})


#define CNV_TO_ACTION(action)                                   \
	({                                                          \
		int _temp_action = (action);                            \
		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
		action_list[_temp_action] : -1; \
	})


/* Slot i holds (mark + 1) while flow stat id i is in use; 0 = free. */
static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error) {
+		error->cause = NULL;
+		error->message = flow_error->message;
+
+		if (flow_error->type == FLOW_ERROR_NONE ||
+				flow_error->type == FLOW_ERROR_SUCCESS)
+			error->type = RTE_FLOW_ERROR_TYPE_NONE;
+
+		else
+			error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+	}
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	rte_spinlock_lock(&flow_lock);
+	uint32_t ret = create_flow_stat_id_locked(mark);
+
+	rte_spinlock_unlock(&flow_lock);
+	return ret;
+}
+
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id < MAX_COLOR_FLOW_STATS)
+		flow_stat_id_map[flow_stat_id] = 0;
+}
+
/*
 * One-time population of the rte_flow -> flow engine conversion tables
 * (elem_list, action_list and, under RTE_FLOW_DEBUG, their string
 * mirrors).  Unmapped enum values remain -1, meaning "unsupported".
 * NOTE(review): convert_tables_initialized is tested and set without a
 * lock - confirm all callers serialize initialization.
 */
static void initialize_global_cnv_tables(void)
{
	if (convert_tables_initialized)
		return;

	/* Pattern item mapping. */
	memset(elem_list, -1, sizeof(elem_list));
	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;

#ifdef RTE_FLOW_DEBUG
	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
#endif

	/* Action mapping. */
	memset(action_list, -1, sizeof(action_list));
	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
		FLOW_ACTION_TYPE_VXLAN_ENCAP;
	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
		FLOW_ACTION_TYPE_VXLAN_DECAP;
	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
		FLOW_ACTION_TYPE_PUSH_VLAN;
	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
		FLOW_ACTION_TYPE_SET_VLAN_VID;
	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
		FLOW_ACTION_TYPE_SET_VLAN_PCP;
	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
		FLOW_ACTION_TYPE_POP_VLAN;
	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
		FLOW_ACTION_TYPE_RAW_ENCAP;
	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
		FLOW_ACTION_TYPE_RAW_DECAP;
	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
		FLOW_ACTION_TYPE_MODIFY_FIELD;

#ifdef RTE_FLOW_DEBUG
	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
		"FLOW_ACTION_TYPE_SET_TAG";
	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
		"FLOW_ACTION_TYPE_PORT_ID";
	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
		"FLOW_ACTION_TYPE_VXLAN_DECAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
		"FLOW_ACTION_TYPE_PUSH_VLAN";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
		"FLOW_ACTION_TYPE_SET_VLAN_VID";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
		"FLOW_ACTION_TYPE_POP_VLAN";
	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
		"FLOW_ACTION_TYPE_RAW_ENCAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
		"FLOW_ACTION_TYPE_RAW_DECAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
		"FLOW_ACTION_TYPE_MODIFY_FIELD";
#endif

	convert_tables_initialized = 1;
}
+
+/*
+ * Walk a raw packet byte buffer (the data of a RAW_ENCAP/RAW_DECAP action)
+ * and emit one flow_elem per recognized header layer:
+ *   ETH -> [stacked VLANs] -> IPv4/IPv6 -> ICMP/TCP/UDP/SCTP
+ *       -> [GTPv1-U header + optional extension headers]
+ *
+ * data:     packet bytes to interpret.
+ * preserve: optional buffer parallel to data; when non-NULL, the slice of
+ *           preserve matching each header is exposed as that element's mask.
+ * size:     number of valid bytes in data (and preserve, if given).
+ * out:      receives the parsed elements; a FLOW_ELEM_TYPE_END terminator
+ *           is always appended.
+ *
+ * Returns the number of elements written (terminator included), or -1 when
+ * a header is truncated, an unknown L3/L4 protocol is met, or trailing
+ * bytes remain after the last recognized header.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	/* hdri: next free slot in out[]; pkti: current parse offset in data[] */
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	/* spec/mask alias into the caller's buffers - nothing is copied */
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN: consume any number of stacked 802.1Q/802.1ad tags */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3: EtherType must agree with the IP version nibble */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* byte 9 of the IPv4 header is the protocol field */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* byte 6 of the IPv6 header is the next-header field */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4: dispatch on the IP protocol number */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* UDP destination port 2152 marks a GTPv1-U encapsulation */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			/*
+			 * Each extension header starts with its length in
+			 * 4-byte units and ends with the next-extension type.
+			 * NOTE(review): if data[pkti] == 0, ext_len is 0,
+			 * pkti never advances and data[pkti - 1] is re-read -
+			 * possible endless loop / OOB read on malformed
+			 * input; confirm callers validate the buffer.
+			 */
+			while (next_ext) {
+				size_t ext_len = data[pkti] * 4;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Anything left over is not a recognized header - reject */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Copy the relevant RTE flow attributes (group, priority) into the
+ * filter-layer attribute container.  A NULL attr leaves the zeroed
+ * defaults in place.  Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(struct flow_attr));
+
+	if (attr == NULL)
+		return 0;
+
+	attribute->attr.group = attr->group;
+	attribute->attr.priority = attr->priority;
+	return 0;
+}
+
+/*
+ * Convert an RTE Flow item list into the filter-layer element list.
+ *
+ * match:    output container; receives the converted elements in
+ *           match->flow_elem[], terminated by FLOW_ELEM_TYPE_END.
+ * items:    RTE item array, terminated by RTE_FLOW_ITEM_TYPE_END.
+ * max_elem: capacity of match->flow_elem[].
+ *
+ * spec/mask pointers are aliased from the RTE items, not copied, so the
+ * items must outlive the match container.  Ranges (item->last) are not
+ * supported.  Returns 0 on success, -1 on NULL input, unknown item type,
+ * range use, or too many elements.
+ */
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		/* Map the RTE item type to the filter element type */
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			/* Vendor-specific tunnel item has no table entry */
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			/*
+			 * NOTE(review): assumes items[iter_idx].type is a
+			 * valid index into elem_list_str when >= 0 - confirm
+			 * the table covers all RTE item types in use.
+			 */
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			/* Dump the spec/mask of selected element types */
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				/*
+				 * NOTE(review): spec/mask are dereferenced
+				 * without NULL checks here, unlike the other
+				 * debug cases - confirm VXLAN items always
+				 * carry both.
+				 */
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			/* Alias the RTE item's spec/mask into the output */
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE Flow action list into the filter-layer action list used
+ * by the vswitch (OVS full offload) profile.
+ *
+ * action:       output container; receives the converted actions in
+ *               action->flow_actions[] plus storage for converted confs
+ *               (RSS, tunnel definition, mark).
+ * actions:      RTE action array, terminated by RTE_FLOW_ACTION_TYPE_END.
+ * max_elem:     capacity of action->flow_actions[].
+ * flow_stat_id: out; the color flow-stat id assigned via the MARK action
+ *               (a mark is inserted implicitly when the caller supplied
+ *               none, since the FPGA needs one for flow statistics).
+ *
+ * Returns 0 on success, -1 on unknown action or overflow.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			/* Vendor-specific tunnel-set has no table entry */
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1:
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* PRIX64 requires a uint64_t argument */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				/* The tunnel definition is itself an item list */
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Map the user's mark to a flow-stat id */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/*
+					 * Inserting the implicit mark consumes
+					 * an extra slot; both the mark and the
+					 * moved END action must fit.
+					 */
+					if (aidx + 1 >= max_elem)
+						return -1;
+
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			/*
+			 * A list that exactly fills the array and ends with
+			 * END is complete; only fail when more actions would
+			 * still have to be stored.
+			 */
+			if (aidx == max_elem && type != FLOW_ACTION_TYPE_END)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert an RTE Flow action list into the filter-layer action list used
+ * by the inline (full HW offload) profile.
+ *
+ * action:       output container; receives the converted actions in
+ *               action->flow_actions[] plus storage for converted confs
+ *               (RSS, raw encap/decap, queue).
+ * actions:      RTE action array, terminated by RTE_FLOW_ACTION_TYPE_END.
+ * max_elem:     capacity of action->flow_actions[].
+ * queue_offset: added to RTE queue indices to map them onto the queue
+ *               range allocated for a VF/vDPA port.
+ *
+ * Returns 0 on success, negative on unsupported action, malformed raw
+ * encap/decap data, or overflow.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* PRIX64 requires a uint64_t argument */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw bytes into header elements */
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				/* Parse the raw bytes into header elements */
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Rebase the queue index for VF/vDPA ports */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible action: pass the conf through */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			/*
+			 * A list that exactly fills the array and ends with
+			 * END is complete; only fail when more actions would
+			 * still have to be stored.
+			 */
+			if (aidx == max_elem && type != FLOW_ACTION_TYPE_END)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..6b19c2308e
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Tell whether a flow handle is a "typecast" handle, i.e. a raw
+ * filter-layer handle cast to struct rte_flow * rather than a pointer
+ * into the nt_flows[] table.  Returns non-zero for typecast handles.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *table_start = &nt_flows[0];
+	const void *table_end = &nt_flows[MAX_RTE_FLOWS - 1];
+	const void *handle = flow;
+
+	/* Anything outside the table's address range is a typecast handle */
+	return handle < table_start || handle > table_end;
+}
+
+/*
+ * Translate an RTE Flow description (attr/items/actions) into the
+ * filter-layer representation, selecting the action converter that
+ * matches the adapter profile (inline vs. vswitch).
+ *
+ * Fills *attribute, *match, *action and, for the vswitch profile,
+ * *flow_stat_id.  Returns 0 on success, -1 with *error set on failure.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Check dev before it is dereferenced; previously dev->p_drv and
+	 * dev->type were read before this NULL check.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback.  Destroys the flow in the filter layer and,
+ * for flows tracked in nt_flows[], releases the table slot, the color
+ * flow-stat id and the cached statistics.
+ *
+ * Returns 0 on success (a NULL flow is a no-op), negative on failure.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/*
+		 * Typecast handles are raw filter-layer handles, not
+		 * struct rte_flow objects - return before the stat-field
+		 * writes below, which would otherwise corrupt memory.
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: convert the RTE flow description and ask
+ * the filter layer whether it would be accepted, without creating it.
+ *
+ * Returns 0 when valid, -EINVAL on conversion failure, or the filter
+ * layer's negative result (with *error filled in) otherwise.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+	int ret;
+
+	/* Convert the RTE description into the filter representation */
+	ret = convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			   &action, error, &flow_stat_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	ret = flow_validate(dev->flw_dev, match.flow_elem, action.flow_actions,
+			    &flow_error);
+	if (ret < 0)
+		convert_error(error, &flow_error);
+
+	return ret;
+}
+
+/*
+ * rte_flow create callback.  Converts the RTE flow description, then
+ * either hands it straight to the filter layer (inline profile, group > 0;
+ * the returned pointer is a "typecast" handle) or reserves a slot in
+ * nt_flows[] under flow_lock and stores the filter handle there.
+ *
+ * Returns the flow handle, or NULL with *error set on failure.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	/*
+	 * NOTE(review): function-local static shared by all callers -
+	 * confirm flow creation is serialized at a higher level.
+	 */
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline profile, group > 0: no nt_flows[] bookkeeping is done */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Reserve the first free nt_flows[] slot under the lock */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			/* An invalid stat id leaves flow == NULL -> fail */
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			/* Creation failed: release the slot and stat id */
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last *global* statistics refresh (all ports) */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull accumulated HW statistics into the per-port software counters.
+ * Rate-limited twice: at most once per second per port (last_stat_rtc in
+ * 'internals') and, for the global color/flow counters, at most once per
+ * second across all ports (file-scope last_stat_rtc).
+ * Takes 'hwlock' for the whole poll and 'stat_lck' while reading/updating
+ * the adapter counter bases.
+ * Returns 0 on success (including the throttled early-outs), -1 on a bad
+ * port index or missing stat context.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/*
+	 * NOTE(review): '>' permits if_index == NUM_ADAPTER_PORTS_MAX;
+	 * confirm the a_port_* arrays really have NUM_ADAPTER_PORTS_MAX + 1
+	 * entries, otherwise this should be '>='.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* no error source for vports - TODO confirm */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	/* Fold per-color HW deltas into each tracked flow's soft counters */
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow .query callback.
+ * Only RTE_FLOW_ACTION_TYPE_COUNT is supported: refreshes the soft counters
+ * via poll_statistics() and reports the flow's packet/byte totals, optionally
+ * resetting them when qcnt->reset is set.
+ * Returns 0 on success, -1 with 'err' set for type-cast handles or
+ * unsupported query actions.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* Inline-profile handles carry no local counters to query */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug-only: dump every field of an rte_flow_tunnel to the filter log. */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	/* NOTE(review): htons() used to render network-order ports; intent
+	 * appears to be ntohs() (same result on byte level) - confirm.
+	 */
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * PMD-private action list handed back by tunnel_decap_set: slot [0] is the
+ * NT tunnel-set marker, slot [1] is filled in per call with the decap action.
+ * NOTE(review): file-scope mutable array shared by all callers - not safe
+ * for concurrent tunnel_decap_set calls; confirm callers are serialized.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel-offload .tunnel_decap_set callback.
+ * Only VXLAN tunnels are supported; returns the two PMD actions above,
+ * -ENOTSUP for any other tunnel type.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* PMD-private match item returned by tunnel_match: the NT tunnel marker. */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel-offload .tunnel_match callback.
+ * Always hands back the single static NT tunnel item; the tunnel argument
+ * itself is not inspected. Returns 0.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow .get_restore_info callback.
+ * Reconstructs the VXLAN tunnel a missed packet belonged to from the FDIR
+ * mark the HW placed in the mbuf: the high byte of hash.fdir.hi encodes the
+ * port, the low 24 bits of hash.fdir.lo the tunnel stat id. The full tunnel
+ * definition is then looked up via flow_get_tunnel_definition().
+ * Returns 0 with 'info' filled in on a hit, -EINVAL otherwise.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* TTL/TOS are not stored in the tunnel cfg; report defaults */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DO_NOT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow .tunnel_action_decap_release callback.
+ * The PMD actions are static storage (see _pmd_actions), so there is
+ * nothing to free; always returns 0.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow .tunnel_item_release callback.
+ * The PMD item is static storage (see _pmd_items), so there is nothing to
+ * free; always returns 0.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow ops table exported by this PMD. .flush is intentionally left
+ * unsupported (NULL); the remaining callbacks are defined above.
+ */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): '__'-prefixed guard names are reserved identifiers in C;
+ * consider NTNIC_FILTER_H instead.
+ */
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow on behalf of a client, using an already-converted
+ * attribute/match/action triple and a pre-allocated color stat id.
+ * Returns the flow handle, or NULL with 'error' set on failure.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* A (RTE flag, NT flag) pair used by the RSS translation table below. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to the { RTE_<name>, NT_<name> } pair for one RSS flag. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Translation table between DPDK RSS hash-field flags and the NT
+ * equivalents. Flags absent from this table have no NT counterpart.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Translate a single RTE_ETH_RSS_* flag to its NT_ETH_RSS_* counterpart
+ * using the rte_eth_rss_to_nt table.
+ * Returns a pointer to the NT flag, or NULL when no mapping exists.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); idx++) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+	return NULL; /* no NT equivalent for this flag */
+}
+
+/*
+ * Reverse translation: map a single NT_ETH_RSS_* flag back to the
+ * RTE_ETH_RSS_* flag it came from, via the rte_eth_rss_to_nt table.
+ * Returns a pointer to the RTE flag, or NULL when no mapping exists.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); idx++) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+	return NULL; /* no RTE equivalent for this flag */
+}
+
+/*
+ * Convert a DPDK RSS hash-field bitmask to the NT representation.
+ * Each set bit is translated individually; bits without an NT mapping are
+ * logged and dropped, so the result may cover fewer fields than requested.
+ * NOTE(review): 'uint' is a non-standard type (sys/types.h) - confirm it is
+ * available on all supported toolchains.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (uint i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %d is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Convert an NT RSS field set back to the DPDK bitmask form.
+ * NT RSS is a strict subset of the RTE options, so every NT bit must map;
+ * the assert documents (and in debug builds enforces) that invariant.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (uint i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			/* string literal makes the assert message self-describing */
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Include guard added: header previously had none, so a double inclusion
+ * would redeclare the prototypes and re-include flow_api.h unnecessarily.
+ */
+#ifndef NTNIC_HSHCONFIG_H
+#define NTNIC_HSHCONFIG_H
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* NTNIC_HSHCONFIG_H */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* MSB of a 32-bit id: set => the meter/profile id refers to egress. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/*
+ * Look up a meter profile by id in the per-device profile list.
+ * Returns the profile, or NULL if the id is unknown.
+ */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
+/*
+ * rte_mtr .meter_profile_add callback (vswitch path).
+ * Stores a copy of the profile in the per-device list. Only virtual ports
+ * or egress profiles (MSB of the id set) are accepted; ingress metering on
+ * physical ports is rejected.
+ * Returns 0 on success, negative rte_mtr error otherwise.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	/* egress profiles are tagged by the id's most significant bit */
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr .meter_profile_delete callback (vswitch path).
+ * Unlinks and frees the profile. NOTE(review): no check that a meter still
+ * references the profile - confirm deletion while in use is prevented
+ * elsewhere.
+ * Returns 0 on success, -ENODEV via rte_mtr error if the id is unknown.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/*
+ * Look up a meter object by id in the per-device meter list.
+ * Returns the meter, or NULL if the id is unknown.
+ */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *mtr = NULL;
+
+	LIST_FOREACH(mtr, &dev_priv->mtrs, next)
+	if (mtr->mtr_id == mtr_id)
+		break;
+
+	return mtr;
+}
+
+/* Fixed-point rate: whole units plus a fraction expressed in 1/1024ths. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts byte/s to byte/period if form of integer + 1/1024*fractional
+ * the period depends on the clock frequency and other parameters which
+ * being combined give multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps / 10^-12
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /*10^12 pico second*/
+
+	res.integer = dividend / divisor;
+	const uint64_t reminder = dividend % divisor;
+
+	/* scale the remainder into 1/1024ths of a unit */
+	res.fractional = 1024ull * reminder / divisor;
+	return res;
+}
+
+/* Physical-port variant: period is 8 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Virtual-port variant: period is 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr .meter_enable callback (vswitch path).
+ * Programs the meter's profile into hardware: egress meters go through the
+ * EPP block (vport or txp QoS depending on port type), ingress meters
+ * through the DBS Tx QoS block (global rate set up once per process via the
+ * function-local 'ingress_initial' latch).
+ * Returns 0 on success, negative rte_mtr error otherwise.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* a zero rate would disable the meter; clamp to minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* a zero rate would disable the meter; clamp to minimum */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/*
+ * Disable ingress Tx QoS for this port: writes an all-zero (disabled)
+ * configuration to the DBS Tx QoS block. Return value of the HW call is
+ * intentionally ignored (best effort teardown).
+ */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr .meter_disable callback (vswitch path).
+ * Clears the HW QoS configuration the meter programmed: EPP vport/txp QoS
+ * for egress meters (MSB of mtr_id set), DBS Tx QoS for ingress meters.
+ * The meter object itself stays in the list.
+ * Returns 0 on success, negative rte_mtr error if the id is unknown.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * rte_mtr .create callback (vswitch path).
+ * Allocates a meter object bound to an existing profile and links it into
+ * the per-device list; ingress meters on physical ports are rejected.
+ * If params->meter_enable is set the meter is programmed immediately via
+ * eth_meter_enable().
+ * Returns 0 on success, negative rte_mtr error otherwise.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/*
+ * rte_mtr .destroy callback (vswitch path).
+ * Clears the meter's HW QoS configuration (EPP for egress meters, DBS for
+ * ingress) and then unlinks and frees the meter object.
+ * Returns 0 on success, negative rte_mtr error if the id is unknown.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	/* ENODEV for a missing id, matching eth_meter_profile_delete()
+	 * (was EEXIST, which means the opposite condition)
+	 */
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr .capabilities_get callback (inline FLM path).
+ * Reports the metering capabilities of the FLM-based implementation:
+ * color-blind trTCM RFC 2698 in byte mode only, with limits taken from the
+ * lower flow layer (flow_mtr_meters_supported / flow_mtr_meter_policy_n_max).
+ * Returns 0 on success, negative rte_mtr error if the device lacks metering.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* 12-bit mantissa shifted by up to 15, scaled by the clock factor */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * Validate and program a meter profile for the inline (FLM) path.
+ * Only trTCM RFC 2698 in byte mode with equal committed/peak rates is
+ * accepted; RFC 2697, RFC 4115 and packet mode are rejected.
+ *
+ * Note: "error" was previously tagged __rte_unused, but it is passed to
+ * rte_mtr_error_set() on every failure path; the bogus tag is dropped.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	/* The hardware has a single rate/burst pair per profile. */
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete an inline meter profile by resetting it to all-zero values.
+ *
+ * Note: all three parameters were tagged __rte_unused even though each one
+ * is referenced in the body; the misleading tags are removed.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	/* Clearing rate and burst effectively frees the profile slot. */
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Register a meter policy for the inline (FLM) path.
+ * Only a narrow action set is accepted: GREEN may be empty, a single VOID,
+ * or a single PASSTHRU; YELLOW and RED must each be exactly one DROP.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* GREEN: pass traffic through (END, VOID+END or PASSTHRU+END). */
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* YELLOW: must drop. */
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	/* RED: must drop. */
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a policy id for deletion; no hardware state needs clearing.
+ *
+ * Note: "policy_id" and "error" were tagged __rte_unused although both are
+ * used in the range check; the bogus tags are dropped ("dev" stays unused).
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * Create a shared, color-blind meter and offload it to hardware.
+ * Rejects per-flow color input, non-green stats, disabled meters,
+ * non-shared meters and out-of-range ids before calling
+ * flow_mtr_create_meter().
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	/* Hardware only counts green packets/bytes. */
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Destroy an inline meter and remove its hardware offload.
+ *
+ * Note: "error" was tagged __rte_unused but is used on both failure paths;
+ * the misleading tag is removed.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Adjust (set) an MTR bucket fill level.
+ * Bit 63 of adjust_value marks the request as an adjustment; the low 32
+ * bits carry the new value.  Implements the stats_update MTR op.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* Marker bit callers must set to request a bucket adjustment. */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the marker bit; the remaining bits are the payload. */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Read (and optionally clear) the green packet/byte counters for an MTR.
+ * Only the GREEN color is populated; all other counters stay zero.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* MTR ops used when the FPGA runs the vswitch profile. */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* MTR ops used when the FPGA runs the inline (FLM) profile. */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * Select the MTR ops table that matches the adapter's FPGA profile.
+ * "ops" receives a const struct rte_mtr_ops pointer.
+ * Returns 0 on success, -1 for unknown/capture profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTNIC_METER_H__: identifiers containing a double
+ * underscore are reserved for the implementation (C11 7.1.3), and sibling
+ * headers in this driver use the _NTNIC_*_H_ pattern.
+ */
+#ifndef _NTNIC_METER_H_
+#define _NTNIC_METER_H_
+
+/* Fill in the rte_mtr_ops pointer matching the FPGA profile. */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* _NTNIC_METER_H_ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port state for one registered vDPA device. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;			/* vhost device id once connected */
+	uint32_t index;			/* base HW ring index */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;			/* rte_vhost driver flags */
+	struct rte_pci_addr addr;
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a vhost queue id into the HW ring index, host id (VF number)
+ * and representor port of the owning vDPA port.
+ * Returns 0 on success, -1 when the device or queue is not registered.
+ *
+ * Bug fix: the tx branch previously logged "rxqs %i" with vport[i].rxqs
+ * even though it validated against txqs; it now reports txqs.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			} else {
+				if (queue_id >= vport[i].txqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			}
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a new vDPA port backed by the given VF and start its
+ * vhost-user driver.  On success *vhid receives the port's slot index.
+ *
+ * Bug fix: the original wrote vport[nb_vpda_devcnt] without checking the
+ * counter against MAX_VDPA_PORTS, overflowing the static array once the
+ * table was full; the bound is now checked up front.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA,
+		       "Cannot register more than %lu vDPA ports\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver of a registered vDPA port and
+ * clear its slot.
+ * NOTE(review): the early "return" means only the FIRST port with a
+ * non-empty ifname is closed per call — confirm this is intentional for
+ * multi-port teardown (callers may need to loop).
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Empty ifname marks the slot as free. */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/*
+ * Lookup table mapping virtio feature bit numbers to printable names.
+ * Used only by dump_virtio_features(); the leading spaces embedded in
+ * some name strings are printed verbatim.
+ */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/*
+ * Debug helper: print the name of every feature bit set in "features".
+ * The test "(1ULL << id) == (features & (1ULL << id))" was replaced with
+ * the equivalent, idiomatic "features & (1ULL << id)".
+ */
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		if (features & (1ULL << virt_features[i].id))
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost new_device callback: match the connecting vid to a registered
+ * port by socket path, wait for the PMD instance to become ready, then
+ * record the negotiated ring layout (packed/split).  Virtio IN_ORDER or
+ * RING_PACKED must be negotiated for the HW-virtio (vDPA) path.
+ * Returns 0 on success, -1 on unknown port / timeout / bad features.
+ *
+ * Bug fix: the features log used "%016lx" with a uint64_t, which is a
+ * format/argument mismatch on 32-bit targets; the value is now cast to
+ * unsigned long long and printed with "%016llx".
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the registered port whose socket path matches this vid. */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll up to ~2 s for the PMD side of the port to come up. */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016llx\n",
+	       (unsigned long long)negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost destroy_device callback: log which port is going away and mark
+ * its virtual link down (VIRT_PORT_NEGOTIATED_NONE).
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	/* First pass: identify the port for the log message only. */
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user lifecycle callbacks registered for every vDPA socket. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register and start the vhost-user driver on the port's socket path.
+ * Offload features the hardware path cannot honor (TSO, checksum,
+ * event-idx, indirect descriptors, ctrl queue, GSO, MTU) are disabled
+ * before the driver is started.  Returns 0 on success, -1 on any failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Mask out virtio features the HW datapath does not implement. */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+/* Map a vhost queue id to HW ring index, VF number and representor port. */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+/* Register a vDPA port for a VF and start its vhost-user driver. */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+/* Detach and unregister a registered vDPA port. */
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF PCI id pair to a human-readable adapter name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+			return "NT200A02";
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+			return "NT50B01";
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for Napatech VFs: create the vDPA device for the VF, then
+ * register the DPDK VF interface.  Returns 0 on success, negative on error.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove: tear down the vDPA device, then the DPDK VF interface. */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret = ntvf_vdpa_pci_remove(pci_dev);
+
+	if (ret != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI ids of the Napatech VFs this driver binds to. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* PCI driver descriptor and EAL registration for the VF driver. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+/* PCI probe/remove entry points for Napatech virtual functions. */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+/* VFIO helpers keyed by VF number. */
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..4125bc50c9
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow of one vhost vring plus the HW queue it is bound to. */
+struct vring_info {
+	uint64_t desc;		/* guest addresses of the three ring areas */
+	uint64_t avail;
+	uint64_t used;
+	uint16_t size;		/* number of descriptors */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;
+	struct nthw_virt_queue *p_vq;	/* backing HW virt queue */
+
+	int enable;
+};
+
+/* Negotiated vhost state mirrored for the hardware datapath. */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;	/* rx+tx rings in use */
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA device state (one entry per probed virtual function). */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO fds cached from nt_vfio_setup(). */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;		/* vhost device id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* Lifecycle flags, accessed with __atomic builtins. */
+	volatile int32_t started;
+	volatile int32_t dev_attached;
+	volatile int32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile int32_t dma_mapped;
+	volatile int32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Node linking one ntvf_vdpa_internal into the global device list. */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+/* Global list of probed vDPA devices, guarded by internal_list_lock. */
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/*
+ * Look up the list node owning the given rte_vdpa_device.
+ * Returns the node, or NULL when the device is not registered.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->vdev == vdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Look up the list node owning the given PCI device.
+ * Returns the node, or NULL when the device is not registered.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->pdev == pdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Set up VFIO for the VF and cache the container/group/device fds in the
+ * internal state.  Returns 0 on success, -1 when nt_vfio_setup() fails.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	/* Initialize to invalid so a failed setup leaves no stale fds. */
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map or unmap (do_map true/false) every guest memory region of the
+ * connected VM into/out of the VF's IOMMU domain.
+ * Returns 0 on success, negative on error or when already in the
+ * requested state.
+ *
+ * Bug fix: the region log's format string contained "GPA 0xllx" — a
+ * missing '%' left guest_phys_addr consumed by no conversion and the
+ * remaining specifiers mismatched; it now prints GPA with PRIX64 like
+ * the neighboring fields.  The redundant NULL guard before free() was
+ * also dropped (free(NULL) is a no-op).
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Refuse to map twice or unmap when nothing is mapped. */
+	if ((do_map && __atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED)) ||
+			(!do_map && !__atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 1, __ATOMIC_RELAXED);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 0, __ATOMIC_RELAXED);
+		}
+	}
+
+exit:
+	free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address to the guest physical address it
+ * backs, using the vhost memory table of @vid.
+ * Returns 0 when the HVA falls in no guest region (0 doubles as error).
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+
+	if (rte_vhost_get_mem_table(vid, &mem) >= 0) {
+		for (uint32_t r = 0; r < mem->nregions; r++) {
+			const struct rte_vhost_mem_region *reg =
+				&mem->regions[r];
+
+			if (hva >= reg->host_user_addr &&
+					hva < reg->host_user_addr + reg->size) {
+				gpa = hva - reg->host_user_addr +
+				      reg->guest_phys_addr;
+				break;
+			}
+		}
+	}
+
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Resolve the guest physical addresses of vring @vring for the VM bound
+ * to @internal and create the matching FPGA virt-queue. Even vring
+ * indexes are Rx queues, odd indexes Tx queues. Requires the guest to
+ * have negotiated VIRTIO_F_IN_ORDER or VIRTIO_F_RING_PACKED; otherwise
+ * no queue is created (returns 0 with a warning).
+ * Returns 0 on success, negative on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* Translate desc/avail/used HVAs to GPAs; 0 means "not mapped". */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even vring -> Rx (second arg 1), odd -> Tx; queue = vring/2. */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			/* NOTE(review): shadows the outer 'res' declared above. */
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Receive virt queue (even vring index). */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				/* NOTE(review): unreachable — 'vring' is either
+				 * even or odd, both handled above.
+				 */
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the datapath for the attached guest: read the vring count
+ * and negotiated features, then create and enable vrings. The inline
+ * FPGA profile creates/enables the first two vrings here; other
+ * profiles bootstrap only vring 0 (the rest go through the
+ * set_vring_state callback). Always returns 0.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					/* vq_type: 0 = Rx queue, 1 = Tx queue */
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down the datapath: persist the current avail/used indexes back
+ * into vhost (for a later restart/migration) and release every FPGA
+ * virt-queue. Always returns 0; per-vring release errors are logged.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Save ring positions so vhost can resume from the same point. */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type: 0 = Rx queue, 1 = Tx queue */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		/* Mark the vring unresolved so it gets re-created on start. */
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Buffer for a vfio_irq_set carrying one eventfd per data-queue vector
+ * (2 * max queues) plus one for the device/config vector.
+ * Fixed: the parentheses were missing, so "+ 1" added one *byte* rather
+ * than one int slot, undersizing the buffer for a full vring count.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * (NTVF_VDPA_MAX_QUEUES * 2 + 1))
+
+/*
+ * Route MSI-X interrupts to the guest: vector 0 gets the device's own
+ * interrupt fd, vectors 1..nr_vring get the per-vring callfds. A no-op
+ * if interrupts are already enabled. Returns 0 on success (also when
+ * too many vectors are needed - polling still works), -1 on ioctl error.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0 is the device's own (config) interrupt. */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* NOTE(review): assumes nr_vring is even (Rx/Tx pairs); an odd
+	 * count would read one vring past the end - confirm callers.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 1, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Tear down the MSI-X eventfd routing set up by
+ * ntvf_vdpa_enable_vfio_intr(). A no-op when interrupts are not
+ * enabled. Returns 0 on success, -1 on ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)irq_set_buf;
+	int ret;
+
+	if (!__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* count == 0 with ACTION_TRIGGER disables all MSI-X vectors. */
+	irq_set->argsz = sizeof(struct vfio_irq_set);
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 0, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the running state of the datapath with the desired state:
+ * start when the device is both started (probed) and attached (vhost
+ * configured); stop, disable interrupts and unmap DMA when either flag
+ * is cleared. Serialized by internal->lock.
+ * Returns 0 on success or the failing step's negative error code.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(__atomic_load_n(&internal->started, __ATOMIC_RELAXED) &&
+			 __atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 1, __ATOMIC_RELAXED);
+	} else if (__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(!__atomic_load_n(&internal->started, __ATOMIC_RELAXED) ||
+			 !__atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/*
+			 * Fixed: this log statement was placed after
+			 * 'goto err' and was therefore unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 0, __ATOMIC_RELAXED);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vDPA op: a vhost device (@vid) has been configured by the frontend.
+ * Bind the vid to the matching probed device, mark it attached and
+ * kick the datapath state machine. Returns 0 on success, -1 if the
+ * vDPA device is unknown.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct ntvf_vdpa_internal *priv;
+	struct internal_list *entry;
+	struct rte_vdpa_device *vdev;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	priv = entry->internal;
+	priv->vid = vid;
+
+	__atomic_store_n(&priv->dev_attached, 1, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(priv);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: the vhost device (@vid) is being closed. Detach it, stop the
+ * datapath and clear the cached virt-queue pointers so they cannot be
+ * used after release. Returns 0 on success, -1 if the device is unknown.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+
+	__atomic_store_n(&internal->dev_attached, 0, __ATOMIC_RELAXED);
+	/* With dev_attached cleared this stops the datapath. */
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	uint32_t i;
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (i = 0; i < hw->nr_vring; i++)
+		hw->vring[i].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: features were (re-)negotiated for @vid. Only significant when
+ * the frontend requests vhost logging (live migration), which this
+ * driver cannot support without a relay core - in that case -1 is
+ * returned. Returns 0 otherwise, -1 also for an unknown device.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/*
+ * vDPA op: return the VFIO group fd of the device backing @vid,
+ * or -1 when the device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *entry;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	entry = find_internal_resource_by_vdev(vdev);
+	if (!entry) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return entry->internal->vfio_group_fd;
+}
+
+/*
+ * vDPA op: return the VFIO device fd of the device backing @vid,
+ * or -1 when the device is unknown.
+ */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_dev_fd;
+}
+
+/*
+ * vDPA op: report the number of queue pairs supported by @vdev via
+ * @queue_num. Returns 0 on success, -1 when the device is unknown.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+	*queue_num = list->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the virtio feature bits supported by @vdev via
+ * @features. Returns 0 on success, -1 for an unknown device or a NULL
+ * output pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = list->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the vhost-user protocol features supported by this
+ * driver via @features. Returns 0 on success, -1 for a NULL output
+ * pointer. Fixed: @vdev was annotated __rte_unused although it is used
+ * in both log statements - the misleading annotation is removed.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable MSI-X interrupts through VFIO, then switch on Rx/Tx for all of
+ * the device's vrings (even index = Rx, odd = Tx). Called once the last
+ * vring has been created. Returns 0 on success or the interrupt-enable
+ * error code.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Fixed: used a bare printf instead of the driver's NT_LOG. */
+		NT_LOG(ERR, VDPA, "ERROR - ENABLE INTERRUPT via VFIO\n");
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (uint32_t i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+
+/*
+ * vDPA op: enable or disable one vring of @vid. Disabling stops the
+ * FPGA queue; enabling either restarts an existing queue or maps DMA
+ * and creates the vring. For the non-inline profile the VFIO interrupt
+ * setup is deferred until the last vring is enabled.
+ * Returns 0 on success, -1 for an unknown device or bad vring index.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type: 0 = Rx queue, 1 = Tx queue */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA driver callbacks registered with the vhost library; migration
+ * and notify-area relay are not supported (NULL).
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO access, register
+ * with the vhost vDPA framework and add the device to the global list.
+ * Returns 0 on success, -1 on failure (allocations freed).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fixed: was 'return -1', leaking 'list' and 'internal'. */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	__atomic_store_n(&internal->started, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	/* rte_free(NULL) is a no-op, so both paths are safe here. */
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, release VFIO/PCI resources, unregister
+ * from the vhost vDPA framework and free the per-device state.
+ * Returns 0 on success, -1 when the device is unknown.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath stop the device. */
+	__atomic_store_n(&internal->started, 0, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Empty id table (terminator entry only): this driver matches no PCI id
+ * by itself. NOTE(review): presumably devices are handed over to it
+ * explicitly by the ntnic PMD - confirm against the probe path.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with double underscore are reserved
+ * for the implementation; NTNIC_VF_VDPA_H would be a safer guard name.
+ */
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+/* Log type id for the VDPA component (defined in ntnic_vf_vdpa.c). */
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit per-function enter/leave debug traces. */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI probe/remove entry points implemented in ntnic_vf_vdpa.c. */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive the PF/VF device number from the PCI address:
+ * low 5 bits of devid and low 3 bits of the function number.
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	int devid_bits = (pdev->addr.devid & 0x1f) << 3;
+	int func_bits = pdev->addr.function & 0x7;
+
+	return devid_bits + func_bits;
+}
+
+/* Internal API */
+
+/* Per-device VFIO state: container/group/device fds and the next free IOVA. */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr; /* next IOVA handed out by nt_vfio_dma_map() */
+};
+
+static struct vfio_dev vfio_list[256];
+
+/*
+ * Look up the VFIO state for a PF/VF number.
+ * Returns NULL when vf_num is outside the table.
+ */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	/* Bound derived from the table itself instead of a magic 255. */
+	if (vf_num < 0 ||
+	    (size_t)vf_num >= sizeof(vfio_list) / sizeof(vfio_list[0]))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+
+/*
+ * Prepare VFIO for one PF/VF device.
+ *
+ * PF0 (vf_num == 0) shares DPDK's default VFIO container; every other
+ * device gets a private container so its DMA mappings stay isolated.
+ *
+ * Returns the device number (>= 0) on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/*
+	 * rte_vfio_get_group_num() returns 1 on success, 0 when the device
+	 * is not managed by VFIO and <0 on error.  Without this check,
+	 * iommu_group_num would be used uninitialized below.
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) <= 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group number.\n");
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Never destroy DPDK's shared default container (pf0). */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Tear down the VFIO container associated with a PF/VF number.
+ * Returns 0 on success, -1 if vf_num is invalid.
+ *
+ * NOTE(review): for vf_num 0 container_fd holds
+ * RTE_VFIO_DEFAULT_CONTAINER_FD; destroying DPDK's shared default
+ * container here looks unintended - confirm.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host-virtual region into a VF's VFIO container.
+ *
+ * For 1G-sized requests the base is aligned down to a 1G boundary so the
+ * whole huge page is mapped; *iova_addr receives the IOVA matching
+ * virt_addr.  Each call consumes one 1G slot of the VF's IOVA space.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Cast the pointer and print the 64-bit size with PRIu64; passing a
+	 * void * for %PRIX64 or a uint64_t for %d is undefined behavior in
+	 * a varargs call.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Remove a DMA mapping from a VF's container.  For 1G-sized regions the
+ * base/IOVA are realigned exactly as nt_vfio_dma_map() aligned them.
+ * Returns 0 on success (or when no container exists), -1 on failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Nothing was ever mapped for this device. */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	uint64_t gp_virt_base = (uint64_t)virt_addr;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+
+		gp_virt_base &= ~ONE_G_MASK;
+		iova_addr -= gp_offset;
+	}
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+/*
+ * Map guest memory into a VF's container using the guest physical
+ * address directly as the IOVA.  Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* size is 64-bit; printing it with "%d" is a varargs type mismatch. */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%" PRIu64 "\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo a vDPA mapping created by nt_vfio_dma_map_vdpa().
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+	int rc;
+
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	rc = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					  iova_addr, size);
+	if (rc != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       rc, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for a VF, or -1 if vf_num is invalid. */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: was copy-pasted from nt_vfio_remove(). */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for a VF, or -1 if vf_num is invalid. */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: was copy-pasted from nt_vfio_remove(). */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for a VF, or -1 if vf_num is invalid. */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: was copy-pasted from nt_vfio_remove(). */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+RTE_INIT(nt_vfio_init);
+
+/*
+ * Constructor: publish the DMA map/unmap callbacks to the nt_util layer
+ * so common code can map memory without depending directly on VFIO.
+ * NOTE(review): RTE_INIT is normally used as RTE_INIT(fn) { body }; the
+ * declaration-plus-separate-definition form here is unusual - confirm it
+ * registers the constructor as intended.
+ */
+static void nt_vfio_init(void)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Create/destroy per-device VFIO state; setup returns the device number. */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-device VFIO fds; return -1 if vf_num is invalid. */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* DMA map/unmap of host virtual memory; the IOVA is chosen by the driver. */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* DMA map/unmap for vDPA, where the guest physical address is the IOVA. */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..c0e67ba03d
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* One extended-statistic descriptor: display name plus where to read it. */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source; /* 1 = RX port counters, 2 = TX port counters, 3 = FLM */
+	unsigned int offset; /* byte offset of the counter inside its struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ */
+/* Sized to the largest name table so either flavor fits. */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port counter snapshot taken at reset; subtracted from live values. */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = {{ 0 }};
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/*
+ * Fill 'stats' with up to 'n' extended statistics for 'port'.
+ * Values are reported relative to the snapshot stored by
+ * nthw_xstats_reset() in nthw_xstats_reset_val.
+ * Returns the number of entries written.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Pick the counter blocks and name table for the adapter flavor. */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		/* FLM stat layout changed at version 18; pick matching table. */
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	/* Read each counter at its table offset and subtract the snapshot. */
+	for (i = 0; i < n && i < nb_names; i++) {
+		stats[i].id = i;
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			stats[i].value =
+				*((uint64_t *)&rx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 2:
+			/* TX stat */
+			stats[i].value =
+				*((uint64_t *)&tx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 3:
+			/* FLM stat */
+			/* FLM snapshot is kept in row 0 (adapter-wide). */
+			if (flm_ptr) {
+				stats[i].value =
+					*((uint64_t *)&flm_ptr[names[i].offset]) -
+					nthw_xstats_reset_val[0][i];
+			} else {
+				stats[i].value = 0;
+			}
+			break;
+		default:
+			stats[i].value = 0;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*
+ * Fill 'values' for the statistics selected by 'ids' (indices into the
+ * active name table).  Out-of-range ids are skipped.  Must be called with
+ * the stat mutex held.  Returns the number of valid ids processed.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+	int count = 0;
+
+	/* Pick the counter blocks and name table for the adapter flavor. */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] < nb_names) {
+			switch (names[ids[i]].source) {
+			case 1:
+				/* RX stat */
+				values[i] =
+					*((uint64_t *)&rx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 2:
+				/* TX stat */
+				values[i] =
+					*((uint64_t *)&tx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 3:
+				/* FLM stat */
+				/* FLM snapshot row 0 is adapter-wide. */
+				if (flm_ptr) {
+					values[i] =
+						*((uint64_t *)&flm_ptr
+						  [names[ids[i]].offset]) -
+						nthw_xstats_reset_val[0][ids[i]];
+				} else {
+					values[i] = 0;
+				}
+				break;
+			default:
+				values[i] = 0;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * Snapshot the current counters into nthw_xstats_reset_val so later
+ * reads report deltas.  Must be called with the stat mutex held.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Pick the counter blocks and name table for the adapter flavor. */
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_ptr[names[i].offset]);
+			break;
+		case 2:
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_ptr[names[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM stat
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (flm_ptr && strcmp(names[i].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_ptr[names[i].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be held
+ */
+/*
+ * Copy up to 'size' statistic names into xstats_names.
+ * When xstats_names is NULL, return the total number of names instead.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *name_tbl;
+	uint32_t name_cnt;
+	unsigned int idx;
+	int filled = 0;
+
+	/* Select the name table matching the adapter flavor. */
+	if (is_vswitch) {
+		name_tbl = nthw_virt_xstats_names;
+		name_cnt = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		name_tbl = nthw_cap_xstats_names_v1;
+		name_cnt = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		name_tbl = nthw_cap_xstats_names_v2;
+		name_cnt = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (xstats_names == NULL)
+		return name_cnt;
+
+	for (idx = 0; idx < size && idx < name_cnt; idx++) {
+		strlcpy(xstats_names[idx].name, name_tbl[idx].name,
+			sizeof(xstats_names[idx].name));
+		filled++;
+	}
+
+	return filled;
+}
+
+/*
+ * Copy the statistic names selected by 'ids' into xstats_names.
+ * When xstats_names is NULL, return the total number of names instead.
+ * NOTE(review): 'count' is incremented even for out-of-range ids, so the
+ * return value equals 'size' rather than the number of names copied,
+ * unlike nthw_xstats_get_by_id() - confirm against the ethdev contract.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Pick the name table matching the adapter flavor. */
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+		}
+		count++;
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* List all statistic names; returns the table size if xstats_names is NULL. */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Read all statistics for a port; call with the stat mutex held. */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Snapshot current counters as the new zero point; stat mutex held. */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* Name/value lookup by statistic id. */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v13 8/8] net/ntnic: adds socket connection to PMD
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-09-01 12:18   ` [PATCH v13 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-09-01 12:18   ` Mykola Kostenok
  2023-09-02 17:26     ` Patrick Robb
  6 siblings, 1 reply; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-01 12:18 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
v10:
* Fix uninitialized variables and build warnings.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  877 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5357 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	/* parameter token this node matches; NULL entry terminates a table */
+	const char *param;
+	/* optional sub-table searched with the next token when 'param' matches */
+	struct func_s *sub_funcs;
+	/* leaf handler invoked when 'param' matches and no sub-table is set */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply for 'module' carrying the text of 'err_code'.
+ * On success *data is a malloc'ed buffer whose first 4 bytes are the 32-bit
+ * error code (the sprintf's four '-' placeholders are overwritten below),
+ * followed by "<module>:<error text>"; *len includes the 0-terminator.
+ * *len is zeroed first so the caller sends no payload when allocation fails.
+ * Always returns REQUEST_ERR so callers can 'return ntconn_error(...)'.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4 code bytes + module + ':' + text + '\0' */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			/* overwrite the "----" placeholder with the code */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal success reply consisting of a single 32-bit status code.
+ * *len is zeroed first so the caller sends no payload when allocation fails.
+ * Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (data) {
+		*data = malloc(sizeof(uint32_t));
+		if (*data) {
+			*len = sizeof(uint32_t);
+			*(uint32_t *)*data = (uint32_t)code;
+		}
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Walk one level of a func_s dispatch tree.
+ * 'function' is split at the first ',' (strtok mutates it in place); the
+ * leading token selects an entry in the NULL-terminated 'func_list'.  A
+ * matching entry either recurses into its sub_funcs table with the remaining
+ * tokens, or invokes its leaf handler with *data pointing at the remaining
+ * text.  hdr->len is reduced by the consumed token so leaf handlers see the
+ * length of what is left.  Missing or unrecognized tokens produce an error
+ * reply via ntconn_error() (the error code depends on recursion depth).
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		/* no token at the top level means the parameter was missing
+		 * entirely; deeper down it means the request stopped short
+		 */
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	/* account for the consumed token and its separator */
+	hdr->len -= strlen(tok) + 1;
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				/* table entry with neither sub-table nor handler */
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain an error text describing the failure
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+/* Look up the error record for 'err_code'; used when building error replies. */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/* One registered ntconnect module instance, kept in a linked list. */
+typedef struct ntconn_mod_s {
+	void *hdl; /* module private context, passed to all ops */
+	struct pci_id_s addr; /* adapter this instance belongs to */
+	const ntconnapi_t *op; /* module operations table */
+
+	pthread_mutex_t mutex; /* per-instance lock */
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd; /* server socket fd */
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;
+};
+
+int ntconn_server_register(void *server);
+
+/* Register a module instance for the adapter at 'addr'. */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+/* Start the ntconnect socket server listening on 'sockname'. */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+/* Wire header preceding every ntconnect message. */
+struct ntconn_header_s {
+	uint16_t tag; /* NTCONN_TAG_* discriminator */
+	uint16_t len; /* length of the command string that follows */
+	uint32_t blob_len; /* length of the trailing binary blob */
+};
+
+/*
+ * PCI address packed into a single 64-bit id, with named access to the
+ * individual domain/bus/device/function components.
+ */
+struct pci_id_s {
+	union {
+		uint64_t pci_id; /* whole address as one comparable integer */
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad; /* fills the union out to 64 bits */
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/* NOTE(review): declared as an array of 32 'char *' pointers; if this
+	 * is meant to carry a firmware version string it should probably be
+	 * 'char fw_version[32]' — confirm against the reply producer before
+	 * changing, as it alters the reply layout.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+/* NOTE(review): also defined (identically) in ntconnect_api_flow.h; the
+ * duplicate definition is benign only while both token sequences stay equal.
+ */
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+/* Reply header for the stat get,* requests; followed by a flexible array
+ * of raw counter values.
+ */
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters; /* number of uint64_t entries in data[] */
+	uint64_t timestamp;
+	uint64_t is_virt; /* presumably selects virt vs cap record layout — confirm */
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+/* "get" sub-commands; tables are NULL-terminated for execute_function() */
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Handler for "get nic_pci_ids": return the PCI id strings of all NICs
+ * registered with the server module.  Allocates an ntc_nic_pci_ids_s
+ * returned through @data/@len; it is released later through the module's
+ * free_data callback.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	/* Format each registered PCI id as "dddd:bb:dd.f" text; the list
+	 * ends at the first zero pci_id or at MAX_PCI_IDS entries.
+	 */
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		sprintf(npci->nic_pci_id[i], "%04x:%02x:%02x.%x",
+			serv->pci_id_list[i].domain & 0xffff,
+			serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Module request entry point: dispatch @function through the
+ * server_entry_funcs table starting at recursion level 0.
+ */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+/*
+ * Free a reply buffer previously allocated by a request handler
+ * (e.g. func_get_nic_pci).  Called by the connection layer after the
+ * reply has been sent.
+ */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (data) {
+#ifdef DEBUG
+		NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+		free(data);
+	}
+}
+
+/* Module operations table; no client_cleanup callback is provided */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/*
+ * Register the server itself as an ntconnect module on the fake PCI
+ * address 0000:00:00.0 so clients can address it like any other module.
+ */
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr addr = {
+		.domain = 0, .bus = 0, .devid = 0, .function = 0
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..697e101a03
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error code to text mapping; the table is terminated by {-1, NULL} */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;
+static pthread_t tid;
+static pthread_t ctid;
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Look up the error descriptor for @err_code in the ntconn_err table.
+ * Unknown codes fall back to index 1 ("Internal error").
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int idx = 0;
+
+	/* Scan until the matching code or the {-1, NULL} terminator */
+	while (ntconn_err[idx].err_code != (uint32_t)-1 &&
+			ntconn_err[idx].err_code != err_code)
+		idx++;
+	if (ntconn_err[idx].err_code == (uint32_t)-1)
+		idx = 1;
+
+	return &ntconn_err[idx];
+}
+
+/*
+ * Register an ntconnect module for the device at @addr.
+ *
+ * Allocates a module node, prepends it to the singly-linked ntcmod_base
+ * list, and records the device's pci_id in the server's pci_id_list
+ * (skipping the server's own fake all-zero address and duplicates).
+ *
+ * @addr: PCI address the module serves
+ * @hdl:  opaque module handle passed back to every op callback
+ * @op:   module operations table (request/free_data/cleanup)
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	/* Serializes request/free_data/cleanup calls into this module */
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Prepend to the global module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Insert into the first free slot, or stop on a duplicate */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill @addr as an AF_UNIX sockaddr for the filesystem @path.
+ * Returns 0 on success, -1 when either argument is NULL or the path
+ * does not fit in sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (addr == NULL || path == NULL)
+		return -1;
+	memset(addr, 0, sizeof(struct sockaddr_un));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) < sizeof(addr->sun_path)) {
+		rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+		return 0;
+	}
+	return -1;
+}
+
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Wait up to @timeout milliseconds (-1 = block forever) for data on @fd,
+ * then receive at most @len bytes into @data.
+ *
+ * On STATUS_OK the number of bytes actually received is stored in
+ * @recv_len (may be less than @len).  Other returns: STATUS_TIMEOUT,
+ * STATUS_TRYAGAIN (EINTR or spurious wakeup), STATUS_CONNECTION_CLOSED
+ * (peer hung up), STATUS_CONNECTION_INVALID or STATUS_INVALID_PARAMETER
+ * on errors.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd flags set: try to receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly @length bytes from @clfd into @data, looping over
+ * read_data() with an infinite timeout.  Returns STATUS_OK when all
+ * bytes have arrived; STATUS_CONNECTION_CLOSED/STATUS_TIMEOUT are
+ * returned silently, other failures are logged and returned.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t recv_len = 0;
+	size_t left = length;
+	size_t pos = 0;
+
+	while (left > 0) {
+		int ret = read_data(clfd, left, &data[pos], &recv_len, -1);
+
+		if (ret == STATUS_OK) {
+			pos += recv_len;
+			left -= recv_len;
+		} else {
+			if (ret == STATUS_CONNECTION_CLOSED || ret == STATUS_TIMEOUT) {
+				/* Silently return status */
+				return ret;
+			}
+			if (ret != STATUS_TRYAGAIN) {
+				NT_LOG(ERR, NTCONNECT,
+				       "Failed getting packet. Error code: 0x%X\n",
+				       ret);
+				return ret;
+			}
+		}
+		/* Try again */
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write exactly @size bytes from @data to @fd, retrying short writes.
+ * Returns 0 on success or STATUS_INTERNAL_ERROR on any write() failure.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from @clfd into a heap buffer returned
+ * through @rdata (caller frees).  A first recv of up to MESSAGE_BUFFER
+ * bytes is expected to contain the ntconn header; if the total message
+ * (hdr.len + hdr.blob_len) exceeds MESSAGE_BUFFER, the buffer is grown
+ * and the remainder is read with read_all().
+ *
+ * NOTE(review): assumes the first recv delivers at least NTCMOD_HDR_LEN
+ * bytes, and that a message <= MESSAGE_BUFFER arrives in that single
+ * recv (recv_len < length is only handled on the realloc path) — verify
+ * against the client's send behavior.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		/* Fetch the bytes not covered by the first recv */
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from @clfd and resolve the target module.
+ *
+ * A request payload is "<pci_id>;<module>[;<command...>]".  The raw
+ * request buffer is returned through @get_req (caller owns and frees
+ * it), the command portion through @module_cmd (points into the same
+ * buffer), the parsed header through @hdr and the read status through
+ * @status.
+ *
+ * Returns the registered module matching <pci_id>/<module>, or NULL
+ * when reading failed, the request was malformed or no module matched.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/* First token: the PCI id string.  Check for NULL
+			 * before strlen() - a request without ';' would
+			 * otherwise dereference a NULL token.
+			 */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			/* Second token: the module name */
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			/* Remainder (if any) is the module command */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "dddd:bb:dd.f" out of the PCI id string */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Send one framed reply on @clfd: an ntconn header carrying @reply_tag
+ * followed by @size bytes of @data.  Returns 0 on success or the
+ * write_all()/allocation error code.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+	/* Assemble header + payload into one buffer for a single write */
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	if (res)
+		return res;
+
+	return 0;
+}
+
+/*
+ * Send a reply and then release the module-allocated @data through the
+ * module's free_data callback (under the module mutex).  @data is only
+ * freed when @size is nonzero.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/*
+ * Send an NTCONN_TAG_ERROR frame for @err_code.  The first 4 bytes of
+ * the payload carry the binary error code (overwriting the "----"
+ * placeholder), followed by ":connect:<error text>".
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	sprintf(err_buf, "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	/* Patch the binary error code over the leading placeholder */
+	memcpy(err_buf, &ntcerr->err_code, sizeof(uint32_t));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: loop reading requests on the connected fd
+ * (passed through @arg), dispatching them to the resolved module and
+ * sending replies, until the client disconnects or a send fails.  On
+ * exit, every registered module's client_cleanup callback is invoked
+ * for this fd.
+ *
+ * NOTE(review): after a request is handled (success or module error),
+ * control falls through to the unconditional ntconnect_send_error()
+ * below, so an INVALID_REQUEST error frame is sent after every handled
+ * request - confirm the protocol expects this trailer, otherwise a
+ * `continue` appears to be missing after the dispatch branch.
+ * NOTE(review): `request` is freed only at the loop bottom; every
+ * `break` path leaks the final iteration's buffer.  `module_cmd` is not
+ * reset to NULL per iteration, so a stale pointer from a previous
+ * request could be reused - verify.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Listener thread: accept clients on the server's AF_UNIX socket and
+ * spawn one detached ntconnect_worker thread per connection, pinned to
+ * the server's CPU set.  Exits on listen()/accept() failure or when
+ * @running is cleared.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		/* Pass the client fd as the thread argument */
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Start the ntconnect service: create the AF_UNIX socket at @sockname
+ * (creating its directory if needed), register the server module and
+ * launch the listener thread pinned to @cpuset.  Does nothing unless at
+ * least one module has been registered.  Returns 0 on success or when
+ * no modules exist, -1 on failure.
+ *
+ * NOTE(review): the pthread_create() return value is not checked here.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		unix_build_address(sockname, &addr);
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv);
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+/* Maximum number of adapters this module can serve */
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle passed to every request callback */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* "get" sub-commands; tables are NULL-terminated */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* "set" sub-commands */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Map an internal nt_link_speed_e value to the ntconnect PORT_LINK_SPEED_*
+ * constant; unknown values map to PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:
+		return PORT_LINK_SPEED_10M;
+	case NT_LINK_SPEED_100M:
+		return PORT_LINK_SPEED_100M;
+	case NT_LINK_SPEED_1G:
+		return PORT_LINK_SPEED_1G;
+	case NT_LINK_SPEED_10G:
+		return PORT_LINK_SPEED_10G;
+	case NT_LINK_SPEED_25G:
+		return PORT_LINK_SPEED_25G;
+	case NT_LINK_SPEED_40G:
+		return PORT_LINK_SPEED_40G;
+	case NT_LINK_SPEED_50G:
+		return PORT_LINK_SPEED_50G;
+	case NT_LINK_SPEED_100G:
+		return PORT_LINK_SPEED_100G;
+	default:
+		break;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Parse a textual speed ("10M" .. "100G") into nt_link_speed_t;
+ * unrecognized strings yield NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	if (strcmp(speed_str, "10M") == 0)
+		return NT_LINK_SPEED_10M;
+	else if (strcmp(speed_str, "100M") == 0)
+		return NT_LINK_SPEED_100M;
+	else if (strcmp(speed_str, "1G") == 0)
+		return NT_LINK_SPEED_1G;
+	else if (strcmp(speed_str, "10G") == 0)
+		return NT_LINK_SPEED_10G;
+	else if (strcmp(speed_str, "25G") == 0)
+		return NT_LINK_SPEED_25G;
+	else if (strcmp(speed_str, "40G") == 0)
+		return NT_LINK_SPEED_40G;
+	else if (strcmp(speed_str, "50G") == 0)
+		return NT_LINK_SPEED_50G;
+	else if (strcmp(speed_str, "100G") == 0)
+		return NT_LINK_SPEED_100G;
+	else
+		return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Handler for "get interfaces": describe every physical and virtual
+ * port (type, PCI id, link, speed, state, MTU, MAC, queues, NIM data).
+ * Allocates an ntc_interfaces_s returned through @data/@len.
+ *
+ * When phy port 0 reports an active LAG config, the phy ports are
+ * treated as one aggregated link and the port count is adjusted.
+ *
+ * NOTE(review): unlike the phy0 lookup, the per-port
+ * rte_eth_dev_get_by_name() results below are dereferenced without a
+ * NULL check - verify that all enumerated ports are guaranteed to
+ * resolve.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	/* Reply buffer: fixed header plus one entry per port */
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		/* Link status / speed read from the adapter layer */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		/* Port state: disabled, or NIM present/absent when enabled */
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode link length is clamped to the 16-bit maximum */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		/* Virtual port state/link derive from the negotiated mode */
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		/* Clamp output queue count so the queue[] array is not overrun */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Handle "adapter,get,info": reply with the FPGA id string
+ * "TTT-PPPP-VV-RR" in a freshly allocated buffer owned by the caller.
+ * Returns REQUEST_OK, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/*
+	 * Bound the format by the allocated size instead of the previous
+	 * magic "31", which silently assumed the reply struct is at least
+	 * 31 bytes.
+	 */
+	snprintf(*data, *len, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handle "adapter,get,sensors": reply with a ntc_sensors_s header followed
+ * by one struct sensor record per adapter sensor and per NIM sensor.
+ * The reply buffer is malloc'd and ownership passes to the caller.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/*
+			 * The reply buffer is malloc'd (not zeroed); clear the
+			 * whole name field before the bounded copy so the
+			 * unused tail cannot leak heap contents to the client.
+			 * NOTE(review): field size assumed to be 50 bytes, as
+			 * used by the adapter-sensor loop above — confirm
+			 * sizeof(sensor_ptr->name).
+			 */
+			memset(sensor_ptr->name, 0, 50);
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	*data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable port @port_nr on the adapter owned by @adap */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable port @port_nr on the adapter owned by @adap */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/* Request link up on @portid; a no-op (logged) if already requested up */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/* Request link down on @portid; a no-op (logged) if already requested down */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of @portid from the textual @speed_str.
+ * Only permitted while the port is administratively disabled; otherwise an
+ * error reply is produced in *data/*len.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	/* Guard: speed may only change while the port is down */
+	if (nt4ga_port_get_adm_state(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+	return REQUEST_OK;
+}
+
+/* Apply loopback @mode (NT_LINK_LOOPBACK_*) to @portid */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Toggle the TX power of @portid (@disable true turns it off).
+ * On failure an error reply is produced in *data/*len.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+	if (nt4ga_port_tx_power(p_adapter_info, portid, disable) == 0)
+		return REQUEST_OK;
+
+	NT_LOG(DBG, NTCONNECT,
+	       "Port %i: ERROR while changing tx_power\n", portid);
+	return ntconn_reply_status(data, len,
+				   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+}
+
+/*
+ * Handle "adapter,set,interface,portN,<attr>[=<value>]": parse the port
+ * token, then dispatch the attribute (link_speed, enable, disable,
+ * link_state, host_loopback, line_loopback, tx_power) to its setter.
+ * Returns REQUEST_OK on success, otherwise an ntconn error reply.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports.
+	 * Fixed inverted check: valid phy ports are 0..n_phy_ports-1, so
+	 * reject when port_nr is outside that range (the previous '<'
+	 * rejected exactly the valid ports and accepted invalid ones).
+	 */
+	if (port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * Handle "adapter,set,adapter,...": placeholder — the command is logged and
+ * NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED is always returned.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * Module request entry point: dispatch the request named by @function to
+ * the matching handler in adapter_entry_funcs.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by this module's handlers */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; this module keeps no per-client state */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect operations table registered for the "adapter" module */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register adapter @drv with the NtConnect framework using the first free
+ * slot in adap_hdl[]. Returns the result of register_ntconn_mod(), or -1
+ * when all MAX_ADAPTERS slots are taken.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_ADAPTERS && adap_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client module state (currently only the owning driver handle) */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Maps a client in_port to its flow device; filled by func_flow_setport() */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/*
+ * Error-code to message table, terminated by an err_code of (uint32_t)-1.
+ * Entry 1 ("Internal error") is the fallback chosen by get_error_msg().
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Translate @err_code to its message text via the ntconn_err table.
+ * Unknown codes map to entry 1 ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	const ntconn_err_t *entry = &ntconn_err[0];
+
+	while (entry->err_code != (uint32_t)-1 && entry->err_code != err_code)
+		entry++;
+
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry->err_text;
+}
+
+/*
+ * Build an error reply in a freshly allocated flow_return_s: @err is the
+ * status value, @code selects the message text. Returns REQUEST_OK, or
+ * REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * Use strlcpy() as ntconn_flow_err_status() does: the previous
+		 * memcpy of RTE_MIN(strlen(err_msg), ERR_MSG_LEN) bytes never
+		 * copied the NUL terminator, leaving err_msg unterminated in
+		 * the (uninitialized) malloc'd reply.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a generic internal-error reply carrying status @err.
+ * Returns REQUEST_OK, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *reply = malloc(sizeof(*reply));
+
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	reply->status = err;
+	reply->type = FLOW_ERROR_GENERAL;
+	strlcpy(reply->err_msg,
+		get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR), ERR_MSG_LEN);
+
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Dispatch table for "filter,<function>,..." requests; NULL-terminated */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Marshal (@status, @error) into a freshly allocated flow_return_s reply.
+ * Returns REQUEST_OK, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *reply = malloc(sizeof(*reply));
+
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	reply->status = status;
+	reply->type = error->type;
+	strlcpy(reply->err_msg, error->message, ERR_MSG_LEN);
+
+	*data = (char *)reply;
+	*len = sizeof(*reply);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * Handle "filter,setport,in_port=N,vpath=P": bind client port N to the flow
+ * device of the virtio path P and reply with the attached queue list.
+ * Errors are reported via ntconn_flow_err_status() replies.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/*
+	 * Must be initialized: when the "vpath=" token is absent below, the
+	 * buffer was previously logged and handed to vp_path_instance_ready()
+	 * while still uninitialized.
+	 */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle "filter,flush,port=N": flush all flows on the flow device that a
+ * previous "setport" bound to client port N.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * atoi() can yield a negative value; reject it too, so port_eth[] is
+	 * never indexed out of bounds.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handle "filter,destroy": destroy the flow handle carried in the binary
+ * blob that follows the request header.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/*
+	 * Initialize error as func_flow_flush() does, so error.type/.message
+	 * are always defined when marshalled into the reply below.
+	 */
+	set_error(&error);
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject negative ids as well to avoid indexing port_eth[] OOB */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selects whether make_flow_create() validates or creates the flow */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Unmarshal the flattened flow description in @flow_cpy into flow_elem /
+ * flow_action arrays, then either validate (@func == FLOW_API_FUNC_VALIDATE)
+ * or create the flow on the device bound to @port.
+ * Returns the new flow handle (0 on validate or on error); *status carries
+ * the detailed result, *error the message.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	do {
+		idx++;
+		/*
+		 * '>=' : elem[]/elem_cpy[] hold MAX_FLOW_STREAM_ELEM entries;
+		 * the previous '>' allowed a write one past the end.
+		 */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	idx = -1;
+	do {
+		idx++;
+		/* '>=' for the same bound reason as the element loop above */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					/* '>=' : elem_tun[] bound, see above */
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					/*
+					 * '>=' : keeps eidx a valid index of
+					 * items[]. NOTE(review): assumes
+					 * items[] holds RAW_ENCAP_DECAP_ELEMS_MAX
+					 * entries — confirm the field size.
+					 */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					/* '>=' : same bound fix as encap above */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+/*
+ * ntconnect request handler: create a flow on a port.
+ *
+ * The request blob at (*data)[hdr->len] must contain exactly one
+ * struct create_flow_ntconnect.  On success *data is replaced with a
+ * malloc'ed create_flow_return_s carrying the new flow handle; on filter
+ * failure a flow_error_return_s with the error details is returned
+ * instead.  Ownership of the reply buffer passes to the caller (released
+ * through flow_free_data).  Returns REQUEST_OK or REQUEST_ERR.
+ */
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/* Pre-set error so error.message is valid even if make_flow_create()
+	 * fails without filling it in (consistent with func_flow_validate
+	 * and func_flow_query, which both call set_error() up front).
+	 */
+	set_error(&error);
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* BUGFIX: was flow_cpy[i].elem[i] - indexing the single
+		 * request struct as an array reads out of bounds for i > 0.
+		 */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		/* Success: hand the new flow handle back to the client */
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	/* Creation failed: report the flow_error details to the client */
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: validate a flow specification without
+ * programming it.
+ *
+ * Same request layout as func_flow_create (one create_flow_ntconnect in
+ * the blob at (*data)[hdr->len]); the result is sent back through
+ * copy_return_status().  Returns REQUEST_OK or REQUEST_ERR.
+ *
+ * NOTE(review): the original version had ~30 lines of unreachable code
+ * after the unconditional return (a duplicated make_flow_create call and
+ * a reply-building sequence plus its goto label); that dead code has been
+ * removed.  The DEBUG_PARSING octet prints also always printed byte [0]
+ * four times - fixed to print all four octets, matching func_flow_create.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_create\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* BUGFIX: was flow_cpy[i].elem[i] - indexing the single
+		 * request struct as an array reads out of bounds for i > 0.
+		 */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			/* BUGFIX: each octet below used index [0] four
+			 * times; print bytes [0..3] as func_flow_create does.
+			 */
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect request handler: run a query action (e.g. counters) against
+ * an existing flow and return the result blob to the client.
+ *
+ * The request blob at (*data)[hdr->len] must hold one
+ * struct query_flow_ntconnect.  The reply is a malloc'ed
+ * query_flow_return_s followed by the query payload; ownership of the
+ * buffer passes to the caller.  Returns REQUEST_OK or REQUEST_ERR.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The blob must be exactly one query request */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/* The client passes the flow handle back as an opaque integer */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *out_buf = NULL;
+	uint32_t out_len = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &out_buf,
+			    &out_len, &error);
+
+	*data = malloc(sizeof(struct query_flow_return_s) + out_len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	struct query_flow_return_s *reply =
+		(struct query_flow_return_s *)*data;
+	*len = sizeof(struct query_flow_return_s) + out_len;
+
+	reply->status = status;
+	reply->type = error.type;
+	strlcpy(reply->err_msg, error.message, ERR_MSG_LEN);
+
+	if (out_buf) {
+		/* Append the query payload after the fixed-size header */
+		memcpy(reply->data, out_buf, out_len);
+		reply->data_length = out_len;
+		free(out_buf);
+	} else {
+		reply->data_length = 0;
+	}
+	return REQUEST_OK;
+}
+
+/* Entry point for the "flow" module: dispatch a named request function
+ * through the module's function table.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr,
+				function, adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by this module.
+ * free(NULL) is a no-op per the C standard, so no guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook; the flow module keeps no per-client state */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module descriptor handed to the ntconnect framework: name, version,
+ * request dispatcher and buffer/client lifecycle hooks.
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/* Register one adapter with the ntconnect "flow" module.
+ * Claims the first free slot in flow_hdl[]; returns the result of
+ * register_ntconn_mod(), or -1 when all MAX_CLIENTS slots are taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Locate the first unused adapter slot */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-module error code to message table.
+ * Terminated by a sentinel entry with err_code == (uint32_t)-1;
+ * index 1 ("Internal error") doubles as the fallback for unknown codes
+ * (see get_error_msg()).
+ */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+static const char *get_error_msg(uint32_t err_code)
+{
+	int idx = 0;
+
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *err_msg = get_ntconn_error(err_code);
+
+		return err_msg->err_text;
+	}
+	while (ntconn_err[idx].err_code != (uint32_t)-1 &&
+			ntconn_err[idx].err_code != err_code)
+		idx++;
+	if (ntconn_err[idx].err_code == (uint32_t)-1)
+		idx = 1;
+	return ntconn_err[idx].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Request-name to handler dispatch table for the meter module;
+ * terminated by a NULL entry (consumed by execute_function()).
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	/* message points into a static table (get_error_msg);
+	 * no ownership is transferred.
+	 */
+	error->message = get_error_msg(err);
+	error->cause = NULL;
+}
+
+/*
+ * ntconnect request handler: return the rte_mtr capabilities of the
+ * physical port backing a virtual port.
+ *
+ * *data carries a text request containing a "vport=<n>" token.  On
+ * success the reply is a malloc'ed meter_capabilities_return_s; on
+ * failure a meter_error_return_s.  Ownership of the reply buffer passes
+ * to the caller.  Returns REQUEST_OK or REQUEST_ERR (malloc failure).
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		/* strlen() returns size_t; keep it unsigned instead of the
+		 * previous implicit conversion to int.
+		 */
+		size_t length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* NOTE(review): this accepts vport 1..64 while func_meter_setup
+	 * accepts 4..128 - confirm the intended valid range.
+	 */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is derived from the low bit of the virtual port */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: configure metering for a virtual port.
+ *
+ * *data starts with a 6-character command token (addpro/delpro/addpol/
+ * delpol/crtmtr/delmtr) and the blob at (*data)[hdr->len] holds one
+ * struct meter_setup_s.  Profile/policy/meter ids are remapped into a
+ * per-vport slice of the global id space before calling the rte_mtr API.
+ * The reply is a malloc'ed meter_return_s (success) or
+ * meter_error_return_s (failure); ownership passes to the caller.
+ * Returns REQUEST_OK or REQUEST_ERR (malloc failure).
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	/* NOTE(review): valid vports are 4..128 here but 1..64 in
+	 * func_meter_get_capabilities - confirm the intended range.
+	 */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		/* strlen() returns size_t; keep it unsigned instead of the
+		 * previous implicit conversion to int.
+		 */
+		size_t length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	/* All six commands are handled below; command was validated above,
+	 * so status is always assigned before it is read.
+	 */
+	switch (command) {
+	case ADD_PROFILE:
+		/* Each vport owns a contiguous slice of the profile space */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Expand the flattened per-color actions into the policy */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Remap profile/policy ids into this vport's slice as well */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: read (and optionally clear) the statistics of
+ * one meter instance identified by (vport, mtr_id) in the request blob.
+ *
+ * On success returns REQUEST_OK with a malloc'ed meter_return_stat_s in *data;
+ * on failure returns REQUEST_OK with a meter_error_return_s describing the
+ * error, or REQUEST_ERR (with *len = 0) if the reply could not be allocated.
+ * The caller owns and frees *data (see meter_free_data).
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The request blob must be exactly one meter_get_stat_s */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Valid virtual ports are 4..128 (0..3 are not meter-capable here) */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* NOTE(review): LSB of vport selects the physical port - confirm mapping */
+	port = cpy_data->vport & 1;
+	/* Rebase the per-vport meter id into the global meter id space */
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct.
+	 * 'error' is always populated before reaching this label, either by
+	 * copy_mtr_error() above or by the failed rte_mtr_stats_read() call.
+	 */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	/* Could not even allocate the error reply */
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Module entry point: dispatch an incoming "meter" request to the matching
+ * handler in adapter_entry_funcs.
+ */
+static int meter_request(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	/* Fix: client_id was annotated _unused although it is forwarded to
+	 * execute_function(); the misleading attribute is removed.
+	 */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously handed out by meter_request handlers.
+ * free(NULL) is a defined no-op, so the former NULL guard was redundant.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client teardown hook required by the ntconnapi_t interface.
+ * The meter module keeps no per-client state, so this is a no-op.
+ */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/*
+ * ntconnect operations table for the meter module:
+ * module name, version, request dispatcher, reply-buffer release and
+ * per-client cleanup callbacks.
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Attach a driver instance to the meter ntconnect module.
+ * Scans the handle table for a free slot and registers the module;
+ * fails with -1 when every slot is already occupied.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name starts with '_' + uppercase, which is reserved
+ * for the implementation - consider NTCONN_MODULES_H_ in a follow-up.
+ */
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules.
+ * Each call registers the named module for the given driver instance and
+ * returns 0 on success, -1 on failure.
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..437cf9ddad
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,877 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+/* const: this is a read-only lookup table, never written at runtime */
+static const int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistics section within a client snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One snapshot section: start address and size in 64bit words
+ * (record data plus STAT_INFO_ELEMENTS header words, see get_size()).
+ */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot state, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* NULL until the client takes a snapshot */
+	struct snaps_s *next;
+};
+
+/* Module singleton handle: driver, hw statistics and snapshot list head */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Counter category selector used by get_size()/do_get_stats() */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Dispatch table for "get snapshot <section>" requests */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* Dispatch table for "get <item>" requests; "snapshot" descends one level */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level dispatch table of the stat module:
+ * "get ..." descends into funcs_get_level1, "snapshot" collects an atomic
+ * snapshot of all statistics for later sectioned retrieval.
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize flowmatcher (FLM) statistics into the reply buffer 'val'.
+ * Writes an ntc_stat_get_data_s header followed by nbc records; returns the
+ * total number of 64bit words written.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* NOTE(review): the xstats calls appear to be kept only for their
+	 * counter-refresh side effect; their outputs are unused - confirm.
+	 */
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+	(void)cnt_names;	/* silence unused warnings when NDEBUG is set */
+	(void)cnt_values;
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/* Fix: advance the destination record each iteration;
+		 * previously only the first of nbc records was written while
+		 * nbc records were reported back to the caller.
+		 */
+		for (c = 0; c < nbc; c++, flm++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/* Fix: zero all nbc output records, sized by the destination
+		 * record type (the old code zeroed sizeof(source struct) once).
+		 */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize color statistics into the reply buffer 'val': header followed by
+ * nbc color records. Returns the number of 64bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *out = (struct ntc_stat_get_data_s *)val;
+
+	out->nb_counters = (uint64_t)nbc;
+	out->timestamp = hwstat->last_timestamp;
+	out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap */
+	struct color_type_fields_s *dst =
+		(struct color_type_fields_s *)out->data;
+
+	for (int i = 0; i < nbc; i++) {
+		dst[i].pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		dst[i].octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		dst[i].tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize host-buffer (queue) statistics into the reply buffer 'val':
+ * header followed by nbq queue records. Returns 64bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *out = (struct ntc_stat_get_data_s *)val;
+
+	out->nb_counters = (uint64_t)nbq;
+	out->timestamp = hwstat->last_timestamp;
+	out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Record layout is identical for virt and cap */
+	struct queue_type_fields_s *dst =
+		(struct queue_type_fields_s *)out->data;
+
+	for (int i = 0; i < nbq; i++) {
+		dst[i].flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		dst[i].drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		dst[i].fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		dst[i].dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		dst[i].flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		dst[i].drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		dst[i].fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		dst[i].dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/* Copy the RMON counter set shared by Rx and Tx port statistics */
+static void copy_rmon_stat(struct port_counters_v2 *src,
+			    struct stat_rmon_s *dst)
+{
+	dst->drop_events = src->drop_events;
+	dst->pkts = src->pkts;
+	dst->octets = src->octets;
+	dst->broadcast_pkts = src->broadcast_pkts;
+	dst->multicast_pkts = src->multicast_pkts;
+	dst->unicast_pkts = src->unicast_pkts;
+	dst->pkts_alignment = src->pkts_alignment;
+	dst->pkts_code_violation = src->pkts_code_violation;
+	dst->pkts_crc = src->pkts_crc;
+	dst->undersize_pkts = src->undersize_pkts;
+	dst->oversize_pkts = src->oversize_pkts;
+	dst->fragments = src->fragments;
+	dst->jabbers_not_truncated = src->jabbers_not_truncated;
+	dst->jabbers_truncated = src->jabbers_truncated;
+	dst->pkts_64_octets = src->pkts_64_octets;
+	dst->pkts_65_to_127_octets = src->pkts_65_to_127_octets;
+	dst->pkts_128_to_255_octets = src->pkts_128_to_255_octets;
+	dst->pkts_256_to_511_octets = src->pkts_256_to_511_octets;
+	dst->pkts_512_to_1023_octets = src->pkts_512_to_1023_octets;
+	dst->pkts_1024_to_1518_octets = src->pkts_1024_to_1518_octets;
+	dst->pkts_1519_to_2047_octets = src->pkts_1519_to_2047_octets;
+	dst->pkts_2048_to_4095_octets = src->pkts_2048_to_4095_octets;
+	dst->pkts_4096_to_8191_octets = src->pkts_4096_to_8191_octets;
+	dst->pkts_8192_to_max_octets = src->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port counters into the reply buffer 'val': header followed by
+ * nbp port records. The record format depends on the adapter mode:
+ * vswitch uses the reduced virt set, capture uses RMON plus Rx-only counters.
+ * Returns the number of 64bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch: reduced per-port virt counter set */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* capture: common RMON block plus Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize Tx port counters into the reply buffer 'val': header followed by
+ * nbp port records (virt or cap format, see read_rx_counters).
+ * Returns the number of 64bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		/* vswitch: reduced per-port virt counter set */
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			/* NOTE(review): pkts is deliberately overridden with the
+			 * accumulated Tx total rather than the RMON copy - confirm.
+			 */
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/* Reply with the hw statistics layout version as a single int */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(int));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/* Reply with the flowmatcher record layout version (1 or 2) as an int */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	*data = NULL;
+	*len = 0;
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat)
+		return REQUEST_ERR;
+
+	reply = malloc(sizeof(int));
+	if (!reply) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	if (stat->p_nt4ga_stat->flm_stat_ver < 18)
+		*reply = 1;
+	else
+		*reply = 2;
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return the total number of 64bit counters occupied by the given stat type
+ * (record data plus the STAT_INFO_ELEMENTS header). Additionally stores the
+ * number of records of this type (queues, ports, ...) in *num_records.
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int cnt = 0;
+	int per_rec = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		cnt = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		per_rec = NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		cnt = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		per_rec = NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		cnt = stat->p_nt4ga_stat->mn_rx_ports;
+		per_rec = stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch ?
+			  NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			  NUM_STAT_RECORD_TYPE_RX_PORT_CAP;
+		break;
+	case STAT_TYPE_TX:
+		cnt = stat->p_nt4ga_stat->mn_tx_ports;
+		per_rec = stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch ?
+			  NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			  NUM_STAT_RECORD_TYPE_TX_PORT_CAP;
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		cnt = 1;
+		per_rec = NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = cnt;
+	return cnt * per_rec + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for the "get" handlers: allocate a reply buffer sized for
+ * the stat type, fill it with the given reader under the driver's stat lock,
+ * and hand it back via *data/*len (length in bytes). Caller frees *data.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int size = get_size(stat, stype, &nbg);	/* size in 64bit counters */
+
+	size *= sizeof(uint64_t);	/* convert to bytes for malloc/*len */
+	uint64_t *val = (uint64_t *)malloc(size);
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Lock guards the shared hw counter structures while serializing */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = size;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": reply with the serialized flowmatcher statistics */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": reply with the serialized color statistics */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": reply with the serialized host-buffer statistics */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get rx_counters": reply with the serialized Rx port statistics */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get tx_counters": reply with the serialized Tx port statistics */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot record for client_id. Returns NULL when not found.
+ * When 'parent' is non-NULL it receives the predecessor in the list (the
+ * node before the match, or the list tail when there is no match).
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+		prev = cur;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot record for client_id, creating and list-linking a
+ * fresh one on first use. Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *entry = find_client_snap_data(stat, client_id, NULL);
+
+	if (entry)
+		return entry;
+
+	entry = malloc(sizeof(struct snaps_s));
+	if (entry) {
+		entry->client_id = client_id;
+		entry->buffer = NULL;
+		entry->next = stat->snaps_base;
+		stat->snaps_base = entry;
+	}
+	return entry;
+}
+
+/*
+ * "snapshot": atomically collect all statistic sections into one per-client
+ * buffer (under the stat lock) so subsequent "get snapshot <section>"
+ * requests return a consistent view. Replies with no payload on success.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/* Fix: do not jump to err_out here - the stat lock is not yet
+		 * held and err_out unlocks it; unlocking a mutex the thread
+		 * does not own is undefined behavior.
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	/* Drop any previous snapshot; free(NULL) is a no-op */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*data = NULL;
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	/* Only reached while the stat lock is held */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	/* Invalidate the partially filled snapshot so later "get snapshot"
+	 * requests report no-data instead of serving inconsistent counters.
+	 */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of the client's snapshot buffer into a fresh reply
+ * buffer. Fails with NO_DATA when the client has not taken a snapshot.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* Fix: check the malloc result (*data), not the always-valid out
+	 * parameter 'data' - the old test could never detect OOM.
+	 */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": return the color section of the snapshot */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": return the queue section of the snapshot */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": return the Rx port section of the snapshot */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": return the Tx port section of the snapshot */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ */
+/*
+ * Module entry point: dispatch an incoming "stat" request to the matching
+ * handler in stat_entry_funcs.
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	/* Fix: client_id was annotated _unused although it is forwarded to
+	 * execute_function(); the misleading attribute is removed.
+	 */
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer handed out by the stat request handlers */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client teardown: unlink the client's snapshot record from the list
+ * and release its buffer and node.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	/* free(NULL) is a no-op, so the former NULL guard was redundant */
+	free(snaps->buffer);
+	free(snaps);
+}
+
+/*
+ * ntconnect operations table for the stat module:
+ * module name, version, request dispatcher, reply-buffer release and
+ * per-client cleanup callbacks.
+ */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Attach a driver instance to the stat ntconnect module. The module is only
+ * activated when the adapter reports a supported statistics layout version.
+ * Returns 0 on success, -1 when unsupported or registration fails.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Robustness: the request handlers all null-check mp_nthw_stat;
+	 * guard here too before dereferencing it for the layout version.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: hw statistics not available. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: to small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+static void test_free_data(void *hdl _unused, char *data)
+{
+	if (data)
+		free(data);
+}
+
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+int ntconn_test_register(struct drv_s *drv)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (test_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[i],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* Re: [PATCH v13 8/8] net/ntnic: adds socket connection to PMD
  2023-09-01 12:18   ` [PATCH v13 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
@ 2023-09-02 17:26     ` Patrick Robb
  0 siblings, 0 replies; 142+ messages in thread
From: Patrick Robb @ 2023-09-02 17:26 UTC (permalink / raw)
  To: Mykola Kostenok; +Cc: dev, thomas, ckm, andrew.rybchenko, ferruh.yigit

[-- Attachment #1: Type: text/plain, Size: 136 bytes --]

The ARM Ampere server test failures on this patch are lab infra-failures (I did
some updates on the server yesterday) and they can be ignored.

[-- Attachment #2: Type: text/html, Size: 185 bytes --]

^ permalink raw reply	[flat|nested] 142+ messages in thread

* [PATCH v14 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (17 preceding siblings ...)
  2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-04 13:53 ` Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:53 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver;
thus, all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4181 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7211 +++++++++++++++++
 9 files changed, 11930 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0,
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+struct nt_fpga_field_init {
+	int id;
+	uint16_t bw;
+	uint16_t low;
+	uint64_t reset_val;
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+struct nt_fpga_register_init {
+	int id;
+	uint32_t addr_rel;
+	uint16_t bw;
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+struct nt_fpga_module_init {
+	int id;
+	int instance;
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base;
+	int nb_registers;
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params;
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..87b921da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/*
+ * DBS module: TX-side doorbell/queue field layouts and the DBS register map.
+ * Auto-generated for FPGA image 9563-055-024 -- do not hand-edit values,
+ * regenerate from the FPGA model instead.
+ * Field entry layout: { field_id, width_in_bits, lsb_bit_position, reset }.
+ * Register entry layout: { reg_id, index, width_in_bits, access_type,
+ * reset_value, field_count, field_table } -- NOTE(review): confirm struct
+ * member order against nt_fpga_field_init_t/nt_fpga_register_init_t in
+ * fpga_model.h.  A register's reset value is its field resets packed at
+ * their lsb positions, e.g. DBS_TX_QOS_RATE: 131073 == (DIV=2 << 16) | (MUL=1),
+ * DBS_TX_CONTROL: 66816 == (AMS=5 << 8) | (UWS=8 << 13).
+ */
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/* DBS register map; entries are sorted by register name, not by index. */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM module: flow-matcher field layouts and register map (auto-generated,
+ * image 9563-055-024; do not hand-edit -- regenerate).
+ * Field entry: { field_id, width_in_bits, lsb_bit_position, reset }.
+ * Register entry: { reg_id, index, width_in_bits, access_type, reset_value,
+ * field_count, field_table }.  Register resets are the field resets packed
+ * at their lsb positions, e.g. FLM_PRIO: 269488144 == 0x10101010 (FT0..FT3
+ * default 1 at bits 4/12/20/28) and FLM_CONTROL: 134217728 ==
+ * (SPLIT_SDRAM_USAGE=16 << 23).
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* 768-bit learn record; 32 fields, EOR (end-of-record) at bit 767. */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* Single-counter statistics registers: one 32-bit CNT field each. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register map; entries are sorted by register name, not by index. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG module: per-instance (0/1) generator field layouts and register map
+ * (auto-generated, image 9563-055-024; do not hand-edit -- regenerate).
+ * Field entry: { field_id, width_in_bits, lsb_bit_position, reset }; register
+ * entry: { reg_id, index, width_in_bits, access_type, reset_value,
+ * field_count, field_table }.  GFG_CTRL0/1 reset 4194304 == (SIZE=64 << 16).
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+/* Instance 1 defaults to stream id 1 (instance 0 defaults to 0). */
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF module: field layouts and register map (auto-generated, image
+ * 9563-055-024; do not hand-edit -- regenerate).  Field entry:
+ * { field_id, width_in_bits, lsb_bit_position, reset }; register entry:
+ * { reg_id, index, width_in_bits, access_type, reset_value, field_count,
+ * field_table }.  Note the two REGISTER_TYPE_RC1 entries (sticky/max
+ * statistics) -- presumably read-and-clear semantics; confirm against
+ * fpga_model.h.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY module: port-0/1 PHY control/status GPIO bits (auto-generated,
+ * image 9563-055-024; do not hand-edit -- regenerate).  Field entry:
+ * { field_id, width_in_bits, lsb_bit_position, reset }.  CFG reset 170 ==
+ * 0xAA (the four active-low *_B fields default to 1); GPIO reset 17 ==
+ * bits 0 and 4 (both LPMODE fields default to 1).
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU module: recipe (RCP) control/data field layouts and register map
+ * (auto-generated, image 9563-055-024; do not hand-edit -- regenerate).
+ * Field entry: { field_id, width_in_bits, lsb_bit_position, reset }.
+ * HFU_RCP_DATA is a 155-bit record with 31 fields (IL4OFS ends at bit 154).
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF module: host-interface identification/status/test registers
+ * (auto-generated, image 9563-055-024; do not hand-edit -- regenerate).
+ * Field entry: { field_id, width_in_bits, lsb_bit_position, reset }.
+ * HIF_PROD_ID_LSB reset 626734872 == (GROUP_ID=9563 << 16) |
+ * (VER_ID=55 << 8) | (REV_ID=24), matching the 9563-55-24 image this file
+ * describes.  BUILD_TIME 1693228548 is a Unix-epoch build timestamp.
+ * TEST0/TEST1 resets are the scratch patterns 0x11223344 / 0xAABBCCDD.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1693228548 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 929302248 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 2904641880 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 55459253 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 4051580681 },
+};
+
+/* Note: HIF register indices step by 8 (byte-addressed), unlike other
+ * modules in this file -- NOTE(review): confirm addressing unit in
+ * fpga_model.h.
+ */
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1693228548, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 929302248, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 2904641880, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 55459253, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 4051580681, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH module: hash recipe field layouts and register map (auto-generated,
+ * image 9563-055-024; do not hand-edit -- regenerate).  Field entry:
+ * { field_id, width_in_bits, lsb_bit_position, reset }.  HSH_RCP_DATA is a
+ * 743-bit record with 23 fields (AUTO_IPV4_MASK occupies the top bit, 742).
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST module: recipe field layouts and register map (auto-generated, image
+ * 9563-055-024; do not hand-edit -- regenerate).  Field entry:
+ * { field_id, width_in_bits, lsb_bit_position, reset }.  HST_RCP_DATA is a
+ * 133-bit record with three parallel MODIF0..2 { CMD, DYN, OFS, VALUE }
+ * groups at 34-bit intervals.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR module: recipe field layouts and register map (auto-generated, image
+ * 9563-055-024; do not hand-edit -- regenerate).  Field entry:
+ * { field_id, width_in_bits, lsb_bit_position, reset }; the 15-bit data
+ * record carries an enable bit and a 14-bit MTU.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC module: I2C controller field layouts (auto-generated, image
+ * 9563-055-024; do not hand-edit -- regenerate).  Field entry:
+ * { field_id, width_in_bits, lsb_bit_position, reset }.  Field names mirror
+ * a conventional I2C core register set (CR/SR/ISR/IER, TX/RX FIFOs, timing
+ * parameters T* ) -- NOTE(review): exact semantics live in the nthw IIC
+ * driver, not in this table.  In the SR layout both FIFO-empty flags
+ * default to 1 (FIFOs empty after reset).
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS module (auto-generated register map): RCP control/data word layout.
+ * Row meanings as for the other tables in this file -- presumably
+ * { id, width, offset, reset }; verify against fpga_model.h.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM module (auto-generated register map): CAM/TCAM/RCP tables.
+ * Note the wide multi-word data registers (KM_CAM_DATA 216 bits,
+ * KM_RCP_DATA 781 bits) whose fields carry bit offsets far beyond 32 --
+ * field offsets index into the whole data word, not a single 32-bit
+ * register. Row meanings presumably { id, width, offset, reset }; verify
+ * against fpga_model.h.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module (auto-generated register map): Ethernet MAC/PCS control,
+ * status, FEC counters and GTY transceiver controls. Row meanings
+ * presumably { id, width, offset, reset } for fields and
+ * { id, address, width, type, reset, n_fields, fields } for registers;
+ * verify against fpga_model.h. Non-zero register resets (e.g. 264714 for
+ * DEBOUNCE_CTRL, 811800 for GTY_DIFF_CTL) are the packed concatenation of
+ * the per-field reset values listed in the matching *_fields table.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
+/*
+ * MAC_RX module (auto-generated register map): receive-side statistics
+ * counters; every register is a single read-only 32-bit COUNT field.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};
+
+/*
+ * MAC_TX module (auto-generated register map): transmit-side statistics
+ * counters; every register is a single read-only 32-bit COUNT field.
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI_RD_TG module (auto-generated register map): PCIe read traffic
+ * generator -- presumably used for DMA self-test; confirm against the
+ * driver code that programs these registers.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA module (auto-generated register map): PCIe traffic analyzer --
+ * one write-only enable bit plus read-only good/bad/error packet counters.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG module (auto-generated register map): PCIe write traffic
+ * generator; mirrors PCI_RD_TG above with an extra INC_MODE bit and a
+ * SEQ register.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/* PDB init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* RCP_DATA is a wide (67-bit) record; lsb positions exceed 32 because the
+ * record spans multiple 32-bit data words. */
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/* PDI init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+/* Status register: 22-bit width = TXLVL lsb 15 + 7-bit width. */
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/* PTP1588 init tables (generated FPGA register map; IEEE 1588 time-sync
+ * block -- presumed from naming, confirm against hardware docs). Field
+ * rows appear to be { field id, bit width, lsb position, reset value };
+ * register rows { register id, index, bit width, access type, reset
+ * value, field count, field table } -- assumed, see include/fpga_model.h.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+/* Latched-low variant resets to all-ones (511 = 9 bits set). */
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+/* Register reset values combine the per-field resets, e.g. CONF 3072 =
+ * PHY_RST1 (bit 10) | PHY_RST2 (bit 11). */
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/* QSL init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ * CTRL/DATA pairs follow the common ADR/CNT indirect-access pattern used
+ * by the other table-based modules in this file.
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/* QSPI init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ * NOTE(review): the register indices match the AMD/Xilinx AXI Quad SPI
+ * core word offsets (e.g. SRR 16 -> 0x40, CR 24 -> 0x60, SR 25 -> 0x64)
+ * -- presumably this wraps that IP; confirm against PG153.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+/* ISR mirrors IER bit-for-bit (interrupt status vs. enable). */
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+/* Slave-select resets to all-ones (no slave selected, active-low). */
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/* RAC init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ * NOTE(review): unlike other modules, RAC register indices are large
+ * (4160..4480) -- presumably raw bus addresses rather than small word
+ * indices; confirm against the RAC bus-access code in later commits.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Inbound/outbound buffer free counters reset to 511 (empty, 9 bits). */
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/* RFD init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+/* TPID defaults are 33024 = 0x8100, the IEEE 802.1Q VLAN ethertype. */
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+/* 4789 is the IANA-assigned VXLAN UDP destination port (RFC 7348). */
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/* RMC init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+/* CTRL reset 771 = 0x303: BLOCK_STATT|BLOCK_KEEPA plus MAC_PORT = 3. */
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/* RPL init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 128-bit wide data record (single field spanning four 32-bit words). */
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/* RPP_LR init tables (generated FPGA register map). Field rows appear to
+ * be { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/* RST9563 init tables (generated FPGA register map; reset/clock control
+ * for the 9563 FPGA image this file describes). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* Most reset bits come up asserted (1); MMCM resets come up deasserted. */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+/* Sticky register is RC1 (read-to-clear) and mirrors STAT bit-for-bit. */
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/* SLC init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/* SPIM (SPI master) init tables (generated FPGA register map). Field rows
+ * appear to be { field id, bit width, lsb position, reset value };
+ * register rows { register id, index, bit width, access type, reset
+ * value, field count, field table } -- assumed, see include/fpga_model.h.
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+/* SR reset 6 = TXEMPTY (bit 1) | RXEMPTY (bit 2): both FIFOs empty. */
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/* SPIS (SPI slave) init tables (generated FPGA register map). Field rows
+ * appear to be { field id, bit width, lsb position, reset value };
+ * register rows { register id, index, bit width, access type, reset
+ * value, field count, field table } -- assumed, see include/fpga_model.h.
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+/* SR reset 6 = TXEMPTY (bit 1) | RXEMPTY (bit 2): both FIFOs empty. */
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/* STA (statistics) init tables (generated FPGA register map). Field rows
+ * appear to be { field id, bit width, lsb position, reset value };
+ * register rows { register id, index, bit width, access type, reset
+ * value, field count, field table } -- assumed, see include/fpga_model.h.
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/* TEMPMON init tables (generated FPGA register map). Field rows appear to
+ * be { field id, bit width, lsb position, reset value }; register rows
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/* TINT (timer interrupt) init tables -- name assumed from fields; verify.
+ * Field rows appear to be { field id, bit width, lsb position, reset };
+ * register rows { register id, index, bit width, access type, reset,
+ * field count, field table } -- see include/fpga_model.h.
+ */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/* TMC init tables (generated FPGA register map). Field rows appear to be
+ * { field id, bit width, lsb position, reset value }; the register row is
+ * { register id, index, bit width, access type, reset value, field count,
+ * field table } -- assumed layout, verify against include/fpga_model.h.
+ * Per-port replace bits: P0 resets to 0, P1 to 1 (register reset 2).
+ */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/* TSM connector 0-6 field tables (generated FPGA register map). Field
+ * rows appear to be { field id, bit width, lsb position, reset value } --
+ * assumed layout, verify against include/fpga_model.h. The tsm register
+ * table that references these arrays follows later in this file.
+ * Connectors 0-2 share one CONFIG layout; connectors 3-6 share another.
+ */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+/* Only connector 0 has an INTERFACE register in this map. */
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+/* NOTE(review): con5 uses _LO_TIME while all siblings use _LO_NS --
+ * possibly an inconsistency in the generator input; confirm upstream. */
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = {	/* { id, index, width(bits), type, reset, nb_fields, fields }; sorted by name; reset equals the OR of the field resets (e.g. CON0_CONFIG: 9<<8 | 2<<3 = 2320) */
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = {	/* { instance_id, instance_no, definition_id, ver_major, ver_minor, bus, address, nb_registers, registers } -- layout inferred from the trailing comments; TODO confirm against fpga_model.h */
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) -- instance id differs from definition id (SLC_LR is an SLC) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = {	/* { param_id, value } pairs, alphabetical by id; 140 entries plus the { 0, -1 } end sentinel */
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1693228548 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },	/* 6388 = 0x18F4 (Napatech PCI vendor id) */
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {	/* Top-level description of FPGA image 9563-55-24-0-0; registered via nthw_fpga_instances[] */
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1693228548, /* fpga_build_time (epoch seconds; matches NT_BUILD_TIME above) */
+	140,	    product_parameters, 48, fpga_modules, /* 140 = entries in product_parameters excl. END sentinel; 48 = entries in fpga_modules */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];	/* list of all supported FPGA image descriptions -- termination convention defined at the array's definition; TODO confirm */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;	/* product 9563, version 55, revision 24 */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_	/* NOTE(review): leading-underscore + uppercase guard names are reserved identifiers (C11 7.1.3); other ntnic headers use unprefixed guards */
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L)	/* NOTE(review): 32L is unassigned -- presumably a retired module id; do not reuse */
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * NOTE: Keep MOD_UNKNOWN_MAX as the last module ID!
+ * End indicator -- only alias definitions may appear below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* Total number of module IDs -- must stay equal to MOD_UNKNOWN_MAX */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..54db76b73e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM — control/debug/status IDs covering NIF CRC/sync errors and local/remote egress-ingress (EGS/IGS) overflow; exact module name not shown here */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM — back-pressure, CRC-error and drop counters plus SDRAM fill-level registers; exact module name not shown here */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L) /* NOTE(review): breaks the otherwise alphabetical ordering of this list */
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM — flow learn (LRN), info (INF) and status (STA) records, recipes and statistics counters (presumably the flow matcher — confirm); ID numbering jumps 1845 -> 1855 here, which appears normal for this generated map */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG — eight generator channels (ctrl/run/burst-size/size-mask/stream-id, PRBS enable); looks like a frame generator — name unverified */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF — TX-side FIFO/MAC interface with inter-frame-gap (IFG) control and timestamp injection; exact expansion of the acronym unverified */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L) /* NOTE(review): placed after ..._DELTA_DELTA, breaking alphabetical order */
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY — module control/status pins for two ports; signal names (LPMODE, MODPRS_B, RESET_B, INT_B) match QSFP-style module pins */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS — RXLOS/TXDISABLE/TXFAULT pins for eight ports (SFP-style signal set) */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP — SFP+ control/status pins (ABS, RS, RXLOS, TXDISABLE, TXFAULT) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU — recipe fields for rewriting packet-header lengths (slots A/B/C), TTL and L3/L4 offsets; presumably "header field update" — confirm against Napatech docs */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF — host interface registers: PCIe config (extended tag, max read/TLP sizes), product/build IDs, UUID and scratch/test registers */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L) /* NOTE(review): near-duplicate of HIF_PROD_ID_EXT below with distinct IDs — confirm both are intended in the generated map */
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH — receive-side hashing recipes: Toeplitz key (K), seed, QW/W word offsets, sort and masks */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST — header modify/strip recipes: start/end offsets, strip mode and three MODIF command slots; presumably a header stripper — confirm */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G — 10G PHY core: indirect register access, PHY lock/BER status, loopback controls and a test-frame generator (TFG) */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR — per-recipe enable + MTU fields; likely related to IP fragmentation — confirm against Napatech docs */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC — I2C controller; the register set (CR/SR/TX_FIFO/RX_FIFO/TEN_ADR/SOFTR/DGIE/IER/ISR and bus-timing THIGH/TLOW/TSU*/THD*) matches the Xilinx AXI IIC layout */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS — insert recipes (dynamic selector, offset, length); presumably a header/payload inserter — confirm */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA — VLAN push/pop and queue-override recipes, plus ROA/EPP egress-port and custom-TPID tables; exact module name not shown here */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF — fragment handling: unmatched-fragment queues (UNM/UNMQ), feed, expiry/timeout and size statistics; presumably IP defragmentation support — confirm */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+/* NOTE(review): apparently machine-generated sequential ID table; each register comes in four */
+/* per-port copies (suffix _0.._3). Do not renumber or reorder by hand — regenerate instead. */
+/* The ANEG_ABILITY_0 group lists 25GBASE_* before BASE25G_* while _1.._3 list BASE25G_* first; */
+/* presumably generator output order — harmless, but confirm before relying on ordering. */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+/* NOTE(review): LATENCY has only two instances (_0/_1), unlike the 4-instance groups around it */
+/* — presumably intentional generator output; confirm against the FPGA model. */
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX: RX MAC statistics registers and their fields. Values are sequential generated IDs (register index follows module prefix; each register define is followed by its field defines) — NOTE(review): these look like lookup IDs into the FPGA model, not hardware addresses; confirm against fpga_model.h. Generated table: do not renumber by hand. */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG: register/field IDs for a test frame generator (ADDR/CTRL/DATA table access, frame header, repetition count) — presumably a built-in traffic generator; confirm semantics against the FPGA documentation. Sequential generated IDs; do not renumber by hand. */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX: TX MAC statistics registers and their fields; mirrors the MAC_RX layout (register define followed by its field defines, sequential generated IDs) — NOTE(review): IDs into the FPGA model, not addresses; confirm against fpga_model.h. Generated table: do not renumber by hand. */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU: register/field IDs for an embedded microcontroller block — CSR (halt/pause/reset controls) plus indexed access to its data RAM (DRAM_CTRL/RD_DATA/WR_DATA) and instruction RAM (IRAM_CTRL/DATA). Sequential generated IDs; do not renumber by hand. */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG: register/field IDs for the MDG module — control/enable bits, per-flow-control debug "blocked" status (EGS/IGS FC0-FC3), HBM/HBS table access, and max-bytes/max-packets/timeout limits. NOTE(review): acronym expansion (MDG/HBM/HBS/BSO) is not visible here — confirm against the FPGA documentation. Sequential generated IDs; do not renumber by hand. */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: packet masking recipe table (per-recipe mask DYN/EN/LEN/OFS fields) */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* PCIE3: PCI Express Gen3 endpoint core (config, interrupts, latency/statistics) */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG: PCIe DMA read test generator (request RAM, iteration control) */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA: PCIe traffic analyzer (good/bad packet, length and payload error counters) */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG: PCIe DMA write test generator (request RAM, sequence, iteration control) */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01: clock management for the NT100A01-01 board (PTP/TS MMCMs, Si5328 GPIO) */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01: clock management for the NT50B01-01 board (TS clock MMCM) */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS: Ethernet Physical Coding Sublayer (block/lane lock, BER/BIP counters, GTH control) */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100: 100G Ethernet PCS variant (same register set as PCS, adds QPLL status) */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB: packet descriptor builder recipes (descriptor layout, dynamic offsets, TX control) */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI: UART-style serial interface (control/status/data, parity, frame and overrun errors) */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G: 10G Ethernet PHY core (PCS/PMA control and status, GT transceiver, GPIO) */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G: combined tri-speed (SGMII/1000BASE-X, autoneg) and 10G PHY */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM: host-buffer pointer manager (RX write / TX read pointers, pointer banks) */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 */
+/* RST9507 */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 */
+/* RST9510 */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537 */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+/* RXAUI */
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR */
+/* SMM */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v14 2/8] net/ntnic: adds core registers and fpga functionality
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-04 13:53   ` Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:53 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_BUS_H__
#define __NTHW_BUS_H__

#include <stdint.h>	/* uint8_t - keep this header self-contained */

/*
 * Bus instance identifier ("rab" presumably: Register Access Bus -
 * confirm against the RAC module naming).
 */
typedef uint8_t rab_bus_id_t;

#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Entry layouts for clock-chip (Si53xx) register programming profiles.
 * A profile is an array of (register address, value) pairs written to the
 * device during initialization; three entry formats exist, differing in
 * address width and whether a write mask is carried.
 */
#ifndef __NTHW_CLOCK_PROFILES_H__
#define __NTHW_CLOCK_PROFILES_H__

#include <stdint.h>

#include "nthw_helper.h"

/* Message used by compile-time size checks of profile data tables. */
#define clk_profile_size_error_msg "size test failed"

/* Format 0: 8-bit register address, value plus per-bit mask. */
typedef struct {
	unsigned char reg_addr;
	unsigned char reg_val;
	unsigned char reg_mask;	/* presumably a write mask - TODO confirm */
} clk_profile_data_fmt0_t;

/* Format 1: 16-bit register address, whole-byte value (no mask). */
typedef struct {
	uint16_t reg_addr;
	uint8_t reg_val;
} clk_profile_data_fmt1_t;

/* Format 2: wide register address, whole-byte value (no mask). */
typedef struct {
	unsigned int reg_addr;
	unsigned char reg_val;
} clk_profile_data_fmt2_t;

/* Discriminator telling consumers which entry layout a table uses. */
typedef enum {
	CLK_PROFILE_DATA_FMT_0,
	CLK_PROFILE_DATA_FMT_1,
	CLK_PROFILE_DATA_FMT_2
} clk_profile_data_fmt_t;

/* Si5340 profile for NT200A02 2x40G/2x100G; defined in nthw_clock_profiles.c. */
extern const int n_data_si5340_nt200a02_u23_v5;
extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;

#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Umbrella header: pulls in every NTHW core module header so users of the
 * core layer need a single include.
 */
#ifndef __NTHW_CORE_H__
#define __NTHW_CORE_H__

#include "nthw_helper.h"

/* Platform, FPGA model and host/PCIe interface modules */
#include "nthw_platform_drv.h"
#include "nthw_fpga_model.h"
#include "nthw_hif.h"
#include "nthw_pcie3.h"
#include "nthw_pci_rd_tg.h"
#include "nthw_pci_wr_tg.h"
#include "nthw_pci_ta.h"
#include "nthw_iic.h"

/* PHY/MAC related modules */
#include "nthw_gpio_phy.h"
#include "nthw_mac_pcs.h"
#include "nthw_mac_pcs_xxv.h"
#include "nthw_sdc.h"

/* SPI master and slave */
#include "nthw_spim.h"
#include "nthw_spis.h"

/* Time stamp module */
#include "nthw_tsm.h"

/* Si5340 clock chip */
#include "nthw_si5340.h"

#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
			   const int n_dev_addr, const int n_page_reg_addr)
{
	/*
	 * Probe an I2C-attached Silicon Labs clock chip and conclude which
	 * part it is from its ident registers.
	 * Returns the part number (5340, 5341 or 5338) or -1 on failure.
	 */
	const char *const p_adapter_id_str _unused =
		p_fpga->p_fpga_info->mp_adapter_id_str;
	uint64_t ident = -1;	/* stays all-ones in the log if the read fails */
	int res = -1;

	nthw_iic_t *p_nthw_iic = nthw_iic_new();

	if (p_nthw_iic) {
		uint8_t data;
		uint8_t a_silabs_ident[8];

		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);

		data = 0;
		/* switch to page 0 */
		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
				  (uint8_t)n_page_reg_addr, 1, &data);
		/* read 8 ident bytes starting at register 0x00 */
		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
				       sizeof(a_silabs_ident), a_silabs_ident);
		if (res == 0) {
			int i;

			/* fold ident bytes, big-endian, into one value for logging */
			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
				ident <<= 8;
				ident |= a_silabs_ident[i];
			}
		}
		nthw_iic_delete(p_nthw_iic);
		p_nthw_iic = NULL;

		/* Conclude SiLabs part */
		if (res == 0) {
			if (a_silabs_ident[3] == 0x53) {
				if (a_silabs_ident[2] == 0x40)
					res = 5340;
				else if (a_silabs_ident[2] == 0x41)
					res = 5341;
			} else if (a_silabs_ident[2] == 38) {
				/*
				 * NOTE(review): decimal 38 while the other ident
				 * comparisons use hex (0x53/0x40/0x41) - confirm
				 * this is not meant to be 0x38 for the Si5338.
				 */
				res = 5338;
			} else {
				res = -1;
			}
		}
	}

	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
	       res);
	return res;
}
+
/*
 * Calculate CRC-16-CCITT of passed data.
 * Polynomial ^16 + ^12 + ^5 + 1 (0x1021), initial value 0, no reflection -
 * i.e. the CRC-16/XMODEM variant (also used by X.25, HDLC, Bluetooth, SD).
 *
 * The three xor/shift steps after folding in each input byte are the classic
 * table-free equivalent of eight polynomial division steps.
 *
 * Improvement over the original: the input buffer is const-qualified (the
 * function never writes through it), which is backward compatible for all
 * callers.
 */
static uint16_t crc16(const uint8_t *buffer, size_t length)
{
	uint16_t crc = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		/* byte-swap, then fold the next input byte into the low half */
		crc = (uint16_t)((crc >> 8) | (crc << 8));
		crc = (uint16_t)(crc ^ buffer[i]);
		/* eight division steps, computed without a lookup table */
		crc = (uint16_t)(crc ^ ((crc & 0xff) >> 4));
		crc = (uint16_t)(crc ^ (crc << 12));
		crc = (uint16_t)(crc ^ ((crc & 0xff) << 5));
	}
	return crc;
}
+
+/*
+ * Probe the board controller (AVR) behind SPI v3 module instance
+ * @n_instance_no and populate p_fpga->p_fpga_info->nthw_hw_info with
+ * firmware version, platform id, hardware revision and VPD data
+ * (part number, serial number, board name, MAC addresses).
+ *
+ * Transfer sequence (all via nthw_spi_v3_transfer):
+ *   1) AVR_OP_SPI_VERSION - SPI command layer version
+ *   2) AVR_OP_VERSION     - FW version triplet + version/platform strings
+ *   3) AVR_OP_SYSINFO_2   - sysinfo container (falls back to AVR_OP_SYSINFO)
+ *   4) AVR_OP_VPD_READ    - CRC16-protected vital product data blob
+ *
+ * Returns -1 if the SPI object could not be allocated, otherwise the
+ * status of the last SPI transfer performed (0 on success).
+ */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		/* Local scratch aggregate for everything read from the AVR;
+		 * the layout mirrors the reply buffers parsed below. */
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		/* NOTE(review): res is not checked before u32 is consumed here,
+		 * nor after the AVR_OP_VERSION transfer below before rx_data is
+		 * parsed - verify the transfer layer zeroes/ignores on failure. */
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION
+		 * Reply layout (per the offsets used below): bytes [0..2] hold the
+		 * version triplet, [3..52] the version string, [53..72] the
+		 * platform id string.
+		 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		/* Publish the platform id string; force NUL termination in case
+		 * the source string filled the destination completely. */
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2
+		 * Expected fixed 16-byte container: [0] container version,
+		 * [1..4] avr-libc version, [5..7] signature bytes, [8] SPI
+		 * version, [9] hw revision, [10] ticks/sec, [11..14] uptime,
+		 * [15] OSCCAL.
+		 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			/* Cross-check: SPI version reported by the command layer
+			 * vs the one inside the sysinfo container. */
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO
+			 * Fallback for controllers without SYSINFO_2; the reply
+			 * layout parsed below is identical to the SYSINFO_2 case.
+			 */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				/* Legacy SYSINFO: populate both hw_id fields */
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ
+		 * The VPD blob ends with a stored CRC16 in its last two bytes;
+		 * only parsed further when the CRC over the preceding bytes
+		 * matches.
+		 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			/* NOTE(review): memcpy reads the stored CRC in host byte
+			 * order - assumes AVR and host agree; confirm. */
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				/* VPD blob layout: [0] PSU hw version, then PN,
+				 * PBA, SN, board name, platform section, packed
+				 * back to back (offsets accumulate below). */
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					/* The inverted mask doubles as an
+					 * integrity check of feature_mask */
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					/* Pack the 6 MAC octets into a 64-bit
+					 * value; the ntohl comparison detects a
+					 * little-endian host and swaps the two
+					 * 32-bit halves (byte-reversed) so the
+					 * numeric value is host-order. */
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			/* NOTE(review): published even when the CRC check failed
+			 * above (values then remain zero from the memset). The
+			 * octet copy assumes ma_mac_addr_octets holds at most 6
+			 * bytes - confirm against the struct definition. */
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+/*
+ * Configure the Si5340 clock synthesizer through the FPGA IIC controller
+ * using a format-2 clock profile (NT50B01, NT200A02, NT200A01-HWbuild2).
+ *
+ * Returns the status of nthw_si5340_config_fmt2().
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	nthw_iic_t *p_iic = nthw_iic_new();
+	nthw_si5340_t *p_si5340 = nthw_si5340_new();
+	int rc;
+
+	assert(p_iic);
+	assert(p_si5340);
+
+	/* I2C cycle time 125Mhz ~ 8ns */
+	nthw_iic_init(p_iic, p_fpga, 0, 8);
+
+	/* Si5340_U23_I2c_Addr_7bit */
+	nthw_si5340_init(p_si5340, p_iic, n_iic_addr);
+
+	rc = nthw_si5340_config_fmt2(p_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+
+	nthw_si5340_delete(p_si5340);
+	p_si5340 = NULL;
+	/* NOTE(review): the IIC object is not deleted here - presumably its
+	 * lifetime is tied elsewhere; confirm against nthw_iic ownership. */
+
+	return rc;
+}
+
+/*
+ * Top-level FPGA bring-up for an adapter:
+ *  - read and decode the FPGA identification registers,
+ *  - look up a matching supported FPGA description,
+ *  - initialise RAC, run adapter-specific reset, probe PCIe3/HIF and TSM.
+ *
+ * Fixes vs. the previous revision:
+ *  - fpga_mgr_new() result is NULL-checked before use,
+ *  - the FPGA manager is freed before the unsupported-FPGA early return
+ *    (it was leaked on that path),
+ *  - the inner shadowing declaration of n_fpga_ident is removed,
+ *  - the DEBUG TSM sampling loop is guarded against p_nthw_tsm == NULL.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	/* Read and decode the FPGA identification registers */
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/* Assign the function-scope ident directly (no shadowing) */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	/* Look up a supported FPGA description matching the ident */
+	p_fpga_mgr = fpga_mgr_new();
+	if (p_fpga_mgr == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Failed to allocate FPGA manager\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	/* The manager is only needed for the query above; free it before any
+	 * early return so the unsupported-FPGA path does not leak it. */
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	/* Register access (RAC) is mandatory for everything that follows */
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-family specific reset/bring-up */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Prefer PCIe3; fall back to HIF if the PCIe3 module is absent */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	/* Time sync module: set TS format and the two periodic timers */
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	/* Guard: nthw_tsm_new() may have failed above */
+	if (p_nthw_tsm) {
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down FPGA access for an adapter by resetting the register access
+ * bus (RAB) via the RAC.
+ *
+ * @param p_fpga_info  adapter state; may be NULL
+ * @return -1 when p_fpga_info or its RAC was never initialised,
+ *         otherwise the status of nthw_rac_rab_reset()
+ *
+ * Fix: the previous revision re-tested p_fpga_info inside a block already
+ * guarded by the same check; the redundant nesting is collapsed.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the NTHW FPGA core: probe/init/shutdown of the FPGA,
+ * board-controller (AVR) probing, IIC helpers and clock synth setup.
+ */
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* Bring up / tear down the FPGA for one adapter; 0 on success */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* Read FPGA build parameters into p_fpga_info */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* Probe the AVR board controller over SPI and fill nthw_hw_info/VPD */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* Scan a range of IIC controller instances for devices */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+/* Single-byte register access on a device behind an IIC controller */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* Detect a Silicon Labs device via its page register */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* Configure the Si5340 clock synth from a format-2 clock profile */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * Reset/bring-up sequence for the NT200A0x adapter family: run the common
+ * NT200A0x reset first, then the FPGA-product specific reset (only product
+ * 9563 is supported here).
+ *
+ * Returns 0 on success, non-zero on failure (errors are logged).
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res;
+
+	/* Common reset for the whole NT200A0x family */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* FPGA-product specific reset */
+	if (p_fpga_info->n_fpga_prod_id == 9563) {
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+	} else {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+	}
+
+	if (res != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve and cache all RST9563 register and field pointers used by the
+ * NT200A0x reset sequence. Fields that do not exist on the 9563 FPGA are
+ * set to NULL so later stages can probe for them before use.
+ *
+ * Returns 0 on success, -1 if the RST9563 module is absent.
+ *
+ * Fix: the original queried MOD_RST9563 twice with identical code and
+ * assigned mp_fld_rst_ptp twice; both duplicates are removed. A stray
+ * "Field not present on 9563" comment above RST9563_CTRL_TS_CLKSEL (a
+ * field that IS fetched) is also dropped.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* Optional fields: query (not get) so absence is not fatal */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the RST9563 PERIPH reset bit: assert, then immediately de-assert.
+ * Returns 0 on success, -1 if the RST9563 module cannot be found.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	nt_register_t *p_reg_rst;
+	nt_field_t *p_fld_rst_periph;
+
+	/* Guard clause: nothing to reset without the RST module */
+	if (!p_mod_rst)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+	p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+	p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+	field_set_flush(p_fld_rst_periph);
+	field_clr_flush(p_fld_rst_periph);
+
+	return 0;
+}
+
+/*
+ * Initialize the on-board clock synthesizer. Only the Si5340 (model 5340)
+ * is supported here; any other detected model is rejected.
+ * Returns 0 on success, -1 on unsupported model or init failure.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	/* Reject anything that is not the Si5340 up front */
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id,
+		       n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Run the full 9563 reset/init sequence: peripheral reset, clock synth
+ * init, RST9563 field resolution, then the common NT200A0x reset.
+ * Stops and returns the first non-zero error code; 0 on success.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	const int n_clk_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_clk_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int rc;
+
+	rc = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	rc = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_clk_synth_model,
+						n_clk_synth_i2c_addr);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	rc = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	rc = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (rc != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       rc, __func__, __LINE__);
+		return rc;
+	}
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+/*
+ * Wait until the DDR4 PLL, DDR4 MMCM and (if present) TSM REF MMCM report
+ * locked, pulsing the DDR4 reset between retries, then clear all sticky
+ * MMCM/PLL unlock bits and verify the DDR4 ones stayed clear.
+ * Returns 0 on success, -1 if a required lock is never observed.
+ *
+ * Fix: the loop declared an inner "int locked" that shadowed the outer
+ * variable, so the retry-exhausted error log printed the wrong value and
+ * the outer uint32_t never received the (signed) wait result. A single
+ * int is now used throughout. "retrycount <= 0" on an unsigned type is
+ * written as "== 0", and the redundant "(true) &&" is dropped.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	uint32_t retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount == 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Release DDR PLL reset */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+/*
+ * Probe for an SDC module (optional) and wait for DDR4 PLL lock and SDRAM
+ * controller calibration, retrying up to n_retry_cnt_max times with a DDR4
+ * reset pulse between attempts. Returns 0 on success, non-zero on failure.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	/* A probe failure just means the FPGA has no SDC; not an error. */
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			/* Allocation failed; continue without SDC tracking. */
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	/* Retry loop: each pass waits for PLL lock, then (if an SDC exists)
+	 * for SDRAM calibration; on failure the DDR4 domain is reset and the
+	 * sequence repeats.
+	 */
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					/* NOTE(review): "%08lX" assumes long is
+					 * 64-bit for the uint64_t mask; PRIx64
+					 * would be portable — TODO confirm.
+					 */
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res);
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	/* free(NULL)-style: delete tolerates NULL when no SDC was created */
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Common NT200A0x FPGA reset sequence: assert all domain resets, select
+ * the timesync reference clock, release SYS/CORE MMCM resets and wait for
+ * lock, bring the remaining RAB buses and PHY/MAC/DDR4/SDC domains out of
+ * reset, wait for SDRAM calibration, then release the timesync/PTP
+ * domains and verify the sticky unlock bits. The statement order follows
+ * the numbered hardware bring-up steps and must not be rearranged.
+ * Returns 0 on success, -1 on any unrecoverable lock/calibration failure.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	/* NULL-checked fields below are absent on some FPGA variants */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	/* Lock failures here are logged but not fatal at this stage. */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	/* From here on a stuck sticky unlock bit IS fatal. */
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): this branch is compiled out (if (false)); presumably
+	 * kept from a reference implementation — confirm before removing.
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Reset/setup the RAB buses, probe the on-board AVR and IIC buses, and
+ * detect the clock synthesizer model (Si5340 first, then Si5338 for old
+ * NT200A01 build-1 HW). On success fills in p_rst's clock-synth model,
+ * I2C address and hw_id. Returns -1 if no clock synth is detected.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): the avr_probe result and the first iic_scan result
+	 * are overwritten without being checked; presumably these are
+	 * best-effort probes — confirm. The function ultimately returns the
+	 * result of the last iic_scan below (res is not reassigned after).
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset/clock state and register-field handles for NT200A0x adapters.
+ * Field pointers are resolved once at init time and used afterwards by
+ * the reset sequence; the function pointers allow per-HW-revision
+ * overrides of the serdes/PCS reset handling.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synthesizer (e.g. 5340/5338) and its address */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Optional per-HW-revision hooks; may be NULL - check before calling */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/*
+ * Allocate and zero-initialize a GPIO_PHY instance.
+ * Returns NULL on allocation failure; release with nthw_gpio_phy_delete().
+ */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc zero-initializes, replacing the malloc + memset pair */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/*
+ * Release a GPIO_PHY instance obtained from nthw_gpio_phy_new().
+ * The object is scrubbed before being handed back to the allocator;
+ * a NULL pointer is accepted and ignored.
+ */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY instance to its FPGA module and resolve all register
+ * and field handles for both interfaces (ports 0 and 1).
+ *
+ * When @p is NULL the call only probes for the module: it returns 0 when
+ * instance @n_instance exists, otherwise -1.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ *
+ * PLL_INTR and RXLOS fields are optional (register_query_field may
+ * return NULL); all accessors below NULL-check them before use.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* NOTE(review): presumably refreshes the CFG shadow from HW - confirm */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Return true when the LPMODE GPIO pin of interface @if_no reads non-zero. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/*
+ * Return true when the port interrupt of interface @if_no is asserted.
+ * INT_B is an active-low GPIO pin: pin low means interrupt pending.
+ */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/*
+ * Return true when interface @if_no is held in reset.
+ * RESET_B is an active-low GPIO pin: pin low means reset asserted.
+ */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/*
+ * Return true when a module is plugged into interface @if_no.
+ * MODPRS_B is an active-low GPIO pin: pin low means module present.
+ */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (field_get_updated(p->mpa_fields[if_no].gpio_mod_prs))
+		return false;
+
+	return true;
+}
+
+/*
+ * Return true when the PLL interrupt pin of interface @if_no is set.
+ * PLL_INTR is an optional, active-high GPIO pin; on HW without it
+ * (no INTR from the SyncE jitter attenuator) false is returned.
+ */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_pll_int;
+	if (!fld)
+		return false;
+
+	return field_get_updated(fld) != 0;
+}
+
+/*
+ * Return true when the (optional, emulated) RXLOS pin of interface
+ * @if_no is set; false when the field is absent on this HW.
+ */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_port_rxlos;
+	if (!fld)
+		return false;
+
+	return field_get_updated(fld) != 0;
+}
+
+/*
+ * Drive the LPMODE pin of interface @if_no: high when @enable, low
+ * otherwise. The corresponding CFG bit is cleared to make the pin an
+ * output.
+ */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/*
+ * Assert (@enable) or deassert the reset of interface @if_no.
+ * RESET_B is active-low, so asserting reset clears the pin. The
+ * corresponding CFG bit is cleared to make the pin an output.
+ */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/*
+ * Set or clear the (optional, emulated) RXLOS pin of interface @if_no.
+ * A no-op on HW where the field is absent.
+ */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_port_rxlos;
+	if (!fld)
+		return;
+
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Restore the default pin directions for interface @if_no: LPMODE,
+ * INT_B, RESET_B and MODPRS_B become inputs; the emulated RXLOS (when
+ * present) becomes an output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	field_set_flush(flds->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(flds->cfg_int); /* enable input */
+	field_set_flush(flds->cfg_reset); /* enable input */
+	field_set_flush(flds->cfg_mod_prs); /* enable input */
+	if (flds->cfg_port_rxlos)
+		field_clr_flush(flds->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+/* Number of PHY interfaces (ports) handled per GPIO_PHY module instance */
+#define GPIO_PHY_INTERFACES (2)
+
+/*
+ * Per-interface field handles. The cfg_* fields select pin direction,
+ * the gpio_* fields carry the pin values. cfg_pll_int/cfg_port_rxlos
+ * and their gpio_* counterparts are optional and may be NULL.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* Handle for one GPIO_PHY FPGA module instance */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/*
+ * Allocate and zero-initialize a HIF instance.
+ * Returns NULL on allocation failure; release with nthw_hif_delete().
+ */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc zero-initializes, replacing the malloc + memset pair */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/*
+ * Release a HIF instance obtained from nthw_hif_new().
+ * The object is scrubbed before being handed back to the allocator;
+ * a NULL pointer is accepted and ignored.
+ */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a HIF instance to its FPGA module, resolve register/field
+ * handles and cache the FPGA product identification read from the
+ * PROD_ID registers.
+ *
+ * When @p is NULL the call only probes for the module: it returns 0
+ * when instance @n_instance exists, otherwise -1.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	/* Cache the FPGA identification for logging and later checks */
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * NOTE(review): the two blocks below are dead code - the register
+	 * pointers are hard-coded to NULL immediately before being tested,
+	 * so both branches assign the same values. Presumably kept for
+	 * layout parity with other HIF variants; consider simplifying.
+	 */
+	p->mp_reg_build_seed = NULL; /* Reg/Fld not present on HIF */
+	if (p->mp_reg_build_seed)
+		p->mp_fld_build_seed = NULL; /* Reg/Fld not present on HIF */
+	else
+		p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL; /* Reg/Fld not present on HIF */
+	if (p->mp_reg_core_speed) {
+		p->mp_fld_core_speed = NULL; /* Reg/Fld not present on HIF */
+		p->mp_fld_ddr3_speed = NULL; /* Reg/Fld not present on HIF */
+	} else {
+		p->mp_reg_core_speed = NULL;
+		p->mp_fld_core_speed = NULL;
+		p->mp_fld_ddr3_speed = NULL;
+	}
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/*
+	 * NOTE(review): the else-branch re-queries the exact same register
+	 * that just returned NULL; the second query presumably also returns
+	 * NULL, making the branch equivalent to plain NULL assignments.
+	 * Confirm and simplify.
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	/*
+	 * NOTE(review): module_get_register (not _query_) is used here yet
+	 * the result is NULL-checked - confirm whether get can return NULL,
+	 * otherwise the else-branch is unreachable.
+	 */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Tune the PCIe gen3 link configuration: cap the max read-request size
+ * at 512B (index 2) and enable extended tags. Skipped entirely on
+ * NT40E3 adapters; fields absent on this FPGA are silently skipped.
+ * Always returns 0.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		/* Log the resulting config only when all three fields exist */
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+/*
+ * Write the sample-time trigger word (magic pattern 0xfee1dead -
+ * presumably the value HW expects; confirm against the register spec).
+ * nthw_hif_init() may leave mp_fld_sample_time NULL when the register
+ * is absent; guard against dereferencing it in that case.
+ * Always returns 0.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (p->mp_fld_sample_time)
+		field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw HIF statistics: rx/tx counters, reference-clock count,
+ * traffic-generator unit size and reference frequency, plus the
+ * optional STATUS fields (tags in use, rd/wr errors), which are
+ * reported as zero when absent on this FPGA. Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* STATUS fields are optional - default to zero when not present */
+	*p_tags_in_use = 0;
+	*p_rd_err = 0;
+	*p_wr_err = 0;
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+
+	return 0;
+}
+
+/*
+ * Convert the raw HIF counters into PCIe rx/tx rates scaled against
+ * the reference-clock count; all rates read as zero when the reference
+ * counter is zero. The rd/wr error outputs are cumulative event
+ * counters owned by the caller: they are incremented (not assigned)
+ * whenever an error indication is seen. Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx, n_tx, n_ref_clk, n_unit_size, n_ref_freq;
+	uint64_t n_tags, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size, &n_ref_freq,
+			&n_tags, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (n_ref_clk == 0) {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) /
+		(uint64_t)n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) /
+		(uint64_t)n_ref_clk;
+
+	return 0;
+}
+
+/*
+ * Start HIF statistics collection: set ENA and REQ, then flush the
+ * CTRL register (one write carries both bits). Always returns 0.
+ */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Stop HIF statistics collection: clear ENA while keeping REQ set,
+ * then flush the CTRL register. Always returns 0.
+ */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Take one statistics sample: enable collection, let the counters run
+ * for a fixed 100 ms window (blocking), disable collection and convert
+ * the counters into rates. Always returns 0.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000); /* 100 ms sampling window */
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill an end-point counter snapshot from the current HIF rates.
+ * Always returns 0.
+ *
+ * NOTE(review): cur_tx receives the p_pci_rx_rate output and cur_rx the
+ * p_pci_tx_rate output - presumably a deliberate host/end-point
+ * perspective inversion, but it could also be a swapped-argument bug;
+ * confirm against the callers.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+/* Traffic-generator counter unit size (bytes per count? - TODO confirm) */
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * Handle for one HIF (host interface) FPGA module instance.
+ * Register/field pointers are resolved by nthw_hif_init(); several of
+ * them are optional and left NULL when absent on the given FPGA.
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF - always NULL (see nthw_hif_init) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	/* Not present on HIF - always NULL (see nthw_hif_init) */
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional STATUS register - fields may be NULL */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* Optional PCIe CONFIG register - fields may be NULL */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA identification cached from the PROD_ID registers */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe end-point error counters, grouped by severity */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Snapshot of one PCIe end point: TG setup, rates and error counters */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	/* Traffic-generator parameters */
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	/* Error counters before/after a run and their difference */
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+/* Primary and slave end points of one adapter */
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/*
+ * Pulse the TX FIFO reset bit in the control register: refresh the register
+ * shadow, set the bit and flush, then clear it and flush again.
+ * Always returns 0.
+ */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Queue one word in the TX FIFO with the requested START/STOP framing bits
+ * and flush the register to hardware. Always returns 0.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the RX FIFO into *p_data. Always returns 0. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
+/*
+ * Soft-reset the controller by writing the reset key 0x0A to the SOFTR
+ * register. Always returns 0.
+ * NOTE(review): the update of the CR enable field before the SOFTR write
+ * looks like it only refreshes the CR shadow - confirm it is intentional.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Set and flush the CR enable bit, enabling the controller. Returns 0. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read the updated Status Register Bus-Busy bit into *pb_flag. Returns 0. */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Read the updated Status Register RX-FIFO-empty bit into *pb_flag. Returns 0. */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * Program the eight I2C timing registers from the minimum timing constants
+ * above. n_iic_cycle_time is the controller clock cycle time in ns,
+ * e.g. 125 MHz -> 8 ns; each nanosecond value is converted to clock cycles.
+ * Always returns 0.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		nt_field_t *p_fld;
+		uint32_t n_ns;
+	} a_timings[] = {
+		{ p->mp_fld_tsusta, susta }, { p->mp_fld_tsusto, susto },
+		{ p->mp_fld_thdsta, hdsta }, { p->mp_fld_tsudat, sudat },
+		{ p->mp_fld_tbuf, buf },     { p->mp_fld_thigh, high },
+		{ p->mp_fld_tlow, low },     { p->mp_fld_thddat, hddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timings) / sizeof(a_timings[0]); i++) {
+		uint32_t val = a_timings[i].n_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timings[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and zero-initialize an IIC controller context.
+ *
+ * Returns the new context, or NULL on allocation failure. The caller owns
+ * the object and must release it with nthw_iic_delete().
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc() zeroes the object, replacing the malloc+memset idiom */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Bind context p to IIC module instance n_iic_instance of p_fpga and bring
+ * the controller into a known state: soft reset, enable, optional timing
+ * setup, and a TX FIFO reset.
+ *
+ * When p is NULL this only probes for the module's presence.
+ * n_iic_cycle_time is the controller clock cycle time in ns; 0 skips the
+ * timing setup and keeps the hardware defaults.
+ *
+ * Returns 0 on success (or presence), -1 when the instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* -1 for every parameter selects the built-in retry defaults */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Resolve all register and field handles used by this driver */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Scrub and free a context created by nthw_iic_new(); NULL is a no-op. */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Configure the polling/retry budgets. Any negative argument selects the
+ * built-in default for that parameter: 10 us poll delay, 1000 bus/data
+ * ready polls, 10 read/write transaction retries. Always returns 0.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry =
+		(n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry =
+		(n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read data_len bytes from register reg_addr of I2C device dev_addr into
+ * p_void, retrying the whole transaction up to mn_read_data_retry times.
+ *
+ * Returns 0 on success, -1 once the retry budget is exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	/* Debug mode 0xff enables verbose transaction logging */
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one combined I2C write/read transaction: write reg_addr to
+ * device dev_addr, then read data_len bytes back into p_byte.
+ *
+ * Returns 0 on success, -1 if the bus does not become ready or data never
+ * arrives in the RX FIFO.
+ *
+ * Reworked to guard-clause style; the original tail 'return 0;' after an
+ * if/else in which both branches return was unreachable dead code and has
+ * been removed.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write data_len bytes from p_void to register reg_addr of I2C device
+ * dev_addr, retrying the whole transaction up to mn_write_data_retry times.
+ *
+ * Returns 0 on success, -1 once the retry budget is exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Perform one I2C write transaction: device address (start bit), register
+ * address, then data_len bytes with the stop bit on the last byte.
+ *
+ * Returns 0 on success, -1 for an empty payload or when the bus is not
+ * ready to start the transfer.
+ *
+ * NOTE(review): if the bus is still busy after the transfer, the loop below
+ * spins unbounded until the bus becomes ready - a stuck bus would hang the
+ * caller; consider bounding this wait.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	count = data_len - 1;	/* all but the last byte go without stop bit */
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Polls the Bus-Busy
+ * status bit until the bus is free, pausing mn_poll_delay us between polls,
+ * for at most mn_bus_ready_retry attempts.
+ *
+ * Returns true when the bus became ready, false on timeout.
+ *
+ * Bug fix: the timeout test used to be 'count == 0'; with a retry budget of
+ * 0 the counter decremented straight to -1 and the function falsely
+ * reported the bus as ready. Test for <= 0 instead.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			return true;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Support function for read function. Polls the RX-FIFO-empty status bit
+ * until data is available, pausing mn_poll_delay us between polls, for at
+ * most mn_data_ready_retry attempts.
+ *
+ * Returns true when data is available, false on timeout.
+ *
+ * Bug fix: the timeout test used to be 'count == 0'; with a retry budget of
+ * 0 the counter decremented straight to -1 and the function falsely
+ * reported data as ready. Test for <= 0 instead.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			return true;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Probe one device address by reading a single byte from n_reg_addr, and
+ * log the value when the device answers.
+ *
+ * Returns 0 if the device responded, non-zero otherwise.
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res;
+	uint8_t data_val = -1;
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Find the first responding device address, scanning upwards from
+ * n_dev_addr_start towards 127 when b_increate is true, or downwards
+ * towards 0 otherwise. (b_increate is presumably a typo for "b_increase".)
+ *
+ * Returns the address found, or -1 when no device responded.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	int res = 0;
+	int i = 0;
+
+	if (b_increate) {
+		for (i = n_dev_addr_start; i < 128; i++) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	} else {
+		for (i = n_dev_addr_start; i >= 0; i--) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/*
+ * Probe every 7-bit device address (0..127) on the bus, logging each one
+ * that responds. Always returns 0.
+ */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev_addr;
+
+	for (n_dev_addr = 0; n_dev_addr < 128; n_dev_addr++)
+		(void)nthw_iic_scan_dev_addr(p, n_dev_addr, 0x00);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/* Private context for one IIC (I2C controller) FPGA module instance. */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_iic;
+	int mn_iic_instance;
+
+	/* Controller clock cycle time in ns; 0 keeps the HW timing defaults */
+	uint32_t mn_iic_cycle_time;
+	/* Polling/retry budgets; see nthw_iic_set_retry_params() */
+	int mn_poll_delay;
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* I2C timing registers and their value fields */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register (CR) fields */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register (SR) fields */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data plus START/STOP framing bits */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt compare value */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft reset register; written with the reset key */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Life cycle: allocate, bind to an FPGA module instance, destroy */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Polling/retry configuration; negative arguments select defaults */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Retrying transaction wrappers and single-transaction primitives */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Polling helpers: true when ready, false on timeout */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning utilities */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate and zero-initialize a MAC/PCS context.
+ *
+ * Returns the new context, or NULL on allocation failure. The caller owns
+ * the object and must release it with nthw_mac_pcs_delete().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc() zeroes the object, replacing the malloc+memset idiom */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Scrub and free a context created by nthw_mac_pcs_new(); NULL is a no-op. */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Wrapper - for ease of use.
+ * Selects the host as Tx data source when enabled; otherwise the TFG block
+ * drives Tx. The two source-select bits are always programmed as opposites.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow value, then set/clear Rx enable and flush. */
+	nt_field_t *const rx_enable = p->mp_fld_pcs_config_rx_enable;
+
+	field_get_updated(rx_enable);
+	if (enable)
+		field_set_flush(rx_enable);
+	else
+		field_clr_flush(rx_enable);
+}
+
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Refresh the shadow value, then set/clear Tx enable and flush. */
+	nt_field_t *const tx_enable = p->mp_fld_pcs_config_tx_enable;
+
+	field_get_updated(tx_enable);
+	if (enable)
+		field_set_flush(tx_enable);
+	else
+		field_clr_flush(tx_enable);
+}
+
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Select (or deselect) the host as the Tx data source. */
+	nt_field_t *const tx_sel_host = p->mp_fld_phymac_misc_tx_sel_host;
+
+	field_get_updated(tx_sel_host);
+	if (enable)
+		field_set_flush(tx_sel_host);
+	else
+		field_clr_flush(tx_sel_host);
+}
+
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Select (or deselect) the TFG block as the Tx data source. */
+	nt_field_t *const tx_sel_tfg = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	field_get_updated(tx_sel_tfg);
+	if (enable)
+		field_set_flush(tx_sel_tfg);
+	else
+		field_clr_flush(tx_sel_tfg);
+}
+
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	/*
+	 * Set/clear end-of-packet timestamping. The TS_EOP field is optional;
+	 * when it was not resolved at init time this is a silent no-op.
+	 */
+	nt_field_t *const ts_eop = p->mp_fld_phymac_misc_ts_eop;
+
+	if (!ts_eop)
+		return;
+
+	field_get_updated(ts_eop);
+	if (enable)
+		field_set_flush(ts_eop);
+	else
+		field_clr_flush(ts_eop);
+}
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	/* Locked only when every masked block-lock AND lane-lock bit is set. */
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Assert (true) or release (false) the PCS Tx path reset bit. */
+	nt_field_t *const tx_path_rst = p->mp_fld_pcs_config_tx_path_rst;
+
+	field_get_updated(tx_path_rst);
+	if (enable)
+		field_set_flush(tx_path_rst);
+	else
+		field_clr_flush(tx_path_rst);
+}
+
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Assert (true) or release (false) the PCS Rx path reset bit. */
+	nt_field_t *const rx_path_rst = p->mp_fld_pcs_config_rx_path_rst;
+
+	field_get_updated(rx_path_rst);
+	if (enable)
+		field_set_flush(rx_path_rst);
+	else
+		field_clr_flush(rx_path_rst);
+}
+
+/* Return the current (re-read) state of the PCS Rx path reset bit. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Enable/disable transmission of remote fault indication (RFI). */
+	nt_field_t *const tx_send_rfi = p->mp_fld_pcs_config_tx_send_rfi;
+
+	field_get_updated(tx_send_rfi);
+	if (enable)
+		field_set_flush(tx_send_rfi);
+	else
+		field_clr_flush(tx_send_rfi);
+}
+
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	/* Set/clear the "force Rx resynchronization" config bit. */
+	nt_field_t *const rx_force_resync = p->mp_fld_pcs_config_rx_force_resync;
+
+	field_get_updated(rx_force_resync);
+	if (enable)
+		field_set_flush(rx_force_resync);
+	else
+		field_clr_flush(rx_force_resync);
+}
+
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	/* True only when all four GTY Rx reset-done fields have every mask
+	 * bit set.
+	 */
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *const done[] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(done); i++) {
+		if ((field_get_updated(done[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	/* True only when all four GTY Tx reset-done fields have every mask
+	 * bit set.
+	 */
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *const done[] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(done); i++) {
+		if ((field_get_updated(done[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/*
+	 * Loop code 2 selects host-side loopback on all four GT lanes;
+	 * 0 restores the normal datapath (GTY LOOPBACK encoding, see UG578).
+	 */
+	const uint32_t loop_code = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_code);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	/*
+	 * Loop code 4 selects line-side loopback on all four GT lanes;
+	 * 0 restores the normal datapath (GTY LOOPBACK encoding, see UG578).
+	 */
+	const uint32_t loop_code = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_code);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_code);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read out the per-lane BIP8 error counters (logging non-zero lanes in
+ * DEBUG builds).
+ * NOTE(review): the "reset" in the name suggests the counters are
+ * clear-on-read; this code only reads them - confirm against the register
+ * specification.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Fetch the PCS Rx status into *status, reduced to a single bit (0/1). */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* Return the latest PCS Rx high bit-error-rate (hi_ber) indication. */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the LINK_SUMMARY register once and scatter the requested fields into
+ * the caller's out-pointers. Any out-pointer may be NULL to skip that field.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either,
+ * i.e. the lanes are only partially locked.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	uint32_t block_lock_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	uint32_t lane_lock_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+
+	return ((block_lock != 0) && (block_lock != block_lock_mask)) ||
+	       ((lane_lock != 0) && (lane_lock != lane_lock_mask));
+}
+
+/*
+ * Enable or bypass RS-FEC. Writing 0 to RS_FEC_CTRL_IN enables FEC; writing
+ * all five control bits ((1 << 5) - 1 = 0x1F) disables it (the DEBUG branch
+ * below reads back FEC_STAT.BYPASS in that case). Both paths are then reset
+ * since the new FEC state only becomes active after a path reset.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	if (enable) {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, 0);
+	} else {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+				    (1 << 5) - 1);
+	}
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Return the FEC_STAT bypass flag (FEC currently bypassed). */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* Return the FEC_STAT valid flag (FEC operating). */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* Return the FEC_STAT lane-alignment flag. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	/* True when at least one FEC alignment-marker lock bit is set. */
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	/* True only when all four FEC alignment-marker lock bits are set. */
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/*
+ * Dump all FEC_STAT fields to the debug log.
+ * Fix: the fourth lock label previously read "AM_LOCK_0" although the value
+ * printed is am_lock3; it now correctly reads "AM_LOCK_3".
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read the FEC corrected (CW) and uncorrected (UCW) codeword counters and
+ * log any non-zero values.
+ * NOTE(review): the name implies the counters are cleared, but this code
+ * only reads them - presumably they are clear-on-read; confirm with the
+ * register specification.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Report whether any GTY Rx buffer lane has an error.
+ * Returns true - and logs the four lane statuses - only when a status change
+ * was latched AND at least one lane currently reports a non-zero status.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	/*
+	 * Program the GTY Tx tuning (pre-cursor, differential control and
+	 * post-cursor) for one lane. Each value is truncated to 5 bits.
+	 * A lane index above 3 writes nothing, but the registers are still
+	 * refreshed and the parameters are still logged.
+	 */
+	nt_field_t *const pre_csr[4] = {
+		p->mp_field_gty_pre_cursor_tx_pre_csr0,
+		p->mp_field_gty_pre_cursor_tx_pre_csr1,
+		p->mp_field_gty_pre_cursor_tx_pre_csr2,
+		p->mp_field_gty_pre_cursor_tx_pre_csr3,
+	};
+	nt_field_t *const diff_ctl[4] = {
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+	};
+	nt_field_t *const post_csr[4] = {
+		p->mp_field_gty_post_cursor_tx_post_csr0,
+		p->mp_field_gty_post_cursor_tx_post_csr1,
+		p->mp_field_gty_post_cursor_tx_post_csr2,
+		p->mp_field_gty_post_cursor_tx_post_csr3,
+	};
+
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	if (lane < 4)
+		field_set_val_flush32(pre_csr[lane], tx_pre_csr & 0x1F);
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	if (lane < 4)
+		field_set_val_flush32(diff_ctl[lane], tx_diff_ctl & 0x1F);
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	if (lane < 4)
+		field_set_val_flush32(post_csr[lane], tx_post_csr & 0x1F);
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	/* Program the LPM-enable bit (bit 0 of mode) on all four lanes. */
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	/*
+	 * Set/clear Tx polarity inversion for one lane; a lane index above 3
+	 * writes nothing (the setting is still logged, as before).
+	 */
+	nt_field_t *const tx_pol[4] = {
+		p->mp_field_gty_ctl_tx_pol0,
+		p->mp_field_gty_ctl_tx_pol1,
+		p->mp_field_gty_ctl_tx_pol2,
+		p->mp_field_gty_ctl_tx_pol3,
+	};
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(tx_pol[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	/*
+	 * Set/clear Rx polarity inversion for one lane; a lane index above 3
+	 * writes nothing (the setting is still logged, as before).
+	 */
+	nt_field_t *const rx_pol[4] = {
+		p->mp_field_gty_ctl_rx_pol0,
+		p->mp_field_gty_ctl_rx_pol1,
+		p->mp_field_gty_ctl_rx_pol2,
+		p->mp_field_gty_ctl_rx_pol3,
+	};
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(rx_pol[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set the port LED mode (see enum nthw_mac_pcs_led_mode_e) via the
+ * DEBOUNCE_CTRL.NT_PORT_CTRL field.
+ */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	field_get_updated(p->mp_field_debounce_ctrl_nt_port_ctrl);
+	field_set_val_flush32(p->mp_field_debounce_ctrl_nt_port_ctrl, mode);
+}
+
+/* Program the Rx timestamp compensation delay. Silent no-op when the
+ * optional TIMESTAMP_COMP register is absent in this FPGA image.
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	if (p->mp_field_time_stamp_comp_rx_dly) {
+		field_get_updated(p->mp_field_time_stamp_comp_rx_dly);
+		field_set_val_flush32(p->mp_field_time_stamp_comp_rx_dly, rx_dly);
+	}
+}
+
+/* Record the logical port number (used in this module's log messages). */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Read the refreshed BLOCK_LOCK lock bits. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Return the cached mask of expected block-lock bits (set at init). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Read the refreshed per-lane (VL_DEMUXED) lock bits. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Return the cached mask of expected lane-lock bits (set at init). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/*
+ * Port LED control modes, written to DEBOUNCE_CTRL.NT_PORT_CTRL via
+ * nthw_mac_pcs_set_led_mode().
+ */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Register/field shadow handles for one MAC_PCS module instance.
+ * All pointers are resolved once at init time; optional registers/fields
+ * (queried with module_query_register()) may be NULL depending on the
+ * FPGA image.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Set or clear a single register field and flush it to hardware.
+ * A NULL field pointer is silently ignored (field optional in this FPGA image).
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	/* Refresh the shadow register before modifying the field */
+	field_get_updated(f);
+
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate a zero-initialized MAC/PCS-XXV context.
+ * Returns NULL on allocation failure; caller owns the object and must
+ * release it with nthw_mac_pcs_xxv_delete().
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc allocates and zero-fills in one step (replaces malloc+memset) */
+	nthw_mac_pcs_xxv_t *p = calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+
+	return p;
+}
+
+/*
+ * Release a context allocated by nthw_mac_pcs_xxv_new().
+ * Safe to call with NULL.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the state before handing the memory back */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Map a channel index to a port number.
+ * Channel 0 reports the module instance number; other channels map 1:1.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (index == 0)
+		return (uint8_t)p->mn_instance;
+	return index;
+}
+
+/* Record the port number used in this module's log messages. */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register of channel @index and return the requested
+ * status fields. Any output pointer may be NULL to skip that field.
+ * "lh"/"ll" prefixed values are latched-high/latched-low sticky flags.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* BUGFIX: validate p before dereferencing it (the original code
+	 * computed &p->regs[index] in the initializer, before assert(p)).
+	 */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	/* One register read; all fields below decode the cached value */
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Set/clear the CORE_CONF TX_ENABLE field of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the CORE_CONF RX_ENABLE field of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the CORE_CONF RX_FORCE_RESYNC field of channel @index and flush. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_rx_force_resync;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the RX GT data sub-reset of channel @index. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX GT data sub-reset of channel @index. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_gt_data;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the auto-negotiation/link-training sub-reset of channel @index. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_an_lt;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the speed-control sub-reset of channel @index. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_speed_ctrl;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the CORE_CONF TX_SEND_RFI (remote fault indication) field and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_rfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the CORE_CONF TX_SEND_LFI (local fault indication) field and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_lfi;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Program both fault-indication fields (LFI and RFI) of channel @index
+ * and flush them to hardware in a single register write.
+ */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+
+	register_update(rf->mp_reg_core_conf);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(rf->mp_reg_core_conf, 1);
+}
+
+/* Return true when DFE equalization is active on channel @index. */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+
+	/* NOTE: DFE is enabled when LPM is disabled */
+	return !field_get_updated(f);
+}
+
+/*
+ * Select the RX equalization mode of channel @index: DFE (enable=true)
+ * or LPM (enable=false), then pulse the equalizer reset so the GTY
+ * transceiver picks up the new setting.
+ * NOTE: the LPM_EN bit is the inverse of "DFE enabled".
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+/* Set/clear RX lane polarity inversion in GTY_CTL for channel @index. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_rx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear TX lane polarity inversion in GTY_CTL for channel @index. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_polarity;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the GTY TX inhibit bit for channel @index. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_ctl_tx_inhibit;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Enable/disable transceiver host-side (near-end) loopback on channel @index.
+ * Writes GT_LOOP = 2 to enable, 0 to disable.
+ * NOTE(review): value 2 presumably selects a specific GTY loopback mode
+ * (e.g. near-end PMA) - confirm against the transceiver documentation.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+/* Set/clear the CORE_CONF LINE_LOOPBACK field of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_line_loopback;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Return true while the user RX reset of channel @index is asserted. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_rx_rst);
+}
+
+/* Return true while the user TX reset of channel @index is asserted. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_sub_rst_status_user_tx_rst);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const uint32_t lock_bits =
+		field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock);
+
+	/* Locked only when both lock bits are set (bitmask 0b11 == 3) */
+	return lock_bits == 3;
+}
+
+/*
+ * Channel @index is out of sub-reset when the QPLLs are locked and
+ * neither the user RX nor the user TX reset is still asserted.
+ */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (!nthw_mac_pcs_xxv_is_qpll_lock(p, index))
+		return false;
+	if (nthw_mac_pcs_xxv_is_user_rx_rst(p, index))
+		return false;
+	return !nthw_mac_pcs_xxv_is_user_tx_rst(p, index);
+}
+
+/* Return true when auto-negotiation is enabled on channel @index. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_aneg_config_enable);
+}
+
+/* Set/clear the CORE_CONF TX_SEND_IDLE field of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_send_idle;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the CORE_CONF TX_INS_FCS (FCS insertion) field and flush. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_tx_ins_fcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Return true when channel @index runs at 10G (field: 0 = 25g, 1 = 10g). */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	return field_get_updated(f) != 0; /* 0 = 25g, 1 = 10g */
+}
+
+/* Select 10G (enable=true) or 25G (enable=false) link speed for channel @index. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed10_g;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Pulse the link-speed toggle field of channel @index (read then set+flush). */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+/* Enable/disable RS-FEC (clause 91) on channel @index and flush. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Write @mode to the debounce-control port LED field of channel @index. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+/* Assert/deassert the RX MAC/PCS sub-reset of channel @index. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_rx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Assert/deassert the TX MAC/PCS sub-reset of channel @index. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_tx_mac_pcs;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Read (and thereby latch) the RS-FEC corrected/uncorrected codeword
+ * counters of channel @index, logging any non-zero values.
+ * NOTE(review): presumably the counters are clear-on-read, which is how
+ * this function "resets" them - confirm against the register spec.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	uint32_t ccw_cnt, ucw_cnt;
+
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	/* Read each counter once instead of once per NT_LOG argument */
+	ccw_cnt = field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt);
+	ucw_cnt = field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt);
+
+	/* Trailing \n added for consistency with the other NT_LOG calls */
+	if (ccw_cnt) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       ccw_cnt);
+	}
+	if (ucw_cnt) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       ucw_cnt);
+	}
+}
+
+/* Write the RX timestamp compensation delay for channel @index. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_rx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_rx_dly, rx_dly);
+}
+
+/* Write the TX timestamp compensation delay for channel @index. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_timestamp_comp_tx_dly);
+	field_set_val_flush32(p->regs[index].mp_field_timestamp_comp_tx_dly, tx_dly);
+}
+
+/* Set/clear CORE_CONF TS_AT_EOP (timestamp at end-of-packet) and flush. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_core_conf_ts_at_eop;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Write the GTY TX differential swing control value for channel @index. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl);
+	field_set_val_flush32(p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl,
+			    value);
+}
+
+/* Write the GTY TX pre-cursor emphasis value for channel @index. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr, value);
+}
+
+/* Write the GTY TX post-cursor emphasis value for channel @index. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	field_get_updated(p->regs[index].mp_field_gty_post_cursor_tx_post_csr);
+	field_set_val_flush32(p->regs[index].mp_field_gty_post_cursor_tx_post_csr,
+			    value);
+}
+
+/* Enable/disable link training (LT_CONF) on channel @index and flush. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_lt_conf_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the ANEG FEC91 (RS-FEC) request bit of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec91_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the ANEG RS-FEC request bit of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_rs_fec_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the ANEG FEC74 (BASE-R FEC) request bit of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_fec74_request;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Enable/disable auto-negotiation on channel @index and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/* Set/clear the auto-negotiation bypass bit of channel @index and flush. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_bypass;
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, enable);
+}
+
+/*
+ * Configure the direct-attach-cable mode of channel @index.
+ * Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: it disables AN/LT and
+ * pulses the MAC/PCS and GT data resets (assert all, then deassert all).
+ * The reset ordering below is deliberate - do not reorder.
+ * NOTE(review): any other mode hits assert(0), which is a no-op under
+ * NDEBUG - callers must not pass unimplemented modes in release builds.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+/* Return the latched-low RX FEC74 lock status of channel @index. */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock);
+}
+
+/* Return the latched-low RS-FEC lane-alignment status of channel @index. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		/* Remaining ANEG_ABILITY bits (25GBASE-CR-S / -CR1) for channel 2 */
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF: link-training enable/restart control and seed */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		/* ANEG_CONFIG */
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY: advertised 25GBASE-CR ability bits */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF: link-training enable/restart control and seed */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 *
+	 * Per-channel sub-reset controls (MAC/PCS, GT data paths, PMA, PCS,
+	 * AN/LT) and the matching reset/QPLL-lock status register.
+	 * SPEED_CTRL is looked up with query_field() rather than get_field(),
+	 * presumably because the field is absent on some FPGA variants --
+	 * TODO confirm.
+	 * NOTE(review): channel 0 is only initialized when n_channels < 4;
+	 * for the 4-channel case only channels 1-3 are set up here -- confirm
+	 * this is intentional.
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3
+	 * Per-channel link/fault status. LH_/LL_ prefixed fields are presumably
+	 * latched-high/latched-low status bits -- TODO confirm.
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		/* FEC74/RS-FEC status fields are only initialized outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		/* FEC74/RS-FEC status fields are only initialized outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		/* FEC74/RS-FEC status fields are only initialized outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		/* FEC74/RS-FEC status fields are only initialized outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 *  Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		/*
+		 * NOTE(review): the GTY_CTL_TX register handle itself is not stored
+		 * (only its fields) -- confirm this is intentional.
+		 */
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		/* Link speed and RS-FEC registers only exist outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed = r;	/* same register; avoid a duplicate lookup */
+
+			/*
+			 * query_field() tolerates a missing SPEED field; fall back to
+			 * the 10G field when SPEED is absent (older FPGA layouts,
+			 * presumably).
+			 */
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			/* RS-FEC corrected (CCW) and uncorrected (UCW) codeword counters */
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		/* Link speed and RS-FEC registers only exist outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed = r;	/* same register; avoid a duplicate lookup */
+
+			/*
+			 * Use query_field() (not get_field()) so an absent SPEED field
+			 * is not flagged as an error; matches channel 0.
+			 */
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			/* RS-FEC corrected (CCW) and uncorrected (UCW) codeword counters */
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		/* Link speed and RS-FEC registers only exist outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed = r;	/* same register; avoid a duplicate lookup */
+
+			/*
+			 * Use query_field() (not get_field()) so an absent SPEED field
+			 * is not flagged as an error; matches channel 0.
+			 */
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			/* RS-FEC corrected (CCW) and uncorrected (UCW) codeword counters */
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		/* Link speed and RS-FEC registers only exist outside 8x10G mode */
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed = r;	/* same register; avoid a duplicate lookup */
+
+			/*
+			 * Use query_field() (not get_field()) so an absent SPEED field
+			 * is not flagged as an error; matches channel 0.
+			 */
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			/* RS-FEC corrected (CCW) and uncorrected (UCW) codeword counters */
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		/* TIMESTAMP_COMP: RX/TX timestamp delay compensation */
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/*
+		 * GTY_PRE_CURSOR -- fetched via 'module' for consistency with every
+		 * other MAC_PCS_XXV register lookup in this function.
+		 */
+		r = get_register(module, MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		/* TIMESTAMP_COMP: RX/TX timestamp delay compensation */
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR -- via 'module', consistent with the other lookups */
+		r = get_register(module, MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		/* TIMESTAMP_COMP: RX/TX timestamp delay compensation */
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR -- via 'module', consistent with the other lookups */
+		r = get_register(module, MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		/* TIMESTAMP_COMP: RX/TX timestamp delay compensation */
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR -- via 'module', consistent with the other lookups */
+		r = get_register(module, MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Port LED mode selector — presumably consumed by nthw_mac_pcs_xxv_set_led_mode(); confirm against callers. */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/* DAC cable mode selector — presumably consumed by nthw_mac_pcs_xxv_set_dac_mode(); confirm against callers. */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_mac_pcs_xxv;	/* underlying MAC_PCS_XXV FPGA module */
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+	/* One register/field handle set per sub-module/channel (indices 0..3). */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI read traffic-generator context.
+ * Returns NULL on allocation failure; free with nthw_pci_rd_tg_delete().
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	/* calloc() zero-initializes in one step — no separate memset needed. */
+	return calloc(1, sizeof(nthw_pci_rd_tg_t));
+}
+
+/* Scrub and release a context obtained from nthw_pci_rd_tg_new(); NULL is a no-op. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_RD_TG module instance and resolve all register/field handles.
+ * When p is NULL the call is a pure presence probe: returns 0 if the module
+ * instance exists, -1 otherwise. With a context, returns 0 on success or -1
+ * if the instance is absent.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	/* Presence probe only — no context to populate. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id — query (not get): may legitimately be absent */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as low/high 32-bit halves.
+ * Fix: the previous mask used (1UL << 32), which is undefined behavior on
+ * platforms where unsigned long is 32 bits wide (C11 6.5.7); plain casts and
+ * a 64-bit shift express the same split portably.
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the descriptor RAM entry to program and flush the write. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage request size and wait/wrap flags for the addressed RAM entry.
+ * All fields share one register: values are staged with field_set_val32()
+ * and written out by the single flush on the last field.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Start the read traffic generator for n_iterations iterations. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Return the current TG_RD_RDY control/status field value. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * Context for one PCI_RD_TG (PCIe read traffic generator) FPGA module.
+ * NOTE(review): this header uses nt_fpga_t/nt_register_t/nt_field_t without
+ * including their header — relies on include order at the call sites; confirm.
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_pci_rd_tg;	/* bound module instance */
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;	/* NT_PCI_TA_TG_PRESENT product param */
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;	/* optional; may be NULL (query_field) */
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/*
+ * Allocate a zero-initialized PCI test-analyzer context.
+ * Returns NULL on allocation failure; free with nthw_pci_ta_delete().
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	/* calloc() zero-initializes in one step — no separate memset needed. */
+	return calloc(1, sizeof(nthw_pci_ta_t));
+}
+
+/* Scrub and release a context obtained from nthw_pci_ta_new(); NULL is a no-op. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_TA module instance and resolve its counter register fields.
+ * When p is NULL the call is a pure presence probe: returns 0 if the module
+ * instance exists, -1 otherwise.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	/* Presence probe only — no context to populate. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Write the test-analyzer CONTROL.ENABLE field and flush to hardware. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Store the good-packet counter into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Store the bad-packet counter into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Store the length-error counter into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Store the payload-error counter into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * Context for one PCI_TA (PCIe test analyzer) FPGA module: control enable
+ * plus good/bad/length-error/payload-error packet counters.
+ * NOTE(review): header relies on nt_* types being declared by a prior include.
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_pci_ta;	/* bound module instance */
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;	/* NT_PCI_TA_TG_PRESENT product param */
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Allocate a zero-initialized PCI write traffic-generator context.
+ * Returns NULL on allocation failure; free with nthw_pci_wr_tg_delete().
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	/* calloc() zero-initializes in one step — no separate memset needed. */
+	return calloc(1, sizeof(nthw_pci_wr_tg_t));
+}
+
+/* Scrub and release a context obtained from nthw_pci_wr_tg_new(); NULL is a no-op. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PCI_WR_TG module instance and resolve all register/field handles.
+ * When p is NULL the call is a pure presence probe: returns 0 if the module
+ * instance exists, -1 otherwise.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	/* Presence probe only — no context to populate. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id — query (not get): may legitimately be absent */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA physical address as low/high 32-bit halves.
+ * Fix: the previous mask used (1UL << 32), which is undefined behavior on
+ * platforms where unsigned long is 32 bits wide (C11 6.5.7); plain casts and
+ * a 64-bit shift express the same split portably.
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & 0xffffffffULL));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)(n_phys_addr >> 32));
+}
+
+/* Select the descriptor RAM entry to program and flush the write. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/*
+ * Stage request size and wait/wrap/increment-mode flags for the addressed
+ * RAM entry. All fields share one register: values are staged with
+ * field_set_val32() and written out by the single flush on the last field.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the write traffic generator for n_iterations iterations. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Return the current TG_WR_RDY control/status field value. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * Context for one PCI_WR_TG (PCIe write traffic generator) FPGA module.
+ * NOTE(review): header relies on nt_* types being declared by a prior include.
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_pci_wr_tg;	/* bound module instance */
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;	/* NT_PCI_TA_TG_PRESENT product param */
+
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;	/* optional; may be NULL (query_field) */
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate a zero-initialized PCIe3 context.
+ * Returns NULL on allocation failure; free with nthw_pcie3_delete().
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc() zero-initializes in one call (replaces malloc+memset) */
+	nthw_pcie3_t *p = calloc(1, sizeof(nthw_pcie3_t));
+
+	return p;
+}
+
+/*
+ * Scrub and free a PCIe3 context. NULL is accepted and ignored.
+ * The memset helps catch use-after-free of stale handles.
+ */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_pcie3_t));
+	free(p);
+}
+
+/*
+ * Resolve all PCIE3 registers/fields into *p and apply the initial setup
+ * (markers off, bifurcation masks). With p == NULL this is a probe:
+ * returns 0 if the module instance exists, -1 otherwise.
+ * Fix: dropped the stray ';' after the function's closing brace (not
+ * valid ISO C at file scope).
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* Root-port <-> end-point error signalling */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	/* End-point config; shadow refreshed from HW where present */
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/*
+	 * NOTE(review): EP0 mask is cleared above and then set here, while the
+	 * EP1 mask is cleared twice. This asymmetry looks intentional (EP0
+	 * enabled, EP1 disabled) but the redundant EP1 clear suggests a
+	 * copy/paste - confirm against the FPGA documentation.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	/* Write to SAMPLE_TIME to trigger a statistics sample; 0xfee1dead
+	 * is presumably an arbitrary marker value - confirm against the
+	 * PCIE3 register documentation.
+	 */
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics: set ENA and REQ shadows, then flush their shared
+ * PCIE3_STAT_CTRL register with one write (statement order is part of
+ * the hardware protocol - do not reorder).
+ */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics: clear ENA but still pulse REQ, flushed as a single
+ * PCIE3_STAT_CTRL write (both fields share that register).
+ */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read all raw PCIe3 statistics counters into the caller's out-params.
+ * tg_unit_size/tg_ref_freq are compile-time constants, not HW reads.
+ * Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw counters into rates scaled by the reference clock.
+ * Bus utilization is in parts-per-million of the sampled ref-clock window.
+ * Fix: previously *p_pci_rx_rate, *p_pci_tx_rate and *p_tag_use_cnt were
+ * left uninitialized when the reference-clock count was zero; all
+ * out-params are now always written. Always returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	*p_ref_clk_cnt = ref_clk_cnt;
+	*p_tag_use_cnt = tag_use_cnt;
+
+	if (ref_clk_cnt) {
+		/* rate = count * unit_size * ref_freq / ref_clk_ticks */
+		*p_pci_rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+
+		*p_pci_tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+
+		*p_pci_nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+	} else {
+		/* No sample window yet - report everything as zero */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/* Pre-sample hook - intentionally a no-op; only logs that it was called. */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	(void)p;
+	(void)epc;
+
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	return 0;
+}
+
+/* Post-sample hook: fill epc with the current PCIe3 rates. */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	/* NOTE(review): the PCI RX rate (first out-param) lands in epc->cur_tx
+	 * and the TX rate in epc->cur_rx. Presumably deliberate (device RX ==
+	 * host TX direction) - confirm against the HIF counter consumers.
+	 */
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Register/field handle cache for the PCIE3 FPGA module, resolved by
+ * nthw_pcie3_init() and used by the statistics/error accessors below.
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_pcie3;	/* resolved PCIE3 module */
+	int mn_instance;	/* module instance number */
+
+	/* Statistics control (ENA/REQ share one register) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	nt_register_t *mp_reg_stat_ref_clk;	/* 250MHz reference-clock tick counter */
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* Root-port -> end-point error reporting */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* End-point -> root-port error reporting */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;	/* written to trigger a stats sample */
+	nt_field_t *mp_fld_sample_time;
+
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* Marker DMA address (split LSB/MSB), zeroed during init */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/* NOTE(review): test2/test3 are never assigned by nthw_pcie3_init()
+	 * in this patch - either future use or dead members; confirm.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate a zero-initialized SDC context.
+ * Returns NULL on allocation failure; free with nthw_sdc_delete().
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc() zero-initializes in one call (replaces malloc+memset) */
+	nthw_sdc_t *p = calloc(1, sizeof(nthw_sdc_t));
+
+	return p;
+}
+
+/* Scrub and free an SDC context; NULL is accepted and ignored. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_sdc_t));
+	free(p);
+}
+
+/*
+ * Resolve all SDC registers/fields into *p. With p == NULL this is a
+ * probe: returns 0 if the module instance exists, -1 otherwise.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+	nt_register_t *p_reg;
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	/* Control register fields */
+	p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+	p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+	p->mp_fld_ctrl_run_test = register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+	p->mp_fld_ctrl_stop_client = register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+	p->mp_fld_ctrl_test_enable = register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+	/* Status register fields */
+	p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+	p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+	p->mp_fld_stat_cell_cnt_stopped =
+		register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+	p->mp_fld_stat_err_found = register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+	p->mp_fld_stat_init_done = register_get_field(p_reg, SDC_STAT_INIT_DONE);
+	p->mp_fld_stat_mmcm_lock = register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+	p->mp_fld_stat_pll_lock = register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+	p->mp_fld_stat_resetting = register_get_field(p_reg, SDC_STAT_RESETTING);
+
+	/* Cell counter and fill-level registers */
+	p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+	p->mp_fld_cell_cnt = register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+	p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+	p->mp_fld_cell_cnt_period =
+		register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+	p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+	p->mp_fld_fill_level = register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+	p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+	p->mp_fld_max_fill_level =
+		register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+
+	return 0;
+}
+
+/*
+ * Fold one SDC status field into the packed result mask.
+ * expect_set: field is OK only when all its bits read as ones;
+ * otherwise it is OK only when it reads as all zeros (RESETTING).
+ * Returns 1 on mismatch, 0 when the field is in its expected state.
+ * Fix: the original computed (1 << width) - 1, which is undefined
+ * behavior for width == 32; use a 64-bit shift instead.
+ */
+static int nthw_sdc_check_state(nt_field_t *p_fld, bool expect_set,
+				uint64_t *pn_mask)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_width = field_get_bit_width(p_fld);
+	const uint32_t val_mask = (uint32_t)((1ULL << n_width) - 1);
+
+	*pn_mask = (*pn_mask << n_width) | (val & val_mask);
+	return val != (expect_set ? val_mask : 0U);
+}
+
+/*
+ * Sample the SDC state fields, pack them into *pn_result_mask and count
+ * how many are not in their expected state.
+ * Returns the number of bad fields (0 = all ok), or -1 on NULL args.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_calib, true, &n_mask);
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_init_done, true, &n_mask);
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_mmcm_lock, true, &n_mask);
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_pll_lock, true, &n_mask);
+	/* RESETTING must read as zero */
+	n_err_cnt += nthw_sdc_check_state(p->mp_fld_stat_resetting, false, &n_mask);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reports ready: CALIB/INIT_DONE/MMCM_LOCK/PLL_LOCK
+ * must all read as ones and RESETTING must clear.
+ * Returns the number of fields that never reached their expected state
+ * within the polling budget (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	/* Fields that must become all-ones, in the original wait order */
+	nt_field_t *const set_flds[] = {
+		p->mp_fld_stat_calib,
+		p->mp_fld_stat_init_done,
+		p->mp_fld_stat_mmcm_lock,
+		p->mp_fld_stat_pll_lock,
+	};
+	int n_fail = 0;
+	size_t i;
+
+	for (i = 0; i < sizeof(set_flds) / sizeof(set_flds[0]); i++) {
+		if (field_wait_set_all32(set_flds[i], n_poll_iterations,
+					 n_poll_interval) != 0)
+			n_fail++;
+	}
+
+	/* The reset indication must drop to zero */
+	if (field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_fail++;
+
+	return n_fail; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Field handle cache for the SDC (presumably SDRAM/memory calibration
+ * controller - confirm) FPGA module, resolved by nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_sdc;	/* resolved SDC module */
+	int mn_instance;	/* module instance number */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields, polled by nthw_sdc_wait_states()/get_states() */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* Counter and fill-level fields */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/*
+ * Allocate a zero-initialized Si5340 context.
+ * Returns NULL on allocation failure; free with nthw_si5340_delete().
+ */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc() zero-initializes in one call (replaces malloc+memset) */
+	nthw_si5340_t *p = calloc(1, sizeof(nthw_si5340_t));
+
+	return p;
+}
+
+/*
+ * Bind the Si5340 context to its I2C bus/address and reset the device's
+ * register-page selector to page 0 so the cached m_si5340_page matches
+ * hardware. Always returns 0.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1; /* no clock profile applied yet */
+
+	/* NOTE(review): the nthw_iic_write_data() result is ignored here -
+	 * confirm whether an I2C failure should be propagated to the caller.
+	 */
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/* Scrub and free an Si5340 context; NULL is accepted and ignored. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(nthw_si5340_t));
+	free(p);
+}
+
+/*
+ * Read access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7bit address 0x74
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	/* reg_addr encodes page in the high byte, offset in the low byte */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache to skip redundant page writes */
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write access (via I2C) to the clock synthesizer IC. The IC is located at I2C
+ * 7 bit address 0x74
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	/* reg_addr encodes page in the high byte, offset in the low byte */
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page; /* cache to skip redundant page writes */
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	/* Always 0; I2C-layer errors are not propagated here */
+	return 0;
+}
+
+/*
+ * Apply a clock-profile table to the Si5340 one register at a time,
+ * verifying each write by reading it back (except the soft-reset
+ * register). Returns 0 on success, -1 on unknown format or readback
+ * mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		uint16_t addr;
+		uint8_t value;
+		uint8_t readback;
+
+		/* Decode one table record and advance the cursor */
+		switch (data_format) {
+		case CLK_PROFILE_DATA_FMT_1: {
+			const clk_profile_data_fmt1_t *p_rec = p_data;
+
+			addr = p_rec->reg_addr;
+			value = p_rec->reg_val;
+			p_data = p_rec + 1;
+			break;
+		}
+		case CLK_PROFILE_DATA_FMT_2: {
+			const clk_profile_data_fmt2_t *p_rec = p_data;
+
+			addr = (uint16_t)p_rec->reg_addr;
+			value = p_rec->reg_val;
+			p_data = p_rec + 1;
+			break;
+		}
+		default:
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		readback = nthw_si5340_read(p, addr);
+
+		if (readback != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, readback);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a clock profile and wait (up to ~5s) for the Si5340 to report
+ * DPLL lock and SYS calibration via status reg 0x0c and sticky reg 0x11.
+ * Returns 0 on success, -1 if lock is never achieved.
+ * Fix: status/sticky are now initialized so the failure log cannot read
+ * indeterminate values (also silences -Wmaybe-uninitialized).
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status = 0;
+	uint8_t sticky = 0;
+	uint8_t design_id[9];
+
+	/* Best effort: lock is verified below regardless of cfg result */
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00); /* clear sticky flags */
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Read the 8-char design id string (NUL-terminated locally) */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a format-1 clock-profile table. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a format-2 clock-profile table. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* I2C-attached Si5340 clock synthesizer; page selector is cached in
+ * m_si5340_page to avoid redundant page writes.
+ */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C bus controller handle */
+	int mn_clk_cfg;	/* selected clock profile (-1 = none) */
+	uint8_t m_si5340_page;	/* last page written to the page register */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/*
+ * Allocate a zero-initialized SPI v3 context.
+ * Returns NULL on allocation failure; free with nthw_spi_v3_delete().
+ */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc() zero-initializes in one call (replaces malloc+memset) */
+	nthw_spi_v3_t *p = calloc(1, sizeof(nthw_spi_v3_t));
+
+	return p;
+}
+
+/*
+ * Release the owned SPIM/SPIS sub-modules, scrub the context and free it.
+ * NULL is accepted and ignored.
+ */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p == NULL)
+		return;
+
+	if (p->mp_spim_mod != NULL) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+
+	if (p->mp_spis_mod != NULL) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(nthw_spi_v3_t));
+	free(p);
+}
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	/* Budget for the FIFO wait loops; compared against differences of
+	 * NT_OS_GET_TIME_MONOTONIC_COUNTER(), so it is in that counter's
+	 * units (presumably seconds, since nthw_spi_v3_init() passes 1 -
+	 * confirm). Always returns 0.
+	 */
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* This implementation speaks SPI container protocol version 3. */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the Tx FIFO.
+ */
+/*
+ * Block until the SPIM Tx FIFO has drained, polling once per millisecond.
+ * Returns 0 on success, the underlying error code if the FIFO query
+ * fails, or -1 on timeout (time_out in monotonic-counter units).
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	const uint64_t t_start = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	for (;;) {
+		bool fifo_empty;
+		int rc;
+
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		rc = nthw_spim_get_tx_fifo_empty(p_spim_mod, &fifo_empty);
+		if (rc != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return rc;
+		}
+
+		if (fifo_empty)
+			return 0;
+
+		if ((NT_OS_GET_TIME_MONOTONIC_COUNTER() - t_start) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+}
+
+/*
+ * Wait until Rx data have been received.
+ */
+/*
+ * Block until the SPIS Rx FIFO has data, polling every 10ms.
+ * Returns 0 on success, the underlying error code if the FIFO query
+ * fails, or -1 on timeout (time_out in monotonic-counter units).
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	const uint64_t t_start = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	for (;;) {
+		bool fifo_empty;
+		int rc;
+
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		rc = nthw_spis_get_rx_fifo_empty(p_spis_mod, &fifo_empty);
+		if (rc != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return rc;
+		}
+
+		if (!fifo_empty)
+			return 0;
+
+		if ((NT_OS_GET_TIME_MONOTONIC_COUNTER() - t_start) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/* Debug helper: hex-dump a buffer in indented rows of 16 bytes. */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	char row[128];	/* 16 * 3 + NUL fits comfortably */
+	int col = 0;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		sprintf(&row[col * 3], "%02X ", *(p_data++));
+		col++;
+
+		/* Flush a full 16-byte row, or the final partial row */
+		if (col == 16 || i == count - 1) {
+			row[col * 3 - 1] = '\0'; /* drop trailing space */
+			NT_LOG(DBG, NTHW, "    %s\n", row);
+			col = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Create and initialize the SPIM (master) and SPIS (slave) sub-modules
+ * and reset both. Errors are logged but initialization continues; the
+ * return value is the result of the final reset step.
+ * Fixes: the SPIM/SPIS failure log messages were swapped (spim_init
+ * failure was reported as "nthw_spis_init failed" and vice versa), and
+ * 'result' is now int to match the sub-modules' signed return codes.
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS module.
+ * The data are sent and received being wrapped into a SPI v3 container.
+ */
+/*
+ * Wrap tx_buf in a SPI v3 container (header word: opcode + payload size,
+ * big-endian on the wire), send it via SPIM, then receive the response
+ * container via SPIS into rx_buf. On entry rx_buf->size is the capacity;
+ * on return it is the number of payload bytes received.
+ * Returns 0 on success, -1 if the AVR reported an error in the response
+ * header, 1 if the response exceeds rx_buf's capacity, or the underlying
+ * error code from the SPIM/SPIS layer.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	/* 32-bit container headers: word 0 of each direction's stream */
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Stage the payload into the Tx FIFO one 32-bit word at a
+		 * time; a short tail is zero-padded (value = 0 before the
+		 * partial memcpy).
+		 */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0; /* switches meaning: now bytes received */
+
+		/* Two-state loop: read response header once, then drain the
+		 * payload word by word until rx_size reaches zero.
+		 */
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes of the AVR board-controller protocol carried over SPI. */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following command are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM Vital Product Data layout (Gen2 boards). */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (GEN2_BNAME_SIZE = 14 bytes ascii) */
+	/* (e.g. "ntmainb1e2" or "ntfront20b1") */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identity/capabilities (strings are NUL-terminated copies). */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6]; /* base MAC address */
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Caller-owned buffer descriptor used by nthw_spi_v3_transfer(). */
+struct tx_rx_buf {
+	uint16_t size; /* valid bytes in p_buf */
+	void *p_buf;
+};
+
+/* SPI v3 channel context: pairs one SPI master with one SPI slave module. */
+struct nthw__spi__v3 {
+	int m_time_out; /* transfer timeout used by the wait helpers */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/*
+ * Allocate a zero-initialized SPIM context.
+ * Returns NULL on allocation failure; release with nthw_spim_delete().
+ */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset() in one call */
+	nthw_spim_t *p = calloc(1, sizeof(nthw_spim_t));
+
+	return p;
+}
+
+/*
+ * Bind a SPIM (SPI master) context to FPGA module instance n_instance and
+ * cache its register/field handles.
+ * When p == NULL this only probes for the module: returns -1 when absent,
+ * 0 when present. Otherwise returns 0 on success, -1 if no such instance.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* Cache handles for reset, control, status, data and config registers */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/* Zero and free a SPIM context; NULL is accepted and ignored. */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spim_t));
+		free(p);
+	}
+}
+
+/* Issue a software reset of the SPI master via the SRR register. Returns 0. */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPI master enable bit (CR.EN) and flush it. Returns 0. */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPI master TX FIFO (DTR). Returns 0. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/* Read SR.TXEMPTY into *pb_empty (must be non-NULL). Returns 0. */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_txempty) ? true : false;
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/* SPIM (SPI master) context: cached register and field handles. */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR - software reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR - status register */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR/DRR - data transmit/receive registers */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	/* CFG - configuration (clock prescaler) */
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/*
+ * Allocate a zero-initialized SPIS context.
+ * Returns NULL on allocation failure; release with nthw_spis_delete().
+ */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset() in one call */
+	nthw_spis_t *p = calloc(1, sizeof(nthw_spis_t));
+
+	return p;
+}
+
+/*
+ * Bind a SPIS (SPI slave) context to FPGA module instance n_instance and
+ * cache its register/field handles.
+ * When p == NULL this only probes for the module: returns -1 when absent,
+ * 0 when present. Otherwise returns 0 on success, -1 if no such instance.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* Cache handles for reset, control, status, data and sensor-RAM registers */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/* Zero and free a SPIS context; NULL is accepted and ignored. */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_spis_t));
+		free(p);
+	}
+}
+
+/* Issue a software reset of the SPI slave via the SRR register. Returns 0. */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPI slave enable bit (CR.EN) and flush it. Returns 0. */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read SR.RXEMPTY into *pb_empty (must be non-NULL). Returns 0. */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = field_get_updated(p->mp_fld_sr_rxempty) ? true : false;
+
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPI slave RX FIFO (DRR) into *p_data. Returns 0. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS sensor RAM: program the RAM
+ * address/count, flush, then fetch the data word into *p_sensor_result.
+ * Returns 0.
+ * NOTE(review): no explicit wait between the control flush and the data
+ * read - presumably the RAM read completes synchronously; confirm with HW doc.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/* SPIS (SPI slave) context: cached register and field handles. */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR - software reset register */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR - control register */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR - status register (incl. error flags) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR/DRR - data transmit/receive registers */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* Sensor result RAM access */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate a zero-initialized TSM context.
+ * Returns NULL on allocation failure; release with nthw_tsm_delete().
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset() in one call */
+	nthw_tsm_t *p = calloc(1, sizeof(nthw_tsm_t));
+
+	return p;
+}
+
+/* Zero and free a TSM context; NULL is accepted and ignored. */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_tsm_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TSM (time sync module) context to FPGA module instance n_instance
+ * and cache its register/field handles.
+ * When p == NULL this only probes for the module: returns -1 when absent,
+ * 0 when present. Otherwise returns 0 on success, -1 if no such instance.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse the handle just fetched instead of a second identical lookup */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM timestamp (TS_HI:TS_LO) into *p_ts.
+ * Returns 0 on success, -1 if p_ts is NULL.
+ * NOTE(review): the two 32-bit halves are read with separate register
+ * accesses, so the value could tear if the counter updates in between -
+ * confirm whether reading TS_LO latches TS_HI in hardware.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The early return above guarantees p_ts is non-NULL here */
+	*p_ts = ((((uint64_t)n_ts_hi) << 32UL) | n_ts_lo);
+
+	return 0;
+}
+
+/*
+ * Read the 64-bit TSM wall time (TIME_HI seconds : TIME_LO ns) into *p_time.
+ * Returns 0 on success, -1 if p_time is NULL.
+ * NOTE(review): the two 32-bit halves are read with separate register
+ * accesses, so the value could tear if time advances in between - confirm
+ * whether reading TIME_LO latches TIME_HI in hardware.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The early return above guarantees p_time is non-NULL here */
+	*p_time = ((((uint64_t)n_time_hi) << 32UL) | n_time_lo);
+
+	return 0;
+}
+
+/* Write a 64-bit time value as two 32-bit halves (TIME_LO then TIME_HI). Returns 0. */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	field_set_val_flush32(p->mp_fld_time_lo, (n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/* Enable or disable timer T0 (stat toggle timer). Returns 0. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t0);
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+	return 0;
+}
+
+/* Program the T0 period in nanoseconds. Returns 0. */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable or disable timer T1 (keep alive timer). Returns 0. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t1);
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+	return 0;
+}
+
+/* Program the T1 period in nanoseconds. Returns 0. */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/* Select the timestamp format in TSM_CONFIG. Returns 0. */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/* TSM (time sync module) context: cached register and field handles. */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* Timer enables: T0 (stat toggle), T1 (keep alive) */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* 64-bit timestamp split across two 32-bit registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* 64-bit wall time split across two 32-bit registers */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate a zero-initialized DBS context.
+ * Returns NULL on allocation failure; release with nthw_dbs_delete().
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc() yields the same zeroed state as malloc()+memset() in one call */
+	nthw_dbs_t *p = calloc(1, sizeof(nthw_dbs_t));
+
+	return p;
+}
+
+/* Zero and free a DBS context; NULL is accepted and ignored. */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_dbs_t));
+		free(p);
+	}
+}
+
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
/*
 * Restore the DBS RX control register to its power-on defaults:
 * all RX queues disabled, availability monitor and used-writer off,
 * and the default scan/update speed dividers re-applied.
 * NOTE(review): speed values 8/5 are magic defaults; origin not visible here.
 * Always returns 0.
 */
int dbs_reset_rx_control(nthw_dbs_t *p)
{
	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
	register_flush(p->mp_reg_rx_control, 1);
	return 0;
}
+
/*
 * Restore the DBS TX control register to its power-on defaults:
 * all TX queues disabled, availability monitor and used-writer off.
 * NOTE(review): the scan/update speeds (5/8) are the mirror image of the
 * RX defaults (8/5) — looks intentional but confirm with the HW spec.
 * Always returns 0.
 */
int dbs_reset_tx_control(nthw_dbs_t *p)
{
	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
	register_flush(p->mp_reg_tx_control, 1);
	return 0;
}
+
/*
 * Full DBS module reset: restore RX/TX control defaults, then zero every
 * per-queue shadow entry (AM/UW/DR on RX; AM/UW/DR/QP/QOS on TX) and flush
 * the cleared values to the hardware banks.
 */
void dbs_reset(nthw_dbs_t *p)
{
	uint32_t i;

	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);

	dbs_reset_rx_control(p);
	dbs_reset_tx_control(p);

	/* Reset RX memory banks and shadow copies */
	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
		flush_rx_am_data(p, i);

		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
		flush_rx_uw_data(p, i);

		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
		flush_rx_dr_data(p, i);
	}

	/* Reset TX memory banks and shadow copies */
	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
		flush_tx_am_data(p, i);

		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
		flush_tx_uw_data(p, i);

		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
		flush_tx_dr_data(p, i);

		set_shadow_tx_qp_data(p, i, 0);
		flush_tx_qp_data(p, i);

		set_shadow_tx_qos_data(p, i, 0, 0, 0);
		flush_tx_qos_data(p, i);
	}
}
+
/*
 * Program the global DBS RX control register from the supplied values and
 * flush it to hardware. Under DBS_PRINT_REGS the values are also dumped to
 * stdout for debugging. Always returns 0.
 */
int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
		   uint32_t used_write_enable, uint32_t used_write_speed,
		   uint32_t rx_queue_enable)
{
#ifdef DBS_PRINT_REGS
	printf("last_queue %u\n", last_queue);
	printf("avail_monitor_enable %u\n", avail_monitor_enable);
	printf("avail_monitor_speed %u\n", avail_monitor_speed);
	printf("used_write_enable %u\n", used_write_enable);
	printf("used_write_speed %u\n", used_write_speed);
	printf("rx_queue_enable %u\n", rx_queue_enable);
#endif

	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
		       avail_monitor_speed);
	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
	register_flush(p->mp_reg_rx_control, 1);
	return 0;
}
+
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
/*
 * Start (or stop) initialization of an RX queue. On FPGAs that expose the
 * optional INIT_VAL register the start index/pointer are programmed first;
 * otherwise only the init/queue fields are written. Always returns 0.
 */
int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
		uint32_t init, uint32_t queue)
{
	/* INIT_VAL register is optional — only present on some FPGA images */
	if (p->mp_reg_rx_init_val) {
		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
		register_flush(p->mp_reg_rx_init_val, 1);
	}
	field_set_val32(p->mp_fld_rx_init_init, init);
	field_set_val32(p->mp_fld_rx_init_queue, queue);
	register_flush(p->mp_reg_rx_init, 1);
	return 0;
}
+
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
/*
 * Start (or stop) initialization of a TX queue. On FPGAs that expose the
 * optional INIT_VAL register the start index/pointer are programmed first;
 * otherwise only the init/queue fields are written. Always returns 0.
 */
int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
		uint32_t init, uint32_t queue)
{
	/* INIT_VAL register is optional — only present on some FPGA images */
	if (p->mp_reg_tx_init_val) {
		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
		register_flush(p->mp_reg_tx_init_val, 1);
	}
	field_set_val32(p->mp_fld_tx_init_init, init);
	field_set_val32(p->mp_fld_tx_init_queue, queue);
	register_flush(p->mp_reg_tx_init, 1);
	return 0;
}
+
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
/*
 * Read back the optional RX IDLE register. The queue number is not read
 * back from hardware and is always reported as 0.
 * Returns -ENOTSUP when this FPGA image does not expose the register.
 */
int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
{
	if (!p->mp_reg_rx_idle)
		return -ENOTSUP;

	/* field_get_updated re-reads from hardware (unlike field_get_val32) */
	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
	*queue = 0;
	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
	return 0;
}
+
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
/*
 * Read back the optional TX IDLE register. The queue number is not read
 * back from hardware and is always reported as 0.
 * Returns -ENOTSUP when this FPGA image does not expose the register.
 */
int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
{
	if (!p->mp_reg_tx_idle)
		return -ENOTSUP;

	/* field_get_updated re-reads from hardware (unlike field_get_val32) */
	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
	*queue = 0;
	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
	return 0;
}
+
/*
 * Select which RX queue the optional PTR register reports on.
 * Returns -ENOTSUP when this FPGA image does not expose the register.
 */
int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
{
	if (!p->mp_reg_rx_ptr)
		return -ENOTSUP;

	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
	register_flush(p->mp_reg_rx_ptr, 1);
	return 0;
}
+
/*
 * Read the RX queue pointer for the queue previously selected with
 * set_rx_ptr_queue(). The queue number is not read back and is always
 * reported as 0. Returns -ENOTSUP when the register is absent.
 */
int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
{
	if (!p->mp_reg_rx_ptr)
		return -ENOTSUP;

	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
	*queue = 0;
	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
	return 0;
}
+
/*
 * Select which TX queue the optional PTR register reports on.
 * Returns -ENOTSUP when this FPGA image does not expose the register.
 */
int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
{
	if (!p->mp_reg_tx_ptr)
		return -ENOTSUP;

	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
	register_flush(p->mp_reg_tx_ptr, 1);
	return 0;
}
+
/*
 * Read the TX queue pointer for the queue previously selected with
 * set_tx_ptr_queue(). The queue number is not read back and is always
 * reported as 0. Returns -ENOTSUP when the register is absent.
 */
int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
{
	if (!p->mp_reg_tx_ptr)
		return -ENOTSUP;

	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
	*queue = 0;
	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
	return 0;
}
+
/* Point the RX availability-monitor data bank at entry @index (count 1). */
static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
}
+
/* Shadow-copy setters for the RX availability-monitor entry at @index.
 * These only update the software shadow; flush_rx_am_data() writes it to HW.
 */
static void
set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
				       uint64_t guest_physical_address)
{
	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
}

/* NOTE(review): naming is inconsistent with the sibling set_shadow_* setters */
static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
		uint32_t enable)
{
	p->m_rx_am_shadow[index].enable = enable;
}

static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
				     uint32_t host_id)
{
	p->m_rx_am_shadow[index].host_id = host_id;
}

static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
				     uint32_t packed)
{
	p->m_rx_am_shadow[index].packed = packed;
}

static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
					uint32_t int_enable)
{
	p->m_rx_am_shadow[index].int_enable = int_enable;
}
+
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
/*
 * Write the RX availability-monitor shadow entry @index to hardware.
 * The 64-bit guest physical address is passed as two 32-bit words
 * (NOTE(review): assumes host word layout matches what the FPGA expects —
 * confirm endianness handling inside field_set_val()).
 * PCKED/INT fields are optional and only written when present.
 */
static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
{
	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
		     2);
	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
		       p->m_rx_am_shadow[index].enable);
	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
		       p->m_rx_am_shadow[index].host_id);
	if (p->mp_fld_rx_avail_monitor_data_packed) {
		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
			       p->m_rx_am_shadow[index].packed);
	}
	if (p->mp_fld_rx_avail_monitor_data_int) {
		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
			       p->m_rx_am_shadow[index].int_enable);
	}

	/* Select the target entry, then flush the data register */
	set_rx_am_data_index(p, index);
	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
}
+
/*
 * Public entry: update shadow and hardware for RX availability-monitor
 * entry @index. Returns -ENOTSUP if the data register is absent.
 */
int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t enable, uint32_t host_id, uint32_t packed,
		   uint32_t int_enable)
{
	if (!p->mp_reg_rx_avail_monitor_data)
		return -ENOTSUP;

	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
			   packed, int_enable);
	flush_rx_am_data(p, index);
	return 0;
}
+
/* Point the TX availability-monitor data bank at entry @index (count 1). */
static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
}
+
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
/*
 * Write the TX availability-monitor shadow entry @index to hardware.
 * The 64-bit guest physical address is passed as two 32-bit words;
 * optional PCKED/INT fields are written only when present.
 */
static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
{
	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
		     2);
	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
		       p->m_tx_am_shadow[index].enable);
	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
		       p->m_tx_am_shadow[index].host_id);
	if (p->mp_fld_tx_avail_monitor_data_packed) {
		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
			       p->m_tx_am_shadow[index].packed);
	}
	if (p->mp_fld_tx_avail_monitor_data_int) {
		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
			       p->m_tx_am_shadow[index].int_enable);
	}

	/* Select the target entry, then flush the data register */
	set_tx_am_data_index(p, index);
	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
}
+
/*
 * Public entry: update shadow and hardware for TX availability-monitor
 * entry @index. Returns -ENOTSUP if the data register is absent.
 */
int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t enable, uint32_t host_id, uint32_t packed,
		   uint32_t int_enable)
{
	if (!p->mp_reg_tx_avail_monitor_data)
		return -ENOTSUP;

	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
			   packed, int_enable);
	flush_tx_am_data(p, index);
	return 0;
}
+
/* Point the RX used-writer data bank at entry @index (count 1). */
static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
	register_flush(p->mp_reg_rx_used_writer_control, 1);
}
+
/* Shadow-copy setters for the RX used-writer entry at @index.
 * These only update the software shadow; flush_rx_uw_data() writes it to HW.
 */
static void
set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
				       uint64_t guest_physical_address)
{
	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
}

static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
				     uint32_t host_id)
{
	p->m_rx_uw_shadow[index].host_id = host_id;
}

/* queue_size is kept in its raw (un-expanded) form; flush converts it */
static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
					uint32_t queue_size)
{
	p->m_rx_uw_shadow[index].queue_size = queue_size;
}

static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
				     uint32_t packed)
{
	p->m_rx_uw_shadow[index].packed = packed;
}

static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
					uint32_t int_enable)
{
	p->m_rx_uw_shadow[index].int_enable = int_enable;
}

/* Interrupt vector */
static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
{
	p->m_rx_uw_shadow[index].vec = vec;
}

/* ISTK field — semantics not visible here */
static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
{
	p->m_rx_uw_shadow[index].istk = istk;
}
+
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
/*
 * Write the RX used-writer shadow entry @index to hardware.
 * queue_size is stored in the shadow as an exponent: DBS modules newer than
 * v0.8 expect the expanded mask (2^qs - 1), older ones take the raw value.
 * Optional PCKED and INT/VEC/ISTK fields are written only when present.
 */
static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
{
	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
		     2);
	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
		       p->m_rx_uw_shadow[index].host_id);
	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
	} else {
		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
			       p->m_rx_uw_shadow[index].queue_size);
	}
	if (p->mp_fld_rx_used_writer_data_packed) {
		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
			       p->m_rx_uw_shadow[index].packed);
	}
	if (p->mp_fld_rx_used_writer_data_int) {
		field_set_val32(p->mp_fld_rx_used_writer_data_int,
			       p->m_rx_uw_shadow[index].int_enable);
		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
			       p->m_rx_uw_shadow[index].vec);
		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
			       p->m_rx_uw_shadow[index].istk);
	}

	/* Select the target entry, then flush the data register */
	set_rx_uw_data_index(p, index);
	register_flush(p->mp_reg_rx_used_writer_data, 1);
}
+
/*
 * Public entry: update shadow and hardware for RX used-writer entry @index.
 * Returns -ENOTSUP if the data register is absent.
 */
int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
		   uint32_t int_enable, uint32_t vec, uint32_t istk)
{
	if (!p->mp_reg_rx_used_writer_data)
		return -ENOTSUP;

	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
			   packed, int_enable, vec, istk);
	flush_rx_uw_data(p, index);
	return 0;
}
+
/* Point the TX used-writer data bank at entry @index (count 1). */
static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
	register_flush(p->mp_reg_tx_used_writer_control, 1);
}
+
/* Shadow-copy setters for the TX used-writer entry at @index.
 * These only update the software shadow; flush_tx_uw_data() writes it to HW.
 */
static void
set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
				       uint64_t guest_physical_address)
{
	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
}

static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
				     uint32_t host_id)
{
	p->m_tx_uw_shadow[index].host_id = host_id;
}

/* queue_size is kept in its raw (un-expanded) form; flush converts it */
static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
					uint32_t queue_size)
{
	p->m_tx_uw_shadow[index].queue_size = queue_size;
}

static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
				     uint32_t packed)
{
	p->m_tx_uw_shadow[index].packed = packed;
}

static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
					uint32_t int_enable)
{
	p->m_tx_uw_shadow[index].int_enable = int_enable;
}

/* Interrupt vector */
static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
{
	p->m_tx_uw_shadow[index].vec = vec;
}

/* ISTK field — semantics not visible here */
static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
{
	p->m_tx_uw_shadow[index].istk = istk;
}

/* In-order flag (TX only; no RX counterpart exists) */
static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
				      uint32_t in_order)
{
	p->m_tx_uw_shadow[index].in_order = in_order;
}
+
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
/*
 * Write the TX used-writer shadow entry @index to hardware.
 * queue_size is stored in the shadow as an exponent: DBS modules newer than
 * v0.8 expect the expanded mask (2^qs - 1), older ones take the raw value.
 * Optional PCKED, INT/VEC/ISTK and INO fields are written only when present.
 */
static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
{
	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
		     2);
	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
		       p->m_tx_uw_shadow[index].host_id);
	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
	} else {
		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
			       p->m_tx_uw_shadow[index].queue_size);
	}
	if (p->mp_fld_tx_used_writer_data_packed) {
		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
			       p->m_tx_uw_shadow[index].packed);
	}
	if (p->mp_fld_tx_used_writer_data_int) {
		field_set_val32(p->mp_fld_tx_used_writer_data_int,
			       p->m_tx_uw_shadow[index].int_enable);
		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
			       p->m_tx_uw_shadow[index].vec);
		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
			       p->m_tx_uw_shadow[index].istk);
	}
	if (p->mp_fld_tx_used_writer_data_in_order) {
		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
			       p->m_tx_uw_shadow[index].in_order);
	}

	/* Select the target entry, then flush the data register */
	set_tx_uw_data_index(p, index);
	register_flush(p->mp_reg_tx_used_writer_data, 1);
}
+
/*
 * Public entry: update shadow and hardware for TX used-writer entry @index.
 * Returns -ENOTSUP if the data register is absent.
 */
int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
		   uint32_t int_enable, uint32_t vec, uint32_t istk,
		   uint32_t in_order)
{
	if (!p->mp_reg_tx_used_writer_data)
		return -ENOTSUP;

	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
			   packed, int_enable, vec, istk, in_order);
	flush_tx_uw_data(p, index);
	return 0;
}
+
/* Point the RX descriptor-reader data bank at entry @index (count 1). */
static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
}
+
/* Shadow-copy setters for the RX descriptor-reader entry at @index.
 * These only update the software shadow; flush_rx_dr_data() writes it to HW.
 */
static void
set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
				       uint64_t guest_physical_address)
{
	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
}

static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
				     uint32_t host_id)
{
	p->m_rx_dr_shadow[index].host_id = host_id;
}

/* queue_size is kept in its raw (un-expanded) form; flush converts it */
static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
					uint32_t queue_size)
{
	p->m_rx_dr_shadow[index].queue_size = queue_size;
}

static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
				     uint32_t header)
{
	p->m_rx_dr_shadow[index].header = header;
}

static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
				     uint32_t packed)
{
	p->m_rx_dr_shadow[index].packed = packed;
}
+
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
/*
 * Write the RX descriptor-reader shadow entry @index to hardware.
 * queue_size is stored in the shadow as an exponent: DBS modules newer than
 * v0.8 expect the expanded mask (2^qs - 1), older ones take the raw value.
 * The optional PCKED field is written only when present.
 */
static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
{
	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
		     2);
	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
		       p->m_rx_dr_shadow[index].host_id);
	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
	} else {
		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
			       p->m_rx_dr_shadow[index].queue_size);
	}
	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
		       p->m_rx_dr_shadow[index].header);
	if (p->mp_fld_rx_descriptor_reader_data_packed) {
		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
			       p->m_rx_dr_shadow[index].packed);
	}

	/* Select the target entry, then flush the data register */
	set_rx_dr_data_index(p, index);
	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
}
+
/*
 * Public entry: update shadow and hardware for RX descriptor-reader entry
 * @index. Returns -ENOTSUP if the data register is absent.
 */
int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
		   uint32_t host_id, uint32_t queue_size, uint32_t header,
		   uint32_t packed)
{
	if (!p->mp_reg_rx_descriptor_reader_data)
		return -ENOTSUP;

	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
			   header, packed);
	flush_rx_dr_data(p, index);
	return 0;
}
+
/* Point the TX descriptor-reader data bank at entry @index (count 1). */
static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
{
	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
}
+
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+/* Record the port for TX DR entry @index (shadow only, no HW access). */
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	struct nthw_dbs_tx_dr_data_s *shadow = &p->m_tx_dr_shadow[index];
+
+	shadow->port = port;
+}
+
+/* Record the packed-ring flag for TX DR entry @index (shadow only). */
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	struct nthw_dbs_tx_dr_data_s *shadow = &p->m_tx_dr_shadow[index];
+
+	shadow->packed = packed;
+}
+
+/* Stage all TX DR fields for entry @index in the shadow bank (no HW access). */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write the shadow copy of TX DR entry @index to the hardware data register.
+ * The 64-bit guest physical address is written as two 32-bit words.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	/*
+	 * Newer DBS modules take the queue size as a (2^n - 1) mask; older
+	 * ones take the raw shadow value.
+	 * NOTE(review): presumably module_is_version_newer() means "> 0.8" —
+	 * confirm the helper's exact semantics.
+	 */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	/* The PACKED field only exists in some FPGA images; skip when absent */
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program the TX descriptor-reader entry for one queue.
+ * Values are staged in the shadow bank and then flushed to hardware.
+ * Returns -ENOTSUP when the data register is absent in this FPGA image.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (p->mp_reg_tx_descriptor_reader_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/*
+ * Select which TX queue-property table entry the next data-register flush
+ * targets. The control register is flushed before the data register.
+ */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Record the virtual port for TX QP entry @index (shadow only). */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	struct nthw_dbs_tx_qp_data_s *shadow = &p->m_tx_qp_shadow[index];
+
+	shadow->virtual_port = virtual_port;
+}
+
+/* Stage all TX QP (queue property) fields for @index in the shadow bank. */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write the shadow TX QP (queue property) entry @index to hardware. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Program the TX queue-property (queue to virtual-port mapping) for one
+ * queue. Returns -ENOTSUP when the register is absent in this FPGA image.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (p->mp_reg_tx_queue_property_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/* Select which TX QoS table entry the next data-register flush targets. */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Record the enable flag for TX QoS entry @index (shadow only). */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	struct nthw_dbs_tx_qos_data_s *shadow = &p->m_tx_qos_shadow[index];
+
+	shadow->enable = enable;
+}
+
+/* Record the information rate for TX QoS entry @index (shadow only). */
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	struct nthw_dbs_tx_qos_data_s *shadow = &p->m_tx_qos_shadow[index];
+
+	shadow->ir = ir;
+}
+
+/* Record the burst size for TX QoS entry @index (shadow only). */
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	struct nthw_dbs_tx_qos_data_s *shadow = &p->m_tx_qos_shadow[index];
+
+	shadow->bs = bs;
+}
+
+/* Stage all TX QoS fields for entry @index in the shadow bank (no HW access). */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write the shadow TX QoS entry @index to the hardware data register. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Program TX QoS (traffic shaping) for one queue: enable flag, information
+ * rate and burst size. Returns -ENOTSUP when the register is absent.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (p->mp_reg_tx_queue_qos_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Set the global TX QoS rate as the fraction mul/div.
+ * Written directly to hardware; no shadow copy is kept for this register.
+ * Returns -ENOTSUP when the register is absent in this FPGA image.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+/* Maximum number of RX/TX queues the DBS shadow banks are dimensioned for */
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA */
+/* Shadow of one RX available-monitor register bank entry */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA */
+/* Shadow of one TX available-monitor register bank entry */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA */
+/* Shadow of one RX used-writer register bank entry */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+/* Shadow of one TX used-writer register bank entry */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA */
+/* Shadow of one RX descriptor-reader register bank entry */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA */
+/* Shadow of one TX descriptor-reader register bank entry */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA */
+/* Shadow of one TX queue-property entry (queue to virtual-port mapping) */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* Shadow of one TX QoS entry: enable, information rate and burst size */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * DBS (doorbell/queue setup) module instance: FPGA model handles for every
+ * register and field, plus shadow copies of the indexed register banks so
+ * entries can be staged in software before being flushed to hardware.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	/* RX/TX engine control */
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	/* Queue init / pointer readback */
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Indexed register banks (control selects the entry, data carries it) */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/*
+	 * NOTE(review): the members below are nt_field_t pointers but carry an
+	 * mp_reg_ prefix instead of mp_fld_ — inconsistent with the rest of
+	 * the struct; consider renaming together with their users.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Software shadows of the indexed register banks above */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Instance lifecycle */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/*
+ * RX/TX engine control. Setters return 0 on success, -ENOTSUP when the
+ * backing register is absent in the loaded FPGA image.
+ * NOTE(review): most of these exported names lack the nthw_dbs_ prefix;
+ * consider aligning them with nthw_dbs_set_tx_qp_data() and friends.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+
+/* Per-queue init, idle and pointer access */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+
+/* Indexed register bank writers (shadowed: stage in SW, then flush to HW) */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/*
+ * Port flavor as seen by the driver; declared before the nthw includes
+ * below because nthw_epp.h uses it in a prototype.
+ */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Hardware identity as reported by firmware, plus VPD-derived MAC info */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	/* Vital product data: MAC address pool assigned to the adapter */
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-adapter FPGA context: image identification, port counts, module
+ * handles and the BAR0 mapping used for all register access.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;
+
+	/* FPGA image identification (type/product/version/revision) */
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	/* Port/NIM topology of this image */
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	/* Core module instances bound during init */
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/*
+ * Allocate a zero-initialized EPP instance.
+ * Returns NULL on allocation failure. The caller owns the object and must
+ * release it with nthw_epp_delete().
+ */
+nthw_epp_t *nthw_epp_new(void)
+{
+	/* calloc() zeroes the object in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/* Release an EPP instance created by nthw_epp_new(); NULL is a no-op. */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the instance before handing it back to the allocator */
+	memset(p, 0, sizeof(nthw_epp_t));
+	free(p);
+}
+
+/* Return nonzero when EPP instance @n_instance exists in this FPGA image. */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	/* Probe-only mode: passing NULL makes init report existence only */
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/*
+ * Bind all EPP register and field handles from the FPGA model.
+ *
+ * When @p is NULL the function only probes for the module: it returns 0 if
+ * instance @n_instance of MOD_EPP exists and -1 otherwise (used by
+ * nthw_epp_present()). With a non-NULL @p it fully initializes the instance
+ * and returns 0 on success, -1 when the module instance is missing.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	/* Probe-only call: report module presence without touching state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory: per-category MTU/size-adjust/QoS enable recipe */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Per physical TX port MTU table */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Per virtual queue MTU table */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Per physical TX port QoS (rate/burst) table */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Per virtual port QoS (rate/burst) table */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue to virtual-port mapping table */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Program EPP defaults: clear all recipe categories, install the NRECIPE
+ * VXLAN size-adjust recipes, and initialize the MTU and QoS tables.
+ * A NULL instance (module absent) is a successful no-op.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one recipe record per flush */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Install the default (none/VXLAN/VXLAN-IPv6) size-adjust recipes */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup */
+	/* NOTE(review): loop bound 2 assumes two physical ports — confirm */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup: shaping disabled by default */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup */
+	/* NOTE(review): loop bound 128 assumes 128 queues — confirm */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup: shaping disabled by default */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the maximum MTU for one port.
+ *
+ * Physical ports use the TXP MTU table, virtual ports the queue MTU table.
+ * Any other port type (e.g. PORT_TYPE_OVERRIDE) resets both MTU register
+ * pairs and returns -ENOTSUP. A NULL instance is a successful no-op.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the port entry in the TXP MTU table */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue entry in the queue MTU table */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Unsupported type: restore both MTU register pairs to reset
+		 * defaults before reporting the error.
+		 */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Program TX-port QoS shaping for @port. Shaping is enabled iff any of the
+ * rate/burst parameters is nonzero; all-zero disables it.
+ * A NULL instance is a successful no-op.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the port entry before writing the data register */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Program virtual-port QoS shaping for @port. Shaping is enabled iff any of
+ * the rate/burst parameters is nonzero; all-zero disables it.
+ * A NULL instance is a successful no-op.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the port entry before writing the data register */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Map queue @qid to virtual port @vport in the EPP queue/vport table.
+ * A NULL instance is a successful no-op.
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the queue entry before writing the data register */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Register/field handles and public API for the FPGA EPP
+ * (egress packet processing) module.
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6 adds extra 70 bytes */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* L2 MTU value programmed at setup time */
+#define MTUINITVAL 1500
+/* Number of EPP recipe entries (0: none, 1: VXLAN, 2: VXLAN over IPv6) */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+/* NOTE(review): 'static const' tables defined in a header give every
+ * including translation unit its own copy - consider moving them to a .c file.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * Handle for one EPP module instance: the owning FPGA/module plus the
+ * register and field handles used by the nthw_epp_* functions.
+ * (Member names keep the historical "reciepe" spelling - part of the API.)
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	/* recipe memory: control selects an entry, data holds its contents */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* per-TX-port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* per-queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* TX-port shaper (QoS) table */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* virtual-port shaper (QoS) table */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* queue to virtual-port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+/* allocate/free an EPP handle */
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+/* probe for, bind to and configure an EPP module instance */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+/* set the max MTU for a physical or virtual port */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+/* configure port shaping; all-zero rate parameters disable it */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+/* map a queue to a virtual port */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated list of all generated FPGA descriptions known to this driver */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/*
+ * Map from FPGA module id (MOD_*) to its printable name.
+ * The list is terminated by a { 0, NULL } sentinel entry
+ * (relied upon by nthw_fpga_mod_id_to_str()).
+ */
+static const struct {
+	const int a;	/* module id (MOD_*) */
+	const char *b;	/* printable module name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+/* Printable names for bus type ids; index == enum value (see get_bus_name()). */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Translate a bus type id into its printable name.
+ * Index 0 (BUS_TYPE_UNKNOWN) and out-of-range ids map to "ERR".
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	/* valid indices are 1 .. ARRAY_SIZE-1; the previous '<=' bound read
+	 * one element past the end of a_bus_type
+	 */
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+	else
+		return "ERR";
+}
+
+/*
+ * Module name lookup by id from array.
+ * Uses naive linear search as performance is not an issue here...
+ * Returns "unknown" for ids not present in the map.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	/* search real entries only and stop at the { 0, NULL } sentinel, so
+	 * the index can never run past the array (the previous '<=' bound
+	 * read out of bounds whenever the id was unknown)
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map) - 1; i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id)
+			break;
+	}
+	/* the sentinel's NULL name yields "unknown" for unmatched ids */
+	return (sa_nthw_fpga_mod_map[i].b ? sa_nthw_fpga_mod_map[i].b :
+		"unknown");
+}
+
+/*
+ * Read 'len' 32-bit words at 'addr' from the selected bus into p_data.
+ * Returns 0 on success, negative on error.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		/* BAR/PCI access is a single-register read */
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		return 0;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0,
+					len, p_data);
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1,
+					len, p_data);
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2,
+					len, p_data);
+	default:
+		/* unknown bus type - programming error */
+		assert(false);
+		return -1;
+	}
+}
+
+/*
+ * Read like read_data(); the TSC out-parameters are accepted for API
+ * compatibility but are not filled in by this implementation.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Write 'len' 32-bit words from p_data to 'addr' on the selected bus.
+ * Returns 0 on success, negative on error.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		/* BAR/PCI access is a single-register write */
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		return 0;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0,
+					 len, p_data);
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1,
+					 len, p_data);
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		return nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2,
+					 len, p_data);
+	default:
+		/* unknown bus type - programming error */
+		assert(false);
+		return -1;
+	}
+}
+
+/*
+ * FpgaMgr
+ */
+/* Allocate a manager object; caller must run fpga_mgr_init() before use. */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	/* zero-initialize so the struct is in a defined state even before
+	 * fpga_mgr_init() runs - consistent with fpga_new()
+	 */
+	nt_fpga_mgr_t *p = calloc(1, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Free a manager object; safe to call with NULL (like param_delete()). */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	if (!p)
+		return;	/* memset/free of NULL would be undefined/unwanted */
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/* Bind the manager to the generated FPGA list and count its entries. */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	/* the instance array is NULL-terminated - count up to the sentinel */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Find the generated FPGA description matching n_fpga_id and instantiate it.
+ * Returns a new nt_fpga_t (owned by the caller) or NULL when the id is
+ * unknown or allocation fails.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id == n_fpga_prod &&
+				p_init->fpga_version == n_fpga_ver &&
+				p_init->fpga_revision == n_fpga_rev) {
+			nt_fpga_t *p_fpga = fpga_new();
+
+			/* fpga_new() returns NULL on out-of-memory - the
+			 * previous code passed it straight to fpga_init()
+			 */
+			if (p_fpga == NULL)
+				return NULL;
+			fpga_init(p_fpga, p_init, p_fpga_info);
+			return p_fpga;
+		}
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print all known FPGA descriptions to fh_out.
+ * detail_level 0: ids only; otherwise also the build time.
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/* cast: time_t is not 'long' on every platform, and
+			 * %lX with a mismatched argument type is undefined
+			 */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Dump the manager's FPGA list to the debug log. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (n = 0; n < p->mn_fpgas; n++) {
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[n];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       n, p->mn_fpgas, p_init->fpga_product_id,
+		       p_init->fpga_version, p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA object; NULL on out-of-memory. */
+nt_fpga_t *fpga_new(void)
+{
+	/* calloc == malloc + memset(0) of the original implementation */
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/*
+ * Free an FPGA object together with its parameter and module pointer
+ * arrays (the module/param objects themselves are deleted by
+ * fpga_delete_all()). Safe to call with NULL.
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	if (!p)
+		return;	/* tolerate NULL like param_delete() */
+
+	/* these arrays are allocated in fpga_init() and were never freed */
+	free(p->mpa_modules);
+	free(p->mpa_params);
+
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/*
+ * Delete everything owned by the FPGA object: its modules, its product
+ * params, and finally the object itself.
+ */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	/* guard: fpga_init() leaves the array NULL when allocation failed */
+	if (p->mpa_modules) {
+		for (i = 0; i < p->mn_modules; i++) {
+			nt_module_t *p_mod = p->mpa_modules[i];
+
+			if (p_mod)
+				module_delete(p_mod);
+		}
+	}
+
+	/* the param objects allocated in fpga_init() were previously leaked */
+	if (p->mpa_params) {
+		for (i = 0; i < p->mn_params; i++)
+			param_delete(p->mpa_params[i]);
+	}
+
+	fpga_delete(p);
+}
+
+/*
+ * Initialise an FPGA object from its generated product descriptor:
+ * copy the identification fields, then instantiate all product params
+ * and modules listed in the descriptor.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	/* identification of this FPGA build */
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	/* NOTE(review): on malloc failure the count stays non-zero while the
+	 * pointer array is NULL - iterating callers must guard against that
+	 */
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+/* Set debug mode on the FPGA object and propagate it to every module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_mod = p->mpa_modules[n];
+
+		if (p_mod != NULL)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+/* Linear search for a module by (id, instance); NULL when not present. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_mod = p->mpa_modules[n];
+
+		if (p_mod->m_mod_id == id && p_mod->m_instance == instance)
+			return p_mod;
+	}
+	return NULL;
+}
+
+/* True when the FPGA contains module (id, instance). */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return (fpga_query_module(p, id, instance) != NULL);
+}
+
+/* Find the generated init descriptor for module (id, instance); NULL when absent. */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int n;
+
+	for (n = 0; n < p->mp_init->nb_modules; n++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[n];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/* Look up a product parameter value; n_default_value when the id is absent. */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int n;
+
+	for (n = 0; n < p->mn_params; n++) {
+		const nt_param_t *p_param = p->mpa_params[n];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Product id of this FPGA build. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* Version number of this FPGA build. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* Revision number of this FPGA build. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the FPGA identification tuple (item-prod-ver-rev-patch-build) and build time. */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug-dump the FPGA header followed by its params and modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug-dump every product parameter of the FPGA. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (n = 0; n < p->mn_params; n++)
+		param_dump(p->mpa_params[n]);
+}
+
+/* Debug-dump every module of the FPGA. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (n = 0; n < p->mn_modules; n++)
+		module_dump(p->mpa_modules[n]);
+}
+
+/*
+ * Param
+ */
+/* Allocate a param object; members are set up by param_init(). */
+nt_param_t *param_new(void)
+{
+	return malloc(sizeof(nt_param_t));
+}
+
+/* Free a param object; safe to call with NULL. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a param object to its owner FPGA and copy id/value from the
+ * generated product-parameter descriptor.
+ */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-dump one product parameter (id and value). */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/* Allocate a module object; members are set up by module_init(). */
+nt_module_t *module_new(void)
+{
+	return malloc(sizeof(nt_module_t));
+}
+
+/*
+ * Delete a module: all of its register objects, the register pointer array,
+ * then the module object itself. Safe to call with NULL.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	if (!p)
+		return;
+
+	/* guard: module_init() leaves the array NULL when allocation failed */
+	if (p->mpa_registers) {
+		for (i = 0; i < p->mn_registers; i++) {
+			nt_register_t *p_reg = p->mpa_registers[i];
+
+			if (p_reg)
+				register_delete(p_reg);
+		}
+		/* the pointer array itself was previously leaked */
+		free(p->mpa_registers);
+	}
+
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Initialise a module object from its generated descriptor and create all
+ * of its register objects.
+ * Tolerates a NULL descriptor (module not present in this FPGA build,
+ * see module_init2()): the module is then left empty instead of the
+ * previous NULL dereference.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	/* Copy debug mode from owner */
+	p->m_debug_mode = p->mp_owner ? p->mp_owner->m_debug_mode : 0;
+
+	if (p_init == NULL) {
+		/* fpga_lookup_init() may not know the module - keep the
+		 * object in a defined, empty state
+		 */
+		NT_LOG(ERR, NTHW, "%s: NULL module init descriptor\n", __func__);
+		p->m_mod_id = -1;
+		p->m_instance = -1;
+		p->m_mod_def_id = 0;
+		p->m_major_version = 0;
+		p->m_minor_version = 0;
+		p->m_bus = 0;
+		p->m_addr_base = 0;
+		p->mn_registers = 0;
+		p->mpa_registers = NULL;
+		return;
+	}
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Initialise a module by looking up its generated descriptor in the FPGA.
+ * Logs an error and leaves the module untouched when (mod_id, instance) is
+ * unknown, instead of handing module_init() a NULL descriptor.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	if (p_init == NULL) {
+		NT_LOG(ERR, NTHW, "Module %d instance %d not found\n", mod_id,
+		       instance);
+		return;
+	}
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump the module header followed by all of its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register of the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (n = 0; n < p->mn_registers; n++)
+		register_dump(p->mpa_registers[n]);
+}
+
+/* Major version of the module as reported by the generated descriptor. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Minor version of the module as reported by the generated descriptor. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Pack major/minor versions into one 64-bit value (major in the high word). */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	const uint64_t n_major = (uint32_t)p->m_major_version;
+	const uint64_t n_minor = (uint32_t)p->m_minor_version;
+
+	return (n_major << 32) | n_minor;
+}
+
+/* True when the module's version is at least (major_version, minor_version). */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (p->m_major_version != major_version)
+		return p->m_major_version >= major_version;
+	/* same major: compare minors */
+	return p->m_minor_version >= minor_version;
+}
+
+/* Linear search for a register by id within the module; NULL when absent. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		if (p->mpa_registers[n]->m_id == id)
+			return p->mpa_registers[n];
+	}
+	return NULL;
+}
+
+/*
+ * Like module_query_register(), but logs an error when the register is not
+ * found or the module context is NULL.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (p_register == NULL) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Like module_get_register(), but returns NULL silently when not found. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Current debug mode of the module (see ON_READ/ON_WRITE usage). */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set debug mode on the module and propagate it to all of its registers. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		nt_register_t *p_reg = p->mpa_registers[n];
+
+		if (p_reg != NULL)
+			register_set_debug_mode(p_reg, n_debug_mode);
+	}
+}
+
+/* Bus type id (BUS_TYPE_*) the module is accessed through. */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Base address of the module on its bus. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* Log that a module found in the FPGA is not handled by this driver.
+ * NOTE(review): the function name keeps its historical misspelling - it is
+ * part of the public interface and cannot be renamed here.
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	/* add the trailing newline every other NT_LOG message carries */
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/* Allocate a register object; members are set up by register_init(). */
+nt_register_t *register_new(void)
+{
+	return malloc(sizeof(nt_register_t));
+}
+
+/*
+ * Delete a register: its field objects, the field pointer array, the
+ * shadow/dirty buffers, then the register object itself.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	/* guard: register_init() leaves the array NULL when allocation failed */
+	if (p->mpa_fields) {
+		for (i = 0; i < p->mn_fields; i++) {
+			nt_field_t *p_field = p->mpa_fields[i];
+
+			if (p_field)
+				field_delete(p_field);
+		}
+		/* the pointer array itself was previously leaked */
+		free(p->mpa_fields);
+	}
+
+	/* free(NULL) is a no-op, so the old 'if' guards are unnecessary */
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Initialise a register object from its generated descriptor: compute its
+ * absolute address and length in 32-bit words, create all field objects,
+ * and allocate the shadow/dirty buffers used by register_get/set_val and
+ * register_flush.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	/* absolute address = module base + register offset */
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	/* length in 32-bit words, rounded up from the bit width */
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* NOTE(review): shadow/dirty are allocated only when the
+			 * register has fields and the field-array allocation
+			 * succeeded; register_flush()/register_get_val() assume
+			 * they exist - confirm every register defines fields
+			 */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug-dump the register header followed by all of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (n = 0; n < p->mn_fields; n++)
+		field_dump(p->mpa_fields[n]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute (bus) address of the register. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Apply field_reset() to every field of the register. */
+void register_reset(const nt_register_t *p)
+{
+	int n;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		nt_field_t *p_field = p->mpa_fields[n];
+
+		if (p_field != NULL)
+			field_reset(p_field);
+	}
+}
+
+/* Linear search for a field by id; NULL when absent or p is NULL. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int n;
+
+	if (p == NULL)
+		return NULL;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		if (p->mpa_fields[n]->m_id == id)
+			return p->mpa_fields[n];
+	}
+	return NULL;
+}
+
+/*
+ * Like register_query_field(), but logs an error when the field is not
+ * found or the register context is NULL.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (p_field == NULL) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Like register_get_field(), but returns NULL silently when not found. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Bit width of the register as given by the generated descriptor. */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Register address relative to its module's base address. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* Current debug mode.
+ * NOTE(review): the parameter type is nt_module_t * although this sits in
+ * the register getter family; both structs have an m_debug_mode member so
+ * it compiles - confirm against the header whether nt_register_t * was
+ * intended.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+/* Set debug mode on the register and all of its fields. */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		nt_field_t *p_field = p->mpa_fields[n];
+
+		if (p_field != NULL)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register's raw contents from hardware into its shadow buffer.
+ * Returns the bus read status (0 on success).
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/* resolve the owner chain before touching any other member - the
+	 * previous code read p->mp_owner and p->m_addr before its NULL
+	 * test, which made the test ineffective
+	 */
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	assert(p_data);
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Read the register's raw contents, reporting read timestamps via
+ * p_tsc1/p_tsc2 (currently left untouched by read_data_tsc()).
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/* resolve the owner chain before touching any other member - the
+	 * previous code dereferenced p before its NULL test
+	 */
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	return read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data,
+			     p_tsc1, p_tsc2);
+}
+
+/*
+ * Write 'cnt' register images (len words each) from the shadow buffer to
+ * hardware. Returns the bus write status (0 on success).
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	/* resolve the owner chain before touching any other member - the
+	 * previous code dereferenced p before its NULL test
+	 */
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	assert(p_data);
+
+	return write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+}
+
+/*
+ * Copy up to 'len' words of the register's shadow contents into p_data.
+ * len == (uint32_t)-1 means "the whole register".
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t n;
+
+	/* clamp the wildcard/oversized length to the register size */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (n = 0; n < len; n++)
+		p_data[n] = p->mp_shadow[n];
+}
+
+/* First 32-bit word of the register's shadow contents. */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t n_value = 0;
+
+	register_get_val(p, &n_value, 1);
+	return n_value;
+}
+
+/*
+ * Refresh the shadow buffer from hardware (no-op for write-only registers)
+ * and log the read when ON_READ debug mode is set.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p == NULL || p->m_type == REGISTER_TYPE_WO)
+		return;
+
+	const char *const p_dev_name _unused = "NA";
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+	const uint32_t addr _unused = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	register_read_data(p);
+	if (p->m_debug_mode & ON_READ) {
+		uint32_t i = len;
+
+		uint32_t *ptr _unused = p_data;
+		NT_LOG(DBG, NTHW,
+		       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+		       p_dev_name, p_bus_name, addr, len);
+		while (i--)
+			NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+		NT_LOG(DBG, NTHW, ")\n");
+	}
+}
+
+/* Re-read the register from hardware, then return its first 32-bit word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t n_value = 0;
+
+	register_update(p);
+	register_get_val(p, &n_value, 1);
+	return n_value;
+}
+
+/* Mark every word of the register shadow as needing a hardware write. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t n;
+
+	for (n = 0; n < p->m_len; n++)
+		p->mp_dirty[n] = true;
+}
+
+/*
+ * Copy 'len' words from p_data into the register's shadow buffer.
+ * len == (uint32_t)-1 means "the whole register" (as in register_get_val()).
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	/* clamp before asserting so the documented len == -1 wildcard does
+	 * not trip the bounds assert (register_get_val() clamps first, too)
+	 */
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Update the shadow contents and immediately write them to hardware. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write the register's shadow contents ('cnt' consecutive register images)
+ * to hardware and clear the dirty markers. No-op for read-only registers.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			/* renamed from 'i' - it shadowed the dirty-loop index */
+			uint32_t n_words = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, n_words);
+			while (n_words--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/* NOTE(review): clears 'cnt' flags, not len*cnt - confirm intent */
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
/*
 * Trigger a register read and capture TSC timestamps taken around the
 * bus access (for latency measurement).
 */
void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
			   uint64_t *tsc2)
{
	register_read_data_tsc(p, tsc1, tsc2);
}

/* Zero the whole shadow buffer and mark every word dirty for the next flush. */
void register_clr(nt_register_t *p)
{
	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
	register_make_dirty(p);
}

/* Set every shadow bit to one and mark every word dirty for the next flush. */
void register_set(nt_register_t *p)
{
	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
	register_make_dirty(p);
}
+
+/*
+ * Field
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = malloc(sizeof(nt_field_t));
+	return p;
+}
+
+void field_delete(nt_field_t *p)
+{
+	memset(p, 0, sizeof(nt_field_t));
+	free(p);
+}
+
/*
 * Initialize a field from its static FPGA description and precompute the
 * masks used for shadow-buffer access:
 *   - m_front_mask: in-word mask covering the field bits in its first word
 *   - m_body_length: number of full 32-bit words between front and tail
 *   - m_tail_mask: mask for the remaining bits in the last word (0 if none)
 */
void field_init(nt_field_t *p, nt_register_t *p_reg,
		const nt_fpga_field_init_t *p_init)
{
	p->mp_owner = p_reg;

	/* inherit the owning register's debug/trace setting */
	p->m_debug_mode = p_reg->m_debug_mode;

	p->m_id = p_init->id;
	p->mn_bit_width = p_init->bw;
	p->mn_bit_pos_low = p_init->low;
	p->m_reset_val = (uint32_t)p_init->reset_val;
	p->m_first_word = p_init->low / 32;
	p->m_first_bit = p_init->low % 32;
	p->m_front_mask = 0;
	p->m_body_length = 0;
	/* number of 32-bit words the field value spans (round up) */
	p->mn_words = (p_init->bw + 0x1f) / 0x20;
	p->m_tail_mask = 0;

	{
		int bits_remaining = p_init->bw;
		/* bits available in the first word, from m_first_bit upward */
		int front_mask_length = 32 - p->m_first_bit;

		if (front_mask_length > bits_remaining)
			front_mask_length = bits_remaining;
		bits_remaining -= front_mask_length;

		/* 1ULL keeps the shift defined when front_mask_length == 32 */
		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
					   << p->m_first_bit);

		p->m_body_length = bits_remaining / 32;
		bits_remaining -= p->m_body_length * 32;
		/* bits_remaining < 32 here, so this shift is defined */
		p->m_tail_mask = (1 << bits_remaining) - 1;

		if (p->m_debug_mode >= 0x100) {
			NT_LOG(DBG, NTHW,
			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
			       __func__, p_init->id, p_init->low,
			       (p_init->low + p_init->bw), p_init->bw,
			       ((p_init->bw + 31) / 32), p->m_first_word,
			       p->m_first_bit, p->m_front_mask, p->m_body_length,
			       p->m_tail_mask);
		}
	}
}
+
/*
 * NOTE(review): takes nt_module_t rather than nt_field_t (both carry an
 * m_debug_mode member, so this compiles) — looks like a copy-paste from
 * the module accessor; confirm the intended parameter type.
 */
int field_get_debug_mode(const nt_module_t *p)
{
	return p->m_debug_mode;
}

/* Set the per-field debug/trace mode (see enum debug_mode). */
void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
{
	p->m_debug_mode = n_debug_mode;
}

/* Width of the field in bits. */
int field_get_bit_width(const nt_field_t *p)
{
	return p->mn_bit_width;
}

/* Lowest absolute bit position of the field within its register. */
int field_get_bit_pos_low(const nt_field_t *p)
{
	return p->mn_bit_pos_low;
}

/* Highest absolute bit position of the field within its register. */
int field_get_bit_pos_high(const nt_field_t *p)
{
	return p->mn_bit_pos_low + p->mn_bit_width - 1;
}

/* In-word mask of the field bits in its first shadow word (not shifted down). */
uint32_t field_get_mask(const nt_field_t *p)
{
	return p->m_front_mask;
}

/* Write the field's reset value into the shadow buffer (no flush). */
void field_reset(const nt_field_t *p)
{
	field_set_val32(p, (uint32_t)p->m_reset_val);
}

/* Mask of the field value itself, i.e. the front mask shifted to bit 0. */
uint32_t field_get_val_mask(const nt_field_t *p)
{
	return (p->m_front_mask >> p->mn_bit_pos_low);
}

/* Reset value of the field as declared in the FPGA description. */
uint32_t field_get_reset_val(const nt_field_t *p)
{
	return (uint32_t)p->m_reset_val;
}
+
/*
 * Extract the field value from the owning register's shadow buffer into
 * @p_data (@len must equal the field's word count, mn_words).
 *
 * Uses a 64-bit sliding window (two adjacent 32-bit words in buf.w64):
 * each iteration loads the next shadow word into the high half, shifts
 * the window down so the field is aligned to bit 0, and emits the low
 * half. Assumes a little-endian layout of w32[] within w64.
 */
void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
{
	uint32_t i;
	uint32_t data_index = 0;
	uint32_t shadow_index = p->m_first_word;

	union {
		uint32_t w32[2];
		uint64_t w64;
	} buf;

	(void)len;
	assert(len == p->mn_words);

	/* handle front */
	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;

	/* handle body */
	for (i = 0; i < p->m_body_length; i++) {
		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
		/* align the current output word to bit 0 */
		buf.w64 = buf.w64 >> (p->m_first_bit);
		assert(data_index < len);
		p_data[data_index++] = buf.w32[0];
		/* keep the leftover high bits for the next iteration */
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
	}

	/* handle tail */
	if (p->m_tail_mask)
		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;

	else
		buf.w32[1] = 0;
	buf.w64 = buf.w64 >> (p->m_first_bit);
	p_data[data_index++] = buf.w32[0];
	if (data_index < p->mn_words)
		p_data[data_index++] = buf.w32[1];
}
+
/*
 * Insert the field value from @p_data into the owning register's shadow
 * buffer (@len must equal the field's word count, mn_words). Only the
 * shadow is touched; callers flush separately (or use field_set_val_flush).
 *
 * Mirror image of field_get_val(): a 64-bit sliding window shifts each
 * input word up to the field's bit position, then merges it into the
 * shadow under the front/tail masks. Marks the register dirty at the end.
 */
void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
{
	uint32_t i;
	uint32_t data_index = 0;
	uint32_t shadow_index = p->m_first_word;

	union {
		uint32_t w32[2];
		uint64_t w64;
	} buf;

	(void)len;
	assert(len == p->mn_words);

	/* handle front */
	buf.w32[0] = 0;
	buf.w32[1] = p_data[data_index++];
	/* shift the first input word down so it lands at m_first_bit in w32[0] */
	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
	p->mp_owner->mp_shadow[shadow_index] =
		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
		(buf.w32[0] & p->m_front_mask);
	shadow_index++;

	/* handle body */
	for (i = 0; i < p->m_body_length; i++) {
		buf.w64 = buf.w64 >> (p->m_first_bit);
		assert(data_index < len);
		buf.w32[1] = p_data[data_index++];
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
		/* body words are fully owned by the field: overwrite whole word */
		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
	}

	/* handle tail */
	if (p->m_tail_mask) {
		buf.w64 = buf.w64 >> (p->m_first_bit);
		if (data_index < len)
			buf.w32[1] = p_data[data_index];
		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
		p->mp_owner->mp_shadow[shadow_index] =
			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
			(buf.w32[0] & p->m_tail_mask);
	}

	register_make_dirty(p->mp_owner);
}
+
/* Insert the field value into the shadow and flush the register to HW. */
void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
{
	field_set_val(p, p_data, len);
	field_flush_register(p);
}

/*
 * Read the field value from the shadow as a single 32-bit word.
 * NOTE: field_get_val() asserts len == mn_words, so this is only valid
 * for fields that fit in one word.
 */
uint32_t field_get_val32(const nt_field_t *p)
{
	uint32_t val;

	field_get_val(p, &val, 1);
	return val;
}

/* Re-read the register from hardware, then return the field value (one word). */
uint32_t field_get_updated(const nt_field_t *p)
{
	uint32_t val;

	register_update(p->mp_owner);
	field_get_val(p, &val, 1);

	return val;
}

/* Trigger a read of the owning register with TSC timestamps (latency probe). */
void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
{
	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
}

/* Refresh the owning register's shadow from hardware. */
void field_update_register(const nt_field_t *p)
{
	register_update(p->mp_owner);
}

/* Write the owning register's shadow to hardware. */
void field_flush_register(const nt_field_t *p)
{
	register_flush(p->mp_owner, 1);
}

/* Set a single-word field value in the shadow (no flush). */
void field_set_val32(const nt_field_t *p, uint32_t val)
{
	field_set_val(p, &val, 1);
}

/* Set a single-word field value and flush the register to hardware. */
void field_set_val_flush32(const nt_field_t *p, uint32_t val)
{
	field_set_val(p, &val, 1);
	register_flush(p->mp_owner, 1);
}

/* Clear all field bits in the shadow; single-word fields only (asserted). */
void field_clr_all(const nt_field_t *p)
{
	assert(p->m_body_length == 0);
	field_set_val32(p, 0);
}

/* Clear all field bits and flush the register to hardware. */
void field_clr_flush(const nt_field_t *p)
{
	field_clr_all(p);
	register_flush(p->mp_owner, 1);
}

/* Set all field bits in the shadow; single-word fields only (asserted). */
void field_set_all(const nt_field_t *p)
{
	assert(p->m_body_length == 0);
	field_set_val32(p, ~0);
}

/* Set all field bits and flush the register to hardware. */
void field_set_flush(const nt_field_t *p)
{
	field_set_all(p);
	register_flush(p->mp_owner, 1);
}
+
/* Poll conditions for field_wait_cond32(). */
enum field_match {
	FIELD_MATCH_CLR_ALL,	/* wait until every field bit is 0 */
	FIELD_MATCH_SET_ALL,	/* wait until every field bit is 1 */
	FIELD_MATCH_CLR_ANY,	/* wait until at least one bit is 0 */
	FIELD_MATCH_SET_ANY,	/* wait until at least one bit is 1 */
};
+
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	const uint32_t n_mask = (1 << p->mn_bit_width) - 1;
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		uint32_t n_reg_mask _unused =
+			(((1 << p->mn_bit_width) - 1) << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			uint32_t mask = field_get_mask(p);
+
+			if (val != mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
/* Poll until every field bit reads 1; see field_wait_cond32() for defaults. */
int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval)
{
	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
				n_poll_interval);
}

/* Poll until every field bit reads 0; see field_wait_cond32() for defaults. */
int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval)
{
	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
				n_poll_interval);
}

/* Poll until at least one field bit reads 1. */
int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval)
{
	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
				n_poll_interval);
}

/* Poll until at least one field bit reads 0. */
int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval)
{
	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
				n_poll_interval);
}
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
/*
 * Log the field's static layout (id, bit range, width, words, reset value).
 * The parameter is tagged _unused because it is only referenced inside
 * NT_LOG, which may compile to nothing in some builds — presumably; confirm
 * against the NT_LOG definition.
 */
void field_dump(const nt_field_t *p _unused)
{
	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
	       p->mn_bit_width, p->mn_words, p->m_reset_val);
}
+
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
/* Log a static field descriptor (id, bit range, width, 64-bit reset value). */
void field_dump_init(const nt_fpga_field_init_t *p _unused)
{
	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
}
+
+/*
+ * nthw fpga model helpers
+ */
+
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, n_mod, n_instance);
+	return p_mod;
+}
+
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	nt_register_t *p_reg = module_get_register(p_mod, n_reg);
+	return p_reg;
+}
+
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	nt_field_t *p_fld = register_get_field(p_reg, n_fld);
+	return p_fld;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
#ifndef FPGAID_TO_PRODUCTCODE
/* Unpack the fields of a packed 64-bit FPGA identifier. */
#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
#endif

/* Pack a major/minor version pair into one 64-bit value for comparison. */
#define VERSION_PACKED64(_major_, _minor_) \
	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))

/*
 * Debug trace selector; tested with '&' (e.g. m_debug_mode & ON_READ),
 * so ON_READ (1) and ON_WRITE (2) act as distinct bit flags.
 */
enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };

/* Bus types a module can be attached to. */
enum nthw_bus_type {
	NTHW_BUS_UNKNOWN,
	NTHW_BUS_BAR,
	NTHW_BUS_PCI,
	NTHW_BUS_NMB,
	NTHW_BUS_NDM,
	NTHW_BUS_RAB0,
	NTHW_BUS_RAB1,
	NTHW_BUS_RAB2
};
+
struct nt_fpga_s;

struct nt_param_s;

struct nt_module_s;

struct nt_register_s;

struct nt_field_s;

/* Registry of all supported FPGA product descriptions. */
struct nt_fpga_mgr_s {
	int mn_fpgas;
	struct nt_fpga_prod_init **mpa_fpga_prod_init;
};

typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;

/* Runtime model of one FPGA: identity plus its parameters and modules. */
struct nt_fpga_s {
	struct fpga_info_s *p_fpga_info;

	int m_item_id;
	int m_product_id;
	int m_fpga_version;
	int m_fpga_revision;
	int m_fpga_patch_no;
	int m_fpga_build_no;
	uint32_t m_fpga_build_time;

	int mn_params;
	struct nt_param_s **mpa_params;

	int mn_modules;
	struct nt_module_s **mpa_modules;

	nt_fpga_prod_init_t *mp_init;

	int m_debug_mode;
};

typedef struct nt_fpga_s nt_fpga_t;

/* A single product parameter (id/value pair) owned by an FPGA model. */
struct nt_param_s {
	nt_fpga_t *mp_owner;

	int param_id;
	int param_value;

	nt_fpga_prod_param_t *mp_init;
};

typedef struct nt_param_s nt_param_t;

/* A module instance: bus attachment, base address and its registers. */
struct nt_module_s {
	nt_fpga_t *mp_owner;

	int m_mod_id;

	int m_instance;

	int m_mod_def_id;
	int m_major_version;
	int m_minor_version;

	int m_bus;		/* enum nthw_bus_type */
	uint32_t m_addr_base;

	int m_debug_mode;

	int mn_registers;
	struct nt_register_s **mpa_registers;

	nt_fpga_module_init_t *mp_init;
};

typedef struct nt_module_s nt_module_t;

/* A register: layout info plus a shadow copy of the hardware contents. */
struct nt_register_s {
	nt_module_t *mp_owner;

	uint32_t m_id;

	uint32_t mn_bit_width;
	uint32_t mn_addr_rel;
	uint32_t m_addr;
	uint32_t m_type;
	uint32_t m_len;		/* length in 32-bit words */

	int m_debug_mode;

	int mn_fields;
	struct nt_field_s **mpa_fields;

	uint32_t *mp_shadow;	/* m_len words mirroring the HW register */
	bool *mp_dirty;		/* per-word modified-since-flush flags */

	nt_fpga_register_init_t *mp_init;
};

typedef struct nt_register_s nt_register_t;

/* A bit field within a register; masks precomputed by field_init(). */
struct nt_field_s {
	nt_register_t *mp_owner;

	uint32_t m_id;

	uint32_t mn_bit_width;
	uint32_t mn_bit_pos_low;
	uint32_t m_reset_val;
	uint32_t m_first_word;	/* shadow word index of the lowest bit */
	uint32_t m_first_bit;	/* bit offset within that word */
	uint32_t m_front_mask;	/* field bits within the first word */
	uint32_t m_body_length;	/* full words between front and tail */
	uint32_t mn_words;	/* words needed to hold the field value */
	uint32_t m_tail_mask;	/* field bits within the last word */

	int m_debug_mode;

	nt_fpga_field_init_t *mp_init;
};

typedef struct nt_field_s nt_field_t;
+
/* FPGA manager: lifecycle and lookup of supported FPGA descriptions. */
nt_fpga_mgr_t *fpga_mgr_new(void);
void fpga_mgr_init(nt_fpga_mgr_t *p);
void fpga_mgr_delete(nt_fpga_mgr_t *p);
nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
			     struct fpga_info_s *p_fpga_info);

void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);

/* FPGA model: lifecycle, identity queries and module lookup. */
nt_fpga_t *fpga_new(void);
void fpga_delete(nt_fpga_t *p);
void fpga_delete_all(nt_fpga_t *p);
void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
	       struct fpga_info_s *p_fpga_info);

int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
			 const int default_value);
int fpga_get_product_id(const nt_fpga_t *p);
int fpga_get_fpga_version(const nt_fpga_t *p);
int fpga_get_fpga_revision(const nt_fpga_t *p);
nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
bool fpga_query(nt_fpga_t *p, int id, int instance);
void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);

void fpga_log_info(const nt_fpga_t *p);
void fpga_dump(const nt_fpga_t *p);
void fpga_dump_params(const nt_fpga_t *p);
void fpga_dump_modules(const nt_fpga_t *p);

/* Product parameter: lifecycle and debug dump. */
nt_param_t *param_new(void);
void param_delete(nt_param_t *p);
void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);

void param_dump(const nt_param_t *p);
+
/* Module: lifecycle, version queries and register lookup. */
nt_module_t *module_new(void);
void module_delete(nt_module_t *p);
void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
		 nt_fpga_module_init_t *p_init);
void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
		  int debug_mode);

int module_get_major_version(const nt_module_t *p);
int module_get_minor_version(const nt_module_t *p);
uint64_t module_get_version_packed64(const nt_module_t *p);
bool module_is_version_newer(const nt_module_t *p, int major_version,
			   int minor_version);

int module_get_bus(const nt_module_t *p);
nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
int module_get_debug_mode(const nt_module_t *p);
void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
uint32_t module_get_addr_base(const nt_module_t *p);
void module_unsuppported(const nt_module_t *p);

void module_dump(const nt_module_t *p);
void module_dump_registers(const nt_module_t *p);

/* Register: lifecycle, shadow access and flush control. */
nt_register_t *register_new(void);
void register_delete(nt_register_t *p);
void register_init(nt_register_t *p, nt_module_t *p_module,
		   nt_fpga_register_init_t *p_init);

nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);

uint32_t register_get_address(const nt_register_t *p);
uint32_t register_get_addr_rel(const nt_register_t *p);
int register_get_bit_width(const nt_register_t *p);
/* NOTE(review): parameter type is nt_module_t — confirm this is intended. */
int register_get_debug_mode(const nt_module_t *p);
void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);

void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
uint32_t register_get_val32(const nt_register_t *p);
uint32_t register_get_val_updated32(const nt_register_t *p);

void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
			  uint32_t len);

void register_make_dirty(nt_register_t *p);
void register_update(const nt_register_t *p);
void register_reset(const nt_register_t *p);
void register_flush(const nt_register_t *p, uint32_t cnt);
void register_clr(nt_register_t *p);
void register_set(nt_register_t *p);

void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
			   uint64_t *tsc2);

void register_dump(const nt_register_t *p);
void register_dump_fields(const nt_register_t *p);
+
/* Field: lifecycle, value access, flush helpers and polled waits. */
nt_field_t *field_new(void);
void field_delete(nt_field_t *p);
void field_init(nt_field_t *p, nt_register_t *p_reg,
		const nt_fpga_field_init_t *p_init);

/* NOTE(review): parameter type is nt_module_t — confirm this is intended. */
int field_get_debug_mode(const nt_module_t *p);
void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
int field_get_bit_width(const nt_field_t *p);
int field_get_bit_pos_low(const nt_field_t *p);
int field_get_bit_pos_high(const nt_field_t *p);
uint32_t field_get_mask(const nt_field_t *p);
void field_reset(const nt_field_t *p);
uint32_t field_get_reset_val(const nt_field_t *p);
void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
		       uint32_t len);
uint32_t field_get_val_mask(const nt_field_t *p);
uint32_t field_get_val32(const nt_field_t *p);
uint32_t field_get_updated(const nt_field_t *p);
void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
void field_update_register(const nt_field_t *p);
void field_flush_register(const nt_field_t *p);
void field_set_val32(const nt_field_t *p, uint32_t val);
void field_set_val_flush32(const nt_field_t *p, uint32_t val);
void field_clr_all(const nt_field_t *p);
void field_clr_flush(const nt_field_t *p);
void field_set_all(const nt_field_t *p);
void field_set_flush(const nt_field_t *p);

/* Polled waits: -1 budget arguments select default iteration/interval. */
int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);
int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);

int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);
int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
		       int n_poll_interval);

int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
			uint32_t n_wait_cond_mask, int n_poll_iterations,
			int n_poll_interval);

void field_dump(const nt_field_t *p);
void field_dump_val(const nt_field_t *p);
void field_dump_init(const nt_fpga_field_init_t *p);

/*
 * nthw helpers
 */
nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
#ifndef ARRAY_SIZE
/* Element count of a true array; invalid on pointers/decayed parameters. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	switch (n_pci_device_id) {
+	case NT_HW_PCI_DEVICE_ID_NT40E3:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT100E3:
+		return NT_HW_ADAPTER_ID_NT100E3;
+	case NT_HW_PCI_DEVICE_ID_NT80E3:
+		return NT_HW_ADAPTER_ID_NT80E3;
+	case NT_HW_PCI_DEVICE_ID_NT40A00:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT40A01:
+		return NT_HW_ADAPTER_ID_NT40E3;
+	case NT_HW_PCI_DEVICE_ID_NT200E3:
+		return NT_HW_ADAPTER_ID_NT200E3;
+	case NT_HW_PCI_DEVICE_ID_NT200A01:
+		return NT_HW_ADAPTER_ID_NT200A01;
+	case NT_HW_PCI_DEVICE_ID_NT200D01:
+		return NT_HW_ADAPTER_ID_NT200D01;
+	case NT_HW_PCI_DEVICE_ID_NT200A02:
+		return NT_HW_ADAPTER_ID_NT200A02;
+	case NT_HW_PCI_DEVICE_ID_NT50B01:
+		return NT_HW_ADAPTER_ID_NT50B01;
+	case NT_HW_PCI_DEVICE_ID_NT100A01:
+		return NT_HW_ADAPTER_ID_NT100A01;
+	default:
+		return NT_HW_ADAPTER_ID_UNKNOWN;
+	}
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
/* Napatech PCI vendor id. */
#define NT_HW_PCI_VENDOR_ID (0x18f4)

/* PCI device ids of the supported adapters. */
#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)

/*
 * Adapter family ids. Note that NT40A01 deliberately aliases NT40E3
 * (same family), so these values are not all distinct.
 */
enum nthw_adapter_id_e {
	NT_HW_ADAPTER_ID_UNKNOWN = 0,
	NT_HW_ADAPTER_ID_NT40E3,
	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
	NT_HW_ADAPTER_ID_NT50B01,
	NT_HW_ADAPTER_ID_NT80E3,
	NT_HW_ADAPTER_ID_NT100E3,
	NT_HW_ADAPTER_ID_NT100A01,
	NT_HW_ADAPTER_ID_NT200E3,
	NT_HW_ADAPTER_ID_NT200A01,
	NT_HW_ADAPTER_ID_NT200D01,
	NT_HW_ADAPTER_ID_NT200A02,
};

typedef enum nthw_adapter_id_e nthw_adapter_id_t;

/* Map a PCI device id to its adapter family; UNKNOWN when unrecognized. */
nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
/* FPGA feature profile reported for an image. */
enum fpga_info_profile {
	FPGA_INFO_PROFILE_UNKNOWN = 0,
	FPGA_INFO_PROFILE_VSWITCH = 1,
	FPGA_INFO_PROFILE_INLINE = 2,
	FPGA_INFO_PROFILE_CAPTURE = 3,
};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ * (both branches undef on purpose: echo tracing is opt-in by editing here).
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+/* Max 1-usec polls for a DMA completion word before giving up */
+#define RAB_DMA_WAIT (1000000)
+/* Words per DMA ring half (power of two - used as pointer wrap mask) */
+#define RAB_DMA_BUF_CNT (0x4000)
+
+/* RAB command opcodes, placed in the OPR field of a command word */
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+/*
+ * RAB command word layout (LO/HI = bit positions, BW = field width):
+ * opcode [31:28], word count [27:20], bus id [19:16], address [15:0]
+ */
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ *
+ * Returns the new instance, or NULL on allocation failure (the original
+ * passed an unchecked malloc() result to memset(), dereferencing NULL on
+ * out-of-memory; calloc() both zeroes and lets the caller see the failure).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	return calloc(1, sizeof(nthw_rac_t));
+}
+
+/* Scrub and release a RAC instance; a NULL argument is a no-op. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (!p)
+		return;
+
+	/* Clear the instance before freeing so stale pointers are obvious */
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Look up all RAC registers/fields and initialize the RAC instance.
+ *
+ * If p is NULL only the presence of the RAC module is probed (0 if present,
+ * -1 if not). Returns 0 on success, -1 if the RAC module is missing.
+ *
+ * Fixes vs. the original:
+ * - the debug-mode transfer block ran before mp_reg_rab_init was looked up,
+ *   passing a NULL register (zeroed allocation) to register_set_debug_mode();
+ *   it now runs after the lookup.
+ * - the RAC_DBG_DATA else-branch cleared mp_reg_dbg_data (already NULL)
+ *   instead of mp_fld_dbg_data.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 * (must run after mp_reg_rab_init has been looked up above)
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw BAR0 addresses for the fast register access helpers */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional non-memory-mapped bus window registers */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces, as read from the FPGA product parameters. */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Poll the RAB OB_USED counter until at least word_cnt words are available
+ * in the output buffer. Returns 0 on success, -1 if the words never arrive.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t ob_used = 0;
+	uint32_t attempt;
+
+	for (attempt = 0; attempt < 100000; attempt++) {
+		uint32_t raw;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &raw);
+		ob_used = (raw & p->rac_rab_buf_used_ob_used_mask) >>
+			  p->rac_rab_buf_used_ob_used_low;
+		if (ob_used >= word_cnt)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+	       p_adapter_id_str, address, ob_used, word_cnt);
+	return -1;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be exercised - as all RAB modules will be held in reset.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 * (the field write goes through the traced register model, the raw
+	 * BAR0 write below performs the actual hardware access)
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * RAB bus "flip/flip" reset: pulse the RAB_INIT bits for every bus
+ * (0 -> all buses -> all-but-bus-0), per RMT#37020. Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the RAB DMA ring and program its physical address into
+ * the adapter. One contiguous allocation holds the input (IB) half followed
+ * by the output (OB) half, RAB_DMA_BUF_CNT words each.
+ * Returns 0 on success, -1 if the DMA allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter: IB half first, OB half right after it */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers: sync the software write
+	 * pointer to the hardware IB read pointer, and the software read
+	 * pointer to the hardware OB write pointer (byte -> word index).
+	 */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Open a DMA transaction. On success the mutex stays held until
+ * nthw_rac_rab_dma_commit() releases it. Returns 0, or -1 if a
+ * transaction is already in progress.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;
+	}
+
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_adapter_id_str);
+	return -1;
+}
+
+/*
+ * Finalize the queued command stream: append a completion word to the input
+ * ring, clear the matching output slot, and write the IB write pointer so
+ * the FPGA starts processing.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word so dma_wait() sees a fresh value */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* Update DMA pointer and start transfer (pointer is in bytes) */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Busy-wait (1 usec polls, at most RAB_DMA_WAIT tries) for the completion
+ * word to appear in the output ring, then consume it and reopen the full
+ * input ring. Returns 0 on completion, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word; all queued input is now processed */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Start the queued DMA commands and wait for completion, then close the
+ * transaction opened by nthw_rac_rab_dma_begin() (releases the mutex).
+ * Returns the wait status (0 ok, -1 timeout), or -1 if no transaction
+ * was active.
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int status;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	status = nthw_rac_rab_dma_wait(p);
+	p->m_dma_active = false;
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return status;
+}
+
+/* Volatile 32-bit load from the BAR0 register space at reg_addr. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const uint8_t *const base = (const uint8_t *)p_fpga_info->bar0_addr;
+
+	*p_data = *(const volatile uint32_t *)(base + reg_addr);
+}
+
+/* Volatile 32-bit store to the BAR0 register space at reg_addr. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	uint8_t *const base = (uint8_t *)p_fpga_info->bar0_addr;
+
+	*(volatile uint32_t *)(base + reg_addr) = p_data;
+}
+
+/*
+ * Queue a RAB write (command word + word_cnt payload words) into the DMA
+ * input ring. Nothing is sent until nthw_rac_rab_dma_commit() is called.
+ * Returns 0 when queued, -1 on bad length or insufficient ring space.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/* +3: command word + payload plus headroom for the completion word */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Reserve command word + payload */
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	/* echo mode: the write is echoed back, so skip it in the output ring */
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload into the ring, wrapping at RAB_DMA_BUF_CNT */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read command into the DMA input ring and return (via buf_ptr)
+ * where the word_cnt result words will land in the output ring once
+ * nthw_rac_rab_dma_commit() has run.
+ * Returns 0 when queued, -1 on bad length or insufficient ring space.
+ *
+ * Fix vs. the original: the buffer-size error log had five conversion
+ * specifiers but only four arguments (undefined behavior) and no newline.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Only the command word occupies the input ring for a read */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Tell the caller where the read data will appear in the output ring */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write: push word_cnt words to 'address' on RAB
+ * bus 'bus_id' through the IB/OB register window and wait for the
+ * completion word. Returns 0 on success, -1 on parameter error, active DMA,
+ * buffer-state error, completion mismatch or RAB timeout.
+ *
+ * Fix vs. the original: the address and bus-id range checks were off by one
+ * ('>' instead of '>='), admitting values that overflow their command-word
+ * fields (e.g. address 0x10000 spills into the bus-id bits).
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	if (address >= (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW) - 1);
+		return -1;
+	}
+
+	if (bus_id >= (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW) - 1);
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	/* word_cnt == 256 is accepted and masked to 0 in the CNT field below;
+	 * presumably the hardware decodes 0 as 256 - TODO confirm.
+	 */
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write data to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register; bit 31 flags timeout/overflow */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Synchronous (non-DMA) RAB read: fetch word_cnt words from 'address' on
+ * RAB bus 'bus_id' through the IB/OB register window.
+ * Returns 0 on success, -1 on parameter error, buffer-state error or RAB
+ * timeout.
+ *
+ * Fixes vs. the original:
+ * - address/bus-id range checks were off by one ('>' instead of '>='),
+ *   admitting values that overflow their command-word fields.
+ * - the non-echo command composition did not mask word_cnt to the CNT field
+ *   width, so word_cnt == 256 corrupted the bus-id bits; it is now masked
+ *   like every other command-composition site in this file.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (address >= (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW) - 1);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id >= (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW) - 1);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* word_cnt == 256 is accepted and masked to 0 in the CNT field below;
+	 * presumably the hardware decodes 0 as 256 - TODO confirm.
+	 */
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) |
+			    ((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register; bit 31 flags timeout/overflow */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB buffers: raise the flush bit, wait until both IB_USED and
+ * OB_USED drain to zero, then clear the flush bit again.
+ * Returns 0 on success, -1 if the buffers never drained.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t buf_used = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Raise the flush bit and clear any stale free-buffer status */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Poll until only the flush bit remains set */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr,
+				  &buf_used);
+		if (buf_used == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (buf_used != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
/*
 * RAC (Register Access Control) instance state.
 *
 * Holds the FPGA module handle, resolved register/field handles for the
 * RAB (Register Access Bus), pre-computed register addresses and field
 * masks for direct access, and the state of the optional RAB DMA ring.
 * m_mutex serializes all RAB transactions on the instance.
 */
struct nthw_rac {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_rac;

	/* Serializes all RAB accesses on this instance */
	pthread_mutex_t m_mutex;

	/* Product parameters */
	int mn_param_rac_rab_interfaces;
	int mn_param_rac_rab_ob_update;

	nt_register_t *mp_reg_dummy0;
	nt_register_t *mp_reg_dummy1;
	nt_register_t *mp_reg_dummy2;

	nt_register_t *mp_reg_rab_init;
	nt_field_t *mp_fld_rab_init;

	/* Bit-width and mask of the RAB_INIT field */
	int mn_fld_rab_init_bw;
	uint32_t mn_fld_rab_init_mask;

	nt_register_t *mp_reg_dbg_ctrl;
	nt_field_t *mp_fld_dbg_ctrl;

	nt_register_t *mp_reg_dbg_data;
	nt_field_t *mp_fld_dbg_data;

	/* RAB in-buffer (IB) / out-buffer (OB) data registers */
	nt_register_t *mp_reg_rab_ib_data;
	nt_field_t *mp_fld_rab_ib_data;

	nt_register_t *mp_reg_rab_ob_data;
	nt_field_t *mp_fld_rab_ob_data;

	nt_register_t *mp_reg_rab_buf_free;
	nt_field_t *mp_fld_rab_buf_free_ib_free;
	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
	nt_field_t *mp_fld_rab_buf_free_ob_free;
	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
	nt_field_t *mp_fld_rab_buf_free_timeout;

	nt_register_t *mp_reg_rab_buf_used;
	nt_field_t *mp_fld_rab_buf_used_ib_used;
	nt_field_t *mp_fld_rab_buf_used_ob_used;
	nt_field_t *mp_fld_rab_buf_used_flush;

	/* RAB DMA ring base addresses (lo/hi) and read/write pointers */
	nt_register_t *mp_reg_rab_dma_ib_lo;
	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_hi;
	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_hi;
	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;

	nt_register_t *mp_reg_rab_dma_ob_lo;
	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;

	nt_register_t *mp_reg_rab_dma_ib_wr;
	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;

	nt_register_t *mp_reg_rab_dma_ib_rd;
	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;

	nt_register_t *mp_reg_rab_dma_ob_wr;
	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;

	/* RAB NMB register handles */
	nt_register_t *mp_reg_rab_nmb_rd;
	nt_register_t *mp_reg_rab_nmb_data;
	nt_register_t *mp_reg_rab_nmb_wr;
	nt_register_t *mp_reg_rab_nmb_status;

	/* Pre-resolved register addresses for direct (non-model) access */
	uint32_t rac_rab_init_addr;
	uint32_t rac_rab_ib_data_addr;
	uint32_t rac_rab_ob_data_addr;
	uint32_t rac_rab_buf_free_addr;
	uint32_t rac_rab_buf_used_addr;

	uint32_t rac_rab_dma_ib_lo_addr;
	uint32_t rac_rab_dma_ib_hi_addr;
	uint32_t rac_rab_dma_ob_lo_addr;
	uint32_t rac_rab_dma_ob_hi_addr;
	uint32_t rac_rab_dma_ib_rd_addr;
	uint32_t rac_rab_dma_ob_wr_addr;
	uint32_t rac_rab_dma_ib_wr_addr;

	/* Pre-resolved field masks */
	uint32_t rac_rab_buf_free_ib_free_mask;
	uint32_t rac_rab_buf_free_ob_free_mask;
	uint32_t rac_rab_buf_used_ib_used_mask;
	uint32_t rac_rab_buf_used_ob_used_mask;
	uint32_t rac_rab_buf_used_flush_mask;

	uint32_t rac_rab_buf_used_ob_used_low;

	uint32_t rac_nmb_rd_adr_addr;
	uint32_t rac_nmb_data_addr;
	uint32_t rac_nmb_wr_adr_addr;
	uint32_t rac_nmb_status_addr;

	/* RAB DMA ring state */
	bool m_dma_active;

	struct nt_dma_s *m_dma;

	volatile uint32_t *m_dma_in_buf;
	volatile uint32_t *m_dma_out_buf;

	uint16_t m_dma_out_ptr_rd;
	uint16_t m_dma_in_ptr_wr;
	uint32_t m_in_free;
};

typedef struct nthw_rac nthw_rac_t;
typedef struct nthw_rac nthw_rac;
+
/*
 * Cursor into a RAB DMA buffer.
 * NOTE(review): presumably 'index' is the next entry within 'base' and
 * 'size' the total entry count -- confirm against the dma read/write path.
 */
struct dma_buf_ptr {
	uint32_t size;
	uint32_t index;
	volatile uint32_t *base;
};
+
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+nthw_stat_t *nthw_stat_new(void)
+{
+	nthw_stat_t *p = malloc(sizeof(nthw_stat_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_stat_t));
+	return p;
+}
+
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	if (p)
+		free(p);
+}
+
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical, p->mp_stat_dma_virtual,
+	       p->mp_timestamp);
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	int n_toggle_miss = field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	if (n_toggle_miss)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
/*
 * STA (statistics) module instance state.
 *
 * The counter-layout geometry is derived in nthw_stat_init() from the
 * module version and product parameters; the DMA area written by the
 * FPGA holds m_nb_counters 32-bit counters followed by a 64-bit
 * timestamp.
 */
struct nthw_stat {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_stat;
	int mn_instance;

	int mn_stat_layout_version;

	/* Set when the FPGA profile is FPGA_INFO_PROFILE_VSWITCH */
	bool mb_is_vswitch;
	bool mb_has_tx_stats;

	int m_nb_phy_ports;
	int m_nb_nim_ports;

	int m_nb_rx_ports;
	int m_nb_tx_ports;

	int m_nb_rx_host_buffers;
	int m_nb_tx_host_buffers;

	int m_dbs_present;

	int m_rx_port_replicate;

	int m_nb_color_counters;

	/* Per-host-buffer counter counts */
	int m_nb_rx_hb_counters;
	int m_nb_tx_hb_counters;

	/* Per-port counter counts */
	int m_nb_rx_port_counters;
	int m_nb_tx_port_counters;

	/* Total number of 32-bit counters in the DMA area */
	int m_nb_counters;

	nt_field_t *mp_fld_dma_ena;
	nt_field_t *mp_fld_cnt_clear;

	/* NOT available from STA module v. 3 on; may be NULL */
	nt_field_t *mp_fld_tx_disable;

	nt_field_t *mp_fld_cnt_freeze;

	nt_field_t *mp_fld_stat_toggle_missed;

	nt_field_t *mp_fld_dma_lsb;
	nt_field_t *mp_fld_dma_msb;

	/* Statistics DMA area: device (physical) and CPU addresses */
	uint64_t m_stat_dma_physical;
	uint32_t *mp_stat_dma_virtual;

	uint64_t last_ts;

	/* Points just past the counter array inside the DMA area */
	uint64_t *mp_timestamp;
};

typedef struct nthw_stat nthw_stat_t;
typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
/* Log severity bit values, in decreasing order of importance. */
enum nt_log_level {
	NT_LOG_ERR = 0x001,
	NT_LOG_WRN = 0x002,
	NT_LOG_INF = 0x004,
	NT_LOG_DBG = 0x008,
	NT_LOG_DB1 = 0x010,
	NT_LOG_DB2 = 0x020,
};

/* Backend callbacks registered through nt_log_init(). */
struct nt_log_impl {
	int (*init)(void);
	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
		   va_list args);
	int (*is_debug)(uint32_t module);
};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+int nt_log_init(struct nt_log_impl *impl)
+{
+	user_impl = impl;
+	return user_impl->init();
+}
+
/*
 * Return a pointer to the first '\n' of the trailing end-of-line run in
 * 's' (trailing spaces ignored), or NULL when the string does not end
 * with a newline.  E.g. for "hello_world\n\n\n" the first of the three
 * newlines is returned.
 */
static char *last_trailing_eol(char *s)
{
	int i = (int)strlen(s) - 1;

	/* Skip spaces; i can end up negative for an empty string */
	while (i > 0 && s[i] == ' ')
		--i;
	/* Guard i < 0: the original read s[-1] for an empty string (UB) */
	if (i < 0 || s[i] != '\n')
		return NULL;
	/*
	 * Find the last trailing EOL "hello_world\n\n\n"
	 *                                         ^
	 */
	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
		--i;
	return &s[i];
}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+int nt_log_is_debug(uint32_t module)
+{
+	return user_impl->is_debug(module);
+}
+
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *s = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (!s)
+		return NULL;
+	if (sinit)
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+	else
+		s[0] = '\0';
+	return s;
+}
+
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s) {
+		if (sinit)
+			rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+		else
+			s[0] = '\0';
+	}
+}
+
+__rte_format_printf(2, 0)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
/* Release a helper string from ntlog_helper_str_alloc(); NULL is allowed. */
void ntlog_helper_str_free(char *s)
{
	free(s);
}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
/* One VFIO/DMA-mapped allocation. */
struct nt_dma_s {
	uint64_t iova;	/* I/O virtual address seen by the device */
	uint64_t addr;	/* CPU virtual address (stored as uint64_t) */
	uint64_t size;	/* mapped size, aligned up by the allocator */
};

/* Allocate/free DMA-able memory mapped via the registered VFIO callbacks */
struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
void nt_dma_free(struct nt_dma_s *vfio_addr);

/* Callbacks performing the actual VFIO DMA (un)mapping. */
struct nt_util_vfio_impl {
	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
			    uint64_t size);
	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
			      uint64_t size);
};
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
/*
 * Register the VFIO mapping callbacks used by nt_dma_alloc()/nt_dma_free().
 * The callback table is copied by value.
 */
void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
{
	vfio_cb = *impl;
}
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%u, align=0x%X\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64 ", size=%u\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%u\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v14 3/8] net/ntnic: adds NT200A02 adapter support
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-09-04 13:53   ` Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:53 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		monitor_task_is_running[i] = 0;
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *sensors_s_spi = nthw_spi_v3_new();
+
+	if (sensors_s_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(sensors_s_spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(sensors_s_spi);
+		return NULL;
+	}
+
+	return sensors_s_spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *sensors_t_spi = nthw_spis_new();
+	/* init SPI for sensor initialization process */
+	if (sensors_t_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(sensors_t_spi, p_fpga, 0)) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(sensors_t_spi);
+		return NULL;
+	}
+
+	return sensors_t_spi;
+}
+
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id;
+	int hw_product_type;
+	int hw_reserved1;
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;
+	int n_queues_per_intf_cnt;
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const bool nim_present = p_link->link_state[port].nim_present;
+	return nim_present;
+}
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const bool adm_state = !p_link->port_action[port].port_disable;
+	return adm_state;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	bool status = p_link->link_state[port].link_up;
+	return status;
+}
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_speed = speed;
+	p_link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nt_link_speed_t speed = p_link->link_info[port].link_speed;
+	return speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	nt4ga_link_t *const p_link _unused = &p->nt4ga_link;
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nt_link_duplex_t duplex = p_link->link_info[port].link_duplex;
+	return duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	p_link->port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	return p_link->port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	nim_i2c_ctx_t nim_ctx = p_link->u.var100g.nim_ctx[port];
+	return nim_ctx;
+}
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.\n
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Callback functions to setup mac, pcs and phy
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port:s link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+/*
+ * Phases of the link-up sequence observed by the monitoring thread.
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+/* Per-port monitoring context. NOTE(review): not referenced in this file;
+ * presumably consumed by other link monitor variants - confirm before removal.
+ */
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state; /* current phase of link bring-up */
+	enum nt_link_state_e link_state; /* latest observed link state */
+	enum nt_link_state_e latch_link_state; /* latched (sticky) link state */
+	int m_time_out; /* presumably a timeout/countdown counter - confirm */
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback mode on one 100G port.
+ * mode / last_mode encoding: 1 = host loopback, 2 = line loopback,
+ * anything else = no loopback (removes whatever last_mode had set).
+ * On NT200A01 HW2 / NT200A02 the GTY polarity swap is re-applied, and the
+ * RX path is always reset afterwards so the new mode takes effect.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	/* Host loopback runs without the polarity swap; all other paths use it */
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* No new loopback requested - undo the previous one, if any */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	/* NT200A01 build 2 and NT200A02 boards need the GTY polarity swap */
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear stale counters once the RX path is out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Function to retrieve the current state of a link (for one port)
+ */
+/*
+ * Build the current link state of one port into *state.
+ * Reads the MAC/PCS link summary and the GPIO module-present pin; the
+ * summary values are logged (DBG) only when they differ from the last
+ * logged summary for this adapter/port, to avoid log flooding.
+ * Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Cache of the last summary logged, per adapter and port */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		/* Note: "Port = %d" prints the MAC/PCS instance number */
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			/* Defensive: guarantee termination of the cache slot */
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check whether a NIM module is present
+ */
+/* True when a NIM module is seated in the cage of interface if_no. */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+}
+
+/*
+ * Enable RX
+ */
+/* Turn on the RX data path of the 100G MAC/PCS. Always returns 0. */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for signature symmetry with the other helpers */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX
+ */
+/* Turn on the TX data path and select the host as TX source. Always 0. */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for signature symmetry with the other helpers */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX
+ */
+/* Turn off the RX data path of the 100G MAC/PCS. Always returns 0. */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for signature symmetry with the other helpers */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX
+ */
+/* Turn off the TX data path and deselect the host TX source. Always 0. */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* kept for signature symmetry with the other helpers */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX
+ */
+/*
+ * Pulse the RX path reset: assert, hold 10 ms, release, settle 10 ms.
+ * Always returns 0.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* adapter handle not needed for the reset pulse */
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ */
+/*
+ * Apply (swap == true) or clear (swap == false) the per-lane GTY TX/RX
+ * polarity swap for one port.
+ * The polarity tables only cover the two ports of NT200A01-HW2/NT200A02
+ * boards; a port outside 0-1 with swap == true would read out of bounds,
+ * so such a call is rejected with -1. Returns 0 on success.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+
+	/* Guard the table lookup - tables only describe ports 0 and 1 */
+	if (swap && (port < 0 || port > 1))
+		return -1;
+
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ */
+/*
+ * Periodic health check once the link is expected up: if the MAC/PCS
+ * requests a reset, the bit error rate is high, or not all FEC alignment
+ * markers are locked, the RX path is reset. Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool needs_reset = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool all_am_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (needs_reset || hi_ber || !all_am_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ */
+/*
+ * Bring up (enable == true) or power down (enable == false) the NIM module
+ * of `port`: GPIO reset, I2C pre-init, type validation, low-power control.
+ * Returns 0 on success or when no module is present; negative when the
+ * module vanished after reset, I2C pre-init failed, or the type is
+ * unsupported.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* only this NIM type is supported */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		/* Quiesce the port before powering the module down */
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	/* The module may have disappeared while held in reset */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present!\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ */
+/*
+ * Full bring-up of one 100G port: state variables, MAC/PCS configuration,
+ * GTY polarity/tuning per board revision, NIM creation and timestamp
+ * compensation. Returns 0 on success, non-zero if NIM init fails.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* -1 markers select the "unknown board" path in the tuning below */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* NOTE(review): nthw_gmf_init(NULL, ...) looks like a presence probe
+	 * before the real init - confirm NULL is a supported probe argument.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Board-revision specific GTY TX tuning (pre, diff, post) */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		/* Same tuning for all lanes on these boards */
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ */
+/*
+ * Monitoring loop shared between kernel and userland builds.
+ * Every 0.5 s it reads adapter/NIM sensors and, per port, handles
+ * administrative enable/disable, loopback mode changes, NIM
+ * insertion/removal and link up/down transitions. Runs until
+ * monitor_task_is_running[adapter_no] is cleared. Always returns 0.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* Log this port's NIM context, not port 0's */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ */
+/* pthread entry point: runs the shared monitor loop; result is unused. */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ */
+/*
+ * One-time initialization of all 100G ports of an adapter: sets up the
+ * per-port MAC/PCS, NIM I2C and GPIO-PHY contexts (once, guarded by
+ * variables_initialized), then starts the link monitoring thread if it is
+ * not already running. Returns 0 on success or the first failing
+ * sub-init/pthread_create result.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			/* NOTE(review): MAC/PCS uses instance i while IIC
+			 * uses 2 + i - confirm this mapping is intentional.
+			 */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..5cbe7fcae9
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Initialize the PCI test-accelerator (TA) and read/write traffic
+ * generator (TG) modules of an adapter.
+ * Returns -1 if the context pointer is NULL; otherwise the number of
+ * modules that could not be found/initialized (0 means all present).
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	/* A missing module is logged and counted but is not fatal */
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (non-zero) or disable the test-accelerator counters. Always 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Fetch the TA bad-packet counter into *p_out. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_out)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_out);
+	return 0;
+}
+
+/* Fetch the TA good-packet counter into *p_out. Always returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_out)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_out);
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one RAM slot of the PCI read traffic generator.
+ * The slot's DMA address is iova plus slot_addr * req_size. Always 0.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t slot_phys_addr =
+		iova + (unsigned long)(slot_addr * req_size);
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, slot_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+
+	return 0;
+}
+
+/* Start the PCI read TG for num_iterations (0 stops it). Always 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the PCI RD TG ready flag, sleeping 1 ms before each read so any
+ * in-flight FPGA traffic settles first.
+ * Returns 0 when ready, -1 after 1000 polls (~1 s) without the flag.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll;
+
+	for (poll = 0; poll < 1000; poll++) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg) != 0)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+	       __func__, poll);
+	return -1;
+}
+
+/*
+ * Program one RAM slot of the PCI write traffic generator.
+ * The slot's DMA address is iova plus slot_addr * req_size. Always 0.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t slot_phys_addr =
+		iova + (unsigned long)(slot_addr * req_size);
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, slot_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the PCI write TG for num_iterations (0 stops it). Always 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll the PCI WR TG ready flag, sleeping 1 ms before each read so any
+ * in-flight FPGA traffic settles first.
+ * Returns 0 when ready, -1 after 1000 polls (~1 s) without the flag.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll;
+
+	for (poll = 0; poll < 1000; poll++) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		if (nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg) != 0)
+			return 0;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+	       __func__, poll);
+	return -1;
+}
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_root_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_endpoint_instance = NULL;
+
+	nthw_pcie3 *p_pci_root = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_endpoint = NULL;
+
+	assert(p_root_instance || p_pci_root);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_root,
+								    pri);
+			}
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_endpoint,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_enable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_enable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_enable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_enable(p_pci_endpoint);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_disable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_disable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_disable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_disable(p_pci_endpoint);
+
+			/* Post process root */
+			if (p_root_instance) {
+				nthw_hif_end_point_counters_sample(p_root_instance,
+							       pri);
+			}
+
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_root,
+								     pri);
+			}
+
+			/* Post process endpoint */
+			if (p_endpoint_instance) {
+				nthw_hif_end_point_counters_sample(p_endpoint_instance,
+							       sla);
+			}
+
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_endpoint,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_root_instance) {
+					nthw_hif_stat_req_enable(p_root_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_root_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Initialize one end-point counter set prior to a measurement pass.
+ *
+ * Factored out of nt4ga_pci_ta_tg_measure_throughput_main(), where two
+ * near-identical init blocks for pri/sla contained a copy-paste bug:
+ * sla's n_ref_clk_cnt was written through the pri pointer.
+ */
+static void nt4ga_pci_ta_tg_init_ep_counters(struct nthw_hif_end_point_counters *p_epc,
+					const uint8_t n_numa, const int n_dir,
+					const int n_pkt_size,
+					const int n_batch_count,
+					const int n_delay)
+{
+	p_epc->n_numa_node = n_numa;
+	p_epc->n_tg_direction = n_dir;
+	/* Fall back to the build-time defaults when the caller passes <= 0 */
+	p_epc->n_tg_pkt_size = (n_pkt_size > 0 ? n_pkt_size : TG_PKT_SIZE);
+	p_epc->n_tg_num_pkts =
+		(n_batch_count > 0 ? n_batch_count : TG_NUM_PACKETS);
+	p_epc->n_tg_delay = (n_delay > 0 ? n_delay : TG_DELAY);
+	p_epc->cur_rx = 0;
+	p_epc->cur_tx = 0;
+	p_epc->n_ref_clk_cnt = -1;
+	p_epc->bo_error = 0;
+}
+
+/*
+ * Measure PCI throughput for the requested NUMA node(s) and direction(s).
+ *
+ * numa_node == UINT8_MAX requests all NUMA nodes; direction <= 0 requests
+ * all directions (1 = read-only, 2 = write-only, 3 = combined).
+ * Returns -1 when n_delay is 0 (invalid), otherwise 0.
+ * NOTE(review): measurement errors are logged but the function still
+ * returns 0 - confirm callers do not rely on a non-zero error return.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			int by_loop;
+
+			for (by_loop = dir_begin; by_loop <= dir_end;
+					by_loop++) {
+				struct nthw_hif_end_point_counters *pri =
+						&eps.pri;
+				struct nthw_hif_end_point_counters *sla =
+						&eps.sla;
+
+				/*
+				 * Single init helper for both end points; the
+				 * previous open-coded copies set pri's
+				 * n_ref_clk_cnt twice and never sla's.
+				 */
+				nt4ga_pci_ta_tg_init_ep_counters(pri, numa,
+								 by_loop,
+								 n_pkt_size,
+								 n_batch_count,
+								 n_delay);
+				nt4ga_pci_ta_tg_init_ep_counters(sla, numa,
+								 by_loop,
+								 n_pkt_size,
+								 n_batch_count,
+								 n_delay);
+
+				bo_error +=
+				nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+								       pri, sla);
+#if defined(DEBUG) && (1)
+				NT_LOG(DBG, NTHW,
+				       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+				       __func__, pri->n_numa_node,
+				       pri->n_tg_direction,
+				       pri->n_tg_num_pkts,
+				       pri->n_tg_pkt_size,
+				       pri->n_tg_delay,
+				       pri->cur_rx, pri->cur_tx,
+				       (pri->cur_rx * 8UL / 1000000UL),
+				       (pri->cur_tx * 8UL / 1000000UL));
+				NT_LOG(DBG, NTHW,
+				       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+				       __func__, sla->n_numa_node,
+				       sla->n_tg_direction,
+				       sla->n_tg_num_pkts,
+				       sla->n_tg_pkt_size,
+				       sla->n_tg_delay,
+				       sla->cur_rx, sla->cur_tx,
+				       (sla->cur_rx * 8UL / 1000000UL),
+				       (sla->cur_tx * 8UL / 1000000UL));
+#endif
+
+				if (pri->bo_error != 0 || sla->bo_error != 0)
+					bo_error++;
+				/* Abort remaining directions/nodes on first error */
+				if (bo_error)
+					break;
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+/* Enable the debug summary output of the test accessor / traffic generator */
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator parameters, used when a caller passes 0/negative */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles for the PCI read/write traffic generators and the test accessor */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg;
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg;
+	struct nthw_pci_ta *mp_nthw_pci_ta;
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Module init - see nt4ga_pci_ta_tg.c for the implementation */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run a single measurement pass for the given primary/slave end points */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/*
+ * Measure throughput; numa_node == UINT8_MAX means all NUMA nodes and
+ * direction <= 0 means all directions (1 = read, 2 = write, 3 = combined).
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Convert an in-line hardware timestamp (pcap-style 32:32 split:
+ * seconds in the upper 32 bits, nanoseconds in the lower 32 bits)
+ * into a single nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	const uint64_t secs = ts >> 32;
+	const uint64_t nsecs = ts & 0xffffffff;
+
+	return secs * 1000000000 + nsecs;
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Snapshot the adapter statistics: record last_timestamp and fold the DMA
+ * counter area into the host-side structures (virt layout for vswitch
+ * FPGAs, cap layout otherwise).  Always returns 0.
+ * NOTE(review): presumably called with the stat mutex held, like the
+ * collect helpers below - confirm against the callers.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/* Capture FPGAs deliver a usable 32:32 timestamp in the DMA block */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Allocate the STA and RMC module handles for this adapter's FPGA and
+ * cache the host-buffer and port counts in the nt4ga_stat state.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		if (!p_nthw_rmc) {
+			/* Avoid leaking the STA handle when RMC allocation failed */
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache dimensioning info used by setup/collect */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and wire up the DMA area used by the FPGA STA module, allocate
+ * the host-side counter structures, and trigger the first statistics
+ * transfer.  Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): on a late calloc failure the earlier allocations are left
+ * for nt4ga_stat_stop() to free - confirm it runs on the error path.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Hold traffic back while the DMA address is being (re)programmed */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * Fixed format string: it previously had six conversions
+		 * (%s %x %d %p PRIX64 PRIX64) for five arguments, and
+		 * printed the 64-bit addr with %p.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/* %02d: adapter_no is an int; PRIX32: n_stat_size is uint32_t */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison pattern so un-written counters are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* One counter set per host buffer, Rx buffers first, then Tx */
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* Vswitch FPGAs have no FLM statistics */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick off the first statistics DMA transfer */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release every statistics resource owned by nt4ga_stat and clear the
+ * pointers so a repeated call is harmless.  Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* free(NULL) is a no-op, so no per-pointer guards are needed */
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() is a project helper; keep the NULL guard */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Dump the accumulated per-port Rx/Tx totals (packets, octets, drops)
+ * for each physical port to the given stream.  Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int n_intf;
+
+	for (n_intf = 0; n_intf < fpga_info->n_phy_ports; n_intf++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, n_intf,
+			p_nt4ga_stat->a_port_rx_packets_total[n_intf],
+			p_nt4ga_stat->a_port_rx_octets_total[n_intf],
+			p_nt4ga_stat->a_port_rx_drops_total[n_intf],
+			p_nt4ga_stat->a_port_tx_packets_total[n_intf],
+			p_nt4ga_stat->a_port_tx_octets_total[n_intf],
+			p_nt4ga_stat->a_port_tx_drops_total[n_intf]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Fold the vswitch (virt v1) statistics layout of the DMA area into the
+ * host-side counter structures.  Host-side counters are accumulated with
+ * += across calls.  Returns 0 on success, -1 on missing state or an
+ * unsupported STA layout version.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	/*
+	 * Validate before use - the previous version dereferenced
+	 * p_nt4ga_stat->mp_nthw_stat ahead of its own NULL check.
+	 */
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: each entry is a (packets | tcp_flags, bytes) pair */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		/* Lower 26 bits are the packet count, upper 6 the TCP flags */
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: 8 words per buffer (4 packet, 4 byte counts) */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  42] :
+			0;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  47] :
+			0;
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53];
+
+		/* Rx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] +
+			(p_nthw_stat->m_dbs_present ?
+			 p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					   42] :
+			 0);
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23];
+
+		/* Tx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
/* Per-color flow statistics (up to NT_MAX_COLOR_FLOW_STATS colors). */
struct color_counters {
	uint64_t color_packets; /* packets counted for this color */
	uint64_t color_bytes; /* bytes counted for this color */
	uint8_t tcp_flags; /* TCP flags seen; NOTE(review): OR'ed vs last-value semantics not visible here */
};
+
/* Per host-buffer counters: flushed, dropped and forwarded traffic,
 * in packets and bytes, including drops attributed to the DBS block.
 */
struct host_buffer_counters {
	uint64_t flush_packets;
	uint64_t drop_packets;
	uint64_t fwd_packets;
	uint64_t dbs_drop_packets;
	uint64_t flush_bytes;
	uint64_t drop_bytes;
	uint64_t fwd_bytes;
	uint64_t dbs_drop_bytes;
};
+
/*
 * Per-port counters, v2 layout (capability/"cap" profile).
 * All fields are free-running accumulators added to on every statistics
 * poll; the "Rx only" group is not written for Tx ports.
 */
struct port_counters_v2 {
	/* Rx/Tx common port counters */
	uint64_t drop_events;
	uint64_t pkts;
	/* FPGA counters */
	uint64_t octets;
	uint64_t broadcast_pkts;
	uint64_t multicast_pkts;
	uint64_t unicast_pkts;
	uint64_t pkts_alignment;
	uint64_t pkts_code_violation;
	uint64_t pkts_crc;
	uint64_t undersize_pkts;
	uint64_t oversize_pkts;
	uint64_t fragments;
	uint64_t jabbers_not_truncated;
	uint64_t jabbers_truncated;
	/* RMON-style packet size distribution */
	uint64_t pkts_64_octets;
	uint64_t pkts_65_to_127_octets;
	uint64_t pkts_128_to_255_octets;
	uint64_t pkts_256_to_511_octets;
	uint64_t pkts_512_to_1023_octets;
	uint64_t pkts_1024_to_1518_octets;
	uint64_t pkts_1519_to_2047_octets;
	uint64_t pkts_2048_to_4095_octets;
	uint64_t pkts_4096_to_8191_octets;
	uint64_t pkts_8192_to_max_octets;
	uint64_t mac_drop_events;
	uint64_t pkts_lr;
	/* Rx only port counters */
	uint64_t duplicate;
	uint64_t pkts_ip_chksum_error;
	uint64_t pkts_udp_chksum_error;
	uint64_t pkts_tcp_chksum_error;
	uint64_t pkts_giant_undersize;
	uint64_t pkts_baby_giant;
	uint64_t pkts_not_isl_vlan_mpls;
	uint64_t pkts_isl;
	uint64_t pkts_vlan;
	uint64_t pkts_isl_vlan;
	uint64_t pkts_mpls;
	uint64_t pkts_isl_mpls;
	uint64_t pkts_vlan_mpls;
	uint64_t pkts_isl_vlan_mpls;
	uint64_t pkts_no_filter;
	uint64_t pkts_dedup_drop;
	uint64_t pkts_filter_drop;
	uint64_t pkts_overflow;
	uint64_t pkts_dbs_drop; /* only maintained when the DBS block is present */
	uint64_t octets_no_filter;
	uint64_t octets_dedup_drop;
	uint64_t octets_filter_drop;
	uint64_t octets_overflow;
	uint64_t octets_dbs_drop; /* only maintained when the DBS block is present */
	uint64_t ipft_first_hit;
	uint64_t ipft_first_not_hit;
	uint64_t ipft_mid_hit;
	uint64_t ipft_mid_not_hit;
	uint64_t ipft_last_hit;
	uint64_t ipft_last_not_hit;
};
+
/* Per-port counters, v1 layout (VSWITCH/inline "virt" profile). */
struct port_counters_vswitch_v1 {
	/* Rx/Tx common port counters */
	uint64_t octets;
	uint64_t pkts;
	uint64_t drop_events;
	uint64_t qos_drop_octets;
	uint64_t qos_drop_pkts;
};
+
/*
 * Flow matcher (FLM) statistics. Fields are grouped by the FLM module
 * version that introduced them (0.17 base set, 0.20 additions).
 */
struct flm_counters_v1 {
	/* FLM 0.17 */
	uint64_t current;
	uint64_t learn_done;
	uint64_t learn_ignore;
	uint64_t learn_fail;
	uint64_t unlearn_done;
	uint64_t unlearn_ignore;
	uint64_t auto_unlearn_done;
	uint64_t auto_unlearn_ignore;
	uint64_t auto_unlearn_fail;
	uint64_t timeout_unlearn_done;
	uint64_t rel_done;
	uint64_t rel_ignore;
	/* FLM 0.20 */
	uint64_t prb_done;
	uint64_t prb_ignore;
	uint64_t sta_done;
	uint64_t inf_done;
	uint64_t inf_skip;
	uint64_t pck_hit;
	uint64_t pck_miss;
	uint64_t pck_unh;
	uint64_t pck_dis;
	uint64_t csh_hit;
	uint64_t csh_miss;
	uint64_t csh_unh;
	uint64_t cuc_start;
	uint64_t cuc_move;
};
+
/*
 * Aggregate statistics state for one adapter: driver module handles,
 * the DMA area the counters are read from, per-entity counter arrays
 * and free-running Rx/Tx totals with their "base" snapshots.
 */
struct nt4ga_stat_s {
	nthw_stat_t *mp_nthw_stat; /* handle to the nthw statistics (STA) module */
	nthw_rmc_t *mp_nthw_rmc; /* handle to the nthw RMC module */
	struct nt_dma_s *p_stat_dma; /* DMA memory holding the raw counters */
	uint32_t *p_stat_dma_virtual; /* CPU-visible mapping of p_stat_dma */
	uint32_t n_stat_size; /* size of the statistics DMA area */

	uint64_t last_timestamp;

	int mn_rx_host_buffers;
	int mn_tx_host_buffers;

	int mn_rx_ports;
	int mn_tx_ports;

	struct color_counters *mp_stat_structs_color;
	/* For calculating increments between stats polls */
	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];

	/* Port counter layout depends on the adapter profile */
	union {
		/*Port counters for VSWITCH/inline */
		struct {
			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
		} virt;
		struct {
			struct port_counters_v2 *mp_stat_structs_port_rx;
			struct port_counters_v2 *mp_stat_structs_port_tx;
		} cap;
	};

	struct host_buffer_counters *mp_stat_structs_hb;

	int flm_stat_ver; /* FLM statistics layout version */
	struct flm_counters_v1 *mp_stat_structs_flm;

	/* Rx/Tx totals: */
	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */

	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
	/* Base is for calculating increments between statistics reads */
	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];

	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..55740e6de6
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
/*
 * Return true when @prod_no identifies a copper SFP from the known list
 * of supported tri-speed modules.
 */
static bool sfp_is_supported_tri_speed_pn(char *prod_no)
{
	static const char *const pn_trispeed_list[] = {
		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
	};
	size_t idx = 0;
	bool found = false;

	/* Linear scan - the list is tiny */
	while (!found && idx < ARRAY_SIZE(pn_trispeed_list)) {
		found = (strcmp(pn_trispeed_list[idx], prod_no) == 0);
		idx++;
	}
	return found;
}
+
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	case NT_NIM_XFP:
+		return true;
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
/* Return the raw NIM identifier byte of @ctx as the identifier enum type. */
nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
{
	return (nt_nim_identifier_t)ctx->nim_id;
}
+
/*
 * Raw I2C transfer of @seq_cnt bytes at register @reg_addr on device
 * @i2c_addr; @do_write selects direction (NIM_WRITE/NIM_READ).
 * @lin_addr is unused and kept only to mirror the linear-address API.
 * Returns the underlying nthw_iic result (0 on success).
 */
static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
				uint16_t lin_addr, uint8_t i2c_addr,
				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
{
	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
	const uint8_t i2c_devaddr = i2c_addr / 2U;
	(void)lin_addr; /* Unused */

	if (do_write)
		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
					 seq_cnt, p_data);
	else
		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
					seq_cnt, p_data);
}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the current page select value */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Only write new page select value if necessary */
+	if (page_sel != curr_page_sel) {
+		if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+					 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+					 sizeof(page_sel), &page_sel) != 0)
+			return -1;
+	}
+	return 0;
+}
+
/*
 * Core linear-address accessor: read or write @length bytes at linear
 * address @lin_addr of a NIM module, mapping the linear range onto the
 * underlying I2C devices/areas:
 *   [0..127]              0xA0 base area (all modules)
 *   [128..] (paged)       upper 0xA0 area selected via nim_setup_page()
 *   [256..511] (unpaged)  0xA2 device at (lin_addr - 256)
 *   [SFP_PHY_LIN_ADDR..]  0xAC PHY device, 16-bit (two-byte) registers
 * @do_write selects direction (NIM_READ/NIM_WRITE).
 * Returns 0 on success, -1 on range errors or I2C failure.
 */
static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
				   uint16_t lin_addr, uint16_t length,
				   uint8_t *p_data, bool do_write)
{
	uint16_t i;
	uint8_t reg_addr; /* The actual register address in I2C device */
	uint8_t i2c_addr;
	int block_size = 128; /* Equal to size of MSA pages */
	int seq_cnt;
	int max_seq_cnt = 1;
	int multi_byte = 1; /* One byte per I2C register is default */
	/* NOTE(review): instance-to-port offset of 2; value is used for logging only */
	const int m_port_no = ctx->instance - 2;

	if (lin_addr >= SFP_PHY_LIN_ADDR) {
		/*
		 * This represents an address space at I2C address 0xAC for SFP modules
		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
		 * accessed MSByte first and this reading latches the LSByte that is
		 * subsequently read from the same address.
		 */
		multi_byte = 2;
		max_seq_cnt = 2;

		/* Test for correct multibyte access */
		if ((length % multi_byte) != 0) {
			NT_LOG(ERR, ETHDEV,
			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
			return -1;
		}

		/* Two bytes per 16-bit register: range check in register units */
		if (lin_addr + (length / 2) >
				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
			NT_LOG(ERR, ETHDEV,
			       "Port %d: %s: Access above address range [0x%X..0x%X].",
			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
			return -1;
		}
	} else if (lin_addr + length > 128) {
		/*
		 * Page addressing could be relevant since the last byte is outside the
		 * basic range so check if it is enabled
		 */
		if (m_page_addressing) {
			/* Crossing into the PHY address range is not allowed */
			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
				NT_LOG(ERR, ETHDEV,
				       "Port %d: %s: Access above paged address range [0..0x%X].",
				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
				return -1;
			}
		} else {
			/* Access outside 0xA2 address range not allowed */
			if (lin_addr + length > 512) {
				NT_LOG(ERR, ETHDEV,
				       "Port %d: %s: Access above address range [0..511].",
				       m_port_no, __func__);
				return -1;
			}
		}
	}
	/* No missing else here - all devices supports access to address [0..127] */

	/* Transfer in chunks; each chunk must stay within one MSA block/page */
	for (i = 0; i < length;) {
		bool use_page_select = false;

		/*
		 * Find out how much can be read from the current block in case of
		 * single byte access
		 */
		if (multi_byte == 1)
			max_seq_cnt = block_size - (lin_addr % block_size);

		if (m_page_addressing) {
			if (lin_addr >= 128) { /* Only page setup above this address */
				use_page_select = true;

				/* Map to [128..255] of 0xA0 device */
				reg_addr = (uint8_t)(block_size +
						    (lin_addr % block_size));
			} else {
				reg_addr = (uint8_t)lin_addr;
			}
			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
		} else {
			if (lin_addr >= SFP_PHY_LIN_ADDR) {
				/* Map to address [0..31] of 0xAC device */
				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
				i2c_addr = nim_i2c_0xac;
			} else if (lin_addr >= 256) {
				/* Map to address [0..255] of 0xA2 device */
				reg_addr = (uint8_t)(lin_addr - 256);
				i2c_addr = nim_i2c_0xa2;
			} else {
				reg_addr = (uint8_t)lin_addr;
				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
			}
		}

		/* Now actually do the reading/writing */
		seq_cnt = length - i; /* Number of remaining bytes */

		if (seq_cnt > max_seq_cnt)
			seq_cnt = max_seq_cnt;

		/*
		 * Read a number of bytes without explicitly specifying a new address.
		 * This can speed up I2C access since automatic incrementation of the
		 * I2C device internal address counter can be used. It also allows
		 * a HW implementation, that can deal with block access.
		 * Furthermore it also allows for access to data that must be accessed
		 * as 16bit words reading two bytes at each address eg PHYs.
		 */
		if (use_page_select) {
			/* Page number: linear block index minus the unpaged block 0 */
			if (nim_setup_page(ctx,
					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
				NT_LOG(ERR, ETHDEV,
				       "%s: Cannot set up page for linear address %u\n",
				       __func__, lin_addr);
				return -1;
			}
		}
		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
					    reg_addr, (uint8_t)seq_cnt,
					    p_data) != 0) {
			NT_LOG(ERR, ETHDEV,
			       "%s: Call to NIM_ReadWriteI2cData failed\n",
			       __func__);
			return -1;
		}

		p_data += seq_cnt;
		i = (uint16_t)(i + seq_cnt);
		/* Advance in register units (two bytes per step for PHY access) */
		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
	}
	return 0;
}
+
/*
 * Read @length bytes at linear address @lin_addr from the NIM, using
 * page addressing when the module type requires it. Returns 0 on success.
 */
int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
		void *data)
{
	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
				       lin_addr, length, data, NIM_READ);
}
+
/*
 * Write @length bytes at linear address @lin_addr to the NIM, using
 * page addressing when the module type requires it. Returns 0 on success.
 */
static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
			void *data)
{
	return nim_nim_read_write_data_lin(ctx, page_addressing(ctx->nim_id),
				       lin_addr, length, data, NIM_WRITE);
}
+
+/* Read and return a single byte */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
/*
 * Read the one-byte module identifier at NIM_IDENTIFIER_ADDR into
 * ctx->nim_id. Returns 0 on success, -1 on I2C failure.
 */
static int nim_read_id(nim_i2c_ctx_t *ctx)
{
	/* We are only reading the first byte so we don't care about pages here. */
	const bool use_page_addressing = false;

	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
				    &ctx->nim_id, NIM_READ) != 0)
		return -1;
	return 0;
}
+
/*
 * Common construction step for all NIM types: read the module identifier
 * and reset the cached vendor/state fields of @ctx to safe defaults.
 * Returns 0 on success or the error from nim_read_id().
 */
static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
{
	ctx->nim_id = 0;
	int res = nim_read_id(ctx);

	if (res) {
		NT_LOG(ERR, PMD, "Can't read NIM id.");
		return res;
	}
	/* Clear the cached vendor info strings */
	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
	memset(ctx->date, 0, sizeof(ctx->date));
	memset(ctx->rev, 0, sizeof(ctx->rev));

	/* Reset derived state; lane_idx = -1 means "all lanes" */
	ctx->content_valid = false;
	memset(ctx->len_info, 0, sizeof(ctx->len_info));
	ctx->pwr_level_req = 0;
	ctx->pwr_level_cur = 0;
	ctx->avg_pwr = false;
	ctx->tx_disable = false;
	ctx->lane_idx = -1;
	ctx->lane_count = 1;
	ctx->options = 0;
	return 0;
}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
/*
 * Generate the {x}sfp_read_vendor_info() helpers that fill the cached
 * vendor strings (name, product no, serial no, date, revision) of the
 * NIM context from the module EEPROM.
 * NOTE(review): the address tokens are pasted as Q##SFP_..., so BOTH the
 * sfp_ and qsfp_ variants read the QSFP_* linear addresses - confirm this
 * is intended for the plain SFP variant (an x##SFP_... paste would select
 * per-variant addresses).
 */
#define XSFP_READ_VENDOR_INFO(x)                                             \
	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
	{                                                                    \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
				      sizeof(ctx->vendor_name),               \
				      ctx->vendor_name);                      \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
				      sizeof(ctx->prod_no), ctx->prod_no);     \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
				      sizeof(ctx->serial_no), ctx->serial_no); \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
				      sizeof(ctx->date), ctx->date);         \
		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
				      (uint8_t)(sizeof(ctx->rev) - 2),       \
				      ctx->rev); /*OBS Only two bytes*/      \
	}

XSFP_READ_VENDOR_INFO()
XSFP_READ_VENDOR_INFO(q)
+
/*
 * Build the link state for an SFP/SFP+ module by reading the bit rate
 * register (SFP_BIT_RATE_ADDR) from the module over I2C.
 * Returns the nthw_iic_read_data() result (0 on success).
 */
static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
{
	int res;

	assert(ctx && state);
	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");

	(void)memset(state, 0, sizeof(*state));

	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
			       sizeof(state->br), &state->br);
	return res;
}
+
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case 12U:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case 13U:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case 17U:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS)
+		return sfp_nim_state_build(ctx, state);
+	else
+		return qsfp_nim_state_build(ctx, state);
+}
+
/*
 * Map a NIM identifier byte to a human-readable module type name.
 * Identifiers without a mapping yield "ILLEGAL!".
 */
const char *nim_id_to_text(uint8_t nim_id)
{
	static const char *const id_names[] = {
		[0x00] = "UNKNOWN",	[0x01] = "GBIC",
		[0x02] = "FIXED",	[0x03] = "SFP/SFP+",
		[0x04] = "300 pin XBI",	[0x05] = "XEN-PAK",
		[0x06] = "XFP",		[0x07] = "XFF",
		[0x08] = "XFP-E",	[0x09] = "XPAK",
		[0x0A] = "X2",		[0x0B] = "DWDM",
		[0x0C] = "QSFP",	[0x0D] = "QSFP+",
		[0x11] = "QSFP28",	[0x12] = "CFP4",
	};
	const size_t n_names = sizeof(id_names) / sizeof(id_names[0]);

	/* Gaps in the table (e.g. 0x0E..0x10) are NULL and thus illegal */
	if (nim_id < n_names && id_names[nim_id] != NULL)
		return id_names[nim_id];

	return "ILLEGAL!";
}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
/*
 * Set/reset the soft rate-select bits: RS0 (controls the RX rate) in the
 * control/status register and RS1 (TX rate) in the extended control/status
 * register.
 * NOTE(review): the I2C read-modify-write results are not checked, so a
 * failed transfer is silent - confirm this is acceptable for this path.
 */
static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
				  bool tx_rate_high)
{
	const bool m_page_addressing = page_addressing(ctx->nim_id);
	uint8_t data;

	/* Read-modify-write RS0 in the control/status register */
	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
				&data, NIM_READ);

	if (rx_rate_high)
		data |= SFP_SOFT_RATE0_BIT;
	else
		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);

	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
				&data, NIM_WRITE);

	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
				&data, NIM_READ);

	if (tx_rate_high)
		data |= SFP_SOFT_RATE1_BIT;
	else
		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);

	nim_nim_read_write_data_lin(ctx, m_page_addressing,
				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
				&data, NIM_WRITE);
}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
/*
 * Set or clear the soft TX-disable bit in the SFP control/status register
 * (read-modify-write over I2C).
 * Returns 0 on success, non-zero on I2C failure.
 */
int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
{
	int res;
	uint8_t value;
	const bool pg_addr = page_addressing(ctx->nim_id);

	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
				      sizeof(value), &value, NIM_READ);
	if (res != 0)
		return res;

	if (disable)
		value |= SFP_SOFT_TX_DISABLE_BIT;
	else
		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;

	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
				      sizeof(value), &value, NIM_WRITE);

	return res;
}
+
+/*
+ * Enable or disable the TX laser for a specific lane (lane_idx >= 0) or for
+ * all lanes (lane_idx < 0). Returns 0 on success, -1 on I2C failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	const uint8_t mask = (lane_idx < 0) ?
+			   QSFP_SOFT_TX_ALL_DISABLE_BITS :
+			   (uint8_t)(1U << lane_idx);
+	uint8_t ctrl;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_READ) != 0)
+		return -1;
+
+	/* Read-modify-write the per-lane soft TX-disable bits */
+	ctrl = disable ? (uint8_t)(ctrl | mask) : (uint8_t)(ctrl & ~mask);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read a vendor info string of at most max_len - 1 bytes into p_data.
+ * The string is cut at the first whitespace character and is always
+ * NUL-terminated, even when the NIM data lacks a terminator.
+ * Returns 0 on success, -1 on I2C read failure.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int idx;
+
+	/* max_len includes the terminating NUL, hence the "- 1" */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at the first whitespace character found */
+	for (idx = 0; idx < max_len - 1; idx++) {
+		switch (p_data[idx]) {
+		case ' ':
+		case '\n':
+		case '\t':
+		case '\v':
+		case '\f':
+		case '\r':
+			p_data[idx] = '\0';
+			return 0;
+		default:
+			break;
+		}
+	}
+
+	/* No whitespace found - force termination at the very last position */
+	p_data[max_len - 1] = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data, convert each
+ * entry to meters and clamp the result to 65535.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(ctx->len_info); idx++) {
+		if (p_nim_len_info[idx] == 255) {
+			/* 255 is the module's "longer than can be stated" marker */
+			ctx->len_info[idx] = 65535;
+			continue;
+		}
+
+		uint32_t meters = (uint32_t)p_nim_len_info[idx] * p_nim_units[idx];
+
+		ctx->len_info[idx] = (meters > 65535) ? 65535 : (uint16_t)meters;
+	}
+}
+
+/*
+ * Read basic QSFP+/QSFP28 module data: DMI options, vendor information,
+ * supported fiber lengths and the required power class.
+ * Returns 0 on success, -1 on I2C access failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	/*
+	 * Normalize the masked bit to 0/1 before indexing yes_no[]: if
+	 * avg_pwr is not declared bool, the raw mask value would index the
+	 * two-element array out of bounds.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr ? 1 : 0]);
+
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * can still support several rates without requiring the user to select one;
+ * supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* "Rate select" implemented? (options register, bit 5) */
+	if (((read_byte(ctx, options_reg_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Rate select type (enhanced options register, bits 3..2) */
+	const uint8_t sel_type =
+		(uint8_t)((read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03);
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ * Returns 0 on success, -1 when the requested speed cannot be selected.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	/* Rx/Tx rate-select register addresses (SFF-8636) */
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				/*
+				 * 0xAAAA = high-rate select pattern for all lanes;
+				 * NOTE(review): assumed per extended rate select
+				 * v2 encoding - verify against SFF-8636.
+				 */
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply a link-speed selection appropriate for the installed NIM type.
+ * Returns 0 on success, -1 if the NIM type has no adjustable link speed.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return nim_sfp_set_rate_select(ctx, speed);
+	case NT_NIM_QSFP28:
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+		return 0; /* NIM picks up the speed automatically */
+	default:
+		break;
+	}
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information for an SFP module and derives the
+ * supported option bits, length info and required/current power level.
+ * Always returns 0.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+		/*
+		 * Index yes_no[] with values normalized to 0/1: the context
+		 * fields hold raw masked bits and could otherwise index the
+		 * two-element array out of bounds.
+		 */
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp ? 1 : 0],
+		       yes_no[ctx->avg_pwr ? 1 : 0],
+		       yes_no[ctx->specific_u.sfp.ext_cal ? 1 : 0],
+		       yes_no[ctx->specific_u.sfp.addr_chg ? 1 : 0]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers,
+		 * which is given in both km and 100m units, is equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI
+ * options that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ * Returns true when the product number is recognized; false otherwise, in
+ * which case only the mandatory temperature sensor is assumed present.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	/* All five standard DMI sensors implemented */
+	enum {
+		ALL_DMI_OPTIONS =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER)
+	};
+	/*
+	 * Product-number -> DMI-option lookup table; replaces the previous
+	 * long if/else strcmp chain with identical behavior.
+	 */
+	static const struct {
+		const char *prod_no;
+		uint32_t options;
+	} pn_tbl[] = {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		{ "FTL410QE1C",
+		  (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+		  (1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER) },
+		/* FINISAR FTL410QE2C, QSFP+ */
+		{ "FTL410QE2C",
+		  (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) },
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		{ "FTL4C1QE1C", ALL_DMI_OPTIONS },
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		{ "AFBR-79E4Z", (1 << NIM_OPTION_TEMP) },
+		/* AVAGO 79E4Z-D, QSFP+ */
+		{ "AFBR-79E4Z-D", ALL_DMI_OPTIONS },
+		/* AVAGO 79EQDZ, QSFP+ */
+		{ "AFBR-79EQDZ", ALL_DMI_OPTIONS },
+		/*
+		 * AFBR-79EBRZ: Avago RxOnly BiDi NIM
+		 * No sensors available, not even the normally mandatory temp sensor, and
+		 * this is ok since the temp sensor is not mandatory on active optical
+		 * modules (SFF-8436_rev4.1, p67)
+		 */
+		{ "AFBR-79EBRZ", (1 << NIM_OPTION_RX_ONLY) },
+		/*
+		 * AFBR-79EBPZ-NU1 / AFBR-79EBPZ: Avago RxTx BiDi NIMs
+		 * No sensors available, not even the normally mandatory temp sensor, and
+		 * this is ok since the temp sensor is not mandatory on active optical
+		 * modules
+		 */
+		{ "AFBR-79EBPZ-NU1", 0 },
+		{ "AFBR-79EBPZ", 0 },
+		/* AVAGO 89CDDZ, QSFP28 */
+		{ "AFBR-89CDDZ", ALL_DMI_OPTIONS },
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		{ "AFBR-89BDDZ", ALL_DMI_OPTIONS },
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors.
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+		{ "AFBR-89BRDZ",
+		  (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+		  (1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY) },
+		/* Sumitomo QSFP28 modules */
+		{ "SQF1000L4LNGG01P", ALL_DMI_OPTIONS },
+		{ "SQF1000L4LNGG01B", ALL_DMI_OPTIONS },
+		{ "SQF1001L4LNGG01P", ALL_DMI_OPTIONS },
+		{ "SQF1001L4LNGG01B", ALL_DMI_OPTIONS },
+		{ "SQF1002L4LNGG01B", ALL_DMI_OPTIONS },
+		/* Fujitsu QSFP28 modules */
+		{ "FIM37700/171", ALL_DMI_OPTIONS },
+		{ "FIM37700/172", ALL_DMI_OPTIONS },
+		/* InnoLight QSFP28 modules */
+		{ "TR-FC85S-NVS", ALL_DMI_OPTIONS },
+		{ "TR-FC13L-NVS", ALL_DMI_OPTIONS },
+		/* Finisar QSFP28 modules */
+		{ "FTLC9551REPM", ALL_DMI_OPTIONS },
+		{ "FTLC9558REPM", ALL_DMI_OPTIONS },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pn_tbl); i++) {
+		if (strcmp(ctx->prod_no, pn_tbl[i].prod_no) == 0) {
+			ctx->options = pn_tbl[i].options;
+			return true;
+		}
+	}
+
+	/*
+	 * Unknown product number.
+	 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+	 * and active optical modules
+	 */
+	ctx->options = (1 << NIM_OPTION_TEMP);
+	return false;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly empirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ *
+ * value_addr:    linear address of the current sensor value(s), 2 bytes per lane
+ * lane_count:    1 for module-wide sensors, 4 for per-lane sensors
+ * limit_addr:    linear address of 4 consecutive 16-bit limits:
+ *                alarm high, alarm low, warning high, warning low
+ * two_compl:     values/limits are 16-bit two's complement (e.g. temperature)
+ * sensor_option: NIM_OPTION_* bit number set in ctx->options when present
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	/* Assemble one big-endian 16-bit value per lane */
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		/* Sign-extend manually when the field is two's complement */
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors by probing each sensor's value and limit
+ * registers via qsfpplus_find_qsfp_sensor_option().
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	/* One probe descriptor per possible DMI sensor */
+	static const struct {
+		uint16_t value_addr;
+		uint8_t lane_count;
+		uint16_t limit_addr;
+		bool two_compl;
+		uint32_t sensor_option;
+	} probes[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < ARRAY_SIZE(probes); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, probes[i].value_addr,
+						 probes[i].lane_count,
+						 probes[i].limit_addr,
+						 probes[i].two_compl,
+						 probes[i].sensor_option);
+}
+
+/*
+ * Classify the SFP/SFP+/SFP28 port type and rate-select capabilities from
+ * the module EEPROM. The raw linear addresses read below (12, 2, 6, 92, 8,
+ * 15-17, 36, 65, 93) are presumably the SFF-8472 A0h fields for nominal bit
+ * rate, connector, GigE compliance, DMI options, transmitter technology,
+ * length info, extended compliance and rate-select bits - verify vs. spec.
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Nominal bit rate is stored in units of 100 Mbd */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* 25.5 Gbd nominal rate identifies an SFP28 module */
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Whitelist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	/* >= 10 Gbd nominal rate means SFP+ rather than SFP */
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive the supported link-speed mask from the detected SFP subtype.
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool dual = ctx->specific_u.sfp.dual_rate;
+
+	if (ctx->specific_u.sfp.sfp28) {
+		/* Default for SFP28 */
+		ctx->speed_mask = NT_LINK_SPEED_25G;
+		if (dual)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* Default for SFP+; DAC cables can also run 1G */
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+		if (dual ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+				ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		ctx->speed_mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Default for SFP */
+		ctx->speed_mask = NT_LINK_SPEED_1G;
+		if (dual || ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_100M;
+		if (ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_10M;
+	}
+
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+/*
+ * Classify the QSFP28 port type from the specification compliance codes.
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	/* Default unless a known extended compliance code is found */
+	ctx->port_type = NT_PORT_TYPE_QSFP28;
+
+	if ((fiber_chan_speed & (1 << 7)) == 0)
+		return;
+
+	/* SFF-8024, Rev 4.7, Table 4-4 */
+	uint8_t ext_compliance = 0;
+
+	read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+		    &ext_compliance);
+
+	switch (ext_compliance) {
+	case 0x02:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+		break;
+	case 0x03:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+		break;
+	case 0x0B:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+		break;
+	case 0x0C:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+		break;
+	case 0x0D:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+		break;
+	case 0x25:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+		break;
+	case 0x26:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+		break;
+	case 0x27:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+		break;
+	default:
+		/* Keep the generic QSFP28 classification */
+		break;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * can still support several rates without requiring the user to select one;
+ * supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	/* "Rate select" implemented? (options register, bit 5) */
+	if (((read_byte(ctx, options_reg_addr) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Rate select type (enhanced options register, bits 3..2) */
+	const uint8_t sel_type =
+		(uint8_t)((read_byte(ctx, enh_options_reg_addr) >> 2) & 0x03);
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	/* Extended rate select version (bits 1..0) must be 2 */
+	const uint8_t ext_ver =
+		(uint8_t)(read_byte(ctx, ext_rate_select_compl_reg_addr) & 0x03);
+
+	if (ext_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive the speed mask for a QSFP28 module, depending on whether all lanes
+ * run together (lane_idx < 0) or a single lane is used.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = (ctx->lane_idx < 0);
+	const bool pam4_module =
+		ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_LR;
+
+	if (pam4_module) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 * NOTE(review): the single-lane case overwrites (=) while the
+		 * all-lanes case accumulates (|=) - preserved from the original;
+		 * confirm whether 25G should remain selectable per lane.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t tech = 0;
+
+	/* Device technology byte; the upper nibble identifies the transmitter */
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(tech), &tech);
+
+	const uint8_t nibble = (uint8_t)(tech >> 4);
+
+	if (nibble == 0x0A || nibble == 0x0B) {
+		/* Copper cable: unequalized (0xA) or passive equalized (0xB) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (nibble >= 0x0C) {
+		/* Copper cable with active equalizers (0xC..0xF) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Anything below 0xA is an optical technology */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	/* All four lanes together run 40G; a single lane runs 10G */
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	/* Port type and speed mask can only be derived from valid basic data */
+	const int res = sfp_read_basic_data(ctx);
+
+	if (res != 0)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return 0;
+}
+
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	/*
+	 * Initialize the QSFP+ specific part of the context.
+	 * lane_idx is either -1 (module handled as one port using all lanes,
+	 * see nim_i2c_ctx::lane_idx) or a single lane number 0..3. The
+	 * original assert only checked the upper bound, accepting any
+	 * negative value; reject everything outside -1..3.
+	 */
+	assert(lane_idx >= -1 && lane_idx < 4);
+	ctx->specific_u.qsfp.qsfp28 = false;
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+}
+
+/*
+ * Construct the QSFP+ context and read the module's basic EEPROM data.
+ * On success (returns 0) the port type, option bits, TX-disable capability,
+ * RX-only flag and speed mask are filled in; a non-zero return is the error
+ * from qsfpplus_read_basic_data().
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Give a freshly reset QSFP28 module time to become ready before its upper
+ * memory pages are read. Reads the SFF-8636 revision-compliance byte into
+ * ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance and, when the module
+ * implements the init-complete flag, polls it (byte 6, bit 0) with a 1 s
+ * timeout; otherwise falls back to a fixed 500 ms wait.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		/* Byte 6, bit 0 set = module ready */
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	/*
+	 * Determine which FEC types the module supports: first via a list of
+	 * known product numbers, otherwise via the SFF-8636 capability
+	 * registers. Sets the NIM_OPTION_*_FEC option bits and the FEC
+	 * fields in ctx->specific_u.qsfp.specific_u.qsfp28.
+	 */
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare product numbers by content: the original pointer
+		 * comparison (ctx->prod_no == nim_list[i]) could never match
+		 * since prod_no is a local array, not a string literal.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 on page 3 */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	/* Run the QSFP+ preinit first; QSFP28 handling builds on top of it */
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		/*
+		 * Clear the QSFP28 specific state BEFORE the wait helper runs:
+		 * qsfp28_wait_for_ready_after_reset() stores rev_compliance in
+		 * this struct, and the original order (memset after the wait)
+		 * immediately wiped that value again.
+		 */
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		qsfp28_wait_for_ready_after_reset(ctx);
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Register the SFP sensor chain (temperature, voltage, bias current, TX and
+ * RX power) for one port in nim_sensors_ptr[m_port_no] and count the sensors
+ * added in *nim_sensors_cnt. Fixes over the original: *nim_sensors_cnt is no
+ * longer dereferenced before validation, and every allocation result is
+ * checked before use (allocate_nim_sensor_group() can return NULL).
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (nim_sensors_cnt != NULL)
+		*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL || nim_sensors_cnt == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		const char *name = ctx->specific_u.sfp.sfp28 ? "SFP28" :
+			ctx->specific_u.sfp.sfpplus ? "SFP+" : "SFP";
+
+		rte_strscpy(sfp_sensors_level0[0].name, name,
+			sizeof(sfp_sensors_level0[0].name));
+	}
+
+	/* The temperature sensor heads the per-port list */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	if (sensor == NULL)
+		return; /* allocation failed; leave the list empty */
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* Level-1 sensors: voltage, bias current, TX power, RX power */
+	void (*const readers[])(struct nim_sensor_group *, nthw_spis_t *) = {
+		&nim_read_sfp_voltage,
+		&nim_read_sfp_bias_current,
+		&nim_read_sfp_tx_power,
+		&nim_read_sfp_rx_power,
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(readers); i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no,
+							 ctx,
+							 NT_SENSOR_SOURCE_PORT,
+							 &sfp_sensors_level1[i]);
+		if (sensor->next == NULL)
+			return; /* stop on allocation failure */
+		sensor = sensor->next;
+		sensor->read = readers[i];
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Register the QSFP+/QSFP28 sensor chain for one port: temperature, voltage
+ * and per-lane bias current, TX power and RX power. Sensors added are counted
+ * in *nim_sensors_cnt (not reset here, matching the original). Fixes over the
+ * original: nim_sensors_cnt is NULL-checked before it is dereferenced, and
+ * every allocation result is checked before use.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL || nim_sensors_cnt == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		rte_strscpy(qsfp_sensor_level0[0].name,
+			ctx->specific_u.qsfp.qsfp28 ? "QSFP28" : "QSFP+",
+			sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* The temperature sensor heads the per-port list */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	if (sensor == NULL)
+		return; /* allocation failed; leave the list empty */
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/*
+	 * Level-1 sensors: [0] = voltage, [1..4] = bias current per lane,
+	 * [5..8] = TX power per lane, [9..12] = RX power per lane.
+	 */
+	for (uint8_t i = 0; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		if (sensor->next == NULL)
+			return; /* stop on allocation failure */
+		sensor = sensor->next;
+
+		if (i == 0)
+			sensor->read = &nim_read_qsfp_voltage;
+		else if (i < 5)
+			sensor->read = &nim_read_qsfp_bias_current;
+		else if (i < 9)
+			sensor->read = &nim_read_qsfp_tx_power;
+		else
+			sensor->read = &nim_read_qsfp_rx_power;
+
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one list node tying a NIM sensor to its description and I2C
+ * context. Returns NULL on allocation failure; the caller owns the node and
+ * its `next` link is initialized to NULL. The `read` hook is left for the
+ * caller to fill in.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	/* sizeof(*sg) keeps the allocation size tied to the pointer's type */
+	struct nim_sensor_group *sg = malloc(sizeof(*sg));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Classify the NIM from its id byte, run the type-specific preinit and
+ * register all sensors for the port. Returns the result of the common
+ * construction, or 1 if the NIM type is unsupported.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/*
+		 * NOTE(review): the preinit return values below are ignored,
+		 * so sensors are added even if preinit failed and `res` only
+		 * reflects the common construction - confirm this is intended.
+		 */
+		sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		/* `extra` optionally carries the lane index; -1 = all lanes */
+		qsfpplus_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		qsfp28_preinit(ctx, extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* State read from the module needed to establish a link: the bit rate */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * Per-port NIM context: I2C access state, identity strings read from the
+ * module EEPROM, and capability/option flags derived from them.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr;
+	uint8_t regaddr;
+	uint8_t nim_id; /* raw NIM type id, see enum nt_nim_identifier_e */
+	nt_port_type_t port_type;
+
+	/* Identity strings; one extra byte for NUL termination */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr; /* presumably set from the DMI average-power option bit - confirm */
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5];
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options; /* bitmask of (1 << NIM_OPTION_x) capabilities */
+	bool tx_disable; /* module implements TX disable (register 195 bit 4) */
+	bool dmi_supp;
+
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel;
+			bool sw_rate_sel;
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only; /* derived from NIM_OPTION_RX_ONLY (PN based) */
+			bool qsfp28; /* true when the module is QSFP28 rather than QSFP+ */
+			union {
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
+					bool media_side_fec_ctrl; /* media side FEC is controllable */
+					bool host_side_fec_ctrl; /* host side FEC is controllable */
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* Singly-linked list node binding one sensor to its NIM context and read hook */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor; /* underlying adapter sensor */
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi); /* updates the sensor value */
+	struct nim_i2c_ctx *ctx; /* NIM the sensor belongs to */
+	struct nim_sensor_group *next; /* next sensor for this port, or NULL */
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify NIM based on it's ID and some register reads
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific type of NIMS.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+/* Capability indices; stored in nim_i2c_ctx::options as (1 << NIM_OPTION_x) */
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE, /* module implements TX disable (register 86.0-3) */
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC, /* media side FEC supported/controllable */
+	NIM_OPTION_HOST_SIDE_FEC, /* host side FEC supported/controllable */
+	NIM_OPTION_RX_ONLY /* module is receive-only (determined from PN) */
+} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	/* Map a single link-speed flag to its display string */
+	switch (link_speed) {
+	case NT_LINK_SPEED_10M:
+		return "10M";
+	case NT_LINK_SPEED_100M:
+		return "100M";
+	case NT_LINK_SPEED_1G:
+		return "1G";
+	case NT_LINK_SPEED_10G:
+		return "10G";
+	case NT_LINK_SPEED_25G:
+		return "25G";
+	case NT_LINK_SPEED_40G:
+		return "40G";
+	case NT_LINK_SPEED_50G:
+		return "50G";
+	case NT_LINK_SPEED_100G:
+		return "100G";
+	case NT_LINK_SPEED_UNKNOWN:
+		return "NotAvail";
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return "Unhandled";
+	}
+}
+
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	/* Convert a link-speed flag to its value in bits per second */
+	const uint64_t mega = 1000ULL * 1000ULL;
+	const uint64_t giga = 1000ULL * mega;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * mega;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * mega;
+	case NT_LINK_SPEED_1G:
+		return 1ULL * giga;
+	case NT_LINK_SPEED_10G:
+		return 10ULL * giga;
+	case NT_LINK_SPEED_25G:
+		return 25ULL * giga;
+	case NT_LINK_SPEED_40G:
+		return 40ULL * giga;
+	case NT_LINK_SPEED_50G:
+		return 50ULL * giga;
+	case NT_LINK_SPEED_100G:
+		return 100ULL * giga;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render a speed bitmask as a comma-separated string, e.g. "10G, 40G", into
+ * `buffer` of size `length`; at most length-1 characters are written and the
+ * result is always NUL-terminated. Returns `buffer`.
+ * Fix: strncat()'s size argument is the maximum number of characters to
+ * APPEND, not the total buffer size - the original passed `length`, which
+ * could overflow `buffer` once it was partly full.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	if (buffer == NULL || length == 0)
+		return buffer;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if (((1U << i) & link_speed_mask) == 0)
+			continue;
+
+		len = strlen(buffer);
+
+		/* Separator before every entry but the first */
+		if (len > 0 && (length - len - 1) > 2) {
+			strncat(buffer, ", ", length - len - 1);
+			len = strlen(buffer);
+		}
+
+		if (len < (length - 1))
+			strncat(buffer, nt_translate_link_speed(1 << i),
+				length - len - 1);
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read `count` consecutive 16-bit sensor values starting at linear address
+ * `addr` into p_lane_values. Always reports success.
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	/*
+	 * NOTE(review): the original loop contained the no-op statement
+	 * `*p_lane_values = (*p_lane_values);` commented "Swap to little
+	 * endian", which swaps nothing. It is removed here (behavior is
+	 * unchanged), but if the module data is big-endian a real byte swap
+	 * is still missing - confirm against read_data_lin()'s behavior.
+	 */
+	for (int i = 0; i < count; i++) {
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read the NIM temperature (one 16-bit value at the temperature register)
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR,
+					NIM_OPTION_TEMP, 1,
+					(uint16_t *)p_value);
+}
+
+/*
+ * Read the NIM supply voltage (one 16-bit value at the voltage register)
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+					NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read the NIM TX bias current for all four lanes
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+					NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read the NIM TX optical power for all four lanes
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+					NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/* Fix: the original read QSFP_TX_PWR_LIN_ADDR here, so the "RX power"
+	 * values were actually TX power (copy-paste error).
+	 */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/* Update the temperature sensor; -1 signals a failed read */
+	int16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_temperature(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Raw value is in 1/256 units; report tenths */
+	update_sensor_value(sg->sensor, (int)(raw * 10 / 256));
+}
+
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/* Update the supply-voltage sensor; -1 signals a failed read */
+	uint16_t raw;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_supply_voltage(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Scale the raw value down by 10 before reporting */
+	update_sensor_value(sg->sensor, (int)(raw / 10));
+}
+
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/* Update the per-lane TX bias current readings; -1 on read failure */
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane] * 2);
+}
+
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/* Update the per-lane TX optical power readings; -1 on read failure */
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
+
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	/* Update the per-lane RX optical power readings; -1 on read failure */
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from _QSFP_H: identifiers beginning with an underscore
+ * followed by an uppercase letter are reserved to the implementation
+ * (C11 7.1.3).
+ */
+#ifndef QSFP_SENSORS_H
+#define QSFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/*
+ * QSFP/QSFP+ sensor read functions. Each reads one quantity from the module
+ * behind sg->ctx and publishes it via update_sensor_value(); on failure the
+ * sensor is set to -1. The t_spi argument is currently unused.
+ */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* QSFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from _SFP_P_REG_H (reserved identifier, C11 7.1.3).
+ */
+#ifndef SFP_P_REGISTERS_H
+#define SFP_P_REGISTERS_H
+
+/*
+ * SFP/SFP+ Registers
+ * Addresses of the form (x + 256) are in the 0xA2 (diagnostics) area; lower
+ * addresses presumably map to the 0xA0 area - see usage in sfp_sensors.c.
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+/* Duplicate definition of SFP_CU_LINK_LEN_LIN_ADDR removed (was listed twice) */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+/*
+ * NOTE(review): the two bits below have no accompanying register address
+ * define; they appear to belong to an A2-area control/status register (cf.
+ * SFP_EXT_CTRL_STAT0_LIN_ADDR below) - confirm the intended register.
+ */
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* SFP_P_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_value will
+ * point to 16bit data that can be either signed or unsigned.
+ * Returns false when the module has no DMI support, true otherwise.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;	/* Neutral slope used when no external calibration */
+	int16_t offset = 0;	/* Neutral offset used when no external calibration */
+
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Big endian -> host order (htons == ntohs) */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Calibration words: big endian -> host order */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/*
+		 * Apply value * slope/256 + offset, saturated to the 16-bit
+		 * range. NOTE(review): slope treated as unsigned 8.8 fixed
+		 * point - confirm against the SFF-8472 external calibration
+		 * definition.
+		 */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (signed raw DMI value).
+ * nim_read_sfp_temp() scales the result by 10/256, so the raw unit is
+ * presumably 1/256 degC - confirm against SFF-8472.
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR, SFP_TEMP_SLOPE_LIN_ADDR,
+				  SFP_TEMP_OFFSET_LIN_ADDR, p_value, true, ctx);
+}
+
+/*
+ * Read NIM supply voltage (unsigned raw DMI value; callers scale /10 to mV).
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR, SFP_VOLT_SLOPE_LIN_ADDR,
+				  SFP_VOLT_OFFSET_LIN_ADDR, p_value, false, ctx);
+}
+
+/*
+ * Read NIM TX bias current (unsigned raw DMI value; callers scale *2).
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+				  SFP_TX_BIAS_SLOPE_LIN_ADDR,
+				  SFP_TX_BIAS_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Read NIM TX optical power (unsigned raw DMI value, reported unscaled).
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+				  SFP_TX_PWR_SLOPE_LIN_ADDR,
+				  SFP_TX_PWR_OFFSET_LIN_ADDR, p_value, false,
+				  ctx);
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration (a 4th-degree polynomial) is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];	/* Coeff4..Coeff0 - [4] holds Coeff0 (see below) */
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Big endian -> host order */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/* Byte-swap each 32-bit float coefficient in place */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Accumulate the RxPwr^1..RxPwr^4 terms (coefficients reversed) */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/*
+		 * Check out for out of range.
+		 * NOTE(review): on overflow this returns false with *p_value
+		 * still holding the raw (uncalibrated) reading - confirm
+		 * callers discard the value in that case.
+		 */
+		if (rx_power > 65535)
+			return false;
+
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists (calibrated via
+ * sfp_nim_get_calibrated_rx_power()).
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR, p_value);
+}
+
+/*
+ * Read the SFP module temperature and publish it on the sensor group.
+ * Raw 1/256 degC units are converted to 0.1 degC; -1 signals a read failure.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw_temp;
+	int sensor_val = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_temperature(sg->ctx, &raw_temp))
+		sensor_val = (int)(raw_temp * 10 / 256);
+
+	update_sensor_value(sg->sensor, sensor_val);
+}
+
+/*
+ * Read the SFP supply voltage and publish it on the sensor group.
+ * Raw units of 100uV are converted to 1mV; -1 signals a read failure.
+ */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_voltage;
+	int sensor_val = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_supply_voltage(sg->ctx, &raw_voltage))
+		sensor_val = (int)(raw_voltage / 10); /* Unit: 100uV -> 1mV */
+
+	update_sensor_value(sg->sensor, sensor_val);
+}
+
+/*
+ * Read the SFP TX bias current and publish it on the sensor group.
+ * Raw value is scaled by 2; -1 signals a read failure.
+ */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_bias;
+	int sensor_val = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_bias_current(sg->ctx, &raw_bias))
+		sensor_val = (int)(raw_bias * 2);
+
+	update_sensor_value(sg->sensor, sensor_val);
+}
+
+/*
+ * Read the SFP TX optical power and publish it on the sensor group;
+ * -1 signals a read failure.
+ */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr;
+	int sensor_val = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_tx_power(sg->ctx, &raw_pwr))
+		sensor_val = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, sensor_val);
+}
+
+/*
+ * Read the SFP RX optical power (calibrated) and publish it on the sensor
+ * group; -1 signals a read failure.
+ */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw_pwr;
+	int sensor_val = -1;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (sfp_nim_get_rx_power(sg->ctx, &raw_pwr))
+		sensor_val = (int)raw_pwr;
+
+	update_sensor_value(sg->sensor, sensor_val);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from _SFP_H: identifiers beginning with an underscore
+ * followed by an uppercase letter are reserved to the implementation
+ * (C11 7.1.3).
+ */
+#ifndef SFP_SENSORS_H
+#define SFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/*
+ * SFP/SFP+ sensor read functions. Each reads one quantity from the module
+ * behind sg->ctx and publishes it via update_sensor_value(); on failure the
+ * sensor is set to -1. The t_spi argument is currently unused.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* SFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate and zero-initialize a GMF instance.
+ * Returns NULL on allocation failure; release with nthw_gmf_delete().
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/* calloc() zeroes the allocation in one step (was malloc + memset) */
+	nthw_gmf_t *p = calloc(1, sizeof(nthw_gmf_t));
+
+	return p;
+}
+
+/*
+ * Scrub and free a GMF instance. NULL is accepted and ignored.
+ */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (p) {
+		/* Clear handle state to make use-after-free easier to spot */
+		memset(p, 0, sizeof(nthw_gmf_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a GMF instance to FPGA module MOD_GMF #n_instance and cache its
+ * register/field handles. Optional (FPGA-version-dependent) registers and
+ * fields are looked up with the query_* variants and left NULL when absent.
+ * When p is NULL the call only probes for existence of the instance.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers and fields */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters for IFG speed scaling (default 1/1) */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields - NULL when absent from this FPGA */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Enable/disable the GMF. Ignored while administratively blocked
+ * (see nthw_gmf_administrative_block()).
+ */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->m_administrative_block)
+		field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame-gap (IFG) control. */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, enable ? 1 : 0);
+}
+
+/* Enable "TX now always" mode; no-op when the FPGA lacks the field. */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Enable "TX on timestamp always" mode; no-op when the FPGA lacks the field. */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always)
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/*
+ * Enable TX-on-timestamp adjustment on set-clock; no-op when the FPGA lacks
+ * the field.
+ */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock) {
+		field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+				    enable ? 1 : 0);
+	}
+}
+
+/* Enable/disable automatic IFG adjustment. */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Normalize to 0/1 explicitly, consistent with the other enable setters */
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Program the raw IFG SPEED field value (1 or 2 32-bit words depending on
+ * field width). Returns 0 on success, -1 when the value does not fit.
+ * NOTE(review): the guard admits values up to 2^(bit_width - 1); a field of
+ * bit_width bits could hold up to 2^bit_width - 1 - confirm whether the
+ * tighter bound is intentional.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Width in bits of the SPEED.IFG_SPEED field for the loaded FPGA.
+ */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	assert(n_bit_width >=
+	       22); /* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	return n_bit_width;
+}
+
+/*
+ * Program IFG speed from an absolute rate limit (bits) relative to link speed.
+ * speed = (1/adj_rate - 1) * 2^(bit_width/2), where adj_rate is the requested
+ * fraction of the link rate scaled by the product MUL/DIV parameters.
+ * Returns 0 on success, -1 when the computed value does not fit the field.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program IFG speed from a rate limit in percent of link rate.
+ * 0% and 100% both disable limiting (raw value 0); values up to 99% are
+ * converted with the same formula as nthw_gmf_set_ifg_speed_bits().
+ * NOTE(review): percentages strictly between 99 and 100 are rejected with
+ * -1 - confirm this gap is intentional.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Write the 64-bit IFG clock delta as two 32-bit words.
+ * NOTE(review): passing &delta as uint32_t[2] assumes field_set_val() expects
+ * little-endian word order - confirm for big-endian hosts.
+ */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	field_set_val(p->mp_ifg_clock_delta_delta, (uint32_t *)&delta, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/*
+ * Write the 64-bit clock delta-adjust value; no-op when the optional
+ * DELTA_ADJUST register is absent from the FPGA.
+ */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust) {
+		field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+			     (uint32_t *)&delta_adjust, 2);
+		field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+	}
+}
+
+/* Write the 64-bit IFG max-adjust slack value (two 32-bit words). */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, (uint32_t *)&slack, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value. */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation, compensation);
+}
+
+/*
+ * Read the sticky status register and decode it into a bitmask of
+ * GMF_STATUS_MASK_* values.
+ */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed))
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted))
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+/*
+ * Acknowledge the selected sticky status bits by writing their fields.
+ * NOTE(review): presumably the hardware bits are write-1-to-clear - confirm
+ * against the register definition.
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if (status & GMF_STATUS_MASK_DATA_UNDERFLOWED)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+	if (status & GMF_STATUS_MASK_IFG_ADJUSTED)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the "next packet" timestamp statistic (ns).
+ * Returns UINT64_MAX when the optional STAT_NEXT_PKT register is absent.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/*
+	 * Sentinel is UINT64_MAX, not ULONG_MAX: unsigned long is only 32 bits
+	 * on ILP32/LLP64 platforms, which would yield the wrong all-ones value
+	 * for a uint64_t.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the "max delayed packet" statistic (ns).
+ * Returns UINT64_MAX when the optional STAT_MAX_DELAYED_PKT register is
+ * absent.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/* UINT64_MAX, not ULONG_MAX - see nthw_gmf_get_stat_next_pkt_ns() */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Administratively disable the GMF (used to enforce license expiry): force
+ * ENABLE off, then latch the block so nthw_gmf_set_enable() becomes a no-op.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed from __NTHW_GMF_H__: identifiers containing a double
+ * underscore are reserved to the implementation (C11 7.1.3).
+ */
+#ifndef NTHW_GMF_H
+#define NTHW_GMF_H
+
+/* Status bits reported by nthw_gmf_get_status_sticky() and accepted by
+ * nthw_gmf_set_status_sticky().
+ */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/*
+ * One GMF FPGA module instance.
+ * Caches register/field handles resolved by nthw_gmf_init(); handles resolved
+ * with the query_* API are NULL when the loaded FPGA lacks them.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* Product parameters for IFG speed scaling */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* NTHW_GMF_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Si5340 Rev D clock generator register profile for NT200A02 (U23), v5.
+ * The register table below appears to be tool-generated configuration output.
+ * The including .c file #defines si5340_revd_register_t/si5340_revd_registers
+ * to give the table a profile-specific name, and #undefs this guard to allow
+ * multiple inclusions (see nthw_clock_profiles.c).
+ */
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..ec32dd88e6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC context.
+ * Returns NULL on allocation failure; pair with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc yields the same zeroed object as malloc+memset */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/*
+ * Release an RMC context created by nthw_rmc_new().
+ * The object is scrubbed before being freed; NULL is tolerated.
+ */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_rmc_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to instance 'n_instance' of the MOD_RMC FPGA module and
+ * cache its register/field handles.
+ *
+ * Called with p == NULL the function acts as a pure probe: it returns 0 when
+ * the module instance exists and -1 when it does not.
+ * Returns 0 on success, -1 when the instance is missing.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* Rx port count; falls back to the total port count when NT_RX_PORTS is absent */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL is mandatory (module_get_register) */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* STATUS/DBG/MAC_IF are optional (module_query_register); the
+	 * corresponding getters return ~0 when a register is absent
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read the current per-MAC-port block mask from hardware. */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/* SF RAM overflow counter; 0xffffffff when the optional STATUS register is absent. */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_sf_ram_of) :
+	       0xffffffff;
+}
+
+/* Descriptor FIFO overflow counter; 0xffffffff when the optional STATUS register is absent. */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_descr_fifo_of) :
+	       0xffffffff;
+}
+
+/* Debug merge value; 0xffffffff when the optional DBG register is absent. */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	return (p->mp_reg_dbg) ? field_get_updated(p->mp_fld_dbg_merge) : 0xffffffff;
+}
+
+/* MAC interface error flags; 0xffffffff when the optional MAC_IF register is absent. */
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Write an explicit per-MAC-port block mask and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Engage Rx blocking: raise the statistics-drop, keep-alive and all
+ * MAC-port block bits.  Skipped while an administrative block is in force
+ * so the CTRL register stays under administrative control.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Release Rx blocking.  The MAC-port mask keeps the bits above the active
+ * port count set (secondary adapters use the NIM count instead); on
+ * vswitch profiles only the ROA recirculate port stays blocked.
+ * NOTE(review): '~0U << n' is undefined for n >= 32 -- relies on the
+ * port/NIM counts being small; confirm.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary)
+{
+	uint32_t n_block_mask = ~0U << (b_is_secondary ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Force-block all MAC ports and latch the administrative flag so that
+ * nthw_rmc_block()/nthw_rmc_unblock() become no-ops afterwards.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..2df4462287
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * Context for one RMC FPGA module instance: product parameters read at
+ * init time plus cached register/field handles.  Populated by
+ * nthw_rmc_init(); the mp_reg_status/dbg/mac_if pointers are NULL when the
+ * FPGA image does not implement the corresponding optional register.
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports;
+	int mn_nims;
+	bool mb_is_vswitch;
+
+	/* set by nthw_rmc_administrative_block(); disables block/unblock */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/* Next free FPGA result slot per adapter; internal state, hence 'static'. */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * Set up AVR-side monitoring of one sensor, directing results into the
+ * next free FPGA result slot for this adapter.
+ *
+ * Returns the allocated FPGA result-slot index.
+ * NOTE(review): the slot index is returned (and consumed by the caller)
+ * even when the SPI setup call fails; only an error is logged.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format layout: b0,1 = endianness, b2,3 = signedness (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Read-callback for AVR-backed sensors: fetch the raw value from the FPGA
+ * result slot and store the converted reading.
+ *
+ * The raw-value buffer is zero-initialized and the sensor_read() status is
+ * checked, so a failed SPI read no longer feeds an uninitialized value into
+ * the conversion function.  NOTE(review): non-zero return is assumed to
+ * mean failure, matching the other SPI helpers in this driver -- confirm.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t p_sensor_result = 0;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	if (sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result) != 0)
+		return;
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+/*
+ * Create one AVR-monitored sensor group and configure the AVR to start
+ * producing readings for it.  The caller owns the returned object.
+ *
+ * Returns NULL on allocation failure.  A failed allocate_sensor() call is
+ * now detected before 'sg->sensor->fpga_idx' is written, which previously
+ * dereferenced NULL and leaked 'sg'.
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* allocate_sensor() already logged the failure */
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Return the next free FPGA result slot for 'adapter_no' (post-increment).
+ * NOTE(review): adapter_no is not bounds-checked against MAX_ADAPTERS and
+ * the counter never wraps back -- callers must stay within both limits.
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	uint8_t tmp = s_fpga_indexes[adapter_no];
+
+	s_fpga_indexes[adapter_no] = (uint8_t)(tmp + 1);
+
+	return tmp;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Read-callback for the FPGA die-temperature sensor; t_spi is unused.
+ *
+ * The conversion (val * 20159 - 44752896) / 16384 equals
+ * (val * 503.975 / 4096 - 273.15) * 10, i.e. the Xilinx SYSMON/XADC
+ * transfer function scaled to 0.1 degC units -- confirm against the FPGA
+ * family in use.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Keep the arithmetic signed: with the previous all-unsigned
+	 * expression any reading below 0 degC wrapped around to a huge
+	 * positive value.  (int)val cannot overflow here because the raw
+	 * ADC value fits in 16 bits (65535 * 20159 < INT_MAX).
+	 */
+	temp = ((int)val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the FPGA die-temperature sensor group for 'adapter_no'.
+ * Returns NULL (after logging) when the group cannot be allocated; the
+ * caller owns the returned object.
+ *
+ * NOTE(review): the results of tempmon_new() and allocate_sensor() are not
+ * checked here; a NULL monitor would crash the read callback, which only
+ * guards against a NULL sensor -- consider validating both.
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate an uninitialized temperature-monitor handle; logs and returns
+ * NULL on allocation failure.  All fields are populated by tempmon_init().
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *temp =
+		malloc(sizeof(struct nt_fpga_sensor_monitor));
+	if (temp == NULL)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return temp;
+}
+
+/*
+ * Resolve the TEMPMON module, its STAT register and the TEMP field for the
+ * given FPGA, caching the handles in 't'.
+ *
+ * Resolution now stops at the first failure so that a NULL module or
+ * register handle is never passed on to the next lookup (the original flow
+ * logged the error but kept going with the NULL pointer).
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return;
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return;
+	}
+	/* fetch fields: one slot for the TEMP field */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level-1 (diagnostic) SFP sensors: supply voltage, TX bias current and
+ * TX/RX optical power.
+ */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* Level-1 (diagnostic) QSFP sensors: supply voltage plus per-lane TX bias
+ * and TX/RX optical power for the four lanes.
+ */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+/*
+ * NOTE(review): these values appear to form part of the AVR wire protocol
+ * (used as the 'device' byte in sensor_mon_setup_data16) -- do not reorder
+ * existing entries; append new devices just before SENSOR_MON_DEVICE_COUNT.
+ * Confirm against the AVR firmware definition.
+ */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+#pragma pack(1)
+/* One sensor-setup entry as transmitted to the AVR over SPI (packed). */
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+#pragma pack()
+/*
+ * Full setup message; only the used prefix of setup_data is transmitted.
+ * NOTE(review): declared after '#pragma pack()', so it is not explicitly
+ * packed.  It stays padding-free only because the packed member type has
+ * alignment 1 -- confirm this matches the AVR wire layout.
+ */
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Perform one SPI transaction with the AVR: send 'txsz' bytes from 'tx',
+ * receive up to '*rxsz' bytes into 'rx'.  On success '*rxsz' is updated to
+ * the number of bytes actually returned.  Returns 0 on success, non-zero
+ * (after logging) on transfer failure.
+ *
+ * Fix: the original dereferenced '*rxsz' unconditionally while also
+ * checking 'rxsz != NULL' afterwards; the dereference is now guarded so a
+ * NULL 'rxsz' ("no response expected") is handled consistently.  The dead
+ * 'res = 1' initializer is gone as well.
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = rxsz ? (uint16_t)*rxsz : 0,
+				  .p_buf = rx };
+
+	int res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Fetch the latest AVR-produced sensor value from FPGA result slot
+ * 'fpga_idx'; propagates the nthw_spis_read_sensor() status.
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+/*
+ * Send a sensor-monitoring setup block to the AVR over SPI.  Only the used
+ * prefix of p_setup->setup_data ('setup_cnt' entries) is transmitted.
+ * Returns 0 on success, non-zero on SPI failure or when the AVR
+ * unexpectedly returns data.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	/* header plus only the populated setup_data entries */
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Start, stop or reset AVR sensor monitoring.  Returns 0 on success,
+ * non-zero on SPI failure or when the AVR unexpectedly returns data.
+ * NOTE(review): 'ctrl' (an enum, typically 4 bytes) is reinterpreted as a
+ * uint16_t buffer of sizeof(ctrl) bytes -- relies on the host byte order
+ * matching what the AVR expects; confirm.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..e944dca5ce
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Primary/Secondary
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_PRIMARY, /* Adapter is primary in the bonding */
+	NT_BONDING_SECONDARY, /* Adapter is secondary in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include "sensors.h"
#include "ntlog.h"

#include <stdio.h>
#include <string.h>
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/*
 * Pass-through conversion: reinterpret the low 16 bits of the raw
 * reading as a signed quantity.
 */
int null_signed(uint32_t p_sensor_result)
{
	const int16_t raw = (int16_t)p_sensor_result;

	return raw;
}
+
/*
 * Pass-through conversion: keep the low 16 bits of the raw reading as
 * an unsigned quantity.
 */
int null_unsigned(uint32_t p_sensor_result)
{
	const uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * EXAR7724: convert a raw Vch reading to Napatech units.
 * Datasheet: Vout = ReadVal * 0.015 V (PRESCALE already accounted for),
 * i.e. 15 mV per count.
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	const uint32_t millivolts = p_sensor_result * 15U;

	return (int)millivolts; /* NT unit: 1 mV */
}
+
/*
 * EXAR7724: convert a raw Vin reading to Napatech units.
 * Datasheet: Vout = ReadVal * 0.0125 V, i.e. 12.5 mV per count.
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	const uint32_t millivolts = (p_sensor_result * 25U) / 2U;

	return (int)millivolts; /* NT unit: 1 mV */
}
+
/*
 * EXAR7724: convert a raw Tj reading to Napatech units (0.1 degC).
 * Datasheet: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K
 *                            = ReadVal * 5K
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * 2730 (rather than the more exact 2732) converts 0.1 K to 0.1 degC;
	 * with 5-degree steps the rounder offset gives more natural values.
	 */
	const uint32_t deci_kelvin = p_sensor_result * 50U;

	return (int)(deci_kelvin - 2730U);
}
+
/*
 * Decode the Linear Technology "Linear_5s_11s" (L11) format:
 * result = Y * 2**N, where N = b[15:11] is a 5-bit two's complement
 * integer and Y = b[10:0] is an 11-bit two's complement integer.
 * multiplier scales the decoded value to Napatech units.
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	/* 11-bit two's complement mantissa, b[10:0] */
	int mantissa = value & 0x07FF;

	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* 5-bit two's complement exponent, b[15:11] */
	int exponent = (value >> 11) & 0x1F;

	if (exponent & 0x10)
		exponent -= 0x20;

	int result = mantissa * multiplier;

	/* Multiply/divide rather than shift: result may be negative and
	 * left-shifting a negative value is undefined behavior.
	 */
	if (exponent > 0)
		result *= (1 << exponent);
	else if (exponent < 0)
		result /= (1 << (-exponent));

	return result;
}
+
/*
 * LTM4676: temperature from Linear_5s_11s format. NT unit: 0.1 degC.
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	/* NOTE(review): the uint16_t cast discards the sign of negative
	 * decoded temperatures - confirm this is intended.
	 */
	const uint16_t deci_celsius = (uint16_t)conv5s_11s(p_sensor_result, 10);

	return deci_celsius;
}
+
/*
 * MPS MP2886A: READ_TEMPERATURE (register 0x8D) is a 2-byte unsigned
 * integer; pass the low 16 bits through unchanged. NT unit: 0.1 degC.
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	return (int)(p_sensor_result & 0xFFFFU);
}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
/*
 * DS1775: convert a raw temperature reading to Napatech units.
 * NT unit: 0.1 degC; native unit: 1/256 degC.
 */
int ds1775_t(uint32_t p_sensor_result)
{
	return (int)((p_sensor_result * 10U) / 256U);
}
+
/*
 * FAN: convert a tick count to RPM.
 * NT unit: RPM; native unit: 2 ticks per revolution.
 */
int fan(uint32_t p_sensor_result)
{
	return (int)((p_sensor_result * 60U) / 4U);
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/*
 * Alarm reporting policy attached to each sensor.
 * NOTE(review): the dispatch semantics are not visible in this file -
 * presumably ENABLE raises alarm events, LOG only records them and
 * DISABLE suppresses them; confirm against the alarm-handling code.
 */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM,
	NT_SENSOR_LOG_ALARM,
	NT_SENSOR_DISABLE_ALARM,
};
+
/*
 * Sensor Class types
 */
enum nt_sensor_class_e {
	NT_SENSOR_CLASS_FPGA =
		0, /* Class for FPGA based sensors e.g FPGA temperature */
	NT_SENSOR_CLASS_MCU =
		1, /* Class for MCU based sensors e.g MCU temperature */
	NT_SENSOR_CLASS_PSU =
		2, /* Class for PSU based sensors e.g PSU temperature */
	NT_SENSOR_CLASS_PCB =
		3, /* Class for PCB based sensors e.g PCB temperature */
	NT_SENSOR_CLASS_NIM =
		4, /* Class for NIM based sensors e.g NIM temperature */
	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
};

/* Convenience alias for enum nt_sensor_class_e. */
typedef enum nt_sensor_class_e nt_sensor_class_t;
+
/*
 * Port of the sensor class
 * One concrete sensor instance on an adapter or port.
 */
struct nt_adapter_sensor {
	uint8_t m_adapter_no; /* adapter number; initialized to 0xFF (unassigned) */
	uint8_t m_intf_no; /* interface/port number; initialized to 0xFF (unassigned) */
	uint8_t fpga_idx; /* for AVR sensors */
	enum sensor_mon_sign si; /* signedness used when interpreting the raw reading */
	struct nt_info_sensor_s info; /* public info: value, extremes, name, state */
	enum nt_sensor_event_alarm_e alarm; /* alarm policy (see nt_sensor_event_alarm_e) */
	bool m_enable_alarm; /* alarm reporting enabled; set true on allocation */
};
+
/*
 * FPGA register handles needed to read one sensor: the owning FPGA and
 * module plus the register and its fields carrying the raw data.
 */
struct nt_fpga_sensor_monitor {
	nt_fpga_t *fpga; /* FPGA instance the sensor lives on */
	nt_module_t *mod; /* module within the FPGA */

	nt_register_t *reg; /* register holding the raw sensor data */
	nt_field_t **fields; /* array of field handles within reg */
	uint8_t fields_num; /* number of entries in fields */
};
+
/*
 * Sensor description.
 * Describe the static behavior of the sensor.
 */
struct nt_adapter_sensor_description {
	enum nt_sensor_type_e type; /* Sensor type. */
	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
	unsigned int index; /* Sensor group index. */
	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
	char name[20]; /* Sensor name; must be NUL-terminated (read with strlen()) */
};
+
/*
 * One node in the singly linked list of sensors. Owns its sensor and
 * monitor; both are freed by sensor_deinit().
 */
struct nt_sensor_group {
	struct nt_adapter_sensor *sensor; /* sensor state and public info (owned) */
	struct nt_fpga_sensor_monitor *monitor; /* FPGA register handles (owned) */
	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* fetch a fresh reading */

	/* conv params are needed to call current conversion functions */
	int (*conv_func)(uint32_t p_sensor_result); /* raw reading -> NT units */
	/* i2c interface for NIM sensors */

	struct nt_sensor_group *next; /* next group; NULL terminates the list */
};
+
/* Reset all members of a group to NULL. */
void init_sensor_group(struct nt_sensor_group *sg);

/* Store a new reading and track the lowest/highest values seen. */
void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);

/* Free a group together with its owned sensor and monitor. */
void sensor_deinit(struct nt_sensor_group *sg);

/* getters */
int32_t get_value(struct nt_sensor_group *sg);
int32_t get_lowest(struct nt_sensor_group *sg);
int32_t get_highest(struct nt_sensor_group *sg);
char *get_name(struct nt_sensor_group *sg);

/* Allocators: return a heap-allocated sensor (caller frees) or NULL. */
struct nt_adapter_sensor *
allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
		enum sensor_mon_sign si);
struct nt_adapter_sensor *
allocate_sensor_by_description(uint8_t adapter_or_port_index,
			       enum nt_sensor_source_e ssrc,
			       struct nt_adapter_sensor_description *descr);

/* conversion functions: map a raw device reading to NT units */
int null_signed(uint32_t p_sensor_result);
int null_unsigned(uint32_t p_sensor_result);
int exar7724_tj(uint32_t p_sensor_result);
int max6642_t(uint32_t p_sensor_result);
int ds1775_t(uint32_t p_sensor_result);
int ltm4676_tj(uint32_t p_sensor_result);
int exar7724_vch(uint32_t p_sensor_result);
int exar7724_vin(uint32_t p_sensor_result);
int mp2886a_tj(uint32_t p_sensor_result);
int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
/* Maximum sensor-name length, excluding the terminating NUL. */
#define NT_INFO_SENSOR_NAME 50
struct nt_info_sensor_s {
	enum nt_sensor_source_e
	source; /* The source of the sensor (port or adapter on which the sensor resides) */
	/*
	 * The source index - the adapter number for adapter sensors and port number for port
	 * sensors
	 */
	uint32_t source_index;
	/*
	 * The sensor index within the source index (sensor number on the adapter or sensor number
	 * on the port)
	 */
	uint32_t sensor_index;
	enum nt_sensor_type_e type; /* The sensor type */
	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
	int32_t value; /* The current value */
	int32_t value_lowest; /* The lowest value registered */
	int32_t value_highest; /* The highest value registered */
	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (always NUL-terminated) */
	enum nt_adapter_type_e
	adapter_type; /* The adapter type where the sensor resides */
};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v14 4/8] net/ntnic: adds flow related FPGA functionality
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-09-04 13:53   ` Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:53 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct cat_nthw *cat_nthw_new(void)
+{
+	struct cat_nthw *p = malloc(sizeof(struct cat_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/* Forward the debug-mode setting to the underlying CAT FPGA module. */
void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
{
	/* NOTE(review): p is dereferenced without a NULL check, unlike
	 * cat_nthw_delete() - callers must pass a valid instance.
	 */
	module_set_debug_mode(p->m_cat, n_debug_mode);
}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/*
+ * Select the CFN (categorizer flow number) table entry that subsequent
+ * CFN data-field writes will target (writes the CTRL ADR field).
+ */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN control CNT field (entry count for the next flush).
+ *
+ * This function was clearly intended to be named cat_nthw_cfn_cnt(), to
+ * match its siblings (cat_nthw_cfn_select(), cat_nthw_kce_cnt(), ...);
+ * the one-letter name "r" looks like an editing accident.  The properly
+ * named function carries the implementation; r() is kept as a thin
+ * wrapper so existing callers keep linking.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/* Deprecated misnamed alias of cat_nthw_cfn_cnt(); do not use in new code. */
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	cat_nthw_cfn_cnt(p, val);
+}
+
+/*
+ * CFN field setters.
+ *
+ * Each setter writes VAL into one field of the CAT_CFN_DATA register image;
+ * the *_flush() helpers below suggest writes are staged in a shadow copy and
+ * pushed to hardware only by register_flush() (NOTE(review): confirm against
+ * the nthw_fpga_model register API).  Setters guarded by assert() operate on
+ * fields that were looked up with register_query_field() during init and may
+ * therefore be NULL on FPGA versions that lack them.
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+/* PTC_* fields: protocol-test conditions matched by this category entry. */
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+/* PTC_TNL_* fields: same tests applied to the inner (tunneled) headers. */
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/* Optional field (register_query_field at init); assert traps absent field. */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+/* ERR_* checksum/TTL fields below are likewise optional on older FPGAs. */
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/*
+ * Multi-word write: copies mn_words 32-bit words from VAL into the PM_CMP
+ * (port-matcher compare) field.  Caller must supply at least
+ * p->mp_cfn_data_pm_cmp->mn_words elements in VAL.
+ */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR exists only on FPGA variants with a second KM engine (optional). */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Push the staged CFN CTRL and DATA register images to the hardware. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE / KCS / FTE setters.
+ *
+ * INDEX selects the engine instance (0 or 1).  Judging from the init code,
+ * newer FPGA register maps expose two instances (KCE0/KCE1, ...) while
+ * older ones only populate instance 0 — callers must not pass index 1 on
+ * such FPGAs (the [1] pointers would be uninitialized/NULL).
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+/* Write the staged KCE CTRL/DATA registers of instance INDEX to hardware. */
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE (category-to-enable) setters.  The enable_* setters each turn one
+ * downstream module on/off for the selected category entry; the ones that
+ * assert() use fields resolved with register_query_field() and thus absent
+ * on some FPGA versions.
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+/* Optional fields below: assert traps use on FPGAs lacking the field. */
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Push the staged CTE CTRL and DATA register images to the hardware. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS setters: per-entry category A/B values. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT setters: color/KM data per entry; NFV_SB is an optional field. */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT setters: color/KM data per entry. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO setters: extractor dynamic-offset selection. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+/* OFS is a signed offset (int32_t), unlike the other uint32_t setters. */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK setters. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+/*
+ * RCK_DATA has no named fields here: the whole register value is set at
+ * once and explicitly marked dirty so the next flush writes it out.
+ */
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN setters: frame-length test bounds and dynamic offsets. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC / CCE / CCS setters.
+ *
+ * These whole register sets are optional: init looks them up with
+ * module_query_register()/register_query_field(), so every pointer may be
+ * NULL on FPGA versions that lack the registers.  Each setter asserts its
+ * pointer before use; callers are expected to probe availability first.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* KEY is a 2-word (64-bit) field; VAL must point to at least 2 words. */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generate a setter, cat_nthw_ccs_data_<name>(), for one field of the
+ * optional CAT_CCS_DATA register.  The assert guards against the field
+ * pointer being NULL on FPGA versions without the CCS register set.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+/*
+ * Instantiate cat_nthw_ccs_data_*() setters for every CCS data field.
+ *
+ * No trailing ';': CATNTHW_CCS_SET() expands to a complete function
+ * definition, and a semicolon after it would be a stray empty declaration
+ * at file scope, which strict ISO C forbids (flagged by -Wpedantic).
+ */
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/*
+ * Push the staged CCS CTRL and DATA register images to the hardware.
+ * CCS registers are optional; asserts trap use when they are absent.
+ */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+/* Allocate/free a CAT shadow instance. */
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+/* Bind to FPGA module instance; presumably 0 on success, -1 when the
+ * instance does not exist (mirrors csu/flm init) - confirm in flow_nthw_cat.c.
+ */
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/*
+ * Fixed: this prototype was garbled as "void r(...)"; restored as
+ * cat_nthw_cfn_cnt() to match the select/cnt pairing every other CAT
+ * table uses (kce, kcs, fte, cte, cts, cot, cct, exo, rck, len, kcc,
+ * cce, ccs).
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/*
+ * Per-table accessors. Every table follows the same pattern:
+ * select(addr) picks the row, cnt(count) sets the access count, the
+ * field setters write shadow values, and flush() pushes the shadowed
+ * ctrl/data registers to the FPGA. Tables with an 'index' argument
+ * (KCE/KCS/FTE) exist in two parallel KM interfaces.
+ */
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+/* NOTE: ofs is signed - offsets may be negative. */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+/* NOTE: key takes a pointer - multi-word value, not a single uint32_t. */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Shadow state for one CAT (categorizer) FPGA module instance: cached
+ * pointers to every register and field resolved at cat_nthw_init() time.
+ * Naming: mp_<tbl>_ctrl / mp_<tbl>_addr / mp_<tbl>_cnt select a table row,
+ * mp_<tbl>_data* are the row's fields. Two-element arrays hold the two
+ * KM interface copies of a table.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN: categorizer function table */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE/KCS/FTE: one copy per KM interface */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE: per-module enable bits */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug mode setting to the underlying CSU FPGA module.
+ * NOTE(review): p->m_csu is dereferenced unchecked - p must have been
+ * initialized via csu_nthw_init() first.
+ */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/*
+ * Allocate a zeroed csu_nthw instance; returns NULL on allocation failure.
+ * calloc() replaces the original malloc()+memset() pair - it guarantees
+ * zero-initialization in a single call.
+ */
+struct csu_nthw *csu_nthw_new(void)
+{
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/* Release an instance allocated by csu_nthw_new(); NULL is a no-op. */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before returning it to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a csu_nthw instance to CSU module 'n_instance' of the given FPGA and
+ * resolve all RCP register/field handles.
+ *
+ * When p is NULL the call only probes for the module's existence.
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	/* n_instance is stored in a uint8_t below, hence the < 256 bound */
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* probe-only mode: report whether the module exists */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Resolve RCP control (addr/cnt) and data (per-layer cmd) fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/* Select the RCP recipe row to access on the next flush. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* Set the RCP access count (consecutive rows written per flush). */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Set the outer-L3 checksum command in the shadowed RCP data register;
+ * takes effect after csu_nthw_rcp_flush().
+ */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/* Set the outer-L4 checksum command in the shadowed RCP data register;
+ * takes effect after csu_nthw_rcp_flush().
+ */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/* Set the inner (tunneled) L3 checksum command in the shadowed RCP data
+ * register; takes effect after csu_nthw_rcp_flush().
+ */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/* Set the inner (tunneled) L4 checksum command in the shadowed RCP data
+ * register; takes effect after csu_nthw_rcp_flush().
+ */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/* Push the shadowed RCP control and data registers to the FPGA, committing
+ * the previously staged select/cnt/cmd values.
+ */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Shadow state for one CSU (checksum update) FPGA module instance:
+ * register/field handles resolved once by csu_nthw_init().
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	/* RCP recipe table: ctrl selects a row, data holds the commands */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+/* Allocate/free a CSU shadow instance. */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+/* 0 on success, -1 if the FPGA module instance does not exist. */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* Stage RCP row/count and checksum commands, then flush to commit. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zeroed flm_nthw instance; returns NULL on allocation failure.
+ * calloc() replaces the original malloc()+memset() pair - it guarantees
+ * zero-initialization in a single call.
+ */
+struct flm_nthw *flm_nthw_new(void)
+{
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+/* Release an instance allocated by flm_nthw_new(); NULL is a no-op. */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before returning it to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug mode setting to the underlying FLM FPGA module.
+ * NOTE(review): p->m_flm is dereferenced unchecked - p must have been
+ * initialized via flm_nthw_init() first.
+ */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * CONTROL register field setters.
+ * field_set_val32() updates the driver-side shadow value of a field only;
+ * the hardware write is performed by flm_nthw_control_flush() via
+ * register_flush(). NOTE(review): presumed from the set/flush pairing used
+ * throughout this file -- confirm against the nthw_fpga_model API.
+ */
+
+/* CONTROL:ENABLE */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+/* CONTROL:INIT */
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+/* CONTROL:LDS */
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+/* CONTROL:LFS */
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+/* CONTROL:LIS */
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+/* CONTROL:UDS */
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+/* CONTROL:UIS */
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+/* CONTROL:RDS */
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+/* CONTROL:RIS */
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/*
+ * CONTROL:PDS -- pointer is asserted non-NULL: the field is presumably
+ * absent on some FPGA variants (TODO confirm which images provide it).
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+/* CONTROL:PIS -- optional field, see flm_nthw_control_pds(). */
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+/* CONTROL:CRCWR */
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+/* CONTROL:CRCRD */
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+/* CONTROL:RBL */
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+/* CONTROL:EAB */
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+/* CONTROL:SPLIT_SDRAM_USAGE */
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write the staged CONTROL register value to hardware. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * STATUS register accessors. The "get" flag selects direction: non-zero
+ * copies the shadow field into *val; for the read/write fields (CRITICAL,
+ * PANIC, CRCERR) get == 0 instead stages *val into the shadow. Call
+ * flm_nthw_status_update() first to refresh the shadow from hardware and
+ * flm_nthw_status_flush() afterwards to write staged values back --
+ * NOTE(review): presumed register_update()/register_flush() semantics,
+ * confirm against nthw_fpga_model.
+ */
+
+/* STATUS:CALIBDONE (read only; a no-op when get == 0). */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+/* STATUS:INITDONE (read only). */
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+/* STATUS:IDLE (read only). */
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+/* STATUS:CRITICAL (read/write). */
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+/* STATUS:PANIC (read/write). */
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+/* STATUS:CRCERR (read/write). */
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+/* STATUS:EFT_BP (read only). */
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write staged STATUS fields to hardware. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the STATUS shadow from hardware. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/* Stage TIMEOUT:T; written to hardware by flm_nthw_timeout_flush(). */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+/* Write the staged TIMEOUT register to hardware. */
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+/* Stage SCRUB:I; written to hardware by flm_nthw_scrub_flush(). */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+/* Write the staged SCRUB register to hardware. */
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+/*
+ * LOAD_* register setters; each value is staged in the shadow and written
+ * to hardware by the matching *_flush() below.
+ */
+
+/* LOAD_BIN:BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+/* LOAD_PPS:PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+/* LOAD_LPS:LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+/* LOAD_APS:APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * PRIO register setters: four LIMIT/FT field pairs (priority levels 0-3),
+ * staged in the shadow and written to hardware by flm_nthw_prio_flush().
+ */
+
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+/* Write the staged PRIO register to hardware. */
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table accessors: select()/cnt() address the table through PST_CTRL
+ * (ADR/CNT), bp()/pp()/tp() fill the PST_DATA shadow fields, and
+ * flm_nthw_pst_flush() writes both registers to hardware.
+ */
+
+/* PST_CTRL:ADR -- table index to access. */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+/* PST_CTRL:CNT -- number of entries to access. */
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+/* PST_DATA:BP */
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+/* PST_DATA:PP */
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+/* PST_DATA:TP */
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+/* Write PST_CTRL then PST_DATA to hardware. */
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table accessors: select()/cnt() address the table through
+ * RCP_CTRL (ADR/CNT), the remaining setters fill the RCP_DATA shadow
+ * fields, and flm_nthw_rcp_flush() writes both registers to hardware.
+ */
+
+/* RCP_CTRL:ADR -- recipe index to access. */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/* RCP_CTRL:CNT -- number of entries to access. */
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* RCP_DATA:MASK -- val must point to 10 32-bit words (320-bit mask). */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Write RCP_CTRL then RCP_DATA to hardware. */
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the BUF_CTRL register (two 32-bit words) over RAB DMA and unpack
+ * the buffer counters:
+ *   word0 bits [15:0]  -> *lrn_free
+ *   word0 bits [31:16] -> *inf_avail
+ *   word1 bits [15:0]  -> *sta_avail
+ * Returns 0 on success, non-zero if the DMA transaction could not be
+ * started or committed; the output parameters are written only on success.
+ * NOTE(review): the "& bc_mask" index wrap assumes bc_buf.size is a power
+ * of two -- confirm against the RAC DMA buffer allocation.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write word_count 32-bit words of learn data to LRN_DATA over RAB DMA.
+ * Within a single DMA transaction: announce the word count through
+ * BUF_CTRL (word0 = count, word1 = 0), write the data words, then read
+ * BUF_CTRL back to refresh *lrn_free / *inf_avail / *sta_avail (same
+ * unpacking as flm_nthw_buf_ctrl_update()).
+ * Returns 0 on success; -1 if the DMA transaction could not be started,
+ * otherwise the commit error code. Outputs are written only on success.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	if (nthw_rac_rab_dma_begin(rac) == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count 32-bit words from INF_DATA over RAB DMA into data[].
+ * Within a single DMA transaction: announce the word count through
+ * BUF_CTRL (word0 = count << 16, word1 = 0), read the data words, then
+ * read BUF_CTRL back to refresh *lrn_free / *inf_avail / *sta_avail.
+ * Returns 0 on success, non-zero on DMA begin/commit failure; data[] and
+ * the counter outputs are written only on success.
+ * NOTE(review): index wrap via "& mask" assumes DMA buffer sizes are
+ * powers of two.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count 32-bit words from STA_DATA over RAB DMA into data[].
+ * Same transaction shape as flm_nthw_inf_data_update(), except the count
+ * is announced in BUF_CTRL word1 (word0 = 0, word1 = count).
+ * Returns 0 on success, non-zero on DMA begin/commit failure; data[] and
+ * the counter outputs are written only on success.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * STAT_* counter accessors. For each counter register there is a *_cnt()
+ * reader, which copies the shadow CNT field into *val when get is
+ * non-zero (get == 0 is a no-op), and an *_update(), which refreshes the
+ * register shadow via register_update(). Registers that were looked up
+ * with module_query_register() (PRB, STA_DONE, INF_*, PCK_*, CSH_*,
+ * CUC_*) may be absent on some FPGA images -- their pointers can be NULL,
+ * hence the asserts; callers must not use them unless the FPGA provides
+ * the register (TODO confirm which images do).
+ */
+
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn _done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl _done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb _done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel _done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul _done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul _done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta _done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf _done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Per-instance state of one FLM (flow matcher) FPGA module: the FPGA and
+ * module handles plus cached pointers to every register and field that the
+ * accessors in flow_nthw_flm.c touch.  All mp_* pointers are resolved once
+ * by flm_nthw_init().
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* Load/rate registers (BIN/PPS/LPS/APS) */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* Priority limits per flow type 0..3 */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST indirect access: CTRL holds address/count, DATA the record */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP indirect access: CTRL holds address/count, DATA the record */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status stream buffer control and data registers */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics counters: one register + one CNT field per counter */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module handle. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized hfu_nthw instance.
+ * Returns NULL on allocation failure; the caller owns the object and
+ * releases it with hfu_nthw_delete().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() allocates and zeroes in one step -- replaces the
+	 * malloc() + memset() pair with the idiomatic equivalent.
+	 */
+	struct hfu_nthw *p = calloc(1, sizeof(struct hfu_nthw));
+
+	return p;
+}
+
+/*
+ * Release an hfu_nthw instance previously obtained from hfu_nthw_new().
+ * Safe to call with NULL.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the object before returning it to the allocator. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the HFU module instance n_instance and cache all of its register
+ * and field handles in *p.
+ *
+ * When p is NULL the call only probes for the module: it returns 0 if the
+ * instance exists in this FPGA image and -1 otherwise.  With a non-NULL p
+ * it returns 0 on success, or -1 (with a log message) when the instance is
+ * absent.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of issuing a second,
+	 * redundant fpga_query_module() lookup (matches hsh_nthw_init()).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * RCP field staging helpers.  hfu_nthw_rcp_select()/hfu_nthw_rcp_cnt() set
+ * the target record address and count in the RCP_CTRL shadow; the remaining
+ * setters each write one field of the RCP_DATA shadow.  Presumably nothing
+ * reaches hardware until hfu_nthw_rcp_flush() is called -- confirm against
+ * the nthw_fpga_model register semantics.
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/*
+ * Commit the staged RCP_CTRL and RCP_DATA shadows.  CTRL is flushed before
+ * DATA -- presumably so the address/count select the target record first;
+ * confirm against the RAC/register_flush() semantics.
+ */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Per-instance state of one HFU FPGA module: FPGA/module handles plus cached
+ * pointers to the RCP_CTRL/RCP_DATA registers and every RCP_DATA field used
+ * by the setters in flow_nthw_hfu.c.  Resolved once by hfu_nthw_init().
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP indirect access: CTRL holds address/count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP_DATA record fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the requested debug mode to the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a hsh_nthw context.
+ * Returns NULL on allocation failure.  The caller owns the object and
+ * must release it with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() zeroes in one step; replaces the malloc+memset idiom. */
+	return calloc(1, sizeof(struct hsh_nthw));
+}
+
+/* Scrub and free a context created by hsh_nthw_new(); NULL is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p) {
+		/* Clear the struct before freeing to invalidate stale handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a hsh_nthw context to HSH module instance n_instance of p_fpga and
+ * cache its RCP control/data register and field handles, then write benign
+ * defaults to RCP record 0.
+ *
+ * If p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Otherwise returns -1 when the module is
+ * missing, 0 on success.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache control register (record address/count) and data fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* AUTO_IPV4_MASK is optional: query (not get) so a missing field
+	 * yields NULL instead of failing; the setter NULL-checks it.
+	 */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: write zero defaults into RCP record 0.
+	 * NOTE(review): val[] holds 10 words; assumes
+	 * mp_rcp_data_mac_port_mask->mn_words <= 10 -- confirm against the
+	 * FPGA register layout.
+	 */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	/* NOTE(review): 31 presumably selects the default/disabled hash
+	 * type -- confirm against the HSH register documentation.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * Per-field RCP shadow setters.  Each call updates only the software
+ * shadow of the RCP registers; hsh_nthw_rcp_flush() writes CTRL and DATA
+ * to the hardware.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* val must hold at least mn_words 32-bit words for the field. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* NOTE(review): word count is hard-coded to 10; presumably matches the
+ * WORD_MASK field width -- confirm against the register definition.
+ */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* Silently ignored when the optional AUTO_IPV4_MASK field is absent. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Write the shadowed RCP CTRL and DATA registers to the hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Cached register/field handles for one HSH module instance (see
+ * hsh_nthw_init() for how they are resolved).
+ */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register: record address and count. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-field handles. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	/* Optional field; NULL when absent from the FPGA image. */
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the requested debug mode to the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a hst_nthw context.
+ * Returns NULL on allocation failure.  The caller owns the object and
+ * must release it with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc() zeroes in one step; replaces the malloc+memset idiom. */
+	return calloc(1, sizeof(struct hst_nthw));
+}
+
+/* Scrub and free a context created by hst_nthw_new(); NULL is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p) {
+		/* Clear the struct before freeing to invalidate stale handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a hst_nthw context to HST module instance n_instance of p_fpga and
+ * cache its RCP control/data register and field handles.
+ *
+ * If p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Otherwise returns -1 when the module is
+ * missing, 0 on success.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default record is written and
+ * no register_flush() is issued here -- confirm this is intentional.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache control register (record address/count) and data fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * Per-field RCP shadow setters.  Each call updates only the software
+ * shadow of the RCP registers; hst_nthw_rcp_flush() writes CTRL and DATA
+ * to the hardware.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Write the shadowed RCP CTRL and DATA registers to the hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one HST module instance (see
+ * hst_nthw_init() for how they are resolved).
+ */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register: record address and count. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its per-field handles. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <assert.h> /* assert */
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memset */
+
+/* Propagate the requested debug mode to the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize an ifr_nthw context.
+ * Returns NULL on allocation failure.  The caller owns the object and
+ * must release it with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc() zeroes in one step; replaces the malloc+memset idiom. */
+	return calloc(1, sizeof(struct ifr_nthw));
+}
+
+/* Scrub and free a context created by ifr_nthw_new(); NULL is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p) {
+		/* Clear the struct before freeing to invalidate stale handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an ifr_nthw context to IFR module instance n_instance of p_fpga
+ * and cache its RCP control/data register and field handles.
+ *
+ * If p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.  Otherwise returns -1 when the module is
+ * missing, 0 on success.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle already obtained by the probe above instead of
+	 * querying the FPGA a second time (consistent with hsh/hst init).
+	 */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Per-field RCP shadow setters; ifr_nthw_rcp_flush() writes to hardware.
+ * The asserts guard against use before ifr_nthw_init() has run.
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the MTU field in the RCP shadow record. */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Assert the field actually written (was mp_rcp_data_en: copy-paste
+	 * slip from ifr_nthw_rcp_en()).
+	 */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Write the shadowed RCP CTRL and DATA registers to the hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one IFR module instance (see
+ * ifr_nthw_init() for how they are resolved).
+ */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register: record address and count. */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its per-field handles. */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Saturate a presence/count parameter to the boolean range {0, 1}. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	if (val > 1)
+		return 1;
+	return val;
+}
+
+/*
+ * Allocate and zero-initialize an info_nthw context.
+ * Returns NULL on allocation failure.  The caller owns the object and
+ * must release it with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc() zeroes in one step; replaces the malloc+memset idiom. */
+	return calloc(1, sizeof(struct info_nthw));
+}
+
+/* Scrub and free a context created by info_nthw_new(); NULL is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p) {
+		/* Clear the struct before freeing to invalidate stale handles. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Populate an info_nthw context with the capability/dimension parameters
+ * of the FPGA product: module presence flags gate the corresponding counts
+ * (a count is forced to 0 when its module is absent).  Always returns 0.
+ *
+ * NOTE(review): unlike the other *_nthw_init() functions this one
+ * dereferences p unconditionally (no NULL probe path) and queries no
+ * module instance -- confirm callers never pass p == NULL.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* Presence flags, clamped to {0, 1} so they can scale counts below. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function: TPE is only usable when all
+	 * five sub-modules are present.
+	 */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types only exist when KM and/or FLM is present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS usable queues are limited by the smaller of RX and TX counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): -1 presumably means "interface not mapped" -- confirm
+	 * how callers interpret the sentinel.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Trivial accessors for the capability counts cached by info_nthw_init().
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): m_cat_km_if_m0/m1 are initialized with a -1 default; if
+ * stored signed, that sentinel is returned here as a very large unsigned
+ * value -- confirm callers expect this.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Interface to the INFO FPGA module: an opaque handle plus read-only
+ * accessors for the capability counts (ports, categories, queues, ...)
+ * of the loaded FPGA image.
+ *
+ * NOTE: the include guard avoids identifiers starting with a double
+ * underscore, which are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef FLOW_NTHW_INFO_H_
+#define FLOW_NTHW_INFO_H_
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Allocation / teardown / attach to an FPGA module instance. */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Capability accessors; each returns one cached counter. */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/* Capability counters; values are exposed 1:1 by the getters above. */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* FLOW_NTHW_INFO_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA module handle. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IOA handle.
+ * Returns NULL on allocation failure; the caller owns the result and
+ * must release it with ioa_nthw_delete().
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() == malloc()+memset(0), with overflow-checked sizing. */
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/*
+ * Release an IOA handle created by ioa_nthw_new().
+ * NULL is accepted and ignored.  The structure is scrubbed before being
+ * freed so stale pointers fail fast rather than silently.
+ */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an IOA handle to FPGA module instance @n_instance and cache all
+ * register/field handles used by the accessors in this file.
+ *
+ * Probe mode: when @p is NULL the function only reports whether the
+ * module instance exists (0 = exists, -1 = not present).
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only call: no handle to fill in. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP: recipe control/data register pair and its fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 */
+		/*
+		 * EPP registers are optional: module_query_register() may
+		 * return NULL on images without them, and the accessors
+		 * below NULL-check the cached fields accordingly.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/* RCP */
+/*
+ * Each helper below stages one field value in the shadow register;
+ * ioa_nthw_rcp_flush() then pushes the control and data registers.
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/*
+ * EPP accessors: these registers/fields are optional and may have been
+ * left NULL by ioa_nthw_init(), hence the guards -- writes to absent
+ * fields are silently ignored.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Interface to the IOA FPGA module (recipe, VLAN TPID and optional EPP
+ * register access).
+ *
+ * NOTE: the include guard avoids identifiers starting with a double
+ * underscore, which are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef FLOW_NTHW_IOA_H_
+#define FLOW_NTHW_IOA_H_
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/* Cached register/field handles; populated by ioa_nthw_init(). */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP handles; NULL when absent from the FPGA image. */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* FLOW_NTHW_IOA_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write @val to the field @_a only when the cached field pointer is
+ * non-NULL (optional fields may be absent on some FPGA images).
+ * The temporary evaluates @_a exactly once; it is given a distinct
+ * name instead of the original confusing "(a)" declarator.
+ */
+#define CHECK_AND_SET_VALUE(_a, val)                      \
+	do {                                              \
+		__typeof__(_a) field_ = (_a);             \
+		if (field_)                               \
+			field_set_val32(field_, val);     \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM module handle. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized KM handle.
+ * Returns NULL on allocation failure; the caller owns the result and
+ * must release it with km_nthw_delete().
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() == malloc()+memset(0), with overflow-checked sizing. */
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/*
+ * Release a KM handle created by km_nthw_new().
+ * NULL is accepted and ignored.  The structure is scrubbed before being
+ * freed so stale pointers fail fast rather than silently.
+ */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a KM handle to FPGA module instance @n_instance and cache all
+ * register/field handles (RCP, CAM, TCAM, TCI, TCQ).
+ *
+ * Probe mode: when @p is NULL the function only reports whether the
+ * module instance exists (0 = exists, -1 = not present).
+ * Fields looked up with register_query_field() are version-dependent
+ * and may be cached as NULL; setters guard for that.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only call: no handle to fill in. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* SW8/DW8 presence distinguishes FPGA variants; see v0.6+ block below. */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0)
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		/* Older images name the B-side fields QW0/QW4/SW8/SW9. */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/* RCP */
+/*
+ * Each helper stages one field of the KM RCP (recipe) record;
+ * km_nthw_rcp_flush() pushes the staged control/data registers.
+ * Helpers using CHECK_AND_SET_VALUE() cover optional fields whose
+ * pointers may be NULL on some FPGA versions (see km_nthw_init).
+ *
+ * Fix vs. original: the stray ';' after each function body has been
+ * removed -- ISO C does not allow extra semicolons at file scope.
+ */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+/* Multi-word masks: the field's own word count bounds the copy. */
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+} /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * CAM bank accessors: select the CAM entry (address + count), stage the
+ * six key words (W0..W5) and flow types (FT0..FT5), then write both
+ * registers with km_nthw_cam_flush().
+ *
+ * NOTE(review): dropped the stray ';' after each function body — an empty
+ * declaration at file scope is not valid ISO C (-pedantic).
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* Write the staged CAM CTRL and DATA registers to hardware. */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/*
+ * TCAM / TCI / TCQ accessors.  Same staging pattern as RCP/CAM: stage
+ * fields, then push with the matching *_flush().
+ *
+ * NOTE(review): dropped the stray ';' after each function body — an empty
+ * declaration at file scope is not valid ISO C (-pedantic).
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+/* val points to 3 x 32-bit words staged into the TCAM T field. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* Optional field: only present on FPGA versions providing BANK_MASK. */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant (3 x 32-bit words) — to use in v4. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA KM module instance, release. */
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+/*
+ * NOTE(review): struct km_nthw has no mp_rcp_data_mask_d_a field and the
+ * definition of this setter is not in this file chunk — confirm which
+ * register field it drives.
+ */
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+/* val: 3 x 32-bit words. */
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+/* val: 3 x 32-bit words (72-bit qualifier, v4). */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/*
+ * Register and field handles for one KM module instance.
+ * All mp_* pointers are resolved by km_nthw_init(); handles for fields
+ * that do not exist on the detected FPGA version may be NULL (the
+ * corresponding setters guard for that).
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	/* RCP recipe: CTRL (address/count) + DATA (field values). */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	/* CAM bank: key words W0..W5 and flow types FT0..FT5. */
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	/* TCAM bank. */
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	/* TCAM category information (color / flow type). */
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	/* TCAM qualifier. */
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB module model. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized pdb_nthw instance.
+ * Returns NULL on allocation failure; release with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() provides the zero fill malloc()+memset() did, in one call */
+	struct pdb_nthw *p = calloc(1, sizeof(struct pdb_nthw));
+
+	return p;
+}
+
+/*
+ * Scrub and release an instance allocated by pdb_nthw_new().
+ * Passing NULL is a no-op.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind p to PDB module instance n_instance of p_fpga and resolve the
+ * register/field handles used by the setters below.
+ *
+ * p may be NULL, in which case the call only probes whether the module
+ * instance exists. Returns 0 on success, -1 if the FPGA has no such
+ * PDB instance.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/*
+	 * Optional field: queried (not fetched) — presumably NULL when the
+	 * FPGA version lacks it; the setter guards for NULL. Confirm
+	 * register_query_field() semantics in nthw_fpga_model.
+	 */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/*
+ * PDB RCP field setters: each stages one field of the RCP data register;
+ * pdb_nthw_rcp_flush() writes the staged CTRL/DATA registers to hardware.
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+/* Relative offsets are signed: negative values are meaningful. */
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* Optional field (see init): silently ignored when absent on this FPGA. */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Register and field handles for one PDB module instance.
+ * Resolved by pdb_nthw_init(); mp_rcp_data_pcap_keep_fcs is optional and
+ * may be NULL on FPGA versions lacking the field.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	/* RCP recipe: CTRL (address/count) + DATA (field values). */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional; NULL when absent */
+
+	/* Global PDB configuration. */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA PDB module instance, release. */
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+/* Duplicate declaration of pdb_nthw_config_port_ofs removed. */
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL module model. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized qsl_nthw instance.
+ * Returns NULL on allocation failure; release with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() provides the zero fill malloc()+memset() did, in one call */
+	struct qsl_nthw *p = calloc(1, sizeof(struct qsl_nthw));
+
+	return p;
+}
+
+/*
+ * Scrub and release an instance allocated by qsl_nthw_new().
+ * Passing NULL is a no-op.
+ */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind p to QSL module instance n_instance of p_fpga and resolve the
+ * register/field handles used by the setters below. Handles looked up
+ * with register_query_field()/module_query_register() are optional and
+ * left NULL when the FPGA version lacks them.
+ *
+ * p may be NULL, in which case the call only probes whether the module
+ * instance exists. Returns 0 on success, -1 if the FPGA has no such
+ * QSL instance.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* Optional fields; NULL when absent on this FPGA version. */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* Placeholder: no per-index setup is needed for QSL; always returns 0. */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/*
+ * QSL field setters: each stages one field; the *_flush() helpers write
+ * the staged CTRL/DATA register pairs to hardware. NULL-guarded setters
+ * cover optional fields absent on some FPGA versions (see init).
+ *
+ * NOTE(review): dropped stray ';' after function bodies (invalid ISO C at
+ * file scope) and blank lines between signature and '{'.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX — module removed from v0.7+; handles may be NULL. */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Guard the handle actually written (was testing mp_ltx_addr). */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+/*
+ * NOTE(review): flushes unconditionally, but mp_ltx_ctrl/mp_ltx_data are
+ * NULL on v0.7+ FPGAs where LTX was removed — confirm register_flush()
+ * tolerates NULL or that callers only reach this when LTX exists.
+ */
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_ltx_ctrl, 1);
+	register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one instance of the QSL FPGA module.  Caches the module,
+ * register and field pointers so setters can write fields directly.
+ * Fields looked up with register_query_field() may be NULL when the
+ * FPGA variant lacks them; the corresponding setters NULL-guard.
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP (recipe) control/data registers and fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX control/data registers and fields */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST control/data registers and fields (several are optional) */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN control/data registers and fields */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ control/data registers and fields */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying RMC module handle. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rmc_nthw handle.
+ * Returns NULL on allocation failure; the caller owns the handle and
+ * releases it with rmc_nthw_delete().
+ */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	/* calloc replaces the malloc+memset pair and zero-fills atomically */
+	struct rmc_nthw *p = calloc(1, sizeof(struct rmc_nthw));
+
+	return p;
+}
+
+/* Release a handle created by rmc_nthw_new(); NULL is a safe no-op.
+ * The memset scrubs stale register/field pointers before the free to
+ * make use-after-free easier to catch (not a security scrub).
+ */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Bind handle p to RMC module instance n_instance of p_fpga and resolve
+ * its register/field pointers.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL it only probes whether the instance exists.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* RPP_SLICE is optional: query (may yield NULL) instead of get */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* Placeholder setup hook; no per-index configuration is needed for RMC.
+ * Always returns 0.
+ */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL */
+/* Stage the BLOCK_STATT control field. */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+/* Stage the BLOCK_KEEPA control field. */
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* Stage the BLOCK_RPP_SLICE control field; the field is optional
+ * (queried in init) and silently skipped when absent.
+ */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+/* Stage the BLOCK_MAC_PORT control field. */
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+/* Stage the LAG_PHY_ODD_EVEN control field. */
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+/* Write the staged CTRL register out to the FPGA. */
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one instance of the RMC FPGA module; caches the module
+ * handle plus the CTRL register and its fields resolved in
+ * rmc_nthw_init().  mp_ctrl_block_rpp_slice may be NULL (optional).
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying ROA module handle. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized roa_nthw handle.
+ * Returns NULL on allocation failure; the caller owns the handle and
+ * releases it with roa_nthw_delete().
+ */
+struct roa_nthw *roa_nthw_new(void)
+{
+	/* calloc replaces the malloc+memset pair and zero-fills atomically */
+	struct roa_nthw *p = calloc(1, sizeof(struct roa_nthw));
+
+	return p;
+}
+
+/* Release a handle created by roa_nthw_new(); NULL is a safe no-op.
+ * The memset scrubs stale pointers before the free (debug aid, not a
+ * security scrub).
+ */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Bind handle p to ROA module instance n_instance of p_fpga and resolve
+ * all register/field pointers (TUN HDR, TUN CFG, CONFIG, LAG).
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL it only probes whether the instance exists.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR */
+/* Select the TUNHDR table entry (address) to operate on. */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+/* Set the TUNHDR control count field. */
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* Stage a tunnel header fragment; val must point to at least 4
+ * uint32_t words (the count passed to field_set_val).
+ */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+/* Write the staged TUNHDR control and data registers out to the FPGA. */
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG */
+/* Select the TUNCFG table entry (address) to operate on. */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+/* Set the TUNCFG control count field. */
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+/* Stage the tunnel length field. */
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+/* Stage the tunnel type field. */
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+/* Stage the tunnel VLAN field. */
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+/* Stage the tunnel IP type field. */
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+/* Stage the IP-checksum update field. */
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+/* Stage the precalculated IP checksum field. */
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+/* Stage the IP total-length update field. */
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+/* Stage the precalculated IP total-length field. */
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+/* Stage the VXLAN UDP-length update field. */
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Stage the TX LAG index field.  (Dropped the stray ';' after the
+ * closing brace: it was a spurious empty declaration that triggers
+ * -Wpedantic warnings and differs from every sibling setter.)
+ */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+/* Stage the recirculate field. */
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+/* Stage the push-tunnel field. */
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+/* Stage the recirculation port field. */
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+/* Stage the recirculation bypass field. */
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+/* Write the staged TUNCFG control and data registers out to the FPGA. */
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG */
+/* Stage the forward-recirculate config field. */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+/* Stage the forward-normal-packets config field. */
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+/* Stage the forward-to-TX-port-0 config field. */
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+/* Stage the forward-to-TX-port-1 config field. */
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+/* Stage the forward-cell-builder-packets config field. */
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+/* Stage the forward-non-normal-packets config field. */
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+/* Write the staged CONFIG register out to the FPGA. */
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG */
+/* Select the LAGCFG table entry (address) to operate on. */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+/* Set the LAGCFG control count field. */
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+/* Stage the TX physical port field. */
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+/* Write the staged LAGCFG control and data registers out to the FPGA. */
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/* Handle for one instance of the ROA FPGA module; caches the module,
+ * register and field pointers resolved in roa_nthw_init().
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUNHDR control/data registers and fields */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUNCFG control/data registers and fields */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG register and fields */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAGCFG control/data registers and fields */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying RPP_LR module handle. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized rpp_lr_nthw handle.
+ * Returns NULL on allocation failure; the caller owns the handle and
+ * releases it with rpp_lr_nthw_delete().
+ */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	/* calloc replaces the malloc+memset pair and zero-fills atomically */
+	struct rpp_lr_nthw *p = calloc(1, sizeof(struct rpp_lr_nthw));
+
+	return p;
+}
+
+/* Release a handle created by rpp_lr_nthw_new(); NULL is a safe no-op.
+ * The memset scrubs stale pointers before the free (debug aid).
+ */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Bind handle p to RPP_LR module instance n_instance of p_fpga and
+ * resolve its register/field pointers.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL it only probes whether the instance exists.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above instead of querying the
+	 * FPGA a second time; consistent with rmc/roa/slc init.
+	 */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	/* IFR registers are optional (query, not get); the resulting
+	 * pointers may be NULL.  NOTE(review): register_query_field is
+	 * presumably NULL-tolerant on its register argument — confirm.
+	 */
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* Select the RCP table entry (address) to operate on. */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP control count field. */
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Stage the RCP EXP field. */
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+/* Write the staged RCP control and data registers out to the FPGA. */
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* IFR registers are optional (queried in init); the asserts below
+ * require callers to use these only on FPGAs that have them.
+ */
+/* Select the IFR RCP table entry (address) to operate on. */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+/* Set the IFR RCP control count field. */
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+/* Stage the IFR RCP enable field. */
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+/* Stage the IFR RCP MTU field. */
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+/* Write the staged IFR RCP control and data registers out to the FPGA. */
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Handle for one instance of the RPP_LR FPGA module; the IFR members
+ * are optional (queried in init) and may be NULL.
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	/* optional: may be NULL when the FPGA lacks IFR support */
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC module handle. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized slc_nthw handle.
+ * Returns NULL on allocation failure; the caller owns the handle and
+ * releases it with slc_nthw_delete().
+ */
+struct slc_nthw *slc_nthw_new(void)
+{
+	/* calloc replaces the malloc+memset pair and zero-fills atomically */
+	struct slc_nthw *p = calloc(1, sizeof(struct slc_nthw));
+
+	return p;
+}
+
+/* Release a handle created by slc_nthw_new(); NULL is a safe no-op.
+ * The memset scrubs stale pointers before the free (debug aid).
+ */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/* Bind handle p to SLC module instance n_instance of p_fpga and resolve
+ * its RCP register/field pointers.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ * When called with p == NULL it only probes whether the instance exists.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above instead of querying the
+	 * FPGA a second time; consistent with rmc/roa init.
+	 */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Select the RCP table entry (address) to operate on. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP control count field. */
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Stage the tail-slice enable field. */
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Stage the tail dynamic-offset field. */
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Stage the tail offset field; val is signed (can be negative),
+ * stored via the same 32-bit field write.
+ */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Stage the PCAP field. */
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Write the staged RCP control and data registers out to the FPGA. */
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one SLC (slicer) FPGA module instance.  All register and
+ * field pointers are resolved once by slc_nthw_init() and then used by
+ * the accessor functions below; the struct owns no memory besides itself.
+ */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no; /* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga; /* FPGA this module instance was queried from */
+
+	nt_module_t *m_slc; /* MOD_SLC module handle */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data; /* RCP data record register */
+
+	/* RCP data record fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release */
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: field writers operate on shadow values; rcp_flush commits them */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Pass the debug-mode setting down to the underlying module handle. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized handle; returns NULL on allocation failure. */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	/* calloc yields the same zero-filled block as malloc + memset */
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Scrub and release a handle; a NULL argument is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the SLC LR register map for FPGA module instance @n_instance.
+ *
+ * When called with p == NULL the function only probes for the module:
+ * it returns 0 if the instance exists and -1 otherwise.  On success the
+ * RCP control/data register and field handles are cached in @p.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Log this module's own name (was "Slc" — copy/paste slip) */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup */
+	p->m_slc_lr = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Write the CTRL_ADR field: selects which RCP record is accessed next. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Write the CTRL_CNT field of the RCP control register. */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Write the TAIL_SLC_EN field of the selected RCP data record. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Write the TAIL_DYN field of the selected RCP data record. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/*
+ * Write the TAIL_OFS field.  Note the signed int32_t argument is passed
+ * to field_set_val32() as-is (implicit conversion to the field width).
+ */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Write the PCAP field of the selected RCP data record. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush the shadowed RCP control and data registers to the FPGA. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one SLC LR FPGA module instance.  All register and field
+ * pointers are resolved once by slc_lr_nthw_init() and then used by the
+ * accessor functions below; the struct owns no memory besides itself.
+ */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no; /* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga; /* FPGA this module instance was queried from */
+
+	nt_module_t *m_slc_lr; /* MOD_SLC_LR module handle */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data; /* RCP data record register */
+
+	/* RCP data record fields */
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs;
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release */
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: field writers operate on shadow values; rcp_flush commits them */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Pass the debug-mode setting down to the underlying module handle. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/* Allocate a zero-initialized handle; returns NULL on allocation failure. */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	/* calloc yields the same zero-filled block as malloc + memset */
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/* Release the writer array and the handle itself; NULL is a no-op. */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	free(p->m_writers);
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the TX_CPY register map for FPGA module instance @n_instance.
+ *
+ * When called with p == NULL the function only probes for the module:
+ * it returns 0 if the instance exists and -1 otherwise.  On success it
+ * allocates and fills one tx_cpy_writers_s entry per writer reported by
+ * product parameter NT_TX_CPY_WRITERS (the array is freed by
+ * tx_cpy_nthw_delete()).  The mask registers are only resolved when
+ * product parameter NT_TX_CPY_VARIANT is non-zero; otherwise those
+ * pointers stay NULL (calloc) and are guarded by asserts in the mask
+ * accessors.
+ *
+ * Returns 0 on success, -1 on missing module, writer count < 1, or
+ * allocation failure.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	/* Zero-filled array: mask pointers stay NULL unless variant != 0 */
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/*
+	 * Resolve registers from the highest writer the product exposes
+	 * downwards; each case deliberately falls through so all
+	 * lower-numbered writers are set up too.  Counts above 6 are
+	 * handled like 6 via the default label.
+	 */
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					  CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+			 register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+					    CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:
+		/* Unreachable: writers_cnt >= 1 was checked above */
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Write the CTRL_ADR field of writer @index (selects the record). */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Write the CTRL_CNT field of writer @index. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Write the DATA_READER_SELECT field of writer @index. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Write the DATA_DYN field of writer @index. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Write the DATA_OFS field of writer @index. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Write the DATA_LEN field of writer @index. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/*
+ * Write the DATA_MASK_POINTER field of writer @index.
+ * Only valid when the mask fields were resolved (variant != 0 in init);
+ * the second assert guards against use on the non-mask variant.
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Flush the shadowed writer ctrl/data registers of writer @index. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Write the MASK_CTRL_ADR field of writer @index (mask variant only). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Write the MASK_CTRL_CNT field of writer @index (mask variant only). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Write the MASK_DATA_BYTE_MASK field of writer @index (mask variant only). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Flush the shadowed mask ctrl/data registers of writer @index. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handles for one TX_CPY writer.  The mask members are
+ * only resolved when product parameter NT_TX_CPY_VARIANT is non-zero;
+ * otherwise they remain NULL.
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	nt_field_t *mp_writer_data_mask_pointer; /* mask variant only */
+
+	nt_register_t *mp_writer_mask_ctrl; /* mask variant only */
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data; /* mask variant only */
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/*
+ * Handle for one TX_CPY FPGA module instance.  m_writers is allocated
+ * by tx_cpy_nthw_init() (one entry per NT_TX_CPY_WRITERS) and freed by
+ * tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no; /* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga; /* FPGA this module instance was queried from */
+
+	nt_module_t *m_tx_cpy; /* MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt; /* number of entries in m_writers */
+	struct tx_cpy_writers_s *m_writers; /* owned; freed in delete */
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+/* Per-writer field writers; index must be < m_writers_cnt */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+/* Mask register accessors; valid only on the mask variant */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Pass the debug-mode setting down to the underlying module handle. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/* Allocate a zero-initialized handle; returns NULL on allocation failure. */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	/* calloc yields the same zero-filled block as malloc + memset */
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Scrub and release a handle; a NULL argument is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve the TX_INS register map for FPGA module instance @n_instance.
+ *
+ * When called with p == NULL the function only probes for the module:
+ * it returns 0 if the instance exists and -1 otherwise.  On success the
+ * RCP control/data register and field handles are cached in @p.
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above instead of a second lookup */
+	p->m_tx_ins = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Write the CTRL_ADR field: selects which RCP record is accessed next. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Write the CTRL_CNT field of the RCP control register. */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Write the DYN field of the selected RCP data record. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Write the OFS field of the selected RCP data record. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Write the LEN field of the selected RCP data record. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Flush the shadowed RCP control and data registers to the FPGA. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Handle for one TX_INS FPGA module instance.  All register and field
+ * pointers are resolved once by tx_ins_nthw_init() and then used by the
+ * accessor functions below; the struct owns no memory besides itself.
+ */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no; /* instance number, cast from init arg */
+	nt_fpga_t *mp_fpga; /* FPGA this module instance was queried from */
+
+	nt_module_t *m_tx_ins; /* MOD_TX_INS module handle */
+
+	/* RCP control register and its address/count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data record register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, release */
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: field writers operate on shadow values; rcp_flush commits them */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/* Allocate a zero-initialized tx_rpl_nthw handle; returns NULL on OOM. */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free a tx_rpl_nthw handle; a NULL argument is ignored. */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		/* zero first so stale register/field pointers cannot be reused */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a tx_rpl_nthw handle to TX replacer (TX_RPL) module instance
+ * n_instance of p_fpga and resolve all register/field handles for the
+ * RCP, EXT and RPL tables.
+ *
+ * Returns 0 on success, -1 if the FPGA has no such module instance.
+ * When called with p == NULL it only probes for the module's presence:
+ * 0 if present, -1 if not.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* NOTE(review): duplicate lookup - p_mod already holds this handle */
+	p->m_tx_rpl = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP (recipe) table shadow-field setters; values reach hardware at
+ * tx_rpl_nthw_rcp_flush().
+ */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Commit RCP shadow registers (CTRL then DATA) to hardware. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table accessors; committed by tx_rpl_nthw_ext_flush(). */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL data table accessors; committed by tx_rpl_nthw_rpl_flush(). */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* val must point to 4 x 32-bit words (one full RPL data entry). */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Driver handle for one TX replacer (TX_RPL) FPGA module instance:
+ * caches the module handle plus register/field handles for the RCP
+ * (recipe), EXT and RPL tables, resolved by tx_rpl_nthw_init().
+ */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_rpl;
+
+	nt_register_t *mp_rcp_ctrl;	/* recipe table address/count control */
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;	/* recipe data word */
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	nt_register_t *mp_ext_ctrl;	/* EXT table address/count control */
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	nt_register_t *mp_rpl_ctrl;	/* RPL data table address/count control */
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+/* Lifecycle: allocate, bind to an FPGA module instance, free. */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v14 5/8] net/ntnic: adds FPGA abstraction layer
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-09-04 13:53   ` [PATCH v14 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-09-04 13:53   ` Mykola Kostenok
  2023-09-04 13:53   ` [PATCH v14 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:53 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  265 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14376 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write the raw tunnel header for ROA recipe 'index' to hardware:
+ * 4x4 words (64 bytes) for IPv4 or 8x4 words (128 bytes) for IPv6.
+ * Each group of 4 words is written in reverse order (hardware
+ * requirement), with ntohl() converting each word from the
+ * network-order header buffer.
+ * Returns 0 on success, non-zero if any field write failed.
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program the ROA tunnel configuration for recipe 'index' from the
+ * packed color_actions bitmask (bit layout documented in
+ * flow_api_actions.h): tunnel push parameters, IP checksum/length
+ * updates, recirculation and TX destination.
+ * Returns 0 on success, -1 on an unsupported TX port combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 - use the checksum pre-calculated into color_actions */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 disables the bypass path */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* both PHYs: TX on PHY0 and recirculate-bypass to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program IOA recipe 'index' from the packed color_actions bitmask
+ * (bit layout documented in flow_api_actions.h): tunnel/VLAN pop,
+ * VLAN push (TPID/TCI) and optional queue override.
+ * Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	/* ioa_set_*(0) yields the corresponding flag mask for the bit tests */
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* split the TCI into its VID/DEI/PCP sub-fields */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+/* Number of color (mark) flow statistics counters */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* TX destination flags; PHY0 and PHY1 may be ORed to address both */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+/* Tunnel IP version selector */
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID selection for push actions */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+/* ROA retransmit destination encoding (0 = no retransmit) */
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/* Pack color/IOA/ROA indices into the pre-QSL-v6 color action word;
+ * the layout variant is selected at compile time by MAX_COLOR_FLOW_STATS.
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
/* Contiguous bitmask covering bit positions a down to b (inclusive).
 * Only valid for widths below 32 ((a) - (b) + 1 < 32).
 */
#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)

/*
 *  9:0   Mark (color) 1024 flow stats
 * 17:10  IOA index     256 entries
 * 25:18  ROA index     256 entries
 * 30:26  QSL and HSH    32 recipes indexable
 * 31:31  CAO               implicitly when color_action is set
 */
#define FLOW_MARK_MASK BITMASK(9, 0)
#define IOA_RCP_MASK BITMASK(17, 10)
#define ROA_RCP_MASK BITMASK(25, 18)
#define QSL_HSH_MASK BITMASK(30, 26)

/*
 * Pack mark/IOA/ROA/QSL-HSH indices into the current color action word
 * with the CAO flag (bit 31) set.
 *
 * Fix: the CAO flag is now formed as 1U << 31.  The original (1 << 31)
 * left-shifts into the sign bit of a signed int, which is undefined
 * behavior (C11 6.5.7); the unsigned form yields 0x80000000 portably.
 */
static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
					uint32_t roa_rcp, uint32_t qsl_hsh)
{
	uint32_t color_action = (mark & FLOW_MARK_MASK) |
				((ioa_rcp & IOA_RCP_MASK) << 10) |
				((roa_rcp & ROA_RCP_MASK) << 18) |
				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
	return color_action;
}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Flag that a new recirculate port was allocated (bit 50); such a port
+ * needs a default queue per the layout comment above.
+ */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Tunnel IP version, bit 24: 0 = IPv4, 1 = IPv6. */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Pre-calculated tunnel IP header checksum, bits 49:34. */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Tunnel header length in bytes, bits 33:26; 0 means no tunnel. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* TX port, bits 23:20, stored as txport + 1 so 0 means no retransmit. */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Tunnel type, bits 19:16. */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Enable recirculation (bit 25) and store the recirc port in bits 7:0. */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Returns the recirc port, or -1 if recirculation is not enabled. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Recirc bypass port, bits 15:8; when set it overrides the recirc port. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action    bit offs    bits
+ *  --------------------------------
+ *  tci         15:0    16
+ *  queue     23:16   8  uses hbx
+ *  tpid select   27:24   4
+ *  pop vxlan    28     1
+ *  pop vlan     29     1
+ *  push vlan    30     1
+ *  queue override   31     1
+ */
+
/*
 * Mark a queue override in the IOA action bitmask: set the
 * queue-override flag (bit 31) and store the queue/hostbuffer id in
 * bits 23:16 (see the IOA layout comment above).
 *
 * Fix: the flag must be formed as 1ULL << 31.  The original (1 << 31)
 * is undefined behavior for signed int and, being negative, its
 * conversion to uint64_t sign-extends and also sets bits 32-63 of the
 * action word.
 */
static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
{
	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
	return actions;
}
+
/*
 * Return the override queue/hostbuffer id (bits 23:16) from an IOA
 * action bitmask, or -1 when the queue-override flag (bit 31) is not
 * set.
 *
 * Fix: the flag test must use 1ULL << 31.  The original (1 << 31) is
 * undefined behavior for signed int and sign-extends to a mask of
 * bits 31-63, so any stray high bit would falsely enable the override.
 */
static inline int ioa_get_queue(uint64_t actions)
{
	if (!(actions & (1ULL << 31)))
		return -1;
	return ((actions >> 16) & 0xff);
}
+
+/* Set the pop-outer-VxLAN flag (bit 28); callers also use the result of
+ * ioa_set_vxlan_pop(0) as a pure test mask.
+ */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Set the pop-outer-VLAN flag (bit 29). */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1AD (QinQ) TPID for a VLAN push, bits 27:24. */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Set the push-VLAN flag (bit 30) and the TCI in bits 15:0. */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set the push-VLAN flag with only the PCP sub-field (TCI bits 15:13). */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Registry of every HW module handled by this backend: per-module hooks
+ * for cache allocation, release, reset-to-defaults and presence probing.
+ * The order of this table defines the iteration order used by
+ * flow_api_backend_init()/_reset()/_done().
+ */
+static const struct {
+	const char *name;
+	int (*allocate)(struct flow_api_backend_s *be);
+	void (*free)(struct flow_api_backend_s *be);
+	int (*reset)(struct flow_api_backend_s *be);
+	bool (*present)(struct flow_api_backend_s *be);
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one contiguous, zeroed area holding 'sets' consecutive arrays
+ * and hand out a pointer to each array through the caller's list pointers.
+ *
+ * Variadic arguments are triplets, one per set:
+ *   (void **list, int count, int elem_size)
+ * Each set is preceded by EXTRA_INDEXES hidden scratch elements; the
+ * returned list pointer points just past them.
+ *
+ * The base/size of the area is recorded in 'mod' so zero_module_cache()
+ * can wipe it later. Returns the base pointer, or NULL on allocation
+ * failure (in which case no list pointer is written and the cached size
+ * is set to 0 to stay consistent with base == NULL).
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		/* fetch with the exact type the caller passed (void **) */
+		plist[i] = va_arg(args, void **);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		/* carve the area into the requested sets, skipping the
+		 * hidden extra indexes in front of each set
+		 */
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)(p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+		total_bytes = 0; /* keep size consistent with base == NULL */
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+/*
+ * Reset a module's cached register image to all-zero.
+ * Tolerates a module whose allocation failed (base == NULL), which the
+ * original unconditional memset() would have dereferenced.
+ */
+void zero_module_cache(struct common_func_s *mod)
+{
+	if (mod->base)
+		memset(mod->base, 0, mod->allocated_size);
+}
+
+/*
+ * Bind a physical backend to 'dev', query its port/category/queue
+ * dimensions, and build the version-independent cache representation of
+ * every HW module reported present. Returns 0 on success; on any module
+ * failure all created caches are released again and -1 is returned.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		/* allocate, then reset to defaults; bail out on first failure */
+		if (module[mod].allocate(dev) != 0 ||
+				module[mod].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Re-run the reset (set-all-defaults) hook of every HW module.
+ * Modules not reported present were never allocated by
+ * flow_api_backend_init(), so they are skipped here as well instead of
+ * resetting an unallocated cache.
+ * Returns 0 on success, -1 on the first module that fails.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		/* keep in step with init: only touch present modules */
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Release every module cache created by flow_api_backend_init().
+ * Always returns 0.
+ */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	unsigned int mod;
+
+	for (mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+/* Log and return the error code (-2) used for an out-of-range index. */
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+/* Log and return the error code (-3) used for an out-of-range word offset. */
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+/* Log and return the error code (-4) for an unsupported module version. */
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* Log and return the error code (-5) for a field the module version lacks. */
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+/*
+ * Log and return the error code for insufficient module resources.
+ * Note: shares -4 with error_unsup_ver(), so callers cannot tell the two
+ * apart by return value alone. The split format string renders as
+ * "NIC module:%s" with no space before the module name.
+ */
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+/*
+ * Number of hidden scratch entries placed in front of each cached array by
+ * callocate_mod(); the empty enumerator list makes EXTRA_INDEXES == 0.
+ * COPY_INDEX maps EXTRA_INDEX_COPY (declared elsewhere — presumably a
+ * negative scratch slot id; verify against its definition) into that area.
+ */
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/*
+ * Transfer between a cached register field and a caller value:
+ * get != 0 reads the cache into *val, get == 0 writes *val into the cache.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	uint32_t *dst = get ? val : cached_val;
+	const uint32_t *src = get ? cached_val : val;
+
+	*dst = *src;
+}
+
+/*
+ * As get_set(), but the cached field is signed: values are converted
+ * between int32_t (cache) and uint32_t (caller) by bit-preserving cast.
+ */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)*cached_val;
+		return;
+	}
+	*cached_val = (int32_t)*val;
+}
+
+/*
+ * Scan entries [start, nb_elements) of a register cache array for the
+ * first one whose bytes equal entry 'idx' (entry idx itself is skipped).
+ * On success *value holds the matching index, or NOT_FOUND if none.
+ * Only valid as a get-operation; returns 0, or a negative error code on
+ * bad arguments.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+	const uint8_t *ref = base + idx * type_size;
+
+	for (unsigned int i = start; i < nb_elements; i++) {
+		if (i == idx)
+			continue;
+		if (memcmp(ref, base + i * type_size, type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Byte-wise compare two entries of a register cache array. Returns 1 when
+ * idx and cmp_idx are distinct indexes holding identical data, 0 otherwise,
+ * or a negative error code on bad arguments. Only valid as a get-operation.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	if (cmp_idx >= nb_elements)
+		return error_index_too_large(func);
+	if (idx == cmp_idx)
+		return 0;
+	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
+		      type_size) == 0 ? 1 : 0;
+}
+
+/* Return 1 when any byte in the n-byte region at addr is non-zero. */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *bytes = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*bytes++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 when every byte in the n-byte region at addr is 0xff. */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *bytes = (const uint8_t *)addr;
+
+	while (n--) {
+		if (*bytes++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+/*
+ * Fields shared by every per-module cache struct: the module version
+ * selected at alloc time, the base/size of the contiguous zeroed cache
+ * area created by callocate_mod(), and a debug flag.
+ */
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+/* Generic view of any module cache struct (see CAST_COMMON). */
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM module accessors.
+ * NOTE(review): judging by the naming pattern used throughout this header,
+ * *_set/*_get access a single field selected by enum hw_flm_e, *_flush
+ * writes the cached module state to the device, and *_update reads device
+ * state back -- confirm against the hw_mod_flm implementation.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* LOAD_BIN/PPS/LPS/APS registers (see HW_FLM_LOAD_* enum values above). */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST and RCP are indexed tables: flush a [start_idx, start_idx + count) range. */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* set_mask takes a pointer: the HW_FLM_RCP_MASK field is wider than 32 bits. */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/inform/status record streams exchanged with the FLM engine. */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH module state: capability count plus version-specific layout (v5). */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp;	/* number of HSH recipe entries */
+	union {
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+/*
+ * HSH (hash) module field selectors.
+ * Values below FIELD_START_INDEX select operations (preset/compare/find);
+ * the rest select individual recipe fields.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+};
+
+/* HSH module accessors; word_off addresses a word within multi-word fields. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL module state: table capacities plus version-specific layout (v7). */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of QSL recipe categories */
+	uint32_t nb_qst_entries;	/* number of queue-selection table entries */
+	union {
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+/*
+ * QSL module field selectors.
+ * Values below FIELD_START_INDEX select operations; the rest select fields
+ * of the RCP, QST, QEN and UNMQ register groups.
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+};
+
+/* QSL module accessors for the RCP, QST, QEN and UNMQ tables. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC module state, version-specific layout (v1). */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+/* SLC module field selectors (operations below FIELD_START_INDEX). */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+/* SLC module accessors. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC LR module state, version-specific layout (v2); mirrors the SLC module. */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+/* SLC LR field selectors; same field set as enum hw_slc_e. */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+/* SLC LR module accessors. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB module state: capability count plus version-specific layout (v9). */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories;	/* number of PDB recipe categories */
+
+	union {
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+/* PDB field selectors for the RCP and CONFIG register groups. */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/* PDB module accessors; note CONFIG has a set/flush but no get. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module state: table capacities plus version-specific layout (v4). */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of IOA recipe categories */
+	uint32_t nb_roa_epp_entries;	/* number of ROA egress packet-path entries */
+	union {
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+/* IOA field selectors for the RCP, CONFIG and ROA_EPP register groups. */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+/* IOA module accessors. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA module state: table capacities plus version-specific layout (v6). */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories;	/* number of tunnel configuration entries */
+	uint32_t nb_lag_entries;	/* number of LAG configuration entries */
+	union {
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+/*
+ * ROA field selectors: tunnel header/config, forwarding config, LAG config
+ * and drop counters (IGS/RCC packet and byte drops).
+ */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/* ROA module accessors; TUNHDR is word-addressed (word_off). */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module state, version-specific layout (v1.3). */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+/* RMC control-register field selectors (no operation values defined). */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+/* RMC module accessors (single CTRL register group). */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* TPE module state: sub-block capacities plus version-specific layouts. */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of TPE recipe categories */
+	uint32_t nb_ifr_categories;	/* number of IFR categories */
+	uint32_t nb_cpy_writers;	/* number of CPY writer instances */
+	uint32_t nb_rpl_depth;		/* replace-table depth */
+	uint32_t nb_rpl_ext_categories;	/* number of RPL extension categories */
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/*
+ * TPE field selectors, grouped by sub-block:
+ * RPP, IFR, INS, RPL, CPY, HFU and CSU.
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/* TPE module accessors, one set/get/flush triple per sub-block table. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* RPL_RPL set takes a pointer: the replace value is wider than one word. */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/*
+ * Backend debug modes.
+ * NOTE(review): WRITE presumably enables tracing of register writes by the
+ * backend's set_debug_mode callback -- confirm against the implementation.
+ */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Operations vtable implemented by a concrete backend.
+ * 'dev' is the backend-private handle passed as be_dev to
+ * flow_api_backend_init().  The get_nb_* callbacks report capability
+ * counts, get_*_present/get_*_version report per-module availability, and
+ * the *_flush callbacks write a module's state to the device.
+ * NOTE(review): the semantics above are inferred from naming and the
+ * hw_mod_* prototypes in this header -- confirm against a backend impl.
+ */
+struct flow_api_backend_ops {
+	int version;
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level backend state: the backend-private device handle, its
+ * operations vtable, per-module state for every flow-filter FPGA module,
+ * and cached NIC attributes / resource capacities.
+ */
+struct flow_api_backend_s {
+	void *be_dev;	/* backend-private handle, passed to every iface callback */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: bind an ops vtable + device handle, reset, tear down. */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..b63730c07e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+/*
+ * Free-resource pools in the FPGA. Other FPGA memory lists are linked
+ * to one of these and implicitly follow them. RES_COUNT doubles as the
+ * size of per-resource arrays (see struct flow_handle resource[]).
+ */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT, /* number of valid resource types */
+	RES_INVALID
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+/* Which KM extractor word class a match element uses (QWORD=128b, SWORD=32b). */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD,
+	KM_USE_EXTRACTOR_SWORD,
+};
+
+/* One collected match field before it is packed into a CAM/TCAM entry. */
+struct match_elem_s {
+	enum extractor_e extr; /* extractor class this element needs */
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4]; /* match value, up to 128 bits */
+	uint32_t e_mask[4]; /* bit mask applied to e_word */
+
+	int extr_start_offs_id; /* dynamic start-offset selector id */
+	int8_t rel_offs; /* byte offset relative to the selected start */
+	uint32_t word_len; /* number of significant 32-bit words */
+};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+/*
+ * Key Matcher (KM) flow definition: collects match elements, the
+ * formatted CAM/TCAM entry derived from them, and the bank bookkeeping
+ * used when the entry is programmed into NIC hardware.
+ */
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+/* 64-bit KCC-CAM key, bit-packed: sideband data/type, CAT CFN and port. */
+struct kcc_key_s {
+	uint64_t sb_data : 32; /* sideband data (e.g. VLAN TCI or VXLAN VNI) */
+	uint64_t sb_type : 8; /* sideband type discriminator, 0 = none */
+	uint64_t cat_cfn : 8; /* categorizer CFN index */
+	uint64_t port : 16; /* ingress port */
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+/*
+ * KCC-CAM flow definition: the 64-bit key (accessible as one u64, two
+ * u32 words, or the packed bit-field view) plus bank bookkeeping for
+ * the CAM record it occupies.
+ */
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64;
+		uint32_t key32[2];
+		struct kcc_key_s key;
+	};
+	uint32_t km_category; /* KM category programmed with this key */
+	uint32_t id; /* allocated unique id, KCC_ID_INVALID when free */
+
+	uint8_t *kcc_unique_ids; /* shared id allocation bitmap */
+
+	int flushed_to_target; /* nonzero once programmed into NIC hw */
+	int record_indexes[MAX_BANKS]; /* hash-derived record per bank */
+	int bank_used; /* bank this key currently occupies */
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+/* Raw tunnel encapsulation header plus parsed layer lengths. */
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d; /* raw header bytes, 32-bit word addressable */
+	uint32_t user_port_id;
+	uint8_t len; /* total header length in bytes */
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc; /* precalculated IPv4 header checksum */
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+enum special_partial_match_e {
+	SPECIAL_MATCH_NONE,
+	SPECIAL_MATCH_LACP,
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+/* One output destination of a flow (queue or physical port). */
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+/*
+ * First-step representation of a flow: match info decoded from the
+ * flow elements plus collected actions, before NIC HW resources are
+ * allocated (see struct flow_handle).
+ */
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions; /* bitmask of ROA actions - see ROA module */
+	uint64_t ioa_actions; /* bitmask of IOA actions - see IOA module */
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select; /* which header field to modify */
+		uint32_t dyn; /* dynamic offset selector */
+		uint32_t ofs; /* byte offset from the dynamic start */
+		uint32_t len; /* length of the written value */
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable; /* decrement TTL/hop-limit action enabled */
+	uint8_t ttl_sub_ipv4; /* target is IPv4 TTL (else IPv6 hop limit) */
+	uint8_t ttl_sub_outer; /* target the outer (else inner) IP header */
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+/* Discriminator for the union inside struct flow_handle. */
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
+/*
+ * A created flow. The union is discriminated by 'type': the first
+ * member holds a fully resource-managed flow (FLOW_HANDLE_TYPE_FLOW),
+ * the second a learned FLM entry (FLOW_HANDLE_TYPE_FLM).
+ */
+struct flow_handle {
+	enum flow_handle_type type;
+
+	struct flow_eth_dev *dev;
+	/* doubly linked list of all flows on this device */
+	struct flow_handle *next;
+	struct flow_handle *prev;
+
+	union {
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		struct {
+			uint32_t flm_data[10]; /* raw FLM learn record */
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			/* the FLOW_HANDLE_TYPE_FLOW this FLM entry was learned from */
+			struct flow_handle *flm_owner;
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+/* Reverse mapping for one allocated internal group id. */
+struct group_lookup_entry_s {
+	uint64_t ref_counter; /* number of (owner, group) pairs mapped here */
+	uint32_t *reverse_lookup; /* slot in translation_table pointing at us */
+};
+
+/* Per-adapter group translation state. */
+struct group_handle_s {
+	uint32_t group_count; /* number of internal group ids available */
+
+	/* group_count * OWNER_ID_COUNT slots mapping to internal ids */
+	uint32_t *translation_table;
+
+	struct group_lookup_entry_s *lookup_entries;
+};
+
+/*
+ * Allocate the group translation state for 'group_count' groups.
+ * On success *handle owns the allocation and 0 is returned; on
+ * allocation failure *handle is NULL and -1 is returned.
+ *
+ * Fixes: the original dereferenced the calloc result without a NULL
+ * check and ignored failure of the two member allocations, leading to
+ * NULL dereferences here or in flow_group_translate_get().
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	group_handle = *handle;
+	if (group_handle == NULL)
+		return -1;
+
+	group_handle->group_count = group_count;
+	/* size_t arithmetic avoids 32-bit overflow of the product */
+	group_handle->translation_table =
+		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+	if (group_handle->translation_table == NULL ||
+			group_handle->lookup_entries == NULL) {
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+		free(group_handle);
+		*handle = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Release all group translation state; idempotent on a NULL handle. */
+int flow_group_handle_destroy(void **handle)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)*handle;
+
+	if (group_handle != NULL) {
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+		free(group_handle);
+		*handle = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate (owner_id, group_in) to an internal group id, allocating
+ * one on first use and reference counting subsequent lookups.
+ * Group 0 always maps to 0 without allocation. Returns 0 on success,
+ * -1 on bad arguments or when no internal ids are free.
+ */
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	/*
+	 * The table holds group_count * OWNER_ID_COUNT slots, one per
+	 * (group_in, owner_id) pair, so index it as [group_in][owner_id].
+	 * The previous owner_id * OWNER_ID_COUNT + group_in indexing went
+	 * out of bounds for owner_id >= group_count and collided when
+	 * group_count > OWNER_ID_COUNT.
+	 */
+	table_ptr = &group_handle->translation_table[group_in * OWNER_ID_COUNT +
+				 owner_id];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		/* First use: find a free internal id (1..group_count-1) */
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			/* all internal group ids are in use */
+			return -1;
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+/*
+ * Drop one reference on a translated group id; when the last reference
+ * goes away the forward mapping in the translation table is cleared.
+ * Group 0 carries no reference. Returns 0 on success, -1 on bad args.
+ */
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	struct group_lookup_entry_s *entry =
+		&group_handle->lookup_entries[translated_group];
+
+	if (entry->reverse_lookup != NULL && entry->ref_counter > 0) {
+		entry->ref_counter -= 1;
+		if (entry->ref_counter == 0) {
+			/* last user gone - undo the forward mapping */
+			*entry->reverse_lookup = 0;
+			entry->reverse_lookup = NULL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+/* Fixed bit permutation: each of the 32 input bits moves to a unique slot. */
+static uint32_t shuffle(uint32_t x)
+{
+	uint32_t pair_up = (x & 0x00000002) << 29;
+	uint32_t down3 = (x & 0xAAAAAAA8) >> 3;
+	uint32_t up3 = (x & 0x15555555) << 3;
+	uint32_t pair_down = (x & 0x40000000) >> 29;
+
+	return pair_up | down3 | up3 | pair_down;
+}
+
+/* Rotate x right by s bits, inverting the bits that wrap to the top. */
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+	uint32_t low_part = x >> s;
+	uint32_t wrapped_inverted = (~x) << (32 - s);
+
+	return low_part | wrapped_inverted;
+}
+
+/*
+ * Nonlinear combination of two 32-bit words built from four rotated
+ * variants; the sum-of-products term is 1 where exactly one of the
+ * four variants differs from the others' pattern.
+ */
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	uint32_t x1 = ror_inv(x, 15);
+	uint32_t x2 = ror_inv(x, 13);
+	uint32_t y1 = ror_inv(y, 3);
+	uint32_t y2 = ror_inv(y, 27);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+/* One 32-bit mixing round: nonlinear combine followed by bit shuffle. */
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	return shuffle(combine(x, y));
+}
+
+/* ror_inv(x, 3) applied to both 32-bit halves of a 64-bit word at once. */
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t half_tops = 0xE0000000E0000000ULL;
+
+	return ((x >> 3) | half_tops) ^ ((x << 29) & half_tops);
+}
+
+/* ror_inv(x, 13) applied to both 32-bit halves of a 64-bit word at once. */
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t m = 0xFFF80000FFF80000ULL;
+
+	return (((x >> 13) | m) ^ ((x << 19) & m));
+}
+
+/* ror_inv(x, 15) applied to both 32-bit halves of a 64-bit word at once. */
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t m = 0xFFFE0000FFFE0000ULL;
+
+	return (((x >> 15) | m) ^ ((x << 17) & m));
+}
+
+/* ror_inv(x, 27) applied to both 32-bit halves of a 64-bit word at once. */
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+
+	return (((x >> 27) | m) ^ ((x << 5) & m));
+}
+
+/*
+ * 64-bit variant of shuffle(): the same fixed bit permutation applied
+ * to both 32-bit halves. ULL suffixes added for consistency with the
+ * ror_inv* mask constants in this file.
+ */
+static uint64_t shuffle64(uint64_t x)
+{
+	return (((x & 0x0000000200000002ULL) << 29) |
+		((x & 0xAAAAAAA8AAAAAAA8ULL) >> 3) |
+		((x & 0x1555555515555555ULL) << 3) |
+		((x & 0x4000000040000000ULL) >> 29));
+}
+
+/* Pack two 32-bit words into one 64-bit word, x in the high half. */
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	uint64_t high_half = (uint64_t)x << 32;
+
+	return high_half | y;
+}
+
+/* 64-bit variant of combine(): same nonlinear term on paired halves. */
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	uint64_t x1 = ror_inv15(x);
+	uint64_t x2 = ror_inv13(x);
+	uint64_t y1 = ror_inv3(y);
+	uint64_t y2 = ror_inv27(y);
+
+	return (x ^ y ^
+		((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+		 (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+		 (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+/* One 64-bit mixing round: nonlinear combine followed by bit shuffle. */
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	return shuffle64(combine64(x, y));
+}
+
+/*
+ * Reduce a 16-word (512-bit) key to a 32-bit hash via a binary tree of
+ * mix64 rounds (diagram below), then finish with 32-bit mixing rounds.
+ */
+static uint32_t calc16(const uint32_t key[16])
+{
+	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
+	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
+	/*   0       1       2       3       4       5       6       7     Layer 1   */
+	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
+	/*       0               1               2               3         Layer 2   */
+	/*        \______.______/                 \______.______/                    */
+	/*               0                               1                 Layer 3   */
+	/*                \______________.______________/                            */
+	/*                               0                                 Layer 4   */
+	/*                              / \                                          */
+	/*                              \./                                          */
+	/*                               0                                 Layer 5   */
+	/*                              / \                                          */
+	/*                              \./                                Layer 6   */
+	/*                             value                                         */
+
+	uint64_t z;
+	uint32_t x;
+
+	/* layer 0 pairs word i with word i+8 */
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	/* fold 64 -> 32 bits and run the final two 32-bit rounds */
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+/*
+ * Hash a 16-word key and split the hash into one record index per
+ * CAM bank (written to result[0..banks-1]). Returns the raw 32-bit
+ * hash value.
+ */
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	/* widen the hash when more than 32 index bits are consumed */
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	/* peel cam_records_bw bits off per bank, lowest bank first */
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+/*
+ * Configure the hasher for a CAM geometry of 'banks' banks with
+ * 'nb_records' records each; cam_records_bw is the number of index
+ * bits needed per bank. Always returns 0.
+ */
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	/* fix: was '_banks', which is not declared in this scope */
+	int res = hash_test(hsh, banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+/*
+ * Self-test (compiled only with TESTING): hashes a fixed 4-word key
+ * (remaining 12 words zero-initialized) and compares the hash and the
+ * recomputed per-bank indexes against known-good values. Returns the
+ * number of mismatches (0 = pass).
+ */
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65; /* expected hash for inval */
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	/* replicate gethash()'s widening and per-bank splitting by hand */
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	/* NOTE(review): checks only the first 3 banks - assumes banks >= 3 */
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+/* SW shadow of one KCC CAM record: its owning flow and reference count. */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner; /* NULL when the record is free */
+	int ref_cnt; /* number of flows sharing this record */
+};
+
+/* Linear index into cam_dist for the record this key hashes to in 'bnk' */
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+/* Byte size of the cam_dist shadow array covering the whole KCC CAM */
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+/* Byte size of the unique-id allocation bitmap (1 bit per id) */
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+/* Scratch list of CAM addresses already visited during a cuckoo move */
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * Attach this KCC flow definition to the per-NIC-device CAM record
+ * manager, allocating it on first use. The single allocation is carved
+ * into: cam_dist array | cuckoo counter | unique-id bitmap | hasher.
+ *
+ * Fixes: the original carved up and dereferenced the calloc result
+ * (via init_hasher) without checking for allocation failure.
+ */
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate KCC-CAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	/* carve the shared allocation into its four sub-regions */
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+/* Free the per-NIC-device KCC-CAM record manager and clear the handle. */
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+/* Mark the key as carrying no sideband data (type 0, data all-ones). */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+/* Set VLAN sideband (type 1): TPID in the high 16 bits, 12-bit VID below. */
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+/* Set VXLAN sideband (type 2): 24-bit VNI with marker bit 25 set. */
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+/* Set the ingress port field of the KCC key. */
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+/* Set the categorizer CFN index field of the KCC key. */
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+/* Read back the categorizer CFN index field of the KCC key. */
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+/* Set the KM category this KCC entry resolves to. */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+/*
+ * Allocate the lowest free id from the shared allocation bitmap
+ * (1 bit per id, byte-major then bit order). Stores the id in
+ * kcc->id and returns it, or -1 when all ids are taken.
+ */
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	/* find and claim the first clear bit within that byte */
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Return kcc->id to the shared allocation bitmap and mark the flow's
+ * id invalid. No-op when no id is currently held.
+ */
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3; /* byte index in the bitmap */
+	uint8_t shft = (uint8_t)(kcc->id & 7); /* bit within the byte */
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		/* the bit must be set - freeing an unallocated id is a bug */
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+/* Return 1 when the two 64-bit KCC keys are identical, else 0. */
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	return (kcc->key64 == kcc1->key64) ? 1 : 0;
+}
+
+/*
+ * Write this key's record (key words, KM category, id) into the CAM
+ * at the hashed index for 'bank', flush it to HW, and claim the slot
+ * in the cam_dist shadow. Returns 0 on success, -1 on any HW error.
+ */
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	/* record ownership in the SW shadow of the CAM */
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+/*
+ * Zero this key's CAM record in HW at the hashed index for 'bank' and
+ * release the slot in the cam_dist shadow. Also clears the SW key and
+ * category; the allocated unique id is freed separately by the caller.
+ */
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+/*
+ * Try to relocate this key from its current bank to any bank whose
+ * hashed slot is free (single-step cuckoo move). Returns 1 when the
+ * key was moved, 0 when no free slot exists or HW write failed.
+ */
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller If you change this code in future updates, this may no
+			 * longer be true then!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++; /* shared statistics counter */
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo move: try to free the CAM slot 'bank_idx' by moving
+ * its current owner elsewhere, recursing up to 'levels' deep. Visited
+ * slots are tracked in kcc_cam_addr_reserved_stack (length
+ * cam_adr_list_len) to avoid cycles. Returns 1 when the slot could be
+ * freed, 0 otherwise. NOTE(review): uses a file-scope scratch stack,
+ * so this is not reentrant/thread-safe - confirm single-threaded use.
+ */
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* cheap case first: the occupying key can move in one step */
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this slot so deeper levels do not revisit it */
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		/* free a candidate slot deeper down, then move into it */
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+static uint32_t kcc_hsh_key[16];
+
+/*
+ * Hash the key into per-bank record indexes, then place the key in the
+ * first free bank - cuckoo-moving existing entries (depth 4) when all
+ * banks are occupied. Returns 0 on success, -1 when no slot could be
+ * freed or the HW write failed.
+ */
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	/* hash input is the 64-bit key, word-swapped; words 2-15 stay zero */
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	/* NOTE(review): logs only 3 indexes - assumes kcc_banks >= 3 */
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1;
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+/*
+ * Program a new KCC key into the CAM. Returns 0 on success, negative
+ * on failure (see kcc_write_data_to_cam()).
+ *
+ * Fixes: "%016lx" is wrong for uint64_t on ILP32 targets; use PRIx64.
+ */
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016" PRIx64
+	       ", KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+/*
+ * Remove this key's CAM entry from HW if it was ever flushed; a key
+ * that was never programmed is a no-op. Returns the HW result code.
+ */
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	if (!kcc->flushed_to_target)
+		return 0;
+
+	int res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+
+	kcc->flushed_to_target = 0;
+	kcc->bank_used = 0;
+	return res;
+}
+
+/*
+ * Add one reference to the CAM entry currently holding this key and
+ * return the new reference count.
+ *
+ * Fixes: "%016lx" is wrong for uint64_t on ILP32 targets; use PRIx64.
+ */
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016" PRIx64
+	       ", KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+/*
+ * Drop one reference on the CAM entry holding this key; when the count
+ * reaches zero the entry is deleted from HW. Returns the remaining
+ * count, or -1 when the bank bookkeeping is invalid.
+ *
+ * Fixes: "%016lx" is wrong for uint64_t on ILP32 targets; use PRIx64.
+ */
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016" PRIx64
+	       ", KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * CAM-compatible mask patterns. A match element whose mask equals one of
+ * these entries (for the same word length) can be stored in the KM CAM;
+ * any other mask pattern forces the flow into the TCAM
+ * (see km_add_match_elem()).
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Ownership record for one CAM (bank, record) slot. */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index of a (bank, record) slot in the cam_dist array. */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/*
+ * Flat index of this km's chosen record in bank 'bnk'. Uses a GNU
+ * statement expression so 'bnk' is evaluated only once; a variable named
+ * 'km' must be in scope at every use site.
+ */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+/* Max recursion depth when cuckoo-moving entries to make CAM room */
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/* Slots already on the current cuckoo move path (not revisited) */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Ownership record for one TCAM (bank, record) slot. */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index of a (bank, record) slot in the tcam_dist array. */
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach (and on first call allocate) the per-NIC-device CAM/TCAM record
+ * manager shared by all KM flow definitions on the device. The single
+ * allocation holds, in order: the CAM ownership table, a cuckoo-move
+ * counter, the TCAM ownership table and the hasher state.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+/* Byte sizes of the CAM and TCAM ownership tables for this device */
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		/*
+		 * NOTE(review): calloc result is not checked; the pointer
+		 * arithmetic below dereferences NULL on allocation failure.
+		 */
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the combined CAM/TCAM record manager allocated by
+ * km_attach_ndev_resource_management() and clear the handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	void *mgr = *handle;
+
+	*handle = NULL;
+	if (mgr == NULL)
+		return;
+	free(mgr);
+	NT_LOG(DBG, FILTER,
+	       "Free NIC DEV CAM and TCAM record manager\n");
+}
+
+/*
+ * Append one match element (word/mask pair plus extractor start offset)
+ * to the KM flow definition being built.
+ *
+ * Valid word lengths are 1, 2 and 4; a length of 3 is widened to 4 with
+ * zeroed word/mask. Each element is also checked against the cam_masks
+ * table: only elements whose mask equals a supported CAM pattern may
+ * later be placed in the CAM, others are marked for TCAM.
+ *
+ * Returns 0 on success, -1 on an invalid word length or when the match
+ * element array is already full.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* fix: guard the fixed-size match array against overflow */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	/* valid word_len 1,2,4 - widen 3 to 4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len != cam_masks[msk].word_len)
+			continue;
+
+		int match = 1;
+
+		for (unsigned int wd = 0; wd < word_len; wd++) {
+			if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+				match = 0;
+				break;
+			}
+		}
+		if (match) {
+			/* Mask pattern is CAM compatible - can go into CAM */
+			km->match[km->num_ftype_elem].masked_for_tcam = 0;
+			break;
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable inclusion of the color info word in the KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first unmarked, non-sideband match element with the requested
+ * word length. Returns its index or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	int i;
+
+	for (i = 0; i < km->num_ftype_elem; i++) {
+		if (marked[i])
+			continue;
+		if (km->match[i].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[i].word_len == size)
+			return i;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Debug helper: human-readable name for a frame offset / sideband id. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+/* Max number of QWORD / SWORD extractors available for one KM key */
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow definition:
+ *  - pack match elements into QWORD extractors first (largest first),
+ *  - then into SWORD extractors,
+ *  - append sideband (SWX) words last,
+ * producing entry_word[]/entry_mask[] and deciding the CAM/TCAM target.
+ * For a TCAM target the key is padded to a supported length and the
+ * legal start bank offsets are computed.
+ * Returns 0 on success, -1 when the match set cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords - prefer the largest elements first (4, 2, then 1) */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially - they are appended last and force CAM
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * fix: fill all four candidate start offsets; the
+			 * original wrote start_offsets[0] four times, leaving
+			 * offsets 1-3 uninitialized while num_start_offsets
+			 * claimed 4 valid entries
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare this (new) flow definition against an existing one (km1) to
+ * decide whether the existing KM recipe and flow type can be reused.
+ * Returns km1's flow type when reusable, 0 when incompatible, and -1
+ * when the masked keys are identical (flow clash - already programmed).
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/*
+		 * NOTE(review): mixes km->match[i] and km->match_map[i] -
+		 * presumably equivalent for presence detection since
+		 * match_map is a permutation of match; confirm intended.
+		 */
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program KM recipe 'index' in hardware from this flow definition:
+ * extractor selection (QW0/QW4 quad-words, DW8/DW10 single-words, or
+ * sideband SWX), key mask A, and for a CAM target also entry length,
+ * flow-type mask and record pairing; for a TCAM target the bank bitmap
+ * and key length after finding a free TCAM mapping.
+ * Returns 0 on success, -1 on any unsupported extractor combination.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband words are CAM-only, one at most */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A - written from the highest mask word downwards */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into CAM 'bank' at the
+ * precomputed record index, spilling into the next record when the key
+ * is paired (longer than one CAM record), then flush to hardware.
+ * Also records CAM slot ownership in the distribution table.
+ * Returns 0 on success (OR of backend write results).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* remaining words go into the paired (next) record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero this flow's CAM record(s) in 'bank' and release slot ownership,
+ * then flush to hardware. Mirrors cam_populate().
+ * Returns 0 on success (OR of backend write results).
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* clear the paired (next) record as well */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to move this flow's CAM entry from its current bank to any other
+ * bank with a free slot (cuckoo hashing relocation step).
+ * Returns 1 when moved, 0 when no free slot was found or the hardware
+ * write failed. The vacated slot is only released in the ownership
+ * table - the caller immediately takes over the old hardware address,
+ * so no hardware flush of the old slot is performed here.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo relocation: try to free CAM slot bank_idx by moving
+ * the flow that owns it, either directly or by first moving the flows
+ * that block it, down to 'levels' levels deep. cam_addr_reserved_stack
+ * tracks slots already on the current move path so recursion does not
+ * revisit them. Only flows with the same pairedness are moved.
+ * Returns 1 when the slot was freed, otherwise 0.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		/* skip slots already on the current move path */
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Place this flow's key in the KM CAM: hash the key to get one record
+ * index per bank, pick a free bank (or make one free via cuckoo moves
+ * up to 4 levels deep), then populate the CAM.
+ * Returns 0 on success, -1 when no bank could be freed.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): log assumes at least 3 CAM banks - confirm */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Scan TCAM records for one that is free across all key_word_size
+ * consecutive banks starting at start_bank. On success stores the
+ * record index in km->tcam_record and returns 1; returns 0 when no
+ * record is free.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		int free_rec = 1;
+
+		for (int w = 0; w < km->key_word_size; w++) {
+			if (km->tcam_dist[TCAM_DIST_IDX(start_bank + w, rec)]
+					.km_owner != NULL) {
+				free_rec = 0;
+				break;
+			}
+		}
+		if (free_rec) {
+			km->tcam_record = rec;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Choose a start bank/record position in the TCAM for this flow by
+ * trying each precomputed legal start offset in order.
+ * Returns 0 on success (km->tcam_start_bank/tcam_record set), -1 when
+ * the TCAM has no room for this key.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int i = 0; i < km->num_start_offsets; i++) {
+		int bank = km->start_offsets[i];
+
+		if (!tcam_find_free_record(km, bank))
+			continue;
+
+		km->tcam_start_bank = bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word into TCAM 'bank' for 'record': for each
+ * of the word's 4 bytes, set the record's bit in the match lists of all
+ * 256 byte values that satisfy (val & mask) == (word & mask), clear it
+ * in the rest, then flush the bank and take ownership of the slot.
+ * Returns 0 on success.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	/* position of this record's bit within the 96-bit record bitmap */
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Write this flow into the TCAM: find a free record when the recipe was
+ * reused (tcam_record < 0), program color/flow-type (TCI), then write
+ * each key word into its bank. Backend debug output is temporarily
+ * switched off around the bulk per-word writes.
+ * Returns 0 on success, -1 when no TCAM record is free.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
/*
 * Remove this flow's TCAM entry: zero the TCI (color and flow type)
 * and clear the record from every bank the key occupies.
 *
 * Returns 0 on success, -1 if no TCAM location was ever assigned, or
 * a backend error code from the bank reset.
 */
static int tcam_reset_entry(struct km_flow_def_s *km)
{
	int err = 0;

	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
		return -1;
	}

	/* Write KM_TCI */
	/*
	 * NOTE(review): return values of the TCI set/flush calls are
	 * ignored here (unlike km_write_data_to_tcam) - confirm this
	 * best-effort clearing is intentional.
	 */
	hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
			  km->tcam_record, 0);
	hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
			  km->tcam_record, 0);
	hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);

#ifdef FLOW_DEBUG
	km->be->iface->set_debug_mode(km->be->be_dev,
				      FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

	/* one consecutive bank per key word; stop on first error */
	for (int i = 0; i < km->key_word_size && !err; i++) {
		err = tcam_reset_bank(km, km->tcam_start_bank + i,
				      km->tcam_record);
	}
#ifdef FLOW_DEBUG
	km->be->iface->set_debug_mode(km->be->be_dev,
				      FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	return err;
}
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	int res = 0;
+
+	km->root = km1->root ? km1->root : km1;
+	while (km1->reference)
+		km1 = km1->reference;
+	km1->reference = km;
+
+	km->info = km1->info;
+
+	switch (km->target) {
+	case KM_CAM:
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_TCAM:
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		break;
+	case KM_SYNERGY:
+	default:
+		res = -1;
+		break;
+	}
+
+	return res;
+}
+
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	int res = -1;
+
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	switch (km->target) {
+	case KM_CAM:
+		res = km_write_data_to_cam(km);
+		break;
+	case KM_TCAM:
+		res = km_write_data_to_tcam(km);
+		break;
+	case KM_SYNERGY:
+	default:
+		break;
+	}
+	return res;
+}
+
/*
 * Remove this flow's match entry, updating reference-chain and
 * hardware-ownership bookkeeping. Three cases:
 *
 *  1. km is a follower (km->root set): unlink it from the chain; the
 *     hardware entry remains, owned by the root.
 *  2. km is a root with followers (km->reference set): hand hardware
 *     ownership in the CAM/TCAM distribution tables over to the first
 *     follower; the hardware entry itself is untouched.
 *  3. km is the sole user and was written to hardware: reset the
 *     hardware entry.
 *
 * Returns 0 on success, -1 for an unsupported target.
 */
int km_clear_data_match_entry(struct km_flow_def_s *km)
{
	int res = 0;

	if (km->root) {
		/* follower: find our predecessor in the chain and unlink */
		struct km_flow_def_s *km1 = km->root;

		while (km1->reference != km)
			km1 = km1->reference;

		km1->reference = km->reference;

		km->flushed_to_target = 0;
		km->bank_used = 0;
	} else if (km->reference) {
		/* root with followers: promote the first follower to owner */
		km->reference->root = NULL;

		switch (km->target) {
		case KM_CAM:
			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
				km->reference;
			/* paired entries occupy two distribution slots */
			if (km->key_word_size + !!km->info_set > 1) {
				assert(km->cam_paired);
				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
				.km_owner = km->reference;
			}
			break;
		case KM_TCAM:
			/* one distribution slot per bank the key occupies */
			for (int i = 0; i < km->key_word_size; i++) {
				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
					km->tcam_record)].km_owner = km->reference;
			}
			break;
		case KM_SYNERGY:
		default:
			res = -1;
			break;
		}

		km->flushed_to_target = 0;
		km->bank_used = 0;
	} else if (km->flushed_to_target) {
		/* sole user: actually clear the hardware entry */
		switch (km->target) {
		case KM_CAM:
			res = cam_reset_entry(km, km->bank_used);
			break;
		case KM_TCAM:
			res = tcam_reset_entry(km);
			break;
		case KM_SYNERGY:
		default:
			res = -1;
			break;
		}
		km->flushed_to_target = 0;
		km->bank_used = 0;
	}

	return res;
}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
/* Virtual port numbers occupy [VIRTUAL_TUNNEL_PORT_OFFSET, MAX_HW_VIRT_PORTS) */
#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
#define VIRTUAL_TUNNEL_PORT_OFFSET 72

/*
 * One tunnel definition in the tunnel database (singly linked list,
 * head in the file-static 'tunnels').
 */
struct tunnel_s {
	struct tunnel_cfg_s cfg; /* tunnel header match values */
	struct tunnel_cfg_s cfg_mask; /* which bits of cfg are significant */
	uint32_t flow_stat_id; /* (uint32_t)-1 until assigned by flow code */
	uint8_t vport; /* allocated virtual port number */
	int refcnt; /* number of users; freed when it drops to 0 */
	struct tunnel_s *next; /* linked list of defined tunnels */
};
+
+int is_virtual_port(uint8_t virt_port)
+{
+	return !!(virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+		  virt_port < MAX_HW_VIRT_PORTS);
+}
+
/*
 * New function for use with OVS 2.17.2
 */
/* Head of the linked list of known tunnel definitions */
static struct tunnel_s *tunnels;

/* In-use flag per allocatable virtual port (index = port - offset) */
static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	for (uint8_t i = VIRTUAL_TUNNEL_PORT_OFFSET; i < MAX_HW_VIRT_PORTS;
+			i++) {
+		if (!vport[i - VIRTUAL_TUNNEL_PORT_OFFSET]) {
+			vport[i - VIRTUAL_TUNNEL_PORT_OFFSET] = 1;
+			return i;
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
/*
 * Release a previously allocated virtual port number.
 *
 * Returns 0 on success. NOTE(review): the out-of-range path returns -1
 * from a uint8_t function, which callers observe as 255 - confirm that
 * callers test for non-zero rather than for -1.
 */
uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
{
	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
			virt_port < MAX_HW_VIRT_PORTS) {
		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
		return 0;
	}
	return -1;
}
+
/*
 * True when two values agree on every bit that is significant in BOTH
 * masks. Arguments are expanded once into locals (statement-expression
 * form) to avoid double evaluation.
 */
#define check(_v1, _v2, _msk1, _msk2) ({ \
	__typeof__(_v1) (v1) = (_v1); \
	__typeof__(_v2) (v2) = (_v2); \
	__typeof__(_msk1) (msk1) = (_msk1); \
	__typeof__(_msk2) (msk2) = (_msk2); \
	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
})

/*
 * Masked comparison of two IPv4 tunnel configs: src/dst address and
 * src/dst UDP port must all agree under the combined masks.
 */
#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
		(tun1_msk)->s_port) &&                                    \
	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
		(tun1_msk)->d_port)); \
})

/*
 * Masked comparison of two IPv6 tunnel configs: the addresses are
 * compared as two 64-bit halves (v6_long) plus src/dst UDP port.
 */
#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
		(tun1_msk)->s_port) &&                                          \
	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
		(tun1_msk)->d_port)); \
})
+
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	if (tun->cfg.tun_type == tnlcfg->tun_type) {
+		if (tun->cfg.ipversion == 4) {
+			return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+						  tnlcfg, tnlcfg_mask);
+		} else {
+			return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+						  tnlcfg, tnlcfg_mask);
+		}
+	}
+	return 0;
+}
+
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *tun = tunnels, *prev = NULL;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+	/* find tunnel in list */
+	while (tun) {
+		if (tun == tnl)
+			break;
+		prev = tun;
+		tun = tun->next;
+	}
+
+	if (!tun) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--tun->refcnt == 0) {
+		if (prev)
+			prev->next = tun->next;
+		else
+			tunnels = tun->next;
+		flow_tunnel_free_virt_port(tun->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tun->vport);
+		free(tun);
+	}
+
+	return 0;
+}
+
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->vport == vport && (flow_stat_id == tun->flow_stat_id ||
+					    flow_stat_id == (uint32_t)-1)) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+		tun = tun->next;
+	}
+
+	return -1;
+}
+
/*
 * Accumulate 16-bit words into a one's-complement checksum, starting
 * from 'seed', folding carries back into 16 bits. The result is NOT
 * inverted - callers keep it as a precalculated partial sum.
 *
 * NOTE(review): for odd 'size' the tail byte is obtained by reading a
 * full be16_t at data[size/2] (one byte past 'size') and truncating
 * with a cast, which is byte-order dependent. The only caller in this
 * file passes sizeof(struct ipv4_hdr_s), an even size - confirm before
 * using with odd sizes.
 */
static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
			      be16_t seed)
{
	unsigned int sum = seed;
	unsigned int idx;

	for (idx = 0; idx < size / 2; idx++)
		sum += (unsigned int)(data[idx]);
	if (size & 1)
		sum += (unsigned char)data[idx];
	/* unfold */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (be16_t)sum;
}
+
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	for (uint8_t i = 0; i < size; i++)
+		result[i] = ((const uint8_t *)elem->spec)[i];
+}
+
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[5],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[5],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Ask the backend whether the CAT (categorizer) module exists on this device. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Initialize the CAT module shadow cache.
+ *
+ * Reads the module version and the resource counts from the backend
+ * interface, then allocates the version-specific register cache in one
+ * contiguous area via callocate_mod().  Counts that must be strictly
+ * positive are checked with (nb <= 0); KCC size/banks and the KM
+ * interface count may legitimately be zero and are checked with (nb < 0).
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC size/banks may be zero on FPGAs without the KCC CAM. */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	/* Interface ids of the two possible KM/FLM module attachments. */
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Records per bank; guard against division by zero. */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/* Allocate the version-specific cache layout in one block. */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the CAT shadow cache allocated by hw_mod_cat_alloc(); all
+ * version-specific tables live in the single 'base' allocation.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	if (be->cat.base) {
+		free(be->cat.base);
+		be->cat.base = NULL;
+	}
+}
+
+/*
+ * Reset CFN (category function) entry 'i' to its default state:
+ * first zero the whole entry (PRESET_ALL with value 0), then open all
+ * protocol/error match fields so the entry accepts everything.
+ * Fields that only exist from version 21 on are set conditionally.
+ * Returns the result of the initial preset; the follow-up sets are not
+ * individually checked.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	if (_VER_ >= 21) {
+		/* Fields introduced with CAT version 21. */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Reset the whole CAT module: zero the shadow cache and flush every
+ * table to hardware.  KCE/KCS/FTE are flushed per KM/FLM interface:
+ * version <= 18 has a single interface, newer versions flush interface
+ * m0 and, if present, m1.  Tables that only exist conditionally (KCC,
+ * and CCE/CCS from version 22) are flushed only when available.
+ * Returns 0 on success, -1 on the first failing flush.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush CFN cache entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table (start_idx must be 0).
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	switch (count) {
+	case ALL_ENTRIES:
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						be->cat.nb_cat_funcs);
+
+	default:
+		if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						count);
+	}
+}
+
+/*
+ * Get or set a single field of CFN entry 'index' in the shadow cache.
+ * 'get' selects direction (non-zero: read into *value; zero: write from
+ * *value).  'word_off' selects the word for multi-word fields (PM_CMP)
+ * and carries the comparison/search argument for COMPARE/FIND.
+ * Dispatches on the module version; v21 and v22 share the v21 CFN
+ * layout.  Only touches the cache - a flush writes it to hardware.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a two-word field. */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has a single KM interface: field is 'km_or'. */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* Copy entry *value into entry 'index' (v21+ only). */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field in the shadow cache (flush separately). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one CFN field from the shadow cache into *value. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map an (interface select, KM interface id) pair to the cache array
+ * index 0 or 1.  Version 18 has a single interface, so the index is
+ * always 0.  For KM_FLM_IF_SECOND only interface m1 is valid; otherwise
+ * m0 maps to 0 and m1 to 1.  Returns a negative error code when the id
+ * matches neither configured interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	int km_if_idx;
+
+	if (_VER_ == 18) {
+		km_if_idx = 0;
+	} else {
+		if (if_num == KM_FLM_IF_SECOND) {
+			if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		} else {
+			if (be->cat.km_if_m0 == km_if_id)
+				km_if_idx = 0;
+			else if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		}
+	}
+	return km_if_idx;
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush KCE entries to hardware for the selected KM/FLM interface.
+ * The table holds one enable bit per CFN, packed 8 per entry, so the
+ * valid index range is nb_cat_funcs / 8.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* KCE flush for the KM module.
+ * NOTE(review): KM wrappers pass km_if_id 0 and FLM wrappers 1; these
+ * are matched against km_if_m0/m1 read from the backend - confirm those
+ * ids are always 0 and 1.
+ */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* KCE flush for the FLM module (km_if_id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the enable bitmap of one KCE entry in the shadow cache.
+ * 'index' addresses groups of 8 CFNs.  From v21 the bitmap is kept per
+ * KM/FLM interface, selected via km_if_idx; v18 has a single bitmap.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCE field for the KM module (km_if_id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCE field for the KM module (km_if_id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCE field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCE field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/*
+ * Flush KCS (category select) entries to hardware for the selected
+ * KM/FLM interface; one entry per CFN.
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* KCS flush for the KM module (km_if_id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* KCS flush for the FLM module (km_if_id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the category of one KCS entry in the shadow cache.  From
+ * v21 the category is stored per KM/FLM interface (km_if_idx); v18 has
+ * a single value per entry.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCS field for the KM module (km_if_id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCS field for the KM module (km_if_id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCS field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCS field for the FLM module (km_if_id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE (flow type enable) entries to hardware for the selected
+ * KM/FLM interface.  Table size is (nb_cat_funcs / 8) * nb_flow_types *
+ * key_cnt, where key_cnt is 4 from version 20 on and 2 before.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* FTE flush for the KM module (km_if_id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* FTE flush for the FLM module (km_if_id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the enable bitmap of one FTE entry in the shadow cache.
+ * Index range matches hw_mod_cat_fte_flush(); from v21 the bitmap is
+ * kept per KM/FLM interface (km_if_idx).
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
/*
 * Get or set one CTE field in the shadow table.  The v18 layout is shared
 * by versions 18/21/22, so the v18 accessor is used for all of them.
 */
static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_cat_funcs)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_CTE_ENABLE_BM:
			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
/*
 * Flush a range of CTS entries to hardware.  Each category function
 * occupies addr_size consecutive entries.
 * NOTE(review): the `_VER_ < 15` branch looks dead — all versions handled
 * elsewhere in this module are 18/21/22 — and hw_mod_cat_cts_mod() below
 * computes addr_size without it; confirm whether it can be dropped.
 */
int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
			 int count)
{
	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);

	if (count == ALL_ENTRIES)
		count = be->cat.nb_cat_funcs * addr_size;
	if ((unsigned int)(start_idx + count) >
			(be->cat.nb_cat_funcs * addr_size))
		return error_index_too_large(__func__);
	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
}
+
/*
 * Get or set one CTS field in the shadow table.  Two category words
 * (cat_a/cat_b) are stored per entry; layout shared across v18/21/22.
 */
static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	/* two CTS words are packed per address */
	int addr_size = (be->cat.cts_num + 1) / 2;

	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
		return error_index_too_large(__func__);

	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_CTS_CAT_A:
			get_set(&be->cat.v18.cts[index].cat_a, value, get);
			break;
		case HW_CAT_CTS_CAT_B:
			get_set(&be->cat.v18.cts[index].cat_b, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
/*
 * Get or set one COT field in the shadow table.  Besides plain field
 * access this also supports three pseudo-fields:
 *  - HW_CAT_COT_PRESET_ALL: memset the whole entry to a byte value
 *    (set-only);
 *  - HW_CAT_COT_COMPARE: compare entry `index` against entry `*value`;
 *  - HW_CAT_COT_FIND: locate an entry equal to entry `index`, returning
 *    its position through *value.
 */
static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	int rv = 0;
	if ((unsigned int)index >= be->max_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_COT_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->cat.v18.cot[index], (uint8_t)*value,
			       sizeof(struct cat_v18_cot_s));
			break;
		case HW_CAT_COT_COMPARE:
			rv = do_compare_indexes(be->cat.v18.cot,
				sizeof(struct cat_v18_cot_s), index, *value,
				be->max_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_CAT_COT_FIND:
			rv = find_equal_index(be->cat.v18.cot,
				sizeof(struct cat_v18_cot_s), index, *value,
				be->max_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_CAT_COT_COLOR:
			get_set(&be->cat.v18.cot[index].color, value, get);
			break;
		case HW_CAT_COT_KM:
			get_set(&be->cat.v18.cot[index].km, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
/*
 * Get or set one CCT field in the shadow table (color/km per entry,
 * 4 entries per category function; layout shared across v18/21/22).
 */
static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_CCT_COLOR:
			get_set(&be->cat.v18.cct[index].color, value, get);
			break;
		case HW_CAT_CCT_KM:
			get_set(&be->cat.v18.cct[index].km, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
/*
 * Get or set one KCC CAM field in the shadow table.  The key spans two
 * 32-bit words selected by word_off (0 or 1); layout shared across
 * v18/21/22.
 */
static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, int word_off,
			      uint32_t *value, int get)
{
	if ((unsigned int)index >= be->cat.kcc_size)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_KCC_KEY:
			/* key[] has exactly two words */
			if (word_off > 1)
				return error_word_off_too_large(__func__);
			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
				get);
			break;

		case HW_CAT_KCC_CATEGORY:
			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
			break;

		case HW_CAT_KCC_ID:
			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
/*
 * Get or set one EXO field in the shadow table.  The `ofs` field is a
 * signed offset and therefore goes through get_set_signed(); layout
 * shared across v18/21/22.
 */
static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_pm_ext)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_EXO_DYN:
			get_set(&be->cat.v18.exo[index].dyn, value, get);
			break;
		case HW_CAT_EXO_OFS:
			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
/*
 * Get or set one RCK data word in the shadow table; layout shared across
 * v18/21/22.
 */
static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_RCK_DATA:
			get_set(&be->cat.v18.rck[index].rck_data, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
/*
 * Get or set one LEN field in the shadow table (lower/upper bounds,
 * dyn1/dyn2 selectors, invert flag); layout shared across v18/21/22.
 */
static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
			      enum hw_cat_e field, int index, uint32_t *value,
			      int get)
{
	if ((unsigned int)index >= be->cat.nb_len)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 18:
	case 21:
	case 22:
		switch (field) {
		case HW_CAT_LEN_LOWER:
			get_set(&be->cat.v18.len[index].lower, value, get);
			break;
		case HW_CAT_LEN_UPPER:
			get_set(&be->cat.v18.len[index].upper, value, get);
			break;
		case HW_CAT_LEN_DYN1:
			get_set(&be->cat.v18.len[index].dyn1, value, get);
			break;
		case HW_CAT_LEN_DYN2:
			get_set(&be->cat.v18.len[index].dyn2, value, get);
			break;
		case HW_CAT_LEN_INV:
			get_set(&be->cat.v18.len[index].inv, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 18/21/22 */

	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Shadow-register layout for the CAT (categorizer) FPGA module, version 18.
 * Each struct mirrors one hardware table entry; the hw_mod_cat_*_mod()
 * accessors read/write these shadows and hw_mod_cat_*_flush() pushes
 * ranges to hardware.  Field widths/semantics follow the FPGA register
 * map, so layouts must not be reordered.
 */

#ifndef _HW_MOD_CAT_V18_H_
#define _HW_MOD_CAT_V18_H_

/* Category function (CFN) entry: per-category classification controls. */
struct cat_v18_cfn_s {
	uint32_t enable;
	uint32_t inv;
	/* protocol checks */
	uint32_t ptc_inv;
	uint32_t ptc_isl;
	uint32_t ptc_cfp;
	uint32_t ptc_mac;
	uint32_t ptc_l2;
	uint32_t ptc_vntag;
	uint32_t ptc_vlan;
	uint32_t ptc_mpls;
	uint32_t ptc_l3;
	uint32_t ptc_frag;
	uint32_t ptc_ip_prot;
	uint32_t ptc_l4;
	uint32_t ptc_tunnel;
	uint32_t ptc_tnl_l2;
	uint32_t ptc_tnl_vlan;
	uint32_t ptc_tnl_mpls;
	uint32_t ptc_tnl_l3;
	uint32_t ptc_tnl_frag;
	uint32_t ptc_tnl_ip_prot;
	uint32_t ptc_tnl_l4;
	/* error checks */
	uint32_t err_inv;
	uint32_t err_cv;
	uint32_t err_fcs;
	uint32_t err_trunc;
	uint32_t err_l3_cs;
	uint32_t err_l4_cs;
	/* in port */
	uint32_t mac_port;
	/* pattern matcher */
	uint32_t pm_cmp[2];
	uint32_t pm_dct;
	uint32_t pm_ext_inv;
	uint32_t pm_cmb;
	uint32_t pm_and_inv;
	uint32_t pm_or_inv;
	uint32_t pm_inv;
	uint32_t lc;
	uint32_t lc_inv;
	uint32_t km_or;
};

/* KM category enable bitmap entry. */
struct cat_v18_kce_s {
	uint32_t enable_bm;
};

/* KM category select entry. */
struct cat_v18_kcs_s {
	uint32_t category;
};

/* Flow type enable bitmap entry. */
struct cat_v18_fte_s {
	uint32_t enable_bm;
};

/* Category-to-(downstream-)module enable entry; the bitfield view names
 * the per-module enable bits packed in enable_bm.
 */
struct cat_v18_cte_s {
	union {
		uint32_t enable_bm;
		struct {
			uint32_t col : 1;
			uint32_t cor : 1;
			uint32_t hsh : 1;
			uint32_t qsl : 1;
			uint32_t ipf : 1;
			uint32_t slc : 1;
			uint32_t pdb : 1;
			uint32_t msk : 1;
			uint32_t hst : 1;
			uint32_t epp : 1;
			uint32_t tpe : 1;
		} b;
	};
};

/* Category select entry (two category words per entry). */
struct cat_v18_cts_s {
	uint32_t cat_a;
	uint32_t cat_b;
};

/* Color/KM output table entry. */
struct cat_v18_cot_s {
	uint32_t color;
	uint32_t km;
};

/* Per-category color/KM table entry. */
struct cat_v18_cct_s {
	uint32_t color;
	uint32_t km;
};

/* Extractor offset entry; ofs is a signed byte offset. */
struct cat_v18_exo_s {
	uint32_t dyn;
	int32_t ofs;
};

/* RCK data word. */
struct cat_v18_rck_s {
	uint32_t rck_data;
};

/* Length-check entry: [lower, upper] bounds, dynamic selectors, invert. */
struct cat_v18_len_s {
	uint32_t lower;
	uint32_t upper;
	uint32_t dyn1;
	uint32_t dyn2;
	uint32_t inv;
};

/* KCC CAM entry: 64-bit key (two words), category and id. */
struct cat_v18_kcc_s {
	uint32_t key[2];
	uint32_t category;
	uint32_t id;
};

/* Top-level v18 CAT shadow: one dynamically allocated array per table. */
struct hw_mod_cat_v18_s {
	struct cat_v18_cfn_s *cfn;
	struct cat_v18_kce_s *kce;
	struct cat_v18_kcs_s *kcs;
	struct cat_v18_fte_s *fte;
	struct cat_v18_cte_s *cte;
	struct cat_v18_cts_s *cts;
	struct cat_v18_cot_s *cot;
	struct cat_v18_cct_s *cct;
	struct cat_v18_exo_s *exo;
	struct cat_v18_rck_s *rck;
	struct cat_v18_len_s *len;
	struct cat_v18_kcc_s *kcc_cam;
};

#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Shadow-register layout for the CAT module, version 21.  Reuses the v18
 * table entries where the layout is unchanged and redefines CFN (extra
 * tunnel/TTL error checks, split km0/km1 OR) plus the KCE/KCS/FTE tables,
 * which become two-element arrays — one slot per KM/FLM interface.
 */

#ifndef _HW_MOD_CAT_V21_H_
#define _HW_MOD_CAT_V21_H_

#include "hw_mod_cat_v18.h"

/* Category function entry, v21: v18 plus tunnel/TTL error checks. */
struct cat_v21_cfn_s {
	uint32_t enable;
	uint32_t inv;
	/* protocol checks */
	uint32_t ptc_inv;
	uint32_t ptc_isl;
	uint32_t ptc_cfp;
	uint32_t ptc_mac;
	uint32_t ptc_l2;
	uint32_t ptc_vntag;
	uint32_t ptc_vlan;
	uint32_t ptc_mpls;
	uint32_t ptc_l3;
	uint32_t ptc_frag;
	uint32_t ptc_ip_prot;
	uint32_t ptc_l4;
	uint32_t ptc_tunnel;
	uint32_t ptc_tnl_l2;
	uint32_t ptc_tnl_vlan;
	uint32_t ptc_tnl_mpls;
	uint32_t ptc_tnl_l3;
	uint32_t ptc_tnl_frag;
	uint32_t ptc_tnl_ip_prot;
	uint32_t ptc_tnl_l4;
	/* error checks */
	uint32_t err_inv;
	uint32_t err_cv;
	uint32_t err_fcs;
	uint32_t err_trunc;
	uint32_t err_l3_cs;
	uint32_t err_l4_cs;
	uint32_t err_tnl_l3_cs;
	uint32_t err_tnl_l4_cs;
	uint32_t err_ttl_exp;
	uint32_t err_tnl_ttl_exp;
	/* in port */
	uint32_t mac_port;
	/* pattern matcher */
	uint32_t pm_cmp[2];
	uint32_t pm_dct;
	uint32_t pm_ext_inv;
	uint32_t pm_cmb;
	uint32_t pm_and_inv;
	uint32_t pm_or_inv;
	uint32_t pm_inv;
	uint32_t lc;
	uint32_t lc_inv;
	uint32_t km0_or;
	uint32_t km1_or;
};

/* KCE entry: one enable bitmap per KM/FLM interface. */
struct cat_v21_kce_s {
	uint32_t enable_bm[2];
};

/* KCS entry: one category word per KM/FLM interface. */
struct cat_v21_kcs_s {
	uint32_t category[2];
};

/* FTE entry: one enable bitmap per KM/FLM interface. */
struct cat_v21_fte_s {
	uint32_t enable_bm[2];
};

/* Top-level v21 CAT shadow; unchanged tables reuse v18 entry structs. */
struct hw_mod_cat_v21_s {
	struct cat_v21_cfn_s *cfn;
	struct cat_v21_kce_s *kce;
	struct cat_v21_kcs_s *kcs;
	struct cat_v21_fte_s *fte;
	struct cat_v18_cte_s *cte;
	struct cat_v18_cts_s *cts;
	struct cat_v18_cot_s *cot;
	struct cat_v18_cct_s *cct;
	struct cat_v18_exo_s *exo;
	struct cat_v18_rck_s *rck;
	struct cat_v18_len_s *len;
	struct cat_v18_kcc_s *kcc_cam;
};

#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Shadow-register layout for the CAT module, version 22.  Builds on v21,
 * extending CTE with an RRB bit and adding the CCE and CCS tables.
 */

#ifndef _HW_MOD_CAT_V22_H_
#define _HW_MOD_CAT_V22_H_

#include "hw_mod_cat_v21.h"

/* CTE entry, v22: v18 layout plus the rrb enable bit. */
struct cat_v22_cte_s {
	union {
		uint32_t enable_bm;
		struct {
			uint32_t col : 1;
			uint32_t cor : 1;
			uint32_t hsh : 1;
			uint32_t qsl : 1;
			uint32_t ipf : 1;
			uint32_t slc : 1;
			uint32_t pdb : 1;
			uint32_t msk : 1;
			uint32_t hst : 1;
			uint32_t epp : 1;
			uint32_t tpe : 1;
			uint32_t rrb : 1;
		} b;
	};
};

/* CCE entry (4-entry table, see hw_mod_cat_cce_flush). */
struct cat_v22_cce_s {
	uint32_t imm;
	uint32_t ind;
};

/* CCS entry (1024-entry table): enable flag + value per downstream
 * module, plus three scratch-bank type/data pairs.
 */
struct cat_v22_ccs_s {
	uint32_t cor_en;
	uint32_t cor;
	uint32_t hsh_en;
	uint32_t hsh;
	uint32_t qsl_en;
	uint32_t qsl;
	uint32_t ipf_en;
	uint32_t ipf;
	uint32_t slc_en;
	uint32_t slc;
	uint32_t pdb_en;
	uint32_t pdb;
	uint32_t msk_en;
	uint32_t msk;
	uint32_t hst_en;
	uint32_t hst;
	uint32_t epp_en;
	uint32_t epp;
	uint32_t tpe_en;
	uint32_t tpe;
	uint32_t rrb_en;
	uint32_t rrb;
	uint32_t sb0_type;
	uint32_t sb0_data;
	uint32_t sb1_type;
	uint32_t sb1_data;
	uint32_t sb2_type;
	uint32_t sb2_data;
};

/* Top-level v22 CAT shadow; reuses v21/v18 entry structs where unchanged. */
struct hw_mod_cat_v22_s {
	struct cat_v21_cfn_s *cfn;
	struct cat_v21_kce_s *kce; /* KCE 0/1 */
	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
	struct cat_v21_fte_s *fte; /* FTE 0/1 */
	struct cat_v22_cte_s *cte;
	struct cat_v18_cts_s *cts;
	struct cat_v18_cot_s *cot;
	struct cat_v18_cct_s *cct;
	struct cat_v18_exo_s *exo;
	struct cat_v18_rck_s *rck;
	struct cat_v18_len_s *len;
	struct cat_v18_kcc_s *kcc_cam;
	struct cat_v22_cce_s *cce;
	struct cat_v22_ccs_s *ccs;
};

#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{	/* Ask the backend whether the FLM FPGA module exists on this device. */
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{	/* Query FLM capabilities and allocate the version-specific shadow cache. */
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:	/* 26 register groups in the v17 layout */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:	/* 38 groups: the 26 v17 groups plus 12 v20 statistics counters */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{	/* Free the single base allocation that backs all FLM shadow tables. */
+	if (be->flm.base) {
+		free(be->flm.base);
+		be->flm.base = NULL;	/* prevent double free / use-after-free */
+	}
+}
+
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{	/* Reset the FLM shadow cache to defaults and flush it to hardware. */
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);	/* NOTE(review): return value ignored — confirm intentional */
+
+	hw_mod_flm_control_flush(be);	/* flush errors below are not propagated */
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;	/* always reports success */
+}
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{	/* Write the cached CONTROL register block to hardware. */
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{	/* Get (get != 0) or set one CONTROL field in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:	/* v20 reuses the v17 CONTROL layout (see hw_mod_flm_v20_s) */
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:	/* write-only: memset the whole block */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{	/* Cache-only write; call hw_mod_flm_control_flush() to commit. */
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{	/* Read one CONTROL field from the shadow cache. */
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{	/* Write cached STATUS registers to hardware. */
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{	/* Refresh cached STATUS registers from hardware. */
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{	/* Get/set one STATUS field in the shadow cache (v17/v20 share layout). */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_status_flush(). */
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{	/* Read a cached STATUS field; call status_update() first for fresh data. */
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{	/* Write the cached TIMEOUT register to hardware. */
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{	/* Get/set the single TIMEOUT field t in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_timeout_flush(). */
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{	/* Read the cached TIMEOUT field. */
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{	/* Write the cached SCRUB register to hardware. */
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{	/* Get/set the single SCRUB field i in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_scrub_flush(). */
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{	/* Read the cached SCRUB field. */
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{	/* Write the cached LOAD_BIN register to hardware. */
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{	/* Get/set the LOAD_BIN field in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_load_bin_flush(). */
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{	/* Read the cached LOAD_BIN field. */
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{	/* Write the cached LOAD_PPS register to hardware. */
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{	/* Get/set the LOAD_PPS field in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_load_pps_flush(). */
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{	/* Read the cached LOAD_PPS field. */
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{	/* Write the cached LOAD_LPS register to hardware. */
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{	/* Get/set the LOAD_LPS field in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_load_lps_flush(). */
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{	/* Read the cached LOAD_LPS field. */
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{	/* Write the cached LOAD_APS register to hardware. */
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{	/* Get/set the LOAD_APS field in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_load_aps_flush(). */
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{	/* Read the cached LOAD_APS field. */
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{	/* Write the cached PRIO registers to hardware. */
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{	/* Get/set one of the four PRIO limit/ft pairs in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_prio_flush(). */
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{	/* Read a cached PRIO field. */
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{	/* Flush [start_idx, start_idx+count) PST entries; ALL_ENTRIES = whole table. */
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{	/* Get/set one field of PST entry 'index'; index is not range-checked here. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:	/* write-only: memset the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{	/* Cache-only write; commit with hw_mod_flm_pst_flush(). */
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{	/* Read a cached PST field. */
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{	/* Flush [start_idx, start_idx+count) RCP entries; ALL_ENTRIES = whole table. */
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{	/* Get/set one field of RCP (recipe) entry 'index' in the shadow cache. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:	/* write-only: memset the whole entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:	/* mask is a 10-word array; value must point at 10 words */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{	/* Write the multi-word RCP mask; only HW_FLM_RCP_MASK is valid here. */
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);	/* was error_unsup_ver: a wrong field is a field error, not a version error */
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{	/* Set a scalar RCP field; HW_FLM_RCP_MASK must go via rcp_set_mask(). */
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);	/* was error_unsup_ver: a wrong field is a field error, not a version error */
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{	/* Read a cached RCP field; for HW_FLM_RCP_MASK, value must hold 10 words. */
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{	/* Refresh cached BUF_CTRL counters from hardware. */
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{	/* Read one BUF_CTRL field from the shadow cache (read-only by design). */
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{	/* Read a cached BUF_CTRL field; call buf_ctrl_update() first for fresh data. */
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{	/* Refresh all cached FLM statistics counters from hardware. */
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{	/* Read one cached statistics counter; v20-only counters fall through below. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {	/* counters below only exist in FLM >= v18 (v20 cache) */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{	/* Push one learn record (flm_v17_lrn_data_s, as 32-bit words) to hardware. */
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{	/* Fetch word_cnt words of flow-info records into value. */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);	/* NOTE(review): backend result not propagated — confirm iface returns void */
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{	/* Fetch one status record (flm_v17_sta_data_s, as 32-bit words). */
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));	/* NOTE(review): backend result not propagated — confirm iface returns void */
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..9b4ee1991e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+struct flm_v17_mbr_idx_overlay {	/* overlay to pack/unpack two 28-bit member indices in 64 bits */
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+struct flm_v17_control_s {	/* CONTROL register shadow; one uint32_t per register field */
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;	/* reset default 0x10, see hw_mod_flm_reset() */
+};
+
+struct flm_v17_status_s {	/* STATUS register shadow, refreshed by flm_status_update */
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+struct flm_v17_timeout_s {	/* TIMEOUT register shadow */
+	uint32_t t;
+};
+
+struct flm_v17_scrub_s {	/* SCRUB register shadow */
+	uint32_t i;
+};
+
+struct flm_v17_load_bin_s {	/* LOAD_BIN register shadow */
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {	/* LOAD_PPS register shadow */
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {	/* LOAD_LPS register shadow */
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {	/* LOAD_APS register shadow */
+	uint32_t aps;
+};
+
+struct flm_v17_prio_s {	/* PRIO register shadow: four limit/flow-type pairs */
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+struct flm_v17_pst_s {	/* PST profile entry; table sized by nb_pst_profiles */
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+struct flm_v17_rcp_s {	/* RCP recipe entry; table sized by nb_categories */
+	uint32_t lookup;
+	uint32_t qw0_dyn;	/* qw/sw fields: dyn = dynamic offset base, ofs = byte offset, sel = selector */
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];	/* accessed as a block via HW_FLM_RCP_MASK memcpy */
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+struct flm_v17_buf_ctrl_s {	/* BUF_CTRL shadow: learn/info/status buffer occupancy (read-only) */
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+#pragma pack(1)	/* records below must match the exact hardware bit layout */
+struct flm_v17_lrn_data_s {	/* 768-bit (96-byte) learn record pushed via lrn_data_set_flush */
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+struct flm_v17_inf_data_s {	/* packed flow-info record read via inf_data_update_get */
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;	/* end-of-record marker */
+};
+
+struct flm_v17_sta_data_s {	/* packed status record read via sta_data_update_get */
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;	/* end-of-record marker */
+};
+
+#pragma pack()	/* restore default alignment for the plain counter structs below */
+struct flm_v17_stat_lrn_done_s {	/* one 32-bit counter per STAT register, refreshed by flm_stat_update */
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+struct hw_mod_flm_v17_s {	/* FLM v17 shadow-cache: pointers into one callocate_mod() allocation */
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;	/* array, nb_pst_profiles entries */
+	struct flm_v17_rcp_s *rcp;	/* array, nb_categories entries */
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * Additional FLM v2.0 statistics records, on top of the v1.7 set.
+ * Each struct mirrors one read-back register with a single 32-bit
+ * counter value.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Cache layout for the FLM module, version 2.0. Version 2.0 reuses
+ * all v1.7 register-group structs and appends the v2.0-only
+ * statistics records at the end.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Ask the backend interface whether the HSH module exists on this device. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_hsh_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the HSH module version from the backend and allocate the
+ * version-specific cache (RCP table).
+ * Returns 0 on success, -1 on allocation failure, or a negative
+ * error code for an unsupported module version.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* single allocation holding the whole RCP shadow table */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the HSH cache allocation and clear the base pointer. */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Reset the HSH module: zero the cached shadow registers and flush
+ * all RCP entries to hardware. Returns the flush result.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached HSH RCP entries to
+ * hardware. A count of ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const int n = (count == ALL_ENTRIES) ? (int)be->hsh.nb_rcp : count;
+
+	if (start_idx + n > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, n);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached HSH RCP
+ * entry. word_off selects the element for array-valued fields
+ * (MAC_PORT_MASK, WORD_MASK). Returns 0 on success or a negative
+ * error code (bad index/offset, unsupported field or version).
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * mac_port_mask has HSH_RCP_MAC_PORT_MASK_SIZE
+			 * elements; valid offsets are 0..SIZE-1. The check
+			 * was ">", allowing an out-of-bounds access at
+			 * word_off == SIZE.
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/*
+			 * word_mask has HSH_RCP_WORD_MASK_SIZE elements;
+			 * ">=" fixes an off-by-one out-of-bounds access at
+			 * word_off == SIZE.
+			 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HSH RCP field; thin wrapper around hw_mod_hsh_rcp_mod(). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one HSH RCP field; thin wrapper around hw_mod_hsh_rcp_mod(). */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * HSH v5 recipe (RCP) shadow record. Array sizes match the
+ * HSH_RCP_MAC_PORT_MASK_SIZE (4) and HSH_RCP_WORD_MASK_SIZE (10)
+ * bounds used by the accessors in hw_mod_hsh.c.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* HSH v5 cache layout: a single RCP shadow table. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Ask the backend interface whether the HST module exists on this device. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Query the HST module version and category count from the backend
+ * and allocate the version-specific RCP cache.
+ * Returns 0 on success, -1 on allocation failure, or a negative
+ * error code for a bad resource count or unsupported version.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the HST cache allocation and clear the base pointer. */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Reset the HST module: zero the cached shadow registers and flush
+ * all RCP entries to hardware. Returns the flush result.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached HST RCP entries to
+ * hardware. A count of ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached HST RCP
+ * entry. Returns 0 on success or a negative error code (bad index,
+ * unsupported field or version).
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Bug fix: the result of find_equal_index() was
+			 * previously discarded, so rv stayed 0 and errors
+			 * were never propagated (all sibling modules assign
+			 * rv here).
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HST RCP field; thin wrapper around hw_mod_hst_rcp_mod(). */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HST RCP field; thin wrapper around hw_mod_hst_rcp_mod(). */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * HST v2 recipe (RCP) shadow record: header-strip configuration with
+ * three packet-modifier slots (modif0..modif2).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* HST v2 cache layout: a single RCP shadow table. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Ask the backend interface whether the IOA module exists on this device. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Query the IOA module version and resource counts from the backend
+ * and allocate the version-specific caches (RCP table, special TPID
+ * record, ROA EPP table).
+ * Returns 0 on success, -1 on allocation failure, or a negative
+ * error code for a bad resource count or unsupported version.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* one allocation carved into three shadow tables */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the IOA cache allocation and clear the base pointer. */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Reset the IOA module: zero the cached shadow registers, program the
+ * default custom TPID values and flush everything to hardware.
+ * Returns 0 on success or the first non-zero error from a flush or
+ * config call (previously all error results were silently discarded
+ * and 0 was returned unconditionally, unlike hw_mod_hsh_reset() and
+ * hw_mod_hst_reset() which propagate the flush result).
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	int err;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	err = hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	if (err)
+		return err;
+
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	err = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	if (err)
+		return err;
+	err = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	if (err)
+		return err;
+	err = hw_mod_ioa_config_flush(be);
+	if (err)
+		return err;
+
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	return hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached IOA RCP entries to
+ * hardware. A count of ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached IOA RCP
+ * entry. Returns 0 on success or a negative error code (bad index,
+ * unsupported field or version).
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* preset is write-only: fill the record with *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IOA RCP field; thin wrapper around hw_mod_ioa_rcp_mod(). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IOA RCP field; thin wrapper around hw_mod_ioa_rcp_mod(). */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Write the cached special-TPID configuration to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one cached IOA configuration value (custom TPID 0 or 1); call
+ * hw_mod_ioa_config_flush() afterwards to commit to hardware.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached ROA EPP entries to
+ * hardware. A count of ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached ROA EPP
+ * entry. Returns 0 on success or a negative error code (bad index,
+ * unsupported field or version).
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* preset is write-only: fill the record with *value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one ROA EPP field; thin wrapper around hw_mod_ioa_roa_epp_mod(). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one ROA EPP field; thin wrapper around hw_mod_ioa_roa_epp_mod(). */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* IOA v4 recipe (RCP) shadow record: VLAN/tunnel pop-push settings. */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID configuration (two values, see hw_mod_ioa_config_set()). */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress-path (EPP) shadow record. */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* IOA v4 cache layout: RCP table, TPID record, ROA EPP table. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Ask the backend interface whether the KM module exists on this device. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Query the KM module version and resource counts from the backend
+ * and allocate the version-specific caches (RCP, CAM, TCAM, TCI and
+ * TCQ shadow tables).
+ * Returns 0 on success, -1 on allocation failure, or a negative
+ * error code for a bad resource count or unsupported version.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* mask word counts match KM_RCP_MASK_D_A_SIZE/B_SIZE */
+		be->km.nb_km_rcp_mask_a_word_size = 12;
+		be->km.nb_km_rcp_mask_b_word_size = 6;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the KM cache allocation and clear the base pointer. */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is required */
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Reset the KM module: zero all cached shadow tables and flush RCP,
+ * CAM, TCAM, TCI and TCQ to hardware. Always returns 0; flush
+ * results are not propagated here.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) cached KM RCP entries to
+ * hardware. A count of ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached KM RCP
+ * entry. word_off selects the element for the array-valued mask
+ * fields (MASK_A, MASK_B). Returns 0 on success or a negative error
+ * code (bad index/offset, unsupported field or version).
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * mask_d_a holds KM_RCP_MASK_D_A_SIZE words; valid
+			 * offsets are 0..SIZE-1. The check was ">", allowing
+			 * an out-of-bounds access at word_off == SIZE (same
+			 * off-by-one fixed in the HSH module).
+			 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* ">=" fixes the same off-by-one for mask_b */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field; thin wrapper around hw_mod_km_rcp_mod() with get = 0. */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KM RCP field; thin wrapper around hw_mod_km_rcp_mod() with get = 1. */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush 'count' cached KM CAM records, starting at (start_bank, start_record),
+ * to the hardware via the backend interface.  ALL_ENTRIES flushes every
+ * record of every bank (only valid from bank 0, record 0, or the bounds
+ * check below rejects the request).
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* End position in the flat bank*records index space must stay in range */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) a single field of one cached KM CAM
+ * record, addressed by bank and record.  HW_KM_CAM_PRESET_ALL is write-only
+ * and memsets the whole record to the low byte of *value.  Only module
+ * version 7 is supported.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	/* Negative bank/record wrap to large unsigned values and are rejected */
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM CAM field; wrapper around hw_mod_km_cam_mod() with get = 0. */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM CAM field; wrapper around hw_mod_km_cam_mod() with get = 1. */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCAM entries to hardware, starting at 'start_bank'.
+ * Each bank holds 4 * 256 entries.  ALL_ENTRIES covers every bank,
+ * ALL_BANK_ENTRIES exactly one bank.  The flush always starts at
+ * entry 0 of the start bank (record/word arguments passed as 0).
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) the three T-words of one cached KM
+ * TCAM entry, addressed by (bank, byte, byte_val), or preset a whole bank
+ * (HW_KM_TCAM_BANK_RESET, write-only).  Writes only mark an entry dirty
+ * when at least one word actually changes.  Only module version 7 is
+ * supported.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	/* Flat index into the TCAM shadow: 4 * 256 entries per bank */
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				/* Preset every entry of the bank and mark all dirty */
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			/*
+			 * Reuse the flat index computed (and bounds-checked)
+			 * above instead of recomputing the same expression.
+			 */
+			unsigned int index = start_index;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCAM entry/field; wrapper around hw_mod_km_tcam_mod() with get = 0. */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read one KM TCAM entry/field; wrapper around hw_mod_km_tcam_mod() with get = 1. */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush 'count' cached KM TCI records, starting at (start_bank,
+ * start_record), to hardware.  Banks are nb_tcam_bank_width records wide;
+ * ALL_ENTRIES covers every record of every bank.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached KM TCI record,
+ * addressed by bank and record.  Only module version 7 is supported.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one KM TCI field; wrapper around hw_mod_km_tci_mod() with get = 0. */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one KM TCI field; wrapper around hw_mod_km_tci_mod() with get = 1. */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush 'count' cached KM TCQ records, starting at (start_bank,
+ * start_record), to hardware.  Same indexing scheme as the TCI records.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached KM TCQ record,
+ * addressed by bank and record.  Only module version 7 is supported.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Write one KM TCQ field; wrapper around hw_mod_km_tcq_mod() with get = 0.
+ * NOTE: unlike the other *_set wrappers here, 'value' is passed by pointer.
+ */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one KM TCQ field; wrapper around hw_mod_km_tcq_mod() with get = 1. */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * Shadow copy of one KM RCP record (FPGA module version 7).  Member names
+ * mirror the HW_KM_RCP_* field enumerators handled in hw_mod_km.c; the
+ * int32_t *_ofs members carry signed offsets (accessed via
+ * get_set_signed()).
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];	/* indexed by word_off in HW_KM_RCP_MASK_A */
+	uint32_t mask_b[6];	/* indexed by word_off in HW_KM_RCP_MASK_B */
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* Shadow copy of one KM CAM record (v7): six words and six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* Shadow copy of one KM TCAM entry (v7); 'dirty' marks pending HW writes. */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* Shadow copy of one KM TCI record (v7). */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* Shadow copy of one KM TCQ record (v7). */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table pointers into the KM v7 shadow cache (allocated in hw_mod_km.c). */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query PDB module version and resource counts from the backend and allocate
+ * the shadow cache (RCP table plus one config record).  Returns 0 on
+ * success, -1 on allocation failure, or a module error code for an
+ * unsupported version / bad resource count.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB shadow cache allocated by hw_mod_pdb_alloc(). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->pdb.base);
+	be->pdb.base = NULL;
+}
+
+/*
+ * Reset the PDB module: zero the shadow cache and flush all RCP entries and
+ * the config record to hardware.  Returns the OR of the flush results.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush 'count' cached PDB RCP entries starting at start_idx to hardware. */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached PDB RCP entry.
+ * HW_PDB_RCP_PRESET_ALL (write-only) memsets the whole record;
+ * HW_PDB_RCP_FIND / HW_PDB_RCP_COMPARE delegate to the shared index
+ * search/compare helpers.  Only module version 9 is supported.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one PDB RCP field; wrapper around hw_mod_pdb_rcp_mod() with get = 0. */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one PDB RCP field; wrapper around hw_mod_pdb_rcp_mod() with get = 1. */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached PDB config record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Write one field of the cached PDB config record (set-only; no matching
+ * getter in this module).  Only module version 9 is supported.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * Shadow copy of one PDB RCP record (module version 9).  Member names
+ * mirror the HW_PDB_RCP_* enumerators in hw_mod_pdb.c; the int32_t
+ * ofs*_rel members are signed (accessed via get_set_signed()).
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* Shadow copy of the single PDB config record (v9). */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table pointers into the PDB v9 shadow cache (allocated in hw_mod_pdb.c). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query QSL module version and resource counts from the backend and
+ * allocate the shadow cache (RCP, QST, QEN and UNMQ tables).  Returns 0 on
+ * success, -1 on allocation failure, or a module error code for an
+ * unsupported version / bad resource count.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL shadow cache allocated by hw_mod_qsl_alloc(). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->qsl.base);
+	be->qsl.base = NULL;
+}
+
+/*
+ * Reset the QSL module: zero the shadow cache and flush all RCP, QST, QEN
+ * and UNMQ entries to hardware.  NOTE(review): unlike hw_mod_pdb_reset(),
+ * the flush return values are discarded here.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	/* use the named table size instead of the magic constant 256 */
+	be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0, QSL_QNMQ_ENTRIES);
+
+	return 0;
+}
+
+/* Flush 'count' cached QSL RCP entries starting at start_idx to hardware. */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached QSL RCP entry.
+ * HW_QSL_RCP_PRESET_ALL (write-only) memsets the whole record;
+ * HW_QSL_RCP_FIND / HW_QSL_RCP_COMPARE delegate to the shared index
+ * search/compare helpers.  Only module version 7 is supported.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL RCP field; wrapper around hw_mod_qsl_rcp_mod() with get = 0. */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL RCP field; wrapper around hw_mod_qsl_rcp_mod() with get = 1. */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush 'count' cached QSL QST entries starting at start_idx to hardware. */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached QSL QST entry.
+ * HW_QSL_QST_PRESET_ALL is write-only and memsets the whole record.  Only
+ * module version 7 is supported.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QST field; wrapper around hw_mod_qsl_qst_mod() with get = 0. */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QST field; wrapper around hw_mod_qsl_qst_mod() with get = 1. */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush 'count' cached QSL QEN entries (fixed-size table) to hardware. */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) the enable field of a cached QSL QEN
+ * entry.  Only module version 7 is supported.
+ */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL QEN field; wrapper around hw_mod_qsl_qen_mod() with get = 0. */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL QEN field; wrapper around hw_mod_qsl_qen_mod() with get = 1. */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush 'count' cached QSL UNMQ entries (fixed-size table) to hardware. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of a cached QSL UNMQ entry.
+ * Only module version 7 is supported.
+ */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one QSL UNMQ field; wrapper around hw_mod_qsl_unmq_mod() with get = 0. */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one QSL UNMQ field; wrapper around hw_mod_qsl_unmq_mod() with get = 1. */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/*
+ * Shadow copy of one QSL RCP record (module version 7).  Member names
+ * mirror the HW_QSL_RCP_* enumerators handled in hw_mod_qsl.c.
+ */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* Shadow copy of one QSL QST record (v7). */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* Shadow copy of one QSL QEN record (v7): queue enable bit(s). */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* Shadow copy of one QSL UNMQ record (v7). */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table pointers into the QSL v7 shadow cache (allocated in hw_mod_qsl.c). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query RMC module version from the backend and allocate the shadow cache
+ * (one ctrl record).  Only version 1.3 (encoded as 0x10003) is supported.
+ * Returns 0 on success, -1 on allocation failure, or a module error code
+ * for an unsupported version.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC shadow cache allocated by hw_mod_rmc_alloc(). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(be->rmc.base);
+	be->rmc.base = NULL;
+}
+
+/*
+ * Reset the RMC module to a safe default: zero the shadow cache, block
+ * statistics records, keep-alive records, all MAC ports and all RPP
+ * slices, then flush the ctrl record to hardware.
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	/* duplicate HW_RMC_BLOCK_STATT call removed - it was already set above */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the cached RMC ctrl record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the cached RMC ctrl
+ * record.  Only version 1.3 (0x10003) is supported.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RMC ctrl field; wrapper around hw_mod_rmc_ctrl_mod() with get = 0. */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one RMC ctrl field; wrapper around hw_mod_rmc_ctrl_mod() with get = 1. */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
/*
 * Shadow of the RMC CTRL register, version 1.3.
 * One 32-bit cache word per register field; names mirror the FPGA
 * field names (semantics per the register map — not visible here).
 */
struct rmc_v1_3_ctrl_s {
	uint32_t block_statt;		/* CTRL.BLOCK_STATT */
	uint32_t block_keepa;		/* CTRL.BLOCK_KEEPA */
	uint32_t block_rpp_slice;	/* CTRL.BLOCK_RPP_SLICE */
	uint32_t block_mac_port;	/* CTRL.BLOCK_MAC_PORT (bitmask of ports) */
	uint32_t lag_phy_odd_even;	/* CTRL.LAG_PHY_ODD_EVEN */
};

/* Version 1.3 view of the RMC module cache: a single CTRL entry. */
struct hw_mod_rmc_v1_3_s {
	struct rmc_v1_3_ctrl_s *ctrl;
};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
/*
 * Query the ROA module version and resource counts from the backend and
 * allocate the register cache (one flat allocation for all tables).
 * Returns 0 on success, -1 on allocation failure, or an error code for
 * a missing resource count / unsupported version.
 */
int hw_mod_roa_alloc(struct flow_api_backend_s *be)
{
	int nb;

	_VER_ = be->iface->get_roa_version(be->be_dev);
	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	nb = be->iface->get_nb_roa_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
	be->roa.nb_tun_categories = (uint32_t)nb;

	/* NOTE(review): tunnel categories are a quarter of the reported
	 * ROA categories — confirm the divisor against the FPGA docs.
	 */
	be->roa.nb_tun_categories /= 4;

	switch (_VER_) {
	case 6:
		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
		/* Arguments after the count come in (ptr, nb, elem_size)
		 * triples; the order must match the v6 struct layout.
		 */
		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
			&be->roa.v6.tunhdr,
			be->roa.nb_tun_categories,
			sizeof(struct roa_v6_tunhdr_s),
			&be->roa.v6.tuncfg,
			be->roa.nb_tun_categories,
			sizeof(struct roa_v6_tuncfg_s),
			&be->roa.v6.config,
			1,
			sizeof(struct roa_v6_config_s),
			&be->roa.v6.lagcfg,
			be->roa.nb_lag_entries,
			sizeof(struct roa_v6_lagcfg_s)))
			return -1;
		break;
	/* end case 6 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}
	return 0;
}
+
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	if (be->roa.base) {
+		free(be->roa.base);
+		be->roa.base = NULL;
+	}
+}
+
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err = hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
/* Raw tunnel push-header image: 4 x 4 32-bit words per category. */
struct roa_v6_tunhdr_s {
	uint32_t tunnel_hdr[4 * 4];
};

/*
 * Per-category tunnel configuration; one cache word per register field.
 * Field semantics follow the FPGA register map (not visible here).
 */
struct roa_v6_tuncfg_s {
	uint32_t tun_len;
	uint32_t tun_type;
	uint32_t tun_vlan;
	uint32_t ip_type;
	uint32_t ipcs_upd;
	uint32_t ipcs_precalc;
	uint32_t iptl_upd;
	uint32_t iptl_precalc;
	uint32_t vxlan_udp_len_upd;
	uint32_t tx_lag_ix;
	uint32_t recirculate;
	uint32_t push_tunnel;
	uint32_t recirc_port;
	uint32_t recirc_bypass;
};

/* Global ROA forwarding configuration (single instance, see _alloc). */
struct roa_v6_config_s {
	uint32_t fwd_recirculate;
	uint32_t fwd_normal_pcks;
	uint32_t fwd_txport0;
	uint32_t fwd_txport1;
	uint32_t fwd_cellbuilder_pcks;
	uint32_t fwd_non_normal_pcks;
};

/* One LAG configuration entry (LAGCFG.TXPHY_PORT). */
struct roa_v6_lagcfg_s {
	uint32_t txphy_port;
};

/*
 * Version 6 view of the ROA cache; all pointers index into the single
 * flat allocation made by hw_mod_roa_alloc() and freed via base.
 */
struct hw_mod_roa_v6_s {
	struct roa_v6_tunhdr_s *tunhdr;
	struct roa_v6_tuncfg_s *tuncfg;
	struct roa_v6_config_s *config;
	struct roa_v6_lagcfg_s *lagcfg;
};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
/*
 * Query the SLC module version and allocate its RCP cache — one entry
 * per flow category.  Returns 0 on success, -1 on allocation failure,
 * or an error for an unsupported version.
 */
int hw_mod_slc_alloc(struct flow_api_backend_s *be)
{
	_VER_ = be->iface->get_slc_version(be->be_dev);
	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	switch (_VER_) {
	case 1:
		/* (ptr, nb, elem_size) triple; single RCP table */
		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
			&be->slc.v1.rcp,
			be->max_categories,
			sizeof(struct slc_v1_rcp_s)))
			return -1;
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	if (be->slc.base) {
+		free(be->slc.base);
+		be->slc.base = NULL;
+	}
+}
+
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct hw_mod_slc_v1_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct hw_mod_slc_v1_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct hw_mod_slc_v1_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
/*
 * Query the SLC LR module version and allocate its RCP cache — one
 * entry per flow category.  Returns 0 on success, -1 on allocation
 * failure, or an error for an unsupported version.
 */
int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
{
	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	switch (_VER_) {
	case 2:
		/* (ptr, nb, elem_size) triple; single RCP table */
		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
			&be->slc_lr.v2.rcp,
			be->max_categories,
			sizeof(struct slc_lr_v2_rcp_s)))
			return -1;
		break;
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	if (be->slc_lr.base) {
+		free(be->slc_lr.base);
+		be->slc_lr.base = NULL;
+	}
+}
+
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hw_mod_slc_lr_v2_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct hw_mod_slc_lr_v2_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct hw_mod_slc_lr_v2_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
/*
 * One SLC LR recipe: tail-slice enable/anchor/offset plus pcap flag.
 * Same field layout as struct slc_v1_rcp_s.
 */
struct slc_lr_v2_rcp_s {
	uint32_t tail_slc_en;	/* enable tail slicing */
	uint32_t tail_dyn;	/* RCP.TAIL_DYN */
	int32_t tail_ofs;	/* signed tail offset (RCP.TAIL_OFS) */
	uint32_t pcap;		/* RCP.PCAP */
};

/* Version 2 view of the SLC LR cache: one RCP table. */
struct hw_mod_slc_lr_v2_s {
	struct slc_lr_v2_rcp_s *rcp;
};
+
#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
/*
 * One SLC recipe: tail-slice enable/anchor/offset plus pcap flag.
 * Field semantics per the FPGA register map (not visible here).
 */
struct slc_v1_rcp_s {
	uint32_t tail_slc_en;	/* enable tail slicing */
	uint32_t tail_dyn;	/* RCP.TAIL_DYN */
	int32_t tail_ofs;	/* signed tail offset (RCP.TAIL_OFS) */
	uint32_t pcap;		/* RCP.PCAP */
};

/* Version 1 view of the SLC cache: one RCP table. */
struct hw_mod_slc_v1_s {
	struct slc_v1_rcp_s *rcp;
};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
/*
 * Query the TPE module version and resource counts from the backend
 * and allocate the register caches (one flat allocation for all
 * tables).  IFR tables exist only from version 2 on.  Returns 0 on
 * success, -1 on allocation failure, or an error code for a missing
 * resource count / unsupported version.
 */
int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
{
	int nb;

	_VER_ = be->iface->get_tpe_version(be->be_dev);
	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	nb = be->iface->get_nb_tpe_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
	be->tpe.nb_rcp_categories = (uint32_t)nb;

	/* IFR recipes were introduced with TPE v2 */
	be->tpe.nb_ifr_categories = 0;
	if (_VER_ > 1) {
		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
		if (nb <= 0)
			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
		be->tpe.nb_ifr_categories = (uint32_t)nb;
	}

	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
	be->tpe.nb_cpy_writers = (uint32_t)nb;

	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
	be->tpe.nb_rpl_depth = (uint32_t)nb;

	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;

	/* Arguments after the count come in (ptr, nb, elem_size)
	 * triples; the order must match the version struct layout.
	 */
	switch (_VER_) {
	case 1:
		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpp_v0_rcp_s),
			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_ins_v1_rcp_s),
			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpl_v2_rcp_s),
			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
			sizeof(struct tpe_v1_rpl_v2_ext_s),
			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
			sizeof(struct tpe_v1_rpl_v2_rpl_s),
			&be->tpe.v1.cpy_rcp,
			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_cpy_v1_rcp_s),
			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_hfu_v1_rcp_s),
			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_csu_v0_rcp_s)))
			return -1;
		break;
	case 2:
		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpp_v0_rcp_s),
			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
			sizeof(struct tpe_v2_ifr_v1_rcp_s),
			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_ins_v1_rcp_s),
			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpl_v2_rcp_s),
			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
			sizeof(struct tpe_v1_rpl_v2_ext_s),
			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
			sizeof(struct tpe_v1_rpl_v2_rpl_s),
			&be->tpe.v2.cpy_rcp,
			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_cpy_v1_rcp_s),
			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_hfu_v1_rcp_s),
			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_csu_v0_rcp_s)))
			return -1;
		break;
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	if (be->tpe.base) {
+		free(be->tpe.base);
+		be->tpe.base = NULL;
+	}
+}
+
/*
 * Reset the TPE module: zero the whole cache and flush every table to
 * hardware.  Errors from the individual flushes are OR-accumulated so
 * any failure is reported; the IFR tables exist only in version 2.
 */
int hw_mod_tpe_reset(struct flow_api_backend_s *be)
{
	int err = 0;

	/* Zero entire cache area */
	ZERO_MOD_CACHE(&be->tpe);

	NT_LOG(DBG, FILTER, "INIT TPE\n");
	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);

	if (_VER_ == 2) {
		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
	}

	return err;
}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count) /* write shadow entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES) /* ALL_ENTRIES: flush the whole table */
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get) /* get != 0: read field into *value; get == 0: write from *value */
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) { /* TPE module version */
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL: /* set-only: memset whole record to (uint8_t)*value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND: /* locate an entry equal to entry [index]; result via *value */
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value) /* write a single INS recipe field */
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* read a single INS recipe field */
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count) /* write shadow entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES) /* ALL_ENTRIES: flush the whole table */
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get) /* get != 0: read field into *value; get == 0: write from *value */
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) { /* TPE module version */
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL: /* set-only: memset whole record to (uint8_t)*value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND: /* locate an entry equal to entry [index]; result via *value */
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value) /* write a single RPL recipe field */
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* read a single RPL recipe field */
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count) /* write shadow entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES) /* ALL_ENTRIES: flush the whole table */
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get) /* get != 0: read field into *value; get == 0: write from *value */
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) { /* TPE module version */
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL: /* set-only: memset whole record to (uint8_t)*value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND: /* locate an entry equal to entry [index]; result via *value */
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value) /* write a single RPL_EXT field */
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* read a single RPL_EXT field */
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count) /* write shadow entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES) /* ALL_ENTRIES: flush the whole table */
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get) /* value points at 4 words for HW_TPE_RPL_RPL_VALUE */
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) { /* TPE module version */
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL: /* set-only: memset whole record to (uint8_t)*value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND: /* locate an entry equal to entry [index]; result via *value */
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE: /* whole 16-byte replacement line is copied */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* pointer (not scalar): RPL_RPL_VALUE writes 4 words */
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* reads up to 4 words into value */
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count) /* write shadow entries [start_idx, start_idx+count) to HW */
+{
+	const uint32_t cpy_size = /* table is nb_cpy_writers x nb_rcp_categories entries */
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES) /* ALL_ENTRIES: flush the whole table */
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get) /* get != 0: read field into *value; get == 0: write from *value */
+{
+	const uint32_t cpy_size = /* table is nb_cpy_writers x nb_rcp_categories entries */
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) { /* TPE module version */
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL: /* set-only: memset whole record to (uint8_t)*value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND: /* locate an entry equal to entry [index]; result via *value */
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value) /* write a single CPY recipe field */
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* read a single CPY recipe field */
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count) /* write shadow entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES) /* ALL_ENTRIES: flush the whole table */
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get) /* get != 0: read field into *value; get == 0: write from *value */
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) { /* TPE module version */
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL: /* set-only: memset whole record to (uint8_t)*value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND: /* locate an entry equal to entry [index]; result via *value */
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_WR: /* LEN_A length-field group */
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+		/* LEN_B length-field group */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+		/* LEN_C length-field group */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+		/* TTL update group */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+		/* protocol info and header-offset group */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value) /* write a single HFU recipe field */
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* read a single HFU recipe field */
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count) /* write shadow entries [start_idx, start_idx+count) to HW */
+{
+	if (count == ALL_ENTRIES) /* ALL_ENTRIES: flush the whole table */
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get) /* get != 0: read field into *value; get == 0: write from *value */
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) { /* TPE module version */
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL: /* set-only: memset whole record to (uint8_t)*value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND: /* locate an entry equal to entry [index]; result via *value */
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1 and 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value) /* write a single CSU recipe field */
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value) /* read a single CSU recipe field */
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+struct tpe_v1_rpp_v0_rcp_s { /* RPP recipe, version 0 */
+	uint32_t exp;
+};
+
+struct tpe_v1_ins_v1_rcp_s { /* INS recipe, version 1 */
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+struct tpe_v1_rpl_v2_rcp_s { /* RPL recipe, version 2 */
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+struct tpe_v1_rpl_v2_ext_s { /* RPL extension table entry */
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+struct tpe_v1_rpl_v2_rpl_s { /* RPL replacement data line */
+	uint32_t value[4]; /* 16 bytes; copied whole by hw_mod_tpe_rpl_rpl_mod() */
+};
+
+struct tpe_v1_cpy_v1_rcp_s { /* CPY recipe, version 1 */
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+struct tpe_v1_hfu_v1_rcp_s { /* HFU recipe, version 1: length/TTL/offset updates */
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+struct tpe_v1_csu_v0_rcp_s { /* CSU recipe, version 0: per-layer checksum commands */
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+struct hw_mod_tpe_v1_s { /* SW shadow of all TPE v1 tables (arrays sized by be->tpe.nb_*) */
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+struct tpe_v2_rpp_v1_ifr_rcp_s { /* RPP IFR recipe, version 1 */
+	uint32_t en; /* enable flag — TODO confirm 0/1 semantics */
+	uint32_t mtu; /* MTU — assumed bytes; verify against FPGA register docs */
+};
+
+struct tpe_v2_ifr_v1_rcp_s { /* IFR recipe, version 1 */
+	uint32_t en; /* enable flag — TODO confirm 0/1 semantics */
+	uint32_t mtu; /* MTU — assumed bytes; verify against FPGA register docs */
+};
+
+struct hw_mod_tpe_v2_s { /* v2 shadow: v1 layout plus the two IFR recipe tables */
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+struct flow_queue_id_s {
+	int id; /* logical queue id — NOTE(review): confirm mapping vs. hw_id */
+	int hw_id; /* hardware queue id */
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather a restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather a restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6]; /* 6-byte MAC address */
+};
+
+#pragma pack()
+
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr) /* formats "AA:BB:CC:DD:EE:FF"; NUL-terminated, truncated to size */
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl; /* version (high nibble) + header length in words (low nibble) */
+	uint8_t tos; /* type of service */
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off; /* data offset (header length) in high nibble */
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp; /* urgent pointer */
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags; /* version, PT, reserved and message flags */
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid; /* tunnel endpoint id */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index; /* index of tag to match — cf. flow_action_tag */
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /**< VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* NOTE(review): says "HW port no" upstream — for a COUNT action this is likely a counter id; confirm (possible copy-paste from flow_elem_port_id) */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX]; /* parsed view of data */
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX]; /* parsed view of data */
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ *
+ * NOTE(review): the names and ordering appear to mirror DPDK's
+ * enum rte_flow_field_id - keep in sync with it (TODO: confirm).
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* How src is combined into dst. */
+	struct flow_action_modify_data dst; /* Field to be modified. */
+	struct flow_action_modify_data src; /* Source field or value. */
+	uint32_t width; /* Number of bits to transfer. */
+};
+
+/* One entry of an action list; conf points at the matching
+ * flow_action_* config struct for the given type (may be NULL).
+ */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf;
+};
+
+/* NOTE(review): both FLOW_ERROR_NONE and FLOW_ERROR_SUCCESS exist -
+ * flow_nic_set_error() only emits SUCCESS/GENERAL; confirm NONE's role.
+ */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Error report returned to the API caller; message is a static string. */
+struct flow_error {
+	enum flow_error_e type;
+	const char *message;
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write a single LAG table entry. */
+	FLOW_LAG_SET_ALL, /* Write every 4th entry starting at index & 3. */
+	FLOW_LAG_SET_BALANCE, /* Distribute entries between two ports. */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		/* Same IPv6 addresses viewed as two 64-bit words each. */
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* Selects the v4 or v6 view of the union. */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type; /* NOTE(review): tunnel type enum defined elsewhere - confirm values. */
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle; /* opaque handle for a programmed flow */
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/* Create/look up the per-port flow device and optionally attach queues. */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ * All functions return 0 (or a handle) on success and report failure
+ * details through the optional flow_error out-parameter.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v14 6/8] net/ntnic: adds flow logic
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-09-04 13:53   ` [PATCH v14 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-09-04 13:53   ` Mykola Kostenok
  2023-09-04 13:54   ` [PATCH v14 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-09-04 13:54   ` [PATCH v14 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:53 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1307 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5130 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10091 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..8cdf15663d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+/* NOTE(review): presumably gates scatter-gather support further down this
+ * file - confirm where it is tested.
+ */
+#define SCATTER_GATHER
+
+/*
+ * Debug names for the resource pools, indexed by enum res_type_e.
+ * The order must match that enum exactly (see the inline index comments).
+ * Used for logging only.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the linked list of probed NIC devices; guarded by base_mtx
+ * in the LAG functions below.
+ */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Error message table, indexed by enum flow_nic_err_msg_e.
+ * Order and count must be kept in sync with that enum;
+ * flow_nic_set_error() asserts msg < ERR_MSG_NO_MSG before indexing.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+	/* 39 */ { "Invalid priority value" },
+};
+
+/*
+ * Translate an internal error code into a caller-visible flow_error.
+ * @error may be NULL, in which case only the range assertion runs.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (error) {
+		error->message = err_msg[msg].message;
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free resource of @res_type.  Only indices that are a
+ * multiple of @alignment are tried; the entry's reference count is set
+ * to 1.  Returns the allocated index, or -1 if the pool is exhausted.
+ *
+ * NOTE(review): @alignment must be > 0 or the loop never advances -
+ * confirm all callers guarantee this.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+			i += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+			flow_nic_mark_resource_used(ndev, res_type, i);
+			ndev->res[res_type].ref[i] = 1;
+			return i;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific resource @idx of @res_type.  The entry's reference
+ * count is set to 1.  Returns 0 on success, -1 if it is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Allocate @num contiguous resources of @res_type.  Candidate start
+ * indices advance in steps of @alignment; each allocated entry gets a
+ * reference count of 1.  Returns the first allocated index, or -1 on
+ * failure.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/*
+	 * Reject impossible requests up front.  Without this guard the
+	 * unsigned expression "resource_count - (num - 1)" underflows when
+	 * num > resource_count and the scan below runs far out of bounds.
+	 */
+	if (num == 0 || num > ndev->res[res_type].resource_count)
+		return -1;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Unconditionally release resource @idx of @res_type.  Does not look at
+ * the reference count - use flow_nic_deref_resource() for counted release.
+ */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an additional reference on an already-allocated resource.
+ * Returns 0 on success, -1 if the counter is saturated at UINT32_MAX.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Drop one reference on a resource; frees it when the count hits zero.
+ * Returns 1 if the resource is still referenced, 0 if it was freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Return the index of the first in-use resource of @res_type at or after
+ * @idx_start, or -1 if none remains.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
+			i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ *
+ * On success the first allocated index and the count are recorded in
+ * fh->resource[res_type].
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the specific resource @idx for flow handle @fh and record it in
+ * fh->resource[res_type].  Returns 0 on success, non-zero on failure.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	/* NOTE(review): allocation already succeeded above, so this check
+	 * only fires for a negative @idx argument.
+	 */
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program hash recipe @hsh_idx with a predefined algorithm.  The recipe
+ * is first cleared (PRESET_ALL); HASH_ALGO_ROUND_ROBIN is the all-zero
+ * recipe, HASH_ALGO_5TUPLE builds an IPv6 5-tuple hash with the adaptive
+ * IPv4 mask bit so IPv4 traffic hashes too.  Always returns 0.
+ *
+ * NOTE(review): hw_mod_hsh_rcp_set() return values are not checked here,
+ * unlike flow_nic_set_hasher_fields() - confirm that is intended.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program hash recipe @hsh_idx from the RSS field-flag combination in @f.
+ *
+ * Only a fixed set of flag combinations is supported; any other
+ * combination is rejected with -1.  Every hw_mod_hsh_rcp_set() return
+ * value is accumulated into @res so a hardware communication failure in
+ * any step is detected (the previous code dropped the results in the two
+ * IP cases, which made their error checks dead).
+ *
+ * Returns 0 on success, -1 on unsupported flags or hardware error.
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * The first VLAN tag is used as the C-VLAN; this only holds
+		 * for single-VLAN traffic.
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set: hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS,
+					  hsh_idx, 0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set: hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS,
+					  hsh_idx, 0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set: hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Find the eth device for (adapter_no, port), or NULL if either the
+ * adapter or the port is unknown.
+ *
+ * NOTE(review): walks dev_base without taking base_mtx, unlike the LAG
+ * functions - confirm callers serialise against device add/remove.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev = dev_base;
+
+	while (nic_dev) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+		nic_dev = nic_dev->next;
+	}
+
+	if (!nic_dev)
+		return NULL;
+
+	struct flow_eth_dev *dev = nic_dev->eth_base;
+
+	while (dev) {
+		if (port == dev->port)
+			return dev;
+		dev = dev->next;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the NIC device for @adapter_no in the global device list, or NULL.
+ * NOTE(review): no locking here; LAG callers below hold base_mtx first.
+ */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = dev_base;
+
+	while (ndev) {
+		if (adapter_no == ndev->adapter_no)
+			break;
+		ndev = ndev->next;
+	}
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Configure LAG port pairing on @adapter_no.  Returns 0 on success,
+ * -1 for an unknown adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * For each bit N set in port_mask, pair two ports as a LAG:
+	 * ports N*2 and N*2+1 are merged together and reported as
+	 * incoming port N*2.
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block RX traffic from the MAC ports selected by @port_mask.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one TX-PHY port entry of the ROA LAG table and flush it to HW. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Apply a LAG table configuration command (see enum flow_lag_cmd).
+ * Returns 0 on success, -1 for an unknown adapter or command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		/* Write entry (index & 3) of every 4-entry hash block. */
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* Entry values: 1 selects port 0, 2 selects port 1. */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a match/action list without programming it.
+ * Only the inline profile is supported; the vSwitch profile is rejected.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return NULL;
+	}
+	return flow_create_profile_inline(dev, attr, item, action, error);
+}
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_destroy_profile_inline(dev, flow, error);
+}
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_flush_profile_inline(dev, error);
+}
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return -1;
+	}
+	return flow_query_profile_inline(dev, flow, action, data, length,
+					 error);
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;
+
+	while (dev) {
+		if (dev == eth_dev) {
+			if (prev)
+				prev->next = dev->next;
+
+			else
+				ndev->eth_base = dev->next;
+			return 0;
+		}
+		prev = dev;
+		dev = dev->next;
+	}
+	return -1;
+}
+
/*
 * Tear down everything created on a NIC device: delete every eth-port
 * device (which destroys the flows created on it), release the inline
 * profile's flow-management state, then the KM and KCC resource managers.
 * Called from flow_reset_nic_dev() and flow_api_done().
 */
static void flow_ndev_reset(struct flow_nic_dev *ndev)
{
	/* Delete all eth-port devices created on this NIC device */
	while (ndev->eth_base)
		flow_delete_eth_dev(ndev->eth_base);

	/* Error check: no flow may outlive the eth-port it was created on */
	while (ndev->flow_base) {
		NT_LOG(ERR, FILTER,
		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
		       ndev->flow_base);

		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
			return;
		}
		flow_destroy_profile_inline(ndev->flow_base->dev,
					    ndev->flow_base, NULL);
	}

	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
		return;
	}
	done_flow_management_of_ndev_profile_inline(ndev);

	km_free_ndev_resource_management(&ndev->km_res_handle);
	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);

#ifdef FLOW_DEBUG
	/*
	 * free all resources default allocated, initially for this NIC DEV
	 * Is not really needed since the bitmap will be freed in a sec. Therefore
	 * only in debug mode
	 */

	/* Check that all resources have been released (leak report only) */
	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
	for (unsigned int i = 0; i < RES_COUNT; i++) {
		int err = 0;
#if defined(FLOW_DEBUG)
		/* NOTE(review): redundant - already inside #ifdef FLOW_DEBUG */
		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
#endif
		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
				ii++) {
			int ref = ndev->res[i].ref[ii];
			int used = flow_nic_is_resource_used(ndev, i, ii);

			if (ref || used) {
				NT_LOG(DBG, FILTER,
				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
				       used);
				err = 1;
			}
		}
		if (err)
			NT_LOG(DBG, FILTER,
			       "ERROR - some resources not freed\n");
	}
#endif
}
+
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev)
+		return -1;
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	if (eth_dev)
+		free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
/*
 * Look up the tunnel configuration associated with a flow stat id and
 * virtual port. Thin forwarding wrapper around tunnel_get_definition().
 */
int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
			       uint8_t vport)
{
	return tunnel_get_definition(tun, flow_stat_id, vport);
}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPGA modules
+ * ******************************************************************************************
+ */
+
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	assert(ndev->res[res_type].alloc_bm == NULL);
+	/* allocate bitmap and ref counter */
+	ndev->res[res_type].alloc_bm =
+		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
+	if (ndev->res[res_type].alloc_bm) {
+		ndev->res[res_type].ref =
+			(uint32_t *)&ndev->res[res_type]
+			.alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
+		ndev->res[res_type].resource_count = count;
+		return 0;
+	}
+	return -1;
+}
+
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	if (ndev->res[res_type].alloc_bm)
+		free(ndev->res[res_type].alloc_bm);
+}
+
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;
+
+	while (nic_dev) {
+		if (nic_dev == ndev) {
+			if (prev)
+				prev->next = nic_dev->next;
+			else
+				dev_base = nic_dev->next;
+			pthread_mutex_unlock(&base_mtx);
+			return 0;
+		}
+		prev = nic_dev;
+		nic_dev = nic_dev->next;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return -1;
+}
+
/*
 * Create and register the flow API instance for one physical adapter.
 *
 * be_if/be_dev: backend operations table (must be interface version 1)
 * and its opaque backend device handle.
 *
 * Initializes the backend, sizes every resource-manager table from the
 * backend capability fields, and inserts the new device in the global
 * NIC list. Returns the new flow_nic_dev, or NULL on failure.
 */
struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
				     const struct flow_api_backend_ops *be_if,
				     void *be_dev)
{
	if (!be_if || be_if->version != 1) {
		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
		return NULL;
	}

	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));

	if (!ndev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
		return NULL;
	}

	/*
	 * To dump module initialization writes use
	 * FLOW_BACKEND_DEBUG_MODE_WRITE
	 * then remember to set it ...NONE afterwards again
	 */
	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);

	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
		goto err_exit;
	ndev->adapter_no = adapter_no;

	/* number of in-ports is capped at 256 */
	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
				 256 :
				 ndev->be.num_rx_ports);

	/*
	 * Free resources in NIC must be managed by this module
	 * Get resource sizes and create resource manager elements
	 */
	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_CFN,
				   ndev->be.cat.nb_cat_funcs))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
		goto err_exit;
	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
		goto err_exit;
	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
				   ndev->be.cat.nb_flow_types))
		goto err_exit;
	if (init_resource_elements(ndev, RES_KM_CATEGORY,
				   ndev->be.km.nb_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
		goto err_exit;
	if (init_resource_elements(ndev, RES_PDB_RCP,
				   ndev->be.pdb.nb_pdb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_QSL_RCP,
				   ndev->be.qsl.nb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_QSL_QST,
				   ndev->be.qsl.nb_qst_entries))
		goto err_exit;
	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_IOA_RCP,
				   ndev->be.ioa.nb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_ROA_RCP,
				   ndev->be.roa.nb_tun_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
				   ndev->be.cat.nb_flow_types))
		goto err_exit;
	if (init_resource_elements(ndev, RES_FLM_RCP,
				   ndev->be.flm.nb_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_HST_RCP,
				   ndev->be.hst.nb_hst_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_TPE_RCP,
				   ndev->be.tpe.nb_rcp_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_TPE_EXT,
				   ndev->be.tpe.nb_rpl_ext_categories))
		goto err_exit;
	if (init_resource_elements(ndev, RES_TPE_RPL,
				   ndev->be.tpe.nb_rpl_depth))
		goto err_exit;

	/* may need IPF, COR */

	/* check that all defined resource types have been initialized */
	for (int i = 0; i < RES_COUNT; i++)
		assert(ndev->res[i].alloc_bm);

	pthread_mutex_init(&ndev->mtx, NULL);
	list_insert_flow_nic(ndev);

	return ndev;

err_exit:
	/*
	 * NOTE(review): ndev is not yet in the global list here, so
	 * list_remove_flow_nic() inside flow_api_done() just returns -1.
	 */
	if (ndev)
		flow_api_done(ndev);
	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
	return NULL;
}
+
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+	if (ndev) {
+		flow_ndev_reset(ndev);
+
+		/* delete resource management allocations for this ndev */
+		for (int i = 0; i < RES_COUNT; i++)
+			done_resource_elements(ndev, i);
+
+		flow_api_backend_done(&ndev->be);
+		list_remove_flow_nic(ndev);
+		free(ndev);
+	}
+	return 0;
+}
+
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (!ndev) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+	return ndev->be.be_dev;
+}
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	return eth_dev->num_queues;
+}
+
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
+		return flow_get_flm_stats_profile_inline(ndev, data, size);
+	return -1;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
/* Allocation state for one NIC resource type (one entry per res_type_e). */
struct hw_mod_resource_s {
	uint8_t *alloc_bm; /* allocation bitmap; ref[] shares this allocation */
	uint32_t *ref; /* reference counter for each resource element */
	uint32_t resource_count; /* number of total available entries */
};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
struct nt_eth_rss {
	uint64_t fields; /* bitwise OR of the NT_ETH_RSS_* option bits above */
};
+
/* One eth-port device opened on a NIC; linked per-NIC via 'next'. */
struct flow_eth_dev {
	struct flow_nic_dev *ndev; /* NIC that owns this port device */
	uint8_t port; /* NIC port id */
	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */

	struct flow_queue_id_s
		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */

	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
	struct flow_eth_dev *next; /* next port device on the same NIC */
};
+
/* RSS hash algorithm selector (see flow_nic_set_hasher()). */
enum flow_nic_hash_e {
	HASH_ALGO_ROUND_ROBIN = 0,
	HASH_ALGO_5TUPLE,
};
+
/*
 * Per-adapter flow API instance ("registered NIC backend").
 * Created by flow_api_create(), kept on a global linked list.
 */
struct flow_nic_dev {
	uint8_t adapter_no; /* physical adapter no in the host system */
	uint16_t ports; /* number of in-ports addressable on this NIC */
	enum flow_eth_dev_profile
	flow_profile; /* flow profile this NIC is initially prepared for */
	int flow_mgnt_prepared; /* non-zero once profile modules are set up */

	struct hw_mod_resource_s
		res[RES_COUNT]; /* raw NIC resource allocation table */
	void *flm_res_handle;
	void *km_res_handle;
	void *kcc_res_handle;

	void *flm_mtr_handle;
	void *ft_res_handle;
	void *mtr_stat_handle;
	void *group_handle;

	/* statistics */
	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];

	struct flow_handle
		*flow_base; /* linked list of all flows created on this NIC */
	struct flow_handle *
		flow_base_flm; /* linked list of all FLM flows created on this NIC */

	struct flow_api_backend_s be; /* NIC backend API */
	struct flow_eth_dev *
		eth_base; /* linked list of created eth-port devices on this NIC */
	pthread_mutex_t mtx; /* protects this device's lists and resources */

	int default_qsl_drop_index; /* pre allocated default QSL Drop */
	int default_qsl_discard_index; /* pre allocated default QSL Discard */
	/* RSS hash function settings bitfields correspond to data used for hashing */
	struct nt_eth_rss
		rss_hash_config;
	struct flow_nic_dev *next; /* next NIC linked list */
};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
/*
 * Error codes reported through flow_nic_set_error(); values are numbered
 * explicitly so they stay stable.
 */
enum flow_nic_err_msg_e {
	ERR_SUCCESS = 0,
	ERR_FAILED = 1,
	ERR_MEMORY = 2,
	ERR_OUTPUT_TOO_MANY = 3,
	ERR_RSS_TOO_MANY_QUEUES = 4,
	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
	ERR_MATCH_VLAN_TOO_MANY = 9,
	ERR_MATCH_INVALID_IPV6_HDR = 10,
	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
	ERR_ACTION_REPLICATION_FAILED = 16,
	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
	ERR_MATCH_ENTROPY_FAILED = 25,
	ERR_MATCH_CAM_EXHAUSTED = 26,
	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
	ERR_ACTION_UNSUPPORTED = 28,
	ERR_REMOVE_FLOW_FAILED = 29,
	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
	ERR_OUTPUT_INVALID = 33,
	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
	ERR_MATCH_KCC_KEY_CLASH = 36,
	ERR_MATCH_CAT_CAM_FAILED = 37,
	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
	ERR_MSG_NO_MSG
};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
/*
 * Bitmap helpers for the per-resource allocation bitmaps (alloc_bm).
 * Each macro now evaluates both arguments exactly once; previously
 * flow_nic_unset_bit/flow_nic_is_bit_set expanded 'arr' unparenthesized,
 * which mis-parses for argument expressions like 'p + 1'.
 */
#define flow_nic_set_bit(arr, x) \
	do { \
		uint8_t *_temp_arr = (arr); \
		size_t _temp_x = (x); \
		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
		(uint8_t)(1 << (_temp_x % 8))); \
	} while (0)

#define flow_nic_unset_bit(arr, x) \
	do { \
		uint8_t *_temp_arr = (arr); \
		size_t _temp_x = (x); \
		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
	} while (0)

#define flow_nic_is_bit_set(arr, x) \
	({ \
		const uint8_t *_temp_arr = (arr); \
		size_t _temp_x = (x); \
		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
	})
+
/*
 * Mark a resource element as allocated in the NIC's bitmap.
 * Asserts (in debug builds) that it was not already marked used.
 */
#define flow_nic_mark_resource_used(_ndev, res_type, index) \
	do { \
		struct flow_nic_dev *_temp_ndev = (_ndev); \
		__typeof__(res_type) _temp_res_type = (res_type); \
		size_t _temp_index = (index); \
		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
		dbg_res_descr[_temp_res_type], _temp_index); \
		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
		== 0); \
		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
	} while (0)


/* Mark a resource element as free again in the NIC's bitmap. */
#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
	do { \
		__typeof__(res_type) _temp_res_type = (res_type); \
		size_t _temp_index = (index); \
		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
		dbg_res_descr[_temp_res_type], _temp_index); \
		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
	} while (0)

/* Non-zero when the given resource element is currently allocated. */
#define flow_nic_is_resource_used(_ndev, res_type, index) \
	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..82d7f8b1c9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5130 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for a single category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * Performs a read-modify-write of the FTE enable bitmap and only flushes
+ * to hardware when the bitmap actually changes. Always returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	/* Number of CFNs covered by one bitmap word (8 CFN bits per word) */
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	/* Set or clear this CFN's bit depending on 'enable' */
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for a single category function (CFN).
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * Mirror of set_flow_type_km() but targets the FLM FTE table; only flushes
+ * to hardware when the enable bitmap changes. Always returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	/* Number of CFNs covered by one bitmap word (8 CFN bits per word) */
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	uint32_t fte_field_bm = 1 << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	/* Set or clear this CFN's bit depending on 'enable' */
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a logical RX queue id to its hardware queue id by linear
+ * search of the device's queue table. Returns -1 when not found.
+ */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	for (int i = 0; i < dev->num_queues; ++i) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+	}
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Reset the FLM control registers and busy-wait (up to ~1 s, polling at
+ * 1 us intervals) for the DDR4 calibration-done flag. On success, program
+ * initial scrubber and timeout settings.
+ *
+ * Returns 0 on success, -1 if calibration never completes.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Re-initialize FLM SDRAM: disable FLM and all non-zero RCP categories,
+ * wait for the engine to go idle, trigger SDRAM initialization and wait
+ * for completion, then re-enable FLM according to 'enable'. Both waits
+ * poll at 1 us intervals with a 10^6-iteration bound.
+ *
+ * Returns 0 on success, -1 if the engine never goes idle or the SDRAM
+ * initialization never completes.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Category 0 is skipped; reset categories 1..nb_categories-1 */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+/* Maximum number of FLM recipe (RCP) groups managed by this module */
+#define FLM_FLOW_RCP_MAX 32
+/* Maximum number of flow types (FT action sets) per group */
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Compact identity of an FLM flow-type action set, packed into a single
+ * uint64_t so two action sets can be compared with one integer compare
+ * (see flm_flow_learn_prepare()). The bitfield layout must total 64 bits.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;       /* slot occupied */
+			uint64_t drop : 1;         /* drop action (no destinations) */
+			uint64_t ltx_en : 1;       /* TX to physical port enabled */
+			uint64_t ltx_port : 1;     /* physical port selector */
+			uint64_t queue_en : 1;     /* TX to virtual queue enabled */
+			uint64_t queue : 8;        /* virtual queue id */
+			uint64_t encap_len : 8;    /* tunnel header length */
+			uint64_t encap_vlans : 2;  /* number of VLANs in tunnel header */
+			uint64_t encap_ip : 1;     /* 0 = IPv4 tunnel, 1 = otherwise */
+			uint64_t decap_end : 5;    /* header strip end (dyn) */
+			uint64_t jump_to_group : 8; /* target group for jump action */
+			uint64_t pad : 27;         /* pad to 64 bits */
+		};
+		uint64_t data; /* whole identity as one comparable word */
+	};
+};
+
+/*
+ * Key definition for an FLM recipe: dyn/ofs selectors for the two
+ * quad-words (QW0/QW4) and two single-words (SW8/SW9) of the lookup key,
+ * plus outer/inner protocol flags. Packed into one uint64_t so that two
+ * key definitions can be compared with a single integer compare.
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;  /* QW0 dynamic offset selector */
+			uint64_t qw0_ofs : 8;  /* QW0 byte offset */
+			uint64_t qw4_dyn : 7;  /* QW4 dynamic offset selector */
+			uint64_t qw4_ofs : 8;  /* QW4 byte offset */
+			uint64_t sw8_dyn : 7;  /* SW8 dynamic offset selector */
+			uint64_t sw8_ofs : 8;  /* SW8 byte offset */
+			uint64_t sw9_dyn : 7;  /* SW9 dynamic offset selector */
+			uint64_t sw9_ofs : 8;  /* SW9 byte offset */
+			uint64_t outer_proto : 1; /* match outer protocol number */
+			uint64_t inner_proto : 1; /* match inner protocol number */
+			uint64_t pad : 2;      /* pad to 64 bits */
+		};
+		uint64_t data; /* whole definition as one comparable word */
+	};
+};
+
+/*
+ * Build an FT identity word from a parsed flow definition: destination
+ * (drop / physical port / virtual queue), encap and decap parameters,
+ * and optional jump-to-group. The struct is returned by value.
+ */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ft_ident;
+
+	/* Bitfield layout must pack into exactly one 64-bit word */
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		/* No destinations configured: drop action */
+		ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ft_ident.ltx_en = 1;
+				ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ft_ident.queue_en = 1;
+				ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ft_ident.encap_len = fd->tun_hdr.len;
+		ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* UINT32_MAX means "no jump action" */
+	if (fd->jump_to_group != UINT32_MAX)
+		ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ft_ident;
+}
+
+/*
+ * Store the dyn/ofs selector for quad-word 'qw' (0 => QW0, 1 => QW4)
+ * into a key definition; values are masked to their bitfield widths.
+ */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw == 0) {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+	} else {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Store the dyn/ofs selector for single-word 'sw' (0 => SW8, 1 => SW9)
+ * into a key definition; values are masked to their bitfield widths.
+ */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw == 0) {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+	} else {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Per-group FLM state: the group-0 CFN/KM flow type that anchors the
+ * group, the key definition shared by all flows in the group, and the
+ * table of flow-type action sets currently in use.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;              /* CFN index of group 0 anchor, -1 if unset */
+	int km_ft_group0;            /* KM flow type of group 0 anchor */
+	struct flow_handle *fh_group0; /* flow handle of group 0 (miss flow) */
+
+	struct flm_flow_key_def_s key_def; /* key layout shared by the group */
+
+	int miss_enabled;            /* non-zero once group-0 miss path is active */
+
+	/* One action-set slot per flow type; ident.data == 0 means free */
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Most recently used FT slot ("cashed" presumably means "cached") */
+	uint32_t cashed_ft_index;
+};
+
+/* Top-level FLM flow state: one group entry per RCP */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset, when *handle is already set) the FLM flow state and
+ * mark every group as unused (cfn_group0 == -1).
+ *
+ * NOTE(review): the calloc() result is not checked; on allocation failure
+ * the loop below dereferences NULL — callers should verify *handle.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM flow state and clear the caller's pointer. */
+static void flm_flow_handle_remove(void **handle)
+{
+	free(*handle);
+	*handle = NULL;
+}
+
+/*
+ * Record the group-0 anchor (CFN, KM flow type and flow handle) for
+ * 'group_index' and mark its miss path as not yet enabled.
+ *
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	flm_group->cfn_group0 = cfn;
+	flm_group->km_ft_group0 = km_ft;
+	flm_group->fh_group0 = fh;
+	flm_group->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Clear all software state for 'group_index' and mark the group unused
+ * (cfn_group0 = -1). Does not touch hardware registers.
+ *
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+	flm_group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Return (via *fh_miss) the group-0 miss flow handle of 'group_index'.
+ * May store NULL when the group has no anchor yet.
+ *
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = flm_handle->groups[group_index].fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program the FLM recipe (RCP) registers for 'group_index': key word
+ * selectors from 'key_def', the match mask (reordered from the packet
+ * mask layout to the FLM register layout), KID, protocol flags and byte
+ * counter offset, then flush the recipe to hardware.
+ *
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder packet mask words into the FLM mask register layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* KIDs 0 and 1 are reserved; group's KID is offset by 2 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	/* NOTE(review): -20 looks like a byte-count adjustment constant — confirm */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down the FLM recipe for 'group_index': clear the RCP registers,
+ * and when the group's miss path was enabled, revert the group-0 CFN
+ * back to non-FLM operation (RCP selection 0, FT MISS -> FT UNHANDLED,
+ * KCE bit cleared) and reset the cached key definition.
+ *
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM learning for a flow in 'group_index':
+ *  - On first use of the group, allocate the FLM RCP resource, point the
+ *    group-0 CFN at this recipe, program the recipe, switch the group-0
+ *    FT from UNHANDLED to MISS and enable FLM in the KCE bitmap. The
+ *    statement order matters: the filter is transiently invalid between
+ *    the FT swap and the KCE flush (see inline comment below).
+ *  - Verify the flow's key definition matches the group's.
+ *  - Find or allocate a flow-type action-set slot matching the flow's
+ *    destinations/encap/decap, using a one-entry cache of the last slot.
+ *
+ * Outputs: *kid (group KID = group_index + 2), *ft (action-set index);
+ * when a new slot was taken, *cfn_to_copy/*cfn_to_copy_km_ft identify the
+ * group-0 CFN to clone; when an identical action set already exists,
+ * *fh_existing points at its owning flow handle instead.
+ *
+ * Returns 0 on success, -1 on invalid group, unset CFN, key mismatch or
+ * FT exhaustion.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in a group must share one key definition */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: reuse the last-hit slot when identities match */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* Slots 0 and 1 are reserved; scan from index 2 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the FT action-set slot owned by flow 'fh' and disable the
+ * corresponding KM and FLM flow types on its CFN. Returns the OR of the
+ * set_flow_type_flm() results (0 on success).
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+/* Number of meter profiles (dual-bucket parameter sets) supported */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Per-profile meter parameters. Rates and sizes are stored in the
+ * hardware encoding produced by flow_mtr_set_profile():
+ * bits [11:0] mantissa, bits [15:12] left-shift amount.
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires the FLM module to be present in variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	return hw_mod_flm_present(&dev->ndev->be) &&
+	       dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Maximum number of meter policies (one per profile slot). */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a 40-bit byte count into hardware bucket-size units of
+ * 2^40/10^9 bytes, i.e. compute round_up(value * 10^9 / 2^40) using
+ * 20-bit limbs so the multiplication cannot overflow 64 bits.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	/* Assumes a 40-bit int as input */
+	uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+	uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+	/* Round up when any discarded low-order bits are non-zero */
+	uint64_t round_up =
+		(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+	return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+/*
+ * Store meter profile 'profile_id': convert the two bucket rates
+ * (bytes/sec) and sizes (bytes) into the hardware 16-bit encoding
+ * [11:0] mantissa, [15:12] left-shift. Rates are first converted to
+ * 128-byte/sec units (rounded up) and clamped; sizes are converted via
+ * convert_to_bucket_size_units() and clamped. Always returns 0.
+ *
+ * NOTE(review): profile_id is not range-checked against
+ * FLM_MTR_PROFILE_SIZE — callers must guarantee it is in range.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies carry no state in this implementation; accept and ignore. */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+/* Number of meter statistics entries (== max number of meters) */
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* 32-bit words per FLM info record */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+/* Max info records drained per flm_read_inf_rec_locked() call */
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Sentinel bit used to mark n_pkt as "update in progress" */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* Periodic statistics thresholds, expressed as powers of two */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Maximum number of meters supported. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/*
+ * Per-meter statistics. n_pkt/n_bytes are written by the stats-update
+ * thread and read lock-free by flm_mtr_read_stats(); n_pkt's MSB acts as
+ * a write-in-progress marker (see flm_mtr_update_stats()).
+ */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets; /* profile in use, NULL when destroyed */
+
+	uint64_t n_pkt;        /* green packet count (atomic access) */
+	uint64_t n_bytes;      /* green byte count (atomic access) */
+	uint64_t n_pkt_base;   /* baseline subtracted on read/clear */
+	uint64_t n_bytes_base; /* baseline subtracted on read/clear */
+	uint64_t stats_mask;   /* non-zero while stats collection is enabled */
+};
+
+/* NOTE(review): name says "WORDS" but this is sizeof in bytes — confirm
+ * against the lrn_free counter's unit before relying on it.
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+/* Bound on learn-buffer free-space polling iterations */
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Push one learn record to the FLM learn buffer. If the buffer lacks
+ * space, repeatedly drain pending info records and re-poll, giving up
+ * after FLM_PROG_MAX_RETRY attempts (returns 1). Otherwise returns the
+ * result of the flush. Caller must hold the ndev mutex.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached counter before concluding it is full */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain info records to make room for learn data */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create meter 'mtr_id' from profile 'profile_id' by submitting an FLM
+ * learn record (op = 1) keyed on sw9 = mtr_id + 1 / kid = 1. On success,
+ * bind the profile's buckets to the meter's stat entry and publish its
+ * stats_mask. policy_id is accepted but unused.
+ *
+ * Returns the flow_flm_apply() result (0 on success). Serialized on the
+ * ndev mutex.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	/* Start the bucket at its fill level (mantissa part only) */
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* 32-bit meter id in bytes 0-3; bit 7 of id[8] marks the id valid */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		__atomic_store_n(&mtr_stat[mtr_id].stats_mask, stats_mask, __ATOMIC_RELAXED);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy meter 'mtr_id' by submitting an FLM learn record with op = 0
+ * (delete). Statistics are zeroed first so concurrent stat updates see
+ * stats_mask == 0 and skip the deleted meter.
+ *
+ * Returns the flow_flm_apply() result (0 on success). Serialized on the
+ * ndev mutex.
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	/* 32-bit meter id in bytes 0-3; bit 7 of id[8] marks the id valid */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	__atomic_store_n(&mtr_stat[mtr_id].stats_mask, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_bytes, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_pkt, 0, __ATOMIC_RELAXED);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust meter 'mtr_id' by submitting an FLM learn record with op = 2
+ * (update) carrying 'adjust_value' in the adj field.
+ *
+ * NOTE(review): mtr_stat->buckets is dereferenced without a NULL check;
+ * calling this after flow_mtr_destroy_meter() (which sets buckets to
+ * NULL) would crash — confirm callers guarantee the meter exists.
+ *
+ * Returns the flow_flm_apply() result (0 on success). Serialized on the
+ * ndev mutex.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (__atomic_load_n(&mtr_stat->stats_mask, __ATOMIC_RELAXED))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ FLM info records into 'data'
+ * (which must hold that many records). Returns the number of whole
+ * records read, 0 when none are available. Caller must hold the ndev
+ * mutex (hence "_locked").
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* Refresh the cached counter before concluding it is empty */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (records_to_read == 0)
+		return 0;
+	if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+		records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       records_to_read * WORDS_PER_INF_DATA);
+
+	return records_to_read;
+}
+
+/*
+ * Drain FLM info records and fold meter statistics into the per-meter
+ * counters. Records are validated by range/shape checks on words 6-8
+ * before use. Publication is seqlock-like: n_pkt is first written with
+ * UINT64_MSB set (marking an update in progress for readers in
+ * flm_mtr_read_stats()), then n_bytes, then n_pkt with the final value.
+ *
+ * Returns the number of info records processed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				__atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+			if (stats_mask) {
+				/* 64-bit counters split across two 32-bit words */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				__atomic_store_n(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_bytes, nb, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_pkt, np, __ATOMIC_RELAXED);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read meter 'id' green packet/byte counters relative to their bases,
+ * optionally resetting the bases ('clear'). Outputs are only written
+ * when *stats_mask is non-zero. The double-read loop pairs with the
+ * writer in flm_mtr_update_stats(): spin while n_pkt carries the
+ * in-progress MSB, and retry until two reads of n_pkt agree so n_bytes
+ * is consistent with n_pkt.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+			} while (pkt_1 & UINT64_MSB);
+			nb = __atomic_load_n(&mtr_stat[id].n_bytes, __ATOMIC_RELAXED);
+			pkt_2 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipe index for a port; recipe 0 is reserved, hence +1. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return port + 1;
+}
+
+/*
+ * Find the physical port number for 'port_id' by walking the NIC's
+ * eth-device list. Returns UINT8_MAX when no device matches.
+ */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *dev = ndev->eth_base;
+
+	while (dev) {
+		if (dev->port_id == port_id)
+			return dev->port;
+		dev = dev->next;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push 'fh' onto the head of the NIC's doubly-linked flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	if (ndev->flow_base)
+		ndev->flow_base->prev = fh;
+	fh->next = ndev->flow_base;
+	fh->prev = NULL;
+	ndev->flow_base = fh;
+}
+
+/*
+ * Unlink 'fh' from the NIC's flow list. The four cases cover: interior
+ * node, head with successor, tail with predecessor, and sole element
+ * (matched by identity against flow_base). fh's own next/prev pointers
+ * are left untouched.
+ */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		/* fh is the head: successor becomes the new head */
+		ndev->flow_base = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
/*
 * Interpret an IPv4 flow element and add its match words to the key.
 *
 * Key layout (grounded in the index arithmetic below): packet_data[0..1]
 * hold up to two 32-bit SW words (filled from index 1 downwards), while
 * packet_data[2..9] hold up to two 4-word QW entries (filled from index 6
 * downwards).  If a QW slot is free and src/dst IP are masked, both
 * addresses go into one QW at DYN_L3 offset 12; otherwise each masked
 * address falls back to its own SW word.
 *
 * NOTE(review): qw_counter/sw_counter are taken by value, so the increments
 * below do not propagate to the caller — confirm the caller re-derives the
 * counters after this returns.
 *
 * Returns 0 on success; 1 on error (fd is freed and *error is set).
 */
static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
	uint32_t any_count)
{
	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;

	if (ipv4_spec != NULL && ipv4_mask != NULL) {
		/* A fully masked frag_offset of 0xffff selects fragmentation
		 * mode 0xfe — presumably a hardware recipe sentinel; confirm
		 * against the FLM fragmentation handling.
		 */
		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
			fd->fragmentation = 0xfe;

		/* Preferred path: pack src+dst into one QW (two words of it). */
		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];

			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);

			/* Spec is pre-masked so unmatched bits are zero. */
			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];

			/* src_ip lives at L3 offset 12; dst follows at 16. */
			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
			qw_counter += 1;
		} else {
			/* Fallback: one SW word per masked address. */
			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
				flow_nic_set_error(ERR_FAILED, error);
				free(fd);
				return 1;
			}

			if (ipv4_mask->hdr.src_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
				sw_counter += 1;
			}

			if (ipv4_mask->hdr.dst_ip) {
				uint32_t *sw_data = &packet_data[1 - sw_counter];
				uint32_t *sw_mask = &packet_mask[1 - sw_counter];

				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];

				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
				sw_counter += 1;
			}
		}
	}

	/* First L3 seen is the outer protocol; any later (or any-preceded)
	 * L3 is recorded as the tunneled protocol.
	 */
	if (any_count > 0 || fd->l3_prot != -1)
		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
	else
		fd->l3_prot = PROT_L3_IPV4;
	return 0;
}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return a CAT function (CFN) and everything attached to it to a clean,
+ * disabled state: the CFN record itself, the KM and FLM key-match enables
+ * and category selections, and any enabled CTE/CTS actions.
+ *
+ * dev: flow device owning the category hardware
+ * cfn: index of the CAT function to reset
+ *
+ * Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* Preset the CFN record and push it to hardware */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn, 0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/*
+	 * KM: clear this CFN's bit in the enable bitmap, zero its category
+	 * selection and disable every flow type on all four keys.
+	 */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		enable_bm &= ~(uint32_t)(1 << (cfn % 8));
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, enable_bm);
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_km(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* FLM: same clean-up sequence as for KM */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		enable_bm &= ~(uint32_t)(1 << (cfn % 8));
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, enable_bm);
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_flm(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/*
+	 * CTE/CTS: if any action is enabled for this CFN, disable it and
+	 * zero the CTS entries belonging to the CFN.
+	 */
+	{
+		uint32_t cte_bm = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte_bm);
+
+		if (cte_bm) {
+			/*
+			 * Per-CFN CTS stride is half of cts_num rounded up —
+			 * presumably the A/B categories share entries; verify
+			 * against the CTS register layout.
+			 */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int idx = 0; idx < cts_offset; ++idx) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + idx, 0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + idx, 0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill a FLM flow handle from a generic flow definition.
+ *
+ * Resolves the IP protocol number (outer L4 protocol first, tunneled L4
+ * protocol as fallback, 0 when neither is known), copies the ten key words
+ * and the supplied key id / replace-extension pointer / priority, and pulls
+ * DSCP, RQI/QFI, NAT and TEID values out of the modify-field actions.
+ *
+ * Returns 0 on success, -1 if @fh is not a FLM handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* IANA protocol numbers: TCP=6, UDP=17, SCTP=132, ICMP=1 */
+	if (fd->l4_prot == PROT_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->l4_prot == PROT_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->l4_prot == PROT_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->l4_prot == PROT_L4_ICMP)
+		fh->flm_prot = 1;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_TCP)
+		fh->flm_prot = 6;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_UDP)
+		fh->flm_prot = 17;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_SCTP)
+		fh->flm_prot = 132;
+	else if (fd->tunnel_l4_prot == PROT_TUN_L4_ICMP)
+		fh->flm_prot = 1;
+	else
+		fh->flm_prot = 0;
+
+	/* Ten 32-bit key words make up the FLM match data */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Harvest values produced by the modify-field actions */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		switch (fd->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* Bit 6 is RQI, bits [5:0] are QFI */
+			fh->flm_rqi = (fd->modify_field[idx].value8[0] >> 6) &
+				      0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		default:
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Compose a v17 FLM learn record from a flow handle and submit it.
+ *
+ * dev:     flow device to program
+ * fh:      flow handle; must be of type FLOW_HANDLE_TYPE_FLM
+ * mtr_ids: optional array of meter ids (up to MAX_FLM_MTRS_SUPPORTED,
+ *          zero-terminated when shorter); may be NULL
+ * flm_ft:  FLM flow type to record
+ * flm_op:  learn/unlearn operation code (only the low 4 bits are used)
+ *
+ * Returns the result of flow_flm_apply(), or -1 for a non-FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* Key words are copied in reverse order: flm_data[9..0] maps onto
+	 * qw0[0..3], qw4[0..3], sw8, sw9 of the learn record.
+	 */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+
+	/*
+	 * NOTE(review): the four member-index pointers deliberately alias the
+	 * packed mbr_idx byte array in overlapping pairs (ids 1/2 overlay
+	 * offset 0, ids 3/4 overlay offset 7); the 'a' and 'b' bitfields of
+	 * flm_v17_mbr_idx_overlay pack two indices into each 7-byte half —
+	 * presumably 28 bits per index, confirm against the struct layout.
+	 */
+	struct flm_v17_mbr_idx_overlay *mbr_id1_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id2_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id3_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	struct flm_v17_mbr_idx_overlay *mbr_id4_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	if (mtr_ids) {
+		mbr_id1_ptr->a = mtr_ids[0];
+		mbr_id2_ptr->b = mtr_ids[1];
+		mbr_id3_ptr->a = mtr_ids[2];
+		mbr_id4_ptr->b = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	/* NAT is enabled whenever either a NAT address or port was set */
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1; /* end-of-record marker */
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Set up the KM flow-type and KM category resources for a new flow handle.
+ *
+ * When no identical flow exists, a KM FT ident slot is either reused (by
+ * reference) or claimed from the first free slot, the KM key is attached to
+ * the device's KM resource management, a KM RCP is referenced from
+ * @found_flow or freshly allocated, and the match entry is written.  When an
+ * identical flow was found, both its KM FT and KM category are referenced
+ * and its match entry is reused.
+ *
+ * On success the caller's *setup_km, *setup_km_ft and *setup_km_rcp are
+ * filled in for the subsequent filter setup.
+ *
+ * Returns 0 on success, 1 on resource exhaustion (error is set on @error).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+				/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Scan slots 1..FLM_FLOW_FT_MAX-1 (slot 0 is never
+			 * handed out here) for a matching ident, remembering
+			 * the first free slot as fallback.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Matching ident: take an extra reference */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Free slot: allocate it and record the ident */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add our flow type to the CAM FT mask of the
+				 * shared RCP (key A).
+				 */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference its FT and category and
+		 * reuse its match entry instead of writing a new one.
+		 */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		/* NOTE(review): here 'flow' is read while the references above
+		 * use 'found_flow'; at the call site they point at the same
+		 * handle when identical_flow_found is set — confirm if this
+		 * helper is ever reused with differing arguments.
+		 */
+		*setup_km = 1;
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * fd:         flow definition carrying the tunnel header and modify fields
+ * eth_length: length of the pushed outer Ethernet header
+ * i:          index of the modify-field entry being aligned
+ * ofs:        in/out byte offset to rebase
+ * select:     CPY_SELECT_* target of the modify-field action
+ * l2_length/l3_length/l4_length: outer header lengths used for rebasing
+ * dyn:        out; set to 1 (static/L2-relative) when the offset is rebased
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Bug fix: the original did 'ofs += ...', advancing
+			 * the local pointer instead of adjusting the caller's
+			 * offset value — a silent no-op.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Rebase onto the new outer headers, per target field */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != (fd->tun_hdr.len > 0 ? 1 : 0))
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.new_outer ?
+				      (fd->tun_hdr.ip_version == 4 ? 1 : 2) : 0))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != (fd->tun_hdr.new_outer ? l2_length :
+				      (fd->tun_hdr.len == 0 ? 0 : fd->tun_hdr.len - eth_length)))
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * Prepare the flow management state of a NIC device for the inline profile.
+ *
+ * Reserves the resource indices that the driver treats as special (index 0
+ * of most recipe tables, FLM flow types 0/1), programs default recipes into
+ * the backend modules (CAT, QSL, PDB, HSH, COT, RMC, FLM) and allocates the
+ * host-side handles used for FLM meters, flow types and groups.
+ *
+ * Idempotent: does nothing if ndev->flow_mgnt_prepared is already set.
+ * Returns 0 on success; on any failure the partially initialized state is
+ * torn down via done_flow_management_of_ndev_profile_inline() and -1 is
+ * returned.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM: bring SDRAM up before any learn/unlearn traffic */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Disable all FLM status-record generation; only RBL is set */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		/* Per-priority FIFO fill-level drop limits for flow types 0-3 */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/*
+		 * Host-side handles; freed in
+		 * done_flow_management_of_ndev_profile_inline()
+		 */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Stats counters may be read concurrently; use atomic stores */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			__atomic_store_n(&mtr_stat[i].n_pkt, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].n_bytes, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].stats_mask, 0, __ATOMIC_RELAXED);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	/* Roll back whatever was set up before the failure */
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down the flow management state created by
+ * initialize_flow_management_of_ndev_profile_inline().
+ *
+ * Resets the backend recipes touched during initialization, frees the
+ * host-side handles and releases the reserved resource indices. Safe to
+ * call on a device that was never (or only partially) prepared, since it
+ * is also used as the error-rollback path. Always returns 0.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		/* Quiesce FLM before releasing anything it references */
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/* free(NULL) is a no-op, so partial init is handled */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		/* Reset each module's reserved index 0 back to defaults */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow description without programming the NIC.
+ *
+ * Runs the same element/action interpretation step as flow creation, under
+ * the device mutex, then discards the resulting flow definition.
+ * Returns 0 when the flow is acceptable, -1 otherwise (error details are
+ * reported through *error by interpret_flow_elements()).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+	struct nic_flow_def *fd;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	fd = interpret_flow_elements(dev, elem, action, error, 0,
+				     &port_id, &num_dest_port, &num_queues,
+				     packet_data, packet_mask, &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (fd == NULL)
+		return -1;
+
+	/* Only validation was requested - drop the definition again */
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create a flow for the inline profile and program it to the NIC.
+ *
+ * Interprets the flow elements/actions into a nic_flow_def, translates
+ * caller group IDs to internal group indices, then builds and flushes the
+ * filter via create_flow_filter(). The whole sequence runs under the
+ * device mutex. Returns the new flow handle, or NULL on failure with
+ * *error set.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy so the caller's attr is never modified */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	/* Forced VLAN VID only applies to flows in the default group */
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port in the actions: use our own port */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC; on success fd is owned by fh */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FlOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	/*
+	 * NOTE(review): when interpret_flow_elements() succeeded but a later
+	 * step failed before create_flow_filter() took ownership of fd
+	 * (e.g. group translation failure), fd appears to be leaked here -
+	 * confirm whether create_flow_filter() frees fd on its own failure
+	 * paths before adding a free(fd) for the !fh case.
+	 */
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a single flow handle; caller must hold the device mutex.
+ *
+ * Removes the handle from the device flow list, then releases its hardware
+ * state: FLM flows are unlearned and their replace-extension resources
+ * dereferenced; regular flows walk every resource type and reset the
+ * backend entry when this handle held the last reference. Frees fh (and
+ * fh->fd for non-FLM flows) before returning. Returns 0 on success,
+ * non-zero if any release step failed (also reported via *error).
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the flow from the FLM (enable = 0) */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Release TPE replace-extension data if last reference */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Clear the RPL entries backing the len-byte replace data */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owner flow when this was its last FLM flow */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Release every hardware resource held by this handle */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Host-side bookkeeping only: clear the FT ident */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* Reset all TPE sub-module recipes for this index */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy every flow in the given list that belongs to dev.
+ * Stops at the first failure and returns its error code, 0 otherwise.
+ */
+static int flow_destroy_list_for_dev(struct flow_eth_dev *dev,
+				     struct flow_handle *flow)
+{
+	int err = 0;
+
+	while (flow && !err) {
+		/* Read next first - destroying frees the current handle */
+		struct flow_handle *next = flow->next;
+
+		if (flow->dev == dev)
+			err = flow_destroy_locked_profile_inline(dev, flow,
+								 NULL);
+		flow = next;
+	}
+
+	return err;
+}
+
+/*
+ * Destroy one flow, or - when flow is NULL - all flows (regular and FLM)
+ * created on this eth device. Takes the device mutex for the duration.
+ * Returns 0 on success, non-zero on the first failed destroy.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		err = flow_destroy_list_for_dev(dev, dev->ndev->flow_base);
+		if (!err)
+			err = flow_destroy_list_for_dev(dev,
+							dev->ndev->flow_base_flm);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * rte_flow flush entry point - not implemented for the inline profile.
+ *
+ * Always returns -1 and reports FLOW_ERROR_GENERAL through *error.
+ * error may be NULL (other paths in this file pass NULL errors around),
+ * so guard the dereference instead of crashing.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	if (error) {
+		error->type = FLOW_ERROR_GENERAL;
+		error->message = "rte_flow_flush is not supported";
+	}
+	return -1;
+}
+
+/*
+ * rte_flow query entry point - not implemented for the inline profile.
+ *
+ * Clears the output pointers, reports FLOW_ERROR_GENERAL through *error
+ * and returns -1. The out-parameters and error may be NULL (callers in
+ * this file pass NULL flow_error pointers elsewhere), so each dereference
+ * is guarded.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	if (length)
+		*length = 0;
+	if (data)
+		*data = NULL;
+	if (error) {
+		error->type = FLOW_ERROR_GENERAL;
+		error->message = "rte_flow_query is not supported";
+	}
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into data[].
+ *
+ * data must hold at least as many entries as there are fields below;
+ * otherwise -1 is returned. HW_FLM_STAT_FLOWS is a gauge and overwrites
+ * its slot, every other field accumulates onto the caller's value. For
+ * FLM versions older than 18 the fields after HW_FLM_STAT_PRB_IGNORE are
+ * not available and the loop stops there. Returns 0 on success.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t n_fields = sizeof(fields) / sizeof(fields[0]);
+
+	if (size < n_fields)
+		return -1;
+
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < n_fields; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+
+		if (fields[i] == HW_FLM_STAT_FLOWS)
+			data[i] = value;
+		else
+			data[i] += value;
+
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the IFR MTU recipe for a port in both the RPP_IFR and IFR
+ * TPE sub-modules, then flush the two recipes if all writes succeeded.
+ * Ports >= 255 are rejected with -1. Returns 0 on success, otherwise
+ * the OR of the failing backend return codes.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	struct flow_nic_dev *ndev = dev->ndev;
+	uint8_t rcp_idx = convert_port_to_ifr_mtu_recipe(port);
+	int err = 0;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  rcp_idx, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  rcp_idx, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      rcp_idx, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      rcp_idx, mtu);
+
+	/* Only flush when every register write above succeeded */
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, rcp_idx, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the inline flow profile: lifecycle management of a
+ * NIC device's flow state and the rte_flow-style operations implemented
+ * by flow_api_profile_inline.c.
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down flow management state; also used as error rollback. Returns 0. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time setup of reserved resources and default recipes; 0 on success, -1 on failure. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy one flow handle; caller must already hold the device mutex. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Validate a flow description without programming the NIC; 0 if acceptable. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns the handle or NULL with *error set. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of the device when flow is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Copy/accumulate FLM statistics into data[]; -1 if size is too small. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation for the ntservice driver.
+ *
+ * General note on this backend implementation:
+ * A shadow class could be used to combine multiple register writes; however,
+ * this backend is only intended for development and testing.
+ */
+
+/*
+ * Per-adapter backend state: one entry per physical adapter, holding a
+ * pointer to each FPGA module driver present on that adapter (NULL when
+ * the module is absent in the loaded FPGA image).
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;	/* index into be_devs[] */
+	enum debug_mode_e dmode;	/* FLOW_BACKEND_DEBUG_MODE_* flags */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * Turn on register-write tracing for module 'mod' when either the backend
+ * debug mode or the module's own debug flag requests it.  Declares a local
+ * flag in the enclosing scope which the paired _CHECK_DEBUG_OFF() reads to
+ * restore the mode.  Fix: the flag was named __debug__, which is a reserved
+ * identifier (double underscore, C11 7.1.3); renamed to debug_sav_.  The
+ * macro names themselves are kept, as all call sites use them.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int debug_sav_ = 0;                                            \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			debug_sav_ = 1;                                \
+	} while (0)
+
+/* Restore debug mode if _CHECK_DEBUG_ON() enabled it in this scope. */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (debug_sav_)                          \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Record the requested debug mode on the backend device; always succeeds. */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+
+	dev->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+/*
+ * INFO getters: each one delegates to the corresponding info_nthw accessor
+ * on this adapter's INFO module, exposing FPGA capability parameters
+ * (port counts, table sizes, module dimensions) through the backend ops
+ * table's uniform void-pointer interface.
+ */
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when the CAT module is instantiated in this adapter's FPGA. */
+static bool cat_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_cat_nthw != NULL;
+}
+
+/* Pack the CAT module version as (major << 16) | (minor & 0xffff). */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *dev = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(dev->p_cat_nthw->m_cat);
+	uint32_t minor = (uint32_t)module_get_minor_version(dev->p_cat_nthw->m_cat) & 0xffff;
+
+	return (major << 16) | minor;
+}
+
+/*
+ * Write 'cnt' CFN (categorizer function) records starting at 'cat_func'
+ * to hardware, choosing the register layout by CAT module version.
+ * Fix: both version branches called a nonexistent function r(); every
+ * sibling flush routine primes the write with its *_cnt(..., 1U) call,
+ * so these are cat_nthw_cfn_cnt(be->p_cat_nthw, 1U).
+ * Returns 0; unknown versions are silently ignored (no registers written).
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' KCE (KM category enable) records starting at 'index'.
+ * For v18 there is a single KM interface (index 0); for v21/v22 the
+ * caller selects the interface via 'km_if_idx'.  Always returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' KCS (KM category select) records starting at 'cat_func'.
+ * v18 uses KM interface 0; v21/v22 use the caller-supplied 'km_if_idx'.
+ * Always returns 0.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' FTE (flow type enable) records starting at 'index'.
+ * v18 uses KM interface 0; v21/v22 use the caller-supplied 'km_if_idx'.
+ * Always returns 0.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CTE (category table enable) records starting at 'cat_func'.
+ * v18 and v21 share one layout; v22 adds the RRB enable bit.
+ * Fix: in the v22 branch the RRB bit was written through a duplicated
+ * cat_nthw_cte_enable_tpe() call, so the RRB enable never reached its
+ * register and TPE was written twice; use cat_nthw_cte_enable_rrb().
+ * Always returns 0.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			cat_nthw_cte_enable_rrb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CTS (category translation select) records starting at 'index'.
+ * NOTE(review): all versions read through the v18 view — presumably the
+ * cts layout is identical across v18/v21/v22; confirm against the union
+ * definition in flow_api_backend.h.  Always returns 0.
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' COT (color table) records starting at 'cat_func'.
+ * All versions share the v18 layout here.  Always returns 0.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CCT (color control table) records starting at 'index'.
+ * All versions share the v18 layout here.  Always returns 0.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' EXO (extractor offset) records starting at 'ext_index'.
+ * All versions share the v18 layout here.  Always returns 0.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' RCK records starting at 'index'.
+ * All versions share the v18 layout here.  Always returns 0.
+ */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' LEN (length comparison) records starting at 'len_index'.
+ * All versions share the v18 layout here.  Always returns 0.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' KCC CAM (key/category/id) records starting at 'len_index'.
+ * All versions share the v18 layout here.  Always returns 0.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CCE records starting at 'len_index'.
+ * CCE exists only in CAT v22; other versions are a no-op.  Always returns 0.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' CCS records starting at 'len_index': per-record enable bit
+ * plus value for each engine (COR/HSH/QSL/IPF/SLC/PDB/MSK/HST/EPP/TPE/RRB)
+ * and the three sideband type/data pairs.
+ * CCS exists only in CAT v22; other versions are a no-op.  Always returns 0.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when the KM module is instantiated in this adapter's FPGA. */
+static bool km_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_km_nthw != NULL;
+}
+
+/* KM module version: major in the upper 16 bits, minor in the lower 16. */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_km_nthw->m_km);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_km_nthw->m_km);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write 'cnt' KM RCP shadow records, starting at 'category', to hardware.
+ * Only the v7 register layout is handled; other versions are a no-op.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* One record is committed per flush. */
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			km_nthw_rcp_select(be->p_km_nthw, idx);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[idx].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[idx].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[idx].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[idx].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[idx].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[idx].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[idx].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[idx].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[idx].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[idx].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[idx].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[idx].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[idx].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[idx].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[idx].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[idx].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[idx].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[idx].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[idx].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw, km->v7.rcp[idx].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[idx].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[idx].dual);
+			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[idx].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[idx].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[idx].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[idx].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[idx].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[idx].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[idx].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[idx].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[idx].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[idx].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[idx].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[idx].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[idx].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw, km->v7.rcp[idx].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[idx].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[idx].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[idx].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[idx].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[idx].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[idx].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[idx].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[idx].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/* Write 'cnt' KM CAM records, starting at 'record' in 'bank', to hardware. */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* CAM address: bank in the upper bits (2048 records per bank). */
+			const int idx = (bank << 11) + record + i;
+
+			km_nthw_cam_select(be->p_km_nthw, idx);
+			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[idx].w0);
+			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[idx].w1);
+			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[idx].w2);
+			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[idx].w3);
+			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[idx].w4);
+			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[idx].w5);
+			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[idx].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[idx].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[idx].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[idx].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[idx].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[idx].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/* Write the dirty KM TCAM entries in the given bank/byte/value range to HW. */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		const int base = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* Only entries whose shadow copy changed are written. */
+			if (!km->v7.tcam[base + i].dirty)
+				continue;
+			km_nthw_tcam_select(be->p_km_nthw, base + i);
+			km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[base + i].t);
+			km_nthw_tcam_flush(be->p_km_nthw);
+			km->v7.tcam[base + i].dirty = 0;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries. */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = bank * 72 + index + i;
+
+			km_nthw_tci_select(be->p_km_nthw, idx);
+			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[idx].color);
+			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[idx].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* address: lower 4 bits = bank, upper 7 bits = index */
+			const int idx = bank + (index << 4) + i;
+
+			km_nthw_tcq_select(be->p_km_nthw, idx);
+			km_nthw_tcq_bank_mask(be->p_km_nthw, km->v7.tcq[idx].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[idx].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* True when the FLM register module was instantiated for this device. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_flm_nthw ? true : false;
+}
+
+/* FLM module version: major in the upper 16 bits, minor in the lower 16. */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_flm_nthw->m_flm);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the FLM control shadow fields to hardware (v17+ layout only).
+ * The field writes stage values in the nthw layer; the final flush call
+ * commits them — keep the order intact.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the writable FLM status bits (critical, panic, crcerr) to hardware. */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE and EFT_BP are read only. */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical, 0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Refresh all FLM status fields from hardware into the shadow copy. */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw, &flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone, 1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical, 1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM timeout register (field 't') to hardware (v17+ only). */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM scrub register (field 'i') to hardware (v17+ only). */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load-bin register to hardware (v17+ only). */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load-pps register to hardware (v17+ only). */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load-lps register to hardware (v17+ only). */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the FLM load-aps register to hardware (v17+ only). */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write the four FLM priority limit/ft register pairs to hardware (v17+). */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write 'cnt' FLM PST records, starting at 'index', to hardware (v17+). */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			flm_nthw_pst_select(be->p_flm_nthw, idx);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[idx].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[idx].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[idx].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Write 'cnt' FLM RCP records, starting at 'index', to hardware (v17+). */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			flm_nthw_rcp_select(be->p_flm_nthw, idx);
+			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v17.rcp[idx].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v17.rcp[idx].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v17.rcp[idx].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v17.rcp[idx].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v17.rcp[idx].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v17.rcp[idx].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v17.rcp[idx].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v17.rcp[idx].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v17.rcp[idx].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v17.rcp[idx].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v17.rcp[idx].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v17.rcp[idx].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v17.rcp[idx].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v17.rcp[idx].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v17.rcp[idx].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v17.rcp[idx].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v17.rcp[idx].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v17.rcp[idx].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[idx].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the FLM buffer-control state (lrn_free, inf_avail, sta_avail).
+ * NOTE(review): the nthw layer is passed pointers into the shadow struct,
+ * presumably filling them from hardware — confirm against flm_nthw_buf_ctrl_update.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh all FLM statistics counters from hardware into the shadow copy.
+ * v17 counters are always read; the v20 block adds further counters for
+ * newer register layouts.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* Latch the hardware counters, then read them out. */
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt, 1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt, 1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt, 1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt, 1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt, 1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt, 1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt, 1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt, 1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt, 1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt, 1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt, 1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt, 1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt, 1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt, 1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push learn-data records to the FLM and refresh the buffer-control state.
+ * 'size' is the amount of data in lrn_data — assumed to be in 32-bit words;
+ * TODO confirm against flm_nthw_lrn_data_flush. Returns its result directly.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Fetch info-data records from the FLM into inf_data and refresh the
+ * buffer-control state. Returns the nthw-layer result directly.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Fetch status-data records from the FLM into sta_data and refresh the
+ * buffer-control state. Returns the nthw-layer result directly.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* True when the HSH register module was instantiated for this device. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hsh_nthw ? true : false;
+}
+
+/* HSH module version: major in the upper 16 bits, minor in the lower 16. */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hsh_nthw->m_hsh);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Write 'cnt' HSH RCP records, starting at 'category', to hardware (v5 only). */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			hsh_nthw_rcp_select(be->p_hsh_nthw, idx);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[idx].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[idx].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[idx].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[idx].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw, hsh->v5.rcp[idx].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[idx].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[idx].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw, hsh->v5.rcp[idx].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[idx].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[idx].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* True when the HST register module was instantiated for this device. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hst_nthw ? true : false;
+}
+
+/* HST module version: major in the upper 16 bits, minor in the lower 16. */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hst_nthw->m_hst);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Write 'cnt' HST RCP records, starting at 'category', to hardware (v2 only). */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			hst_nthw_rcp_select(be->p_hst_nthw, idx);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw, hst->v2.rcp[idx].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw, hst->v2.rcp[idx].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw, hst->v2.rcp[idx].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw, hst->v2.rcp[idx].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw, hst->v2.rcp[idx].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[idx].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[idx].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw, hst->v2.rcp[idx].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw, hst->v2.rcp[idx].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw, hst->v2.rcp[idx].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[idx].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* True when the QSL register module was instantiated for this device. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_qsl_nthw ? true : false;
+}
+
+/* QSL module version: major in the upper 16 bits, minor in the lower 16. */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_qsl_nthw->m_qsl);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Write 'cnt' QSL RCP records, starting at 'category', to hardware (v7 only). */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = category + i;
+
+			qsl_nthw_rcp_select(be->p_qsl_nthw, idx);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[idx].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[idx].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[idx].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[idx].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[idx].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[idx].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Write 'cnt' QSL QST entries, starting at 'entry', to hardware (v7 only). */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_qst_select(be->p_qsl_nthw, idx);
+			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[idx].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[idx].en);
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[idx].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[idx].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[idx].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[idx].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Write 'cnt' QSL QEN enable entries, starting at 'entry', to hardware (v7). */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_qen_select(be->p_qsl_nthw, idx);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[idx].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/* Write 'cnt' QSL UNMQ entries, starting at 'entry', to hardware (v7 only). */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = entry + i;
+
+			qsl_nthw_unmq_select(be->p_qsl_nthw, idx);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[idx].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[idx].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* Report whether the SLC module was instantiated for this adapter. */
+static bool slc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	return be->p_slc_nthw ? true : false;
+}
+
+/* SLC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_nthw->m_slc);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write SLC slicer recipes [category, category + cnt) to hardware.
+ * Only SLC version 1 is handled; other versions are silently skipped.
+ * Always returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		/* Commit one recipe per flush cycle. */
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* Report whether the SLC-LR module was instantiated for this adapter. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	return be->p_slc_lr_nthw ? true : false;
+}
+
+/* SLC-LR module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_lr_nthw->m_slc_lr);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write SLC-LR slicer recipes [category, category + cnt) to hardware.
+ * Only SLC-LR version 2 is handled; other versions are silently skipped.
+ * Always returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		/* Commit one recipe per flush cycle. */
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* Report whether the PDB module was instantiated for this adapter. */
+static bool pdb_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	return be->p_pdb_nthw ? true : false;
+}
+
+/* PDB module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_pdb_nthw->m_pdb);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write PDB (packet descriptor builder) recipes [category, category + cnt)
+ * to hardware. Only PDB version 9 is handled; other versions are silently
+ * skipped. Always returns 0.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		/* Commit one recipe per flush cycle. */
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Fix: the original called pdb_nthw_rcp_duplicate_bit()
+			 * a second time with pcap_keep_fcs, clobbering the
+			 * DUPLICATE_BIT shadow value and never writing the
+			 * PCAP_KEEP_FCS field at all.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the global PDB configuration (timestamp format, port offset) to
+ * hardware. Only PDB version 9 is handled. Always returns 0.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct pdb_nthw *p = be->p_pdb_nthw;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(p, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(p, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(p);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* Report whether the IOA module was instantiated for this adapter. */
+static bool ioa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	return be->p_ioa_nthw ? true : false;
+}
+
+/* IOA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_ioa_nthw->m_ioa);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write IOA (in/out adapter) recipes [category, category + cnt) to hardware,
+ * covering VLAN pop/push fields and queue override. Only IOA version 4 is
+ * handled; other versions are silently skipped. Always returns 0.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		/* Commit one recipe per flush cycle. */
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write the two custom VLAN TPID values to the IOA special-TPID register.
+ * Only IOA version 4 is handled. Always returns 0.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct ioa_nthw *p = be->p_ioa_nthw;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(p, ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(p, ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(p);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write ROA egress-packet-processing entries [index, index + cnt) through
+ * the IOA module. Only IOA version 4 is handled; other versions are
+ * silently skipped. Always returns 0.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		/* Commit one entry per flush cycle. */
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* Report whether the ROA module was instantiated for this adapter. */
+static bool roa_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw ? true : false;
+}
+
+/* ROA module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_roa_nthw->m_roa);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write tunnel header templates to the ROA module. Only ROA version 6 is
+ * handled. Each logical header appears to be stored as 4 hardware words:
+ * the outer loop walks cnt headers, the inner loop writes the 4 word-slices
+ * of each. Always returns 0.
+ *
+ * NOTE(review): the select index is 'index + i*4 + ii' while the shadow
+ * array index is 'index / 4 + i' -- this only lines up when callers pass
+ * 'index' pre-multiplied by 4. TODO confirm against the caller's contract.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* Commit four words per flush cycle. */
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write ROA tunnel configuration entries [category, category + cnt) to
+ * hardware. Only ROA version 6 is handled; other versions are silently
+ * skipped. Always returns 0.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* Commit one entry per flush cycle. */
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the global ROA forwarding configuration to hardware. Only ROA
+ * version 6 is handled; other versions are silently skipped. Always
+ * returns 0.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write ROA LAG configuration entries [index, index + cnt) to hardware.
+ * Only ROA version 6 is handled. Always returns 0.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct roa_nthw *p = be->p_roa_nthw;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* Commit one entry per flush cycle. */
+		roa_nthw_lag_cfg_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			roa_nthw_lag_cfg_select(p, idx);
+			roa_nthw_lag_cfg_tx_phy_port(p,
+						     roa->v6.lagcfg[idx].txphy_port);
+			roa_nthw_lag_cfg_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* Report whether the RMC module was instantiated for this adapter. */
+static bool rmc_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw ? true : false;
+}
+
+/* RMC module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_rmc_nthw->m_rmc);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_rmc_nthw->m_rmc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write the RMC control register (block/keep-alive/LAG settings) to
+ * hardware. Only RMC version 1.3 (encoded as 0x10003, i.e. major 1 <<16 |
+ * minor 3) is handled; other versions are silently skipped. Always
+ * returns 0.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/*
+ * The TPE block is a composite: it is only "present" when every one of its
+ * six sub-modules (CSU, HFU, RPP-LR, TX-CPY, TX-INS, TX-RPL) exists.
+ */
+static bool tpe_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = (const struct backend_dev_s *)be_dev;
+
+	if (!be->p_csu_nthw || !be->p_hfu_nthw)
+		return false;
+	if (!be->p_rpp_lr_nthw || !be->p_tx_cpy_nthw)
+		return false;
+	return be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Derive a synthetic TPE "version" from the versions of its six
+ * sub-modules. Only two known combinations are mapped (to 1 and 2,
+ * differing in the RPP-LR minor version); any other combination asserts
+ * in debug builds and returns 0 in release builds.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	/* Each sub-module version is packed as (major << 16) | minor. */
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	/* TPE v1: all sub-modules at their base versions (rpp_lr minor 0). */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	/* TPE v2: identical except rpp_lr minor 1 (adds IFR recipes). */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	/* Unknown sub-module combination: fail loudly in debug builds. */
+	assert(false);
+	return 0;
+}
+
+/*
+ * Write RPP-LR recipes [index, index + cnt) to hardware. Handled for TPE
+ * version 1 and later. Always returns 0.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct rpp_lr_nthw *p = be->p_rpp_lr_nthw;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		/* Commit one recipe per flush cycle. */
+		rpp_lr_nthw_rcp_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			rpp_lr_nthw_rcp_select(p, idx);
+			rpp_lr_nthw_rcp_exp(p, rpp_lr->v1.rpp_rcp[idx].exp);
+			rpp_lr_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Write RPP-LR IFR (IP fragmentation) recipes [index, index + cnt) to
+ * hardware. Requires TPE version >= 2 (where the IFR recipe table exists);
+ * returns -1 for older versions, 0 on success.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		/* Commit one recipe per flush cycle. */
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Write IFR-module recipes [index, index + cnt) to hardware. Requires TPE
+ * version >= 2; returns -1 for older versions, 0 on success. Mirrors
+ * tpe_rpp_ifr_rcp_flush() but targets the standalone IFR module.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		/* Commit one recipe per flush cycle. */
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Write TX-INS (insert) recipes [index, index + cnt) to hardware.
+ * Handled for TPE version 1 and later. Always returns 0.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct tx_ins_nthw *p = be->p_tx_ins_nthw;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		/* Commit one recipe per flush cycle. */
+		tx_ins_nthw_rcp_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			tx_ins_nthw_rcp_select(p, idx);
+			tx_ins_nthw_rcp_dyn(p, tx_ins->v1.ins_rcp[idx].dyn);
+			tx_ins_nthw_rcp_ofs(p, tx_ins->v1.ins_rcp[idx].ofs);
+			tx_ins_nthw_rcp_len(p, tx_ins->v1.ins_rcp[idx].len);
+			tx_ins_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Write TX-RPL (replace) recipes [index, index + cnt) to hardware.
+ * Handled for TPE version 1 and later. Always returns 0.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		/* Commit one recipe per flush cycle. */
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write TX-RPL extension entries [index, index + cnt) to hardware.
+ * Handled for TPE version 1 and later. Always returns 0.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct tx_rpl_nthw *p = be->p_tx_rpl_nthw;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		/* Commit one entry per flush cycle. */
+		tx_rpl_nthw_ext_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			tx_rpl_nthw_ext_select(p, idx);
+			tx_rpl_nthw_ext_rpl_ptr(p,
+						tx_rpl->v1.rpl_ext[idx].rpl_ptr);
+			tx_rpl_nthw_ext_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write TX-RPL replacement-data words [index, index + cnt) to hardware.
+ * Handled for TPE version 1 and later. Always returns 0.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct tx_rpl_nthw *p = be->p_tx_rpl_nthw;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		/* Commit one entry per flush cycle. */
+		tx_rpl_nthw_rpl_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			tx_rpl_nthw_rpl_select(p, idx);
+			tx_rpl_nthw_rpl_value(p, tx_rpl->v1.rpl_rpl[idx].value);
+			tx_rpl_nthw_rpl_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write TX-CPY (copy writer) recipes [index, index + cnt) to hardware.
+ * Recipes are spread over several writers, nb_rcp_categories per writer;
+ * the writer count register is (re)programmed each time the loop crosses
+ * into a new writer. Handled for TPE version 1 and later. Always
+ * returns 0.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/*
+	 * Fix: was 'unsigned int wr_index = -1', making every comparison
+	 * against the signed quotient below a mixed-signedness compare.
+	 * A plain int sentinel of -1 can never equal the non-negative
+	 * quotient, so behavior is unchanged and the implicit conversion
+	 * is gone.
+	 */
+	int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				/* Entered a new writer: reset its count. */
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Write HFU (header field update) recipes [index, index + cnt) to
+ * hardware: length fields A/B/C, TTL update, and the protocol/offset
+ * descriptors. Handled for TPE version 1 and later. Always returns 0.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		/* Commit one recipe per flush cycle. */
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			/* Length field A. */
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			/* Length field B. */
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			/* Length field C. */
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			/* TTL update. */
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			/* Protocol info and layer offsets. */
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Write CSU (checksum update) recipes [index, index + cnt) to hardware:
+ * outer/inner L3 and L4 checksum commands. Handled for TPE version 1 and
+ * later. Always returns 0.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	struct csu_nthw *p = be->p_csu_nthw;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		/* Commit one recipe per flush cycle. */
+		csu_nthw_rcp_cnt(p, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int idx = index + i;
+
+			csu_nthw_rcp_select(p, idx);
+			csu_nthw_rcp_outer_l3_cmd(p, csu->v1.csu_rcp[idx].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(p, csu->v1.csu_rcp[idx].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(p, csu->v1.csu_rcp[idx].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(p, csu->v1.csu_rcp[idx].il4_cmd);
+			csu_nthw_rcp_flush(p);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Rx queue allocation is not supported by this backend; this is a stub
+ * that logs an error and fails. Returns -1 unconditionally.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	/* Fix: error diagnostics belong on stderr, not stdout. */
+	fprintf(stderr, "ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Rx queue release is not supported by this backend; this is a stub that
+ * logs an error. Returns 0 (kept from the original so callers treating
+ * free as best-effort are unaffected).
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	/* Fix: error diagnostics belong on stderr, not stdout. */
+	fprintf(stderr, "ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Flow API backend operations table for the FPGA (nthw) backend. The
+ * initializer is positional, so entry order must match the declaration of
+ * struct flow_api_backend_ops exactly -- do not reorder entries here.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,	/* presumably the backend API/ops version -- confirm against
+		 * the struct flow_api_backend_ops declaration */
+
+	/* Generic adapter queries. */
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	/* Queue management (stubs in this backend). */
+	alloc_rx_queue,
+	free_rx_queue,
+
+	/* CAT module. */
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	/* KM module. */
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	/* FLM module. */
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	/* HSH module. */
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	/* HST module. */
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	/* QSL module. */
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	/* SLC module. */
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	/* SLC-LR module. */
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	/* PDB module. */
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	/* IOA module. */
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	/* ROA module. */
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	/* RMC module. */
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	/* TPE composite module. */
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down the backend created by bin_flow_backend_init(): delete every
+ * per-adapter nthw module instance.  Modules that were absent from the FPGA
+ * have NULL stored in their slot, so the *_nthw_delete() helpers are passed
+ * NULL in that case — NOTE(review): confirm each delete helper is NULL-safe.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Initialize the flow filter API for one adapter: bring up the binary flow
+ * backend and create the flow NIC device on top of it.
+ *
+ * On success *p_flow_device holds the new device and 0 is returned.
+ * On failure *p_flow_device is NULL and -1 is returned; the backend is torn
+ * down again so the module instances allocated by bin_flow_backend_init()
+ * are not leaked (the original code leaked them on this path).
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *be_dev = NULL;
+	struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+	const struct flow_api_backend_ops *iface =
+		bin_flow_backend_init(p_fpga, &be_dev);
+
+	flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+	if (!flow_nic) {
+		/* Undo the backend init so nthw instances are released. */
+		if (be_dev)
+			bin_flow_backend_done(be_dev);
+		*p_flow_device = NULL;
+		return -1;
+	}
+	*p_flow_device = flow_nic;
+	return 0;
+}
+
+/*
+ * Shut down the flow filter API: release the flow NIC device, then tear
+ * down its backend.  Returns the result of flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	/* Fetch the backend handle before the device is released. */
+	void *be_dev = flow_api_get_be_dev(dev);
+	int err = flow_api_done(dev);
+
+	if (be_dev != NULL)
+		bin_flow_backend_done(be_dev);
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v14 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-09-04 13:53   ` [PATCH v14 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-09-04 13:54   ` Mykola Kostenok
  2023-09-04 13:54   ` [PATCH v14 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  355 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1235 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12501 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 698608cdb2..fbe19449c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather support for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by a comma (‘,’).
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state for the 4GA (4th-generation architecture) NTNIC. */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;              /* PCI identification of the adapter — assumed; confirm encoding */
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;               /* driver name string; ownership not visible here — TODO confirm */
+
+	volatile bool b_shutdown;       /* presumably signals worker threads to stop — verify readers */
+	pthread_mutex_t stat_lck;       /* NOTE(review): likely guards stats shared with stat_thread */
+	pthread_t stat_thread;          /* statistics worker thread */
+	pthread_t flm_thread;           /* FLM worker thread */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * Per-device driver context for DPDK (clone of the corresponding kernel
+ * struct); keep the layout as close as possible to the kernel original.
+ */
+struct drv_s {
+	int adapter_no;                 /* index of this adapter */
+	struct rte_pci_device *p_dev;   /* backing DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;       /* embedded 4GA driver state */
+
+	int n_eth_dev_init_count;       /* count of initialized eth devs on this adapter */
+	int probe_finished;             /* non-zero once probing completed — presumably; confirm */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+/* VirtIO split-ring "available" ring header (driver -> device). */
+struct virtq_avail {
+	le16 flags;  /* VIRTQ_AVAIL_F_NO_INTERRUPT suppresses device interrupts */
+	le16 idx;    /* driver's running index into ring[] */
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+/* One entry of the VirtIO split-ring "used" ring (device -> driver). */
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+/* VirtIO split-ring "used" ring header (device -> driver). */
+struct virtq_used {
+	le16 flags; /* VIRTQ_USED_F_NO_NOTIFY suppresses driver notifications */
+	le16 idx;   /* device's running index into ring[] */
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/*
+ * Byte offsets of the used and descriptor areas within one contiguous,
+ * STRUCT_ALIGNMENT-aligned split virt-queue allocation (avail is at 0);
+ * computed by dbs_calc_struct_layout().
+ */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * State for one DBS-backed virt-queue.  The leading anonymous union holds
+ * either split-ring or packed-ring bookkeeping, selected by vq_type.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage; /* UNUSED / UNMANAGED / MANAGED */
+	uint16_t vq_type;                 /* split vs packed ring selector */
+	uint16_t in_order;
+	int irq_vector;                   /* -1 means interrupts disabled */
+
+	nthw_dbs_t *mp_nthw_dbs;          /* owning DBS module instance */
+	uint32_t index;                   /* queue index within the DBS module */
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Byte offsets of the event-suppression areas within a packed virt-queue
+ * allocation — presumably the packed-ring counterpart of
+ * virtq_struct_layout_s; confirm against its producer.
+ */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Program one RX queue's start index and start pointer in the DBS module.
+ * NOTE(review): both busy polls are unbounded; a wedged FPGA would hang
+ * this thread — confirm that is acceptable during initialization.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	/* Wait for any in-flight init operation to complete. */
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait for this init operation to complete. */
+	do {
+		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * Program one TX queue's start index and start pointer in the DBS module
+ * (TX twin of dbs_init_rx_queue).
+ * NOTE(review): both busy polls are unbounded; a wedged FPGA would hang
+ * this thread — confirm that is acceptable during initialization.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy;
+	uint32_t init;
+	uint32_t dummy;
+
+	/* Wait for any in-flight init operation to complete. */
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait for this init operation to complete. */
+	do {
+		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
+	} while (busy != 0);
+}
+
+/*
+ * One-time DBS (doorbell/virt-queue) setup for an FPGA instance:
+ * allocate the DBS module shadow, verify DBS exists in the FPGA, mark all
+ * queue slots unused, reset the module, initialize every RX/TX queue, and
+ * finally walk the control registers through disable -> armed -> enabled.
+ *
+ * Returns 0 on success, -1 if allocation fails, or the dbs_init() error
+ * code if the module cannot be set up (the allocation is freed again).
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* No virt-queues are in use yet. */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Staged enable sequence: all off, then AM/UW on, then queues on. */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the offsets of the used and descriptor areas for one split
+ * virt-queue of queue_size entries, with each area rounded up to the next
+ * STRUCT_ALIGNMENT boundary (the avail area starts at offset 0).
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	struct virtq_struct_layout_s layout;
+	size_t area;
+
+	/* avail area; "avail->used_event" is not used and is omitted */
+	area = sizeof(struct virtq_avail) + queue_size * sizeof(le16);
+	if (area % STRUCT_ALIGNMENT)
+		area += STRUCT_ALIGNMENT - area % STRUCT_ALIGNMENT;
+	layout.used_offset = area;
+
+	/* used area; "used->avail_event" is not used and is omitted */
+	area = sizeof(struct virtq_used) +
+	       queue_size * sizeof(struct virtq_used_elem);
+	if (area % STRUCT_ALIGNMENT)
+		area += STRUCT_ALIGNMENT - area % STRUCT_ALIGNMENT;
+	layout.desc_offset = layout.used_offset + area;
+
+	return layout;
+}
+
+/*
+ * Initialize a split-ring avail structure: device interrupts suppressed,
+ * the driver index preset, and the ring pre-populated with 0..queue_size-1.
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = addr;
+	uint16_t n;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+	for (n = 0; n < queue_size; n++)
+		avail->ring[n] = n;
+}
+
+/*
+ * Initialize a split-ring used structure: driver notifications suppressed
+ * and every ring entry cleared.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	uint16_t i; /* unsigned, matching queue_size (was int: sign-compare) */
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	/* Was the magic constant 1; this define is the flag it encodes. */
+	p_used->flags = VIRTQ_USED_F_NO_NOTIFY;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Fill a split-ring descriptor table: each descriptor points at one
+ * pre-allocated packet buffer and carries the same flags.  A NULL buffer
+ * array makes this a no-op (unmanaged queues provide their own buffers).
+ *
+ * Note: 'flgs' was declared as 'ule16', a type not defined in this file;
+ * 'le16' matches the other little-endian ring fields and the local defines.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, le16 flgs)
+{
+	if (packet_buffer_descriptors) {
+		uint16_t i; /* unsigned, matching queue_size */
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr =
+				(uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+/*
+ * Initialize all three split-ring structures (avail, used, descriptor table)
+ * for one virt queue. The three areas are independent; each helper writes
+ * only its own region.
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	/* Driver area */
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size, initial_avail_idx);
+	/* Device area */
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	/* Descriptor table - only populated when buffers are supplied */
+	dbs_initialize_descriptor_struct(desc_struct_addr, packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * Return floor(log2(qsize)). Queue sizes are powers of two, so this yields
+ * the exact ring-size exponent the DBS registers expect.
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	/*
+	 * Guard: log2(0) is undefined and the decrement below would
+	 * underflow qs to 0xFFFFFFFF for a zero-sized queue.
+	 */
+	if (qsize == 0)
+		return 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure and enable one unmanaged RX virt queue in hardware (DR, UW and
+ * AM register memories, in the order required by DSF00094) and record its
+ * state in rxvq[]. Interrupts start disabled; queues with irq_vector >= 0
+ * have AM enabled later, once vfio interrupts are up.
+ * Returns a handle into rxvq[], or NULL on register write failure or an
+ * out-of-range index.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Guard the rxvq[] state table: sibling functions (e.g.
+	 * nthw_disable_rx_virt_queue) validate against MAX_VIRT_QUEUES
+	 * before indexing, this one previously did not.
+	 */
+	if (index >= MAX_VIRT_QUEUES)
+		return NULL;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	/* Bug fix: irq_vector is signed (commonly -1 here); log with %d, not %u */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%d\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+/* Forward declaration: shared RX/TX drain helper, defined further below */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Stop an unmanaged RX virt queue: clear the interrupt/sticky state in
+ * RX_UW_DATA, disable availability monitoring (RX_AM_DATA) and then wait
+ * until the FPGA reports the queue drained.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* Only queues set up through nthw_setup_rx_virt_queue() qualify */
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re-)enable an unmanaged RX virt queue: program RX_UW_DATA with the
+ * interrupt configuration (sticky MSI-X vector when irq_vector is valid)
+ * and then enable availability monitoring in RX_AM_DATA.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Use a sticky MSI-X interrupt when a valid vector was assigned */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	/* Bug fix: irq_vector is signed (may be -1); log with %d, not %u */
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%d\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Stop an unmanaged TX virt queue: clear the interrupt/sticky state in
+ * TX_UW_DATA, disable availability monitoring (TX_AM_DATA) and wait until
+ * the FPGA reports the queue drained.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* Only queues set up through nthw_setup_tx_virt_queue() qualify */
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re-)enable an unmanaged TX virt queue: program TX_UW_DATA with the
+ * interrupt configuration (sticky MSI-X vector when irq_vector is valid)
+ * and then enable availability monitoring in TX_AM_DATA.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	/* PACKED rings point UW at the descriptor area, split rings at used */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Redirect an unmanaged TX virt queue to a new output port (TX_DR_DATA) and
+ * then (re)enable it.
+ * Returns 0 on success, -1 on invalid queue or register write failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/*
+	 * Guard against NULL before dereferencing, consistent with the other
+	 * public queue functions (the original dereferenced unconditionally).
+	 */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/* Thin wrapper: forward per-port TX QoS settings (enable flag, information
+ * rate, burst size) to the DBS register layer. */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	const int res = set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+
+	return res;
+}
+
+/* Thin wrapper: set the global TX QoS rate scaling (multiplier/divider)
+ * via the DBS register layer. */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	const int res = set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+
+	return res;
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+/* Read the RX packet pointer register. On success *p_index receives the
+ * hardware pointer, or INDEX_PTR_NOT_VALID when the hardware flags it
+ * invalid. Returns the get_rx_ptr() status (0 on success). */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+
+	const int status = get_rx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/* Read the TX packet pointer register. On success *p_index receives the
+ * hardware pointer, or INDEX_PTR_NOT_VALID when the hardware flags it
+ * invalid. Returns the get_tx_ptr() status (0 on success). */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+
+	const int status = get_tx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/* Select which RX queue the RX_PTR register reports on */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	const int res = set_rx_ptr_queue(p_nthw_dbs, queue);
+
+	return res;
+}
+
+/* Select which TX queue the TX_PTR register reports on */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	const int res = set_tx_ptr_queue(p_nthw_dbs, queue);
+
+	return res;
+}
+
+/*
+ * Poll the RX or TX idle register until the hardware clears its busy flag;
+ * *idle receives the last idle value read.
+ * NOTE(review): this is an unbounded busy-wait - if the FPGA never clears
+ * busy, the loop does not time out; confirm this is intended.
+ * Returns the last get_*_idle() status (0 on success, e.g. -ENOTSUP when
+ * the register is unavailable).
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Block until the FPGA reports the given queue idle: repeatedly request an
+ * idle transition via set_*_idle() and poll the busy flag in between.
+ * On -ENOTSUP (idle register not implemented) a fixed 200 ms grace delay is
+ * used instead and success is assumed.
+ * NOTE(review): like dbs_wait_on_busy(), the outer loop has no timeout.
+ * Returns 0 on success, -1 on register access failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down one RX virt queue: clear UW, disable and clear AM, wait for the
+ * FPGA to drain, clear DR, re-init the queue and reset its rxvq[] state.
+ * Returns 0 on success, -1 on NULL queue or register write failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/*
+	 * Bug fix: validate before use - the original read rxvq->mp_nthw_dbs
+	 * prior to the NULL check, dereferencing a NULL pointer.
+	 */
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an RX queue set up via nthw_setup_rx_virt_queue(); only
+ * UNMANAGED queues may be released through this entry point. */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq && rxvq->usage == UNMANAGED)
+		return dbs_internal_release_rx_virt_queue(rxvq);
+
+	return -1;
+}
+
+/* Release a MANAGED RX queue: free the driver-owned packet buffer
+ * descriptor copy, then tear down the hardware queue. */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down one TX virt queue: clear UW, disable and clear AM, wait for the
+ * FPGA to drain, clear DR and QP, re-init the queue and reset its txvq[]
+ * state. Returns 0 on success, -1 on NULL queue or register write failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/*
+	 * Bug fix: validate before use - the original read txvq->mp_nthw_dbs
+	 * prior to the NULL check, dereferencing a NULL pointer.
+	 */
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release a TX queue set up via nthw_setup_tx_virt_queue(); only
+ * UNMANAGED queues may be released through this entry point. */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq && txvq->usage == UNMANAGED)
+		return dbs_internal_release_tx_virt_queue(txvq);
+
+	return -1;
+}
+
+/* Release a MANAGED TX queue: free the driver-owned packet buffer
+ * descriptor copy, then tear down the hardware queue. */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Configure and enable one unmanaged TX virt queue in hardware (DR, UW, AM
+ * and QP register memories, in the order required by DSF00094) and record
+ * its state in txvq[]. Interrupts start disabled; queues with
+ * irq_vector >= 0 have AM enabled later, once vfio interrupts are up.
+ * Returns a handle into txvq[], or NULL on register write failure or an
+ * out-of-range index.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Guard the txvq[] state table: sibling functions (e.g.
+	 * nthw_disable_tx_virt_queue) validate against MAX_VIRT_QUEUES
+	 * before indexing, this one previously did not.
+	 */
+	if (index >= MAX_VIRT_QUEUES)
+		return NULL;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/* Map the queue to its virtual output port */
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a MANAGED split-ring RX queue: lay out and initialize the avail/
+ * used/descriptor structures in the caller-provided memory area, keep a
+ * driver-side copy of the packet buffer descriptors, then program the
+ * hardware via nthw_setup_rx_virt_queue().
+ * Returns a handle into rxvq[], or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	/* All buffers start out available for the device (Rx) */
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/*
+		 * Bug fix: the original memcpy'd into the allocation without
+		 * checking it, dereferencing NULL on allocation failure.
+		 */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a MANAGED split-ring TX queue: lay out and initialize the avail/
+ * used/descriptor structures in the caller-provided memory area, keep a
+ * driver-side copy of the packet buffer descriptors, then program the
+ * hardware via nthw_setup_tx_virt_queue().
+ * Returns a handle into txvq[], or NULL on allocation failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	/* Tx: no buffers start out available, descriptor flags are 0 */
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/*
+		 * Bug fix: the original memcpy'd into the allocation without
+		 * checking it, dereferencing NULL on allocation failure.
+		 */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Shared packed-ring setup for RX and TX: lays out the descriptor table and
+ * the two event-suppression structures in the caller's (page-aligned) memory
+ * area, pre-fills every descriptor (buffer ID == array index, relying on the
+ * FPGA delivering in-order), keeps a driver-side copy of the packet buffer
+ * descriptors and disables device events.
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	/* Layout: descriptors, then device event, then driver event */
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	/* Both wrap counters start at 1 per the packed-ring convention */
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a MANAGED packed-ring RX queue: build the ring structures in the
+ * caller's memory area, then program the hardware. For packed rings the
+ * driver/device event-suppression areas take the roles of the split ring's
+ * avail/used addresses, and the descriptor table goes in the DR slot.
+ * Returns a handle into rxvq[], or NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	/* start_idx 0x8000: presumably encodes the initial wrap bit - TODO confirm */
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a MANAGED packed-ring TX queue: build the ring structures in the
+ * caller's memory area (descriptor flags 0, nothing available yet), then
+ * program the hardware with the event-suppression areas in the avail/used
+ * slots and the descriptor table in the DR slot.
+ * Returns a handle into txvq[], or NULL on setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	/* start_idx 0x8000: presumably encodes the initial wrap bit - TODO confirm */
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on the requested ring layout; unknown types yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on the requested ring layout; unknown types yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Flag value for a descriptor made available in the current avail wrap */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse of the used flag for the current avail wrap (avail != used
+ * marks a descriptor as available per the packed-ring convention) */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, toggling the avail wrap counter on ring
+ * wrap-around; arguments are evaluated once */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, toggling the used wrap counter on ring
+ * wrap-around; arguments are evaluated once */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Harvest up to n completed RX entries from the queue into rp[].
+ * Split ring: walks the used ring, reassembling jumbo frames that span
+ * multiple equally-sized buffers (cap_len read from the packet header);
+ * a multi-segment packet consumes one rp[] slot per segment and is only
+ * taken if all its segments fit within n.
+ * Packed ring: relies on in-order completion from the FPGA and hands out
+ * one rp[] entry per descriptor whose avail/used flags match the current
+ * used wrap counter.
+ * Outputs: *nb_pkts = whole packets delivered; returns the number of rp[]
+ * segments filled.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Device owns the descriptor only when both flags
+			 * match the current used wrap counter */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+#define vq_log_arg(vq, format, ...)
+
/*
 * Reserve up to n Tx descriptors for the caller to fill.
 *
 * *first_idx receives the index of the first reserved descriptor, cvq is
 * filled with the ring type and a pointer to the descriptor area, and
 * *p_virt_addr points at the queue's buffer table. Returns the number of
 * descriptors actually available (possibly fewer than requested, 0 for an
 * unknown ring type).
 */
uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
			     struct nthw_memory_descriptor **p_virt_addr)
{
	int m = 0;
	le16 queue_mask = (le16)(txvq->queue_size -
				1); /* Valid because queue_size is always 2^n */
	*p_virt_addr = txvq->p_virtual_addr;

	if (txvq->vq_type == SPLIT_RING) {
		cvq->s = txvq->p_desc;
		cvq->vq_type = SPLIT_RING;

		*first_idx = txvq->tx_descr_avail_idx;

		/* Descriptors handed out but not yet reclaimed from HW */
		le16 entries_used =
			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
			       queue_mask);
		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);

		vq_log_arg(txvq,
			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
			   entries_ready, txvq->p_used->idx);

		if (entries_ready < n) {
			/*
			 * Look for more packets.
			 * Using the used_idx in the avail ring since they are held synchronous
			 * because of in-order
			 */
			txvq->cached_idx =
				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
									  queue_mask];

			vq_log_arg(txvq,
				   "_update: get cachedidx %i (used_idx-1 %i)\n",
				   txvq->cached_idx,
				   (txvq->p_used->idx - 1) & queue_mask);
			entries_used = (le16)((txvq->tx_descr_avail_idx -
					      txvq->cached_idx) &
					     queue_mask);
			entries_ready =
				(le16)(txvq->queue_size - 1 - entries_used);
			vq_log_arg(txvq, "new used: %i, ready %i\n",
				   entries_used, entries_ready);
			if (n > entries_ready)
				n = entries_ready;
		}
	} else if (txvq->vq_type == PACKED_RING) {
		int i;

		cvq->p = txvq->desc;
		cvq->vq_type = PACKED_RING;

		/* First serve descriptors reclaimed beyond a previous request
		 * (outs = surplus from over-reclaiming, see the i > n case).
		 */
		if (txvq->outs.num) {
			*first_idx = txvq->outs.next;
			uint16_t num = RTE_MIN(n, txvq->outs.num);

			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
			txvq->outs.num -= num;

			if (n == num)
				return n;

			m = num;
			n -= num;
		} else {
			*first_idx = txvq->next_used;
		}
		/* iterate the ring - this requires in-order behavior from FPGA */
		for (i = 0; i < n; i++) {
			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];

			ule16 flags = desc->flags;
			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);

			if (avail != txvq->used_wrap_count ||
					used != txvq->used_wrap_count) {
				n = i;
				break;
			}

			/* desc->id marks the tail of a chain; reclaim the whole
			 * chain in one step.
			 */
			le16 incr = (desc->id - txvq->next_used) & queue_mask;

			i += incr;
			inc_used(txvq, incr + 1);
		}

		/* Chain reclaim may overshoot n; remember the surplus in outs */
		if (i > n) {
			int outs_num = i - n;

			txvq->outs.next = (txvq->next_used - outs_num) &
					  queue_mask;
			txvq->outs.num = outs_num;
		}

	} else {
		return 0;
	}
	return m + n;
}
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	do {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (rx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	do {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1;
+		if (--loops == 0)
+			return -1;
+		usleep(10);
+	} while (tx_ptr == INDEX_PTR_NOT_VALID);
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
/* One externally allocated DMA-able buffer: device-side and CPU-side
 * address plus its length in bytes.
 * NOTE(review): phys_addr is carried as void * - presumably an IOVA/physical
 * address from the allocator; confirm against the code that fills these in.
 */
struct nthw_memory_descriptor {
	void *phys_addr;
	void *virt_addr;
	uint32_t len;
};
+
/*
 * Little-endian wire-format scalar aliases.
 * typedefs rather than #defines: type-checked, scoped to this header,
 * visible to debuggers, and immune to accidental #undef/re-expansion.
 */
typedef uint64_t ule64;
typedef uint32_t ule32;
typedef uint16_t ule16;
+
/* Maximum MSI-X interrupt vectors available per virtual function */
#define MAX_MSIX_VECTORS_PR_VF 8

/* Virtqueue layout selectors (virtio split vs packed ring) and
 * in-order hints passed at queue setup.
 */
#define SPLIT_RING 0
#define PACKED_RING 1
#define IN_ORDER 1
#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
/*
 * Split Ring virtq Descriptor
 * pack(1) keeps the struct byte-exact with the virtio split-ring
 * descriptor wire format (16 bytes, no padding).
 */
#pragma pack(1)
struct virtq_desc {
	/* Address (guest-physical). */
	ule64 addr;
	/* Length. */
	ule32 len;
	/* The flags as indicated above. */
	ule16 flags;
	/* Next field if flags & NEXT */
	ule16 next;
};

#pragma pack()
+
/*
 * Packed Ring special structures and defines
 *
 */

#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */

/* additional packed ring flags (virtio 1.1 packed descriptor AVAIL/USED) */
#define VIRTQ_DESC_F_AVAIL (1 << 7)
#define VIRTQ_DESC_F_USED (1 << 15)

/* descr phys address must be 16 byte aligned */
/* NOTE(review): #pragma pack caps the maximum member alignment; it does not
 * itself force 16-byte placement - that presumably comes from the allocator.
 * Confirm.
 */
#pragma pack(push, 16)
struct pvirtq_desc {
	/* Buffer Address. */
	ule64 addr;
	/* Buffer Length. */
	ule32 len;
	/* Buffer ID. */
	ule16 id;
	/* The flags depending on descriptor type. */
	ule16 flags;
};

#pragma pack(pop)
+
/* Enable events */
#define RING_EVENT_FLAGS_ENABLE 0x0
/* Disable events */
#define RING_EVENT_FLAGS_DISABLE 0x1
/*
 * Enable events for a specific descriptor
 * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
 * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
 */
#define RING_EVENT_FLAGS_DESC 0x2
/* The value 0x3 is reserved */

/* Driver/device event suppression area (virtio 1.1 packed ring). */
struct pvirtq_event_suppress {
	union {
		struct {
			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
		};
		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
	};

	/* phys address must be 4 byte aligned */
	/* NOTE(review): a #pragma pack(push, 16) in the middle of a struct
	 * caps member alignment from here on; it does not guarantee the
	 * 4-byte placement stated above - confirm against the allocator.
	 */
#pragma pack(push, 16)
	union {
		struct {
			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
			ule16 reserved : 14; /* Reserved, set to 0 */
		};
		ule16 flags;
	};
};

#pragma pack(pop)
+
/*
 * Common virtq descr
 */
/* Set the 'next' link of a split-ring descriptor; no-op for packed rings
 * (they have no next field). Now uses the same __typeof__ single-evaluation
 * locals as its sibling macros for consistency and argument safety.
 */
#define vq_set_next(_vq, _index, _nxt)              \
	do {                                       \
		__typeof__(_vq) (vq) = (_vq); \
		__typeof__(_index) (index) = (_index); \
		__typeof__(_nxt) (nxt) = (_nxt); \
		if ((vq)->vq_type == SPLIT_RING)   \
			(vq)->s[index].next = nxt; \
	} while (0)
/* OR additional flag bits into a descriptor's flags field (both ring types) */
#define vq_add_flags(_vq, _index, _flgs)                  \
	do {                                           \
		__typeof__(_vq) (vq) = (_vq); \
		__typeof__(_index) (index) = (_index); \
		__typeof__(_flgs) (flgs) = (_flgs); \
		if ((vq)->vq_type == SPLIT_RING)       \
			(vq)->s[index].flags |= flgs;  \
		else if ((vq)->vq_type == PACKED_RING) \
			(vq)->p[index].flags |= flgs;  \
	} while (0)
/* Overwrite a descriptor's flags field (both ring types) */
#define vq_set_flags(_vq, _index, _flgs)                  \
	do {                                           \
		__typeof__(_vq) (vq) = (_vq); \
		__typeof__(_index) (index) = (_index); \
		__typeof__(_flgs) (flgs) = (_flgs); \
		if ((vq)->vq_type == SPLIT_RING)       \
			(vq)->s[index].flags = flgs;   \
		else if ((vq)->vq_type == PACKED_RING) \
			(vq)->p[index].flags = flgs;   \
	} while (0)
+
/* Address/length prefix shared by split and packed descriptors; aligned to
 * 16 bytes to overlay either layout.
 */
struct nthw_virtq_desc_buf {
	/* Address (guest-physical). */
	ule64 addr;
	/* Length. */
	ule32 len;
} __rte_aligned(16);

/* Tagged view of a queue's descriptor table: vq_type selects which union
 * member (s = split, p = packed) is valid.
 */
struct nthw_cvirtq_desc {
	union {
		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
		struct virtq_desc *s; /* SPLIT */
		struct pvirtq_desc *p; /* PACKED */
	};
	uint16_t vq_type;
};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..2c5e47f996
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
/* Debug helper: hex-dump a packet segment to stdout, 16 bytes per row,
 * optionally prefixed by a caption with the buffer address and length.
 */
static void dump_packet_seg(const char *text, uint8_t *data, int len)
{
	int ofs;

	if (text)
		printf("%s (%p, len %i)", text, data, len);

	for (ofs = 0; ofs < len; ofs++) {
		if ((ofs % 16) == 0)
			printf("\n%04X:", ofs);
		printf(" %02X", data[ofs]);
	}
	printf("\n");
}
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+static void store_pdrv(struct drv_s *p_drv)
+{
+	if (p_drv->adapter_no > NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	int i;
+	struct drv_s *p_drv = NULL;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		if (g_p_drv[i]) {
+			if (PCIIDENT_TO_DOMAIN(g_p_drv[i]->ntdrv.pciident) ==
+					addr.domain &&
+					PCIIDENT_TO_BUSNR(g_p_drv[i]->ntdrv.pciident) ==
+					addr.bus) {
+				p_drv = g_p_drv[i];
+				break;
+			}
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return p_drv;
+}
+
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr;
+
+	addr.domain = PCIIDENT_TO_DOMAIN(pciident);
+	addr.bus = PCIIDENT_TO_BUSNR(pciident);
+	addr.devid = PCIIDENT_TO_DEVNR(pciident);
+	addr.function = PCIIDENT_TO_FUNCNR(pciident);
+	return get_pdrv_from_pci(addr);
+}
+
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	}	else {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+	}
+	return p_nthw_dbs;
+}
+
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	enum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;
+	struct drv_s *p_drv;
+
+	p_drv = get_pdrv_from_pci(pci_addr);
+	if (p_drv) {
+		fpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+	}
+	return fpga_profile;
+}
+
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	const uint32_t value = strtol(value_str, NULL, 0);
+	*(uint32_t *)extra_args = value;
+	return 0;
+}
+
/* One parsed "<port>:<speed>" option entry */
struct port_link_speed {
	int port_id;     /* physical port number */
	int link_speed;  /* requested link speed in Mbps */
};

/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
static int string_to_port_link_speed(const char *key_str __rte_unused,
				     const char *value_str, void *extra_args)
{
	if (!value_str || !extra_args)
		return -1;
	char *semicol;
	const uint32_t pid = strtol(value_str, &semicol, 10);

	/* A ':' must separate the port number from the speed */
	if (*semicol != ':')
		return -1;
	const uint32_t lspeed = strtol(++semicol, NULL, 10);
	/* extra_args is a cursor (pointer-to-pointer) into an array of
	 * entries supplied by the caller.
	 * NOTE(review): the strtol results are stored unvalidated (no
	 * errno/range check) - non-numeric speed parses as 0.
	 */
	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;

	pls->port_id = pid;
	pls->link_speed = lspeed;
	/* Advance the caller's cursor to the next slot */
	++(*((struct port_link_speed **)(extra_args)));
	return 0;
}
+
/*
 * Fill *stats with the current software counters for this interface.
 *
 * Polls the adapter statistics first, then aggregates the per-queue
 * Rx/Tx counters (capped at RTE_ETHDEV_QUEUE_STAT_CNTRS) into totals.
 * Returns 0 on success, -1 when the statistics context is not ready or
 * if_index is out of range.
 */
static int dpdk_stats_collect(struct pmd_internals *internals,
			      struct rte_eth_stats *stats)
{
	unsigned int i;
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
	const int if_index = internals->if_index;
	uint64_t rx_total = 0;
	uint64_t rx_total_b = 0;
	uint64_t tx_total = 0;
	uint64_t tx_total_b = 0;
	uint64_t tx_err_total = 0;

	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
			if_index > NUM_ADAPTER_PORTS_MAX) {
		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
		return -1;
	}

	/*
	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
	 */
	poll_statistics(internals);

	memset(stats, 0, sizeof(*stats));
	/* Per-queue Rx counters, summed into device totals */
	for (i = 0;
			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
			i++) {
		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
		rx_total += stats->q_ipackets[i];
		rx_total_b += stats->q_ibytes[i];
	}

	/* Per-queue Tx counters (incl. errors), summed into device totals */
	for (i = 0;
			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
			i++) {
		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
		tx_total += stats->q_opackets[i];
		tx_total_b += stats->q_obytes[i];
		tx_err_total += stats->q_errors[i];
	}

	stats->imissed = internals->rx_missed;
	stats->ipackets = rx_total;
	stats->ibytes = rx_total_b;
	stats->opackets = tx_total;
	stats->obytes = tx_total_b;
	stats->oerrors = tx_err_total;

	return 0;
}
+
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	int eth_speed_num = ETH_SPEED_NUM_NONE;
+
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		eth_speed_num = ETH_SPEED_NUM_10M;
+		break;
+	case NT_LINK_SPEED_100M:
+		eth_speed_num = ETH_SPEED_NUM_100M;
+		break;
+	case NT_LINK_SPEED_1G:
+		eth_speed_num = ETH_SPEED_NUM_1G;
+		break;
+	case NT_LINK_SPEED_10G:
+		eth_speed_num = ETH_SPEED_NUM_10G;
+		break;
+	case NT_LINK_SPEED_25G:
+		eth_speed_num = ETH_SPEED_NUM_25G;
+		break;
+	case NT_LINK_SPEED_40G:
+		eth_speed_num = ETH_SPEED_NUM_40G;
+		break;
+	case NT_LINK_SPEED_50G:
+		eth_speed_num = ETH_SPEED_NUM_50G;
+		break;
+	case NT_LINK_SPEED_100G:
+		eth_speed_num = ETH_SPEED_NUM_100G;
+		break;
+	default:
+		eth_speed_num = ETH_SPEED_NUM_NONE;
+		break;
+	}
+
+	return eth_speed_num;
+}
+
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	int eth_link_duplex = 0;
+
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+		break;
+	case NT_LINK_DUPLEX_HALF:
+		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+		break;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		break;
+	}
+	return eth_link_duplex;
+}
+
/*
 * ethdev link_update callback: refresh dev_link (status, speed, duplex).
 *
 * Virtual/override ports report link from the vport negotiation state
 * with no meaningful speed; physical ports query the adapter. A stopped
 * device always reports link down. Always returns 0.
 */
static int eth_link_update(struct rte_eth_dev *eth_dev,
			   int wait_to_complete __rte_unused)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	if (eth_dev->data->dev_started) {
		/* Virtual ports: link state is the vport negotiation state */
		if (internals->type == PORT_TYPE_VIRTUAL ||
				internals->type == PORT_TYPE_OVERRIDE) {
			eth_dev->data->dev_link.link_status =
				((internals->vport_comm ==
				  VIRT_PORT_NEGOTIATED_NONE) ?
				 ETH_LINK_DOWN :
				 ETH_LINK_UP);
			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
			eth_dev->data->dev_link.link_duplex =
				ETH_LINK_FULL_DUPLEX;
			return 0;
		}

		/* Physical ports: ask the adapter for status/speed/duplex */
		const bool port_link_status =
			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
		eth_dev->data->dev_link.link_status =
			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;

		nt_link_speed_t port_link_speed =
			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
		eth_dev->data->dev_link.link_speed =
			nt_link_speed_to_eth_speed_num(port_link_speed);

		nt_link_duplex_t nt_link_duplex =
			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
		eth_dev->data->dev_link.link_duplex =
			nt_link_duplex_to_eth_duplex(nt_link_duplex);
	} else {
		/* Device not started: report link down */
		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	}
	return 0;
}
+
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	dpdk_stats_collect(internals, stats);
+	return 0;
+}
+
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	dpdk_stats_reset(internals, p_nt_drv, if_index);
+	return 0;
+}
+
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	uint32_t eth_speed_capa = 0;
+
+	if (nt_link_speed_capa & NT_LINK_SPEED_10M)
+		eth_speed_capa |= ETH_LINK_SPEED_10M;
+	if (nt_link_speed_capa & NT_LINK_SPEED_100M)
+		eth_speed_capa |= ETH_LINK_SPEED_100M;
+	if (nt_link_speed_capa & NT_LINK_SPEED_1G)
+		eth_speed_capa |= ETH_LINK_SPEED_1G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_10G)
+		eth_speed_capa |= ETH_LINK_SPEED_10G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_25G)
+		eth_speed_capa |= ETH_LINK_SPEED_25G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_40G)
+		eth_speed_capa |= ETH_LINK_SPEED_40G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_50G)
+		eth_speed_capa |= ETH_LINK_SPEED_50G;
+	if (nt_link_speed_capa & NT_LINK_SPEED_100G)
+		eth_speed_capa |= ETH_LINK_SPEED_100G;
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
/*
 * ethdev dev_infos_get callback: report device capabilities.
 *
 * Fills queue counts, MTU limits (inline FPGA profile enforces a larger
 * minimum MTU), supported link speeds and RSS offload capabilities.
 * Always returns 0.
 */
static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals =
		(struct pmd_internals *)eth_dev->data->dev_private;
	const int n_intf_no = internals->if_index;
	struct adapter_info_s *p_adapter_info =
			&internals->p_drv->ntdrv.adapter_info;

	dev_info->if_index = internals->if_index;
	dev_info->driver_name = internals->name;
	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
	dev_info->max_mtu = MAX_MTU;
	/* Inline profile needs room for metadata, hence the larger minimum */
	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
		dev_info->min_mtu = MIN_MTU_INLINE;

	else
		dev_info->min_mtu = MIN_MTU;

	if (internals->p_drv) {
		dev_info->max_rx_queues = internals->nb_rx_queues;
		dev_info->max_tx_queues = internals->nb_tx_queues;

		dev_info->min_rx_bufsize = 64;

		const uint32_t nt_port_speed_capa =
			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
							       n_intf_no);
		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
	}

	dev_info->flow_type_rss_offloads =
		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
	/*
	 * NT hashing algorithm doesn't use key, so it is just a fake key length to
	 * fit testpmd requirements.
	 */
	dev_info->hash_key_size = 1;

	return 0;
}
+
+/*
+ * Copy one received packet from its virtqueue segment(s) into an mbuf,
+ * chaining extra mbufs from @mb_pool when the head mbuf runs out of
+ * tailroom.
+ *
+ * @mbuf:     head mbuf, already allocated by the caller
+ * @mb_pool:  pool used to allocate continuation mbufs
+ * @hw_recv:  first virtqueue segment of this packet; following segments
+ *            are taken from consecutive array entries
+ * @max_segs: number of virtqueue segments available for this packet
+ * @data_len: captured length including the SG_HDR_SIZE packet header
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on failure
+ * (mbuf pool empty, or the packet spans more than @max_segs segments).
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length (payload only - the SG header is stripped) */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* one virtqueue segment is consumed per outer-loop iteration */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		/* current mbuf cannot hold the remainder of this segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			/* fill the current mbuf to the brim first */
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				/*
+				 * NOTE(review): pkt_len is also advanced on
+				 * the chained mbuf although the head already
+				 * holds the full pkt_len - presumably only
+				 * the head's value is consumed downstream;
+				 * confirm.
+				 */
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* ran out of segments before the packet was complete */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+/*
+ * DPDK rx_pkt_burst callback (scatter/gather path).
+ *
+ * Fetches up to @nb_pkts whole packets worth of segments from the rx
+ * virtqueue and copies them into freshly allocated mbufs. A packet that
+ * fits in one virtqueue buffer and the mbuf tailroom is copied directly;
+ * larger packets go through copy_virtqueue_to_mbuf(). All fetched
+ * virtqueue segments are released before returning - the success path
+ * intentionally falls through the err_exit label.
+ *
+ * Returns the number of mbufs stored in @bufs.
+ */
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	/* global shutdown flag - stop all traffic */
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	/*
+	 * hw_recv_pkt_segs: total virtqueue segments fetched;
+	 * whole_pkts: number of complete packets among them.
+	 */
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	/* only complete packets are delivered to the application */
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		/* each packet starts with a _pkt_hdr_rx capture header */
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		/* cap_len includes the SG header; less than that is garbage */
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			/* fast path: single vq segment that fits the mbuf */
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				/* multi-segment / oversized packet path */
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			/* color_type 0: tunnel/VNI metadata in descriptor color */
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				/* non-zero color carries a flow-stat id */
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+/* reached on success as well: always hand fetched segments back to HW */
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one mbuf chain into one or more tx virtqueue buffers starting at
+ * descriptor @vq_descr_idx, chaining descriptors with VIRTQ_DESC_F_NEXT
+ * as needed. The first buffer reserves SG_HDR_SIZE bytes for the packet
+ * header that the caller has already written.
+ *
+ * @cvq_desc:     combined (split/packed) virtqueue descriptor view
+ * @vq_descr_idx: index of the first descriptor to fill
+ * @vq_bufs:      memory descriptors backing the virtqueue buffers
+ * @max_segs:     maximum virtqueue segments this packet may occupy
+ * @mbuf:         head of the mbuf chain to transmit
+ *
+ * Returns the number of virtqueue segments used, or -1 if the packet
+ * would need more than @max_segs segments.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	/* first descriptor starts past the already-written SG header */
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	/* walk the mbuf chain, one src segment per outer iteration */
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop while the remaining data of this mbuf segment
+			 * needs further virtqueue buffers
+			 */
+			do {
+				/* chain current descriptor to the next one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/* over budget - error is reported after the loop */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len said more data but the chain ended */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* convert from last used index to a segment count */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK tx_pkt_burst callback (scatter/gather path).
+ *
+ * Counts the virtqueue segments needed for the burst, acquires tx
+ * buffers from the virtqueue (trimming the burst on back-pressure so
+ * only whole packets are sent), copies each mbuf chain into the
+ * acquired buffers and releases them to the HW. Sent mbufs are freed.
+ *
+ * Returns the number of packets actually handed to the virtqueue.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* global shutdown flag - stop all traffic */
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad runt frames to the 60-byte minimum.
+		 * NOTE(review): only pkt_len/data_len of the head mbuf are
+		 * adjusted - assumes runts are always single-segment;
+		 * confirm.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* fast path: one mbuf segment into one vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* multi-segment copy; aborts the burst on error */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	/* hand the filled descriptors to the HW, per-packet seg counts in nb_segs_arr */
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO/IOMMU-map the DMA memory backing one HW virtio
+ * queue: a 1MB control area for the combined descriptor rings plus
+ * @num_descr packet buffers of @buf_size bytes.
+ *
+ * Preferred layout is a single contiguous allocation mapped through one
+ * 1G IOMMU entry. If that allocation crosses a 1G boundary, or its HPA
+ * and virtual 1G offsets disagree, a fallback path maps the control
+ * area and the packet buffers as two separate allocations.
+ *
+ * Returns 0 on success, -1 on failure. All local allocations are freed
+ * on failure (the original leaked them on the nt_vfio_dma_map() error
+ * paths).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/*
+	 * Just allocate 1MB to hold all combined descr rings.
+	 * Widen before multiplying to avoid int overflow.
+	 */
+	uint64_t tot_alloc_size = 0x100000 + (uint64_t)buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa 0x%" PRIX64 " [0x%" PRIX64 "]\n",
+	       virt, gp_offset, (uint64_t)hpa, (uint64_t)hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : 0x%" PRIX64
+		       " (0x%" PRIX64 "), gp_offset : 0x%" PRIX64 " size %" PRIu64 "\n",
+		       (uint64_t)hpa, (uint64_t)hpa & ONE_G_MASK, gp_offset,
+		       tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/*
+		 * Fallback: allocate 1MB to hold all combined descr rings.
+		 * Named ctrl_virt - the original reused the name 'virt' and
+		 * shadowed the outer variable.
+		 */
+		size = 0x100000;
+		void *ctrl_virt = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!ctrl_virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, ctrl_virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = ctrl_virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA 0x%" PRIX64 "\n",
+		       ctrl_virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %u, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			/*
+			 * NOTE(review): ctrl_virt is still VFIO-mapped when
+			 * freed on these error paths; an unmap helper is not
+			 * visible here - confirm whether one must be called.
+			 */
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova 0x%" PRIX64 ", vf_num %i, num "
+		       "pkt bufs %i, tot size %u\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		/* carve the mapped area into per-descriptor buffers */
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	/* Optimal path: map the whole area through a single 1G entry */
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=0x%" PRIX64 " phys_addr=0x%" PRIX64
+	       " size=0x%" PRIX64 " hpa=0x%" PRIX64 "\n",
+	       (uint64_t)hwq->virt_queues_ctrl.virt_addr,
+	       (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+	       (uint64_t)hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %u, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	/* packet buffers start right after the 1MB control area */
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/*
+ * Mark a HW queue as released by clearing its VF number.
+ * A NULL hwq or an already-cleared vf_num is a no-op.
+ */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq == NULL || hwq->vf_num == 0)
+		return;
+
+	hwq->vf_num = 0;
+}
+
+/*
+ * Tear down the VFIO mapping of a HW queue and free its host memory.
+ *
+ * Returns 0 on success; -1 when the VFIO unmap fails, in which case
+ * nothing is freed.
+ *
+ * NOTE(review): only the combined (1G-path) allocation is unmapped and
+ * freed; buffers created by the fallback path of
+ * allocate_hw_virtio_queues() are not released here - confirm.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	const int vf_num = hwq->vf_num;
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+	int res;
+
+	res = nt_vfio_dma_unmap(vf_num, virt,
+				(uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_release callback: free the HW virtio queue resources
+ * backing tx queue @queue_id.
+ */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/*
+ * DPDK rx_queue_release callback: free the HW virtio queue resources
+ * backing rx queue @queue_id.
+ */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&internals->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Total number of HW queues handed out so far (queues are never returned) */
+static int num_queues_allocated;
+
+/*
+ * Reserve @num consecutive HW queue indices.
+ *
+ * Returns the first reserved queue index, or -1 when fewer than @num
+ * queues remain out of MAX_TOTAL_QUEUES.
+ *
+ * NOTE(review): not thread-safe - assumed to run only from the
+ * single-threaded probe path; confirm before other use.
+ */
+static int allocate_queue(int num)
+{
+	int next_free = num_queues_allocated;
+
+	/* %i matches the signed int arguments (was %u) */
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%i, New queues=%i, Max queues=%i\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+	num_queues_allocated += num;
+	return next_free;
+}
+
+/*
+ * DPDK rx_queue_setup callback.
+ *
+ * Attaches @mb_pool to the queue; for non-override port types it also
+ * allocates and IOMMU-maps the backing HW virtio queue and registers a
+ * managed rx virt-queue with the DBS.
+ *
+ * Returns 0 on success, -1 on allocation or virt-queue setup failure.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * Common part for all port types: attach the mempool and derive
+	 * the usable buffer size (was duplicated in the override branch).
+	 */
+	rx_q->mb_pool = mb_pool;
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	/* Override ports have no backing HW virt-queue */
+	if (internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+	/*
+	 * The original ignored a failed setup and logged success.
+	 * NOTE(review): the hw queue allocated above is not rolled back
+	 * here - matches the original's (lack of) cleanup; confirm.
+	 */
+	if (rx_q->vq == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "(%i) NTNIC RX OVS-SW virt queue setup failed\n",
+		       internals->port);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_setup callback.
+ *
+ * For non-override port types: allocates and IOMMU-maps the backing HW
+ * virtio queue, computes the HW bypass target id, registers a managed
+ * tx virt-queue with the DBS, maps the port's virtual queues in the EPP
+ * and enables the physical port when applicable.
+ *
+ * Returns 0 on success, -1 on invalid queue id or setup failure.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/*
+	 * Validate the id BEFORE indexing txq_scg[] - the original indexed
+	 * first and used '>', allowing an off-by-one out-of-bounds access.
+	 */
+	if (tx_queue_id >= internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+	/* The original ignored a failed setup and continued */
+	if (tx_q->vq == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "(%i) NTNIC TX OVS-SW virt queue setup failed\n",
+		       tx_q->port);
+		return -1;
+	}
+
+	tx_q->enabled = 1;
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	/* Program an inline-profile MTU; only physical ports accept one. */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+
+	if (internals->type != PORT_TYPE_PHYSICAL || mtu < MIN_MTU_INLINE ||
+			mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (flow_set_mtu_inline(internals->flw_dev, internals->port, mtu) != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	/*
+	 * mtu_set callback. The exception tx queue towards OVS is always kept
+	 * at MAX_MTU; the requested MTU is applied to the virtual-port data
+	 * queues or to the physical port itself.
+	 */
+	struct pmd_internals *internals = dev->data->dev_private;
+	fpga_info_t *fpga_info =
+		&internals->p_drv->ntdrv.adapter_info.fpga_info;
+	int rc;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* exception tx queue to OVS always allows the maximum MTU */
+		rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				      internals->rxq_scg[0].queue.hw_id,
+				      MAX_MTU, internals->type);
+		if (rc)
+			return rc;
+
+		for (uint i = 0; i < internals->vpq_nb_vq; i++) {
+			rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+					      internals->vpq[i].hw_id, mtu,
+					      internals->type);
+			if (rc)
+				return rc;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+		return 0;
+	}
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/*
+		 * exception queue is programmed with the VIRTUAL port type at
+		 * MAX_MTU (matches the original behavior)
+		 */
+		rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				      internals->rxq_scg[0].queue.hw_id,
+				      MAX_MTU, PORT_TYPE_VIRTUAL);
+		if (rc)
+			return rc;
+
+		rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp, internals->port,
+				      mtu, internals->type);
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+		return rc;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+	       internals->port, internals->type);
+	return -EINVAL;
+}
+
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	/* Mark the RX queue as started; the HW queue itself is always live. */
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	/* Mark the RX queue as stopped in the DPDK queue-state table. */
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	/*
+	 * Mark the TX queue as started. The parameter was misnamed
+	 * rx_queue_id; renamed for clarity (C callers bind positionally, so
+	 * this is fully backward-compatible).
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	/*
+	 * Mark the TX queue as stopped. The parameter was misnamed
+	 * rx_queue_id; renamed for clarity (positional binding, so callers
+	 * are unaffected).
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	/* Zero out MAC address slot @index of this port's address table. */
+	struct rte_ether_addr *const addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		(void)memset(&addrs[index], 0, sizeof(addrs[index]));
+		return;
+	}
+
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+}
+
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	/* Store @mac_addr in slot @index of the port's MAC address table. */
+	struct rte_ether_addr *const addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index < NUM_MAC_ADDRS_PER_PORT) {
+		addrs[index] = *mac_addr;
+		return 0;
+	}
+
+	const struct pmd_internals *const internals = dev->data->dev_private;
+
+	NT_LOG(ERR, ETHDEV,
+	       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+	       __FILE__, __func__, __LINE__, internals->if_index, index,
+	       NUM_MAC_ADDRS_PER_PORT);
+	return -1;
+}
+
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	/* Install @mac_addr as the port's primary (index 0) address. */
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	/*
+	 * Copy the multicast address list into the driver table; entries
+	 * beyond nb_mc_addr are cleared.
+	 */
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const table = internals->mc_addrs;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (size_t slot = 0U; slot < NUM_MULTICAST_ADDRS_PER_PORT; slot++) {
+		if (slot < nb_mc_addr)
+			table[slot] = mc_addr_set[slot];
+		else
+			(void)memset(&table[slot], 0, sizeof(table[slot]));
+	}
+
+	return 0;
+}
+
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	/*
+	 * dev_configure callback: nothing to program in HW here; just flag
+	 * that probing completed and force the promiscuous flag on.
+	 */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device is ALWAYS running promiscuous mode. The previous
+	 * "promiscuous ^= ~promiscuous" trick set every bit of the field;
+	 * the documented on-value is simply 1.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	/*
+	 * dev_start callback: virtual/override ports report link up at once;
+	 * physical ports wait (up to 5 s) for the FPGA link before the
+	 * requested loopback modes are applied.
+	 */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		return 0;
+	}
+
+	/*
+	 * Wait for link on the port. If an application starts sending before
+	 * the FPGA port is ready, garbage is produced. Poll every 100 ms and
+	 * give up after 5 seconds.
+	 */
+	int attempts = 0;
+
+	while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+			ETH_LINK_DOWN) {
+		if (++attempts >= 50) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+			       __func__, internals->n_intf_no);
+			break;
+		}
+		usleep(100000);
+	}
+
+	assert(internals->n_intf_no ==
+	       internals->if_index); /* Sanity check */
+
+	if (internals->lpbk_mode & (1 << 0))
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_HOST);
+	if (internals->lpbk_mode & (1 << 1))
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_LINE);
+
+	return 0;
+}
+
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	/*
+	 * dev_stop callback: on physical/override ports release the managed
+	 * virt-queues and force the port down; virtual ports only clear the
+	 * reported link status.
+	 */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		uint i;
+
+		for (i = 0; i < internals->nb_rx_queues; i++)
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+
+		for (i = 0; i < internals->nb_tx_queues; i++)
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+
+		/* Take the port administratively down and reset its state */
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	/* Bring a physical port administratively up; no-op otherwise. */
+	struct pmd_internals *const internals = dev->data->dev_private;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(&internals->p_drv->ntdrv.adapter_info, port,
+				 true);
+	return 0;
+}
+
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	/* Force a physical port's link status down; no-op otherwise. */
+	struct pmd_internals *const internals = dev->data->dev_private;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_link_status(&internals->p_drv->ntdrv.adapter_info, port,
+				   false);
+	return 0;
+}
+
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	/*
+	 * dev_close callback: tear down this eth_dev's LAG state and free its
+	 * private data; when the last eth_dev is closed, shut down the driver
+	 * threads and deinit the adapter.
+	 */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup: stop the LAG thread before freeing its config */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free per-port private data */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv;
+	 * wait until all rte_eth_dev's are closed - then close adapters via
+	 * p_drv. FIX: the original tested "!p_drv->n_eth_dev_init_count &&
+	 * p_drv", i.e. NULL-checked p_drv only AFTER dereferencing it; the
+	 * check must come first.
+	 */
+	if (p_drv && !p_drv->n_eth_dev_init_count) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			/* the FLM thread only runs in the inline profile */
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	/*
+	 * Format the FPGA image id as "TTT-PPPP-VV-RR". Returns 0 when the
+	 * string fits, otherwise the buffer size needed (incl. terminator).
+	 */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int needed =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+
+	if ((size_t)needed < fw_size)
+		return 0; /* fits in the caller's buffer */
+	return needed + 1; /* tell the caller how much space is required */
+}
+
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	/* Fetch extended statistics while holding the driver stat lock. */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	int cnt;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	cnt = nthw_xstats_get(p_nt4ga_stat, stats, n,
+			      p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch,
+			      internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return cnt;
+}
+
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	/* Fetch the selected extended statistics under the stat lock. */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	int cnt;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	cnt = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+				    p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch,
+				    internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return cnt;
+}
+
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	/* Reset HW xstats under the stat lock, then the DPDK-level stats. */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat,
+			  p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	/*
+	 * Report the names of the extended statistics.
+	 * FIX: eth_dev was annotated __rte_unused although the body
+	 * dereferences it; the incorrect attribute is removed.
+	 */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	/* Resolve xstat names for the given list of statistic ids. */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	nt4ga_stat_t *p_nt4ga_stat =
+		&internals->p_drv->ntdrv.adapter_info.nt4ga_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,
+					   p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch);
+}
+
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	/* Hand out the driver's rte_flow operations table. */
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+static int promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
+{
+	/*
+	 * Promiscuous mode is always on in this device, so enabling it is a
+	 * no-op. FIX: the log message lacked the trailing newline every other
+	 * NT_LOG call in this file has; attribute placement normalized.
+	 */
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.\n");
+	return 0;
+}
+
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	/*
+	 * Program the requested RSS hash fields into HSH recipe 0 (the
+	 * default receipt in the HSH module) and flush it to hardware.
+	 */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
+	const int hsh_idx = 0; /* recipe 0 = default receipt in HSH module */
+	int res;
+
+	res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					 nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	/* Report the active RSS hash configuration; no key is exposed. */
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
+/*
+ * DPDK eth_dev callback table for the NTNIC PMD.
+ * NOTE(review): kept non-const on purpose - mtu_set and promiscuous_disable
+ * are NULL here and may be patched at runtime elsewhere in the driver
+ * (dev_set_mtu/dev_set_mtu_inline exist but are not wired up in this table);
+ * confirm before const-qualifying.
+ */
+static struct eth_dev_ops nthw_eth_dev_ops = {
+	.dev_configure = eth_dev_configure,
+	.dev_start = eth_dev_start,
+	.dev_stop = eth_dev_stop,
+	.dev_set_link_up = eth_dev_set_link_up,
+	.dev_set_link_down = eth_dev_set_link_down,
+	.dev_close = eth_dev_close,
+	.link_update = eth_link_update,
+	.stats_get = eth_stats_get,
+	.stats_reset = eth_stats_reset,
+	.dev_infos_get = eth_dev_infos_get,
+	.fw_version_get = eth_fw_version_get,
+	.rx_queue_setup = eth_rx_scg_queue_setup,
+	.rx_queue_start = eth_rx_queue_start,
+	.rx_queue_stop = eth_rx_queue_stop,
+	.rx_queue_release = eth_rx_queue_release,
+	.tx_queue_setup = eth_tx_scg_queue_setup,
+	.tx_queue_start = eth_tx_queue_start,
+	.tx_queue_stop = eth_tx_queue_stop,
+	.tx_queue_release = eth_tx_queue_release,
+	.mac_addr_remove = eth_mac_addr_remove,
+	.mac_addr_add = eth_mac_addr_add,
+	.mac_addr_set = eth_mac_addr_set,
+	.set_mc_addr_list = eth_set_mc_addr_list,
+	.xstats_get = eth_xstats_get,
+	.xstats_get_names = eth_xstats_get_names,
+	.xstats_reset = eth_xstats_reset,
+	.xstats_get_by_id = eth_xstats_get_by_id,
+	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
+	/* intentionally unset here - see NOTE above */
+	.mtu_set = NULL,
+	.mtr_ops_get = eth_mtr_ops_get,
+	.flow_ops_get = _dev_flow_ops_get,
+	/* promiscuous mode is always on and cannot be disabled */
+	.promiscuous_disable = NULL,
+	.promiscuous_enable = promiscuous_enable,
+	.rss_hash_update = eth_dev_rss_hash_update,
+	.rss_hash_conf_get = rss_hash_conf_get,
+};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	/* Cases are sorted ascending; unknown values map to UNKNOWN. */
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter flm stat thread
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	/*
+	 * Poll FLM (flow matcher) statistics until driver shutdown. Blocks
+	 * first until the flow device has an eth_base configured.
+	 */
+	struct drv_s *p_drv = context;
+	struct adapter_info_s *p_adapter_info = &p_drv->ntdrv.adapter_info;
+	struct flow_nic_dev *p_flow_nic_dev =
+		p_adapter_info->nt4ga_filter.mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	/* Wait (1 s poll) until a port has been configured on this device */
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		/* Back off briefly when there was nothing to update */
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	/*
+	 * Every 10 ms: trigger a statistics DMA, wait for it to complete,
+	 * then collect the result under the stat lock. Runs until shutdown.
+	 */
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		/*
+		 * Spin until the DMA completes: *mp_timestamp stays at
+		 * (uint64_t)-1 until new data has landed.
+		 */
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			/*
+			 * If debug logging is on, dump RMC diagnostics every
+			 * ~16k polls ((++loop & 0x3fff) == 0) so a stuck DMA
+			 * is visible.
+			 */
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
+/*
+ * File-scope representor/PF bookkeeping state.
+ * NOTE(review): the fields' usage is not visible in this chunk - presumably
+ * populated by setup_virtual_pf_representor_base() during probe; confirm the
+ * field semantics there before relying on these comments.
+ */
+static struct {
+	struct rte_pci_device *vpf_dev;	/* PCI device of the virtual PF (assumed) */
+	struct rte_eth_devargs eth_da;	/* parsed representor devargs (assumed) */
+	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+	uint16_t pf_backer_port_id;
+} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = {{ 0 }};
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id    = i;
+			queue_ids[i].hw_id = start_queue + i;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Driver-wide teardown, used both by the PCI remove path and by the ctrl+C
+ * shutdown thread.  The eth_dev argument is unused: teardown walks the global
+ * pmd_intern_base list and the rel_virt_queue table instead, so a single call
+ * releases the virt-queues of every port created by this PMD.
+ *
+ * Always returns 0.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Release the managed scatter-gather queues of every port, Tx then Rx */
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	/* Release virt-queues recorded via register_release_virtqueue_info() */
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/* SIGINT handler: request an orderly shutdown; forward any other signal. */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		/* Picked up by the shutdown thread */
+		kill_pmd = 1;
+		return;
+	}
+
+	/* Not our signal: reinstall the previous handler and re-deliver */
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/*
+ * Control thread started by init_shutdown().  Polls the kill_pmd flag set by
+ * the SIGINT handler; once set, it runs the full driver deinit and then
+ * re-raises SIGINT with the original handler restored so the application
+ * terminates as it normally would on ctrl+C.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	/* nthw_pci_dev_deinit() ignores its argument; dummy is never read */
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT handler and start the shutdown thread that performs an
+ * orderly driver teardown on ctrl+C.  Also makes a one-time rough estimate of
+ * the TSC frequency (cycles per second) used to rate-limit statistics
+ * polling.
+ *
+ * @return 0 on success, -1 if the shutdown thread could not be created
+ *         (previous pthread_create result was silently ignored).
+ */
+static int init_shutdown(void)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	res = pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL);
+	if (res != 0) {
+		/* No thread will consume kill_pmd; restore the old handler */
+		signal(SIGINT, previous_handler);
+		NT_LOG(ERR, ETHDEV,
+		       "%s: shutdown thread create failed: res=%d\n",
+		       __func__, res);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe entry point for the ntnic PMD.
+ *
+ * Verifies the EAL environment (PCI support, VFIO IOMMU enabled, hugepages,
+ * IOVA mode), skips the secondary endpoint of bifurcated NT200A01/NT50B01
+ * adapters, then performs the full device init and installs the SIGINT
+ * shutdown handler.
+ *
+ * @return result of nthw_pci_dev_init(), or -1 on a failed precondition.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* Precondition: EAL built and running with PCI support */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	/* Precondition: VFIO must run with a real IOMMU */
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	/* Non-PA IOVA mode works but is only warned about */
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/*
+	 * On bifurcated NT200A01/NT50B01 adapters only subsystem device id
+	 * 0x01 is the primary endpoint; refuse to probe the secondary one.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	/* NOTE(review): installed even when dev init failed - confirm intended */
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove entry point: delegate per-ethdev teardown to the generic helper. */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	int ret = rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+
+	return ret;
+}
+
+/*
+ * Register one EAL log type per NT log module (default level INFO) and raise
+ * the global EAL log level to DEBUG.  Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END;
+			module++) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[idx],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Bridge from the NT logging abstraction to rte_vlog(): map the NT severity
+ * and module onto the corresponding EAL level and registered log type.
+ */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	/* Severity mapping; anything unknown logs as DEBUG */
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	/* Known NT modules use their registered EAL type; others pass through */
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END)
+		rte_module =
+			(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	else
+		rte_module = module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/*
+ * Report whether DEBUG logging is enabled for an NT module.
+ * Returns 1/0, or -1 when the module id is out of range.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module >= NT_LOG_MODULE_END || module < NT_LOG_MODULE_GENERAL)
+		return -1;
+
+	const int idx = NT_LOG_MODULE_INDEX(module);
+
+	return rte_log_get_level(nt_log_module_logtype[idx]) == RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* EAL constructor: hook the NT logging abstraction up to the EAL logger. */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver definition: probe/remove callbacks and the supported ID table. */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+/* Register the PMD with EAL; the vfio-pci kernel module is required. */
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Record a virt-queue in the first free slot of the release table so it can
+ * be torn down on driver deinit.
+ * Returns 0 on success, -1 when the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Remove a previously registered virt-queue from the release table.
+ * Returns 0 if found, -1 otherwise.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != vq)
+			continue;
+		rel_virt_queue[slot].vq = NULL;
+		return 0;
+	}
+	return -1;
+}
+
+/* Look up the port internals bound to the given vhost id, or NULL. */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *p;
+
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Look up the port internals bound to the given vhost socket path.
+ *
+ * @param path  vhost-user socket path to match against each port's vhost_path.
+ * @return matching pmd_internals, or NULL when no port uses that path.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		/* Compare once; previous code ran strcmp twice per node */
+		const int diff = strcmp(intern->vhost_path, path);
+
+		/* Use the driver logger instead of raw printf debug output */
+		NT_LOG(DBG, VDPA, "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path, diff);
+		if (diff == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues=[port:nvq,port:nvq;...]" devargs fragment and fill in
+ * the per-port queue counts.
+ *
+ * @param str    devargs substring containing the bracketed list; modified in
+ *               place during tokenization.
+ * @param portq  array indexed by port number receiving each queue count.
+ *
+ * Entries with an out-of-range port or queue count are silently ignored.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+	char *saveptr = NULL;
+
+	/* Locate the opening bracket of the mapping list */
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	/* Locate the matching closing bracket and terminate the list there */
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	/* strtok_r: strtok's static state is unsafe with multiple threads */
+	tok = strtok_r(str, ",;", &saveptr);
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok_r(NULL, ",;", &saveptr);
+	}
+}
+
+/*
+ * Parse representor-related devargs for this PF.
+ *
+ * Reads an optional "portqueues=[port:nvq,...]" extension into rep.portqueues
+ * and strips it from the devargs string (rte_eth_devargs_parse() cannot
+ * handle trailing parameters after "representor=[x]"), then parses the
+ * standard representor list.  On success the PF device and parsed devargs
+ * are cached in the global rep state for later representor creation.
+ *
+ * @return number of representor ports found (0 if none), or -1 on a parse
+ *         error (rte_errno is set).
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			/* err is negative; store the positive errno value */
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual port (representor / vDPA backend).
+ *
+ * When representor devargs were parsed for this PF, the next representor id
+ * from that list is assigned and the device is parented to the PF; otherwise
+ * the VF number from VFIO is used.  *n_vq returns the queue count configured
+ * for this representor via the "portqueues" devargs (default 1).
+ *
+ * @return the allocated ethdev, or NULL on allocation failure.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		/* Take the next id from the PF's representor devargs list */
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* Queue count for this representor, if set by "portqueues" devargs */
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+/* Return the underlying rte device name, or NULL when unavailable. */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	if (!dev || !dev->device.name)
+		return NULL;
+	return dev->device.name;
+}
+
+/* Devargs keys accepted by the virtual-port ethdev: "vlan" and "sep". */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+/*
+ * Create and initialize the ethdev + private data for one virtual port
+ * (vDPA / VF representor backend) on the given PCI device.
+ *
+ * Parses optional "vlan" and "sep" devargs, allocates the ethdev and a
+ * shadow rte_eth_dev_data, allocates HW queues, binds the port to the flow
+ * device (directly for the VSWITCH profile, via the in_port's main ethdev
+ * otherwise), starts the vDPA relay and links the new internals into the
+ * global pmd_intern_base list.
+ *
+ * @param vdev     PCI device the virtual port belongs to.
+ * @param eth_dev  out: the allocated ethdev.
+ * @return 0 on success, -1 on failure.  NOTE(review): not every error path
+ *         releases everything already allocated (see inline notes) - verify.
+ */
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+		/* NOTE(review): kvlist is never rte_kvargs_free()'d - verify */
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	/* NOTE(review): this early return bypasses the error: cleanup of
+	 * data/internals - verify it is intentional
+	 */
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id    = i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* VSWITCH: bind this port directly to its own flow device */
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		/* Otherwise share the flow device of the physical in_port and
+		 * attach this port's queues to it
+		 */
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	/* NOTE(review): unbounded sprintf into path[128]; consider snprintf */
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	/* Shadow the ethdev data and point it at our private state */
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	/* Append to the global internals list */
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	/* Publish the vhost id last; readers poll vhid for readiness */
+	__atomic_store_n(&internals->vhid, vhid, __ATOMIC_RELAXED);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	/* NOTE(review): the allocated ethdev (*eth_dev) is not released here -
+	 * verify rte_eth_dev_release_port() is not required
+	 */
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * Dummy Rx burst for PORT_TYPE_OVERRIDE ports.
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA, so this handler always reports zero packets.
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * Dummy Tx burst for PORT_TYPE_OVERRIDE ports.
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA, so this handler accepts nothing.
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * Create the virtual-function DPDK ethdev for @pci_dev and finish probing.
+ *
+ * PORT_TYPE_OVERRIDE ports get the dummy Rx/Tx burst handlers above (their
+ * traffic flows through VF/vDPA queues); every other port type uses the
+ * regular SCG burst functions.
+ *
+ * Returns 0 on success, -1 if rte_pmd_vp_init_internals() failed.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual-function ethdev created for @pci_dev: close all
+ * vDPA devices, free the ethdev private data and release the port.
+ * Returns 0 on success, -1 when @pci_dev is NULL or no ethdev was found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/*
+	 * Look up the ethdev entry created for this device.
+	 * Fix: rte_vdev_device_name() takes a struct rte_vdev_device *, but
+	 * this is a PCI device - use the generic rte_device name instead.
+	 */
+	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Percentage of the hash distribution assigned to port 0 */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* Port/hash indices and "no transmit" value used with lag_set_config() */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* No-op transition handler: this state change needs no HW reconfiguration. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links usable: split the hash distribution 50/50 across the ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 usable: give port 0 100% of the hash distribution. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 usable: give port 0 0% (i.e. everything to port 1). */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/*
+ * No link on either port: disable transmission by writing LAG_NO_TX for the
+ * port-0 and hash entries.  Returns 0 when both lag_set_config() calls
+ * succeed (non-zero error codes are summed).
+ * NOTE(review): LAG_PORT1_INDEX is never written here - confirm whether
+ * FLOW_LAG_SET_ALL on the port-0/hash entries also covers port 1.
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	int status = 0;
+
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	status += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX,
+				 LAG_NO_TX);
+	status += lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX,
+				 LAG_NO_TX);
+	return status;
+}
+
+/* Query the physical link state of @port via the adapter info and log it. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/*
+ * Combined link status of both ports as a 2-bit value matching
+ * enum lag_state_e: port 0 in bit 0, port 1 in bit 1.
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	const uint8_t p0_up = lag_get_link_status(config, 0) ? 1 : 0;
+	const uint8_t p1_up = lag_get_link_status(config, 1) ? 1 : 0;
+
+	return (p1_up << 1) | p0_up;
+}
+
+/*
+ * Route all traffic to the configured primary port: the primary gets the
+ * full hash distribution and Rx is blocked on the backup port.
+ * Returns 0 on success (error codes from the two HW calls are summed).
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	int status;
+
+	/* Port 0 gets 100% of the distribution when it is the primary,
+	 * 0% when port 1 is the primary. */
+	const uint8_t dist_port0 = (config->primary_port == 0) ? 100 : 0;
+	const uint8_t rx_blocked_port = (config->primary_port == 0) ?
+					LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+
+	status = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, dist_port0);
+
+	/* Block Rx on the backup port */
+	status += lag_set_port_block(0, rx_blocked_port);
+
+	return status;
+}
+
+/*
+ * Route all traffic to the configured backup port: the backup gets the
+ * full hash distribution and Rx is blocked on the primary port.
+ * Returns 0 on success (error codes from the two HW calls are summed).
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	int status;
+
+	/* Port 0 gets 100% of the distribution when it is the backup,
+	 * 0% when port 1 is the backup. */
+	const uint8_t dist_port0 = (config->backup_port == 0) ? 100 : 0;
+	const uint8_t rx_blocked_port = (config->backup_port == 0) ?
+					LAG_PORT1_INDEX : LAG_PORT0_INDEX;
+
+	/* Tx only on the backup port */
+	status = lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, dist_port0);
+
+	/* Block Rx on the primary port */
+	status += lag_set_port_block(0, rx_blocked_port);
+
+	return status;
+}
+
+/*
+ * Active/backup monitor loop (BONDING_MODE_ACTIVE_BACKUP).
+ * Starts on the primary port, then polls link status every 500 ms:
+ * fails over to the backup when the primary drops, and fails back as
+ * soon as the primary recovers.  Runs until lag_thread_active clears.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	/* non-zero while the backup port carries the traffic */
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* Link-state bitmap: port 0 is LSB and port 1 is MSB */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* Action to run when the link state moves from src_state to dst_state */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/*
+ * Active/active state-transition table, consulted by lookup_action().
+ * Made static const: the table is private to this file, never modified at
+ * runtime, and the generic name "actions" must not leak external linkage.
+ */
+static const struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Find the transition handler for current_state -> new_state in the
+ * actions table.  Returns NULL when the transition is not listed.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	const size_t n_entries = sizeof(actions) / sizeof(actions[0]);
+	size_t idx;
+
+	for (idx = 0; idx < n_entries; idx++) {
+		if (actions[idx].src_state == current_state &&
+				actions[idx].dst_state == new_state)
+			return actions[idx].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active monitor loop (BONDING_MODE_8023AD).
+ * Starts balanced 50/50 with no ports blocked, then polls the combined
+ * link state every 500 ms and applies the transition action from the
+ * actions table.  Runs until lag_thread_active clears.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/*
+		 * Fix: lookup_action() returns NULL for transitions missing
+		 * from the table; calling through NULL would crash the LAG
+		 * management thread.
+		 */
+		if (action != NULL)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry point (pthread signature).  Dispatches to
+ * the monitor loop matching the configured bonding mode; returns NULL
+ * immediately for unsupported modes.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..ee0d84ce82
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+/*
+ * DPDK 22.07 renamed the ETH_* ethdev constants to RTE_ETH_*.  On 22.07+
+ * the legacy ETH_* names are undefined and re-created below from their
+ * RTE_ETH_* counterparts so the driver uses one spelling everywhere.
+ */
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+/* MAC address table sizes per port */
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+/* Upper bound on FPGA virtual ports handled by this driver */
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+#define SG_HDR_SIZE 12
+
+/*
+ * Number of VQ buffers needed to fit all packet data plus the SG header:
+ * 1 when everything fits in a single Tx buffer, otherwise the total
+ * rounded up to whole SG_HW_TX_PKT_BUFFER_SIZE buffers.
+ */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _total = (size_t)(_data_size_) + SG_HDR_SIZE; \
+		(_total > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		((_total + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : (size_t)1; \
+	})
+
+
+/*
+ * Ring-index helpers for the Tx virtqueue.  NB: VIRTQ_DESCR_IDX expands
+ * the bare identifier first_vq_descr_idx, which must be in scope as a
+ * variable at every expansion site.
+ */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/* NOTE(review): SG_HDR_SIZE is already defined earlier in this header;
+ * this duplicate definition is redundant (harmless only while the two
+ * values stay identical) and should be removed.
+ */
+#define SG_HDR_SIZE 12
+
+/* Rx scatter-gather packet header; sized to SG_HDR_SIZE (12 bytes, see the
+ * compile-time checks below).
+ * NOTE(review): the file-scope tag _pkt_hdr_rx begins with an underscore,
+ * which is in the implementation-reserved namespace.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Tx scatter-gather packet header; sized to SG_HDR_SIZE (12 bytes, see the
+ * compile-time checks below).
+ * NOTE(review): the file-scope tag _pkt_hdr_tx begins with an underscore,
+ * which is in the implementation-reserved namespace.
+ */
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size: the typedefs
+ * declare a negative-size array (a compile error) if either header struct
+ * does not measure exactly SG_HDR_SIZE bytes. */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+/* Opaque handle type */
+typedef void *handle_t;
+
+/* Host memory descriptors backing one hardware queue for a given VF */
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* Per-queue Rx state for the scatter-gather (SCG) datapath */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq; /* host memory backing the HW queue */
+	struct nthw_virt_queue *vq; /* virt-queue handle */
+	int nb_hw_rx_descr; /* number of HW Rx descriptors */
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* Per-queue Tx state for the scatter-gather (SCG) datapath */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq; /* host memory backing the HW queue */
+	struct nthw_virt_queue *vq; /* virt-queue handle */
+	int nb_hw_tx_descr; /* number of HW Tx descriptors */
+	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+/* Fixed-capacity list of uint32 values with an element count */
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count;
+};
+
+/* Configuration related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* LAG management thread (runs lag_management()) */
+	uint8_t lag_thread_active; /* cleared to stop the management thread */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+/* Supported NTbond modes (values match the classic Linux bonding modes) */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* Meter profile list node, keyed by profile_id */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* Meter instance list node, referencing its profile */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Virtio layout negotiated for a virtual port */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+/* Per-ethdev private driver state (stored in eth_dev->data->dev_private);
+ * ports are additionally chained through @next into a global list. */
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev;
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode;
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config;
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	/* Meter profiles and meter instances owned by this port */
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	/* Next port in the global pmd_intern_base list */
+	struct pmd_internals *next;
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+/* Capacity of one converted rte_flow: match items and actions */
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+/* Number of flow-stat ids; kept a power of two because allocation below
+ * masks with (MAX_COLOR_FLOW_STATS - 1) */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* Converted match items for one rte_flow translation */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus the match items it points into */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted flow attributes together with the converted match */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted actions plus the per-action storage they reference */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/* Driver-side representation of one created rte_flow */
+struct rte_flow {
+	void *flw_hdl; /* handle returned by the flow engine */
+	int used; /* non-zero while this slot holds a live flow */
+	uint32_t flow_stat_id; /* slot in flow_stat_id_map, from MARK */
+
+	/* cached statistics for query() */
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/* Driver-private item types; based at INT_MIN so they can never collide
+ * with the public RTE_FLOW_ITEM_TYPE_* values. */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+/* Driver-private action types; based at INT_MIN so they can never collide
+ * with the public RTE_FLOW_ACTION_TYPE_* values. */
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/* NOTE(review): the following objects are defined 'static' in a header, so
+ * every translation unit including this file gets its own independent copy
+ * (tables, map and lock).  That is only correct if exactly one .c file
+ * includes this header - verify, or move the definitions into a .c file.
+ */
+static int convert_tables_initialized;
+
+#define MAX_RTE_ENUM_INDEX 127
+
+/* RTE item/action value -> driver FLOW_* value; -1 marks "unmapped" */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/* Map an RTE_FLOW_ITEM_TYPE_* value to the driver's FLOW_ELEM_TYPE_*;
+ * evaluates to -1 for out-of-range or unmapped values. */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+/* Map an RTE_FLOW_ACTION_TYPE_* value to FLOW_ACTION_TYPE_*; -1 if unmapped. */
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+/* flow_stat_id_map[id] holds mark + 1 while id is allocated, 0 when free */
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+/* Serializes access to flow_stat_id_map */
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate a driver flow_error into an rte_flow_error for the caller.
+ * FLOW_ERROR_NONE/SUCCESS map to RTE_FLOW_ERROR_TYPE_NONE, everything
+ * else to RTE_FLOW_ERROR_TYPE_UNSPECIFIED.  A NULL @error is ignored.
+ * Always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	const int no_error = (flow_error->type == FLOW_ERROR_NONE ||
+			      flow_error->type == FLOW_ERROR_SUCCESS);
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+	error->type = no_error ? RTE_FLOW_ERROR_TYPE_NONE :
+		      RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id.
+ *
+ * Linear-probes flow_stat_id_map starting at mark % MAX_COLOR_FLOW_STATS
+ * and claims the first free slot, storing mark + 1 so that 0 means "free".
+ * Caller must hold flow_lock (see create_flow_stat_id()).
+ * NOTE(review): if all MAX_COLOR_FLOW_STATS slots are occupied this loop
+ * never terminates - consider bounding the probe count.
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow stat id under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	uint32_t flow_stat_id;
+
+	rte_spinlock_lock(&flow_lock);
+	flow_stat_id = create_flow_stat_id_locked(mark);
+	rte_spinlock_unlock(&flow_lock);
+
+	return flow_stat_id;
+}
+
+/* Release a flow stat id (out-of-range ids are ignored).  Presumably
+ * called with flow_lock held, cf. the _locked suffix - verify at call
+ * sites. */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id >= MAX_COLOR_FLOW_STATS)
+		return;
+
+	flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time build of the RTE -> driver conversion tables consumed by
+ * CNV_TO_ELEM()/CNV_TO_ACTION().  Entries left untouched stay -1
+ * ("unmapped").  NOTE(review): guarded only by a plain int flag - not
+ * thread-safe if two threads can translate flows concurrently; confirm
+ * callers serialize the first translation.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	/* memset with -1 fills every int slot with 0xFF bytes == -1 */
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug-only name table mirroring elem_list */
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	/* Debug-only name table mirroring action_list */
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Interpret a raw packet byte stream as a list of flow elements.
+ *
+ * data     : packet bytes to parse, starting at the Ethernet header.
+ * preserve : optional parallel mask bytes (RAW_ENCAP preserve); may be
+ *            NULL, in which case all element masks are NULL.
+ * size     : number of valid bytes in data[] (and preserve[] if set).
+ * out      : receives the parsed elements, terminated by
+ *            FLOW_ELEM_TYPE_END.
+ *
+ * Returns the number of elements written (including the END terminator),
+ * or -1 when the stream is truncated, malformed or uses an unsupported
+ * protocol.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;	/* next free slot in out[] */
+	int pkti = 0;	/* current parse offset into data[] */
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - accepts single or stacked 802.1Q/802.1ad tags */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - the version nibble must agree with the ether type */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* Protocol field at fixed offset 9 of the IPv4 header */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* Next-header field at fixed offset 6 of the IPv6 header */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP (IPv4 only; ICMPv6 is 58, unhandled) */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* Destination port 2152 marks a GTPv1-U payload */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Need at least the length octet before
+				 * dereferencing the next extension header.
+				 */
+				if (size - pkti < 1)
+					return -1;
+
+				/* Length is in 4-octet units (TS 29.281);
+				 * zero is malformed and would never advance
+				 * pkti (infinite loop) while reading
+				 * data[pkti - 1] below.
+				 */
+				size_t ext_len = data[pkti] * 4;
+
+				if (ext_len == 0)
+					return -1;
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last octet of the extension holds the next
+				 * extension header type (0 terminates).
+				 */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing bytes that match no supported header are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Populate the converted attribute set from an optional RTE flow attr.
+ * Only the group and priority fields are carried over; everything else
+ * is zeroed.  Always succeeds.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(struct flow_attr));
+
+	if (attr == NULL)
+		return 0;
+
+	attribute->attr.group = attr->group;
+	attribute->attr.priority = attr->priority;
+	return 0;
+}
+
+/*
+ * Convert an RTE flow item array into the filter layer's flow_elem list.
+ *
+ * match    : destination; match->flow_elem[] receives one entry per item.
+ * items    : RTE items, terminated by RTE_FLOW_ITEM_TYPE_END.
+ * max_elem : capacity of match->flow_elem[].
+ *
+ * Returns 0 on success, -1 on unknown item type, unsupported range
+ * (items[].last) or when more than max_elem items are supplied.
+ */
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;		/* next slot in match->flow_elem[] */
+	int iter_idx = 0;	/* current RTE item */
+	int type = -1;		/* converted FLOW_ELEM_TYPE_* value */
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	/* Lazily build the RTE -> filter conversion tables on first use */
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			/* Vendor-private tunnel item maps outside the table */
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			/* Debug-only dump of spec/mask per item type.
+			 * NOTE(review): elem_list_str is indexed with the raw
+			 * RTE item type without an upper-bound check - verify
+			 * it covers all non-negative item values.
+			 */
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				/* NOTE(review): spec/mask are dereferenced
+				 * without NULL checks here, unlike the other
+				 * cases above - confirm both are guaranteed.
+				 */
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			/* spec/mask are stored by reference - caller's item
+			 * memory must stay valid for the flow's lifetime.
+			 */
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert RTE flow actions into the filter layer's action list for the
+ * vswitch (full offload) profile.
+ *
+ * The FPGA needs a MARK action to drive color/flow statistics: when the
+ * caller supplies one, its id is remapped through create_flow_stat_id();
+ * when none is present, a MARK is inserted in front of the END action.
+ * On success *flow_stat_id holds the allocated statistics id.
+ *
+ * Returns 0 on success, -1 on unknown/unsupported action or overflow of
+ * max_elem entries.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;		/* next slot in action->flow_actions[] */
+	int iter_idx = 0;	/* current RTE action */
+	int type = -1;		/* converted FLOW_ACTION_TYPE_* value */
+
+	if (!actions)
+		return -1;
+
+	/* Lazily build the RTE -> filter conversion tables on first use */
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Sentinel: "no statistics id allocated yet" */
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			/* Vendor-private tunnel-set maps outside the table */
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			/* NOTE(review): unreachable - this branch is only
+			 * entered when type >= 0; confirm and drop.
+			 */
+			case -1:
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* NOTE(review): PRIX64 expects uint64_t but
+				 * the argument is cast to unsigned long long;
+				 * cast to (uint64_t) instead - verify.
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/* Tunnel definition is itself an item list;
+				 * convert it recursively via
+				 * create_match_elements().
+				 */
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Remap the user mark to an FPGA
+					 * color statistics id.
+					 */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert RTE flow actions into the filter layer's action list for the
+ * inline profile.  RAW_ENCAP/RAW_DECAP payloads are parsed into element
+ * lists via interpret_raw_data(), and QUEUE indices are shifted by
+ * queue_offset (used for VF/vDPA ports whose queues do not start at 0).
+ *
+ * Returns 0 on success, -1 on unsupported action or overflow of
+ * max_elem entries.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;	/* indexes both actions[] and action->flow_actions[] */
+	int type = -1;	/* converted FLOW_ACTION_TYPE_* value */
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is offloadable */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				/* NOTE(review): PRIX64 expects uint64_t but
+				 * the argument is cast to unsigned long long;
+				 * cast to (uint64_t) instead - verify.
+				 */
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				/* Parse the raw bytes into elements so the
+				 * filter layer knows what is being removed.
+				 */
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				/* Parse the raw bytes (with preserve mask)
+				 * into elements describing the new headers.
+				 */
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				/* Shift to the queue range owned by this
+				 * port (VF/vDPA queues do not start at 0).
+				 */
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible action - pass conf through */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..6b19c2308e
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when the handle is a raw driver handle merely cast to
+ * struct rte_flow * (inline profile, group > 0), i.e. it does not point
+ * into the nt_flows[] table and must not be dereferenced.
+ *
+ * Compare as integers: relational comparison of pointers into different
+ * objects is undefined behavior (C11 6.5.8).
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const uintptr_t first_element = (uintptr_t)&nt_flows[0];
+	const uintptr_t last_element = (uintptr_t)&nt_flows[MAX_RTE_FLOWS - 1];
+	const uintptr_t candidate = (uintptr_t)flow;
+
+	return candidate < first_element || candidate > last_element;
+}
+
+/*
+ * Convert an RTE flow description (attr/items/actions) into the filter
+ * layer representation, filling attribute, match and action.  For the
+ * vswitch profile *flow_stat_id receives the allocated color statistics
+ * id; for the inline profile queue indices are shifted for VF/vDPA
+ * ports.
+ *
+ * Returns 0 on success, -1 with *error set on failure.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/* Check before any dereference of dev - the original code read
+	 * dev->p_drv and dev->type first and checked for NULL afterwards.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Destroy a flow.  Handles both real nt_flows[] entries and typecast
+ * driver handles (inline profile, group > 0).
+ *
+ * Returns the filter layer result (0 on success).
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		/* Raw driver handle: it does not point into nt_flows[] and
+		 * carries no statistics fields, so return before the
+		 * stat-clearing below - the original cleared flow->stat_*
+		 * through this foreign pointer on success.
+		 */
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	/* Release the table slot and its statistics id under flow_lock */
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * Validate a flow description without creating it: convert it to the
+ * filter representation, then let the filter layer judge it.
+ *
+ * Returns 0 when valid, -EINVAL on conversion failure, or the (negative)
+ * filter layer result with *error filled in.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	int res = flow_validate(dev->flw_dev, match.flow_elem,
+				action.flow_actions, &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * Create a flow.  For the inline profile with attribute group > 0 the
+ * driver handle is returned directly, typecast to struct rte_flow *
+ * (see is_flow_handle_typecast); otherwise a free nt_flows[] slot is
+ * claimed under flow_lock and returned.
+ *
+ * Returns the flow handle, or NULL on failure with *error set.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline profile, group > 0: no table entry, return the driver
+	 * handle typecast to struct rte_flow *.
+	 */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim the first free table slot; a slot is only taken when the
+	 * statistics id from conversion is valid.
+	 */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		/* flow_create() runs outside the lock; on failure the slot
+		 * and statistics id are released again.
+		 */
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (all-port) statistics refresh. */
+uint64_t last_stat_rtc;
+
+/*
+ * Poll and accumulate RX/TX statistics for one port.
+ *
+ * Rate limited to once per second per port (internals->last_stat_rtc) and,
+ * for the global color counters, once per second across all ports
+ * (last_stat_rtc).  RX counters come from the adapter per-port totals; TX
+ * counters come either from the per-port totals (physical ports, with LAG
+ * aggregation into port 0 when active) or from the host-buffer queue
+ * counters (virtual ports).  Returns 0 on success or skip, -1 on invalid
+ * arguments.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/*
+	 * if_index is used to index per-port arrays, so the bound must be
+	 * exclusive: NUM_ADAPTER_PORTS_MAX itself is out of range.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index >= NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* no error source available here */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow .query handler.
+ * Only RTE_FLOW_ACTION_TYPE_COUNT queries are supported; counters are
+ * refreshed via poll_statistics() before being reported.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct rte_flow_query_count *counters;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	/* Typecast handles carry no local per-flow state to query. */
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Refresh the software counters before reading them. */
+	poll_statistics(internals);
+
+	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+
+	counters = (struct rte_flow_query_count *)data;
+	if (counters) {
+		if (!flow) {
+			counters->hits_set = 0;
+			counters->bytes_set = 0;
+		} else {
+			counters->hits = flow->stat_pkts;
+			counters->hits_set = 1;
+			counters->bytes = flow->stat_bytes;
+			counters->bytes_set = 1;
+
+			if (counters->reset) {
+				flow->stat_pkts = 0UL;
+				flow->stat_bytes = 0UL;
+				flow->stat_tcp_flags = 0;
+			}
+		}
+	}
+
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/*
+ * Debug helper: dump all fields of an rte_flow_tunnel to the log.
+ * Only compiled under DEBUGGING.
+ */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	/* htons: ports are printed byte-swapped - presumably stored in
+	 * network byte order; confirm against the producer of the struct.
+	 */
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * PMD-provided action list handed back from tunnel_decap_set:
+ * slot [0] is the NTNIC tunnel-set marker, slot [1] is filled in per call.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow tunnel offload: provide PMD actions implementing tunnel decap.
+ * Only VXLAN tunnels are supported; anything else returns -ENOTSUP.
+ *
+ * NOTE(review): writing _pmd_actions[1] replaces the end-of-list entry and
+ * mutates shared static state across calls/ports - confirm callers rely on
+ * *num_of_actions only and that concurrent calls cannot occur.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* PMD-provided match item returned from tunnel_match (read-only template). */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow tunnel offload: provide the PMD item that matches tunneled
+ * traffic.  Always returns the single static NTNIC tunnel item.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow tunnel offload: reconstruct tunnel info for a received mbuf.
+ *
+ * The FDIR mark set by the adapter encodes the origin: bits 31..24 of
+ * hash.fdir.hi hold the port id, bits 23..0 of hash.fdir.lo hold the
+ * tunnel stat id.  The stored tunnel definition is looked up from those
+ * and copied into *info.  Returns 0 on success, -EINVAL when the mbuf
+ * carries no mark or no tunnel definition is found.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* TTL/TOS are not stored in tuncfg; fixed defaults are used. */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DO_NOT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * Release hook for PMD-provided decap actions.  The actions live in a
+ * static array (nothing was allocated), so this is a no-op.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * Release hook for the PMD-provided tunnel match item.  The item is a
+ * static singleton (nothing was allocated), so this is a no-op.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow operations exported by the PMD.
+ * .flush is not implemented; tunnel offload hooks are provided.
+ */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Create a flow from pre-converted attribute/match/action structures on
+ * behalf of a client.  Returns the flow handle, or NULL with *error set.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* A (DPDK flag, NT flag) pair used by the RSS translation table. */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands NAME into the { RTE_NAME, NT_NAME } constant pair. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Translation table between DPDK RSS hash-field flags and their NTNIC
+ * counterparts; drives the lookup helpers below in both directions.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Map a single DPDK RSS flag to its NT counterpart.
+ * Returns a pointer into the translation table, or NULL when unmapped.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); idx++) {
+		if (rte_eth_rss_to_nt[idx].first == rte_flag)
+			return &rte_eth_rss_to_nt[idx].second;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Map a single NT RSS flag back to its DPDK counterpart.
+ * Returns a pointer into the translation table, or NULL when unmapped.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(rte_eth_rss_to_nt); idx++) {
+		if (rte_eth_rss_to_nt[idx].second == nt_flag)
+			return &rte_eth_rss_to_nt[idx].first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Translate a DPDK RSS hash-field bitmask into the NTNIC representation.
+ * Bits with no NT counterpart are logged and dropped from the result.
+ * (Uses standard 'unsigned int' instead of the non-portable glibc 'uint',
+ * and %u to match the unsigned log argument.)
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Translate an NTNIC RSS field set back into a DPDK RSS bitmask.
+ * Every NT flag must have a DPDK counterpart (NT flags are a subset),
+ * enforced by the assert.  (Uses standard 'unsigned int' instead of the
+ * non-portable glibc 'uint'.)
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_HSHCONFIG_H__
+#define __NTNIC_HSHCONFIG_H__
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* __NTNIC_HSHCONFIG_H__ */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* MSB of a 32-bit id: when set the profile/meter id denotes egress. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/*
+ * Look up a meter profile by id in the per-device list.
+ * Returns the entry, or NULL when not found.
+ */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	/* The unbraced if is the loop body; break leaves 'profile' at the hit. */
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
+/*
+ * rte_mtr .meter_profile_add handler.
+ *
+ * Profiles are accepted for virtual ports, or for egress profiles
+ * (profile id MSB set) on any port; ingress on physical ports is
+ * rejected.  The profile is copied into a list entry owned by the device.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): __func__ is passed for both %s fields; other files
+	 * pass __FILE__ as the second argument - confirm which was intended.
+	 */
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr .meter_profile_delete handler.
+ * Removes the profile from the device list and frees it.
+ * NOTE(review): meters still referencing the profile are not checked here.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/*
+ * Look up a meter object by id in the per-device list.
+ * Returns the entry, or NULL when not found.
+ */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *mtr = NULL;
+
+	/* The unbraced if is the loop body; break leaves 'mtr' at the hit. */
+	LIST_FOREACH(mtr, &dev_priv->mtrs, next)
+	if (mtr->mtr_id == mtr_id)
+		break;
+
+	return mtr;
+}
+
+/* A QoS rate split into an integer part and a 1/1024 fractional part. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Convert a rate in bytes/s to bytes/period, expressed as an integer part
+ * plus a fractional part in units of 1/1024.  The period length depends on
+ * the clock frequency and related parameters; the resulting formula is:
+ *     f[bytes/period] = x[bytes/s] * period_ps / 10^12
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	const uint64_t picos_per_second = 1000000000000ull; /* 10^12 ps */
+	const uint64_t total = byte_per_second * period_ps;
+	struct qos_integer_fractional result;
+
+	result.integer = total / picos_per_second;
+	result.fractional =
+		1024ull * (total % picos_per_second) / picos_per_second;
+	return result;
+}
+
+/* Physical-port variant: period is 8 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Virtual-port variant: period is 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr meter-enable: program the meter's profile into hardware.
+ *
+ * Egress profiles (id MSB set) are programmed through EPP per-port QoS
+ * (vport for virtual ports, txp for physical); ingress goes through the
+ * DBS TX QoS block.  Returns 0 or a negative rte_mtr error.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* NOTE(review): function-static, so the global DBS rate is set only
+	 * once across ALL devices/ports - confirm that is intended.
+	 */
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* Clamp to the minimum programmable non-zero rate. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			/* Clamp to the minimum programmable non-zero rate. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/*
+ * Disable ingress TX QoS for this port in the DBS block
+ * (zeroes enable flag, IR and BS).
+ */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr meter-disable: clear the hardware QoS configuration for the
+ * meter.  Egress meters (id MSB set) zero the EPP vport/txp QoS; ingress
+ * meters disable the DBS TX QoS for the port.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/*
+ * MTR object create.
+ *
+ * Rejects ingress meters on physical ports, requires the referenced
+ * profile to exist, allocates the meter object, links it into the device
+ * list and optionally enables it immediately.
+ * NOTE(review): if eth_meter_enable() fails, the meter object remains in
+ * the list - confirm the caller is expected to destroy it.
+ */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/*
+ * MTR object destroy.
+ *
+ * Clears the meter's hardware QoS configuration (EPP for egress meters,
+ * DBS for ingress), unlinks the object from the device list and frees it.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		/* ENODEV for "not found", matching eth_meter_profile_delete()
+		 * (was EEXIST, which means "already exists").
+		 */
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr capabilities for the inline (FLM) profile.
+ * Reports color-blind, byte-mode, RFC2698-only metering with limits taken
+ * from the flow API (flow_mtr_meters_supported / meter_policy_n_max).
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* Max rate: 12-bit mantissa << 15-bit max exponent, scaled by 1099 -
+	 * presumably the HW rate encoding; confirm against the FLM spec.
+	 */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * Add an inline (FLM) meter profile.
+ *
+ * Only trTCM RFC 2698 in byte mode is accepted, and the committed and
+ * peak rate/burst pairs must be equal (single-bucket hardware meter).
+ * Returns 0 on success, or a negative errno set via rte_mtr_error_set().
+ *
+ * Fix: @error was annotated __rte_unused although it is passed to
+ * rte_mtr_error_set() on every failure path; the bogus annotation is
+ * removed.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* NOTE(review): profile ids are range-checked against the *policy*
+	 * maximum; confirm the profile and policy tables share a size.
+	 */
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	/* Hardware has one bucket, so CIR/CBS must mirror PIR/PBS. */
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete an inline meter profile by writing an all-zero profile entry.
+ *
+ * Fix: @dev and @error were annotated __rte_unused even though the body
+ * dereferences dev->data->dev_private and passes @error to
+ * rte_mtr_error_set(); the incorrect annotations are removed.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	/* Zero rate/burst clears the profile slot in hardware. */
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Add an inline meter policy.
+ *
+ * The hardware policy is fixed: GREEN passes traffic, YELLOW and RED
+ * drop it.  The supplied action lists are therefore only validated to
+ * match that shape (GREEN: empty/VOID/PASSTHRU, YELLOW and RED: DROP).
+ * Returns 0 on success, negative errno via rte_mtr_error_set() otherwise.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* GREEN must be "pass": END alone, or VOID/PASSTHRU followed by END. */
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* YELLOW and RED must both be a single DROP action. */
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete an inline meter policy.  Only validates the id; the hardware
+ * policy table needs no explicit teardown here.
+ *
+ * Fix: @policy_id and @error were annotated __rte_unused although both
+ * are used in the range check below; the bogus annotations are removed
+ * (@dev really is unused and keeps its annotation).
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * Create an inline (FLM) meter.
+ *
+ * Constraints enforced here: color-blind only (no DSCP table), stats
+ * limited to green packets/bytes, meter must be enabled and shared, and
+ * all ids must be in range.  On success the meter is offloaded via
+ * flow_mtr_create_meter().  Returns 0 or a negative errno set via
+ * rte_mtr_error_set().
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Destroy an inline (FLM) meter and remove the hardware offload.
+ * Returns 0 on success, negative errno via rte_mtr_error_set() otherwise.
+ *
+ * Fix: @error was annotated __rte_unused although it is passed to
+ * rte_mtr_error_set() on both failure paths; annotation removed.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Adjust an offloaded meter's bucket fill level.
+ *
+ * This (ab)uses the rte_mtr stats_update hook: bit 63 of @adjust_value
+ * must be set to request an adjustment, and the low 32 bits carry the
+ * actual value handed to flm_mtr_adjust_stats().  Values above
+ * UINT32_MAX are rejected.  Returns 0 or a negative errno via
+ * rte_mtr_error_set().
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* Bit 63 acts as the "this is an adjustment request" flag. */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the flag bit; the remainder is the adjustment payload. */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Read (and optionally clear) green packet/byte counters for a meter.
+ * Only the GREEN color slots of @stats are populated; the rest stay 0.
+ * Returns 0, or a negative errno if @mtr_id is out of range.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* rte_mtr ops for the vswitch FPGA profile (EPP-based metering). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* rte_mtr ops for the inline FPGA profile (FLM-based metering). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_eth mtr_ops_get callback: select the meter ops table matching the
+ * adapter's FPGA profile (vswitch or inline).  @ops receives a pointer
+ * to a const struct rte_mtr_ops *.  Returns 0, or -1 for unsupported
+ * profiles (unknown/capture).
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+int eth_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port vDPA state; one slot per registered vhost-user socket. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path; "" = free slot */
+	struct rte_vdpa_device *vdev;	/* associated rte_vdpa device */
+	int vid;			/* vhost device id once connected */
+	uint32_t index;			/* base HW queue/ring index */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override applied on Tx */
+	int rxqs;			/* number of rx queues */
+	int txqs;			/* number of tx queues */
+	uint64_t flags;			/* rte_vhost driver register flags */
+	struct rte_pci_addr addr;	/* backing PCI address */
+};
+
+/* Fixed port table and count of slots handed out so far. */
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate a vDPA device + queue id into its hardware ring index,
+ * host (VF) id and representor port.  @rx selects validation against
+ * the rx or tx queue count.  Returns 0 on success, -1 if the device is
+ * unknown or the queue id exceeds the configured count.
+ *
+ * Fix: the tx branch logged "rxqs" and printed vport[i].rxqs instead of
+ * the tx queue count it actually validated against.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			} else {
+				if (queue_id >= vport[i].txqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+				*hw_index = vport[i].index + queue_id;
+			}
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register a vDPA port backed by PCI VF @vdev and start its vhost-user
+ * client on @socket_path.  On success *vhid receives the new port slot
+ * index.  Returns 0 on success, -1 on failure.
+ *
+ * Fix: nb_vpda_devcnt was used to index the fixed-size vport[] table
+ * without a bounds check; registering more than MAX_VDPA_PORTS ports
+ * would write out of bounds.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	/* Guard the fixed-size vport[] table before claiming a slot. */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Too many vDPA ports (max %lu)\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Tear down a registered vhost-user vDPA port: detach the vDPA device
+ * from the socket and unregister the vhost driver, then free the slot.
+ *
+ * NOTE(review): the function returns after handling the FIRST occupied
+ * slot, so a single call closes at most one port despite the plural-
+ * sounding name.  Confirm whether callers invoke this once per port or
+ * whether the early return should be removed to close all ports.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Clearing ifname marks the slot as free. */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+/* Debug-only helper: maps virtio feature bit numbers to names so a
+ * negotiated feature mask can be printed human-readably.
+ */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+];
+
+/* Print the name of every feature bit set in @features. */
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		if ((1ULL << virt_features[i].id) ==
+				(features & (1ULL << virt_features[i].id)))
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost new_device callback: bind the connecting vhost device to its
+ * vport slot (matched by socket path), wait up to ~2 s for the PMD
+ * instance to become ready, then record the negotiated ring mode
+ * (packed or split) on the virtual port.  Requires IN_ORDER or
+ * RING_PACKED to have been negotiated.  Returns 0 on success, -1 on
+ * failure.
+ *
+ * Fix: the negotiated-features log used "%016lx" for a uint64_t, which
+ * is wrong on ILP32 targets; print via an unsigned long long cast.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the port slot registered for this socket path. */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll up to 2000 * 1 ms for the PMD instance to come up. */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016llx\n",
+	       (unsigned long long)negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost destroy_device callback: log the teardown and mark the matching
+ * virtual port's negotiation state as NONE (link down).  The vport slot
+ * itself is not freed here (that happens in nthw_vdpa_close()).
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost device lifecycle callbacks registered per socket in nthw_vdpa_start(). */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register and start the vhost-user driver for one vDPA port: register
+ * the socket with the port's flags, attach lifecycle callbacks, mask
+ * out virtio features the hardware path cannot honor (offloads, ctrl
+ * queue, event idx, indirect descriptors, MTU), then start the driver.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Disable everything the HW-virtio datapath does not support. */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF's PCI vendor/device id to a human-readable adapter
+ * name; returns "Unknown" for anything unrecognized.
+ */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+			return "NT200A02";
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+			return "NT50B01";
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for a Napatech virtual function: create the vDPA device for
+ * the VF, then create its DPDK ethdev interface.  Returns 0 on success,
+ * -1 (or the ethdev-creation result) on failure.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/*
+ * PCI remove for a Napatech VF: tear down the vDPA device first, then
+ * the DPDK ethdev interface (reverse of nt_vf_pci_probe()).
+ */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI id table: the two supported Napatech VF device ids. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* VF driver registration; binding requires the vfio-pci kernel module. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..4125bc50c9
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Shadow state for one virtqueue (guest-physical ring addresses and
+ * resume indices) plus the hardware queue it is bound to.
+ */
+struct vring_info {
+	uint64_t desc;		/* descriptor ring GPA */
+	uint64_t avail;		/* avail ring GPA */
+	uint64_t used;		/* used ring GPA */
+	uint16_t size;		/* number of descriptors */
+
+	uint16_t last_avail_idx;	/* resume point: next avail index */
+	uint16_t last_used_idx;		/* resume point: next used index */
+
+	int vq_type;			/* split vs packed ring */
+	struct nthw_virt_queue *p_vq;	/* backing hardware queue */
+
+	int enable;			/* vring currently enabled */
+};
+
+/* Hardware-facing state: negotiated virtio features and per-vring info
+ * (rx + tx per queue pair, hence the * 2).
+ */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA instance: PCI/vDPA handles, VFIO fds, vhost id, and
+ * lifecycle flags manipulated with __atomic builtins from callbacks.
+ */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;	/* backing PCI VF */
+	struct rte_vdpa_device *vdev;	/* registered vDPA device */
+
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;		/* vhost device id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;	/* supported virtio feature mask */
+
+	struct ntvf_vdpa_hw hw;
+
+	/* Lifecycle flags; accessed atomically across threads. */
+	volatile int32_t started;
+	volatile int32_t dev_attached;
+	volatile int32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile int32_t dma_mapped;	/* guest memory mapped via VFIO */
+	volatile int32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Node of the global list of live vDPA instances, guarded by
+ * internal_list_lock.
+ */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* DPDK log type id for this driver's messages. */
+int ntvf_vdpa_logtype;
+
+/* Look up the internal_list node owning @vdev; returns NULL when the
+ * device is not registered.  Takes internal_list_lock while scanning.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->vdev == vdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/* Look up the internal_list node owning PCI device @pdev; returns NULL
+ * when the device is not registered.  Takes internal_list_lock.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	struct internal_list *entry;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(entry, &internal_list, next)
+	{
+		if (entry->internal->pdev == pdev) {
+			match = entry;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Set up VFIO for the VF and cache the container/group/device fds on
+ * @internal.  All fds are initialized to -1 so a failed setup leaves a
+ * recognizable state.  Returns 0 on success, -1 on failure.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (@do_map != 0) or unmap the guest's memory regions for VFIO DMA
+ * on this VF.  Rejects redundant map/unmap requests via the atomic
+ * dma_mapped flag.  Returns 0 on success, negative on failure; the
+ * rte_vhost memory table is always freed.
+ *
+ * Fix: the per-region log line read "GPA 0xllx" - a mangled format
+ * specifier that printed literal text and left the GPA vararg consumed
+ * by nothing; restored to "GPA 0x%" PRIX64 like the HVA field.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Refuse double-map and unmap-without-map. */
+	if ((do_map && __atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED)) ||
+			(!do_map && !__atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 1, __ATOMIC_RELAXED);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 0, __ATOMIC_RELAXED);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate host virtual address @hva to a guest physical address using
+ * the vhost memory table of @vid.  Returns 0 when no region contains @hva
+ * or the table cannot be fetched; callers treat 0 as failure.
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	struct rte_vhost_mem_region *reg;
+	uint64_t gpa = 0;
+	uint32_t i;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (i = 0; i < mem->nregions; i++) {
+		reg = &mem->regions[i];
+		if (hva >= reg->host_user_addr &&
+				hva < reg->host_user_addr + reg->size) {
+			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Create the hardware virt queue backing vring index @vring of the guest:
+ * translate the desc/avail/used rings to guest-physical addresses, fetch
+ * the last avail/used indices, then set up an Rx queue (even index) or a
+ * Tx queue (odd index) in the FPGA via the nthw DBS layer.
+ *
+ * Only packed rings or split rings with VIRTIO_F_IN_ORDER are supported;
+ * otherwise the function logs a warning and returns 0 without creating a
+ * hardware queue.  Returns 0 on success, non-zero on failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* HVA -> GPA translation for the three virtio ring components */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Even vring index = Rx queue, odd = Tx queue */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Receive virt queue */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				/* NOTE(review): unreachable - the two branches
+				 * above already cover both vring parities.
+				 */
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Start the datapath for @internal: read the negotiated features and the
+ * number of vrings, then map DMA and create/enable vrings.  For the INLINE
+ * FPGA profile the first two vrings are created here; for other profiles
+ * only vring 0 is created and the rest are enabled via set_vring_state.
+ * Always returns 0.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the datapath: push the current avail/used indices back to vhost,
+ * then release every hardware virt queue (packed ring or in-order split
+ * ring only).  Always returns 0; per-vring release failures are logged.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	/* Hand the ring positions back so vhost can resume correctly */
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type 0 = Rx, 1 = Tx (set in create_vring) */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Size of the vfio_irq_set buffer: one eventfd per queue in each direction
+ * (2 per queue pair) plus the device's own interrupt vector.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Wire the guest's per-vring callfd eventfds to the VF's MSI-X vectors via
+ * the VFIO_DEVICE_SET_IRQS ioctl.  Vector 0 is the device interrupt; vring
+ * i uses vector RTE_INTR_VEC_RXTX_OFFSET + i.  Returns 0 on success (also
+ * when too many vectors are requested, since polling still works), -1 on
+ * ioctl failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* NOTE(review): assumes NTVF_VDPA_MAX_INTR_VECTORS <=
+	 * NTVF_VDPA_MAX_QUEUES * 2 + 1 so irq_set_buf cannot overflow below -
+	 * confirm against the defines.
+	 */
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 1, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Disable all MSI-X vectors for the VF via VFIO (count = 0, DATA_NONE).
+ * No-op when interrupts are not currently enabled.  Returns 0 on success,
+ * -1 on ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int len;
+
+	if (!__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	len = sizeof(struct vfio_irq_set);
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = len;
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 0, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the running state with the started/dev_attached flags:
+ * start the datapath when a device is both started and attached, or stop
+ * it (release queues, disable interrupts, unmap DMA) when it is not.
+ * Serialized by internal->lock.  Returns 0 on success or the first
+ * failing step's error code.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(__atomic_load_n(&internal->started, __ATOMIC_RELAXED) &&
+			 __atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 1, __ATOMIC_RELAXED);
+	} else if (__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(!__atomic_load_n(&internal->started, __ATOMIC_RELAXED) ||
+			 !__atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* BUGFIX: log before the jump; the NT_LOG used to sit
+			 * after "goto err" and was therefore unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 0, __ATOMIC_RELAXED);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vDPA op: a vhost device with id @vid was configured.  Record the vid,
+ * mark the device attached and kick the datapath state machine.
+ * Returns 0 on success, -1 when @vid maps to no known vDPA device.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	internal->vid = vid;
+
+	__atomic_store_n(&internal->dev_attached, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: the vhost device @vid is closing.  Detach it, stop the
+ * datapath, and clear the cached virt queue pointers.
+ * Returns 0 on success, -1 when @vid maps to no known vDPA device.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+
+	__atomic_store_n(&internal->dev_attached, 0, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	uint32_t i;
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (i = 0; i < hw->nr_vring; i++)
+		hw->vring[i].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: vhost features were (re)negotiated for @vid.  Succeeds unless
+ * dirty-page logging (live migration) is requested: the relay core needed
+ * for that is not present, so the request is rejected with -1.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/*
+ * vDPA op: return the cached VFIO group fd for @vid, or -1 when @vid maps
+ * to no known vDPA device.
+ */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_group_fd;
+}
+
+/*
+ * vDPA op: return the cached VFIO device fd for @vid, or -1 when @vid maps
+ * to no known vDPA device.
+ */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_dev_fd;
+}
+
+/*
+ * vDPA op: report the number of queue pairs supported by @vdev via
+ * @queue_num.  Returns 0 on success, -1 for an unknown device.
+ */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+	*queue_num = list->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the virtio feature bits supported by @vdev via
+ * @features.  Returns 0 on success, -1 for an unknown device or a NULL
+ * output pointer.
+ */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = list->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the supported vhost-user protocol feature bits via
+ * @features.  Returns 0 on success, -1 on a NULL output pointer.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable VFIO MSI-X interrupts and then enable Rx/Tx on every vring.
+ * Called once after the last vring has been created/enabled.
+ * Returns 0 on success, or the error from enabling interrupts.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* BUGFIX: report through the driver log instead of a bare
+		 * printf(), consistent with the rest of this file.
+		 */
+		NT_LOG(ERR, VDPA, "%s: failed to enable VFIO interrupts\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx (even index) and Tx (odd index) for all vrings */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vDPA op: enable or disable vring @vring of vhost device @vid.
+ * Disabling stops the matching Rx/Tx hardware queue; enabling either
+ * re-enables an existing queue or maps DMA and creates it.  After the
+ * last vring is enabled, interrupts and all queues are turned on via
+ * ntvf_vdpa_configure_queue().  Returns 0 on success, negative on error.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Two vrings (Rx+Tx) per queue pair */
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA driver ops registered with the vhost library.  migration_done and
+ * get_notify_area are intentionally unimplemented (NULL).
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe: allocate per-device state, set up VFIO, register the device
+ * with the vDPA framework, add it to the global list and start the
+ * datapath state machine.  Returns 0 on success, -1 on failure (all
+ * allocations made here are released on the error path).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* BUGFIX: was "return -1", leaking list and internal */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	__atomic_store_n(&internal->started, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	/* rte_free(NULL) is a no-op */
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove: stop the datapath, tear down VFIO, unregister from the vDPA
+ * framework, unlink from the global list and free all per-device state.
+ * Returns 0 on success, -1 for an unknown device.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	__atomic_store_n(&internal->started, 0, __ATOMIC_RELAXED);
+
+	/* 'started' cleared above makes this stop the datapath */
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Empty PCI id table (vendor_id 0 terminator only): the driver does not
+ * auto-bind by id; devices are attached explicitly.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver descriptor registered via RTE_PMD_REGISTER_PCI below. */
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Public interface of the NTNIC VF vDPA driver. */
+
+/* NOTE(review): identifiers with a leading double underscore are reserved
+ * for the implementation (C standard 7.1.3) - consider NTNIC_VF_VDPA_H.
+ */
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+extern int ntvf_vdpa_logtype;
+
+/* Define LOG_FUNC_TRACE to emit per-function enter/leave debug logging. */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive a flat device number from the PCI address: the low 5 bits of the
+ * device id and the 3 function bits, i.e. (devid & 0x1f) * 8 + function.
+ * Yields 0..255, used as the index into the vfio_list table below.
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	return ((pdev->addr.devid & 0x1f) << 3) + ((pdev->addr.function) & 0x7);
+}
+
+/* Internal API */
+
+/* Per-device VFIO state, indexed by the number from nt_vfio_vf_num(). */
+struct vfio_dev {
+	int container_fd;	/* VFIO container fd (or default container) */
+	int group_fd;		/* IOMMU group fd bound to the container */
+	int dev_fd;		/* device fd from the interrupt handle */
+	uint64_t iova_addr;	/* next IOVA to hand out (bumped by 1G) */
+};
+
+static struct vfio_dev vfio_list[256];
+
+/* Bounds-checked lookup; returns NULL for out-of-range device numbers. */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 || vf_num > 255)
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+
+/*
+ * Set up VFIO for a PCI device (PF or VF).
+ *
+ * PF0 (vf_num == 0) shares EAL's default VFIO container; every other
+ * device gets a private container so its IOMMU mappings are isolated.
+ * The IOMMU group of the device is bound to that container and, for
+ * non-PF0 devices, the device is mapped through the vfio-pci driver.
+ *
+ * Returns the device number (>= 0) on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/*
+	 * rte_vfio_get_group_num() returns 1 on success, 0 if the device is
+	 * not managed by VFIO and <0 on error. Without this check
+	 * iommu_group_num would be used uninitialized on failure.
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) <= 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Cannot get IOMMU group for %s.\n",
+		       devname);
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Only destroy containers we created; never the shared default one. */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Tear down the VFIO container for a device number.
+ * Returns 0 on success, -1 for an out-of-range device number.
+ *
+ * NOTE(review): for vf_num 0 container_fd is RTE_VFIO_DEFAULT_CONTAINER_FD;
+ * destroying the shared default container here looks unintended — confirm.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map a host virtual region into the device's IOVA space.
+ *
+ * Exactly-1G regions are mapped at their enclosing 1G-aligned base so that
+ * hugepage-backed memory maps on natural boundaries; the IOVA of the
+ * original (possibly unaligned) address is returned through *iova_addr.
+ * IOVAs are handed out sequentially in 1G steps starting at START_VF_IOVA.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Use conversion specifiers that match the argument types: pointers
+	 * are cast to uint64_t for PRIX64 and the 64-bit size uses PRIu64
+	 * (a mismatched printf specifier is undefined behavior).
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	/* Reserve a full 1G window per mapping so IOVA ranges never overlap. */
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a mapping created by nt_vfio_dma_map().
+ * The same 1G alignment adjustment is applied so the (base, IOVA) pair
+ * matches what was passed to rte_vfio_container_dma_map().
+ * A container_fd of -1 means the container is already gone; treated as
+ * success. Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		/* Map was registered at the aligned base; rewind the IOVA too. */
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+/*
+ * Map a region using the guest physical address directly as IOVA,
+ * as required for vDPA datapaths. Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* size is uint64_t: print with PRIu64, not %d (type mismatch is UB). */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%" PRIu64 "\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo a mapping created by nt_vfio_dma_map_vdpa().
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       res, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the VFIO container fd for a device number, or -1 if invalid. */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: was a copy-paste of the "device remove" text. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the VFIO group fd for a device number, or -1 if invalid. */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: was a copy-paste of the "device remove" text. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the VFIO device fd for a device number, or -1 if invalid. */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Message fixed: was a copy-paste of the "device remove" text. */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/* RTE_INIT declares nt_vfio_init as a constructor run at EAL load time. */
+RTE_INIT(nt_vfio_init);
+
+/*
+ * Register this module's DMA map/unmap callbacks with the nt_util layer
+ * so other parts of the driver can map memory without including VFIO
+ * headers directly.
+ */
+static void nt_vfio_init(void)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Create/destroy per-device VFIO state; setup returns the device number. */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the fds recorded during nt_vfio_setup(); -1 on bad id. */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* Driver-allocated IOVA mapping; the chosen IOVA is returned via iova_addr. */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA mapping where the guest physical address is used as the IOVA. */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..c0e67ba03d
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* One extended-stat descriptor: display name plus where to read it from. */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source;		/* 1 = RX counters, 2 = TX counters, 3 = FLM */
+	unsigned int offset;	/* byte offset of the uint64_t counter field */
+};
+
+/*
+ * Extended stats for VSwitch (virtualization) adapters.
+ * source: 1 = RX port counters, 2 = TX port counters (no FLM entries).
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stats for Capture/Inline adapters - implements RMON counters.
+ * Used when the FLM module version is < 0.18; the extra FLM 0.20 counters
+ * present in the v2 table below are absent here.
+ * source: 1 = RX port counters, 2 = TX port counters, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stats for Capture/Inline adapters - implements RMON counters.
+ * Used when the FLM module version is >= 0.18; extends the v1 table with
+ * the FLM 0.20 counters at the end.
+ * source: 1 = RX port counters, 2 = TX port counters, 3 = FLM counters.
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ * Sized to the largest of the stat tables so it fits whichever table is
+ * active; nthw_xstats_reset() snapshots counters here and the get
+ * functions subtract the snapshot to present values "since last reset".
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = {{ 0 }};
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+
+/*
+ * Fill 'stats' with up to n extended stats for one port.
+ * Selects the descriptor table by adapter type (vswitch vs capture) and,
+ * for capture, by FLM version; each value is the raw counter minus its
+ * reset snapshot. Returns the number of entries written.
+ *
+ * NOTE(review): counters are read via (uint64_t *)&ptr[offset]; this
+ * assumes the offsets address naturally aligned uint64_t fields in the
+ * stat structs — TODO confirm against the struct definitions.
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n && i < nb_names; i++) {
+		stats[i].id = i;
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			stats[i].value =
+				*((uint64_t *)&rx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 2:
+			/* TX stat */
+			stats[i].value =
+				*((uint64_t *)&tx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 3:
+			/* FLM stat: adapter-global, so reset row 0 is used */
+			if (flm_ptr) {
+				stats[i].value =
+					*((uint64_t *)&flm_ptr[names[i].offset]) -
+					nthw_xstats_reset_val[0][i];
+			} else {
+				stats[i].value = 0;
+			}
+			break;
+		default:
+			stats[i].value = 0;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*
+ * Like nthw_xstats_get(), but fetches only the stats whose ids are listed
+ * in 'ids'. Out-of-range ids are silently skipped. Returns the number of
+ * valid ids processed. Must be called with the stat mutex locked.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+	int count = 0;
+
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] < nb_names) {
+			switch (names[ids[i]].source) {
+			case 1:
+				/* RX stat */
+				values[i] =
+					*((uint64_t *)&rx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 2:
+				/* TX stat */
+				values[i] =
+					*((uint64_t *)&tx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 3:
+				/* FLM stat: adapter-global, reset row 0 */
+				if (flm_ptr) {
+					values[i] =
+						*((uint64_t *)&flm_ptr
+						  [names[ids[i]].offset]) -
+						nthw_xstats_reset_val[0][ids[i]];
+				} else {
+					values[i] = 0;
+				}
+				break;
+			default:
+				values[i] = 0;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * Snapshot the current counters into nthw_xstats_reset_val so subsequent
+ * reads report values relative to this point. flm_count_current is a
+ * live gauge, not an accumulating counter, so it is never snapshotted.
+ * Must be called with the stat mutex locked.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_ptr[names[i].offset]);
+			break;
+		case 2:
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_ptr[names[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM stat
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (flm_ptr && strcmp(names[i].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_ptr[names[i].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked
+ */
+
+/*
+ * Copy up to 'size' stat names into xstats_names. If xstats_names is
+ * NULL, return the total number of stats instead (standard ethdev
+ * query-size convention). Otherwise return the number of names copied.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size && i < nb_names; i++) {
+		strlcpy(xstats_names[i].name, names[i].name,
+			sizeof(xstats_names[i].name));
+		count++;
+	}
+
+	return count;
+}
+
+/*
+ * Copy the names for the requested stat ids. If xstats_names is NULL,
+ * return the total number of stats (ethdev query-size convention).
+ * Out-of-range ids are skipped; returns the number of names copied.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/*
+			 * Count only valid ids, consistent with
+			 * nthw_xstats_get_by_id(); previously count was
+			 * incremented for out-of-range ids as well.
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/*
+ * Extended statistics helpers for the ntnic PMD. The get/reset functions
+ * must be called with the stat mutex held; the name-query functions need
+ * no locking (see ntnic_xstats.c).
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v14 8/8] net/ntnic: adds socket connection to PMD
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-09-04 13:54   ` [PATCH v14 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-09-04 13:54   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-04 13:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  877 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5357 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+/* One node in a module's command-dispatch tree (walked by execute_function()). */
+struct func_s {
+	const char *param;	/* token to match; a NULL entry terminates the table */
+	struct func_s *sub_funcs;	/* next dispatch level, or NULL when 'func' is terminal */
+	/* leaf handler; gets the remaining command text via *data, returns REQUEST_OK/ERR */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+/*
+ * Build an error reply for 'module' and return REQUEST_ERR.
+ *
+ * On return *data (malloc'ed, owned by the caller) is laid out as:
+ *   bytes 0..3 : the 32-bit error code - written over the "----" placeholder
+ *   bytes 4..  : "<module>:<err_text>" including the 0-terminator
+ * and *len holds the total length. *len stays 0 when data is NULL or the
+ * allocation fails.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		/* 4-byte code + module + ':' + text + 0-terminator */
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal 4-byte reply carrying only a status code and return
+ * REQUEST_OK. *data is malloc'ed and owned by the caller; *len stays 0
+ * when data is NULL or the allocation fails.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (data) {
+		*data = malloc(sizeof(uint32_t));
+		if (*data) {
+			*len = sizeof(uint32_t);
+			*(uint32_t *)*data = (uint32_t)code;
+		}
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Walk 'func_list' using the comma-separated tokens of 'function' and invoke
+ * the matching leaf handler.
+ *
+ * 'function' is modified in place (strtok() writes 0-terminators). For each
+ * consumed token hdr->len is reduced, and the text following the token is
+ * handed on - either to the next dispatch level, or through *data to a leaf
+ * handler. Returns the handler's result, or REQUEST_ERR via ntconn_error()
+ * when a token is missing or not recognized.
+ *
+ * NOTE(review): strtok() keeps static state and is not thread-safe; this
+ * assumes requests are dispatched from a single thread - confirm, otherwise
+ * use strtok_r().
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	/* remainder after the consumed token; relies on hdr->len accounting from the caller */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+/* Operations a module registers with the ntconnect server. */
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             may contain a descriptive error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;	/* numeric code sent in the first 4 bytes of an error reply */
+	const char *err_text;	/* human-readable description */
+} ntconn_err_t;
+
+/* Look up the (code, text) pair for 'err_code'. */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+/* A registered module instance, kept in a singly linked list. */
+typedef struct ntconn_mod_s {
+	void *hdl;	/* module context, passed back through the 'op' callbacks */
+	struct pci_id_s addr;	/* PCI device this instance is registered for */
+	const ntconnapi_t *op;	/* module operations */
+
+	pthread_mutex_t mutex;	/* presumably serializes requests to this module - confirm */
+	struct ntconn_mod_s *next;	/* next registered module, or NULL */
+} ntconn_mod_t;
+
+/* Global ntconnect server state. */
+struct ntconn_server_s {
+	int serv_fd;	/* server socket file descriptor */
+	int running;	/* nonzero while the server loop should run - TODO confirm */
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;	/* CPU affinity used by the server (see ntconnect_init()) */
+};
+
+int ntconn_server_register(void *server);
+
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+/* Wire header preceding every request/reply on the socket. */
+struct ntconn_header_s {
+	uint16_t tag;	/* one of the NTCONN_TAG_* values */
+	uint16_t len;	/* length of the command string that follows the header */
+	uint32_t blob_len;	/* length of the binary blob following the command string */
+};
+
+/*
+ * PCI address packed into one 64-bit id; a pci_id of 0 acts as the list
+ * terminator in ntconn_server_s::pci_id_list.
+ */
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;	/* fills the union up to 64 bits */
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+/* Reply payload: one formatted "dddd:bb:dd.f" address per registered NIC. */
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];	/* 0-terminated strings */
+	int num_nics;	/* number of valid entries */
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/*
+	 * NOTE(review): this declares an array of 32 char POINTERS, not a
+	 * 32-char string; 'char fw_version[32]' looks intended - confirm
+	 * against the users before changing the wire layout.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard says FILTER but the file is ntconnect_api_flow.h -
+ * consider renaming; identifiers starting with '_' + uppercase are also
+ * reserved by the C standard.
+ */
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* NOTE(review): also defined (same value) in ntconnect_api_flow.h; keep in sync */
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+/* NOTE(review): also defined (same value) in ntconnect_api_flow.h; keep in sync */
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+/* Header of every statistics reply; counter records follow in data[]. */
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;	/* presumably the number of uint64_t entries in data[] - confirm */
+	uint64_t timestamp;
+	uint64_t is_virt;	/* nonzero when the virt (virtualization) layout is used */
+	uint64_t data[];	/* records laid out per the *_type_fields_s structs above */
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Generic test reply; test[] presumably holds 'number' entries - confirm. */
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];	/* NOTE(review): header uses uint64_t but does not
+				 * include <stdint.h>; relies on the includer
+				 */
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+/* Handler for the "get nic_pci_ids" command (defined below) */
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+/* "get" sub-commands; the all-NULL entry terminates the table */
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * "get nic_pci_ids": return the list of PCI addresses of all registered
+ * adapters. Allocates the reply in *data (caller frees via the module's
+ * free_data callback) and sets *len. Returns REQUEST_OK/REQUEST_ERR.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	int i = 0;
+
+	/* pci_id_list is terminated by a zero pci_id entry */
+	while (i < MAX_PCI_IDS && serv->pci_id_list[i].pci_id) {
+		/* snprintf instead of sprintf: bound the write to the entry size */
+		snprintf(npci->nic_pci_id[i], sizeof(npci->nic_pci_id[i]),
+			 "%04x:%02x:%02x.%x",
+			 serv->pci_id_list[i].domain & 0xffff,
+			 serv->pci_id_list[i].bus, serv->pci_id_list[i].devid,
+			 serv->pci_id_list[i].function);
+		i++;
+	}
+	npci->num_nics = i;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Request entry point for the server module: dispatch 'function' through the
+ * shared helper against server_entry_funcs. Returns the handler's result.
+ */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				server_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by this module's handlers. */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/* Module operations table for the built-in server module.
+ * No per-client cleanup callback is needed (last member NULL).
+ */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/* Register the server module itself on the fake PCI address 0000:00:00.0. */
+int ntconn_server_register(void *server)
+{
+	static const struct rte_pci_addr addr = {
+		.domain = 0,
+		.bus = 0,
+		.devid = 0,
+		.function = 0,
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..697e101a03
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error-code to human-readable-text table.
+ * Terminated by an entry whose err_code is (uint32_t)-1.
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+/* Head of the singly-linked list of registered ntconnect modules */
+static ntconn_mod_t *ntcmod_base;
+static pthread_t tid; /* accept-loop (server) thread id */
+static pthread_t ctid; /* id of the most recently spawned worker thread */
+static struct ntconn_server_s ntconn_serv; /* singleton server state */
+
+/*
+ * Look up the table entry for 'err_code'. Unknown codes fall back to the
+ * "Internal error" entry (index 1), so a valid entry is always returned.
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int idx;
+
+	for (idx = 0; ntconn_err[idx].err_code != (uint32_t)-1; idx++) {
+		if (ntconn_err[idx].err_code == err_code)
+			return &ntconn_err[idx];
+	}
+	/* Terminator reached without a match */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register an ntconnect module for the given PCI address.
+ * The module is pushed onto the head of the global module list and its PCI
+ * id is recorded in the server's pci_id_list (the server's own fake
+ * 0000:00:00.0 address yields pci_id == 0 and is skipped). Returns 0 on
+ * success, -1 on allocation failure.
+ * NOTE(review): list insertion is unlocked - assumes registration happens
+ * single-threaded at init time; confirm.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto the head of the module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	/* pci_id appears to be an aggregate view of the address fields set
+	 * above - zero only for the server's fake address; verify layout.
+	 */
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the id in the first free slot, unless already present */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill a UNIX-domain socket address for 'path'.
+ * Returns 0 on success, -1 when an argument is NULL or the path does not
+ * fit in sun_path.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (addr == NULL || path == NULL)
+		return -1;
+	memset(addr, 0, sizeof(struct sockaddr_un));
+	addr->sun_family = AF_UNIX;
+	if (strlen(path) < sizeof(addr->sun_path)) {
+		/*
+		 * Pass the full buffer size: the length check above already
+		 * guarantees the path (plus NUL) fits. Passing size - 1, as
+		 * before, silently truncated a path of exactly
+		 * sizeof(sun_path) - 1 characters.
+		 */
+		rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path));
+		return 0;
+	}
+	return -1;
+}
+
+/* Internal status codes shared by the socket helpers below */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Wait up to 'timeout' ms (negative = block forever) for readable data on
+ * 'fd' and receive at most 'len' bytes into 'data'. On STATUS_OK, *recv_len
+ * holds the number of bytes received. Returns one of the STATUS_* codes;
+ * STATUS_TRYAGAIN means the caller should simply retry the call.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	/* Wait for readability (or an error/hangup event) */
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd events pending */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			/* Save errno immediately; later calls may overwrite it */
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	/* POLLERR/POLLNVAL without readable data: treat as fatal */
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly 'length' bytes into 'data', retrying on STATUS_TRYAGAIN.
+ * Returns STATUS_OK when all bytes were read, otherwise the first
+ * non-retryable STATUS_* code from read_data().
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t pos = 0;
+
+	while (pos < length) {
+		size_t recv_len = 0;
+		int ret = read_data(clfd, length - pos, &data[pos], &recv_len,
+				    -1);
+
+		if (ret == STATUS_OK) {
+			pos += recv_len;
+			continue;
+		}
+		if (ret == STATUS_CONNECTION_CLOSED || ret == STATUS_TIMEOUT) {
+			/* Silently return status */
+			return ret;
+		}
+		if (ret != STATUS_TRYAGAIN) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       ret);
+			return ret;
+		}
+		/* STATUS_TRYAGAIN: loop and retry */
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write exactly 'size' bytes to 'fd', looping over partial writes.
+ * Returns STATUS_OK on success or STATUS_INTERNAL_ERROR on a write failure.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += (size_t)res;
+	}
+	/* Return the named status code for consistency with the siblings */
+	return STATUS_OK;
+}
+
+/*
+ * Read one complete request from the client socket into a heap buffer.
+ * The first recv reads up to MESSAGE_BUFFER bytes; the embedded header then
+ * gives the total size (len + blob_len) and any remainder beyond
+ * MESSAGE_BUFFER is read with read_all(). On STATUS_OK, *rdata owns the
+ * buffer (caller frees).
+ * NOTE(review): when the total length is <= MESSAGE_BUFFER but the first
+ * recv returned fewer bytes than that length, the remainder is never read -
+ * this assumes small messages always arrive in a single recv; confirm.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	/* The request starts with a ntconn header giving the total length */
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		/* Grow the buffer and read the bytes not yet received */
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from the client socket and resolve the target module.
+ *
+ * A request payload has the form "<pci_id>;<module>[;<command...>]" where
+ * pci_id is "dddd:bb:dd.f" in hex. On success, the parsed header is stored
+ * in *hdr, the raw request buffer in *get_req (caller frees), the command
+ * part (if any) in *module_cmd, and the registered module matching both the
+ * PCI address and the module name is returned. Returns NULL when parsing
+ * fails or no module matches; *status carries the read status.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/*
+			 * Validate each token BEFORE taking strlen() of it;
+			 * previously strlen(NULL) was reachable on a
+			 * malformed request missing a ';' separator.
+			 */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, sizeof(pci_id));
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, sizeof(module));
+
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Split "dddd:bb:dd.f" into its address fields */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Send a reply: a ntconn header tagged 'reply_tag' followed by 'size' bytes
+ * of payload. Returns 0 on success or a STATUS_* error code.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+	uint8_t *message;
+	int res;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	message = malloc(hdr.len * sizeof(uint8_t));
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+
+	res = write_all(clfd, message, hdr.len);
+	free(message);
+
+	return res;
+}
+
+/*
+ * Send a reply and hand the payload back to the owning module for release.
+ * The module mutex is held across free_data, matching the locking used when
+ * the module produced the buffer.
+ * NOTE(review): the payload is only released when size != 0 - this assumes
+ * modules never return a non-NULL buffer with len == 0; confirm.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/*
+ * Send an error reply for 'err_code'. The first 4 bytes of the payload
+ * carry the binary error code (overwriting the "----" placeholder), the
+ * remainder is the human-readable text.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	/* snprintf instead of sprintf: err_text length is not guaranteed here */
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+	memcpy(err_buf, &ntcerr->err_code, sizeof(uint32_t));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread. Repeatedly reads a request, dispatches it to
+ * the resolved module (or answers the generic "version" command), and sends
+ * the reply, until the connection closes or a send fails. On exit, every
+ * registered module's client_cleanup callback is invoked for this fd.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		/* NOTE(review): this INVALID_REQUEST reply is also reached
+		 * after a successfully handled request (there is no
+		 * 'continue' above) - confirm the protocol expects a
+		 * trailing error frame on success.
+		 */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		/* NOTE(review): 'request' is not freed on the break paths
+		 * above - possible per-iteration leak on disconnect.
+		 */
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept loop for the ntconnect UNIX-domain socket. For every accepted
+ * client a detached worker thread is spawned and pinned to the configured
+ * cpuset. Runs until listen()/accept() fails or 'running' is cleared.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		/*
+		 * Check thread creation: previously a failed pthread_create()
+		 * leaked the client fd and passed an undefined thread id to
+		 * pthread_setaffinity_np()/pthread_detach().
+		 */
+		if (pthread_create(&ctid, NULL, ntconnect_worker,
+				   (void *)(uint64_t)clfd)) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create worker thread for fd %i\n",
+			       clfd);
+			close(clfd);
+			continue;
+		}
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Initialize the ntconnect service: ensure the socket directory exists,
+ * register the built-in server module, bind the UNIX-domain socket at
+ * 'sockname' and start the accept-loop thread pinned to 'cpuset'.
+ * Does nothing (returns 0) when no modules have registered.
+ * Returns 0 on success, -1 on error.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		/* Check strdup(): previously a NULL copy reached dirname() */
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		/* Fail early on an over-long path instead of binding a zeroed address */
+		if (unix_build_address(sockname, &addr) < 0)
+			return -1;
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		if (pthread_create(&tid, NULL, ntconnect_server,
+				   &ntconn_serv)) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+/* Per-adapter handle mapping an adapter slot to its driver instance */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+/* "get" sub-command handlers (defined below) */
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+/* "set" sub-command handlers (defined below) */
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Translate a driver nt_link_speed_e value to the ntconnect
+ * PORT_LINK_SPEED_* value; unknown inputs map to PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	static const struct {
+		enum nt_link_speed_e nt_speed;
+		int port_speed;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, PORT_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, PORT_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, PORT_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, PORT_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, PORT_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, PORT_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, PORT_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, PORT_LINK_SPEED_100G },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].nt_speed == link_speed)
+			return speed_map[i].port_speed;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Parse a textual link speed ("10M".."100G") into the driver's
+ * nt_link_speed_t; unrecognized strings yield NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces": build a ntc_interfaces_s reply describing every
+ * ethdev port - physical ports first (with LAG handling), then virtual
+ * ports - including PCI id, link state/speed, MTU, MAC, attached HW
+ * queues and NIM data. The reply is allocated into *data (caller frees
+ * via the module's free_data callback). Returns REQUEST_OK/REQUEST_ERR.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	/* Allocate the reply: fixed header plus one entry per port */
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		/* NOTE(review): unlike phy port 0 above, eth_dev/dev_private
+		 * are not NULL-checked here - confirm they cannot be NULL
+		 * for ports 1..n at this point.
+		 */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		/* Port state: disabled, or NIM present/absent when enabled */
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC: base address from VPD plus the port index */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode fibre length is clamped to the 16-bit field */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		/* Port state/link follow the negotiated virtio transport mode */
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
/*
 * ntconnect handler: "adapter,get,sensors".
 *
 * Builds a reply blob laid out as a struct ntc_sensors_s header followed by
 * one struct sensor record per adapter sensor and per NIM sensor (all ports).
 * The blob is malloc'd here and returned through *data/*len; the caller frees
 * it via the module's free_data callback.
 *
 * @return REQUEST_OK on success, REQUEST_ERR on allocation failure
 *         (with *len set to 0).
 */
static int func_adapter_get_sensors(void *hdl, int client_id _unused,
				    struct ntconn_header_s *hdr _unused,
				    char **data, int *len)
{
	struct adapter_info_s *adapter =
		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
	struct sensor *sensor_ptr = NULL;
	uint16_t sensors_num = 0;
	uint8_t *sensors = NULL;
	struct ntc_sensors_s sensors_info = {
		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
		.ports_cnt = adapter->fpga_info.n_phy_ports
	};
	/* Fixed 24-byte copy — assumes adapter_name and p_dev_name are both
	 * at least 24 bytes; TODO confirm against the struct definitions.
	 */
	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);

	/* Set a sum of sensor`s counters */
	sensors_num = adapter->adapter_sensors_cnt;
	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
		sensors_num += adapter->nim_sensors_cnt[i];
		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
	}

	/* Reply size: header plus one record per sensor counted above */
	*len = sizeof(struct ntc_sensors_s) +
	       sensors_num * sizeof(struct sensor);

	/* Allocate memory for sensors array */
	sensors = malloc(*len);
	if (!sensors) {
		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
		*len = 0;
		return REQUEST_ERR;
	}
	/* Header first; sensor records are appended right after it */
	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));

	/* Fetch adapter sensors */
	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
			ptr != NULL; ptr = ptr->next) {
		sensor_ptr->current_value = ptr->sensor->info.value;
		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
		sensor_ptr->max_value = ptr->sensor->info.value_highest;
		sensor_ptr->sign = ptr->sensor->si;
		sensor_ptr->type = ptr->sensor->info.type;
		/* Fixed 50-byte copy — presumably both name fields are 50
		 * bytes; verify against struct sensor / sensor info types.
		 */
		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
		sensor_ptr++;
	}

	/* Fetch NIM sensors */
	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
				ptr != NULL; ptr = ptr->next) {
			sensor_ptr->current_value = ptr->sensor->info.value;
			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
			sensor_ptr->max_value = ptr->sensor->info.value_highest;
			sensor_ptr->sign = ptr->sensor->si;
			sensor_ptr->type = ptr->sensor->info.type;

			/* Copy at most 50 bytes of the name; note this does
			 * not NUL-terminate when the name is 50+ chars.
			 */
			memcpy(sensor_ptr->name, ptr->sensor->info.name,
			       (strlen(ptr->sensor->info.name) >= 50) ?
			       50 :
			       strlen(ptr->sensor->info.name));
			sensor_ptr++;
		}
	}

	/* Send response */
	 *data = (char *)sensors;

	return REQUEST_OK;
}
+
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(p_adapter_info, port_nr, true);
+
+	return REQUEST_OK;
+}
+
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	adapter_info_t *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	nt4ga_port_set_adm_state(p_adapter_info, port_nr, false);
+
+	return REQUEST_OK;
+}
+
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	const bool link_status =
+		nt4ga_port_get_link_status(p_adapter_info, portid);
+
+	if (!link_status) {
+		nt4ga_port_set_link_status(p_adapter_info, portid, true);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	} else {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+	}
+
+	return REQUEST_OK;
+}
+
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	const bool link_status =
+		nt4ga_port_get_link_status(p_adapter_info, portid);
+
+	if (!link_status) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+	} else {
+		nt4ga_port_set_link_status(p_adapter_info, portid, false);
+		NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+		       portid);
+	}
+
+	return REQUEST_OK;
+}
+
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	const bool port_adm_state =
+		nt4ga_port_get_adm_state(p_adapter_info, portid);
+	if (!port_adm_state) {
+		const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+		if (speed != NT_LINK_SPEED_UNKNOWN) {
+			nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+			NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+			       portid, speed_str);
+		} else {
+			return ntconn_error(data, len, this_module_name,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		}
+	} else {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	return REQUEST_OK;
+}
+
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(p_adapter_info, portid, mode);
+	return REQUEST_OK;
+}
+
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+	if (nt4ga_port_tx_power(p_adapter_info, portid, disable)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+	return REQUEST_OK;
+}
+
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/* Only set on phy ports */
+	if (port_nr < adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+	}
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int i;
+
+	for (i = 0; i < MAX_ADAPTERS; i++) {
+		if (adap_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[i],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
/* Per-client handle state for the "filter" module; one slot per client. */
static struct flow_hdl_s {
	struct drv_s *drv;	/* owning driver instance, NULL = slot free */
} flow_hdl[MAX_CLIENTS];
+
#define MAX_PORTS 64
/* Per-port flow context populated by func_flow_setport(). */
static struct port_to_eth_s {
	struct flow_eth_dev *flw_dev;	/* flow device of the bound VF */
	uint32_t forced_vlan_vid;	/* VLAN id forced on the virtual port */
	uint32_t caller_id;		/* virtual port number used as caller id */
} port_eth[MAX_PORTS];
+
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	int idx = 0;
+
+	while (ntconn_err[idx].err_code != (uint32_t)-1 &&
+			ntconn_err[idx].err_code != err_code)
+		idx++;
+	if (ntconn_err[idx].err_code == (uint32_t)-1)
+		idx = 1;
+
+	return ntconn_err[idx].err_text;
+}
+
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		memcpy(return_value->err_msg, err_msg,
+		       RTE_MIN(strlen(err_msg), ERR_MSG_LEN));
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg =
+			get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR);
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error->type;
+		strlcpy(return_value->err_msg, error->message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	char vpath[MAX_PATH_LEN];
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
/* Selector for make_flow_create(): create the flow, or only validate it. */
enum {
	FLOW_API_FUNC_CREATE,
	FLOW_API_FUNC_VALIDATE,
};
+
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	do {
+		idx++;
+		if (idx > MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	idx = -1;
+	do {
+		idx++;
+		if (idx > MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					if (tun_idx >
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					if (eidx > RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+/*
+ * ntconnect request handler: create a flow on a port.
+ *
+ * The request payload (a struct create_flow_ntconnect) sits at
+ * &(*data)[hdr->len]; its size must match hdr->blob_len exactly.
+ * On return *data points to a newly malloc'ed reply and *len holds its
+ * size: a create_flow_return_s carrying the flow handle on success, or
+ * a flow_error_return_s carrying the flow API error otherwise.
+ * Returns REQUEST_OK when a reply buffer was produced, REQUEST_ERR if
+ * the reply buffer could not be allocated.
+ */
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose payload size does not match the struct */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/*
+		 * Fix: flow_cpy points at a single struct and must not be
+		 * indexed by i (flow_cpy[i] read out of bounds).
+		 */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		/* Success: hand the flow handle back to the client */
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	/* Creation failed: hand the flow API error back to the client */
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: validate a flow without creating it.
+ *
+ * Takes the same create_flow_ntconnect payload as func_flow_create but
+ * runs the filter in FLOW_API_FUNC_VALIDATE mode; the reply (status and
+ * possible flow API error) is built by copy_return_status().
+ *
+ * Fixes versus the previous revision:
+ *  - removed the unreachable duplicate validate/reply code that
+ *    followed the return statement,
+ *  - the debug IP dumps printed byte [0] four times instead of
+ *    bytes [0..3],
+ *  - flow_cpy (a pointer to a single struct) was wrongly indexed by i,
+ *  - the debug trace named the wrong function.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose payload size does not match the struct */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * ntconnect request handler: query a single action (e.g. counters) on
+ * an existing flow.
+ *
+ * The request payload is a struct query_flow_ntconnect carrying the
+ * port, the opaque flow handle and the action to query.  The reply is
+ * a query_flow_return_s followed by the raw query data.
+ * Returns REQUEST_OK when a reply buffer was produced, REQUEST_ERR on
+ * allocation failure.
+ *
+ * Fix: the buffer returned by flow_query() was leaked when the reply
+ * allocation failed; it is now freed on that path as well.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* Reject requests whose payload size does not match the struct */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/* The handle travels over the wire as an integer */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data; flow_query allocates data_out on success */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	/* Reply allocation failed: do not leak the query result buffer */
+	free(data_out);
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Entry point for all "flow" module requests: look the function name up
+ * in adapter_entry_funcs and run the matching handler.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int ret = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len, 0);
+	return ret;
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(data);
+}
+
+/*
+ * Per-client cleanup hook required by the ntconnect module interface.
+ * The flow module keeps no per-client state, so this is a no-op.
+ */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/*
+ * ntconnect module descriptor for the "flow" module: name, interface
+ * version, and the request/free/cleanup callbacks defined above.
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register an adapter with the NtConnect flow module.
+ *
+ * Claims the first free slot in flow_hdl[] for @drv and registers the
+ * flow operations under the device's PCI address.  Returns the result
+ * of register_ntconn_mod(), or -1 when all MAX_CLIENTS slots are taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL) {
+			flow_hdl[slot].drv = drv;
+			return register_ntconn_mod(&drv->p_dev->addr,
+						   (void *)&flow_hdl[slot],
+						   &ntconn_flow_op);
+		}
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "Cannot register more adapters into NtConnect framework");
+	return -1;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One slot per registered adapter/client of the meter module. */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter error code -> message table; terminated by the -1 sentinel. */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Get error message corresponding to the error code                */
+/********************************************************************/
+/*
+ * Map a meter error code to its human-readable message.
+ *
+ * Codes below NTCONN_METER_ERR_INTERNAL_ERROR belong to the generic
+ * ntconn range and are resolved via get_ntconn_error(); module codes
+ * are looked up in ntconn_err[], falling back to the "Internal error"
+ * entry (index 1) for unknown codes.
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	int idx;
+
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR)
+		return get_ntconn_error(err_code)->err_text;
+
+	for (idx = 0; ntconn_err[idx].err_code != (uint32_t)-1; idx++) {
+		if (ntconn_err[idx].err_code == err_code)
+			return ntconn_err[idx].err_text;
+	}
+	/* Unknown code: report it as an internal error */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+/* Handlers for the meter module's named request functions. */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Function-name -> handler dispatch table; NULL-terminated. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+/*
+ * Fill an rte_mtr_error with the message matching the given module
+ * error code.  The type is always UNSPECIFIED and no cause is attached.
+ */
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+	error->cause = NULL;
+}
+
+/*
+ * ntconnect request handler: return the rte_mtr capabilities of the
+ * port behind a virtual port.
+ *
+ * The request is a text parameter string in *data of the form
+ * "vport=<n>" with 1 <= n <= 64.  On success a
+ * meter_capabilities_return_s holding the rte_mtr_capabilities is
+ * returned in *data; on failure a meter_error_return_s describing the
+ * error.  Returns REQUEST_OK when a reply was produced, REQUEST_ERR if
+ * the reply buffer could not be allocated.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Parse the leading "vport=<n>" token of the parameter string */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* vport stays 0 when parsing failed, which is rejected here too */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is derived from the vport's parity (0 or 1) */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect request handler: add/delete meter profiles, policies and
+ * meters on the port behind a virtual port.
+ *
+ * The command is the leading 6-character token of the text parameters
+ * in *data ("addpro", "delpro", "addpol", "delpol", "crtmtr", "delmtr")
+ * and the argument blob at &(*data)[hdr->len] is a meter_setup_s.
+ * Profile/policy/meter IDs are remapped into a per-vport range so each
+ * virtual port gets a private slice of the shared ID space.
+ * The reply is a meter_return_s on success or a meter_error_return_s on
+ * failure; REQUEST_ERR is returned only when no reply buffer could be
+ * allocated.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Reject requests whose payload size does not match the struct */
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* Decode the 6-character command token */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	/* All commands below range-check the ID, then remap it into this
+	 * vport's slice of the global ID space before calling rte_mtr.
+	 */
+	switch (command) {
+	case ADD_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action table from the wire format */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy references must be remapped the same way */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Read (and optionally clear) the statistics of one meter.
+ * Request blob: struct meter_get_stat_s located after the ntconn header.
+ * Reply: struct meter_return_stat_s on success, or
+ * struct meter_error_return_s carrying the rte_mtr error on failure.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The request blob must match the expected struct size exactly */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Valid virtual ports are 4..128 (0-3 reserved; see setup path) */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/*
+	 * Physical port taken from the low bit of the virtual port —
+	 * presumably vports alternate between the two physical ports;
+	 * NOTE(review): confirm against the vport allocation scheme.
+	 */
+	port = cpy_data->vport & 1;
+	/* Flatten the per-vport meter ID into the global meter ID space */
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Module entry point: dispatch an incoming "meter" request by function
+ * name through the adapter_entry_funcs table.
+ * Note: client_id is forwarded to execute_function, so it must not be
+ * annotated _unused (the old marker was misleading).
+ */
+static int meter_request(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously allocated by a meter request handler.
+ * free(NULL) is a no-op, so no guard is needed (this also matches
+ * stat_free_data in the stat module).
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client cleanup hook required by the ntconnapi_t interface.
+ * The meter module keeps no per-client state, so nothing is released.
+ */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect module descriptor: name, version and the three callbacks
+ * (request dispatch, reply-buffer free, per-client cleanup).
+ */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register one adapter instance with the NtConnect framework.
+ * Claims the first unused entry in meter_hdl[]; fails with -1 when all
+ * MAX_CLIENTS slots are already taken.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ *
+ * Each register function hooks one NtConnect module up for the given
+ * driver instance. They return 0 on success and -1 on failure.
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..437cf9ddad
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,877 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistic group inside a client's snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* Location and size (in 64bit words) of one group within a snapshot */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot state, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* one allocation holding all groups */
+	struct snaps_s *next;
+};
+
+/* Module handle: driver instance, stat database and snapshot list head */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic categories served by this module */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Second-level dispatch table for "get snapshot <group>" */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* First-level dispatch table for "get <item>" */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level dispatch table: "get ..." and "snapshot" */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize flowmatcher statistics into val: a ntc_stat_get_data_s
+ * header followed by nbc flowmatcher_type_fields_s records.
+ * Returns the number of 64bit words written.
+ * Note: there is a single source counter set (mp_stat_structs_flm), so
+ * every output record receives the same values.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* Sanity check: names and values of the xstats view must agree */
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+			/* Advance to the next output record (was missing:
+			 * every iteration overwrote the first record).
+			 */
+			flm++;
+		}
+	} else {
+		/* No FLM counters available: zero all output records.
+		 * Size the memset by the destination records, not by the
+		 * (unrelated) source struct as before.
+		 */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize color statistics into val: a ntc_stat_get_data_s header
+ * followed by nbc color_type_fields_s records.
+ * Returns the number of 64bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *hdr_out = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *rec;
+	int idx;
+
+	hdr_out->nb_counters = (uint64_t)nbc;
+	hdr_out->timestamp = hwstat->last_timestamp;
+	hdr_out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* The record layout is the same for virt and cap adapters */
+	rec = (struct color_type_fields_s *)hdr_out->data;
+	for (idx = 0; idx < nbc; idx++, rec++) {
+		rec->pkts = hwstat->mp_stat_structs_color[idx].color_packets;
+		rec->octets = hwstat->mp_stat_structs_color[idx].color_bytes;
+		rec->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[idx].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize host-buffer (queue) statistics into val: a
+ * ntc_stat_get_data_s header followed by nbq queue_type_fields_s records.
+ * Returns the number of 64bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *hdr_out = (struct ntc_stat_get_data_s *)val;
+	struct queue_type_fields_s *rec;
+	int idx;
+
+	hdr_out->nb_counters = (uint64_t)nbq;
+	hdr_out->timestamp = hwstat->last_timestamp;
+	hdr_out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* The record layout is the same for virt and cap adapters */
+	rec = (struct queue_type_fields_s *)hdr_out->data;
+	for (idx = 0; idx < nbq; idx++, rec++) {
+		rec->flush_pkts = hwstat->mp_stat_structs_hb[idx].flush_packets;
+		rec->drop_pkts = hwstat->mp_stat_structs_hb[idx].drop_packets;
+		rec->fwd_pkts = hwstat->mp_stat_structs_hb[idx].fwd_packets;
+		rec->dbs_drop_pkts = hwstat->mp_stat_structs_hb[idx].dbs_drop_packets;
+		rec->flush_octets = hwstat->mp_stat_structs_hb[idx].flush_bytes;
+		rec->drop_octets = hwstat->mp_stat_structs_hb[idx].drop_bytes;
+		rec->fwd_octets = hwstat->mp_stat_structs_hb[idx].fwd_bytes;
+		rec->dbs_drop_octets = hwstat->mp_stat_structs_hb[idx].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group shared by Rx and Tx capture records from
+ * the hardware counter struct (src) into the wire record (dst).
+ */
+static void copy_rmon_stat(struct port_counters_v2 *src,
+			    struct stat_rmon_s *dst)
+{
+	dst->drop_events = src->drop_events;
+	dst->pkts = src->pkts;
+	dst->octets = src->octets;
+	dst->broadcast_pkts = src->broadcast_pkts;
+	dst->multicast_pkts = src->multicast_pkts;
+	dst->unicast_pkts = src->unicast_pkts;
+	dst->pkts_alignment = src->pkts_alignment;
+	dst->pkts_code_violation = src->pkts_code_violation;
+	dst->pkts_crc = src->pkts_crc;
+	dst->undersize_pkts = src->undersize_pkts;
+	dst->oversize_pkts = src->oversize_pkts;
+	dst->fragments = src->fragments;
+	dst->jabbers_not_truncated = src->jabbers_not_truncated;
+	dst->jabbers_truncated = src->jabbers_truncated;
+	dst->pkts_64_octets = src->pkts_64_octets;
+	dst->pkts_65_to_127_octets = src->pkts_65_to_127_octets;
+	dst->pkts_128_to_255_octets = src->pkts_128_to_255_octets;
+	dst->pkts_256_to_511_octets = src->pkts_256_to_511_octets;
+	dst->pkts_512_to_1023_octets = src->pkts_512_to_1023_octets;
+	dst->pkts_1024_to_1518_octets = src->pkts_1024_to_1518_octets;
+	dst->pkts_1519_to_2047_octets = src->pkts_1519_to_2047_octets;
+	dst->pkts_2048_to_4095_octets = src->pkts_2048_to_4095_octets;
+	dst->pkts_4096_to_8191_octets = src->pkts_4096_to_8191_octets;
+	dst->pkts_8192_to_max_octets = src->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port statistics into val: a ntc_stat_get_data_s header
+ * followed by nbp per-port records. Virtual (vswitch) adapters use the
+ * compact rtx_type_fields_virt_s layout; capture adapters use the full
+ * rx_type_fields_cap_s layout including the RMON group.
+ * Returns the number of 64bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch: compact per-port record */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* capture: RMON group plus Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize Tx port statistics into val: a ntc_stat_get_data_s header
+ * followed by nbp per-port records. Virtual (vswitch) and capture
+ * adapters use different record layouts.
+ * Returns the number of 64bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *hdr_out = (struct ntc_stat_get_data_s *)val;
+	int idx;
+
+	hdr_out->nb_counters = (uint64_t)nbp;
+	hdr_out->timestamp = hwstat->last_timestamp;
+	hdr_out->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (hdr_out->is_virt) {
+		/* vswitch: compact per-port record */
+		struct rtx_type_fields_virt_s *rec =
+			(struct rtx_type_fields_virt_s *)hdr_out->data;
+
+		for (idx = 0; idx < nbp; idx++, rec++) {
+			rec->octets =
+				hwstat->virt.mp_stat_structs_port_tx[idx].octets;
+			rec->pkts =
+				hwstat->virt.mp_stat_structs_port_tx[idx].pkts;
+			rec->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[idx]
+				.drop_events;
+			rec->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[idx]
+				.qos_drop_octets;
+			rec->qos_drop_pkts =
+				hwstat->virt.mp_stat_structs_port_tx[idx]
+				.qos_drop_pkts;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	}
+
+	/* capture: RMON group, with the packet total taken from the
+	 * accumulated driver counter
+	 */
+	struct tx_type_fields_cap_s *rec =
+		(struct tx_type_fields_cap_s *)hdr_out->data;
+
+	for (idx = 0; idx < nbp; idx++, rec++) {
+		copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[idx],
+				&rec->rmon);
+		rec->rmon.pkts = hwstat->a_port_tx_packets_total[idx];
+	}
+	return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * "get layout_version": reply with the adapter's statistics layout
+ * version as a single int.
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(*reply));
+
+	*data = (char *)reply;
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * "get flm_layout_version": reply with the flowmatcher record layout
+ * generation (1 for flm_stat_ver < 18, otherwise 2) as a single int.
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	int *reply = malloc(sizeof(*reply));
+
+	*data = (char *)reply;
+	if (!reply) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Compute the size, in 64bit words, of the reply payload for one stat
+ * type: all per-record counters plus the common info header. The record
+ * count (number of colors, queues, ports, ...) is returned through
+ * num_records.
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	nt4ga_stat_t *hw = stat->p_nt4ga_stat;
+	int records = 0;
+	int words = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		records = hw->mp_nthw_stat->m_nb_color_counters / 2;
+		words = records * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		records = hw->mp_nthw_stat->m_nb_rx_host_buffers;
+		words = records * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		records = hw->mn_rx_ports;
+		words = records * (hw->mp_nthw_stat->mb_is_vswitch ?
+				   NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+				   NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		records = hw->mn_tx_ports;
+		words = records * (hw->mp_nthw_stat->mb_is_vswitch ?
+				   NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+				   NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		records = 1;
+		words = records * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = records;
+	return words + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common reply builder: allocate a buffer sized for stype, fill it with
+ * read_counters() while holding the statistics lock, and hand buffer
+ * and length to the caller (who owns the allocation afterwards).
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nrec;
+	int bytes = get_size(stat, stype, &nrec) * (int)sizeof(uint64_t);
+	uint64_t *buf = malloc(bytes);
+
+	if (!buf) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Hold the stat lock so the counters form a consistent view */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, buf, nrec);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)buf;
+	*len = bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": reply with the flowmatcher statistics blob */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": reply with the color statistics blob */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": reply with the host-buffer (queue) statistics blob */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get rx_counters": reply with the Rx port statistics blob */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get tx_counters": reply with the Tx port statistics blob */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the snapshot entry for client_id in the module's linked list.
+ * When parent is non-NULL it receives the predecessor node (NULL when
+ * the entry is first, or the list tail when no entry is found), which
+ * the caller can use to unlink the entry.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+		prev = cur;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot entry for client_id, creating and linking a fresh
+ * one at the head of the list when none exists yet. Returns NULL when
+ * allocating a new entry fails.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *entry = find_client_snap_data(stat, client_id, NULL);
+
+	if (entry)
+		return entry;
+
+	entry = malloc(sizeof(struct snaps_s)); /* NULL on malloc failure */
+	if (entry) {
+		entry->client_id = client_id;
+		entry->buffer = NULL;
+		entry->next = stat->snaps_base;
+		stat->snaps_base = entry;
+	}
+	return entry;
+}
+
+/*
+ * "snapshot": atomically read all statistic groups (colors, queues,
+ * Rx and Tx ports) into one per-client buffer under the statistics
+ * lock. The groups are then fetched individually via
+ * "get snapshot <group>". The reply itself carries no data.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/* Must not jump to err_out here: the stat lock is not yet
+		 * held and unlocking an unowned mutex is undefined behavior.
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	/* Drop any previous snapshot; clear the pointer so a later failure
+	 * cannot leave it dangling for get_snap_data()/cleanup.
+	 */
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	/* Only reached with the stat lock held. Discard the partial
+	 * snapshot so inconsistent data cannot be served later.
+	 */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	free(snaps->buffer);
+	snaps->buffer = NULL;
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one statistic group out of the client's snapshot buffer into a
+ * freshly allocated reply. Fails when no snapshot has been taken yet.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* Bug fix: the old code tested "!data" (the out-parameter itself,
+	 * never NULL), so a malloc failure went undetected.
+	 */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": return the colors group of the last snapshot */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": return the queues group of the last snapshot */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": return the Rx group of the last snapshot */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": return the Tx group of the last snapshot */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ *
+ * Module entry point: dispatch an incoming "stat" request by function
+ * name through the stat_entry_funcs table.
+ * Note: client_id is forwarded to execute_function, so it must not be
+ * annotated _unused (the old marker was misleading).
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously allocated by a stat request handler.
+ * free(NULL) is a no-op, so no guard is needed.
+ */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Remove and free a client's snapshot state when the client disconnects.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *prev;
+	struct snaps_s *entry =
+		find_client_snap_data(stat, client_id, &prev);
+
+	if (!entry)
+		return;
+
+	/* Unlink the entry from the singly linked list */
+	if (prev)
+		prev->next = entry->next;
+	else
+		stat->snaps_base = entry->next;
+
+	free(entry->buffer);	/* free(NULL) is a no-op */
+	free(entry);
+}
+
+/* NtConnect module descriptor: name, version and the three callbacks
+ * (request dispatch, reply-buffer free, per-client cleanup).
+ */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the statistics module for one driver instance.
+ * Verifies that the adapter's statistics layout version is one this
+ * module understands; otherwise the module is not activated.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Every request handler guards against a missing stat database;
+	 * do the same here before dereferencing it below.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       0);
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: to small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+static void test_free_data(void *hdl _unused, char *data)
+{
+	if (data)
+		free(data);
+}
+
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+int ntconn_test_register(struct drv_s *drv)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (test_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[i],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (18 preceding siblings ...)
  2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-05 14:54 ` Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (6 more replies)
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  20 siblings, 7 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout is part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
v10:
* Update FPGA register defines.
v13:
* Fix typo spelling warnings
v15:
* Update FPGA register defines.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4181 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7211 +++++++++++++++++
 9 files changed, 11930 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0,
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED,
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+struct nt_fpga_field_init {
+	int id;
+	uint16_t bw;
+	uint16_t low;
+	uint64_t reset_val;
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+struct nt_fpga_register_init {
+	int id;
+	uint32_t addr_rel;
+	uint16_t bw;
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+struct nt_fpga_module_init {
+	int id;
+	int instance;
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base;
+	int nb_registers;
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params;
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..d24f93f9c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+/*
+ * DBS (doorbell/queue setup) module register map (auto-generated).
+ * Field rows appear to be { field_id, bit_width, lsb_position, reset_value };
+ * register rows { reg_id, index, bit_width, access_type, reset_value,
+ * field_count, field_table }. NOTE(review): row layout inferred from the
+ * initializers -- confirm against fpga_model.h.
+ */
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/* Register index -> field-table wiring for the DBS module. */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (flow matcher/learning) module register map (auto-generated).
+ * Field rows appear to be { field_id, bit_width, lsb_position, reset_value };
+ * register rows { reg_id, index, bit_width, access_type, reset_value,
+ * field_count, field_table }. NOTE(review): row layout inferred from the
+ * initializers -- confirm against fpga_model.h.
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	/* reset 16 at bit 23 matches register reset 134217728 (16 << 23) */
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* 32-bit statistics counters; one single-field table per counter register. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* Register index -> field-table wiring for the FLM module. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG module register map (auto-generated); two parallel instances
+ * (suffix 0/1) of the same register set. Field rows appear to be
+ * { field_id, bit_width, lsb_position, reset_value }; register rows
+ * { reg_id, index, bit_width, access_type, reset_value, field_count,
+ * field_table }. NOTE(review): layout inferred -- confirm via fpga_model.h.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF module register map (auto-generated). Field rows appear to be
+ * { field_id, bit_width, lsb_position, reset_value }; register rows
+ * { reg_id, index, bit_width, access_type, reset_value, field_count,
+ * field_table }. NOTE(review): layout inferred -- confirm via fpga_model.h.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY module register map (auto-generated): per-port PHY control
+ * (CFG) and pin state (GPIO) bits for ports 0 and 1. Field rows appear
+ * to be { field_id, bit_width, lsb_position, reset_value }.
+ * NOTE(review): layout inferred -- confirm via fpga_model.h.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU module register map (auto-generated): a recipe (RCP) control/data
+ * register pair. Field rows appear to be { field_id, bit_width,
+ * lsb_position, reset_value }. NOTE(review): layout inferred -- confirm
+ * via fpga_model.h.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface) module register map (auto-generated): product
+ * identification, build time, UUID words, test patterns and PCIe-side
+ * status/statistics. Field rows appear to be { field_id, bit_width,
+ * lsb_position, reset_value }; register rows { reg_id, index, bit_width,
+ * access_type, reset_value, field_count, field_table }. NOTE(review):
+ * layout inferred -- confirm via fpga_model.h.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1693492863 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+/* Reset values 9563/55/24 match the FPGA image id in this file's name. */
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* Test patterns: 287454020 = 0x11223344, 2864434397 = 0xAABBCCDD. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 3215833203 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 2947535663 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 1243492979 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 2500373735 },
+};
+
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1693492863, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 3215833203, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 2947535663, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 1243492979, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 2500373735, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH (hashing) module register map (auto-generated): recipe control/data
+ * pair. Field rows appear to be { field_id, bit_width, lsb_position,
+ * reset_value }. NOTE(review): layout inferred -- confirm via fpga_model.h.
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST module register map (auto-generated): recipe control/data pair with
+ * three modifier slots. Field rows appear to be { field_id, bit_width,
+ * lsb_position, reset_value }. NOTE(review): layout inferred -- confirm
+ * via fpga_model.h.
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR module register map (auto-generated): recipe control/data pair
+ * carrying an enable bit and a 14-bit MTU value. Field rows appear to be
+ * { field_id, bit_width, lsb_position, reset_value }. NOTE(review):
+ * layout inferred -- confirm via fpga_model.h.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC (I2C controller) field tables (auto-generated); the bit names track
+ * the Xilinx AXI IIC register set (CR/SR/IER/ISR, FIFOs, timing params).
+ * Field rows appear to be { field_id, bit_width, lsb_position,
+ * reset_value }. NOTE(review): layout inferred -- confirm via
+ * fpga_model.h. The matching iic_registers table follows below.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * INS module tables (header-insertion recipe — presumably; confirm against
+ * the ntnic module docs). Field entries are { id, width, low bit, reset }.
+ */
+static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
+	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* Recipe data: dynamic offset selector (DYN), length and offset. */
+static nt_fpga_field_init_t ins_rcp_data_fields[] = {
+	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
+};
+
+/* Register map: { id, address, width, type, reset, field count, fields }. */
+static nt_fpga_register_init_t ins_registers[] = {
+	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
+	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
+};
+
+/*
+ * KM module tables (CAM/TCAM based key matcher — presumably; confirm
+ * against the ntnic module docs).
+ * Field entries are { field id, bit width, low bit position, reset value }.
+ */
+static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
+	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
+	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* CAM entry: six 32-bit key words W0..W5 plus six 4-bit flow types FT0..FT5. */
+static nt_fpga_field_init_t km_cam_data_fields[] = {
+	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
+	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
+	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
+	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
+	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
+	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
+	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
+	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
+	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
+	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
+	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
+	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
+	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* Recipe data: 781-bit record (see KM_RCP_DATA below); offsets are bit
+ * positions within that record, not within a 32-bit word. */
+static nt_fpga_field_init_t km_rcp_data_fields[] = {
+	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
+	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
+	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
+	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
+	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
+	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
+	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
+	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
+	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
+	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
+	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
+	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
+	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
+	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
+	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
+	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
+	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
+	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
+	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
+	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
+	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
+	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
+	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
+	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
+	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
+	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
+	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
+	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
+	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
+	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
+	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
+	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
+	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
+	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_status_fields[] = {
+	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
+	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
+	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcam_data_fields[] = {
+	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
+	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
+	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tci_data_fields[] = {
+	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
+	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
+	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
+	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t km_tcq_data_fields[] = {
+	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
+	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
+};
+
+/* Register map: { id, address, width, type, reset, field count, fields }. */
+static nt_fpga_register_init_t km_registers[] = {
+	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
+	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
+	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
+	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
+	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
+	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
+	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
+	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
+	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
+	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
+	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
+};
+
+/*
+ * MAC_PCS module tables (Ethernet MAC / PCS, including RS-FEC and GTY
+ * transceiver control). Field entries are { id, width, low bit, reset }.
+ */
+static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
+	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
+	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
+};
+
+/* Per-virtual-lane block lock status (20 lanes). */
+static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
+	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/* Link debounce: both latency fields reset to 10, port ctrl to 2. */
+static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
+	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
+};
+
+/* DRP (dynamic reconfiguration port) access: mixed RW controls and RO
+ * BUSY/DONE status bits. */
+static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
+	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
+	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
+	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
+	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
+	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
+	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
+};
+
+/* RS-FEC corrected/uncorrected codeword and per-lane error counters. */
+static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
+	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
+	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
+	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
+	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
+};
+
+/* FEC status: per-lane AM lock, alignment and high-SER indications. */
+static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
+	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
+	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
+	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
+	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
+	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
+	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
+	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
+	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
+};
+
+/* GTY transceiver RX controls, one bit/field group per lane 0..3. */
+static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
+	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
+	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
+	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
+	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
+};
+
+/* TX differential swing per lane; all reset to 24. */
+static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
+	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
+	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
+};
+
+/* TX post-cursor emphasis per lane; all reset to 20. */
+static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
+	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
+	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
+	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
+	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
+};
+
+/* Eye-scan and PRBS test controls per lane. */
+static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
+	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
+	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
+	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
+};
+
+/* Aggregated link state: absolute/latched fault bits plus link-down count. */
+static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
+	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
+	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
+	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
+};
+
+/* Core MAC/PCS config; TX_ENABLE and TX_FCS_REMOVE reset to 1 (enabled). */
+static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
+	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
+};
+
+/* Maximum packet length; default 10000 bytes. */
+static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
+	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
+};
+
+/* TX source mux (host/TFG/RX-loop) and timestamping position control. */
+static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
+	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
+	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
+	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
+	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
+	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
+};
+
+/* PCS RX status (live) — same layout as the latched variant below. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
+};
+
+/* PCS TX status: live fault/FIFO bits plus their *_CHANGED companions. */
+static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
+	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
+	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
+	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
+	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
+};
+
+/* RX/TX timestamp compensation delays; defaults 1451/1440 (units not
+ * stated here — presumably time ticks; confirm against HW docs). */
+static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
+	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
+	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
+	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
+};
+
+/*
+ * MAC_PCS register map: { id, address, bit width, access type,
+ * register reset value (fields' resets packed, e.g. DEBOUNCE_CTRL
+ * 264714 = 10 + 10<<8 + 2<<17), field count, field table }.
+ */
+static nt_fpga_register_init_t mac_pcs_registers[] = {
+	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bad_code_fields
+	},
+	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_bip_err_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_fields
+	},
+	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_block_lock_chg_fields
+	},
+	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
+		mac_pcs_debounce_ctrl_fields
+	},
+	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
+		mac_pcs_drp_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
+		mac_pcs_fec_ctrl_fields
+	},
+	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_cw_cnt_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_0_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_1_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_2_fields
+	},
+	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_err_cnt_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_0_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_1_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_2_fields
+	},
+	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_dly_3_fields
+	},
+	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_lane_map_fields
+	},
+	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_fec_stat_fields
+	},
+	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_fec_ucw_cnt_fields
+	},
+	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
+		mac_pcs_gty_ctl_rx_fields
+	},
+	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_ctl_tx_fields
+	},
+	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
+		mac_pcs_gty_diff_ctl_fields
+	},
+	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_loop_fields
+	},
+	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
+		mac_pcs_gty_post_cursor_fields
+	},
+	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
+		mac_pcs_gty_prbs_sel_fields
+	},
+	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
+		mac_pcs_gty_pre_cursor_fields
+	},
+	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_rx_buf_stat_fields
+	},
+	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
+		mac_pcs_gty_scan_ctl_fields
+	},
+	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
+		mac_pcs_gty_scan_stat_fields
+	},
+	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
+		mac_pcs_gty_stat_fields
+	},
+	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
+		mac_pcs_link_summary_fields
+	},
+	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
+		mac_pcs_mac_pcs_config_fields
+	},
+	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
+		mac_pcs_max_pkt_len_fields
+	},
+	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
+		mac_pcs_phymac_misc_fields
+	},
+	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
+		mac_pcs_phy_stat_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_fields
+	},
+	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_rx_latch_fields
+	},
+	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
+		mac_pcs_stat_pcs_tx_fields
+	},
+	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_fields
+	},
+	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_synced_err_fields
+	},
+	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_test_err_fields
+	},
+	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
+		mac_pcs_timestamp_comp_fields
+	},
+	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_fields
+	},
+	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
+		mac_pcs_vl_demuxed_chg_fields
+	},
+};
+
<p>+/*
+ * MAC_RX module: read-only 32-bit receive statistics counters.
+ * Field entries are { id, width, low bit, reset }.
+ */
+static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
+	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
+	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
+	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
+	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
+	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
+	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
+	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
+	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
+};
+
+/* Register map: { id, address, width, type, reset, field count, fields }. */
+static nt_fpga_register_init_t mac_rx_registers[] = {
+	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_bad_fcs_fields
+	},
+	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_fragment_fields
+	},
+	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_bad_fcs_fields
+	},
+	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_packet_small_fields
+	},
+	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_bytes_fields
+	},
+	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_good_packets_fields
+	},
+	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_total_packets_fields
+	},
+	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_rx_undersize_fields
+	},
+};</p>
+
+/*
+ * MAC_TX module: read-only 32-bit transmit statistics counters.
+ * Field entries are { id, width, low bit, reset }.
+ */
+static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
+	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
+	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
+	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
+	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
+};
+
+/* Register map: { id, address, width, type, reset, field count, fields }. */
+static nt_fpga_register_init_t mac_tx_registers[] = {
+	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_packet_small_fields
+	},
+	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_bytes_fields
+	},
+	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_good_packets_fields
+	},
+	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
+		mac_tx_total_packets_fields
+	},
+};
+
+/*
+ * PCI_RD_TG module (PCIe read test/traffic generator — presumably; confirm
+ * against the ntnic module docs). Fields are { id, width, low bit, reset }.
+ */
+static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
+	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
+	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+/* 64-bit physical address split over two 32-bit data registers. */
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
+	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
+	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
+	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
+};
+
+/* Register map: { id, address, width, type, reset, field count, fields }. */
+static nt_fpga_register_init_t pci_rd_tg_registers[] = {
+	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_rd_tg_tg_ctrl_fields
+	},
+	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rdaddr_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata0_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rddata1_fields
+	},
+	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
+		pci_rd_tg_tg_rddata2_fields
+	},
+	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_rd_tg_tg_rd_run_fields
+	},
+};
+
+/*
+ * PCI_TA module (PCIe test analyzer counters — presumably; confirm against
+ * the ntnic module docs). Fields are { id, width, low bit, reset }.
+ */
+static nt_fpga_field_init_t pci_ta_control_fields[] = {
+	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
+	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
+	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
+	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
+	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
+};
+
+/* Register map: { id, address, width, type, reset, field count, fields }. */
+static nt_fpga_register_init_t pci_ta_registers[] = {
+	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
+	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_length_error_fields
+	},
+	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_bad_fields
+	},
+	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_packet_good_fields
+	},
+	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
+		pci_ta_payload_error_fields
+	},
+};
+
+/*
+ * PCI_WR_TG module (PCIe write test/traffic generator — presumably; mirrors
+ * PCI_RD_TG above). Fields are { id, width, low bit, reset }.
+ */
+static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
+	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
+	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
+	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
+};
+
+/* 64-bit physical address split over two 32-bit data registers. */
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
+	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
+	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
+	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
+};
+
+/* Register map: { id, address, width, type, reset, field count, fields }. */
+static nt_fpga_register_init_t pci_wr_tg_registers[] = {
+	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
+		pci_wr_tg_tg_ctrl_fields
+	},
+	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
+		pci_wr_tg_tg_seq_fields
+	},
+	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wraddr_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata0_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wrdata1_fields
+	},
+	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
+		pci_wr_tg_tg_wrdata2_fields
+	},
+	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
+		pci_wr_tg_tg_wr_run_fields
+	},
+};
+
+/*
+ * PDB module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t pdb_config_fields[] = {
+	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
+	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
+	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* PDB_RCP_DATA is a 67-bit wide record (see register table below). */
+static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
+	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
+	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
+	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
+	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
+	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
+	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
+	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
+	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
+	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
+	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
+	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
+	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
+	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
+	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
+	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
+};
+
+static nt_fpga_register_init_t pdb_registers[] = {
+	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
+	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
+	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
+};
+
+/*
+ * PDI module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+/* Register reset values equal the OR of the shifted field resets. */
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/*
+ * PTP1588 module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+/*
+ * Register reset values equal the OR of the shifted field resets,
+ * e.g. PTP1588_CONF: 3072 = PHY_RST1 (bit 10) | PHY_RST2 (bit 11).
+ */
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/*
+ * QSL module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/*
+ * QSPI module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+/* ISR mirrors the IER bit layout. */
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+/* 4294967295 == 0xFFFFFFFF: all slave-select lines deasserted at reset. */
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/*
+ * RAC module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Both buffer-free counters reset to 511 (see register reset 33489407). */
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+/* Unlike other modules, RAC register indices are byte-scale offsets. */
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/*
+ * RFD module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+/* 33024 == 0x8100, the 802.1Q TPID. */
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+/* 4789 is the IANA VXLAN UDP destination port. */
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/*
+ * RMC module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/*
+ * RPL module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* Single 128-bit wide data field. */
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/*
+ * RPP_LR module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/*
+ * RST9563 (reset controller for this FPGA image) field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+/* Reset register: most blocks start held in reset (see reset 8191 below). */
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+/* Sticky bits mirror the STAT layout; RC1 type (see register table). */
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/*
+ * SLC module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/*
+ * SPIM (SPI master) field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+/* Status: both FIFOs empty at reset (reset 6 = RXEMPTY | TXEMPTY). */
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/*
+ * SPIS (SPI slave) field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+/* Status: both FIFOs empty at reset (reset 6 = RXEMPTY | TXEMPTY). */
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/*
+ * STA (statistics) field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/*
+ * TEMPMON (temperature monitor) field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/*
+ * TINT (timer interrupt) field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/*
+ * TMC module field and register tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+/*
+ * TSM (time sync module) connector field tables.
+ * Field entries: { id, bit_width, bit_offset, reset }.
+ * Connectors 0-2 share one layout; connectors 3-6 share a reduced one.
+ * NOTE(review): CON5's sample-low field is named _LO_TIME while all the
+ * other connectors use _LO_NS -- generated naming, verify upstream.
+ */
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = {
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = {
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = {
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = {
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+/*
+ * TSM register table, sorted by register name. Each entry appears to be
+ *   { reg_id, reg_index, bit_width, access_type, reset_value, field_count,
+ *     fields }
+ * NOTE(review): member order inferred from the data (field_count always
+ * equals the length of the referenced tsm_*_fields array); confirm against
+ * nt_fpga_register_init_t in include/fpga_model.h, which is not visible in
+ * this patch hunk. Machine-generated -- do not edit by hand.
+ */
+static nt_fpga_register_init_t tsm_registers[] = {
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+/*
+ * Module instance map for FPGA image 9563_055_024_0000.
+ * Each entry appears to be
+ *   { instance_id, instance_no, module_def_id, ver_major, ver_minor,
+ *     bus, bus_address, register_count, registers }
+ * as corroborated by the per-entry trailing comments
+ * ("<NAME>:<no> <DEF> v<maj>.<min>: ... @ <BUS>,<addr>").
+ * NOTE(review): confirm member order against nt_fpga_module_init_t in
+ * include/fpga_model.h (not visible in this hunk).
+ * Machine-generated -- do not edit by hand.
+ */
+static nt_fpga_module_init_t fpga_modules[] = {
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+/*
+ * Product capability/parameter pairs for this FPGA image, sorted by
+ * parameter id name and terminated by a { 0, -1 } sentinel
+ * (NT_PARAM_UNKNOWN). The entry count (excluding the sentinel) must match
+ * the count passed in nthw_fpga_9563_055_024_0000 below.
+ * Machine-generated -- do not edit by hand.
+ */
+static nt_fpga_prod_param_t product_parameters[] = {
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1693492863 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+/*
+ * Top-level product descriptor for FPGA image 9563.55.24.0.0, referenced
+ * from nthw_fpga_instances (see nthw_fpga_instances.h). The trailing four
+ * values are (param_count, param_table, module_count, module_table);
+ * NOTE(review): the counts 140 and 48 must equal the number of non-sentinel
+ * entries in product_parameters and fpga_modules above -- they are emitted
+ * by the generator and should not be edited independently.
+ */
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = {
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1693492863, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules,
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+/*
+ * Registry of all supported FPGA image descriptors.
+ * NOTE(review): the array definition is not part of this patch hunk --
+ * presumably it is NULL-terminated and defined in a later commit; verify.
+ */
+extern nt_fpga_prod_init_t *nthw_fpga_instances[];
+
+/* Descriptor for FPGA image 9563.55.24 (nthw_fpga_9563_055_024_0000.c). */
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000;
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/*
+ * Numeric identifiers for every FPGA module type known to the PMD.
+ * Generated table -- values are a stable ABI between the driver and the
+ * register maps in nthw/supported/; never renumber existing entries,
+ * only append before MOD_UNKNOWN_MAX.
+ */
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+/*
+ * NOTE(review): id 32L is intentionally(?) skipped between MOD_MAC and
+ * MOD_MAC100 -- presumably a retired module id; do not reuse it.
+ */
+#define MOD_MAC100 (33L)
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * End indicators - keep these as the last numbered elements;
+ * only aliases go below this point.
+ */
+#define MOD_UNKNOWN_MAX (148L)
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..54db76b73e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM - SDRAM-backed frame buffer monitor: fill level, CRC error and drop counters (expansion inferred) */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM - flow learn/match module: learn (LRN), info (INF), status (STA) records and flow statistics */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG - frame generator: per-stream burst size, PRBS payload, size mask and stream id (expansion inferred) */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF - TX MAC frame FIFO: inter-frame-gap adjustment and timestamp injection control (expansion inferred) */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY - GPIO lines for two PHY/QSFP ports (LPMODE, MODPRS_B, RESET_B, INT_B, RXLOS) */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS - per-port (0-7) SFP-style GPIO lines (RXLOS, TXDISABLE, TXFAULT) */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP - SFP+ module GPIO lines (ABS, RS, RXLOS, TXDISABLE, TXFAULT) */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU - header field update recipes: length fields A/B/C, TTL rewrite, checksum info (expansion inferred) */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF - PCIe host interface: product id, PCIe config (tags/TLP sizes), test and statistics registers */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH - receive-side hashing recipes: Toeplitz key, seed, word selection and masking */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST - header strip/modify recipes: start/end offsets plus up to three modifier slots (expansion inferred) */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G - 10G PHY/MAC core: indirect register access, PHY status and test frame generator (TFG) */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR - per-recipe MTU enforcement; presumably IP fragmentation related -- TODO confirm */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC - I2C controller; register layout matches the Xilinx AXI IIC core (CR, SR, FIFOs, timing) */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS - data insertion recipes: dynamic offset, length, offset (expansion inferred) */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA - IO action recipes: VLAN push/pop/modify, tunnel pop and queue override */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF - IP fragment handling: unmatched-fragment queues, expiry/timeout and statistics */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM - key matcher: CAM banks and key extraction recipes */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX: RX MAC statistics counters (FCS/preamble/SFD errors, size buckets, totals) */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG: TFG control/data/frame-header registers (test frame generation) */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX: TX MAC statistics counters (errors, size buckets, totals) */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU: embedded MCU control/status (halt/pause/reset) and IRAM/DRAM access registers */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG: BSO/HBM/HBS table access, flow-control debug, and max-bytes/packets/timeout limits */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* PCIE3 */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01 */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01 */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100 */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM - host buffer size, pointer bank and RX/TX read/write pointer registers */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 - NT100A01 board power-up (NSEB/PHY) and reset control */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 - NT50B01 board power-up (NSEB/PHY) and reset control */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 - IEEE 1588 PTP block: PHY/MAC MDIO access, RX/TX host buffers, TX timestamps */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM - queue manager: block sizes, group/queue limits, SDRAM usage and status */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL - per-recipe queue/TX-port lookup tables (LTX/QEN/QST/RCP/UNMQ) */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI - SPI flash controller (register set resembles AXI Quad SPI: SRR/CR/SR/DTR/DRR/SSR; confirm against IP docs) */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP - register-to-DRP access (presumably transceiver dynamic reconfiguration port; confirm) */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC - register access: RAB in/out buffers, DMA ring pointers, debug regs */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH - RX host-buffer segment sizes and write-pointer control ("RBH" expansion unconfirmed) */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD - frame decoder config: max frame size, VLAN/VXLAN TPIDs and UDP ports */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC - RX merge/blocking control per MAC port / RPP slice, plus MAC-IF error status */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC - simple enable/empty-status/TX-data register set ("RNTC" expansion unconfirmed) */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA - recirculation/overlay: forwarding config, tunnel/LAG tables, drop counters */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL - replacement recipe (RCP), extension (EXT) and replacement-data (RPL) tables */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR - RX packet processing: per-recipe expansion and interface MTU (IFR) tables */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 - system reset for one FPGA image variant */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 - system reset for one FPGA image variant */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 - reset/clock-status block (NOTE: generated indices jump from 6521 to 6598 here) */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506 - no registers generated for this module */
+/* RST9507 - no registers generated for this module */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509 - no registers generated for this module */
+/* RST9510 - no registers generated for this module */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513 */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520 */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523 */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528 */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536: reset-module variant -- CTRL, POWER, RST, STAT, STICKY register/field IDs (values are generated sequential lookup IDs, not hardware offsets -- confirm against fpga_model.h) */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537: reset-module variant (DDR3/PHY3S/SI5326) -- CTRL, RST, RST_STAT, STAT, STICKY register/field IDs */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538: reset-module variant (DDR3/PHY3S/SI5326) -- same field layout as RST9537 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539: reset-module variant -- CTRL, POWER, RST, STAT, STICKY register/field IDs */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540: reset-module variant (adds MAC/PCS/SERDES reset fields) */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541: reduced reset module -- LATCH, POWER, RST, STAT register/field IDs */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542: reduced reset module -- like RST9541 but without the RST_POWER field */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543: reset-module variant -- CTRL, POWER, RST (incl. MAC_RX), STAT, STICKY register/field IDs */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544: reset-module variant (adds TS_CLKSEL_REF, NFV_OVS and TSM_REF_MMCM fields) */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545: reset-module variant -- CTRL, POWER, RST, STAT, STICKY register/field IDs */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546: reset-module variant (adds MAC/PCS/SERDES reset fields) */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547: reduced reset module -- LATCH, POWER, RST, STAT register/field IDs */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548: reset-module variant -- CTRL, POWER, RST, STAT, STICKY register/field IDs */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549: reset-module variant -- same field layout as RST9548 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553: minimal reset module -- LATCH, RST, STAT register/field IDs (note ID gap 8083->8094 after this section; gaps are produced by the generator) */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555: reset-module variant -- CTRL, POWER, RST, STAT, STICKY register/field IDs */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559: minimal reset module -- same field layout as RST9553 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563: reset-module variant (adds TSM_REF_MMCM reset) -- this is the module used by the FPGA image 9563_055_024 this patch adds */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD: CTRL, DEB_REG1-3, host-buffer/read-pointer address, STATUS and WRPTR register/field IDs */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP: RTD host-side companion -- CTRL, DEB_REG1-3, STATUS and write-pointer address register/field IDs */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX: CTRL, STATUS and TX-FIFO (TXF) register/field IDs */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+#define RXAUI_DEBUG (8268L) /* RXAUI module starts here -- section header comment is missing in the generated source, unlike all sibling modules */
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC: cell count, control, fill-level, max-fill-level and status register/field IDs */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC: recipe (RCP) control/data register/field IDs */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR: no IDs emitted in this range (generator left only the marker; note the ID gap 8688->8770) */
+/* SMM: control, ready/invalid-sticky status and segment-memory register/field IDs */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM: presumably SPI master -- CFG/CMD/CONF0-3, CR/SR/DRR/DTR/SRR and mirror/status register/field IDs */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS: presumably SPI slave -- CR/SR/DRR/DTR/SRR and RAM-access register/field IDs */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA: statistics -- byte/packet/FCS-error/CV-error counters, DMA config and host address register/field IDs */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH: control, debug, host-buffer descriptor/info/ports memories, port mapping and read-pointer register/field IDs (note ID gap 8890->9103 before this section) */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON: temperature monitor - current temperature and over-temperature (OT) alarms */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT: timer interrupt - interval control plus delayed/skipped status */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC: per-port RPL control registers (module name expansion unverified) */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM: time sync module - timestamping, external/NTTS sync, PI controller, timers, LEDs */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 2/8] net/ntnic: adds core registers and fpga functionality
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-05 14:54   ` Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v9:
* Add missing header
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef __NTHW_BUS_H__
#define __NTHW_BUS_H__

#include <stdint.h>	/* uint8_t - keep the header self-contained */

/* Identifier of a register-access bus instance (RAB bus id - confirm expansion) */
typedef uint8_t rab_bus_id_t;

#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+/*
+ * Placeholder definitions: no initializers, so the entry count is
+ * zero-initialized to 0 and the table pointer to NULL. Presumably the
+ * real Si5340 register table is supplied in a later commit - confirm.
+ */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Record layouts for clock-chip (e.g. Si5340) register programming
+ * profiles, plus the extern declarations of the NT200A02 profile table
+ * defined in nthw_clock_profiles.c.
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+/* Message text for profile-size checks (used elsewhere - confirm caller) */
+#define clk_profile_size_error_msg "size test failed"
+
+/* Format 0: 8-bit register address, value and update mask */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Format 1: 16-bit register address, 8-bit value (no mask) */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Format 2: full-width register address, 8-bit value (no mask) */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Selects which of the record layouts above a profile table uses */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/* NT200A02 U23 v5 Si5340 profile: entry count and format-2 table */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const  clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Umbrella header: pulls in the public headers of all NTHW core
+ * modules (HIF/PCIe, IIC, GPIO/PHY, MAC-PCS, SDC, SPI, TSM, Si5340)
+ * so consumers can include a single header.
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Populate @p_fpga_info with product parameters read from @p_fpga:
+ * NIM, PHY-port, PHY-quad, Rx-port and Tx-port counts (each -1 when the
+ * parameter is absent), plus a derived profile: VSWITCH when the
+ * NFV-OVS product flag (or the legacy IOA flag) is set, CAPTURE when a
+ * QM is present, otherwise INLINE.
+ *
+ * Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	const int n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	const int n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	const int n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	const int n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	const int n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	p_fpga_info->n_nims = n_nims;
+	p_fpga_info->n_phy_ports = n_phy_ports;
+	p_fpga_info->n_phy_quads = n_phy_quads;
+	p_fpga_info->n_rx_ports = n_rx_ports;
+	p_fpga_info->n_tx_ports = n_tx_ports;
+	p_fpga_info->profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	/* Check for VSWITCH FPGA */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	/* Check for VSWITCH FPGA - legacy */
+	else if (fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * One-shot read of a single register byte over IIC instance
+ * @n_instance_no from device @n_dev_addr, register @n_reg_addr.
+ * A temporary stack-local IIC context is initialized for the transfer.
+ *
+ * Returns the byte value (0..255) on success, or -1 on init/read
+ * failure (unambiguous, since valid data never exceeds 255).
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t nthw_iic;
+	uint8_t val = 0;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_readbyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		return val;
+
+	else
+		return -1;
+}
+
+/*
+ * One-shot write of a single register byte over IIC instance
+ * @n_instance_no to device @n_dev_addr, register @n_reg_addr.
+ * Counterpart of nthw_fpga_iic_read_byte(); uses a temporary
+ * stack-local IIC context.
+ *
+ * Returns 0 on success, -1 on init/write failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t nthw_iic;
+
+	if (nthw_iic_init(&nthw_iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+
+	if (nthw_iic_writebyte(&nthw_iic, n_dev_addr, n_reg_addr, 1, &val) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Scan a range of IIC bus instances [@n_instance_no_begin ..
+ * @n_instance_no_end], inclusive. For each instance that initializes
+ * successfully, retry parameters are tightened and nthw_iic_scan() is
+ * run (which presumably probes/logs attached devices - see nthw_iic.c).
+ * Instances that fail allocation or init are silently skipped.
+ *
+ * Always returns 0; asserts that the range is well-formed.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int i;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (i = n_instance_no_begin; i <= n_instance_no_end; i++) {
+		nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+		if (p_nthw_iic) {
+			const int rc = nthw_iic_init(p_nthw_iic, p_fpga, i, 8);
+
+			if (rc == 0) {
+				nthw_iic_set_retry_params(p_nthw_iic, -1, 100, 100,
+						       3, 3);
+				nthw_iic_scan(p_nthw_iic);
+			}
+			nthw_iic_delete(p_nthw_iic);
+			p_nthw_iic = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Detect which SiLabs clock chip sits at @n_dev_addr on IIC instance
+ * @n_instance_no: select device page 0 via @n_page_reg_addr, read an
+ * 8-byte identification block from register 0x00, and map the ident
+ * bytes to a part number.
+ *
+ * Returns 5340, 5341 or 5338 when recognized, -1 otherwise (allocation
+ * failure, read failure, or unknown ident). The ident value logged at
+ * DBG level stays all-ones (initial -1) when the read fails.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* Pack the 8 ident bytes MSB-first for the debug log */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			/*
+			 * NOTE(review): 38 below is decimal, unlike the hex
+			 * 0x40/0x41 above - confirm whether 0x38 (Si5338)
+			 * was intended.
+			 */
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
/*
 * Calculate CRC-16-CCITT of the passed data.
 *
 * Polynomial x^16 + x^12 + x^5 + 1 (0x1021), initial value 0x0000,
 * no bit reflection, no final XOR - i.e. the CRC-16/XMODEM variant
 * (X.25/HDLC/Bluetooth/SD use the same polynomial; check value for
 * "123456789" is 0x31C3). Byte-at-a-time, table-free formulation.
 *
 * @param buffer data to checksum (read-only; may be NULL iff length == 0)
 * @param length number of bytes in @buffer
 * @return the 16-bit CRC
 */
static uint16_t crc16(const uint8_t *buffer, size_t length)
{
	uint16_t crc = 0;

	while (length--) {
		crc = (uint16_t)((crc >> 8) | (crc << 8));	/* byte swap */
		crc = (uint16_t)(crc ^ *buffer++);
		crc = (uint16_t)(crc ^ ((crc & 0xffu) >> 4));
		crc = (uint16_t)(crc ^ (crc << 12));
		crc = (uint16_t)(crc ^ ((crc & 0xffu) << 5));
	}
	return crc;
}
+
+/*
+ * Probe the AVR microcontroller reachable over the SPI v3 bus of the given
+ * FPGA instance and cache the data it reports.
+ *
+ * Performs, in order: AVR_OP_SPI_VERSION, AVR_OP_VERSION, AVR_OP_SYSINFO_2
+ * (falling back to AVR_OP_SYSINFO) and AVR_OP_VPD_READ.  Results are parsed
+ * into a local struct avr_vpd_info_s and selected fields (platform id
+ * string, hw_id, MAC address info) are copied into
+ * p_fpga->p_fpga_info->nthw_hw_info.
+ *
+ * @param p_fpga        FPGA owning the SPI controller; must be non-NULL.
+ * @param n_instance_no SPI v3 module instance to use.
+ * @return result of the last SPI transfer issued (0 on success),
+ *         or -1 if the SPI controller object could not be allocated.
+ */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		/* Aggregate of everything read back from the AVR. */
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		/* &rx_data is the array's own address (same value as rx_data) */
+		rx_buf.p_buf = &rx_data;
+		/*
+		 * NOTE(review): res is overwritten by the next transfer before it is
+		 * ever checked, so a failed AVR_OP_VERSION is parsed anyway.
+		 */
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		/* Reply layout: ver major/minor/micro, 50-byte ver string,
+		 * 20-byte platform id string.
+		 */
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		/* Publish the platform id string to the shared hw info. */
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		/* NOTE(review): redundant -- rte_strscpy already NUL-terminates */
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		/* rx_buf.size is presumably updated to the actual reply length
+		 * by the transfer -- TODO confirm against nthw_spi_v3_transfer.
+		 */
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			/* Fixed 16-byte container: [0]=version, [1..4]=libc ver,
+			 * [5..7]=signature, [8]=spi ver, [9]=hw rev,
+			 * [10]=ticks/s, [11..14]=uptime, [15]=osccal.
+			 */
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			/* Cross-check the two independently reported SPI versions. */
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO -- fallback for firmware without SYSINFO_2.
+			 * Same 16-byte container layout as above.
+			 */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				/* NOTE(review): same HW_REV logged twice (DBG then INF) */
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				/* Legacy SYSINFO path: hw_id and hw_id_emulated
+				 * are both set from the reported revision.
+				 */
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			/* Last two bytes of the VPD image hold the stored CRC;
+			 * recompute over the payload and compare.
+			 */
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			/* Only trust the VPD fields when the CRC matches.
+			 * Layout: psu_hw_ver, PN, PBA, SN, board name, then
+			 * the platform section (BoardInfo).
+			 */
+			if (avr_vpd_info.b_crc16_valid) {
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					/* feature_mask is stored twice: plain and
+					 * bit-inverted; they must agree.
+					 */
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					/* Pack the 6-byte MAC into the low 48 bits of
+					 * a uint64, swapping 32-bit halves on
+					 * little-endian hosts (detected via ntohl).
+					 */
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			/* NOTE(review): published even when the CRC check failed
+			 * (fields are then all-zero from the memset above).
+			 */
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			/* NOTE(review): ARRAY_SIZE(dst) used as a *byte* count for
+			 * memcpy from a 6-byte source -- only safe if
+			 * ma_mac_addr_octets is uint8_t[6]; confirm.
+			 */
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			/* NOTE(review): message says SYSINFO2 but this is the
+			 * VPD_READ failure path -- looks copy-pasted.
+			 */
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ *
+ * Configure the Si5340 clock synthesizer over I2C with a fmt2 clock
+ * profile.  Returns the result of nthw_si5340_config_fmt2().
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	nthw_iic_t *p_iic = nthw_iic_new();
+	nthw_si5340_t *p_si5340 = nthw_si5340_new();
+	int rc;
+
+	assert(p_iic);
+	assert(p_si5340);
+
+	/* I2C cycle time 125Mhz ~ 8ns */
+	nthw_iic_init(p_iic, p_fpga, 0, 8);
+
+	/* Si5340_U23_I2c_Addr_7bit */
+	nthw_si5340_init(p_si5340, p_iic, n_iic_addr);
+	rc = nthw_si5340_config_fmt2(p_si5340, p_clk_profile,
+				     n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_si5340);
+	p_si5340 = NULL;
+
+	return rc;
+}
+
+/*
+ * Identify and initialize the FPGA described by @p_fpga_info.
+ *
+ * Reads the FPGA ident/build-time registers, looks the FPGA up in the FPGA
+ * manager, runs the adapter-specific init, and probes the PCIe3/HIF and TSM
+ * modules, storing the resulting handles in @p_fpga_info.
+ *
+ * @return 0 on success, negative on error.
+ *
+ * Fixes vs. previous revision:
+ *  - mp_adapter_id_str was dereferenced before assert(p_fpga_info);
+ *  - an inner "uint64_t n_fpga_ident" shadowed the function-scope variable;
+ *  - p_fpga_mgr leaked on the unsupported-FPGA error path;
+ *  - the DEBUG sample loop dereferenced p_nthw_tsm without a NULL check.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *p_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	/* Validate before the first dereference. */
+	assert(p_fpga_info);
+	p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		/* Raw register reads: ident low/high and build timestamp. */
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		/* Assign the function-scope n_fpga_ident directly (the shadowing
+		 * local declaration has been removed).
+		 */
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	/* Look the ident up in the FPGA manager; the manager itself is only
+	 * needed for the lookup, so delete it before the error check to avoid
+	 * leaking it on the unsupported-FPGA path.
+	 */
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-specific init. */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Probe for the PCIe3 module; fall back to HIF if absent. */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	/* Time Stamp Module: timer T0 drives stat toggling, T1 keep-alive. */
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	/* Guard: nthw_tsm_new() may have returned NULL above. */
+	if (p_nthw_tsm) {
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut down the FPGA: reset the RAB via the RAC instance, if present.
+ *
+ * @return result of nthw_rac_rab_reset(), or -1 when @p_fpga_info or its
+ *         RAC handle is NULL.
+ *
+ * The previous revision re-checked p_fpga_info inside an outer
+ * "if (p_fpga_info)" block; the redundant nesting has been collapsed.
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the nthw FPGA core: probe/init/shutdown entry points
+ * plus small helpers for AVR, I2C and Si5340 clock-synth access.
+ */
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+/* FPGA life cycle: init on adapter attach, shutdown on detach.
+ * Both return 0 on success, negative on error.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+/* Read FPGA build-time parameters into p_fpga_info. */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+/* Probe the AVR behind SPI v3 instance n_instance_no and cache its info. */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+/* Scan the given range of I2C controller instances. */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+/* Single-register byte access on an I2C device. */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+/* Detect a Silicon Labs device at n_dev_addr via its page register. */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+/* Configure the Si5340 clock synthesizer with a fmt2 clock profile. */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x adapter init: run the common NT200A0x reset sequence, then the
+ * product-specific (currently only 9563) reset.
+ *
+ * @return 0 on success, negative on error (already logged).
+ *
+ * Cleanups vs. previous revision: removed the dead "res = -1" initializer
+ * (unconditionally overwritten) and collapsed the duplicated
+ * log-then-return tail into a single return path (behavior unchanged
+ * apart from the __LINE__ value in the log).
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res;
+
+	/* reset common */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* reset specific */
+	switch (p_fpga_info->n_fpga_prod_id) {
+	case 9563:
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+		break;
+	}
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NT200A0x adapter-specific FPGA init entry points. */
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+/* Common + product-specific reset/init for NT200A0x boards.
+ * Returns 0 on success, negative on error.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/*
+	 * Resolve all RST9563 register/field pointers below.
+	 * Fields that do not exist on the 9563 FPGA are set to NULL so the
+	 * common NT200A0X reset sequence can test for their presence.
+	 * (A verbatim duplicate of the module lookup above was removed.)
+	 */
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL;
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	/* (removed duplicate lookup of RST9563_RST_PTP) */
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	/* Field not present on 9563 */
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga) /* pulse RST9563 PERIPH reset; 0 on success, -1 if module absent */
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+
+	if (p_mod_rst) {
+		nt_register_t *p_reg_rst;
+		nt_field_t *p_fld_rst_periph;
+
+		NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+		p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+		p_fld_rst_periph = register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+		field_set_flush(p_fld_rst_periph); /* assert peripheral reset */
+		field_clr_flush(p_fld_rst_periph); /* de-assert peripheral reset */
+	} else {
+		return -1; /* MOD_RST9563 instance 0 not found */
+	}
+	return 0;
+}
+
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{ /* program the on-board SiLabs clock synthesizer; only Si5340 is supported here */
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	int res;
+
+	if (n_si_labs_clock_synth_model == 5340) { /* Si5340 (NT200A02 u23 profile v5) */
+		res = nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+			n_si_labs_clock_synth_i2c_addr,
+			p_data_si5340_nt200a02_u23_v5,
+			n_data_si5340_nt200a02_u23_v5);
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id, n_si_labs_clock_synth_model);
+		res = -1; /* e.g. Si5338 on old NT200A01 HW-build1 is not handled here */
+	}
+	return res;
+}
+
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{ /* 9563 reset: periph reset -> clock synth -> field setup -> common NT200A0X reset */
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model;
+	uint8_t n_si_labs_clock_synth_i2c_addr;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+	n_si_labs_clock_synth_model = p_rst->mn_si_labs_clock_synth_model; /* filled in by nthw_fpga_rst_nt200a0x_init() */
+	n_si_labs_clock_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga); /* step 1: pulse peripheral reset */
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_si_labs_clock_synth_model,
+						n_si_labs_clock_synth_i2c_addr); /* step 2: program clock synth */
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst); /* step 3: resolve register field pointers */
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst); /* step 4: run common reset sequence */
+	if (res) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked; /* result of field_wait_set_any32(); 0 means locked */
+	uint32_t retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount == 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		timeout =
+			80000; /* Increase timeout for second attempt to 8 sec. */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) { /* field is optional on some FPGAs */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{ /* wait for DDR4 PLL lock and SDRAM controller calibration, retrying with DDR resets */
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			nthw_sdc_delete(p_nthw_sdc); /* no-op: p_nthw_sdc is NULL here */
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			if (p_nthw_sdc) {
+				/*
+				 * Wait for SDRAM controller has been calibrated
+				 * On some adapters we have seen calibration time of 2.3 seconds
+				 */
+				NT_LOG(DBG, NTHW,
+				       "%s: Waiting for SDRAM to calibrate\n",
+				       p_adapter_id_str);
+				res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+				{
+					uint64_t n_result_mask;
+
+					int n_state_code _unused =
+						nthw_sdc_get_states(p_nthw_sdc,
+								  &n_result_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+					       p_adapter_id_str, n_result_mask,
+					       n_state_code, n_retry_cnt, res); /* NOTE(review): %08lX with uint64_t is wrong on 32-bit - PRIX64? confirm */
+				}
+				if (res == 0)
+					break;
+			}
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				uint64_t n_result_mask;
+				int n_state_code _unused = nthw_sdc_get_states(p_nthw_sdc,
+									       &n_result_mask); /* NOTE(review): p_nthw_sdc may be NULL here - confirm get_states tolerates NULL */
+
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask, n_state_code,
+				       n_retry_cnt, res);
+				if (res != 0) {
+					NT_LOG(ERR, NTHW,
+					       "%s: Timeout waiting for SDRAM controller calibration\n",
+					       p_adapter_id_str);
+				}
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{ /* reset RAB buses and detect the clock synth model/address into *p_rst */
+	assert(p_fpga_info); /* NOTE(review): p_rst is dereferenced below but not asserted - confirm callers never pass NULL */
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	res = nthw_fpga_avr_probe(p_fpga, 0); /* NOTE(review): result discarded - res is overwritten below; confirm probe is best-effort */
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0); /* NOTE(review): result overwritten by the next scan */
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3); /* this res is what the function ultimately returns on success */
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
+/*
+ * Reset/clock state and register-field handles for an NT200A0x adapter.
+ * Populated by nthw_fpga_rst_nt200a0x_init() and used by the reset flow.
+ */
+struct nthw_fpga_rst_nt200a0x {
+	/* FPGA identification */
+	int mn_fpga_product_id;
+	int mn_fpga_version;
+	int mn_fpga_revision;
+
+	int mn_hw_id;
+
+	/* Detected Si-Labs clock synthesizer (model and 7-bit I2C address) */
+	int mn_si_labs_clock_synth_model;
+	uint8_t mn_si_labs_clock_synth_i2c_addr;
+
+	/* RST register field pointers */
+	nt_field_t *mp_fld_rst_sys;
+	nt_field_t *mp_fld_rst_sys_mmcm;
+	nt_field_t *mp_fld_rst_core_mmcm;
+	nt_field_t *mp_fld_rst_rpp;
+	nt_field_t *mp_fld_rst_ddr4;
+	nt_field_t *mp_fld_rst_sdc;
+	nt_field_t *mp_fld_rst_phy;
+	nt_field_t *mp_fld_rst_serdes_rx;
+	nt_field_t *mp_fld_rst_serdes_tx;
+	nt_field_t *mp_fld_rst_serdes_rx_datapath;
+	nt_field_t *mp_fld_rst_pcs_rx;
+	nt_field_t *mp_fld_rst_mac_rx;
+	nt_field_t *mp_fld_rst_mac_tx;
+	nt_field_t *mp_fld_rst_ptp;
+	nt_field_t *mp_fld_rst_ts;
+	nt_field_t *mp_fld_rst_ptp_mmcm;
+	nt_field_t *mp_fld_rst_ts_mmcm;
+	nt_field_t *mp_fld_rst_periph;
+	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
+	nt_field_t *mp_fld_rst_tmc;
+
+	/* CTRL register field pointers */
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel;
+	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
+	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;
+
+	/* STAT register field pointers */
+	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
+	nt_field_t *mp_fld_stat_sys_mmcm_locked;
+	nt_field_t *mp_fld_stat_core_mmcm_locked;
+	nt_field_t *mp_fld_stat_ddr4_pll_locked;
+	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
+	nt_field_t *mp_fld_stat_ts_mmcm_locked;
+	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;
+
+	/* STICKY register field pointers */
+	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
+	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
+	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;
+
+	/* POWER register field pointers */
+	nt_field_t *mp_fld_power_pu_phy;
+	nt_field_t *mp_fld_power_pu_nseb;
+	/*  */
+
+	/* Product-specific serdes/PCS reset hooks — NOTE(review): set
+	 * elsewhere; confirm whether they may be NULL before calling.
+	 */
+	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			      uint32_t rst);
+	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			   uint32_t rst);
+	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
+				  uint32_t intf_no, uint32_t *p_set);
+	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
+			       uint32_t *p_set);
+	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
+};
+
+typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+/* Allocate a zero-initialized GPIO_PHY instance; returns NULL on OOM. */
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_gpio_phy_t));
+}
+
+/* Scrub and free a GPIO_PHY instance; NULL is accepted and ignored. */
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nthw_gpio_phy_t));
+	free(p);
+}
+
+/*
+ * Bind a GPIO_PHY instance to FPGA module MOD_GPIO_PHY `n_instance`.
+ *
+ * When p == NULL the function only probes for the module: returns 0 if
+ * present, -1 otherwise. With a valid p it resolves all CFG/GPIO fields
+ * for both ports; the PLL_INTR and RXLOS fields are optional (resolved
+ * with register_query_field and may end up NULL).
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	/* Probe-only mode: report module presence without binding */
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	/* NOTE(review): register_update presumably re-reads the CFG
+	 * register shadow from hardware — confirm against nthw_fpga_model.
+	 */
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+/* Return true when the port's GPIO low-power pin reads non-zero. */
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode) != 0;
+}
+
+/* Return true when the port interrupt is asserted. */
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_int) == 0;
+}
+
+/* Return true when the port is held in reset. */
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_reset) == 0;
+}
+
+/* Return true when a transceiver module is plugged into the port. */
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U;
+}
+
+/* Return true when the optional PLL interrupt pin is asserted. */
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* this HW doesn't support "PLL_INTR" (INTR from SyncE jitter attenuater) */
+	if (!p->mpa_fields[if_no].gpio_pll_int)
+		return false;
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_pll_int) != 0;
+}
+
+/* Return true when the optional emulated RXLOS pin reads non-zero. */
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* RXLOS emulation is optional on this FPGA */
+	if (!p->mpa_fields[if_no].gpio_port_rxlos)
+		return false;
+
+	return field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos) != 0;
+}
+
+/* Drive the port's low-power pin and switch its CFG bit to output. */
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	if (enable)
+		field_set_flush(flds->gpio_fld_lp_mode);
+	else
+		field_clr_flush(flds->gpio_fld_lp_mode);
+	field_clr_flush(flds->cfg_fld_lp_mode); /* enable output */
+}
+
+/* Drive the active-low RESET_B pin and switch its CFG bit to output. */
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	/* pin is active-low: assert reset by clearing, release by setting */
+	if (enable)
+		field_clr_flush(flds->gpio_reset);
+	else
+		field_set_flush(flds->gpio_reset);
+	field_clr_flush(flds->cfg_reset); /* enable output */
+}
+
+/* Drive the optional emulated RXLOS pin; no-op when not implemented. */
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	nt_field_t *fld;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	fld = p->mpa_fields[if_no].gpio_port_rxlos;
+	if (!fld)
+		return;
+
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Restore the port's CFG defaults: module pins as inputs, RXLOS (if
+ * implemented) as output.
+ */
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	gpio_phy_fields_t *flds;
+
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+	flds = &p->mpa_fields[if_no];
+
+	field_set_flush(flds->cfg_fld_lp_mode); /* enable input */
+	field_set_flush(flds->cfg_int); /* enable input */
+	field_set_flush(flds->cfg_reset); /* enable input */
+	field_set_flush(flds->cfg_mod_prs); /* enable input */
+	if (flds->cfg_port_rxlos)
+		field_clr_flush(flds->cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
+/* Per-port CFG (pin direction) and GPIO (pin value) field handles.
+ * The PLL-interrupt and RXLOS fields are optional: they are resolved
+ * with register_query_field() in nthw_gpio_phy_init and may be NULL.
+ */
+typedef struct {
+	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
+	nt_field_t *cfg_int; /* Cfg Port Interrupt */
+	nt_field_t *cfg_reset; /* Cfg Reset */
+	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
+	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt */
+	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS */
+
+	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
+	nt_field_t *gpio_int; /* Gpio Port Interrupt */
+	nt_field_t *gpio_reset; /* Gpio Reset */
+	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
+	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt */
+	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS */
+} gpio_phy_fields_t;
+
+/* One GPIO_PHY module instance: register handles plus the per-port
+ * field table (two ports, see GPIO_PHY_INTERFACES).
+ */
+struct nthw_gpio_phy {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gpio_phy;
+	int mn_instance;
+
+	/* Registers */
+	nt_register_t *mp_reg_config;
+	nt_register_t *mp_reg_gpio;
+
+	/* Fields */
+	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
+};
+
+typedef struct nthw_gpio_phy nthw_gpio_phy_t;
+typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+/* Allocate a zero-initialized HIF instance; returns NULL on OOM. */
+nthw_hif_t *nthw_hif_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_hif_t));
+}
+
+/* Scrub and free a HIF instance; NULL is accepted and ignored. */
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nthw_hif_t));
+	free(p);
+}
+
+/*
+ * Bind a HIF (host interface) instance to FPGA module MOD_HIF
+ * `n_instance` and resolve its registers and fields.
+ *
+ * When p == NULL the function only probes for the module: returns 0 if
+ * present, -1 otherwise. Optional registers (STATUS, CONFIG, interrupt
+ * registers) leave their field pointers NULL when absent; callers must
+ * check before use. Returns 0 on success, -1 if the module instance
+ * does not exist.
+ */
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	/* Probe-only mode: report module presence without binding */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	/* Product identification registers */
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	/* Cache the FPGA identification for later queries */
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	/*
+	 * Build seed and core/DDR3 speed are not present on HIF; the
+	 * previous dead `if` chains testing pointers just set to NULL
+	 * have been collapsed to plain NULL assignments.
+	 */
+	p->mp_reg_build_seed = NULL;
+	p->mp_fld_build_seed = NULL;
+	p->mp_reg_core_speed = NULL;
+	p->mp_fld_core_speed = NULL;
+	p->mp_fld_ddr3_speed = NULL;
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	/* Statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	/* STATUS register is optional; fields stay NULL when absent
+	 * (previously the else branch re-queried the same NULL register
+	 * and passed it to register_query_field).
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		p->mp_fld_status_tags_in_use = NULL;
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+	if (p->mp_reg_config) {
+		p->mp_fld_max_tlp =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+		p->mp_fld_max_read =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+		p->mp_fld_ext_tag =
+			register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+	} else {
+		p->mp_fld_max_tlp = NULL;
+		p->mp_fld_max_read = NULL;
+		p->mp_fld_ext_tag = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Tune PCIe gen3 behavior: cap the negotiated MaxReadReq at 512B and
+ * enable extended tags. Skipped entirely on NT40E3 adapters.
+ */
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT40E3)
+		return 0;
+
+	if (p->mp_fld_max_read) {
+		/*
+		 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+		 * (index=5), but the server crashed. For now we need to limit this value to
+		 * 512 (index=2)
+		 */
+		const uint32_t n_max_read_req_size =
+			field_get_updated(p->mp_fld_max_read);
+
+		if (n_max_read_req_size > 2) {
+			field_set_val_flush32(p->mp_fld_max_read, 2);
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+			       p_adapter_id_str, __func__,
+			       n_max_read_req_size);
+		}
+	}
+
+	if (p->mp_fld_ext_tag)
+		field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+	if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+		NT_LOG(INF, NTHW,
+		       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+		       p_adapter_id_str, __func__,
+		       field_get_updated(p->mp_fld_max_tlp),
+		       field_get_updated(p->mp_fld_max_read),
+		       field_get_updated(p->mp_fld_ext_tag));
+	}
+	return 0;
+}
+
+/*
+ * Latch a TSM sample by writing the magic trigger value to the HIF
+ * SAMPLE_TIME field. Returns -1 if the field is absent (nthw_hif_init
+ * leaves mp_fld_sample_time NULL when the register is missing —
+ * previously this dereferenced NULL), 0 otherwise.
+ */
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	if (!p->mp_fld_sample_time)
+		return -1;
+
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/*
+ * Read the raw HIF statistics: PCIe RX/TX counters, reference-clock
+ * tick count, traffic-gauge unit size/frequency, and the optional
+ * STATUS fields (reported as 0 when not present). Always returns 0.
+ */
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	/* Optional STATUS fields: report 0 when absent in this FPGA */
+	if (p->mp_fld_status_tags_in_use)
+		*p_tags_in_use = field_get_updated(p->mp_fld_status_tags_in_use);
+	else
+		*p_tags_in_use = 0;
+
+	if (p->mp_fld_status_rd_err)
+		*p_rd_err = field_get_updated(p->mp_fld_status_rd_err);
+	else
+		*p_rd_err = 0;
+
+	if (p->mp_fld_status_wr_err)
+		*p_wr_err = field_get_updated(p->mp_fld_status_wr_err);
+	else
+		*p_wr_err = 0;
+
+	return 0;
+}
+
+/*
+ * Convert the raw HIF counters to PCIe byte rates scaled by the
+ * reference clock. The rd/wr error counters are accumulated into the
+ * caller's variables (one tick per sample showing errors), not
+ * overwritten. Always returns 0.
+ */
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t n_rx, n_tx, n_ref_clk, n_unit_size, n_ref_freq;
+	uint64_t n_tags, n_rd, n_wr;
+
+	nthw_hif_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size, &n_ref_freq,
+			&n_tags, &n_rd, &n_wr);
+
+	*p_tags_in_use = n_tags;
+
+	if (n_rd)
+		(*p_rd_err_cnt)++;
+	if (n_wr)
+		(*p_wr_err_cnt)++;
+
+	/* Without a time base the rates are undefined: report zero */
+	if (n_ref_clk == 0) {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		return 0;
+	}
+
+	*p_ref_clk_cnt = n_ref_clk;
+	*p_pci_rx_rate = ((uint64_t)n_rx * n_unit_size * n_ref_freq) / n_ref_clk;
+	*p_pci_tx_rate = ((uint64_t)n_tx * n_unit_size * n_ref_freq) / n_ref_clk;
+
+	return 0;
+}
+
+/* Start a statistics window: set STAT_ENA and pulse STAT_REQ. */
+int nthw_hif_stat_req_enable(nthw_hif_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* End a statistics window: clear STAT_ENA while pulsing STAT_REQ. */
+int nthw_hif_stat_req_disable(nthw_hif_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Sample HIF PCIe statistics over a fixed 100 ms window: enable the
+ * counters, wait, disable, then convert the counters to rates. The
+ * error-count outputs are accumulated (see nthw_hif_get_stat_rate),
+ * so callers should initialize them. Always returns 0.
+ */
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
+{
+	nthw_hif_stat_req_enable(p);
+	NT_OS_WAIT_USEC(100000);
+	nthw_hif_stat_req_disable(p);
+	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
+			    p_rd_err_cnt, p_wr_err_cnt);
+
+	return 0;
+}
+
+/*
+ * Fill an end-point counter record from the current HIF rates.
+ * NOTE(review): epc->cur_tx receives nthw_hif_get_stat_rate's RX-rate
+ * output and epc->cur_rx the TX-rate output — seemingly the direction
+ * is viewed from the link partner; confirm the swap is intentional.
+ */
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc)
+{
+	assert(epc);
+
+	/* Get stat rate and maintain rx/tx min/max */
+	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+#define NTHW_TG_CNT_SIZE (4ULL)
+
+/*
+ * One HIF (host interface) module instance: register/field handles
+ * plus FPGA identification and reference-clock parameters cached by
+ * nthw_hif_init(). Pointers documented "not present on HIF" are set
+ * to NULL by init and must be checked before use.
+ */
+struct nthw_hif {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_hif;
+	int mn_instance;
+
+	/* Product identification (PROD_ID_LSB/MSB, BUILD_TIME) */
+	nt_register_t *mp_reg_prod_id_lsb;
+	nt_field_t *mp_fld_prod_id_lsb_rev_id;
+	nt_field_t *mp_fld_prod_id_lsb_ver_id;
+	nt_field_t *mp_fld_prod_id_lsb_group_id;
+
+	nt_register_t *mp_reg_prod_id_msb;
+	nt_field_t *mp_fld_prod_id_msb_type_id;
+	nt_field_t *mp_fld_prod_id_msb_build_no;
+
+	nt_register_t *mp_reg_build_time;
+	nt_field_t *mp_fld_build_time;
+
+	/* Not present on HIF: always NULL (see nthw_hif_init) */
+	nt_register_t *mp_reg_build_seed;
+	nt_field_t *mp_fld_build_seed;
+
+	nt_register_t *mp_reg_core_speed;
+	nt_field_t *mp_fld_core_speed;
+	nt_field_t *mp_fld_ddr3_speed;
+
+	/* Interrupt registers: not present on HIF, always NULL */
+	nt_register_t *mp_reg_int_mask;
+	nt_field_t *mp_fld_int_mask_timer;
+	nt_field_t *mp_fld_int_mask_port;
+	nt_field_t *mp_fld_int_mask_pps;
+
+	nt_register_t *mp_reg_int_clr;
+	nt_field_t *mp_fld_int_clr_timer;
+	nt_field_t *mp_fld_int_clr_port;
+	nt_field_t *mp_fld_int_clr_pps;
+
+	nt_register_t *mp_reg_int_force;
+	nt_field_t *mp_fld_int_force_timer;
+	nt_field_t *mp_fld_int_force_port;
+	nt_field_t *mp_fld_int_force_pps;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Optional STATUS register; fields may be NULL */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_status_tags_in_use;
+	nt_field_t *mp_fld_status_wr_err;
+	nt_field_t *mp_fld_status_rd_err;
+
+	/* Statistics control and counters */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+	nt_field_t *mp_fld_stat_ctrl_req;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+
+	/* Optional PCIe CONFIG register; fields may be NULL */
+	nt_register_t *mp_reg_config;
+	nt_field_t *mp_fld_max_tlp;
+	nt_field_t *mp_fld_max_read;
+	nt_field_t *mp_fld_ext_tag;
+
+	/* FPGA identification cached from PROD_ID at init time */
+	int mn_fpga_id_item;
+	int mn_fpga_id_prod;
+	int mn_fpga_id_ver;
+	int mn_fpga_id_rev;
+	int mn_fpga_id_build_no;
+
+	/* HIF reference-clock period (ps) and derived frequency (Hz) */
+	int mn_fpga_param_hif_per_ps;
+	uint32_t mn_fpga_hif_ref_clk_freq;
+};
+
+typedef struct nthw_hif nthw_hif_t;
+typedef struct nthw_hif nthw_hif;
+
+/* PCIe error counters for one end point. */
+struct nthw_hif_end_point_err_counters {
+	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
+};
+
+/* Sampled counters, traffic-generator settings and error snapshots for
+ * one PCIe end point (see nthw_hif_end_point_counters_sample).
+ */
+struct nthw_hif_end_point_counters {
+	int n_numa_node;
+
+	/* Traffic-generator parameters */
+	int n_tg_direction;
+	int n_tg_pkt_size;
+	int n_tg_num_pkts;
+	int n_tg_delay;
+
+	/* Current rates and reference-clock tick count */
+	uint64_t cur_rx, cur_tx;
+	uint64_t cur_pci_nt_util, cur_pci_xil_util;
+	uint64_t n_ref_clk_cnt;
+
+	uint64_t n_tags_in_use;
+	uint64_t n_rd_err;
+	uint64_t n_wr_err;
+
+	/* Error snapshots: root-complex<->end-point, pre/post/delta */
+	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
+	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;
+
+	int bo_error;
+};
+
+/* Primary and slave end points of one adapter. */
+struct nthw_hif_end_points {
+	struct nthw_hif_end_point_counters pri, sla;
+};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/* Pulse the TX-FIFO reset bit in CR: assert, flush, deassert, flush */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Queue one word in the TX FIFO together with its START/STOP qualifiers,
+ * then flush the whole register in a single write.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	(start ? field_set_all : field_clr_all)(p->mp_fld_tx_fifo_start);
+	(stop ? field_set_all : field_clr_all)(p->mp_fld_tx_fifo_stop);
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the RX FIFO into *p_data */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	uint32_t raw;
+
+	assert(p_data);
+
+	raw = field_get_updated(p->mp_fld_rx_fifo_rxdata);
+	*p_data = (uint8_t)raw;
+
+	return 0;
+}
+
+/*
+ * Soft-reset the controller by writing 0x0A to the SOFTR register.
+ * NOTE(review): 0x0A appears to be the controller's reset key — confirm
+ * against the FPGA/IP documentation.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Set the enable bit in CR and flush it, turning the controller on */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read the SR bus-busy bit; *pb_flag is true while the bus is busy */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_bb) != 0);
+
+	return 0;
+}
+
+/* Read the SR RX-FIFO-empty bit; *pb_flag is true when no data is pending */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = (field_get_updated(p->mp_fld_sr_rxfifo_empty) != 0);
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns.
+ * Each nanosecond timing constant is converted into controller clock
+ * cycles and flushed to its register, in the same order as before.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	const struct {
+		nt_field_t *p_fld;
+		uint32_t n_ns;
+	} a_timings[] = {
+		{ p->mp_fld_tsusta, susta }, { p->mp_fld_tsusto, susto },
+		{ p->mp_fld_thdsta, hdsta }, { p->mp_fld_tsudat, sudat },
+		{ p->mp_fld_tbuf, buf },     { p->mp_fld_thigh, high },
+		{ p->mp_fld_tlow, low },     { p->mp_fld_thddat, hddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timings) / sizeof(a_timings[0]); i++) {
+		uint32_t val = a_timings[i].n_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timings[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate a zero-initialized IIC instance.
+ * Returns NULL on allocation failure; otherwise a zeroed struct ready
+ * for nthw_iic_init().
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_iic_t));
+}
+
+/*
+ * Initialize an IIC controller instance.
+ *
+ * With p == NULL this only probes for the FPGA module: returns 0 when
+ * MOD_IIC instance n_iic_instance exists, -1 otherwise.  With p != NULL
+ * the struct is filled in, register/field handles are resolved, and the
+ * controller is soft-reset, enabled, and (optionally) timed.
+ *
+ * n_iic_cycle_time is the controller clock cycle time in ns; 0 skips
+ * the timing setup.  Returns 0 on success, -1 if the module is absent.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	/* Probe-only mode: report module presence without touching state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All -1 arguments select the built-in default retry/poll values */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Resolve the timing registers (setup/hold/high/low times) */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control register: enable, master mode, TX-FIFO reset, TX ACK */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	/* Status register: bus-busy and FIFO full/empty flags */
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* TX FIFO: data word plus START/STOP qualifier bits */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Release an IIC instance; safe to call with NULL */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the state before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Configure poll delay and retry budgets.  A negative argument selects
+ * the built-in default for that parameter.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry =
+		(n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry =
+		(n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read data_len bytes from reg_addr of device dev_addr into p_void,
+ * retrying the whole transfer until it succeeds or the retry budget is
+ * spent.  Returns 0 on success, -1 when retries are exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	for (;;) {
+		if (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) == 0)
+			break;
+
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Single read transaction: write the register address, then clock in
+ * data_len bytes from device dev_addr into p_byte.
+ * Returns 0 on success, -1 on bus-not-ready or data timeout.
+ *
+ * Fix vs original: the trailing `return 0;` after the if/else (both
+ * branches of which already return) was unreachable dead code; the
+ * nesting is flattened with guard clauses instead.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit!! */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write DataLen to TX_FIFO and set stop bit!! */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read DataLen bytes from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write data_len bytes from p_void to reg_addr of device dev_addr,
+ * retrying the whole transfer until it succeeds or the retry budget is
+ * spent.  Returns 0 on success, -1 when retries are exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	for (;;) {
+		if (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) == 0)
+			break;
+
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Single write transaction: queue device address, register address and
+ * data_len payload bytes in the TX FIFO, closing the transfer with a
+ * STOP on the last byte.  Returns 0 on success, -1 on empty payload or
+ * bus-not-ready.
+ *
+ * NOTE(review): if the bus does not become ready after the transfer,
+ * this function spins in an unbounded while(true) loop with no delay or
+ * timeout — consider bounding it like nthw_iic_bus_ready() does.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	if (data_len == 0)
+		return -1;
+
+	/* All bytes but the last are queued without a STOP qualifier */
+	count = data_len - 1;
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo (no stop bit yet) */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			/* Unbounded wait for the bus to drain — see NOTE above */
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ *
+ * Polls the bus-busy flag up to mn_bus_ready_retry times, sleeping
+ * mn_poll_delay us between polls.  Returns true when the bus is free,
+ * false on timeout.
+ *
+ * Fix vs original: with a retry budget of 0 the loop decremented count
+ * to -1 before breaking, so the final `count == 0` test missed the
+ * timeout and the bus was wrongly reported ready.  Returning directly
+ * on `count <= 0` closes that hole.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			return true;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ *
+ * Polls the RX-FIFO-empty flag up to mn_data_ready_retry times,
+ * sleeping mn_poll_delay us between polls.  Returns true when a byte is
+ * available, false on timeout.
+ *
+ * Fix vs original: with a retry budget of 0 the loop decremented count
+ * to -1 before breaking, so the final `count == 0` test missed the
+ * timeout and data was wrongly reported ready.  Returning directly on
+ * `count <= 0` closes that hole.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			return true;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			return false;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+}
+
+/*
+ * Probe one device address by attempting a single-byte read of
+ * n_reg_addr.  Returns the readbyte result: 0 when a device answered.
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint8_t data_val = -1;
+	const int res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr,
+					 (uint8_t)n_reg_addr, 1, &data_val);
+
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Scan for the first responding device, walking the 7-bit address space
+ * upward (b_increate) or downward from n_dev_addr_start.  Returns the
+ * address found, or -1 when no device answered.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	int res = 0;
+	int i = n_dev_addr_start;
+
+	if (b_increate) {
+		while (i < 128) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i++;
+		}
+	} else {
+		while (i >= 0) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i--;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit device address on the bus (results are only logged) */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_addr = 0;
+
+	while (n_addr < 128) {
+		(void)nthw_iic_scan_dev_addr(p, n_addr, 0x00);
+		n_addr++;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/* State for one IIC (I2C) controller instance on the FPGA */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_iic;
+	int mn_iic_instance;
+
+	/* Controller clock cycle time (ns) and poll/retry budgets */
+	uint32_t mn_iic_cycle_time;
+	int mn_poll_delay;
+	int mn_bus_ready_retry;
+	int mn_data_ready_retry;
+	int mn_read_data_retry;
+	int mn_write_data_retry;
+
+	/* I2C timing registers (setup/hold/high/low times) */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register and its fields */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register: bus-busy and FIFO full/empty flags */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data word plus START/STOP qualifier bits */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt and data registers */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register and its reset-key field */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate a zero-initialized MAC/PCS instance.
+ * Returns NULL on allocation failure; otherwise a zeroed struct ready
+ * for nthw_mac_pcs_init().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	return calloc(1, sizeof(nthw_mac_pcs_t));
+}
+
+/* Release a MAC/PCS instance; safe to call with NULL */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub the state before releasing the memory */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Convenience wrapper: when @enable is true, select the host as the Tx data
+ * source and deselect the traffic generator (TFG); when false, the reverse.
+ * The two selects are always written in this order.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS Rx path. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_rx_enable;
+
+	/* refresh the register shadow, then write the new state */
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/* Enable or disable the PCS Tx path. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_tx_enable;
+
+	/* refresh the register shadow, then write the new state */
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/* Select (or deselect) the host as the Tx data source. */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/* Select (or deselect) the traffic generator (TFG) as the Tx data source. */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/*
+ * Enable or disable end-of-packet timestamping.
+ * No-op when the probed FPGA image does not expose the TS_EOP field.
+ */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (!fld)
+		return;
+
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/*
+ * True when every block-lock and lane-lock bit selected by the configured
+ * masks is set.
+ */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+/* Assert (enable=true) or release the PCS Tx path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/* Assert (enable=true) or release the PCS Rx path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/* Report whether the PCS Rx path is currently held in reset. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst) != 0;
+}
+
+/* Start or stop transmitting RFI (remote fault indication). */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/* Force the Rx PCS to resynchronize while enabled. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	field_get_updated(fld);
+	if (enable) {
+		field_set_flush(fld);
+	} else {
+		field_clr_flush(fld);
+	}
+}
+
+/* True when the GTY Rx reset state machine reports done on all four lanes. */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+	nt_field_t *flds[4] = {
+		p->mp_fld_gty_stat_rx_rst_done0,
+		p->mp_fld_gty_stat_rx_rst_done1,
+		p->mp_fld_gty_stat_rx_rst_done2,
+		p->mp_fld_gty_stat_rx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/* True when the GTY Tx reset state machine reports done on all four lanes. */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+	nt_field_t *flds[4] = {
+		p->mp_fld_gty_stat_tx_rst_done0,
+		p->mp_fld_gty_stat_tx_rst_done1,
+		p->mp_fld_gty_stat_tx_rst_done2,
+		p->mp_fld_gty_stat_tx_rst_done3,
+	};
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if ((field_get_updated(flds[i]) & mask) != mask)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Enable/disable host-side GTY loopback on all four lanes.
+ * Writes loop value 2 to each lane when enabling, 0 when disabling
+ * (presumably the GTY LOOPBACK port encoding — confirm against UG578).
+ */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t val = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Enable/disable line-side GTY loopback on all four lanes.
+ * Writes loop value 4 to each lane when enabling, 0 when disabling
+ * (presumably the GTY LOOPBACK port encoding — confirm against UG578).
+ */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t val = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, val);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, val);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP8 error counters and, in DEBUG builds, log any lane
+ * with a non-zero count. NOTE(review): only a read is performed here — the
+ * "reset" in the name presumably relies on BIP_ERR being clear-on-read;
+ * confirm against the register specification.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Store the low bit of the PCS Rx status field in *status. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	const uint32_t val = field_get_updated(p->mp_fld_stat_pcs_rx_status);
+
+	*status = (uint8_t)(val & 0x01);
+}
+
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read LINK_SUMMARY once and scatter its fields into the caller's variables.
+ * Every output pointer is optional; pass NULL to skip that field.
+ * The lh_/ll_ prefixed fields are presumably latched-high/latched-low sticky
+ * copies of the live values — confirm against the register specification.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * A reset is required when either lock status is partial: some, but not
+ * all, of the mask-selected Block/Lane lock bits are set.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+	const bool block_partial = block_lock != 0 && block_lock != block_mask;
+	const bool lane_partial = lane_lock != 0 && lane_lock != lane_mask;
+
+	return block_partial || lane_partial;
+}
+
+/*
+ * Enable or disable RS-FEC: writing 0 to RS_FEC_CTRL_IN enables FEC,
+ * writing all five bits set ((1 << 5) - 1) bypasses it. Both paths are then
+ * reset with a 10 ms settle time each, since the new FEC state only takes
+ * effect after Rx and Tx path resets.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	if (enable) {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, 0);
+	} else {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+				    (1 << 5) - 1);
+	}
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Report whether the FEC block is bypassed (FEC_STAT.BYPASS). */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass) != 0;
+}
+
+/* Report whether FEC operation is valid (FEC_STAT.VALID). */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid) != 0;
+}
+
+/* Report whether the FEC lanes are aligned (FEC_STAT.FEC_LANE_ALGN). */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn) != 0;
+}
+
+/* True if at least one FEC lane reports alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) != 0 ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3) != 0;
+}
+
+/* True only if every FEC lane reports alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) != 0 &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3) != 0;
+}
+
+/*
+ * Log the FEC_STAT fields (valid, per-lane AM lock, lane alignment) at
+ * debug level after refreshing the register shadow.
+ * Fix: the fourth lane was mislabeled "AM_LOCK_0" in the format string
+ * although the value printed is am_lock3.
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Read the FEC corrected (CW) and uncorrected (UCW) codeword counters and
+ * log any non-zero value. NOTE(review): only reads are performed — the
+ * "reset" in the name presumably relies on the counters being
+ * clear-on-read; confirm against the register specification.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Returns true (and logs the per-lane values) when any lane's RX_BUF_STAT
+ * has changed since the last read AND its current status is non-zero.
+ * A changed-but-now-zero status is treated as recovered and returns false.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Apply GTY transmitter equalization for one lane: pre-cursor, differential
+ * swing control and post-cursor. All three values are 5-bit (masked 0x1F).
+ * Fix: a lane number > 3 previously fell through all three switches without
+ * writing anything, yet still logged a success-style message; it is now
+ * rejected up front.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	if (lane > 3) {
+		NT_LOG(DBG, NTHW, "Port %u: invalid GTY lane: %u\n",
+		       p->m_port_no, lane);
+		return;
+	}
+
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr0,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr1,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr2,
+				    tx_pre_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_pre_cursor_tx_pre_csr3,
+				    tx_pre_csr & 0x1F);
+		break;
+	}
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+				    tx_diff_ctl & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+				    tx_diff_ctl & 0x1F);
+		break;
+	}
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	switch (lane) {
+	case 0:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr0,
+				    tx_post_csr & 0x1F);
+		break;
+	case 1:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr1,
+				    tx_post_csr & 0x1F);
+		break;
+	case 2:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr2,
+				    tx_post_csr & 0x1F);
+		break;
+	case 3:
+		field_set_val_flush32(p->mp_field_gty_post_cursor_tx_post_csr3,
+				    tx_post_csr & 0x1F);
+		break;
+	}
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set the receiver equalization mode on all four GTY lanes:
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ * Only bit 0 of 'mode' is used. The lane equalizers are reset afterwards
+ * (1 ms pulse) so the new mode takes effect.
+ *
+ * See UltraScale Architecture GTY Transceivers www.xilinx.com page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	register_update(p->mp_reg_gty_ctl);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/*
+ * Invert (or restore) the Tx serial polarity of one GTY lane.
+ * Lanes > 3 only produce the log line, as before.
+ * NOTE(review): on FPGAs with split GTY_CTL_RX/GTY_CTL_TX registers the
+ * Tx polarity fields live in mp_reg_gty_ctl_tx, yet only mp_reg_gty_ctl is
+ * refreshed here — confirm this is intentional.
+ */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *fld = NULL;
+
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		fld = p->mp_field_gty_ctl_tx_pol0;
+		break;
+	case 1:
+		fld = p->mp_field_gty_ctl_tx_pol1;
+		break;
+	case 2:
+		fld = p->mp_field_gty_ctl_tx_pol2;
+		break;
+	case 3:
+		fld = p->mp_field_gty_ctl_tx_pol3;
+		break;
+	default:
+		break;
+	}
+	if (fld)
+		field_set_val_flush32(fld, swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/*
+ * Invert (or restore) the Rx serial polarity of one GTY lane.
+ * Lanes > 3 only produce the log line, as before.
+ */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *fld = NULL;
+
+	register_update(p->mp_reg_gty_ctl);
+	switch (lane) {
+	case 0:
+		fld = p->mp_field_gty_ctl_rx_pol0;
+		break;
+	case 1:
+		fld = p->mp_field_gty_ctl_rx_pol1;
+		break;
+	case 2:
+		fld = p->mp_field_gty_ctl_rx_pol2;
+		break;
+	case 3:
+		fld = p->mp_field_gty_ctl_rx_pol3;
+		break;
+	default:
+		break;
+	}
+	if (fld)
+		field_set_val_flush32(fld, swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Select the port LED behavior (see enum nthw_mac_pcs_led_mode_e). */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	nt_field_t *fld = p->mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, mode);
+}
+
+/*
+ * Set the Rx timestamp compensation delay.
+ * No-op when the FPGA image lacks the optional TIMESTAMP_COMP register.
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	nt_field_t *fld = p->mp_field_time_stamp_comp_rx_dly;
+
+	if (!fld)
+		return;
+	field_get_updated(fld);
+	field_set_val_flush32(fld, rx_dly);
+}
+
+/* Record the logical port number used in this module's log messages. */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Return the freshly-read Block lock bits. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Return the mask of Block lock bits considered valid for this instance. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Return the freshly-read (VL demuxed) lane lock bits. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Return the mask of lane lock bits considered valid for this instance. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* LED modes accepted by nthw_mac_pcs_set_led_mode() (written to
+ * DEBOUNCE_CTRL.NT_PORT_CTRL).
+ */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Register and field shadow handles for one MAC/PCS FPGA module instance.
+ * Populated during nthw_mac_pcs_init(); field pointers obtained via
+ * module_query_register() may be NULL when the probed FPGA image lacks the
+ * corresponding optional register (e.g. TIMESTAMP_COMP, PHYMAC_MISC TS_EOP).
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop; /* optional; may be NULL */
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	/* only set when the FPGA exposes split GTY_CTL_RX/GTY_CTL_TX registers */
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP (optional register; may be NULL) */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/*
+ * Helper: refresh @f from hardware, then set or clear the field and flush
+ * the change back. A NULL @f is silently ignored (presumably the field is
+ * absent in some FPGA variants - TODO confirm).
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/*
+ * Allocate a zero-initialized nthw_mac_pcs_xxv_t instance.
+ * Returns NULL on allocation failure.
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc replaces the original malloc + memset pair in one call */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/*
+ * Release an instance previously returned by nthw_mac_pcs_xxv_new().
+ * Tolerates NULL; the object is scrubbed before being freed.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Sub-channel 0 maps to the module instance number; others map 1:1. */
+	if (index == 0)
+		return (uint8_t)p->mn_instance;
+	return index;
+}
+
+/* Record the logical port number (used when logging FEC counters). */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/*
+ * Read the LINK_SUMMARY register of sub-channel @index and copy its fields
+ * to the caller's output pointers. Any output pointer may be NULL if the
+ * caller does not need that field. The register is re-read from hardware
+ * (register_update) before the fields are sampled.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* BUGFIX: assert before dereferencing p (original asserted after use) */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF TX_ENABLE field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_tx_enable, enable);
+}
+
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF RX_ENABLE field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_rx_enable, enable);
+}
+
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF RX_FORCE_RESYNC field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_rx_force_resync, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Assert/de-assert the SUB_RST RX_GT_DATA reset of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_sub_rst_rx_gt_data, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Assert/de-assert the SUB_RST TX_GT_DATA reset of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_sub_rst_tx_gt_data, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Assert/de-assert the AN/LT (auto-neg / link-training) sub-reset. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_sub_rst_an_lt, enable);
+}
+
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	/* Assert/de-assert the SPEED_CTRL sub-reset of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_sub_rst_speed_ctrl, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF TX_SEND_RFI field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_tx_send_rfi, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF TX_SEND_LFI field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_tx_send_lfi, enable);
+}
+
+/*
+ * Program TX_SEND_LFI and TX_SEND_RFI of sub-channel @index together,
+ * using a single read-modify-write of CORE_CONF (one register_flush
+ * covers both field writes).
+ */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	register_update(p->regs[index].mp_reg_core_conf);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(p->regs[index].mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(p->regs[index].mp_reg_core_conf, 1);
+}
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* DFE is active exactly when the GTY RX LPM_EN field reads zero. */
+	return field_get_updated(p->regs[index].mp_fld_gty_ctl_rx_lpm_en) == 0;
+}
+
+/*
+ * Enable/disable DFE equalization on the GTY RX of sub-channel @index.
+ * Enabling DFE means writing LPM_EN = 0; the new setting is latched by
+ * pulsing EQUA_RST high and then low. The read/write ordering below is
+ * deliberate - do not reorder.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *f = p->regs[index].mp_fld_gty_ctl_rx_lpm_en;
+	const bool set_dfe =
+		!enable; /* NOTE: Enable DFE mode means setting LPM = 0 */
+
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(f, set_dfe);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST to 1 and 0 to assert new LPM_EN setting */
+	f = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+	field_get_updated(f);
+	field_set_val_flush32(f, 1); /* Assert    GTH SOFT RESET */
+	field_get_updated(f);
+	field_set_val_flush32(f, 0); /* De-Assert GTH SOFT RESET */
+	field_get_updated(f);
+}
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	/* Set/clear the GTY_CTL RX polarity-swap field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_gty_ctl_rx_polarity, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	/* Set/clear the GTY_CTL TX polarity-swap field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_gty_ctl_tx_polarity, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	/* Set/clear the GTY_CTL TX_INHIBIT field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_gty_ctl_tx_inhibit, enable);
+}
+
+/*
+ * Enable/disable host-side (GT) loopback on sub-channel @index.
+ * NOTE(review): value 2U selects a specific GT loopback mode; its exact
+ * encoding is not visible here - confirm against the GTY transceiver docs.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, enable ? 2U : 0U);
+}
+
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	/* Set/clear the CORE_CONF LINE_LOOPBACK field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_line_loopback, enable);
+}
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* SUB_RST_STATUS user RX reset indication for sub-channel @index. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_sub_rst_status_user_rx_rst;
+
+	return field_get_updated(f);
+}
+
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* SUB_RST_STATUS user TX reset indication for sub-channel @index. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_sub_rst_status_user_tx_rst;
+
+	return field_get_updated(f);
+}
+
+/*
+ * QPLL lock signal.
+ * For cores capable of 10G only, there are only 1 QPLL. For cores capable of
+ * 10G/25G, there are 2 QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* 3 == both lock bits set - one per QPLL on 10G/25G-capable cores */
+	return (field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock) ==
+		3);
+}
+
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Ready when the QPLLs are locked and neither user reset is active. */
+	if (!nthw_mac_pcs_xxv_is_qpll_lock(p, index))
+		return false;
+	if (nthw_mac_pcs_xxv_is_user_rx_rst(p, index))
+		return false;
+	return !nthw_mac_pcs_xxv_is_user_tx_rst(p, index);
+}
+
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* ANEG_CONFIG ENABLE field of sub-channel @index. */
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	return field_get_updated(f);
+}
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF TX_SEND_IDLE field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_tx_send_idle, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF TX_INS_FCS field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_tx_ins_fcs, enable);
+}
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* Field encoding: 0 = 25G, 1 = 10G. */
+	return field_get_updated(p->regs[index].mp_fld_link_speed10_g) != 0;
+}
+
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	/* Select 10G (true) or 25G (false) on sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_link_speed10_g, enable);
+}
+
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_speed_toggle;
+
+	/* Refresh the shadow value, then set the LINK_SPEED toggle field. */
+	field_get_updated(f);
+	field_set_flush(f);
+}
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	/* Set/clear the RS_FEC_CONF RS_FEC_ENABLE field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable, enable);
+}
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* Refresh the shadow value, then write @mode to the port-control field. */
+	field_get_updated(f);
+	field_set_val_flush32(f, mode);
+}
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	/* Assert/de-assert the SUB_RST RX_MAC_PCS reset of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_sub_rst_rx_mac_pcs, enable);
+}
+
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	/* Assert/de-assert the SUB_RST TX_MAC_PCS reset of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_sub_rst_tx_mac_pcs, enable);
+}
+
+/*
+ * Re-read the RS-FEC corrected (CCW) and uncorrected (UCW) codeword
+ * counter registers of sub-channel @index and log any non-zero value at
+ * debug level.
+ * NOTE(review): despite its name, this function only reads and logs the
+ * counters; nothing here explicitly clears them. Unless the hardware
+ * counters are clear-on-read, the name is misleading - TODO confirm.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	uint32_t ccw_cnt, ucw_cnt;
+
+	register_update(p->regs[index].mp_reg_rs_fec_ccw);
+	register_update(p->regs[index].mp_reg_rs_fec_ucw);
+
+	/* Read each counter field once instead of twice (test + log). */
+	ccw_cnt = field_get_val32(p->regs[index].mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt);
+	if (ccw_cnt)
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no, ccw_cnt);
+
+	ucw_cnt = field_get_val32(p->regs[index].mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt);
+	if (ucw_cnt)
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no, ucw_cnt);
+}
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_rx_dly;
+
+	/* Refresh the shadow value, then program the RX timestamp delay. */
+	field_get_updated(f);
+	field_set_val_flush32(f, rx_dly);
+}
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_tx_dly;
+
+	/* Refresh the shadow value, then program the TX timestamp delay. */
+	field_get_updated(f);
+	field_set_val_flush32(f, tx_dly);
+}
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	/* Set/clear the CORE_CONF TS_AT_EOP field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_core_conf_ts_at_eop, enable);
+}
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+	/* Refresh the shadow value, then program the TX differential control. */
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr;
+
+	/* Refresh the shadow value, then program the TX pre-cursor. */
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_field_gty_post_cursor_tx_post_csr;
+
+	/* Refresh the shadow value, then program the TX post-cursor. */
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	/* Set/clear the LT_CONF ENABLE (link training) field. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_lt_conf_enable, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	/* Set/clear the ANEG_CONFIG FEC91_REQUEST field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_aneg_config_fec91_request, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	/* Set/clear the ANEG_CONFIG RS_FEC_REQUEST field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_aneg_config_rs_fec_request, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	/* Set/clear the ANEG_CONFIG FEC74_REQUEST field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_aneg_config_fec74_request, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	/* Set/clear the ANEG_CONFIG ENABLE field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_aneg_config_enable, enable);
+}
+
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	/* Set/clear the ANEG_CONFIG BYPASS field of sub-channel @index. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(
+		p->regs[index].mp_fld_aneg_config_bypass, enable);
+}
+
+/*
+ * Configure the DAC mode (presumably direct-attach copper - confirm) of
+ * sub-channel @index. Only NTHW_MAC_PCS_XXV_DAC_OFF is implemented: it
+ * disables auto-negotiation and link training, then pulses the MAC/PCS
+ * and GT data resets (assert all, then de-assert all) to bring the
+ * channel back up. The reset sequencing below is deliberate - keep the
+ * order. Any other mode hits the assert.
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode == NTHW_MAC_PCS_XXV_DAC_OFF) {
+		nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+		nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+		nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+		nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+		nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+
+		return;
+	}
+	assert(0); /* If you end up here you need to implement other DAC modes */
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* LINK_SUMMARY latched-low RX FEC74 lock for sub-channel @index. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f);
+}
+
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	/* LINK_SUMMARY latched-low RX RS-FEC lane alignment for @index. */
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_XXV_H_
+#define NTHW_MAC_PCS_XXV_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nthw_fpga_model.h"
+
+/* Port LED control modes for the XXV MAC/PCS. */
+enum nthw_mac_pcs_xxv_led_mode_e {
+	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
+	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
+	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
+};
+
+/* Direct-attach-cable (DAC) modes; values passed to nthw_mac_pcs_xxv_set_dac_mode(). */
+enum nthw_mac_pcs_xxv_dac_mode_e {
+	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
+	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
+};
+
+/*
+ * Shadow of one MAC_PCS_XXV FPGA module instance: caches register and
+ * field handles per sub-module/channel so runtime accessors do not have
+ * to repeat register/field lookups.
+ */
+struct nthw_mac_pcs_xxv {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs_xxv;
+	int mn_instance;
+
+	uint8_t m_port_no;
+
+/* Max number of sub-modules/channels held in regs[] below. */
+#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
+	struct nthw_mac_pcs_xxv__registers_fields {
+		/* CORE_CONF */
+		nt_register_t *mp_reg_core_conf;
+		nt_field_t *mp_fld_core_conf_rx_enable;
+		nt_field_t *mp_fld_core_conf_rx_force_resync;
+		nt_field_t *mp_fld_core_conf_tx_enable;
+		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
+		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
+		nt_field_t *mp_fld_core_conf_tx_send_lfi;
+		nt_field_t *mp_fld_core_conf_tx_send_rfi;
+		nt_field_t *mp_fld_core_conf_tx_send_idle;
+		nt_field_t *mp_fld_core_conf_inline_mode;
+		nt_field_t *mp_fld_core_conf_line_loopback;
+		nt_field_t *mp_fld_core_conf_ts_at_eop;
+
+		/* ANEG_CONFIG */
+		nt_register_t *mp_reg_aneg_config;
+		nt_field_t *mp_fld_aneg_config_enable;
+		nt_field_t *mp_fld_aneg_config_bypass;
+		nt_field_t *mp_fld_aneg_config_restart;
+		nt_field_t *mp_fld_aneg_config_pseudo;
+		nt_field_t *mp_fld_aneg_config_nonce_seed;
+		nt_field_t *mp_fld_aneg_config_remote_fault;
+		nt_field_t *mp_fld_aneg_config_pause;
+		nt_field_t *mp_fld_aneg_config_asmdir;
+		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
+		nt_field_t *mp_fld_aneg_config_hide_fec74;
+		nt_field_t *mp_fld_aneg_config_fec74_request;
+		nt_field_t *mp_fld_aneg_config_fec91_request;
+		nt_field_t *mp_fld_aneg_config_fec91_ability;
+		nt_field_t *mp_fld_aneg_config_rs_fec_request;
+		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
+		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;
+
+		/* ANEG_ABILITY */
+		nt_register_t *mp_reg_aneg_ability;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
+		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
+		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;
+
+		/* LT_CONF */
+		nt_register_t *mp_reg_lt_conf;
+		nt_field_t *mp_fld_lt_conf_enable;
+		nt_field_t *mp_fld_lt_conf_restart;
+		nt_field_t *mp_fld_lt_conf_seed;
+
+		/* SUB_RST */
+		nt_register_t *mp_reg_sub_rst;
+		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
+		nt_field_t *mp_fld_sub_rst_rx_gt_data;
+		nt_field_t *mp_fld_sub_rst_tx_gt_data;
+		nt_field_t *mp_fld_sub_rst_rx_buf;
+		nt_field_t *mp_fld_sub_rst_rx_pma;
+		nt_field_t *mp_fld_sub_rst_tx_pma;
+		nt_field_t *mp_fld_sub_rst_rx_pcs;
+		nt_field_t *mp_fld_sub_rst_tx_pcs;
+		nt_field_t *mp_fld_sub_rst_an_lt;
+		nt_field_t *mp_fld_sub_rst_speed_ctrl;
+
+		/* SUB_RST_STATUS */
+		nt_register_t *mp_reg_sub_rst_status;
+		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
+		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
+		nt_field_t *mp_fld_sub_rst_status_qpll_lock;
+
+		/* LINK_SUMMARY */
+		nt_register_t *mp_reg_link_summary;
+		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+		nt_field_t *mp_fld_link_summary_abs;
+		nt_field_t *mp_fld_link_summary_lh_abs;
+		nt_field_t *mp_fld_link_summary_link_down_cnt;
+		/* Begin 2 x 10/25 Gbps only fields: */
+		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
+		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
+		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
+		/* End 2 x 10/25 Gbps only fields. */
+		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
+		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
+		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_local_fault;
+		nt_field_t *mp_fld_link_summary_lh_remote_fault;
+		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
+		nt_field_t *mp_fld_link_summary_nim_interr;
+
+		/* GTY_LOOP */
+		nt_register_t *mp_reg_gty_loop;
+		nt_field_t *mp_fld_gty_loop_gt_loop;
+
+		/* GTY_CTL_RX */
+		nt_register_t *mp_reg_gty_ctl_rx;
+		nt_field_t *mp_fld_gty_ctl_rx_polarity;
+		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
+		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;
+
+		/* GTY_CTL_TX */
+		nt_register_t *mp_reg_gty_ctl_tx;
+		nt_field_t *mp_fld_gty_ctl_tx_polarity;
+		nt_field_t *mp_fld_gty_ctl_tx_inhibit;
+
+		/* LINK_SPEED */
+		nt_register_t *mp_reg_link_speed;
+		nt_field_t *mp_fld_link_speed10_g;
+		nt_field_t *mp_fld_link_speed_toggle;
+
+		/* RS_FEC_CONF */
+		nt_register_t *mp_reg_rs_fec_conf;
+		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;
+
+		/* DEBOUNCE_CTRL */
+		nt_register_t *mp_reg_debounce_ctrl;
+		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+		/* FEC_CCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ccw;
+		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;
+
+		/* FEC_UCW_CNT */
+		nt_register_t *mp_reg_rs_fec_ucw;
+		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;
+
+		/* TIMESTAMP_COMP */
+		nt_register_t *mp_reg_timestamp_comp;
+		nt_field_t *mp_field_timestamp_comp_rx_dly;
+		nt_field_t *mp_field_timestamp_comp_tx_dly;
+
+		/* GTY_PRE_CURSOR */
+		nt_register_t *mp_reg_gty_pre_cursor;
+		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;
+
+		/* GTY_DIFF_CTL */
+		nt_register_t *mp_reg_gty_diff_ctl;
+		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+		/* GTY_POST_CURSOR */
+		nt_register_t *mp_reg_gty_post_cursor;
+		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
+	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
+};
+
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
+typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;
+
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels);
+
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index);
+
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);
+
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
+				 uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index);
+
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index);
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index);
+
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index);
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index);
+
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index);
+
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);
+
+#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+/* Allocate and zero-initialize a PCI read traffic-generator context.
+ * Returns NULL on allocation failure.
+ */
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	nthw_pci_rd_tg_t *p = malloc(sizeof(nthw_pci_rd_tg_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+	return p;
+}
+
+/* Scrub and free a context obtained from nthw_pci_rd_tg_new(); NULL is a no-op. */
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the PCI_RD_TG (PCIe read traffic generator) module registers and
+ * fields for FPGA instance n_instance into *p.
+ *
+ * If p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.
+ * Returns 0 on success, -1 when the instance is not present.
+ */
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_rd_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_rd_tg_rd_data0 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
+	p->mp_fld_pci_rd_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
+				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_rd_tg_rd_data1 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
+	p->mp_fld_pci_rd_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
+				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_rd_tg_rd_data2 =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
+	p->mp_fld_pci_rd_tg_req_size =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
+	p->mp_fld_pci_rd_tg_wait =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
+	p->mp_fld_pci_rd_tg_wrap =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
+	/* optional VF host id: query (not get) so absence is tolerated, leaving NULL */
+	p->mp_fld_pci_rd_tg_req_hid =
+		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);
+
+	p->mp_reg_pci_rd_tg_rd_addr =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
+	p->mp_fld_pci_rd_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);
+
+	p->mp_reg_pci_rd_tg_rd_run =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
+	p->mp_fld_pci_rd_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);
+
+	p->mp_reg_pci_rd_tg_rd_ctrl =
+		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
+	p->mp_fld_pci_rd_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA base address by splitting it across the
+ * low/high 32-bit address registers (each write is flushed).
+ */
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	/* Use 1ULL: "1UL << 32" is undefined behavior on ILP32 targets
+	 * where unsigned long is only 32 bits wide.
+	 */
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & ((1ULL << 32) - 1)));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & ((1ULL << 32) - 1)));
+}
+
+/* Select the generator RAM entry to be read/written next. */
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+/* Stage request size and wait/wrap flags for the current RAM entry;
+ * the single flush on the last field commits the whole register.
+ */
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+/* Start the read generator for n_iterations passes over the programmed entries. */
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+/* Read the current ready flag of the read traffic generator control register. */
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_RD_TG_H__
+#define __NTHW_PCI_RD_TG_H__
+
+/*
+ * Context for the PCI_RD_TG module (PCIe read traffic generator):
+ * caches register and field handles resolved by nthw_pci_rd_tg_init().
+ */
+struct nthw_pci_rd_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_rd_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
+	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
+	nt_field_t *mp_fld_pci_rd_tg_req_size;
+	/* optional VF host id; may be NULL if the FPGA lacks the field */
+	nt_field_t *mp_fld_pci_rd_tg_req_hid;
+	nt_field_t *mp_fld_pci_rd_tg_wait;
+	nt_field_t *mp_fld_pci_rd_tg_wrap;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
+	nt_field_t *mp_fld_pci_rd_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_run;
+	nt_field_t *mp_fld_pci_rd_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
+	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
+};
+
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
+typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
+int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap);
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);
+
+#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+/* Allocate and zero-initialize a PCI test-accept (TA) context.
+ * Returns NULL on allocation failure.
+ */
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	nthw_pci_ta_t *p = malloc(sizeof(nthw_pci_ta_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+	return p;
+}
+
+/* Scrub and free a context obtained from nthw_pci_ta_new(); NULL is a no-op. */
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the PCI_TA (PCIe test accept / packet checker) module registers
+ * and fields for FPGA instance n_instance into *p.
+ *
+ * If p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.
+ * Returns 0 on success, -1 when the instance is not present.
+ */
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_ta = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
+	p->mp_fld_pci_ta_ctrl_enable =
+		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);
+
+	p->mp_reg_pci_ta_packet_good =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
+	p->mp_fld_pci_ta_packet_good_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);
+
+	p->mp_reg_pci_ta_packet_bad =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
+	p->mp_fld_pci_ta_packet_bad_amount =
+		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);
+
+	p->mp_reg_pci_ta_length_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
+	p->mp_fld_pci_ta_length_error_amount =
+		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);
+
+	p->mp_reg_pci_ta_payload_error =
+		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
+	p->mp_fld_pci_ta_payload_error_amount =
+		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);
+
+	return 0;
+}
+
+/* Enable/disable the PCI test-accept checker via the CONTROL register. */
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+/* Read the good-packet counter into *val. */
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+/* Read the bad-packet counter into *val. */
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+/* Read the length-error counter into *val. */
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+/* Read the payload-error counter into *val. */
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+/*
+ * Context for the PCI_TA module (PCIe test accept / packet checker):
+ * caches register and field handles resolved by nthw_pci_ta_init().
+ */
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+/* Allocate and zero-initialize a PCI write traffic-generator context.
+ * Returns NULL on allocation failure.
+ */
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	nthw_pci_wr_tg_t *p = malloc(sizeof(nthw_pci_wr_tg_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+	return p;
+}
+
+/* Scrub and free a context obtained from nthw_pci_wr_tg_new(); NULL is a no-op. */
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the PCI_WR_TG (PCIe write traffic generator) module registers and
+ * fields for FPGA instance n_instance into *p.
+ *
+ * If p is NULL the call only probes for the module: returns 0 when the
+ * instance exists, -1 otherwise.
+ * Returns 0 on success, -1 when the instance is not present.
+ */
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pci_wr_tg = mod;
+
+	p->mn_param_pci_ta_tg_present =
+		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);
+
+	p->mp_reg_pci_wr_tg_data0 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
+	p->mp_fld_pci_wr_tg_phys_addr_low =
+		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);
+
+	p->mp_reg_pci_wr_tg_data1 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
+	p->mp_fld_pci_wr_tg_phys_addr_high =
+		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);
+
+	p->mp_reg_pci_wr_tg_data2 =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
+	p->mp_fld_pci_wr_tg_req_size =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
+	p->mp_fld_pci_wr_tg_inc_mode =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
+	p->mp_fld_pci_wr_tg_wait =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
+	p->mp_fld_pci_wr_tg_wrap =
+		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
+	/* optional VF host id: query (not get) so absence is tolerated, leaving NULL */
+	p->mp_fld_pci_wr_tg_req_hid =
+		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);
+
+	p->mp_reg_pci_wr_tg_addr =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
+	p->mp_fld_pci_wr_tg_ram_addr =
+		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);
+
+	p->mp_reg_pci_wr_tg_run =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
+	p->mp_fld_pci_wr_tg_run_iteration =
+		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);
+
+	p->mp_reg_pci_wr_tg_ctrl =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
+	p->mp_fld_pci_wr_tg_ctrl_rdy =
+		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);
+
+	p->mp_reg_pci_wr_tg_seq =
+		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
+	p->mp_fld_pci_wr_tg_seq_sequence =
+		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);
+
+	return 0;
+}
+
+/*
+ * Program the 64-bit DMA base address by splitting it across the
+ * low/high 32-bit address registers (each write is flushed).
+ */
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	/* Use 1ULL: "1UL << 32" is undefined behavior on ILP32 targets
+	 * where unsigned long is only 32 bits wide.
+	 */
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & ((1ULL << 32) - 1)));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & ((1ULL << 32) - 1)));
+}
+
+/* Select the generator RAM entry to be read/written next. */
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+/* Stage request size and wait/wrap/increment flags for the current RAM
+ * entry; the single flush on the last field commits the whole register.
+ */
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+/* Start the write generator for n_iterations passes over the programmed entries. */
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+/* Read the current ready flag of the write traffic generator control register. */
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+/*
+ * Context for the PCI_WR_TG module (PCIe write traffic generator):
+ * caches register and field handles resolved by nthw_pci_wr_tg_init().
+ */
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	/* optional VF host id; may be NULL if the FPGA lacks the field */
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate a zero-initialized PCIe3 context.
+ * Returns NULL on allocation failure; caller owns the object and must
+ * release it with nthw_pcie3_delete().
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	return calloc(1, sizeof(nthw_pcie3_t));
+}
+
+/* Release a PCIe3 context; scrubs the memory first so stale pointers fail fast. */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Map the PCIE3 module: resolve all registers and fields used by this driver
+ * and apply the initial endpoint setup (clear the DMA endpoint allow masks
+ * and the marker addresses).
+ *
+ * When p is NULL the call degenerates to an existence probe for the module
+ * instance. Returns 0 on success, -1 when the instance does not exist.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* Statistics control and counters */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	/* Root-port <-> endpoint error signalling */
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	/* Endpoint configuration; shadow registers are refreshed when present */
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	/*
+	 * NOTE(review): asymmetric sequence - the EP0 allow mask is cleared
+	 * above and then set here, while the EP1 mask is cleared twice.
+	 * Looks intentional for single-port (no bifurcation) setups, but
+	 * should be confirmed against the register documentation.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/* Trigger a statistics sample by writing the SAMPLE_TIME register. */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	/* 0xfee1dead is presumably an arbitrary trigger value - confirm */
+	const uint32_t n_trigger_val = 0xfee1dead;
+
+	field_set_val_flush32(p->mp_fld_sample_time, n_trigger_val);
+	return 0;
+}
+
+/*
+ * Enable PCIe statistics gathering: set both STAT_ENA and STAT_REQ,
+ * then flush the shared STAT_CTRL register in one write.
+ */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Disable PCIe statistics gathering: clear STAT_ENA while still raising
+ * STAT_REQ, flushed as a single STAT_CTRL write.
+ */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read the raw PCIe statistics counters into the caller-supplied locations.
+ * The unit size and reference frequency outputs are compile-time constants,
+ * not hardware reads. All output pointers must be non-NULL. Always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Convert the raw PCIe counters into rates scaled by the reference clock.
+ * When no reference-clock ticks were sampled only the clock count and bus
+ * utilization outputs are zeroed; the rate outputs are left untouched.
+ * Always returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t n_rx, n_tx, n_ref_clk;
+	uint32_t n_unit_size, n_ref_freq;
+	uint32_t n_tags, n_rq_rdy, n_rq_vld;
+
+	nthw_pcie3_get_stat(p, &n_rx, &n_tx, &n_ref_clk, &n_unit_size,
+			  &n_ref_freq, &n_tags, &n_rq_rdy, &n_rq_vld);
+
+	if (n_ref_clk == 0) {
+		/* Nothing sampled - report idle bus */
+		*p_ref_clk_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+		return 0;
+	}
+
+	{
+		const uint64_t n_clk = n_ref_clk;
+
+		*p_pci_rx_rate =
+			((uint64_t)n_rx * n_unit_size * n_ref_freq) / n_clk;
+		*p_pci_tx_rate =
+			((uint64_t)n_tx * n_unit_size * n_ref_freq) / n_clk;
+
+		*p_ref_clk_cnt = n_clk;
+		*p_tag_use_cnt = n_tags;
+
+		/* Utilization in parts-per-million of the sampled window */
+		*p_pci_nt_bus_util = ((uint64_t)n_rq_vld * 1000000ULL) / n_clk;
+		*p_pci_xil_bus_util = ((uint64_t)n_rq_rdy * 1000000ULL) / n_clk;
+	}
+
+	return 0;
+}
+
+/* Pre-sample hook - intentionally a no-op for PCIe3. */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	(void)p;
+	(void)epc;
+
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+	return 0;
+}
+
+/*
+ * Post-sample hook: read the PCIe rates into the endpoint counter struct.
+ * NOTE(review): the PCI rx rate is stored in epc->cur_tx and the tx rate in
+ * epc->cur_rx - presumably a deliberate host-vs-device direction flip, but
+ * worth confirming against the consumer of these counters.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/*
+ * Accessor state for one PCIE3 module instance: cached register and field
+ * handles resolved by nthw_pcie3_init().
+ */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle */
+	nt_module_t *mp_mod_pcie3;	/* resolved PCIE3 module */
+	int mn_instance;		/* module instance number */
+
+	/* Statistics control (enable/request) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	/* RX/TX byte-unit counters */
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	/* Request-bus handshake counters (rdy/vld) */
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	/* 250 MHz reference-clock tick counter for rate scaling */
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* Root-port -> endpoint error signalling */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* Endpoint -> root-port error signalling */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	/* Writing this register triggers a statistics sample */
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	/* Endpoint configuration (interface id, messaging, DMA allow masks) */
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	/* Marker address (lsb/msb), zeroed at init */
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	/* Scratch/test registers; test2/test3 are not mapped by nthw_pcie3_init() */
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate a zero-initialized SDC context.
+ * Returns NULL on allocation failure; release with nthw_sdc_delete().
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	return calloc(1, sizeof(nthw_sdc_t));
+}
+
+/* Release an SDC context; scrubs the memory first so stale pointers fail fast. */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Map the SDC (SDRAM controller) module: resolve the control, status,
+ * cell-count and fill-level fields used by this driver.
+ *
+ * When p is NULL the call degenerates to an existence probe for the module
+ * instance. Returns 0 on success, -1 when the instance does not exist.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* CTRL: init/test/stop knobs */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* STAT: calibration/lock/reset status bits */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* Counters and fill levels */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Read one SDC status field, append its value to the running result mask and
+ * bump *p_err_cnt when the value differs from the expected pattern (all ones
+ * when b_expect_set is non-zero, all zeros otherwise).
+ */
+static uint64_t nthw_sdc_accum_state(nt_field_t *p_fld, uint64_t n_mask,
+				   int b_expect_set, int *p_err_cnt)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	/* 1ULL avoids undefined behavior for field widths >= 31 bits */
+	const uint32_t val_mask = (uint32_t)((1ULL << n_val_width) - 1);
+
+	n_mask = (n_mask << n_val_width) | (val & val_mask);
+	if (val != (b_expect_set ? val_mask : 0))
+		(*p_err_cnt)++;
+	return n_mask;
+}
+
+/*
+ * Sample the SDC status fields (calib, init_done, mmcm_lock, pll_lock,
+ * resetting - in that order, MSB first) into *pn_result_mask.
+ *
+ * Returns the number of fields not in their expected state (0 = all ok),
+ * or -1 when p or pn_result_mask is NULL.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	n_mask = nthw_sdc_accum_state(p->mp_fld_stat_calib, n_mask, 1, &n_err_cnt);
+	n_mask = nthw_sdc_accum_state(p->mp_fld_stat_init_done, n_mask, 1, &n_err_cnt);
+	n_mask = nthw_sdc_accum_state(p->mp_fld_stat_mmcm_lock, n_mask, 1, &n_err_cnt);
+	n_mask = nthw_sdc_accum_state(p->mp_fld_stat_pll_lock, n_mask, 1, &n_err_cnt);
+	n_mask = nthw_sdc_accum_state(p->mp_fld_stat_resetting, n_mask, 0, &n_err_cnt);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reaches its operational state: calib, init_done,
+ * mmcm_lock and pll_lock must go all-ones and resetting must clear.
+ * Returns the number of fields that timed out (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int n_fail_cnt = 0;
+	size_t i;
+
+	/* Fields that must become all-ones, polled in this order */
+	nt_field_t *const a_set_flds[] = {
+		p->mp_fld_stat_calib,
+		p->mp_fld_stat_init_done,
+		p->mp_fld_stat_mmcm_lock,
+		p->mp_fld_stat_pll_lock,
+	};
+
+	for (i = 0; i < sizeof(a_set_flds) / sizeof(a_set_flds[0]); i++) {
+		if (field_wait_set_all32(a_set_flds[i], n_poll_iterations,
+					 n_poll_interval) != 0)
+			n_fail_cnt++;
+	}
+
+	/* RESETTING must deassert */
+	if (field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval) != 0)
+		n_fail_cnt++;
+
+	return n_fail_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/*
+ * Accessor state for one SDC (SDRAM controller) module instance:
+ * cached field handles resolved by nthw_sdc_init().
+ */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+	nt_module_t *mp_mod_sdc;	/* resolved SDC module */
+	int mn_instance;	/* module instance number */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* Counter and fill-level fields */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/*
+ * Allocate a zero-initialized Si5340 context.
+ * Returns NULL on allocation failure; release with nthw_si5340_delete().
+ */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	return calloc(1, sizeof(nthw_si5340_t));
+}
+
+/*
+ * Bind the Si5340 context to its I2C bus/address and synchronize the
+ * device's page register with the cached page (0). Always returns 0.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t n_page = 0;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1;
+	p->m_si5340_page = n_page;
+
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &n_page);
+
+	return 0;
+}
+
+/* Release an Si5340 context; scrubs the memory first so stale pointers fail fast. */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Read one Si5340 register over I2C (device at 7-bit address 0x74).
+ * The 16-bit register address encodes page (high byte) and offset (low
+ * byte); the device page register is rewritten only when the cached page
+ * differs.
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t n_offset = (uint8_t)(reg_addr & 0xff);
+	uint8_t n_page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t n_data;
+
+	if (p->m_si5340_page != n_page) {
+		/* switch the device to the requested page */
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &n_page);
+		p->m_si5340_page = n_page;
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, n_offset, 1, &n_data);
+	return n_data;
+}
+
+/*
+ * Write one Si5340 register over I2C (device at 7-bit address 0x74).
+ * The 16-bit register address encodes page (high byte) and offset (low
+ * byte); the device page register is rewritten only when the cached page
+ * differs. Always returns 0.
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t n_offset = (uint8_t)(reg_addr & 0xff);
+	uint8_t n_page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	if (p->m_si5340_page != n_page) {
+		/* switch the device to the requested page */
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &n_page);
+		p->m_si5340_page = n_page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, n_offset, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Apply a clock-profile table to the Si5340: write each (addr, value) entry
+ * and read it back for verification. p_data points to an array of data_cnt
+ * entries whose layout is selected by data_format (fmt1 or fmt2).
+ * Returns 0 on success, -1 on unknown format or readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		/* Decode the current entry and advance by its record size */
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		/* Verify the write took effect */
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Program a complete clock profile into the Si5340 and wait (up to ~5 s)
+ * for the device to report lock/calibration, then log the design id.
+ * Returns 0 on success, -1 when the device does not settle.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	/* NOTE(review): cfg result is deliberately ignored; the lock poll
+	 * below is the effective success check. */
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		/* reg 0x0c = status, reg 0x11 = sticky status (cleared after read);
+		 * mask 0x09 presumably covers SYSINCAL and DPLL loss-of-lock - confirm
+		 * against the Si5340 register map. */
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* Read the 8-character ASCII design id starting at reg 0x26B */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: program a format-1 clock profile table. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: program a format-2 clock profile table. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Context for one Si5340 clock synthesizer reached over I2C. */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C bus accessor */
+	int mn_clk_cfg;		/* selected clock profile, -1 = none */
+	uint8_t m_si5340_page;	/* cached device register page */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/*
+ * Allocate a zero-initialized SPI v3 context.
+ * Returns NULL on allocation failure; release with nthw_spi_v3_delete().
+ */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	return calloc(1, sizeof(nthw_spi_v3_t));
+}
+
+/* Release the SPI v3 context, tearing down the owned SPIM/SPIS sub-modules. */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (!p)
+		return;
+
+	if (p->mp_spim_mod != NULL) {
+		nthw_spim_delete(p->mp_spim_mod);
+		p->mp_spim_mod = NULL;
+	}
+	if (p->mp_spis_mod != NULL) {
+		nthw_spis_delete(p->mp_spis_mod);
+		p->mp_spis_mod = NULL;
+	}
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Set the transfer timeout. The value is compared against
+ * NT_OS_GET_TIME_MONOTONIC_COUNTER() deltas in the wait loops, so its unit
+ * is that counter's tick. Always returns 0.
+ */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this module (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Poll (every 1 ms) until the SPIM Tx FIFO drains, i.e. all queued Tx data
+ * have been clocked out. Returns 0 on success, the nthw_spim error code on
+ * FIFO query failure, or -1 on timeout.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	const uint64_t n_start = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+	bool b_empty = false;
+	int n_result;
+
+	for (;;) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		n_result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &b_empty);
+		if (n_result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return n_result;
+		}
+
+		if (b_empty)
+			return 0;
+
+		if ((NT_OS_GET_TIME_MONOTONIC_COUNTER() - n_start) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+}
+
+/*
+ * Poll (every 10 ms) until the SPIS Rx FIFO holds data. Returns 0 on
+ * success, the nthw_spis error code on FIFO query failure, or -1 on
+ * timeout.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	const uint64_t n_start = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+	bool b_empty = true;
+	int n_result;
+
+	for (;;) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		n_result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &b_empty);
+		if (n_result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return n_result;
+		}
+
+		if (!b_empty)
+			return 0;
+
+		if ((NT_OS_GET_TIME_MONOTONIC_COUNTER() - n_start) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/*
+ * Debug helper: log 'count' bytes as hex, 16 bytes per line.
+ * Uses snprintf so the formatting can never overrun tmp_str even if the
+ * line-width constants are changed later.
+ */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		snprintf(&tmp_str[j * 3], sizeof(tmp_str) - (size_t)(j * 3),
+			 "%02X ", *(p_data++));
+		j++;
+
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0';
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Create, initialize and reset the SPIM (master) and SPIS (slave) modules
+ * that make up the SPI v3 link. Errors are logged but initialization
+ * continues; the last failing step's status is returned (0 = success).
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Log message corrected: this is the SPIM init step */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Perform one SPI v3 request/response transaction.
+ *
+ * The Tx payload is wrapped in a 32-bit header {opcode, size} and pushed
+ * word-by-word into the SPIM Tx FIFO (big-endian on the wire via htonl);
+ * the response is read from the SPIS Rx FIFO as a 32-bit header
+ * {error_code, size} followed by the payload.
+ *
+ * On entry rx_buf->size is the capacity of rx_buf->p_buf; on return it is
+ * the number of payload bytes actually received.
+ *
+ * Returns 0 on success, -1 when the peer reports an error code
+ * (AVR opcode error), 1 when the response exceeds the Rx buffer, or a
+ * module error code from the underlying SPIM/SPIS calls.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+	/*
+	 * Wire header layout: low half first in the raw word before the
+	 * htonl/ntohl swap. NOTE(review): field order within the packed
+	 * struct assumes a little-endian host - confirm for other targets.
+	 */
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	/* Header goes first, in network byte order */
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Stream the Tx payload as 32-bit words; the final partial
+		 * word is zero-padded. */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the on-board AVR controller over the SPI channel. */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following commands are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM Vital Product Data layout (Gen2 boards). Field order and sizes
+ * mirror the EEPROM image and must not be rearranged.
+ */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (e.g. "ntmainb1e2" or "ntfront20b1").
+	 * NOTE(review): the original comment said 10 bytes, but the array is
+	 * GEN2_BNAME_SIZE (14) bytes - confirm the intended field size.
+	 */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/* Decoded board identification and capability record.
+ * NOTE(review): the producer is not visible in this file - presumably
+ * populated from the VPD EEPROM / AVR sysinfo replies; confirm against
+ * the callers that fill it in.
+ */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];   /* NUL-terminated copies of */
+	char product_id[GEN2_PN_SIZE + 1];      /* the raw VPD byte arrays  */
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Buffer descriptor for SPI transfer payloads: size in bytes plus data. */
+struct tx_rx_buf {
+	uint16_t size; /* number of valid bytes at p_buf */
+	void *p_buf;
+};
+
+/* SPI v3 channel context: pairs a SPI master (SPIM) and slave (SPIS) module. */
+struct nthw__spi__v3 {
+	int m_time_out; /* timeout handed to the RX-data-ready wait helper */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+/*
+ * Allocate and zero-initialize a SPIM (SPI master) context.
+ *
+ * @return new context, or NULL on allocation failure; the caller owns it
+ *         and must release it with nthw_spim_delete().
+ */
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()) */
+	return calloc(1, sizeof(nthw_spim_t));
+}
+
+/*
+ * Bind a SPIM context to FPGA module MOD_SPIM/n_instance and resolve all
+ * register/field handles used by the other nthw_spim_* functions.
+ *
+ * If p is NULL the call acts as a pure probe: it returns 0 when the module
+ * instance exists and -1 otherwise, without touching any state.
+ *
+ * @return 0 on success, -1 if the module instance is not present.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	/* Probe-only mode: report presence without initializing */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* SRR: software reset */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* CR: control (loopback, enable, TX/RX reset) */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* SR: status (done flag, FIFO empty/full/level) */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* DTR: TX data (FIFO write), DRR: RX data (FIFO read) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+/*
+ * Release a SPIM context obtained from nthw_spim_new().
+ * Safe to call with NULL; the context is scrubbed before being freed.
+ */
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Soft-reset the SPIM core by writing the magic value 0x0A to SRR.RST.
+ *
+ * @return always 0
+ */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Set or clear the SPIM CR.EN (core enable) bit.
+ * Read-modify-write: refresh the register, update EN, flush it back.
+ *
+ * @return always 0
+ */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/*
+ * Push one 32-bit word into the SPIM TX FIFO via the DTR register.
+ *
+ * @return always 0
+ */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/*
+ * Query whether the SPIM TX FIFO is empty (SR.TXEMPTY).
+ *
+ * @param pb_empty output flag; must not be NULL (asserted)
+ * @return always 0
+ */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = (field_get_updated(p->mp_fld_sr_txempty) != 0);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/*
+ * SPIM (SPI master) context: FPGA module handle plus register/field
+ * handles resolved once by nthw_spim_init() and reused by the accessors.
+ */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control - loopback, enable, TX/RX reset */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR: status - done flag and FIFO empty/full/level indicators */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR: TX data (FIFO write) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: RX data (FIFO read); CFG.PRE presumably the clock
+	 * prescaler - confirm against the FPGA register documentation.
+	 */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+/*
+ * Allocate and zero-initialize a SPIS (SPI slave) context.
+ *
+ * @return new context, or NULL on allocation failure; the caller owns it
+ *         and must release it with nthw_spis_delete().
+ */
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()) */
+	return calloc(1, sizeof(nthw_spis_t));
+}
+
+/*
+ * Bind a SPIS context to FPGA module MOD_SPIS/n_instance and resolve all
+ * register/field handles used by the other nthw_spis_* functions.
+ *
+ * If p is NULL the call acts as a pure probe: it returns 0 when the module
+ * instance exists and -1 otherwise, without touching any state.
+ *
+ * @return 0 on success, -1 if the module instance is not present.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	/* Probe-only mode: report presence without initializing */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* SRR: software reset */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* CR: control - loopback, enable, TX/RX reset, debug */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* SR: status - done flag, FIFO levels, and frame/read/write errors */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* DTR: TX data (FIFO write), DRR: RX data (FIFO read) */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* RAM_CTRL/RAM_DATA: sensor result RAM access window */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+/*
+ * Release a SPIS context obtained from nthw_spis_new().
+ * Safe to call with NULL; the context is scrubbed before being freed.
+ */
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Soft-reset the SPIS core by writing the magic value 0x0A to SRR.RST.
+ *
+ * @return always 0
+ */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/*
+ * Set or clear the SPIS CR.EN (core enable) bit.
+ * Read-modify-write: refresh the register, update EN, flush it back.
+ *
+ * @return always 0
+ */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/*
+ * Query whether the SPIS RX FIFO is empty (SR.RXEMPTY).
+ *
+ * @param pb_empty output flag; must not be NULL (asserted)
+ * @return always 0
+ */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = (field_get_updated(p->mp_fld_sr_rxempty) != 0);
+
+	return 0;
+}
+
+/*
+ * Pop one 32-bit word from the SPIS RX FIFO via the DRR register.
+ *
+ * @param p_data output word; must not be NULL (asserted)
+ * @return always 0
+ */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	uint32_t n_value;
+
+	assert(p_data);
+
+	n_value = field_get_updated(p->mp_fld_drr_drr);
+	*p_data = n_value;
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM.
+ *
+ * Programs RAM_CTRL with the result index and a count of 1, flushes the
+ * control register, then reads the data word. The control write must
+ * precede the data read; do not reorder these accesses.
+ *
+ * @param n_result_idx    index of the sensor result to fetch
+ * @param p_sensor_result output word; must not be NULL (asserted)
+ * @return always 0
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/*
+ * SPIS (SPI slave) context: FPGA module handle plus register/field
+ * handles resolved once by nthw_spis_init() and reused by the accessors.
+ */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control - loopback, enable, TX/RX reset, debug */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR: status - done flag, FIFO levels, frame/read/write errors */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR: TX data (FIFO write) */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	/* DRR: RX data (FIFO read) */
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* RAM_CTRL/RAM_DATA: sensor result RAM access window */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+/*
+ * Allocate and zero-initialize a TSM (time stamp module) context.
+ *
+ * @return new context, or NULL on allocation failure; the caller owns it
+ *         and must release it with nthw_tsm_delete().
+ */
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()) */
+	return calloc(1, sizeof(nthw_tsm_t));
+}
+
+/*
+ * Release a TSM context obtained from nthw_tsm_new().
+ * Safe to call with NULL; the context is scrubbed before being freed.
+ */
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a TSM context to FPGA module MOD_TSM/n_instance and resolve the
+ * register/field handles used by the other nthw_tsm_* functions.
+ *
+ * If p is NULL the call acts as a pure probe: it returns 0 when the module
+ * instance exists and -1 otherwise, without touching any state.
+ *
+ * @return 0 on success, -1 if the module instance is not present.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	/* Probe-only mode: report presence without initializing */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Each register below was previously fetched twice (once into
+		 * the local p_reg and once into the cached mp_reg_* handle);
+		 * fetch once and reuse the handle.
+		 */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the current 64-bit timestamp: (TS_HI << 32) | TS_LO.
+ *
+ * NOTE(review): TS_LO and TS_HI are fetched in two separate register
+ * reads; whether the FPGA latches both halves atomically on the first
+ * access is not visible here - confirm against the register doc.
+ *
+ * @param p_ts output; must not be NULL
+ * @return 0 on success, -1 if p_ts is NULL
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* The guard above ensures p_ts is non-NULL; the original re-checked
+	 * it redundantly before the store.
+	 */
+	*p_ts = ((((uint64_t)n_ts_hi) << 32UL) | n_ts_lo);
+
+	return 0;
+}
+
+/*
+ * Read the current wall time: (TIME_HI(sec) << 32) | TIME_LO(ns), per the
+ * TSM_TIME_HI_SEC / TSM_TIME_LO_NS field names.
+ *
+ * @param p_time output; must not be NULL
+ * @return 0 on success, -1 if p_time is NULL
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* The guard above ensures p_time is non-NULL; the original re-checked
+	 * it redundantly before the store.
+	 */
+	*p_time = ((((uint64_t)n_time_hi) << 32UL) | n_time_lo);
+
+	return 0;
+}
+
+/*
+ * Program the TSM wall time: low 32 bits to TIME_LO, high 32 bits to
+ * TIME_HI. Both halves now carry an explicit uint32_t cast (the original
+ * cast only the high word).
+ *
+ * @return always 0
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	field_set_val_flush32(p->mp_fld_time_lo,
+			    (uint32_t)(n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/*
+ * Enable or disable timer T0 via TIMER_CTRL.TIMER_EN_T0.
+ * Read-modify-write of the control register.
+ *
+ * @return always 0
+ */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t0);
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+	return 0;
+}
+
+/*
+ * Program the timer T0 period (TIMER_T0.MAX_COUNT).
+ *
+ * @param n_timer_val period value; per the existing comment, in ns
+ *                    (callers use e.g. 50*1000*1000)
+ * @return always 0
+ */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/*
+ * Enable or disable timer T1 via TIMER_CTRL.TIMER_EN_T1.
+ * Read-modify-write of the control register.
+ *
+ * @return always 0
+ */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t1);
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+	return 0;
+}
+
+/*
+ * Program the timer T1 period (TIMER_T1.MAX_COUNT).
+ *
+ * @param n_timer_val period value; per the existing comment, in ns
+ *                    (callers use e.g. 100*1000*1000)
+ * @return always 0
+ */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/*
+ * Select the TSM timestamp format (CONFIG.TS_FORMAT).
+ *
+ * @param n_val format selector; 0x1 = Native (10ns units, epoch
+ *              1970-01-01) per the comment below
+ * @return always 0
+ */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/*
+ * TSM (time stamp module) context: FPGA module handle plus register/field
+ * handles resolved once by nthw_tsm_init() and reused by the accessors.
+ */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	/* CONFIG.TS_FORMAT: timestamp format selector */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL: T0/T1 enable bits */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	/* TIMER_T0/T1 period values */
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* TS_LO/TS_HI: 64-bit timestamp split across two registers */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO/TIME_HI: wall time, ns (LO) and seconds (HI) fields */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+/*
+ * Allocate and zero-initialize a DBS context.
+ *
+ * @return new context, or NULL on allocation failure; the caller owns it
+ *         and must release it with nthw_dbs_delete().
+ */
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc()+memset()) */
+	return calloc(1, sizeof(nthw_dbs_t));
+}
+
+/*
+ * Release a DBS context obtained from nthw_dbs_new().
+ * Safe to call with NULL; the context is scrubbed before being freed.
+ */
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the RX control register: disable all RX queues, the availability
+ * monitor and the used-writer, and program the scan/update speeds to their
+ * reset values (8 and 5).  The whole register is flushed in one write.
+ * Always returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset the TX control register: disable all TX queues, the availability
+ * monitor and the used-writer, and program the scan/update speeds to their
+ * reset values.  Always returns 0.
+ *
+ * NOTE(review): the scan/update speeds (5/8) are mirrored relative to the
+ * RX reset (8/5) — presumably intentional; confirm against the HW spec.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Bring the whole DBS block back to a known state: reset the RX and TX
+ * control registers, then zero every queue's shadow state (AM, UW, DR and,
+ * for TX, also QP and QoS) and flush the cleared values to hardware.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Clear RX shadow state and memory banks, one queue at a time */
+	for (uint32_t queue = 0; queue < NT_DBS_RX_QUEUES_MAX; ++queue) {
+		set_shadow_rx_am_data(p, queue, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, queue);
+
+		set_shadow_rx_uw_data(p, queue, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, queue);
+
+		set_shadow_rx_dr_data(p, queue, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, queue);
+	}
+
+	/* Clear TX shadow state and memory banks, one queue at a time */
+	for (uint32_t queue = 0; queue < NT_DBS_TX_QUEUES_MAX; ++queue) {
+		set_shadow_tx_am_data(p, queue, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, queue);
+
+		set_shadow_tx_uw_data(p, queue, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, queue);
+
+		set_shadow_tx_dr_data(p, queue, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, queue);
+
+		set_shadow_tx_qp_data(p, queue, 0);
+		flush_tx_qp_data(p, queue);
+
+		set_shadow_tx_qos_data(p, queue, 0, 0, 0);
+		flush_tx_qos_data(p, queue);
+	}
+}
+
+/*
+ * Program the RX control register from the supplied values and flush it
+ * to hardware in a single register write.  Always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Debug dump of all values about to be written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX control settings into the six out-parameters, one
+ * field at a time.  Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the TX control register from the supplied values and flush it
+ * to hardware in a single register write.  Always returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	/* Debug dump of all values about to be written */
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Was missing: print the queue-enable value like the RX variant does */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX control settings into the six out-parameters, one
+ * field at a time.  Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable = field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed = field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed = field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Start initialization of an RX queue.  If the optional INIT_VAL register
+ * exists on this FPGA, the start index and pointer are written first; the
+ * init command and queue number are then flushed.  Always returns 0.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the RX init command, queue number and busy flag.
+ * Always returns 0.
+ */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Start initialization of a TX queue.  If the optional INIT_VAL register
+ * exists on this FPGA, the start index and pointer are written first; the
+ * init command and queue number are then flushed.  Always returns 0.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/*
+ * Read back the TX init command, queue number and busy flag.
+ * Always returns 0.
+ */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Write the RX idle command and queue number to the optional IDLE
+ * register.  Returns -ENOTSUP when the register is absent on this FPGA,
+ * 0 otherwise.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the RX idle and busy flags from hardware (field_get_updated
+ * refreshes the register before reading).  *queue is always reported
+ * as 0.  Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Write the TX idle command and queue number to the optional IDLE
+ * register.  Returns -ENOTSUP when the register is absent on this FPGA,
+ * 0 otherwise.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read the TX idle and busy flags from hardware (field_get_updated
+ * refreshes the register before reading).  *queue is always reported
+ * as 0.  Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the optional PTR register reports on.
+ * Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the RX queue pointer and its valid flag for the queue previously
+ * selected with set_rx_ptr_queue().  *queue is always reported as 0.
+ * Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the optional PTR register reports on.
+ * Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the TX queue pointer and its valid flag for the queue previously
+ * selected with set_tx_ptr_queue().  *queue is always reported as 0.
+ * Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Select which RX AM table entry the next DATA register write targets. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Per-field setters for the RX AM shadow entry (software copy only;
+ * nothing is written to hardware until flush_rx_am_data()).
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/* NOTE(review): oddly prefixed nthw_dbs_ unlike its sibling setters */
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Populate every field of one RX AM shadow entry (no hardware access). */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write one RX AM shadow entry to hardware: load the field cache from the
+ * shadow (the 64-bit GPA is written as two 32-bit words), select the table
+ * index, then flush the DATA register.  PACKED and INT fields are optional
+ * and only written when present on this FPGA.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry point: update the shadow copy of one RX AM entry and flush
+ * it to hardware.  Returns -ENOTSUP when the DATA register is absent on
+ * this FPGA, 0 otherwise.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Select which TX AM table entry the next DATA register write targets. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/* Populate every field of one TX AM shadow entry (no hardware access).
+ * Unlike the RX variant, this assigns the struct members directly instead
+ * of going through per-field helpers.
+ */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Write one TX AM shadow entry to hardware: load the field cache from the
+ * shadow (the 64-bit GPA is written as two 32-bit words), select the table
+ * index, then flush the DATA register.  PACKED and INT fields are optional
+ * and only written when present on this FPGA.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public entry point: update the shadow copy of one TX AM entry and flush
+ * it to hardware.  Returns -ENOTSUP when the DATA register is absent on
+ * this FPGA, 0 otherwise.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Select which RX UW table entry the next DATA register write targets. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Per-field setters for the RX UW (used-writer) shadow entry (software
+ * copy only; nothing is written to hardware until flush_rx_uw_data()).
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Populate every field of one RX UW shadow entry (no hardware access). */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write one RX UW shadow entry to hardware.  On DBS module versions newer
+ * than 0.8 the queue size is encoded as a mask (2^qs - 1); older versions
+ * take the raw shadow value.  PACKED is optional; VEC and ISTK are only
+ * written together with the optional INT field.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public entry point: update the shadow copy of one RX UW entry and flush
+ * it to hardware.  Returns -ENOTSUP when the DATA register is absent on
+ * this FPGA, 0 otherwise.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which TX UW table entry the next DATA register write targets. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Per-field setters for the TX UW (used-writer) shadow entry (software
+ * copy only; nothing is written to hardware until flush_tx_uw_data()).
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Populate every field of one TX UW shadow entry (no hardware access). */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write one TX UW shadow entry to hardware.  On DBS module versions newer
+ * than 0.8 the queue size is encoded as a mask (2^qs - 1); older versions
+ * take the raw shadow value.  PACKED, INT (with VEC/ISTK) and INO fields
+ * are optional and only written when present on this FPGA.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public entry point: update the shadow copy of one TX UW entry and flush
+ * it to hardware.  Returns -ENOTSUP when the DATA register is absent on
+ * this FPGA, 0 otherwise.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which RX DR table entry the next DATA register write targets. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Per-field setters for the RX DR (descriptor-reader) shadow entry
+ * (software copy only; nothing reaches hardware until flush_rx_dr_data()).
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Populate every field of one RX DR shadow entry (no hardware access). */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write one RX DR shadow entry to hardware.  On DBS module versions newer
+ * than 0.8 the queue size is encoded as a mask (2^qs - 1); older versions
+ * take the raw shadow value.  PACKED is optional and only written when
+ * present on this FPGA.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public entry point: update the shadow copy of one RX DR entry and flush
+ * it to hardware.  Returns -ENOTSUP when the DATA register is absent on
+ * this FPGA, 0 otherwise.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+/* Select which TX DR table entry the next DATA register write targets. */
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+/* Per-field setters for the TX DR (descriptor-reader) shadow entry
+ * (software copy only; nothing reaches hardware until flush_tx_dr_data()).
+ */
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write shadow entry @index to the TX DR data register and flush it.
+ * The 64-bit guest physical address is passed to field_set_val() as two
+ * 32-bit words via a pointer cast; NOTE(review): this assumes the word
+ * order field_set_val() expects matches host layout -- confirm on
+ * big-endian targets.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	/* DBS > v0.8 takes the queue size as a (2^n - 1) mask; older
+	 * versions take the raw exponent value.
+	 */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	/* PACKED field pointer may be NULL -- presumably absent on older
+	 * FPGA register layouts; skip it then.
+	 */
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program the TX descriptor reader (DR) entry for @index: cache the
+ * values in the shadow bank, then write them to hardware.
+ * Returns 0 on success, -ENOTSUP when the FPGA has no DR data register.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Select which TX queue-property (QP) entry the next data flush targets. */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow-bank setters for the TX queue-property (QP) table. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write shadow QP entry @index to the TX queue-property data register. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Map TX queue @index to @virtual_port in the queue-property table.
+ * Returns 0 on success, -ENOTSUP when the FPGA has no QP data register.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/* Select which TX QoS entry the next data flush targets.
+ * NOTE(review): the struct members used here are nt_field_t pointers but
+ * are named with an "mp_reg_" prefix -- inconsistent with the "mp_fld_"
+ * convention used elsewhere in nthw_dbs_s.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1);
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow-bank setters for the TX QoS table (enable flag, information
+ * rate, burst size).
+ */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	p->m_tx_qos_shadow[index].enable = enable;
+}
+
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	p->m_tx_qos_shadow[index].ir = ir;
+}
+
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	p->m_tx_qos_shadow[index].bs = bs;
+}
+
+/* Update all shadow fields of TX QoS entry @index in one call. */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_enable(p, index, enable);
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+}
+
+/* Write shadow QoS entry @index to the TX QoS data register and flush. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Program TX QoS entry @index (enable, information rate, burst size).
+ * Returns 0 on success, -ENOTSUP when the FPGA has no QoS data register.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (!p->mp_reg_tx_queue_qos_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Set the global TX QoS rate as a mul/div fraction; written directly to
+ * hardware, no shadow bank involved.
+ * Returns 0 on success, -ENOTSUP when the FPGA has no QoS rate register.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Struct for implementation of memory bank shadows
+ */
+
+/* DBS_RX_AM_DATA: RX available-monitor shadow entry. */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA: TX available-monitor shadow entry. */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_RX_UW_DATA: RX used-writer shadow entry. */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA: TX used-writer shadow entry. */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* DBS_RX_DR_DATA: RX descriptor-reader shadow entry. */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA: TX descriptor-reader shadow entry. */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* DBS_TX_QP_DATA: TX queue-property shadow entry (queue -> virtual port). */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* TX QoS shadow entry: enable flag, information rate, burst size. */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * One DBS (doorbell/queue system) module instance: cached register and
+ * field handles resolved from the FPGA model, plus host-side shadow
+ * copies of every indexed register bank.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/* NOTE(review): the members below are nt_field_t pointers but use
+	 * the "mp_reg_" prefix; "mp_fld_" would match the rest of the
+	 * struct. Renaming needs a coordinated change in nthw_dbs.c.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Host-side shadow copies of the indexed register banks above. */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/* Kind of port an adapter exposes to the host. */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Hardware identity as reported by firmware, plus VPD-derived MAC info.
+ * NOTE(review): tag "nthwhw_info_s" doubles the "hw" -- presumably a
+ * typo for nthw_hw_info_s; renaming needs a coordinated change.
+ */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/* Per-adapter FPGA description: identity/version fields, port counts,
+ * module handles, and the BAR0 mapping used for register access.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/*
+ * Allocate and zero-initialize an EPP instance.
+ * Returns NULL on allocation failure; release with nthw_epp_delete().
+ */
+nthw_epp_t *nthw_epp_new(void)
+{
+	/* calloc zero-initializes in one step (and checks the size
+	 * product for overflow), replacing the malloc+memset pair.
+	 */
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/*
+ * Free an EPP instance; NULL is accepted and ignored.
+ * The struct is wiped before free() as a debugging aid against stale
+ * register/field pointer use (not a security wipe -- a plain memset
+ * before free may be optimized away).
+ */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_epp_t));
+		free(p);
+	}
+}
+
+/*
+ * Probe whether EPP module instance @n_instance exists in the FPGA.
+ * Calling nthw_epp_init() with a NULL target makes it only query the
+ * FPGA model (returning 0 when the module is found), so this yields a
+ * boolean: 1 = present, 0 = absent.
+ */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/*
+ * Bind an EPP instance to FPGA module instance @n_instance and resolve
+ * all register/field handles from the FPGA model.
+ * When @p is NULL the function only probes for the module (see
+ * nthw_epp_present()). Returns 0 on success, -1 when the module is not
+ * found.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory: control (ADR/CNT) + data registers.
+	 * ("reciepe" spelling is kept -- it is part of the member names.)
+	 */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical TX port MTU table. */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Virtual queue MTU table. */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Physical TX port QoS table. */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual port QoS table. */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue -> virtual port mapping table. */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Bring the EPP module to a known default state: clear all recipe
+ * categories, program the NRECIPE default recipes, and initialize the
+ * MTU and QoS tables for physical and virtual ports.
+ * A NULL @p is a no-op returning 0. Always returns 0.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one recipe entry per control flush (CNT = 1). */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Program the first NRECIPE records with the default size-adjust
+	 * recipes (none / VXLAN / VXLAN-over-IPv6).
+	 */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* phy mtu setup -- 2 entries; presumably the number of physical
+	 * TX ports on this FPGA (TODO confirm, candidate for a named
+	 * constant or product parameter).
+	 */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* phy QoS setup -- disabled by default */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* virt mtu setup -- 128 entries; presumably matches the max
+	 * virtual queue count (TODO confirm against NT_DBS_*_QUEUES_MAX).
+	 */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* virt QoS setup -- disabled by default */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU registers in context with the current setMTU request.
+ * @port indexes either the physical-port table or the virtual-queue
+ * table depending on @port_type.
+ * Returns 0 on success, -ENOTSUP for unsupported port types (after
+ * resetting the MTU registers to their defaults).
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the TXP MTU table entry for @port */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new max MTU to the TXP MTU data register */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue MTU table entry for @port */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new max MTU to the queue MTU data register */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Restore both MTU tables to reset defaults rather than
+		 * leaving a partially-selected entry behind.
+		 */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Program QoS (rate limiting) for physical TX port @port.
+ * The entry is enabled when any of rate/fraction/burst is nonzero;
+ * passing all zeros disables QoS for the port. Returns 0.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	/* Enable iff at least one QoS parameter is nonzero */
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Program QoS (rate limiting) for virtual port @port.
+ * Mirrors nthw_epp_set_txp_qos() on the vport QoS table: enabled when
+ * any of rate/fraction/burst is nonzero. Returns 0.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	/* Enable iff at least one QoS parameter is nonzero */
+	uint32_t enable = ((information_rate | information_rate_fractional |
+			    burst_size) != 0);
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/* Map queue @qid to virtual port @vport in the EPP queue/vport table.
+ * Returns 0 (a NULL @p is a no-op).
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6 adds extra 70 bytes (larger outer IP header) */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Default L2 MTU programmed at startup */
+#define MTUINITVAL 1500
+/* Number of EPP recipe entries (0 = none, 1 = VXLAN/IPv4, 2 = VXLAN/IPv6) */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+/*
+ * NOTE(review): defining static arrays in a header gives every including
+ * translation unit its own copy (and possible unused-variable warnings);
+ * consider moving the definitions to nthw_epp.c - confirm with maintainers.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * Handle for the EPP (egress packet processing) FPGA module: the owning
+ * FPGA/module plus cached pointers to every register and field the driver
+ * touches. Populated by nthw_epp_init().
+ * (The "reciepe" spelling below is kept as-is: the names are part of the
+ * module's API surface.)
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	/* Recipe memory: per-recipe size adjust / MTU-check configuration */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Per physical TX port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Per queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* TX-port QoS shaper (rate/burst, see nthw_epp_set_txp_qos) */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual-port QoS shaper (see nthw_epp_set_vport_qos) */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue to virtual-port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+/* Allocate an EPP handle (caller owns it; free with nthw_epp_delete). */
+nthw_epp_t *nthw_epp_new(void);
+/* Free an EPP handle previously returned by nthw_epp_new. */
+void nthw_epp_delete(nthw_epp_t *p);
+
+/* Non-zero when EPP instance n_instance exists in the given FPGA. */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+/* Bind the handle to EPP instance n_instance and cache register/field pointers. */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+/* Program startup defaults (recipes, MTU tables). */
+int nthw_epp_setup(nthw_epp_t *p);
+/* Set the max MTU for a physical port or queue, selected by port_type. */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+/* Configure the TX-port QoS shaper; all-zero parameters disable it. */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+/* Configure the virtual-port QoS shaper; all-zero parameters disable it. */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+/* Map queue qid to virtual port vport. */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code */
+/* NULL-terminated table of supported FPGA images; counted by fpga_mgr_init(). */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+/*
+ * Module-id to printable-name map, consumed by nthw_fpga_mod_id_to_str().
+ * Terminated by the { 0L, NULL } sentinel - keep it last.
+ */
+static const struct {
+	const int a;     /* module id (MOD_*) */
+	const char *b;   /* printable name */
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },	/* sentinel - must stay last */
+};
+
+/* NOTE: this needs to be (manually) synced with enum */
+/* Printable names for the BUS_TYPE_* enum; index 0 is the unknown/error entry. */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Map a bus type id to its printable name.
+ * Valid ids are 1..ARRAY_SIZE-1 (index 0 is the "ERR" placeholder).
+ * Fix: the original upper bound was '<= ARRAY_SIZE', which permitted an
+ * out-of-bounds read at a_bus_type[ARRAY_SIZE].
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	else
+		return "ERR";
+}
+
+/*
+ * Module name lookup by id from array
+ * Uses naive linear search as performance is not an issue here...
+ * Fix: the original loop ran with 'i <= ARRAY_SIZE', reading one entry
+ * past the map when the id was not found. The { 0L, NULL } sentinel makes
+ * an id of 0 resolve to "unknown" as before.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id) {
+			return (sa_nthw_fpga_mod_map[i].b ?
+				sa_nthw_fpga_mod_map[i].b : "unknown");
+		}
+	}
+	return "unknown";
+}
+
+/*
+ * Bus-dispatch helpers: route 32-bit register accesses either through the
+ * PCI BAR (single word) or one of the RAB (register access bus) channels
+ * 0..2, based on the bus type id. All return 0 on success, negative on error.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		/* BAR/PCI access path only supports single-word reads */
+		assert(len == 1);
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		/* Unknown bus type is a programming error */
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Timestamped read wrapper. The TSC out-parameters are currently ignored
+ * (no timestamps are captured); the call degenerates to read_data().
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	int rc = -1;
+
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	rc = read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+
+	return rc;
+}
+
+/* Write counterpart of read_data(); same bus dispatch and return convention. */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		/* BAR/PCI access path only supports single-word writes */
+		assert(len == 1);
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		/* Unknown bus type is a programming error */
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/*
+ * Allocate an FPGA manager instance; NULL on allocation failure.
+ * Fix: zero the instance so fpga_mgr_delete() before fpga_mgr_init() is
+ * well defined (consistent with fpga_new()).
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = malloc(sizeof(nt_fpga_mgr_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/*
+ * Free an FPGA manager instance.
+ * Fix: tolerate NULL (the original memset crashed on it); the scrub before
+ * free defends against stale-pointer reuse.
+ */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_fpga_mgr_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind the generated FPGA instance table to the manager and count its
+ * entries (the table is NULL-terminated).
+ */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t n = 0;
+
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	while (n < ARRAY_SIZE(nthw_fpga_instances) &&
+			p->mpa_fpga_prod_init[n] != NULL)
+		n++;
+	p->mn_fpgas = (int)n;
+}
+
+/*
+ * Find the product-init record matching the decomposed FPGA id and build an
+ * initialized nt_fpga_t from it. Returns NULL (and logs an error) when no
+ * record matches.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id != n_fpga_prod ||
+				p_init->fpga_version != n_fpga_ver ||
+				p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print the supported FPGA image list to fh_out. detail_level 0 prints only
+ * the prod-ver-rev triple; otherwise the build time is added (ctime()'s
+ * result already ends in '\n', matching the "NA\n" fallback).
+ * Fix: 'fpga_build_time' is a time_t, which need not be 'long'; passing it
+ * straight to the %08lX conversion is undefined on such platforms, so cast
+ * it explicitly (truncation past year 2106 is acceptable for a debug dump).
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Log every supported FPGA image at debug level. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused silences the warning when NT_LOG compiles to nothing */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-initialized FPGA instance; NULL on allocation failure. */
+nt_fpga_t *fpga_new(void)
+{
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/*
+ * Free an FPGA instance (shallow: the module/param objects themselves are
+ * NOT deleted - use fpga_delete_all() for that).
+ * Fixes: release the module and parameter pointer arrays, which the
+ * original leaked, and tolerate a NULL handle. The arrays are NULL for an
+ * uninitialized instance because fpga_new() zero-initializes, and
+ * free(NULL) is a no-op.
+ * NOTE(review): the nt_param_t objects are never deleted on any path -
+ * confirm whether a param teardown loop belongs here.
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	if (!p)
+		return;
+
+	free(p->mpa_modules);
+	free(p->mpa_params);
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/* Delete every module owned by the FPGA instance, then the instance itself. */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int n = 0;
+
+	while (n < p->mn_modules) {
+		nt_module_t *p_module = p->mpa_modules[n++];
+
+		if (p_module != NULL)
+			module_delete(p_module);
+	}
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate an FPGA instance from its generated product-init record: copy the
+ * identification fields, then instantiate the parameter and module arrays.
+ * Fix: when an array allocation fails, the matching count is reset to zero
+ * so iterators (fpga_get_product_param, fpga_query_module, fpga_dump_*, ...)
+ * never walk a NULL array.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	if (p->mn_params) {
+		/* calloc gives the zeroed array the original memset provided */
+		p->mpa_params = calloc(p->mn_params, sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		} else {
+			p->mn_params = 0; /* keep count and array consistent */
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	if (p->mn_modules) {
+		p->mpa_modules = calloc(p->mn_modules, sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		} else {
+			p->mn_modules = 0; /* keep count and array consistent */
+		}
+	}
+}
+
+/* Propagate a new debug mode to the FPGA instance and every module it owns. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_modules; n++) {
+		nt_module_t *p_module = p->mpa_modules[n];
+
+		if (p_module != NULL)
+			module_set_debug_mode(p_module, n_debug_mode);
+	}
+}
+
+/* Look up a module by (id, instance); NULL when the FPGA has no such module. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int n = 0;
+
+	while (n < p->mn_modules) {
+		nt_module_t *p_module = p->mpa_modules[n++];
+
+		if (p_module->m_mod_id == id && p_module->m_instance == instance)
+			return p_module;
+	}
+	return NULL;
+}
+
+/* True when the FPGA contains module (id, instance). */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return (fpga_query_module(p, id, instance) != NULL);
+}
+
+/* Find the generated init record for module (id, instance); NULL if absent. */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mp_init->nb_modules; i++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[i];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/* Return the value of product parameter n_param_id, or n_default_value if unset. */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int i;
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_param = p->mpa_params[i];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* Product id of the loaded FPGA image. */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* Version number of the loaded FPGA image. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* Revision number of the loaded FPGA image. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/*
+ * Log the full FPGA identification tuple.
+ * NOTE(review): %08X assumes m_fpga_build_time is a 32-bit field - confirm
+ * against the nt_fpga_t declaration.
+ */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Log a summary of the FPGA, then its parameters and modules in full. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Log every product parameter attached to the FPGA. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (n = 0; n < p->mn_params; n++)
+		param_dump(p->mpa_params[n]);
+}
+
+/* Log every module attached to the FPGA. */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int n;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (n = 0; n < p->mn_modules; n++)
+		module_dump(p->mpa_modules[n]);
+}
+
+/*
+ * Param
+ */
+/*
+ * Allocate a product parameter; NULL on allocation failure.
+ * Fix: zero-initialize so no stale heap bytes leak through if param_init()
+ * is skipped (consistent with fpga_new()).
+ */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = malloc(sizeof(nt_param_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_param_t));
+	return p;
+}
+
+/* Free a parameter object; tolerates NULL. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a parameter to its owner and copy id/value from the init record. */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Log one parameter at debug level. */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/*
+ * Allocate a module instance; NULL on allocation failure.
+ * Fix: zero-initialize so module_delete() on a module that was never
+ * module_init()'ed does not iterate a garbage register count/array
+ * (matches fpga_new()).
+ */
+nt_module_t *module_new(void)
+{
+	nt_module_t *p = malloc(sizeof(nt_module_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_module_t));
+	return p;
+}
+
+/*
+ * Free a module and all registers it owns.
+ * Fix: also release the register pointer array itself, which the original
+ * leaked. The array is only valid when mn_registers != 0 (and may be NULL
+ * after a failed allocation, which free() tolerates).
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	if (p->mn_registers)
+		free(p->mpa_registers);
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Populate a module from its generated init record and instantiate its
+ * register array. Debug mode is inherited from the owning FPGA.
+ * Fix: when the register array allocation fails, mn_registers is reset to
+ * zero so iterators never dereference a NULL mpa_registers.
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		/* calloc gives the zeroed array the original memset provided */
+		p->mpa_registers =
+			calloc(p->mn_registers, sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		} else {
+			p->mn_registers = 0; /* keep count and array consistent */
+		}
+	}
+}
+
+/*
+ * Convenience init: look the init record up by (mod_id, instance), then run
+ * the normal module_init() and override the inherited debug mode.
+ * NOTE(review): fpga_lookup_init() can return NULL and module_init()
+ * dereferences p_init unconditionally - confirm all callers pass ids known
+ * to exist in the image.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = NULL;
+
+	p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Log a module summary followed by all of its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Log every register owned by the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+/* Major version of the module as reported by the FPGA image. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Minor version of the module as reported by the FPGA image. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Major version in the high 32 bits, minor in the low 32 bits. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/*
+ * True when the module version is at least major_version.minor_version
+ * (despite the name, an exactly equal version also returns true).
+ */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (major_version == p->m_major_version)
+		return p->m_minor_version >= minor_version;
+	return p->m_major_version >= major_version;
+}
+
+/* Linear search for a register id within the module; NULL when absent. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int n;
+
+	for (n = 0; n < p->mn_registers; n++) {
+		nt_register_t *p_reg = p->mpa_registers[n];
+
+		if (p_reg->m_id == id)
+			return p_reg;
+	}
+	return NULL;
+}
+
+/*
+ * Look up a register by id; logs an error (but still returns NULL) when the
+ * module handle is NULL or the register does not exist.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Silent variant of module_get_register(): no error logging. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Current debug mode of the module. */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the module debug mode and propagate it to every register. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int i;
+	nt_register_t *p_register = NULL;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		p_register = p->mpa_registers[i];
+		if (p_register)
+			register_set_debug_mode(p_register, n_debug_mode);
+	}
+}
+
+/* Bus type id (BUS_TYPE_*) the module's registers are accessed through. */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Base address of the module's register window. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/*
+ * Log that a module present in the FPGA image is not handled by this driver.
+ * (The misspelled function name is kept - it is part of the exported API.)
+ * Fix: terminate the message with '\n' like every other NT_LOG call in this
+ * file, so the log line is not glued to the next one.
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/*
+ * Allocate a register instance; NULL on allocation failure.
+ * Fix: zero-initialize. register_init() only assigns mp_shadow/mp_dirty
+ * when the register has fields, so with raw malloc memory a field-less
+ * register would hand garbage pointers to register_delete()'s free calls.
+ */
+nt_register_t *register_new(void)
+{
+	nt_register_t *p = malloc(sizeof(nt_register_t));
+
+	if (p)
+		memset(p, 0, sizeof(nt_register_t));
+	return p;
+}
+
+/*
+ * Free a register, its fields and its shadow/dirty buffers.
+ * Fixes: tolerate NULL; release the field pointer array the original
+ * leaked (it is only valid when mn_fields != 0); drop the redundant
+ * if-before-free tests - free(NULL) is a no-op.
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	if (!p)
+		return;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+	if (p->mn_fields)
+		free(p->mpa_fields);
+
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Populate a register from its generated init record: address/type/width,
+ * the field array, and the shadow/dirty buffers used by the read/write
+ * paths.
+ * Fixes: the shadow and dirty buffers are needed for every register (the
+ * read/write helpers use them unconditionally) but were only allocated
+ * when the register had fields - they are now allocated, zeroed, up front;
+ * mn_fields is reset to zero when the field array allocation fails so the
+ * count always matches the array.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	/* m_len >= 1 always; calloc zeroes like the original memsets did */
+	p->mp_shadow = calloc(p->m_len, sizeof(uint32_t));
+	p->mp_dirty = calloc(p->m_len, sizeof(bool));
+
+	p->mpa_fields = NULL;
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = calloc(p->mn_fields, sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+		} else {
+			p->mn_fields = 0; /* keep count and array consistent */
+		}
+	}
+}
+
+/* Log a register summary followed by all of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	/* The unbalanced '(' in the format string is kept as-is */
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Log every field of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute bus address of the register (module base + relative offset). */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Reset every field of the register to its declared reset value. */
+void register_reset(const nt_register_t *p)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_reset(p_field);
+	}
+}
+
+/* Linear search for a field id; NULL when absent or p is NULL. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	if (!p)
+		return NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		if (p->mpa_fields[i]->m_id == id) {
+			p_field = p->mpa_fields[i];
+			break;
+		}
+	}
+	return p_field;
+}
+
+/* Look up a field by id, logging an error when it cannot be found. */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Silent variant of register_get_field(): no error logging. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Declared bit width of the register (-1 for legacy registers). */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Address of the register relative to its module base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/*
+ * Current debug mode.
+ * NOTE(review): the parameter is declared nt_module_t but this sits in the
+ * register API group; both structs expose m_debug_mode, so it compiles -
+ * confirm whether nt_register_t was intended.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * NOTE: do not set debug on fields - as register operation dumps typically are enough
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int n;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (n = 0; n < p->mn_fields; n++) {
+		nt_field_t *p_fld = p->mpa_fields[n];
+
+		if (p_fld != NULL)
+			field_set_debug_mode(p_fld, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register contents from hardware into its shadow buffer.
+ * Fix: the original dereferenced 'p' to capture addr/len/shadow BEFORE the
+ * 'if (p && ...)' test, making that NULL test useless; validate first,
+ * then derive the access parameters.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	return read_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			p->m_len, p->mp_shadow);
+}
+
+/*
+ * Timestamped variant of register_read_data() (timestamps are currently
+ * unused by the underlying read_data_tsc()).
+ * Fix: same use-before-NULL-check defect as register_read_data() - the
+ * register pointer is now validated before its members are read.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	return read_data_tsc(p_fpga_info, module_get_bus(p->mp_owner),
+			   p->m_addr, p->m_len, p->mp_shadow, p_tsc1, p_tsc2);
+}
+
+/*
+ * Write 'cnt' replications of the register's shadow buffer to hardware.
+ * Fix: same use-before-NULL-check defect as register_read_data() - the
+ * register pointer is now validated before its members are read.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	assert(p);
+	if (p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p->mp_shadow);
+
+	return write_data(p_fpga_info, module_get_bus(p->mp_owner), p->m_addr,
+			 (p->m_len * cnt), p->mp_shadow);
+}
+
+/*
+ * Copy up to 'len' shadow words into p_data. A len of (uint32_t)-1, or any
+ * oversized request, is clamped to the register length.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t n = 0;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	while (n < len) {
+		p_data[n] = p->mp_shadow[n];
+		n++;
+	}
+}
+
+/* First shadow word of the register (no hardware access). */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Refresh the shadow buffer from hardware; skipped for write-only
+ * registers. With ON_READ debug mode the data read is logged.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		/* _unused silences warnings when NT_LOG compiles to nothing */
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Refresh from hardware, then return the first shadow word. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every shadow word as needing a flush to hardware. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+/*
+ * Store 'len' words into the register's shadow buffer (no hardware access).
+ * A len of (uint32_t)-1 means "whole register".
+ * Fix: clamp the sentinel/oversized length BEFORE asserting; the original
+ * asserted len <= m_len first, so the documented -1 sentinel fired the
+ * assert in debug builds (register_get_val already clamps first).
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	/* Skip the copy for self-assignment from the shadow buffer */
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write the shadow copy to hardware (@cnt back-to-back copies of the
+ * register) and clear dirty flags. Read-only registers are skipped.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		/* burst limit of the underlying write path */
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		/*
+		 * NOTE(review): only the first @cnt dirty flags are cleared,
+		 * while mp_dirty tracks m_len words (see register_make_dirty)
+		 * - confirm whether this should iterate over len instead.
+		 */
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a read and capture timestamp counters before/after it. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow copy and mark it for write-back. */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/* Set every shadow bit to 1 and mark it for write-back. */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(uint32_t));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/*
+ * Allocate an uninitialised field instance. The caller must check for a
+ * NULL return and run field_init() before any other use.
+ */
+nt_field_t *field_new(void)
+{
+	nt_field_t *p = malloc(sizeof(nt_field_t));
+	return p;
+}
+
+/*
+ * Free a field instance, scrubbing it first. NULL-tolerant (a NULL
+ * argument is a no-op), matching the delete style of nthw_rac_delete();
+ * the previous unconditional memset() was undefined behaviour on NULL.
+ */
+void field_delete(nt_field_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_field_t));
+		free(p);
+	}
+}
+
+/*
+ * Bind a field to its register and precompute the word/bit decomposition
+ * used by field_get_val()/field_set_val():
+ *  - m_front_mask: the field's bits inside its first 32-bit word
+ *  - m_body_length: number of whole 32-bit words that follow
+ *  - m_tail_mask: bits in a final partial word (0 when none)
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	/* bit width rounded up to whole 32-bit words */
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		/* bits available in the first word, from m_first_bit upward */
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		/* 64-bit shift so a 32-bit front part does not overflow */
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* bits_remaining < 32 here, so this shift is well-defined */
+		p->m_tail_mask = (1 << bits_remaining) - 1;
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/*
+ * Simple field accessors.
+ * NOTE(review): field_get_debug_mode takes an nt_module_t (matching its
+ * header prototype) rather than an nt_field_t - confirm this is intended.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Highest bit position occupied by the field (inclusive). */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low + p->mn_bit_width - 1;
+}
+
+/*
+ * In-register mask of the field's first word only; for fields spanning
+ * multiple words this does not cover body/tail bits.
+ */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Load the field's reset value into the shadow copy (no flush). */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/* Same mask shifted down to bit 0 (value domain). */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->mn_bit_pos_low);
+}
+
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field value from the register's shadow copy into @p_data
+ * (@len must equal mn_words). Uses a 32/64-bit union as a shift window
+ * across word boundaries.
+ * NOTE(review): w32[0]/w32[1] as low/high halves of w64 assumes
+ * little-endian word order in the union - confirm for big-endian targets.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert @p_data (@len must equal mn_words) into the field's position in
+ * the register's shadow copy, preserving neighbouring bits, then mark the
+ * register dirty. Mirror of field_get_val(); the same union shift-window
+ * technique (and the same little-endian word-order assumption) applies.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Set the field value and immediately flush its register to hardware. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	field_flush_register(p);
+}
+
+/* Field value as a single 32-bit word, from the current shadow copy. */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t val;
+
+	field_get_val(p, &val, 1);
+	return val;
+}
+
+/* Like field_get_val32(), but re-reads the register from hardware first. */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t val;
+
+	register_update(p->mp_owner);
+	field_get_val(p, &val, 1);
+
+	return val;
+}
+
+/* Trigger a read of the owning register and capture TSC before/after. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Refresh the owning register's shadow copy from hardware. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the owning register's shadow copy to hardware. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set the field from a 32-bit value (shadow only, no flush). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+}
+
+/* Set the field from a 32-bit value and flush the register. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val(p, &val, 1);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Clear all field bits; only valid for fields within 32 bits. */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0);
+}
+
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Set all field bits; only valid for fields within 32 bits. */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Poll-exit conditions for field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,	/* wait until every field bit is 0 */
+	FIELD_MATCH_SET_ALL,	/* wait until every field bit is 1 */
+	FIELD_MATCH_CLR_ANY,	/* wait until at least one bit is 0 */
+	FIELD_MATCH_SET_ANY,	/* wait until at least one bit is 1 */
+};
+
+/*
+ * Poll the field until it matches @e_match; returns 0 on match, -1 when
+ * the iteration budget runs out. A -1 iteration/interval argument selects
+ * the defaults (10000 polls / 100 usec).
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	/*
+	 * All-ones value of the field. Use a 64-bit shift so a field that is
+	 * exactly 32 bits wide does not invoke undefined behaviour (1 << 32).
+	 */
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		/* in-register mask; same 64-bit-shift care as n_mask above */
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(n_mask << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			/*
+			 * field_get_updated() returns the value shifted down
+			 * to bit 0, so "not all bits set" must be tested
+			 * against the field-width mask, not the position-
+			 * shifted front mask returned by field_get_mask().
+			 */
+			if (val != n_mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+}
+
+/*
+ * Convenience wrappers around field_wait_cond32(), one per match type.
+ * All return 0 on success and -1 on poll timeout.
+ */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until (value & mask) equals (expected & mask); returns 0
+ * on match, -1 on timeout. -1 iteration/interval arguments select the
+ * defaults (10000 polls / 100 usec).
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		/*
+		 * Apply the mask to BOTH sides: bits outside n_wait_cond_mask
+		 * are don't-cares and previously kept the poll spinning when
+		 * set in the hardware value.
+		 */
+		if ((val & n_wait_cond_mask) ==
+				(n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/*
+ * Log the field's geometry (id, bit range, width, word count, reset val).
+ * NOTE(review): @p is marked _unused yet referenced - presumably NT_LOG
+ * compiles to nothing in some builds; confirm.
+ */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Log the field's current value, most significant word first.
+ * NOTE(review): the fixed buf[32] assumes mn_words <= 32 - confirm no
+ * field in the register maps exceeds 1024 bits.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Log a field's static init descriptor (id, bit range, width, reset). */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Look up the FPGA model matching @n_fpga_ident via a temporary FPGA
+ * manager. Returns NULL (after logging) when the ident is not supported;
+ * the manager itself is always released before returning.
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	/* the manager is only needed for the query; free it in all cases */
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	return p_fpga;
+}
+
+/* Thin lookup helpers re-exported under the nthw_ prefix. */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	return module_get_register(p_mod, n_reg);
+}
+
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+/*
+ * Decode the packed 64-bit FPGA ident: PRODUCTTYPE bits [39:32],
+ * PRODUCTCODE [31:16], VERSIONCODE [15:8], REVISIONCODE [7:0].
+ * NOTE(review): the guard tests only FPGAID_TO_PRODUCTCODE but defines
+ * all four macros - confirm no partial definitions exist elsewhere.
+ */
+#ifndef FPGAID_TO_PRODUCTCODE
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack major into the high 32 bits, minor into the low 32 bits. */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/*
+ * Debug trace triggers; ON_READ (1) and ON_WRITE (2) are also tested as
+ * bit flags (e.g. "m_debug_mode & ON_READ" in the register helpers).
+ */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Internal buses an FPGA module can be attached to. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of the FPGA product descriptions known to the driver. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* One instantiated FPGA: identity, product parameters and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	int mn_params;
+	struct nt_param_s **mpa_params;
+
+	int mn_modules;
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* A single product parameter (id/value pair) of an FPGA. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* A functional module inside the FPGA and its register map. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers;
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/*
+ * A hardware register with a local shadow copy (mp_shadow) and per-word
+ * dirty flags (mp_dirty) used by the flush/update helpers.
+ */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;
+	uint32_t m_addr;
+	uint32_t m_type;
+	uint32_t m_len;
+
+	int m_debug_mode;
+
+	int mn_fields;
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;
+	bool *mp_dirty;
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/*
+ * A bit field within a register; the front/body/tail masks are
+ * precomputed by field_init() for multi-word extraction and insertion.
+ */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_bit_pos_low;
+	uint32_t m_reset_val;
+	uint32_t m_first_word;
+	uint32_t m_first_bit;
+	uint32_t m_front_mask;
+	uint32_t m_body_length;
+	uint32_t mn_words;
+	uint32_t m_tail_mask;
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+/* Element count of a true array (not valid on pointer parameters). */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a Napatech PCI device id to the logical adapter id.
+ * Unknown device ids map to NT_HW_ADAPTER_ID_UNKNOWN.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	static const struct {
+		uint16_t device_id;
+		nthw_adapter_id_t adapter_id;
+	} device_map[] = {
+		{ NT_HW_PCI_DEVICE_ID_NT40E3, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT100E3, NT_HW_ADAPTER_ID_NT100E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT80E3, NT_HW_ADAPTER_ID_NT80E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT40A00, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT40A01, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200E3, NT_HW_ADAPTER_ID_NT200E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A01, NT_HW_ADAPTER_ID_NT200A01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200D01, NT_HW_ADAPTER_ID_NT200D01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A02, NT_HW_ADAPTER_ID_NT200A02 },
+		{ NT_HW_PCI_DEVICE_ID_NT50B01, NT_HW_ADAPTER_ID_NT50B01 },
+		{ NT_HW_PCI_DEVICE_ID_NT100A01, NT_HW_ADAPTER_ID_NT100A01 },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(device_map); i++) {
+		if (device_map[i].device_id == n_pci_device_id)
+			return device_map[i].adapter_id;
+	}
+	return NT_HW_ADAPTER_ID_UNKNOWN;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+/* Napatech PCI vendor id and the device ids this PMD recognizes. */
+#define NT_HW_PCI_VENDOR_ID (0x18f4)
+
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/*
+ * Logical adapter families. NT40A01 deliberately aliases NT40E3 (same
+ * enumerator value); the two boards share the same logical adapter.
+ */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* FPGA image profile; values are fixed (presumably hardware-reported). */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ * (both branches undefine it on purpose; enable manually if needed)
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+/* Max number of 1us polls while waiting for a RAB DMA completion */
+#define RAB_DMA_WAIT (1000000)
+/* Size (in 32-bit words) of each of the IB/OB DMA ring buffers */
+#define RAB_DMA_BUF_CNT (0x4000)
+
+/* Operation codes carried in the OPR field of a RAB command word */
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+/*
+ * Layout of the 32-bit RAB command word:
+ *   [31:28] operation, [27:20] word count, [19:16] bus id, [15:0] address
+ * *_LO/*_HI are the low/high bit positions, *_BW the field width in bits.
+ */
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ * Returns the new instance, or NULL if allocation fails (callers such
+ * as nthw_rac_init() already accept a NULL instance pointer).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	/* Guard the memset: the original dereferenced p unconditionally */
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+/* Scrub and release a RAC instance; a NULL pointer is silently ignored. */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(nthw_rac_t));
+	free(p);
+}
+
+/*
+ * Bind a RAC instance to the FPGA's RAC module: resolve all RAC
+ * registers/fields, cache their bus addresses and field masks, and
+ * initialize the instance mutex.
+ *
+ * If @p is NULL, only probe for the RAC module (0 = present, -1 = absent).
+ * Returns 0 on success, -1 when the RAC module does not exist.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	{
+		/*
+		 * RAC is a primary communication channel
+		 * turn off debug by default
+		 * except for rac_rab_init
+		 *
+		 * Fix: this block now runs after the RAC_RAB_INIT lookup
+		 * above; it previously dereferenced p->mp_reg_rab_init
+		 * before that pointer was assigned.
+		 */
+		const int n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+
+		if (n_debug_mode && n_debug_mode <= 0xff) {
+			module_set_debug_mode(p->mp_mod_rac, 0);
+			register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+		}
+	}
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/* Fix: clear the field pointer (was mp_reg_dbg_data - copy/paste) */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache the bus addresses used by the raw reg_read32/reg_write32 helpers */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* Optional "nimbus" shadow registers - only cache addresses if present */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces (NT_RAC_RAB_INTERFACES product parameter). */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	return p->mn_param_rac_rab_interfaces;
+}
+
+/*
+ * Busy-poll the RAB OB_USED counter until at least @word_cnt words are
+ * available in the output buffer, bounded at 100000 polls.
+ * @address is only used for the error log. Returns 0 on success, -1 on
+ * timeout.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	uint32_t retry;
+
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		/* Extract the OB_USED field from the BUF_USED register */
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			break;
+	}
+
+	if (used < word_cnt) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+		       p_adapter_id_str, address, used, word_cnt);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (eg RST9xyz)
+ * As a consequence of this behavior the official reset sequence
+ * must be excersised - as all RAB modules will be held in reset.
+ */
+/* See the NT_PCI_REG_P9xyz_RAC_RAB_INIT description above; always returns 0. */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Reset all RAB interfaces via the documented "flip/flip" sequence:
+ * deassert, assert all, then release all but interface 0.
+ * Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the combined inbound/outbound RAB DMA ring buffer,
+ * program its physical address into the adapter and sync the software
+ * ring pointers with the hardware's. Returns 0 on success, -1 if the
+ * DMA allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* One allocation holds both rings: IB followed by OB */
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* OB ring starts right after the IB ring */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Start a RAB DMA transaction: take the instance mutex and mark the
+ * DMA state active. Must be paired with nthw_rac_rab_dma_commit(),
+ * which releases the mutex. Returns 0, or -1 if a transaction is
+ * already in progress (mutex is released in that case).
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		pthread_mutex_unlock(&p->m_mutex);
+		NT_LOG(ERR, NTHW,
+		       "%s: DMA begin requested, but a DMA transaction is already active\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	p->m_dma_active = true;
+
+	return 0;
+}
+
+/*
+ * Finalize the queued inbound commands with a COMPLETION word and kick
+ * off the transfer by publishing the new inbound write pointer.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	/* Ring index arithmetic relies on RAB_DMA_BUF_CNT being a power of two */
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll (1us per iteration, up to RAB_DMA_WAIT times) for the COMPLETION
+ * marker in the outbound ring, then advance the outbound read pointer
+ * and replenish the inbound budget. Returns 0 on success, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word and reset the inbound free budget */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Execute the queued DMA transaction started by nthw_rac_rab_dma_begin():
+ * activate the transfer, wait for completion, then clear the active flag
+ * and release the instance mutex. Returns the wait result (0 or -1).
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+
+	p->m_dma_active = false;
+
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* Read a 32-bit FPGA register through BAR0 at byte offset @reg_addr. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	*p_data = *(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					reg_addr);
+}
+
+/* Write a 32-bit FPGA register through BAR0 at byte offset @reg_addr. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	*(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr + reg_addr) =
+		p_data;
+}
+
+/*
+ * Queue a RAB write of @word_cnt words (1..256) at @address on @bus_id
+ * into the inbound DMA ring. The transfer only starts when
+ * nthw_rac_rab_dma_commit() is called. Returns 0 on success, -1 on an
+ * invalid word count or when the inbound budget is exhausted (commit,
+ * then retry).
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/* +3 keeps headroom for the completion handshake words */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Consume one command word plus the data words */
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read of @word_cnt words (1..256) at @address on @bus_id.
+ * The caller receives the result location through @buf_ptr (index into
+ * the outbound ring); data is valid only after nthw_rac_rab_dma_commit().
+ * Returns 0 on success, -1 on invalid word count or exhausted inbound
+ * budget.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * Fix: the original format string had five conversion
+		 * specifiers but only four arguments (undefined behavior)
+		 * and no trailing newline; now matches the check above.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		return -1;
+	}
+
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* A read consumes only its command word from the inbound ring */
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand the caller the slot where the read data will land */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (PIO, non-DMA) RAB write of @word_cnt words to @address
+ * on @bus_id through the IB/OB data registers, completing with a
+ * COMPLETION handshake. Serialized by the instance mutex; fails if a
+ * DMA transaction is active. Returns 0 on success, -1 on error.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/* NOTE(review): '>' admits address == 1 << RAB_ADDR_BW although the
+	 * field is only RAB_ADDR_BW bits wide - '>=' looks intended; same
+	 * for the bus_id check below. Confirm against the register spec.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 set indicates timeout/overflow on the RAB bus */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Synchronous (PIO, non-DMA) RAB read of @word_cnt words from @address
+ * on @bus_id into @p_data. Serialized by the instance mutex.
+ * Returns 0 on success, -1 on error.
+ *
+ * NOTE(review): unlike nthw_rac_rab_write32(), the mutex is taken
+ * before argument validation here, and there is no m_dma_active check -
+ * confirm whether this asymmetry is intentional.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* NOTE(review): '>' admits the value 1 << RAB_ADDR_BW - '>=' looks
+	 * intended; same for bus_id below.
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		/* Bit 31 set indicates timeout/overflow on the RAB bus */
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Flush the RAB input/output buffers: assert the FLUSH bit, clear the
+ * BUF_FREE status, poll (bounded) until only the FLUSH bit remains set
+ * in BUF_USED, then deassert FLUSH. Returns 0 on success, -1 if the
+ * buffers never drained.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
+/*
+ * RAC (Register Access Controller) instance state.
+ *
+ * The RAC gives host software access to FPGA registers over the RAB
+ * (Register Access Bus), either via direct register polling or via a pair
+ * of DMA ring buffers (IB = input buffer, host->FPGA; OB = output buffer,
+ * FPGA->host -- naming taken from the IB_*/OB_* register fields below).
+ */
+struct nthw_rac {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rac;
+
+	/* Serializes all RAB transactions on this RAC instance */
+	pthread_mutex_t m_mutex;
+
+	/* FPGA product parameters read at init time */
+	int mn_param_rac_rab_interfaces;
+	int mn_param_rac_rab_ob_update;
+
+	nt_register_t *mp_reg_dummy0;
+	nt_register_t *mp_reg_dummy1;
+	nt_register_t *mp_reg_dummy2;
+
+	nt_register_t *mp_reg_rab_init;
+	nt_field_t *mp_fld_rab_init;
+
+	/* Bit-width and mask of the RAB_INIT field */
+	int mn_fld_rab_init_bw;
+	uint32_t mn_fld_rab_init_mask;
+
+	nt_register_t *mp_reg_dbg_ctrl;
+	nt_field_t *mp_fld_dbg_ctrl;
+
+	nt_register_t *mp_reg_dbg_data;
+	nt_field_t *mp_fld_dbg_data;
+
+	/* RAB input-buffer (IB) and output-buffer (OB) data registers */
+	nt_register_t *mp_reg_rab_ib_data;
+	nt_field_t *mp_fld_rab_ib_data;
+
+	nt_register_t *mp_reg_rab_ob_data;
+	nt_field_t *mp_fld_rab_ob_data;
+
+	/* Free-space bookkeeping plus overflow/timeout status flags */
+	nt_register_t *mp_reg_rab_buf_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
+	nt_field_t *mp_fld_rab_buf_free_ob_free;
+	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
+	nt_field_t *mp_fld_rab_buf_free_timeout;
+
+	nt_register_t *mp_reg_rab_buf_used;
+	nt_field_t *mp_fld_rab_buf_used_ib_used;
+	nt_field_t *mp_fld_rab_buf_used_ob_used;
+	nt_field_t *mp_fld_rab_buf_used_flush;
+
+	/* DMA ring registers; lo/hi pairs hold a split 64-bit physical address */
+	nt_register_t *mp_reg_rab_dma_ib_lo;
+	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_hi;
+	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_hi;
+	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_lo;
+	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_wr;
+	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ib_rd;
+	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ob_wr;
+	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;
+
+	nt_register_t *mp_reg_rab_nmb_rd;
+	nt_register_t *mp_reg_rab_nmb_data;
+	nt_register_t *mp_reg_rab_nmb_wr;
+	nt_register_t *mp_reg_rab_nmb_status;
+
+	/* Register addresses cached at init for fast-path access */
+	uint32_t rac_rab_init_addr;
+	uint32_t rac_rab_ib_data_addr;
+	uint32_t rac_rab_ob_data_addr;
+	uint32_t rac_rab_buf_free_addr;
+	uint32_t rac_rab_buf_used_addr;
+
+	uint32_t rac_rab_dma_ib_lo_addr;
+	uint32_t rac_rab_dma_ib_hi_addr;
+	uint32_t rac_rab_dma_ob_lo_addr;
+	uint32_t rac_rab_dma_ob_hi_addr;
+	uint32_t rac_rab_dma_ib_rd_addr;
+	uint32_t rac_rab_dma_ob_wr_addr;
+	uint32_t rac_rab_dma_ib_wr_addr;
+
+	/* Field masks cached from the register model */
+	uint32_t rac_rab_buf_free_ib_free_mask;
+	uint32_t rac_rab_buf_free_ob_free_mask;
+	uint32_t rac_rab_buf_used_ib_used_mask;
+	uint32_t rac_rab_buf_used_ob_used_mask;
+	uint32_t rac_rab_buf_used_flush_mask;
+
+	uint32_t rac_rab_buf_used_ob_used_low;
+
+	uint32_t rac_nmb_rd_adr_addr;
+	uint32_t rac_nmb_data_addr;
+	uint32_t rac_nmb_wr_adr_addr;
+	uint32_t rac_nmb_status_addr;
+
+	/* DMA-mode state (valid between dma_begin and dma_commit) */
+	bool m_dma_active;
+
+	struct nt_dma_s *m_dma;
+
+	volatile uint32_t *m_dma_in_buf;
+	volatile uint32_t *m_dma_out_buf;
+
+	uint16_t m_dma_out_ptr_rd;
+	uint16_t m_dma_in_ptr_wr;
+	uint32_t m_in_free;
+};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+/* Cursor into a RAB DMA ring buffer returned by nthw_rac_rab_read32_dma() */
+struct dma_buf_ptr {
+	uint32_t size;
+	uint32_t index;
+	volatile uint32_t *base;
+};
+
+/* Lifecycle: allocate, initialize against an FPGA instance, destroy. */
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+/*
+ * RAB word transfers: 'address' is the bus-local register address,
+ * 'word_cnt' counts 32-bit words.  All return 0 on success, -1 on error.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+/* Raw BAR register access helpers (no RAB involved) */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <malloc.h>
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+/*
+ * Allocate and zero-initialize a STAT module context.
+ * Returns NULL on allocation failure; release with nthw_stat_delete().
+ */
+nthw_stat_t *nthw_stat_new(void)
+{
+	/* calloc() zeroes the object and checks the size product for overflow,
+	 * replacing the original malloc()+memset() pair.
+	 */
+	return calloc(1, sizeof(nthw_stat_t));
+}
+
+/*
+ * Free a STAT module context from nthw_stat_new().  NULL is accepted.
+ */
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	/* free(NULL) is a well-defined no-op; the NULL guard was redundant */
+	free(p);
+}
+
+/*
+ * Bind a STAT context to STA module instance 'n_instance' of 'p_fpga',
+ * derive the statistics counter layout from the module version and FPGA
+ * product parameters, and put the module into a known idle state.
+ *
+ * Called with p == NULL it only probes for the instance (0 if present,
+ * -1 if absent).  Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	/* Probe-only mode: report presence of the module instance */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	/*
+	 * NOTE(review): "0x%08lX" expects unsigned long; on a 32-bit build
+	 * this mismatches uint64_t -- consider "%" PRIX64 instead.
+	 */
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	/* Resolve register/field handles from the FPGA register model */
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 (query may return NULL) */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers: 64-bit DMA address split over LSB/MSB */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	/* RX port count: try VSWITCH param first, then fall back twice */
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	/* Two counters (packets+octets) per color */
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	/* 6 base counters per RX host buffer, +2 with DBS from STA v0.6 */
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	/* Map the packed module version to a DMA layout revision */
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	/* Version 0 means an unsupported module; fail hard in debug builds */
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	/* The VSWITCH profile uses a fixed, much smaller per-port layout */
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init: quiesce TX, pulse the counter-clear bit, ack missed toggles,
+	 * and make sure DMA is disabled until nthw_stat_trigger() runs.
+	 */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+/*
+ * Program the STA module's host DMA address and attach the host-side
+ * buffer.  The buffer layout is m_nb_counters 32-bit counters followed by
+ * one 64-bit timestamp slot.  Always returns 0.
+ *
+ * @param stat_dma_physical  IOVA the FPGA will write statistics to
+ * @param p_stat_dma_virtual host virtual address of the same buffer
+ */
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	/* Split the 64-bit physical address over the MSB/LSB register pair */
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	/* The timestamp slot follows the counter array */
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	/*
+	 * Bug fix: pointers must not be passed as "%" PRIX64 varargs
+	 * directly (undefined behavior on type mismatch) - convert through
+	 * uintptr_t to a 64-bit integer first.
+	 */
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical,
+	       (uint64_t)(uintptr_t)p->mp_stat_dma_virtual,
+	       (uint64_t)(uintptr_t)p->mp_timestamp);
+	/* Seed the timestamp: wall clock for vswitch, "invalid" otherwise */
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+/*
+ * Request a statistics DMA transfer from the STA module.
+ * Acknowledges a missed toggle (if any), invalidates the previous
+ * timestamp, then sets and flushes the DMA-enable field.  Returns 0.
+ */
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	const int toggle_was_missed =
+		field_get_updated(p->mp_fld_stat_toggle_missed);
+
+	/* Write-1-to-clear the missed-toggle status before re-arming */
+	if (toggle_was_missed)
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	if (p->mp_timestamp != NULL)
+		*p->mp_timestamp = -1; /* Clear old ts */
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
+/*
+ * Context for one STA (statistics) FPGA module instance.
+ * Holds the counter layout derived at init time from the module version
+ * and FPGA product parameters, plus the host DMA buffer bindings.
+ */
+struct nthw_stat {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_stat;
+	int mn_instance;
+
+	/* DMA layout revision (1..6), derived from the module version */
+	int mn_stat_layout_version;
+
+	bool mb_is_vswitch;
+	bool mb_has_tx_stats;
+
+	/* Port/queue topology read from FPGA product parameters */
+	int m_nb_phy_ports;
+	int m_nb_nim_ports;
+
+	int m_nb_rx_ports;
+	int m_nb_tx_ports;
+
+	int m_nb_rx_host_buffers;
+	int m_nb_tx_host_buffers;
+
+	int m_dbs_present;
+
+	int m_rx_port_replicate;
+
+	/* Counter-group sizes making up the DMA buffer layout */
+	int m_nb_color_counters;
+
+	int m_nb_rx_hb_counters;
+	int m_nb_tx_hb_counters;
+
+	int m_nb_rx_port_counters;
+	int m_nb_tx_port_counters;
+
+	/* Total number of 32-bit counters in the DMA buffer */
+	int m_nb_counters;
+
+	/* Register-model field handles (see nthw_stat_init()) */
+	nt_field_t *mp_fld_dma_ena;
+	nt_field_t *mp_fld_cnt_clear;
+
+	nt_field_t *mp_fld_tx_disable;
+
+	nt_field_t *mp_fld_cnt_freeze;
+
+	nt_field_t *mp_fld_stat_toggle_missed;
+
+	nt_field_t *mp_fld_dma_lsb;
+	nt_field_t *mp_fld_dma_msb;
+
+	/* Host DMA buffer: counters first, then one 64-bit timestamp slot */
+	uint64_t m_stat_dma_physical;
+	uint32_t *mp_stat_dma_virtual;
+
+	uint64_t last_ts;
+
+	uint64_t *mp_timestamp;
+};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+/*
+ * Module id space for NT_LOG().  Ids below 10000 mirror DPDK's logtype
+ * numbering; NT-specific modules live in a separate 10000+ range so the
+ * two sets can never collide.  A wrapper may predefine
+ * NT_LOG_MODULE_PREFIX to override this whole mapping.
+ */
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+/* Count of NT modules and conversion from module id to 0-based index */
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+/* Token-pastes a short module name (e.g. NTHW) into its full id macro */
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+/*
+ * Per-level compile-time switches.  With NT_LOG_ENABLE > 0 the ERR/WRN/
+ * INF/DBG levels default to on and the verbose DB1/DB2 levels to off;
+ * a disabled level compiles its NT_LOG() calls away entirely.
+ */
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+/*
+ * NT_LOG(ERR, NTHW, "fmt", ...) expands through the per-level macro to
+ * nt_log(NT_LOG_ERR, NT_LOG_MODULE_NTHW, "NTHW: ERR: fmt", ...).
+ * The "#module ": " #level ": "" prefix is built by string-literal
+ * concatenation, so the format argument MUST be a string literal.
+ */
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+/* Severity levels; one bit each so implementations can mask them */
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
+/* Backend callbacks supplied by the host application via nt_log_init() */
+struct nt_log_impl {
+	int (*init)(void);
+	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
+		   va_list args);
+	int (*is_debug)(uint32_t module);
+};
+
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ * Buffers come from a fixed-size allocation (see ntlog.c); free with
+ * ntlog_helper_str_free().
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+/*
+ * Install the logging backend and run its init callback.
+ * Returns the backend's init() result, or -1 for an invalid argument.
+ */
+int nt_log_init(struct nt_log_impl *impl)
+{
+	/* Robustness fix: reject a NULL/incomplete backend instead of
+	 * crashing on the callback dereference below.
+	 */
+	if (impl == NULL || impl->init == NULL)
+		return -1;
+	user_impl = impl;
+	return user_impl->init();
+}
+
+/*
+ * Return a pointer to the first '\n' of the trailing newline run of s
+ * (ignoring trailing spaces), or NULL if s does not end in a newline.
+ * E.g. "hello\n\n\n" -> pointer to the second-to-last position kept by
+ * the caller, which truncates at *(eol + 1).
+ */
+static char *last_trailing_eol(char *s)
+{
+	int i = (int)strlen(s) - 1;
+
+	/* Bug fix: an empty string made i == -1 and read s[-1] below */
+	if (i < 0)
+		return NULL;
+	/* Skip spaces */
+	while (i > 0 && s[i] == ' ')
+		--i;
+	if (s[i] != '\n')
+		return NULL;
+	/*
+	 * Find the last trailing EOL "hello_world\n\n\n"
+	 *                                         ^
+	 */
+	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
+		--i;
+	return &s[i];
+}
+
+/*
+ * Always terminates the NT_LOG statement with a !!!single!!! EOL.
+ * Copies 'format' into a scratch buffer, normalizes its trailing
+ * newlines, and forwards to the installed backend's vprintf-style log().
+ * Returns the backend's result, or -1 if no backend is set or the
+ * scratch buffer cannot be allocated.
+ */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+
+	/* Bug fix: the helper returns NULL on allocation failure */
+	if (actual_format == NULL)
+		return rv;
+
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		/*
+		 * Bug fix: strncat() appends up to n characters PLUS a NUL
+		 * terminator, so the bound must leave room for the NUL -
+		 * the original bound could overflow the buffer by one byte.
+		 */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format) - 1);
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+/*
+ * Query whether debug logging is enabled for 'module'.
+ * Returns 1 if debug, 0 if a lower level, -1 on error (bad module, or -
+ * consistency fix - no backend installed yet; nt_log() guards the same way).
+ */
+int nt_log_is_debug(uint32_t module)
+{
+	if (user_impl == NULL)
+		return -1;
+	return user_impl->is_debug(module);
+}
+
+/*
+ * Allocate a fixed-size (NTLOG_HELPER_STR_SIZE_MAX) scratch string for
+ * building one-liner NT_LOG output, optionally seeded with 'sinit'
+ * (truncated to fit).  Returns NULL on allocation failure; release with
+ * ntlog_helper_str_free().
+ */
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *buf = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (buf == NULL)
+		return NULL;
+
+	if (sinit == NULL)
+		buf[0] = '\0';
+	else
+		rte_strscpy(buf, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+
+	return buf;
+}
+
+/*
+ * Reset a helper string to 'sinit' (truncated to the fixed buffer size),
+ * or to the empty string when sinit is NULL.  A NULL 's' is ignored.
+ */
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s == NULL)
+		return;
+
+	if (sinit == NULL)
+		s[0] = '\0';
+	else
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+}
+
+/*
+ * Append printf-style formatted text to helper string 's', truncating at
+ * the fixed buffer size.  A NULL 's' is ignored.
+ *
+ * Bug fix: the attribute was (2, 0), which disables compile-time checking
+ * of the variadic arguments; for a "..." function the varargs start at
+ * parameter 3, so (2, 3) enables full printf format checking.
+ */
+__rte_format_printf(2, 3)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	/* vsnprintf's size argument already reserves space for the NUL,
+	 * so the full remaining capacity may be passed (was off by one).
+	 */
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - len), format, args);
+	va_end(args);
+}
+
+/* Release a string from ntlog_helper_str_alloc(); NULL is safe. */
+void ntlog_helper_str_free(char *s)
+{
+	free(s);
+}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+/* Extract the BDF components packed by BDF_TO_PCIIDENT() below */
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary; 'a' must be a power of two */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/*
+ * Allocation size matching minimum alignment of specified size.
+ * Bug fix: use 1ULL - with a plain int 1, shifting by 31 or more
+ * (sizes >= 2 GiB) is signed-overflow/shift UB and truncates the result.
+ */
+#define ALIGN_SIZE(_size_) (1ULL << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+/* Timer-tick based clocks; NS variant keeps 100ns-step intermediate math */
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+/* One VFIO-mapped DMA region: host virtual 'addr', device 'iova', bytes */
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;
+	uint64_t size;
+};
+
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+/* VFIO map/unmap callbacks supplied by the host via nt_util_vfio_init() */
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/*
+ * Register the VFIO DMA map/unmap callbacks used by nt_dma_alloc() and
+ * nt_dma_free().  The struct is copied, so the caller's instance may go
+ * out of scope afterwards.
+ * NOTE(review): 'impl' is dereferenced unchecked - callers must pass a
+ * valid pointer.
+ */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate a DMA-able memory region on NUMA node 'numa' with the given
+ * alignment and map it for device access through the registered VFIO
+ * callbacks.  Returns a descriptor to pass to nt_dma_free(), or NULL on
+ * allocation/mapping failure (nothing is leaked on the error paths).
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	/*
+	 * NOTE(review): the mapped length is ALIGN_SIZE(size) (next power of
+	 * two) while only 'size' bytes were allocated above - confirm the
+	 * allocator rounds up accordingly, otherwise unallocated memory is
+	 * mapped for device access.
+	 */
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	/* Pointer-to-integer conversion goes through uintptr_t */
+	vfio_addr->addr = (uint64_t)(uintptr_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/*
+	 * Bug fix: 'size' and 'align' are uint64_t - printing them with
+	 * %u / %X is a format-specifier mismatch (undefined behavior).
+	 */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and free a DMA region from nt_dma_alloc().  The host memory is
+ * released even if the VFIO unmap fails (which is only logged).
+ * NOTE(review): 'vfio_addr' is dereferenced unchecked; must not be NULL.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* Bug fix: 'size' is uint64_t - "%u" is a format mismatch (UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0,
+					 (void *)(uintptr_t)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(uintptr_t)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 3/8] net/ntnic: adds NT200A02 adapter support
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-09-05 14:54   ` Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v13:
* Fix typo spelling warnings
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+#define NUM_ADAPTER_MAX (8)
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Signal-handler to stop all monitor threads
+ */
+/*
+ * Stop all monitor tasks.
+ *
+ * signum == -1 indicates a direct (non-signal) call, as done from
+ * nt4ga_adapter_deinit(); in that case each task that was running is also
+ * joined and its pthread_t slot is cleared.
+ *
+ * NOTE(review): pthread_join()/memset() are not async-signal-safe, but the
+ * signum == -1 guard keeps them out of the signal-delivery path.
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		/* Clearing the flag asks the task's loop to terminate */
+		monitor_task_is_running[i] = 0;
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Dump a human-readable summary of the adapter (PCI identity, FPGA image
+ * version, port/NIM counts, hardware platform) to the given stream, then
+ * append the statistics dump.
+ *
+ * Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Format "domain:bus:dev.func" from the packed pciident value */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	/* Append statistics to the same stream */
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * SPI for sensors initialization
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	/* Allocate and initialize the SPI used to set up the AVR sensors */
+	nthw_spi_v3_t *p_spi = nthw_spi_v3_new();
+
+	if (!p_spi) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+	if (nthw_spi_v3_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(p_spi);
+		return NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * SPI for sensors reading
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	/* Allocate and initialize the SPI instance used for sensor reading */
+	nthw_spis_t *p_spis = nthw_spis_new();
+
+	if (!p_spis) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+	if (nthw_spis_init(p_spis, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(p_spis);
+		return NULL;
+	}
+
+	return p_spis;
+}
+
+/*
+ * Probe and register the adapter-level sensors.
+ *
+ * Builds the adapter->adapter_sensors linked list (FPGA temperature sensor
+ * first, then the AVR-managed sensors when the sensor SPI is available) and
+ * keeps adapter->adapter_sensors_cnt in sync with the list length.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor is always the head of the list */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	adapter->adapter_sensors_cnt++;
+
+	/*
+	 * NOTE(review): if fpga_temperature_sensor_init() can return NULL the
+	 * list appends below would dereference a NULL sensors_list_ptr - confirm.
+	 */
+
+	/* AVR */
+	if (sensors_s_spi) {
+		/* Stop sensor monitoring while (re)registering sensors */
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			sensors_list_ptr = sensors_list_ptr->next;
+			adapter->adapter_sensors_cnt++;
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			/* Restart sensor monitoring */
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		/* The init SPI is only needed during setup */
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * One-time initialization of an NT4GA adapter.
+ *
+ * Derives hardware ids from the PCI device id, allocates ident strings,
+ * instantiates the FPGA model (nthw_fpga_init), runs the PCI TA/TG
+ * throughput check, registers sensors, initializes per-port link handling,
+ * the optional EPP module and finally the statistics module.
+ *
+ * Returns 0 on success or a module-specific non-zero error code. On failure
+ * the already-allocated ident strings stay attached to p_adapter_info;
+ * assumes the caller invokes nt4ga_adapter_deinit() to release them - confirm.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	/* 24-byte ident buffers; the snprintf() calls below bound all writes */
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: "domain:bus:dev.func" string for this device */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: same ident with a "PCI:" prefix */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Per-port ident strings: "<adapter id>:intf_<n>" for every slot */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; fills in fpga_info counters used below */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			/* TA/TG is optional; absence is not fatal */
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Per-port link handling, selected by FPGA product id */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional; only set up when the FPGA image exposes it */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an NT4GA adapter: stop monitor tasks, stop statistics, shut the
+ * FPGA model down, and free all ident strings and sensor lists allocated by
+ * nt4ga_adapter_init()/adapter_sensor_setup().
+ *
+ * Returns the result of nthw_rac_rab_reset().
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* -1 = direct call: also join the monitor threads */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/*
+	 * Free adapter port ident strings.
+	 * nt4ga_adapter_init() allocates one string per array slot, not per
+	 * phy port, so free the whole array to avoid leaking the remainder.
+	 */
+	for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str); i++) {
+		free(p_adapter_info->mp_port_id_str[i]); /* free(NULL) is a no-op */
+		p_adapter_info->mp_port_id_str[i] = NULL;
+	}
+
+	/* Free adapter ident string */
+	free(p_adapter_info->mp_adapter_id_str);
+	p_adapter_info->mp_adapter_id_str = NULL;
+
+	/* Free devname ident string */
+	free(p_adapter_info->p_dev_name);
+	p_adapter_info->p_dev_name = NULL;
+
+	/* Free adapter sensors */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * DN-0060 section 9
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;	/* Encodes product type/platform, see DN-0060 section 9 */
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid (decoded in nt4ga_adapter_init()) */
+	nthw_adapter_id_t n_nthw_adapter_id;	/* via nthw_platform_get_nthw_adapter_id() */
+	int hw_platform_id;	/* pci_device_id bits [11:4] */
+	int hw_product_type;	/* pci_device_id bits [3:0] */
+	int hw_reserved1;	/* pci_device_id bits [15:12] */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+/* Top-level per-adapter state, aggregating all NT4GA sub-modules */
+typedef struct adapter_info_s {
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;	/* PCI TA/TG (test accel/traffic gen) */
+	struct nt4ga_stat_s nt4ga_stat;	/* statistics module */
+	struct nt4ga_filter_s nt4ga_filter;	/* filter/flow module */
+	struct nt4ga_link_s nt4ga_link;	/* per-port link state and actions */
+
+	struct hw_info_s hw_info;	/* ids derived from PCI config space */
+	struct fpga_info_s fpga_info;	/* FPGA model, handles and counters */
+
+	/* Sensor lists built by adapter_sensor_setup(); counts track list length */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* Ident strings malloc'ed in nt4ga_adapter_init(), freed in deinit */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown;	/* NOTE(review): presumably set externally to request shutdown - confirm */
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations.  This structure defines the management hooks for
+ * Napatech network devices.  The following hooks can be defined; unless noted
+ * otherwise, they are optional and can be filled with a null pointer.
+ *
+ * int (*mto_open)(int adapter, int port);
+ *     The function to call when a network device transitions to the up state,
+ *     e.g., `ip link set <interface> up`.
+ *
+ * int (*mto_stop)(int adapter, int port);
+ *     The function to call when a network device transitions to the down state,
+ *     e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);	/* called on transition to up */
+	int (*mto_stop)(int adapter, int port);	/* called on transition to down */
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* The file nt4ga_adapter.c defines the next four variables. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;	/* number of interfaces */
+	int n_queues_per_intf_cnt;	/* queues available per interface */
+
+	struct flow_nic_dev *mp_flow_device;	/* flow device handle */
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* 'p' is dereferenced below, so it must not be tagged _unused */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+	const uint32_t nt_link_speed_capa = p_link->speed_capa;
+	return nt_link_speed_capa;
+}
+
+/*
+ * port: nim present
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	/* Report whether a NIM is currently detected in this port */
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	/* The stored flag is "disabled", i.e. the inverse of adm_state */
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	/* Administrative state is the inverse of the port_disable flag */
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Link status currently maps directly onto the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	/* Report the link-up flag as last observed by the monitor */
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	/* Record the requested speed both as pending action and current info */
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/* Autonegotiation is not yet supported by the link code: no-op.
+	 * (The original dead p_link local has been removed.)
+	 */
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	/* Autonegotiation state is not tracked yet; always report enabled.
+	 * (The original dead p_link local has been removed.)
+	 */
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	/* Store the requested duplex as a pending port action */
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	/* Store the requested loopback mode as a pending port action */
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	/* Returned by value: the caller gets a copy of the NIM I2C context */
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: tx power
+ */
+/*
+ * Enable/disable the TX laser for a port.
+ * Returns 0 on success, 1 if the NIM call fails, -1 for unsupported
+ * (non-QSFP28) port types.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	/* TX laser control is only implemented for QSFP28-based port types */
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		/* RX-only NIMs have no TX laser to control */
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link Auto/Manual mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link state, link information and pending port action records
+ */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa;
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+/*
+ * Apply or remove a loopback mode on one port.
+ *
+ * mode / last_mode encoding: 0 = no loopback, 1 = host loopback,
+ * 2 = line loopback. FEC is forced on for host loopback. On NT200A01
+ * hw_id 2 and NT200A02 boards the GTY TX/RX polarity swap is
+ * (re)applied, since host loopback requires the unswapped lanes.
+ * The RX path is reset afterwards and BIP/FEC counters are cleared.
+ */
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polarity = true; /* renamed from misspelled 'swap_polerity' */
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polarity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		/* Leaving loopback: undo whatever the previous mode enabled */
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polarity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	/* Clear stale error counters once the RX path is out of reset */
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
+/*
+ * Build the current state of a link (for one port).
+ *
+ * Reads the MAC/PCS link summary and the GPIO "module present" pin and
+ * fills *state. The raw summary fields are logged at DBG level, but only
+ * when they differ from the previously logged line for this adapter/port
+ * (one cached string per adapter/port) to avoid flooding the log.
+ * Always returns 0.
+ */
+static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+			     nthw_gpio_phy_t *gpio_phy, int port,
+			     link_state_t *state, bool is_port_disabled)
+{
+	uint32_t abs;
+	uint32_t phy_link_state;
+	uint32_t lh_abs;
+	uint32_t ll_phy_link_state;
+	uint32_t link_down_cnt;
+	uint32_t nim_interr;
+	uint32_t lh_local_fault;
+	uint32_t lh_remote_fault;
+	uint32_t lh_internal_local_fault;
+	uint32_t lh_received_local_fault;
+
+	/* Start from all-zero; only fields set below carry information */
+	memset(state, 0, sizeof(*state));
+	state->link_disabled = is_port_disabled;
+	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
+				  &ll_phy_link_state, &link_down_cnt,
+				  &nim_interr, &lh_local_fault,
+				  &lh_remote_fault, &lh_internal_local_fault,
+				  &lh_received_local_fault);
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	state->nim_present =
+		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
+	state->lh_nim_absent = !state->nim_present;
+	state->link_up = phy_link_state ? true : false;
+
+	{
+		/* Last logged summary per adapter/port — log only on change */
+		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
+		char buf[255];
+		const int adapter_no = drv->adapter_no;
+
+		snprintf(buf, sizeof(buf),
+			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
+			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
+			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
+			 "lh_received_local_fault = %u",
+			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
+			phy_link_state, lh_abs, ll_phy_link_state,
+			link_down_cnt, nim_interr, lh_local_fault,
+			lh_remote_fault, lh_internal_local_fault,
+			lh_received_local_fault);
+		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
+			rte_strscpy(lsbuf[adapter_no][port], buf,
+				sizeof(lsbuf[adapter_no][port]) - 1U);
+			lsbuf[adapter_no][port]
+			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
+			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Report whether a NIM (pluggable transceiver) module is seated in
+ * interface if_no, as seen by the GPIO PHY "module present" pin.
+ */
+static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
+{
+	bool present;
+
+	assert(if_no < NUM_ADAPTER_PORTS_MAX);
+	present = nthw_gpio_phy_is_module_present(gpio_phy, if_no);
+	return present;
+}
+
+/*
+ * Enable RX — turn on the MAC/PCS receive path of one port.
+ * Always returns 0.
+ */
+static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Enable TX — turn on the MAC/PCS transmit path of one port and select
+ * the host as the TX data source. Always returns 0.
+ */
+static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
+	return 0;
+}
+
+/*
+ * Disable RX — turn off the MAC/PCS receive path of one port.
+ * Always returns 0.
+ */
+static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Disable TX — turn off the MAC/PCS transmit path of one port and
+ * deselect the host as TX source. Always returns 0.
+ */
+static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv; /* unused */
+	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
+	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
+	return 0;
+}
+
+/*
+ * Reset RX — pulse the MAC/PCS RX-path reset: assert, hold 10 ms,
+ * de-assert, then allow another 10 ms for the path to settle.
+ * Always returns 0.
+ */
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	(void)drv;
+
+	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	return 0;
+}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ *
+ * Apply (swap == true) or clear (swap == false) the board-specific GTY
+ * lane polarity inversions on all 4 lanes of one port. The per-port,
+ * per-lane tables encode which lanes are wired inverted on the PCB.
+ * NOTE(review): the tables cover ports 0-1 only; callers must guarantee
+ * port < 2 — confirm against supported adapters. Always returns 0.
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link health once a NIM is installed and a link can be expected.
+ * Trigger an RX-path reset whenever the MAC/PCS reports that a reset is
+ * required, a high bit-error rate is present, or not all FEC lanes are
+ * alignment-marker locked. Always returns 0.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	const bool need_rst = nthw_mac_pcs_reset_required(mac_pcs);
+	const bool hi_ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+	const bool all_fec_locked =
+		nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (need_rst || hi_ber || !all_fec_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ *
+ * Resets the module via GPIO, pre-initializes its I2C context, validates
+ * the module type against the single supported id, and finally asserts
+ * (enable == false) or de-asserts (enable == true) low-power mode.
+ * Returns 0 on success (also when no module is present), negative on error.
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U; /* only supported module type */
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	/* When disabling the port, quiesce the MAC before touching the NIM */
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		/* BUGFIX: log text was inverted ("no longer absent") although
+		 * this branch means the module vanished during the reset.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer present\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	/* Low power off = laser on; low power on = laser off */
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
+/*
+ * Initialize one 100 Gbps port.
+ * The function shall not assume anything about the state of the adapter
+ * and/or port.
+ *
+ * Sequence: set state variables, configure MAC/PCS (LED, equalization,
+ * board-specific polarity swap, EOP timestamping), enable TX, optionally
+ * enable the GMF, create/validate the NIM, set FEC and board-specific
+ * GTY TX tuning, program the RX timestamp compensation and finally
+ * enable RX. Returns 0 on success, non-zero on NIM failure.
+ */
+static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
+{
+	int adapter_id;
+	int hw_id;
+	int res;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+
+	nthw_mac_pcs_t *mac_pcs;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	/* -1 marks "unknown" when FPGA info is unavailable */
+	if (fpga && fpga->p_fpga_info) {
+		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
+		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
+	} else {
+		adapter_id = -1;
+		hw_id = -1;
+	}
+
+	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	/*
+	 * Phase 1. Pre-state machine (`port init` functions)
+	 * 1.1) Nt4gaAdapter::portInit()
+	 */
+
+	/* No adapter set-up here, only state variables */
+
+	/* 1.2) MyPort::init() */
+	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
+	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
+	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
+	link_info->speed_capa |= NT_LINK_SPEED_100G;
+	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
+	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
+					       nthw_mac_pcs_receiver_mode_lpm);
+
+	/*
+	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
+	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
+	 */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
+			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
+		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);
+
+	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */
+
+	/* Work in ABSOLUTE timing mode, don't set IFG mode. */
+
+	/* Phase 2. Pre-state machine (`setup` functions) */
+
+	/* 2.1) nt200a0x.cpp:Myport::setup() */
+	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
+	       port);
+
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
+	       drv->mp_port_id_str[port], port);
+	enable_tx(drv, mac_pcs);
+	reset_rx(drv, mac_pcs);
+
+	/* 2.2) Nt4gaPort::setup() */
+	/* First init call probes for the GMF module — presumably a NULL
+	 * instance means "check presence only"; verify against nthw_gmf_init.
+	 */
+	if (nthw_gmf_init(NULL, fpga, port) == 0) {
+		nthw_gmf_t gmf;
+
+		if (nthw_gmf_init(&gmf, fpga, port) == 0)
+			nthw_gmf_set_enable(&gmf, true);
+	}
+
+	/* Phase 3. Link state machine steps */
+
+	/* 3.1) Create NIM, ::createNim() */
+	res = create_nim(drv, fpga, port, true);
+
+	if (res) {
+		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
+		       drv->mp_port_id_str[port]);
+		return res;
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);
+
+	/* 3.2) MyPort::nimReady() */
+
+	/* 3.3) MyPort::nimReady100Gb() */
+
+	/* Setting FEC resets the lane counter in one half of the GMF */
+	nthw_mac_pcs_set_fec(mac_pcs, true);
+	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
+	       drv->mp_port_id_str[port], port);
+
+	/* Board-specific GTY TX tuning (pre/diff/post emphasis per lane) */
+	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
+		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
+				{ 8, 15, 9 },
+				{ 7, 15, 9 },
+				{ 6, 15, 8 }
+			},
+			{	{ 6, 15, 8 },
+				{ 3, 15, 12 },
+				{ 7, 15, 9 },
+				{ 7, 15, 8 }
+			}
+		};
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			uint8_t pre, diff, post;
+
+			/* Use short-range tuning values */
+			pre = tuning_s_r4[port][lane][0];
+			diff = tuning_s_r4[port][lane][1];
+			post = tuning_s_r4[port][lane][2];
+
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
+			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
+			 (hw_id == 2))) {
+		const uint8_t pre = 5;
+		const uint8_t diff = 25;
+		const uint8_t post = 12;
+
+		uint8_t lane = 0;
+
+		for (lane = 0; lane < 4; lane++) {
+			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
+						  post);
+		}
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
+		       adapter_id, hw_id);
+		assert(0);
+	}
+	reset_rx(drv, mac_pcs);
+
+	/*
+	 * 3.4) MyPort::setLinkState()
+	 *
+	 * Compensation = 1640 - dly
+	 * CMAC-core dly 188 ns
+	 * FEC no correction 87 ns
+	 * FEC active correction 211
+	 */
+	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));
+
+	else
+		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));
+
+	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
+	enable_rx(drv, mac_pcs);
+
+	nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+
+	return res;
+}
+
+/*
+ * State machine shared between kernel and userland
+ *
+ * Per-adapter monitoring loop: polls adapter and NIM sensors, reacts to
+ * administrative port enable/disable, loopback mode changes, NIM
+ * insertion/removal and link up/down transitions for every phy port,
+ * sleeping 0.5 s between iterations. Runs until
+ * monitor_task_is_running[adapter_no] is cleared. Returns 0.
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				/* BUGFIX: log this port's NIM context (nim_ctx[i]),
+				 * not nim_ctx-> which always showed port 0's data.
+				 */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx[i].nim_id,
+				       nim_id_to_text(nim_ctx[i].nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx[i].vendor_name, nim_ctx[i].prod_no,
+				       nim_ctx[i].serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine — pthread entry point. Runs the shared
+ * state machine and discards its status (pthread requires void *).
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	common_ptp_nim_state_machine(data);
+	return NULL;
+}
+
+/*
+ * Initialize all ports
+ * The driver calls this function during initialization (of the driver).
+ *
+ * One-time per adapter: sets up MAC/PCS, NIM I2C and GPIO PHY contexts
+ * for every phy port, then spawns the link monitoring thread if it is
+ * not already running. Returns 0 on success, non-zero on failure.
+ */
+int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
+			       nt_fpga_t *fpga)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	const int adapter_no = p_adapter_info->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	int res = 0;
+
+	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
+	       p_adapter_info->mp_adapter_id_str);
+
+	/*
+	 * Initialize global variables
+	 */
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+
+	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
+		nthw_mac_pcs_t *mac_pcs =
+			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
+		nim_i2c_ctx_t *nim_ctx =
+			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
+		nthw_gpio_phy_t *gpio_phy =
+			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
+		int i;
+
+		for (i = 0; i < nb_ports; i++) {
+			/* NIM I2C controller instances start at 2 */
+			const uint8_t instance =
+				(uint8_t)(2U + i); /* 2 + adapter port number */
+			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
+					      i /* int nInstance */);
+			if (res != 0)
+				break;
+			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
+					   8 /* timing */);
+			if (res != 0)
+				break;
+			nim_ctx[i].instance = instance;
+			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
+			nim_ctx[i].regaddr = 0U;
+			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
+					       0 /* Only one instance */);
+			if (res != 0)
+				break;
+		}
+		/* Only mark initialized when every port succeeded */
+		if (res == 0)
+			p_adapter_info->nt4ga_link.variables_initialized = true;
+	}
+
+	/* Create state-machine thread */
+	if (res == 0) {
+		if (!monitor_task_is_running[adapter_no]) {
+			res = pthread_create(&monitor_tasks[adapter_no], NULL,
+					     nt4ga_link_100g_mon, p_adapter_info);
+		}
+	}
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_100G_H_
+#define NT4GA_LINK_100G_H_
+
+#include "nthw_drv.h"
+
+int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);
+
+#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..5cbe7fcae9
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+/*
+ * Initialize the PCI test-access (TA) and read/write traffic-generator
+ * (RD/WR TG) modules of one adapter.
+ *
+ * Missing modules are logged as warnings but are not fatal: the return
+ * value is the COUNT of modules that failed to initialize (0 = all
+ * present), or -1 on an internal null-pointer error. Callers must not
+ * interpret the result as an errno.
+ */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	/* NOTE(review): p is the address of an embedded field and can never
+	 * be NULL here; the check below is purely defensive.
+	 */
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
+/* Enable (non-zero) or disable (0) the PCI TA counters. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
+		uint32_t enable)
+{
+	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
+	return 0;
+}
+
+/* Read the TA length-error counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA bad-packet counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA good-packet counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
+{
+	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/* Read the TA payload-error counter into *p_data. Returns 0. */
+static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
+		uint32_t *p_data)
+{
+	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
+	return 0;
+}
+
+/*
+ * Program one read-TG RAM slot: physical address = iova + slot * req_size
+ * (slots are laid out contiguously in the DMA buffer), with the given
+ * request size and wait/wrap flags. Returns 0.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
+/* Start the read TG for num_iterations (0 stops it). Returns 0. */
+static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll until the read TG reports ready, in 1 ms steps for up to ~1 s.
+ * Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Program one write-TG RAM slot: physical address = iova + slot * req_size,
+ * with the given request size and wait/wrap/increment flags. Returns 0.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
+/* Start the write TG for num_iterations (0 stops it). Returns 0. */
+static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
+{
+	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
+	return 0;
+}
+
+/*
+ * Poll until the write TG reports ready, in 1 ms steps for up to ~1 s.
+ * Returns 0 when ready, -1 on timeout.
+ */
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_root_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_endpoint_instance = NULL;
+
+	nthw_pcie3 *p_pci_root = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_endpoint = NULL;
+
+	assert(p_root_instance || p_pci_root);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_root,
+								    pri);
+			}
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_endpoint,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_enable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_enable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_enable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_enable(p_pci_endpoint);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_disable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_disable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_disable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_disable(p_pci_endpoint);
+
+			/* Post process root */
+			if (p_root_instance) {
+				nthw_hif_end_point_counters_sample(p_root_instance,
+							       pri);
+			}
+
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_root,
+								     pri);
+			}
+
+			/* Post process endpoint */
+			if (p_endpoint_instance) {
+				nthw_hif_end_point_counters_sample(p_endpoint_instance,
+							       sla);
+			}
+
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_endpoint,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_root_instance) {
+					nthw_hif_stat_req_enable(p_root_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_root_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Initialize one end-point counter set for a throughput measurement run.
+ * Non-positive user parameters fall back to the TG_* defaults.
+ */
+static void
+nt4ga_pci_ta_tg_init_ep_counters(struct nthw_hif_end_point_counters *ep,
+				 const uint8_t numa, const int dir,
+				 const int n_pkt_size, const int n_batch_count,
+				 const int n_delay)
+{
+	ep->n_numa_node = numa;
+	ep->n_tg_direction = dir;
+	ep->n_tg_pkt_size = (n_pkt_size > 0 ? n_pkt_size : TG_PKT_SIZE);
+	ep->n_tg_num_pkts =
+		(n_batch_count > 0 ? n_batch_count : TG_NUM_PACKETS);
+	ep->n_tg_delay = (n_delay > 0 ? n_delay : TG_DELAY);
+	ep->cur_rx = 0;
+	ep->cur_tx = 0;
+	ep->n_ref_clk_cnt = -1;
+	ep->bo_error = 0;
+}
+
+/*
+ * Run the HIF traffic-generator throughput measurement over the requested
+ * NUMA node(s) and direction(s).
+ *
+ * numa_node == UINT8_MAX means "all nodes" (currently just node 0);
+ * direction <= 0 sweeps all three test directions (1=read, 2=write, 3=both).
+ * Returns -1 for an invalid (zero) delay; otherwise 0. Measurement errors
+ * are logged but not returned.
+ *
+ * Fix vs. original: the open-coded init of the secondary ("sla") counter set
+ * assigned pri->n_ref_clk_cnt twice and left sla->n_ref_clk_cnt
+ * uninitialized; both sets are now initialized by the shared helper above.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	{
+		uint8_t numa;
+
+		for (numa = numa_begin; numa <= numa_end; numa++) {
+			int by_loop;
+
+			for (by_loop = dir_begin; by_loop <= dir_end;
+					by_loop++) {
+				struct nthw_hif_end_point_counters *pri =
+						&eps.pri;
+				struct nthw_hif_end_point_counters *sla =
+						&eps.sla;
+
+				nt4ga_pci_ta_tg_init_ep_counters(pri, numa,
+								 by_loop,
+								 n_pkt_size,
+								 n_batch_count,
+								 n_delay);
+				nt4ga_pci_ta_tg_init_ep_counters(sla, numa,
+								 by_loop,
+								 n_pkt_size,
+								 n_batch_count,
+								 n_delay);
+
+				bo_error +=
+				nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+								       pri, sla);
+#if defined(DEBUG) && (1)
+				{
+					NT_LOG(DBG, NTHW,
+					       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+					       __func__, pri->n_numa_node,
+					       pri->n_tg_direction,
+					       pri->n_tg_num_pkts,
+					       pri->n_tg_pkt_size,
+					       pri->n_tg_delay,
+					       pri->cur_rx, pri->cur_tx,
+					       (pri->cur_rx * 8UL /
+						1000000UL),
+					       (pri->cur_tx * 8UL /
+						1000000UL));
+				}
+				{
+					NT_LOG(DBG, NTHW,
+					       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+					       __func__, sla->n_numa_node,
+					       sla->n_tg_direction,
+					       sla->n_tg_num_pkts,
+					       sla->n_tg_pkt_size,
+					       sla->n_tg_delay,
+					       sla->cur_rx, sla->cur_tx,
+					       (sla->cur_rx * 8UL /
+						1000000UL),
+					       (sla->cur_tx * 8UL /
+						1000000UL));
+				}
+#endif
+
+				if (pri->bo_error != 0 || sla->bo_error != 0)
+					bo_error++;
+				if (bo_error)
+					break;
+			}
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	/*
+	 * NOTE(review): measurement errors are only logged; callers currently
+	 * receive 0 regardless - confirm whether bo_error should propagate.
+	 */
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+/* Enable summary output from the Test Accelerator / Traffic Generator debug */
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default parameters used when the caller passes non-positive values to
+ * nt4ga_pci_ta_tg_measure_throughput_main()
+ */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles for the PCIe read/write traffic generators and the test accelerator
+ * modules of one adapter
+ */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg;
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg;
+	struct nthw_pci_ta *mp_nthw_pci_ta;
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Locate the TA/TG FPGA modules for the adapter; returns 0 on success */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run one throughput measurement; pri/sla receive the measured counters */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Sweep numa node(s)/direction(s); see nt4ga_pci_ta_tg.c for semantics */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/*
+ * Convert an inline pcap-style 32.32 timestamp (seconds in the upper 32 bits,
+ * nanoseconds in the lower 32 bits) into a single nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	const uint64_t seconds = ts >> 32;
+	const uint64_t nanoseconds = ts & 0xffffffff;
+
+	return seconds * 1000000000 + nanoseconds;
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Harvest the FPGA statistics DMA area into the adapter counter structures.
+ * Dispatches to the capture/inline or Vswitch layout parser depending on the
+ * FPGA image type, and records the sample timestamp. Always returns 0.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (!p_nthw_stat->mb_is_vswitch) {
+		/* Capture/inline image: the DMA timestamp is valid - convert it */
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Create the statistics (STA) and RMC module handles for the adapter and
+ * cache the host-buffer/port counts reported by the STA module.
+ *
+ * Returns 0 on success, -1 if either module handle cannot be allocated.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/*
+	 * p_nt4ga_stat is the address of an embedded struct member and can
+	 * never be NULL - the original NULL/error branch here was unreachable
+	 * dead code and has been removed.
+	 */
+	memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc = nthw_rmc_new();
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		if (!p_nthw_rmc) {
+			/* Release the already created STA handle on this path */
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		/* Cache the dimensions reported by the STA module */
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and map the statistics DMA area (16K aligned, as required by the
+ * FPGA) and the host-side counter structures, then arm the FPGA statistics
+ * engine. The Vswitch and Inline/capture image types use different per-port
+ * counter structures.
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): structures allocated before a failing allocation are not
+ * freed here - presumably nt4ga_stat_stop() performs the cleanup; confirm
+ * that callers invoke it on the error path.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	/* Block traffic through the RMC while the DMA area is being replaced */
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		/*
+		 * Fix: the original format string carried six conversions
+		 * ("%p" plus two PRIX64) for only five arguments - undefined
+		 * behavior per C11 fprintf rules.
+		 */
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %" PRIX64 " %" PRIX64 "\n", __func__,
+		       n_stat_size, numa_node, p_dma->addr, p_dma->iova);
+
+		/*
+		 * Fix: "%02ld" did not match the int adapter number and PRIX64
+		 * did not match the uint32_t size argument.
+		 */
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* Poison the area so un-written counters are recognizable */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		/* Vswitch images have no FLM statistics */
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	/* Kick off the first statistics DMA transfer */
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release every statistics resource owned by the adapter. Safe to call on a
+ * partially initialized nt4ga_stat_t: free(NULL) is a no-op, and each pointer
+ * is reset to NULL immediately after being released so repeated calls are
+ * harmless. Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* Per-port counter structures (Vswitch and Inline variants) */
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	/* Color, host-buffer and FLM counter structures */
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* The DMA area must only be released when actually allocated */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Print the accumulated RX/TX packet, octet and drop totals for every
+ * physical port of the adapter to the supplied stream. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	int intf_no;
+
+	for (intf_no = 0; intf_no < fpga_info->n_phy_ports; intf_no++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, intf_no,
+			p_nt4ga_stat->a_port_rx_packets_total[intf_no],
+			p_nt4ga_stat->a_port_rx_octets_total[intf_no],
+			p_nt4ga_stat->a_port_rx_drops_total[intf_no],
+			p_nt4ga_stat->a_port_tx_packets_total[intf_no],
+			p_nt4ga_stat->a_port_tx_octets_total[intf_no],
+			p_nt4ga_stat->a_port_tx_drops_total[intf_no]);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the Vswitch (virt v1) statistics layout from the DMA area into the
+ * adapter counter structures. Called with stat mutex locked.
+ *
+ * DMA layout (32-bit words): color counters, then RX host-buffer counters,
+ * then per-RX-port counters, then per-TX-port counters.
+ *
+ * Returns 0 on success, -1 on NULL arguments or an unsupported STA layout.
+ *
+ * Fix vs. original: the NULL check ran *after* p_nt4ga_stat had already been
+ * dereferenced for mp_nthw_stat/mn_rx_ports/mn_tx_ports; validation now
+ * happens before any dereference.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	int c, h, p;
+
+	if (!p_nt4ga_stat || !p_nt4ga_stat->mp_nthw_stat)
+		return -1;
+
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: the packet word carries TCP flags in its top 6 bits */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: 8 words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  42] :
+			0;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  47] :
+			0;
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53];
+
+		/* Rx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] +
+			(p_nthw_stat->m_dbs_present ?
+			 p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					   42] :
+			 0);
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23];
+
+		/* Tx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color flow statistics counters (one entry per color index). */
+struct color_counters {
+	uint64_t color_packets; /* packets counted for this color */
+	uint64_t color_bytes; /* bytes counted for this color */
+	uint8_t tcp_flags; /* TCP flags for this color - presumably OR-accumulated; confirm in stat module */
+};
+
+/* Per host-buffer packet/byte counters (flush, drop, forward, DBS drop). */
+struct host_buffer_counters {
+	uint64_t flush_packets;
+	uint64_t drop_packets;
+	uint64_t fwd_packets;
+	uint64_t dbs_drop_packets;
+	uint64_t flush_bytes;
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Extended (v2) per-port counter set used by the "cap" profile.
+ * drop_events and pkts are software-maintained sums computed while
+ * polling; the remaining fields mirror individual FPGA counters.
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop; /* only maintained when the DBS module is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop; /* only maintained when the DBS module is present */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Reduced per-port counter set used by the VSWITCH ("virt") profile. */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Flow matcher (FLM) module statistics. Fields are grouped by the FLM
+ * module version that introduced them (0.17 base set, 0.20 additions).
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/* Top-level statistics state for one adapter. */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat; /* statistics HW module handle */
+	nthw_rmc_t *mp_nthw_rmc; /* RMC HW module handle */
+	struct nt_dma_s *p_stat_dma; /* DMA memory holding the HW counters */
+	uint32_t *p_stat_dma_virtual; /* CPU-visible view of the counter DMA area */
+	uint32_t n_stat_size;
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Exactly one of the union members is active, per adapter profile */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver;
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+/* Adapter statistics API (implemented in nt4ga_stat.c) */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+/* Write a human-readable dump of the statistics state to 'pfh' */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* Poll HW counters and accumulate them into 'p_nt4ga_stat' */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..1c514d0300
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ *
+ * Linear byte offsets into the QSFP module EEPROM and related bit masks
+ * used by the NIM access helpers below.
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/*
+ * Check whether a copper SFP product number is one of the known
+ * supported tri-speed modules.
+ */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	const size_t n_entries =
+		sizeof(pn_trispeed_list) / sizeof(pn_trispeed_list[0]);
+	size_t idx = 0;
+
+	/* Determine if copper SFP is supported 3-speed type */
+	while (idx < n_entries) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+		idx++;
+	}
+
+	return false;
+}
+
+/*
+ * Return true for NIM types whose EEPROM map uses page addressing
+ * (XFP and the QSFP family); SFP/SFP+ uses flat addressing, and
+ * unknown identifiers are logged and treated as non-paged.
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_XFP:
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
+/* Map the raw NIM identifier byte (EEPROM byte 0) to the enum type. */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	return (nt_nim_identifier_t)ctx->nim_id;
+}
+
+/*
+ * Forward a raw register read or write to the I2C controller.
+ * 'lin_addr' is accepted for interface symmetry but is not used here.
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* Divide I2C_Addr by 2 because nthw_iic_read/writeData multiplies by 2 */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Unused */
+
+	if (!do_write)
+		return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					seq_cnt, p_data);
+
+	return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+				 seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Selects a new page for page addressing. This is only relevant if the NIM
+ * supports this. Since page switching can take substantial time the current page
+ * select is read and subsequently only changed if necessary.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Read the currently selected page */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Already on the requested page - skip the (slow) page switch */
+	if (page_sel == curr_page_sel)
+		return 0;
+
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write 'length' bytes at linear EEPROM address 'lin_addr',
+ * splitting the transfer into chunks that respect the 128-byte MSA page
+ * boundaries. Three address layouts are handled:
+ *  - paged modules (m_page_addressing): addresses >= 128 go through the
+ *    page-select register and map into [128..255] of the 0xA0 device,
+ *  - flat SFP layout: [0..255] on 0xA0, [256..511] on 0xA2,
+ *  - SFP PHY space (>= SFP_PHY_LIN_ADDR): 16-bit registers on 0xAC,
+ *    read/written two bytes per register address.
+ * Returns 0 on success, -1 on parameter or I2C errors.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	const int m_port_no = ctx->instance - 2;
+
+	/* Validate the requested range against the selected address layout */
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	/* Transfer loop: each iteration moves one chunk within a single page/block */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+/* Read 'length' bytes at linear address 'lin_addr' into 'data'. */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	const bool paged = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, paged, lin_addr, length, data,
+				       NIM_READ);
+}
+
+/* Write 'length' bytes from 'data' at linear address 'lin_addr'. */
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	const bool paged = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, paged, lin_addr, length, data,
+				       NIM_WRITE);
+}
+
+/*
+ * Read and return a single byte at linear address 'addr'.
+ * The underlying read status is not propagated; on failure 0 is
+ * returned instead of an indeterminate value.
+ */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	/* Initialized: returning an uninitialized byte on read failure is UB */
+	uint8_t data = 0;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+
+	if (nim_nim_read_write_data_lin(ctx, use_page_addressing,
+				    NIM_IDENTIFIER_ADDR, sizeof(ctx->nim_id),
+				    &ctx->nim_id, NIM_READ) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Common construction step for all NIM types: read the identifier byte
+ * and reset all cached module information to a known empty state.
+ */
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	int err;
+
+	ctx->nim_id = 0;
+	err = nim_read_id(ctx);
+	if (err != 0) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return err;
+	}
+
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+
+	ctx->content_valid = false;
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+/*
+ * Generate {x}sfp_read_vendor_info(), which fills the ctx vendor fields
+ * (name, product no, serial no, date, revision) from the module EEPROM.
+ * NOTE(review): the address macros are spelled Q##SFP_..., which token-
+ * pastes to the QSFP_*_LIN_ADDR constants regardless of the 'x' argument,
+ * so both generated variants read the QSFP addresses - confirm whether
+ * the plain-SFP variant should use SFP_* addresses instead.
+ */
+#define XSFP_READ_VENDOR_INFO(x)                                             \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+/* Instantiate sfp_read_vendor_info() and qsfp_read_vendor_info() */
+XSFP_READ_VENDOR_INFO()
+XSFP_READ_VENDOR_INFO(q)
+
+/*
+ * Build link state for SFP/SFP+ modules by reading the nominal bit-rate
+ * register from the module. Returns the I2C read status.
+ */
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res;
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	res = nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+	return res;
+}
+
+/*
+ * Build link state for QSFP/QSFP+/QSFP28 modules. No hardware access is
+ * needed: the nominal bit rate follows directly from the module type.
+ * Magic identifier values replaced by the NT_NIM_* enum constants
+ * (raw codes 12/13/17, cf. translate_nimid()/nim_id_to_text()).
+ */
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case NT_NIM_QSFP: /* 12U */
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case NT_NIM_QSFP_PLUS: /* 13U */
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case NT_NIM_QSFP28: /* 17U */
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+/* Dispatch link-state building to the SFP or QSFP implementation. */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	return translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS ?
+	       sfp_nim_state_build(ctx, state) :
+	       qsfp_nim_state_build(ctx, state);
+}
+
+/*
+ * Translate a NIM identifier byte to a printable name; codes without a
+ * known name yield "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	static const char *const id_names[] = {
+		[0x00] = "UNKNOWN",
+		[0x01] = "GBIC",
+		[0x02] = "FIXED",
+		[0x03] = "SFP/SFP+",
+		[0x04] = "300 pin XBI",
+		[0x05] = "XEN-PAK",
+		[0x06] = "XFP",
+		[0x07] = "XFF",
+		[0x08] = "XFP-E",
+		[0x09] = "XPAK",
+		[0x0A] = "X2",
+		[0x0B] = "DWDM",
+		[0x0C] = "QSFP",
+		[0x0D] = "QSFP+",
+		[0x11] = "QSFP28",
+		[0x12] = "CFP4",
+	};
+
+	/* Gaps (0x0E..0x10) are NULL in the table and fall through */
+	if (nim_id < sizeof(id_names) / sizeof(id_names[0]) &&
+			id_names[nim_id] != NULL)
+		return id_names[nim_id];
+
+	return "ILLEGAL!";
+}
+
+/*
+ * Read and check the validity of the NIM basic data.
+ * This will also preload the cache.
+ * Two checksums are verified: sum of bytes [0..62] against byte 63 and
+ * sum of bytes [64..94] against byte 95 (appears to follow the SFF-8472
+ * CC_BASE/CC_EXT layout - confirm against the NIM spec in use).
+ * NOTE(review): the read_data_lin() status is ignored, so a failed read
+ * is validated against whatever is in 'buf'.
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint32_t sum = 0;
+	uint8_t buf[96];
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (int i = 0; i < 63; i++)
+		sum += buf[i];
+
+	if ((sum & 0xFF) != buf[63]) {
+		ctx->content_valid = false;
+	} else {
+		sum = 0;
+
+		for (int i = 64; i < 95; i++)
+			sum += buf[i];
+
+		ctx->content_valid = ((sum & 0xFF) == buf[95]);
+	}
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset the SFP soft rate-select bits: RS0 (Rx, in the Control/Status
+ * register) and RS1 (Tx, in the Extended Status/Control register).
+ * Each register is read-modify-written; I2C errors are not propagated.
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool m_page_addressing = page_addressing(ctx->nim_id);
+	uint8_t data;
+
+	/* Read-modify-write RS0 in the Control/Status register */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (rx_rate_high)
+		data |= SFP_SOFT_RATE0_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE0_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_CONTROL_STATUS_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+
+	/* Read the Extended Status/Control and set/reset Soft RS1 bit */
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_READ);
+
+	if (tx_rate_high)
+		data |= SFP_SOFT_RATE1_BIT;
+	else
+		data &= (uint8_t)~(SFP_SOFT_RATE1_BIT);
+
+	nim_nim_read_write_data_lin(ctx, m_page_addressing,
+				SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(data),
+				&data, NIM_WRITE);
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ * Verifies the requested speed against the module's speed mask and, for
+ * dual-rate SFPs, drives the soft rate-select bits accordingly.
+ * Returns 0 on success, -1 if the speed is outside the mask.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		/*
+		 * Fix: the format string has three "%s" conversions but only
+		 * two arguments were passed; __func__ supplies the first.
+		 */
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Enable or disable the SFP TX laser via the soft TX-disable bit in the
+ * Control/Status register. Returns the first failing I2C status, or 0.
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t value;
+	int res = nim_nim_read_write_data_lin(ctx, pg_addr,
+					  SFP_CONTROL_STATUS_LIN_ADDR,
+					  sizeof(value), &value, NIM_READ);
+
+	if (res != 0)
+		return res;
+
+	if (disable)
+		value |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		value &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr,
+				       SFP_CONTROL_STATUS_LIN_ADDR,
+				       sizeof(value), &value, NIM_WRITE);
+}
+
+/*
+ * Disable laser for specific lane or all lanes
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	uint8_t value;
+	uint8_t mask;
+	const bool pg_addr = page_addressing(ctx->nim_id);
+
+	if (lane_idx < 0) /* If no lane is specified then all lanes */
+		mask = QSFP_SOFT_TX_ALL_DISABLE_BITS;
+	else
+		mask = (uint8_t)(1U << lane_idx);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	if (disable)
+		value |= mask;
+	else
+		value &= (uint8_t)~mask;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(value), &value, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int i;
+	/* Subtract "1" from maxLen that includes a terminating "0" */
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Terminate at first found white space */
+	for (i = 0; i < max_len - 1; i++) {
+		if (*p_data == ' ' || *p_data == '\n' || *p_data == '\t' ||
+				*p_data == '\v' || *p_data == '\f' || *p_data == '\r') {
+			*p_data = '\0';
+			return 0;
+		}
+
+		p_data++;
+	}
+
+	/*
+	 * Add line termination as the very last character, if it was missing in the
+	 * NIM data
+	 */
+	*p_data = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to meters
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->len_info); i++)
+		if (*(p_nim_len_info + i) == 255) {
+			ctx->len_info[i] = 65535;
+		} else {
+			uint32_t len = *(p_nim_len_info + i) * *(p_nim_units + i);
+
+			if (len > 65535)
+				ctx->len_info[i] = 65535;
+			else
+				ctx->len_info[i] = (uint16_t)len;
+		}
+}
+
/*
 * Read and cache basic QSFP/QSFP28 module data into ctx: DMI options,
 * vendor strings, supported fiber lengths and the required power class.
 * Returns 0 on success, -1 when an EEPROM read fails.
 */
static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
{
	const bool pg_addr = page_addressing(ctx->nim_id);
	uint8_t options;
	uint8_t value;
	uint8_t nim_len_info[5];
	uint16_t nim_units[5] = { 1000, 2, 1, 1,
				 1
			       }; /* QSFP MSA units in meters */
	const char *yes_no[2] _unused = { "No", "Yes" };

	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
	       nim_id_to_text(ctx->nim_id), ctx->nim_id);

	/* Read DMI options */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
				    sizeof(options), &options, NIM_READ) != 0)
		return -1;
	/*
	 * NOTE(review): yes_no[] below is indexed with avg_pwr, which is a
	 * masked bit value; this assumes the ctx field normalizes it to 0/1
	 * (i.e. is declared bool) - verify against the ctx declaration.
	 */
	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
	NT_LOG(DBG, ETHDEV,
	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
	       ctx->instance, yes_no[ctx->avg_pwr]);

	/* Vendor name, product number, serial number, date and revision */
	qsfp_read_vendor_info(ctx);
	NT_LOG(DBG, PMD,
	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
	       ctx->date, ctx->rev);

	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
				    sizeof(nim_len_info), nim_len_info,
				    NIM_READ) != 0)
		return -1;

	/*
	 * Returns supported length information in meters for various fibers as 5 indivi-
	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
	 * If no length information is available for a certain entry, the returned value
	 * will be zero. This will be the case for SFP modules - EBW entry.
	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
	 * supported length is greater than this.
	 */

	nim_import_len_info(ctx, nim_len_info, nim_units);

	/* Read required power level */
	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
				    sizeof(value), &value, NIM_READ) != 0)
		return -1;

	/*
	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
	 * If power class >= 5 setHighPower must be called for the module to be fully
	 * functional
	 */
	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
		/* NIM in power class 1 - 4 (bits encode class - 1) */
		ctx->pwr_level_req =
			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
				  1);
	} else {
		/* NIM in power class 5 - 7 */
		ctx->pwr_level_req =
			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
	}

	return 0;
}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx < 0) {
+		/*
+		 * All lanes together
+		 * The condition below indicates that the module supports rate selection
+		 */
+		if (ctx->speed_mask == (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+			uint16_t data;
+
+			if (speed == NT_LINK_SPEED_100G) {
+				data = 0xAAAA;
+			} else if (speed == NT_LINK_SPEED_40G) {
+				data = 0x0000;
+			} else {
+				NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+
+			/* Set speed for Rx and Tx on all lanes */
+			write_data_lin(ctx, rx_rate_sel_addr, sizeof(data), &data);
+			write_data_lin(ctx, tx_rate_sel_addr, sizeof(data), &data);
+		} else {
+			/* For ordinary modules only this speed is supported */
+			if (speed != NT_LINK_SPEED_100G) {
+				NT_LOG(ERR, NTHW,
+				       "NIM cannot select this speed (%s).",
+				       nt_translate_link_speed(speed));
+				return -1;
+			}
+		}
+	} else {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if (translate_nimid(ctx) == NT_NIM_SFP_SFP_PLUS) {
+		return nim_sfp_set_rate_select(ctx, speed);
+	} else if (translate_nimid(ctx) == NT_NIM_QSFP28) {
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	}
+	NT_LOG(ERR, ETHDEV,
+	       "%s nim is not supported for adjustable link speed.",
+	       nim_id_to_text(ctx->nim_id));
+	return -1;
+}
+
+/*
+ * Reads basic vendor and DMI information.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	if (strcmp(ctx->prod_no, "FTL410QE1C") == 0) {
+		/* FINISAR FTL410QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_TX_BIAS) | (1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTL410QE2C") == 0) {
+		/* FINISAR FTL410QE2C, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP) |
+			       (1 << NIM_OPTION_SUPPLY);
+	} else if (strcmp(ctx->prod_no, "FTL4C1QE1C") == 0) {
+		/* FINISAR FTL4C1QE1C, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z") == 0) {
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		/* AVAGO 79E4Z, QSFP+ */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+	} else if (strcmp(ctx->prod_no, "AFBR-79E4Z-D") == 0) {
+		/* AVAGO 79E4Z-D, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EQDZ") == 0) {
+		/* AVAGO 79EQDZ, QSFP+ */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBRZ") == 0) {
+		/*
+		 * Avago RxOnly BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		/* SFF-8436_rev4.1, p67 */
+		ctx->options = (1 << NIM_OPTION_RX_ONLY);
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ-NU1") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-79EBPZ") == 0) {
+		/*
+		 * Avago RxTx BiDi NIM
+		 * No sensors available not even the normally mandatory temp sensor and this
+		 * is ok since the temp sensor is not mandatory on active optical modules
+		 */
+		ctx->options = 0;
+	} else if (strcmp(ctx->prod_no, "AFBR-89CDDZ") == 0) {
+		/* AVAGO 89CDDZ, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BDDZ") == 0) {
+		/* AVAGO 89BDDZ, QSFP28, BiDi */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "AFBR-89BRDZ") == 0) {
+		/*
+		 * AVAGO 89BRDZ, QSFP28, BiDi, RxOnly
+		 * but sensors have been set as above except for Tx sensors
+		 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY);
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01P") == 0) {
+		/* Sumitomo SQF1000L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1000L4LNGG01B") == 0) {
+		/* Sumitomo SQF1000L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01P") == 0) {
+		/* Sumitomo SQF1001L4LNGG01P, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1001L4LNGG01B") == 0) {
+		/* Sumitomo SQF1001L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "SQF1002L4LNGG01B") == 0) {
+		/* Sumitomo SQF1002L4LNGG01B, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/171") == 0) {
+		/* Fujitsu FIM37700/171, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FIM37700/172") == 0) {
+		/* Fujitsu FIM37700/172, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC85S-NVS") == 0) {
+		/* InnoLight TR-FC85S-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "TR-FC13L-NVS") == 0) {
+		/* InnoLight TR-FC13L-NVS, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9551REPM") == 0) {
+		/* Finisar FTLC9551REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else if (strcmp(ctx->prod_no, "FTLC9558REPM") == 0) {
+		/* Finisar FTLC9558REPM, QSFP28 */
+		ctx->options =
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	} else {
+		/*
+		 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+		 * and active optical modules
+		 */
+		ctx->options = (1 << NIM_OPTION_TEMP);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly impirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	ctx->options = 0;
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TEMP_LIN_ADDR, 1,
+					 QSFP_TEMP_THRESH_LIN_ADDR, true,
+					 NIM_OPTION_TEMP);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_VOLT_LIN_ADDR, 1,
+					 QSFP_VOLT_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_SUPPLY);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_RX_PWR_LIN_ADDR, 4,
+					 QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_RX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_PWR_LIN_ADDR, 4,
+					 QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_POWER);
+
+	qsfpplus_find_qsfp_sensor_option(ctx, QSFP_TX_BIAS_LIN_ADDR, 4,
+					 QSFP_BIAS_THRESH_LIN_ADDR, false,
+					 NIM_OPTION_TX_BIAS);
+}
+
/*
 * Determine ctx->port_type and related SFP/SFP+/SFP28 properties from the
 * module EEPROM: nominal bit rate, connector type, ethernet compliance,
 * transmitter technology and supported fiber lengths.
 * NOTE(review): the small numeric register offsets used below appear to
 * follow the SFP MSA (SFF-8472) address map - confirm against the spec.
 */
static void sfp_find_port_params(nim_i2c_ctx_p ctx)
{
	uint8_t data;
	uint16_t bit_rate_nom;
	uint8_t connector;
	uint8_t gig_eth_comp;
	uint8_t dmi_opt;
	uint8_t fiber_chan_tx_tech;
	unsigned int len_sm;
	unsigned int len_mm_50um;
	unsigned int len_mm_62_5um;

	ctx->specific_u.sfp.sfp28 = false;

	/* gigEthComp: */
	static const uint8_t eth_1000_b_t = 1 << 3;
	static const uint8_t eth_1000_b_sx = 1 << 0;
	static const uint8_t eth_1000_b_lx = 1 << 1;

	/* fiberChanTxTech: */
	static const uint8_t cu_passive = 1 << 2;
	static const uint8_t cu_active = 1 << 3;

	/* dmiOpt: */
	static const uint8_t dd_present = 1 << 6;

	/* connector: */
	static const uint8_t cu_pig_tail = 0x21;

	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;

	/* Nominal bit rate is stored in units of 100 Mbit/s */
	read_data_lin(ctx, 12, sizeof(data), &data);
	bit_rate_nom = (uint16_t)(data * 100);

	read_data_lin(ctx, 2, sizeof(connector), &connector);
	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);

	read_data_lin(ctx, 15, sizeof(data), &data);
	len_sm = (unsigned int)data * 100; /* Unit is 100m */

	read_data_lin(ctx, 16, sizeof(data), &data);
	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */

	read_data_lin(ctx, 17, sizeof(data), &data);
	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */

	/* First find out if it is a SFP or a SFP+ NIM */
	if (bit_rate_nom == 0) {
		/*
		 * A Nominal bit rate of zero indicates that it has not been defined and must
		 * be deduced from transceiver technology
		 */
		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
	} else if (bit_rate_nom == 25500) {
		/* 25500 (25.5 Gbit/s) identifies an SFP28 module */
		/* SFF-8024 - 4.4 Extended Specification Compliance References */
		read_data_lin(ctx, 36, sizeof(data), &data);

		if (data == 0x02)
			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
		else if (data == 0x03)
			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
		else if (data == 0x0B)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
		else if (data == 0x0C)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
		else if (data == 0x0D)
			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
		else
			ctx->port_type = NT_PORT_TYPE_SFP_28;

		ctx->specific_u.sfp.sfp28 = true;
		ctx->specific_u.sfp.sfpplus = true;

		/*
		 * Allowlist of 25G transceivers known to also support 10G.
		 * There is no way to inquire about this capability.
		 */
		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
			ctx->specific_u.sfp.dual_rate = true;

			/* Change the port type for dual rate modules */
			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
		}

		return;
	}
	/* 10 Gbit/s nominal rate or above means SFP+ */
	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
	/* Then find sub-types of each */
	if (ctx->specific_u.sfp.sfpplus) {
		if (fiber_chan_tx_tech & cu_active) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
		} else if (fiber_chan_tx_tech & cu_passive) {
			if (connector == cu_pig_tail)
				ctx->port_type =
					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
			else
				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
		} else {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
		}
		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
			ctx->specific_u.sfp.dual_rate = true;
		}

		read_data_lin(ctx, 65, sizeof(data), &data);
		/* Test hard RATE_SELECT bit */
		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);

		read_data_lin(ctx, 93, sizeof(data), &data);
		/* Test soft RATE_SELECT bit */
		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
	} else { /* SFP */
		/* 100M */
		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
			ctx->port_type = NT_PORT_TYPE_SFP_FX;
		/* 1G */
		} else {
			ctx->specific_u.sfp.cu_type = false;
			if (gig_eth_comp & eth_1000_b_sx) {
				ctx->port_type = NT_PORT_TYPE_SFP_SX;
			} else if (gig_eth_comp & eth_1000_b_lx) {
				ctx->port_type = NT_PORT_TYPE_SFP_LX;
			} else if (gig_eth_comp & eth_1000_b_t) {
				ctx->specific_u.sfp.tri_speed =
					sfp_is_supported_tri_speed_pn(ctx->prod_no);

				if (ctx->specific_u.sfp.tri_speed) {
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
				} else {
					ctx->port_type = NT_PORT_TYPE_SFP_CU;
				}
				ctx->specific_u.sfp.cu_type = true;
			} else {
				/*
				 * Not all modules report their ethernet compliance correctly so use
				 * length indicators
				 */
				if (len_sm > 0)
					ctx->port_type = NT_PORT_TYPE_SFP_LX;
				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
					ctx->port_type = NT_PORT_TYPE_SFP_SX;
			}

			/* Add Diagnostic Data suffix if necessary */
			if (dmi_opt & dd_present) {
				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
				else if (ctx->port_type ==
						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
					ctx->port_type =
						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
			}
		}
	}
}
+
+
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		ctx->speed_mask = NT_LINK_SPEED_25G; /* Default for SFP28 */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		ctx->speed_mask = NT_LINK_SPEED_10G; /* Default for SFP+ */
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+		if (ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else { /* SFP */
+		if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+			ctx->speed_mask = NT_LINK_SPEED_100M;
+		} else {
+			ctx->speed_mask = NT_LINK_SPEED_1G; /* Default for SFP */
+			if (ctx->specific_u.sfp.dual_rate ||
+					ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_100M;
+			if (ctx->specific_u.sfp.tri_speed)
+				ctx->speed_mask |= NT_LINK_SPEED_10M;
+		}
+	}
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	if (fiber_chan_speed & (1 << 7)) {
+		/* SFF-8024, Rev 4.7, Table 4-4 */
+		uint8_t extended_specification_compliance_code = 0;
+
+		read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+			    &extended_specification_compliance_code);
+
+		switch (extended_specification_compliance_code) {
+		case 0x02:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+			break;
+		case 0x03:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+			break;
+		case 0x0B:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+			break;
+		case 0x0C:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+			break;
+		case 0x0D:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+			break;
+		case 0x25:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+			break;
+		case 0x26:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+			break;
+		case 0x27:
+			ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+			break;
+		default:
+			ctx->port_type = NT_PORT_TYPE_QSFP28;
+		}
+	} else {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	const uint8_t ext_rate_select_compl_reg_addr = 141;
+	const uint8_t options_reg_addr = 195;
+	const uint8_t enh_options_reg_addr = 221;
+
+	uint8_t rate_select_ena = (read_byte(ctx, options_reg_addr) >> 5) &
+				0x01; /* bit: 5 */
+
+	if (rate_select_ena == 0)
+		return false;
+
+	uint8_t rate_select_type = (read_byte(ctx, enh_options_reg_addr) >> 2) &
+				 0x03; /* bit 3..2 */
+
+	if (rate_select_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       rate_select_type);
+		return false;
+	}
+
+	uint8_t ext_rate_select_ver = read_byte(ctx, ext_rate_select_compl_reg_addr) &
+				   0x03; /* bit 1..0 */
+
+	if (ext_rate_select_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_rate_select_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/* Derive the supported-speed bitmask for a QSFP28 module or lane. */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool pam4_type = ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_LR;
+
+	if (pam4_type) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = (ctx->lane_idx < 0) ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = (ctx->lane_idx < 0) ? NT_LINK_SPEED_100G :
+			  NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (ctx->lane_idx < 0)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/* Classify a QSFP+ module (copper DAC vs optical) from its device technology byte. */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech = 0;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	const uint8_t tech_class = device_tech & 0xF0;
+
+	if (tech_class == 0xA0 || tech_class == 0xB0) {
+		/* Copper cable, unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tech_class >= 0xC0) {
+		/* Copper cable with active equalizers (limiting or linear) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* QSFP+ speed mask: 40G when all lanes are used, otherwise 10G per lane. */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read basic SFP data and, on success, derive port type and speed mask.
+ * Returns the sfp_read_basic_data() result (zero on success).
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	const int res = sfp_read_basic_data(ctx);
+
+	if (res != 0)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return 0;
+}
+
+/* Initialize QSFP+ specific context fields; lane_idx < 0 means all lanes. */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4; /* a QSFP+ module always carries four lanes */
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * Construct and pre-initialize a QSFP+ module context: read the module's
+ * static data, derive port parameters, resolve the sensor/TX-disable option
+ * set and compute the supported-speed mask. Returns the result of
+ * qsfpplus_read_basic_data() (zero on success); on failure the context is
+ * left only constructed.
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (!res) {
+		qsfpplus_find_port_params(ctx);
+		/*
+		 * If not on the known modules list try to figure out which sensors that are present
+		 */
+		if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+			NT_LOG(DBG, NTHW,
+			       "NIM options not known in advance - trying to detect");
+			qsfpplus_get_qsfp_options_from_data(ctx);
+		}
+
+		/*
+		 * Read if TX_DISABLE has been implemented
+		 * For passive optical modules this is required while it for copper and active
+		 * optical modules is optional. Under all circumstances register 195.4 will
+		 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+		 */
+		uint8_t value;
+
+		read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(value), &value);
+
+		ctx->tx_disable = (value & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+
+		if (ctx->tx_disable)
+			ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+		/*
+		 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+		 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+		 * module default disabled.
+		 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+		 * decided that this option should not be detected automatically but from PN
+		 */
+		ctx->specific_u.qsfp.rx_only =
+			(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+		qsfpplus_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * After a module reset, wait until the QSFP28 module is ready for register
+ * access. If the module advertises the init-complete flag (revision
+ * compliance >= 7, register 221 bit 4), poll byte 6 bit 0 with a timeout;
+ * otherwise fall back to a fixed 500 ms delay. Only the context addressing
+ * all lanes (-1) or the first lane performs the wait.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool init_complete_flag_present = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		init_complete_flag_present = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       init_complete_flag_present);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!init_complete_flag_present) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise wait for the init complete flag to be set */
+	int count = 0;
+
+	while (true) {
+		/* NOTE(review): 11 polls of 100 ms give ~1.1 s, slightly over the stated 1 s */
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+		count++;
+	}
+}
+
+/*
+ * Determine FEC capabilities for a QSFP28 module.
+ *
+ * Modules on the known product-number list are media-side FEC enabled by
+ * definition. For all other modules the FEC control bits are read from the
+ * module (SFF-8636, Rev 2.10a, Table 6-28, Page 03h byte 227).
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare the product number by content; the previous pointer
+		 * comparison (==) of a char array against a string literal
+		 * could never match.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128; /* byte 227 on page 3 (linear address) */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * Full QSFP28 pre-initialization: QSFP+ preinit plus the QSFP28-specific
+ * ready-wait, state reset, port classification, FEC options and speed mask.
+ * Returns the qsfpplus_preinit() result (zero on success).
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	const int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (res != 0)
+		return res;
+
+	qsfp28_wait_for_ready_after_reset(ctx);
+
+	/* Start from a clean QSFP28-specific state */
+	memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+	       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+	ctx->specific_u.qsfp.qsfp28 = true;
+
+	qsfp28_find_port_params(ctx);
+	qsfp28_get_fec_options(ctx);
+	qsfp28_set_speed_mask(ctx);
+	return res;
+}
+
+/*
+ * Allocate and chain the sensor group list for an SFP/SFP+/SFP28 module:
+ * temperature, voltage, bias current, TX power and RX power.
+ *
+ * nim_sensors_ptr[m_port_no] receives the head of the list and
+ * *nim_sensors_cnt the number of groups successfully allocated. Allocation
+ * results are checked (allocate_nim_sensor_group() may return NULL); on
+ * failure the groups built so far are kept and the count reflects them.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* The temperature sensor heads the list */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+
+	if (sensor == NULL) /* allocation failed; do not dereference */
+		return;
+
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* Remaining groups: voltage, bias current, TX power, RX power */
+	static void (*const read_funcs[])(struct nim_sensor_group *,
+					  nthw_spis_t *) = {
+		&nim_read_sfp_voltage,
+		&nim_read_sfp_bias_current,
+		&nim_read_sfp_tx_power,
+		&nim_read_sfp_rx_power,
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(read_funcs); i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no,
+							 ctx,
+							 NT_SENSOR_SOURCE_PORT,
+							 &sfp_sensors_level1[i]);
+		if (sensor->next == NULL)
+			return; /* keep the groups built so far */
+
+		sensor = sensor->next;
+		sensor->read = read_funcs[i];
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate and chain the sensor group list for a QSFP+/QSFP28 module:
+ * temperature, voltage, four bias-current, four TX-power and four RX-power
+ * groups. *nim_sensors_cnt is incremented once per successfully allocated
+ * group. Allocation results are checked (allocate_nim_sensor_group() may
+ * return NULL); on failure the groups built so far are kept.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* The temperature sensor heads the list */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+
+	if (sensor == NULL) /* allocation failed; do not dereference */
+		return;
+
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	if (sensor->next == NULL)
+		return;
+
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* level-1 entries 1..4: bias current, 5..8: TX power, 9..12: RX power */
+	for (uint8_t i = 1; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		if (sensor->next == NULL)
+			return; /* keep the groups built so far */
+
+		sensor = sensor->next;
+
+		if (i < 5)
+			sensor->read = &nim_read_qsfp_bias_current;
+		else if (i < 9)
+			sensor->read = &nim_read_qsfp_tx_power;
+		else
+			sensor->read = &nim_read_qsfp_rx_power;
+
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate a single sensor group node and bind it to a sensor description.
+ * Returns NULL if the group itself cannot be allocated.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *sg = malloc(sizeof(*sg));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+
+	sg->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	sg->ctx = ctx;
+	sg->next = NULL;
+
+	return sg;
+}
+
+/*
+ * Classify the NIM from its identifier, run the type-specific
+ * pre-initialization and register its sensors. @extra optionally carries the
+ * lane index (int8_t) for QSFP modules; NULL means all lanes (-1).
+ *
+ * Returns zero on success, and non-zero on construction or pre-init failure
+ * or for unsupported NIM types. (The original discarded the construct and
+ * preinit return values; they are now propagated.)
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	if (res)
+		return res; /* do not probe further on a failed construct */
+
+	const int8_t lane_idx = extra ? *(int8_t *)extra : (int8_t)-1;
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		res = sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		res = qsfpplus_preinit(ctx, lane_idx);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		res = qsfp28_preinit(ctx, lane_idx);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* Snapshot of NIM link state; filled in by nim_state_build(). */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * Per-port NIM (Network Interface Module) I2C context: identification data
+ * read from the module plus capabilities derived during pre-initialization.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr;
+	uint8_t regaddr;
+	uint8_t nim_id; /* raw module identifier byte, see nim_id_to_text() */
+	nt_port_type_t port_type;
+
+	/* Identification fields; sized for the raw NIM field plus a terminator */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr;
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5];
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options; /* bitmask of (1 << nim_option_t) capabilities */
+	bool tx_disable; /* module implements TX_DISABLE (register 195.4) */
+	bool dmi_supp;
+
+	union {
+		/* SFP/SFP+/SFP28 specific state */
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel;
+			bool sw_rate_sel;
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		/* QSFP+/QSFP28 specific state */
+		struct {
+			bool rx_only;
+			bool qsfp28; /* true once qsfp28_preinit() has run */
+			union {
+				struct {
+					uint8_t rev_compliance;
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/*
+ * One node in a per-port singly-linked list of NIM sensors; the lists are
+ * built by the *_nim_add_all_sensors() functions.
+ */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	/* callback that samples the sensor and updates its value */
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+	struct nim_i2c_ctx *ctx; /* owning module context */
+	struct nim_sensor_group *next; /* next group; NULL terminates the list */
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+/* Set or clear TX laser disable for an SFP module. */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+/*
+ * Set or clear TX laser disable for a QSFP+ module; lane_idx selects a single
+ * lane (presumably < 0 means all lanes, as elsewhere in this API — confirm).
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+/*
+ * This function tries to classify NIM based on it's ID and some register reads
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific type of NIMS.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+/* Byte 0 of the module memory map: module identifier code */
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+/* SFP identification fields (basic device 0xA0 address space) */
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+/* Linear addresses >= 256 address the diagnostics device (0xA2) */
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+/* QSFP identification fields */
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+/* NOTE(review): lowercase macro names are unconventional for C constants */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+/*
+ * NIM capability/option identifiers. Values are bit positions used in the
+ * nim_i2c_ctx options bitmask, e.g. options |= (1 << NIM_OPTION_TX_DISABLE).
+ */
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+/*
+ * NIM module identifier codes (read from byte 0 of the module memory map;
+ * values appear to follow the SFF-8024 identifier table — confirm).
+ */
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT entries is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead. The enum is append-only: deprecated
+ * entries are retained (some only as aliases of existing values) so that the
+ * numeric values of later entries remain stable.
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/*
+ * Map a link speed enum value to a short human-readable string.
+ * Unknown values trip a debug assert and yield "Unhandled".
+ */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *name;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].name;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/* Convert a link speed enum value to its speed in bit/s (0 for unknown). */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_1G:
+		return 1ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_10G:
+		return 10ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_25G:
+		return 25ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_40G:
+		return 40ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_50G:
+		return 50ULL * 1000ULL * 1000ULL * 1000ULL;
+	case NT_LINK_SPEED_100G:
+		return 100ULL * 1000ULL * 1000ULL * 1000ULL;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render a link-speed bitmask as a comma-separated list in @buffer of
+ * capacity @length bytes. The buffer is always NUL-terminated; output that
+ * does not fit is dropped rather than overflowed. Returns @buffer.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	if (buffer == NULL || length == 0) /* nothing can be written */
+		return buffer;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if (((1U << i) & link_speed_mask) == 0)
+			continue;
+
+		size_t len = strlen(buffer);
+		/*
+		 * Fix: strncat()'s bound is the number of characters that may
+		 * still be appended (excluding the terminator), not the total
+		 * buffer size the original code passed - that could overflow
+		 * the buffer once it was partially filled.
+		 */
+		size_t space = (len + 1 < length) ? (length - len - 1) : 0;
+
+		if (len > 0 && space > 2) {
+			strncat(buffer, ", ", space);
+			len = strlen(buffer);
+			space = length - len - 1;
+		}
+
+		if (space > 0)
+			strncat(buffer, nt_translate_link_speed(1 << i),
+				space);
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ * NOTE(review): bit values do not follow speed order (e.g. NT_LINK_SPEED_50G
+ * = 0x40 is numerically above NT_LINK_SPEED_100G = 0x20); do not iterate
+ * over the bits assuming ascending speeds.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Guard renamed: identifiers starting with an underscore followed by an
+ * uppercase letter are reserved for the implementation (ISO C 7.1.3).
+ * The new name follows the driver's I2C_NIM_H_/NIM_DEFINES_H_ convention.
+ */
+#ifndef QSFP_REGISTERS_H_
+#define QSFP_REGISTERS_H_
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+/* NOTE(review): also defined in nim_defines.h with the same value */
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* QSFP_REGISTERS_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read @count 16-bit sensor values starting at linear address @addr into
+ * p_lane_values. Always returns true.
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * NOTE(review): this self-assignment is a no-op although the
+		 * trailing comment claims a byte swap; presumably the module
+		 * data is big-endian and an actual 16-bit swap was intended
+		 * here - confirm against read_data_lin()'s byte order.
+		 */
+		*p_lane_values = (*p_lane_values); /* Swap to little endian */
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (one signed 16-bit value; callers scale by 10/256,
+ * i.e. units of 1/256 degree C - confirm against SFF-8636).
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage (one 16-bit value; callers divide the raw value
+ * by 10).
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM TX bias current for four lanes (p_value must hold 4 uint16_t).
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes (p_value must hold 4 uint16_t).
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes (p_value must hold 4 uint16_t).
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/*
+	 * Fix: read the RX power registers (linear address 34). The original
+	 * code copy-pasted QSFP_TX_PWR_LIN_ADDR and therefore reported TX
+	 * power as RX power.
+	 */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/* Sensor callback: NIM temperature, reported as raw * 10 / 256. */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	int16_t raw = 0;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_temperature(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw * 10 / 256));
+}
+
+/* Sensor callback: NIM supply voltage, reported as raw / 10. */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t raw = 0;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_supply_voltage(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw / 10));
+}
+
+/* Sensor callback: per-lane TX bias current, reported as raw * 2. */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t lanes[4] = { 0 };
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane] * 2);
+}
+
+/* Sensor callback: per-lane TX optical power (raw value). */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t lanes[4] = { 0 };
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
+
+/* Sensor callback: per-lane RX optical power (raw value). */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	(void)t_spi;
+	uint16_t lanes[4] = { 0 };
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t lane = 0; lane < sg->ctx->lane_count; lane++)
+		update_sensor_value(sg->sensor, (int)lanes[lane]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef QSFP_SENSORS_H
+#define QSFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/*
+ * Readout callbacks for QSFP/QSFP+ module sensors.
+ * Each function reads one quantity from the NIM referenced by sg->ctx and
+ * pushes the result (or -1 on read failure) into sg->sensor.
+ * The t_spi argument is unused by the QSFP variants.
+ */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* QSFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SFP_P_REGISTERS_H
+#define SFP_P_REGISTERS_H
+
+/*
+ * SFP/SFP+ Registers
+ * Linear addresses: 0-255 map to the A0h page, 256+ to the A2h (DOM) page.
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+/* NOTE(review): the two bits below have no accompanying register address
+ * define here - confirm which A2h control/status register they belong to.
+ */
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* SFP_P_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;
+	int16_t offset = 0;
+
+	/* DMI (diagnostic monitoring) must be supported by the module */
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	/* htons == byte swap on little-endian hosts: converts the
+	 * big-endian register value to host order
+	 */
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Swap calibration to little endian */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/* Apply value * slope / 256 + offset, saturating to the
+		 * 16-bit range of the chosen signedness
+		 */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature
+ */
+/* Fetch the (optionally externally calibrated) module temperature; signed. */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR,
+					     SFP_TEMP_SLOPE_LIN_ADDR,
+					     SFP_TEMP_OFFSET_LIN_ADDR,
+					     p_value, true, ctx);
+	return ok;
+}
+
+/*
+ * Read NIM supply voltage
+ */
+/* Fetch the (optionally externally calibrated) supply voltage; unsigned. */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR,
+					     SFP_VOLT_SLOPE_LIN_ADDR,
+					     SFP_VOLT_OFFSET_LIN_ADDR,
+					     p_value, false, ctx);
+	return ok;
+}
+
+/*
+ * Read NIM bias current
+ */
+/* Fetch the (optionally externally calibrated) TX bias current; unsigned. */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+					     SFP_TX_BIAS_SLOPE_LIN_ADDR,
+					     SFP_TX_BIAS_OFFSET_LIN_ADDR,
+					     p_value, false, ctx);
+	return ok;
+}
+
+/*
+ * Read NIM TX optical power
+ */
+/* Fetch the (optionally externally calibrated) TX optical power; unsigned. */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+					     SFP_TX_PWR_SLOPE_LIN_ADDR,
+					     SFP_TX_PWR_OFFSET_LIN_ADDR,
+					     p_value, false, ctx);
+	return ok;
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];
+	float power_raised;
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	/* htons == byte swap on little-endian hosts: converts the
+	 * big-endian register value to host order
+	 */
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* Swap to little endian */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Horner-style accumulation: coefficients are stored
+		 * highest-order first, so rx_pwr_cal[3]..[0] multiply
+		 * RxPwr^1..RxPwr^4 respectively
+		 */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/* Check out for out of range */
+		/* NOTE(review): overflow (>65535) fails the call while
+		 * underflow (<0) clamps to 0 - confirm this asymmetry is
+		 * intentional
+		 */
+		if (rx_power > 65535)
+			return false;
+
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Read RX optical power if it exists
+ */
+/* Fetch the calibrated RX optical power from the live DOM register. */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR,
+					       p_value);
+}
+
+/*
+ * Read the SFP module temperature and push it into the sensor group.
+ * On read failure the sensor is set to -1.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_temperature(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Convert the raw 1/256-unit reading to tenths */
+	update_sensor_value(sg->sensor, (int)(temp * 10 / 256));
+}
+
+/*
+ * Read the SFP supply voltage and push it into the sensor group.
+ * On read failure the sensor is set to -1.
+ */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_supply_voltage(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor,
+			    (int)(temp / 10)); /* Unit: 100uV -> 1mV */
+}
+
+/*
+ * Read the SFP TX bias current and push it into the sensor group.
+ * On read failure the sensor is set to -1.
+ */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_bias_current(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(temp * 2));
+}
+
+/*
+ * Read the SFP TX optical power and push it into the sensor group.
+ * On read failure the sensor is set to -1.
+ */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_power(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)temp);
+}
+
+/*
+ * Read the SFP RX optical power and push it into the sensor group.
+ * On read failure the sensor is set to -1.
+ */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t temp;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_rx_power(sg->ctx, &temp)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)temp);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SFP_SENSORS_H
+#define SFP_SENSORS_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/*
+ * Readout callbacks for SFP/SFP+ module sensors.
+ * Each function reads one quantity from the NIM referenced by sg->ctx and
+ * pushes the result (or -1 on read failure) into sg->sensor.
+ * The t_spi argument is unused by the SFP variants.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* SFP_SENSORS_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate and zero-initialize a GMF instance.
+ * calloc replaces the malloc+memset pair and additionally guards the
+ * (here trivial) count*size multiplication against overflow.
+ * Returns NULL on allocation failure; release with nthw_gmf_delete().
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/* Scrub and free an instance from nthw_gmf_new(); NULL is a no-op. */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a GMF instance to FPGA module instance n_instance and resolve all
+ * register/field handles. With p == NULL this only probes for the module's
+ * existence (0 if present, -1 if not). Returns 0 on success, -1 if the
+ * instance does not exist in this FPGA.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	/* Probe-only mode: report presence without initializing */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory registers/fields: *_get_* variants fail hard if absent */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters default to 1 (neutral multiplier/divisor) */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields: *_query_* variants return NULL when the
+	 * FPGA image does not implement them
+	 */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/* Enable/disable the GMF, unless administratively blocked (license expiry). */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame-gap control. */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	const uint32_t val = enable ? 1 : 0;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, val);
+}
+
+/* Set "TX now always"; silently ignored when the optional field is absent. */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_now_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Set "TX on timestamp always"; ignored when the optional field is absent. */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_always == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Set "adjust TX-on-TS when the clock is set"; ignored if field is absent. */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock == NULL)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			    enable ? 1 : 0);
+}
+
+/*
+ * Enable/disable automatic IFG adjustment.
+ * Pass an explicit 0/1 like every other setter in this file instead of
+ * relying on the implicit bool-to-uint32 conversion (same value, consistent
+ * style).
+ */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write a raw IFG speed value to the SPEED register.
+ * Returns 0 on success, -1 when the value exceeds the accepted range.
+ * NOTE(review): the range check accepts values up to 2^(width-1) inclusive;
+ * confirm whether the intended field limit is 2^width - 1.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		/* Write one 32-bit word for narrow fields, two for wide ones */
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/* Width in bits of the IFG SPEED field of this FPGA image. */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(bit_width >= 22);
+
+	return bit_width;
+}
+
+/*
+ * Program the IFG speed from a bit-rate limit relative to the link speed.
+ * The requested fraction (n_rate_limit_bits / n_link_speed) is scaled by the
+ * product parameters IFG_SPEED_MUL/DIV, then converted to the register's
+ * fixed-point encoding (1/rate - 1, scaled by 2^(width/2)).
+ * Returns 0 on success, -1 if the computed value does not fit the field.
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	/* Half the field width is the fractional-part width of the encoding */
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed from a rate limit expressed in percent of line rate.
+ * 0.0 and 100.0 both map to raw value 0 (no limiting); values up to 99 are
+ * converted to the register encoding; anything between 99 and 100 (or out of
+ * range) is rejected with -1.
+ * NOTE(review): the exact floating-point compares against 0.0/100.0 only
+ * catch callers passing those literals - presumably intentional sentinels.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		/* Same fixed-point encoding as nthw_gmf_set_ifg_speed_bits() */
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit IFG clock delta (passed as two 32-bit words). */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	uint32_t *p_words = (uint32_t *)&delta;
+
+	field_set_val(p->mp_ifg_clock_delta_delta, p_words, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the 64-bit delta-adjust value; ignored if the optional register is absent. */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (p->mp_ifg_clock_delta_adjust == NULL)
+		return;
+
+	field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+		     (uint32_t *)&delta_adjust, 2);
+	field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+}
+
+/* Write the 64-bit max-adjust slack (passed as two 32-bit words). */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	uint32_t *p_words = (uint32_t *)&slack;
+
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, p_words, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value. */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation,
+			    compensation);
+}
+
+/* Collect the sticky status bits into a GMF_STATUS_MASK_* bitmask. */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed) != 0)
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted) != 0)
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+/* Clear (write-one-to-clear) the sticky bits selected in the given bitmask. */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if ((status & GMF_STATUS_MASK_DATA_UNDERFLOWED) != 0)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if ((status & GMF_STATUS_MASK_IFG_ADJUSTED) != 0)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Return the GMF "next packet" timestamp in ns, or UINT64_MAX when the
+ * optional STAT_NEXT_PKT register is absent in this FPGA image.
+ * UINT64_MAX replaces ULONG_MAX: unsigned long is only 32 bits on 32-bit
+ * and Windows LLP64 targets, which would yield a wrong sentinel for uint64_t.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Return the maximum packet delay in ns, or UINT64_MAX when the optional
+ * STAT_MAX_DELAYED_PKT register is absent in this FPGA image.
+ * UINT64_MAX replaces ULONG_MAX: unsigned long is only 32 bits on 32-bit
+ * and Windows LLP64 targets, which would yield a wrong sentinel for uint64_t.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Permanently block the GMF: disable it now, then latch the administrative
+ * flag so nthw_gmf_set_enable() refuses any later re-enable (used to enforce
+ * license expiry). The order matters: the flag must be set after the disable
+ * call, or the disable itself would be refused.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/* Bitmask values returned/accepted by the sticky-status accessors */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/*
+ * Handle for one GMF (generic MAC feeder) FPGA module instance.
+ * Register/field pointers suffixed per register; fields resolved via
+ * register_query_field() may be NULL when absent from the FPGA image.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;	/* optional; may be NULL */
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;	/* optional; may be NULL */
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;	/* optional; may be NULL */
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	nt_register_t *mp_ifg_clock_delta_adjust;	/* optional; may be NULL */
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;	/* optional; may be NULL */
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	nt_register_t *mp_stat_next_pkt;	/* optional; may be NULL */
+	nt_field_t *mp_stat_next_pkt_ns;	/* optional; may be NULL */
+
+	nt_register_t *mp_stat_max_delayed_pkt;	/* optional; may be NULL */
+	nt_field_t *mp_stat_max_delayed_pkt_ns;	/* optional; may be NULL */
+
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+/* Lifecycle: new -> init -> (use) -> delete */
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+/* Setters for optional fields are silently ignored when the field is absent */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+/* Speed setters return 0 on success, -1 when the value is out of range */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+/* One address/value pair of the Si5340 rev D configuration sequence
+ * (presumably exported from Silicon Labs' ClockBuilder tool - the table
+ * below follows its standard layout; TODO confirm)
+ */
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..ec32dd88e6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC (Receive MAC Controller) context.
+ *
+ * Returns NULL on allocation failure. The caller owns the object and
+ * must release it with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc() zero-initializes in one step (replaces malloc + memset) */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/*
+ * Scrub and release an RMC context.
+ * A NULL pointer is accepted and ignored. The object is zeroed before
+ * being freed, matching the allocation-time initialization.
+ */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nthw_rmc_t));
+	free(p);
+}
+
+/*
+ * Attach an RMC context to FPGA module instance @n_instance and cache
+ * the CTRL/STATUS/DBG/MAC_IF register and field handles.
+ *
+ * When @p is NULL the call is a pure probe: it returns 0 if the module
+ * instance exists and -1 otherwise, without touching any state.
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count; falls back to the generic NT_PORTS parameter */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* CTRL register is mandatory; fields fetched unconditionally */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/* STATUS/DBG/MAC_IF registers are optional: query first, resolve
+	 * fields only when the register exists in this FPGA image
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read the current per-MAC-port block mask from the CTRL register. */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/*
+ * Read the STATUS.SF_RAM_OF (overflow) counter.
+ * Returns 0xffffffff when the optional STATUS register is absent.
+ */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_sf_ram_of) :
+	       0xffffffff;
+}
+
+/*
+ * Read the STATUS.DESCR_FIFO_OF (descriptor FIFO overflow) counter.
+ * Returns 0xffffffff when the optional STATUS register is absent.
+ */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	return (p->mp_reg_status) ? field_get_updated(p->mp_fld_descr_fifo_of) :
+	       0xffffffff;
+}
+
+/*
+ * Read the DBG.MERGE field.
+ * Returns 0xffffffff when the optional DBG register is absent.
+ */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	return (p->mp_reg_dbg) ? field_get_updated(p->mp_fld_dbg_merge) : 0xffffffff;
+}
+
+/*
+ * Read the MAC_IF.ERR field.
+ * Returns 0xffffffff when the optional MAC_IF register is absent.
+ */
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	return (p->mp_reg_mac_if) ? field_get_updated(p->mp_fld_mac_if_err) :
+	       0xffffffff;
+}
+
+/* Write @mask to the per-MAC-port block field and flush it to hardware. */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Block all RMC traffic paths (statistics drop, keep-alive and all MAC
+ * ports). No-op while an administrative block is in force, so that
+ * nthw_rmc_unblock() cannot accidentally re-enable traffic.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Re-enable RMC traffic forwarding.
+ * The MAC-port field is written with a mask that keeps ports above the
+ * active count blocked: bits below mn_ports (or mn_nims when
+ * @b_is_secondary) are cleared, higher bits stay set.
+ * NOTE(review): the shift assumes mn_ports/mn_nims < 32 — shifting a
+ * 32-bit value by >= 32 is undefined behavior; confirm parameter range.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary)
+{
+	uint32_t n_block_mask = ~0U << (b_is_secondary ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Latch an administrative block: block all MAC ports and set the flag
+ * that makes nthw_rmc_block()/nthw_rmc_unblock() no-ops afterwards.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..2df4462287
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * RMC (Receive MAC Controller) context: module binding plus cached
+ * register/field handles resolved once in nthw_rmc_init().
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports;	/* RX port count (NT_RX_PORTS, fallback NT_PORTS) */
+	int mn_nims;	/* NIM count (NT_NIMS) */
+	bool mb_is_vswitch;	/* FPGA profile is FPGA_INFO_PROFILE_VSWITCH */
+
+	/* When set, nthw_rmc_block()/nthw_rmc_unblock() become no-ops */
+	bool mb_administrative_block;
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional; NULL when absent) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional; NULL when absent) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/* Next free FPGA sensor slot, one counter per adapter; advanced by
+ * get_fpga_idx().
+ * NOTE(review): this is file-local state — consider declaring it
+ * 'static' so the symbol is not exported.
+ */
+uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * This function setups monitoring of AVR sensors
+ */
+/*
+ * This function setups monitoring of AVR sensors
+ *
+ * Registers a single sensor with the AVR monitor over SPI; the AVR
+ * places results in the FPGA slot returned by get_fpga_idx().
+ * Returns the allocated FPGA result index (even if the SPI setup call
+ * failed — the failure is only logged).
+ * NOTE(review): identifiers starting with an underscore at file scope
+ * are reserved by the C standard; consider renaming.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format: b0,1 = endianness, b2,3 = signedness (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Read callback for AVR-backed sensors: fetch the raw value from the
+ * FPGA result slot, convert it via the group's conv_func and record it.
+ * Silently returns when the group or its sensor is missing.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t p_sensor_result;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+/*
+ * Create and initialize an AVR-backed sensor group.
+ *
+ * Allocates the group, allocates/registers the sensor with the AVR
+ * monitor and wires up the read/conversion callbacks.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * group (release with sensor_deinit()).
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	/*
+	 * allocate_sensor() returns NULL on allocation failure; bail out
+	 * instead of dereferencing a NULL sensor below
+	 */
+	if (sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor allocation failed", __func__);
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Allocate the next FPGA result slot for @adapter_no.
+ * Post-increment allocator over s_fpga_indexes: returns the current
+ * slot and advances the per-adapter counter.
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	uint8_t tmp = s_fpga_indexes[adapter_no];
+
+	s_fpga_indexes[adapter_no] = (uint8_t)(tmp + 1);
+
+	return tmp;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Read callback for the FPGA die-temperature sensor: sample the
+ * TEMPMON_STAT_TEMP field and store the converted value.
+ * The SPI handle is unused for this sensor.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/* raw ADC reading -> temperature; presumably in the 0.1 degree
+	 * Celsius unit of NT_SENSOR_TYPE_TEMPERATURE — TODO confirm the
+	 * constants against the FPGA sysmon documentation
+	 */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the FPGA die-temperature sensor group.
+ *
+ * Allocates the group and its TEMPMON monitor, then binds the read
+ * callback. Returns NULL on allocation failure; the caller owns the
+ * returned group (release with sensor_deinit()).
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	/*
+	 * tempmon_new() returns NULL on allocation failure (already
+	 * logged); the read callback dereferences monitor->fields, so a
+	 * group without a monitor must not be returned
+	 */
+	if (sg->monitor == NULL) {
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate an uninitialized FPGA sensor monitor.
+ * Logs and returns NULL on allocation failure; the caller must follow
+ * up with tempmon_init().
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *monitor =
+		malloc(sizeof(struct nt_fpga_sensor_monitor));
+
+	if (!monitor)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return monitor;
+}
+
+/*
+ * Bind a sensor monitor to the TEMPMON FPGA module and cache the
+ * TEMPMON_STAT.TEMP field handle in t->fields[0].
+ * Errors are logged; on failure the monitor is left partially
+ * initialized (callers check t->fields/t->fields[0] before use).
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return;	/* do not pass a NULL module to module_get_register() */
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return;	/* do not pass a NULL register to register_get_field() */
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+/* Shorthand for the sensor descriptor tables below */
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ *
+ * NOTE(review): these descriptor tables are read-only data — consider
+ * declaring them 'const'.
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* QSFP modules expose per-lane (1-4) bias and power sensors */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+/* Encoded into sensor_mon_setup_data16.format bits b2,3 (endian in b0,1) */
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01^M */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT	/* sentinel: number of device ids */
+};
+
+/* Wire format exchanged with the AVR — must stay byte-packed */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/* NOTE(review): packing is restored BEFORE sensor_mon_setup16, so that
+ * struct gets natural alignment (possible padding after setup_cnt)
+ * while its element type is packed — confirm this matches the AVR wire
+ * protocol.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Perform one SPI v3 transfer to the AVR.
+ *
+ * @rxsz is in/out: on entry the receive buffer capacity, on return the
+ * number of bytes actually received. It may be NULL when no reply
+ * payload is expected (the original code dereferenced *rxsz before the
+ * NULL check — fixed to guard the initial read as well).
+ * Returns 0 on success, the nthw_spi_v3_transfer() error code otherwise.
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = rxsz ? (uint16_t)*rxsz : 0,
+				  .p_buf = rx };
+
+	int res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Fetch the raw value of FPGA sensor slot @fpga_idx into
+ * @p_sensor_result. Thin wrapper around nthw_spis_read_sensor();
+ * returns its status code.
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+/*
+ * Send a sensor-monitoring setup request to the AVR.
+ * The transmit size covers only the setup_cnt entries actually used,
+ * not the full 40-entry setup_data array. A non-empty reply is treated
+ * as a protocol error.
+ * Returns 0 on success, non-zero on transfer or protocol error.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Send a sensor-monitoring control command (stop/run/remove-all) to
+ * the AVR. A non-empty reply is treated as a protocol error.
+ * Returns 0 on success, non-zero on transfer or protocol error.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..e944dca5ce
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7, /* sentinel: number of sensor types */
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated..
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source
+ * Values are distinct bits so sources can be combined into a bitmask.
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Primary/Secondary
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_PRIMARY, /* Adapter is primary in the bonding */
+	NT_BONDING_SECONDARY, /* Adapter is secondary in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ * Several deprecated names are kept as aliases (= older enumerator) for
+ * backward compatibility; the top two bits are group markers, not
+ * adapter ids.
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "sensors.h"
+#include "ntlog.h"
+
+/*
+ * Release a sensor group and the sensor/monitor objects it owns.
+ * NULL is accepted and ignored.
+ */
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		/* free(NULL) is a no-op, so no per-pointer guards are needed */
+		free(sg->sensor);
+		free(sg->monitor);
+		free(sg);
+	}
+}
+
+/*
+ * Allocate and fully initialize one adapter/port sensor.
+ *
+ * adapter_or_port_index: stored in info.source_index
+ * p_name: sensor name; copied, truncated to NT_INFO_SENSOR_NAME chars
+ * ssrc/type/index: identity of the sensor
+ * event_alarm: alarm reporting mode
+ * si: signedness used when converting raw readings
+ *
+ * Returns a heap-allocated sensor (caller owns and frees it) or NULL on
+ * allocation failure.
+ */
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	/* 0xFF = not yet assigned to a specific interface/adapter */
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	/* NAN sentinel marks "no reading yet" for value and both extremes */
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	/* copy at most NT_INFO_SENSOR_NAME bytes; terminator is set below */
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/*
+ * Record a new reading and maintain the running low/high water marks.
+ * NT_SENSOR_NAN marks an uninitialized extreme; the unsigned cast compares
+ * the stored int32_t against that sentinel bit pattern.
+ */
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+/*
+ * Allocate and initialize a sensor from a static description table entry.
+ * Returns a heap-allocated sensor (caller owns and frees it) or NULL on
+ * allocation failure.
+ */
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	/* 0xFF = not yet assigned to a specific interface/adapter */
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	/*
+	 * Initialize the dynamic state the same way allocate_sensor() does;
+	 * previously sub_type/state/value fields were left as uninitialized
+	 * malloc'ed memory.
+	 */
+	sensor->info.sub_type = descr->sub_type;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	/* copy at most NT_INFO_SENSOR_NAME bytes; terminator is set below */
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+/*
+ * Reset a (caller-allocated) sensor group to a known empty state.
+ * Does not free anything; sg must point to valid storage.
+ */
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters (stray semicolons after the function bodies removed: a ';' after
+ * a function definition at file scope is not valid ISO C before C23)
+ */
+
+/* Current sensor value. */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+}
+
+/* Lowest value registered since initialization. */
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+}
+
+/* Highest value registered since initialization. */
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+}
+
+/* Sensor name (NUL-terminated, owned by the sensor). */
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+}
+
+/* Conversion functions */
+
+/* Interpret the low 16 bits of the raw reading as signed (sign-extends). */
+int null_signed(uint32_t p_sensor_result)
+{
+	return (int16_t)p_sensor_result;
+}
+
+/* Interpret the low 16 bits of the raw reading as unsigned. */
+int null_unsigned(uint32_t p_sensor_result)
+{
+	return (uint16_t)p_sensor_result;
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vch value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.015 (PRESCALE is accounted for)
+ * ******************************************************************************
+ */
+int exar7724_vch(uint32_t p_sensor_result)
+{
+	/* 0.015 V/LSB expressed in integer math: 15 mV per LSB */
+	return p_sensor_result * 15; /* NT unit: 1mV */
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Vin value to Napatech internal representation
+ * Doc: Vout = ReadVal * 0.0125
+ * ******************************************************************************
+ */
+int exar7724_vin(uint32_t p_sensor_result)
+{
+	/* 0.0125 V/LSB == 12.5 mV/LSB, computed as *25/2 to stay in integers */
+	return (p_sensor_result * 25) / 2; /* NT unit: 1mV */
+}
+
+/*
+ * ******************************************************************************
+ * For EXAR7724: Convert a read Tj value to Napatech internal representation
+ * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
+ *                      = ReadVal * 5K
+ * ******************************************************************************
+ */
+int exar7724_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * A value of 2730 is used instead of 2732 which is more correct but since
+	 * the temperature step is 5 degrees it is more natural to show these steps
+	 */
+	/* 5 K/LSB in 0.1 C units = 50; 2730 converts Kelvin to Celsius */
+	return p_sensor_result * 50 - 2730; /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * Conversion function for Linear Technology Linear_5s_11s format.
+ * The functions returns Y * 2**N, where N = b[15:11] is a 5-bit two's complement
+ * integer and Y = b[10:0] is an 11-bit two's complement integer.
+ * The multiplier value is used for scaling to Napatech units.
+ * ******************************************************************************
+ */
+static int conv5s_11s(uint16_t value, int multiplier)
+{
+	int n, y;
+
+	/* Y = b[10:0], 11-bit two's complement */
+	y = value & 0x07FF;
+
+	if (value & 0x0400)
+		y -= 0x0800; /* The MSBit is a sign bit */
+
+	/* N = b[15:11], 5-bit two's complement (range -16..15) */
+	n = (value >> 11) & 0x1F;
+
+	if (n & 0x10)
+		n -= 0x20; /* The MSBit is a sign bit */
+
+	y *= multiplier;
+
+	/* Apply the 2**N exponent: shift up for N>0, divide down for N<0 */
+	if (n > 0)
+		y *= (1 << n);
+
+	else if (n < 0)
+		y /= (1 << (-n));
+
+	return y;
+}
+
+/*
+ * ******************************************************************************
+ * Temperature conversion from Linear_5s_11s format.
+ * ******************************************************************************
+ */
+int ltm4676_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * NOTE(review): the (uint16_t) cast discards the sign of the converted
+	 * value, so a sub-zero temperature would wrap to a large positive
+	 * number — confirm negative readings cannot occur for this sensor.
+	 */
+	return (uint16_t)conv5s_11s(p_sensor_result, 10); /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For MP2886a: Convert a read Tj value to Napatech internal representation
+ * ******************************************************************************
+ */
+int mp2886a_tj(uint32_t p_sensor_result)
+{
+	/*
+	 * MPS-2886p: READ_TEMPERATURE (register 0x8Dh)
+	 * READ_TEMPERATURE is a 2-byte, unsigned integer.
+	 */
+	/* keep only the low 16 bits of the raw register value */
+	return (uint16_t)p_sensor_result; /* NT unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For MAX6642: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int max6642_t(uint32_t p_sensor_result)
+{
+	/* a high byte of 0xFF signals an invalid/failed reading */
+	if ((p_sensor_result >> 8) == 0xFF)
+		return NT_SENSOR_NAN;
+
+	/* The six lower bits are not used */
+	return (int)(((p_sensor_result >> 6) * 5) /
+		     2); /* NT unit: 0.25 deg, Native unit: 0.1C */
+}
+
+/*
+ * ******************************************************************************
+ * For DS1775: Convert a read temperature value to Napatech internal representation
+ * ******************************************************************************
+ */
+int ds1775_t(uint32_t p_sensor_result)
+{
+	/* scale 1/256 C native steps to 0.1 C units */
+	return (p_sensor_result * 10) /
+	       256; /* NT unit: 0.1 deg, Native unit: 1/256 C */
+}
+
+/*
+ * ******************************************************************************
+ * For FAN: Convert a tick count to RPM
+ * NT unit: RPM, Native unit: 2 ticks/revolution
+ * ******************************************************************************
+ */
+int fan(uint32_t p_sensor_result)
+{
+	/*
+	 * NOTE(review): with 2 ticks/revolution, *60/4 implies the tick count
+	 * covers a 2-second window — confirm against the hardware spec.
+	 */
+	return (p_sensor_result * 60U / 4);
+}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
+/* Alarm reporting mode for a sensor */
+enum nt_sensor_event_alarm_e {
+	NT_SENSOR_ENABLE_ALARM, /* Raise alarm events for this sensor */
+	NT_SENSOR_LOG_ALARM, /* Log alarm conditions instead of raising events */
+	NT_SENSOR_DISABLE_ALARM, /* Never alarm for this sensor */
+};
+
+/*
+ * Sensor Class types
+ */
+/* Classifies a sensor by the component it measures */
+enum nt_sensor_class_e {
+	NT_SENSOR_CLASS_FPGA =
+		0, /* Class for FPGA based sensors e.g FPGA temperature */
+	NT_SENSOR_CLASS_MCU =
+		1, /* Class for MCU based sensors e.g MCU temperature */
+	NT_SENSOR_CLASS_PSU =
+		2, /* Class for PSU based sensors e.g PSU temperature */
+	NT_SENSOR_CLASS_PCB =
+		3, /* Class for PCB based sensors e.g PCB temperature */
+	NT_SENSOR_CLASS_NIM =
+		4, /* Class for NIM based sensors e.g NIM temperature */
+	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
+};
+
+typedef enum nt_sensor_class_e nt_sensor_class_t;
+
+/*
+ * Port of the sensor class
+ */
+struct nt_adapter_sensor {
+	uint8_t m_adapter_no; /* Adapter number; 0xFF until assigned */
+	uint8_t m_intf_no; /* Interface (port) number; 0xFF until assigned */
+	uint8_t fpga_idx; /* for AVR sensors */
+	enum sensor_mon_sign si; /* Signedness of the raw reading */
+	struct nt_info_sensor_s info; /* Identity and current/extreme values */
+	enum nt_sensor_event_alarm_e alarm; /* Alarm reporting mode */
+	bool m_enable_alarm; /* Whether alarm evaluation is active */
+};
+
+/* FPGA register handles used to poll one sensor */
+struct nt_fpga_sensor_monitor {
+	nt_fpga_t *fpga; /* Owning FPGA instance */
+	nt_module_t *mod; /* FPGA module hosting the sensor register */
+
+	nt_register_t *reg; /* Register to read the sensor from */
+	nt_field_t **fields; /* Array of fields within reg; length fields_num */
+	uint8_t fields_num; /* Number of entries in fields */
+};
+
+/*
+ * Sensor description.
+ * Describe the static behavior of the sensor.
+ */
+struct nt_adapter_sensor_description {
+	enum nt_sensor_type_e type; /* Sensor type. */
+	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
+	unsigned int index; /* Sensor group index. */
+	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
+	char name[20]; /* Sensor name. */
+};
+
+/* A sensor plus the machinery to read and convert it; forms a linked list */
+struct nt_sensor_group {
+	struct nt_sensor_group *sensor; is wrong — keep original */
+	struct nt_fpga_sensor_monitor *monitor;
+	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi);
+
+	/* conv params are needed to call current conversion functions */
+	int (*conv_func)(uint32_t p_sensor_result);
+	/* i2c interface for NIM sensors */
+
+	struct nt_sensor_group *next;
+};
+
+void init_sensor_group(struct nt_sensor_group *sg);
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);
+
+void sensor_deinit(struct nt_sensor_group *sg);
+
+/* getters */
+int32_t get_value(struct nt_sensor_group *sg);
+int32_t get_lowest(struct nt_sensor_group *sg);
+int32_t get_highest(struct nt_sensor_group *sg);
+char *get_name(struct nt_sensor_group *sg);
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si);
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr);
+
+/* conversion functions */
+int null_signed(uint32_t p_sensor_result);
+int null_unsigned(uint32_t p_sensor_result);
+int exar7724_tj(uint32_t p_sensor_result);
+int max6642_t(uint32_t p_sensor_result);
+int ds1775_t(uint32_t p_sensor_result);
+int ltm4676_tj(uint32_t p_sensor_result);
+int exar7724_vch(uint32_t p_sensor_result);
+int exar7724_vin(uint32_t p_sensor_result);
+int mp2886a_tj(uint32_t p_sensor_result);
+int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:\n
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
+/* Max sensor name length, excluding the NUL terminator */
+#define NT_INFO_SENSOR_NAME 50
+struct nt_info_sensor_s {
+	enum nt_sensor_source_e
+	source; /* The source of the sensor (port or adapter on which the sensor resides) */
+	/*
+	 * The source index - the adapter number for adapter sensors and port number for port
+	 * sensors
+	 */
+	uint32_t source_index;
+	/*
+	 * The sensor index within the source index (sensor number on the adapter or sensor number
+	 * on the port)
+	 */
+	uint32_t sensor_index;
+	enum nt_sensor_type_e type; /* The sensor type */
+	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
+	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
+	int32_t value; /* The current value */
+	int32_t value_lowest; /* The lowest value registered */
+	int32_t value_highest; /* The highest value registered */
+	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (+1 for the NUL) */
+	enum nt_adapter_type_e
+	adapter_type; /* The adapter type where the sensor resides */
+};
+
+/* The NT200A02 adapter sensor id's */
+enum nt_sensors_adapter_nt200a02_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */
+
+	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
+	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NT200A02_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200A02_NIM_POWER,
+
+	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
+};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 4/8] net/ntnic: adds flow related FPGA functionality
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-09-05 14:54   ` Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Allocate a zero-initialized CAT module handle.
+ * Returns NULL on allocation failure; caller owns the result and releases
+ * it with cat_nthw_delete().
+ */
+struct cat_nthw *cat_nthw_new(void)
+{
+	/* calloc replaces the malloc+memset pair and zeroes in one step */
+	struct cat_nthw *p = calloc(1, sizeof(struct cat_nthw));
+
+	return p;
+}
+
+/* Free a handle created by cat_nthw_new(); NULL is accepted and ignored. */
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		/* scrub before free to help catch use-after-free; NOTE(review):
+		 * a plain memset right before free may be elided by the optimizer */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Forward the debug mode to the underlying CAT FPGA module.
+ * p must be a valid, initialized handle (no NULL check is performed).
+ */
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_cat, n_debug_mode);
+}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN record to operate on by writing @val to the CFN address field. */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Write @val to the CFN count field.
+ *
+ * NOTE(review): the name "r" appears mangled; by the convention of the
+ * sibling accessors (cat_nthw_kce_cnt, cat_nthw_cte_cnt, ...) this should
+ * likely be named cat_nthw_cfn_cnt.  Renaming requires a coordinated change
+ * of the header declaration and all callers, so it is only flagged here.
+ */
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/*
+ * CFN DATA field setters.
+ *
+ * Each helper writes @val into the corresponding CAT_CFN_DATA_* field of the
+ * register model; the values presumably take effect in hardware when
+ * cat_nthw_cfn_flush() is called (confirm against nthw_fpga_model).
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+/* PTC (protocol/packet type classification) match fields. */
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+/* Tunnel (inner-header) PTC match fields. */
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/*
+ * Optional CFN DATA fields: these were resolved with register_query_field()
+ * during init and may be NULL on FPGA variants lacking them, hence the
+ * assert() guard before each write.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+/* Mandatory error-match fields (always present, no assert needed). */
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/*
+ * PM (pattern match) fields.  PM_CMP is a multi-word field: the full word
+ * count is taken from the field descriptor itself (mn_words), so @val must
+ * point to at least that many uint32_t words.
+ */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1_OR is an optional field (query-resolved), hence the assert. */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Flush the CFN CTRL and DATA registers (one entry each). */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE/KCS/FTE accessors.  @index selects the KM-engine bank (0 or 1); on
+ * FPGAs with a single engine only index 0 is populated by init.
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/* CTE accessors: per-module enable bits of the category table entry. */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+/*
+ * The remaining enable bits are optional (query-resolved during init and
+ * possibly NULL on older FPGA images), hence the assert() guards.
+ */
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS accessors. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT accessors. */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+/* NFV_SB is an optional field (query-resolved), hence the assert. */
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT accessors. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO accessors. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+/* OFS is a signed offset; the value is stored via the 32-bit field writer. */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK accessors. */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+/*
+ * RCK DATA has no named sub-fields: write the raw register value and mark
+ * the register dirty so a subsequent flush pushes it out.
+ */
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN accessors. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC accessors.  The whole KCC register pair is optional (query-resolved
+ * during init), so every accessor asserts that the register/field exists.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* KEY is a two-word field; @val must point to at least 2 uint32_t words. */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+/*
+ * CCE accessors.  Like KCC, the CCE registers are optional
+ * (query-resolved), so each accessor asserts presence before writing.
+ */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+/* CCS accessors (optional register pair, query-resolved during init). */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * CATNTHW_CCS_SET(name): defines cat_nthw_ccs_data_<name>(), which asserts
+ * that the optional CAT_CCS_DATA_* field was resolved at init time and then
+ * writes @val into it.
+ *
+ * The expansion ends with the function's closing brace, so the invocations
+ * below must NOT be followed by a semicolon: a stray ';' at file scope after
+ * a function definition is not valid ISO C (C11 6.9) and triggers pedantic
+ * compiler warnings.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/* Flush the (optional) CCS CTRL and DATA registers; asserts they exist. */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN: select/cnt choose the record, the setters stage field values and
+ * flush writes them to hardware.
+ */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1 */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1 */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1 */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Run-time context for one CAT (categorizer) module instance: resolved
+ * register and field handles for every CAT table (CFN, KCE/KCS, FTE, CTE,
+ * CTS, COT, CCT, EXO, RCK, LEN, KCC, CCE, CCS), populated at init time and
+ * used by the setters/flush helpers above.
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	/* number of KM interfaces — presumably sizes the [2] arrays below;
+	 * TODO confirm against the init code
+	 */
+	int m_km_if_cnt;
+
+	/* CFN table: control register (addr/cnt) plus one field per column */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE 0/1 — two instances, indexed by the API's `index` argument */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	/* KCS 0/1 */
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	/* FTE 0/1 */
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE table */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS table */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT table */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT table */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO table */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK table */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN table */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC table */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE table */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS table */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU module model. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/* Allocate a zero-initialized CSU context; returns NULL on allocation failure. */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc yields the same zeroed storage as malloc+memset. */
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/* Release a CSU context previously returned by csu_nthw_new(); NULL is a no-op. */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the context before releasing it, as the original did. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve CSU module instance @n_instance and cache its RCP control/data
+ * register and field handles in @p.
+ *
+ * When @p is NULL the call only probes for the module's existence.
+ * Returns 0 on success, -1 if the FPGA has no such CSU instance.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the module exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* RCP control register: record address and access count fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	/* RCP data register: outer/inner L3/L4 checksum command fields. */
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	/* Select the RCP record address for subsequent data accesses. */
+	nt_field_t *adr = p->mp_rcp_ctrl_adr;
+
+	field_set_val32(adr, val);
+}
+
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	/* Set the RCP access count field. */
+	nt_field_t *cnt = p->mp_rcp_ctrl_cnt;
+
+	field_set_val32(cnt, val);
+}
+
+/*
+ * Select L3 calc method for outer layer3.
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/*
+ * Select L4 calc method for outer layer4.
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+ * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+ * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+ *    otherwise GOOD checksum.
+ * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+ *    GOOD checksum.
+ */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/*
+ * Select L3 calc method for inner layer3 (tunneled).
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/*
+ * Select L4 calc method for inner layer4 (tunneled).
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+ * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+ * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+ *    otherwise GOOD checksum.
+ * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+ *    GOOD checksum.
+ */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	/* Write the staged RCP control and data registers back to hardware. */
+	nt_register_t *ctrl = p->mp_rcp_ctrl;
+	nt_register_t *data = p->mp_rcp_data;
+
+	register_flush(ctrl, 1);
+	register_flush(data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Run-time context for one CSU (checksum update) module instance: resolved
+ * handles for the RCP control/data registers and their fields, populated by
+ * csu_nthw_init().
+ */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	/* RCP control: record address and access count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	/* RCP data: outer/inner L3/L4 checksum command fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Allocate a zero-initialized FLM context; returns NULL on allocation failure. */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc yields the same zeroed storage as malloc+memset. */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+/* Release an FLM context previously returned by flm_nthw_new(); NULL is a no-op. */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Scrub the context before releasing it, as the original did. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug-mode setting to the underlying FLM module model. */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM CONTROL register field setters.
+ * Each setter only stages the value in the driver-side field shadow;
+ * nothing reaches the hardware until flm_nthw_control_flush() writes
+ * the CONTROL register.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+/* NOTE(review): PDS/PIS are asserted before use — presumably these fields
+ * are optional on some FPGA images; confirm against the field init code.
+ */
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write all staged CONTROL field values to the hardware register. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM STATUS register accessors.
+ *
+ * The 'get' flag selects the direction: nonzero reads the field's
+ * current shadow value into *val; zero writes *val into the field
+ * (supported only by the read/write fields CRITICAL, PANIC and CRCERR;
+ * CALIBDONE, INITDONE, IDLE and EFT_BP are read-only and ignore the
+ * call when get == 0).  Use flm_nthw_status_update() to refresh the
+ * shadow copy from hardware and flm_nthw_status_flush() to write it
+ * back.
+ *
+ * Style fix: removed the stray blank line between the if-branch and
+ * 'else' in the read/write accessors (coding-style violation).
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write the staged STATUS field values to the hardware register. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the STATUS shadow copy from the hardware register. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT, SCRUB and LOAD_* register setters.
+ * Values are staged with field_set_val32() and written to hardware by
+ * the matching *_flush() call.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * PRIO register setters: four limit/flow-type pairs (0..3).
+ * All values are staged and written to hardware by flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST (port state table) accessors.  CTRL selects the table entry
+ * (address + count for auto-increment); DATA carries the entry fields.
+ * flm_nthw_pst_flush() writes CTRL first, then DATA.
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table accessors.  CTRL selects the recipe entry
+ * (address + count); the DATA setters stage the recipe fields.
+ * flm_nthw_rcp_flush() writes CTRL first, then DATA.
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: val must point to 10 32-bit words. */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Refresh the FLM buffer-control state over a RAB DMA transaction.
+ *
+ * Reads the two BUF_CTRL words and unpacks them into:
+ *   *lrn_free  - free space in the learn queue (low 16 bits, word 0)
+ *   *inf_avail - available INF_DATA words (high 16 bits, word 0)
+ *   *sta_avail - available STA_DATA words (low 16 bits, word 1)
+ *
+ * Returns 0 on success, a nonzero error code otherwise.  The output
+ * parameters are left untouched on failure.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/* Assign directly from the begin call (removed a dead '-1' store). */
+	int ret = nthw_rac_rab_dma_begin(rac);
+
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Index wrap-around; assumes bc_buf.size is a power of two —
+		 * TODO confirm against the DMA buffer allocator.
+		 */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write word_count words of learn records to LRN_DATA over RAB DMA.
+ *
+ * The word count is announced through BUF_CTRL first, then the records
+ * are written and BUF_CTRL is read back in the same transaction so the
+ * caller receives updated counters in *lrn_free, *inf_avail and
+ * *sta_avail (same layout as flm_nthw_buf_ctrl_update()).
+ *
+ * Returns 0 on success; -1 if the DMA transaction could not be started,
+ * or the commit error code.  Outputs are untouched on failure.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	/* -1 is returned unchanged when nthw_rac_rab_dma_begin() fails. */
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	if (nthw_rac_rab_dma_begin(rac) == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Index wrap-around; assumes bc_buf.size is a power of two —
+		 * TODO confirm against the DMA buffer allocator.
+		 */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count words of flow-information records from INF_DATA over
+ * RAB DMA into data[].
+ *
+ * The word count is announced through BUF_CTRL (high 16 bits of word 0)
+ * first, then INF_DATA and BUF_CTRL are read back in the same
+ * transaction; *lrn_free, *inf_avail and *sta_avail are refreshed as in
+ * flm_nthw_buf_ctrl_update().
+ *
+ * Returns 0 on success, a nonzero error code otherwise.  Outputs are
+ * untouched on failure.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	/* Assign directly from the begin call (removed a dead '-1' store). */
+	int ret = nthw_rac_rab_dma_begin(rac);
+
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring; masks assume power-of-two sizes —
+		 * TODO confirm against the DMA buffer allocator.
+		 */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read word_count words of status records from STA_DATA over RAB DMA
+ * into data[].
+ *
+ * The word count is announced through BUF_CTRL (word 1) first, then
+ * STA_DATA and BUF_CTRL are read back in the same transaction;
+ * *lrn_free, *inf_avail and *sta_avail are refreshed as in
+ * flm_nthw_buf_ctrl_update().
+ *
+ * Returns 0 on success, a nonzero error code otherwise.  Outputs are
+ * untouched on failure.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	/* Assign directly from the begin call (removed a dead '-1' store). */
+	int ret = nthw_rac_rab_dma_begin(rac);
+
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA ring; masks assume power-of-two sizes —
+		 * TODO confirm against the DMA buffer allocator.
+		 */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Learn/unlearn statistics counters.  Each *_cnt() reads the field's
+ * shadow value into *val when get is nonzero; the matching *_update()
+ * refreshes the shadow copy from the hardware register.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+/*
+ * Probe statistics counters.  These registers are optional: they were
+ * looked up with module_query_register()/register_query_field() at init
+ * and may be NULL on FPGA images without probe support, hence the
+ * asserts.
+ */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+/*
+ * Relearn/auto-unlearn/timeout-unlearn/flow-count statistics counters.
+ * Same get/update pattern as the learn counters above: *_cnt() reads
+ * the shadow value when get is nonzero; *_update() refreshes the shadow
+ * copy from hardware.
+ */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+/*
+ * Optional statistics counters (STA/INF/PCK/CSH/CUC groups).  All of
+ * these registers were looked up with module_query_register() /
+ * register_query_field() at init and may be NULL on FPGA images that
+ * lack them, hence the asserts before every access.  Same get/update
+ * pattern as the counters above.
+ */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn _done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl _done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb _done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel _done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul _done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul _done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta _done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf _done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Per-instance handle cache for one FLM module: all nt_register_t /
+ * nt_field_t pointers are resolved once at init time (see flm_nthw_init)
+ * and reused by the accessor functions declared above.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and its fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and its fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	/* Single-field configuration registers */
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/ft field pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST table: CTRL (address/count) + DATA registers */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP table: CTRL (address/count) + DATA registers */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* Learn/info/status buffer control */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	/* Data stream registers (whole-register access, no cached fields) */
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* Statistics: one register + one counter field per event type */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU module handle. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/* Allocate a zero-initialized hfu_nthw context; returns NULL on allocation
+ * failure.  calloc() gives the same zeroed result as malloc()+memset().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	return calloc(1, sizeof(struct hfu_nthw));
+}
+
+/* Scrub and free an hfu_nthw context; NULL is accepted and ignored. */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind @p to HFU module instance @n_instance of @p_fpga and resolve all
+ * RCP CTRL/DATA register and field handles used by the setters below.
+ *
+ * When @p is NULL the call only probes for the instance: 0 if it exists,
+ * -1 if not.  Otherwise returns 0 on success or -1 (with a log message)
+ * when the instance is missing.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of asking the FPGA again
+	 * (matches hsh_nthw_init and friends).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * RCP field setters.  Each helper writes one field of the HFU RCP CTRL or
+ * DATA register set via field_set_val32(); hfu_nthw_rcp_flush() then commits
+ * both registers with register_flush().
+ */
+
+/* Select the RCP record address (CTRL.ADR). */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP record count (CTRL.CNT). */
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Commit the staged CTRL and DATA register values to the device. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Per-instance handle cache for one HFU module: register and field
+ * pointers are resolved once in hfu_nthw_init() and reused by the
+ * setter functions declared below.
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP_CTRL register: record address and count */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP_DATA register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the requested debug mode to the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a HSH shim instance.
+ *
+ * @return New instance, or NULL on allocation failure. The caller owns
+ *         the instance and must release it with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct hsh_nthw));
+}
+
+/*
+ * Destroy an instance created by hsh_nthw_new(); NULL is ignored.
+ * The object is scrubbed before release so stale register/field
+ * pointers cannot be reused by accident.
+ */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a HSH shim to FPGA module instance n_instance, resolve the RCP
+ * control/data registers and their fields, and write a default recipe
+ * (all zeros, HSH_TYPE = 31) at RCP address 0.
+ *
+ * When p is NULL the call only probes for the module instance:
+ * returns 0 if it exists, -1 otherwise.
+ *
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field: register_query_field() returns NULL when absent */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	/* NOTE(review): hard-coded word count 10 must match the WORD_MASK
+	 * field width (and the val[] array above) — confirm against the
+	 * register definition. mp_rcp_data_p_mask is not defaulted here.
+	 */
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each call stages a value via field_set_val32()/
+ * field_set_val(); hsh_nthw_rcp_flush() below flushes the CTRL and
+ * DATA registers afterwards.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* val must hold at least mn_words 32-bit words */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* val must hold at least 10 32-bit words */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* Optional field; silently ignored when the FPGA lacks AUTO_IPV4_MASK */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Flush the staged CTRL and DATA register contents */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Handle for one HSH module instance: register/field pointers are
+ * resolved once by hsh_nthw_init() and cached here.
+ */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register (address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	/* Optional: NULL when the FPGA has no AUTO_IPV4_MASK field */
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the requested debug mode to the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a HST shim instance.
+ *
+ * @return New instance, or NULL on allocation failure. The caller owns
+ *         the instance and must release it with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct hst_nthw));
+}
+
+/*
+ * Destroy an instance created by hst_nthw_new(); NULL is ignored.
+ * The object is scrubbed before release so stale register/field
+ * pointers cannot be reused by accident.
+ */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a HST shim to FPGA module instance n_instance and resolve the
+ * RCP control/data registers and their fields.
+ *
+ * When p is NULL the call only probes for the module instance:
+ * returns 0 if it exists, -1 otherwise.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no default recipe is written
+ * and no register_flush() is issued here — confirm this is intended.
+ *
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: each call stages a value via field_set_val32();
+ * hst_nthw_rcp_flush() below flushes the CTRL and DATA registers
+ * afterwards.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Flush the staged CTRL and DATA register contents */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Handle for one HST module instance: register/field pointers are
+ * resolved once by hst_nthw_init() and cached here.
+ */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register (address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the requested debug mode to the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize an IFR shim instance.
+ *
+ * @return New instance, or NULL on allocation failure. The caller owns
+ *         the instance and must release it with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct ifr_nthw));
+}
+
+/*
+ * Destroy an instance created by ifr_nthw_new(); NULL is ignored.
+ * The object is scrubbed before release so stale register/field
+ * pointers cannot be reused by accident.
+ */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an IFR shim to FPGA module instance n_instance and resolve the
+ * RCP control/data registers and their fields.
+ *
+ * When p is NULL the call only probes for the module instance:
+ * returns 0 if it exists, -1 otherwise.
+ *
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle already found above instead of querying again */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/*
+ * RCP field setters: the asserts guard against use before
+ * ifr_nthw_init() has resolved the fields; ifr_nthw_rcp_flush()
+ * flushes the CTRL and DATA registers afterwards.
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Stage the RCP MTU field; flushed by ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fix copy-paste bug: assert the MTU field, not the EN field */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Flush the staged CTRL and DATA register contents */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Handle for one IFR module instance: register/field pointers are
+ * resolved once by ifr_nthw_init() and cached here.
+ */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register (address/count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Reduce a product-parameter count to a 0/1 presence flag. */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	return (val != 0) ? 1 : 0;
+}
+
+/*
+ * Allocate and zero-initialize an INFO shim instance.
+ *
+ * @return New instance, or NULL on allocation failure. The caller owns
+ *         the instance and must release it with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing the malloc()+memset() pair */
+	return calloc(1, sizeof(struct info_nthw));
+}
+
+/*
+ * Destroy an instance created by info_nthw_new(); NULL is ignored.
+ * The object is scrubbed before release.
+ */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Harvest the FPGA product parameters into the info shim: presence
+ * flags for the optional modules, then the capability counts (scaled
+ * by those flags so absent modules report 0).
+ *
+ * NOTE(review): unlike the sibling *_nthw_init() functions there is no
+ * p == NULL probe path here — confirm callers never pass NULL.
+ *
+ * @return Always 0.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* 0/1 presence flags for the optional FPGA modules */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE is only usable when all five sub-modules are present */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* NOTE(review): default -1 below is stored in what the getters
+	 * return as unsigned int — confirm callers expect the wrap.
+	 */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Simple accessors for the capability counts harvested by
+ * info_nthw_init(). Counts for absent modules read back as 0.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): init defaults m_cat_km_if_m0/m1 to -1; returned here
+ * as unsigned int — confirm callers expect the wrap.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name starts with a double underscore, which is a
+ * reserved identifier in ISO C - consider FLOW_NTHW_INFO_H_ instead.
+ */
+#ifndef __FLOW_NTHW_INFO_H__
+#define __FLOW_NTHW_INFO_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Allocate / free an INFO module handle. */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+/* Bind the handle to instance n_instance of the module on p_fpga;
+ * returns 0 on success, negative on error.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Read-only accessors for the capability values cached at init time. */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Capability snapshot of one FPGA instance. Populated by info_nthw_init()
+ * and read through the accessors above; treat the fields as read-only
+ * outside the INFO module implementation.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;	/* adapter/instance number */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+	/* Port layout */
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	/* CAT */
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	/* KM (CAM/TCAM) */
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	/* FLM */
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	/* HST / QSL / PDB / IOA / ROA / DBS */
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	/* CAT/KM interface */
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	/* TPE */
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* __FLOW_NTHW_INFO_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA module model. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IOA handle.
+ * Returns NULL on allocation failure; callers must check before use.
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() allocates and zero-fills in one step (replaces
+	 * malloc() + memset()).
+	 */
+	return calloc(1, sizeof(struct ioa_nthw));
+}
+
+/* Free an IOA handle; NULL is accepted and ignored. */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (p) {
+		/* Zeroed before free - presumably so stale pointers fail
+		 * fast on use-after-free; TODO confirm intent.
+		 */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an IOA handle to module instance n_instance of p_fpga and resolve
+ * all register/field pointers used by the wrapper functions below.
+ *
+ * Probe mode: when called with p == NULL, only reports whether the module
+ * instance exists (0) or not (-1), without touching any state.
+ *
+ * Returns 0 on success, -1 if the instance does not exist.
+ * NOTE(review): assert() is used - confirm <assert.h> is pulled in via
+ * one of the project headers.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 */
+		/* EPP registers are optional: use the query (not get)
+		 * variant and leave the field pointers NULL when absent so
+		 * the accessors below can skip them.
+		 */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/* RCP */
+/*
+ * Thin write helpers: each stages one field value in the shadow register
+ * model; nothing reaches hardware until the matching *_flush() call.
+ */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Commit staged RCP control + data values to hardware. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/* EPP accessors: the EPP registers are optional (see ioa_nthw_init), so
+ * every write is guarded by a NULL check on the resolved field pointer.
+ */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name starts with a double underscore, which is a
+ * reserved identifier in ISO C - consider FLOW_NTHW_IOA_H_ instead.
+ */
+#ifndef __FLOW_NTHW_IOA_H__
+#define __FLOW_NTHW_IOA_H__
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/*
+ * Handle for one IOA module instance. Holds resolved register and field
+ * pointers set up by ioa_nthw_init(); the mp_roa_epp_* members may be
+ * NULL when the optional EPP registers are absent.
+ */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number */
+	nt_fpga_t *mp_fpga;		/* owning FPGA handle (not owned) */
+
+	nt_module_t *m_ioa;
+
+	/* RCP control/data registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	/* Special VLAN TPID register */
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP registers (NULL when not present in the FPGA) */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+/* Lifecycle: allocate, bind to an FPGA instance, free. */
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* __FLOW_NTHW_IOA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Evaluate the field-pointer expression exactly once and write the value
+ * only when the field exists (register_query_field() returns NULL for
+ * fields absent in older FPGA versions).
+ * Original declared the temporary as "(a)" - an easily-misread
+ * parenthesized declarator - and left "val" unparenthesized in the
+ * expansion; both fixed here.
+ */
+#define CHECK_AND_SET_VALUE(_a, _val)                    \
+	do {                                             \
+		__typeof__(_a) field_ = (_a);            \
+		if (field_)                              \
+			field_set_val32(field_, (_val)); \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM module model. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized KM handle.
+ * Returns NULL on allocation failure; callers must check before use.
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() allocates and zero-fills in one step (replaces
+	 * malloc() + memset()).
+	 */
+	return calloc(1, sizeof(struct km_nthw));
+}
+
+/* Free a KM handle; NULL is accepted and ignored. */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (p) {
+		/* Zeroed before free - presumably so stale pointers fail
+		 * fast on use-after-free; TODO confirm intent.
+		 */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a KM handle to module instance n_instance of p_fpga and resolve
+ * all register/field pointers used by the wrapper functions below.
+ * Fields that differ between FPGA/module versions are looked up with
+ * register_query_field() (may return NULL) rather than
+ * register_get_field().
+ *
+ * Probe mode: when called with p == NULL, only reports whether the module
+ * instance exists (0) or not (-1), without touching any state.
+ *
+ * Returns 0 on success, -1 if the instance does not exist.
+ * NOTE(review): assert() is used - confirm <assert.h> is pulled in via
+ * one of the project headers.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* Version-dependent: SW8 (older) vs DW8 (newer) layout - the one
+	 * that resolves non-NULL decides which branch runs further down.
+	 */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/* RCP */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+};
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+};
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+};
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+};
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+};
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+};
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+};
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+};
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+};
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+};
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+};
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+};
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+};
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+};
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+};
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+};
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+};
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+};
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+};
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+};
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+};
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+};
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+};
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+};
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+};
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+};
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+};
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+};
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+};
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+};
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+};
+
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}; /* for DW8/DW10 from v6+ */
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+};
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+};
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+};
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+};
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+};
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+};
+
+/*
+ * KM RCP_DATA field setters.
+ *
+ * Each helper stages @val into the shadow of one RCP_DATA field; the
+ * staged values reach the FPGA when km_nthw_rcp_flush() is called.
+ * CHECK_AND_SET_VALUE() is presumably a NULL-guarded field_set_val32()
+ * for fields that only exist in some FPGA versions -- confirm against
+ * the macro definition.
+ *
+ * Fix: removed the stray ';' after each function body; an extra
+ * semicolon at file scope is not valid ISO C (warns with -pedantic)
+ * and is inconsistent with the sibling flow_nthw_*.c files.
+ */
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	/* Optional field in some FPGA versions; macro guards against NULL. */
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+/* Offsets are signed: they may point backwards relative to the dyn anchor. */
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+/*
+ * Write the staged RCP control and data register shadows to the FPGA.
+ * Fix: removed the stray ';' after the function body (-pedantic warning).
+ */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * CAM accessors: stage the CAM address/count and the six word/flow-type
+ * data fields, then push them with km_nthw_cam_flush().
+ * Fix: removed the stray ';' after each function body (-pedantic warning).
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	/* "select" writes the CAM address field of the control register. */
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/*
+ * TCAM accessors. km_nthw_tcam_t() writes three consecutive 32-bit
+ * words from @val (caller must supply at least 3 elements).
+ * Fix: removed the stray ';' after each function body (-pedantic warning).
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/*
+ * TCI accessors: stage color and flow-type data for the TCI table.
+ * Fix: removed the stray ';' after each function body (-pedantic warning).
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/*
+ * TCQ accessors. The qualifier has two layouts: a single 32-bit word
+ * (km_nthw_tcq_qual) and a 3-word/72-bit variant (km_nthw_tcq_qual72,
+ * used in KM v4) writing the same QUAL field.
+ * Fix: removed the stray ';' after each function body (-pedantic warning).
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	/* Optional field in some FPGA versions; macro guards against NULL. */
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant; @val must hold 3 words. To use in v4. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/*
+ * Context for one KM (key matcher) FPGA module instance: the module
+ * handle plus cached register/field handles used by the km_nthw_*
+ * accessors. Field pointers for version-dependent fields may be NULL.
+ *
+ * NOTE(review): km_nthw_rcp_mask_d_a() is declared above but no
+ * mp_rcp_data_mask_d_a member exists -- presumably it reuses
+ * mp_rcp_data_mask_a; confirm in flow_nthw_km.c.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	/* RCP: control (address/count) and data-field shadows */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	/* CAM: six match words and six flow-type fields */
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	/* TCAM */
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	/* TCI */
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	/* TCQ */
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB FPGA module. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized PDB context.
+ *
+ * @return new context, or NULL on allocation failure. The caller owns
+ *         the memory and releases it with pdb_nthw_delete().
+ */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing malloc() + memset(). */
+	return calloc(1, sizeof(struct pdb_nthw));
+}
+
+/*
+ * Release a PDB context. The structure is cleared before freeing so
+ * stale register/field pointers cannot be reused. NULL is accepted.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a PDB context to FPGA module instance @n_instance and resolve
+ * every register/field handle used by the pdb_nthw_* accessors.
+ *
+ * When called with @p == NULL the function only probes for the module:
+ * it returns 0 if the instance exists and -1 otherwise.
+ *
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* query (not get): field is optional -- presumably NULL when the
+	 * FPGA version lacks PCAP_KEEP_FCS; the setter checks for NULL.
+	 */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/*
+ * RCP accessors: each helper stages one RCP field value; the staged
+ * values are pushed to the FPGA by pdb_nthw_rcp_flush().
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	/* "select" writes the RCP address field of the control register. */
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+/* REL offsets are signed (may be negative relative to the DYN anchor). */
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	/* Field is optional (resolved via register_query_field); skip if absent. */
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+/* Write the staged RCP control and data register shadows to the FPGA. */
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * CONFIG accessors: stage the timestamp format and port offset, then
+ * push them with pdb_nthw_config_flush().
+ */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Context for one PDB (packet descriptor builder) FPGA module
+ * instance: module handle plus cached register/field handles for the
+ * pdb_nthw_* accessors. mp_rcp_data_pcap_keep_fcs is optional and may
+ * be NULL on FPGA versions without the field.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	/* RCP: control (address/count) and data-field shadows */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs; /* optional; NULL if absent */
+
+	/* CONFIG */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+/* Fix: removed a duplicated declaration of pdb_nthw_config_port_ofs(). */
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL FPGA module. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized QSL context.
+ *
+ * @return new context, or NULL on allocation failure. The caller owns
+ *         the memory and releases it with qsl_nthw_delete().
+ */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	/* calloc() zeroes the block, replacing malloc() + memset(). */
+	return calloc(1, sizeof(struct qsl_nthw));
+}
+
+/*
+ * Release a QSL context. The structure is cleared before freeing so
+ * stale register/field pointers cannot be reused. NULL is accepted.
+ */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (!p)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a QSL context to FPGA module instance @n_instance and resolve
+ * every register/field handle used by the qsl_nthw_* accessors.
+ *
+ * When called with @p == NULL the function only probes for the module:
+ * it returns 0 if the instance exists and -1 otherwise.
+ *
+ * Fields looked up with register_query_field() are version-dependent
+ * and left NULL when absent; the matching setters check for NULL.
+ *
+ * @return 0 on success, -1 if the module instance does not exist.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report whether the instance exists. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR)
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	/* Optional (version-dependent) RCP fields. */
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/*
+ * No QSL-specific setup is required; the parameters are accepted only
+ * for API symmetry with the other flow_nthw modules.
+ */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/*
+ * RCP accessors: stage one RCP field value each; pushed to the FPGA by
+ * qsl_nthw_rcp_flush(). Setters for optional (version-dependent)
+ * fields are NULL-guarded.
+ *
+ * Fixes: removed the stray ';' after qsl_nthw_rcp_select() and the
+ * stray blank lines between signature and body in tbl_hi/tbl_idx,
+ * matching the style of the rest of the file.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Optional field; NULL when the FPGA version lacks it. */
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * LTX accessors. The LTX registers were removed from QSL v0.7+, so
+ * qsl_nthw_init() may leave every mp_ltx_* pointer NULL; all accessors
+ * must therefore be NULL-guarded.
+ *
+ * Fixes:
+ * - qsl_nthw_ltx_cnt() guarded on mp_ltx_addr instead of the field it
+ *   writes (mp_ltx_cnt) -- wrong-field NULL check.
+ * - qsl_nthw_ltx_flush() dereferenced mp_ltx_ctrl/mp_ltx_data without
+ *   a NULL check, crashing on v0.7+ FPGAs; guards added for
+ *   consistency with the setters.
+ * - removed the stray ';' after qsl_nthw_ltx_tsa().
+ */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	if (p->mp_ltx_ctrl)
+		register_flush(p->mp_ltx_ctrl, 1);
+	if (p->mp_ltx_data)
+		register_flush(p->mp_ltx_data, 1);
+}
+
+/*
+ * QST accessors: stage queue-selection-table fields; pushed to the
+ * FPGA by qsl_nthw_qst_flush(). Setters for optional
+ * (version-dependent) fields are NULL-guarded.
+ */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+/* EN is resolved under either name (EN pre-v0.7, QEN from v0.7). */
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Optional field; NULL when the FPGA version lacks it. */
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/*
+ * QEN accessors: stage queue-enable entries; pushed to the FPGA by
+ * qsl_nthw_qen_flush().
+ */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/*
+ * UNMQ accessors: stage unmatched-packet queue mappings; pushed to the
+ * FPGA by qsl_nthw_unmq_flush().
+ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handle cache for one QSL (queue selection) FPGA module
+ * instance.  All handles are resolved once by qsl_nthw_init(); handles for
+ * fields that may be missing in a given FPGA image can be NULL and are
+ * guarded at the point of use.
+ */
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	/* RCP: recipe table control/data registers and their fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	/* LTX: local TX table */
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	/* QST: queue state table */
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	/* QEN: queue enable table */
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	/* UNMQ: unmatched-packet queue table */
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the debug-mode setting to the underlying RMC module handle. */
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RMC context; returns NULL on allocation failure. */
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	struct rmc_nthw *p = malloc(sizeof(struct rmc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free the context; the guard makes NULL a no-op (memset on
+ * NULL would be undefined behavior).
+ */
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to RMC instance @n_instance of @p_fpga and resolve its
+ * register/field handles.  When @p is NULL the call is a pure presence
+ * probe: returns 0 if the module exists, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	/* query variant: RPP_SLICE may be absent, handle stays NULL then --
+	 * NOTE(review): presumably optional on some FPGA images
+	 */
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+/* No per-instance setup is required for RMC; parameters are intentionally
+ * unused.  Kept for interface symmetry with the other flow-filter modules.
+ */
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL field writers: update the CTRL register shadow; rmc_nthw_ctrl_flush()
+ * commits it to the FPGA in one write.
+ */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+/* RPP_SLICE is looked up with register_query_field() at init and may be
+ * NULL; the write is skipped in that case.
+ */
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handle cache for one RMC FPGA module instance.
+ * Handles are resolved by rmc_nthw_init(); mp_ctrl_block_rpp_slice is
+ * looked up with the query variant and may be NULL.
+ */
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the debug-mode setting to the underlying ROA module handle. */
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+/* Allocate a zero-initialized ROA context; returns NULL on allocation failure. */
+struct roa_nthw *roa_nthw_new(void)
+{
+	struct roa_nthw *p = malloc(sizeof(struct roa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free the context; NULL is a no-op. */
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to ROA instance @n_instance of @p_fpga and resolve all
+ * register/field handles (TUN HDR, TUN CFG, CONFIG, LAG groups).  When @p
+ * is NULL the call is a pure presence probe: returns 0 if the module
+ * exists, -1 otherwise.  Returns 0 on success, -1 if the instance does
+ * not exist.
+ */
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR group: select a tunnel-header table entry, write the header data
+ * into the shadow, then flush to hardware.
+ */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+/* Writes 4 consecutive 32-bit words from @val into the TUNNEL_HDR field. */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG group: field writers for the tunnel-configuration data register
+ * shadow; roa_nthw_tun_cfg_flush() commits ctrl + data to the FPGA.
+ */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+/* Set the TX LAG index field in the TUN CFG data register shadow. */
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+/* Remaining TUN CFG field writers (recirculation control) plus the flush
+ * that commits the ctrl and data shadows to hardware.
+ */
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG group: forwarding-policy field writers for the single CONFIG
+ * register; roa_nthw_config_flush() commits it to the FPGA.
+ */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG group: select a LAG-config table entry, set the TX PHY port in the
+ * shadow, then flush ctrl + data to hardware.
+ */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+/* val points to 4 x 32-bit words of tunnel header data */
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+/*
+ * Register/field handle cache for one ROA FPGA module instance; all
+ * handles are resolved by roa_nthw_init().
+ */
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	/* TUN HDR: tunnel header table */
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	/* TUN CFG: tunnel configuration table */
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	/* CONFIG: forwarding policy */
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	/* LAG: link aggregation configuration table */
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Propagate the debug-mode setting to the underlying RPP_LR module handle. */
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+/* Allocate a zero-initialized RppLr context; returns NULL on allocation failure. */
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+/* Scrub and free the context; NULL is a no-op. */
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to RPP_LR instance @n_instance of @p_fpga and resolve
+ * its register/field handles.  When @p is NULL the call is a pure presence
+ * probe: returns 0 if the module exists, -1 otherwise.  The IFR recipe
+ * handles use the query variants and stay NULL when absent; their users
+ * assert before dereferencing.  Returns 0 on success, -1 on no instance.
+ */
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle resolved above instead of querying the
+	 * FPGA a second time (matches rmc/roa init).
+	 */
+	p->m_rpp_lr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/* RCP group writers: update the recipe ctrl/data register shadows; the
+ * asserts catch use before a successful rpp_lr_nthw_init().
+ */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* IFR RCP group writers.  IFR handles come from the query lookups in init
+ * and may be NULL on images without IFR support; callers must only use
+ * these when the feature exists -- the asserts enforce that in debug builds.
+ */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register/field handle cache for one RPP_LR FPGA module instance.
+ * The IFR handles are resolved with the query variants and may be NULL
+ * on FPGA images without IFR support.
+ */
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Propagate the debug-mode setting to the underlying SLC module handle. */
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+/* Allocate a zero-initialized SLC context; returns NULL on allocation failure. */
+struct slc_nthw *slc_nthw_new(void)
+{
+	struct slc_nthw *p = malloc(sizeof(struct slc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+/* Scrub and free the context; NULL is a no-op. */
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the context to SLC instance @n_instance of @p_fpga and resolve the
+ * RCP register/field handles.  When @p is NULL the call is a pure presence
+ * probe: returns 0 if the module exists, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle resolved above instead of querying the
+	 * FPGA a second time (matches rmc/roa init).
+	 */
+	p->m_slc = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP group writers: update the slicer recipe ctrl/data register shadows;
+ * slc_nthw_rcp_flush() commits both to the FPGA.
+ */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* @val is signed: the tail offset can be negative (declared int32_t). */
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard; consider a guard like FLOW_NTHW_SLC_H_.
+ */
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow for one SLC (slicer) FPGA module instance. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no; /* instance number passed to init */
+	nt_fpga_t *mp_fpga;            /* owning FPGA handle (not owned) */
+
+	nt_module_t *m_slc;            /* MOD_SLC module handle */
+
+	/* RCP control register: record address (ADR) and count (CNT) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;    /* RCP data register and its fields */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs; /* signed offset, see rcp_tail_ofs() */
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+/* Allocate a zeroed slc_nthw; free with slc_nthw_delete(). */
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+/* Resolve registers/fields; with p == NULL only probes for the module. */
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-register accessors; call slc_nthw_rcp_flush() to commit. */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC_LR module. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized slc_lr_nthw instance.
+ * calloc() yields the same all-zero state as malloc+memset and, like the
+ * original, returns NULL on allocation failure.
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Scrub and free an instance allocated by slc_lr_nthw_new(); NULL is a no-op. */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p) {
+		/* clear stale register/field pointers before freeing */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the SLC_LR module registers/fields for FPGA instance @n_instance.
+ *
+ * With p == NULL the call only probes: returns 0 when the module instance
+ * exists, -1 otherwise.  With a valid @p, fills in the shadow pointers and
+ * returns 0 on success, -1 when the instance does not exist.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Fixed copy-paste: this is the SLC_LR module, not SLC. */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; no second lookup needed. */
+	p->m_slc_lr = p_mod;
+
+	/* RCP control (ADR/CNT) and data registers with their fields */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Write the RCP record address (ADR) field of the SLC_LR control register. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Write the RCP count (CNT) field of the SLC_LR control register. */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Write the TAIL_SLC_EN field of the shadowed RCP data register. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Write the TAIL_DYN field of the shadowed RCP data register. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Write the TAIL_OFS field; note the offset is signed (int32_t). */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Write the PCAP field of the shadowed RCP data register. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush one entry of the shadowed ctrl and data registers to hardware. */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard; consider a guard like FLOW_NTHW_SLC_LR_H_.
+ */
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow for one SLC_LR FPGA module instance. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no; /* instance number passed to init */
+	nt_fpga_t *mp_fpga;            /* owning FPGA handle (not owned) */
+
+	nt_module_t *m_slc_lr;         /* MOD_SLC_LR module handle */
+
+	/* RCP control register: record address (ADR) and count (CNT) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;    /* RCP data register and its fields */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;
+	nt_field_t *mp_rcp_data_tail_dyn;
+	nt_field_t *mp_rcp_data_tail_ofs; /* signed offset, see rcp_tail_ofs() */
+	nt_field_t *mp_rcp_data_pcap;
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+/* Allocate a zeroed slc_lr_nthw; free with slc_lr_nthw_delete(). */
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+/* Resolve registers/fields; with p == NULL only probes for the module. */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-register accessors; call slc_lr_nthw_rcp_flush() to commit. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized tx_cpy_nthw instance.
+ * calloc() yields the same all-zero state as malloc+memset and, like the
+ * original, returns NULL on allocation failure.
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/*
+ * Free an instance allocated by tx_cpy_nthw_new(); NULL is a no-op.
+ * Also releases the writer table allocated in tx_cpy_nthw_init()
+ * (free(NULL) is safe if init never ran or failed early).
+ */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p) {
+		free(p->m_writers);
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the TX_CPY module registers/fields for FPGA instance @n_instance.
+ *
+ * With p == NULL the call only probes: returns 0 when the module instance
+ * exists, -1 otherwise.  With a valid @p it returns 0 on success, or -1
+ * when the instance is missing, no writers are configured, or the writer
+ * table allocation fails (caller releases @p via tx_cpy_nthw_delete()).
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_tx_cpy = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	/* Number of copy writers is an FPGA product parameter. */
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	/* A non-zero variant adds the per-writer mask registers below. */
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/*
+	 * Reverse fallthrough ladder: each case resolves one writer's
+	 * registers and falls through to the next lower writer, so all of
+	 * writers[0..cnt-1] end up initialized.
+	 * NOTE(review): counts above 6 take 'default' and only writers 0-5
+	 * are resolved - confirm the parameter can never exceed 6.
+	 */
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					  CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+			 register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+					    CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	case 0:
+		/* unreachable: writers_cnt < 1 already returned above */
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Write writer @index's control ADR field (record address). */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Write writer @index's control CNT field. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Write writer @index's READER_SELECT data field. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Write writer @index's DYN data field. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Write writer @index's OFS data field. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Write writer @index's LEN data field. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/*
+ * Write writer @index's MASK_POINTER field.  Only valid when init resolved
+ * the mask fields (variant != 0); the assert catches misuse in debug builds.
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Flush one entry of writer @index's ctrl and data registers to hardware. */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Write writer @index's mask-control ADR field (variant != 0 only). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Write writer @index's mask-control CNT field (variant != 0 only). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Write writer @index's BYTE_MASK data field (variant != 0 only). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Flush one entry of writer @index's mask registers (variant != 0 only). */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard; consider a guard like FLOW_NTHW_TX_CPY_H_.
+ */
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow for one TX_CPY writer. */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;
+	nt_field_t *mp_writer_ctrl_addr;
+	nt_field_t *mp_writer_ctrl_cnt;
+
+	nt_register_t *mp_writer_data;
+	nt_field_t *mp_writer_data_reader_select;
+	nt_field_t *mp_writer_data_dyn;
+	nt_field_t *mp_writer_data_ofs;
+	nt_field_t *mp_writer_data_len;
+	/* Mask members below stay NULL unless the FPGA variant param != 0. */
+	nt_field_t *mp_writer_data_mask_pointer;
+
+	nt_register_t *mp_writer_mask_ctrl;
+	nt_field_t *mp_writer_mask_ctrl_addr;
+	nt_field_t *mp_writer_mask_ctrl_cnt;
+
+	nt_register_t *mp_writer_mask_data;
+	nt_field_t *mp_writer_mask_data_byte_mask;
+};
+
+/* Register/field shadow for one TX_CPY FPGA module instance. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no; /* instance number passed to init */
+	nt_fpga_t *mp_fpga;            /* owning FPGA handle (not owned) */
+
+	nt_module_t *m_tx_cpy;         /* MOD_TX_CPY module handle */
+
+	unsigned int m_writers_cnt;    /* from NT_TX_CPY_WRITERS product param */
+	struct tx_cpy_writers_s *m_writers; /* owned; freed by _delete() */
+};
+
+/* Allocate a zeroed tx_cpy_nthw; free with tx_cpy_nthw_delete(). */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+/* Resolve registers/fields; with p == NULL only probes for the module. */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+/* Per-writer shadow accessors; index must be < m_writers_cnt. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+/* Mask accessors require the variant with mask registers (see init). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized tx_ins_nthw instance.
+ * calloc() yields the same all-zero state as malloc+memset and, like the
+ * original, returns NULL on allocation failure.
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Scrub and free an instance allocated by tx_ins_nthw_new(); NULL is a no-op. */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p) {
+		/* clear stale register/field pointers before freeing */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the TX_INS module registers/fields for FPGA instance @n_instance.
+ *
+ * With p == NULL the call only probes: returns 0 when the module instance
+ * exists, -1 otherwise.  With a valid @p, fills in the shadow pointers and
+ * returns 0 on success, -1 when the instance does not exist.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle queried above; no second lookup needed. */
+	p->m_tx_ins = p_mod;
+
+	/* RCP control (ADR/CNT) and data registers with their fields */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Write the RCP record address (ADR) field of the TX_INS control register. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Write the RCP count (CNT) field of the TX_INS control register. */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Write the DYN field of the shadowed RCP data register. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Write the OFS field of the shadowed RCP data register. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Write the LEN field of the shadowed RCP data register. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Flush one entry of the shadowed ctrl and data registers to hardware. */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with a double underscore are reserved
+ * by the C standard; consider a guard like FLOW_NTHW_TX_INS_H_.
+ */
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow for one TX_INS FPGA module instance. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no; /* instance number passed to init */
+	nt_fpga_t *mp_fpga;            /* owning FPGA handle (not owned) */
+
+	nt_module_t *m_tx_ins;         /* MOD_TX_INS module handle */
+
+	/* RCP control register: record address (ADR) and count (CNT) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+};
+
+/* Allocate a zeroed tx_ins_nthw; free with tx_ins_nthw_delete(). */
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+/* Resolve registers/fields; with p == NULL only probes for the module. */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP shadow-register accessors; call tx_ins_nthw_rcp_flush() to commit. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_RPL module. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/*
+ * Allocate and zero-initialize a TX replacer (TX_RPL) context.
+ *
+ * @return the new context, or NULL on allocation failure.
+ */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc() allocates and zero-fills in one step (was malloc+memset). */
+	return calloc(1, sizeof(struct tx_rpl_nthw));
+}
+
+/*
+ * Destroy a context created by tx_rpl_nthw_new(). NULL is accepted.
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		/* Clear stale register/field pointers before freeing. */
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind a TX replacer context to a TX_RPL module instance on the FPGA and
+ * resolve all register/field handles.
+ *
+ * If @p is NULL the function only probes for the module: it returns 0 when
+ * the instance exists and -1 when it does not.
+ *
+ * @return 0 on success, -1 if the module instance is not present.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the handle queried above instead of a second identical lookup. */
+	p->m_tx_rpl = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP table: select the entry address to operate on. */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+/* RCP table: set the auto-increment count for consecutive accesses. */
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+/* Flush the shadowed RCP CTRL/DATA registers (one entry each) to HW. */
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table: select the entry address to operate on. */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+/* Flush the shadowed EXT CTRL/DATA registers (one entry each) to HW. */
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL table: select the entry address to operate on. */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+/* RPL table: write one replacement-data entry (four 32-bit words). */
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+/* Flush the shadowed RPL CTRL/DATA registers (one entry each) to HW. */
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard names starting with "__" are reserved identifiers in C. */
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one TX replacer (TX_RPL) module instance. */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_tx_rpl;
+
+	/* RCP: replace recipe table (control + data registers). */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* EXT: extension table (control + data registers). */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* RPL: replacement data table (control + data registers). */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 5/8] net/ntnic: adds FPGA abstraction layer
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-09-05 14:54   ` [PATCH v15 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-09-05 14:54   ` Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
v11:
* Fix dereferencing type-punned pointer in macro
v13:
* Fix typo spelling warnings
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  265 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14376 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Write a tunnel header into the ROA TUNHDR table at @index.
+ *
+ * 4 x 4 words (64 bytes) are written for IPv4, 8 x 4 words (128 bytes) for
+ * IPv6. Each group of four 32-bit words is written in reverse order and
+ * byte-swapped from network order (ntohl) before being handed to hw_mod.
+ *
+ * @return 0 on success, the OR of failing hw_mod return codes otherwise.
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program a ROA tunnel-configuration recipe at @index from the packed
+ * color_actions word (see the bitmask layout in flow_api_actions.h).
+ *
+ * Configures tunnel push (length/type, IP total-length and checksum
+ * pre-calculation), recirculation (bypass port overrides recirculate port),
+ * TX destination, and the related IOA ROA-EPP recipe selection.
+ *
+ * @return 0 on success, -1 on an invalid TX destination combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		/* 255 disables the bypass path when only a recirc port is set */
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* replicate to both ports: TX on PHY0, recirc-bypass to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program an IOA recipe at @index from the packed color_actions word:
+ * VXLAN/VLAN pop, VLAN push (TPID select + TCI split into VID/DEI/PCP)
+ * and optional RX queue override.
+ *
+ * @return 0 (always succeeds).
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* TCI layout: PCP[15:13] DEI[12] VID[11:0] */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): identifiers starting with "_" + uppercase are reserved in C. */
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* TX destination bit flags; PHY0|PHY1 means replicate to both ports. */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID selector values for the IOA recipe. */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/* Pack color/IOA/ROA indices into the pre-QSL-v6 color_action word. */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+/* Mask of bits a..b inclusive. NOTE(review): UB if a - b + 1 == 32; all
+ * uses in this header stay well below that width.
+ */
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/* Pack mark/IOA/ROA/QSL-HSH indices into the current color_action word. */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	/* 1U << 31: the signed form "1 << 31" overflows int (undefined). */
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Mark that a new recirculate port was allocated (bit 50). */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Tunnel IP type (bit 24): 0 = IPv4, 1 = IPv6. */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Pre-calculated tunnel IP header checksum (bits 49:34). */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Tunnel header length in bytes (bits 33:26); 0 means no tunnel. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* TX port (bits 23:20), stored as txport + ROA_TX_PHY0. */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Tunnel type (bits 19:16). */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Enable recirculation (bit 25) and record the port (bits 7:0). */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Return the recirculate port, or -1 if recirculation is not enabled. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Recirculate-bypass port (bits 15:8); non-zero overrides the recirc port. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  IOA action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action      bit offs  bits
+ *  --------------------------------
+ *  tci                15:0   16
+ *  queue             23:16    8   uses hbx
+ *  tpid select       27:24    4
+ *  pop vxlan         28       1
+ *  pop vlan          29       1
+ *  push vlan         30       1
+ *  queue override    31       1
+ */
+
+/* Request RX queue override: bit 31 set, host buffer index in bits 23:16. */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	/* 1ULL: the signed "1 << 31" is UB and sign-extends to bits 31..63
+	 * when converted to uint64_t, corrupting the upper action bits.
+	 */
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/* Return the override queue (host buffer index), or -1 if not requested. */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	/* Test only bit 31; the signed constant would also match bits 32-63. */
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* Request VXLAN (outer tunnel) pop (bit 28). */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Request outer VLAN pop (bit 29). */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1ad (QinQ) TPID for a pushed VLAN tag (bits 27:24). */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Request VLAN push (bit 30) with the full 16-bit TCI (bits 15:0). */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Request VLAN push (bit 30) setting only the PCP part of the TCI. */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+/* Implemented in flow_api_actions.c; see that file for per-field details. */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Registry of every HW flow module this backend can drive.  Each entry
+ * pairs the module's printable name with its version-independent
+ * lifecycle handlers; flow_api_backend_init()/_reset()/_done() iterate
+ * this table in order.
+ */
+static const struct {
+	const char *name;	/* printable name, used in log messages */
+	int (*allocate)(struct flow_api_backend_s *be);	/* alloc SW cache */
+	void (*free)(struct flow_api_backend_s *be);	/* release SW cache */
+	int (*reset)(struct flow_api_backend_s *be);	/* set module defaults */
+	bool (*present)(struct flow_api_backend_s *be);	/* module in this FPGA? */
+} module[] = {
+	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+		hw_mod_cat_present
+	},
+	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+		hw_mod_km_present
+	},
+	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+		hw_mod_flm_present
+	},
+	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+		hw_mod_hsh_present
+	},
+	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+		hw_mod_hst_present
+	},
+	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+		hw_mod_qsl_present
+	},
+	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+		hw_mod_slc_present
+	},
+	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+	},
+	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+		hw_mod_pdb_present
+	},
+	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+		hw_mod_ioa_present
+	},
+	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+		hw_mod_roa_present
+	},
+	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+		hw_mod_rmc_present
+	},
+	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+		hw_mod_tpe_present
+	},
+};
+
+/* number of entries in the module registry above */
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+/*
+ * Allocate one contiguous, zero-initialized area holding 'sets'
+ * consecutive element arrays and hand a pointer to each array back to
+ * the caller.
+ *
+ * The variadic arguments come in triplets, one triplet per set:
+ *   void **plist    - out: receives the start of that set's array
+ *   int    cnt      - number of elements in the set
+ *   int    elem_size - size in bytes of one element
+ *
+ * Each set is preceded by EXTRA_INDEXES reserved elements (currently 0,
+ * per the enum in flow_api_backend.h) and the returned per-set pointer
+ * is advanced past them.
+ *
+ * The base pointer and total size are stored in 'mod' so that
+ * zero_module_cache() and the module free handlers can operate on the
+ * whole area.  Returns the base pointer, or NULL on allocation failure
+ * (in which case mod->base is NULL as well).
+ */
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	/* first pass: record each set's layout and total the size needed */
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		/* room reserved in front of each set for the extra indexes */
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		/* second pass: carve the area into per-set slices */
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
+/* Clear the module's entire cached register image back to all-zero. */
+void zero_module_cache(struct common_func_s *mod)
+{
+	void *cache = mod->base;
+	size_t bytes = mod->allocated_size;
+
+	memset(cache, 0, bytes);
+}
+
+/*
+ * Bind the backend to its ops vector and device handle, query the NIC's
+ * dimensions, then allocate and reset the SW cache of every module the
+ * FPGA reports as present.  Returns 0 on success; on any module failure
+ * all modules are torn down again and -1 is returned.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+
+	/* Build the version-independent SW representation of each module */
+	for (unsigned int i = 0; i < MOD_COUNT; i++) {
+		if (!module[i].present(dev))
+			continue;
+		/* reset is attempted only after a successful allocation */
+		if (module[i].allocate(dev) != 0 || module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[i].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Reset every registered module back to its defaults.  Stops at the
+ * first module whose reset handler fails and returns -1; 0 otherwise.
+ */
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int i = 0; i < MOD_COUNT; i++) {
+		if (module[i].reset(dev) != 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[i].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Release the cached resources of every registered module.  Always
+ * returns 0.
+ */
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < MOD_COUNT; i++)
+		module[i].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+/*
+ * Log-and-return helpers: each reports one specific backend error via
+ * NT_LOG and returns a distinct negative code that callers propagate
+ * directly as the API result.
+ */
+
+/* A table/bank index exceeded the module's resource count; returns -2. */
+static inline int error_index_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+	return -2;
+}
+
+/* A word offset exceeded the width of the addressed field; returns -3. */
+static inline int error_word_off_too_large(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+	return -3;
+}
+
+/* The FPGA exposes a module version this SW layer has no support for;
+ * returns -4.
+ */
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+/* A field id is not available in the active module version; returns -5. */
+static inline int error_unsup_field(const char *func)
+{
+	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+		func);
+	return -5;
+}
+
+/* A resource count in the module is too small for the request; returns -4
+ * (same code as error_unsup_ver).
+ */
+static inline int error_resource_count(const char *func, const char *resource,
+	const char *mod, int ver)
+{
+	NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
+	       "%s ver %i.%i\n",
+	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+	return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+/* Move a 32-bit value between a cached register field and the caller:
+ * get != 0 copies the cache into *val, get == 0 copies *val into the cache.
+ */
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+	uint32_t *dst = get ? val : cached_val;
+	const uint32_t *src = get ? cached_val : val;
+
+	*dst = *src;
+}
+
+/* Like get_set(), but the cached field is signed: the value is cast
+ * between int32_t and uint32_t on the way through.
+ */
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+	if (get) {
+		*val = (uint32_t)*cached_val;
+		return;
+	}
+	*cached_val = (int32_t)*val;
+}
+
+/* Search the array of nb_elements fixed-size records at be_module_reg
+ * (starting from 'start', skipping record 'idx' itself) for a record
+ * byte-identical to record 'idx'.  Only valid for get operations;
+ * *value receives the index of the first match, or NOT_FOUND.
+ * Returns 0 on success or a negative error code.
+ */
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+	const uint8_t *ref = base + idx * type_size;
+
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (unsigned int i = start; i < nb_elements; i++) {
+		if (i == idx)
+			continue;
+		if (memcmp(ref, base + i * type_size, type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/* Compare records 'idx' and 'cmp_idx' of the fixed-size record array at
+ * be_module_reg.  Only valid for get operations.  Returns 1 when the two
+ * records are distinct indexes with identical bytes, 0 when they differ
+ * (or idx == cmp_idx), or a negative error code.
+ */
+static inline int do_compare_indexes(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+	unsigned int nb_elements, int get, const char *func)
+{
+	const uint8_t *base = (const uint8_t *)be_module_reg;
+
+	if (!get)
+		return error_unsup_field(func);
+	if (cmp_idx >= nb_elements)
+		return error_index_too_large(func);
+	if (idx == cmp_idx)
+		return 0;
+	return memcmp(base + idx * type_size, base + cmp_idx * type_size,
+		      type_size) == 0;
+}
+
+/* Return 1 if any of the n bytes at addr is non-zero, 0 otherwise
+ * (an empty range is all-zero).
+ */
+static inline int is_non_zero(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+	const uint8_t *end = p + n;
+
+	while (p < end) {
+		if (*p++)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return 1 if all n bytes at addr are 0xff, 0 otherwise
+ * (an empty range counts as all-set).
+ */
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+	const uint8_t *p = (const uint8_t *)addr;
+	const uint8_t *end = p + n;
+
+	while (p < end) {
+		if (*p++ != 0xff)
+			return 0;
+	}
+	return 1;
+}
+
+enum cte_index_e {
+	CT_COL = 0,
+	CT_COR = 1,
+	CT_HSH = 2,
+	CT_QSL = 3,
+	CT_IPF = 4,
+	CT_SLC = 5,
+	CT_PDB = 6,
+	CT_MSK = 7,
+	CT_HST = 8,
+	CT_EPP = 9,
+	CT_TPE = 10,
+	CT_RRB = 11,
+	CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+enum frame_offs_e {
+	DYN_SOF = 0,
+	DYN_L2 = 1,
+	DYN_FIRST_VLAN = 2,
+	DYN_MPLS = 3,
+	DYN_L3 = 4,
+	DYN_ID_IPV4_6 = 5,
+	DYN_FINAL_IP_DST = 6,
+	DYN_L4 = 7,
+	DYN_L4_PAYLOAD = 8,
+	DYN_TUN_PAYLOAD = 9,
+	DYN_TUN_L2 = 10,
+	DYN_TUN_VLAN = 11,
+	DYN_TUN_MPLS = 12,
+	DYN_TUN_L3 = 13,
+	DYN_TUN_ID_IPV4_6 = 14,
+	DYN_TUN_FINAL_IP_DST = 15,
+	DYN_TUN_L4 = 16,
+	DYN_TUN_L4_PAYLOAD = 17,
+	DYN_EOF = 18,
+	DYN_L3_PAYLOAD_END = 19,
+	DYN_TUN_L3_PAYLOAD_END = 20,
+	SB_VNI = SWX_INFO | 1,
+	SB_MAC_PORT = SWX_INFO | 2,
+	SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/*
+ * FLM module accessor API.  Fields are addressed with the hw_flm_e enum
+ * above.  Naming convention used throughout this header: _set()/_get()
+ * access a single field of the driver-side state, _flush() presumably
+ * pushes that state to the FPGA and _update() reads it back --
+ * TODO(review): confirm against the corresponding .c implementation.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* Load counter groups: BIN/PPS/LPS/APS (see HW_FLM_LOAD_* fields above). */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/*
+ * PST and RCP are indexed tables: accessors take an entry index and
+ * flush takes a start index plus entry count.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* set_mask takes a value array (multi-word field, cf. HW_FLM_RCP_MASK). */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/info/status record transfer (multi-word data, word_cnt sized). */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH module state: common bookkeeping plus versioned register image. */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp; /* number of RCP entries supported by this FPGA */
+	union {
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+/* Function selectors and field identifiers for the HSH accessors below. */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK,
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* word_off selects the 32-bit word within multi-word fields (e.g. SEED). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL module state: common bookkeeping plus versioned register image. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories; /* number of RCP entries */
+	uint32_t nb_qst_entries;    /* number of QST table entries */
+	union {
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+/* Function selectors and field identifiers for the QSL accessors below. */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC module state: common bookkeeping plus versioned register image. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+/* Function selectors and field identifiers for the SLC accessors below. */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/*
+ * SLC LR module state; field set mirrors enum hw_slc_e above but uses a
+ * separate enum/struct so the two modules can evolve independently.
+ */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB module state: common bookkeeping plus versioned register image. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories; /* number of RCP entries */
+
+	union {
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+/* Function selectors and field identifiers for the PDB accessors below. */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+/* NOTE(review): config has set+flush but no _get(), unlike the other
+ * modules in this header -- confirm the asymmetry is intentional.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module state: common bookkeeping plus versioned register image. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;  /* number of RCP entries */
+	uint32_t nb_roa_epp_entries; /* number of ROA EPP entries */
+	union {
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+/* Function selectors and field identifiers for the IOA accessors below. */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA module state: common bookkeeping plus versioned register image. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories; /* number of tunnel config entries */
+	uint32_t nb_lag_entries;    /* number of LAG config entries */
+	union {
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+/* Function selectors and field identifiers for the ROA accessors below. */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+/* TUNHDR is multi-word per entry: word_off selects the 32-bit word. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* IGS/RCC drop counters (see HW_ROA_IGS_* / HW_ROA_RCC_* fields). */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module state: common bookkeeping plus versioned register image. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+/*
+ * RMC field identifiers.
+ * NOTE(review): unlike the other hw_*_e enums in this header there is no
+ * leading function-constant section; values start directly at
+ * FIELD_START_INDEX -- confirm this is intentional.
+ */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/*
+ * TPE module state.  Two register-image versions are supported; which
+ * union member is valid is presumably decided by the detected module
+ * version -- TODO(review): confirm in the .c implementation.
+ */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;     /* number of RCP entries */
+	uint32_t nb_ifr_categories;     /* number of IFR entries */
+	uint32_t nb_cpy_writers;        /* number of TX copy writers */
+	uint32_t nb_rpl_depth;          /* replace table depth */
+	uint32_t nb_rpl_ext_categories; /* number of replace-ext entries */
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/* Function selectors and field identifiers for the TPE accessors below. */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+/* One set/get/flush triple per TPE sub-block (RPP, IFR, INS, RPL, ...). */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+/* rpl_rpl_set takes a value array (multi-word HW_TPE_RPL_RPL_VALUE). */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug mode passed to set_debug_mode(); values are bit flags. */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operations vtable.  An implementation (FPGA or simulator)
+ * supplies one instance; the dev/be_dev opaque pointer is passed through
+ * to every callback.  get_nb_* ops report FPGA capability/capacity
+ * parameters; per-module *_flush/*_update ops transfer the module shadow
+ * state (the struct *_func_s types above) to or from the device.
+ */
+struct flow_api_backend_ops {
+	int version; /* backend API version implemented by this vtable */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	/* capability/capacity queries */
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	/* lrn/inf/sta transfer raw record buffers; size is in 32-bit words
+	 * presumably -- TODO(review): confirm units in the implementation.
+	 */
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level backend context: the opaque device handle, the ops vtable,
+ * the per-module shadow state, and cached NIC capability values.
+ */
+struct flow_api_backend_s {
+	void *be_dev; /* opaque handle passed to every iface callback */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: init binds iface/be_dev into dev; done tears down. */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..b63730c07e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT,
+	RES_INVALID
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD,
+	KM_USE_EXTRACTOR_SWORD,
+};
+
+/* One collected match element (key fragment) before KM key formatting */
+struct match_elem_s {
+	enum extractor_e extr; /* extractor word type selected (QWORD/SWORD) */
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4]; /* match data, up to 128 bits */
+	uint32_t e_mask[4]; /* per-bit mask applied to e_word */
+
+	int extr_start_offs_id; /* presumably a frame_offs_e anchor (see km_add_match_elem) - TODO confirm */
+	int8_t rel_offs; /* byte offset relative to the anchor */
+	uint32_t word_len; /* number of valid 32-bit words in e_word/e_mask */
+};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+/* 64-bit KCC CAM key packed as bit-fields (see kcc_key_add_* setters) */
+struct kcc_key_s {
+	uint64_t sb_data : 32; /* sideband data: VLAN TPID|VID, tagged VXLAN VNI, or 0xffffffff for none */
+	uint64_t sb_type : 8; /* sideband type: 0 = none, 1 = VLAN, 2 = VXLAN */
+	uint64_t cat_cfn : 8; /* categorizer CFN index */
+	uint64_t port : 16; /* port number */
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64;
+		uint32_t key32[2];
+		struct kcc_key_s key;
+	};
+	uint32_t km_category;
+	uint32_t id;
+
+	uint8_t *kcc_unique_ids;
+
+	int flushed_to_target;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d;
+	uint32_t user_port_id;
+	uint8_t len;
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc;
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+enum special_partial_match_e {
+	SPECIAL_MATCH_NONE,
+	SPECIAL_MATCH_LACP,
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions;
+	uint64_t ioa_actions;
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select;
+		uint32_t dyn;
+		uint32_t ofs;
+		uint32_t len;
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable;
+	uint8_t ttl_sub_ipv4;
+	uint8_t ttl_sub_outer;
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
+struct flow_handle {
+	enum flow_handle_type type; /* discriminator for the union below */
+
+	struct flow_eth_dev *dev;
+	struct flow_handle *next; /* doubly linked list of handles */
+	struct flow_handle *prev;
+
+	union {
+		/* valid when type == FLOW_HANDLE_TYPE_FLOW */
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		/* valid when type == FLOW_HANDLE_TYPE_FLM */
+		struct {
+			uint32_t flm_data[10];
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			struct flow_handle *flm_owner; /* back-pointer to the owning FLOW-type handle */
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+/* Per translated group: reference count and pointer back to the forward map */
+struct group_lookup_entry_s {
+	uint64_t ref_counter; /* number of (owner, group_in) mappings using this group */
+	uint32_t *reverse_lookup; /* points into translation_table; cleared when count hits 0 */
+};
+
+struct group_handle_s {
+	uint32_t group_count; /* number of hardware groups managed */
+
+	uint32_t *translation_table; /* forward map: (owner_id, group_in) -> translated group, 0 = unused */
+
+	struct group_lookup_entry_s *lookup_entries; /* indexed by translated group */
+};
+
+/*
+ * Allocate a group translation handle for 'group_count' groups.
+ * Returns 0 on success, -1 on allocation failure (*handle left NULL).
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+	struct group_handle_s *group_handle;
+
+	/* Check the handle allocation before touching members: the original
+	 * code dereferenced a possibly-NULL pointer and only tested *handle
+	 * at the very end.
+	 */
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	if (*handle == NULL)
+		return -1;
+	group_handle = *handle;
+
+	group_handle->group_count = group_count;
+	group_handle->translation_table =
+		calloc((uint32_t)(group_count * OWNER_ID_COUNT),
+		       sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	/* Roll back on partial failure to avoid leaks and later NULL
+	 * dereferences in translate_get/release.
+	 */
+	if (group_handle->translation_table == NULL ||
+			group_handle->lookup_entries == NULL) {
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+		free(*handle);
+		*handle = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Free a group translation handle; a NULL handle is a no-op. Always 0. */
+int flow_group_handle_destroy(void **handle)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)*handle;
+
+	if (group_handle == NULL)
+		return 0;
+
+	free(group_handle->translation_table);
+	free(group_handle->lookup_entries);
+	free(group_handle);
+	*handle = NULL;
+
+	return 0;
+}
+
+/*
+ * Translate (owner_id, group_in) to a hardware group, allocating a free
+ * translated group on first use and bumping its reference count otherwise.
+ * Returns 0 on success (*group_out set), -1 on bad input or exhaustion.
+ */
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	/*
+	 * The translation table holds OWNER_ID_COUNT rows of group_count
+	 * entries each (see flow_group_handle_create), so the per-owner
+	 * stride is group_count. Indexing with an OWNER_ID_COUNT stride, as
+	 * before, reads/writes out of bounds (or aliases rows) whenever
+	 * group_count != OWNER_ID_COUNT.
+	 */
+	table_ptr = &group_handle->translation_table[(uint32_t)owner_id *
+				 group_handle->group_count + group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		/* No mapping yet: find the first unused translated group */
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			return -1;
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+/*
+ * Drop one reference on a translated group; when the count reaches zero
+ * the forward mapping is cleared so the group can be reallocated.
+ */
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *entry;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Group 0 is never translated, so there is nothing to release */
+	if (translated_group == 0)
+		return 0;
+
+	entry = &group_handle->lookup_entries[translated_group];
+
+	if (entry->reverse_lookup != NULL && entry->ref_counter > 0 &&
+			--entry->ref_counter == 0) {
+		*entry->reverse_lookup = 0;
+		entry->reverse_lookup = NULL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+static uint32_t shuffle(uint32_t x)
+{
+	/* Fixed bit permutation of x (bit 1 <-> bit 30, remaining bits
+	 * redistributed by the two 3-bit shifts).
+	 */
+	uint32_t r = (x & 0x00000002) << 29;
+
+	r |= (x & 0xAAAAAAA8) >> 3;
+	r |= (x & 0x15555555) << 3;
+	r |= (x & 0x40000000) >> 29;
+	return r;
+}
+
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+	/* Rotate x right by s, inverting the bits that wrap around.
+	 * Requires 0 < s < 32 (s == 0 would shift by 32, which is UB).
+	 */
+	const uint32_t straight = x >> s;
+	const uint32_t wrapped = (~x) << (32 - s);
+
+	return straight | wrapped;
+}
+
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+	/* Non-linear combiner over four rotated-inverted views of x and y */
+	const uint32_t a = ror_inv(x, 15);
+	const uint32_t b = ror_inv(x, 13);
+	const uint32_t c = ror_inv(y, 3);
+	const uint32_t d = ror_inv(y, 27);
+	/* Per bit position: set when exactly two of a, b, c, d are set */
+	const uint32_t sel = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
+			     (a & ~c & ~b & d) | (~a & c & b & ~d) |
+			     (~a & c & ~b & d) | (~a & ~c & b & d);
+
+	return x ^ y ^ sel;
+}
+
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+	/* One 32-bit mixing round: combine the words, then permute the bits */
+	uint32_t combined = combine(x, y);
+
+	return shuffle(combined);
+}
+
+/*
+ * 64-bit variants of ror_inv() operating on two independent 32-bit lanes
+ * packed into one uint64_t. The mask covers the bit positions within each
+ * lane that receive wrapped-around bits; those bits are taken inverted
+ * (the | sets them, the ^ with the shifted-in bits negates them), which
+ * also discards any cross-lane contamination from the 64-bit shifts.
+ */
+static uint64_t ror_inv3(uint64_t x)
+{
+	const uint64_t wrap_mask = 0xE0000000E0000000ULL;
+
+	return ((x >> 3) | wrap_mask) ^ ((x << 29) & wrap_mask);
+}
+
+static uint64_t ror_inv13(uint64_t x)
+{
+	const uint64_t wrap_mask = 0xFFF80000FFF80000ULL;
+
+	return ((x >> 13) | wrap_mask) ^ ((x << 19) & wrap_mask);
+}
+
+static uint64_t ror_inv15(uint64_t x)
+{
+	const uint64_t wrap_mask = 0xFFFE0000FFFE0000ULL;
+
+	return ((x >> 15) | wrap_mask) ^ ((x << 17) & wrap_mask);
+}
+
+static uint64_t ror_inv27(uint64_t x)
+{
+	const uint64_t wrap_mask = 0xFFFFFFE0FFFFFFE0ULL;
+
+	return ((x >> 27) | wrap_mask) ^ ((x << 5) & wrap_mask);
+}
+
+static uint64_t shuffle64(uint64_t x)
+{
+	/* Apply the shuffle() bit permutation to both 32-bit lanes at once */
+	uint64_t r = (x & 0x0000000200000002ULL) << 29;
+
+	r |= (x & 0xAAAAAAA8AAAAAAA8ULL) >> 3;
+	r |= (x & 0x1555555515555555ULL) << 3;
+	r |= (x & 0x4000000040000000ULL) >> 29;
+	return r;
+}
+
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+	/* Concatenate two 32-bit words: x becomes the high half, y the low */
+	uint64_t hi = (uint64_t)x << 32;
+
+	return hi | y;
+}
+
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+	/* Two-lane version of combine(): same non-linear selector */
+	const uint64_t a = ror_inv15(x);
+	const uint64_t b = ror_inv13(x);
+	const uint64_t c = ror_inv3(y);
+	const uint64_t d = ror_inv27(y);
+	/* Per bit position: set when exactly two of a, b, c, d are set */
+	const uint64_t sel = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
+			     (a & ~c & ~b & d) | (~a & c & b & ~d) |
+			     (~a & c & ~b & d) | (~a & ~c & b & d);
+
+	return x ^ y ^ sel;
+}
+
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+	/* One two-lane mixing round: combine, then permute both lanes */
+	uint64_t combined = combine64(x, y);
+
+	return shuffle64(combined);
+}
+
+/*
+ * Hash 16 x 32-bit key words down to a single 32-bit value via a binary
+ * tree of mix64()/mix() rounds, then two finalization rounds (diagram
+ * below). Purely combinational - no state, deterministic per key.
+ */
+static uint32_t calc16(const uint32_t key[16])
+{
+	/* 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15   Layer 0   */
+	/*  \./     \./     \./     \./     \./     \./     \./     \./              */
+	/*   0       1       2       3       4       5       6       7     Layer 1   */
+	/*    \__.__/         \__.__/         \__.__/         \__.__/                */
+	/*       0               1               2               3         Layer 2   */
+	/*        \______.______/                 \______.______/                    */
+	/*               0                               1                 Layer 3   */
+	/*                \______________.______________/                            */
+	/*                               0                                 Layer 4   */
+	/*                              / \                                          */
+	/*                              \./                                          */
+	/*                               0                                 Layer 5   */
+	/*                              / \                                          */
+	/*                              \./                                Layer 6   */
+	/*                             value                                         */
+
+	uint64_t z;
+	uint32_t x;
+
+	/* Layers 0-3: pair the words so each mix64 processes two lanes */
+	z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+			mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+		  mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+			mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+	/* Layers 4-6: fold the two lanes together and finalize */
+	x = mix((uint32_t)(z >> 32), (uint32_t)z);
+	x = mix(x, ror_inv(x, 17));
+	x = combine(x, ror_inv(x, 17));
+
+	return x;
+}
+
+/*
+ * Compute per-bank CAM record indexes for a 16-word key.
+ * result[i] receives the record index for bank i (consumed from the low
+ * bits upward, cam_records_bw bits per bank); the raw 32-bit hash is
+ * returned. result must have room for hsh->banks entries.
+ */
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val; /* widen the 32-bit hash to cover all banks */
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+/*
+ * Initialize the hasher geometry for a CAM with 'banks' banks of
+ * 'nb_records' records each. Derives the per-bank index bit-width from
+ * nb_records. Note: nb_records must be >= 2 (log2(nb_records - 1) is
+ * evaluated). Always returns 0.
+ */
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	/* Fixed: previously passed the undeclared identifier '_banks',
+	 * which broke the build whenever TESTING was defined.
+	 */
+	int res = hash_test(hsh, banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+/*
+ * Self-test for the hasher (compiled only with TESTING defined).
+ * Hashes a fixed key and compares against a precomputed hash and the
+ * per-bank indexes reconstructed by hand for the first three banks.
+ * Returns the number of mismatches (0 = pass).
+ * NOTE(review): inval[16] has only 4 explicit initializers - the rest are
+ * zero per C aggregate-initialization rules; the expected constant below
+ * presumably matches that zero-padded key. val/resval are sized 10 but
+ * only 3 entries are used, so banks > 3 are not fully checked.
+ */
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65;
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	/* Reproduce gethash()'s widening and per-bank slicing by hand */
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+/* Derived CAM hashing geometry, filled in by init_hasher() */
+struct hasher_s {
+	int banks; /* number of CAM banks */
+	int cam_records_bw; /* bits needed to index a record within one bank */
+	uint32_t cam_records_bw_mask; /* (1 << cam_records_bw) - 1 */
+	int cam_bw; /* banks * cam_records_bw: total hash bits consumed */
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+/* Occupancy record for one KCC CAM cell */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner; /* flow occupying this cell; NULL = free */
+	int ref_cnt; /* number of flows sharing this KCC key */
+};
+
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * Attach this KCC flow to the per-NIC shared CAM management block,
+ * allocating it on first use. The single allocation is partitioned into
+ * the CAM distribution table, the cuckoo-move counter, the unique-ID
+ * bitmap and the hasher state (in that order).
+ */
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/* Leave all sub-pointers NULL on allocation failure
+			 * instead of computing offsets from a NULL base
+			 * (undefined behavior in the original code).
+			 */
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Failed to allocate KCC-CAM record manager\n");
+			kcc->cam_dist = NULL;
+			kcc->cuckoo_moves = NULL;
+			kcc->kcc_unique_ids = NULL;
+			kcc->hsh = NULL;
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+/* Release the shared KCC-CAM management block and clear the handle */
+void kcc_free_ndev_resource_management(void **handle)
+{
+	void *mgr = *handle;
+
+	*handle = NULL;
+	if (mgr == NULL)
+		return;
+
+	free(mgr);
+	NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	/* No sideband info: type 0 with all-ones data */
+	kcc->key.sb_type = 0;
+	kcc->key.sb_data = 0xffffffff;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	/* Sideband type 1: TPID in the upper 16 bits, 12-bit VID below */
+	uint32_t data = (uint32_t)tpid << 16;
+
+	data |= (uint32_t)(vid & 0x0fff);
+	kcc->key.sb_data = data;
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	/* Sideband type 2: 24-bit VNI tagged with 0x02 in the top byte */
+	kcc->key.sb_data = 0x02000000 | (vni & 0x00ffffff);
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	/* Port field of the KCC key */
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	/* Categorizer CFN field of the KCC key */
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * Other settings for the KCC CAM entry
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	/* KM category this KCC lookup resolves to */
+	kcc->km_category = category;
+	return 0;
+}
+
+/*
+ * Allocate the lowest free unique KCC ID from the shared bitmap.
+ * Returns the ID (also stored in kcc->id) or -1 when the space is full.
+ */
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	for (uint32_t byte = 0; byte < BE_UNIQUE_IDS_SIZE; byte++) {
+		/* Skip fully occupied bitmap bytes */
+		if (kcc->kcc_unique_ids[byte] == 0xff)
+			continue;
+
+		for (uint32_t bit = 0; bit < 8; bit++) {
+			uint8_t bit_mask = (uint8_t)(1U << bit);
+
+			if (kcc->kcc_unique_ids[byte] & bit_mask)
+				continue;
+
+			/* Claim the lowest free bit in this byte */
+			kcc->kcc_unique_ids[byte] =
+				(uint8_t)(kcc->kcc_unique_ids[byte] |
+					  bit_mask);
+			kcc->id = (uint16_t)(byte * 8 + bit);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+/* Return kcc->id to the shared bitmap and mark the flow's ID invalid */
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	/* Locate the bitmap byte and bit holding this ID */
+	uint32_t byte = kcc->id >> 3;
+	uint8_t bit_mask = (uint8_t)(1U << (kcc->id & 7));
+
+	assert(byte < BE_UNIQUE_IDS_SIZE);
+	if (byte >= BE_UNIQUE_IDS_SIZE)
+		return;
+
+	assert(kcc->kcc_unique_ids[byte] & bit_mask);
+	kcc->kcc_unique_ids[byte] =
+		(uint8_t)(kcc->kcc_unique_ids[byte] & ~bit_mask);
+	NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+	kcc->id = KCC_ID_INVALID;
+}
+
+/* Compare two KCC keys; returns 1 when equal, 0 otherwise */
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	/* The union makes the packed 64-bit images directly comparable */
+	return kcc->key64 == kcc1->key64 ? 1 : 0;
+}
+
+/*
+ * Write this flow's KCC entry (key, KM category, ID) into the CAM cell
+ * for the given bank and flush it to hardware; then mark the cell as
+ * owned in the software distribution table.
+ * Returns 0 on success, -1 on any register-write failure.
+ */
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+/*
+ * Zero this flow's CAM cell in the given bank (key, category, ID), flush
+ * to hardware, and release the cell in the software distribution table.
+ * Also clears the flow's cached key/category; the allocated unique ID is
+ * intentionally kept and freed separately by the caller.
+ * Returns 0 on success, -1 on any register-write failure.
+ */
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+/*
+ * Try to relocate an existing CAM-resident flow to a free cell in any
+ * other bank (one cuckoo-hashing move). Returns 1 when the flow was
+ * moved, 0 when no free cell exists or the rewrite failed.
+ */
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller If you change this code in future updates, this may no
+			 * longer be true then!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo move: try to relocate the occupant of cam_dist cell
+ * 'bank_idx', descending up to 'levels' moves deep. Cells already on the
+ * current move path are tracked in the file-level reserved stack so the
+ * recursion never revisits them. Returns 1 when a chain of moves freed
+ * the cell, 0 otherwise.
+ * NOTE(review): the reserved stack is a file-level static, so this
+ * assumes flow programming is single-threaded - confirm against callers.
+ */
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* Reserve this cell for the remainder of the move chain */
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		/* Skip cells already claimed by an outer recursion level */
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			/* The deeper move freed a cell; our own move must
+			 * now succeed
+			 */
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/* Scratch key for hashing; words 2-15 stay zero. File-level static, so
+ * NOT reentrant - assumes single-threaded flow programming (TODO confirm).
+ */
+static uint32_t kcc_hsh_key[16];
+
+/*
+ * Hash the flow's KCC key into per-bank record indexes and insert the
+ * entry into the CAM: take a free bank if one exists, otherwise try a
+ * bounded cuckoo reshuffle (depth 4) to make room.
+ * Returns 0 on success, -1 when no cell could be freed or the HW write
+ * failed.
+ */
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	/* Words are swapped relative to key32[] order (see log below) */
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	/* NOTE(review): logs indexes 0-2 only - presumably kcc_banks >= 3 */
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1;
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+/*
+ * Program this flow's KCC entry into the CAM.
+ * Returns 0 on success, -1 on failure (see kcc_write_data_to_cam()).
+ */
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	/* key64 is uint64_t: use PRIx64 instead of "%016lx", which
+	 * mis-sizes the argument on 32-bit targets; km_category/id are
+	 * unsigned, so print with %u.
+	 */
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016" PRIx64
+	       ", KM category %u, id %u\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+/* Remove the flow's entry from the CAM if it was ever programmed */
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	if (!kcc->flushed_to_target)
+		return 0;
+
+	int res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+
+	kcc->flushed_to_target = 0;
+	kcc->bank_used = 0;
+	return res;
+}
+
+/*
+ * Add one reference to the CAM entry this flow shares; returns the new
+ * reference count.
+ */
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	/* key64 is uint64_t: "%016lx" is wrong on ILP32 - use PRIx64 */
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016" PRIx64
+	       ", KM category %u, id %u (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+/*
+ * Drop one reference on the shared CAM entry; when it reaches zero the
+ * entry is cleared from hardware. Returns the remaining count, or -1 if
+ * the flow's bank index is out of range.
+ */
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	/* key64 is uint64_t: "%016lx" is wrong on ILP32 - use PRIx64 */
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016" PRIx64
+	       ", KM category %u, id %u (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Key masks that are known to fit in the KM CAM.
+ * A match element whose mask equals one of these entries may be placed
+ * in CAM; any other mask pattern is marked for TCAM (see
+ * km_add_match_elem()).
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Shadow ownership map of KM CAM records (one cell per bank/record) */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index of a (bank, record) cell; expects a 'km' variable in scope */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/* Cell of the record 'km' currently targets in the given bank */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/* NOTE(review): file-scope scratch for the recursive cuckoo move -
+ * not thread-safe; confirm callers are single threaded
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Shadow ownership map of KM TCAM records */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach the per-NIC-device CAM/TCAM record manager to this KM flow
+ * definition, allocating it on first use.
+ *
+ * The single allocation behind *handle is carved into four regions:
+ *   [CAM ownership table][cuckoo-move counter][TCAM ownership table][hasher]
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		if (!*handle) {
+			/* Do not compute region pointers from a failed
+			 * allocation - init_hasher() would write through
+			 * an invalid pointer
+			 */
+			NT_LOG(ERR, FILTER,
+			       "Failed to allocate NIC DEV CAM and TCAM record manager\n");
+			return;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	/* Carve the shared allocation into its regions */
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Release the shared CAM/TCAM record manager and clear the handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	void *mgr = *handle;
+
+	*handle = NULL;
+	if (!mgr)
+		return;
+
+	free(mgr);
+	NT_LOG(DBG, FILTER,
+	       "Free NIC DEV CAM and TCAM record manager\n");
+}
+
+/*
+ * Append one match element (key words, masks and extraction info) to
+ * the KM flow definition.
+ *
+ * A word_len of 3 is padded to 4; only lengths 1, 2 and 4 are valid.
+ * The element is pre-classified against the cam_masks table: a known
+ * CAM-friendly mask allows CAM placement, anything else is marked for
+ * TCAM.
+ *
+ * Returns 0 on success, -1 on invalid length or when the match table
+ * is already full.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* Guard against overrunning the fixed-size match table */
+	if (km->num_ftype_elem >= MAX_MATCH_FIELDS)
+		return -1;
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM - no need to scan further */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+				break;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable attaching the color/info word to this KM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = (on != 0);
+}
+
+/*
+ * Find the first unmarked, non-sideband match element of the requested
+ * word size. Returns its index, or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (marked[i])
+			continue;
+		if (km->match[i].extr_start_offs_id & SWX_INFO)
+			continue;
+		if (km->match[i].word_len == size)
+			return i;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Human-readable name for a frame/sideband offset id (debug logging only) */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout from the registered match elements:
+ * pack up to MAX_QWORDS quad-words first, then up to MAX_SWORDS single
+ * words, then any sideband (SWX) words, filling entry_word[] and
+ * entry_mask[]. Decides whether the key targets CAM or TCAM; for TCAM
+ * the key length is padded to a supported size and the legal start-bank
+ * offsets are computed.
+ *
+ * Returns 0 on success, -1 when the elements cannot be mapped.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * Fix: populate each of the four possible start
+			 * offsets (the original wrote only index 0, leaving
+			 * offsets 1-3 stale while num_start_offsets said 4)
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare a new KM flow definition (km) against an existing one (km1)
+ * to decide whether the existing KM recipe and flow type can be reused.
+ *
+ * Returns:
+ *   0  - not compatible, cannot reuse
+ *  -1  - identical masked key already programmed (flow clash)
+ *  >0  - km1's flow type, which the new flow should reuse
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/* NOTE(review): this loop mixes km->match[i] and km->match_map[i];
+	 * both cover the same element set once the key is created, but
+	 * confirm the mixed indexing is intentional
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Program KM recipe 'index' in hardware from this flow definition:
+ * assign QWORD/SWORD (and sideband/SWX) extractors in match_map order,
+ * write key mask A, then do target-specific setup - CAM: info/color,
+ * entry length, flow-type mask and record pairing; TCAM: map the flow
+ * into free TCAM space, then set bank bitmap and key length.
+ * Returns 0 on success, -1 when extractor resources are exhausted or
+ * no TCAM mapping exists.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* sideband words are CAM-only, one allowed */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* plain single words use DW8 then DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* quad words use QW0 then QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this KM entry (key words + flow type) into the CAM at the
+ * record pre-selected for 'bank', claiming ownership in the shadow
+ * distribution table. A paired entry spills its remaining words into
+ * the record at index+1. Returns the OR of all backend results
+ * (0 on success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* remaining words go into the paired (next) record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero the CAM record(s) used by this KM entry and release ownership
+ * in the shadow table. Mirrors cam_populate().
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		/* paired entry - clear the second record too */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to relocate the CAM entry currently owned by 'km' into any other
+ * bank whose hash-selected record (pair) is free.
+ * Returns 1 when the entry was moved, 0 when no bank had room.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* paired entries also need record+1 free */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to free the CAM slot at
+ * 'bank_idx' by moving its current owner (and transitively that
+ * owner's blockers, down to 'levels' deep). The file-scope
+ * cam_addr_reserved_stack records slots on the current displacement
+ * path so they are not revisited.
+ * Returns 1 when the slot was freed, 0 otherwise.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this slot while exploring deeper displacements */
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Insert this KM entry into the CAM: hash the key words to pick one
+ * candidate record per bank, take the first free bank, or cuckoo-move
+ * existing entries (up to 4 levels deep) to make room.
+ * Returns 0 on success, -1 when no room could be made.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	/* NOTE(review): log reads indexes 0..2 - assumes at least 3 CAM
+	 * banks; confirm for all FPGA variants
+	 */
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Search the TCAM for a record index that is free across all
+ * key_word_size consecutive banks starting at start_bank.
+ * On success stores the record in km->tcam_record and returns 1,
+ * otherwise returns 0.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		int free_across_banks = 1;
+
+		for (int bank = 0; bank < km->key_word_size; bank++) {
+			if (km->tcam_dist[TCAM_DIST_IDX(start_bank + bank,
+							rec)].km_owner !=
+					NULL) {
+				free_across_banks = 0;
+				break;
+			}
+		}
+
+		if (free_across_banks) {
+			km->tcam_record = rec;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Choose a TCAM start bank and record for this flow by trying each
+ * allowed start offset in turn. Returns 0 on success, -1 otherwise.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	for (int i = 0; i < km->num_start_offsets; i++) {
+		int bank = km->start_offsets[i];
+
+		if (!tcam_find_free_record(km, bank))
+			continue;
+
+		km->tcam_start_bank = bank;
+		NT_LOG(DBG, FILTER,
+		       "Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word (with mask) into a TCAM bank for the
+ * given record: for each of the word's 4 bytes, set the record's bit
+ * in every byte-value row matching (value & mask) == (byte & mask) and
+ * clear it elsewhere, then flush the bank and claim ownership of
+ * (bank, record) in the shadow table.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Insert this KM entry into the TCAM: find a free record when the RCP
+ * is reused (tcam_record < 0), program color and flow type (TCI), then
+ * write each key word into its bank. Returns 0 on success.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		/* recipe reused from another flow - pick a fresh record */
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear one record's bit from every value entry of a TCAM bank and flush
+ * the bank.  Releases the record's ownership in the tcam_dist table.
+ * Returns 0 on success, otherwise a hw_mod error.
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	/* BUGFIX: use an unsigned constant - "1 << 31" is signed overflow (UB) */
+	uint32_t rec_bit = (1U << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove a previously written flow entry from the TCAM: clear its TCI
+ * (color and flow type) and reset its record bit in every used bank.
+ * Returns 0 on success, -1 when no entry location is set, or an
+ * accumulated hw_mod error.
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+	int tci_err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/* Write KM_TCI.  BUGFIX: these return values were previously
+	 * discarded; collect them separately so the bank resets below
+	 * still run even if the TCI clear fails.
+	 */
+	tci_err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR,
+				     km->tcam_start_bank, km->tcam_record, 0);
+	tci_err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				     km->tcam_record, 0);
+	tci_err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank,
+				       km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err | tci_err;
+}
+
+/*
+ * Make km reference an already written match entry km1, sharing its
+ * hardware location instead of writing a new one.  km is appended to the
+ * end of km1's reference chain and inherits the location fields for the
+ * active target.  Returns 0 on success, -1 for unsupported targets.
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+
+	/* link km in at the end of the reference chain rooted at km1 */
+	km->root = (km1->root != NULL) ? km1->root : km1;
+	while (tail->reference != NULL)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = km1->info;
+
+	if (km->target == KM_CAM) {
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+
+	if (km->target == KM_TCAM) {
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+		return 0;
+	}
+
+	/* KM_SYNERGY and any unknown target are not supported */
+	return -1;
+}
+
+/*
+ * Write this KM match entry to hardware with the given color, dispatching
+ * to the CAM or TCAM writer depending on the configured target.
+ * Returns the writer's result, or -1 for unsupported targets.
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	switch (km->target) {
+	case KM_CAM:
+		return km_write_data_to_cam(km);
+	case KM_TCAM:
+		return km_write_data_to_tcam(km);
+	case KM_SYNERGY:
+	default:
+		return -1;
+	}
+}
+
+/*
+ * Remove this KM match entry, handling shared hardware locations.
+ *
+ * Three cases:
+ *  - km is a referencing entry (km->root set): unlink it from the chain;
+ *    the hardware entry stays, owned by the remaining chain.
+ *  - km is the root of a chain (km->reference set): hand hardware
+ *    ownership over to the next entry in the chain instead of erasing it.
+ *  - km is the sole owner and was flushed: erase the CAM/TCAM entry.
+ * Returns 0 on success, -1 for unsupported targets.
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* km is somewhere down a reference chain - unlink it */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* km is the chain root - promote the next entry to owner */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				/* entry spans two CAM records */
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			/* transfer ownership of every bank the key spans */
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* sole owner - erase the hardware entry */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* Tunnel database entry, kept in the file-scope 'tunnels' linked list. */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;      /* tunnel match values */
+	struct tunnel_cfg_s cfg_mask; /* mask applied to 'cfg' */
+	uint32_t flow_stat_id;        /* (uint32_t)-1 until assigned by flow code */
+	uint8_t vport;                /* allocated virtual port number */
+	int refcnt;                   /* number of users; freed when it drops to 0 */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/* Return nonzero when virt_port falls inside the virtual tunnel range. */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS)
+		return 1;
+	return 0;
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+/* head of the tunnel database linked list */
+static struct tunnel_s *tunnels;
+
+/* allocation map for virtual tunnel ports; nonzero entry = port in use */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Claim the first free virtual tunnel port.
+ * Returns the port number, or 255 when all ports are in use.
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	const uint8_t nb_slots = MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET;
+
+	/* scan the allocation map and mark the first free slot as used */
+	for (uint8_t slot = 0; slot < nb_slots; slot++) {
+		if (vport[slot] == 0) {
+			vport[slot] = 1;
+			return (uint8_t)(slot + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ * Returns 0 on success.
+ * NOTE(review): the return type is uint8_t, so the -1 failure value is
+ * truncated to 255 - callers must test for nonzero rather than -1;
+ * consider changing the return type to int.
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Masked compare: true when v1 and v2 agree on every bit set in both
+ * masks.  Arguments are captured once into typeof temporaries so side
+ * effects are not evaluated multiple times (GCC statement expression).
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked equality of two IPv4 tunnel configs: src/dst IP and UDP ports. */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/* Masked equality of two IPv6 tunnel configs: 2x64-bit src/dst IP halves
+ * and UDP ports.
+ */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked comparison of a database tunnel against a candidate config.
+ * Returns nonzero on a match, 0 otherwise.
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	/* tunnel types must agree before any address comparison */
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up a tunnel in the database, or create and insert a new one.
+ *
+ * tun_set != 0 means a full "tunnel set" definition: an exact duplicate
+ * is searched for, and a matching pre-configured (unset) entry may be
+ * promoted to a defined one.  With tun_set == 0 a masked match search is
+ * done.  The returned tunnel has its reference count incremented.
+ * Returns NULL when no virtual port or memory is available.
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		/* renamed from 'vport' to avoid shadowing the file-scope
+		 * port allocation map of the same name
+		 */
+		uint8_t new_vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       new_vport);
+
+		if (new_vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			/* BUGFIX: the allocation result was previously used
+			 * unchecked; return the port and fail cleanly on OOM
+			 */
+			if (!tun) {
+				flow_tunnel_free_virt_port(new_vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = new_vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel; on the last reference the tunnel is
+ * unlinked from the database, its virtual port released, and the entry
+ * freed.  Returns 0 on success, -1 when tnl is not in the database.
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s **link = &tunnels;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the pointer that links this node into the list */
+	while (*link && *link != tnl)
+		link = &(*link)->next;
+
+	if (*link == NULL) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	/* if last ref, take out of list */
+	if (--tnl->refcnt == 0) {
+		*link = tnl->next;
+		flow_tunnel_free_virt_port(tnl->vport);
+
+		NT_LOG(DBG, FILTER,
+		       "tunnel ref count == 0 remove tunnel vport %i\n",
+		       tnl->vport);
+		free(tnl);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel definition from a flow element list starting at *idx.
+ *
+ * Recognizes ETH/IPv4/IPv6/UDP/VXLAN elements (in ascending type order),
+ * infers VXLAN from well-known UDP destination ports, extracts the VNI
+ * when 'vni' is non-NULL (vni == NULL marks a "tunnel set" command), and
+ * looks the result up in / adds it to the tunnel database.  On success
+ * *idx is advanced past the consumed elements.
+ * Returns the tunnel, or NULL on an invalid definition.
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/* BUGFIX: the mask was previously left uninitialized, so
+		 * fields not written by the elements below carried stack
+		 * garbage into the tunnel database
+		 */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				/* VNI is a 24-bit big-endian field */
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port number allocated to this tunnel. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Assign the flow stat id, marking the tunnel as defined (see tunnel_get). */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy the configuration of the tunnel with the given vport into *tuncfg.
+ * flow_stat_id of (uint32_t)-1 acts as a wildcard.
+ * Returns 0 when found, -1 otherwise.
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun; tun = tun->next) {
+		int stat_match = (flow_stat_id == tun->flow_stat_id ||
+				  flow_stat_id == (uint32_t)-1);
+
+		if (tun->vport == vport && stat_match) {
+			memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * One's-complement 16-bit sum over 'size' bytes, seeded with 'seed' -
+ * the building block for the precalculated IPv4 header checksum.
+ * NOTE(review): for odd 'size' the tail byte is taken as the low byte of
+ * data[idx], which reads a full 16-bit word at the end of the buffer -
+ * assumes a little-endian host and a readable byte past the data; confirm
+ * callers only pass even sizes (the sole caller passes sizeof(ipv4 hdr)).
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/* Copy 'size' spec bytes of a flow element verbatim - the mask is ignored. */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build a VXLAN encapsulation header into fd->tun_hdr from a flow element
+ * list (ETH, IPv4 or IPv6, UDP, VXLAN, optional PORT_ID).
+ *
+ * Elements are copied unmasked; mandatory header fields are normalized
+ * (IP version/IHL, TTL/hop limit, UDP protocol, DF flag, zero UDP
+ * checksum, VXLAN I-flag).  For IPv4 the header checksum is precalculated
+ * (minus the total-length field) for fast per-packet fixup.
+ * Returns 0 on success, -1 on an unsupported element or missing backend
+ * support (IPv6 requires ROA version >= 6).
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* BUGFIX: the 5th MAC byte was printed twice
+				 * instead of bytes [4] and [5]
+				 */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				/* force version 4 / IHL 5 (no options) */
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* precalc the checksum with length zeroed; the payload length
+		 * (header len minus ethernet) is folded in as the seed
+		 */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Query the backend for presence of the CAT (categorizer) FPGA module. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Read the CAT module version and resource counts from the backend, then
+ * allocate one contiguous shadow-cache area covering all CAT tables for
+ * that version (released again by hw_mod_cat_free()).
+ *
+ * Returns 0 on success, -1 on allocation failure, or a backend error code
+ * when a resource count or the module version is unsupported.
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	/* Mandatory resource counts: zero (or negative) is a backend error. */
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC counts may legitimately be zero; only negative is an error. */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	/* Hardware interface ids for the (up to two) KM/FLM module slots. */
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard the division: an FPGA build may expose no KCC banks. */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/*
+	 * One callocate_mod() call per version lays out all shadow tables in
+	 * a single base allocation; the (pointer, count, element-size)
+	 * triplet count must match the leading argument exactly.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		/* v22 adds the CCE/CCS tables (14 triplets instead of 12). */
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the CAT shadow-cache area allocated by hw_mod_cat_alloc().
+ * free(NULL) is a defined no-op, so no guard is needed; clearing the
+ * pointer afterwards makes repeated calls safe (no double free).
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	free(be->cat.base);
+	be->cat.base = NULL;
+}
+
+/*
+ * Reset CFN entry @i to its "accept everything" defaults: all protocol
+ * classifier fields wide open, error checks accepting, and KM OR-masks
+ * fully set.  Fields added in v21 are only written on v21+ hardware.
+ *
+ * NOTE(review): only the initial PRESET_ALL status is propagated; return
+ * values of the individual field setters are ignored — confirm intended.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	/* Fields below exist only on v21 and later module versions. */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Zero the whole CAT shadow cache and flush every table to hardware.
+ * On v18 the single KM interface is flushed; on later versions both
+ * module slots (m0 and, when present, m1) get KCE/KCS/FTE flushes.
+ * Optional tables (KCC, and the v22-only CCE/CCS) are flushed only when
+ * the hardware exposes them.  Returns 0 on success, -1 on any failure.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		/* Second KM/FLM interface only exists when count > 1. */
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	/* KCC is optional; skip when the FPGA exposes no KCC memory. */
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	/* CCE/CCS tables exist from version 22 onwards. */
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush CFN shadow entries [start_idx .. start_idx + count) to hardware.
+ * ALL_ENTRIES expands to every categorizer function (only valid from
+ * index 0).  Returns the backend's status or an index-range error.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES) {
+		/* A full flush is only meaningful from the first entry. */
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		count = be->cat.nb_cat_funcs;
+	} else if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs) {
+		return error_index_too_large(__func__);
+	}
+	return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+					count);
+}
+
+/*
+ * Get or set a single CFN (categorizer function) field in the shadow
+ * cache.  @index selects the CFN entry; @word_off selects the 32-bit
+ * word for multi-word fields (e.g. PM_CMP); @get selects read (1) or
+ * write (0) through @value.  Dispatches first on module version, then
+ * on field id; pseudo-fields (PRESET_ALL, COMPARE, FIND, COPY_FROM,
+ * SET_ALL_DEFAULTS) operate on whole entries rather than one member.
+ * Returns 0 on success or an error code for bad index/field/version.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			/* Fill the whole entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			/* pm_cmp is a two-word field; word_off selects word. */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			/* v18 has one KM interface: KM0_OR maps to km_or. */
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			/* Copy whole entry *value into entry index (v21+). */
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one CFN field; word_off selects the word for multi-word fields. */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one CFN field into *value. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map a (slot, hardware interface id) pair to the internal interface
+ * index (0 or 1).  On v18 there is only one KM interface, so the index
+ * is always 0.  The SECOND slot must match the m1 id exactly; the FIRST
+ * slot accepts an id mapped on either m0 or m1.  Returns a negative
+ * error code when the id is not mapped on an acceptable slot.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	if (_VER_ == 18)
+		return 0;
+
+	if (if_num == KM_FLM_IF_SECOND) {
+		if (be->cat.km_if_m1 == km_if_id)
+			return 1;
+		return error_unsup_field(__func__);
+	}
+
+	if (be->cat.km_if_m0 == km_if_id)
+		return 0;
+	if (be->cat.km_if_m1 == km_if_id)
+		return 1;
+	return error_unsup_field(__func__);
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush KCE shadow entries for one KM/FLM interface.  Each KCE entry
+ * carries enable bits for 8 categorizer functions, so the valid index
+ * range is nb_cat_funcs / 8.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Public KCE flush for the KM module (hardware interface id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Public KCE flush for the FLM module (hardware interface id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the KCE enable bitmap for one entry (8 CFNs per entry).
+ * On v21/v22 the bitmap is per KM/FLM interface; on v18 it is shared.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCE field via the KM module interface (id 0). */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCE field via the KM module interface (id 0). */
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCE field via the FLM module interface (id 1). */
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCE field via the FLM module interface (id 1). */
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/* Flush KCS shadow entries (one per categorizer function) for one
+ * KM/FLM interface.
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Public KCS flush for the KM module (hardware interface id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Public KCS flush for the FLM module (hardware interface id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the KCS category for one entry.  On v21/v22 the category
+ * is stored per KM/FLM interface; on v18 there is a single value.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set a KCS field via the KM module interface (id 0). */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+/* Get a KCS field via the KM module interface (id 0). */
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+/* Set a KCS field via the FLM module interface (id 1). */
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+/* Get a KCS field via the FLM module interface (id 1). */
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE shadow entries for one KM/FLM interface.  The table holds
+ * (nb_cat_funcs / 8) * nb_flow_types * key_cnt entries, where key_cnt
+ * is 2 before module version 20 and 4 from version 20 onwards.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Public FTE flush for the KM module (hardware interface id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Public FTE flush for the FLM module (hardware interface id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set the FTE enable bitmap for one entry.  The index bound uses
+ * the same version-dependent key_cnt as the flush (2 before v20, 4
+ * after).  On v21/v22 the bitmap is per KM/FLM interface.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FTE field via the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &v, 0);
+}
+
+/* Get one FTE field via the KM interface (km_if_id = 0). */
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	int rc = hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+
+	return rc;
+}
+
+/* Set one FTE field via the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &v, 0);
+}
+
+/* Get one FTE field via the FLM interface (km_if_id = 1). */
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	int rc = hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush CTE entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_cat_funcs;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CTE field in the shadow table.  All
+ * supported versions (18/21/22) are accessed through the v18 view here.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CTE field value. */
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cte_mod(be, field, index, &v, 0);
+}
+
+/* Get one CTE field value into *value. */
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_cte_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush CTS entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	/* address words per category function */
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+	const unsigned int total = be->cat.nb_cat_funcs * addr_size;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CTS field in the shadow table.
+ * NOTE(review): unlike hw_mod_cat_cts_flush() this omits the
+ * "_VER_ < 15" addr_size special case; harmless for the supported
+ * versions (18/21/22) but worth confirming intentional.
+ */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* address words per category function */
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CTS field value. */
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cts_mod(be, field, index, &v, 0);
+}
+
+/* Get one CTS field value into *value. */
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_cts_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush COT entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->max_categories;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one COT field in the shadow table.  Besides
+ * plain field access this also supports three pseudo-fields:
+ * PRESET_ALL (fill the whole entry with the low byte of *value,
+ * write-only), COMPARE (compare entry ranges) and FIND (search for an
+ * equal entry, result returned through *value).
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			/* write-only: preset the entire entry */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one COT field value. */
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cot_mod(be, field, index, &v, 0);
+}
+
+/* Get one COT field value into *value. */
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_cot_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush CCT entries [start_idx, start_idx + count) to hardware.
+ * The table has 4 entries per category function; count == ALL_ENTRIES
+ * flushes all of them.
+ */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_cat_funcs * 4;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CCT field in the shadow table.  All
+ * supported versions (18/21/22) are accessed through the v18 view here.
+ */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	/* 4 entries per category function, matching hw_mod_cat_cct_flush() */
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CCT field value. */
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cct_mod(be, field, index, &v, 0);
+}
+
+/* Get one CCT field value into *value. */
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_cct_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush KCC CAM entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole CAM.
+ */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.kcc_size;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one KCC CAM field in the shadow table.
+ * word_off selects the 32-bit word of the 64-bit key (0 or 1) and is
+ * ignored for the other fields.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			/* key[] has exactly two 32-bit words */
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one KCC CAM field value (word_off selects the key word). */
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Get one KCC CAM field value into *value. */
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	int rc = hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush EXO entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_pm_ext;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one EXO field in the shadow table.  The
+ * offset field is signed (int32_t), hence the get_set_signed() accessor.
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one EXO field value. */
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_exo_mod(be, field, index, &v, 0);
+}
+
+/* Get one EXO field value into *value. */
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_exo_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush RCK entries [start_idx, start_idx + count) to hardware.
+ * The table has 64 entries per pattern-matcher extractor;
+ * count == ALL_ENTRIES flushes all of them.
+ */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_pm_ext * 64;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one RCK data word in the shadow table.
+ * Bound matches hw_mod_cat_rck_flush(): 64 entries per extractor.
+ */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RCK field value. */
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_rck_mod(be, field, index, &v, 0);
+}
+
+/* Get one RCK field value into *value. */
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_rck_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush LEN entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = be->cat.nb_len;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one LEN field in the shadow table.  All
+ * supported versions (18/21/22) are accessed through the v18 view here.
+ */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one LEN field value. */
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_len_mod(be, field, index, &v, 0);
+}
+
+/* Get one LEN field value into *value. */
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_len_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush CCE entries [start_idx, start_idx + count) to hardware.
+ * The CCE table has exactly 4 entries; count == ALL_ENTRIES flushes all.
+ */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = 4;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CCE field in the shadow table (v22 only).
+ *
+ * Fix: the index bound is the CCE table size (4 entries, see
+ * hw_mod_cat_cce_flush()), not be->cat.nb_len, which was a copy-paste
+ * from the LEN module and allowed out-of-bounds access to the 4-entry
+ * cce[] array whenever nb_len > 4.
+ */
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CCE field value. */
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_cce_mod(be, field, index, &v, 0);
+}
+
+/* Get one CCE field value into *value. */
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_cce_mod(be, field, index, value, 1);
+
+	return rc;
+}
+
+/*
+ * Flush CCS entries [start_idx, start_idx + count) to hardware.
+ * The CCS table has 1024 entries; count == ALL_ENTRIES flushes all.
+ */
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int total = 1024;
+
+	if (count == ALL_ENTRIES)
+		count = total;
+	if ((unsigned int)(start_idx + count) > total)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Read (get != 0) or write one CCS field in the shadow table (v22 only).
+ *
+ * Fix: the index bound is the CCS table size (1024 entries, see
+ * hw_mod_cat_ccs_flush()), not be->cat.nb_len, which was a copy-paste
+ * from the LEN module and used the wrong table size for the bound check.
+ */
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= 1024)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CCS field value. */
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_cat_ccs_mod(be, field, index, &v, 0);
+}
+
+/* Get one CCS field value into *value. */
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	int rc = hw_mod_cat_ccs_mod(be, field, index, value, 1);
+
+	return rc;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Shadow-register layout for the CAT (categorizer) module, FPGA
+ * version 18.  Each struct mirrors one hardware table; instances are
+ * arrays indexed by table entry.
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/* CFN: category function entry — per-category match conditions */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE: KM category enable bitmap */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS: KM category select */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE: flow type enable bitmap */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/* CTE: per-category enable bits, addressable as a bitmap or by field */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS: category translation — two category values per entry */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT: color table entry */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT: color control table entry */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO: extractor offset — dyn selector plus signed byte offset */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK: relative checking data word */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN: length check entry — bounds, dyn selectors and invert flag */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC: key category CAM entry — 64-bit key plus category and id */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Top-level v18 shadow state: one dynamically allocated array per table */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Shadow-register layout for the CAT module, FPGA version 21.
+ * Only the tables that changed relative to v18 are redefined; the rest
+ * reuse the v18 structs.
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/* CFN: v21 adds tunnel checksum/TTL error checks and a second KM OR */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE: one enable bitmap per KM/FLM interface */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+/* KCS: one category per KM/FLM interface */
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+/* FTE: one flow type enable bitmap per KM/FLM interface */
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Top-level v21 shadow state; unchanged tables reuse v18 structs */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Shadow-register layout for the CAT module, FPGA version 22.
+ * Builds on v21; adds the RRB enable bit to CTE and the new CCE/CCS
+ * tables.
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/* CTE: v22 extends the v18 bitmap with an rrb bit */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE: new in v22 (4 entries, see hw_mod_cat_cce_flush()) */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS: new in v22 (1024 entries); per-module enable/value pairs plus
+ * three type/data side-band words
+ */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Top-level v22 shadow state; unchanged tables reuse v18/v21 structs */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend for presence of the FLM (flow matcher) module. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Read the FLM module version and resource counts from the backend and
+ * allocate one contiguous cache area holding all version-specific register
+ * shadow structures (callocate_mod assigns the individual pointers).
+ *
+ * @return 0 on success; negative when a resource count is missing, the
+ *         FPGA module version is unsupported, or allocation fails.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		/* was "flm_variant" - copy/paste error in the reported resource name */
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20 layout = all v17 tables plus 12 extra statistics counters */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM shadow cache allocated by hw_mod_flm_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the base pointer is
+ * cleared to defend against double-free and use-after-free.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM shadow cache to its default state and push the defaults
+ * to hardware (control, timeout, scrub and all RCP entries).
+ * NOTE(review): return values of the set/flush calls are ignored -
+ * confirm a partial flush failure is acceptable during reset.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached FLM CONTROL block out to the hardware backend. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read or write one field of the cached FLM CONTROL block.
+ * @param value in-parameter when setting, out-parameter when getting
+ * @param get   non-zero reads the cache into *value, zero writes *value
+ * @return 0 on success, negative for unsupported field or module version
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			/* write-only: fill the whole block with a byte pattern */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM CONTROL field in the cache (flush separately). */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one FLM CONTROL field from the cache into *value. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM STATUS block out to the hardware backend. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached FLM STATUS block from the hardware backend. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read or write one field of the cached FLM STATUS block.
+ * @param get non-zero reads the cache into *value, zero writes *value
+ * @return 0 on success, negative for unsupported field or module version
+ */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM STATUS field in the cache (flush separately). */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one FLM STATUS field from the cache into *value. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM TIMEOUT register to the hardware backend. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM TIMEOUT field (HW_FLM_TIMEOUT_T). */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM TIMEOUT field in the cache (flush separately). */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the FLM TIMEOUT field from the cache into *value. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM SCRUB register to the hardware backend. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM SCRUB field (HW_FLM_SCRUB_I). */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM SCRUB field in the cache (flush separately). */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the FLM SCRUB field from the cache into *value. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM LOAD_BIN register to the hardware backend. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM LOAD_BIN field (HW_FLM_LOAD_BIN). */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM LOAD_BIN field in the cache (flush separately). */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the FLM LOAD_BIN field from the cache into *value. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM LOAD_PPS register to the hardware backend. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM LOAD_PPS field (HW_FLM_LOAD_PPS). */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM LOAD_PPS field in the cache (flush separately). */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM LOAD_PPS field from the cache into *value. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM LOAD_LPS register to the hardware backend. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM LOAD_LPS field (HW_FLM_LOAD_LPS). */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM LOAD_LPS field in the cache (flush separately). */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM LOAD_LPS field from the cache into *value. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM LOAD_APS register to the hardware backend. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Read or write the single FLM LOAD_APS field (HW_FLM_LOAD_APS). */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM LOAD_APS field in the cache (flush separately). */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM LOAD_APS field from the cache into *value. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM PRIO register block to the hardware backend. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Read or write one field of the cached FLM PRIO block (four limit/ft
+ * pairs, one per priority level).
+ * @param get non-zero reads the cache into *value, zero writes *value
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM PRIO field in the cache (flush separately). */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one FLM PRIO field from the cache into *value. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush PST profile entries [start_idx, start_idx + count) to hardware;
+ * ALL_ENTRIES selects the whole table.
+ * NOTE(review): start_idx is not checked for negative values - confirm
+ * callers guarantee start_idx >= 0.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read or write one field of the cached PST profile entry at `index`.
+ * NOTE(review): `index` is not bounds-checked here - presumably callers
+ * validate it against nb_pst_profiles; confirm.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			/* write-only: fill the whole entry with a byte pattern */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one field of PST profile `index` in the cache (flush separately). */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of PST profile `index` from the cache into *value. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush RCP (recipe) entries [start_idx, start_idx + count) to hardware;
+ * ALL_ENTRIES selects the whole table.
+ * NOTE(review): start_idx is not checked for negative values - confirm
+ * callers guarantee start_idx >= 0.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Read or write one field of the cached RCP (recipe) entry at `index`.
+ * HW_FLM_RCP_MASK transfers the whole mask array (10 x 32-bit words);
+ * all other fields are single 32-bit values.
+ * @param get non-zero reads the cache into *value, zero writes *value
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			/* write-only: fill the whole entry with a byte pattern */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* whole-array transfer; *value must point at 10 words */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the RCP mask array (10 x 32-bit words) of entry `index`.
+ * Only HW_FLM_RCP_MASK is accepted.
+ * NOTE(review): a non-MASK field is rejected with error_unsup_ver();
+ * error_unsup_field() looks more accurate - confirm intended error code.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/*
+ * Set one scalar RCP field of entry `index`; HW_FLM_RCP_MASK must go
+ * through hw_mod_flm_rcp_set_mask() instead.
+ * NOTE(review): the MASK rejection uses error_unsup_ver(); confirm this
+ * is the intended error code (vs. error_unsup_field()).
+ */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RCP field of entry `index` from the cache into *value. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached FLM BUF_CTRL block from the hardware backend. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one field of the cached FLM BUF_CTRL block into *value.
+ * BUF_CTRL is read-only, so no set path exists.
+ */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Read one FLM BUF_CTRL field from the cache into *value. */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh all cached FLM statistics counters from the hardware backend. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one cached FLM statistics counter into *value.
+ * Counters common to v17 and v20 are handled in the outer switch; the
+ * nested default handles the counters that only exist from v20 on,
+ * guarded by the _VER_ < 18 check.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* v20-only counters; reject them on older versions */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (flm_v17_lrn_data_s, passed as 32-bit words in
+ * *value) to the hardware learn FIFO.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read `word_cnt` 32-bit words of flow-info records from the backend
+ * into *value.
+ * NOTE(review): the return value of flm_inf_data_update() is discarded -
+ * confirm that interface call cannot fail.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (flm_v17_sta_data_s, as 32-bit words) from the
+ * backend into *value.
+ * NOTE(review): the return value of flm_sta_data_update() is discarded -
+ * confirm that interface call cannot fail.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..9b4ee1991e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/* Overlay for packing two 28-bit member indices into one 64-bit word. */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+/* Shadow of the FLM v17 CONTROL register block (one word per field). */
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+/* Shadow of the FLM v17 STATUS register block. */
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+/* Shadow of the FLM v17 TIMEOUT register. */
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+/* Shadow of the FLM v17 SCRUB register. */
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+/* Shadow of the FLM v17 LOAD_BIN register. */
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+/* Shadow of the FLM v17 LOAD_PPS register. */
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+/* Shadow of the FLM v17 LOAD_LPS register. */
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+/* Shadow of the FLM v17 LOAD_APS register. */
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* Shadow of the FLM v17 PRIO block: four limit/ft pairs. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+/* Shadow of one FLM v17 PST profile entry. */
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* Shadow of one FLM v17 RCP (recipe) entry. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10]; /* 320-bit key mask, transferred as a whole */
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+/* Shadow of the FLM v17 BUF_CTRL block (FIFO fill levels). */
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+#pragma pack(1)
+/*
+ * One FLM v17 learn record as sent to hardware; byte-packed via the
+ * surrounding #pragma pack(1). Trailing comments give the absolute bit
+ * range (high:low) and width of each field within the 768-bit record.
+ */
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+/* One FLM v17 flow-info record as read from hardware (byte-packed). */
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+/* One FLM v17 status record as read from hardware (byte-packed). */
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* FLM v17 STAT_LRN_DONE counter shadow. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_LRN_IGNORE counter shadow. */
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_LRN_FAIL counter shadow. */
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_UNL_DONE counter shadow. */
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_UNL_IGNORE counter shadow. */
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_REL_DONE counter shadow. */
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_REL_IGNORE counter shadow. */
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_AUL_DONE counter shadow. */
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_AUL_IGNORE counter shadow. */
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_AUL_FAIL counter shadow. */
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_TUL_DONE counter shadow. */
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_FLOWS counter shadow. */
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_PRB_DONE counter shadow. */
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+/* FLM v17 STAT_PRB_IGNORE counter shadow. */
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v17 register-cache pointer set; all pointers refer into one shared
+ * allocation set up by hw_mod_flm_alloc().
+ */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * FLM v2.0 statistics counter shadows - counters added on top of the
+ * v1.7 set. One 32-bit counter register each, same convention as
+ * the flm_v17_stat_*_s structs.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * Shadow-cache layout for the FLM module, register version 2.0.
+ * v2.0 keeps the v1.7 register layouts (hence the flm_v17_* pointers)
+ * and appends the new v2.0 statistics counters at the end.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Ask the backend whether the HSH (hashing) FPGA module is present. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hsh_present(be->be_dev);
+}
+
+/*
+ * Query the HSH module version from the backend and allocate the
+ * version-specific shadow cache (v5: HSH_RCP_ENTRIES_V5 RCP records).
+ *
+ * Returns 0 on success, -1 if callocate_mod() fails, or the
+ * error_unsup_ver() code for an unknown module version.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the HSH shadow cache allocated by hw_mod_hsh_alloc(). */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Clear the whole HSH shadow cache and push the zeroed RCP table to HW.
+ * Returns the result of the RCP flush.
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) of the cached HSH RCP records
+ * to hardware. count == ALL_ENTRIES means the whole table.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const int nb_rcp = (int)be->hsh.nb_rcp;
+
+	if (count == ALL_ENTRIES)
+		count = nb_rcp;
+	if (start_idx + count > nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Get or set one field of a cached HSH RCP record.
+ *
+ * @index    RCP record; checked against be->hsh.nb_rcp.
+ * @word_off word index for array fields (MAC_PORT_MASK, WORD_MASK);
+ *           ignored by scalar fields.
+ * @value    in-parameter when setting, out-parameter when getting.
+ * @get      non-zero = read from cache, zero = write to cache.
+ *
+ * Returns 0 on success or a module error code. Note: this only touches
+ * the shadow cache; a *_flush call is needed to reach hardware.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t *word_off_or_value, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/*
+			 * Bug fix: the check was "> SIZE", letting
+			 * word_off == HSH_RCP_MAC_PORT_MASK_SIZE index one
+			 * past the end of mac_port_mask[4].
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* Bug fix: ">=" so word_mask[10] cannot be indexed at 10 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HSH RCP field; word_off selects the word for array fields. */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one HSH RCP field from the shadow cache into *value. */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * Shadow of one HSH v5 RCP (recipe) record. Signed *_ofs fields are
+ * byte offsets relative to a dynamic header pointer; array sizes must
+ * match HSH_RCP_MAC_PORT_MASK_SIZE (4) and HSH_RCP_WORD_MASK_SIZE (10)
+ * used for bounds checks in hw_mod_hsh.c.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Ask the backend whether the HST (header stripper) module is present. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Query HST version and category count from the backend and allocate
+ * the version-specific shadow cache (v2: one RCP array).
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the HST shadow cache allocated by hw_mod_hst_alloc(). */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Clear the whole HST shadow cache and push the zeroed RCP table to HW.
+ * Returns the result of the RCP flush.
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached HST RCP records to HW.
+ * count == ALL_ENTRIES means the whole table.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	const unsigned int nb = be->hst.nb_hst_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Get or set one field of a cached HST RCP record.
+ *
+ * @index  RCP record; checked against be->hst.nb_hst_rcp_categories.
+ * @value  in-parameter when setting, out-parameter when getting (for
+ *         FIND/COMPARE it carries the comparison index on input).
+ * @get    non-zero = read from cache, zero = write to cache.
+ *
+ * Returns 0 on success or a module error code. Only touches the shadow
+ * cache; a *_flush call is needed to reach hardware.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/*
+			 * Bug fix: the return value was discarded, leaving
+			 * rv permanently 0 and the error check below dead.
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HST RCP field in the shadow cache. */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HST RCP field from the shadow cache into *value. */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * Shadow of one HST v2 RCP (recipe) record: strip region selection
+ * (start/end dynamic pointer + offset) plus three header-modifier
+ * slots (cmd/dyn/ofs/value each).
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Ask the backend whether the IOA module is present. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Query IOA version and table sizes from the backend and allocate the
+ * version-specific shadow cache (v4: RCP array, one TPID record, and a
+ * ROA EPP array sized by the ROA category count - see note below).
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the IOA shadow cache allocated by hw_mod_ioa_alloc(). */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Clear the whole IOA shadow cache, program the default custom TPIDs
+ * (0x8200/0x8300) and push all tables to hardware.
+ *
+ * Bug fix: previously every flush/config return code was ignored and 0
+ * was returned unconditionally; now errors are propagated, consistent
+ * with hw_mod_hsh_reset()/hw_mod_hst_reset().
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	int rv;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	rv = hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	if (rv != 0)
+		return rv;
+
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	rv = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	if (rv != 0)
+		return rv;
+	rv = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	if (rv != 0)
+		return rv;
+	rv = hw_mod_ioa_config_flush(be);
+	if (rv != 0)
+		return rv;
+
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	return hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached IOA RCP records to HW.
+ * count == ALL_ENTRIES means the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Get or set one field of a cached IOA RCP record.
+ *
+ * @index  RCP record; checked against be->ioa.nb_rcp_categories.
+ * @value  in-parameter when setting, out-parameter when getting (for
+ *         FIND/COMPARE it carries the comparison index on input).
+ * @get    non-zero = read from cache, zero = write to cache.
+ *
+ * Returns 0 on success or a module error code. Only touches the shadow
+ * cache; a *_flush call is needed to reach hardware.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one IOA RCP field in the shadow cache. */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IOA RCP field from the shadow cache into *value. */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached special-TPID configuration record to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one IOA configuration field (custom TPID 0/1) in the shadow
+ * cache. Takes effect after hw_mod_ioa_config_flush().
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached ROA EPP records to HW.
+ * count == ALL_ENTRIES means the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Get or set one field of a cached ROA EPP (egress port/push) record.
+ * Same get/set/FIND/COMPARE conventions as hw_mod_ioa_rcp_mod().
+ * Returns 0 on success or a module error code.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one ROA EPP field in the shadow cache. */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one ROA EPP field from the shadow cache into *value. */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* Shadow of one IOA v4 RCP record: tunnel/VLAN pop-push and queue override. */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Custom TPID values programmed via HW_IOA_CONFIG_CUST_TPID_0/1. */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress-port/push-tunnel record (indexed by ROA category). */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Ask the backend whether the KM (key matcher) module is present. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Query KM version and all table dimensions from the backend and
+ * allocate the version-specific shadow cache (v7: RCP, CAM, TCAM,
+ * TCI and TCQ arrays).
+ *
+ * Consistency fix: the mask word sizes were hard-coded as 12 and 6,
+ * duplicating KM_RCP_MASK_D_A_SIZE/KM_RCP_MASK_B_SIZE defined at the
+ * top of this file and used for bounds checks in hw_mod_km_rcp_mod();
+ * use the defines so the values cannot drift apart.
+ *
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the KM shadow cache allocated by hw_mod_km_alloc(). */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Clear the whole KM shadow cache and push zeroed RCP/CAM/TCAM/TCI/TCQ
+ * tables to hardware. TCAM banks are explicitly marked for reset first
+ * because their entries are cache controlled.
+ *
+ * NOTE(review): the individual flush return codes are ignored and 0 is
+ * always returned - consider propagating errors like hsh/hst reset do.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Flush [start_idx, start_idx + count) cached KM RCP records to HW.
+ * count == ALL_ENTRIES means the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Get or set one field of a cached KM RCP record.
+ *
+ * @index    RCP record; checked against be->km.nb_categories.
+ * @word_off word index for the MASK_A/MASK_B array fields; ignored by
+ *           scalar fields.
+ * @value    in-parameter when setting, out-parameter when getting.
+ * @get      non-zero = read from cache, zero = write to cache.
+ *
+ * Returns 0 on success or a module error code. Only touches the shadow
+ * cache; hw_mod_km_rcp_flush() is needed to reach hardware.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/*
+			 * Bug fix: the check was "> SIZE", letting word_off
+			 * equal KM_RCP_MASK_D_A_SIZE index one past the end
+			 * of the mask_d_a array (same off-by-one pattern as
+			 * in the HSH module).
+			 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* Bug fix: ">=" so mask_b cannot be indexed at SIZE */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one KM RCP field of recipe @index into the shadow cache (no HW I/O). */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one KM RCP field of recipe @index from the shadow cache. */
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush @count cached KM CAM records, starting at (@start_bank,
+ * @start_record), to hardware through the backend interface.
+ * ALL_ENTRIES flushes every record of every bank.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	/* Records are laid out bank-major: bank * nb_cam_records + record. */
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get or set one field of the cached KM CAM record at (@bank, @record).
+ * @get non-zero reads into *@value, zero writes *@value into the cache.
+ * Only CAM version 7 is supported; unknown fields/versions return an error.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	/* Bank-major linear index into the v7 CAM cache array. */
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* Write-only: fill the whole record with a byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one field of cached CAM record (@bank, @record); no HW I/O. */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one field of cached CAM record (@bank, @record). */
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCAM entries to hardware, starting at @start_bank.
+ * A bank holds 4 * 256 entries (4 byte-lanes x 256 byte values).
+ * ALL_ENTRIES flushes all banks; ALL_BANK_ENTRIES flushes one full bank.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	/* Flush always starts at byte/byte_val 0 of the bank. */
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Get or set a cached KM TCAM entry addressed by (@bank, @byte, @byte_val).
+ * HW_KM_TCAM_T transfers the three 32-bit words of one entry through
+ * @value_set[0..2]; writes mark the entry dirty only when a word changes.
+ * HW_KM_TCAM_BANK_RESET (write-only) fills a whole bank with @value_set
+ * and marks every entry dirty.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					/* dirty flag lets flush skip clean entries */
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a TCAM entry (3 words in @value_set) into the shadow cache. */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+/* Read a TCAM entry (3 words into @value_set) from the shadow cache. */
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush @count cached TCI records, starting at (@start_bank, @start_record),
+ * to hardware. ALL_ENTRIES flushes every record of every TCAM bank.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	/* TCI records are laid out bank-major with nb_tcam_bank_width per bank. */
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get or set one field of the cached TCI record at (@bank, @record).
+ * @get non-zero reads into *@value, zero writes *@value into the cache.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one field of cached TCI record (@bank, @record); no HW I/O. */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+/* Read one field of cached TCI record (@bank, @record). */
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush @count cached TCQ records, starting at (@start_bank, @start_record),
+ * to hardware. ALL_ENTRIES flushes every record of every TCAM bank.
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get or set one field of the cached TCQ record at (@bank, @record).
+ * @get non-zero reads into *@value, zero writes *@value into the cache.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write one field of cached TCQ record (@bank, @record); no HW I/O. */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+/* Read one field of cached TCQ record (@bank, @record). */
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * Shadow cache of one KM v7 recipe (RCP) register entry, accessed field
+ * by field through the get_set()/get_set_signed() helpers in hw_mod_km.c.
+ * The *_ofs fields are signed offsets (accessed via get_set_signed).
+ */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];	/* indexed by word_off via HW_KM_RCP_MASK_A */
+	uint32_t mask_b[6];	/* indexed by word_off via HW_KM_RCP_MASK_B */
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* Cache of one KM v7 CAM record: six data words and six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* Cache of one KM v7 TCAM entry; dirty marks entries pending a HW flush. */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* Cache of one KM v7 TCI record. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* Cache of one KM v7 TCQ record. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Per-table cache pointers for the KM v7 module (allocated in hw_mod_km_alloc). */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend whether the PDB module exists in this FPGA. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Query PDB version and resource counts from the backend and allocate the
+ * shadow caches (RCP table and single config record) for the detected
+ * version. Returns 0 on success, negative on allocation/version error.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		/* One callocate_mod() call owns all tables; freed via pdb.base. */
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the PDB shadow caches (single allocation rooted at pdb.base). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+/*
+ * Zero the PDB cache and flush all RCP entries and the config record to
+ * hardware. Returns the OR of the flush results (0 when all succeed).
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush @count cached PDB RCP entries from @start_idx to hardware. */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Get or set one field of cached PDB RCP entry @index.
+ * Special fields: PRESET_ALL (write-only memset of the entry), FIND
+ * (search for an equal entry), COMPARE (compare two entries).
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			/* ofs*_rel fields are signed relative offsets */
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of cached PDB RCP entry @index; no HW I/O. */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached PDB RCP entry @index. */
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached PDB config record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set one field of the cached PDB config record (write-only; no getter
+ * counterpart exists for config). Flush separately to reach hardware.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/*
+ * Shadow cache of one PDB v9 RCP (recipe) entry; ofs*_rel are signed
+ * relative offsets (accessed via get_set_signed in hw_mod_pdb.c).
+ */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* Cache of the single PDB v9 config record. */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Per-table cache pointers for the PDB v9 module. */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend whether the QSL module exists in this FPGA. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Query QSL version and resource counts and allocate the shadow caches
+ * (RCP, QST, QEN, UNMQ tables) for the detected version.
+ * Returns 0 on success, negative on allocation/version error.
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/* QEN/UNMQ table sizes are fixed, not backend-queried. */
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the QSL shadow caches (single allocation rooted at qsl.base). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+/*
+ * Zero the QSL cache and flush all tables to hardware.
+ * NOTE(review): unlike pdb/km resets, the flush return values here are
+ * ignored and 0 is always returned — confirm this is intentional.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	/* UNMQ is flushed directly via the backend iface (all 256 entries). */
+	be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0, 256);
+
+	return 0;
+}
+
+/* Flush @count cached QSL RCP entries from @start_idx to hardware. */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get or set one field of cached QSL RCP entry @index.
+ * Special fields: PRESET_ALL (write-only memset), FIND (search for an
+ * equal entry), COMPARE (compare two entries).
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of cached QSL RCP entry @index; no HW I/O. */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL RCP entry @index. */
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush @count cached QSL QST entries from @start_idx to hardware. */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get or set one field of cached QSL QST entry @index.
+ * PRESET_ALL is write-only and memsets the whole entry.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of cached QSL QST entry @index; no HW I/O. */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL QST entry @index. */
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush @count cached QSL QEN entries (fixed-size table) to hardware. */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/* Get or set the enable word of cached QSL QEN entry @index. */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of cached QSL QEN entry @index; no HW I/O. */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL QEN entry @index. */
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush @count cached QSL UNMQ entries (fixed-size table) to hardware. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/* Get or set one field of cached QSL UNMQ (unmatched-queue) entry @index. */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of cached QSL UNMQ entry @index; no HW I/O. */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of cached QSL UNMQ entry @index. */
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* Shadow cache of one QSL v7 RCP (recipe) entry. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* Cache of one QSL v7 QST (queue-selection table) entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* Cache of one QSL v7 QEN (queue-enable) entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* Cache of one QSL v7 UNMQ (unmatched-packet queue) entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Per-table cache pointers for the QSL v7 module. */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend whether the RMC module exists in this FPGA. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Query RMC version and allocate the shadow cache (single ctrl record).
+ * Version is encoded major<<16|minor: 0x10003 is v1.3.
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the RMC shadow cache (single allocation rooted at rmc.base). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+/*
+ * Zero the RMC cache, set a safe default control state (statistics and
+ * keep-alive blocked, all MAC ports and RPP slices blocked) and flush the
+ * ctrl record to hardware. Returns the flush result.
+ *
+ * Note: the original code set HW_RMC_BLOCK_STATT twice with the same
+ * value; the redundant duplicate call is removed (final cache state is
+ * identical since only the cache is written before the single flush).
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the cached RMC ctrl record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Get or set one field of the single cached RMC ctrl record.
+ * @get non-zero reads into *@value, zero writes *@value into the cache.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one field of the cached RMC ctrl record; no HW I/O. */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+/* Read one field of the cached RMC ctrl record. */
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
/* Shadow of the RMC v1.3 CTRL register fields (one word per field). */
struct rmc_v1_3_ctrl_s {
	uint32_t block_statt;      /* block statistics traffic */
	uint32_t block_keepa;      /* block keep-alive traffic */
	uint32_t block_rpp_slice;  /* per-RPP-slice block bitmask (0xf = all) */
	uint32_t block_mac_port;   /* per-MAC-port block bitmask (0xff = all) */
	uint32_t lag_phy_odd_even; /* presumably LAG odd/even PHY select — confirm */
};
+
/* RMC v1.3 cache layout: a single CTRL record. */
struct hw_mod_rmc_v1_3_s {
	struct rmc_v1_3_ctrl_s *ctrl; /* points into the module's cache area */
};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_roa_present(be->be_dev);
+}
+
/*
 * Allocate the ROA module's shadow cache.
 *
 * Queries the backend for the ROA version and category count, then
 * allocates all version-specific tables in one contiguous area via
 * callocate_mod().
 *
 * @return 0 on success, -1 on allocation failure, or an error code for
 *         a bad resource count / unsupported version
 */
int hw_mod_roa_alloc(struct flow_api_backend_s *be)
{
	int nb;

	_VER_ = be->iface->get_roa_version(be->be_dev);
	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	nb = be->iface->get_nb_roa_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
	be->roa.nb_tun_categories = (uint32_t)nb;

	/* NOTE(review): divided by 4 — presumably four flow categories share
	 * one tunnel record (tunnel_hdr is 4*4 words); confirm. */
	be->roa.nb_tun_categories /= 4;

	switch (_VER_) {
	case 6:
		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
		/* One allocation for all four v6 tables. */
		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
			&be->roa.v6.tunhdr,
			be->roa.nb_tun_categories,
			sizeof(struct roa_v6_tunhdr_s),
			&be->roa.v6.tuncfg,
			be->roa.nb_tun_categories,
			sizeof(struct roa_v6_tuncfg_s),
			&be->roa.v6.config,
			1,
			sizeof(struct roa_v6_config_s),
			&be->roa.v6.lagcfg,
			be->roa.nb_lag_entries,
			sizeof(struct roa_v6_lagcfg_s)))
			return -1;
		break;
	/* end case 6 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}
	return 0;
}
+
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	if (be->roa.base) {
+		free(be->roa.base);
+		be->roa.base = NULL;
+	}
+}
+
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err = hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
/*
 * Get/set one 32-bit word of a cached tunnel header template.
 *
 * @param index    tunnel category (record) index
 * @param word_off 32-bit word offset inside the header template
 * @param get      non-zero reads the cache, zero writes it
 * @return 0 on success, otherwise an error code
 */
static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
				 enum hw_roa_e field, uint32_t index,
				 uint32_t word_off, uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->roa.nb_tun_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 6:
		switch (field) {
		case HW_ROA_TUNHDR_COMPARE:
			/* here word_off carries the record index to compare against */
			rv = do_compare_indexes(be->roa.v6.tunhdr,
				sizeof(struct roa_v6_tunhdr_s), index, word_off,
				be->roa.nb_tun_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_ROA_TUNHDR:
			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
				value, get);
			break;
		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 6 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_tun_categories;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
/*
 * Get/set one field of a cached tunnel configuration record.
 *
 * Besides plain field access this supports three pseudo-fields:
 *  - HW_ROA_TUNCFG_PRESET_ALL: memset the whole record (set only)
 *  - HW_ROA_TUNCFG_FIND:       find a record equal to record [*value]
 *  - HW_ROA_TUNCFG_COMPARE:    compare record [index] with record [*value]
 *
 * @param get non-zero reads the cache, zero writes it
 * @return 0 on success, otherwise an error code
 */
static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
				 enum hw_roa_e field, uint32_t index,
				 uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->roa.nb_tun_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 6:
		switch (field) {
		case HW_ROA_TUNCFG_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
			       sizeof(struct roa_v6_tuncfg_s));
			break;
		case HW_ROA_TUNCFG_FIND:
			rv = find_equal_index(be->roa.v6.tuncfg,
				sizeof(struct roa_v6_tuncfg_s), index, *value,
				be->roa.nb_tun_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_ROA_TUNCFG_COMPARE:
			rv = do_compare_indexes(be->roa.v6.tuncfg,
				sizeof(struct roa_v6_tuncfg_s), index, *value,
				be->roa.nb_tun_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_ROA_TUNCFG_TUN_LEN:
			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
			break;
		case HW_ROA_TUNCFG_TUN_TYPE:
			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
			break;
		case HW_ROA_TUNCFG_TUN_VLAN:
			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
			break;
		case HW_ROA_TUNCFG_IP_TYPE:
			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
			break;
		case HW_ROA_TUNCFG_IPCS_UPD:
			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
			break;
		case HW_ROA_TUNCFG_IPCS_PRECALC:
			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
			break;
		case HW_ROA_TUNCFG_IPTL_UPD:
			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
			break;
		case HW_ROA_TUNCFG_IPTL_PRECALC:
			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
			break;
		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
				value, get);
			break;
		case HW_ROA_TUNCFG_TX_LAG_IX:
			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
			break;
		case HW_ROA_TUNCFG_RECIRCULATE:
			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
			break;
		case HW_ROA_TUNCFG_PUSH_TUNNEL:
			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
			break;
		case HW_ROA_TUNCFG_RECIRC_PORT:
			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
			break;
		case HW_ROA_TUNCFG_RECIRC_BYPASS:
			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
			break;
		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 6 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
/*
 * Get/set one field of the (single) cached ROA CONFIG record.
 *
 * @param get non-zero reads the cache, zero writes it
 * @return 0 on success, otherwise an error code
 */
static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
				 enum hw_roa_e field, uint32_t *value, int get)
{
	switch (_VER_) {
	case 6:
		switch (field) {
		case HW_ROA_CONFIG_FWD_RECIRCULATE:
			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
			break;
		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
			break;
		case HW_ROA_CONFIG_FWD_TXPORT0:
			get_set(&be->roa.v6.config->fwd_txport0, value, get);
			break;
		case HW_ROA_CONFIG_FWD_TXPORT1:
			get_set(&be->roa.v6.config->fwd_txport1, value, get);
			break;
		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
			break;
		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 6 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->roa.nb_lag_entries;
+	if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
/* ROA v6 tunnel header template: 16 x 32-bit words of raw header data,
 * written word-by-word through HW_ROA_TUNHDR. */
struct roa_v6_tunhdr_s {
	uint32_t tunnel_hdr[4 * 4];
};
+
/* ROA v6 tunnel configuration record (one per tunnel category). */
struct roa_v6_tuncfg_s {
	uint32_t tun_len;           /* tunnel header length */
	uint32_t tun_type;          /* tunnel encapsulation type */
	uint32_t tun_vlan;
	uint32_t ip_type;
	uint32_t ipcs_upd;          /* update IP checksum */
	uint32_t ipcs_precalc;      /* precalculated IP checksum */
	uint32_t iptl_upd;          /* update IP total-length */
	uint32_t iptl_precalc;      /* precalculated IP total-length */
	uint32_t vxlan_udp_len_upd; /* update VXLAN UDP length */
	uint32_t tx_lag_ix;         /* index into the LAG config table */
	uint32_t recirculate;
	uint32_t push_tunnel;
	uint32_t recirc_port;
	uint32_t recirc_bypass;
};
+
/* ROA v6 global forwarding configuration (single record). */
struct roa_v6_config_s {
	uint32_t fwd_recirculate;
	uint32_t fwd_normal_pcks;
	uint32_t fwd_txport0;
	uint32_t fwd_txport1;
	uint32_t fwd_cellbuilder_pcks;
	uint32_t fwd_non_normal_pcks;
};
+
/* ROA v6 LAG configuration entry. */
struct roa_v6_lagcfg_s {
	uint32_t txphy_port; /* TX PHY port for this LAG entry */
};

/* ROA v6 cache layout; all pointers reference the module's single
 * contiguous cache allocation (see hw_mod_roa_alloc()). */
struct hw_mod_roa_v6_s {
	struct roa_v6_tunhdr_s *tunhdr;
	struct roa_v6_tuncfg_s *tuncfg;
	struct roa_v6_config_s *config;
	struct roa_v6_lagcfg_s *lagcfg;
};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_present(be->be_dev);
+}
+
/*
 * Allocate the SLC module's shadow cache: one RCP record per flow
 * category.
 *
 * @return 0 on success, -1 on allocation failure, or an error code for
 *         an unsupported version
 */
int hw_mod_slc_alloc(struct flow_api_backend_s *be)
{
	_VER_ = be->iface->get_slc_version(be->be_dev);
	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	switch (_VER_) {
	case 1:
		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
			&be->slc.v1.rcp,
			be->max_categories,
			sizeof(struct slc_v1_rcp_s)))
			return -1;
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	if (be->slc.base) {
+		free(be->slc.base);
+		be->slc.base = NULL;
+	}
+}
+
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct hw_mod_slc_v1_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct hw_mod_slc_v1_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct hw_mod_slc_v1_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_slc_lr_present(be->be_dev);
+}
+
/*
 * Allocate the SLC LR module's shadow cache: one RCP record per flow
 * category.
 *
 * @return 0 on success, -1 on allocation failure, or an error code for
 *         an unsupported version
 */
int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
{
	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	switch (_VER_) {
	case 2:
		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
			&be->slc_lr.v2.rcp,
			be->max_categories,
			sizeof(struct slc_lr_v2_rcp_s)))
			return -1;
		break;
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	if (be->slc_lr.base) {
+		free(be->slc_lr.base);
+		be->slc_lr.base = NULL;
+	}
+}
+
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hw_mod_slc_lr_v2_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct hw_mod_slc_lr_v2_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct hw_mod_slc_lr_v2_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
/* SLC LR v2 recipe: tail-slicing parameters for one flow category. */
struct slc_lr_v2_rcp_s {
	uint32_t tail_slc_en; /* enable tail slicing */
	uint32_t tail_dyn;    /* dynamic offset selector */
	int32_t tail_ofs;     /* signed tail offset */
	uint32_t pcap;
};

/* SLC LR v2 cache layout: one RCP table. */
struct hw_mod_slc_lr_v2_s {
	struct slc_lr_v2_rcp_s *rcp;
};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
/* SLC v1 recipe: tail-slicing parameters for one flow category. */
struct slc_v1_rcp_s {
	uint32_t tail_slc_en; /* enable tail slicing */
	uint32_t tail_dyn;    /* dynamic offset selector */
	int32_t tail_ofs;     /* signed tail offset */
	uint32_t pcap;
};

/* SLC v1 cache layout: one RCP table. */
struct hw_mod_slc_v1_s {
	struct slc_v1_rcp_s *rcp;
};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_tpe_present(be->be_dev);
+}
+
/*
 * Allocate the TPE module's shadow caches.
 *
 * Queries the backend for the module version and all resource counts
 * (RCP categories, IFR categories for v2+, copy writers, replacer depth
 * and extension categories), then allocates every per-version table in
 * one contiguous area via callocate_mod().
 *
 * @return 0 on success, -1 on allocation failure, or an error code for
 *         a bad resource count / unsupported version
 */
int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
{
	int nb;

	_VER_ = be->iface->get_tpe_version(be->be_dev);
	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
	       VER_MINOR(_VER_));

	nb = be->iface->get_nb_tpe_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
	be->tpe.nb_rcp_categories = (uint32_t)nb;

	/* IFR tables only exist from version 2 onwards. */
	be->tpe.nb_ifr_categories = 0;
	if (_VER_ > 1) {
		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
		if (nb <= 0)
			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
		be->tpe.nb_ifr_categories = (uint32_t)nb;
	}

	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
	be->tpe.nb_cpy_writers = (uint32_t)nb;

	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
	be->tpe.nb_rpl_depth = (uint32_t)nb;

	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
	if (nb <= 0)
		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;

	switch (_VER_) {
	case 1:
		/* v1: 8 tables; CPY has one RCP set per copy writer. */
		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpp_v0_rcp_s),
			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_ins_v1_rcp_s),
			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpl_v2_rcp_s),
			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
			sizeof(struct tpe_v1_rpl_v2_ext_s),
			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
			sizeof(struct tpe_v1_rpl_v2_rpl_s),
			&be->tpe.v1.cpy_rcp,
			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_cpy_v1_rcp_s),
			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_hfu_v1_rcp_s),
			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_csu_v0_rcp_s)))
			return -1;
		break;
	case 2:
		/* v2: v1 tables plus the two IFR tables (10 in total). */
		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpp_v0_rcp_s),
			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
			sizeof(struct tpe_v2_ifr_v1_rcp_s),
			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_ins_v1_rcp_s),
			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_rpl_v2_rcp_s),
			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
			sizeof(struct tpe_v1_rpl_v2_ext_s),
			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
			sizeof(struct tpe_v1_rpl_v2_rpl_s),
			&be->tpe.v2.cpy_rcp,
			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_cpy_v1_rcp_s),
			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_hfu_v1_rcp_s),
			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
			sizeof(struct tpe_v1_csu_v0_rcp_s)))
			return -1;
		break;
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	if (be->tpe.base) {
+		free(be->tpe.base);
+		be->tpe.base = NULL;
+	}
+}
+
/*
 * Reset the TPE module: zero the shadow cache and flush every table to
 * hardware.  Flush errors are OR-accumulated so any failure is reported.
 *
 * @return 0 on success, non-zero if any flush failed
 */
int hw_mod_tpe_reset(struct flow_api_backend_s *be)
{
	int err = 0;

	/* Zero entire cache area */
	ZERO_MOD_CACHE(&be->tpe);

	NT_LOG(DBG, FILTER, "INIT TPE\n");
	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);

	/* IFR tables only exist in version 2. */
	if (_VER_ == 2) {
		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
	}

	return err;
}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get/set one field of a cached RPP RCP record, or run a PRESET_ALL /
 * FIND / COMPARE pseudo-operation on the record.
 *
 * @param get non-zero reads the cache, zero writes it
 * @return 0 on success, otherwise an error code
 */
static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2: /* v1 and v2 share the RPP RCP layout (v1 union view) */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
			break;
		case HW_TPE_FIND:
			rv = find_equal_index(be->tpe.v1.rpp_rcp,
				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_RPP_RCP_EXP:
			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
/*
 * Get/set one field of a cached INS RCP record, or run a PRESET_ALL /
 * FIND / COMPARE pseudo-operation on the record.
 *
 * @param get non-zero reads the cache, zero writes it
 * @return 0 on success, otherwise an error code
 */
static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
				  enum hw_tpe_e field, uint32_t index,
				  uint32_t *value, int get)
{
	int rv = 0;
	if (index >= be->tpe.nb_rcp_categories)
		return error_index_too_large(__func__);
	switch (_VER_) {
	case 1:
	case 2: /* v1 and v2 share the INS RCP layout (v1 union view) */
		switch (field) {
		case HW_TPE_PRESET_ALL:
			if (get)
				return error_unsup_field(__func__);
			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
			       sizeof(struct tpe_v1_ins_v1_rcp_s));
			break;
		case HW_TPE_FIND:
			rv = find_equal_index(be->tpe.v1.ins_rcp,
				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, value, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_COMPARE:
			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
				be->tpe.nb_rcp_categories, get, __func__);
			if (rv != 0)
				return rv;
			break;
		case HW_TPE_INS_RCP_DYN:
			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
			break;
		case HW_TPE_INS_RCP_OFS:
			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
			break;
		case HW_TPE_INS_RCP_LEN:
			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
			break;

		default:
			return error_unsup_field(__func__);
		}
		break;
	/* end case 1 */
	default:
		return error_unsup_ver(__func__, _MOD_, _VER_);
	}

	return 0;
}
+
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush RPL RCP entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Shared accessor for one RPL RCP entry in be->tpe.v1: get != 0 reads,
+ * get == 0 writes. Handles PRESET_ALL / FIND / COMPARE pseudo fields too.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		/* TPE versions 1 and 2 share the v2 RPL record layout */
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL RCP field (see hw_mod_tpe_rpl_rcp_mod). */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL RCP field (see hw_mod_tpe_rpl_rcp_mod). */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush RPL EXT entries [start_idx, start_idx + count) to hardware.
+ * Bounded by nb_rpl_ext_categories; count == ALL_ENTRIES selects all.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Shared accessor for one RPL EXT entry: get != 0 reads, get == 0 writes.
+ * Handles PRESET_ALL / FIND / COMPARE pseudo fields too.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			/* meta_rpl_len is SW-only (see tpe_v1_rpl_v2_ext_s) */
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPL EXT field (see hw_mod_tpe_rpl_ext_mod). */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Read one RPL EXT field (see hw_mod_tpe_rpl_ext_mod). */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush RPL replacement-data entries [start_idx, start_idx + count) to
+ * hardware. Bounded by nb_rpl_depth; count == ALL_ENTRIES selects all.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Shared accessor for one RPL replacement entry. Unlike the other TPE
+ * accessors, HW_TPE_RPL_RPL_VALUE moves a 4 x 32-bit block, so *value
+ * must point at least 4 words for that field.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* whole 16-byte replacement word, not a scalar */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write one RPL replacement entry. Takes a pointer (not a scalar like the
+ * sibling *_set functions) because VALUE spans 4 x 32-bit words.
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Read one RPL replacement entry (see hw_mod_tpe_rpl_rpl_mod). */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush CPY RCP entries [start_idx, start_idx + count) to hardware.
+ * The table is nb_cpy_writers * nb_rcp_categories entries long;
+ * count == ALL_ENTRIES selects all of it.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Shared accessor for one CPY RCP entry: get != 0 reads, get == 0 writes.
+ * Index range is writers * categories (flat table across all writers).
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CPY RCP field (see hw_mod_tpe_cpy_rcp_mod). */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CPY RCP field (see hw_mod_tpe_cpy_rcp_mod). */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush HFU RCP entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Shared accessor for one HFU RCP entry: get != 0 reads, get == 0 writes.
+ * Fields fall into groups: length updaters A/B/C, TTL update, and
+ * checksum/offset info (cs_inf .. inner_l4_ofs).
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* length updater A */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* length updater B */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* length updater C */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL update */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* checksum / protocol / offset info */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HFU RCP field (see hw_mod_tpe_hfu_rcp_mod). */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HFU RCP field (see hw_mod_tpe_hfu_rcp_mod). */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush CSU RCP entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Shared accessor for one CSU RCP entry: get != 0 reads, get == 0 writes.
+ * Covers the four checksum commands (outer/inner x L3/L4).
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CSU RCP field (see hw_mod_tpe_csu_rcp_mod). */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one CSU RCP field (see hw_mod_tpe_csu_rcp_mod). */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/* RPP recipe (v0); accessed via HW_TPE_RPP_RCP_* fields. */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS recipe (v1); accessed via HW_TPE_INS_RCP_{DYN,OFS,LEN}. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL recipe (v2); accessed via HW_TPE_RPL_RCP_* fields. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL extension entry (v2). */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL replacement data: one 4 x 32-bit word per entry. */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY recipe (v1); accessed via HW_TPE_CPY_RCP_* fields. */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* HFU recipe (v1): length updaters A/B/C, TTL update and
+ * checksum/offset info; accessed via HW_TPE_HFU_RCP_* fields.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU recipe (v0): checksum command per outer/inner L3/L4 header. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* TPE version 1: pointers to the per-submodule recipe tables. */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP IFR recipe (v2, new in version 2): enable flag and MTU. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR recipe (v2, new in version 2): enable flag and MTU. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* TPE version 2: the v1 tables plus the two IFR recipe tables. */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian storage types — byte order is by convention only; the
+ * typedefs do not enforce it.
+ */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks.
+ * FLOW_MARK_MAX is the largest mark usable by applications; the value
+ * above it is reserved for LACP traffic.
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Queue id pair; 'hw_id' is presumably the FPGA-side queue number —
+ * NOTE(review): confirm mapping semantics against queue setup code.
+ */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Match item types; each value maps onto the RTE_FLOW_ITEM_TYPE_* of the
+ * same name, except FLOW_ELEM_TYPE_TUNNEL (see below).
+ */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather an restoration API device specific
+	 * extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Action types; the trailing comment names the 'conf' structure each
+ * action expects (or -none-).
+ */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather an restoration API device
+	 * specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* MAC address; packed so it can overlay wire data. */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format eth_addr as "XX:XX:XX:XX:XX:XX" (upper-case hex) into buf,
+ * truncating to at most 'size' bytes including the NUL terminator.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header
+ * All protocol header structs below are #pragma pack(1) so their layout
+ * matches the on-wire format; be16_t/be32_t fields hold big-endian data.
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data;
+	uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry of a match-pattern list (mirrors struct rte_flow_item). */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+/* RSS configuration (mirrors struct rte_flow_action_rss). */
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ * Result/request structure for flow queries; the *_set bits indicate
+ * which counters below hold valid data, 'reset' requests clear-on-read.
+ */
+struct flow_query_count {
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ * 'items' is a pre-parsed view of 'data', at most
+ * RAW_ENCAP_DECAP_ELEMS_MAX entries.
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ * Each value names a packet-header field or meta resource that the action
+ * can read from (src) or write to (dst); mirrors the rte_flow field IDs.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Exactly one union member is meaningful, selected by 'field':
+ * FLOW_FIELD_VALUE -> value[], FLOW_FIELD_POINTER -> pvalue,
+ * any packet-header field -> the anonymous level/offset struct.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action (cf. rte_flow_modify_op):
+ * how the src operand is combined into the dst field.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0,
+	FLOW_MODIFY_ADD,
+	FLOW_MODIFY_SUB,
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* SET/ADD/SUB applied to dst. */
+	struct flow_action_modify_data dst; /* Field being modified. */
+	struct flow_action_modify_data src; /* Source field or value. */
+	uint32_t width; /* Number of bits to transfer. */
+};
+
+/*
+ * One element of a flow action list. 'conf' points at the matching
+ * flow_action_* configuration struct for 'type' (may be NULL for
+ * actions that take no configuration).
+ */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf;
+};
+
+/* Result classification reported in struct flow_error. */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+struct flow_error {
+	enum flow_error_e type;
+	/* Points at a static string table entry; callers must not free it. */
+	const char *message;
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write one LAG table entry at 'index'. */
+	FLOW_LAG_SET_ALL, /* Write entry (index & 3) of every 4-entry block. */
+	FLOW_LAG_SET_BALANCE, /* Distribute blocks between two ports by 'value' %. */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support.
+ * Fields marked "BE" are stored big-endian (network byte order).
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		/* Same storage as v6, viewed as two 64-bit words per address. */
+		struct {
+			uint64_t src_ip[2];
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	/* Selects the valid union member (presumably 4 or 6 — confirm). */
+	int ipversion;
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle; /* opaque handle returned by flow_create() */
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/* Attach/create the flow device for one port; returns NULL on failure. */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ * All functions return 0 on success and a negative value on failure,
+ * with details written to *error when supplied.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+/* Offload a flow rule; returns NULL on failure (see *error). */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+/* Destroy all flows on the device. */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 6/8] net/ntnic: adds flow logic
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-09-05 14:54   ` [PATCH v15 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-09-05 14:54   ` Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v11:
* Replace stdatomic by compiler build-in atomic
* Fix dereferencing type-punned pointer in macro
* Inner offset must exclude VLAN bytes
v12:
* Fix error=array-bounds
v14:
* Fixed code checking for TPE resource reuse
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1307 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5130 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10091 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..8cdf15663d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug name for each resource type, indexed by enum res_type_e.
+ * The order here must stay in sync with the enum declaration — the
+ * RES_* markers in front of every entry flag the expected index.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the singly linked list of all known NIC flow devices. */
+static struct flow_nic_dev *dev_base;
+/* Serializes access to dev_base and the per-adapter device state. */
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Human-readable message for each enum flow_nic_err_msg_e value.
+ * The array index must stay in sync with the enum — the numeric
+ * comment in front of every entry flags the expected index
+ * (consumed by flow_nic_set_error() below).
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed due to too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+	/* 39 */ { "Invalid priority value" },
+};
+
+/*
+ * Fill *error with the static message matching the internal error code
+ * 'msg'. A NULL 'error' makes this a no-op, so callers may pass their
+ * (optional) user-supplied error pointer straight through.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+	if (error) {
+		error->message = err_msg[msg].message;
+		/* Everything except ERR_SUCCESS is reported as a general error. */
+		error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+			      FLOW_ERROR_GENERAL;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate one free resource of the given type, probing indices in
+ * steps of 'alignment' (so the result is always a multiple of it).
+ * Returns the allocated index with its refcount set to 1, or -1 when
+ * no aligned index is free.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+			i += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+			flow_nic_mark_resource_used(ndev, res_type, i);
+			ndev->res[res_type].ref[i] = 1; /* initial reference */
+			return i;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Claim the specific resource index 'idx' (refcount set to 1).
+ * Returns 0 on success, -1 when the index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Allocate 'num' contiguous resources whose first index is a multiple
+ * of 'alignment'. Returns the first index (all entries marked used with
+ * refcount 1) or -1 when no contiguous run is available.
+ *
+ * NOTE(review): resource_count - (num - 1) underflows if num exceeds
+ * resource_count + 1 and the count is unsigned — presumably callers
+ * never request more than the pool size; confirm.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* First slot free — check the remaining num-1 slots. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/* Return a single resource index to the free pool (no refcount check). */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an extra reference on an already-allocated resource.
+ * Returns 0 on success, -1 if the refcount is saturated at UINT32_MAX.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	/* Refuse to wrap the saturated counter. */
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Drop one reference; the resource is freed when the count reaches 0.
+ * Returns the truth value of the remaining refcount, i.e. 0 means the
+ * resource has been released.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Return the first in-use index >= idx_start for the given resource
+ * type, or -1 when no further used entries exist. Used for iteration.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
+			i++) {
+		if (flow_nic_is_resource_used(ndev, res_type, i))
+			return i;
+	}
+	return -1;
+}
+
+/*
+ * Allocate a number flow resources.
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ *
+ * NOTE(review): the failure test below relies on resource[].index being
+ * a signed type so that the -1 sentinel survives the assignment —
+ * confirm against the struct declaration in flow_api.h.
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	if (count > 1) {
+		/* Contiguous */
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+	} else {
+		fh->resource[res_type].index =
+			flow_nic_alloc_resource(ndev, res_type, alignment);
+	}
+
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim the specific resource index 'idx' for a flow handle (count 1).
+ * Returns 0 on success, non-zero when the index is already in use.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	if (fh->resource[res_type].index < 0)
+		return -1;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program HSH recipe 'hsh_idx' for a predefined hash algorithm.
+ * HASH_ALGO_5TUPLE builds an IPv6 5-tuple hash (src/dst IP + L4 ports)
+ * with the adaptive IPv4 mask bit set so IPv4 traffic hashes too;
+ * any other value leaves the recipe all-zero, which is round-robin.
+ * Always returns 0 (hw_mod_hsh_rcp_set return values are not checked
+ * here — NOTE(review): consider propagating them like
+ * flow_nic_set_hasher_fields does).
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	/* Start from a cleared recipe. */
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		/* QW0/QW4 select the 16-byte src/dst IP words (offset -16 = src). */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		/* W8 picks up the L4 header (ports). */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Enable words 0-8 in the hash; word 9 is masked out. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program HSH recipe 'hsh_idx' from the RSS field-flag combination in
+ * 'f'. Only three exact flag combinations are supported (C-VLAN,
+ * outermost dst IP, innermost src IP); anything else fails.
+ * Returns 0 on success, -1 on unsupported flags or on a backend write
+ * failure (all hw_mod_hsh_rcp_set() results are accumulated in 'res').
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	/* Start from a cleared recipe with load distribution type 2. */
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * The first VLAN tag is used as the C-VLAN, which is only
+		 * valid when a single VLAN tag is present.
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE,
+					  hsh_idx, 0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS,
+					  hsh_idx, 0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE,
+					  hsh_idx, 0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS,
+					  hsh_idx, 0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner (depth = 1) src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Find the eth device for (adapter_no, port), or NULL if either the
+ * adapter or the port is unknown.
+ * NOTE(review): walks dev_base without taking base_mtx — presumably
+ * callers hold it or the lists are stable at call time; confirm.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *nic_dev = dev_base;
+
+	while (nic_dev) {
+		if (nic_dev->adapter_no == adapter_no)
+			break;
+		nic_dev = nic_dev->next;
+	}
+
+	if (!nic_dev)
+		return NULL;
+
+	struct flow_eth_dev *dev = nic_dev->eth_base;
+
+	while (dev) {
+		if (port == dev->port)
+			return dev;
+		dev = dev->next;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the NIC flow device for an adapter number, or NULL if not found.
+ * Traverses dev_base unlocked; the LAG entry points below call it while
+ * holding base_mtx.
+ */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = dev_base;
+
+	while (ndev) {
+		if (adapter_no == ndev->adapter_no)
+			break;
+		ndev = ndev->next;
+	}
+	return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Enable LAG pairing of physical ports per the bit mask.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
+	 * and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block RX traffic from the MAC ports selected by the bit mask.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one ROA LAG table entry (TX phy port) and flush it to hardware. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Configure the LAG table; see enum flow_lag_cmd for the sub-commands.
+ * Returns 0 on success, -1 for an unknown adapter or command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		/* Write slot (index mod 4) of every 4-entry hash block. */
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/*
+		 * NOTE(review): entries are written as 1 or 2 while the table
+		 * above speaks of ports P0/P1 — presumably the hardware uses
+		 * a 1-based port encoding; confirm against the ROA spec.
+		 */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow specification without installing it.
+ * Only the inline profile is implemented; vSwitch is rejected with -1.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Create a flow on an eth-port device.
+ * Only the inline profile is implemented; vSwitch is rejected with NULL.
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/*
+ * Destroy a single flow previously created on this eth-port device.
+ * Only the inline profile is implemented; vSwitch is rejected with -1.
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Destroy every flow on this eth-port device.
+ * Only the inline profile is implemented; vSwitch is rejected with -1.
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Query a flow (e.g. counters) via @action; the result is returned
+ * through @data/@length.  Only the inline profile is implemented;
+ * vSwitch is rejected with -1.
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	const enum flow_eth_dev_profile profile = dev->ndev->flow_profile;
+
+	if (profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Push an eth-port device onto the NIC's singly linked eth-port list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink @eth_dev from the NIC's eth-port list.
+ * Returns 0 when the device was found and removed, -1 otherwise.
+ * Caller keeps ownership of @eth_dev; this only updates list links.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	/* Walk via the address of each 'next' pointer - no prev needed */
+	for (; *link; link = &(*link)->next) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Tear down everything created on a NIC device: delete all eth-port
+ * devices (which destroys their flows), drain any leftover flows as an
+ * error-recovery measure, shut down inline-profile flow management and
+ * release the KM/KCC resource managers.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+		/*
+		 * Cleanup: the previous nested "#if defined(FLOW_DEBUG)"
+		 * around this log line was redundant - this whole loop is
+		 * already inside "#ifdef FLOW_DEBUG".
+		 */
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset the NIC device for @adapter_no: tear down all eth-ports/flows
+ * and reset the hardware backend modules.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (ndev == NULL)
+		return -1;
+
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		/*
+		 * BUGFIX: base_mtx was released around flow_delete_eth_dev()
+		 * (which takes ndev->mtx itself); re-acquire it here so the
+		 * unlocks on the exit paths below operate on a mutex this
+		 * thread actually holds.
+		 */
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/* BUGFIX: base_mtx is held here - release before bailing out */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Enable each allocated queue in QSL (4 enable bits per word) */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	if (eth_dev)
+		free(eth_dev);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an externally allocated RX queue on an eth-port device and
+ * enable it in the QSL QEN table (4 queue-enable bits per table word).
+ * Returns 0 on success, -1 if the device's queue table is full.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t qen_value = 0;
+
+	/*
+	 * BUGFIX: bound-check before writing - rx_queue[] has
+	 * FLOW_MAX_QUEUES + 1 slots (0th reserved for the exception path)
+	 * and was previously written unconditionally.
+	 */
+	if (eth_dev->num_queues >= FLOW_MAX_QUEUES + 1) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: too many queues on eth device\n");
+		return -1;
+	}
+
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+	return 0;
+}
+
+/*
+ * Delete an eth-port device: destroy every flow created on it, clear its
+ * unmatched-queue (UNMQ) setup in QSL, disable its RX queues, free the
+ * queue resources (unless scatter-gather owns them) and finally unlink
+ * and free the device.  Returns 0 on success, -1 on error.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/*
+				 * BUGFIX: release ndev->mtx on this error
+				 * path - it was previously leaked, leaving
+				 * the NIC device permanently locked.
+				 */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Clear this device's queue-enable bits in QSL */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper: look up a tunnel definition by flow_stat_id and vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Set up the allocation state for one resource type: a byte-aligned
+ * allocation bitmap immediately followed by per-element reference
+ * counters, carved out of a single zeroed allocation.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	struct hw_mod_resource_s *res = &ndev->res[res_type];
+	const size_t bm_bytes = BIT_CONTAINER_8_ALIGN(count);
+
+	assert(res->alloc_bm == NULL);
+	/* allocate bitmap and ref counter */
+	res->alloc_bm = calloc(1, bm_bytes + count * sizeof(uint32_t));
+	if (res->alloc_bm == NULL)
+		return -1;
+
+	/* ref counters live right after the bitmap in the same block */
+	res->ref = (uint32_t *)&res->alloc_bm[bm_bytes];
+	res->resource_count = count;
+	return 0;
+}
+
+/*
+ * Release the allocation state for one resource type.  The ref-counter
+ * array lives inside the same allocation as the bitmap, so a single
+ * free() releases both.  Pointers are reset so the asserts in
+ * init_resource_elements() hold if the slot is ever re-initialized.
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(ndev->res[res_type].alloc_bm);
+	ndev->res[res_type].alloc_bm = NULL;
+	ndev->res[res_type].ref = NULL;
+	ndev->res[res_type].resource_count = 0;
+}
+
+/* Insert a NIC device at the head of the global list (guarded by base_mtx). */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global device list (guarded by base_mtx).
+ * Returns 0 when found and removed, -1 otherwise.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	int rc = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	/* Walk via the address of each 'next' pointer - no prev needed */
+	for (struct flow_nic_dev **link = &dev_base; *link;
+			link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			rc = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+	return rc;
+}
+
+/*
+ * Register a hardware backend and create the flow_nic_dev instance for
+ * an adapter: verifies the backend ops version, initializes the backend,
+ * sizes every resource pool from the backend's reported capabilities and
+ * inserts the new device into the global NIC list.
+ * Returns the new device, or NULL on any failure (resources released via
+ * flow_api_done()).
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* Only backend interface version 1 is supported */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* Cap the addressable in-ports at 256 */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Tear down a flow_nic_dev created by flow_api_create(): reset the NIC,
+ * release all resource pools, shut down the backend, remove the device
+ * from the global list and free it.  Safe to call with NULL.
+ * Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+
+	if (ndev == NULL)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int res = 0; res < RES_COUNT; res++)
+		done_resource_elements(ndev, res);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the opaque backend device handle of a NIC, or NULL if @ndev is NULL. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev)
+		return ndev->be.be_dev;
+
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Return the number of RX queues on the eth-port device for
+ * (adapter_no, port_no), or -1 when no such device exists.
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* BUGFIX: avoid NULL dereference when the port was never opened */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the hardware queue id of RX queue @queue_no on the eth-port
+ * device for (adapter_no, port_no), or -1 when no such device exists.
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+
+	/* BUGFIX: avoid NULL dereference when the port was never opened */
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Fetch FLM statistics into @data (up to @size entries).
+ * Only implemented for the inline profile; returns -1 otherwise.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/* Per-resource-type allocation state kept by a flow NIC device. */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+/* Individual hash-field bits; bit positions mirror DPDK's RSS defines */
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+/* Convenience mask: hash on any IPv4/IPv6 header variant */
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields; /* bitwise OR of the NT_ETH_RSS_* bits above */
+};
+
+/* One open eth-port device; created via flow_get_eth_dev() */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next eth-port on the same NIC */
+};
+
+/* Hash algorithms selectable via flow_nic_set_hasher() */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared;
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx;
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/*
+ * Error codes reported through struct flow_error; translated to
+ * user-readable messages by flow_nic_set_error().
+ */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG /* sentinel: number of defined error codes */
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+/*
+ * Set bit @x in the byte-array bitmap @arr.
+ * Arguments are cached in temporaries so each is evaluated exactly once.
+ */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/*
+ * Clear bit @x in the byte-array bitmap @arr.
+ * FIX/consistency: like flow_nic_set_bit, both arguments are cached in
+ * temporaries so @arr is properly parenthesized and each argument is
+ * evaluated exactly once (CERT PRE01-C / PRE31-C).
+ */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/*
+ * Test bit @x in the byte-array bitmap @arr; non-zero when set.
+ * Uses a GNU statement expression (matching the original).
+ * FIX: @arr is cached in a temporary so the argument is properly
+ * parenthesized and evaluated exactly once (CERT PRE01-C / PRE31-C).
+ */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		const uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		(_temp_arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/*
+ * Mark resource element @index of type @res_type allocated on @_ndev.
+ * Asserts the element was previously free; logs at DBG level.
+ * Arguments are cached in temporaries for single evaluation.
+ */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/*
+ * Mark resource element @index of type @res_type free again on @_ndev.
+ * Logs at DBG level; no assertion on prior state.
+ */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* 1 when resource element @index of @res_type is allocated, else 0 */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+/*
+ * Resource pool alloc/free/ref-count helpers (implemented in flow_api.c).
+ * NOTE(review): semantics below are inferred from the names - confirm
+ * against the implementations: the alloc functions appear to return an
+ * element index or a negative value on exhaustion.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+/* Allocate resources and record them on a flow handle */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+/* Device lookups; both may return NULL when nothing is registered */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+/* RSS hasher configuration (QSL HSH recipe @hsh_idx) */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+/* Link aggregation configuration (see lag_set_config() in flow_api.c) */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Per-port queue info; defined in flow_api.c */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+/* FLM statistics (inline profile only) */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..82d7f8b1c9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5130 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for a CFN in the FTE enable bitmaps.
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * The FTE entry is only written and flushed when the bitmap changes.
+ * Always returns 0.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned constant: 1 << 31 on a signed int would be undefined */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for a CFN in the FTE enable bitmaps.
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ * The FTE entry is only written and flushed when the bitmap changes.
+ * Always returns 0.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int max_lookups = 4;
+	const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int fte_index =
+		(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+	int fte_field = cfn_index % cat_funcs;
+
+	uint32_t current_bm = 0;
+	/* Unsigned constant: 1 << 31 on a signed int would be undefined */
+	uint32_t fte_field_bm = 1U << fte_field;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+	uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+			    (~fte_field_bm & current_bm);
+
+	if (current_bm != final_bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fte_index, final_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+					 1);
+	}
+
+	return 0;
+}
+
+/* Map an RX queue id to its hardware queue id; -1 when the id is unknown. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int q = 0;
+
+	while (q < dev->num_queues) {
+		if (dev->rx_queue[q].id == id)
+			return dev->rx_queue[q].hw_id;
+		++q;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Bring up the FLM SDRAM: reset the control register, configure split SDRAM
+ * usage, then poll for DDR4 calibration to complete before programming the
+ * flow scrubber and timeout registers.
+ * Returns 0 on success, -1 when calibration never completes
+ * (~1,000,000 polls at 1 us each).
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reset the FLM SDRAM contents: disable FLM and clear all flow categories,
+ * wait for the module to go idle, run SDRAM initialization to completion,
+ * then (re)enable FLM according to "enable". The steps must happen in this
+ * order so no lookup can run against a half-initialized memory.
+ * Returns 0 on success, -1 when the module never goes idle or the
+ * initialization never completes.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Category 0 is left untouched; preset every other recipe to zero */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Poll for the initialization-done flag, like the idle wait above */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
+/*
+ * Compact identity of an FLM flow type (action set). The fields are packed
+ * into one 64-bit value ("data") so two flow definitions can be compared
+ * for flow-type reuse with a single integer compare
+ * (see flm_flow_learn_prepare).
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;	/* slot holds a valid ident */
+			uint64_t drop : 1;	/* no destinations: drop */
+			uint64_t ltx_en : 1;	/* local TX (physical port) enabled */
+			uint64_t ltx_port : 1;	/* physical destination port */
+			uint64_t queue_en : 1;	/* virtual queue destination enabled */
+			uint64_t queue : 8;	/* destination queue id */
+			uint64_t encap_len : 8;	/* tunnel header length (0 = no encap) */
+			uint64_t encap_vlans : 2;	/* VLAN count in tunnel header */
+			uint64_t encap_ip : 1;	/* 0 = IPv4 tunnel, 1 = otherwise (IPv6) */
+			uint64_t decap_end : 5;	/* header strip end (dyn value) */
+			uint64_t jump_to_group : 8;	/* jump target group, when set */
+			uint64_t pad : 27;
+		};
+		uint64_t data;	/* whole ident as one comparable word */
+	};
+};
+
+/*
+ * FLM key layout selection: dyn/ofs extractor settings for the key words
+ * QW0/QW4 and SW8/SW9, plus outer/inner protocol bits. Packed into one
+ * 64-bit value ("data") so two key definitions can be compared with a
+ * single integer compare (see flm_flow_learn_prepare).
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;	/* QW0 dynamic offset selector */
+			uint64_t qw0_ofs : 8;	/* QW0 byte offset */
+			uint64_t qw4_dyn : 7;	/* QW4 dynamic offset selector */
+			uint64_t qw4_ofs : 8;	/* QW4 byte offset */
+			uint64_t sw8_dyn : 7;	/* SW8 dynamic offset selector */
+			uint64_t sw8_ofs : 8;	/* SW8 byte offset */
+			uint64_t sw9_dyn : 7;	/* SW9 dynamic offset selector */
+			uint64_t sw9_ofs : 8;	/* SW9 byte offset */
+			uint64_t outer_proto : 1;	/* match outer protocol number */
+			uint64_t inner_proto : 1;	/* match inner protocol number */
+			uint64_t pad : 2;
+		};
+		uint64_t data;	/* whole key definition as one comparable word */
+	};
+};
+
+/* Build the flow-type identifier that corresponds to a flow definition. */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ident;
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	memset(&ident, 0x0, sizeof(ident));
+	ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		/* No destinations at all means drop */
+		ident.drop = 1;
+	} else {
+		int dst;
+
+		for (dst = 0; dst < fd->dst_num_avail; ++dst) {
+			if (fd->dst_id[dst].type == PORT_PHY) {
+				ident.ltx_en = 1;
+				ident.ltx_port = fd->dst_id[dst].id;
+			} else if (fd->dst_id[dst].type == PORT_VIRT) {
+				ident.queue_en = 1;
+				ident.queue = fd->dst_id[dst].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ident.encap_len = fd->tun_hdr.len;
+		ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ident;
+}
+
+/* Record dyn/ofs for query word 0 (qw == 0) or query word 4 (qw == 1). */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw != 0) {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+	} else {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+	}
+}
+
+/* Record dyn/ofs for swap word 8 (sw == 0) or swap word 9 (sw == 1). */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw != 0) {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+	} else {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+	}
+}
+
+/*
+ * Per-group FLM state (indexed by FLM RCP): remembers which group 0 CFN and
+ * KM flow type carry the group's miss path, and which flow types (action
+ * sets) have been programmed for the group.
+ */
+struct flm_flow_group_s {
+	int cfn_group0;	/* group 0 CFN carrying the miss path; -1 when unset */
+	int km_ft_group0;	/* KM flow type used by cfn_group0 */
+	struct flow_handle *fh_group0;	/* flow owning the group 0 entry */
+
+	struct flm_flow_key_def_s key_def;	/* key layout shared by all flows in the group */
+
+	int miss_enabled;	/* nonzero once the FLM miss path is programmed */
+
+	/* One slot per FLM flow type; ident.data == 0 means the slot is free */
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	uint32_t cashed_ft_index;	/* most recently matched FT slot (sic: "cached") */
+};
+
+/* Top-level FLM state, stored in flow_nic_dev::flm_res_handle */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (first call) or reset (subsequent calls) the FLM flow handle
+ * state and mark every group unused. On allocation failure *handle is left
+ * NULL and no group initialization is attempted.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	/* calloc may fail; dereferencing NULL here would crash */
+	if (!flm_handle)
+		return;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM flow handle state and clear the caller's pointer. */
+static void flm_flow_handle_remove(void **handle)
+{
+	void *state = *handle;
+
+	*handle = NULL;
+	free(state);
+}
+
+/*
+ * Record CFN, KM flow type, and owning flow handle for an FLM group.
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *group = &flm_handle->groups[group_index];
+
+	group->cfn_group0 = cfn;
+	group->km_ft_group0 = km_ft;
+	group->fh_group0 = fh;
+	group->miss_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * Clear all state for an FLM group and mark it unused again.
+ * Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *group = &flm_handle->groups[group_index];
+
+	memset(group, 0x0, sizeof(*group));
+	group->cfn_group0 = -1;
+
+	return 0;
+}
+
+/*
+ * Fetch the flow handle installed as the miss path (group 0 entry) of an
+ * FLM group. Returns 0 on success, -1 when group_index is out of range.
+ */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	*fh_miss = handle->groups[group_index].fh_group0;
+
+	return 0;
+}
+
+/*
+ * Program the FLM recipe for a group: key word extractors (QW0/QW4,
+ * SW8/SW9), the 10-word key mask, the key id (group_index + 2, matching the
+ * kid returned by flm_flow_learn_prepare), protocol match bits and byte
+ * counter settings. Returns 0 on success, -1 for an invalid group index.
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Reorder the packet mask words into the FLM mask register layout */
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	/* Key ids 0/1 are reserved (meters use kid 1); groups start at 2 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	/* NOTE(review): byte counter offset -20 — presumably L2 overhead
+	 * compensation; confirm against the FLM register spec.
+	 */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down the FLM recipe of a group. If the group's miss path was active,
+ * also undo flm_flow_learn_prepare's setup in reverse order: point the
+ * group 0 CFN back at RCP 0, restore FT UNHANDLED, and clear the group's
+ * KCE enable bit. Returns 0 on success, -1 for an invalid group index.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM learning for a flow in "group_index". On the group's first
+ * use, claim the FLM RCP resource, program the recipe, and switch the
+ * group 0 CFN from FT UNHANDLED to FT MISS with FLM enabled. Then find or
+ * allocate a flow type (FT) slot whose action set matches the flow
+ * definition.
+ *
+ * Out-parameters: kid/ft give the key id and flow type for the learn
+ * record; cfn_to_copy/cfn_to_copy_km_ft are set when a newly allocated FT
+ * needs the group 0 CFN copied; fh_existing is set when an identical
+ * action set is already programmed. Returns 0 on success, -1 on invalid
+ * group, unset CFN, mismatching key definition, or FT exhaustion.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* First flow in this group: enable the FLM miss path */
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* A group's recipe supports exactly one key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the last matched slot often matches again */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* Slots 0/1 are reserved; scan for a free or matching slot */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the FLM resources owned by fh: clear its FT slot in the group and
+ * disable both the group's KM flow type and the flow's own FT on the CFN.
+ * Returns the OR of the set_flow_type_flm results.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	/* Free the FT slot so the action set can be allocated again */
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Cached hardware encoding of each meter profile's dual token buckets.
+ * Rates and sizes are stored as 12-bit mantissa in [11:0] plus 4-bit shift
+ * in [15:12], filled in by flow_mtr_set_profile.
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering requires the FLM module to be present and be variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Maximum number of meter policies/profiles supported. */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	const uint64_t n_profiles = FLM_MTR_PROFILE_SIZE;
+
+	return n_profiles;
+}
+
+/*
+ * Convert a bucket size in bytes (40-bit value) into hardware units of
+ * 2^40 / 10^9 bytes, rounding up. Computed as ceil(value * 10^9 / 2^40),
+ * split into two 20-bit halves to avoid 64-bit overflow.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	const uint64_t giga = 1000000000;
+	uint64_t low = (value & 0xfffff) * giga;
+	uint64_t high = ((value >> 20) & 0xfffff) * giga;
+	uint64_t units = (high >> 20) + (low >> 40);
+
+	/* Round up when either half leaves a remainder */
+	if ((high & 0xfffff) != 0 || (low & 0xffffffffff) != 0)
+		units += 1;
+
+	return units;
+}
+
+/*
+ * Encode a meter profile's two token buckets into the hardware format and
+ * cache them for later meter creation. Rates are converted to units of
+ * 128 bytes/sec and sizes to units of 2^40/10^9 bytes; both are stored as
+ * a 12-bit mantissa with a 4-bit left-shift. Always returns 0.
+ * NOTE(review): profile_id indexes dual_buckets[FLM_MTR_PROFILE_SIZE]
+ * without a bounds check — confirm callers validate it.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* Meter policies require no device programming in this implementation. */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+/* Size of one informational record, in 32-bit words */
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Marker bit set on mtr_stat_s::n_pkt while a counter update is in flight */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Number of individual meters supported by the FLM-based implementation. */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return FLM_MTR_STAT_SIZE;
+}
+
+/* Per-meter statistics; one entry per meter id in mtr_stat_handle. */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;	/* profile buckets; NULL after destroy */
+
+	/* n_pkt/n_bytes are accessed with __atomic builtins; the MSB of
+	 * n_pkt marks an update in progress (see flm_mtr_update_stats)
+	 */
+	uint64_t n_pkt;
+	uint64_t n_bytes;
+	uint64_t n_pkt_base;	/* baseline subtracted on read; reset on clear */
+	uint64_t n_bytes_base;
+	uint64_t stats_mask;	/* zero disables statistics updates */
+};
+
+/* NOTE(review): unlike WORDS_PER_INF_DATA this does not divide by
+ * sizeof(uint32_t), so it counts bytes — confirm the learn-FIFO free count
+ * is in the same unit before renaming.
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Write one learn record to the FLM learn FIFO. When the FIFO lacks space,
+ * drain pending informational records and re-check, retrying up to
+ * FLM_PROG_MAX_RETRY times. Caller must hold the device mutex.
+ * Returns 1 when the FIFO never freed up, otherwise the flush result.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* The shadow value may be stale; refresh before spinning */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			/* Drain informational records while waiting */
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter by learning an FLM entry keyed on the meter id
+ * (sw9 = mtr_id + 1, kid = 1). Bucket A of the profile supplies the
+ * rate/size; vol_idx enables volume statistics when stats_mask is set.
+ * Returns the result of the learn-record write (0 on success).
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	/* Start with a full bucket (low 12 bits of the encoded size) */
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;	/* op 1 here; destroy uses 0, adjust uses 2 */
+	learn_record.eor = 1;
+
+	/* Meter id in little-endian byte order, top bit of id[8] set */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		__atomic_store_n(&mtr_stat[mtr_id].stats_mask, stats_mask, __ATOMIC_RELAXED);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter by writing a learn record with op = 0 for its key
+ * (sw9 = mtr_id + 1, kid = 1). Statistics are cleared first so a zero
+ * stats_mask stops concurrent counter updates for the deleted meter.
+ * Returns the result of the learn-record write (0 on success).
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	__atomic_store_n(&mtr_stat[mtr_id].stats_mask, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_bytes, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_pkt, 0, __ATOMIC_RELAXED);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Adjust a meter's bucket state by writing a learn record with op = 2 and
+ * the given adjust value. Returns the result of the learn-record write.
+ * NOTE(review): mtr_stat->buckets is dereferenced without a NULL check;
+ * it is NULL after flow_mtr_destroy_meter — confirm callers never adjust
+ * a destroyed meter.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (__atomic_load_n(&mtr_stat->stats_mask, __ATOMIC_RELAXED))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ informational records from the
+ * FLM buffer into "data". Caller must hold the device mutex.
+ * Returns the number of whole records read (possibly 0).
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t inf_cnt = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&inf_cnt);
+	if (inf_cnt < WORDS_PER_INF_DATA) {
+		/* The shadow value may be stale; refresh and re-read */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+	}
+
+	uint32_t n_records = inf_cnt / WORDS_PER_INF_DATA;
+
+	if (n_records == 0)
+		return 0;
+
+	if (n_records > MAX_INF_DATA_RECORDS_PER_READ)
+		n_records = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data,
+				       n_records * WORDS_PER_INF_DATA);
+
+	return n_records;
+}
+
+/*
+ * Drain available FLM informational records and fold valid meter statistics
+ * into the per-meter counters. Only the buffer read happens under the
+ * device mutex; the counter stores use the MSB-marker protocol read by
+ * flm_mtr_read_stats. Returns the number of records processed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				__atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+			if (stats_mask) {
+				/* words [1:0] = bytes, [3:2] = packets */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				/*
+				 * Setting the MSB of n_pkt first tells
+				 * readers an update is in flight; the final
+				 * store clears it again.
+				 */
+				__atomic_store_n(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_bytes, nb, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_pkt, np, __ATOMIC_RELAXED);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read a meter's green packet/byte counters relative to the last clear.
+ * Pairs with flm_mtr_update_stats: a set MSB on n_pkt means a writer is
+ * mid-update, and n_pkt is re-read afterwards to make sure n_bytes belongs
+ * to the same update. When stats_mask is 0 (deleted or stats-less meter)
+ * only *stats_mask is written.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			/* Spin while an update is marked in progress */
+			do {
+				pkt_1 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+			} while (pkt_1 & UINT64_MSB);
+			nb = __atomic_load_n(&mtr_stat[id].n_bytes, __ATOMIC_RELAXED);
+			pkt_2 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			/* Move the baselines so the next read starts at zero */
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* IFR MTU recipe index for a port; port N maps to recipe N + 1. */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	uint32_t recipe = port + 1;
+
+	return (uint8_t)recipe;
+}
+
+/* Resolve a port_id to its physical port; UINT8_MAX when unknown. */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	for (struct flow_eth_dev *dev = ndev->eth_base; dev;
+			dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push fh onto the front of the device's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/*
+ * Unlink fh from the device flow list. The four branches cover fh being a
+ * middle node, the head, the tail, or the sole element; fh's own next/prev
+ * pointers are left untouched.
+ */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *next = fh->next;
+	struct flow_handle *prev = fh->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		/* fh was the head */
+		ndev->flow_base = next;
+		next->prev = NULL;
+	} else if (prev) {
+		/* fh was the tail */
+		prev->next = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* fh was the only element */
+		ndev->flow_base = NULL;
+	}
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	if (ndev->flow_base_flm)
+		ndev->flow_base_flm->prev = fh;
+	fh->next = ndev->flow_base_flm;
+	fh->prev = NULL;
+	ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *next = fh_flm->next;
+	struct flow_handle *prev = fh_flm->prev;
+
+	if (next && prev) {
+		prev->next = next;
+		next->prev = prev;
+	} else if (next) {
+		ndev->flow_base_flm = next;
+		next->prev = NULL;
+	} else if (prev) {
+		prev->next = NULL;
+	} else if (ndev->flow_base_flm == fh_flm) {
+		ndev->flow_base_flm = NULL;
+	}
+}
+
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			qw_data[0] = ntohl(qw_data[0]);
+			qw_data[1] = ntohl(qw_data[1]);
+			qw_data[2] = ntohl(qw_data[2]);
+			qw_data[3] = ntohl(qw_data[3]);
+
+			qw_mask[0] = ntohl(qw_mask[0]);
+			qw_mask[1] = ntohl(qw_mask[1]);
+			qw_mask[2] = ntohl(qw_mask[2]);
+			qw_mask[3] = ntohl(qw_mask[3]);
+			qw_data[0] &= qw_mask[0];
+			qw_data[1] &= qw_mask[1];
+			qw_data[2] &= qw_mask[2];
+			qw_data[3] &= qw_mask[3];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+	if (sctp_spec != NULL && sctp_mask != NULL) {
+		if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+				ntohs(sctp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+				ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+	if (tcp_spec != NULL && tcp_mask != NULL) {
+		if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+				ntohs(tcp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+				ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (gtp_spec != NULL && gtp_mask != NULL) {
+		if (gtp_mask->teid) {
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohl(gtp_mask->teid);
+			sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+			set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+			sw_counter += 1;
+		}
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return the given category function (CFN) to its reset state:
+ * preset the CFN entry, clear its KM and FLM enable/category state
+ * and flow types, and wipe any CTE/CTS entries that were enabled.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* CFN: restore the category function entry to its preset values */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn, 0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/* KM: drop this CFN's enable bit, clear its category selection and
+	 * disable every KM flow type on all four keys.
+	 */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn, 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_km(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* FLM: same reset sequence as for KM above */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn, 1);
+
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_flm(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* CTE / CTS: if any enable bits were set, clear them and zero the
+	 * CTS A/B entries belonging to this CFN.
+	 */
+	{
+		uint32_t cte_bm = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte_bm);
+
+		if (cte_bm != 0) {
+			/* Two categories (A/B) share each CTS entry, hence
+			 * the rounded-up half of cts_num per CFN.
+			 */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int entry = 0; entry < cts_offset; ++entry) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + entry,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + entry,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Populate an FLM-type flow handle from an interpreted flow definition:
+ * resolve the L4 protocol number, copy the key data and extract the
+ * NAT/DSCP/GTP values carried by modify-field actions.
+ * Returns 0 on success, -1 if fh is not an FLM handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	/* Map the matched L4 protocol to its IANA protocol number; the
+	 * outer L4 protocol wins, otherwise the tunneled one is used,
+	 * and 0 means no L4 protocol was matched.
+	 */
+	uint8_t prot;
+
+	switch (fd->l4_prot) {
+	case PROT_L4_TCP:
+		prot = 6;
+		break;
+	case PROT_L4_UDP:
+		prot = 17;
+		break;
+	case PROT_L4_SCTP:
+		prot = 132;
+		break;
+	case PROT_L4_ICMP:
+		prot = 1;
+		break;
+	default:
+		switch (fd->tunnel_l4_prot) {
+		case PROT_TUN_L4_TCP:
+			prot = 6;
+			break;
+		case PROT_TUN_L4_UDP:
+			prot = 17;
+			break;
+		case PROT_TUN_L4_SCTP:
+			prot = 132;
+			break;
+		case PROT_TUN_L4_ICMP:
+			prot = 1;
+			break;
+		default:
+			prot = 0;
+			break;
+		}
+		break;
+	}
+
+	fh->flm_prot = prot;
+
+	/* Ten 32-bit words of FLM key material */
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Pull per-flow values out of the modify-field actions */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		switch (fd->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			/* RQI is bit 6, QFI the low 6 bits of the same byte */
+			fh->flm_rqi =
+				(fd->modify_field[idx].value8[0] >> 6) & 0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid = ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		default:
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build a single FLM learn record from the flow handle and apply it via
+ * flow_flm_apply(). flm_op selects the learn operation (only the low 4
+ * bits are used) and flm_ft is the FLM flow type to program.
+ * Returns the result of flow_flm_apply(), or -1 if fh is not an FLM handle.
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/* The 10 key words are stored into the record in reverse order:
+	 * flm_data[9] becomes qw0[0] .. flm_data[0] becomes sw9.
+	 */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+
+	/* Up to four meter ids are packed into learn_record.mbr_idx via
+	 * overlay structs: ids 0/1 go into the a/b fields of the overlay at
+	 * offset 0, ids 2/3 into the overlay at byte offset 7. NOTE(review):
+	 * mbr_id1_ptr/mbr_id2_ptr (and 3/4) intentionally alias the same
+	 * address — presumably a and b are non-overlapping bitfields of
+	 * struct flm_v17_mbr_idx_overlay; confirm against its definition.
+	 */
+	struct flm_v17_mbr_idx_overlay *mbr_id1_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id2_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id3_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	struct flm_v17_mbr_idx_overlay *mbr_id4_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	if (mtr_ids) {
+		mbr_id1_ptr->a = mtr_ids[0];
+		mbr_id2_ptr->b = mtr_ids[1];
+		mbr_id3_ptr->a = mtr_ids[2];
+		mbr_id4_ptr->b = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	/* NAT is enabled whenever either a NAT address or port was set */
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1; /* end-of-record marker */
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Resolve the KM flow-type and KM category (RCP) resources for a new
+ * filter handle. When an identical flow already exists its resources are
+ * referenced instead of allocated; otherwise an existing compatible FT/RCP
+ * is reused if possible, or fresh resources are allocated.
+ * On success fills *setup_km, *setup_km_ft and *setup_km_rcp and returns 0;
+ * returns 1 on resource exhaustion (flow_error already set).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+				/* Find existing KM FT that can be reused */
+		{
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Scan for a matching FT ident; remember the first
+			 * free (zero) slot as a fallback. Slot 0 is skipped.
+			 */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Reuse the matching FT: take a reference */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the free slot and record our ident */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				/* Neither a match nor a free slot left */
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			/* A compatible (but not identical) flow exists:
+			 * share its KM category.
+			 */
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add our flow type to the RCP's FT mask A */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow found: reference both of its KM resources
+		 * instead of allocating, and point the match entry at it.
+		 */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		/* NOTE(review): these read 'flow' rather than 'found_flow';
+		 * in the caller the search loop breaks with flow == found_flow
+		 * when identical_flow_found is set, so they are the same
+		 * handle here — but 'found_flow' would be clearer.
+		 */
+		*setup_km = 1;
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * fd      - interpreted flow definition (tun_hdr/modify_field are read)
+ * i       - index of the modify-field entry being adjusted
+ * ofs     - in/out: field offset to rebase
+ * select  - CPY_SELECT_* selector of the field being copied
+ * l2/l3/l4_length - section lengths of the newly built outer header
+ * dyn     - out: set to 1 when the offset was rebased to static L2
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	/* Only applies when the pushed tunnel header extends past L2 */
+	if (fd->tun_hdr.len <= eth_length)
+		return;
+
+	if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+		/* BUGFIX: was "ofs += ...", which advanced the local pointer
+		 * instead of the value it points to, so the caller never saw
+		 * the adjustment.
+		 */
+		*ofs += fd->tun_hdr.len - eth_length;
+	} else {
+		/* Field lives in the new outer header: rebase its offset to
+		 * the start of L2 using the synthesized section lengths.
+		 */
+		switch (select) {
+		case CPY_SELECT_IPV4:
+		case CPY_SELECT_DSCP_IPV4:
+		case CPY_SELECT_DSCP_IPV6:
+			*ofs += l2_length;
+			break;
+		case CPY_SELECT_PORT:
+			*ofs += l2_length + l3_length;
+			break;
+		case CPY_SELECT_TEID:
+		case CPY_SELECT_RQI_QFI:
+			*ofs += l2_length + l3_length + l4_length;
+			break;
+		default:
+			/* Other selectors keep their offset unchanged */
+			break;
+		}
+		*dyn = 1;
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != (fd->tun_hdr.len > 0 ? 1 : 0))
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.new_outer ?
+				      (fd->tun_hdr.ip_version == 4 ? 1 : 2) : 0))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != (fd->tun_hdr.new_outer ? l2_length :
+				      (fd->tun_hdr.len == 0 ? 0 : fd->tun_hdr.len - eth_length)))
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* Update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* Update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* Update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed, traffic will start matching the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of flow management for a NIC device.
+ *
+ * Reserves the fixed/default entries (index 0, and FLM flow type 1) of each
+ * hardware resource table, programs default recipes into the hardware
+ * (catch-all CAT filter, discard QSL recipe, virtio PDB descriptor,
+ * 5-tuple hash), calibrates/resets the FLM SDRAM, configures FLM interrupt
+ * status reporting and FIFO-priority drop levels, and allocates the
+ * meter/flow-type bookkeeping tables.
+ *
+ * Idempotent via ndev->flow_mgnt_prepared. Returns 0 on success, -1 on
+ * failure (teardown is attempted via
+ * done_flow_management_of_ndev_profile_inline()).
+ *
+ * NOTE(review): on the error path flow_mgnt_prepared is still 0, so the
+ * teardown helper's guarded branch is skipped and any handles calloc'ed
+ * above a failing step appear to leak - confirm whether this is intended.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 * Unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flow_nic_handle_create(&ndev->flm_res_handle);
+
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Apply the periodic statistics limits to every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Bookkeeping tables for meters, flow types and meter stats */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Zero the meter statistics (atomics - read concurrently elsewhere) */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			__atomic_store_n(&mtr_stat[i].n_pkt, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].n_bytes, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].stats_mask, 0, __ATOMIC_RELAXED);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down everything set up by
+ * initialize_flow_management_of_ndev_profile_inline(): reset FLM SDRAM,
+ * clear the default (index 0) hardware recipes and release the reserved
+ * resource entries. Guarded by ndev->flow_mgnt_prepared, so repeated calls
+ * are safe. Always returns 0.
+ *
+ * NOTE(review): the freed handle pointers are not NULL'ed, but the
+ * flow_mgnt_prepared flag prevents a double free on re-entry. Also, when
+ * FLOW_DEBUG is defined and flow_mgnt_prepared is 0, the debug mode is set
+ * to WRITE below but never restored to NONE - confirm intended.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flow_nic_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/* Bookkeeping tables allocated by the initialize function */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow specification without programming any hardware.
+ * The elements/actions are parsed into a flow definition under the device
+ * mutex; if parsing succeeds the definition is immediately discarded and 0
+ * is returned, otherwise -1 (details reported via *error by
+ * interpret_flow_elements()).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+
+	/* Scratch output buffers filled by the parser; not used here */
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,
+							  error, 0, &port_id,
+							  &num_dest_port, &num_queues,
+							  packet_data, packet_mask,
+							  &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (!fd)
+		return -1;
+
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create and program a flow: parse elements/actions into a flow
+ * definition, translate caller group IDs to NIC group indices, then build
+ * and flush the filter to the NIC. Runs under the device mutex.
+ * Returns the new flow handle, or NULL with *error set on failure.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Work on a local copy; forced VLAN only applies to group 0 */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port in the spec - default to our own port */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	/* NOTE(review): fd ownership passes to create_flow_filter(); it may
+	 * free fd internally, so only the pointer values are logged below.
+	 */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FlOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a single flow. Caller must hold the ndev mutex.
+ *
+ * FLM-learned flows are unlearned in hardware and their TPE replace
+ * resources dereferenced; regular flows walk every resource type they
+ * reference and reset the corresponding hardware entry when the last
+ * reference is dropped. The flow handle itself (and its flow definition,
+ * if any) is freed in all cases.
+ * Returns 0 on success, non-zero if any hardware release step failed
+ * (also reported via *error when non-NULL).
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* FLM-learned flow: unlearn in hardware, release any TPE replace
+	 * extension data, then drop the reference on the owning flow.
+	 */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			/* Look up replace data location before clearing the ext entry */
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Zero each 16-byte RPL line the replace data occupied */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Destroy the owning flow when its last FLM child is gone */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Release every NIC resource this flow holds a reference on */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Software-only bookkeeping: clear the flow
+						 * type identifier; no hardware write needed.
+						 */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe spans all TPE sub-modules:
+						 * reset each of them for this index.
+						 */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy one flow, or - when 'flow' is NULL - every flow (regular and
+ * FLM) created on this eth device. Takes the device mutex itself.
+ * Returns the first error encountered, 0 on success.
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		flow = dev->ndev->flow_base;
+
+		/* Save 'next' before destroy, since the node is freed */
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+
+		/* Delete all created FLM flows from this eth device */
+		flow = dev->ndev->flow_base_flm;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * rte_flow flush entry point for the inline profile - not implemented;
+ * always reports a general error and returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	return -1;
+}
+
+/*
+ * rte_flow query entry point for the inline profile - not implemented;
+ * clears the output parameters, reports a general error and returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	*length = 0;
+	*data = NULL;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into 'data' ('size' = number of
+ * uint64_t entries the caller provides; must cover the full field list
+ * below). HW_FLM_STAT_FLOWS is treated as a gauge and overwrites data[i];
+ * every other field is added onto the caller's previous value.
+ * Returns 0 on success, -1 if 'size' is too small.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	/* Latch the hardware counters before reading them out */
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		/* FLM versions before 18 presumably stop at PRB_IGNORE;
+		 * the remaining fields are left untouched for them.
+		 */
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the TPE IFR (in-flight fragmentation) recipes for 'port' with
+ * the given MTU and enable them. Valid port range is 0..254.
+ * Recipes are only flushed to hardware when all the set calls succeeded.
+ * Returns 0 on success, non-zero otherwise.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	int err = 0;
+	/* Each port maps to a dedicated IFR MTU recipe index */
+	uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+	struct flow_nic_dev *ndev = dev->ndev;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  ifr_mtu_recipe, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      ifr_mtu_recipe, mtu);
+
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+						    1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name starts with '_' + uppercase, which is reserved
+ * by the C standard for the implementation - consider renaming.
+ */
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down state created by the initialize function; safe to call twice */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time preparation of flow management for a NIC device; returns 0/-1 */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy a single flow; caller must hold the ndev mutex */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Parse-only validation of a flow spec; no hardware is programmed */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns the handle or NULL with *error set */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows on the device when 'flow' is NULL */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented; always fails with a general error */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented; always fails with a general error */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Read FLM statistics counters into 'data' (size = entry count) */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/*
+ * Per-adapter backend context for the binary flow API backend.
+ * Caches a pointer to the NTHW wrapper of every hardware module this
+ * backend drives; a NULL member is how module absence is detected (see
+ * the *_get_present() callbacks below).  One entry per physical adapter.
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode;	/* current debug/trace mode, see set_debug_mode() */
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/*
+ * _CHECK_DEBUG_ON(be, mod, inst): placed at the top of a backend callback.
+ * Declares a local flag `__debug__` and, when write-debug is requested for
+ * the backend (be->dmode) or for the module shadow struct (mod->debug),
+ * turns on full register tracing (0xFF) for the module instance `inst`
+ * via the token-pasted <mod>_nthw_set_debug_mode().  Must be paired with
+ * _CHECK_DEBUG_OFF in the same scope, which restores tracing to off.
+ *
+ * NOTE(review): `__debug__` (double underscore) and `_CHECK_DEBUG_ON` /
+ * `_CHECK_DEBUG_OFF` (leading underscore + uppercase) are identifiers
+ * reserved for the implementation (C11 7.1.3) -- consider renaming.
+ * Also note the first macro intentionally is NOT a do{}while(0) block:
+ * it must introduce a declaration into the caller's scope.
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+/* Counterpart of _CHECK_DEBUG_ON: switch tracing back off if it was enabled. */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	be->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+static bool cat_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_cat_nthw != NULL;
+}
+
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
+			  (module_get_minor_version(be->p_cat_nthw->m_cat) &
+			   0xffff));
+}
+
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		r(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		r(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* True when the KM (key matcher) module was instantiated on this backend. */
+static bool km_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_km_nthw ? true : false;
+}
+
+/* Pack the KM module version as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_km_nthw->m_km);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_km_nthw->m_km);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Push KM RCP (recipe) shadow entries [category .. category+cnt-1] through
+ * the km_nthw_* register helpers. Only shadow layout version 7 is handled;
+ * any other version is a silent no-op. Per entry the sequence is: select the
+ * index, stage every recipe field from the shadow copy, then issue a flush
+ * (register semantics live in the km_nthw_* helpers, not visible here).
+ * Always returns 0.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_rcp_select(be->p_km_nthw, category + i);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw,
+					 km->v7.rcp[category + i].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw,
+					 km->v7.rcp[category + i].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw,
+					km->v7.rcp[category + i].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw,
+				       km->v7.rcp[category + i].dual);
+			km_nthw_rcp_paired(be->p_km_nthw,
+					 km->v7.rcp[category + i].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw,
+					km->v7.rcp[category + i].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw,
+					km->v7.rcp[category + i].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw,
+						 km->v7.rcp[category + i].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Push KM CAM shadow records to hardware via the km_nthw_* helpers.
+ * Entry address is (bank << 11) + record + i — assumes 2048 records per
+ * bank; TODO confirm against the CAM register layout. Only shadow layout
+ * version 7 is handled; other versions are a silent no-op. Always returns 0.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_cam_select(be->p_km_nthw,
+					 (bank << 11) + record + i);
+			km_nthw_cam_w0(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w0);
+			km_nthw_cam_w1(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w1);
+			km_nthw_cam_w2(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w2);
+			km_nthw_cam_w3(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w3);
+			km_nthw_cam_w4(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w4);
+			km_nthw_cam_w5(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w5);
+			km_nthw_cam_ft0(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Push dirty KM TCAM shadow words to hardware. Start index is
+ * bank * 4 * 256 + byte * 256 + value (4 byte-planes of 256 values per
+ * bank, judging by the arithmetic — confirm against the TCAM layout).
+ * Only entries flagged dirty are written, and the flag is cleared after a
+ * successful flush. NOTE(review): clearing .dirty writes through the
+ * const-qualified 'km' parameter — works only because 'tcam' is a pointer
+ * member; intentional but worth confirming. Version 7 only; returns 0.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int start_idx = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			if (km->v7.tcam[start_idx + i].dirty) {
+				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
+				km_nthw_tcam_t(be->p_km_nthw,
+					     km->v7.tcam[start_idx + i].t);
+				km_nthw_tcam_flush(be->p_km_nthw);
+				km->v7.tcam[start_idx + i].dirty = 0;
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Push KM TCI shadow entries [bank*72 + index .. +cnt-1] to hardware.
+ * Handled only for shadow layout version 7; otherwise a silent no-op.
+ * Always returns 0.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries.
+		 * NOTE(review): comment originally said "version 3" but the
+		 * code handles ver == 7 — likely a stale copy; confirm.
+		 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Push KM TCQ shadow entries to hardware. The entry address packs the bank
+ * into the low 4 bits and the index above them: bank + (index << 4).
+ * Handled only for shadow layout version 7; otherwise a silent no-op.
+ * Always returns 0.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width = 72 entries.
+		 * NOTE(review): comment originally said "version 3" but the
+		 * code handles ver == 7 — likely a stale copy; confirm.
+		 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* True when the FLM (flow matcher) module was instantiated on this backend. */
+static bool flm_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_flm_nthw ? true : false;
+}
+
+/* Pack the FLM module version as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_flm_nthw->m_flm);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_flm_nthw->m_flm);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Stage every FLM control shadow field via the flm_nthw_control_* helpers
+ * and trigger a control flush. Handled for shadow layout version >= 17;
+ * otherwise a silent no-op. Always returns 0.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM status shadow fields (critical, panic, crcerr)
+ * to hardware and flush. The trailing 0 argument presumably selects the
+ * write direction of the get/set-style helpers — confirm against the
+ * flm_nthw API. Version >= 17 only; always returns 0.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the FLM status shadow from hardware: trigger a status update,
+ * then read back every status field into the shadow copy (the trailing 1
+ * presumably selects the read direction of the get/set-style helpers —
+ * confirm against the flm_nthw API). Version >= 17 only; always returns 0.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM timeout shadow value to hardware and flush.
+ * Version >= 17 only; otherwise a silent no-op. Always returns 0.
+ */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM scrub interval shadow value to hardware and flush.
+ * Version >= 17 only; otherwise a silent no-op. Always returns 0.
+ */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM load_bin shadow value to hardware and flush.
+ * Version >= 17 only; otherwise a silent no-op. Always returns 0.
+ */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM load_pps shadow value to hardware and flush.
+ * Version >= 17 only; otherwise a silent no-op. Always returns 0.
+ */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM load_lps shadow value to hardware and flush.
+ * Version >= 17 only; otherwise a silent no-op. Always returns 0.
+ */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the FLM load_aps shadow value to hardware and flush.
+ * Version >= 17 only; otherwise a silent no-op. Always returns 0.
+ */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Stage the four FLM priority limit/ft shadow pairs and trigger a prio
+ * flush. Version >= 17 only; otherwise a silent no-op. Always returns 0.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push FLM PST shadow entries [index .. index+cnt-1] to hardware: per
+ * entry select the index, stage bp/pp/tp, then flush. Version >= 17 only;
+ * otherwise a silent no-op. Always returns 0.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push FLM RCP (recipe) shadow entries [index .. index+cnt-1] to hardware:
+ * per entry select the index, stage every recipe field from the shadow
+ * copy, then flush. Version >= 17 only; otherwise a silent no-op.
+ * Always returns 0.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the FLM buffer-control shadow counters (lrn_free, inf_avail,
+ * sta_avail) via flm_nthw_buf_ctrl_update — the fields are passed by
+ * pointer, so presumably updated in place. Version >= 17 only.
+ * Always returns 0.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh every FLM statistics counter shadow from hardware: first trigger
+ * the per-counter update helpers, then read each counter back into the
+ * shadow copy (the trailing 1 presumably selects the read direction of the
+ * get/set-style helpers — confirm against the flm_nthw API). The v17 group
+ * is read for ver >= 17; the extended v20 group additionally for
+ * ver >= 20. Always returns 0.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Forward a block of learn data to the FLM via flm_nthw_lrn_data_flush,
+ * which also refreshes the buffer-control shadow counters passed by
+ * pointer. NOTE(review): no flm->ver guard here, unlike the other FLM
+ * handlers — v17.buf_ctrl is dereferenced unconditionally; confirm callers
+ * only reach this for ver >= 17. Returns the helper's status code.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of inform data from the FLM into inf_data via
+ * flm_nthw_inf_data_update, which also refreshes the buffer-control shadow
+ * counters passed by pointer. NOTE(review): no flm->ver guard — v17 is
+ * dereferenced unconditionally, as in flm_lrn_data_flush.
+ * Returns the helper's status code.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of status data from the FLM into sta_data via
+ * flm_nthw_sta_data_update, which also refreshes the buffer-control shadow
+ * counters passed by pointer. NOTE(review): no flm->ver guard — v17 is
+ * dereferenced unconditionally, as in flm_lrn_data_flush.
+ * Returns the helper's status code.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* True when the HSH (hasher) module was instantiated on this backend. */
+static bool hsh_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_hsh_nthw ? true : false;
+}
+
+/* Pack the HSH module version as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hsh_nthw->m_hsh);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hsh_nthw->m_hsh);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Push HSH RCP (hash recipe) shadow entries [category .. category+cnt-1]
+ * to hardware: per entry select the index, stage every recipe field from
+ * the shadow copy, then flush. Only shadow layout version 5 is handled;
+ * other versions are a silent no-op. Always returns 0.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* True when the HST (header stripper) module was instantiated on this backend. */
+static bool hst_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_hst_nthw ? true : false;
+}
+
+/* Pack the HST module version as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_hst_nthw->m_hst);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_hst_nthw->m_hst);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Push HST RCP shadow entries [category .. category+cnt-1] to hardware:
+ * per entry select the index, stage the strip-range and the three
+ * modifier-command groups, then flush. Only shadow layout version 2 is
+ * handled; other versions are a silent no-op. Always returns 0.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* True when the QSL (queue selector) module was instantiated on this backend. */
+static bool qsl_get_present(void *be_dev)
+{
+	const struct backend_dev_s *be = be_dev;
+
+	return be->p_qsl_nthw ? true : false;
+}
+
+/* Pack the QSL module version as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	uint32_t major = (uint32_t)module_get_major_version(be->p_qsl_nthw->m_qsl);
+	uint32_t minor = (uint32_t)module_get_minor_version(be->p_qsl_nthw->m_qsl);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Push QSL RCP shadow entries [category .. category+cnt-1] to hardware:
+ * per entry select the index, stage every recipe field, then flush.
+ * Only shadow layout version 7 is handled; other versions are a silent
+ * no-op. Always returns 0.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Push QSL QST (queue selection table) shadow entries
+ * [entry .. entry+cnt-1] to hardware: per entry select the index, stage
+ * queue/en/tx_port/lre/tci/ven, then flush. Only shadow layout version 7
+ * is handled; other versions are a silent no-op. Always returns 0.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Push QSL QEN (queue enable) shadow entries [entry .. entry+cnt-1] to
+ * hardware: per entry select the index, stage the enable bit, then flush.
+ * Only shadow layout version 7 is handled; other versions are a silent
+ * no-op. Always returns 0.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Push QSL UNMQ (unmatched-queue) shadow entries [entry .. entry+cnt-1]
+ * to hardware: per entry select the index, stage dest_queue and the
+ * enable bit, then flush. Only shadow layout version 7 is handled; other
+ * versions are a silent no-op. Always returns 0.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* The SLC module is present when its nthw handle was created at init. */
+static bool slc_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_slc_nthw != NULL;
+}
+
+/* SLC version: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = (uint32_t)module_get_major_version(be->p_slc_nthw->m_slc);
+	const uint32_t minor = (uint32_t)module_get_minor_version(be->p_slc_nthw->m_slc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Program SLC recipes [category .. category+cnt-1] into the SLC module. */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	/* Only the version 1 register layout is handled. */
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = category + i;
+
+			slc_nthw_rcp_select(be->p_slc_nthw, ix);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw, slc->v1.rcp[ix].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw, slc->v1.rcp[ix].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw, slc->v1.rcp[ix].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw, slc->v1.rcp[ix].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* The SLC-LR module is present when its nthw handle was created at init. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_slc_lr_nthw != NULL;
+}
+
+/* SLC-LR version: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major =
+		(uint32_t)module_get_major_version(be->p_slc_lr_nthw->m_slc_lr);
+	const uint32_t minor =
+		(uint32_t)module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Program SLC-LR recipes [category .. category+cnt-1] into the SLC-LR module. */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	/* Only the version 2 register layout is handled. */
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = category + i;
+
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, ix);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[ix].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw, slc_lr->v2.rcp[ix].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw, slc_lr->v2.rcp[ix].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[ix].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* The PDB module is present when its nthw handle was created at init. */
+static bool pdb_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_pdb_nthw != NULL;
+}
+
+/* PDB version: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = (uint32_t)module_get_major_version(be->p_pdb_nthw->m_pdb);
+	const uint32_t minor = (uint32_t)module_get_minor_version(be->p_pdb_nthw->m_pdb);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Program PDB recipes [category .. category+cnt-1] into the PDB module,
+ * flushing one shadow-register entry at a time.  Only the version 9
+ * register layout is handled; other versions are silently ignored.
+ * Always returns 0.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Bug fix: pcap_keep_fcs was previously written via
+			 * pdb_nthw_rcp_duplicate_bit(), which clobbered the
+			 * DUPLICATE_BIT field and never programmed
+			 * PCAP_KEEP_FCS at all.
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/* Program the global PDB configuration (timestamp format, port offset). */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	/* Only the version 9 register layout is handled. */
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* The IOA module is present when its nthw handle was created at init. */
+static bool ioa_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_ioa_nthw != NULL;
+}
+
+/* IOA version: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = (uint32_t)module_get_major_version(be->p_ioa_nthw->m_ioa);
+	const uint32_t minor = (uint32_t)module_get_minor_version(be->p_ioa_nthw->m_ioa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Program IOA recipes [category .. category+cnt-1] into the IOA module. */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	/* Only the version 4 register layout is handled. */
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = category + i;
+
+			ioa_nthw_rcp_select(be->p_ioa_nthw, ix);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw, ioa->v4.rcp[ix].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw, ioa->v4.rcp[ix].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw, ioa->v4.rcp[ix].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw, ioa->v4.rcp[ix].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw, ioa->v4.rcp[ix].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw, ioa->v4.rcp[ix].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[ix].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[ix].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw, ioa->v4.rcp[ix].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/* Program the two custom VLAN TPID values into the IOA module. */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	/* Only the version 4 register layout is handled. */
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/* Program ROA egress-path (EPP) entries [index .. index+cnt-1] via the IOA module. */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	/* Only the version 4 register layout is handled. */
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, ix);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[ix].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw, ioa->v4.roa_epp[ix].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* The ROA module is present when its nthw handle was created at init. */
+static bool roa_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_roa_nthw != NULL;
+}
+
+/* ROA version: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = (uint32_t)module_get_major_version(be->p_roa_nthw->m_roa);
+	const uint32_t minor = (uint32_t)module_get_minor_version(be->p_roa_nthw->m_roa);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/*
+ * Write tunnel-header data to the ROA module.
+ *
+ * Each logical tunnel header occupies 4 register slots of 4 words each
+ * (hence the cnt=4 burst and the ii*4 word offset).  The shadow table is
+ * indexed per header (index/4 + i) while the hardware select register is
+ * indexed per slot (index + i*4 + ii) — assumes @index is a multiple of
+ * 4 so both indexings line up; TODO confirm with callers.
+ * Always returns 0; versions other than 6 are silently ignored.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		/* Burst 4 register slots per flush. */
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/* Program tunnel configuration entries [category .. category+cnt-1] into ROA. */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	/* Only the version 6 register layout is handled. */
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = category + i;
+
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, ix);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw, roa->v6.tuncfg[ix].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw, roa->v6.tuncfg[ix].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw, roa->v6.tuncfg[ix].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw, roa->v6.tuncfg[ix].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw, roa->v6.tuncfg[ix].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[ix].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw, roa->v6.tuncfg[ix].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[ix].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+							   roa->v6.tuncfg[ix].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw, roa->v6.tuncfg[ix].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[ix].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[ix].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[ix].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[ix].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/* Program the global ROA forwarding configuration. */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	/* Only the version 6 register layout is handled. */
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw, roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw, roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/* Program LAG configuration entries [index .. index+cnt-1] into the ROA module. */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	/* Only the version 6 register layout is handled. */
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, ix);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[ix].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* The RMC module is present when its nthw handle was created at init. */
+static bool rmc_get_present(void *be_dev)
+{
+	return ((struct backend_dev_s *)be_dev)->p_rmc_nthw != NULL;
+}
+
+/* RMC version: major in the upper 16 bits, minor in the lower 16 bits. */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	const uint32_t major = (uint32_t)module_get_major_version(be->p_rmc_nthw->m_rmc);
+	const uint32_t minor = (uint32_t)module_get_minor_version(be->p_rmc_nthw->m_rmc);
+
+	return (major << 16) | (minor & 0xffff);
+}
+
+/* Program the RMC control register (blocking and LAG settings). */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	/* Only version 1.3 (encoded as 0x10003) is handled. */
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw, rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw, rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/* TPE is usable only when all six of its sub-module handles exist. */
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	if (be->p_csu_nthw == NULL || be->p_hfu_nthw == NULL)
+		return false;
+	if (be->p_rpp_lr_nthw == NULL || be->p_tx_cpy_nthw == NULL)
+		return false;
+	return be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Derive a single TPE "version" from the versions of its six sub-modules
+ * (CSU, HFU, RPP-LR, TX-CPY, TX-INS, TX-RPL).  Each sub-module version is
+ * encoded as major<<16 | minor.  Only two known combinations are mapped
+ * (they differ solely in the RPP-LR version); any other combination is a
+ * programming/FPGA mismatch and trips the assert.
+ * NOTE(review): with NDEBUG the assert is compiled out and 0 is returned
+ * for unknown combinations — callers presumably treat 0 as unsupported;
+ * confirm.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	/* TPE v1: RPP-LR 0.x; all other sub-module versions identical to v2. */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	/* TPE v2: RPP-LR 0.1 (adds the IFR recipe registers used below). */
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	/* Unknown sub-module version combination. */
+	assert(false);
+	return 0;
+}
+
+/* Program RPP-LR recipes [index .. index+cnt-1] (v1 and later layouts). */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, ix);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw, rpp_lr->v1.rpp_rcp[ix].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Program RPP-LR IFR recipes [index .. index+cnt-1].
+ * Returns 0 on success, -1 when the module version (< 2) lacks IFR registers.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver < 2) {
+		res = -1;
+	} else {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, ix);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[ix].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[ix].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Program IFR recipes [index .. index+cnt-1].
+ * Returns 0 on success, -1 when the module version (< 2) is unsupported.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver < 2) {
+		res = -1;
+	} else {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			ifr_nthw_rcp_select(be->p_ifr_nthw, ix);
+			ifr_nthw_rcp_en(be->p_ifr_nthw, ifr->v2.ifr_rcp[ix].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw, ifr->v2.ifr_rcp[ix].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/* Program TX-INS recipes [index .. index+cnt-1] (v1 and later layouts). */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, ix);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw, tx_ins->v1.ins_rcp[ix].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw, tx_ins->v1.ins_rcp[ix].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw, tx_ins->v1.ins_rcp[ix].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/* Program TX-RPL recipes [index .. index+cnt-1] (v1 and later layouts). */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, ix);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw, tx_rpl->v1.rpl_rcp[ix].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw, tx_rpl->v1.rpl_rcp[ix].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw, tx_rpl->v1.rpl_rcp[ix].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[ix].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[ix].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/* Program TX-RPL extension entries [index .. index+cnt-1] (v1 and later). */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, ix);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[ix].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/* Program TX-RPL replacement-data words [index .. index+cnt-1] (v1 and later). */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, ix);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw, tx_rpl->v1.rpl_rpl[ix].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Program TX-CPY recipes [index .. index+cnt-1].
+ *
+ * The CPY recipes are partitioned across several hardware writers, each
+ * holding nb_rcp_categories entries: global index (index+i) maps to
+ * writer (index+i)/nb_rcp_categories, slot (index+i)%nb_rcp_categories.
+ * The burst count register of a writer is programmed (to 1) only when
+ * the loop crosses into that writer.
+ * Always returns 0; versions before 1 are silently ignored.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* -1 wraps to UINT_MAX: a sentinel no valid writer index can match,
+	 * forcing the cnt register write on the first iteration.
+	 */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			/* Entered a new writer: set its burst count once. */
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/* Program HFU (header field update) recipes [index .. index+cnt-1]. */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			hfu_nthw_rcp_select(be->p_hfu_nthw, ix);
+			/* Length field A */
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[ix].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_a_sub_dyn);
+			/* Length field B */
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_b_sub_dyn);
+			/* Length field C */
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[ix].len_c_sub_dyn);
+			/* TTL update */
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[ix].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[ix].ttl_pos_ofs);
+			/* Protocol info and layer offsets */
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw, hfu->v1.hfu_rcp[ix].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/* Program CSU (checksum update) recipes [index .. index+cnt-1]. */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			const int ix = index + i;
+
+			csu_nthw_rcp_select(be->p_csu_nthw, ix);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw, csu->v1.csu_rcp[ix].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw, csu->v1.csu_rcp[ix].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw, csu->v1.csu_rcp[ix].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw, csu->v1.csu_rcp[ix].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Rx queue allocation is not supported by this backend; logs an error
+ * and always returns -1.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Rx queue release is not supported by this backend; logs an error.
+ * NOTE(review): returns 0 (success) despite printing ERROR, unlike
+ * alloc_rx_queue which returns -1 — confirm whether callers rely on
+ * free being treated as a no-op success.
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table exposed to the generic flow API.
+ * Positional initializer: entry order must match struct
+ * flow_api_backend_ops exactly.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	/* Backend API version implemented by this table. */
+	1,
+
+	set_debug_mode,
+	/* Capability/dimension getters (resource counts reported by FPGA). */
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	/* Rx queue management (unsupported stubs in this backend). */
+	alloc_rx_queue,
+	free_rx_queue,
+
+	/* CAT (categorizer) module. */
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	/* KM (key matcher) module. */
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	/* FLM (flow matcher) module. */
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	/* HSH (hasher) module. */
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	/* HST (header splitter) module. */
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	/* QSL (queue selector) module. */
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	/* SLC (slicer) module. */
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	/* SLC-LR module. */
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	/* PDB (packet descriptor builder) module. */
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	/* IOA module. */
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	/* ROA module. */
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	/* RMC (Rx MAC converter) module. */
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	/* TPE (Tx packet editor) module group. */
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+/*
+ * Probe and instantiate all NTHW flow-module drivers for one adapter.
+ *
+ * For every flow-related FPGA module below, a call with a NULL instance
+ * (xxx_nthw_init(NULL, ...)) acts as a pure presence probe: a return value of
+ * 0 means the module exists in this FPGA build, and only then is a real
+ * instance allocated (xxx_nthw_new()) and initialized. A module that is
+ * absent leaves a NULL pointer in be_devs[] so later code can test for
+ * availability. The INFO module is initialized unconditionally (no probe).
+ *
+ * NOTE(review): the results of info_nthw_new()/xxx_nthw_new() are used
+ * without a NULL check - confirm the *_new() helpers cannot fail, or add
+ * allocation-failure handling.
+ *
+ * p_fpga: FPGA instance the modules are looked up in.
+ * dev:    out - receives this adapter's backend_dev_s entry from be_devs[].
+ * Returns the backend operations table (flow_be_iface).
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	/* First call with NULL is a presence probe only (pattern repeats below) */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down all NTHW flow-module instances created by bin_flow_backend_init().
+ *
+ * dev: the opaque backend handle handed out through bin_flow_backend_init()'s
+ *      dev parameter (a struct backend_dev_s entry).
+ *
+ * NOTE(review): modules absent from the FPGA leave NULL pointers in the
+ * backend struct (see bin_flow_backend_init()) - assumes every *_delete()
+ * helper accepts NULL; confirm.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/* Create the binary flow backend for p_fpga; *be_dev receives the opaque
+ * per-adapter backend handle. Returns the backend operations table. */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+/* Release the backend handle previously returned through be_dev. */
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Bring up the flow filter API for one adapter.
+ *
+ * Initializes the binary flow backend for p_fpga and creates the per-adapter
+ * flow NIC device on top of it.
+ *
+ * p_fpga:        FPGA instance to attach the backend to.
+ * p_flow_device: out - the created flow NIC device, or NULL on failure.
+ * adapter_no:    adapter index (narrowed to uint8_t for the flow API).
+ * Returns 0 on success, -1 if the flow device could not be created.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *backend_dev = NULL;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+
+	/* Probe/instantiate all NTHW flow modules; get the backend ops. */
+	const struct flow_api_backend_ops *be_ops =
+		bin_flow_backend_init(p_fpga, &backend_dev);
+
+	*p_flow_device = flow_api_create((uint8_t)adapter_no, be_ops,
+					 backend_dev);
+	if (*p_flow_device == NULL)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Shut down a flow NIC device and its backend.
+ *
+ * The backend handle must be fetched before flow_api_done() tears the
+ * device down. Returns the result of flow_api_done().
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *backend = flow_api_get_be_dev(dev);
+	int status = flow_api_done(dev);
+
+	if (backend != NULL)
+		bin_flow_backend_done(backend);
+
+	return status;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+/* NOTE(review): guard says _HPP_ although this is a C header - consider
+ * __FLOW_FILTER_H__ for consistency with the sibling headers. */
+/* NOTE(review): force-disables OPAE support; presumably a leftover build
+ * switch - confirm it is still needed. */
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/* Create the flow filter device for p_fpga; see flow_filter.c. */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Tear down a flow filter device created by flow_filter_init(). */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-09-05 14:54   ` [PATCH v15 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-09-05 14:54   ` Mykola Kostenok
  2023-09-05 14:54   ` [PATCH v15 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
v10:
* Fix wrong queue id range.
v11:
* Repace stdatomic by compiler build-in atomic.
v13:
* Fix typo spelling warnings
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  355 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1235 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12501 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 698608cdb2..fbe19449c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather support for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple process.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;
+	struct adapter_info_s adapter_info;
+	char *p_drv_name;
+
+	volatile bool b_shutdown;
+	pthread_mutex_t stat_lck;
+	pthread_t stat_thread;
+	pthread_t flm_thread;
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;
+	struct rte_pci_device *p_dev;
+	struct ntdrv_4ga_s ntdrv;
+
+	int n_eth_dev_init_count;
+	int probe_finished;
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+/* Split-ring structures shared with the device; #pragma pack(1) keeps the
+ * in-memory layout byte-exact (names follow the virtio split-ring layout). */
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used and descriptor sections within a split-ring
+ * allocation; computed by dbs_calc_struct_layout(). */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+/* 1 when the queue type is PACKED_RING, 0 for a split ring. */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/* Per-queue software state; the split/packed fields overlay in a union
+ * since a queue is only ever one of the two ring types. */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* Per-queue shadow state, indexed by hardware queue number.
+ * NOTE(review): file-scope statics - presumably single-adapter only; see
+ * the driver docs' one-active-adapter limitation. */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one hardware RX queue: wait for any in-flight queue-init
+ * operation to finish, issue the init with the given start index/pointer,
+ * then wait for the hardware to complete it.
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy_flag;
+	uint32_t init_flag;
+	uint32_t unused;
+
+	/* Drain: no queue-init may be in flight before we issue ours. */
+	do {
+		get_rx_init(p_nthw_dbs, &init_flag, &unused, &busy_flag);
+	} while (busy_flag != 0);
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Block until the hardware reports the init has completed. */
+	do {
+		get_rx_init(p_nthw_dbs, &init_flag, &unused, &busy_flag);
+	} while (busy_flag != 0);
+}
+
+/*
+ * Initialize one hardware TX queue: wait for any in-flight queue-init
+ * operation to finish, issue the init with the given start index/pointer,
+ * then wait for the hardware to complete it.
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t busy_flag;
+	uint32_t init_flag;
+	uint32_t unused;
+
+	/* Drain: no queue-init may be in flight before we issue ours. */
+	do {
+		get_tx_init(p_nthw_dbs, &init_flag, &unused, &busy_flag);
+	} while (busy_flag != 0);
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Block until the hardware reports the init has completed. */
+	do {
+		get_tx_init(p_nthw_dbs, &init_flag, &unused, &busy_flag);
+	} while (busy_flag != 0);
+}
+
+/*
+ * Create and initialize the DBS (virt-queue DMA) module for this FPGA.
+ *
+ * Probes for the module first (dbs_init(NULL, ...)), then creates a real
+ * instance, marks every software queue descriptor UNUSED, resets the module
+ * and initializes all hardware RX/TX queues. Finally the RX and TX control
+ * registers are brought up in three steps each: everything disabled, then
+ * the AM/UW pollers enabled, then the queues themselves enabled.
+ *
+ * Returns 0 on success, -1 on allocation failure, or the dbs_init() error.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		/* NOTE(review): released with free() - assumes nthw_dbs_new()
+		 * allocates with malloc; confirm.
+		 */
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* No virt-queue is in use until nthw_setup_*_virt_queue() claims it */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/* Staged enable: all off -> pollers on -> queues on */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Compute the byte offsets of the "used" ring and the descriptor table
+ * within a single split-ring allocation: [avail][used][desc], each section
+ * rounded up to STRUCT_ALIGNMENT.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	struct virtq_struct_layout_s layout;
+
+	/* + sizeof(le16); ("avail->used_event" is not used) */
+	size_t avail_bytes = sizeof(struct virtq_avail) +
+			     queue_size * sizeof(le16);
+	/* + sizeof(le16); ("used->avail_event" is not used) */
+	size_t used_bytes = sizeof(struct virtq_used) +
+			    queue_size * sizeof(struct virtq_used_elem);
+
+	/* Round each section up to the next STRUCT_ALIGNMENT boundary */
+	size_t avail_aligned = ((avail_bytes + STRUCT_ALIGNMENT - 1) /
+				STRUCT_ALIGNMENT) * STRUCT_ALIGNMENT;
+	size_t used_aligned = ((used_bytes + STRUCT_ALIGNMENT - 1) /
+			       STRUCT_ALIGNMENT) * STRUCT_ALIGNMENT;
+
+	layout.used_offset = avail_aligned;
+	layout.desc_offset = avail_aligned + used_aligned;
+
+	return layout;
+}
+
+/*
+ * Initialize a split-ring "avail" structure at addr: interrupts suppressed,
+ * index preset to initial_avail_idx, and the ring identity-mapped so entry
+ * n refers to descriptor n.
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = (struct virtq_avail *)addr;
+	uint16_t n;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+
+	for (n = 0; n < queue_size; ++n)
+		avail->ring[n] = n;
+}
+
+/*
+ * Initialize a split-ring "used" structure at addr: notifications
+ * suppressed, index reset, and every ring element zeroed.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	uint16_t i; /* match queue_size type; avoids signed/unsigned compare */
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	/* Use the named flag instead of the magic constant 1 */
+	p_used->flags = VIRTQ_USED_F_NO_NOTIFY;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	/*
+	 * Fill the descriptor table at "addr" from the supplied packet buffer
+	 * descriptors, applying the same flags to every entry.  A NULL buffer
+	 * array leaves the table untouched.
+	 */
+	struct virtq_desc *desc_tbl = (struct virtq_desc *)addr;
+	int entry;
+
+	if (!packet_buffer_descriptors)
+		return;
+
+	for (entry = 0; entry < queue_size; ++entry) {
+		desc_tbl[entry].addr =
+			(uint64_t)packet_buffer_descriptors[entry].phys_addr;
+		desc_tbl[entry].len = packet_buffer_descriptors[entry].len;
+		desc_tbl[entry].flags = flgs;
+		desc_tbl[entry].next = 0;
+	}
+}
+
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	/*
+	 * Populate the three split-ring sections in place: the avail ring,
+	 * the used ring and (when buffers are given) the descriptor table.
+	 */
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size, initial_avail_idx);
+
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+
+	dbs_initialize_descriptor_struct(desc_struct_addr, packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	/*
+	 * Return floor(log2(qsize)).  Queue sizes are powers of two, so this
+	 * yields the exact exponent the DBS size fields expect.
+	 */
+	uint32_t qs = 0;
+
+	/*
+	 * BUGFIX: for qsize == 0 the loop never runs and the decrement below
+	 * underflowed qs to 0xFFFFFFFF (truncated to 0xFFFF on return).
+	 */
+	if (qsize == 0)
+		return 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure and enable one DBS RX virtqueue in hardware.
+ *
+ * Writes the DR (descriptor), UW (used-writeback) and AM (avail-monitor)
+ * register data for queue "index", initializes the queue via RX_INIT and
+ * records the software queue state in the rxvq[] table.  Interrupts start
+ * disabled; AM is written in the disabled state first only when
+ * irq_vector < 0 (polled mode), and finally enabled for all queues.
+ * Returns a handle into rxvq[], or NULL if any register write fails.
+ * NOTE(review): "index" is not range-checked against MAX_VIRT_QUEUES here -
+ * callers must guarantee it; confirm against call sites.
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%u\n",
+	       __func__, irq_vector);
+	/* Packed rings write used entries into the descriptor area itself */
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Disable an unmanaged RX virtqueue: clear interrupt/ISTK state in the UW
+ * register data, disable avail-monitoring (AM), then wait for the FPGA to
+ * finish outstanding packet processing on the queue.
+ * Returns 0 on success, -1 on invalid queue or register/drain failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re)enable an unmanaged RX virtqueue.  When a valid MSI-X vector is
+ * configured, interrupts are enabled with a sticky interrupt (ISTK);
+ * otherwise the queue runs in polled mode.  Finally avail-monitoring (AM)
+ * is re-enabled.  Returns 0 on success, -1 on invalid queue or failure.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%u\n", __func__,
+	       rx_vq->irq_vector);
+	/* Packed rings use the descriptor area for used-writeback */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Disable an unmanaged TX virtqueue: clear interrupt/ISTK state in the UW
+ * register data, disable avail-monitoring (AM), then wait for the FPGA to
+ * finish outstanding packet processing on the queue.
+ * Returns 0 on success, -1 on invalid queue or register/drain failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re)enable an unmanaged TX virtqueue.  When a valid MSI-X vector is
+ * configured, interrupts are enabled with a sticky interrupt (ISTK);
+ * otherwise the queue runs in polled mode.  Finally avail-monitoring (AM)
+ * is re-enabled.  Returns 0 on success, -1 on invalid queue or failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Re-point an already configured TX virtqueue at a new output port by
+ * rewriting its DR register data, then (re)enable the queue.
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	/* BUGFIX: guard against NULL like every sibling enable/disable API */
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/*
+ * Write TX QoS settings for one port to the DBS QoS registers: enable flag,
+ * information rate "ir" and burst size "bs".  Returns the register-write
+ * status from set_tx_qos_data().
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/*
+ * Set the global TX QoS rate scaling as a multiplier/divider pair.
+ * Returns the register-write status from set_tx_qos_rate().
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+#define INDEX_PTR_NOT_VALID 0x80000000
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	/*
+	 * Read back the RX queue pointer from hardware.  On success *p_index
+	 * holds the pointer, or INDEX_PTR_NOT_VALID when HW flags it invalid.
+	 */
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+	const int status = get_rx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (status == 0)
+		*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+
+	return status;
+}
+
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	/*
+	 * Read back the TX queue pointer from hardware.  On success *p_index
+	 * holds the pointer, or INDEX_PTR_NOT_VALID when HW flags it invalid.
+	 */
+	uint32_t hw_ptr;
+	uint32_t hw_queue;
+	uint32_t hw_valid;
+	const int status = get_tx_ptr(p_nthw_dbs, &hw_ptr, &hw_queue, &hw_valid);
+
+	if (status == 0)
+		*p_index = hw_valid ? hw_ptr : INDEX_PTR_NOT_VALID;
+
+	return status;
+}
+
+/*
+ * Select which RX queue the pointer readback register reports on.
+ * NOTE(review): presumably arms the readback consumed by dbs_get_rx_ptr();
+ * confirm against the DBS register documentation.
+ */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Select which TX queue the pointer readback register reports on.
+ * NOTE(review): presumably arms the readback consumed by dbs_get_tx_ptr();
+ * confirm against the DBS register documentation.
+ */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Spin until the DBS idle register no longer reports busy, returning the
+ * final idle value through *idle.  "rx" selects the RX vs TX register set.
+ * Returns the last register-read status (0 on success).
+ * NOTE(review): busy-waits with no timeout - relies on the FPGA clearing
+ * the busy flag; confirm this cannot stall indefinitely.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+/*
+ * Ask the FPGA to quiesce one queue and wait until it reports idle.
+ * If the idle register is unsupported (-ENOTSUP, older FPGA image), fall
+ * back to a fixed 200 ms wait and report success.
+ * Returns 0 on success, -1 on any register failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	/* Re-arm the idle request until the queue actually reports idle */
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Tear down one RX virtqueue in hardware: clear the UW, AM and DR register
+ * data, drain the FPGA, re-initialize the queue and reset the software
+ * queue state.  Returns 0 on success, -1 on any failure.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+
+	/* BUGFIX: dereference only after the NULL check (was read before it) */
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	/* Only unmanaged (caller-owned buffer) queues may be released here */
+	if (rxvq == NULL)
+		return -1;
+	if (rxvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Release a MANAGED RX virtqueue: free the driver-allocated packet buffer
+ * table, then tear down the hardware queue.  Returns 0 on success, -1 on
+ * invalid queue or hardware teardown failure.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Tear down one TX virtqueue in hardware: clear the UW, AM, DR and QP
+ * register data, drain the FPGA, re-initialize the queue and reset the
+ * software queue state.  Returns 0 on success, -1 on any failure.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+
+	/* BUGFIX: dereference only after the NULL check (was read before it) */
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	/* Only unmanaged (caller-owned buffer) queues may be released here */
+	if (txvq == NULL)
+		return -1;
+	if (txvq->usage != UNMANAGED)
+		return -1;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Release a MANAGED TX virtqueue: free the driver-allocated packet buffer
+ * table, then tear down the hardware queue.  Returns 0 on success, -1 on
+ * invalid queue or hardware teardown failure.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	/* free(NULL) is a no-op, so no guard is needed */
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Configure and enable one DBS TX virtqueue in hardware.
+ *
+ * Writes the DR (descriptor), UW (used-writeback), AM (avail-monitor) and
+ * QP (virtual port) register data for queue "index", initializes the queue
+ * via TX_INIT and records the software queue state in the txvq[] table.
+ * Interrupts start disabled; AM is enabled immediately only when
+ * irq_vector < 0 (polled mode).
+ * Returns a handle into txvq[], or NULL if any register write fails.
+ * NOTE(review): "index" is not range-checked against MAX_VIRT_QUEUES here -
+ * callers must guarantee it; confirm against call sites.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	/* Packed rings write used entries into the descriptor area itself */
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type),
+				   irq_vector >= 0 ? 1 : 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a MANAGED split-ring RX virtqueue: build the avail/used/desc
+ * structures in the supplied memory area, clone the packet buffer table and
+ * program the DBS registers.  Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* BUGFIX: a failed allocation was previously passed to memcpy */
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a MANAGED split-ring TX virtqueue: build the avail/used/desc
+ * structures in the supplied memory area, clone the packet buffer table and
+ * program the DBS registers.  Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		/* BUGFIX: a failed allocation was previously passed to memcpy */
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Initialize a packed-ring virtqueue in the supplied (page-aligned) memory
+ * area: lay out the descriptor table plus the two event-suppression
+ * structures, pre-fill descriptor IDs in order (the FPGA delivers
+ * in-order), and clone the packet buffer table.
+ * Returns 0 on success, -1 on zero queue size or allocation failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	/* Flip the wrap counter to account for the ring pre-fill above */
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a MANAGED packed-ring RX virtqueue: build the packed ring in the
+ * supplied memory area and program the DBS registers.
+ * Returns the queue handle, or NULL on ring-setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a MANAGED packed-ring TX virtqueue: build the packed ring in the
+ * supplied memory area and program the DBS registers.
+ * Returns the queue handle, or NULL on ring-setup failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* Dispatch on the ring layout; unknown types yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* Dispatch on the ring layout; unknown types yield NULL */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* Avail flag value matching the queue's current avail wrap counter */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse used flag for the current avail wrap counter (marks "not used") */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by num, wrapping and toggling the avail wrap counter */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by num, wrapping and toggling the used wrap counter */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Fetch up to n received packets from an RX virtqueue.
+ *
+ * Split ring: delivers only whole packets.  A packet whose captured length
+ * (cap_len in the _pkt_hdr_rx prefix) exceeds the uniform buffer length
+ * spans several consecutive used-ring entries; if the remaining budget
+ * cannot hold all of its segments the loop stops before that packet.
+ * Packed ring: relies on in-order completion from the FPGA and consumes
+ * one descriptor per packet.
+ *
+ * On return *nb_pkts holds the number of whole packets delivered; the
+ * return value is the number of ring entries (segments) filled into rp.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* Descriptor is complete only when both flags match the wrap counter */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into the Avail Ring.
+ *
+ * Split ring:  advance the shadow avail index by n and publish it.
+ * Packed ring: re-post n descriptors starting at next_avail; the first
+ * descriptor's flags are written last, after a write barrier, so the
+ * device never sees a partially initialized chain.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/*
+		 * Write barrier: all descriptor stores above must be globally
+		 * visible before the first descriptor's flags are set and the
+		 * chain becomes available to the device. rte_rmb() (used
+		 * previously) only orders loads and provides no such guarantee.
+		 */
+		rte_wmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Queue trace hook - compiled out (expands to nothing). */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n TX descriptors for the caller to fill.
+ *
+ * txvq:        TX queue (split or packed)
+ * n:           number of descriptors requested
+ * first_idx:   out - ring index of the first reserved descriptor
+ * cvq:         out - common descriptor view (split or packed pointer + type)
+ * p_virt_addr: out - the queue's buffer descriptor table
+ *
+ * Returns the number of descriptors actually reserved (<= n); 0 for an
+ * unknown queue type.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		/* One slot is always kept free to distinguish full from empty. */
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* 'outs' holds descriptors already reclaimed beyond a previous
+		 * request (a completion covered more entries than asked for);
+		 * hand those out first before scanning the ring again.
+		 */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				n = i;
+				break;
+			}
+
+			/* A completion's id may cover several descriptors;
+			 * skip over all of them at once.
+			 */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		/* The last completion may have reclaimed more descriptors
+		 * than requested - park the surplus in 'outs' for next call.
+		 */
+		if (i > n) {
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled TX packets (n_segs[i] descriptors each) to the device.
+ *
+ * Split ring:  append one avail-ring entry per packet, then publish the
+ * avail index after a full barrier.
+ * Packed ring: set AVAIL/USED flags on all descriptors; the first
+ * descriptor's flags are written last, after a write barrier, so the
+ * device never sees a partially published chain.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/*
+		 * Write barrier before the FPGA may see the first flags: the
+		 * descriptor stores above must be ordered before the flags
+		 * store. The previous rte_rmb() only orders loads (its own
+		 * comment called it a "read barrier") and cannot provide this.
+		 */
+		rte_wmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Poll the DBS module for the RX queue's current hardware pointer.
+ * Busy-waits (10 us per attempt, bounded) until the pointer is valid.
+ * On success stores the low 16 bits in *index and returns 0; returns -1
+ * on a DBS read error or when the retry budget is exhausted.
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	for (;;) {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+		if (raw_ptr != INDEX_PTR_NOT_VALID)
+			break;
+	}
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Poll the DBS module for the TX queue's current hardware pointer.
+ * Same contract as nthw_get_rx_queue_ptr(): bounded 10 us polling,
+ * low 16 bits stored in *index, 0 on success and -1 on error/timeout.
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t raw_ptr;
+	uint32_t attempts_left = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &raw_ptr) != 0)
+			return -1;
+		if (--attempts_left == 0)
+			return -1;
+		usleep(10);
+		if (raw_ptr != INDEX_PTR_NOT_VALID)
+			break;
+	}
+
+	*index = (uint16_t)(raw_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Virt-queue (DBS) configuration interface: split- and packed-ring
+ * descriptor layouts plus the queue setup/poll/release API used by the
+ * ntnic datapath.
+ */
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue;
+
+/* One host buffer: physical address for the device, virtual address for
+ * the driver, and its length in bytes.
+ */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/*
+ * "ule" presumably stands for unsigned little-endian.
+ * NOTE(review): typedefs would be preferable to #defines for these type
+ * aliases (type-checked, scoped) - kept as-is here.
+ */
+#define ule64 uint64_t
+#define ule32 uint32_t
+#define ule16 uint16_t
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* Queue layout selectors (virtio split vs packed ring). */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+/* In-order flags for TX queue setup. */
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor (virtio 1.x layout, byte-packed)
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags (virtio 1.1: avail/used wrap markers) */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+/*
+ * NOTE(review): #pragma pack caps member alignment, it does not RAISE the
+ * struct's alignment to 16 - the 16-byte alignment of the descriptor's
+ * physical address must be guaranteed by the allocator. Confirm.
+ */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Packed-ring driver/device event suppression area (virtio 1.1). */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/* NOTE(review): this pack(push) starts inside the struct body and the
+	 * matching pop is after the closing brace - legal but fragile; it only
+	 * affects the union below. Confirm intent.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ * Helper macros over struct nthw_cvirtq_desc; arguments are captured in
+ * locals so each is evaluated once.
+ */
+/* Set the 'next' link - meaningful for split ring only (no-op for packed). */
+#define vq_set_next(_vq, index, nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+/* OR extra flag bits into a descriptor (split or packed). */
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+/* Overwrite a descriptor's flags (split or packed). */
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Address/length prefix common to split and packed descriptors. */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* Type-tagged view of a descriptor table: vq_type selects which union
+ * member is valid (s for SPLIT_RING, p for PACKED_RING).
+ */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+/* "Managed" variants allocate and own the queue structures/buffers. */
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+/* One received segment handed back to the caller: buffer address + length. */
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif /* NTNIC_DBS_CONFIG_H */
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..2c5e47f996
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* Non-zero while the LAG management thread is active - TODO confirm writer. */
+int lag_active;
+
+/* Bookkeeping of created virt-queues so they can be released later;
+ * 'managed' presumably records whether the driver owns the buffers and
+ * 'rx' the queue direction - verify against the release path.
+ */
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#define MAX_RX_PACKETS 128
+#define MAX_TX_PACKETS 128
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/* Debug helper: hex-dump 'len' bytes of a packet segment to stdout,
+ * 16 bytes per line, preceded by an optional label.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int i;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04X:", i);
+		printf(" %02X", data[i]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+/*
+ * NOTE(review): "= { -1 }" initializes only element 0 to -1; the remaining
+ * entries are zero-initialized. If -1 means "not yet registered" for every
+ * module, the table must be filled in at init time - confirm.
+ */
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+/* NULL-terminated list of devargs keys accepted by this PMD (used for
+ * rte_kvargs parsing of the EAL device arguments).
+ */
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ * (NT200A02 and NT50B01 adapters; zero vendor_id terminates the table).
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+/* Global adapter table, indexed by adapter_no; guarded by 'hwlock'. */
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register an adapter's driver handle in the global g_p_drv[] table,
+ * logging a warning when an occupied slot is overwritten.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/*
+	 * g_p_drv[] has NUM_ADAPTER_MAX entries, so valid indices are
+	 * 0..NUM_ADAPTER_MAX-1; adapter_no == NUM_ADAPTER_MAX must be
+	 * rejected too (the previous ">" check was off by one and allowed
+	 * a one-element out-of-bounds write below).
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Find the registered adapter matching a PCI address (domain + bus).
+ * Returns NULL when no adapter matches. Takes 'hwlock' while scanning.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *found = NULL;
+	int slot;
+
+	rte_spinlock_lock(&hwlock);
+	for (slot = 0; slot < NUM_ADAPTER_MAX; slot++) {
+		struct drv_s *cand = g_p_drv[slot];
+
+		if (!cand)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus) {
+			found = cand;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return found;
+}
+
+/* Unpack a packed pciident into an rte_pci_addr and look up its adapter. */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter info for the adapter identified by 'pciident' to 'pfh'.
+ * Returns the nt4ga_adapter_show_info() result, or -1 when no adapter
+ * matches (previously an unknown ident caused a NULL dereference).
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1;
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS module handle for the adapter at 'pci_addr', or NULL
+ * (with an error log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	nthw_dbs_t *p_nthw_dbs = NULL;
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+		return NULL;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at 'pci_addr', or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when no adapter matches.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+		return FPGA_INFO_PROFILE_UNKNOWN;
+	}
+	return p_drv->ntdrv.adapter_info.fpga_info.profile;
+}
+
+/*
+ * rte_kvargs handler: parse 'value_str' (decimal/hex/octal per strtoul
+ * base 0) into the uint32_t pointed to by 'extra_args'.
+ * Returns 0 on success, -1 on NULL arguments.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	/*
+	 * strtoul covers the full uint32_t range; strtol (used previously)
+	 * clamps at LONG_MAX and mis-parses values above it on ILP32.
+	 */
+	const uint32_t value = strtoul(value_str, NULL, 0);
+	*(uint32_t *)extra_args = value;
+	return 0;
+}
+
+/* One parsed "port:speed" devargs entry. */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+/*
+ * rte_kvargs handler. 'extra_args' is a pointer to a cursor into an array
+ * of struct port_link_speed; the cursor is advanced after each entry.
+ * Returns 0 on success, -1 on NULL arguments or a missing ':' separator.
+ * NOTE(review): strtol results are not range/errno-checked - malformed
+ * numeric parts silently parse as 0. Confirm acceptable for devargs.
+ */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	if (!value_str || !extra_args)
+		return -1;
+	char *semicol;
+	const uint32_t pid = strtol(value_str, &semicol, 10);
+
+	if (*semicol != ':')
+		return -1;
+	const uint32_t lspeed = strtol(++semicol, NULL, 10);
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	/* Advance the caller's cursor to the next output slot. */
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill 'stats' with per-queue and aggregate RX/TX counters for one port.
+ * Triggers poll_statistics() to refresh the per-queue counters first.
+ * Returns 0 on success, -1 when driver state or arguments are invalid.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	/*
+	 * NOTE(review): ">" permits if_index == NUM_ADAPTER_PORTS_MAX; if
+	 * if_index indexes a NUM_ADAPTER_PORTS_MAX-sized array elsewhere
+	 * this should be ">=" - confirm against the array's declaration.
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue RX/TX counters and the missed counter for one port,
+ * and record the reset timestamp. Serialized via stat_lck.
+ * Returns 0 on success, -1 when driver state or arguments are invalid.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	/*
+	 * NOTE(review): n_intf_no is only range-checked, never used below;
+	 * also ">" vs ">=" - see the matching note in dpdk_stats_collect().
+	 */
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/* Map an NT link-speed enum value to the DPDK ETH_SPEED_NUM_* value;
+ * unknown values map to ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/* Map an NT duplex enum value to the DPDK ETH_LINK_*_DUPLEX value;
+ * unknown/unreported duplex maps to 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * DPDK link_update callback: refresh link status/speed/duplex in
+ * eth_dev->data->dev_link. Virtual/override ports derive link state from
+ * the virtio negotiation state; physical ports query the adapter.
+ * A stopped device always reports link down. Always returns 0.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			/* No physical PHY: link is up once virtio negotiation
+			 * has completed; speed is not meaningful.
+			 */
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * DPDK stats_get callback: delegate to dpdk_stats_collect().
+ * Propagates its result (previously the -1 error return was discarded
+ * and success was always reported).
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * DPDK stats_reset callback: delegate to dpdk_stats_reset().
+ * Propagates its result (previously the -1 error return was discarded
+ * and success was always reported).
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/* Translate a bitmask of NT link-speed capability flags into the
+ * corresponding DPDK ETH_LINK_SPEED_* capability bitmask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt;
+		uint32_t eth;
+	} capa_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < sizeof(capa_map) / sizeof(capa_map[0]); i++) {
+		if (nt_link_speed_capa & capa_map[i].nt)
+			eth_speed_capa |= capa_map[i].eth;
+	}
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * DPDK dev_infos_get callback: report queue counts, MTU limits, speed
+ * capabilities and RSS offload capabilities for this port.
+ * Inline-profile FPGAs need a larger minimum MTU. Always returns 0.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	dev_info->if_index = internals->if_index;
+	dev_info->driver_name = internals->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)
+		dev_info->min_mtu = MIN_MTU_INLINE;
+
+	else
+		dev_info->min_mtu = MIN_MTU;
+
+	if (internals->p_drv) {
+		dev_info->max_rx_queues = internals->nb_rx_queues;
+		dev_info->max_tx_queues = internals->nb_tx_queues;
+
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_port_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       n_intf_no);
+		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * NT hashing algorithm doesn't use a key, so this is just a fake key
+	 * length to fit testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received packet from its virtqueue segment(s) into an mbuf chain.
+ *
+ * @hw_recv points at the packet's first virtqueue segment; at most @max_segs
+ * segments are consumed. @data_len is the captured length including the
+ * SG_HDR_SIZE descriptor header, which is stripped here. Extra mbufs are
+ * allocated from @mb_pool when the data does not fit the first mbuf.
+ *
+ * Returns the number of virtqueue segments consumed, or -1 on error
+ * (mbuf allocation failure, or the packet needs more than @max_segs).
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* One iteration per source virtqueue segment */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		/* Source segment does not fit in current mbuf: fill it, then chain */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				/* nb_segs is tracked on the head mbuf only */
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* Ran out of allowed segments before the packet was complete */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+/*
+ * Rx burst function (scatter-gather path).
+ *
+ * Pulls up to @nb_pkts whole packets from the Rx virtqueue, copies them into
+ * freshly allocated mbufs from the queue's mempool, and fills per-mbuf flow
+ * director metadata from the NT descriptor header. All consumed virtqueue
+ * segments are released back to HW before returning, including on the error
+ * path (partial results are still returned via @bufs).
+ *
+ * Returns the number of packets written to @bufs.
+ */
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	/* whole_pkts counts complete packets; hw_recv_pkt_segs counts segments */
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		/* NT descriptor header sits at the start of the first segment */
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		/* A capture shorter than the header itself is malformed */
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			/* Fast path: single segment that fits the mbuf tailroom */
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				/* Slow path: merge/split segments into an mbuf chain */
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			/* color_type selects how descriptor color/fid map to FDIR data */
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+err_exit:
+	/* Always hand the consumed segments back to HW, also on error */
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one mbuf (possibly a chain) into one or more Tx virtqueue buffers,
+ * building the descriptor chain (NEXT flags / next links) as it goes.
+ *
+ * The first descriptor's buffer starts with an SG_HDR_SIZE header that the
+ * caller has already written; copying starts after it. At most @max_segs
+ * virtqueue buffers are used.
+ *
+ * Returns the number of virtqueue descriptors used, or -1 if the packet
+ * needs more than @max_segs.
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	/* First vq buffer already holds the SG header, so less room for data */
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	/* One iteration per source mbuf segment */
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Seg %i: virtq buf first cpy src offs %u, dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* Chain current descriptor to the next one */
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				if (++cur_seg_num > max_segs)
+					break;
+
+				/* Subsequent vq buffers carry no SG header */
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			printf("Tx vq buf seg %i: Copy %i bytes - offset %u\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len promised more data than the segment chain held */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	};
+
+	/* cur_seg_num was 0-based; convert to a count of descriptors used */
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * Tx burst function (scatter-gather path).
+ *
+ * Counts the virtqueue segments needed for the burst, acquires that many Tx
+ * buffers from HW (trimming the burst if fewer are available), copies each
+ * mbuf into the virtqueue with an NT header prepended, frees the mbufs and
+ * releases the filled buffers to HW.
+ *
+ * Returns the number of packets actually queued for transmission.
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/* Pad runts to the 60-byte minimum Ethernet frame size */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* Fast path: single mbuf segment fits a single vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* Slow path: chain mbuf segments across vq buffers */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and VFIO-map the memory backing a HW virtio queue: a 1MB control
+ * area for the combined descriptor rings plus num_descr packet buffers of
+ * buf_size bytes each.
+ *
+ * Preferred layout: one contiguous allocation mapped as a single 1G IOMMU
+ * region where HPA and IOVA share the same 1G-offset. If that layout cannot
+ * be achieved (offset mismatch or the block would cross a 1G boundary), fall
+ * back to mapping the control area and the packet buffers separately.
+ *
+ * Returns 0 on success, -1 on failure. On failure all memory allocated by
+ * this function has been freed again (fixes leaks present in the original
+ * error paths).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/*
+		 * Fallback: allocate 1MB for the combined descr rings and map
+		 * it on its own. ctrl_virt intentionally does NOT reuse the
+		 * name 'virt' (the original shadowed the outer pointer).
+		 */
+		size = 0x100000;
+		void *ctrl_virt = rte_malloc_socket("VirtQDescr", size, 4096,
+						    eth_dev->data->numa_node);
+		if (!ctrl_virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, ctrl_virt, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			/* Fix: ctrl area was leaked here in the original */
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = ctrl_virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       ctrl_virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* Fix: pkt buffers, array and ctrl area leaked here */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			hwq->pkt_buffers = NULL;
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		/* Fix: allocation was leaked here in the original */
+		rte_free(virt);
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	/* Packet buffers start right after the 1MB control area */
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		rte_free(virt);
+		return -1;
+	}
+
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	/* vf_num == 0 marks the queue set as unused; nothing to do then. */
+	if (hwq == NULL || hwq->vf_num == 0)
+		return;
+
+	hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the VFIO DMA region, clear the
+ * hwq ownership and free the backing memory.
+ *
+ * NOTE(review): the unmap length is always ONE_G_SIZE, while the fallback
+ * path in allocate_hw_virtio_queues() maps only 1MB + packet buffers (and in
+ * two separate regions) — confirm nt_vfio_dma_unmap tolerates this, and that
+ * the separately-mapped pkt-buffer area is unmapped/freed elsewhere.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	int vf_num = hwq->vf_num;
+
+	/* Saved before release_hw_virtio_queues() clears the hwq state */
+	void *virt = hwq->virt_queues_ctrl.virt_addr;
+
+	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
+				    (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				    ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(virt);
+	return 0;
+}
+
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	/* Tear down the HW virtio queue backing this Tx queue. */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&dev_priv->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	/* Tear down the HW virtio queue backing this Rx queue. */
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&dev_priv->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+static int num_queues_allocated;
+
+/* Returns num queue starting at returned queue num or -1 on fail */
+static int allocate_queue(int num)
+{
+	const int first_queue = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+
+	/* Simple bump allocator: fail when the request exceeds the pool. */
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+
+	num_queues_allocated += num;
+	return first_queue;
+}
+
+/*
+ * rx_queue_setup callback (scatter-gather path).
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mempool/buf-size bookkeeping is
+ * needed. For other ports this also allocates the HW virtio queue memory
+ * and creates the managed Rx virt-queue toward the FPGA.
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports need no HW virt-queue; just record the mempool */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	/* Usable mbuf data room determines the Rx buffer size */
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * tx_queue_setup callback (scatter-gather path).
+ *
+ * For PORT_TYPE_OVERRIDE ports only the queue pointer is registered. For
+ * other ports this computes the HW bypass target id, allocates the HW virtio
+ * queue memory, creates the managed Tx virt-queue toward the FPGA, binds the
+ * virtual-port queues to this port and finally enables physical ports.
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	if (tx_queue_id > internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* Route every virtual-port queue to this physical port in the EPP */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * DPDK mtu_set callback used for the inline profile.
+ * Only physical ports accept an inline MTU, and only within
+ * [MIN_MTU_INLINE, MAX_MTU]; anything else yields -EINVAL.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)dev->data->dev_private;
+	int status = -1;
+
+	if (pmd->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE &&
+			mtu <= MAX_MTU)
+		status = flow_set_mtu_inline(pmd->flw_dev, pmd->port, mtu);
+
+	return status == 0 ? 0 : -EINVAL;
+}
+
+/*
+ * DPDK mtu_set callback (vswitch profile variant).
+ *
+ * Programs the EPP module: the exception path towards OVS (rxq_scg[0]'s
+ * queue) is always set to MAX_MTU, then the requested @mtu is applied to
+ * every virtual-port queue (virtual ports) or to the physical port itself.
+ *
+ * Returns 0 on success, an nthw_epp_set_mtu() error code, or -EINVAL for an
+ * out-of-range MTU / unsupported port type.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int retval = EINVAL;
+
+	/* Reject MTUs outside the supported range */
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		if (retval)
+			return retval;
+
+		uint i;
+
+		/* Apply the requested MTU to each virtual-port queue */
+		for (i = 0; i < internals->vpq_nb_vq; i++) {
+			retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				internals->vpq[i].hw_id, /* tx queue hw_id */
+				mtu, /* max number of bytes allowed for a given port. */
+				internals->type); /* port type */
+			if (retval)
+				return retval;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+	} else if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* set MTU on exception to MAX_MTU */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->rxq_scg[0]
+			.queue
+			.hw_id, /* exception tx queue hw_id to OVS */
+			MAX_MTU, /* max number of bytes allowed for a given port. */
+			PORT_TYPE_VIRTUAL); /* port type */
+		if (retval)
+			return retval;
+
+		/* Apply the requested MTU to the physical port itself */
+		retval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+			internals->port, /* port number */
+			mtu, /* max number of bytes allowed for a given port. */
+			internals->type); /* port type */
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+	} else {
+		/* PORT_TYPE_OVERRIDE (or unknown) ports have no EPP MTU */
+		NT_LOG(DBG, ETHDEV,
+		       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+		       internals->port, internals->type);
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/* DPDK rx_queue_start callback: only the queue-state flag needs updating. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* DPDK rx_queue_stop callback: only the queue-state flag needs updating. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_start callback: only the queue-state flag needs updating.
+ * The parameter was misnamed 'rx_queue_id' (copy-paste from the RX variant);
+ * it indexes the TX queue state and is renamed accordingly.
+ */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_stop callback: only the queue-state flag needs updating.
+ * The parameter was misnamed 'rx_queue_id' (copy-paste from the RX variant);
+ * it indexes the TX queue state and is renamed accordingly.
+ */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * DPDK mac_addr_remove callback: clear the MAC address slot at @index.
+ * Out-of-range indices are logged and ignored (assert fires in debug builds).
+ */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct rte_ether_addr *const addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const pmd = dev->data->dev_private;
+
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, pmd->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return;
+	}
+
+	memset(&addrs[index], 0, sizeof(addrs[index]));
+}
+
+/*
+ * DPDK mac_addr_add callback: store @mac_addr in slot @index.
+ * VMDq is not supported; the @vmdq argument is ignored.
+ * Returns 0 on success, -1 for an out-of-range index.
+ */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	struct rte_ether_addr *const addrs = dev->data->mac_addrs;
+
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const pmd = dev->data->dev_private;
+
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, pmd->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	addrs[index] = *mac_addr;
+	return 0;
+}
+
+/* DPDK mac_addr_set callback: install @mac_addr as the primary (slot 0) address. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+
+	return 0;
+}
+
+/*
+ * DPDK set_mc_addr_list callback: replace the port's multicast address list
+ * with @mc_addr_set; unused slots in the internal table are zeroed.
+ * Returns 0 on success, -1 if the list does not fit the table.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	/*
+	 * The copy loop below fills exactly NUM_MULTICAST_ADDRS_PER_PORT
+	 * slots, so a list of exactly that many addresses fits; only strictly
+	 * larger lists are rejected (the previous '>=' check was off by one).
+	 */
+	if (nb_mc_addr > NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (> %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {
+		if (i < nb_mc_addr)
+			mc_addrs[i] = mc_addr_set[i];
+
+		else
+			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+	}
+
+	return 0;
+}
+
+/*
+ * DPDK dev_configure callback.
+ * Records that probing has finished and reasserts promiscuous mode, which
+ * is a fixed property of this device.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device ALWAYS runs promiscuous mode.  The previous
+	 * 'promiscuous ^= ~promiscuous' obscurely computed an all-ones value;
+	 * a plain assignment expresses the intent directly.
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * DPDK dev_start callback.
+ *
+ * Virtual/override ports are simply marked link-up.  Physical ports wait up
+ * to 5 s for the FPGA link before continuing (transmitting earlier produces
+ * garbage), then apply any requested loopback mode.  Always returns 0 - a
+ * link-wait timeout is logged but not treated as an error.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		/* No physical link to wait for - report link-up immediately */
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+	} else {
+		/*
+		 * wait for link on port
+		 * If application starts sending too soon before FPGA port is ready, garbage is
+		 * produced
+		 */
+		int loop = 0;
+
+		/* Poll every 100 ms, up to 50 times (5 s total) */
+		while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+				ETH_LINK_DOWN) {
+			/* break out after 5 sec */
+			if (++loop >= 50) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+				       __func__, internals->n_intf_no);
+				break;
+			}
+			usleep(100000);
+		}
+		assert(internals->n_intf_no ==
+		       internals->if_index); /* Sanity check */
+		/* lpbk_mode is a bit mask: bit 0 = host loopback, bit 1 = line loopback */
+		if (internals->lpbk_mode) {
+			if (internals->lpbk_mode & 1 << 0) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_HOST);
+			}
+			if (internals->lpbk_mode & 1 << 1) {
+				nt4ga_port_set_loopback_mode(p_adapter_info,
+							     n_intf_no,
+							     NT_LINK_LOOPBACK_LINE);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * DPDK dev_stop callback.
+ * For non-virtual ports: hand every managed RX/TX virt-queue back to the
+ * driver and take the port administratively down.  All port types end with
+ * the DPDK link status forced to down.
+ */
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int n_intf_no = internals->if_index;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		uint i;
+
+		/* Release the managed virt-queues created at queue setup */
+		for (i = 0; i < internals->nb_rx_queues; i++)
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+		for (i = 0; i < internals->nb_tx_queues; i++)
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+
+		/* Take the physical port down and clear link/loopback state */
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+/*
+ * DPDK dev_set_link_up callback.
+ * Only meaningful for physical ports; virtual/override ports succeed as
+ * no-ops.
+ */
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const pmd = dev->data->dev_private;
+	const int port = pmd->if_index;
+
+	if (pmd->type == PORT_TYPE_VIRTUAL || pmd->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == pmd->n_intf_no);
+
+	/* Bring the port administratively up */
+	nt4ga_port_set_adm_state(&pmd->p_drv->ntdrv.adapter_info, port, true);
+
+	return 0;
+}
+
+/*
+ * DPDK dev_set_link_down callback.
+ * Only meaningful for physical ports; virtual/override ports succeed as
+ * no-ops.
+ */
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const pmd = dev->data->dev_private;
+	const int port = pmd->if_index;
+
+	if (pmd->type == PORT_TYPE_VIRTUAL || pmd->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == pmd->n_intf_no);
+
+	/* Force the reported link status down */
+	nt4ga_port_set_link_status(&pmd->p_drv->ntdrv.adapter_info, port,
+				   false);
+
+	return 0;
+}
+
+/*
+ * DPDK dev_close callback.
+ *
+ * Releases per-port state (LAG thread/config, private data, the ethdev
+ * itself) and, when the last port of the adapter is closed, shuts down the
+ * driver threads and deinitializes the adapter.
+ */
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup: stop the LAG thread before freeing its config */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free the per-port private data */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release the ethdev port */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 * (p_drv is dereferenced above, so the former trailing '&& p_drv'
+	 * NULL test was dead code and has been removed)
+	 */
+	if (p_drv->n_eth_dev_init_count == 0) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		/* Join the statistics thread, and the FLM thread if it runs */
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+/*
+ * DPDK fw_version_get callback.
+ * Formats the FPGA image identification as "TTT-PPPP-VV-RR".  Returns 0 on
+ * success, or the buffer size needed (incl. NUL terminator) when @fw_size
+ * is too small.  Virtual/override ports report nothing and return 0.
+ */
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	fpga_info_t *fpga;
+	int written;
+
+	if (pmd->type == PORT_TYPE_VIRTUAL || pmd->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga = &pmd->p_drv->ntdrv.adapter_info.fpga_info;
+	written = snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			   fpga->n_fpga_type_id, fpga->n_fpga_prod_id,
+			   fpga->n_fpga_ver_id, fpga->n_fpga_rev_id);
+	if ((size_t)written < fw_size)
+		return 0; /* the version string fitted */
+
+	/* Buffer too small - report the space required */
+	return written + 1;
+}
+
+/* DPDK xstats_get callback: fetch up to @n extended statistics. */
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *nt_drv = &pmd->p_drv->ntdrv;
+	nt4ga_stat_t *stat = &nt_drv->adapter_info.nt4ga_stat;
+	int count;
+
+	/* Statistics are updated by a background thread; lock around reads */
+	pthread_mutex_lock(&nt_drv->stat_lck);
+	count = nthw_xstats_get(stat, stats, n,
+				stat->mp_nthw_stat->mb_is_vswitch,
+				pmd->if_index);
+	pthread_mutex_unlock(&nt_drv->stat_lck);
+	return count;
+}
+
+/* DPDK xstats_get_by_id callback: fetch extended statistics by id list. */
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *nt_drv = &pmd->p_drv->ntdrv;
+	nt4ga_stat_t *stat = &nt_drv->adapter_info.nt4ga_stat;
+	int count;
+
+	/* Serialize against the background statistics-collector thread */
+	pthread_mutex_lock(&nt_drv->stat_lck);
+	count = nthw_xstats_get_by_id(stat, ids, values, n,
+				      stat->mp_nthw_stat->mb_is_vswitch,
+				      pmd->if_index);
+	pthread_mutex_unlock(&nt_drv->stat_lck);
+	return count;
+}
+
+/* DPDK xstats_reset callback: clear HW xstats, then the DPDK-level stats. */
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *nt_drv = &pmd->p_drv->ntdrv;
+	nt4ga_stat_t *stat = &nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = pmd->if_index;
+
+	/* Serialize against the background statistics-collector thread */
+	pthread_mutex_lock(&nt_drv->stat_lck);
+	nthw_xstats_reset(stat, stat->mp_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&nt_drv->stat_lck);
+	return dpdk_stats_reset(pmd, nt_drv, if_index);
+}
+
+/*
+ * DPDK xstats_get_names callback: fill @xstats_names with up to @size
+ * extended-statistics names and return the total count.
+ * Note: @eth_dev IS dereferenced below, so the spurious __rte_unused
+ * annotation it carried has been dropped.
+ */
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+/* DPDK xstats_get_names_by_id callback: names for selected statistics ids. */
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	nt4ga_stat_t *stat = &pmd->p_drv->ntdrv.adapter_info.nt4ga_stat;
+
+	return nthw_xstats_get_names_by_id(stat, xstats_names, ids, size,
+					   stat->mp_nthw_stat->mb_is_vswitch);
+}
+
+/* DPDK flow_ops_get callback: expose this PMD's rte_flow operations. */
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+
+	return 0;
+}
+
+/*
+ * DPDK promiscuous_enable callback.
+ * Promiscuous mode is a fixed property of this device, so this is a no-op
+ * that only logs the fact.  (Log text: fixed grammar and added the trailing
+ * newline used by every other NT_LOG call in this file.)
+ */
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always runs in promiscuous mode.\n");
+	return 0;
+}
+
+/*
+ * DPDK rss_hash_update callback: program the default hasher recipe from the
+ * DPDK rss_hf bit mask and flush it to hardware.
+ */
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *nic_dev = pmd->flw_dev->ndev;
+	/* hsh index 0 means the default receipt in HSH module */
+	const int hsh_idx = 0;
+	int res;
+
+	res = flow_nic_set_hasher_fields(nic_dev, hsh_idx,
+					 nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&nic_dev->be, hsh_idx, 1);
+	return res;
+}
+
+/*
+ * DPDK rss_hash_conf_get callback.
+ * The RSS key is not exposed (reported as NULL/0); only the active
+ * hash-function set is OR'ed into @rss_conf->rss_hf.
+ */
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *pmd =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *nic_dev = pmd->flw_dev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(nic_dev->rss_hash_config);
+	return 0;
+}
+
+/*
+ * DPDK ethdev callback table for the NTNIC PMD.
+ *
+ * NOTE(review): dev_set_mtu()/dev_set_mtu_inline() are defined above but
+ * .mtu_set is NULL here - presumably it is patched in later depending on
+ * the FPGA profile; confirm against the probe/init code.
+ */
+static struct eth_dev_ops nthw_eth_dev_ops = {
+	.dev_configure = eth_dev_configure,
+	.dev_start = eth_dev_start,
+	.dev_stop = eth_dev_stop,
+	.dev_set_link_up = eth_dev_set_link_up,
+	.dev_set_link_down = eth_dev_set_link_down,
+	.dev_close = eth_dev_close,
+	.link_update = eth_link_update,
+	.stats_get = eth_stats_get,
+	.stats_reset = eth_stats_reset,
+	.dev_infos_get = eth_dev_infos_get,
+	.fw_version_get = eth_fw_version_get,
+	.rx_queue_setup = eth_rx_scg_queue_setup,
+	.rx_queue_start = eth_rx_queue_start,
+	.rx_queue_stop = eth_rx_queue_stop,
+	.rx_queue_release = eth_rx_queue_release,
+	.tx_queue_setup = eth_tx_scg_queue_setup,
+	.tx_queue_start = eth_tx_queue_start,
+	.tx_queue_stop = eth_tx_queue_stop,
+	.tx_queue_release = eth_tx_queue_release,
+	.mac_addr_remove = eth_mac_addr_remove,
+	.mac_addr_add = eth_mac_addr_add,
+	.mac_addr_set = eth_mac_addr_set,
+	.set_mc_addr_list = eth_set_mc_addr_list,
+	.xstats_get = eth_xstats_get,
+	.xstats_get_names = eth_xstats_get_names,
+	.xstats_reset = eth_xstats_reset,
+	.xstats_get_by_id = eth_xstats_get_by_id,
+	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
+	.mtu_set = NULL,
+	.mtr_ops_get = eth_mtr_ops_get,
+	.flow_ops_get = _dev_flow_ops_get,
+	.promiscuous_disable = NULL, /* promiscuous mode cannot be disabled */
+	.promiscuous_enable = promiscuous_enable,
+	.rss_hash_update = eth_dev_rss_hash_update,
+	.rss_hash_conf_get = rss_hash_conf_get,
+};
+
+/* Converts link speed provided in Mbps to NT specific definitions.*/
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	static const struct {
+		int mbps;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ 10, NT_LINK_SPEED_10M },
+		{ 100, NT_LINK_SPEED_100M },
+		{ 1000, NT_LINK_SPEED_1G },
+		{ 10000, NT_LINK_SPEED_10G },
+		{ 25000, NT_LINK_SPEED_25G },
+		{ 40000, NT_LINK_SPEED_40G },
+		{ 50000, NT_LINK_SPEED_50G },
+		{ 100000, NT_LINK_SPEED_100G },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].mbps == link_speed_mbps)
+			return speed_map[i].speed;
+	}
+	/* Unrecognized rates map to the unknown sentinel */
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Adapter flm stat thread
+ *
+ * Blocks (1 s polling) until the first flow_eth_dev is registered on the
+ * flow device, then repeatedly calls flm_mtr_update_stats() until driver
+ * shutdown, sleeping 10 us whenever an update returns 0.
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	/* Wait for a port to be configured on this flow device */
+	while (p_flow_nic_dev->eth_base == NULL)
+		usleep(1000000);
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	/* Poll FLM meter statistics until shutdown is requested */
+	while (!p_drv->ntdrv.b_shutdown) {
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter stat thread
+ *
+ * Every ~10 ms: triggers a statistics DMA transfer from the FPGA, waits for
+ * its completion (the DMA timestamp leaving the (uint64_t)-1 sentinel), and
+ * then collects the result under the driver stat lock.  If the timestamp
+ * stays at the sentinel, RMC status registers are periodically dumped to the
+ * log to help diagnose a frozen statistics engine.
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100);
+
+		/* Kick off a statistics DMA transfer */
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		/* Wait for DMA completion: timestamp changes from -1 */
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			/* Every 0x4000 polls, dump RMC status for debugging */
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
+/*
+ * Representor bookkeeping shared across probe stages.
+ * NOTE(review): presumably populated by setup_virtual_pf_representor_base()
+ * during nthw_pci_dev_init() - confirm field semantics against that code.
+ */
+static struct {
+	struct rte_pci_device *vpf_dev; /* PCI device backing the virtual PF */
+	struct rte_eth_devargs eth_da; /* parsed representor devargs */
+	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED]; /* queues per virtual port - TODO confirm */
+	uint16_t pf_backer_port_id; /* ethdev port id of the backing PF */
+} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = {{ 0 }};
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id    = i;
+			queue_ids[i].hw_id = start_queue + i;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+/*
+ * Ethdev deinit callback: release every scatter-gather virt queue owned by
+ * the registered ports, then drain the deferred-release table and drop the
+ * exception-path VFIO mapping.
+ */
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	struct pmd_internals *p;
+	int q;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	/* Walk all registered ports; free Tx first, then Rx, highest index first */
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		for (q = p->nb_tx_queues - 1; q >= 0; q--) {
+			nthw_release_managed_tx_virt_queue(p->txq_scg[q].vq);
+			release_hw_virtio_queues(&p->txq_scg[q].hwq);
+		}
+
+		for (q = p->nb_rx_queues - 1; q >= 0; q--) {
+			nthw_release_managed_rx_virt_queue(p->rxq_scg[q].vq);
+			release_hw_virtio_queues(&p->rxq_scg[q].hwq);
+		}
+	}
+
+	/* Release queues registered for deferred teardown, honoring rx/managed */
+	for (q = 0; q < MAX_REL_VQS; q++) {
+		struct nthw_virt_queue *vq = rel_virt_queue[q].vq;
+
+		if (vq == NULL)
+			continue;
+
+		if (rel_virt_queue[q].rx) {
+			if (rel_virt_queue[q].managed)
+				nthw_release_managed_rx_virt_queue(vq);
+			else
+				nthw_release_rx_virt_queue(vq);
+		} else {
+			if (rel_virt_queue[q].managed)
+				nthw_release_managed_tx_virt_queue(vq);
+			else
+				nthw_release_tx_virt_queue(vq);
+		}
+		rel_virt_queue[q].vq = NULL;
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+/*
+ * SIGINT handler: flag the PMD for shutdown. Any other signal is forwarded
+ * by restoring the previously installed handler and re-raising it.
+ */
+static void signal_handler_func_int(int sig)
+{
+	if (sig == SIGINT) {
+		kill_pmd = 1;
+		return;
+	}
+	signal(sig, previous_handler);
+	raise(sig);
+}
+
+/*
+ * Background thread: wait for the SIGINT flag, tear the PMD down, then
+ * hand SIGINT back to the original handler and deliver it.
+ */
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	for (;;) {
+		if (kill_pmd)
+			break;
+		usleep(100000);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	/* restore the saved handler and re-deliver the interrupt */
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+/*
+ * Install the SIGINT shutdown machinery: a signal handler that sets a flag
+ * and a detached worker thread that performs the actual PMD teardown.
+ *
+ * Also performs a one-shot estimate of the TSC frequency used to rate-limit
+ * statistics polling.
+ *
+ * Returns 0 on success, -1 if the shutdown thread could not be created
+ * (in which case the previous SIGINT handler is restored).
+ */
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	/* pthread_create returns non-zero on failure - do not ignore it */
+	if (pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Failed to create shutdown thread\n",
+		       __func__);
+		signal(SIGINT, previous_handler);
+		return -1;
+	}
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	/* cycles elapsed over 10 ms, scaled by 100 -> approx cycles per second */
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
+/*
+ * PCI probe callback for the NTNIC PMD.
+ *
+ * Validates the EAL environment (PCI bus support, a real VFIO IOMMU,
+ * hugepages; warns if the IOVA mode is not PA), skips the secondary PCIe
+ * endpoint of bifurcated NT200A01/NT50B01 adapters, then runs the full
+ * device init and installs the SIGINT shutdown handler.
+ *
+ * Returns the result of nthw_pci_dev_init(), or -1 on an environment check
+ * failure.
+ */
+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	int res;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+#if defined(DEBUG)
+	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
+	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
+	       __func__, __LINE__);
+#endif
+
+	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
+	       pci_dev->name);
+	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
+	       pci_dev->device.name);
+	if (pci_dev->device.devargs) {
+		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
+		       (pci_dev->device.devargs->args ?
+			pci_dev->device.devargs->args :
+			"NULL"));
+		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
+		       (pci_dev->device.devargs->data ?
+			pci_dev->device.devargs->data :
+			"NULL"));
+	}
+
+	/* The PMD requires EAL PCI bus support */
+	const int n_rte_has_pci = rte_eal_has_pci();
+
+	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
+	if (n_rte_has_pci == 0) {
+		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs PCI support\n",
+		       n_rte_has_pci);
+		return -1;
+	}
+
+	/* VFIO no-IOMMU mode is not supported - a real IOMMU is required */
+	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
+
+	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
+	       n_rte_vfio_no_io_mmu_enabled);
+	if (n_rte_vfio_no_io_mmu_enabled) {
+		NT_LOG(ERR, ETHDEV,
+		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
+		       n_rte_vfio_no_io_mmu_enabled);
+		return -1;
+	}
+
+	/* Non-PA IOVA mode works but is slower - warn only */
+	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
+
+	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
+	if (n_rte_io_va_mode != RTE_IOVA_PA) {
+		NT_LOG(WRN, ETHDEV,
+		       "iova mode (%d) should be PA for performance reasons\n",
+		       n_rte_io_va_mode);
+	}
+
+	/* Hugepages are mandatory */
+	const int n_rte_has_huge_pages = rte_eal_has_hugepages();
+
+	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
+	if (n_rte_has_huge_pages == 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "has_hugepages=%d: this PMD needs hugepages\n",
+		       n_rte_has_huge_pages);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "busid=" PCI_PRI_FMT
+	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
+	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+	       pci_dev->addr.function, pci_dev->id.vendor_id,
+	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
+	       pci_dev->id.subsystem_device_id,
+	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
+	       pci_dev->device.numa_node,
+	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
+	       "NA",
+	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
+	       "NA");
+
+	/*
+	 * On bifurcated NT200A01/NT50B01 boards only the primary endpoint
+	 * (subsystem device id 0x01) is driven by this PMD.
+	 */
+	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
+				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
+			if (pci_dev->id.subsystem_device_id != 0x01) {
+				NT_LOG(DBG, ETHDEV,
+				       "%s: PCIe bifurcation - secondary endpoint "
+				       "found - leaving probe\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	res = nthw_pci_dev_init(pci_dev);
+
+	init_shutdown();
+
+	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
+	return res;
+}
+
+/* PCI remove callback: delegate per-ethdev teardown to nthw_pci_dev_deinit() */
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+}
+
+/*
+ * NT log backend init: register one EAL log type per NT log module,
+ * defaulting each to INFO level. Always returns 0.
+ */
+static int nt_log_init_impl(void)
+{
+	int module;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (module = NT_LOG_MODULE_GENERAL; module < NT_LOG_MODULE_END;
+			module++) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		nt_log_module_logtype[idx] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[idx],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+/* NT log backend: translate an NT level/module pair and emit via rte_vlog() */
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level;
+	uint32_t rte_module;
+
+	/* Map the NT log level onto the matching EAL level (default DEBUG) */
+	if (level == NT_LOG_ERR)
+		rte_level = RTE_LOG_ERR;
+	else if (level == NT_LOG_WRN)
+		rte_level = RTE_LOG_WARNING;
+	else if (level == NT_LOG_INF)
+		rte_level = RTE_LOG_INFO;
+	else
+		rte_level = RTE_LOG_DEBUG;
+
+	/* Known NT modules use their registered EAL type; others pass through */
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END)
+		rte_module =
+			(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)];
+	else
+		rte_module = module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+/*
+ * NT log backend: return 1 if the module's EAL log level is DEBUG, 0 if not,
+ * and -1 for an out-of-range module id.
+ */
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module >= NT_LOG_MODULE_GENERAL && module < NT_LOG_MODULE_END) {
+		const int idx = NT_LOG_MODULE_INDEX(module);
+
+		return rte_log_get_level(nt_log_module_logtype[idx]) ==
+		       RTE_LOG_DEBUG;
+	}
+	return -1;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+/* EAL constructor: hook the NT logging abstraction up to the EAL logger */
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = {
+		.init = nt_log_init_impl,
+		.log = nt_log_log_impl,
+		.is_debug = nt_log_is_debug_impl,
+	};
+
+	nt_log_init(&impl);
+}
+
+/* PCI driver descriptor and PMD registration for net_ntnic */
+static struct rte_pci_driver rte_nthw_pmd = {
+	.driver = {
+		.name = "net_ntnic",
+	},
+
+	.id_table = nthw_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = nthw_pci_probe,
+	.remove = nthw_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
+/* the device must be bound to the vfio-pci kernel module */
+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+/*
+ * Remember a virt queue (with its rx/managed attributes) so it can be
+ * released during PMD teardown. Returns 0 when a free slot was found,
+ * -1 when the table is full.
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	for (int slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq != NULL)
+			continue;
+
+		rel_virt_queue[slot].vq = vq;
+		rel_virt_queue[slot].rx = rx;
+		rel_virt_queue[slot].managed = managed;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Remove a previously registered virt queue from the deferred-release
+ * table. Returns 0 when found, -1 when the queue was not registered.
+ */
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	for (int slot = 0; slot < MAX_REL_VQS; slot++) {
+		if (rel_virt_queue[slot].vq == vq) {
+			rel_virt_queue[slot].vq = NULL;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/* Find the port internals registered with the given vhost id, or NULL */
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *p;
+
+	for (p = pmd_intern_base; p != NULL; p = p->next) {
+		if (p->vhid == vhid)
+			return p;
+	}
+	return NULL;
+}
+
+/*
+ * Find the port internals whose vhost socket path matches \p path,
+ * or NULL if no registered instance uses that path.
+ */
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		/* compare once; trace via the driver logger instead of stdout */
+		int cmp = strcmp(intern->vhost_path, path);
+
+		NT_LOG(DBG, VDPA, "Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path, cmp);
+		if (cmp == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+/*
+ * Parse a "portqueues" mapping of the form "[port:nvq,port:nvq;...]"
+ * (',' and ';' are both accepted as list separators) and store the queue
+ * count per port into \p portq, indexed by port number.
+ *
+ * The bracketed part of \p str is modified in place (separators are
+ * NUL-terminated). Entries with an out-of-range port or queue count are
+ * silently ignored.
+ *
+ * Tokenization is done with a local cursor instead of strtok(), whose
+ * static state is not reentrant.
+ */
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	/* locate the opening bracket of the list */
+	str = strchr(str, '[');
+	if (str == NULL)
+		return;
+	str++;
+
+	/* locate the last closing bracket and terminate the list there */
+	char *str_e = strrchr(str, ']');
+
+	if (str_e == NULL)
+		return;
+	*str_e = '\0';
+
+	char *cursor = str;
+
+	while (cursor != NULL && *cursor != '\0') {
+		char *tok = cursor;
+		char *sep = strpbrk(cursor, ",;");
+
+		if (sep != NULL) {
+			*sep = '\0';
+			cursor = sep + 1;
+		} else {
+			cursor = NULL; /* last token */
+		}
+
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+	}
+}
+
+/*
+ * Parse devargs for PF representor setup: an optional "portqueues=[...]"
+ * extension (consumed here and stripped from the argument string, since
+ * rte_eth_devargs_parse() cannot read representor=[x] with a trailing
+ * parameter), then the standard representor arguments.
+ *
+ * Returns the number of representor ports requested (0 when none), or
+ * -1 on a devargs parse error.
+ */
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	/* zero-init the whole struct: the parser may leave fields untouched,
+	 * and eth_da is copied into rep.eth_da below
+	 */
+	struct rte_eth_devargs eth_da = { 0 };
+
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			*ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+/*
+ * Allocate an ethdev for a virtual port / representor and its private
+ * data area. On success *n_vq is set to the queue count configured for
+ * the representor via "portqueues" (default 1).
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate(name);
+
+	if (eth_dev == NULL)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		void *priv = rte_zmalloc_socket(name, private_data_size,
+						RTE_CACHE_LINE_SIZE,
+						dev->device.numa_node);
+
+		if (priv == NULL) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+		eth_dev->data->dev_private = priv;
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		/* consume the next representor id declared on the PF */
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	/* queue count from the "portqueues" devargs mapping, default 1 */
+	int nvq = rep.portqueues[eth_dev->data->representor_id];
+
+	*n_vq = nvq ? nvq : 1;
+	return eth_dev;
+}
+
+/* Return the EAL device name, or NULL when the device or name is absent */
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	if (dev == NULL || dev->device.name == NULL)
+		return NULL;
+	return dev->device.name;
+}
+
+/* Devargs keys accepted by the virtual-port ethdevs ("vlan", "sep") */
+static const char *const valid_args[] = {
+#define VP_VLAN_ID "vlan"
+	VP_VLAN_ID,
+#define VP_SEPARATE_SOCKET "sep"
+	VP_SEPARATE_SOCKET, NULL
+};
+
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id    = i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	__atomic_store_n(&internals->vhid, vhid, __ATOMIC_RELAXED);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ *
+ * Dummy Rx burst handler: always reports 0 packets received so the
+ * generic ethdev Rx path is effectively disabled for such ports.
+ */
+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ *
+ * Dummy Tx burst handler: always reports 0 packets sent so the
+ * generic ethdev Tx path is effectively disabled for such ports.
+ */
+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
+				     struct rte_mbuf **bufs __rte_unused,
+				     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * Create the virtual-function DPDK ethdev for the given PCI device and
+ * install the SCG Rx/Tx burst handlers.
+ *
+ * PORT_TYPE_OVERRIDE ports get dummy burst handlers because their queues
+ * belong to the VF/vDPA path and cannot carry traffic through SCG.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Guard against NULL, consistent with nthw_remove_vf_interface_dpdk() */
+	if (!pci_dev)
+		return -1;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		/* Queues are owned by VF/vDPA: SCG bursts must be no-ops */
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+/*
+ * Tear down the virtual-function ethdev for the given PCI device:
+ * close all vDPA devices, free the privately allocated ethdev data and
+ * release the ethdev port entry.
+ *
+ * Returns 0 on success, -1 if pci_dev is NULL or no ethdev is found.
+ */
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/*
+	 * Find the ethdev entry by device name.
+	 * BUG FIX: rte_vdev_device_name() takes a struct rte_vdev_device *,
+	 * not a struct rte_pci_device *; use the generic rte_device name.
+	 */
+	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (eth_dev == NULL)
+		return -1;
+
+	/*
+	 * data/dev_private were allocated by this PMD (see the virtual-port
+	 * init path), so they are freed here before releasing the port.
+	 * NOTE(review): confirm rte_eth_dev_release_port() does not free
+	 * dev_private again on this DPDK version.
+	 */
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
+/* Port-0 share of the Tx hash distribution, in percent */
+#define LAG_PORT0_ONLY (100)
+#define LAG_BALANCED_50_50 (50)
+#define LAG_PORT1_ONLY (0)
+
+/* Index arguments for lag_set_config()/lag_set_port_block() */
+#define LAG_NO_TX (0)
+#define LAG_PORT0_INDEX (1)
+#define LAG_PORT1_INDEX (2)
+#define LAG_HASH_INDEX (3)
+
+/* No-op transition action for LAG states that require no reconfiguration. */
+static int lag_nop(lag_config_t *config __rte_unused)
+{
+	return 0;
+}
+
+/* Both links up: distribute Tx 50/50 between the two ports. */
+static int lag_balance(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
+}
+
+/* Only port 0 is usable: send 100% of the Tx distribution to port 0. */
+static int lag_port0_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
+}
+
+/* Only port 1 is usable: send 0% to port 0, i.e. everything to port 1. */
+static int lag_port1_active(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
+	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
+}
+
+/*
+ * Both links down: disable Tx entirely for port 0 and for hash-distributed
+ * traffic. Returns the sum of the two lag_set_config() results, so 0 means
+ * both calls succeeded.
+ */
+static int lag_notx(lag_config_t *config __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");
+
+	int retval = 0;
+
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
+	retval +=
+		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
+	return retval;
+}
+
+/* Read the physical link status of @port via the adapter info; true = up. */
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+/*
+ * Combined link state as a 2-bit bitmap: bit 0 = port 0 up, bit 1 = port 1
+ * up. The value maps directly onto enum lag_state_e.
+ */
+static int lag_get_status(lag_config_t *config)
+{
+	uint8_t port0 = lag_get_link_status(config, 0);
+
+	uint8_t port1 = lag_get_link_status(config, 1);
+
+	uint8_t status = (port1 << 1 | port0);
+	return status;
+}
+
+/*
+ * Make the configured primary port the active one: give it 100% of the
+ * Tx hash distribution and block Rx on the backup port. Returns 0 when
+ * both hardware calls succeed (their results are summed).
+ */
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->primary_port == 0) {
+		/* If port 0 is the active primary, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active primary, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Make the configured backup port the active one: give it 100% of the
+ * Tx hash distribution and block Rx on the (failed) primary port.
+ * Returns 0 when both hardware calls succeed (their results are summed).
+ */
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->backup_port == 0) {
+		/* If port 0 is the active backup, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active backup, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+/*
+ * Active/backup LAG monitor loop. Starts with the primary port active,
+ * then polls link status every 500 ms and fails over to the backup port
+ * when the primary goes down, failing back when it recovers. Runs until
+ * config->lag_thread_active is cleared by another thread; always returns 0.
+ */
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
+/* Transition action callback for the active/active LAG state machine. */
+typedef int (*lag_aa_action)(lag_config_t *config);
+
+/* port 0 is LSB and port 1 is MSB */
+/*
+ * NOTE(review): 0b binary literals are a GCC/Clang extension (standard
+ * only from C23) — fine for DPDK's supported compilers.
+ */
+enum lag_state_e {
+	P0DOWN_P1DOWN = 0b00,
+	P0UP_P1DOWN = 0b01,
+	P0DOWN_P1UP = 0b10,
+	P0UP_P1UP = 0b11
+};
+
+/* One row of the state-transition table: (src, dst) -> action. */
+struct lag_action_s {
+	enum lag_state_e src_state;
+	enum lag_state_e dst_state;
+	lag_aa_action action;
+};
+
+/*
+ * LAG state-transition table: maps (current link state, new link state)
+ * to the action that reprograms the Tx hash distribution.
+ * Made static const: the table is file-private, never modified at run
+ * time, and the generic name "actions" should not have external linkage.
+ */
+static const struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+/*
+ * Find the transition action for (current_state -> new_state) in the
+ * actions[] table. Returns NULL when no entry matches; the caller must
+ * check for NULL before invoking the result.
+ */
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	uint32_t i;
+
+	for (i = 0; i < sizeof(actions) / sizeof(struct lag_action_s); i++) {
+		if (actions[i].src_state == current_state &&
+				actions[i].dst_state == new_state)
+			return actions[i].action;
+	}
+	return NULL;
+}
+
+/*
+ * Active/active LAG monitor loop. Starts balanced 50/50 with no port
+ * blocked, then polls combined link state every 500 ms and runs the
+ * table-driven transition action. Runs until config->lag_thread_active
+ * is cleared by another thread; always returns 0.
+ */
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		/*
+		 * BUG FIX: lookup_action() returns NULL for transitions not
+		 * in the table; calling through a NULL function pointer
+		 * would crash the LAG thread.
+		 */
+		if (action)
+			action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+/*
+ * LAG management thread entry point. Dispatches to the monitor loop for
+ * the configured bonding mode and returns NULL when the loop exits (or
+ * immediately for unsupported modes).
+ *
+ * NOTE(review): uses fprintf(stderr) instead of NT_LOG like the rest of
+ * the file — consider unifying the logging style.
+ */
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..ee0d84ce82
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
+/*
+ * Compatibility shim: DPDK 22.07 renamed the ETH_* constants to RTE_ETH_*.
+ * Undefine any legacy names still provided by the headers and map them to
+ * the new names so the driver code can keep using the short forms.
+ */
+#undef ETH_LINK_HALF_DUPLEX
+#undef ETH_LINK_FULL_DUPLEX
+#undef ETH_LINK_DOWN
+#undef ETH_LINK_UP
+#undef ETH_LINK_FIXED
+#undef ETH_LINK_AUTONEG
+#undef ETH_SPEED_NUM_NONE
+#undef ETH_SPEED_NUM_10M
+#undef ETH_SPEED_NUM_100M
+#undef ETH_SPEED_NUM_1G
+#undef ETH_SPEED_NUM_2_5G
+#undef ETH_SPEED_NUM_5G
+#undef ETH_SPEED_NUM_10G
+#undef ETH_SPEED_NUM_20G
+#undef ETH_SPEED_NUM_25G
+#undef ETH_SPEED_NUM_40G
+#undef ETH_SPEED_NUM_50G
+#undef ETH_SPEED_NUM_56G
+#undef ETH_SPEED_NUM_100G
+#undef ETH_SPEED_NUM_200G
+#undef ETH_SPEED_NUM_UNKNOWN
+#undef ETH_LINK_SPEED_AUTONEG
+#undef ETH_LINK_SPEED_FIXED
+#undef ETH_LINK_SPEED_10M_HD
+#undef ETH_LINK_SPEED_10M
+#undef ETH_LINK_SPEED_100M_HD
+#undef ETH_LINK_SPEED_100M
+#undef ETH_LINK_SPEED_1G
+#undef ETH_LINK_SPEED_2_5G
+#undef ETH_LINK_SPEED_5G
+#undef ETH_LINK_SPEED_10G
+#undef ETH_LINK_SPEED_20G
+#undef ETH_LINK_SPEED_25G
+#undef ETH_LINK_SPEED_40G
+#undef ETH_LINK_SPEED_50G
+#undef ETH_LINK_SPEED_56G
+#undef ETH_LINK_SPEED_100G
+#undef ETH_LINK_SPEED_200G
+#undef ETH_RSS_IP
+#undef ETH_RSS_UDP
+#undef ETH_RSS_TCP
+#undef ETH_RSS_SCTP
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+/*
+ * NOTE(review): the ETH_RSS_* names below are defined without a matching
+ * #undef above — verify the old headers never define them, or add #undefs
+ * to avoid redefinition warnings.
+ */
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#endif
+
+#define NUM_MAC_ADDRS_PER_PORT (16U)
+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)
+
+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256
+
+/* Total max ports per NT NFV NIC */
+#define MAX_NTNIC_PORTS 2
+
+/* Max RSS queues */
+#define MAX_QUEUES 125
+
+/* Scatter-gather ring sizes and per-packet buffer sizes (bytes) */
+#define SG_NB_HW_RX_DESCRIPTORS 1024
+#define SG_NB_HW_TX_DESCRIPTORS 1024
+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
+
+/* Scatter-gather metadata header size; also #defined again further down
+ * in this header — keep the two in sync (or remove the duplicate).
+ */
+#define SG_HDR_SIZE 12
+
+/* VQ buffers needed to fit all data in packet + header */
+/* NOTE: ({ ... }) statement expression is a GCC/Clang extension. */
+#define NUM_VQ_SEGS(_data_size_) \
+	({ \
+		size_t _size = (_data_size_); \
+		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
+		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
+		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
+		_segment_count; \
+	})
+
+
+/* Map a Tx packet index / descriptor index onto the circular Tx ring. */
+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
+	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
+	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
+
+#define MAX_REL_VQS 128
+
+/* Functions: */
+/* Public entry points implemented in ntnic_ethdev.c / ntnic_vf_vdpa.c */
+struct pmd_internals *vp_vhid_instance_ready(int vhid);
+struct pmd_internals *vp_path_instance_ready(const char *path);
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed);
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf);
+
+/* Shared state defined in the driver's .c files */
+extern int lag_active;
+extern uint64_t rte_tsc_freq;
+extern rte_spinlock_t hwlock;
+
+/* Structs: */
+
+/*
+ * FIX: removed a duplicate "#define SG_HDR_SIZE 12" here — the macro is
+ * already defined once earlier in this header.
+ *
+ * Scatter-gather metadata headers prepended to each packet. The bitfield
+ * layout must match the FPGA's 12-byte descriptor format exactly.
+ * NOTE(review): bitfield ordering/packing is implementation-defined in C;
+ * this relies on the compilers DPDK supports laying them out identically.
+ */
+struct _pkt_hdr_rx {
+	uint32_t cap_len : 14;
+	uint32_t fid : 10;
+	uint32_t ofs1 : 8;
+	uint32_t ip_prot : 8;
+	uint32_t port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+struct _pkt_hdr_tx {
+	uint32_t cap_len : 14;
+	uint32_t lso_cso0 : 9;
+	uint32_t lso_cso1 : 9;
+	uint32_t lso_cso2 : 8;
+	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
+	uint32_t bypass_port : 13;
+	uint32_t descr : 8;
+	uint32_t descr_12b : 1;
+	uint32_t color_type : 2;
+	uint32_t color : 32;
+};
+
+/* Compile time verification of scatter gather header size. */
+typedef char check_sg_pkt_rx_hdr_size
+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
+typedef char check_sg_pkt_tx_hdr_size
+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];
+
+typedef void *handle_t;
+
+/* DMA memory backing one virtqueue: control area plus packet buffers. */
+struct hwq_s {
+	int vf_num;
+	struct nthw_memory_descriptor virt_queues_ctrl;
+	struct nthw_memory_descriptor *pkt_buffers;
+};
+
+/* Per-queue Rx state for the scatter-gather (SCG) datapath. */
+struct ntnic_rx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+
+	struct rte_mempool *mb_pool; /* mbuf memory pool */
+	uint16_t buf_size; /* size of data area in mbuf */
+	unsigned long rx_pkts; /* Rx packet statistics */
+	unsigned long rx_bytes; /* Rx bytes statistics */
+	unsigned long err_pkts; /* Rx error packet statistics */
+	int enabled; /* Enabling/disabling of this queue */
+
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_rx_descr;
+	nt_meta_port_type_t type;
+	uint32_t port; /* Rx port for this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+
+} __rte_cache_aligned;
+
+/* Per-queue Tx state for the scatter-gather (SCG) datapath. */
+struct ntnic_tx_queue {
+	struct flow_queue_id_s
+		queue; /* queue info - user id and hw queue index */
+	struct hwq_s hwq;
+	struct nthw_virt_queue *vq;
+	int nb_hw_tx_descr;
+	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
+	int target_id;
+	nt_meta_port_type_t type;
+	/* only used for exception tx queue from OVS SW switching */
+	int rss_target_id;
+
+	uint32_t port; /* Tx port for this queue */
+	unsigned long tx_pkts; /* Tx packet statistics */
+	unsigned long tx_bytes; /* Tx bytes statistics */
+	unsigned long err_pkts; /* Tx error packet stat */
+	int enabled; /* Enabling/disabling of this queue */
+	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
+} __rte_cache_aligned;
+
+/* Fixed-capacity list of queue/value entries. */
+#define MAX_ARRAY_ENTRIES MAX_QUEUES
+struct array_s {
+	uint32_t value[MAX_ARRAY_ENTRIES];
+	int count;
+};
+
+/* Configurations related to LAG management */
+typedef struct {
+	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */
+
+	int8_t primary_port;
+	int8_t backup_port;
+
+	uint32_t ntpl_rx_id;
+
+	pthread_t lag_tid; /* LAG management thread (lag_management()) */
+	uint8_t lag_thread_active; /* cleared to stop the monitor loop */
+
+	struct pmd_internals *internals;
+} lag_config_t;
+
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+#define BONDING_MODE_8023AD (4)
+/* Meter profile registered through the rte_mtr API. */
+struct nt_mtr_profile {
+	LIST_ENTRY(nt_mtr_profile) next;
+	uint32_t profile_id;
+	struct rte_mtr_meter_profile profile;
+};
+
+/* Meter instance referencing one of the registered profiles. */
+struct nt_mtr {
+	LIST_ENTRY(nt_mtr) next;
+	uint32_t mtr_id;
+	int shared;
+	struct nt_mtr_profile *profile;
+};
+
+/* Negotiated virtio ring layout for a virtual port. */
+enum virt_port_comm {
+	VIRT_PORT_NEGOTIATED_NONE,
+	VIRT_PORT_NEGOTIATED_SPLIT,
+	VIRT_PORT_NEGOTIATED_PACKED,
+	VIRT_PORT_USE_RELAY
+};
+
+#define MAX_PATH_LEN 128
+
+#define MAX_PATH_LEN 128
+
+struct pmd_internals {
+	const struct rte_pci_device *pci_dev;
+
+	struct flow_eth_dev *flw_dev;
+
+	char name[20];
+	char vhost_path[MAX_PATH_LEN];
+
+	int n_intf_no;
+	int if_index;
+
+	int lpbk_mode;
+
+	uint8_t nb_ports_on_adapter;
+	uint8_t ts_multiplier;
+	uint16_t min_tx_pkt_size;
+	uint16_t max_tx_pkt_size;
+
+	unsigned int nb_rx_queues; /* Number of Rx queues configured */
+	unsigned int nb_tx_queues; /* Number of Tx queues configured */
+	uint32_t port;
+	uint8_t port_id;
+
+	nt_meta_port_type_t type;
+	struct flow_queue_id_s vpq[MAX_QUEUES];
+	unsigned int vpq_nb_vq;
+	int vhid; /* if a virtual port type - the vhid */
+	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
+	uint32_t vlan;
+
+	lag_config_t *lag_config;
+
+	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
+	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */
+
+	struct drv_s *p_drv;
+	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
+	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
+	/* Multicast ethernet (MAC) addresses. */
+	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];
+
+	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
+	LIST_HEAD(_mtrs, nt_mtr) mtrs;
+
+	uint64_t last_stat_rtc;
+	uint64_t rx_missed;
+
+	struct pmd_internals *next;
+};
+
+void cleanup_flows(struct pmd_internals *internals);
+int poll_statistics(struct pmd_internals *internals);
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);
+
+#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
+/* Upper bounds on converted rte_flow match items and actions per flow */
+#define MAX_ELEMENTS 64
+#define MAX_ACTIONS 32
+
+/* Number of flow-stat slots; must be a power of two (used as a mask) */
+#define MAX_COLOR_FLOW_STATS 0x400
+
+/* Converted match items (rte_flow items -> flow engine elements). */
+struct cnv_match_s {
+	struct flow_elem flow_elem[MAX_ELEMENTS];
+};
+
+/* Tunnel definition plus the match elements it was built from. */
+struct tun_def_s {
+	struct flow_elem *tun_definition;
+	struct cnv_match_s match;
+};
+
+/* Converted flow attributes together with the converted match. */
+struct cnv_attr_s {
+	struct cnv_match_s match;
+	struct flow_attr attr;
+};
+
+/* Converted actions plus storage for the action payloads they point at. */
+struct cnv_action_s {
+	struct flow_action flow_actions[MAX_ACTIONS];
+	struct tun_def_s tun_def;
+	struct flow_action_rss flow_rss;
+	struct rte_flow_action_mark mark;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_action_queue queue;
+};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+/* PMD-private rte_flow handle with cached per-flow statistics. */
+struct rte_flow {
+	void *flw_hdl; /* opaque handle returned by the flow engine */
+	int used; /* slot in use */
+	uint32_t flow_stat_id; /* slot in flow_stat_id_map */
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
+/*
+ * PMD-private item/action type extensions. Values start at INT_MIN so
+ * they can never collide with the non-negative rte_flow enum values.
+ */
+enum nt_rte_flow_item_type {
+	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ITEM_TYPE_TAG,
+	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+};
+
+enum nt_rte_flow_action_type {
+	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
+	NT_RTE_FLOW_ACTION_TYPE_TAG,
+	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+	NT_RTE_FLOW_ACTION_TYPE_JUMP,
+};
+
+/*
+ * NOTE(review): static data defined in a header gives every including
+ * translation unit its own private copy of these tables and of the lock.
+ * This only works if the header is included from a single .c file —
+ * consider moving the definitions into a .c file.
+ */
+static int convert_tables_initialized;
+
+/* Highest rte_flow enum value the lookup tables can translate */
+#define MAX_RTE_ENUM_INDEX 127
+
+/* rte_flow item/action enum -> flow engine enum; -1 = unsupported */
+static int elem_list[MAX_RTE_ENUM_INDEX + 1];
+static int action_list[MAX_RTE_ENUM_INDEX + 1];
+
+#ifdef RTE_FLOW_DEBUG
+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
+#endif
+
+/* Translate with bounds check; -1 for out-of-range or unsupported.
+ * NOTE: ({ ... }) statement expression is a GCC/Clang extension.
+ */
+#define CNV_TO_ELEM(item) \
+	({ \
+		int _temp_item = (item); \
+		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
+		elem_list[_temp_item] : -1); \
+	})
+
+
+#define CNV_TO_ACTION(action)                                   \
+	({                                                          \
+		int _temp_action = (action);                            \
+		(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
+		action_list[_temp_action] : -1; \
+	})
+
+
+/* mark+1 per occupied flow-stat slot (0 = free), guarded by flow_lock */
+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translate a flow engine error into an rte_flow_error.
+ * A NULL @error is tolerated (nothing to fill in); always returns 0.
+ */
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error == NULL)
+		return 0;
+
+	const int no_error = (flow_error->type == FLOW_ERROR_NONE ||
+			      flow_error->type == FLOW_ERROR_SUCCESS);
+
+	error->cause = NULL;
+	error->message = flow_error->message;
+	error->type = no_error ? RTE_FLOW_ERROR_TYPE_NONE :
+		      RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+/*
+ * Map a flow MARK to a free flow-stat slot (caller must hold flow_lock).
+ * The preferred slot is mark modulo the table size; on collision the map
+ * is linearly probed. The occupied marker is mark + 1, so slot value 0
+ * always means "free".
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+	uint32_t tries;
+
+	/*
+	 * BUG FIX: bound the probe to one full pass over the table. The
+	 * previous unbounded while-loop would spin forever with flow_lock
+	 * held once all MAX_COLOR_FLOW_STATS slots were occupied.
+	 */
+	for (tries = 0; tries < MAX_COLOR_FLOW_STATS; tries++) {
+		if (flow_stat_id_map[flow_stat_id] == 0)
+			break;
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+	}
+
+	/* On exhaustion the preferred slot is reused as a last resort. */
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+/* Thread-safe wrapper: allocate a flow-stat slot under flow_lock. */
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	rte_spinlock_lock(&flow_lock);
+	uint32_t ret = create_flow_stat_id_locked(mark);
+
+	rte_spinlock_unlock(&flow_lock);
+	return ret;
+}
+
+/*
+ * Free a flow-stat slot; out-of-range ids are silently ignored.
+ * Named _locked: presumably the caller holds flow_lock — verify at the
+ * call sites (the write is a single store, but the convention should hold).
+ */
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id < MAX_COLOR_FLOW_STATS)
+		flow_stat_id_map[flow_stat_id] = 0;
+}
+
+/*
+ * One-time initialization of the rte_flow -> flow engine translation
+ * tables (and their debug-name counterparts when RTE_FLOW_DEBUG is set).
+ * Unmapped enum values stay -1 (memset below) and are rejected by
+ * CNV_TO_ELEM()/CNV_TO_ACTION().
+ *
+ * NOTE(review): the check-then-set on convert_tables_initialized is not
+ * thread-safe on its own; confirm callers serialize the first invocation.
+ */
+static void initialize_global_cnv_tables(void)
+{
+	if (convert_tables_initialized)
+		return;
+
+	memset(elem_list, -1, sizeof(elem_list));
+	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
+	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
+	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
+	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
+	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
+	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
+	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
+	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
+	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
+	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
+	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
+	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
+	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;
+
+#ifdef RTE_FLOW_DEBUG
+	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
+	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
+#endif
+
+	memset(action_list, -1, sizeof(action_list));
+	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
+	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
+	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
+	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
+	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
+	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
+	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
+	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
+	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
+	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		FLOW_ACTION_TYPE_VXLAN_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		FLOW_ACTION_TYPE_PUSH_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		FLOW_ACTION_TYPE_SET_VLAN_VID;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		FLOW_ACTION_TYPE_SET_VLAN_PCP;
+	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		FLOW_ACTION_TYPE_POP_VLAN;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		FLOW_ACTION_TYPE_RAW_ENCAP;
+	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		FLOW_ACTION_TYPE_RAW_DECAP;
+	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		FLOW_ACTION_TYPE_MODIFY_FIELD;
+
+#ifdef RTE_FLOW_DEBUG
+	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
+	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
+		"FLOW_ACTION_TYPE_SET_TAG";
+	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
+	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
+		"FLOW_ACTION_TYPE_PORT_ID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
+	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
+		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
+		"FLOW_ACTION_TYPE_VXLAN_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
+		"FLOW_ACTION_TYPE_PUSH_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
+		"FLOW_ACTION_TYPE_SET_VLAN_VID";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
+		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
+		"FLOW_ACTION_TYPE_POP_VLAN";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
+		"FLOW_ACTION_TYPE_RAW_ENCAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
+		"FLOW_ACTION_TYPE_RAW_DECAP";
+	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
+		"FLOW_ACTION_TYPE_MODIFY_FIELD";
+#endif
+
+	convert_tables_initialized = 1;
+}
+
+/*
+ * Interpret a raw packet byte string (as supplied with the RAW_ENCAP /
+ * RAW_DECAP actions) into a list of flow elements.
+ *
+ * Recognized layout: ETH [VLAN ...] (IPV4 | IPV6) (ICMP | TCP | UDP | SCTP)
+ * and, when the UDP destination port is GTP-U, a GTP header with optional
+ * extension headers.  Parsing may stop cleanly at any header boundary.
+ *
+ * @param data     raw packet bytes (used as the element "spec")
+ * @param preserve optional mask bytes with the same layout as @data, or NULL
+ * @param size     number of valid bytes in @data
+ * @param out      output array, terminated with FLOW_ELEM_TYPE_END
+ *
+ * @return number of elements written including the END terminator,
+ *         or -1 on truncated or unsupported input
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;	/* next output element index */
+	int pkti = 0;	/* current parse offset into data */
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - consume every stacked 802.1Q / 802.1ad / QinQ tag */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - the ethertype must agree with the IP version nibble */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv4 protocol field at byte offset 9 */
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* IPv6 next-header field at byte offset 6 */
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U is recognized by its well-known UDP destination port */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is in 4-octet units */
+				size_t ext_len = data[pkti] * 4;
+
+				/* A zero-length extension header would never
+				 * advance pkti and would read data[pkti - 1]
+				 * below; reject it as malformed input.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				/* Last octet of the extension holds the
+				 * next-extension type (0 terminates).
+				 */
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing bytes that match no recognized header are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Copy the RTE flow attributes into the driver's flow_attr representation.
+ * Only group and priority are carried over; every other field is zeroed.
+ * A NULL @attr simply yields an all-zero attribute.  Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(struct flow_attr));
+	if (attr) {
+		attribute->attr.group = attr->group;
+		attribute->attr.priority = attr->priority;
+	}
+	return 0;
+}
+
+/*
+ * Translate an RTE flow item array into the driver's flow_elem list.
+ *
+ * @param match    receives the converted elements in match->flow_elem
+ * @param items    RTE items, iterated until RTE_FLOW_ITEM_TYPE_END
+ * @param max_elem capacity of match->flow_elem
+ *
+ * @return 0 on success; -1 on unknown item type, use of item ranges
+ *         (items[].last), or too many elements
+ */
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;		/* next output element index */
+	int iter_idx = 0;	/* current input item index */
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	/* Lazily build the RTE -> driver conversion tables on first use */
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			/* Negative means no direct mapping; only the private
+			 * TUNNEL item type is additionally accepted.
+			 */
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			/* Debug-only dump of the converted item; no effect on
+			 * the conversion result.
+			 */
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				/* NOTE(review): spec/mask are dereferenced
+				 * without a NULL check here, unlike the other
+				 * debug cases above - confirm callers always
+				 * provide both for VXLAN items.
+				 */
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			/* spec/mask are passed through by reference - the
+			 * caller's item memory must outlive the match.
+			 */
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert RTE actions to filter-layer actions for the VSWITCH profile.
+ *
+ * Allocates a flow statistics (color) id for MARK actions and - because OVS
+ * full offload does not add a MARK - inserts a synthetic MARK before END
+ * when none was seen, so the FPGA can account the flow.
+ *
+ * @param action       receives converted actions and backing storage
+ * @param actions      RTE actions, iterated until RTE_FLOW_ACTION_TYPE_END
+ * @param max_elem     capacity of action->flow_actions
+ * @param flow_stat_id out: allocated stat id, MAX_COLOR_FLOW_STATS if none
+ *
+ * @return 0 on success, -1 on unknown action or overflow
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;		/* next output action index */
+	int iter_idx = 0;	/* current input action index */
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	/* Lazily build the RTE -> driver conversion tables on first use */
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			/* Only the private TUNNEL_SET action is accepted
+			 * beyond the mapped RTE action types.
+			 */
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1:
+				/* NOTE(review): unreachable - this switch is
+				 * guarded by "if (type >= 0)" above, so the
+				 * -1 case can never be taken.
+				 */
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/* Convert the embedded tunnel definition
+				 * items recursively into flow elements.
+				 */
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					/* Replace the user's mark id with the
+					 * allocated flow-stat (color) id.
+					 */
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					/* NOTE(review): this writes
+					 * flow_actions[aidx] before the
+					 * "aidx == max_elem" check below runs
+					 * - confirm max_elem leaves headroom
+					 * for the synthetic MARK + END pair.
+					 */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert RTE actions to filter-layer actions for the INLINE profile.
+ *
+ * QUEUE indices are rebased by @queue_offset (VF/vDPA queue base) and
+ * RAW_ENCAP / RAW_DECAP byte strings are parsed into flow elements with
+ * interpret_raw_data().
+ *
+ * @param action       receives converted actions and backing storage
+ * @param actions      RTE actions, iterated until RTE_FLOW_ACTION_TYPE_END
+ * @param max_elem     capacity of action->flow_actions
+ * @param queue_offset added to every QUEUE action's index
+ *
+ * @return 0 on success, negative on unmapped action, bad raw data or
+ *         overflow
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;	/* shared input/output index - actions map 1:1 here */
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		/* NOTE(review): action_list_str is indexed with the raw RTE
+		 * type whenever it is >= 0 - confirm the table covers every
+		 * possible non-negative value, else this debug log can read
+		 * out of bounds.
+		 */
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (unsigned long long)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				/* Parse the raw decap bytes into headers so
+				 * the filter layer knows what is stripped.
+				 */
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				/* Parse the raw encap bytes (with preserve
+				 * mask) into headers for the filter layer.
+				 */
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				/* Rebase the queue index onto the VF/vDPA
+				 * queue range for override ports.
+				 */
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				/* Compatible action: pass conf through as-is */
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..6b19c2308e
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+/* Capacity of the static flow-handle pool below / max port ids tracked */
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+/* Compile-time guard: the filter layer and the adapter layer must agree
+ * on the number of color flow statistics.
+ */
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+/* Static pool of flow handles; slots are claimed and released under
+ * flow_lock (see eth_flow_create / eth_flow_destroy).
+ */
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * Return non-zero when @flow is NOT a slot in the static nt_flows[] pool,
+ * i.e. it is an opaque handle returned directly by the lower flow layer
+ * (used for inline-profile flows with group > 0 in eth_flow_create).
+ *
+ * NOTE(review): relational comparison of pointers outside a single array
+ * is technically undefined behavior in C; this relies on a flat address
+ * space, which holds on the supported platforms.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const void *first_element = &nt_flows[0];
+	const void *last_element = &nt_flows[MAX_RTE_FLOWS - 1];
+
+	return (void *)flow < first_element || (void *)flow > last_element;
+}
+
+/*
+ * Convert RTE attr/items/actions into the driver's internal representation.
+ *
+ * For the INLINE profile the action queue indices are rebased with the
+ * VF/vDPA queue offset; for the VSWITCH profile a flow statistics id is
+ * allocated and returned through @flow_stat_id.
+ *
+ * @return 0 on success; -1 with @error filled in on failure
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/* Validate the private-data pointer before any dereference; the
+	 * original code read dev->type and dev->p_drv first and only then
+	 * checked for NULL.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is a the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		/* Group > 0 flows are handled directly by the lower layer */
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy callback.
+ *
+ * Typecast handles (pointers outside nt_flows[]) are passed straight to
+ * the lower flow layer; pool-managed handles additionally release their
+ * flow-stat id and free the slot.
+ *
+ * @return 0 on success, lower-layer error code otherwise
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+	} else {
+		res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+		convert_error(error, &flow_error);
+
+		rte_spinlock_lock(&flow_lock);
+		delete_flow_stat_id_locked(flow->flow_stat_id);
+		flow->used = 0;
+		rte_spinlock_unlock(&flow_lock);
+	}
+
+	/* Clear the flow statistics if successfully destroyed - but only
+	 * for handles that are real slots in nt_flows[]; a typecast handle
+	 * is an opaque lower-layer pointer and must not be written through
+	 * (the original cleared the stats unconditionally).
+	 */
+	if (res == 0 && !is_flow_handle_typecast(flow)) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate callback: convert the flow and ask the filter layer
+ * whether it could be programmed, without touching hardware.
+ *
+ * NOTE(review): on the VSWITCH profile convert_flow allocates a
+ * flow_stat_id which is never released on this validate-only path -
+ * confirm whether that leaks a color statistics slot.
+ *
+ * @return 0 when the flow is accepted; negative with @error filled in
+ *         otherwise
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+	int res;
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	res = flow_validate(dev->flw_dev, match.flow_elem, action.flow_actions,
+			    &flow_error);
+
+	if (res < 0)
+		convert_error(error, &flow_error);
+
+	return res;
+}
+
+/*
+ * rte_flow create callback.
+ *
+ * Inline-profile flows with group > 0 return the lower layer's handle
+ * directly (a "typecast" handle, see is_flow_handle_typecast).  All other
+ * flows claim a slot in the static nt_flows[] pool under flow_lock and
+ * store the lower-layer handle plus the flow-stat id in it.
+ *
+ * @return flow handle on success, NULL with @error filled in on failure
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline group > 0 flows: no pool slot, return the raw handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim the first free pool slot under the lock; the slot is only
+	 * kept when the stat id is valid.
+	 */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		/* Program the flow outside the lock; roll the slot back if
+		 * the lower layer rejects it.
+		 */
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		if (!flow->flw_hdl) {
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
/* TSC timestamp of the last global (all-PMD) statistics refresh */
uint64_t last_stat_rtc;

/*
 * Poll and accumulate statistics for one port.
 *
 * Rate-limited twice: per port (internals->last_stat_rtc) and globally
 * (last_stat_rtc), both to at most once per second based on the TSC.
 * Rx counters are always taken from the adapter stat structures; Tx
 * counters come from the adapter for physical ports (with special LAG
 * aggregation of ports 0 and 1 into port 0) and from the host-buffer
 * queue counters for virtual ports.  The global per-color flow counters
 * are folded into nt_flows[] only by the PMD that wins the global
 * rate-limit check.
 *
 * Returns 0 on success (including the rate-limited early-outs), -1 on
 * invalid state.
 *
 * NOTE(review): the bound check uses `if_index > NUM_ADAPTER_PORTS_MAX`,
 * which still admits if_index == NUM_ADAPTER_PORTS_MAX — confirm the
 * per-port arrays have NUM_ADAPTER_PORTS_MAX + 1 entries, else this
 * should be `>=`.
 */
int poll_statistics(struct pmd_internals *internals)
{
	int flow;
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	const int if_index = internals->if_index;

	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
		return -1;

	assert(rte_tsc_freq > 0);

	rte_spinlock_lock(&hwlock);

	uint64_t now_rtc = rte_get_tsc_cycles();

	/*
	 * Check per port max once a second
	 * if more than a second since last stat read, do a new one
	 */
	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
		rte_spinlock_unlock(&hwlock);
		return 0;
	}

	internals->last_stat_rtc = now_rtc;

	pthread_mutex_lock(&p_nt_drv->stat_lck);

	/*
	 * Add the RX statistics increments since last time we polled.
	 * (No difference if physical or virtual port)
	 */
	internals->rxq_scg[0].rx_pkts +=
		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
		p_nt4ga_stat->a_port_rx_packets_base[if_index];
	internals->rxq_scg[0].rx_bytes +=
		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
		p_nt4ga_stat->a_port_rx_octets_base[if_index];
	internals->rxq_scg[0].err_pkts += 0;
	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
				p_nt4ga_stat->a_port_rx_drops_base[if_index];

	/* _update the increment bases */
	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
		p_nt4ga_stat->a_port_rx_packets_total[if_index];
	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
		p_nt4ga_stat->a_port_rx_octets_total[if_index];
	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
		p_nt4ga_stat->a_port_rx_drops_total[if_index];

	/* Tx (here we must distinguish between physical and virtual ports) */
	if (internals->type == PORT_TYPE_PHYSICAL) {
		/* LAG management of Tx stats. */
		if (lag_active && if_index == 0) {
			unsigned int i;
			/*
			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
			 * from port 0 and 1.
			 */
			for (i = 0; i < 2; i++) {
				/* Add the statistics increments since last time we polled */
				internals->txq_scg[0].tx_pkts +=
					p_nt4ga_stat->a_port_tx_packets_total[i] -
					p_nt4ga_stat->a_port_tx_packets_base[i];
				internals->txq_scg[0].tx_bytes +=
					p_nt4ga_stat->a_port_tx_octets_total[i] -
					p_nt4ga_stat->a_port_tx_octets_base[i];
				internals->txq_scg[0].err_pkts += 0;

				/* _update the increment bases */
				p_nt4ga_stat->a_port_tx_packets_base[i] =
					p_nt4ga_stat->a_port_tx_packets_total[i];
				p_nt4ga_stat->a_port_tx_octets_base[i] =
					p_nt4ga_stat->a_port_tx_octets_total[i];
			}
		} else {
			/* Add the statistics increments since last time we polled */
			internals->txq_scg[0].tx_pkts +=
				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
				p_nt4ga_stat->a_port_tx_packets_base[if_index];
			internals->txq_scg[0].tx_bytes +=
				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
				p_nt4ga_stat->a_port_tx_octets_base[if_index];
			internals->txq_scg[0].err_pkts += 0;

			/* _update the increment bases */
			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
				p_nt4ga_stat->a_port_tx_packets_total[if_index];
			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
				p_nt4ga_stat->a_port_tx_octets_total[if_index];
		}
	}
	if (internals->type == PORT_TYPE_VIRTUAL) {
		/* _update TX counters from HB queue counter */
		unsigned int i;
		struct host_buffer_counters *const p_hb_counters =
				p_nt4ga_stat->mp_stat_structs_hb;
		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;

		/*
		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
		 * the number of exception queues which must be 1 - for now. The code is kept if we
		 * want it in future, but it will not be likely.
		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
		 *
		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
		 * The rep port into OVS that represents this port will always replicate the traffic
		 * here, also when no offload occurs
		 */
		for (i = 0; i < internals->vpq_nb_vq; ++i) {
			v_port_packets_total +=
				p_hb_counters[internals->vpq[i].id].fwd_packets;
			v_port_octets_total +=
				p_hb_counters[internals->vpq[i].id].fwd_bytes;
		}
		/* Add the statistics increments since last time we polled */
		internals->txq_scg[0].tx_pkts +=
			v_port_packets_total -
			p_nt4ga_stat->a_port_tx_packets_base[if_index];
		internals->txq_scg[0].tx_bytes +=
			v_port_octets_total -
			p_nt4ga_stat->a_port_tx_octets_base[if_index];
		internals->txq_scg[0].err_pkts += 0; /* What to user here ?? */

		/* _update the increment bases */
		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
	}

	/* Globally only once a second */
	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
		rte_spinlock_unlock(&hwlock);
		pthread_mutex_unlock(&p_nt_drv->stat_lck);
		return 0;
	}

	last_stat_rtc = now_rtc;

	/* All color counter are global, therefore only 1 pmd must update them */
	const struct color_counters *p_color_counters =
			p_nt4ga_stat->mp_stat_structs_color;
	struct color_counters *p_color_counters_base =
			p_nt4ga_stat->a_stat_structs_color_base;
	uint64_t color_packets_accumulated, color_bytes_accumulated;

	/* Fold per-color HW counters into the owning nt_flows[] entries */
	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
		if (nt_flows[flow].used) {
			unsigned int color = nt_flows[flow].flow_stat_id;

			if (color < NT_MAX_COLOR_FLOW_STATS) {
				color_packets_accumulated =
					p_color_counters[color].color_packets;
				nt_flows[flow].stat_pkts +=
					(color_packets_accumulated -
					 p_color_counters_base[color].color_packets);

				nt_flows[flow].stat_tcp_flags |=
					p_color_counters[color].tcp_flags;

				color_bytes_accumulated =
					p_color_counters[color].color_bytes;
				nt_flows[flow].stat_bytes +=
					(color_bytes_accumulated -
					 p_color_counters_base[color].color_bytes);

				/* _update the counter bases */
				p_color_counters_base[color].color_packets =
					color_packets_accumulated;
				p_color_counters_base[color].color_bytes =
					color_bytes_accumulated;
			}
		}
	}

	rte_spinlock_unlock(&hwlock);
	pthread_mutex_unlock(&p_nt_drv->stat_lck);

	return 0;
}
+
/*
 * rte_flow .query callback.
 *
 * Only RTE_FLOW_ACTION_TYPE_COUNT is supported: fills a
 * rte_flow_query_count with the flow's accumulated packet/byte counters
 * (refreshing them first via poll_statistics()) and optionally resets
 * them when qcnt->reset is set.
 *
 * Returns 0 on success, -1 with *err set for opaque (type-cast) flow
 * handles or unsupported query actions.
 */
static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
			  const struct rte_flow_action *action, void *data,
			  struct rte_flow_error *err)
{
	struct pmd_internals *internals = dev->data->dev_private;

	err->cause = NULL;
	err->message = NULL;

	/* Opaque handles (inline profile, group > 0) carry no local stats */
	if (is_flow_handle_typecast(flow)) {
		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Error in flow handle");
		return -1;
	}

	/* Bring the per-flow counters up to date before reporting them */
	poll_statistics(internals);

	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
		struct rte_flow_query_count *qcnt =
			(struct rte_flow_query_count *)data;
		if (qcnt) {
			if (flow) {
				qcnt->hits = flow->stat_pkts;
				qcnt->hits_set = 1;
				qcnt->bytes = flow->stat_bytes;
				qcnt->bytes_set = 1;

				if (qcnt->reset) {
					flow->stat_pkts = 0UL;
					flow->stat_bytes = 0UL;
					flow->stat_tcp_flags = 0;
				}
			} else {
				qcnt->hits_set = 0;
				qcnt->bytes_set = 0;
			}
		}
	} else {
		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "Unsupported query");
		return -1;
	}
	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
	return 0;
}
+
#ifdef DEBUGGING

/* Debug helper: dump all fields of a rte_flow_tunnel to the filter log.
 * Note: inet_ntoa() returns a static buffer, which is fine here because
 * the two calls are sequential, each consumed by its own NT_LOG line.
 */
static void _print_tunnel(struct rte_flow_tunnel *tunnel)
{
	struct in_addr addr;

	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
	addr.s_addr = tunnel->ipv4.src_addr;
	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
	addr.s_addr = tunnel->ipv4.dst_addr;
	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);

	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
}
#endif
+
/* PMD-private action list handed out by ntnic_tunnel_decap_set().
 * Entry [0] is the fixed tunnel-set action; entry [1] starts out as a
 * zeroed terminator and is overwritten with the decap action type by
 * ntnic_tunnel_decap_set() before the array is returned to the caller.
 * NOTE(review): shared mutable static — concurrent decap_set calls would
 * race on entry [1]; confirm callers are serialized.
 */
static struct rte_flow_action _pmd_actions[] = {
	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
		.conf = NULL
	},
	{ .type = 0, .conf = NULL }
};
+
/*
 * rte_flow .tunnel_decap_set callback.
 *
 * Hands back the PMD-private action list (_pmd_actions) that implements
 * tunnel decapsulation for the given tunnel type.  Only VXLAN tunnels
 * are supported; entry [1] of the shared static array is patched to
 * RTE_FLOW_ACTION_TYPE_VXLAN_DECAP and the two-entry list is returned.
 *
 * Returns 0 on success, -ENOTSUP for non-VXLAN tunnel types.
 */
static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
				  struct rte_flow_tunnel *tunnel,
				  struct rte_flow_action **pmd_actions,
				  uint32_t *num_of_actions,
				  struct rte_flow_error *err _unused)
{
#ifdef DEBUGGING
	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
#endif

	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
	else
		return -ENOTSUP;

	*pmd_actions = _pmd_actions;
	*num_of_actions = 2;

	return 0;
}
+
/* PMD-private match item returned by ntnic_tunnel_match(): a single
 * driver-specific TUNNEL item with no spec/last/mask constraints.
 */
static struct rte_flow_item _pmd_items = {
	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
	.spec = NULL,
	.last = NULL,
	.mask = NULL
};
+
/*
 * rte_flow .tunnel_match callback.
 *
 * Returns the single PMD-private tunnel match item (_pmd_items)
 * regardless of the tunnel description.  Always succeeds.
 */
static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
			      struct rte_flow_tunnel *tunnel _unused,
			      struct rte_flow_item **pmd_items,
			      uint32_t *num_of_items,
			      struct rte_flow_error *err _unused)
{
#ifdef DEBUGGING
	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
#endif

	*pmd_items = &_pmd_items;
	*num_of_items = 1;
	return 0;
}
+
/*
 * Restoration API support
 */

/*
 * rte_flow .get_restore_info callback.
 *
 * Reconstructs the tunnel description for a partially-offloaded packet
 * from its FDIR mark.  The mark layout, as decoded below:
 *   m->hash.fdir.hi  bits 31..24 : port id
 *   m->hash.fdir.hi  bits 23..0  : tunnel id (VNI)
 *   m->hash.fdir.lo  bits 23..0  : flow stat id
 * The stat id + port id pair is used to look up the stored tunnel
 * definition, which is copied into *info (VXLAN only; TTL/TOS are
 * fixed values, not taken from the original outer header).
 *
 * Returns 0 with *info filled in on a hit, -EINVAL when the mbuf has no
 * FDIR mark or no tunnel definition matches.
 */
static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err _unused)
{
#ifdef DEBUGGING
	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
#endif

	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;

		struct tunnel_cfg_s tuncfg;
		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);

		if (ret)
			return -EINVAL;

		if (tuncfg.ipversion == 4) {
			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
			info->tunnel.is_ipv6 = 0;
		} else {
			/* IPv6 */
			for (int i = 0; i < 16; i++) {
				info->tunnel.ipv6.src_addr[i] =
					tuncfg.v6.src_ip[i];
				info->tunnel.ipv6.dst_addr[i] =
					tuncfg.v6.dst_ip[i];
			}
			info->tunnel.is_ipv6 = 1;
		}

		info->tunnel.tp_dst = tuncfg.d_port;
		info->tunnel.tp_src = tuncfg.s_port;

		/* Fixed defaults; the original outer TTL/TOS are not kept */
		info->tunnel.ttl = 64;
		info->tunnel.tos = 0;

		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DO_NOT_FRAGMENT */
		info->tunnel.tun_flags = (1 << 3) | (1 << 1);

		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;

		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
		info->group_id = 0;

#ifdef DEBUGGING
		_print_tunnel(&info->tunnel);
#endif

		return 0;
	}
	return -EINVAL; /* Supported, but no hit found */
}
+
/*
 * rte_flow .tunnel_action_decap_release callback.
 *
 * Nothing to free: the action list handed out by ntnic_tunnel_decap_set()
 * is a static array, so this is a no-op that always returns 0.
 */
static int
ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
				  struct rte_flow_action *pmd_actions _unused,
				  uint32_t num_of_actions _unused,
				  struct rte_flow_error *err _unused)
{
#ifdef DEBUGGING
	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
#endif
	return 0;
}
+
/*
 * rte_flow .tunnel_item_release callback.
 *
 * Nothing to free: the item handed out by ntnic_tunnel_match() is a
 * static object, so this is a no-op that always returns 0.
 */
static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
				     struct rte_flow_item *pmd_items _unused,
				     uint32_t num_of_items _unused,
				     struct rte_flow_error *err _unused)
{
#ifdef DEBUGGING
	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
#endif
	return 0;
}
+
/* rte_flow ops table exported by this PMD.  .flush is not implemented;
 * eth_flow_destroy is defined elsewhere in this driver.
 */
const struct rte_flow_ops _dev_flow_ops = {
	.validate = eth_flow_validate,
	.create = eth_flow_create,
	.destroy = eth_flow_destroy,
	.flush = NULL,
	.query = eth_flow_query,
	.tunnel_decap_set = ntnic_tunnel_decap_set,
	.tunnel_match = ntnic_tunnel_match,
	.get_restore_info = ntnic_get_restore_info,
	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
	.tunnel_item_release = ntnic_tunnel_item_release

};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/* NOTE(review): guard name starts with a double underscore, which is in
 * the implementation-reserved identifier space — consider NTNIC_FILTER_H.
 */
#ifndef __NTNIC_FILTER_H__
#define __NTNIC_FILTER_H__

/*
 * Create a flow on behalf of a client using the already-converted
 * attribute/match/action representation.  Returns the new flow handle
 * or NULL on failure with *error filled in.
 */
struct rte_flow *
client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
		   struct cnv_action_s *action, uint32_t flow_stat_id,
		   struct rte_flow_error *error);

#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
/* One entry of the RSS translation table: an (rte flag, nt flag) pair */
struct pair_uint64_t {
	uint64_t first;
	uint64_t second;
};

/* Builds a pair entry { RTE_<name>, NT_<name> } by token pasting, so the
 * two namespaces only have to agree on the <name> suffix.
 */
#define PAIR_NT(name)                 \
	{                             \
		RTE_##name, NT_##name \
	}
+
/* Translation table between the DPDK RSS hash-field flags and the
 * corresponding NT flags; searched linearly in both directions by
 * rte_to_nt_rss_flag() / nt_to_rte_rss_flag().
 * NOTE(review): table looks like it could be static const unless another
 * translation unit references it — confirm before tightening linkage.
 */
struct pair_uint64_t rte_eth_rss_to_nt[] = {
	PAIR_NT(ETH_RSS_IPV4),
	PAIR_NT(ETH_RSS_FRAG_IPV4),
	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
	PAIR_NT(ETH_RSS_IPV6),
	PAIR_NT(ETH_RSS_FRAG_IPV6),
	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
	PAIR_NT(ETH_RSS_IPV6_EX),
	PAIR_NT(ETH_RSS_C_VLAN),
	PAIR_NT(ETH_RSS_L3_DST_ONLY),
	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
};
+
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->first == rte_flag)
+			return &p->second;
+	}
+	return NULL; /* NOT found */
+}
+
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->second == nt_flag)
+			return &p->first;
+	}
+	return NULL; /* NOT found */
+}
+
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (uint i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %d is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (uint i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
/* MSB of a 32-bit id: set => egress meter/profile, clear => ingress
 * (see the is_egress checks below).
 */
static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
/*
 * rte_mtr .meter_profile_add callback (vswitch metering).
 *
 * Registers a meter profile for later use by eth_mtr_create().  The MSB
 * of meter_profile_id marks the profile as egress; ingress profiles are
 * only accepted on virtual ports.  The profile parameters are copied
 * into a driver-private list entry (freed by eth_meter_profile_delete).
 *
 * Returns 0 on success, or a negative rte_mtr error (EEXIST for a
 * duplicate id, ENOMEM on allocation failure, EINVAL for ingress on a
 * physical port).
 */
static int eth_meter_profile_add(struct rte_eth_dev *dev,
				 uint32_t meter_profile_id,
				 struct rte_mtr_meter_profile *profile,
				 struct rte_mtr_error *error)
{
	struct pmd_internals *dev_priv = dev->data->dev_private;

	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
	       __func__, __func__, __LINE__,
	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));

	/* MSB of the profile id selects egress */
	const bool is_egress = meter_profile_id & highest_bit_mask;

	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
		struct nt_mtr_profile *prof;

		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
		if (prof)
			return -rte_mtr_error_set(error, EEXIST,
						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
						  NULL,
						  "Profile id already exists\n");

		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
		if (!prof) {
			return -rte_mtr_error_set(error,
						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
						  NULL, NULL);
		}

		prof->profile_id = meter_profile_id;
		memcpy(&prof->profile, profile,
		       sizeof(struct rte_mtr_meter_profile));

		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);

		return 0;
	}
	/* Ingress is not possible yet on phy ports */
	return -rte_mtr_error_set(error, EINVAL,
		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
		"Traffic ingress metering/policing is not supported on physical ports\n");
}
+
/*
 * rte_mtr .meter_profile_delete callback (vswitch metering).
 *
 * Unlinks and frees the profile registered by eth_meter_profile_add().
 * Returns 0 on success or -ENODEV (via rte_mtr_error_set) when the id
 * is unknown.
 * NOTE(review): no check that a meter object still references the
 * profile — eth_mtr_create() stores the raw pointer; confirm callers
 * destroy meters before their profiles.
 */
static int eth_meter_profile_delete(struct rte_eth_dev *dev,
				    uint32_t meter_profile_id,
				    struct rte_mtr_error *error)
{
	struct pmd_internals *dev_priv = dev->data->dev_private;
	struct nt_mtr_profile *profile;

	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
	       __func__, __func__, __LINE__,
	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));

	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
	if (!profile)
		return -rte_mtr_error_set(error, ENODEV,
					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
					  NULL, "Profile id does not exist\n");

	LIST_REMOVE(profile, next);
	rte_free(profile);
	return 0;
}
+
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *mtr = NULL;
+
+	LIST_FOREACH(mtr, &dev_priv->mtrs, next)
+	if (mtr->mtr_id == mtr_id)
+		break;
+
+	return mtr;
+}
+
/* Fixed-point rate: integer part plus fractional part in units of 1/1024 */
struct qos_integer_fractional {
	uint32_t integer;
	uint32_t fractional; /* 1/1024 */
};

/*
 * Converts byte/s to byte/period in the form integer + fractional/1024.
 * The period depends on the clock frequency and other parameters which
 * being combined give a multiplier. The resulting formula is:
 *     f[bytes/period] = x[bytes/s] * period_ps / 10^12
 * (the original comment said "/ 10^-12", but the code divides the
 * product by 10^12 picoseconds-per-second, as it must).
 *
 * NOTE(review): byte_per_second * period_ps is assumed not to overflow
 * 64 bits for the rates/periods used by the callers — confirm.
 */
static struct qos_integer_fractional
byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
{
	struct qos_integer_fractional res;
	const uint64_t dividend = byte_per_second * period_ps;
	const uint64_t divisor = 1000000000000ull; /* 10^12 picoseconds per second */

	res.integer = dividend / divisor;
	const uint64_t remainder = dividend % divisor;

	res.fractional = 1024ull * remainder / divisor;
	return res;
}
+
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
/*
 * rte_mtr .meter_enable callback (vswitch metering).
 *
 * Programs the meter's profile into hardware.  Egress profiles (MSB of
 * the profile id set) are applied via the EPP block, using virtual- or
 * physical-port rate conversion as appropriate; ingress profiles are
 * applied as DBS Tx QoS, after a one-time global QoS rate setup.
 *
 * Returns 0 on success or a negative rte_mtr error.
 *
 * NOTE(review): `ingress_initial` is a function-local static with no
 * locking — confirm enable calls are serialized across ports.
 */
static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
			    struct rte_mtr_error *error)
{
	struct pmd_internals *dev_priv = dev->data->dev_private;
	int res;
	static int ingress_initial;	/* one-time global QoS rate init flag */

	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
	       __func__, __func__, __LINE__,
	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));

	nthw_dbs_t *p_nthw_dbs =
		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
	nthw_epp_t *p_nthw_epp =
		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;

	/*
	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
	 *   rfc4115.cir = rfc2697.cir
	 *   rfc4115.eir = rfc2697.cir
	 *   rfc4115.cbs = rfc2697.cbs
	 *   rfc4115.ebs = rfc2697.ebs
	 */
	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);

	if (!mtr) {
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					  "Meter id not found\n");
	}

	if (!mtr->profile) {
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
					  NULL, "Meter profile id not found\n");
	}

	/* MSB of the profile id selects egress */
	const uint32_t profile_id = mtr->profile->profile_id;
	const bool is_egress = profile_id & highest_bit_mask;
	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;

	if (is_egress) {
		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
		struct qos_integer_fractional cir = { 0 };

		if (is_virtual) {
			cir =
			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
			/* A zero rate would disable; clamp to minimum */
			if (cir.integer == 0 && cir.fractional == 0)
				cir.fractional = 1;
			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
						  cir.integer, cir.fractional,
						  burst);
		} else {
			cir =
				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
								    .srtcm_rfc2697.cir);
			/* A zero rate would disable; clamp to minimum */
			if (cir.integer == 0 && cir.fractional == 0)
				cir.fractional = 1;
			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
						cir.integer, cir.fractional,
						burst);
		}
		if (res) {
			return -rte_mtr_error_set(error, EINVAL,
				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Applying meter profile for setting egress policy failed\n");
		}
	} else {
		if (!ingress_initial) {
			/*
			 * based on a 250Mhz FPGA
			 * _update refresh rate interval calculation:
			 * multiplier / (divider * 4ns)
			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
			 *
			 * results in resolution of IR is 1Mbps
			 */
			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);

			if (res) {
				return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
					"Applying meter profile for setting ingress "
					"global QoS rate failed\n");
			}
			ingress_initial = 1;
		}

		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
			/* max burst 1,074Mb (27 bits) */
			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
		}
		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
					     1, /* enable */
					     mtr->profile->profile.srtcm_rfc2697.cir /
					     125000,
					     mtr->profile->profile.srtcm_rfc2697
					     .cbs); /* BS - burst size in Bytes */
		if (res) {
			return -rte_mtr_error_set(error, EINVAL,
				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
				NULL, "Applying meter profile failed\n");
		}
	}
	return 0;
}
+
/* Disable ingress Tx QoS for the port: programs the DBS Tx QoS entry
 * with enable=0 and zeroed rate/burst.  Return value of
 * nthw_set_tx_qos_config() is deliberately ignored (best effort).
 */
static void disable(struct pmd_internals *dev_priv)
{
	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
	       __func__, __func__, __LINE__,
	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));

	nthw_dbs_t *p_nthw_dbs =
		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
			       0, /* disable */
			       0, /* IR */
			       0); /* BS */
}
+
/*
 * rte_mtr .meter_disable callback (vswitch metering).
 *
 * Reverts what eth_meter_enable() programmed: egress meters (MSB of
 * mtr_id set) are cleared in the EPP block (virtual or physical port
 * variant), ingress meters via disable().  Returns 0 on success or
 * -EINVAL when the meter id is unknown.
 */
static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
			     struct rte_mtr_error *error)
{
	struct pmd_internals *dev_priv = dev->data->dev_private;
	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);

	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
	       __func__, __func__, __LINE__,
	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));

	nthw_epp_t *p_nthw_epp =
		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;

	if (!mtr) {
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					  "Meter id not found\n");
	}

	/* MSB of the meter id selects egress */
	const bool is_egress = mtr_id & highest_bit_mask;

	if (is_egress) {
		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);

		if (is_virtual)
			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
		else
			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
	} else {
		disable(dev_priv);
	}
	return 0;
}
+
/*
 * rte_mtr .create callback (vswitch metering): MTR object create.
 *
 * Allocates a meter object bound to an already-registered profile and
 * links it into the device's meter list.  The MSB of mtr_id marks the
 * meter as egress; ingress meters are rejected on physical ports.  When
 * params->meter_enable is set the meter is immediately programmed via
 * eth_meter_enable().
 *
 * Returns 0 on success or a negative rte_mtr error (EINVAL, EEXIST for
 * a duplicate meter id, ENOMEM on allocation failure).
 */
static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
			  struct rte_mtr_params *params, int shared,
			  struct rte_mtr_error *error)
{
	struct pmd_internals *dev_priv = dev->data->dev_private;
	struct nt_mtr *mtr = NULL;
	struct nt_mtr_profile *profile;

	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
	       __func__, __func__, __LINE__,
	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));

	/* MSB of the meter id selects egress */
	const bool is_egress = mtr_id & highest_bit_mask;

	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
		NT_LOG(ERR, NTHW,
		       "ERROR try to create ingress meter object on a phy port. Not supported\n");

		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
			"Traffic ingress metering/policing is not supported on physical ports\n");
	}

	mtr = nt_mtr_find(dev_priv, mtr_id);
	if (mtr)
		return -rte_mtr_error_set(error, EEXIST,
					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					  "Meter id already exists\n");

	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
	if (!profile) {
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
					  NULL, "Profile id does not exist\n");
	}

	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
	if (!mtr)
		return -rte_mtr_error_set(error, ENOMEM,
					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
					  NULL);

	mtr->shared = shared;
	mtr->mtr_id = mtr_id;
	mtr->profile = profile;	/* raw pointer; profile must outlive the meter */
	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);

	if (params->meter_enable)
		return eth_meter_enable(dev, mtr_id, error);

	return 0;
}
+
+/* MTR object destroy */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
/*
 * rte_mtr .capabilities_get callback (inline FLM metering).
 *
 * Reports the metering capabilities of the inline FLM path: only
 * color-blind trTCM RFC 2698 in byte mode, with limits derived from the
 * FLM learn-record layout.  Returns 0 on success or -EINVAL when the
 * device's filter layer has no metering support.
 */
static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
		struct rte_mtr_capabilities *cap,
		struct rte_mtr_error *error)
{
	struct pmd_internals *dev_priv = dev->data->dev_private;

	if (!flow_mtr_supported(dev_priv->flw_dev)) {
		return -rte_mtr_error_set(error, EINVAL,
			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
			"Ethernet device does not support metering\n");
	}

	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));

	/* MBR records use 28-bit integers */
	cap->n_max = flow_mtr_meters_supported();
	cap->n_shared_max = cap->n_max;

	cap->identical = 0;
	cap->shared_identical = 0;

	cap->shared_n_flows_per_mtr_max = UINT32_MAX;

	/* Limited by number of MBR record ids per FLM learn record */
	cap->chaining_n_mtrs_per_flow_max = 4;

	cap->chaining_use_prev_mtr_color_supported = 0;
	cap->chaining_use_prev_mtr_color_enforced = 0;

	/* Max rate encodable by the HW: 12-bit mantissa << 4-bit exponent */
	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;

	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
			  RTE_MTR_STATS_N_BYTES_GREEN;

	/* Only color-blind mode is supported */
	cap->color_aware_srtcm_rfc2697_supported = 0;
	cap->color_aware_trtcm_rfc2698_supported = 0;
	cap->color_aware_trtcm_rfc4115_supported = 0;

	/* Focused on RFC2698 for now */
	cap->meter_srtcm_rfc2697_n_max = 0;
	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
	cap->meter_trtcm_rfc4115_n_max = 0;

	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();

	/* Byte mode is supported */
	cap->srtcm_rfc2697_byte_mode_supported = 0;
	cap->trtcm_rfc2698_byte_mode_supported = 1;
	cap->trtcm_rfc4115_byte_mode_supported = 0;

	/* Packet mode not supported */
	cap->srtcm_rfc2697_packet_mode_supported = 0;
	cap->trtcm_rfc2698_packet_mode_supported = 0;
	cap->trtcm_rfc4115_packet_mode_supported = 0;

	return 0;
}
+
+/*
+ * rte_mtr meter_profile_add() for the inline profile.
+ * Only color-blind trTCM RFC 2698 in byte mode with CIR==PIR and
+ * CBS==PBS is accepted; the profile is programmed into the FLM.
+ * Note: 'error' is used on every failure path, so the bogus
+ * __rte_unused annotation on it has been dropped.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	/* Profile ids share the policy id space in HW */
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	/* HW holds one rate/burst pair, hence committed == peak required */
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete() for the inline profile.
+ * Clears the HW profile by re-programming it with all-zero rates.
+ * All three parameters are referenced in the body, so the incorrect
+ * __rte_unused annotations have been removed.
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add() for the inline profile.
+ * Accepted policy shape: GREEN passes (END / VOID / PASSTHRU),
+ * YELLOW and RED must DROP.  Anything else is rejected.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	/* Third argument 1: drop on non-green — the only policy HW supports */
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete() for the inline profile.
+ * Pure range validation; nothing is programmed to HW here.
+ * 'policy_id' and 'error' ARE used, so their __rte_unused annotations
+ * were wrong and have been removed; 'dev' remains genuinely unused.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr create() for the inline profile.
+ * Validates the request (color-blind, enabled, shared, green-only stats,
+ * ids in range) and offloads the meter to the FLM.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	/* HW counts green packets/bytes only */
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* Profile and policy ids share the same HW id space */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy() for the inline profile: removes an offloaded
+ * meter from the FLM.  'error' is used on both failure paths, so the
+ * incorrect __rte_unused annotation has been removed.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update() for the inline profile, repurposed as a bucket
+ * adjustment: bit 63 of 'adjust_value' must be set as an opt-in flag,
+ * the low 32 bits carry the adjustment written to the FLM meter.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the opt-in flag, keep the payload bits */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read() for the inline profile.
+ * Only the GREEN packet/byte counters are populated; all other
+ * colors stay zero from the memset.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* rte_mtr ops for the vswitch FPGA profile (EPP-based QoS). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* rte_mtr ops for the inline FPGA profile (FLM-based metering). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_eth mtr_ops_get callback: select the rte_mtr ops table matching
+ * the adapter's FPGA profile.  Returns 0 on success, -1 for profiles
+ * without metering support (unknown/capture).
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/*
+ * Select the rte_mtr ops table matching the adapter's FPGA profile.
+ * 'dev' is dereferenced by the implementation, so the previous
+ * __rte_unused annotation was misleading and has been dropped.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port vDPA bookkeeping, indexed by vhid into the 'vport' table. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path */
+	struct rte_vdpa_device *vdev;
+	int vid;			/* vhost device id once connected */
+	uint32_t index;			/* base HW ring index */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override on Tx */
+	int rxqs;
+	int txqs;
+	uint64_t flags;			/* rte_vhost driver register flags */
+	struct rte_pci_addr addr;
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Translate (vdpa_dev, rx/tx, queue_id) to HW ring index, VF number and
+ * representor port.  Returns 0 on success, -1 if the device is unknown
+ * or the queue id exceeds the configured queue count.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+			} else {
+				/* Fix: validate and log against txqs, not rxqs */
+				if (queue_id >= vport[i].txqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+			}
+			*hw_index = vport[i].index + queue_id;
+
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register and start a vDPA port for a VF.  Fills the next free slot in
+ * the global 'vport' table and returns its index via *vhid.
+ * Fix: bound-check nb_vpda_devcnt before indexing vport[], which
+ * previously overran the array after MAX_VDPA_PORTS registrations.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "No more vDPA port slots (max %lu)\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the vhost driver for a registered vDPA port.
+ * NOTE(review): the 'return' inside the loop means only the FIRST port
+ * with a non-empty ifname is closed per call — confirm whether callers
+ * invoke this repeatedly or whether all ports should be closed here.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Mark the slot free */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Bit-number -> name table used by dump_virtio_features() (debug only). */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Print the name of every virtio feature bit set in 'features'. */
+static void dump_virtio_features(uint64_t features)
+{
+	int i;
+
+	for (i = 0; i < NUM_FEATURES; i++) {
+		const uint64_t bit = 1ULL << virt_features[i].id;
+
+		if (features & bit)
+			printf("Virtio feature: %s\n", virt_features[i].name);
+	}
+}
+#endif
+
+/*
+ * vhost new_device callback: bind the vhost connection to its vport
+ * slot (matched by socket path), wait up to ~2 s for the PMD port to
+ * become ready, then record the negotiated ring layout.
+ * Fails unless IN_ORDER or RING_PACKED was negotiated.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll for the PMD port instance: 2000 x 1 ms */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): %lx on uint64_t is wrong on 32-bit — consider PRIx64 */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost destroy_device callback: log the departing port and set its
+ * PMD link state back to "no negotiation" (link down).
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost connection lifecycle callbacks registered per socket. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register the vhost-user socket for a port, attach the lifecycle
+ * callbacks, mask out offload features the HW path cannot honor, and
+ * start the driver.  Returns 0 on success, -1 on any failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Disable checksum/TSO/ctrl-queue features not supported by HW */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a PCI vendor/device id pair to a printable adapter family name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id == NT_HW_NAPATECH_PCI_VENDOR_ID) {
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+			return "NT200A02";
+		if (pci_dev->id.device_id ==
+				NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+			return "NT50B01";
+	}
+
+	return "Unknown";
+}
+
+/*
+ * PCI probe for Napatech VFs: first set up the vDPA device for the VF,
+ * then create the DPDK VF interface.  Returns 0 on success, -1 on error.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI remove: tear down vDPA first, then the DPDK VF interface. */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI id match table for supported Napatech VF devices. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* PCI driver descriptor for the Napatech VF PMD. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+/*
+ * VF probe/remove entry points.  Both parameters are used by the
+ * definitions (pci_drv is forwarded to ntvf_vdpa_pci_probe), so the
+ * previous __rte_unused annotations were misleading and are removed.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev);
+
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..4125bc50c9
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Guest-supplied ring addresses and state for one virtqueue. */
+struct vring_info {
+	uint64_t desc;			/* descriptor table GPA */
+	uint64_t avail;			/* avail ring GPA */
+	uint64_t used;			/* used ring GPA */
+	uint16_t size;			/* ring size in descriptors */
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;
+	struct nthw_virt_queue *p_vq;	/* HW queue handle */
+
+	int enable;
+};
+
+/* Negotiated features and per-queue ring state (rx/tx interleaved). */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF vDPA driver state; flags are accessed with __atomic builtins. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	/* VFIO file descriptors obtained via nt_vfio_setup() */
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;		/* vhost device id */
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	volatile int32_t started;
+	volatile int32_t dev_attached;
+	volatile int32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile int32_t dma_mapped;
+	volatile int32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Node of the global list of ntvf_vdpa_internal instances. */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/*
+ * Look up the list node whose internal state belongs to 'vdev'.
+ * Returns NULL when the device is not registered.  The list is walked
+ * under internal_list_lock.
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	struct internal_list *node;
+	struct internal_list *match = NULL;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(node, &internal_list, next)
+	{
+		if (node->internal->vdev == vdev) {
+			match = node;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return match;
+}
+
+/*
+ * Look up the list node whose internal state belongs to PCI device
+ * 'pdev'; NULL if not registered.  Walks the list under
+ * internal_list_lock.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	int found = 0;
+	struct internal_list *list;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next)
+	{
+		if (pdev == list->internal->pdev) {
+			found = 1;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (!found)
+		return NULL;
+
+	return list;
+}
+
+/*
+ * Set up VFIO for the VF and cache the container/group/device fds in
+ * 'internal'.  Returns 0 on success, -1 on VFIO setup failure (fds are
+ * left at -1 in that case).
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * Map (do_map != 0) or unmap all guest memory regions of the vhost
+ * device into the VF's VFIO container.  Refuses a redundant map/unmap
+ * via the atomic 'dma_mapped' flag.  Returns 0 on success, negative on
+ * failure.
+ * Fix: the region log's format string was "GPA 0xllx" (missing '%'),
+ * which printed a literal "llx", left guest_phys_addr unconsumed and
+ * shifted 'size' onto the wrong specifier; now uses "0x%" PRIX64.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Reject double map / double unmap */
+	if ((do_map && __atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED)) ||
+			(!do_map && !__atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 1, __ATOMIC_RELAXED);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 0, __ATOMIC_RELAXED);
+		}
+	}
+
+exit:
+	/* mem is malloc'ed by rte_vhost_get_mem_table(); caller frees */
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address to a guest physical address using
+ * the vhost memory table of device @vid. Returns 0 when no region
+ * contains @hva (0 doubles as the "not found" value).
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	uint64_t gpa = 0;
+	uint32_t r;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (r = 0; r < mem->nregions; r++) {
+		struct rte_vhost_mem_region *region = &mem->regions[r];
+		uint64_t start = region->host_user_addr;
+
+		if (hva >= start && hva < start + region->size) {
+			gpa = hva - start + region->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Create and configure hardware virtqueue @vring for the vhost device
+ * owned by @internal.
+ *
+ * Translates the ring's desc/avail/used host-virtual addresses to
+ * guest-physical, stores them in internal->hw, and creates the FPGA
+ * queue through the nthw DBS layer: even ring indices become Rx
+ * queues, odd indices Tx queues (see the (vring & 1) tests below).
+ * Queues are only created when VIRTIO_F_IN_ORDER or
+ * VIRTIO_F_RING_PACKED was negotiated; otherwise a warning is logged
+ * and nothing is created.
+ *
+ * Returns 0 on success (also in the unsupported-feature case), or a
+ * negative value on translation/queue-creation failure.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	/* Translate the three ring areas from HVA to GPA; 0 means "not
+	 * found in any guest memory region".
+	 */
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	/* Resume from the indices vhost last saw (live-migration restart). */
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		/* Map (direction, queue) to FPGA queue/host/virtual-port ids;
+		 * direction: even vring = Rx, odd vring = Tx; queue = vring/2.
+		 */
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			int res;
+
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				/* Even index: receive queue. */
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else if (vring & 1) {
+				/*
+				 * transmit virt queue
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				/* Unreachable: the two branches above cover all
+				 * parities of vring; kept for defensiveness.
+				 */
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: unexpected index: %d\n",
+				       __func__, index, vring);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the datapath for the attached vhost device: read the vring
+ * count and negotiated features, then DMA-map guest memory and create
+ * the hardware queues.
+ *
+ * Inline FPGA profile: the first two rings are created and enabled
+ * immediately. Other profiles: only ring 0 is created here; the rest
+ * are brought up later via the set_vring_state() vDPA op.
+ *
+ * Always returns 0 (per-ring failures are reflected in hw->vring[].enable).
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		/* Only the first Rx/Tx pair (rings 0 and 1) is started here. */
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					/* vq_type: 0 = Rx queue, 1 = Tx queue */
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the datapath: push the current avail/used indices back to vhost
+ * (so a later restart resumes at the right position), then release
+ * every FPGA virtqueue that was created with IN_ORDER/PACKED
+ * semantics and clear the cached descriptor GPAs.
+ *
+ * Always returns 0; per-ring release failures are only logged.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			/* vq_type: 0 = Rx queue, 1 = Tx queue */
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		/* Mark the ring as torn down (desc GPA doubles as a flag). */
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Buffer size for a vfio_irq_set carrying one eventfd per possible
+ * vector: config interrupt + 2 * NTVF_VDPA_MAX_QUEUES queue vectors.
+ */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Install MSI-X eventfd triggers via VFIO: vector 0 is the device's
+ * own interrupt, vectors 1..nr_vring are the per-vring callfds.
+ * No-op when interrupts are already enabled. Returns 0 on success
+ * (including the too-many-vectors fallback, where guests must poll),
+ * -1 on ioctl failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	/* Vector 0: the device's own interrupt eventfd. */
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* NOTE(review): rings are walked two at a time (Rx/Tx pair);
+	 * assumes nr_vring is even — confirm behavior for odd counts.
+	 */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 1, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Remove the MSI-X eventfd triggers installed by
+ * ntvf_vdpa_enable_vfio_intr(). No-op when interrupts are not enabled.
+ * Returns 0 on success, -1 on ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)irq_set_buf;
+	int ret;
+
+	if (!__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	/* count == 0 with DATA_NONE releases all MSI-X vectors at once. */
+	irq_set->argsz = sizeof(struct vfio_irq_set);
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 0, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Datapath state machine, driven by the 'started' (PCI probe/remove)
+ * and 'dev_attached' (vhost config/close) flags:
+ *  - not running + (started && attached)  -> start the datapath
+ *  - running + (!started || !attached)    -> stop, disable IRQs, unmap DMA
+ * Serialized by internal->lock. Returns 0 on success or the first
+ * failing step's error code.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(__atomic_load_n(&internal->started, __ATOMIC_RELAXED) &&
+			 __atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 1, __ATOMIC_RELAXED);
+	} else if (__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(!__atomic_load_n(&internal->started, __ATOMIC_RELAXED) ||
+			 !__atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Fixed: the goto previously preceded this log
+			 * statement, making it unreachable dead code.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 0, __ATOMIC_RELAXED);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vDPA op: a vhost-user frontend attached to device @vid. Record the
+ * vid and kick the datapath state machine to start the queues.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	struct rte_vdpa_device *vdev;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (!list) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	internal->vid = vid;
+
+	__atomic_store_n(&internal->dev_attached, 1, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: the vhost-user frontend detached from device @vid. Stop the
+ * datapath and invalidate the cached hardware virtqueue pointers.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	struct rte_vdpa_device *vdev;
+	uint32_t q;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (!list) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+
+	__atomic_store_n(&internal->dev_attached, 0, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (q = 0; q < hw->nr_vring; q++)
+		hw->vring[q].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: features were (re)negotiated for device @vid. Live
+ * migration (dirty-page logging) is not supported by this driver, so
+ * the call fails when the frontend requests logging.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	uint64_t features;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (!list) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/* vDPA op: return the VFIO group fd backing vhost device @vid, or -1. */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct internal_list *list;
+	struct rte_vdpa_device *vdev;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (!list) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_group_fd;
+}
+
+/* vDPA op: return the VFIO device fd backing vhost device @vid, or -1. */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct internal_list *list;
+	struct rte_vdpa_device *vdev;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (!list) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_dev_fd;
+}
+
+/* vDPA op: report the maximum number of queue pairs for @vdev. */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (!list) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	*queue_num = list->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA op: report the virtio feature set this device instance offers. */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (!list) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = list->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA op: report the vhost-user protocol features this driver
+ * supports (the constant NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES).
+ * Fixed: vdev was annotated __rte_unused although it is used in both
+ * log statements below; the misleading annotation is dropped.
+ */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable VFIO MSI-X interrupts, then switch on Rx/Tx for every vring.
+ * Called once after the last vring has been enabled/created.
+ * Returns 0 on success or the interrupt-setup error code.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	uint32_t i;
+	int ret;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Fixed: was a raw printf(); use the driver log like the
+		 * rest of the file.
+		 */
+		NT_LOG(ERR, VDPA, "%s: Failed to enable VFIO MSI-X interrupts\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings: even index = Rx, odd = Tx.
+	 * Loop counter is uint32_t to match hw->nr_vring (avoids a
+	 * signed/unsigned comparison).
+	 */
+	for (i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vDPA op: enable (@state != 0) or disable (@state == 0) one vring.
+ *
+ * Disabling only pauses an existing HW queue. Enabling either resumes
+ * an existing queue or, on first use, DMA-maps guest memory and
+ * creates the queue; once the last vring comes up, VFIO interrupts are
+ * installed and all queues are switched on via
+ * ntvf_vdpa_configure_queue() (the inline profile does this
+ * unconditionally, other profiles only on the create path).
+ *
+ * Returns 0 on success, -1 for an unknown device or bad index, or the
+ * error from queue configuration.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* vq_type: 0 = Rx queue, 1 = Tx queue */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			/* Queue already exists: just resume it. */
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Callback table registered with the DPDK vDPA framework.
+ * migration_done and get_notify_area are intentionally unimplemented.
+ */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * PCI probe callback: allocate per-device state, set up VFIO, register
+ * with the vDPA framework, add the device to the global list and kick
+ * the datapath state machine.
+ *
+ * Returns 0 on success, -1 on failure with all partial allocations
+ * released.
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fixed: previously returned -1 directly here, leaking
+		 * both 'list' and 'internal'.
+		 */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	__atomic_store_n(&internal->started, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * PCI remove callback: stop the datapath, release VFIO/PCI resources,
+ * unregister from the vDPA framework and free per-device state.
+ * Teardown order matters: the datapath must be stopped before the
+ * device mappings are released.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	/* Clearing 'started' makes update_datapath stop the queues. */
+	__atomic_store_n(&internal->started, 0, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Empty PCI id table (vendor_id 0 terminates the list): the driver
+ * does not auto-bind by id; devices reach it through explicit binding.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver descriptor wiring probe/remove into the DPDK PCI bus. */
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+/* Register the driver, its id table and its kernel-module dependency. */
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_VF_VDPA_H__
+#define __NTNIC_VF_VDPA_H__
+
+/* Log type id registered by the ntvf vDPA driver. */
+extern int ntvf_vdpa_logtype;
+
+/* NOTE(review): LOG_FUNC_TRACE is unconditionally defined, so the
+ * enter/leave debug tracing below is always compiled in — confirm
+ * this is intended rather than a build-time option.
+ */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI bus entry points implemented in ntnic_vf_vdpa.c. */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* __NTNIC_VF_VDPA_H__ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive the VF index from the PCI address: 5 bits of devid joined with
+ * 3 bits of function number, yielding a value in [0, 255].
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	const int devid_bits = pdev->addr.devid & 0x1f;
+	const int func_bits = pdev->addr.function & 0x7;
+
+	return (devid_bits << 3) + func_bits;
+}
+
+/* Internal API */
+/* Per-VF VFIO bookkeeping populated by nt_vfio_setup() */
+struct vfio_dev {
+	int container_fd;	/* container fd, or RTE_VFIO_DEFAULT_CONTAINER_FD for pf0 */
+	int group_fd;		/* fd returned by rte_vfio_container_group_bind() */
+	int dev_fd;		/* device fd taken from the interrupt handle */
+	uint64_t iova_addr;	/* next free IOVA; advanced by 1G per mapping */
+};
+
+/* One slot per possible VF number (nt_vfio_vf_num packs 5+3 bits => 256) */
+static struct vfio_dev vfio_list[256];
+
+/*
+ * Look up the per-VF VFIO state; returns NULL for an out-of-range VF number.
+ * Fix: bound the index by the actual table size instead of a hard-coded 255,
+ * so the check cannot silently drift if vfio_list is ever resized.
+ */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 || (size_t)vf_num >= sizeof(vfio_list) / sizeof(vfio_list[0]))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+/*
+ * Create/bind the VFIO container and group for a device.
+ * pf0 (vf_num == 0) uses the default container; all other functions get a
+ * private container. Returns the VF number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/*
+	 * Fix: the return value was previously ignored; on failure
+	 * iommu_group_num was then used uninitialized. The call returns 1 on
+	 * success, 0 when the device is not bound to VFIO, <0 on error.
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) <= 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group number.\n");
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* Only tear down a container this function created */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/* Tear down the VFIO container associated with a VF; returns 0 or -1. */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	/*
+	 * NOTE(review): for vf_num == 0 container_fd is
+	 * RTE_VFIO_DEFAULT_CONTAINER_FD (see nt_vfio_setup); destroying the
+	 * default container here looks unintended -- confirm against
+	 * rte_vfio_container_destroy() semantics.
+	 */
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map host memory for DMA and return the chosen IOVA via *iova_addr.
+ * 1G-sized buffers are mapped from their 1G-aligned base; *iova_addr then
+ * receives base IOVA + the pointer's offset within that 1G region.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Fix: the format string previously used %d for the 64-bit size and
+	 * PRIX64 for a void * argument -- both undefined behavior in a
+	 * variadic call.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%" PRIu64 "\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	/* Report the IOVA of the caller's pointer and reserve a 1G window */
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Unmap a region previously mapped with nt_vfio_dma_map().
+ * For 1G mappings the supplied IOVA is rebased to the 1G-aligned start
+ * before the unmap, mirroring the alignment applied at map time.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* Container never initialized for this VF: nothing to unmap */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+/*
+ * Map a guest-physical region at an explicit IOVA for vDPA.
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/* Fix: size is uint64_t; printing it with %d was undefined behavior */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%" PRIu64 "\n",
+	       vf_num, virt_addr, iova_addr, size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Remove a vDPA mapping installed by nt_vfio_dma_map_vdpa(); 0 or -1. */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+	int res;
+
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	res = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					   iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       res, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Return the container fd recorded for a VF, or -1 for an invalid VF. */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/* Return the group fd recorded for a VF, or -1 for an invalid VF. */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/* Return the device fd recorded for a VF, or -1 for an invalid VF. */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		/* Fix: message was copy-pasted from nt_vfio_remove() */
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get device fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/*
+ * NOTE(review): RTE_INIT is conventionally written as `RTE_INIT(fn) { ... }`;
+ * the `RTE_INIT(fn);` declaration plus a separate definition below is
+ * unusual -- confirm it still registers the constructor as intended.
+ */
+RTE_INIT(nt_vfio_init);
+
+static void nt_vfio_init(void)
+{
+	/* Hand the generic nt_util layer this file's DMA map/unmap callbacks */
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+		       .vfio_dma_unmap = nt_vfio_dma_unmap
+	};
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VFIO_H_
+#define _NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Set up VFIO container/group for a device; returns the VF number or -1 */
+int nt_vfio_setup(struct rte_pci_device *dev);
+/* Destroy the VFIO container associated with the VF; returns 0 or -1 */
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the fds recorded at setup time; -1 on invalid VF number */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* DMA-map host memory; the IOVA chosen for virt_addr is stored in *iova_addr */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA variants: the caller supplies the IOVA (guest physical address) */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* _NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..c0e67ba03d
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* One exported xstat: its name plus where to read the counter from */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* name exposed via ethdev xstats */
+	uint8_t source;		/* 1 = RX counters, 2 = TX counters, 3 = FLM counters */
+	unsigned int offset;	/* byte offset of the counter within the source struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ */
+/* vSwitch stat-name table; counters live in struct port_counters_vswitch_v1 */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	/* source 1: RX port counters */
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	/* source 2: TX port counters */
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+/* Number of entries in the vSwitch table */
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	/* source 1: RX port counters (struct port_counters_v2) */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	/* source 2: TX port counters (struct port_counters_v2) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	/* source 1: RX port counters (struct port_counters_v2) */
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	/* source 2: TX port counters (struct port_counters_v2) */
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+/* Entry counts for the capture/inline tables */
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ */
+/* Sized to the largest of the name tables so any flavor fits */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+/* Per-port snapshot taken at reset time; subtracted from raw counters */
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = {{ 0 }};
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+/*
+ * Fill 'stats' with up to n xstats for one port, relative to the values
+ * captured by nthw_xstats_reset(). Returns the number of entries written.
+ * Must be called with the stat mutex locked (see note above).
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Select counter sources and the matching name table */
+	if (is_vswitch) {
+		flm_ptr = NULL;	/* vswitch flavor has no FLM counters */
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	/* Read each counter through its byte offset and subtract the reset snapshot */
+	for (i = 0; i < n && i < nb_names; i++) {
+		stats[i].id = i;
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			stats[i].value =
+				*((uint64_t *)&rx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 2:
+			/* TX stat */
+			stats[i].value =
+				*((uint64_t *)&tx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 3:
+			/* FLM stat - adapter-global, so reset row 0 is used */
+			if (flm_ptr) {
+				stats[i].value =
+					*((uint64_t *)&flm_ptr[names[i].offset]) -
+					nthw_xstats_reset_val[0][i];
+			} else {
+				stats[i].value = 0;
+			}
+			break;
+		default:
+			stats[i].value = 0;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*
+ * Fill 'values' for the requested stat ids; out-of-range ids are skipped.
+ * Returns the number of valid ids processed. Caller holds the stat mutex.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+	int count = 0;
+
+	/* Select counter sources and the matching name table */
+	if (is_vswitch) {
+		flm_ptr = NULL;	/* vswitch flavor has no FLM counters */
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] < nb_names) {
+			switch (names[ids[i]].source) {
+			case 1:
+				/* RX stat */
+				values[i] =
+					*((uint64_t *)&rx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 2:
+				/* TX stat */
+				values[i] =
+					*((uint64_t *)&tx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 3:
+				/* FLM stat - adapter-global, reset row 0 */
+				if (flm_ptr) {
+					values[i] =
+						*((uint64_t *)&flm_ptr
+						  [names[ids[i]].offset]) -
+						nthw_xstats_reset_val[0][ids[i]];
+				} else {
+					values[i] = 0;
+				}
+				break;
+			default:
+				values[i] = 0;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+}
+
+/*
+ * Snapshot the current counters into nthw_xstats_reset_val so subsequent
+ * reads report values relative to this point. Caller holds the stat mutex.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Select counter sources and the matching name table */
+	if (is_vswitch) {
+		flm_ptr = NULL;	/* vswitch flavor has no FLM counters */
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_ptr[names[i].offset]);
+			break;
+		case 2:
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_ptr[names[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM stat
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (flm_ptr && strcmp(names[i].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_ptr[names[i].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions does not require stat mutex locked
+ */
+/*
+ * Copy xstat names into 'xstats_names'. A NULL destination is a query for
+ * the total number of stats. Returns the number of names copied (or the
+ * table size for the NULL query). Does not require the stat mutex.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	struct rte_nthw_xstats_names_s *tbl;
+	uint32_t tbl_len;
+	unsigned int limit;
+	unsigned int idx;
+
+	/* Pick the name table matching the adapter flavor / FLM version */
+	if (is_vswitch) {
+		tbl = nthw_virt_xstats_names;
+		tbl_len = NTHW_VIRT_XSTATS_NAMES;
+	} else if (p_nt4ga_stat->flm_stat_ver < 18) {
+		tbl = nthw_cap_xstats_names_v1;
+		tbl_len = NTHW_CAP_XSTATS_NAMES_V1;
+	} else {
+		tbl = nthw_cap_xstats_names_v2;
+		tbl_len = NTHW_CAP_XSTATS_NAMES_V2;
+	}
+
+	if (xstats_names == NULL)
+		return tbl_len;
+
+	limit = (size < tbl_len) ? size : tbl_len;
+	for (idx = 0; idx < limit; idx++)
+		strlcpy(xstats_names[idx].name, tbl[idx].name,
+			sizeof(xstats_names[idx].name));
+
+	return (int)limit;
+}
+
+/*
+ * Copy names for the requested stat ids. A NULL destination is a query for
+ * the total number of stats. Does not require the stat mutex.
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	/* Pick the name table matching the adapter flavor / FLM version */
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+		}
+		/*
+		 * NOTE(review): count is incremented even for out-of-range
+		 * ids, unlike nthw_xstats_get_by_id() which counts only valid
+		 * ids -- confirm which semantics callers expect.
+		 */
+		count++;
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Name queries; safe without the stat mutex */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Value queries and reset; callers must hold the stat mutex */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v15 8/8] net/ntnic: adds socket connection to PMD
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-09-05 14:54   ` [PATCH v15 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-09-05 14:54   ` Mykola Kostenok
  6 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-05 14:54 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
v10:
* Fix uninitialized variables and build warnings.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  877 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5357 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	const char *param;
+	struct func_s *sub_funcs;
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);
+};
+
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (data) {
+		*data = malloc(sizeof(uint32_t));
+		if (*data) {
+			*len = sizeof(uint32_t);
+			*(uint32_t *)*data = (uint32_t)code;
+		}
+	}
+	return REQUEST_OK;
+}
+
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             can contain a descriptive error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+typedef struct ntconn_mod_s {
+	void *hdl;
+	struct pci_id_s addr;
+	const ntconnapi_t *op;
+
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd;
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;
+};
+
+int ntconn_server_register(void *server);
+
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+struct ntconn_header_s {
+	uint16_t tag;
+	uint16_t len;
+	uint32_t blob_len;
+};
+
+struct pci_id_s {
+	union {
+		uint64_t pci_id;
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+struct test_s {
+	int number;
+	int status;
+	uint64_t test[];
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * "get nic_pci_ids" handler: return the formatted PCI addresses of all
+ * registered adapters. The reply buffer is allocated here and released by
+ * the module's free_data callback.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *npci =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+	if (!npci) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Emit one "dddd:bb:dd.f" string per registered PCI id */
+	int cnt;
+
+	for (cnt = 0; cnt < MAX_PCI_IDS; cnt++) {
+		if (!serv->pci_id_list[cnt].pci_id)
+			break;
+		sprintf(npci->nic_pci_id[cnt], "%04x:%02x:%02x.%x",
+			serv->pci_id_list[cnt].domain & 0xffff,
+			serv->pci_id_list[cnt].bus,
+			serv->pci_id_list[cnt].devid,
+			serv->pci_id_list[cnt].function);
+	}
+	npci->num_nics = cnt;
+	*data = (char *)npci;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/* Request entry point: dispatch into the server module's command tree. */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, server_entry_funcs, data, len, 0);
+	return res;
+}
+
+/* Release a reply buffer previously handed out by this module. */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/* Module operations for the built-in "server" module.
+ * The last member (client_cleanup) is NULL - no per-client state to drop.
+ */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/*
+ * Register the server module itself. It owns the fake all-zero PCI
+ * address 0000:00:00.0 (see register_ntconn_mod(), which skips a zero
+ * pci_id when building the adapter list).
+ */
+int ntconn_server_register(void *server)
+{
+	const struct rte_pci_addr addr = {
+		.domain = 0,
+		.bus = 0,
+		.devid = 0,
+		.function = 0,
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..697e101a03
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/* Error-code to text mapping. Terminated by an entry with err_code == -1;
+ * get_ntconn_error() falls back to index 1 ("Internal error") for unknown
+ * codes.
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;
+static pthread_t tid;
+static pthread_t ctid;
+static struct ntconn_server_s ntconn_serv;
+
+/*
+ * Look up the error table entry for err_code.
+ * Unknown codes map to the generic "Internal error" entry (index 1).
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	/* The table is terminated by an entry whose err_code is -1 */
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a module instance under a PCI address + module name.
+ * The module is pushed onto the global singly-linked list (ntcmod_base),
+ * and its PCI id is added to the server's adapter list (unless it is the
+ * server's own fake all-zero address). Returns 0 on success, -1 on OOM.
+ * NOTE(review): not thread-safe - assumes registration happens before the
+ * server threads start; confirm with callers.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto the head of the module list */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the PCI id once in the server's adapter list */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill a sockaddr_un for the given filesystem path.
+ * Returns 0 on success, -1 on NULL arguments or a path too long for
+ * sun_path (addr is zeroed in that case, as before).
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (!path || !addr)
+		return -1;
+
+	memset(addr, 0, sizeof(*addr));
+	addr->sun_family = AF_UNIX;
+
+	if (strlen(path) >= sizeof(addr->sun_path))
+		return -1;	/* Path does not fit */
+
+	rte_strscpy(addr->sun_path, path, sizeof(addr->sun_path) - 1);
+	return 0;
+}
+
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Wait (poll) for data on fd and read at most len bytes into data.
+ * On success *recv_len holds the number of bytes actually received.
+ * Returns one of the STATUS_* codes; STATUS_TRYAGAIN is transient and
+ * the caller is expected to retry (see read_all()).
+ * timeout is in milliseconds; -1 blocks indefinitely.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and no error/invalid-fd flags: go ahead and receive */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			/* Save errno - NT_LOG/strerror below may clobber it */
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote  end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Read exactly `length` bytes from clfd into data, retrying transient
+ * (STATUS_TRYAGAIN) conditions. Returns STATUS_OK, or the first
+ * non-transient STATUS_* error encountered.
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t pos = 0;
+
+	while (pos < length) {
+		size_t chunk = 0;
+		int ret = read_data(clfd, length - pos, &data[pos], &chunk, -1);
+
+		switch (ret) {
+		case STATUS_OK:
+			pos += chunk;
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return ret;
+		case STATUS_TRYAGAIN:
+			break;	/* Transient - try again */
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       ret);
+			return ret;
+		}
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write all `size` bytes to fd, looping over partial writes.
+ * A write() interrupted by a signal (EINTR) is retried instead of being
+ * reported as a failure - the original code aborted the whole transfer.
+ * Returns 0 on success, STATUS_INTERNAL_ERROR on a real write error.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue;	/* Interrupted - retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += (size_t)res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from clfd into a heap buffer.
+ * First receives up to MESSAGE_BUFFER bytes, then - if the header's
+ * declared total size (len + blob_len) exceeds that - grows the buffer
+ * and reads the remainder. On STATUS_OK, *rdata owns the buffer and the
+ * caller must free it.
+ * NOTE(review): hdr.len + hdr.blob_len comes from the wire and is used
+ * unvalidated as an allocation size - confirm upstream bounds-checking.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	/* Peek the fixed header to learn the full request size */
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		/* Fetch the bytes that did not fit in the first read */
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read and parse one request: "<pci_id>;<module>[;<command>]".
+ * On success returns the registered module matching the PCI address and
+ * module name, sets *module_cmd to the command part (inside *get_req),
+ * and copies the wire header into *hdr. The raw request buffer is handed
+ * to the caller through *get_req (caller frees).
+ *
+ * Fix: the original evaluated strlen(tok) BEFORE checking tok for NULL,
+ * dereferencing NULL on malformed requests; the checks now come first.
+ * The dead "if (!hdr)" test (hdr is the address of a caller stack
+ * variable, never NULL) has been removed.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			/* Remainder (if any) is the module command */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "dddd:bb:dd.f" from the PCI id string */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Send one framed reply: an ntconn header immediately followed by the
+ * payload. Returns 0 on success or a STATUS_* error.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr;
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	/* Header first, payload immediately after */
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	return res;
+}
+
+/*
+ * Send a reply and then return the payload buffer to its owning module
+ * via the module's free_data callback (under the module mutex).
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size != 0) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+	return res;
+}
+
+/*
+ * Send an error reply: the first 4 bytes carry the numeric error code,
+ * the rest is the human-readable text.
+ * Fix: use snprintf instead of an unbounded sprintf so a long err_text
+ * can never overflow err_buf.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	/* The "----" prefix is placeholder space for the binary code below */
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s", ntcerr->err_text);
+	unsigned int len = strlen(err_buf);
+
+	memcpy(err_buf, &ntcerr->err_code, sizeof(uint32_t));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: loop reading requests, dispatching them to
+ * the matching module and writing replies, until the connection drops.
+ * On exit, every registered module's client_cleanup callback is invoked
+ * for this fd so per-client state can be released.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		/* NOTE(review): this error reply is sent unconditionally, i.e.
+		 * also after a successful reply above - confirm the client
+		 * protocol expects/tolerates the extra NTCONN_TAG_ERROR frame.
+		 * NOTE(review): `request` is freed only here; the break paths
+		 * above leave the last buffer unfreed - verify intent.
+		 */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Accept loop thread: listen on the already-bound UNIX socket and spawn
+ * one detached ntconnect_worker thread per accepted client, pinned to
+ * the configured CPU set. Exits on listen()/accept() failure or when
+ * ntcserv->running is cleared.
+ * NOTE(review): pthread_create/pthread_setaffinity_np return values are
+ * ignored - a failed spawn leaks the accepted fd; confirm acceptable.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		pthread_create(&ctid, NULL, ntconnect_worker,
+			       (void *)(uint64_t)clfd);
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Start the ntconnect service on the given UNIX socket path, if any
+ * modules have been registered. Creates the socket directory, binds the
+ * socket and launches the accept-loop thread pinned to `cpuset`.
+ * Returns 0 on success (or when nothing is registered), -1 on failure.
+ *
+ * Fixes vs. original: strdup() result is checked (dirname(NULL) would
+ * crash), unix_build_address() failure no longer falls through to bind()
+ * with an empty sun_path, and pthread_create() failure is reported.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (!ntcmod_base)
+		return 0;	/* No modules registered - nothing to serve */
+
+	/* Make sure the socket directory exists */
+	char *sockname_copy = strdup(sockname);
+	if (!sockname_copy) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	char *sockname_dir = dirname(sockname_copy);
+
+	if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Can't create socket directory: %s",
+		       sockname_dir);
+		free(sockname_copy);
+		return -1;
+	}
+	free(sockname_copy);
+
+	/* Add server to module list - cannot work without */
+	ntconn_server_register(&ntconn_serv);
+
+	/* Start named socket server */
+	struct sockaddr_un addr;
+
+	if (unix_build_address(sockname, &addr) < 0)
+		return -1;
+
+	ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+	ntconn_serv.cpuset = cpuset;
+	if (ntconn_serv.serv_fd == -1)
+		return -1;
+
+	/* Make sure the node in filesystem is deleted otherwise bind will fail */
+	unlink(sockname);
+
+	if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+			sizeof(struct sockaddr_un)) == -1) {
+		close(ntconn_serv.serv_fd);
+		return -1;
+	}
+
+	/* Run ntconnect service */
+	if (pthread_create(&tid, NULL, ntconnect_server, &ntconn_serv) != 0) {
+		close(ntconn_serv.serv_fd);
+		return -1;
+	}
+	pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+			       &ntconn_serv.cpuset);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+#define MAX_ADAPTERS 2
+
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/* Translate a driver link speed to the ntconnect PORT_LINK_SPEED_* code. */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	static const struct {
+		enum nt_link_speed_e in;
+		int out;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, PORT_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, PORT_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, PORT_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, PORT_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, PORT_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, PORT_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, PORT_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, PORT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].in == link_speed)
+			return speed_map[i].out;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/* Parse a textual speed ("10M".."100G") into the driver link-speed enum. */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * "get interfaces" handler: build a reply describing every ethdev port -
+ * physical ports first (with LAG handling), then virtual ports. The reply
+ * buffer is allocated here and released by the module free_data callback.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		/* Port state: disabled beats NIM presence */
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode length is clamped to the 16-bit field maximum */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		/* Virtual port state/link follows the negotiated vport mode */
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Handler for the "adapter;get,info" request.
+ * Fills a malloc'ed reply buffer with the FPGA identification string
+ * "TTT-PPPP-VV-RR" built from the FPGA type/product/version/revision ids.
+ * Returns REQUEST_OK, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	/* Reply payload is a fixed-size struct; caller frees it via adap_free_data(). */
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/*
+	 * NOTE(review): the format is bounded to 31 bytes while the buffer is
+	 * sizeof(struct ntc_adap_get_info_s) — assumes that struct is at least
+	 * 31 bytes; confirm against the ntconnect API definitions.
+	 */
+	snprintf(*data, 31, "%03d-%04d-%02d-%02d", fpga_info->n_fpga_type_id,
+		 fpga_info->n_fpga_prod_id, fpga_info->n_fpga_ver_id,
+		 fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handler for the "adapter;get,sensors" request.
+ * Builds a reply consisting of a struct ntc_sensors_s header followed by one
+ * fixed-size struct sensor record per adapter sensor and per NIM sensor on
+ * every physical port. Returns REQUEST_OK, or REQUEST_ERR on allocation
+ * failure.
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	/* NOTE(review): fixed 24-byte copy — assumes both adapter_name and
+	 * p_dev_name are at least 24 bytes; verify against the struct defs.
+	 */
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	/* Total reply size: header plus one record per sensor. */
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		/* NOTE(review): fixed 50-byte copy assumes info.name holds at
+		 * least 50 bytes and the receiver bounds its reads — confirm.
+		 */
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, 50);
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/*
+			 * NOTE(review): copies at most 50 bytes but does not
+			 * NUL-terminate when the name is 50 bytes or longer —
+			 * confirm the consumer treats name as fixed-width.
+			 */
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       (strlen(ptr->sensor->info.name) >= 50) ?
+			       50 :
+			       strlen(ptr->sensor->info.name));
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	 *data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable @port_nr on the adapter behind @adap. */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable @port_nr on the adapter behind @adap. */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/* Request link up on @portid; a no-op (logged) if it is already up. */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+	return REQUEST_OK;
+}
+
+/* Request link down on @portid; a no-op (logged) if it is already down. */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(p_adapter_info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of @portid from the textual @speed_str.
+ * Only permitted while the port is administratively disabled; replies with
+ * NTCONN_ADAPTER_ERR_WRONG_LINK_STATE otherwise, and with a parameter error
+ * if the speed string cannot be parsed.
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_adm_state(p_adapter_info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(p_adapter_info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+	return REQUEST_OK;
+}
+
+/* Apply loopback @mode (host/line/off) to @portid. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Turn the transmitter power of @portid on (@disable == false) or off.
+ * Replies with NTCONN_ADAPTER_ERR_TX_POWER_FAIL if the hardware call fails.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *p_adapter_info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(p_adapter_info, portid, disable) != 0) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: ERROR while changing tx_power\n", portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+	}
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handler for "adapter;set,interface,port<N>,<cmd>[,<arg>]" requests.
+ * On entry *data points at the zero-terminated remainder of the request,
+ * e.g. "port0,link_speed=10G"; the buffer may be modified in place by
+ * strtok_r(). Dispatches to the matching set_* helper and returns its
+ * result, or an ntconn error reply on malformed input.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	/* First token must have the form "port<N>" */
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports. The previous check (port_nr < n_phy_ports)
+	 * was inverted: it rejected every valid physical port and let
+	 * out-of-range ids through. Accept only 0..n_phy_ports-1.
+	 */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * Handler for "adapter;set,adapter,...": accepted on the wire but not yet
+ * implemented — always replies with NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * Module request entry point: routes an incoming "adapter" request to the
+ * matching handler in adapter_entry_funcs via the shared dispatch helper.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer malloc'ed by one of the handlers above. */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; this module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations table handed to the NtConnect core when this module registers. */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register one adapter with the NtConnect framework.
+ * Claims the first free slot in adap_hdl[]; returns the result of
+ * register_ntconn_mod(), or -1 when all MAX_ADAPTERS slots are taken.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_ADAPTERS && adap_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client handle state for the "filter" (flow) ntconnect module. */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Mapping from client-visible in_port number to the underlying flow device. */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+/* Error-code-to-text table; terminated by the (uint32_t)-1 sentinel entry. */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Look up the message text for @err_code in the ntconn_err table.
+ * Unknown codes fall back to entry 1 ("Internal error").
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Build an error reply: a malloc'ed struct flow_return_s carrying @err as
+ * status and the text for @code as message. Returns REQUEST_OK, or
+ * REQUEST_ERR with *len = 0 if the reply cannot be allocated.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		const char *err_msg = get_error_msg(code);
+
+		/*
+		 * Use strlcpy, as ntconn_flow_err_status() does, so err_msg is
+		 * always NUL-terminated; the previous memcpy of up to
+		 * ERR_MSG_LEN bytes could leave an unterminated string in the
+		 * (non-zeroed) reply buffer.
+		 */
+		strlcpy(return_value->err_msg, err_msg, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Build an error reply carrying @err as status and the generic
+ * internal-error text as message. Returns REQUEST_OK, or REQUEST_ERR with
+ * *len = 0 on allocation failure.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	*data = (char *)ret;
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	*len = sizeof(struct flow_return_s);
+	ret->status = err;
+	ret->type = FLOW_ERROR_GENERAL;
+	strlcpy(ret->err_msg, get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR),
+		ERR_MSG_LEN);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Dispatch table mapping "filter" request function names to their handlers. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Marshal @status and @error into a freshly malloc'ed flow_return_s reply.
+ * Returns REQUEST_OK, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	*data = (char *)ret;
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	*len = sizeof(struct flow_return_s);
+	ret->status = status;
+	ret->type = error->type;
+	strlcpy(ret->err_msg, error->message, ERR_MSG_LEN);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * Handler for "filter;setport,in_port=<n>,vpath=<path>".
+ * Binds a client-visible port number to the flow device of the given
+ * virtualization path and replies with the list of hardware queues attached
+ * to that VF. Replies with ENODEV on any validation failure.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/*
+	 * Zero-initialize: vpath was logged and passed to
+	 * vp_path_instance_ready() even when no "vpath=" token was parsed,
+	 * reading an uninitialized buffer.
+	 */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handler for "filter;flush,port=<n>": removes all flows installed on the
+ * given port's flow device and replies with the flow API status.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * atoi() may return a negative value; reject it along with too-large
+	 * ids so port_eth[] is never indexed out of bounds.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handler for "filter;destroy": destroys a single flow. The binary request
+ * blob (struct destroy_flow_ntconnect) follows the text header at offset
+ * hdr->len inside *data.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/*
+	 * Initialize error like func_flow_flush() does; it is copied into the
+	 * reply below and flow_destroy() may not set it on every path.
+	 */
+	set_error(&error);
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject out-of-range (including negative) port indices. */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selector for make_flow_create(): really create the flow or only validate. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Convert the wire-format flow description in @flow_cpy (where all pointers
+ * have been replaced by inline copies) back into flow_elem / flow_action
+ * arrays with real pointers, and hand it to the Flow API. @func selects
+ * flow_validate() or flow_create(). Returns the created flow handle cast to
+ * uint64_t (0 for validate or on error); *status and *error carry the
+ * detailed outcome.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "To many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	do {
+		idx++;
+		/*
+		 * Use >=: elem[]/action[] hold exactly MAX_FLOW_STREAM_ELEM
+		 * entries, so index MAX_FLOW_STREAM_ELEM is one past the end.
+		 */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	idx = -1;
+	do {
+		idx++;
+		/* Same off-by-one guard as the elem loop above. */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					/* >=: elem_tun[] has exactly that many entries */
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"To many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					/* >=: items[] holds RAW_ENCAP_DECAP_ELEMS_MAX entries */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					/* >=: same bound as the encap items above */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"To many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+/*
+ * NtConnect "flow create" handler.
+ *
+ * Expects a struct create_flow_ntconnect blob at (*data)[hdr->len].
+ * On success *data is replaced by a malloc'ed create_flow_return_s
+ * holding the new flow handle; on filter failure by a
+ * flow_error_return_s describing the error.  Both cases return
+ * REQUEST_OK; REQUEST_ERR is returned only when the reply buffer
+ * cannot be allocated.
+ */
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The request blob must be exactly one create_flow_ntconnect */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	/*
+	 * Pre-initialize the error struct like func_flow_validate and
+	 * func_flow_query do; make_flow_create() is otherwise not
+	 * guaranteed to fill it on every failure path.
+	 */
+	set_error(&error);
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* Fix: index the element array, not the request struct */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		/* Success: reply with the new flow handle */
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	/* Creation failed: reply with the flow_error details */
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect "flow validate" handler.
+ *
+ * Same request layout as func_flow_create, but only validates the flow
+ * specification through the filter without creating it.  The resulting
+ * status and any flow_error are marshalled into the reply by
+ * copy_return_status().
+ *
+ * Fixes vs. original: debug log used the wrong function name, the IPv4
+ * debug prints repeated byte [0] four times, the element-end check
+ * indexed the request struct instead of its element array, and an
+ * unreachable duplicate validation path after the return was removed.
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The request blob must be exactly one create_flow_ntconnect */
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "func_flow_validate\n");
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* Fix: index the element array, not the request struct */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data; status/error are marshalled into the reply */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * NtConnect "flow query" handler.
+ *
+ * Expects a struct query_flow_ntconnect blob at (*data)[hdr->len]
+ * identifying the flow handle and one query action.  The reply is a
+ * malloc'ed query_flow_return_s followed by the action-specific data
+ * returned by flow_query().  Returns REQUEST_OK even on query failure
+ * (the status/error are carried in the reply); REQUEST_ERR only when
+ * the reply buffer cannot be allocated.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	/* The request blob must be exactly one query_flow_ntconnect */
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/*
+	 * NOTE(review): the client-supplied u64 is cast directly to a flow
+	 * handle with no validation - assumes it is a handle previously
+	 * returned by func_flow_create; confirm the trust model.
+	 */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	/* Rebuild the flow_action from the copied type + conf payload */
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data; flow_query() allocates data_out on success */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply header plus the variable-length query payload */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Module entry point for "flow" requests: dispatch the named function
+ * through the common helper using this module's function table.
+ */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/*
+ * Release a reply buffer previously handed to the framework.
+ * free(NULL) is a no-op, so no NULL guard is needed.
+ */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Client teardown hook - the flow module keeps no per-client state */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect module descriptor: name, version, and request/free/cleanup hooks */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register an adapter in the NtConnect flow module.
+ *
+ * Binds the driver to the first free client slot and registers the
+ * module's operations on the adapter's PCI address.  Returns -1 when
+ * all slots are occupied, otherwise the register_ntconn_mod() result.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Locate the first unused client slot */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL)
+			break;
+	}
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client driver binding; one slot per registered adapter */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-module error code to message table, terminated by code -1 */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/********************************************************************/
+/* Map an error code to its human readable message.                 */
+/* Codes below NTCONN_METER_ERR_INTERNAL_ERROR belong to the        */
+/* generic NtConnect range; unknown meter codes fall back to the    */
+/* "Internal error" entry.                                          */
+/********************************************************************/
+static const char *get_error_msg(uint32_t err_code)
+{
+	int idx;
+
+	/* Generic NtConnect codes are resolved via the common table */
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *generic = get_ntconn_error(err_code);
+
+		return generic->err_text;
+	}
+
+	/* Scan the module table up to the -1 sentinel */
+	for (idx = 0; ntconn_err[idx].err_code != (uint32_t)-1; idx++) {
+		if (ntconn_err[idx].err_code == err_code)
+			return ntconn_err[idx].err_text;
+	}
+
+	/* Unknown code: report "Internal error" (table index 1) */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Meter request handlers and the dispatch table that maps request
+ * function names to them (terminated by a NULL entry).
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* Fill an rte_mtr_error struct from an NtConnect meter error code    */
+/**********************************************************************/
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->cause = NULL;
+	error->message = get_error_msg(err);
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+}
+
+/*
+ * NtConnect "meter capabilities" handler.
+ *
+ * Parses "vport=<n>" from the request string (valid range 1..64),
+ * queries rte_mtr capabilities for the corresponding physical port and
+ * replies with a meter_capabilities_return_s on success or a
+ * meter_error_return_s on failure.  REQUEST_ERR only on allocation
+ * failure.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	/* Extract the "vport=<n>" token from the request string */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* Physical port is the low bit of the virtual port */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * NtConnect "meter setup" handler.
+ *
+ * The request string selects the sub-command (addpro/delpro/addpol/
+ * delpol/crtmtr/delmtr) and the blob at (*data)[hdr->len] carries a
+ * struct meter_setup_s with the parameters.  Profile/policy/meter IDs
+ * are rebased into a per-virtual-port window before being handed to
+ * the rte_mtr API.  Replies with meter_return_s on success or
+ * meter_error_return_s on failure; REQUEST_ERR only when the reply
+ * buffer cannot be allocated.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* Parse the 6-character sub-command token from the request string */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		/* Rebase the profile ID into this vport's ID window */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action list from the copied actions */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebase the referenced profile/policy IDs the same way */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	default:
+		/*
+		 * Unreachable: command was validated above.  Kept so that
+		 * status can never be read uninitialized if a new command
+		 * is added to the parser without a case here.
+		 */
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Read (and optionally clear) the statistics of one meter.
+ * On success the reply is a meter_return_stat_s; on failure the reply is a
+ * meter_error_return_s carrying the rte_mtr error details.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The request blob must hold exactly one meter_get_stat_s */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* The request payload follows the connection header in the blob */
+	struct meter_get_stat_s *req =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	if (req->vport < 4 || req->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* Map the virtual port to a physical port and a per-port meter id */
+	port = req->vport & 1;
+	req->mtr_id += (req->vport - 4) *
+		       (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2));
+
+	status = rte_mtr_stats_read(port, req->mtr_id, &stats, &stats_mask,
+				    req->clear, &error);
+	if (status == 0) {
+		/* Success: hand the counters back in a meter_return_stat_s */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *rep =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		rep->stats_mask = stats_mask;
+		memcpy(&rep->stats, &stats, sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Failure: report the rte_mtr error details back to the client */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *rep =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(rep->err_msg, error.message, ERR_MSG_LEN);
+	rep->status = status;
+	rep->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Dispatch an incoming "meter" request to the matching handler in
+ * adapter_entry_funcs. client_id is forwarded to the dispatcher, so it
+ * must not carry the _unused attribute (it was wrongly marked before).
+ */
+static int meter_request(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer allocated by one of the meter request handlers */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op, so no NULL guard is needed */
+	free(data);
+}
+
+/* Client disconnect hook: the meter module keeps no per-client state */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect operations table registered for the "meter" module */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register the meter module for a driver instance.
+ * Claims the first free slot in meter_hdl[]; returns -1 when all slots
+ * are taken, otherwise the result of register_ntconn_mod().
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Find the first unused adapter slot */
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MODULES_H_
+#define _NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules
+ *
+ * Each function registers one ntconnect module for the given driver
+ * instance. Return value: 0 on success, -1 on failure (see the
+ * individual module implementations).
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* _NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..437cf9ddad
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,877 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+/* Stat layout versions this module can decode; checked at registration */
+static int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Selects one of the four statistic sections inside a snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One snapshot section: its location in the buffer and its element count */
+struct snap_addr_s {
+	const uint64_t *ptr;	/* points into snaps_s::buffer */
+	unsigned int size;	/* section size in 64bit elements */
+};
+
+/* Per-client snapshot state, kept in a singly linked list */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;	/* one allocation holding all four sections */
+	struct snaps_s *next;
+};
+
+/* Module singleton state shared by all stat requests */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;	/* head of the per-client snapshot list */
+} stat_hdl;
+
+/* Statistic category; selects record size and reader in get_size()/do_get_stats() */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Level-2 dispatch: "get snapshot <section>" request keywords */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* Level-1 dispatch: "get <item>" request keywords */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level dispatch for the "stat" module */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize the flowmatcher counters into 'val': a stat header
+ * (STAT_INFO_ELEMENTS 64bit words) followed by 'nbc' records.
+ * Returns the number of 64bit elements written.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+	/* Silence unused-variable warnings when built with NDEBUG */
+	(void)cnt_names;
+	(void)cnt_values;
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		/*
+		 * Advance 'flm' per record; the previous code left the
+		 * pointer fixed so every iteration overwrote record 0.
+		 */
+		for (c = 0; c < nbc; c++, flm++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+		}
+	} else {
+		/*
+		 * No flowmatcher present: zero the full output area.
+		 * Size by the output record type, not the hw-side struct,
+		 * so the cleared span matches what the caller reads back.
+		 */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize the color counters into 'val': a stat header followed by one
+ * record per color. Returns the number of 64bit elements written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	struct color_type_fields_s *clr =
+		(struct color_type_fields_s *)cdata->data;
+	int i;
+
+	for (i = 0; i < nbc; i++) {
+		clr[i].pkts = hwstat->mp_stat_structs_color[i].color_packets;
+		clr[i].octets = hwstat->mp_stat_structs_color[i].color_bytes;
+		clr[i].tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[i].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize the Rx host-buffer (queue) counters into 'val': a stat header
+ * followed by one record per queue. Returns the number of 64bit elements
+ * written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *qdata = (struct ntc_stat_get_data_s *)val;
+
+	qdata->nb_counters = (uint64_t)nbq;
+	qdata->timestamp = hwstat->last_timestamp;
+	qdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	struct queue_type_fields_s *queue =
+		(struct queue_type_fields_s *)qdata->data;
+	int i;
+
+	for (i = 0; i < nbq; i++) {
+		queue[i].flush_pkts = hwstat->mp_stat_structs_hb[i].flush_packets;
+		queue[i].drop_pkts = hwstat->mp_stat_structs_hb[i].drop_packets;
+		queue[i].fwd_pkts = hwstat->mp_stat_structs_hb[i].fwd_packets;
+		queue[i].dbs_drop_pkts = hwstat->mp_stat_structs_hb[i].dbs_drop_packets;
+		queue[i].flush_octets = hwstat->mp_stat_structs_hb[i].flush_bytes;
+		queue[i].drop_octets = hwstat->mp_stat_structs_hb[i].drop_bytes;
+		queue[i].fwd_octets = hwstat->mp_stat_structs_hb[i].fwd_bytes;
+		queue[i].dbs_drop_octets = hwstat->mp_stat_structs_hb[i].dbs_drop_bytes;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter set shared by Rx and Tx capture ports from the
+ * hardware-side port counters into a reply record.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize the Rx port counters into 'val': a stat header followed by one
+ * record per port. The record layout depends on whether the adapter runs
+ * in vswitch (virt) or capture (cap) mode. Returns the number of 64bit
+ * elements written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch mode: small per-port record (octets/pkts/drops) */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* capture mode: RMON set plus the Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize the Tx port counters into 'val': a stat header followed by one
+ * record per port, virt or cap layout depending on adapter mode. Returns
+ * the number of 64bit elements written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		/* vswitch mode: small per-port record (octets/pkts/drops) */
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* capture mode: RMON set only */
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			/* Tx pkts taken from the running total, overriding the RMON copy */
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/* Reply with the stat layout version as a single int */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* Statistics must be initialized before a version can be reported */
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	*data = malloc(sizeof(int));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	int *reply = (int *)*data;
+
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/* Reply with the flowmatcher record layout version (1 or 2) as an int */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	/* Statistics must be initialized before a version can be reported */
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	*data = malloc(sizeof(int));
+	if (*data == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	int *reply = (int *)*data;
+
+	/* FLM stat versions below 18 use layout 1, later ones layout 2 */
+	if (stat->p_nt4ga_stat->flm_stat_ver < 18)
+		*reply = 1;
+	else
+		*reply = 2;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	nt4ga_stat_t *hws = stat->p_nt4ga_stat;
+	int nrec = 0;
+	int per_rec = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		nrec = hws->mp_nthw_stat->m_nb_color_counters / 2;
+		per_rec = NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = hws->mp_nthw_stat->m_nb_rx_host_buffers;
+		per_rec = NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		nrec = hws->mn_rx_ports;
+		per_rec = hws->mp_nthw_stat->mb_is_vswitch ?
+			  NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			  NUM_STAT_RECORD_TYPE_RX_PORT_CAP;
+		break;
+	case STAT_TYPE_TX:
+		nrec = hws->mn_tx_ports;
+		per_rec = hws->mp_nthw_stat->mb_is_vswitch ?
+			  NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			  NUM_STAT_RECORD_TYPE_TX_PORT_CAP;
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		per_rec = NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	}
+
+	*num_records = nrec;
+	return nrec * per_rec + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for the "get <counters>" handlers: allocate a reply buffer
+ * sized by get_size() and fill it via the supplied reader callback.
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int bytes = get_size(stat, stype, &nbg) * (int)sizeof(uint64_t);
+	uint64_t *val = malloc(bytes);
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Sample the counters under the driver stat lock for consistency */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = bytes;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* Reply with the flowmatcher counters */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Reply with the color counters */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Reply with the Rx host-buffer (queue) counters */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_QUEUE,
+				    read_queues);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Reply with the Rx port counters */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_RX,
+				    read_rx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Reply with the Tx port counters */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_TX,
+				    read_tx_counters);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Locate the snapshot node of 'client_id' in the linked list.
+ * When 'parent' is non-NULL it receives the predecessor node (NULL if the
+ * match is the list head, the last node if there is no match).
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Return the snapshot node for 'client_id', creating one on first use.
+ * Returns NULL when the allocation of a new node fails.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (snaps)
+		return snaps;
+
+	/* First snapshot for this client: push a new node on the list head */
+	snaps = malloc(sizeof(struct snaps_s));
+	if (snaps) {
+		snaps->client_id = client_id;
+		snaps->buffer = NULL;
+		snaps->next = stat->snaps_base;
+		stat->snaps_base = snaps;
+	}
+	return snaps;
+}
+
+/*
+ * Take an atomic snapshot of all statistics for this client.
+ * All four sections (colors, queues, Rx ports, Tx ports) are read into one
+ * buffer under the driver stat lock so the counters are mutually
+ * consistent; the sections are later fetched via func_get_snap_*().
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps) {
+		/*
+		 * Allocation failed. The stat lock is not held at this point,
+		 * so do not jump to err_out: unlocking a mutex that is not
+		 * locked is undefined behavior.
+		 */
+		return ntconn_error(data, len, "stat",
+				    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+	}
+
+	/* Drop any previous snapshot before building a new one */
+	if (snaps->buffer)
+		free(snaps->buffer);
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_out;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_out;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_out:
+	/* Reached only with the stat lock held */
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of the client's last snapshot into a freshly allocated
+ * reply buffer. Fails when no snapshot has been taken yet.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	/* Check the allocation result (*data), not the argument pointer */
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* Reply with the color section of the caller's last snapshot */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	const enum snap_addr_select_e sel = SNAP_COLORS;
+
+	return get_snap_data(hdl, client_id, data, len, sel);
+}
+
+/* Reply with the queue section of the caller's last snapshot */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	const enum snap_addr_select_e sel = SNAP_QUEUES;
+
+	return get_snap_data(hdl, client_id, data, len, sel);
+}
+
+/* Reply with the Rx port section of the caller's last snapshot */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	const enum snap_addr_select_e sel = SNAP_RX_PORT;
+
+	return get_snap_data(hdl, client_id, data, len, sel);
+}
+
+/* Reply with the Tx port section of the caller's last snapshot */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	const enum snap_addr_select_e sel = SNAP_TX_PORT;
+
+	return get_snap_data(hdl, client_id, data, len, sel);
+}
+
+/*
+ * Stat main request function
+ */
+/*
+ * Dispatch an incoming "stat" request to the matching handler in
+ * stat_entry_funcs. client_id is forwarded to the dispatcher, so it
+ * must not carry the _unused attribute (it was wrongly marked before).
+ */
+static int stat_request(void *hdl, int client_id,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				stat_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer allocated by one of the stat request handlers */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Client disconnect hook: unlink and free the client's snapshot node,
+ * if one exists.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink the node from the singly linked list */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	/* free(NULL) is a no-op, so no guard is needed for the buffer */
+	free(snaps->buffer);
+	free(snaps);
+}
+
+/* ntconnect operations table registered for the "stat" module */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for a driver instance.
+ * Refuses registration when the hw stat object is missing or when the
+ * stat layout version is not supported. Returns -1 on failure, otherwise
+ * the result of register_ntconn_mod().
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/* Guard the mp_nthw_stat dereference below (the request handlers
+	 * all check this pointer, so registration must too)
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: hw statistics not available. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-adapter handle storage; one slot is claimed per registered adapter. */
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Function dispatch table consulted by test_request; NULL entry terminates. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Echo-style test handler: validates the request blob appended after the
+ * ntconn header, then returns a freshly allocated struct test_s with the
+ * same 'number' of uint64_t payload entries copied back.
+ * On validation failure a minimal struct test_s carrying the error status
+ * is returned; REQUEST_ERR is returned only on allocation failure.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint32_t size;
+	/* Request blob sits immediately after the header in the buffer */
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+
+	/* Reject negative counts before they enter the size arithmetic */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	size = sizeof(struct test_s) + sizeof(uint64_t) * number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* Success path: allocate the response and echo the payload */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Validation failed: report the status in a minimal reply */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/* NOTE(review): these constants are not referenced anywhere in this file —
+ * presumably reserved for flow-API request dispatch; confirm before removing.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/* Dispatch an incoming request to the matching adapter_entry_funcs handler. */
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a response buffer allocated by func_test. */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a no-op; the explicit NULL guard was redundant */
+	free(data);
+}
+
+/* This module keeps no per-client state, so client cleanup is a no-op. */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Module operations table handed to the NtConnect framework on registration. */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register the test module for one adapter: claim the first unused slot in
+ * test_hdl[] and register it with the NtConnect framework.
+ * Returns register_ntconn_mod()'s status, or -1 when all slots are taken.
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	/* Scan for the first free adapter slot */
+	while (slot < MAX_CLIENTS && test_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[slot],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                   ` (19 preceding siblings ...)
  2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-08 16:07 ` Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
                     ` (7 more replies)
  20 siblings, 8 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The NTNIC PMD does not rely on a kernel space Napatech driver,
thus all defines related to the register layout are part of the PMD
code, which will be added in later commits.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v3:
* Fixed not needed cflags as suggested in comments.
v5:
* Disable build for unsupported platforms.
v7:
* Update unsupported platforms.
v10:
* Update FPGA register defines.
v13:
* Fix typo spelling warnings
v15:
* Update FPGA register defines.
---
 drivers/net/meson.build                       |    1 +
 drivers/net/ntnic/include/fpga_model.h        |   99 +
 drivers/net/ntnic/meson.build                 |   30 +
 drivers/net/ntnic/nthw/nthw_register.h        |   19 +
 .../supported/nthw_fpga_9563_055_024_0000.c   | 4190 ++++++++++
 .../nthw/supported/nthw_fpga_instances.h      |   14 +
 .../nthw/supported/nthw_fpga_modules_defs.h   |  166 +
 .../supported/nthw_fpga_parameters_defs.h     |  209 +
 .../nthw/supported/nthw_fpga_registers_defs.h | 7277 +++++++++++++++++
 9 files changed, 12005 insertions(+)
 create mode 100644 drivers/net/ntnic/include/fpga_model.h
 create mode 100644 drivers/net/ntnic/meson.build
 create mode 100644 drivers/net/ntnic/nthw/nthw_register.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h

diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b533c5..fb6d34b782 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -45,6 +45,7 @@ drivers = [
         'nfb',
         'nfp',
         'ngbe',
+        'ntnic',
         'null',
         'octeontx',
         'octeon_ep',
diff --git a/drivers/net/ntnic/include/fpga_model.h b/drivers/net/ntnic/include/fpga_model.h
new file mode 100644
index 0000000000..89f1ae9736
--- /dev/null
+++ b/drivers/net/ntnic/include/fpga_model.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef FPGA_MODEL_H_
+#define FPGA_MODEL_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+/* Bus types an FPGA module instance can be attached to in the register map. */
+enum nt_fpga_bus_type {
+	BUS_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	BUS_TYPE_BAR,
+	BUS_TYPE_PCI,
+	BUS_TYPE_CCIP,
+	BUS_TYPE_RAB0,
+	BUS_TYPE_RAB1,
+	BUS_TYPE_RAB2,
+	BUS_TYPE_NMB,
+	BUS_TYPE_NDM,
+	BUS_TYPE_SPI0,
+	BUS_TYPE_SPI = BUS_TYPE_SPI0, /* alias: the default SPI bus is SPI0 */
+};
+
+typedef enum nt_fpga_bus_type nt_fpga_bus_type_t;
+
+/* Register access semantics. NOTE(review): RC1 presumably means
+ * "read, clear-on-1" — confirm against the FPGA documentation.
+ */
+enum nt_fpga_register_type {
+	REGISTER_TYPE_UNKNOWN =
+		0, /* Unknown/uninitialized - keep this as the first enum element */
+	REGISTER_TYPE_RW,
+	REGISTER_TYPE_RO,
+	REGISTER_TYPE_WO,
+	REGISTER_TYPE_RC1,
+	REGISTER_TYPE_MIXED, /* register whose fields have differing access */
+};
+
+typedef enum nt_fpga_register_type nt_fpga_register_type_t;
+
+/* One bit-field within a register. The generated tables initialize these as
+ * { id, bw, low, reset_val }: e.g. { CAT_CCT_CTRL_CNT, 16, 16, 0x0000 } is a
+ * 16-bit field starting at bit 16 with reset value 0.
+ */
+struct nt_fpga_field_init {
+	int id;
+	uint16_t bw;        /* field width in bits */
+	uint16_t low;       /* least-significant bit position in the register */
+	uint64_t reset_val; /* value of the field after reset */
+};
+
+typedef struct nt_fpga_field_init nt_fpga_field_init_t;
+
+/* One register of a module: address, width, access type, reset value and
+ * the table of bit-fields it contains.
+ */
+struct nt_fpga_register_init {
+	int id;
+	uint32_t addr_rel; /* address relative to the module's addr_base */
+	uint16_t bw;       /* register width in bits */
+	nt_fpga_register_type_t type;
+	uint64_t reset_val;
+	int nb_fields;     /* number of entries in 'fields' */
+	struct nt_fpga_field_init *fields;
+};
+
+typedef struct nt_fpga_register_init nt_fpga_register_init_t;
+
+/* One module instance in the FPGA: identity/version, the bus it sits on,
+ * its base address and its register table.
+ */
+struct nt_fpga_module_init {
+	int id;
+	int instance;      /* instance index when a module occurs multiple times */
+	int def_id;
+	int major_version;
+	int minor_version;
+	nt_fpga_bus_type_t bus_id;
+	uint32_t addr_base; /* base address of the module on its bus */
+	int nb_registers;   /* number of entries in 'registers' */
+	struct nt_fpga_register_init *registers;
+};
+
+typedef struct nt_fpga_module_init nt_fpga_module_init_t;
+
+/* A single (id, value) product parameter of an FPGA build. */
+struct nt_fpga_prod_param {
+	const int param_id;
+	const int param_value;
+};
+
+typedef struct nt_fpga_prod_param nt_fpga_prod_param_t;
+
+/* Top-level descriptor of one supported FPGA image: identity/version data,
+ * its product parameters and the modules it contains.
+ */
+struct nt_fpga_prod_init {
+	int fpga_item_id;
+	int fpga_product_id;
+	int fpga_version;
+	int fpga_revision;
+	int fpga_patch_no;
+	int fpga_build_no;
+	uint32_t fpga_build_time;
+	int nb_prod_params; /* number of entries in 'product_params' */
+	struct nt_fpga_prod_param *product_params;
+	int nb_modules;     /* number of entries in 'modules' */
+	struct nt_fpga_module_init *modules;
+};
+
+typedef struct nt_fpga_prod_init nt_fpga_prod_init_t;
+
+#endif /* FPGA_MODEL_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
new file mode 100644
index 0000000000..1194ce6aea
--- /dev/null
+++ b/drivers/net/ntnic/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020-2023 Napatech A/S
+
+# The PMD is only built on x86_64 Linux; bail out early elsewhere.
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+    build = false
+    reason = 'only supported on x86_64 Linux'
+    subdir_done()
+endif
+
+# includes
+includes = [
+    include_directories('.'),
+    include_directories('include'),
+    include_directories('nthw'),
+    include_directories('nthw/supported'),
+]
+
+# all sources
+sources = files(
+    'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+)
+
+# Use the tree-wide default cflags when available, otherwise fall back to
+# machine args plus allowing internal DPDK APIs.
+if is_variable('default_cflags')
+      cflags += default_cflags
+else
+      cflags += machine_args
+      cflags += ['-DALLOW_INTERNAL_API']
+endif
+
+# END
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
new file mode 100644
index 0000000000..5cdbd9fc5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_REGISTER_H_
+#define NTHW_REGISTER_H_
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "fpga_model.h"
+
+/* Umbrella header aggregating the generated FPGA register-map definitions. */
+#include "nthw_fpga_modules_defs.h"
+#include "nthw_fpga_parameters_defs.h"
+#include "nthw_fpga_registers_defs.h"
+
+#endif /* NTHW_REGISTER_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
new file mode 100644
index 0000000000..6d8916db14
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_024_0000.c
@@ -0,0 +1,4190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_register.h"
+
+/* The field tables in this file appear machine-generated from the FPGA
+ * register map; each entry is { field id, bit width, low bit, reset value }.
+ */
+static nt_fpga_field_init_t cat_cct_ctrl_fields[] = {
+	{ CAT_CCT_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_CCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cct_data_fields[] = {
+	{ CAT_CCT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_CCT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_ctrl_fields[] = {
+	{ CAT_CFN_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CFN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cfn_data_fields[] = {
+	{ CAT_CFN_DATA_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CFN_DATA_ERR_CV, 2, 99, 0x0000 },
+	{ CAT_CFN_DATA_ERR_FCS, 2, 101, 0x0000 },
+	{ CAT_CFN_DATA_ERR_INV, 1, 98, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L3_CS, 2, 105, 0x0000 },
+	{ CAT_CFN_DATA_ERR_L4_CS, 2, 107, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L3_CS, 2, 109, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_L4_CS, 2, 111, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TNL_TTL_EXP, 2, 115, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TRUNC, 2, 103, 0x0000 },
+	{ CAT_CFN_DATA_ERR_TTL_EXP, 2, 113, 0x0000 },
+	{ CAT_CFN_DATA_INV, 1, 1, 0x0000 },
+	{ CAT_CFN_DATA_KM0_OR, 3, 173, 0x0000 },
+	{ CAT_CFN_DATA_KM1_OR, 3, 176, 0x0000 },
+	{ CAT_CFN_DATA_LC, 8, 164, 0x0000 },
+	{ CAT_CFN_DATA_LC_INV, 1, 172, 0x0000 },
+	{ CAT_CFN_DATA_MAC_PORT, 2, 117, 0x0000 },
+	{ CAT_CFN_DATA_PM_AND_INV, 1, 161, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMB, 4, 157, 0x0000 },
+	{ CAT_CFN_DATA_PM_CMP, 32, 119, 0x0000 },
+	{ CAT_CFN_DATA_PM_DCT, 2, 151, 0x0000 },
+	{ CAT_CFN_DATA_PM_EXT_INV, 4, 153, 0x0000 },
+	{ CAT_CFN_DATA_PM_INV, 1, 163, 0x0000 },
+	{ CAT_CFN_DATA_PM_OR_INV, 1, 162, 0x0000 },
+	{ CAT_CFN_DATA_PTC_CFP, 2, 5, 0x0000 },
+	{ CAT_CFN_DATA_PTC_FRAG, 4, 36, 0x0000 },
+	{ CAT_CFN_DATA_PTC_INV, 1, 2, 0x0000 },
+	{ CAT_CFN_DATA_PTC_IP_PROT, 8, 40, 0x0000 },
+	{ CAT_CFN_DATA_PTC_ISL, 2, 3, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L2, 7, 12, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L3, 3, 33, 0x0000 },
+	{ CAT_CFN_DATA_PTC_L4, 5, 48, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MAC, 5, 7, 0x0000 },
+	{ CAT_CFN_DATA_PTC_MPLS, 8, 25, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_FRAG, 4, 81, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_IP_PROT, 8, 85, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L2, 2, 64, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L3, 3, 78, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_L4, 5, 93, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_MPLS, 8, 70, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TNL_VLAN, 4, 66, 0x0000 },
+	{ CAT_CFN_DATA_PTC_TUNNEL, 11, 53, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VLAN, 4, 21, 0x0000 },
+	{ CAT_CFN_DATA_PTC_VNTAG, 2, 19, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_ctrl_fields[] = {
+	{ CAT_COT_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_COT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cot_data_fields[] = {
+	{ CAT_COT_DATA_COLOR, 32, 0, 0x0000 },
+	{ CAT_COT_DATA_KM, 4, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_ctrl_fields[] = {
+	{ CAT_CTE_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_CTE_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cte_data_fields[] = {
+	{ CAT_CTE_DATA_COL_ENABLE, 1, 0, 0x0000 },
+	{ CAT_CTE_DATA_COR_ENABLE, 1, 1, 0x0000 },
+	{ CAT_CTE_DATA_EPP_ENABLE, 1, 9, 0x0000 },
+	{ CAT_CTE_DATA_HSH_ENABLE, 1, 2, 0x0000 },
+	{ CAT_CTE_DATA_HST_ENABLE, 1, 8, 0x0000 },
+	{ CAT_CTE_DATA_IPF_ENABLE, 1, 4, 0x0000 },
+	{ CAT_CTE_DATA_MSK_ENABLE, 1, 7, 0x0000 },
+	{ CAT_CTE_DATA_PDB_ENABLE, 1, 6, 0x0000 },
+	{ CAT_CTE_DATA_QSL_ENABLE, 1, 3, 0x0000 },
+	{ CAT_CTE_DATA_SLC_ENABLE, 1, 5, 0x0000 },
+	{ CAT_CTE_DATA_TPE_ENABLE, 1, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_ctrl_fields[] = {
+	{ CAT_CTS_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_CTS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_cts_data_fields[] = {
+	{ CAT_CTS_DATA_CAT_A, 6, 0, 0x0000 },
+	{ CAT_CTS_DATA_CAT_B, 6, 6, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_ctrl_fields[] = {
+	{ CAT_DCT_CTRL_ADR, 13, 0, 0x0000 },
+	{ CAT_DCT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_data_fields[] = {
+	{ CAT_DCT_DATA_RES, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_dct_sel_fields[] = {
+	{ CAT_DCT_SEL_LU, 2, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_ctrl_fields[] = {
+	{ CAT_EXO_CTRL_ADR, 2, 0, 0x0000 },
+	{ CAT_EXO_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_exo_data_fields[] = {
+	{ CAT_EXO_DATA_DYN, 5, 0, 0x0000 },
+	{ CAT_EXO_DATA_OFS, 11, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_ctrl_fields[] = {
+	{ CAT_FTE0_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte0_data_fields[] = {
+	{ CAT_FTE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_ctrl_fields[] = {
+	{ CAT_FTE1_CTRL_ADR, 9, 0, 0x0000 },
+	{ CAT_FTE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_fte1_data_fields[] = {
+	{ CAT_FTE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_join_fields[] = {
+	{ CAT_JOIN_J1, 2, 0, 0x0000 },
+	{ CAT_JOIN_J2, 1, 8, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_ctrl_fields[] = {
+	{ CAT_KCC_CTRL_ADR, 11, 0, 0x0000 },
+	{ CAT_KCC_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcc_data_fields[] = {
+	{ CAT_KCC_DATA_CATEGORY, 8, 64, 0x0000 },
+	{ CAT_KCC_DATA_ID, 12, 72, 0x0000 },
+	{ CAT_KCC_DATA_KEY, 64, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_ctrl_fields[] = {
+	{ CAT_KCE0_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce0_data_fields[] = {
+	{ CAT_KCE0_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_ctrl_fields[] = {
+	{ CAT_KCE1_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_KCE1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kce1_data_fields[] = {
+	{ CAT_KCE1_DATA_ENABLE, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_ctrl_fields[] = {
+	{ CAT_KCS0_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs0_data_fields[] = {
+	{ CAT_KCS0_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_ctrl_fields[] = {
+	{ CAT_KCS1_CTRL_ADR, 6, 0, 0x0000 },
+	{ CAT_KCS1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_kcs1_data_fields[] = {
+	{ CAT_KCS1_DATA_CATEGORY, 6, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_ctrl_fields[] = {
+	{ CAT_LEN_CTRL_ADR, 3, 0, 0x0000 },
+	{ CAT_LEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_len_data_fields[] = {
+	{ CAT_LEN_DATA_DYN1, 5, 28, 0x0000 },
+	{ CAT_LEN_DATA_DYN2, 5, 33, 0x0000 },
+	{ CAT_LEN_DATA_INV, 1, 38, 0x0000 },
+	{ CAT_LEN_DATA_LOWER, 14, 0, 0x0000 },
+	{ CAT_LEN_DATA_UPPER, 14, 14, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_ctrl_fields[] = {
+	{ CAT_RCK_CTRL_ADR, 8, 0, 0x0000 },
+	{ CAT_RCK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cat_rck_data_fields[] = {
+	{ CAT_RCK_DATA_CM0U, 1, 1, 0x0000 },
+	{ CAT_RCK_DATA_CM1U, 1, 5, 0x0000 },
+	{ CAT_RCK_DATA_CM2U, 1, 9, 0x0000 },
+	{ CAT_RCK_DATA_CM3U, 1, 13, 0x0000 },
+	{ CAT_RCK_DATA_CM4U, 1, 17, 0x0000 },
+	{ CAT_RCK_DATA_CM5U, 1, 21, 0x0000 },
+	{ CAT_RCK_DATA_CM6U, 1, 25, 0x0000 },
+	{ CAT_RCK_DATA_CM7U, 1, 29, 0x0000 },
+	{ CAT_RCK_DATA_CML0, 1, 0, 0x0000 },
+	{ CAT_RCK_DATA_CML1, 1, 4, 0x0000 },
+	{ CAT_RCK_DATA_CML2, 1, 8, 0x0000 },
+	{ CAT_RCK_DATA_CML3, 1, 12, 0x0000 },
+	{ CAT_RCK_DATA_CML4, 1, 16, 0x0000 },
+	{ CAT_RCK_DATA_CML5, 1, 20, 0x0000 },
+	{ CAT_RCK_DATA_CML6, 1, 24, 0x0000 },
+	{ CAT_RCK_DATA_CML7, 1, 28, 0x0000 },
+	{ CAT_RCK_DATA_SEL0, 1, 2, 0x0000 },
+	{ CAT_RCK_DATA_SEL1, 1, 6, 0x0000 },
+	{ CAT_RCK_DATA_SEL2, 1, 10, 0x0000 },
+	{ CAT_RCK_DATA_SEL3, 1, 14, 0x0000 },
+	{ CAT_RCK_DATA_SEL4, 1, 18, 0x0000 },
+	{ CAT_RCK_DATA_SEL5, 1, 22, 0x0000 },
+	{ CAT_RCK_DATA_SEL6, 1, 26, 0x0000 },
+	{ CAT_RCK_DATA_SEL7, 1, 30, 0x0000 },
+	{ CAT_RCK_DATA_SEU0, 1, 3, 0x0000 },
+	{ CAT_RCK_DATA_SEU1, 1, 7, 0x0000 },
+	{ CAT_RCK_DATA_SEU2, 1, 11, 0x0000 },
+	{ CAT_RCK_DATA_SEU3, 1, 15, 0x0000 },
+	{ CAT_RCK_DATA_SEU4, 1, 19, 0x0000 },
+	{ CAT_RCK_DATA_SEU5, 1, 23, 0x0000 },
+	{ CAT_RCK_DATA_SEU6, 1, 27, 0x0000 },
+	{ CAT_RCK_DATA_SEU7, 1, 31, 0x0000 },
+};
+
+static nt_fpga_register_init_t cat_registers[] = {
+	{ CAT_CCT_CTRL, 30, 32, REGISTER_TYPE_WO, 0, 2, cat_cct_ctrl_fields },
+	{ CAT_CCT_DATA, 31, 36, REGISTER_TYPE_WO, 0, 2, cat_cct_data_fields },
+	{ CAT_CFN_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, cat_cfn_ctrl_fields },
+	{ CAT_CFN_DATA, 11, 179, REGISTER_TYPE_WO, 0, 44, cat_cfn_data_fields },
+	{ CAT_COT_CTRL, 28, 32, REGISTER_TYPE_WO, 0, 2, cat_cot_ctrl_fields },
+	{ CAT_COT_DATA, 29, 36, REGISTER_TYPE_WO, 0, 2, cat_cot_data_fields },
+	{ CAT_CTE_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2, cat_cte_ctrl_fields },
+	{ CAT_CTE_DATA, 25, 11, REGISTER_TYPE_WO, 0, 11, cat_cte_data_fields },
+	{ CAT_CTS_CTRL, 26, 32, REGISTER_TYPE_WO, 0, 2, cat_cts_ctrl_fields },
+	{ CAT_CTS_DATA, 27, 12, REGISTER_TYPE_WO, 0, 2, cat_cts_data_fields },
+	{ CAT_DCT_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, cat_dct_ctrl_fields },
+	{ CAT_DCT_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1, cat_dct_data_fields },
+	{ CAT_DCT_SEL, 4, 2, REGISTER_TYPE_WO, 0, 1, cat_dct_sel_fields },
+	{ CAT_EXO_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, cat_exo_ctrl_fields },
+	{ CAT_EXO_DATA, 1, 27, REGISTER_TYPE_WO, 0, 2, cat_exo_data_fields },
+	{ CAT_FTE0_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2, cat_fte0_ctrl_fields },
+	{ CAT_FTE0_DATA, 17, 8, REGISTER_TYPE_WO, 0, 1, cat_fte0_data_fields },
+	{ CAT_FTE1_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2, cat_fte1_ctrl_fields },
+	{ CAT_FTE1_DATA, 23, 8, REGISTER_TYPE_WO, 0, 1, cat_fte1_data_fields },
+	{ CAT_JOIN, 5, 9, REGISTER_TYPE_WO, 0, 2, cat_join_fields },
+	{ CAT_KCC_CTRL, 32, 32, REGISTER_TYPE_WO, 0, 2, cat_kcc_ctrl_fields },
+	{ CAT_KCC_DATA, 33, 84, REGISTER_TYPE_WO, 0, 3, cat_kcc_data_fields },
+	{ CAT_KCE0_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, cat_kce0_ctrl_fields },
+	{ CAT_KCE0_DATA, 13, 8, REGISTER_TYPE_WO, 0, 1, cat_kce0_data_fields },
+	{ CAT_KCE1_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2, cat_kce1_ctrl_fields },
+	{ CAT_KCE1_DATA, 19, 8, REGISTER_TYPE_WO, 0, 1, cat_kce1_data_fields },
+	{ CAT_KCS0_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs0_ctrl_fields },
+	{ CAT_KCS0_DATA, 15, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs0_data_fields },
+	{ CAT_KCS1_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2, cat_kcs1_ctrl_fields },
+	{ CAT_KCS1_DATA, 21, 6, REGISTER_TYPE_WO, 0, 1, cat_kcs1_data_fields },
+	{ CAT_LEN_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2, cat_len_ctrl_fields },
+	{ CAT_LEN_DATA, 9, 39, REGISTER_TYPE_WO, 0, 5, cat_len_data_fields },
+	{ CAT_RCK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, cat_rck_ctrl_fields },
+	{ CAT_RCK_DATA, 3, 32, REGISTER_TYPE_WO, 0, 32, cat_rck_data_fields },
+};
+
+static nt_fpga_field_init_t cpy_writer0_ctrl_fields[] = {
+	{ CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_data_fields[] = {
+	{ CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER0_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER0_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_ctrl_fields[] = {
+	{ CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer0_mask_data_fields[] = {
+	{ CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_ctrl_fields[] = {
+	{ CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_data_fields[] = {
+	{ CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER1_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER1_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_ctrl_fields[] = {
+	{ CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer1_mask_data_fields[] = {
+	{ CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_ctrl_fields[] = {
+	{ CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_data_fields[] = {
+	{ CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER2_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER2_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_ctrl_fields[] = {
+	{ CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer2_mask_data_fields[] = {
+	{ CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_ctrl_fields[] = {
+	{ CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_data_fields[] = {
+	{ CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER3_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER3_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_ctrl_fields[] = {
+	{ CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer3_mask_data_fields[] = {
+	{ CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_ctrl_fields[] = {
+	{ CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_data_fields[] = {
+	{ CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 },
+	{ CPY_WRITER4_DATA_LEN, 4, 22, 0x0000 },
+	{ CPY_WRITER4_DATA_MASK_POINTER, 4, 26, 0x0000 },
+	{ CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 },
+	{ CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_ctrl_fields[] = {
+	{ CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 },
+	{ CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t cpy_writer4_mask_data_fields[] = {
+	{ CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t cpy_registers[] = {
+	{	CPY_WRITER0_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_ctrl_fields
+	},
+	{	CPY_WRITER0_DATA, 1, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer0_data_fields
+	},
+	{	CPY_WRITER0_MASK_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer0_mask_ctrl_fields
+	},
+	{	CPY_WRITER0_MASK_DATA, 3, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer0_mask_data_fields
+	},
+	{	CPY_WRITER1_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_ctrl_fields
+	},
+	{	CPY_WRITER1_DATA, 5, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer1_data_fields
+	},
+	{	CPY_WRITER1_MASK_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer1_mask_ctrl_fields
+	},
+	{	CPY_WRITER1_MASK_DATA, 7, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer1_mask_data_fields
+	},
+	{	CPY_WRITER2_CTRL, 8, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_ctrl_fields
+	},
+	{	CPY_WRITER2_DATA, 9, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer2_data_fields
+	},
+	{	CPY_WRITER2_MASK_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer2_mask_ctrl_fields
+	},
+	{	CPY_WRITER2_MASK_DATA, 11, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer2_mask_data_fields
+	},
+	{	CPY_WRITER3_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_ctrl_fields
+	},
+	{	CPY_WRITER3_DATA, 13, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer3_data_fields
+	},
+	{	CPY_WRITER3_MASK_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer3_mask_ctrl_fields
+	},
+	{	CPY_WRITER3_MASK_DATA, 15, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer3_mask_data_fields
+	},
+	{	CPY_WRITER4_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_ctrl_fields
+	},
+	{	CPY_WRITER4_DATA, 17, 30, REGISTER_TYPE_WO, 0, 5,
+		cpy_writer4_data_fields
+	},
+	{	CPY_WRITER4_MASK_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		cpy_writer4_mask_ctrl_fields
+	},
+	{	CPY_WRITER4_MASK_DATA, 19, 16, REGISTER_TYPE_WO, 0, 1,
+		cpy_writer4_mask_data_fields
+	},
+};
+
+/* CSU (checksum update) module: RCP table fields and its register layout;
+ * register entries are { id, addr_rel, bw, type, reset, nb_fields, fields }.
+ */
+static nt_fpga_field_init_t csu_rcp_ctrl_fields[] = {
+	{ CSU_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ CSU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t csu_rcp_data_fields[] = {
+	{ CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 },
+	{ CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 },
+	{ CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 },
+	{ CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 },
+};
+
+static nt_fpga_register_init_t csu_registers[] = {
+	{ CSU_RCP_CTRL, 1, 32, REGISTER_TYPE_WO, 0, 2, csu_rcp_ctrl_fields },
+	{ CSU_RCP_DATA, 2, 10, REGISTER_TYPE_WO, 0, 4, csu_rcp_data_fields },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_ctrl_fields[] = {
+	{ DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_am_data_fields[] = {
+	{ DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_control_fields[] = {
+	{ DBS_RX_CONTROL_AME, 1, 7, 0 },  { DBS_RX_CONTROL_AMS, 4, 8, 8 },
+	{ DBS_RX_CONTROL_LQ, 7, 0, 0 },	  { DBS_RX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_ctrl_fields[] = {
+	{ DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_dr_data_fields[] = {
+	{ DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_RX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_idle_fields[] = {
+	{ DBS_RX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_RX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_fields[] = {
+	{ DBS_RX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_RX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_RX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_init_val_fields[] = {
+	{ DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_ptr_fields[] = {
+	{ DBS_RX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_RX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_RX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_ctrl_fields[] = {
+	{ DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_rx_uw_data_fields[] = {
+	{ DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_RX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_RX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_RX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_ctrl_fields[] = {
+	{ DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_am_data_fields[] = {
+	{ DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 },
+	{ DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_AM_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_AM_DATA_INT, 1, 74, 0x0000 },
+	{ DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_control_fields[] = {
+	{ DBS_TX_CONTROL_AME, 1, 7, 0 },  { DBS_TX_CONTROL_AMS, 4, 8, 5 },
+	{ DBS_TX_CONTROL_LQ, 7, 0, 0 },	  { DBS_TX_CONTROL_QE, 1, 17, 0 },
+	{ DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 },
+};
+
+/*
+ * DBS TX-path layout tables.
+ * nt_fpga_field_init_t entries are { field id, bit width, lsb position,
+ * reset value }; nt_fpga_register_init_t entries are { register id, index,
+ * bit width, access type, register reset value, field count, field table }.
+ * The register reset value is the OR of each field's reset shifted to its
+ * lsb (e.g. DBS_TX_CONTROL: 66816 = AMS(5)<<8 | UWS(8)<<13), and the field
+ * count matches the length of the referenced field table.
+ */
+static nt_fpga_field_init_t dbs_tx_dr_ctrl_fields[] = {
+	{ DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_dr_data_fields[] = {
+	{ DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 },
+	{ DBS_TX_DR_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 },
+	{ DBS_TX_DR_DATA_QS, 15, 72, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_idle_fields[] = {
+	{ DBS_TX_IDLE_BUSY, 1, 8, 0 },
+	{ DBS_TX_IDLE_IDLE, 1, 0, 0x0000 },
+	{ DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_fields[] = {
+	{ DBS_TX_INIT_BUSY, 1, 8, 0 },
+	{ DBS_TX_INIT_INIT, 1, 0, 0x0000 },
+	{ DBS_TX_INIT_QUEUE, 7, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_init_val_fields[] = {
+	{ DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 },
+	{ DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_ptr_fields[] = {
+	{ DBS_TX_PTR_PTR, 16, 0, 0x0000 },
+	{ DBS_TX_PTR_QUEUE, 7, 16, 0x0000 },
+	{ DBS_TX_PTR_VALID, 1, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_ctrl_fields[] = {
+	{ DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_data_fields[] = {
+	{ DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 },
+	{ DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 },
+	{ DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qos_rate_fields[] = {
+	{ DBS_TX_QOS_RATE_DIV, 19, 16, 2 },
+	{ DBS_TX_QOS_RATE_MUL, 16, 0, 1 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_ctrl_fields[] = {
+	{ DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_qp_data_fields[] = {
+	{ DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_ctrl_fields[] = {
+	{ DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 },
+	{ DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t dbs_tx_uw_data_fields[] = {
+	{ DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 },
+	{ DBS_TX_UW_DATA_HID, 8, 64, 0x0000 },
+	{ DBS_TX_UW_DATA_INO, 1, 93, 0x0000 },
+	{ DBS_TX_UW_DATA_INT, 1, 88, 0x0000 },
+	{ DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 },
+	{ DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 },
+	{ DBS_TX_UW_DATA_QS, 15, 72, 0x0000 },
+	{ DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 },
+};
+
+/* DBS register map: RX and TX control/init/pointer registers plus the
+ * WO CTRL/DATA pairs used for indirect table access. */
+static nt_fpga_register_init_t dbs_registers[] = {
+	{	DBS_RX_AM_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_am_ctrl_fields
+	},
+	{	DBS_RX_AM_DATA, 11, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_am_data_fields
+	},
+	{	DBS_RX_CONTROL, 0, 18, REGISTER_TYPE_RW, 43008, 6,
+		dbs_rx_control_fields
+	},
+	{	DBS_RX_DR_CTRL, 18, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_dr_ctrl_fields
+	},
+	{	DBS_RX_DR_DATA, 19, 89, REGISTER_TYPE_WO, 0, 5,
+		dbs_rx_dr_data_fields
+	},
+	{ DBS_RX_IDLE, 8, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_idle_fields },
+	{ DBS_RX_INIT, 2, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_init_fields },
+	{	DBS_RX_INIT_VAL, 3, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_init_val_fields
+	},
+	{ DBS_RX_PTR, 4, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields },
+	{	DBS_RX_UW_CTRL, 14, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_rx_uw_ctrl_fields
+	},
+	{	DBS_RX_UW_DATA, 15, 93, REGISTER_TYPE_WO, 0, 7,
+		dbs_rx_uw_data_fields
+	},
+	{	DBS_TX_AM_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_am_ctrl_fields
+	},
+	{	DBS_TX_AM_DATA, 13, 75, REGISTER_TYPE_WO, 0, 5,
+		dbs_tx_am_data_fields
+	},
+	{	DBS_TX_CONTROL, 1, 18, REGISTER_TYPE_RW, 66816, 6,
+		dbs_tx_control_fields
+	},
+	{	DBS_TX_DR_CTRL, 20, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_dr_ctrl_fields
+	},
+	{	DBS_TX_DR_DATA, 21, 90, REGISTER_TYPE_WO, 0, 6,
+		dbs_tx_dr_data_fields
+	},
+	{ DBS_TX_IDLE, 9, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_idle_fields },
+	{ DBS_TX_INIT, 5, 9, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_init_fields },
+	{	DBS_TX_INIT_VAL, 6, 31, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_init_val_fields
+	},
+	{ DBS_TX_PTR, 7, 24, REGISTER_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields },
+	{	DBS_TX_QOS_CTRL, 24, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qos_ctrl_fields
+	},
+	{	DBS_TX_QOS_DATA, 25, 44, REGISTER_TYPE_WO, 0, 3,
+		dbs_tx_qos_data_fields
+	},
+	{	DBS_TX_QOS_RATE, 26, 35, REGISTER_TYPE_RW, 131073, 2,
+		dbs_tx_qos_rate_fields
+	},
+	{	DBS_TX_QP_CTRL, 22, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_qp_ctrl_fields
+	},
+	{	DBS_TX_QP_DATA, 23, 1, REGISTER_TYPE_WO, 0, 1,
+		dbs_tx_qp_data_fields
+	},
+	{	DBS_TX_UW_CTRL, 16, 32, REGISTER_TYPE_WO, 0, 2,
+		dbs_tx_uw_ctrl_fields
+	},
+	{	DBS_TX_UW_DATA, 17, 94, REGISTER_TYPE_WO, 0, 8,
+		dbs_tx_uw_data_fields
+	},
+};
+
+/*
+ * FLM (flow matcher, presumably — acronym not expanded in this file) layout
+ * tables. Field entries are { field id, bit width, lsb position, reset };
+ * register entries are { register id, index, bit width, access type, reset,
+ * field count, field table }. Non-zero register resets are the OR of field
+ * resets at their lsb (e.g. FLM_CONTROL: 134217728 = SPLIT_SDRAM_USAGE(16)<<23).
+ */
+static nt_fpga_field_init_t flm_buf_ctrl_fields[] = {
+	{ FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 },
+	{ FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 },
+	{ FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_control_fields[] = {
+	{ FLM_CONTROL_CRCRD, 1, 12, 0x0000 },
+	{ FLM_CONTROL_CRCWR, 1, 11, 0x0000 },
+	{ FLM_CONTROL_EAB, 5, 18, 0 },
+	{ FLM_CONTROL_ENABLE, 1, 0, 0 },
+	{ FLM_CONTROL_INIT, 1, 1, 0x0000 },
+	{ FLM_CONTROL_LDS, 1, 2, 0x0000 },
+	{ FLM_CONTROL_LFS, 1, 3, 0x0000 },
+	{ FLM_CONTROL_LIS, 1, 4, 0x0000 },
+	{ FLM_CONTROL_PDS, 1, 9, 0x0000 },
+	{ FLM_CONTROL_PIS, 1, 10, 0x0000 },
+	{ FLM_CONTROL_RBL, 4, 13, 0 },
+	{ FLM_CONTROL_RDS, 1, 7, 0x0000 },
+	{ FLM_CONTROL_RIS, 1, 8, 0x0000 },
+	{ FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 },
+	{ FLM_CONTROL_UDS, 1, 5, 0x0000 },
+	{ FLM_CONTROL_UIS, 1, 6, 0x0000 },
+	{ FLM_CONTROL_WPD, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t flm_inf_data_fields[] = {
+	{ FLM_INF_DATA_BYTES, 64, 0, 0x0000 },
+	{ FLM_INF_DATA_CAUSE, 3, 264, 0x0000 },
+	{ FLM_INF_DATA_EOR, 1, 287, 0x0000 },
+	{ FLM_INF_DATA_ID, 72, 192, 0x0000 },
+	{ FLM_INF_DATA_PACKETS, 64, 64, 0x0000 },
+	{ FLM_INF_DATA_TS, 64, 128, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_aps_fields[] = {
+	{ FLM_LOAD_APS_APS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_bin_fields[] = {
+	{ FLM_LOAD_BIN_BIN, 30, 0, 8388607 },
+};
+
+static nt_fpga_field_init_t flm_load_lps_fields[] = {
+	{ FLM_LOAD_LPS_LPS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_load_pps_fields[] = {
+	{ FLM_LOAD_PPS_PPS, 32, 0, 0x0000 },
+};
+
+/* 768-bit learn record pushed through FLM_LRN_DATA (32 fields). */
+static nt_fpga_field_init_t flm_lrn_data_fields[] = {
+	{ FLM_LRN_DATA_ADJ, 32, 480, 0x0000 },
+	{ FLM_LRN_DATA_COLOR, 32, 448, 0x0000 },
+	{ FLM_LRN_DATA_DSCP, 6, 734, 0x0000 },
+	{ FLM_LRN_DATA_ENT, 1, 729, 0x0000 },
+	{ FLM_LRN_DATA_EOR, 1, 767, 0x0000 },
+	{ FLM_LRN_DATA_FILL, 12, 584, 0x0000 },
+	{ FLM_LRN_DATA_FT, 4, 596, 0x0000 },
+	{ FLM_LRN_DATA_FT_MBR, 4, 600, 0x0000 },
+	{ FLM_LRN_DATA_FT_MISS, 4, 604, 0x0000 },
+	{ FLM_LRN_DATA_ID, 72, 512, 0x0000 },
+	{ FLM_LRN_DATA_KID, 8, 328, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID1, 28, 608, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID2, 28, 636, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID3, 28, 664, 0x0000 },
+	{ FLM_LRN_DATA_MBR_ID4, 28, 692, 0x0000 },
+	{ FLM_LRN_DATA_NAT_EN, 1, 747, 0x0000 },
+	{ FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 },
+	{ FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 },
+	{ FLM_LRN_DATA_OP, 4, 730, 0x0000 },
+	{ FLM_LRN_DATA_PRIO, 2, 727, 0x0000 },
+	{ FLM_LRN_DATA_PROT, 8, 320, 0x0000 },
+	{ FLM_LRN_DATA_QFI, 6, 740, 0x0000 },
+	{ FLM_LRN_DATA_QW0, 128, 192, 0x0000 },
+	{ FLM_LRN_DATA_QW4, 128, 64, 0x0000 },
+	{ FLM_LRN_DATA_RATE, 16, 416, 0x0000 },
+	{ FLM_LRN_DATA_RQI, 1, 746, 0x0000 },
+	{ FLM_LRN_DATA_SIZE, 16, 432, 0x0000 },
+	{ FLM_LRN_DATA_STAT_PROF, 4, 723, 0x0000 },
+	{ FLM_LRN_DATA_SW8, 32, 32, 0x0000 },
+	{ FLM_LRN_DATA_SW9, 32, 0, 0x0000 },
+	{ FLM_LRN_DATA_TEID, 32, 368, 0x0000 },
+	{ FLM_LRN_DATA_VOL_IDX, 3, 720, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_prio_fields[] = {
+	{ FLM_PRIO_FT0, 4, 4, 1 },     { FLM_PRIO_FT1, 4, 12, 1 },
+	{ FLM_PRIO_FT2, 4, 20, 1 },    { FLM_PRIO_FT3, 4, 28, 1 },
+	{ FLM_PRIO_LIMIT0, 4, 0, 0 },  { FLM_PRIO_LIMIT1, 4, 8, 0 },
+	{ FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 },
+};
+
+static nt_fpga_field_init_t flm_pst_ctrl_fields[] = {
+	{ FLM_PST_CTRL_ADR, 4, 0, 0x0000 },
+	{ FLM_PST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_pst_data_fields[] = {
+	{ FLM_PST_DATA_BP, 5, 0, 0x0000 },
+	{ FLM_PST_DATA_PP, 5, 5, 0x0000 },
+	{ FLM_PST_DATA_TP, 5, 10, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_ctrl_fields[] = {
+	{ FLM_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ FLM_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_rcp_data_fields[] = {
+	{ FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 },
+	{ FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 },
+	{ FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 },
+	{ FLM_RCP_DATA_IPN, 1, 386, 0x0000 },
+	{ FLM_RCP_DATA_KID, 8, 377, 0x0000 },
+	{ FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 },
+	{ FLM_RCP_DATA_MASK, 320, 57, 0x0000 },
+	{ FLM_RCP_DATA_OPN, 1, 385, 0x0000 },
+	{ FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 },
+	{ FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 },
+	{ FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 },
+	{ FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 },
+	{ FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 },
+	{ FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 },
+	{ FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 },
+	{ FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 },
+	{ FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 },
+	{ FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 },
+	{ FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_scrub_fields[] = {
+	{ FLM_SCRUB_I, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t flm_status_fields[] = {
+	{ FLM_STATUS_CALIBDONE, 1, 0, 0x0000 },
+	{ FLM_STATUS_CRCERR, 1, 5, 0x0000 },
+	{ FLM_STATUS_CRITICAL, 1, 3, 0x0000 },
+	{ FLM_STATUS_EFT_BP, 1, 6, 0x0000 },
+	{ FLM_STATUS_IDLE, 1, 2, 0x0000 },
+	{ FLM_STATUS_INITDONE, 1, 1, 0x0000 },
+	{ FLM_STATUS_PANIC, 1, 4, 0x0000 },
+};
+
+/* Per-event 32-bit counters; each is the sole field of its RO register. */
+static nt_fpga_field_init_t flm_stat_aul_done_fields[] = {
+	{ FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_fail_fields[] = {
+	{ FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_aul_ignore_fields[] = {
+	{ FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_hit_fields[] = {
+	{ FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_miss_fields[] = {
+	{ FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_csh_unh_fields[] = {
+	{ FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_move_fields[] = {
+	{ FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_cuc_start_fields[] = {
+	{ FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_flows_fields[] = {
+	{ FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_done_fields[] = {
+	{ FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_inf_skip_fields[] = {
+	{ FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_done_fields[] = {
+	{ FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_fail_fields[] = {
+	{ FLM_STAT_LRN_FAIL_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_lrn_ignore_fields[] = {
+	{ FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_dis_fields[] = {
+	{ FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_hit_fields[] = {
+	{ FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_miss_fields[] = {
+	{ FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_pck_unh_fields[] = {
+	{ FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_done_fields[] = {
+	{ FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_prb_ignore_fields[] = {
+	{ FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_done_fields[] = {
+	{ FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_rel_ignore_fields[] = {
+	{ FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_sta_done_fields[] = {
+	{ FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_tul_done_fields[] = {
+	{ FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_done_fields[] = {
+	{ FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_stat_unl_ignore_fields[] = {
+	{ FLM_STAT_UNL_IGNORE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_sta_data_fields[] = {
+	{ FLM_STA_DATA_EOR, 1, 95, 0x0000 },
+	{ FLM_STA_DATA_ID, 72, 0, 0x0000 },
+	{ FLM_STA_DATA_LDS, 1, 72, 0x0000 },
+	{ FLM_STA_DATA_LFS, 1, 73, 0x0000 },
+	{ FLM_STA_DATA_LIS, 1, 74, 0x0000 },
+	{ FLM_STA_DATA_PDS, 1, 79, 0x0000 },
+	{ FLM_STA_DATA_PIS, 1, 80, 0x0000 },
+	{ FLM_STA_DATA_RDS, 1, 77, 0x0000 },
+	{ FLM_STA_DATA_RIS, 1, 78, 0x0000 },
+	{ FLM_STA_DATA_UDS, 1, 75, 0x0000 },
+	{ FLM_STA_DATA_UIS, 1, 76, 0x0000 },
+};
+
+static nt_fpga_field_init_t flm_timeout_fields[] = {
+	{ FLM_TIMEOUT_T, 32, 0, 0 },
+};
+
+/* FLM register map; statistics registers are read-only counters. */
+static nt_fpga_register_init_t flm_registers[] = {
+	{ FLM_BUF_CTRL, 14, 48, REGISTER_TYPE_RW, 0, 3, flm_buf_ctrl_fields },
+	{	FLM_CONTROL, 0, 28, REGISTER_TYPE_MIXED, 134217728, 17,
+		flm_control_fields
+	},
+	{ FLM_INF_DATA, 16, 288, REGISTER_TYPE_RO, 0, 6, flm_inf_data_fields },
+	{ FLM_LOAD_APS, 7, 32, REGISTER_TYPE_RO, 0, 1, flm_load_aps_fields },
+	{	FLM_LOAD_BIN, 4, 30, REGISTER_TYPE_WO, 8388607, 1,
+		flm_load_bin_fields
+	},
+	{ FLM_LOAD_LPS, 6, 32, REGISTER_TYPE_RO, 0, 1, flm_load_lps_fields },
+	{ FLM_LOAD_PPS, 5, 32, REGISTER_TYPE_RO, 0, 1, flm_load_pps_fields },
+	{ FLM_LRN_DATA, 15, 768, REGISTER_TYPE_WO, 0, 32, flm_lrn_data_fields },
+	{ FLM_PRIO, 8, 32, REGISTER_TYPE_WO, 269488144, 8, flm_prio_fields },
+	{ FLM_PST_CTRL, 10, 32, REGISTER_TYPE_WO, 0, 2, flm_pst_ctrl_fields },
+	{ FLM_PST_DATA, 11, 15, REGISTER_TYPE_WO, 0, 3, flm_pst_data_fields },
+	{ FLM_RCP_CTRL, 12, 32, REGISTER_TYPE_WO, 0, 2, flm_rcp_ctrl_fields },
+	{ FLM_RCP_DATA, 13, 403, REGISTER_TYPE_WO, 0, 19, flm_rcp_data_fields },
+	{ FLM_SCRUB, 3, 16, REGISTER_TYPE_WO, 0, 1, flm_scrub_fields },
+	{ FLM_STATUS, 1, 12, REGISTER_TYPE_MIXED, 0, 7, flm_status_fields },
+	{	FLM_STAT_AUL_DONE, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_done_fields
+	},
+	{	FLM_STAT_AUL_FAIL, 43, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_fail_fields
+	},
+	{	FLM_STAT_AUL_IGNORE, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_aul_ignore_fields
+	},
+	{	FLM_STAT_CSH_HIT, 52, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_hit_fields
+	},
+	{	FLM_STAT_CSH_MISS, 53, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_miss_fields
+	},
+	{	FLM_STAT_CSH_UNH, 54, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_csh_unh_fields
+	},
+	{	FLM_STAT_CUC_MOVE, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_move_fields
+	},
+	{	FLM_STAT_CUC_START, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_cuc_start_fields
+	},
+	{	FLM_STAT_FLOWS, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_flows_fields
+	},
+	{	FLM_STAT_INF_DONE, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_done_fields
+	},
+	{	FLM_STAT_INF_SKIP, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_inf_skip_fields
+	},
+	{	FLM_STAT_LRN_DONE, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_done_fields
+	},
+	{	FLM_STAT_LRN_FAIL, 34, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_fail_fields
+	},
+	{	FLM_STAT_LRN_IGNORE, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_lrn_ignore_fields
+	},
+	{	FLM_STAT_PCK_DIS, 51, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_dis_fields
+	},
+	{	FLM_STAT_PCK_HIT, 48, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_hit_fields
+	},
+	{	FLM_STAT_PCK_MISS, 49, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_miss_fields
+	},
+	{	FLM_STAT_PCK_UNH, 50, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_pck_unh_fields
+	},
+	{	FLM_STAT_PRB_DONE, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_done_fields
+	},
+	{	FLM_STAT_PRB_IGNORE, 40, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_prb_ignore_fields
+	},
+	{	FLM_STAT_REL_DONE, 37, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_done_fields
+	},
+	{	FLM_STAT_REL_IGNORE, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_rel_ignore_fields
+	},
+	{	FLM_STAT_STA_DONE, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_sta_done_fields
+	},
+	{	FLM_STAT_TUL_DONE, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_tul_done_fields
+	},
+	{	FLM_STAT_UNL_DONE, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_done_fields
+	},
+	{	FLM_STAT_UNL_IGNORE, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		flm_stat_unl_ignore_fields
+	},
+	{ FLM_STA_DATA, 17, 96, REGISTER_TYPE_RO, 0, 11, flm_sta_data_fields },
+	{ FLM_TIMEOUT, 2, 32, REGISTER_TYPE_WO, 0, 1, flm_timeout_fields },
+};
+
+/*
+ * GFG layout tables; two identically-shaped instances (suffix 0/1,
+ * presumably one per port — confirm against module docs). Field entries
+ * are { field id, bit width, lsb position, reset }; register entries are
+ * { register id, index, bit width, access type, reset, field count, fields }.
+ */
+static nt_fpga_field_init_t gfg_burstsize0_fields[] = {
+	{ GFG_BURSTSIZE0_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_burstsize1_fields[] = {
+	{ GFG_BURSTSIZE1_VAL, 24, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl0_fields[] = {
+	{ GFG_CTRL0_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL0_MODE, 3, 1, 0 },
+	{ GFG_CTRL0_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL0_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_ctrl1_fields[] = {
+	{ GFG_CTRL1_ENABLE, 1, 0, 0 },
+	{ GFG_CTRL1_MODE, 3, 1, 0 },
+	{ GFG_CTRL1_PRBS_EN, 1, 4, 0 },
+	{ GFG_CTRL1_SIZE, 14, 16, 64 },
+};
+
+static nt_fpga_field_init_t gfg_run0_fields[] = {
+	{ GFG_RUN0_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_run1_fields[] = {
+	{ GFG_RUN1_RUN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask0_fields[] = {
+	{ GFG_SIZEMASK0_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_sizemask1_fields[] = {
+	{ GFG_SIZEMASK1_VAL, 14, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid0_fields[] = {
+	{ GFG_STREAMID0_VAL, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t gfg_streamid1_fields[] = {
+	{ GFG_STREAMID1_VAL, 8, 0, 1 },
+};
+
+/* Note: CTRL reset 4194304 == SIZE(64)<<16; STREAMID1 resets to 1. */
+static nt_fpga_register_init_t gfg_registers[] = {
+	{	GFG_BURSTSIZE0, 3, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize0_fields
+	},
+	{	GFG_BURSTSIZE1, 8, 24, REGISTER_TYPE_WO, 0, 1,
+		gfg_burstsize1_fields
+	},
+	{ GFG_CTRL0, 0, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
+	{ GFG_CTRL1, 5, 30, REGISTER_TYPE_WO, 4194304, 4, gfg_ctrl1_fields },
+	{ GFG_RUN0, 1, 1, REGISTER_TYPE_WO, 0, 1, gfg_run0_fields },
+	{ GFG_RUN1, 6, 1, REGISTER_TYPE_WO, 0, 1, gfg_run1_fields },
+	{ GFG_SIZEMASK0, 4, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask0_fields },
+	{ GFG_SIZEMASK1, 9, 14, REGISTER_TYPE_WO, 0, 1, gfg_sizemask1_fields },
+	{ GFG_STREAMID0, 2, 8, REGISTER_TYPE_WO, 0, 1, gfg_streamid0_fields },
+	{ GFG_STREAMID1, 7, 8, REGISTER_TYPE_WO, 1, 1, gfg_streamid1_fields },
+};
+
+/*
+ * GMF layout tables (IFG/timestamp related TX MAC feed, judging by the
+ * field names — confirm against module docs). Field entries are
+ * { field id, bit width, lsb position, reset }; register entries are
+ * { register id, index, bit width, access type, reset, field count, fields }.
+ */
+static nt_fpga_field_init_t gmf_ctrl_fields[] = {
+	{ GMF_CTRL_ENABLE, 1, 0, 0 },
+	{ GMF_CTRL_FCS_ALWAYS, 1, 1, 0 },
+	{ GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 },
+	{ GMF_CTRL_IFG_ENABLE, 1, 2, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 },
+	{ GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 },
+	{ GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 },
+	{ GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 },
+	{ GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 },
+};
+
+static nt_fpga_field_init_t gmf_debug_lane_marker_fields[] = {
+	{ GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_max_adjust_slack_fields[] = {
+	{ GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_set_clock_delta_adjust_fields[] = {
+	{ GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ifg_tx_now_on_ts_fields[] = {
+	{ GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_speed_fields[] = {
+	{ GMF_SPEED_IFG_SPEED, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_data_buffer_fields[] = {
+	{ GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t gmf_stat_max_delayed_pkt_fields[] = {
+	{ GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_next_pkt_fields[] = {
+	{ GMF_STAT_NEXT_PKT_NS, 64, 0, 0 },
+};
+
+static nt_fpga_field_init_t gmf_stat_sticky_fields[] = {
+	{ GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 },
+	{ GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t gmf_ts_inject_fields[] = {
+	{ GMF_TS_INJECT_OFFSET, 14, 0, 0 },
+	{ GMF_TS_INJECT_POS, 2, 14, 0 },
+};
+
+/* GMF register map; the two REGISTER_TYPE_RC1 entries are clear-on-read. */
+static nt_fpga_register_init_t gmf_registers[] = {
+	{ GMF_CTRL, 0, 10, REGISTER_TYPE_WO, 0, 10, gmf_ctrl_fields },
+	{	GMF_DEBUG_LANE_MARKER, 7, 16, REGISTER_TYPE_WO, 16384, 1,
+		gmf_debug_lane_marker_fields
+	},
+	{	GMF_IFG_MAX_ADJUST_SLACK, 4, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_max_adjust_slack_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA, 2, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_fields
+	},
+	{	GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_set_clock_delta_adjust_fields
+	},
+	{	GMF_IFG_TX_NOW_ON_TS, 5, 64, REGISTER_TYPE_WO, 0, 1,
+		gmf_ifg_tx_now_on_ts_fields
+	},
+	{ GMF_SPEED, 1, 64, REGISTER_TYPE_WO, 0, 1, gmf_speed_fields },
+	{	GMF_STAT_DATA_BUFFER, 9, 15, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_data_buffer_fields
+	},
+	{	GMF_STAT_MAX_DELAYED_PKT, 11, 64, REGISTER_TYPE_RC1, 0, 1,
+		gmf_stat_max_delayed_pkt_fields
+	},
+	{	GMF_STAT_NEXT_PKT, 10, 64, REGISTER_TYPE_RO, 0, 1,
+		gmf_stat_next_pkt_fields
+	},
+	{	GMF_STAT_STICKY, 8, 2, REGISTER_TYPE_RC1, 0, 2,
+		gmf_stat_sticky_fields
+	},
+	{ GMF_TS_INJECT, 6, 16, REGISTER_TYPE_WO, 0, 2, gmf_ts_inject_fields },
+};
+
+/*
+ * GPIO_PHY layout tables: per-port QSFP-style module control/status lines
+ * (LPMODE, RESET_B, INT_B, MODPRS_B, RXLOS — names suggest QSFP signals;
+ * confirm against board docs). CFG reset 170 (0xAA) sets the four
+ * active-low *_B inputs (bits 1, 3, 5, 7) to 1.
+ */
+static nt_fpga_field_init_t gpio_phy_cfg_fields[] = {
+	{ GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 },
+	{ GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 },
+	{ GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 },
+	{ GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 },
+	{ GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 },
+	{ GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 },
+	{ GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t gpio_phy_gpio_fields[] = {
+	{ GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 },
+	{ GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 },
+	{ GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 },
+	{ GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 },
+	{ GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 },
+	{ GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 },
+	{ GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 },
+};
+
+/* GPIO reset 17 (0x11) drives both ports' LPMODE (bits 0 and 4) high. */
+static nt_fpga_register_init_t gpio_phy_registers[] = {
+	{ GPIO_PHY_CFG, 0, 10, REGISTER_TYPE_RW, 170, 10, gpio_phy_cfg_fields },
+	{	GPIO_PHY_GPIO, 1, 10, REGISTER_TYPE_RW, 17, 10,
+		gpio_phy_gpio_fields
+	},
+};
+
+/*
+ * HFU layout tables: an indirect CTRL/DATA recipe table (ADR selects the
+ * entry, CNT the burst length) with a 155-bit, 31-field recipe record.
+ */
+static nt_fpga_field_init_t hfu_rcp_ctrl_fields[] = {
+	{ HFU_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ HFU_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hfu_rcp_data_fields[] = {
+	{ HFU_RCP_DATA_CSINF, 1, 111, 0x0000 },
+	{ HFU_RCP_DATA_IL3OFS, 8, 139, 0x0000 },
+	{ HFU_RCP_DATA_IL4OFS, 8, 147, 0x0000 },
+	{ HFU_RCP_DATA_L3FRAG, 2, 114, 0x0000 },
+	{ HFU_RCP_DATA_L3PRT, 2, 112, 0x0000 },
+	{ HFU_RCP_DATA_L4PRT, 3, 120, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 },
+	{ HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 },
+	{ HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 },
+	{ HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 },
+	{ HFU_RCP_DATA_OL3OFS, 8, 123, 0x0000 },
+	{ HFU_RCP_DATA_OL4OFS, 8, 131, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 },
+	{ HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 },
+	{ HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 },
+	{ HFU_RCP_DATA_TUNNEL, 4, 116, 0x0000 },
+};
+
+static nt_fpga_register_init_t hfu_registers[] = {
+	{ HFU_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields },
+	{ HFU_RCP_DATA, 1, 155, REGISTER_TYPE_WO, 0, 31, hfu_rcp_data_fields },
+};
+
+/*
+ * HIF (host interface) layout tables. Identification registers carry
+ * image-specific reset values: PROD_ID_LSB encodes group 9563, version 55,
+ * revision 24 (matching this file's 9563_055_024 variant), and
+ * HIF_BUILD_TIME holds the build timestamp 1693492863.
+ */
+static nt_fpga_field_init_t hif_build_time_fields[] = {
+	{ HIF_BUILD_TIME_TIME, 32, 0, 1693492863 },
+};
+
+static nt_fpga_field_init_t hif_config_fields[] = {
+	{ HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 },
+	{ HIF_CONFIG_MAX_READ, 3, 3, 0x0000 },
+	{ HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_control_fields[] = {
+	{ HIF_CONTROL_BLESSED, 8, 4, 0 },
+	{ HIF_CONTROL_WRAW, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_ex_fields[] = {
+	{ HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 },
+	{ HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 },
+	{ HIF_PROD_ID_EX_RESERVED, 23, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_lsb_fields[] = {
+	{ HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 },
+	{ HIF_PROD_ID_LSB_REV_ID, 8, 0, 24 },
+	{ HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 },
+};
+
+static nt_fpga_field_init_t hif_prod_id_msb_fields[] = {
+	{ HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 },
+	{ HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 },
+};
+
+static nt_fpga_field_init_t hif_sample_time_fields[] = {
+	{ HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t hif_status_fields[] = {
+	{ HIF_STATUS_RD_ERR, 1, 9, 0 },
+	{ HIF_STATUS_TAGS_IN_USE, 8, 0, 0 },
+	{ HIF_STATUS_WR_ERR, 1, 8, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_ctrl_fields[] = {
+	{ HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 },
+	{ HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_refclk_fields[] = {
+	{ HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_rx_fields[] = {
+	{ HIF_STAT_RX_COUNTER, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t hif_stat_tx_fields[] = {
+	{ HIF_STAT_TX_COUNTER, 32, 0, 0 },
+};
+
+/* TEST0/TEST1 reset to the scratch patterns 0x11223344 / 0xAABBCCDD. */
+static nt_fpga_field_init_t hif_test0_fields[] = {
+	{ HIF_TEST0_DATA, 32, 0, 287454020 },
+};
+
+static nt_fpga_field_init_t hif_test1_fields[] = {
+	{ HIF_TEST1_DATA, 32, 0, 2864434397 },
+};
+
+static nt_fpga_field_init_t hif_uuid0_fields[] = {
+	{ HIF_UUID0_UUID0, 32, 0, 3215833203 },
+};
+
+static nt_fpga_field_init_t hif_uuid1_fields[] = {
+	{ HIF_UUID1_UUID1, 32, 0, 2947535663 },
+};
+
+static nt_fpga_field_init_t hif_uuid2_fields[] = {
+	{ HIF_UUID2_UUID2, 32, 0, 1243492979 },
+};
+
+static nt_fpga_field_init_t hif_uuid3_fields[] = {
+	{ HIF_UUID3_UUID3, 32, 0, 2500373735 },
+};
+
+/* HIF register map; indices step by 8, so they look like byte offsets
+ * rather than sequential register numbers — confirm against bus code. */
+static nt_fpga_register_init_t hif_registers[] = {
+	{	HIF_BUILD_TIME, 16, 32, REGISTER_TYPE_RO, 1693492863, 1,
+		hif_build_time_fields
+	},
+	{ HIF_CONFIG, 24, 7, REGISTER_TYPE_RW, 0, 3, hif_config_fields },
+	{ HIF_CONTROL, 40, 12, REGISTER_TYPE_RW, 1, 2, hif_control_fields },
+	{	HIF_PROD_ID_EX, 112, 32, REGISTER_TYPE_RO, 1, 3,
+		hif_prod_id_ex_fields
+	},
+	{	HIF_PROD_ID_LSB, 0, 32, REGISTER_TYPE_RO, 626734872, 3,
+		hif_prod_id_lsb_fields
+	},
+	{	HIF_PROD_ID_MSB, 8, 22, REGISTER_TYPE_RO, 200, 2,
+		hif_prod_id_msb_fields
+	},
+	{	HIF_SAMPLE_TIME, 96, 1, REGISTER_TYPE_WO, 0, 1,
+		hif_sample_time_fields
+	},
+	{ HIF_STATUS, 32, 10, REGISTER_TYPE_MIXED, 0, 3, hif_status_fields },
+	{ HIF_STAT_CTRL, 64, 2, REGISTER_TYPE_WO, 0, 2, hif_stat_ctrl_fields },
+	{	HIF_STAT_REFCLK, 72, 32, REGISTER_TYPE_RO, 0, 1,
+		hif_stat_refclk_fields
+	},
+	{ HIF_STAT_RX, 88, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_rx_fields },
+	{ HIF_STAT_TX, 80, 32, REGISTER_TYPE_RO, 0, 1, hif_stat_tx_fields },
+	{ HIF_TEST0, 48, 32, REGISTER_TYPE_RW, 287454020, 1, hif_test0_fields },
+	{	HIF_TEST1, 56, 32, REGISTER_TYPE_RW, 2864434397, 1,
+		hif_test1_fields
+	},
+	{	HIF_UUID0, 128, 32, REGISTER_TYPE_RO, 3215833203, 1,
+		hif_uuid0_fields
+	},
+	{	HIF_UUID1, 144, 32, REGISTER_TYPE_RO, 2947535663, 1,
+		hif_uuid1_fields
+	},
+	{	HIF_UUID2, 160, 32, REGISTER_TYPE_RO, 1243492979, 1,
+		hif_uuid2_fields
+	},
+	{	HIF_UUID3, 176, 32, REGISTER_TYPE_RO, 2500373735, 1,
+		hif_uuid3_fields
+	},
+};
+
+/*
+ * HSH (hash) layout tables: indirect CTRL/DATA recipe access with a
+ * 743-bit, 23-field recipe record (320-bit key K and 320-bit WORD_MASK;
+ * a TOEPLITZ select bit is present at bit 421).
+ */
+static nt_fpga_field_init_t hsh_rcp_ctrl_fields[] = {
+	{ HSH_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ HSH_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hsh_rcp_data_fields[] = {
+	{ HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 },
+	{ HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 },
+	{ HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 },
+	{ HSH_RCP_DATA_K, 320, 422, 0x0000 },
+	{ HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 },
+	{ HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 },
+	{ HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 },
+	{ HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 },
+	{ HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 },
+	{ HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
+	{ HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 },
+	{ HSH_RCP_DATA_SEED, 32, 382, 0x0000 },
+	{ HSH_RCP_DATA_SORT, 2, 4, 0x0000 },
+	{ HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 },
+	{ HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 },
+	{ HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 },
+	{ HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 },
+	{ HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 },
+	{ HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 },
+	{ HSH_RCP_DATA_W9_P, 1, 60, 0x0000 },
+	{ HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 },
+	{ HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 },
+	{ HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 },
+};
+
+static nt_fpga_register_init_t hsh_registers[] = {
+	{ HSH_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields },
+	{ HSH_RCP_DATA, 1, 743, REGISTER_TYPE_WO, 0, 23, hsh_rcp_data_fields },
+};
+
+/*
+ * HST layout tables: indirect CTRL/DATA recipe access with a 133-bit,
+ * 17-field recipe (start/end slice bounds plus three MODIFx cmd/dyn/ofs/
+ * value groups).
+ */
+static nt_fpga_field_init_t hst_rcp_ctrl_fields[] = {
+	{ HST_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ HST_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t hst_rcp_data_fields[] = {
+	{ HST_RCP_DATA_END_DYN, 5, 16, 0x0000 },
+	{ HST_RCP_DATA_END_OFS, 10, 21, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_CMD, 3, 31, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_DYN, 5, 34, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_OFS, 10, 39, 0x0000 },
+	{ HST_RCP_DATA_MODIF0_VALUE, 16, 49, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_CMD, 3, 65, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_DYN, 5, 68, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_OFS, 10, 73, 0x0000 },
+	{ HST_RCP_DATA_MODIF1_VALUE, 16, 83, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_CMD, 3, 99, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_DYN, 5, 102, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_OFS, 10, 107, 0x0000 },
+	{ HST_RCP_DATA_MODIF2_VALUE, 16, 117, 0x0000 },
+	{ HST_RCP_DATA_START_DYN, 5, 1, 0x0000 },
+	{ HST_RCP_DATA_START_OFS, 10, 6, 0x0000 },
+	{ HST_RCP_DATA_STRIP_MODE, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t hst_registers[] = {
+	{ HST_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, hst_rcp_ctrl_fields },
+	{ HST_RCP_DATA, 1, 133, REGISTER_TYPE_WO, 0, 17, hst_rcp_data_fields },
+};
+
+/*
+ * IFR layout tables: per-recipe enable bit plus a 14-bit MTU value,
+ * accessed through the usual WO CTRL/DATA register pair.
+ */
+static nt_fpga_field_init_t ifr_rcp_ctrl_fields[] = {
+	{ IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t ifr_rcp_data_fields[] = {
+	{ IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t ifr_registers[] = {
+	{ IFR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields },
+	{ IFR_RCP_DATA, 1, 15, REGISTER_TYPE_WO, 0, 2, ifr_rcp_data_fields },
+};
+
+/*
+ * IIC (I2C controller) field tables. Register/field names (CR, SR, DGIE,
+ * RX_FIFO_PIRQ, TSUSTA/TSUSTO/THDSTA timing registers) match the Xilinx
+ * AXI IIC programming model — confirm against PG090. Field entries are
+ * { field id, bit width, lsb position, reset }.
+ */
+static nt_fpga_field_init_t iic_adr_fields[] = {
+	{ IIC_ADR_SLV_ADR, 7, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_cr_fields[] = {
+	{ IIC_CR_EN, 1, 0, 0 },	  { IIC_CR_GC_EN, 1, 6, 0 },
+	{ IIC_CR_MSMS, 1, 2, 0 }, { IIC_CR_RST, 1, 7, 0 },
+	{ IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 },
+	{ IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t iic_dgie_fields[] = {
+	{ IIC_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t iic_gpo_fields[] = {
+	{ IIC_GPO_GPO_VAL, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ier_fields[] = {
+	{ IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 },
+	{ IIC_IER_INT2, 1, 2, 0 }, { IIC_IER_INT3, 1, 3, 0 },
+	{ IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 },
+	{ IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_isr_fields[] = {
+	{ IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 },
+	{ IIC_ISR_INT2, 1, 2, 0 }, { IIC_ISR_INT3, 1, 3, 0 },
+	{ IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 },
+	{ IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_fields[] = {
+	{ IIC_RX_FIFO_RXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_ocy_fields[] = {
+	{ IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_rx_fifo_pirq_fields[] = {
+	{ IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_softr_fields[] = {
+	{ IIC_SOFTR_RKEY, 4, 0, 0x0000 },
+};
+
+/* SR resets with both FIFO-empty flags (bits 6 and 7) set: value 192. */
+static nt_fpga_field_init_t iic_sr_fields[] = {
+	{ IIC_SR_AAS, 1, 1, 0 },	  { IIC_SR_ABGC, 1, 0, 0 },
+	{ IIC_SR_BB, 1, 2, 0 },		  { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 },
+	{ IIC_SR_RXFIFO_FULL, 1, 5, 0 },  { IIC_SR_SRW, 1, 3, 0 },
+	{ IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 },
+};
+
+static nt_fpga_field_init_t iic_tbuf_fields[] = {
+	{ IIC_TBUF_TBUF_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_ten_adr_fields[] = {
+	{ IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thddat_fields[] = {
+	{ IIC_THDDAT_THDDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thdsta_fields[] = {
+	{ IIC_THDSTA_THDSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_thigh_fields[] = {
+	{ IIC_THIGH_THIGH_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tlow_fields[] = {
+	{ IIC_TLOW_TLOW_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsudat_fields[] = {
+	{ IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusta_fields[] = {
+	{ IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tsusto_fields[] = {
+	{ IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_fields[] = {
+	{ IIC_TX_FIFO_START, 1, 8, 0 },
+	{ IIC_TX_FIFO_STOP, 1, 9, 0 },
+	{ IIC_TX_FIFO_TXDATA, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t iic_tx_fifo_ocy_fields[] = {
+	{ IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t iic_registers[] = {
+	{ IIC_ADR, 68, 8, REGISTER_TYPE_RW, 0, 1, iic_adr_fields },
+	{ IIC_CR, 64, 8, REGISTER_TYPE_RW, 0, 8, iic_cr_fields },
+	{ IIC_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, iic_dgie_fields },
+	{ IIC_GPO, 73, 1, REGISTER_TYPE_RW, 0, 1, iic_gpo_fields },
+	{ IIC_IER, 10, 8, REGISTER_TYPE_RW, 0, 8, iic_ier_fields },
+	{ IIC_ISR, 8, 8, REGISTER_TYPE_RW, 0, 8, iic_isr_fields },
+	{ IIC_RX_FIFO, 67, 8, REGISTER_TYPE_RO, 0, 1, iic_rx_fifo_fields },
+	{	IIC_RX_FIFO_OCY, 70, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_rx_fifo_ocy_fields
+	},
+	{	IIC_RX_FIFO_PIRQ, 72, 4, REGISTER_TYPE_RW, 0, 1,
+		iic_rx_fifo_pirq_fields
+	},
+	{ IIC_SOFTR, 16, 4, REGISTER_TYPE_WO, 0, 1, iic_softr_fields },
+	{ IIC_SR, 65, 8, REGISTER_TYPE_RO, 192, 8, iic_sr_fields },
+	{ IIC_TBUF, 78, 32, REGISTER_TYPE_RW, 0, 1, iic_tbuf_fields },
+	{ IIC_TEN_ADR, 71, 3, REGISTER_TYPE_RO, 0, 1, iic_ten_adr_fields },
+	{ IIC_THDDAT, 81, 32, REGISTER_TYPE_RW, 0, 1, iic_thddat_fields },
+	{ IIC_THDSTA, 76, 32, REGISTER_TYPE_RW, 0, 1, iic_thdsta_fields },
+	{ IIC_THIGH, 79, 32, REGISTER_TYPE_RW, 0, 1, iic_thigh_fields },
+	{ IIC_TLOW, 80, 32, REGISTER_TYPE_RW, 0, 1, iic_tlow_fields },
+	{ IIC_TSUDAT, 77, 32, REGISTER_TYPE_RW, 0, 1, iic_tsudat_fields },
+	{ IIC_TSUSTA, 74, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusta_fields },
+	{ IIC_TSUSTO, 75, 32, REGISTER_TYPE_RW, 0, 1, iic_tsusto_fields },
+	{ IIC_TX_FIFO, 66, 10, REGISTER_TYPE_WO, 0, 3, iic_tx_fifo_fields },
+	{	IIC_TX_FIFO_OCY, 69, 4, REGISTER_TYPE_RO, 0, 1,
+		iic_tx_fifo_ocy_fields
+	},
+};
+
/*
 * INS (header inserter) module tables.
 * Field entries are { field_id, bit_width, bit_offset, reset_value };
 * register entries are { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }.
 */
static nt_fpga_field_init_t ins_rcp_ctrl_fields[] = {
	{ INS_RCP_CTRL_ADR, 4, 0, 0x0000 },
	{ INS_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t ins_rcp_data_fields[] = {
	{ INS_RCP_DATA_DYN, 5, 0, 0x0000 },
	{ INS_RCP_DATA_LEN, 8, 15, 0x0000 },
	{ INS_RCP_DATA_OFS, 10, 5, 0x0000 },
};

static nt_fpga_register_init_t ins_registers[] = {
	{ INS_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, ins_rcp_ctrl_fields },
	{ INS_RCP_DATA, 1, 23, REGISTER_TYPE_WO, 0, 3, ins_rcp_data_fields },
};
+
/*
 * KM (key matcher) module tables: CAM/TCAM memories and recipe (RCP)
 * configuration. Field entries are { field_id, bit_width, bit_offset,
 * reset_value }; wide registers (e.g. KM_RCP_DATA, 781 bits) span
 * multiple 32-bit words, with bit offsets counted across the whole
 * register.
 */
static nt_fpga_field_init_t km_cam_ctrl_fields[] = {
	{ KM_CAM_CTRL_ADR, 13, 0, 0x0000 },
	{ KM_CAM_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t km_cam_data_fields[] = {
	{ KM_CAM_DATA_FT0, 4, 192, 0x0000 },
	{ KM_CAM_DATA_FT1, 4, 196, 0x0000 },
	{ KM_CAM_DATA_FT2, 4, 200, 0x0000 },
	{ KM_CAM_DATA_FT3, 4, 204, 0x0000 },
	{ KM_CAM_DATA_FT4, 4, 208, 0x0000 },
	{ KM_CAM_DATA_FT5, 4, 212, 0x0000 },
	{ KM_CAM_DATA_W0, 32, 0, 0x0000 },
	{ KM_CAM_DATA_W1, 32, 32, 0x0000 },
	{ KM_CAM_DATA_W2, 32, 64, 0x0000 },
	{ KM_CAM_DATA_W3, 32, 96, 0x0000 },
	{ KM_CAM_DATA_W4, 32, 128, 0x0000 },
	{ KM_CAM_DATA_W5, 32, 160, 0x0000 },
};

static nt_fpga_field_init_t km_rcp_ctrl_fields[] = {
	{ KM_RCP_CTRL_ADR, 5, 0, 0x0000 },
	{ KM_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t km_rcp_data_fields[] = {
	{ KM_RCP_DATA_BANK_A, 12, 694, 0x0000 },
	{ KM_RCP_DATA_BANK_B, 12, 706, 0x0000 },
	{ KM_RCP_DATA_DUAL, 1, 651, 0x0000 },
	{ KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 },
	{ KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 },
	{ KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 },
	{ KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 },
	{ KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 },
	{ KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 },
	{ KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 },
	{ KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 },
	{ KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 },
	{ KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 },
	{ KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 },
	{ KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 },
	{ KM_RCP_DATA_EL_A, 4, 653, 0x0000 },
	{ KM_RCP_DATA_EL_B, 3, 657, 0x0000 },
	{ KM_RCP_DATA_FTM_A, 16, 662, 0x0000 },
	{ KM_RCP_DATA_FTM_B, 16, 678, 0x0000 },
	{ KM_RCP_DATA_INFO_A, 1, 660, 0x0000 },
	{ KM_RCP_DATA_INFO_B, 1, 661, 0x0000 },
	{ KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 },
	{ KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 },
	{ KM_RCP_DATA_KL_A, 4, 718, 0x0000 },
	{ KM_RCP_DATA_KL_B, 3, 722, 0x0000 },
	{ KM_RCP_DATA_MASK_A, 384, 75, 0x0000 },
	{ KM_RCP_DATA_MASK_B, 192, 459, 0x0000 },
	{ KM_RCP_DATA_PAIRED, 1, 652, 0x0000 },
	{ KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 },
	{ KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 },
	{ KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 },
	{ KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 },
	{ KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 },
	{ KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 },
	{ KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 },
	{ KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 },
	{ KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 },
	{ KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 },
	{ KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 },
	{ KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 },
	{ KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 },
	{ KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 },
	{ KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 },
	{ KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 },
};

static nt_fpga_field_init_t km_status_fields[] = {
	{ KM_STATUS_TCQ_RDY, 1, 0, 0x0000 },
};

static nt_fpga_field_init_t km_tcam_ctrl_fields[] = {
	{ KM_TCAM_CTRL_ADR, 14, 0, 0x0000 },
	{ KM_TCAM_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t km_tcam_data_fields[] = {
	{ KM_TCAM_DATA_T, 72, 0, 0x0000 },
};

static nt_fpga_field_init_t km_tci_ctrl_fields[] = {
	{ KM_TCI_CTRL_ADR, 10, 0, 0x0000 },
	{ KM_TCI_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t km_tci_data_fields[] = {
	{ KM_TCI_DATA_COLOR, 32, 0, 0x0000 },
	{ KM_TCI_DATA_FT, 4, 32, 0x0000 },
};

static nt_fpga_field_init_t km_tcq_ctrl_fields[] = {
	{ KM_TCQ_CTRL_ADR, 7, 0, 0x0000 },
	{ KM_TCQ_CTRL_CNT, 5, 16, 0x0000 },
};

static nt_fpga_field_init_t km_tcq_data_fields[] = {
	{ KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 },
	{ KM_TCQ_DATA_QUAL, 3, 12, 0x0000 },
};

/* KM register table: { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }. */
static nt_fpga_register_init_t km_registers[] = {
	{ KM_CAM_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, km_cam_ctrl_fields },
	{ KM_CAM_DATA, 3, 216, REGISTER_TYPE_WO, 0, 12, km_cam_data_fields },
	{ KM_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, km_rcp_ctrl_fields },
	{ KM_RCP_DATA, 1, 781, REGISTER_TYPE_WO, 0, 44, km_rcp_data_fields },
	{ KM_STATUS, 10, 1, REGISTER_TYPE_RO, 0, 1, km_status_fields },
	{ KM_TCAM_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, km_tcam_ctrl_fields },
	{ KM_TCAM_DATA, 5, 72, REGISTER_TYPE_WO, 0, 1, km_tcam_data_fields },
	{ KM_TCI_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, km_tci_ctrl_fields },
	{ KM_TCI_DATA, 7, 36, REGISTER_TYPE_WO, 0, 2, km_tci_data_fields },
	{ KM_TCQ_CTRL, 8, 21, REGISTER_TYPE_WO, 0, 2, km_tcq_ctrl_fields },
	{ KM_TCQ_DATA, 9, 15, REGISTER_TYPE_WO, 0, 2, km_tcq_data_fields },
};
+
/*
 * MAC_PCS module tables: Ethernet MAC/PCS status and control, including
 * FEC counters and serdes (GTY_*) controls.
 * Field entries are { field_id, bit_width, bit_offset, reset_value };
 * per-lane fields repeat the same layout at stepped offsets (lanes 0-3).
 * Register reset values equal the fields' reset values packed at their
 * bit offsets (e.g. GTY_DIFF_CTL: 24 in four 5-bit fields -> 811800;
 * TIMESTAMP_COMP: (1440 << 16) | 1451 -> 94373291).
 * NOTE(review): GTY_* registers (loopback, diff/pre/post cursor, PRBS,
 * eye scan) appear to drive Xilinx GTY transceiver attributes -- confirm
 * against UG578.
 */
static nt_fpga_field_init_t mac_pcs_bad_code_fields[] = {
	{ MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_bip_err_fields[] = {
	{ MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_block_lock_fields[] = {
	{ MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_block_lock_chg_fields[] = {
	{ MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_debounce_ctrl_fields[] = {
	{ MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 },
	{ MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 },
	{ MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 },
	{ MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 },
};

static nt_fpga_field_init_t mac_pcs_drp_ctrl_fields[] = {
	{ MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 },
	{ MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 },
	{ MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 },
	{ MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 },
	{ MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 },
	{ MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 },
};

static nt_fpga_field_init_t mac_pcs_fec_ctrl_fields[] = {
	{ MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 },
};

static nt_fpga_field_init_t mac_pcs_fec_cw_cnt_fields[] = {
	{ MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_err_cnt_0_fields[] = {
	{ MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_err_cnt_1_fields[] = {
	{ MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_err_cnt_2_fields[] = {
	{ MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_err_cnt_3_fields[] = {
	{ MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_lane_dly_0_fields[] = {
	{ MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_lane_dly_1_fields[] = {
	{ MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_lane_dly_2_fields[] = {
	{ MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_lane_dly_3_fields[] = {
	{ MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_lane_map_fields[] = {
	{ MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_stat_fields[] = {
	{ MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 },
	{ MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 },
	{ MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 },
	{ MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 },
	{ MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 },
	{ MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 },
	{ MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 },
	{ MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 },
	{ MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 },
	{ MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 },
	{ MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_fec_ucw_cnt_fields[] = {
	{ MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_gty_ctl_rx_fields[] = {
	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 },
	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 },
	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 },
	{ MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 },
	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 },
	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 },
	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 },
	{ MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 },
	{ MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 },
	{ MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 },
	{ MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 },
	{ MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 },
	{ MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 },
	{ MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 },
	{ MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 },
	{ MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 },
	{ MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 },
	{ MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 },
	{ MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 },
	{ MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 },
};

static nt_fpga_field_init_t mac_pcs_gty_ctl_tx_fields[] = {
	{ MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 },
	{ MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 },
	{ MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 },
	{ MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 },
	{ MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 },
	{ MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 },
	{ MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 },
	{ MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 },
};

static nt_fpga_field_init_t mac_pcs_gty_diff_ctl_fields[] = {
	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 },
	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 },
	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 },
	{ MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 },
};

static nt_fpga_field_init_t mac_pcs_gty_loop_fields[] = {
	{ MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 },
	{ MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 },
	{ MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 },
	{ MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 },
};

static nt_fpga_field_init_t mac_pcs_gty_post_cursor_fields[] = {
	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 },
	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 },
	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 },
	{ MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 },
};

static nt_fpga_field_init_t mac_pcs_gty_prbs_sel_fields[] = {
	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 },
	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 },
	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 },
	{ MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 },
	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 },
	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 },
	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 },
	{ MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 },
};

static nt_fpga_field_init_t mac_pcs_gty_pre_cursor_fields[] = {
	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 },
	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 },
	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 },
	{ MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 },
};

static nt_fpga_field_init_t mac_pcs_gty_rx_buf_stat_fields[] = {
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 },
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 },
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 },
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 },
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 },
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 },
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 },
	{ MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_gty_scan_ctl_fields[] = {
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 },
	{ MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 },
};

static nt_fpga_field_init_t mac_pcs_gty_scan_stat_fields[] = {
	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 },
	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 },
	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 },
	{ MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 },
	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 },
	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 },
	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 },
	{ MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_gty_stat_fields[] = {
	{ MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 },
	{ MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 },
	{ MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 },
	{ MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 },
	{ MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_link_summary_fields[] = {
	{ MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 },
	{ MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 },
	{ MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 },
	{ MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 },
	{ MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 },
	{ MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 },
	{ MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 },
	{ MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 },
	{ MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 },
	{ MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 },
	{ MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 },
};

/* Config reset 272 = TX_ENABLE (bit 8) | TX_FCS_REMOVE (bit 4). */
static nt_fpga_field_init_t mac_pcs_mac_pcs_config_fields[] = {
	{ MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 },
	{ MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 },
	{ MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 },
	{ MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 },
};

static nt_fpga_field_init_t mac_pcs_max_pkt_len_fields[] = {
	{ MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 },
};

static nt_fpga_field_init_t mac_pcs_phymac_misc_fields[] = {
	{ MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 },
	{ MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 },
	{ MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 },
	{ MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 },
	{ MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 },
};

static nt_fpga_field_init_t mac_pcs_phy_stat_fields[] = {
	{ MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 },
	{ MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 },
	{ MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_fields[] = {
	{ MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 },
};

/* Latched copy of STAT_PCS_RX: same field layout, sticky versions. */
static nt_fpga_field_init_t mac_pcs_stat_pcs_rx_latch_fields[] = {
	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 },
	{ MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_stat_pcs_tx_fields[] = {
	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 },
	{ MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_synced_fields[] = {
	{ MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_synced_err_fields[] = {
	{ MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_test_err_fields[] = {
	{ MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_timestamp_comp_fields[] = {
	{ MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 },
	{ MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 },
};

static nt_fpga_field_init_t mac_pcs_vl_demuxed_fields[] = {
	{ MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_pcs_vl_demuxed_chg_fields[] = {
	{ MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 },
};

/* MAC_PCS register table: { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }. */
static nt_fpga_register_init_t mac_pcs_registers[] = {
	{	MAC_PCS_BAD_CODE, 26, 16, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_bad_code_fields
	},
	{	MAC_PCS_BIP_ERR, 31, 640, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_bip_err_fields
	},
	{	MAC_PCS_BLOCK_LOCK, 27, 20, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_block_lock_fields
	},
	{	MAC_PCS_BLOCK_LOCK_CHG, 28, 20, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_block_lock_chg_fields
	},
	{	MAC_PCS_DEBOUNCE_CTRL, 1, 19, REGISTER_TYPE_RW, 264714, 4,
		mac_pcs_debounce_ctrl_fields
	},
	{	MAC_PCS_DRP_CTRL, 43, 32, REGISTER_TYPE_MIXED, 0, 6,
		mac_pcs_drp_ctrl_fields
	},
	{	MAC_PCS_FEC_CTRL, 2, 5, REGISTER_TYPE_RW, 0, 1,
		mac_pcs_fec_ctrl_fields
	},
	{	MAC_PCS_FEC_CW_CNT, 9, 32, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_cw_cnt_fields
	},
	{	MAC_PCS_FEC_ERR_CNT_0, 11, 32, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_err_cnt_0_fields
	},
	{	MAC_PCS_FEC_ERR_CNT_1, 12, 32, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_err_cnt_1_fields
	},
	{	MAC_PCS_FEC_ERR_CNT_2, 13, 32, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_err_cnt_2_fields
	},
	{	MAC_PCS_FEC_ERR_CNT_3, 14, 32, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_err_cnt_3_fields
	},
	{	MAC_PCS_FEC_LANE_DLY_0, 5, 14, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_lane_dly_0_fields
	},
	{	MAC_PCS_FEC_LANE_DLY_1, 6, 14, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_lane_dly_1_fields
	},
	{	MAC_PCS_FEC_LANE_DLY_2, 7, 14, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_lane_dly_2_fields
	},
	{	MAC_PCS_FEC_LANE_DLY_3, 8, 14, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_lane_dly_3_fields
	},
	{	MAC_PCS_FEC_LANE_MAP, 4, 8, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_lane_map_fields
	},
	{	MAC_PCS_FEC_STAT, 3, 11, REGISTER_TYPE_RO, 0, 11,
		mac_pcs_fec_stat_fields
	},
	{	MAC_PCS_FEC_UCW_CNT, 10, 32, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_fec_ucw_cnt_fields
	},
	{	MAC_PCS_GTY_CTL_RX, 38, 28, REGISTER_TYPE_RW, 0, 20,
		mac_pcs_gty_ctl_rx_fields
	},
	{	MAC_PCS_GTY_CTL_TX, 39, 8, REGISTER_TYPE_RW, 0, 8,
		mac_pcs_gty_ctl_tx_fields
	},
	{	MAC_PCS_GTY_DIFF_CTL, 35, 20, REGISTER_TYPE_RW, 811800, 4,
		mac_pcs_gty_diff_ctl_fields
	},
	{	MAC_PCS_GTY_LOOP, 20, 12, REGISTER_TYPE_RW, 0, 4,
		mac_pcs_gty_loop_fields
	},
	{	MAC_PCS_GTY_POST_CURSOR, 36, 20, REGISTER_TYPE_RW, 676500, 4,
		mac_pcs_gty_post_cursor_fields
	},
	{	MAC_PCS_GTY_PRBS_SEL, 40, 32, REGISTER_TYPE_RW, 0, 8,
		mac_pcs_gty_prbs_sel_fields
	},
	{	MAC_PCS_GTY_PRE_CURSOR, 37, 20, REGISTER_TYPE_RW, 0, 4,
		mac_pcs_gty_pre_cursor_fields
	},
	{	MAC_PCS_GTY_RX_BUF_STAT, 34, 24, REGISTER_TYPE_RO, 0, 8,
		mac_pcs_gty_rx_buf_stat_fields
	},
	{	MAC_PCS_GTY_SCAN_CTL, 41, 16, REGISTER_TYPE_RW, 0, 16,
		mac_pcs_gty_scan_ctl_fields
	},
	{	MAC_PCS_GTY_SCAN_STAT, 42, 8, REGISTER_TYPE_RO, 0, 8,
		mac_pcs_gty_scan_stat_fields
	},
	{	MAC_PCS_GTY_STAT, 33, 16, REGISTER_TYPE_RO, 0, 12,
		mac_pcs_gty_stat_fields
	},
	{	MAC_PCS_LINK_SUMMARY, 0, 19, REGISTER_TYPE_RO, 0, 11,
		mac_pcs_link_summary_fields
	},
	{	MAC_PCS_MAC_PCS_CONFIG, 19, 12, REGISTER_TYPE_RW, 272, 12,
		mac_pcs_mac_pcs_config_fields
	},
	{	MAC_PCS_MAX_PKT_LEN, 17, 14, REGISTER_TYPE_RW, 10000, 1,
		mac_pcs_max_pkt_len_fields
	},
	{	MAC_PCS_PHYMAC_MISC, 16, 8, REGISTER_TYPE_MIXED, 9, 5,
		mac_pcs_phymac_misc_fields
	},
	{	MAC_PCS_PHY_STAT, 15, 3, REGISTER_TYPE_RO, 0, 3,
		mac_pcs_phy_stat_fields
	},
	{	MAC_PCS_STAT_PCS_RX, 21, 10, REGISTER_TYPE_RO, 0, 10,
		mac_pcs_stat_pcs_rx_fields
	},
	{	MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, REGISTER_TYPE_RO, 0, 10,
		mac_pcs_stat_pcs_rx_latch_fields
	},
	{	MAC_PCS_STAT_PCS_TX, 23, 10, REGISTER_TYPE_RO, 0, 10,
		mac_pcs_stat_pcs_tx_fields
	},
	{	MAC_PCS_SYNCED, 24, 20, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_synced_fields
	},
	{	MAC_PCS_SYNCED_ERR, 25, 20, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_synced_err_fields
	},
	{	MAC_PCS_TEST_ERR, 32, 16, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_test_err_fields
	},
	{	MAC_PCS_TIMESTAMP_COMP, 18, 32, REGISTER_TYPE_RW, 94373291, 2,
		mac_pcs_timestamp_comp_fields
	},
	{	MAC_PCS_VL_DEMUXED, 29, 20, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_vl_demuxed_fields
	},
	{	MAC_PCS_VL_DEMUXED_CHG, 30, 20, REGISTER_TYPE_RO, 0, 1,
		mac_pcs_vl_demuxed_chg_fields
	},
};
+
/*
 * MAC_RX module tables: read-only 32-bit RX statistics counters.
 * Field entries are { field_id, bit_width, bit_offset, reset_value };
 * register entries are { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }.
 */
static nt_fpga_field_init_t mac_rx_bad_fcs_fields[] = {
	{ MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_fragment_fields[] = {
	{ MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_packet_bad_fcs_fields[] = {
	{ MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_packet_small_fields[] = {
	{ MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_total_bytes_fields[] = {
	{ MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_total_good_bytes_fields[] = {
	{ MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_total_good_packets_fields[] = {
	{ MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_total_packets_fields[] = {
	{ MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_rx_undersize_fields[] = {
	{ MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_register_init_t mac_rx_registers[] = {
	{	MAC_RX_BAD_FCS, 0, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_bad_fcs_fields
	},
	{	MAC_RX_FRAGMENT, 6, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_fragment_fields
	},
	{	MAC_RX_PACKET_BAD_FCS, 7, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_packet_bad_fcs_fields
	},
	{	MAC_RX_PACKET_SMALL, 3, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_packet_small_fields
	},
	{	MAC_RX_TOTAL_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_total_bytes_fields
	},
	{	MAC_RX_TOTAL_GOOD_BYTES, 5, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_total_good_bytes_fields
	},
	{	MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_total_good_packets_fields
	},
	{	MAC_RX_TOTAL_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_total_packets_fields
	},
	{	MAC_RX_UNDERSIZE, 8, 32, REGISTER_TYPE_RO, 0, 1,
		mac_rx_undersize_fields
	},
};
+
/*
 * MAC_TX module tables: read-only 32-bit TX statistics counters.
 * Field entries are { field_id, bit_width, bit_offset, reset_value };
 * register entries are { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }.
 */
static nt_fpga_field_init_t mac_tx_packet_small_fields[] = {
	{ MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_tx_total_bytes_fields[] = {
	{ MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_tx_total_good_bytes_fields[] = {
	{ MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_tx_total_good_packets_fields[] = {
	{ MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t mac_tx_total_packets_fields[] = {
	{ MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 },
};

static nt_fpga_register_init_t mac_tx_registers[] = {
	{	MAC_TX_PACKET_SMALL, 2, 32, REGISTER_TYPE_RO, 0, 1,
		mac_tx_packet_small_fields
	},
	{	MAC_TX_TOTAL_BYTES, 3, 32, REGISTER_TYPE_RO, 0, 1,
		mac_tx_total_bytes_fields
	},
	{	MAC_TX_TOTAL_GOOD_BYTES, 4, 32, REGISTER_TYPE_RO, 0, 1,
		mac_tx_total_good_bytes_fields
	},
	{	MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, REGISTER_TYPE_RO, 0, 1,
		mac_tx_total_good_packets_fields
	},
	{	MAC_TX_TOTAL_PACKETS, 0, 32, REGISTER_TYPE_RO, 0, 1,
		mac_tx_total_packets_fields
	},
};
+
/*
 * PCI_RD_TG (PCIe read traffic generator) module tables.
 * Field entries are { field_id, bit_width, bit_offset, reset_value };
 * register entries are { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }. RDDATA0/1 hold the low/high
 * halves of the 64-bit physical address written into the TG RAM.
 */
static nt_fpga_field_init_t pci_rd_tg_tg_ctrl_fields[] = {
	{ PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 },
};

static nt_fpga_field_init_t pci_rd_tg_tg_rdaddr_fields[] = {
	{ PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 },
};

static nt_fpga_field_init_t pci_rd_tg_tg_rddata0_fields[] = {
	{ PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
};

static nt_fpga_field_init_t pci_rd_tg_tg_rddata1_fields[] = {
	{ PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
};

static nt_fpga_field_init_t pci_rd_tg_tg_rddata2_fields[] = {
	{ PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 },
	{ PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 },
	{ PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 },
	{ PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 },
};

static nt_fpga_field_init_t pci_rd_tg_tg_rd_run_fields[] = {
	{ PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 },
};

static nt_fpga_register_init_t pci_rd_tg_registers[] = {
	{	PCI_RD_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
		pci_rd_tg_tg_ctrl_fields
	},
	{	PCI_RD_TG_TG_RDADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
		pci_rd_tg_tg_rdaddr_fields
	},
	{	PCI_RD_TG_TG_RDDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
		pci_rd_tg_tg_rddata0_fields
	},
	{	PCI_RD_TG_TG_RDDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
		pci_rd_tg_tg_rddata1_fields
	},
	{	PCI_RD_TG_TG_RDDATA2, 2, 32, REGISTER_TYPE_WO, 0, 4,
		pci_rd_tg_tg_rddata2_fields
	},
	{	PCI_RD_TG_TG_RD_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
		pci_rd_tg_tg_rd_run_fields
	},
};
+
/*
 * PCI_TA (PCIe test analyzer) module tables: enable control plus
 * read-only good/bad/error counters.
 * Field entries are { field_id, bit_width, bit_offset, reset_value };
 * register entries are { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }.
 */
static nt_fpga_field_init_t pci_ta_control_fields[] = {
	{ PCI_TA_CONTROL_ENABLE, 1, 0, 0 },
};

static nt_fpga_field_init_t pci_ta_length_error_fields[] = {
	{ PCI_TA_LENGTH_ERROR_AMOUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t pci_ta_packet_bad_fields[] = {
	{ PCI_TA_PACKET_BAD_AMOUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t pci_ta_packet_good_fields[] = {
	{ PCI_TA_PACKET_GOOD_AMOUNT, 32, 0, 0x0000 },
};

static nt_fpga_field_init_t pci_ta_payload_error_fields[] = {
	{ PCI_TA_PAYLOAD_ERROR_AMOUNT, 32, 0, 0x0000 },
};

static nt_fpga_register_init_t pci_ta_registers[] = {
	{ PCI_TA_CONTROL, 0, 1, REGISTER_TYPE_WO, 0, 1, pci_ta_control_fields },
	{	PCI_TA_LENGTH_ERROR, 3, 32, REGISTER_TYPE_RO, 0, 1,
		pci_ta_length_error_fields
	},
	{	PCI_TA_PACKET_BAD, 2, 32, REGISTER_TYPE_RO, 0, 1,
		pci_ta_packet_bad_fields
	},
	{	PCI_TA_PACKET_GOOD, 1, 32, REGISTER_TYPE_RO, 0, 1,
		pci_ta_packet_good_fields
	},
	{	PCI_TA_PAYLOAD_ERROR, 4, 32, REGISTER_TYPE_RO, 0, 1,
		pci_ta_payload_error_fields
	},
};
+
/*
 * PCI_WR_TG (PCIe write traffic generator) module tables.
 * Mirrors PCI_RD_TG with an extra SEQ register and an INC_MODE bit in
 * WRDATA2. Field entries are { field_id, bit_width, bit_offset,
 * reset_value }; register entries are { reg_id, offset, bit_width,
 * access_type, reset_value, field_count, fields }.
 */
static nt_fpga_field_init_t pci_wr_tg_tg_ctrl_fields[] = {
	{ PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 },
};

static nt_fpga_field_init_t pci_wr_tg_tg_seq_fields[] = {
	{ PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 },
};

static nt_fpga_field_init_t pci_wr_tg_tg_wraddr_fields[] = {
	{ PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 },
};

static nt_fpga_field_init_t pci_wr_tg_tg_wrdata0_fields[] = {
	{ PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 },
};

static nt_fpga_field_init_t pci_wr_tg_tg_wrdata1_fields[] = {
	{ PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 },
};

static nt_fpga_field_init_t pci_wr_tg_tg_wrdata2_fields[] = {
	{ PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 },
	{ PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 },
	{ PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 },
	{ PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 },
	{ PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 },
};

static nt_fpga_field_init_t pci_wr_tg_tg_wr_run_fields[] = {
	{ PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 },
};

static nt_fpga_register_init_t pci_wr_tg_registers[] = {
	{	PCI_WR_TG_TG_CTRL, 5, 1, REGISTER_TYPE_RO, 0, 1,
		pci_wr_tg_tg_ctrl_fields
	},
	{	PCI_WR_TG_TG_SEQ, 6, 16, REGISTER_TYPE_RW, 0, 1,
		pci_wr_tg_tg_seq_fields
	},
	{	PCI_WR_TG_TG_WRADDR, 3, 9, REGISTER_TYPE_WO, 0, 1,
		pci_wr_tg_tg_wraddr_fields
	},
	{	PCI_WR_TG_TG_WRDATA0, 0, 32, REGISTER_TYPE_WO, 0, 1,
		pci_wr_tg_tg_wrdata0_fields
	},
	{	PCI_WR_TG_TG_WRDATA1, 1, 32, REGISTER_TYPE_WO, 0, 1,
		pci_wr_tg_tg_wrdata1_fields
	},
	{	PCI_WR_TG_TG_WRDATA2, 2, 32, REGISTER_TYPE_WO, 0, 5,
		pci_wr_tg_tg_wrdata2_fields
	},
	{	PCI_WR_TG_TG_WR_RUN, 4, 16, REGISTER_TYPE_WO, 0, 1,
		pci_wr_tg_tg_wr_run_fields
	},
};
+
/*
 * PDB (packet descriptor builder) module tables.
 * Field entries are { field_id, bit_width, bit_offset, reset_value };
 * register entries are { reg_id, offset, bit_width, access_type,
 * reset_value, field_count, fields }.
 */
static nt_fpga_field_init_t pdb_config_fields[] = {
	{ PDB_CONFIG_PORT_OFS, 6, 3, 0 },
	{ PDB_CONFIG_TS_FORMAT, 3, 0, 0 },
};

static nt_fpga_field_init_t pdb_rcp_ctrl_fields[] = {
	{ PDB_RCP_CTRL_ADR, 4, 0, 0x0000 },
	{ PDB_RCP_CTRL_CNT, 16, 16, 0x0000 },
};

static nt_fpga_field_init_t pdb_rcp_data_fields[] = {
	{ PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 },
	{ PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 },
	{ PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 },
	{ PDB_RCP_DATA_DESC_LEN, 5, 4, 0 },
	{ PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 },
	{ PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 },
	{ PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 },
	{ PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 },
	{ PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 },
	{ PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 },
	{ PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 },
	{ PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 },
	{ PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 },
	{ PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 },
	{ PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 },
	{ PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 },
	{ PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 },
	{ PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 },
};

static nt_fpga_register_init_t pdb_registers[] = {
	{ PDB_CONFIG, 2, 10, REGISTER_TYPE_WO, 0, 2, pdb_config_fields },
	{ PDB_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields },
	{ PDB_RCP_DATA, 1, 67, REGISTER_TYPE_WO, 0, 18, pdb_rcp_data_fields },
};
+
+/* PDI module init tables (auto-generated serial-interface block: CR/SR/DTR/DRR suggest a
+ * UART-like control/status/data register set -- verify against FPGA spec).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t pdi_cr_fields[] = {
+	{ PDI_CR_EN, 1, 0, 0 },	  { PDI_CR_PARITY, 1, 4, 0 },
+	{ PDI_CR_RST, 1, 1, 0 },  { PDI_CR_RXRST, 1, 2, 0 },
+	{ PDI_CR_STOP, 1, 5, 0 }, { PDI_CR_TXRST, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_drr_fields[] = {
+	{ PDI_DRR_DRR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_dtr_fields[] = {
+	{ PDI_DTR_DTR, 8, 0, 0 },
+};
+
+static nt_fpga_field_init_t pdi_pre_fields[] = {
+	{ PDI_PRE_PRE, 7, 0, 3 },
+};
+
+static nt_fpga_field_init_t pdi_sr_fields[] = {
+	{ PDI_SR_DISABLE_BUSY, 1, 2, 0 }, { PDI_SR_DONE, 1, 0, 0 },
+	{ PDI_SR_ENABLE_BUSY, 1, 1, 0 },  { PDI_SR_FRAME_ERR, 1, 5, 0 },
+	{ PDI_SR_OVERRUN_ERR, 1, 7, 0 },  { PDI_SR_PARITY_ERR, 1, 6, 0 },
+	{ PDI_SR_RXLVL, 7, 8, 0 },	  { PDI_SR_RX_BUSY, 1, 4, 0 },
+	{ PDI_SR_TXLVL, 7, 15, 0 },	  { PDI_SR_TX_BUSY, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t pdi_srr_fields[] = {
+	{ PDI_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t pdi_registers[] = {
+	{ PDI_CR, 1, 6, REGISTER_TYPE_WO, 0, 6, pdi_cr_fields },
+	{ PDI_DRR, 4, 8, REGISTER_TYPE_RO, 0, 1, pdi_drr_fields },
+	{ PDI_DTR, 3, 8, REGISTER_TYPE_WO, 0, 1, pdi_dtr_fields },
+	{ PDI_PRE, 5, 7, REGISTER_TYPE_WO, 3, 1, pdi_pre_fields },
+	{ PDI_SR, 2, 22, REGISTER_TYPE_RO, 0, 10, pdi_sr_fields },
+	{ PDI_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, pdi_srr_fields },
+};
+
+/* PTP1588 module init tables (auto-generated; IEEE 1588 PTP block by name -- verify).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t ptp1588_conf_fields[] = {
+	{ PTP1588_CONF_MII_RX_TX_LOOP, 1, 0, 0 },
+	{ PTP1588_CONF_MII_TX_RX_LOOP, 1, 1, 0 },
+	{ PTP1588_CONF_PHY_RST1, 1, 10, 1 },
+	{ PTP1588_CONF_PHY_RST2, 1, 11, 1 },
+	{ PTP1588_CONF_PTP_CTRL_LOCAL, 1, 24, 0 },
+	{ PTP1588_CONF_PTP_RX_CTRL, 2, 19, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL, 2, 21, 0 },
+	{ PTP1588_CONF_PTP_TX_CTRL_OS, 1, 23, 0 },
+	{ PTP1588_CONF_RX_IGNORE_DEST_ADDR, 1, 25, 0 },
+	{ PTP1588_CONF_TG_CMD, 2, 13, 0 },
+	{ PTP1588_CONF_TG_MODE, 1, 12, 0 },
+	{ PTP1588_CONF_TSM_MI_ACK, 1, 16, 0 },
+	{ PTP1588_CONF_TSM_MI_BUSY, 1, 15, 0 },
+	{ PTP1588_CONF_TSM_MI_ENA, 1, 18, 0 },
+	{ PTP1588_CONF_TSM_MI_REQ, 1, 17, 0 },
+	{ PTP1588_CONF_TX_IFG, 8, 2, 0 },
+	{ PTP1588_CONF_TX_IGNORE_DEST_ADDR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_fields[] = {
+	{ PTP1588_GP_DATA_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_lh_fields[] = {
+	{ PTP1588_GP_DATA_LH_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_DATA_LH_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_data_ll_fields[] = {
+	{ PTP1588_GP_DATA_LL_GPIO, 9, 1, 511 },
+	{ PTP1588_GP_DATA_LL_PWRDOWN_INTN, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_gp_oe_fields[] = {
+	{ PTP1588_GP_OE_GPIO, 9, 1, 0 },
+	{ PTP1588_GP_OE_PWRDOWN_INTN, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_inband_stat_fields[] = {
+	{ PTP1588_MAC_INBAND_STAT_DUPLEX, 1, 3, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_LINK, 1, 0, 0x0000 },
+	{ PTP1588_MAC_INBAND_STAT_SPEED, 2, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_conf_fields[] = {
+	{ PTP1588_MAC_MI_CONF_ACCESS_TYPE, 1, 16, 0 },
+	{ PTP1588_MAC_MI_CONF_ADDRESS, 16, 0, 0 },
+	{ PTP1588_MAC_MI_CONF_RDY, 1, 17, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_mac_mi_data_fields[] = {
+	{ PTP1588_MAC_MI_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_lsb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_adr_msb_fields[] = {
+	{ PTP1588_RX_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_rx_host_conf_fields[] = {
+	{ PTP1588_RX_HOST_CONF_ENA, 1, 11, 0 },
+	{ PTP1588_RX_HOST_CONF_RDPTR, 11, 0, 0 },
+	{ PTP1588_RX_HOST_CONF_REDUCED, 1, 12, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_fields[] = {
+	{ PTP1588_STAT_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_stat_conf_fields[] = {
+	{ PTP1588_STAT_CONF_INDEX, 5, 0, 0 },
+	{ PTP1588_STAT_CONF_LOCK, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_first_dat_fields[] = {
+	{ PTP1588_TX_FIRST_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last1_dat_fields[] = {
+	{ PTP1588_TX_LAST1_DAT_DAT, 8, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last2_dat_fields[] = {
+	{ PTP1588_TX_LAST2_DAT_DAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last3_dat_fields[] = {
+	{ PTP1588_TX_LAST3_DAT_DAT, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_last4_dat_fields[] = {
+	{ PTP1588_TX_LAST4_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_mid_dat_fields[] = {
+	{ PTP1588_TX_MID_DAT_DAT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_packet_state_fields[] = {
+	{ PTP1588_TX_PACKET_STATE_MSG_TYPE, 4, 16, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_PCK_TYPE, 3, 20, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_SEQ_ID, 16, 0, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_TEST_MARGIN, 7, 23, 0x0000 },
+	{ PTP1588_TX_PACKET_STATE_VALID, 1, 30, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_status_fields[] = {
+	{ PTP1588_TX_STATUS_DB_ERR, 1, 10, 1 },
+	{ PTP1588_TX_STATUS_DB_FULL, 1, 9, 1 },
+	{ PTP1588_TX_STATUS_FIFO_STATUS, 9, 0, 0 },
+	{ PTP1588_TX_STATUS_RDY, 1, 11, 1 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_ns_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_NS_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t ptp1588_tx_timestamp_sec_fields[] = {
+	{ PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP, 32, 0, 0x0000 },
+};
+
+/* Register table for PTP1588; nb_fields matches the length of each field array above. */
+static nt_fpga_register_init_t ptp1588_registers[] = {
+	{	PTP1588_CONF, 0, 27, REGISTER_TYPE_MIXED, 3072, 17,
+		ptp1588_conf_fields
+	},
+	{	PTP1588_GP_DATA, 20, 10, REGISTER_TYPE_RW, 0, 2,
+		ptp1588_gp_data_fields
+	},
+	{	PTP1588_GP_DATA_LH, 22, 10, REGISTER_TYPE_RO, 0, 2,
+		ptp1588_gp_data_lh_fields
+	},
+	{	PTP1588_GP_DATA_LL, 21, 10, REGISTER_TYPE_RO, 1023, 2,
+		ptp1588_gp_data_ll_fields
+	},
+	{ PTP1588_GP_OE, 19, 10, REGISTER_TYPE_WO, 0, 2, ptp1588_gp_oe_fields },
+	{	PTP1588_MAC_INBAND_STAT, 3, 4, REGISTER_TYPE_RO, 0, 3,
+		ptp1588_mac_inband_stat_fields
+	},
+	{	PTP1588_MAC_MI_CONF, 17, 18, REGISTER_TYPE_MIXED, 131072, 3,
+		ptp1588_mac_mi_conf_fields
+	},
+	{	PTP1588_MAC_MI_DATA, 18, 32, REGISTER_TYPE_RW, 0, 1,
+		ptp1588_mac_mi_data_fields
+	},
+	{	PTP1588_RX_HOST_ADR_LSB, 8, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_lsb_fields
+	},
+	{	PTP1588_RX_HOST_ADR_MSB, 9, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_rx_host_adr_msb_fields
+	},
+	{	PTP1588_RX_HOST_CONF, 7, 13, REGISTER_TYPE_RW, 0, 3,
+		ptp1588_rx_host_conf_fields
+	},
+	{ PTP1588_STAT, 6, 32, REGISTER_TYPE_RO, 0, 1, ptp1588_stat_fields },
+	{	PTP1588_STAT_CONF, 5, 6, REGISTER_TYPE_WO, 0, 2,
+		ptp1588_stat_conf_fields
+	},
+	{	PTP1588_TX_FIRST_DAT, 10, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_first_dat_fields
+	},
+	{	PTP1588_TX_LAST1_DAT, 12, 8, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last1_dat_fields
+	},
+	{	PTP1588_TX_LAST2_DAT, 13, 16, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last2_dat_fields
+	},
+	{	PTP1588_TX_LAST3_DAT, 14, 24, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last3_dat_fields
+	},
+	{	PTP1588_TX_LAST4_DAT, 15, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_last4_dat_fields
+	},
+	{	PTP1588_TX_MID_DAT, 11, 32, REGISTER_TYPE_WO, 0, 1,
+		ptp1588_tx_mid_dat_fields
+	},
+	{	PTP1588_TX_PACKET_STATE, 4, 31, REGISTER_TYPE_RO, 0, 5,
+		ptp1588_tx_packet_state_fields
+	},
+	{	PTP1588_TX_STATUS, 16, 12, REGISTER_TYPE_RO, 3584, 4,
+		ptp1588_tx_status_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_NS, 2, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_ns_fields
+	},
+	{	PTP1588_TX_TIMESTAMP_SEC, 1, 32, REGISTER_TYPE_RO, 0, 1,
+		ptp1588_tx_timestamp_sec_fields
+	},
+};
+
+/* QSL module init tables (auto-generated). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t qsl_qen_ctrl_fields[] = {
+	{ QSL_QEN_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_QEN_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qen_data_fields[] = {
+	{ QSL_QEN_DATA_EN, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_ctrl_fields[] = {
+	{ QSL_QST_CTRL_ADR, 12, 0, 0x0000 },
+	{ QSL_QST_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_qst_data_fields[] = {
+	{ QSL_QST_DATA_LRE, 1, 9, 0x0000 },
+	{ QSL_QST_DATA_QEN, 1, 7, 0x0000 },
+	{ QSL_QST_DATA_QUEUE, 7, 0, 0x0000 },
+	{ QSL_QST_DATA_TCI, 16, 10, 0x0000 },
+	{ QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 },
+	{ QSL_QST_DATA_VEN, 1, 26, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_ctrl_fields[] = {
+	{ QSL_RCP_CTRL_ADR, 5, 0, 0x0000 },
+	{ QSL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_rcp_data_fields[] = {
+	{ QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 },
+	{ QSL_RCP_DATA_DROP, 2, 1, 0x0000 },
+	{ QSL_RCP_DATA_LR, 2, 51, 0x0000 },
+	{ QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 },
+	{ QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 },
+	{ QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 },
+	{ QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 },
+	{ QSL_RCP_DATA_TSA, 1, 53, 0x0000 },
+	{ QSL_RCP_DATA_VLI, 2, 54, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_ctrl_fields[] = {
+	{ QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 },
+	{ QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t qsl_unmq_data_fields[] = {
+	{ QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 },
+	{ QSL_UNMQ_DATA_EN, 1, 7, 0x0000 },
+};
+
+static nt_fpga_register_init_t qsl_registers[] = {
+	{ QSL_QEN_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, qsl_qen_ctrl_fields },
+	{ QSL_QEN_DATA, 5, 4, REGISTER_TYPE_WO, 0, 1, qsl_qen_data_fields },
+	{ QSL_QST_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, qsl_qst_ctrl_fields },
+	{ QSL_QST_DATA, 3, 27, REGISTER_TYPE_WO, 0, 6, qsl_qst_data_fields },
+	{ QSL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields },
+	{ QSL_RCP_DATA, 1, 56, REGISTER_TYPE_WO, 0, 9, qsl_rcp_data_fields },
+	{ QSL_UNMQ_CTRL, 6, 32, REGISTER_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields },
+	{ QSL_UNMQ_DATA, 7, 8, REGISTER_TYPE_WO, 0, 2, qsl_unmq_data_fields },
+};
+
+/* QSPI module init tables (auto-generated; CR/SR/DTR/DRR/SSR naming matches the Xilinx AXI
+ * Quad SPI register set -- verify against FPGA spec).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t qspi_cr_fields[] = {
+	{ QSPI_CR_CPHA, 1, 4, 0 },  { QSPI_CR_CPOL, 1, 3, 0 },
+	{ QSPI_CR_LOOP, 1, 0, 0 },  { QSPI_CR_LSBF, 1, 9, 0 },
+	{ QSPI_CR_MSSAE, 1, 7, 1 }, { QSPI_CR_MST, 1, 2, 0 },
+	{ QSPI_CR_MTI, 1, 8, 1 },   { QSPI_CR_RXFIFO_RST, 1, 6, 0 },
+	{ QSPI_CR_SPE, 1, 1, 0 },   { QSPI_CR_TXFIFO_RST, 1, 5, 0 },
+};
+
+static nt_fpga_field_init_t qspi_dgie_fields[] = {
+	{ QSPI_DGIE_GIE, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t qspi_drr_fields[] = {
+	{ QSPI_DRR_DATA_VAL, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_dtr_fields[] = {
+	{ QSPI_DTR_DATA_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_ier_fields[] = {
+	{ QSPI_IER_CMD_ERR, 1, 13, 0 }, { QSPI_IER_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_IER_DRR_FULL, 1, 4, 0 }, { QSPI_IER_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_IER_DRR_OR, 1, 5, 0 },	{ QSPI_IER_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_IER_DTR_UR, 1, 3, 0 },	{ QSPI_IER_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_IER_MODF, 1, 0, 0 },	{ QSPI_IER_MSB_ERR, 1, 11, 0 },
+	{ QSPI_IER_SLV_ERR, 1, 10, 0 }, { QSPI_IER_SLV_MODF, 1, 1, 0 },
+	{ QSPI_IER_SLV_MS, 1, 7, 0 },	{ QSPI_IER_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_isr_fields[] = {
+	{ QSPI_ISR_CMD_ERR, 1, 13, 0 }, { QSPI_ISR_CPOL_CPHA_ERR, 1, 9, 0 },
+	{ QSPI_ISR_DRR_FULL, 1, 4, 0 }, { QSPI_ISR_DRR_NEMPTY, 1, 8, 0 },
+	{ QSPI_ISR_DRR_OR, 1, 5, 0 },	{ QSPI_ISR_DTR_EMPTY, 1, 2, 0 },
+	{ QSPI_ISR_DTR_UR, 1, 3, 0 },	{ QSPI_ISR_LOOP_ERR, 1, 12, 0 },
+	{ QSPI_ISR_MODF, 1, 0, 0 },	{ QSPI_ISR_MSB_ERR, 1, 11, 0 },
+	{ QSPI_ISR_SLV_ERR, 1, 10, 0 }, { QSPI_ISR_SLV_MODF, 1, 1, 0 },
+	{ QSPI_ISR_SLV_MS, 1, 7, 0 },	{ QSPI_ISR_TXFIFO_HEMPTY, 1, 6, 0 },
+};
+
+static nt_fpga_field_init_t qspi_rx_fifo_ocy_fields[] = {
+	{ QSPI_RX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t qspi_sr_fields[] = {
+	{ QSPI_SR_CMD_ERR, 1, 10, 0 }, { QSPI_SR_CPOL_CPHA_ERR, 1, 6, 0 },
+	{ QSPI_SR_LOOP_ERR, 1, 9, 0 }, { QSPI_SR_MODF, 1, 4, 0 },
+	{ QSPI_SR_MSB_ERR, 1, 8, 0 },  { QSPI_SR_RXEMPTY, 1, 0, 1 },
+	{ QSPI_SR_RXFULL, 1, 1, 0 },   { QSPI_SR_SLVMS, 1, 5, 1 },
+	{ QSPI_SR_SLV_ERR, 1, 7, 0 },  { QSPI_SR_TXEMPTY, 1, 2, 1 },
+	{ QSPI_SR_TXFULL, 1, 3, 0 },
+};
+
+static nt_fpga_field_init_t qspi_srr_fields[] = {
+	{ QSPI_SRR_RST, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t qspi_ssr_fields[] = {
+	{ QSPI_SSR_SEL_SLV, 32, 0, 4294967295 },
+};
+
+static nt_fpga_field_init_t qspi_tx_fifo_ocy_fields[] = {
+	{ QSPI_TX_FIFO_OCY_OCY_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_register_init_t qspi_registers[] = {
+	{ QSPI_CR, 24, 10, REGISTER_TYPE_RW, 384, 10, qspi_cr_fields },
+	{ QSPI_DGIE, 7, 32, REGISTER_TYPE_RW, 0, 1, qspi_dgie_fields },
+	{ QSPI_DRR, 27, 32, REGISTER_TYPE_RO, 0, 1, qspi_drr_fields },
+	{ QSPI_DTR, 26, 32, REGISTER_TYPE_WO, 0, 1, qspi_dtr_fields },
+	{ QSPI_IER, 10, 14, REGISTER_TYPE_RW, 0, 14, qspi_ier_fields },
+	{ QSPI_ISR, 8, 14, REGISTER_TYPE_RW, 0, 14, qspi_isr_fields },
+	{	QSPI_RX_FIFO_OCY, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_rx_fifo_ocy_fields
+	},
+	{ QSPI_SR, 25, 11, REGISTER_TYPE_RO, 37, 11, qspi_sr_fields },
+	{ QSPI_SRR, 16, 4, REGISTER_TYPE_WO, 0, 1, qspi_srr_fields },
+	{ QSPI_SSR, 28, 32, REGISTER_TYPE_RW, 4294967295, 1, qspi_ssr_fields },
+	{	QSPI_TX_FIFO_OCY, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		qspi_tx_fifo_ocy_fields
+	},
+};
+
+/* RAC module init tables (auto-generated; RAB in/out buffers plus DMA pointer registers).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }.
+ * NOTE(review): unlike other modules, RAC register offsets are large absolute-looking
+ * values (4160..4480) rather than small indices -- confirm addressing scheme. */
+static nt_fpga_field_init_t rac_dbg_ctrl_fields[] = {
+	{ RAC_DBG_CTRL_C, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_dbg_data_fields[] = {
+	{ RAC_DBG_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_free_fields[] = {
+	{ RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 },
+	{ RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 },
+	{ RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 },
+	{ RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 },
+	{ RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_buf_used_fields[] = {
+	{ RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 },
+	{ RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 },
+	{ RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_hi_fields[] = {
+	{ RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_lo_fields[] = {
+	{ RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_rd_fields[] = {
+	{ RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ib_wr_fields[] = {
+	{ RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_hi_fields[] = {
+	{ RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_lo_fields[] = {
+	{ RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_dma_ob_wr_fields[] = {
+	{ RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t rac_rab_ib_data_fields[] = {
+	{ RAC_RAB_IB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rac_rab_init_fields[] = {
+	{ RAC_RAB_INIT_RAB, 3, 0, 7 },
+};
+
+static nt_fpga_field_init_t rac_rab_ob_data_fields[] = {
+	{ RAC_RAB_OB_DATA_D, 32, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rac_registers[] = {
+	{ RAC_DBG_CTRL, 4200, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_ctrl_fields },
+	{ RAC_DBG_DATA, 4208, 32, REGISTER_TYPE_RW, 0, 1, rac_dbg_data_fields },
+	{	RAC_RAB_BUF_FREE, 4176, 32, REGISTER_TYPE_MIXED, 33489407, 5,
+		rac_rab_buf_free_fields
+	},
+	{	RAC_RAB_BUF_USED, 4184, 32, REGISTER_TYPE_MIXED, 0, 3,
+		rac_rab_buf_used_fields
+	},
+	{	RAC_RAB_DMA_IB_HI, 4360, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_hi_fields
+	},
+	{	RAC_RAB_DMA_IB_LO, 4352, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_lo_fields
+	},
+	{	RAC_RAB_DMA_IB_RD, 4424, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ib_rd_fields
+	},
+	{	RAC_RAB_DMA_IB_WR, 4416, 16, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ib_wr_fields
+	},
+	{	RAC_RAB_DMA_OB_HI, 4376, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_hi_fields
+	},
+	{	RAC_RAB_DMA_OB_LO, 4368, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_dma_ob_lo_fields
+	},
+	{	RAC_RAB_DMA_OB_WR, 4480, 16, REGISTER_TYPE_RO, 0, 1,
+		rac_rab_dma_ob_wr_fields
+	},
+	{	RAC_RAB_IB_DATA, 4160, 32, REGISTER_TYPE_WO, 0, 1,
+		rac_rab_ib_data_fields
+	},
+	{ RAC_RAB_INIT, 4192, 3, REGISTER_TYPE_RW, 7, 1, rac_rab_init_fields },
+	{	RAC_RAB_OB_DATA, 4168, 32, REGISTER_TYPE_RC1, 0, 1,
+		rac_rab_ob_data_fields
+	},
+};
+
+/* RFD module init tables (auto-generated). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }.
+ * Reset constants: 33024 == 0x8100 (802.1Q TPID) and 4789 is the VXLAN UDP port,
+ * matching the VLAN/VXLAN field names. */
+static nt_fpga_field_init_t rfd_ctrl_fields[] = {
+	{ RFD_CTRL_CFP, 1, 2, 1 },
+	{ RFD_CTRL_ISL, 1, 0, 1 },
+	{ RFD_CTRL_PWMCW, 1, 1, 1 },
+};
+
+static nt_fpga_field_init_t rfd_max_frame_size_fields[] = {
+	{ RFD_MAX_FRAME_SIZE_MAX, 14, 0, 9018 },
+};
+
+static nt_fpga_field_init_t rfd_tnl_vlan_fields[] = {
+	{ RFD_TNL_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_TNL_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vlan_fields[] = {
+	{ RFD_VLAN_TPID0, 16, 0, 33024 },
+	{ RFD_VLAN_TPID1, 16, 16, 33024 },
+};
+
+static nt_fpga_field_init_t rfd_vxlan_fields[] = {
+	{ RFD_VXLAN_DP0, 16, 0, 4789 },
+	{ RFD_VXLAN_DP1, 16, 16, 4789 },
+};
+
+static nt_fpga_register_init_t rfd_registers[] = {
+	{ RFD_CTRL, 0, 3, REGISTER_TYPE_WO, 7, 3, rfd_ctrl_fields },
+	{	RFD_MAX_FRAME_SIZE, 1, 14, REGISTER_TYPE_WO, 9018, 1,
+		rfd_max_frame_size_fields
+	},
+	{	RFD_TNL_VLAN, 3, 32, REGISTER_TYPE_WO, 2164293888, 2,
+		rfd_tnl_vlan_fields
+	},
+	{ RFD_VLAN, 2, 32, REGISTER_TYPE_WO, 2164293888, 2, rfd_vlan_fields },
+	{ RFD_VXLAN, 4, 32, REGISTER_TYPE_WO, 313856693, 2, rfd_vxlan_fields },
+};
+
+/* RMC module init tables (auto-generated). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t rmc_ctrl_fields[] = {
+	{ RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 },
+	{ RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 },
+	{ RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 },
+	{ RMC_CTRL_BLOCK_STATT, 1, 0, 1 },
+	{ RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 },
+};
+
+static nt_fpga_field_init_t rmc_dbg_fields[] = {
+	{ RMC_DBG_MERGE, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_mac_if_fields[] = {
+	{ RMC_MAC_IF_ERR, 31, 0, 0 },
+};
+
+static nt_fpga_field_init_t rmc_status_fields[] = {
+	{ RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 },
+	{ RMC_STATUS_SF_RAM_OF, 1, 0, 0 },
+};
+
+static nt_fpga_register_init_t rmc_registers[] = {
+	{ RMC_CTRL, 0, 25, REGISTER_TYPE_RW, 771, 5, rmc_ctrl_fields },
+	{ RMC_DBG, 2, 31, REGISTER_TYPE_RO, 0, 1, rmc_dbg_fields },
+	{ RMC_MAC_IF, 3, 31, REGISTER_TYPE_RO, 0, 1, rmc_mac_if_fields },
+	{ RMC_STATUS, 1, 17, REGISTER_TYPE_RO, 0, 2, rmc_status_fields },
+};
+
+/* RPL module init tables (auto-generated). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t rpl_ext_ctrl_fields[] = {
+	{ RPL_EXT_CTRL_ADR, 10, 0, 0x0000 },
+	{ RPL_EXT_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_ext_data_fields[] = {
+	{ RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_ctrl_fields[] = {
+	{ RPL_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPL_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rcp_data_fields[] = {
+	{ RPL_RCP_DATA_DYN, 5, 0, 0x0000 },
+	{ RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 },
+	{ RPL_RCP_DATA_LEN, 8, 15, 0x0000 },
+	{ RPL_RCP_DATA_OFS, 10, 5, 0x0000 },
+	{ RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpl_rpl_ctrl_fields[] = {
+	{ RPL_RPL_CTRL_ADR, 12, 0, 0x0000 },
+	{ RPL_RPL_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+/* 128-bit wide data register (widths above 64 bits are allowed by the table format). */
+static nt_fpga_field_init_t rpl_rpl_data_fields[] = {
+	{ RPL_RPL_DATA_VALUE, 128, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpl_registers[] = {
+	{ RPL_EXT_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2, rpl_ext_ctrl_fields },
+	{ RPL_EXT_DATA, 3, 12, REGISTER_TYPE_WO, 0, 1, rpl_ext_data_fields },
+	{ RPL_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields },
+	{ RPL_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 5, rpl_rcp_data_fields },
+	{ RPL_RPL_CTRL, 4, 32, REGISTER_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields },
+	{ RPL_RPL_DATA, 5, 128, REGISTER_TYPE_WO, 0, 1, rpl_rpl_data_fields },
+};
+
+/* RPP_LR module init tables (auto-generated). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_ctrl_fields[] = {
+	{ RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_ifr_rcp_data_fields[] = {
+	{ RPP_LR_IFR_RCP_DATA_EN, 1, 0, 0x0000 },
+	{ RPP_LR_IFR_RCP_DATA_MTU, 14, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_ctrl_fields[] = {
+	{ RPP_LR_RCP_CTRL_ADR, 4, 0, 0x0000 },
+	{ RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t rpp_lr_rcp_data_fields[] = {
+	{ RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t rpp_lr_registers[] = {
+	{	RPP_LR_IFR_RCP_CTRL, 2, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_ctrl_fields
+	},
+	{	RPP_LR_IFR_RCP_DATA, 3, 15, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_ifr_rcp_data_fields
+	},
+	{	RPP_LR_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2,
+		rpp_lr_rcp_ctrl_fields
+	},
+	{	RPP_LR_RCP_DATA, 1, 14, REGISTER_TYPE_WO, 0, 1,
+		rpp_lr_rcp_data_fields
+	},
+};
+
+/* RST9563 module init tables (auto-generated reset/clock-control block for the 9563 FPGA:
+ * per-domain reset bits plus MMCM/PLL lock status and sticky-unlock registers).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t rst9563_ctrl_fields[] = {
+	{ RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 },
+	{ RST9563_CTRL_TS_CLKSEL, 1, 1, 1 },
+	{ RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 },
+};
+
+static nt_fpga_field_init_t rst9563_power_fields[] = {
+	{ RST9563_POWER_PU_NSEB, 1, 1, 0 },
+	{ RST9563_POWER_PU_PHY, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_rst_fields[] = {
+	{ RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 },
+	{ RST9563_RST_MAC_RX, 2, 9, 3 },     { RST9563_RST_PERIPH, 1, 13, 0 },
+	{ RST9563_RST_PHY, 2, 7, 3 },	     { RST9563_RST_PTP, 1, 11, 1 },
+	{ RST9563_RST_PTP_MMCM, 1, 16, 0 },  { RST9563_RST_RPP, 1, 2, 1 },
+	{ RST9563_RST_SDC, 1, 6, 1 },	     { RST9563_RST_SYS, 1, 0, 1 },
+	{ RST9563_RST_SYS_MMCM, 1, 14, 0 },  { RST9563_RST_TMC, 1, 1, 1 },
+	{ RST9563_RST_TS, 1, 12, 1 },	     { RST9563_RST_TS_MMCM, 1, 17, 0 },
+};
+
+static nt_fpga_field_init_t rst9563_stat_fields[] = {
+	{ RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 },
+	{ RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 },
+	{ RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 },
+	{ RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 },
+	{ RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 },
+	{ RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_field_init_t rst9563_sticky_fields[] = {
+	{ RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 },
+	{ RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 },
+	{ RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 },
+	{ RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 },
+	{ RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 },
+	{ RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 },
+};
+
+static nt_fpga_register_init_t rst9563_registers[] = {
+	{ RST9563_CTRL, 1, 3, REGISTER_TYPE_RW, 7, 3, rst9563_ctrl_fields },
+	{ RST9563_POWER, 4, 2, REGISTER_TYPE_RW, 0, 2, rst9563_power_fields },
+	{ RST9563_RST, 0, 18, REGISTER_TYPE_RW, 8191, 14, rst9563_rst_fields },
+	{ RST9563_STAT, 2, 6, REGISTER_TYPE_RO, 0, 6, rst9563_stat_fields },
+	{	RST9563_STICKY, 3, 6, REGISTER_TYPE_RC1, 0, 6,
+		rst9563_sticky_fields
+	},
+};
+
+/* SLC module init tables (auto-generated). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t slc_rcp_ctrl_fields[] = {
+	{ SLC_RCP_CTRL_ADR, 6, 0, 0x0000 },
+	{ SLC_RCP_CTRL_CNT, 16, 16, 0x0000 },
+};
+
+static nt_fpga_field_init_t slc_rcp_data_fields[] = {
+	{ SLC_RCP_DATA_PCAP, 1, 35, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 },
+	{ SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 },
+};
+
+static nt_fpga_register_init_t slc_registers[] = {
+	{ SLC_RCP_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 2, slc_rcp_ctrl_fields },
+	{ SLC_RCP_DATA, 1, 36, REGISTER_TYPE_WO, 0, 4, slc_rcp_data_fields },
+};
+
+/* SPIM module init tables (auto-generated; SPI-master-style CR/SR/DTR/DRR register set --
+ * verify against FPGA spec). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t spim_cfg_fields[] = {
+	{ SPIM_CFG_PRE, 3, 0, 5 },
+};
+
+static nt_fpga_field_init_t spim_cr_fields[] = {
+	{ SPIM_CR_EN, 1, 1, 0 },
+	{ SPIM_CR_LOOP, 1, 0, 0 },
+	{ SPIM_CR_RXRST, 1, 3, 0 },
+	{ SPIM_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spim_drr_fields[] = {
+	{ SPIM_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_dtr_fields[] = {
+	{ SPIM_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spim_sr_fields[] = {
+	{ SPIM_SR_DONE, 1, 0, 0 },    { SPIM_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIM_SR_RXFULL, 1, 4, 0 },  { SPIM_SR_RXLVL, 8, 16, 0 },
+	{ SPIM_SR_TXEMPTY, 1, 1, 1 }, { SPIM_SR_TXFULL, 1, 3, 0 },
+	{ SPIM_SR_TXLVL, 8, 8, 0 },
+};
+
+static nt_fpga_field_init_t spim_srr_fields[] = {
+	{ SPIM_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spim_registers[] = {
+	{ SPIM_CFG, 5, 3, REGISTER_TYPE_WO, 5, 1, spim_cfg_fields },
+	{ SPIM_CR, 1, 4, REGISTER_TYPE_WO, 0, 4, spim_cr_fields },
+	{ SPIM_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spim_drr_fields },
+	{ SPIM_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spim_dtr_fields },
+	{ SPIM_SR, 2, 24, REGISTER_TYPE_RO, 6, 7, spim_sr_fields },
+	{ SPIM_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spim_srr_fields },
+};
+
+/* SPIS module init tables (auto-generated; SPI-slave-style counterpart of SPIM, with an
+ * additional RAM access window -- verify against FPGA spec).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t spis_cr_fields[] = {
+	{ SPIS_CR_DEBUG, 1, 4, 0 }, { SPIS_CR_EN, 1, 1, 0 },
+	{ SPIS_CR_LOOP, 1, 0, 0 },  { SPIS_CR_RXRST, 1, 3, 0 },
+	{ SPIS_CR_TXRST, 1, 2, 0 },
+};
+
+static nt_fpga_field_init_t spis_drr_fields[] = {
+	{ SPIS_DRR_DRR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_dtr_fields[] = {
+	{ SPIS_DTR_DTR, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_ctrl_fields[] = {
+	{ SPIS_RAM_CTRL_ADR, 6, 0, 0 },
+	{ SPIS_RAM_CTRL_CNT, 6, 6, 0 },
+};
+
+static nt_fpga_field_init_t spis_ram_data_fields[] = {
+	{ SPIS_RAM_DATA_DATA, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t spis_sr_fields[] = {
+	{ SPIS_SR_DONE, 1, 0, 0 },	{ SPIS_SR_FRAME_ERR, 1, 24, 0 },
+	{ SPIS_SR_READ_ERR, 1, 25, 0 }, { SPIS_SR_RXEMPTY, 1, 2, 1 },
+	{ SPIS_SR_RXFULL, 1, 4, 0 },	{ SPIS_SR_RXLVL, 8, 16, 0 },
+	{ SPIS_SR_TXEMPTY, 1, 1, 1 },	{ SPIS_SR_TXFULL, 1, 3, 0 },
+	{ SPIS_SR_TXLVL, 8, 8, 0 },	{ SPIS_SR_WRITE_ERR, 1, 26, 0 },
+};
+
+static nt_fpga_field_init_t spis_srr_fields[] = {
+	{ SPIS_SRR_RST, 4, 0, 0 },
+};
+
+static nt_fpga_register_init_t spis_registers[] = {
+	{ SPIS_CR, 1, 5, REGISTER_TYPE_WO, 0, 5, spis_cr_fields },
+	{ SPIS_DRR, 4, 32, REGISTER_TYPE_RO, 0, 1, spis_drr_fields },
+	{ SPIS_DTR, 3, 32, REGISTER_TYPE_WO, 0, 1, spis_dtr_fields },
+	{ SPIS_RAM_CTRL, 5, 12, REGISTER_TYPE_RW, 0, 2, spis_ram_ctrl_fields },
+	{ SPIS_RAM_DATA, 6, 32, REGISTER_TYPE_RW, 0, 1, spis_ram_data_fields },
+	{ SPIS_SR, 2, 27, REGISTER_TYPE_RO, 6, 10, spis_sr_fields },
+	{ SPIS_SRR, 0, 4, REGISTER_TYPE_WO, 0, 1, spis_srr_fields },
+};
+
+/* STA module init tables (auto-generated statistics block: byte/packet/error counters plus
+ * a host DMA address). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t sta_byte_fields[] = {
+	{ STA_BYTE_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_cfg_fields[] = {
+	{ STA_CFG_CNT_CLEAR, 1, 1, 0 },
+	{ STA_CFG_DMA_ENA, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_cv_err_fields[] = {
+	{ STA_CV_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_fcs_err_fields[] = {
+	{ STA_FCS_ERR_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_lsb_fields[] = {
+	{ STA_HOST_ADR_LSB_LSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_host_adr_msb_fields[] = {
+	{ STA_HOST_ADR_MSB_MSB, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t sta_pckt_fields[] = {
+	{ STA_PCKT_CNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t sta_status_fields[] = {
+	{ STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t sta_registers[] = {
+	{ STA_BYTE, 4, 32, REGISTER_TYPE_RO, 0, 1, sta_byte_fields },
+	{ STA_CFG, 0, 2, REGISTER_TYPE_RW, 0, 2, sta_cfg_fields },
+	{ STA_CV_ERR, 5, 32, REGISTER_TYPE_RO, 0, 1, sta_cv_err_fields },
+	{ STA_FCS_ERR, 6, 32, REGISTER_TYPE_RO, 0, 1, sta_fcs_err_fields },
+	{	STA_HOST_ADR_LSB, 1, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_lsb_fields
+	},
+	{	STA_HOST_ADR_MSB, 2, 32, REGISTER_TYPE_WO, 0, 1,
+		sta_host_adr_msb_fields
+	},
+	{ STA_PCKT, 3, 32, REGISTER_TYPE_RO, 0, 1, sta_pckt_fields },
+	{ STA_STATUS, 7, 1, REGISTER_TYPE_RC1, 0, 1, sta_status_fields },
+};
+
+/* TEMPMON module init tables (auto-generated temperature monitor).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t tempmon_alarms_fields[] = {
+	{ TEMPMON_ALARMS_OT, 1, 1, 0x0000 },
+	{ TEMPMON_ALARMS_OT_OVERWR, 1, 2, 0 },
+	{ TEMPMON_ALARMS_OT_OVERWRVAL, 1, 3, 0 },
+	{ TEMPMON_ALARMS_TEMP, 1, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tempmon_stat_fields[] = {
+	{ TEMPMON_STAT_TEMP, 12, 0, 0x0000 },
+};
+
+static nt_fpga_register_init_t tempmon_registers[] = {
+	{	TEMPMON_ALARMS, 1, 4, REGISTER_TYPE_MIXED, 0, 4,
+		tempmon_alarms_fields
+	},
+	{ TEMPMON_STAT, 0, 12, REGISTER_TYPE_RO, 0, 1, tempmon_stat_fields },
+};
+
+/* TINT module init tables (auto-generated). Field entries: { id, bit width, lsb offset, reset };
+ * register entries: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t tint_ctrl_fields[] = {
+	{ TINT_CTRL_INTERVAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tint_status_fields[] = {
+	{ TINT_STATUS_DELAYED, 8, 8, 0 },
+	{ TINT_STATUS_SKIPPED, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tint_registers[] = {
+	{ TINT_CTRL, 0, 32, REGISTER_TYPE_WO, 0, 1, tint_ctrl_fields },
+	{ TINT_STATUS, 1, 16, REGISTER_TYPE_RC1, 0, 2, tint_status_fields },
+};
+
+/* TMC module init tables (auto-generated; single per-port replication register).
+ * Field entries: { id, bit width, lsb offset, reset };
+ * register entry: { id, offset, bit width, access type, reset, nb_fields, fields }. */
+static nt_fpga_field_init_t tmc_port_rpl_fields[] = {
+	{ TMC_PORT_RPL_P0, 1, 0, 0 },
+	{ TMC_PORT_RPL_P1, 1, 1, 1 },
+};
+
+static nt_fpga_register_init_t tmc_registers[] = {
+	{ TMC_PORT_RPL, 0, 2, REGISTER_TYPE_WO, 2, 2, tmc_port_rpl_fields },
+};
+
+static nt_fpga_field_init_t tsm_con0_config_fields[] = {
+	{ TSM_CON0_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON0_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con0_interface_fields[] = { /* entries are { field id, bit width, lsb offset, reset value } */
+	{ TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 },
+	{ TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 },
+	{ TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 },
+	{ TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 },
+	{ TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_hi_fields[] = {
+	{ TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con0_sample_lo_fields[] = {
+	{ TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_config_fields[] = {
+	{ TSM_CON1_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON1_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_hi_fields[] = {
+	{ TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con1_sample_lo_fields[] = {
+	{ TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_config_fields[] = {
+	{ TSM_CON2_CONFIG_BLIND, 5, 8, 9 },
+	{ TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 },
+	{ TSM_CON2_CONFIG_PORT, 3, 0, 0 },
+	{ TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 },
+	{ TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_hi_fields[] = {
+	{ TSM_CON2_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con2_sample_lo_fields[] = {
+	{ TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_config_fields[] = { /* CON3-CON6 carry a reduced config: no DC_SRC/PPSIN_2_5V fields */
+	{ TSM_CON3_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON3_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_hi_fields[] = {
+	{ TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con3_sample_lo_fields[] = {
+	{ TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_config_fields[] = {
+	{ TSM_CON4_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON4_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_hi_fields[] = {
+	{ TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con4_sample_lo_fields[] = {
+	{ TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_config_fields[] = {
+	{ TSM_CON5_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON5_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_hi_fields[] = {
+	{ TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con5_sample_lo_fields[] = {
+	{ TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 }, /* NOTE(review): named _TIME, unlike _NS on CON0-4/6 - presumably a register-map/generator artifact; confirm */
+};
+
+static nt_fpga_field_init_t tsm_con6_config_fields[] = {
+	{ TSM_CON6_CONFIG_BLIND, 5, 5, 26 },
+	{ TSM_CON6_CONFIG_PORT, 3, 0, 1 },
+	{ TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_hi_fields[] = {
+	{ TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con6_sample_lo_fields[] = {
+	{ TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_hi_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_con7_host_sample_lo_fields[] = {
+	{ TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_config_fields[] = { /* entries are { field id, bit width, lsb offset, reset value } */
+	{ TSM_CONFIG_NTTS_SRC, 2, 5, 0 },
+	{ TSM_CONFIG_NTTS_SYNC, 1, 4, 0 },
+	{ TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 },
+	{ TSM_CONFIG_TIMESET_SRC, 3, 10, 0 },
+	{ TSM_CONFIG_TIMESET_UP, 1, 7, 0 },
+	{ TSM_CONFIG_TS_FORMAT, 4, 0, 1 },
+};
+
+static nt_fpga_field_init_t tsm_int_config_fields[] = {
+	{ TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 },
+	{ TSM_INT_CONFIG_MASK, 19, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_int_stat_fields[] = {
+	{ TSM_INT_STAT_CAUSE, 19, 1, 0 },
+	{ TSM_INT_STAT_ENABLE, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_led_fields[] = { /* three LEDs packed 9 bits apart (mode, color, bg color, src each) */
+	{ TSM_LED_LED0_BG_COLOR, 2, 3, 0 },  { TSM_LED_LED0_COLOR, 2, 1, 0 },
+	{ TSM_LED_LED0_MODE, 1, 0, 0 },	     { TSM_LED_LED0_SRC, 4, 5, 0 },
+	{ TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 },
+	{ TSM_LED_LED1_MODE, 1, 9, 0 },	     { TSM_LED_LED1_SRC, 4, 14, 1 },
+	{ TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 },
+	{ TSM_LED_LED2_MODE, 1, 18, 0 },     { TSM_LED_LED2_SRC, 4, 23, 2 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_config_fields[] = {
+	{ TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 },
+	{ TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 },
+	{ TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 },
+	{ TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ext_stat_fields[] = {
+	{ TSM_NTTS_EXT_STAT_PRIMARY_ID, 8, 16, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_PRIMARY_REV, 8, 24, 0x0000 },
+	{ TSM_NTTS_EXT_STAT_PRIMARY_STAT, 16, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_hi_fields[] = {
+	{ TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_limit_lo_fields[] = {
+	{ TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_offset_fields[] = {
+	{ TSM_NTTS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_hi_fields[] = {
+	{ TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_sample_lo_fields[] = {
+	{ TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_stat_fields[] = {
+	{ TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 },
+	{ TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 },
+	{ TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_hi_fields[] = {
+	{ TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_lo_fields[] = {
+	{ TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ntts_ts_t0_offset_fields[] = {
+	{ TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pb_ctrl_fields[] = {
+	{ TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 },
+	{ TSM_PB_CTRL_RST, 1, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pb_instmem_fields[] = {
+	{ TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 },
+	{ TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_i_fields[] = {
+	{ TSM_PI_CTRL_I_VAL, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_ki_fields[] = {
+	{ TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_kp_fields[] = {
+	{ TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_pi_ctrl_shl_fields[] = {
+	{ TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_stat_fields[] = {
+	{ TSM_STAT_HARD_SYNC, 8, 8, 0 },      { TSM_STAT_LINK_CON0, 1, 0, 0 },
+	{ TSM_STAT_LINK_CON1, 1, 1, 0 },      { TSM_STAT_LINK_CON2, 1, 2, 0 },
+	{ TSM_STAT_LINK_CON3, 1, 3, 0 },      { TSM_STAT_LINK_CON4, 1, 4, 0 },
+	{ TSM_STAT_LINK_CON5, 1, 5, 0 },      { TSM_STAT_NTTS_INSYNC, 1, 6, 0 },
+	{ TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_ctrl_fields[] = {
+	{ TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 },
+	{ TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t0_fields[] = {
+	{ TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_timer_t1_fields[] = {
+	{ TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_hi_fields[] = {
+	{ TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hardset_lo_fields[] = {
+	{ TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_hi_fields[] = {
+	{ TSM_TIME_HI_SEC, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_lo_fields[] = {
+	{ TSM_TIME_LO_NS, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_time_rate_adj_fields[] = {
+	{ TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_hi_fields[] = {
+	{ TSM_TS_HI_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_lo_fields[] = {
+	{ TSM_TS_LO_TIME, 32, 0, 0x0000 },
+};
+
+static nt_fpga_field_init_t tsm_ts_offset_fields[] = {
+	{ TSM_TS_OFFSET_NS, 30, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_fields[] = {
+	{ TSM_TS_STAT_OVERRUN, 1, 16, 0 },
+	{ TSM_TS_STAT_SAMPLES, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_hi_offset_fields[] = {
+	{ TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_lo_offset_fields[] = {
+	{ TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_hi_fields[] = {
+	{ TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_tar_lo_fields[] = {
+	{ TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x_fields[] = {
+	{ TSM_TS_STAT_X_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_hi_fields[] = {
+	{ TSM_TS_STAT_X2_HI_NS, 16, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_ts_stat_x2_lo_fields[] = {
+	{ TSM_TS_STAT_X2_LO_NS, 32, 0, 0 },
+};
+
+static nt_fpga_field_init_t tsm_utc_offset_fields[] = {
+	{ TSM_UTC_OFFSET_SEC, 8, 0, 0 },
+};
+
+static nt_fpga_register_init_t tsm_registers[] = { /* { register id, address offset, bit width, type, reset value, field count, fields } */
+	{	TSM_CON0_CONFIG, 24, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con0_config_fields
+	},
+	{	TSM_CON0_INTERFACE, 25, 20, REGISTER_TYPE_RW, 524291, 5,
+		tsm_con0_interface_fields
+	},
+	{	TSM_CON0_SAMPLE_HI, 27, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_hi_fields
+	},
+	{	TSM_CON0_SAMPLE_LO, 26, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con0_sample_lo_fields
+	},
+	{	TSM_CON1_CONFIG, 28, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con1_config_fields
+	},
+	{	TSM_CON1_SAMPLE_HI, 30, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_hi_fields
+	},
+	{	TSM_CON1_SAMPLE_LO, 29, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con1_sample_lo_fields
+	},
+	{	TSM_CON2_CONFIG, 31, 14, REGISTER_TYPE_RW, 2320, 5,
+		tsm_con2_config_fields
+	},
+	{	TSM_CON2_SAMPLE_HI, 33, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_hi_fields
+	},
+	{	TSM_CON2_SAMPLE_LO, 32, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con2_sample_lo_fields
+	},
+	{	TSM_CON3_CONFIG, 34, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con3_config_fields
+	},
+	{	TSM_CON3_SAMPLE_HI, 36, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_hi_fields
+	},
+	{	TSM_CON3_SAMPLE_LO, 35, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con3_sample_lo_fields
+	},
+	{	TSM_CON4_CONFIG, 37, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con4_config_fields
+	},
+	{	TSM_CON4_SAMPLE_HI, 39, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_hi_fields
+	},
+	{	TSM_CON4_SAMPLE_LO, 38, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con4_sample_lo_fields
+	},
+	{	TSM_CON5_CONFIG, 40, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con5_config_fields
+	},
+	{	TSM_CON5_SAMPLE_HI, 42, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_hi_fields
+	},
+	{	TSM_CON5_SAMPLE_LO, 41, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con5_sample_lo_fields
+	},
+	{	TSM_CON6_CONFIG, 43, 10, REGISTER_TYPE_RW, 841, 3,
+		tsm_con6_config_fields
+	},
+	{	TSM_CON6_SAMPLE_HI, 45, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_hi_fields
+	},
+	{	TSM_CON6_SAMPLE_LO, 44, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con6_sample_lo_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_HI, 47, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_hi_fields
+	},
+	{	TSM_CON7_HOST_SAMPLE_LO, 46, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_con7_host_sample_lo_fields
+	},
+	{ TSM_CONFIG, 0, 13, REGISTER_TYPE_RW, 257, 6, tsm_config_fields },
+	{	TSM_INT_CONFIG, 2, 20, REGISTER_TYPE_RW, 0, 2,
+		tsm_int_config_fields
+	},
+	{ TSM_INT_STAT, 3, 20, REGISTER_TYPE_MIXED, 0, 2, tsm_int_stat_fields },
+	{ TSM_LED, 4, 27, REGISTER_TYPE_RW, 16793600, 12, tsm_led_fields },
+	{	TSM_NTTS_CONFIG, 13, 7, REGISTER_TYPE_RW, 32, 4,
+		tsm_ntts_config_fields
+	},
+	{	TSM_NTTS_EXT_STAT, 15, 32, REGISTER_TYPE_MIXED, 0, 3,
+		tsm_ntts_ext_stat_fields
+	},
+	{	TSM_NTTS_LIMIT_HI, 23, 16, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_limit_hi_fields
+	},
+	{	TSM_NTTS_LIMIT_LO, 22, 32, REGISTER_TYPE_RW, 100000, 1,
+		tsm_ntts_limit_lo_fields
+	},
+	{	TSM_NTTS_OFFSET, 21, 30, REGISTER_TYPE_RW, 0, 1,
+		tsm_ntts_offset_fields
+	},
+	{	TSM_NTTS_SAMPLE_HI, 19, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_hi_fields
+	},
+	{	TSM_NTTS_SAMPLE_LO, 18, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_sample_lo_fields
+	},
+	{ TSM_NTTS_STAT, 14, 17, REGISTER_TYPE_RO, 0, 3, tsm_ntts_stat_fields },
+	{	TSM_NTTS_TS_T0_HI, 17, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_hi_fields
+	},
+	{	TSM_NTTS_TS_T0_LO, 16, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_lo_fields
+	},
+	{	TSM_NTTS_TS_T0_OFFSET, 20, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ntts_ts_t0_offset_fields
+	},
+	{ TSM_PB_CTRL, 63, 2, REGISTER_TYPE_WO, 0, 2, tsm_pb_ctrl_fields },
+	{	TSM_PB_INSTMEM, 64, 32, REGISTER_TYPE_WO, 0, 2,
+		tsm_pb_instmem_fields
+	},
+	{ TSM_PI_CTRL_I, 54, 32, REGISTER_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields },
+	{	TSM_PI_CTRL_KI, 52, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_ki_fields
+	},
+	{	TSM_PI_CTRL_KP, 51, 24, REGISTER_TYPE_RW, 0, 1,
+		tsm_pi_ctrl_kp_fields
+	},
+	{	TSM_PI_CTRL_SHL, 53, 4, REGISTER_TYPE_WO, 0, 1,
+		tsm_pi_ctrl_shl_fields
+	},
+	{ TSM_STAT, 1, 16, REGISTER_TYPE_RO, 0, 9, tsm_stat_fields },
+	{	TSM_TIMER_CTRL, 48, 2, REGISTER_TYPE_RW, 0, 2,
+		tsm_timer_ctrl_fields
+	},
+	{	TSM_TIMER_T0, 49, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t0_fields
+	},
+	{	TSM_TIMER_T1, 50, 30, REGISTER_TYPE_RW, 50000, 1,
+		tsm_timer_t1_fields
+	},
+	{	TSM_TIME_HARDSET_HI, 12, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_hi_fields
+	},
+	{	TSM_TIME_HARDSET_LO, 11, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_time_hardset_lo_fields
+	},
+	{ TSM_TIME_HI, 9, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_hi_fields },
+	{ TSM_TIME_LO, 8, 32, REGISTER_TYPE_RW, 0, 1, tsm_time_lo_fields },
+	{	TSM_TIME_RATE_ADJ, 10, 29, REGISTER_TYPE_RW, 0, 1,
+		tsm_time_rate_adj_fields
+	},
+	{ TSM_TS_HI, 6, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_hi_fields },
+	{ TSM_TS_LO, 5, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_lo_fields },
+	{ TSM_TS_OFFSET, 7, 30, REGISTER_TYPE_RW, 0, 1, tsm_ts_offset_fields },
+	{ TSM_TS_STAT, 55, 17, REGISTER_TYPE_RO, 0, 2, tsm_ts_stat_fields },
+	{	TSM_TS_STAT_HI_OFFSET, 62, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_hi_offset_fields
+	},
+	{	TSM_TS_STAT_LO_OFFSET, 61, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_lo_offset_fields
+	},
+	{	TSM_TS_STAT_TAR_HI, 57, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_hi_fields
+	},
+	{	TSM_TS_STAT_TAR_LO, 56, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_tar_lo_fields
+	},
+	{ TSM_TS_STAT_X, 58, 32, REGISTER_TYPE_RO, 0, 1, tsm_ts_stat_x_fields },
+	{	TSM_TS_STAT_X2_HI, 60, 16, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_hi_fields
+	},
+	{	TSM_TS_STAT_X2_LO, 59, 32, REGISTER_TYPE_RO, 0, 1,
+		tsm_ts_stat_x2_lo_fields
+	},
+	{	TSM_UTC_OFFSET, 65, 8, REGISTER_TYPE_RW, 0, 1,
+		tsm_utc_offset_fields
+	},
+};
+
+static nt_fpga_module_init_t fpga_modules[] = { /* { module id, instance, definition id, ver major, ver minor, bus type, bus address, register count, registers } */
+	{	MOD_CAT, 0, MOD_CAT, 0, 21, BUS_TYPE_RAB1, 768, 34,
+		cat_registers
+	}, /* CAT:0 CAT v0.21: CAT @ RAB1,768 (CAT CAT CAT) */
+	{	MOD_CSU, 0, MOD_CSU, 0, 0, BUS_TYPE_RAB1, 9728, 2,
+		csu_registers
+	}, /* CSU:0 CSU v0.0: CSU @ RAB1,9728 (CSU CSU CSU) */
+	{	MOD_DBS, 0, MOD_DBS, 0, 11, BUS_TYPE_RAB2, 12832, 27,
+		dbs_registers
+	}, /* DBS:0 DBS v0.11: DBS @ RAB2,12832 (DBS DBS DBS) */
+	{	MOD_FLM, 0, MOD_FLM, 0, 20, BUS_TYPE_RAB1, 1280, 43,
+		flm_registers
+	}, /* FLM:0 FLM v0.20: FLM @ RAB1,1280 (FLM FLM FLM) */
+	{	MOD_GFG, 0, MOD_GFG, 1, 1, BUS_TYPE_RAB2, 8704, 10,
+		gfg_registers
+	}, /* GFG:0 GFG v1.1: GFG @ RAB2,8704 (GFG GFG GFG) */
+	{	MOD_GMF, 0, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9216, 12,
+		gmf_registers
+	}, /* GMF:0 GMF v2.5: GMF_0 @ RAB2,9216 (GMF GMF_0 GMF) */
+	{	MOD_GMF, 1, MOD_GMF, 2, 5, BUS_TYPE_RAB2, 9728, 12,
+		gmf_registers
+	}, /* GMF:1 GMF v2.5: GMF_1 @ RAB2,9728 (GMF GMF_1 GMF) */
+	{	MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, BUS_TYPE_RAB0, 16386, 2,
+		gpio_phy_registers
+	}, /* GPIO_PHY:0 GPIO_PHY v1.0: GPIO_PHY @ RAB0,16386 (GPIO_PHY GPIO_PHY GPIO_PHY) */
+	{	MOD_HFU, 0, MOD_HFU, 0, 1, BUS_TYPE_RAB1, 9472, 2,
+		hfu_registers
+	}, /* HFU:0 HFU v0.1: HFU @ RAB1,9472 (HFU HFU HFU) */
+	{	MOD_HIF, 0, MOD_HIF, 0, 0, BUS_TYPE_PCI, 0, 18,
+		hif_registers
+	}, /* HIF:0 HIF v0.0: HIF @ PCI,0 (HIF HIF HIF) */
+	{	MOD_HSH, 0, MOD_HSH, 0, 5, BUS_TYPE_RAB1, 1536, 2,
+		hsh_registers
+	}, /* HSH:0 HSH v0.5: HSH @ RAB1,1536 (HSH HSH HSH) */
+	{	MOD_HST, 0, MOD_HST, 0, 2, BUS_TYPE_RAB1, 2048, 2,
+		hst_registers
+	}, /* HST:0 HST v0.2: HST @ RAB1,2048 (HST HST HST) */
+	{	MOD_IFR, 0, MOD_IFR, 0, 1, BUS_TYPE_RAB1, 9984, 2,
+		ifr_registers
+	}, /* IFR:0 IFR v0.1: IFR @ RAB1,9984 (IFR IFR IFR) */
+	{	MOD_IIC, 0, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 768, 22,
+		iic_registers
+	}, /* IIC:0 IIC v0.1: IIC0 @ RAB0,768 (IIC IIC0 IIC) */
+	{	MOD_IIC, 1, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 896, 22,
+		iic_registers
+	}, /* IIC:1 IIC v0.1: IIC1 @ RAB0,896 (IIC IIC1 IIC) */
+	{	MOD_IIC, 2, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24832, 22,
+		iic_registers
+	}, /* IIC:2 IIC v0.1: IIC2 @ RAB0,24832 (IIC IIC2 IIC) */
+	{	MOD_IIC, 3, MOD_IIC, 0, 1, BUS_TYPE_RAB0, 24960, 22,
+		iic_registers
+	}, /* IIC:3 IIC v0.1: IIC3 @ RAB0,24960 (IIC IIC3 IIC) */
+	{	MOD_KM, 0, MOD_KM, 0, 7, BUS_TYPE_RAB1, 1024, 11,
+		km_registers
+	}, /* KM:0 KM v0.7: KM @ RAB1,1024 (KM KM KM) */
+	{	MOD_MAC_PCS, 0, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 10240, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:0 MAC_PCS v0.2: MAC_PCS_0 @ RAB2,10240 (MAC_PCS MAC_PCS_0 MAC_PCS) */
+	{	MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, BUS_TYPE_RAB2, 11776, 44,
+		mac_pcs_registers
+	}, /* MAC_PCS:1 MAC_PCS v0.2: MAC_PCS_1 @ RAB2,11776 (MAC_PCS MAC_PCS_1 MAC_PCS) */
+	{	MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 10752, 9,
+		mac_rx_registers
+	}, /* MAC_RX:0 MAC_RX v0.0: MAC_RX_0 @ RAB2,10752 (MAC_RX MAC_RX_0 MAC_RX) */
+	{	MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, BUS_TYPE_RAB2, 12288, 9,
+		mac_rx_registers
+	}, /* MAC_RX:1 MAC_RX v0.0: MAC_RX_1 @ RAB2,12288 (MAC_RX MAC_RX_1 MAC_RX) */
+	{	MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 11264, 5,
+		mac_tx_registers
+	}, /* MAC_TX:0 MAC_TX v0.0: MAC_TX_0 @ RAB2,11264 (MAC_TX MAC_TX_0 MAC_TX) */
+	{	MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, BUS_TYPE_RAB2, 12800, 5,
+		mac_tx_registers
+	}, /* MAC_TX:1 MAC_TX v0.0: MAC_TX_1 @ RAB2,12800 (MAC_TX MAC_TX_1 MAC_TX) */
+	{	MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, BUS_TYPE_RAB0, 2320, 6,
+		pci_rd_tg_registers
+	}, /* PCI_RD_TG:0 PCI_RD_TG v0.1: PCI_RD_TG @ RAB0,2320 (PCI_RD_TG PCI_RD_TG PCI_RD_TG) */
+	{	MOD_PCI_TA, 0, MOD_PCI_TA, 0, 0, BUS_TYPE_RAB0, 2336, 5,
+		pci_ta_registers
+	}, /* PCI_TA:0 PCI_TA v0.0: PCI_TA @ RAB0,2336 (PCI_TA PCI_TA PCI_TA) */
+	{	MOD_PCI_WR_TG, 0, MOD_PCI_WR_TG, 0, 1, BUS_TYPE_RAB0, 2304, 7,
+		pci_wr_tg_registers
+	}, /* PCI_WR_TG:0 PCI_WR_TG v0.1: PCI_WR_TG @ RAB0,2304 (PCI_WR_TG PCI_WR_TG PCI_WR_TG) */
+	{	MOD_PDB, 0, MOD_PDB, 0, 9, BUS_TYPE_RAB1, 2816, 3,
+		pdb_registers
+	}, /* PDB:0 PDB v0.9: PDB @ RAB1,2816 (PDB PDB PDB) */
+	{	MOD_PDI, 0, MOD_PDI, 1, 1, BUS_TYPE_RAB0, 64, 6,
+		pdi_registers
+	}, /* PDI:0 PDI v1.1: PDI @ RAB0,64 (PDI PDI PDI) */
+	{	MOD_PTP1588, 0, MOD_PTP1588, 2, 1, BUS_TYPE_RAB2, 512, 23,
+		ptp1588_registers
+	}, /* PTP1588:0 PTP1588 v2.1: PTP1588 @ RAB2,512 (PTP1588 PTP1588 PTP1588) */
+	{	MOD_QSL, 0, MOD_QSL, 0, 7, BUS_TYPE_RAB1, 1792, 8,
+		qsl_registers
+	}, /* QSL:0 QSL v0.7: QSL @ RAB1,1792 (QSL QSL QSL) */
+	{	MOD_QSPI, 0, MOD_QSPI, 0, 0, BUS_TYPE_RAB0, 512, 11,
+		qspi_registers
+	}, /* QSPI:0 QSPI v0.0: QSPI @ RAB0,512 (QSPI QSPI QSPI) */
+	{	MOD_RAC, 0, MOD_RAC, 3, 0, BUS_TYPE_PCI, 8192, 14,
+		rac_registers
+	}, /* RAC:0 RAC v3.0: RAC @ PCI,8192 (RAC RAC RAC) */
+	{	MOD_RFD, 0, MOD_RFD, 0, 4, BUS_TYPE_RAB1, 256, 5,
+		rfd_registers
+	}, /* RFD:0 RFD v0.4: RFD @ RAB1,256 (RFD RFD RFD) */
+	{	MOD_RMC, 0, MOD_RMC, 1, 3, BUS_TYPE_RAB0, 12288, 4,
+		rmc_registers
+	}, /* RMC:0 RMC v1.3: RMC @ RAB0,12288 (RMC RMC RMC) */
+	{	MOD_RPP_LR, 0, MOD_RPP_LR, 0, 1, BUS_TYPE_RAB1, 2560, 4,
+		rpp_lr_registers
+	}, /* RPP_LR:0 RPP_LR v0.1: RPP_LR @ RAB1,2560 (RPP_LR RPP_LR RPP_LR) */
+	{	MOD_RST9563, 0, MOD_RST9563, 0, 5, BUS_TYPE_RAB0, 1024, 5,
+		rst9563_registers
+	}, /* RST9563:0 RST9563 v0.5: RST9563 @ RAB0,1024 (RST9563 RST9563 RST9563) */
+	{	MOD_SLC_LR, 0, MOD_SLC, 0, 2, BUS_TYPE_RAB1, 2304, 2,
+		slc_registers
+	}, /* SLC_LR:0 SLC v0.2: SLC_LR @ RAB1,2304 (SLC SLC_LR SLC_LR) */
+	{	MOD_SPIM, 0, MOD_SPIM, 1, 0, BUS_TYPE_RAB0, 80, 6,
+		spim_registers
+	}, /* SPIM:0 SPIM v1.0: SPIM @ RAB0,80 (SPIM SPIM SPIM) */
+	{	MOD_SPIS, 0, MOD_SPIS, 1, 0, BUS_TYPE_RAB0, 256, 7,
+		spis_registers
+	}, /* SPIS:0 SPIS v1.0: SPIS @ RAB0,256 (SPIS SPIS SPIS) */
+	{	MOD_STA, 0, MOD_STA, 0, 8, BUS_TYPE_RAB0, 2048, 8,
+		sta_registers
+	}, /* STA:0 STA v0.8: STA @ RAB0,2048 (STA STA STA) */
+	{	MOD_TEMPMON, 0, MOD_TEMPMON, 0, 0, BUS_TYPE_RAB0, 16384, 2,
+		tempmon_registers
+	}, /* TEMPMON:0 TEMPMON v0.0: TEMPMON @ RAB0,16384 (TEMPMON TEMPMON TEMPMON) */
+	{	MOD_TINT, 0, MOD_TINT, 0, 0, BUS_TYPE_RAB0, 1280, 2,
+		tint_registers
+	}, /* TINT:0 TINT v0.0: TINT @ RAB0,1280 (TINT TINT TINT) */
+	{	MOD_TMC, 0, MOD_TMC, 0, 1, BUS_TYPE_RAB2, 8192, 1,
+		tmc_registers
+	}, /* TMC:0 TMC v0.1: TMC @ RAB2,8192 (TMC TMC TMC) */
+	{	MOD_TSM, 0, MOD_TSM, 0, 8, BUS_TYPE_RAB2, 1024, 66,
+		tsm_registers
+	}, /* TSM:0 TSM v0.8: TSM @ RAB2,1024 (TSM TSM TSM) */
+	{	MOD_TX_CPY, 0, MOD_CPY, 0, 1, BUS_TYPE_RAB1, 9216, 20,
+		cpy_registers
+	}, /* TX_CPY:0 CPY v0.1: TX_CPY @ RAB1,9216 (CPY TX_CPY TX_CPY) */
+	{	MOD_TX_INS, 0, MOD_INS, 0, 1, BUS_TYPE_RAB1, 8704, 2,
+		ins_registers
+	}, /* TX_INS:0 INS v0.1: TX_INS @ RAB1,8704 (INS TX_INS TX_INS) */
+	{	MOD_TX_RPL, 0, MOD_RPL, 0, 2, BUS_TYPE_RAB1, 8960, 6,
+		rpl_registers
+	}, /* TX_RPL:0 RPL v0.2: TX_RPL @ RAB1,8960 (RPL TX_RPL TX_RPL) */
+};
+
+static nt_fpga_prod_param_t product_parameters[] = { /* { parameter id, value }; list ends with the { 0, -1 } sentinel */
+	{ NT_BUILD_NUMBER, 0 },
+	{ NT_BUILD_TIME, 1693492863 },
+	{ NT_CATEGORIES, 64 },
+	{ NT_CAT_DCT_PRESENT, 0 },
+	{ NT_CAT_END_OFS_SUPPORT, 0 },
+	{ NT_CAT_FUNCS, 64 },
+	{ NT_CAT_KCC_BANKS, 3 },
+	{ NT_CAT_KCC_PRESENT, 0 },
+	{ NT_CAT_KCC_SIZE, 1536 },
+	{ NT_CAT_KM_IF_CNT, 2 },
+	{ NT_CAT_KM_IF_M0, 0 },
+	{ NT_CAT_KM_IF_M1, 1 },
+	{ NT_CAT_N_CMP, 8 },
+	{ NT_CAT_N_EXT, 4 },
+	{ NT_CAT_N_LEN, 8 },
+	{ NT_CB_DEBUG, 0 },
+	{ NT_COR_CATEGORIES, 16 },
+	{ NT_COR_PRESENT, 0 },
+	{ NT_CSU_PRESENT, 1 },
+	{ NT_DBS_PRESENT, 1 },
+	{ NT_DBS_RX_QUEUES, 128 },
+	{ NT_DBS_TX_PORTS, 2 },
+	{ NT_DBS_TX_QUEUES, 128 },
+	{ NT_DDP_PRESENT, 0 },
+	{ NT_DDP_TBL_DEPTH, 4096 },
+	{ NT_EMI_SPLIT_STEPS, 16 },
+	{ NT_EOF_TIMESTAMP_ONLY, 1 },
+	{ NT_EPP_CATEGORIES, 32 },
+	{ NT_FLM_CACHE, 1 },
+	{ NT_FLM_CATEGORIES, 32 },
+	{ NT_FLM_ENTRY_SIZE, 64 },
+	{ NT_FLM_PRESENT, 1 },
+	{ NT_FLM_PRIOS, 4 },
+	{ NT_FLM_PST_PROFILES, 16 },
+	{ NT_FLM_SIZE_MB, 12288 },
+	{ NT_FLM_STATEFUL, 1 },
+	{ NT_FLM_VARIANT, 2 },
+	{ NT_GFG_PRESENT, 1 },
+	{ NT_GFG_TX_LIVE_RECONFIG_SUPPORT, 1 },
+	{ NT_GMF_FCS_PRESENT, 0 },
+	{ NT_GMF_IFG_SPEED_DIV, 33 },
+	{ NT_GMF_IFG_SPEED_DIV100G, 33 },
+	{ NT_GMF_IFG_SPEED_MUL, 20 },
+	{ NT_GMF_IFG_SPEED_MUL100G, 20 },
+	{ NT_GROUP_ID, 9563 },
+	{ NT_HFU_PRESENT, 1 },
+	{ NT_HIF_MSIX_BAR, 1 },
+	{ NT_HIF_MSIX_PBA_OFS, 8192 },
+	{ NT_HIF_MSIX_PRESENT, 1 },
+	{ NT_HIF_MSIX_TBL_OFS, 0 },
+	{ NT_HIF_MSIX_TBL_SIZE, 8 },
+	{ NT_HIF_PER_PS, 4000 },
+	{ NT_HIF_SRIOV_PRESENT, 1 },
+	{ NT_HSH_CATEGORIES, 16 },
+	{ NT_HSH_TOEPLITZ, 1 },
+	{ NT_HST_CATEGORIES, 32 },
+	{ NT_HST_PRESENT, 1 },
+	{ NT_IOA_CATEGORIES, 1024 },
+	{ NT_IOA_PRESENT, 0 },
+	{ NT_IPF_PRESENT, 0 },
+	{ NT_KM_CAM_BANKS, 3 },
+	{ NT_KM_CAM_RECORDS, 2048 },
+	{ NT_KM_CAM_REC_WORDS, 6 },
+	{ NT_KM_CATEGORIES, 32 },
+	{ NT_KM_END_OFS_SUPPORT, 0 },
+	{ NT_KM_EXT_EXTRACTORS, 0 },
+	{ NT_KM_FLOW_TYPES, 16 },
+	{ NT_KM_PRESENT, 1 },
+	{ NT_KM_SWX_PRESENT, 0 },
+	{ NT_KM_SYNERGY_MATCH, 0 },
+	{ NT_KM_TCAM_BANKS, 12 },
+	{ NT_KM_TCAM_BANK_WIDTH, 72 },
+	{ NT_KM_TCAM_HIT_QUAL, 0 },
+	{ NT_KM_TCAM_KEYWAY, 1 },
+	{ NT_KM_WIDE, 1 },
+	{ NT_LR_PRESENT, 1 },
+	{ NT_MCU_PRESENT, 0 },
+	{ NT_MDG_DEBUG_FLOW_CONTROL, 0 },
+	{ NT_MDG_DEBUG_REG_READ_BACK, 0 },
+	{ NT_MSK_CATEGORIES, 32 },
+	{ NT_MSK_PRESENT, 0 },
+	{ NT_NFV_OVS_PRODUCT, 0 },
+	{ NT_NIMS, 2 },
+	{ NT_PCI_DEVICE_ID, 453 },
+	{ NT_PCI_TA_TG_PRESENT, 1 },
+	{ NT_PCI_VENDOR_ID, 6388 },
+	{ NT_PDB_CATEGORIES, 16 },
+	{ NT_PHY_ANEG_PRESENT, 0 },
+	{ NT_PHY_KRFEC_PRESENT, 0 },
+	{ NT_PHY_PORTS, 2 },
+	{ NT_PHY_PORTS_PER_QUAD, 1 },
+	{ NT_PHY_QUADS, 2 },
+	{ NT_PHY_RSFEC_PRESENT, 1 },
+	{ NT_QM_CELLS, 2097152 },
+	{ NT_QM_CELL_SIZE, 6144 },
+	{ NT_QM_PRESENT, 0 },
+	{ NT_QSL_CATEGORIES, 32 },
+	{ NT_QSL_COLOR_SEL_BW, 7 },
+	{ NT_QSL_QST_SIZE, 4096 },
+	{ NT_QUEUES, 128 },
+	{ NT_RAC_RAB_INTERFACES, 3 },
+	{ NT_RAC_RAB_OB_UPDATE, 0 },
+	{ NT_REVISION_ID, 24 },
+	{ NT_RMC_LAG_GROUPS, 1 },
+	{ NT_ROA_CATEGORIES, 1024 },
+	{ NT_ROA_PRESENT, 0 },
+	{ NT_RPP_PER_PS, 3333 },
+	{ NT_RTX_PRESENT, 0 },
+	{ NT_RX_HOST_BUFFERS, 128 },
+	{ NT_RX_PORTS, 2 },
+	{ NT_RX_PORT_REPLICATE, 0 },
+	{ NT_SLB_PRESENT, 0 },
+	{ NT_SLC_LR_PRESENT, 1 },
+	{ NT_STA_COLORS, 64 },
+	{ NT_STA_RX_PORTS, 2 },
+	{ NT_TBH_DEBUG_DLN, 1 },
+	{ NT_TBH_PRESENT, 0 },
+	{ NT_TFD_PRESENT, 1 },
+	{ NT_TPE_CATEGORIES, 16 },
+	{ NT_TSM_OST_ONLY, 0 },
+	{ NT_TS_APPEND, 0 },
+	{ NT_TS_INJECT_PRESENT, 0 },
+	{ NT_TX_CPY_PACKET_READERS, 0 },
+	{ NT_TX_CPY_PRESENT, 1 },
+	{ NT_TX_CPY_SIDEBAND_READERS, 6 },
+	{ NT_TX_CPY_VARIANT, 0 },
+	{ NT_TX_CPY_WRITERS, 5 },
+	{ NT_TX_HOST_BUFFERS, 128 },
+	{ NT_TX_INS_PRESENT, 1 },
+	{ NT_TX_MTU_PROFILE_IFR, 16 },
+	{ NT_TX_ON_TIMESTAMP, 1 },
+	{ NT_TX_PORTS, 2 },
+	{ NT_TX_PORT_REPLICATE, 1 },
+	{ NT_TX_RPL_DEPTH, 4096 },
+	{ NT_TX_RPL_EXT_CATEGORIES, 1024 },
+	{ NT_TX_RPL_PRESENT, 1 },
+	{ NT_TYPE_ID, 200 },
+	{ NT_USE_TRIPLE_SPEED, 0 },
+	{ NT_VERSION_ID, 55 },
+	{ NT_VLI_PRESENT, 0 },
+	{ 0, -1 }, /* END */
+};
+
+nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000 = { /* top-level descriptor for FPGA image 9563-55-24 */
+	200, /* fpgaTypeId */
+	9563, /* fpga_product_id */
+	55, /* fpga_version */
+	24, /* fpga_revision */
+	0, /* fpga_patch_no */
+	0, /* fpga_build_no */
+	1693492863, /* fpga_build_time */
+	140,	    product_parameters, 48, fpga_modules, /* 140 parameters, 48 modules (counts exclude the END sentinel) */
+};
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
new file mode 100644
index 0000000000..1d707d6925
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_instances.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_FPGA_INSTANCES_H
+#define NTHW_FPGA_INSTANCES_H
+
+#include "fpga_model.h"
+
+extern nt_fpga_prod_init_t *nthw_fpga_instances[]; /* table of supported FPGA image descriptors; defined elsewhere */
+
+extern nt_fpga_prod_init_t nthw_fpga_9563_055_024_0000; /* image 9563-55-24 patch 0 (product-version-revision) */
+
+#endif /* NTHW_FPGA_INSTANCES_H */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
new file mode 100644
index 0000000000..38a15bec87
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_modules_defs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_MODULES_DEFS_H_
+#define _NTHW_FPGA_MODULES_DEFS_H_
+
+/* Unknown/uninitialized - keep this as the first element */
+#define MOD_UNKNOWN (0L)
+#define MOD_CAT (1L)
+#define MOD_CB (2L)
+#define MOD_CCIP (3L)
+#define MOD_CFP4_CTRL_GBOX (4L)
+#define MOD_COR (5L)
+#define MOD_CPY (6L)
+#define MOD_CSU (7L)
+#define MOD_DBS (8L)
+#define MOD_DDP (9L)
+#define MOD_EPP (10L)
+#define MOD_EQM (11L)
+#define MOD_FHM (12L)
+#define MOD_FLM (13L)
+#define MOD_GFG (14L)
+#define MOD_GMF (15L)
+#define MOD_GPIO_PHY (16L)
+#define MOD_GPIO_PHY_PORTS (17L)
+#define MOD_GPIO_SFPP (18L)
+#define MOD_HFU (19L)
+#define MOD_HIF (20L)
+#define MOD_HSH (21L)
+#define MOD_HST (22L)
+#define MOD_ICORE_10G (23L)
+#define MOD_IFR (24L)
+#define MOD_IIC (25L)
+#define MOD_INS (26L)
+#define MOD_IOA (27L)
+#define MOD_IPF (28L)
+#define MOD_KM (29L)
+#define MOD_LAO (30L)
+#define MOD_MAC (31L)
+#define MOD_MAC100 (33L) /* note: value 32L is not assigned in this file */
+#define MOD_MAC10G (34L)
+#define MOD_MAC1G (35L)
+#define MOD_MAC_PCS (36L)
+#define MOD_MAC_PCS_XXV (37L)
+#define MOD_MAC_RX (38L)
+#define MOD_MAC_TFG (39L)
+#define MOD_MAC_TX (40L)
+#define MOD_MCU (41L)
+#define MOD_MDG (42L)
+#define MOD_MSK (43L)
+#define MOD_NIF (44L)
+#define MOD_PCIE3 (45L)
+#define MOD_PCI_RD_TG (46L)
+#define MOD_PCI_TA (47L)
+#define MOD_PCI_WR_TG (48L)
+#define MOD_PCM_NT100A01_01 (49L)
+#define MOD_PCM_NT50B01_01 (50L)
+#define MOD_PCS (51L)
+#define MOD_PCS100 (52L)
+#define MOD_PDB (53L)
+#define MOD_PDI (54L)
+#define MOD_PHY10G (55L)
+#define MOD_PHY3S10G (56L)
+#define MOD_PM (57L)
+#define MOD_PRM_NT100A01_01 (58L)
+#define MOD_PRM_NT50B01_01 (59L)
+#define MOD_PTP1588 (60L)
+#define MOD_QM (61L)
+#define MOD_QSL (62L)
+#define MOD_QSPI (63L)
+#define MOD_R2DRP (64L)
+#define MOD_RAC (65L)
+#define MOD_RBH (66L)
+#define MOD_RFD (67L)
+#define MOD_RMC (68L)
+#define MOD_RNTC (69L)
+#define MOD_ROA (70L)
+#define MOD_RPL (71L)
+#define MOD_RPP_LR (72L)
+#define MOD_RST7000 (73L)
+#define MOD_RST7001 (74L)
+#define MOD_RST9500 (75L)
+#define MOD_RST9501 (76L)
+#define MOD_RST9502 (77L)
+#define MOD_RST9503 (78L)
+#define MOD_RST9504 (79L)
+#define MOD_RST9505 (80L)
+#define MOD_RST9506 (81L)
+#define MOD_RST9507 (82L)
+#define MOD_RST9508 (83L)
+#define MOD_RST9509 (84L)
+#define MOD_RST9510 (85L)
+#define MOD_RST9512 (86L)
+#define MOD_RST9513 (87L)
+#define MOD_RST9515 (88L)
+#define MOD_RST9516 (89L)
+#define MOD_RST9517 (90L)
+#define MOD_RST9519 (91L)
+#define MOD_RST9520 (92L)
+#define MOD_RST9521 (93L)
+#define MOD_RST9522 (94L)
+#define MOD_RST9523 (95L)
+#define MOD_RST9524 (96L)
+#define MOD_RST9525 (97L)
+#define MOD_RST9526 (98L)
+#define MOD_RST9527 (99L)
+#define MOD_RST9528 (100L)
+#define MOD_RST9529 (101L)
+#define MOD_RST9530 (102L)
+#define MOD_RST9531 (103L)
+#define MOD_RST9532 (104L)
+#define MOD_RST9533 (105L)
+#define MOD_RST9534 (106L)
+#define MOD_RST9535 (107L)
+#define MOD_RST9536 (108L)
+#define MOD_RST9537 (109L)
+#define MOD_RST9538 (110L)
+#define MOD_RST9539 (111L)
+#define MOD_RST9540 (112L)
+#define MOD_RST9541 (113L)
+#define MOD_RST9542 (114L)
+#define MOD_RST9543 (115L)
+#define MOD_RST9544 (116L)
+#define MOD_RST9545 (117L)
+#define MOD_RST9546 (118L)
+#define MOD_RST9547 (119L)
+#define MOD_RST9548 (120L)
+#define MOD_RST9549 (121L)
+#define MOD_RST9553 (122L)
+#define MOD_RST9555 (123L)
+#define MOD_RST9559 (124L)
+#define MOD_RST9563 (125L)
+#define MOD_RTD (126L)
+#define MOD_RTD_HMP (127L)
+#define MOD_RTX (128L)
+#define MOD_SDC (129L)
+#define MOD_SLC (130L)
+#define MOD_SLC_LR (131L)
+#define MOD_SMM (132L)
+#define MOD_SMM_RX (133L)
+#define MOD_SMM_TX (134L)
+#define MOD_SPIM (135L)
+#define MOD_SPIS (136L)
+#define MOD_STA (137L)
+#define MOD_TBH (138L)
+#define MOD_TEMPMON (139L)
+#define MOD_TINT (140L)
+#define MOD_TMC (141L)
+#define MOD_TSM (142L)
+#define MOD_TX_CPY (143L)
+#define MOD_TX_CSI (144L)
+#define MOD_TX_CSO (145L)
+#define MOD_TX_INS (146L)
+#define MOD_TX_RPL (147L)
+/*
+ * NOTE: Keep this as the last element!
+ * End indicator - keep this as the last element - only aliases go below this point
+ */
+#define MOD_UNKNOWN_MAX (148L)
+/* MOD_COUNT_MAX must likewise stay past the last real module id - only aliases go below */
+#define MOD_COUNT_MAX (148L)
+/* aliases */
+#define MOD_MAC10 (MOD_MAC10G) /* alias */
+
+#endif /* _NTHW_FPGA_MODULES_DEFS_H_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
new file mode 100644
index 0000000000..b6187a257f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_parameters_defs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_PARAMETERS_DEFS_
+#define _NTHW_FPGA_PARAMETERS_DEFS_
+
+#define NT_PARAM_UNKNOWN (0L)
+#define NT_BUILD_NUMBER (1L)
+#define NT_BUILD_TIME (2L)
+#define NT_CATEGORIES (3L)
+#define NT_CAT_CCT_SIZE (4L)
+#define NT_CAT_CTE_SIZE (5L)
+#define NT_CAT_CTS_SIZE (6L)
+#define NT_CAT_DCT_PRESENT (7L)
+#define NT_CAT_DCT_SIZE (8L)
+#define NT_CAT_END_OFS_SUPPORT (9L)
+#define NT_CAT_FPC (10L)
+#define NT_CAT_FTE_SIZE (11L)
+#define NT_CAT_FUNCS (12L)
+#define NT_CAT_KCC_BANKS (13L)
+#define NT_CAT_KCC_PRESENT (14L)
+#define NT_CAT_KCC_SIZE (15L)
+#define NT_CAT_KCE_SIZE (16L)
+#define NT_CAT_KM_IF_CNT (17L)
+#define NT_CAT_KM_IF_M0 (18L)
+#define NT_CAT_KM_IF_M1 (19L)
+#define NT_CAT_N_CMP (20L)
+#define NT_CAT_N_EXT (21L)
+#define NT_CAT_N_LEN (22L)
+#define NT_CAT_RCK_SIZE (23L)
+#define NT_CAT_VALUES (24L)
+#define NT_CB_DEBUG (25L)
+#define NT_COR_CATEGORIES (26L)
+#define NT_COR_PRESENT (27L)
+#define NT_CPY_MASK_MEM (28L)
+#define NT_CSU_PRESENT (29L)
+#define NT_DBS_PRESENT (30L)
+#define NT_DBS_RX_QUEUES (31L)
+#define NT_DBS_TX_PORTS (32L)
+#define NT_DBS_TX_QUEUES (33L)
+#define NT_DDP_PRESENT (34L)
+#define NT_DDP_TBL_DEPTH (35L)
+#define NT_EMI_SPLIT_STEPS (36L)
+#define NT_EOF_TIMESTAMP_ONLY (37L)
+#define NT_EPP_CATEGORIES (38L)
+#define NT_EXT_MEM_NUM (39L)
+#define NT_EXT_MEM_SINGLE_SIZE_GB (40L)
+#define NT_FLM_CACHE (41L)
+#define NT_FLM_CATEGORIES (42L)
+#define NT_FLM_ENTRY_SIZE (43L)
+#define NT_FLM_PRESENT (44L)
+#define NT_FLM_PRIOS (45L)
+#define NT_FLM_PST_PROFILES (46L)
+#define NT_FLM_SIZE_MB (47L)
+#define NT_FLM_STATEFUL (48L)
+#define NT_FLM_VARIANT (49L)
+#define NT_GFG_PRESENT (50L)
+#define NT_GFG_TX_LIVE_RECONFIG_SUPPORT (51L)
+#define NT_GMF_FCS_PRESENT (52L)
+#define NT_GMF_IFG_SPEED_DIV (53L)
+#define NT_GMF_IFG_SPEED_DIV100G (54L)
+#define NT_GMF_IFG_SPEED_DIV100M (55L)
+#define NT_GMF_IFG_SPEED_DIV10G (56L)
+#define NT_GMF_IFG_SPEED_DIV1G (57L)
+#define NT_GMF_IFG_SPEED_DIV2 (58L)
+#define NT_GMF_IFG_SPEED_DIV25G (59L)
+#define NT_GMF_IFG_SPEED_DIV3 (60L)
+#define NT_GMF_IFG_SPEED_DIV4 (61L)
+#define NT_GMF_IFG_SPEED_DIV40G (62L)
+#define NT_GMF_IFG_SPEED_DIV50G (63L)
+#define NT_GMF_IFG_SPEED_MUL (64L)
+#define NT_GMF_IFG_SPEED_MUL100G (65L)
+#define NT_GMF_IFG_SPEED_MUL100M (66L)
+#define NT_GMF_IFG_SPEED_MUL10G (67L)
+#define NT_GMF_IFG_SPEED_MUL1G (68L)
+#define NT_GMF_IFG_SPEED_MUL2 (69L)
+#define NT_GMF_IFG_SPEED_MUL25G (70L)
+#define NT_GMF_IFG_SPEED_MUL3 (71L)
+#define NT_GMF_IFG_SPEED_MUL4 (72L)
+#define NT_GMF_IFG_SPEED_MUL40G (73L)
+#define NT_GMF_IFG_SPEED_MUL50G (74L)
+#define NT_GROUP_ID (75L)
+#define NT_HFU_PRESENT (76L)
+#define NT_HIF_MSIX_BAR (77L)
+#define NT_HIF_MSIX_PBA_OFS (78L)
+#define NT_HIF_MSIX_PRESENT (79L)
+#define NT_HIF_MSIX_TBL_OFS (80L)
+#define NT_HIF_MSIX_TBL_SIZE (81L)
+#define NT_HIF_PER_PS (82L)
+#define NT_HIF_SRIOV_PRESENT (83L)
+#define NT_HSH_CATEGORIES (84L)
+#define NT_HSH_TOEPLITZ (85L)
+#define NT_HST_CATEGORIES (86L)
+#define NT_HST_PRESENT (87L)
+#define NT_IOA_CATEGORIES (88L)
+#define NT_IOA_PRESENT (89L)
+#define NT_IPF_PRESENT (90L)
+#define NT_KM_CAM_BANKS (91L)
+#define NT_KM_CAM_RECORDS (92L)
+#define NT_KM_CAM_REC_WORDS (93L)
+#define NT_KM_CATEGORIES (94L)
+#define NT_KM_END_OFS_SUPPORT (95L)
+#define NT_KM_EXT_EXTRACTORS (96L)
+#define NT_KM_FLOW_SETS (97L)
+#define NT_KM_FLOW_TYPES (98L)
+#define NT_KM_PRESENT (99L)
+#define NT_KM_SWX_PRESENT (100L)
+#define NT_KM_SYNERGY_MATCH (101L)
+#define NT_KM_TCAM_BANKS (102L)
+#define NT_KM_TCAM_BANK_WIDTH (103L)
+#define NT_KM_TCAM_HIT_QUAL (104L)
+#define NT_KM_TCAM_KEYWAY (105L)
+#define NT_KM_WIDE (106L)
+#define NT_LR_PRESENT (107L)
+#define NT_LTX_CATEGORIES (108L)
+#define NT_MCU_DRAM_SIZE (109L)
+#define NT_MCU_PRESENT (110L)
+#define NT_MCU_TYPE (111L)
+#define NT_MDG_DEBUG_FLOW_CONTROL (112L)
+#define NT_MDG_DEBUG_REG_READ_BACK (113L)
+#define NT_MSK_CATEGORIES (114L)
+#define NT_MSK_PRESENT (115L)
+#define NT_NAME (116L)
+#define NT_NFV_OVS_PRODUCT (117L)
+#define NT_NIMS (118L)
+#define NT_PATCH_NUMBER (119L)
+#define NT_PCI_DEVICE_ID (120L)
+#define NT_PCI_INT_AVR (121L)
+#define NT_PCI_INT_EQM (122L)
+#define NT_PCI_INT_IIC0 (123L)
+#define NT_PCI_INT_IIC1 (124L)
+#define NT_PCI_INT_IIC2 (125L)
+#define NT_PCI_INT_IIC3 (126L)
+#define NT_PCI_INT_IIC4 (127L)
+#define NT_PCI_INT_IIC5 (128L)
+#define NT_PCI_INT_PORT (129L)
+#define NT_PCI_INT_PORT0 (130L)
+#define NT_PCI_INT_PORT1 (131L)
+#define NT_PCI_INT_PPS (132L)
+#define NT_PCI_INT_QSPI (133L)
+#define NT_PCI_INT_SPIM (134L)
+#define NT_PCI_INT_SPIS (135L)
+#define NT_PCI_INT_STA (136L)
+#define NT_PCI_INT_TIMER (137L)
+#define NT_PCI_INT_TINT (138L)
+#define NT_PCI_TA_TG_PRESENT (139L)
+#define NT_PCI_VENDOR_ID (140L)
+#define NT_PDB_CATEGORIES (141L)
+#define NT_PHY_ANEG_PRESENT (142L)
+#define NT_PHY_KRFEC_PRESENT (143L)
+#define NT_PHY_PORTS (144L)
+#define NT_PHY_PORTS_PER_QUAD (145L)
+#define NT_PHY_QUADS (146L)
+#define NT_PHY_RSFEC_PRESENT (147L)
+#define NT_PORTS (148L)
+#define NT_PROD_ID_LAYOUT_VERSION (149L)
+#define NT_QM_BLOCKS (150L)
+#define NT_QM_CELLS (151L)
+#define NT_QM_CELL_SIZE (152L)
+#define NT_QM_PRESENT (153L)
+#define NT_QSL_CATEGORIES (154L)
+#define NT_QSL_COLOR_SEL_BW (155L)
+#define NT_QSL_QST_SIZE (156L)
+#define NT_QUEUES (157L)
+#define NT_RAC_RAB_INTERFACES (158L)
+#define NT_RAC_RAB_OB_UPDATE (159L)
+#define NT_REVISION_ID (160L)
+#define NT_RMC_LAG_GROUPS (161L)
+#define NT_ROA_CATEGORIES (162L)
+#define NT_ROA_PRESENT (163L)
+#define NT_RPP_PER_PS (164L)
+#define NT_RTX_PRESENT (165L)
+#define NT_RX_HOST_BUFFERS (166L)
+#define NT_RX_PORTS (167L)
+#define NT_RX_PORT_REPLICATE (168L)
+#define NT_SLB_PRESENT (169L)
+#define NT_SLC_LR_PRESENT (170L)
+#define NT_STA_COLORS (171L)
+#define NT_STA_RX_PORTS (172L)
+#define NT_TBH_DEBUG_DLN (173L)
+#define NT_TBH_PRESENT (174L)
+#define NT_TFD_PRESENT (175L)
+#define NT_TPE_CATEGORIES (176L)
+#define NT_TSM_OST_ONLY (177L)
+#define NT_TS_APPEND (178L)
+#define NT_TS_INJECT_PRESENT (179L)
+#define NT_TX_CPY_PACKET_READERS (180L)
+#define NT_TX_CPY_PRESENT (181L)
+#define NT_TX_CPY_SIDEBAND_READERS (182L)
+#define NT_TX_CPY_VARIANT (183L)
+#define NT_TX_CPY_WRITERS (184L)
+#define NT_TX_HOST_BUFFERS (185L)
+#define NT_TX_INS_PRESENT (186L)
+#define NT_TX_MTU_PROFILE_IFR (187L)
+#define NT_TX_ON_TIMESTAMP (188L)
+#define NT_TX_PORTS (189L)
+#define NT_TX_PORT_REPLICATE (190L)
+#define NT_TX_RPL_DEPTH (191L)
+#define NT_TX_RPL_EXT_CATEGORIES (192L)
+#define NT_TX_RPL_PRESENT (193L)
+#define NT_TYPE_ID (194L)
+#define NT_USE_TRIPLE_SPEED (195L)
+#define NT_UUID (196L)
+#define NT_VERSION (197L)
+#define NT_VERSION_ID (198L)
+#define NT_VLI_PRESENT (199L)
+
+#endif /* _NTHW_FPGA_PARAMETERS_DEFS_ */
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
new file mode 100644
index 0000000000..9d0c30b20c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_registers_defs.h
@@ -0,0 +1,7277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTHW_FPGA_REGISTERS_DEFS_
+#define _NTHW_FPGA_REGISTERS_DEFS_
+
+/* CAT */
+#define CAT_CCE_CTRL (1000L)
+#define CAT_CCE_CTRL_ADR (1001L)
+#define CAT_CCE_CTRL_CNT (1002L)
+#define CAT_CCE_DATA (1003L)
+#define CAT_CCE_DATA_IMM (1004L)
+#define CAT_CCE_DATA_IND (1005L)
+#define CAT_CCS_CTRL (1006L)
+#define CAT_CCS_CTRL_ADR (1007L)
+#define CAT_CCS_CTRL_CNT (1008L)
+#define CAT_CCS_DATA (1009L)
+#define CAT_CCS_DATA_COR (1010L)
+#define CAT_CCS_DATA_COR_EN (1011L)
+#define CAT_CCS_DATA_EPP (1012L)
+#define CAT_CCS_DATA_EPP_EN (1013L)
+#define CAT_CCS_DATA_HSH (1014L)
+#define CAT_CCS_DATA_HSH_EN (1015L)
+#define CAT_CCS_DATA_HST (1016L)
+#define CAT_CCS_DATA_HST_EN (1017L)
+#define CAT_CCS_DATA_IPF (1018L)
+#define CAT_CCS_DATA_IPF_EN (1019L)
+#define CAT_CCS_DATA_MSK (1020L)
+#define CAT_CCS_DATA_MSK_EN (1021L)
+#define CAT_CCS_DATA_PDB (1022L)
+#define CAT_CCS_DATA_PDB_EN (1023L)
+#define CAT_CCS_DATA_QSL (1024L)
+#define CAT_CCS_DATA_QSL_EN (1025L)
+#define CAT_CCS_DATA_RRB (1026L)
+#define CAT_CCS_DATA_RRB_EN (1027L)
+#define CAT_CCS_DATA_SB0_DATA (1028L)
+#define CAT_CCS_DATA_SB0_TYPE (1029L)
+#define CAT_CCS_DATA_SB1_DATA (1030L)
+#define CAT_CCS_DATA_SB1_TYPE (1031L)
+#define CAT_CCS_DATA_SB2_DATA (1032L)
+#define CAT_CCS_DATA_SB2_TYPE (1033L)
+#define CAT_CCS_DATA_SLC (1034L)
+#define CAT_CCS_DATA_SLC_EN (1035L)
+#define CAT_CCS_DATA_TPE (1036L)
+#define CAT_CCS_DATA_TPE_EN (1037L)
+#define CAT_CCT_CTRL (1038L)
+#define CAT_CCT_CTRL_ADR (1039L)
+#define CAT_CCT_CTRL_CNT (1040L)
+#define CAT_CCT_DATA (1041L)
+#define CAT_CCT_DATA_COLOR (1042L)
+#define CAT_CCT_DATA_KM (1043L)
+#define CAT_CFN_CTRL (1044L)
+#define CAT_CFN_CTRL_ADR (1045L)
+#define CAT_CFN_CTRL_CNT (1046L)
+#define CAT_CFN_DATA (1047L)
+#define CAT_CFN_DATA_ENABLE (1048L)
+#define CAT_CFN_DATA_ERR_CV (1049L)
+#define CAT_CFN_DATA_ERR_FCS (1050L)
+#define CAT_CFN_DATA_ERR_INV (1051L)
+#define CAT_CFN_DATA_ERR_L3_CS (1052L)
+#define CAT_CFN_DATA_ERR_L4_CS (1053L)
+#define CAT_CFN_DATA_ERR_TNL_L3_CS (1054L)
+#define CAT_CFN_DATA_ERR_TNL_L4_CS (1055L)
+#define CAT_CFN_DATA_ERR_TNL_TTL_EXP (1056L)
+#define CAT_CFN_DATA_ERR_TRUNC (1057L)
+#define CAT_CFN_DATA_ERR_TTL_EXP (1058L)
+#define CAT_CFN_DATA_FLM_OR (1059L)
+#define CAT_CFN_DATA_INV (1060L)
+#define CAT_CFN_DATA_KM0_OR (1061L)
+#define CAT_CFN_DATA_KM1_OR (1062L)
+#define CAT_CFN_DATA_KM_OR (1063L)
+#define CAT_CFN_DATA_LC (1064L)
+#define CAT_CFN_DATA_LC_INV (1065L)
+#define CAT_CFN_DATA_MAC_PORT (1066L)
+#define CAT_CFN_DATA_PM_AND_INV (1067L)
+#define CAT_CFN_DATA_PM_CMB (1068L)
+#define CAT_CFN_DATA_PM_CMP (1069L)
+#define CAT_CFN_DATA_PM_DCT (1070L)
+#define CAT_CFN_DATA_PM_EXT_INV (1071L)
+#define CAT_CFN_DATA_PM_INV (1072L)
+#define CAT_CFN_DATA_PM_OR_INV (1073L)
+#define CAT_CFN_DATA_PTC_CFP (1074L)
+#define CAT_CFN_DATA_PTC_FRAG (1075L)
+#define CAT_CFN_DATA_PTC_INV (1076L)
+#define CAT_CFN_DATA_PTC_IP_PROT (1077L)
+#define CAT_CFN_DATA_PTC_ISL (1078L)
+#define CAT_CFN_DATA_PTC_L2 (1079L)
+#define CAT_CFN_DATA_PTC_L3 (1080L)
+#define CAT_CFN_DATA_PTC_L4 (1081L)
+#define CAT_CFN_DATA_PTC_MAC (1082L)
+#define CAT_CFN_DATA_PTC_MPLS (1083L)
+#define CAT_CFN_DATA_PTC_TNL_FRAG (1084L)
+#define CAT_CFN_DATA_PTC_TNL_IP_PROT (1085L)
+#define CAT_CFN_DATA_PTC_TNL_L2 (1086L)
+#define CAT_CFN_DATA_PTC_TNL_L3 (1087L)
+#define CAT_CFN_DATA_PTC_TNL_L4 (1088L)
+#define CAT_CFN_DATA_PTC_TNL_MPLS (1089L)
+#define CAT_CFN_DATA_PTC_TNL_VLAN (1090L)
+#define CAT_CFN_DATA_PTC_TUNNEL (1091L)
+#define CAT_CFN_DATA_PTC_VLAN (1092L)
+#define CAT_CFN_DATA_PTC_VNTAG (1093L)
+#define CAT_COT_CTRL (1094L)
+#define CAT_COT_CTRL_ADR (1095L)
+#define CAT_COT_CTRL_CNT (1096L)
+#define CAT_COT_DATA (1097L)
+#define CAT_COT_DATA_COLOR (1098L)
+#define CAT_COT_DATA_KM (1099L)
+#define CAT_COT_DATA_NFV_SB (1100L)
+#define CAT_CTE_CTRL (1101L)
+#define CAT_CTE_CTRL_ADR (1102L)
+#define CAT_CTE_CTRL_CNT (1103L)
+#define CAT_CTE_DATA (1104L)
+#define CAT_CTE_DATA_COL_ENABLE (1105L)
+#define CAT_CTE_DATA_COR_ENABLE (1106L)
+#define CAT_CTE_DATA_EPP_ENABLE (1107L)
+#define CAT_CTE_DATA_HSH_ENABLE (1108L)
+#define CAT_CTE_DATA_HST_ENABLE (1109L)
+#define CAT_CTE_DATA_IPF_ENABLE (1110L)
+#define CAT_CTE_DATA_MSK_ENABLE (1111L)
+#define CAT_CTE_DATA_PDB_ENABLE (1112L)
+#define CAT_CTE_DATA_QSL_ENABLE (1113L)
+#define CAT_CTE_DATA_RRB_ENABLE (1114L)
+#define CAT_CTE_DATA_SLC_ENABLE (1115L)
+#define CAT_CTE_DATA_TPE_ENABLE (1116L)
+#define CAT_CTE_DATA_TX_INS_ENABLE (1117L)
+#define CAT_CTE_DATA_TX_RPL_ENABLE (1118L)
+#define CAT_CTS_CTRL (1119L)
+#define CAT_CTS_CTRL_ADR (1120L)
+#define CAT_CTS_CTRL_CNT (1121L)
+#define CAT_CTS_DATA (1122L)
+#define CAT_CTS_DATA_CAT_A (1123L)
+#define CAT_CTS_DATA_CAT_B (1124L)
+#define CAT_DCT_CTRL (1125L)
+#define CAT_DCT_CTRL_ADR (1126L)
+#define CAT_DCT_CTRL_CNT (1127L)
+#define CAT_DCT_DATA (1128L)
+#define CAT_DCT_DATA_RES (1129L)
+#define CAT_DCT_SEL (1130L)
+#define CAT_DCT_SEL_LU (1131L)
+#define CAT_EXO_CTRL (1132L)
+#define CAT_EXO_CTRL_ADR (1133L)
+#define CAT_EXO_CTRL_CNT (1134L)
+#define CAT_EXO_DATA (1135L)
+#define CAT_EXO_DATA_DYN (1136L)
+#define CAT_EXO_DATA_OFS (1137L)
+#define CAT_FCE_CTRL (1138L)
+#define CAT_FCE_CTRL_ADR (1139L)
+#define CAT_FCE_CTRL_CNT (1140L)
+#define CAT_FCE_DATA (1141L)
+#define CAT_FCE_DATA_ENABLE (1142L)
+#define CAT_FCS_CTRL (1143L)
+#define CAT_FCS_CTRL_ADR (1144L)
+#define CAT_FCS_CTRL_CNT (1145L)
+#define CAT_FCS_DATA (1146L)
+#define CAT_FCS_DATA_CATEGORY (1147L)
+#define CAT_FTE0_CTRL (1148L)
+#define CAT_FTE0_CTRL_ADR (1149L)
+#define CAT_FTE0_CTRL_CNT (1150L)
+#define CAT_FTE0_DATA (1151L)
+#define CAT_FTE0_DATA_ENABLE (1152L)
+#define CAT_FTE1_CTRL (1153L)
+#define CAT_FTE1_CTRL_ADR (1154L)
+#define CAT_FTE1_CTRL_CNT (1155L)
+#define CAT_FTE1_DATA (1156L)
+#define CAT_FTE1_DATA_ENABLE (1157L)
+#define CAT_FTE_CTRL (1158L)
+#define CAT_FTE_CTRL_ADR (1159L)
+#define CAT_FTE_CTRL_CNT (1160L)
+#define CAT_FTE_DATA (1161L)
+#define CAT_FTE_DATA_ENABLE (1162L)
+#define CAT_FTE_FLM_CTRL (1163L)
+#define CAT_FTE_FLM_CTRL_ADR (1164L)
+#define CAT_FTE_FLM_CTRL_CNT (1165L)
+#define CAT_FTE_FLM_DATA (1166L)
+#define CAT_FTE_FLM_DATA_ENABLE (1167L)
+#define CAT_JOIN (1168L)
+#define CAT_JOIN_J1 (1169L)
+#define CAT_JOIN_J2 (1170L)
+#define CAT_KCC (1171L)
+#define CAT_KCC_CTRL (1172L)
+#define CAT_KCC_CTRL_ADR (1173L)
+#define CAT_KCC_CTRL_CNT (1174L)
+#define CAT_KCC_DATA (1175L)
+#define CAT_KCC_DATA_CATEGORY (1176L)
+#define CAT_KCC_DATA_ID (1177L)
+#define CAT_KCC_DATA_KEY (1178L)
+#define CAT_KCE0_CTRL (1179L)
+#define CAT_KCE0_CTRL_ADR (1180L)
+#define CAT_KCE0_CTRL_CNT (1181L)
+#define CAT_KCE0_DATA (1182L)
+#define CAT_KCE0_DATA_ENABLE (1183L)
+#define CAT_KCE1_CTRL (1184L)
+#define CAT_KCE1_CTRL_ADR (1185L)
+#define CAT_KCE1_CTRL_CNT (1186L)
+#define CAT_KCE1_DATA (1187L)
+#define CAT_KCE1_DATA_ENABLE (1188L)
+#define CAT_KCE_CTRL (1189L)
+#define CAT_KCE_CTRL_ADR (1190L)
+#define CAT_KCE_CTRL_CNT (1191L)
+#define CAT_KCE_DATA (1192L)
+#define CAT_KCE_DATA_ENABLE (1193L)
+#define CAT_KCS0_CTRL (1194L)
+#define CAT_KCS0_CTRL_ADR (1195L)
+#define CAT_KCS0_CTRL_CNT (1196L)
+#define CAT_KCS0_DATA (1197L)
+#define CAT_KCS0_DATA_CATEGORY (1198L)
+#define CAT_KCS1_CTRL (1199L)
+#define CAT_KCS1_CTRL_ADR (1200L)
+#define CAT_KCS1_CTRL_CNT (1201L)
+#define CAT_KCS1_DATA (1202L)
+#define CAT_KCS1_DATA_CATEGORY (1203L)
+#define CAT_KCS_CTRL (1204L)
+#define CAT_KCS_CTRL_ADR (1205L)
+#define CAT_KCS_CTRL_CNT (1206L)
+#define CAT_KCS_DATA (1207L)
+#define CAT_KCS_DATA_CATEGORY (1208L)
+#define CAT_LEN_CTRL (1209L)
+#define CAT_LEN_CTRL_ADR (1210L)
+#define CAT_LEN_CTRL_CNT (1211L)
+#define CAT_LEN_DATA (1212L)
+#define CAT_LEN_DATA_DYN1 (1213L)
+#define CAT_LEN_DATA_DYN2 (1214L)
+#define CAT_LEN_DATA_INV (1215L)
+#define CAT_LEN_DATA_LOWER (1216L)
+#define CAT_LEN_DATA_UPPER (1217L)
+#define CAT_RCK_CTRL (1218L)
+#define CAT_RCK_CTRL_ADR (1219L)
+#define CAT_RCK_CTRL_CNT (1220L)
+#define CAT_RCK_DATA (1221L)
+#define CAT_RCK_DATA_CM0U (1222L)
+#define CAT_RCK_DATA_CM1U (1223L)
+#define CAT_RCK_DATA_CM2U (1224L)
+#define CAT_RCK_DATA_CM3U (1225L)
+#define CAT_RCK_DATA_CM4U (1226L)
+#define CAT_RCK_DATA_CM5U (1227L)
+#define CAT_RCK_DATA_CM6U (1228L)
+#define CAT_RCK_DATA_CM7U (1229L)
+#define CAT_RCK_DATA_CML0 (1230L)
+#define CAT_RCK_DATA_CML1 (1231L)
+#define CAT_RCK_DATA_CML2 (1232L)
+#define CAT_RCK_DATA_CML3 (1233L)
+#define CAT_RCK_DATA_CML4 (1234L)
+#define CAT_RCK_DATA_CML5 (1235L)
+#define CAT_RCK_DATA_CML6 (1236L)
+#define CAT_RCK_DATA_CML7 (1237L)
+#define CAT_RCK_DATA_SEL0 (1238L)
+#define CAT_RCK_DATA_SEL1 (1239L)
+#define CAT_RCK_DATA_SEL2 (1240L)
+#define CAT_RCK_DATA_SEL3 (1241L)
+#define CAT_RCK_DATA_SEL4 (1242L)
+#define CAT_RCK_DATA_SEL5 (1243L)
+#define CAT_RCK_DATA_SEL6 (1244L)
+#define CAT_RCK_DATA_SEL7 (1245L)
+#define CAT_RCK_DATA_SEU0 (1246L)
+#define CAT_RCK_DATA_SEU1 (1247L)
+#define CAT_RCK_DATA_SEU2 (1248L)
+#define CAT_RCK_DATA_SEU3 (1249L)
+#define CAT_RCK_DATA_SEU4 (1250L)
+#define CAT_RCK_DATA_SEU5 (1251L)
+#define CAT_RCK_DATA_SEU6 (1252L)
+#define CAT_RCK_DATA_SEU7 (1253L)
+/* CB */
+#define CB_CTRL (1254L)
+#define CB_CTRL_BP (1255L)
+#define CB_CTRL_BYPASS (1256L)
+#define CB_CTRL_ENABLE (1257L)
+#define CB_CTRL_QMA (1258L)
+#define CB_CTRL_QME (1259L)
+#define CB_DBG_BP (1260L)
+#define CB_DBG_BP_CNT (1261L)
+#define CB_DBG_DQ (1262L)
+#define CB_DBG_DQ_MAX (1263L)
+#define CB_DBG_EGS_QUEUE (1264L)
+#define CB_DBG_EGS_QUEUE_ADD (1265L)
+#define CB_DBG_EGS_QUEUE_AND (1266L)
+#define CB_DBG_FREE1200 (1267L)
+#define CB_DBG_FREE1200_CNT (1268L)
+#define CB_DBG_FREE1800 (1269L)
+#define CB_DBG_FREE1800_CNT (1270L)
+#define CB_DBG_FREE600 (1271L)
+#define CB_DBG_FREE600_CNT (1272L)
+#define CB_DBG_H16 (1273L)
+#define CB_DBG_H16_CNT (1274L)
+#define CB_DBG_H32 (1275L)
+#define CB_DBG_H32_CNT (1276L)
+#define CB_DBG_H64 (1277L)
+#define CB_DBG_H64_CNT (1278L)
+#define CB_DBG_HAVE (1279L)
+#define CB_DBG_HAVE_CNT (1280L)
+#define CB_DBG_IGS_QUEUE (1281L)
+#define CB_DBG_IGS_QUEUE_ADD (1282L)
+#define CB_DBG_IGS_QUEUE_AND (1283L)
+#define CB_DBG_QM_CELL_CNT (1284L)
+#define CB_DBG_QM_CELL_CNT_CNT (1285L)
+#define CB_DBG_QM_CELL_XOR (1286L)
+#define CB_DBG_QM_CELL_XOR_XOR (1287L)
+#define CB_QPM_CTRL (1288L)
+#define CB_QPM_CTRL_ADR (1289L)
+#define CB_QPM_CTRL_CNT (1290L)
+#define CB_QPM_DATA (1291L)
+#define CB_QPM_DATA_P (1292L)
+#define CB_QUEUE_MAX (1293L)
+#define CB_QUEUE_MAX_MAX (1294L)
+#define CB_STATUS (1295L)
+#define CB_STATUS_BP (1296L)
+#define CB_STATUS_DB (1297L)
+#define CB_STATUS_EMPTY (1298L)
+#define CB_STATUS_IDLE (1299L)
+#define CB_STATUS_OVF (1300L)
+#define CB_TS_RATE (1301L)
+#define CB_TS_RATE_CNT (1302L)
+#define CB_TS_SAVE (1303L)
+#define CB_TS_SAVE_MAX (1304L)
+/* CCIP */
+#define CCIP_AFU_ID_L (1305L)
+#define CCIP_AFU_ID_L_ID (1306L)
+#define CCIP_AFU_ID_U (1307L)
+#define CCIP_AFU_ID_U_ID (1308L)
+#define CCIP_CONTROL (1309L)
+#define CCIP_CONTROL_FENCE (1310L)
+#define CCIP_DFH (1311L)
+#define CCIP_DFH_AFU_VER_MAJOR (1312L)
+#define CCIP_DFH_AFU_VER_MINOR (1313L)
+#define CCIP_DFH_CCIP_VER (1314L)
+#define CCIP_DFH_END (1315L)
+#define CCIP_DFH_FEATURE_TYPE (1316L)
+#define CCIP_DFH_NEXT (1317L)
+#define CCIP_RSVD0 (1318L)
+#define CCIP_RSVD0_RSVD (1319L)
+#define CCIP_RSVD1 (1320L)
+#define CCIP_RSVD1_RSVD (1321L)
+#define CCIP_STATUS (1322L)
+#define CCIP_STATUS_ERR (1323L)
+#define CCIP_STATUS_PWR (1324L)
+/* CFP4_CTRL_GBOX */
+#define CFP4_CTRL_GBOX_CFG (1325L)
+#define CFP4_CTRL_GBOX_CFG_GLB_ALARMN (1326L)
+#define CFP4_CTRL_GBOX_CFG_INTERR (1327L)
+#define CFP4_CTRL_GBOX_CFG_MOD_ABS (1328L)
+#define CFP4_CTRL_GBOX_CFG_MOD_LOPWR (1329L)
+#define CFP4_CTRL_GBOX_CFG_MOD_RSTN (1330L)
+#define CFP4_CTRL_GBOX_CFG_NRESET (1331L)
+#define CFP4_CTRL_GBOX_CFG_RXLOS (1332L)
+#define CFP4_CTRL_GBOX_CFG_TXDIS (1333L)
+#define CFP4_CTRL_GBOX_CFP4 (1334L)
+#define CFP4_CTRL_GBOX_CFP4_GLB_ALARMN (1335L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_ABS (1336L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_LOPWR (1337L)
+#define CFP4_CTRL_GBOX_CFP4_MOD_RSTN (1338L)
+#define CFP4_CTRL_GBOX_CFP4_RXLOS (1339L)
+#define CFP4_CTRL_GBOX_CFP4_TXDIS (1340L)
+#define CFP4_CTRL_GBOX_GBOX (1341L)
+#define CFP4_CTRL_GBOX_GBOX_INTERR (1342L)
+#define CFP4_CTRL_GBOX_GBOX_NRESET (1343L)
+#define CFP4_CTRL_GBOX_GPIO (1344L)
+#define CFP4_CTRL_GBOX_GPIO_GLB_ALARMN (1345L)
+#define CFP4_CTRL_GBOX_GPIO_INTERR (1346L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_ABS (1347L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_LOPWR (1348L)
+#define CFP4_CTRL_GBOX_GPIO_MOD_RSTN (1349L)
+#define CFP4_CTRL_GBOX_GPIO_NRESET (1350L)
+#define CFP4_CTRL_GBOX_GPIO_RXLOS (1351L)
+#define CFP4_CTRL_GBOX_GPIO_TXDIS (1352L)
+/* COR */
+#define COR_CTRL (1393L)
+#define COR_CTRL_EN (1394L)
+#define COR_DBG_COR_CNT (1395L)
+#define COR_DBG_COR_CNT_VAL (1396L)
+#define COR_DBG_COR_ID (1397L)
+#define COR_DBG_COR_ID_VAL (1398L)
+#define COR_DBG_COR_LO (1399L)
+#define COR_DBG_COR_LO_VAL (1400L)
+#define COR_DBG_COR_UP (1401L)
+#define COR_DBG_COR_UP_VAL (1402L)
+#define COR_DCEO (1403L)
+#define COR_DCEO_VAL (1404L)
+#define COR_DCSO (1405L)
+#define COR_DCSO_VAL (1406L)
+#define COR_DEEO (1407L)
+#define COR_DEEO_VAL (1408L)
+#define COR_DEO (1409L)
+#define COR_DEO_VAL (1410L)
+#define COR_DESO (1411L)
+#define COR_DESO_VAL (1412L)
+#define COR_DSEO (1413L)
+#define COR_DSEO_VAL (1414L)
+#define COR_DSO (1415L)
+#define COR_DSO_VAL (1416L)
+#define COR_DSSO (1417L)
+#define COR_DSSO_VAL (1418L)
+#define COR_RCP_CTRL (1419L)
+#define COR_RCP_CTRL_ADR (1420L)
+#define COR_RCP_CTRL_CNT (1421L)
+#define COR_RCP_DATA (1422L)
+#define COR_RCP_DATA_CBM1 (1423L)
+#define COR_RCP_DATA_EN (1424L)
+#define COR_RCP_DATA_END_PROT (1425L)
+#define COR_RCP_DATA_END_STATIC (1426L)
+#define COR_RCP_DATA_IP_CHK (1427L)
+#define COR_RCP_DATA_IP_DSCP (1428L)
+#define COR_RCP_DATA_IP_DST (1429L)
+#define COR_RCP_DATA_IP_ECN (1430L)
+#define COR_RCP_DATA_IP_FLAGS (1431L)
+#define COR_RCP_DATA_IP_FLOW (1432L)
+#define COR_RCP_DATA_IP_HOP (1433L)
+#define COR_RCP_DATA_IP_IDENT (1434L)
+#define COR_RCP_DATA_IP_NXTHDR (1435L)
+#define COR_RCP_DATA_IP_SRC (1436L)
+#define COR_RCP_DATA_IP_TC (1437L)
+#define COR_RCP_DATA_IP_TTL (1438L)
+#define COR_RCP_DATA_MAX_LEN (1439L)
+#define COR_RCP_DATA_PROT_OFS1 (1440L)
+#define COR_RCP_DATA_START_PROT (1441L)
+#define COR_RCP_DATA_START_STATIC (1442L)
+#define COR_RCP_DATA_STTC_OFS1 (1443L)
+#define COR_RCP_DATA_TCP_CHK (1444L)
+#define COR_RCP_DATA_TCP_DST (1445L)
+#define COR_RCP_DATA_TCP_SEQ (1446L)
+#define COR_RCP_DATA_TCP_SRC (1447L)
+#define COR_RCP_DATA_TNL (1448L)
+#define COR_RCP_DATA_UDP_CHK (1449L)
+#define COR_RCP_DATA_UDP_DST (1450L)
+#define COR_RCP_DATA_UDP_SRC (1451L)
+/* CPY */
+#define CPY_WRITER0_CTRL (1452L)
+#define CPY_WRITER0_CTRL_ADR (1453L)
+#define CPY_WRITER0_CTRL_CNT (1454L)
+#define CPY_WRITER0_DATA (1455L)
+#define CPY_WRITER0_DATA_DYN (1456L)
+#define CPY_WRITER0_DATA_LEN (1457L)
+#define CPY_WRITER0_DATA_MASK_POINTER (1458L)
+#define CPY_WRITER0_DATA_OFS (1459L)
+#define CPY_WRITER0_DATA_READER_SELECT (1460L)
+#define CPY_WRITER0_MASK_CTRL (1461L)
+#define CPY_WRITER0_MASK_CTRL_ADR (1462L)
+#define CPY_WRITER0_MASK_CTRL_CNT (1463L)
+#define CPY_WRITER0_MASK_DATA (1464L)
+#define CPY_WRITER0_MASK_DATA_BYTE_MASK (1465L)
+#define CPY_WRITER1_CTRL (1466L)
+#define CPY_WRITER1_CTRL_ADR (1467L)
+#define CPY_WRITER1_CTRL_CNT (1468L)
+#define CPY_WRITER1_DATA (1469L)
+#define CPY_WRITER1_DATA_DYN (1470L)
+#define CPY_WRITER1_DATA_LEN (1471L)
+#define CPY_WRITER1_DATA_MASK_POINTER (1472L)
+#define CPY_WRITER1_DATA_OFS (1473L)
+#define CPY_WRITER1_DATA_READER_SELECT (1474L)
+#define CPY_WRITER1_MASK_CTRL (1475L)
+#define CPY_WRITER1_MASK_CTRL_ADR (1476L)
+#define CPY_WRITER1_MASK_CTRL_CNT (1477L)
+#define CPY_WRITER1_MASK_DATA (1478L)
+#define CPY_WRITER1_MASK_DATA_BYTE_MASK (1479L)
+#define CPY_WRITER2_CTRL (1480L)
+#define CPY_WRITER2_CTRL_ADR (1481L)
+#define CPY_WRITER2_CTRL_CNT (1482L)
+#define CPY_WRITER2_DATA (1483L)
+#define CPY_WRITER2_DATA_DYN (1484L)
+#define CPY_WRITER2_DATA_LEN (1485L)
+#define CPY_WRITER2_DATA_MASK_POINTER (1486L)
+#define CPY_WRITER2_DATA_OFS (1487L)
+#define CPY_WRITER2_DATA_READER_SELECT (1488L)
+#define CPY_WRITER2_MASK_CTRL (1489L)
+#define CPY_WRITER2_MASK_CTRL_ADR (1490L)
+#define CPY_WRITER2_MASK_CTRL_CNT (1491L)
+#define CPY_WRITER2_MASK_DATA (1492L)
+#define CPY_WRITER2_MASK_DATA_BYTE_MASK (1493L)
+#define CPY_WRITER3_CTRL (1494L)
+#define CPY_WRITER3_CTRL_ADR (1495L)
+#define CPY_WRITER3_CTRL_CNT (1496L)
+#define CPY_WRITER3_DATA (1497L)
+#define CPY_WRITER3_DATA_DYN (1498L)
+#define CPY_WRITER3_DATA_LEN (1499L)
+#define CPY_WRITER3_DATA_MASK_POINTER (1500L)
+#define CPY_WRITER3_DATA_OFS (1501L)
+#define CPY_WRITER3_DATA_READER_SELECT (1502L)
+#define CPY_WRITER3_MASK_CTRL (1503L)
+#define CPY_WRITER3_MASK_CTRL_ADR (1504L)
+#define CPY_WRITER3_MASK_CTRL_CNT (1505L)
+#define CPY_WRITER3_MASK_DATA (1506L)
+#define CPY_WRITER3_MASK_DATA_BYTE_MASK (1507L)
+#define CPY_WRITER4_CTRL (1508L)
+#define CPY_WRITER4_CTRL_ADR (1509L)
+#define CPY_WRITER4_CTRL_CNT (1510L)
+#define CPY_WRITER4_DATA (1511L)
+#define CPY_WRITER4_DATA_DYN (1512L)
+#define CPY_WRITER4_DATA_LEN (1513L)
+#define CPY_WRITER4_DATA_MASK_POINTER (1514L)
+#define CPY_WRITER4_DATA_OFS (1515L)
+#define CPY_WRITER4_DATA_READER_SELECT (1516L)
+#define CPY_WRITER4_MASK_CTRL (1517L)
+#define CPY_WRITER4_MASK_CTRL_ADR (1518L)
+#define CPY_WRITER4_MASK_CTRL_CNT (1519L)
+#define CPY_WRITER4_MASK_DATA (1520L)
+#define CPY_WRITER4_MASK_DATA_BYTE_MASK (1521L)
+#define CPY_WRITER5_CTRL (1522L)
+#define CPY_WRITER5_CTRL_ADR (1523L)
+#define CPY_WRITER5_CTRL_CNT (1524L)
+#define CPY_WRITER5_DATA (1525L)
+#define CPY_WRITER5_DATA_DYN (1526L)
+#define CPY_WRITER5_DATA_LEN (1527L)
+#define CPY_WRITER5_DATA_MASK_POINTER (1528L)
+#define CPY_WRITER5_DATA_OFS (1529L)
+#define CPY_WRITER5_DATA_READER_SELECT (1530L)
+#define CPY_WRITER5_MASK_CTRL (1531L)
+#define CPY_WRITER5_MASK_CTRL_ADR (1532L)
+#define CPY_WRITER5_MASK_CTRL_CNT (1533L)
+#define CPY_WRITER5_MASK_DATA (1534L)
+#define CPY_WRITER5_MASK_DATA_BYTE_MASK (1535L)
+/* CSU */
+#define CSU_RCP_CTRL (1536L)
+#define CSU_RCP_CTRL_ADR (1537L)
+#define CSU_RCP_CTRL_CNT (1538L)
+#define CSU_RCP_DATA (1539L)
+#define CSU_RCP_DATA_IL3_CMD (1540L)
+#define CSU_RCP_DATA_IL4_CMD (1541L)
+#define CSU_RCP_DATA_OL3_CMD (1542L)
+#define CSU_RCP_DATA_OL4_CMD (1543L)
+/* DBS */
+#define DBS_RX_AM_CTRL (1544L)
+#define DBS_RX_AM_CTRL_ADR (1545L)
+#define DBS_RX_AM_CTRL_CNT (1546L)
+#define DBS_RX_AM_DATA (1547L)
+#define DBS_RX_AM_DATA_ENABLE (1548L)
+#define DBS_RX_AM_DATA_GPA (1549L)
+#define DBS_RX_AM_DATA_HID (1550L)
+#define DBS_RX_AM_DATA_INT (1551L)
+#define DBS_RX_AM_DATA_PCKED (1552L)
+#define DBS_RX_CONTROL (1553L)
+#define DBS_RX_CONTROL_AME (1554L)
+#define DBS_RX_CONTROL_AMS (1555L)
+#define DBS_RX_CONTROL_LQ (1556L)
+#define DBS_RX_CONTROL_QE (1557L)
+#define DBS_RX_CONTROL_UWE (1558L)
+#define DBS_RX_CONTROL_UWS (1559L)
+#define DBS_RX_DR_CTRL (1560L)
+#define DBS_RX_DR_CTRL_ADR (1561L)
+#define DBS_RX_DR_CTRL_CNT (1562L)
+#define DBS_RX_DR_DATA (1563L)
+#define DBS_RX_DR_DATA_GPA (1564L)
+#define DBS_RX_DR_DATA_HDR (1565L)
+#define DBS_RX_DR_DATA_HID (1566L)
+#define DBS_RX_DR_DATA_PCKED (1567L)
+#define DBS_RX_DR_DATA_QS (1568L)
+#define DBS_RX_IDLE (1569L)
+#define DBS_RX_IDLE_BUSY (1570L)
+#define DBS_RX_IDLE_IDLE (1571L)
+#define DBS_RX_IDLE_QUEUE (1572L)
+#define DBS_RX_INIT (1573L)
+#define DBS_RX_INIT_BUSY (1574L)
+#define DBS_RX_INIT_INIT (1575L)
+#define DBS_RX_INIT_QUEUE (1576L)
+#define DBS_RX_INIT_VAL (1577L)
+#define DBS_RX_INIT_VAL_IDX (1578L)
+#define DBS_RX_INIT_VAL_PTR (1579L)
+#define DBS_RX_PTR (1580L)
+#define DBS_RX_PTR_PTR (1581L)
+#define DBS_RX_PTR_QUEUE (1582L)
+#define DBS_RX_PTR_VALID (1583L)
+#define DBS_RX_UW_CTRL (1584L)
+#define DBS_RX_UW_CTRL_ADR (1585L)
+#define DBS_RX_UW_CTRL_CNT (1586L)
+#define DBS_RX_UW_DATA (1587L)
+#define DBS_RX_UW_DATA_GPA (1588L)
+#define DBS_RX_UW_DATA_HID (1589L)
+#define DBS_RX_UW_DATA_INT (1590L)
+#define DBS_RX_UW_DATA_ISTK (1591L)
+#define DBS_RX_UW_DATA_PCKED (1592L)
+#define DBS_RX_UW_DATA_QS (1593L)
+#define DBS_RX_UW_DATA_VEC (1594L)
+#define DBS_STATUS (1595L)
+#define DBS_STATUS_OK (1596L)
+#define DBS_TX_AM_CTRL (1597L)
+#define DBS_TX_AM_CTRL_ADR (1598L)
+#define DBS_TX_AM_CTRL_CNT (1599L)
+#define DBS_TX_AM_DATA (1600L)
+#define DBS_TX_AM_DATA_ENABLE (1601L)
+#define DBS_TX_AM_DATA_GPA (1602L)
+#define DBS_TX_AM_DATA_HID (1603L)
+#define DBS_TX_AM_DATA_INT (1604L)
+#define DBS_TX_AM_DATA_PCKED (1605L)
+#define DBS_TX_CONTROL (1606L)
+#define DBS_TX_CONTROL_AME (1607L)
+#define DBS_TX_CONTROL_AMS (1608L)
+#define DBS_TX_CONTROL_LQ (1609L)
+#define DBS_TX_CONTROL_QE (1610L)
+#define DBS_TX_CONTROL_UWE (1611L)
+#define DBS_TX_CONTROL_UWS (1612L)
+#define DBS_TX_DR_CTRL (1613L)
+#define DBS_TX_DR_CTRL_ADR (1614L)
+#define DBS_TX_DR_CTRL_CNT (1615L)
+#define DBS_TX_DR_DATA (1616L)
+#define DBS_TX_DR_DATA_GPA (1617L)
+#define DBS_TX_DR_DATA_HDR (1618L)
+#define DBS_TX_DR_DATA_HID (1619L)
+#define DBS_TX_DR_DATA_PCKED (1620L)
+#define DBS_TX_DR_DATA_PORT (1621L)
+#define DBS_TX_DR_DATA_QS (1622L)
+#define DBS_TX_IDLE (1623L)
+#define DBS_TX_IDLE_BUSY (1624L)
+#define DBS_TX_IDLE_IDLE (1625L)
+#define DBS_TX_IDLE_QUEUE (1626L)
+#define DBS_TX_INIT (1627L)
+#define DBS_TX_INIT_BUSY (1628L)
+#define DBS_TX_INIT_INIT (1629L)
+#define DBS_TX_INIT_QUEUE (1630L)
+#define DBS_TX_INIT_VAL (1631L)
+#define DBS_TX_INIT_VAL_IDX (1632L)
+#define DBS_TX_INIT_VAL_PTR (1633L)
+#define DBS_TX_PTR (1634L)
+#define DBS_TX_PTR_PTR (1635L)
+#define DBS_TX_PTR_QUEUE (1636L)
+#define DBS_TX_PTR_VALID (1637L)
+#define DBS_TX_QOS_CTRL (1638L)
+#define DBS_TX_QOS_CTRL_ADR (1639L)
+#define DBS_TX_QOS_CTRL_CNT (1640L)
+#define DBS_TX_QOS_DATA (1641L)
+#define DBS_TX_QOS_DATA_BS (1642L)
+#define DBS_TX_QOS_DATA_EN (1643L)
+#define DBS_TX_QOS_DATA_IR (1644L)
+#define DBS_TX_QOS_DATA_MUL (1645L)
+#define DBS_TX_QOS_RATE (1646L)
+#define DBS_TX_QOS_RATE_DIV (1647L)
+#define DBS_TX_QOS_RATE_MUL (1648L)
+#define DBS_TX_QP_CTRL (1649L)
+#define DBS_TX_QP_CTRL_ADR (1650L)
+#define DBS_TX_QP_CTRL_CNT (1651L)
+#define DBS_TX_QP_DATA (1652L)
+#define DBS_TX_QP_DATA_VPORT (1653L)
+#define DBS_TX_UW_CTRL (1654L)
+#define DBS_TX_UW_CTRL_ADR (1655L)
+#define DBS_TX_UW_CTRL_CNT (1656L)
+#define DBS_TX_UW_DATA (1657L)
+#define DBS_TX_UW_DATA_GPA (1658L)
+#define DBS_TX_UW_DATA_HID (1659L)
+#define DBS_TX_UW_DATA_INO (1660L)
+#define DBS_TX_UW_DATA_INT (1661L)
+#define DBS_TX_UW_DATA_ISTK (1662L)
+#define DBS_TX_UW_DATA_PCKED (1663L)
+#define DBS_TX_UW_DATA_QS (1664L)
+#define DBS_TX_UW_DATA_VEC (1665L)
+/* DDP */
+#define DDP_AGING_CTRL (1666L)
+#define DDP_AGING_CTRL_AGING_RATE (1667L)
+#define DDP_AGING_CTRL_MAX_CNT (1668L)
+#define DDP_CTRL (1669L)
+#define DDP_CTRL_INIT (1670L)
+#define DDP_CTRL_INIT_DONE (1671L)
+#define DDP_RCP_CTRL (1672L)
+#define DDP_RCP_CTRL_ADR (1673L)
+#define DDP_RCP_CTRL_CNT (1674L)
+#define DDP_RCP_DATA (1675L)
+#define DDP_RCP_DATA_EN (1676L)
+#define DDP_RCP_DATA_GROUPID (1677L)
+/* EPP */
+#define EPP_QUEUE_MTU_CTRL (1755L)
+#define EPP_QUEUE_MTU_CTRL_ADR (1756L)
+#define EPP_QUEUE_MTU_CTRL_CNT (1757L)
+#define EPP_QUEUE_MTU_DATA (1758L)
+#define EPP_QUEUE_MTU_DATA_MAX_MTU (1759L)
+#define EPP_QUEUE_VPORT_CTRL (1760L)
+#define EPP_QUEUE_VPORT_CTRL_ADR (1761L)
+#define EPP_QUEUE_VPORT_CTRL_CNT (1762L)
+#define EPP_QUEUE_VPORT_DATA (1763L)
+#define EPP_QUEUE_VPORT_DATA_VPORT (1764L)
+#define EPP_RCP_CTRL (1765L)
+#define EPP_RCP_CTRL_ADR (1766L)
+#define EPP_RCP_CTRL_CNT (1767L)
+#define EPP_RCP_DATA (1768L)
+#define EPP_RCP_DATA_FIXED_18B_L2_MTU (1769L)
+#define EPP_RCP_DATA_QUEUE_MTU_EPP_EN (1770L)
+#define EPP_RCP_DATA_QUEUE_QOS_EPP_EN (1771L)
+#define EPP_RCP_DATA_SIZE_ADJUST_TXP (1772L)
+#define EPP_RCP_DATA_SIZE_ADJUST_VPORT (1773L)
+#define EPP_RCP_DATA_TX_MTU_EPP_EN (1774L)
+#define EPP_RCP_DATA_TX_QOS_EPP_EN (1775L)
+#define EPP_TXP_MTU_CTRL (1776L)
+#define EPP_TXP_MTU_CTRL_ADR (1777L)
+#define EPP_TXP_MTU_CTRL_CNT (1778L)
+#define EPP_TXP_MTU_DATA (1779L)
+#define EPP_TXP_MTU_DATA_MAX_MTU (1780L)
+#define EPP_TXP_QOS_CTRL (1781L)
+#define EPP_TXP_QOS_CTRL_ADR (1782L)
+#define EPP_TXP_QOS_CTRL_CNT (1783L)
+#define EPP_TXP_QOS_DATA (1784L)
+#define EPP_TXP_QOS_DATA_BS (1785L)
+#define EPP_TXP_QOS_DATA_EN (1786L)
+#define EPP_TXP_QOS_DATA_IR (1787L)
+#define EPP_TXP_QOS_DATA_IR_FRACTION (1788L)
+#define EPP_VPORT_QOS_CTRL (1789L)
+#define EPP_VPORT_QOS_CTRL_ADR (1790L)
+#define EPP_VPORT_QOS_CTRL_CNT (1791L)
+#define EPP_VPORT_QOS_DATA (1792L)
+#define EPP_VPORT_QOS_DATA_BS (1793L)
+#define EPP_VPORT_QOS_DATA_EN (1794L)
+#define EPP_VPORT_QOS_DATA_IR (1795L)
+#define EPP_VPORT_QOS_DATA_IR_FRACTION (1796L)
+/* EQM - egress queue management: control, debug and error-status register IDs */
+#define EQM_CTRL (1797L)
+#define EQM_CTRL_DBG_CRC_ERR (1798L)
+#define EQM_CTRL_DBG_FORCE_ERR (1799L)
+#define EQM_CTRL_DBG_RMT_ERR (1800L)
+#define EQM_CTRL_DBG_SYNC_ERR (1801L)
+#define EQM_CTRL_ENABLE (1802L)
+#define EQM_CTRL_MODE (1803L)
+#define EQM_CTRL_PP_RST (1804L)
+#define EQM_DBG (1805L)
+#define EQM_DBG_FIFO_OF (1806L)
+#define EQM_DBG_LCL_EGS_QKA_OF (1807L)
+#define EQM_DBG_LCL_EGS_QLVL_OF (1808L)
+#define EQM_DBG_QBLK_CREDITS (1809L)
+#define EQM_STATUS (1810L)
+#define EQM_STATUS_LCL_EGS_OF_ERR (1811L)
+#define EQM_STATUS_NIF_CRC_ERR (1812L)
+#define EQM_STATUS_NIF_PP_LOOP_LCK (1813L)
+#define EQM_STATUS_NIF_RX_OF_ERR (1814L)
+#define EQM_STATUS_NIF_SYNC_ERR (1815L)
+#define EQM_STATUS_QM_CRC_ERR (1816L)
+#define EQM_STATUS_RMT_EGS_OF_ERR (1817L)
+#define EQM_STATUS_RMT_ERR (1818L)
+#define EQM_STATUS_RMT_IGS_OF_ERR (1819L)
+/* FHM - back-pressure, fill-level and CRC/drop statistics registers (NIF/RMC/SDRAM paths) */
+#define FHM_BACK_PRESSURE (1820L)
+#define FHM_BACK_PRESSURE_NIF (1821L)
+#define FHM_BACK_PRESSURE_RMC (1822L)
+#define FHM_BACK_PRESSURE_RMC_S (1823L)
+#define FHM_CRC_ERROR_NIF (1824L)
+#define FHM_CRC_ERROR_NIF_CNT (1825L)
+#define FHM_CRC_ERROR_SDC (1826L)
+#define FHM_CRC_ERROR_SDC_CNT (1827L)
+#define FHM_CTRL (1828L)
+#define FHM_CTRL_CNT_CLR (1829L)
+#define FHM_CTRL_ENABLE (1830L)
+#define FHM_CTRL_MODE (1831L)
+#define FHM_DEBUG_CRC (1832L)
+#define FHM_DEBUG_CRC_FORCE_ERROR (1833L)
+#define FHM_DEBUG_SDRAM_SIZE (1834L)
+#define FHM_DEBUG_SDRAM_SIZE_MASK (1835L)
+#define FHM_FILL_LEVEL (1836L)
+#define FHM_FILL_LEVEL_CELLS (1837L)
+#define FHM_MAC_MICRO_DROP (1838L)
+#define FHM_MAC_MICRO_DROP_CNT (1839L)
+#define FHM_MAX_FILL_LEVEL (1840L)
+#define FHM_MAX_FILL_LEVEL_CELLS (1841L)
+#define FHM_PKT_DROP (1842L)
+#define FHM_PKT_DROP_CNT (1843L)
+#define FHM_PKT_DROP_BYTES (1844L)
+#define FHM_PKT_DROP_BYTES_CNT (1845L)
+/* FLM - Flow Matcher: learn/info/status buffers, recipes, priorities and statistics counters */
+#define FLM_BUF_CTRL (1855L)
+#define FLM_BUF_CTRL_INF_AVAIL (1856L)
+#define FLM_BUF_CTRL_LRN_FREE (1857L)
+#define FLM_BUF_CTRL_STA_AVAIL (1858L)
+#define FLM_CONTROL (1859L)
+#define FLM_CONTROL_CRCRD (1860L)
+#define FLM_CONTROL_CRCWR (1861L)
+#define FLM_CONTROL_EAB (1862L)
+#define FLM_CONTROL_ENABLE (1863L)
+#define FLM_CONTROL_INIT (1864L)
+#define FLM_CONTROL_LDS (1865L)
+#define FLM_CONTROL_LFS (1866L)
+#define FLM_CONTROL_LIS (1867L)
+#define FLM_CONTROL_PDS (1868L)
+#define FLM_CONTROL_PIS (1869L)
+#define FLM_CONTROL_RBL (1870L)
+#define FLM_CONTROL_RDS (1871L)
+#define FLM_CONTROL_RIS (1872L)
+#define FLM_CONTROL_SPLIT_SDRAM_USAGE (1873L)
+#define FLM_CONTROL_UDS (1874L)
+#define FLM_CONTROL_UIS (1875L)
+#define FLM_CONTROL_WPD (1876L)
+#define FLM_INF_DATA (1877L)
+#define FLM_INF_DATA_BYTES (1878L)
+#define FLM_INF_DATA_BYT_A (1879L)
+#define FLM_INF_DATA_BYT_B (1880L)
+#define FLM_INF_DATA_CAUSE (1881L)
+#define FLM_INF_DATA_EOR (1882L)
+#define FLM_INF_DATA_ID (1883L)
+#define FLM_INF_DATA_PACKETS (1884L)
+#define FLM_INF_DATA_PCK_A (1885L)
+#define FLM_INF_DATA_PCK_B (1886L)
+#define FLM_INF_DATA_RTX_A (1887L)
+#define FLM_INF_DATA_RTX_B (1888L)
+#define FLM_INF_DATA_TCP_A (1889L)
+#define FLM_INF_DATA_TCP_B (1890L)
+#define FLM_INF_DATA_TS (1891L)
+#define FLM_LOAD_APS (1892L)
+#define FLM_LOAD_APS_APS (1893L)
+#define FLM_LOAD_BIN (1894L)
+#define FLM_LOAD_BIN_BIN (1895L)
+#define FLM_LOAD_LPS (1896L)
+#define FLM_LOAD_LPS_LPS (1897L)
+#define FLM_LOAD_PPS (1898L)
+#define FLM_LOAD_PPS_PPS (1899L)
+#define FLM_LRN_CTRL (1900L)
+#define FLM_LRN_CTRL_FREE (1901L)
+#define FLM_LRN_DATA (1902L)
+#define FLM_LRN_DATA_ADJ (1903L)
+#define FLM_LRN_DATA_COLOR (1904L)
+#define FLM_LRN_DATA_DSCP (1905L)
+#define FLM_LRN_DATA_ENT (1906L)
+#define FLM_LRN_DATA_EOR (1907L)
+#define FLM_LRN_DATA_FILL (1908L)
+#define FLM_LRN_DATA_FT (1909L)
+#define FLM_LRN_DATA_FT_MBR (1910L)
+#define FLM_LRN_DATA_FT_MISS (1911L)
+#define FLM_LRN_DATA_GFI (1912L)
+#define FLM_LRN_DATA_ID (1913L)
+#define FLM_LRN_DATA_KID (1914L)
+#define FLM_LRN_DATA_MBR_ID1 (1915L)
+#define FLM_LRN_DATA_MBR_ID2 (1916L)
+#define FLM_LRN_DATA_MBR_ID3 (1917L)
+#define FLM_LRN_DATA_MBR_ID4 (1918L)
+#define FLM_LRN_DATA_NAT_EN (1919L)
+#define FLM_LRN_DATA_NAT_IP (1920L)
+#define FLM_LRN_DATA_NAT_PORT (1921L)
+#define FLM_LRN_DATA_OP (1922L)
+#define FLM_LRN_DATA_PRIO (1923L)
+#define FLM_LRN_DATA_PROT (1924L)
+#define FLM_LRN_DATA_QFI (1925L)
+#define FLM_LRN_DATA_QW0 (1926L)
+#define FLM_LRN_DATA_QW4 (1927L)
+#define FLM_LRN_DATA_RATE (1928L)
+#define FLM_LRN_DATA_RQI (1929L)
+#define FLM_LRN_DATA_SIZE (1930L)
+#define FLM_LRN_DATA_STAT_PROF (1931L)
+#define FLM_LRN_DATA_SW8 (1932L)
+#define FLM_LRN_DATA_SW9 (1933L)
+#define FLM_LRN_DATA_TAU (1934L)
+#define FLM_LRN_DATA_TEID (1935L)
+#define FLM_LRN_DATA_TTL (1936L)
+#define FLM_LRN_DATA_VOL_IDX (1937L)
+#define FLM_PRIO (1938L)
+#define FLM_PRIO_FT0 (1939L)
+#define FLM_PRIO_FT1 (1940L)
+#define FLM_PRIO_FT2 (1941L)
+#define FLM_PRIO_FT3 (1942L)
+#define FLM_PRIO_LIMIT0 (1943L)
+#define FLM_PRIO_LIMIT1 (1944L)
+#define FLM_PRIO_LIMIT2 (1945L)
+#define FLM_PRIO_LIMIT3 (1946L)
+#define FLM_PST_CTRL (1947L)
+#define FLM_PST_CTRL_ADR (1948L)
+#define FLM_PST_CTRL_CNT (1949L)
+#define FLM_PST_DATA (1950L)
+#define FLM_PST_DATA_BP (1951L)
+#define FLM_PST_DATA_PP (1952L)
+#define FLM_PST_DATA_TP (1953L)
+#define FLM_RCP_CTRL (1954L)
+#define FLM_RCP_CTRL_ADR (1955L)
+#define FLM_RCP_CTRL_CNT (1956L)
+#define FLM_RCP_DATA (1957L)
+#define FLM_RCP_DATA_A (1958L)
+#define FLM_RCP_DATA_AUTO_IPV4_MASK (1959L)
+#define FLM_RCP_DATA_B (1960L)
+#define FLM_RCP_DATA_BYT_DYN (1961L)
+#define FLM_RCP_DATA_BYT_OFS (1962L)
+#define FLM_RCP_DATA_IPN (1963L)
+#define FLM_RCP_DATA_ITF (1964L)
+#define FLM_RCP_DATA_KID (1965L)
+#define FLM_RCP_DATA_LOOKUP (1966L)
+#define FLM_RCP_DATA_MASK (1967L)
+#define FLM_RCP_DATA_OPN (1968L)
+#define FLM_RCP_DATA_QW0_DYN (1969L)
+#define FLM_RCP_DATA_QW0_OFS (1970L)
+#define FLM_RCP_DATA_QW0_SEL (1971L)
+#define FLM_RCP_DATA_QW4_DYN (1972L)
+#define FLM_RCP_DATA_QW4_OFS (1973L)
+#define FLM_RCP_DATA_SW8_DYN (1974L)
+#define FLM_RCP_DATA_SW8_OFS (1975L)
+#define FLM_RCP_DATA_SW8_SEL (1976L)
+#define FLM_RCP_DATA_SW9_DYN (1977L)
+#define FLM_RCP_DATA_SW9_OFS (1978L)
+#define FLM_RCP_DATA_TXPLM (1979L)
+#define FLM_SCRUB (1980L)
+#define FLM_SCRUB_I (1981L)
+#define FLM_STATUS (1982L)
+#define FLM_STATUS_CALIBDONE (1983L)
+#define FLM_STATUS_CRCERR (1984L)
+#define FLM_STATUS_CRITICAL (1985L)
+#define FLM_STATUS_EFT_BP (1986L)
+#define FLM_STATUS_EFT_EVICT_BP (1987L)
+#define FLM_STATUS_IDLE (1988L)
+#define FLM_STATUS_INITDONE (1989L)
+#define FLM_STATUS_PANIC (1990L)
+#define FLM_STAT_AUL_DONE (1991L)
+#define FLM_STAT_AUL_DONE_CNT (1992L)
+#define FLM_STAT_AUL_FAIL (1993L)
+#define FLM_STAT_AUL_FAIL_CNT (1994L)
+#define FLM_STAT_AUL_IGNORE (1995L)
+#define FLM_STAT_AUL_IGNORE_CNT (1996L)
+#define FLM_STAT_CSH_HIT (1997L)
+#define FLM_STAT_CSH_HIT_CNT (1998L)
+#define FLM_STAT_CSH_MISS (1999L)
+#define FLM_STAT_CSH_MISS_CNT (2000L)
+#define FLM_STAT_CSH_UNH (2001L)
+#define FLM_STAT_CSH_UNH_CNT (2002L)
+#define FLM_STAT_CUC_MOVE (2003L)
+#define FLM_STAT_CUC_MOVE_CNT (2004L)
+#define FLM_STAT_CUC_START (2005L)
+#define FLM_STAT_CUC_START_CNT (2006L)
+#define FLM_STAT_FLOWS (2007L)
+#define FLM_STAT_FLOWS_CNT (2008L)
+#define FLM_STAT_INF_DONE (2009L)
+#define FLM_STAT_INF_DONE_CNT (2010L)
+#define FLM_STAT_INF_SKIP (2011L)
+#define FLM_STAT_INF_SKIP_CNT (2012L)
+#define FLM_STAT_LRN_DONE (2013L)
+#define FLM_STAT_LRN_DONE_CNT (2014L)
+#define FLM_STAT_LRN_FAIL (2015L)
+#define FLM_STAT_LRN_FAIL_CNT (2016L)
+#define FLM_STAT_LRN_IGNORE (2017L)
+#define FLM_STAT_LRN_IGNORE_CNT (2018L)
+#define FLM_STAT_PCK_DIS (2019L)
+#define FLM_STAT_PCK_DIS_CNT (2020L)
+#define FLM_STAT_PCK_HIT (2021L)
+#define FLM_STAT_PCK_HIT_CNT (2022L)
+#define FLM_STAT_PCK_MISS (2023L)
+#define FLM_STAT_PCK_MISS_CNT (2024L)
+#define FLM_STAT_PCK_UNH (2025L)
+#define FLM_STAT_PCK_UNH_CNT (2026L)
+#define FLM_STAT_PRB_DONE (2027L)
+#define FLM_STAT_PRB_DONE_CNT (2028L)
+#define FLM_STAT_PRB_IGNORE (2029L)
+#define FLM_STAT_PRB_IGNORE_CNT (2030L)
+#define FLM_STAT_REL_DONE (2031L)
+#define FLM_STAT_REL_DONE_CNT (2032L)
+#define FLM_STAT_REL_IGNORE (2033L)
+#define FLM_STAT_REL_IGNORE_CNT (2034L)
+#define FLM_STAT_STA_DONE (2035L)
+#define FLM_STAT_STA_DONE_CNT (2036L)
+#define FLM_STAT_TUL_DONE (2037L)
+#define FLM_STAT_TUL_DONE_CNT (2038L)
+#define FLM_STAT_UNL_DONE (2039L)
+#define FLM_STAT_UNL_DONE_CNT (2040L)
+#define FLM_STAT_UNL_IGNORE (2041L)
+#define FLM_STAT_UNL_IGNORE_CNT (2042L)
+#define FLM_STA_DATA (2043L)
+#define FLM_STA_DATA_EOR (2044L)
+#define FLM_STA_DATA_ID (2045L)
+#define FLM_STA_DATA_LDS (2046L)
+#define FLM_STA_DATA_LFS (2047L)
+#define FLM_STA_DATA_LIS (2048L)
+#define FLM_STA_DATA_PDS (2049L)
+#define FLM_STA_DATA_PIS (2050L)
+#define FLM_STA_DATA_RDS (2051L)
+#define FLM_STA_DATA_RIS (2052L)
+#define FLM_STA_DATA_UDS (2053L)
+#define FLM_STA_DATA_UIS (2054L)
+#define FLM_TIMEOUT (2055L)
+#define FLM_TIMEOUT_T (2056L)
+#define FLM_TRSWIN (2057L)
+#define FLM_TRSWIN_S (2058L)
+#define FLM_TRTWIN (2059L)
+#define FLM_TRTWIN_T (2060L)
+/* GFG - frame generator configuration per generator 0-7: burst size, mode/PRBS, size mask, stream ID */
+#define GFG_BURSTSIZE0 (2061L)
+#define GFG_BURSTSIZE0_VAL (2062L)
+#define GFG_BURSTSIZE1 (2063L)
+#define GFG_BURSTSIZE1_VAL (2064L)
+#define GFG_BURSTSIZE2 (2065L)
+#define GFG_BURSTSIZE2_VAL (2066L)
+#define GFG_BURSTSIZE3 (2067L)
+#define GFG_BURSTSIZE3_VAL (2068L)
+#define GFG_BURSTSIZE4 (2069L)
+#define GFG_BURSTSIZE4_VAL (2070L)
+#define GFG_BURSTSIZE5 (2071L)
+#define GFG_BURSTSIZE5_VAL (2072L)
+#define GFG_BURSTSIZE6 (2073L)
+#define GFG_BURSTSIZE6_VAL (2074L)
+#define GFG_BURSTSIZE7 (2075L)
+#define GFG_BURSTSIZE7_VAL (2076L)
+#define GFG_CTRL0 (2077L)
+#define GFG_CTRL0_ENABLE (2078L)
+#define GFG_CTRL0_MODE (2079L)
+#define GFG_CTRL0_PRBS_EN (2080L)
+#define GFG_CTRL0_SIZE (2081L)
+#define GFG_CTRL1 (2082L)
+#define GFG_CTRL1_ENABLE (2083L)
+#define GFG_CTRL1_MODE (2084L)
+#define GFG_CTRL1_PRBS_EN (2085L)
+#define GFG_CTRL1_SIZE (2086L)
+#define GFG_CTRL2 (2087L)
+#define GFG_CTRL2_ENABLE (2088L)
+#define GFG_CTRL2_MODE (2089L)
+#define GFG_CTRL2_PRBS_EN (2090L)
+#define GFG_CTRL2_SIZE (2091L)
+#define GFG_CTRL3 (2092L)
+#define GFG_CTRL3_ENABLE (2093L)
+#define GFG_CTRL3_MODE (2094L)
+#define GFG_CTRL3_PRBS_EN (2095L)
+#define GFG_CTRL3_SIZE (2096L)
+#define GFG_CTRL4 (2097L)
+#define GFG_CTRL4_ENABLE (2098L)
+#define GFG_CTRL4_MODE (2099L)
+#define GFG_CTRL4_PRBS_EN (2100L)
+#define GFG_CTRL4_SIZE (2101L)
+#define GFG_CTRL5 (2102L)
+#define GFG_CTRL5_ENABLE (2103L)
+#define GFG_CTRL5_MODE (2104L)
+#define GFG_CTRL5_PRBS_EN (2105L)
+#define GFG_CTRL5_SIZE (2106L)
+#define GFG_CTRL6 (2107L)
+#define GFG_CTRL6_ENABLE (2108L)
+#define GFG_CTRL6_MODE (2109L)
+#define GFG_CTRL6_PRBS_EN (2110L)
+#define GFG_CTRL6_SIZE (2111L)
+#define GFG_CTRL7 (2112L)
+#define GFG_CTRL7_ENABLE (2113L)
+#define GFG_CTRL7_MODE (2114L)
+#define GFG_CTRL7_PRBS_EN (2115L)
+#define GFG_CTRL7_SIZE (2116L)
+#define GFG_RUN0 (2117L)
+#define GFG_RUN0_RUN (2118L)
+#define GFG_RUN1 (2119L)
+#define GFG_RUN1_RUN (2120L)
+#define GFG_RUN2 (2121L)
+#define GFG_RUN2_RUN (2122L)
+#define GFG_RUN3 (2123L)
+#define GFG_RUN3_RUN (2124L)
+#define GFG_RUN4 (2125L)
+#define GFG_RUN4_RUN (2126L)
+#define GFG_RUN5 (2127L)
+#define GFG_RUN5_RUN (2128L)
+#define GFG_RUN6 (2129L)
+#define GFG_RUN6_RUN (2130L)
+#define GFG_RUN7 (2131L)
+#define GFG_RUN7_RUN (2132L)
+#define GFG_SIZEMASK0 (2133L)
+#define GFG_SIZEMASK0_VAL (2134L)
+#define GFG_SIZEMASK1 (2135L)
+#define GFG_SIZEMASK1_VAL (2136L)
+#define GFG_SIZEMASK2 (2137L)
+#define GFG_SIZEMASK2_VAL (2138L)
+#define GFG_SIZEMASK3 (2139L)
+#define GFG_SIZEMASK3_VAL (2140L)
+#define GFG_SIZEMASK4 (2141L)
+#define GFG_SIZEMASK4_VAL (2142L)
+#define GFG_SIZEMASK5 (2143L)
+#define GFG_SIZEMASK5_VAL (2144L)
+#define GFG_SIZEMASK6 (2145L)
+#define GFG_SIZEMASK6_VAL (2146L)
+#define GFG_SIZEMASK7 (2147L)
+#define GFG_SIZEMASK7_VAL (2148L)
+#define GFG_STREAMID0 (2149L)
+#define GFG_STREAMID0_VAL (2150L)
+#define GFG_STREAMID1 (2151L)
+#define GFG_STREAMID1_VAL (2152L)
+#define GFG_STREAMID2 (2153L)
+#define GFG_STREAMID2_VAL (2154L)
+#define GFG_STREAMID3 (2155L)
+#define GFG_STREAMID3_VAL (2156L)
+#define GFG_STREAMID4 (2157L)
+#define GFG_STREAMID4_VAL (2158L)
+#define GFG_STREAMID5 (2159L)
+#define GFG_STREAMID5_VAL (2160L)
+#define GFG_STREAMID6 (2161L)
+#define GFG_STREAMID6_VAL (2162L)
+#define GFG_STREAMID7 (2163L)
+#define GFG_STREAMID7_VAL (2164L)
+/* GMF - generic MAC feeder (TX path): FCS, inter-frame-gap tuning and timestamp injection */
+#define GMF_CTRL (2165L)
+#define GMF_CTRL_ENABLE (2166L)
+#define GMF_CTRL_FCS_ALWAYS (2167L)
+#define GMF_CTRL_IFG_AUTO_ADJUST_ENABLE (2168L)
+#define GMF_CTRL_IFG_ENABLE (2169L)
+#define GMF_CTRL_IFG_TX_NOW_ALWAYS (2170L)
+#define GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE (2171L)
+#define GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK (2172L)
+#define GMF_CTRL_IFG_TX_ON_TS_ALWAYS (2173L)
+#define GMF_CTRL_TS_INJECT_ALWAYS (2174L)
+#define GMF_CTRL_TS_INJECT_DUAL_STEP (2175L)
+#define GMF_DEBUG_LANE_MARKER (2176L)
+#define GMF_DEBUG_LANE_MARKER_COMPENSATION (2177L)
+#define GMF_IFG_MAX_ADJUST_SLACK (2178L)
+#define GMF_IFG_MAX_ADJUST_SLACK_SLACK (2179L)
+#define GMF_IFG_SET_CLOCK_DELTA (2180L)
+#define GMF_IFG_SET_CLOCK_DELTA_DELTA (2181L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST (2182L)
+#define GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA (2183L)
+#define GMF_IFG_TX_NOW_ON_TS (2184L)
+#define GMF_IFG_TX_NOW_ON_TS_TS (2185L)
+#define GMF_SPEED (2186L)
+#define GMF_SPEED_IFG_SPEED (2187L)
+#define GMF_STAT (2188L)
+#define GMF_STAT_CTRL_EMPTY (2189L)
+#define GMF_STAT_DATA_CTRL_EMPTY (2190L)
+#define GMF_STAT_SB_EMPTY (2191L)
+#define GMF_STAT_CTRL (2192L)
+#define GMF_STAT_CTRL_FILL_LEVEL (2193L)
+#define GMF_STAT_DATA0 (2194L)
+#define GMF_STAT_DATA0_EMPTY (2195L)
+#define GMF_STAT_DATA1 (2196L)
+#define GMF_STAT_DATA1_EMPTY (2197L)
+#define GMF_STAT_DATA_BUFFER (2198L)
+#define GMF_STAT_DATA_BUFFER_FREE (2199L)
+#define GMF_STAT_DATA_BUFFER_USED (2200L)
+#define GMF_STAT_MAX_DELAYED_PKT (2201L)
+#define GMF_STAT_MAX_DELAYED_PKT_NS (2202L)
+#define GMF_STAT_NEXT_PKT (2203L)
+#define GMF_STAT_NEXT_PKT_NS (2204L)
+#define GMF_STAT_STICKY (2205L)
+#define GMF_STAT_STICKY_DATA_UNDERFLOWED (2206L)
+#define GMF_STAT_STICKY_IFG_ADJUSTED (2207L)
+#define GMF_TS_INJECT (2208L)
+#define GMF_TS_INJECT_OFFSET (2209L)
+#define GMF_TS_INJECT_POS (2210L)
+/* GPIO_PHY - PHY/transceiver GPIO pins for ports 0-1 (LPMODE/MODPRS_B/RESET_B suggest QSFP-style modules) */
+#define GPIO_PHY_CFG (2211L)
+#define GPIO_PHY_CFG_E_PORT0_RXLOS (2212L)
+#define GPIO_PHY_CFG_E_PORT1_RXLOS (2213L)
+#define GPIO_PHY_CFG_PORT0_INT_B (2214L)
+#define GPIO_PHY_CFG_PORT0_LPMODE (2215L)
+#define GPIO_PHY_CFG_PORT0_MODPRS_B (2216L)
+#define GPIO_PHY_CFG_PORT0_PLL_INTR (2217L)
+#define GPIO_PHY_CFG_PORT0_RESET_B (2218L)
+#define GPIO_PHY_CFG_PORT1_INT_B (2219L)
+#define GPIO_PHY_CFG_PORT1_LPMODE (2220L)
+#define GPIO_PHY_CFG_PORT1_MODPRS_B (2221L)
+#define GPIO_PHY_CFG_PORT1_PLL_INTR (2222L)
+#define GPIO_PHY_CFG_PORT1_RESET_B (2223L)
+#define GPIO_PHY_GPIO (2224L)
+#define GPIO_PHY_GPIO_E_PORT0_RXLOS (2225L)
+#define GPIO_PHY_GPIO_E_PORT1_RXLOS (2226L)
+#define GPIO_PHY_GPIO_PORT0_INT_B (2227L)
+#define GPIO_PHY_GPIO_PORT0_LPMODE (2228L)
+#define GPIO_PHY_GPIO_PORT0_MODPRS_B (2229L)
+#define GPIO_PHY_GPIO_PORT0_PLL_INTR (2230L)
+#define GPIO_PHY_GPIO_PORT0_RESET_B (2231L)
+#define GPIO_PHY_GPIO_PORT1_INT_B (2232L)
+#define GPIO_PHY_GPIO_PORT1_LPMODE (2233L)
+#define GPIO_PHY_GPIO_PORT1_MODPRS_B (2234L)
+#define GPIO_PHY_GPIO_PORT1_PLL_INTR (2235L)
+#define GPIO_PHY_GPIO_PORT1_RESET_B (2236L)
+/* GPIO_PHY_PORTS - per-port (0-7) transceiver GPIO: RXLOS/TXDISABLE/TXFAULT (SFP-style pins) */
+#define GPIO_PHY_PORTS_CFG (2237L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_RXLOS (2238L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXDISABLE (2239L)
+#define GPIO_PHY_PORTS_CFG_E_PORT0_TXFAULT (2240L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_RXLOS (2241L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXDISABLE (2242L)
+#define GPIO_PHY_PORTS_CFG_E_PORT1_TXFAULT (2243L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_RXLOS (2244L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXDISABLE (2245L)
+#define GPIO_PHY_PORTS_CFG_E_PORT2_TXFAULT (2246L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_RXLOS (2247L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXDISABLE (2248L)
+#define GPIO_PHY_PORTS_CFG_E_PORT3_TXFAULT (2249L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_RXLOS (2250L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXDISABLE (2251L)
+#define GPIO_PHY_PORTS_CFG_E_PORT4_TXFAULT (2252L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_RXLOS (2253L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXDISABLE (2254L)
+#define GPIO_PHY_PORTS_CFG_E_PORT5_TXFAULT (2255L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_RXLOS (2256L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXDISABLE (2257L)
+#define GPIO_PHY_PORTS_CFG_E_PORT6_TXFAULT (2258L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_RXLOS (2259L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXDISABLE (2260L)
+#define GPIO_PHY_PORTS_CFG_E_PORT7_TXFAULT (2261L)
+#define GPIO_PHY_PORTS_GPIO (2262L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_RXLOS (2263L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXDISABLE (2264L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT0_TXFAULT (2265L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_RXLOS (2266L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXDISABLE (2267L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT1_TXFAULT (2268L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_RXLOS (2269L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXDISABLE (2270L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT2_TXFAULT (2271L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_RXLOS (2272L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXDISABLE (2273L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT3_TXFAULT (2274L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_RXLOS (2275L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXDISABLE (2276L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT4_TXFAULT (2277L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_RXLOS (2278L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXDISABLE (2279L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT5_TXFAULT (2280L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_RXLOS (2281L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXDISABLE (2282L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT6_TXFAULT (2283L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_RXLOS (2284L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXDISABLE (2285L)
+#define GPIO_PHY_PORTS_GPIO_E_PORT7_TXFAULT (2286L)
+/* GPIO_SFPP - SFP+ module GPIO pins: ABS, rate select, RXLOS, TXDISABLE, TXFAULT */
+#define GPIO_SFPP_CFG (2287L)
+#define GPIO_SFPP_CFG_ABS (2288L)
+#define GPIO_SFPP_CFG_RS (2289L)
+#define GPIO_SFPP_CFG_RXLOS (2290L)
+#define GPIO_SFPP_CFG_TXDISABLE (2291L)
+#define GPIO_SFPP_CFG_TXFAULT (2292L)
+#define GPIO_SFPP_GPIO (2293L)
+#define GPIO_SFPP_GPIO_ABS (2294L)
+#define GPIO_SFPP_GPIO_RS (2295L)
+#define GPIO_SFPP_GPIO_RXLOS (2296L)
+#define GPIO_SFPP_GPIO_TXDISABLE (2297L)
+#define GPIO_SFPP_GPIO_TXFAULT (2298L)
+/* HFU - header field update recipes: L3/L4 offsets plus length (A/B/C) and TTL rewrite fields */
+#define HFU_RCP_CTRL (2381L)
+#define HFU_RCP_CTRL_ADR (2382L)
+#define HFU_RCP_CTRL_CNT (2383L)
+#define HFU_RCP_DATA (2384L)
+#define HFU_RCP_DATA_CSINF (2385L)
+#define HFU_RCP_DATA_IL3OFS (2386L)
+#define HFU_RCP_DATA_IL4OFS (2387L)
+#define HFU_RCP_DATA_L3FRAG (2388L)
+#define HFU_RCP_DATA_L3PRT (2389L)
+#define HFU_RCP_DATA_L4PRT (2390L)
+#define HFU_RCP_DATA_LEN_A_ADD_DYN (2391L)
+#define HFU_RCP_DATA_LEN_A_ADD_OFS (2392L)
+#define HFU_RCP_DATA_LEN_A_OL4LEN (2393L)
+#define HFU_RCP_DATA_LEN_A_POS_DYN (2394L)
+#define HFU_RCP_DATA_LEN_A_POS_OFS (2395L)
+#define HFU_RCP_DATA_LEN_A_SUB_DYN (2396L)
+#define HFU_RCP_DATA_LEN_A_WR (2397L)
+#define HFU_RCP_DATA_LEN_B_ADD_DYN (2398L)
+#define HFU_RCP_DATA_LEN_B_ADD_OFS (2399L)
+#define HFU_RCP_DATA_LEN_B_POS_DYN (2400L)
+#define HFU_RCP_DATA_LEN_B_POS_OFS (2401L)
+#define HFU_RCP_DATA_LEN_B_SUB_DYN (2402L)
+#define HFU_RCP_DATA_LEN_B_WR (2403L)
+#define HFU_RCP_DATA_LEN_C_ADD_DYN (2404L)
+#define HFU_RCP_DATA_LEN_C_ADD_OFS (2405L)
+#define HFU_RCP_DATA_LEN_C_POS_DYN (2406L)
+#define HFU_RCP_DATA_LEN_C_POS_OFS (2407L)
+#define HFU_RCP_DATA_LEN_C_SUB_DYN (2408L)
+#define HFU_RCP_DATA_LEN_C_WR (2409L)
+#define HFU_RCP_DATA_OL3OFS (2410L)
+#define HFU_RCP_DATA_OL4OFS (2411L)
+#define HFU_RCP_DATA_TTL_POS_DYN (2412L)
+#define HFU_RCP_DATA_TTL_POS_OFS (2413L)
+#define HFU_RCP_DATA_TTL_WR (2414L)
+#define HFU_RCP_DATA_TUNNEL (2415L)
+/* HIF - host (PCIe) interface: product/build IDs, config (TLP/tag), status, stats and test registers.
+ * NOTE(review): both PROD_ID_EX and PROD_ID_EXT variants are present in the generated table - kept as-is.
+ */
+#define HIF_BUILD_TIME (2416L)
+#define HIF_BUILD_TIME_TIME (2417L)
+#define HIF_CONFIG (2418L)
+#define HIF_CONFIG_EXT_TAG (2419L)
+#define HIF_CONFIG_MAX_READ (2420L)
+#define HIF_CONFIG_MAX_TLP (2421L)
+#define HIF_CONTROL (2422L)
+#define HIF_CONTROL_BLESSED (2423L)
+#define HIF_CONTROL_WRAW (2424L)
+#define HIF_PROD_ID_EX (2425L)
+#define HIF_PROD_ID_EX_LAYOUT (2426L)
+#define HIF_PROD_ID_EX_LAYOUT_VERSION (2427L)
+#define HIF_PROD_ID_EX_RESERVED (2428L)
+#define HIF_PROD_ID_EXT (2429L)
+#define HIF_PROD_ID_EXT_LAYOUT (2430L)
+#define HIF_PROD_ID_EXT_LAYOUT_VERSION (2431L)
+#define HIF_PROD_ID_EXT_RESERVED (2432L)
+#define HIF_PROD_ID_LSB (2433L)
+#define HIF_PROD_ID_LSB_GROUP_ID (2434L)
+#define HIF_PROD_ID_LSB_REV_ID (2435L)
+#define HIF_PROD_ID_LSB_VER_ID (2436L)
+#define HIF_PROD_ID_MSB (2437L)
+#define HIF_PROD_ID_MSB_BUILD_NO (2438L)
+#define HIF_PROD_ID_MSB_PATCH_NO (2439L)
+#define HIF_PROD_ID_MSB_TYPE_ID (2440L)
+#define HIF_SAMPLE_TIME (2441L)
+#define HIF_SAMPLE_TIME_SAMPLE_TIME (2442L)
+#define HIF_STATUS (2443L)
+#define HIF_STATUS_RD_ERR (2444L)
+#define HIF_STATUS_TAGS_IN_USE (2445L)
+#define HIF_STATUS_WR_ERR (2446L)
+#define HIF_STAT_CTRL (2447L)
+#define HIF_STAT_CTRL_STAT_ENA (2448L)
+#define HIF_STAT_CTRL_STAT_REQ (2449L)
+#define HIF_STAT_REFCLK (2450L)
+#define HIF_STAT_REFCLK_REFCLK250 (2451L)
+#define HIF_STAT_RX (2452L)
+#define HIF_STAT_RX_COUNTER (2453L)
+#define HIF_STAT_TX (2454L)
+#define HIF_STAT_TX_COUNTER (2455L)
+#define HIF_TEST0 (2456L)
+#define HIF_TEST0_DATA (2457L)
+#define HIF_TEST1 (2458L)
+#define HIF_TEST1_DATA (2459L)
+#define HIF_TEST2 (2460L)
+#define HIF_TEST2_DATA (2461L)
+#define HIF_TEST3 (2462L)
+#define HIF_TEST3_DATA (2463L)
+#define HIF_UUID0 (2464L)
+#define HIF_UUID0_UUID0 (2465L)
+#define HIF_UUID1 (2466L)
+#define HIF_UUID1_UUID1 (2467L)
+#define HIF_UUID2 (2468L)
+#define HIF_UUID2_UUID2 (2469L)
+#define HIF_UUID3 (2470L)
+#define HIF_UUID3_UUID3 (2471L)
+/* HSH - hasher recipes: key/seed, Toeplitz selection, word selection/sorting and masks */
+#define HSH_RCP_CTRL (2472L)
+#define HSH_RCP_CTRL_ADR (2473L)
+#define HSH_RCP_CTRL_CNT (2474L)
+#define HSH_RCP_DATA (2475L)
+#define HSH_RCP_DATA_AUTO_IPV4_MASK (2476L)
+#define HSH_RCP_DATA_HSH_TYPE (2477L)
+#define HSH_RCP_DATA_HSH_VALID (2478L)
+#define HSH_RCP_DATA_K (2479L)
+#define HSH_RCP_DATA_LOAD_DIST_TYPE (2480L)
+#define HSH_RCP_DATA_MAC_PORT_MASK (2481L)
+#define HSH_RCP_DATA_P_MASK (2482L)
+#define HSH_RCP_DATA_QW0_OFS (2483L)
+#define HSH_RCP_DATA_QW0_PE (2484L)
+#define HSH_RCP_DATA_QW4_OFS (2485L)
+#define HSH_RCP_DATA_QW4_PE (2486L)
+#define HSH_RCP_DATA_SEED (2487L)
+#define HSH_RCP_DATA_SORT (2488L)
+#define HSH_RCP_DATA_TNL_P (2489L)
+#define HSH_RCP_DATA_TOEPLITZ (2490L)
+#define HSH_RCP_DATA_W8_OFS (2491L)
+#define HSH_RCP_DATA_W8_PE (2492L)
+#define HSH_RCP_DATA_W8_SORT (2493L)
+#define HSH_RCP_DATA_W9_OFS (2494L)
+#define HSH_RCP_DATA_W9_P (2495L)
+#define HSH_RCP_DATA_W9_PE (2496L)
+#define HSH_RCP_DATA_W9_SORT (2497L)
+#define HSH_RCP_DATA_WORD_MASK (2498L)
+/* HST - header strip recipes: start/end offsets, strip mode and up to three modifier commands */
+#define HST_RCP_CTRL (2499L)
+#define HST_RCP_CTRL_ADR (2500L)
+#define HST_RCP_CTRL_CNT (2501L)
+#define HST_RCP_DATA (2502L)
+#define HST_RCP_DATA_END_DYN (2503L)
+#define HST_RCP_DATA_END_OFS (2504L)
+#define HST_RCP_DATA_MODIF0_CMD (2505L)
+#define HST_RCP_DATA_MODIF0_DYN (2506L)
+#define HST_RCP_DATA_MODIF0_OFS (2507L)
+#define HST_RCP_DATA_MODIF0_VALUE (2508L)
+#define HST_RCP_DATA_MODIF1_CMD (2509L)
+#define HST_RCP_DATA_MODIF1_DYN (2510L)
+#define HST_RCP_DATA_MODIF1_OFS (2511L)
+#define HST_RCP_DATA_MODIF1_VALUE (2512L)
+#define HST_RCP_DATA_MODIF2_CMD (2513L)
+#define HST_RCP_DATA_MODIF2_DYN (2514L)
+#define HST_RCP_DATA_MODIF2_OFS (2515L)
+#define HST_RCP_DATA_MODIF2_VALUE (2516L)
+#define HST_RCP_DATA_START_DYN (2517L)
+#define HST_RCP_DATA_START_OFS (2518L)
+#define HST_RCP_DATA_STRIP_MODE (2519L)
+/* ICORE_10G - 10G PHY core: indirect register access, PHY/link status and test-frame generator (TFG) */
+#define ICORE_10G_INDIR_CTRL (2549L)
+#define ICORE_10G_INDIR_CTRL_RD_ENABLE (2550L)
+#define ICORE_10G_INDIR_CTRL_RD_READY (2551L)
+#define ICORE_10G_INDIR_CTRL_SUB_ADDR (2552L)
+#define ICORE_10G_INDIR_DATA (2553L)
+#define ICORE_10G_INDIR_DATA_DATA (2554L)
+#define ICORE_10G_MISC_CTRL (2555L)
+#define ICORE_10G_MISC_CTRL_LINE_LOOP (2556L)
+#define ICORE_10G_MISC_CTRL_LINK_LATENCY (2557L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_RX (2558L)
+#define ICORE_10G_MISC_CTRL_RES_MAC_TX (2559L)
+#define ICORE_10G_MISC_CTRL_SERIAL_LOOP (2560L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2DATA (2561L)
+#define ICORE_10G_MISC_CTRL_SET_LOCK2REF (2562L)
+#define ICORE_10G_PHY_STATUS (2563L)
+#define ICORE_10G_PHY_STATUS_BLOCK_LOCK (2564L)
+#define ICORE_10G_PHY_STATUS_HI_BER (2565L)
+#define ICORE_10G_PHY_STATUS_HI_BER_CHANGED (2566L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE (2567L)
+#define ICORE_10G_PHY_STATUS_LINK_STATE_CHANGED (2568L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED (2569L)
+#define ICORE_10G_PHY_STATUS_RXCLK_LOCKED2DATA (2570L)
+#define ICORE_10G_PHY_STATUS_RX_CAL_BUSY (2571L)
+#define ICORE_10G_PHY_STATUS_TXCLK_LOCKED (2572L)
+#define ICORE_10G_PHY_STATUS_TX_CAL_BUSY (2573L)
+#define ICORE_10G_TFG_ADDR (2574L)
+#define ICORE_10G_TFG_ADDR_ADR (2575L)
+#define ICORE_10G_TFG_ADDR_RDENA (2576L)
+#define ICORE_10G_TFG_ADDR_RD_DONE (2577L)
+#define ICORE_10G_TFG_CTRL (2578L)
+#define ICORE_10G_TFG_CTRL_ID_ENA (2579L)
+#define ICORE_10G_TFG_CTRL_ID_POS (2580L)
+#define ICORE_10G_TFG_CTRL_RESTART (2581L)
+#define ICORE_10G_TFG_CTRL_TG_ACT (2582L)
+#define ICORE_10G_TFG_CTRL_TG_ENA (2583L)
+#define ICORE_10G_TFG_CTRL_TIME_MODE (2584L)
+#define ICORE_10G_TFG_CTRL_WRAP (2585L)
+#define ICORE_10G_TFG_DATA (2586L)
+#define ICORE_10G_TFG_DATA_ID (2587L)
+#define ICORE_10G_TFG_DATA_LENGTH (2588L)
+#define ICORE_10G_TFG_FRAME_HDR_0 (2589L)
+#define ICORE_10G_TFG_FRAME_HDR_0_HDR (2590L)
+#define ICORE_10G_TFG_FRAME_HDR_1 (2591L)
+#define ICORE_10G_TFG_FRAME_HDR_1_HDR (2592L)
+#define ICORE_10G_TFG_FRAME_HDR_2 (2593L)
+#define ICORE_10G_TFG_FRAME_HDR_2_HDR (2594L)
+#define ICORE_10G_TFG_FRAME_HDR_3 (2595L)
+#define ICORE_10G_TFG_FRAME_HDR_3_HDR (2596L)
+#define ICORE_10G_TFG_REPETITION (2597L)
+#define ICORE_10G_TFG_REPETITION_CNT (2598L)
+/* IFR - per-recipe enable and MTU (presumably MTU-based IP fragmentation - TODO confirm) */
+#define IFR_RCP_CTRL (2642L)
+#define IFR_RCP_CTRL_ADR (2643L)
+#define IFR_RCP_CTRL_CNT (2644L)
+#define IFR_RCP_DATA (2645L)
+#define IFR_RCP_DATA_EN (2646L)
+#define IFR_RCP_DATA_MTU (2647L)
+/* IIC - I2C controller; register names (CR/SR/FIFOs/PIRQ/SOFTR/timing) match the Xilinx AXI IIC core */
+#define IIC_ADR (2648L)
+#define IIC_ADR_SLV_ADR (2649L)
+#define IIC_CR (2650L)
+#define IIC_CR_EN (2651L)
+#define IIC_CR_GC_EN (2652L)
+#define IIC_CR_MSMS (2653L)
+#define IIC_CR_RST (2654L)
+#define IIC_CR_RSTA (2655L)
+#define IIC_CR_TX (2656L)
+#define IIC_CR_TXAK (2657L)
+#define IIC_CR_TXFIFO_RESET (2658L)
+#define IIC_DGIE (2659L)
+#define IIC_DGIE_GIE (2660L)
+#define IIC_GPO (2661L)
+#define IIC_GPO_GPO_VAL (2662L)
+#define IIC_IER (2663L)
+#define IIC_IER_INT0 (2664L)
+#define IIC_IER_INT1 (2665L)
+#define IIC_IER_INT2 (2666L)
+#define IIC_IER_INT3 (2667L)
+#define IIC_IER_INT4 (2668L)
+#define IIC_IER_INT5 (2669L)
+#define IIC_IER_INT6 (2670L)
+#define IIC_IER_INT7 (2671L)
+#define IIC_ISR (2672L)
+#define IIC_ISR_INT0 (2673L)
+#define IIC_ISR_INT1 (2674L)
+#define IIC_ISR_INT2 (2675L)
+#define IIC_ISR_INT3 (2676L)
+#define IIC_ISR_INT4 (2677L)
+#define IIC_ISR_INT5 (2678L)
+#define IIC_ISR_INT6 (2679L)
+#define IIC_ISR_INT7 (2680L)
+#define IIC_RX_FIFO (2681L)
+#define IIC_RX_FIFO_RXDATA (2682L)
+#define IIC_RX_FIFO_OCY (2683L)
+#define IIC_RX_FIFO_OCY_OCY_VAL (2684L)
+#define IIC_RX_FIFO_PIRQ (2685L)
+#define IIC_RX_FIFO_PIRQ_CMP_VAL (2686L)
+#define IIC_SOFTR (2687L)
+#define IIC_SOFTR_RKEY (2688L)
+#define IIC_SR (2689L)
+#define IIC_SR_AAS (2690L)
+#define IIC_SR_ABGC (2691L)
+#define IIC_SR_BB (2692L)
+#define IIC_SR_RXFIFO_EMPTY (2693L)
+#define IIC_SR_RXFIFO_FULL (2694L)
+#define IIC_SR_SRW (2695L)
+#define IIC_SR_TXFIFO_EMPTY (2696L)
+#define IIC_SR_TXFIFO_FULL (2697L)
+#define IIC_TBUF (2698L)
+#define IIC_TBUF_TBUF_VAL (2699L)
+#define IIC_TEN_ADR (2700L)
+#define IIC_TEN_ADR_MSB_SLV_ADR (2701L)
+#define IIC_THDDAT (2702L)
+#define IIC_THDDAT_THDDAT_VAL (2703L)
+#define IIC_THDSTA (2704L)
+#define IIC_THDSTA_THDSTA_VAL (2705L)
+#define IIC_THIGH (2706L)
+#define IIC_THIGH_THIGH_VAL (2707L)
+#define IIC_TLOW (2708L)
+#define IIC_TLOW_TLOW_VAL (2709L)
+#define IIC_TSUDAT (2710L)
+#define IIC_TSUDAT_TSUDAT_VAL (2711L)
+#define IIC_TSUSTA (2712L)
+#define IIC_TSUSTA_TSUSTA_VAL (2713L)
+#define IIC_TSUSTO (2714L)
+#define IIC_TSUSTO_TSUSTO_VAL (2715L)
+#define IIC_TX_FIFO (2716L)
+#define IIC_TX_FIFO_START (2717L)
+#define IIC_TX_FIFO_STOP (2718L)
+#define IIC_TX_FIFO_TXDATA (2719L)
+#define IIC_TX_FIFO_OCY (2720L)
+#define IIC_TX_FIFO_OCY_OCY_VAL (2721L)
+/* INS - insert recipes: dynamic position (DYN), length (LEN) and offset (OFS) */
+#define INS_RCP_CTRL (2722L)
+#define INS_RCP_CTRL_ADR (2723L)
+#define INS_RCP_CTRL_CNT (2724L)
+#define INS_RCP_DATA (2725L)
+#define INS_RCP_DATA_DYN (2726L)
+#define INS_RCP_DATA_LEN (2727L)
+#define INS_RCP_DATA_OFS (2728L)
+/* IOA - recipes for VLAN push/pop, tunnel pop and queue override, plus ROA egress-path (EPP) data */
+#define IOA_RECIPE_CTRL (2778L)
+#define IOA_RECIPE_CTRL_ADR (2779L)
+#define IOA_RECIPE_CTRL_CNT (2780L)
+#define IOA_RECIPE_DATA (2781L)
+#define IOA_RECIPE_DATA_QUEUE_ID (2782L)
+#define IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN (2783L)
+#define IOA_RECIPE_DATA_TUNNEL_POP (2784L)
+#define IOA_RECIPE_DATA_VLAN_DEI (2785L)
+#define IOA_RECIPE_DATA_VLAN_PCP (2786L)
+#define IOA_RECIPE_DATA_VLAN_POP (2787L)
+#define IOA_RECIPE_DATA_VLAN_PUSH (2788L)
+#define IOA_RECIPE_DATA_VLAN_TPID_SEL (2789L)
+#define IOA_RECIPE_DATA_VLAN_VID (2790L)
+#define IOA_ROA_EPP_CTRL (2791L)
+#define IOA_ROA_EPP_CTRL_ADR (2792L)
+#define IOA_ROA_EPP_CTRL_CNT (2793L)
+#define IOA_ROA_EPP_DATA (2794L)
+#define IOA_ROA_EPP_DATA_PUSH_TUNNEL (2795L)
+#define IOA_ROA_EPP_DATA_TX_PORT (2796L)
+#define IOA_VLAN_TPID_SPECIAL (2797L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID0 (2798L)
+#define IOA_VLAN_TPID_SPECIAL_CUSTTPID1 (2799L)
+/* IPF - IP fragment handling: unmatched-fragment queues/feed, per-recipe options, timeouts and stats */
+#define IPF_CTRL (2800L)
+#define IPF_CTRL_ALL_UNM (2801L)
+#define IPF_CTRL_ALL_UNM_INNER (2802L)
+#define IPF_CTRL_DEL_UNM (2803L)
+#define IPF_CTRL_ENABLE (2804L)
+#define IPF_CTRL_FST_UNM (2805L)
+#define IPF_CTRL_PASSIVE (2806L)
+#define IPF_CTRL_PERSIST (2807L)
+#define IPF_DEBUG (2808L)
+#define IPF_DEBUG_FTF_N (2809L)
+#define IPF_DEBUG_LIMIT_N (2810L)
+#define IPF_EXPIRE (2811L)
+#define IPF_EXPIRE_PERSIST (2812L)
+#define IPF_EXPIRE_T (2813L)
+#define IPF_FTF_DEBUG (2814L)
+#define IPF_FTF_DEBUG_N (2815L)
+#define IPF_RCP_CTRL (2816L)
+#define IPF_RCP_CTRL_ADR (2817L)
+#define IPF_RCP_CTRL_CNT (2818L)
+#define IPF_RCP_DATA (2819L)
+#define IPF_RCP_DATA_ALL_UNM (2820L)
+#define IPF_RCP_DATA_COL_INH (2821L)
+#define IPF_RCP_DATA_DEL_UNM (2822L)
+#define IPF_RCP_DATA_DISC_INH (2823L)
+#define IPF_RCP_DATA_DUP_INH (2824L)
+#define IPF_RCP_DATA_ENABLE (2825L)
+#define IPF_RCP_DATA_FST_UNM (2826L)
+#define IPF_RCP_DATA_GROUP_ID (2827L)
+#define IPF_RCP_DATA_HASH_CENC (2828L)
+#define IPF_RCP_DATA_HSH_INH (2829L)
+#define IPF_RCP_DATA_PORT_GROUP_ID (2830L)
+#define IPF_RCP_DATA_QUEUE_INH (2831L)
+#define IPF_RCP_DATA_UNMQ_HI (2832L)
+#define IPF_RCP_DATA_UNMQ_LO (2833L)
+#define IPF_RCP_DATA_UNM_FLAG_CENC (2834L)
+#define IPF_SIZE_DEBUG (2835L)
+#define IPF_SIZE_DEBUG_N (2836L)
+#define IPF_STAT_MAX1 (2837L)
+#define IPF_STAT_MAX1_N (2838L)
+#define IPF_STAT_MAX2 (2839L)
+#define IPF_STAT_MAX2_N (2840L)
+#define IPF_STAT_MAX3 (2841L)
+#define IPF_STAT_MAX3_N (2842L)
+#define IPF_STAT_MAX4 (2843L)
+#define IPF_STAT_MAX4_N (2844L)
+#define IPF_TIMEOUT (2845L)
+#define IPF_TIMEOUT_T (2846L)
+#define IPF_UNMQ_CTRL (2847L)
+#define IPF_UNMQ_CTRL_ADR (2848L)
+#define IPF_UNMQ_CTRL_CNT (2849L)
+#define IPF_UNMQ_DATA (2850L)
+#define IPF_UNMQ_DATA_CENC (2851L)
+#define IPF_UNMQ_DATA_EN (2852L)
+#define IPF_UNMQ_DATA_ID (2853L)
+#define IPF_UNM_FEED (2854L)
+#define IPF_UNM_FEED_ADDR (2855L)
+#define IPF_UNM_FEED_CNT (2856L)
+#define IPF_UNM_FEED_FEED (2857L)
+#define IPF_UNM_FEED_FEED_VALID (2858L)
+#define IPF_UNM_FEED_RES1 (2859L)
+#define IPF_UNM_FEED_RES2 (2860L)
+#define IPF_UNM_FEED_RES3 (2861L)
+/* KM */
+#define KM_CAM_CTRL (2862L)
+#define KM_CAM_CTRL_ADR (2863L)
+#define KM_CAM_CTRL_CNT (2864L)
+#define KM_CAM_DATA (2865L)
+#define KM_CAM_DATA_FT0 (2866L)
+#define KM_CAM_DATA_FT1 (2867L)
+#define KM_CAM_DATA_FT2 (2868L)
+#define KM_CAM_DATA_FT3 (2869L)
+#define KM_CAM_DATA_FT4 (2870L)
+#define KM_CAM_DATA_FT5 (2871L)
+#define KM_CAM_DATA_W0 (2872L)
+#define KM_CAM_DATA_W1 (2873L)
+#define KM_CAM_DATA_W2 (2874L)
+#define KM_CAM_DATA_W3 (2875L)
+#define KM_CAM_DATA_W4 (2876L)
+#define KM_CAM_DATA_W5 (2877L)
+#define KM_RCP_CTRL (2878L)
+#define KM_RCP_CTRL_ADR (2879L)
+#define KM_RCP_CTRL_CNT (2880L)
+#define KM_RCP_DATA (2881L)
+#define KM_RCP_DATA_BANK_A (2882L)
+#define KM_RCP_DATA_BANK_B (2883L)
+#define KM_RCP_DATA_DUAL (2884L)
+#define KM_RCP_DATA_DW0_B_DYN (2885L)
+#define KM_RCP_DATA_DW0_B_OFS (2886L)
+#define KM_RCP_DATA_DW10_DYN (2887L)
+#define KM_RCP_DATA_DW10_OFS (2888L)
+#define KM_RCP_DATA_DW10_SEL_A (2889L)
+#define KM_RCP_DATA_DW10_SEL_B (2890L)
+#define KM_RCP_DATA_DW2_B_DYN (2891L)
+#define KM_RCP_DATA_DW2_B_OFS (2892L)
+#define KM_RCP_DATA_DW8_B_DYN (2893L)
+#define KM_RCP_DATA_DW8_B_OFS (2894L)
+#define KM_RCP_DATA_DW8_DYN (2895L)
+#define KM_RCP_DATA_DW8_OFS (2896L)
+#define KM_RCP_DATA_DW8_SEL_A (2897L)
+#define KM_RCP_DATA_DW8_SEL_B (2898L)
+#define KM_RCP_DATA_EL_A (2899L)
+#define KM_RCP_DATA_EL_B (2900L)
+#define KM_RCP_DATA_FLOW_SET (2901L)
+#define KM_RCP_DATA_FTM_A (2902L)
+#define KM_RCP_DATA_FTM_B (2903L)
+#define KM_RCP_DATA_INFO_A (2904L)
+#define KM_RCP_DATA_INFO_B (2905L)
+#define KM_RCP_DATA_KEYWAY_A (2906L)
+#define KM_RCP_DATA_KEYWAY_B (2907L)
+#define KM_RCP_DATA_KL_A (2908L)
+#define KM_RCP_DATA_KL_B (2909L)
+#define KM_RCP_DATA_MASK_A (2910L)
+#define KM_RCP_DATA_MASK_B (2911L)
+#define KM_RCP_DATA_PAIRED (2912L)
+#define KM_RCP_DATA_QW0_B_DYN (2913L)
+#define KM_RCP_DATA_QW0_B_OFS (2914L)
+#define KM_RCP_DATA_QW0_DYN (2915L)
+#define KM_RCP_DATA_QW0_OFS (2916L)
+#define KM_RCP_DATA_QW0_SEL_A (2917L)
+#define KM_RCP_DATA_QW0_SEL_B (2918L)
+#define KM_RCP_DATA_QW4_B_DYN (2919L)
+#define KM_RCP_DATA_QW4_B_OFS (2920L)
+#define KM_RCP_DATA_QW4_DYN (2921L)
+#define KM_RCP_DATA_QW4_OFS (2922L)
+#define KM_RCP_DATA_QW4_SEL_A (2923L)
+#define KM_RCP_DATA_QW4_SEL_B (2924L)
+#define KM_RCP_DATA_SW4_B_DYN (2925L)
+#define KM_RCP_DATA_SW4_B_OFS (2926L)
+#define KM_RCP_DATA_SW5_B_DYN (2927L)
+#define KM_RCP_DATA_SW5_B_OFS (2928L)
+#define KM_RCP_DATA_SW8_B_DYN (2929L)
+#define KM_RCP_DATA_SW8_B_OFS (2930L)
+#define KM_RCP_DATA_SW8_DYN (2931L)
+#define KM_RCP_DATA_SW8_OFS (2932L)
+#define KM_RCP_DATA_SW8_SEL_A (2933L)
+#define KM_RCP_DATA_SW8_SEL_B (2934L)
+#define KM_RCP_DATA_SW9_B_DYN (2935L)
+#define KM_RCP_DATA_SW9_B_OFS (2936L)
+#define KM_RCP_DATA_SW9_DYN (2937L)
+#define KM_RCP_DATA_SW9_OFS (2938L)
+#define KM_RCP_DATA_SW9_SEL_A (2939L)
+#define KM_RCP_DATA_SW9_SEL_B (2940L)
+#define KM_RCP_DATA_SWX_CCH (2941L)
+#define KM_RCP_DATA_SWX_OVS_SB (2942L)
+#define KM_RCP_DATA_SWX_SEL_A (2943L)
+#define KM_RCP_DATA_SWX_SEL_B (2944L)
+#define KM_RCP_DATA_SYNERGY_MODE (2945L)
+#define KM_STATUS (2946L)
+#define KM_STATUS_TCQ_RDY (2947L)
+#define KM_TCAM_CTRL (2948L)
+#define KM_TCAM_CTRL_ADR (2949L)
+#define KM_TCAM_CTRL_CNT (2950L)
+#define KM_TCAM_DATA (2951L)
+#define KM_TCAM_DATA_T (2952L)
+#define KM_TCI_CTRL (2953L)
+#define KM_TCI_CTRL_ADR (2954L)
+#define KM_TCI_CTRL_CNT (2955L)
+#define KM_TCI_DATA (2956L)
+#define KM_TCI_DATA_COLOR (2957L)
+#define KM_TCI_DATA_FT (2958L)
+#define KM_TCQ_CTRL (2959L)
+#define KM_TCQ_CTRL_ADR (2960L)
+#define KM_TCQ_CTRL_CNT (2961L)
+#define KM_TCQ_DATA (2962L)
+#define KM_TCQ_DATA_BANK_MASK (2963L)
+#define KM_TCQ_DATA_QUAL (2964L)
+/* LAO (no registers defined for this module) */
+/* MAC */
+#define MAC_CONF_SERDES_BITFRAG (2965L)
+#define MAC_CONF_SERDES_BITFRAG_BITFRAG (2966L)
+#define MAC_CONF_SERDES_DELAY (2967L)
+#define MAC_CONF_SERDES_DELAY_DELAY (2968L)
+#define MAC_CONF_SERDES_REORDER (2969L)
+#define MAC_CONF_SERDES_REORDER_REORDER (2970L)
+#define MAC_FAULTY_BLK (2971L)
+#define MAC_FAULTY_BLK_DATA (2972L)
+#define MAC_HOST_STAT_BYTE_FILL (2973L)
+#define MAC_HOST_STAT_BYTE_FILL_CNT (2974L)
+#define MAC_INT (2975L)
+#define MAC_INT_EN (2976L)
+#define MAC_INT_MAX_PACE (2977L)
+#define MAC_LINK_SUMMARY (2978L)
+#define MAC_LINK_SUMMARY_ABS (2979L)
+#define MAC_LINK_SUMMARY_GBOX_INTERR (2980L)
+#define MAC_LINK_SUMMARY_GLB_ALARMN (2981L)
+#define MAC_LINK_SUMMARY_LH_ABS (2982L)
+#define MAC_LINK_SUMMARY_LH_GLB_ALARMN (2983L)
+#define MAC_LINK_SUMMARY_LH_LOCAL_FAULT (2984L)
+#define MAC_LINK_SUMMARY_LH_REMOTE_FAULT (2985L)
+#define MAC_LINK_SUMMARY_LH_RX_LOS (2986L)
+#define MAC_LINK_SUMMARY_LINK_DOWN_CNT (2987L)
+#define MAC_LINK_SUMMARY_LL_PHY_LINK_STATE (2988L)
+#define MAC_LINK_SUMMARY_LOCAL_FAULT (2989L)
+#define MAC_LINK_SUMMARY_NT_PHY_LINK_STATE (2990L)
+#define MAC_LINK_SUMMARY_REMOTE_FAULT (2991L)
+#define MAC_LINK_SUMMARY_RX_LOS (2992L)
+#define MAC_MAC_STAT_BYTE (2993L)
+#define MAC_MAC_STAT_BYTE_CNT (2994L)
+#define MAC_MAC_STAT_CRC (2995L)
+#define MAC_MAC_STAT_CRC_CNT (2996L)
+#define MAC_MAC_STAT_CV (2997L)
+#define MAC_MAC_STAT_CV_CNT (2998L)
+#define MAC_MAC_STAT_FRAME (2999L)
+#define MAC_MAC_STAT_FRAME_CNT (3000L)
+#define MAC_MAC_STAT_MICRO_DROP (3001L)
+#define MAC_MAC_STAT_MICRO_DROP_CNT (3002L)
+#define MAC_MAC_STAT_RATE_DROP (3003L)
+#define MAC_MAC_STAT_RATE_DROP_CNT (3004L)
+#define MAC_MAC_STAT_TRUNC (3005L)
+#define MAC_MAC_STAT_TRUNC_CNT (3006L)
+#define MAC_MDS_CEN_VAL (3007L)
+#define MAC_MDS_CEN_VAL_VAL (3008L)
+#define MAC_MDS_CONF (3009L)
+#define MAC_MDS_CONF_CENTER_REC_ENA (3010L)
+#define MAC_MDS_CONF_CLR_STAT (3011L)
+#define MAC_MDS_CONF_ENA_TS_MOD (3012L)
+#define MAC_MDS_CONF_REC_ENA (3013L)
+#define MAC_MDS_CONF_TIME_MODE (3014L)
+#define MAC_MDS_DATA (3015L)
+#define MAC_MDS_DATA_DATA (3016L)
+#define MAC_MDS_FRAMES (3017L)
+#define MAC_MDS_FRAMES_CNT (3018L)
+#define MAC_MDS_MAX (3019L)
+#define MAC_MDS_MAX_MAX (3020L)
+#define MAC_MDS_MIN (3021L)
+#define MAC_MDS_MIN_MIN (3022L)
+#define MAC_MDS_STAT (3023L)
+#define MAC_MDS_STAT_CLR_BUSY (3024L)
+#define MAC_MDS_STAT_HIT_MAX (3025L)
+#define MAC_MDS_STAT_HIT_MIN (3026L)
+#define MAC_MDS_VAL_REC (3027L)
+#define MAC_MDS_VAL_REC_VALUE (3028L)
+#define MAC_MDS_VAL_REC_FRAME (3029L)
+#define MAC_MDS_VAL_REC_FRAME_VALUE (3030L)
+#define MAC_NT_PORT_CTRL (3031L)
+#define MAC_NT_PORT_CTRL_LED_MODE (3032L)
+#define MAC_RAM_MDS_ADDR (3033L)
+#define MAC_RAM_MDS_ADDR_ADR (3034L)
+#define MAC_RAM_MDS_ADDR_CLR_RAM (3035L)
+#define MAC_RAM_MDS_ADDR_RD_DONE (3036L)
+#define MAC_RAM_MDS_ADDR_RD_ENA (3037L)
+#define MAC_RAW_ADDR (3038L)
+#define MAC_RAW_ADDR_ADR (3039L)
+#define MAC_RAW_ADDR_RDENA (3040L)
+#define MAC_RAW_ADDR_RD_DONE (3041L)
+#define MAC_RAW_CTRL (3042L)
+#define MAC_RAW_CTRL_OVERWR_LM (3043L)
+#define MAC_RAW_CTRL_RESTART (3044L)
+#define MAC_RAW_CTRL_TG_ACT (3045L)
+#define MAC_RAW_CTRL_TG_ENA (3046L)
+#define MAC_RAW_CTRL_WRAP (3047L)
+#define MAC_RAW_DATA (3048L)
+#define MAC_RAW_DATA_RAW_DATA (3049L)
+#define MAC_RAW_REPETITION (3050L)
+#define MAC_RAW_REPETITION_CNT (3051L)
+#define MAC_RX_CONFIG (3052L)
+#define MAC_RX_CONFIG_DESCRAMB (3053L)
+#define MAC_RX_CONFIG_HOST_CLR_CNT (3054L)
+#define MAC_RX_CONFIG_MAC_CLR_CNT (3055L)
+#define MAC_RX_CONFIG_MIN_RX_FRAME (3056L)
+#define MAC_RX_CONFIG_NT_DEBOUNCE_LATENCY (3057L)
+#define MAC_RX_CONFIG_NT_FORCE_LINK_DOWN (3058L)
+#define MAC_RX_CONFIG_NT_LINKUP_LATENCY (3059L)
+#define MAC_RX_CONFIG_RST_BLK_ERR (3060L)
+#define MAC_RX_CONFIG_RX_MAC_EN (3061L)
+#define MAC_RX_CONFIG_TS_EOP (3062L)
+#define MAC_RX_CONFIG_TXRX_LOOP (3063L)
+#define MAC_RX_CONFIG2 (3064L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3065L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3066L)
+#define MAC_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3067L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_INT (3068L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_LINK (3069L)
+#define MAC_RX_CONFIG2_NT_RXLOS_MASK_RST (3070L)
+#define MAC_RX_STATUS (3071L)
+#define MAC_RX_STATUS_CORE_MODE (3072L)
+#define MAC_RX_STATUS_LOCAL_FAULT (3073L)
+#define MAC_RX_STATUS_REMOTE_FAULT (3074L)
+#define MAC_RX_STATUS_RXTX_OVERFLOW (3075L)
+#define MAC_RX_STATUS_VERSION (3076L)
+#define MAC_TFG_ADDR (3077L)
+#define MAC_TFG_ADDR_ADR (3078L)
+#define MAC_TFG_ADDR_RDENA (3079L)
+#define MAC_TFG_ADDR_RD_DONE (3080L)
+#define MAC_TFG_CTRL (3081L)
+#define MAC_TFG_CTRL_ID_ENA (3082L)
+#define MAC_TFG_CTRL_ID_POS (3083L)
+#define MAC_TFG_CTRL_RESTART (3084L)
+#define MAC_TFG_CTRL_TG_ACT (3085L)
+#define MAC_TFG_CTRL_TG_ENA (3086L)
+#define MAC_TFG_CTRL_TIME_MODE (3087L)
+#define MAC_TFG_CTRL_WRAP (3088L)
+#define MAC_TFG_DATA (3089L)
+#define MAC_TFG_DATA_GAP (3090L)
+#define MAC_TFG_DATA_ID (3091L)
+#define MAC_TFG_DATA_LENGTH (3092L)
+#define MAC_TFG_FRAME_HDR (3093L)
+#define MAC_TFG_FRAME_HDR_HDR (3094L)
+#define MAC_TFG_REPETITION (3095L)
+#define MAC_TFG_REPETITION_CNT (3096L)
+#define MAC_TX_CONFIG (3097L)
+#define MAC_TX_CONFIG_CLR_STICKY (3098L)
+#define MAC_TX_CONFIG_CRC_ERR_INS (3099L)
+#define MAC_TX_CONFIG_HOST_TX_ENA (3100L)
+#define MAC_TX_CONFIG_MAC_LOOP (3101L)
+#define MAC_TX_CONFIG_PCS_BIP_ERR (3102L)
+#define MAC_TX_CONFIG_PCS_DIS_BIP_INS (3103L)
+#define MAC_TX_CONFIG_PCS_IDLE (3104L)
+#define MAC_TX_CONFIG_PCS_IDLE_DIS (3105L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT (3106L)
+#define MAC_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3107L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT (3108L)
+#define MAC_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3109L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ENA (3110L)
+#define MAC_TX_CONFIG_PCS_SCRAMB_ERR (3111L)
+#define MAC_TX_CONFIG_TIME_OFFSET_TX (3112L)
+#define MAC_TX_CONFIG_TS_EOP (3113L)
+#define MAC_TX_STATUS (3114L)
+#define MAC_TX_STATUS_PCS_ERR (3115L)
+#define MAC_TX_STATUS_TX_MAC_ST (3116L)
+#define MAC_TX_STATUS_UNDER_FLOW (3117L)
+#define MAC_UPD_RX_COUNTERS (3118L)
+#define MAC_UPD_RX_COUNTERS_TRIGGER (3119L)
+/* MAC10 */
+#define MAC10_CFG_0 (3135L)
+#define MAC10_CFG_0_PAD_ENA (3136L)
+#define MAC10_CFG_0_RX_ENA (3137L)
+#define MAC10_CFG_0_RX_PAUSE_ENA (3138L)
+#define MAC10_CFG_0_STR_ENA (3139L)
+#define MAC10_CFG_0_TX_ENA (3140L)
+#define MAC10_CFG_0_TX_PAUSE_ENA (3141L)
+#define MAC10_MA (3142L)
+#define MAC10_MA_MAC10_ADR_0 (3143L)
+#define MAC10_MA_MAC_ADR_0 (3144L)
+#define MAC10_MA_LO (3145L)
+#define MAC10_MA_LO_MA_LO (3146L)
+#define MAC10_MA_MAX_SIZE (3147L)
+#define MAC10_MA_MAX_SIZE_MAC10_ADR_1 (3148L)
+#define MAC10_MA_MAX_SIZE_MAC_ADR_1 (3149L)
+#define MAC10_MA_MAX_SIZE_MTU (3150L)
+#define MAC10_MA_UP (3151L)
+#define MAC10_MA_UP_MA_UP (3152L)
+#define MAC10_STICKY_XAUI (3153L)
+#define MAC10_STICKY_XAUI_STICKY_XAUI (3154L)
+/* MAC100 */
+#define MAC100_CONF_SERDES_BITFRAG (3155L)
+#define MAC100_CONF_SERDES_BITFRAG_BITFRAG (3156L)
+#define MAC100_CONF_SERDES_DELAY (3157L)
+#define MAC100_CONF_SERDES_DELAY_DELAY (3158L)
+#define MAC100_CONF_SERDES_REORDER (3159L)
+#define MAC100_CONF_SERDES_REORDER_REORDER (3160L)
+#define MAC100_FAULTY_BLK (3161L)
+#define MAC100_FAULTY_BLK_DATA (3162L)
+#define MAC100_HOST_STAT_BYTE (3163L)
+#define MAC100_HOST_STAT_BYTE_CNT (3164L)
+#define MAC100_HOST_STAT_BYTE_FILL (3165L)
+#define MAC100_HOST_STAT_BYTE_FILL_CNT (3166L)
+#define MAC100_HOST_STAT_CRC (3167L)
+#define MAC100_HOST_STAT_CRC_CNT (3168L)
+#define MAC100_HOST_STAT_CV (3169L)
+#define MAC100_HOST_STAT_CV_CNT (3170L)
+#define MAC100_HOST_STAT_DROP (3171L)
+#define MAC100_HOST_STAT_DROP_CNT (3172L)
+#define MAC100_HOST_STAT_DROP_BYTE (3173L)
+#define MAC100_HOST_STAT_DROP_BYTE_CNT (3174L)
+#define MAC100_HOST_STAT_FRAME (3175L)
+#define MAC100_HOST_STAT_FRAME_CNT (3176L)
+#define MAC100_HOST_STAT_FRAME_FILL (3177L)
+#define MAC100_HOST_STAT_FRAME_FILL_CNT (3178L)
+#define MAC100_INT (3179L)
+#define MAC100_INT_EN (3180L)
+#define MAC100_INT_MAX_PACE (3181L)
+#define MAC100_LINK_SUMMARY (3182L)
+#define MAC100_LINK_SUMMARY_ABS (3183L)
+#define MAC100_LINK_SUMMARY_GBOX_INTERR (3184L)
+#define MAC100_LINK_SUMMARY_GLB_ALARMN (3185L)
+#define MAC100_LINK_SUMMARY_LH_ABS (3186L)
+#define MAC100_LINK_SUMMARY_LH_GLB_ALARMN (3187L)
+#define MAC100_LINK_SUMMARY_LH_LOCAL_FAULT (3188L)
+#define MAC100_LINK_SUMMARY_LH_REMOTE_FAULT (3189L)
+#define MAC100_LINK_SUMMARY_LH_RX_LOS (3190L)
+#define MAC100_LINK_SUMMARY_LINK_DOWN_CNT (3191L)
+#define MAC100_LINK_SUMMARY_LL_PHY_LINK_STATE (3192L)
+#define MAC100_LINK_SUMMARY_LOCAL_FAULT (3193L)
+#define MAC100_LINK_SUMMARY_NT_PHY_LINK_STATE (3194L)
+#define MAC100_LINK_SUMMARY_REMOTE_FAULT (3195L)
+#define MAC100_LINK_SUMMARY_RX_LOS (3196L)
+#define MAC100_MAC_STAT_BYTE (3197L)
+#define MAC100_MAC_STAT_BYTE_CNT (3198L)
+#define MAC100_MAC_STAT_CRC (3199L)
+#define MAC100_MAC_STAT_CRC_CNT (3200L)
+#define MAC100_MAC_STAT_CV (3201L)
+#define MAC100_MAC_STAT_CV_CNT (3202L)
+#define MAC100_MAC_STAT_FC (3203L)
+#define MAC100_MAC_STAT_FC_CNT (3204L)
+#define MAC100_MAC_STAT_FRAME (3205L)
+#define MAC100_MAC_STAT_FRAME_CNT (3206L)
+#define MAC100_MAC_STAT_MICRO_DROP (3207L)
+#define MAC100_MAC_STAT_MICRO_DROP_CNT (3208L)
+#define MAC100_MAC_STAT_PAUSE (3209L)
+#define MAC100_MAC_STAT_PAUSE_CNT (3210L)
+#define MAC100_MAC_STAT_RATE_DROP (3211L)
+#define MAC100_MAC_STAT_RATE_DROP_CNT (3212L)
+#define MAC100_MAC_STAT_TRUNC (3213L)
+#define MAC100_MAC_STAT_TRUNC_CNT (3214L)
+#define MAC100_MDS_CEN_VAL (3215L)
+#define MAC100_MDS_CEN_VAL_VAL (3216L)
+#define MAC100_MDS_CONF (3217L)
+#define MAC100_MDS_CONF_CENTER_REC_ENA (3218L)
+#define MAC100_MDS_CONF_CLR_STAT (3219L)
+#define MAC100_MDS_CONF_ENA_TS_MOD (3220L)
+#define MAC100_MDS_CONF_REC_ENA (3221L)
+#define MAC100_MDS_CONF_TIME_MODE (3222L)
+#define MAC100_MDS_DATA (3223L)
+#define MAC100_MDS_DATA_DATA (3224L)
+#define MAC100_MDS_FRAMES (3225L)
+#define MAC100_MDS_FRAMES_CNT (3226L)
+#define MAC100_MDS_MAX (3227L)
+#define MAC100_MDS_MAX_MAX (3228L)
+#define MAC100_MDS_MIN (3229L)
+#define MAC100_MDS_MIN_MIN (3230L)
+#define MAC100_MDS_STAT (3231L)
+#define MAC100_MDS_STAT_CLR_BUSY (3232L)
+#define MAC100_MDS_STAT_HIT_MAX (3233L)
+#define MAC100_MDS_STAT_HIT_MIN (3234L)
+#define MAC100_MDS_VAL_REC (3235L)
+#define MAC100_MDS_VAL_REC_VALUE (3236L)
+#define MAC100_MDS_VAL_REC_FRAME (3237L)
+#define MAC100_MDS_VAL_REC_FRAME_VALUE (3238L)
+#define MAC100_NT_PORT_CTRL (3239L)
+#define MAC100_NT_PORT_CTRL_LED_MODE (3240L)
+#define MAC100_RAM_MDS_ADDR (3241L)
+#define MAC100_RAM_MDS_ADDR_ADR (3242L)
+#define MAC100_RAM_MDS_ADDR_CLR_RAM (3243L)
+#define MAC100_RAM_MDS_ADDR_RD_DONE (3244L)
+#define MAC100_RAM_MDS_ADDR_RD_ENA (3245L)
+#define MAC100_RAW_ADDR (3246L)
+#define MAC100_RAW_ADDR_ADR (3247L)
+#define MAC100_RAW_ADDR_RDENA (3248L)
+#define MAC100_RAW_ADDR_RD_DONE (3249L)
+#define MAC100_RAW_CTRL (3250L)
+#define MAC100_RAW_CTRL_OVERWR_LM (3251L)
+#define MAC100_RAW_CTRL_RESTART (3252L)
+#define MAC100_RAW_CTRL_TG_ACT (3253L)
+#define MAC100_RAW_CTRL_TG_ENA (3254L)
+#define MAC100_RAW_CTRL_WRAP (3255L)
+#define MAC100_RAW_DATA (3256L)
+#define MAC100_RAW_DATA_RAW_DATA (3257L)
+#define MAC100_RAW_REPETITION (3258L)
+#define MAC100_RAW_REPETITION_CNT (3259L)
+#define MAC100_RX_CONFIG (3260L)
+#define MAC100_RX_CONFIG_DESCRAMB (3261L)
+#define MAC100_RX_CONFIG_HADP_RUN_MODE (3262L)
+#define MAC100_RX_CONFIG_HOST_CLR_CNT (3263L)
+#define MAC100_RX_CONFIG_MAC_CLR_CNT (3264L)
+#define MAC100_RX_CONFIG_MIN_RX_FRAME (3265L)
+#define MAC100_RX_CONFIG_NT_DEBOUNCE_LATENCY (3266L)
+#define MAC100_RX_CONFIG_NT_FORCE_LINK_DOWN (3267L)
+#define MAC100_RX_CONFIG_NT_LINKUP_LATENCY (3268L)
+#define MAC100_RX_CONFIG_RST_BLK_ERR (3269L)
+#define MAC100_RX_CONFIG_RX_MAC_EN (3270L)
+#define MAC100_RX_CONFIG_TS_EOP (3271L)
+#define MAC100_RX_CONFIG_TXRX_LOOP (3272L)
+#define MAC100_RX_CONFIG2 (3273L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_INT (3274L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_LINK (3275L)
+#define MAC100_RX_CONFIG2_NT_MOD_ABS_MASK_RST (3276L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_INT (3277L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_LINK (3278L)
+#define MAC100_RX_CONFIG2_NT_RXLOS_MASK_RST (3279L)
+#define MAC100_RX_STATUS (3280L)
+#define MAC100_RX_STATUS_CORE_MODE (3281L)
+#define MAC100_RX_STATUS_LOCAL_FAULT (3282L)
+#define MAC100_RX_STATUS_REMOTE_FAULT (3283L)
+#define MAC100_RX_STATUS_RXTX_OVERFLOW (3284L)
+#define MAC100_RX_STATUS_VERSION (3285L)
+#define MAC100_TFG_ADDR (3286L)
+#define MAC100_TFG_ADDR_ADR (3287L)
+#define MAC100_TFG_ADDR_RDENA (3288L)
+#define MAC100_TFG_ADDR_RD_DONE (3289L)
+#define MAC100_TFG_CTRL (3290L)
+#define MAC100_TFG_CTRL_ID_ENA (3291L)
+#define MAC100_TFG_CTRL_ID_POS (3292L)
+#define MAC100_TFG_CTRL_RESTART (3293L)
+#define MAC100_TFG_CTRL_TG_ACT (3294L)
+#define MAC100_TFG_CTRL_TG_ENA (3295L)
+#define MAC100_TFG_CTRL_TIME_MODE (3296L)
+#define MAC100_TFG_CTRL_WRAP (3297L)
+#define MAC100_TFG_DATA (3298L)
+#define MAC100_TFG_DATA_GAP (3299L)
+#define MAC100_TFG_DATA_ID (3300L)
+#define MAC100_TFG_DATA_LENGTH (3301L)
+#define MAC100_TFG_FRAME_HDR (3302L)
+#define MAC100_TFG_FRAME_HDR_HDR (3303L)
+#define MAC100_TFG_REPETITION (3304L)
+#define MAC100_TFG_REPETITION_CNT (3305L)
+#define MAC100_TX_CONFIG (3306L)
+#define MAC100_TX_CONFIG_CLR_STICKY (3307L)
+#define MAC100_TX_CONFIG_CRC_ERR_INS (3308L)
+#define MAC100_TX_CONFIG_HADP_LOOP (3309L)
+#define MAC100_TX_CONFIG_HOST_TX_ENA (3310L)
+#define MAC100_TX_CONFIG_MAC_LOOP (3311L)
+#define MAC100_TX_CONFIG_PCS_BIP_ERR (3312L)
+#define MAC100_TX_CONFIG_PCS_DIS_BIP_INS (3313L)
+#define MAC100_TX_CONFIG_PCS_IDLE (3314L)
+#define MAC100_TX_CONFIG_PCS_IDLE_DIS (3315L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT (3316L)
+#define MAC100_TX_CONFIG_PCS_LOCAL_FAULT_DIS (3317L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT (3318L)
+#define MAC100_TX_CONFIG_PCS_REMOTE_FAULT_DIS (3319L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ENA (3320L)
+#define MAC100_TX_CONFIG_PCS_SCRAMB_ERR (3321L)
+#define MAC100_TX_CONFIG_TIME_OFFSET_TX (3322L)
+#define MAC100_TX_CONFIG_TS_EOP (3323L)
+#define MAC100_TX_STATUS (3324L)
+#define MAC100_TX_STATUS_PCS_ERR (3325L)
+#define MAC100_TX_STATUS_TX_HADP_ST (3326L)
+#define MAC100_TX_STATUS_TX_MAC_ST (3327L)
+#define MAC100_TX_STATUS_UNDER_FLOW (3328L)
+#define MAC100_UPD_RX_COUNTERS (3329L)
+#define MAC100_UPD_RX_COUNTERS_TRIGGER (3330L)
+/* MAC10G */
+#define MAC10G_CFG (3331L)
+#define MAC10G_CFG_MIN_RX_FRAME (3332L)
+#define MAC10G_CFG_RX_ENA (3333L)
+#define MAC10G_CFG_RX_EOP_TS (3334L)
+#define MAC10G_CFG_RX_PAUSE_ENA (3335L)
+#define MAC10G_CFG_STR_ENA (3336L)
+#define MAC10G_CFG_TX_ENA (3337L)
+#define MAC10G_CFG_TX_PAUSE_ENA (3338L)
+#define MAC10G_MA_LO (3339L)
+#define MAC10G_MA_LO_MA_LO (3340L)
+#define MAC10G_MA_UP (3341L)
+#define MAC10G_MA_UP_MA_UP (3342L)
+/* MAC1G */
+#define MAC1G_CFG (3343L)
+#define MAC1G_CFG_MIN_RX_FRAME (3344L)
+#define MAC1G_CFG_RX_ENA (3345L)
+#define MAC1G_CFG_RX_EOP_TS (3346L)
+#define MAC1G_CFG_RX_PAUSE_ENA (3347L)
+#define MAC1G_CFG_SPEED (3348L)
+#define MAC1G_CFG_STR_ENA (3349L)
+#define MAC1G_CFG_TX_ENA (3350L)
+#define MAC1G_CFG_TX_PAUSE_ENA (3351L)
+#define MAC1G_MA_LO (3352L)
+#define MAC1G_MA_LO_MA_LO (3353L)
+#define MAC1G_MA_UP (3354L)
+#define MAC1G_MA_UP_MA_UP (3355L)
+/* MAC_PCS */
+#define MAC_PCS_BAD_CODE (3366L)
+#define MAC_PCS_BAD_CODE_CODE_ERR (3367L)
+#define MAC_PCS_BIP_ERR (3368L)
+#define MAC_PCS_BIP_ERR_BIP_ERR (3369L)
+#define MAC_PCS_BLOCK_LOCK (3370L)
+#define MAC_PCS_BLOCK_LOCK_LOCK (3371L)
+#define MAC_PCS_BLOCK_LOCK_CHG (3372L)
+#define MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG (3373L)
+#define MAC_PCS_CLKRX_FRQ (3374L)
+#define MAC_PCS_CLKRX_FRQ_RX_FREQ (3375L)
+#define MAC_PCS_CLKTX_FRQ (3376L)
+#define MAC_PCS_CLKTX_FRQ_TX_FREQ (3377L)
+#define MAC_PCS_DEBOUNCE_CTRL (3378L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY (3379L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN (3380L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY (3381L)
+#define MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL (3382L)
+#define MAC_PCS_DRP_CONFIG (3383L)
+#define MAC_PCS_DRP_CONFIG_DRP_ADR (3384L)
+#define MAC_PCS_DRP_CONFIG_DRP_DI (3385L)
+#define MAC_PCS_DRP_CONFIG_DRP_EN (3386L)
+#define MAC_PCS_DRP_CONFIG_DRP_MOD_ADR (3387L)
+#define MAC_PCS_DRP_CONFIG_DRP_WREN (3388L)
+#define MAC_PCS_DRP_CTRL (3389L)
+#define MAC_PCS_DRP_CTRL_ADR (3390L)
+#define MAC_PCS_DRP_CTRL_DATA (3391L)
+#define MAC_PCS_DRP_CTRL_DBG_BUSY (3392L)
+#define MAC_PCS_DRP_CTRL_DONE (3393L)
+#define MAC_PCS_DRP_CTRL_MOD_ADR (3394L)
+#define MAC_PCS_DRP_CTRL_WREN (3395L)
+#define MAC_PCS_DRP_DATA (3396L)
+#define MAC_PCS_DRP_DATA_DRP_DO (3397L)
+#define MAC_PCS_DRP_DATA_DRP_RDY (3398L)
+#define MAC_PCS_FEC_CTRL (3399L)
+#define MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN (3400L)
+#define MAC_PCS_FEC_CW_CNT (3401L)
+#define MAC_PCS_FEC_CW_CNT_CW_CNT (3402L)
+#define MAC_PCS_FEC_ERR_CNT_0 (3403L)
+#define MAC_PCS_FEC_ERR_CNT_0_ERR_CNT (3404L)
+#define MAC_PCS_FEC_ERR_CNT_1 (3405L)
+#define MAC_PCS_FEC_ERR_CNT_1_ERR_CNT (3406L)
+#define MAC_PCS_FEC_ERR_CNT_2 (3407L)
+#define MAC_PCS_FEC_ERR_CNT_2_ERR_CNT (3408L)
+#define MAC_PCS_FEC_ERR_CNT_3 (3409L)
+#define MAC_PCS_FEC_ERR_CNT_3_ERR_CNT (3410L)
+#define MAC_PCS_FEC_LANE_DLY_0 (3411L)
+#define MAC_PCS_FEC_LANE_DLY_0_DLY (3412L)
+#define MAC_PCS_FEC_LANE_DLY_1 (3413L)
+#define MAC_PCS_FEC_LANE_DLY_1_DLY (3414L)
+#define MAC_PCS_FEC_LANE_DLY_2 (3415L)
+#define MAC_PCS_FEC_LANE_DLY_2_DLY (3416L)
+#define MAC_PCS_FEC_LANE_DLY_3 (3417L)
+#define MAC_PCS_FEC_LANE_DLY_3_DLY (3418L)
+#define MAC_PCS_FEC_LANE_MAP (3419L)
+#define MAC_PCS_FEC_LANE_MAP_MAPPING (3420L)
+#define MAC_PCS_FEC_STAT (3421L)
+#define MAC_PCS_FEC_STAT_AM_LOCK (3422L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_0 (3423L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_1 (3424L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_2 (3425L)
+#define MAC_PCS_FEC_STAT_AM_LOCK_3 (3426L)
+#define MAC_PCS_FEC_STAT_BLOCK_LOCK (3427L)
+#define MAC_PCS_FEC_STAT_BYPASS (3428L)
+#define MAC_PCS_FEC_STAT_FEC_LANE_ALGN (3429L)
+#define MAC_PCS_FEC_STAT_HI_SER (3430L)
+#define MAC_PCS_FEC_STAT_PCS_LANE_ALGN (3431L)
+#define MAC_PCS_FEC_STAT_VALID (3432L)
+#define MAC_PCS_FEC_UCW_CNT (3433L)
+#define MAC_PCS_FEC_UCW_CNT_UCW_CNT (3434L)
+#define MAC_PCS_FRAMING_ERR (3435L)
+#define MAC_PCS_FRAMING_ERR_FRAMING_ERR (3436L)
+#define MAC_PCS_GTY_CTL (3437L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_0 (3438L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_1 (3439L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_2 (3440L)
+#define MAC_PCS_GTY_CTL_CDR_HOLD_3 (3441L)
+#define MAC_PCS_GTY_CTL_RX (3442L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_0 (3443L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_1 (3444L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_2 (3445L)
+#define MAC_PCS_GTY_CTL_RX_CDR_HOLD_3 (3446L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_0 (3447L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_1 (3448L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_2 (3449L)
+#define MAC_PCS_GTY_CTL_RX_EQUA_RST_3 (3450L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_0 (3451L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_1 (3452L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_2 (3453L)
+#define MAC_PCS_GTY_CTL_RX_LPM_EN_3 (3454L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_0 (3455L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_1 (3456L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_2 (3457L)
+#define MAC_PCS_GTY_CTL_RX_POLARITY_3 (3458L)
+#define MAC_PCS_GTY_CTL_RX_RATE_0 (3459L)
+#define MAC_PCS_GTY_CTL_RX_RATE_1 (3460L)
+#define MAC_PCS_GTY_CTL_RX_RATE_2 (3461L)
+#define MAC_PCS_GTY_CTL_RX_RATE_3 (3462L)
+#define MAC_PCS_GTY_CTL_TX (3463L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_0 (3464L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_1 (3465L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_2 (3466L)
+#define MAC_PCS_GTY_CTL_TX_INHIBIT_3 (3467L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_0 (3468L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_1 (3469L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_2 (3470L)
+#define MAC_PCS_GTY_CTL_TX_POLARITY_3 (3471L)
+#define MAC_PCS_GTY_DIFF_CTL (3472L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0 (3473L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1 (3474L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2 (3475L)
+#define MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3 (3476L)
+#define MAC_PCS_GTY_LOOP (3477L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_0 (3478L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_1 (3479L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_2 (3480L)
+#define MAC_PCS_GTY_LOOP_GT_LOOP_3 (3481L)
+#define MAC_PCS_GTY_POST_CURSOR (3482L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0 (3483L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1 (3484L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2 (3485L)
+#define MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3 (3486L)
+#define MAC_PCS_GTY_PRBS_SEL (3487L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0 (3488L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1 (3489L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2 (3490L)
+#define MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3 (3491L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0 (3492L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1 (3493L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2 (3494L)
+#define MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3 (3495L)
+#define MAC_PCS_GTY_PRE_CURSOR (3496L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0 (3497L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1 (3498L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2 (3499L)
+#define MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3 (3500L)
+#define MAC_PCS_GTY_RX_BUF_STAT (3501L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0 (3502L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1 (3503L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2 (3504L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3 (3505L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0 (3506L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1 (3507L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2 (3508L)
+#define MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3 (3509L)
+#define MAC_PCS_GTY_SCAN_CTL (3510L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0 (3511L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1 (3512L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2 (3513L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3 (3514L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0 (3515L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1 (3516L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2 (3517L)
+#define MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3 (3518L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0 (3519L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1 (3520L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2 (3521L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3 (3522L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0 (3523L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1 (3524L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2 (3525L)
+#define MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3 (3526L)
+#define MAC_PCS_GTY_SCAN_STAT (3527L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0 (3528L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1 (3529L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2 (3530L)
+#define MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3 (3531L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0 (3532L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1 (3533L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2 (3534L)
+#define MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3 (3535L)
+#define MAC_PCS_GTY_STAT (3536L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_0 (3537L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_1 (3538L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_2 (3539L)
+#define MAC_PCS_GTY_STAT_RX_RST_DONE_3 (3540L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_0 (3541L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_1 (3542L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_2 (3543L)
+#define MAC_PCS_GTY_STAT_TX_BUF_STAT_3 (3544L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_0 (3545L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_1 (3546L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_2 (3547L)
+#define MAC_PCS_GTY_STAT_TX_RST_DONE_3 (3548L)
+#define MAC_PCS_LANE_ALIGNER_FILL (3549L)
+#define MAC_PCS_LANE_ALIGNER_FILL_FILL (3550L)
+#define MAC_PCS_LINK_SUMMARY (3551L)
+#define MAC_PCS_LINK_SUMMARY_ABS (3552L)
+#define MAC_PCS_LINK_SUMMARY_LH_ABS (3553L)
+#define MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT (3554L)
+#define MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT (3555L)
+#define MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT (3556L)
+#define MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE (3557L)
+#define MAC_PCS_LINK_SUMMARY_LOCAL_FAULT (3558L)
+#define MAC_PCS_LINK_SUMMARY_NIM_INTERR (3559L)
+#define MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE (3560L)
+#define MAC_PCS_LINK_SUMMARY_REMOTE_FAULT (3561L)
+#define MAC_PCS_LINK_SUMMARY_RESERVED (3562L)
+#define MAC_PCS_MAC_PCS_CONFIG (3563L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST (3564L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE (3565L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC (3566L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST (3567L)
+#define MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN (3568L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST (3569L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE (3570L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE (3571L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST (3572L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE (3573L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI (3574L)
+#define MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN (3575L)
+#define MAC_PCS_MAX_PKT_LEN (3576L)
+#define MAC_PCS_MAX_PKT_LEN_MAX_LEN (3577L)
+#define MAC_PCS_MF_ERR (3578L)
+#define MAC_PCS_MF_ERR_MF_ERR (3579L)
+#define MAC_PCS_MF_LEN_ERR (3580L)
+#define MAC_PCS_MF_LEN_ERR_MF_LEN_ERR (3581L)
+#define MAC_PCS_MF_REPEAT_ERR (3582L)
+#define MAC_PCS_MF_REPEAT_ERR_MF_REPEAT_ERR (3583L)
+#define MAC_PCS_PHYMAC_MISC (3584L)
+#define MAC_PCS_PHYMAC_MISC_TS_EOP (3585L)
+#define MAC_PCS_PHYMAC_MISC_TX_MUX_STATE (3586L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_HOST (3587L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP (3588L)
+#define MAC_PCS_PHYMAC_MISC_TX_SEL_TFG (3589L)
+#define MAC_PCS_PHY_STAT (3590L)
+#define MAC_PCS_PHY_STAT_ALARM (3591L)
+#define MAC_PCS_PHY_STAT_MOD_PRS (3592L)
+#define MAC_PCS_PHY_STAT_RX_LOS (3593L)
+#define MAC_PCS_STAT_PCS_RX (3594L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED (3595L)
+#define MAC_PCS_STAT_PCS_RX_ALIGNED_ERR (3596L)
+#define MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS (3597L)
+#define MAC_PCS_STAT_PCS_RX_HI_BER (3598L)
+#define MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT (3599L)
+#define MAC_PCS_STAT_PCS_RX_LOCAL_FAULT (3600L)
+#define MAC_PCS_STAT_PCS_RX_MISALIGNED (3601L)
+#define MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT (3602L)
+#define MAC_PCS_STAT_PCS_RX_REMOTE_FAULT (3603L)
+#define MAC_PCS_STAT_PCS_RX_STATUS (3604L)
+#define MAC_PCS_STAT_PCS_RX_LATCH (3605L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED (3606L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR (3607L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS (3608L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_HI_BER (3609L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT (3610L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT (3611L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED (3612L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT (3613L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT (3614L)
+#define MAC_PCS_STAT_PCS_RX_LATCH_STATUS (3615L)
+#define MAC_PCS_STAT_PCS_TX (3616L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT (3617L)
+#define MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED (3618L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR (3619L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED (3620L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR (3621L)
+#define MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED (3622L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT (3623L)
+#define MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED (3624L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT (3625L)
+#define MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED (3626L)
+#define MAC_PCS_SYNCED (3627L)
+#define MAC_PCS_SYNCED_SYNC (3628L)
+#define MAC_PCS_SYNCED_ERR (3629L)
+#define MAC_PCS_SYNCED_ERR_SYNC_ERROR (3630L)
+#define MAC_PCS_TEST_ERR (3631L)
+#define MAC_PCS_TEST_ERR_CODE_ERR (3632L)
+#define MAC_PCS_TIMESTAMP_COMP (3633L)
+#define MAC_PCS_TIMESTAMP_COMP_RX_DLY (3634L)
+#define MAC_PCS_TIMESTAMP_COMP_TX_DLY (3635L)
+#define MAC_PCS_VL_DEMUXED (3636L)
+#define MAC_PCS_VL_DEMUXED_LOCK (3637L)
+#define MAC_PCS_VL_DEMUXED_CHG (3638L)
+#define MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG (3639L)
+#define MAC_PCS_VL_NUMBER (3640L)
+#define MAC_PCS_VL_NUMBER_VL_NUMBER (3641L)
+/* MAC_PCS_XXV */
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0 (3642L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ASMDIR (3643L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_BYPASS (3644L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_ENABLE (3645L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_PAUSE (3646L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_0_RESTART (3647L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1 (3648L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ASMDIR (3649L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_BYPASS (3650L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_ENABLE (3651L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_PAUSE (3652L)
+#define MAC_PCS_XXV_ANEG_1G_CONFIG_1_RESTART (3653L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0 (3654L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_COMPLETE (3655L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ANEG_ABLE (3656L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_ASM (3657L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_PAUSE (3658L)
+#define MAC_PCS_XXV_ANEG_1G_STA_0_LP_RF (3659L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1 (3660L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_COMPLETE (3661L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ANEG_ABLE (3662L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_ASM (3663L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_PAUSE (3664L)
+#define MAC_PCS_XXV_ANEG_1G_STA_1_LP_RF (3665L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0 (3666L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR (3667L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1 (3668L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S (3669L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR (3670L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1 (3671L)
+#define MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S (3672L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1 (3673L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR (3674L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1 (3675L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S (3676L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR (3677L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1 (3678L)
+#define MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S (3679L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2 (3680L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR (3681L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR1 (3682L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_BASE25G_CR_S (3683L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR (3684L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1 (3685L)
+#define MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S (3686L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3 (3687L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR (3688L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR1 (3689L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_BASE25G_CR_S (3690L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR (3691L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1 (3692L)
+#define MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S (3693L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0 (3694L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR (3695L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS (3696L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE (3697L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST (3698L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G (3699L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY (3700L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST (3701L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74 (3702L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED (3703L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE (3704L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO (3705L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT (3706L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RESTART (3707L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST (3708L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE (3709L)
+#define MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE (3710L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1 (3711L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR (3712L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS (3713L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE (3714L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST (3715L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G (3716L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY (3717L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST (3718L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74 (3719L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED (3720L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE (3721L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO (3722L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT (3723L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RESTART (3724L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST (3725L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE (3726L)
+#define MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE (3727L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2 (3728L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR (3729L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS (3730L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE (3731L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST (3732L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G (3733L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY (3734L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST (3735L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74 (3736L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED (3737L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE (3738L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO (3739L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT (3740L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RESTART (3741L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST (3742L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE (3743L)
+#define MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE (3744L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3 (3745L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR (3746L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS (3747L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE (3748L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST (3749L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G (3750L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY (3751L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST (3752L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74 (3753L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED (3754L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE (3755L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO (3756L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT (3757L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RESTART (3758L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST (3759L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE (3760L)
+#define MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE (3761L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0 (3762L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_END (3763L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_ANEG_STARTED (3764L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_CDR_HOLD (3765L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_END (3766L)
+#define MAC_PCS_XXV_ANEG_DEBUG_0_LT_STARTED (3767L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1 (3768L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_END (3769L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_ANEG_STARTED (3770L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_CDR_HOLD (3771L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_END (3772L)
+#define MAC_PCS_XXV_ANEG_DEBUG_1_LT_STARTED (3773L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2 (3774L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_END (3775L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_ANEG_STARTED (3776L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_CDR_HOLD (3777L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_END (3778L)
+#define MAC_PCS_XXV_ANEG_DEBUG_2_LT_STARTED (3779L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3 (3780L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_END (3781L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_ANEG_STARTED (3782L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_CDR_HOLD (3783L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_END (3784L)
+#define MAC_PCS_XXV_ANEG_DEBUG_3_LT_STARTED (3785L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0 (3786L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR (3787L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR1 (3788L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_0_LINK_CR_S (3789L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1 (3790L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR (3791L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR1 (3792L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_1_LINK_CR_S (3793L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2 (3794L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR (3795L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR1 (3796L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_2_LINK_CR_S (3797L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3 (3798L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR (3799L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR1 (3800L)
+#define MAC_PCS_XXV_ANEG_LINK_STA_3_LINK_CR_S (3801L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0 (3802L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR (3803L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_0_LP_25GBASE_CR_S (3804L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1 (3805L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR (3806L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_1_LP_25GBASE_CR_S (3807L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2 (3808L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR (3809L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_2_LP_25GBASE_CR_S (3810L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3 (3811L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR (3812L)
+#define MAC_PCS_XXV_ANEG_LP_ABILITIES_3_LP_25GBASE_CR_S (3813L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0 (3814L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_ABILITY (3815L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC74_REQUEST (3816L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_ABILITY (3817L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_C_FEC91_REQUEST (3818L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_25GBASE_CR1 (3819L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_0_LP_EX_ABILITY_VALID (3820L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1 (3821L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_ABILITY (3822L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC74_REQUEST (3823L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_ABILITY (3824L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_C_FEC91_REQUEST (3825L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_25GBASE_CR1 (3826L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_1_LP_EX_ABILITY_VALID (3827L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2 (3828L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_ABILITY (3829L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC74_REQUEST (3830L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_ABILITY (3831L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_C_FEC91_REQUEST (3832L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_25GBASE_CR1 (3833L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_2_LP_EX_ABILITY_VALID (3834L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3 (3835L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_ABILITY (3836L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC74_REQUEST (3837L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_ABILITY (3838L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_C_FEC91_REQUEST (3839L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_25GBASE_CR1 (3840L)
+#define MAC_PCS_XXV_ANEG_LP_EX_ABILITIES_3_LP_EX_ABILITY_VALID (3841L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0 (3842L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ABILITY_VALID (3843L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ANEG_ABLE (3844L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_ASM (3845L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_FEC74_REQ (3846L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_PAUSE (3847L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RF (3848L)
+#define MAC_PCS_XXV_ANEG_LP_STA_0_LP_RS_FEC_REQ (3849L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1 (3850L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ABILITY_VALID (3851L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ANEG_ABLE (3852L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_ASM (3853L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_FEC74_REQ (3854L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_PAUSE (3855L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RF (3856L)
+#define MAC_PCS_XXV_ANEG_LP_STA_1_LP_RS_FEC_REQ (3857L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2 (3858L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ABILITY_VALID (3859L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ANEG_ABLE (3860L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_ASM (3861L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_FEC74_REQ (3862L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_PAUSE (3863L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RF (3864L)
+#define MAC_PCS_XXV_ANEG_LP_STA_2_LP_RS_FEC_REQ (3865L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3 (3866L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ABILITY_VALID (3867L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ANEG_ABLE (3868L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_ASM (3869L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_FEC74_REQ (3870L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_PAUSE (3871L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RF (3872L)
+#define MAC_PCS_XXV_ANEG_LP_STA_3_LP_RS_FEC_REQ (3873L)
+#define MAC_PCS_XXV_ANEG_STA_0 (3874L)
+#define MAC_PCS_XXV_ANEG_STA_0_COMPLETE (3875L)
+#define MAC_PCS_XXV_ANEG_STA_0_FEC74_EN (3876L)
+#define MAC_PCS_XXV_ANEG_STA_0_PAR_D_FAULT (3877L)
+#define MAC_PCS_XXV_ANEG_STA_0_RS_FEC_EN (3878L)
+#define MAC_PCS_XXV_ANEG_STA_0_RX_PAUSE_EN (3879L)
+#define MAC_PCS_XXV_ANEG_STA_0_TX_PAUSE_EN (3880L)
+#define MAC_PCS_XXV_ANEG_STA_1 (3881L)
+#define MAC_PCS_XXV_ANEG_STA_1_COMPLETE (3882L)
+#define MAC_PCS_XXV_ANEG_STA_1_FEC74_EN (3883L)
+#define MAC_PCS_XXV_ANEG_STA_1_PAR_D_FAULT (3884L)
+#define MAC_PCS_XXV_ANEG_STA_1_RS_FEC_EN (3885L)
+#define MAC_PCS_XXV_ANEG_STA_1_RX_PAUSE_EN (3886L)
+#define MAC_PCS_XXV_ANEG_STA_1_TX_PAUSE_EN (3887L)
+#define MAC_PCS_XXV_ANEG_STA_2 (3888L)
+#define MAC_PCS_XXV_ANEG_STA_2_COMPLETE (3889L)
+#define MAC_PCS_XXV_ANEG_STA_2_FEC74_EN (3890L)
+#define MAC_PCS_XXV_ANEG_STA_2_PAR_D_FAULT (3891L)
+#define MAC_PCS_XXV_ANEG_STA_2_RS_FEC_EN (3892L)
+#define MAC_PCS_XXV_ANEG_STA_2_RX_PAUSE_EN (3893L)
+#define MAC_PCS_XXV_ANEG_STA_2_TX_PAUSE_EN (3894L)
+#define MAC_PCS_XXV_ANEG_STA_3 (3895L)
+#define MAC_PCS_XXV_ANEG_STA_3_COMPLETE (3896L)
+#define MAC_PCS_XXV_ANEG_STA_3_FEC74_EN (3897L)
+#define MAC_PCS_XXV_ANEG_STA_3_PAR_D_FAULT (3898L)
+#define MAC_PCS_XXV_ANEG_STA_3_RS_FEC_EN (3899L)
+#define MAC_PCS_XXV_ANEG_STA_3_RX_PAUSE_EN (3900L)
+#define MAC_PCS_XXV_ANEG_STA_3_TX_PAUSE_EN (3901L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY (3902L)
+#define MAC_PCS_XXV_CLK_REF_ACTIVITY_COUNT (3903L)
+#define MAC_PCS_XXV_CORE_CONF_0 (3904L)
+#define MAC_PCS_XXV_CORE_CONF_0_ENHANCED_TS (3905L)
+#define MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE (3906L)
+#define MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK (3907L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE (3908L)
+#define MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC (3909L)
+#define MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP (3910L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE (3911L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS (3912L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS (3913L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE (3914L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI (3915L)
+#define MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI (3916L)
+#define MAC_PCS_XXV_CORE_CONF_1 (3917L)
+#define MAC_PCS_XXV_CORE_CONF_1_ENHANCED_TS (3918L)
+#define MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE (3919L)
+#define MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK (3920L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE (3921L)
+#define MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC (3922L)
+#define MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP (3923L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE (3924L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS (3925L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS (3926L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE (3927L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI (3928L)
+#define MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI (3929L)
+#define MAC_PCS_XXV_CORE_CONF_2 (3930L)
+#define MAC_PCS_XXV_CORE_CONF_2_ENHANCED_TS (3931L)
+#define MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE (3932L)
+#define MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK (3933L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE (3934L)
+#define MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC (3935L)
+#define MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP (3936L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE (3937L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS (3938L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS (3939L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE (3940L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI (3941L)
+#define MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI (3942L)
+#define MAC_PCS_XXV_CORE_CONF_3 (3943L)
+#define MAC_PCS_XXV_CORE_CONF_3_ENHANCED_TS (3944L)
+#define MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE (3945L)
+#define MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK (3946L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE (3947L)
+#define MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC (3948L)
+#define MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP (3949L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE (3950L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS (3951L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS (3952L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE (3953L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI (3954L)
+#define MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI (3955L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0 (3956L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_DEBOUNCE_LATENCY (3957L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_FORCE_LINK_DOWN (3958L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_LINKUP_LATENCY (3959L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL (3960L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1 (3961L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_DEBOUNCE_LATENCY (3962L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_FORCE_LINK_DOWN (3963L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_LINKUP_LATENCY (3964L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL (3965L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2 (3966L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_DEBOUNCE_LATENCY (3967L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_FORCE_LINK_DOWN (3968L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_LINKUP_LATENCY (3969L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL (3970L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3 (3971L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_DEBOUNCE_LATENCY (3972L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_FORCE_LINK_DOWN (3973L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_LINKUP_LATENCY (3974L)
+#define MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL (3975L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0 (3976L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_0_FEC74_CCW_CNT (3977L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1 (3978L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_1_FEC74_CCW_CNT (3979L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2 (3980L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_2_FEC74_CCW_CNT (3981L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3 (3982L)
+#define MAC_PCS_XXV_FEC74_CCW_CNT_3_FEC74_CCW_CNT (3983L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0 (3984L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_FEC74_ERRORS_TO_PCS (3985L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_RX_FEC74_ENABLE (3986L)
+#define MAC_PCS_XXV_FEC74_CONFIG_0_TX_FEC74_ENABLE (3987L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1 (3988L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_FEC74_ERRORS_TO_PCS (3989L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_RX_FEC74_ENABLE (3990L)
+#define MAC_PCS_XXV_FEC74_CONFIG_1_TX_FEC74_ENABLE (3991L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2 (3992L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_FEC74_ERRORS_TO_PCS (3993L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_RX_FEC74_ENABLE (3994L)
+#define MAC_PCS_XXV_FEC74_CONFIG_2_TX_FEC74_ENABLE (3995L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3 (3996L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_FEC74_ERRORS_TO_PCS (3997L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_RX_FEC74_ENABLE (3998L)
+#define MAC_PCS_XXV_FEC74_CONFIG_3_TX_FEC74_ENABLE (3999L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0 (4000L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_0_FEC74_UCW_CNT (4001L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1 (4002L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_1_FEC74_UCW_CNT (4003L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2 (4004L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_2_FEC74_UCW_CNT (4005L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3 (4006L)
+#define MAC_PCS_XXV_FEC74_UCW_CNT_3_FEC74_UCW_CNT (4007L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0 (4008L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_CDR_HOLD (4009L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST (4010L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN (4011L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY (4012L)
+#define MAC_PCS_XXV_GTY_CTL_RX_0_RATE (4013L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1 (4014L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_CDR_HOLD (4015L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST (4016L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN (4017L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY (4018L)
+#define MAC_PCS_XXV_GTY_CTL_RX_1_RATE (4019L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2 (4020L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_CDR_HOLD (4021L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST (4022L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN (4023L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY (4024L)
+#define MAC_PCS_XXV_GTY_CTL_RX_2_RATE (4025L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3 (4026L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_CDR_HOLD (4027L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST (4028L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN (4029L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY (4030L)
+#define MAC_PCS_XXV_GTY_CTL_RX_3_RATE (4031L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0 (4032L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT (4033L)
+#define MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY (4034L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1 (4035L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT (4036L)
+#define MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY (4037L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2 (4038L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT (4039L)
+#define MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY (4040L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3 (4041L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT (4042L)
+#define MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY (4043L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0 (4044L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL (4045L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL_ADJUSTED (4046L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1 (4047L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL (4048L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL_ADJUSTED (4049L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2 (4050L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL (4051L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL_ADJUSTED (4052L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3 (4053L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL (4054L)
+#define MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL_ADJUSTED (4055L)
+#define MAC_PCS_XXV_GTY_LOOP_0 (4056L)
+#define MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP (4057L)
+#define MAC_PCS_XXV_GTY_LOOP_1 (4058L)
+#define MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP (4059L)
+#define MAC_PCS_XXV_GTY_LOOP_2 (4060L)
+#define MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP (4061L)
+#define MAC_PCS_XXV_GTY_LOOP_3 (4062L)
+#define MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP (4063L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0 (4064L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_0_TX_MAIN_CTL (4065L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1 (4066L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_1_TX_MAIN_CTL (4067L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2 (4068L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_2_TX_MAIN_CTL (4069L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3 (4070L)
+#define MAC_PCS_XXV_GTY_MAIN_CTL_3_TX_MAIN_CTL (4071L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0 (4072L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR (4073L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR_ADJUSTED (4074L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1 (4075L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR (4076L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR_ADJUSTED (4077L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2 (4078L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR (4079L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR_ADJUSTED (4080L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3 (4081L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR (4082L)
+#define MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR_ADJUSTED (4083L)
+#define MAC_PCS_XXV_GTY_PRBS_0 (4084L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR (4085L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_ERR_INS (4086L)
+#define MAC_PCS_XXV_GTY_PRBS_0_PRBS_RST (4087L)
+#define MAC_PCS_XXV_GTY_PRBS_0_RX_PRBS_SEL (4088L)
+#define MAC_PCS_XXV_GTY_PRBS_0_TX_PRBS_SEL (4089L)
+#define MAC_PCS_XXV_GTY_PRBS_1 (4090L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR (4091L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_ERR_INS (4092L)
+#define MAC_PCS_XXV_GTY_PRBS_1_PRBS_RST (4093L)
+#define MAC_PCS_XXV_GTY_PRBS_1_RX_PRBS_SEL (4094L)
+#define MAC_PCS_XXV_GTY_PRBS_1_TX_PRBS_SEL (4095L)
+#define MAC_PCS_XXV_GTY_PRBS_2 (4096L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR (4097L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_ERR_INS (4098L)
+#define MAC_PCS_XXV_GTY_PRBS_2_PRBS_RST (4099L)
+#define MAC_PCS_XXV_GTY_PRBS_2_RX_PRBS_SEL (4100L)
+#define MAC_PCS_XXV_GTY_PRBS_2_TX_PRBS_SEL (4101L)
+#define MAC_PCS_XXV_GTY_PRBS_3 (4102L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR (4103L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_ERR_INS (4104L)
+#define MAC_PCS_XXV_GTY_PRBS_3_PRBS_RST (4105L)
+#define MAC_PCS_XXV_GTY_PRBS_3_RX_PRBS_SEL (4106L)
+#define MAC_PCS_XXV_GTY_PRBS_3_TX_PRBS_SEL (4107L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0 (4108L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_0_COUNT (4109L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1 (4110L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_1_COUNT (4111L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2 (4112L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_2_COUNT (4113L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3 (4114L)
+#define MAC_PCS_XXV_GTY_PRBS_CNT_3_COUNT (4115L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0 (4116L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR (4117L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR_ADJUSTED (4118L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1 (4119L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR (4120L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR_ADJUSTED (4121L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2 (4122L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR (4123L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR_ADJUSTED (4124L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3 (4125L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR (4126L)
+#define MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR_ADJUSTED (4127L)
+#define MAC_PCS_XXV_GTY_STATUS_0 (4128L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_POWERGOOD (4129L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_RXBUFSTATUS (4130L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_STARTOFSEQ (4131L)
+#define MAC_PCS_XXV_GTY_STATUS_0_GT_TXBUFSTATUS (4132L)
+#define MAC_PCS_XXV_GTY_STATUS_1 (4133L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_POWERGOOD (4134L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_RXBUFSTATUS (4135L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_STARTOFSEQ (4136L)
+#define MAC_PCS_XXV_GTY_STATUS_1_GT_TXBUFSTATUS (4137L)
+#define MAC_PCS_XXV_GTY_STATUS_2 (4138L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_POWERGOOD (4139L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_RXBUFSTATUS (4140L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_STARTOFSEQ (4141L)
+#define MAC_PCS_XXV_GTY_STATUS_2_GT_TXBUFSTATUS (4142L)
+#define MAC_PCS_XXV_GTY_STATUS_3 (4143L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_POWERGOOD (4144L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_RXBUFSTATUS (4145L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_STARTOFSEQ (4146L)
+#define MAC_PCS_XXV_GTY_STATUS_3_GT_TXBUFSTATUS (4147L)
+#define MAC_PCS_XXV_LATENCY_0 (4148L)
+#define MAC_PCS_XXV_LATENCY_0_RX_LATENCY_MEAS (4149L)
+#define MAC_PCS_XXV_LATENCY_1 (4150L)
+#define MAC_PCS_XXV_LATENCY_1_RX_LATENCY_MEAS (4151L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0 (4152L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_MAIN (4153L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_POST (4154L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_DEC_PRE (4155L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_MAIN (4156L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_POST (4157L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INC_PRE (4158L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_INIT (4159L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_0_PRESET (4160L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1 (4161L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_MAIN (4162L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_POST (4163L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_DEC_PRE (4164L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_MAIN (4165L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_POST (4166L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INC_PRE (4167L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_INIT (4168L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_1_PRESET (4169L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2 (4170L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_MAIN (4171L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_POST (4172L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_DEC_PRE (4173L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_MAIN (4174L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_POST (4175L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INC_PRE (4176L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_INIT (4177L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_2_PRESET (4178L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3 (4179L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_MAIN (4180L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_POST (4181L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_DEC_PRE (4182L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_MAIN (4183L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_POST (4184L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INC_PRE (4185L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_INIT (4186L)
+#define MAC_PCS_XXV_LE_LT_COEF_RECEIVED_3_PRESET (4187L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0 (4188L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_MAIN_STA (4189L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_POST_STA (4190L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_0_PRE_STA (4191L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1 (4192L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_MAIN_STA (4193L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_POST_STA (4194L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_1_PRE_STA (4195L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2 (4196L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_MAIN_STA (4197L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_POST_STA (4198L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_2_PRE_STA (4199L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3 (4200L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_MAIN_STA (4201L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_POST_STA (4202L)
+#define MAC_PCS_XXV_LE_LT_STA_SEND_3_PRE_STA (4203L)
+#define MAC_PCS_XXV_LINK_SPEED_0 (4204L)
+#define MAC_PCS_XXV_LINK_SPEED_0_10G (4205L)
+#define MAC_PCS_XXV_LINK_SPEED_0_SPEED (4206L)
+#define MAC_PCS_XXV_LINK_SPEED_0_TOGGLE (4207L)
+#define MAC_PCS_XXV_LINK_SPEED_1 (4208L)
+#define MAC_PCS_XXV_LINK_SPEED_1_10G (4209L)
+#define MAC_PCS_XXV_LINK_SPEED_1_SPEED (4210L)
+#define MAC_PCS_XXV_LINK_SPEED_1_TOGGLE (4211L)
+#define MAC_PCS_XXV_LINK_SPEED_2 (4212L)
+#define MAC_PCS_XXV_LINK_SPEED_2_10G (4213L)
+#define MAC_PCS_XXV_LINK_SPEED_2_SPEED (4214L)
+#define MAC_PCS_XXV_LINK_SPEED_2_TOGGLE (4215L)
+#define MAC_PCS_XXV_LINK_SPEED_3 (4216L)
+#define MAC_PCS_XXV_LINK_SPEED_3_10G (4217L)
+#define MAC_PCS_XXV_LINK_SPEED_3_SPEED (4218L)
+#define MAC_PCS_XXV_LINK_SPEED_3_TOGGLE (4219L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0 (4220L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ABS (4221L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_COMPLETE (4222L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_ANEG_CONSORTIUM_MISMATCH (4223L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_INTERNAL_LOCAL_FAULT (4224L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS (4225L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT (4226L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT (4227L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT (4228L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT (4229L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_FEC74_LOCK_ERROR (4230L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE (4231L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE (4232L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER (4233L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_LOCAL_FAULT (4234L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LH_TX_UNDERRUN (4235L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT (4236L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE (4237L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK (4238L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK (4239L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT (4240L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT (4241L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_LT_DONE (4242L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR (4243L)
+#define MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE (4244L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1 (4245L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ABS (4246L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_COMPLETE (4247L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_ANEG_CONSORTIUM_MISMATCH (4248L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_INTERNAL_LOCAL_FAULT (4249L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS (4250L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT (4251L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT (4252L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT (4253L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT (4254L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_FEC74_LOCK_ERROR (4255L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE (4256L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE (4257L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER (4258L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_LOCAL_FAULT (4259L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LH_TX_UNDERRUN (4260L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT (4261L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE (4262L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK (4263L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK (4264L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT (4265L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT (4266L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_LT_DONE (4267L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR (4268L)
+#define MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE (4269L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2 (4270L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ABS (4271L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_COMPLETE (4272L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_ANEG_CONSORTIUM_MISMATCH (4273L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_INTERNAL_LOCAL_FAULT (4274L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS (4275L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT (4276L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT (4277L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT (4278L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT (4279L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_FEC74_LOCK_ERROR (4280L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE (4281L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE (4282L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER (4283L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_LOCAL_FAULT (4284L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LH_TX_UNDERRUN (4285L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT (4286L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE (4287L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK (4288L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK (4289L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT (4290L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT (4291L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_LT_DONE (4292L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR (4293L)
+#define MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE (4294L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3 (4295L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ABS (4296L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_COMPLETE (4297L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_ANEG_CONSORTIUM_MISMATCH (4298L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_INTERNAL_LOCAL_FAULT (4299L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS (4300L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT (4301L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT (4302L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT (4303L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT (4304L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_FEC74_LOCK_ERROR (4305L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE (4306L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE (4307L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER (4308L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_LOCAL_FAULT (4309L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LH_TX_UNDERRUN (4310L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT (4311L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE (4312L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK (4313L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK (4314L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT (4315L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT (4316L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_LT_DONE (4317L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR (4318L)
+#define MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE (4319L)
+#define MAC_PCS_XXV_LT_CONF_0 (4320L)
+#define MAC_PCS_XXV_LT_CONF_0_ENABLE (4321L)
+#define MAC_PCS_XXV_LT_CONF_0_RESTART (4322L)
+#define MAC_PCS_XXV_LT_CONF_0_SEED (4323L)
+#define MAC_PCS_XXV_LT_CONF_1 (4324L)
+#define MAC_PCS_XXV_LT_CONF_1_ENABLE (4325L)
+#define MAC_PCS_XXV_LT_CONF_1_RESTART (4326L)
+#define MAC_PCS_XXV_LT_CONF_1_SEED (4327L)
+#define MAC_PCS_XXV_LT_CONF_2 (4328L)
+#define MAC_PCS_XXV_LT_CONF_2_ENABLE (4329L)
+#define MAC_PCS_XXV_LT_CONF_2_RESTART (4330L)
+#define MAC_PCS_XXV_LT_CONF_2_SEED (4331L)
+#define MAC_PCS_XXV_LT_CONF_3 (4332L)
+#define MAC_PCS_XXV_LT_CONF_3_ENABLE (4333L)
+#define MAC_PCS_XXV_LT_CONF_3_RESTART (4334L)
+#define MAC_PCS_XXV_LT_CONF_3_SEED (4335L)
+#define MAC_PCS_XXV_LT_STA_0 (4336L)
+#define MAC_PCS_XXV_LT_STA_0_DONE (4337L)
+#define MAC_PCS_XXV_LT_STA_0_FAIL (4338L)
+#define MAC_PCS_XXV_LT_STA_0_LOCK (4339L)
+#define MAC_PCS_XXV_LT_STA_0_TRAIN (4340L)
+#define MAC_PCS_XXV_LT_STA_1 (4341L)
+#define MAC_PCS_XXV_LT_STA_1_DONE (4342L)
+#define MAC_PCS_XXV_LT_STA_1_FAIL (4343L)
+#define MAC_PCS_XXV_LT_STA_1_LOCK (4344L)
+#define MAC_PCS_XXV_LT_STA_1_TRAIN (4345L)
+#define MAC_PCS_XXV_LT_STA_2 (4346L)
+#define MAC_PCS_XXV_LT_STA_2_DONE (4347L)
+#define MAC_PCS_XXV_LT_STA_2_FAIL (4348L)
+#define MAC_PCS_XXV_LT_STA_2_LOCK (4349L)
+#define MAC_PCS_XXV_LT_STA_2_TRAIN (4350L)
+#define MAC_PCS_XXV_LT_STA_3 (4351L)
+#define MAC_PCS_XXV_LT_STA_3_DONE (4352L)
+#define MAC_PCS_XXV_LT_STA_3_FAIL (4353L)
+#define MAC_PCS_XXV_LT_STA_3_LOCK (4354L)
+#define MAC_PCS_XXV_LT_STA_3_TRAIN (4355L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0 (4356L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_ATTRIB (4357L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_NEXT (4358L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_0_PREV (4359L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1 (4360L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_ATTRIB (4361L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_NEXT (4362L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_1_PREV (4363L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2 (4364L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_ATTRIB (4365L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_NEXT (4366L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_2_PREV (4367L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3 (4368L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_ATTRIB (4369L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_NEXT (4370L)
+#define MAC_PCS_XXV_LT_TABLE_READBACK_3_PREV (4371L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0 (4372L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_ATTRIB (4373L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_NEXT (4374L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_PREV (4375L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_SEL (4376L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_TABLE_ADDR (4377L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_0_UPDATE (4378L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1 (4379L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_ATTRIB (4380L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_NEXT (4381L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_PREV (4382L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_SEL (4383L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_TABLE_ADDR (4384L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_1_UPDATE (4385L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2 (4386L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_ATTRIB (4387L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_NEXT (4388L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_PREV (4389L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_SEL (4390L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_TABLE_ADDR (4391L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_2_UPDATE (4392L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3 (4393L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_ATTRIB (4394L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_NEXT (4395L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_PREV (4396L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_SEL (4397L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_TABLE_ADDR (4398L)
+#define MAC_PCS_XXV_LT_UPDATE_COEF_TABLE_3_UPDATE (4399L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0 (4400L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_0_RX_MAX_LENGTH (4401L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1 (4402L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_1_RX_MAX_LENGTH (4403L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2 (4404L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_2_RX_MAX_LENGTH (4405L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3 (4406L)
+#define MAC_PCS_XXV_MAC_RX_MAX_LENGTH_3_RX_MAX_LENGTH (4407L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0 (4408L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_MIN_RX_FRAME (4409L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_0_RX_MIN_LENGTH (4410L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1 (4411L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_MIN_RX_FRAME (4412L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_1_RX_MIN_LENGTH (4413L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2 (4414L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_MIN_RX_FRAME (4415L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_2_RX_MIN_LENGTH (4416L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3 (4417L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_MIN_RX_FRAME (4418L)
+#define MAC_PCS_XXV_MAC_RX_MIN_LENGTH_3_RX_MIN_LENGTH (4419L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0 (4420L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_0_MAX_LEN (4421L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1 (4422L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_1_MAX_LEN (4423L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2 (4424L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_2_MAX_LEN (4425L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3 (4426L)
+#define MAC_PCS_XXV_MAX_PKT_LEN_3_MAX_LEN (4427L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0 (4428L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_MAIN (4429L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_POST (4430L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ADJ_PRE (4431L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_ENABLE (4432L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_INIT (4433L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_PRESET (4434L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_0_RX_READY (4435L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1 (4436L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_MAIN (4437L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_POST (4438L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ADJ_PRE (4439L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_ENABLE (4440L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_INIT (4441L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_PRESET (4442L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_1_RX_READY (4443L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2 (4444L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_MAIN (4445L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_POST (4446L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ADJ_PRE (4447L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_ENABLE (4448L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_INIT (4449L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_PRESET (4450L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_2_RX_READY (4451L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3 (4452L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_MAIN (4453L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_POST (4454L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ADJ_PRE (4455L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_ENABLE (4456L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_INIT (4457L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_PRESET (4458L)
+#define MAC_PCS_XXV_RE_LT_COEF_TO_TX_3_RX_READY (4459L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0 (4460L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_MAIN_STA (4461L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_POST_STA (4462L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_0_PRE_STA (4463L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1 (4464L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_MAIN_STA (4465L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_POST_STA (4466L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_1_PRE_STA (4467L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2 (4468L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_MAIN_STA (4469L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_POST_STA (4470L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_2_PRE_STA (4471L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3 (4472L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_MAIN_STA (4473L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_POST_STA (4474L)
+#define MAC_PCS_XXV_RE_LT_STA_RECEIVED_3_PRE_STA (4475L)
+#define MAC_PCS_XXV_RST_0 (4476L)
+#define MAC_PCS_XXV_RST_0_MAC_PCS (4477L)
+#define MAC_PCS_XXV_RST_1 (4478L)
+#define MAC_PCS_XXV_RST_1_MAC_PCS (4479L)
+#define MAC_PCS_XXV_RST_2 (4480L)
+#define MAC_PCS_XXV_RST_2_MAC_PCS (4481L)
+#define MAC_PCS_XXV_RST_3 (4482L)
+#define MAC_PCS_XXV_RST_3_MAC_PCS (4483L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0 (4484L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT (4485L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1 (4486L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT (4487L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2 (4488L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT (4489L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3 (4490L)
+#define MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT (4491L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0 (4492L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_CONSORTIUM (4493L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_CORRECTION (4494L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE (4495L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_IEEE_ERROR_INDICATION (4496L)
+#define MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_INDICATION (4497L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1 (4498L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_CONSORTIUM (4499L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_CORRECTION (4500L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE (4501L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_IEEE_ERROR_INDICATION (4502L)
+#define MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_INDICATION (4503L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2 (4504L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_CONSORTIUM (4505L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_CORRECTION (4506L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE (4507L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_IEEE_ERROR_INDICATION (4508L)
+#define MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_INDICATION (4509L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3 (4510L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_CONSORTIUM (4511L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_CORRECTION (4512L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE (4513L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_IEEE_ERROR_INDICATION (4514L)
+#define MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_INDICATION (4515L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0 (4516L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_0_RS_FEC_ERR_CNT (4517L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1 (4518L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_1_RS_FEC_ERR_CNT (4519L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2 (4520L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_2_RS_FEC_ERR_CNT (4521L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3 (4522L)
+#define MAC_PCS_XXV_RS_FEC_ERR_CNT_3_RS_FEC_ERR_CNT (4523L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0 (4524L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT (4525L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1 (4526L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT (4527L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2 (4528L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT (4529L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3 (4530L)
+#define MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT (4531L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0 (4532L)
+#define MAC_PCS_XXV_RX_BAD_FCS_0_COUNT (4533L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1 (4534L)
+#define MAC_PCS_XXV_RX_BAD_FCS_1_COUNT (4535L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2 (4536L)
+#define MAC_PCS_XXV_RX_BAD_FCS_2_COUNT (4537L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3 (4538L)
+#define MAC_PCS_XXV_RX_BAD_FCS_3_COUNT (4539L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0 (4540L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_0_COUNT (4541L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1 (4542L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_1_COUNT (4543L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2 (4544L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_2_COUNT (4545L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3 (4546L)
+#define MAC_PCS_XXV_RX_FRAMING_ERROR_3_COUNT (4547L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0 (4548L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_0_COUNT (4549L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1 (4550L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_1_COUNT (4551L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2 (4552L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_2_COUNT (4553L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3 (4554L)
+#define MAC_PCS_XXV_RX_GOOD_BYTES_3_COUNT (4555L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0 (4556L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_0_COUNT (4557L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1 (4558L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_1_COUNT (4559L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2 (4560L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_2_COUNT (4561L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3 (4562L)
+#define MAC_PCS_XXV_RX_GOOD_PACKETS_3_COUNT (4563L)
+#define MAC_PCS_XXV_RX_LATENCY_0 (4564L)
+#define MAC_PCS_XXV_RX_LATENCY_0_LATENCY (4565L)
+#define MAC_PCS_XXV_RX_LATENCY_1 (4566L)
+#define MAC_PCS_XXV_RX_LATENCY_1_LATENCY (4567L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0 (4568L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_0_COUNT (4569L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1 (4570L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_1_COUNT (4571L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2 (4572L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_2_COUNT (4573L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3 (4574L)
+#define MAC_PCS_XXV_RX_TOTAL_BYTES_3_COUNT (4575L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0 (4576L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_0_COUNT (4577L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1 (4578L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_1_COUNT (4579L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2 (4580L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_2_COUNT (4581L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3 (4582L)
+#define MAC_PCS_XXV_RX_TOTAL_PACKETS_3_COUNT (4583L)
+#define MAC_PCS_XXV_SUB_RST_0 (4584L)
+#define MAC_PCS_XXV_SUB_RST_0_AN_LT (4585L)
+#define MAC_PCS_XXV_SUB_RST_0_QPLL (4586L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_BUF (4587L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA (4588L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS (4589L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PCS (4590L)
+#define MAC_PCS_XXV_SUB_RST_0_RX_PMA (4591L)
+#define MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL (4592L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA (4593L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS (4594L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PCS (4595L)
+#define MAC_PCS_XXV_SUB_RST_0_TX_PMA (4596L)
+#define MAC_PCS_XXV_SUB_RST_1 (4597L)
+#define MAC_PCS_XXV_SUB_RST_1_AN_LT (4598L)
+#define MAC_PCS_XXV_SUB_RST_1_QPLL (4599L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_BUF (4600L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA (4601L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS (4602L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PCS (4603L)
+#define MAC_PCS_XXV_SUB_RST_1_RX_PMA (4604L)
+#define MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL (4605L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA (4606L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS (4607L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PCS (4608L)
+#define MAC_PCS_XXV_SUB_RST_1_TX_PMA (4609L)
+#define MAC_PCS_XXV_SUB_RST_2 (4610L)
+#define MAC_PCS_XXV_SUB_RST_2_AN_LT (4611L)
+#define MAC_PCS_XXV_SUB_RST_2_QPLL (4612L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_BUF (4613L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA (4614L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS (4615L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PCS (4616L)
+#define MAC_PCS_XXV_SUB_RST_2_RX_PMA (4617L)
+#define MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL (4618L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA (4619L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS (4620L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PCS (4621L)
+#define MAC_PCS_XXV_SUB_RST_2_TX_PMA (4622L)
+#define MAC_PCS_XXV_SUB_RST_3 (4623L)
+#define MAC_PCS_XXV_SUB_RST_3_AN_LT (4624L)
+#define MAC_PCS_XXV_SUB_RST_3_QPLL (4625L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_BUF (4626L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA (4627L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS (4628L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PCS (4629L)
+#define MAC_PCS_XXV_SUB_RST_3_RX_PMA (4630L)
+#define MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL (4631L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA (4632L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS (4633L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PCS (4634L)
+#define MAC_PCS_XXV_SUB_RST_3_TX_PMA (4635L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0 (4636L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK (4637L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST (4638L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST (4639L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1 (4640L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK (4641L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST (4642L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST (4643L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2 (4644L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK (4645L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST (4646L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST (4647L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3 (4648L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK (4649L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST (4650L)
+#define MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST (4651L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0 (4652L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY (4653L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY (4654L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1 (4655L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY (4656L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY (4657L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2 (4658L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY (4659L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY (4660L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3 (4661L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY (4662L)
+#define MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY (4663L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0 (4664L)
+#define MAC_PCS_XXV_TX_BAD_FCS_0_COUNT (4665L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1 (4666L)
+#define MAC_PCS_XXV_TX_BAD_FCS_1_COUNT (4667L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2 (4668L)
+#define MAC_PCS_XXV_TX_BAD_FCS_2_COUNT (4669L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3 (4670L)
+#define MAC_PCS_XXV_TX_BAD_FCS_3_COUNT (4671L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0 (4672L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_0_COUNT (4673L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1 (4674L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_1_COUNT (4675L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2 (4676L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_2_COUNT (4677L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3 (4678L)
+#define MAC_PCS_XXV_TX_FRAME_ERROR_3_COUNT (4679L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0 (4680L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_0_COUNT (4681L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1 (4682L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_1_COUNT (4683L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2 (4684L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_2_COUNT (4685L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3 (4686L)
+#define MAC_PCS_XXV_TX_GOOD_BYTES_3_COUNT (4687L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0 (4688L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_0_COUNT (4689L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1 (4690L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_1_COUNT (4691L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2 (4692L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_2_COUNT (4693L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3 (4694L)
+#define MAC_PCS_XXV_TX_GOOD_PACKETS_3_COUNT (4695L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0 (4696L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_0_COUNT (4697L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1 (4698L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_1_COUNT (4699L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2 (4700L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_2_COUNT (4701L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3 (4702L)
+#define MAC_PCS_XXV_TX_TOTAL_BYTES_3_COUNT (4703L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0 (4704L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_0_COUNT (4705L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1 (4706L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_1_COUNT (4707L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2 (4708L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_2_COUNT (4709L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3 (4710L)
+#define MAC_PCS_XXV_TX_TOTAL_PACKETS_3_COUNT (4711L)
+/* MAC_RX */
+#define MAC_RX_BAD_FCS (4712L)
+#define MAC_RX_BAD_FCS_COUNT (4713L)
+#define MAC_RX_BAD_PREAMBLE (4714L)
+#define MAC_RX_BAD_PREAMBLE_COUNT (4715L)
+#define MAC_RX_BAD_SFD (4716L)
+#define MAC_RX_BAD_SFD_COUNT (4717L)
+#define MAC_RX_BROADCAST (4718L)
+#define MAC_RX_BROADCAST_COUNT (4719L)
+#define MAC_RX_FRAGMENT (4720L)
+#define MAC_RX_FRAGMENT_COUNT (4721L)
+#define MAC_RX_INRANGEERR (4722L)
+#define MAC_RX_INRANGEERR_COUNT (4723L)
+#define MAC_RX_JABBER (4724L)
+#define MAC_RX_JABBER_COUNT (4725L)
+#define MAC_RX_MULTICAST (4726L)
+#define MAC_RX_MULTICAST_COUNT (4727L)
+#define MAC_RX_OVERSIZE (4728L)
+#define MAC_RX_OVERSIZE_COUNT (4729L)
+#define MAC_RX_PACKET_1024_1518_BYTES (4730L)
+#define MAC_RX_PACKET_1024_1518_BYTES_COUNT (4731L)
+#define MAC_RX_PACKET_128_255_BYTES (4732L)
+#define MAC_RX_PACKET_128_255_BYTES_COUNT (4733L)
+#define MAC_RX_PACKET_1519_1522_BYTES (4734L)
+#define MAC_RX_PACKET_1519_1522_BYTES_COUNT (4735L)
+#define MAC_RX_PACKET_1523_1548_BYTES (4736L)
+#define MAC_RX_PACKET_1523_1548_BYTES_COUNT (4737L)
+#define MAC_RX_PACKET_1549_2047_BYTES (4738L)
+#define MAC_RX_PACKET_1549_2047_BYTES_COUNT (4739L)
+#define MAC_RX_PACKET_2048_4095_BYTES (4740L)
+#define MAC_RX_PACKET_2048_4095_BYTES_COUNT (4741L)
+#define MAC_RX_PACKET_256_511_BYTES (4742L)
+#define MAC_RX_PACKET_256_511_BYTES_COUNT (4743L)
+#define MAC_RX_PACKET_4096_8191_BYTES (4744L)
+#define MAC_RX_PACKET_4096_8191_BYTES_COUNT (4745L)
+#define MAC_RX_PACKET_512_1023_BYTES (4746L)
+#define MAC_RX_PACKET_512_1023_BYTES_COUNT (4747L)
+#define MAC_RX_PACKET_64_BYTES (4748L)
+#define MAC_RX_PACKET_64_BYTES_COUNT (4749L)
+#define MAC_RX_PACKET_65_127_BYTES (4750L)
+#define MAC_RX_PACKET_65_127_BYTES_COUNT (4751L)
+#define MAC_RX_PACKET_8192_9215_BYTES (4752L)
+#define MAC_RX_PACKET_8192_9215_BYTES_COUNT (4753L)
+#define MAC_RX_PACKET_BAD_FCS (4754L)
+#define MAC_RX_PACKET_BAD_FCS_COUNT (4755L)
+#define MAC_RX_PACKET_LARGE (4756L)
+#define MAC_RX_PACKET_LARGE_COUNT (4757L)
+#define MAC_RX_PACKET_SMALL (4758L)
+#define MAC_RX_PACKET_SMALL_COUNT (4759L)
+#define MAC_RX_STOMPED_FCS (4760L)
+#define MAC_RX_STOMPED_FCS_COUNT (4761L)
+#define MAC_RX_TOOLONG (4762L)
+#define MAC_RX_TOOLONG_COUNT (4763L)
+#define MAC_RX_TOTAL_BYTES (4764L)
+#define MAC_RX_TOTAL_BYTES_COUNT (4765L)
+#define MAC_RX_TOTAL_GOOD_BYTES (4766L)
+#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (4767L)
+#define MAC_RX_TOTAL_GOOD_PACKETS (4768L)
+#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (4769L)
+#define MAC_RX_TOTAL_PACKETS (4770L)
+#define MAC_RX_TOTAL_PACKETS_COUNT (4771L)
+#define MAC_RX_TRUNCATED (4772L)
+#define MAC_RX_TRUNCATED_COUNT (4773L)
+#define MAC_RX_UNDERSIZE (4774L)
+#define MAC_RX_UNDERSIZE_COUNT (4775L)
+#define MAC_RX_UNICAST (4776L)
+#define MAC_RX_UNICAST_COUNT (4777L)
+#define MAC_RX_VLAN (4778L)
+#define MAC_RX_VLAN_COUNT (4779L)
+/* MAC_TFG */
+#define MAC_TFG_TFG_ADDR (4780L)
+#define MAC_TFG_TFG_ADDR_ADR (4781L)
+#define MAC_TFG_TFG_ADDR_RDENA (4782L)
+#define MAC_TFG_TFG_ADDR_RD_DONE (4783L)
+#define MAC_TFG_TFG_CTRL (4784L)
+#define MAC_TFG_TFG_CTRL_ID_ENA (4785L)
+#define MAC_TFG_TFG_CTRL_ID_POS (4786L)
+#define MAC_TFG_TFG_CTRL_RESTART (4787L)
+#define MAC_TFG_TFG_CTRL_TG_ACT (4788L)
+#define MAC_TFG_TFG_CTRL_TG_ENA (4789L)
+#define MAC_TFG_TFG_CTRL_TIME_MODE (4790L)
+#define MAC_TFG_TFG_CTRL_WRAP (4791L)
+#define MAC_TFG_TFG_DATA (4792L)
+#define MAC_TFG_TFG_DATA_GAP (4793L)
+#define MAC_TFG_TFG_DATA_ID (4794L)
+#define MAC_TFG_TFG_DATA_LENGTH (4795L)
+#define MAC_TFG_TFG_FRAME_HDR (4796L)
+#define MAC_TFG_TFG_FRAME_HDR_HDR (4797L)
+#define MAC_TFG_TFG_REPETITION (4798L)
+#define MAC_TFG_TFG_REPETITION_CNT (4799L)
+/* MAC_TX */
+#define MAC_TX_BAD_FCS (4800L)
+#define MAC_TX_BAD_FCS_COUNT (4801L)
+#define MAC_TX_BROADCAST (4802L)
+#define MAC_TX_BROADCAST_COUNT (4803L)
+#define MAC_TX_FRAME_ERRORS (4804L)
+#define MAC_TX_FRAME_ERRORS_COUNT (4805L)
+#define MAC_TX_MULTICAST (4806L)
+#define MAC_TX_MULTICAST_COUNT (4807L)
+#define MAC_TX_PACKET_1024_1518_BYTES (4808L)
+#define MAC_TX_PACKET_1024_1518_BYTES_COUNT (4809L)
+#define MAC_TX_PACKET_128_255_BYTES (4810L)
+#define MAC_TX_PACKET_128_255_BYTES_COUNT (4811L)
+#define MAC_TX_PACKET_1519_1522_BYTES (4812L)
+#define MAC_TX_PACKET_1519_1522_BYTES_COUNT (4813L)
+#define MAC_TX_PACKET_1523_1548_BYTES (4814L)
+#define MAC_TX_PACKET_1523_1548_BYTES_COUNT (4815L)
+#define MAC_TX_PACKET_1549_2047_BYTES (4816L)
+#define MAC_TX_PACKET_1549_2047_BYTES_COUNT (4817L)
+#define MAC_TX_PACKET_2048_4095_BYTES (4818L)
+#define MAC_TX_PACKET_2048_4095_BYTES_COUNT (4819L)
+#define MAC_TX_PACKET_256_511_BYTES (4820L)
+#define MAC_TX_PACKET_256_511_BYTES_COUNT (4821L)
+#define MAC_TX_PACKET_4096_8191_BYTES (4822L)
+#define MAC_TX_PACKET_4096_8191_BYTES_COUNT (4823L)
+#define MAC_TX_PACKET_512_1023_BYTES (4824L)
+#define MAC_TX_PACKET_512_1023_BYTES_COUNT (4825L)
+#define MAC_TX_PACKET_64_BYTES (4826L)
+#define MAC_TX_PACKET_64_BYTES_COUNT (4827L)
+#define MAC_TX_PACKET_65_127_BYTES (4828L)
+#define MAC_TX_PACKET_65_127_BYTES_COUNT (4829L)
+#define MAC_TX_PACKET_8192_9215_BYTES (4830L)
+#define MAC_TX_PACKET_8192_9215_BYTES_COUNT (4831L)
+#define MAC_TX_PACKET_LARGE (4832L)
+#define MAC_TX_PACKET_LARGE_COUNT (4833L)
+#define MAC_TX_PACKET_SMALL (4834L)
+#define MAC_TX_PACKET_SMALL_COUNT (4835L)
+#define MAC_TX_TOTAL_BYTES (4836L)
+#define MAC_TX_TOTAL_BYTES_COUNT (4837L)
+#define MAC_TX_TOTAL_GOOD_BYTES (4838L)
+#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (4839L)
+#define MAC_TX_TOTAL_GOOD_PACKETS (4840L)
+#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (4841L)
+#define MAC_TX_TOTAL_PACKETS (4842L)
+#define MAC_TX_TOTAL_PACKETS_COUNT (4843L)
+#define MAC_TX_UNICAST (4844L)
+#define MAC_TX_UNICAST_COUNT (4845L)
+#define MAC_TX_VLAN (4846L)
+#define MAC_TX_VLAN_COUNT (4847L)
+/* MCU */
+#define MCU_CSR (4848L)
+#define MCU_CSR_HALT (4849L)
+#define MCU_CSR_PAUSE (4850L)
+#define MCU_CSR_RESET (4851L)
+#define MCU_CSR_RESET_MCU (4852L)
+#define MCU_DRAM_CTRL (4853L)
+#define MCU_DRAM_CTRL_ADR (4854L)
+#define MCU_DRAM_CTRL_CNT (4855L)
+#define MCU_DRAM_RD_DATA (4856L)
+#define MCU_DRAM_RD_DATA_DATA (4857L)
+#define MCU_DRAM_WR_DATA (4858L)
+#define MCU_DRAM_WR_DATA_DATA (4859L)
+#define MCU_IRAM_CTRL (4860L)
+#define MCU_IRAM_CTRL_ADR (4861L)
+#define MCU_IRAM_CTRL_CNT (4862L)
+#define MCU_IRAM_DATA (4863L)
+#define MCU_IRAM_DATA_DATA (4864L)
+/* MDG: control, BSO/HBM/HBS table access, egress/ingress flow-control debug, and byte/packet/timeout limits */
+#define MDG_BSO_CTRL (4865L)
+#define MDG_BSO_CTRL_ADR (4866L)
+#define MDG_BSO_CTRL_CNT (4867L)
+#define MDG_BSO_DATA (4868L)
+#define MDG_BSO_DATA_OFFSET (4869L)
+#define MDG_CONTROL (4870L)
+#define MDG_CONTROL_AE (4871L)
+#define MDG_CONTROL_AS (4872L)
+#define MDG_CONTROL_CE (4873L)
+#define MDG_CONTROL_EN (4874L)
+#define MDG_DBG_EGS_FC0 (4875L)
+#define MDG_DBG_EGS_FC0_BLOCKED (4876L)
+#define MDG_DBG_EGS_FC1 (4877L)
+#define MDG_DBG_EGS_FC1_BLOCKED (4878L)
+#define MDG_DBG_EGS_FC2 (4879L)
+#define MDG_DBG_EGS_FC2_BLOCKED (4880L)
+#define MDG_DBG_EGS_FC3 (4881L)
+#define MDG_DBG_EGS_FC3_BLOCKED (4882L)
+#define MDG_DBG_HBM (4883L)
+#define MDG_DBG_HBM_ADR (4884L)
+#define MDG_DBG_HBM_MAIN (4885L)
+#define MDG_DBG_HBM_MAP (4886L)
+#define MDG_DBG_HBM_META (4887L)
+#define MDG_DBG_HBM_VALID (4888L)
+#define MDG_DBG_IGS_FC0 (4889L)
+#define MDG_DBG_IGS_FC0_BLOCKED (4890L)
+#define MDG_DBG_IGS_FC1 (4891L)
+#define MDG_DBG_IGS_FC1_BLOCKED (4892L)
+#define MDG_DBG_IGS_FC2 (4893L)
+#define MDG_DBG_IGS_FC2_BLOCKED (4894L)
+#define MDG_DBG_IGS_FC3 (4895L)
+#define MDG_DBG_IGS_FC3_BLOCKED (4896L)
+#define MDG_HBM_CTRL (4897L)
+#define MDG_HBM_CTRL_ADR (4898L)
+#define MDG_HBM_CTRL_CNT (4899L)
+#define MDG_HBM_DATA (4900L)
+#define MDG_HBM_DATA_MAIN (4901L)
+#define MDG_HBM_DATA_MAP (4902L)
+#define MDG_HBM_DATA_META (4903L)
+#define MDG_HBS_CTRL (4904L)
+#define MDG_HBS_CTRL_ADR (4905L)
+#define MDG_HBS_CTRL_CNT (4906L)
+#define MDG_HBS_DATA (4907L)
+#define MDG_HBS_DATA_SIZE (4908L)
+#define MDG_MAX_BYTES (4909L)
+#define MDG_MAX_BYTES_MAX (4910L)
+#define MDG_MAX_PACKETS (4911L)
+#define MDG_MAX_PACKETS_MAX (4912L)
+#define MDG_TIMEOUT (4913L)
+#define MDG_TIMEOUT_T (4914L)
+/* MSK: RCP table access — per-entry mask DYN/EN/LEN/OFS fields for four mask slots (0..3) */
+#define MSK_RCP_CTRL (4980L)
+#define MSK_RCP_CTRL_ADR (4981L)
+#define MSK_RCP_CTRL_CNT (4982L)
+#define MSK_RCP_DATA (4983L)
+#define MSK_RCP_DATA_MASK_DYN0 (4984L)
+#define MSK_RCP_DATA_MASK_DYN1 (4985L)
+#define MSK_RCP_DATA_MASK_DYN2 (4986L)
+#define MSK_RCP_DATA_MASK_DYN3 (4987L)
+#define MSK_RCP_DATA_MASK_EN0 (4988L)
+#define MSK_RCP_DATA_MASK_EN1 (4989L)
+#define MSK_RCP_DATA_MASK_EN2 (4990L)
+#define MSK_RCP_DATA_MASK_EN3 (4991L)
+#define MSK_RCP_DATA_MASK_LEN0 (4992L)
+#define MSK_RCP_DATA_MASK_LEN1 (4993L)
+#define MSK_RCP_DATA_MASK_LEN2 (4994L)
+#define MSK_RCP_DATA_MASK_LEN3 (4995L)
+#define MSK_RCP_DATA_MASK_OFS0 (4996L)
+#define MSK_RCP_DATA_MASK_OFS1 (4997L)
+#define MSK_RCP_DATA_MASK_OFS2 (4998L)
+#define MSK_RCP_DATA_MASK_OFS3 (4999L)
+/* NIF: GT control (power-down/select, loopback, PRBS, resets), DRP access, error counters, GTH TX tuning, endpoint/PLL link status */
+#define NIF_CTRL (5000L)
+#define NIF_CTRL_GT_PWDN (5001L)
+#define NIF_CTRL_GT_SEL (5002L)
+#define NIF_CTRL_LOOPBACK (5003L)
+#define NIF_CTRL_PMA_INIT (5004L)
+#define NIF_CTRL_PRBS_ERR (5005L)
+#define NIF_CTRL_PRBS_RST (5006L)
+#define NIF_CTRL_PRBS_SEL (5007L)
+#define NIF_CTRL_QPLL_SEL (5008L)
+#define NIF_CTRL_RST (5009L)
+#define NIF_CTRL_TX_INHIBIT (5010L)
+#define NIF_DRP_IF (5011L)
+#define NIF_DRP_IF_ADR (5012L)
+#define NIF_DRP_IF_DATA (5013L)
+#define NIF_DRP_IF_DBG_BUSY (5014L)
+#define NIF_DRP_IF_DONE (5015L)
+#define NIF_DRP_IF_WREN (5016L)
+#define NIF_ERROR (5017L)
+#define NIF_ERROR_HARD_ERR (5018L)
+#define NIF_ERROR_SOFT_ERR (5019L)
+#define NIF_ERROR_SOFT_ERR_CNT (5020L)
+#define NIF_GTH_TX_TUNING (5021L)
+#define NIF_GTH_TX_TUNING_DIFF_CTRL (5022L)
+#define NIF_GTH_TX_TUNING_POST_CURSOR (5023L)
+#define NIF_GTH_TX_TUNING_PRE_CURSOR (5024L)
+#define NIF_HARD_ERROR_CNT (5025L)
+#define NIF_HARD_ERROR_CNT_UNCORRECTABLE_ERRORS (5026L)
+#define NIF_STATUS (5027L)
+#define NIF_STATUS_CH_UP (5028L)
+#define NIF_STATUS_EP0_0_UP (5029L)
+#define NIF_STATUS_EP0_1_UP (5030L)
+#define NIF_STATUS_EP0_2_UP (5031L)
+#define NIF_STATUS_EP0_3_UP (5032L)
+#define NIF_STATUS_EP1_0_UP (5033L)
+#define NIF_STATUS_EP1_1_UP (5034L)
+#define NIF_STATUS_EP1_2_UP (5035L)
+#define NIF_STATUS_EP1_3_UP (5036L)
+#define NIF_STATUS_EP1_4_UP (5037L)
+#define NIF_STATUS_EP1_5_UP (5038L)
+#define NIF_STATUS_EP3_0_UP (5039L)
+#define NIF_STATUS_EP3_1_UP (5040L)
+#define NIF_STATUS_EP3_2_UP (5041L)
+#define NIF_STATUS_EP3_3_UP (5042L)
+#define NIF_STATUS_MMCM_ULCK (5043L)
+#define NIF_STATUS_PLL_LCK (5044L)
+#define NIF_STATUS_QPLLOUT_LCK (5045L)
+#define NIF_STATUS_QPLLOUT_REF_LOST (5046L)
+#define NIF_STATUS_QPLL_LCK (5047L)
+#define NIF_STATUS_RXRST_DONE (5048L)
+#define NIF_STATUS_TXRST_DONE (5049L)
+#define NIF_STATUS_STICK (5050L)
+#define NIF_STATUS_STICK_CH_DOWN (5051L)
+#define NIF_STATUS_STICK_LN_DOWN (5052L)
+#define NIF_STATUS_STICK_MMCM_ULCK (5053L)
+#define NIF_STATUS_STICK_PLL_ULCK (5054L)
+#define NIF_USER_REG (5055L)
+#define NIF_USER_REG_PRIMARY (5056L)
+#define NIF_USER_REG_RST (5057L)
+#define NIF_USER_REG_SECONDARY (5058L)
+#define NIF_USER_REG_STAT (5059L)
+#define NIF_USER_REG_STAT_LL (5060L)
+/* PCIE3: build/product ID, config/control, DRP, EP<->RP error signalling, 32-vector interrupt clear/force/mask, latency RAM, link status, stat counters, test and UUID registers */
+#define PCIE3_BUILD_SEED (5228L)
+#define PCIE3_BUILD_SEED_BUILD_SEED (5229L)
+#define PCIE3_BUILD_TIME (5230L)
+#define PCIE3_BUILD_TIME_TIME (5231L)
+#define PCIE3_CONFIG (5232L)
+#define PCIE3_CONFIG_EXT_TAG (5233L)
+#define PCIE3_CONFIG_MAX_READ (5234L)
+#define PCIE3_CONFIG_MAX_TLP (5235L)
+#define PCIE3_CONTROL (5236L)
+#define PCIE3_CONTROL_RD_ATTR (5237L)
+#define PCIE3_CONTROL_WRAW (5238L)
+#define PCIE3_CONTROL_WR_ATTR (5239L)
+#define PCIE3_CORESPEED (5240L)
+#define PCIE3_CORESPEED_CORESPEED (5241L)
+#define PCIE3_CORESPEED_DDR3SPEED (5242L)
+#define PCIE3_DRP_COMMON (5243L)
+#define PCIE3_DRP_COMMON_DRP_ADDR (5244L)
+#define PCIE3_DRP_COMMON_DRP_RDY (5245L)
+#define PCIE3_DRP_COMMON_GTH_SEL (5246L)
+#define PCIE3_DRP_COMMON_WR (5247L)
+#define PCIE3_DRP_DATE (5248L)
+#define PCIE3_DRP_DATE_DRP_DATA (5249L)
+#define PCIE3_EP_TO_RP_ERR (5250L)
+#define PCIE3_EP_TO_RP_ERR_ERR_COR (5251L)
+#define PCIE3_EP_TO_RP_ERR_ERR_FATAL (5252L)
+#define PCIE3_EP_TO_RP_ERR_ERR_NONFATAL (5253L)
+#define PCIE3_INT_CLR (5254L)
+#define PCIE3_INT_CLR_AVR (5255L)
+#define PCIE3_INT_CLR_FHM (5256L)
+#define PCIE3_INT_CLR_INT_0 (5257L)
+#define PCIE3_INT_CLR_INT_1 (5258L)
+#define PCIE3_INT_CLR_INT_10 (5259L)
+#define PCIE3_INT_CLR_INT_11 (5260L)
+#define PCIE3_INT_CLR_INT_12 (5261L)
+#define PCIE3_INT_CLR_INT_13 (5262L)
+#define PCIE3_INT_CLR_INT_14 (5263L)
+#define PCIE3_INT_CLR_INT_15 (5264L)
+#define PCIE3_INT_CLR_INT_16 (5265L)
+#define PCIE3_INT_CLR_INT_17 (5266L)
+#define PCIE3_INT_CLR_INT_18 (5267L)
+#define PCIE3_INT_CLR_INT_19 (5268L)
+#define PCIE3_INT_CLR_INT_2 (5269L)
+#define PCIE3_INT_CLR_INT_20 (5270L)
+#define PCIE3_INT_CLR_INT_21 (5271L)
+#define PCIE3_INT_CLR_INT_22 (5272L)
+#define PCIE3_INT_CLR_INT_23 (5273L)
+#define PCIE3_INT_CLR_INT_24 (5274L)
+#define PCIE3_INT_CLR_INT_25 (5275L)
+#define PCIE3_INT_CLR_INT_26 (5276L)
+#define PCIE3_INT_CLR_INT_27 (5277L)
+#define PCIE3_INT_CLR_INT_28 (5278L)
+#define PCIE3_INT_CLR_INT_29 (5279L)
+#define PCIE3_INT_CLR_INT_3 (5280L)
+#define PCIE3_INT_CLR_INT_30 (5281L)
+#define PCIE3_INT_CLR_INT_31 (5282L)
+#define PCIE3_INT_CLR_INT_4 (5283L)
+#define PCIE3_INT_CLR_INT_5 (5284L)
+#define PCIE3_INT_CLR_INT_6 (5285L)
+#define PCIE3_INT_CLR_INT_7 (5286L)
+#define PCIE3_INT_CLR_INT_8 (5287L)
+#define PCIE3_INT_CLR_INT_9 (5288L)
+#define PCIE3_INT_CLR_PORT (5289L)
+#define PCIE3_INT_CLR_PPS (5290L)
+#define PCIE3_INT_CLR_QSPI (5291L)
+#define PCIE3_INT_CLR_SPIM (5292L)
+#define PCIE3_INT_CLR_SPIS (5293L)
+#define PCIE3_INT_CLR_STA (5294L)
+#define PCIE3_INT_CLR_TIMER (5295L)
+#define PCIE3_INT_FORC (5296L)
+#define PCIE3_INT_FORC_AVR (5297L)
+#define PCIE3_INT_FORC_FHM (5298L)
+#define PCIE3_INT_FORC_INT_0 (5299L)
+#define PCIE3_INT_FORC_INT_1 (5300L)
+#define PCIE3_INT_FORC_INT_10 (5301L)
+#define PCIE3_INT_FORC_INT_11 (5302L)
+#define PCIE3_INT_FORC_INT_12 (5303L)
+#define PCIE3_INT_FORC_INT_13 (5304L)
+#define PCIE3_INT_FORC_INT_14 (5305L)
+#define PCIE3_INT_FORC_INT_15 (5306L)
+#define PCIE3_INT_FORC_INT_16 (5307L)
+#define PCIE3_INT_FORC_INT_17 (5308L)
+#define PCIE3_INT_FORC_INT_18 (5309L)
+#define PCIE3_INT_FORC_INT_19 (5310L)
+#define PCIE3_INT_FORC_INT_2 (5311L)
+#define PCIE3_INT_FORC_INT_20 (5312L)
+#define PCIE3_INT_FORC_INT_21 (5313L)
+#define PCIE3_INT_FORC_INT_22 (5314L)
+#define PCIE3_INT_FORC_INT_23 (5315L)
+#define PCIE3_INT_FORC_INT_24 (5316L)
+#define PCIE3_INT_FORC_INT_25 (5317L)
+#define PCIE3_INT_FORC_INT_26 (5318L)
+#define PCIE3_INT_FORC_INT_27 (5319L)
+#define PCIE3_INT_FORC_INT_28 (5320L)
+#define PCIE3_INT_FORC_INT_29 (5321L)
+#define PCIE3_INT_FORC_INT_3 (5322L)
+#define PCIE3_INT_FORC_INT_30 (5323L)
+#define PCIE3_INT_FORC_INT_31 (5324L)
+#define PCIE3_INT_FORC_INT_4 (5325L)
+#define PCIE3_INT_FORC_INT_5 (5326L)
+#define PCIE3_INT_FORC_INT_6 (5327L)
+#define PCIE3_INT_FORC_INT_7 (5328L)
+#define PCIE3_INT_FORC_INT_8 (5329L)
+#define PCIE3_INT_FORC_INT_9 (5330L)
+#define PCIE3_INT_FORC_PORT (5331L)
+#define PCIE3_INT_FORC_PPS (5332L)
+#define PCIE3_INT_FORC_QSPI (5333L)
+#define PCIE3_INT_FORC_SPIM (5334L)
+#define PCIE3_INT_FORC_SPIS (5335L)
+#define PCIE3_INT_FORC_STA (5336L)
+#define PCIE3_INT_FORC_TIMER (5337L)
+#define PCIE3_INT_MASK (5338L)
+#define PCIE3_INT_MASK_AVR (5339L)
+#define PCIE3_INT_MASK_FHM (5340L)
+#define PCIE3_INT_MASK_IIC0 (5341L)
+#define PCIE3_INT_MASK_IIC1 (5342L)
+#define PCIE3_INT_MASK_IIC2 (5343L)
+#define PCIE3_INT_MASK_IIC3 (5344L)
+#define PCIE3_INT_MASK_IIC4 (5345L)
+#define PCIE3_INT_MASK_IIC5 (5346L)
+#define PCIE3_INT_MASK_INT_0 (5347L)
+#define PCIE3_INT_MASK_INT_1 (5348L)
+#define PCIE3_INT_MASK_INT_10 (5349L)
+#define PCIE3_INT_MASK_INT_11 (5350L)
+#define PCIE3_INT_MASK_INT_12 (5351L)
+#define PCIE3_INT_MASK_INT_13 (5352L)
+#define PCIE3_INT_MASK_INT_14 (5353L)
+#define PCIE3_INT_MASK_INT_15 (5354L)
+#define PCIE3_INT_MASK_INT_16 (5355L)
+#define PCIE3_INT_MASK_INT_17 (5356L)
+#define PCIE3_INT_MASK_INT_18 (5357L)
+#define PCIE3_INT_MASK_INT_19 (5358L)
+#define PCIE3_INT_MASK_INT_2 (5359L)
+#define PCIE3_INT_MASK_INT_20 (5360L)
+#define PCIE3_INT_MASK_INT_21 (5361L)
+#define PCIE3_INT_MASK_INT_22 (5362L)
+#define PCIE3_INT_MASK_INT_23 (5363L)
+#define PCIE3_INT_MASK_INT_24 (5364L)
+#define PCIE3_INT_MASK_INT_25 (5365L)
+#define PCIE3_INT_MASK_INT_26 (5366L)
+#define PCIE3_INT_MASK_INT_27 (5367L)
+#define PCIE3_INT_MASK_INT_28 (5368L)
+#define PCIE3_INT_MASK_INT_29 (5369L)
+#define PCIE3_INT_MASK_INT_3 (5370L)
+#define PCIE3_INT_MASK_INT_30 (5371L)
+#define PCIE3_INT_MASK_INT_31 (5372L)
+#define PCIE3_INT_MASK_INT_4 (5373L)
+#define PCIE3_INT_MASK_INT_5 (5374L)
+#define PCIE3_INT_MASK_INT_6 (5375L)
+#define PCIE3_INT_MASK_INT_7 (5376L)
+#define PCIE3_INT_MASK_INT_8 (5377L)
+#define PCIE3_INT_MASK_INT_9 (5378L)
+#define PCIE3_INT_MASK_PORT (5379L)
+#define PCIE3_INT_MASK_PPS (5380L)
+#define PCIE3_INT_MASK_QSPI (5381L)
+#define PCIE3_INT_MASK_SPIM (5382L)
+#define PCIE3_INT_MASK_SPIS (5383L)
+#define PCIE3_INT_MASK_STA (5384L)
+#define PCIE3_INT_MASK_TIMER (5385L)
+#define PCIE3_LAT_CTRL (5386L)
+#define PCIE3_LAT_CTRL_CLEAR_RAM (5387L)
+#define PCIE3_LAT_CTRL_ENABLE (5388L)
+#define PCIE3_LAT_CTRL_PRESCAL (5389L)
+#define PCIE3_LAT_CTRL_RAM_VLD (5390L)
+#define PCIE3_LAT_CTRL_READ_RAM (5391L)
+#define PCIE3_LAT_CTRL_STATUS (5392L)
+#define PCIE3_LAT_MAX (5393L)
+#define PCIE3_LAT_MAX_MAX (5394L)
+#define PCIE3_LAT_RAMADR (5395L)
+#define PCIE3_LAT_RAMADR_ADR (5396L)
+#define PCIE3_LAT_RAMDATA (5397L)
+#define PCIE3_LAT_RAMDATA_DATA (5398L)
+#define PCIE3_LINK_STATUS (5399L)
+#define PCIE3_LINK_STATUS_CLEAR (5400L)
+#define PCIE3_LINK_STATUS_RETRAIN_CNT (5401L)
+#define PCIE3_MARKADR_LSB (5402L)
+#define PCIE3_MARKADR_LSB_ADR (5403L)
+#define PCIE3_MARKADR_MSB (5404L)
+#define PCIE3_MARKADR_MSB_ADR (5405L)
+#define PCIE3_PB_INTERVAL (5406L)
+#define PCIE3_PB_INTERVAL_INTERVAL (5407L)
+#define PCIE3_PB_MAX_RD (5408L)
+#define PCIE3_PB_MAX_RD_PB (5409L)
+#define PCIE3_PB_MAX_WR (5410L)
+#define PCIE3_PB_MAX_WR_PB (5411L)
+#define PCIE3_PCIE_CTRL (5412L)
+#define PCIE3_PCIE_CTRL_EXT_TAG_ENA (5413L)
+#define PCIE3_PCI_ENDPOINT (5414L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK (5415L)
+#define PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK (5416L)
+#define PCIE3_PCI_ENDPOINT_GET_MSG (5417L)
+#define PCIE3_PCI_ENDPOINT_IF_ID (5418L)
+#define PCIE3_PCI_ENDPOINT_SEND_MSG (5419L)
+#define PCIE3_PCI_TEST0 (5420L)
+#define PCIE3_PCI_TEST0_DATA (5421L)
+#define PCIE3_PCI_TEST1 (5422L)
+#define PCIE3_PCI_TEST1_DATA (5423L)
+#define PCIE3_PCI_TEST2 (5424L)
+#define PCIE3_PCI_TEST2_DATA (5425L)
+#define PCIE3_PCI_TEST3 (5426L)
+#define PCIE3_PCI_TEST3_DATA (5427L)
+#define PCIE3_PROD_ID_EX (5428L)
+#define PCIE3_PROD_ID_EX_LAYOUT (5429L)
+#define PCIE3_PROD_ID_EX_LAYOUT_VERSION (5430L)
+#define PCIE3_PROD_ID_EX_RESERVED (5431L)
+#define PCIE3_PROD_ID_LSB (5432L)
+#define PCIE3_PROD_ID_LSB_GROUP_ID (5433L)
+#define PCIE3_PROD_ID_LSB_REV_ID (5434L)
+#define PCIE3_PROD_ID_LSB_VER_ID (5435L)
+#define PCIE3_PROD_ID_MSB (5436L)
+#define PCIE3_PROD_ID_MSB_BUILD_NO (5437L)
+#define PCIE3_PROD_ID_MSB_PATCH_NO (5438L)
+#define PCIE3_PROD_ID_MSB_TYPE_ID (5439L)
+#define PCIE3_RESET_CTRL (5440L)
+#define PCIE3_RESET_CTRL_MASK (5441L)
+#define PCIE3_RP_TO_EP_ERR (5442L)
+#define PCIE3_RP_TO_EP_ERR_ERR_COR (5443L)
+#define PCIE3_RP_TO_EP_ERR_ERR_FATAL (5444L)
+#define PCIE3_RP_TO_EP_ERR_ERR_NONFATAL (5445L)
+#define PCIE3_SAMPLE_TIME (5446L)
+#define PCIE3_SAMPLE_TIME_SAMPLE_TIME (5447L)
+#define PCIE3_STATUS (5448L)
+#define PCIE3_STATUS_RD_ERR (5449L)
+#define PCIE3_STATUS_TAGS_IN_USE (5450L)
+#define PCIE3_STATUS_WR_ERR (5451L)
+#define PCIE3_STATUS0 (5452L)
+#define PCIE3_STATUS0_TAGS_IN_USE (5453L)
+#define PCIE3_STATUS0_UR_ADDR (5454L)
+#define PCIE3_STATUS0_UR_DWORD (5455L)
+#define PCIE3_STATUS0_UR_FBE (5456L)
+#define PCIE3_STATUS0_UR_FMT (5457L)
+#define PCIE3_STATUS0_UR_LBE (5458L)
+#define PCIE3_STATUS0_UR_REG (5459L)
+#define PCIE3_STAT_CTRL (5460L)
+#define PCIE3_STAT_CTRL_STAT_ENA (5461L)
+#define PCIE3_STAT_CTRL_STAT_REQ (5462L)
+#define PCIE3_STAT_REFCLK (5463L)
+#define PCIE3_STAT_REFCLK_REFCLK250 (5464L)
+#define PCIE3_STAT_RQ_RDY (5465L)
+#define PCIE3_STAT_RQ_RDY_COUNTER (5466L)
+#define PCIE3_STAT_RQ_VLD (5467L)
+#define PCIE3_STAT_RQ_VLD_COUNTER (5468L)
+#define PCIE3_STAT_RX (5469L)
+#define PCIE3_STAT_RX_COUNTER (5470L)
+#define PCIE3_STAT_TX (5471L)
+#define PCIE3_STAT_TX_COUNTER (5472L)
+#define PCIE3_TEST0 (5473L)
+#define PCIE3_TEST0_DATA (5474L)
+#define PCIE3_TEST1 (5475L)
+#define PCIE3_TEST1_DATA (5476L)
+#define PCIE3_TEST2_DATA (5477L)
+#define PCIE3_TEST3_DATA (5478L)
+#define PCIE3_UUID0 (5479L)
+#define PCIE3_UUID0_UUID0 (5480L)
+#define PCIE3_UUID1 (5481L)
+#define PCIE3_UUID1_UUID1 (5482L)
+#define PCIE3_UUID2 (5483L)
+#define PCIE3_UUID2_UUID2 (5484L)
+#define PCIE3_UUID3 (5485L)
+#define PCIE3_UUID3_UUID3 (5486L)
+/* PCI_RD_TG: PCIe read traffic generator — RAM-addressed setup (physical address low/high, request size/HID, wait/wrap) and run-iteration control */
+#define PCI_RD_TG_TG_CTRL (5487L)
+#define PCI_RD_TG_TG_CTRL_TG_RD_RDY (5488L)
+#define PCI_RD_TG_TG_RDADDR (5489L)
+#define PCI_RD_TG_TG_RDADDR_RAM_ADDR (5490L)
+#define PCI_RD_TG_TG_RDDATA0 (5491L)
+#define PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW (5492L)
+#define PCI_RD_TG_TG_RDDATA1 (5493L)
+#define PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH (5494L)
+#define PCI_RD_TG_TG_RDDATA2 (5495L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_HID (5496L)
+#define PCI_RD_TG_TG_RDDATA2_REQ_SIZE (5497L)
+#define PCI_RD_TG_TG_RDDATA2_WAIT (5498L)
+#define PCI_RD_TG_TG_RDDATA2_WRAP (5499L)
+#define PCI_RD_TG_TG_RD_RUN (5500L)
+#define PCI_RD_TG_TG_RD_RUN_RD_ITERATION (5501L)
+/* PCI_TA: enable control plus good/bad packet and length/payload error amount counters */
+#define PCI_TA_CONTROL (5502L)
+#define PCI_TA_CONTROL_ENABLE (5503L)
+#define PCI_TA_LENGTH_ERROR (5504L)
+#define PCI_TA_LENGTH_ERROR_AMOUNT (5505L)
+#define PCI_TA_PACKET_BAD (5506L)
+#define PCI_TA_PACKET_BAD_AMOUNT (5507L)
+#define PCI_TA_PACKET_GOOD (5508L)
+#define PCI_TA_PACKET_GOOD_AMOUNT (5509L)
+#define PCI_TA_PAYLOAD_ERROR (5510L)
+#define PCI_TA_PAYLOAD_ERROR_AMOUNT (5511L)
+/* PCI_WR_TG: PCIe write traffic generator — sequence, RAM-addressed setup (physical address, request size/HID, inc mode, wait/wrap) and run-iteration control */
+#define PCI_WR_TG_TG_CTRL (5512L)
+#define PCI_WR_TG_TG_CTRL_TG_WR_RDY (5513L)
+#define PCI_WR_TG_TG_SEQ (5514L)
+#define PCI_WR_TG_TG_SEQ_SEQUENCE (5515L)
+#define PCI_WR_TG_TG_WRADDR (5516L)
+#define PCI_WR_TG_TG_WRADDR_RAM_ADDR (5517L)
+#define PCI_WR_TG_TG_WRDATA0 (5518L)
+#define PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW (5519L)
+#define PCI_WR_TG_TG_WRDATA1 (5520L)
+#define PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH (5521L)
+#define PCI_WR_TG_TG_WRDATA2 (5522L)
+#define PCI_WR_TG_TG_WRDATA2_INC_MODE (5523L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_HID (5524L)
+#define PCI_WR_TG_TG_WRDATA2_REQ_SIZE (5525L)
+#define PCI_WR_TG_TG_WRDATA2_WAIT (5526L)
+#define PCI_WR_TG_TG_WRDATA2_WRAP (5527L)
+#define PCI_WR_TG_TG_WR_RUN (5528L)
+#define PCI_WR_TG_TG_WR_RUN_WR_ITERATION (5529L)
+/* PCM_NT100A01_01: board clocking — PTP/TS clock select and MMCM resets, Si5328 GPIO in/out/tristate, and MMCM lock status with sticky latch */
+#define PCM_NT100A01_01_CTRL (5530L)
+#define PCM_NT100A01_01_CTRL_PTP_CLKSEL (5531L)
+#define PCM_NT100A01_01_CTRL_REC_MMCM_RST (5532L)
+#define PCM_NT100A01_01_CTRL_TS_CLKSEL (5533L)
+#define PCM_NT100A01_01_CTRL_TS_MMCM_RST (5534L)
+#define PCM_NT100A01_01_GPIO_I (5535L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_C2B (5536L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_CS_CA (5537L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_INT_C1B (5538L)
+#define PCM_NT100A01_01_GPIO_I_SI5328_LOL (5539L)
+#define PCM_NT100A01_01_GPIO_O (5540L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_CS_CA (5541L)
+#define PCM_NT100A01_01_GPIO_O_SI5328_RST_B (5542L)
+#define PCM_NT100A01_01_GPIO_T (5543L)
+#define PCM_NT100A01_01_GPIO_T_SI5328_CS_CA (5544L)
+#define PCM_NT100A01_01_LATCH (5545L)
+#define PCM_NT100A01_01_LATCH_REC_MMCM_LOCKED (5546L)
+#define PCM_NT100A01_01_LATCH_TCXO_MMCM_LOCKED (5547L)
+#define PCM_NT100A01_01_LATCH_TS_MMCM_LOCKED (5548L)
+#define PCM_NT100A01_01_STAT (5549L)
+#define PCM_NT100A01_01_STAT_REC_MMCM_LOCKED (5550L)
+#define PCM_NT100A01_01_STAT_TCXO_MMCM_LOCKED (5551L)
+#define PCM_NT100A01_01_STAT_TS_MMCM_LOCKED (5552L)
+/* PCM_NT50B01_01: board clocking — TS clock select, TS MMCM reset, and MMCM lock status with sticky latch */
+#define PCM_NT50B01_01_CTRL (5553L)
+#define PCM_NT50B01_01_CTRL_TS_CLKSEL (5554L)
+#define PCM_NT50B01_01_CTRL_TS_MMCM_RST (5555L)
+#define PCM_NT50B01_01_LATCH (5556L)
+#define PCM_NT50B01_01_LATCH_TS_MMCM_LOCKED (5557L)
+#define PCM_NT50B01_01_STAT (5558L)
+#define PCM_NT50B01_01_STAT_TS_MMCM_LOCKED (5559L)
+/* PCS: BER/BIP counters, block/lane lock (with latch/state), DRP access, GTH config/control/TX tuning, lane mapping/offset, PCS config/status, RX/TX polarity */
+#define PCS_BER_COUNT (5560L)
+#define PCS_BER_COUNT_CNT (5561L)
+#define PCS_BIP_COUNT (5562L)
+#define PCS_BIP_COUNT_CNT (5563L)
+#define PCS_BLOCK_LOCK (5564L)
+#define PCS_BLOCK_LOCK_LOCK (5565L)
+#define PCS_BLOCK_LOCK_LATCH (5566L)
+#define PCS_BLOCK_LOCK_LATCH_LATCH_LOCK (5567L)
+#define PCS_BLOCK_LOCK_ST (5568L)
+#define PCS_BLOCK_LOCK_ST_LATCH_STATE (5569L)
+#define PCS_DDR3_STATUS (5570L)
+#define PCS_DDR3_STATUS_CALIB_DONE (5571L)
+#define PCS_DRP_CONFIG (5572L)
+#define PCS_DRP_CONFIG_DRP_ADR (5573L)
+#define PCS_DRP_CONFIG_DRP_DI (5574L)
+#define PCS_DRP_CONFIG_DRP_EN (5575L)
+#define PCS_DRP_CONFIG_DRP_WREN (5576L)
+#define PCS_DRP_DATA (5577L)
+#define PCS_DRP_DATA_DRP_DO (5578L)
+#define PCS_DRP_DATA_DRP_RDY (5579L)
+#define PCS_FSM_DONE (5580L)
+#define PCS_FSM_DONE_RX_RST_DONE (5581L)
+#define PCS_FSM_DONE_TX_RST_DONE (5582L)
+#define PCS_GTH_CONFIG (5583L)
+#define PCS_GTH_CONFIG_EYE_SCAN_RST (5584L)
+#define PCS_GTH_CONFIG_EYE_SCAN_TRIG (5585L)
+#define PCS_GTH_CONFIG_GT_LOOP (5586L)
+#define PCS_GTH_CONFIG_GT_LPM_EN (5587L)
+#define PCS_GTH_CONFIG_GT_MRST (5588L)
+#define PCS_GTH_CONFIG_GT_RX_RST (5589L)
+#define PCS_GTH_CONFIG_GT_SOFT_RST (5590L)
+#define PCS_GTH_CONFIG_GT_TX_RST (5591L)
+#define PCS_GTH_CONFIG_RX_MONITOR_SEL (5592L)
+#define PCS_GTH_CONFIG_RX_PCS_RST (5593L)
+#define PCS_GTH_CONFIG_RX_USER_RDY (5594L)
+#define PCS_GTH_CONFIG_TX_PCS_RST (5595L)
+#define PCS_GTH_CONFIG_TX_USER_RDYU (5596L)
+#define PCS_GTH_CONTROL (5597L)
+#define PCS_GTH_CONTROL_CPLL_LOCK (5598L)
+#define PCS_GTH_CONTROL_CPLL_REFCLK_LOST (5599L)
+#define PCS_GTH_CONTROL_RX_BUF_RST (5600L)
+#define PCS_GTH_TX_TUNING (5601L)
+#define PCS_GTH_TX_TUNING_DIFF_CTRL (5602L)
+#define PCS_GTH_TX_TUNING_POST_CURSOR (5603L)
+#define PCS_GTH_TX_TUNING_PRE_CURSOR (5604L)
+#define PCS_LANE_LOCK (5605L)
+#define PCS_LANE_LOCK_LOCK (5606L)
+#define PCS_LANE_LOCK_LATCH (5607L)
+#define PCS_LANE_LOCK_LATCH_LATCH_LOCK (5608L)
+#define PCS_LANE_LOCK_ST (5609L)
+#define PCS_LANE_LOCK_ST_LATCH_STATE (5610L)
+#define PCS_LANE_MAPPING (5611L)
+#define PCS_LANE_MAPPING_LANE (5612L)
+#define PCS_LANE_OFFSET (5613L)
+#define PCS_LANE_OFFSET_DIFF (5614L)
+#define PCS_PCS_CONFIG (5615L)
+#define PCS_PCS_CONFIG_BER_RST (5616L)
+#define PCS_PCS_CONFIG_BIP_RST (5617L)
+#define PCS_PCS_CONFIG_LANE_ADDR (5618L)
+#define PCS_PCS_CONFIG_LANE_BLOCK_CLR (5619L)
+#define PCS_PCS_CONFIG_TIME_OFFSET_RX (5620L)
+#define PCS_PCS_CONFIG_TXRX_LOOP (5621L)
+#define PCS_PCS_STATUS (5622L)
+#define PCS_PCS_STATUS_ALIGN (5623L)
+#define PCS_PCS_STATUS_DELAY_ERR (5624L)
+#define PCS_PCS_STATUS_FIFO_DELAY (5625L)
+#define PCS_PCS_STATUS_HI_BER (5626L)
+#define PCS_POLARITY (5627L)
+#define PCS_POLARITY_RX_POL (5628L)
+#define PCS_POLARITY_TX_POL (5629L)
+/* PCS100: same register layout as PCS but adds QPLL lock/refclk-lost in GTH_CONTROL and has no GT_LPM_EN or POLARITY registers */
+#define PCS100_BER_COUNT (5630L)
+#define PCS100_BER_COUNT_CNT (5631L)
+#define PCS100_BIP_COUNT (5632L)
+#define PCS100_BIP_COUNT_CNT (5633L)
+#define PCS100_BLOCK_LOCK (5634L)
+#define PCS100_BLOCK_LOCK_LOCK (5635L)
+#define PCS100_BLOCK_LOCK_LATCH (5636L)
+#define PCS100_BLOCK_LOCK_LATCH_LATCH_LOCK (5637L)
+#define PCS100_BLOCK_LOCK_ST (5638L)
+#define PCS100_BLOCK_LOCK_ST_LATCH_STATE (5639L)
+#define PCS100_DDR3_STATUS (5640L)
+#define PCS100_DDR3_STATUS_CALIB_DONE (5641L)
+#define PCS100_DRP_CONFIG (5642L)
+#define PCS100_DRP_CONFIG_DRP_ADR (5643L)
+#define PCS100_DRP_CONFIG_DRP_DI (5644L)
+#define PCS100_DRP_CONFIG_DRP_EN (5645L)
+#define PCS100_DRP_CONFIG_DRP_WREN (5646L)
+#define PCS100_DRP_DATA (5647L)
+#define PCS100_DRP_DATA_DRP_DO (5648L)
+#define PCS100_DRP_DATA_DRP_RDY (5649L)
+#define PCS100_FSM_DONE (5650L)
+#define PCS100_FSM_DONE_RX_RST_DONE (5651L)
+#define PCS100_FSM_DONE_TX_RST_DONE (5652L)
+#define PCS100_GTH_CONFIG (5653L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_RST (5654L)
+#define PCS100_GTH_CONFIG_EYE_SCAN_TRIG (5655L)
+#define PCS100_GTH_CONFIG_GT_LOOP (5656L)
+#define PCS100_GTH_CONFIG_GT_MRST (5657L)
+#define PCS100_GTH_CONFIG_GT_RX_RST (5658L)
+#define PCS100_GTH_CONFIG_GT_SOFT_RST (5659L)
+#define PCS100_GTH_CONFIG_GT_TX_RST (5660L)
+#define PCS100_GTH_CONFIG_RX_MONITOR_SEL (5661L)
+#define PCS100_GTH_CONFIG_RX_PCS_RST (5662L)
+#define PCS100_GTH_CONFIG_RX_USER_RDY (5663L)
+#define PCS100_GTH_CONFIG_TX_PCS_RST (5664L)
+#define PCS100_GTH_CONFIG_TX_USER_RDYU (5665L)
+#define PCS100_GTH_CONTROL (5666L)
+#define PCS100_GTH_CONTROL_CPLL_LOCK (5667L)
+#define PCS100_GTH_CONTROL_CPLL_REFCLK_LOST (5668L)
+#define PCS100_GTH_CONTROL_QPLL_LOCK (5669L)
+#define PCS100_GTH_CONTROL_QPLL_REFCLK_LOST (5670L)
+#define PCS100_GTH_CONTROL_RX_BUF_RST (5671L)
+#define PCS100_GTH_TX_TUNING (5672L)
+#define PCS100_GTH_TX_TUNING_DIFF_CTRL (5673L)
+#define PCS100_GTH_TX_TUNING_POST_CURSOR (5674L)
+#define PCS100_GTH_TX_TUNING_PRE_CURSOR (5675L)
+#define PCS100_LANE_LOCK (5676L)
+#define PCS100_LANE_LOCK_LOCK (5677L)
+#define PCS100_LANE_LOCK_LATCH (5678L)
+#define PCS100_LANE_LOCK_LATCH_LATCH_LOCK (5679L)
+#define PCS100_LANE_LOCK_ST (5680L)
+#define PCS100_LANE_LOCK_ST_LATCH_STATE (5681L)
+#define PCS100_LANE_MAPPING (5682L)
+#define PCS100_LANE_MAPPING_LANE (5683L)
+#define PCS100_LANE_OFFSET (5684L)
+#define PCS100_LANE_OFFSET_DIFF (5685L)
+#define PCS100_PCS_CONFIG (5686L)
+#define PCS100_PCS_CONFIG_BER_RST (5687L)
+#define PCS100_PCS_CONFIG_BIP_RST (5688L)
+#define PCS100_PCS_CONFIG_LANE_ADDR (5689L)
+#define PCS100_PCS_CONFIG_LANE_BLOCK_CLR (5690L)
+#define PCS100_PCS_CONFIG_TIME_OFFSET_RX (5691L)
+#define PCS100_PCS_CONFIG_TXRX_LOOP (5692L)
+#define PCS100_PCS_STATUS (5693L)
+#define PCS100_PCS_STATUS_ALIGN (5694L)
+#define PCS100_PCS_STATUS_DELAY_ERR (5695L)
+#define PCS100_PCS_STATUS_FIFO_DELAY (5696L)
+#define PCS100_PCS_STATUS_HI_BER (5697L)
+/* PDB: global config (port offset, timestamp format) and RCP table — alignment, CRC overwrite, descriptor/offset fields, duplicate and TX options */
+#define PDB_CONFIG (5698L)
+#define PDB_CONFIG_PORT_OFS (5699L)
+#define PDB_CONFIG_TS_FORMAT (5700L)
+#define PDB_RCP_CTRL (5701L)
+#define PDB_RCP_CTRL_ADR (5702L)
+#define PDB_RCP_CTRL_CNT (5703L)
+#define PDB_RCP_DATA (5704L)
+#define PDB_RCP_DATA_ALIGN (5705L)
+#define PDB_RCP_DATA_CRC_OVERWRITE (5706L)
+#define PDB_RCP_DATA_DESCRIPTOR (5707L)
+#define PDB_RCP_DATA_DESC_LEN (5708L)
+#define PDB_RCP_DATA_DUPLICATE_BIT (5709L)
+#define PDB_RCP_DATA_DUPLICATE_EN (5710L)
+#define PDB_RCP_DATA_IP_PROT_TNL (5711L)
+#define PDB_RCP_DATA_OFS0_DYN (5712L)
+#define PDB_RCP_DATA_OFS0_REL (5713L)
+#define PDB_RCP_DATA_OFS1_DYN (5714L)
+#define PDB_RCP_DATA_OFS1_REL (5715L)
+#define PDB_RCP_DATA_OFS2_DYN (5716L)
+#define PDB_RCP_DATA_OFS2_REL (5717L)
+#define PDB_RCP_DATA_PCAP_KEEP_FCS (5718L)
+#define PDB_RCP_DATA_PPC_HSH (5719L)
+#define PDB_RCP_DATA_TX_IGNORE (5720L)
+#define PDB_RCP_DATA_TX_NOW (5721L)
+#define PDB_RCP_DATA_TX_PORT (5722L)
+/* PDI: UART-like serial port — control (enable, parity, resets), RX/TX data, prescaler, status (parity/frame/overrun errors, FIFO levels), soft reset */
+#define PDI_CR (5723L)
+#define PDI_CR_EN (5724L)
+#define PDI_CR_PARITY (5725L)
+#define PDI_CR_RST (5726L)
+#define PDI_CR_RXRST (5727L)
+#define PDI_CR_STOP (5728L)
+#define PDI_CR_TXRST (5729L)
+#define PDI_DRR (5730L)
+#define PDI_DRR_DRR (5731L)
+#define PDI_DTR (5732L)
+#define PDI_DTR_DTR (5733L)
+#define PDI_PRE (5734L)
+#define PDI_PRE_PRE (5735L)
+#define PDI_SR (5736L)
+#define PDI_SR_DISABLE_BUSY (5737L)
+#define PDI_SR_DONE (5738L)
+#define PDI_SR_ENABLE_BUSY (5739L)
+#define PDI_SR_FRAME_ERR (5740L)
+#define PDI_SR_OVERRUN_ERR (5741L)
+#define PDI_SR_PARITY_ERR (5742L)
+#define PDI_SR_RXLVL (5743L)
+#define PDI_SR_RX_BUSY (5744L)
+#define PDI_SR_TXLVL (5745L)
+#define PDI_SR_TX_BUSY (5746L)
+#define PDI_SRR (5747L)
+#define PDI_SRR_RST (5748L)
+/* PHY10G: 10G PHY — core config/status (PCS/PMA resets, loopback, test patterns), control, NIM GPIO, GT control/status, interrupt, link summary, RX timestamp compensation */
+#define PHY10G_CORE_CONF (5749L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_LINK_FAULTS (5750L)
+#define PHY10G_CORE_CONF_CLEAR_PCS_STATUS2 (5751L)
+#define PHY10G_CORE_CONF_CLEAR_PMA_PMD_LINK_FAULTS (5752L)
+#define PHY10G_CORE_CONF_CLEAR_TEST_PATT_ERR_COUNT (5753L)
+#define PHY10G_CORE_CONF_DATA_PATT_SEL (5754L)
+#define PHY10G_CORE_CONF_GLOBAL_TX_DISABLE (5755L)
+#define PHY10G_CORE_CONF_NT_FORCE_LINK_DOWN (5756L)
+#define PHY10G_CORE_CONF_NT_LINKUP_LATENCY (5757L)
+#define PHY10G_CORE_CONF_PCS_LOOPBACK (5758L)
+#define PHY10G_CORE_CONF_PCS_RESET (5759L)
+#define PHY10G_CORE_CONF_PMA_LOOPBACK (5760L)
+#define PHY10G_CORE_CONF_PMA_RESET (5761L)
+#define PHY10G_CORE_CONF_PMD_TX_DISABLE (5762L)
+#define PHY10G_CORE_CONF_PRBS31_RX_EN (5763L)
+#define PHY10G_CORE_CONF_PRBS31_TX_EN (5764L)
+#define PHY10G_CORE_CONF_RX_TEST_PATT_EN (5765L)
+#define PHY10G_CORE_CONF_SET_PCS_LINK_STATUS (5766L)
+#define PHY10G_CORE_CONF_SET_PMA_LINK_STATUS (5767L)
+#define PHY10G_CORE_CONF_TEST_PATT_SEL (5768L)
+#define PHY10G_CORE_CONF_TX_TEST_PATT_EN (5769L)
+#define PHY10G_CORE_STAT (5770L)
+#define PHY10G_CORE_STAT_NT_LINK_STATE (5771L)
+#define PHY10G_CORE_STAT_PCS_BER_COUNT (5772L)
+#define PHY10G_CORE_STAT_PCS_BLOCK_LOCK (5773L)
+#define PHY10G_CORE_STAT_PCS_ERR_BLOCK_COUNT (5774L)
+#define PHY10G_CORE_STAT_PCS_HIBER (5775L)
+#define PHY10G_CORE_STAT_PCS_RESET (5776L)
+#define PHY10G_CORE_STAT_PCS_RX_FAULT (5777L)
+#define PHY10G_CORE_STAT_PCS_RX_HIBER_LH (5778L)
+#define PHY10G_CORE_STAT_PCS_RX_LINK_STATUS (5779L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED (5780L)
+#define PHY10G_CORE_STAT_PCS_RX_LOCKED_LL (5781L)
+#define PHY10G_CORE_STAT_PCS_TEST_PATT_ERR_COUNT (5782L)
+#define PHY10G_CORE_STAT_PCS_TX_FAULT (5783L)
+#define PHY10G_CORE_STAT_PMA_PMD_LINK_STAT (5784L)
+#define PHY10G_CORE_STAT_PMA_PMD_RX_FAULT (5785L)
+#define PHY10G_CORE_STAT_PMA_PMD_TX_FAULT (5786L)
+#define PHY10G_CORE_STAT_PMA_RESET (5787L)
+#define PHY10G_CORE_STAT_RX_SIG_DET (5788L)
+#define PHY10G_CORE_STAT_TENG_PCS_RX_LINK_STATUS (5789L)
+#define PHY10G_CTRL (5790L)
+#define PHY10G_CTRL_FORCE_LINK_DOWN (5791L)
+#define PHY10G_CTRL_HOST_LOOPBACK (5792L)
+#define PHY10G_CTRL_LINE_LOOPBACK (5793L)
+#define PHY10G_CTRL_LINKUP_LATENCY (5794L)
+#define PHY10G_CTRL_SOFT_RESET (5795L)
+#define PHY10G_GPIO (5796L)
+#define PHY10G_GPIO_ABS (5797L)
+#define PHY10G_GPIO_LED_MODE (5798L)
+#define PHY10G_GPIO_LED_MODE_NIM (5799L)
+#define PHY10G_GPIO_LED_MODE_PHY (5800L)
+#define PHY10G_GPIO_PWR_EN (5801L)
+#define PHY10G_GPIO_RX_LOS (5802L)
+#define PHY10G_GPIO_TX_FAULT (5803L)
+#define PHY10G_GT_CTRL (5804L)
+#define PHY10G_GT_CTRL_EYESCANRESET (5805L)
+#define PHY10G_GT_CTRL_EYESCANTRIGGER (5806L)
+#define PHY10G_GT_CTRL_RXCDRHOLD (5807L)
+#define PHY10G_GT_CTRL_RXDFELPMRESET (5808L)
+#define PHY10G_GT_CTRL_RXLPMEN (5809L)
+#define PHY10G_GT_CTRL_RXPMARESET (5810L)
+#define PHY10G_GT_CTRL_RXPRBSENABLE (5811L)
+#define PHY10G_GT_CTRL_RXRATE (5812L)
+#define PHY10G_GT_CTRL_TXDIFFCTRL (5813L)
+#define PHY10G_GT_CTRL_TXPCSRESET (5814L)
+#define PHY10G_GT_CTRL_TXPMARESET (5815L)
+#define PHY10G_GT_CTRL_TXPOSTCURSOR (5816L)
+#define PHY10G_GT_CTRL_TXPRBSENABLE (5817L)
+#define PHY10G_GT_CTRL_TXPRBSFORCEERR (5818L)
+#define PHY10G_GT_CTRL_TXPRECURSOR (5819L)
+#define PHY10G_GT_STAT (5820L)
+#define PHY10G_GT_STAT_DMONITOROUT (5821L)
+#define PHY10G_GT_STAT_EYESCANDATAERROR (5822L)
+#define PHY10G_GT_STAT_RXBUFSTATUS (5823L)
+#define PHY10G_GT_STAT_RXPMARESETDONE (5824L)
+#define PHY10G_GT_STAT_RXPRBSERR (5825L)
+#define PHY10G_GT_STAT_RXPRBSLOCKED (5826L)
+#define PHY10G_GT_STAT_RXRESETDONE (5827L)
+#define PHY10G_GT_STAT_TXBUFSTATUS (5828L)
+#define PHY10G_GT_STAT_TXRESETDONE (5829L)
+#define PHY10G_GT_STAT2 (5830L)
+#define PHY10G_GT_STAT2_DMONITOR (5831L)
+#define PHY10G_GT_STAT2_RXPRBSCNT (5832L)
+#define PHY10G_INT (5833L)
+#define PHY10G_INT_EN (5834L)
+#define PHY10G_INT_MAX_PACE (5835L)
+#define PHY10G_LINK_SUMMARY (5836L)
+#define PHY10G_LINK_SUMMARY_ABS (5837L)
+#define PHY10G_LINK_SUMMARY_CORE_STATUS (5838L)
+#define PHY10G_LINK_SUMMARY_LINK_DOWN_CNT (5839L)
+#define PHY10G_LINK_SUMMARY_NT_LINK_STATE (5840L)
+#define PHY10G_LINK_SUMMARY_RES (5841L)
+#define PHY10G_TS_COMP (5842L)
+#define PHY10G_TS_COMP_RX (5843L)
+/* PHY3S10G: dual-mode 3-speed/10G PHY — autoneg advertisement, per-mode (10G and 3S) core config/status, control with PHY select, GPIO, GT control/status, interrupt, link summary, per-mode RX timestamp compensation */
+#define PHY3S10G_ANEG_ADV_3S (5844L)
+#define PHY3S10G_ANEG_ADV_3S_DUPLEX (5845L)
+#define PHY3S10G_ANEG_ADV_3S_PAUSE (5846L)
+#define PHY3S10G_ANEG_ADV_3S_REMOTE_FAULT (5847L)
+#define PHY3S10G_CORE_CONF_10G (5848L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_LINK_FAULTS (5849L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PCS_STATUS2 (5850L)
+#define PHY3S10G_CORE_CONF_10G_CLEAR_PMA_PMD_LINK_FAULTS (5851L)
+#define PHY3S10G_CORE_CONF_10G_PCS_LOOPBACK (5852L)
+#define PHY3S10G_CORE_CONF_10G_PCS_RESET (5853L)
+#define PHY3S10G_CORE_CONF_10G_PMA_LOOPBACK (5854L)
+#define PHY3S10G_CORE_CONF_10G_PMA_RESET (5855L)
+#define PHY3S10G_CORE_CONF_10G_SET_PCS_LINK_STATUS (5856L)
+#define PHY3S10G_CORE_CONF_10G_SET_PMA_LINK_STATUS (5857L)
+#define PHY3S10G_CORE_CONF_3S (5858L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_BYPASS_EN (5859L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_EN (5860L)
+#define PHY3S10G_CORE_CONF_3S_ANEG_RESTART (5861L)
+#define PHY3S10G_CORE_CONF_3S_BASEX_OR_SGMII (5862L)
+#define PHY3S10G_CORE_CONF_3S_LINK_TIMER_BASEX (5863L)
+#define PHY3S10G_CORE_CONF_3S_LOOPBACK_CTRL (5864L)
+#define PHY3S10G_CORE_CONF_3S_POWER_DOWN (5865L)
+#define PHY3S10G_CORE_CONF_3S_SPEED (5866L)
+#define PHY3S10G_CORE_CONF_3S_UNIDIRECT_EN (5867L)
+#define PHY3S10G_CORE_STAT_10G (5868L)
+#define PHY3S10G_CORE_STAT_10G_NT_LINK_STATE (5869L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BER_COUNT (5870L)
+#define PHY3S10G_CORE_STAT_10G_PCS_BLOCK_LOCK (5871L)
+#define PHY3S10G_CORE_STAT_10G_PCS_ERR_BLOCK_COUNT (5872L)
+#define PHY3S10G_CORE_STAT_10G_PCS_HIBER (5873L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_HIBER_LH (5874L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LINK_STATUS (5875L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED (5876L)
+#define PHY3S10G_CORE_STAT_10G_PCS_RX_LOCKED_LL (5877L)
+#define PHY3S10G_CORE_STAT_10G_PMA_PMD_LINK_STAT (5878L)
+#define PHY3S10G_CORE_STAT_10G_RX_SIG_DET (5879L)
+#define PHY3S10G_CORE_STAT_10G_TENG_PCS_RX_LINK_STATUS (5880L)
+#define PHY3S10G_CORE_STAT_3S (5881L)
+#define PHY3S10G_CORE_STAT_3S_ANEG_COMPLETE (5882L)
+#define PHY3S10G_CORE_STAT_3S_DUPLEX_MODE (5883L)
+#define PHY3S10G_CORE_STAT_3S_LINK_STATUS (5884L)
+#define PHY3S10G_CORE_STAT_3S_LINK_SYNC (5885L)
+#define PHY3S10G_CORE_STAT_3S_NT_LINK_STATE (5886L)
+#define PHY3S10G_CORE_STAT_3S_PAUSE (5887L)
+#define PHY3S10G_CORE_STAT_3S_PHY_LINK_STAT (5888L)
+#define PHY3S10G_CORE_STAT_3S_REM_FAULT_ENC (5889L)
+#define PHY3S10G_CORE_STAT_3S_RESERVED (5890L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_C (5891L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_I (5892L)
+#define PHY3S10G_CORE_STAT_3S_RUDI_INVALID (5893L)
+#define PHY3S10G_CORE_STAT_3S_RXDISPERR (5894L)
+#define PHY3S10G_CORE_STAT_3S_RXNOTINTABLE (5895L)
+#define PHY3S10G_CORE_STAT_3S_SPEED (5896L)
+#define PHY3S10G_CTRL (5897L)
+#define PHY3S10G_CTRL_FORCE_LINK_DOWN (5898L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_10G (5899L)
+#define PHY3S10G_CTRL_HOST_LOOPBACK_3S (5900L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_10G (5901L)
+#define PHY3S10G_CTRL_LINE_LOOPBACK_3S (5902L)
+#define PHY3S10G_CTRL_LINKUP_LATENCY (5903L)
+#define PHY3S10G_CTRL_PHY_CHANGE_FSM_DONE (5904L)
+#define PHY3S10G_CTRL_PHY_SEL (5905L)
+#define PHY3S10G_CTRL_SOFT_RESET (5906L)
+#define PHY3S10G_GPIO (5907L)
+#define PHY3S10G_GPIO_ABS (5908L)
+#define PHY3S10G_GPIO_LED_MODE (5909L)
+#define PHY3S10G_GPIO_RATE_SEL (5910L)
+#define PHY3S10G_GPIO_RX_LOS (5911L)
+#define PHY3S10G_GPIO_TX_DISABLE (5912L)
+#define PHY3S10G_GPIO_TX_FAULT (5913L)
+#define PHY3S10G_GT_CR (5914L)
+#define PHY3S10G_GT_CR_EYESCANRESET (5915L)
+#define PHY3S10G_GT_CR_EYESCANTRIGGER (5916L)
+#define PHY3S10G_GT_CR_LOOPBACK (5917L)
+#define PHY3S10G_GT_CR_RXCDRHOLD (5918L)
+#define PHY3S10G_GT_CR_RXDFELPMRESET (5919L)
+#define PHY3S10G_GT_CR_RXLPMEN (5920L)
+#define PHY3S10G_GT_CR_RXPMARESET (5921L)
+#define PHY3S10G_GT_CR_RXPRBSCNTRESET (5922L)
+#define PHY3S10G_GT_CR_RXPRBSSEL (5923L)
+#define PHY3S10G_GT_CR_TXDIFFCTRL (5924L)
+#define PHY3S10G_GT_CR_TXPMARESET (5925L)
+#define PHY3S10G_GT_CR_TXPOSTCURSOR (5926L)
+#define PHY3S10G_GT_CR_TXPRBSFORCEERR (5927L)
+#define PHY3S10G_GT_CR_TXPRBSSEL (5928L)
+#define PHY3S10G_GT_CR_TXPRECURSOR (5929L)
+#define PHY3S10G_GT_SR (5930L)
+#define PHY3S10G_GT_SR_EYESCANDATAERROR (5931L)
+#define PHY3S10G_GT_SR_RXBUFSTATUS (5932L)
+#define PHY3S10G_GT_SR_RXPMARESETDONE (5933L)
+#define PHY3S10G_GT_SR_RXPRBSERR (5934L)
+#define PHY3S10G_GT_SR_RXRESETDONE (5935L)
+#define PHY3S10G_GT_SR_TXBUFSTATUS (5936L)
+#define PHY3S10G_GT_SR_TXRESETDONE (5937L)
+#define PHY3S10G_INT (5938L)
+#define PHY3S10G_INT_EN (5939L)
+#define PHY3S10G_INT_MAX_PACE (5940L)
+#define PHY3S10G_LINK_SUMMARY (5941L)
+#define PHY3S10G_LINK_SUMMARY_ABS (5942L)
+#define PHY3S10G_LINK_SUMMARY_ANEG_BYPASS (5943L)
+#define PHY3S10G_LINK_SUMMARY_LINK_DOWN_CNT (5944L)
+#define PHY3S10G_LINK_SUMMARY_NT_LINK_STATE (5945L)
+#define PHY3S10G_TS_COMP (5946L)
+#define PHY3S10G_TS_COMP_RX (5947L)
+#define PHY3S10G_TS_COMP_RX_10G (5948L)
+#define PHY3S10G_TS_COMP_RX_3S (5949L)
+/* PM — packet-memory / host-buffer pointer manager (host-buffer segment sizes,
+ * pointer banks, RX/TX read- and write-pointer mirrors, overflow/blocked status).
+ * NOTE(review): expansion of "PM" inferred from the HB/PBI/RXWP/TXRP field names
+ * below — confirm against the hardware documentation. */
+#define PM_CTRL (5950L)
+#define PM_CTRL_SW_CLEAN_DONE (5951L)
+#define PM_DEBUG_RP (5952L)
+#define PM_DEBUG_RP_RP (5953L)
+#define PM_DEBUG_RP_SETUP (5954L)
+#define PM_DEBUG_RP_SETUP_HB (5955L)
+#define PM_DEBUG_RX_BLOCK (5956L)
+#define PM_DEBUG_RX_BLOCK_MASK (5957L)
+#define PM_HB_SIZE_RX_MEM_CTRL (5958L)
+#define PM_HB_SIZE_RX_MEM_CTRL_A (5959L)
+#define PM_HB_SIZE_RX_MEM_CTRL_CNT (5960L)
+#define PM_HB_SIZE_RX_MEM_DATA (5961L)
+#define PM_HB_SIZE_RX_MEM_DATA_SIZE (5962L)
+#define PM_HB_SIZE_RX_THRESHOLD (5963L)
+#define PM_HB_SIZE_RX_THRESHOLD_D (5964L)
+#define PM_HB_SIZE_TX_THRESHOLD (5965L)
+#define PM_HB_SIZE_TX_THRESHOLD_D (5966L)
+#define PM_PBI_MEM_CTRL (5967L)
+#define PM_PBI_MEM_CTRL_A (5968L)
+#define PM_PBI_MEM_CTRL_CNT (5969L)
+#define PM_PBI_MEM_DATA (5970L)
+#define PM_PBI_MEM_DATA_PHYADDR (5971L)
+#define PM_PBI_MEM_DATA_SIZE (5972L)
+#define PM_POINTER_BANKS (5973L)
+#define PM_POINTER_BANKS_D (5974L)
+#define PM_RXTX_FAST_MEM_CTRL (5975L)
+#define PM_RXTX_FAST_MEM_CTRL_A (5976L)
+#define PM_RXTX_FAST_MEM_CTRL_CNT (5977L)
+#define PM_RXTX_FAST_MEM_DATA (5978L)
+#define PM_RXTX_FAST_MEM_DATA_BANK (5979L)
+#define PM_RXTX_FAST_MEM_DATA_ENTRY (5980L)
+#define PM_RXTX_FAST_MEM_DATA_HOST_BUFFER (5981L)
+#define PM_RXTX_FAST_MEM_DATA_RX_TX (5982L)
+#define PM_RXTX_FAST_MEM_DATA_VLD (5983L)
+#define PM_RXTX_SLOW_MEM_CTRL (5984L)
+#define PM_RXTX_SLOW_MEM_CTRL_A (5985L)
+#define PM_RXTX_SLOW_MEM_CTRL_CNT (5986L)
+#define PM_RXTX_SLOW_MEM_DATA (5987L)
+#define PM_RXTX_SLOW_MEM_DATA_BANK (5988L)
+#define PM_RXTX_SLOW_MEM_DATA_ENTRY (5989L)
+#define PM_RXTX_SLOW_MEM_DATA_HOST_BUFFER (5990L)
+#define PM_RXTX_SLOW_MEM_DATA_RX_TX (5991L)
+#define PM_RXTX_SLOW_MEM_DATA_VLD (5992L)
+#define PM_RXWP_MEM_CTRL (5993L)
+#define PM_RXWP_MEM_CTRL_A (5994L)
+#define PM_RXWP_MEM_CTRL_CNT (5995L)
+#define PM_RXWP_MEM_DATA (5996L)
+#define PM_RXWP_MEM_DATA_BANK (5997L)
+#define PM_RXWP_MEM_DATA_ENTRY (5998L)
+#define PM_RXWP_MEM_DATA_HOST_BUFFER (5999L)
+#define PM_RXWP_MEM_DATA_VLD (6000L)
+#define PM_RX_BLOCKED_STATUS (6001L)
+#define PM_RX_BLOCKED_STATUS_D (6002L)
+#define PM_RX_BLOCKED_STATUS_HI (6003L)
+#define PM_RX_BLOCKED_STATUS_HI_D (6004L)
+#define PM_RX_OVERFLOW_STATUS (6005L)
+#define PM_RX_OVERFLOW_STATUS_D (6006L)
+#define PM_RX_READER (6007L)
+#define PM_RX_READER_MASK (6008L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL (6009L)
+#define PM_RX_TX_FAST_POINTER_BLOCK_INTERVAL_D (6010L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL (6011L)
+#define PM_RX_TX_SLOW_POINTER_BLOCK_INTERVAL_D (6012L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL (6013L)
+#define PM_RX_WRITE_POINTER_BLOCK_INTERVAL_D (6014L)
+#define PM_TXRP_MEM_CTRL (6015L)
+#define PM_TXRP_MEM_CTRL_A (6016L)
+#define PM_TXRP_MEM_CTRL_CNT (6017L)
+#define PM_TXRP_MEM_DATA (6018L)
+#define PM_TXRP_MEM_DATA_BANK (6019L)
+#define PM_TXRP_MEM_DATA_ENTRY (6020L)
+#define PM_TXRP_MEM_DATA_HOST_BUFFER (6021L)
+#define PM_TXRP_MEM_DATA_VLD (6022L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL (6023L)
+#define PM_TX_READ_POINTER_BLOCK_INTERVAL_D (6024L)
+/* PRM_NT100A01_01 */
+#define PRM_NT100A01_01_POWER (6025L)
+#define PRM_NT100A01_01_POWER_PU_NSEB (6026L)
+#define PRM_NT100A01_01_POWER_PU_PHY (6027L)
+#define PRM_NT100A01_01_RST (6028L)
+#define PRM_NT100A01_01_RST_PERIPH (6029L)
+#define PRM_NT100A01_01_RST_PLATFORM (6030L)
+/* PRM_NT50B01_01 */
+#define PRM_NT50B01_01_POWER (6031L)
+#define PRM_NT50B01_01_POWER_PU_NSEB (6032L)
+#define PRM_NT50B01_01_POWER_PU_PHY (6033L)
+#define PRM_NT50B01_01_RST (6034L)
+#define PRM_NT50B01_01_RST_PERIPH (6035L)
+#define PRM_NT50B01_01_RST_PLATFORM (6036L)
+/* PTP1588 — IEEE 1588 Precision Time Protocol support block: PHY/MAC management
+ * (MDIO/MI) access, inband link status, RX host ring, and TX packet timestamping. */
+#define PTP1588_CONF (6037L)
+#define PTP1588_CONF_MII_RX_TX_LOOP (6038L)
+#define PTP1588_CONF_MII_TX_RX_LOOP (6039L)
+#define PTP1588_CONF_PHY_RST (6040L)
+#define PTP1588_CONF_PHY_RST1 (6041L)
+#define PTP1588_CONF_PHY_RST2 (6042L)
+#define PTP1588_CONF_PTP_CTRL_LOCAL (6043L)
+#define PTP1588_CONF_PTP_RX_CTRL (6044L)
+#define PTP1588_CONF_PTP_TX_CTRL (6045L)
+#define PTP1588_CONF_PTP_TX_CTRL_OS (6046L)
+#define PTP1588_CONF_RX_IGNORE_DEST_ADDR (6047L)
+#define PTP1588_CONF_TG_CMD (6048L)
+#define PTP1588_CONF_TG_MODE (6049L)
+#define PTP1588_CONF_TSM_MI_ACK (6050L)
+#define PTP1588_CONF_TSM_MI_BUSY (6051L)
+#define PTP1588_CONF_TSM_MI_ENA (6052L)
+#define PTP1588_CONF_TSM_MI_REQ (6053L)
+#define PTP1588_CONF_TX_IFG (6054L)
+#define PTP1588_CONF_TX_IGNORE_DEST_ADDR (6055L)
+#define PTP1588_CTRL (6056L)
+#define PTP1588_CTRL_CLK_ENABLE (6057L)
+#define PTP1588_CTRL_MII_RX_TX_LOOP (6058L)
+#define PTP1588_CTRL_MII_TX_RX_LOOP (6059L)
+#define PTP1588_CTRL_PRESENT (6060L)
+#define PTP1588_CTRL_RESET_N (6061L)
+#define PTP1588_CTRL_TS_MI_ACK (6062L)
+#define PTP1588_CTRL_TS_MI_BUSY (6063L)
+#define PTP1588_CTRL_TS_MI_ENA (6064L)
+#define PTP1588_CTRL_TS_MI_REQ (6065L)
+#define PTP1588_CTRL_TX_IFG (6066L)
+#define PTP1588_GP_DATA (6067L)
+#define PTP1588_GP_DATA_GPIO (6068L)
+#define PTP1588_GP_DATA_PWRDOWN_INTN (6069L)
+#define PTP1588_GP_DATA_TIMESYNC_CON (6070L)
+#define PTP1588_GP_DATA_LH (6071L)
+#define PTP1588_GP_DATA_LH_GPIO (6072L)
+#define PTP1588_GP_DATA_LH_PWRDOWN_INTN (6073L)
+#define PTP1588_GP_DATA_LH_TIMESYNC_CON (6074L)
+#define PTP1588_GP_DATA_LL (6075L)
+#define PTP1588_GP_DATA_LL_GPIO (6076L)
+#define PTP1588_GP_DATA_LL_PWRDOWN_INTN (6077L)
+#define PTP1588_GP_DATA_LL_TIMESYNC_CON (6078L)
+#define PTP1588_GP_OE (6079L)
+#define PTP1588_GP_OE_GPIO (6080L)
+#define PTP1588_GP_OE_PWRDOWN_INTN (6081L)
+#define PTP1588_GP_OE_TIMESYNC_CON (6082L)
+#define PTP1588_MAC_HOST_ADDR (6083L)
+#define PTP1588_MAC_HOST_ADDR_ADDR (6084L)
+#define PTP1588_MAC_HOST_ADDR_MDIO_ACCESS (6085L)
+#define PTP1588_MAC_HOST_ADDR_OPCODE (6086L)
+#define PTP1588_MAC_HOST_ADDR_RDY (6087L)
+#define PTP1588_MAC_HOST_DATA_LSB (6088L)
+#define PTP1588_MAC_HOST_DATA_LSB_DATA (6089L)
+#define PTP1588_MAC_HOST_DATA_MSB (6090L)
+#define PTP1588_MAC_HOST_DATA_MSB_DATA (6091L)
+#define PTP1588_MAC_INBAND_STAT (6092L)
+#define PTP1588_MAC_INBAND_STAT_DUPLEX (6093L)
+#define PTP1588_MAC_INBAND_STAT_LINK (6094L)
+#define PTP1588_MAC_INBAND_STAT_SPEED (6095L)
+#define PTP1588_MAC_MI_CONF (6096L)
+#define PTP1588_MAC_MI_CONF_ACCESS_TYPE (6097L)
+#define PTP1588_MAC_MI_CONF_ADDRESS (6098L)
+#define PTP1588_MAC_MI_CONF_RDY (6099L)
+#define PTP1588_MAC_MI_DATA (6100L)
+#define PTP1588_MAC_MI_DATA_DATA (6101L)
+#define PTP1588_RX_HOST_ADR_LSB (6102L)
+#define PTP1588_RX_HOST_ADR_LSB_LSB (6103L)
+#define PTP1588_RX_HOST_ADR_MSB (6104L)
+#define PTP1588_RX_HOST_ADR_MSB_MSB (6105L)
+#define PTP1588_RX_HOST_CONF (6106L)
+#define PTP1588_RX_HOST_CONF_ENA (6107L)
+#define PTP1588_RX_HOST_CONF_RDPTR (6108L)
+#define PTP1588_RX_HOST_CONF_REDUCED (6109L)
+#define PTP1588_RX_HOST_CTRL (6110L)
+#define PTP1588_RX_HOST_CTRL_ENA (6111L)
+#define PTP1588_RX_HOST_CTRL_RDPTR (6112L)
+#define PTP1588_RX_HOST_CTRL_REDUCED (6113L)
+#define PTP1588_STAT (6114L)
+#define PTP1588_STAT_DATA (6115L)
+#define PTP1588_STAT_CONF (6116L)
+#define PTP1588_STAT_CONF_INDEX (6117L)
+#define PTP1588_STAT_CONF_LOCK (6118L)
+#define PTP1588_STAT_CTRL (6119L)
+#define PTP1588_STAT_CTRL_INDEX (6120L)
+#define PTP1588_STAT_CTRL_LOCK (6121L)
+#define PTP1588_TX_FIRST_DAT (6122L)
+#define PTP1588_TX_FIRST_DAT_DAT (6123L)
+#define PTP1588_TX_LAST1_DAT (6124L)
+#define PTP1588_TX_LAST1_DAT_DAT (6125L)
+#define PTP1588_TX_LAST2_DAT (6126L)
+#define PTP1588_TX_LAST2_DAT_DAT (6127L)
+#define PTP1588_TX_LAST3_DAT (6128L)
+#define PTP1588_TX_LAST3_DAT_DAT (6129L)
+#define PTP1588_TX_LAST4_DAT (6130L)
+#define PTP1588_TX_LAST4_DAT_DAT (6131L)
+#define PTP1588_TX_MID_DAT (6132L)
+#define PTP1588_TX_MID_DAT_DAT (6133L)
+#define PTP1588_TX_PACKET_STATE (6134L)
+#define PTP1588_TX_PACKET_STATE_MSG_TYPE (6135L)
+#define PTP1588_TX_PACKET_STATE_PCK_TYPE (6136L)
+#define PTP1588_TX_PACKET_STATE_SEQ_ID (6137L)
+#define PTP1588_TX_PACKET_STATE_TEST_MARGIN (6138L)
+#define PTP1588_TX_PACKET_STATE_VALID (6139L)
+#define PTP1588_TX_STATUS (6140L)
+#define PTP1588_TX_STATUS_DB_ERR (6141L)
+#define PTP1588_TX_STATUS_DB_FULL (6142L)
+#define PTP1588_TX_STATUS_FIFO_STATUS (6143L)
+#define PTP1588_TX_STATUS_RDY (6144L)
+#define PTP1588_TX_STATUS_TG_ENA (6145L)
+#define PTP1588_TX_STATUS_TG_MODE (6146L)
+#define PTP1588_TX_TIMESTAMP_NS (6147L)
+#define PTP1588_TX_TIMESTAMP_NS_TIMESTAMP (6148L)
+#define PTP1588_TX_TIMESTAMP_SEC (6149L)
+#define PTP1588_TX_TIMESTAMP_SEC_TIMESTAMP (6150L)
+/* QM */
+#define QM_BLOCK_SIZE (6151L)
+#define QM_BLOCK_SIZE_CELLS (6152L)
+#define QM_CTRL (6153L)
+#define QM_CTRL_ACTIVE_QUEUES (6154L)
+#define QM_CTRL_ACTIVE_QUEUES_QPI_BYPASS (6155L)
+#define QM_CTRL_ENABLE (6156L)
+#define QM_CTRL_PRIORITY_SCHEME (6157L)
+#define QM_DEBUG_BLOCK_SIZE (6158L)
+#define QM_DEBUG_BLOCK_SIZE_CELLS (6159L)
+#define QM_DEBUG_CRC (6160L)
+#define QM_DEBUG_CRC_FORCE_ERROR (6161L)
+#define QM_DEBUG_SDRAM_SIZE (6162L)
+#define QM_DEBUG_SDRAM_SIZE_MASK (6163L)
+#define QM_GROUP_LIMIT_MEM_CTRL (6164L)
+#define QM_GROUP_LIMIT_MEM_CTRL_A (6165L)
+#define QM_GROUP_LIMIT_MEM_CTRL_CNT (6166L)
+#define QM_GROUP_LIMIT_MEM_DATA (6167L)
+#define QM_GROUP_LIMIT_MEM_DATA_LIMIT (6168L)
+#define QM_GROUP_MAPPING_MEM_CTRL (6169L)
+#define QM_GROUP_MAPPING_MEM_CTRL_A (6170L)
+#define QM_GROUP_MAPPING_MEM_CTRL_CNT (6171L)
+#define QM_GROUP_MAPPING_MEM_DATA (6172L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP0 (6173L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP1 (6174L)
+#define QM_GROUP_MAPPING_MEM_DATA_GROUP2 (6175L)
+#define QM_PRIO_LEVELS_MEM_CTRL (6176L)
+#define QM_PRIO_LEVELS_MEM_CTRL_A (6177L)
+#define QM_PRIO_LEVELS_MEM_CTRL_CNT (6178L)
+#define QM_PRIO_LEVELS_MEM_DATA (6179L)
+#define QM_PRIO_LEVELS_MEM_DATA_PRIO (6180L)
+#define QM_QUEUE_LIMIT_MEM_CTRL (6181L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_A (6182L)
+#define QM_QUEUE_LIMIT_MEM_CTRL_CNT (6183L)
+#define QM_QUEUE_LIMIT_MEM_DATA (6184L)
+#define QM_QUEUE_LIMIT_MEM_DATA_LIMIT (6185L)
+#define QM_STATUS_BLOCKED (6186L)
+#define QM_STATUS_BLOCKED_D (6187L)
+#define QM_STATUS_BLOCKED_HI (6188L)
+#define QM_STATUS_BLOCKED_HI_D (6189L)
+#define QM_STATUS_BLOCKING (6190L)
+#define QM_STATUS_BLOCKING_D (6191L)
+#define QM_STATUS_BLOCKING_HI (6192L)
+#define QM_STATUS_BLOCKING_HI_D (6193L)
+#define QM_STATUS_CRC_ERROR (6194L)
+#define QM_STATUS_CRC_ERROR_CNT (6195L)
+#define QM_STATUS_EMPTY (6196L)
+#define QM_STATUS_EMPTY_D (6197L)
+#define QM_STATUS_EMPTY_HI (6198L)
+#define QM_STATUS_EMPTY_HI_D (6199L)
+#define QM_STATUS_FLUSH_DROP (6200L)
+#define QM_STATUS_FLUSH_DROP_CNT (6201L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE (6202L)
+#define QM_STATUS_SDRAM_BLOCK_MAX_USAGE_D (6203L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE (6204L)
+#define QM_STATUS_SDRAM_BLOCK_USAGE_D (6205L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE (6206L)
+#define QM_STATUS_SDRAM_CELL_MAX_USAGE_D (6207L)
+#define QM_STATUS_SDRAM_CELL_USAGE (6208L)
+#define QM_STATUS_SDRAM_CELL_USAGE_D (6209L)
+/* QSL */
+#define QSL_LTX_CTRL (6210L)
+#define QSL_LTX_CTRL_ADR (6211L)
+#define QSL_LTX_CTRL_CNT (6212L)
+#define QSL_LTX_DATA (6213L)
+#define QSL_LTX_DATA_LR (6214L)
+#define QSL_LTX_DATA_TSA (6215L)
+#define QSL_LTX_DATA_TX_PORT (6216L)
+#define QSL_QEN_CTRL (6217L)
+#define QSL_QEN_CTRL_ADR (6218L)
+#define QSL_QEN_CTRL_CNT (6219L)
+#define QSL_QEN_DATA (6220L)
+#define QSL_QEN_DATA_EN (6221L)
+#define QSL_QST_CTRL (6222L)
+#define QSL_QST_CTRL_ADR (6223L)
+#define QSL_QST_CTRL_CNT (6224L)
+#define QSL_QST_DATA (6225L)
+#define QSL_QST_DATA_EN (6226L)
+#define QSL_QST_DATA_LRE (6227L)
+#define QSL_QST_DATA_QEN (6228L)
+#define QSL_QST_DATA_QUEUE (6229L)
+#define QSL_QST_DATA_TCI (6230L)
+#define QSL_QST_DATA_TX_PORT (6231L)
+#define QSL_QST_DATA_VEN (6232L)
+#define QSL_RCP_CTRL (6233L)
+#define QSL_RCP_CTRL_ADR (6234L)
+#define QSL_RCP_CTRL_CNT (6235L)
+#define QSL_RCP_DATA (6236L)
+#define QSL_RCP_DATA_CAO (6237L)
+#define QSL_RCP_DATA_DISCARD (6238L)
+#define QSL_RCP_DATA_DROP (6239L)
+#define QSL_RCP_DATA_LR (6240L)
+#define QSL_RCP_DATA_TBL_HI (6241L)
+#define QSL_RCP_DATA_TBL_IDX (6242L)
+#define QSL_RCP_DATA_TBL_LO (6243L)
+#define QSL_RCP_DATA_TBL_MSK (6244L)
+#define QSL_RCP_DATA_TSA (6245L)
+#define QSL_RCP_DATA_VLI (6246L)
+#define QSL_UNMQ_CTRL (6247L)
+#define QSL_UNMQ_CTRL_ADR (6248L)
+#define QSL_UNMQ_CTRL_CNT (6249L)
+#define QSL_UNMQ_DATA (6250L)
+#define QSL_UNMQ_DATA_DEST_QUEUE (6251L)
+#define QSL_UNMQ_DATA_EN (6252L)
+/* QSPI — Quad SPI flash controller. The register set below (SRR, CR, SR, DTR,
+ * DRR, SSR, TX/RX_FIFO_OCY, DGIE, IER, ISR) matches the Xilinx AXI Quad SPI
+ * LogiCORE IP register space. */
+#define QSPI_CR (6253L)
+#define QSPI_CR_CPHA (6254L)
+#define QSPI_CR_CPOL (6255L)
+#define QSPI_CR_LOOP (6256L)
+#define QSPI_CR_LSBF (6257L)
+#define QSPI_CR_MSSAE (6258L)
+#define QSPI_CR_MST (6259L)
+#define QSPI_CR_MTI (6260L)
+#define QSPI_CR_RXFIFO_RST (6261L)
+#define QSPI_CR_SPE (6262L)
+#define QSPI_CR_TXFIFO_RST (6263L)
+#define QSPI_DGIE (6264L)
+#define QSPI_DGIE_GIE (6265L)
+#define QSPI_DRR (6266L)
+#define QSPI_DRR_DATA_VAL (6267L)
+#define QSPI_DTR (6268L)
+#define QSPI_DTR_DATA_VAL (6269L)
+#define QSPI_IER (6270L)
+#define QSPI_IER_CMD_ERR (6271L)
+#define QSPI_IER_CPOL_CPHA_ERR (6272L)
+#define QSPI_IER_DRR_FULL (6273L)
+#define QSPI_IER_DRR_NEMPTY (6274L)
+#define QSPI_IER_DRR_OR (6275L)
+#define QSPI_IER_DTR_EMPTY (6276L)
+#define QSPI_IER_DTR_UR (6277L)
+#define QSPI_IER_LOOP_ERR (6278L)
+#define QSPI_IER_MODF (6279L)
+#define QSPI_IER_MSB_ERR (6280L)
+#define QSPI_IER_SLV_ERR (6281L)
+#define QSPI_IER_SLV_MODF (6282L)
+#define QSPI_IER_SLV_MS (6283L)
+#define QSPI_IER_TXFIFO_HEMPTY (6284L)
+#define QSPI_ISR (6285L)
+#define QSPI_ISR_CMD_ERR (6286L)
+#define QSPI_ISR_CPOL_CPHA_ERR (6287L)
+#define QSPI_ISR_DRR_FULL (6288L)
+#define QSPI_ISR_DRR_NEMPTY (6289L)
+#define QSPI_ISR_DRR_OR (6290L)
+#define QSPI_ISR_DTR_EMPTY (6291L)
+#define QSPI_ISR_DTR_UR (6292L)
+#define QSPI_ISR_LOOP_ERR (6293L)
+#define QSPI_ISR_MODF (6294L)
+#define QSPI_ISR_MSB_ERR (6295L)
+#define QSPI_ISR_SLV_ERR (6296L)
+#define QSPI_ISR_SLV_MODF (6297L)
+#define QSPI_ISR_SLV_MS (6298L)
+#define QSPI_ISR_TXFIFO_HEMPTY (6299L)
+#define QSPI_RX_FIFO_OCY (6300L)
+#define QSPI_RX_FIFO_OCY_OCY_VAL (6301L)
+#define QSPI_SR (6302L)
+#define QSPI_SR_CMD_ERR (6303L)
+#define QSPI_SR_CPOL_CPHA_ERR (6304L)
+#define QSPI_SR_LOOP_ERR (6305L)
+#define QSPI_SR_MODF (6306L)
+#define QSPI_SR_MSB_ERR (6307L)
+#define QSPI_SR_RXEMPTY (6308L)
+#define QSPI_SR_RXFULL (6309L)
+#define QSPI_SR_SLVMS (6310L)
+#define QSPI_SR_SLV_ERR (6311L)
+#define QSPI_SR_TXEMPTY (6312L)
+#define QSPI_SR_TXFULL (6313L)
+#define QSPI_SRR (6314L)
+#define QSPI_SRR_RST (6315L)
+#define QSPI_SSR (6316L)
+#define QSPI_SSR_SEL_SLV (6317L)
+#define QSPI_TX_FIFO_OCY (6318L)
+#define QSPI_TX_FIFO_OCY_OCY_VAL (6319L)
+/* R2DRP — register-to-DRP bridge: a single CTRL register used to drive
+ * transactions on a Xilinx DRP (Dynamic Reconfiguration Port): address,
+ * write data/enable, result, done and busy indications.
+ * NOTE(review): "DRP" expansion inferred from the ADR/DATA/WREN/DONE/RES
+ * field names, which match a DRP access sequence — confirm against HW spec. */
+#define R2DRP_CTRL (6320L)
+#define R2DRP_CTRL_ADR (6321L)
+#define R2DRP_CTRL_DATA (6322L)
+#define R2DRP_CTRL_DBG_BUSY (6323L)
+#define R2DRP_CTRL_DONE (6324L)
+#define R2DRP_CTRL_RES (6325L)
+#define R2DRP_CTRL_WREN (6326L)
+/* RAC — register access control: RAB (register access bus) inbound/outbound DMA
+ * rings plus NMB (non-mapped bus) read/write access and debug registers.
+ * NOTE(review): expansions of RAB/NMB inferred from field names — confirm. */
+#define RAC_DBG_CTRL (6327L)
+#define RAC_DBG_CTRL_C (6328L)
+#define RAC_DBG_DATA (6329L)
+#define RAC_DBG_DATA_D (6330L)
+#define RAC_DUMMY0 (6331L)
+#define RAC_DUMMY1 (6332L)
+#define RAC_DUMMY2 (6333L)
+#define RAC_NDM_REGISTER (6334L)
+#define RAC_NDM_REGISTER_NDM (6335L)
+#define RAC_NMB_DATA (6336L)
+#define RAC_NMB_DATA_NMB_DATA (6337L)
+#define RAC_NMB_RD_ADR (6338L)
+#define RAC_NMB_RD_ADR_ADR (6339L)
+#define RAC_NMB_RD_ADR_RES (6340L)
+#define RAC_NMB_STATUS (6341L)
+#define RAC_NMB_STATUS_BUS_TIMEOUT (6342L)
+#define RAC_NMB_STATUS_NMB_READY (6343L)
+#define RAC_NMB_WR_ADR (6344L)
+#define RAC_NMB_WR_ADR_ADR (6345L)
+#define RAC_NMB_WR_ADR_RES (6346L)
+#define RAC_RAB_BUF_FREE (6347L)
+#define RAC_RAB_BUF_FREE_IB_FREE (6348L)
+#define RAC_RAB_BUF_FREE_IB_OVF (6349L)
+#define RAC_RAB_BUF_FREE_OB_FREE (6350L)
+#define RAC_RAB_BUF_FREE_OB_OVF (6351L)
+#define RAC_RAB_BUF_FREE_TIMEOUT (6352L)
+#define RAC_RAB_BUF_USED (6353L)
+#define RAC_RAB_BUF_USED_FLUSH (6354L)
+#define RAC_RAB_BUF_USED_IB_USED (6355L)
+#define RAC_RAB_BUF_USED_OB_USED (6356L)
+#define RAC_RAB_DMA_IB_HI (6357L)
+#define RAC_RAB_DMA_IB_HI_PHYADDR (6358L)
+#define RAC_RAB_DMA_IB_LO (6359L)
+#define RAC_RAB_DMA_IB_LO_PHYADDR (6360L)
+#define RAC_RAB_DMA_IB_RD (6361L)
+#define RAC_RAB_DMA_IB_RD_PTR (6362L)
+#define RAC_RAB_DMA_IB_WR (6363L)
+#define RAC_RAB_DMA_IB_WR_PTR (6364L)
+#define RAC_RAB_DMA_OB_HI (6365L)
+#define RAC_RAB_DMA_OB_HI_PHYADDR (6366L)
+#define RAC_RAB_DMA_OB_LO (6367L)
+#define RAC_RAB_DMA_OB_LO_PHYADDR (6368L)
+#define RAC_RAB_DMA_OB_WR (6369L)
+#define RAC_RAB_DMA_OB_WR_PTR (6370L)
+#define RAC_RAB_IB_DATA (6371L)
+#define RAC_RAB_IB_DATA_D (6372L)
+#define RAC_RAB_INIT (6373L)
+#define RAC_RAB_INIT_RAB (6374L)
+#define RAC_RAB_OB_DATA (6375L)
+#define RAC_RAB_OB_DATA_D (6376L)
+/* RBH */
+#define RBH_CTRL (6377L)
+#define RBH_CTRL_ENABLE_METADATA_HB (6378L)
+#define RBH_CTRL_ENABLE_PM (6379L)
+#define RBH_CTRL_ENABLE_QHM (6380L)
+#define RBH_CTRL_HB_MAX (6381L)
+#define RBH_CTRL_HB_SEGMENT_SIZE (6382L)
+#define RBH_CTRL_RESERVED (6383L)
+#define RBH_CTRL_RESET_CREDITS_QHM (6384L)
+#define RBH_DEB_REG1 (6385L)
+#define RBH_DEB_REG1_VALUE (6386L)
+#define RBH_DEB_REG2 (6387L)
+#define RBH_DEB_REG2_VALUE (6388L)
+#define RBH_DEB_REG3 (6389L)
+#define RBH_DEB_REG3_VALUE (6390L)
+#define RBH_FLUSH_ADR_HI (6391L)
+#define RBH_FLUSH_ADR_HI_VALUE (6392L)
+#define RBH_FLUSH_ADR_LO (6393L)
+#define RBH_FLUSH_ADR_LO_VALUE (6394L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL (6395L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_A (6396L)
+#define RBH_HOST_BUF_SIZE_MEM_CTRL_CNT (6397L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA (6398L)
+#define RBH_HOST_BUF_SIZE_MEM_DATA_HB_SEGMENT_CNT (6399L)
+#define RBH_STATUS (6400L)
+#define RBH_STATUS_BUFFER_EMPTY (6401L)
+#define RBH_STATUS_DWA_DATAFIFO_EMPTY (6402L)
+#define RBH_STATUS_PWA_DATAFIFO_EMPTY (6403L)
+#define RBH_WP_SET_MEM_CTRL (6404L)
+#define RBH_WP_SET_MEM_CTRL_A (6405L)
+#define RBH_WP_SET_MEM_CTRL_CNT (6406L)
+#define RBH_WP_SET_MEM_DATA (6407L)
+#define RBH_WP_SET_MEM_DATA_WP (6408L)
+/* RFD */
+#define RFD_CTRL (6409L)
+#define RFD_CTRL_CFP (6410L)
+#define RFD_CTRL_ISL (6411L)
+#define RFD_CTRL_PWMCW (6412L)
+#define RFD_MAX_FRAME_SIZE (6413L)
+#define RFD_MAX_FRAME_SIZE_MAX (6414L)
+#define RFD_TNL_VLAN (6415L)
+#define RFD_TNL_VLAN_TPID0 (6416L)
+#define RFD_TNL_VLAN_TPID1 (6417L)
+#define RFD_VLAN (6418L)
+#define RFD_VLAN_TPID0 (6419L)
+#define RFD_VLAN_TPID1 (6420L)
+#define RFD_VXLAN (6421L)
+#define RFD_VXLAN_DP0 (6422L)
+#define RFD_VXLAN_DP1 (6423L)
+/* RMC */
+#define RMC_CTRL (6424L)
+#define RMC_CTRL_BLOCK_KEEPA (6425L)
+#define RMC_CTRL_BLOCK_MAC_PORT (6426L)
+#define RMC_CTRL_BLOCK_RPP_SLICE (6427L)
+#define RMC_CTRL_BLOCK_STATT (6428L)
+#define RMC_CTRL_LAG_PHY_ODD_EVEN (6429L)
+#define RMC_DBG (6430L)
+#define RMC_DBG_MERGE (6431L)
+#define RMC_MAC_IF (6432L)
+#define RMC_MAC_IF_ERR (6433L)
+#define RMC_STATUS (6434L)
+#define RMC_STATUS_DESCR_FIFO_OF (6435L)
+#define RMC_STATUS_SF_RAM_OF (6436L)
+/* RNTC — simple host TX channel: an enable bit, an empty-status flag and a
+ * data register for pushing words toward the device.
+ * NOTE(review): the expansion of "RNTC" is not shown by these names — only the
+ * enable/empty/TX-data structure is visible here; confirm the module's purpose
+ * against the hardware documentation. */
+#define RNTC_CTRL (6437L)
+#define RNTC_CTRL_RNTC_ENA (6438L)
+#define RNTC_STAT (6439L)
+#define RNTC_STAT_EMPTY (6440L)
+#define RNTC_TX_DATA (6441L)
+#define RNTC_TX_DATA_D (6442L)
+/* ROA */
+#define ROA_CONFIG (6443L)
+#define ROA_CONFIG_FWD_CELLBUILDER_PCKS (6444L)
+#define ROA_CONFIG_FWD_NON_NORMAL_PCKS (6445L)
+#define ROA_CONFIG_FWD_NORMAL_PCKS (6446L)
+#define ROA_CONFIG_FWD_RECIRCULATE (6447L)
+#define ROA_CONFIG_FWD_TXPORT0 (6448L)
+#define ROA_CONFIG_FWD_TXPORT1 (6449L)
+#define ROA_IGS (6450L)
+#define ROA_IGS_BYTE (6451L)
+#define ROA_IGS_BYTE_DROP (6452L)
+#define ROA_IGS_PKT (6453L)
+#define ROA_IGS_PKT_DROP (6454L)
+#define ROA_LAGCFG_CTRL (6455L)
+#define ROA_LAGCFG_CTRL_ADR (6456L)
+#define ROA_LAGCFG_CTRL_CNT (6457L)
+#define ROA_LAGCFG_DATA (6458L)
+#define ROA_LAGCFG_DATA_TXPHY_PORT (6459L)
+#define ROA_RCC (6460L)
+#define ROA_RCC_BYTE (6461L)
+#define ROA_RCC_BYTE_DROP (6462L)
+#define ROA_RCC_PKT (6463L)
+#define ROA_RCC_PKT_DROP (6464L)
+#define ROA_TUNCFG_CTRL (6465L)
+#define ROA_TUNCFG_CTRL_ADR (6466L)
+#define ROA_TUNCFG_CTRL_CNT (6467L)
+#define ROA_TUNCFG_DATA (6468L)
+#define ROA_TUNCFG_DATA_PUSH_TUNNEL (6469L)
+#define ROA_TUNCFG_DATA_RECIRCULATE (6470L)
+#define ROA_TUNCFG_DATA_RECIRC_BYPASS (6471L)
+#define ROA_TUNCFG_DATA_RECIRC_PORT (6472L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_PRECALC (6473L)
+#define ROA_TUNCFG_DATA_TUN_IPCS_UPD (6474L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_PRECALC (6475L)
+#define ROA_TUNCFG_DATA_TUN_IPTL_UPD (6476L)
+#define ROA_TUNCFG_DATA_TUN_IP_TYPE (6477L)
+#define ROA_TUNCFG_DATA_TUN_LEN (6478L)
+#define ROA_TUNCFG_DATA_TUN_TYPE (6479L)
+#define ROA_TUNCFG_DATA_TUN_VLAN (6480L)
+#define ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD (6481L)
+#define ROA_TUNCFG_DATA_TX_LAG_IX (6482L)
+#define ROA_TUNHDR_CTRL (6483L)
+#define ROA_TUNHDR_CTRL_ADR (6484L)
+#define ROA_TUNHDR_CTRL_CNT (6485L)
+#define ROA_TUNHDR_DATA (6486L)
+#define ROA_TUNHDR_DATA_TUNNEL_HDR (6487L)
+/* RPL */
+#define RPL_EXT_CTRL (6488L)
+#define RPL_EXT_CTRL_ADR (6489L)
+#define RPL_EXT_CTRL_CNT (6490L)
+#define RPL_EXT_DATA (6491L)
+#define RPL_EXT_DATA_RPL_PTR (6492L)
+#define RPL_RCP_CTRL (6493L)
+#define RPL_RCP_CTRL_ADR (6494L)
+#define RPL_RCP_CTRL_CNT (6495L)
+#define RPL_RCP_DATA (6496L)
+#define RPL_RCP_DATA_DYN (6497L)
+#define RPL_RCP_DATA_EXT_PRIO (6498L)
+#define RPL_RCP_DATA_LEN (6499L)
+#define RPL_RCP_DATA_OFS (6500L)
+#define RPL_RCP_DATA_RPL_PTR (6501L)
+#define RPL_RPL_CTRL (6502L)
+#define RPL_RPL_CTRL_ADR (6503L)
+#define RPL_RPL_CTRL_CNT (6504L)
+#define RPL_RPL_DATA (6505L)
+#define RPL_RPL_DATA_VALUE (6506L)
+/* RPP_LR */
+#define RPP_LR_IFR_RCP_CTRL (6507L)
+#define RPP_LR_IFR_RCP_CTRL_ADR (6508L)
+#define RPP_LR_IFR_RCP_CTRL_CNT (6509L)
+#define RPP_LR_IFR_RCP_DATA (6510L)
+#define RPP_LR_IFR_RCP_DATA_EN (6511L)
+#define RPP_LR_IFR_RCP_DATA_MTU (6512L)
+#define RPP_LR_RCP_CTRL (6513L)
+#define RPP_LR_RCP_CTRL_ADR (6514L)
+#define RPP_LR_RCP_CTRL_CNT (6515L)
+#define RPP_LR_RCP_DATA (6516L)
+#define RPP_LR_RCP_DATA_EXP (6517L)
+/* RST7000 */
+#define RST7000_RST (6518L)
+#define RST7000_RST_SYS (6519L)
+/* RST7001 */
+#define RST7001_RST (6520L)
+#define RST7001_RST_SYS (6521L)
+/* RST9500 */
+#define RST9500_CTRL (6598L)
+#define RST9500_CTRL_PTP_MMCM_CLKSEL (6599L)
+#define RST9500_CTRL_TS_CLKSEL (6600L)
+#define RST9500_CTRL_TS_CLKSEL_OVERRIDE (6601L)
+#define RST9500_RST (6602L)
+#define RST9500_RST_DDR3 (6603L)
+#define RST9500_RST_DDR3_IDLY_MMCM (6604L)
+#define RST9500_RST_PERIPH (6605L)
+#define RST9500_RST_PHY10G_QPLL (6606L)
+#define RST9500_RST_PHY3S10G (6607L)
+#define RST9500_RST_PHY3S_MMCM (6608L)
+#define RST9500_RST_PTP (6609L)
+#define RST9500_RST_PTP_MMCM (6610L)
+#define RST9500_RST_RPP (6611L)
+#define RST9500_RST_SDC (6612L)
+#define RST9500_RST_SI5326 (6613L)
+#define RST9500_RST_SYS (6614L)
+#define RST9500_RST_TS (6615L)
+#define RST9500_RST_TS_MMCM (6616L)
+#define RST9500_RST_STAT (6617L)
+#define RST9500_RST_STAT_PCS_RESET_BY_SERDES (6618L)
+#define RST9500_STAT (6619L)
+#define RST9500_STAT_DDR3_IDLY_MMCM_LOCKED (6620L)
+#define RST9500_STAT_DDR3_MMCM_LOCKED (6621L)
+#define RST9500_STAT_DDR3_PLL_LOCKED (6622L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_0_1 (6623L)
+#define RST9500_STAT_PHY10G_QPLL_LOCK_2_3 (6624L)
+#define RST9500_STAT_PHY3S_MMCM_LOCKED (6625L)
+#define RST9500_STAT_PTP_MMCM_LOCKED (6626L)
+#define RST9500_STAT_SYNCE_MAIN_CLK_LOS (6627L)
+#define RST9500_STAT_SYS_MMCM_LOCKED (6628L)
+#define RST9500_STAT_TS_MMCM_LOCKED (6629L)
+#define RST9500_STICKY (6630L)
+#define RST9500_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6631L)
+#define RST9500_STICKY_DDR3_MMCM_UNLOCKED (6632L)
+#define RST9500_STICKY_DDR3_PLL_UNLOCKED (6633L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6634L)
+#define RST9500_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6635L)
+#define RST9500_STICKY_PHY3S_MMCM_UNLOCKED (6636L)
+#define RST9500_STICKY_PTP_MMCM_UNLOCKED (6637L)
+#define RST9500_STICKY_TS_MMCM_UNLOCKED (6638L)
+/* RST9501 */
+#define RST9501_CTRL (6639L)
+#define RST9501_CTRL_PTP_MMCM_CLKSEL (6640L)
+#define RST9501_CTRL_TS_CLKSEL (6641L)
+#define RST9501_CTRL_TS_CLKSEL_OVERRIDE (6642L)
+#define RST9501_RST (6643L)
+#define RST9501_RST_DDR3 (6644L)
+#define RST9501_RST_DDR3_IDLY_MMCM (6645L)
+#define RST9501_RST_PERIPH (6646L)
+#define RST9501_RST_PHY10G_QPLL (6647L)
+#define RST9501_RST_PHY3S10G (6648L)
+#define RST9501_RST_PHY3S_MMCM (6649L)
+#define RST9501_RST_PTP (6650L)
+#define RST9501_RST_PTP_MMCM (6651L)
+#define RST9501_RST_RPP (6652L)
+#define RST9501_RST_SDC (6653L)
+#define RST9501_RST_SI5326 (6654L)
+#define RST9501_RST_SYS (6655L)
+#define RST9501_RST_TS (6656L)
+#define RST9501_RST_TS_MMCM (6657L)
+#define RST9501_RST_STAT (6658L)
+#define RST9501_RST_STAT_PCS_RESET_BY_SERDES (6659L)
+#define RST9501_STAT (6660L)
+#define RST9501_STAT_DDR3_IDLY_MMCM_LOCKED (6661L)
+#define RST9501_STAT_DDR3_MMCM_LOCKED (6662L)
+#define RST9501_STAT_DDR3_PLL_LOCKED (6663L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_0_1 (6664L)
+#define RST9501_STAT_PHY10G_QPLL_LOCK_2_3 (6665L)
+#define RST9501_STAT_PHY3S_MMCM_LOCKED (6666L)
+#define RST9501_STAT_PTP_MMCM_LOCKED (6667L)
+#define RST9501_STAT_SYNCE_MAIN_CLK_LOS (6668L)
+#define RST9501_STAT_SYS_MMCM_LOCKED (6669L)
+#define RST9501_STAT_TS_MMCM_LOCKED (6670L)
+#define RST9501_STICKY (6671L)
+#define RST9501_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6672L)
+#define RST9501_STICKY_DDR3_MMCM_UNLOCKED (6673L)
+#define RST9501_STICKY_DDR3_PLL_UNLOCKED (6674L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6675L)
+#define RST9501_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6676L)
+#define RST9501_STICKY_PHY3S_MMCM_UNLOCKED (6677L)
+#define RST9501_STICKY_PTP_MMCM_UNLOCKED (6678L)
+#define RST9501_STICKY_TS_MMCM_UNLOCKED (6679L)
+/* RST9502 */
+#define RST9502_CTRL (6680L)
+#define RST9502_CTRL_PTP_MMCM_CLKSEL (6681L)
+#define RST9502_CTRL_TS_CLKSEL (6682L)
+#define RST9502_CTRL_TS_CLKSEL_OVERRIDE (6683L)
+#define RST9502_RST (6684L)
+#define RST9502_RST_DDR3 (6685L)
+#define RST9502_RST_DDR3_IDLY_MMCM (6686L)
+#define RST9502_RST_NSEB (6687L)
+#define RST9502_RST_PERIPH (6688L)
+#define RST9502_RST_PHY10G_QPLL (6689L)
+#define RST9502_RST_PHY3S10G (6690L)
+#define RST9502_RST_PHY3S_MMCM (6691L)
+#define RST9502_RST_PTP (6692L)
+#define RST9502_RST_PTP_MMCM (6693L)
+#define RST9502_RST_RPP (6694L)
+#define RST9502_RST_SDC (6695L)
+#define RST9502_RST_SI5326 (6696L)
+#define RST9502_RST_SYS (6697L)
+#define RST9502_RST_TS (6698L)
+#define RST9502_RST_TS_MMCM (6699L)
+#define RST9502_RST_STAT (6700L)
+#define RST9502_RST_STAT_PCS_RESET_BY_SERDES (6701L)
+#define RST9502_STAT (6702L)
+#define RST9502_STAT_DDR3_IDLY_MMCM_LOCKED (6703L)
+#define RST9502_STAT_DDR3_MMCM_LOCKED (6704L)
+#define RST9502_STAT_DDR3_PLL_LOCKED (6705L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_0_1 (6706L)
+#define RST9502_STAT_PHY10G_QPLL_LOCK_2_3 (6707L)
+#define RST9502_STAT_PHY3S_MMCM_LOCKED (6708L)
+#define RST9502_STAT_PTP_MMCM_LOCKED (6709L)
+#define RST9502_STAT_SYNCE_MAIN_CLK_LOS (6710L)
+#define RST9502_STAT_SYS_MMCM_LOCKED (6711L)
+#define RST9502_STAT_TS_MMCM_LOCKED (6712L)
+#define RST9502_STICKY (6713L)
+#define RST9502_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6714L)
+#define RST9502_STICKY_DDR3_MMCM_UNLOCKED (6715L)
+#define RST9502_STICKY_DDR3_PLL_UNLOCKED (6716L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_0_1 (6717L)
+#define RST9502_STICKY_PHY10G_QPLL_UNLOCK_2_3 (6718L)
+#define RST9502_STICKY_PHY3S_MMCM_UNLOCKED (6719L)
+#define RST9502_STICKY_PTP_MMCM_UNLOCKED (6720L)
+#define RST9502_STICKY_TS_MMCM_UNLOCKED (6721L)
+/* RST9503 */
+#define RST9503_CTRL (6722L)
+#define RST9503_CTRL_PTP_MMCM_CLKSEL (6723L)
+#define RST9503_CTRL_TS_CLKSEL (6724L)
+#define RST9503_CTRL_TS_CLKSEL_OVERRIDE (6725L)
+#define RST9503_PORT_CLK_SEL (6726L)
+#define RST9503_PORT_CLK_SEL_PORT0 (6727L)
+#define RST9503_PORT_CLK_SEL_PORT1 (6728L)
+#define RST9503_RST (6729L)
+#define RST9503_RST_DDR3 (6730L)
+#define RST9503_RST_DDR3_IDLY_MMCM (6731L)
+#define RST9503_RST_MAC_RX (6732L)
+#define RST9503_RST_MAC_RX_MMCM (6733L)
+#define RST9503_RST_MAC_TX (6734L)
+#define RST9503_RST_NSEB (6735L)
+#define RST9503_RST_PCS_RX (6736L)
+#define RST9503_RST_PERIPH (6737L)
+#define RST9503_RST_PHY40G (6738L)
+#define RST9503_RST_PTP (6739L)
+#define RST9503_RST_PTP_MMCM (6740L)
+#define RST9503_RST_RPP (6741L)
+#define RST9503_RST_SDC (6742L)
+#define RST9503_RST_SERDES_RX (6743L)
+#define RST9503_RST_SERDES_TX (6744L)
+#define RST9503_RST_SI5326 (6745L)
+#define RST9503_RST_SYS (6746L)
+#define RST9503_RST_TS (6747L)
+#define RST9503_RST_TS_MMCM (6748L)
+#define RST9503_RST_STAT (6749L)
+#define RST9503_RST_STAT_PCS_RESET_BY_SERDES (6750L)
+#define RST9503_STAT (6751L)
+#define RST9503_STAT_DDR3_IDLY_MMCM_LOCKED (6752L)
+#define RST9503_STAT_DDR3_MMCM_LOCKED (6753L)
+#define RST9503_STAT_DDR3_PLL_LOCKED (6754L)
+#define RST9503_STAT_MAC_RX_MMCM_LOCKED (6755L)
+#define RST9503_STAT_PTP_MMCM_LOCKED (6756L)
+#define RST9503_STAT_SYNCE_MAIN_CLK_LOS (6757L)
+#define RST9503_STAT_SYS_MMCM_LOCKED (6758L)
+#define RST9503_STAT_TS_MMCM_LOCKED (6759L)
+#define RST9503_STICKY (6760L)
+#define RST9503_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6761L)
+#define RST9503_STICKY_DDR3_MMCM_UNLOCKED (6762L)
+#define RST9503_STICKY_DDR3_PLL_UNLOCKED (6763L)
+#define RST9503_STICKY_MAC_RX_MMCM_UNLOCKED (6764L)
+#define RST9503_STICKY_PTP_MMCM_UNLOCKED (6765L)
+#define RST9503_STICKY_TS_MMCM_UNLOCKED (6766L)
+/* RST9504 */
+#define RST9504_CTRL (6767L)
+#define RST9504_CTRL_PTP_MMCM_CLKSEL (6768L)
+#define RST9504_CTRL_TS_CLKSEL (6769L)
+#define RST9504_CTRL_TS_CLKSEL_OVERRIDE (6770L)
+#define RST9504_RST (6771L)
+#define RST9504_RST_DDR3 (6772L)
+#define RST9504_RST_DDR3_IDLY_MMCM (6773L)
+#define RST9504_RST_MAC_RX (6774L)
+#define RST9504_RST_MAC_RX_MMCM (6775L)
+#define RST9504_RST_MAC_TX (6776L)
+#define RST9504_RST_NSEB (6777L)
+#define RST9504_RST_PCS_RX (6778L)
+#define RST9504_RST_PERIPH (6779L)
+#define RST9504_RST_PHY100G (6780L)
+#define RST9504_RST_PTP (6781L)
+#define RST9504_RST_PTP_MMCM (6782L)
+#define RST9504_RST_RPP (6783L)
+#define RST9504_RST_SDC (6784L)
+#define RST9504_RST_SERDES_RX (6785L)
+#define RST9504_RST_SERDES_TX (6786L)
+#define RST9504_RST_SI5326 (6787L)
+#define RST9504_RST_SYS (6788L)
+#define RST9504_RST_TS (6789L)
+#define RST9504_RST_TS_MMCM (6790L)
+#define RST9504_RST_STAT (6791L)
+#define RST9504_RST_STAT_PCS_RESET_BY_SERDES (6792L)
+#define RST9504_STAT (6793L)
+#define RST9504_STAT_DDR3_IDLY_MMCM_LOCKED (6794L)
+#define RST9504_STAT_DDR3_MMCM_LOCKED (6795L)
+#define RST9504_STAT_DDR3_PLL_LOCKED (6796L)
+#define RST9504_STAT_MAC_RX_MMCM_LOCKED (6797L)
+#define RST9504_STAT_PTP_MMCM_LOCKED (6798L)
+#define RST9504_STAT_SYNCE_MAIN_CLK_LOS (6799L)
+#define RST9504_STAT_SYS_MMCM_LOCKED (6800L)
+#define RST9504_STAT_TS_MMCM_LOCKED (6801L)
+#define RST9504_STICKY (6802L)
+#define RST9504_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6803L)
+#define RST9504_STICKY_DDR3_MMCM_UNLOCKED (6804L)
+#define RST9504_STICKY_DDR3_PLL_UNLOCKED (6805L)
+#define RST9504_STICKY_MAC_RX_MMCM_UNLOCKED (6806L)
+#define RST9504_STICKY_PTP_MMCM_UNLOCKED (6807L)
+#define RST9504_STICKY_TS_MMCM_UNLOCKED (6808L)
+/* RST9505 */
+#define RST9505_CTRL (6809L)
+#define RST9505_CTRL_PTP_MMCM_CLKSEL (6810L)
+#define RST9505_CTRL_TS_CLKSEL (6811L)
+#define RST9505_CTRL_TS_CLKSEL_OVERRIDE (6812L)
+#define RST9505_RST (6813L)
+#define RST9505_RST_DDR3 (6814L)
+#define RST9505_RST_DDR3_IDLY_MMCM (6815L)
+#define RST9505_RST_MAC_RX (6816L)
+#define RST9505_RST_MAC_RX_MMCM (6817L)
+#define RST9505_RST_MAC_TX (6818L)
+#define RST9505_RST_NSEB (6819L)
+#define RST9505_RST_PCS_RX (6820L)
+#define RST9505_RST_PERIPH (6821L)
+#define RST9505_RST_PHY100G (6822L)
+#define RST9505_RST_PTP (6823L)
+#define RST9505_RST_PTP_MMCM (6824L)
+#define RST9505_RST_RPP (6825L)
+#define RST9505_RST_SDC (6826L)
+#define RST9505_RST_SERDES_RX (6827L)
+#define RST9505_RST_SERDES_TX (6828L)
+#define RST9505_RST_SI5326 (6829L)
+#define RST9505_RST_SYS (6830L)
+#define RST9505_RST_TS (6831L)
+#define RST9505_RST_TS_MMCM (6832L)
+#define RST9505_RST_STAT (6833L)
+#define RST9505_RST_STAT_PCS_RESET_BY_SERDES (6834L)
+#define RST9505_STAT (6835L)
+#define RST9505_STAT_DDR3_IDLY_MMCM_LOCKED (6836L)
+#define RST9505_STAT_DDR3_MMCM_LOCKED (6837L)
+#define RST9505_STAT_DDR3_PLL_LOCKED (6838L)
+#define RST9505_STAT_MAC_RX_MMCM_LOCKED (6839L)
+#define RST9505_STAT_PTP_MMCM_LOCKED (6840L)
+#define RST9505_STAT_SYNCE_MAIN_CLK_LOS (6841L)
+#define RST9505_STAT_SYS_MMCM_LOCKED (6842L)
+#define RST9505_STAT_TS_MMCM_LOCKED (6843L)
+#define RST9505_STICKY (6844L)
+#define RST9505_STICKY_DDR3_IDLY_MMCM_UNLOCKED (6845L)
+#define RST9505_STICKY_DDR3_MMCM_UNLOCKED (6846L)
+#define RST9505_STICKY_DDR3_PLL_UNLOCKED (6847L)
+#define RST9505_STICKY_MAC_RX_MMCM_UNLOCKED (6848L)
+#define RST9505_STICKY_PTP_MMCM_UNLOCKED (6849L)
+#define RST9505_STICKY_TS_MMCM_UNLOCKED (6850L)
+/* RST9506: placeholder, no register defines follow */
+/* RST9507: placeholder, no register defines follow */
+/* RST9508 */
+#define RST9508_CTRL (6851L)
+#define RST9508_CTRL_PTP_MMCM_CLKSEL (6852L)
+#define RST9508_CTRL_TS_CLKSEL (6853L)
+#define RST9508_CTRL_TS_CLKSEL_OVERRIDE (6854L)
+#define RST9508_CTRL_TS_CLKSEL_REF (6855L)
+#define RST9508_POWER (6856L)
+#define RST9508_POWER_PU_NSEB (6857L)
+#define RST9508_POWER_PU_PHY (6858L)
+#define RST9508_RST (6859L)
+#define RST9508_RST_CORE_MMCM (6860L)
+#define RST9508_RST_DDR4 (6861L)
+#define RST9508_RST_MAC_RX (6862L)
+#define RST9508_RST_PERIPH (6863L)
+#define RST9508_RST_PHY (6864L)
+#define RST9508_RST_PTP (6865L)
+#define RST9508_RST_PTP_MMCM (6866L)
+#define RST9508_RST_RPP (6867L)
+#define RST9508_RST_SDC (6868L)
+#define RST9508_RST_SYS (6869L)
+#define RST9508_RST_SYS_MMCM (6870L)
+#define RST9508_RST_TMC (6871L)
+#define RST9508_RST_TS (6872L)
+#define RST9508_RST_TSM_REF_MMCM (6873L)
+#define RST9508_RST_TS_MMCM (6874L)
+#define RST9508_STAT (6875L)
+#define RST9508_STAT_CORE_MMCM_LOCKED (6876L)
+#define RST9508_STAT_DDR4_MMCM_LOCKED (6877L)
+#define RST9508_STAT_DDR4_PLL_LOCKED (6878L)
+#define RST9508_STAT_PCI_SYS_MMCM_LOCKED (6879L)
+#define RST9508_STAT_PTP_MMCM_LOCKED (6880L)
+#define RST9508_STAT_SYS_MMCM_LOCKED (6881L)
+#define RST9508_STAT_TSM_REF_MMCM_LOCKED (6882L)
+#define RST9508_STAT_TS_MMCM_LOCKED (6883L)
+#define RST9508_STICKY (6884L)
+#define RST9508_STICKY_CORE_MMCM_UNLOCKED (6885L)
+#define RST9508_STICKY_DDR4_MMCM_UNLOCKED (6886L)
+#define RST9508_STICKY_DDR4_PLL_UNLOCKED (6887L)
+#define RST9508_STICKY_PCI_SYS_MMCM_UNLOCKED (6888L)
+#define RST9508_STICKY_PTP_MMCM_UNLOCKED (6889L)
+#define RST9508_STICKY_SYS_MMCM_UNLOCKED (6890L)
+#define RST9508_STICKY_TSM_REF_MMCM_UNLOCKED (6891L)
+#define RST9508_STICKY_TS_MMCM_UNLOCKED (6892L)
+/* RST9509: placeholder, no register defines follow */
+/* RST9510: placeholder, no register defines follow */
+/* RST9512 */
+#define RST9512_CTRL (6893L)
+#define RST9512_CTRL_PTP_MMCM_CLKSEL (6894L)
+#define RST9512_CTRL_TS_CLKSEL (6895L)
+#define RST9512_CTRL_TS_CLKSEL_OVERRIDE (6896L)
+#define RST9512_CTRL_TS_CLKSEL_REF (6897L)
+#define RST9512_POWER (6898L)
+#define RST9512_POWER_PU_NSEB (6899L)
+#define RST9512_POWER_PU_PHY (6900L)
+#define RST9512_RST (6901L)
+#define RST9512_RST_CORE_MMCM (6902L)
+#define RST9512_RST_DDR4 (6903L)
+#define RST9512_RST_MAC_RX (6904L)
+#define RST9512_RST_MAC_TX (6905L)
+#define RST9512_RST_PCS_RX (6906L)
+#define RST9512_RST_PERIPH (6907L)
+#define RST9512_RST_PHY (6908L)
+#define RST9512_RST_PTP (6909L)
+#define RST9512_RST_PTP_MMCM (6910L)
+#define RST9512_RST_RPP (6911L)
+#define RST9512_RST_SDC (6912L)
+#define RST9512_RST_SERDES_RX (6913L)
+#define RST9512_RST_SERDES_RX_DATAPATH (6914L)
+#define RST9512_RST_SERDES_TX (6915L)
+#define RST9512_RST_SYS (6916L)
+#define RST9512_RST_SYS_MMCM (6917L)
+#define RST9512_RST_TS (6918L)
+#define RST9512_RST_TSM_REF_MMCM (6919L)
+#define RST9512_RST_TS_MMCM (6920L)
+#define RST9512_STAT (6921L)
+#define RST9512_STAT_CORE_MMCM_LOCKED (6922L)
+#define RST9512_STAT_DDR4_MMCM_LOCKED (6923L)
+#define RST9512_STAT_DDR4_PLL_LOCKED (6924L)
+#define RST9512_STAT_PCI_SYS_MMCM_LOCKED (6925L)
+#define RST9512_STAT_PTP_MMCM_LOCKED (6926L)
+#define RST9512_STAT_SYS_MMCM_LOCKED (6927L)
+#define RST9512_STAT_TSM_REF_MMCM_LOCKED (6928L)
+#define RST9512_STAT_TS_MMCM_LOCKED (6929L)
+#define RST9512_STICKY (6930L)
+#define RST9512_STICKY_CORE_MMCM_UNLOCKED (6931L)
+#define RST9512_STICKY_DDR4_MMCM_UNLOCKED (6932L)
+#define RST9512_STICKY_DDR4_PLL_UNLOCKED (6933L)
+#define RST9512_STICKY_PCI_SYS_MMCM_UNLOCKED (6934L)
+#define RST9512_STICKY_PTP_MMCM_UNLOCKED (6935L)
+#define RST9512_STICKY_SYS_MMCM_UNLOCKED (6936L)
+#define RST9512_STICKY_TSM_REF_MMCM_UNLOCKED (6937L)
+#define RST9512_STICKY_TS_MMCM_UNLOCKED (6938L)
+/* RST9513: placeholder, no register defines follow */
+/* RST9515 */
+#define RST9515_CTRL (6939L)
+#define RST9515_CTRL_PTP_MMCM_CLKSEL (6940L)
+#define RST9515_CTRL_TS_CLKSEL (6941L)
+#define RST9515_CTRL_TS_CLKSEL_OVERRIDE (6942L)
+#define RST9515_CTRL_TS_CLKSEL_REF (6943L)
+#define RST9515_POWER (6944L)
+#define RST9515_POWER_PU_NSEB (6945L)
+#define RST9515_POWER_PU_PHY (6946L)
+#define RST9515_RST (6947L)
+#define RST9515_RST_CORE_MMCM (6948L)
+#define RST9515_RST_DDR4 (6949L)
+#define RST9515_RST_MAC_RX (6950L)
+#define RST9515_RST_PERIPH (6951L)
+#define RST9515_RST_PHY (6952L)
+#define RST9515_RST_PTP (6953L)
+#define RST9515_RST_PTP_MMCM (6954L)
+#define RST9515_RST_RPP (6955L)
+#define RST9515_RST_SDC (6956L)
+#define RST9515_RST_SYS (6957L)
+#define RST9515_RST_SYS_MMCM (6958L)
+#define RST9515_RST_TMC (6959L)
+#define RST9515_RST_TS (6960L)
+#define RST9515_RST_TSM_REF_MMCM (6961L)
+#define RST9515_RST_TS_MMCM (6962L)
+#define RST9515_STAT (6963L)
+#define RST9515_STAT_CORE_MMCM_LOCKED (6964L)
+#define RST9515_STAT_DDR4_MMCM_LOCKED (6965L)
+#define RST9515_STAT_DDR4_PLL_LOCKED (6966L)
+#define RST9515_STAT_PCI_SYS_MMCM_LOCKED (6967L)
+#define RST9515_STAT_PTP_MMCM_LOCKED (6968L)
+#define RST9515_STAT_SYS_MMCM_LOCKED (6969L)
+#define RST9515_STAT_TSM_REF_MMCM_LOCKED (6970L)
+#define RST9515_STAT_TS_MMCM_LOCKED (6971L)
+#define RST9515_STICKY (6972L)
+#define RST9515_STICKY_CORE_MMCM_UNLOCKED (6973L)
+#define RST9515_STICKY_DDR4_MMCM_UNLOCKED (6974L)
+#define RST9515_STICKY_DDR4_PLL_UNLOCKED (6975L)
+#define RST9515_STICKY_PCI_SYS_MMCM_UNLOCKED (6976L)
+#define RST9515_STICKY_PTP_MMCM_UNLOCKED (6977L)
+#define RST9515_STICKY_SYS_MMCM_UNLOCKED (6978L)
+#define RST9515_STICKY_TSM_REF_MMCM_UNLOCKED (6979L)
+#define RST9515_STICKY_TS_MMCM_UNLOCKED (6980L)
+/* RST9516 */
+#define RST9516_CTRL (6981L)
+#define RST9516_CTRL_PTP_MMCM_CLKSEL (6982L)
+#define RST9516_CTRL_TS_CLKSEL (6983L)
+#define RST9516_CTRL_TS_CLKSEL_OVERRIDE (6984L)
+#define RST9516_CTRL_TS_CLKSEL_REF (6985L)
+#define RST9516_POWER (6986L)
+#define RST9516_POWER_PU_NSEB (6987L)
+#define RST9516_POWER_PU_PHY (6988L)
+#define RST9516_RST (6989L)
+#define RST9516_RST_CORE_MMCM (6990L)
+#define RST9516_RST_DDR4 (6991L)
+#define RST9516_RST_MAC_RX (6992L)
+#define RST9516_RST_PCS_RX (6993L)
+#define RST9516_RST_PERIPH (6994L)
+#define RST9516_RST_PHY (6995L)
+#define RST9516_RST_PTP (6996L)
+#define RST9516_RST_PTP_MMCM (6997L)
+#define RST9516_RST_RPP (6998L)
+#define RST9516_RST_SDC (6999L)
+#define RST9516_RST_SERDES_RX (7000L)
+#define RST9516_RST_SERDES_TX (7001L)
+#define RST9516_RST_SYS (7002L)
+#define RST9516_RST_SYS_MMCM (7003L)
+#define RST9516_RST_TMC (7004L)
+#define RST9516_RST_TS (7005L)
+#define RST9516_RST_TSM_REF_MMCM (7006L)
+#define RST9516_RST_TS_MMCM (7007L)
+#define RST9516_STAT (7008L)
+#define RST9516_STAT_CORE_MMCM_LOCKED (7009L)
+#define RST9516_STAT_DDR4_MMCM_LOCKED (7010L)
+#define RST9516_STAT_DDR4_PLL_LOCKED (7011L)
+#define RST9516_STAT_PCI_SYS_MMCM_LOCKED (7012L)
+#define RST9516_STAT_PTP_MMCM_LOCKED (7013L)
+#define RST9516_STAT_SYS_MMCM_LOCKED (7014L)
+#define RST9516_STAT_TSM_REF_MMCM_LOCKED (7015L)
+#define RST9516_STAT_TS_MMCM_LOCKED (7016L)
+#define RST9516_STICKY (7017L)
+#define RST9516_STICKY_CORE_MMCM_UNLOCKED (7018L)
+#define RST9516_STICKY_DDR4_MMCM_UNLOCKED (7019L)
+#define RST9516_STICKY_DDR4_PLL_UNLOCKED (7020L)
+#define RST9516_STICKY_PCI_SYS_MMCM_UNLOCKED (7021L)
+#define RST9516_STICKY_PTP_MMCM_UNLOCKED (7022L)
+#define RST9516_STICKY_SYS_MMCM_UNLOCKED (7023L)
+#define RST9516_STICKY_TSM_REF_MMCM_UNLOCKED (7024L)
+#define RST9516_STICKY_TS_MMCM_UNLOCKED (7025L)
+/* RST9517 */
+#define RST9517_CTRL (7026L)
+#define RST9517_CTRL_PTP_MMCM_CLKSEL (7027L)
+#define RST9517_CTRL_TS_CLKSEL (7028L)
+#define RST9517_CTRL_TS_CLKSEL_OVERRIDE (7029L)
+#define RST9517_RST (7030L)
+#define RST9517_RST_DDR3 (7031L)
+#define RST9517_RST_DDR3_IDLY_MMCM (7032L)
+#define RST9517_RST_NSEB (7033L)
+#define RST9517_RST_PERIPH (7034L)
+#define RST9517_RST_PHY10G_QPLL (7035L)
+#define RST9517_RST_PHY3S10G (7036L)
+#define RST9517_RST_PHY3S_MMCM (7037L)
+#define RST9517_RST_PTP (7038L)
+#define RST9517_RST_PTP_MMCM (7039L)
+#define RST9517_RST_RPP (7040L)
+#define RST9517_RST_SDC (7041L)
+#define RST9517_RST_SI5326 (7042L)
+#define RST9517_RST_SYS (7043L)
+#define RST9517_RST_TS (7044L)
+#define RST9517_RST_TS_MMCM (7045L)
+#define RST9517_RST_STAT (7046L)
+#define RST9517_RST_STAT_PCS_RESET_BY_SERDES (7047L)
+#define RST9517_STAT (7048L)
+#define RST9517_STAT_DDR3_IDLY_MMCM_LOCKED (7049L)
+#define RST9517_STAT_DDR3_MMCM_LOCKED (7050L)
+#define RST9517_STAT_DDR3_PLL_LOCKED (7051L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_0_1 (7052L)
+#define RST9517_STAT_PHY10G_QPLL_LOCK_2_3 (7053L)
+#define RST9517_STAT_PHY3S_MMCM_LOCKED (7054L)
+#define RST9517_STAT_PTP_MMCM_LOCKED (7055L)
+#define RST9517_STAT_SYNCE_MAIN_CLK_LOS (7056L)
+#define RST9517_STAT_SYS_MMCM_LOCKED (7057L)
+#define RST9517_STAT_TS_MMCM_LOCKED (7058L)
+#define RST9517_STICKY (7059L)
+#define RST9517_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7060L)
+#define RST9517_STICKY_DDR3_MMCM_UNLOCKED (7061L)
+#define RST9517_STICKY_DDR3_PLL_UNLOCKED (7062L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7063L)
+#define RST9517_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7064L)
+#define RST9517_STICKY_PHY3S_MMCM_UNLOCKED (7065L)
+#define RST9517_STICKY_PTP_MMCM_UNLOCKED (7066L)
+#define RST9517_STICKY_TS_MMCM_UNLOCKED (7067L)
+/* RST9519 */
+#define RST9519_CTRL (7068L)
+#define RST9519_CTRL_PTP_MMCM_CLKSEL (7069L)
+#define RST9519_CTRL_TS_CLKSEL (7070L)
+#define RST9519_CTRL_TS_CLKSEL_OVERRIDE (7071L)
+#define RST9519_RST (7072L)
+#define RST9519_RST_DDR3 (7073L)
+#define RST9519_RST_DDR3_IDLY_MMCM (7074L)
+#define RST9519_RST_PERIPH (7075L)
+#define RST9519_RST_PHY10G (7076L)
+#define RST9519_RST_PHY10G_QPLL (7077L)
+#define RST9519_RST_PTP (7078L)
+#define RST9519_RST_PTP_MMCM (7079L)
+#define RST9519_RST_RPP (7080L)
+#define RST9519_RST_SDC (7081L)
+#define RST9519_RST_SI5326 (7082L)
+#define RST9519_RST_SYS (7083L)
+#define RST9519_RST_TS (7084L)
+#define RST9519_RST_TS_MMCM (7085L)
+#define RST9519_RST_STAT (7086L)
+#define RST9519_RST_STAT_PCS_RESET_BY_SERDES (7087L)
+#define RST9519_STAT (7088L)
+#define RST9519_STAT_DDR3_IDLY_MMCM_LOCKED (7089L)
+#define RST9519_STAT_DDR3_MMCM_LOCKED (7090L)
+#define RST9519_STAT_DDR3_PLL_LOCKED (7091L)
+#define RST9519_STAT_PHY10G_QPLL_LOCK (7092L)
+#define RST9519_STAT_PTP_MMCM_LOCKED (7093L)
+#define RST9519_STAT_SYNCE_MAIN_CLK_LOS (7094L)
+#define RST9519_STAT_SYS_MMCM_LOCKED (7095L)
+#define RST9519_STAT_TS_MMCM_LOCKED (7096L)
+#define RST9519_STICKY (7097L)
+#define RST9519_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7098L)
+#define RST9519_STICKY_DDR3_MMCM_UNLOCKED (7099L)
+#define RST9519_STICKY_DDR3_PLL_UNLOCKED (7100L)
+#define RST9519_STICKY_PHY10G_QPLL_UNLOCK (7101L)
+#define RST9519_STICKY_PTP_MMCM_UNLOCKED (7102L)
+#define RST9519_STICKY_TS_MMCM_UNLOCKED (7103L)
+/* RST9520: placeholder, no register defines follow */
+/* RST9521 */
+#define RST9521_CTRL (7104L)
+#define RST9521_CTRL_PTP_MMCM_CLKSEL (7105L)
+#define RST9521_CTRL_TS_CLKSEL (7106L)
+#define RST9521_CTRL_TS_CLKSEL_OVERRIDE (7107L)
+#define RST9521_POWER (7108L)
+#define RST9521_POWER_PU_NSEB (7109L)
+#define RST9521_POWER_PU_PHY (7110L)
+#define RST9521_RST (7111L)
+#define RST9521_RST_CORE_MMCM (7112L)
+#define RST9521_RST_DDR4 (7113L)
+#define RST9521_RST_MAC_RX (7114L)
+#define RST9521_RST_PERIPH (7115L)
+#define RST9521_RST_PHY (7116L)
+#define RST9521_RST_PTP (7117L)
+#define RST9521_RST_PTP_MMCM (7118L)
+#define RST9521_RST_RPP (7119L)
+#define RST9521_RST_SDC (7120L)
+#define RST9521_RST_SYS (7121L)
+#define RST9521_RST_SYS_MMCM (7122L)
+#define RST9521_RST_TMC (7123L)
+#define RST9521_RST_TS (7124L)
+#define RST9521_RST_TSM_REF_MMCM (7125L)
+#define RST9521_RST_TS_MMCM (7126L)
+#define RST9521_STAT (7127L)
+#define RST9521_STAT_CORE_MMCM_LOCKED (7128L)
+#define RST9521_STAT_DDR4_MMCM_LOCKED (7129L)
+#define RST9521_STAT_DDR4_PLL_LOCKED (7130L)
+#define RST9521_STAT_PTP_MMCM_LOCKED (7131L)
+#define RST9521_STAT_SYS_MMCM_LOCKED (7132L)
+#define RST9521_STAT_TS_MMCM_LOCKED (7133L)
+#define RST9521_STICKY (7134L)
+#define RST9521_STICKY_CORE_MMCM_UNLOCKED (7135L)
+#define RST9521_STICKY_DDR4_MMCM_UNLOCKED (7136L)
+#define RST9521_STICKY_DDR4_PLL_UNLOCKED (7137L)
+#define RST9521_STICKY_PTP_MMCM_UNLOCKED (7138L)
+#define RST9521_STICKY_SYS_MMCM_UNLOCKED (7139L)
+#define RST9521_STICKY_TS_MMCM_UNLOCKED (7140L)
+/* RST9522 */
+#define RST9522_CTRL (7141L)
+#define RST9522_CTRL_PTP_MMCM_CLKSEL (7142L)
+#define RST9522_CTRL_TS_CLKSEL (7143L)
+#define RST9522_CTRL_TS_CLKSEL_OVERRIDE (7144L)
+#define RST9522_CTRL_TS_CLKSEL_REF (7145L)
+#define RST9522_POWER (7146L)
+#define RST9522_POWER_PU_NSEB (7147L)
+#define RST9522_POWER_PU_PHY (7148L)
+#define RST9522_RST (7149L)
+#define RST9522_RST_CORE_MMCM (7150L)
+#define RST9522_RST_DDR4 (7151L)
+#define RST9522_RST_PERIPH (7152L)
+#define RST9522_RST_PHY10G (7153L)
+#define RST9522_RST_PHY10G_QPLL (7154L)
+#define RST9522_RST_PTP (7155L)
+#define RST9522_RST_PTP_MMCM (7156L)
+#define RST9522_RST_RPP (7157L)
+#define RST9522_RST_SDC (7158L)
+#define RST9522_RST_SYS (7159L)
+#define RST9522_RST_SYS_MMCM (7160L)
+#define RST9522_RST_TS (7161L)
+#define RST9522_RST_TSM_REF_MMCM (7162L)
+#define RST9522_RST_TS_MMCM (7163L)
+#define RST9522_STAT (7164L)
+#define RST9522_STAT_CORE_MMCM_LOCKED (7165L)
+#define RST9522_STAT_DDR4_MMCM_LOCKED (7166L)
+#define RST9522_STAT_DDR4_PLL_LOCKED (7167L)
+#define RST9522_STAT_PCI_SYS_MMCM_LOCKED (7168L)
+#define RST9522_STAT_PHY10G_QPLL_LOCKED (7169L)
+#define RST9522_STAT_PTP_MMCM_LOCKED (7170L)
+#define RST9522_STAT_SYS_MMCM_LOCKED (7171L)
+#define RST9522_STAT_TSM_REF_MMCM_LOCKED (7172L)
+#define RST9522_STAT_TS_MMCM_LOCKED (7173L)
+#define RST9522_STICKY (7174L)
+#define RST9522_STICKY_CORE_MMCM_UNLOCKED (7175L)
+#define RST9522_STICKY_DDR4_MMCM_UNLOCKED (7176L)
+#define RST9522_STICKY_DDR4_PLL_UNLOCKED (7177L)
+#define RST9522_STICKY_PCI_SYS_MMCM_UNLOCKED (7178L)
+#define RST9522_STICKY_PHY10G_QPLL_UNLOCKED (7179L)
+#define RST9522_STICKY_PTP_MMCM_UNLOCKED (7180L)
+#define RST9522_STICKY_SYS_MMCM_UNLOCKED (7181L)
+#define RST9522_STICKY_TSM_REF_MMCM_UNLOCKED (7182L)
+#define RST9522_STICKY_TS_MMCM_UNLOCKED (7183L)
+/* RST9523: placeholder, no register defines follow */
+/* RST9524 */
+#define RST9524_CTRL (7184L)
+#define RST9524_CTRL_PTP_MMCM_CLKSEL (7185L)
+#define RST9524_CTRL_TS_CLKSEL (7186L)
+#define RST9524_CTRL_TS_CLKSEL_OVERRIDE (7187L)
+#define RST9524_POWER (7188L)
+#define RST9524_POWER_PU_NSEB (7189L)
+#define RST9524_POWER_PU_PHY (7190L)
+#define RST9524_RST (7191L)
+#define RST9524_RST_CORE_MMCM (7192L)
+#define RST9524_RST_DDR4 (7193L)
+#define RST9524_RST_MAC_RX (7194L)
+#define RST9524_RST_PERIPH (7195L)
+#define RST9524_RST_PHY (7196L)
+#define RST9524_RST_PTP (7197L)
+#define RST9524_RST_PTP_MMCM (7198L)
+#define RST9524_RST_RPP (7199L)
+#define RST9524_RST_SDC (7200L)
+#define RST9524_RST_SYS (7201L)
+#define RST9524_RST_SYS_MMCM (7202L)
+#define RST9524_RST_TMC (7203L)
+#define RST9524_RST_TS (7204L)
+#define RST9524_RST_TS_MMCM (7205L)
+#define RST9524_STAT (7206L)
+#define RST9524_STAT_CORE_MMCM_LOCKED (7207L)
+#define RST9524_STAT_DDR4_MMCM_LOCKED (7208L)
+#define RST9524_STAT_DDR4_PLL_LOCKED (7209L)
+#define RST9524_STAT_PTP_MMCM_LOCKED (7210L)
+#define RST9524_STAT_SYS_MMCM_LOCKED (7211L)
+#define RST9524_STAT_TS_MMCM_LOCKED (7212L)
+#define RST9524_STICKY (7213L)
+#define RST9524_STICKY_CORE_MMCM_UNLOCKED (7214L)
+#define RST9524_STICKY_DDR4_MMCM_UNLOCKED (7215L)
+#define RST9524_STICKY_DDR4_PLL_UNLOCKED (7216L)
+#define RST9524_STICKY_PTP_MMCM_UNLOCKED (7217L)
+#define RST9524_STICKY_SYS_MMCM_UNLOCKED (7218L)
+#define RST9524_STICKY_TS_MMCM_UNLOCKED (7219L)
+/* RST9525 */
+#define RST9525_CTRL (7220L)
+#define RST9525_CTRL_PTP_MMCM_CLKSEL (7221L)
+#define RST9525_CTRL_TS_CLKSEL (7222L)
+#define RST9525_CTRL_TS_CLKSEL_OVERRIDE (7223L)
+#define RST9525_CTRL_TS_CLKSEL_REF (7224L)
+#define RST9525_POWER (7225L)
+#define RST9525_POWER_PU_NSEB (7226L)
+#define RST9525_POWER_PU_PHY (7227L)
+#define RST9525_RST (7228L)
+#define RST9525_RST_CORE_MMCM (7229L)
+#define RST9525_RST_DDR4 (7230L)
+#define RST9525_RST_MAC_RX (7231L)
+#define RST9525_RST_MAC_TX (7232L)
+#define RST9525_RST_PCS_RX (7233L)
+#define RST9525_RST_PERIPH (7234L)
+#define RST9525_RST_PHY (7235L)
+#define RST9525_RST_PTP (7236L)
+#define RST9525_RST_PTP_MMCM (7237L)
+#define RST9525_RST_RPP (7238L)
+#define RST9525_RST_SDC (7239L)
+#define RST9525_RST_SERDES_RX (7240L)
+#define RST9525_RST_SERDES_RX_DATAPATH (7241L)
+#define RST9525_RST_SERDES_TX (7242L)
+#define RST9525_RST_SYS (7243L)
+#define RST9525_RST_SYS_MMCM (7244L)
+#define RST9525_RST_TS (7245L)
+#define RST9525_RST_TSM_REF_MMCM (7246L)
+#define RST9525_RST_TS_MMCM (7247L)
+#define RST9525_STAT (7248L)
+#define RST9525_STAT_CORE_MMCM_LOCKED (7249L)
+#define RST9525_STAT_DDR4_MMCM_LOCKED (7250L)
+#define RST9525_STAT_DDR4_PLL_LOCKED (7251L)
+#define RST9525_STAT_PCI_SYS_MMCM_LOCKED (7252L)
+#define RST9525_STAT_PTP_MMCM_LOCKED (7253L)
+#define RST9525_STAT_SYS_MMCM_LOCKED (7254L)
+#define RST9525_STAT_TSM_REF_MMCM_LOCKED (7255L)
+#define RST9525_STAT_TS_MMCM_LOCKED (7256L)
+#define RST9525_STICKY (7257L)
+#define RST9525_STICKY_CORE_MMCM_UNLOCKED (7258L)
+#define RST9525_STICKY_DDR4_MMCM_UNLOCKED (7259L)
+#define RST9525_STICKY_DDR4_PLL_UNLOCKED (7260L)
+#define RST9525_STICKY_PCI_SYS_MMCM_UNLOCKED (7261L)
+#define RST9525_STICKY_PTP_MMCM_UNLOCKED (7262L)
+#define RST9525_STICKY_SYS_MMCM_UNLOCKED (7263L)
+#define RST9525_STICKY_TSM_REF_MMCM_UNLOCKED (7264L)
+#define RST9525_STICKY_TS_MMCM_UNLOCKED (7265L)
+/* RST9526 */
+#define RST9526_CTRL (7266L)
+#define RST9526_CTRL_PTP_MMCM_CLKSEL (7267L)
+#define RST9526_CTRL_TS_CLKSEL (7268L)
+#define RST9526_CTRL_TS_CLKSEL_OVERRIDE (7269L)
+#define RST9526_POWER (7270L)
+#define RST9526_POWER_PU_NSEB (7271L)
+#define RST9526_POWER_PU_PHY (7272L)
+#define RST9526_RST (7273L)
+#define RST9526_RST_CORE_MMCM (7274L)
+#define RST9526_RST_DDR4 (7275L)
+#define RST9526_RST_MAC_RX (7276L)
+#define RST9526_RST_MAC_TX (7277L)
+#define RST9526_RST_PCS_RX (7278L)
+#define RST9526_RST_PERIPH (7279L)
+#define RST9526_RST_PHY (7280L)
+#define RST9526_RST_PTP (7281L)
+#define RST9526_RST_PTP_MMCM (7282L)
+#define RST9526_RST_RPP (7283L)
+#define RST9526_RST_SDC (7284L)
+#define RST9526_RST_SERDES_RX (7285L)
+#define RST9526_RST_SERDES_RX_DATAPATH (7286L)
+#define RST9526_RST_SERDES_TX (7287L)
+#define RST9526_RST_SYS (7288L)
+#define RST9526_RST_SYS_MMCM (7289L)
+#define RST9526_RST_TMC (7290L)
+#define RST9526_RST_TS (7291L)
+#define RST9526_RST_TS_MMCM (7292L)
+#define RST9526_STAT (7293L)
+#define RST9526_STAT_CORE_MMCM_LOCKED (7294L)
+#define RST9526_STAT_DDR4_MMCM_LOCKED (7295L)
+#define RST9526_STAT_DDR4_PLL_LOCKED (7296L)
+#define RST9526_STAT_PTP_MMCM_LOCKED (7297L)
+#define RST9526_STAT_SYS_MMCM_LOCKED (7298L)
+#define RST9526_STAT_TS_MMCM_LOCKED (7299L)
+#define RST9526_STICKY (7300L)
+#define RST9526_STICKY_CORE_MMCM_UNLOCKED (7301L)
+#define RST9526_STICKY_DDR4_MMCM_UNLOCKED (7302L)
+#define RST9526_STICKY_DDR4_PLL_UNLOCKED (7303L)
+#define RST9526_STICKY_PTP_MMCM_UNLOCKED (7304L)
+#define RST9526_STICKY_SYS_MMCM_UNLOCKED (7305L)
+#define RST9526_STICKY_TS_MMCM_UNLOCKED (7306L)
+/* RST9527 */
+#define RST9527_CTRL (7307L)
+#define RST9527_CTRL_PTP_MMCM_CLKSEL (7308L)
+#define RST9527_CTRL_TS_CLKSEL (7309L)
+#define RST9527_CTRL_TS_CLKSEL_OVERRIDE (7310L)
+#define RST9527_POWER (7311L)
+#define RST9527_POWER_PU_NSEB (7312L)
+#define RST9527_POWER_PU_PHY (7313L)
+#define RST9527_RST (7314L)
+#define RST9527_RST_CORE_MMCM (7315L)
+#define RST9527_RST_DDR4 (7316L)
+#define RST9527_RST_MAC_RX (7317L)
+#define RST9527_RST_MAC_TX (7318L)
+#define RST9527_RST_PCS_RX (7319L)
+#define RST9527_RST_PERIPH (7320L)
+#define RST9527_RST_PHY (7321L)
+#define RST9527_RST_PTP (7322L)
+#define RST9527_RST_PTP_MMCM (7323L)
+#define RST9527_RST_RPP (7324L)
+#define RST9527_RST_SDC (7325L)
+#define RST9527_RST_SERDES_RX (7326L)
+#define RST9527_RST_SERDES_RX_DATAPATH (7327L)
+#define RST9527_RST_SERDES_TX (7328L)
+#define RST9527_RST_SYS (7329L)
+#define RST9527_RST_SYS_MMCM (7330L)
+#define RST9527_RST_TMC (7331L)
+#define RST9527_RST_TS (7332L)
+#define RST9527_RST_TS_MMCM (7333L)
+#define RST9527_STAT (7334L)
+#define RST9527_STAT_CORE_MMCM_LOCKED (7335L)
+#define RST9527_STAT_DDR4_MMCM_LOCKED (7336L)
+#define RST9527_STAT_DDR4_PLL_LOCKED (7337L)
+#define RST9527_STAT_PTP_MMCM_LOCKED (7338L)
+#define RST9527_STAT_SYS_MMCM_LOCKED (7339L)
+#define RST9527_STAT_TS_MMCM_LOCKED (7340L)
+#define RST9527_STICKY (7341L)
+#define RST9527_STICKY_CORE_MMCM_UNLOCKED (7342L)
+#define RST9527_STICKY_DDR4_MMCM_UNLOCKED (7343L)
+#define RST9527_STICKY_DDR4_PLL_UNLOCKED (7344L)
+#define RST9527_STICKY_PTP_MMCM_UNLOCKED (7345L)
+#define RST9527_STICKY_SYS_MMCM_UNLOCKED (7346L)
+#define RST9527_STICKY_TS_MMCM_UNLOCKED (7347L)
+/* RST9528: placeholder, no register defines follow */
+/* RST9529 */
+#define RST9529_CTRL (7348L)
+#define RST9529_CTRL_PTP_MMCM_CLKSEL (7349L)
+#define RST9529_CTRL_TS_CLKSEL (7350L)
+#define RST9529_CTRL_TS_CLKSEL_OVERRIDE (7351L)
+#define RST9529_CTRL_TS_CLKSEL_REF (7352L)
+#define RST9529_POWER (7353L)
+#define RST9529_POWER_PU_NSEB (7354L)
+#define RST9529_POWER_PU_PHY (7355L)
+#define RST9529_RST (7356L)
+#define RST9529_RST_CORE_MMCM (7357L)
+#define RST9529_RST_DDR4 (7358L)
+#define RST9529_RST_PERIPH (7359L)
+#define RST9529_RST_PHY (7360L)
+#define RST9529_RST_PTP (7361L)
+#define RST9529_RST_PTP_MMCM (7362L)
+#define RST9529_RST_RPP (7363L)
+#define RST9529_RST_SDC (7364L)
+#define RST9529_RST_SYS (7365L)
+#define RST9529_RST_SYS_MMCM (7366L)
+#define RST9529_RST_TS (7367L)
+#define RST9529_RST_TSM_REF_MMCM (7368L)
+#define RST9529_RST_TS_MMCM (7369L)
+#define RST9529_STAT (7370L)
+#define RST9529_STAT_CORE_MMCM_LOCKED (7371L)
+#define RST9529_STAT_DDR4_MMCM_LOCKED (7372L)
+#define RST9529_STAT_DDR4_PLL_LOCKED (7373L)
+#define RST9529_STAT_PCI_SYS_MMCM_LOCKED (7374L)
+#define RST9529_STAT_PTP_MMCM_LOCKED (7375L)
+#define RST9529_STAT_SYS_MMCM_LOCKED (7376L)
+#define RST9529_STAT_TSM_REF_MMCM_LOCKED (7377L)
+#define RST9529_STAT_TS_MMCM_LOCKED (7378L)
+#define RST9529_STICKY (7379L)
+#define RST9529_STICKY_CORE_MMCM_UNLOCKED (7380L)
+#define RST9529_STICKY_DDR4_MMCM_UNLOCKED (7381L)
+#define RST9529_STICKY_DDR4_PLL_UNLOCKED (7382L)
+#define RST9529_STICKY_PCI_SYS_MMCM_UNLOCKED (7383L)
+#define RST9529_STICKY_PTP_MMCM_UNLOCKED (7384L)
+#define RST9529_STICKY_SYS_MMCM_UNLOCKED (7385L)
+#define RST9529_STICKY_TSM_REF_MMCM_UNLOCKED (7386L)
+#define RST9529_STICKY_TS_MMCM_UNLOCKED (7387L)
+/* RST9530 */
+#define RST9530_CTRL (7388L)
+#define RST9530_CTRL_PTP_MMCM_CLKSEL (7389L)
+#define RST9530_CTRL_TS_CLKSEL (7390L)
+#define RST9530_CTRL_TS_CLKSEL_OVERRIDE (7391L)
+#define RST9530_CTRL_TS_CLKSEL_REF (7392L)
+#define RST9530_POWER (7393L)
+#define RST9530_POWER_PU_NSEB (7394L)
+#define RST9530_POWER_PU_PHY (7395L)
+#define RST9530_RST (7396L)
+#define RST9530_RST_CORE_MMCM (7397L)
+#define RST9530_RST_DDR4 (7398L)
+#define RST9530_RST_NFV_OVS (7399L)
+#define RST9530_RST_PERIPH (7400L)
+#define RST9530_RST_PHY (7401L)
+#define RST9530_RST_PTP (7402L)
+#define RST9530_RST_PTP_MMCM (7403L)
+#define RST9530_RST_RPP (7404L)
+#define RST9530_RST_SDC (7405L)
+#define RST9530_RST_SYS (7406L)
+#define RST9530_RST_SYS_MMCM (7407L)
+#define RST9530_RST_TMC (7408L)
+#define RST9530_RST_TS (7409L)
+#define RST9530_RST_TSM_REF_MMCM (7410L)
+#define RST9530_RST_TS_MMCM (7411L)
+#define RST9530_STAT (7412L)
+#define RST9530_STAT_CORE_MMCM_LOCKED (7413L)
+#define RST9530_STAT_DDR4_MMCM_LOCKED (7414L)
+#define RST9530_STAT_DDR4_PLL_LOCKED (7415L)
+#define RST9530_STAT_PTP_MMCM_LOCKED (7416L)
+#define RST9530_STAT_SYS_MMCM_LOCKED (7417L)
+#define RST9530_STAT_TSM_REF_MMCM_LOCKED (7418L)
+#define RST9530_STAT_TS_MMCM_LOCKED (7419L)
+#define RST9530_STICKY (7420L)
+#define RST9530_STICKY_CORE_MMCM_UNLOCKED (7421L)
+#define RST9530_STICKY_DDR4_MMCM_UNLOCKED (7422L)
+#define RST9530_STICKY_DDR4_PLL_UNLOCKED (7423L)
+#define RST9530_STICKY_PCI_SYS_MMCM_UNLOCKED (7424L)
+#define RST9530_STICKY_PTP_MMCM_UNLOCKED (7425L)
+#define RST9530_STICKY_SYS_MMCM_UNLOCKED (7426L)
+#define RST9530_STICKY_TSM_REF_MMCM_UNLOCKED (7427L)
+#define RST9530_STICKY_TS_MMCM_UNLOCKED (7428L)
+/* RST9531 */
+#define RST9531_CTRL (7429L)
+#define RST9531_CTRL_PTP_MMCM_CLKSEL (7430L)
+#define RST9531_CTRL_TS_CLKSEL (7431L)
+#define RST9531_CTRL_TS_CLKSEL_OVERRIDE (7432L)
+#define RST9531_CTRL_TS_CLKSEL_REF (7433L)
+#define RST9531_POWER (7434L)
+#define RST9531_POWER_PU_NSEB (7435L)
+#define RST9531_POWER_PU_PHY (7436L)
+#define RST9531_RST (7437L)
+#define RST9531_RST_CORE_MMCM (7438L)
+#define RST9531_RST_DDR4 (7439L)
+#define RST9531_RST_PERIPH (7440L)
+#define RST9531_RST_PHY (7441L)
+#define RST9531_RST_PTP (7442L)
+#define RST9531_RST_PTP_MMCM (7443L)
+#define RST9531_RST_RPP (7444L)
+#define RST9531_RST_SDC (7445L)
+#define RST9531_RST_SYS (7446L)
+#define RST9531_RST_SYS_MMCM (7447L)
+#define RST9531_RST_TS (7448L)
+#define RST9531_RST_TSM_REF_MMCM (7449L)
+#define RST9531_RST_TS_MMCM (7450L)
+#define RST9531_STAT (7451L)
+#define RST9531_STAT_CORE_MMCM_LOCKED (7452L)
+#define RST9531_STAT_DDR4_MMCM_LOCKED (7453L)
+#define RST9531_STAT_DDR4_PLL_LOCKED (7454L)
+#define RST9531_STAT_PCI_SYS_MMCM_LOCKED (7455L)
+#define RST9531_STAT_PTP_MMCM_LOCKED (7456L)
+#define RST9531_STAT_SYS_MMCM_LOCKED (7457L)
+#define RST9531_STAT_TSM_REF_MMCM_LOCKED (7458L)
+#define RST9531_STAT_TS_MMCM_LOCKED (7459L)
+#define RST9531_STICKY (7460L)
+#define RST9531_STICKY_CORE_MMCM_UNLOCKED (7461L)
+#define RST9531_STICKY_DDR4_MMCM_UNLOCKED (7462L)
+#define RST9531_STICKY_DDR4_PLL_UNLOCKED (7463L)
+#define RST9531_STICKY_PCI_SYS_MMCM_UNLOCKED (7464L)
+#define RST9531_STICKY_PTP_MMCM_UNLOCKED (7465L)
+#define RST9531_STICKY_SYS_MMCM_UNLOCKED (7466L)
+#define RST9531_STICKY_TSM_REF_MMCM_UNLOCKED (7467L)
+#define RST9531_STICKY_TS_MMCM_UNLOCKED (7468L)
+/* RST9532 */
+#define RST9532_CTRL (7469L)
+#define RST9532_CTRL_PTP_MMCM_CLKSEL (7470L)
+#define RST9532_CTRL_TS_CLKSEL (7471L)
+#define RST9532_CTRL_TS_CLKSEL_OVERRIDE (7472L)
+#define RST9532_POWER (7473L)
+#define RST9532_POWER_PU_NSEB (7474L)
+#define RST9532_POWER_PU_PHY (7475L)
+#define RST9532_RST (7476L)
+#define RST9532_RST_CORE_MMCM (7477L)
+#define RST9532_RST_DDR4 (7478L)
+#define RST9532_RST_PERIPH (7479L)
+#define RST9532_RST_PHY (7480L)
+#define RST9532_RST_PTP (7481L)
+#define RST9532_RST_PTP_MMCM (7482L)
+#define RST9532_RST_RPP (7483L)
+#define RST9532_RST_SDC (7484L)
+#define RST9532_RST_SYS (7485L)
+#define RST9532_RST_SYS_MMCM (7486L)
+#define RST9532_RST_TMC (7487L)
+#define RST9532_RST_TS (7488L)
+#define RST9532_RST_TS_MMCM (7489L)
+#define RST9532_STAT (7490L)
+#define RST9532_STAT_CORE_MMCM_LOCKED (7491L)
+#define RST9532_STAT_DDR4_MMCM_LOCKED (7492L)
+#define RST9532_STAT_DDR4_PLL_LOCKED (7493L)
+#define RST9532_STAT_PTP_MMCM_LOCKED (7494L)
+#define RST9532_STAT_SYS_MMCM_LOCKED (7495L)
+#define RST9532_STAT_TS_MMCM_LOCKED (7496L)
+#define RST9532_STICKY (7497L)
+#define RST9532_STICKY_CORE_MMCM_UNLOCKED (7498L)
+#define RST9532_STICKY_DDR4_MMCM_UNLOCKED (7499L)
+#define RST9532_STICKY_DDR4_PLL_UNLOCKED (7500L)
+#define RST9532_STICKY_PTP_MMCM_UNLOCKED (7501L)
+#define RST9532_STICKY_SYS_MMCM_UNLOCKED (7502L)
+#define RST9532_STICKY_TS_MMCM_UNLOCKED (7503L)
+/* RST9533 */
+#define RST9533_CTRL (7504L)
+#define RST9533_CTRL_PTP_MMCM_CLKSEL (7505L)
+#define RST9533_CTRL_TS_CLKSEL (7506L)
+#define RST9533_CTRL_TS_CLKSEL_OVERRIDE (7507L)
+#define RST9533_POWER (7508L)
+#define RST9533_POWER_PU_NSEB (7509L)
+#define RST9533_POWER_PU_PHY (7510L)
+#define RST9533_RST (7511L)
+#define RST9533_RST_CORE_MMCM (7512L)
+#define RST9533_RST_DDR4 (7513L)
+#define RST9533_RST_PERIPH (7514L)
+#define RST9533_RST_PHY (7515L)
+#define RST9533_RST_PTP (7516L)
+#define RST9533_RST_PTP_MMCM (7517L)
+#define RST9533_RST_RPP (7518L)
+#define RST9533_RST_SDC (7519L)
+#define RST9533_RST_SYS (7520L)
+#define RST9533_RST_SYS_MMCM (7521L)
+#define RST9533_RST_TMC (7522L)
+#define RST9533_RST_TS (7523L)
+#define RST9533_RST_TS_MMCM (7524L)
+#define RST9533_STAT (7525L)
+#define RST9533_STAT_CORE_MMCM_LOCKED (7526L)
+#define RST9533_STAT_DDR4_MMCM_LOCKED (7527L)
+#define RST9533_STAT_DDR4_PLL_LOCKED (7528L)
+#define RST9533_STAT_PTP_MMCM_LOCKED (7529L)
+#define RST9533_STAT_SYS_MMCM_LOCKED (7530L)
+#define RST9533_STAT_TS_MMCM_LOCKED (7531L)
+#define RST9533_STICKY (7532L)
+#define RST9533_STICKY_CORE_MMCM_UNLOCKED (7533L)
+#define RST9533_STICKY_DDR4_MMCM_UNLOCKED (7534L)
+#define RST9533_STICKY_DDR4_PLL_UNLOCKED (7535L)
+#define RST9533_STICKY_PTP_MMCM_UNLOCKED (7536L)
+#define RST9533_STICKY_SYS_MMCM_UNLOCKED (7537L)
+#define RST9533_STICKY_TS_MMCM_UNLOCKED (7538L)
+/* RST9534 */
+#define RST9534_CTRL (7539L)
+#define RST9534_CTRL_PTP_MMCM_CLKSEL (7540L)
+#define RST9534_CTRL_TS_CLKSEL (7541L)
+#define RST9534_CTRL_TS_CLKSEL_OVERRIDE (7542L)
+#define RST9534_POWER (7543L)
+#define RST9534_POWER_PU_NSEB (7544L)
+#define RST9534_POWER_PU_PHY (7545L)
+#define RST9534_RST (7546L)
+#define RST9534_RST_CORE_MMCM (7547L)
+#define RST9534_RST_DDR4 (7548L)
+#define RST9534_RST_PERIPH (7549L)
+#define RST9534_RST_PHY (7550L)
+#define RST9534_RST_PTP (7551L)
+#define RST9534_RST_PTP_MMCM (7552L)
+#define RST9534_RST_RPP (7553L)
+#define RST9534_RST_SDC (7554L)
+#define RST9534_RST_SYS (7555L)
+#define RST9534_RST_SYS_MMCM (7556L)
+#define RST9534_RST_TMC (7557L)
+#define RST9534_RST_TS (7558L)
+#define RST9534_RST_TS_MMCM (7559L)
+#define RST9534_STAT (7560L)
+#define RST9534_STAT_CORE_MMCM_LOCKED (7561L)
+#define RST9534_STAT_DDR4_MMCM_LOCKED (7562L)
+#define RST9534_STAT_DDR4_PLL_LOCKED (7563L)
+#define RST9534_STAT_PTP_MMCM_LOCKED (7564L)
+#define RST9534_STAT_SYS_MMCM_LOCKED (7565L)
+#define RST9534_STAT_TS_MMCM_LOCKED (7566L)
+#define RST9534_STICKY (7567L)
+#define RST9534_STICKY_CORE_MMCM_UNLOCKED (7568L)
+#define RST9534_STICKY_DDR4_MMCM_UNLOCKED (7569L)
+#define RST9534_STICKY_DDR4_PLL_UNLOCKED (7570L)
+#define RST9534_STICKY_PTP_MMCM_UNLOCKED (7571L)
+#define RST9534_STICKY_SYS_MMCM_UNLOCKED (7572L)
+#define RST9534_STICKY_TS_MMCM_UNLOCKED (7573L)
+/* RST9535 */
+#define RST9535_CTRL (7574L)
+#define RST9535_CTRL_PTP_MMCM_CLKSEL (7575L)
+#define RST9535_CTRL_TS_CLKSEL (7576L)
+#define RST9535_CTRL_TS_CLKSEL_OVERRIDE (7577L)
+#define RST9535_POWER (7578L)
+#define RST9535_POWER_PU_NSEB (7579L)
+#define RST9535_POWER_PU_PHY (7580L)
+#define RST9535_RST (7581L)
+#define RST9535_RST_CORE_MMCM (7582L)
+#define RST9535_RST_DDR4 (7583L)
+#define RST9535_RST_MAC_RX (7584L)
+#define RST9535_RST_MAC_TX (7585L)
+#define RST9535_RST_PCS_RX (7586L)
+#define RST9535_RST_PERIPH (7587L)
+#define RST9535_RST_PHY (7588L)
+#define RST9535_RST_PTP (7589L)
+#define RST9535_RST_PTP_MMCM (7590L)
+#define RST9535_RST_RPP (7591L)
+#define RST9535_RST_SDC (7592L)
+#define RST9535_RST_SERDES_RX (7593L)
+#define RST9535_RST_SERDES_RX_DATAPATH (7594L)
+#define RST9535_RST_SERDES_TX (7595L)
+#define RST9535_RST_SYS (7596L)
+#define RST9535_RST_SYS_MMCM (7597L)
+#define RST9535_RST_TMC (7598L)
+#define RST9535_RST_TS (7599L)
+#define RST9535_RST_TS_MMCM (7600L)
+#define RST9535_STAT (7601L)
+#define RST9535_STAT_CORE_MMCM_LOCKED (7602L)
+#define RST9535_STAT_DDR4_MMCM_LOCKED (7603L)
+#define RST9535_STAT_DDR4_PLL_LOCKED (7604L)
+#define RST9535_STAT_PTP_MMCM_LOCKED (7605L)
+#define RST9535_STAT_SYS_MMCM_LOCKED (7606L)
+#define RST9535_STAT_TS_MMCM_LOCKED (7607L)
+#define RST9535_STICKY (7608L)
+#define RST9535_STICKY_CORE_MMCM_UNLOCKED (7609L)
+#define RST9535_STICKY_DDR4_MMCM_UNLOCKED (7610L)
+#define RST9535_STICKY_DDR4_PLL_UNLOCKED (7611L)
+#define RST9535_STICKY_PTP_MMCM_UNLOCKED (7612L)
+#define RST9535_STICKY_SYS_MMCM_UNLOCKED (7613L)
+#define RST9535_STICKY_TS_MMCM_UNLOCKED (7614L)
+/* RST9536 */
+#define RST9536_CTRL (7615L)
+#define RST9536_CTRL_PTP_MMCM_CLKSEL (7616L)
+#define RST9536_CTRL_TS_CLKSEL (7617L)
+#define RST9536_CTRL_TS_CLKSEL_OVERRIDE (7618L)
+#define RST9536_POWER (7619L)
+#define RST9536_POWER_PU_NSEB (7620L)
+#define RST9536_POWER_PU_PHY (7621L)
+#define RST9536_RST (7622L)
+#define RST9536_RST_CORE_MMCM (7623L)
+#define RST9536_RST_DDR4 (7624L)
+#define RST9536_RST_MAC_RX (7625L)
+#define RST9536_RST_PERIPH (7626L)
+#define RST9536_RST_PHY (7627L)
+#define RST9536_RST_PTP (7628L)
+#define RST9536_RST_PTP_MMCM (7629L)
+#define RST9536_RST_RPP (7630L)
+#define RST9536_RST_SDC (7631L)
+#define RST9536_RST_SYS (7632L)
+#define RST9536_RST_SYS_MMCM (7633L)
+#define RST9536_RST_TMC (7634L)
+#define RST9536_RST_TS (7635L)
+#define RST9536_RST_TS_MMCM (7636L)
+#define RST9536_STAT (7637L)
+#define RST9536_STAT_CORE_MMCM_LOCKED (7638L)
+#define RST9536_STAT_DDR4_MMCM_LOCKED (7639L)
+#define RST9536_STAT_DDR4_PLL_LOCKED (7640L)
+#define RST9536_STAT_PTP_MMCM_LOCKED (7641L)
+#define RST9536_STAT_SYS_MMCM_LOCKED (7642L)
+#define RST9536_STAT_TS_MMCM_LOCKED (7643L)
+#define RST9536_STICKY (7644L)
+#define RST9536_STICKY_CORE_MMCM_UNLOCKED (7645L)
+#define RST9536_STICKY_DDR4_MMCM_UNLOCKED (7646L)
+#define RST9536_STICKY_DDR4_PLL_UNLOCKED (7647L)
+#define RST9536_STICKY_PTP_MMCM_UNLOCKED (7648L)
+#define RST9536_STICKY_SYS_MMCM_UNLOCKED (7649L)
+#define RST9536_STICKY_TS_MMCM_UNLOCKED (7650L)
+/* RST9537: reset controller IDs for a DDR3/PHY10G/SI5326-based variant (note: no POWER register in this block) */
+#define RST9537_CTRL (7651L)
+#define RST9537_CTRL_PTP_MMCM_CLKSEL (7652L)
+#define RST9537_CTRL_TS_CLKSEL (7653L)
+#define RST9537_CTRL_TS_CLKSEL_OVERRIDE (7654L)
+#define RST9537_RST (7655L)
+#define RST9537_RST_DDR3 (7656L)
+#define RST9537_RST_DDR3_IDLY_MMCM (7657L)
+#define RST9537_RST_NSEB (7658L)
+#define RST9537_RST_PERIPH (7659L)
+#define RST9537_RST_PHY10G_QPLL (7660L)
+#define RST9537_RST_PHY3S10G (7661L)
+#define RST9537_RST_PHY3S_MMCM (7662L)
+#define RST9537_RST_PTP (7663L)
+#define RST9537_RST_PTP_MMCM (7664L)
+#define RST9537_RST_RPP (7665L)
+#define RST9537_RST_SDC (7666L)
+#define RST9537_RST_SI5326 (7667L)
+#define RST9537_RST_SYS (7668L)
+#define RST9537_RST_TS (7669L)
+#define RST9537_RST_TS_MMCM (7670L)
+#define RST9537_RST_STAT (7671L)
+#define RST9537_RST_STAT_PCS_RESET_BY_SERDES (7672L)
+#define RST9537_STAT (7673L)
+#define RST9537_STAT_DDR3_IDLY_MMCM_LOCKED (7674L)
+#define RST9537_STAT_DDR3_MMCM_LOCKED (7675L)
+#define RST9537_STAT_DDR3_PLL_LOCKED (7676L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_0_1 (7677L)
+#define RST9537_STAT_PHY10G_QPLL_LOCK_2_3 (7678L)
+#define RST9537_STAT_PHY3S_MMCM_LOCKED (7679L)
+#define RST9537_STAT_PTP_MMCM_LOCKED (7680L)
+#define RST9537_STAT_SYNCE_MAIN_CLK_LOS (7681L)
+#define RST9537_STAT_SYS_MMCM_LOCKED (7682L)
+#define RST9537_STAT_TS_MMCM_LOCKED (7683L)
+#define RST9537_STICKY (7684L)
+#define RST9537_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7685L)
+#define RST9537_STICKY_DDR3_MMCM_UNLOCKED (7686L)
+#define RST9537_STICKY_DDR3_PLL_UNLOCKED (7687L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7688L)
+#define RST9537_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7689L)
+#define RST9537_STICKY_PHY3S_MMCM_UNLOCKED (7690L)
+#define RST9537_STICKY_PTP_MMCM_UNLOCKED (7691L)
+#define RST9537_STICKY_TS_MMCM_UNLOCKED (7692L)
+/* RST9538 */
+#define RST9538_CTRL (7693L)
+#define RST9538_CTRL_PTP_MMCM_CLKSEL (7694L)
+#define RST9538_CTRL_TS_CLKSEL (7695L)
+#define RST9538_CTRL_TS_CLKSEL_OVERRIDE (7696L)
+#define RST9538_RST (7697L)
+#define RST9538_RST_DDR3 (7698L)
+#define RST9538_RST_DDR3_IDLY_MMCM (7699L)
+#define RST9538_RST_NSEB (7700L)
+#define RST9538_RST_PERIPH (7701L)
+#define RST9538_RST_PHY10G_QPLL (7702L)
+#define RST9538_RST_PHY3S10G (7703L)
+#define RST9538_RST_PHY3S_MMCM (7704L)
+#define RST9538_RST_PTP (7705L)
+#define RST9538_RST_PTP_MMCM (7706L)
+#define RST9538_RST_RPP (7707L)
+#define RST9538_RST_SDC (7708L)
+#define RST9538_RST_SI5326 (7709L)
+#define RST9538_RST_SYS (7710L)
+#define RST9538_RST_TS (7711L)
+#define RST9538_RST_TS_MMCM (7712L)
+#define RST9538_RST_STAT (7713L)
+#define RST9538_RST_STAT_PCS_RESET_BY_SERDES (7714L)
+#define RST9538_STAT (7715L)
+#define RST9538_STAT_DDR3_IDLY_MMCM_LOCKED (7716L)
+#define RST9538_STAT_DDR3_MMCM_LOCKED (7717L)
+#define RST9538_STAT_DDR3_PLL_LOCKED (7718L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_0_1 (7719L)
+#define RST9538_STAT_PHY10G_QPLL_LOCK_2_3 (7720L)
+#define RST9538_STAT_PHY3S_MMCM_LOCKED (7721L)
+#define RST9538_STAT_PTP_MMCM_LOCKED (7722L)
+#define RST9538_STAT_SYNCE_MAIN_CLK_LOS (7723L)
+#define RST9538_STAT_SYS_MMCM_LOCKED (7724L)
+#define RST9538_STAT_TS_MMCM_LOCKED (7725L)
+#define RST9538_STICKY (7726L)
+#define RST9538_STICKY_DDR3_IDLY_MMCM_UNLOCKED (7727L)
+#define RST9538_STICKY_DDR3_MMCM_UNLOCKED (7728L)
+#define RST9538_STICKY_DDR3_PLL_UNLOCKED (7729L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_0_1 (7730L)
+#define RST9538_STICKY_PHY10G_QPLL_UNLOCK_2_3 (7731L)
+#define RST9538_STICKY_PHY3S_MMCM_UNLOCKED (7732L)
+#define RST9538_STICKY_PTP_MMCM_UNLOCKED (7733L)
+#define RST9538_STICKY_TS_MMCM_UNLOCKED (7734L)
+/* RST9539 */
+#define RST9539_CTRL (7735L)
+#define RST9539_CTRL_PTP_MMCM_CLKSEL (7736L)
+#define RST9539_CTRL_TS_CLKSEL (7737L)
+#define RST9539_CTRL_TS_CLKSEL_OVERRIDE (7738L)
+#define RST9539_POWER (7739L)
+#define RST9539_POWER_PU_NSEB (7740L)
+#define RST9539_POWER_PU_PHY (7741L)
+#define RST9539_RST (7742L)
+#define RST9539_RST_CORE_MMCM (7743L)
+#define RST9539_RST_DDR4 (7744L)
+#define RST9539_RST_PERIPH (7745L)
+#define RST9539_RST_PHY (7746L)
+#define RST9539_RST_PTP (7747L)
+#define RST9539_RST_PTP_MMCM (7748L)
+#define RST9539_RST_RPP (7749L)
+#define RST9539_RST_SDC (7750L)
+#define RST9539_RST_SYS (7751L)
+#define RST9539_RST_SYS_MMCM (7752L)
+#define RST9539_RST_TMC (7753L)
+#define RST9539_RST_TS (7754L)
+#define RST9539_RST_TS_MMCM (7755L)
+#define RST9539_STAT (7756L)
+#define RST9539_STAT_CORE_MMCM_LOCKED (7757L)
+#define RST9539_STAT_DDR4_MMCM_LOCKED (7758L)
+#define RST9539_STAT_DDR4_PLL_LOCKED (7759L)
+#define RST9539_STAT_PTP_MMCM_LOCKED (7760L)
+#define RST9539_STAT_SYS_MMCM_LOCKED (7761L)
+#define RST9539_STAT_TS_MMCM_LOCKED (7762L)
+#define RST9539_STICKY (7763L)
+#define RST9539_STICKY_CORE_MMCM_UNLOCKED (7764L)
+#define RST9539_STICKY_DDR4_MMCM_UNLOCKED (7765L)
+#define RST9539_STICKY_DDR4_PLL_UNLOCKED (7766L)
+#define RST9539_STICKY_PTP_MMCM_UNLOCKED (7767L)
+#define RST9539_STICKY_SYS_MMCM_UNLOCKED (7768L)
+#define RST9539_STICKY_TS_MMCM_UNLOCKED (7769L)
+/* RST9540 */
+#define RST9540_CTRL (7770L)
+#define RST9540_CTRL_PTP_MMCM_CLKSEL (7771L)
+#define RST9540_CTRL_TS_CLKSEL (7772L)
+#define RST9540_CTRL_TS_CLKSEL_OVERRIDE (7773L)
+#define RST9540_POWER (7774L)
+#define RST9540_POWER_PU_NSEB (7775L)
+#define RST9540_POWER_PU_PHY (7776L)
+#define RST9540_RST (7777L)
+#define RST9540_RST_CORE_MMCM (7778L)
+#define RST9540_RST_DDR4 (7779L)
+#define RST9540_RST_MAC_RX (7780L)
+#define RST9540_RST_MAC_TX (7781L)
+#define RST9540_RST_PCS_RX (7782L)
+#define RST9540_RST_PERIPH (7783L)
+#define RST9540_RST_PHY (7784L)
+#define RST9540_RST_PTP (7785L)
+#define RST9540_RST_PTP_MMCM (7786L)
+#define RST9540_RST_RPP (7787L)
+#define RST9540_RST_SDC (7788L)
+#define RST9540_RST_SERDES_RX (7789L)
+#define RST9540_RST_SERDES_RX_DATAPATH (7790L)
+#define RST9540_RST_SERDES_TX (7791L)
+#define RST9540_RST_SYS (7792L)
+#define RST9540_RST_SYS_MMCM (7793L)
+#define RST9540_RST_TMC (7794L)
+#define RST9540_RST_TS (7795L)
+#define RST9540_RST_TS_MMCM (7796L)
+#define RST9540_STAT (7797L)
+#define RST9540_STAT_CORE_MMCM_LOCKED (7798L)
+#define RST9540_STAT_DDR4_MMCM_LOCKED (7799L)
+#define RST9540_STAT_DDR4_PLL_LOCKED (7800L)
+#define RST9540_STAT_PTP_MMCM_LOCKED (7801L)
+#define RST9540_STAT_SYS_MMCM_LOCKED (7802L)
+#define RST9540_STAT_TS_MMCM_LOCKED (7803L)
+#define RST9540_STICKY (7804L)
+#define RST9540_STICKY_CORE_MMCM_UNLOCKED (7805L)
+#define RST9540_STICKY_DDR4_MMCM_UNLOCKED (7806L)
+#define RST9540_STICKY_DDR4_PLL_UNLOCKED (7807L)
+#define RST9540_STICKY_PTP_MMCM_UNLOCKED (7808L)
+#define RST9540_STICKY_SYS_MMCM_UNLOCKED (7809L)
+#define RST9540_STICKY_TS_MMCM_UNLOCKED (7810L)
+/* RST9541 */
+#define RST9541_LATCH (7811L)
+#define RST9541_LATCH_DDR4_CALIB_COMPLETE (7812L)
+#define RST9541_LATCH_PHY_RDY (7813L)
+#define RST9541_POWER (7814L)
+#define RST9541_POWER_PU_PHY (7815L)
+#define RST9541_RST (7816L)
+#define RST9541_RST_DDR4 (7817L)
+#define RST9541_RST_PERIPH (7818L)
+#define RST9541_RST_PHY (7819L)
+#define RST9541_RST_POWER (7820L)
+#define RST9541_RST_SYS (7821L)
+#define RST9541_STAT (7822L)
+#define RST9541_STAT_DDR4_CALIB_COMPLETE (7823L)
+#define RST9541_STAT_PHY_RDY (7824L)
+/* RST9542 */
+#define RST9542_LATCH (7825L)
+#define RST9542_LATCH_DDR4_CALIB_COMPLETE (7826L)
+#define RST9542_LATCH_PHY_RDY (7827L)
+#define RST9542_POWER (7828L)
+#define RST9542_POWER_PU_PHY (7829L)
+#define RST9542_RST (7830L)
+#define RST9542_RST_DDR4 (7831L)
+#define RST9542_RST_PERIPH (7832L)
+#define RST9542_RST_PHY (7833L)
+#define RST9542_RST_SYS (7834L)
+#define RST9542_STAT (7835L)
+#define RST9542_STAT_DDR4_CALIB_COMPLETE (7836L)
+#define RST9542_STAT_PHY_RDY (7837L)
+/* RST9543 */
+#define RST9543_CTRL (7838L)
+#define RST9543_CTRL_PTP_MMCM_CLKSEL (7839L)
+#define RST9543_CTRL_TS_CLKSEL (7840L)
+#define RST9543_CTRL_TS_CLKSEL_OVERRIDE (7841L)
+#define RST9543_POWER (7842L)
+#define RST9543_POWER_PU_NSEB (7843L)
+#define RST9543_POWER_PU_PHY (7844L)
+#define RST9543_RST (7845L)
+#define RST9543_RST_CORE_MMCM (7846L)
+#define RST9543_RST_DDR4 (7847L)
+#define RST9543_RST_MAC_RX (7848L)
+#define RST9543_RST_PERIPH (7849L)
+#define RST9543_RST_PHY (7850L)
+#define RST9543_RST_PTP (7851L)
+#define RST9543_RST_PTP_MMCM (7852L)
+#define RST9543_RST_RPP (7853L)
+#define RST9543_RST_SDC (7854L)
+#define RST9543_RST_SYS (7855L)
+#define RST9543_RST_SYS_MMCM (7856L)
+#define RST9543_RST_TMC (7857L)
+#define RST9543_RST_TS (7858L)
+#define RST9543_RST_TS_MMCM (7859L)
+#define RST9543_STAT (7860L)
+#define RST9543_STAT_CORE_MMCM_LOCKED (7861L)
+#define RST9543_STAT_DDR4_MMCM_LOCKED (7862L)
+#define RST9543_STAT_DDR4_PLL_LOCKED (7863L)
+#define RST9543_STAT_PTP_MMCM_LOCKED (7864L)
+#define RST9543_STAT_SYS_MMCM_LOCKED (7865L)
+#define RST9543_STAT_TS_MMCM_LOCKED (7866L)
+#define RST9543_STICKY (7867L)
+#define RST9543_STICKY_CORE_MMCM_UNLOCKED (7868L)
+#define RST9543_STICKY_DDR4_MMCM_UNLOCKED (7869L)
+#define RST9543_STICKY_DDR4_PLL_UNLOCKED (7870L)
+#define RST9543_STICKY_PTP_MMCM_UNLOCKED (7871L)
+#define RST9543_STICKY_SYS_MMCM_UNLOCKED (7872L)
+#define RST9543_STICKY_TS_MMCM_UNLOCKED (7873L)
+/* RST9544 */
+#define RST9544_CTRL (7874L)
+#define RST9544_CTRL_PTP_MMCM_CLKSEL (7875L)
+#define RST9544_CTRL_TS_CLKSEL (7876L)
+#define RST9544_CTRL_TS_CLKSEL_OVERRIDE (7877L)
+#define RST9544_CTRL_TS_CLKSEL_REF (7878L)
+#define RST9544_POWER (7879L)
+#define RST9544_POWER_PU_NSEB (7880L)
+#define RST9544_POWER_PU_PHY (7881L)
+#define RST9544_RST (7882L)
+#define RST9544_RST_CORE_MMCM (7883L)
+#define RST9544_RST_DDR4 (7884L)
+#define RST9544_RST_NFV_OVS (7885L)
+#define RST9544_RST_PERIPH (7886L)
+#define RST9544_RST_PHY (7887L)
+#define RST9544_RST_PTP (7888L)
+#define RST9544_RST_PTP_MMCM (7889L)
+#define RST9544_RST_RPP (7890L)
+#define RST9544_RST_SDC (7891L)
+#define RST9544_RST_SYS (7892L)
+#define RST9544_RST_SYS_MMCM (7893L)
+#define RST9544_RST_TMC (7894L)
+#define RST9544_RST_TS (7895L)
+#define RST9544_RST_TSM_REF_MMCM (7896L)
+#define RST9544_RST_TS_MMCM (7897L)
+#define RST9544_STAT (7898L)
+#define RST9544_STAT_CORE_MMCM_LOCKED (7899L)
+#define RST9544_STAT_DDR4_MMCM_LOCKED (7900L)
+#define RST9544_STAT_DDR4_PLL_LOCKED (7901L)
+#define RST9544_STAT_PTP_MMCM_LOCKED (7902L)
+#define RST9544_STAT_SYS_MMCM_LOCKED (7903L)
+#define RST9544_STAT_TSM_REF_MMCM_LOCKED (7904L)
+#define RST9544_STAT_TS_MMCM_LOCKED (7905L)
+#define RST9544_STICKY (7906L)
+#define RST9544_STICKY_CORE_MMCM_UNLOCKED (7907L)
+#define RST9544_STICKY_DDR4_MMCM_UNLOCKED (7908L)
+#define RST9544_STICKY_DDR4_PLL_UNLOCKED (7909L)
+#define RST9544_STICKY_PCI_SYS_MMCM_UNLOCKED (7910L)
+#define RST9544_STICKY_PTP_MMCM_UNLOCKED (7911L)
+#define RST9544_STICKY_SYS_MMCM_UNLOCKED (7912L)
+#define RST9544_STICKY_TSM_REF_MMCM_UNLOCKED (7913L)
+#define RST9544_STICKY_TS_MMCM_UNLOCKED (7914L)
+/* RST9545 */
+#define RST9545_CTRL (7915L)
+#define RST9545_CTRL_PTP_MMCM_CLKSEL (7916L)
+#define RST9545_CTRL_TS_CLKSEL (7917L)
+#define RST9545_CTRL_TS_CLKSEL_OVERRIDE (7918L)
+#define RST9545_POWER (7919L)
+#define RST9545_POWER_PU_NSEB (7920L)
+#define RST9545_POWER_PU_PHY (7921L)
+#define RST9545_RST (7922L)
+#define RST9545_RST_CORE_MMCM (7923L)
+#define RST9545_RST_DDR4 (7924L)
+#define RST9545_RST_PERIPH (7925L)
+#define RST9545_RST_PHY (7926L)
+#define RST9545_RST_PTP (7927L)
+#define RST9545_RST_PTP_MMCM (7928L)
+#define RST9545_RST_RPP (7929L)
+#define RST9545_RST_SDC (7930L)
+#define RST9545_RST_SYS (7931L)
+#define RST9545_RST_SYS_MMCM (7932L)
+#define RST9545_RST_TMC (7933L)
+#define RST9545_RST_TS (7934L)
+#define RST9545_RST_TS_MMCM (7935L)
+#define RST9545_STAT (7936L)
+#define RST9545_STAT_CORE_MMCM_LOCKED (7937L)
+#define RST9545_STAT_DDR4_MMCM_LOCKED (7938L)
+#define RST9545_STAT_DDR4_PLL_LOCKED (7939L)
+#define RST9545_STAT_PTP_MMCM_LOCKED (7940L)
+#define RST9545_STAT_SYS_MMCM_LOCKED (7941L)
+#define RST9545_STAT_TS_MMCM_LOCKED (7942L)
+#define RST9545_STICKY (7943L)
+#define RST9545_STICKY_CORE_MMCM_UNLOCKED (7944L)
+#define RST9545_STICKY_DDR4_MMCM_UNLOCKED (7945L)
+#define RST9545_STICKY_DDR4_PLL_UNLOCKED (7946L)
+#define RST9545_STICKY_PTP_MMCM_UNLOCKED (7947L)
+#define RST9545_STICKY_SYS_MMCM_UNLOCKED (7948L)
+#define RST9545_STICKY_TS_MMCM_UNLOCKED (7949L)
+/* RST9546 */
+#define RST9546_CTRL (7950L)
+#define RST9546_CTRL_PTP_MMCM_CLKSEL (7951L)
+#define RST9546_CTRL_TS_CLKSEL (7952L)
+#define RST9546_CTRL_TS_CLKSEL_OVERRIDE (7953L)
+#define RST9546_POWER (7954L)
+#define RST9546_POWER_PU_NSEB (7955L)
+#define RST9546_POWER_PU_PHY (7956L)
+#define RST9546_RST (7957L)
+#define RST9546_RST_CORE_MMCM (7958L)
+#define RST9546_RST_DDR4 (7959L)
+#define RST9546_RST_MAC_RX (7960L)
+#define RST9546_RST_MAC_TX (7961L)
+#define RST9546_RST_PCS_RX (7962L)
+#define RST9546_RST_PERIPH (7963L)
+#define RST9546_RST_PHY (7964L)
+#define RST9546_RST_PTP (7965L)
+#define RST9546_RST_PTP_MMCM (7966L)
+#define RST9546_RST_RPP (7967L)
+#define RST9546_RST_SDC (7968L)
+#define RST9546_RST_SERDES_RX (7969L)
+#define RST9546_RST_SERDES_RX_DATAPATH (7970L)
+#define RST9546_RST_SERDES_TX (7971L)
+#define RST9546_RST_SYS (7972L)
+#define RST9546_RST_SYS_MMCM (7973L)
+#define RST9546_RST_TMC (7974L)
+#define RST9546_RST_TS (7975L)
+#define RST9546_RST_TS_MMCM (7976L)
+#define RST9546_STAT (7977L)
+#define RST9546_STAT_CORE_MMCM_LOCKED (7978L)
+#define RST9546_STAT_DDR4_MMCM_LOCKED (7979L)
+#define RST9546_STAT_DDR4_PLL_LOCKED (7980L)
+#define RST9546_STAT_PTP_MMCM_LOCKED (7981L)
+#define RST9546_STAT_SYS_MMCM_LOCKED (7982L)
+#define RST9546_STAT_TS_MMCM_LOCKED (7983L)
+#define RST9546_STICKY (7984L)
+#define RST9546_STICKY_CORE_MMCM_UNLOCKED (7985L)
+#define RST9546_STICKY_DDR4_MMCM_UNLOCKED (7986L)
+#define RST9546_STICKY_DDR4_PLL_UNLOCKED (7987L)
+#define RST9546_STICKY_PTP_MMCM_UNLOCKED (7988L)
+#define RST9546_STICKY_SYS_MMCM_UNLOCKED (7989L)
+#define RST9546_STICKY_TS_MMCM_UNLOCKED (7990L)
+/* RST9547 */
+#define RST9547_LATCH (7991L)
+#define RST9547_LATCH_DDR4_CALIB_COMPLETE (7992L)
+#define RST9547_LATCH_PHY_RDY (7993L)
+#define RST9547_POWER (7994L)
+#define RST9547_POWER_PU_PHY (7995L)
+#define RST9547_RST (7996L)
+#define RST9547_RST_DDR4 (7997L)
+#define RST9547_RST_PERIPH (7998L)
+#define RST9547_RST_PHY (7999L)
+#define RST9547_RST_SYS (8000L)
+#define RST9547_STAT (8001L)
+#define RST9547_STAT_DDR4_CALIB_COMPLETE (8002L)
+#define RST9547_STAT_PHY_RDY (8003L)
+/* RST9548 */
+#define RST9548_CTRL (8004L)
+#define RST9548_CTRL_PTP_MMCM_CLKSEL (8005L)
+#define RST9548_CTRL_TS_CLKSEL (8006L)
+#define RST9548_CTRL_TS_CLKSEL_OVERRIDE (8007L)
+#define RST9548_POWER (8008L)
+#define RST9548_POWER_PU_NSEB (8009L)
+#define RST9548_POWER_PU_PHY (8010L)
+#define RST9548_RST (8011L)
+#define RST9548_RST_CORE_MMCM (8012L)
+#define RST9548_RST_DDR4 (8013L)
+#define RST9548_RST_PERIPH (8014L)
+#define RST9548_RST_PHY (8015L)
+#define RST9548_RST_PTP (8016L)
+#define RST9548_RST_PTP_MMCM (8017L)
+#define RST9548_RST_RPP (8018L)
+#define RST9548_RST_SDC (8019L)
+#define RST9548_RST_SYS (8020L)
+#define RST9548_RST_SYS_MMCM (8021L)
+#define RST9548_RST_TMC (8022L)
+#define RST9548_RST_TS (8023L)
+#define RST9548_RST_TS_MMCM (8024L)
+#define RST9548_STAT (8025L)
+#define RST9548_STAT_CORE_MMCM_LOCKED (8026L)
+#define RST9548_STAT_DDR4_MMCM_LOCKED (8027L)
+#define RST9548_STAT_DDR4_PLL_LOCKED (8028L)
+#define RST9548_STAT_PTP_MMCM_LOCKED (8029L)
+#define RST9548_STAT_SYS_MMCM_LOCKED (8030L)
+#define RST9548_STAT_TS_MMCM_LOCKED (8031L)
+#define RST9548_STICKY (8032L)
+#define RST9548_STICKY_CORE_MMCM_UNLOCKED (8033L)
+#define RST9548_STICKY_DDR4_MMCM_UNLOCKED (8034L)
+#define RST9548_STICKY_DDR4_PLL_UNLOCKED (8035L)
+#define RST9548_STICKY_PTP_MMCM_UNLOCKED (8036L)
+#define RST9548_STICKY_SYS_MMCM_UNLOCKED (8037L)
+#define RST9548_STICKY_TS_MMCM_UNLOCKED (8038L)
+/* RST9549 */
+#define RST9549_CTRL (8039L)
+#define RST9549_CTRL_PTP_MMCM_CLKSEL (8040L)
+#define RST9549_CTRL_TS_CLKSEL (8041L)
+#define RST9549_CTRL_TS_CLKSEL_OVERRIDE (8042L)
+#define RST9549_POWER (8043L)
+#define RST9549_POWER_PU_NSEB (8044L)
+#define RST9549_POWER_PU_PHY (8045L)
+#define RST9549_RST (8046L)
+#define RST9549_RST_CORE_MMCM (8047L)
+#define RST9549_RST_DDR4 (8048L)
+#define RST9549_RST_PERIPH (8049L)
+#define RST9549_RST_PHY (8050L)
+#define RST9549_RST_PTP (8051L)
+#define RST9549_RST_PTP_MMCM (8052L)
+#define RST9549_RST_RPP (8053L)
+#define RST9549_RST_SDC (8054L)
+#define RST9549_RST_SYS (8055L)
+#define RST9549_RST_SYS_MMCM (8056L)
+#define RST9549_RST_TMC (8057L)
+#define RST9549_RST_TS (8058L)
+#define RST9549_RST_TS_MMCM (8059L)
+#define RST9549_STAT (8060L)
+#define RST9549_STAT_CORE_MMCM_LOCKED (8061L)
+#define RST9549_STAT_DDR4_MMCM_LOCKED (8062L)
+#define RST9549_STAT_DDR4_PLL_LOCKED (8063L)
+#define RST9549_STAT_PTP_MMCM_LOCKED (8064L)
+#define RST9549_STAT_SYS_MMCM_LOCKED (8065L)
+#define RST9549_STAT_TS_MMCM_LOCKED (8066L)
+#define RST9549_STICKY (8067L)
+#define RST9549_STICKY_CORE_MMCM_UNLOCKED (8068L)
+#define RST9549_STICKY_DDR4_MMCM_UNLOCKED (8069L)
+#define RST9549_STICKY_DDR4_PLL_UNLOCKED (8070L)
+#define RST9549_STICKY_PTP_MMCM_UNLOCKED (8071L)
+#define RST9549_STICKY_SYS_MMCM_UNLOCKED (8072L)
+#define RST9549_STICKY_TS_MMCM_UNLOCKED (8073L)
+/* RST9553 */
+#define RST9553_LATCH (8074L)
+#define RST9553_LATCH_DDR4_CALIB_COMPLETE (8075L)
+#define RST9553_LATCH_PHY_RDY (8076L)
+#define RST9553_RST (8077L)
+#define RST9553_RST_DDR4 (8078L)
+#define RST9553_RST_PHY (8079L)
+#define RST9553_RST_SYS (8080L)
+#define RST9553_STAT (8081L)
+#define RST9553_STAT_DDR4_CALIB_COMPLETE (8082L)
+#define RST9553_STAT_PHY_RDY (8083L)
+/* RST9555 */
+#define RST9555_CTRL (8094L)
+#define RST9555_CTRL_PTP_MMCM_CLKSEL (8095L)
+#define RST9555_CTRL_TS_CLKSEL (8096L)
+#define RST9555_CTRL_TS_CLKSEL_OVERRIDE (8097L)
+#define RST9555_POWER (8098L)
+#define RST9555_POWER_PU_NSEB (8099L)
+#define RST9555_POWER_PU_PHY (8100L)
+#define RST9555_RST (8101L)
+#define RST9555_RST_CORE_MMCM (8102L)
+#define RST9555_RST_DDR4 (8103L)
+#define RST9555_RST_PERIPH (8104L)
+#define RST9555_RST_PHY (8105L)
+#define RST9555_RST_PTP (8106L)
+#define RST9555_RST_PTP_MMCM (8107L)
+#define RST9555_RST_RPP (8108L)
+#define RST9555_RST_SDC (8109L)
+#define RST9555_RST_SYS (8110L)
+#define RST9555_RST_SYS_MMCM (8111L)
+#define RST9555_RST_TMC (8112L)
+#define RST9555_RST_TS (8113L)
+#define RST9555_RST_TS_MMCM (8114L)
+#define RST9555_STAT (8115L)
+#define RST9555_STAT_CORE_MMCM_LOCKED (8116L)
+#define RST9555_STAT_DDR4_MMCM_LOCKED (8117L)
+#define RST9555_STAT_DDR4_PLL_LOCKED (8118L)
+#define RST9555_STAT_PTP_MMCM_LOCKED (8119L)
+#define RST9555_STAT_SYS_MMCM_LOCKED (8120L)
+#define RST9555_STAT_TS_MMCM_LOCKED (8121L)
+#define RST9555_STICKY (8122L)
+#define RST9555_STICKY_CORE_MMCM_UNLOCKED (8123L)
+#define RST9555_STICKY_DDR4_MMCM_UNLOCKED (8124L)
+#define RST9555_STICKY_DDR4_PLL_UNLOCKED (8125L)
+#define RST9555_STICKY_PTP_MMCM_UNLOCKED (8126L)
+#define RST9555_STICKY_SYS_MMCM_UNLOCKED (8127L)
+#define RST9555_STICKY_TS_MMCM_UNLOCKED (8128L)
+/* RST9559 */
+#define RST9559_LATCH (8129L)
+#define RST9559_LATCH_DDR4_CALIB_COMPLETE (8130L)
+#define RST9559_LATCH_PHY_RDY (8131L)
+#define RST9559_RST (8132L)
+#define RST9559_RST_DDR4 (8133L)
+#define RST9559_RST_PHY (8134L)
+#define RST9559_RST_SYS (8135L)
+#define RST9559_STAT (8136L)
+#define RST9559_STAT_DDR4_CALIB_COMPLETE (8137L)
+#define RST9559_STAT_PHY_RDY (8138L)
+/* RST9563 */
+#define RST9563_CTRL (8159L)
+#define RST9563_CTRL_PTP_MMCM_CLKSEL (8160L)
+#define RST9563_CTRL_TS_CLKSEL (8161L)
+#define RST9563_CTRL_TS_CLKSEL_OVERRIDE (8162L)
+#define RST9563_POWER (8163L)
+#define RST9563_POWER_PU_NSEB (8164L)
+#define RST9563_POWER_PU_PHY (8165L)
+#define RST9563_RST (8166L)
+#define RST9563_RST_CORE_MMCM (8167L)
+#define RST9563_RST_DDR4 (8168L)
+#define RST9563_RST_MAC_RX (8169L)
+#define RST9563_RST_PERIPH (8170L)
+#define RST9563_RST_PHY (8171L)
+#define RST9563_RST_PTP (8172L)
+#define RST9563_RST_PTP_MMCM (8173L)
+#define RST9563_RST_RPP (8174L)
+#define RST9563_RST_SDC (8175L)
+#define RST9563_RST_SYS (8176L)
+#define RST9563_RST_SYS_MMCM (8177L)
+#define RST9563_RST_TMC (8178L)
+#define RST9563_RST_TS (8179L)
+#define RST9563_RST_TSM_REF_MMCM (8180L)
+#define RST9563_RST_TS_MMCM (8181L)
+#define RST9563_STAT (8182L)
+#define RST9563_STAT_CORE_MMCM_LOCKED (8183L)
+#define RST9563_STAT_DDR4_MMCM_LOCKED (8184L)
+#define RST9563_STAT_DDR4_PLL_LOCKED (8185L)
+#define RST9563_STAT_PTP_MMCM_LOCKED (8186L)
+#define RST9563_STAT_SYS_MMCM_LOCKED (8187L)
+#define RST9563_STAT_TS_MMCM_LOCKED (8188L)
+#define RST9563_STICKY (8189L)
+#define RST9563_STICKY_CORE_MMCM_UNLOCKED (8190L)
+#define RST9563_STICKY_DDR4_MMCM_UNLOCKED (8191L)
+#define RST9563_STICKY_DDR4_PLL_UNLOCKED (8192L)
+#define RST9563_STICKY_PTP_MMCM_UNLOCKED (8193L)
+#define RST9563_STICKY_SYS_MMCM_UNLOCKED (8194L)
+#define RST9563_STICKY_TS_MMCM_UNLOCKED (8195L)
+/* RTD: host-buffer TX datapath IDs -- control, host-buffer and read-pointer DMA addresses, write pointer and status */
+#define RTD_CTRL (8196L)
+#define RTD_CTRL_ENABLE_RTD (8197L)
+#define RTD_CTRL_ENABLE_TX_FLUSH (8198L)
+#define RTD_CTRL_ENABLE_TX_MACPHY (8199L)
+#define RTD_CTRL_RDPTR_UPDATE_TIMER (8200L)
+#define RTD_CTRL_RESERVED (8201L)
+#define RTD_CTRL_TX_SPEED (8202L)
+#define RTD_DEB_REG1 (8203L)
+#define RTD_DEB_REG1_VALUE (8204L)
+#define RTD_DEB_REG2 (8205L)
+#define RTD_DEB_REG2_VALUE (8206L)
+#define RTD_DEB_REG3 (8207L)
+#define RTD_DEB_REG3_VALUE (8208L)
+#define RTD_HOSTBUFFER_ADR_HI (8209L)
+#define RTD_HOSTBUFFER_ADR_HI_VALUE (8210L)
+#define RTD_HOSTBUFFER_ADR_LO (8211L)
+#define RTD_HOSTBUFFER_ADR_LO_VALUE (8212L)
+#define RTD_RDPTR_ADR_HI (8213L)
+#define RTD_RDPTR_ADR_HI_VALUE (8214L)
+#define RTD_RDPTR_ADR_LO (8215L)
+#define RTD_RDPTR_ADR_LO_VALUE (8216L)
+#define RTD_STATUS (8217L)
+#define RTD_STATUS_HB_EMPTY (8218L)
+#define RTD_STATUS_LHF_EMPTY (8219L)
+#define RTD_STATUS_UNPACKER_STATUS (8220L)
+#define RTD_WRPTR (8221L)
+#define RTD_WRPTR_VALUE (8222L)
+/* RTD_HMP: RTD companion block IDs -- per-instance HMP enables and write-pointer poll timer (HMP expansion not shown here; presumably host memory poller -- confirm) */
+#define RTD_HMP_CTRL (8223L)
+#define RTD_HMP_CTRL_ENABLE_HMP_0 (8224L)
+#define RTD_HMP_CTRL_ENABLE_HMP_1 (8225L)
+#define RTD_HMP_CTRL_ENABLE_HMP_2 (8226L)
+#define RTD_HMP_CTRL_ENABLE_HMP_3 (8227L)
+#define RTD_HMP_CTRL_WRPTR_POLL_TIMER (8228L)
+#define RTD_HMP_DEB_REG1 (8229L)
+#define RTD_HMP_DEB_REG1_VALUE (8230L)
+#define RTD_HMP_DEB_REG2 (8231L)
+#define RTD_HMP_DEB_REG2_VALUE (8232L)
+#define RTD_HMP_DEB_REG3 (8233L)
+#define RTD_HMP_DEB_REG3_VALUE (8234L)
+#define RTD_HMP_STATUS (8235L)
+#define RTD_HMP_STATUS_HMP_ACTIVE (8236L)
+#define RTD_HMP_WRPTR_ADR_HI (8237L)
+#define RTD_HMP_WRPTR_ADR_HI_VALUE (8238L)
+#define RTD_HMP_WRPTR_ADR_LO (8239L)
+#define RTD_HMP_WRPTR_ADR_LO_VALUE (8240L)
+/* RTX: TX FIFO interface IDs -- control (port/size), status (almost-full/busy) and TXF count/payload */
+#define RTX_CTRL (8241L)
+#define RTX_CTRL_PORT (8242L)
+#define RTX_CTRL_SIZE (8243L)
+#define RTX_STATUS (8244L)
+#define RTX_STATUS_AF (8245L)
+#define RTX_STATUS_BUSY (8246L)
+#define RTX_TXF_CTRL (8247L)
+#define RTX_TXF_CTRL_CNT (8248L)
+#define RTX_TXF_DATA (8249L)
+#define RTX_TXF_DATA_PAYLOAD (8250L)
+/* RXAUI */
+#define RXAUI_DEBUG (8268L)
+#define RXAUI_DEBUG_MGT_CV_0 (8269L)
+#define RXAUI_DEBUG_MGT_CV_1 (8270L)
+#define RXAUI_DEBUG_MGT_CV_2 (8271L)
+#define RXAUI_DEBUG_MGT_CV_3 (8272L)
+#define RXAUI_DEBUG_MGT_CV_4 (8273L)
+#define RXAUI_DEBUG_MGT_CV_5 (8274L)
+#define RXAUI_DEBUG_MGT_CV_6 (8275L)
+#define RXAUI_DEBUG_MGT_CV_7 (8276L)
+#define RXAUI_DEBUG_MGT_RXLOCK_0 (8277L)
+#define RXAUI_DEBUG_MGT_RXLOCK_1 (8278L)
+#define RXAUI_DEBUG_MGT_RX_RESET (8279L)
+#define RXAUI_DEBUG_MGT_TX_RESET (8280L)
+#define RXAUI_DEBUG_MMCM1_LOCKED (8281L)
+#define RXAUI_DRP_AD (8282L)
+#define RXAUI_DRP_AD_ADDRESS (8283L)
+#define RXAUI_DRP_AD_DFEEYEDACMON (8284L)
+#define RXAUI_DRP_AD_GTX_NO (8285L)
+#define RXAUI_DRP_AD_READY (8286L)
+#define RXAUI_DRP_AD_RESERVED3 (8287L)
+#define RXAUI_DRP_AD_RESERVED4 (8288L)
+#define RXAUI_DRP_DA (8289L)
+#define RXAUI_DRP_DA_DATA (8290L)
+#define RXAUI_GTX_CONFIG (8291L)
+#define RXAUI_GTX_CONFIG_LOOPBACK (8292L)
+#define RXAUI_GTX_CONFIG_LOOPBACKMUX (8293L)
+#define RXAUI_GTX_CONFIG_PRBSCNTRESET (8294L)
+#define RXAUI_GTX_CONFIG_RESERVED6 (8295L)
+#define RXAUI_GTX_CONFIG_RESERVED7 (8296L)
+#define RXAUI_GTX_CONFIG_RXENPRBSTST (8297L)
+#define RXAUI_GTX_CONFIG_RXEQMIX (8298L)
+#define RXAUI_GTX_CONFIG_TXDIFFCTRL (8299L)
+#define RXAUI_GTX_CONFIG_TXENPRBSTST (8300L)
+#define RXAUI_GTX_CONFIG_TXPOSTEMPHAS (8301L)
+#define RXAUI_GTX_CONFIG_TXPRBSFORCEE (8302L)
+#define RXAUI_GTX_CONFIG_TXPREEMPHASI (8303L)
+#define RXAUI_GTX_STAT (8304L)
+#define RXAUI_GTX_STAT_RESERVED10 (8305L)
+#define RXAUI_GTX_STAT_RESERVED11 (8306L)
+#define RXAUI_GTX_STAT_RESERVED12 (8307L)
+#define RXAUI_GTX_STAT_RESERVED13 (8308L)
+#define RXAUI_GTX_STAT_RESERVED8 (8309L)
+#define RXAUI_GTX_STAT_RESERVED9 (8310L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS0 (8311L)
+#define RXAUI_GTX_STAT_RXBUFSTATUS1 (8312L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_0 (8313L)
+#define RXAUI_GTX_STAT_RXBYTEISAL_1 (8314L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_0 (8315L)
+#define RXAUI_GTX_STAT_RXBYTEREAL_1 (8316L)
+#define RXAUI_GTX_STAT_RXCHANREAL_0 (8317L)
+#define RXAUI_GTX_STAT_RXCHANREAL_1 (8318L)
+#define RXAUI_GTX_STAT_RXCOMMADET_0 (8319L)
+#define RXAUI_GTX_STAT_RXCOMMADET_1 (8320L)
+#define RXAUI_GTX_STAT_RXPRBSERR_0 (8321L)
+#define RXAUI_GTX_STAT_RXPRBSERR_1 (8322L)
+/* SDC: controller IDs exposing cell counters, fill levels and calibration/PLL/MMCM lock status (SDC expansion not given in this chunk -- confirm) */
+#define SDC_CELL_CNT (8612L)
+#define SDC_CELL_CNT_CELL_CNT (8613L)
+#define SDC_CELL_CNT_PERIOD (8614L)
+#define SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD (8615L)
+#define SDC_CTRL (8616L)
+#define SDC_CTRL_INIT (8617L)
+#define SDC_CTRL_RESET_POINTERS (8618L)
+#define SDC_CTRL_RUN_TEST (8619L)
+#define SDC_CTRL_STOP_CLIENT (8620L)
+#define SDC_CTRL_TEST_EN (8621L)
+#define SDC_FILL_LVL (8622L)
+#define SDC_FILL_LVL_FILL_LVL (8623L)
+#define SDC_MAX_FILL_LVL (8624L)
+#define SDC_MAX_FILL_LVL_MAX_FILL_LVL (8625L)
+#define SDC_STAT (8626L)
+#define SDC_STAT_CALIB (8627L)
+#define SDC_STAT_CELL_CNT_STOPPED (8628L)
+#define SDC_STAT_ERR_FOUND (8629L)
+#define SDC_STAT_INIT_DONE (8630L)
+#define SDC_STAT_MMCM_LOCK (8631L)
+#define SDC_STAT_PLL_LOCK (8632L)
+#define SDC_STAT_RESETTING (8633L)
+/* SLC: RCP recipe-table IDs -- PCAP and packet-tail fields (TAIL_DYN/TAIL_OFS/TAIL_SLC_EN); SLC presumably "slicer" -- confirm */
+#define SLC_RCP_CTRL (8681L)
+#define SLC_RCP_CTRL_ADR (8682L)
+#define SLC_RCP_CTRL_CNT (8683L)
+#define SLC_RCP_DATA (8684L)
+#define SLC_RCP_DATA_PCAP (8685L)
+#define SLC_RCP_DATA_TAIL_DYN (8686L)
+#define SLC_RCP_DATA_TAIL_OFS (8687L)
+#define SLC_RCP_DATA_TAIL_SLC_EN (8688L)
+/* SLC_LR: no register/field IDs follow this marker in this header */
+/* SMM: segment memory map IDs -- per-segment physical address/size tables, start-segment table, ready and invalid-sticky status */
+#define SMM_CTRL (8770L)
+#define SMM_CTRL_ENABLE (8771L)
+#define SMM_READY_STATUS (8772L)
+#define SMM_READY_STATUS_D (8773L)
+#define SMM_SEG_INVLD_STICKY_STATUS (8774L)
+#define SMM_SEG_INVLD_STICKY_STATUS_D (8775L)
+#define SMM_SEG_MEM_CTRL (8776L)
+#define SMM_SEG_MEM_CTRL_A (8777L)
+#define SMM_SEG_MEM_CTRL_CNT (8778L)
+#define SMM_SEG_MEM_DATA (8779L)
+#define SMM_SEG_MEM_DATA_PHYADDR (8780L)
+#define SMM_SEG_MEM_DATA_SIZE (8781L)
+#define SMM_START_SEG_MEM_CTRL (8782L)
+#define SMM_START_SEG_MEM_CTRL_A (8783L)
+#define SMM_START_SEG_MEM_CTRL_CNT (8784L)
+#define SMM_START_SEG_MEM_DATA (8785L)
+#define SMM_START_SEG_MEM_DATA_SEG (8786L)
+/* SPIM: SPI master controller IDs (CR/SR/DTR/DRR/SRR core plus mirror/sync CONF registers); CR/SR/DTR/DRR/SRR layout resembles Xilinx AXI Quad SPI -- confirm */
+#define SPIM_CFG (8793L)
+#define SPIM_CFG_PRE (8794L)
+#define SPIM_CMD (8795L)
+#define SPIM_CMD_ADDR (8796L)
+#define SPIM_CMD_CMD (8797L)
+#define SPIM_CMD_DATA (8798L)
+#define SPIM_CONF0 (8799L)
+#define SPIM_CONF0_BYTE_PACE (8800L)
+#define SPIM_CONF0_MIRROR_EN (8801L)
+#define SPIM_CONF0_MSB_FIRST (8802L)
+#define SPIM_CONF0_PRESCAL_CLK (8803L)
+#define SPIM_CONF0_RESTART (8804L)
+#define SPIM_CONF0_RST (8805L)
+#define SPIM_CONF0_SYNC_MON_EN (8806L)
+#define SPIM_CONF1 (8807L)
+#define SPIM_CONF1_MIRROR_PACE (8808L)
+#define SPIM_CONF1_MIRROR_SCAN (8809L)
+#define SPIM_CONF1_SYNCTIMEOUT (8810L)
+#define SPIM_CONF2 (8811L)
+#define SPIM_CONF2_MIRROR_PRESC (8812L)
+#define SPIM_CONF2_OPCODE_RD (8813L)
+#define SPIM_CONF2_OPCODE_WR (8814L)
+#define SPIM_CONF3 (8815L)
+#define SPIM_CONF3_MIRROR_RDADR (8816L)
+#define SPIM_CONF3_MIRROR_WRADR (8817L)
+#define SPIM_CR (8818L)
+#define SPIM_CR_EN (8819L)
+#define SPIM_CR_LOOP (8820L)
+#define SPIM_CR_RXRST (8821L)
+#define SPIM_CR_TXRST (8822L)
+#define SPIM_DRR (8823L)
+#define SPIM_DRR_DRR (8824L)
+#define SPIM_DTR (8825L)
+#define SPIM_DTR_DTR (8826L)
+#define SPIM_REPLY (8827L)
+#define SPIM_REPLY_RDDATA (8828L)
+#define SPIM_SR (8829L)
+#define SPIM_SR_DONE (8830L)
+#define SPIM_SR_RXEMPTY (8831L)
+#define SPIM_SR_RXFULL (8832L)
+#define SPIM_SR_RXLVL (8833L)
+#define SPIM_SR_TXEMPTY (8834L)
+#define SPIM_SR_TXFULL (8835L)
+#define SPIM_SR_TXLVL (8836L)
+#define SPIM_SRR (8837L)
+#define SPIM_SRR_RST (8838L)
+#define SPIM_STATUS (8839L)
+#define SPIM_STATUS_CMDPENDING (8840L)
+#define SPIM_STATUS_RESERVED (8841L)
+#define SPIM_STATUS_RESYNCDETECT (8842L)
+#define SPIM_STATUS_RESYNCING (8843L)
+/* SPIS: SPI slave controller IDs (CR/SR/DTR/DRR/SRR plus RAM access and frame/read/write error status bits) */
+#define SPIS_CR (8844L)
+#define SPIS_CR_DEBUG (8845L)
+#define SPIS_CR_EN (8846L)
+#define SPIS_CR_LOOP (8847L)
+#define SPIS_CR_RXRST (8848L)
+#define SPIS_CR_TXRST (8849L)
+#define SPIS_DRR (8850L)
+#define SPIS_DRR_DRR (8851L)
+#define SPIS_DTR (8852L)
+#define SPIS_DTR_DTR (8853L)
+#define SPIS_RAM_CTRL (8854L)
+#define SPIS_RAM_CTRL_ADR (8855L)
+#define SPIS_RAM_CTRL_CNT (8856L)
+#define SPIS_RAM_DATA (8857L)
+#define SPIS_RAM_DATA_DATA (8858L)
+#define SPIS_SR (8859L)
+#define SPIS_SR_DONE (8860L)
+#define SPIS_SR_FRAME_ERR (8861L)
+#define SPIS_SR_READ_ERR (8862L)
+#define SPIS_SR_RXEMPTY (8863L)
+#define SPIS_SR_RXFULL (8864L)
+#define SPIS_SR_RXLVL (8865L)
+#define SPIS_SR_TXEMPTY (8866L)
+#define SPIS_SR_TXFULL (8867L)
+#define SPIS_SR_TXLVL (8868L)
+#define SPIS_SR_WRITE_ERR (8869L)
+#define SPIS_SRR (8870L)
+#define SPIS_SRR_RST (8871L)
+/* STA */
+#define STA_BYTE (8872L)
+#define STA_BYTE_CNT (8873L)
+#define STA_CFG (8874L)
+#define STA_CFG_CNT_CLEAR (8875L)
+#define STA_CFG_CNT_FRZ (8876L)
+#define STA_CFG_DMA_ENA (8877L)
+#define STA_CFG_TX_DISABLE (8878L)
+#define STA_CV_ERR (8879L)
+#define STA_CV_ERR_CNT (8880L)
+#define STA_FCS_ERR (8881L)
+#define STA_FCS_ERR_CNT (8882L)
+#define STA_HOST_ADR_LSB (8883L)
+#define STA_HOST_ADR_LSB_LSB (8884L)
+#define STA_HOST_ADR_MSB (8885L)
+#define STA_HOST_ADR_MSB_MSB (8886L)
+#define STA_PCKT (8887L)
+#define STA_PCKT_CNT (8888L)
+#define STA_STATUS (8889L)
+#define STA_STATUS_STAT_TOGGLE_MISSED (8890L)
+/* TBH */
+#define TBH_CTRL (9103L)
+#define TBH_CTRL_DISABLE_LR_LB (9104L)
+#define TBH_CTRL_ENABLE (9105L)
+#define TBH_CTRL_PORT (9106L)
+#define TBH_CTRL_PORT_AUS (9107L)
+#define TBH_CTRL_SEGMENT (9108L)
+#define TBH_CTRL_SEGMENT_SIZE (9109L)
+#define TBH_DBG_DLN_ERR (9110L)
+#define TBH_DBG_DLN_ERR_E (9111L)
+#define TBH_DBG_DLN_ERR_HB (9112L)
+#define TBH_DBG_ILLEGAL_RANGE (9113L)
+#define TBH_DBG_ILLEGAL_RANGE_E (9114L)
+#define TBH_DBG_ILLEGAL_RANGE_HB (9115L)
+#define TBH_DBG_MAX_PCI_QUIET (9116L)
+#define TBH_DBG_MAX_PCI_QUIET_CYCLES (9117L)
+#define TBH_DISABLE (9118L)
+#define TBH_DISABLE_DISABLE (9119L)
+#define TBH_DISABLE_HB (9120L)
+#define TBH_HB_DSC_MEM_CTRL (9121L)
+#define TBH_HB_DSC_MEM_CTRL_ADR (9122L)
+#define TBH_HB_DSC_MEM_CTRL_CNT (9123L)
+#define TBH_HB_DSC_MEM_DATA (9124L)
+#define TBH_HB_DSC_MEM_DATA_DT (9125L)
+#define TBH_HB_DSC_MEM_DATA_FCS (9126L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_POS (9127L)
+#define TBH_HB_DSC_MEM_DATA_FCS_CTL_USE (9128L)
+#define TBH_HB_DSC_MEM_DATA_IG_POS (9129L)
+#define TBH_HB_DSC_MEM_DATA_IG_USE (9130L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_POS (9131L)
+#define TBH_HB_DSC_MEM_DATA_OCS_CMD_USE (9132L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_POS (9133L)
+#define TBH_HB_DSC_MEM_DATA_OFS0_USE (9134L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_POS (9135L)
+#define TBH_HB_DSC_MEM_DATA_OFS1_USE (9136L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_POS (9137L)
+#define TBH_HB_DSC_MEM_DATA_OFS2_USE (9138L)
+#define TBH_HB_DSC_MEM_DATA_PFD (9139L)
+#define TBH_HB_DSC_MEM_DATA_PORT (9140L)
+#define TBH_HB_DSC_MEM_DATA_PORT_MASK (9141L)
+#define TBH_HB_DSC_MEM_DATA_PORT_POS (9142L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_POS (9143L)
+#define TBH_HB_DSC_MEM_DATA_SET_CLOCK_USE (9144L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_POS (9145L)
+#define TBH_HB_DSC_MEM_DATA_SW_TFD_TYPE_USE (9146L)
+#define TBH_HB_DSC_MEM_DATA_TS_APPEND (9147L)
+#define TBH_HB_DSC_MEM_DATA_TS_FMT (9148L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_POS (9149L)
+#define TBH_HB_DSC_MEM_DATA_TS_INJECT_USE (9150L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_POS (9151L)
+#define TBH_HB_DSC_MEM_DATA_TX_NOW_USE (9152L)
+#define TBH_HB_DSC_MEM_DATA_TX_ON_TS (9153L)
+#define TBH_HB_DSC_MEM_DATA_WL_USE (9154L)
+#define TBH_HB_INFO_MEM_CTRL (9155L)
+#define TBH_HB_INFO_MEM_CTRL_A (9156L)
+#define TBH_HB_INFO_MEM_CTRL_CNT (9157L)
+#define TBH_HB_INFO_MEM_DATA (9158L)
+#define TBH_HB_INFO_MEM_DATA_SIZE (9159L)
+#define TBH_HB_PORTS_MEM_CTRL (9160L)
+#define TBH_HB_PORTS_MEM_CTRL_A (9161L)
+#define TBH_HB_PORTS_MEM_CTRL_CNT (9162L)
+#define TBH_HB_PORTS_MEM_DATA (9163L)
+#define TBH_HB_PORTS_MEM_DATA_MAPPING (9164L)
+#define TBH_PORT_MAPPING (9165L)
+#define TBH_PORT_MAPPING_P0 (9166L)
+#define TBH_PORT_MAPPING_P1 (9167L)
+#define TBH_PORT_MAPPING_P2 (9168L)
+#define TBH_PORT_MAPPING_P3 (9169L)
+#define TBH_PORT_MAPPING_P4 (9170L)
+#define TBH_PORT_MAPPING_P5 (9171L)
+#define TBH_PORT_MAPPING_P6 (9172L)
+#define TBH_PORT_MAPPING_P7 (9173L)
+#define TBH_SET_RD_POINTER (9174L)
+#define TBH_SET_RD_POINTER_HB (9175L)
+#define TBH_SET_RD_POINTER_OFFSET (9176L)
+#define TBH_STATUS (9177L)
+#define TBH_STATUS_STOPPED (9178L)
+/* TEMPMON */
+#define TEMPMON_ALARMS (9179L)
+#define TEMPMON_ALARMS_OT (9180L)
+#define TEMPMON_ALARMS_OT_OVERWR (9181L)
+#define TEMPMON_ALARMS_OT_OVERWRVAL (9182L)
+#define TEMPMON_ALARMS_TEMP (9183L)
+#define TEMPMON_STAT (9184L)
+#define TEMPMON_STAT_TEMP (9185L)
+/* TINT */
+#define TINT_CTRL (9186L)
+#define TINT_CTRL_INTERVAL (9187L)
+#define TINT_STATUS (9188L)
+#define TINT_STATUS_DELAYED (9189L)
+#define TINT_STATUS_SKIPPED (9190L)
+/* TMC */
+#define TMC_PORT_RPL (9191L)
+#define TMC_PORT_RPL_P0 (9192L)
+#define TMC_PORT_RPL_P1 (9193L)
+#define TMC_PORT_RPL_P2 (9194L)
+#define TMC_PORT_RPL_P3 (9195L)
+#define TMC_PORT_RPL_P4 (9196L)
+#define TMC_PORT_RPL_P5 (9197L)
+#define TMC_PORT_RPL_P6 (9198L)
+#define TMC_PORT_RPL_P7 (9199L)
+/* TSM */
+#define TSM_ADJ_FINE_N (9200L)
+#define TSM_ADJ_FINE_N_2DY (9201L)
+#define TSM_ADJ_FINE_N_2DY2DX (9202L)
+#define TSM_ADJ_FINE_P (9203L)
+#define TSM_ADJ_FINE_P_2DY (9204L)
+#define TSM_ADJ_FINE_P_2DY2DX (9205L)
+#define TSM_ADJ_LIMIT_HI (9206L)
+#define TSM_ADJ_LIMIT_HI_LIMIT (9207L)
+#define TSM_ADJ_LIMIT_LO (9208L)
+#define TSM_ADJ_LIMIT_LO_LIMIT (9209L)
+#define TSM_BASIC_2DY (9210L)
+#define TSM_BASIC_2DY_2DY (9211L)
+#define TSM_BASIC_2DY2DX (9212L)
+#define TSM_BASIC_2DY2DX_2DY2DX (9213L)
+#define TSM_CON0_CONFIG (9214L)
+#define TSM_CON0_CONFIG_BLIND (9215L)
+#define TSM_CON0_CONFIG_DC_SRC (9216L)
+#define TSM_CON0_CONFIG_PORT (9217L)
+#define TSM_CON0_CONFIG_PPSIN_2_5V (9218L)
+#define TSM_CON0_CONFIG_SAMPLE_EDGE (9219L)
+#define TSM_CON0_INTERFACE (9220L)
+#define TSM_CON0_INTERFACE_EX_TERM (9221L)
+#define TSM_CON0_INTERFACE_IN_REF_PWM (9222L)
+#define TSM_CON0_INTERFACE_PWM_ENA (9223L)
+#define TSM_CON0_INTERFACE_RESERVED (9224L)
+#define TSM_CON0_INTERFACE_VTERM_PWM (9225L)
+#define TSM_CON0_SAMPLE_HI (9226L)
+#define TSM_CON0_SAMPLE_HI_SEC (9227L)
+#define TSM_CON0_SAMPLE_LO (9228L)
+#define TSM_CON0_SAMPLE_LO_NS (9229L)
+#define TSM_CON1_CONFIG (9230L)
+#define TSM_CON1_CONFIG_BLIND (9231L)
+#define TSM_CON1_CONFIG_DC_SRC (9232L)
+#define TSM_CON1_CONFIG_PORT (9233L)
+#define TSM_CON1_CONFIG_PPSIN_2_5V (9234L)
+#define TSM_CON1_CONFIG_SAMPLE_EDGE (9235L)
+#define TSM_CON1_SAMPLE_HI (9236L)
+#define TSM_CON1_SAMPLE_HI_SEC (9237L)
+#define TSM_CON1_SAMPLE_LO (9238L)
+#define TSM_CON1_SAMPLE_LO_NS (9239L)
+#define TSM_CON2_CONFIG (9240L)
+#define TSM_CON2_CONFIG_BLIND (9241L)
+#define TSM_CON2_CONFIG_DC_SRC (9242L)
+#define TSM_CON2_CONFIG_PORT (9243L)
+#define TSM_CON2_CONFIG_PPSIN_2_5V (9244L)
+#define TSM_CON2_CONFIG_SAMPLE_EDGE (9245L)
+#define TSM_CON2_SAMPLE_HI (9246L)
+#define TSM_CON2_SAMPLE_HI_SEC (9247L)
+#define TSM_CON2_SAMPLE_LO (9248L)
+#define TSM_CON2_SAMPLE_LO_NS (9249L)
+#define TSM_CON3_CONFIG (9250L)
+#define TSM_CON3_CONFIG_BLIND (9251L)
+#define TSM_CON3_CONFIG_PORT (9252L)
+#define TSM_CON3_CONFIG_SAMPLE_EDGE (9253L)
+#define TSM_CON3_SAMPLE_HI (9254L)
+#define TSM_CON3_SAMPLE_HI_SEC (9255L)
+#define TSM_CON3_SAMPLE_LO (9256L)
+#define TSM_CON3_SAMPLE_LO_NS (9257L)
+#define TSM_CON4_CONFIG (9258L)
+#define TSM_CON4_CONFIG_BLIND (9259L)
+#define TSM_CON4_CONFIG_PORT (9260L)
+#define TSM_CON4_CONFIG_SAMPLE_EDGE (9261L)
+#define TSM_CON4_SAMPLE_HI (9262L)
+#define TSM_CON4_SAMPLE_HI_SEC (9263L)
+#define TSM_CON4_SAMPLE_LO (9264L)
+#define TSM_CON4_SAMPLE_LO_NS (9265L)
+#define TSM_CON5_CONFIG (9266L)
+#define TSM_CON5_CONFIG_BLIND (9267L)
+#define TSM_CON5_CONFIG_PORT (9268L)
+#define TSM_CON5_CONFIG_SAMPLE_EDGE (9269L)
+#define TSM_CON5_SAMPLE_HI (9270L)
+#define TSM_CON5_SAMPLE_HI_SEC (9271L)
+#define TSM_CON5_SAMPLE_LO (9272L)
+#define TSM_CON5_SAMPLE_LO_TIME (9273L)
+#define TSM_CON6_CONFIG (9274L)
+#define TSM_CON6_CONFIG_BLIND (9275L)
+#define TSM_CON6_CONFIG_PORT (9276L)
+#define TSM_CON6_CONFIG_SAMPLE_EDGE (9277L)
+#define TSM_CON6_SAMPLE_HI (9278L)
+#define TSM_CON6_SAMPLE_HI_SEC (9279L)
+#define TSM_CON6_SAMPLE_LO (9280L)
+#define TSM_CON6_SAMPLE_LO_NS (9281L)
+#define TSM_CON7_HOST_SAMPLE_HI (9282L)
+#define TSM_CON7_HOST_SAMPLE_HI_SEC (9283L)
+#define TSM_CON7_HOST_SAMPLE_LO (9284L)
+#define TSM_CON7_HOST_SAMPLE_LO_NS (9285L)
+#define TSM_CONFIG (9286L)
+#define TSM_CONFIG_NTTS_SRC (9287L)
+#define TSM_CONFIG_NTTS_SYNC (9288L)
+#define TSM_CONFIG_TIMESET_EDGE (9289L)
+#define TSM_CONFIG_TIMESET_SRC (9290L)
+#define TSM_CONFIG_TIMESET_UP (9291L)
+#define TSM_CONFIG_TS_FORMAT (9292L)
+#define TSM_CTRL (9293L)
+#define TSM_CTRL_DCEN_CON0 (9294L)
+#define TSM_CTRL_DCEN_CON1 (9295L)
+#define TSM_CTRL_DCEN_CON2 (9296L)
+#define TSM_CTRL_FORMAT (9297L)
+#define TSM_CTRL_HIGH_SAMPLE (9298L)
+#define TSM_CTRL_LED_CON0 (9299L)
+#define TSM_CTRL_LED_CON1 (9300L)
+#define TSM_CTRL_LED_CON2 (9301L)
+#define TSM_CTRL_PRIMARY_STAT (9302L)
+#define TSM_CTRL_OEN_CON0 (9303L)
+#define TSM_CTRL_OEN_CON1 (9304L)
+#define TSM_CTRL_OEN_CON2 (9305L)
+#define TSM_CTRL_PPSEN (9306L)
+#define TSM_CTRL_PPS_NEGEDGE (9307L)
+#define TSM_CTRL_PPS_TIME_UP (9308L)
+#define TSM_CTRL_PTP_TIME_UP (9309L)
+#define TSM_CTRL_RESERVED (9310L)
+#define TSM_CTRL_SEL_EXTSRC (9311L)
+#define TSM_CTRL_SYNEN (9312L)
+#define TSM_CTRL_TS_CON0 (9313L)
+#define TSM_CTRL_TS_CON1 (9314L)
+#define TSM_CTRL_TS_CON2 (9315L)
+#define TSM_EXT_STAT (9316L)
+#define TSM_EXT_STAT_STAT (9317L)
+#define TSM_EXT_TIME_HI (9318L)
+#define TSM_EXT_TIME_HI_TIME (9319L)
+#define TSM_EXT_TIME_LO (9320L)
+#define TSM_EXT_TIME_LO_TIME (9321L)
+#define TSM_INTERFACE (9322L)
+#define TSM_INTERFACE_EX_TERM (9323L)
+#define TSM_INTERFACE_IN_REF_PWM (9324L)
+#define TSM_INTERFACE_PWM_ENA (9325L)
+#define TSM_INTERFACE_RESERVED (9326L)
+#define TSM_INTERFACE_VTERM_PWM (9327L)
+#define TSM_INT_CONFIG (9328L)
+#define TSM_INT_CONFIG_AUTO_DISABLE (9329L)
+#define TSM_INT_CONFIG_MASK (9330L)
+#define TSM_INT_STAT (9331L)
+#define TSM_INT_STAT_CAUSE (9332L)
+#define TSM_INT_STAT_ENABLE (9333L)
+#define TSM_INT_TIME_HI (9334L)
+#define TSM_INT_TIME_HI_TIME (9335L)
+#define TSM_INT_TIME_LO (9336L)
+#define TSM_INT_TIME_LO_TIME (9337L)
+#define TSM_LED (9338L)
+#define TSM_LED_LED0_BG_COLOR (9339L)
+#define TSM_LED_LED0_COLOR (9340L)
+#define TSM_LED_LED0_MODE (9341L)
+#define TSM_LED_LED0_SRC (9342L)
+#define TSM_LED_LED1_BG_COLOR (9343L)
+#define TSM_LED_LED1_COLOR (9344L)
+#define TSM_LED_LED1_MODE (9345L)
+#define TSM_LED_LED1_SRC (9346L)
+#define TSM_LED_LED2_BG_COLOR (9347L)
+#define TSM_LED_LED2_COLOR (9348L)
+#define TSM_LED_LED2_MODE (9349L)
+#define TSM_LED_LED2_SRC (9350L)
+#define TSM_NTTS_CONFIG (9351L)
+#define TSM_NTTS_CONFIG_AUTO_HARDSET (9352L)
+#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (9353L)
+#define TSM_NTTS_CONFIG_HIGH_SAMPLE (9354L)
+#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (9355L)
+#define TSM_NTTS_CTRL (9356L)
+#define TSM_NTTS_CTRL_NTTS_CMD (9357L)
+#define TSM_NTTS_DATA_HI (9358L)
+#define TSM_NTTS_DATA_HI_DATA (9359L)
+#define TSM_NTTS_DATA_LO (9360L)
+#define TSM_NTTS_DATA_LO_DATA (9361L)
+#define TSM_NTTS_EXT_STAT (9362L)
+#define TSM_NTTS_EXT_STAT_PRIMARY_ID (9363L)
+#define TSM_NTTS_EXT_STAT_PRIMARY_REV (9364L)
+#define TSM_NTTS_EXT_STAT_PRIMARY_STAT (9365L)
+#define TSM_NTTS_LIMIT_HI (9366L)
+#define TSM_NTTS_LIMIT_HI_SEC (9367L)
+#define TSM_NTTS_LIMIT_LO (9368L)
+#define TSM_NTTS_LIMIT_LO_NS (9369L)
+#define TSM_NTTS_OFFSET (9370L)
+#define TSM_NTTS_OFFSET_NS (9371L)
+#define TSM_NTTS_SAMPLE_HI (9372L)
+#define TSM_NTTS_SAMPLE_HI_SEC (9373L)
+#define TSM_NTTS_SAMPLE_LO (9374L)
+#define TSM_NTTS_SAMPLE_LO_NS (9375L)
+#define TSM_NTTS_STAT (9376L)
+#define TSM_NTTS_STAT_NTTS_VALID (9377L)
+#define TSM_NTTS_STAT_SIGNAL_LOST (9378L)
+#define TSM_NTTS_STAT_SYNC_LOST (9379L)
+#define TSM_NTTS_TS_T0_HI (9380L)
+#define TSM_NTTS_TS_T0_HI_TIME (9381L)
+#define TSM_NTTS_TS_T0_LO (9382L)
+#define TSM_NTTS_TS_T0_LO_TIME (9383L)
+#define TSM_NTTS_TS_T0_OFFSET (9384L)
+#define TSM_NTTS_TS_T0_OFFSET_COUNT (9385L)
+#define TSM_OFFSET_HI (9386L)
+#define TSM_OFFSET_HI_OFFSET (9387L)
+#define TSM_OFFSET_LO (9388L)
+#define TSM_OFFSET_LO_OFFSET (9389L)
+#define TSM_PB_CTRL (9390L)
+#define TSM_PB_CTRL_INSTMEM_WR (9391L)
+#define TSM_PB_CTRL_RESET (9392L)
+#define TSM_PB_CTRL_RST (9393L)
+#define TSM_PB_INSTMEM (9394L)
+#define TSM_PB_INSTMEM_ADDR (9395L)
+#define TSM_PB_INSTMEM_DATA (9396L)
+#define TSM_PB_INSTMEM_MEM_ADDR (9397L)
+#define TSM_PB_INSTMEM_MEM_DATA (9398L)
+#define TSM_PI_CTRL_I (9399L)
+#define TSM_PI_CTRL_I_VAL (9400L)
+#define TSM_PI_CTRL_KI (9401L)
+#define TSM_PI_CTRL_KI_GAIN (9402L)
+#define TSM_PI_CTRL_KP (9403L)
+#define TSM_PI_CTRL_KP_GAIN (9404L)
+#define TSM_PI_CTRL_SHL (9405L)
+#define TSM_PI_CTRL_SHL_VAL (9406L)
+#define TSM_RSYNC_COUNT (9407L)
+#define TSM_RSYNC_COUNT_COUNT (9408L)
+#define TSM_STAT (9409L)
+#define TSM_STAT_EXT_SRC_OK (9410L)
+#define TSM_STAT_HARD_SYNC (9411L)
+#define TSM_STAT_INSYNC (9412L)
+#define TSM_STAT_LINK_ACTIVE (9413L)
+#define TSM_STAT_LINK_CON0 (9414L)
+#define TSM_STAT_LINK_CON1 (9415L)
+#define TSM_STAT_LINK_CON2 (9416L)
+#define TSM_STAT_LINK_CON3 (9417L)
+#define TSM_STAT_LINK_CON4 (9418L)
+#define TSM_STAT_LINK_CON5 (9419L)
+#define TSM_STAT_NTTS_INSYNC (9420L)
+#define TSM_STAT_PTP_MI_PRESENT (9421L)
+#define TSM_TIMER_CTRL (9422L)
+#define TSM_TIMER_CTRL_TIMER_EN_T0 (9423L)
+#define TSM_TIMER_CTRL_TIMER_EN_T1 (9424L)
+#define TSM_TIMER_CTRL_TRIGGER_SEL (9425L)
+#define TSM_TIMER_D_T0 (9426L)
+#define TSM_TIMER_D_T0_MAX_COUNT (9427L)
+#define TSM_TIMER_T0 (9428L)
+#define TSM_TIMER_T0_MAX_COUNT (9429L)
+#define TSM_TIMER_T1 (9430L)
+#define TSM_TIMER_T1_MAX_COUNT (9431L)
+#define TSM_TIMESTAMP_HI (9432L)
+#define TSM_TIMESTAMP_HI_TIME (9433L)
+#define TSM_TIMESTAMP_LO (9434L)
+#define TSM_TIMESTAMP_LO_TIME (9435L)
+#define TSM_TIME_HARDSET_HI (9436L)
+#define TSM_TIME_HARDSET_HI_TIME (9437L)
+#define TSM_TIME_HARDSET_LO (9438L)
+#define TSM_TIME_HARDSET_LO_TIME (9439L)
+#define TSM_TIME_HI (9440L)
+#define TSM_TIME_HI_SEC (9441L)
+#define TSM_TIME_HI_TIME (9442L)
+#define TSM_TIME_LO (9443L)
+#define TSM_TIME_LO_NS (9444L)
+#define TSM_TIME_RATE_ADJ (9445L)
+#define TSM_TIME_RATE_ADJ_FRACTION (9446L)
+#define TSM_TS_HI (9447L)
+#define TSM_TS_HI_TIME (9448L)
+#define TSM_TS_LO (9449L)
+#define TSM_TS_LO_TIME (9450L)
+#define TSM_TS_OFFSET (9451L)
+#define TSM_TS_OFFSET_NS (9452L)
+#define TSM_TS_STAT (9453L)
+#define TSM_TS_STAT_OVERRUN (9454L)
+#define TSM_TS_STAT_SAMPLES (9455L)
+#define TSM_TS_STAT_HI_OFFSET (9456L)
+#define TSM_TS_STAT_HI_OFFSET_NS (9457L)
+#define TSM_TS_STAT_LO_OFFSET (9458L)
+#define TSM_TS_STAT_LO_OFFSET_NS (9459L)
+#define TSM_TS_STAT_TAR_HI (9460L)
+#define TSM_TS_STAT_TAR_HI_SEC (9461L)
+#define TSM_TS_STAT_TAR_LO (9462L)
+#define TSM_TS_STAT_TAR_LO_NS (9463L)
+#define TSM_TS_STAT_X (9464L)
+#define TSM_TS_STAT_X_NS (9465L)
+#define TSM_TS_STAT_X2_HI (9466L)
+#define TSM_TS_STAT_X2_HI_NS (9467L)
+#define TSM_TS_STAT_X2_LO (9468L)
+#define TSM_TS_STAT_X2_LO_NS (9469L)
+#define TSM_UTC_OFFSET (9470L)
+#define TSM_UTC_OFFSET_SEC (9471L)
+
+#endif /* _NTHW_FPGA_REGISTERS_DEFS_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 2/8] net/ntnic: adds core registers and fpga functionality
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
@ 2023-09-08 16:07   ` Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Includes functionality to reset, initialize, program, and collect
stats for the NTNIC FPGA.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v3:
* Fixed WARNING:TYPO_SPELLING
* Fix compilation for Fedora38
v9:
* Add missing header
---
 drivers/net/ntnic/include/nthw_bus.h          |   10 +
 drivers/net/ntnic/meson.build                 |   32 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |    9 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.h |   39 +
 drivers/net/ntnic/nthw/core/nthw_core.h       |   31 +
 drivers/net/ntnic/nthw/core/nthw_fpga.c       |  914 +++++++++
 drivers/net/ntnic/nthw/core/nthw_fpga.h       |   47 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.c  |   46 +
 .../net/ntnic/nthw/core/nthw_fpga_nt200a0x.h  |   14 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c   |   10 +
 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h   |   17 +
 .../net/ntnic/nthw/core/nthw_fpga_rst9563.c   |  241 +++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c  |  674 +++++++
 .../ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h  |   89 +
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c   |  271 +++
 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h   |   57 +
 drivers/net/ntnic/nthw/core/nthw_hif.c        |  342 ++++
 drivers/net/ntnic/nthw/core/nthw_hif.h        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_iic.c        |  570 ++++++
 drivers/net/ntnic/nthw/core/nthw_iic.h        |  101 +
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c    | 1034 ++++++++++
 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h    |  261 +++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.c    | 1631 ++++++++++++++++
 .../net/ntnic/nthw/core/nthw_mac_pcs_xxv.h    |  291 +++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c  |  121 ++
 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h  |   51 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.c     |   99 +
 drivers/net/ntnic/nthw/core/nthw_pci_ta.h     |   40 +
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c  |  127 ++
 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h  |   55 +
 drivers/net/ntnic/nthw/core/nthw_pcie3.c      |  274 +++
 drivers/net/ntnic/nthw/core/nthw_pcie3.h      |  100 +
 drivers/net/ntnic/nthw/core/nthw_sdc.c        |  177 ++
 drivers/net/ntnic/nthw/core/nthw_sdc.h        |   43 +
 drivers/net/ntnic/nthw/core/nthw_si5340.c     |  206 ++
 drivers/net/ntnic/nthw/core/nthw_si5340.h     |   34 +
 drivers/net/ntnic/nthw/core/nthw_spi_v3.c     |  380 ++++
 drivers/net/ntnic/nthw/core/nthw_spi_v3.h     |  106 ++
 drivers/net/ntnic/nthw/core/nthw_spim.c       |  117 ++
 drivers/net/ntnic/nthw/core/nthw_spim.h       |   52 +
 drivers/net/ntnic/nthw/core/nthw_spis.c       |  147 ++
 drivers/net/ntnic/nthw/core/nthw_spis.h       |   63 +
 drivers/net/ntnic/nthw/core/nthw_tsm.c        |  179 ++
 drivers/net/ntnic/nthw/core/nthw_tsm.h        |   53 +
 drivers/net/ntnic/nthw/nthw_dbs.c             | 1301 +++++++++++++
 drivers/net/ntnic/nthw/nthw_dbs.h             |  313 +++
 drivers/net/ntnic/nthw/nthw_drv.h             |   82 +
 drivers/net/ntnic/nthw/nthw_epp.c             |  335 ++++
 drivers/net/ntnic/nthw/nthw_epp.h             |   99 +
 drivers/net/ntnic/nthw/nthw_fpga_model.c      | 1677 +++++++++++++++++
 drivers/net/ntnic/nthw/nthw_fpga_model.h      |  308 +++
 drivers/net/ntnic/nthw/nthw_helper.h          |   21 +
 drivers/net/ntnic/nthw/nthw_platform.c        |   35 +
 drivers/net/ntnic/nthw/nthw_platform_drv.h    |   42 +
 drivers/net/ntnic/nthw/nthw_profile.h         |   15 +
 drivers/net/ntnic/nthw/nthw_rac.c             |  976 ++++++++++
 drivers/net/ntnic/nthw/nthw_rac.h             |  161 ++
 drivers/net/ntnic/nthw/nthw_register.h        |    2 +
 drivers/net/ntnic/nthw/nthw_stat.c            |  266 +++
 drivers/net/ntnic/nthw/nthw_stat.h            |   72 +
 drivers/net/ntnic/ntlog/include/ntlog.h       |  162 ++
 drivers/net/ntnic/ntlog/ntlog.c               |  115 ++
 drivers/net/ntnic/ntutil/include/nt_util.h    |   72 +
 drivers/net/ntnic/ntutil/nt_util.c            |   77 +
 64 files changed, 15442 insertions(+)
 create mode 100644 drivers/net/ntnic/include/nthw_bus.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_core.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_hif.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_iic.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_ta.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_pcie3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_sdc.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_si5340.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spi_v3.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spim.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_spis.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_tsm.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_dbs.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_epp.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_fpga_model.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_helper.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_platform_drv.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_profile.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_rac.h
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.c
 create mode 100644 drivers/net/ntnic/nthw/nthw_stat.h
 create mode 100644 drivers/net/ntnic/ntlog/include/ntlog.h
 create mode 100644 drivers/net/ntnic/ntlog/ntlog.c
 create mode 100644 drivers/net/ntnic/ntutil/include/nt_util.h
 create mode 100644 drivers/net/ntnic/ntutil/nt_util.c

diff --git a/drivers/net/ntnic/include/nthw_bus.h b/drivers/net/ntnic/include/nthw_bus.h
new file mode 100644
index 0000000000..975cc95e78
--- /dev/null
+++ b/drivers/net/ntnic/include/nthw_bus.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_BUS_H__
+#define __NTHW_BUS_H__
+
+#include <stdint.h>
+
+/*
+ * Identifier for a RAB bus instance.
+ * NOTE(review): <stdint.h> added — the original header used uint8_t without
+ * including it, so it was not self-contained and compiled only when the
+ * includer happened to pull in <stdint.h> first.
+ */
+typedef uint8_t rab_bus_id_t;
+
+#endif /* __NTHW_BUS_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 1194ce6aea..428fc7af98 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -11,13 +11,45 @@ endif
 includes = [
     include_directories('.'),
     include_directories('include'),
+    include_directories('ntlog/include'),
+    include_directories('ntutil/include'),
     include_directories('nthw'),
+    include_directories('nthw/core'),
     include_directories('nthw/supported'),
 ]
 
 # all sources
 sources = files(
+    'nthw/core/nthw_clock_profiles.c',
+    'nthw/core/nthw_fpga.c',
+    'nthw/core/nthw_fpga_nt200a0x.c',
+    'nthw/core/nthw_fpga_rst.c',
+    'nthw/core/nthw_fpga_rst9563.c',
+    'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gpio_phy.c',
+    'nthw/core/nthw_hif.c',
+    'nthw/core/nthw_iic.c',
+    'nthw/core/nthw_mac_pcs.c',
+    'nthw/core/nthw_mac_pcs_xxv.c',
+    'nthw/core/nthw_pci_rd_tg.c',
+    'nthw/core/nthw_pci_ta.c',
+    'nthw/core/nthw_pci_wr_tg.c',
+    'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_sdc.c',
+    'nthw/core/nthw_si5340.c',
+    'nthw/core/nthw_spi_v3.c',
+    'nthw/core/nthw_spim.c',
+    'nthw/core/nthw_spis.c',
+    'nthw/core/nthw_tsm.c',
+    'nthw/nthw_fpga_model.c',
+    'nthw/nthw_dbs.c',
+    'nthw/nthw_epp.c',
+    'nthw/nthw_platform.c',
+    'nthw/nthw_rac.c',
+    'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
+    'ntlog/ntlog.c',
+    'ntutil/nt_util.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
new file mode 100644
index 0000000000..efdcc222a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_clock_profiles.h"
+
+/* Clock profile for NT200A02 2x40G, 2x100G */
+/*
+ * NOTE(review): tentative definitions with no initialisers — the entry count
+ * is 0 and the table pointer is NULL. Presumably the real Si5340 register
+ * table is supplied by a later commit in this series; confirm before any
+ * code dereferences p_data_si5340_nt200a02_u23_v5.
+ */
+const int n_data_si5340_nt200a02_u23_v5;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
new file mode 100644
index 0000000000..4252f69e92
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CLOCK_PROFILES_H__
+#define __NTHW_CLOCK_PROFILES_H__
+
+#include <stdint.h>
+
+#include "nthw_helper.h"
+
+/* Message emitted when a profile-table size check fails. */
+#define clk_profile_size_error_msg "size test failed"
+
+/*
+ * Clock profile table entry layouts. A profile is an array of register
+ * writes applied to a clock chip; three on-disk/source formats exist,
+ * distinguished by address width and the presence of a write mask.
+ */
+
+/* Format 0: 8-bit address, value and mask. */
+typedef struct {
+	unsigned char reg_addr;
+	unsigned char reg_val;
+	unsigned char reg_mask;
+} clk_profile_data_fmt0_t;
+
+/* Format 1: 16-bit address, 8-bit value, no mask. */
+typedef struct {
+	uint16_t reg_addr;
+	uint8_t reg_val;
+} clk_profile_data_fmt1_t;
+
+/* Format 2: full-width address, 8-bit value, no mask. */
+typedef struct {
+	unsigned int reg_addr;
+	unsigned char reg_val;
+} clk_profile_data_fmt2_t;
+
+/* Selector telling consumers which entry layout a profile table uses. */
+typedef enum {
+	CLK_PROFILE_DATA_FMT_0,
+	CLK_PROFILE_DATA_FMT_1,
+	CLK_PROFILE_DATA_FMT_2
+} clk_profile_data_fmt_t;
+
+/*
+ * Si5340 profile for NT200A02 (defined in nthw_clock_profiles.c):
+ * entry count and table pointer.
+ */
+extern const int n_data_si5340_nt200a02_u23_v5;
+extern const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+
+#endif /* __NTHW_CLOCK_PROFILES_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
new file mode 100644
index 0000000000..798a95d5cf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_CORE_H__
+#define __NTHW_CORE_H__
+
+/*
+ * Umbrella header for the NTHW core modules: a single include that pulls
+ * in the platform layer, FPGA model, host interface / PCIe (incl. traffic
+ * generators and analyzer), I2C, GPIO/PHY, MAC/PCS, SDC, SPI master/slave,
+ * TSM and the Si5340 clock chip headers.
+ */
+
+#include "nthw_helper.h"
+
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_hif.h"
+#include "nthw_pcie3.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_iic.h"
+
+#include "nthw_gpio_phy.h"
+#include "nthw_mac_pcs.h"
+#include "nthw_mac_pcs_xxv.h"
+#include "nthw_sdc.h"
+
+#include "nthw_spim.h"
+#include "nthw_spis.h"
+
+#include "nthw_tsm.h"
+
+#include "nthw_si5340.h"
+
+#endif /* __NTHW_CORE_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c
new file mode 100644
index 0000000000..646d45b7eb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_instances.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+/*
+ * Cache the FPGA product parameters in p_fpga_info and classify the image
+ * into a profile. Always returns 0.
+ */
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga)
+{
+	/* -1 is the fallback when a parameter is absent from the image. */
+	p_fpga_info->n_nims = fpga_get_product_param(p_fpga, NT_NIMS, -1);
+	p_fpga_info->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, -1);
+	p_fpga_info->n_phy_quads = fpga_get_product_param(p_fpga, NT_PHY_QUADS, -1);
+	p_fpga_info->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, -1);
+	p_fpga_info->n_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, -1);
+
+	/*
+	 * Profile classification: NT_NFV_OVS_PRODUCT marks a VSWITCH image
+	 * (NT_IOA_PRESENT is the legacy VSWITCH marker), NT_QM_PRESENT marks
+	 * a CAPTURE image, anything else is treated as INLINE.
+	 */
+	if (fpga_get_product_param(p_fpga, NT_NFV_OVS_PRODUCT, 0) != 0 ||
+			fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_VSWITCH;
+	else if (fpga_get_product_param(p_fpga, NT_QM_PRESENT, 0) != 0)
+		p_fpga_info->profile = FPGA_INFO_PROFILE_CAPTURE;
+	else
+		p_fpga_info->profile = FPGA_INFO_PROFILE_INLINE;
+
+	return 0;
+}
+
+/*
+ * Read one register byte from an I2C device behind the given IIC instance.
+ * Returns the byte value (0..255) on success, -1 on failure.
+ */
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const uint8_t n_dev_addr, const uint8_t n_reg_addr)
+{
+	nthw_iic_t iic;
+	uint8_t n_data = 0;
+
+	if (nthw_iic_init(&iic, p_fpga, n_instance_no, 8) != 0)
+		return -1;
+	if (nthw_iic_readbyte(&iic, n_dev_addr, n_reg_addr, 1, &n_data) != 0)
+		return -1;
+	return n_data;
+}
+
+/*
+ * Write one register byte to an I2C device behind the given IIC instance.
+ * Returns 0 on success, -1 on failure.
+ */
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			    uint8_t val)
+{
+	nthw_iic_t iic;
+	int rc = -1;
+
+	if (nthw_iic_init(&iic, p_fpga, n_instance_no, 8) == 0 &&
+			nthw_iic_writebyte(&iic, n_dev_addr, n_reg_addr, 1, &val) == 0)
+		rc = 0;
+
+	return rc;
+}
+
+/*
+ * Probe every IIC controller instance in [begin, end] and log the devices
+ * answering on its bus. Instances whose controller cannot be allocated or
+ * initialized are silently skipped. Always returns 0.
+ */
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		      const int n_instance_no_end)
+{
+	int n_instance;
+
+	assert(n_instance_no_begin <= n_instance_no_end);
+
+	for (n_instance = n_instance_no_begin; n_instance <= n_instance_no_end;
+			n_instance++) {
+		nthw_iic_t *p_iic_dev = nthw_iic_new();
+
+		if (!p_iic_dev)
+			continue;
+
+		if (nthw_iic_init(p_iic_dev, p_fpga, n_instance, 8) == 0) {
+			nthw_iic_set_retry_params(p_iic_dev, -1, 100, 100, 3, 3);
+			nthw_iic_scan(p_iic_dev);
+		}
+		nthw_iic_delete(p_iic_dev);
+	}
+	return 0;
+}
+
+/*
+ * Identify a Silicon Labs clock chip on the given IIC instance by reading
+ * its 8-byte ident block from page 0.
+ * Returns the detected part number (5338/5340/5341), or -1 on I2C failure
+ * or when no known part matches.
+ */
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			   const int n_dev_addr, const int n_page_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t ident = -1;
+	int res = -1;
+
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+
+	if (p_nthw_iic) {
+		uint8_t data;
+		uint8_t a_silabs_ident[8];
+
+		nthw_iic_init(p_nthw_iic, p_fpga, n_instance_no, 8);
+
+		data = 0;
+		/* switch to page 0 */
+		nthw_iic_write_data(p_nthw_iic, (uint8_t)n_dev_addr,
+				  (uint8_t)n_page_reg_addr, 1, &data);
+		res = nthw_iic_read_data(p_nthw_iic, (uint8_t)n_dev_addr, 0x00,
+				       sizeof(a_silabs_ident), a_silabs_ident);
+		if (res == 0) {
+			int i;
+
+			/* Fold the 8 ident bytes into one value for logging. */
+			for (i = 0; i < (int)sizeof(a_silabs_ident); i++) {
+				ident <<= 8;
+				ident |= a_silabs_ident[i];
+			}
+		}
+		nthw_iic_delete(p_nthw_iic);
+		p_nthw_iic = NULL;
+
+		/* Conclude SiLabs part */
+		if (res == 0) {
+			/*
+			 * NOTE(review): if byte 3 is 0x53 but byte 2 is neither
+			 * 0x40 nor 0x41, res stays 0 (the read status) instead
+			 * of -1 -- confirm whether that is intended.
+			 */
+			if (a_silabs_ident[3] == 0x53) {
+				if (a_silabs_ident[2] == 0x40)
+					res = 5340;
+				else if (a_silabs_ident[2] == 0x41)
+					res = 5341;
+			/*
+			 * NOTE(review): decimal 38 here while the sibling
+			 * comparisons use hex -- verify 0x38 was not meant
+			 * for the Si5338 match.
+			 */
+			} else if (a_silabs_ident[2] == 38) {
+				res = 5338;
+			} else {
+				res = -1;
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %016" PRIX64 ": %d\n", p_adapter_id_str, ident,
+	       res);
+	return res;
+}
+
+/*
+ * Calculate CRC-16-CCITT of passed data
+ * CRC-16-CCITT ^16 + ^12 + ^5 + 1 (0x1021) (X.25, HDLC, XMODEM, Bluetooth,
+ *   SD, many others; known as CRC-CCITT)
+ */
/*
 * CRC-16-CCITT, XMODEM variant: polynomial 0x1021, initial value 0,
 * no final XOR. Processes one input byte per iteration using the classic
 * shift-free bit-mixing formulation (check value for "123456789" is 0x31C3).
 */
static uint16_t crc16(uint8_t *buffer, size_t length)
{
	uint16_t crc = 0;

	for (; length != 0; length--) {
		/* Rotate the running CRC by 8 and fold in the next byte. */
		crc = (uint16_t)((crc >> 8) | (crc << 8));
		crc = (uint16_t)(crc ^ *buffer++);
		/* Equivalent of applying poly 0x1021 to the new low byte. */
		crc = (uint16_t)(crc ^ ((crc & 0xff) >> 4));
		crc = (uint16_t)(crc ^ (crc << 12));
		crc = (uint16_t)(crc ^ ((crc & 0xff) << 5));
	}
	return crc;
}
+
+/*
+ * Probe the board-management AVR over SPI and populate the adapter's
+ * fpga_info with firmware version, platform id string, hardware revision
+ * (SYSINFO2, falling back to SYSINFO) and CRC-checked VPD data
+ * (part/serial/board names, feature mask, MAC address block).
+ * Returns the status of the last SPI transfer performed (0 on success).
+ */
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no)
+{
+	struct fpga_info_s *p_fpga_info = p_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nthw_spi_v3_t *p_avr_spi;
+	int res = -1;
+
+	p_avr_spi = nthw_spi_v3_new();
+	if (p_avr_spi) {
+		/* Aggregate of all fields decoded from the AVR replies below. */
+		struct avr_vpd_info_s {
+			/* avr info */
+			uint32_t n_avr_spi_version;
+			uint8_t n_avr_fw_ver_major;
+			uint8_t n_avr_fw_ver_minor;
+			uint8_t n_avr_fw_ver_micro;
+			uint8_t a_avr_fw_ver_str[50];
+			uint8_t a_avr_fw_plat_id_str[20];
+
+			/* vpdEeprom_t */
+			uint8_t psu_hw_version;
+			uint8_t vpd_pn[GEN2_PN_SIZE];
+			uint8_t vpd_pba[GEN2_PBA_SIZE];
+			uint8_t vpd_sn[GEN2_SN_SIZE];
+			uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+			uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+
+			/* BoardInfo_t aka vpd_platform_section: */
+			uint32_t product_family; /* uint8_t 1: capture, 2: Inline, 3: analysis */
+			uint32_t feature_mask; /* Bit 0: OC192 capable */
+			uint32_t invfeature_mask;
+			uint8_t no_of_macs;
+			uint8_t mac_address[6];
+			uint16_t custom_id;
+			uint8_t user_id[8];
+			/*
+			 * Reserved NT operations to monitor the reprogram count of userId with
+			 * vpduser
+			 */
+			uint16_t user_id_erase_write_count;
+
+			/*
+			 * AVR_OP_SYSINFO: struct version_sysinfo_request_container
+			 * Which version of the sysinfo container to retrieve. Set to zero to fetch
+			 * latest. offset zero of latest always contain an uint8_t version info
+			 */
+			uint8_t sysinfo_container_version;
+
+			/* AVR_OP_SYSINFO: struct AvrLibcVersion */
+			uint32_t sysinfo_avr_libc_version; /* The constant __AVR_LIBC_VERSION__ */
+
+			/* AVR_OP_SYSINFO: struct AvrLibcSignature */
+			uint8_t sysinfo_signature_0; /* The constant SIGNATURE_0 */
+			uint8_t sysinfo_signature_1; /* The constant SIGNATURE_1 */
+			uint8_t sysinfo_signature_2; /* The constant SIGNATURE_2 */
+
+			/* AVR_OP_SYSINFO: struct AvrOs */
+			uint8_t sysinfo_spi_version; /* SPI command layer version */
+			/*
+			 * Hardware revision. Locked to eeprom address zero. Is also available via
+			 * VPD read opcode (prior to v1.4b, this is required)
+			 */
+			uint8_t sysinfo_hw_revision;
+			/*
+			 * Number of ticks/second (Note: Be aware this may become zero if timer
+			 * module is rewritten to a tickles system!)
+			 */
+			uint8_t sysinfo_ticks_per_second;
+			uint32_t sysinfo_uptime; /* Uptime in seconds since last AVR reset */
+			uint8_t sysinfo_osccal; /* OSCCAL value */
+
+			/*
+			 * Meta data concluded/calculated from req/reply
+			 */
+			bool b_feature_mask_valid;
+			bool b_crc16_valid;
+			uint16_t n_crc16_stored;
+			uint16_t n_crc16_calced;
+			uint64_t n_mac_val;
+		};
+
+		struct avr_vpd_info_s avr_vpd_info;
+		struct tx_rx_buf tx_buf;
+		struct tx_rx_buf rx_buf;
+		char rx_data[MAX_AVR_CONTAINER_SIZE];
+		uint32_t u32;
+
+		memset(&avr_vpd_info, 0, sizeof(avr_vpd_info));
+
+		nthw_spi_v3_init(p_avr_spi, p_fpga, n_instance_no);
+
+		/* AVR_OP_SPI_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(u32);
+		rx_buf.p_buf = &u32;
+		u32 = 0;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SPI_VERSION, &tx_buf,
+					   &rx_buf);
+		avr_vpd_info.n_avr_spi_version = u32;
+		NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n", p_adapter_id_str,
+		       n_instance_no, avr_vpd_info.n_avr_spi_version);
+
+		/*
+		 * NOTE(review): res from the SPI_VERSION and VERSION transfers
+		 * is overwritten by later transfers without being checked --
+		 * confirm this best-effort behavior is intended.
+		 */
+		/* AVR_OP_VERSION */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VERSION, &tx_buf,
+					   &rx_buf);
+
+		/* NOTE(review): version bytes are printed with %c -- presumably ASCII; confirm. */
+		avr_vpd_info.n_avr_fw_ver_major = rx_data[0];
+		avr_vpd_info.n_avr_fw_ver_minor = rx_data[1];
+		avr_vpd_info.n_avr_fw_ver_micro = rx_data[2];
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER: %c.%c.%c\n",
+		       p_adapter_id_str, n_instance_no, avr_vpd_info.n_avr_fw_ver_major,
+		       avr_vpd_info.n_avr_fw_ver_minor,
+		       avr_vpd_info.n_avr_fw_ver_micro,
+		       avr_vpd_info.n_avr_fw_ver_micro);
+
+		memcpy(avr_vpd_info.a_avr_fw_ver_str, &rx_data[0 + 3],
+		       sizeof(avr_vpd_info.a_avr_fw_ver_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_VER_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_ver_str),
+		       avr_vpd_info.a_avr_fw_ver_str);
+
+		memcpy(avr_vpd_info.a_avr_fw_plat_id_str, &rx_data[0 + 3 + 50],
+		       sizeof(avr_vpd_info.a_avr_fw_plat_id_str));
+		NT_LOG(DBG, NTHW, "%s: AVR%d: FW_HW_ID_STR: '%.*s'\n",
+		       p_adapter_id_str, n_instance_no,
+		       (int)sizeof(avr_vpd_info.a_avr_fw_plat_id_str),
+		       avr_vpd_info.a_avr_fw_plat_id_str);
+
+		/* Keep a NUL-terminated copy of the platform id for later use. */
+		rte_strscpy(p_fpga_info->nthw_hw_info.hw_plat_id_str,
+			(char *)avr_vpd_info.a_avr_fw_plat_id_str,
+			sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str));
+		p_fpga_info->nthw_hw_info.hw_plat_id_str
+		[sizeof(p_fpga_info->nthw_hw_info.hw_plat_id_str) - 1] =
+			0;
+
+		/* AVR_OP_SYSINFO_2 */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO_2, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= 16) {
+			if (rx_buf.size != 16) {
+				NT_LOG(WRN, NTHW,
+				       "%s: AVR%d: SYSINFO2: reply is larger than expected: %04X %04X\n",
+				       p_adapter_id_str, n_instance_no, rx_buf.size,
+				       16);
+			} else {
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO2: OK: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+
+			/* Decode the fixed-offset SYSINFO2 container fields. */
+			avr_vpd_info.sysinfo_container_version = rx_data[0];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_container_version);
+
+			memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+			       &rx_data[0 + 1],
+			       sizeof(avr_vpd_info.sysinfo_avr_libc_version));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_avr_libc_version);
+
+			avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+			avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+			avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+			NT_LOG(DBG, NTHW,
+			       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_signature_0,
+			       avr_vpd_info.sysinfo_signature_1,
+			       avr_vpd_info.sysinfo_signature_2);
+
+			avr_vpd_info.sysinfo_spi_version = rx_data[8];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_spi_version);
+
+			avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_hw_revision);
+
+			avr_vpd_info.sysinfo_ticks_per_second = rx_data[10];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: TICKS_PER_SEC: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_ticks_per_second);
+
+			memcpy(&avr_vpd_info.sysinfo_uptime, &rx_data[11],
+			       sizeof(avr_vpd_info.sysinfo_uptime));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_uptime);
+
+			avr_vpd_info.sysinfo_osccal = rx_data[15];
+			NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.sysinfo_osccal);
+
+			{
+				bool b_spi_ver_match _unused =
+					(avr_vpd_info.n_avr_spi_version ==
+					 avr_vpd_info.sysinfo_spi_version);
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+				       p_adapter_id_str, n_instance_no,
+				       (b_spi_ver_match ? "OK" : "MISMATCH"),
+				       avr_vpd_info.n_avr_spi_version,
+				       avr_vpd_info.sysinfo_spi_version);
+			}
+			/* SYSINFO2: if response: only populate hw_id not hw_id_emulated */
+			p_fpga_info->nthw_hw_info.hw_id =
+				avr_vpd_info.sysinfo_hw_revision;
+		} else {
+			/* AVR_OP_SYSINFO: legacy fallback, same field layout. */
+			tx_buf.size = 0;
+			tx_buf.p_buf = NULL;
+			rx_buf.size = sizeof(rx_data);
+			rx_buf.p_buf = &rx_data;
+			res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_SYSINFO,
+						   &tx_buf, &rx_buf);
+			if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+					rx_buf.size >= 16) {
+				if (rx_buf.size != 16) {
+					NT_LOG(WRN, NTHW,
+					       "%s: AVR%d: SYSINFO: reply is larger than expected: %04X %04X\n",
+					       p_adapter_id_str, n_instance_no,
+					       rx_buf.size, 16);
+				} else {
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SYSINFO: OK: res=%d sz=%d\n",
+					       p_adapter_id_str, n_instance_no, res,
+					       rx_buf.size);
+				}
+
+				avr_vpd_info.sysinfo_container_version =
+					rx_data[0];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SYSINFO_REQ_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_container_version);
+
+				memcpy(&avr_vpd_info.sysinfo_avr_libc_version,
+				       &rx_data[0 + 1],
+				       sizeof(avr_vpd_info
+					      .sysinfo_avr_libc_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: LIBC_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_avr_libc_version);
+
+				avr_vpd_info.sysinfo_signature_0 = rx_data[5];
+				avr_vpd_info.sysinfo_signature_1 = rx_data[6];
+				avr_vpd_info.sysinfo_signature_2 = rx_data[7];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: SIGNATURE: %02x%02x%02x\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_signature_0,
+				       avr_vpd_info.sysinfo_signature_1,
+				       avr_vpd_info.sysinfo_signature_2);
+
+				avr_vpd_info.sysinfo_spi_version = rx_data[8];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SPI_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_spi_version);
+
+				avr_vpd_info.sysinfo_hw_revision = rx_data[9];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+				NT_LOG(INF, NTHW, "%s: AVR%d: HW_REV: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_hw_revision);
+
+				avr_vpd_info.sysinfo_ticks_per_second =
+					rx_data[10];
+				NT_LOG(DBG, NTHW,
+				       "%s: AVR%d: TICKS_PER_SEC: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_ticks_per_second);
+
+				memcpy(&avr_vpd_info.sysinfo_uptime,
+				       &rx_data[11],
+				       sizeof(avr_vpd_info.sysinfo_uptime));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: UPTIME: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_uptime);
+
+				avr_vpd_info.sysinfo_osccal = rx_data[15];
+				NT_LOG(DBG, NTHW, "%s: AVR%d: OSCCAL: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.sysinfo_osccal);
+
+				{
+					bool b_spi_ver_match _unused =
+						(avr_vpd_info.n_avr_spi_version ==
+						 avr_vpd_info
+						 .sysinfo_spi_version);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: SPI_VER_TST: %s (%d %d)\n",
+					       p_adapter_id_str, n_instance_no,
+					       (b_spi_ver_match ? "OK" :
+						"MISMATCH"),
+					       avr_vpd_info.n_avr_spi_version,
+					       avr_vpd_info.sysinfo_spi_version);
+				}
+
+				p_fpga_info->nthw_hw_info.hw_id =
+					avr_vpd_info.sysinfo_hw_revision;
+				p_fpga_info->nthw_hw_info.hw_id_emulated =
+					avr_vpd_info.sysinfo_hw_revision;
+			} else {
+				NT_LOG(ERR, NTHW,
+				       "%s: AVR%d: SYSINFO: NA: res=%d sz=%d\n",
+				       p_adapter_id_str, n_instance_no, res,
+				       rx_buf.size);
+			}
+		}
+
+		/* AVR_OP_VPD_READ */
+		tx_buf.size = 0;
+		tx_buf.p_buf = NULL;
+		rx_buf.size = sizeof(rx_data);
+		rx_buf.p_buf = &rx_data;
+		res = nthw_spi_v3_transfer(p_avr_spi, AVR_OP_VPD_READ, &tx_buf,
+					   &rx_buf);
+		if (res == 0 && avr_vpd_info.n_avr_spi_version >= 3 &&
+				rx_buf.size >= GEN2_VPD_SIZE_TOTAL) {
+			/* The VPD blob ends with a 2-byte stored CRC-16 over the payload. */
+			avr_vpd_info.n_crc16_calced =
+				crc16(rx_buf.p_buf, rx_buf.size - 2);
+			memcpy(&avr_vpd_info.n_crc16_stored,
+			       &rx_data[rx_buf.size - 2],
+			       sizeof(avr_vpd_info.n_crc16_stored));
+			NT_LOG(DBG, NTHW, "%s: AVR%d: VPD_CRC: %04X %04X\n",
+			       p_adapter_id_str, n_instance_no,
+			       avr_vpd_info.n_crc16_stored,
+			       avr_vpd_info.n_crc16_calced);
+
+			avr_vpd_info.b_crc16_valid = (avr_vpd_info.n_crc16_stored ==
+						    avr_vpd_info.n_crc16_calced);
+			NT_LOG(DBG, NTHW, "%s: AVR%d: CRC_TST: %s\n",
+			       p_adapter_id_str, n_instance_no,
+			       (avr_vpd_info.b_crc16_valid ? "OK" : "ERROR"));
+
+			if (avr_vpd_info.b_crc16_valid) {
+				/* Fields are packed back-to-back in VPD order. */
+				memcpy(&avr_vpd_info.psu_hw_version, &rx_data[0],
+				       sizeof(avr_vpd_info.psu_hw_version));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PSU_HW_VER: %d\n",
+				       p_adapter_id_str, n_instance_no,
+				       avr_vpd_info.psu_hw_version);
+
+				memcpy(&avr_vpd_info.vpd_pn, &rx_data[0 + 1],
+				       sizeof(avr_vpd_info.vpd_pn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_PN_SIZE,
+				       avr_vpd_info.vpd_pn);
+
+				memcpy(&avr_vpd_info.vpd_pba,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE],
+				       sizeof(avr_vpd_info.vpd_pba));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: PBA: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_PBA_SIZE, avr_vpd_info.vpd_pba);
+
+				memcpy(&avr_vpd_info.vpd_sn,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE],
+				       sizeof(avr_vpd_info.vpd_sn));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: SN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no, GEN2_SN_SIZE,
+				       avr_vpd_info.vpd_sn);
+
+				memcpy(&avr_vpd_info.vpd_board_name,
+				       &rx_data[0 + 1 + GEN2_PN_SIZE +
+						 GEN2_PBA_SIZE + GEN2_SN_SIZE],
+				       sizeof(avr_vpd_info.vpd_board_name));
+				NT_LOG(DBG, NTHW, "%s: AVR%d: BN: '%.*s'\n",
+				       p_adapter_id_str, n_instance_no,
+				       GEN2_BNAME_SIZE,
+				       avr_vpd_info.vpd_board_name);
+
+				{
+					uint32_t u1;
+					union mac_u {
+						uint8_t a_u8[8];
+						uint16_t a_u16[4];
+						uint32_t a_u32[2];
+						uint64_t a_u64[1];
+					} mac;
+
+					 /* vpd_platform_section */
+					uint8_t *p_vpd_board_info =
+						(uint8_t *)(&rx_data[1 +
+								      GEN2_PN_SIZE +
+								      GEN2_PBA_SIZE +
+								      GEN2_SN_SIZE +
+								      GEN2_BNAME_SIZE]);
+					memcpy(&avr_vpd_info.product_family,
+					       &p_vpd_board_info[0],
+					       sizeof(avr_vpd_info
+						      .product_family));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: PROD_FAM: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.product_family);
+
+					memcpy(&avr_vpd_info.feature_mask,
+					       &p_vpd_board_info[0 + 4],
+					       sizeof(avr_vpd_info.feature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_VAL: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.feature_mask);
+
+					memcpy(&avr_vpd_info.invfeature_mask,
+					       &p_vpd_board_info[0 + 4 + 4],
+					       sizeof(avr_vpd_info
+						      .invfeature_mask));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_INV: 0x%08X\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.invfeature_mask);
+
+					/* The inverted copy validates the feature mask. */
+					avr_vpd_info.b_feature_mask_valid =
+						(avr_vpd_info.feature_mask ==
+						 ~avr_vpd_info.invfeature_mask);
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: FMSK_TST: %s\n",
+					       p_adapter_id_str, n_instance_no,
+					       (avr_vpd_info.b_feature_mask_valid ?
+						"OK" :
+						"ERROR"));
+
+					memcpy(&avr_vpd_info.no_of_macs,
+					       &p_vpd_board_info[0 + 4 + 4 + 4],
+					       sizeof(avr_vpd_info.no_of_macs));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: NUM_MACS: %d\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.no_of_macs);
+
+					memcpy(&avr_vpd_info.mac_address,
+					       &p_vpd_board_info[0 + 4 + 4 + 4 + 1],
+					       sizeof(avr_vpd_info.mac_address));
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.mac_address[0],
+					       avr_vpd_info.mac_address[1],
+					       avr_vpd_info.mac_address[2],
+					       avr_vpd_info.mac_address[3],
+					       avr_vpd_info.mac_address[4],
+					       avr_vpd_info.mac_address[5]);
+
+					/*
+					 * Pack the 6 MAC octets into the low 48 bits of a
+					 * uint64_t; the ntohl word-swap below makes the
+					 * result byte-order independent.
+					 */
+					mac.a_u64[0] = 0;
+					memcpy(&mac.a_u8[2],
+					       &avr_vpd_info.mac_address,
+					       sizeof(avr_vpd_info.mac_address));
+					u1 = ntohl(mac.a_u32[0]);
+					if (u1 != mac.a_u32[0]) {
+						const uint32_t u0 = ntohl(mac.a_u32[1]);
+						mac.a_u32[0] = u0;
+						mac.a_u32[1] = u1;
+					}
+					avr_vpd_info.n_mac_val = mac.a_u64[0];
+					NT_LOG(DBG, NTHW,
+					       "%s: AVR%d: MAC_U64: %012" PRIX64
+					       "\n",
+					       p_adapter_id_str, n_instance_no,
+					       avr_vpd_info.n_mac_val);
+				}
+			}
+			/* Publish the MAC block even when the CRC check failed (zeroed info). */
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count =
+				avr_vpd_info.no_of_macs;
+			p_fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value =
+				avr_vpd_info.n_mac_val;
+			memcpy(p_fpga_info->nthw_hw_info.vpd_info.ma_mac_addr_octets,
+			       avr_vpd_info.mac_address,
+			       ARRAY_SIZE(p_fpga_info->nthw_hw_info.vpd_info
+					  .ma_mac_addr_octets));
+		} else {
+			NT_LOG(ERR, NTHW, "%s:%u: res=%d\n", __func__, __LINE__,
+			       res);
+			NT_LOG(ERR, NTHW,
+			       "%s: AVR%d: SYSINFO2: NA: res=%d sz=%d\n",
+			       p_adapter_id_str, n_instance_no, res, rx_buf.size);
+		}
+	}
+
+	return res;
+}
+
+/*
+ * NT50B01, NT200A02, NT200A01-HWbuild2
+ */
+/*
+ * Program the Si5340 clock synthesizer (on IIC instance 0) with a
+ * format-2 register profile. Returns the result of the profile write.
+ *
+ * NOTE(review): p_nthw_iic from nthw_iic_new() is never deleted here --
+ * verify whether nthw_si5340_delete() releases it or whether this leaks.
+ */
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt)
+{
+	int res;
+	nthw_iic_t *p_nthw_iic = nthw_iic_new();
+	nthw_si5340_t *p_nthw_si5340 = nthw_si5340_new();
+
+	assert(p_nthw_iic);
+	assert(p_nthw_si5340);
+	nthw_iic_init(p_nthw_iic, p_fpga, 0, 8); /* I2C cycle time 125Mhz ~ 8ns */
+
+	nthw_si5340_init(p_nthw_si5340, p_nthw_iic,
+			n_iic_addr); /* Si5340_U23_I2c_Addr_7bit */
+	res = nthw_si5340_config_fmt2(p_nthw_si5340, p_clk_profile,
+				    n_clk_profile_rec_cnt);
+	nthw_si5340_delete(p_nthw_si5340);
+	p_nthw_si5340 = NULL;
+
+	return res;
+}
+
+/*
+ * Top-level FPGA bring-up: read the ident/build-time registers, look the
+ * image up in the FPGA manager, attach the RAC, run the adapter-specific
+ * reset sequence and probe the HIF/PCIe3/TSM support modules.
+ * Returns 0 on success, non-zero on failure.
+ *
+ * Fixes vs. original: the FPGA manager is no longer leaked on the
+ * unsupported-FPGA path, the shadowing inner re-declaration of
+ * n_fpga_ident is gone, and the DEBUG-only TSM sample loop no longer
+ * dereferences a NULL p_nthw_tsm when nthw_tsm_new() failed.
+ */
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info)
+{
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	nthw_hif_t *p_nthw_hif = NULL;
+	nthw_pcie3_t *p_nthw_pcie3 = NULL;
+	nthw_rac_t *p_nthw_rac = NULL;
+	nthw_tsm_t *p_nthw_tsm = NULL;
+
+	uint64_t n_fpga_ident = 0;
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+
+	char s_fpga_prod_ver_rev_str[32] = { 0 };
+
+	int res = 0;
+
+	assert(p_fpga_info);
+
+	/* Identify the loaded image from its ident/build-time registers. */
+	{
+		int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+		uint32_t n_fpga_ident_low, n_fpga_ident_high, n_fpga_build_time;
+
+		nthw_rac_reg_read32(p_fpga_info, 0x0, &n_fpga_ident_low);
+		nthw_rac_reg_read32(p_fpga_info, 0x8, &n_fpga_ident_high);
+		nthw_rac_reg_read32(p_fpga_info, 0x10, &n_fpga_build_time);
+
+		n_fpga_ident = (((uint64_t)n_fpga_ident_high << 32) | n_fpga_ident_low);
+		n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+		n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+		n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+		n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+		p_fpga_info->n_fpga_ident = n_fpga_ident;
+		p_fpga_info->n_fpga_type_id = n_fpga_type_id;
+		p_fpga_info->n_fpga_prod_id = n_fpga_prod_id;
+		p_fpga_info->n_fpga_ver_id = n_fpga_ver_id;
+		p_fpga_info->n_fpga_rev_id = n_fpga_rev_id;
+		p_fpga_info->n_fpga_build_time = n_fpga_build_time;
+
+		snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+			 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id,
+			 n_fpga_ver_id, n_fpga_rev_id);
+
+		NT_LOG(INF, NTHW, "%s: FPGA %s (%" PRIX64 ") [%08X]\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str, n_fpga_ident,
+		       n_fpga_build_time);
+	}
+
+	p_fpga_mgr = fpga_mgr_new();
+	fpga_mgr_init(p_fpga_mgr);
+	fpga_mgr_log_dump(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+	p_fpga_info->mp_fpga = p_fpga;
+
+	/*
+	 * The manager is only needed for the lookup above; delete it before
+	 * any error return so the unsupported-FPGA path does not leak it.
+	 */
+	if (p_fpga_mgr) {
+		fpga_mgr_delete(p_fpga_mgr);
+		p_fpga_mgr = NULL;
+	}
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	/* Read Fpga param info */
+	nthw_fpga_get_param_info(p_fpga_info, p_fpga);
+
+	/* debug: report params */
+	NT_LOG(DBG, NTHW, "%s: NT_NIMS=%d\n", p_adapter_id_str, p_fpga_info->n_nims);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_PHY_QUADS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_phy_quads);
+	NT_LOG(DBG, NTHW, "%s: NT_RX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_rx_ports);
+	NT_LOG(DBG, NTHW, "%s: NT_TX_PORTS=%d\n", p_adapter_id_str,
+	       p_fpga_info->n_tx_ports);
+	NT_LOG(DBG, NTHW, "%s: nProfile=%d\n", p_adapter_id_str,
+	       (int)p_fpga_info->profile);
+
+	p_nthw_rac = nthw_rac_new();
+	if (p_nthw_rac == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Unsupported FPGA: RAC is not found: %s (%08X)\n",
+		       p_adapter_id_str, s_fpga_prod_ver_rev_str,
+		       p_fpga_info->n_fpga_build_time);
+		return -1;
+	}
+
+	nthw_rac_init(p_nthw_rac, p_fpga, p_fpga_info);
+	nthw_rac_rab_flush(p_nthw_rac);
+	p_fpga_info->mp_nthw_rac = p_nthw_rac;
+
+	/* special case: values below 0x100 will disable debug on RAC communication */
+	{
+		const int n_fpga_initial_debug_mode = p_fpga_info->n_fpga_debug_mode;
+
+		fpga_set_debug_mode(p_fpga, n_fpga_initial_debug_mode);
+	}
+
+	/* Adapter-specific reset/bring-up sequence. */
+	switch (p_fpga_info->n_nthw_adapter_id) {
+	case NT_HW_ADAPTER_ID_NT200A01: /* fallthrough */
+	case NT_HW_ADAPTER_ID_NT200A02:
+		res = nthw_fpga_nt200a0x_init(p_fpga_info);
+		break;
+	default:
+		NT_LOG(ERR, NTHW, "%s: Unsupported HW product id: %d\n",
+		       p_adapter_id_str, p_fpga_info->n_nthw_adapter_id);
+		res = -1;
+		break;
+	}
+
+	if (res) {
+		NT_LOG(ERR, NTHW, "%s: status: 0x%08X\n", p_adapter_id_str, res);
+		return res;
+	}
+
+	/* Probe for a PCIe3 module; fall back to HIF when it is absent. */
+	res = nthw_pcie3_init(NULL, p_fpga, 0); /* Probe for module */
+	if (res == 0) {
+		p_nthw_pcie3 = nthw_pcie3_new();
+		if (p_nthw_pcie3) {
+			res = nthw_pcie3_init(p_nthw_pcie3, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Pcie3 module found\n",
+				       p_adapter_id_str);
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			} else {
+				nthw_pcie3_delete(p_nthw_pcie3);
+				p_nthw_pcie3 = NULL;
+			}
+		}
+		p_fpga_info->mp_nthw_pcie3 = p_nthw_pcie3;
+	}
+
+	if (p_nthw_pcie3 == NULL) {
+		p_nthw_hif = nthw_hif_new();
+		if (p_nthw_hif) {
+			res = nthw_hif_init(p_nthw_hif, p_fpga, 0);
+			if (res == 0) {
+				NT_LOG(DBG, NTHW, "%s: Hif module found\n",
+				       p_adapter_id_str);
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+			} else {
+				nthw_hif_delete(p_nthw_hif);
+				p_nthw_hif = NULL;
+			}
+		}
+	}
+	p_fpga_info->mp_nthw_hif = p_nthw_hif;
+
+	/* Configure the TSM timestamp module and its two periodic timers. */
+	p_nthw_tsm = nthw_tsm_new();
+	if (p_nthw_tsm) {
+		nthw_tsm_init(p_nthw_tsm, p_fpga, 0);
+
+		nthw_tsm_set_config_ts_format(p_nthw_tsm,
+					  1); /* 1 = TSM: TS format native */
+
+		/* Timer T0 - stat toggle timer */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true);
+
+		/* Timer T1 - keep alive timer */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false);
+		nthw_tsm_set_timer_t1_max_count(p_nthw_tsm,
+					   100 * 1000 * 1000); /* ns */
+		nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true);
+	}
+	p_fpga_info->mp_nthw_tsm = p_nthw_tsm;
+
+	/* TSM sample triggering: test validation... */
+#if defined(DEBUG) && (1)
+	if (p_nthw_tsm) { /* skip when the TSM module failed to allocate */
+		uint64_t n_time, n_ts;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			if (p_nthw_hif)
+				nthw_hif_trigger_sample_time(p_nthw_hif);
+
+			else if (p_nthw_pcie3)
+				nthw_pcie3_trigger_sample_time(p_nthw_pcie3);
+			nthw_tsm_get_time(p_nthw_tsm, &n_time);
+			nthw_tsm_get_ts(p_nthw_tsm, &n_ts);
+
+			NT_LOG(DBG, NTHW,
+			       "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n",
+			       p_adapter_id_str, n_time, n_ts);
+
+			NT_OS_WAIT_USEC(1000);
+		}
+	}
+#endif
+
+	return res;
+}
+
+/*
+ * Shut the FPGA down by resetting the register access bus.
+ * Tolerates a partially initialized (or NULL) fpga_info.
+ * Returns the RAB reset status, or -1 when there is no RAC to reset.
+ * (The original code tested p_fpga_info twice in nested conditions.)
+ */
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info)
+{
+	int res = -1;
+
+	if (p_fpga_info && p_fpga_info->mp_nthw_rac)
+		res = nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.h b/drivers/net/ntnic/nthw/core/nthw_fpga.h
new file mode 100644
index 0000000000..336d81f337
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_H__
+#define __NTHW_FPGA_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_stat.h"
+
+#include "nthw_fpga_rst.h"
+
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_dbs.h"
+
+int nthw_fpga_init(struct fpga_info_s *p_fpga_info);
+int nthw_fpga_shutdown(struct fpga_info_s *p_fpga_info);
+
+int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nt_fpga_t *p_fpga);
+
+int nthw_fpga_avr_probe(nt_fpga_t *p_fpga, const int n_instance_no);
+
+int nthw_fpga_iic_scan(nt_fpga_t *p_fpga, const int n_instance_no_begin,
+		       const int n_instance_no_end);
+
+int nthw_fpga_iic_read_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const uint8_t n_dev_addr, const uint8_t n_reg_addr);
+int nthw_fpga_iic_write_byte(nt_fpga_t *p_fpga, const int n_instance_no,
+			     const uint8_t n_dev_addr, const uint8_t n_reg_addr,
+			     uint8_t val);
+
+int nthw_fpga_silabs_detect(nt_fpga_t *p_fpga, const int n_instance_no,
+			    const int n_dev_addr, const int n_page_reg_addr);
+
+int nthw_fpga_si5340_clock_synth_init_fmt2(nt_fpga_t *p_fpga,
+	const uint8_t n_iic_addr,
+	const clk_profile_data_fmt2_t *p_clk_profile,
+	const int n_clk_profile_rec_cnt);
+
+#endif /* __NTHW_FPGA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
new file mode 100644
index 0000000000..70338fdfd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+/*
+ * NT200A0x bring-up: run the common reset sequence, then the
+ * product-specific one (currently only FPGA product 9563 is supported).
+ * Returns 0 on success, non-zero on failure.
+ */
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	struct nthw_fpga_rst_nt200a0x rst;
+	int res;
+
+	/* Common reset sequence shared by all NT200A0x products. */
+	res = nthw_fpga_rst_nt200a0x_init(p_fpga_info, &rst);
+	if (res != 0) {
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+		return res;
+	}
+
+	/* Product-specific reset sequence. */
+	if (p_fpga_info->n_fpga_prod_id == 9563) {
+		res = nthw_fpga_rst9563_init(p_fpga_info, &rst);
+	} else {
+		NT_LOG(ERR, NTHW, "%s: Unsupported FPGA product: %04d\n",
+		       p_adapter_id_str, p_fpga_info->n_fpga_prod_id);
+		res = -1;
+	}
+
+	if (res != 0)
+		NT_LOG(ERR, NTHW, "%s: %s: loc=%u: FPGA=%04d res=%d\n",
+		       p_adapter_id_str, __func__, __LINE__,
+		       p_fpga_info->n_fpga_prod_id, res);
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
new file mode 100644
index 0000000000..ff324bee39
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_nt200a0x.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_NT200A0X_H__
+#define __NTHW_FPGA_NT200A0X_H__
+
+int nthw_fpga_nt200a0x_init(struct fpga_info_s *p_fpga_info);
+
+/* NT200A02: 9563 */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *const p);
+
+#endif /* __NTHW_FPGA_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
new file mode 100644
index 0000000000..66c148bab2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_fpga.h"
+#include "nthw_fpga_nt200a0x.h"
+
+#include "nthw_fpga_rst.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
new file mode 100644
index 0000000000..2099c4b677
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_H__
+#define __NTHW_FPGA_RST_H__
+
+#include "nthw_drv.h"
+
+#include "nthw_fpga_model.h"
+
+#include "nthw_rac.h"
+#include "nthw_iic.h"
+
+#include "nthw_fpga_rst_nt200a0x.h"
+
+#endif /* __NTHW_FPGA_RST_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
new file mode 100644
index 0000000000..077b043c60
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst9563.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+#include "nthw_clock_profiles.h"
+
+/*
+ * Resolve all RST9563 register/field pointers into @p and record the FPGA
+ * product id/version/revision for later use by the common NT200A0x reset.
+ * Fields that do not exist on the 9563 image are set to NULL so callers
+ * can (and must) test them before use.
+ * Returns 0 on success, -1 when the RST9563 module instance is missing.
+ *
+ * Fix vs. original: the MOD_RST9563 lookup-and-check block was duplicated
+ * verbatim, and mp_fld_rst_ptp was assigned twice; both duplicates removed.
+ */
+static int nthw_fpga_rst9563_setup(nt_fpga_t *p_fpga,
+				  struct nthw_fpga_rst_nt200a0x *const p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+	const int n_fpga_version = p_fpga->m_fpga_version;
+	const int n_fpga_revision = p_fpga->m_fpga_revision;
+
+	nt_module_t *p_mod_rst;
+	nt_register_t *p_curr_reg;
+
+	assert(p);
+	p->mn_fpga_product_id = n_fpga_product_id;
+	p->mn_fpga_version = n_fpga_version;
+	p->mn_fpga_revision = n_fpga_revision;
+
+	NT_LOG(DBG, NTHW, "%s: %s: FPGA reset setup: FPGA %04d-%02d-%02d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision);
+
+	p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+	if (p_mod_rst == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RST %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	/* RST register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_RST);
+	p->mp_fld_rst_sys = register_get_field(p_curr_reg, RST9563_RST_SYS);
+	p->mp_fld_rst_sys_mmcm = register_get_field(p_curr_reg, RST9563_RST_SYS_MMCM);
+	p->mp_fld_rst_core_mmcm =
+		register_get_field(p_curr_reg, RST9563_RST_CORE_MMCM);
+	p->mp_fld_rst_rpp = register_get_field(p_curr_reg, RST9563_RST_RPP);
+	p->mp_fld_rst_ddr4 = register_get_field(p_curr_reg, RST9563_RST_DDR4);
+	p->mp_fld_rst_sdc = register_get_field(p_curr_reg, RST9563_RST_SDC);
+	p->mp_fld_rst_phy = register_get_field(p_curr_reg, RST9563_RST_PHY);
+	p->mp_fld_rst_serdes_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_serdes_rx_datapath = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_pcs_rx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_mac_rx = register_get_field(p_curr_reg, RST9563_RST_MAC_RX);
+	p->mp_fld_rst_mac_tx = NULL; /* Field not present on 9563 */
+	p->mp_fld_rst_ptp = register_get_field(p_curr_reg, RST9563_RST_PTP);
+	p->mp_fld_rst_ts = register_get_field(p_curr_reg, RST9563_RST_TS);
+	p->mp_fld_rst_ptp_mmcm = register_get_field(p_curr_reg, RST9563_RST_PTP_MMCM);
+	p->mp_fld_rst_ts_mmcm = register_get_field(p_curr_reg, RST9563_RST_TS_MMCM);
+	/* referenced in separate function */
+	p->mp_fld_rst_periph = register_get_field(p_curr_reg, RST9563_RST_PERIPH);
+	/* query (not get): these fields are optional on some images */
+	p->mp_fld_rst_tsm_ref_mmcm =
+		register_query_field(p_curr_reg, RST9563_RST_TSM_REF_MMCM);
+	p->mp_fld_rst_tmc = register_query_field(p_curr_reg, RST9563_RST_TMC);
+
+	if (!p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TSM_REF_MMCM found\n",
+		       p_adapter_id_str);
+	}
+	if (!p->mp_fld_rst_tmc) {
+		NT_LOG(DBG, NTHW, "%s: No RST9563_RST_TMC found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* CTRL register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_CTRL);
+	p->mp_fld_ctrl_ts_clk_sel_override =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL_OVERRIDE);
+	p->mp_fld_ctrl_ts_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_TS_CLKSEL);
+	p->mp_fld_ctrl_ts_clk_sel_ref = NULL; /* Field not present on 9563 */
+	p->mp_fld_ctrl_ptp_mmcm_clk_sel =
+		register_get_field(p_curr_reg, RST9563_CTRL_PTP_MMCM_CLKSEL);
+	register_update(p_curr_reg);
+
+	/* STAT register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STAT);
+	p->mp_fld_stat_ddr4_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_MMCM_LOCKED);
+	p->mp_fld_stat_sys_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_SYS_MMCM_LOCKED);
+	p->mp_fld_stat_core_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_CORE_MMCM_LOCKED);
+	p->mp_fld_stat_ddr4_pll_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_DDR4_PLL_LOCKED);
+	p->mp_fld_stat_ptp_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_PTP_MMCM_LOCKED);
+	p->mp_fld_stat_ts_mmcm_locked =
+		register_get_field(p_curr_reg, RST9563_STAT_TS_MMCM_LOCKED);
+	p->mp_fld_stat_tsm_ref_mmcm_locked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STAT_TSM_REF_MMCM_LOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* STICKY register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_STICKY);
+	p->mp_fld_sticky_ptp_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_PTP_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ts_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_TS_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_MMCM_UNLOCKED);
+	p->mp_fld_sticky_ddr4_pll_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_DDR4_PLL_UNLOCKED);
+	p->mp_fld_sticky_core_mmcm_unlocked =
+		register_get_field(p_curr_reg, RST9563_STICKY_CORE_MMCM_UNLOCKED);
+	p->mp_fld_sticky_pci_sys_mmcm_unlocked = NULL; /* Field not present on 9563 */
+	p->mp_fld_sticky_tsm_ref_mmcm_unlocked = NULL; /* Field not present on 9563 */
+
+	if (!p->mp_fld_sticky_tsm_ref_mmcm_unlocked) {
+		NT_LOG(DBG, NTHW,
+		       "%s: No RST9563_STICKY_TSM_REF_MMCM_UNLOCKED found\n",
+		       p_adapter_id_str);
+	}
+	register_update(p_curr_reg);
+
+	/* POWER register field pointers */
+	p_curr_reg = module_get_register(p_mod_rst, RST9563_POWER);
+	p->mp_fld_power_pu_phy = register_get_field(p_curr_reg, RST9563_POWER_PU_PHY);
+	p->mp_fld_power_pu_nseb =
+		register_get_field(p_curr_reg, RST9563_POWER_PU_NSEB);
+	register_update(p_curr_reg);
+
+	return 0;
+}
+
+/*
+ * Pulse the 9563 peripheral reset: assert then immediately de-assert
+ * the RST9563_RST_PERIPH field.
+ * Returns 0 on success, -1 when the RST9563 module is missing.
+ */
+static int nthw_fpga_rst9563_periph_reset(nt_fpga_t *p_fpga)
+{
+	const char *const _unused p_adapter_id_str =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod_rst = fpga_query_module(p_fpga, MOD_RST9563, 0);
+
+	if (!p_mod_rst)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "%s: PERIPH RST\n", p_adapter_id_str);
+
+	nt_register_t *p_reg_rst = module_get_register(p_mod_rst, RST9563_RST);
+	nt_field_t *p_fld_periph =
+		register_get_field(p_reg_rst, RST9563_RST_PERIPH);
+
+	field_set_flush(p_fld_periph); /* assert peripheral reset */
+	field_clr_flush(p_fld_periph); /* de-assert peripheral reset */
+
+	return 0;
+}
+
+/*
+ * Program the Si Labs clock synthesizer for the 9563 FPGA.
+ * Only the Si5340 (NT200A02 U23) is supported; any other detected model
+ * is rejected with -1.
+ */
+static int
+nthw_fpga_rst9563_clock_synth_init(nt_fpga_t *p_fpga,
+				  const int n_si_labs_clock_synth_model,
+				  const uint8_t n_si_labs_clock_synth_i2c_addr)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_fpga_product_id = p_fpga->m_product_id;
+
+	if (n_si_labs_clock_synth_model != 5340) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fpga %d: Unsupported clock synth model (%d)\n",
+		       p_adapter_id_str, n_fpga_product_id,
+		       n_si_labs_clock_synth_model);
+		return -1;
+	}
+
+	return nthw_fpga_si5340_clock_synth_init_fmt2(p_fpga,
+		n_si_labs_clock_synth_i2c_addr,
+		p_data_si5340_nt200a02_u23_v5,
+		n_data_si5340_nt200a02_u23_v5);
+}
+
+/*
+ * Full 9563 reset sequence:
+ *   peripheral reset -> clock synth init -> field-pointer setup ->
+ *   common NT200A0x reset.
+ * Each step aborts the sequence on failure and returns its error code.
+ */
+int nthw_fpga_rst9563_init(struct fpga_info_s *p_fpga_info,
+			  struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+	assert(p_rst);
+
+	const char *const _unused p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	const int n_clk_synth_model = p_rst->mn_si_labs_clock_synth_model;
+	const uint8_t n_clk_synth_i2c_addr = p_rst->mn_si_labs_clock_synth_i2c_addr;
+	int res;
+
+	res = nthw_fpga_rst9563_periph_reset(p_fpga);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_clock_synth_init(p_fpga, n_clk_synth_model,
+						n_clk_synth_i2c_addr);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst9563_setup(p_fpga, p_rst);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	res = nthw_fpga_rst_nt200a0x_reset(p_fpga, p_rst);
+	if (res != 0) {
+		NT_LOG(DBG, NTHW, "%s: ERROR: res=%d [%s:%u]\n", p_adapter_id_str,
+		       res, __func__, __LINE__);
+		return res;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
new file mode 100644
index 0000000000..ae63fefb09
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga.h"
+
+static const uint8_t si5338_u23_i2c_addr_7bit = 0x70;
+static const uint8_t si5340_u23_i2c_addr_7bit = 0x74;
+
+/*
+ * Wait until DDR4 PLL LOCKED
+ */
+/*
+ * Wait until the DDR4 PLL and the DDR4/TSM-REF MMCMs report locked, then
+ * clear all sticky MMCM/PLL unlock bits and verify the DDR4 ones stayed
+ * clear.  Returns 0 on success, -1 when an MMCM lock wait fails.
+ *
+ * Fix vs. original: the loop declared a second `int locked` shadowing the
+ * outer `uint32_t locked`, and `retrycount` was unsigned while compared
+ * with `<= 0`; both are now a single signed variable.
+ */
+static int nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(nt_fpga_t *p_fpga,
+	const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int locked;
+	int retrycount = 5;
+	uint32_t timeout = 50000; /* initial timeout must be set to 5 sec. */
+
+	/* 14: wait until DDR4 PLL LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 PLL to lock\n", p_adapter_id_str);
+	/*
+	 * The following retry count gives a total timeout of 1 * 5 + 5 * 8 = 45sec
+	 * It has been observed that at least 21sec can be necessary
+	 */
+	while (true) {
+		locked = field_wait_set_any32(p->mp_fld_stat_ddr4_pll_locked,
+					      timeout, 100);
+		if (locked == 0)
+			break;
+		NT_LOG(DBG, NTHW,
+		       "%s: Waiting for DDR4 PLL to lock - timeout\n",
+		       p_adapter_id_str);
+		if (retrycount <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for DDR4 PLL to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			break;
+		}
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		field_clr_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		retrycount--;
+		/* Increase timeout for subsequent attempts to 8 sec. */
+		timeout = 80000;
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for DDR4 MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ddr4_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for DDR4 MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+		return -1;
+	}
+
+	/* TSM REF MMCM is optional - only wait for it when present */
+	if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked, -1,
+					    -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+			return -1;
+		}
+	}
+
+	/* 10: Clear all MMCM/PLL lock sticky bits before testing them */
+	NT_LOG(DBG, NTHW, "%s: Clear sticky MMCM unlock bits\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* 11: Ensure sticky bits are not unlocked except PTP MMCM and TS MMCM */
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for SDRAM controller has been calibrated - On some adapters we have seen
+ * calibration time of 2.3 seconds
+ */
+/*
+ * Wait for the SDRAM controller (SDC) calibration to complete - on some
+ * adapters a calibration time of 2.3 seconds has been observed.
+ * The DDR4 PLL lock is (re)acquired before each attempt; on failure the
+ * DDR4 domain is reset and the attempt retried (up to n_retry_cnt_max).
+ * Returns 0 on success, non-zero on timeout/failure.
+ *
+ * Fix vs. original: nthw_sdc_get_states() could be called with a NULL
+ * handle in the retry-exhausted branch, and a successful PLL lock with no
+ * SDC present still fell through to a spurious DDR4 reset pulse; both
+ * paths are now guarded by an early break when no SDC exists.
+ */
+static int
+nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(nt_fpga_t *p_fpga,
+		const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nthw_sdc_t *p_nthw_sdc = NULL;
+	const int n_retry_cnt_max = 5;
+	int n_retry_cnt;
+	int res;
+
+	res = nthw_sdc_init(NULL, p_fpga, 0); /* probe for module */
+	if (res == 0) {
+		p_nthw_sdc = nthw_sdc_new();
+		if (p_nthw_sdc) {
+			res = nthw_sdc_init(p_nthw_sdc, p_fpga, 0);
+			if (res) {
+				NT_LOG(ERR, NTHW,
+				       "%s: SDC init failed: res=%d [%s:%d]\n",
+				       p_adapter_id_str, res, __func__, __LINE__);
+				nthw_sdc_delete(p_nthw_sdc);
+				p_nthw_sdc = NULL;
+				return -1;
+			}
+		} else {
+			nthw_sdc_delete(p_nthw_sdc);
+			p_nthw_sdc = NULL;
+		}
+	} else {
+		NT_LOG(DBG, NTHW, "%s: No SDC found\n", p_adapter_id_str);
+	}
+
+	n_retry_cnt = 0;
+	res = -1;
+	while ((res != 0) && (n_retry_cnt <= n_retry_cnt_max)) {
+		/* wait until DDR4 PLL LOCKED */
+		res = nthw_fpga_rst_nt200a0x_wait_ddr4_pll_locked(p_fpga, p);
+		if (res == 0) {
+			/* Without an SDC there is no calibration to await */
+			if (!p_nthw_sdc)
+				break;
+
+			/*
+			 * Wait for SDRAM controller has been calibrated
+			 * On some adapters we have seen calibration time of 2.3 seconds
+			 */
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for SDRAM to calibrate\n",
+			       p_adapter_id_str);
+			res = nthw_sdc_wait_states(p_nthw_sdc, 10000, 1000);
+			{
+				uint64_t n_result_mask;
+
+				int n_state_code _unused =
+					nthw_sdc_get_states(p_nthw_sdc,
+							  &n_result_mask);
+				NT_LOG(DBG, NTHW,
+				       "%s: SDRAM state=0x%08lX state_code=%d retry=%d code=%d\n",
+				       p_adapter_id_str, n_result_mask,
+				       n_state_code, n_retry_cnt, res);
+			}
+			if (res == 0)
+				break;
+
+			if (n_retry_cnt >= n_retry_cnt_max) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Timeout waiting for SDRAM controller calibration\n",
+				       p_adapter_id_str);
+			}
+		}
+
+		/*
+		 * SDRAM controller is not calibrated with DDR4 ram blocks:
+		 * reset DDR and perform calibration retry
+		 */
+		field_set_flush(p->mp_fld_rst_ddr4); /* Reset DDR PLL */
+		NT_OS_WAIT_USEC(100);
+		field_clr_flush(p->mp_fld_rst_ddr4);
+
+		n_retry_cnt++;
+	}
+	nthw_sdc_delete(p_nthw_sdc);
+
+	return res;
+}
+
+/*
+ * Common NT200A0x full-reset sequence.
+ *
+ * Asserts resets for all domains except peripherals, selects timesync
+ * reference clocks, de-asserts resets in the documented order, waits for
+ * the various MMCMs/PLLs to lock and SDRAM calibration to finish, then
+ * checks the sticky unlock bits.  Ordering of the register accesses is
+ * part of the hardware contract - do not reorder.
+ * Returns 0 on success, -1 on a failed lock/calibration check.
+ * NOTE(review): several field pointers may legitimately be NULL
+ * (fields absent on some FPGA images); each is tested before use.
+ */
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				const struct nthw_fpga_rst_nt200a0x *p)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const fpga_info_t *const p_fpga_info = p_fpga->p_fpga_info;
+
+	const int n_fpga_product_id = p->mn_fpga_product_id;
+	const int n_fpga_version = p->mn_fpga_version;
+	const int n_fpga_revision = p->mn_fpga_revision;
+	const int n_nthw_adapter_id = p_fpga_info->n_nthw_adapter_id;
+	const bool b_is_nt200a01 = (n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01);
+	const int n_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	const uint8_t index = 0;
+	int locked;
+	int res = -1;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: %s: FPGA reset sequence: FPGA %04d-%02d-%02d @ HWId%d\n",
+	       p_adapter_id_str, __func__, n_fpga_product_id, n_fpga_version,
+	       n_fpga_revision, n_hw_id);
+	assert(n_fpga_product_id == p_fpga->m_product_id);
+
+	/*
+	 * Reset all domains / modules except peripherals
+	 * Set default reset values to ensure that all modules are reset correctly
+	 * no matter if nic has been powercycled or ntservice has been reloaded
+	 */
+
+	/*
+	 * Reset to defaults
+	 * 1: Reset all domains
+	 */
+	NT_LOG(DBG, NTHW, "%s: RST defaults\n", p_adapter_id_str);
+
+	field_update_register(p->mp_fld_rst_sys);
+	field_set_flush(p->mp_fld_rst_sys);
+	if (p->mp_fld_rst_tmc)
+		field_set_flush(p->mp_fld_rst_tmc);
+	field_set_flush(p->mp_fld_rst_rpp);
+	field_set_flush(p->mp_fld_rst_ddr4); /* 0x07 3 banks */
+	field_set_flush(p->mp_fld_rst_sdc);
+
+	/* Reset port 0 and 1 in the following registers: */
+	field_set_flush(p->mp_fld_rst_phy); /* 0x03 2 ports */
+	if (p->mp_fld_rst_mac_rx)
+		field_set_flush(p->mp_fld_rst_mac_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_mac_tx)
+		field_set_flush(p->mp_fld_rst_mac_tx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_pcs_rx)
+		field_set_flush(p->mp_fld_rst_pcs_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx)
+		field_set_flush(p->mp_fld_rst_serdes_rx); /* 0x03 2 ports */
+
+	if (p->mp_fld_rst_serdes_rx_datapath) {
+		field_set_flush(p->mp_fld_rst_serdes_rx_datapath);
+		field_clr_flush(p->mp_fld_rst_serdes_rx);
+	}
+	if (p->mp_fld_rst_serdes_tx)
+		field_set_flush(p->mp_fld_rst_serdes_tx);
+
+	field_set_flush(p->mp_fld_rst_ptp);
+	field_set_flush(p->mp_fld_rst_ts);
+	field_set_flush(p->mp_fld_rst_sys_mmcm);
+	field_set_flush(p->mp_fld_rst_core_mmcm);
+	field_set_flush(p->mp_fld_rst_ptp_mmcm);
+	field_set_flush(p->mp_fld_rst_ts_mmcm);
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm)
+		field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+
+	/* Write all changes to register */
+	field_flush_register(p->mp_fld_rst_sys);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			field_update_register(p->mp_fld_rst_tsm_ref_mmcm);
+			field_set_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+	}
+
+	/*
+	 * 2: Force use of 50 MHz reference clock for timesync;
+	 * NOTE: From 9508-05-18 this is a 20 MHz clock
+	 */
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL OVERRIDE\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel_override);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel_override);
+
+	NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_ctrl_ts_clk_sel);
+	field_set_flush(p->mp_fld_ctrl_ts_clk_sel);
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: _selecting 20MHz TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref) {
+			field_update_register(p->mp_fld_ctrl_ts_clk_sel_ref);
+			field_clr_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		}
+	}
+
+	/* 4: De-assert sys reset, CORE and SYS MMCM resets */
+	NT_LOG(DBG, NTHW, "%s: De-asserting SYS, CORE and SYS MMCM resets\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys);
+	field_clr_flush(p->mp_fld_rst_sys_mmcm);
+	field_clr_flush(p->mp_fld_rst_core_mmcm);
+
+	/* 5: wait until CORE MMCM and SYS MMCM are LOCKED */
+	NT_LOG(DBG, NTHW, "%s: Waiting for SYS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_sys_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for SYS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Waiting for CORE MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_core_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for CORE MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	/* RAC RAB bus "flip/flip" reset second stage - new impl (ref RMT#37020) */
+	/* RAC/RAB init - SYS/CORE MMCM is locked - pull the remaining RAB buses out of reset */
+	{
+		nthw_rac_t *p_nthw_rac = p_fpga_info->mp_nthw_rac;
+
+		NT_LOG(DBG, NTHW, "%s: De-asserting remaining RAB buses\n",
+		       p_adapter_id_str);
+		nthw_rac_rab_init(p_nthw_rac, 0);
+	}
+
+	if ((true) && p->mp_fld_rst_tsm_ref_mmcm) {
+		NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			NT_LOG(DBG, NTHW,
+			       "%s: Waiting for TSM REF MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	/*
+	 * 5.2: Having ensured CORE MMCM and SYS MMCM are LOCKED,
+	 * we need to select the alternative 20 MHz reference clock,
+	 * the external TSM reference clock
+	 * on NT200A01 - build 2 HW only (see SSF00024 p.32)
+	 */
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		NT_LOG(DBG, NTHW, "%s: Setting TS CLK SEL REF\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_ctrl_ts_clk_sel_ref)
+			field_set_flush(p->mp_fld_ctrl_ts_clk_sel_ref);
+		if (p->mp_fld_rst_tsm_ref_mmcm) {
+			NT_LOG(DBG, NTHW, "%s: De-asserting TSM REF MMCM\n",
+			       p_adapter_id_str);
+			field_clr_flush(p->mp_fld_rst_tsm_ref_mmcm);
+		}
+		NT_LOG(DBG, NTHW, "%s: Waiting for TSM REF MMCM to lock\n",
+		       p_adapter_id_str);
+		if (p->mp_fld_stat_tsm_ref_mmcm_locked) {
+			locked = field_wait_set_any32(p->mp_fld_stat_tsm_ref_mmcm_locked,
+						      -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for TSM REF MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting all PHY resets\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_phy);
+	field_clr_flush(p->mp_fld_rst_phy);
+
+	/* MAC_PCS_XXV 10G/25G: 9530 / 9544 */
+	if (n_fpga_product_id == 9530 || n_fpga_product_id == 9544) {
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv0 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv0);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv0, p_fpga, 0, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv0, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv0);
+		}
+
+		{
+			/* Based on nt200e3_2_ptp.cpp My25GbPhy::resetRx */
+			nthw_mac_pcs_xxv_t *p_nthw_mac_pcs_xxv1 = nthw_mac_pcs_xxv_new();
+
+			assert(p_nthw_mac_pcs_xxv1);
+			nthw_mac_pcs_xxv_init(p_nthw_mac_pcs_xxv1, p_fpga, 1, 1);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, true,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_reset_rx_gt_data(p_nthw_mac_pcs_xxv1, false,
+						    index);
+			NT_OS_WAIT_USEC(1000);
+
+			nthw_mac_pcs_xxv_delete(p_nthw_mac_pcs_xxv1);
+		}
+		NT_OS_WAIT_USEC(3000);
+	}
+
+	/*
+	 * 8: De-assert reset for remaining domains/modules resets except
+	 * TS, PTP, PTP_MMCM and TS_MMCM
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TMC RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_tmc) {
+		field_update_register(p->mp_fld_rst_tmc);
+		field_clr_flush(p->mp_fld_rst_tmc);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting RPP RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_rpp);
+	field_clr_flush(p->mp_fld_rst_rpp);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting DDR4 RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_ddr4);
+	field_clr_flush(p->mp_fld_rst_ddr4);
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting SDC RST\n", p_adapter_id_str);
+	field_update_register(p->mp_fld_rst_sdc);
+	field_clr_flush(p->mp_fld_rst_sdc);
+
+	/* NOTE: 9522 implements PHY10G_QPLL reset and lock at this stage in mac_rx_rst() */
+	NT_LOG(DBG, NTHW, "%s: De-asserting MAC RX RST\n", p_adapter_id_str);
+	if (p->mp_fld_rst_mac_rx) {
+		field_update_register(p->mp_fld_rst_mac_rx);
+		field_clr_flush(p->mp_fld_rst_mac_rx);
+	}
+
+	/* await until DDR4 PLL LOCKED and SDRAM controller has been calibrated */
+	res = nthw_fpga_rst_nt200a0x_wait_sdc_calibrated(p_fpga, p);
+	if (res) {
+		NT_LOG(ERR, NTHW,
+		       "%s: nthw_fpga_rst_nt200a0x_wait_sdc_calibrated() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/*
+	 * Timesync/PTP reset sequence
+	 * De-assert TS_MMCM reset
+	 */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS MMCM RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts_mmcm);
+
+	/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+	NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to lock\n", p_adapter_id_str);
+	locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+	if (locked != 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Waiting for TS MMCM to lock failed (%d)\n",
+		       p_adapter_id_str, locked);
+	}
+
+	NT_LOG(DBG, NTHW, "%s: Calling clear_sticky_mmcm_unlock_bits()\n",
+	       p_adapter_id_str);
+	field_update_register(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	/* Clear all sticky bits */
+	field_set_flush(p->mp_fld_sticky_ptp_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ts_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_mmcm_unlocked);
+	field_set_flush(p->mp_fld_sticky_ddr4_pll_unlocked);
+	field_set_flush(p->mp_fld_sticky_core_mmcm_unlocked);
+	if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_tsm_ref_mmcm_unlocked);
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked)
+		field_set_flush(p->mp_fld_sticky_pci_sys_mmcm_unlocked);
+
+	/* De-assert TS reset bit */
+	NT_LOG(DBG, NTHW, "%s: De-asserting TS RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ts);
+
+	if (field_get_updated(p->mp_fld_sticky_ts_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ts_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_ddr4_pll_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_ddr4_pll_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (field_get_updated(p->mp_fld_sticky_core_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_core_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (p->mp_fld_sticky_pci_sys_mmcm_unlocked &&
+			field_get_updated(p->mp_fld_sticky_pci_sys_mmcm_unlocked)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: get_sticky_pci_sys_mmcm_unlocked() returned true\n",
+		       p_adapter_id_str);
+		return -1;
+	}
+
+	if (b_is_nt200a01 && n_hw_id == 2) { /* Not relevant to NT200A02 */
+		if (p->mp_fld_sticky_tsm_ref_mmcm_unlocked &&
+				field_get_updated(p->mp_fld_sticky_tsm_ref_mmcm_unlocked)) {
+			NT_LOG(ERR, NTHW,
+			       "%s: get_sticky_tsm_ref_mmcm_unlocked() returned true\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+	}
+
+	/* NOTE(review): deliberately disabled PTP-MMCM switch-over path;
+	 * kept for reference - confirm whether it should be removed.
+	 */
+	if (false) {
+		/* Deassert PTP_MMCM */
+		NT_LOG(DBG, NTHW, "%s: De-asserting PTP MMCM RST\n",
+		       p_adapter_id_str);
+		field_clr_flush(p->mp_fld_rst_ptp_mmcm);
+
+		if ((b_is_nt200a01 && n_fpga_version >= 9) || !b_is_nt200a01) {
+			/* Wait until PTP_MMCM LOCKED */
+			NT_LOG(DBG, NTHW, "%s: Waiting for PTP MMCM to lock\n",
+			       p_adapter_id_str);
+			locked = field_wait_set_any32(p->mp_fld_stat_ptp_mmcm_locked,
+						    -1, -1);
+			if (locked != 0) {
+				NT_LOG(ERR, NTHW,
+				       "%s: Waiting for PTP MMCM to lock failed (%d)\n",
+				       p_adapter_id_str, locked);
+			}
+		}
+
+		/* Switch PTP MMCM sel to use ptp clk */
+		NT_LOG(DBG, NTHW, "%s: Setting PTP MMCM CLK SEL\n",
+		       p_adapter_id_str);
+		field_set_flush(p->mp_fld_ctrl_ptp_mmcm_clk_sel);
+
+		/* Wait until TS_MMCM LOCKED (NT_RAB0_REG_P9508_RST9508_STAT_TS_MMCM_LOCKED=1); */
+		NT_LOG(DBG, NTHW, "%s: Waiting for TS MMCM to re-lock\n",
+		       p_adapter_id_str);
+		locked = field_wait_set_any32(p->mp_fld_stat_ts_mmcm_locked, -1, -1);
+		if (locked != 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Waiting for TS MMCM to re-lock failed (%d)\n",
+			       p_adapter_id_str, locked);
+		}
+	}
+
+	NT_LOG(DBG, NTHW, "%s: De-asserting PTP RST\n", p_adapter_id_str);
+	field_clr_flush(p->mp_fld_rst_ptp);
+
+	/* POWER staging introduced in 9508-05-09 and always for 9512 */
+	if (n_fpga_product_id == 9508 && n_fpga_version <= 5 &&
+			n_fpga_revision <= 8) {
+		NT_LOG(DBG, NTHW, "%s: No power staging\n", p_adapter_id_str);
+	} else {
+		NT_LOG(DBG, NTHW, "%s: Staging power\n", p_adapter_id_str);
+		field_set_flush(p->mp_fld_power_pu_phy); /* PHY power up */
+		field_clr_flush(p->mp_fld_power_pu_nseb); /* NSEB power down */
+	}
+
+	NT_LOG(DBG, NTHW, "%s: %s: END\n", p_adapter_id_str, __func__);
+
+	return 0;
+}
+
+/*
+ * Prepare the common NT200A0x reset context: reset/setup the RAB buses,
+ * probe the AVR and IIC buses, and detect which Si Labs clock synthesizer
+ * is fitted (Si5340 on NT200A02/NT200A01-build2, Si5338 on old
+ * NT200A01-build1).  The detected model, its I2C address and the HW id
+ * are recorded in @p_rst for the product-specific init.
+ * Returns negative on failure to detect a clock synth.
+ */
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+			       struct nthw_fpga_rst_nt200a0x *p_rst)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = -1;
+	int n_si_labs_clock_synth_model = -1;
+	uint8_t n_si_labs_clock_synth_i2c_addr = 0;
+	nt_fpga_t *p_fpga = NULL;
+
+	p_fpga = p_fpga_info->mp_fpga;
+
+	NT_LOG(DBG, NTHW, "%s: %s: RAB init/reset\n", p_adapter_id_str, __func__);
+	nthw_rac_rab_reset(p_fpga_info->mp_nthw_rac);
+	nthw_rac_rab_setup(p_fpga_info->mp_nthw_rac);
+
+	/* NOTE(review): results of the probe and the first scan are
+	 * overwritten below; looks like best-effort discovery - confirm
+	 * that their failures are intentionally ignored.
+	 */
+	res = nthw_fpga_avr_probe(p_fpga, 0);
+
+	res = nthw_fpga_iic_scan(p_fpga, 0, 0);
+	res = nthw_fpga_iic_scan(p_fpga, 2, 3);
+
+	/*
+	 * Detect clock synth model
+	 * check for NT200A02/NT200A01 HW-build2 - most commonly seen
+	 */
+	n_si_labs_clock_synth_i2c_addr = si5340_u23_i2c_addr_7bit;
+	n_si_labs_clock_synth_model =
+		nthw_fpga_silabs_detect(p_fpga, 0, n_si_labs_clock_synth_i2c_addr, 1);
+	if (n_si_labs_clock_synth_model == -1) {
+		/* check for old NT200A01 HW-build1 */
+		n_si_labs_clock_synth_i2c_addr = si5338_u23_i2c_addr_7bit;
+		n_si_labs_clock_synth_model =
+			nthw_fpga_silabs_detect(p_fpga, 0,
+						n_si_labs_clock_synth_i2c_addr, 255);
+		if (n_si_labs_clock_synth_model == -1) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Failed to detect clock synth model (%d)\n",
+			       p_adapter_id_str, n_si_labs_clock_synth_model);
+			return -1;
+		}
+	}
+	p_rst->mn_si_labs_clock_synth_model = n_si_labs_clock_synth_model;
+	p_rst->mn_si_labs_clock_synth_i2c_addr = n_si_labs_clock_synth_i2c_addr;
+	p_rst->mn_hw_id = p_fpga_info->nthw_hw_info.hw_id;
+	NT_LOG(DBG, NTHW, "%s: %s: Si%04d @ 0x%02x\n", p_adapter_id_str, __func__,
+	       p_rst->mn_si_labs_clock_synth_model, p_rst->mn_si_labs_clock_synth_i2c_addr);
+
+	/* NOTE(review): returns the result of the last IIC scan, not the
+	 * detection - verify this is the intended success indicator.
+	 */
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
new file mode 100644
index 0000000000..1f192f5ecc
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_fpga_rst_nt200a0x.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_RST_NT200A0X_H__
+#define __NTHW_FPGA_RST_NT200A0X_H__
+
+#include "nthw_drv.h"
+#include "nthw_fpga_model.h"
+
/*
 * Reset/clock state for the NT200A0x adapter.
 * Holds the detected HW identity plus handles to the RST module's
 * register fields, and optional per-HW-revision callback overrides.
 */
struct nthw_fpga_rst_nt200a0x {
	int mn_fpga_product_id;
	int mn_fpga_version;
	int mn_fpga_revision;

	int mn_hw_id;

	/* Detected Si-Labs synth (e.g. 5340/5338) and its 7-bit I2C address */
	int mn_si_labs_clock_synth_model;
	uint8_t mn_si_labs_clock_synth_i2c_addr;

	/* RST register field pointers */
	nt_field_t *mp_fld_rst_sys;
	nt_field_t *mp_fld_rst_sys_mmcm;
	nt_field_t *mp_fld_rst_core_mmcm;
	nt_field_t *mp_fld_rst_rpp;
	nt_field_t *mp_fld_rst_ddr4;
	nt_field_t *mp_fld_rst_sdc;
	nt_field_t *mp_fld_rst_phy;
	nt_field_t *mp_fld_rst_serdes_rx;
	nt_field_t *mp_fld_rst_serdes_tx;
	nt_field_t *mp_fld_rst_serdes_rx_datapath;
	nt_field_t *mp_fld_rst_pcs_rx;
	nt_field_t *mp_fld_rst_mac_rx;
	nt_field_t *mp_fld_rst_mac_tx;
	nt_field_t *mp_fld_rst_ptp;
	nt_field_t *mp_fld_rst_ts;
	nt_field_t *mp_fld_rst_ptp_mmcm;
	nt_field_t *mp_fld_rst_ts_mmcm;
	nt_field_t *mp_fld_rst_periph;
	nt_field_t *mp_fld_rst_tsm_ref_mmcm;
	nt_field_t *mp_fld_rst_tmc;

	/* CTRL register field pointers */
	nt_field_t *mp_fld_ctrl_ts_clk_sel_override;
	nt_field_t *mp_fld_ctrl_ts_clk_sel;
	nt_field_t *mp_fld_ctrl_ts_clk_sel_ref;
	nt_field_t *mp_fld_ctrl_ptp_mmcm_clk_sel;

	/* STAT register field pointers */
	nt_field_t *mp_fld_stat_ddr4_mmcm_locked;
	nt_field_t *mp_fld_stat_sys_mmcm_locked;
	nt_field_t *mp_fld_stat_core_mmcm_locked;
	nt_field_t *mp_fld_stat_ddr4_pll_locked;
	nt_field_t *mp_fld_stat_ptp_mmcm_locked;
	nt_field_t *mp_fld_stat_ts_mmcm_locked;
	nt_field_t *mp_fld_stat_tsm_ref_mmcm_locked;

	/* STICKY register field pointers */
	nt_field_t *mp_fld_sticky_ptp_mmcm_unlocked;
	nt_field_t *mp_fld_sticky_ts_mmcm_unlocked;
	nt_field_t *mp_fld_sticky_ddr4_mmcm_unlocked;
	nt_field_t *mp_fld_sticky_ddr4_pll_unlocked;
	nt_field_t *mp_fld_sticky_core_mmcm_unlocked;
	nt_field_t *mp_fld_sticky_pci_sys_mmcm_unlocked;
	nt_field_t *mp_fld_sticky_tsm_ref_mmcm_unlocked;

	/* POWER register field pointers */
	nt_field_t *mp_fld_power_pu_phy;
	nt_field_t *mp_fld_power_pu_nseb;

	/*
	 * Optional per-HW-revision hooks; presumably installed by the reset
	 * setup code elsewhere - NULL until then (TODO confirm against caller).
	 */
	void (*reset_serdes_rx)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
			      uint32_t rst);
	void (*pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
			   uint32_t rst);
	void (*get_serdes_rx_rst)(struct nthw_fpga_rst_nt200a0x *p,
				  uint32_t intf_no, uint32_t *p_set);
	void (*get_pcs_rx_rst)(struct nthw_fpga_rst_nt200a0x *p, uint32_t intf_no,
			       uint32_t *p_set);
	bool (*is_rst_serdes_rx_datapath_implemented)(struct nthw_fpga_rst_nt200a0x *p);
};

typedef struct nthw_fpga_rst_nt200a0x nthw_fpga_rst_nt200a0x_t;
+
+int nthw_fpga_rst_nt200a0x_init(struct fpga_info_s *p_fpga_info,
+				struct nthw_fpga_rst_nt200a0x *p_rst);
+int nthw_fpga_rst_nt200a0x_reset(nt_fpga_t *p_fpga,
+				 const struct nthw_fpga_rst_nt200a0x *p);
+
+#endif /* __NTHW_FPGA_RST_NT200A0X_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
new file mode 100644
index 0000000000..9b536726d0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gpio_phy.h"
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void)
+{
+	nthw_gpio_phy_t *p = malloc(sizeof(nthw_gpio_phy_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_gpio_phy_t));
+	return p;
+}
+
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_gpio_phy_t));
+		free(p);
+	}
+}
+
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_GPIO_PHY, n_instance);
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GPIO_PHY %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gpio_phy = p_mod;
+
+	/* Registers */
+	p->mp_reg_config = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_CFG);
+	p->mp_reg_gpio = module_get_register(p->mp_mod_gpio_phy, GPIO_PHY_GPIO);
+
+	/* PORT-0, config fields */
+	p->mpa_fields[0].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_LPMODE);
+	p->mpa_fields[0].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_INT_B);
+	p->mpa_fields[0].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_RESET_B);
+	p->mpa_fields[0].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT0_PLL_INTR);
+	p->mpa_fields[0].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT0_RXLOS);
+
+	/* PORT-1, config fields */
+	p->mpa_fields[1].cfg_fld_lp_mode =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_LPMODE);
+	p->mpa_fields[1].cfg_int =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_INT_B);
+	p->mpa_fields[1].cfg_reset =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_RESET_B);
+	p->mpa_fields[1].cfg_mod_prs =
+		register_get_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].cfg_pll_int =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_PORT1_PLL_INTR);
+	p->mpa_fields[1].cfg_port_rxlos =
+		register_query_field(p->mp_reg_config, GPIO_PHY_CFG_E_PORT1_RXLOS);
+
+	/* PORT-0, gpio fields */
+	p->mpa_fields[0].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_LPMODE);
+	p->mpa_fields[0].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_INT_B);
+	p->mpa_fields[0].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_RESET_B);
+	p->mpa_fields[0].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_MODPRS_B);
+
+	/* PORT-0, Non-mandatory fields (queryField) */
+	p->mpa_fields[0].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT0_PLL_INTR);
+	p->mpa_fields[0].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT0_RXLOS);
+
+	/* PORT-1, gpio fields */
+	p->mpa_fields[1].gpio_fld_lp_mode =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_LPMODE);
+	p->mpa_fields[1].gpio_int =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_INT_B);
+	p->mpa_fields[1].gpio_reset =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_RESET_B);
+	p->mpa_fields[1].gpio_mod_prs =
+		register_get_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_MODPRS_B);
+
+	/* PORT-1, Non-mandatory fields (queryField) */
+	p->mpa_fields[1].gpio_pll_int =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_PORT1_PLL_INTR);
+	p->mpa_fields[1].gpio_port_rxlos =
+		register_query_field(p->mp_reg_gpio, GPIO_PHY_GPIO_E_PORT1_RXLOS);
+
+	register_update(p->mp_reg_config);
+
+	return 0;
+}
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (field_get_updated(p->mpa_fields[if_no].gpio_fld_lp_mode))
+		return true;
+
+	else
+		return false;
+}
+
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "INT_B" */
+	if (field_get_updated(p->mpa_fields[if_no].gpio_int))
+		return false;
+
+	else
+		return true;
+}
+
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "RESET_B" */
+	if (field_get_updated(p->mpa_fields[if_no].gpio_reset))
+		return false;
+
+	else
+		return true;
+}
+
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a negated GPIO PIN "MODPRS_B" */
+	return field_get_updated(p->mpa_fields[if_no].gpio_mod_prs) == 0U ? true :
+	       false;
+}
+
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	/* NOTE: This is a normal GPIO PIN "PLL_INTR" */
+	if (p->mpa_fields[if_no].gpio_pll_int) {
+		if (field_get_updated(p->mpa_fields[if_no].gpio_pll_int))
+			return true;
+
+		else
+			return false;
+	} else {
+		/* this HW doesn't support "PLL_INTR" (INTR from SyncE jitter attenuater) */
+		return false;
+	}
+}
+
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return false;
+	}
+
+	if (p->mpa_fields[if_no].gpio_port_rxlos) {
+		if (field_get_updated(p->mpa_fields[if_no].gpio_port_rxlos))
+			return true;
+		else
+			return false;
+	} else {
+		return false;
+	}
+}
+
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (enable)
+		field_set_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+
+	else
+		field_clr_flush(p->mpa_fields[if_no].gpio_fld_lp_mode);
+	field_clr_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable output */
+}
+
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (enable)
+		field_clr_flush(p->mpa_fields[if_no].gpio_reset);
+
+	else
+		field_set_flush(p->mpa_fields[if_no].gpio_reset);
+	field_clr_flush(p->mpa_fields[if_no].cfg_reset); /* enable output */
+}
+
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	if (p->mpa_fields[if_no].gpio_port_rxlos) {
+		if (enable)
+			field_set_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+
+		else
+			field_clr_flush(p->mpa_fields[if_no].gpio_port_rxlos);
+	}
+}
+
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no)
+{
+	if (if_no >= ARRAY_SIZE(p->mpa_fields)) {
+		assert(false);
+		return;
+	}
+
+	field_set_flush(p->mpa_fields[if_no].cfg_fld_lp_mode); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_int); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_reset); /* enable input */
+	field_set_flush(p->mpa_fields[if_no].cfg_mod_prs); /* enable input */
+	if (p->mpa_fields[if_no].cfg_port_rxlos)
+		field_clr_flush(p->mpa_fields[if_no].cfg_port_rxlos); /* enable output */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
new file mode 100644
index 0000000000..1c6185150c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gpio_phy.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_GPIO_PHY_H_
+#define NTHW_GPIO_PHY_H_
+
+#define GPIO_PHY_INTERFACES (2)
+
/*
 * Register-field handles for one PHY port.
 * cfg_* fields live in the CFG register; gpio_* fields access the
 * corresponding pins via the GPIO register. The *_pll_int and *_port_rxlos
 * fields are optional and may be NULL on HW without those pins.
 */
typedef struct {
	nt_field_t *cfg_fld_lp_mode; /* Cfg Low Power Mode */
	nt_field_t *cfg_int; /* Cfg Port Interrupt */
	nt_field_t *cfg_reset; /* Cfg Reset */
	nt_field_t *cfg_mod_prs; /* Cfg Module Present */
	nt_field_t *cfg_pll_int; /* Cfg PLL Interrupt (optional) */
	nt_field_t *cfg_port_rxlos; /* Emulate Cfg Port RXLOS (optional) */

	nt_field_t *gpio_fld_lp_mode; /* Gpio Low Power Mode */
	nt_field_t *gpio_int; /* Gpio Port Interrupt */
	nt_field_t *gpio_reset; /* Gpio Reset */
	nt_field_t *gpio_mod_prs; /* Gpio Module Present */
	nt_field_t *gpio_pll_int; /* Gpio PLL Interrupt (optional) */
	nt_field_t *gpio_port_rxlos; /* Emulate Gpio Port RXLOS (optional) */
} gpio_phy_fields_t;
+
/* GPIO_PHY module instance: registers and per-port field handles. */
struct nthw_gpio_phy {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_gpio_phy;
	int mn_instance;

	/* Registers */
	nt_register_t *mp_reg_config;
	nt_register_t *mp_reg_gpio;

	/* Fields, one entry per PHY port */
	gpio_phy_fields_t mpa_fields[GPIO_PHY_INTERFACES];
};

typedef struct nthw_gpio_phy nthw_gpio_phy_t;
typedef struct nthw_gpio_phy nthw_gpio_phy;
+
+nthw_gpio_phy_t *nthw_gpio_phy_new(void);
+void nthw_gpio_phy_delete(nthw_gpio_phy_t *p);
+int nthw_gpio_phy_init(nthw_gpio_phy_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+bool nthw_gpio_phy_is_low_power_enabled(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_reset(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_module_present(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_pll_interrupt_set(nthw_gpio_phy_t *p, uint8_t if_no);
+bool nthw_gpio_phy_is_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no);
+void nthw_gpio_phy_set_low_power(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_reset(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_port_rxlos(nthw_gpio_phy_t *p, uint8_t if_no, bool enable);
+void nthw_gpio_phy_set_cfg_default_values(nthw_gpio_phy_t *p, uint8_t if_no);
+
+#endif /* NTHW_GPIO_PHY_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.c b/drivers/net/ntnic/nthw/core/nthw_hif.c
new file mode 100644
index 0000000000..7b7a919108
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.c
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_hif.h"
+
+nthw_hif_t *nthw_hif_new(void)
+{
+	nthw_hif_t *p = malloc(sizeof(nthw_hif_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_hif_t));
+	return p;
+}
+
+void nthw_hif_delete(nthw_hif_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_hif_t));
+		free(p);
+	}
+}
+
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str _unused =
+		p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_HIF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: HIF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_hif = mod;
+
+	/* default for (Xilinx-based) products until august 2022: (1e6/4000 = 250 MHz) */
+	p->mn_fpga_param_hif_per_ps =
+		fpga_get_product_param(p->mp_fpga, NT_HIF_PER_PS, 4000);
+	p->mn_fpga_hif_ref_clk_freq =
+		(uint32_t)(1000000000000ULL /
+			   (unsigned int)p->mn_fpga_param_hif_per_ps);
+
+	p->mp_reg_prod_id_lsb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_LSB);
+	p->mp_fld_prod_id_lsb_rev_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_REV_ID);
+	p->mp_fld_prod_id_lsb_ver_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_VER_ID);
+	p->mp_fld_prod_id_lsb_group_id =
+		register_get_field(p->mp_reg_prod_id_lsb, HIF_PROD_ID_LSB_GROUP_ID);
+
+	p->mp_reg_prod_id_msb = module_get_register(p->mp_mod_hif, HIF_PROD_ID_MSB);
+	p->mp_fld_prod_id_msb_type_id =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_TYPE_ID);
+	p->mp_fld_prod_id_msb_build_no =
+		register_get_field(p->mp_reg_prod_id_msb, HIF_PROD_ID_MSB_BUILD_NO);
+
+	p->mp_reg_build_time = module_get_register(p->mp_mod_hif, HIF_BUILD_TIME);
+	p->mp_fld_build_time =
+		register_get_field(p->mp_reg_build_time, HIF_BUILD_TIME_TIME);
+
+	p->mn_fpga_id_prod = field_get_updated(p->mp_fld_prod_id_lsb_group_id);
+	p->mn_fpga_id_ver = field_get_updated(p->mp_fld_prod_id_lsb_ver_id);
+	p->mn_fpga_id_rev = field_get_updated(p->mp_fld_prod_id_lsb_rev_id);
+	p->mn_fpga_id_build_no = field_get_updated(p->mp_fld_prod_id_msb_build_no);
+	p->mn_fpga_id_item = field_get_updated(p->mp_fld_prod_id_msb_type_id);
+
+	NT_LOG(DBG, NTHW, "%s: HIF %d: %s: %d-%d-%d-%d-%d\n", p_adapter_id_str,
+	       p->mn_instance, __func__, p->mn_fpga_id_item, p->mn_fpga_id_prod,
+	       p->mn_fpga_id_ver, p->mn_fpga_id_rev, p->mn_fpga_id_build_no);
+	NT_LOG(DBG, NTHW,
+	       "%s: HIF %d: %s: HIF ref clock: %d Hz (%d ticks/ps)\n",
+	       p_adapter_id_str, p->mn_instance, __func__, p->mn_fpga_hif_ref_clk_freq,
+	       p->mn_fpga_param_hif_per_ps);
+
+	p->mp_reg_build_seed = NULL; /* Reg/Fld not present on HIF */
+	if (p->mp_reg_build_seed)
+		p->mp_fld_build_seed = NULL; /* Reg/Fld not present on HIF */
+	else
+		p->mp_fld_build_seed = NULL;
+
+	p->mp_reg_core_speed = NULL; /* Reg/Fld not present on HIF */
+	if (p->mp_reg_core_speed) {
+		p->mp_fld_core_speed = NULL; /* Reg/Fld not present on HIF */
+		p->mp_fld_ddr3_speed = NULL; /* Reg/Fld not present on HIF */
+	} else {
+		p->mp_reg_core_speed = NULL;
+		p->mp_fld_core_speed = NULL;
+		p->mp_fld_ddr3_speed = NULL;
+	}
+
+	/* Optional registers since: 2018-04-25 */
+	p->mp_reg_int_mask = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_clr = NULL; /* Reg/Fld not present on HIF */
+	p->mp_reg_int_force = NULL; /* Reg/Fld not present on HIF */
+
+	p->mp_fld_int_mask_timer = NULL;
+	p->mp_fld_int_clr_timer = NULL;
+	p->mp_fld_int_force_timer = NULL;
+
+	p->mp_fld_int_mask_port = NULL;
+	p->mp_fld_int_clr_port = NULL;
+	p->mp_fld_int_force_port = NULL;
+
+	p->mp_fld_int_mask_pps = NULL;
+	p->mp_fld_int_clr_pps = NULL;
+	p->mp_fld_int_force_pps = NULL;
+
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_hif, HIF_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, HIF_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_hif, HIF_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, HIF_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_hif, HIF_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, HIF_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk = module_get_register(p->mp_mod_hif, HIF_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk = register_get_field(p->mp_reg_stat_ref_clk,
+				   HIF_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_WR_ERR);
+		p->mp_fld_status_rd_err =
+			register_query_field(p->mp_reg_status, HIF_STATUS_RD_ERR);
+	} else {
+		p->mp_reg_status = module_query_register(p->mp_mod_hif, HIF_STATUS);
+		p->mp_fld_status_tags_in_use =
+			register_query_field(p->mp_reg_status, HIF_STATUS_TAGS_IN_USE);
+		p->mp_fld_status_wr_err = NULL;
+		p->mp_fld_status_rd_err = NULL;
+	}
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_hif, HIF_TEST0);
+	p->mp_fld_pci_test0 = register_get_field(p->mp_reg_pci_test0, HIF_TEST0_DATA);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_hif, HIF_TEST1);
+	p->mp_fld_pci_test1 = register_get_field(p->mp_reg_pci_test1, HIF_TEST1_DATA);
+
+	/* Required to run TSM */
+	p->mp_reg_sample_time = module_get_register(p->mp_mod_hif, HIF_SAMPLE_TIME);
+	if (p->mp_reg_sample_time) {
+		p->mp_fld_sample_time =
+			register_get_field(p->mp_reg_sample_time, HIF_SAMPLE_TIME_SAMPLE_TIME);
+	} else {
+		p->mp_fld_sample_time = NULL;
+	}
+
+	/* We need to optimize PCIe3 TLP-size read-request and extended tag usage */
+	{
+		p->mp_reg_config = module_query_register(p->mp_mod_hif, HIF_CONFIG);
+		if (p->mp_reg_config) {
+			p->mp_fld_max_tlp =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_TLP);
+			p->mp_fld_max_read =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_MAX_READ);
+			p->mp_fld_ext_tag =
+				register_get_field(p->mp_reg_config, HIF_CONFIG_EXT_TAG);
+		} else {
+			p->mp_fld_max_tlp = NULL;
+			p->mp_fld_max_read = NULL;
+			p->mp_fld_ext_tag = NULL;
+		}
+	}
+
+	return 0;
+}
+
+int nthw_hif_setup_config(nthw_hif_t *p)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	/*
+	 * We need to optimize PCIe3 read-request and extended tag usage
+	 * original check: HW_ADAPTER_ID_NT200A02 HW_ADAPTER_ID_NT100A01 HW_ADAPTER_ID_NT50B01
+	 */
+	if (p->mp_fpga->p_fpga_info->n_nthw_adapter_id != NT_HW_ADAPTER_ID_NT40E3) {
+		if (p->mp_fld_max_read) {
+			/*
+			 * NOTE: On Pandion DELL server, this param was negotiated to 4096
+			 * (index=5), but the server crashed. For now we need to limit this value to
+			 * 512 (index=2)
+			 */
+			const uint32_t n_max_read_req_size =
+				field_get_updated(p->mp_fld_max_read);
+			if (n_max_read_req_size > 2) {
+				field_set_val_flush32(p->mp_fld_max_read, 2);
+				NT_LOG(INF, NTHW,
+				       "%s: %s: PCIe: MaxReadReqsize %d - changed to 2 (512B)\n",
+				       p_adapter_id_str, __func__,
+				       n_max_read_req_size);
+			}
+		}
+
+		if (p->mp_fld_ext_tag)
+			field_set_val_flush32(p->mp_fld_ext_tag, 1);
+
+		if (p->mp_fld_max_tlp && p->mp_fld_max_read && p->mp_fld_ext_tag) {
+			NT_LOG(INF, NTHW,
+			       "%s: %s: PCIe config: MaxTlp = %d, MaxReadReqsize = %d, ExtTagEna = %d\n",
+			       p_adapter_id_str, __func__,
+			       field_get_updated(p->mp_fld_max_tlp),
+			       field_get_updated(p->mp_fld_max_read),
+			       field_get_updated(p->mp_fld_ext_tag));
+		}
+	}
+	return 0;
+}
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = p->mn_fpga_hif_ref_clk_freq;
+
+	*p_tags_in_use = (p->mp_fld_status_tags_in_use ?
+		       field_get_updated(p->mp_fld_status_tags_in_use) :
+		       0);
+
+	*p_rd_err = (p->mp_fld_status_rd_err ? field_get_updated(p->mp_fld_status_rd_err) :
+		   0);
+	*p_wr_err = (p->mp_fld_status_wr_err ? field_get_updated(p->mp_fld_status_wr_err) :
+		   0);
+
+	return 0;
+}
+
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt, tg_unit_size, tg_ref_freq;
+	uint64_t n_tags_in_use, n_rd_err, n_wr_err;
+
+	nthw_hif_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size, &tg_ref_freq,
+			&n_tags_in_use, &n_rd_err, &n_wr_err);
+
+	*p_tags_in_use = n_tags_in_use;
+	if (n_rd_err)
+		(*p_rd_err_cnt)++;
+	if (n_wr_err)
+		(*p_wr_err_cnt)++;
+
+	if (ref_clk_cnt) {
+		uint64_t rx_rate;
+		uint64_t tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+	} else {
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+	}
+
+	return 0;
+}
+
/*
 * Enable HIF statistics: set STAT_ENA and STAT_REQ in the shadow register,
 * then flush the register containing STAT_REQ to hardware in one write.
 * Always returns 0.
 */
int nthw_hif_stat_req_enable(nthw_hif_t *p)
{
	field_set_all(p->mp_fld_stat_ctrl_ena);
	field_set_all(p->mp_fld_stat_ctrl_req);
	field_flush_register(p->mp_fld_stat_ctrl_req);
	return 0;
}
+
/*
 * Disable HIF statistics: clear STAT_ENA while still asserting STAT_REQ,
 * then flush the register containing STAT_REQ to hardware in one write.
 * Always returns 0.
 */
int nthw_hif_stat_req_disable(nthw_hif_t *p)
{
	field_clr_all(p->mp_fld_stat_ctrl_ena);
	field_set_all(p->mp_fld_stat_ctrl_req);
	field_flush_register(p->mp_fld_stat_ctrl_req);
	return 0;
}
+
/*
 * Sample the HIF statistics over a fixed 100 ms window:
 * enable the counters, wait, disable them, then convert the counters
 * to rates via nthw_hif_get_stat_rate(). Blocks for ~100 ms.
 * Always returns 0.
 */
int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt)
{
	nthw_hif_stat_req_enable(p);
	NT_OS_WAIT_USEC(100000);  /* 100 ms sampling window */
	nthw_hif_stat_req_disable(p);
	nthw_hif_get_stat_rate(p, p_rx_rate, p_tx_rate, p_ref_clk_cnt, p_tags_in_use,
			    p_rd_err_cnt, p_wr_err_cnt);

	return 0;
}
+
/*
 * Fill an end-point counter set from the current HIF statistics rates.
 *
 * NOTE(review): epc->cur_tx is passed as the RX-rate output parameter and
 * epc->cur_rx as the TX-rate one - confirm whether this swap is intentional
 * (direction viewed from the end point) or an argument-order bug.
 */
int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
				   struct nthw_hif_end_point_counters *epc)
{
	assert(epc);

	/* Get stat rate and maintain rx/tx min/max */
	nthw_hif_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
			    &epc->n_tags_in_use, &epc->n_rd_err, &epc->n_wr_err);

	return 0;
}
diff --git a/drivers/net/ntnic/nthw/core/nthw_hif.h b/drivers/net/ntnic/nthw/core/nthw_hif.h
new file mode 100644
index 0000000000..2701e222b3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HIF_H__
+#define __NTHW_HIF_H__
+
+#define NTHW_TG_CNT_SIZE (4ULL)
+
/*
 * HIF (host interface) module instance.
 * Caches register/field handles looked up in nthw_hif_init(); fields for
 * registers not present on this FPGA are NULL.
 */
struct nthw_hif {
	nt_fpga_t *mp_fpga;
	nt_module_t *mp_mod_hif;
	int mn_instance;

	/* Product identification (PROD_ID_LSB/MSB, BUILD_TIME) */
	nt_register_t *mp_reg_prod_id_lsb;
	nt_field_t *mp_fld_prod_id_lsb_rev_id;
	nt_field_t *mp_fld_prod_id_lsb_ver_id;
	nt_field_t *mp_fld_prod_id_lsb_group_id;

	nt_register_t *mp_reg_prod_id_msb;
	nt_field_t *mp_fld_prod_id_msb_type_id;
	nt_field_t *mp_fld_prod_id_msb_build_no;

	nt_register_t *mp_reg_build_time;
	nt_field_t *mp_fld_build_time;

	/* Not present on HIF - always NULL (see nthw_hif_init()) */
	nt_register_t *mp_reg_build_seed;
	nt_field_t *mp_fld_build_seed;

	nt_register_t *mp_reg_core_speed;
	nt_field_t *mp_fld_core_speed;
	nt_field_t *mp_fld_ddr3_speed;

	/* Optional interrupt registers - NULL on HIF */
	nt_register_t *mp_reg_int_mask;
	nt_field_t *mp_fld_int_mask_timer;
	nt_field_t *mp_fld_int_mask_port;
	nt_field_t *mp_fld_int_mask_pps;

	nt_register_t *mp_reg_int_clr;
	nt_field_t *mp_fld_int_clr_timer;
	nt_field_t *mp_fld_int_clr_port;
	nt_field_t *mp_fld_int_clr_pps;

	nt_register_t *mp_reg_int_force;
	nt_field_t *mp_fld_int_force_timer;
	nt_field_t *mp_fld_int_force_port;
	nt_field_t *mp_fld_int_force_pps;

	/* TSM sample trigger - may be NULL when not implemented */
	nt_register_t *mp_reg_sample_time;
	nt_field_t *mp_fld_sample_time;

	/* Optional status register - fields NULL when absent */
	nt_register_t *mp_reg_status;
	nt_field_t *mp_fld_status_tags_in_use;
	nt_field_t *mp_fld_status_wr_err;
	nt_field_t *mp_fld_status_rd_err;

	/* PCIe statistics: control, RX/TX counters, reference clock */
	nt_register_t *mp_reg_stat_ctrl;
	nt_field_t *mp_fld_stat_ctrl_ena;
	nt_field_t *mp_fld_stat_ctrl_req;

	nt_register_t *mp_reg_stat_rx;
	nt_field_t *mp_fld_stat_rx_counter;

	nt_register_t *mp_reg_stat_tx;
	nt_field_t *mp_fld_stat_tx_counter;

	nt_register_t *mp_reg_stat_ref_clk;
	nt_field_t *mp_fld_stat_ref_clk_ref_clk;

	/* Test registers (TEST2/TEST3 are never looked up in init) */
	nt_register_t *mp_reg_pci_test0;
	nt_field_t *mp_fld_pci_test0;

	nt_register_t *mp_reg_pci_test1;
	nt_field_t *mp_fld_pci_test1;

	nt_register_t *mp_reg_pci_test2;
	nt_field_t *mp_fld_pci_test2;

	nt_register_t *mp_reg_pci_test3;
	nt_field_t *mp_fld_pci_test3;

	/* Optional PCIe3 tuning (HIF_CONFIG) - NULL when absent */
	nt_register_t *mp_reg_config;
	nt_field_t *mp_fld_max_tlp;
	nt_field_t *mp_fld_max_read;
	nt_field_t *mp_fld_ext_tag;

	/* FPGA identity snapshot taken at init time */
	int mn_fpga_id_item;
	int mn_fpga_id_prod;
	int mn_fpga_id_ver;
	int mn_fpga_id_rev;
	int mn_fpga_id_build_no;

	/* HIF clock period (ps) and the derived reference frequency (Hz) */
	int mn_fpga_param_hif_per_ps;
	uint32_t mn_fpga_hif_ref_clk_freq;
};

typedef struct nthw_hif nthw_hif_t;
typedef struct nthw_hif nthw_hif;
+
/* PCIe end-point error counters by severity class. */
struct nthw_hif_end_point_err_counters {
	uint32_t n_err_correctable, n_err_non_fatal, n_err_fatal;
};
+
/*
 * Per-end-point sample: traffic-generator settings, measured rates and
 * error statistics (pre/post/delta for each direction).
 */
struct nthw_hif_end_point_counters {
	int n_numa_node;

	/* Traffic generator parameters */
	int n_tg_direction;
	int n_tg_pkt_size;
	int n_tg_num_pkts;
	int n_tg_delay;

	/* Measured rates and utilization */
	uint64_t cur_rx, cur_tx;
	uint64_t cur_pci_nt_util, cur_pci_xil_util;
	uint64_t n_ref_clk_cnt;

	uint64_t n_tags_in_use;
	uint64_t n_rd_err;
	uint64_t n_wr_err;

	/* RC<->EP error counters: before, after and difference */
	struct nthw_hif_end_point_err_counters s_rc_ep_pre, s_rc_ep_post, s_rc_ep_delta;
	struct nthw_hif_end_point_err_counters s_ep_rc_pre, s_ep_rc_post, s_ep_rc_delta;

	int bo_error;
};
+
/* Primary and slave end-point counter sets. */
struct nthw_hif_end_points {
	struct nthw_hif_end_point_counters pri, sla;
};
+
+nthw_hif_t *nthw_hif_new(void);
+void nthw_hif_delete(nthw_hif_t *p);
+int nthw_hif_init(nthw_hif_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_hif_setup_config(nthw_hif_t *p);
+
+int nthw_hif_trigger_sample_time(nthw_hif_t *p);
+
+int nthw_hif_stat_req_enable(nthw_hif_t *p);
+int nthw_hif_stat_req_disable(nthw_hif_t *p);
+int nthw_hif_stat_sample(nthw_hif_t *p, uint64_t *p_rx_rate, uint64_t *p_tx_rate,
+		       uint64_t *p_ref_clk_cnt, uint64_t *p_tags_in_use,
+		       uint64_t *p_rd_err_cnt, uint64_t *p_wr_err_cnt);
+
+int nthw_hif_get_stat(nthw_hif_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		    uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		    uint32_t *p_tg_ref_freq, uint64_t *p_tags_in_use,
+		    uint64_t *p_rd_err, uint64_t *p_wr_err);
+int nthw_hif_get_stat_rate(nthw_hif_t *p, uint64_t *p_pci_rx_rate,
+			uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			uint64_t *p_tags_in_use, uint64_t *p_rd_err_cnt,
+			uint64_t *p_wr_err_cnt);
+
+int nthw_hif_end_point_counters_sample(nthw_hif_t *p,
+				   struct nthw_hif_end_point_counters *epc);
+
+#endif /* __NTHW_HIF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.c b/drivers/net/ntnic/nthw/core/nthw_iic.c
new file mode 100644
index 0000000000..14aee221ce
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_iic.h"
+
+#define I2C_TRANSMIT_WR (0x00)
+#define I2C_TRANSMIT_RD (0x01)
+
+#define I2C_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+/*
+ * Minimum timing values for I2C for a Marvel 88E11111 Phy.
+ * This Phy is used in many Trispeed NIMs.
+ * In order to access this Phy, the I2C clock speed is needed to be set to 100KHz.
+ */
+static const uint32_t susta = 4700; /* ns */
+static const uint32_t susto = 4000; /* ns */
+static const uint32_t hdsta = 4000; /* ns */
+static const uint32_t sudat = 250; /* ns */
+static const uint32_t buf = 4700; /* ns */
+static const uint32_t high = 4000; /* ns */
+static const uint32_t low = 4700; /* ns */
+static const uint32_t hddat = 300; /* ns */
+
+/* Pulse the CR TX-FIFO-reset bit: write 1 then 0, flushing after each write. */
+static int nthw_iic_reg_control_txfifo_reset(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_txfifo_reset);
+
+	/* assert reset */
+	field_set_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	/* de-assert reset */
+	field_clr_all(p->mp_fld_cr_txfifo_reset);
+	field_flush_register(p->mp_fld_cr_txfifo_reset);
+
+	return 0;
+}
+
+/*
+ * Push one entry into the TX FIFO register.
+ * 'data' goes into the TXDATA field; 'start'/'stop' drive the entry's
+ * I2C START/STOP control bits. The whole register is flushed in one write.
+ * Always returns 0.
+ */
+static int nthw_iic_reg_tx_fifo_write(nthw_iic_t *p, uint32_t data, bool start,
+				     bool stop)
+{
+	if (start)
+		field_set_all(p->mp_fld_tx_fifo_start);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_start);
+
+	if (stop)
+		field_set_all(p->mp_fld_tx_fifo_stop);
+
+	else
+		field_clr_all(p->mp_fld_tx_fifo_stop);
+
+	field_set_val32(p->mp_fld_tx_fifo_txdata, data);
+
+	/* single flush commits data + start/stop bits atomically */
+	register_flush(p->mp_reg_tx_fifo, 1);
+
+	return 0;
+}
+
+/* Pop one byte from the RX FIFO into *p_data. Always returns 0. */
+static int nthw_iic_reg_read_i2c_rx_fifo(nthw_iic_t *p, uint8_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = (uint8_t)field_get_updated(p->mp_fld_rx_fifo_rxdata);
+
+	return 0;
+}
+
+/*
+ * Soft-reset the controller by writing the reset key 0x0A to the SOFTR
+ * register. Always returns 0.
+ * NOTE(review): CR is read back first — presumably to refresh the shadow of
+ * the enable bit before the reset clears it; confirm against register model.
+ */
+static int nthw_iic_reg_softr(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_val_flush32(p->mp_fld_softr_rkey, 0x0A);
+
+	return 0;
+}
+
+/* Set and flush the CR enable bit to turn the controller on. Always returns 0. */
+static int nthw_iic_reg_enable(nthw_iic_t *p)
+{
+	field_update_register(p->mp_fld_cr_en);
+	field_set_flush(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Read the SR bus-busy flag; *pb_flag becomes true while the bus is busy. */
+static int nthw_iic_reg_busbusy(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_bb) ? true : false;
+
+	return 0;
+}
+
+/* Read the SR RX-FIFO-empty flag; *pb_flag becomes true when the FIFO is empty. */
+static int nthw_iic_reg_rxfifo_empty(nthw_iic_t *p, bool *pb_flag)
+{
+	assert(pb_flag);
+
+	*pb_flag = field_get_updated(p->mp_fld_sr_rxfifo_empty) ? true : false;
+
+	return 0;
+}
+
+/*
+ * nIicCycleTime is the I2C clock cycle time in ns ie 125MHz = 8ns
+ */
+/*
+ * Program the eight I2C timing registers (setup/hold/high/low/buffer times).
+ * n_iic_cycle_time is the I2C clock cycle time in ns (e.g. 125 MHz = 8 ns);
+ * each register is loaded with the required time divided by the cycle time.
+ * Always returns 0.
+ */
+static int nthw_iic_reg_set_timing(nthw_iic_t *p, uint32_t n_iic_cycle_time)
+{
+	/* Required times (ns) paired with their destination fields, in the
+	 * same order the registers were originally written.
+	 */
+	const struct {
+		uint32_t n_time_ns;
+		nt_field_t *p_fld;
+	} a_timings[] = {
+		{ susta, p->mp_fld_tsusta },
+		{ susto, p->mp_fld_tsusto },
+		{ hdsta, p->mp_fld_thdsta },
+		{ sudat, p->mp_fld_tsudat },
+		{ buf, p->mp_fld_tbuf },
+		{ high, p->mp_fld_thigh },
+		{ low, p->mp_fld_tlow },
+		{ hddat, p->mp_fld_thddat },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(a_timings) / sizeof(a_timings[0]); i++) {
+		uint32_t val = a_timings[i].n_time_ns / n_iic_cycle_time;
+
+		field_set_val_flush(a_timings[i].p_fld, &val, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and zero-initialize an IIC instance.
+ * Returns NULL on allocation failure; release with nthw_iic_delete().
+ */
+nthw_iic_t *nthw_iic_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	nthw_iic_t *p = calloc(1, sizeof(nthw_iic_t));
+
+	return p;
+}
+
+/*
+ * Initialize I2C controller instance 'n_iic_instance' on 'p_fpga'.
+ *
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: only probe whether the fpga module is present (no struct init)
+ * n_iic_cycle_time: I2C clock cycle time in ns; 0 skips timing setup.
+ *
+ * Returns 0 on success, -1 if the IIC module instance does not exist.
+ */
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_IIC, n_iic_instance);
+
+	/* Probe-only mode: report module presence without touching any state */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: I2C %d: no such instance\n",
+		       p_adapter_id_str, n_iic_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_iic_instance = n_iic_instance;
+
+	p->mn_iic_cycle_time = n_iic_cycle_time;
+
+	/* All -1: select the built-in defaults for poll delay and retry budgets */
+	nthw_iic_set_retry_params(p, -1, -1, -1, -1, -1);
+
+	p->mp_mod_iic = mod;
+
+	/* I2C is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_iic, 0x00);
+
+	/* Resolve timing registers and their value fields */
+	p->mp_reg_tsusta = module_get_register(p->mp_mod_iic, IIC_TSUSTA);
+	p->mp_fld_tsusta =
+		register_get_field(p->mp_reg_tsusta, IIC_TSUSTA_TSUSTA_VAL);
+
+	p->mp_reg_tsusto = module_get_register(p->mp_mod_iic, IIC_TSUSTO);
+	p->mp_fld_tsusto =
+		register_get_field(p->mp_reg_tsusto, IIC_TSUSTO_TSUSTO_VAL);
+
+	p->mp_reg_thdsta = module_get_register(p->mp_mod_iic, IIC_THDSTA);
+	p->mp_fld_thdsta =
+		register_get_field(p->mp_reg_thdsta, IIC_THDSTA_THDSTA_VAL);
+
+	p->mp_reg_tsudat = module_get_register(p->mp_mod_iic, IIC_TSUDAT);
+	p->mp_fld_tsudat =
+		register_get_field(p->mp_reg_tsudat, IIC_TSUDAT_TSUDAT_VAL);
+
+	p->mp_reg_tbuf = module_get_register(p->mp_mod_iic, IIC_TBUF);
+	p->mp_fld_tbuf = register_get_field(p->mp_reg_tbuf, IIC_TBUF_TBUF_VAL);
+
+	p->mp_reg_thigh = module_get_register(p->mp_mod_iic, IIC_THIGH);
+	p->mp_fld_thigh = register_get_field(p->mp_reg_thigh, IIC_THIGH_THIGH_VAL);
+
+	p->mp_reg_tlow = module_get_register(p->mp_mod_iic, IIC_TLOW);
+	p->mp_fld_tlow = register_get_field(p->mp_reg_tlow, IIC_TLOW_TLOW_VAL);
+
+	p->mp_reg_thddat = module_get_register(p->mp_mod_iic, IIC_THDDAT);
+	p->mp_fld_thddat =
+		register_get_field(p->mp_reg_thddat, IIC_THDDAT_THDDAT_VAL);
+
+	/* Control register and its bits */
+	p->mp_reg_cr = module_get_register(p->mp_mod_iic, IIC_CR);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, IIC_CR_EN);
+	p->mp_fld_cr_msms = register_get_field(p->mp_reg_cr, IIC_CR_MSMS);
+	p->mp_fld_cr_txfifo_reset =
+		register_get_field(p->mp_reg_cr, IIC_CR_TXFIFO_RESET);
+	p->mp_fld_cr_txak = register_get_field(p->mp_reg_cr, IIC_CR_TXAK);
+
+	/* Status register and its flags */
+	p->mp_reg_sr = module_get_register(p->mp_mod_iic, IIC_SR);
+	p->mp_fld_sr_bb = register_get_field(p->mp_reg_sr, IIC_SR_BB);
+	p->mp_fld_sr_rxfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_FULL);
+	p->mp_fld_sr_rxfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_RXFIFO_EMPTY);
+	p->mp_fld_sr_txfifo_full =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_FULL);
+	p->mp_fld_sr_txfifo_empty =
+		register_get_field(p->mp_reg_sr, IIC_SR_TXFIFO_EMPTY);
+
+	/* TX FIFO register (data + start/stop control bits) */
+	p->mp_reg_tx_fifo = module_get_register(p->mp_mod_iic, IIC_TX_FIFO);
+	p->mp_fld_tx_fifo_txdata =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_TXDATA);
+	p->mp_fld_tx_fifo_start =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_START);
+	p->mp_fld_tx_fifo_stop =
+		register_get_field(p->mp_reg_tx_fifo, IIC_TX_FIFO_STOP);
+
+	/* RX FIFO programmable-depth interrupt register */
+	p->mp_reg_rx_fifo_pirq =
+		module_get_register(p->mp_mod_iic, IIC_RX_FIFO_PIRQ);
+	p->mp_fld_rx_fifo_pirq_cmp_val =
+		register_get_field(p->mp_reg_rx_fifo_pirq, IIC_RX_FIFO_PIRQ_CMP_VAL);
+
+	/* RX FIFO data register */
+	p->mp_reg_rx_fifo = module_get_register(p->mp_mod_iic, IIC_RX_FIFO);
+	p->mp_fld_rx_fifo_rxdata =
+		register_get_field(p->mp_reg_rx_fifo, IIC_RX_FIFO_RXDATA);
+
+	/* Soft-reset register (takes the 0x0A reset key) */
+	p->mp_reg_softr = module_get_register(p->mp_mod_iic, IIC_SOFTR);
+	p->mp_fld_softr_rkey = register_get_field(p->mp_reg_softr, IIC_SOFTR_RKEY);
+
+	/*
+	 * Initialize I2C controller by applying soft reset and enable the controller
+	 */
+	nthw_iic_reg_softr(p);
+	/* Enable the controller */
+	nthw_iic_reg_enable(p);
+
+	/* Setup controller timing */
+	if (p->mn_iic_cycle_time) {
+		NT_LOG(DBG, NTHW, "%s: I2C%d: cycletime=%d\n", p_adapter_id_str,
+		       p->mn_iic_instance, p->mn_iic_cycle_time);
+		nthw_iic_reg_set_timing(p, p->mn_iic_cycle_time);
+	}
+
+	/* Reset TX fifo - must be after enable */
+	nthw_iic_reg_control_txfifo_reset(p);
+	nthw_iic_reg_tx_fifo_write(p, 0, 0, 0);
+
+	return 0;
+}
+
+/* Clear and free an IIC instance; NULL is accepted (no-op). */
+void nthw_iic_delete(nthw_iic_t *p)
+{
+	if (p) {
+		/* NOTE(review): scrub before free — presumably to surface
+		 * use-after-free bugs; plain memset, not a secure wipe.
+		 */
+		memset(p, 0, sizeof(nthw_iic_t));
+		free(p);
+	}
+}
+
+/*
+ * Configure poll delay and retry budgets for this IIC instance.
+ * A negative argument selects the built-in default for that parameter
+ * (poll delay 10, bus/data-ready retries 1000, read/write retries 10).
+ * Always returns 0.
+ */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry)
+{
+	p->mn_poll_delay = (n_poll_delay < 0) ? 10 : n_poll_delay;
+
+	p->mn_bus_ready_retry = (n_bus_ready_retry < 0) ? 1000 : n_bus_ready_retry;
+	p->mn_data_ready_retry =
+		(n_data_ready_retry < 0) ? 1000 : n_data_ready_retry;
+
+	p->mn_read_data_retry = (n_read_data_retry < 0) ? 10 : n_read_data_retry;
+	p->mn_write_data_retry =
+		(n_write_data_retry < 0) ? 10 : n_write_data_retry;
+
+	return 0;
+}
+
+/*
+ * Read 'data_len' bytes from device 'dev_addr', register 'reg_addr', into
+ * 'p_void', retrying the whole transaction up to the configured read-retry
+ * budget. Returns 0 on success, -1 when the retries are exhausted.
+ */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	const int n_debug_mode = module_get_debug_mode(p->mp_mod_iic);
+
+	uint8_t *pb = (uint8_t *)p_void;
+	/* defensive: fall back to 10 retries if the budget was never set */
+	int retry = (p->mn_read_data_retry >= 0 ? p->mn_read_data_retry : 10);
+
+	/* debug mode 0xff enables verbose transaction logging */
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW, "%s: adr=0x%2.2x, reg=%d, len=%d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len);
+	}
+
+	while (nthw_iic_readbyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Read retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Read retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	if (n_debug_mode == 0xff) {
+		NT_LOG(DBG, NTHW,
+		       "%s: adr=0x%2.2x, reg=%d, len=%d, retries remaining: %d\n",
+		       p_adapter_id_str, dev_addr, reg_addr, data_len, retry);
+	}
+
+	return 0;
+}
+
+/*
+ * Single read transaction: write 'reg_addr' to device 'dev_addr', then read
+ * 'data_len' bytes back into 'p_byte' via a repeated start.
+ * Returns 0 on success, -1 on bus-ready or data-ready timeout.
+ *
+ * Restructured with guard clauses; the original tail 'return 0;' after the
+ * if/else (both arms returned) was unreachable and has been removed.
+ */
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	uint32_t value;
+	uint32_t i;
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Reset TX fifo */
+	nthw_iic_reg_control_txfifo_reset(p);
+
+	/* Write device address to TX_FIFO and set start bit */
+	value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write reg_addr to TX FIFO and set stop bit */
+	nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 1);
+
+	if (!nthw_iic_bus_ready(p)) {
+		NT_LOG(ERR, NTHW, "%s: error: (%s:%u)\n", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	/* Write device address + RD bit to TX_FIFO and set start bit */
+	value = (dev_addr << 1) | I2C_TRANSMIT_RD;
+	nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+	/* Write data_len to TX_FIFO and set stop bit */
+	nthw_iic_reg_tx_fifo_write(p, data_len, 0, 1);
+
+	for (i = 0; i < data_len; i++) {
+		/* Wait for RX FIFO not empty */
+		if (!nthw_iic_data_ready(p))
+			return -1;
+
+		/* Read one byte from RX_FIFO */
+		nthw_iic_reg_read_i2c_rx_fifo(p, p_byte);
+		p_byte++;
+	}
+
+	return 0;
+}
+
+/*
+ * Write 'data_len' bytes from 'p_void' to device 'dev_addr', register
+ * 'reg_addr', retrying the whole transaction up to the configured
+ * write-retry budget. Returns 0 on success, -1 when retries are exhausted.
+ */
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	/* defensive: fall back to 10 retries if the budget was never set */
+	int retry = (p->mn_write_data_retry >= 0 ? p->mn_write_data_retry : 10);
+	uint8_t *pb = (uint8_t *)p_void;
+
+	while (nthw_iic_writebyte(p, dev_addr, reg_addr, data_len, pb) != 0) {
+		retry--;
+		if (retry <= 0) {
+			NT_LOG(ERR, NTHW,
+			       "%s: I2C%d: Write retry exhausted (dev_addr=%d reg_addr=%d)\n",
+			       p_adapter_id_str, p->mn_iic_instance, dev_addr,
+			       reg_addr);
+			return -1;
+		}
+#if defined(DEBUG)
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: Write retry=%d (dev_addr=%d reg_addr=%d)\n",
+		       p_adapter_id_str, p->mn_iic_instance, retry, dev_addr,
+		       reg_addr);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * Single write transaction: send 'data_len' bytes from 'p_byte' to device
+ * 'dev_addr', register 'reg_addr'. The last data byte carries the I2C stop
+ * bit. Returns 0 on success, -1 on empty input or bus-ready timeout.
+ */
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte)
+{
+	const char *const p_adapter_id_str = p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	uint32_t value;
+	int count;
+	int i;
+
+	/* at least one data byte is required (the one carrying the stop bit) */
+	if (data_len == 0)
+		return -1;
+
+	count = data_len - 1;	/* bytes before the final stop-bit byte */
+	if (nthw_iic_bus_ready(p)) {
+		/* Reset TX fifo */
+		nthw_iic_reg_control_txfifo_reset(p);
+
+		/* Write device address to TX_FIFO and set start bit!! */
+		value = (dev_addr << 1) | I2C_TRANSMIT_WR;
+		nthw_iic_reg_tx_fifo_write(p, value, 1, 0);
+
+		/* Write reg_addr to TX FIFO */
+		nthw_iic_reg_tx_fifo_write(p, reg_addr, 0, 0);
+
+		for (i = 0; i < count; i++) {
+			/* Write data byte to TX fifo and set stop bit */
+			nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 0);
+			p_byte++;
+		}
+
+		/* Write data byte to TX fifo and set stop bit */
+		nthw_iic_reg_tx_fifo_write(p, *p_byte, 0, 1);
+
+		if (!nthw_iic_bus_ready(p)) {
+			NT_LOG(WRN, NTHW, "%s: warn: !busReady (%s:%u)\n",
+			       p_adapter_id_str, __func__, __LINE__);
+			/*
+			 * NOTE(review): this recovery loop has no retry limit
+			 * or delay — if the bus never frees it spins forever.
+			 * Confirm whether a bounded wait is intended here.
+			 */
+			while (true) {
+				if (nthw_iic_bus_ready(p)) {
+					NT_LOG(DBG, NTHW,
+					       "%s: info: busReady (%s:%u)\n",
+					       p_adapter_id_str, __func__,
+					       __LINE__);
+					break;
+				}
+			}
+		}
+
+		return 0;
+
+	} else {
+		NT_LOG(WRN, NTHW, "%s: (%s:%u)\n", p_adapter_id_str, __func__,
+		       __LINE__);
+		return -1;
+	}
+}
+
+/*
+ * Support function for read/write functions below. Waits for bus ready.
+ */
+bool nthw_iic_bus_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_bus_ready_retry >= 0 ? p->mn_bus_ready_retry : 1000);
+	bool b_bus_busy = true;
+
+	while (true) {
+		nthw_iic_reg_busbusy(p, &b_bus_busy);
+		if (!b_bus_busy)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	if (count == 0)
+		return false;
+
+	return true;
+}
+
+/*
+ * Support function for read function. Waits for data ready.
+ */
+/*
+ * Poll until the RX FIFO holds data.
+ * Polls the SR RX-FIFO-empty flag up to mn_data_ready_retry times, waiting
+ * mn_poll_delay microseconds between attempts.
+ * Returns true when data is available, false on timeout.
+ */
+bool nthw_iic_data_ready(nthw_iic_t *p)
+{
+	int count = (p->mn_data_ready_retry >= 0 ? p->mn_data_ready_retry : 1000);
+	bool b_rx_fifo_empty = true;
+
+	while (true) {
+		nthw_iic_reg_rxfifo_empty(p, &b_rx_fifo_empty);
+		if (!b_rx_fifo_empty)
+			break;
+
+		count--;
+		if (count <= 0)   /* Test for timeout */
+			break;
+
+		if (p->mn_poll_delay != 0)
+			I2C_WAIT_US(p->mn_poll_delay);
+	}
+
+	/*
+	 * Report the last observed FIFO state. The previous 'count == 0' test
+	 * was wrong for a retry budget of 0: the counter ended at -1 and a
+	 * timeout was reported as success.
+	 */
+	return !b_rx_fifo_empty;
+}
+
+/*
+ * Probe for an I2C device: attempt a one-byte read from
+ * (n_dev_addr, n_reg_addr). Returns 0 (and logs the byte read) if the
+ * device responded, -1 otherwise.
+ */
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int res;
+	uint8_t data_val = -1;	/* wraps to 0xFF; sentinel until the read succeeds */
+
+	res = nthw_iic_readbyte(p, (uint8_t)n_dev_addr, (uint8_t)n_reg_addr, 1,
+			       &data_val);
+	if (res == 0) {
+		NT_LOG(DBG, NTHW,
+		       "%s: I2C%d: devaddr=0x%02X (%03d) regaddr=%02X val=%02X (%03d) res=%d\n",
+		       p_adapter_id_str, p->mn_iic_instance, n_dev_addr, n_dev_addr,
+		       n_reg_addr, data_val, data_val, res);
+	}
+	return res;
+}
+
+/*
+ * Find the first responding I2C device starting at n_dev_addr_start,
+ * scanning upwards towards address 127 when b_increate is true, otherwise
+ * downwards towards 0. Returns the address found, or -1 if none responded.
+ */
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate)
+{
+	const char *const p_adapter_id_str _unused =
+		p->mp_fpga->p_fpga_info->mp_adapter_id_str;
+
+	int res = 0;
+	int i = n_dev_addr_start;
+
+	if (b_increate) {
+		/* ascending scan: start .. 127 */
+		while (i < 128) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i++;
+		}
+	} else {
+		/* descending scan: start .. 0 */
+		while (i >= 0) {
+			res = nthw_iic_scan_dev_addr(p, i, 0x00);
+			if (res == 0)
+				break;
+			i--;
+		}
+	}
+	NT_LOG(DBG, NTHW, "%s: I2C%d: FOUND: %d\n", p_adapter_id_str,
+	       p->mn_iic_instance, i);
+	return (res == 0 ? i : -1);
+}
+
+/* Probe every 7-bit I2C address (0..127); hits are logged by the probe call. */
+int nthw_iic_scan(nthw_iic_t *p)
+{
+	int n_dev_addr = 0;
+
+	while (n_dev_addr < 128) {
+		(void)nthw_iic_scan_dev_addr(p, n_dev_addr, 0x00);
+		n_dev_addr++;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_iic.h b/drivers/net/ntnic/nthw/core/nthw_iic.h
new file mode 100644
index 0000000000..e3bd313c88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_iic.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_IIC_H__
+#define __NTHW_IIC_H__
+
+#include "nthw_fpga_model.h"
+
+/* Per-instance state for one FPGA IIC (I2C) controller. */
+struct nthw_iic {
+	nt_fpga_t *mp_fpga;	/* owning FPGA */
+	nt_module_t *mp_mod_iic;	/* resolved MOD_IIC module */
+	int mn_iic_instance;	/* instance number within the FPGA */
+
+	uint32_t mn_iic_cycle_time;	/* I2C clock cycle time in ns; 0 = skip timing setup */
+	int mn_poll_delay;	/* microseconds between status polls */
+	int mn_bus_ready_retry;	/* poll attempts while waiting for bus idle */
+	int mn_data_ready_retry;	/* poll attempts while waiting for RX data */
+	int mn_read_data_retry;	/* whole-transaction read retries */
+	int mn_write_data_retry;	/* whole-transaction write retries */
+
+	/* Timing registers and their value fields */
+	nt_register_t *mp_reg_tsusta;
+	nt_field_t *mp_fld_tsusta;
+
+	nt_register_t *mp_reg_tsusto;
+	nt_field_t *mp_fld_tsusto;
+
+	nt_register_t *mp_reg_thdsta;
+	nt_field_t *mp_fld_thdsta;
+
+	nt_register_t *mp_reg_tsudat;
+	nt_field_t *mp_fld_tsudat;
+
+	nt_register_t *mp_reg_tbuf;
+	nt_field_t *mp_fld_tbuf;
+
+	nt_register_t *mp_reg_thigh;
+	nt_field_t *mp_fld_thigh;
+
+	nt_register_t *mp_reg_tlow;
+	nt_field_t *mp_fld_tlow;
+
+	nt_register_t *mp_reg_thddat;
+	nt_field_t *mp_fld_thddat;
+
+	/* Control register bits */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_msms;
+	nt_field_t *mp_fld_cr_txfifo_reset;
+	nt_field_t *mp_fld_cr_txak;
+
+	/* Status register flags */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_bb;
+	nt_field_t *mp_fld_sr_rxfifo_full;
+	nt_field_t *mp_fld_sr_rxfifo_empty;
+	nt_field_t *mp_fld_sr_txfifo_full;
+	nt_field_t *mp_fld_sr_txfifo_empty;
+
+	/* TX FIFO: data plus start/stop control bits */
+	nt_register_t *mp_reg_tx_fifo;
+	nt_field_t *mp_fld_tx_fifo_txdata;
+	nt_field_t *mp_fld_tx_fifo_start;
+	nt_field_t *mp_fld_tx_fifo_stop;
+
+	/* RX FIFO programmable-depth interrupt register */
+	nt_register_t *mp_reg_rx_fifo_pirq;
+	nt_field_t *mp_fld_rx_fifo_pirq_cmp_val;
+
+	/* RX FIFO data register */
+	nt_register_t *mp_reg_rx_fifo;
+	nt_field_t *mp_fld_rx_fifo_rxdata;
+
+	/* Soft-reset register (takes the 0x0A reset key) */
+	nt_register_t *mp_reg_softr;
+	nt_field_t *mp_fld_softr_rkey;
+};
+
+typedef struct nthw_iic nthw_iic_t;
+typedef struct nthw_iic nthw_iic;
+
+/* Constructor / destructor / initialization */
+nthw_iic_t *nthw_iic_new(void);
+int nthw_iic_init(nthw_iic_t *p, nt_fpga_t *p_fpga, int n_iic_instance,
+		 uint32_t n_iic_cycle_time);
+void nthw_iic_delete(nthw_iic_t *p);
+
+/* Negative arguments select built-in defaults for the given parameter */
+int nthw_iic_set_retry_params(nthw_iic_t *p, const int n_poll_delay,
+			   const int n_bus_ready_retry, const int n_data_ready_retry,
+			   const int n_read_data_retry, const int n_write_data_retry);
+
+/* Retrying transfers (read/write_data) and single transactions (readbyte/writebyte) */
+int nthw_iic_read_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, void *p_void);
+int nthw_iic_readbyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		     uint8_t data_len, uint8_t *p_byte);
+int nthw_iic_write_data(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, void *p_void);
+int nthw_iic_writebyte(nthw_iic_t *p, uint8_t dev_addr, uint8_t reg_addr,
+		      uint8_t data_len, uint8_t *p_byte);
+/* Bounded polling helpers; false on timeout */
+bool nthw_iic_bus_ready(nthw_iic_t *p);
+bool nthw_iic_data_ready(nthw_iic_t *p);
+
+/* Bus scanning helpers */
+int nthw_iic_scan(nthw_iic_t *p);
+int nthw_iic_scan_dev_addr(nthw_iic_t *p, int n_dev_addr, int n_reg_addr);
+int nthw_iic_scan_find_dev(nthw_iic_t *p, int n_dev_addr_start, bool b_increate);
+
+#endif /* __NTHW_IIC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
new file mode 100644
index 0000000000..448caf1fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs.h"
+
+#define NTHW_MAC_PCS_LANES (20)
+
+static const uint8_t c_pcs_lanes = NTHW_MAC_PCS_LANES;
+static const uint8_t c_mac_pcs_receiver_mode_dfe _unused;
+
+/*
+ * Allocate and zero-initialize a MAC_PCS instance.
+ * Returns NULL on allocation failure; release with nthw_mac_pcs_delete().
+ */
+nthw_mac_pcs_t *nthw_mac_pcs_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	nthw_mac_pcs_t *p = calloc(1, sizeof(nthw_mac_pcs_t));
+
+	return p;
+}
+
+/* Clear and free a MAC_PCS instance; NULL is accepted (no-op). */
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p)
+{
+	if (p) {
+		/* NOTE(review): scrub before free — presumably to surface
+		 * use-after-free bugs; plain memset, not a secure wipe.
+		 */
+		memset(p, 0, sizeof(nthw_mac_pcs_t));
+		free(p);
+	}
+}
+
+/*
+ * Parameters:
+ *   p != NULL: init struct pointed to by p
+ *   p == NULL: check fpga module(s) presence (but no struct to init)
+ *
+ * Return value:
+ *  <0: if p == NULL then fpga module(s) is/are not present.
+ *      if p != NULL then fpga module(s) is/are not present, struct undefined
+ * ==0: if p == NULL then fpga module(s) is/are present (no struct to init)
+ *    : if p != NULL then fpga module(s) is/are present and struct initialized
+ */
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_MAC_PCS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: MAC_PCS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs = mod;
+
+	assert(n_instance >= 0 && n_instance <= 255);
+	nthw_mac_pcs_set_port_no(p, (uint8_t)n_instance);
+
+	{
+		nt_register_t *p_reg_block_lock, *p_reg_stat_pcs_rx,
+			      *p_reg_stat_pcs_rx_latch;
+		nt_register_t *p_reg_vl_demuxed, *p_reg_gty_stat, *p_reg_pcs_config,
+			      *p_reg_phymac_misc;
+		const int product_id = fpga_get_product_id(p_fpga);
+
+		p_reg_block_lock =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BLOCK_LOCK);
+		p->mp_reg_block_lock = p_reg_block_lock;
+		p->mp_fld_block_lock_lock =
+			register_get_field(p_reg_block_lock, MAC_PCS_BLOCK_LOCK_LOCK);
+
+		p_reg_stat_pcs_rx =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_STAT_PCS_RX);
+		p->mp_reg_stat_pcs_rx = p_reg_stat_pcs_rx;
+		p->mp_fld_stat_pcs_rx_status =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_STATUS);
+		p->mp_fld_stat_pcs_rx_aligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED);
+		p->mp_fld_stat_pcs_rx_aligned_err =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_ALIGNED_ERR);
+		p->mp_fld_stat_pcs_rx_misaligned =
+			register_get_field(p_reg_stat_pcs_rx, MAC_PCS_STAT_PCS_RX_MISALIGNED);
+		p->mp_fld_stat_pcs_rx_internal_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_received_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_local_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_LOCAL_FAULT);
+		p->mp_fld_stat_pcs_rx_remote_fault =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_REMOTE_FAULT);
+		p->mp_fld_stat_pcs_rx_hi_ber =
+			register_get_field(p_reg_stat_pcs_rx,
+					   MAC_PCS_STAT_PCS_RX_HI_BER);
+
+		p_reg_stat_pcs_rx_latch =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_STAT_PCS_RX_LATCH);
+		p->mp_reg_stat_pcs_rx_latch = p_reg_stat_pcs_rx_latch;
+		p->mp_fld_stat_pcs_rx_latch_status =
+			register_get_field(p_reg_stat_pcs_rx_latch,
+					   MAC_PCS_STAT_PCS_RX_LATCH_STATUS);
+
+		p_reg_vl_demuxed =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_VL_DEMUXED);
+		p->mp_fld_vl_demuxed_lock =
+			register_get_field(p_reg_vl_demuxed, MAC_PCS_VL_DEMUXED_LOCK);
+
+		p_reg_gty_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_STAT);
+		p->mp_fld_gty_stat_tx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_0);
+		p->mp_fld_gty_stat_tx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_1);
+		p->mp_fld_gty_stat_tx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_2);
+		p->mp_fld_gty_stat_tx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_TX_RST_DONE_3);
+		p->mp_fld_gty_stat_rx_rst_done0 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_0);
+		p->mp_fld_gty_stat_rx_rst_done1 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_1);
+		p->mp_fld_gty_stat_rx_rst_done2 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_2);
+		p->mp_fld_gty_stat_rx_rst_done3 =
+			register_get_field(p_reg_gty_stat, MAC_PCS_GTY_STAT_RX_RST_DONE_3);
+
+		p->m_fld_block_lock_lock_mask = 0;
+		p->m_fld_vl_demuxed_lock_mask = 0;
+		p->m_fld_gty_stat_tx_rst_done_mask = 0;
+		p->m_fld_gty_stat_rx_rst_done_mask = 0;
+
+		if (product_id == 9563) {
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_block_lock_lock_mask = (1 << 20) - 1;
+			/* NT200A01_2X100 implements 20 virtual lanes */
+			p->m_fld_vl_demuxed_lock_mask = (1 << 20) - 1;
+			p->m_fld_gty_stat_tx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+			p->m_fld_gty_stat_rx_rst_done_mask =
+				1; /* NT200A01_2X100 implements 4 GTY */
+		} else {
+			/* Remember to add new productIds */
+			assert(0);
+		}
+
+		p_reg_pcs_config = module_get_register(p->mp_mod_mac_pcs,
+						   MAC_PCS_MAC_PCS_CONFIG);
+		p->mp_fld_pcs_config_tx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST);
+		p->mp_fld_pcs_config_rx_path_rst =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST);
+		p->mp_fld_pcs_config_rx_enable =
+			register_get_field(p_reg_pcs_config, MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE);
+		p->mp_fld_pcs_config_rx_force_resync =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC);
+		p->mp_fld_pcs_config_rx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN);
+		p->mp_fld_pcs_config_tx_enable =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE);
+		p->mp_fld_pcs_config_tx_send_idle =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE);
+		p->mp_fld_pcs_config_tx_send_rfi =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI);
+		p->mp_fld_pcs_config_tx_test_pattern =
+			register_get_field(p_reg_pcs_config,
+					   MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN);
+
+		p->mp_reg_gty_loop =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_LOOP);
+		p->mp_fld_gty_loop_gt_loop0 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_0);
+		p->mp_fld_gty_loop_gt_loop1 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_1);
+		p->mp_fld_gty_loop_gt_loop2 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_2);
+		p->mp_fld_gty_loop_gt_loop3 =
+			register_get_field(p->mp_reg_gty_loop, MAC_PCS_GTY_LOOP_GT_LOOP_3);
+
+		p_reg_phymac_misc =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_PHYMAC_MISC);
+		p->mp_reg_phymac_misc = p_reg_phymac_misc;
+		p->mp_fld_phymac_misc_tx_sel_host =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_HOST);
+		p->mp_fld_phymac_misc_tx_sel_tfg =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_TFG);
+		p->mp_fld_phymac_misc_tx_sel_rx_loop =
+			register_get_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP);
+
+		/* SOP or EOP TIMESTAMP */
+		p->mp_fld_phymac_misc_ts_eop =
+			register_query_field(p_reg_phymac_misc, MAC_PCS_PHYMAC_MISC_TS_EOP);
+
+		p->mp_reg_link_summary =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_LINK_SUMMARY);
+		p->mp_fld_link_summary_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_ABS);
+		p->mp_fld_link_summary_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE);
+		p->mp_fld_link_summary_lh_abs =
+			register_get_field(p->mp_reg_link_summary, MAC_PCS_LINK_SUMMARY_LH_ABS);
+		p->mp_fld_link_summary_ll_nt_phy_link_state =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE);
+		p->mp_fld_link_summary_link_down_cnt =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT);
+		p->mp_fld_link_summary_nim_interr =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_NIM_INTERR);
+		p->mp_fld_link_summary_lh_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT);
+		p->mp_fld_link_summary_lh_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT);
+		p->mp_fld_link_summary_local_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_LOCAL_FAULT);
+		p->mp_fld_link_summary_remote_fault =
+			register_get_field(p->mp_reg_link_summary,
+					   MAC_PCS_LINK_SUMMARY_REMOTE_FAULT);
+
+		p->mp_reg_bip_err =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_BIP_ERR);
+		p->mp_fld_reg_bip_err_bip_err =
+			register_get_field(p->mp_reg_bip_err, MAC_PCS_BIP_ERR_BIP_ERR);
+
+		p->mp_reg_fec_ctrl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CTRL);
+		p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in =
+			register_get_field(p->mp_reg_fec_ctrl, MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN);
+
+		p->mp_reg_fec_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_STAT);
+		p->mp_field_fec_stat_bypass =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_BYPASS);
+		p->mp_field_fec_stat_valid =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_VALID);
+		p->mp_field_fec_stat_am_lock0 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_0);
+		p->mp_field_fec_stat_am_lock1 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_1);
+		p->mp_field_fec_stat_am_lock2 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_2);
+		p->mp_field_fec_stat_am_lock3 =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_AM_LOCK_3);
+		p->mp_field_fec_stat_fec_lane_algn =
+			register_get_field(p->mp_reg_fec_stat, MAC_PCS_FEC_STAT_FEC_LANE_ALGN);
+
+		p->mp_reg_fec_cw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_CW_CNT);
+		p->mp_field_fec_cw_cnt_cw_cnt =
+			register_get_field(p->mp_reg_fec_cw_cnt, MAC_PCS_FEC_CW_CNT_CW_CNT);
+
+		p->mp_reg_fec_ucw_cnt =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_FEC_UCW_CNT);
+		p->mp_field_fec_ucw_cnt_ucw_cnt =
+			register_get_field(p->mp_reg_fec_ucw_cnt, MAC_PCS_FEC_UCW_CNT_UCW_CNT);
+
+		/* GTY_RX_BUF_STAT */
+#ifdef RXBUFSTAT
+		p->mp_reg_gty_rx_buf_stat =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_RX_BUF_STAT);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2);
+		p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3 =
+			register_get_field(p->mp_reg_gty_rx_buf_stat,
+					   MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3);
+#endif
+
+		/* GTY_PRE_CURSOR */
+		p->mp_reg_gty_pre_cursor =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_PRE_CURSOR);
+		p->mp_field_gty_pre_cursor_tx_pre_csr0 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0);
+		p->mp_field_gty_pre_cursor_tx_pre_csr1 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1);
+		p->mp_field_gty_pre_cursor_tx_pre_csr2 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2);
+		p->mp_field_gty_pre_cursor_tx_pre_csr3 =
+			register_get_field(p->mp_reg_gty_pre_cursor,
+					   MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3);
+
+		/* GTY_DIFF_CTL */
+		p->mp_reg_gty_diff_ctl =
+			module_get_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_DIFF_CTL);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2);
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3 =
+			register_get_field(p->mp_reg_gty_diff_ctl,
+					   MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3);
+
+		/* GTY_POST_CURSOR */
+		p->mp_reg_gty_post_cursor =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_GTY_POST_CURSOR);
+		p->mp_field_gty_post_cursor_tx_post_csr0 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0);
+		p->mp_field_gty_post_cursor_tx_post_csr1 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1);
+		p->mp_field_gty_post_cursor_tx_post_csr2 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2);
+		p->mp_field_gty_post_cursor_tx_post_csr3 =
+			register_get_field(p->mp_reg_gty_post_cursor,
+					   MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3);
+
+		/* GTY_CTL */
+		p->mp_reg_gty_ctl =
+			module_query_register(p->mp_mod_mac_pcs, MAC_PCS_GTY_CTL);
+		if (p->mp_reg_gty_ctl) {
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		} else {
+			p->mp_reg_gty_ctl =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_RX);
+			p->mp_reg_gty_ctl_tx =
+				module_get_register(p->mp_mod_mac_pcs,
+						    MAC_PCS_GTY_CTL_TX);
+			p->mp_field_gty_ctl_tx_pol0 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_0);
+			p->mp_field_gty_ctl_tx_pol1 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_1);
+			p->mp_field_gty_ctl_tx_pol2 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_2);
+			p->mp_field_gty_ctl_tx_pol3 =
+				register_get_field(p->mp_reg_gty_ctl_tx,
+						   MAC_PCS_GTY_CTL_TX_POLARITY_3);
+		}
+		p->mp_field_gty_ctl_rx_pol0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_0);
+		p->mp_field_gty_ctl_rx_pol1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_1);
+		p->mp_field_gty_ctl_rx_pol2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_2);
+		p->mp_field_gty_ctl_rx_pol3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_POLARITY_3);
+		p->mp_field_gty_ctl_rx_lpm_en0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_0);
+		p->mp_field_gty_ctl_rx_lpm_en1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_1);
+		p->mp_field_gty_ctl_rx_lpm_en2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_2);
+		p->mp_field_gty_ctl_rx_lpm_en3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_LPM_EN_3);
+		p->mp_field_gty_ctl_rx_equa_rst0 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_0);
+		p->mp_field_gty_ctl_rx_equa_rst1 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_1);
+		p->mp_field_gty_ctl_rx_equa_rst2 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_2);
+		p->mp_field_gty_ctl_rx_equa_rst3 =
+			register_get_field(p->mp_reg_gty_ctl,
+					   MAC_PCS_GTY_CTL_RX_EQUA_RST_3);
+
+		/* DEBOUNCE_CTRL */
+		p->mp_reg_debounce_ctrl =
+			module_get_register(p->mp_mod_mac_pcs,
+					    MAC_PCS_DEBOUNCE_CTRL);
+		p->mp_field_debounce_ctrl_nt_port_ctrl =
+			register_get_field(p->mp_reg_debounce_ctrl,
+					   MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL);
+
+		p->mp_reg_time_stamp_comp =
+			module_query_register(p->mp_mod_mac_pcs,
+					      MAC_PCS_TIMESTAMP_COMP);
+		if (p->mp_reg_time_stamp_comp) {
+			/* TIMESTAMP_COMP */
+			p->mp_field_time_stamp_comp_rx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_RX_DLY);
+			p->mp_field_time_stamp_comp_tx_dly =
+				register_get_field(p->mp_reg_time_stamp_comp,
+						   MAC_PCS_TIMESTAMP_COMP_TX_DLY);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Wrapper - for ease of use.
+ * Selects the host as Tx data source when enabled; otherwise the TFG
+ * (traffic generator) is selected. The two selections are complementary.
+ */
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nthw_mac_pcs_set_tx_sel_host(p, enable);
+	nthw_mac_pcs_set_tx_sel_tfg(p, !enable);
+}
+
+/* Enable or disable the PCS Rx path via the RX_ENABLE config field. */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_enable;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Enable or disable the PCS Tx path via the TX_ENABLE config field. */
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_enable;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select/deselect the host as Tx data source (PHYMAC_MISC.TX_SEL_HOST). */
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_host;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Select/deselect the TFG as Tx data source (PHYMAC_MISC.TX_SEL_TFG). */
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_tx_sel_tfg;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Enable/disable the TS_EOP misc field. No-op when the field is not
+ * present in this FPGA image (pointer is NULL).
+ */
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_phymac_misc_ts_eop;
+
+	if (!fld)
+		return;
+
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * Return true when every masked block-lock bit and every masked
+ * lane-lock (VL demuxed) bit is set, i.e. the PCS is fully locked.
+ */
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_mask = p->m_fld_block_lock_lock_mask;
+	const uint32_t lane_mask = p->m_fld_vl_demuxed_lock_mask;
+	const uint32_t block_lock =
+		field_get_updated(p->mp_fld_block_lock_lock) & block_mask;
+	const uint32_t lane_lock =
+		field_get_updated(p->mp_fld_vl_demuxed_lock) & lane_mask;
+
+	return block_lock == block_mask && lane_lock == lane_mask;
+}
+
+/* Assert (enable=true) or release the PCS Tx path reset. */
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_path_rst;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Assert (enable=true) or release the PCS Rx path reset. */
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_path_rst;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Return true while the PCS Rx path is held in reset. */
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_pcs_config_rx_path_rst);
+}
+
+/* Start/stop transmission of remote fault indication (RFI). */
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_tx_send_rfi;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/* Set or clear the Rx force-resync config bit. */
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable)
+{
+	nt_field_t *const fld = p->mp_fld_pcs_config_rx_force_resync;
+
+	/* Refresh the field's shadow value, then write the new state */
+	field_get_updated(fld);
+	if (enable)
+		field_set_flush(fld);
+	else
+		field_clr_flush(fld);
+}
+
+/*
+ * True when all four GTY Rx reset-done status fields report done
+ * (all masked bits set). Fields are read left-to-right and evaluation
+ * stops at the first lane that is not done.
+ */
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_rx_rst_done_mask;
+
+	return (field_get_updated(p->mp_fld_gty_stat_rx_rst_done0) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done1) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done2) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_rx_rst_done3) & mask) == mask;
+}
+
+/*
+ * True when all four GTY Tx reset-done status fields report done
+ * (all masked bits set). Fields are read left-to-right and evaluation
+ * stops at the first lane that is not done.
+ */
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p)
+{
+	const uint32_t mask = p->m_fld_gty_stat_tx_rst_done_mask;
+
+	return (field_get_updated(p->mp_fld_gty_stat_tx_rst_done0) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done1) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done2) & mask) == mask &&
+	       (field_get_updated(p->mp_fld_gty_stat_tx_rst_done3) & mask) == mask;
+}
+
+/*
+ * Set the GTY loop mode on all four lanes: value 2 when host (near-end)
+ * loopback is enabled, 0 for normal operation. All lanes are written,
+ * then flushed in one register access.
+ */
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_mode = enable ? 2 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Set the GTY loop mode on all four lanes: value 4 when line (far-end)
+ * loopback is enabled, 0 for normal operation. All lanes are written,
+ * then flushed in one register access.
+ */
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable)
+{
+	const uint32_t loop_mode = enable ? 4 : 0;
+
+	register_update(p->mp_reg_gty_loop);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop0, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop1, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop2, loop_mode);
+	field_set_val32(p->mp_fld_gty_loop_gt_loop3, loop_mode);
+	register_flush(p->mp_reg_gty_loop, 1);
+}
+
+/*
+ * Read the per-lane BIP8 error counters into a local array and, in debug
+ * builds, log any non-zero lane. Presumably the counters are
+ * clear-on-read so reading also resets them - TODO confirm against the
+ * register definition.
+ */
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p)
+{
+	uint32_t lane_bit_errors[NTHW_MAC_PCS_LANES];
+
+	register_update(p->mp_reg_bip_err);
+	field_get_val(p->mp_fld_reg_bip_err_bip_err, (uint32_t *)lane_bit_errors,
+		     ARRAY_SIZE(lane_bit_errors));
+
+#if defined(DEBUG)
+	{
+		uint8_t lane;
+
+		for (lane = 0; lane < c_pcs_lanes; lane++) {
+			if (lane_bit_errors[lane]) {
+				NT_LOG(DBG, NTHW,
+				       "Port %u: pcsLane %2u: BIP8 errors: %u\n",
+				       p->m_port_no, lane, lane_bit_errors[lane]);
+			}
+		}
+	}
+#else
+	(void)c_pcs_lanes; /* unused - kill warning */
+#endif
+}
+
+/* Store bit 0 of the freshly read PCS Rx status field in *status. */
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status)
+{
+	*status = field_get_updated(p->mp_fld_stat_pcs_rx_status) & 0x01;
+}
+
+/* Return the current PCS Rx high bit-error-rate (hi_ber) flag. */
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_stat_pcs_rx_hi_ber);
+}
+
+/*
+ * Read the LINK_SUMMARY register once, then return the requested fields.
+ * Every output pointer is optional; pass NULL for values the caller does
+ * not need.
+ */
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+	uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+	uint32_t *p_ll_nt_phy_link_state,
+	uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+	uint32_t *p_lh_local_fault,
+	uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+	uint32_t *p_remote_fault)
+{
+	/* Single register refresh; the field reads below use the shadow copy */
+	register_update(p->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(p->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(p->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(p->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(p->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(p->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(p->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_local_fault)
+		*p_local_fault = field_get_val32(p->mp_fld_link_summary_local_fault);
+	if (p_remote_fault)
+		*p_remote_fault = field_get_val32(p->mp_fld_link_summary_remote_fault);
+}
+
+/*
+ * Returns true if the lane/block lock bits indicate that a reset is required.
+ * This is the case if Block/Lane lock is not all zero but not all set either.
+ */
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p)
+{
+	const uint32_t block_lock = nthw_mac_pcs_get_fld_block_lock_lock(p);
+	const uint32_t lane_lock = nthw_mac_pcs_get_fld_lane_lock_lock(p);
+	const uint32_t block_mask = nthw_mac_pcs_get_fld_block_lock_lock_mask(p);
+	const uint32_t lane_mask = nthw_mac_pcs_get_fld_lane_lock_lock_mask(p);
+
+	if (block_lock != 0 && block_lock != block_mask)
+		return true;
+	return lane_lock != 0 && lane_lock != lane_mask;
+}
+
+/*
+ * Enable or disable RS-FEC. Writing 0 to RS_FEC_CTRL_IN enables FEC;
+ * writing all ones ((1 << 5) - 1) bypasses it. Rx and Tx paths are then
+ * reset (with 10 ms settle delays) so the new FEC state takes effect.
+ */
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable)
+{
+	NT_LOG(DBG, NTHW, "Port %u: Set FEC: %u\n", p->m_port_no, enable);
+
+	field_get_updated(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in);
+	if (enable) {
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in, 0);
+	} else {
+		/* all 5 control bits set -> FEC bypass */
+		field_set_val_flush32(p->mp_field_fec_ctrl_reg_rs_fec_ctrl_in,
+				    (1 << 5) - 1);
+	}
+
+	/* Both Rx and Tx must be reset for new FEC state to become active */
+	nthw_mac_pcs_rx_path_rst(p, true);
+	nthw_mac_pcs_tx_path_rst(p, true);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+	nthw_mac_pcs_rx_path_rst(p, false);
+	nthw_mac_pcs_tx_path_rst(p, false);
+	NT_OS_WAIT_USEC(10000); /* 10ms */
+
+#ifdef DEBUG
+	if (enable) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC valid: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_valid));
+	} else {
+		NT_LOG(DBG, NTHW, "Port %u: FEC bypass: %u\n", p->m_port_no,
+		       field_get_updated(p->mp_field_fec_stat_bypass));
+	}
+#endif
+}
+
+/* Return the FEC_STAT bypass flag (FEC is bypassed when set). */
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_bypass);
+}
+
+/* Return the FEC_STAT valid flag. */
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_valid);
+}
+
+/* Return the FEC_STAT lane-alignment flag. */
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_field_fec_stat_fec_lane_algn);
+}
+
+/* True if at least one FEC lane reports alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) ||
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/* True only if all four FEC lanes report alignment-marker lock. */
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	return field_get_val32(p->mp_field_fec_stat_am_lock0) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock1) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock2) &&
+	       field_get_val32(p->mp_field_fec_stat_am_lock3);
+}
+
+/*
+ * Log all FEC_STAT fields (valid, per-lane AM lock, lane alignment) at
+ * debug level after a single register refresh.
+ */
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_stat);
+	/* Fix: last AM lock label was "AM_LOCK_0" although the value
+	 * printed is am_lock3 - label it "AM_LOCK_3".
+	 */
+	NT_LOG(DBG, NTHW,
+	       "Port %u: FEC_STAT VALID: %u, AM_LOCK_0: %u, AM_LOCK_1: %u, AM_LOCK_2: %u, AM_LOCK_3: %u, FEC_LANE_ALGN: %u\n",
+	       p->m_port_no, field_get_val32(p->mp_field_fec_stat_valid),
+	       field_get_val32(p->mp_field_fec_stat_am_lock0),
+	       field_get_val32(p->mp_field_fec_stat_am_lock1),
+	       field_get_val32(p->mp_field_fec_stat_am_lock2),
+	       field_get_val32(p->mp_field_fec_stat_am_lock3),
+	       field_get_val32(p->mp_field_fec_stat_fec_lane_algn));
+}
+
+/*
+ * Refresh the FEC corrected (CW) and uncorrected (UCW) codeword counters
+ * and log them when non-zero. Presumably the counters are clear-on-read,
+ * making the refresh itself the reset - TODO confirm against the
+ * register definition.
+ */
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_fec_cw_cnt);
+	register_update(p->mp_reg_fec_ucw_cnt);
+
+	if (field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_cw_cnt_cw_cnt));
+	}
+	if (field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt)) {
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u\n", p->m_port_no,
+		       field_get_val32(p->mp_field_fec_ucw_cnt_ucw_cnt));
+	}
+}
+
+/*
+ * Check the GTY Rx buffer status: returns true (and logs the four
+ * per-lane status values) when any lane has its "changed" flag set AND
+ * any lane reports a non-zero buffer status; false otherwise.
+ */
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p)
+{
+	register_update(p->mp_reg_gty_rx_buf_stat);
+
+	if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed0) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed1) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed2) ||
+			field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat_changed3)) {
+		if (field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2) ||
+				field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3)) {
+			NT_LOG(DBG, NTHW,
+			       "Port %u: GTY RX_BUF_STAT_0: %u, RX_BUF_STAT_1: %u, RX_BUF_STAT_2: %u, RX_BUF_STAT_3: %u\n",
+			       p->m_port_no,
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat0),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat1),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat2),
+			       field_get_val32(p->mp_field_gty_rx_buf_stat_rx_buf_stat3));
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Program GTY Tx equalization for one lane (0-3): pre-cursor,
+ * differential control and post-cursor. Each value is truncated to
+ * 5 bits; lanes outside 0-3 only produce the log line.
+ */
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr)
+{
+	nt_field_t *const pre_csr_fld[4] = {
+		p->mp_field_gty_pre_cursor_tx_pre_csr0,
+		p->mp_field_gty_pre_cursor_tx_pre_csr1,
+		p->mp_field_gty_pre_cursor_tx_pre_csr2,
+		p->mp_field_gty_pre_cursor_tx_pre_csr3,
+	};
+	nt_field_t *const diff_ctl_fld[4] = {
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl0,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl1,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl2,
+		p->mp_field_gty_gty_diff_ctl_tx_diff_ctl3,
+	};
+	nt_field_t *const post_csr_fld[4] = {
+		p->mp_field_gty_post_cursor_tx_post_csr0,
+		p->mp_field_gty_post_cursor_tx_post_csr1,
+		p->mp_field_gty_post_cursor_tx_post_csr2,
+		p->mp_field_gty_post_cursor_tx_post_csr3,
+	};
+
+	/* GTY_PRE_CURSOR */
+	register_update(p->mp_reg_gty_pre_cursor);
+	if (lane < 4)
+		field_set_val_flush32(pre_csr_fld[lane], tx_pre_csr & 0x1F);
+
+	/* GTY_DIFF_CTL */
+	register_update(p->mp_reg_gty_diff_ctl);
+	if (lane < 4)
+		field_set_val_flush32(diff_ctl_fld[lane], tx_diff_ctl & 0x1F);
+
+	/* GTY_POST_CURSOR */
+	register_update(p->mp_reg_gty_post_cursor);
+	if (lane < 4)
+		field_set_val_flush32(post_csr_fld[lane], tx_post_csr & 0x1F);
+
+	NT_LOG(DBG, NTHW,
+	       "Port %u, lane %u: GTY txPreCsr: %d, txDiffCtl: %d, txPostCsr: %d\n",
+	       p->m_port_no, lane, tx_pre_csr, tx_diff_ctl, tx_post_csr);
+}
+
+/*
+ * Set receiver equalization mode
+ *  mode 0: enable DFE
+ *  mode 1: enable LPM
+ *
+ * See UltraScale Architecture GTY Transceivers, www.xilinx.com, page 181,
+ * UG578 (v1.1) November 24, 2015
+ */
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	register_update(p->mp_reg_gty_ctl);
+	/* Write LPM-enable (bit 0 of mode) to all lanes; flush on the last */
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en0, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en1, mode & 0x1);
+	field_set_val32(p->mp_field_gty_ctl_rx_lpm_en2, mode & 0x1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_lpm_en3, mode & 0x1);
+
+	/* Toggle reset */
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 1);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 1);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 1);
+
+	NT_OS_WAIT_USEC(1000); /* 1ms */
+
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst0, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst1, 0);
+	field_set_val32(p->mp_field_gty_ctl_rx_equa_rst2, 0);
+	field_set_val_flush32(p->mp_field_gty_ctl_rx_equa_rst3, 0);
+
+	NT_LOG(DBG, NTHW, "Port %u: GTY receiver mode: %s\n", p->m_port_no,
+	       (mode == c_mac_pcs_receiver_mode_dfe ? "DFE" : "LPM"));
+}
+
+/* Set (swap=true) or clear Tx polarity inversion for one GTY lane (0-3). */
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *const tx_pol_fld[4] = {
+		p->mp_field_gty_ctl_tx_pol0,
+		p->mp_field_gty_ctl_tx_pol1,
+		p->mp_field_gty_ctl_tx_pol2,
+		p->mp_field_gty_ctl_tx_pol3,
+	};
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(tx_pol_fld[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Tx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set (swap=true) or clear Rx polarity inversion for one GTY lane (0-3). */
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap)
+{
+	nt_field_t *const rx_pol_fld[4] = {
+		p->mp_field_gty_ctl_rx_pol0,
+		p->mp_field_gty_ctl_rx_pol1,
+		p->mp_field_gty_ctl_rx_pol2,
+		p->mp_field_gty_ctl_rx_pol3,
+	};
+
+	register_update(p->mp_reg_gty_ctl);
+	if (lane < 4)
+		field_set_val_flush32(rx_pol_fld[lane], swap);
+	NT_LOG(DBG, NTHW, "Port %u: set GTY Rx lane (%d) polarity: %d\n",
+	       p->m_port_no, lane, swap);
+}
+
+/* Set the port LED mode (see enum nthw_mac_pcs_led_mode_e). */
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode)
+{
+	nt_field_t *const fld = p->mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, mode);
+}
+
+/*
+ * Program the Rx timestamp compensation delay. No-op when the FPGA image
+ * lacks the TIMESTAMP_COMP register (field pointer is NULL).
+ */
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly)
+{
+	nt_field_t *const fld = p->mp_field_time_stamp_comp_rx_dly;
+
+	if (!fld)
+		return;
+
+	field_get_updated(fld);
+	field_set_val_flush32(fld, rx_dly);
+}
+
+/* Store the logical port number (used in log messages). */
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Read the current block-lock status bits. */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_block_lock_lock);
+}
+
+/* Mask of valid block-lock bits (captured at init). */
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_block_lock_lock_mask;
+}
+
+/* Read the current lane-lock (VL demuxed) status bits. */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p)
+{
+	return field_get_updated(p->mp_fld_vl_demuxed_lock);
+}
+
+/* Mask of valid lane-lock bits (captured at init). */
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p)
+{
+	return p->m_fld_vl_demuxed_lock_mask;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
new file mode 100644
index 0000000000..08197f8b9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_MAC_PCS_H_
+#define NTHW_MAC_PCS_H_
+
+/* Port LED control modes, written via nthw_mac_pcs_set_led_mode(). */
+enum nthw_mac_pcs_led_mode_e {
+	NTHW_MAC_PCS_LED_AUTO = 0x00,
+	NTHW_MAC_PCS_LED_ON = 0x01,
+	NTHW_MAC_PCS_LED_OFF = 0x02,
+	NTHW_MAC_PCS_LED_PORTID = 0x03,
+};
+
+#define nthw_mac_pcs_receiver_mode_dfe (0)
+#define nthw_mac_pcs_receiver_mode_lpm (1)
+
+/*
+ * Runtime state for one MAC/PCS module instance: the FPGA/module handles
+ * plus cached register and field pointers resolved at init time. Field
+ * pointers that are optional in a given FPGA image may be NULL.
+ */
+struct nthw_mac_pcs {
+	uint8_t m_port_no;
+
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_mac_pcs;
+	int mn_instance;
+
+	/* Block lock status */
+	nt_field_t *mp_fld_block_lock_lock;
+	uint32_t m_fld_block_lock_lock_mask;
+
+	/* Lane lock status */
+	nt_field_t *mp_fld_vl_demuxed_lock;
+	uint32_t m_fld_vl_demuxed_lock_mask;
+
+	/* GTY_STAT */
+	nt_field_t *mp_fld_gty_stat_rx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_rx_rst_done3;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done0;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done1;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done2;
+	nt_field_t *mp_fld_gty_stat_tx_rst_done3;
+	uint32_t m_fld_gty_stat_rx_rst_done_mask;
+	uint32_t m_fld_gty_stat_tx_rst_done_mask;
+
+	/* GTY_LOOP */
+	nt_register_t *mp_reg_gty_loop;
+	nt_field_t *mp_fld_gty_loop_gt_loop0;
+	nt_field_t *mp_fld_gty_loop_gt_loop1;
+	nt_field_t *mp_fld_gty_loop_gt_loop2;
+	nt_field_t *mp_fld_gty_loop_gt_loop3;
+
+	/* MAC_PCS_CONFIG */
+	nt_field_t *mp_fld_pcs_config_tx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_path_rst;
+	nt_field_t *mp_fld_pcs_config_rx_enable;
+	nt_field_t *mp_fld_pcs_config_rx_force_resync;
+	nt_field_t *mp_fld_pcs_config_rx_test_pattern;
+	nt_field_t *mp_fld_pcs_config_tx_enable;
+	nt_field_t *mp_fld_pcs_config_tx_send_idle;
+	nt_field_t *mp_fld_pcs_config_tx_send_rfi;
+	nt_field_t *mp_fld_pcs_config_tx_test_pattern;
+
+	/* STAT PCS */
+	nt_field_t *mp_fld_stat_pcs_rx_status;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned;
+	nt_field_t *mp_fld_stat_pcs_rx_aligned_err;
+	nt_field_t *mp_fld_stat_pcs_rx_misaligned;
+	nt_field_t *mp_fld_stat_pcs_rx_internal_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_received_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_local_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_remote_fault;
+	nt_field_t *mp_fld_stat_pcs_rx_hi_ber;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_field_t *mp_fld_stat_pcs_rx_latch_status;
+
+	/* PHYMAC_MISC */
+	nt_field_t *mp_fld_phymac_misc_tx_sel_host;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_tfg;
+	nt_field_t *mp_fld_phymac_misc_tx_sel_rx_loop;
+	nt_field_t *mp_fld_phymac_misc_ts_eop;
+
+	/* LINK_SUMMARY */
+	nt_register_t *mp_reg_link_summary;
+	nt_field_t *mp_fld_link_summary_abs;
+	nt_field_t *mp_fld_link_summary_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_lh_abs;
+	nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
+	nt_field_t *mp_fld_link_summary_link_down_cnt;
+	nt_field_t *mp_fld_link_summary_nim_interr;
+	nt_field_t *mp_fld_link_summary_lh_local_fault;
+	nt_field_t *mp_fld_link_summary_lh_remote_fault;
+	nt_field_t *mp_fld_link_summary_local_fault;
+	nt_field_t *mp_fld_link_summary_remote_fault;
+
+	/* BIP_ERR */
+	nt_register_t *mp_reg_bip_err;
+	nt_field_t *mp_fld_reg_bip_err_bip_err;
+
+	/* FEC_CTRL */
+	nt_register_t *mp_reg_fec_ctrl;
+	nt_field_t *mp_field_fec_ctrl_reg_rs_fec_ctrl_in;
+
+	/* FEC_STAT */
+	nt_register_t *mp_reg_fec_stat;
+	nt_field_t *mp_field_fec_stat_bypass;
+	nt_field_t *mp_field_fec_stat_valid;
+	nt_field_t *mp_field_fec_stat_am_lock0;
+	nt_field_t *mp_field_fec_stat_am_lock1;
+	nt_field_t *mp_field_fec_stat_am_lock2;
+	nt_field_t *mp_field_fec_stat_am_lock3;
+	nt_field_t *mp_field_fec_stat_fec_lane_algn;
+
+	/* FEC Corrected code word count */
+	nt_register_t *mp_reg_fec_cw_cnt;
+	nt_field_t *mp_field_fec_cw_cnt_cw_cnt;
+
+	/* FEC Uncorrected code word count */
+	nt_register_t *mp_reg_fec_ucw_cnt;
+	nt_field_t *mp_field_fec_ucw_cnt_ucw_cnt;
+
+	/* GTY_RX_BUF_STAT */
+	nt_register_t *mp_reg_gty_rx_buf_stat;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat3;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed0;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed1;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed2;
+	nt_field_t *mp_field_gty_rx_buf_stat_rx_buf_stat_changed3;
+
+	/* GTY_PRE_CURSOR */
+	nt_register_t *mp_reg_gty_pre_cursor;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr0;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr1;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr2;
+	nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr3;
+
+	/* GTY_DIFF_CTL */
+	nt_register_t *mp_reg_gty_diff_ctl;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl0;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl1;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl2;
+	nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl3;
+
+	/* GTY_POST_CURSOR */
+	nt_register_t *mp_reg_gty_post_cursor;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr0;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr1;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr2;
+	nt_field_t *mp_field_gty_post_cursor_tx_post_csr3;
+
+	/* GTY_CTL */
+	nt_register_t *mp_reg_gty_ctl;
+	nt_register_t *mp_reg_gty_ctl_tx;
+	nt_field_t *mp_field_gty_ctl_tx_pol0;
+	nt_field_t *mp_field_gty_ctl_tx_pol1;
+	nt_field_t *mp_field_gty_ctl_tx_pol2;
+	nt_field_t *mp_field_gty_ctl_tx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_pol0;
+	nt_field_t *mp_field_gty_ctl_rx_pol1;
+	nt_field_t *mp_field_gty_ctl_rx_pol2;
+	nt_field_t *mp_field_gty_ctl_rx_pol3;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en0;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en1;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en2;
+	nt_field_t *mp_field_gty_ctl_rx_lpm_en3;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst0;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst1;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst2;
+	nt_field_t *mp_field_gty_ctl_rx_equa_rst3;
+
+	/* DEBOUNCE_CTRL */
+	nt_register_t *mp_reg_debounce_ctrl;
+	nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;
+
+	/* TIMESTAMP_COMP */
+	nt_register_t *mp_reg_time_stamp_comp;
+	nt_field_t *mp_field_time_stamp_comp_rx_dly;
+	nt_field_t *mp_field_time_stamp_comp_tx_dly;
+
+	/* STAT_PCS_RX */
+	nt_register_t *mp_reg_stat_pcs_rx;
+
+	/* STAT_PCS_RX_LATCH */
+	nt_register_t *mp_reg_stat_pcs_rx_latch;
+
+	/* PHYMAC_MISC */
+	nt_register_t *mp_reg_phymac_misc;
+
+	/* BLOCK_LOCK */
+	nt_register_t *mp_reg_block_lock;
+};
+
+typedef struct nthw_mac_pcs nthw_mac_pcs_t;
+typedef struct nthw_mac_pcs nthw_mac_pcs;
+
+nthw_mac_pcs_t *nthw_mac_pcs_new(void);
+int nthw_mac_pcs_init(nthw_mac_pcs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_mac_pcs_delete(nthw_mac_pcs_t *p);
+
+bool nthw_mac_pcs_is_block_and_lane_lock_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_rx_reset_done(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_is_gt_fsm_tx_reset_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_path_rst(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_rx_path_rst(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_rx_path_rst(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_rx_force_resync(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_tx_send_rfi(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_is_dd_r3_calib_done(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_tx_host_enable(nthw_mac_pcs_t *p,
+			     bool enable); /* wrapper - for ease of use */
+void nthw_mac_pcs_set_rx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_enable(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_host(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_tx_sel_tfg(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_ts_eop(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_host_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_set_line_loopback(nthw_mac_pcs_t *p, bool enable);
+void nthw_mac_pcs_reset_bip_counters(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_get_status(nthw_mac_pcs_t *p, uint8_t *status);
+bool nthw_mac_pcs_get_hi_ber(nthw_mac_pcs_t *p);
+
+void nthw_mac_pcs_get_link_summary1(nthw_mac_pcs_t *p, uint32_t *p_status,
+				uint32_t *p_status_latch, uint32_t *p_aligned,
+				uint32_t *p_local_fault, uint32_t *p_remote_fault);
+
+void nthw_mac_pcs_get_link_summary(nthw_mac_pcs_t *p, uint32_t *p_abs,
+			       uint32_t *p_nt_phy_link_state, uint32_t *p_lh_abs,
+			       uint32_t *p_ll_nt_phy_link_state,
+			       uint32_t *p_link_down_cnt, uint32_t *p_nim_interr,
+			       uint32_t *p_lh_local_fault,
+			       uint32_t *p_lh_remote_fault, uint32_t *p_local_fault,
+			       uint32_t *p_remote_fault);
+
+bool nthw_mac_pcs_reset_required(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_fec(nthw_mac_pcs_t *p, bool enable);
+bool nthw_mac_pcs_get_fec_bypass(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_valid(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_aligned(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_any_am_locked(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_fec_stat_all_am_locked(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_dump_fec_stat_fields(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_reset_fec_counters(nthw_mac_pcs_t *p);
+bool nthw_mac_pcs_get_gty_rx_buf_stat_error(nthw_mac_pcs_t *p);
+void nthw_mac_pcs_set_gty_tx_tuning(nthw_mac_pcs_t *p, uint8_t lane, uint8_t tx_pre_csr,
+			       uint8_t tx_diff_ctl, uint8_t tx_post_csr);
+void nthw_mac_pcs_swap_gty_tx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_swap_gty_rx_polarity(nthw_mac_pcs_t *p, uint8_t lane, bool swap);
+void nthw_mac_pcs_set_receiver_equalization_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_led_mode(nthw_mac_pcs_t *p, uint8_t mode);
+void nthw_mac_pcs_set_timestamp_comp_rx(nthw_mac_pcs_t *p, uint16_t rx_dly);
+void nthw_mac_pcs_set_port_no(nthw_mac_pcs_t *p, uint8_t port_no);
+
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_block_lock_lock_mask(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock(nthw_mac_pcs_t *p);
+uint32_t nthw_mac_pcs_get_fld_lane_lock_lock_mask(nthw_mac_pcs_t *p);
+
+#endif /* NTHW_MAC_PCS_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
new file mode 100644
index 0000000000..d8e1f0de5d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.c
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_mac_pcs_xxv.h"
+
+/* Set (or clear) a single register field and flush it, tolerating fields
+ * that are absent in the current FPGA image (NULL field pointer).
+ */
+static void nthw_mac_pcs_xxv_field_set_or_clr_flush(const nt_field_t *f, bool set)
+{
+	if (!f)
+		return;
+
+	field_get_updated(f);	/* refresh shadow before the read-modify-write */
+	if (set)
+		field_set_flush(f);
+	else
+		field_clr_flush(f);
+}
+
+/* Allocate a zero-initialized MAC/PCS-XXV context.
+ * Returns NULL on allocation failure (caller must check).
+ */
+nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void)
+{
+	/* calloc zero-initializes, replacing the original malloc+memset pair. */
+	return calloc(1, sizeof(nthw_mac_pcs_xxv_t));
+}
+
+/* Scrub and release a context previously returned by nthw_mac_pcs_xxv_new().
+ * NULL is accepted and ignored.
+ */
+void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));	/* clear before release, as elsewhere in this driver */
+	free(p);
+}
+
+/* Map a channel index to a port number: channel 0 uses the module instance
+ * number, any other channel maps 1:1 to its index.
+ */
+uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (index == 0)
+		return (uint8_t)p->mn_instance;
+	return index;
+}
+
+/* Record the port number used in log messages (see reset_fec_counters). */
+void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no)
+{
+	p->m_port_no = port_no;
+}
+
+/* Read the LINK_SUMMARY register of channel 'index' and return the requested
+ * fields. Every output pointer is optional: pass NULL for fields that are not
+ * wanted. The register shadow is refreshed once, so all returned values come
+ * from the same hardware read.
+ */
+void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
+	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
+	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
+	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
+	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
+	uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *r;
+
+	/* BUGFIX: validate 'p' before it is dereferenced; the original code
+	 * computed &p->regs[index] first and asserted afterwards.
+	 */
+	assert(p);
+	r = &p->regs[index]; /* register and fields */
+
+	register_update(r->mp_reg_link_summary);
+	if (p_abs)
+		*p_abs = field_get_val32(r->mp_fld_link_summary_abs);
+	if (p_nt_phy_link_state) {
+		*p_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_nt_phy_link_state);
+	}
+	if (p_lh_abs)
+		*p_lh_abs = field_get_val32(r->mp_fld_link_summary_lh_abs);
+	if (p_ll_nt_phy_link_state) {
+		*p_ll_nt_phy_link_state =
+			field_get_val32(r->mp_fld_link_summary_ll_nt_phy_link_state);
+	}
+	if (p_link_down_cnt)
+		*p_link_down_cnt = field_get_val32(r->mp_fld_link_summary_link_down_cnt);
+	if (p_nim_interr)
+		*p_nim_interr = field_get_val32(r->mp_fld_link_summary_nim_interr);
+	if (p_lh_local_fault) {
+		*p_lh_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_local_fault);
+	}
+	if (p_lh_remote_fault) {
+		*p_lh_remote_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_remote_fault);
+	}
+	if (p_lh_internal_local_fault) {
+		*p_lh_internal_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_internal_local_fault);
+	}
+	if (p_lh_received_local_fault) {
+		*p_lh_received_local_fault =
+			field_get_val32(r->mp_fld_link_summary_lh_received_local_fault);
+	}
+}
+
+/* Set or clear the CORE_CONF TX_ENABLE bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_enable,
+						enable);
+}
+
+/* Set or clear the CORE_CONF RX_ENABLE bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_enable,
+						enable);
+}
+
+/* Set or clear the CORE_CONF RX_FORCE_RESYNC bit of channel 'index'. */
+void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_rx_force_resync,
+						enable);
+}
+
+/* Assert (enable=true) or de-assert the SUB_RST RX_GT_DATA reset bit. */
+void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_gt_data,
+						enable);
+}
+
+/* Assert (enable=true) or de-assert the SUB_RST TX_GT_DATA reset bit. */
+void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_gt_data,
+						enable);
+}
+
+/* Assert (enable=true) or de-assert the SUB_RST AN_LT (autoneg/link-training) reset. */
+void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_an_lt,
+						enable);
+}
+
+/* Assert (enable=true) or de-assert the SUB_RST SPEED_CTRL reset bit. */
+void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_speed_ctrl,
+						enable);
+}
+
+/* Set or clear the CORE_CONF TX_SEND_RFI (remote fault indication) bit. */
+void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_rfi,
+						enable);
+}
+
+/* Set or clear the CORE_CONF TX_SEND_LFI (local fault indication) bit. */
+void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_lfi,
+						enable);
+}
+
+/* Write both fault-insertion bits (LFI and RFI) under one register flush so
+ * they take effect atomically with respect to each other.
+ */
+void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
+		bool enable_rfi, uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+
+	register_update(rf->mp_reg_core_conf);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_lfi, enable_lfi);
+	field_set_val32(rf->mp_fld_core_conf_tx_send_rfi, enable_rfi);
+	register_flush(rf->mp_reg_core_conf, 1);
+}
+
+/* DFE is active exactly when LPM equalization is turned off. */
+bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_gty_ctl_rx_lpm_en) == 0;
+}
+
+/* Select DFE (enable=true) or LPM receiver equalization, then pulse the
+ * equalizer reset so the transceiver latches the new setting.
+ */
+void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	const nt_field_t *rst = p->regs[index].mp_fld_gty_ctl_rx_equa_rst;
+
+	/* Enabling DFE means LPM_EN = 0, so write the inverse of 'enable'. */
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_rx_lpm_en,
+						!enable);
+
+	/* Toggle GTY_CTL_RX->EQUA_RST 1 -> 0 to assert the new LPM_EN setting. */
+	field_get_updated(rst);
+	field_set_val_flush32(rst, 1);	/* assert GTH soft reset */
+	field_get_updated(rst);
+	field_set_val_flush32(rst, 0);	/* de-assert GTH soft reset */
+	field_get_updated(rst);
+}
+
+/* Set or clear the GTY_CTL RX polarity-swap bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_rx_polarity,
+						enable);
+}
+
+/* Set or clear the GTY_CTL TX polarity-swap bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
+				    uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_polarity,
+						enable);
+}
+
+/* Set or clear the GTY_CTL TX inhibit bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_gty_ctl_tx_inhibit,
+						enable);
+}
+
+/* Enable or disable GT-level host (near-end) loopback.
+ * NOTE(review): value 2 selects the loopback mode, 0 is normal operation —
+ * encoding taken verbatim from the GTY GT_LOOP field usage; confirm against
+ * the FPGA register documentation.
+ */
+void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	const nt_field_t *const gt_loop = p->regs[index].mp_fld_gty_loop_gt_loop;
+
+	field_get_updated(gt_loop);
+	field_set_val_flush32(gt_loop, enable ? 2U : 0U);
+}
+
+/* Set or clear the CORE_CONF LINE_LOOPBACK bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_line_loopback,
+						enable);
+}
+
+/* True while the user RX reset of channel 'index' is still asserted. */
+bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_status_user_rx_rst;
+
+	return field_get_updated(f);
+}
+
+/* True while the user TX reset of channel 'index' is still asserted. */
+bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_sub_rst_status_user_tx_rst;
+
+	return field_get_updated(f);
+}
+
+/*
+ * QPLL lock signal.
+ * Cores capable of 10G only have a single QPLL; cores capable of 10G/25G
+ * have two QPLLs.
+ */
+bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	uint32_t lock_bits =
+		field_get_updated(p->regs[index].mp_fld_sub_rst_status_qpll_lock);
+
+	/* 3 = both lock bits set.
+	 * NOTE(review): verify the single-QPLL (10G-only) core also reads 3
+	 * when locked.
+	 */
+	return lock_bits == 3;
+}
+
+/* Ready when the QPLL(s) report lock and neither user reset is pending. */
+bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	if (!nthw_mac_pcs_xxv_is_qpll_lock(p, index))
+		return false;
+	if (nthw_mac_pcs_xxv_is_user_rx_rst(p, index))
+		return false;
+	return !nthw_mac_pcs_xxv_is_user_tx_rst(p, index);
+}
+
+/* True when auto-negotiation is enabled for channel 'index'. */
+bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_aneg_config_enable;
+
+	return field_get_updated(f);
+}
+
+/* Set or clear the CORE_CONF TX_SEND_IDLE bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_send_idle,
+						enable);
+}
+
+/* Set or clear the CORE_CONF TX_INS_FCS (insert frame checksum) bit. */
+void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_tx_ins_fcs,
+						enable);
+}
+
+/* True when the channel is configured for 10G (field: 0 = 25G, 1 = 10G). */
+bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	return field_get_updated(p->regs[index].mp_fld_link_speed10_g) != 0;
+}
+
+/* Select 10G (enable=true) or 25G link speed for channel 'index'. */
+void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_link_speed10_g,
+						enable);
+}
+
+/* Pulse the link-speed toggle bit for channel 'index'. */
+void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const toggle = p->regs[index].mp_fld_link_speed_toggle;
+
+	field_get_updated(toggle);	/* refresh shadow before the write */
+	field_set_flush(toggle);
+}
+
+/* Set or clear the RS_FEC_CONF RS_FEC_ENABLE bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_rs_fec_conf_rs_fec_enable,
+						enable);
+}
+
+/* Write the LED/port control mode into DEBOUNCE_CTRL NT_PORT_CTRL. */
+void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index)
+{
+	const nt_field_t *const port_ctrl =
+		p->regs[index].mp_field_debounce_ctrl_nt_port_ctrl;
+
+	field_get_updated(port_ctrl);
+	field_set_val_flush32(port_ctrl, mode);
+}
+
+/* Assert (enable=true) or de-assert the SUB_RST RX_MAC_PCS reset bit. */
+void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_rx_mac_pcs,
+						enable);
+}
+
+/* Assert (enable=true) or de-assert the SUB_RST TX_MAC_PCS reset bit. */
+void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
+				  uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_sub_rst_tx_mac_pcs,
+						enable);
+}
+
+/* Read (and thereby latch) the RS-FEC corrected/uncorrected codeword counters
+ * of channel 'index', logging any non-zero values.
+ * NOTE(review): the function name implies the counters are clear-on-read;
+ * confirm against the register documentation.
+ */
+void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	struct nthw_mac_pcs_xxv__registers_fields *rf = &p->regs[index];
+	uint32_t ccw_cnt;
+	uint32_t ucw_cnt;
+
+	register_update(rf->mp_reg_rs_fec_ccw);
+	register_update(rf->mp_reg_rs_fec_ucw);
+
+	/* Read each field once instead of twice (test + log) as before. */
+	ccw_cnt = field_get_val32(rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt);
+	if (ccw_cnt)
+		NT_LOG(DBG, NTHW, "Port %u: FEC_CW_CNT: %u", p->m_port_no, ccw_cnt);
+
+	ucw_cnt = field_get_val32(rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt);
+	if (ucw_cnt)
+		NT_LOG(DBG, NTHW, "Port %u: FEC_UCW_CNT: %u", p->m_port_no, ucw_cnt);
+}
+
+/* Program the RX timestamp compensation delay for channel 'index'. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_rx_dly;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, rx_dly);
+}
+
+/* Program the TX timestamp compensation delay for channel 'index'. */
+void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
+				      uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_timestamp_comp_tx_dly;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, tx_dly);
+}
+
+/* Set or clear the CORE_CONF TS_AT_EOP (timestamp at end-of-packet) bit. */
+void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_core_conf_ts_at_eop,
+						enable);
+}
+
+/* Program the GTY TX differential swing control for channel 'index'. */
+void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_gty_diff_ctl_tx_diff_ctl;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Program the GTY TX pre-cursor emphasis for channel 'index'. */
+void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_pre_cursor_tx_pre_csr;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Program the GTY TX post-cursor emphasis for channel 'index'. */
+void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_field_gty_post_cursor_tx_post_csr;
+
+	field_get_updated(f);
+	field_set_val_flush32(f, value);
+}
+
+/* Set or clear the LT_CONF ENABLE (link training) bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				   uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_lt_conf_enable,
+						enable);
+}
+
+/* Set or clear the ANEG_CONFIG FEC91_REQUEST bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec91_request,
+						enable);
+}
+
+/* Set or clear the ANEG_CONFIG RS_FEC_REQUEST bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_rs_fec_request,
+						enable);
+}
+
+/* Set or clear the ANEG_CONFIG FEC74_REQUEST bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
+		uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_fec74_request,
+						enable);
+}
+
+/* Set or clear the ANEG_CONFIG ENABLE bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_enable,
+						enable);
+}
+
+/* Set or clear the ANEG_CONFIG BYPASS bit of channel 'index'. */
+void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
+				       uint8_t index)
+{
+	nthw_mac_pcs_xxv_field_set_or_clr_flush(p->regs[index].mp_fld_aneg_config_bypass,
+						enable);
+}
+
+/* Apply a DAC (direct-attach copper) mode. Only NTHW_MAC_PCS_XXV_DAC_OFF is
+ * implemented so far; any other mode trips the assert (and is a silent no-op
+ * in NDEBUG builds, matching the original behavior).
+ */
+void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
+			      uint8_t index)
+{
+	if (dac_mode != NTHW_MAC_PCS_XXV_DAC_OFF) {
+		assert(0); /* Implement other DAC modes before using them */
+		return;
+	}
+
+	/* Disable AN/LT and link training, then pulse the MAC/PCS and GT data
+	 * resets in the original order.
+	 */
+	nthw_mac_pcs_xxv_reset_an_lt(p, false, index);
+	nthw_mac_pcs_xxv_set_aneg_config_enable(p, false, index);
+	nthw_mac_pcs_xxv_set_aneg_config_bypass(p, true, index);
+	nthw_mac_pcs_xxv_set_lt_conf_enable(p, false, index);
+	nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, true, index);
+	nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, true, index);
+	nthw_mac_pcs_xxv_reset_rx_gt_data(p, true, index);
+	nthw_mac_pcs_xxv_reset_tx_gt_data(p, true, index);
+	nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(p, false, index);
+	nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(p, false, index);
+	nthw_mac_pcs_xxv_reset_rx_gt_data(p, false, index);
+	nthw_mac_pcs_xxv_reset_tx_gt_data(p, false, index);
+}
+
+/* Latched-low RX FEC74 lock status for channel 'index'. */
+bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f = p->regs[index].mp_fld_link_summary_ll_rx_fec74_lock;
+
+	return field_get_updated(f);
+}
+
+/* Latched-low RX RS-FEC lane alignment status for channel 'index'. */
+bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index)
+{
+	const nt_field_t *const f =
+		p->regs[index].mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
+
+	return field_get_updated(f);
+}
+
+int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
+		       int n_channels)
+{
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_MAC_PCS_XXV, n_instance);
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	const bool m_mac8x10_g = false;
+	nt_module_t *module = p_mod;
+	uint64_t n_module_version_packed64 = -1;
+	nt_register_t *r;
+	nt_register_t *(*get_register)(nt_module_t *, uint32_t) =
+		module_get_register;
+	nt_field_t *(*get_field)(const nt_register_t *, uint32_t) =
+		register_get_field;
+	nt_field_t *(*query_field)(const nt_register_t *, uint32_t) =
+		register_query_field;
+	struct nthw_mac_pcs_xxv__registers_fields *rf;
+
+	if (p == NULL)
+		return (p_mod == NULL ? -1 : 0);
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_mac_pcs_xxv = p_mod;
+
+	memset(p->regs, 0, sizeof(p->regs));
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_mac_pcs_xxv);
+	switch (n_module_version_packed64) {
+	case (0UL << 32) | 0UL: /* 0.0 */
+	case (0UL << 32) | 1UL: /* 0.1 */
+	case (0UL << 32) | 2UL: /* 0.2 */
+		NT_LOG(DBG, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		break;
+	default:
+		NT_LOG(ERR, NTHW,
+		       "%s: MAC_PCS_XXV instance=%d: version=0x%08lX: unsupported module version\n",
+		       p_adapter_id_str, p->mn_instance, n_module_version_packed64);
+		return -1;
+	}
+
+	assert(n_channels == 1 || n_channels == 2 || n_channels == 4);
+
+	/* Register MAC_PCS_XXV_CORE_CONF_0 -- MAC_PCS_XXV_CORE_CONF_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_0);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_0_TS_AT_EOP);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_1);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_1_TS_AT_EOP);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_2);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_2_TS_AT_EOP);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_CORE_CONF_3);
+
+		rf->mp_reg_core_conf = r;
+		rf->mp_fld_core_conf_rx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_ENABLE);
+		rf->mp_fld_core_conf_rx_force_resync =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_RX_FORCE_RESYNC);
+		rf->mp_fld_core_conf_tx_enable =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_ENABLE);
+		rf->mp_fld_core_conf_tx_ins_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_INS_FCS);
+		rf->mp_fld_core_conf_tx_ign_fcs =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_IGN_FCS);
+		rf->mp_fld_core_conf_tx_send_lfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_LFI);
+		rf->mp_fld_core_conf_tx_send_rfi =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_RFI);
+		rf->mp_fld_core_conf_tx_send_idle =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TX_SEND_IDLE);
+		rf->mp_fld_core_conf_inline_mode =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_INLINE_MODE);
+		rf->mp_fld_core_conf_line_loopback =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_LINE_LOOPBACK);
+		rf->mp_fld_core_conf_ts_at_eop =
+			get_field(r, MAC_PCS_XXV_CORE_CONF_3_TS_AT_EOP);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_ANEG_CONFIG_0 -- MAC_PCS_XXV_ANEG_CONFIG_3
+	 * and       MAC_PCS_XXV_ANEG_ABILITY_0 -- MAC_PCS_XXV_ANEG_ABILITY_3
+	 * and       MAC_PCS_XXV_LT_CONF_0 -- MAC_PCS_XXV_LT_CONF_3
+	 */
+	if (!m_mac8x10_g && n_channels < 4) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_0);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_0_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_0);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				query_field(r, MAC_PCS_XXV_ANEG_ABILITY_0_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_0);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_0_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_0_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels >= 2) {
+		/* 2 x 10 25 G */
+		/* ANEG_CONFIG */
+
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_1);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_1_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_1);
+		rf->mp_reg_aneg_ability = r;
+
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr) {
+			rf->mp_fld_aneg_ability25_g_base_cr =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR_S);
+		if (!rf->mp_fld_aneg_ability25_g_base_crs) {
+			rf->mp_fld_aneg_ability25_g_base_crs =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR_S);
+		}
+
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			query_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_BASE25G_CR1);
+		if (!rf->mp_fld_aneg_ability25_g_base_cr1) {
+			rf->mp_fld_aneg_ability25_g_base_cr1 =
+				get_field(r, MAC_PCS_XXV_ANEG_ABILITY_1_25GBASE_CR1);
+		}
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_1);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_1_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_1_SEED);
+	}
+
+	if (!m_mac8x10_g && n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_2);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_2_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_2);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_2_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_2);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_2_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_2_SEED);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_ANEG_CONFIG_3);
+		rf->mp_reg_aneg_config = r;
+		rf->mp_fld_aneg_config_enable =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ENABLE);
+		rf->mp_fld_aneg_config_bypass =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_BYPASS);
+		rf->mp_fld_aneg_config_restart =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RESTART);
+		rf->mp_fld_aneg_config_pseudo =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PSEUDO);
+		rf->mp_fld_aneg_config_nonce_seed =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_NONCE_SEED);
+		rf->mp_fld_aneg_config_remote_fault =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_REMOTE_FAULT);
+		rf->mp_fld_aneg_config_pause =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_PAUSE);
+		rf->mp_fld_aneg_config_asmdir =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_ASMDIR);
+		rf->mp_fld_aneg_config_fec74_request10_g =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST_10G);
+		rf->mp_fld_aneg_config_hide_fec74 =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_HIDE_FEC74);
+		rf->mp_fld_aneg_config_fec74_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC74_REQUEST);
+		rf->mp_fld_aneg_config_fec91_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_REQUEST);
+		rf->mp_fld_aneg_config_fec91_ability =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_FEC91_ABILITY);
+		rf->mp_fld_aneg_config_rs_fec_request =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_RS_FEC_REQUEST);
+		rf->mp_fld_aneg_config_sw_fec_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_FEC_OVERWRITE);
+		rf->mp_fld_aneg_config_sw_speed_overwrite =
+			get_field(r, MAC_PCS_XXV_ANEG_CONFIG_3_SW_SPEED_OVERWRITE);
+
+		/* ANEG_ABILITY */
+		r = get_register(module, MAC_PCS_XXV_ANEG_ABILITY_3);
+		rf->mp_reg_aneg_ability = r;
+		rf->mp_fld_aneg_ability25_g_base_cr =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR);
+		rf->mp_fld_aneg_ability25_g_base_crs =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR_S);
+		rf->mp_fld_aneg_ability25_g_base_cr1 =
+			get_field(r, MAC_PCS_XXV_ANEG_ABILITY_3_25GBASE_CR1);
+
+		/* LT_CONF */
+		r = get_register(module, MAC_PCS_XXV_LT_CONF_3);
+		rf->mp_reg_lt_conf = r;
+		rf->mp_fld_lt_conf_enable =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_ENABLE);
+		rf->mp_fld_lt_conf_restart =
+			get_field(r, MAC_PCS_XXV_LT_CONF_3_RESTART);
+		rf->mp_fld_lt_conf_seed = get_field(r, MAC_PCS_XXV_LT_CONF_3_SEED);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_SUB_RST_0 -- MAC_PCS_XXV_SUB_RST_3
+	 * and       MAC_PCS_XXV_SUB_RST_STATUS_0 -- MAC_PCS_XXV_SUB_RST_STATUS_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_0);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_0_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_0_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_0_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_0);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_0_QPLL_LOCK);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_1);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_1_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_1_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_1_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_1);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_1_QPLL_LOCK);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_2);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_2_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_2_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_2_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_2);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_2_QPLL_LOCK);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_3);
+
+		rf->mp_reg_sub_rst = r;
+		rf->mp_fld_sub_rst_rx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_MAC_PCS);
+		rf->mp_fld_sub_rst_tx_mac_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_MAC_PCS);
+		rf->mp_fld_sub_rst_rx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_GT_DATA);
+		rf->mp_fld_sub_rst_tx_gt_data =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_GT_DATA);
+		rf->mp_fld_sub_rst_rx_buf =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_BUF);
+		rf->mp_fld_sub_rst_rx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PMA);
+		rf->mp_fld_sub_rst_tx_pma =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PMA);
+		rf->mp_fld_sub_rst_rx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_RX_PCS);
+		rf->mp_fld_sub_rst_tx_pcs =
+			get_field(r, MAC_PCS_XXV_SUB_RST_3_TX_PCS);
+		rf->mp_fld_sub_rst_an_lt = get_field(r, MAC_PCS_XXV_SUB_RST_3_AN_LT);
+		rf->mp_fld_sub_rst_speed_ctrl =
+			query_field(r, MAC_PCS_XXV_SUB_RST_3_SPEED_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_SUB_RST_STATUS_3);
+		rf->mp_reg_sub_rst_status = r;
+		rf->mp_fld_sub_rst_status_user_rx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_RX_RST);
+		rf->mp_fld_sub_rst_status_user_tx_rst =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_USER_TX_RST);
+		rf->mp_fld_sub_rst_status_qpll_lock =
+			get_field(r, MAC_PCS_XXV_SUB_RST_STATUS_3_QPLL_LOCK);
+	}
+
+	/* Registers MAC_PCS_XXV_LINK_SUMMARY_0 -- MAC_PCS_XXV_LINK_SUMMARY_3 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_0);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_0_NIM_INTERR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_1);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_1_NIM_INTERR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_2);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_2_NIM_INTERR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+		r = get_register(module, MAC_PCS_XXV_LINK_SUMMARY_3);
+
+		rf->mp_reg_link_summary = r;
+		rf->mp_fld_link_summary_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NT_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_ll_nt_phy_link_state =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_PHY_LINK_STATE);
+		rf->mp_fld_link_summary_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_ABS);
+		rf->mp_fld_link_summary_lh_abs =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_ABS);
+		rf->mp_fld_link_summary_link_down_cnt =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LINK_DOWN_CNT);
+		if (!m_mac8x10_g) {
+			rf->mp_fld_link_summary_ll_rx_fec74_lock =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_FEC74_LOCK);
+			rf->mp_fld_link_summary_lh_rx_rsfec_hi_ser =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_RSFEC_HI_SER);
+			rf->mp_fld_link_summary_ll_rx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_ll_tx_rsfec_lane_alignment =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_TX_RSFEC_LANE_ALIGNMENT);
+			rf->mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code =
+				get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_PCS_VALID_CTRL_CODE);
+		}
+		rf->mp_fld_link_summary_ll_rx_block_lock =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LL_RX_BLOCK_LOCK);
+		rf->mp_fld_link_summary_lh_rx_high_bit_error_rate =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RX_HIGH_BIT_ERROR_RATE);
+		;
+		rf->mp_fld_link_summary_lh_internal_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_INTERNAL_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_received_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_RECEIVED_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_local_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_LOCAL_FAULT);
+		rf->mp_fld_link_summary_lh_remote_fault =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_LH_REMOTE_FAULT);
+		rf->mp_fld_link_summary_nim_interr =
+			get_field(r, MAC_PCS_XXV_LINK_SUMMARY_3_NIM_INTERR);
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_GTY_LOOP_0 -- MAC_PCS_XXV_GTY_LOOP_3
+	 * and       MAC_PCS_XXV_GTY_CTL_RX_0 -- MAC_PCS_XXV_GTY_CTL_RX_3
+	 * and       MAC_PCS_XXV_GTY_CTL_TX_0 -- MAC_PCS_XXV_GTY_CTL_TX_3
+	 * and       MAC_PCS_XXV_LINK_SPEED_0 -- MAC_PCS_XXV_LINK_SPEED_3
+	 * and       MAC_PCS_XXV_RS_FEC_CONF_0 -- MAC_PCS_XXV_RS_FEC_CONF_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_0);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_0_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_0);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_0_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_0);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_0_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_0);
+
+			rf->mp_fld_link_speed10_g =
+				query_field(r, MAC_PCS_XXV_LINK_SPEED_0_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_0_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_0_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_0);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_0_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_0);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_0_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_0);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_0_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_1);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_1_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_1);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_1_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_1);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_1_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_1);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_1_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_1_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_1);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_1_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_1);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_1_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_1);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_1_RS_FEC_UCW_CNT);
+		}
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_2);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_2_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_2);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_2_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_2);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_2_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_2);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_2_10G);
+			}
+
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_2_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_2);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_2_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_2);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_2_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_2);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_2_RS_FEC_UCW_CNT);
+		}
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_GTY_LOOP_3);
+		rf->mp_reg_gty_loop = r;
+		rf->mp_fld_gty_loop_gt_loop =
+			get_field(r, MAC_PCS_XXV_GTY_LOOP_3_GT_LOOP);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_RX_3);
+		rf->mp_reg_gty_ctl_rx = r;
+		rf->mp_fld_gty_ctl_rx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_POLARITY);
+		rf->mp_fld_gty_ctl_rx_lpm_en =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_LPM_EN);
+		rf->mp_fld_gty_ctl_rx_equa_rst =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_RX_3_EQUA_RST);
+
+		r = get_register(module, MAC_PCS_XXV_GTY_CTL_TX_3);
+		rf->mp_fld_gty_ctl_tx_polarity =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_POLARITY);
+		rf->mp_fld_gty_ctl_tx_inhibit =
+			get_field(r, MAC_PCS_XXV_GTY_CTL_TX_3_INHIBIT);
+
+		if (!m_mac8x10_g) {
+			r = get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+			rf->mp_reg_link_speed =
+				get_register(module, MAC_PCS_XXV_LINK_SPEED_3);
+
+			rf->mp_fld_link_speed10_g =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_SPEED);
+			if (!rf->mp_fld_link_speed10_g) {
+				rf->mp_fld_link_speed10_g =
+					get_field(r, MAC_PCS_XXV_LINK_SPEED_3_10G);
+			}
+			rf->mp_fld_link_speed_toggle =
+				get_field(r, MAC_PCS_XXV_LINK_SPEED_3_TOGGLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CONF_3);
+			rf->mp_reg_rs_fec_conf = r;
+			rf->mp_fld_rs_fec_conf_rs_fec_enable =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CONF_3_RS_FEC_ENABLE);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_CCW_CNT_3);
+			rf->mp_reg_rs_fec_ccw = r;
+			rf->mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_CCW_CNT_3_RS_FEC_CCW_CNT);
+
+			r = get_register(module, MAC_PCS_XXV_RS_FEC_UCW_CNT_3);
+			rf->mp_reg_rs_fec_ucw = r;
+			rf->mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt =
+				get_field(r, MAC_PCS_XXV_RS_FEC_UCW_CNT_3_RS_FEC_UCW_CNT);
+		}
+	}
+
+	/*
+	 * Registers MAC_PCS_XXV_DEBOUNCE_CTRL_0 -- MAC_PCS_XXV_DEBOUNCE_CTRL_3
+	 * and       MAC_PCS_XXV_TIMESTAMP_COMP_0 -- MAC_PCS_XXV_TIMESTAMP_COMP_3
+	 * and       MAC_PCS_XXV_GTY_PRE_CURSOR_0 -- MAC_PCS_XXV_GTY_PRE_CURSOR_3
+	 * and       MAC_PCS_XXV_GTY_DIFF_CTL_0 -- MAC_PCS_XXV_GTY_DIFF_CTL_3
+	 * and       MAC_PCS_XXV_GTY_POST_CURSOR_0 -- MAC_PCS_XXV_GTY_POST_CURSOR_3
+	 */
+	if (n_channels < 4) {
+		/* Initialize regs/fields for sub-module/channel 0 */
+		rf = &p->regs[0];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_0);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_0_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_0);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_0_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				 MAC_PCS_XXV_GTY_PRE_CURSOR_0);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_0_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_0);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_0_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_0);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_0_TX_POST_CSR);
+	}
+
+	if (n_channels >= 2) {
+		/* Initialize regs/fields for sub-module/channel 1 */
+		rf = &p->regs[1];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_1);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_1_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_1);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_1_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_1);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_1_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_1);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_1_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_1);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_1_TX_POST_CSR);
+	}
+
+	if (n_channels == 4) {
+		/* Initialize regs/fields for sub-module/channel 2 */
+		rf = &p->regs[2];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_2);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_2_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_2);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_2_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_2);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_2_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_2);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_2_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_2);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_2_TX_POST_CSR);
+
+		/* Initialize regs/fields for sub-module/channel 3 */
+		rf = &p->regs[3];
+
+		r = get_register(module, MAC_PCS_XXV_DEBOUNCE_CTRL_3);
+
+		rf->mp_reg_debounce_ctrl = r;
+		rf->mp_field_debounce_ctrl_nt_port_ctrl =
+			get_field(r, MAC_PCS_XXV_DEBOUNCE_CTRL_3_NT_PORT_CTRL);
+
+		r = get_register(module, MAC_PCS_XXV_TIMESTAMP_COMP_3);
+		rf->mp_reg_timestamp_comp = r;
+		rf->mp_field_timestamp_comp_rx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_RX_DLY);
+		rf->mp_field_timestamp_comp_tx_dly =
+			get_field(r, MAC_PCS_XXV_TIMESTAMP_COMP_3_TX_DLY);
+
+		/* GTY_PRE_CURSOR */
+		r = get_register(p->mp_mod_mac_pcs_xxv,
+				MAC_PCS_XXV_GTY_PRE_CURSOR_3);
+		rf->mp_reg_gty_pre_cursor = r;
+		rf->mp_field_gty_pre_cursor_tx_pre_csr =
+			get_field(r, MAC_PCS_XXV_GTY_PRE_CURSOR_3_TX_PRE_CSR);
+
+		/* GTY_DIFF_CTL */
+		r = get_register(module, MAC_PCS_XXV_GTY_DIFF_CTL_3);
+		rf->mp_reg_gty_diff_ctl = r;
+		rf->mp_field_gty_gty_diff_ctl_tx_diff_ctl =
+			get_field(r, MAC_PCS_XXV_GTY_DIFF_CTL_3_TX_DIFF_CTL);
+
+		/* GTY_POST_CURSOR */
+		r = get_register(module, MAC_PCS_XXV_GTY_POST_CURSOR_3);
+		rf->mp_reg_gty_post_cursor = r;
+		rf->mp_field_gty_post_cursor_tx_post_csr =
+			get_field(r, MAC_PCS_XXV_GTY_POST_CURSOR_3_TX_POST_CSR);
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
new file mode 100644
index 0000000000..5a38494f7e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_mac_pcs_xxv.h
@@ -0,0 +1,291 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/*
 * Register/field accessor context for the MAC_PCS_XXV FPGA module.
 * Each context holds handles for up to NTHW_MAC_PCS_XXV_NUM_ELEMS
 * sub-modules/channels (see the per-index regs[] array below).
 */

#ifndef NTHW_MAC_PCS_XXV_H_
#define NTHW_MAC_PCS_XXV_H_

#include <stdint.h>
#include <stdbool.h>
#include "nthw_fpga_model.h"

/* Port LED behavior selector (see nthw_mac_pcs_xxv_set_led_mode()). */
enum nthw_mac_pcs_xxv_led_mode_e {
	NTHW_MAC_PCS_XXV_LED_AUTO = 0x00,
	NTHW_MAC_PCS_XXV_LED_ON = 0x01,
	NTHW_MAC_PCS_XXV_LED_OFF = 0x02,
	NTHW_MAC_PCS_XXV_LED_PORTID = 0x03,
};

/*
 * Direct-attach-copper cable mode selector
 * (see nthw_mac_pcs_xxv_set_dac_mode()); N/S/L presumably denote
 * 25G-CR cable reach classes -- TODO confirm against FPGA documentation.
 */
enum nthw_mac_pcs_xxv_dac_mode_e {
	NTHW_MAC_PCS_XXV_DAC_OFF = 0x00,
	NTHW_MAC_PCS_XXV_DAC_CA_25G_N = 0x01,
	NTHW_MAC_PCS_XXV_DAC_CA_25G_S = 0x02,
	NTHW_MAC_PCS_XXV_DAC_CA_25G_L = 0x03,
};

struct nthw_mac_pcs_xxv {
	nt_fpga_t *mp_fpga;              /* owning FPGA handle */
	nt_module_t *mp_mod_mac_pcs_xxv; /* resolved MAC_PCS_XXV module */
	int mn_instance;                 /* module instance number */

	uint8_t m_port_no;               /* adapter port this context serves */

/* Maximum number of sub-modules/channels handled per context. */
#define NTHW_MAC_PCS_XXV_NUM_ELEMS 4
	/* Per-channel register and field handles, indexed by channel. */
	struct nthw_mac_pcs_xxv__registers_fields {
		/* CORE_CONF */
		nt_register_t *mp_reg_core_conf;
		nt_field_t *mp_fld_core_conf_rx_enable;
		nt_field_t *mp_fld_core_conf_rx_force_resync;
		nt_field_t *mp_fld_core_conf_tx_enable;
		nt_field_t *mp_fld_core_conf_tx_ins_fcs;
		nt_field_t *mp_fld_core_conf_tx_ign_fcs;
		nt_field_t *mp_fld_core_conf_tx_send_lfi;
		nt_field_t *mp_fld_core_conf_tx_send_rfi;
		nt_field_t *mp_fld_core_conf_tx_send_idle;
		nt_field_t *mp_fld_core_conf_inline_mode;
		nt_field_t *mp_fld_core_conf_line_loopback;
		nt_field_t *mp_fld_core_conf_ts_at_eop;

		/* ANEG_CONFIG */
		nt_register_t *mp_reg_aneg_config;
		nt_field_t *mp_fld_aneg_config_enable;
		nt_field_t *mp_fld_aneg_config_bypass;
		nt_field_t *mp_fld_aneg_config_restart;
		nt_field_t *mp_fld_aneg_config_pseudo;
		nt_field_t *mp_fld_aneg_config_nonce_seed;
		nt_field_t *mp_fld_aneg_config_remote_fault;
		nt_field_t *mp_fld_aneg_config_pause;
		nt_field_t *mp_fld_aneg_config_asmdir;
		nt_field_t *mp_fld_aneg_config_fec74_request10_g;
		nt_field_t *mp_fld_aneg_config_hide_fec74;
		nt_field_t *mp_fld_aneg_config_fec74_request;
		nt_field_t *mp_fld_aneg_config_fec91_request;
		nt_field_t *mp_fld_aneg_config_fec91_ability;
		nt_field_t *mp_fld_aneg_config_rs_fec_request;
		nt_field_t *mp_fld_aneg_config_sw_fec_overwrite;
		nt_field_t *mp_fld_aneg_config_sw_speed_overwrite;

		/* ANEG_ABILITY */
		nt_register_t *mp_reg_aneg_ability;
		nt_field_t *mp_fld_aneg_ability25_g_base_cr;
		nt_field_t *mp_fld_aneg_ability25_g_base_crs;
		nt_field_t *mp_fld_aneg_ability25_g_base_cr1;

		/* LT_CONF */
		nt_register_t *mp_reg_lt_conf;
		nt_field_t *mp_fld_lt_conf_enable;
		nt_field_t *mp_fld_lt_conf_restart;
		nt_field_t *mp_fld_lt_conf_seed;

		/* SUB_RST */
		nt_register_t *mp_reg_sub_rst;
		nt_field_t *mp_fld_sub_rst_rx_mac_pcs;
		nt_field_t *mp_fld_sub_rst_tx_mac_pcs;
		nt_field_t *mp_fld_sub_rst_rx_gt_data;
		nt_field_t *mp_fld_sub_rst_tx_gt_data;
		nt_field_t *mp_fld_sub_rst_rx_buf;
		nt_field_t *mp_fld_sub_rst_rx_pma;
		nt_field_t *mp_fld_sub_rst_tx_pma;
		nt_field_t *mp_fld_sub_rst_rx_pcs;
		nt_field_t *mp_fld_sub_rst_tx_pcs;
		nt_field_t *mp_fld_sub_rst_an_lt;
		nt_field_t *mp_fld_sub_rst_speed_ctrl;

		/* SUB_RST_STATUS */
		nt_register_t *mp_reg_sub_rst_status;
		nt_field_t *mp_fld_sub_rst_status_user_rx_rst;
		nt_field_t *mp_fld_sub_rst_status_user_tx_rst;
		nt_field_t *mp_fld_sub_rst_status_qpll_lock;

		/* LINK_SUMMARY */
		nt_register_t *mp_reg_link_summary;
		nt_field_t *mp_fld_link_summary_nt_phy_link_state;
		nt_field_t *mp_fld_link_summary_ll_nt_phy_link_state;
		nt_field_t *mp_fld_link_summary_abs;
		nt_field_t *mp_fld_link_summary_lh_abs;
		nt_field_t *mp_fld_link_summary_link_down_cnt;
		/* Begin 2 x 10/25 Gbps only fields: */
		nt_field_t *mp_fld_link_summary_ll_rx_fec74_lock;
		nt_field_t *mp_fld_link_summary_lh_rx_rsfec_hi_ser;
		nt_field_t *mp_fld_link_summary_ll_rx_rsfec_lane_alignment;
		nt_field_t *mp_fld_link_summary_ll_tx_rsfec_lane_alignment;
		nt_field_t *mp_fld_link_summary_lh_rx_pcs_valid_ctrl_code;
		/* End 2 x 10/25 Gbps only fields. */
		nt_field_t *mp_fld_link_summary_ll_rx_block_lock;
		nt_field_t *mp_fld_link_summary_lh_rx_high_bit_error_rate;
		nt_field_t *mp_fld_link_summary_lh_internal_local_fault;
		nt_field_t *mp_fld_link_summary_lh_received_local_fault;
		nt_field_t *mp_fld_link_summary_lh_local_fault;
		nt_field_t *mp_fld_link_summary_lh_remote_fault;
		nt_field_t *mp_fld_link_summary_lh_tx_local_fault;
		nt_field_t *mp_fld_link_summary_nim_interr;

		/* GTY_LOOP */
		nt_register_t *mp_reg_gty_loop;
		nt_field_t *mp_fld_gty_loop_gt_loop;

		/* GTY_CTL_RX */
		nt_register_t *mp_reg_gty_ctl_rx;
		nt_field_t *mp_fld_gty_ctl_rx_polarity;
		nt_field_t *mp_fld_gty_ctl_rx_lpm_en;
		nt_field_t *mp_fld_gty_ctl_rx_equa_rst;

		/* GTY_CTL_TX */
		nt_register_t *mp_reg_gty_ctl_tx;
		nt_field_t *mp_fld_gty_ctl_tx_polarity;
		nt_field_t *mp_fld_gty_ctl_tx_inhibit;

		/* LINK_SPEED */
		nt_register_t *mp_reg_link_speed;
		nt_field_t *mp_fld_link_speed10_g;
		nt_field_t *mp_fld_link_speed_toggle;

		/* RS_FEC_CONF */
		nt_register_t *mp_reg_rs_fec_conf;
		nt_field_t *mp_fld_rs_fec_conf_rs_fec_enable;

		/* DEBOUNCE_CTRL */
		nt_register_t *mp_reg_debounce_ctrl;
		nt_field_t *mp_field_debounce_ctrl_nt_port_ctrl;

		/* FEC_CCW_CNT */
		nt_register_t *mp_reg_rs_fec_ccw;
		nt_field_t *mp_field_reg_rs_fec_ccw_reg_rs_fec_ccw_cnt;

		/* FEC_UCW_CNT */
		nt_register_t *mp_reg_rs_fec_ucw;
		nt_field_t *mp_field_reg_rs_fec_ucw_reg_rs_fec_ucw_cnt;

		/* TIMESTAMP_COMP */
		nt_register_t *mp_reg_timestamp_comp;
		nt_field_t *mp_field_timestamp_comp_rx_dly;
		nt_field_t *mp_field_timestamp_comp_tx_dly;

		/* GTY_PRE_CURSOR */
		nt_register_t *mp_reg_gty_pre_cursor;
		nt_field_t *mp_field_gty_pre_cursor_tx_pre_csr;

		/* GTY_DIFF_CTL */
		nt_register_t *mp_reg_gty_diff_ctl;
		nt_field_t *mp_field_gty_gty_diff_ctl_tx_diff_ctl;

		/* GTY_POST_CURSOR */
		nt_register_t *mp_reg_gty_post_cursor;
		nt_field_t *mp_field_gty_post_cursor_tx_post_csr;
	} regs[NTHW_MAC_PCS_XXV_NUM_ELEMS];
};

typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv_t;
typedef struct nthw_mac_pcs_xxv nthw_mac_pcs_xxv;

/* Allocate a zeroed context; release with nthw_mac_pcs_xxv_delete(). */
nthw_mac_pcs_xxv_t *nthw_mac_pcs_xxv_new(void);
void nthw_mac_pcs_xxv_delete(nthw_mac_pcs_xxv_t *p);
/*
 * Resolve register/field handles for @n_channels channels of module
 * instance @n_instance. Returns 0 on success, negative on error.
 */
int nthw_mac_pcs_xxv_init(nthw_mac_pcs_xxv_t *p, nt_fpga_t *p_fpga, int n_instance,
		       int n_channels);

/*
 * Read a snapshot of the LINK_SUMMARY register fields for channel
 * @index into the caller-provided out-parameters.
 */
void nthw_mac_pcs_xxv_get_link_summary(nthw_mac_pcs_xxv_t *p,
	uint32_t *p_abs, uint32_t *p_nt_phy_link_state,
	uint32_t *p_lh_abs, uint32_t *p_ll_nt_phy_link_state, uint32_t *p_link_down_cnt,
	uint32_t *p_nim_interr, uint32_t *p_lh_local_fault, uint32_t *p_lh_remote_fault,
	uint32_t *p_lh_internal_local_fault, uint32_t *p_lh_received_local_fault,
	uint8_t index);

uint8_t nthw_mac_pcs_xxv_get_port_no(const nthw_mac_pcs_xxv_t *p, uint8_t index);

void nthw_mac_pcs_xxv_set_port_no(nthw_mac_pcs_xxv_t *p, uint8_t port_no);

/*
 * All setters below take the channel @index last; "enable" arguments
 * assert (true) or deassert (false) the corresponding register field.
 */
void nthw_mac_pcs_xxv_set_tx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
void nthw_mac_pcs_xxv_set_rx_enable(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
void nthw_mac_pcs_xxv_rx_force_resync(nthw_mac_pcs_xxv_t *p, bool enable,
				 uint8_t index);

void nthw_mac_pcs_xxv_reset_rx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
				 uint8_t index);
void nthw_mac_pcs_xxv_reset_tx_gt_data(nthw_mac_pcs_xxv_t *p, bool enable,
				 uint8_t index);

void nthw_mac_pcs_xxv_reset_an_lt(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
void nthw_mac_pcs_xxv_reset_speed_ctrl(nthw_mac_pcs_xxv_t *p, bool enable,
				  uint8_t index);

void nthw_mac_pcs_xxv_set_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
void nthw_mac_pcs_xxv_set_tx_send_lfi(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);
void nthw_mac_pcs_xxv_set_tx_send_lfi_tx_send_rfi(nthw_mac_pcs_xxv_t *p, bool enable_lfi,
		bool enable_rfi, uint8_t index);

bool nthw_mac_pcs_xxv_is_dfe_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);
void nthw_mac_pcs_xxv_set_dfe(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);

void nthw_mac_pcs_xxv_set_rx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
				    uint8_t index);
void nthw_mac_pcs_xxv_set_tx_gty_polarity(nthw_mac_pcs_xxv_t *p, bool enable,
				    uint8_t index);

void nthw_mac_pcs_xxv_set_tx_gty_inhibit(nthw_mac_pcs_xxv_t *p, bool enable,
				   uint8_t index);

void nthw_mac_pcs_xxv_set_host_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
				   uint8_t index);
void nthw_mac_pcs_xxv_set_line_loopback(nthw_mac_pcs_xxv_t *p, bool enable,
				   uint8_t index);

bool nthw_mac_pcs_xxv_is_user_rx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);
bool nthw_mac_pcs_xxv_is_user_tx_rst(nthw_mac_pcs_xxv_t *p, uint8_t index);

bool nthw_mac_pcs_xxv_is_qpll_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
bool nthw_mac_pcs_xxv_is_sub_rst_ready(nthw_mac_pcs_xxv_t *p, uint8_t index);
bool nthw_mac_pcs_xxv_is_aneg_enabled(nthw_mac_pcs_xxv_t *p, uint8_t index);

void nthw_mac_pcs_xxv_set_tx_send_idle(nthw_mac_pcs_xxv_t *p, bool enable,
				 uint8_t index);
void nthw_mac_pcs_xxv_set_tx_ins_fcs(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);

bool nthw_mac_pcs_xxv_get_link_speed10_g(nthw_mac_pcs_xxv_t *p, uint8_t index);
void nthw_mac_pcs_xxv_set_link_speed10_g(nthw_mac_pcs_xxv_t *p, bool enable,
				   uint8_t index);
void nthw_mac_pcs_xxv_set_link_speed_toggle(nthw_mac_pcs_xxv_t *p, uint8_t index);

void nthw_mac_pcs_xxv_set_rs_fec_conf_rs_fec_enable(nthw_mac_pcs_xxv_t *p, bool enable,
		uint8_t index);

/* @mode is one of enum nthw_mac_pcs_xxv_led_mode_e. */
void nthw_mac_pcs_xxv_set_led_mode(nthw_mac_pcs_xxv_t *p, uint8_t mode, uint8_t index);

void nthw_mac_pcs_xxv_set_rx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
				  uint8_t index);
void nthw_mac_pcs_xxv_set_tx_mac_pcs_rst(nthw_mac_pcs_xxv_t *p, bool enable,
				  uint8_t index);

void nthw_mac_pcs_xxv_reset_fec_counters(nthw_mac_pcs_xxv_t *p, uint8_t index);

/* GTY transmit analog settings: differential swing, pre- and post-cursor. */
void nthw_mac_pcs_xxv_set_gty_diff(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
void nthw_mac_pcs_xxv_set_gty_pre(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);
void nthw_mac_pcs_xxv_set_gty_post(nthw_mac_pcs_xxv_t *p, uint8_t value, uint8_t index);

void nthw_mac_pcs_xxv_set_ts_at_eop(nthw_mac_pcs_xxv_t *p, bool enable, uint8_t index);

void nthw_mac_pcs_xxv_set_aneg_config_enable(nthw_mac_pcs_xxv_t *p, bool enable,
				       uint8_t index);
void nthw_mac_pcs_xxv_set_aneg_config_bypass(nthw_mac_pcs_xxv_t *p, bool enable,
				       uint8_t index);
void nthw_mac_pcs_xxv_set_lt_conf_enable(nthw_mac_pcs_xxv_t *p, bool enable,
				   uint8_t index);

/* @dac_mode is one of enum nthw_mac_pcs_xxv_dac_mode_e. */
void nthw_mac_pcs_xxv_set_dac_mode(nthw_mac_pcs_xxv_t *p, uint8_t dac_mode,
			      uint8_t index);

/* Timestamp compensation delays, in units defined by the FPGA (TODO confirm). */
void nthw_mac_pcs_xxv_set_timestamp_comp_rx(nthw_mac_pcs_xxv_t *p, uint16_t rx_dly,
				      uint8_t index);
void nthw_mac_pcs_xxv_set_timestamp_comp_tx(nthw_mac_pcs_xxv_t *p, uint16_t tx_dly,
				      uint8_t index);

void nthw_mac_pcs_xxv_set_aneg_config_fec91_request(nthw_mac_pcs_xxv_t *p, bool enable,
		uint8_t index);
void nthw_mac_pcs_xxv_set_aneg_config_rs_fec_request(nthw_mac_pcs_xxv_t *p, bool enable,
		uint8_t index);
void nthw_mac_pcs_xxv_set_aneg_config_fec74_request(nthw_mac_pcs_xxv_t *p, bool enable,
		uint8_t index);

bool nthw_mac_pcs_xxv_get_ll_rx_fec74_lock(nthw_mac_pcs_xxv_t *p, uint8_t index);
bool nthw_mac_pcs_xxv_get_ll_rx_rsfec_lane_alignment(nthw_mac_pcs_xxv_t *p, uint8_t index);

#endif /* NTHW_MAC_PCS_XXV_H_ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
new file mode 100644
index 0000000000..92089d2fa3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_rd_tg.h"
+
+nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void)
+{
+	nthw_pci_rd_tg_t *p = malloc(sizeof(nthw_pci_rd_tg_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+	return p;
+}
+
+void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_rd_tg_t));
+		free(p);
+	}
+}
+
/*
 * Bind a PCI_RD_TG (PCIe read traffic generator) context to FPGA module
 * instance @n_instance and resolve all register/field handles.
 *
 * When @p is NULL the call acts as a pure probe: it returns 0 if the
 * module instance exists and -1 otherwise, without touching any state.
 *
 * @return 0 on success, -1 if the module instance is not present.
 */
int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_RD_TG, n_instance);

	/* probe-only mode: report presence of the module instance */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: PCI_RD_TG %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_pci_rd_tg = mod;

	/* product parameter defaults to 1 when absent */
	p->mn_param_pci_ta_tg_present =
		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);

	/* RDDATA0: low 32 bits of the DMA physical address */
	p->mp_reg_pci_rd_tg_rd_data0 =
		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA0);
	p->mp_fld_pci_rd_tg_phys_addr_low =
		register_get_field(p->mp_reg_pci_rd_tg_rd_data0,
				   PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW);

	/* RDDATA1: high 32 bits of the DMA physical address */
	p->mp_reg_pci_rd_tg_rd_data1 =
		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA1);
	p->mp_fld_pci_rd_tg_phys_addr_high =
		register_get_field(p->mp_reg_pci_rd_tg_rd_data1,
				   PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH);

	/* RDDATA2: request size and wait/wrap flags */
	p->mp_reg_pci_rd_tg_rd_data2 =
		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDDATA2);
	p->mp_fld_pci_rd_tg_req_size =
		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_SIZE);
	p->mp_fld_pci_rd_tg_wait =
		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WAIT);
	p->mp_fld_pci_rd_tg_wrap =
		register_get_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_WRAP);
	/* optional VF host id: query_ (not get_) — presumably NULL when absent,
	 * TODO confirm against nthw_fpga_model
	 */
	p->mp_fld_pci_rd_tg_req_hid =
		register_query_field(p->mp_reg_pci_rd_tg_rd_data2, PCI_RD_TG_TG_RDDATA2_REQ_HID);

	/* RDADDR: TG RAM entry selector */
	p->mp_reg_pci_rd_tg_rd_addr =
		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RDADDR);
	p->mp_fld_pci_rd_tg_ram_addr =
		register_get_field(p->mp_reg_pci_rd_tg_rd_addr, PCI_RD_TG_TG_RDADDR_RAM_ADDR);

	/* RD_RUN: iteration count that starts the generator */
	p->mp_reg_pci_rd_tg_rd_run =
		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_RD_RUN);
	p->mp_fld_pci_rd_tg_run_iteration =
		register_get_field(p->mp_reg_pci_rd_tg_rd_run, PCI_RD_TG_TG_RD_RUN_RD_ITERATION);

	/* CTRL: ready status flag */
	p->mp_reg_pci_rd_tg_rd_ctrl =
		module_get_register(p->mp_mod_pci_rd_tg, PCI_RD_TG_TG_CTRL);
	p->mp_fld_pci_rd_tg_ctrl_rdy =
		register_get_field(p->mp_reg_pci_rd_tg_rd_ctrl, PCI_RD_TG_TG_CTRL_TG_RD_RDY);

	return 0;
}
+
+void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & ((1UL << 32) - 1)));
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & ((1UL << 32) - 1)));
+}
+
+void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_ram_addr, n_ram_addr);
+}
+
+void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap)
+{
+	field_set_val32(p->mp_fld_pci_rd_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_rd_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_rd_tg_wrap, wrap);
+	field_flush_register(p->mp_fld_pci_rd_tg_wrap);
+}
+
+void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_rd_tg_run_iteration, n_iterations);
+}
+
+uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_rd_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
new file mode 100644
index 0000000000..b1c912f0f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_rd_tg.h
@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

/* Register/field accessor context for the PCI_RD_TG (PCIe read
 * traffic generator) FPGA module.
 */

/* NOTE(review): guard names with leading underscores are reserved
 * identifiers in C; consider NTHW_PCI_RD_TG_H_.
 */
#ifndef __NTHW_PCI_RD_TG_H__
#define __NTHW_PCI_RD_TG_H__

struct nthw_pci_rd_tg {
	nt_fpga_t *mp_fpga;             /* owning FPGA handle */
	nt_module_t *mp_mod_pci_rd_tg;  /* resolved PCI_RD_TG module */
	int mn_instance;                /* module instance number */

	int mn_param_pci_ta_tg_present; /* NT_PCI_TA_TG_PRESENT product param */

	/* RDDATA0: low 32 bits of DMA physical address */
	nt_register_t *mp_reg_pci_rd_tg_rd_data0;
	nt_field_t *mp_fld_pci_rd_tg_phys_addr_low;

	/* RDDATA1: high 32 bits of DMA physical address */
	nt_register_t *mp_reg_pci_rd_tg_rd_data1;
	nt_field_t *mp_fld_pci_rd_tg_phys_addr_high;

	/* RDDATA2: request size, optional VF host id, wait/wrap flags */
	nt_register_t *mp_reg_pci_rd_tg_rd_data2;
	nt_field_t *mp_fld_pci_rd_tg_req_size;
	nt_field_t *mp_fld_pci_rd_tg_req_hid;  /* optional; may be unresolved */
	nt_field_t *mp_fld_pci_rd_tg_wait;
	nt_field_t *mp_fld_pci_rd_tg_wrap;

	/* RDADDR: TG RAM entry selector */
	nt_register_t *mp_reg_pci_rd_tg_rd_addr;
	nt_field_t *mp_fld_pci_rd_tg_ram_addr;

	/* RD_RUN: iteration count that starts the generator */
	nt_register_t *mp_reg_pci_rd_tg_rd_run;
	nt_field_t *mp_fld_pci_rd_tg_run_iteration;

	/* CTRL: ready status */
	nt_register_t *mp_reg_pci_rd_tg_rd_ctrl;
	nt_field_t *mp_fld_pci_rd_tg_ctrl_rdy;
};

typedef struct nthw_pci_rd_tg nthw_pci_rd_tg_t;
typedef struct nthw_pci_rd_tg nthw_pci_rd_tg;

/* Allocate a zeroed context; release with nthw_pci_rd_tg_delete(). */
nthw_pci_rd_tg_t *nthw_pci_rd_tg_new(void);
void nthw_pci_rd_tg_delete(nthw_pci_rd_tg_t *p);
/* Resolve handles for instance @n_instance; 0 on success, -1 if absent.
 * With p == NULL, acts as a presence probe only.
 */
int nthw_pci_rd_tg_init(nthw_pci_rd_tg_t *p, nt_fpga_t *p_fpga, int n_instance);

void nthw_pci_rd_tg_set_phys_addr(nthw_pci_rd_tg_t *p, uint64_t n_phys_addr);
void nthw_pci_rd_tg_set_ram_addr(nthw_pci_rd_tg_t *p, int n_ram_addr);
void nthw_pci_rd_tg_set_ram_data(nthw_pci_rd_tg_t *p, uint32_t req_size, bool wait,
			    bool wrap);
void nthw_pci_rd_tg_set_run(nthw_pci_rd_tg_t *p, int n_iterations);
uint32_t nthw_pci_rd_tg_get_ctrl_rdy(nthw_pci_rd_tg_t *p);

#endif /* __NTHW_PCI_RD_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.c b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
new file mode 100644
index 0000000000..17e30a670d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_ta.h"
+
+nthw_pci_ta_t *nthw_pci_ta_new(void)
+{
+	nthw_pci_ta_t *p = malloc(sizeof(nthw_pci_ta_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+	return p;
+}
+
+void nthw_pci_ta_delete(nthw_pci_ta_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_ta_t));
+		free(p);
+	}
+}
+
/*
 * Bind a PCI_TA (PCIe test accelerator) context to FPGA module instance
 * @n_instance and resolve the control/counter register handles.
 *
 * When @p is NULL the call acts as a pure probe: it returns 0 if the
 * module instance exists and -1 otherwise, without touching any state.
 *
 * @return 0 on success, -1 if the module instance is not present.
 */
int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_TA, n_instance);

	/* probe-only mode: report presence of the module instance */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: PCI_TA %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_pci_ta = mod;

	/* product parameter defaults to 1 when absent */
	p->mn_param_pci_ta_tg_present =
		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);

	/* CONTROL: accelerator enable */
	p->mp_reg_pci_ta_ctrl = module_get_register(p->mp_mod_pci_ta, PCI_TA_CONTROL);
	p->mp_fld_pci_ta_ctrl_enable =
		register_get_field(p->mp_reg_pci_ta_ctrl, PCI_TA_CONTROL_ENABLE);

	/* counters: good packets, bad packets, length and payload errors */
	p->mp_reg_pci_ta_packet_good =
		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_GOOD);
	p->mp_fld_pci_ta_packet_good_amount =
		register_get_field(p->mp_reg_pci_ta_packet_good, PCI_TA_PACKET_GOOD_AMOUNT);

	p->mp_reg_pci_ta_packet_bad =
		module_get_register(p->mp_mod_pci_ta, PCI_TA_PACKET_BAD);
	p->mp_fld_pci_ta_packet_bad_amount =
		register_get_field(p->mp_reg_pci_ta_packet_bad, PCI_TA_PACKET_BAD_AMOUNT);

	p->mp_reg_pci_ta_length_error =
		module_get_register(p->mp_mod_pci_ta, PCI_TA_LENGTH_ERROR);
	p->mp_fld_pci_ta_length_error_amount =
		register_get_field(p->mp_reg_pci_ta_length_error, PCI_TA_LENGTH_ERROR_AMOUNT);

	p->mp_reg_pci_ta_payload_error =
		module_get_register(p->mp_mod_pci_ta, PCI_TA_PAYLOAD_ERROR);
	p->mp_fld_pci_ta_payload_error_amount =
		register_get_field(p->mp_reg_pci_ta_payload_error, PCI_TA_PAYLOAD_ERROR_AMOUNT);

	return 0;
}
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val)
+{
+	field_set_val_flush32(p->mp_fld_pci_ta_ctrl_enable, val);
+}
+
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_good_amount);
+}
+
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_packet_bad_amount);
+}
+
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_length_error_amount);
+}
+
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val)
+{
+	*val = field_get_updated(p->mp_fld_pci_ta_payload_error_amount);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_ta.h b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
new file mode 100644
index 0000000000..7968cad9fa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_ta.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_TA_H__
+#define __NTHW_PCI_TA_H__
+
+struct nthw_pci_ta {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_ta;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_ta_ctrl;
+	nt_field_t *mp_fld_pci_ta_ctrl_enable;
+	nt_register_t *mp_reg_pci_ta_packet_good;
+	nt_field_t *mp_fld_pci_ta_packet_good_amount;
+	nt_register_t *mp_reg_pci_ta_packet_bad;
+	nt_field_t *mp_fld_pci_ta_packet_bad_amount;
+	nt_register_t *mp_reg_pci_ta_length_error;
+	nt_field_t *mp_fld_pci_ta_length_error_amount;
+	nt_register_t *mp_reg_pci_ta_payload_error;
+	nt_field_t *mp_fld_pci_ta_payload_error_amount;
+};
+
+typedef struct nthw_pci_ta nthw_pci_ta_t;
+typedef struct nthw_pci_ta nthw_pci_ta;
+
+nthw_pci_ta_t *nthw_pci_ta_new(void);
+void nthw_pci_ta_delete(nthw_pci_ta_t *p);
+int nthw_pci_ta_init(nthw_pci_ta_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_ta_set_control_enable(nthw_pci_ta_t *p, uint32_t val);
+void nthw_pci_ta_get_packet_good(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_packet_bad(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_length_error(nthw_pci_ta_t *p, uint32_t *val);
+void nthw_pci_ta_get_payload_error(nthw_pci_ta_t *p, uint32_t *val);
+
+#endif /* __NTHW_PCI_TA_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
new file mode 100644
index 0000000000..f830a586b2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pci_wr_tg.h"
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void)
+{
+	nthw_pci_wr_tg_t *p = malloc(sizeof(nthw_pci_wr_tg_t));
+
+	if (p)
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+	return p;
+}
+
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pci_wr_tg_t));
+		free(p);
+	}
+}
+
/*
 * Bind a PCI_WR_TG (PCIe write traffic generator) context to FPGA module
 * instance @n_instance and resolve all register/field handles.
 *
 * When @p is NULL the call acts as a pure probe: it returns 0 if the
 * module instance exists and -1 otherwise, without touching any state.
 *
 * @return 0 on success, -1 if the module instance is not present.
 */
int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance)
{
	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCI_WR_TG, n_instance);

	/* probe-only mode: report presence of the module instance */
	if (p == NULL)
		return mod == NULL ? -1 : 0;

	if (mod == NULL) {
		NT_LOG(ERR, NTHW, "%s: PCI_WR_TG %d: no such instance\n",
		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
		return -1;
	}

	p->mp_fpga = p_fpga;
	p->mn_instance = n_instance;
	p->mp_mod_pci_wr_tg = mod;

	/* product parameter defaults to 1 when absent */
	p->mn_param_pci_ta_tg_present =
		fpga_get_product_param(p_fpga, NT_PCI_TA_TG_PRESENT, 1);

	/* WRDATA0: low 32 bits of the DMA physical address */
	p->mp_reg_pci_wr_tg_data0 =
		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA0);
	p->mp_fld_pci_wr_tg_phys_addr_low =
		register_get_field(p->mp_reg_pci_wr_tg_data0, PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW);

	/* WRDATA1: high 32 bits of the DMA physical address */
	p->mp_reg_pci_wr_tg_data1 =
		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA1);
	p->mp_fld_pci_wr_tg_phys_addr_high =
		register_get_field(p->mp_reg_pci_wr_tg_data1, PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH);

	/* WRDATA2: request size and inc/wait/wrap flags */
	p->mp_reg_pci_wr_tg_data2 =
		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRDATA2);
	p->mp_fld_pci_wr_tg_req_size =
		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_SIZE);
	p->mp_fld_pci_wr_tg_inc_mode =
		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_INC_MODE);
	p->mp_fld_pci_wr_tg_wait =
		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WAIT);
	p->mp_fld_pci_wr_tg_wrap =
		register_get_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_WRAP);
	/* optional VF host id: query_ (not get_) — presumably NULL when absent,
	 * TODO confirm against nthw_fpga_model
	 */
	p->mp_fld_pci_wr_tg_req_hid =
		register_query_field(p->mp_reg_pci_wr_tg_data2, PCI_WR_TG_TG_WRDATA2_REQ_HID);

	/* WRADDR: TG RAM entry selector */
	p->mp_reg_pci_wr_tg_addr =
		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WRADDR);
	p->mp_fld_pci_wr_tg_ram_addr =
		register_get_field(p->mp_reg_pci_wr_tg_addr, PCI_WR_TG_TG_WRADDR_RAM_ADDR);

	/* WR_RUN: iteration count that starts the generator */
	p->mp_reg_pci_wr_tg_run =
		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_WR_RUN);
	p->mp_fld_pci_wr_tg_run_iteration =
		register_get_field(p->mp_reg_pci_wr_tg_run, PCI_WR_TG_TG_WR_RUN_WR_ITERATION);

	/* CTRL: ready status flag */
	p->mp_reg_pci_wr_tg_ctrl =
		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_CTRL);
	p->mp_fld_pci_wr_tg_ctrl_rdy =
		register_get_field(p->mp_reg_pci_wr_tg_ctrl, PCI_WR_TG_TG_CTRL_TG_WR_RDY);

	/* SEQ: sequence number */
	p->mp_reg_pci_wr_tg_seq =
		module_get_register(p->mp_mod_pci_wr_tg, PCI_WR_TG_TG_SEQ);
	p->mp_fld_pci_wr_tg_seq_sequence =
		register_get_field(p->mp_reg_pci_wr_tg_seq, PCI_WR_TG_TG_SEQ_SEQUENCE);

	return 0;
}
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_low,
+			    (uint32_t)(n_phys_addr & ((1UL << 32) - 1)));
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_phys_addr_high,
+			    (uint32_t)((n_phys_addr >> 32) & ((1UL << 32) - 1)));
+}
+
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_ram_addr, n_ram_addr);
+}
+
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc)
+{
+	field_set_val32(p->mp_fld_pci_wr_tg_req_size, req_size);
+	field_set_val32(p->mp_fld_pci_wr_tg_wait, wait);
+	field_set_val32(p->mp_fld_pci_wr_tg_wrap, wrap);
+	field_set_val32(p->mp_fld_pci_wr_tg_inc_mode, inc);
+	field_flush_register(p->mp_fld_pci_wr_tg_inc_mode);
+}
+
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations)
+{
+	field_set_val_flush32(p->mp_fld_pci_wr_tg_run_iteration, n_iterations);
+}
+
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p)
+{
+	return field_get_updated(p->mp_fld_pci_wr_tg_ctrl_rdy);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
new file mode 100644
index 0000000000..535b39526e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pci_wr_tg.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCI_WR_TG_H__
+#define __NTHW_PCI_WR_TG_H__
+
+struct nthw_pci_wr_tg {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_pci_wr_tg;
+	int mn_instance;
+
+	int mn_param_pci_ta_tg_present;
+
+	nt_register_t *mp_reg_pci_wr_tg_data0;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_low;
+
+	nt_register_t *mp_reg_pci_wr_tg_data1;
+	nt_field_t *mp_fld_pci_wr_tg_phys_addr_high;
+
+	nt_register_t *mp_reg_pci_wr_tg_data2;
+	nt_field_t *mp_fld_pci_wr_tg_req_size;
+	nt_field_t *mp_fld_pci_wr_tg_req_hid;
+	nt_field_t *mp_fld_pci_wr_tg_inc_mode;
+	nt_field_t *mp_fld_pci_wr_tg_wait;
+	nt_field_t *mp_fld_pci_wr_tg_wrap;
+
+	nt_register_t *mp_reg_pci_wr_tg_addr;
+	nt_field_t *mp_fld_pci_wr_tg_ram_addr;
+
+	nt_register_t *mp_reg_pci_wr_tg_run;
+	nt_field_t *mp_fld_pci_wr_tg_run_iteration;
+
+	nt_register_t *mp_reg_pci_wr_tg_ctrl;
+	nt_field_t *mp_fld_pci_wr_tg_ctrl_rdy;
+
+	nt_register_t *mp_reg_pci_wr_tg_seq;
+	nt_field_t *mp_fld_pci_wr_tg_seq_sequence;
+};
+
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg_t;
+typedef struct nthw_pci_wr_tg nthw_pci_wr_tg;
+
+nthw_pci_wr_tg_t *nthw_pci_wr_tg_new(void);
+void nthw_pci_wr_tg_delete(nthw_pci_wr_tg_t *p);
+int nthw_pci_wr_tg_init(nthw_pci_wr_tg_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_pci_wr_tg_set_phys_addr(nthw_pci_wr_tg_t *p, uint64_t n_phys_addr);
+void nthw_pci_wr_tg_set_ram_addr(nthw_pci_wr_tg_t *p, int n_ram_addr);
+void nthw_pci_wr_tg_set_ram_data(nthw_pci_wr_tg_t *p, uint32_t req_size, bool wait,
+			    bool wrap, bool inc);
+void nthw_pci_wr_tg_set_run(nthw_pci_wr_tg_t *p, int n_iterations);
+uint32_t nthw_pci_wr_tg_get_ctrl_rdy(nthw_pci_wr_tg_t *p);
+
+#endif /* __NTHW_PCI_WR_TG_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.c b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
new file mode 100644
index 0000000000..07ad784695
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.c
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_pcie3.h"
+
+#define NTHW_TG_REF_FREQ (250000000ULL)
+
+/*
+ * Allocate a zero-initialized PCIe3 handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_pcie3_t *nthw_pcie3_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	nthw_pcie3_t *p = calloc(1, sizeof(nthw_pcie3_t));
+
+	return p;
+}
+
+/*
+ * Release a PCIe3 handle previously returned by nthw_pcie3_new().
+ * The struct is scrubbed before free to invalidate stale pointers;
+ * NULL is accepted and ignored.
+ */
+void nthw_pcie3_delete(nthw_pcie3_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_pcie3_t));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the PCIE3 module registers/fields and apply the initial setup
+ * (clear the DMA endpoint allow-masks and zero the marker addresses).
+ *
+ * When called with p == NULL the function only probes for the module:
+ * it returns 0 if the instance exists and -1 if it does not.
+ * Returns 0 on success, -1 if the module instance is absent.
+ */
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_PCIE3, n_instance);
+
+	if (p == NULL)
+		return (mod == NULL ? -1 : 0);
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: PCIE3 %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_pcie3 = mod;
+
+	/* PCIe3 */
+	p->mp_reg_stat_ctrl = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_CTRL);
+	p->mp_fld_stat_ctrl_ena =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_ENA);
+	p->mp_fld_stat_ctrl_req =
+		register_get_field(p->mp_reg_stat_ctrl, PCIE3_STAT_CTRL_STAT_REQ);
+
+	p->mp_reg_stat_rx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RX);
+	p->mp_fld_stat_rx_counter =
+		register_get_field(p->mp_reg_stat_rx, PCIE3_STAT_RX_COUNTER);
+
+	p->mp_reg_stat_tx = module_get_register(p->mp_mod_pcie3, PCIE3_STAT_TX);
+	p->mp_fld_stat_tx_counter =
+		register_get_field(p->mp_reg_stat_tx, PCIE3_STAT_TX_COUNTER);
+
+	p->mp_reg_stat_ref_clk =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_REFCLK);
+	p->mp_fld_stat_ref_clk_ref_clk =
+		register_get_field(p->mp_reg_stat_ref_clk, PCIE3_STAT_REFCLK_REFCLK250);
+
+	p->mp_reg_stat_rq_rdy =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_RDY);
+	p->mp_fld_stat_rq_rdy_counter =
+		register_get_field(p->mp_reg_stat_rq_rdy, PCIE3_STAT_RQ_RDY_COUNTER);
+
+	p->mp_reg_stat_rq_vld =
+		module_get_register(p->mp_mod_pcie3, PCIE3_STAT_RQ_VLD);
+	p->mp_fld_stat_rq_vld_counter =
+		register_get_field(p->mp_reg_stat_rq_vld, PCIE3_STAT_RQ_VLD_COUNTER);
+
+	p->mp_reg_status0 = module_get_register(p->mp_mod_pcie3, PCIE3_STATUS0);
+	p->mp_fld_status0_tags_in_use =
+		register_get_field(p->mp_reg_status0, PCIE3_STATUS0_TAGS_IN_USE);
+
+	p->mp_reg_rp_to_ep_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_RP_TO_EP_ERR);
+	p->mp_fld_rp_to_ep_err_cor =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_COR);
+	p->mp_fld_rp_to_ep_err_non_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_NONFATAL);
+	p->mp_fld_rp_to_ep_err_fatal =
+		register_get_field(p->mp_reg_rp_to_ep_err, PCIE3_RP_TO_EP_ERR_ERR_FATAL);
+
+	p->mp_reg_ep_to_rp_err =
+		module_get_register(p->mp_mod_pcie3, PCIE3_EP_TO_RP_ERR);
+	p->mp_fld_ep_to_rp_err_cor = register_get_field(p->mp_reg_ep_to_rp_err,
+			       PCIE3_EP_TO_RP_ERR_ERR_COR);
+	p->mp_fld_ep_to_rp_err_non_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_NONFATAL);
+	p->mp_fld_ep_to_rp_err_fatal =
+		register_get_field(p->mp_reg_ep_to_rp_err, PCIE3_EP_TO_RP_ERR_ERR_FATAL);
+
+	p->mp_reg_sample_time =
+		module_get_register(p->mp_mod_pcie3, PCIE3_SAMPLE_TIME);
+	p->mp_fld_sample_time =
+		register_get_field(p->mp_reg_sample_time, PCIE3_SAMPLE_TIME_SAMPLE_TIME);
+
+	p->mp_reg_pci_end_point =
+		module_get_register(p->mp_mod_pcie3, PCIE3_PCI_ENDPOINT);
+	p->mp_fld_pci_end_point_if_id =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_IF_ID);
+	p->mp_fld_pci_end_point_send_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_SEND_MSG);
+	p->mp_fld_pci_end_point_get_msg =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_GET_MSG);
+	p->mp_fld_pci_end_point_dmae_p0_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP0_ALLOW_MASK);
+	p->mp_fld_pci_end_point_dmae_p1_allow_mask =
+		register_get_field(p->mp_reg_pci_end_point, PCIE3_PCI_ENDPOINT_DMA_EP1_ALLOW_MASK);
+	if (p->mp_reg_pci_end_point)
+		register_update(p->mp_reg_pci_end_point);
+
+	p->mp_reg_pci_test0 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST0);
+	p->mp_fld_pci_test0 =
+		register_get_field(p->mp_reg_pci_test0, PCIE3_PCI_TEST0_DATA);
+	if (p->mp_reg_pci_test0)
+		register_update(p->mp_reg_pci_test0);
+
+	p->mp_reg_pci_test1 = module_get_register(p->mp_mod_pcie3, PCIE3_PCI_TEST1);
+	p->mp_fld_pci_test1 =
+		register_get_field(p->mp_reg_pci_test1, PCIE3_PCI_TEST1_DATA);
+	if (p->mp_reg_pci_test1)
+		register_update(p->mp_reg_pci_test1);
+
+	p->mp_reg_pci_e3_mark_adr_lsb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_LSB);
+	p->mp_fld_pci_e3_mark_adr_lsb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_lsb, PCIE3_MARKADR_LSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_lsb)
+		register_update(p->mp_reg_pci_e3_mark_adr_lsb);
+
+	p->mp_reg_pci_e3_mark_adr_msb =
+		module_get_register(p->mp_mod_pcie3, PCIE3_MARKADR_MSB);
+	p->mp_fld_pci_e3_mark_adr_msb_adr =
+		register_get_field(p->mp_reg_pci_e3_mark_adr_msb, PCIE3_MARKADR_MSB_ADR);
+	if (p->mp_reg_pci_e3_mark_adr_msb)
+		register_update(p->mp_reg_pci_e3_mark_adr_msb);
+
+	/* Initial setup - disable markerscheme and bifurcation */
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+
+	if (p->mp_fld_pci_e3_mark_adr_lsb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_lsb_adr, 0UL);
+
+	if (p->mp_fld_pci_e3_mark_adr_msb_adr)
+		field_set_val_flush32(p->mp_fld_pci_e3_mark_adr_msb_adr, 0UL);
+
+	if (p->mp_fld_pci_end_point_dmae_p0_allow_mask)
+		field_set_flush(p->mp_fld_pci_end_point_dmae_p0_allow_mask);
+
+	/*
+	 * NOTE(review): EP0 mask is set while EP1 mask is cleared a second
+	 * time here - presumably EP0 enabled / EP1 disabled is the intended
+	 * final state; confirm against the register specification.
+	 */
+	if (p->mp_fld_pci_end_point_dmae_p1_allow_mask)
+		field_clr_flush(p->mp_fld_pci_end_point_dmae_p1_allow_mask);
+	return 0;
+}
+
+/*
+ * Trigger a statistics sample by writing the magic constant to the
+ * SAMPLE_TIME register (value per the FPGA register specification).
+ * Always returns 0.
+ */
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p)
+{
+	field_set_val_flush32(p->mp_fld_sample_time, 0xfee1dead);
+
+	return 0;
+}
+
+/* Enable statistics collection and request an update. Always returns 0. */
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p)
+{
+	field_set_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/* Disable statistics collection; the request bit is still strobed. Always returns 0. */
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p)
+{
+	field_clr_all(p->mp_fld_stat_ctrl_ena);
+	field_set_all(p->mp_fld_stat_ctrl_req);
+	field_flush_register(p->mp_fld_stat_ctrl_req);
+	return 0;
+}
+
+/*
+ * Read the raw PCIe statistics counters into the caller's variables.
+ * The TG unit size and reference frequency are compile-time constants.
+ * All output pointers must be non-NULL; always returns 0.
+ */
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+			uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+			uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+			uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt)
+{
+	*p_rx_cnt = field_get_updated(p->mp_fld_stat_rx_counter);
+	*p_tx_cnt = field_get_updated(p->mp_fld_stat_tx_counter);
+
+	*p_ref_clk_cnt = field_get_updated(p->mp_fld_stat_ref_clk_ref_clk);
+
+	*p_tg_unit_size = NTHW_TG_CNT_SIZE;
+	*p_tg_ref_freq = NTHW_TG_REF_FREQ;
+
+	*p_tag_use_cnt = field_get_updated(p->mp_fld_status0_tags_in_use);
+
+	*p_rq_rdy_cnt = field_get_updated(p->mp_fld_stat_rq_rdy_counter);
+	*p_rq_vld_cnt = field_get_updated(p->mp_fld_stat_rq_vld_counter);
+
+	return 0;
+}
+
+/*
+ * Derive PCIe RX/TX rates and bus-utilization figures from the raw
+ * counters sampled by nthw_pcie3_get_stat().
+ * Every output parameter is written; when the reference clock counter is
+ * zero (empty sample window) all outputs are reported as zero.
+ * Always returns 0.
+ */
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util)
+{
+	uint32_t rx_cnt, tx_cnt, ref_clk_cnt;
+	uint32_t tg_unit_size, tg_ref_freq;
+	uint32_t tag_use_cnt, rq_rdy_cnt, rq_vld_cnt;
+
+	nthw_pcie3_get_stat(p, &rx_cnt, &tx_cnt, &ref_clk_cnt, &tg_unit_size,
+			  &tg_ref_freq, &tag_use_cnt, &rq_rdy_cnt, &rq_vld_cnt);
+
+	if (ref_clk_cnt) {
+		uint64_t nt_bus_util, xil_bus_util;
+		uint64_t rx_rate, tx_rate;
+
+		rx_rate = ((uint64_t)rx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_rx_rate = rx_rate;
+
+		tx_rate = ((uint64_t)tx_cnt * tg_unit_size * tg_ref_freq) /
+			 (uint64_t)ref_clk_cnt;
+		*p_pci_tx_rate = tx_rate;
+
+		*p_ref_clk_cnt = ref_clk_cnt;
+
+		*p_tag_use_cnt = tag_use_cnt;
+
+		/* utilization in parts-per-million of the sample window */
+		nt_bus_util =
+			((uint64_t)rq_vld_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_nt_bus_util = nt_bus_util;
+		xil_bus_util =
+			((uint64_t)rq_rdy_cnt * 1000000ULL) / (uint64_t)ref_clk_cnt;
+		*p_pci_xil_bus_util = xil_bus_util;
+	} else {
+		/*
+		 * Fix: also zero the rate/tag outputs so callers never read
+		 * indeterminate values when the sample window is empty.
+		 */
+		*p_pci_rx_rate = 0;
+		*p_pci_tx_rate = 0;
+		*p_ref_clk_cnt = 0;
+		*p_tag_use_cnt = 0;
+		*p_pci_nt_bus_util = 0;
+		*p_pci_xil_bus_util = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Pre-sample hook for endpoint counters - intentionally a no-op for
+ * PCIe3 (kept to satisfy the common sampling interface).
+ */
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u: empty function\n", __func__, __LINE__);
+
+	(void)p;
+	(void)epc;
+
+	return 0;
+}
+
+/*
+ * Post-sample hook: fill the endpoint counter struct from the PCIe3 rates.
+ * NOTE(review): the RX rate output is stored in epc->cur_tx and the TX
+ * rate in epc->cur_rx - presumably deliberate (endpoint vs host
+ * perspective), but confirm the intended direction convention.
+ */
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc)
+{
+	NT_LOG(DBG, NTHW, "%s:%u:\n", __func__, __LINE__);
+	assert(epc);
+	nthw_pcie3_get_stat_rate(p, &epc->cur_tx, &epc->cur_rx, &epc->n_ref_clk_cnt,
+			      &epc->n_tags_in_use, &epc->cur_pci_nt_util,
+			      &epc->cur_pci_xil_util);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_pcie3.h b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
new file mode 100644
index 0000000000..beb79a9577
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_pcie3.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PCIE3_H__
+#define __NTHW_PCIE3_H__
+
+/* Handle for the PCIE3 FPGA module: cached register/field pointers. */
+struct nthw_pcie3 {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_pcie3;	/* resolved PCIE3 module */
+	int mn_instance;	/* module instance number */
+
+	/* statistics control (enable + request strobe) */
+	nt_register_t *mp_reg_stat_ctrl;
+	nt_field_t *mp_fld_stat_ctrl_req;
+	nt_field_t *mp_fld_stat_ctrl_ena;
+
+	nt_register_t *mp_reg_stat_rx;
+	nt_field_t *mp_fld_stat_rx_counter;
+
+	nt_register_t *mp_reg_stat_tx;
+	nt_field_t *mp_fld_stat_tx_counter;
+
+	nt_register_t *mp_reg_stat_rq_rdy;
+	nt_field_t *mp_fld_stat_rq_rdy_counter;
+
+	nt_register_t *mp_reg_stat_rq_vld;
+	nt_field_t *mp_fld_stat_rq_vld_counter;
+
+	nt_register_t *mp_reg_status0;
+	nt_field_t *mp_fld_status0_tags_in_use;
+
+	nt_register_t *mp_reg_stat_ref_clk;
+	nt_field_t *mp_fld_stat_ref_clk_ref_clk;
+
+	/* root-port to end-point error signalling */
+	nt_register_t *mp_reg_rp_to_ep_err;
+	nt_field_t *mp_fld_rp_to_ep_err_cor;
+	nt_field_t *mp_fld_rp_to_ep_err_non_fatal;
+	nt_field_t *mp_fld_rp_to_ep_err_fatal;
+
+	/* end-point to root-port error signalling */
+	nt_register_t *mp_reg_ep_to_rp_err;
+	nt_field_t *mp_fld_ep_to_rp_err_cor;
+	nt_field_t *mp_fld_ep_to_rp_err_non_fatal;
+	nt_field_t *mp_fld_ep_to_rp_err_fatal;
+
+	nt_register_t *mp_reg_sample_time;
+	nt_field_t *mp_fld_sample_time;
+
+	nt_register_t *mp_reg_pci_end_point;
+	nt_field_t *mp_fld_pci_end_point_if_id;
+	nt_field_t *mp_fld_pci_end_point_send_msg;
+	nt_field_t *mp_fld_pci_end_point_get_msg;
+	nt_field_t *mp_fld_pci_end_point_dmae_p0_allow_mask;
+	nt_field_t *mp_fld_pci_end_point_dmae_p1_allow_mask;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_lsb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_lsb_adr;
+
+	nt_register_t *mp_reg_pci_e3_mark_adr_msb;
+	nt_field_t *mp_fld_pci_e3_mark_adr_msb_adr;
+
+	nt_register_t *mp_reg_pci_test0;
+	nt_field_t *mp_fld_pci_test0;
+
+	nt_register_t *mp_reg_pci_test1;
+	nt_field_t *mp_fld_pci_test1;
+
+	/*
+	 * NOTE(review): test2/test3 are never populated by nthw_pcie3_init()
+	 * in this patch - confirm whether they are set elsewhere or dead.
+	 */
+	nt_register_t *mp_reg_pci_test2;
+	nt_field_t *mp_fld_pci_test2;
+
+	nt_register_t *mp_reg_pci_test3;
+	nt_field_t *mp_fld_pci_test3;
+};
+
+typedef struct nthw_pcie3 nthw_pcie3_t;
+typedef struct nthw_pcie3 nthw_pcie3;
+
+nthw_pcie3_t *nthw_pcie3_new(void);
+void nthw_pcie3_delete(nthw_pcie3_t *p);
+int nthw_pcie3_init(nthw_pcie3_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_pcie3_trigger_sample_time(nthw_pcie3_t *p);
+
+int nthw_pcie3_stat_req_enable(nthw_pcie3_t *p);
+int nthw_pcie3_stat_req_disable(nthw_pcie3_t *p);
+
+int nthw_pcie3_get_stat(nthw_pcie3_t *p, uint32_t *p_rx_cnt, uint32_t *p_tx_cnt,
+		      uint32_t *p_ref_clk_cnt, uint32_t *p_tg_unit_size,
+		      uint32_t *p_tg_ref_freq, uint32_t *p_tag_use_cnt,
+		      uint32_t *p_rq_rdy_cnt, uint32_t *p_rq_vld_cnt);
+int nthw_pcie3_get_stat_rate(nthw_pcie3_t *p, uint64_t *p_pci_rx_rate,
+			  uint64_t *p_pci_tx_rate, uint64_t *p_ref_clk_cnt,
+			  uint64_t *p_tag_use_cnt, uint64_t *p_pci_nt_bus_util,
+			  uint64_t *p_pci_xil_bus_util);
+
+int nthw_pcie3_end_point_counters_sample_pre(nthw_pcie3_t *p,
+					struct nthw_hif_end_point_counters *epc);
+int nthw_pcie3_end_point_counters_sample_post(nthw_pcie3_t *p,
+		struct nthw_hif_end_point_counters *epc);
+#endif /* __NTHW_PCIE3_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.c b/drivers/net/ntnic/nthw/core/nthw_sdc.c
new file mode 100644
index 0000000000..0547b92c47
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.c
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_sdc.h"
+
+/*
+ * Allocate a zero-initialized SDC handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_sdc_t *nthw_sdc_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	nthw_sdc_t *p = calloc(1, sizeof(nthw_sdc_t));
+
+	return p;
+}
+
+/*
+ * Release an SDC handle; the struct is scrubbed before free to
+ * invalidate stale pointers. NULL is accepted and ignored.
+ */
+void nthw_sdc_delete(nthw_sdc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_sdc_t));
+		free(p);
+	}
+}
+
+/*
+ * Resolve the SDC module and cache its control/status register fields.
+ *
+ * When called with p == NULL the function only probes for the module:
+ * returns 0 if the instance exists, -1 otherwise.
+ * Returns 0 on success, -1 if the module instance is absent.
+ */
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SDC, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SDC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_sdc = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		/* control register fields */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CTRL);
+		p->mp_fld_ctrl_init = register_get_field(p_reg, SDC_CTRL_INIT);
+		p->mp_fld_ctrl_run_test =
+			register_get_field(p_reg, SDC_CTRL_RUN_TEST);
+		p->mp_fld_ctrl_stop_client =
+			register_get_field(p_reg, SDC_CTRL_STOP_CLIENT);
+		p->mp_fld_ctrl_test_enable =
+			register_get_field(p_reg, SDC_CTRL_TEST_EN);
+
+		/* status register fields */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_STAT);
+		p->mp_fld_stat_calib = register_get_field(p_reg, SDC_STAT_CALIB);
+		p->mp_fld_stat_cell_cnt_stopped =
+			register_get_field(p_reg, SDC_STAT_CELL_CNT_STOPPED);
+		p->mp_fld_stat_err_found =
+			register_get_field(p_reg, SDC_STAT_ERR_FOUND);
+		p->mp_fld_stat_init_done =
+			register_get_field(p_reg, SDC_STAT_INIT_DONE);
+		p->mp_fld_stat_mmcm_lock =
+			register_get_field(p_reg, SDC_STAT_MMCM_LOCK);
+		p->mp_fld_stat_pll_lock =
+			register_get_field(p_reg, SDC_STAT_PLL_LOCK);
+		p->mp_fld_stat_resetting =
+			register_get_field(p_reg, SDC_STAT_RESETTING);
+
+		/* counters and fill levels */
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT);
+		p->mp_fld_cell_cnt =
+			register_get_field(p_reg, SDC_CELL_CNT_CELL_CNT);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_CELL_CNT_PERIOD);
+		p->mp_fld_cell_cnt_period =
+			register_get_field(p_reg, SDC_CELL_CNT_PERIOD_CELL_CNT_PERIOD);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_FILL_LVL);
+		p->mp_fld_fill_level =
+			register_get_field(p_reg, SDC_FILL_LVL_FILL_LVL);
+
+		p_reg = module_get_register(p->mp_mod_sdc, SDC_MAX_FILL_LVL);
+		p->mp_fld_max_fill_level =
+			register_get_field(p_reg, SDC_MAX_FILL_LVL_MAX_FILL_LVL);
+	}
+	return 0;
+}
+
+/*
+ * Fold one status field into the running state mask.
+ * Appends the field value (masked to its bit width) to *pn_mask and
+ * returns 1 if the value differs from the expected pattern (all ones
+ * when expect_set, all zeros otherwise), else 0.
+ */
+static int nthw_sdc_fold_state(nt_field_t *p_fld, uint64_t *pn_mask,
+			     bool expect_set)
+{
+	const uint32_t val = field_get_updated(p_fld);
+	const int n_val_width = field_get_bit_width(p_fld);
+	/* 1U and the width guard avoid an undefined shift for wide fields */
+	const uint32_t val_mask = (n_val_width >= 32) ?
+				  UINT32_MAX : ((1U << n_val_width) - 1U);
+
+	*pn_mask = (*pn_mask << n_val_width) | (val & val_mask);
+	return (val != (expect_set ? val_mask : 0U)) ? 1 : 0;
+}
+
+/*
+ * Sample the SDC status fields and count how many are in an error state.
+ * *pn_result_mask receives the concatenated raw field values in the
+ * order calib, init_done, mmcm_lock, pll_lock, resetting.
+ * Returns the number of failing checks (0 = all ok), or -1 on bad args.
+ */
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask)
+{
+	int n_err_cnt = 0;
+	uint64_t n_mask = 0;
+
+	if (!p || !pn_result_mask)
+		return -1;
+
+	/* calib/init_done/mmcm_lock/pll_lock are expected all-ones */
+	n_err_cnt += nthw_sdc_fold_state(p->mp_fld_stat_calib, &n_mask, true);
+	n_err_cnt += nthw_sdc_fold_state(p->mp_fld_stat_init_done, &n_mask, true);
+	n_err_cnt += nthw_sdc_fold_state(p->mp_fld_stat_mmcm_lock, &n_mask, true);
+	n_err_cnt += nthw_sdc_fold_state(p->mp_fld_stat_pll_lock, &n_mask, true);
+	/* resetting is expected zero */
+	n_err_cnt += nthw_sdc_fold_state(p->mp_fld_stat_resetting, &n_mask, false);
+
+	*pn_result_mask = n_mask;
+
+	return n_err_cnt; /* 0 = all ok */
+}
+
+/*
+ * Poll until the SDC reaches a stable state: calibration done, init done,
+ * MMCM and PLL locked (bits set) and the resetting flag cleared.
+ * Each timed-out wait adds one to the returned error count (0 = all ok).
+ */
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval)
+{
+	int res;
+	int n_err_cnt = 0;
+
+	res = field_wait_set_all32(p->mp_fld_stat_calib, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_init_done, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_mmcm_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_set_all32(p->mp_fld_stat_pll_lock, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	res = field_wait_clr_all32(p->mp_fld_stat_resetting, n_poll_iterations,
+				 n_poll_interval);
+	if (res)
+		n_err_cnt++;
+
+	return n_err_cnt; /* 0 = all ok */
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_sdc.h b/drivers/net/ntnic/nthw/core/nthw_sdc.h
new file mode 100644
index 0000000000..e6c08ffbc3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_sdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SDC_H__
+#define __NTHW_SDC_H__
+
+/* Handle for the SDC (SDRAM controller) FPGA module. */
+struct nthw_sdc {
+	nt_fpga_t *mp_fpga;	/* owning FPGA instance */
+	nt_module_t *mp_mod_sdc;	/* resolved SDC module */
+	int mn_instance;	/* module instance number */
+
+	/* SDC_CTRL fields */
+	nt_field_t *mp_fld_ctrl_init;
+	nt_field_t *mp_fld_ctrl_run_test;
+	nt_field_t *mp_fld_ctrl_stop_client;
+	nt_field_t *mp_fld_ctrl_test_enable;
+
+	/* SDC_STAT fields */
+	nt_field_t *mp_fld_stat_calib;
+	nt_field_t *mp_fld_stat_cell_cnt_stopped;
+	nt_field_t *mp_fld_stat_err_found;
+	nt_field_t *mp_fld_stat_init_done;
+	nt_field_t *mp_fld_stat_mmcm_lock;
+	nt_field_t *mp_fld_stat_pll_lock;
+	nt_field_t *mp_fld_stat_resetting;
+
+	/* counter and fill-level fields */
+	nt_field_t *mp_fld_cell_cnt;
+	nt_field_t *mp_fld_cell_cnt_period;
+	nt_field_t *mp_fld_fill_level;
+	nt_field_t *mp_fld_max_fill_level;
+};
+
+typedef struct nthw_sdc nthw_sdc_t;
+typedef struct nthw_sdc nthw_sdc;
+
+nthw_sdc_t *nthw_sdc_new(void);
+int nthw_sdc_init(nthw_sdc_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_sdc_delete(nthw_sdc_t *p);
+
+int nthw_sdc_wait_states(nthw_sdc_t *p, const int n_poll_iterations,
+		       const int n_poll_interval);
+int nthw_sdc_get_states(nthw_sdc_t *p, uint64_t *pn_result_mask);
+
+#endif /* __NTHW_SDC_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.c b/drivers/net/ntnic/nthw/core/nthw_si5340.c
new file mode 100644
index 0000000000..3337f1f9e3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ *
+ * This file implements Si5340 clock synthesizer support.
+ * The implementation is generic and must be tailored to a specific use by the
+ * correct initialization data.
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_si5340.h"
+
+#define SI5340_WAIT_US(x) NT_OS_WAIT_USEC(x)
+
+#define SI5340_LOG_DEBUG(...) NT_LOG(DBG, NTHW, __VA_ARGS__)
+#define SI5340_LOG_INFO(...) NT_LOG(INF, NTHW, __VA_ARGS__)
+#define SI5340_LOG_WARN(...) NT_LOG(WRN, NTHW, __VA_ARGS__)
+#define SI5340_LOG_ERROR(...) NT_LOG(ERR, NTHW, __VA_ARGS__)
+
+#define SI5340_PAGE_REG_ADDR (0x01)
+
+/*
+ * Allocate a zero-initialized Si5340 handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_si5340_t *nthw_si5340_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	nthw_si5340_t *p = calloc(1, sizeof(nthw_si5340_t));
+
+	return p;
+}
+
+/*
+ * Bind the handle to an I2C controller/address and select register page 0.
+ * mn_clk_cfg starts at -1 (presumably "no profile applied yet" - confirm).
+ * NOTE(review): the return value of nthw_iic_write_data() is ignored here;
+ * a failed page select is not reported. Always returns 0.
+ */
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr)
+{
+	uint8_t data;
+
+	p->mp_nthw_iic = p_nthw_iic;
+	p->mn_iic_addr = n_iic_addr;
+	p->mn_clk_cfg = -1;
+
+	p->m_si5340_page = 0;
+	data = p->m_si5340_page;
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, SI5340_PAGE_REG_ADDR, 1,
+			  &data);
+
+	return 0;
+}
+
+/*
+ * Release an Si5340 handle; the struct is scrubbed before free.
+ * NULL is accepted and ignored.
+ */
+void nthw_si5340_delete(nthw_si5340_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_si5340_t));
+		free(p);
+	}
+}
+
+/*
+ * Read one Si5340 register over I2C (device at 7-bit address 0x74).
+ * reg_addr encodes page in the high byte and offset in the low byte;
+ * the page-select register is rewritten only when the page changes.
+ */
+static uint8_t nthw_si5340_read(nthw_si5340_t *p, uint16_t reg_addr)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+	uint8_t data;
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_read_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+	return data;
+}
+
+/*
+ * Write one Si5340 register over I2C; same paged addressing as
+ * nthw_si5340_read(). Always returns 0 (I2C errors are not propagated).
+ */
+static int nthw_si5340_write(nthw_si5340_t *p, uint16_t reg_addr, uint8_t data)
+{
+	const uint8_t offset_adr = (uint8_t)(reg_addr & 0xff);
+	uint8_t page = (uint8_t)((reg_addr >> 8) & 0xff);
+
+	/* check if we are on the right page */
+	if (page != p->m_si5340_page) {
+		nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr,
+				  SI5340_PAGE_REG_ADDR, 1, &page);
+		p->m_si5340_page = page;
+	}
+	nthw_iic_write_data(p->mp_nthw_iic, p->mn_iic_addr, offset_adr, 1, &data);
+
+	return 0;
+}
+
+/*
+ * Apply a clock-profile table to the Si5340: write each (addr, value)
+ * entry and read it back to verify, honoring the two supported table
+ * entry layouts. Register 0x0006 requires a 300 ms settle delay before
+ * the write; register 0x001C ("soft reset") is write-only and skipped
+ * during readback. Returns 0 on success, -1 on unknown format or
+ * readback mismatch.
+ */
+static int nthw_si5340_cfg(nthw_si5340_t *p, const void *p_data, int data_cnt,
+			  clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	uint16_t addr;
+	uint8_t value;
+	uint8_t ctrl_value;
+
+	NT_LOG(DBG, NTHW, "%s: %s: data_cnt = %d, dataFormat = %d\n",
+	       p_adapter_id_str, __func__, data_cnt, data_format);
+
+	for (i = 0; i < data_cnt; i++) {
+		if (data_format == CLK_PROFILE_DATA_FMT_1) {
+			addr = ((const clk_profile_data_fmt1_t *)p_data)->reg_addr;
+			value = ((const clk_profile_data_fmt1_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt1_t *)p_data) + 1;
+		} else if (data_format == CLK_PROFILE_DATA_FMT_2) {
+			addr = (uint16_t)(((const clk_profile_data_fmt2_t *)p_data)
+					  ->reg_addr);
+			value = ((const clk_profile_data_fmt2_t *)p_data)->reg_val;
+			p_data = ((const clk_profile_data_fmt2_t *)p_data) + 1;
+		} else {
+			NT_LOG(ERR, NTHW,
+			       "%s: Unhandled Si5340 data format (%d)\n",
+			       p_adapter_id_str, data_format);
+			return -1;
+		}
+
+		if (addr == 0x0006) {
+			/* Wait 300ms before continuing. See NT200E3-2-PTP_U23_Si5340_adr0_v2.h */
+			NT_OS_WAIT_USEC(300000);
+		}
+
+		nthw_si5340_write(p, addr, value);
+
+		if (addr == 0x001C) {
+			/* skip readback for "soft reset" register */
+			continue;
+		}
+
+		ctrl_value = nthw_si5340_read(p, addr);
+
+		if (ctrl_value != value) {
+			NT_LOG(ERR, NTHW,
+			       "%s: Si5340 configuration readback check failed. (Addr = 0x%04X, Write = 0x%02X, Read = 0x%02X)\n",
+			       p_adapter_id_str, addr, value, ctrl_value);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Program a clock profile and wait (up to ~5 s) for the device to report
+ * a locked, calibrated state; then read and log the design identifier.
+ * Mask 0x09 on status (0x0C) and sticky (0x11) registers - presumably
+ * the SYSINCAL and loss-of-lock flags; confirm against the Si5340
+ * register map. Returns 0 on success, -1 when lock is never reached.
+ */
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format)
+{
+	const char *const p_adapter_id_str =
+		p->mp_nthw_iic->mp_fpga->p_fpga_info->mp_adapter_id_str;
+	int i;
+	bool success = false;
+	uint8_t status, sticky;
+	uint8_t design_id[9];
+
+	(void)nthw_si5340_cfg(p, p_data, data_cnt, data_format);
+
+	/* Check if DPLL is locked and SYS is calibrated */
+	for (i = 0; i < 5; i++) {
+		status = nthw_si5340_read(p, 0x0c);
+		sticky = nthw_si5340_read(p, 0x11);
+		nthw_si5340_write(p, 0x11, 0x00);
+
+		if (((status & 0x09) == 0x00) && ((sticky & 0x09) == 0x00)) {
+			success = true;
+			break;
+		}
+		NT_OS_WAIT_USEC(1000000); /* 1 sec */
+	}
+
+	if (!success) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Si5340 configuration failed. (Status = 0x%02X, Sticky = 0x%02X)\n",
+		       p_adapter_id_str, status, sticky);
+		return -1;
+	}
+
+	/* design id lives at 0x26B..0x272; NUL-terminate for printing */
+	for (i = 0; i < (int)sizeof(design_id) - 1; i++)
+		design_id[i] = nthw_si5340_read(p, (uint16_t)(0x26B + i));
+	design_id[sizeof(design_id) - 1] = 0;
+
+	(void)design_id; /* Only used in debug mode */
+	NT_LOG(DBG, NTHW, "%s: Si5340.DesignId = %s\n", p_adapter_id_str,
+	       design_id);
+
+	return 0;
+}
+
+/* Convenience wrapper: apply a format-1 clock profile table. */
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_1);
+}
+
+/* Convenience wrapper: apply a format-2 clock profile table. */
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt)
+{
+	return nthw_si5340_config(p, p_data, data_cnt, CLK_PROFILE_DATA_FMT_2);
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_si5340.h b/drivers/net/ntnic/nthw/core/nthw_si5340.h
new file mode 100644
index 0000000000..f588b5b825
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_si5340.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SI5340_H__
+#define __NTHW_SI5340_H__
+
+#include "nthw_clock_profiles.h"
+
+/* Si5340 driver status codes */
+#define SI5340_SUCCESS (0)
+#define SI5340_FAILED (999)
+#define SI5340_TIMEOUT (666)
+
+/* Handle for one Si5340 clock synthesizer behind an I2C controller. */
+struct nthw_si5340 {
+	uint8_t mn_iic_addr;	/* 7-bit I2C device address */
+	nthw_iic_t *mp_nthw_iic;	/* I2C controller used for access */
+	int mn_clk_cfg;	/* applied clock profile; -1 = none yet */
+	uint8_t m_si5340_page;	/* currently selected register page */
+};
+
+typedef struct nthw_si5340 nthw_si5340_t;
+
+nthw_si5340_t *nthw_si5340_new(void);
+int nthw_si5340_init(nthw_si5340_t *p, nthw_iic_t *p_nthw_iic, uint8_t n_iic_addr);
+void nthw_si5340_delete(nthw_si5340_t *p);
+
+int nthw_si5340_config(nthw_si5340_t *p, const void *p_data, int data_cnt,
+		      clk_profile_data_fmt_t data_format);
+int nthw_si5340_config_fmt1(nthw_si5340_t *p, const clk_profile_data_fmt1_t *p_data,
+			  const int data_cnt);
+int nthw_si5340_config_fmt2(nthw_si5340_t *p, const clk_profile_data_fmt2_t *p_data,
+			  const int data_cnt);
+
+#endif /* __NTHW_SI5340_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.c b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
new file mode 100644
index 0000000000..454c9b73b8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+
+#include "nthw_spi_v3.h"
+
+#include <arpa/inet.h>
+
+#undef SPI_V3_DEBUG_PRINT
+
+/*
+ * Allocate a zero-initialized SPI v3 channel handle.
+ * Returns NULL on allocation failure.
+ */
+nthw_spi_v3_t *nthw_spi_v3_new(void)
+{
+	/* calloc zero-initializes in one step (was malloc + memset) */
+	nthw_spi_v3_t *p = calloc(1, sizeof(nthw_spi_v3_t));
+
+	return p;
+}
+
+/*
+ * Release an SPI v3 handle: delete the owned SPIM/SPIS sub-modules,
+ * scrub the struct and free it. NULL is accepted and ignored.
+ */
+void nthw_spi_v3_delete(nthw_spi_v3_t *p)
+{
+	if (p) {
+		if (p->mp_spim_mod) {
+			nthw_spim_delete(p->mp_spim_mod);
+			p->mp_spim_mod = NULL;
+		}
+
+		if (p->mp_spis_mod) {
+			nthw_spis_delete(p->mp_spis_mod);
+			p->mp_spis_mod = NULL;
+		}
+
+		memset(p, 0, sizeof(nthw_spi_v3_t));
+		free(p);
+	}
+}
+
+/* Set the transfer timeout used by the FIFO wait loops. Always returns 0. */
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out)
+{
+	p->m_time_out = time_out;
+	return 0;
+}
+
+/* Report the SPI protocol version implemented by this driver (always 3). */
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p)
+{
+	(void)p;
+	return 3;
+}
+
+/*
+ * Wait until Tx data have been sent after they have been placed in the
+ * Tx FIFO. Polls the SPIM empty flag every 1 ms until the FIFO drains
+ * or time_out (monotonic-counter units) elapses.
+ * Returns 0 on success, a nonzero module error, or -1 on timeout.
+ */
+static int wait_for_tx_data_sent(nthw_spim_t *p_spim_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	while (true) {
+		NT_OS_WAIT_USEC(1000); /* Every 1ms */
+
+		result = nthw_spim_get_tx_fifo_empty(p_spim_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW,
+			       "nthw_spim_get_tx_fifo_empty failed\n");
+			return result;
+		}
+
+		if (empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until Rx data have been received. Polls the SPIS Rx FIFO
+ * non-empty state every 10 ms until data arrive or time_out elapses.
+ * Returns 0 on success, a nonzero module error, or -1 on timeout.
+ */
+static int wait_for_rx_data_ready(nthw_spis_t *p_spis_mod, uint64_t time_out)
+{
+	int result;
+	bool empty;
+	uint64_t start_time;
+	uint64_t cur_time;
+
+	start_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+
+	/* Wait for data to become ready in the Rx FIFO */
+	while (true) {
+		NT_OS_WAIT_USEC(10000); /* Every 10ms */
+
+		result = nthw_spis_get_rx_fifo_empty(p_spis_mod, &empty);
+		if (result != 0) {
+			NT_LOG(WRN, NTHW, "nthw_spis_get_rx_empty failed\n");
+			return result;
+		}
+
+		if (!empty)
+			break;
+
+		cur_time = NT_OS_GET_TIME_MONOTONIC_COUNTER();
+		if ((cur_time - start_time) > time_out) {
+			NT_LOG(WRN, NTHW, "%s: Timed out\n", __func__);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef SPI_V3_DEBUG_PRINT
+/*
+ * Debug helper: hex-dump a buffer to the log, 16 bytes per line.
+ */
+static void dump_hex(uint8_t *p_data, uint16_t count)
+{
+	int i;
+	int j = 0;
+	char tmp_str[128];
+
+	for (i = 0; i < count; i++) {
+		/* bounded write ("XX " = 3 chars/byte) - was unbounded sprintf */
+		snprintf(&tmp_str[j * 3], sizeof(tmp_str) - (size_t)(j * 3),
+			 "%02X ", *(p_data++));
+		j++;
+
+		if (j == 16 || (i == count - 1)) {
+			tmp_str[j * 3 - 1] = '\0';
+			NT_LOG(DBG, NTHW, "    %s\n", tmp_str);
+			j = 0;
+		}
+	}
+}
+#endif
+
+/*
+ * Initialize the SPI v3 channel: bring up the SPIM (master) and SPIS
+ * (slave) modules for the given instance and reset both.
+ * Errors are logged but initialization proceeds; the last failing
+ * result (or 0) is returned.
+ */
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	int result;
+
+	p->mn_instance_no = n_instance_no;
+
+	nthw_spi_v3_set_timeout(p, 1);
+
+	/* Initialize SPIM module */
+	p->mp_spim_mod = nthw_spim_new();
+
+	result = nthw_spim_init(p->mp_spim_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Fix: log the function that actually failed (was "spis") */
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Initialize SPIS module */
+	p->mp_spis_mod = nthw_spis_new();
+
+	result = nthw_spis_init(p->mp_spis_mod, p_fpga, n_instance_no);
+	if (result != 0) {
+		/* Fix: log the function that actually failed (was "spim") */
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_init failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	/* Reset SPIM and SPIS modules */
+	result = nthw_spim_reset(p->mp_spim_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spim_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	result = nthw_spis_reset(p->mp_spis_mod);
+	if (result != 0) {
+		NT_LOG(ERR, NTHW, "%s: nthw_spis_reset failed: %d\n",
+		       p_adapter_id_str, result);
+	}
+
+	return result;
+}
+
+/*
+ * Send Tx data using the SPIM module and receive any data using the SPIS
+ * module. The data are sent and received being wrapped into a SPI v3
+ * container: a 32-bit header (opcode + payload size on Tx, error code +
+ * payload size on Rx) followed by the payload in 32-bit big-endian words.
+ *
+ * On input rx_buf->size is the receive buffer capacity; on return it is
+ * the number of payload bytes actually stored.
+ * Returns 0 on success, -1 when the peer reports an error code, 1 when
+ * the reply would overflow rx_buf, or a module/timeout error.
+ */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf)
+{
+	const uint16_t max_payload_rx_size = rx_buf->size;
+	int result = 0;
+
+#pragma pack(push, 1)
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t opcode;
+			uint16_t size;
+		};
+	} spi_tx_hdr;
+
+	union {
+		uint32_t raw;
+
+		struct {
+			uint16_t error_code;
+			uint16_t size;
+		};
+	} spi_rx_hdr;
+#pragma pack(pop)
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s:  Started\n", __func__);
+#endif
+
+	/* Disable transmission from Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, false);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	/* Enable SPIS module */
+	result = nthw_spis_enable(p->mp_spis_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spis_enable failed\n");
+		return result;
+	}
+
+	/* Put data into Tx FIFO */
+	spi_tx_hdr.opcode = opcode;
+	spi_tx_hdr.size = tx_buf->size;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "opcode=0x%04X tx_bufsize=0x%04X rx_bufsize=0x%04X\n",
+	       opcode, tx_buf->size, rx_buf->size);
+
+#endif /* SPI_V3_DEBUG_PRINT */
+
+	result = nthw_spim_write_tx_fifo(p->mp_spim_mod, htonl(spi_tx_hdr.raw));
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_write_tx_fifo failed\n");
+		return result;
+	}
+
+	{
+		/* Stage the payload into the Tx FIFO, 4 bytes per word;
+		 * a short tail word is zero-padded.
+		 */
+		uint8_t *tx_data = (uint8_t *)tx_buf->p_buf;
+		uint16_t tx_size = tx_buf->size;
+		uint16_t count;
+		uint32_t value;
+
+		while (tx_size > 0) {
+			if (tx_size > 4) {
+				count = 4;
+			} else {
+				count = tx_size;
+				value = 0;
+			}
+
+			memcpy(&value, tx_data, count);
+
+			result = nthw_spim_write_tx_fifo(p->mp_spim_mod,
+							htonl(value));
+			if (result != 0) {
+				NT_LOG(WRN, NTHW,
+				       "nthw_spim_write_tx_fifo failed\n");
+				return result;
+			}
+
+			tx_size = (uint16_t)(tx_size - count);
+			tx_data += count;
+		}
+	}
+
+	/* Enable Tx FIFO */
+	result = nthw_spim_enable(p->mp_spim_mod, true);
+	if (result != 0) {
+		NT_LOG(WRN, NTHW, "nthw_spim_enable failed\n");
+		return result;
+	}
+
+	result = wait_for_tx_data_sent(p->mp_spim_mod, p->m_time_out);
+	if (result != 0)
+		return result;
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "%s: SPI header and payload data have been sent\n",
+	       __func__);
+#endif
+
+	{
+		/*
+		 * Start receiving data
+		 * The first data to read is the header
+		 */
+		uint16_t rx_size = sizeof(spi_rx_hdr.raw);
+		uint8_t *rx_data = (uint8_t *)rx_buf->p_buf;
+		bool rx_hdr_read = false;
+
+		rx_buf->size = 0;
+
+		while (true) {
+			uint16_t count;
+			uint32_t value;
+
+			if (!rx_hdr_read) { /* Read the header */
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+								p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+								&spi_rx_hdr.raw);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				spi_rx_hdr.raw = ntohl(spi_rx_hdr.raw);
+				rx_size = spi_rx_hdr.size;
+				rx_hdr_read = true; /* Next time read payload */
+
+#ifdef SPI_V3_DEBUG_PRINT
+				NT_LOG(DBG, NTHW,
+				       "  spi_rx_hdr.error_code = 0x%04X, spi_rx_hdr.size = 0x%04X\n",
+				       spi_rx_hdr.error_code, spi_rx_hdr.size);
+#endif
+
+				if (spi_rx_hdr.error_code != 0) {
+					result = -1; /* NT_ERROR_AVR_OPCODE_RETURNED_ERROR; */
+					break;
+				}
+
+				if (rx_size > max_payload_rx_size) {
+					result = 1; /* NT_ERROR_AVR_RX_BUFFER_TOO_SMALL; */
+					break;
+				}
+			} else { /* Read the payload */
+				count = (uint16_t)(rx_size < 4U ? rx_size : 4U);
+
+				if (count == 0)
+					break;
+
+				result = wait_for_rx_data_ready(p->mp_spis_mod,
+							    p->m_time_out);
+				if (result != 0)
+					return result;
+
+				result = nthw_spis_read_rx_fifo(p->mp_spis_mod,
+							       &value);
+				if (result != 0) {
+					NT_LOG(WRN, NTHW,
+					       "nthw_spis_read_rx_fifo failed\n");
+					return result;
+				}
+
+				value = ntohl(value); /* Convert to host endian */
+				memcpy(rx_data, &value, count);
+				rx_buf->size = (uint16_t)(rx_buf->size + count);
+				rx_size = (uint16_t)(rx_size - count);
+				rx_data += count;
+			}
+		}
+	}
+
+#ifdef SPI_V3_DEBUG_PRINT
+	NT_LOG(DBG, NTHW, "  RxData: %d\n", rx_buf->size);
+	dump_hex(rx_buf->p_buf, rx_buf->size);
+	NT_LOG(DBG, NTHW, "%s:  Ended: %d\n", __func__, result);
+#endif
+
+	return result;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spi_v3.h b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
new file mode 100644
index 0000000000..c54379a273
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spi_v3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NT4GA_SPI_V3__
+#define __NT4GA_SPI_V3__
+
+/* Must include v1.x series. The first v1.0a only had 248 bytes of storage. v2.0x have 255 */
+#define MAX_AVR_CONTAINER_SIZE (248)
+
+/* Opcodes understood by the board-management AVR reached over the SPI link. */
+enum avr_opcodes {
+	AVR_OP_NOP = 0, /* v2 NOP command */
+	/* version handlers */
+	AVR_OP_VERSION = 1,
+	AVR_OP_SPI_VERSION = 2, /* v2.0+ command Get protocol version */
+	AVR_OP_SYSINFO = 3,
+	/* Ping handlers */
+	AVR_OP_PING = 4,
+	AVR_OP_PING_DELAY = 5,
+	/* i2c handlers */
+	AVR_OP_I2C_READ = 9,
+	AVR_OP_I2C_WRITE = 10,
+	AVR_OP_I2C_RANDOM_READ = 11,
+	/* VPD handlers */
+	AVR_OP_VPD_READ = 19,
+	AVR_OP_VPD_WRITE = 20,
+	/* SENSOR handlers */
+	AVR_OP_SENSOR_FETCH = 28,
+	/* The following commands are only relevant to V3 */
+	AVR_OP_SENSOR_MON_CONTROL = 42,
+	AVR_OP_SENSOR_MON_SETUP = 43,
+	/* special version handler */
+	AVR_OP_SYSINFO_2 = 62,
+};
+
+/* Fixed field widths of the Gen2 ident/version strings and VPD EEPROM layout. */
+#define GEN2_AVR_IDENT_SIZE (20)
+#define GEN2_AVR_VERSION_SIZE (50)
+
+#define GEN2_PN_SIZE (13)
+#define GEN2_PBA_SIZE (16)
+#define GEN2_SN_SIZE (10)
+#define GEN2_BNAME_SIZE (14)
+#define GEN2_PLATFORM_SIZE (72)
+/* 1 = psu_hw_version byte, 2 = trailing crc16 (see struct vpd_eeprom_s) */
+#define GEN2_VPD_SIZE_TOTAL                                                  \
+	(1 + GEN2_PN_SIZE + GEN2_PBA_SIZE + GEN2_SN_SIZE + GEN2_BNAME_SIZE + \
+	 GEN2_PLATFORM_SIZE + 2)
+
+/* On-EEPROM layout of the Gen2 Vital Product Data block (see GEN2_VPD_SIZE_TOTAL). */
+typedef struct vpd_eeprom_s {
+	uint8_t psu_hw_version; /* Hw revision - MUST NEVER be overwritten. */
+	/* Vital Product Data: P/N   (13bytes ascii 0-9) */
+	uint8_t vpd_pn[GEN2_PN_SIZE];
+	/* Vital Product Data: PBA   (16bytes ascii 0-9) */
+	uint8_t vpd_pba[GEN2_PBA_SIZE];
+	/* Vital Product Data: S/N   (10bytes ascii 0-9) */
+	uint8_t vpd_sn[GEN2_SN_SIZE];
+	/* Vital Product Data: Board Name (14bytes ascii) (e.g. "ntmainb1e2" or "ntfront20b1") */
+	uint8_t vpd_board_name[GEN2_BNAME_SIZE];
+	/*
+	 * Vital Product Data: Other (72bytes of MAC addresses or other stuff.. (gives up to 12 mac
+	 * addresses)
+	 */
+	uint8_t vpd_platform_section[GEN2_PLATFORM_SIZE];
+	/* CRC16 checksum of all of above. This field is not included in the checksum */
+	uint16_t crc16;
+} vpd_eeprom_t;
+
+/*
+ * Decoded board identity/capability record.
+ * NOTE(review): presumably filled from the AVR_OP_SYSINFO/VPD responses --
+ * confirm against the code that populates it (not visible here).
+ */
+typedef struct {
+	uint8_t psu_hw_revision;
+	char board_type[GEN2_BNAME_SIZE + 1];   /* +1 for NUL terminator */
+	char product_id[GEN2_PN_SIZE + 1];
+	char pba_id[GEN2_PBA_SIZE + 1];
+	char serial_number[GEN2_SN_SIZE + 1];
+	uint8_t product_family;
+	uint32_t feature_mask;
+	uint32_t invfeature_mask;
+	uint8_t no_of_macs;
+	uint8_t mac_address[6];
+	uint16_t custom_id;
+	uint8_t user_id[8];
+} board_info_t;
+
+/* Buffer descriptor (length + payload pointer) used by nthw_spi_v3_transfer(). */
+struct tx_rx_buf {
+	uint16_t size;
+	void *p_buf;
+};
+
+/* SPI v3 channel context: pairs the SPIM (master) and SPIS (slave) modules. */
+struct nthw__spi__v3 {
+	int m_time_out;         /* per-operation timeout used by the transfer loop */
+	int mn_instance_no;
+	nthw_spim_t *mp_spim_mod;
+	nthw_spis_t *mp_spis_mod;
+};
+
+typedef struct nthw__spi__v3 nthw_spi_v3_t;
+typedef struct nthw__spi__v3 nthw_spi_v3;
+
+/* Constructor / initializer / destructor for the SPI v3 channel. */
+nthw_spi_v3_t *nthw_spi_v3_new(void);
+int nthw_spi_v3_init(nthw_spi_v3_t *p, nt_fpga_t *p_fpga, int n_instance_no);
+void nthw_spi_v3_delete(nthw_spi_v3_t *p);
+
+int nthw_spi_v3_set_timeout(nthw_spi_v3_t *p, int time_out);
+int nthw_spi_v3_get_version(nthw_spi_v3_t *p);
+/* Execute one opcode: send tx_buf, receive reply into rx_buf. */
+int nthw_spi_v3_transfer(nthw_spi_v3_t *p, uint16_t opcode,
+			 struct tx_rx_buf *tx_buf, struct tx_rx_buf *rx_buf);
+
+#endif /* __NT4GA_SPI_V3__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.c b/drivers/net/ntnic/nthw/core/nthw_spim.c
new file mode 100644
index 0000000000..ece7db26e1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spim.h"
+
+nthw_spim_t *nthw_spim_new(void)
+{
+	/* Allocate a zero-initialized SPIM context; NULL on allocation failure. */
+	nthw_spim_t *p = calloc(1, sizeof(nthw_spim_t));
+
+	return p;
+}
+
+/*
+ * Bind the SPIM (SPI master) context to FPGA module instance n_instance and
+ * resolve all register/field handles.
+ * Probe mode: when called with p == NULL, only checks module presence and
+ * returns 0 if found, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spim = mod;
+
+	/* SPIM is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spim, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spim, SPIM_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIM_SRR_RST);
+
+	/* Control register: loopback, enable, TX/RX FIFO resets */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spim, SPIM_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIM_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIM_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIM_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIM_CR_RXRST);
+
+	/* Status register: completion flag plus TX/RX FIFO state and levels */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spim, SPIM_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIM_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIM_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIM_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIM_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIM_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIM_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIM_SR_RXLVL);
+
+	/* Data transmit / data receive registers */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spim, SPIM_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIM_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spim, SPIM_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIM_DRR_DRR);
+
+	/* Configuration register: clock prescaler */
+	p->mp_reg_cfg = module_get_register(p->mp_mod_spim, SPIM_CFG);
+	p->mp_fld_cfg_pre = register_get_field(p->mp_reg_cfg, SPIM_CFG_PRE);
+
+	return 0;
+}
+
+void nthw_spim_delete(nthw_spim_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the context before returning the memory. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Software-reset the SPIM core by writing the magic reset value. Always returns 0. */
+uint32_t nthw_spim_reset(nthw_spim_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIM enable bit via read-modify-write. Always returns 0. */
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Push one 32-bit word into the SPIM TX FIFO (write + flush). Always returns 0. */
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data)
+{
+	field_set_val_flush32(p->mp_fld_dtr_dtr, n_data);
+	return 0;
+}
+
+/* Report whether the SPIM TX FIFO is empty via *pb_empty. Always returns 0. */
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = (field_get_updated(p->mp_fld_sr_txempty) != 0);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spim.h b/drivers/net/ntnic/nthw/core/nthw_spim.h
new file mode 100644
index 0000000000..713751e563
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spim.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIM_H__
+#define __NTHW_SPIM_H__
+
+/* SPIM (SPI master) context: FPGA module handle plus cached register/field handles. */
+struct nthw_spim {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spim;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control (loopback, enable, FIFO resets) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+
+	/* SR: status (done flag, FIFO empty/full/level) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+
+	/* DTR/DRR: data transmit / data receive */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+	/* CFG: clock prescaler */
+	nt_register_t *mp_reg_cfg;
+	nt_field_t *mp_fld_cfg_pre;
+};
+
+typedef struct nthw_spim nthw_spim_t;
+typedef struct nthw_spim nthw_spim;
+
+nthw_spim_t *nthw_spim_new(void);
+int nthw_spim_init(nthw_spim_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spim_delete(nthw_spim_t *p);
+
+uint32_t nthw_spim_reset(nthw_spim_t *p);
+uint32_t nthw_spim_enable(nthw_spim_t *p, bool b_enable);
+uint32_t nthw_spim_get_tx_fifo_empty(nthw_spim_t *p, bool *pb_empty);
+uint32_t nthw_spim_write_tx_fifo(nthw_spim_t *p, uint32_t n_data);
+
+#endif /* __NTHW_SPIM_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.c b/drivers/net/ntnic/nthw/core/nthw_spis.c
new file mode 100644
index 0000000000..8799584194
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_spis.h"
+
+nthw_spis_t *nthw_spis_new(void)
+{
+	/* Allocate a zero-initialized SPIS context; NULL on allocation failure. */
+	nthw_spis_t *p = calloc(1, sizeof(nthw_spis_t));
+
+	return p;
+}
+
+/*
+ * Bind the SPIS (SPI slave) context to FPGA module instance n_instance and
+ * resolve all register/field handles.
+ * Probe mode: when called with p == NULL, only checks module presence and
+ * returns 0 if found, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_SPIS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: SPIS %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_spis = mod;
+
+	/* SPIS is a primary communication channel - turn off debug by default */
+	module_set_debug_mode(p->mp_mod_spis, 0x00);
+
+	/* Software reset register */
+	p->mp_reg_srr = module_get_register(p->mp_mod_spis, SPIS_SRR);
+	p->mp_fld_srr_rst = register_get_field(p->mp_reg_srr, SPIS_SRR_RST);
+
+	/* Control register: loopback, enable, FIFO resets, debug */
+	p->mp_reg_cr = module_get_register(p->mp_mod_spis, SPIS_CR);
+	p->mp_fld_cr_loop = register_get_field(p->mp_reg_cr, SPIS_CR_LOOP);
+	p->mp_fld_cr_en = register_get_field(p->mp_reg_cr, SPIS_CR_EN);
+	p->mp_fld_cr_txrst = register_get_field(p->mp_reg_cr, SPIS_CR_TXRST);
+	p->mp_fld_cr_rxrst = register_get_field(p->mp_reg_cr, SPIS_CR_RXRST);
+	p->mp_fld_cr_debug = register_get_field(p->mp_reg_cr, SPIS_CR_DEBUG);
+
+	/* Status register: FIFO state plus frame/read/write error flags */
+	p->mp_reg_sr = module_get_register(p->mp_mod_spis, SPIS_SR);
+	p->mp_fld_sr_done = register_get_field(p->mp_reg_sr, SPIS_SR_DONE);
+	p->mp_fld_sr_txempty = register_get_field(p->mp_reg_sr, SPIS_SR_TXEMPTY);
+	p->mp_fld_sr_rxempty = register_get_field(p->mp_reg_sr, SPIS_SR_RXEMPTY);
+	p->mp_fld_sr_txfull = register_get_field(p->mp_reg_sr, SPIS_SR_TXFULL);
+	p->mp_fld_sr_rxfull = register_get_field(p->mp_reg_sr, SPIS_SR_RXFULL);
+	p->mp_fld_sr_txlvl = register_get_field(p->mp_reg_sr, SPIS_SR_TXLVL);
+	p->mp_fld_sr_rxlvl = register_get_field(p->mp_reg_sr, SPIS_SR_RXLVL);
+	p->mp_fld_sr_frame_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_FRAME_ERR);
+	p->mp_fld_sr_read_err = register_get_field(p->mp_reg_sr, SPIS_SR_READ_ERR);
+	p->mp_fld_sr_write_err =
+		register_get_field(p->mp_reg_sr, SPIS_SR_WRITE_ERR);
+
+	/* Data transmit / data receive registers */
+	p->mp_reg_dtr = module_get_register(p->mp_mod_spis, SPIS_DTR);
+	p->mp_fld_dtr_dtr = register_get_field(p->mp_reg_dtr, SPIS_DTR_DTR);
+
+	p->mp_reg_drr = module_get_register(p->mp_mod_spis, SPIS_DRR);
+	p->mp_fld_drr_drr = register_get_field(p->mp_reg_drr, SPIS_DRR_DRR);
+
+	/* Sensor result RAM access: address/count control plus data window */
+	p->mp_reg_ram_ctrl = module_get_register(p->mp_mod_spis, SPIS_RAM_CTRL);
+	p->mp_fld_ram_ctrl_adr =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_ADR);
+	p->mp_fld_ram_ctrl_cnt =
+		register_get_field(p->mp_reg_ram_ctrl, SPIS_RAM_CTRL_CNT);
+
+	p->mp_reg_ram_data = module_get_register(p->mp_mod_spis, SPIS_RAM_DATA);
+	p->mp_fld_ram_data_data =
+		register_get_field(p->mp_reg_ram_data, SPIS_RAM_DATA_DATA);
+
+	return 0;
+}
+
+void nthw_spis_delete(nthw_spis_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the context before returning the memory. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Software-reset the SPIS core by writing the magic reset value. Always returns 0. */
+uint32_t nthw_spis_reset(nthw_spis_t *p)
+{
+	register_update(p->mp_reg_srr);
+	field_set_val32(p->mp_fld_srr_rst,
+		       0x0A); /* 0x0A hardcoded value - see doc */
+	register_flush(p->mp_reg_srr, 1);
+
+	return 0;
+}
+
+/* Set or clear the SPIS enable bit via read-modify-write. Always returns 0. */
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_cr_en);
+	if (b_enable)
+		field_set_all(p->mp_fld_cr_en);
+	else
+		field_clr_all(p->mp_fld_cr_en);
+	field_flush_register(p->mp_fld_cr_en);
+
+	return 0;
+}
+
+/* Report whether the SPIS RX FIFO is empty via *pb_empty. Always returns 0. */
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty)
+{
+	assert(pb_empty);
+
+	*pb_empty = (field_get_updated(p->mp_fld_sr_rxempty) != 0);
+
+	return 0;
+}
+
+/* Pop one 32-bit word from the SPIS RX FIFO into *p_data. Always returns 0. */
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data)
+{
+	assert(p_data);
+
+	*p_data = field_get_updated(p->mp_fld_drr_drr);
+
+	return 0;
+}
+
+/*
+ * Read one sensor result word from the SPIS result RAM at index n_result_idx.
+ * Always returns 0.
+ *
+ * NOTE(review): there is no completion poll between flushing RAM_CTRL and
+ * reading RAM_DATA -- presumably the updated read of RAM_DATA is a
+ * sufficient handshake; confirm against the register documentation.
+ */
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result)
+{
+	assert(p_sensor_result);
+
+	field_set_val32(p->mp_fld_ram_ctrl_adr, n_result_idx);
+	field_set_val32(p->mp_fld_ram_ctrl_cnt, 1);
+	register_flush(p->mp_reg_ram_ctrl, 1);
+
+	*p_sensor_result = field_get_updated(p->mp_fld_ram_data_data);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_spis.h b/drivers/net/ntnic/nthw/core/nthw_spis.h
new file mode 100644
index 0000000000..2ebe840c9e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_spis.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_SPIS_H__
+#define __NTHW_SPIS_H__
+
+/* SPIS (SPI slave) context: FPGA module handle plus cached register/field handles. */
+struct nthw_spis {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_spis;
+	int mn_instance;
+
+	/* SRR: software reset */
+	nt_register_t *mp_reg_srr;
+	nt_field_t *mp_fld_srr_rst;
+
+	/* CR: control (loopback, enable, FIFO resets, debug) */
+	nt_register_t *mp_reg_cr;
+	nt_field_t *mp_fld_cr_loop;
+	nt_field_t *mp_fld_cr_en;
+	nt_field_t *mp_fld_cr_txrst;
+	nt_field_t *mp_fld_cr_rxrst;
+	nt_field_t *mp_fld_cr_debug;
+
+	/* SR: status (FIFO state plus frame/read/write error flags) */
+	nt_register_t *mp_reg_sr;
+	nt_field_t *mp_fld_sr_done;
+	nt_field_t *mp_fld_sr_txempty;
+	nt_field_t *mp_fld_sr_rxempty;
+	nt_field_t *mp_fld_sr_txfull;
+	nt_field_t *mp_fld_sr_rxfull;
+	nt_field_t *mp_fld_sr_txlvl;
+	nt_field_t *mp_fld_sr_rxlvl;
+	nt_field_t *mp_fld_sr_frame_err;
+	nt_field_t *mp_fld_sr_read_err;
+	nt_field_t *mp_fld_sr_write_err;
+
+	/* DTR/DRR: data transmit / data receive */
+	nt_register_t *mp_reg_dtr;
+	nt_field_t *mp_fld_dtr_dtr;
+
+	nt_register_t *mp_reg_drr;
+	nt_field_t *mp_fld_drr_drr;
+
+	/* Sensor result RAM: address/count control plus data window */
+	nt_register_t *mp_reg_ram_ctrl;
+	nt_field_t *mp_fld_ram_ctrl_adr;
+	nt_field_t *mp_fld_ram_ctrl_cnt;
+
+	nt_register_t *mp_reg_ram_data;
+	nt_field_t *mp_fld_ram_data_data;
+};
+
+typedef struct nthw_spis nthw_spis_t;
+typedef struct nthw_spis nthw_spis;
+
+nthw_spis_t *nthw_spis_new(void);
+int nthw_spis_init(nthw_spis_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_spis_delete(nthw_spis_t *p);
+
+uint32_t nthw_spis_reset(nthw_spis_t *p);
+uint32_t nthw_spis_enable(nthw_spis_t *p, bool b_enable);
+uint32_t nthw_spis_get_rx_fifo_empty(nthw_spis_t *p, bool *pb_empty);
+uint32_t nthw_spis_read_rx_fifo(nthw_spis_t *p, uint32_t *p_data);
+uint32_t nthw_spis_read_sensor(nthw_spis_t *p, uint8_t n_result_idx,
+			      uint32_t *p_sensor_result);
+
+#endif /* __NTHW_SPIS_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c
new file mode 100644
index 0000000000..8ea4a4c440
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_tsm.h"
+
+nthw_tsm_t *nthw_tsm_new(void)
+{
+	/* Allocate a zero-initialized TSM context; NULL on allocation failure. */
+	nthw_tsm_t *p = calloc(1, sizeof(nthw_tsm_t));
+
+	return p;
+}
+
+void nthw_tsm_delete(nthw_tsm_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the context before returning the memory. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the TSM (time-stamp module) context to FPGA module instance
+ * n_instance and resolve all register/field handles.
+ * Probe mode: when called with p == NULL, only checks module presence and
+ * returns 0 if found, -1 otherwise.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_TSM, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_tsm = mod;
+
+	{
+		nt_register_t *p_reg;
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_CONFIG);
+		p->mp_fld_config_ts_format =
+			register_get_field(p_reg, TSM_CONFIG_TS_FORMAT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL);
+		p->mp_fld_timer_ctrl_timer_en_t0 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0);
+		p->mp_fld_timer_ctrl_timer_en_t1 =
+			register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T0);
+		p->mp_fld_timer_timer_t0_max_count =
+			register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIMER_T1);
+		p->mp_fld_timer_timer_t1_max_count =
+			register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT);
+
+		/* Reuse each register handle instead of looking it up twice. */
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_LO);
+		p->mp_reg_time_lo = p_reg;
+		p->mp_fld_time_lo = register_get_field(p_reg, TSM_TIME_LO_NS);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TIME_HI);
+		p->mp_reg_time_hi = p_reg;
+		p->mp_fld_time_hi = register_get_field(p_reg, TSM_TIME_HI_SEC);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_LO);
+		p->mp_reg_ts_lo = p_reg;
+		p->mp_fld_ts_lo = register_get_field(p_reg, TSM_TS_LO_TIME);
+
+		p_reg = module_get_register(p->mp_mod_tsm, TSM_TS_HI);
+		p->mp_reg_ts_hi = p_reg;
+		p->mp_fld_ts_hi = register_get_field(p_reg, TSM_TS_HI_TIME);
+	}
+	return 0;
+}
+
+/*
+ * Read the current 64-bit timestamp (TS_HI:TS_LO) from the TSM module.
+ * Returns 0 on success, -1 if p_ts is NULL.
+ *
+ * NOTE(review): the two 32-bit halves are fetched in separate register
+ * reads, so a carry from TS_LO into TS_HI between the reads could yield a
+ * torn value -- confirm whether the hardware latches TS_HI on the TS_LO
+ * read.
+ */
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts)
+{
+	uint32_t n_ts_lo, n_ts_hi;
+
+	if (!p_ts)
+		return -1;
+
+	n_ts_lo = field_get_updated(p->mp_fld_ts_lo);
+	n_ts_hi = field_get_updated(p->mp_fld_ts_hi);
+
+	/* p_ts was validated above; the former second NULL check was dead code. */
+	*p_ts = (((uint64_t)n_ts_hi) << 32) | n_ts_lo;
+
+	return 0;
+}
+
+/*
+ * Read the current 64-bit wall-clock time (TIME_HI seconds : TIME_LO ns)
+ * from the TSM module. Returns 0 on success, -1 if p_time is NULL.
+ *
+ * NOTE(review): the two 32-bit halves are fetched in separate register
+ * reads and may tear across a second boundary -- confirm whether the
+ * hardware latches TIME_HI on the TIME_LO read.
+ */
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time)
+{
+	uint32_t n_time_lo, n_time_hi;
+
+	if (!p_time)
+		return -1;
+
+	n_time_lo = field_get_updated(p->mp_fld_time_lo);
+	n_time_hi = field_get_updated(p->mp_fld_time_hi);
+
+	/* p_time was validated above; the former second NULL check was dead code. */
+	*p_time = (((uint64_t)n_time_hi) << 32) | n_time_lo;
+
+	return 0;
+}
+
+/*
+ * Set the TSM wall-clock time: low 32 bits (TIME_LO, ns field) then high
+ * 32 bits (TIME_HI, seconds field). Always returns 0.
+ *
+ * NOTE(review): the halves are flushed in two separate writes, so the
+ * hardware clock briefly holds a mixed value -- confirm this is acceptable
+ * to callers or that the hardware latches the pair atomically.
+ */
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time)
+{
+	field_set_val_flush32(p->mp_fld_time_lo, (n_time & 0xFFFFFFFF));
+	field_set_val_flush32(p->mp_fld_time_hi,
+			    (uint32_t)((n_time >> 32) & 0xFFFFFFFF));
+	return 0;
+}
+
+/* Enable or disable timer T0 via read-modify-write of TIMER_CTRL. Always returns 0. */
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t0);
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t0);
+	return 0;
+}
+
+/* Program the T0 period in ns. Always returns 0. */
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T0 - stat toggle timer */
+	field_update_register(p->mp_fld_timer_timer_t0_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count,
+			    n_timer_val); /* ns (50*1000*1000) */
+	return 0;
+}
+
+/* Enable or disable timer T1 via read-modify-write of TIMER_CTRL. Always returns 0. */
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable)
+{
+	field_update_register(p->mp_fld_timer_ctrl_timer_en_t1);
+	if (b_enable)
+		field_set_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+	else
+		field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t1);
+	return 0;
+}
+
+/* Program the T1 period in ns. Always returns 0. */
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val)
+{
+	/* Timer T1 - keep alive timer */
+	field_update_register(p->mp_fld_timer_timer_t1_max_count);
+	field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count,
+			    n_timer_val); /* ns (100*1000*1000) */
+	return 0;
+}
+
+/* Select the timestamp format written to TSM_CONFIG. Always returns 0. */
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val)
+{
+	field_update_register(p->mp_fld_config_ts_format);
+	/* 0x1: Native - 10ns units, start date: 1970-01-01. */
+	field_set_val_flush32(p->mp_fld_config_ts_format, n_val);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.h b/drivers/net/ntnic/nthw/core/nthw_tsm.h
new file mode 100644
index 0000000000..590e04c312
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_tsm.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_TSM_H__
+#define __NTHW_TSM_H__
+
+/* TSM (time-stamp module) context: FPGA module handle plus cached register/field handles. */
+struct nthw_tsm {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_tsm;
+	int mn_instance;
+
+	/* CONFIG: timestamp format selection */
+	nt_field_t *mp_fld_config_ts_format;
+
+	/* TIMER_CTRL: per-timer enable bits */
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t0;
+	nt_field_t *mp_fld_timer_ctrl_timer_en_t1;
+
+	nt_field_t *mp_fld_timer_timer_t0_max_count;
+
+	nt_field_t *mp_fld_timer_timer_t1_max_count;
+
+	/* TS_LO/TS_HI: 64-bit timestamp, read as two 32-bit halves */
+	nt_register_t *mp_reg_ts_lo;
+	nt_field_t *mp_fld_ts_lo;
+
+	nt_register_t *mp_reg_ts_hi;
+	nt_field_t *mp_fld_ts_hi;
+
+	/* TIME_LO/TIME_HI: wall-clock time (ns / seconds halves) */
+	nt_register_t *mp_reg_time_lo;
+	nt_field_t *mp_fld_time_lo;
+
+	nt_register_t *mp_reg_time_hi;
+	nt_field_t *mp_fld_time_hi;
+};
+
+typedef struct nthw_tsm nthw_tsm_t;
+typedef struct nthw_tsm nthw_tsm;
+
+nthw_tsm_t *nthw_tsm_new(void);
+void nthw_tsm_delete(nthw_tsm_t *p);
+int nthw_tsm_init(nthw_tsm_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts);
+int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time);
+int nthw_tsm_set_time(nthw_tsm_t *p, uint64_t n_time);
+
+int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable);
+int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val);
+
+int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val);
+
+#endif /* __NTHW_TSM_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.c b/drivers/net/ntnic/nthw/nthw_dbs.c
new file mode 100644
index 0000000000..9fc853da73
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.c
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_dbs.h"
+
+#undef DBS_PRINT_REGS
+
+/*
+ * Shadow-register helpers (defined below): each set_shadow_* updates the
+ * driver-side copy of a DBS table entry, and the matching flush_* writes
+ * that copy out to the FPGA.
+ */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs);
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec,
+			       uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
+nthw_dbs_t *nthw_dbs_new(void)
+{
+	/* Allocate a zero-initialized DBS context; NULL on allocation failure. */
+	nthw_dbs_t *p = calloc(1, sizeof(nthw_dbs_t));
+
+	return p;
+}
+
+void nthw_dbs_delete(nthw_dbs_t *p)
+{
+	if (!p)
+		return;
+	/* Scrub the context before returning the memory. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_DBS, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: DBS %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_dbs = mod;
+
+	p->mn_param_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+	if (p->mn_param_dbs_present == 0) {
+		NT_LOG(WRN, NTHW,
+		       "%s: DBS %d: logical error: module found but not flagged at present\n",
+		       p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
+	}
+
+	p->mp_reg_rx_control = module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy =
+		register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr = register_query_field(p->mp_reg_rx_init_val,
+				       DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy =
+		register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr = register_query_field(p->mp_reg_tx_init_val,
+				       DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	p->mp_reg_rx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_rx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
+	p->mp_reg_rx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_rx_descriptor_reader_control, DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		register_get_field(p->mp_reg_tx_descriptor_reader_control, DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		register_query_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_queue_property_control =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data =
+		module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
+	/* HW QoS Tx rate limiting policing RFC2697/RFC4111 */
+	p->mp_reg_tx_queue_qos_control =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_CTRL);
+	p->mp_reg_tx_queue_qos_data =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_DATA);
+	if (p->mp_reg_tx_queue_qos_control) {
+		p->mp_reg_tx_queue_qos_control_adr =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_ADR);
+		p->mp_reg_tx_queue_qos_control_cnt =
+			register_query_field(p->mp_reg_tx_queue_qos_control, DBS_TX_QOS_CTRL_CNT);
+
+		if (p->mp_reg_tx_queue_qos_data) {
+			p->mp_reg_tx_queue_qos_data_en =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_EN);
+			p->mp_reg_tx_queue_qos_data_ir =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_IR);
+			p->mp_reg_tx_queue_qos_data_bs =
+				register_query_field(p->mp_reg_tx_queue_qos_data,
+						     DBS_TX_QOS_DATA_BS);
+		}
+	}
+
+	p->mp_reg_tx_queue_qos_rate =
+		module_query_register(p->mp_mod_dbs, DBS_TX_QOS_RATE);
+	if (p->mp_reg_tx_queue_qos_rate) {
+		p->mp_reg_tx_queue_qos_rate_mul =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_MUL);
+		p->mp_reg_tx_queue_qos_rate_div =
+			register_query_field(p->mp_reg_tx_queue_qos_rate, DBS_TX_QOS_RATE_DIV);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the DBS RX control register to its defaults: last-queue 0, all
+ * queues and writers disabled, availability-monitor scan speed 8 and
+ * used-writer update speed 5.
+ * NOTE(review): the speed constants look like hardware defaults -- confirm
+ * against the DBS register documentation.
+ * Always returns 0.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Reset the DBS TX control register to its defaults.
+ * NOTE(review): the scan/update speeds here (5/8) are swapped relative to
+ * the RX reset above (8/5) -- presumably intentional, but worth verifying
+ * against the hardware documentation.
+ * Always returns 0.
+ */
+int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Full DBS reset: restore the RX and TX control registers to defaults,
+ * then clear every per-queue hardware memory bank (AM/UW/DR/QP/QOS) and
+ * the corresponding driver-side shadow copies by writing all-zero entries
+ * and flushing each one to the device.
+ */
+void dbs_reset(nthw_dbs_t *p)
+{
+	uint32_t i;
+
+	NT_LOG(DBG, NTHW, "NthwDbs::%s: resetting DBS", __func__);
+
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+
+		set_shadow_tx_qos_data(p, i, 0, 0, 0);
+		flush_tx_qos_data(p, i);
+	}
+}
+
+/*
+ * Program the DBS RX control register from the supplied values and flush
+ * it to hardware. With DBS_PRINT_REGS defined, every parameter is dumped
+ * to stdout first for debugging. Always returns 0.
+ */
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	printf("rx_queue_enable %u\n", rx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_rx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_rx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_rx_control_rx_queues_enable, rx_queue_enable);
+	register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all RX control fields into the caller-supplied out-parameters.
+ * Reads the cached field values (field_get_val32); no register refresh is
+ * performed here. Always returns 0.
+ */
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_rx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_rx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_rx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_rx_control_used_writer_update_speed);
+	*rx_queue_enable = field_get_val32(p->mp_fld_rx_control_rx_queues_enable);
+	return 0;
+}
+
+/*
+ * Program the DBS TX control register from the supplied values and flush
+ * it to hardware. Mirrors set_rx_control(). With DBS_PRINT_REGS defined,
+ * every parameter is dumped to stdout first. Always returns 0.
+ */
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable)
+{
+#ifdef DBS_PRINT_REGS
+	printf("last_queue %u\n", last_queue);
+	printf("avail_monitor_enable %u\n", avail_monitor_enable);
+	printf("avail_monitor_speed %u\n", avail_monitor_speed);
+	printf("used_write_enable %u\n", used_write_enable);
+	printf("used_write_speed %u\n", used_write_speed);
+	/* Fix: tx_queue_enable was missing from the debug dump, unlike the
+	 * equivalent rx_queue_enable printout in set_rx_control().
+	 */
+	printf("tx_queue_enable %u\n", tx_queue_enable);
+#endif
+
+	field_set_val32(p->mp_fld_tx_control_last_queue, last_queue);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, avail_monitor_enable);
+	field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed,
+		       avail_monitor_speed);
+	field_set_val32(p->mp_fld_tx_control_used_write_enable, used_write_enable);
+	field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, used_write_speed);
+	field_set_val32(p->mp_fld_tx_control_tx_queues_enable, tx_queue_enable);
+	register_flush(p->mp_reg_tx_control, 1);
+	return 0;
+}
+
+/*
+ * Read back all TX control fields into the caller-supplied out-parameters.
+ * Uses the cached field values (field_get_val32); no register refresh is
+ * performed here. Always returns 0.
+ */
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable)
+{
+	*last_queue = field_get_val32(p->mp_fld_tx_control_last_queue);
+	*avail_monitor_enable =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_enable);
+	*avail_monitor_speed =
+		field_get_val32(p->mp_fld_tx_control_avail_monitor_scan_speed);
+	*used_write_enable = field_get_val32(p->mp_fld_tx_control_used_write_enable);
+	*used_write_speed =
+		field_get_val32(p->mp_fld_tx_control_used_writer_update_speed);
+	*tx_queue_enable = field_get_val32(p->mp_fld_tx_control_tx_queues_enable);
+	return 0;
+}
+
+/*
+ * Start (or stop) RX queue initialization. If the optional RX_INIT_VAL
+ * register exists in this FPGA image, the start index/pointer are written
+ * and flushed first; then init/queue are written to RX_INIT and flushed.
+ * Always returns 0.
+ */
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_rx_init_val) {
+		field_set_val32(p->mp_fld_rx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_rx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_rx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_rx_init_init, init);
+	field_set_val32(p->mp_fld_rx_init_queue, queue);
+	register_flush(p->mp_reg_rx_init, 1);
+	return 0;
+}
+
+/*
+ * Read the cached RX_INIT init/queue/busy fields. Note: cached values,
+ * not a fresh register read. Always returns 0.
+ */
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_rx_init_init);
+	*queue = field_get_val32(p->mp_fld_rx_init_queue);
+	*busy = field_get_val32(p->mp_fld_rx_init_busy);
+	return 0;
+}
+
+/*
+ * Start (or stop) TX queue initialization; TX counterpart of set_rx_init().
+ * Always returns 0.
+ */
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue)
+{
+	if (p->mp_reg_tx_init_val) {
+		field_set_val32(p->mp_fld_tx_init_val_idx, start_idx);
+		field_set_val32(p->mp_fld_tx_init_val_ptr, start_ptr);
+		register_flush(p->mp_reg_tx_init_val, 1);
+	}
+	field_set_val32(p->mp_fld_tx_init_init, init);
+	field_set_val32(p->mp_fld_tx_init_queue, queue);
+	register_flush(p->mp_reg_tx_init, 1);
+	return 0;
+}
+
+/*
+ * Read the cached TX_INIT init/queue/busy fields. Always returns 0.
+ */
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
+{
+	*init = field_get_val32(p->mp_fld_tx_init_init);
+	*queue = field_get_val32(p->mp_fld_tx_init_queue);
+	*busy = field_get_val32(p->mp_fld_tx_init_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for an RX queue via the optional RX_IDLE register.
+ * Returns -ENOTSUP when the register is absent in this FPGA image,
+ * 0 otherwise.
+ */
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_idle_idle, idle);
+	field_set_val32(p->mp_fld_rx_idle_queue, queue);
+	register_flush(p->mp_reg_rx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read idle/busy state from RX_IDLE. Uses field_get_updated (fresh read
+ * from hardware, unlike the *_get_*_control getters). *queue is always
+ * reported as 0. Returns -ENOTSUP when the register is absent.
+ */
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_rx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_rx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_rx_idle_busy);
+	return 0;
+}
+
+/*
+ * Request idle state for a TX queue; TX counterpart of set_rx_idle().
+ * Returns -ENOTSUP when TX_IDLE is absent, 0 otherwise.
+ */
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue)
+
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_idle_idle, idle);
+	field_set_val32(p->mp_fld_tx_idle_queue, queue);
+	register_flush(p->mp_reg_tx_idle, 1);
+	return 0;
+}
+
+/*
+ * Read idle/busy state from TX_IDLE (fresh hardware read). *queue is
+ * always reported as 0. Returns -ENOTSUP when the register is absent.
+ */
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy)
+{
+	if (!p->mp_reg_tx_idle)
+		return -ENOTSUP;
+
+	*idle = field_get_updated(p->mp_fld_tx_idle_idle);
+	*queue = 0;
+	*busy = field_get_updated(p->mp_fld_tx_idle_busy);
+	return 0;
+}
+
+/*
+ * Select which RX queue the optional RX_PTR register reports on.
+ * Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_rx_ptr_queue, queue);
+	register_flush(p->mp_reg_rx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the queue pointer for the queue previously selected with
+ * set_rx_ptr_queue() (fresh hardware read via field_get_updated).
+ * *queue is always reported as 0. Returns -ENOTSUP when absent.
+ */
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_rx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_rx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_rx_ptr_valid);
+	return 0;
+}
+
+/*
+ * Select which TX queue the optional TX_PTR register reports on.
+ * Returns -ENOTSUP when the register is absent, 0 otherwise.
+ */
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_fld_tx_ptr_queue, queue);
+	register_flush(p->mp_reg_tx_ptr, 1);
+	return 0;
+}
+
+/*
+ * Read the queue pointer for the queue previously selected with
+ * set_tx_ptr_queue(). *queue is always reported as 0.
+ * Returns -ENOTSUP when the register is absent.
+ */
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid)
+{
+	if (!p->mp_reg_tx_ptr)
+		return -ENOTSUP;
+
+	*ptr = field_get_updated(p->mp_fld_tx_ptr_ptr);
+	*queue = 0;
+	*valid = field_get_updated(p->mp_fld_tx_ptr_valid);
+	return 0;
+}
+
+/* Select which RX availability-monitor entry the next data write targets. */
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+/* Shadow-copy setters for one RX availability-monitor entry; hardware is
+ * only touched later by flush_rx_am_data().
+ */
+static void
+set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+/*
+ * Fix: renamed from nthw_dbs_set_shadow_rx_am_data_enable to match the
+ * naming of every sibling set_shadow_rx_am_data_* helper. The function is
+ * static and its only caller is set_shadow_rx_am_data() below.
+ */
+static void set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index,
+		uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+/* Populate the complete RX availability-monitor shadow entry at @index. */
+static void set_shadow_rx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+/*
+ * Write the RX availability-monitor shadow entry at @index to hardware:
+ * load the data register fields from the shadow copy, point the control
+ * register at @index, then flush the data register. The 64-bit guest
+ * physical address is written as two 32-bit words (count 2). Packed and
+ * interrupt fields are optional and only written when present in this
+ * FPGA image.
+ */
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		       p->m_rx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		       p->m_rx_am_shadow[index].host_id);
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			       p->m_rx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			       p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+/*
+ * Public setter: update the RX availability-monitor shadow entry and push
+ * it to hardware. Returns -ENOTSUP when the data register is absent,
+ * 0 otherwise.
+ */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+/* Select which TX availability-monitor entry the next data write targets. */
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+/*
+ * Populate the complete TX availability-monitor shadow entry at @index.
+ * Unlike the RX variant, the fields are assigned directly rather than
+ * through per-field helper functions.
+ */
+static void set_shadow_tx_am_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t enable,
+			       uint32_t host_id, uint32_t packed,
+			       uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+/*
+ * Write the TX availability-monitor shadow entry at @index to hardware.
+ * The 64-bit guest physical address goes out as two 32-bit words; packed
+ * and interrupt fields are optional per FPGA image.
+ */
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		       p->m_tx_am_shadow[index].enable);
+	field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		       p->m_tx_am_shadow[index].host_id);
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			       p->m_tx_am_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			       p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+/*
+ * Public setter: update the TX availability-monitor shadow entry and push
+ * it to hardware. Returns -ENOTSUP when the data register is absent,
+ * 0 otherwise.
+ */
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id,
+			   packed, int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+/* Select which RX used-writer entry the next data write targets. */
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+/* Shadow-copy setters for one RX used-writer entry; hardware is only
+ * touched later by flush_rx_uw_data().
+ */
+static void
+set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored as a log2 exponent; see flush_rx_uw_data() for the
+ * version-dependent hardware encoding.
+ */
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+/* Populate the complete RX used-writer shadow entry at @index. */
+static void set_shadow_rx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+/*
+ * Write the RX used-writer shadow entry at @index to hardware. The queue
+ * size encoding depends on the DBS module version: newer than 0.8 expects
+ * the mask form (2^qs - 1), older images take the raw log2 exponent.
+ * vec/istk are only meaningful when the interrupt field exists.
+ */
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		       p->m_rx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			       p->m_rx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			       p->m_rx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_rx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			       p->m_rx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			       p->m_rx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			       p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+/*
+ * Public setter: update the RX used-writer shadow entry and push it to
+ * hardware. Returns -ENOTSUP when the data register is absent, 0 otherwise.
+ */
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which TX used-writer entry the next data write targets. */
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+/* Shadow-copy setters for one TX used-writer entry; hardware is only
+ * touched later by flush_tx_uw_data().
+ */
+static void
+set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored as a log2 exponent; see flush_tx_uw_data() for the
+ * version-dependent hardware encoding.
+ */
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index,
+					uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+/* in_order is TX-only; there is no RX counterpart. */
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index,
+				      uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+/* Populate the complete TX used-writer shadow entry at @index. */
+static void set_shadow_tx_uw_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t packed,
+			       uint32_t int_enable, uint32_t vec, uint32_t istk,
+			       uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+/*
+ * Write the TX used-writer shadow entry at @index to hardware. Queue size
+ * encoding is version-dependent, as in flush_rx_uw_data(): mask form
+ * (2^qs - 1) for DBS newer than 0.8, raw exponent otherwise. The optional
+ * in_order field is TX-specific.
+ */
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		       p->m_tx_uw_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			       p->m_tx_uw_shadow[index].queue_size);
+	}
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			       p->m_tx_uw_shadow[index].packed);
+	}
+	if (p->mp_fld_tx_used_writer_data_int) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			       p->m_tx_uw_shadow[index].int_enable);
+		field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			       p->m_tx_uw_shadow[index].vec);
+		field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			       p->m_tx_uw_shadow[index].istk);
+	}
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			       p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+/*
+ * Public setter: update the TX used-writer shadow entry and push it to
+ * hardware. Returns -ENOTSUP when the data register is absent, 0 otherwise.
+ */
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size,
+			   packed, int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
+/* Select which RX descriptor-reader entry the next data write targets. */
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+/* Shadow-copy setters for one RX descriptor-reader entry; hardware is only
+ * touched later by flush_rx_dr_data().
+ */
+static void
+set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+/* queue_size is stored as a log2 exponent; see flush_rx_dr_data() for the
+ * version-dependent hardware encoding.
+ */
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+/* Populate the complete RX descriptor-reader shadow entry at @index. */
+static void set_shadow_rx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t header,
+			       uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+/*
+ * Write the RX descriptor-reader shadow entry at @index to hardware.
+ * Queue size encoding is version-dependent (mask form 2^qs - 1 for DBS
+ * newer than 0.8, raw exponent otherwise); the packed field is optional
+ * per FPGA image.
+ */
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		       p->m_rx_dr_shadow[index].host_id);
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			       p->m_rx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		       p->m_rx_dr_shadow[index].header);
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			       p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+/*
+ * Public setter: update the RX descriptor-reader shadow entry and push it
+ * to hardware. Returns -ENOTSUP when the data register is absent,
+ * 0 otherwise.
+ */
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size,
+			   header, packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+static void
+set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+				       uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index,
+				     uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index,
+					uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index,
+				     uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+/* Shadow setter: record the TX port for TX DR entry @index. */
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->port = port;
+}
+
+/* Shadow setter: record the packed-ring flag for TX DR entry @index. */
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index,
+				     uint32_t packed)
+{
+	struct nthw_dbs_tx_dr_data_s *const entry = &p->m_tx_dr_shadow[index];
+
+	entry->packed = packed;
+}
+
+/*
+ * Populate every field of the TX DR shadow entry for @index.
+ * The individual setters are independent, so their order is arbitrary.
+ */
+static void set_shadow_tx_dr_data(nthw_dbs_t *p, uint32_t index,
+			       uint64_t guest_physical_address, uint32_t host_id,
+			       uint32_t queue_size, uint32_t port,
+			       uint32_t header, uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+	set_shadow_tx_dr_data_port(p, index, port);
+}
+
+/*
+ * Write the shadow TX DR entry for @index through to the FPGA.
+ * The 64-bit guest physical address is handed over as two 32-bit words;
+ * assumes the host's in-memory word order matches what field_set_val()
+ * expects (little-endian) -- TODO confirm.
+ */
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		     (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address,
+		     2);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		       p->m_tx_dr_shadow[index].host_id);
+	/* Newer DBS modules encode the queue size as a (2^n - 1) mask. */
+	if (module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+	} else {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			       p->m_tx_dr_shadow[index].queue_size);
+	}
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		       p->m_tx_dr_shadow[index].header);
+	field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		       p->m_tx_dr_shadow[index].port);
+	/* The PACKED field is optional; it is NULL on FPGAs without it. */
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			       p->m_tx_dr_shadow[index].packed);
+	}
+
+	/* Select the target entry, then push the staged values to hardware. */
+	set_tx_dr_data_index(p, index);
+	register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+/*
+ * Program the TX descriptor reader (DR) entry for one queue.
+ * The shadow copy is updated first, then written through to the FPGA.
+ * Returns 0 on success, or -ENOTSUP when the FPGA lacks the DR data register.
+ */
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed)
+{
+	if (p->mp_reg_tx_descriptor_reader_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id,
+			      queue_size, port, header, packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
+/* Select which TX queue-property table entry the next data flush targets. */
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1); /* one entry */
+	register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+/* Shadow setter: record the virtual-port mapping for TX QP entry @index. */
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+		uint32_t virtual_port)
+{
+	struct nthw_dbs_tx_qp_data_s *const entry = &p->m_tx_qp_shadow[index];
+
+	entry->virtual_port = virtual_port;
+}
+
+/* Fill the TX QP shadow entry; currently it holds only the virtual port. */
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index,
+			       uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+/* Write the shadow TX QP entry for @index through to the FPGA. */
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		       p->m_tx_qp_shadow[index].virtual_port);
+
+	/* Select the target entry, then push the staged value to hardware. */
+	set_tx_qp_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+/*
+ * Map a TX queue to a virtual port (shadow update + register write-through).
+ * Returns 0 on success, or -ENOTSUP when the FPGA lacks the QP data register.
+ */
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (p->mp_reg_tx_queue_property_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
+
+/*
+ * Select which TX QoS table entry the next data flush targets.
+ * NOTE(review): the mp_reg_tx_queue_qos_control_* members are nt_field_t
+ * pointers despite the "mp_reg_" prefix -- see struct nthw_dbs_s.
+ */
+static void set_tx_qos_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_control_adr, index);
+	field_set_val32(p->mp_reg_tx_queue_qos_control_cnt, 1); /* one entry */
+	register_flush(p->mp_reg_tx_queue_qos_control, 1);
+}
+
+/* Shadow setter: record the enable flag for TX QoS entry @index. */
+static void set_shadow_tx_qos_data_enable(nthw_dbs_t *p, uint32_t index,
+				      uint32_t enable)
+{
+	struct nthw_dbs_tx_qos_data_s *const qos = &p->m_tx_qos_shadow[index];
+
+	qos->enable = enable;
+}
+
+/* Shadow setter: record the information rate (IR) for TX QoS entry @index. */
+static void set_shadow_tx_qos_data_ir(nthw_dbs_t *p, uint32_t index, uint32_t ir)
+{
+	struct nthw_dbs_tx_qos_data_s *const qos = &p->m_tx_qos_shadow[index];
+
+	qos->ir = ir;
+}
+
+/* Shadow setter: record the burst size (BS) for TX QoS entry @index. */
+static void set_shadow_tx_qos_data_bs(nthw_dbs_t *p, uint32_t index, uint32_t bs)
+{
+	struct nthw_dbs_tx_qos_data_s *const qos = &p->m_tx_qos_shadow[index];
+
+	qos->bs = bs;
+}
+
+/*
+ * Populate every field of the TX QoS shadow entry for @index.
+ * The individual setters are independent, so their order is arbitrary.
+ */
+static void set_shadow_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable,
+				uint32_t ir, uint32_t bs)
+{
+	set_shadow_tx_qos_data_ir(p, index, ir);
+	set_shadow_tx_qos_data_bs(p, index, bs);
+	set_shadow_tx_qos_data_enable(p, index, enable);
+}
+
+/* Write the shadow TX QoS entry for @index through to the FPGA. */
+static void flush_tx_qos_data(nthw_dbs_t *p, uint32_t index)
+{
+	field_set_val32(p->mp_reg_tx_queue_qos_data_en, p->m_tx_qos_shadow[index].enable);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_ir, p->m_tx_qos_shadow[index].ir);
+	field_set_val32(p->mp_reg_tx_queue_qos_data_bs, p->m_tx_qos_shadow[index].bs);
+
+	/* Select the target entry, then push the staged values to hardware. */
+	set_tx_qos_data_index(p, index);
+	register_flush(p->mp_reg_tx_queue_qos_data, 1);
+}
+
+/*
+ * Configure the TX QoS entry for one queue (shadow update + write-through).
+ * Returns 0 on success, or -ENOTSUP when the FPGA lacks the QoS data register.
+ */
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs)
+{
+	if (p->mp_reg_tx_queue_qos_data == NULL)
+		return -ENOTSUP;
+
+	set_shadow_tx_qos_data(p, index, enable, ir, bs);
+	flush_tx_qos_data(p, index);
+	return 0;
+}
+
+/*
+ * Set the global TX QoS rate as a mul/div fraction.
+ * Returns 0 on success, or -ENOTSUP when the FPGA lacks the rate register.
+ */
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div)
+{
+	if (!p->mp_reg_tx_queue_qos_rate)
+		return -ENOTSUP;
+
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_mul, mul);
+	field_set_val32(p->mp_reg_tx_queue_qos_rate_div, div);
+	register_flush(p->mp_reg_tx_queue_qos_rate, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_dbs.h b/drivers/net/ntnic/nthw/nthw_dbs.h
new file mode 100644
index 0000000000..d5891d7538
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_dbs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_DBS_HPP_
+#define NTHW_DBS_HPP_
+
+/* Upper bound on queues tracked by the shadow tables below. */
+#define NT_DBS_RX_QUEUES_MAX (128)
+#define NT_DBS_TX_QUEUES_MAX (128)
+
+/*
+ * Shadow structs mirroring the DBS memory-bank data registers.
+ * The driver writes a shadow entry first, then flushes it to the FPGA
+ * (see the set_shadow_*/flush_* pairs in nthw_dbs.c).
+ */
+
+/* Shadow of one DBS_RX_AM_DATA (RX avail-monitor) table entry. */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;	/* presumably packed-virtqueue layout flag -- confirm */
+	uint32_t int_enable;
+};
+
+/* Shadow of one DBS_TX_AM_DATA (TX avail-monitor) table entry. */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* Shadow of one DBS_RX_UW_DATA (RX used-writer) table entry. */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;	/* encoded as 2^n - 1 on newer FPGAs at flush time */
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* Shadow of one DBS_TX_UW_DATA (TX used-writer) table entry. */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
+/* Shadow of one DBS_RX_DR_DATA (RX descriptor-reader) table entry. */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* Shadow of one DBS_TX_DR_DATA (TX descriptor-reader) table entry. */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
+/* Shadow of one DBS_TX_QP_DATA (TX queue-property) table entry. */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
+/* Shadow of one TX QoS table entry (enable, information rate, burst size). */
+struct nthw_dbs_tx_qos_data_s {
+	uint32_t enable;
+	uint32_t ir;
+	uint32_t bs;
+};
+
+/*
+ * Driver-side view of one DBS (doorbell/queue) FPGA module instance:
+ * cached register/field handles plus the shadow tables flushed to hardware.
+ * Optional fields (e.g. *_packed) are NULL on FPGA versions lacking them.
+ */
+struct nthw_dbs_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_dbs;
+	int mn_instance;
+
+	int mn_param_dbs_present;
+
+	/* DBS_RX_CONTROL */
+	nt_register_t *mp_reg_rx_control;
+	nt_field_t *mp_fld_rx_control_last_queue;
+	nt_field_t *mp_fld_rx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_rx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_rx_control_used_write_enable;
+	nt_field_t *mp_fld_rx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_rx_control_rx_queues_enable;
+
+	/* DBS_TX_CONTROL */
+	nt_register_t *mp_reg_tx_control;
+	nt_field_t *mp_fld_tx_control_last_queue;
+	nt_field_t *mp_fld_tx_control_avail_monitor_enable;
+	nt_field_t *mp_fld_tx_control_avail_monitor_scan_speed;
+	nt_field_t *mp_fld_tx_control_used_write_enable;
+	nt_field_t *mp_fld_tx_control_used_writer_update_speed;
+	nt_field_t *mp_fld_tx_control_tx_queues_enable;
+
+	/* Queue init/pointer/idle registers */
+	nt_register_t *mp_reg_rx_init;
+	nt_field_t *mp_fld_rx_init_init;
+	nt_field_t *mp_fld_rx_init_queue;
+	nt_field_t *mp_fld_rx_init_busy;
+
+	nt_register_t *mp_reg_rx_init_val;
+	nt_field_t *mp_fld_rx_init_val_idx;
+	nt_field_t *mp_fld_rx_init_val_ptr;
+
+	nt_register_t *mp_reg_rx_ptr;
+	nt_field_t *mp_fld_rx_ptr_ptr;
+	nt_field_t *mp_fld_rx_ptr_queue;
+	nt_field_t *mp_fld_rx_ptr_valid;
+
+	nt_register_t *mp_reg_tx_init;
+	nt_field_t *mp_fld_tx_init_init;
+	nt_field_t *mp_fld_tx_init_queue;
+	nt_field_t *mp_fld_tx_init_busy;
+
+	nt_register_t *mp_reg_tx_init_val;
+	nt_field_t *mp_fld_tx_init_val_idx;
+	nt_field_t *mp_fld_tx_init_val_ptr;
+
+	nt_register_t *mp_reg_tx_ptr;
+	nt_field_t *mp_fld_tx_ptr_ptr;
+	nt_field_t *mp_fld_tx_ptr_queue;
+	nt_field_t *mp_fld_tx_ptr_valid;
+
+	nt_register_t *mp_reg_rx_idle;
+	nt_field_t *mp_fld_rx_idle_idle;
+	nt_field_t *mp_fld_rx_idle_queue;
+	nt_field_t *mp_fld_rx_idle_busy;
+
+	nt_register_t *mp_reg_tx_idle;
+	nt_field_t *mp_fld_tx_idle_idle;
+	nt_field_t *mp_fld_tx_idle_queue;
+	nt_field_t *mp_fld_tx_idle_busy;
+
+	/* Avail-monitor (AM) table access */
+	nt_register_t *mp_reg_rx_avail_monitor_control;
+	nt_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_rx_avail_monitor_data;
+	nt_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nt_register_t *mp_reg_tx_avail_monitor_control;
+	nt_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nt_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nt_register_t *mp_reg_tx_avail_monitor_data;
+	nt_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nt_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nt_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nt_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	/* Used-writer (UW) table access */
+	nt_register_t *mp_reg_rx_used_writer_control;
+	nt_field_t *mp_fld_rx_used_writer_control_adr;
+	nt_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_rx_used_writer_data;
+	nt_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_used_writer_data_host_id;
+	nt_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_rx_used_writer_data_packed;
+	nt_field_t *mp_fld_rx_used_writer_data_int;
+	nt_field_t *mp_fld_rx_used_writer_data_vec;
+	nt_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nt_register_t *mp_reg_tx_used_writer_control;
+	nt_field_t *mp_fld_tx_used_writer_control_adr;
+	nt_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nt_register_t *mp_reg_tx_used_writer_data;
+	nt_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_used_writer_data_host_id;
+	nt_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nt_field_t *mp_fld_tx_used_writer_data_packed;
+	nt_field_t *mp_fld_tx_used_writer_data_int;
+	nt_field_t *mp_fld_tx_used_writer_data_vec;
+	nt_field_t *mp_fld_tx_used_writer_data_istk;
+	nt_field_t *mp_fld_tx_used_writer_data_in_order;
+
+	/* Descriptor-reader (DR) table access */
+	nt_register_t *mp_reg_rx_descriptor_reader_control;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_rx_descriptor_reader_data;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_control;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nt_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nt_register_t *mp_reg_tx_descriptor_reader_data;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nt_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
+	/* Queue-property (QP) table access */
+	nt_register_t *mp_reg_tx_queue_property_control;
+	nt_field_t *mp_fld_tx_queue_property_control_adr;
+	nt_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_property_data;
+	nt_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	/*
+	 * TX QoS table access.
+	 * NOTE(review): the members below are nt_field_t pointers despite the
+	 * "mp_reg_" prefix -- consider renaming to mp_fld_* for consistency.
+	 */
+	nt_register_t *mp_reg_tx_queue_qos_control;
+	nt_field_t *mp_reg_tx_queue_qos_control_adr;
+	nt_field_t *mp_reg_tx_queue_qos_control_cnt;
+
+	nt_register_t *mp_reg_tx_queue_qos_data;
+	nt_field_t *mp_reg_tx_queue_qos_data_en;
+	nt_field_t *mp_reg_tx_queue_qos_data_ir;
+	nt_field_t *mp_reg_tx_queue_qos_data_bs;
+
+	nt_register_t *mp_reg_tx_queue_qos_rate;
+	nt_field_t *mp_reg_tx_queue_qos_rate_mul;
+	nt_field_t *mp_reg_tx_queue_qos_rate_div;
+
+	/* Shadow tables flushed to the corresponding *_DATA registers. */
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qos_data_s m_tx_qos_shadow[NT_DBS_TX_QUEUES_MAX];
+};
+
+typedef struct nthw_dbs_s nthw_dbs_t;
+
+/* Lifecycle: allocate / bind to an FPGA instance / reset / free. */
+nthw_dbs_t *nthw_dbs_new(void);
+void nthw_dbs_delete(nthw_dbs_t *p);
+int dbs_init(nthw_dbs_t *p, nt_fpga_t *p_fpga, int n_instance);
+void dbs_reset(nthw_dbs_t *p);
+
+/*
+ * Register accessors. Setters return 0 on success or -ENOTSUP when the
+ * FPGA image does not provide the corresponding register.
+ */
+int dbs_reset_rx_control(nthw_dbs_t *p);
+int dbs_reset_tx_control(nthw_dbs_t *p);
+int set_rx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t rx_queue_enable);
+int nthw_dbs_get_rx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *rx_queue_enable);
+int set_tx_control(nthw_dbs_t *p, uint32_t last_queue,
+		   uint32_t avail_monitor_enable, uint32_t avail_monitor_speed,
+		   uint32_t used_write_enable, uint32_t used_write_speed,
+		   uint32_t tx_queue_enable);
+int nthw_dbs_get_tx_control(nthw_dbs_t *p, uint32_t *last_queue,
+			 uint32_t *avail_monitor_enable,
+			 uint32_t *avail_monitor_speed, uint32_t *used_write_enable,
+			 uint32_t *used_write_speed, uint32_t *tx_queue_enable);
+int set_rx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr,
+		uint32_t init, uint32_t queue);
+int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_rx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_tx_idle(nthw_dbs_t *p, uint32_t idle, uint32_t queue);
+int get_tx_idle(nthw_dbs_t *p, uint32_t *idle, uint32_t *queue, uint32_t *busy);
+int set_rx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_rx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+int set_tx_ptr_queue(nthw_dbs_t *p, uint32_t queue);
+int get_tx_ptr(nthw_dbs_t *p, uint32_t *ptr, uint32_t *queue, uint32_t *valid);
+/* Shadow-table writers (AM / UW / DR / QP / QoS); flush to hardware. */
+int set_rx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t enable, uint32_t host_id, uint32_t packed,
+		   uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t packed,
+		   uint32_t int_enable, uint32_t vec, uint32_t istk,
+		   uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t header,
+		   uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p, uint32_t index, uint64_t guest_physical_address,
+		   uint32_t host_id, uint32_t queue_size, uint32_t port,
+		   uint32_t header, uint32_t packed);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+int set_tx_qos_data(nthw_dbs_t *p, uint32_t index, uint32_t enable, uint32_t ir,
+		    uint32_t bs);
+int set_tx_qos_rate(nthw_dbs_t *p, uint32_t mul, uint32_t div);
+
+#endif /* NTHW_DBS_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_drv.h b/drivers/net/ntnic/nthw/nthw_drv.h
new file mode 100644
index 0000000000..7fdd9bf0e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_drv.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * NOTE(review): "__NTHW_DRV_H__" is a reserved identifier (leading double
+ * underscore belongs to the implementation); consider renaming the guard.
+ */
+#ifndef __NTHW_DRV_H__
+#define __NTHW_DRV_H__
+
+#include "nthw_profile.h"
+
+/*
+ * Port flavour; declared before the nthw_* includes below, presumably
+ * because one of them needs it -- confirm before reordering.
+ */
+typedef enum nt_meta_port_type_e {
+	PORT_TYPE_PHYSICAL,
+	PORT_TYPE_VIRTUAL,
+	PORT_TYPE_OVERRIDE,
+} nt_meta_port_type_t;
+
+#include "nthw_helper.h"
+#include "nthw_platform_drv.h"
+#include "nthw_fpga_model.h"
+#include "nthw_stat.h"
+#include "nthw_dbs.h"
+#include "nthw_epp.h"
+#include "nthw_core.h"
+
+/* Static adapter identity as reported by firmware/VPD. */
+typedef struct nthwhw_info_s {
+	/* From FW */
+	int hw_id;
+	int hw_id_emulated;
+	char hw_plat_id_str[32];
+
+	struct vpd_info_s {
+		int mn_mac_addr_count;
+		uint64_t mn_mac_addr_value;
+		uint8_t ma_mac_addr_octets[6];
+	} vpd_info;
+} nthw_hw_info_t;
+
+/*
+ * Per-adapter FPGA state: identity, port counts, module handles
+ * (NULL when a module is absent), and PCI/DMA bookkeeping.
+ */
+typedef struct fpga_info_s {
+	uint64_t n_fpga_ident;
+
+	int n_fpga_type_id;
+	int n_fpga_prod_id;
+	int n_fpga_ver_id;
+	int n_fpga_rev_id;
+
+	int n_fpga_build_time;
+
+	int n_fpga_debug_mode;
+
+	int n_nims;
+	int n_phy_ports;
+	int n_phy_quads;
+	int n_rx_ports;
+	int n_tx_ports;
+
+	enum fpga_info_profile profile;
+
+	struct nt_fpga_s *mp_fpga;
+
+	struct nthw_rac *mp_nthw_rac;
+	struct nthw_hif *mp_nthw_hif;
+	struct nthw_pcie3 *mp_nthw_pcie3;
+	struct nthw_tsm *mp_nthw_tsm;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	nthw_epp_t *mp_nthw_epp;
+
+	uint8_t *bar0_addr; /* Needed for register read/write */
+	size_t bar0_size;
+
+	int adapter_no; /* Needed for nthw_rac DMA array indexing */
+	uint32_t pciident; /* Needed for nthw_rac DMA memzone_reserve */
+	int numa_node; /* Needed for nthw_rac DMA memzone_reserve */
+
+	char *mp_adapter_id_str; /* Pointer to string literal used in nthw log messages */
+
+	struct nthwhw_info_s nthw_hw_info;
+
+	nthw_adapter_id_t n_nthw_adapter_id;
+
+} fpga_info_t;
+
+#endif /* __NTHW_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_epp.c b/drivers/net/ntnic/nthw/nthw_epp.c
new file mode 100644
index 0000000000..fbe3993b25
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_epp.h"
+
+#include <errno.h> /* ENOTSUP */
+
+/*
+ * Allocate a zero-initialized EPP context.
+ * Returns NULL on allocation failure; release with nthw_epp_delete().
+ */
+nthw_epp_t *nthw_epp_new(void)
+{
+	/* calloc() zeroes the struct in one step (was malloc + memset). */
+	return calloc(1, sizeof(nthw_epp_t));
+}
+
+/* Scrub and release an EPP context obtained from nthw_epp_new(). */
+void nthw_epp_delete(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return;
+
+	/* Clear the stale register/field handles before freeing. */
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Probe for EPP module instance @n_instance without binding to it:
+ * nthw_epp_init() with a NULL context only checks that the module exists.
+ * Returns non-zero when the module is present.
+ */
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance)
+{
+	return nthw_epp_init(NULL, p_fpga, n_instance) == 0;
+}
+
+/*
+ * Bind an EPP context to FPGA module instance @n_instance and cache all
+ * register/field handles. When @p is NULL, only probes for the module
+ * (see nthw_epp_present()). Returns 0 on success, -1 when the module is
+ * absent.
+ */
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_EPP, n_instance);
+
+	/* Probe-only mode: report presence without touching any state. */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: EPP %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_epp = mod;
+
+	p->mn_epp_categories = fpga_get_product_param(p_fpga, NT_EPP_CATEGORIES, 0);
+
+	/* Recipe memory (note: "reciepe" spelling is kept for API consistency) */
+	p->mp_reg_reciepe_memory_control =
+		module_get_register(p->mp_mod_epp, EPP_RCP_CTRL);
+	p->mp_fld_reciepe_memory_control_adr =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_ADR);
+	p->mp_fld_reciepe_memory_control_cnt =
+		register_get_field(p->mp_reg_reciepe_memory_control, EPP_RCP_CTRL_CNT);
+
+	p->mp_reg_reciepe_memory_data =
+		module_get_register(p->mp_mod_epp, EPP_RCP_DATA);
+	p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_MTU_EPP_EN);
+	p->mp_fld_reciepe_memory_data_size_adjust_tx_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_TXP);
+	p->mp_fld_reciepe_memory_data_size_adjust_virtual_port =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_SIZE_ADJUST_VPORT);
+	p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_FIXED_18B_L2_MTU);
+	p->mp_fld_reciepe_memory_data_txp_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_TX_QOS_EPP_EN);
+	p->mp_fld_reciepe_memory_data_queue_qos_epp_enable =
+		register_get_field(p->mp_reg_reciepe_memory_data, EPP_RCP_DATA_QUEUE_QOS_EPP_EN);
+
+	/* Physical (TXP) port MTU table */
+	p->mp_reg_txp_port_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_CTRL);
+	p->mp_fld_txp_port_mtu_control_adr =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_ADR);
+	p->mp_fld_txp_port_mtu_control_cnt =
+		register_get_field(p->mp_reg_txp_port_mtu_control, EPP_TXP_MTU_CTRL_CNT);
+
+	p->mp_reg_txp_port_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_TXP_MTU_DATA);
+	p->mp_fld_txp_port_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_txp_port_mtu_data, EPP_TXP_MTU_DATA_MAX_MTU);
+
+	/* Per-queue MTU table */
+	p->mp_reg_queue_mtu_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_CTRL);
+	p->mp_fld_queue_mtu_control_adr =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_ADR);
+	p->mp_fld_queue_mtu_control_cnt =
+		register_get_field(p->mp_reg_queue_mtu_control, EPP_QUEUE_MTU_CTRL_CNT);
+
+	p->mp_reg_queue_mtu_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_MTU_DATA);
+	p->mp_fld_queue_mtu_data_max_mtu =
+		register_get_field(p->mp_reg_queue_mtu_data, EPP_QUEUE_MTU_DATA_MAX_MTU);
+
+	/* Physical-port QoS shaper table */
+	p->mp_reg_txp_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_TXP_QOS_CTRL);
+	p->mp_fld_txp_qos_control_adr =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_ADR);
+	p->mp_fld_txp_qos_control_cnt =
+		register_get_field(p->mp_reg_txp_qos_control, EPP_TXP_QOS_CTRL_CNT);
+
+	p->mp_reg_txp_qos_data = module_get_register(p->mp_mod_epp, EPP_TXP_QOS_DATA);
+	p->mp_fld_txp_qos_data_enable =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_EN);
+	p->mp_fld_txp_qos_data_information_rate =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR);
+	p->mp_fld_txp_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_IR_FRACTION);
+	p->mp_fld_txp_qos_data_burst_size =
+		register_get_field(p->mp_reg_txp_qos_data, EPP_TXP_QOS_DATA_BS);
+
+	/* Virtual-port QoS shaper table */
+	p->mp_reg_vport_qos_control =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_CTRL);
+	p->mp_fld_vport_qos_control_adr =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_ADR);
+	p->mp_fld_vport_qos_control_cnt =
+		register_get_field(p->mp_reg_vport_qos_control, EPP_VPORT_QOS_CTRL_CNT);
+
+	p->mp_reg_vport_qos_data =
+		module_get_register(p->mp_mod_epp, EPP_VPORT_QOS_DATA);
+	p->mp_fld_vport_qos_data_enable =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_EN);
+	p->mp_fld_vport_qos_data_information_rate =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR);
+	p->mp_fld_vport_qos_data_information_rate_fractional =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_IR_FRACTION);
+	p->mp_fld_vport_qos_data_burst_size =
+		register_get_field(p->mp_reg_vport_qos_data, EPP_VPORT_QOS_DATA_BS);
+
+	/* Queue-to-virtual-port mapping table */
+	p->mp_reg_queue_vport_control =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_CTRL);
+	p->mp_fld_queue_vport_control_adr =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_ADR);
+	p->mp_fld_queue_vport_control_cnt =
+		register_get_field(p->mp_reg_queue_vport_control, EPP_QUEUE_VPORT_CTRL_CNT);
+
+	p->mp_reg_queue_vport_data =
+		module_get_register(p->mp_mod_epp, EPP_QUEUE_VPORT_DATA);
+	p->mp_fld_queue_vport_data_vport =
+		register_get_field(p->mp_reg_queue_vport_data, EPP_QUEUE_VPORT_DATA_VPORT);
+
+	return 0;
+}
+
+/*
+ * Bring the EPP module to a known default state: zero all recipe records,
+ * program the NRECIPE standard recipes (none / VXLAN / VXLAN-IPv6), set the
+ * default MTU on every physical port and queue, and disable all shapers.
+ * Returns 0; a NULL context is a no-op.
+ */
+int nthw_epp_setup(nthw_epp_t *p)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Write one record per control/data cycle. */
+	field_set_val32(p->mp_fld_reciepe_memory_control_cnt, 1);
+
+	/* Zero all categories */
+	for (int i = 0; i < p->mn_epp_categories; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 0);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 0);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+
+	/* Program the first NRECIPE recipe records with the size-adjust tables. */
+	for (int i = 0; i < NRECIPE; ++i) {
+		field_set_val32(p->mp_fld_reciepe_memory_control_adr, i);
+		register_flush(p->mp_reg_reciepe_memory_control, 1);
+
+		field_set_val32(p->mp_fld_reciepe_memory_data_tx_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_mtu_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_tx_port,
+			       rcp_data_size_adjust_txp[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_size_adjust_virtual_port,
+			       rcp_data_size_adjust_vport[i]);
+		field_set_val32(p->mp_fld_reciepe_memory_data_fixed18b_l2_mtu, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_txp_qos_epp_enable, 1);
+		field_set_val32(p->mp_fld_reciepe_memory_data_queue_qos_epp_enable, 1);
+		register_flush(p->mp_reg_reciepe_memory_data, 1);
+	}
+	/* Physical-port MTU setup (2 ports hard-coded -- TODO confirm count) */
+	field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, i);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	}
+	/* Physical-port QoS: disable all shapers */
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	for (int i = 0; i < 2; ++i) {
+		field_set_val32(p->mp_fld_txp_qos_control_adr, i);
+		register_flush(p->mp_reg_txp_qos_control, 1);
+
+		field_set_val32(p->mp_fld_txp_qos_data_enable, 0);
+		register_flush(p->mp_reg_txp_qos_data, 1);
+	}
+
+	/* Per-queue MTU setup (128 queues hard-coded -- TODO confirm count) */
+	field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, i);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, MTUINITVAL);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	}
+
+	/* Virtual-port QoS: disable all shapers */
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	for (int i = 0; i < 128; ++i) {
+		field_set_val32(p->mp_fld_vport_qos_control_adr, i);
+		register_flush(p->mp_reg_vport_qos_control, 1);
+
+		field_set_val32(p->mp_fld_vport_qos_data_enable, 0);
+		register_flush(p->mp_reg_vport_qos_data, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the maximum MTU for one port.
+ * Physical ports use the TXP MTU table, virtual ports the per-queue MTU
+ * table. Any other port type resets both tables and returns -ENOTSUP.
+ * A NULL context is a no-op returning 0.
+ */
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type)
+{
+	if (p == NULL)
+		return 0;
+
+	if (port_type == PORT_TYPE_PHYSICAL) {
+		/* Select the port entry in the TXP MTU table */
+		field_set_val32(p->mp_fld_txp_port_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_txp_port_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_txp_port_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+	} else if (port_type == PORT_TYPE_VIRTUAL) {
+		/* Select the queue entry in the queue MTU table */
+		field_set_val32(p->mp_fld_queue_mtu_control_adr, port);
+		field_set_val32(p->mp_fld_queue_mtu_control_cnt, 1);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+
+		/* Write the new maximum MTU */
+		field_set_val32(p->mp_fld_queue_mtu_data_max_mtu, max_mtu);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+	} else {
+		NT_LOG(DBG, NTHW, "NthwEpp::%s - port_type unsupported",
+		       __func__);
+		/* Restore both MTU tables to their reset defaults */
+		register_reset(p->mp_reg_queue_mtu_control);
+		register_flush(p->mp_reg_queue_mtu_control, 1);
+		register_reset(p->mp_reg_queue_mtu_data);
+		register_flush(p->mp_reg_queue_mtu_data, 1);
+		register_reset(p->mp_reg_txp_port_mtu_control);
+		register_flush(p->mp_reg_txp_port_mtu_control, 1);
+		register_reset(p->mp_reg_txp_port_mtu_data);
+		register_flush(p->mp_reg_txp_port_mtu_data, 1);
+
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure the physical-port (TXP) QoS shaper for @port.
+ * The shaper is enabled iff any of rate, fractional rate, or burst size
+ * is non-zero. A NULL context is a no-op returning 0.
+ */
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the per-port table entry. */
+	field_set_val32(p->mp_fld_txp_qos_control_adr, port);
+	field_set_val32(p->mp_fld_txp_qos_control_cnt, 1);
+	register_flush(p->mp_reg_txp_qos_control, 1);
+
+	const uint32_t enable = (information_rate != 0 ||
+				 information_rate_fractional != 0 ||
+				 burst_size != 0);
+
+	field_set_val32(p->mp_fld_txp_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_txp_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_txp_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_txp_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Configure the virtual-port QoS shaper for @port.
+ * The shaper is enabled iff any of rate, fractional rate, or burst size
+ * is non-zero. A NULL context is a no-op returning 0.
+ */
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the per-port table entry. */
+	field_set_val32(p->mp_fld_vport_qos_control_adr, port);
+	field_set_val32(p->mp_fld_vport_qos_control_cnt, 1);
+	register_flush(p->mp_reg_vport_qos_control, 1);
+
+	const uint32_t enable = (information_rate != 0 ||
+				 information_rate_fractional != 0 ||
+				 burst_size != 0);
+
+	field_set_val32(p->mp_fld_vport_qos_data_enable, enable);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate, information_rate);
+	field_set_val32(p->mp_fld_vport_qos_data_information_rate_fractional,
+		       information_rate_fractional);
+	field_set_val32(p->mp_fld_vport_qos_data_burst_size, burst_size);
+	register_flush(p->mp_reg_vport_qos_data, 1);
+
+	return 0;
+}
+
+/*
+ * Map DBS queue @qid to virtual port @vport in the EPP queue/vport table.
+ * A NULL context is a no-op returning 0.
+ */
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport)
+{
+	if (p == NULL)
+		return 0;
+
+	/* Select the table entry for this queue. */
+	field_set_val32(p->mp_fld_queue_vport_control_adr, qid);
+	field_set_val32(p->mp_fld_queue_vport_control_cnt, 1);
+	register_flush(p->mp_reg_queue_vport_control, 1);
+
+	/* Write the mapping itself. */
+	field_set_val32(p->mp_fld_queue_vport_data_vport, vport);
+	register_flush(p->mp_reg_queue_vport_data, 1);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_epp.h b/drivers/net/ntnic/nthw/nthw_epp.h
new file mode 100644
index 0000000000..b404c9b61a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_epp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_EPP_HPP_
+#define NTHW_EPP_HPP_
+
+/* VXLAN adds extra 50 bytes */
+#define VXLANDATASIZEADJUST 50
+/* VXLAN over IPv6 adds 70 bytes (larger IP header) */
+#define VXLANDATASIZEADJUSTIPV6 70
+/* Initial/default L2 MTU programmed at setup */
+#define MTUINITVAL 1500
+/* Number of EPP recipes: none / VXLAN / VXLAN-IPv6 */
+#define NRECIPE 3
+
+/* List of size adjust values to put in the recipe memory data register at startup */
+/* NOTE(review): static const arrays in a header give each includer its own
+ * copy - verify this header is only included where the tables are used.
+ */
+static const int rcp_data_size_adjust_txp[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						   VXLANDATASIZEADJUSTIPV6
+						 };
+static const int rcp_data_size_adjust_vport[NRECIPE] = { 0, VXLANDATASIZEADJUST,
+						     VXLANDATASIZEADJUSTIPV6
+						   };
+
+/*
+ * Driver-side handle for one EPP (egress packet processing) FPGA module
+ * instance: the module plus cached pointers to all registers/fields used
+ * by the nthw_epp_* functions below. All pointers are resolved once in
+ * nthw_epp_init().
+ */
+struct nthw_epp_s {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_epp;
+	int mn_instance;
+	int mn_epp_categories;
+
+	/* Recipe memory: per-recipe size adjust / MTU enable settings */
+	nt_register_t *mp_reg_reciepe_memory_control;
+	nt_field_t *mp_fld_reciepe_memory_control_adr;
+	nt_field_t *mp_fld_reciepe_memory_control_cnt;
+
+	nt_register_t *mp_reg_reciepe_memory_data;
+	nt_field_t *mp_fld_reciepe_memory_data_tx_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_mtu_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_tx_port;
+	nt_field_t *mp_fld_reciepe_memory_data_size_adjust_virtual_port;
+	nt_field_t *mp_fld_reciepe_memory_data_fixed18b_l2_mtu;
+	nt_field_t *mp_fld_reciepe_memory_data_txp_qos_epp_enable;
+	nt_field_t *mp_fld_reciepe_memory_data_queue_qos_epp_enable;
+
+	/* Per-physical-port MTU table */
+	nt_register_t *mp_reg_txp_port_mtu_control;
+	nt_field_t *mp_fld_txp_port_mtu_control_adr;
+	nt_field_t *mp_fld_txp_port_mtu_control_cnt;
+
+	nt_register_t *mp_reg_txp_port_mtu_data;
+	nt_field_t *mp_fld_txp_port_mtu_data_max_mtu;
+
+	/* Per-queue MTU table */
+	nt_register_t *mp_reg_queue_mtu_control;
+	nt_field_t *mp_fld_queue_mtu_control_adr;
+	nt_field_t *mp_fld_queue_mtu_control_cnt;
+
+	nt_register_t *mp_reg_queue_mtu_data;
+	nt_field_t *mp_fld_queue_mtu_data_max_mtu;
+
+	/* Tx-port QoS (rate/burst) table */
+	nt_register_t *mp_reg_txp_qos_control;
+	nt_field_t *mp_fld_txp_qos_control_adr;
+	nt_field_t *mp_fld_txp_qos_control_cnt;
+
+	nt_register_t *mp_reg_txp_qos_data;
+	nt_field_t *mp_fld_txp_qos_data_enable;
+	nt_field_t *mp_fld_txp_qos_data_information_rate;
+	nt_field_t *mp_fld_txp_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_txp_qos_data_burst_size;
+
+	/* Virtual-port QoS (rate/burst) table */
+	nt_register_t *mp_reg_vport_qos_control;
+	nt_field_t *mp_fld_vport_qos_control_adr;
+	nt_field_t *mp_fld_vport_qos_control_cnt;
+
+	nt_register_t *mp_reg_vport_qos_data;
+	nt_field_t *mp_fld_vport_qos_data_enable;
+	nt_field_t *mp_fld_vport_qos_data_information_rate;
+	nt_field_t *mp_fld_vport_qos_data_information_rate_fractional;
+	nt_field_t *mp_fld_vport_qos_data_burst_size;
+
+	/* Queue -> virtual port mapping table */
+	nt_register_t *mp_reg_queue_vport_control;
+	nt_field_t *mp_fld_queue_vport_control_adr;
+	nt_field_t *mp_fld_queue_vport_control_cnt;
+
+	nt_register_t *mp_reg_queue_vport_data;
+	nt_field_t *mp_fld_queue_vport_data_vport;
+};
+
+typedef struct nthw_epp_s nthw_epp_t;
+
+nthw_epp_t *nthw_epp_new(void);
+void nthw_epp_delete(nthw_epp_t *p);
+
+int nthw_epp_present(nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_init(nthw_epp_t *p, nt_fpga_t *p_fpga, int n_instance);
+int nthw_epp_setup(nthw_epp_t *p);
+int nthw_epp_set_mtu(nthw_epp_t *p, uint32_t port, uint32_t max_mtu,
+		   nt_meta_port_type_t port_type);
+int nthw_epp_set_txp_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+		      uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_vport_qos(nthw_epp_t *p, uint32_t port, uint32_t information_rate,
+			uint32_t information_rate_fractional, uint32_t burst_size);
+int nthw_epp_set_queue_to_vport(nthw_epp_t *p, uint32_t qid, uint32_t vport);
+
+#endif /* NTHW_EPP_HPP_ */
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.c b/drivers/net/ntnic/nthw/nthw_fpga_model.c
new file mode 100644
index 0000000000..fca13e0f31
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.c
@@ -0,0 +1,1677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <time.h> /* ctime */
+
+#include "nthw_drv.h" /* fpga_info_s */
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+#include "nthw_rac.h"
+#include "ntlog.h"
+
+#include "nthw_fpga_instances.h"
+#include "nthw_fpga_modules_defs.h"
+
+/* Generated code: NULL-terminated table of supported FPGA descriptions. */
+nt_fpga_prod_init_t *nthw_fpga_instances[] = { &nthw_fpga_9563_055_024_0000,
+					       NULL
+					     };
+
+static const struct {
+	const int a;
+	const char *b;
+} sa_nthw_fpga_mod_map[] = {
+	{ MOD_CAT, "CAT" },
+	{ MOD_CB, "CB" },
+	{ MOD_CCIP, "CCIP" },
+	{ MOD_CFP4_CTRL_GBOX, "CFP4_CTRL_GBOX" },
+	{ MOD_COR, "COR" },
+	{ MOD_CPY, "CPY" },
+	{ MOD_CSU, "CSU" },
+	{ MOD_DBS, "DBS" },
+	{ MOD_DDP, "DDP" },
+	{ MOD_EPP, "EPP" },
+	{ MOD_EQM, "EQM" },
+	{ MOD_FHM, "FHM" },
+	{ MOD_FLM, "FLM" },
+	{ MOD_GFG, "GFG" },
+	{ MOD_GMF, "GMF" },
+	{ MOD_GPIO_PHY, "GPIO_PHY" },
+	{ MOD_GPIO_PHY_PORTS, "GPIO_PHY_PORTS" },
+	{ MOD_GPIO_SFPP, "GPIO_SFPP" },
+	{ MOD_HFU, "HFU" },
+	{ MOD_HIF, "HIF" },
+	{ MOD_HSH, "HSH" },
+	{ MOD_HST, "HST" },
+	{ MOD_ICORE_10G, "ICORE_10G" },
+	{ MOD_IFR, "IFR" },
+	{ MOD_IIC, "IIC" },
+	{ MOD_INS, "INS" },
+	{ MOD_IOA, "IOA" },
+	{ MOD_IPF, "IPF" },
+	{ MOD_KM, "KM" },
+	{ MOD_LAO, "LAO" },
+	{ MOD_MAC, "MAC" },
+	{ MOD_MAC10, "MAC10" },
+	{ MOD_MAC100, "MAC100" },
+	{ MOD_MAC10G, "MAC10G" },
+	{ MOD_MAC1G, "MAC1G" },
+	{ MOD_MAC_PCS, "MAC_PCS" },
+	{ MOD_MAC_PCS_XXV, "MAC_PCS_XXV" },
+	{ MOD_MAC_RX, "MAC_RX" },
+	{ MOD_MAC_TFG, "MAC_TFG" },
+	{ MOD_MAC_TX, "MAC_TX" },
+	{ MOD_MCU, "MCU" },
+	{ MOD_MDG, "MDG" },
+	{ MOD_MSK, "MSK" },
+	{ MOD_NIF, "NIF" },
+	{ MOD_PCIE3, "PCIE3" },
+	{ MOD_PCI_RD_TG, "PCI_RD_TG" },
+	{ MOD_PCI_TA, "PCI_TA" },
+	{ MOD_PCI_WR_TG, "PCI_WR_TG" },
+	{ MOD_PCM_NT100A01_01, "PCM_NT100A01_01" },
+	{ MOD_PCM_NT50B01_01, "PCM_NT50B01_01" },
+	{ MOD_PCS, "PCS" },
+	{ MOD_PCS100, "PCS100" },
+	{ MOD_PDB, "PDB" },
+	{ MOD_PDI, "PDI" },
+	{ MOD_PHY10G, "PHY10G" },
+	{ MOD_PHY3S10G, "PHY3S10G" },
+	{ MOD_PM, "PM" },
+	{ MOD_PRM_NT100A01_01, "PRM_NT100A01_01" },
+	{ MOD_PRM_NT50B01_01, "PRM_NT50B01_01" },
+	{ MOD_PTP1588, "PTP1588" },
+	{ MOD_QM, "QM" },
+	{ MOD_QSL, "QSL" },
+	{ MOD_QSPI, "QSPI" },
+	{ MOD_R2DRP, "R2DRP" },
+	{ MOD_RAC, "RAC" },
+	{ MOD_RBH, "RBH" },
+	{ MOD_RFD, "RFD" },
+	{ MOD_RMC, "RMC" },
+	{ MOD_RNTC, "RNTC" },
+	{ MOD_ROA, "ROA" },
+	{ MOD_RPL, "RPL" },
+	{ MOD_RPP_LR, "RPP_LR" },
+	{ MOD_RST7000, "RST7000" },
+	{ MOD_RST7001, "RST7001" },
+	{ MOD_RST9500, "RST9500" },
+	{ MOD_RST9501, "RST9501" },
+	{ MOD_RST9502, "RST9502" },
+	{ MOD_RST9503, "RST9503" },
+	{ MOD_RST9504, "RST9504" },
+	{ MOD_RST9505, "RST9505" },
+	{ MOD_RST9506, "RST9506" },
+	{ MOD_RST9507, "RST9507" },
+	{ MOD_RST9508, "RST9508" },
+	{ MOD_RST9509, "RST9509" },
+	{ MOD_RST9510, "RST9510" },
+	{ MOD_RST9512, "RST9512" },
+	{ MOD_RST9513, "RST9513" },
+	{ MOD_RST9515, "RST9515" },
+	{ MOD_RST9516, "RST9516" },
+	{ MOD_RST9517, "RST9517" },
+	{ MOD_RST9519, "RST9519" },
+	{ MOD_RST9520, "RST9520" },
+	{ MOD_RST9521, "RST9521" },
+	{ MOD_RST9522, "RST9522" },
+	{ MOD_RST9523, "RST9523" },
+	{ MOD_RST9524, "RST9524" },
+	{ MOD_RST9525, "RST9525" },
+	{ MOD_RST9526, "RST9526" },
+	{ MOD_RST9527, "RST9527" },
+	{ MOD_RST9528, "RST9528" },
+	{ MOD_RST9529, "RST9529" },
+	{ MOD_RST9530, "RST9530" },
+	{ MOD_RST9531, "RST9531" },
+	{ MOD_RST9532, "RST9532" },
+	{ MOD_RST9533, "RST9533" },
+	{ MOD_RST9534, "RST9534" },
+	{ MOD_RST9535, "RST9535" },
+	{ MOD_RST9536, "RST9536" },
+	{ MOD_RST9537, "RST9537" },
+	{ MOD_RST9538, "RST9538" },
+	{ MOD_RST9539, "RST9539" },
+	{ MOD_RST9540, "RST9540" },
+	{ MOD_RST9541, "RST9541" },
+	{ MOD_RST9542, "RST9542" },
+	{ MOD_RST9543, "RST9543" },
+	{ MOD_RST9544, "RST9544" },
+	{ MOD_RST9545, "RST9545" },
+	{ MOD_RST9546, "RST9546" },
+	{ MOD_RST9547, "RST9547" },
+	{ MOD_RST9548, "RST9548" },
+	{ MOD_RST9549, "RST9549" },
+	{ MOD_RST9553, "RST9553" },
+	{ MOD_RST9555, "RST9555" },
+	{ MOD_RST9559, "RST9559" },
+	{ MOD_RST9563, "RST9563" },
+	{ MOD_RTD, "RTD" },
+	{ MOD_RTD_HMP, "RTD_HMP" },
+	{ MOD_RTX, "RTX" },
+	{ MOD_SDC, "SDC" },
+	{ MOD_SLC, "SLC" },
+	{ MOD_SLC_LR, "SLC_LR" },
+	{ MOD_SMM, "SMM" },
+	{ MOD_SMM_RX, "SMM_RX" },
+	{ MOD_SMM_TX, "SMM_TX" },
+	{ MOD_SPIM, "SPIM" },
+	{ MOD_SPIS, "SPIS" },
+	{ MOD_STA, "STA" },
+	{ MOD_TBH, "TBH" },
+	{ MOD_TEMPMON, "TEMPMON" },
+	{ MOD_TINT, "TINT" },
+	{ MOD_TMC, "TMC" },
+	{ MOD_TSM, "TSM" },
+	{ MOD_TX_CPY, "TX_CPY" },
+	{ MOD_TX_CSI, "TX_CSI" },
+	{ MOD_TX_CSO, "TX_CSO" },
+	{ MOD_TX_INS, "TX_INS" },
+	{ MOD_TX_RPL, "TX_RPL" },
+	{ 0L, NULL },
+};
+
+/* Printable names for the bus-type ids, indexed by enum value.
+ * NOTE: this needs to be (manually) synced with the enum.
+ */
+static const char *const a_bus_type[] = {
+	"ERR", /* BUS_TYPE_UNKNOWN, */
+	"BAR", /* BUS_TYPE_BAR, */
+	"PCI", /* BUS_TYPE_PCI, */
+	"CCIP", /* BUS_TYPE_CCIP, */
+	"RAB0", /* BUS_TYPE_RAB0, */
+	"RAB1", /* BUS_TYPE_RAB1, */
+	"RAB2", /* BUS_TYPE_RAB2, */
+	"NMB", /* BUS_TYPE_NMB, */
+	"NDM", /* BUS_TYPE_NDM, */
+};
+
+/*
+ * Map a bus-type id to its printable name.
+ * Valid ids are 1..ARRAY_SIZE(a_bus_type)-1 (index 0 is the "ERR"
+ * placeholder for BUS_TYPE_UNKNOWN); anything else yields "ERR".
+ * Fix: the original used `<=`, which permitted an out-of-bounds read at
+ * index == ARRAY_SIZE(a_bus_type).
+ */
+static const char *get_bus_name(int n_bus_type_id)
+{
+	if (n_bus_type_id >= 1 && n_bus_type_id < (int)ARRAY_SIZE(a_bus_type))
+		return a_bus_type[n_bus_type_id];
+
+	return "ERR";
+}
+
+/*
+ * Module name lookup by id from array.
+ * Uses naive linear search as performance is not an issue here.
+ * Returns "unknown" when the id is not in the map.
+ * Fix: the original loop used `<=` on ARRAY_SIZE and then indexed with
+ * the loop counter, reading one element past the array when no id
+ * matched; iterate strictly inside the bounds instead.
+ */
+static const char *nthw_fpga_mod_id_to_str(uint64_t n_fpga_mod_id)
+{
+	int i;
+
+	for (i = 0; i < (int)ARRAY_SIZE(sa_nthw_fpga_mod_map); i++) {
+		if ((uint64_t)sa_nthw_fpga_mod_map[i].a == n_fpga_mod_id &&
+				sa_nthw_fpga_mod_map[i].b != NULL)
+			return sa_nthw_fpga_mod_map[i].b;
+	}
+	return "unknown";
+}
+
+/*
+ * Read `len` 32-bit words from a device register into p_data, dispatching
+ * on the bus the owning module sits on: BAR/PCI are single-dword RAC
+ * register reads; RAB0/1/2 go through the RAC RAB channel of the same
+ * number. Returns 0 on success, negative on error; asserts on an
+ * unknown bus type.
+ */
+static int read_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id, uint32_t addr,
+		    uint32_t len, uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* BAR/PCI path reads exactly one dword */
+		nthw_rac_reg_read32(p_fpga_info, addr, p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_read32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * As read_data(), with slots for TSC timestamps around the access.
+ * The timestamp outputs are currently unused placeholders.
+ */
+static int read_data_tsc(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		       uint32_t addr, uint32_t len, uint32_t *p_data,
+		       uint64_t *p_tsc1, uint64_t *p_tsc2)
+{
+	(void)p_tsc1;
+	(void)p_tsc2;
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/*
+ * Write `len` 32-bit words from p_data to a device register; bus
+ * dispatch mirrors read_data(). Returns 0 on success, negative on
+ * error; asserts on an unknown bus type.
+ */
+static int write_data(struct fpga_info_s *p_fpga_info, int n_bus_type_id,
+		     uint32_t addr, uint32_t len, const uint32_t *p_data)
+{
+	int rc = -1;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	switch (n_bus_type_id) {
+	case BUS_TYPE_BAR:
+	case BUS_TYPE_PCI:
+		assert(len == 1); /* BAR/PCI path writes exactly one dword */
+		nthw_rac_reg_write32(p_fpga_info, addr, *p_data);
+		rc = 0;
+		break;
+	case BUS_TYPE_RAB0:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 0, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB1:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 1, len,
+					 p_data);
+		break;
+	case BUS_TYPE_RAB2:
+		assert(p_fpga_info->mp_nthw_rac);
+		rc = nthw_rac_rab_write32(p_fpga_info->mp_nthw_rac, addr, 2, len,
+					 p_data);
+		break;
+	default:
+		assert(false);
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * FpgaMgr
+ */
+/* Allocate an FPGA manager; caller must check for NULL and call
+ * fpga_mgr_init() before use. Zero-initialized for consistency with
+ * fpga_new() (the original returned uninitialized memory).
+ */
+nt_fpga_mgr_t *fpga_mgr_new(void)
+{
+	nt_fpga_mgr_t *p = calloc(1, sizeof(nt_fpga_mgr_t));
+	return p;
+}
+
+/* Scrub and free an FPGA manager. Fix: tolerate NULL (consistent with
+ * param_delete()); the original crashed in memset() on a NULL argument.
+ */
+void fpga_mgr_delete(nt_fpga_mgr_t *p)
+{
+	if (!p)
+		return;
+	memset(p, 0, sizeof(nt_fpga_mgr_t));
+	free(p);
+}
+
+/*
+ * Attach the generated, NULL-terminated nthw_fpga_instances table and
+ * count its entries up to the terminator.
+ */
+void fpga_mgr_init(nt_fpga_mgr_t *p)
+{
+	size_t i;
+
+	/* Count fpga instance in array */
+	p->mpa_fpga_prod_init = nthw_fpga_instances;
+	for (i = 0; i < ARRAY_SIZE(nthw_fpga_instances); i++) {
+		if (p->mpa_fpga_prod_init[i] == NULL)
+			break;
+	}
+	p->mn_fpgas = (int)i;
+}
+
+/*
+ * Find the product description matching an FPGA id (product/version/
+ * revision extracted from the id) and build an initialized nt_fpga_t
+ * from it. Returns NULL (and logs an error) when the id is not in the
+ * table of supported FPGAs.
+ */
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info)
+{
+	const int n_fpga_prod = FPGAID_TO_PRODUCTCODE(n_fpga_id);
+	const int n_fpga_ver = FPGAID_TO_VERSIONCODE(n_fpga_id);
+	const int n_fpga_rev = FPGAID_TO_REVISIONCODE(n_fpga_id);
+	int i;
+
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (p_init->fpga_product_id != n_fpga_prod ||
+				p_init->fpga_version != n_fpga_ver ||
+				p_init->fpga_revision != n_fpga_rev)
+			continue;
+
+		nt_fpga_t *p_fpga = fpga_new();
+
+		fpga_init(p_fpga, p_init, p_fpga_info);
+		return p_fpga;
+	}
+
+	NT_LOG(ERR, NTHW,
+	       "FPGA Id 0x%" PRIX64 ": %04d: %d.%d: no match found\n", n_fpga_id,
+	       n_fpga_prod, n_fpga_ver, n_fpga_rev);
+
+	return NULL;
+}
+
+/*
+ * Print the supported FPGA list to fh_out: one "prod-ver-rev" line per
+ * entry, plus build time at detail_level != 0.
+ * Fix: time_t is not `long` on every platform, so passing it to %08lX
+ * was a format/argument mismatch (UB); cast explicitly.
+ */
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *fh_out, int detail_level)
+{
+	int i;
+
+	fprintf(fh_out, "\n"); /* start of records */
+	for (i = 0; i < p->mn_fpgas; i++) {
+		nt_fpga_prod_init_t *p_init = p->mpa_fpga_prod_init[i];
+
+		if (detail_level == 0) {
+			fprintf(fh_out, "%04d-%02d-%02d\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision);
+		} else {
+			time_t fpga_build_time = p_init->fpga_build_time;
+
+			/* ctime() output already ends in '\n', matching "NA\n" */
+			fprintf(fh_out, "%04d-%02d-%02d: 0x%08lX: %s\n",
+				p_init->fpga_product_id, p_init->fpga_version,
+				p_init->fpga_revision,
+				(unsigned long)fpga_build_time,
+				(fpga_build_time ? ctime(&fpga_build_time) :
+				 "NA\n"));
+		}
+	}
+	fprintf(fh_out, "\n"); /* end of records */
+	fflush(fh_out);
+}
+
+/* Dump the supported FPGA table to the debug log. */
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: fpgas=%d\n", __func__, p->mn_fpgas);
+	for (i = 0; i < p->mn_fpgas; i++) {
+		/* _unused: NT_LOG may compile to nothing in some builds */
+		nt_fpga_prod_init_t *p_init _unused = p->mpa_fpga_prod_init[i];
+		NT_LOG(DBG, NTHW, "%s: fpga=%d/%d: %04d-%02d-%02d\n", __func__,
+		       i, p->mn_fpgas, p_init->fpga_product_id, p_init->fpga_version,
+		       p_init->fpga_revision);
+	}
+}
+
+/*
+ * Fpga
+ */
+/* Allocate a zero-filled nt_fpga_t; NULL on allocation failure. */
+nt_fpga_t *fpga_new(void)
+{
+	/* calloc(1, n) is equivalent to the original malloc + memset(0) */
+	return calloc(1, sizeof(nt_fpga_t));
+}
+
+/*
+ * Scrub and free an nt_fpga_t.
+ * Fix: also release the product-parameter objects and the pointer arrays
+ * allocated in fpga_init(); previously they leaked. The module objects
+ * themselves are owned/deleted by fpga_delete_all().
+ */
+void fpga_delete(nt_fpga_t *p)
+{
+	int i;
+
+	if (p->mpa_params) {
+		for (i = 0; i < p->mn_params; i++)
+			param_delete(p->mpa_params[i]);
+		free(p->mpa_params);
+	}
+	free(p->mpa_modules); /* free(NULL) is a no-op */
+
+	memset(p, 0, sizeof(nt_fpga_t));
+	free(p);
+}
+
+/* Delete all module objects of the FPGA, then the FPGA itself. */
+void fpga_delete_all(nt_fpga_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_delete(p_mod);
+	}
+
+	fpga_delete(p);
+}
+
+/*
+ * Populate an nt_fpga_t from its generated product description: copy the
+ * identification fields, then instantiate all product parameters and all
+ * modules.
+ * NOTE(review): a failed malloc leaves mpa_params/mpa_modules NULL while
+ * mn_params/mn_modules stay non-zero; iterating callers would then crash.
+ * Confirm OOM policy upstream.
+ */
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info)
+{
+	int i;
+
+	p->p_fpga_info = p_fpga_info;
+	p->mp_init = fpga_prod_init;
+
+	p->m_item_id = fpga_prod_init->fpga_item_id;
+	p->m_product_id = fpga_prod_init->fpga_product_id;
+	p->m_fpga_version = fpga_prod_init->fpga_version;
+	p->m_fpga_revision = fpga_prod_init->fpga_revision;
+	p->m_fpga_patch_no = fpga_prod_init->fpga_patch_no;
+	p->m_fpga_build_no = fpga_prod_init->fpga_build_no;
+	p->m_fpga_build_time = fpga_prod_init->fpga_build_time;
+
+	p->mn_params = fpga_prod_init->nb_prod_params;
+
+	/* Instantiate one nt_param_t per generated product parameter. */
+	if (p->mn_params) {
+		p->mpa_params = malloc(p->mn_params * sizeof(nt_param_t *));
+		if (p->mpa_params) {
+			memset(p->mpa_params, 0,
+			       (p->mn_params * sizeof(nt_param_t *)));
+			for (i = 0; i < p->mn_params; i++) {
+				nt_param_t *p_param = param_new();
+
+				param_init(p_param, p,
+					   &fpga_prod_init->product_params[i]);
+				p->mpa_params[i] = p_param;
+			}
+		}
+	}
+
+	p->mn_modules = fpga_prod_init->nb_modules;
+
+	/* Instantiate one nt_module_t per generated module description. */
+	if (p->mn_modules) {
+		p->mpa_modules =
+			malloc(fpga_prod_init->nb_modules * sizeof(nt_module_t *));
+		if (p->mpa_modules) {
+			memset(p->mpa_modules, 0,
+			       (p->mn_modules * sizeof(nt_module_t *)));
+			for (i = 0; i < p->mn_modules; i++) {
+				nt_module_t *p_mod = module_new();
+
+				module_init(p_mod, p, &fpga_prod_init->modules[i]);
+				p->mpa_modules[i] = p_mod;
+			}
+		}
+	}
+}
+
+/* Set the FPGA debug mode and propagate it to every module. */
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		if (p_mod)
+			module_set_debug_mode(p_mod, n_debug_mode);
+	}
+}
+
+/* Find an instantiated module by id/instance; NULL when not present. */
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance)
+{
+	int i = 0;
+
+	while (i < p->mn_modules) {
+		nt_module_t *p_mod = p->mpa_modules[i++];
+
+		if (p_mod->m_mod_id == id && p_mod->m_instance == instance)
+			return p_mod;
+	}
+	return NULL;
+}
+
+/* True when the given module id/instance exists in this FPGA. */
+bool fpga_query(nt_fpga_t *p, int id, int instance)
+{
+	return (fpga_query_module(p, id, instance) != NULL);
+}
+
+/* Find the generated init record for a module id/instance; NULL if absent. */
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance)
+{
+	int i;
+
+	for (i = 0; i < p->mp_init->nb_modules; i++) {
+		nt_fpga_module_init_t *p_mod_init = &p->mp_init->modules[i];
+
+		if (p_mod_init->id == id && p_mod_init->instance == instance)
+			return p_mod_init;
+	}
+	return NULL;
+}
+
+/* Value of a product parameter, or n_default_value when not defined. */
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int n_default_value)
+{
+	int i;
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_param = p->mpa_params[i];
+
+		if (p_param->param_id == n_param_id)
+			return p_param->param_value;
+	}
+
+	return n_default_value;
+}
+
+/* FPGA product code (e.g. 9563). */
+int fpga_get_product_id(const nt_fpga_t *p)
+{
+	return p->m_product_id;
+}
+
+/* FPGA version number. */
+int fpga_get_fpga_version(const nt_fpga_t *p)
+{
+	return p->m_fpga_version;
+}
+
+/* FPGA revision number. */
+int fpga_get_fpga_revision(const nt_fpga_t *p)
+{
+	return p->m_fpga_revision;
+}
+
+/* Log the FPGA identification tuple at INFO level.
+ * (p is marked _unused because NT_LOG may compile to nothing.)
+ */
+void fpga_log_info(const nt_fpga_t *p _unused)
+{
+	NT_LOG(INF, NTHW, "FPGA: %d-%d-%d-%d-%d-%d (%08X)\n", p->m_item_id,
+	       p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->m_fpga_patch_no, p->m_fpga_build_no, p->m_fpga_build_time);
+}
+
+/* Debug-dump the FPGA: header line, then all params and all modules. */
+void fpga_dump(const nt_fpga_t *p)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d ver=%d.%d params=%d modules=%d\n",
+	       __func__, p->m_product_id, p->m_fpga_version, p->m_fpga_revision,
+	       p->mn_params, p->mn_modules);
+	fpga_dump_params(p);
+	fpga_dump_modules(p);
+}
+
+/* Debug-dump all product parameters of the FPGA. */
+void fpga_dump_params(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: params=%d\n", __func__, p->mn_params);
+
+	for (i = 0; i < p->mn_params; i++) {
+		nt_param_t *p_par = p->mpa_params[i];
+
+		param_dump(p_par);
+	}
+}
+
+/* Debug-dump all modules of the FPGA (recurses into registers/fields). */
+void fpga_dump_modules(const nt_fpga_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: modules=%d\n", __func__, p->mn_modules);
+
+	for (i = 0; i < p->mn_modules; i++) {
+		nt_module_t *p_mod = p->mpa_modules[i];
+
+		module_dump(p_mod);
+	}
+}
+
+/*
+ * Param
+ */
+/* Allocate a product-parameter object; param_init() fills it. */
+nt_param_t *param_new(void)
+{
+	nt_param_t *p = malloc(sizeof(nt_param_t));
+	return p;
+}
+
+/* Scrub and free a parameter object; NULL is tolerated. */
+void param_delete(nt_param_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_param_t));
+		free(p);
+	}
+}
+
+/* Bind a parameter object to its owner FPGA and copy id/value from the
+ * generated init record.
+ */
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init)
+{
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->param_id = p_init->param_id;
+	p->param_value = p_init->param_value;
+}
+
+/* Debug-dump one parameter (id and value). */
+void param_dump(const nt_param_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: id=%d value=%d\n", __func__, p->param_id,
+	       p->param_value);
+}
+
+/*
+ * Module
+ */
+/* Allocate a module object; module_init() is expected to fill it. */
+nt_module_t *module_new(void)
+{
+	return malloc(sizeof(nt_module_t));
+}
+
+/*
+ * Delete a module: delete all its register objects, then the module.
+ * Fix: also free the register pointer array allocated in module_init();
+ * previously it leaked.
+ */
+void module_delete(nt_module_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg)
+			register_delete(p_reg);
+	}
+	free(p->mpa_registers); /* free(NULL) is a no-op */
+	memset(p, 0, sizeof(nt_module_t));
+	free(p);
+}
+
+/*
+ * Populate a module from its generated init record: copy identification,
+ * bus and base address, inherit the owner's debug mode, then instantiate
+ * all register objects.
+ * NOTE(review): a failed malloc leaves mpa_registers NULL while
+ * mn_registers stays non-zero - same OOM caveat as fpga_init().
+ */
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga, nt_fpga_module_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_fpga;
+	p->mp_init = p_init;
+
+	p->m_mod_id = p_init->id;
+	p->m_instance = p_init->instance;
+
+	/* Copy debug mode from owner */
+	if (p->mp_owner)
+		p->m_debug_mode = p->mp_owner->m_debug_mode;
+
+	else
+		p->m_debug_mode = 0;
+
+	p->m_mod_def_id = p_init->def_id;
+	p->m_major_version = p_init->major_version;
+	p->m_minor_version = p_init->minor_version;
+	p->m_bus = p_init->bus_id;
+	p->m_addr_base = p_init->addr_base;
+
+	/* Instantiate one nt_register_t per generated register. */
+	p->mn_registers = p_init->nb_registers;
+	if (p->mn_registers) {
+		p->mpa_registers =
+			malloc(p->mn_registers * sizeof(nt_register_t *));
+		if (p->mpa_registers) {
+			memset(p->mpa_registers, 0,
+			       (p->mn_registers * sizeof(nt_register_t *)));
+			for (i = 0; i < p->mn_registers; i++) {
+				nt_register_t *p_reg = register_new();
+
+				register_init(p_reg, p, &p_init->registers[i]);
+				p->mpa_registers[i] = p_reg;
+			}
+		}
+	}
+}
+
+/*
+ * Convenience initializer: look up the generated init record by
+ * id/instance and initialize the module from it, then override the
+ * debug mode.
+ * Fix: guard against a failed lookup - module_init() dereferences the
+ * record unconditionally, so the original crashed on an unknown module.
+ */
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode)
+{
+	nt_fpga_module_init_t *p_init = fpga_lookup_init(p_fpga, mod_id, instance);
+
+	if (p_init == NULL) {
+		NT_LOG(ERR, NTHW, "Module %d instance %d not found in FPGA\n",
+		       mod_id, instance);
+		return;
+	}
+
+	module_init(p, p_fpga, p_init);
+
+	/* set debug mode after regular init... */
+	p->m_debug_mode = debug_mode;
+}
+
+/* Debug-dump a module header line, then all of its registers. */
+void module_dump(const nt_module_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s: id=%d inst=%d def=%d ver=%d.%d busid=%d base=0x%X regs=%d\n",
+	       __func__, p->m_mod_id, p->m_instance, p->m_mod_def_id,
+	       p->m_major_version, p->m_minor_version, p->m_bus, p->m_addr_base,
+	       p->mn_registers);
+	module_dump_registers(p);
+}
+
+/* Debug-dump every register object of the module. */
+void module_dump_registers(const nt_module_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s: regs=%d\n", __func__, p->mn_registers);
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		register_dump(p_reg);
+	}
+}
+
+/* Module major version from the generated description. */
+int module_get_major_version(const nt_module_t *p)
+{
+	return p->m_major_version;
+}
+
+/* Module minor version from the generated description. */
+int module_get_minor_version(const nt_module_t *p)
+{
+	return p->m_minor_version;
+}
+
+/* Major/minor packed as major<<32 | minor for easy comparison. */
+uint64_t module_get_version_packed64(const nt_module_t *p)
+{
+	return (((uint64_t)p->m_major_version & 0xFFFFFFFF) << 32) |
+	       (p->m_minor_version & 0xFFFFFFFF);
+}
+
+/* True when the module's version is at least major.minor. */
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version)
+{
+	if (p->m_major_version != major_version)
+		return p->m_major_version >= major_version;
+	return p->m_minor_version >= minor_version;
+}
+
+/* Linear search for a register by id; NULL when the module lacks it. */
+static nt_register_t *module_lookup_register(nt_module_t *p, uint32_t id)
+{
+	int i;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		if (p->mpa_registers[i]->m_id == id)
+			return p->mpa_registers[i];
+	}
+	return NULL;
+}
+
+/*
+ * Look up a register by id, logging an error when the module context is
+ * NULL or the register is not found (lookup misses are driver bugs).
+ * Returns NULL on failure.
+ */
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id)
+{
+	nt_register_t *p_register;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal module context for register %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_register = module_lookup_register(p, id);
+	if (!p_register) {
+		NT_LOG(ERR, NTHW, "Register %d not found in module: %s (%d)\n",
+		       id, nthw_fpga_mod_id_to_str(p->m_mod_id), p->m_mod_id);
+	}
+	return p_register;
+}
+
+/* Like module_get_register() but silent: NULL without logging. */
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id)
+{
+	return module_lookup_register(p, id);
+}
+
+/* Current debug mode of the module. */
+int module_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set debug mode on this module and propagate it to all its registers. */
+void module_set_debug_mode(nt_module_t *p, unsigned int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_registers; i++) {
+		nt_register_t *p_reg = p->mpa_registers[i];
+
+		if (p_reg != NULL)
+			register_set_debug_mode(p_reg, n_debug_mode);
+	}
+}
+
+/* Bus type id (BUS_TYPE_*) this module is accessed through. */
+int module_get_bus(const nt_module_t *p)
+{
+	return p->m_bus;
+}
+
+/* Base address of the module's register window on its bus. */
+uint32_t module_get_addr_base(const nt_module_t *p)
+{
+	return p->m_addr_base;
+}
+
+/* Log that a module is present in the FPGA but not handled by the driver.
+ * (Function name typo kept: it is part of the exported interface.)
+ * Fix: terminate the log line with '\n' like every other NT_LOG in this
+ * file.
+ */
+void module_unsuppported(const nt_module_t *p)
+{
+	NT_LOG(ERR, NTHW, "Module %d not supported\n", p->mp_init->id);
+}
+
+/*
+ * Register
+ */
+/* Allocate a register object; register_init() is expected to fill it. */
+nt_register_t *register_new(void)
+{
+	return malloc(sizeof(nt_register_t));
+}
+
+/*
+ * Delete a register: delete all field objects, the shadow/dirty buffers,
+ * then the register itself.
+ * Fix: also free the field pointer array allocated in register_init();
+ * previously it leaked. Redundant NULL guards before free() dropped
+ * (free(NULL) is a no-op).
+ */
+void register_delete(nt_register_t *p)
+{
+	int i;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field)
+			field_delete(p_field);
+	}
+	free(p->mpa_fields);
+	free(p->mp_shadow);
+	free(p->mp_dirty);
+
+	memset(p, 0, sizeof(nt_register_t));
+	free(p);
+}
+
+/*
+ * Populate a register from its generated init record: compute its
+ * absolute address and length in dwords, then instantiate all field
+ * objects plus the shadow-value and dirty-flag buffers (one entry per
+ * dword of register width).
+ * NOTE(review): the same OOM caveat as module_init() applies - failed
+ * mallocs leave mpa_fields/mp_shadow/mp_dirty NULL with counts set.
+ */
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init)
+{
+	int i;
+
+	p->mp_owner = p_module;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_addr_rel = p_init->addr_rel;
+	p->m_addr = p_module->m_addr_base + p_init->addr_rel;
+	p->m_type = p_init->type;
+	p->m_len =
+		((p_init->bw != (uint16_t)-1) ?
+		 ((p_init->bw + 31) >> 5) :
+		 1); /* Old P200 registers have no bw at register level - default to BW=-1 */
+	p->m_debug_mode = p_module->m_debug_mode;
+
+	p->mn_fields = p_init->nb_fields;
+	if (p->mn_fields) {
+		p->mpa_fields = malloc(p->mn_fields * sizeof(nt_field_t *));
+
+		if (p->mpa_fields) {
+			memset(p->mpa_fields, 0,
+			       (p->mn_fields * sizeof(nt_field_t *)));
+			for (i = 0; i < p->mn_fields; i++) {
+				nt_field_t *p_field = field_new();
+
+				field_init(p_field, p, &p_init->fields[i]);
+				p->mpa_fields[i] = p_field;
+			}
+
+			/* Shadow copy of the register value, one dword per len */
+			p->mp_shadow = malloc(p->m_len * sizeof(uint32_t));
+			if (p->mp_shadow) {
+				memset(p->mp_shadow, 0x00,
+				       (p->m_len * sizeof(uint32_t)));
+			}
+
+			/* Dirty flags, one per shadow dword */
+			p->mp_dirty = malloc(p->m_len * sizeof(bool));
+			if (p->mp_dirty) {
+				memset(p->mp_dirty, 0x00,
+				       (p->m_len * sizeof(bool)));
+			}
+		}
+	}
+}
+
+/* Debug-dump a register header line, then all of its fields. */
+void register_dump(const nt_register_t *p)
+{
+	NT_LOG(DBG, NTHW,
+	       "%s(id=%d type=%d addr=0x%08X addrrel=0x%08X len=%d bw=%d\n",
+	       __func__, p->m_id, p->m_type, p->m_addr, p->mn_addr_rel, p->m_len,
+	       p->mn_bit_width);
+	register_dump_fields(p);
+}
+
+/* Debug-dump every field object of the register. */
+void register_dump_fields(const nt_register_t *p)
+{
+	int i;
+
+	NT_LOG(DBG, NTHW, "%s(addr=0x%08X fields=%d\n", __func__, p->m_addr,
+	       p->mn_fields);
+	for (i = 0; i < p->mn_fields; i++)
+		field_dump(p->mpa_fields[i]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Absolute address of the register on its bus. */
+uint32_t register_get_address(const nt_register_t *p)
+{
+	return p->m_addr;
+}
+
+/* Reset every field of the register to its generated reset value. */
+void register_reset(const nt_register_t *p)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		p_field = p->mpa_fields[i];
+		if (p_field)
+			field_reset(p_field);
+	}
+}
+
+/* Linear search for a field by id; NULL when p is NULL or id is absent. */
+static nt_field_t *register_lookup_field(const nt_register_t *p, uint32_t id)
+{
+	int i;
+	nt_field_t *p_field = NULL;
+
+	if (!p)
+		return NULL;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		if (p->mpa_fields[i]->m_id == id) {
+			p_field = p->mpa_fields[i];
+			break;
+		}
+	}
+	return p_field;
+}
+
+/*
+ * Look up a field by id, logging an error when the register context is
+ * NULL or the field is not found. Returns NULL on failure.
+ */
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id)
+{
+	nt_field_t *p_field;
+
+	if (p == NULL) {
+		NT_LOG(ERR, NTHW, "Illegal register context for field %d\n",
+		       id);
+		return NULL;
+	}
+
+	p_field = register_lookup_field(p, id);
+	if (!p_field) {
+		NT_LOG(ERR, NTHW, "Field %d not found in module: %s (%d)\n", id,
+		       nthw_fpga_mod_id_to_str(p->mp_owner->m_mod_id),
+		       p->mp_owner->m_mod_id);
+	}
+	return p_field;
+}
+
+/* Like register_get_field() but silent: NULL without logging. */
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id)
+{
+	return register_lookup_field(p, id);
+}
+
+/* Register width in bits (may be (uint16_t)-1 for legacy registers). */
+int register_get_bit_width(const nt_register_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Register address relative to the module base. */
+uint32_t register_get_addr_rel(const nt_register_t *p)
+{
+	return p->mn_addr_rel;
+}
+
+/* Current debug mode.
+ * NOTE(review): parameter is nt_module_t despite the register_ prefix -
+ * looks like a copy/paste slip; signature left unchanged for callers.
+ */
+int register_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/*
+ * Set debug mode on this register and propagate it to all its fields.
+ * NOTE: the original comment advised against setting debug on fields
+ * (register-level dumps are typically enough), yet the implementation
+ * does propagate - behavior preserved as-is.
+ */
+void register_set_debug_mode(nt_register_t *p, unsigned int n_debug_mode)
+{
+	int i;
+
+	p->m_debug_mode = n_debug_mode;
+
+	for (i = 0; i < p->mn_fields; i++) {
+		nt_field_t *p_field = p->mpa_fields[i];
+
+		if (p_field != NULL)
+			field_set_debug_mode(p_field, n_debug_mode);
+	}
+}
+
+/*
+ * Read the register from the device into its shadow buffer.
+ * Fix: the original dereferenced p (addr/len/shadow) before the NULL
+ * tests, so the tests could never actually guard anything; make the
+ * precondition explicit up front instead.
+ */
+static int register_read_data(const nt_register_t *p)
+{
+	assert(p && p->mp_owner && p->mp_owner->mp_owner);
+
+	struct fpga_info_s *p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	assert(p_fpga_info);
+	assert(p_data);
+
+	return read_data(p_fpga_info, n_bus_type_id, addr, len, p_data);
+}
+
+/* As register_read_data(), passing through TSC timestamp slots.
+ * NOTE(review): p is dereferenced in the const initializers above the
+ * NULL test, so the test cannot actually guard a NULL p - same pattern
+ * as register_read_data(); verify callers never pass NULL.
+ */
+static int register_read_data_tsc(const nt_register_t *p, uint64_t *p_tsc1,
+				uint64_t *p_tsc2)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+
+	rc = read_data_tsc(p_fpga_info, n_bus_type_id, addr, len, p_data, p_tsc1, p_tsc2);
+
+	return rc;
+}
+
+/* Write cnt consecutive register images (len dwords each) from the
+ * shadow buffer to the device.
+ * NOTE(review): p is dereferenced before the NULL test - same pattern as
+ * register_read_data(); verify callers never pass NULL.
+ */
+static int register_write_data(const nt_register_t *p, uint32_t cnt)
+{
+	int rc = -1;
+
+	const int n_bus_type_id = module_get_bus(p->mp_owner);
+	const uint32_t addr = p->m_addr;
+	const uint32_t len = p->m_len;
+	uint32_t *const p_data = p->mp_shadow;
+
+	struct fpga_info_s *p_fpga_info = NULL;
+
+	if (p && p->mp_owner && p->mp_owner->mp_owner)
+		p_fpga_info = p->mp_owner->mp_owner->p_fpga_info;
+	assert(p_fpga_info);
+	assert(p_data);
+
+	rc = write_data(p_fpga_info, n_bus_type_id, addr, (len * cnt), p_data);
+
+	return rc;
+}
+
+/*
+ * Copy up to len dwords of the shadow value into p_data.
+ * len == (uint32_t)-1 means "the whole register"; larger requests are
+ * clamped to the register length. Does NOT read the device - call
+ * register_update() first for fresh data.
+ */
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+
+	assert(len <= p->m_len);
+	assert(p_data);
+
+	for (i = 0; i < len; i++)
+		p_data[i] = p->mp_shadow[i];
+}
+
+/* First dword of the shadow value (no device access). */
+uint32_t register_get_val32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/*
+ * Refresh the shadow buffer from the device (no-op for write-only
+ * registers), optionally logging the read when ON_READ debug is set.
+ */
+void register_update(const nt_register_t *p)
+{
+	if (p && p->m_type != REGISTER_TYPE_WO) {
+		const char *const p_dev_name _unused = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+
+		const char *const p_bus_name _unused = get_bus_name(n_bus_type_id);
+		const uint32_t addr _unused = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+
+		register_read_data(p);
+		if (p->m_debug_mode & ON_READ) {
+			uint32_t i = len;
+
+			uint32_t *ptr _unused = p_data;
+			NT_LOG(DBG, NTHW,
+			       "Register::read(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+			       p_dev_name, p_bus_name, addr, len);
+			while (i--)
+				NT_LOG(DBG, NTHW, " 0x%08X ", *ptr++);
+			NT_LOG(DBG, NTHW, ")\n");
+		}
+	}
+}
+
+/* Read the register from the device, then return its first dword. */
+uint32_t register_get_val_updated32(const nt_register_t *p)
+{
+	uint32_t val = 0;
+
+	register_update(p);
+	register_get_val(p, &val, 1);
+	return val;
+}
+
+/* Mark every shadow dword dirty so the next flush writes it all out. */
+void register_make_dirty(nt_register_t *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->m_len; i++)
+		p->mp_dirty[i] = true;
+}
+
+/*
+ * Copy up to len dwords from p_data into the shadow buffer (no device
+ * access; use register_flush() to write out).
+ * Fix: clamp BEFORE asserting - the len == (uint32_t)-1 "whole register"
+ * convention (and any len > m_len) tripped the assertion in debug builds
+ * while the clamp below it was unreachable. register_get_val() already
+ * clamps first; this now matches.
+ */
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	assert(p_data);
+
+	if (len == (uint32_t)-1 || len > p->m_len)
+		len = p->m_len;
+	assert(len <= p->m_len);
+
+	if (p->mp_shadow != p_data)
+		memcpy(p->mp_shadow, p_data, (len * sizeof(uint32_t)));
+}
+
+/* Set the shadow value and immediately write it to the device. */
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data, uint32_t len)
+{
+	register_set_val(p, p_data, len);
+	register_flush(p, 1);
+}
+
+/*
+ * Write cnt consecutive register images from the shadow buffer to the
+ * device (no-op for read-only registers), optionally logging when
+ * ON_WRITE debug is set, then clear the dirty flags.
+ * NOTE(review): the dirty loop clears cnt entries but mp_dirty holds
+ * m_len entries - confirm cnt <= m_len at all call sites.
+ * NOTE(review): the inner debug block declares an `i` that shadows the
+ * outer one.
+ */
+void register_flush(const nt_register_t *p, uint32_t cnt)
+{
+	int rc;
+
+	if (p->m_type != REGISTER_TYPE_RO) {
+		const char *const p_dev_name = "NA";
+		const int n_bus_type_id = module_get_bus(p->mp_owner);
+		const char *p_bus_name = get_bus_name(n_bus_type_id);
+		const uint32_t addr = p->m_addr;
+		const uint32_t len = p->m_len;
+		uint32_t *const p_data = p->mp_shadow;
+		uint32_t i;
+
+		assert(len * cnt <= 256);
+
+		if (p->m_debug_mode & ON_WRITE) {
+			uint32_t i = len * cnt;
+			uint32_t *ptr = p_data;
+			char *tmp_string =
+				ntlog_helper_str_alloc("Register::write");
+			ntlog_helper_str_add(tmp_string,
+					     "(Dev: %s, Bus: %s, Addr: 0x%08X, _cnt: %d, Data:",
+					     p_dev_name, p_bus_name, addr, i);
+			while (i--) {
+				ntlog_helper_str_add(tmp_string, " 0x%08X",
+						     *ptr++);
+			}
+			ntlog_helper_str_add(tmp_string, ")\n");
+			NT_LOG(DBG, NTHW, "%s", tmp_string);
+			ntlog_helper_str_free(tmp_string);
+		}
+
+		rc = register_write_data(p, cnt);
+
+		if (rc)
+			NT_LOG(ERR, NTHW, "Register write error %d\n", rc);
+
+		for (i = 0; i < cnt; i++)
+			p->mp_dirty[i] = false;
+	}
+}
+
+/* Trigger a register read and report the two timestamp counter samples. */
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2)
+{
+	register_read_data_tsc(p, tsc1, tsc2);
+}
+
+/* Zero the whole shadow copy and mark it dirty for the next flush. */
+void register_clr(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0, p->m_len * sizeof(*p->mp_shadow));
+	register_make_dirty(p);
+}
+
+/* Set every bit of the shadow copy and mark it dirty for the next flush. */
+void register_set(nt_register_t *p)
+{
+	memset(p->mp_shadow, 0xff, p->m_len * sizeof(*p->mp_shadow));
+	register_make_dirty(p);
+}
+
+/*
+ * Field
+ */
+/*
+ * Allocate an uninitialized field object; may return NULL on OOM.
+ * The caller is expected to run field_init() on the result.
+ */
+nt_field_t *field_new(void)
+{
+	return malloc(sizeof(nt_field_t));
+}
+
+/*
+ * Release a field object.  NULL is accepted and ignored, matching the
+ * defensive style of the other delete helpers (e.g. nthw_rac_delete);
+ * the original unconditionally memset() a possibly-NULL pointer.
+ * The object is scrubbed before free to make use-after-free visible.
+ */
+void field_delete(nt_field_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nt_field_t));
+		free(p);
+	}
+}
+
+/*
+ * Initialize a field descriptor from its FPGA init record.
+ *
+ * Precomputes the word/bit position plus the three masks consumed by
+ * field_get_val()/field_set_val():
+ *  - m_front_mask:  field bits inside its first 32-bit word (position-aligned)
+ *  - m_body_length: number of whole 32-bit words after the front
+ *  - m_tail_mask:   field bits inside the last, partial word
+ */
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init)
+{
+	p->mp_owner = p_reg;
+
+	p->m_debug_mode = p_reg->m_debug_mode;
+
+	p->m_id = p_init->id;
+	p->mn_bit_width = p_init->bw;
+	p->mn_bit_pos_low = p_init->low;
+	p->m_reset_val = (uint32_t)p_init->reset_val;
+	p->m_first_word = p_init->low / 32;
+	p->m_first_bit = p_init->low % 32;
+	p->m_front_mask = 0;
+	p->m_body_length = 0;
+	p->mn_words = (p_init->bw + 0x1f) / 0x20;
+	p->m_tail_mask = 0;
+
+	{
+		int bits_remaining = p_init->bw;
+		int front_mask_length = 32 - p->m_first_bit;
+
+		if (front_mask_length > bits_remaining)
+			front_mask_length = bits_remaining;
+		bits_remaining -= front_mask_length;
+
+		p->m_front_mask = (uint32_t)(((1ULL << front_mask_length) - 1)
+					   << p->m_first_bit);
+
+		p->m_body_length = bits_remaining / 32;
+		bits_remaining -= p->m_body_length * 32;
+		/* 1ULL avoids signed-shift UB when bits_remaining == 31 */
+		p->m_tail_mask = (uint32_t)((1ULL << bits_remaining) - 1);
+
+		if (p->m_debug_mode >= 0x100) {
+			NT_LOG(DBG, NTHW,
+			       "%s: fldid=%08d: [%08d:%08d] %08d/%08d: (%08d,%08d) (0x%08X,%08d,0x%08X)\n",
+			       __func__, p_init->id, p_init->low,
+			       (p_init->low + p_init->bw), p_init->bw,
+			       ((p_init->bw + 31) / 32), p->m_first_word,
+			       p->m_first_bit, p->m_front_mask, p->m_body_length,
+			       p->m_tail_mask);
+		}
+	}
+}
+
+/*
+ * Return the debug trace level.  NOTE(review): the parameter type is
+ * nt_module_t, not nt_field_t — looks like a copy/paste of
+ * module_get_debug_mode(); the header declares it the same way, so the
+ * signature is kept as-is.
+ */
+int field_get_debug_mode(const nt_module_t *p)
+{
+	return p->m_debug_mode;
+}
+
+/* Set the field's debug trace level. */
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode)
+{
+	p->m_debug_mode = n_debug_mode;
+}
+
+/* Width of the field in bits. */
+int field_get_bit_width(const nt_field_t *p)
+{
+	return p->mn_bit_width;
+}
+
+/* Absolute position of the field's lowest bit within the register. */
+int field_get_bit_pos_low(const nt_field_t *p)
+{
+	return p->mn_bit_pos_low;
+}
+
+/* Absolute position of the field's highest bit within the register. */
+int field_get_bit_pos_high(const nt_field_t *p)
+{
+	return field_get_bit_pos_low(p) + field_get_bit_width(p) - 1;
+}
+
+/*
+ * Mask of the field's bits within its first shadow word, position-aligned
+ * (i.e. shifted up by the field's in-word bit offset).
+ * NOTE(review): for fields spanning several words this covers only the
+ * front word — confirm callers use it on single-word fields only.
+ */
+uint32_t field_get_mask(const nt_field_t *p)
+{
+	return p->m_front_mask;
+}
+
+/* Load the field's configured reset value into the register shadow. */
+void field_reset(const nt_field_t *p)
+{
+	field_set_val32(p, (uint32_t)p->m_reset_val);
+}
+
+/*
+ * Value-aligned (right-shifted) mask of the field's bits in its first word.
+ *
+ * Shift by m_first_bit (the in-word bit offset) rather than the absolute
+ * mn_bit_pos_low: for fields starting at bit 32 or above, the original
+ * shift count reached or exceeded the 32-bit operand width, which is
+ * undefined behaviour in C.  For fields below bit 32 the two offsets are
+ * identical, so visible behaviour is unchanged there.
+ */
+uint32_t field_get_val_mask(const nt_field_t *p)
+{
+	return (p->m_front_mask >> p->m_first_bit);
+}
+
+/* Reset value of the field as a 32-bit quantity. */
+uint32_t field_get_reset_val(const nt_field_t *p)
+{
+	return (uint32_t)p->m_reset_val;
+}
+
+/*
+ * Extract the field's value from the owning register's shadow into p_data.
+ * len must equal the field's word count (mn_words); the output is
+ * right-aligned (bit 0 of p_data[0] is the field's lowest bit).
+ * No hardware access is performed.
+ * NOTE(review): using w32[0]/w32[1] as the low/high halves of w64 assumes
+ * a little-endian host — confirm for big-endian targets.
+ */
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	/* 64-bit window used to shift bits across 32-bit word boundaries */
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front: keep only the field's bits of the first word */
+	buf.w32[0] = p->mp_owner->mp_shadow[shadow_index++] & p->m_front_mask;
+
+	/* handle body: align, emit one output word, pre-shift for the next */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++];
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		p_data[data_index++] = buf.w32[0];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	}
+
+	/* handle tail: masked last partial word, or zero if none */
+	if (p->m_tail_mask)
+		buf.w32[1] = p->mp_owner->mp_shadow[shadow_index++] & p->m_tail_mask;
+
+	else
+		buf.w32[1] = 0;
+	buf.w64 = buf.w64 >> (p->m_first_bit);
+	p_data[data_index++] = buf.w32[0];
+	if (data_index < p->mn_words)
+		p_data[data_index++] = buf.w32[1];
+}
+
+/*
+ * Insert a right-aligned value from p_data into the owning register's
+ * shadow and mark the register dirty.  len must equal the field's word
+ * count (mn_words).  No hardware access is performed.
+ * NOTE(review): using w32[0]/w32[1] as the low/high halves of w64 assumes
+ * a little-endian host — confirm for big-endian targets.
+ */
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	uint32_t i;
+	uint32_t data_index = 0;
+	uint32_t shadow_index = p->m_first_word;
+
+	/* 64-bit window used to shift bits across 32-bit word boundaries */
+	union {
+		uint32_t w32[2];
+		uint64_t w64;
+	} buf;
+
+	(void)len;
+	assert(len == p->mn_words);
+
+	/* handle front: merge first input word under the front mask */
+	buf.w32[0] = 0;
+	buf.w32[1] = p_data[data_index++];
+	buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+	p->mp_owner->mp_shadow[shadow_index] =
+		(p->mp_owner->mp_shadow[shadow_index] & ~p->m_front_mask) |
+		(buf.w32[0] & p->m_front_mask);
+	shadow_index++;
+
+	/* handle body: whole 32-bit words are overwritten outright */
+	for (i = 0; i < p->m_body_length; i++) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		assert(data_index < len);
+		buf.w32[1] = p_data[data_index++];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index++] = buf.w32[0];
+	}
+
+	/* handle tail: merge remaining bits under the tail mask */
+	if (p->m_tail_mask) {
+		buf.w64 = buf.w64 >> (p->m_first_bit);
+		if (data_index < len)
+			buf.w32[1] = p_data[data_index];
+		buf.w64 = buf.w64 >> (32 - p->m_first_bit);
+		p->mp_owner->mp_shadow[shadow_index] =
+			(p->mp_owner->mp_shadow[shadow_index] & ~p->m_tail_mask) |
+			(buf.w32[0] & p->m_tail_mask);
+	}
+
+	register_make_dirty(p->mp_owner);
+}
+
+/* Write the field value into the shadow, then flush the owning register. */
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data, uint32_t len)
+{
+	field_set_val(p, p_data, len);
+	register_flush(p->mp_owner, 1);
+}
+
+/* Read the field's current (shadow) value as a single 32-bit word. */
+uint32_t field_get_val32(const nt_field_t *p)
+{
+	uint32_t n_val = 0;
+
+	field_get_val(p, &n_val, 1);
+
+	return n_val;
+}
+
+/*
+ * Refresh the owning register from hardware, then return the field's
+ * value as a single 32-bit word.
+ */
+uint32_t field_get_updated(const nt_field_t *p)
+{
+	uint32_t n_val = 0;
+
+	field_update_register(p);
+	field_get_val(p, &n_val, 1);
+
+	return n_val;
+}
+
+/* Trigger a read of the owning register, capturing before/after TSC values. */
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2)
+{
+	register_do_read_trig_ts(p->mp_owner, tsc1, tsc2);
+}
+
+/* Refresh the owning register's shadow from hardware. */
+void field_update_register(const nt_field_t *p)
+{
+	register_update(p->mp_owner);
+}
+
+/* Write the owning register's shadow out to hardware once. */
+void field_flush_register(const nt_field_t *p)
+{
+	register_flush(p->mp_owner, 1);
+}
+
+/* Write a single 32-bit value into the field's shadow (no flush). */
+void field_set_val32(const nt_field_t *p, uint32_t val)
+{
+	uint32_t n_tmp = val;
+
+	field_set_val(p, &n_tmp, 1);
+}
+
+/* Write a single 32-bit value into the field and flush the register. */
+void field_set_val_flush32(const nt_field_t *p, uint32_t val)
+{
+	field_set_val_flush(p, &val, 1);
+}
+
+/* Clear every bit of the field in the shadow (single-word fields only). */
+void field_clr_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, 0U);
+}
+
+/* Clear the field and immediately flush the owning register. */
+void field_clr_flush(const nt_field_t *p)
+{
+	field_clr_all(p);
+	field_flush_register(p);
+}
+
+/* Set every bit of the field in the shadow (single-word fields only). */
+void field_set_all(const nt_field_t *p)
+{
+	assert(p->m_body_length == 0);
+	field_set_val32(p, ~0);
+}
+
+/* Set the whole field and immediately flush the owning register. */
+void field_set_flush(const nt_field_t *p)
+{
+	field_set_all(p);
+	field_flush_register(p);
+}
+
+/* Wait conditions accepted by field_wait_cond32(). */
+enum field_match {
+	FIELD_MATCH_CLR_ALL,	/* every field bit clear */
+	FIELD_MATCH_SET_ALL,	/* every field bit set */
+	FIELD_MATCH_CLR_ANY,	/* at least one field bit clear */
+	FIELD_MATCH_SET_ANY,	/* at least one field bit set */
+};
+
+/*
+ * Poll the field until its value matches the requested condition.
+ * Returns 0 on match, -1 when the iteration budget is exhausted.
+ * Passing -1 for iterations/interval selects the defaults
+ * (10000 iterations / 100 usec).
+ */
+static int field_wait_cond32(const nt_field_t *p, enum field_match e_match,
+			    int n_poll_iterations, int n_poll_interval)
+{
+	/* 1ULL: a 32-bit-wide field would otherwise shift a signed 1 by 32 (UB) */
+	const uint32_t n_mask = (uint32_t)((1ULL << p->mn_bit_width) - 1);
+
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100; /* usec */
+
+	if (p->m_debug_mode) {
+		const char *const p_cond_name _unused =
+			((e_match == FIELD_MATCH_SET_ALL) ?
+			 "SetAll" :
+			 ((e_match == FIELD_MATCH_CLR_ALL) ?
+			  "ClrAll" :
+			  ((e_match == FIELD_MATCH_CLR_ANY) ?
+			   "ClrAny" :
+			   "SetAny")));
+		const char *const p_dev_name _unused = "NA";
+		const char *const p_bus_name _unused =
+			get_bus_name(module_get_bus(p->mp_owner->mp_owner));
+		uint32_t n_reg_addr _unused = register_get_address(p->mp_owner);
+
+		/* position-aligned field mask, for the trace line only */
+		uint32_t n_reg_mask _unused =
+			(uint32_t)(((1ULL << p->mn_bit_width) - 1) << p->mn_bit_pos_low);
+
+		NT_LOG(DBG, NTHW,
+		       "Register::Field::wait%s32(Dev: %s, Bus: %s, Addr: 0x%08X, Mask: 0x%08X, Iterations: %d, Interval: %d)\n",
+		       p_cond_name, p_dev_name, p_bus_name, n_reg_addr, n_reg_mask,
+		       n_poll_iterations, n_poll_interval);
+	}
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (e_match == FIELD_MATCH_SET_ANY && val != 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_SET_ALL && val == n_mask) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ALL && val == 0) {
+			return 0;
+		} else if (e_match == FIELD_MATCH_CLR_ANY) {
+			/*
+			 * "any bit clear": val is right-aligned, so compare it
+			 * against the value-aligned full-field mask.  The
+			 * original compared against field_get_mask(), which is
+			 * position-aligned and could never match for fields
+			 * that do not start at bit 0.
+			 */
+			if (val != n_mask)
+				return 0;
+		}
+
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/* Poll until every field bit is set; 0 on success, -1 on timeout. */
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until every field bit is clear; 0 on success, -1 on timeout. */
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ALL, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is set; 0 on success, -1 on timeout. */
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_SET_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/* Poll until at least one field bit is clear; 0 on success, -1 on timeout. */
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval)
+{
+	return field_wait_cond32(p, FIELD_MATCH_CLR_ANY, n_poll_iterations,
+				n_poll_interval);
+}
+
+/*
+ * Poll the field until its value equals (n_wait_cond_value &
+ * n_wait_cond_mask); returns 0 on match, -1 on timeout.
+ * -1 for iterations/interval selects defaults (10000 / 100 usec).
+ * NOTE(review): the comparison uses the raw field value, not
+ * (val & n_wait_cond_mask) — bits outside the mask must read as zero for
+ * a match.  Confirm this is what callers expect.
+ */
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval)
+{
+	if (n_poll_iterations == -1)
+		n_poll_iterations = 10000;
+	if (n_poll_interval == -1)
+		n_poll_interval = 100;
+
+	while (true) {
+		uint32_t val = field_get_updated(p);
+
+		if (val == (n_wait_cond_value & n_wait_cond_mask))
+			break;
+		n_poll_iterations--;
+		if (n_poll_iterations <= 0)
+			return -1;
+		NT_OS_WAIT_USEC(n_poll_interval);
+	}
+	return 0;
+}
+
+/*
+ * Log the field's static layout: id, bit span, width, word count and
+ * reset value.  NOTE(review): p is marked _unused yet is referenced in
+ * the log call — presumably the marker only matters in builds where
+ * NT_LOG compiles away; confirm.
+ */
+void field_dump(const nt_field_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: %02d: %X\n", __func__,
+	       p->m_id, p->mn_bit_pos_low, (p->mn_bit_pos_low + p->mn_bit_width),
+	       p->mn_bit_width, p->mn_words, p->m_reset_val);
+}
+
+/*
+ * Log the field's current (shadow) value in hex, most-significant word
+ * first.  The on-stack buffer holds at most 32 words; assert that the
+ * field fits instead of letting field_get_val() overrun the buffer.
+ */
+void field_dump_val(const nt_field_t *p)
+{
+	int i;
+	uint32_t buf[32];
+
+	assert(p->mn_words <= sizeof(buf) / sizeof(buf[0]));
+	field_get_val(p, buf, p->mn_words);
+	NT_LOG(DBG, NTHW, " @%d:", p->m_first_bit + p->m_first_word * 32);
+	NT_LOG(DBG, NTHW, "%X", buf[p->mn_words - 1]);
+	for (i = p->mn_words - 1; i > 0; i--)
+		NT_LOG(DBG, NTHW, "%08X", buf[i - 1]);
+	NT_LOG(DBG, NTHW, "\n");
+}
+
+/* Log a field init record: id, bit span, width and 64-bit reset value. */
+void field_dump_init(const nt_fpga_field_init_t *p _unused)
+{
+	NT_LOG(DBG, NTHW, "%s: %02d: %02d %02d %02d: 0x%" PRIX64 "\n", __func__,
+	       p->id, p->low, p->low + p->bw, p->bw, p->reset_val);
+}
+
+/*
+ * nthw fpga model helpers
+ */
+
+/*
+ * Build an FPGA model instance matching the given packed FPGA ident.
+ * Returns NULL when the ident matches no supported FPGA image, or when
+ * the temporary FPGA manager cannot be allocated (the original passed an
+ * unchecked fpga_mgr_new() result straight into fpga_mgr_init()).
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident)
+{
+	nt_fpga_mgr_t *p_fpga_mgr = NULL;
+	nt_fpga_t *p_fpga = NULL;
+	int n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id, n_fpga_rev_id;
+	char s_fpga_prod_ver_rev_str[32];
+
+	p_fpga_mgr = fpga_mgr_new();
+	if (p_fpga_mgr == NULL) {
+		NT_LOG(ERR, NTHW, "%s: FPGA manager allocation failed\n",
+		       __func__);
+		return NULL;
+	}
+	fpga_mgr_init(p_fpga_mgr);
+	p_fpga = fpga_mgr_query_fpga(p_fpga_mgr, n_fpga_ident, p_fpga_info);
+
+	n_fpga_type_id = FPGAID_TO_PRODUCTTYPE(n_fpga_ident);
+	n_fpga_prod_id = FPGAID_TO_PRODUCTCODE(n_fpga_ident);
+	n_fpga_ver_id = FPGAID_TO_VERSIONCODE(n_fpga_ident);
+	n_fpga_rev_id = FPGAID_TO_REVISIONCODE(n_fpga_ident);
+
+	snprintf(s_fpga_prod_ver_rev_str, sizeof(s_fpga_prod_ver_rev_str),
+		 "%04d-%04d-%02d-%02d", n_fpga_type_id, n_fpga_prod_id, n_fpga_ver_id,
+		 n_fpga_rev_id);
+
+	if (p_fpga == NULL) {
+		NT_LOG(ERR, NTHW, "%s: no match for FPGA: %s\n", __func__,
+		       s_fpga_prod_ver_rev_str);
+		/* do not return here... */
+	}
+
+	fpga_mgr_delete(p_fpga_mgr);
+	p_fpga_mgr = NULL;
+
+	return p_fpga;
+}
+
+/* Convenience lookup of a module instance in the FPGA model. */
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance)
+{
+	return fpga_query_module(p_fpga, n_mod, n_instance);
+}
+
+/* Convenience lookup of a register within a module. */
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg)
+{
+	return module_get_register(p_mod, n_reg);
+}
+
+/* Convenience lookup of a field within a register. */
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld)
+{
+	return register_get_field(p_reg, n_fld);
+}
diff --git a/drivers/net/ntnic/nthw/nthw_fpga_model.h b/drivers/net/ntnic/nthw/nthw_fpga_model.h
new file mode 100644
index 0000000000..b00b7b6cfa
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_fpga_model.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_FPGA_MODEL_H__
+#define __NTHW_FPGA_MODEL_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "fpga_model.h"
+
+#ifndef FPGAID_TO_PRODUCTCODE
+/*
+ * Decode the packed 64-bit FPGA ident:
+ * bits [39:32] product type, [31:16] product code,
+ * bits [15:8] version, [7:0] revision.
+ */
+#define FPGAID_TO_PRODUCTTYPE(fpga_id) ((uint16_t)((fpga_id) >> 32) & 0xFF)
+#define FPGAID_TO_PRODUCTCODE(fpga_id) ((uint16_t)((fpga_id) >> 16) & 0xFFFF)
+#define FPGAID_TO_VERSIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 8 & 0xFF))
+#define FPGAID_TO_REVISIONCODE(fpga_id) ((uint16_t)((fpga_id) >> 0 & 0xFF))
+#endif
+
+/* Pack a major/minor version pair into one monotonically-ordered u64. */
+#define VERSION_PACKED64(_major_, _minor_) \
+	((((uint64_t)(_major_) & 0xFFFFFFFF) << 32) | ((_minor_) & 0xFFFFFFFF))
+
+/* Debug trace selector; ON_READ/ON_WRITE are tested as bit flags. */
+enum debug_mode { NO_DEBUG, ON_READ, ON_WRITE };
+
+/* Bus a module's registers are reached through. */
+enum nthw_bus_type {
+	NTHW_BUS_UNKNOWN,
+	NTHW_BUS_BAR,
+	NTHW_BUS_PCI,
+	NTHW_BUS_NMB,
+	NTHW_BUS_NDM,
+	NTHW_BUS_RAB0,
+	NTHW_BUS_RAB1,
+	NTHW_BUS_RAB2
+};
+
+struct nt_fpga_s;
+
+struct nt_param_s;
+
+struct nt_module_s;
+
+struct nt_register_s;
+
+struct nt_field_s;
+
+/* Registry of all FPGA product definitions known to the driver. */
+struct nt_fpga_mgr_s {
+	int mn_fpgas;	/* number of entries in mpa_fpga_prod_init */
+	struct nt_fpga_prod_init **mpa_fpga_prod_init;
+};
+
+typedef struct nt_fpga_mgr_s nt_fpga_mgr_t;
+
+/* Model of one FPGA image: identity, product parameters and modules. */
+struct nt_fpga_s {
+	struct fpga_info_s *p_fpga_info;
+
+	/* identity of the FPGA build */
+	int m_item_id;
+	int m_product_id;
+	int m_fpga_version;
+	int m_fpga_revision;
+	int m_fpga_patch_no;
+	int m_fpga_build_no;
+	uint32_t m_fpga_build_time;
+
+	/* product parameters */
+	int mn_params;
+	struct nt_param_s **mpa_params;
+
+	/* modules instantiated in this image */
+	int mn_modules;
+	struct nt_module_s **mpa_modules;
+
+	nt_fpga_prod_init_t *mp_init;	/* static init record this model was built from */
+
+	int m_debug_mode;
+};
+
+typedef struct nt_fpga_s nt_fpga_t;
+
+/* One product parameter (id/value pair) attached to an FPGA model. */
+struct nt_param_s {
+	nt_fpga_t *mp_owner;
+
+	int param_id;
+	int param_value;
+
+	nt_fpga_prod_param_t *mp_init;
+};
+
+typedef struct nt_param_s nt_param_t;
+
+/* One module instance in the FPGA: id/version, bus location, registers. */
+struct nt_module_s {
+	nt_fpga_t *mp_owner;
+
+	int m_mod_id;
+
+	int m_instance;	/* instance number when a module occurs multiple times */
+
+	int m_mod_def_id;
+	int m_major_version;
+	int m_minor_version;
+
+	int m_bus;	/* bus type id, resolved via get_bus_name() */
+	uint32_t m_addr_base;
+
+	int m_debug_mode;
+
+	int mn_registers;
+	struct nt_register_s **mpa_registers;
+
+	nt_fpga_module_init_t *mp_init;
+};
+
+typedef struct nt_module_s nt_module_t;
+
+/* One register of a module: address, layout, shadow copy and fields. */
+struct nt_register_s {
+	nt_module_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;
+	uint32_t mn_addr_rel;	/* presumably module-relative address — see register_get_addr_rel() */
+	uint32_t m_addr;	/* bus address used when reading/writing */
+	uint32_t m_type;	/* access type; REGISTER_TYPE_RO suppresses flush */
+	uint32_t m_len;		/* register length in 32-bit words */
+
+	int m_debug_mode;
+
+	int mn_fields;
+	struct nt_field_s **mpa_fields;
+
+	uint32_t *mp_shadow;	/* m_len-word software copy of the register */
+	bool *mp_dirty;		/* per-word "shadow differs from HW" flags */
+
+	nt_fpga_register_init_t *mp_init;
+};
+
+typedef struct nt_register_s nt_register_t;
+
+/* One bit-field of a register; layout precomputed by field_init(). */
+struct nt_field_s {
+	nt_register_t *mp_owner;
+
+	uint32_t m_id;
+
+	uint32_t mn_bit_width;	/* field width in bits */
+	uint32_t mn_bit_pos_low;	/* absolute low bit position in the register */
+	uint32_t m_reset_val;
+	uint32_t m_first_word;	/* mn_bit_pos_low / 32 */
+	uint32_t m_first_bit;	/* mn_bit_pos_low % 32 */
+	uint32_t m_front_mask;	/* field bits within the first shadow word */
+	uint32_t m_body_length;	/* whole 32-bit words covered after the front */
+	uint32_t mn_words;	/* 32-bit words needed to hold the value */
+	uint32_t m_tail_mask;	/* field bits within the last, partial word */
+
+	int m_debug_mode;
+
+	nt_fpga_field_init_t *mp_init;
+};
+
+typedef struct nt_field_s nt_field_t;
+
+nt_fpga_mgr_t *fpga_mgr_new(void);
+void fpga_mgr_init(nt_fpga_mgr_t *p);
+void fpga_mgr_delete(nt_fpga_mgr_t *p);
+nt_fpga_t *fpga_mgr_query_fpga(nt_fpga_mgr_t *p, uint64_t n_fpga_id,
+			     struct fpga_info_s *p_fpga_info);
+
+void fpga_mgr_log_dump(nt_fpga_mgr_t *p);
+void fpga_mgr_show(nt_fpga_mgr_t *p, FILE *out, int detail_level);
+
+nt_fpga_t *fpga_new(void);
+void fpga_delete(nt_fpga_t *p);
+void fpga_delete_all(nt_fpga_t *p);
+void fpga_init(nt_fpga_t *p, nt_fpga_prod_init_t *fpga_prod_init,
+	       struct fpga_info_s *p_fpga_info);
+
+int fpga_get_product_param(const nt_fpga_t *p, const int n_param_id,
+			 const int default_value);
+int fpga_get_product_id(const nt_fpga_t *p);
+int fpga_get_fpga_version(const nt_fpga_t *p);
+int fpga_get_fpga_revision(const nt_fpga_t *p);
+nt_module_t *fpga_query_module(const nt_fpga_t *p, int id, int instance);
+nt_fpga_module_init_t *fpga_lookup_init(nt_fpga_t *p, int id, int instance);
+bool fpga_query(nt_fpga_t *p, int id, int instance);
+void fpga_set_debug_mode(nt_fpga_t *p, int n_debug_mode);
+
+void fpga_log_info(const nt_fpga_t *p);
+void fpga_dump(const nt_fpga_t *p);
+void fpga_dump_params(const nt_fpga_t *p);
+void fpga_dump_modules(const nt_fpga_t *p);
+
+nt_param_t *param_new(void);
+void param_delete(nt_param_t *p);
+void param_init(nt_param_t *p, nt_fpga_t *p_fpga, nt_fpga_prod_param_t *p_init);
+
+void param_dump(const nt_param_t *p);
+
+nt_module_t *module_new(void);
+void module_delete(nt_module_t *p);
+void module_init(nt_module_t *p, nt_fpga_t *p_fpga,
+		 nt_fpga_module_init_t *p_init);
+void module_init2(nt_module_t *p, nt_fpga_t *p_fpga, int mod_id, int instance,
+		  int debug_mode);
+
+int module_get_major_version(const nt_module_t *p);
+int module_get_minor_version(const nt_module_t *p);
+uint64_t module_get_version_packed64(const nt_module_t *p);
+bool module_is_version_newer(const nt_module_t *p, int major_version,
+			   int minor_version);
+
+int module_get_bus(const nt_module_t *p);
+nt_register_t *module_get_register(nt_module_t *p, uint32_t id);
+nt_register_t *module_query_register(nt_module_t *p, uint32_t id);
+int module_get_debug_mode(const nt_module_t *p);
+void module_set_debug_mode(nt_module_t *p, unsigned int debug_mode);
+uint32_t module_get_addr_base(const nt_module_t *p);
+void module_unsuppported(const nt_module_t *p);
+
+void module_dump(const nt_module_t *p);
+void module_dump_registers(const nt_module_t *p);
+
+nt_register_t *register_new(void);
+void register_delete(nt_register_t *p);
+void register_init(nt_register_t *p, nt_module_t *p_module,
+		   nt_fpga_register_init_t *p_init);
+
+nt_field_t *register_get_field(const nt_register_t *p, uint32_t id);
+nt_field_t *register_query_field(const nt_register_t *p, uint32_t id);
+
+uint32_t register_get_address(const nt_register_t *p);
+uint32_t register_get_addr_rel(const nt_register_t *p);
+int register_get_bit_width(const nt_register_t *p);
+int register_get_debug_mode(const nt_module_t *p);
+void register_set_debug_mode(nt_register_t *p, unsigned int debug_mode);
+
+void register_get_val(const nt_register_t *p, uint32_t *p_data, uint32_t len);
+uint32_t register_get_val32(const nt_register_t *p);
+uint32_t register_get_val_updated32(const nt_register_t *p);
+
+void register_set_val(nt_register_t *p, const uint32_t *p_data, uint32_t len);
+void register_set_val_flush(nt_register_t *p, const uint32_t *p_data,
+			  uint32_t len);
+
+void register_make_dirty(nt_register_t *p);
+void register_update(const nt_register_t *p);
+void register_reset(const nt_register_t *p);
+void register_flush(const nt_register_t *p, uint32_t cnt);
+void register_clr(nt_register_t *p);
+void register_set(nt_register_t *p);
+
+void register_do_read_trig_ts(const nt_register_t *p, uint64_t *tsc1,
+			   uint64_t *tsc2);
+
+void register_dump(const nt_register_t *p);
+void register_dump_fields(const nt_register_t *p);
+
+nt_field_t *field_new(void);
+void field_delete(nt_field_t *p);
+void field_init(nt_field_t *p, nt_register_t *p_reg,
+		const nt_fpga_field_init_t *p_init);
+
+int field_get_debug_mode(const nt_module_t *p);
+void field_set_debug_mode(nt_field_t *p, unsigned int n_debug_mode);
+int field_get_bit_width(const nt_field_t *p);
+int field_get_bit_pos_low(const nt_field_t *p);
+int field_get_bit_pos_high(const nt_field_t *p);
+uint32_t field_get_mask(const nt_field_t *p);
+void field_reset(const nt_field_t *p);
+uint32_t field_get_reset_val(const nt_field_t *p);
+void field_get_val(const nt_field_t *p, uint32_t *p_data, uint32_t len);
+void field_set_val(const nt_field_t *p, const uint32_t *p_data, uint32_t len);
+void field_set_val_flush(const nt_field_t *p, const uint32_t *p_data,
+		       uint32_t len);
+uint32_t field_get_val_mask(const nt_field_t *p);
+uint32_t field_get_val32(const nt_field_t *p);
+uint32_t field_get_updated(const nt_field_t *p);
+void field_read_trig_with_tsc(const nt_field_t *p, uint64_t *tsc1, uint64_t *tsc2);
+void field_update_register(const nt_field_t *p);
+void field_flush_register(const nt_field_t *p);
+void field_set_val32(const nt_field_t *p, uint32_t val);
+void field_set_val_flush32(const nt_field_t *p, uint32_t val);
+void field_clr_all(const nt_field_t *p);
+void field_clr_flush(const nt_field_t *p);
+void field_set_all(const nt_field_t *p);
+void field_set_flush(const nt_field_t *p);
+
+int field_wait_clr_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_all32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_clr_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+int field_wait_set_any32(const nt_field_t *p, int n_poll_iterations,
+		       int n_poll_interval);
+
+int field_wait_val_mask32(const nt_field_t *p, uint32_t n_wait_cond_value,
+			uint32_t n_wait_cond_mask, int n_poll_iterations,
+			int n_poll_interval);
+
+void field_dump(const nt_field_t *p);
+void field_dump_val(const nt_field_t *p);
+void field_dump_init(const nt_fpga_field_init_t *p);
+
+/*
+ * nthw helpers
+ */
+nt_fpga_t *nthw_get_fpga(struct fpga_info_s *p_fpga_info, uint64_t n_fpga_ident);
+nt_module_t *nthw_get_module(nt_fpga_t *p_fpga, int n_mod, int n_instance);
+nt_register_t *nthw_get_register(nt_module_t *p_mod, int n_reg);
+nt_field_t *nthw_get_field(nt_register_t *p_reg, int n_fld);
+
+#endif /* __NTHW_FPGA_MODEL_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_helper.h b/drivers/net/ntnic/nthw/nthw_helper.h
new file mode 100644
index 0000000000..22f6a0d471
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_HELPER_H__
+#define __NTHW_HELPER_H__
+
+#include <unistd.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#ifndef ARRAY_SIZE
+/* Element count of a true array (not valid on pointers/array parameters). */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#endif /* __NTHW_HELPER_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_platform.c b/drivers/net/ntnic/nthw/nthw_platform.c
new file mode 100644
index 0000000000..203947e03a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_platform_drv.h"
+
+/*
+ * Map a Napatech PCI device id to the adapter family it belongs to.
+ * Unrecognized device ids map to NT_HW_ADAPTER_ID_UNKNOWN.
+ */
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id)
+{
+	/* NT40A00/NT40A01 are members of the NT40E3 family */
+	static const struct {
+		uint16_t n_device_id;
+		nthw_adapter_id_t n_adapter_id;
+	} a_device_map[] = {
+		{ NT_HW_PCI_DEVICE_ID_NT40E3, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT100E3, NT_HW_ADAPTER_ID_NT100E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT80E3, NT_HW_ADAPTER_ID_NT80E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT40A00, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT40A01, NT_HW_ADAPTER_ID_NT40E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200E3, NT_HW_ADAPTER_ID_NT200E3 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A01, NT_HW_ADAPTER_ID_NT200A01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200D01, NT_HW_ADAPTER_ID_NT200D01 },
+		{ NT_HW_PCI_DEVICE_ID_NT200A02, NT_HW_ADAPTER_ID_NT200A02 },
+		{ NT_HW_PCI_DEVICE_ID_NT50B01, NT_HW_ADAPTER_ID_NT50B01 },
+		{ NT_HW_PCI_DEVICE_ID_NT100A01, NT_HW_ADAPTER_ID_NT100A01 },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(a_device_map); i++) {
+		if (a_device_map[i].n_device_id == n_pci_device_id)
+			return a_device_map[i].n_adapter_id;
+	}
+
+	return NT_HW_ADAPTER_ID_UNKNOWN;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_platform_drv.h b/drivers/net/ntnic/nthw/nthw_platform_drv.h
new file mode 100644
index 0000000000..fee2dc4853
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_platform_drv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PLATFORM_DRV_H__
+#define __NTHW_PLATFORM_DRV_H__
+
+#include "nthw_helper.h"
+
+#define NT_HW_PCI_VENDOR_ID (0x18f4)	/* Napatech PCI vendor id */
+
+/* PCI device ids of the supported adapters */
+#define NT_HW_PCI_DEVICE_ID_NT40E3 (0x145)
+#define NT_HW_PCI_DEVICE_ID_NT100E3 (0x155)
+#define NT_HW_PCI_DEVICE_ID_NT80E3 (0x165)
+#define NT_HW_PCI_DEVICE_ID_NT40A00 (0x175)
+#define NT_HW_PCI_DEVICE_ID_NT40A01 (0x185)
+#define NT_HW_PCI_DEVICE_ID_NT200E3 (0x195)
+#define NT_HW_PCI_DEVICE_ID_NT200A01 (0x1A5)
+#define NT_HW_PCI_DEVICE_ID_NT200D01 (0x1B5)
+#define NT_HW_PCI_DEVICE_ID_NT200A02 (0x1C5)
+#define NT_HW_PCI_DEVICE_ID_NT50B01 (0x1D5)
+#define NT_HW_PCI_DEVICE_ID_NT100A01 (0x1E5)
+
+/* Adapter families (several PCI device ids may share one family). */
+enum nthw_adapter_id_e {
+	NT_HW_ADAPTER_ID_UNKNOWN = 0,
+	NT_HW_ADAPTER_ID_NT40E3,
+	/* NT40A01 is an alias: same family as NT40E3 */
+	NT_HW_ADAPTER_ID_NT40A01 = NT_HW_ADAPTER_ID_NT40E3,
+	NT_HW_ADAPTER_ID_NT50B01,
+	NT_HW_ADAPTER_ID_NT80E3,
+	NT_HW_ADAPTER_ID_NT100E3,
+	NT_HW_ADAPTER_ID_NT100A01,
+	NT_HW_ADAPTER_ID_NT200E3,
+	NT_HW_ADAPTER_ID_NT200A01,
+	NT_HW_ADAPTER_ID_NT200D01,
+	NT_HW_ADAPTER_ID_NT200A02,
+};
+
+typedef enum nthw_adapter_id_e nthw_adapter_id_t;
+
+nthw_adapter_id_t nthw_platform_get_nthw_adapter_id(const uint16_t n_pci_device_id);
+
+#endif /* __NTHW_PLATFORM_DRV_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_profile.h b/drivers/net/ntnic/nthw/nthw_profile.h
new file mode 100644
index 0000000000..2fcb7b4adf
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_profile.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_PROFILE_H__
+#define __NTHW_PROFILE_H__
+
+/* Operating profile the FPGA image was built for. */
+enum fpga_info_profile {
+	FPGA_INFO_PROFILE_UNKNOWN = 0,
+	FPGA_INFO_PROFILE_VSWITCH = 1,
+	FPGA_INFO_PROFILE_INLINE = 2,
+	FPGA_INFO_PROFILE_CAPTURE = 3,
+};
+
+#endif /* __NTHW_PROFILE_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c
new file mode 100644
index 0000000000..f3f6bee223
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include <pthread.h>
+
+/*
+ * Prevent that RAB echo debug trace ever gets into a release build
+ */
+#if defined(DEBUG)
+#undef RAB_DEBUG_ECHO
+#else
+#undef RAB_DEBUG_ECHO
+#endif /* DEBUG */
+
+/* Max poll iterations while waiting for a RAB DMA completion (1 us each) */
+#define RAB_DMA_WAIT (1000000)
+/*
+ * Number of 32-bit words in each DMA ring buffer. Must be a power of two:
+ * ring pointers are wrapped with "& (RAB_DMA_BUF_CNT - 1)" below.
+ */
+#define RAB_DMA_BUF_CNT (0x4000)
+
+/* RAB operation codes (placed in the OPR field of a command word) */
+#define RAB_READ (0x01)
+#define RAB_WRITE (0x02)
+#define RAB_ECHO (0x08)
+#define RAB_COMPLETION (0x0F)
+
+/* Echo variants: the command word itself is reflected in the output buffer */
+#define RAB_READ_ECHO (RAB_READ | RAB_ECHO)
+#define RAB_WRITE_ECHO (RAB_WRITE | RAB_ECHO)
+
+/*
+ * Bit layout of a 32-bit RAB command word:
+ *   [31:28] OPR   - operation code
+ *   [27:20] CNT   - word count
+ *   [19:16] BUSID - RAB bus (interface) id
+ *   [15:0]  ADDR  - register address on the selected bus
+ */
+#define RAB_OPR_LO (28)
+#define RAB_OPR_HI (31)
+#define RAB_OPR_BW (4)
+
+#define RAB_CNT_LO (20)
+#define RAB_CNT_HI (27)
+#define RAB_CNT_BW (8)
+
+#define RAB_BUSID_LO (16)
+#define RAB_BUSID_HI (19)
+#define RAB_BUSID_BW (4)
+
+#define RAB_ADDR_LO (0)
+#define RAB_ADDR_HI (15)
+#define RAB_ADDR_BW (16)
+
+/*
+ * Allocate and zero-initialize a RAC instance.
+ * Returns NULL on allocation failure (caller must check).
+ */
+nthw_rac_t *nthw_rac_new(void)
+{
+	nthw_rac_t *p = malloc(sizeof(nthw_rac_t));
+
+	/* BUGFIX: the original memset the pointer unconditionally, which
+	 * dereferences NULL if malloc() fails.
+	 */
+	if (p)
+		memset(p, 0, sizeof(nthw_rac_t));
+	return p;
+}
+
+/*
+ * Release a RAC instance previously returned by nthw_rac_new().
+ * Accepts NULL. The object is scrubbed before being freed so stale
+ * pointers are easier to catch.
+ */
+void nthw_rac_delete(nthw_rac_t *p)
+{
+	if (p == NULL)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all RAC module registers/fields and cache their BAR0 addresses
+ * and field masks for fast raw access.
+ * Returns 0 on success, -1 if the RAC module is not present.
+ * When called with p == NULL, only probes for module presence.
+ */
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_RAC, 0);
+	int n_debug_mode;
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RAC %d: no such instance\n",
+		       p_adapter_id_str, 0);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mp_mod_rac = mod;
+
+	/*
+	 * RAC is a primary communication channel
+	 * turn off debug by default
+	 * except for rac_rab_init
+	 *
+	 * BUGFIX: the original code called
+	 * register_set_debug_mode(p->mp_reg_rab_init, ...) here, before
+	 * mp_reg_rab_init was resolved below - i.e. on a NULL register
+	 * (p is zeroed by nthw_rac_new). The module debug mode is captured
+	 * and disabled here; the register debug mode is applied after the
+	 * RAC_RAB_INIT lookup.
+	 */
+	n_debug_mode = module_get_debug_mode(p->mp_mod_rac);
+	if (n_debug_mode && n_debug_mode <= 0xff)
+		module_set_debug_mode(p->mp_mod_rac, 0);
+
+	/* Params */
+	p->mn_param_rac_rab_interfaces =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_INTERFACES, 3);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_interfaces);
+
+	p->mn_param_rac_rab_ob_update =
+		fpga_get_product_param(p->mp_fpga, NT_RAC_RAB_OB_UPDATE, 0);
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_OB_UPDATE=%d\n", p_adapter_id_str,
+	       p->mn_param_rac_rab_ob_update);
+
+	/* Optional dummy test registers */
+	p->mp_reg_dummy0 = module_query_register(p->mp_mod_rac, RAC_DUMMY0);
+	p->mp_reg_dummy1 = module_query_register(p->mp_mod_rac, RAC_DUMMY1);
+	p->mp_reg_dummy2 = module_query_register(p->mp_mod_rac, RAC_DUMMY2);
+
+	p->mp_reg_rab_init = module_get_register(p->mp_mod_rac, RAC_RAB_INIT);
+	/* Deferred from above: keep rac_rab_init traceable (see BUGFIX note) */
+	if (n_debug_mode && n_debug_mode <= 0xff)
+		register_set_debug_mode(p->mp_reg_rab_init, n_debug_mode);
+	p->mp_fld_rab_init = register_get_field(p->mp_reg_rab_init, RAC_RAB_INIT_RAB);
+	p->mn_fld_rab_init_bw = field_get_bit_width(p->mp_fld_rab_init);
+	p->mn_fld_rab_init_mask = field_get_mask(p->mp_fld_rab_init);
+
+	/* RAC_RAB_INIT_RAB reg/field sanity checks: */
+	assert(p->mn_fld_rab_init_mask == ((1UL << p->mn_fld_rab_init_bw) - 1));
+	assert(p->mn_fld_rab_init_bw == p->mn_param_rac_rab_interfaces);
+
+	p->mp_reg_dbg_ctrl = module_query_register(p->mp_mod_rac, RAC_DBG_CTRL);
+	if (p->mp_reg_dbg_ctrl) {
+		p->mp_fld_dbg_ctrl =
+			register_query_field(p->mp_reg_dbg_ctrl, RAC_DBG_CTRL_C);
+	} else {
+		p->mp_fld_dbg_ctrl = NULL;
+	}
+	p->mp_reg_dbg_data = module_query_register(p->mp_mod_rac, RAC_DBG_DATA);
+	if (p->mp_reg_dbg_data) {
+		p->mp_fld_dbg_data =
+			register_query_field(p->mp_reg_dbg_data, RAC_DBG_DATA_D);
+	} else {
+		/* BUGFIX: was "p->mp_reg_dbg_data = NULL" (copy/paste error) */
+		p->mp_fld_dbg_data = NULL;
+	}
+	p->mp_reg_rab_ib_data = module_get_register(p->mp_mod_rac, RAC_RAB_IB_DATA);
+	p->mp_fld_rab_ib_data =
+		register_get_field(p->mp_reg_rab_ib_data, RAC_RAB_IB_DATA_D);
+
+	p->mp_reg_rab_ob_data = module_get_register(p->mp_mod_rac, RAC_RAB_OB_DATA);
+	p->mp_fld_rab_ob_data =
+		register_get_field(p->mp_reg_rab_ob_data, RAC_RAB_OB_DATA_D);
+
+	p->mp_reg_rab_buf_free = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_FREE);
+	p->mp_fld_rab_buf_free_ib_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_FREE);
+	p->mp_fld_rab_buf_free_ib_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_IB_OVF);
+	p->mp_fld_rab_buf_free_ob_free =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_FREE);
+	p->mp_fld_rab_buf_free_ob_ovf =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_OB_OVF);
+	p->mp_fld_rab_buf_free_timeout =
+		register_get_field(p->mp_reg_rab_buf_free, RAC_RAB_BUF_FREE_TIMEOUT);
+
+	p->mp_reg_rab_buf_used = module_get_register(p->mp_mod_rac, RAC_RAB_BUF_USED);
+	p->mp_fld_rab_buf_used_ib_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_IB_USED);
+	p->mp_fld_rab_buf_used_ob_used =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_OB_USED);
+	p->mp_fld_rab_buf_used_flush =
+		register_get_field(p->mp_reg_rab_buf_used, RAC_RAB_BUF_USED_FLUSH);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA - not found in 9231/9232 and
+	 * earlier
+	 */
+	p->mp_reg_rab_dma_ib_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_LO);
+	p->mp_fld_rab_dma_ib_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_lo, RAC_RAB_DMA_IB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_HI);
+	p->mp_fld_rab_dma_ib_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ib_hi, RAC_RAB_DMA_IB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_lo = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_LO);
+	p->mp_fld_rab_dma_ob_lo_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_lo, RAC_RAB_DMA_OB_LO_PHYADDR);
+
+	p->mp_reg_rab_dma_ob_hi = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_HI);
+	p->mp_fld_rab_dma_ob_hi_phy_addr =
+		register_get_field(p->mp_reg_rab_dma_ob_hi, RAC_RAB_DMA_OB_HI_PHYADDR);
+
+	p->mp_reg_rab_dma_ib_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_WR);
+	p->mp_fld_rab_dma_ib_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_wr, RAC_RAB_DMA_IB_WR_PTR);
+
+	p->mp_reg_rab_dma_ib_rd = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_IB_RD);
+	p->mp_fld_rab_dma_ib_rd_ptr =
+		register_get_field(p->mp_reg_rab_dma_ib_rd, RAC_RAB_DMA_IB_RD_PTR);
+
+	p->mp_reg_rab_dma_ob_wr = module_get_register(p->mp_mod_rac, RAC_RAB_DMA_OB_WR);
+	p->mp_fld_rab_dma_ob_wr_ptr =
+		register_get_field(p->mp_reg_rab_dma_ob_wr, RAC_RAB_DMA_OB_WR_PTR);
+
+	/* Cache raw BAR0 offsets for the hot-path register accessors */
+	p->rac_rab_init_addr = register_get_address(p->mp_reg_rab_init);
+	p->rac_rab_ib_data_addr = register_get_address(p->mp_reg_rab_ib_data);
+	p->rac_rab_ob_data_addr = register_get_address(p->mp_reg_rab_ob_data);
+	p->rac_rab_buf_free_addr = register_get_address(p->mp_reg_rab_buf_free);
+	p->rac_rab_buf_used_addr = register_get_address(p->mp_reg_rab_buf_used);
+
+	/*
+	 * RAC_RAB_DMA regs are optional - only found in real NT4GA -
+	 * not found in 9231/9232 and earlier
+	 */
+
+	p->rac_rab_dma_ib_lo_addr = register_get_address(p->mp_reg_rab_dma_ib_lo);
+	p->rac_rab_dma_ib_hi_addr = register_get_address(p->mp_reg_rab_dma_ib_hi);
+	p->rac_rab_dma_ob_lo_addr = register_get_address(p->mp_reg_rab_dma_ob_lo);
+	p->rac_rab_dma_ob_hi_addr = register_get_address(p->mp_reg_rab_dma_ob_hi);
+	p->rac_rab_dma_ib_rd_addr = register_get_address(p->mp_reg_rab_dma_ib_rd);
+	p->rac_rab_dma_ob_wr_addr = register_get_address(p->mp_reg_rab_dma_ob_wr);
+	p->rac_rab_dma_ib_wr_addr = register_get_address(p->mp_reg_rab_dma_ib_wr);
+
+	p->rac_rab_buf_free_ib_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ib_free);
+	p->rac_rab_buf_free_ob_free_mask =
+		field_get_mask(p->mp_fld_rab_buf_free_ob_free);
+	p->rac_rab_buf_used_ib_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ib_used);
+	p->rac_rab_buf_used_ob_used_mask =
+		field_get_mask(p->mp_fld_rab_buf_used_ob_used);
+
+	p->rac_rab_buf_used_flush_mask = field_get_mask(p->mp_fld_rab_buf_used_flush);
+
+	p->rac_rab_buf_used_ob_used_low =
+		field_get_bit_pos_low(p->mp_fld_rab_buf_used_ob_used);
+
+	/* NMB registers are optional; addresses cached only when present */
+	p->mp_reg_rab_nmb_rd = module_query_register(p->mp_mod_rac, RAC_NMB_RD_ADR);
+	if (p->mp_reg_rab_nmb_rd)
+		p->rac_nmb_rd_adr_addr = register_get_address(p->mp_reg_rab_nmb_rd);
+
+	p->mp_reg_rab_nmb_data = module_query_register(p->mp_mod_rac, RAC_NMB_DATA);
+	if (p->mp_reg_rab_nmb_data)
+		p->rac_nmb_data_addr = register_get_address(p->mp_reg_rab_nmb_data);
+
+	p->mp_reg_rab_nmb_wr = module_query_register(p->mp_mod_rac, RAC_NMB_WR_ADR);
+	if (p->mp_reg_rab_nmb_wr)
+		p->rac_nmb_wr_adr_addr = register_get_address(p->mp_reg_rab_nmb_wr);
+
+	p->mp_reg_rab_nmb_status =
+		module_query_register(p->mp_mod_rac, RAC_NMB_STATUS);
+	if (p->mp_reg_rab_nmb_status) {
+		p->rac_nmb_status_addr =
+			register_get_address(p->mp_reg_rab_nmb_status);
+	}
+
+	/* DMA buffers are allocated lazily in nthw_rac_rab_setup() */
+	p->m_dma = NULL;
+
+	pthread_mutex_init(&p->m_mutex, NULL);
+
+	return 0;
+}
+
+/* Number of RAB interfaces reported by the FPGA product parameters. */
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p)
+{
+	const int n_rab_interfaces = p->mn_param_rac_rab_interfaces;
+
+	return n_rab_interfaces;
+}
+
+/*
+ * Busy-poll the OB_USED counter until at least word_cnt words are available
+ * in the RAB output buffer. `address` is used only for the error message.
+ * Returns 0 on success, -1 if the words never arrive within the poll budget.
+ */
+static inline int nthw_rac_wait_for_rab_done(const nthw_rac_t *p, uint32_t address,
+		uint32_t word_cnt)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t used = 0;
+	uint32_t retry;
+
+	/* NOTE(review): tight spin with no delay between MMIO reads */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &used);
+		used = (used & p->rac_rab_buf_used_ob_used_mask) >>
+		       p->rac_rab_buf_used_ob_used_low;
+		if (used >= word_cnt)
+			break;
+	}
+
+	if (used < word_cnt) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Fail rab bus r/w addr=0x%08X used=%x wordcount=%d\n",
+		       p_adapter_id_str, address, used, word_cnt);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * NT_PCI_REG_P9xyz_RAC_RAB_INIT
+ *
+ * Initializes (resets) the programmable registers on the Register Access Buses (RAB).
+ * This initialization must be performed by software as part of the driver load procedure.
+ *
+ * Bit n of this field initializes the programmable registers on RAB interface n.
+ * Software must write one to the bit and then clear the bit again.
+ *
+ * All RAB module registers will be reset to their defaults.
+ * This includes the product specific RESET module (e.g. RST9xyz).
+ * As a consequence of this behavior the official reset sequence
+ * must be exercised - as all RAB modules will be held in reset.
+ */
+/*
+ * Write n_rab_intf_mask to the RAB_INIT register (bit n resets RAB bus n).
+ * Always returns 0.
+ */
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t n_rab_intf_mask)
+{
+	/*
+	 * Write rac_rab_init
+	 * Perform operation twice - first to get trace of operation -
+	 * second to get things done...
+	 */
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* Field write goes through the register model (traced), then raw BAR0 */
+	field_set_val_flush32(p->mp_fld_rab_init, n_rab_intf_mask);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_init_addr, n_rab_intf_mask);
+	return 0;
+}
+
+/*
+ * Reset all RAB buses via the RAB_INIT "flip/flip" sequence.
+ * Always returns 0.
+ */
+int nthw_rac_rab_reset(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	/* _unused: NT_LOG below may compile to nothing in release builds */
+	const char *const p_adapter_id_str _unused = p_fpga_info->mp_adapter_id_str;
+
+	/* RAC RAB bus "flip/flip" reset */
+	const int n_rac_rab_bus_count = nthw_rac_get_rab_interface_count(p);
+	const int n_rac_rab_bus_mask = (1 << n_rac_rab_bus_count) - 1;
+
+	NT_LOG(DBG, NTHW, "%s: NT_RAC_RAB_INTERFACES=%d (0x%02X)\n",
+	       p_adapter_id_str, n_rac_rab_bus_count, n_rac_rab_bus_mask);
+	assert(n_rac_rab_bus_count);
+	assert(n_rac_rab_bus_mask);
+
+	/* RAC RAB bus "flip/flip" reset first stage - new impl (ref RMT#37020) */
+	/*
+	 * NOTE(review): clear all, assert all, then release all except bit 0 -
+	 * bus 0 remains asserted here, presumably released later by the
+	 * official reset sequence (see RAB_INIT comment above) - confirm.
+	 */
+	nthw_rac_rab_init(p, 0);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask);
+	nthw_rac_rab_init(p, n_rac_rab_bus_mask & ~0x01);
+
+	return 0;
+}
+
+/*
+ * Allocate (once) the combined input/output DMA ring buffer and program its
+ * IOVA into the adapter, then synchronize the driver's ring pointers with
+ * the hardware's current values.
+ * Returns 0 on success, -1 if the DMA allocation fails.
+ */
+int nthw_rac_rab_setup(nthw_rac_t *p)
+{
+	int rc = 0;
+
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	/* One contiguous area: input ring followed by output ring */
+	uint32_t n_dma_buf_size = 2L * RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	const size_t align_size = ALIGN_SIZE(n_dma_buf_size);
+	int numa_node = p_fpga_info->numa_node;
+	uint64_t dma_addr;
+	uint32_t buf;
+
+	/* Lazy one-time allocation; reused across subsequent setups */
+	if (!p->m_dma) {
+		struct nt_dma_s *vfio_dma;
+		/* FPGA needs Page alignment (4K) */
+		vfio_dma = nt_dma_alloc(align_size, 0x1000, numa_node);
+
+		if (vfio_dma == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: nt_dma_alloc failed\n",
+			       __func__);
+			return -1;
+		}
+		p->m_dma_in_buf = (uint32_t *)vfio_dma->addr;
+		p->m_dma_out_buf = p->m_dma_in_buf + RAB_DMA_BUF_CNT;
+		p->m_dma = vfio_dma;
+	}
+
+	/* Setup DMA on the adapter */
+	dma_addr = p->m_dma->iova;
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+	/* Output ring starts RAB_DMA_BUF_CNT words after the input ring */
+	dma_addr += RAB_DMA_BUF_CNT * sizeof(uint32_t);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_lo_addr,
+			   dma_addr & 0xffffffff);
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ob_hi_addr,
+			   (uint32_t)(dma_addr >> 32) & 0xffffffff);
+
+	/* Set initial value of internal pointers */
+	/* Hardware pointers are byte offsets; driver pointers are word indices */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ib_rd_addr, &buf);
+	p->m_dma_in_ptr_wr = (uint16_t)(buf / sizeof(uint32_t));
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_dma_ob_wr_addr, &buf);
+	p->m_dma_out_ptr_rd = (uint16_t)(buf / sizeof(uint32_t));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return rc;
+}
+
+/*
+ * Start a RAB DMA transaction. On success the instance mutex is LEFT HELD
+ * and is only released by nthw_rac_rab_dma_commit().
+ * Returns 0 on success, -1 if a transaction is already in flight.
+ */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (!p->m_dma_active) {
+		p->m_dma_active = true;
+		return 0;	/* mutex intentionally kept locked */
+	}
+
+	/* A transaction is already active - refuse and release the lock */
+	pthread_mutex_unlock(&p->m_mutex);
+	NT_LOG(ERR, NTHW,
+	       "%s: DMA begin requested, but a DMA transaction is already active\n",
+	       p_adapter_id_str);
+	return -1;
+}
+
+/*
+ * Terminate the queued command stream with a completion word and write the
+ * in-buffer pointer register, which starts the DMA transfer.
+ */
+static void nthw_rac_rab_dma_activate(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+
+	/* Write completion word */
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] = completion;
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Clear output completion word */
+	/* ...so nthw_rac_rab_dma_wait() doesn't see a stale one */
+	p->m_dma_out_buf[p->m_dma_out_ptr_rd] = 0;
+
+	/* _update DMA pointer and start transfer */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_dma_ib_wr_addr,
+			   (uint32_t)(p->m_dma_in_ptr_wr * sizeof(uint32_t)));
+}
+
+/*
+ * Poll (1 us per iteration, up to RAB_DMA_WAIT) until the completion word
+ * appears in the DMA output buffer. On success the out pointer is advanced
+ * past it and the in-buffer budget is reset.
+ * Returns 0 on success, -1 on timeout.
+ */
+static int nthw_rac_rab_dma_wait(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const uint32_t completion = RAB_COMPLETION << RAB_OPR_LO;
+	uint32_t i;
+
+	for (i = 0; i < RAB_DMA_WAIT; i++) {
+		NT_OS_WAIT_USEC_POLL(1);
+		if ((p->m_dma_out_buf[p->m_dma_out_ptr_rd] & completion) ==
+				completion)
+			break;
+	}
+
+	if (i == RAB_DMA_WAIT) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Unexpected value of completion (0x%08X)\n",
+		       p_fpga_info->mp_adapter_id_str,
+		       p->m_dma_out_buf[p->m_dma_out_ptr_rd]);
+		return -1;
+	}
+
+	/* Consume the completion word and reopen the full input budget */
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+	p->m_in_free = RAB_DMA_BUF_CNT;
+
+	return 0;
+}
+
+/*
+ * Finish a DMA transaction started with nthw_rac_rab_dma_begin(): append
+ * the completion word, kick the transfer and wait for it to complete.
+ * Releases the mutex taken by nthw_rac_rab_dma_begin().
+ * Returns the wait result (0 on success, -1 on timeout), or -1 if called
+ * without a matching begin.
+ */
+int nthw_rac_rab_dma_commit(nthw_rac_t *p)
+{
+	int ret;
+
+	if (!p->m_dma_active) {
+		/* Expecting mutex not to be locked! */
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	nthw_rac_rab_dma_activate(p);
+	ret = nthw_rac_rab_dma_wait(p);
+
+	p->m_dma_active = false;
+
+	pthread_mutex_unlock(&p->m_mutex);
+
+	return ret;
+}
+
+/* Single 32-bit MMIO read from BAR0 at byte offset reg_addr into *p_data. */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data)
+{
+	const volatile uint32_t *const p_reg =
+		(const volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+					   reg_addr);
+
+	*p_data = *p_reg;
+}
+
+/* Single 32-bit MMIO write of p_data to BAR0 at byte offset reg_addr. */
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data)
+{
+	volatile uint32_t *const p_reg =
+		(volatile uint32_t *)((uint8_t *)p_fpga_info->bar0_addr +
+				      reg_addr);
+
+	*p_reg = p_data;
+}
+
+/*
+ * Queue a RAB write command plus its payload in the DMA input ring.
+ * The transfer is only started later by nthw_rac_rab_dma_commit(); the
+ * caller must hold the transaction (nthw_rac_rab_dma_begin()).
+ * word_cnt must be 1..256. Returns 0 on success, -1 on bad length or
+ * insufficient ring space.
+ */
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma write length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	/* Need room for payload + command word + (later) completion word */
+	if (p->m_in_free < (word_cnt + 3)) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	/* Charge command word + payload; the +3 check above keeps slack for
+	 * the completion word added by commit
+	 */
+	p->m_in_free -= (word_cnt + 1);
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt + 1) &
+				     (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_WRITE << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Copy the payload into the ring, wrapping at the power-of-two size */
+	for (uint32_t i = 0; i < word_cnt; i++) {
+		p->m_dma_in_buf[p->m_dma_in_ptr_wr] = p_data[i];
+		p->m_dma_in_ptr_wr = (uint16_t)((p->m_dma_in_ptr_wr + 1) &
+					    (RAB_DMA_BUF_CNT - 1));
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a RAB read command in the DMA input ring and report (via buf_ptr)
+ * where the word_cnt result words will land in the output ring after
+ * nthw_rac_rab_dma_commit(). Caller must hold the transaction.
+ * word_cnt must be 1..256. Returns 0 on success, -1 on error.
+ */
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+
+	if (word_cnt == 0 || word_cnt > 256) {
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt,
+		       p->m_in_free);
+		assert(0); /* alert developer that something is wrong */
+		return -1;
+	}
+
+	if ((word_cnt + 3) > RAB_DMA_BUF_CNT) {
+		/*
+		 * BUGFIX: the original format string ended in ": 0x%08X" with
+		 * no matching argument (undefined behavior) and lacked a
+		 * newline; the stray conversion has been dropped.
+		 */
+		NT_LOG(ERR, NTHW,
+		       "%s: Failed rab dma read length check - bus: %d addr: 0x%08X wordcount: %d\n",
+		       p_fpga_info->mp_adapter_id_str, bus_id, address, word_cnt);
+		return -1;
+	}
+
+	/* Need room for the command word plus slack for the completion word */
+	if (p->m_in_free < 3) {
+		/*
+		 * No more memory available.
+		 * nthw_rac_rab_dma_commit() needs to be called to start and finish pending
+		 * transfers.
+		 */
+		return -1;
+	}
+
+	p->m_in_free -= 1;
+
+	/* Write the command word */
+#if defined(RAB_DEBUG_ECHO)
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ_ECHO << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+	p->m_dma_out_ptr_rd =
+		(uint16_t)((p->m_dma_out_ptr_rd + 1) & (RAB_DMA_BUF_CNT - 1));
+#else
+	p->m_dma_in_buf[p->m_dma_in_ptr_wr] =
+		(RAB_READ << RAB_OPR_LO) |
+		((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+		(bus_id << RAB_BUSID_LO) | address;
+#endif
+	p->m_dma_in_ptr_wr =
+		(uint16_t)((p->m_dma_in_ptr_wr + 1) & (RAB_DMA_BUF_CNT - 1));
+
+	/* Hand back the output-ring window where the result will appear */
+	buf_ptr->index = p->m_dma_out_ptr_rd;
+	buf_ptr->size = RAB_DMA_BUF_CNT;
+	buf_ptr->base = p->m_dma_out_buf;
+	p->m_dma_out_ptr_rd = (uint16_t)((p->m_dma_out_ptr_rd + word_cnt) &
+				     (RAB_DMA_BUF_CNT - 1U));
+
+	return 0;
+}
+
+/*
+ * Synchronous (non-DMA) RAB write: pushes a write command, the payload and
+ * a completion command through the in-buffer register, then polls the
+ * out-buffer for the echoed completion word.
+ * Serialized on p->m_mutex; must not be used while a DMA transaction is
+ * active. Returns 0 on success, -1 on parameter or bus errors.
+ */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_wr;
+	uint32_t rab_oper_cmpl;
+	uint32_t rab_echo_oper_cmpl;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	/*
+	 * NOTE(review): ">" admits address == (1 << RAB_ADDR_BW), one past
+	 * the largest value the 16-bit ADDR field can hold - confirm intended
+	 * (same pattern for bus_id and word_cnt below).
+	 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		return -1;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		return -1;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		return -1;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		return -1;
+	}
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	if (p->m_dma_active) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled\n",
+		       p_adapter_id_str);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	/* IB free count in the low half, OB free count in the high half */
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold one completion word,
+	 * input buffer can hold the number of words to be written +
+	 * one write and one completion command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= 1 && (in_buf_free >= word_cnt + 2)) && buf_used == 0) {
+		uint32_t i;
+
+		word_cnt_expected = 0;
+
+		/* Compose write command */
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_wr =
+			(RAB_WRITE_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected += word_cnt + 1;
+#else
+		rab_oper_wr =
+			(RAB_WRITE << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_wr);
+
+		/* Write da to input buffer */
+		for (i = 0; i < word_cnt; i++) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+					   *p_data);
+			p_data++;
+		}
+
+		/* Compose completion command */
+		rab_oper_cmpl = (RAB_COMPLETION << RAB_OPR_LO);
+		word_cnt_expected++;
+
+		/* Write command */
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_cmpl);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		{
+			uint32_t rab_echo_oper_wr;
+
+			nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+					  &rab_echo_oper_wr);
+			/* OB_UPDATE products need a write to pop the out buffer */
+			if (p->mn_param_rac_rab_ob_update) {
+				nthw_rac_reg_write32(p_fpga_info,
+						   p->rac_rab_ob_data_addr, 0);
+			}
+			if (rab_oper_wr != rab_echo_oper_wr) {
+				NT_LOG(ERR, NTHW,
+				       "%s: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+				       p_adapter_id_str, rab_oper_wr, rab_echo_oper_wr);
+			}
+		}
+
+		{
+			/* Read data from output buffer */
+			uint32_t data;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  &data);
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr, 0);
+				}
+			}
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read completion from out buffer */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_cmpl);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_echo_oper_cmpl != rab_oper_cmpl) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: Unexpected value of completion (0x%08X)- inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, rab_echo_oper_cmpl, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		/* Read buffer free register */
+		/* Bit 31 set = timeout/overflow flag from the hardware */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Synchronous (non-DMA) RAB read: pushes a read command through the
+ * in-buffer register and collects word_cnt result words from the
+ * out-buffer register into p_data.
+ * Serialized on p->m_mutex. Returns 0 on success, -1 on error.
+ * NOTE(review): unlike nthw_rac_rab_write32(), parameter validation here
+ * happens with the mutex already held - harmless but inconsistent.
+ */
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	int res = 0;
+	uint32_t rab_oper_rd;
+	uint32_t word_cnt_expected;
+	uint32_t buf_used;
+	uint32_t buf_free;
+	uint32_t in_buf_free;
+	uint32_t out_buf_free;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* NOTE(review): ">" admits the one-past-max value, as in write32 */
+	if (address > (1 << RAB_ADDR_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal address: value too large %d - max %d\n",
+		       p_adapter_id_str, address, (1 << RAB_ADDR_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (bus_id > (1 << RAB_BUSID_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal bus id: value too large %d - max %d\n",
+		       p_adapter_id_str, bus_id, (1 << RAB_BUSID_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt == 0) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value is zero (%d)\n",
+		       p_adapter_id_str, word_cnt);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	if (word_cnt > (1 << RAB_CNT_BW)) {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Illegal word count: value too large %d - max %d\n",
+		       p_adapter_id_str, word_cnt, (1 << RAB_CNT_BW));
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+	/* Read buffer free register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr, &buf_free);
+
+	in_buf_free = buf_free & p->rac_rab_buf_free_ib_free_mask;
+	out_buf_free = (buf_free & p->rac_rab_buf_free_ob_free_mask) >> 16;
+
+	/* Read buffer used register */
+	nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &buf_used);
+
+	buf_used = buf_used & (p->rac_rab_buf_used_ib_used_mask |
+			     p->rac_rab_buf_used_ob_used_mask);
+
+	/*
+	 * Verify that output buffer can hold the number of words to be read,
+	 * input buffer can hold one read command
+	 * and that the input and output "used" buffer is 0
+	 */
+	if ((out_buf_free >= word_cnt && in_buf_free >= 1) && buf_used == 0) {
+		word_cnt_expected = word_cnt;
+
+#if defined(RAB_DEBUG_ECHO)
+		rab_oper_rd =
+			(RAB_READ_ECHO << RAB_OPR_LO) |
+			((word_cnt & ((1 << RAB_CNT_BW) - 1)) << RAB_CNT_LO) |
+			(bus_id << RAB_BUSID_LO) | address;
+		word_cnt_expected++;
+#else
+		/* NOTE(review): word_cnt is not masked here, unlike write32 -
+		 * relies on the range check above; confirm intended.
+		 */
+		rab_oper_rd = (RAB_READ << RAB_OPR_LO) | (word_cnt << RAB_CNT_LO) |
+			    (bus_id << RAB_BUSID_LO) | address;
+#endif /* RAB_DEBUG_ECHO */
+
+		nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ib_data_addr,
+				   rab_oper_rd);
+
+		/* Wait until done */
+		if (nthw_rac_wait_for_rab_done(p, address, word_cnt_expected)) {
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+#if defined(RAB_DEBUG_ECHO)
+		uint32_t rab_echo_oper_rd;
+
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_ob_data_addr,
+				  &rab_echo_oper_rd);
+		if (p->mn_param_rac_rab_ob_update) {
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_ob_data_addr,
+					   0);
+		}
+		if (rab_oper_rd != rab_echo_oper_rd) {
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: expected rab read echo oper (0x%08X) - read (0x%08X)\n",
+			       p_adapter_id_str, rab_oper_rd, rab_echo_oper_rd);
+		}
+#endif /* RAB_DEBUG_ECHO */
+
+		/* Read data from output buffer */
+		{
+			uint32_t i;
+
+			for (i = 0; i < word_cnt; i++) {
+				nthw_rac_reg_read32(p_fpga_info,
+						  p->rac_rab_ob_data_addr,
+						  p_data);
+				/* OB_UPDATE products need a write to pop the buffer */
+				if (p->mn_param_rac_rab_ob_update) {
+					nthw_rac_reg_write32(p_fpga_info,
+							     p->rac_rab_ob_data_addr,
+							     0);
+				}
+				p_data++;
+			}
+		}
+
+		/* Read buffer free register */
+		/* Bit 31 set = timeout/overflow flag from the hardware */
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_free_addr,
+				  &buf_free);
+		if (buf_free & 0x80000000) {
+			/* Clear Timeout and overflow bits */
+			nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr,
+					   0x0);
+			NT_LOG(ERR, NTHW,
+			       "%s: RAB: timeout - Access outside register - bus: %d addr: 0x%08X - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+			       p_adapter_id_str, bus_id, address, in_buf_free,
+			       out_buf_free, buf_used);
+			res = -1;
+			goto exit_unlock_res;
+		}
+
+		res = 0;
+		goto exit_unlock_res;
+	} else {
+		NT_LOG(ERR, NTHW,
+		       "%s: RAB: Fail rab bus buffer check - bus: %d addr: 0x%08X wordcount: %d - inBufFree: 0x%08X, outBufFree: 0x%08X, bufUsed: 0x%08X\n",
+		       p_adapter_id_str, bus_id, address, word_cnt, in_buf_free,
+		       out_buf_free, buf_used);
+		res = -1;
+		goto exit_unlock_res;
+	}
+
+exit_unlock_res:
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
+
+/*
+ * Discard any stale words in the RAB in/out buffers: set the FLUSH bit,
+ * wait for IB_USED/OB_USED to drain to zero, then clear the flush bit.
+ * Returns 0 on success, -1 if the buffers fail to drain.
+ */
+int nthw_rac_rab_flush(nthw_rac_t *p)
+{
+	const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+	const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+	uint32_t data = 0;
+	uint32_t retry;
+	int res = 0;
+
+	pthread_mutex_lock(&p->m_mutex);
+
+	/* Set the flush bit */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr,
+			   p->rac_rab_buf_used_flush_mask);
+
+	/* Reset BUF FREE register */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_free_addr, 0x0);
+
+	/* Wait until OB_USED and IB_USED are 0 */
+	for (retry = 0; retry < 100000; retry++) {
+		nthw_rac_reg_read32(p_fpga_info, p->rac_rab_buf_used_addr, &data);
+
+		/* Drained when only the FLUSH bit itself remains set */
+		if ((data & 0xFFFFFFFF) == p->rac_rab_buf_used_flush_mask)
+			break;
+	}
+
+	if (data != p->rac_rab_buf_used_flush_mask) {
+		NT_LOG(ERR, NTHW, "%s: RAB: Rab bus flush error.\n",
+		       p_adapter_id_str);
+		res = -1;
+	}
+
+	/* Clear flush bit when done */
+	nthw_rac_reg_write32(p_fpga_info, p->rac_rab_buf_used_addr, 0x0);
+
+	pthread_mutex_unlock(&p->m_mutex);
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h
new file mode 100644
index 0000000000..737598d95a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_rac.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_RAC_H__
+#define __NTHW_RAC_H__
+
+#include "nt_util.h"
+#include "nthw_bus.h"
+
+#include <pthread.h>
+
+/*
+ * RAC (Register Access Controller) instance state.
+ * Holds resolved register/field handles for the RAB (Register Access Bus)
+ * plus cached addresses/masks and DMA bookkeeping for the in/out buffers.
+ */
+struct nthw_rac {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rac;
+
+	/* Serializes all RAB register-bus transactions */
+	pthread_mutex_t m_mutex;
+
+	int mn_param_rac_rab_interfaces;
+	int mn_param_rac_rab_ob_update;
+
+	nt_register_t *mp_reg_dummy0;
+	nt_register_t *mp_reg_dummy1;
+	nt_register_t *mp_reg_dummy2;
+
+	nt_register_t *mp_reg_rab_init;
+	nt_field_t *mp_fld_rab_init;
+
+	int mn_fld_rab_init_bw;
+	uint32_t mn_fld_rab_init_mask;
+
+	nt_register_t *mp_reg_dbg_ctrl;
+	nt_field_t *mp_fld_dbg_ctrl;
+
+	nt_register_t *mp_reg_dbg_data;
+	nt_field_t *mp_fld_dbg_data;
+
+	/* In-band (IB) and out-of-band (OB) data registers */
+	nt_register_t *mp_reg_rab_ib_data;
+	nt_field_t *mp_fld_rab_ib_data;
+
+	nt_register_t *mp_reg_rab_ob_data;
+	nt_field_t *mp_fld_rab_ob_data;
+
+	nt_register_t *mp_reg_rab_buf_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_free;
+	nt_field_t *mp_fld_rab_buf_free_ib_ovf;
+	nt_field_t *mp_fld_rab_buf_free_ob_free;
+	nt_field_t *mp_fld_rab_buf_free_ob_ovf;
+	nt_field_t *mp_fld_rab_buf_free_timeout;
+
+	nt_register_t *mp_reg_rab_buf_used;
+	nt_field_t *mp_fld_rab_buf_used_ib_used;
+	nt_field_t *mp_fld_rab_buf_used_ob_used;
+	nt_field_t *mp_fld_rab_buf_used_flush;
+
+	/* RAB DMA ring registers (lo/hi physical address, rd/wr pointers) */
+	nt_register_t *mp_reg_rab_dma_ib_lo;
+	nt_field_t *mp_fld_rab_dma_ib_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_hi;
+	nt_field_t *mp_fld_rab_dma_ib_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_hi;
+	nt_field_t *mp_fld_rab_dma_ob_hi_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ob_lo;
+	nt_field_t *mp_fld_rab_dma_ob_lo_phy_addr;
+
+	nt_register_t *mp_reg_rab_dma_ib_wr;
+	nt_field_t *mp_fld_rab_dma_ib_wr_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ib_rd;
+	nt_field_t *mp_fld_rab_dma_ib_rd_ptr;
+
+	nt_register_t *mp_reg_rab_dma_ob_wr;
+	nt_field_t *mp_fld_rab_dma_ob_wr_ptr;
+
+	nt_register_t *mp_reg_rab_nmb_rd;
+	nt_register_t *mp_reg_rab_nmb_data;
+	nt_register_t *mp_reg_rab_nmb_wr;
+	nt_register_t *mp_reg_rab_nmb_status;
+
+	/* Cached register addresses (avoid model lookups on the fast path) */
+	uint32_t rac_rab_init_addr;
+	uint32_t rac_rab_ib_data_addr;
+	uint32_t rac_rab_ob_data_addr;
+	uint32_t rac_rab_buf_free_addr;
+	uint32_t rac_rab_buf_used_addr;
+
+	uint32_t rac_rab_dma_ib_lo_addr;
+	uint32_t rac_rab_dma_ib_hi_addr;
+	uint32_t rac_rab_dma_ob_lo_addr;
+	uint32_t rac_rab_dma_ob_hi_addr;
+	uint32_t rac_rab_dma_ib_rd_addr;
+	uint32_t rac_rab_dma_ob_wr_addr;
+	uint32_t rac_rab_dma_ib_wr_addr;
+
+	/* Cached field masks for the buffer free/used registers */
+	uint32_t rac_rab_buf_free_ib_free_mask;
+	uint32_t rac_rab_buf_free_ob_free_mask;
+	uint32_t rac_rab_buf_used_ib_used_mask;
+	uint32_t rac_rab_buf_used_ob_used_mask;
+	uint32_t rac_rab_buf_used_flush_mask;
+
+	uint32_t rac_rab_buf_used_ob_used_low;
+
+	uint32_t rac_nmb_rd_adr_addr;
+	uint32_t rac_nmb_data_addr;
+	uint32_t rac_nmb_wr_adr_addr;
+	uint32_t rac_nmb_status_addr;
+
+	/* DMA transfer state */
+	bool m_dma_active;
+
+	struct nt_dma_s *m_dma;
+
+	/* Host-side views of the DMA in/out buffers (device-written: volatile) */
+	volatile uint32_t *m_dma_in_buf;
+	volatile uint32_t *m_dma_out_buf;
+
+	uint16_t m_dma_out_ptr_rd;
+	uint16_t m_dma_in_ptr_wr;
+	uint32_t m_in_free;
+};
+
+typedef struct nthw_rac nthw_rac_t;
+typedef struct nthw_rac nthw_rac;
+
+/* Descriptor for a slice of the RAB DMA output buffer */
+struct dma_buf_ptr {
+	uint32_t size;
+	uint32_t index;
+	volatile uint32_t *base;
+};
+
+/* Lifecycle: allocate, initialize against an FPGA instance, free */
+nthw_rac_t *nthw_rac_new(void);
+void nthw_rac_delete(nthw_rac_t *p);
+int nthw_rac_init(nthw_rac_t *p, nt_fpga_t *p_fpga, struct fpga_info_s *p_fpga_info);
+
+int nthw_rac_get_rab_interface_count(const nthw_rac_t *p);
+
+int nthw_rac_rab_init(nthw_rac_t *p, uint32_t rab_intf_mask);
+
+int nthw_rac_rab_setup(nthw_rac_t *p);
+
+int nthw_rac_rab_reset(nthw_rac_t *p);
+
+/* RAB word transfers; all return 0 on success, -1 on failure */
+int nthw_rac_rab_write32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_write32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			    uint32_t word_cnt, const uint32_t *p_data);
+int nthw_rac_rab_read32(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+		       uint32_t word_cnt, uint32_t *p_data);
+int nthw_rac_rab_read32_dma(nthw_rac_t *p, uint32_t address, rab_bus_id_t bus_id,
+			   uint32_t word_cnt, struct dma_buf_ptr *buf_ptr);
+
+int nthw_rac_rab_flush(nthw_rac_t *p);
+
+/* DMA batching: begin a batch of rab_*_dma calls, then commit them */
+int nthw_rac_rab_dma_begin(nthw_rac_t *p);
+int nthw_rac_rab_dma_commit(nthw_rac_t *p);
+
+/* Raw BAR register access helpers */
+void nthw_rac_reg_read32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+		       uint32_t *p_data);
+void nthw_rac_reg_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_addr,
+			uint32_t p_data);
+
+#endif /* __NTHW_RAC_H__ */
diff --git a/drivers/net/ntnic/nthw/nthw_register.h b/drivers/net/ntnic/nthw/nthw_register.h
index 5cdbd9fc5d..4fe3496b9f 100644
--- a/drivers/net/ntnic/nthw/nthw_register.h
+++ b/drivers/net/ntnic/nthw/nthw_register.h
@@ -10,6 +10,8 @@
 #include <stdbool.h>
 #include <inttypes.h>
 
+#include "nthw_fpga_model.h"
+
 #include "fpga_model.h"
 
 #include "nthw_fpga_modules_defs.h"
diff --git a/drivers/net/ntnic/nthw/nthw_stat.c b/drivers/net/ntnic/nthw/nthw_stat.c
new file mode 100644
index 0000000000..fbecbc2dba
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_stat.h"
+
+#include <malloc.h>
+
+/*
+ * Allocate a zero-initialized STAT instance.
+ * Returns NULL on allocation failure; pass the result to nthw_stat_init().
+ */
+nthw_stat_t *nthw_stat_new(void)
+{
+	/* calloc() zero-fills in one step - equivalent to malloc()+memset() */
+	return calloc(1, sizeof(nthw_stat_t));
+}
+
+/* Free a STAT instance allocated by nthw_stat_new(); NULL is allowed. */
+void nthw_stat_delete(nthw_stat_t *p)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed */
+	free(p);
+}
+
+/*
+ * Initialize a STAT module instance: resolve registers/fields, derive the
+ * counter layout (colors, host buffers, per-port Rx/Tx counters) from FPGA
+ * product parameters and the STA module version, then reset the counter
+ * and DMA control fields.
+ * Calling with p == NULL only probes for the module instance.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	uint64_t n_module_version_packed64 = -1;
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_STA, n_instance);
+
+	/* Probe-only mode: report whether the module instance exists */
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_stat = mod;
+
+	n_module_version_packed64 = module_get_version_packed64(p->mp_mod_stat);
+	/* NOTE(review): "%08lX" with uint64_t is wrong where long is 32-bit; consider PRIX64 */
+	NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX\n", p_adapter_id_str,
+	       p->mn_instance, n_module_version_packed64);
+
+	{
+		nt_register_t *p_reg;
+		/* STA_CFG register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_CFG);
+		p->mp_fld_dma_ena = register_get_field(p_reg, STA_CFG_DMA_ENA);
+		p->mp_fld_cnt_clear = register_get_field(p_reg, STA_CFG_CNT_CLEAR);
+
+		/* CFG: fields NOT available from v. 3 */
+		p->mp_fld_tx_disable =
+			register_query_field(p_reg, STA_CFG_TX_DISABLE);
+		p->mp_fld_cnt_freeze = register_query_field(p_reg, STA_CFG_CNT_FRZ);
+
+		/* STA_STATUS register */
+		p_reg = module_get_register(p->mp_mod_stat, STA_STATUS);
+		p->mp_fld_stat_toggle_missed =
+			register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED);
+
+		/* HOST_ADR registers */
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB);
+		p->mp_fld_dma_lsb = register_get_field(p_reg, STA_HOST_ADR_LSB_LSB);
+
+		p_reg = module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB);
+		p->mp_fld_dma_msb = register_get_field(p_reg, STA_HOST_ADR_MSB_MSB);
+	}
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+
+	p->m_nb_nim_ports = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->m_nb_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+
+	/* Rx port count: try VSWITCH param first, then fall back (-1 = absent) */
+	p->m_nb_rx_ports =
+		fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); /* VSWITCH */
+	if (p->m_nb_rx_ports == -1) {
+		p->m_nb_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+							  -1); /* non-VSWITCH */
+		if (p->m_nb_rx_ports == -1) {
+			p->m_nb_rx_ports = fpga_get_product_param(p_fpga,
+								  NT_PORTS,
+								  0); /* non-VSWITCH */
+		}
+	}
+
+	p->m_nb_tx_ports = fpga_get_product_param(p_fpga, NT_TX_PORTS, 0);
+	p->m_rx_port_replicate =
+		fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0);
+
+	/* Color counters come in pairs (packets and octets), hence the "* 2" */
+	p->m_nb_color_counters = fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) *
+			      2; /* VSWITCH */
+	if (p->m_nb_color_counters == 0) {
+		p->m_nb_color_counters =
+			fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) *
+			2; /* non-VSWITCH */
+	}
+
+	p->m_nb_rx_host_buffers = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers;
+
+	p->m_dbs_present = fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0);
+
+	/* 6 counters per Rx host buffer; +2 more per buffer if DBS (v0.6+) */
+	p->m_nb_rx_hb_counters =
+		(p->m_nb_rx_host_buffers *
+		 (6 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			   p->m_dbs_present :
+			   0)));
+
+	p->m_nb_tx_hb_counters = 0;
+
+	p->m_nb_rx_port_counters =
+		42 + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ?
+			  p->m_dbs_present :
+			  0);
+	p->m_nb_tx_port_counters = 0;
+
+	p->m_nb_counters =
+		p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters;
+
+	/* Map STA module version onto a statistics layout version */
+	p->mn_stat_layout_version = 0;
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) {
+		p->mn_stat_layout_version = 6;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->mn_stat_layout_version = 5;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) {
+		p->mn_stat_layout_version = 4;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) {
+		p->mn_stat_layout_version = 3;
+	} else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) {
+		p->mn_stat_layout_version = 2;
+	} else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) {
+		p->mn_stat_layout_version = 1;
+	} else {
+		p->mn_stat_layout_version = 0;
+		/* NOTE(review): same "%08lX"-for-uint64_t specifier issue as above */
+		NT_LOG(ERR, NTHW,
+		       "%s: unknown module_version 0x%08lX layout=%d\n",
+		       p_adapter_id_str, n_module_version_packed64,
+		       p->mn_stat_layout_version);
+	}
+	assert(p->mn_stat_layout_version);
+
+	/* STA module 0.2+ adds IPF counters per port (Rx feature) */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 2))
+		p->m_nb_rx_port_counters += 6;
+
+	/* STA module 0.3+ adds TX stats */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) ||
+			p->m_nb_tx_ports >= 1)
+		p->mb_has_tx_stats = true;
+
+	/* STA module 0.3+ adds TX stat counters */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 3))
+		p->m_nb_tx_port_counters += 22;
+
+	/* STA module 0.4+ adds TX drop event counter */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 4))
+		p->m_nb_tx_port_counters += 1; /* TX drop event counter */
+
+	/*
+	 * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and
+	 * duplicate counters
+	 */
+	if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) {
+		p->m_nb_rx_port_counters += 4;
+		p->m_nb_tx_port_counters += 1;
+	}
+
+	/* VSWITCH profile overrides the per-port counter counts entirely */
+	if (p->mb_is_vswitch) {
+		p->m_nb_rx_port_counters = 5;
+		p->m_nb_tx_port_counters = 5;
+	}
+
+	p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters);
+
+	if (p->mb_has_tx_stats)
+		p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters);
+
+	/* Output params (debug) */
+	NT_LOG(DBG, NTHW,
+	       "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d\n",
+	       p_adapter_id_str, p->m_nb_nim_ports, p->m_nb_rx_ports, p->m_nb_tx_ports,
+	       p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers);
+	NT_LOG(DBG, NTHW,
+	       "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d\n",
+	       p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters,
+	       p->m_nb_rx_port_counters, p->m_nb_tx_port_counters);
+	NT_LOG(DBG, NTHW, "%s: layout=%d\n", p_adapter_id_str,
+	       p->mn_stat_layout_version);
+	NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)\n", p_adapter_id_str,
+	       p->m_nb_counters, p->m_nb_counters);
+	NT_LOG(DBG, NTHW, "%s: vswitch=%d\n", p_adapter_id_str, p->mb_is_vswitch);
+
+	/* Init */
+	if (p->mp_fld_tx_disable)
+		field_set_flush(p->mp_fld_tx_disable);
+
+	/* Pulse the counter-clear bit: set then clear */
+	field_update_register(p->mp_fld_cnt_clear);
+	field_set_flush(p->mp_fld_cnt_clear);
+	field_clr_flush(p->mp_fld_cnt_clear);
+
+	/* Clear any pending toggle-missed status (write-1-to-clear style) */
+	field_update_register(p->mp_fld_stat_toggle_missed);
+	field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	/* DMA disabled until nthw_stat_set_dma_address() is called */
+	field_update_register(p->mp_fld_dma_ena);
+	field_clr_flush(p->mp_fld_dma_ena);
+	field_update_register(p->mp_fld_dma_ena);
+
+	return 0;
+}
+
+/*
+ * Program the STAT DMA base address and reset the counter area.
+ * The timestamp slot lives immediately after the m_nb_counters 32-bit
+ * counters in the DMA-mapped virtual buffer.
+ * Returns 0 (asserts on a NULL virtual buffer).
+ */
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual)
+{
+	assert(p_stat_dma_virtual);
+	p->mp_timestamp = NULL;
+
+	p->m_stat_dma_physical = stat_dma_physical;
+	p->mp_stat_dma_virtual = p_stat_dma_virtual;
+
+	/* Clear the counter area before enabling DMA updates */
+	memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t)));
+
+	field_set_val_flush32(p->mp_fld_dma_msb,
+			    (uint32_t)((p->m_stat_dma_physical >> 32) &
+				       0xffffffff));
+	field_set_val_flush32(p->mp_fld_dma_lsb,
+			    (uint32_t)(p->m_stat_dma_physical & 0xffffffff));
+
+	p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters);
+	/*
+	 * Cast the pointers explicitly so the variadic arguments match the
+	 * PRIX64 specifiers; passing raw pointers to a uint64_t specifier is
+	 * undefined behavior.
+	 */
+	NT_LOG(DBG, NTHW,
+	       "%s: statDmaPhysical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64
+	       " mp_timestamp=%" PRIX64 "\n",
+	       __func__, p->m_stat_dma_physical,
+	       (uint64_t)(uintptr_t)p->mp_stat_dma_virtual,
+	       (uint64_t)(uintptr_t)p->mp_timestamp);
+	/* Seed the timestamp: current time for vswitch, "invalid" otherwise */
+	if (p->mb_is_vswitch)
+		*p->mp_timestamp = NT_OS_GET_TIME_NS();
+	else
+		*p->mp_timestamp = (uint64_t)(int64_t)-1;
+	return 0;
+}
+
+/*
+ * Request a fresh statistics DMA snapshot from the FPGA.
+ * Clears any pending toggle-missed status, invalidates the previous
+ * timestamp, and re-arms the DMA-enable field. Always returns 0.
+ */
+int nthw_stat_trigger(nthw_stat_t *p)
+{
+	/* Acknowledge a missed toggle, if the hardware flagged one */
+	if (field_get_updated(p->mp_fld_stat_toggle_missed))
+		field_set_flush(p->mp_fld_stat_toggle_missed);
+
+	/* Invalidate the old timestamp so a new snapshot is detectable */
+	if (p->mp_timestamp)
+		*p->mp_timestamp = (uint64_t)-1;
+
+	field_update_register(p->mp_fld_dma_ena);
+	field_set_flush(p->mp_fld_dma_ena);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/nthw_stat.h b/drivers/net/ntnic/nthw/nthw_stat.h
new file mode 100644
index 0000000000..7bce7ecd15
--- /dev/null
+++ b/drivers/net/ntnic/nthw/nthw_stat.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_STAT_H__
+#define __NTHW_STAT_H__
+
+/*
+ * STAT module instance state: resolved register fields plus the counter
+ * layout derived from FPGA product parameters in nthw_stat_init().
+ */
+struct nthw_stat {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_stat;
+	int mn_instance;
+
+	/* Statistics layout version derived from the STA module version */
+	int mn_stat_layout_version;
+
+	bool mb_is_vswitch;
+	bool mb_has_tx_stats;
+
+	/* Port and host-buffer counts from FPGA product parameters */
+	int m_nb_phy_ports;
+	int m_nb_nim_ports;
+
+	int m_nb_rx_ports;
+	int m_nb_tx_ports;
+
+	int m_nb_rx_host_buffers;
+	int m_nb_tx_host_buffers;
+
+	int m_dbs_present;
+
+	int m_rx_port_replicate;
+
+	/* Per-category counter counts; m_nb_counters is the derived total */
+	int m_nb_color_counters;
+
+	int m_nb_rx_hb_counters;
+	int m_nb_tx_hb_counters;
+
+	int m_nb_rx_port_counters;
+	int m_nb_tx_port_counters;
+
+	int m_nb_counters;
+
+	nt_field_t *mp_fld_dma_ena;
+	nt_field_t *mp_fld_cnt_clear;
+
+	/* Optional fields - NULL when absent from this module version */
+	nt_field_t *mp_fld_tx_disable;
+
+	nt_field_t *mp_fld_cnt_freeze;
+
+	nt_field_t *mp_fld_stat_toggle_missed;
+
+	nt_field_t *mp_fld_dma_lsb;
+	nt_field_t *mp_fld_dma_msb;
+
+	/* DMA buffer: counters first, then a 64-bit timestamp slot */
+	uint64_t m_stat_dma_physical;
+	uint32_t *mp_stat_dma_virtual;
+
+	uint64_t last_ts;
+
+	uint64_t *mp_timestamp;
+};
+
+typedef struct nthw_stat nthw_stat_t;
+typedef struct nthw_stat nthw_stat;
+
+nthw_stat_t *nthw_stat_new(void);
+int nthw_stat_init(nthw_stat_t *p, nt_fpga_t *p_fpga, int n_instance);
+void nthw_stat_delete(nthw_stat_t *p);
+
+int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical,
+			   uint32_t *p_stat_dma_virtual);
+int nthw_stat_trigger(nthw_stat_t *p);
+
+#endif /* __NTHW_STAT_H__ */
diff --git a/drivers/net/ntnic/ntlog/include/ntlog.h b/drivers/net/ntnic/ntlog/include/ntlog.h
new file mode 100644
index 0000000000..81bc014d66
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/include/ntlog.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NTLOG_H
+#define NTOSS_SYSTEM_NTLOG_H
+
+#include <stdarg.h>
+#include <stdint.h>
+
+#ifndef NT_LOG_MODULE_PREFIX
+
+/* DPDK modules */
+#define NT_LOG_MODULE_EAL 0
+#define NT_LOG_MODULE_MALLOC 1
+#define NT_LOG_MODULE_RING 2
+#define NT_LOG_MODULE_MEMPOOL 3
+#define NT_LOG_MODULE_TIMER 4
+#define NT_LOG_MODULE_PMD 5
+#define NT_LOG_MODULE_HASH 6
+#define NT_LOG_MODULE_LPM 7
+#define NT_LOG_MODULE_KNI 8
+#define NT_LOG_MODULE_ACL 9
+#define NT_LOG_MODULE_POWER 10
+#define NT_LOG_MODULE_METER 11
+#define NT_LOG_MODULE_SCHED 12
+#define NT_LOG_MODULE_PORT 13
+#define NT_LOG_MODULE_TABLE 14
+#define NT_LOG_MODULE_PIPELINE 15
+#define NT_LOG_MODULE_MBUF 16
+#define NT_LOG_MODULE_CRYPTODEV 17
+#define NT_LOG_MODULE_EFD 18
+#define NT_LOG_MODULE_EVENTDEV 19
+#define NT_LOG_MODULE_GSO 20
+#define NT_LOG_MODULE_USER1 24
+#define NT_LOG_MODULE_USER2 25
+#define NT_LOG_MODULE_USER3 26
+#define NT_LOG_MODULE_USER4 27
+#define NT_LOG_MODULE_USER5 28
+#define NT_LOG_MODULE_USER6 29
+#define NT_LOG_MODULE_USER7 30
+#define NT_LOG_MODULE_USER8 31
+
+/* NT modules */
+#define NT_LOG_MODULE_GENERAL 10000 /* Should always be a first (smallest) */
+#define NT_LOG_MODULE_NTHW 10001
+#define NT_LOG_MODULE_FILTER 10002
+#define NT_LOG_MODULE_VDPA 10003
+#define NT_LOG_MODULE_FPGA 10004
+#define NT_LOG_MODULE_NTCONNECT 10005
+#define NT_LOG_MODULE_ETHDEV 10006
+#define NT_LOG_MODULE_END 10007 /* Mark for the range end of NT_LOG */
+
+#define NT_LOG_MODULE_COUNT (NT_LOG_MODULE_END - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_INDEX(module) ((module) - NT_LOG_MODULE_GENERAL)
+#define NT_LOG_MODULE_PREFIX(type) NT_LOG_MODULE_##type
+
+#endif
+
+#ifndef NT_LOG_ENABLE
+#define NT_LOG_ENABLE 1
+#endif
+
+#if defined NT_LOG_ENABLE && NT_LOG_ENABLE > 0
+#ifndef NT_LOG_ENABLE_ERR
+#define NT_LOG_ENABLE_ERR 1
+#endif
+#ifndef NT_LOG_ENABLE_WRN
+#define NT_LOG_ENABLE_WRN 1
+#endif
+#ifndef NT_LOG_ENABLE_INF
+#define NT_LOG_ENABLE_INF 1
+#endif
+#ifndef NT_LOG_ENABLE_DBG
+#define NT_LOG_ENABLE_DBG 1
+#endif
+#ifndef NT_LOG_ENABLE_DB1
+#define NT_LOG_ENABLE_DB1 0
+#endif
+#ifndef NT_LOG_ENABLE_DB2
+#define NT_LOG_ENABLE_DB2 0
+#endif
+#endif
+
+#if defined NT_LOG_ENABLE_ERR && NT_LOG_ENABLE_ERR > 0
+#define NT_LOG_NT_LOG_ERR(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_ERR(...)
+#endif
+
+#if defined NT_LOG_ENABLE_WRN && NT_LOG_ENABLE_WRN > 0
+#define NT_LOG_NT_LOG_WRN(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_WRN(...)
+#endif
+
+#if defined NT_LOG_ENABLE_INF && NT_LOG_ENABLE_INF > 0
+#define NT_LOG_NT_LOG_INF(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_INF(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DBG && NT_LOG_ENABLE_DBG > 0
+#define NT_LOG_NT_LOG_DBG(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DBG(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB1 && NT_LOG_ENABLE_DB1 > 0
+#define NT_LOG_NT_LOG_DB1(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB1(...)
+#endif
+
+#if defined NT_LOG_ENABLE_DB2 && NT_LOG_ENABLE_DB2 > 0
+#define NT_LOG_NT_LOG_DB2(...) nt_log(__VA_ARGS__)
+#else
+#define NT_LOG_NT_LOG_DB2(...)
+#endif
+
+#define NT_LOG(level, module, ...)                                          \
+	NT_LOG_NT_LOG_##level(NT_LOG_##level, NT_LOG_MODULE_PREFIX(module), \
+			      #module ": " #level ": " __VA_ARGS__)
+
+/* Severity levels; bitmask-style values allow level sets/filters */
+enum nt_log_level {
+	NT_LOG_ERR = 0x001,
+	NT_LOG_WRN = 0x002,
+	NT_LOG_INF = 0x004,
+	NT_LOG_DBG = 0x008,
+	NT_LOG_DB1 = 0x010,
+	NT_LOG_DB2 = 0x020,
+};
+
+/* Backend vtable: the embedding layer supplies the actual log sink */
+struct nt_log_impl {
+	int (*init)(void);
+	int (*log)(enum nt_log_level level, uint32_t module, const char *format,
+		   va_list args);
+	int (*is_debug)(uint32_t module);
+};
+
+/* Register a backend; must be called before any nt_log() output appears */
+int nt_log_init(struct nt_log_impl *impl);
+
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...);
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module */
+int nt_log_is_debug(uint32_t module);
+
+/*
+ * nt log helper functions
+ * to create a string for NT_LOG usage to output a one-liner log
+ * to use when one single function call to NT_LOG is not optimal - that is
+ * you do not know the number of parameters at programming time or it is variable
+ */
+char *ntlog_helper_str_alloc(const char *sinit);
+
+void ntlog_helper_str_reset(char *s, const char *sinit);
+
+void ntlog_helper_str_add(char *s, const char *format, ...);
+
+void ntlog_helper_str_free(char *s);
+
+#endif /* NTOSS_SYSTEM_NTLOG_H */
diff --git a/drivers/net/ntnic/ntlog/ntlog.c b/drivers/net/ntnic/ntlog/ntlog.c
new file mode 100644
index 0000000000..def07f15d0
--- /dev/null
+++ b/drivers/net/ntnic/ntlog/ntlog.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_string_fns.h>
+
+#define NTLOG_HELPER_STR_SIZE_MAX (1024)
+
+static struct nt_log_impl *user_impl;
+
+/*
+ * Register the logging backend and run its init hook.
+ * Returns the backend's init() result, or -1 if impl is NULL.
+ */
+int nt_log_init(struct nt_log_impl *impl)
+{
+	/* Guard against a NULL backend to avoid dereferencing it below */
+	if (impl == NULL)
+		return -1;
+	user_impl = impl;
+	return user_impl->init();
+}
+
+/*
+ * Return a pointer to the first '\n' of the trailing newline run in s
+ * ("hello\n\n\n" -> the first of the three newlines), ignoring trailing
+ * spaces. Returns NULL if s does not end with a newline.
+ */
+static char *last_trailing_eol(char *s)
+{
+	const size_t len = strlen(s);
+
+	/* Empty string: nothing to find - also avoids reading s[-1] below */
+	if (len == 0)
+		return NULL;
+
+	int i = (int)len - 1;
+
+	/* Skip spaces */
+	while (i > 0 && s[i] == ' ')
+		--i;
+	if (s[i] != '\n')
+		return NULL;
+	/*
+	 * Find the last trailing EOL "hello_world\n\n\n"
+	 *                                         ^
+	 */
+	while (i > 1 && s[i] == '\n' && s[i - 1] == '\n')
+		--i;
+	return &s[i];
+}
+
+/* Always terminates the NT_LOG statement with a !!!single!!! EOL. */
+int nt_log(enum nt_log_level level, uint32_t module, const char *format, ...)
+{
+	int rv = -1;
+	va_list args;
+
+	if (user_impl == NULL)
+		return rv;
+
+	char *actual_format = ntlog_helper_str_alloc(format);
+	char *eol = last_trailing_eol(actual_format);
+
+	if (!eol) { /* If log line is not terminated with '\n' we add it. */
+		strncat(actual_format, "\n",
+			NTLOG_HELPER_STR_SIZE_MAX - strlen(actual_format));
+	} else {   /* If multiple trailing EOLs, then keep just one of them. */
+		*(eol + 1) = '\0';
+	}
+
+	va_start(args, format);
+	rv = user_impl->log(level, module, actual_format, args);
+	va_end(args);
+
+	ntlog_helper_str_free(actual_format);
+	return rv;
+}
+
+/* Returns 1 if RTE_DEBUG, 0 if lower log level, -1 if incorrect module
+ * or if no backend has been registered (consistent with nt_log()).
+ */
+int nt_log_is_debug(uint32_t module)
+{
+	/* nt_log() checks user_impl before use; do the same here */
+	if (user_impl == NULL)
+		return -1;
+	return user_impl->is_debug(module);
+}
+
+/*
+ * Allocate a fixed-size scratch string buffer, optionally seeded with a
+ * (truncated) copy of sinit. Returns NULL on allocation failure; free
+ * with ntlog_helper_str_free().
+ */
+char *ntlog_helper_str_alloc(const char *sinit)
+{
+	char *buf = malloc(NTLOG_HELPER_STR_SIZE_MAX);
+
+	if (buf == NULL)
+		return NULL;
+
+	if (sinit == NULL)
+		buf[0] = '\0';
+	else
+		rte_strscpy(buf, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+
+	return buf;
+}
+
+/*
+ * Reset a helper string buffer: copy sinit into it (truncated to the
+ * buffer size), or make it empty when sinit is NULL. NULL s is a no-op.
+ */
+void ntlog_helper_str_reset(char *s, const char *sinit)
+{
+	if (s == NULL)
+		return;
+
+	if (sinit == NULL)
+		s[0] = '\0';
+	else
+		rte_strscpy(s, sinit, NTLOG_HELPER_STR_SIZE_MAX);
+}
+
+/*
+ * Append printf-formatted text to s, bounded by the helper buffer size.
+ * The variadic arguments start at parameter 3, so the format attribute
+ * is (2, 3) - with (2, 0) the compiler would not type-check the args.
+ */
+__rte_format_printf(2, 3)
+void ntlog_helper_str_add(char *s, const char *format, ...)
+{
+	if (!s)
+		return;
+	va_list args;
+
+	va_start(args, format);
+	int len = strlen(s);
+
+	/* "- 1" keeps room for the NUL; vsnprintf truncates safely */
+	vsnprintf(&s[len], (NTLOG_HELPER_STR_SIZE_MAX - 1 - len), format, args);
+	va_end(args);
+}
+
+void ntlog_helper_str_free(char *s)
+{
+	free(s);
+}
diff --git a/drivers/net/ntnic/ntutil/include/nt_util.h b/drivers/net/ntnic/ntutil/include/nt_util.h
new file mode 100644
index 0000000000..cc6891e82c
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/include/nt_util.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTOSS_SYSTEM_NT_UTIL_H
+#define NTOSS_SYSTEM_NT_UTIL_H
+
+#include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+
+#define _unused __rte_unused
+
+#define PCIIDENT_TO_DOMAIN(pci_ident) \
+	((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU))
+#define PCIIDENT_TO_BUSNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU))
+#define PCIIDENT_TO_DEVNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU))
+#define PCIIDENT_TO_FUNCNR(pci_ident) \
+	((uint8_t)(((unsigned int)(pci_ident) >> 0) & 0x7U))
+
+#define PCIIDENT_PRINT_STR "%04x:%02x:%02x.%x"
+#define BDF_TO_PCIIDENT(dom, bus, dev, fnc) \
+	(((dom) << 16) | ((bus) << 8) | ((dev) << 3) | (fnc))
+
+/* ALIGN: Align x to a boundary */
+#define ALIGN(x, a)                           \
+	({                                    \
+		__typeof__(x) _a = (a);       \
+		((x) + (_a - 1)) & ~(_a - 1); \
+	})
+
+/* PALIGN: Align pointer p to a boundary */
+#define PALIGN(p, a) ((__typeof__(p))ALIGN((unsigned long)(p), (a)))
+
+/* Allocation size matching minimum alignment of specified size */
+#define ALIGN_SIZE(_size_) (1 << rte_log2_u64(_size_))
+
+#define NT_OS_WAIT_USEC(x)    \
+	rte_delay_us_sleep( \
+		x) /* uses usleep which schedules out the calling thread */
+/* spins in a waiting loop calling pause asm instruction uses RDTSC - precise wait */
+#define NT_OS_WAIT_USEC_POLL(x) \
+	rte_delay_us(        \
+		x)
+
+#define NT_OS_GET_TIME_US() \
+	(rte_get_timer_cycles() / (rte_get_timer_hz() / 1000 / 1000))
+#define NT_OS_GET_TIME_NS() \
+	(rte_get_timer_cycles() * 10 / (rte_get_timer_hz() / 1000 / 1000 / 100))
+#define NT_OS_GET_TIME_MONOTONIC_COUNTER() (rte_get_timer_cycles())
+
+/* One DMA-able allocation: IOVA for the device, VA and size for the host */
+struct nt_dma_s {
+	uint64_t iova;
+	uint64_t addr;	/* host virtual address, stored as an integer */
+	uint64_t size;	/* size after ALIGN_SIZE() rounding */
+};
+
+/* Allocate/free DMA memory mapped through the VFIO callbacks below */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa);
+void nt_dma_free(struct nt_dma_s *vfio_addr);
+
+/* VFIO map/unmap callbacks supplied by the driver layer */
+struct nt_util_vfio_impl {
+	int (*vfio_dma_map)(int vf_num, void *virt_addr, uint64_t *iova_addr,
+			    uint64_t size);
+	int (*vfio_dma_unmap)(int vf_num, void *virt_addr, uint64_t iova_addr,
+			      uint64_t size);
+};
+
+/* Must be called before nt_dma_alloc()/nt_dma_free() */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl);
+
+#endif /* NTOSS_SYSTEM_NT_UTIL_H */
diff --git a/drivers/net/ntnic/ntutil/nt_util.c b/drivers/net/ntnic/ntutil/nt_util.c
new file mode 100644
index 0000000000..8f5812bf8b
--- /dev/null
+++ b/drivers/net/ntnic/ntutil/nt_util.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+
+#include "ntlog.h"
+#include "nt_util.h"
+
+static struct nt_util_vfio_impl vfio_cb;
+
+/* Install the VFIO map/unmap callbacks used by nt_dma_alloc()/nt_dma_free() */
+void nt_util_vfio_init(struct nt_util_vfio_impl *impl)
+{
+	vfio_cb = *impl;
+}
+
+/*
+ * Allocate DMA-able memory and map it for device access via VFIO.
+ * size is rounded up with ALIGN_SIZE(); numa selects the socket for the
+ * data buffer. Returns a descriptor, or NULL on any failure (all
+ * intermediate allocations are released).
+ */
+struct nt_dma_s *nt_dma_alloc(uint64_t size, uint64_t align, int numa)
+{
+	int res;
+	struct nt_dma_s *vfio_addr;
+
+	vfio_addr = rte_malloc(NULL, sizeof(struct nt_dma_s), 0);
+	if (!vfio_addr) {
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc failed\n");
+		return NULL;
+	}
+	void *addr = rte_malloc_socket(NULL, size, align, numa);
+
+	if (!addr) {
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO rte_malloc_socket failed\n");
+		return NULL;
+	}
+	res = vfio_cb.vfio_dma_map(0, addr, &vfio_addr->iova,
+				   ALIGN_SIZE(size));
+	if (res != 0) {
+		rte_free(addr);
+		rte_free(vfio_addr);
+		NT_LOG(ERR, GENERAL, "VFIO nt_dma_map failed\n");
+		return NULL;
+	}
+
+	vfio_addr->addr = (uint64_t)addr;
+	vfio_addr->size = ALIGN_SIZE(size);
+
+	/* size and align are uint64_t: use PRIu64/PRIX64, not %u/%X */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA alloc addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 ", align=0x%" PRIX64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size, align);
+
+	return vfio_addr;
+}
+
+/*
+ * Unmap and release memory obtained from nt_dma_alloc().
+ * An unmap failure is logged as a warning, but the host memory and the
+ * descriptor are freed regardless to avoid leaking them.
+ */
+void nt_dma_free(struct nt_dma_s *vfio_addr)
+{
+	/* size is uint64_t: use PRIu64, not %u (format mismatch is UB) */
+	NT_LOG(DBG, GENERAL,
+	       "VFIO DMA free addr=%" PRIX64 ", iova=%" PRIX64
+	       ", size=%" PRIu64 "\n",
+	       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+
+	int res = vfio_cb.vfio_dma_unmap(0, (void *)(vfio_addr->addr),
+					 vfio_addr->iova, vfio_addr->size);
+	if (res != 0) {
+		NT_LOG(WRN, GENERAL,
+		       "VFIO DMA free FAILED addr=%" PRIX64 ", iova=%" PRIX64
+		       ", size=%" PRIu64 "\n",
+		       vfio_addr->addr, vfio_addr->iova, vfio_addr->size);
+	}
+	rte_free((void *)(vfio_addr->addr));
+	rte_free(vfio_addr);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 3/8] net/ntnic: adds NT200A02 adapter support
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
@ 2023-09-08 16:07   ` Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD is designed to support multiple different adapters, and this commit
adds support for NT200A02 2x100G. Sensor and NIM code is included.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v13:
* Fix typo spelling warnings
---
 .../net/ntnic/adapter/common_adapter_defs.h   |   14 +
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |  477 ++++
 drivers/net/ntnic/adapter/nt4ga_adapter.h     |  108 +
 drivers/net/ntnic/adapter/nt4ga_filter.h      |   15 +
 drivers/net/ntnic/adapter/nt4ga_link.c        |  178 ++
 drivers/net/ntnic/adapter/nt4ga_link.h        |  179 ++
 drivers/net/ntnic/adapter/nt4ga_link_100g.c   |  825 +++++++
 drivers/net/ntnic/adapter/nt4ga_link_100g.h   |   12 +
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c   |  598 +++++
 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h   |   41 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |  705 ++++++
 drivers/net/ntnic/adapter/nt4ga_stat.h        |  202 ++
 drivers/net/ntnic/meson.build                 |   24 +
 drivers/net/ntnic/nim/i2c_nim.c               | 1974 +++++++++++++++++
 drivers/net/ntnic/nim/i2c_nim.h               |  122 +
 drivers/net/ntnic/nim/nim_defines.h           |  146 ++
 drivers/net/ntnic/nim/nt_link_speed.c         |  105 +
 drivers/net/ntnic/nim/nt_link_speed.h         |   34 +
 drivers/net/ntnic/nim/qsfp_registers.h        |   57 +
 drivers/net/ntnic/nim/qsfp_sensors.c          |  174 ++
 drivers/net/ntnic/nim/qsfp_sensors.h          |   18 +
 drivers/net/ntnic/nim/sfp_p_registers.h       |  100 +
 drivers/net/ntnic/nim/sfp_sensors.c           |  288 +++
 drivers/net/ntnic/nim/sfp_sensors.h           |   18 +
 .../net/ntnic/nthw/core/nthw_clock_profiles.c |   11 +-
 drivers/net/ntnic/nthw/core/nthw_core.h       |    2 +
 drivers/net/ntnic/nthw/core/nthw_gmf.c        |  290 +++
 drivers/net/ntnic/nthw/core/nthw_gmf.h        |   93 +
 .../nthw/core/nthw_nt200a02_u23_si5340_v5.h   |  344 +++
 drivers/net/ntnic/nthw/core/nthw_rmc.c        |  156 ++
 drivers/net/ntnic/nthw/core/nthw_rmc.h        |   57 +
 .../ntnic/sensors/avr_sensors/avr_sensors.c   |  104 +
 .../ntnic/sensors/avr_sensors/avr_sensors.h   |   22 +
 .../sensors/board_sensors/board_sensors.c     |   48 +
 .../sensors/board_sensors/board_sensors.h     |   18 +
 .../net/ntnic/sensors/board_sensors/tempmon.c |   42 +
 .../net/ntnic/sensors/board_sensors/tempmon.h |   16 +
 .../ntnic/sensors/nim_sensors/nim_sensors.c   |   54 +
 .../ntnic/sensors/nim_sensors/nim_sensors.h   |   19 +
 drivers/net/ntnic/sensors/ntavr/avr_intf.h    |   89 +
 drivers/net/ntnic/sensors/ntavr/ntavr.c       |   78 +
 drivers/net/ntnic/sensors/ntavr/ntavr.h       |   32 +
 drivers/net/ntnic/sensors/sensor_types.h      |  259 +++
 drivers/net/ntnic/sensors/sensors.c           |  273 +++
 drivers/net/ntnic/sensors/sensors.h           |  127 ++
 drivers/net/ntnic/sensors/stream_info.h       |   86 +
 46 files changed, 8632 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/adapter/common_adapter_defs.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_adapter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_filter.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_link_100g.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.c
 create mode 100644 drivers/net/ntnic/adapter/nt4ga_stat.h
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.c
 create mode 100644 drivers/net/ntnic/nim/i2c_nim.h
 create mode 100644 drivers/net/ntnic/nim/nim_defines.h
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.c
 create mode 100644 drivers/net/ntnic/nim/nt_link_speed.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_registers.h
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/qsfp_sensors.h
 create mode 100644 drivers/net/ntnic/nim/sfp_p_registers.h
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.c
 create mode 100644 drivers/net/ntnic/nim/sfp_sensors.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_gmf.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/core/nthw_rmc.h
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/board_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.c
 create mode 100644 drivers/net/ntnic/sensors/board_sensors/tempmon.h
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
 create mode 100644 drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/avr_intf.h
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.c
 create mode 100644 drivers/net/ntnic/sensors/ntavr/ntavr.h
 create mode 100644 drivers/net/ntnic/sensors/sensor_types.h
 create mode 100644 drivers/net/ntnic/sensors/sensors.c
 create mode 100644 drivers/net/ntnic/sensors/sensors.h
 create mode 100644 drivers/net/ntnic/sensors/stream_info.h

diff --git a/drivers/net/ntnic/adapter/common_adapter_defs.h b/drivers/net/ntnic/adapter/common_adapter_defs.h
new file mode 100644
index 0000000000..79167806f1
--- /dev/null
+++ b/drivers/net/ntnic/adapter/common_adapter_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _COMMON_ADAPTER_DEFS_H_
+#define _COMMON_ADAPTER_DEFS_H_
+
+/*
+ * Declarations shared by NT adapter types.
+ */
+/* Max number of adapters handled by one driver instance */
+#define NUM_ADAPTER_MAX (8)
+/* Max number of physical ports on a single adapter */
+#define NUM_ADAPTER_PORTS_MAX (128)
+
+#endif /* _COMMON_ADAPTER_DEFS_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
new file mode 100644
index 0000000000..259aae2831
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_link_100g.h"
+
+/* Sensors includes */
+#include "board_sensors.h"
+#include "avr_sensors.h"
+
+/*
+ * Global variables shared by NT adapter types
+ */
+pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Stop all monitor threads.
+ *
+ * Installed as a signal handler, but also called directly from
+ * nt4ga_adapter_deinit() with signum == -1.  Only in the direct
+ * (non-signal) case are the threads joined; from a real signal
+ * context the function merely clears the run flags and lets the
+ * threads terminate on their own (pthread_join is not
+ * async-signal-safe).
+ */
+static void stop_monitor_tasks(int signum)
+{
+	const size_t n = ARRAY_SIZE(monitor_task_is_running);
+	size_t i;
+
+	/* Stop all monitor tasks */
+	for (i = 0; i < n; i++) {
+		const int is_running = monitor_task_is_running[i];
+
+		/* Clearing the flag asks the task's loop to exit */
+		monitor_task_is_running[i] = 0;
+		if (signum == -1 && is_running != 0) {
+			void *ret_val = NULL;
+
+			pthread_join(monitor_tasks[i], &ret_val);
+			memset(&monitor_tasks[i], 0, sizeof(monitor_tasks[0]));
+		}
+	}
+}
+
+/*
+ * Print a human-readable adapter summary (device name, PCI identity,
+ * FPGA image ids/build time, port and NIM counts, HW platform) on the
+ * stream pfh, followed by the statistics dump.
+ *
+ * Always returns 0.
+ */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	const char *const p_dev_name = p_adapter_info->p_dev_name;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *p_fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+	char a_pci_ident_str[32];
+
+	/* Format the PCI ident as domain:bus:dev.func */
+	snprintf(a_pci_ident_str, sizeof(a_pci_ident_str), "" PCIIDENT_PRINT_STR "",
+		PCIIDENT_TO_DOMAIN(p_fpga_info->pciident),
+		PCIIDENT_TO_BUSNR(p_fpga_info->pciident),
+		PCIIDENT_TO_DEVNR(p_fpga_info->pciident),
+		PCIIDENT_TO_FUNCNR(p_fpga_info->pciident));
+
+	fprintf(pfh, "%s: DeviceName: %s\n", p_adapter_id_str,
+		(p_dev_name ? p_dev_name : "NA"));
+	fprintf(pfh, "%s: PCI Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %s: %08X: %04X:%04X %04X:%04X\n", p_adapter_id_str,
+		a_pci_ident_str, p_fpga_info->pciident, p_hw_info->pci_vendor_id,
+		p_hw_info->pci_device_id, p_hw_info->pci_sub_vendor_id,
+		p_hw_info->pci_sub_device_id);
+	fprintf(pfh, "%s: FPGA Details:\n", p_adapter_id_str);
+	fprintf(pfh, "%s: %03d-%04d-%02d-%02d [%016" PRIX64 "] (%08X)\n",
+		p_adapter_id_str, p_fpga_info->n_fpga_type_id, p_fpga_info->n_fpga_prod_id,
+		p_fpga_info->n_fpga_ver_id, p_fpga_info->n_fpga_rev_id,
+		p_fpga_info->n_fpga_ident, p_fpga_info->n_fpga_build_time);
+	fprintf(pfh, "%s: FpgaDebugMode=0x%x\n", p_adapter_id_str,
+		p_fpga_info->n_fpga_debug_mode);
+	fprintf(pfh,
+		"%s: Nims=%d PhyPorts=%d PhyQuads=%d RxPorts=%d TxPorts=%d\n",
+		p_adapter_id_str, p_fpga_info->n_nims, p_fpga_info->n_phy_ports,
+		p_fpga_info->n_phy_quads, p_fpga_info->n_rx_ports, p_fpga_info->n_tx_ports);
+	fprintf(pfh, "%s: Hw=0x%02X_rev%d: %s\n", p_adapter_id_str,
+		p_hw_info->hw_platform_id, p_fpga_info->nthw_hw_info.hw_id,
+		p_fpga_info->nthw_hw_info.hw_plat_id_str);
+
+	/* Append the statistics section */
+	nt4ga_stat_dump(p_adapter_info, pfh);
+
+	return 0;
+}
+
+/*
+ * Create and initialize the SPI (v3) channel used while configuring
+ * the adapter sensors.  Returns NULL on allocation or init failure.
+ */
+static nthw_spi_v3_t *new_sensors_s_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spi_v3_t *p_spi = nthw_spi_v3_new();
+
+	if (p_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spi_v3_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spi_v3_delete(p_spi);
+		p_spi = NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * Create and initialize the SPI channel used for reading sensors.
+ * Returns NULL on allocation or init failure.
+ */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga)
+{
+	nthw_spis_t *p_spi = nthw_spis_new();
+
+	if (p_spi == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI allocation error\n", __func__);
+		return NULL;
+	}
+
+	if (nthw_spis_init(p_spi, p_fpga, 0) != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: SPI initialization error\n", __func__);
+		nthw_spis_delete(p_spi);
+		p_spi = NULL;
+	}
+
+	return p_spi;
+}
+
+/*
+ * Register the adapter-level sensors: the FPGA temperature sensor and,
+ * when the sensor SPI channel is available, the AVR-monitored FAN0,
+ * PSU0, PSU1 and PCB sensors.
+ *
+ * adapter->adapter_sensors_cnt counts only sensors that were actually
+ * created.  A failing sensor init is skipped (the list tail is not
+ * advanced onto a NULL node), instead of being dereferenced on the
+ * next chain operation as before.
+ */
+static void adapter_sensor_setup(hw_info_t *p_hw_info, struct adapter_info_s *adapter)
+{
+	struct nt_fpga_s *p_fpga = adapter->fpga_info.mp_fpga;
+	struct nt_sensor_group *sensors_list_ptr = NULL;
+	nthw_spi_v3_t *sensors_s_spi = new_sensors_s_spi(p_fpga);
+
+	adapter->adapter_sensors_cnt = 0;
+
+	/* FPGA temperature sensor becomes the head of the sensor list */
+	adapter->adapter_sensors = fpga_temperature_sensor_init(p_hw_info->n_nthw_adapter_id,
+								NT_SENSOR_FPGA_TEMP, p_fpga);
+	sensors_list_ptr = adapter->adapter_sensors;
+	if (sensors_list_ptr == NULL) {
+		/* Without a list head the AVR sensors cannot be chained on */
+		NT_LOG(ERR, ETHDEV, "%s: FPGA temperature sensor init failed\n",
+		       __func__);
+		if (sensors_s_spi)
+			nthw_spi_v3_delete(sensors_s_spi);
+		return;
+	}
+	adapter->adapter_sensors_cnt++;
+
+	/* AVR */
+	if (sensors_s_spi) {
+		if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+					   SENSOR_MON_CTRL_REM_ALL_SENSORS) != 0) {
+			/* stop sensor monitoring */
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to stop AVR sensors monitoring\n");
+		} else {
+			NT_LOG(DBG, ETHDEV, "AVR sensors init started\n");
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "FAN0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_FAN,
+								 NT_SENSOR_NT200E3_FAN_SPEED,
+								 SENSOR_MON_FAN, 0,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &fan, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU0",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PSU0_TEMP,
+								 SENSOR_MON_PSU_EXAR_7724_0, 0x15,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &exar7724_tj, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PSU1",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200A02_PSU1_TEMP,
+								 SENSOR_MON_MP2886A, 0x8d,
+								 SENSOR_MON_BIG_ENDIAN,
+								 SENSOR_MON_UNSIGNED,
+								 &mp2886a_tj, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			sensors_list_ptr->next = avr_sensor_init(sensors_s_spi,
+								 p_hw_info->n_nthw_adapter_id,
+								 "PCB",
+								 NT_SENSOR_SOURCE_ADAPTER,
+								 NT_SENSOR_TYPE_TEMPERATURE,
+								 NT_SENSOR_NT200E3_PCB_TEMP,
+								 SENSOR_MON_DS1775, 0,
+								 SENSOR_MON_LITTLE_ENDIAN,
+								 SENSOR_MON_SIGNED,
+								 &ds1775_t, 0xFFFF);
+			if (sensors_list_ptr->next) {
+				sensors_list_ptr = sensors_list_ptr->next;
+				adapter->adapter_sensors_cnt++;
+			}
+
+			NT_LOG(DBG, ETHDEV, "AVR sensors init finished\n");
+
+			if (nt_avr_sensor_mon_ctrl(sensors_s_spi,
+						   SENSOR_MON_CTRL_RUN) != 0) {
+				/* start sensor monitoring */
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to start AVR sensors monitoring\n");
+			} else {
+				NT_LOG(DBG, ETHDEV,
+				       "AVR sensors monitoring started\n");
+			}
+		}
+
+		nthw_spi_v3_delete(sensors_s_spi);
+	}
+}
+
+/*
+ * Bring up one NT4GA adapter.
+ *
+ * Decodes the HW identity from the PCI device id, allocates the
+ * device/adapter/port ident strings, instantiates the FPGA model
+ * (nthw_fpga_init), runs the optional PCI TA/TG throughput check,
+ * registers the sensors, initializes the ports for the detected FPGA
+ * product, and finally sets up the EPP and statistics modules.
+ *
+ * Returns 0 on success, otherwise the error code of the failing step.
+ * NOTE(review): the ident strings allocated here are only freed in
+ * nt4ga_adapter_deinit(); an early error return relies on the caller
+ * still invoking deinit - confirm callers always do so.
+ */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
+{
+	char *const p_dev_name = malloc(24);
+	char *const p_adapter_id_str = malloc(24);
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	hw_info_t *p_hw_info = &p_adapter_info->hw_info;
+
+	/*
+	 * IMPORTANT: Most variables cannot be determined before fpga model is instantiated
+	 * (nthw_fpga_init())
+	 */
+	int n_phy_ports = -1;
+	int n_nim_ports = -1;
+	int res = -1;
+	nt_fpga_t *p_fpga = NULL;
+
+	(void)n_nim_ports; /* currently UNUSED - prevent warning */
+
+	p_hw_info->n_nthw_adapter_id =
+		nthw_platform_get_nthw_adapter_id(p_hw_info->pci_device_id);
+
+	fpga_info->n_nthw_adapter_id = p_hw_info->n_nthw_adapter_id;
+	/* Decode pci_device_id fields; ref: DN-0060 section 9 */
+	p_hw_info->hw_product_type = p_hw_info->pci_device_id &
+				   0x000f; /* ref: DN-0060 section 9 */
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_platform_id = (p_hw_info->pci_device_id >> 4) & 0x00ff;
+	/* ref: DN-0060 section 9 */
+	p_hw_info->hw_reserved1 = (p_hw_info->pci_device_id >> 12) & 0x000f;
+
+	/* mp_dev_name: "domain:bus:dev.func" ident string (NULL tolerated) */
+	p_adapter_info->p_dev_name = p_dev_name;
+	if (p_dev_name) {
+		snprintf(p_dev_name, 24, "" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: (0x%08X)\n", p_dev_name,
+		       p_adapter_info->fpga_info.pciident);
+	}
+
+	/* mp_adapter_id_str: "PCI:domain:bus:dev.func" ident string */
+	p_adapter_info->mp_adapter_id_str = p_adapter_id_str;
+
+	p_adapter_info->fpga_info.mp_adapter_id_str = p_adapter_id_str;
+
+	if (p_adapter_id_str) {
+		snprintf(p_adapter_id_str, 24, "PCI:" PCIIDENT_PRINT_STR "",
+			 PCIIDENT_TO_DOMAIN(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_BUSNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_DEVNR(p_adapter_info->fpga_info.pciident),
+			 PCIIDENT_TO_FUNCNR(p_adapter_info->fpga_info.pciident));
+		NT_LOG(DBG, ETHDEV, "%s: %s\n", p_adapter_id_str, p_dev_name);
+	}
+
+	/* Per-port ident strings "<adapter>:intf_<i>" */
+	{
+		int i;
+
+		for (i = 0; i < (int)ARRAY_SIZE(p_adapter_info->mp_port_id_str);
+				i++) {
+			char *p = malloc(32);
+
+			if (p) {
+				snprintf(p, 32, "%s:intf_%d",
+					 (p_adapter_id_str ? p_adapter_id_str : "NA"),
+					 i);
+				NT_LOG(DBG, ETHDEV, "%s\n", p);
+			}
+			p_adapter_info->mp_port_id_str[i] = p;
+		}
+	}
+
+	/* Instantiate the FPGA model; fpga_info fields are valid after this */
+	res = nthw_fpga_init(&p_adapter_info->fpga_info);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: %s: FPGA=%04d res=x%08X [%s:%u]\n",
+		       p_adapter_id_str, p_dev_name, fpga_info->n_fpga_prod_id, res,
+		       __func__, __LINE__);
+		return res;
+	}
+
+	assert(fpga_info);
+	p_fpga = fpga_info->mp_fpga;
+	assert(p_fpga);
+	n_phy_ports = fpga_info->n_phy_ports;
+	assert(n_phy_ports >= 1);
+	n_nim_ports = fpga_info->n_nims;
+	assert(n_nim_ports >= 1);
+
+	/*
+	 * HIF/PCI TA/TG
+	 */
+	{
+		res = nt4ga_pci_ta_tg_init(p_adapter_info);
+		if (res == 0) {
+			nt4ga_pci_ta_tg_measure_throughput_main(p_adapter_info,
+								0, 0,
+								TG_PKT_SIZE,
+								TG_NUM_PACKETS,
+								TG_DELAY);
+		} else {
+			/* TA/TG absence is not fatal */
+			NT_LOG(WRN, ETHDEV,
+			       "%s: PCI TA/TG is not available - skipping\n",
+			       p_adapter_id_str);
+		}
+	}
+
+	adapter_sensor_setup(p_hw_info, p_adapter_info);
+
+	/* Port bring-up for the detected FPGA product */
+	{
+		int i;
+
+		assert(fpga_info->n_fpga_prod_id > 0);
+		for (i = 0; i < NUM_ADAPTER_PORTS_MAX; i++) {
+			/* Disable all ports. Must be enabled later */
+			p_adapter_info->nt4ga_link.port_action[i].port_disable =
+				true;
+		}
+		switch (fpga_info->n_fpga_prod_id) {
+		/* NT200A02: 2x100G */
+		case 9563: /* NT200A02 */
+			res = nt4ga_link_100g_ports_init(p_adapter_info, p_fpga);
+			break;
+		default:
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Unsupported FPGA product: %04d\n", __func__,
+			       fpga_info->n_fpga_prod_id);
+			res = -1;
+			break;
+		}
+
+		if (res) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: %s: %s: %u: FPGA=%04d res=x%08X\n",
+			       p_adapter_id_str, p_dev_name, __func__, __LINE__,
+			       fpga_info->n_fpga_prod_id, res);
+			return res;
+		}
+	}
+
+	/*
+	 * HostBuffer Systems
+	 */
+	p_adapter_info->n_rx_host_buffers = 0;
+	p_adapter_info->n_tx_host_buffers = 0;
+
+	/* EPP is optional; only set up when present in the FPGA image */
+	p_adapter_info->fpga_info.mp_nthw_epp = NULL;
+	if (nthw_epp_present(p_adapter_info->fpga_info.mp_fpga, 0)) {
+		p_adapter_info->fpga_info.mp_nthw_epp = nthw_epp_new();
+		if (p_adapter_info->fpga_info.mp_nthw_epp == NULL) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot create EPP\n",
+			       p_adapter_id_str);
+			return -1;
+		}
+
+		res = nthw_epp_init(p_adapter_info->fpga_info.mp_nthw_epp,
+				    p_adapter_info->fpga_info.mp_fpga, 0);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot initialize EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+		NT_LOG(DBG, ETHDEV, "%s: Initialized EPP\n",
+		       p_adapter_id_str);
+
+		res = nthw_epp_setup(p_adapter_info->fpga_info.mp_nthw_epp);
+		if (res != 0) {
+			NT_LOG(ERR, ETHDEV, "%s: Cannot setup EPP\n",
+			       p_adapter_id_str);
+			return res;
+		}
+	}
+
+	/* Nt4ga Stat init/setup */
+	res = nt4ga_stat_init(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+	res = nt4ga_stat_setup(p_adapter_info);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot setup the statistics module\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down an adapter initialized by nt4ga_adapter_init(): stop the
+ * monitor threads, stop statistics, shut the FPGA model down, reset
+ * the RAC RAB flip-flop and free ident strings and sensor lists.
+ *
+ * Returns the result of nthw_rac_rab_reset().
+ */
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info)
+{
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	int i;
+	int res;
+	struct nt_sensor_group *cur_adapter_sensor = NULL;
+	struct nt_sensor_group *next_adapter_sensor = NULL;
+	struct nim_sensor_group *cur_nim_sensor = NULL;
+	struct nim_sensor_group *next_nim_sensor = NULL;
+
+	/* signum == -1: direct call, so the monitor threads are joined */
+	stop_monitor_tasks(-1);
+
+	nt4ga_stat_stop(p_adapter_info);
+
+	nthw_fpga_shutdown(&p_adapter_info->fpga_info);
+
+	/* Rac rab reset flip flop */
+	res = nthw_rac_rab_reset(fpga_info->mp_nthw_rac);
+
+	/* Free adapter port ident strings */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->mp_port_id_str[i]) {
+			free(p_adapter_info->mp_port_id_str[i]);
+			p_adapter_info->mp_port_id_str[i] = NULL;
+		}
+	}
+
+	/* Free adapter ident string */
+	if (p_adapter_info->mp_adapter_id_str) {
+		free(p_adapter_info->mp_adapter_id_str);
+		p_adapter_info->mp_adapter_id_str = NULL;
+	}
+
+	/* Free devname ident string */
+	if (p_adapter_info->p_dev_name) {
+		free(p_adapter_info->p_dev_name);
+		p_adapter_info->p_dev_name = NULL;
+	}
+
+	/* Free adapter sensors (walk the singly linked list) */
+	if (p_adapter_info->adapter_sensors != NULL) {
+		do {
+			cur_adapter_sensor = p_adapter_info->adapter_sensors;
+			next_adapter_sensor =
+				p_adapter_info->adapter_sensors->next;
+			p_adapter_info->adapter_sensors = next_adapter_sensor;
+
+			sensor_deinit(cur_adapter_sensor);
+		} while (next_adapter_sensor != NULL);
+	}
+
+	/* Free NIM sensors (one list per physical port) */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		if (p_adapter_info->nim_sensors[i] != NULL) {
+			do {
+				cur_nim_sensor = p_adapter_info->nim_sensors[i];
+				next_nim_sensor =
+					p_adapter_info->nim_sensors[i]->next;
+				p_adapter_info->nim_sensors[i] = next_nim_sensor;
+				free(cur_nim_sensor->sensor);
+				free(cur_nim_sensor);
+			} while (next_nim_sensor != NULL);
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.h b/drivers/net/ntnic/adapter/nt4ga_adapter.h
new file mode 100644
index 0000000000..6ae78a3743
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_ADAPTER_H_
+#define _NT4GA_ADAPTER_H_
+
+#include "common_adapter_defs.h"
+
+struct adapter_info_s;
+
+/*
+ * Adapter hardware identity, decoded from the PCI ids
+ * (field encoding per DN-0060 section 9).
+ */
+typedef struct hw_info_s {
+	/* pciids */
+	uint16_t pci_vendor_id;
+	uint16_t pci_device_id;
+	uint16_t pci_sub_vendor_id;
+	uint16_t pci_sub_device_id;
+	uint16_t pci_class_id;
+
+	/* Derived from pciid */
+	nthw_adapter_id_t n_nthw_adapter_id;
+	int hw_platform_id; /* bits 11:4 of pci_device_id */
+	int hw_product_type; /* bits 3:0 of pci_device_id */
+	int hw_reserved1; /* bits 15:12 of pci_device_id */
+} hw_info_t;
+
+/*
+ * Services provided by the adapter module
+ */
+#include "nt4ga_pci_ta_tg.h"
+#include "nt4ga_filter.h"
+#include "nt4ga_stat.h"
+#include "nt4ga_link.h"
+
+#include "sensors.h"
+#include "i2c_nim.h"
+#include "sensor_types.h"
+
+typedef struct adapter_info_s {
+	/* Sub-module state */
+	struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+	struct nt4ga_stat_s nt4ga_stat;
+	struct nt4ga_filter_s nt4ga_filter;
+	struct nt4ga_link_s nt4ga_link;
+
+	struct hw_info_s hw_info;
+	struct fpga_info_s fpga_info;
+
+	/* Sensor lists; the counts track successfully created sensors */
+	uint16_t adapter_sensors_cnt;
+	uint16_t nim_sensors_cnt[NUM_ADAPTER_PORTS_MAX];
+	struct nt_sensor_group *adapter_sensors;
+	struct nim_sensor_group *nim_sensors[NUM_ADAPTER_PORTS_MAX];
+
+	/* Ident strings, heap-allocated in nt4ga_adapter_init() and
+	 * freed in nt4ga_adapter_deinit()
+	 */
+	char *mp_port_id_str[NUM_ADAPTER_PORTS_MAX];
+	char *mp_adapter_id_str;
+	char *p_dev_name;
+	volatile bool *pb_shutdown; /* external shutdown request flag */
+
+	int adapter_no;
+	int n_rx_host_buffers;
+	int n_tx_host_buffers;
+} adapter_info_t;
+
+/*
+ * Monitor task operations - management hooks for Napatech network
+ * devices.  Each hook is optional and may be a null pointer unless
+ * noted otherwise.
+ *
+ * mto_open:  called when a network device transitions to the up state,
+ *            e.g., `ip link set <interface> up`.
+ * mto_stop:  called when a network device transitions to the down
+ *            state, e.g., `ip link set <interface> down`.
+ */
+struct monitor_task_ops {
+	int (*mto_open)(int adapter, int port);
+	int (*mto_stop)(int adapter, int port);
+};
+
+#include <pthread.h>
+#include <signal.h>
+
+/* Shared monitor-task state; both arrays are defined in nt4ga_adapter.c. */
+extern pthread_t monitor_tasks[NUM_ADAPTER_MAX];
+extern volatile int monitor_task_is_running[NUM_ADAPTER_MAX];
+
+/*
+ * Function that sets up signal handler(s) that stop the monitoring tasks.
+ */
+int set_up_signal_handlers_to_stop_monitoring_tasks(void);
+
+/* Adapter life cycle */
+int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_deinit(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_adapter_status(struct adapter_info_s *p_adapter_info);
+int nt4ga_adapter_transmit_packet(struct adapter_info_s *p_adapter_info,
+				  int n_intf_no, uint8_t *p_pkt, int n_pkt_len);
+
+/* Dump a human-readable adapter summary to pfh; always returns 0 */
+int nt4ga_adapter_show_info(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+/* SPI for sensors reading */
+nthw_spis_t *new_sensors_t_spi(struct nt_fpga_s *p_fpga);
+
+#endif /* _NT4GA_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_filter.h b/drivers/net/ntnic/adapter/nt4ga_filter.h
new file mode 100644
index 0000000000..ad7e7d8c71
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_filter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_FILTER_H_
+#define NT4GA_FILTER_H_
+
+/* Filter module state: the flow device handle plus interface/queue counts */
+typedef struct nt4ga_filter_s {
+	int n_intf_cnt;
+	int n_queues_per_intf_cnt;
+
+	struct flow_nic_dev *mp_flow_device;
+} nt4ga_filter_t;
+
+#endif /* NT4GA_FILTER_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.c b/drivers/net/ntnic/adapter/nt4ga_link.c
new file mode 100644
index 0000000000..7fbdb72897
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+
+#include "nt4ga_link.h"
+#include "nt_util.h"
+
+/*
+ * port: speed capabilities
+ * This is actually an adapter capability mapped onto every port, so the
+ * port argument is deliberately ignored.
+ */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port _unused)
+{
+	/* p IS used below, so it must not carry the _unused annotation */
+	nt4ga_link_t *const p_link = &p->nt4ga_link;
+
+	return p_link->speed_capa;
+}
+
+/*
+ * port: nim present
+ * Report whether a NIM module has been detected in the given port.
+ */
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_state[port].nim_present;
+}
+
+/*
+ * port: link mode
+ * Administrative "up" clears the port-disable action flag.
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port, bool adm_state)
+{
+	p->nt4ga_link.port_action[port].port_disable = !adm_state;
+}
+
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port)
+{
+	/* Administratively up unless the disable action flag is set */
+	return !p->nt4ga_link.port_action[port].port_disable;
+}
+
+/*
+ * port: link status
+ * Forcing the link status is currently implemented by toggling the
+ * port's administrative state.
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port,
+				bool link_status)
+{
+	/* Setting link state/status is (currently) the same as controlling the port adm state */
+	nt4ga_port_set_adm_state(p, port, link_status);
+}
+
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port)
+{
+	/* Report the link-up flag maintained by the monitoring logic */
+	return p->nt4ga_link.link_state[port].link_up;
+}
+
+/*
+ * port: link speed
+ * Record the requested speed both as a pending port action and as the
+ * currently reported link info.
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed)
+{
+	nt4ga_link_t *const link = &p->nt4ga_link;
+
+	link->port_action[port].port_speed = speed;
+	link->link_info[port].link_speed = speed;
+}
+
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_speed;
+}
+
+/*
+ * port: link autoneg
+ * Currently not fully supported by link code
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused, bool autoneg _unused)
+{
+	/* Intentional no-op: autoneg control is not implemented yet.
+	 * The former unused local `p_link` has been removed.
+	 */
+}
+
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p _unused,
+				 int port _unused)
+{
+	/* Autoneg state is not tracked yet; always reported as enabled.
+	 * The former unused local `p_link` has been removed.
+	 */
+	return true;
+}
+
+/*
+ * port: link duplex
+ * Only recorded as a pending port action; not fully supported by the
+ * link code yet.
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex)
+{
+	p->nt4ga_link.port_action[port].port_duplex = duplex;
+}
+
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.link_info[port].link_duplex;
+}
+
+/*
+ * port: loopback mode
+ * Record the requested loopback mode as a pending port action.
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode)
+{
+	p->nt4ga_link.port_action[port].port_lpbk_mode = mode;
+}
+
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port)
+{
+	const nt4ga_link_t *link = &p->nt4ga_link;
+
+	return link->port_action[port].port_lpbk_mode;
+}
+
+/*
+ * port: nim capabilities
+ * Return (by value) a snapshot of the port's NIM I2C context.
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p, int port)
+{
+	return p->nt4ga_link.u.var100g.nim_ctx[port];
+}
+
+/*
+ * port: tx power
+ * Enable/disable the TX laser on QSFP28-based ports.
+ *
+ * Returns 0 on success, 1 if the NIM laser-control call fails, and -1
+ * when the port type is not a supported QSFP28 variant.
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable)
+{
+	nt4ga_link_t *link_info = &p->nt4ga_link;
+
+	if (link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_SR4 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28 ||
+			link_info->u.nim_ctx[port].port_type == NT_PORT_TYPE_QSFP28_LR4) {
+		nim_i2c_ctx_t *nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+		/* RX-only NIMs have no TX laser to control */
+		if (!nim_ctx->specific_u.qsfp.rx_only) {
+			if (nim_qsfp_plus_nim_set_tx_laser_disable(nim_ctx, disable,
+							       -1) != 0)
+				return 1;
+		}
+	} else {
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link.h b/drivers/net/ntnic/adapter/nt4ga_link.h
new file mode 100644
index 0000000000..2be9f49075
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_LINK_H_
+#define NT4GA_LINK_H_
+
+#include "common_adapter_defs.h"
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "nthw_fpga_rst_nt200a0x.h"
+
+/*
+ * Link state.
+ * Just after start of ntservice the link state might be unknown since the
+ * monitoring routine is busy reading NIM state and NIM data. This might also
+ * be the case after a NIM is plugged into an interface.
+ * The error state indicates a HW reading error.
+ */
+enum nt_link_state_e {
+	NT_LINK_STATE_UNKNOWN = 0, /* The link state has not been read yet */
+	NT_LINK_STATE_DOWN = 1, /* The link state is DOWN */
+	NT_LINK_STATE_UP = 2, /* The link state is UP */
+	NT_LINK_STATE_ERROR = 3 /* The link state could not be read */
+};
+
+typedef enum nt_link_state_e nt_link_state_t, *nt_link_state_p;
+
+/*
+ * Link duplex mode
+ */
+enum nt_link_duplex_e {
+	NT_LINK_DUPLEX_UNKNOWN = 0,
+	NT_LINK_DUPLEX_HALF = 0x01, /* Half duplex */
+	NT_LINK_DUPLEX_FULL = 0x02, /* Full duplex */
+};
+
+typedef enum nt_link_duplex_e nt_link_duplex_t;
+
+/*
+ * Link loopback mode
+ */
+enum nt_link_loopback_e {
+	NT_LINK_LOOPBACK_OFF = 0,
+	NT_LINK_LOOPBACK_HOST = 0x01, /* Host loopback mode */
+	NT_LINK_LOOPBACK_LINE = 0x02, /* Line loopback mode */
+};
+
+/*
+ * Link MDI (medium dependent interface) mode
+ */
+enum nt_link_mdi_e {
+	NT_LINK_MDI_NA = 0,
+	NT_LINK_MDI_AUTO = 0x01, /* MDI auto */
+	NT_LINK_MDI_MDI = 0x02, /* MDI mode */
+	NT_LINK_MDI_MDIX = 0x04, /* MDIX mode */
+};
+
+typedef enum nt_link_mdi_e nt_link_mdi_t;
+
+/*
+ * Link auto/manual negotiation mode
+ */
+enum nt_link_auto_neg_e {
+	NT_LINK_AUTONEG_NA = 0,
+	NT_LINK_AUTONEG_MANUAL = 0x01,
+	NT_LINK_AUTONEG_OFF = NT_LINK_AUTONEG_MANUAL, /* Auto negotiation OFF */
+	NT_LINK_AUTONEG_AUTO = 0x02,
+	NT_LINK_AUTONEG_ON = NT_LINK_AUTONEG_AUTO, /* Auto negotiation ON */
+};
+
+typedef enum nt_link_auto_neg_e nt_link_auto_neg_t;
+
+/*
+ * Per-port link bookkeeping structures.
+ */
+/* Observed link state, maintained by the monitoring logic */
+typedef struct link_state_s {
+	bool link_disabled;
+	bool nim_present;
+	bool lh_nim_absent;
+	bool link_up;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e link_state_latched;
+} link_state_t;
+
+/* Current speed/duplex/autoneg info for a port */
+typedef struct link_info_s {
+	enum nt_link_speed_e link_speed;
+	enum nt_link_duplex_e link_duplex;
+	enum nt_link_auto_neg_e link_auto_neg;
+} link_info_t;
+
+/* Requested port settings, recorded by the nt4ga_port_set_* accessors */
+typedef struct port_action_s {
+	bool port_disable;
+	enum nt_link_speed_e port_speed;
+	enum nt_link_duplex_e port_duplex;
+	uint32_t port_lpbk_mode;
+} port_action_t;
+
+/* 100G adapter-variant state */
+typedef struct adapter_100g_s {
+	nim_i2c_ctx_t
+	nim_ctx[NUM_ADAPTER_PORTS_MAX]; /* Should be the first field */
+	nthw_mac_pcs_t mac_pcs100g[NUM_ADAPTER_PORTS_MAX];
+	nthw_gpio_phy_t gpio_phy[NUM_ADAPTER_PORTS_MAX];
+} adapter_100g_t;
+
+/*
+ * Variant union: nim_ctx is deliberately the first member of every
+ * variant so the generic member aliases the variant-specific one.
+ */
+typedef union adapter_var_s {
+	nim_i2c_ctx_t nim_ctx
+	[NUM_ADAPTER_PORTS_MAX]; /* First field in all the adaptors type */
+	adapter_100g_t var100g;
+} adapter_var_u;
+
+/* Aggregate link state for one adapter */
+typedef struct nt4ga_link_s {
+	link_state_t link_state[NUM_ADAPTER_PORTS_MAX];
+	link_info_t link_info[NUM_ADAPTER_PORTS_MAX];
+	port_action_t port_action[NUM_ADAPTER_PORTS_MAX];
+	uint32_t speed_capa; /* adapter-wide speed capability mask */
+	/* */
+	bool variables_initialized;
+	adapter_var_u u;
+} nt4ga_link_t;
+
+bool nt4ga_port_get_nim_present(struct adapter_info_s *p, int port);
+
+/*
+ * port: link mode
+ */
+void nt4ga_port_set_adm_state(struct adapter_info_s *p, int port,
+			      bool adm_state);
+bool nt4ga_port_get_adm_state(struct adapter_info_s *p, int port);
+
+/*
+ * port: link status
+ */
+void nt4ga_port_set_link_status(struct adapter_info_s *p, int port, bool status);
+bool nt4ga_port_get_link_status(struct adapter_info_s *p, int port);
+
+/*
+ * port: link autoneg
+ */
+void nt4ga_port_set_link_autoneg(struct adapter_info_s *p, int port,
+				 bool autoneg);
+bool nt4ga_port_get_link_autoneg(struct adapter_info_s *p, int port);
+
+/*
+ * port: link speed
+ */
+void nt4ga_port_set_link_speed(struct adapter_info_s *p, int port,
+			       nt_link_speed_t speed);
+nt_link_speed_t nt4ga_port_get_link_speed(struct adapter_info_s *p, int port);
+
+/*
+ * port: link duplex
+ */
+void nt4ga_port_set_link_duplex(struct adapter_info_s *p, int port,
+				nt_link_duplex_t duplex);
+nt_link_duplex_t nt4ga_port_get_link_duplex(struct adapter_info_s *p, int port);
+
+/*
+ * port: loopback mode
+ */
+void nt4ga_port_set_loopback_mode(struct adapter_info_s *p, int port,
+				  uint32_t mode);
+uint32_t nt4ga_port_get_loopback_mode(struct adapter_info_s *p, int port);
+
+/* Adapter-wide capability reported per port */
+uint32_t nt4ga_port_get_link_speed_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: nim capabilities
+ */
+nim_i2c_ctx_t nt4ga_port_get_nim_capabilities(struct adapter_info_s *p,
+		int port);
+
+/*
+ * port: tx power
+ */
+int nt4ga_port_tx_power(struct adapter_info_s *p, int port, bool disable);
+
+#endif /* NT4GA_LINK_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.c b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
new file mode 100644
index 0000000000..8465b6a341
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+#include "i2c_nim.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_link_100g.h"
+
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Prototypes
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap);
+static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs);
+
+/*
+ * Structs and types definitions
+ */
+enum link_up_state {
+	RESET, /* A valid signal is detected by NO local faults. */
+	EXPECT_NO_LF, /* After that we check NO latched local fault bit before */
+	/* de-asserting Remote fault indication. */
+	WAIT_STABLE_LINK, /* Now we expect the link is up. */
+	MONITOR_LINK /* After link-up we monitor link state. */
+};
+
+typedef struct _monitoring_state {
+	/* Fields below are set by monitoring thread */
+	enum link_up_state m_link_up_state;
+	enum nt_link_state_e link_state;
+	enum nt_link_state_e latch_link_state;
+	int m_time_out;
+} monitoring_state_t, *monitoring_state_p;
+
+/*
+ * Global variables
+ */
+
+/*
+ * External state, to be set by the network driver.
+ */
+
+/*
+ * Utility functions
+ */
+
+static void set_loopback(struct adapter_info_s *p_adapter_info,
+			  nthw_mac_pcs_t *mac_pcs, int intf_no, uint32_t mode,
+			  uint32_t last_mode)
+{
+	bool swap_polerity = true;
+
+	switch (mode) {
+	case 1:
+		NT_LOG(INF, ETHDEV, "%s: Applying host loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_fec(mac_pcs, true);
+		nthw_mac_pcs_set_host_loopback(mac_pcs, true);
+		swap_polerity = false;
+		break;
+	case 2:
+		NT_LOG(INF, ETHDEV, "%s: Applying line loopback\n",
+		       p_adapter_info->mp_port_id_str[intf_no]);
+		nthw_mac_pcs_set_line_loopback(mac_pcs, true);
+		break;
+	default:
+		switch (last_mode) {
+		case 1:
+			NT_LOG(INF, ETHDEV, "%s: Removing host loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_host_loopback(mac_pcs, false);
+			break;
+		case 2:
+			NT_LOG(INF, ETHDEV, "%s: Removing line loopback\n",
+			       p_adapter_info->mp_port_id_str[intf_no]);
+			nthw_mac_pcs_set_line_loopback(mac_pcs, false);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		break;
+	}
+
+	if ((p_adapter_info->fpga_info.nthw_hw_info.hw_id == 2 &&
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A01) ||
+			p_adapter_info->hw_info.n_nthw_adapter_id == NT_HW_ADAPTER_ID_NT200A02) {
+		(void)swap_tx_rx_polarity(p_adapter_info, mac_pcs, intf_no,
+					   swap_polerity);
+	}
+
+	/* After changing the loopback the system must be properly reset */
+	reset_rx(p_adapter_info, mac_pcs);
+
+	NT_OS_WAIT_USEC(10000); /* 10ms - arbitrary choice */
+
+	if (!nthw_mac_pcs_is_rx_path_rst(mac_pcs)) {
+		nthw_mac_pcs_reset_bip_counters(mac_pcs);
+		if (!nthw_mac_pcs_get_fec_bypass(mac_pcs))
+			nthw_mac_pcs_reset_fec_counters(mac_pcs);
+	}
+}
+
/*
 * Build the current link state of one port into *state.
 *
 * Reads the MAC-PCS link summary and the GPIO module-present pin, then
 * logs the raw summary (deduplicated: only when it changed since the
 * last call for the same adapter/port). Always returns 0.
 *
 * NOTE(review): the `lsbuf` dedup cache is `static`, so this function is
 * not safe for concurrent callers on the same adapter/port - confirm it
 * is only called from the single monitor thread.
 */
static int link_state_build(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
			     nthw_gpio_phy_t *gpio_phy, int port,
			     link_state_t *state, bool is_port_disabled)
{
	uint32_t abs;
	uint32_t phy_link_state;
	uint32_t lh_abs;
	uint32_t ll_phy_link_state;
	uint32_t link_down_cnt;
	uint32_t nim_interr;
	uint32_t lh_local_fault;
	uint32_t lh_remote_fault;
	uint32_t lh_internal_local_fault;
	uint32_t lh_received_local_fault;

	memset(state, 0, sizeof(*state));
	state->link_disabled = is_port_disabled;
	nthw_mac_pcs_get_link_summary(mac_pcs, &abs, &phy_link_state, &lh_abs,
				  &ll_phy_link_state, &link_down_cnt,
				  &nim_interr, &lh_local_fault,
				  &lh_remote_fault, &lh_internal_local_fault,
				  &lh_received_local_fault);

	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
	state->nim_present =
		nthw_gpio_phy_is_module_present(gpio_phy, (uint8_t)port);
	state->lh_nim_absent = !state->nim_present;
	state->link_up = phy_link_state ? true : false;

	{
		/* Last logged summary per adapter/port - used to avoid log spam */
		static char lsbuf[NUM_ADAPTER_MAX][NUM_ADAPTER_PORTS_MAX][256];
		char buf[255];
		const int adapter_no = drv->adapter_no;

		snprintf(buf, sizeof(buf),
			 "%s: Port = %d: abs = %u, phy_link_state = %u, lh_abs = %u, "
			 "ll_phy_link_state = %u, link_down_cnt = %u, nim_interr = %u, "
			 "lh_local_fault = %u, lh_remote_fault = %u, lh_internal_local_fault = %u, "
			 "lh_received_local_fault = %u",
			drv->mp_adapter_id_str, mac_pcs->mn_instance, abs,
			phy_link_state, lh_abs, ll_phy_link_state,
			link_down_cnt, nim_interr, lh_local_fault,
			lh_remote_fault, lh_internal_local_fault,
			lh_received_local_fault);
		/* Only log when the summary differs from the previous one */
		if (strcmp(lsbuf[adapter_no][port], buf) != 0) {
			rte_strscpy(lsbuf[adapter_no][port], buf,
				sizeof(lsbuf[adapter_no][port]) - 1U);
			lsbuf[adapter_no][port]
			[sizeof(lsbuf[adapter_no][port]) - 1U] = '\0';
			NT_LOG(DBG, ETHDEV, "%s\n", lsbuf[adapter_no][port]);
		}
	}
	return 0;
}
+
/*
 * Check whether a NIM module is present on interface if_no.
 * Thin wrapper around the GPIO module-present pin read.
 */
static bool nim_is_present(nthw_gpio_phy_t *gpio_phy, uint8_t if_no)
{
	assert(if_no < NUM_ADAPTER_PORTS_MAX);

	return nthw_gpio_phy_is_module_present(gpio_phy, if_no);
}
+
/*
 * Enable the MAC-PCS RX path of one port.
 * Always returns 0; drv is unused but kept for signature symmetry with
 * the other enable/disable helpers in this file.
 */
static int enable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
{
	(void)drv; /* unused */
	nthw_mac_pcs_set_rx_enable(mac_pcs, true);
	return 0;
}
+
/*
 * Enable the MAC-PCS TX path of one port and select the host as the
 * TX data source. Always returns 0; drv is unused.
 */
static int enable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
{
	(void)drv; /* unused */
	nthw_mac_pcs_set_tx_enable(mac_pcs, true);
	nthw_mac_pcs_set_tx_sel_host(mac_pcs, true);
	return 0;
}
+
/*
 * Disable the MAC-PCS RX path of one port.
 * Always returns 0; drv is unused.
 */
static int disable_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
{
	(void)drv; /* unused */
	nthw_mac_pcs_set_rx_enable(mac_pcs, false);
	return 0;
}
+
/*
 * Disable the MAC-PCS TX path of one port and deselect the host as
 * the TX data source. Always returns 0; drv is unused.
 */
static int disable_tx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
{
	(void)drv; /* unused */
	nthw_mac_pcs_set_tx_enable(mac_pcs, false);
	nthw_mac_pcs_set_tx_sel_host(mac_pcs, false);
	return 0;
}
+
/*
 * Reset the RX path of one port: assert the RX-path reset, hold it for
 * 10 ms, de-assert it, and wait another 10 ms for the path to settle.
 * Always returns 0; drv is unused.
 */
static int reset_rx(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
{
	(void)drv;

	nthw_mac_pcs_rx_path_rst(mac_pcs, true);
	NT_OS_WAIT_USEC(10000); /* 10ms */
	nthw_mac_pcs_rx_path_rst(mac_pcs, false);
	NT_OS_WAIT_USEC(10000); /* 10ms */

	return 0;
}
+
+/*
+ * Reset TX
+ */
+
+/*
+ * Swap tx/rx polarity
+ */
+static int swap_tx_rx_polarity(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs,
+				int port, bool swap)
+{
+	const bool tx_polarity_swap[2][4] = { { true, true, false, false },
+		{ false, true, false, false }
+	};
+	const bool rx_polarity_swap[2][4] = { { false, true, true, true },
+		{ false, true, true, false }
+	};
+	uint8_t lane;
+
+	(void)drv;
+	for (lane = 0U; lane < 4U; lane++) {
+		if (swap) {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane,
+							  tx_polarity_swap[port][lane]);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane,
+							  rx_polarity_swap[port][lane]);
+		} else {
+			nthw_mac_pcs_swap_gty_tx_polarity(mac_pcs, lane, false);
+			nthw_mac_pcs_swap_gty_rx_polarity(mac_pcs, lane, false);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Check link once NIM is installed and link can be expected.
+ */
+static int check_link_state(adapter_info_t *drv, nthw_mac_pcs_t *mac_pcs)
+{
+	bool rst_required;
+	bool ber;
+	bool fec_all_locked;
+
+	rst_required = nthw_mac_pcs_reset_required(mac_pcs);
+
+	ber = nthw_mac_pcs_get_hi_ber(mac_pcs);
+
+	fec_all_locked = nthw_mac_pcs_get_fec_stat_all_am_locked(mac_pcs);
+
+	if (rst_required || ber || !fec_all_locked)
+		reset_rx(drv, mac_pcs);
+
+	return 0;
+}
+
+/*
+ * Initialize NIM, Code based on nt200e3_2_ptp.cpp: MyPort::createNim()
+ */
+static int create_nim(adapter_info_t *drv, nt_fpga_t *fpga, int port,
+		       bool enable)
+{
+	int res = 0;
+	const uint8_t valid_nim_id = 17U;
+	nthw_gpio_phy_t *gpio_phy;
+	nim_i2c_ctx_t *nim_ctx;
+	sfp_nim_state_t nim;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nthw_mac_pcs_t *mac_pcs = &link_info->u.var100g.mac_pcs100g[port];
+
+	(void)fpga; /* unused */
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(link_info->variables_initialized);
+
+	gpio_phy = &link_info->u.var100g.gpio_phy[port];
+	nim_ctx = &link_info->u.var100g.nim_ctx[port];
+
+	/*
+	 * Check NIM is present before doing GPIO PHY reset.
+	 */
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(INF, ETHDEV, "%s: NIM module is absent\n",
+		       drv->mp_port_id_str[port]);
+		return 0;
+	}
+
+	if (!enable) {
+		disable_rx(drv, mac_pcs);
+		disable_tx(drv, mac_pcs);
+		reset_rx(drv, mac_pcs);
+	}
+
+	/*
+	 * Perform PHY reset.
+	 */
+	NT_LOG(DBG, ETHDEV, "%s: Performing NIM reset\n",
+	       drv->mp_port_id_str[port]);
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, true);
+	NT_OS_WAIT_USEC(100000); /* pause 0.1s */
+	nthw_gpio_phy_set_reset(gpio_phy, (uint8_t)port, false);
+
+	/*
+	 * Wait a little after a module has been inserted before trying to access I2C
+	 * data, otherwise the module will not respond correctly.
+	 */
+	NT_OS_WAIT_USEC(1000000); /* pause 1.0s */
+
+	if (!nim_is_present(gpio_phy, (uint8_t)port)) {
+		NT_LOG(DBG, ETHDEV, "%s: NIM module is no longer absent!\n",
+		       drv->mp_port_id_str[port]);
+		return -1;
+	}
+
+	res = construct_and_preinit_nim(nim_ctx, NULL, port,
+					((struct adapter_info_s *)drv)->nim_sensors,
+					&((struct adapter_info_s *)drv)->nim_sensors_cnt[port]);
+	if (res)
+		return res;
+
+	res = nim_state_build(nim_ctx, &nim);
+	if (res)
+		return res;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+	       drv->mp_port_id_str[port], nim_ctx->nim_id,
+	       nim_id_to_text(nim_ctx->nim_id), nim.br, nim_ctx->vendor_name,
+	       nim_ctx->prod_no, nim_ctx->serial_no);
+
+	/*
+	 * Does the driver support the NIM module type?
+	 */
+	if (nim_ctx->nim_id != valid_nim_id) {
+		NT_LOG(ERR, NTHW,
+		       "%s: The driver does not support the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(nim_ctx->nim_id));
+		NT_LOG(DBG, NTHW,
+		       "%s: The driver supports the NIM module type %s\n",
+		       drv->mp_port_id_str[port], nim_id_to_text(valid_nim_id));
+		return -1;
+	}
+
+	if (enable) {
+		NT_LOG(DBG, ETHDEV, "%s: De-asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, false);
+	} else {
+		NT_LOG(DBG, ETHDEV, "%s: Asserting low power\n",
+		       drv->mp_port_id_str[port]);
+		nthw_gpio_phy_set_low_power(gpio_phy, (uint8_t)port, true);
+	}
+
+	return res;
+}
+
/*
 * Initialize one 100 Gbps port.
 * The function shall not assume anything about the state of the adapter
 * and/or port.
 *
 * Sequence: set port state variables, apply board-specific GTY polarity
 * swap and TX tuning, enable TX, create/validate the NIM, set the RX
 * timestamp compensation, and finally enable RX.
 * Returns 0 on success, nonzero if NIM initialization failed.
 */
static int port_init(adapter_info_t *drv, nt_fpga_t *fpga, int port)
{
	int adapter_id;
	int hw_id;
	int res;
	nt4ga_link_t *link_info = &drv->nt4ga_link;

	nthw_mac_pcs_t *mac_pcs;

	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
	assert(link_info->variables_initialized);

	if (fpga && fpga->p_fpga_info) {
		adapter_id = fpga->p_fpga_info->n_nthw_adapter_id;
		hw_id = fpga->p_fpga_info->nthw_hw_info.hw_id;
	} else {
		/* Unknown board: -1 falls through to the error branch below */
		adapter_id = -1;
		hw_id = -1;
	}

	mac_pcs = &link_info->u.var100g.mac_pcs100g[port];

	/*
	 * Phase 1. Pre-state machine (`port init` functions)
	 * 1.1) Nt4gaAdapter::portInit()
	 */

	/* No adapter set-up here, only state variables */

	/* 1.2) MyPort::init() */
	link_info->link_info[port].link_speed = NT_LINK_SPEED_100G;
	link_info->link_info[port].link_duplex = NT_LINK_DUPLEX_FULL;
	link_info->link_info[port].link_auto_neg = NT_LINK_AUTONEG_OFF;
	link_info->speed_capa |= NT_LINK_SPEED_100G;
	nthw_mac_pcs_set_led_mode(mac_pcs, NTHW_MAC_PCS_LED_AUTO);
	nthw_mac_pcs_set_receiver_equalization_mode(mac_pcs,
					       nthw_mac_pcs_receiver_mode_lpm);

	/*
	 * NT200A01 build 2 HW and NT200A02 that require GTY polarity swap
	 * if (adapter is `NT200A01 build 2 HW or NT200A02`)
	 */
	if (adapter_id == NT_HW_ADAPTER_ID_NT200A02 ||
			(adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 2))
		(void)swap_tx_rx_polarity(drv, mac_pcs, port, true);

	nthw_mac_pcs_set_ts_eop(mac_pcs, true); /* end-of-frame timestamping */

	/* Work in ABSOLUTE timing mode, don't set IFG mode. */

	/* Phase 2. Pre-state machine (`setup` functions) */

	/* 2.1) nt200a0x.cpp:Myport::setup() */
	NT_LOG(DBG, ETHDEV, "%s: Setting up port %d\n", drv->mp_port_id_str[port],
	       port);

	NT_LOG(DBG, ETHDEV, "%s: Port %d: PHY TX enable\n",
	       drv->mp_port_id_str[port], port);
	enable_tx(drv, mac_pcs);
	reset_rx(drv, mac_pcs);

	/* 2.2) Nt4gaPort::setup() */
	/* NOTE(review): the first call with NULL appears to probe whether the
	 * GMF module exists before initializing a real instance - confirm
	 * against the nthw_gmf_init API.
	 */
	if (nthw_gmf_init(NULL, fpga, port) == 0) {
		nthw_gmf_t gmf;

		if (nthw_gmf_init(&gmf, fpga, port) == 0)
			nthw_gmf_set_enable(&gmf, true);
	}

	/* Phase 3. Link state machine steps */

	/* 3.1) Create NIM, ::createNim() */
	res = create_nim(drv, fpga, port, true);

	if (res) {
		NT_LOG(WRN, ETHDEV, "%s: NIM initialization failed\n",
		       drv->mp_port_id_str[port]);
		return res;
	}

	NT_LOG(DBG, ETHDEV, "%s: NIM initialized\n", drv->mp_port_id_str[port]);

	/* 3.2) MyPort::nimReady() */

	/* 3.3) MyPort::nimReady100Gb() */

	/* Setting FEC resets the lane counter in one half of the GMF */
	nthw_mac_pcs_set_fec(mac_pcs, true);
	NT_LOG(DBG, ETHDEV, "%s: Port %d: HOST FEC enabled\n",
	       drv->mp_port_id_str[port], port);

	if (adapter_id == NT_HW_ADAPTER_ID_NT200A01 && hw_id == 1) {
		/* Per-port, per-lane {pre, diff, post} GTY TX tuning values.
		 * NOTE(review): indexed by port, so only valid for ports 0-1
		 * (2-port boards) - confirm callers never exceed that.
		 */
		const uint8_t tuning_s_r4[2][4][3] = { { { 8, 15, 8 },
				{ 8, 15, 9 },
				{ 7, 15, 9 },
				{ 6, 15, 8 }
			},
			{	{ 6, 15, 8 },
				{ 3, 15, 12 },
				{ 7, 15, 9 },
				{ 7, 15, 8 }
			}
		};

		uint8_t lane = 0;

		for (lane = 0; lane < 4; lane++) {
			uint8_t pre, diff, post;

			/* Use short-range tuning values */
			pre = tuning_s_r4[port][lane][0];
			diff = tuning_s_r4[port][lane][1];
			post = tuning_s_r4[port][lane][2];

			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
						  post);
		}
	} else if ((adapter_id == NT_HW_ADAPTER_ID_NT200A02) ||
			((adapter_id == NT_HW_ADAPTER_ID_NT200A01) &&
			 (hw_id == 2))) {
		/* These boards use one shared tuning triple for all lanes */
		const uint8_t pre = 5;
		const uint8_t diff = 25;
		const uint8_t post = 12;

		uint8_t lane = 0;

		for (lane = 0; lane < 4; lane++) {
			nthw_mac_pcs_set_gty_tx_tuning(mac_pcs, lane, pre, diff,
						  post);
		}
	} else {
		NT_LOG(ERR, ETHDEV,
		       "%s: Unhandled AdapterId/HwId: %02x_hwid%d\n", __func__,
		       adapter_id, hw_id);
		assert(0);
	}
	reset_rx(drv, mac_pcs);

	/*
	 * 3.4) MyPort::setLinkState()
	 *
	 * Compensation = 1640 - dly
	 * CMAC-core dly 188 ns
	 * FEC no correction 87 ns
	 * FEC active correction 211
	 */
	if (nthw_mac_pcs_get_fec_valid(mac_pcs))
		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 211));

	else
		nthw_mac_pcs_set_timestamp_comp_rx(mac_pcs, (1640 - 188 - 87));

	/* 3.5) uint32_t MyPort::macConfig(nt_link_state_t link_state) */
	enable_rx(drv, mac_pcs);

	nthw_mac_pcs_set_host_loopback(mac_pcs, false);

	return res;
}
+
+/*
+ * State machine shared between kernel and userland
+ */
+static int common_ptp_nim_state_machine(void *data)
+{
+	adapter_info_t *drv = (adapter_info_t *)data;
+	fpga_info_t *fpga_info = &drv->fpga_info;
+	nt4ga_link_t *link_info = &drv->nt4ga_link;
+	nt_fpga_t *fpga = fpga_info->mp_fpga;
+	const int adapter_no = drv->adapter_no;
+	const int nb_ports = fpga_info->n_phy_ports;
+	uint32_t last_lpbk_mode[NUM_ADAPTER_PORTS_MAX];
+
+	nim_i2c_ctx_t *nim_ctx;
+	link_state_t *link_state;
+	nthw_mac_pcs_t *mac_pcs;
+	nthw_gpio_phy_t *gpio_phy;
+
+	if (!fpga) {
+		NT_LOG(ERR, ETHDEV, "%s: fpga is NULL\n", drv->mp_adapter_id_str);
+		goto NT4GA_LINK_100G_MON_EXIT;
+	}
+
+	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);
+	nim_ctx = link_info->u.var100g.nim_ctx;
+	link_state = link_info->link_state;
+	mac_pcs = link_info->u.var100g.mac_pcs100g;
+	gpio_phy = link_info->u.var100g.gpio_phy;
+
+	monitor_task_is_running[adapter_no] = 1;
+	memset(last_lpbk_mode, 0, sizeof(last_lpbk_mode));
+
+	if (monitor_task_is_running[adapter_no]) {
+		NT_LOG(DBG, ETHDEV, "%s: link state machine running...\n",
+		       drv->mp_adapter_id_str);
+	}
+
+	while (monitor_task_is_running[adapter_no]) {
+		int i;
+		static bool reported_link[NUM_ADAPTER_PORTS_MAX] = { false };
+
+		/* Read sensors */
+		if (drv->adapter_sensors != NULL) {
+			nthw_spis_t *t_spi =
+				new_sensors_t_spi(drv->fpga_info.mp_fpga);
+			if (t_spi) {
+				for (struct nt_sensor_group *ptr =
+							drv->adapter_sensors;
+						ptr != NULL; ptr = ptr->next)
+					ptr->read(ptr, t_spi);
+				nthw_spis_delete(t_spi);
+			}
+		}
+
+		for (i = 0; i < nb_ports; i++) {
+			link_state_t new_link_state;
+			const bool is_port_disabled =
+				link_info->port_action[i].port_disable;
+			const bool was_port_disabled =
+				link_state[i].link_disabled;
+			const bool disable_port = is_port_disabled &&
+						  !was_port_disabled;
+			const bool enable_port = !is_port_disabled &&
+						 was_port_disabled;
+
+			if (!monitor_task_is_running[adapter_no])   /* stop quickly */
+				break;
+
+			/* Reading NIM sensors */
+			if (drv->nim_sensors[i] != NULL) {
+				nthw_spis_t *t_spi = new_sensors_t_spi(drv->fpga_info.mp_fpga);
+				if (t_spi) {
+					for (struct nim_sensor_group *ptr =
+								drv->nim_sensors[i];
+							ptr != NULL; ptr = ptr->next)
+						ptr->read(ptr, t_spi);
+					nthw_spis_delete(t_spi);
+				}
+			}
+
+			/* Has the administrative port state changed? */
+			assert(!(disable_port && enable_port));
+			if (disable_port) {
+				memset(&link_state[i], 0,
+				       sizeof(link_state[i]));
+				link_state[i].link_disabled = true;
+				reported_link[i] = false;
+				/* Turn off laser and LED, etc. */
+				(void)create_nim(drv, fpga, i, false);
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is disabled\n",
+				       drv->mp_port_id_str[i], i);
+				continue;
+			}
+
+			if (enable_port) {
+				link_state[i].link_disabled = false;
+				NT_LOG(DBG, ETHDEV, "%s: Port %i is enabled\n",
+				       drv->mp_port_id_str[i], i);
+			}
+
+			if (is_port_disabled)
+				continue;
+
+			if (link_info->port_action[i].port_lpbk_mode !=
+					last_lpbk_mode[i]) {
+				/* Loopback mode has changed. Do something */
+				if (!nim_is_present(&gpio_phy[i],
+						     (uint8_t)i)) {
+					/*
+					 * If there is no Nim present, we need to initialize the
+					 * port anyway
+					 */
+					port_init(drv, fpga, i);
+				}
+				NT_LOG(INF, ETHDEV,
+				       "%s: Loopback mode changed=%u\n",
+				       drv->mp_port_id_str[i],
+				       link_info->port_action[i].port_lpbk_mode);
+				set_loopback(drv, &mac_pcs[i], i,
+					     link_info->port_action[i].port_lpbk_mode,
+					     last_lpbk_mode[i]);
+				if (link_info->port_action[i].port_lpbk_mode ==
+						1)
+					link_state[i].link_up = true;
+				last_lpbk_mode[i] =
+					link_info->port_action[i].port_lpbk_mode;
+				continue;
+			}
+
+			(void)link_state_build(drv, &mac_pcs[i], &gpio_phy[i],
+						i, &new_link_state,
+						is_port_disabled);
+			if (!new_link_state.nim_present) {
+				if (link_state[i].nim_present) {
+					NT_LOG(INF, ETHDEV,
+					       "%s: NIM module removed\n",
+					       drv->mp_port_id_str[i]);
+				}
+				link_state[i] = new_link_state;
+				continue;
+			}
+
+			/* NIM module is present */
+			if (new_link_state.lh_nim_absent ||
+					!link_state[i].nim_present) {
+				sfp_nim_state_t new_state;
+
+				NT_LOG(DBG, ETHDEV, "%s: NIM module inserted\n",
+				       drv->mp_port_id_str[i]);
+
+				if (port_init(drv, fpga, i)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Failed to initialize NIM module\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				if (nim_state_build(&nim_ctx[i], &new_state)) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: Cannot read basic NIM data\n",
+					       drv->mp_port_id_str[i]);
+					continue;
+				}
+				assert(new_state.br); /* Cannot be zero if NIM is present */
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM id = %u (%s), br = %u, vendor = '%s', pn = '%s', sn='%s'\n",
+				       drv->mp_port_id_str[i], nim_ctx->nim_id,
+				       nim_id_to_text(nim_ctx->nim_id),
+				       (unsigned int)new_state.br,
+				       nim_ctx->vendor_name, nim_ctx->prod_no,
+				       nim_ctx->serial_no);
+
+				(void)link_state_build(drv, &mac_pcs[i],
+							&gpio_phy[i], i,
+							&link_state[i],
+							is_port_disabled);
+
+				NT_LOG(DBG, ETHDEV,
+				       "%s: NIM module initialized\n",
+				       drv->mp_port_id_str[i]);
+				continue;
+			}
+			if (reported_link[i] != new_link_state.link_up) {
+				NT_LOG(INF, ETHDEV, "%s: link is %s\n",
+				       drv->mp_port_id_str[i],
+				       (new_link_state.link_up ? "up" :
+					"down"));
+				link_state[i].link_up = new_link_state.link_up;
+				reported_link[i] = new_link_state.link_up;
+			}
+			check_link_state(drv, &mac_pcs[i]);
+		} /* end-for */
+		if (monitor_task_is_running[adapter_no])
+			NT_OS_WAIT_USEC(5 * 100000U); /* 5 x 0.1s = 0.5s */
+	}
+
+NT4GA_LINK_100G_MON_EXIT:
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: Stopped NT4GA 100 Gbps link monitoring thread.\n",
+	       drv->mp_adapter_id_str);
+
+	return 0;
+}
+
+/*
+ * Userland NIM state machine
+ */
+static void *nt4ga_link_100g_mon(void *data)
+{
+	(void)common_ptp_nim_state_machine(data);
+
+	return NULL;
+}
+
/*
 * Initialize all ports
 * The driver calls this function during initialization (of the driver).
 *
 * First-time call: initializes the per-port MAC-PCS, NIM I2C and GPIO
 * PHY state (guarded by variables_initialized), then starts the link
 * monitoring thread if it is not already running.
 * Returns 0 on success or the first nonzero error code encountered.
 */
int nt4ga_link_100g_ports_init(struct adapter_info_s *p_adapter_info,
			       nt_fpga_t *fpga)
{
	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
	const int adapter_no = p_adapter_info->adapter_no;
	const int nb_ports = fpga_info->n_phy_ports;
	int res = 0;

	NT_LOG(DBG, ETHDEV, "%s: Initializing ports\n",
	       p_adapter_info->mp_adapter_id_str);

	/*
	 * Initialize global variables
	 */
	assert(adapter_no >= 0 && adapter_no < NUM_ADAPTER_MAX);

	if (res == 0 && !p_adapter_info->nt4ga_link.variables_initialized) {
		nthw_mac_pcs_t *mac_pcs =
			p_adapter_info->nt4ga_link.u.var100g.mac_pcs100g;
		nim_i2c_ctx_t *nim_ctx =
			p_adapter_info->nt4ga_link.u.var100g.nim_ctx;
		nthw_gpio_phy_t *gpio_phy =
			p_adapter_info->nt4ga_link.u.var100g.gpio_phy;
		int i;

		for (i = 0; i < nb_ports; i++) {
			/* I2C instances 0-1 are used elsewhere; ports start at 2 */
			const uint8_t instance =
				(uint8_t)(2U + i); /* 2 + adapter port number */
			res = nthw_mac_pcs_init(&mac_pcs[i], fpga,
					      i /* int nInstance */);
			if (res != 0)
				break;
			res = nthw_iic_init(&nim_ctx[i].hwiic, fpga, instance,
					   8 /* timing */);
			if (res != 0)
				break;
			nim_ctx[i].instance = instance;
			nim_ctx[i].devaddr = 0x50; /* 0xA0 / 2 */
			nim_ctx[i].regaddr = 0U;
			res = nthw_gpio_phy_init(&gpio_phy[i], fpga,
					       0 /* Only one instance */);
			if (res != 0)
				break;
		}
		if (res == 0)
			p_adapter_info->nt4ga_link.variables_initialized = true;
	}

	/* Create state-machine thread */
	if (res == 0) {
		if (!monitor_task_is_running[adapter_no]) {
			res = pthread_create(&monitor_tasks[adapter_no], NULL,
					     nt4ga_link_100g_mon, p_adapter_info);
		}
	}
	return res;
}
diff --git a/drivers/net/ntnic/adapter/nt4ga_link_100g.h b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
new file mode 100644
index 0000000000..803b3454b7
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_link_100g.h
@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#ifndef NT4GA_LINK_100G_H_
#define NT4GA_LINK_100G_H_

#include "nthw_drv.h"

/* Initialize all 100G ports of the adapter and start the link monitor thread. */
int nt4ga_link_100g_ports_init(adapter_info_t *p_adapter_info, nt_fpga_t *p_fpga);

#endif /* NT4GA_LINK_100G_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
new file mode 100644
index 0000000000..5cbe7fcae9
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nt4ga_pci_ta_tg.h"
+#include "nthw_pci_ta.h"
+#include "nthw_pci_rd_tg.h"
+#include "nthw_pci_wr_tg.h"
+
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+	int res;
+	int n_err_cnt = 0;
+
+	if (p) {
+		memset(p, 0, sizeof(nt4ga_pci_ta_tg_t));
+	} else {
+		NT_LOG(ERR, NTHW, "%s: %s: null ptr\n", p_adapter_id_str, __func__);
+		return -1;
+	}
+
+	assert(p_fpga);
+
+	p->mp_nthw_pci_rd_tg = nthw_pci_rd_tg_new();
+	assert(p->mp_nthw_pci_rd_tg);
+	res = nthw_pci_rd_tg_init(p->mp_nthw_pci_rd_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_RD_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_wr_tg = nthw_pci_wr_tg_new();
+	assert(p->mp_nthw_pci_wr_tg);
+	res = nthw_pci_wr_tg_init(p->mp_nthw_pci_wr_tg, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_WR_TG not found\n",
+		       p_adapter_id_str);
+	}
+
+	p->mp_nthw_pci_ta = nthw_pci_ta_new();
+	assert(p->mp_nthw_pci_ta);
+	res = nthw_pci_ta_init(p->mp_nthw_pci_ta, p_fpga, 0);
+	if (res) {
+		n_err_cnt++;
+		NT_LOG(WRN, NTHW, "%s: module PCI_TA not found\n",
+		       p_adapter_id_str);
+	}
+
+	return n_err_cnt;
+}
+
/* Write the TA module's control-enable flag (nonzero = enabled). Returns 0. */
static int nt4ga_pci_ta_tg_ta_write_control_enable(nt4ga_pci_ta_tg_t *p,
		uint32_t enable)
{
	nthw_pci_ta_set_control_enable(p->mp_nthw_pci_ta, enable);
	return 0;
}
+
/* Read the TA length-error counter into *p_data. Returns 0. */
static int nt4ga_pci_ta_tg_ta_read_length_error(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
{
	nthw_pci_ta_get_length_error(p->mp_nthw_pci_ta, p_data);
	return 0;
}
+
/* Read the TA bad-packet counter into *p_data. Returns 0. */
static int nt4ga_pci_ta_tg_ta_read_packet_bad(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
{
	nthw_pci_ta_get_packet_bad(p->mp_nthw_pci_ta, p_data);
	return 0;
}
+
/* Read the TA good-packet counter into *p_data. Returns 0. */
static int nt4ga_pci_ta_tg_ta_read_packet_good(nt4ga_pci_ta_tg_t *p, uint32_t *p_data)
{
	nthw_pci_ta_get_packet_good(p->mp_nthw_pci_ta, p_data);
	return 0;
}
+
/* Read the TA payload-error counter into *p_data. Returns 0. */
static int nt4ga_pci_ta_tg_ta_read_payload_error(nt4ga_pci_ta_tg_t *p,
		uint32_t *p_data)
{
	nthw_pci_ta_get_payload_error(p->mp_nthw_pci_ta, p_data);
	return 0;
}
+
+static int nt4ga_pci_ta_tg_rd_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_rd_tg_set_ram_addr(p->mp_nthw_pci_rd_tg, slot_addr);
+	nthw_pci_rd_tg_set_phys_addr(p->mp_nthw_pci_rd_tg, n_phys_addr);
+	nthw_pci_rd_tg_set_ram_data(p->mp_nthw_pci_rd_tg, req_size, wait, wrap);
+	return 0;
+}
+
/* Start the PCI read TG for num_iterations runs; 0 stops it. Returns 0. */
static int nt4ga_pci_ta_tg_rd_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
{
	nthw_pci_rd_tg_set_run(p->mp_nthw_pci_rd_tg, num_iterations);
	return 0;
}
+
+static int nt4ga_pci_ta_tg_rd_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_rd_tg_get_ctrl_rdy(p->mp_nthw_pci_rd_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI RD TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int nt4ga_pci_ta_tg_wr_tg_setup(nt4ga_pci_ta_tg_t *p, uint64_t iova,
+				    int slot_addr, uint32_t req_size, bool wait,
+				    bool wrap, bool inc)
+{
+	const uint64_t n_phys_addr = (iova + (unsigned long)(slot_addr * req_size));
+
+	nthw_pci_wr_tg_set_ram_addr(p->mp_nthw_pci_wr_tg, slot_addr);
+	nthw_pci_wr_tg_set_phys_addr(p->mp_nthw_pci_wr_tg, n_phys_addr);
+	nthw_pci_wr_tg_set_ram_data(p->mp_nthw_pci_wr_tg, req_size, wait, wrap, inc);
+
+	return 0;
+}
+
/* Start the PCI write TG for num_iterations runs; 0 stops it. Returns 0. */
static int nt4ga_pci_ta_tg_wr_tg_run(nt4ga_pci_ta_tg_t *p, uint32_t num_iterations)
{
	nthw_pci_wr_tg_set_run(p->mp_nthw_pci_wr_tg, num_iterations);
	return 0;
}
+
+static int nt4ga_pci_ta_tg_wr_tg_wait_ready(nt4ga_pci_ta_tg_t *p)
+{
+	int poll = 0;
+	uint32_t data = 0;
+
+	while (data == 0) {
+		/* NOTE: Deliberately start with a sleep - ensures that the FPGA pipe is empty */
+		NT_OS_WAIT_USEC(1000);
+		data = nthw_pci_wr_tg_get_ctrl_rdy(p->mp_nthw_pci_wr_tg);
+		poll++;
+		if (poll >= 1000) {
+			NT_LOG(ERR, NTHW,
+			       "%s: FAILED waiting PCI WR TG ready: poll=%d\n",
+			       __func__, poll);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla)
+{
+	nt4ga_pci_ta_tg_t *p = &p_adapter_info->nt4ga_pci_ta_tg;
+
+	const int delay = pri->n_tg_delay;
+	const int pkt_size = pri->n_tg_pkt_size;
+	const int num_pkts = pri->n_tg_num_pkts;
+	const int n_direction = pri->n_tg_direction;
+	const uint8_t n_numa_node = (uint8_t)pri->n_numa_node;
+	const int dma_buf_size = (4 * 1024 * 1024);
+
+	const size_t align_size = ALIGN_SIZE(dma_buf_size);
+	uint32_t *mem_addr;
+	uint64_t iova;
+
+	int bo_error = 0;
+
+	nthw_hif *p_root_instance = p_adapter_info->fpga_info.mp_nthw_hif;
+	nthw_hif *p_endpoint_instance = NULL;
+
+	nthw_pcie3 *p_pci_root = p_adapter_info->fpga_info.mp_nthw_pcie3;
+	nthw_pcie3 *p_pci_endpoint = NULL;
+
+	assert(p_root_instance || p_pci_root);
+
+	struct nt_dma_s *p_dma;
+	/* FPGA needs a Page alignment (4K on Intel) */
+	p_dma = nt_dma_alloc(align_size, 0x1000, n_numa_node);
+	if (p_dma == NULL) {
+		NT_LOG(DBG, ETHDEV, "%s: vfio_dma_alloc failed\n", __func__);
+		return 0;
+	}
+	mem_addr = (uint32_t *)p_dma->addr;
+	iova = p_dma->iova;
+
+	NT_LOG(DBG, NTHW,
+	       "%s: Running HIF bandwidth measurements on NUMA node %d\n",
+	       __func__, n_numa_node);
+
+	bo_error = 0;
+	{
+		int wrap;
+
+		/* Stop any existing running test */
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* Prepare the HIF Traffic generator */
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/*
+		 * Ensure that the hostbuffer memory contain data that can be read -
+		 * For this we will ask the FPGA to write data to it. The last wrap packet
+		 * does not generate any data it only wraps (unlike the PCIe2 TG)
+		 */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				if (pkt >= (num_pkts - 1))
+					wrap = 1;
+
+				else
+					wrap = 0;
+				bo_error |= nt4ga_pci_ta_tg_wr_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap, 1);
+				bo_error |= nt4ga_pci_ta_tg_rd_tg_setup(p, iova,
+									pkt, pkt_size,
+									0, wrap);
+			}
+		}
+
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Start WR TG Write once */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 1);
+		/* Wait until WR TG ready */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		/* Verify that we have a packet */
+		{
+			int pkt;
+
+			for (pkt = 0; pkt < num_pkts; pkt++) {
+				uint32_t value = 0;
+				int poll;
+
+				for (poll = 8; poll < pkt_size;
+						poll += 4, value++) {
+					if (*(uint32_t *)((uint8_t *)mem_addr +
+							  (pkt * pkt_size) +
+							  poll) != value) {
+						NT_LOG(ERR, NTHW,
+						       "HIF TG: Prepare failed. Data write failed: #%d.%d:  %016X:%08X\n",
+						       pkt, poll,
+						       *(uint32_t *)((uint8_t *)
+								     mem_addr +
+								     (pkt *
+								      pkt_size) +
+								     poll),
+						       value);
+
+						/*
+						 * Break out of the verification loop on first
+						 * Compare error
+						 */
+						bo_error |= 1;
+						break;
+					}
+				}
+			}
+		}
+
+		switch (n_direction) {
+		case 1: /* Read only test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			break;
+		case 2: /* Write only test */
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		case 3: /* Combined read/write test */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0xffff);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0xffff);
+			break;
+		default: /* stop tests */
+			nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+			nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+			break;
+		}
+
+		do {
+			/* prep */
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_root,
+								    pri);
+			}
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_pre(p_pci_endpoint,
+								    sla);
+			}
+
+			/* start measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_enable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_enable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_enable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_enable(p_pci_endpoint);
+
+			/* Wait */
+			NT_OS_WAIT_USEC(delay);
+
+			/* Stop measure */
+			if (p_root_instance)
+				nthw_hif_stat_req_disable(p_root_instance);
+			if (p_pci_root)
+				nthw_pcie3_stat_req_disable(p_pci_root);
+
+			if (p_endpoint_instance)
+				nthw_hif_stat_req_disable(p_endpoint_instance);
+			if (p_pci_endpoint)
+				nthw_pcie3_stat_req_disable(p_pci_endpoint);
+
+			/* Post process root */
+			if (p_root_instance) {
+				nthw_hif_end_point_counters_sample(p_root_instance,
+							       pri);
+			}
+
+			if (p_pci_root) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_root,
+								     pri);
+			}
+
+			/* Post process endpoint */
+			if (p_endpoint_instance) {
+				nthw_hif_end_point_counters_sample(p_endpoint_instance,
+							       sla);
+			}
+
+			if (p_pci_endpoint) {
+				nthw_pcie3_end_point_counters_sample_post(p_pci_endpoint,
+								     sla);
+			}
+
+			{
+				/* Check for TA transmit errors */
+				uint32_t dw_good_pkts, dw_bad_pkts, dw_bad_length,
+					 dw_bad_payload;
+				nt4ga_pci_ta_tg_ta_read_packet_good(p,
+								 &dw_good_pkts);
+				nt4ga_pci_ta_tg_ta_read_packet_bad(p, &dw_bad_pkts);
+				nt4ga_pci_ta_tg_ta_read_length_error(p,
+								  &dw_bad_length);
+				nt4ga_pci_ta_tg_ta_read_payload_error(p, &dw_bad_payload);
+
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: Good pkts, Bad pkts, Bad length, Bad payload\n",
+				       __func__, n_numa_node);
+				NT_LOG(DBG, NTHW,
+				       "%s: NUMA node %u: HIF: TA: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				       __func__, n_numa_node, dw_good_pkts,
+				       dw_bad_pkts, dw_bad_length, dw_bad_payload);
+
+				if (dw_bad_pkts | dw_bad_length | dw_bad_payload) {
+					bo_error |= 1;
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: error detected\n",
+					       __func__, n_numa_node);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Good packets received: %u\n",
+					       __func__, n_numa_node, dw_good_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad packets received : %u\n",
+					       __func__, n_numa_node, dw_bad_pkts);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad length received  : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_length);
+					NT_LOG(ERR, NTHW,
+					       "%s: NUMA node %u: HIF: TA: Bad payload received : %u\n",
+					       __func__, n_numa_node,
+					       dw_bad_payload);
+				}
+			}
+
+			if (bo_error != 0)
+				break;
+
+			break; /* for now only loop once */
+
+			/*
+			 * Only do "signalstop" looping if a specific numa node and direction is to
+			 * be tested.
+			 */
+		} while ((bo_error == 0) && (n_numa_node != UINT8_MAX) &&
+				(n_direction != -1));
+
+		/* Stop the test */
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+		bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+		bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+		/* PCIe3 sanity checks */
+		{
+#if defined(DEBUG)
+			int do_loop = 1;
+#else
+			int do_loop = 0;
+#endif
+
+			while (do_loop) {
+				do_loop = 0;
+
+				if (p_root_instance) {
+					nthw_hif_stat_req_enable(p_root_instance);
+					NT_OS_WAIT_USEC(100);
+					nthw_hif_stat_req_disable(p_root_instance);
+				}
+
+				if (do_loop == 0)
+					break;
+
+				NT_LOG(DBG, NTHW,
+				       "%s: WARNING this is wrong - wait again\n",
+				       __func__);
+				NT_OS_WAIT_USEC(200 * 1000);
+			}
+		}
+	}
+
+	/* Stop the test */
+
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_wr_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_run(p, 0);
+	bo_error |= nt4ga_pci_ta_tg_rd_tg_wait_ready(p);
+
+	bo_error |= nt4ga_pci_ta_tg_ta_write_control_enable(p, 0);
+
+	nt_dma_free(p_dma);
+
+	return bo_error;
+}
+
+/*
+ * Initialize one HIF endpoint counter set prior to a measurement run.
+ * Non-positive pkt size, batch count or delay fall back to the TG_* defaults.
+ */
+static void nt4ga_pci_ta_tg_init_ep_counters(struct nthw_hif_end_point_counters *ep,
+					uint8_t numa, int direction,
+					int n_pkt_size, int n_batch_count,
+					int n_delay)
+{
+	ep->n_numa_node = numa;
+	ep->n_tg_direction = direction;
+	ep->n_tg_pkt_size = (n_pkt_size > 0 ? n_pkt_size : TG_PKT_SIZE);
+	ep->n_tg_num_pkts =
+		(n_batch_count > 0 ? n_batch_count : TG_NUM_PACKETS);
+	ep->n_tg_delay = (n_delay > 0 ? n_delay : TG_DELAY);
+	ep->cur_rx = 0;
+	ep->cur_tx = 0;
+	ep->n_ref_clk_cnt = -1;
+	ep->bo_error = 0;
+}
+
+/*
+ * Run the HIF throughput measurement for the requested NUMA node(s) and
+ * direction(s).
+ *
+ * numa_node: specific node, or UINT8_MAX for "all" (a single pass either way,
+ *            since numa_end == numa_begin).
+ * direction: 1=read, 2=write, 3=both; <= 0 sweeps directions 1..3.
+ * n_pkt_size/n_batch_count: non-positive values select the TG_* defaults.
+ * n_delay: measurement delay; 0 is rejected with -1.
+ *
+ * Returns 0 on completion (measurement errors are reported via the log),
+ * -1 only on invalid n_delay.
+ *
+ * Fix vs. original: the slave endpoint's n_ref_clk_cnt is now initialized;
+ * the original set pri->n_ref_clk_cnt twice and left sla's field untouched.
+ * Both endpoint inits now go through one helper so the two cannot drift.
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay)
+{
+	/* All numa nodes is indicated by UINT8_MAX */
+	const uint8_t numa_begin = (numa_node == UINT8_MAX ? 0 : numa_node);
+	const uint8_t numa_end = numa_begin;
+
+	/* sanity check direction param */
+	const int dir_begin = (direction <= 0 ? 1 : direction);
+	const int dir_end = (direction <= 0 ? 3 : direction);
+
+	int bo_error = 0;
+	struct nthw_hif_end_points eps;
+	uint8_t numa;
+	int by_loop;
+
+	if (n_delay == 0)
+		return -1;
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput:\n");
+
+	/* Only do "signalstop"-looping if a specific numa node is to be tested. */
+	for (numa = numa_begin; numa <= numa_end; numa++) {
+		for (by_loop = dir_begin; by_loop <= dir_end; by_loop++) {
+			struct nthw_hif_end_point_counters *pri = &eps.pri;
+			struct nthw_hif_end_point_counters *sla = &eps.sla;
+
+			nt4ga_pci_ta_tg_init_ep_counters(pri, numa, by_loop,
+							 n_pkt_size,
+							 n_batch_count,
+							 n_delay);
+			nt4ga_pci_ta_tg_init_ep_counters(sla, numa, by_loop,
+							 n_pkt_size,
+							 n_batch_count,
+							 n_delay);
+
+			bo_error +=
+			nt4ga_pci_ta_tg_measure_throughput_run(p_adapter_info,
+							       pri, sla);
+#if defined(DEBUG) && (1)
+			NT_LOG(DBG, NTHW,
+			       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+			       __func__, pri->n_numa_node,
+			       pri->n_tg_direction,
+			       pri->n_tg_num_pkts,
+			       pri->n_tg_pkt_size,
+			       pri->n_tg_delay,
+			       pri->cur_rx, pri->cur_tx,
+			       (pri->cur_rx * 8UL / 1000000UL),
+			       (pri->cur_tx * 8UL / 1000000UL));
+			NT_LOG(DBG, NTHW,
+			       "%s: @ %d: %d %d %d %d: %016lX %016lX : %6ld Mbps %6ld Mbps\n",
+			       __func__, sla->n_numa_node,
+			       sla->n_tg_direction,
+			       sla->n_tg_num_pkts,
+			       sla->n_tg_pkt_size,
+			       sla->n_tg_delay,
+			       sla->cur_rx, sla->cur_tx,
+			       (sla->cur_rx * 8UL / 1000000UL),
+			       (sla->cur_tx * 8UL / 1000000UL));
+#endif
+
+			if (pri->bo_error != 0 || sla->bo_error != 0)
+				bo_error++;
+			if (bo_error)
+				break;
+		}
+	}
+
+	if (bo_error != 0) {
+		NT_LOG(ERR, NTHW, "%s: error during bandwidth measurement\n",
+		       __func__);
+	}
+
+	NT_LOG(DBG, NTHW, "HIF adapter throughput: done\n");
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] done\n", __func__, __FILE__, __LINE__);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
new file mode 100644
index 0000000000..8b46491f77
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_pci_ta_tg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NT4GA_PCI_TA_TG_H_
+#define _NT4GA_PCI_TA_TG_H_
+
+#include <stdint.h>
+
+/* Enable the debug summary output of the test analyzer */
+#define TA_TG_DBG_SHOW_SUMMARY (1)
+
+/* Default traffic-generator parameters; overridable per run through the
+ * arguments of nt4ga_pci_ta_tg_measure_throughput_main()
+ */
+#define TG_NUM_PACKETS (8)
+#define TG_PKT_SIZE (2048 * 1)
+#define TG_AREA_SIZE (TG_NUM_PACKETS * TG_PKT_SIZE)
+
+#define TG_DELAY (200000) /* usec */
+
+/* Struct predefinitions */
+struct adapter_info_s;
+struct nthw_hif_end_point_counters;
+
+/* Handles for the PCIe TA (test analyzer: good/bad packet, length and
+ * payload error counters) and TG (read/write traffic generator) modules
+ */
+struct nt4ga_pci_ta_tg_s {
+	struct nthw_pci_rd_tg *mp_nthw_pci_rd_tg; /* PCIe read traffic generator */
+	struct nthw_pci_wr_tg *mp_nthw_pci_wr_tg; /* PCIe write traffic generator */
+	struct nthw_pci_ta *mp_nthw_pci_ta; /* PCIe test analyzer */
+};
+
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg_t;
+typedef struct nt4ga_pci_ta_tg_s nt4ga_pci_ta_tg;
+
+/* Attach the TA/TG modules of the adapter's FPGA */
+int nt4ga_pci_ta_tg_init(struct adapter_info_s *p_adapter_info);
+
+/* Run a single throughput measurement; results are accumulated in the
+ * pri (root) and sla (endpoint) counter sets
+ */
+int nt4ga_pci_ta_tg_measure_throughput_run(struct adapter_info_s *p_adapter_info,
+				      struct nthw_hif_end_point_counters *pri,
+				      struct nthw_hif_end_point_counters *sla);
+/* Sweep the requested NUMA node(s) and direction(s), running one
+ * measurement per combination
+ */
+int nt4ga_pci_ta_tg_measure_throughput_main(struct adapter_info_s *p_adapter_info,
+				       const uint8_t numa_node,
+				       const int direction, const int n_pkt_size,
+				       const int n_batch_count, const int n_delay);
+
+#endif /* _NT4GA_PCI_TA_TG_H_ */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
new file mode 100644
index 0000000000..b61c73ea12
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "nt4ga_adapter.h"
+
+#define NO_FLAGS 0
+
+/* Inline timestamp format is pcap-like 32:32 bits - seconds in the upper
+ * 32 bits and (as consumed here) nanoseconds in the lower 32 bits.
+ * Convert to a single nanosecond count.
+ */
+static inline uint64_t timestamp2ns(uint64_t ts)
+{
+	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
+}
+
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				   uint32_t *p_stat_dma_virtual);
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual);
+
+/*
+ * Collect statistics from the FPGA STA DMA snapshot into p_nt4ga_stat.
+ *
+ * Vswitch images: the DMA timestamp word is set to all-ones first (the FPGA
+ * may only clear bits there - see comment below), last_timestamp is taken
+ * from the OS clock, and counters are parsed with the virt v1 layout.
+ * Capture images: last_timestamp is converted from the DMA timestamp and
+ * counters are parsed with the cap v1 layout.
+ *
+ * Always returns 0. NOTE(review): the return values of the two collect
+ * helpers are ignored, so a layout-version mismatch inside them is not
+ * propagated to the caller - confirm this is intentional.
+ */
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+		      nt4ga_stat_t *p_nt4ga_stat)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	if (p_nthw_stat->mb_is_vswitch) {
+		/*
+		 * Set all bits in the DMA block timestamp since 9530-42-05 and other Vswitch FPGA
+		 * images may only clear all bits in this memory location. TBV
+		 * Consequently, last_timestamp must be constructed via a system call.
+		 */
+		*p_nthw_stat->mp_timestamp = 0xFFFFFFFF;
+		p_nt4ga_stat->last_timestamp = NT_OS_GET_TIME_NS();
+		nt4ga_stat_collect_virt_v1_stats(p_nt4ga_stat,
+						p_nt4ga_stat->p_stat_dma_virtual);
+	} else {
+		p_nt4ga_stat->last_timestamp =
+			timestamp2ns(*p_nthw_stat->mp_timestamp);
+		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+					       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+	return 0;
+}
+
+/*
+ * Allocate and initialize the STA (statistics) and RMC modules for this
+ * adapter and cache the counter dimensions in nt4ga_stat.
+ *
+ * Fix vs. original: nthw_rmc_new() is now called only after nthw_stat_new()
+ * has succeeded. The original allocated both up front and leaked the RMC
+ * instance on the STAT-allocation failure path.
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info)
+{
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	fpga_info_t *fpga_info = &p_adapter_info->fpga_info;
+	nt_fpga_t *p_fpga = fpga_info->mp_fpga;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	if (p_nt4ga_stat) {
+		memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t));
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	{
+		nthw_stat_t *p_nthw_stat = nthw_stat_new();
+		nthw_rmc_t *p_nthw_rmc;
+
+		if (!p_nthw_stat) {
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		/* Allocate the RMC only after STAT succeeded so a STAT
+		 * failure cannot leak an RMC instance.
+		 */
+		p_nthw_rmc = nthw_rmc_new();
+		if (!p_nthw_rmc) {
+			nthw_stat_delete(p_nthw_stat);
+
+			NT_LOG(ERR, ETHDEV, "%s: ERROR (%s:%d)", p_adapter_id_str,
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->mp_nthw_stat = p_nthw_stat;
+		nthw_stat_init(p_nthw_stat, p_fpga, 0);
+
+		p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc;
+		nthw_rmc_init(p_nthw_rmc, p_fpga, 0);
+
+		p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers;
+		p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers;
+
+		p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports;
+		p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and map the DMA area the FPGA writes statistics into, allocate
+ * the per-color/host-buffer/port/FLM counter structures, and trigger the
+ * first statistics transfer. The RMC is blocked while the DMA address is
+ * being (re)configured.
+ *
+ * Fixes vs. original - three printf-style format defects (UB per the C
+ * variadic formatting rules):
+ *  - the first NT_LOG had six conversion specifiers for five arguments;
+ *  - "%02ld" was used for the int n_physical_adapter_no;
+ *  - PRIX64 was used for the uint32_t n_stat_size.
+ * (p_dma->addr is formatted with PRIX64 elsewhere in this function, so it
+ * is a 64-bit integer, not a pointer - hence no %p here.)
+ *
+ * NOTE(review): on the allocation-failure paths below, earlier allocations
+ * (DMA area, counter arrays) are not released here - presumably
+ * nt4ga_stat_stop() performs the cleanup; verify against the callers.
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
+{
+	const int n_physical_adapter_no _unused = p_adapter_info->adapter_no;
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	nthw_rmc_t *p_nthw_rmc = p_nt4ga_stat->mp_nthw_rmc;
+
+	if (p_nthw_rmc)
+		nthw_rmc_block(p_nthw_rmc);
+
+	/* Allocate and map memory for fpga statistics */
+	{
+		uint32_t n_stat_size =
+			(uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) +
+				   sizeof(p_nthw_stat->mp_timestamp));
+		struct nt_dma_s *p_dma;
+		int numa_node = p_adapter_info->fpga_info.numa_node;
+
+		/* FPGA needs a 16K alignment on Statistics */
+		p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node);
+
+		if (!p_dma) {
+			NT_LOG(ERR, ETHDEV, "%s: pDma alloc failed\n",
+			       __func__);
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV, "%s: %x @%d %" PRIX64 " %" PRIX64 "\n",
+		       __func__, n_stat_size, numa_node, p_dma->addr,
+		       p_dma->iova);
+
+		NT_LOG(DBG, ETHDEV,
+		       "DMA: Physical adapter %02d, PA = 0x%016" PRIX64
+		       " DMA = 0x%016" PRIX64 " size = 0x%" PRIX32 "\n",
+		       n_physical_adapter_no, p_dma->iova, p_dma->addr,
+		       n_stat_size);
+
+		p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr;
+		p_nt4ga_stat->n_stat_size = n_stat_size;
+		p_nt4ga_stat->p_stat_dma = p_dma;
+
+		/* 0xaa poison makes "FPGA never wrote here" visible in dumps */
+		memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size);
+		nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova,
+				       p_nt4ga_stat->p_stat_dma_virtual);
+	}
+
+	if (p_nthw_rmc)
+		nthw_rmc_unblock(p_nthw_rmc, false);
+
+	p_nt4ga_stat->mp_stat_structs_color = calloc(p_nthw_stat->m_nb_color_counters,
+						sizeof(struct color_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_color) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	p_nt4ga_stat->mp_stat_structs_hb =
+		calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers,
+		       sizeof(struct host_buffer_counters));
+	if (!p_nt4ga_stat->mp_stat_structs_hb) {
+		NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n", __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Separate memory allocation for VSWITCH and Inline to appropriate port counter structures.
+	 */
+	if (p_nthw_stat->mb_is_vswitch) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx =
+			calloc(p_nthw_stat->m_nb_rx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx =
+			calloc(p_nthw_stat->m_nb_tx_host_buffers,
+			       sizeof(struct port_counters_vswitch_v1));
+		if (!p_nt4ga_stat->virt.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->flm_stat_ver = 0;
+		p_nt4ga_stat->mp_stat_structs_flm = NULL;
+	} else { /* Inline */
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx =
+			calloc(NUM_ADAPTER_PORTS_MAX,
+			       sizeof(struct port_counters_v2));
+		if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+
+		p_nt4ga_stat->flm_stat_ver = 0;
+
+		p_nt4ga_stat->mp_stat_structs_flm =
+			calloc(1, sizeof(struct flm_counters_v1));
+		if (!p_nt4ga_stat->mp_stat_structs_flm) {
+			NT_LOG(ERR, GENERAL, "Cannot allocate mem (%s:%d).\n",
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	memset(p_nt4ga_stat->a_stat_structs_color_base, 0,
+	       sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS);
+	p_nt4ga_stat->last_timestamp = 0;
+
+	nthw_stat_trigger(p_nthw_stat);
+
+	return 0;
+}
+
+/*
+ * Release every statistics resource allocated by nt4ga_stat_setup().
+ * Safe on partially initialized state: free(NULL) is a no-op and each
+ * pointer is reset to NULL after release. Always returns 0.
+ */
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+
+	/* free(NULL) is a no-op, so the per-pointer guards of the previous
+	 * version are unnecessary for the heap-allocated counter arrays.
+	 */
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_rx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_rx = NULL;
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_rx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_rx = NULL;
+
+	free(p_nt4ga_stat->virt.mp_stat_structs_port_tx);
+	p_nt4ga_stat->virt.mp_stat_structs_port_tx = NULL;
+	free(p_nt4ga_stat->cap.mp_stat_structs_port_tx);
+	p_nt4ga_stat->cap.mp_stat_structs_port_tx = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_color);
+	p_nt4ga_stat->mp_stat_structs_color = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_hb);
+	p_nt4ga_stat->mp_stat_structs_hb = NULL;
+
+	free(p_nt4ga_stat->mp_stat_structs_flm);
+	p_nt4ga_stat->mp_stat_structs_flm = NULL;
+
+	/* nt_dma_free() is a project helper whose NULL behavior is not
+	 * visible here - keep the guard.
+	 */
+	if (p_nt4ga_stat->p_stat_dma) {
+		nt_dma_free(p_nt4ga_stat->p_stat_dma);
+		p_nt4ga_stat->p_stat_dma = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Print per-interface Rx/Tx packet, octet and drop totals to pfh, one
+ * line per physical port. Always returns 0.
+ */
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat;
+	const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str;
+	const int n_ports = p_adapter_info->fpga_info.n_phy_ports;
+	int intf;
+
+	for (intf = 0; intf < n_ports; intf++) {
+		fprintf(pfh,
+			"%s: Intf %02d: Rx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 " Tx: %016" PRIX64 " %016" PRIX64
+			" %016" PRIX64 "\n",
+			p_adapter_id_str, intf,
+			p_nt4ga_stat->a_port_rx_packets_total[intf],
+			p_nt4ga_stat->a_port_rx_octets_total[intf],
+			p_nt4ga_stat->a_port_rx_drops_total[intf],
+			p_nt4ga_stat->a_port_tx_packets_total[intf],
+			p_nt4ga_stat->a_port_tx_octets_total[intf],
+			p_nt4ga_stat->a_port_tx_drops_total[intf]);
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+/*
+ * Parse the vswitch (virt v1) statistics layout from the DMA snapshot and
+ * accumulate into the nt4ga_stat counter structures. Section order, as the
+ * pointer advances below: color counters, RX host-buffer counters, per-port
+ * RX counters, per-port TX counters.
+ *
+ * Fix vs. original: the NULL checks now run before any dereference; the
+ * original read p_nt4ga_stat->mp_nthw_stat (and the port counts) before
+ * checking p_nt4ga_stat for NULL. Also corrected the first loop's comment,
+ * which claimed "RX ports" while iterating the color counters.
+ *
+ * Returns 0 on success, -1 on NULL input or unsupported layout version.
+ */
+static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+				    uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat;
+	int n_rx_ports;
+	int n_tx_ports;
+	int c, h, p;
+
+	if (!p_nt4ga_stat)
+		return -1;
+
+	p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	if (!p_nthw_stat)
+		return -1;
+
+	n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* Color counters: two DMA words per color - packets (TCP flags in
+	 * the top 6 bits) and bytes.
+	 */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		const unsigned int tcp_flags_bits = 6U;
+		const uint32_t val_mask_dma = 0xffffffffULL >> tcp_flags_bits;
+
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2] & val_mask_dma;
+		p_nt4ga_stat->mp_stat_structs_color[c].tcp_flags |=
+			(uint8_t)(p_stat_dma_virtual[c * 2] >>
+				  (32 - tcp_flags_bits));
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	/* Host buffer counters: eight DMA words per host buffer */
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_rx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+
+		/* Rx totals */
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters];
+		p_nt4ga_stat->a_port_rx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->a_port_rx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	/* TX ports */
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->virt.mp_stat_structs_port_tx[p].qos_drop_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+
+		/* Tx totals */
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters];
+		p_nt4ga_stat->a_port_tx_packets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->a_port_tx_drops_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+	}
+
+	return 0;
+}
+
+/* Called with stat mutex locked */
+static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+					   uint32_t *p_stat_dma_virtual)
+{
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
+	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
+	int c, h, p;
+
+	if (!p_nthw_stat || !p_nt4ga_stat)
+		return -1;
+
+	if (p_nthw_stat->mn_stat_layout_version != 6) {
+		NT_LOG(ERR, ETHDEV, "HW STA module version not supported");
+		return -1;
+	}
+
+	/* RX ports */
+	for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) {
+		p_nt4ga_stat->mp_stat_structs_color[c].color_packets +=
+			p_stat_dma_virtual[c * 2];
+		p_nt4ga_stat->mp_stat_structs_color[c].color_bytes +=
+			p_stat_dma_virtual[c * 2 + 1];
+	}
+
+	/* Move to Host buffer counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters;
+
+	for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) {
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets +=
+			p_stat_dma_virtual[h * 8];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets +=
+			p_stat_dma_virtual[h * 8 + 1];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets +=
+			p_stat_dma_virtual[h * 8 + 2];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets +=
+			p_stat_dma_virtual[h * 8 + 3];
+		p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes +=
+			p_stat_dma_virtual[h * 8 + 4];
+		p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 5];
+		p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes +=
+			p_stat_dma_virtual[h * 8 + 6];
+		p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes +=
+			p_stat_dma_virtual[h * 8 + 7];
+	}
+
+	/* Move to Rx Port counters */
+	p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters;
+
+	/* RX ports */
+	for (p = 0; p < n_rx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  42] :
+			0;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop +=
+			p_nthw_stat->m_dbs_present ?
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					  47] :
+			0;
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52];
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53];
+
+		/* Rx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] +
+			(p_nthw_stat->m_dbs_present ?
+			 p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters +
+					   42] :
+			 0);
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_rx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0];
+		p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum;
+	}
+
+	/* Move to Tx Port counters */
+	p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters;
+
+	for (p = 0; p < n_tx_ports; p++) {
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1024_to_1518_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_1519_to_2047_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_2048_to_4095_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p]
+		.pkts_4096_to_8191_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22];
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23];
+
+		/* Tx totals */
+		uint64_t new_drop_events_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22];
+
+		uint64_t new_packets_sum =
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] +
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21];
+
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events +=
+			new_drop_events_sum;
+		p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum;
+
+		p_nt4ga_stat->a_port_tx_octets_total[p] +=
+			p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0];
+		p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum;
+		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.h b/drivers/net/ntnic/adapter/nt4ga_stat.h
new file mode 100644
index 0000000000..4a1067200c
--- /dev/null
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT4GA_STAT_H_
+#define NT4GA_STAT_H_
+
+#include "nt_util.h"
+#include "common_adapter_defs.h"
+
+#define NT_MAX_COLOR_FLOW_STATS 0x400
+
+/* Per-color flow statistics entry (array of NT_MAX_COLOR_FLOW_STATS). */
+struct color_counters {
+	uint64_t color_packets; /* packets accounted to this color */
+	uint64_t color_bytes; /* bytes accounted to this color */
+	uint8_t tcp_flags; /* TCP flags seen; aggregation semantics not visible here -- TODO confirm */
+};
+
+/* Per host-buffer (DMA queue) packet and byte counters. */
+struct host_buffer_counters {
+	uint64_t flush_packets; /* packets flushed */
+	uint64_t drop_packets; /* packets dropped */
+	uint64_t fwd_packets; /* packets forwarded to the host */
+	uint64_t dbs_drop_packets; /* packets dropped by DBS -- TODO confirm */
+	uint64_t flush_bytes; /* byte counterparts of the above */
+	uint64_t drop_bytes;
+	uint64_t fwd_bytes;
+	uint64_t dbs_drop_bytes;
+};
+
+/*
+ * Per-port counters for capability ("cap") adapters, layout version 2.
+ * All fields are free-running totals accumulated by the statistics
+ * collector from the FPGA counter DMA area.
+ */
+struct port_counters_v2 {
+	/* Rx/Tx common port counters */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* FPGA counters */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	/* RMON-style size-binned packet counters */
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	/* Encapsulation classification counters (ISL/VLAN/MPLS combinations) */
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	/* Filter/drop accounting (packets and matching octet counters) */
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop; /* only maintained when DBS is present */
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop; /* only maintained when DBS is present */
+	/* IP fragment table hit/miss counters */
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+/* Per-port counters for VSWITCH/inline adapters, layout version 1. */
+struct port_counters_vswitch_v1 {
+	/* Rx/Tx common port counters */
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets; /* octets dropped by QoS policing -- TODO confirm */
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Flow Matcher (FLM) module counters. Fields are grouped by the FLM
+ * register-map version that introduced them (0.17 and 0.20).
+ */
+struct flm_counters_v1 {
+	/* FLM 0.17 */
+	uint64_t current; /* currently learned flows -- TODO confirm */
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	/* FLM 0.20 */
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+/*
+ * Statistics state for one adapter: HW module handles, the DMA area the
+ * FPGA delivers counters into, per-port/per-color counter structs and
+ * running Rx/Tx totals with their "base" snapshots.
+ */
+struct nt4ga_stat_s {
+	nthw_stat_t *mp_nthw_stat;
+	nthw_rmc_t *mp_nthw_rmc;
+	struct nt_dma_s *p_stat_dma; /* DMA area written by the FPGA */
+	uint32_t *p_stat_dma_virtual; /* CPU mapping of the counter DMA area */
+	uint32_t n_stat_size;
+
+	uint64_t last_timestamp;
+
+	int mn_rx_host_buffers;
+	int mn_tx_host_buffers;
+
+	int mn_rx_ports;
+	int mn_tx_ports;
+
+	struct color_counters *mp_stat_structs_color;
+	/* For calculating increments between stats polls */
+	struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS];
+
+	/* Exactly one of "virt" and "cap" is used, depending on adapter profile */
+	union {
+		/*Port counters for VSWITCH/inline */
+		struct {
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_rx;
+			struct port_counters_vswitch_v1 *mp_stat_structs_port_tx;
+		} virt;
+		struct {
+			struct port_counters_v2 *mp_stat_structs_port_rx;
+			struct port_counters_v2 *mp_stat_structs_port_tx;
+		} cap;
+	};
+
+	struct host_buffer_counters *mp_stat_structs_hb;
+
+	int flm_stat_ver; /* FLM register-map version, selects valid flm counters */
+	struct flm_counters_v1 *mp_stat_structs_flm;
+
+	/* Rx/Tx totals: */
+	uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */
+
+	uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	/* Base is for calculating increments between statistics reads */
+	uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX];
+
+	uint64_t a_port_tx_drops_base[NUM_ADAPTER_PORTS_MAX];
+	uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX];
+};
+
+typedef struct nt4ga_stat_s nt4ga_stat_t;
+
+int nt4ga_stat_init(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info);
+int nt4ga_stat_stop(struct adapter_info_s *p_adapter_info);
+
+int nt4ga_stat_dump(struct adapter_info_s *p_adapter_info, FILE *pfh);
+
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
+		      nt4ga_stat_t *p_nt4ga_stat);
+
+#endif /* NT4GA_STAT_H_ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 428fc7af98..2552b5d68d 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -10,22 +10,39 @@ endif
 # includes
 includes = [
     include_directories('.'),
+    include_directories('adapter'),
     include_directories('include'),
+    include_directories('nim'),
     include_directories('ntlog/include'),
     include_directories('ntutil/include'),
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('sensors'),
+    include_directories('sensors/avr_sensors'),
+    include_directories('sensors/board_sensors'),
+    include_directories('sensors/nim_sensors'),
+    include_directories('sensors/ntavr'),
 ]
 
 # all sources
 sources = files(
+    'adapter/nt4ga_adapter.c',
+    'adapter/nt4ga_link.c',
+    'adapter/nt4ga_link_100g.c',
+    'adapter/nt4ga_pci_ta_tg.c',
+    'adapter/nt4ga_stat.c',
+    'nim/i2c_nim.c',
+    'nim/nt_link_speed.c',
+    'nim/qsfp_sensors.c',
+    'nim/sfp_sensors.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
     'nthw/core/nthw_fpga_rst.c',
     'nthw/core/nthw_fpga_rst9563.c',
     'nthw/core/nthw_fpga_rst_nt200a0x.c',
+    'nthw/core/nthw_gmf.c',
     'nthw/core/nthw_gpio_phy.c',
     'nthw/core/nthw_hif.c',
     'nthw/core/nthw_iic.c',
@@ -35,6 +52,7 @@ sources = files(
     'nthw/core/nthw_pci_ta.c',
     'nthw/core/nthw_pci_wr_tg.c',
     'nthw/core/nthw_pcie3.c',
+    'nthw/core/nthw_rmc.c',
     'nthw/core/nthw_sdc.c',
     'nthw/core/nthw_si5340.c',
     'nthw/core/nthw_spi_v3.c',
@@ -50,6 +68,12 @@ sources = files(
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
     'ntutil/nt_util.c',
+    'sensors/avr_sensors/avr_sensors.c',
+    'sensors/board_sensors/board_sensors.c',
+    'sensors/board_sensors/tempmon.c',
+    'sensors/nim_sensors/nim_sensors.c',
+    'sensors/ntavr/ntavr.c',
+    'sensors/sensors.c',
 )
 
 if is_variable('default_cflags')
diff --git a/drivers/net/ntnic/nim/i2c_nim.c b/drivers/net/ntnic/nim/i2c_nim.c
new file mode 100644
index 0000000000..1c514d0300
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.c
@@ -0,0 +1,1974 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "i2c_nim.h"
+#include "ntlog.h"
+#include "nt_util.h"
+
+#include "nim_sensors.h"
+#include "sfp_p_registers.h"
+#include "qsfp_registers.h"
+#include "sfp_sensors.h"
+#include "qsfp_sensors.h"
+
+#include <assert.h>
+#include <string.h> /* memcmp, memset */
+
+/*
+ * Nim functions
+ */
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+#define NIM_READ false
+#define NIM_WRITE true
+#define NIM_PAGE_SEL_REGISTER 127
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+/*
+ * Check whether a copper SFP product number is one of the known
+ * tri-speed (10/100/1000) modules supported by this driver.
+ */
+static bool sfp_is_supported_tri_speed_pn(char *prod_no)
+{
+	/* Product numbers of the supported 3-speed copper SFP types */
+	static const char *const pn_trispeed_list[] = {
+		"FCMJ-8521-3", "FCLF-8521-3", "FCLF8521P2BTL", "EOLT-C12-02A",
+		"AMXP-24RJS",  "ABCU-5710RZ", "ABCU-5740RZ",   "FCLF8522P2BTL",
+	};
+	const size_t n_entries = ARRAY_SIZE(pn_trispeed_list);
+	size_t idx;
+
+	for (idx = 0; idx < n_entries; idx++) {
+		if (strcmp(pn_trispeed_list[idx], prod_no) == 0)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Return true when the NIM type uses page addressing (upper memory
+ * accessed via the page-select register), false for flat addressing.
+ * Unknown identifiers are logged and treated as flat addressing.
+ */
+static bool page_addressing(nt_nim_identifier_t id)
+{
+	switch (id) {
+	case NT_NIM_XFP:
+	case NT_NIM_QSFP:
+	case NT_NIM_QSFP_PLUS:
+	case NT_NIM_QSFP28:
+		return true;
+	case NT_NIM_SFP_SFP_PLUS:
+		return false;
+	default:
+		NT_LOG(DBG, ETHDEV, "%s: Unknown NIM identifier %d\n", __func__,
+		       id);
+		return false;
+	}
+}
+
+/* Map the raw NIM identifier byte read from the module to the enum type. */
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx)
+{
+	nt_nim_identifier_t id = (nt_nim_identifier_t)ctx->nim_id;
+
+	return id;
+}
+
+/*
+ * Perform a raw I2C read or write of seq_cnt bytes at reg_addr on the
+ * device selected by i2c_addr. lin_addr is unused here; it is kept only
+ * to mirror the linear-access call signature.
+ */
+static int nim_read_write_i2c_data(nim_i2c_ctx_p ctx, bool do_write,
+				uint16_t lin_addr, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t seq_cnt, uint8_t *p_data)
+{
+	/* nthw_iic_read/write_data multiplies the address by 2, so halve it here */
+	const uint8_t i2c_devaddr = i2c_addr / 2U;
+
+	(void)lin_addr; /* Unused */
+
+	if (do_write) {
+		return nthw_iic_write_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+					 seq_cnt, p_data);
+	}
+
+	return nthw_iic_read_data(&ctx->hwiic, i2c_devaddr, reg_addr,
+				seq_cnt, p_data);
+}
+
+/*
+ * ------------------------------------------------------------------------------
+ * Select a new page for page addressing. Only relevant for NIMs that support
+ * paging. Since a page switch can take substantial time, the current page
+ * select value is read first and only rewritten when it differs.
+ * Important:
+ * XFP Standard 8077, Ver 4.5, Page 61 states that:
+ * If the host attempts to write a table select value which is not supported in
+ * a particular module, the table select byte will revert to 01h.
+ * This can lead to some surprising result that some pages seems to be duplicated.
+ * ------------------------------------------------------------------------------
+ */
+
+static int nim_setup_page(nim_i2c_ctx_p ctx, uint8_t page_sel)
+{
+	uint8_t curr_page_sel;
+
+	/* Fetch the page currently selected by the module */
+	if (nim_read_write_i2c_data(ctx, NIM_READ, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(curr_page_sel), &curr_page_sel) != 0)
+		return -1;
+
+	/* Already on the requested page - skip the (slow) write */
+	if (page_sel == curr_page_sel)
+		return 0;
+
+	if (nim_read_write_i2c_data(ctx, NIM_WRITE, NIM_PAGE_SEL_REGISTER,
+				 nim_i2c_0xa0, NIM_PAGE_SEL_REGISTER,
+				 sizeof(page_sel), &page_sel) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Read or write "length" bytes at module-linear address "lin_addr",
+ * transparently splitting the transfer across MSA 128-byte blocks and
+ * mapping the linear address onto the right I2C device/register:
+ *   - paged modules: pages above 127 via the page-select register at 0xA0
+ *   - flat modules:  [0..255] at 0xA0/0xA2, PHY registers at 0xAC
+ * PHY accesses are 16-bit (two bytes per register address).
+ * Returns 0 on success, -1 on range or I2C errors.
+ */
+static int nim_nim_read_write_data_lin(nim_i2c_ctx_p ctx, bool m_page_addressing,
+				   uint16_t lin_addr, uint16_t length,
+				   uint8_t *p_data, bool do_write)
+{
+	uint16_t i;
+	uint8_t reg_addr; /* The actual register address in I2C device */
+	uint8_t i2c_addr;
+	int block_size = 128; /* Equal to size of MSA pages */
+	int seq_cnt;
+	int max_seq_cnt = 1;
+	int multi_byte = 1; /* One byte per I2C register is default */
+	/* NOTE(review): port number assumes instance numbering starts at 2 -- confirm */
+	const int m_port_no = ctx->instance - 2;
+
+	if (lin_addr >= SFP_PHY_LIN_ADDR) {
+		/*
+		 * This represents an address space at I2C address 0xAC for SFP modules
+		 * containing a PHY. (eg 1G Copper SFP). Each register is 16bit and is
+		 * accessed MSByte first and this reading latches the LSByte that is
+		 * subsequently read from the same address.
+		 */
+		multi_byte = 2;
+		max_seq_cnt = 2;
+
+		/* Test for correct multibyte access */
+		if ((length % multi_byte) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Uneven length (%d) for address range [0x%X..0x%X].",
+			       m_port_no, __func__, length, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+
+		if (lin_addr + (length / 2) >
+				SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG) {
+			NT_LOG(ERR, ETHDEV,
+			       "Port %d: %s: Access above address range [0x%X..0x%X].",
+			       m_port_no, __func__, SFP_PHY_LIN_ADDR,
+			       SFP_PHY_LIN_ADDR + SFP_PHY_LIN_RNG - 1);
+			return -1;
+		}
+	} else if (lin_addr + length > 128) {
+		/*
+		 * Page addressing could be relevant since the last byte is outside the
+		 * basic range so check if it is enabled
+		 */
+		if (m_page_addressing) {
+			/* Crossing into the PHY address range is not allowed */
+			if (lin_addr + length > SFP_PHY_LIN_ADDR) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above paged address range [0..0x%X].",
+				       m_port_no, __func__, SFP_PHY_LIN_ADDR);
+				return -1;
+			}
+		} else {
+			/* Access outside 0xA2 address range not allowed */
+			if (lin_addr + length > 512) {
+				NT_LOG(ERR, ETHDEV,
+				       "Port %d: %s: Access above address range [0..511].",
+				       m_port_no, __func__);
+				return -1;
+			}
+		}
+	}
+	/* No missing else here - all devices supports access to address [0..127] */
+
+	/* Transfer loop: each iteration handles at most one block/page segment */
+	for (i = 0; i < length;) {
+		bool use_page_select = false;
+
+		/*
+		 * Find out how much can be read from the current block in case of
+		 * single byte access
+		 */
+		if (multi_byte == 1)
+			max_seq_cnt = block_size - (lin_addr % block_size);
+
+		if (m_page_addressing) {
+			if (lin_addr >= 128) { /* Only page setup above this address */
+				use_page_select = true;
+
+				/* Map to [128..255] of 0xA0 device */
+				reg_addr = (uint8_t)(block_size +
+						    (lin_addr % block_size));
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+			}
+			i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+		} else {
+			if (lin_addr >= SFP_PHY_LIN_ADDR) {
+				/* Map to address [0..31] of 0xAC device */
+				reg_addr = (uint8_t)(lin_addr - SFP_PHY_LIN_ADDR);
+				i2c_addr = nim_i2c_0xac;
+			} else if (lin_addr >= 256) {
+				/* Map to address [0..255] of 0xA2 device */
+				reg_addr = (uint8_t)(lin_addr - 256);
+				i2c_addr = nim_i2c_0xa2;
+			} else {
+				reg_addr = (uint8_t)lin_addr;
+				i2c_addr = nim_i2c_0xa0; /* Base I2C address */
+			}
+		}
+
+		/* Now actually do the reading/writing */
+		seq_cnt = length - i; /* Number of remaining bytes */
+
+		if (seq_cnt > max_seq_cnt)
+			seq_cnt = max_seq_cnt;
+
+		/*
+		 * Read a number of bytes without explicitly specifying a new address.
+		 * This can speed up I2C access since automatic incrementation of the
+		 * I2C device internal address counter can be used. It also allows
+		 * a HW implementation, that can deal with block access.
+		 * Furthermore it also allows for access to data that must be accessed
+		 * as 16bit words reading two bytes at each address eg PHYs.
+		 */
+		if (use_page_select) {
+			if (nim_setup_page(ctx,
+					   (uint8_t)((lin_addr / 128) - 1)) != 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: Cannot set up page for linear address %u\n",
+				       __func__, lin_addr);
+				return -1;
+			}
+		}
+		if (nim_read_write_i2c_data(ctx, do_write, lin_addr, i2c_addr,
+					    reg_addr, (uint8_t)seq_cnt,
+					    p_data) != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Call to NIM_ReadWriteI2cData failed\n",
+			       __func__);
+			return -1;
+		}
+
+		p_data += seq_cnt;
+		i = (uint16_t)(i + seq_cnt);
+		/* 16-bit PHY registers advance the linear address by one per two bytes */
+		lin_addr = (uint16_t)(lin_addr + (seq_cnt / multi_byte));
+	}
+	return 0;
+}
+
+/* Read "length" bytes starting at linear address "lin_addr" into "data". */
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data)
+{
+	const bool paged = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, paged, lin_addr, length, data,
+				       NIM_READ);
+}
+
+/* Write "length" bytes from "data" starting at linear address "lin_addr". */
+static int write_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+			void *data)
+{
+	const bool paged = page_addressing(ctx->nim_id);
+
+	return nim_nim_read_write_data_lin(ctx, paged, lin_addr, length, data,
+				       NIM_WRITE);
+}
+
+/*
+ * Read and return a single byte at linear address "addr".
+ * Bug fix: "data" was left uninitialized, so a failing I2C read made the
+ * function return an indeterminate stack byte (undefined behavior).
+ * It now deterministically returns 0 when the read fails.
+ */
+static uint8_t read_byte(nim_i2c_ctx_p ctx, uint16_t addr)
+{
+	uint8_t data = 0; /* deterministic result if the I2C read fails */
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+	return data;
+}
+
+/* Read the NIM identifier byte into ctx->nim_id; returns 0 on success. */
+static int nim_read_id(nim_i2c_ctx_t *ctx)
+{
+	/* We are only reading the first byte so we don't care about pages here. */
+	const bool use_page_addressing = false;
+	const int res =
+		nim_nim_read_write_data_lin(ctx, use_page_addressing,
+					NIM_IDENTIFIER_ADDR,
+					sizeof(ctx->nim_id), &ctx->nim_id,
+					NIM_READ);
+
+	return res != 0 ? -1 : 0;
+}
+
+/*
+ * Common construction step for all NIM types: read the identifier byte
+ * and reset all cached module fields to a known-empty state.
+ */
+static int i2c_nim_common_construct(nim_i2c_ctx_p ctx)
+{
+	int res;
+
+	ctx->nim_id = 0;
+	res = nim_read_id(ctx);
+	if (res != 0) {
+		NT_LOG(ERR, PMD, "Can't read NIM id.");
+		return res;
+	}
+
+	/* Clear all cached identification strings */
+	memset(ctx->vendor_name, 0, sizeof(ctx->vendor_name));
+	memset(ctx->prod_no, 0, sizeof(ctx->prod_no));
+	memset(ctx->serial_no, 0, sizeof(ctx->serial_no));
+	memset(ctx->date, 0, sizeof(ctx->date));
+	memset(ctx->rev, 0, sizeof(ctx->rev));
+	memset(ctx->len_info, 0, sizeof(ctx->len_info));
+
+	/* Reset cached module state to defaults */
+	ctx->content_valid = false;
+	ctx->pwr_level_req = 0;
+	ctx->pwr_level_cur = 0;
+	ctx->avg_pwr = false;
+	ctx->tx_disable = false;
+	ctx->lane_idx = -1;
+	ctx->lane_count = 1;
+	ctx->options = 0;
+
+	return 0;
+}
+
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data);
+
+/*
+ * Generates {,q}sfp_read_vendor_info(): fills the context's vendor name,
+ * product number, serial number, date and revision strings from the module.
+ * NOTE(review): the address tokens are pasted as Q##SFP_..., which expands
+ * to the QSFP_*_LIN_ADDR constants for BOTH generated variants (the macro
+ * parameter "x" only affects the function name). If the plain SFP variant
+ * was meant to use separate SFP_*_LIN_ADDR constants, this paste needs to
+ * involve "x" -- confirm the intended linear addresses.
+ */
+#define XSFP_READ_VENDOR_INFO(x)                                             \
+	static void x##sfp_read_vendor_info(nim_i2c_ctx_t *ctx)              \
+	{                                                                    \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_NAME_LIN_ADDR,      \
+				      sizeof(ctx->vendor_name),               \
+				      ctx->vendor_name);                      \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_PN_LIN_ADDR,        \
+				      sizeof(ctx->prod_no), ctx->prod_no);     \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_SN_LIN_ADDR,        \
+				      sizeof(ctx->serial_no), ctx->serial_no); \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_DATE_LIN_ADDR,      \
+				      sizeof(ctx->date), ctx->date);         \
+		nim_read_vendor_info(ctx, Q##SFP_VENDOR_REV_LIN_ADDR,       \
+				      (uint8_t)(sizeof(ctx->rev) - 2),       \
+				      ctx->rev); /*OBS Only two bytes*/      \
+	}
+
+/* Instantiate sfp_read_vendor_info() and qsfp_read_vendor_info() */
+XSFP_READ_VENDOR_INFO()
+XSFP_READ_VENDOR_INFO(q)
+
+/* Fill "state" for an SFP/SFP+ module by reading its nominal bit rate. */
+static int sfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	/* The nominal bit rate is read directly from the module */
+	return nthw_iic_read_data(&ctx->hwiic, ctx->devaddr, SFP_BIT_RATE_ADDR,
+			       sizeof(state->br), &state->br);
+}
+
+/*
+ * Fill "state" for a QSFP-family module. No HW reads are needed: the
+ * module class alone determines the reported bit rate.
+ * Improvement: the raw identifier values 12U/13U/17U duplicated the
+ * NT_NIM_QSFP/NT_NIM_QSFP_PLUS/NT_NIM_QSFP28 enum values already used by
+ * page_addressing() (and matching 0x0C/0x0D/0x11 in nim_id_to_text()),
+ * so use the named constants for consistency.
+ */
+static int qsfp_nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	int res = 0; /* unused due to no readings from HW */
+
+	assert(ctx && state);
+	assert(ctx->nim_id != NT_NIM_UNKNOWN && "Nim is not initialized");
+
+	(void)memset(state, 0, sizeof(*state));
+
+	switch (ctx->nim_id) {
+	case NT_NIM_QSFP:
+		state->br = 10U; /* QSFP: 4 x 1G = 4G */
+		break;
+	case NT_NIM_QSFP_PLUS:
+		state->br = 103U; /* QSFP+: 4 x 10G = 40G */
+		break;
+	case NT_NIM_QSFP28:
+		state->br = 255U; /* QSFP28: 4 x 25G = 100G */
+		break;
+	default:
+		NT_LOG(INF, PMD,
+		       "%s:%d nim_id = %u is not an QSFP/QSFP+/QSFP28 module\n",
+		       __func__, __LINE__, ctx->nim_id);
+		res = -1;
+	}
+
+	return res;
+}
+
+/* Dispatch state collection to the SFP or QSFP-family implementation. */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state)
+{
+	if (translate_nimid(ctx) != NT_NIM_SFP_SFP_PLUS)
+		return qsfp_nim_state_build(ctx, state);
+
+	return sfp_nim_state_build(ctx, state);
+}
+
+/* Translate a NIM identifier byte into a human-readable module name. */
+const char *nim_id_to_text(uint8_t nim_id)
+{
+	/* Sparse id -> name table; gaps stay NULL and report "ILLEGAL!" */
+	static const char *const id_names[] = {
+		[0x00] = "UNKNOWN",
+		[0x01] = "GBIC",
+		[0x02] = "FIXED",
+		[0x03] = "SFP/SFP+",
+		[0x04] = "300 pin XBI",
+		[0x05] = "XEN-PAK",
+		[0x06] = "XFP",
+		[0x07] = "XFF",
+		[0x08] = "XFP-E",
+		[0x09] = "XPAK",
+		[0x0A] = "X2",
+		[0x0B] = "DWDM",
+		[0x0C] = "QSFP",
+		[0x0D] = "QSFP+",
+		[0x11] = "QSFP28",
+		[0x12] = "CFP4",
+	};
+	const size_t n_ids = sizeof(id_names) / sizeof(id_names[0]);
+
+	if (nim_id < n_ids && id_names[nim_id] != NULL)
+		return id_names[nim_id];
+
+	return "ILLEGAL!";
+}
+
+/*
+ * Read and check the validity of the NIM basic data against the two
+ * embedded checksum bytes: byte 63 covers bytes 0-62 and byte 95 covers
+ * bytes 64-94. This will also preload the cache.
+ */
+static void check_content_valid(nim_i2c_ctx_p ctx, uint16_t start_addr)
+{
+	uint8_t buf[96];
+	uint32_t csum = 0;
+	int i;
+
+	read_data_lin(ctx, start_addr, sizeof(buf), &buf[0]);
+
+	for (i = 0; i < 63; i++)
+		csum += buf[i];
+
+	if ((csum & 0xFF) == buf[63]) {
+		/* First region OK - verify the second checksum region */
+		csum = 0;
+		for (i = 64; i < 95; i++)
+			csum += buf[i];
+
+		ctx->content_valid = ((csum & 0xFF) == buf[95]);
+	} else {
+		ctx->content_valid = false;
+	}
+
+	if (ctx->content_valid)
+		NT_LOG(DBG, NTHW, "NIM content validation passed");
+	else
+		NT_LOG(WRN, NTHW, "NIM content validation failed");
+}
+
+/*
+ * Set/reset the soft rate-select bits: RS0 in the Control/Status
+ * register follows the Rx rate, RS1 in the Extended Control/Status
+ * register follows the Tx rate. Each is a read-modify-write sequence.
+ */
+static void nim_sfp_set_rate_sel_high(nim_i2c_ctx_p ctx, bool rx_rate_high,
+				  bool tx_rate_high)
+{
+	const bool paged = page_addressing(ctx->nim_id);
+	const struct {
+		uint16_t addr; /* linear address of the register */
+		uint8_t bit; /* rate-select bit within it */
+		bool set_high; /* requested bit state */
+	} rate_sel[2] = {
+		{ SFP_CONTROL_STATUS_LIN_ADDR, SFP_SOFT_RATE0_BIT, rx_rate_high },
+		{ SFP_EXT_CTRL_STAT0_LIN_ADDR, SFP_SOFT_RATE1_BIT, tx_rate_high },
+	};
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		uint8_t reg;
+
+		nim_nim_read_write_data_lin(ctx, paged, rate_sel[i].addr,
+					sizeof(reg), &reg, NIM_READ);
+
+		if (rate_sel[i].set_high)
+			reg |= rate_sel[i].bit;
+		else
+			reg &= (uint8_t)~(rate_sel[i].bit);
+
+		nim_nim_read_write_data_lin(ctx, paged, rate_sel[i].addr,
+					sizeof(reg), &reg, NIM_WRITE);
+	}
+}
+
+/*
+ * Some NIM modules requires some changes to a rate setting.
+ * Validates the requested speed against the module's speed mask and, for
+ * dual-rate SFPs, drives the soft rate-select bits accordingly.
+ */
+static int nim_sfp_set_rate_select(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	if ((speed & (int)ctx->speed_mask) == 0) {
+		char buf[128];
+
+		/*
+		 * Bug fix: the format string has three %s conversions but only
+		 * two string arguments were passed (undefined behavior in the
+		 * variadic log call). Pass __func__ for the leading %s.
+		 */
+		NT_LOG(ERR, ETHDEV, "%s - Speed (%s) not within SpeedMask (%s)",
+		       __func__, nt_translate_link_speed(speed),
+		       nt_translate_link_speed_mask(ctx->speed_mask, buf,
+						 sizeof(buf)));
+		return -1;
+	}
+
+	if (ctx->specific_u.sfp.dual_rate) {
+		uint64_t req_speed = nt_get_link_speed(speed);
+		uint64_t other_speed =
+			nt_get_link_speed((nt_link_speed_t)(ctx->speed_mask ^ (uint32_t)speed));
+		bool rate_high = req_speed > other_speed;
+		/*
+		 * Do this both for 1/10 and 10/25. For Sfp28 it is not known if
+		 * this is necessary but it is believed not to do any harm.
+		 */
+		nim_sfp_set_rate_sel_high(ctx, rate_high, rate_high);
+	}
+	return 0;
+}
+
+/*
+ * Enable or disable the TX laser via the soft TX-disable bit in the
+ * SFP Control/Status register (read-modify-write).
+ */
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t ctrl_stat;
+	int res;
+
+	res = nim_nim_read_write_data_lin(ctx, pg_addr, SFP_CONTROL_STATUS_LIN_ADDR,
+				      sizeof(ctrl_stat), &ctrl_stat, NIM_READ);
+	if (res != 0)
+		return res;
+
+	if (disable)
+		ctrl_stat |= SFP_SOFT_TX_DISABLE_BIT;
+	else
+		ctrl_stat &= (uint8_t)~SFP_SOFT_TX_DISABLE_BIT;
+
+	return nim_nim_read_write_data_lin(ctx, pg_addr,
+				       SFP_CONTROL_STATUS_LIN_ADDR,
+				       sizeof(ctrl_stat), &ctrl_stat,
+				       NIM_WRITE);
+}
+
+/*
+ * Enable/disable the laser for a specific lane (lane_idx >= 0) or for all
+ * lanes (lane_idx < 0). Returns 0 on success, -1 on an I2C access failure.
+ */
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable,
+				       int lane_idx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	/* A negative lane index selects every lane */
+	const uint8_t mask = (lane_idx < 0) ? QSFP_SOFT_TX_ALL_DISABLE_BITS :
+			  (uint8_t)(1U << lane_idx);
+	uint8_t ctrl;
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_READ) != 0)
+		return -1;
+
+	/* Read-modify-write of the soft TX-disable bits */
+	ctrl = disable ? (uint8_t)(ctrl | mask) : (uint8_t)(ctrl & ~mask);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_CONTROL_STATUS_LIN_ADDR,
+				    sizeof(ctrl), &ctrl, NIM_WRITE) != 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read vendor information at a certain address. Any trailing whitespace is
+ * removed and a missing string termination in the NIM data is handled.
+ * Returns 0 on success, -1 on an I2C read failure.
+ */
+static int nim_read_vendor_info(nim_i2c_ctx_p ctx, uint16_t addr,
+				 uint8_t max_len, char *p_data)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	int idx;
+
+	/* Read max_len - 1 bytes; the final byte is reserved for '\0' */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, addr, (uint8_t)(max_len - 1),
+				    (uint8_t *)p_data, NIM_READ) != 0)
+		return -1;
+
+	/* Find the first whitespace character, if any */
+	for (idx = 0; idx < max_len - 1; idx++) {
+		char c = p_data[idx];
+
+		if (c == ' ' || c == '\n' || c == '\t' || c == '\v' ||
+				c == '\f' || c == '\r')
+			break;
+	}
+
+	/*
+	 * Terminate at the first whitespace, or - when none was found - add the
+	 * terminator as the very last character in case it was missing in the
+	 * NIM data
+	 */
+	p_data[idx] = '\0';
+	return 0;
+}
+
+/*
+ * Import length info in various units from NIM module data and convert to
+ * meters, saturating each entry at 65535 m.
+ */
+static void nim_import_len_info(nim_i2c_ctx_p ctx, uint8_t *p_nim_len_info,
+				uint16_t *p_nim_units)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(ctx->len_info); idx++) {
+		if (p_nim_len_info[idx] == 255) {
+			/* 255 encodes "longer than representable" - saturate */
+			ctx->len_info[idx] = 65535;
+			continue;
+		}
+
+		/* Raw count times the per-entry unit size gives meters */
+		uint32_t len_m = p_nim_len_info[idx] * p_nim_units[idx];
+
+		ctx->len_info[idx] = (len_m > 65535) ? 65535 : (uint16_t)len_m;
+	}
+}
+
+/*
+ * Read and cache basic QSFP/QSFP28 module data over I2C: DMI options,
+ * vendor strings, supported fiber lengths and the required power class.
+ * Returns 0 on success, -1 on an I2C read failure.
+ */
+static int qsfpplus_read_basic_data(nim_i2c_ctx_t *ctx)
+{
+	const bool pg_addr = page_addressing(ctx->nim_id);
+	uint8_t options;
+	uint8_t value;
+	uint8_t nim_len_info[5];
+	uint16_t nim_units[5] = { 1000, 2, 1, 1,
+				 1
+			       }; /* QSFP MSA units in meters */
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	NT_LOG(DBG, ETHDEV, "Instance %d: NIM id: %s (%d)\n", ctx->instance,
+	       nim_id_to_text(ctx->nim_id), ctx->nim_id);
+
+	/* Read DMI options */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_DMI_OPTION_LIN_ADDR,
+				    sizeof(options), &options, NIM_READ) != 0)
+		return -1;
+	/* Record whether the module reports averaged power measurements */
+	ctx->avg_pwr = options & QSFP_DMI_AVG_PWR_BIT;
+	NT_LOG(DBG, ETHDEV,
+	       "Instance %d: NIM options: (DMI: Yes, AvgPwr: %s)\n",
+	       ctx->instance, yes_no[ctx->avg_pwr]);
+
+	/* Fill in vendor name, PN, SN, date and revision strings */
+	qsfp_read_vendor_info(ctx);
+	NT_LOG(DBG, PMD,
+	       "Instance %d: NIM info: (Vendor: %s, PN: %s, SN: %s, Date: %s, Rev: %s)\n",
+	       ctx->instance, ctx->vendor_name, ctx->prod_no, ctx->serial_no,
+	       ctx->date, ctx->rev);
+
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_SUP_LEN_INFO_LIN_ADDR,
+				    sizeof(nim_len_info), nim_len_info,
+				    NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	/* Convert each entry to meters (see nim_units above) */
+	nim_import_len_info(ctx, nim_len_info, nim_units);
+
+	/* Read required power level */
+	if (nim_nim_read_write_data_lin(ctx, pg_addr, QSFP_EXTENDED_IDENTIFIER,
+				    sizeof(value), &value, NIM_READ) != 0)
+		return -1;
+
+	/*
+	 * Get power class according to SFF-8636 Rev 2.7, Table 6-16, Page 43:
+	 * If power class >= 5 setHighPower must be called for the module to be fully
+	 * functional
+	 */
+	if ((value & QSFP_POWER_CLASS_BITS_5_7) == 0) {
+		/* NIM in power class 1 - 4 */
+		ctx->pwr_level_req =
+			(uint8_t)(((value & QSFP_POWER_CLASS_BITS_1_4) >> 6) +
+				  1);
+	} else {
+		/* NIM in power class 5 - 7 */
+		ctx->pwr_level_req =
+			(uint8_t)((value & QSFP_POWER_CLASS_BITS_5_7) + 4);
+	}
+
+	return 0;
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_speed_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	enum {
+		OPTIONS_REG_ADDR = 195,
+		ENH_OPTIONS_REG_ADDR = 221,
+	};
+
+	/* Options register, bit 5: rate select implemented */
+	if (((read_byte(ctx, OPTIONS_REG_ADDR) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	uint8_t sel_type = (read_byte(ctx, ENH_OPTIONS_REG_ADDR) >> 2) & 0x03;
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, NTHW, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Select a speed that is supported for a multi rate module. The possible speed
+ * values must be obtained by setSpeedMask().
+ * Currently rate selection is assumed to be between 40Gb (10GBd) and 100G (25Gbd)
+ * The value in () are the baud rates for PAM-4 and are valid for extended rate
+ * select, version 2.
+ */
+static int qsfp28_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	const uint8_t rx_rate_sel_addr = 87;
+	const uint8_t tx_rate_sel_addr = 88;
+
+	if (ctx->lane_idx >= 0) {
+		/*
+		 * Individual lanes
+		 * Currently we do not support QSFP28 modules that support rate selection when
+		 * running on individual lanes but that might change in the future
+		 */
+		if (speed != NT_LINK_SPEED_25G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this lane speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+		return 0;
+	}
+
+	/*
+	 * All lanes together
+	 * The speed mask test below indicates whether the module supports rate selection
+	 */
+	if (ctx->speed_mask != (uint32_t)(NT_LINK_SPEED_40G | NT_LINK_SPEED_100G)) {
+		/* For ordinary modules only this speed is supported */
+		if (speed != NT_LINK_SPEED_100G) {
+			NT_LOG(ERR, NTHW,
+			       "NIM cannot select this speed (%s).",
+			       nt_translate_link_speed(speed));
+			return -1;
+		}
+		return 0;
+	}
+
+	uint16_t rate_bits;
+
+	if (speed == NT_LINK_SPEED_100G) {
+		rate_bits = 0xAAAA;
+	} else if (speed == NT_LINK_SPEED_40G) {
+		rate_bits = 0x0000;
+	} else {
+		NT_LOG(ERR, NTHW, "Unhandled NIM speed (%s).",
+		       nt_translate_link_speed(speed));
+		return -1;
+	}
+
+	/* Set speed for Rx and Tx on all lanes */
+	write_data_lin(ctx, rx_rate_sel_addr, sizeof(rate_bits), &rate_bits);
+	write_data_lin(ctx, tx_rate_sel_addr, sizeof(rate_bits), &rate_bits);
+	return 0;
+}
+
+/*
+ * Dispatch link-speed configuration according to the NIM module family.
+ * Returns 0 on success, -1 for unsupported modules or failed selection.
+ */
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed)
+{
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		return nim_sfp_set_rate_select(ctx, speed);
+	case NT_NIM_QSFP28:
+		if (qsfp28_is_speed_selection_enabled(ctx))
+			return qsfp28_set_link_speed(ctx, speed);
+
+		return 0; /* NIM picks up the speed automatically */
+	default:
+		NT_LOG(ERR, ETHDEV,
+		       "%s nim is not supported for adjustable link speed.",
+		       nim_id_to_text(ctx->nim_id));
+		return -1;
+	}
+}
+
+/*
+ * Reads basic vendor and DMI information.
+ * Populates the DMI capability flags, vendor strings, supported fiber
+ * lengths and power-level requirement in the context. Always returns 0.
+ */
+static int sfp_read_basic_data(nim_i2c_ctx_p ctx)
+{
+	const char *yes_no[2] _unused = { "No", "Yes" };
+
+	check_content_valid(ctx, 0);
+	NT_LOG(DBG, PMD, "NIM id: %s (%d)", nim_id_to_text(ctx->nim_id),
+	       ctx->nim_id);
+
+	/* Read DMI options */
+	uint8_t options;
+
+	read_data_lin(ctx, SFP_DMI_OPTION_LIN_ADDR, sizeof(options), &options);
+	/* Decode the individual DMI capability bits into context flags */
+	ctx->avg_pwr = options & SFP_DMI_AVG_PWR_BIT;
+	ctx->dmi_supp = options & SFP_DMI_IMPL_BIT;
+	ctx->specific_u.sfp.ext_cal = options & SFP_DMI_EXT_CAL_BIT;
+	ctx->specific_u.sfp.addr_chg = options & SFP_DMI_ADDR_CHG_BIT;
+
+	/* With DMI support all five monitor readouts become available */
+	if (ctx->dmi_supp) {
+		ctx->options |=
+			(1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			(1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			(1 << NIM_OPTION_TX_POWER);
+	}
+
+	if (ctx->dmi_supp) {
+		NT_LOG(DBG, PMD,
+		       "NIM options: (DMI: %s, AvgPwr: %s, ExtCal: %s, AddrChg: %s)",
+		       yes_no[ctx->dmi_supp], yes_no[ctx->avg_pwr],
+		       yes_no[ctx->specific_u.sfp.ext_cal],
+		       yes_no[ctx->specific_u.sfp.addr_chg]);
+	} else {
+		NT_LOG(DBG, PMD, "NIM options: DMI not supported");
+	}
+	/* Read enhanced options */
+	read_data_lin(ctx, SFP_ENHANCED_OPTIONS_LIN_ADDR, sizeof(options),
+		    &options);
+	ctx->tx_disable = options & SFP_SOFT_TX_DISABLE_IMPL_BIT;
+
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	/* Fill in vendor name, PN, SN, date and revision strings */
+	sfp_read_vendor_info(ctx);
+
+	uint8_t nim_len_info[5];
+
+	read_data_lin(ctx, SFP_SUP_LEN_INFO_LIN_ADDR, sizeof(nim_len_info),
+		    nim_len_info);
+
+	/*
+	 * Returns supported length information in meters for various fibers as 5 indivi-
+	 * dual values: [SM(9um), EBW(50um), MM(50um), MM(62.5um), Copper]
+	 * If no length information is available for a certain entry, the returned value
+	 * will be zero. This will be the case for SFP modules - EBW entry.
+	 * If the MSBit is set the returned value in the lower 31 bits indicates that the
+	 * supported length is greater than this.
+	 */
+
+	uint16_t nim_units[5] = { 1000, 100, 10, 10,
+				 1
+			       }; /* SFP MSA units in meters */
+	nim_import_len_info(ctx, &nim_len_info[0], &nim_units[0]);
+
+	if (ctx->len_info[0] != 0 || ctx->len_info[1] != 0) {
+		/*
+		 * Make sure that for SFP modules the supported length for SM fibers
+		 * which is given in both km and 100m units is are equal to the greatest
+		 * value.
+		 * The following test will also be valid if NIM_LEN_MAX has been set!
+		 */
+		if (ctx->len_info[1] > ctx->len_info[0])
+			ctx->len_info[0] = ctx->len_info[1];
+
+		ctx->len_info[1] = 0; /* EBW is not supported for SFP */
+	}
+
+	/* Derive the power level the module requires (class 1 or 2) */
+	read_data_lin(ctx, SFP_OPTION0_LIN_ADDR, sizeof(options), &options);
+
+	if (options & SFP_POWER_LEVEL2_REQ_BIT)
+		ctx->pwr_level_req = 2;
+	else
+		ctx->pwr_level_req = 1;
+
+	/* Default to level 1; query the actual level only when 2 is required */
+	ctx->pwr_level_cur = 1;
+
+	if (ctx->pwr_level_req == 2) {
+		/* Read the current power level status */
+		read_data_lin(ctx, SFP_EXT_CTRL_STAT0_LIN_ADDR, sizeof(options),
+			    &options);
+
+		if (options & SFP_POWER_LEVEL2_GET_BIT)
+			ctx->pwr_level_cur = 2;
+		else
+			ctx->pwr_level_cur = 1;
+	}
+	return 0;
+}
+
+/*
+ * Read the vendor product number and from this determine which QSFP DMI options
+ * that are present. This list also covers QSFP28 modules.
+ * This function should be used if automatic detection does not work.
+ * Returns true when the product number was recognized; otherwise only the
+ * (inaccurately assumed) mandatory temperature sensor is set and false is
+ * returned.
+ */
+static bool qsfpplus_get_qsfp_options_from_pn(nim_i2c_ctx_p ctx)
+{
+	/*
+	 * Shorthands for the recurring option sets. An enum is used so the
+	 * values are integer constant expressions, as required by the static
+	 * table initializer below.
+	 */
+	enum {
+		OPT_ALL_DMI = (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+			      (1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_TX_BIAS) |
+			      (1 << NIM_OPTION_TX_POWER),
+		OPT_TEMP_SUPPLY = (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY),
+		OPT_TEMP_ONLY = (1 << NIM_OPTION_TEMP),
+		OPT_RX_ONLY = (1 << NIM_OPTION_RX_ONLY),
+		OPT_RX_ONLY_DMI = (1 << NIM_OPTION_TEMP) | (1 << NIM_OPTION_SUPPLY) |
+				  (1 << NIM_OPTION_RX_POWER) | (1 << NIM_OPTION_RX_ONLY),
+		OPT_NONE = 0,
+	};
+	static const struct {
+		const char *prod_no;
+		uint32_t options;
+	} known_nims[] = {
+		{ "FTL410QE1C", OPT_ALL_DMI },      /* FINISAR, QSFP+ */
+		{ "FTL410QE2C", OPT_TEMP_SUPPLY },  /* FINISAR, QSFP+ */
+		{ "FTL4C1QE1C", OPT_ALL_DMI },      /* FINISAR, QSFP+ */
+		/*
+		 * AFBR-79E4Z: The digital diagnostic accuracy is not guaranteed so only
+		 * the mandatory temperature sensor is made available (although it will
+		 * also be inaccurate)
+		 */
+		{ "AFBR-79E4Z", OPT_TEMP_ONLY },    /* AVAGO 79E4Z, QSFP+ */
+		{ "AFBR-79E4Z-D", OPT_ALL_DMI },    /* AVAGO 79E4Z-D, QSFP+ */
+		{ "AFBR-79EQDZ", OPT_ALL_DMI },     /* AVAGO 79EQDZ, QSFP+ */
+		/*
+		 * Avago BiDi NIMs below: no sensors available, not even the normally
+		 * mandatory temp sensor, and this is ok since the temp sensor is not
+		 * mandatory on active optical modules (SFF-8436_rev4.1, p67)
+		 */
+		{ "AFBR-79EBRZ", OPT_RX_ONLY },     /* Avago RxOnly BiDi NIM */
+		{ "AFBR-79EBPZ-NU1", OPT_NONE },    /* Avago RxTx BiDi NIM */
+		{ "AFBR-79EBPZ", OPT_NONE },        /* Avago RxTx BiDi NIM */
+		{ "AFBR-89CDDZ", OPT_ALL_DMI },     /* AVAGO 89CDDZ, QSFP28 */
+		{ "AFBR-89BDDZ", OPT_ALL_DMI },     /* AVAGO 89BDDZ, QSFP28, BiDi */
+		/*
+		 * According to mail correspondence AFBR-89BRDZ is a RxOnly version of
+		 * AFBR-89BDDZ with lasers default off, so sensors are set as for
+		 * AFBR-89BDDZ except for the Tx sensors.
+		 * The lasers can be turned on however but should probably not because the
+		 * receivers might be degraded, and this is the cause for selling them as RxOnly.
+		 */
+		{ "AFBR-89BRDZ", OPT_RX_ONLY_DMI }, /* AVAGO 89BRDZ, QSFP28, BiDi, RxOnly */
+		{ "SQF1000L4LNGG01P", OPT_ALL_DMI }, /* Sumitomo, QSFP28 */
+		{ "SQF1000L4LNGG01B", OPT_ALL_DMI }, /* Sumitomo, QSFP28 */
+		{ "SQF1001L4LNGG01P", OPT_ALL_DMI }, /* Sumitomo, QSFP28 */
+		{ "SQF1001L4LNGG01B", OPT_ALL_DMI }, /* Sumitomo, QSFP28 */
+		{ "SQF1002L4LNGG01B", OPT_ALL_DMI }, /* Sumitomo, QSFP28 */
+		{ "FIM37700/171", OPT_ALL_DMI },    /* Fujitsu, QSFP28 */
+		{ "FIM37700/172", OPT_ALL_DMI },    /* Fujitsu, QSFP28 */
+		{ "TR-FC85S-NVS", OPT_ALL_DMI },    /* InnoLight, QSFP28 */
+		{ "TR-FC13L-NVS", OPT_ALL_DMI },    /* InnoLight, QSFP28 */
+		{ "FTLC9551REPM", OPT_ALL_DMI },    /* Finisar, QSFP28 */
+		{ "FTLC9558REPM", OPT_ALL_DMI },    /* Finisar, QSFP28 */
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(known_nims); i++) {
+		if (strcmp(ctx->prod_no, known_nims[i].prod_no) == 0) {
+			ctx->options = known_nims[i].options;
+			return true;
+		}
+	}
+
+	/*
+	 * Unknown product number.
+	 * DO NOTE: The temperature sensor is not mandatory on active/passive copper
+	 * and active optical modules
+	 */
+	ctx->options = OPT_TEMP_ONLY;
+	return false;
+}
+
+/*
+ * Try to figure out if a sensor is present by reading its value(s) and its limits.
+ * This is a highly impirical way that cannot be guaranteed to give the correct
+ * result but it was a wish not to be dependent on a PN table based solution.
+ *
+ * value_addr:    linear address of the first (big-endian, 16-bit) sensor value
+ * lane_count:    number of per-lane values to read (1 for module-wide sensors)
+ * limit_addr:    linear address of the 4 x 16-bit limits
+ *                (alarm high/low, warning high/low)
+ * two_compl:     interpret values/limits as two's-complement signed
+ * sensor_option: NIM_OPTION_* bit index set in ctx->options when the sensor
+ *                is deemed present
+ */
+static void qsfpplus_find_qsfp_sensor_option(nim_i2c_ctx_p ctx,
+		uint16_t value_addr,
+		uint8_t lane_count,
+		uint16_t limit_addr, bool two_compl,
+		uint32_t sensor_option)
+{
+	uint8_t data[8];
+	int i, j;
+	int value;
+	int value_list[4];
+	int limit;
+	int limit_list[4];
+	bool present;
+
+	/* Read current value(s) */
+	read_data_lin(ctx, value_addr, (uint16_t)(lane_count * 2), data);
+
+	/* Assemble each 16-bit big-endian value, sign-extending if requested */
+	for (j = 0; j < lane_count; j++) {
+		value = 0;
+
+		for (i = 0; i < 2; i++) {
+			value = value << 8;
+			value += data[2 * j + i];
+		}
+
+		if (two_compl && value >= 0x8000)
+			value = value - 0x10000;
+
+		value_list[j] = value;
+	}
+
+	/* Read limits Warning high/low Alarm high/low 4 values each two bytes */
+	read_data_lin(ctx, limit_addr, 8, data);
+
+	for (j = 0; j < 4; j++) {
+		limit = 0;
+
+		for (i = 0; i < 2; i++) {
+			limit = limit << 8;
+			limit += data[2 * j + i];
+		}
+
+		if (two_compl && limit >= 0x8000)
+			limit = limit - 0x10000;
+
+		limit_list[j] = limit;
+	}
+
+	/* Find out if limits contradicts each other */
+	int alarm_high = limit_list[0];
+	int alarm_low = limit_list[1];
+	int warn_high = limit_list[2];
+	int warn_low = limit_list[3];
+
+	bool alarm_limits = false; /* Are they present - that is both not zero */
+	bool warn_limits = false;
+	bool limit_conflict = false;
+
+	if (alarm_high != 0 || alarm_low != 0) {
+		alarm_limits = true;
+
+		if (alarm_high <= alarm_low)
+			limit_conflict = true;
+	}
+
+	if (warn_high != 0 || warn_low != 0) {
+		warn_limits = true;
+
+		/* Warning limits must be least restrictive */
+		if (warn_high <= warn_low)
+			limit_conflict = true;
+		else if ((warn_high > alarm_high) || (warn_low < alarm_low))
+			limit_conflict = true;
+	}
+
+	/* Try to deduce if the sensor is present or not */
+	present = false;
+
+	if (limit_conflict) {
+		present = false;
+	} else if (warn_limits ||
+		 alarm_limits) { /* Is one or both present and not contradictory */
+		present = true;
+	} else {
+		/*
+		 * All limits are zero - look at the sensor value
+		 * If one sensor is non-zero the sensor is set to be present
+		 */
+		for (j = 0; j < lane_count; j++) {
+			if (value_list[j] != 0) {
+				present = true;
+				break;
+			}
+		}
+
+		/*
+		 * If all limits and values are zero then present will be false here. In this
+		 * case it is assumed that the sensor is not present:
+		 * Experience indicates that for QSFP+ modules RxPwr will be non-zero even with
+		 * no optical input. QSFP28 modules however can easily have RxPwr equal to zero
+		 * with no optical input.
+		 * For all investigated modules it was found that if RxPwr is implemented then
+		 * the limits are also set. This is not always the case with TxBias and TxPwr
+		 * but here the measured values will be non-zero when the laser is on what it
+		 * will be just after initialization since it has no external hardware disable.
+		 */
+	}
+
+	if (present)
+		ctx->options |= (1U << sensor_option);
+}
+
+/*
+ * Find active QSFP sensors by probing each candidate sensor in turn.
+ */
+static void qsfpplus_get_qsfp_options_from_data(nim_i2c_ctx_p ctx)
+{
+	/* Probe order matches the original call sequence */
+	static const struct {
+		uint16_t value_addr;
+		uint8_t lane_count;
+		uint16_t limit_addr;
+		bool two_compl;
+		uint32_t option;
+	} sensors[] = {
+		{ QSFP_TEMP_LIN_ADDR, 1, QSFP_TEMP_THRESH_LIN_ADDR, true,
+		  NIM_OPTION_TEMP },
+		{ QSFP_VOLT_LIN_ADDR, 1, QSFP_VOLT_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_SUPPLY },
+		{ QSFP_RX_PWR_LIN_ADDR, 4, QSFP_RX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_RX_POWER },
+		{ QSFP_TX_PWR_LIN_ADDR, 4, QSFP_TX_PWR_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_POWER },
+		{ QSFP_TX_BIAS_LIN_ADDR, 4, QSFP_BIAS_THRESH_LIN_ADDR, false,
+		  NIM_OPTION_TX_BIAS },
+	};
+	size_t i;
+
+	ctx->options = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sensors); i++)
+		qsfpplus_find_qsfp_sensor_option(ctx, sensors[i].value_addr,
+						 sensors[i].lane_count,
+						 sensors[i].limit_addr,
+						 sensors[i].two_compl,
+						 sensors[i].option);
+}
+
+/*
+ * Classify an SFP/SFP+/SFP28 module into a port type from its EEPROM data:
+ * nominal bit rate, connector type, ethernet compliance codes, transmitter
+ * technology, DMI options and supported fiber lengths.
+ * Sets ctx->port_type and the sfp-specific flags (sfp28, sfpplus, dual_rate,
+ * cu_type, tri_speed, hw/sw_rate_sel).
+ */
+static void sfp_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	uint16_t bit_rate_nom;
+	uint8_t connector;
+	uint8_t gig_eth_comp;
+	uint8_t dmi_opt;
+	uint8_t fiber_chan_tx_tech;
+	unsigned int len_sm;
+	unsigned int len_mm_50um;
+	unsigned int len_mm_62_5um;
+
+	ctx->specific_u.sfp.sfp28 = false;
+
+	/* gigEthComp: */
+	static const uint8_t eth_1000_b_t = 1 << 3;
+	static const uint8_t eth_1000_b_sx = 1 << 0;
+	static const uint8_t eth_1000_b_lx = 1 << 1;
+
+	/* fiberChanTxTech: */
+	static const uint8_t cu_passive = 1 << 2;
+	static const uint8_t cu_active = 1 << 3;
+
+	/* dmiOpt: */
+	static const uint8_t dd_present = 1 << 6;
+
+	/* connector: */
+	static const uint8_t cu_pig_tail = 0x21;
+
+	ctx->port_type = NT_PORT_TYPE_SFP_NOT_RECOGNISED;
+
+	/* Nominal bit rate is encoded in units of 100 Mbit/s */
+	read_data_lin(ctx, 12, sizeof(data), &data);
+	bit_rate_nom = (uint16_t)(data * 100);
+
+	read_data_lin(ctx, 2, sizeof(connector), &connector);
+	read_data_lin(ctx, 6, sizeof(gig_eth_comp), &gig_eth_comp);
+	read_data_lin(ctx, 92, sizeof(dmi_opt), &dmi_opt);
+	read_data_lin(ctx, 8, sizeof(fiber_chan_tx_tech), &fiber_chan_tx_tech);
+
+	read_data_lin(ctx, 15, sizeof(data), &data);
+	len_sm = (unsigned int)data * 100; /* Unit is 100m */
+
+	read_data_lin(ctx, 16, sizeof(data), &data);
+	len_mm_50um = (unsigned int)data * 10; /* Unit is 10m */
+
+	read_data_lin(ctx, 17, sizeof(data), &data);
+	len_mm_62_5um = (unsigned int)data * 10; /* Unit is 10m */
+
+	/* First find out if it is a SFP or a SFP+ NIM */
+	if (bit_rate_nom == 0) {
+		/*
+		 * A Nominal bit rate of zero indicates that it has not been defined and must
+		 * be deduced from transceiver technology
+		 */
+		ctx->specific_u.sfp.sfpplus = !(gig_eth_comp & eth_1000_b_t);
+	} else if (bit_rate_nom == 25500) {
+		/* 25.5 Gbit/s nominal rate identifies an SFP28 module */
+		/* SFF-8024 - 4.4 Extended Specification Compliance References */
+		read_data_lin(ctx, 36, sizeof(data), &data);
+
+		if (data == 0x02)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_SR;
+		else if (data == 0x03)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_LR;
+		else if (data == 0x0B)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_L;
+		else if (data == 0x0C)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_S;
+		else if (data == 0x0D)
+			ctx->port_type = NT_PORT_TYPE_SFP_28_CR_CA_N;
+		else
+			ctx->port_type = NT_PORT_TYPE_SFP_28;
+
+		ctx->specific_u.sfp.sfp28 = true;
+		ctx->specific_u.sfp.sfpplus = true;
+
+		/*
+		 * Allowlist of 25G transceivers known to also support 10G.
+		 * There is no way to inquire about this capability.
+		 */
+		if ((strcmp(ctx->prod_no, "TR-PZ85S-N00") == 0) ||
+				(strcmp(ctx->prod_no, "TR-PZ13L-N00") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF8536P4BCV") == 0) ||
+				(strcmp(ctx->prod_no, "FTLF1436P4BCV") == 0)) {
+			ctx->specific_u.sfp.dual_rate = true;
+
+			/* Change the port type for dual rate modules */
+			if (ctx->port_type == NT_PORT_TYPE_SFP_28_SR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_SR_DR;
+			else if (ctx->port_type == NT_PORT_TYPE_SFP_28_LR)
+				ctx->port_type = NT_PORT_TYPE_SFP_28_LR_DR;
+		}
+
+		return;
+	}
+	/* 10 Gbit/s nominal rate or above means SFP+ */
+	ctx->specific_u.sfp.sfpplus = (bit_rate_nom >= 10000);
+	/* Then find sub-types of each */
+	if (ctx->specific_u.sfp.sfpplus) {
+		if (fiber_chan_tx_tech & cu_active) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+		} else if (fiber_chan_tx_tech & cu_passive) {
+			if (connector == cu_pig_tail)
+				ctx->port_type =
+					NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC;
+			else
+				ctx->port_type = NT_PORT_TYPE_SFP_PLUS_CU;
+		} else {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS;
+		}
+		if (gig_eth_comp & (eth_1000_b_sx | eth_1000_b_lx)) {
+			ctx->port_type = NT_PORT_TYPE_SFP_PLUS_DUAL_RATE;
+			ctx->specific_u.sfp.dual_rate = true;
+		}
+
+		read_data_lin(ctx, 65, sizeof(data), &data);
+		/* Test hard RATE_SELECT bit */
+		ctx->specific_u.sfp.hw_rate_sel = ((data & (1 << 5)) != 0);
+
+		read_data_lin(ctx, 93, sizeof(data), &data);
+		/* Test soft RATE_SELECT bit */
+		ctx->specific_u.sfp.sw_rate_sel = ((data & (1 << 3)) != 0);
+	} else { /* SFP */
+		/* 100M */
+		if (bit_rate_nom != 0 && bit_rate_nom < 1000) {
+			ctx->port_type = NT_PORT_TYPE_SFP_FX;
+		/* 1G */
+		} else {
+			ctx->specific_u.sfp.cu_type = false;
+			if (gig_eth_comp & eth_1000_b_sx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			} else if (gig_eth_comp & eth_1000_b_lx) {
+				ctx->port_type = NT_PORT_TYPE_SFP_LX;
+			} else if (gig_eth_comp & eth_1000_b_t) {
+				ctx->specific_u.sfp.tri_speed =
+					sfp_is_supported_tri_speed_pn(ctx->prod_no);
+
+				if (ctx->specific_u.sfp.tri_speed) {
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED;
+				} else {
+					ctx->port_type = NT_PORT_TYPE_SFP_CU;
+				}
+				ctx->specific_u.sfp.cu_type = true;
+			} else {
+				/*
+				 * Not all modules report their ethernet compliance correctly so use
+				 * length indicators
+				 */
+				if (len_sm > 0)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX;
+				else if ((len_mm_50um > 0) || (len_mm_62_5um > 0))
+					ctx->port_type = NT_PORT_TYPE_SFP_SX;
+			}
+
+			/* Add Diagnostic Data suffix if necessary */
+			if (dmi_opt & dd_present) {
+				if (ctx->port_type == NT_PORT_TYPE_SFP_SX)
+					ctx->port_type = NT_PORT_TYPE_SFP_SX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_LX)
+					ctx->port_type = NT_PORT_TYPE_SFP_LX_DD;
+				else if (ctx->port_type == NT_PORT_TYPE_SFP_CU)
+					ctx->port_type = NT_PORT_TYPE_SFP_CU_DD;
+				else if (ctx->port_type ==
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED)
+					ctx->port_type =
+						NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD;
+			}
+		}
+	}
+}
+
+
+/*
+ * Derive the supported speed mask from the module family and port type
+ * determined by sfp_find_port_params().
+ */
+static void sfp_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->specific_u.sfp.sfp28) {
+		/* SFP28: 25G base, plus 10G for dual-rate parts */
+		ctx->speed_mask = NT_LINK_SPEED_25G;
+
+		if (ctx->specific_u.sfp.dual_rate)
+			ctx->speed_mask |= NT_LINK_SPEED_10G;
+	} else if (ctx->specific_u.sfp.sfpplus) {
+		/* SFP+: 10G base, plus 1G for dual-rate parts and DAC cables */
+		bool allow_1g = ctx->specific_u.sfp.dual_rate ||
+			ctx->port_type == NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC ||
+			ctx->port_type == NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC;
+
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+
+		if (allow_1g)
+			ctx->speed_mask |= NT_LINK_SPEED_1G;
+	} else if (ctx->port_type == NT_PORT_TYPE_SFP_FX) {
+		ctx->speed_mask = NT_LINK_SPEED_100M;
+	} else {
+		/* Plain SFP: 1G base, slower rates for multi-speed parts */
+		ctx->speed_mask = NT_LINK_SPEED_1G;
+
+		if (ctx->specific_u.sfp.dual_rate ||
+				ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_100M;
+
+		if (ctx->specific_u.sfp.tri_speed)
+			ctx->speed_mask |= NT_LINK_SPEED_10M;
+	}
+
+	if (ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_L ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_S ||
+			ctx->port_type == NT_PORT_TYPE_SFP_28_CR_CA_N) {
+		/* Enable multiple speed setting for SFP28 DAC cables */
+		ctx->speed_mask = (NT_LINK_SPEED_25G | NT_LINK_SPEED_10G |
+				  NT_LINK_SPEED_1G);
+	}
+}
+
+/*
+ * Classify a QSFP28 module into a port type from its extended specification
+ * compliance code; falls back to the generic QSFP28 type.
+ */
+static void qsfp28_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t fiber_chan_speed;
+
+	/* Table 6-17 SFF-8636 */
+	read_data_lin(ctx, QSFP_SPEC_COMPLIANCE_CODES_ADDR, 1, &fiber_chan_speed);
+
+	/* Bit 7: extended compliance code is valid */
+	if (!(fiber_chan_speed & (1 << 7))) {
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+		return;
+	}
+
+	/* SFF-8024, Rev 4.7, Table 4-4 */
+	uint8_t ext_compliance_code = 0;
+
+	read_data_lin(ctx, QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR, 1,
+		    &ext_compliance_code);
+
+	switch (ext_compliance_code) {
+	case 0x02:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_SR4;
+		break;
+	case 0x03:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR4;
+		break;
+	case 0x0B:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_L;
+		break;
+	case 0x0C:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_S;
+		break;
+	case 0x0D:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_CR_CA_N;
+		break;
+	case 0x25:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_DR;
+		break;
+	case 0x26:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_FR;
+		break;
+	case 0x27:
+		ctx->port_type = NT_PORT_TYPE_QSFP28_LR;
+		break;
+	default:
+		ctx->port_type = NT_PORT_TYPE_QSFP28;
+		break;
+	}
+}
+
+/*
+ * If true the user must actively select the desired rate. If false the module
+ * however can still support several rates without the user is required to select
+ * one of them. Supported rates must then be deduced from the product number.
+ * SFF-8636, Rev 2.10a:
+ * p40: 6.2.7 Rate Select
+ * p85: A.2 Rate Select
+ */
+static bool qsfp28_is_rate_selection_enabled(nim_i2c_ctx_p ctx)
+{
+	enum {
+		EXT_RATE_SELECT_COMPL_REG_ADDR = 141,
+		OPTIONS_REG_ADDR = 195,
+		ENH_OPTIONS_REG_ADDR = 221,
+	};
+
+	/* Options register, bit 5: rate select implemented */
+	if (((read_byte(ctx, OPTIONS_REG_ADDR) >> 5) & 0x01) == 0)
+		return false;
+
+	/* Enhanced options register, bits 3..2: rate select type */
+	uint8_t sel_type = (read_byte(ctx, ENH_OPTIONS_REG_ADDR) >> 2) & 0x03;
+
+	if (sel_type != 2) {
+		NT_LOG(DBG, PMD, "NIM has unhandled rate select type (%d)",
+		       sel_type);
+		return false;
+	}
+
+	/* Extended rate select compliance register, bits 1..0: version */
+	uint8_t ext_ver = read_byte(ctx, EXT_RATE_SELECT_COMPL_REG_ADDR) & 0x03;
+
+	if (ext_ver != 0x02) {
+		NT_LOG(DBG, PMD,
+		       "NIM has unhandled extended rate select version (%d)",
+		       ext_ver);
+		return false;
+	}
+
+	return true; /* When true selectRate() can be used */
+}
+
+/*
+ * Derive the supported speed mask for a QSFP28 module from its port type,
+ * the selected lane and whether rate selection is available.
+ */
+static void qsfp28_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	const bool all_lanes = (ctx->lane_idx < 0);
+	const bool pam4 = ctx->port_type == NT_PORT_TYPE_QSFP28_FR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_DR ||
+		ctx->port_type == NT_PORT_TYPE_QSFP28_LR;
+
+	if (pam4) {
+		/* PAM-4 modules can only run on all lanes together */
+		ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : 0;
+		return;
+	}
+
+	ctx->speed_mask = all_lanes ? NT_LINK_SPEED_100G : NT_LINK_SPEED_25G;
+
+	if (qsfp28_is_rate_selection_enabled(ctx)) {
+		/*
+		 * It is assumed that if the module supports dual rates then the other rate
+		 * is 10G per lane or 40G for all lanes.
+		 */
+		if (all_lanes)
+			ctx->speed_mask |= NT_LINK_SPEED_40G;
+		else
+			ctx->speed_mask = NT_LINK_SPEED_10G;
+	}
+}
+
+/* Classify the QSFP+ port from the module's transmitter technology byte */
+static void qsfpplus_find_port_params(nim_i2c_ctx_p ctx)
+{
+	uint8_t device_tech;
+
+	read_data_lin(ctx, QSFP_TRANSMITTER_TYPE_LIN_ADDR, sizeof(device_tech),
+		    &device_tech);
+
+	/* Only the upper nibble encodes the technology */
+	const uint8_t tech = device_tech & 0xF0;
+
+	if (tech == 0xA0 || tech == 0xB0) {
+		/* Copper cable, unequalized or passive equalized */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PASSIVE_DAC;
+	} else if (tech >= 0xC0) {
+		/* Copper cable with active equalizers (limiting or linear) */
+		ctx->port_type = NT_PORT_TYPE_QSFP_ACTIVE_DAC;
+	} else {
+		/* Optical */
+		ctx->port_type = NT_PORT_TYPE_QSFP_PLUS;
+	}
+}
+
+/* All four lanes together run 40G; a single lane runs 10G */
+static void qsfpplus_set_speed_mask(nim_i2c_ctx_p ctx)
+{
+	if (ctx->lane_idx < 0)
+		ctx->speed_mask = NT_LINK_SPEED_40G;
+	else
+		ctx->speed_mask = NT_LINK_SPEED_10G;
+}
+
+/*
+ * Read the SFP's basic data and, on success, derive its port type and
+ * supported speeds. Returns 0 on success, non-zero on read failure.
+ */
+static int sfp_preinit(nim_i2c_ctx_p ctx)
+{
+	const int res = sfp_read_basic_data(ctx);
+
+	if (res)
+		return res;
+
+	sfp_find_port_params(ctx);
+	sfp_set_speed_mask(ctx);
+	return 0;
+}
+
+/* Initialize QSFP+ context fields; lane_idx is -1 (all lanes) or 0..3 */
+static void qsfpplus_construct(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	assert(lane_idx < 4);
+	ctx->lane_idx = lane_idx;
+	ctx->lane_count = 4;
+	ctx->specific_u.qsfp.qsfp28 = false;
+}
+
+/*
+ * Construct the QSFP+ context, read the module's basic data and detect its
+ * options (sensors, TX_DISABLE, RX-only). Returns 0 on success.
+ */
+static int qsfpplus_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	qsfpplus_construct(ctx, lane_idx);
+
+	int res = qsfpplus_read_basic_data(ctx);
+
+	if (res)
+		return res;
+
+	qsfpplus_find_port_params(ctx);
+
+	/*
+	 * If not on the known modules list try to figure out which sensors that are present
+	 */
+	if (!qsfpplus_get_qsfp_options_from_pn(ctx)) {
+		NT_LOG(DBG, NTHW,
+		       "NIM options not known in advance - trying to detect");
+		qsfpplus_get_qsfp_options_from_data(ctx);
+	}
+
+	/*
+	 * Read if TX_DISABLE has been implemented
+	 * For passive optical modules this is required while it for copper and active
+	 * optical modules is optional. Under all circumstances register 195.4 will
+	 * indicate, if TX_DISABLE has been implemented in register 86.0-3
+	 */
+	uint8_t option3;
+
+	read_data_lin(ctx, QSFP_OPTION3_LIN_ADDR, sizeof(option3), &option3);
+
+	ctx->tx_disable = (option3 & QSFP_OPTION3_TX_DISABLE_BIT) != 0;
+	if (ctx->tx_disable)
+		ctx->options |= (1 << NIM_OPTION_TX_DISABLE);
+
+	/*
+	 * Previously - considering AFBR-89BRDZ - code tried to establish if a module was
+	 * RxOnly by testing the state of the lasers after reset. Lasers were for this
+	 * module default disabled.
+	 * However that code did not work for GigaLight, GQS-MPO400-SR4C so it was
+	 * decided that this option should not be detected automatically but from PN
+	 */
+	ctx->specific_u.qsfp.rx_only =
+		(ctx->options & (1 << NIM_OPTION_RX_ONLY)) != 0;
+	qsfpplus_set_speed_mask(ctx);
+	return 0;
+}
+
+/*
+ * After module reset, wait until the QSFP28 reports ready (or a fixed delay
+ * when no init-complete flag is available) so upper-page reads are valid.
+ */
+static void qsfp28_wait_for_ready_after_reset(nim_i2c_ctx_p ctx)
+{
+	uint8_t data;
+	bool has_init_complete_flag = false;
+
+	/*
+	 * Revision compliance
+	 * 7: SFF-8636 Rev 2.5, 2.6 and 2.7
+	 * 8: SFF-8636 Rev 2.8, 2.9 and 2.10
+	 */
+	read_data_lin(ctx, 1,
+		      sizeof(ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance),
+		      &ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+	NT_LOG(DBG, NTHW, "NIM RevCompliance = %d",
+	       ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance);
+
+	/* Wait if lane_idx == -1 (all lanes are used) or lane_idx == 0 (the first lane) */
+	if (ctx->lane_idx > 0)
+		return;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.rev_compliance >= 7) {
+		/* Check if init complete flag is implemented */
+		read_data_lin(ctx, 221, sizeof(data), &data);
+		has_init_complete_flag = (data & (1 << 4)) != 0;
+	}
+
+	NT_LOG(DBG, NTHW, "NIM InitCompleteFlagPresent = %d",
+	       has_init_complete_flag);
+
+	/*
+	 * If the init complete flag is not present then wait 500ms that together with 500ms
+	 * after reset (in the adapter code) should be enough to read data from upper pages
+	 * that otherwise would not be ready. Especially BiDi modules AFBR-89BDDZ have been
+	 * prone to this when trying to read sensor options using getQsfpOptionsFromData()
+	 * Probably because access to the paged address space is required.
+	 */
+	if (!has_init_complete_flag) {
+		NT_OS_WAIT_USEC(500000);
+		return;
+	}
+
+	/* Otherwise poll the init complete flag in 100 ms steps; give up after 1 s */
+	for (int count = 0;; count++) {
+		if (count > 10) { /* 1 s timeout */
+			NT_LOG(WRN, NTHW, "Timeout waiting for module ready");
+			break;
+		}
+
+		read_data_lin(ctx, 6, sizeof(data), &data);
+
+		if (data & 0x01) {
+			NT_LOG(DBG, NTHW, "Module ready after %dms",
+			       count * 100);
+			break;
+		}
+
+		NT_OS_WAIT_USEC(100000); /* 100 ms */
+	}
+}
+
+/*
+ * Detect the module's FEC capabilities: first via a product-number list of
+ * known modules, otherwise via the SFF-8636 page 3 capability register.
+ */
+static void qsfp28_get_fec_options(nim_i2c_ctx_p ctx)
+{
+	const char *const nim_list[] = {
+		"AFBR-89BDDZ", /* Avago BiDi */
+		"AFBR-89BRDZ", /* Avago BiDi, RxOnly */
+		"FTLC4352RKPL", /* Finisar QSFP28-LR */
+		"FTLC4352RHPL", /* Finisar QSFP28-DR */
+		"FTLC4352RJPL", /* Finisar QSFP28-FR */
+		"SFBR-89BDDZ-CS4", /* Foxconn, QSFP28 100G/40G BiDi */
+	};
+
+	for (size_t i = 0; i < ARRAY_SIZE(nim_list); i++) {
+		/*
+		 * Compare string contents; the previous pointer comparison
+		 * (prod_no == nim_list[i]) compared a char array against a
+		 * string literal address and could never match.
+		 */
+		if (strcmp(ctx->prod_no, nim_list[i]) == 0) {
+			ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+			ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ena =
+				true;
+			NT_LOG(DBG, NTHW, "Found FEC info via PN list");
+			return;
+		}
+	}
+
+	/*
+	 * For modules not in the list find FEC info via registers
+	 * Read if the module has controllable FEC
+	 * SFF-8636, Rev 2.10a TABLE 6-28 Equalizer, Emphasis, Amplitude and Timing)
+	 * (Page 03h, Bytes 224-229)
+	 */
+	uint8_t data;
+	uint16_t addr = 227 + 3 * 128;
+
+	read_data_lin(ctx, addr, sizeof(data), &data);
+
+	/* Check if the module has FEC support that can be controlled */
+	ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl =
+		(data & (1 << 6)) != 0;
+	ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl =
+		(data & (1 << 7)) != 0;
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.media_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_MEDIA_SIDE_FEC);
+
+	if (ctx->specific_u.qsfp.specific_u.qsfp28.host_side_fec_ctrl)
+		ctx->options |= (1 << NIM_OPTION_HOST_SIDE_FEC);
+}
+
+/*
+ * QSFP28 preinit: runs the QSFP+ preinit first, then adds the QSFP28-specific
+ * detection (ready wait, port params, FEC options, speed mask).
+ */
+static int qsfp28_preinit(nim_i2c_ctx_p ctx, int8_t lane_idx)
+{
+	int res = qsfpplus_preinit(ctx, lane_idx);
+
+	if (!res) {
+		/*
+		 * Clear the QSFP28-specific state BEFORE it is populated.
+		 * Clearing it after qsfp28_wait_for_ready_after_reset() - as
+		 * the code previously did - wiped the rev_compliance value
+		 * that function had just stored in the same struct.
+		 */
+		memset(&ctx->specific_u.qsfp.specific_u.qsfp28, 0,
+		       sizeof(ctx->specific_u.qsfp.specific_u.qsfp28));
+		qsfp28_wait_for_ready_after_reset(ctx);
+		ctx->specific_u.qsfp.qsfp28 = true;
+		qsfp28_find_port_params(ctx);
+		qsfp28_get_fec_options(ctx);
+		qsfp28_set_speed_mask(ctx);
+	}
+	return res;
+}
+
+/*
+ * Build the per-port linked list of SFP sensor groups (temperature, voltage,
+ * bias current, TX/RX power) and count them in *nim_sensors_cnt.
+ * Stops early if any sensor group allocation fails.
+ */
+static void sfp_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+				  struct nim_sensor_group **nim_sensors_ptr,
+				  uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(sfp_sensors_level0[0].name) == 0) {
+		if (ctx->specific_u.sfp.sfp28) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP28",
+				sizeof(sfp_sensors_level0[0].name));
+		} else if (ctx->specific_u.sfp.sfpplus) {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP+",
+				sizeof(sfp_sensors_level0[0].name));
+		} else {
+			rte_strscpy(sfp_sensors_level0[0].name, "SFP",
+				sizeof(sfp_sensors_level0[0].name));
+		}
+	}
+
+	/* allocate temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no,
+							       ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &sfp_sensors_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	/* allocate_nim_sensor_group() returns NULL on allocation failure */
+	if (sensor == NULL)
+		return;
+	sensor->read = &nim_read_sfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[0]);
+	if (sensor->next == NULL)
+		return;
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[1]);
+	if (sensor->next == NULL)
+		return;
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_bias_current;
+	(*nim_sensors_cnt)++;
+
+	/* tx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[2]);
+	if (sensor->next == NULL)
+		return;
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_tx_power;
+	(*nim_sensors_cnt)++;
+
+	/* rx power */
+	sensor->next = allocate_nim_sensor_group(m_port_no,
+						 ctx,
+						 NT_SENSOR_SOURCE_PORT,
+						 &sfp_sensors_level1[3]);
+	if (sensor->next == NULL)
+		return;
+	sensor = sensor->next;
+	sensor->read = &nim_read_sfp_rx_power;
+	(*nim_sensors_cnt)++;
+}
+
+/*
+ * Build the per-port linked list of QSFP+/QSFP28 sensor groups: temperature,
+ * voltage, and per-lane bias current, TX power and RX power (4 lanes each).
+ * Stops early if any sensor group allocation fails.
+ */
+static void
+qsfp_plus_nim_add_all_sensors(uint8_t m_port_no, nim_i2c_ctx_t *ctx,
+			   struct nim_sensor_group **nim_sensors_ptr,
+			   uint16_t *nim_sensors_cnt)
+{
+	struct nim_sensor_group *sensor = NULL;
+	/* Reset the counter as the SFP variant of this function does */
+	*nim_sensors_cnt = 0;
+
+	if (ctx == NULL || nim_sensors_ptr == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	/*
+	 * If the user has not provided a name for the temperature sensor then apply
+	 * one automatically
+	 */
+	if (strlen(qsfp_sensor_level0[0].name) == 0) {
+		if (ctx->specific_u.qsfp.qsfp28)
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP28",
+				sizeof(qsfp_sensor_level0[0].name));
+		else
+			rte_strscpy(qsfp_sensor_level0[0].name, "QSFP+",
+				sizeof(qsfp_sensor_level0[0].name));
+	}
+
+	/* temperature sensor */
+	nim_sensors_ptr[m_port_no] = allocate_nim_sensor_group(m_port_no, ctx,
+							       NT_SENSOR_SOURCE_PORT,
+							       &qsfp_sensor_level0[0]);
+	sensor = nim_sensors_ptr[m_port_no];
+	/* allocate_nim_sensor_group() returns NULL on allocation failure */
+	if (sensor == NULL)
+		return;
+	sensor->read = &nim_read_qsfp_temp;
+	(*nim_sensors_cnt)++;
+
+	/* voltage */
+	sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+						 NT_SENSOR_SOURCE_LEVEL1_PORT,
+						 &qsfp_sensor_level1[0]);
+	if (sensor->next == NULL)
+		return;
+	sensor = sensor->next;
+	sensor->read = &nim_read_qsfp_voltage;
+	(*nim_sensors_cnt)++;
+
+	/* bias current sensors */
+	for (uint8_t i = 1; i < 5; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		if (sensor->next == NULL)
+			return;
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_bias_current;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* tx power */
+	for (uint8_t i = 5; i < 9; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		if (sensor->next == NULL)
+			return;
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_tx_power;
+		(*nim_sensors_cnt)++;
+	}
+
+	/* rx power */
+	for (uint8_t i = 9; i < 13; i++) {
+		sensor->next = allocate_nim_sensor_group(m_port_no, ctx,
+							 NT_SENSOR_SOURCE_LEVEL1_PORT,
+							 &qsfp_sensor_level1[i]);
+		if (sensor->next == NULL)
+			return;
+		sensor = sensor->next;
+		sensor->read = &nim_read_qsfp_rx_power;
+		(*nim_sensors_cnt)++;
+	}
+}
+
+/*
+ * Allocate one sensor-group list node and attach the sensor described by
+ * @sd. Returns NULL when the node allocation fails. Caller owns the node.
+ */
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd)
+{
+	struct nim_sensor_group *group = malloc(sizeof(*group));
+
+	if (group == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+
+	group->sensor = allocate_sensor_by_description(port, ssrc, sd);
+	group->ctx = ctx;
+	group->next = NULL;
+	return group;
+}
+
+/*
+ * Classify the NIM, run the type-specific preinit and register its sensors.
+ * @extra optionally carries the lane index (int8_t) for QSFP modules.
+ * Returns 0 on success, non-zero on construct/preinit failure or an
+ * unsupported NIM type.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt)
+{
+	int res = i2c_nim_common_construct(ctx);
+
+	switch (translate_nimid(ctx)) {
+	case NT_NIM_SFP_SFP_PLUS:
+		/* Propagate preinit failures; they were previously discarded */
+		if (res == 0)
+			res = sfp_preinit(ctx);
+		sfp_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP_PLUS:
+		if (res == 0)
+			res = qsfpplus_preinit(ctx,
+					       extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	case NT_NIM_QSFP28:
+		if (res == 0)
+			res = qsfp28_preinit(ctx,
+					     extra ? *(int8_t *)extra : (int8_t)-1);
+		qsfp_plus_nim_add_all_sensors(port, ctx, nim_sensors_ptr,
+					      nim_sensors_cnt);
+		break;
+	default:
+		res = 1;
+		NT_LOG(ERR, NTHW, "NIM type %s is not supported.\n",
+		       nim_id_to_text(ctx->nim_id));
+		break;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nim/i2c_nim.h b/drivers/net/ntnic/nim/i2c_nim.h
new file mode 100644
index 0000000000..f664e6b7ee
--- /dev/null
+++ b/drivers/net/ntnic/nim/i2c_nim.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef I2C_NIM_H_
+#define I2C_NIM_H_
+
+#include "nthw_drv.h"
+#include "nim_defines.h"
+#include "nt_link_speed.h"
+
+#include "sensors.h"
+
+/* Minimal NIM link state snapshot */
+typedef struct sfp_nim_state {
+	uint8_t br; /* bit rate, units of 100 MBits/sec */
+} sfp_nim_state_t, *sfp_nim_state_p;
+
+/*
+ * Per-port NIM context: I2C access handle, identification strings read from
+ * the module EEPROM, and module-type-specific capability flags.
+ */
+typedef struct nim_i2c_ctx {
+	nthw_iic_t hwiic; /* depends on *Fpga_t, instance number, and cycle time */
+	uint8_t instance;
+	uint8_t devaddr; /* I2C device address — presumably one of nim_i2c_0x*; verify */
+	uint8_t regaddr;
+	uint8_t nim_id; /* raw identifier byte; see enum nt_nim_identifier_e */
+	nt_port_type_t port_type;
+
+	/* NUL-terminated identification strings read from the module */
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	bool avg_pwr;
+	bool content_valid;
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	uint16_t len_info[5];
+	uint32_t speed_mask; /* Speeds supported by the NIM */
+	int8_t lane_idx; /* Is this associated with a single lane or all lanes (-1) */
+	uint8_t lane_count;
+	uint32_t options; /* bitmask of (1 << nim_option_t) capability bits */
+	bool tx_disable;
+	bool dmi_supp;
+
+	/* Module-family-specific data; selected by the detected NIM type */
+	union {
+		struct {
+			bool sfp28;
+			bool sfpplus;
+			bool dual_rate;
+			bool hw_rate_sel;
+			bool sw_rate_sel;
+			bool cu_type;
+			bool tri_speed;
+			bool ext_cal;
+			bool addr_chg;
+		} sfp;
+
+		struct {
+			bool rx_only;
+			bool qsfp28; /* true when the module is QSFP28 rather than QSFP+ */
+			union {
+				struct {
+					uint8_t rev_compliance; /* SFF-8636 revision compliance byte */
+					bool media_side_fec_ctrl;
+					bool host_side_fec_ctrl;
+					bool media_side_fec_ena;
+					bool host_side_fec_ena;
+				} qsfp28;
+			} specific_u;
+		} qsfp;
+
+	} specific_u;
+} nim_i2c_ctx_t, *nim_i2c_ctx_p;
+
+/* Linked-list node tying one adapter sensor to its NIM context and reader */
+struct nim_sensor_group {
+	struct nt_adapter_sensor *sensor;
+	void (*read)(struct nim_sensor_group *sg, nthw_spis_t *t_spi); /* fills in the sensor value */
+	struct nim_i2c_ctx *ctx;
+	struct nim_sensor_group *next; /* next group for the same port, or NULL */
+};
+
+struct nim_sensor_group *
+allocate_nim_sensor_group(uint8_t port, struct nim_i2c_ctx *ctx,
+			  enum nt_sensor_source_e ssrc,
+			  struct nt_adapter_sensor_description *sd);
+
+/*
+ * Utility functions
+ */
+
+nt_nim_identifier_t translate_nimid(const nim_i2c_ctx_t *ctx);
+
+/*
+ * Builds an nim state for the port implied by `ctx`, returns zero
+ * if successful, and non-zero otherwise. SFP and QSFP nims are supported
+ */
+int nim_state_build(nim_i2c_ctx_t *ctx, sfp_nim_state_t *state);
+
+/*
+ * Returns a type name such as "SFP/SFP+" for a given NIM type identifier,
+ * or the string "ILLEGAL!".
+ */
+const char *nim_id_to_text(uint8_t nim_id);
+
+int nim_sfp_nim_set_tx_laser_disable(nim_i2c_ctx_p ctx, bool disable);
+
+int nim_qsfp_plus_nim_set_tx_laser_disable(nim_i2c_ctx_t *ctx, bool disable,
+				       int lane_idx);
+
+int nim_set_link_speed(nim_i2c_ctx_p ctx, nt_link_speed_t speed);
+
+/*
+ * This function tries to classify NIM based on its ID and some register reads
+ * and collects information into ctx structure. The @extra parameter could contain
+ * the initialization argument for specific type of NIMS.
+ */
+int construct_and_preinit_nim(nim_i2c_ctx_p ctx, void *extra, uint8_t port,
+			      struct nim_sensor_group **nim_sensors_ptr,
+			      uint16_t *nim_sensors_cnt);
+
+int read_data_lin(nim_i2c_ctx_p ctx, uint16_t lin_addr, uint16_t length,
+		void *data);
+
+#endif /* I2C_NIM_H_ */
diff --git a/drivers/net/ntnic/nim/nim_defines.h b/drivers/net/ntnic/nim/nim_defines.h
new file mode 100644
index 0000000000..da3567d073
--- /dev/null
+++ b/drivers/net/ntnic/nim/nim_defines.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NIM_DEFINES_H_
+#define NIM_DEFINES_H_
+
+#define NIM_IDENTIFIER_ADDR 0 /* 1 byte */
+
+#define SFP_BIT_RATE_ADDR 12 /* 1 byte */
+#define SFP_VENDOR_NAME_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_ADDR 84 /* 8bytes */
+
+#define SFP_CONTROL_STATUS_LIN_ADDR (110U + 256U) /* 0xA2 */
+#define SFP_SOFT_TX_DISABLE_BIT (1U << 6)
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_SUP_LEN_INFO_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_REV_ADDR 184 /* 2bytes */
+#define QSFP_VENDOR_SN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_ADDR 212 /* 8bytes */
+
+/* I2C addresses */
+#define nim_i2c_0xa0 0xA0 /* Basic I2C address */
+#define nim_i2c_0xa2 0xA2 /* Diagnostic monitoring */
+#define nim_i2c_0xac 0xAC /* Address of integrated PHY */
+
+typedef enum {
+	NIM_OPTION_TEMP = 0,
+	NIM_OPTION_SUPPLY,
+	NIM_OPTION_RX_POWER,
+	NIM_OPTION_TX_BIAS,
+	NIM_OPTION_TX_POWER,
+	NIM_OPTION_TX_DISABLE,
+	/* Indicates that the module should be checked for the two next FEC types */
+	NIM_OPTION_FEC,
+	NIM_OPTION_MEDIA_SIDE_FEC,
+	NIM_OPTION_HOST_SIDE_FEC,
+	NIM_OPTION_RX_ONLY
+} nim_option_t;
+
+enum nt_nim_identifier_e {
+	NT_NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NT_NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NT_NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NT_NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NT_NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NT_NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NT_NIM_XFP = 0x06, /* Nim type = XFP */
+	NT_NIM_XFF = 0x07, /* Nim type = XFF */
+	NT_NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NT_NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NT_NIM_X2 = 0x0A, /* Nim type = X2 */
+	NT_NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NT_NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NT_NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NT_NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NT_NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+typedef enum nt_nim_identifier_e nt_nim_identifier_t;
+
+/*
+ * Port types
+ * The use of all non-generic XX_NOT_PRESENT is deprecated - use
+ * NT_PORT_TYPE_NIM_NOT_PRESENT instead
+ */
+enum nt_port_type_e {
+	NT_PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	NT_PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	NT_PORT_TYPE_RJ45, /* RJ45 type */
+	NT_PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	NT_PORT_TYPE_SFP_SX, /* SFP SX */
+	NT_PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	NT_PORT_TYPE_SFP_LX, /* SFP LX */
+	NT_PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	NT_PORT_TYPE_SFP_ZX, /* SFP ZX */
+	NT_PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	NT_PORT_TYPE_SFP_CU, /* SFP copper */
+	NT_PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	NT_PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	NT_PORT_TYPE_XFP, /* XFP */
+	NT_PORT_TYPE_XPAK, /* XPAK */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	NT_PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	NT_PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	NT_PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	NT_PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	NT_PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	NT_PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	NT_PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	NT_PORT_TYPE_CFP4, /* CFP4 type */
+	NT_PORT_TYPE_CFP4_LR4 = NT_PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	NT_PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	NT_PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	NT_PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	NT_PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	NT_PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	NT_PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	NT_PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	NT_PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	NT_PORT_TYPE_QSFP28, /* QSFP28 type */
+	NT_PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	NT_PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_PASSIVE_DAC =
+		NT_PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	NT_PORT_TYPE_QSFP_ACTIVE_DAC =
+		NT_PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	NT_PORT_TYPE_SFP_28, /* SFP28 type */
+	NT_PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	NT_PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	NT_PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	NT_PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	NT_PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	NT_PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	NT_PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	NT_PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	NT_PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	NT_PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	NT_PORT_TYPE_SFP_FX, /* SFP FX */
+	NT_PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	/* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_FR,
+	/* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_DR,
+	/* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+	NT_PORT_TYPE_QSFP28_LR,
+};
+
+typedef enum nt_port_type_e nt_port_type_t, *nt_port_type_p;
+
+#endif /* NIM_DEFINES_H_ */
diff --git a/drivers/net/ntnic/nim/nt_link_speed.c b/drivers/net/ntnic/nim/nt_link_speed.c
new file mode 100644
index 0000000000..35c75f5e56
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "nt_link_speed.h"
+
+/* Return a short display name for a single link speed value */
+const char *nt_translate_link_speed(nt_link_speed_t link_speed)
+{
+	static const struct {
+		nt_link_speed_t speed;
+		const char *name;
+	} speed_names[] = {
+		{ NT_LINK_SPEED_UNKNOWN, "NotAvail" },
+		{ NT_LINK_SPEED_10M, "10M" },
+		{ NT_LINK_SPEED_100M, "100M" },
+		{ NT_LINK_SPEED_1G, "1G" },
+		{ NT_LINK_SPEED_10G, "10G" },
+		{ NT_LINK_SPEED_25G, "25G" },
+		{ NT_LINK_SPEED_40G, "40G" },
+		{ NT_LINK_SPEED_50G, "50G" },
+		{ NT_LINK_SPEED_100G, "100G" },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++) {
+		if (speed_names[i].speed == link_speed)
+			return speed_names[i].name;
+	}
+
+	/* DEBUG assert: remind developer that a table entry is needed here.... */
+	assert(false);
+	return "Unhandled";
+}
+
+/* Convert a link speed enum value to bits per second (0 for unknown) */
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed)
+{
+	const uint64_t mega = 1000ULL * 1000ULL;
+	const uint64_t giga = mega * 1000ULL;
+
+	switch (e_link_speed) {
+	case NT_LINK_SPEED_UNKNOWN:
+		return 0ULL;
+	case NT_LINK_SPEED_10M:
+		return 10ULL * mega;
+	case NT_LINK_SPEED_100M:
+		return 100ULL * mega;
+	case NT_LINK_SPEED_1G:
+		return 1ULL * giga;
+	case NT_LINK_SPEED_10G:
+		return 10ULL * giga;
+	case NT_LINK_SPEED_25G:
+		return 25ULL * giga;
+	case NT_LINK_SPEED_40G:
+		return 40ULL * giga;
+	case NT_LINK_SPEED_50G:
+		return 50ULL * giga;
+	case NT_LINK_SPEED_100G:
+		return 100ULL * giga;
+	default:
+		/* DEBUG assert: remind developer that a switch/case entry is needed here.... */
+		assert(false);
+		return 0ULL;
+	}
+}
+
+/*
+ * Render a speed bitmask as a comma-separated list into @buffer (capacity
+ * @length, always NUL-terminated). Returns @buffer.
+ */
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length)
+{
+	size_t len = 0;
+
+	buffer[0] = 0;
+
+	for (int i = 0; i < 32; i++) {
+		if ((1U << i) & link_speed_mask) {
+			len = strlen(buffer);
+
+			/*
+			 * strncat()'s bound is the REMAINING space (excluding
+			 * the terminator), not the total buffer size; passing
+			 * `length` here could overflow `buffer` once it was
+			 * partly filled.
+			 */
+			if (len > 0 && (length - len - 1) > 2) {
+				strncat(buffer, ", ", length - len - 1);
+				len = strlen(buffer);
+			}
+
+			if (len < (length - 1))
+				strncat(buffer, nt_translate_link_speed(1 << i),
+					length - len - 1);
+		}
+	}
+
+	return buffer;
+}
diff --git a/drivers/net/ntnic/nim/nt_link_speed.h b/drivers/net/ntnic/nim/nt_link_speed.h
new file mode 100644
index 0000000000..969e3fb867
--- /dev/null
+++ b/drivers/net/ntnic/nim/nt_link_speed.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NT_LINK_SPEED_H_
+#define NT_LINK_SPEED_H_
+
+#include <stdint.h>
+
+/*
+ * Link speed.
+ * Note this is a bitmask.
+ */
+enum nt_link_speed_e {
+	NT_LINK_SPEED_UNKNOWN = 0,
+	NT_LINK_SPEED_10M = 0x01, /* 10 Mbps */
+	NT_LINK_SPEED_100M = 0x02, /* 100 Mbps */
+	NT_LINK_SPEED_1G = 0x04, /* 1 Gbps  (Autoneg only) */
+	NT_LINK_SPEED_10G = 0x08, /* 10 Gbps (Autoneg only) */
+	NT_LINK_SPEED_40G = 0x10, /* 40 Gbps (Autoneg only) */
+	NT_LINK_SPEED_100G = 0x20, /* 100 Gbps (Autoneg only) */
+	NT_LINK_SPEED_50G = 0x40, /* 50 Gbps (Autoneg only) */
+	NT_LINK_SPEED_25G = 0x80, /* 25 Gbps (Autoneg only) */
+	NT_LINK_SPEED_END /* always keep this entry as the last in enum */
+};
+
+typedef enum nt_link_speed_e nt_link_speed_t;
+
+const char *nt_translate_link_speed(nt_link_speed_t link_speed);
+const char *nt_translate_link_speed_mask(uint32_t link_speed_mask, char *buffer,
+				      uint32_t length);
+uint64_t nt_get_link_speed(nt_link_speed_t e_link_speed);
+
+#endif /* NT_LINK_SPEED_H_ */
diff --git a/drivers/net/ntnic/nim/qsfp_registers.h b/drivers/net/ntnic/nim/qsfp_registers.h
new file mode 100644
index 0000000000..366dcbf06e
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_registers.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _QSFP_REGISTERS_H
+#define _QSFP_REGISTERS_H
+
+/*
+ * QSFP Registers
+ */
+#define QSFP_INT_STATUS_RX_LOS_ADDR 3
+#define QSFP_TEMP_LIN_ADDR 22
+#define QSFP_VOLT_LIN_ADDR 26
+#define QSFP_RX_PWR_LIN_ADDR 34 /* uint16_t [0..3] */
+#define QSFP_TX_BIAS_LIN_ADDR 42 /* uint16_t [0..3] */
+#define QSFP_TX_PWR_LIN_ADDR 50 /* uint16_t [0..3] */
+
+#define QSFP_CONTROL_STATUS_LIN_ADDR 86
+#define QSFP_SOFT_TX_ALL_DISABLE_BITS 0x0F
+
+#define QSFP_EXTENDED_IDENTIFIER 129
+#define QSFP_POWER_CLASS_BITS_1_4 0xC0
+#define QSFP_POWER_CLASS_BITS_5_7 0x03
+
+#define QSFP_SUP_LEN_INFO_LIN_ADDR 142 /* 5bytes */
+#define QSFP_TRANSMITTER_TYPE_LIN_ADDR 147 /* 1byte */
+#define QSFP_VENDOR_NAME_LIN_ADDR 148 /* 16bytes */
+#define QSFP_VENDOR_PN_LIN_ADDR 168 /* 16bytes */
+#define QSFP_VENDOR_SN_LIN_ADDR 196 /* 16bytes */
+#define QSFP_VENDOR_DATE_LIN_ADDR 212 /* 8bytes */
+#define QSFP_VENDOR_REV_LIN_ADDR 184 /* 2bytes */
+
+#define QSFP_SPEC_COMPLIANCE_CODES_ADDR 131 /* 8 bytes */
+#define QSFP_EXT_SPEC_COMPLIANCE_CODES_ADDR 192 /* 1 byte */
+
+#define QSFP_OPTION3_LIN_ADDR 195
+#define QSFP_OPTION3_TX_DISABLE_BIT (1 << 4)
+
+#define QSFP_DMI_OPTION_LIN_ADDR 220
+#define QSFP_DMI_AVG_PWR_BIT (1 << 3)
+
+#define QSFP_TEMP_THRESH_LIN_ADDR (128 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_VOLT_THRESH_LIN_ADDR (144 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_RX_PWR_THRESH_LIN_ADDR (176 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_BIAS_THRESH_LIN_ADDR (184 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define QSFP_TX_PWR_THRESH_LIN_ADDR (192 + (3 * 128)) /* Page 3 */
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#endif /* _QSFP_REGISTERS_H */
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.c b/drivers/net/ntnic/nim/qsfp_sensors.c
new file mode 100644
index 0000000000..8264f8fb62
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdbool.h>
+
+#include "qsfp_sensors.h"
+
+#include "ntlog.h"
+#include "qsfp_registers.h"
+
+/*
+ * Read `count` consecutive 16-bit sensor words starting at linear address
+ * `addr` into p_lane_values (one value per lane). Always returns true.
+ */
+static bool qsfp_plus_nim_get_sensor(nim_i2c_ctx_p ctx, uint16_t addr,
+				   nim_option_t nim_option, uint8_t count,
+				   uint16_t *p_lane_values)
+{
+	(void)nim_option;
+
+	read_data_lin(ctx, addr, (uint16_t)(sizeof(uint16_t) * count),
+		    p_lane_values);
+
+	for (int i = 0; i < count; i++) {
+		/*
+		 * NOTE(review): the assignment below is a no-op, so no byte
+		 * swap actually happens despite the trailing comment —
+		 * confirm whether read_data_lin() already delivers host byte
+		 * order or whether a real 16-bit swap is missing here.
+		 */
+		*p_lane_values = (*p_lane_values); /* Swap to little endian */
+
+#ifdef NIM_DMI_TEST_VALUE
+		if (nim_option == NIM_OPTION_RX_POWER)
+			*p_lane_values = (uint16_t)NIM_DMI_RX_PWR_TEST_VALUE;
+		else
+			*p_lane_values = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+		p_lane_values++;
+	}
+
+	return true;
+}
+
+/*
+ * Read NIM temperature (raw 16-bit register value; the caller scales it,
+ * see nim_read_qsfp_temp())
+ */
+static bool qsfp_plus_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TEMP_LIN_ADDR, NIM_OPTION_TEMP,
+				      1, (uint16_t *)p_value);
+}
+
+/*
+ * Read NIM supply voltage (raw 16-bit register value; the caller scales it,
+ * see nim_read_qsfp_voltage())
+ */
+static bool qsfp_plus_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_VOLT_LIN_ADDR,
+				      NIM_OPTION_SUPPLY, 1, p_value);
+}
+
+/*
+ * Read NIM bias current for four lanes (p_value must hold 4 uint16_t)
+ */
+static bool qsfp_plus_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_BIAS_LIN_ADDR,
+				      NIM_OPTION_TX_BIAS, 4, p_value);
+}
+
+/*
+ * Read NIM TX optical power for four lanes (p_value must hold 4 uint16_t)
+ */
+static bool qsfp_plus_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_TX_PWR_LIN_ADDR,
+				      NIM_OPTION_TX_POWER, 4, p_value);
+}
+
+/*
+ * Read NIM RX optical power for four lanes (p_value must hold 4 uint16_t)
+ */
+static bool qsfp_plus_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	/*
+	 * Read the RX power registers; the code previously read from
+	 * QSFP_TX_PWR_LIN_ADDR here, so RX power reported TX power.
+	 */
+	return qsfp_plus_nim_get_sensor(ctx, QSFP_RX_PWR_LIN_ADDR,
+				      NIM_OPTION_RX_POWER, 4, p_value);
+}
+
+/* Sensor callback: publish the module temperature (or -1 on failure) */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t temp;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = qsfp_plus_nim_get_temperature(sg->ctx, &temp);
+
+	update_sensor_value(sg->sensor, ok ? (int)(temp * 10 / 256) : -1);
+}
+
+/* Sensor callback: publish the module supply voltage (or -1 on failure) */
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t volt;
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	const bool ok = qsfp_plus_nim_get_supply_voltage(sg->ctx, &volt);
+
+	update_sensor_value(sg->sensor, ok ? (int)(volt / 10) : -1);
+}
+
+/* Sensor callback: publish one bias-current value per active lane */
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_bias_current(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i] * 2);
+}
+
+/* Sensor callback: publish one TX-power value per active lane */
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_tx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
+
+/* Sensor callback: publish one RX-power value per active lane */
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t lanes[4] = { 0 };
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!qsfp_plus_nim_get_rx_power(sg->ctx, lanes)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	for (uint8_t i = 0; i < sg->ctx->lane_count; i++)
+		update_sensor_value(sg->sensor, (int)lanes[i]);
+}
diff --git a/drivers/net/ntnic/nim/qsfp_sensors.h b/drivers/net/ntnic/nim/qsfp_sensors.h
new file mode 100644
index 0000000000..de64b978cb
--- /dev/null
+++ b/drivers/net/ntnic/nim/qsfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Sensor read callbacks for QSFP/QSFP+ modules.
+ * Each function validates its arguments, reads one diagnostic quantity via
+ * the module context (sg->ctx) and publishes the result through
+ * update_sensor_value(); -1 is published when the read fails.
+ * The t_spi parameter is accepted for interface uniformity but unused here.
+ *
+ * NOTE(review): "_QSFP_H" (leading underscore followed by an uppercase
+ * letter) is a reserved identifier in C; consider renaming the guard to
+ * e.g. QSFP_SENSORS_H.
+ */
+#ifndef _QSFP_H
+#define _QSFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_qsfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_qsfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _QSFP_H */
diff --git a/drivers/net/ntnic/nim/sfp_p_registers.h b/drivers/net/ntnic/nim/sfp_p_registers.h
new file mode 100644
index 0000000000..a0fbe2afd7
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_p_registers.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SFP_P_REG_H
+#define _SFP_P_REG_H
+
+/*
+ * SFP/SFP+ Registers
+ */
+#define SFP_GB_ETH_COMP_CODES_LIN_ADDR 6
+#define SFP_GB_ETH_COMP_1000BASET_BIT (1 << 3)
+#define SFP_GB_ETH_COMP_1000BASECX_BIT (1 << 2)
+#define SFP_GB_ETH_COMP_1000BASELX_BIT (1 << 1)
+#define SFP_GB_ETH_COMP_1000BASESX_BIT (1 << 0)
+
+#define SFP_FIBER_CHAN_TRANS_TECH_LIN_ADDR 8
+#define SFP_FIBER_CHAN_TRANS_TECH_ACTIVE_CU_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_TECH_PASSIVE_CU_BIT (1 << 2)
+
+#define SFP_FIBER_CHAN_TRANS_MEDIA_LIN_ADDR 9
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM62_BIT (1 << 3)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_MM50_BIT (1 << 2)
+#define SFP_FIBER_CHAN_TRANS_MEDIA_SM_BIT (1 << 0)
+
+/* Vendor/identification area, ordered by address (duplicate define removed) */
+#define SFP_SUP_LEN_INFO_LIN_ADDR 14 /* 5bytes */
+#define SFP_CU_LINK_LEN_LIN_ADDR 18 /* 1byte */
+#define SFP_VENDOR_NAME_LIN_ADDR 20 /* 16bytes */
+#define SFP_VENDOR_PN_LIN_ADDR 40 /* 16bytes */
+#define SFP_VENDOR_REV_LIN_ADDR 56 /* 4bytes */
+#define SFP_VENDOR_SN_LIN_ADDR 68 /* 16bytes */
+#define SFP_VENDOR_DATE_LIN_ADDR 84 /* 8bytes */
+
+/* The following field is only relevant to SFP+ and is marked as reserved for SFP */
+#define SFP_OPTION0_LIN_ADDR 64
+#define SFP_POWER_LEVEL2_REQ_BIT (1 << 1)
+
+#define SFP_DMI_OPTION_LIN_ADDR (92)
+#define SFP_DMI_IMPL_BIT (1 << 6)
+#define SFP_DMI_EXT_CAL_BIT (1 << 4)
+#define SFP_DMI_AVG_PWR_BIT (1 << 3)
+#define SFP_DMI_ADDR_CHG_BIT (1 << 2)
+
+#define SFP_ENHANCED_OPTIONS_LIN_ADDR (93)
+#define SFP_SOFT_TX_FAULT_IMPL_BIT (1 << 5)
+#define SFP_SOFT_TX_DISABLE_IMPL_BIT (1 << 6)
+
+#define SFP_SFF8472_COMPLIANCE_LIN_ADDR 94
+
+#define SFP_TEMP_THRESH_LIN_ADDR (0 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_VOLT_THRESH_LIN_ADDR (8 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_BIAS_THRESH_LIN_ADDR (16 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_TX_PWR_THRESH_LIN_ADDR (24 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+#define SFP_RX_PWR_THRESH_LIN_ADDR (32 + 256)
+/* 8bytes: HighAlarm, LowAlarm, HighWarn, LowWarn each 2 bytes */
+
+/* Calibration data addresses */
+#define SFP_RX_PWR_COEFF_LIN_ADDR (56 + 256) /* 5 x 32bit float  values */
+
+#define SFP_TX_BIAS_SLOPE_LIN_ADDR (76 + 256)
+#define SFP_TX_BIAS_OFFSET_LIN_ADDR (78 + 256)
+
+#define SFP_TX_PWR_SLOPE_LIN_ADDR (80 + 256)
+#define SFP_TX_PWR_OFFSET_LIN_ADDR (82 + 256)
+
+#define SFP_TEMP_SLOPE_LIN_ADDR (84 + 256)
+#define SFP_TEMP_OFFSET_LIN_ADDR (86 + 256)
+
+#define SFP_VOLT_SLOPE_LIN_ADDR (88 + 256)
+#define SFP_VOLT_OFFSET_LIN_ADDR (90 + 256)
+
+/* Live data */
+#define SFP_TEMP_LIN_ADDR (96 + 256)
+#define SFP_VOLT_LIN_ADDR (98 + 256)
+#define SFP_TX_BIAS_LIN_ADDR (100 + 256)
+#define SFP_TX_PWR_LIN_ADDR (102 + 256)
+#define SFP_RX_PWR_LIN_ADDR (104 + 256)
+
+#define SFP_SOFT_RATE0_BIT (1 << 3)
+#define SFP_TX_FAULT_SET_BIT (1 << 2)
+
+#define SFP_EXT_CTRL_STAT0_LIN_ADDR (118 + 256) /* 0xA2 area */
+#define SFP_SOFT_RATE1_BIT (1 << 3)
+#define SFP_POWER_LEVEL2_GET_BIT (1 << 1) /* For reading the actual level */
+#define SFP_POWER_LEVEL2_SET_BIT (1 << 0) /* For setting the wanted level */
+
+/* PHY addresses */
+#define SFP_PHY_LIN_ADDR (12 * 128)
+#define SFP_PHY_LIN_RNG 32 /* 16bit words */
+
+#endif /* _SFP_P_REG_H */
diff --git a/drivers/net/ntnic/nim/sfp_sensors.c b/drivers/net/ntnic/nim/sfp_sensors.c
new file mode 100644
index 0000000000..766d6feaf3
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+#include "sfp_sensors.h"
+
+#include "sfp_p_registers.h"
+
+/*
+ * Return calibrated data from an SFP module.
+ * It is first investigated if external calibration is to be used and if it is
+ * calibration data is retrieved. The function can only be used when calibration
+ * consists of a slope and offset factor. After function return p_data will point
+ * to 16bit data that can be either signed or unsigned.
+ */
+static bool sfp_nim_get_dmi_data(uint16_t data_addr, uint16_t slope_addr,
+			       uint16_t offset_addr, void *p_value,
+			       bool signed_data, nim_i2c_ctx_p ctx)
+{
+	int32_t value;
+	uint16_t slope = 1;	/* neutral calibration: raw * 256/256 + 0 */
+	int16_t offset = 0;
+
+	/* Module advertises no DMI (diagnostics) support - nothing to read */
+	if (!ctx->dmi_supp)
+		return false;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, data_addr, 2, p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* big-endian wire order -> host order (htons == ntohs) */
+
+	/*
+	 * Inject test value which can be both signed and unsigned but handle
+	 * here as unsigned
+	 */
+#ifdef NIM_DMI_TEST_VALUE
+	*(uint16_t *)p_value = (uint16_t)NIM_DMI_TEST_VALUE;
+#endif
+
+#if defined(NIM_DMI_TEST_SLOPE) || defined(NIM_DMI_TEST_OFFSET)
+	ctx->specific_u.sfp.ext_cal = true;	/* force external-calibration path for test */
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* External calibration is needed - fetch slope/offset from the module */
+		read_data_lin(ctx, slope_addr, sizeof(slope), &slope);
+		read_data_lin(ctx, offset_addr, sizeof(offset), &offset);
+
+		/* Calibration constants: big-endian wire order -> host order */
+		slope = htons(slope);
+		offset = htons(offset);
+
+#ifdef NIM_DMI_TEST_SLOPE
+		slope = NIM_DMI_TEST_SLOPE;
+#endif
+
+#ifdef NIM_DMI_TEST_OFFSET
+		offset = NIM_DMI_TEST_OFFSET; /* 0x0140 equals 1.25 */
+#endif
+
+		/* Apply: calibrated = raw * slope / 256 + offset, saturated to 16 bits */
+		if (signed_data) {
+			value = *(int16_t *)p_value * slope / 256 + offset;
+
+			if (value > INT16_MAX)
+				value = INT16_MAX;
+			else if (value < INT16_MIN)
+				value = INT16_MIN;
+
+			*(int16_t *)p_value = (int16_t)value;
+		} else {
+			value = *(uint16_t *)p_value * slope / 256 + offset;
+
+			if (value > UINT16_MAX)
+				value = UINT16_MAX;
+			else if (value < 0)
+				value = 0;
+
+			*(uint16_t *)p_value = (uint16_t)value;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Fetch the calibrated module temperature as a signed raw value
+ * (callers scale by 10/256 to get 0.1 degC units).
+ */
+static bool sfp_nim_get_temperature(nim_i2c_ctx_p ctx, int16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_TEMP_LIN_ADDR,
+					   SFP_TEMP_SLOPE_LIN_ADDR,
+					   SFP_TEMP_OFFSET_LIN_ADDR,
+					   p_value, true, ctx);
+	return ok;
+}
+
+/*
+ * Fetch the calibrated supply voltage as an unsigned raw value.
+ */
+static bool sfp_nim_get_supply_voltage(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_VOLT_LIN_ADDR,
+					   SFP_VOLT_SLOPE_LIN_ADDR,
+					   SFP_VOLT_OFFSET_LIN_ADDR,
+					   p_value, false, ctx);
+	return ok;
+}
+
+/*
+ * Fetch the calibrated TX bias current as an unsigned raw value.
+ */
+static bool sfp_nim_get_tx_bias_current(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_TX_BIAS_LIN_ADDR,
+					   SFP_TX_BIAS_SLOPE_LIN_ADDR,
+					   SFP_TX_BIAS_OFFSET_LIN_ADDR,
+					   p_value, false, ctx);
+	return ok;
+}
+
+/*
+ * Fetch the calibrated TX optical power as an unsigned raw value.
+ */
+static bool sfp_nim_get_tx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const bool ok = sfp_nim_get_dmi_data(SFP_TX_PWR_LIN_ADDR,
+					   SFP_TX_PWR_SLOPE_LIN_ADDR,
+					   SFP_TX_PWR_OFFSET_LIN_ADDR,
+					   p_value, false, ctx);
+	return ok;
+}
+
+/*
+ * Return the SFP received power in units of 0.1uW from DMI data.
+ * If external calibration is necessary, the calibration data is retrieved and
+ * the calibration is carried out.
+ */
+static bool sfp_nim_get_calibrated_rx_power(nim_i2c_ctx_p ctx, uint16_t addr,
+		uint16_t *p_value)
+{
+	float rx_pwr_cal[5];	/* polynomial coefficients Coeff4..Coeff0, big-endian on wire */
+	float power_raised;	/* running RxPwr^i term */
+	float rx_power;
+
+	/* Read data in big endian format */
+	read_data_lin(ctx, addr, sizeof(*p_value), p_value);
+	*(uint16_t *)p_value =
+		htons(*(uint16_t *)p_value); /* big-endian wire order -> host order (htons == ntohs) */
+
+#ifdef NIM_DMI_RX_PWR_TEST_VALUE
+	*p_value = NIM_DMI_RX_PWR_TEST_VALUE;
+#endif
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+	ctx->specific_u.sfp.ext_cal = true;	/* force external-calibration path for test */
+#endif
+
+	if (ctx->specific_u.sfp.ext_cal) {
+		/* Read calibration data in big endian format */
+		read_data_lin(ctx, SFP_RX_PWR_COEFF_LIN_ADDR, sizeof(rx_pwr_cal),
+			    rx_pwr_cal);
+
+		/* Byte-swap each 32-bit coefficient in place to host order */
+		for (int i = 0; i < 5; i++) {
+			uint32_t *p_val = (uint32_t *)&rx_pwr_cal[i];
+			*p_val = ntohl(*p_val); /* 32 bit swap */
+		}
+
+#ifdef NIM_DMI_RX_PWR_CAL_DATA
+		/* Testdata for verification */
+		NIM_DMI_RX_PWR_CAL_DATA
+#endif
+
+		/*
+		 * If SFP module specifies external calibration - use calibration data
+		 * according to the polynomial correction formula
+		 * RxPwrCal = Coeff0 + Coeff1 * RxPwr   + Coeff2 * RxPwr^2 +
+		 *                     Coeff3 * RxPwr^3 + Coeff4 * RxPwr^4
+		 */
+		power_raised = 1.0;
+		rx_power = rx_pwr_cal[4]; /* Coeff0 */
+
+		/* Horner-free evaluation: accumulate each power of RxPwr in turn */
+		for (int i = 3; i >= 0; i--) {
+			power_raised *= (float)*p_value;
+			rx_power += rx_pwr_cal[i] * power_raised;
+		}
+
+		/*
+		 * Reject results that do not fit in 16 bits.
+		 * NOTE(review): on this path *p_value still holds the raw
+		 * (uncalibrated) reading when false is returned - confirm
+		 * callers ignore the output on failure.
+		 */
+		if (rx_power > 65535)
+			return false;
+
+		/* Clamp negative results to zero, otherwise truncate to uint16 */
+		if (rx_power < 0)
+			*p_value = 0;
+		else
+			*p_value = (uint16_t)rx_power;
+	}
+
+	return true;
+}
+
+/*
+ * Fetch the calibrated RX optical power from the live-data area.
+ */
+static bool sfp_nim_get_rx_power(nim_i2c_ctx_p ctx, uint16_t *p_value)
+{
+	const bool ok = sfp_nim_get_calibrated_rx_power(ctx, SFP_RX_PWR_LIN_ADDR,
+							p_value);
+	return ok;
+}
+
+/*
+ * Read the SFP module temperature and publish it to the sensor.
+ * On any failure the sensor is updated with -1.
+ */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	int16_t raw;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_temperature(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Raw value is in 1/256 degC; scale to 0.1 degC units */
+	update_sensor_value(sg->sensor, (int)(raw * 10 / 256));
+}
+
+/*
+ * Read the SFP module supply voltage and publish it to the sensor.
+ * On any failure the sensor is updated with -1.
+ */
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_supply_voltage(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)(raw / 10)); /* Unit: 100uV -> 1mV */
+}
+
+/*
+ * Read the SFP TX bias current and publish it to the sensor.
+ * On any failure the sensor is updated with -1.
+ */
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_bias_current(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	/* Raw value is scaled by 2 before publishing - assumed 2uA units; TODO confirm */
+	update_sensor_value(sg->sensor, (int)(raw * 2));
+}
+
+/*
+ * Read the SFP TX optical power and publish it to the sensor.
+ * On any failure the sensor is updated with -1.
+ */
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_tx_power(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)raw);
+}
+
+/*
+ * Read the SFP RX optical power and publish it to the sensor.
+ * On any failure the sensor is updated with -1.
+ */
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint16_t raw;
+
+	(void)t_spi;
+
+	if (sg == NULL || sg->ctx == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+
+	if (!sfp_nim_get_rx_power(sg->ctx, &raw)) {
+		update_sensor_value(sg->sensor, -1);
+		return;
+	}
+
+	update_sensor_value(sg->sensor, (int)raw);
+}
diff --git a/drivers/net/ntnic/nim/sfp_sensors.h b/drivers/net/ntnic/nim/sfp_sensors.h
new file mode 100644
index 0000000000..ab56027dc8
--- /dev/null
+++ b/drivers/net/ntnic/nim/sfp_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Sensor read callbacks for SFP/SFP+ modules.
+ * Each function validates its arguments, reads one diagnostic quantity via
+ * the module context (sg->ctx) and publishes the result through
+ * update_sensor_value(); -1 is published when the read fails.
+ * The t_spi parameter is accepted for interface uniformity but unused here.
+ *
+ * NOTE(review): "_SFP_H" (leading underscore followed by an uppercase
+ * letter) is a reserved identifier in C; consider renaming the guard to
+ * e.g. SFP_SENSORS_H.
+ */
+#ifndef _SFP_H
+#define _SFP_H
+
+#include "sensors.h"
+#include "i2c_nim.h"
+
+/* Read functions */
+void nim_read_sfp_temp(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_voltage(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_bias_current(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_tx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+void nim_read_sfp_rx_power(struct nim_sensor_group *sg, nthw_spis_t *t_spi);
+
+#endif /* _SFP_H */
diff --git a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
index efdcc222a8..bd7cd2a27c 100644
--- a/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
+++ b/drivers/net/ntnic/nthw/core/nthw_clock_profiles.c
@@ -5,5 +5,12 @@
 #include "nthw_clock_profiles.h"
 
 /* Clock profile for NT200A02 2x40G, 2x100G */
-const int n_data_si5340_nt200a02_u23_v5;
-const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5;
+#define si5340_revd_register_t type_si5340_nt200a02_u23_v5
+#define si5340_revd_registers data_si5340_nt200a02_u23_v5
+#include "nthw_nt200a02_u23_si5340_v5.h"
+const int n_data_si5340_nt200a02_u23_v5 = SI5340_REVD_REG_CONFIG_NUM_REGS;
+const clk_profile_data_fmt2_t *p_data_si5340_nt200a02_u23_v5 =
+	(const clk_profile_data_fmt2_t *)&data_si5340_nt200a02_u23_v5[0];
+#undef si5340_revd_registers
+#undef si5340_revd_register_t
+#undef SI5340_REVD_REG_CONFIG_HEADER /*Disable the include once protection */
diff --git a/drivers/net/ntnic/nthw/core/nthw_core.h b/drivers/net/ntnic/nthw/core/nthw_core.h
index 798a95d5cf..025b6b61cc 100644
--- a/drivers/net/ntnic/nthw/core/nthw_core.h
+++ b/drivers/net/ntnic/nthw/core/nthw_core.h
@@ -16,9 +16,11 @@
 #include "nthw_pci_ta.h"
 #include "nthw_iic.h"
 
+#include "nthw_gmf.h"
 #include "nthw_gpio_phy.h"
 #include "nthw_mac_pcs.h"
 #include "nthw_mac_pcs_xxv.h"
+#include "nthw_rmc.h"
 #include "nthw_sdc.h"
 
 #include "nthw_spim.h"
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c
new file mode 100644
index 0000000000..fe63c461e5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <limits.h>
+#include <math.h>
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_gmf.h"
+
+/*
+ * Allocate a zero-initialized GMF context.
+ * Returns NULL on allocation failure; release with nthw_gmf_delete().
+ */
+nthw_gmf_t *nthw_gmf_new(void)
+{
+	/* calloc() == malloc() + memset(0), with overflow-checked sizing */
+	return calloc(1, sizeof(nthw_gmf_t));
+}
+
+/*
+ * Scrub and free a GMF context; NULL is accepted and ignored.
+ */
+void nthw_gmf_delete(nthw_gmf_t *p)
+{
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Resolve all registers and fields of GMF module instance n_instance.
+ *
+ * When p is NULL the call acts as a presence probe only: it returns 0 if
+ * the module instance exists and -1 otherwise, without touching any state.
+ * With a context, returns 0 on success and -1 when the instance is absent.
+ *
+ * Registers fetched with module_get_register()/register_get_field() are
+ * mandatory; those fetched with the *query* variants are optional and the
+ * corresponding pointers are left NULL when the FPGA image lacks them -
+ * callers must check before use.
+ */
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	nt_module_t *mod = fpga_query_module(p_fpga, MOD_GMF, n_instance);
+
+	if (p == NULL)
+		return mod == NULL ? -1 : 0;
+
+	if (mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance\n",
+		       p_fpga->p_fpga_info->mp_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_gmf = mod;
+
+	/* Mandatory control/status registers and fields */
+	p->mp_ctrl = module_get_register(p->mp_mod_gmf, GMF_CTRL);
+	p->mp_ctrl_enable = register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE);
+	p->mp_ctrl_ifg_enable = register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE);
+	p->mp_ctrl_ifg_auto_adjust_enable =
+		register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE);
+
+	p->mp_speed = module_get_register(p->mp_mod_gmf, GMF_SPEED);
+	p->mp_speed_ifg_speed = register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED);
+
+	p->mp_ifg_clock_delta =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA);
+	p->mp_ifg_clock_delta_delta =
+		register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA);
+
+	p->mp_ifg_max_adjust_slack =
+		module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK);
+	p->mp_ifg_max_adjust_slack_slack =
+		register_get_field(p->mp_ifg_max_adjust_slack, GMF_IFG_MAX_ADJUST_SLACK_SLACK);
+
+	p->mp_debug_lane_marker =
+		module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER);
+	p->mp_debug_lane_marker_compensation =
+		register_get_field(p->mp_debug_lane_marker, GMF_DEBUG_LANE_MARKER_COMPENSATION);
+
+	p->mp_stat_sticky = module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY);
+	p->mp_stat_sticky_data_underflowed =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED);
+	p->mp_stat_sticky_ifg_adjusted =
+		register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED);
+
+	/* Product parameters with default 1 (used as MUL/DIV scaling factors) */
+	p->mn_param_gmf_ifg_speed_mul =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1);
+	p->mn_param_gmf_ifg_speed_div =
+		fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1);
+
+	p->m_administrative_block = false;
+
+	/* Optional registers/fields below - NULL when absent from the image */
+	p->mp_stat_next_pkt = module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT);
+	if (p->mp_stat_next_pkt) {
+		p->mp_stat_next_pkt_ns =
+			register_query_field(p->mp_stat_next_pkt,
+					     GMF_STAT_NEXT_PKT_NS);
+	} else {
+		p->mp_stat_next_pkt_ns = NULL;
+	}
+	p->mp_stat_max_delayed_pkt =
+		module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT);
+	if (p->mp_stat_max_delayed_pkt) {
+		p->mp_stat_max_delayed_pkt_ns =
+			register_query_field(p->mp_stat_max_delayed_pkt,
+					     GMF_STAT_MAX_DELAYED_PKT_NS);
+	} else {
+		p->mp_stat_max_delayed_pkt_ns = NULL;
+	}
+	p->mp_ctrl_ifg_tx_now_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS);
+	p->mp_ctrl_ifg_tx_on_ts_always =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS);
+
+	p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock =
+		register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK);
+
+	p->mp_ifg_clock_delta_adjust =
+		module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST);
+	if (p->mp_ifg_clock_delta_adjust) {
+		p->mp_ifg_clock_delta_adjust_delta =
+			register_query_field(p->mp_ifg_clock_delta_adjust,
+					     GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA);
+	} else {
+		p->mp_ifg_clock_delta_adjust_delta = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Set/clear the GMF enable bit. Silently refused once the module has been
+ * administratively blocked (see nthw_gmf_administrative_block()).
+ */
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable)
+{
+	if (p->m_administrative_block)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0);
+}
+
+/* Enable/disable inter-frame-gap control */
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable)
+{
+	const uint32_t val = enable ? 1 : 0;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_enable, val);
+}
+
+/* Set TX-now-always mode; no-op when the optional field is absent */
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->mp_ctrl_ifg_tx_now_always)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_now_always, enable ? 1 : 0);
+}
+
+/* Set TX-on-timestamp-always mode; no-op when the optional field is absent */
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable)
+{
+	if (!p->mp_ctrl_ifg_tx_on_ts_always)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_always, enable ? 1 : 0);
+}
+
+/* Set TX-on-timestamp adjust-on-set-clock; no-op when the optional field is absent */
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable)
+{
+	if (!p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock)
+		return;
+
+	field_set_val_flush32(p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock,
+			      enable ? 1 : 0);
+}
+
+/* Enable/disable automatic IFG adjustment */
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable)
+{
+	/* Normalize bool to 0/1 explicitly, matching every other setter here */
+	field_set_val_flush32(p->mp_ctrl_ifg_auto_adjust_enable, enable ? 1 : 0);
+}
+
+/*
+ * Write a raw IFG speed value to the SPEED field.
+ * Returns 0 on success, -1 when the value exceeds the accepted range.
+ *
+ * NOTE(review): the range check admits values up to and including
+ * 2^(width-1), not the full 2^width - 1 a width-bit field could hold -
+ * confirm this half-range limit is intentional.
+ * The (uint32_t *) cast passes the 64-bit value as one or two 32-bit
+ * words; assumes field_set_val() consumes words in host order - TODO
+ * confirm on big-endian targets.
+ */
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val)
+{
+	if (n_speed_val <=
+			(1ULL << (field_get_bit_width(p->mp_speed_ifg_speed) - 1))) {
+		field_set_val(p->mp_speed_ifg_speed, (uint32_t *)&n_speed_val,
+			     (field_get_bit_width(p->mp_speed_ifg_speed) <= 32 ? 1 :
+			      2));
+		field_flush_register(p->mp_speed_ifg_speed);
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Return the bit width of the SPEED/IFG_SPEED field.
+ */
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p)
+{
+	const int n_bit_width = field_get_bit_width(p->mp_speed_ifg_speed);
+
+	/* Sanity check: GMF ver 1.2 is bw 22 - GMF ver 1.3 is bw 64 */
+	assert(n_bit_width >= 22);
+
+	return n_bit_width;
+}
+
+/*
+ * Program the IFG speed from an absolute rate limit (bits/s) relative to
+ * the link speed. Returns 0 on success, -1 when the computed value is out
+ * of range (see nthw_gmf_set_ifg_speed_raw()).
+ */
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed)
+{
+	/* Fixed-point fraction uses half of the SPEED field width */
+	const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+	/* Requested fraction of the link rate, scaled by product MUL/DIV params */
+	const double f_adj_rate =
+		((double)((((double)n_rate_limit_bits) / (double)n_link_speed) *
+			  p->mn_param_gmf_ifg_speed_mul) /
+		 p->mn_param_gmf_ifg_speed_div);
+	/* speed = (1/rate - 1) * 2^(width/2); 1UL promotes to double here */
+	const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+	uint64_t n_speed_val = (uint64_t)round(f_speed);
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/*
+ * Program the IFG speed as a percentage of the link rate.
+ * 0.0 and 100.0 both write a raw value of 0 (no shaping); values in the
+ * open interval (99, 100) are rejected with -1.
+ * Returns 0 on success, -1 on rejection or out-of-range raw value.
+ */
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent)
+{
+	uint64_t n_speed_val;
+
+	if (f_rate_limit_percent == 0.0 || f_rate_limit_percent == 100.0) {
+		n_speed_val = 0;
+	} else if (f_rate_limit_percent <= 99) {
+		/* Same fixed-point conversion as nthw_gmf_set_ifg_speed_bits() */
+		const int n_bit_width = (nthw_gmf_get_ifg_speed_bit_width(p) / 2);
+		const double f_adj_rate =
+			((double)(f_rate_limit_percent *
+				  (double)p->mn_param_gmf_ifg_speed_mul) /
+			 p->mn_param_gmf_ifg_speed_div / 100);
+		const double f_speed = ((1UL / f_adj_rate) - 1) * exp2(n_bit_width);
+
+		n_speed_val = (uint64_t)f_speed;
+	} else {
+		return -1;
+	}
+
+	return nthw_gmf_set_ifg_speed_raw(p, n_speed_val);
+}
+
+/* Write the 64-bit clock delta as two 32-bit words and flush */
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta)
+{
+	uint32_t *p_words = (uint32_t *)&delta;
+
+	field_set_val(p->mp_ifg_clock_delta_delta, p_words, 2);
+	field_flush_register(p->mp_ifg_clock_delta_delta);
+}
+
+/* Write the 64-bit delta-adjust value; no-op when the optional register is absent */
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust)
+{
+	if (!p->mp_ifg_clock_delta_adjust)
+		return;
+
+	field_set_val(p->mp_ifg_clock_delta_adjust_delta,
+		     (uint32_t *)&delta_adjust, 2);
+	field_flush_register(p->mp_ifg_clock_delta_adjust_delta);
+}
+
+/* Write the 64-bit max-adjust slack as two 32-bit words and flush */
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack)
+{
+	uint32_t *p_words = (uint32_t *)&slack;
+
+	field_set_val(p->mp_ifg_max_adjust_slack_slack, p_words, 2);
+	field_flush_register(p->mp_ifg_max_adjust_slack_slack);
+}
+
+/* Write the debug lane-marker compensation value and flush in one call */
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation)
+{
+	field_set_val_flush32(p->mp_debug_lane_marker_compensation,
+			      compensation);
+}
+
+/*
+ * Sample the sticky status register and return the active conditions as a
+ * combination of GMF_STATUS_MASK_* bits.
+ */
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p)
+{
+	uint32_t status = 0;
+
+	/* Refresh the shadow copy before sampling the fields */
+	register_update(p->mp_stat_sticky);
+
+	if (field_get_val32(p->mp_stat_sticky_data_underflowed) != 0)
+		status |= GMF_STATUS_MASK_DATA_UNDERFLOWED;
+
+	if (field_get_val32(p->mp_stat_sticky_ifg_adjusted) != 0)
+		status |= GMF_STATUS_MASK_IFG_ADJUSTED;
+
+	return status;
+}
+
+/*
+ * Acknowledge sticky status conditions selected in 'status'
+ * (presumably write-to-clear semantics - each selected field is set and
+ * flushed).
+ */
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status)
+{
+	if ((status & GMF_STATUS_MASK_DATA_UNDERFLOWED) != 0)
+		field_set_flush(p->mp_stat_sticky_data_underflowed);
+
+	if ((status & GMF_STATUS_MASK_IFG_ADJUSTED) != 0)
+		field_set_flush(p->mp_stat_sticky_ifg_adjusted);
+}
+
+/*
+ * Read the next-packet timestamp statistic (ns).
+ * Returns UINT64_MAX when the optional register is absent.
+ */
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p)
+{
+	/*
+	 * All-ones sentinel: ULONG_MAX is only 2^32-1 on ILP32/LLP64
+	 * platforms, so use the width-exact UINT64_MAX instead.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_next_pkt) {
+		register_update(p->mp_stat_next_pkt);
+		field_get_val(p->mp_stat_next_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Read the max-delayed-packet statistic (ns).
+ * Returns UINT64_MAX when the optional register is absent.
+ */
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p)
+{
+	/*
+	 * All-ones sentinel: ULONG_MAX is only 2^32-1 on ILP32/LLP64
+	 * platforms, so use the width-exact UINT64_MAX instead.
+	 */
+	uint64_t value = UINT64_MAX;
+
+	if (p->mp_stat_max_delayed_pkt) {
+		register_update(p->mp_stat_max_delayed_pkt);
+		field_get_val(p->mp_stat_max_delayed_pkt_ns, (uint32_t *)&value, 2);
+	}
+	return value;
+}
+
+/*
+ * Permanently disable the GMF for license-expiry enforcement.
+ * Order matters: the enable bit is cleared first (while set_enable still
+ * works), then the block flag is latched so nthw_gmf_set_enable() refuses
+ * any further re-enable attempts.
+ */
+void nthw_gmf_administrative_block(nthw_gmf_t *p)
+{
+	nthw_gmf_set_enable(p, false);
+	p->m_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.h b/drivers/net/ntnic/nthw/core/nthw_gmf.h
new file mode 100644
index 0000000000..aec1342be7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_gmf.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTHW_GMF_H__
+#define __NTHW_GMF_H__
+
+/*
+ * Bit masks reported by nthw_gmf_get_status_sticky() and accepted by
+ * nthw_gmf_set_status_sticky().
+ * NOTE(review): values auto-increment (1, 2); a third mask added here
+ * would be 3 and no longer a distinct bit - assign powers of two
+ * explicitly if the enum grows.
+ */
+enum gmf_status_mask {
+	GMF_STATUS_MASK_DATA_UNDERFLOWED = 1,
+	GMF_STATUS_MASK_IFG_ADJUSTED
+};
+
+/*
+ * GMF module context: cached register/field handles resolved once by
+ * nthw_gmf_init(). Pointers filled in via register_query_field()/
+ * module_query_register() are optional and NULL when the FPGA image
+ * does not provide them.
+ */
+struct nthw_gmf {
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned by this struct) */
+	nt_module_t *mp_mod_gmf;
+	int mn_instance;
+	/*  */
+
+	/* CTRL register; the three *_tx_* fields below are optional */
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_enable;
+	nt_field_t *mp_ctrl_ifg_enable;
+	nt_field_t *mp_ctrl_ifg_tx_now_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_always;
+	nt_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock;
+	nt_field_t *mp_ctrl_ifg_auto_adjust_enable;
+
+	nt_register_t *mp_speed;
+	nt_field_t *mp_speed_ifg_speed;	/* bit width varies with GMF version */
+
+	nt_register_t *mp_ifg_clock_delta;
+	nt_field_t *mp_ifg_clock_delta_delta;
+
+	/* Optional - NULL when absent from the image */
+	nt_register_t *mp_ifg_clock_delta_adjust;
+	nt_field_t *mp_ifg_clock_delta_adjust_delta;
+
+	nt_register_t *mp_ifg_max_adjust_slack;
+	nt_field_t *mp_ifg_max_adjust_slack_slack;
+
+	nt_register_t *mp_debug_lane_marker;
+	nt_field_t *mp_debug_lane_marker_compensation;
+
+	nt_register_t *mp_stat_sticky;
+	nt_field_t *mp_stat_sticky_data_underflowed;
+	nt_field_t *mp_stat_sticky_ifg_adjusted;
+
+	/* Optional statistics - NULL when absent from the image */
+	nt_register_t *mp_stat_next_pkt;
+	nt_field_t *mp_stat_next_pkt_ns;
+
+	nt_register_t *mp_stat_max_delayed_pkt;
+	nt_field_t *mp_stat_max_delayed_pkt_ns;
+
+	/* Product parameters NT_GMF_IFG_SPEED_MUL/DIV (default 1) */
+	int mn_param_gmf_ifg_speed_mul;
+	int mn_param_gmf_ifg_speed_div;
+
+	bool m_administrative_block; /* Used to enforce license expiry */
+};
+
+typedef struct nthw_gmf nthw_gmf_t;
+typedef struct nthw_gmf nthw_gmf;
+
+nthw_gmf_t *nthw_gmf_new(void);
+void nthw_gmf_delete(nthw_gmf_t *p);
+int nthw_gmf_init(nthw_gmf_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_enable(nthw_gmf_t *p, bool enable);
+
+void nthw_gmf_set_tx_now_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_always_enable(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_tx_on_ts_adjust_on_set_clock(nthw_gmf_t *p, bool enable);
+void nthw_gmf_set_ifg_auto_adjust_enable(nthw_gmf_t *p, bool enable);
+
+int nthw_gmf_get_ifg_speed_bit_width(nthw_gmf_t *p);
+
+int nthw_gmf_set_ifg_speed_raw(nthw_gmf_t *p, uint64_t n_speed_val);
+int nthw_gmf_set_ifg_speed_bits(nthw_gmf_t *p, const uint64_t n_rate_limit_bits,
+			    const uint64_t n_link_speed);
+int nthw_gmf_set_ifg_speed_percent(nthw_gmf_t *p, const double f_rate_limit_percent);
+
+void nthw_gmf_set_delta(nthw_gmf_t *p, uint64_t delta);
+void nthw_gmf_set_delta_adjust(nthw_gmf_t *p, uint64_t delta_adjust);
+void nthw_gmf_set_slack(nthw_gmf_t *p, uint64_t slack);
+void nthw_gmf_set_compensation(nthw_gmf_t *p, uint32_t compensation);
+
+uint32_t nthw_gmf_get_status_sticky(nthw_gmf_t *p);
+void nthw_gmf_set_status_sticky(nthw_gmf_t *p, uint32_t status);
+
+uint64_t nthw_gmf_get_stat_next_pkt_ns(nthw_gmf_t *p);
+uint64_t nthw_gmf_get_stat_max_pk_delayedt_ns(nthw_gmf_t *p);
+
+void nthw_gmf_administrative_block(nthw_gmf_t *p); /* Used to enforce license expiry blocking */
+
+#endif /* __NTHW_GMF_H__ */
diff --git a/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
new file mode 100644
index 0000000000..f063a1048a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_nt200a02_u23_si5340_v5.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef SI5340_REVD_REG_CONFIG_HEADER
+#define SI5340_REVD_REG_CONFIG_HEADER
+
+#define SI5340_REVD_REG_CONFIG_NUM_REGS 326
+
+typedef struct {
+	unsigned int address; /* 16-bit register address */
+	unsigned char value; /* 8-bit register data */
+} si5340_revd_register_t;
+
+si5340_revd_register_t const si5340_revd_registers[SI5340_REVD_REG_CONFIG_NUM_REGS] = {
+	{ 0x0B24, 0xC0 },
+	{ 0x0B25, 0x00 },
+	{ 0x0502, 0x01 },
+	{ 0x0505, 0x03 },
+	{ 0x0957, 0x17 },
+	{ 0x0B4E, 0x1A },
+	{ 0x0006, 0x00 },
+	{ 0x0007, 0x00 },
+	{ 0x0008, 0x00 },
+	{ 0x000B, 0x74 },
+	{ 0x0017, 0xF0 },
+	{ 0x0018, 0xFF },
+	{ 0x0021, 0x0F },
+	{ 0x0022, 0x00 },
+	{ 0x002B, 0x0A },
+	{ 0x002C, 0x20 },
+	{ 0x002D, 0x00 },
+	{ 0x002E, 0x00 },
+	{ 0x002F, 0x00 },
+	{ 0x0030, 0x00 },
+	{ 0x0031, 0x00 },
+	{ 0x0032, 0x00 },
+	{ 0x0033, 0x00 },
+	{ 0x0034, 0x00 },
+	{ 0x0035, 0x00 },
+	{ 0x0036, 0x00 },
+	{ 0x0037, 0x00 },
+	{ 0x0038, 0x00 },
+	{ 0x0039, 0x00 },
+	{ 0x003A, 0x00 },
+	{ 0x003B, 0x00 },
+	{ 0x003C, 0x00 },
+	{ 0x003D, 0x00 },
+	{ 0x0041, 0x00 },
+	{ 0x0042, 0x00 },
+	{ 0x0043, 0x00 },
+	{ 0x0044, 0x00 },
+	{ 0x009E, 0x00 },
+	{ 0x0102, 0x01 },
+	{ 0x0112, 0x02 },
+	{ 0x0113, 0x09 },
+	{ 0x0114, 0x3E },
+	{ 0x0115, 0x19 },
+	{ 0x0117, 0x06 },
+	{ 0x0118, 0x09 },
+	{ 0x0119, 0x3E },
+	{ 0x011A, 0x18 },
+	{ 0x0126, 0x06 },
+	{ 0x0127, 0x09 },
+	{ 0x0128, 0x3E },
+	{ 0x0129, 0x18 },
+	{ 0x012B, 0x06 },
+	{ 0x012C, 0x09 },
+	{ 0x012D, 0x3E },
+	{ 0x012E, 0x1A },
+	{ 0x013F, 0x00 },
+	{ 0x0140, 0x00 },
+	{ 0x0141, 0x40 },
+	{ 0x0206, 0x00 },
+	{ 0x0208, 0x00 },
+	{ 0x0209, 0x00 },
+	{ 0x020A, 0x00 },
+	{ 0x020B, 0x00 },
+	{ 0x020C, 0x00 },
+	{ 0x020D, 0x00 },
+	{ 0x020E, 0x00 },
+	{ 0x020F, 0x00 },
+	{ 0x0210, 0x00 },
+	{ 0x0211, 0x00 },
+	{ 0x0212, 0x00 },
+	{ 0x0213, 0x00 },
+	{ 0x0214, 0x00 },
+	{ 0x0215, 0x00 },
+	{ 0x0216, 0x00 },
+	{ 0x0217, 0x00 },
+	{ 0x0218, 0x00 },
+	{ 0x0219, 0x00 },
+	{ 0x021A, 0x00 },
+	{ 0x021B, 0x00 },
+	{ 0x021C, 0x00 },
+	{ 0x021D, 0x00 },
+	{ 0x021E, 0x00 },
+	{ 0x021F, 0x00 },
+	{ 0x0220, 0x00 },
+	{ 0x0221, 0x00 },
+	{ 0x0222, 0x00 },
+	{ 0x0223, 0x00 },
+	{ 0x0224, 0x00 },
+	{ 0x0225, 0x00 },
+	{ 0x0226, 0x00 },
+	{ 0x0227, 0x00 },
+	{ 0x0228, 0x00 },
+	{ 0x0229, 0x00 },
+	{ 0x022A, 0x00 },
+	{ 0x022B, 0x00 },
+	{ 0x022C, 0x00 },
+	{ 0x022D, 0x00 },
+	{ 0x022E, 0x00 },
+	{ 0x022F, 0x00 },
+	{ 0x0235, 0x00 },
+	{ 0x0236, 0x00 },
+	{ 0x0237, 0x00 },
+	{ 0x0238, 0xA6 },
+	{ 0x0239, 0x8B },
+	{ 0x023A, 0x00 },
+	{ 0x023B, 0x00 },
+	{ 0x023C, 0x00 },
+	{ 0x023D, 0x00 },
+	{ 0x023E, 0x80 },
+	{ 0x0250, 0x03 },
+	{ 0x0251, 0x00 },
+	{ 0x0252, 0x00 },
+	{ 0x0253, 0x00 },
+	{ 0x0254, 0x00 },
+	{ 0x0255, 0x00 },
+	{ 0x025C, 0x00 },
+	{ 0x025D, 0x00 },
+	{ 0x025E, 0x00 },
+	{ 0x025F, 0x00 },
+	{ 0x0260, 0x00 },
+	{ 0x0261, 0x00 },
+	{ 0x026B, 0x30 },
+	{ 0x026C, 0x35 },
+	{ 0x026D, 0x00 },
+	{ 0x026E, 0x00 },
+	{ 0x026F, 0x00 },
+	{ 0x0270, 0x00 },
+	{ 0x0271, 0x00 },
+	{ 0x0272, 0x00 },
+	{ 0x0302, 0x00 },
+	{ 0x0303, 0x00 },
+	{ 0x0304, 0x00 },
+	{ 0x0305, 0x00 },
+	{ 0x0306, 0x0D },
+	{ 0x0307, 0x00 },
+	{ 0x0308, 0x00 },
+	{ 0x0309, 0x00 },
+	{ 0x030A, 0x00 },
+	{ 0x030B, 0x80 },
+	{ 0x030C, 0x00 },
+	{ 0x030D, 0x00 },
+	{ 0x030E, 0x00 },
+	{ 0x030F, 0x00 },
+	{ 0x0310, 0x61 },
+	{ 0x0311, 0x08 },
+	{ 0x0312, 0x00 },
+	{ 0x0313, 0x00 },
+	{ 0x0314, 0x00 },
+	{ 0x0315, 0x00 },
+	{ 0x0316, 0x80 },
+	{ 0x0317, 0x00 },
+	{ 0x0318, 0x00 },
+	{ 0x0319, 0x00 },
+	{ 0x031A, 0x00 },
+	{ 0x031B, 0xD0 },
+	{ 0x031C, 0x1A },
+	{ 0x031D, 0x00 },
+	{ 0x031E, 0x00 },
+	{ 0x031F, 0x00 },
+	{ 0x0320, 0x00 },
+	{ 0x0321, 0xA0 },
+	{ 0x0322, 0x00 },
+	{ 0x0323, 0x00 },
+	{ 0x0324, 0x00 },
+	{ 0x0325, 0x00 },
+	{ 0x0326, 0x00 },
+	{ 0x0327, 0x00 },
+	{ 0x0328, 0x00 },
+	{ 0x0329, 0x00 },
+	{ 0x032A, 0x00 },
+	{ 0x032B, 0x00 },
+	{ 0x032C, 0x00 },
+	{ 0x032D, 0x00 },
+	{ 0x0338, 0x00 },
+	{ 0x0339, 0x1F },
+	{ 0x033B, 0x00 },
+	{ 0x033C, 0x00 },
+	{ 0x033D, 0x00 },
+	{ 0x033E, 0x00 },
+	{ 0x033F, 0x00 },
+	{ 0x0340, 0x00 },
+	{ 0x0341, 0x00 },
+	{ 0x0342, 0x00 },
+	{ 0x0343, 0x00 },
+	{ 0x0344, 0x00 },
+	{ 0x0345, 0x00 },
+	{ 0x0346, 0x00 },
+	{ 0x0347, 0x00 },
+	{ 0x0348, 0x00 },
+	{ 0x0349, 0x00 },
+	{ 0x034A, 0x00 },
+	{ 0x034B, 0x00 },
+	{ 0x034C, 0x00 },
+	{ 0x034D, 0x00 },
+	{ 0x034E, 0x00 },
+	{ 0x034F, 0x00 },
+	{ 0x0350, 0x00 },
+	{ 0x0351, 0x00 },
+	{ 0x0352, 0x00 },
+	{ 0x0359, 0x00 },
+	{ 0x035A, 0x00 },
+	{ 0x035B, 0x00 },
+	{ 0x035C, 0x00 },
+	{ 0x035D, 0x00 },
+	{ 0x035E, 0x00 },
+	{ 0x035F, 0x00 },
+	{ 0x0360, 0x00 },
+	{ 0x0802, 0x00 },
+	{ 0x0803, 0x00 },
+	{ 0x0804, 0x00 },
+	{ 0x0805, 0x00 },
+	{ 0x0806, 0x00 },
+	{ 0x0807, 0x00 },
+	{ 0x0808, 0x00 },
+	{ 0x0809, 0x00 },
+	{ 0x080A, 0x00 },
+	{ 0x080B, 0x00 },
+	{ 0x080C, 0x00 },
+	{ 0x080D, 0x00 },
+	{ 0x080E, 0x00 },
+	{ 0x080F, 0x00 },
+	{ 0x0810, 0x00 },
+	{ 0x0811, 0x00 },
+	{ 0x0812, 0x00 },
+	{ 0x0813, 0x00 },
+	{ 0x0814, 0x00 },
+	{ 0x0815, 0x00 },
+	{ 0x0816, 0x00 },
+	{ 0x0817, 0x00 },
+	{ 0x0818, 0x00 },
+	{ 0x0819, 0x00 },
+	{ 0x081A, 0x00 },
+	{ 0x081B, 0x00 },
+	{ 0x081C, 0x00 },
+	{ 0x081D, 0x00 },
+	{ 0x081E, 0x00 },
+	{ 0x081F, 0x00 },
+	{ 0x0820, 0x00 },
+	{ 0x0821, 0x00 },
+	{ 0x0822, 0x00 },
+	{ 0x0823, 0x00 },
+	{ 0x0824, 0x00 },
+	{ 0x0825, 0x00 },
+	{ 0x0826, 0x00 },
+	{ 0x0827, 0x00 },
+	{ 0x0828, 0x00 },
+	{ 0x0829, 0x00 },
+	{ 0x082A, 0x00 },
+	{ 0x082B, 0x00 },
+	{ 0x082C, 0x00 },
+	{ 0x082D, 0x00 },
+	{ 0x082E, 0x00 },
+	{ 0x082F, 0x00 },
+	{ 0x0830, 0x00 },
+	{ 0x0831, 0x00 },
+	{ 0x0832, 0x00 },
+	{ 0x0833, 0x00 },
+	{ 0x0834, 0x00 },
+	{ 0x0835, 0x00 },
+	{ 0x0836, 0x00 },
+	{ 0x0837, 0x00 },
+	{ 0x0838, 0x00 },
+	{ 0x0839, 0x00 },
+	{ 0x083A, 0x00 },
+	{ 0x083B, 0x00 },
+	{ 0x083C, 0x00 },
+	{ 0x083D, 0x00 },
+	{ 0x083E, 0x00 },
+	{ 0x083F, 0x00 },
+	{ 0x0840, 0x00 },
+	{ 0x0841, 0x00 },
+	{ 0x0842, 0x00 },
+	{ 0x0843, 0x00 },
+	{ 0x0844, 0x00 },
+	{ 0x0845, 0x00 },
+	{ 0x0846, 0x00 },
+	{ 0x0847, 0x00 },
+	{ 0x0848, 0x00 },
+	{ 0x0849, 0x00 },
+	{ 0x084A, 0x00 },
+	{ 0x084B, 0x00 },
+	{ 0x084C, 0x00 },
+	{ 0x084D, 0x00 },
+	{ 0x084E, 0x00 },
+	{ 0x084F, 0x00 },
+	{ 0x0850, 0x00 },
+	{ 0x0851, 0x00 },
+	{ 0x0852, 0x00 },
+	{ 0x0853, 0x00 },
+	{ 0x0854, 0x00 },
+	{ 0x0855, 0x00 },
+	{ 0x0856, 0x00 },
+	{ 0x0857, 0x00 },
+	{ 0x0858, 0x00 },
+	{ 0x0859, 0x00 },
+	{ 0x085A, 0x00 },
+	{ 0x085B, 0x00 },
+	{ 0x085C, 0x00 },
+	{ 0x085D, 0x00 },
+	{ 0x085E, 0x00 },
+	{ 0x085F, 0x00 },
+	{ 0x0860, 0x00 },
+	{ 0x0861, 0x00 },
+	{ 0x090E, 0x02 },
+	{ 0x091C, 0x04 },
+	{ 0x0943, 0x00 },
+	{ 0x0949, 0x00 },
+	{ 0x094A, 0x00 },
+	{ 0x094E, 0x49 },
+	{ 0x094F, 0x02 },
+	{ 0x095E, 0x00 },
+	{ 0x0A02, 0x00 },
+	{ 0x0A03, 0x07 },
+	{ 0x0A04, 0x01 },
+	{ 0x0A05, 0x07 },
+	{ 0x0A14, 0x00 },
+	{ 0x0A1A, 0x00 },
+	{ 0x0A20, 0x00 },
+	{ 0x0A26, 0x00 },
+	{ 0x0B44, 0x0F },
+	{ 0x0B4A, 0x08 },
+	{ 0x0B57, 0x0E },
+	{ 0x0B58, 0x01 },
+	{ 0x001C, 0x01 },
+	{ 0x0B24, 0xC3 },
+	{ 0x0B25, 0x02 },
+};
+
+#endif /* SI5340_REVD_REG_CONFIG_HEADER */
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c
new file mode 100644
index 0000000000..ec32dd88e6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "nthw_rmc.h"
+
+/*
+ * Allocate a zero-initialized RMC context.
+ * Returns NULL on allocation failure; the caller owns the object and
+ * releases it with nthw_rmc_delete().
+ */
+nthw_rmc_t *nthw_rmc_new(void)
+{
+	/* calloc() allocates and zero-fills in one step (was malloc+memset) */
+	return calloc(1, sizeof(nthw_rmc_t));
+}
+
+/*
+ * Destroy an RMC context created by nthw_rmc_new().
+ * Safe to call with NULL (the guard skips both the scrub and the free).
+ */
+void nthw_rmc_delete(nthw_rmc_t *p)
+{
+	if (p) {
+		memset(p, 0, sizeof(nthw_rmc_t));	/* defensive scrub: stale pointers read zeroed fields */
+		free(p);
+	}
+}
+
+/*
+ * Bind an RMC context to FPGA module instance n_instance and cache its
+ * register/field handles.
+ *
+ * Probe mode: when p is NULL the function only reports whether the
+ * module instance exists (0 = present, -1 = absent) without touching
+ * any state.
+ *
+ * Returns 0 on success, -1 when the module instance does not exist.
+ */
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->mn_instance = n_instance;
+	p->mp_mod_rmc = p_mod;
+
+	/* Params */
+	p->mb_is_vswitch = p_fpga->p_fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH;
+	/* RX port count falls back to the total port count when NT_RX_PORTS is absent */
+	p->mn_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS,
+					     fpga_get_product_param(p_fpga, NT_PORTS, 0));
+	p->mn_nims = fpga_get_product_param(p_fpga, NT_NIMS, 0);
+	p->mb_administrative_block = false;
+
+	NT_LOG(DBG, NTHW, "%s: RMC %d: vswitch=%d\n", p_adapter_id_str,
+	       p->mn_instance, p->mb_is_vswitch);
+
+	/* The CTRL register and its fields are mandatory on all images */
+	p->mp_reg_ctrl = module_get_register(p->mp_mod_rmc, RMC_CTRL);
+
+	p->mp_fld_ctrl_block_stat_drop =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_fld_ctrl_block_keep_alive =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_fld_ctrl_block_mac_port =
+		register_get_field(p->mp_reg_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+
+	/*
+	 * STATUS, DBG and MAC_IF are optional: use query (not get) and only
+	 * resolve their fields when present; the getters below return ~0 when
+	 * the corresponding register pointer stayed NULL.
+	 */
+	p->mp_reg_status = module_query_register(p->mp_mod_rmc, RMC_STATUS);
+	if (p->mp_reg_status) {
+		p->mp_fld_sf_ram_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_SF_RAM_OF);
+		p->mp_fld_descr_fifo_of =
+			register_get_field(p->mp_reg_status, RMC_STATUS_DESCR_FIFO_OF);
+	}
+
+	p->mp_reg_dbg = module_query_register(p->mp_mod_rmc, RMC_DBG);
+	if (p->mp_reg_dbg) {
+		p->mp_fld_dbg_merge =
+			register_get_field(p->mp_reg_dbg, RMC_DBG_MERGE);
+	}
+
+	p->mp_reg_mac_if = module_query_register(p->mp_mod_rmc, RMC_MAC_IF);
+	if (p->mp_reg_mac_if) {
+		p->mp_fld_mac_if_err =
+			register_get_field(p->mp_reg_mac_if, RMC_MAC_IF_ERR);
+	}
+	return 0;
+}
+
+/* Read the current CTRL:BLOCK_MAC_PORT mask (forces a register read). */
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p)
+{
+	return field_get_updated(p->mp_fld_ctrl_block_mac_port);
+}
+
+/*
+ * Read the RMC STATUS:SF_RAM_OF field.
+ * Returns 0xffffffff when the optional STATUS register is not present
+ * on this FPGA image.
+ */
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_sf_ram_of);
+}
+
+/*
+ * Read the RMC STATUS:DESCR_FIFO_OF field.
+ * Returns 0xffffffff when the optional STATUS register is not present
+ * on this FPGA image.
+ */
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_status)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_descr_fifo_of);
+}
+
+/*
+ * Read the RMC DBG:MERGE field.
+ * Returns 0xffffffff when the optional DBG register is not present.
+ */
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_dbg)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_dbg_merge);
+}
+
+/*
+ * Read the RMC MAC_IF:ERR field.
+ * Returns 0xffffffff when the optional MAC_IF register is not present.
+ */
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p)
+{
+	if (!p->mp_reg_mac_if)
+		return 0xffffffff;
+	return field_get_updated(p->mp_fld_mac_if_err);
+}
+
+/*
+ * Write the CTRL:BLOCK_MAC_PORT mask and flush it to hardware.
+ * Note: unlike nthw_rmc_block()/nthw_rmc_unblock() this does not honor
+ * the administrative-block flag.
+ */
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask)
+{
+	field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, mask);
+}
+
+/*
+ * Set the STATT, KEEPA and MAC_PORT block fields (all-ones).
+ * Skipped entirely while an administrative block is in force.
+ */
+void nthw_rmc_block(nthw_rmc_t *p)
+{
+	/* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */
+	if (!p->mb_administrative_block) {
+		field_set_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_set_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	}
+}
+
+/*
+ * Clear the RMC block fields unless an administrative block is in force.
+ * The MAC-port mask keeps the bits above the active port range blocked:
+ * all-ones shifted left by the NIM count (secondary side) or the RX
+ * port count leaves only the low, in-use port bits unblocked.
+ */
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary)
+{
+	uint32_t n_block_mask = ~0U << (b_is_secondary ? p->mn_nims : p->mn_ports);
+
+	if (p->mb_is_vswitch) {
+		/*
+		 * VSWITCH: NFV: block bits: phy_nim_ports(2) + rtd_ports(4) +
+		 * roa_recirculate_port(1)
+		 */
+		n_block_mask = 1 << (2 + 4); /* block only ROA recirculate */
+	}
+
+	/* BLOCK_STATT(0)=0 BLOCK_KEEPA(1)=0 BLOCK_MAC_PORT(8:11)=0 */
+	if (!p->mb_administrative_block) {
+		field_clr_flush(p->mp_fld_ctrl_block_stat_drop);
+		field_clr_flush(p->mp_fld_ctrl_block_keep_alive);
+		field_set_val_flush32(p->mp_fld_ctrl_block_mac_port, n_block_mask);
+	}
+}
+
+/*
+ * Block all MAC ports and latch the administrative-block flag.
+ * Nothing in this file clears the flag again, so nthw_rmc_block() and
+ * nthw_rmc_unblock() become no-ops afterwards.
+ */
+void nthw_rmc_administrative_block(nthw_rmc_t *p)
+{
+	/* block all MAC ports */
+	field_set_flush(p->mp_fld_ctrl_block_mac_port);
+	p->mb_administrative_block = true;
+}
diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.h b/drivers/net/ntnic/nthw/core/nthw_rmc.h
new file mode 100644
index 0000000000..2df4462287
--- /dev/null
+++ b/drivers/net/ntnic/nthw/core/nthw_rmc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTHW_RMC_H_
+#define NTHW_RMC_H_
+
+/*
+ * RMC module context: the FPGA handle, cached register/field handles and
+ * the adapter topology parameters used to build the MAC-port block mask.
+ * Created by nthw_rmc_new() and bound to an FPGA by nthw_rmc_init().
+ * NOTE(review): this header relies on nt_fpga_t/nt_module_t/nt_register_t/
+ * nt_field_t and <stdbool.h> being provided by previously included
+ * headers -- confirm the include order at the call sites.
+ */
+struct nthw_rmc {
+	nt_fpga_t *mp_fpga;
+	nt_module_t *mp_mod_rmc;
+	int mn_instance;
+
+	int mn_ports; /* RX port count (NT_RX_PORTS, falling back to NT_PORTS) */
+	int mn_nims; /* NIM count (NT_NIMS) */
+	bool mb_is_vswitch; /* FPGA profile is FPGA_INFO_PROFILE_VSWITCH */
+
+	bool mb_administrative_block; /* when set, block()/unblock() are no-ops */
+
+	/* RMC CTRL register */
+	nt_register_t *mp_reg_ctrl;
+	nt_field_t *mp_fld_ctrl_block_stat_drop;
+	nt_field_t *mp_fld_ctrl_block_keep_alive;
+	nt_field_t *mp_fld_ctrl_block_mac_port;
+
+	/* RMC Status register (optional; NULL when absent) */
+	nt_register_t *mp_reg_status;
+	nt_field_t *mp_fld_sf_ram_of;
+	nt_field_t *mp_fld_descr_fifo_of;
+
+	/* RMC DBG register (optional; NULL when absent) */
+	nt_register_t *mp_reg_dbg;
+	nt_field_t *mp_fld_dbg_merge;
+
+	/* RMC MAC_IF register (optional; NULL when absent) */
+	nt_register_t *mp_reg_mac_if;
+	nt_field_t *mp_fld_mac_if_err;
+};
+
+typedef struct nthw_rmc nthw_rmc_t;
+typedef struct nthw_rmc nthw_rmc;
+
+nthw_rmc_t *nthw_rmc_new(void);
+void nthw_rmc_delete(nthw_rmc_t *p);
+int nthw_rmc_init(nthw_rmc_t *p, nt_fpga_t *p_fpga, int n_instance);
+
+uint32_t nthw_rmc_get_mac_block(nthw_rmc_t *p);
+void nthw_rmc_set_mac_block(nthw_rmc_t *p, uint32_t mask);
+void nthw_rmc_block(nthw_rmc_t *p);
+void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary);
+void nthw_rmc_administrative_block(nthw_rmc_t *p);
+
+uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p);
+uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p);
+
+#endif /* NTHW_RMC_H_ */
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
new file mode 100644
index 0000000000..bf120ccb39
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "avr_sensors.h"
+#include "ntlog.h"
+
+#define MAX_ADAPTERS 2
+
+/*
+ * Per-adapter counter handing out consecutive FPGA result slots.
+ * File-local only (not declared in any header), so give it internal
+ * linkage with static.
+ */
+static uint8_t s_fpga_indexes[MAX_ADAPTERS] = { 0 }; /* _NTSD_MAX_NUM_ADAPTERS_ */
+static uint8_t get_fpga_idx(unsigned int adapter_no);
+
+/*
+ * Configure AVR-side monitoring of one sensor; the AVR will place raw
+ * results in the FPGA slot whose index this function returns.
+ * Alarm limits are set to NaN so the AVR performs no limit checking
+ * (the host tests limits instead, see the comment below).
+ * NOTE(review): the leading-underscore name sits in the reserved
+ * identifier space at file scope -- consider renaming.
+ */
+static uint8_t _avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no,
+				const char *p_name,
+				enum sensor_mon_device avr_dev,
+				uint8_t avr_dev_reg, enum sensor_mon_endian end,
+				enum sensor_mon_sign si, uint16_t mask)
+{
+	uint8_t fpga_idx = get_fpga_idx(m_adapter_no);
+	struct sensor_mon_setup16 avr_sensor_setup;
+
+	/* Setup monitoring in AVR placing results in FPGA */
+	avr_sensor_setup.setup_cnt = 1;
+	avr_sensor_setup.setup_data[0].fpga_idx = fpga_idx;
+	avr_sensor_setup.setup_data[0].device = avr_dev;
+	avr_sensor_setup.setup_data[0].device_register = avr_dev_reg;
+	/* format: b0,1 = endianness, b2,3 = signedness (see avr_intf.h) */
+	avr_sensor_setup.setup_data[0].format = (uint16_t)(end | si << 2);
+
+	avr_sensor_setup.setup_data[0].mask = mask;
+	avr_sensor_setup.setup_data[0].pos =
+		0; /* So far for all sensors in table */
+
+	/*
+	 * At first it is the task of ntservice to test limit_low and limit_high on all
+	 * board sensors. Later the test is going to be carried out by the AVR
+	 */
+	if (si == SENSOR_MON_SIGNED) {
+		avr_sensor_setup.setup_data[0].int16.limit_low =
+			SENSOR_MON_INT16_NAN;
+		avr_sensor_setup.setup_data[0].int16.limit_high =
+			SENSOR_MON_INT16_NAN;
+	} else {
+		avr_sensor_setup.setup_data[0].uint16.limit_low =
+			SENSOR_MON_UINT16_NAN;
+		avr_sensor_setup.setup_data[0].uint16.limit_high =
+			SENSOR_MON_UINT16_NAN;
+	}
+
+	/* only setup_cnt entries are transmitted, so entries [1..] may stay uninitialized */
+	int result = nt_avr_sensor_mon_setup(&avr_sensor_setup, s_spi);
+
+	if (result)
+		NT_LOG(ERR, ETHDEV, "%s: sensor initialization error\n", p_name);
+
+	return fpga_idx;
+}
+
+/*
+ * Sensor-group read callback: fetch the raw AVR result from the FPGA
+ * slot and store the converted value via the group's conv_func.
+ * NOTE(review): the status returned by sensor_read() is ignored, so a
+ * failed read converts whatever is left in p_sensor_result -- consider
+ * checking it before updating.
+ */
+static void avr_read(struct nt_sensor_group *sg, nthw_spis_t *t_spi)
+{
+	uint32_t p_sensor_result;
+
+	if (sg == NULL || sg->sensor == NULL)
+		return;
+
+	sensor_read(t_spi, sg->sensor->fpga_idx, &p_sensor_result);
+	update_sensor_value(sg->sensor, sg->conv_func(p_sensor_result));
+}
+
+/*
+ * Create a sensor group backed by an AVR-monitored sensor and set up
+ * AVR-side monitoring for it.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * group (release with sensor_deinit()).
+ */
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->sensor = allocate_sensor(m_adapter_no, p_name, ssrc, type, index,
+				     NT_SENSOR_DISABLE_ALARM, si);
+	if (sg->sensor == NULL) {
+		/* allocate_sensor() logs on failure; avoid dereferencing NULL below */
+		free(sg);
+		return NULL;
+	}
+	sg->sensor->fpga_idx = _avr_sensor_init(s_spi, m_adapter_no, p_name, avr_dev,
+					       avr_dev_reg, end, si, mask);
+	sg->read = &avr_read;
+	sg->conv_func = conv_func;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	return sg;
+}
+
+/*
+ * Hand out the next free FPGA result slot for this adapter
+ * (post-increment, wraps at 255). No bounds check on adapter_no: the
+ * caller must keep it below MAX_ADAPTERS.
+ */
+static uint8_t get_fpga_idx(unsigned int adapter_no)
+{
+	return s_fpga_indexes[adapter_no]++;
+}
diff --git a/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
new file mode 100644
index 0000000000..b8c37a12cb
--- /dev/null
+++ b/drivers/net/ntnic/sensors/avr_sensors/avr_sensors.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_SENSORS_H
+#define _AVR_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+#include "avr_intf.h"
+#include "ntavr.h"
+
+struct nt_sensor_group *
+avr_sensor_init(nthw_spi_v3_t *s_spi, uint8_t m_adapter_no, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum sensor_mon_device avr_dev,
+		uint8_t avr_dev_reg, enum sensor_mon_endian end,
+		enum sensor_mon_sign si, int (*conv_func)(uint32_t),
+		uint16_t mask);
+
+#endif /* _AVR_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.c b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
new file mode 100644
index 0000000000..8e52379df8
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stddef.h>
+#include <math.h>
+
+#include "tempmon.h"
+#include "board_sensors.h"
+#include "ntlog.h"
+
+/*
+ * Sensor-group read callback for the FPGA die temperature.
+ * t_spi is unused (the TEMPMON field is read directly from the FPGA);
+ * the parameter only exists to match the callback signature.
+ */
+static void fpga_temperature_sensor_read(struct nt_sensor_group *sg,
+		nthw_spis_t *t_spi)
+{
+	int temp = 0;
+	(void)t_spi;
+	if (sg == NULL || sg->sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "failed to read FPGA temperature\n");
+		return;
+	}
+	struct nt_fpga_sensor_monitor *temp_monitor = sg->monitor;
+	uint32_t val = field_get_updated(temp_monitor->fields[0]);
+
+	/*
+	 * Linear fixed-point mapping of the raw ADC code to 0.1 degC units
+	 * (the sensor is allocated as NT_SENSOR_TYPE_TEMPERATURE).
+	 * Presumably the FPGA's on-die sensor transfer function --
+	 * TODO confirm the constants against the FPGA documentation.
+	 */
+	temp = (val * 20159 - 44752896) / 16384;
+
+	update_sensor_value(sg->sensor, temp);
+}
+
+/*
+ * Create the FPGA die-temperature sensor group for an adapter.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * group (release with sensor_deinit()).
+ */
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga)
+{
+	struct nt_sensor_group *sg = malloc(sizeof(struct nt_sensor_group));
+
+	if (sg == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor group is NULL", __func__);
+		return NULL;
+	}
+	init_sensor_group(sg);
+	sg->monitor = tempmon_new();
+	if (sg->monitor == NULL) {
+		/*
+		 * tempmon_new() already logged; a group without a monitor
+		 * would make fpga_temperature_sensor_read() dereference NULL
+		 */
+		free(sg);
+		return NULL;
+	}
+	tempmon_init(sg->monitor, p_fpga);
+	sg->sensor =
+		allocate_sensor(adapter_no, "FPGA", NT_SENSOR_SOURCE_ADAPTER,
+				NT_SENSOR_TYPE_TEMPERATURE, sensor_idx,
+				NT_SENSOR_DISABLE_ALARM, SENSOR_MON_UNSIGNED);
+	sg->read = &fpga_temperature_sensor_read;
+	return sg;
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/board_sensors.h b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
new file mode 100644
index 0000000000..a7f75b7ae4
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/board_sensors.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _BOARD_SENSORS_H
+#define _BOARD_SENSORS_H
+
+#include <stdint.h>
+
+#include "sensors.h"
+
+#include "nthw_fpga_model.h"
+
+struct nt_sensor_group *fpga_temperature_sensor_init(uint8_t adapter_no,
+		unsigned int sensor_idx,
+		nt_fpga_t *p_fpga);
+
+#endif /* _BOARD_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.c b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
new file mode 100644
index 0000000000..2cd3709205
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "tempmon.h"
+#include "ntlog.h"
+#include "nthw_register.h"
+
+/*
+ * Allocate an uninitialized FPGA temperature monitor.
+ * Returns NULL on allocation failure (after logging); the caller must
+ * still pass the result through tempmon_init() before use.
+ */
+struct nt_fpga_sensor_monitor *tempmon_new(void)
+{
+	struct nt_fpga_sensor_monitor *monitor = malloc(sizeof(*monitor));
+
+	if (!monitor)
+		NT_LOG(ERR, ETHDEV, "%s: monitor is NULL\n", __func__);
+	return monitor;
+}
+
+/*
+ * Resolve the TEMPMON module, its STAT register and the TEMP field for
+ * monitor t. Errors are logged; on failure the function now returns
+ * early instead of passing a NULL module/register on to
+ * module_get_register()/register_get_field().
+ */
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga)
+{
+	if (t == NULL || p_fpga == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: bad argument(s)\n", __func__);
+		return;
+	}
+	/* fetch initialized module */
+	t->fpga = p_fpga;
+	t->mod = nthw_get_module(t->fpga, MOD_TEMPMON, 0);
+	if (t->mod == NULL) {
+		NT_LOG(ERR, ETHDEV, "module is NULL\n");
+		return; /* do not hand a NULL module to module_get_register() */
+	}
+	/* fetch register */
+	t->reg = module_get_register(t->mod, TEMPMON_STAT);
+	if (t->reg == NULL) {
+		NT_LOG(ERR, ETHDEV, "register is NULL\n");
+		return; /* do not hand a NULL register to register_get_field() */
+	}
+	/* fetch fields */
+	t->fields = malloc(sizeof(nt_field_t *));
+	if (t->fields == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: field is NULL", __func__);
+		return;
+	}
+	t->fields[0] = register_get_field(t->reg, TEMPMON_STAT_TEMP);
+	if (t->fields[0] == NULL)
+		NT_LOG(ERR, ETHDEV, "field is NULL\n");
+}
diff --git a/drivers/net/ntnic/sensors/board_sensors/tempmon.h b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
new file mode 100644
index 0000000000..6f2017b714
--- /dev/null
+++ b/drivers/net/ntnic/sensors/board_sensors/tempmon.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _TEMPMON_H
+#define _TEMPMON_H
+
+#include "nthw_fpga_model.h"
+#include <stdlib.h>
+
+#include "sensors.h"
+
+struct nt_fpga_sensor_monitor *tempmon_new(void);
+void tempmon_init(struct nt_fpga_sensor_monitor *t, nt_fpga_t *p_fpga);
+
+#endif /* _TEMPMON_H */
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
new file mode 100644
index 0000000000..e130855a35
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <arpa/inet.h>
+
+#include "nim_sensors.h"
+#include "ntlog.h"
+
+/* Shorthands for the sensor description tables below */
+#define TEMP NT_SENSOR_TYPE_TEMPERATURE
+#define VOLT NT_SENSOR_TYPE_VOLTAGE
+#define CURR NT_SENSOR_TYPE_CURRENT
+#define PWR NT_SENSOR_TYPE_POWER
+
+#define SNA NT_SENSOR_SUBTYPE_NA
+#define AVG NT_SENSOR_SUBTYPE_POWER_AVERAGE
+
+/* NOTE(review): ENA is currently unused -- every table entry uses DIA */
+#define ENA NT_SENSOR_ENABLE_ALARM
+#define DIA NT_SENSOR_DISABLE_ALARM
+
+/*
+ * Sensors for SFP/SFP+/SFP28. The name of the level 0 temperature sensor is
+ * empty and will then be set automatically
+ */
+struct nt_adapter_sensor_description sfp_sensors_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_SFP_TEMP, DIA, "" },
+};
+
+/* Level 1 (diagnostic) SFP sensors: supply voltage, bias and optical power */
+struct nt_adapter_sensor_description sfp_sensors_level1[4] = {
+	{ VOLT, SNA, NT_SENSOR_SFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_SFP_TX_BIAS, DIA, "Tx Bias" },
+	{ PWR, AVG, NT_SENSOR_SFP_TX_POWER, DIA, "Tx" },
+	{ PWR, AVG, NT_SENSOR_SFP_RX_POWER, DIA, "Rx" }
+};
+
+/* QSFP/QSFP+/QSFP28 level 0: module temperature (name auto-assigned) */
+struct nt_adapter_sensor_description qsfp_sensor_level0[1] = {
+	{ TEMP, SNA, NT_SENSOR_QSFP_TEMP, DIA, "" },
+};
+
+/* QSFP level 1: supply voltage plus per-lane (1-4) bias and optical power */
+struct nt_adapter_sensor_description qsfp_sensor_level1[13] = {
+	{ VOLT, SNA, NT_SENSOR_QSFP_SUPPLY, DIA, "Supply" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS1, DIA, "Tx Bias 1" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS2, DIA, "Tx Bias 2" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS3, DIA, "Tx Bias 3" },
+	{ CURR, SNA, NT_SENSOR_QSFP_TX_BIAS4, DIA, "Tx Bias 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER1, DIA, "Tx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER2, DIA, "Tx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER3, DIA, "Tx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_TX_POWER4, DIA, "Tx 4" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER1, DIA, "Rx 1" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER2, DIA, "Rx 2" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER3, DIA, "Rx 3" },
+	{ PWR, AVG, NT_SENSOR_QSFP_RX_POWER4, DIA, "Rx 4" }
+};
diff --git a/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
new file mode 100644
index 0000000000..c68c9aa924
--- /dev/null
+++ b/drivers/net/ntnic/sensors/nim_sensors/nim_sensors.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NIM_SENSORS_H
+#define _NIM_SENSORS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "sensors.h"
+
+#define XFP_TEMP_LIN_ADDR 96
+
+extern struct nt_adapter_sensor_description sfp_sensors_level0[1];
+extern struct nt_adapter_sensor_description sfp_sensors_level1[4];
+extern struct nt_adapter_sensor_description qsfp_sensor_level0[1];
+extern struct nt_adapter_sensor_description qsfp_sensor_level1[13];
+
+#endif /* _NIM_SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/ntavr/avr_intf.h b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
new file mode 100644
index 0000000000..feeec6e13a
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/avr_intf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _AVR_INTF
+#define _AVR_INTF
+
+#include <stdint.h>
+
+#define SENSOR_MON_UINT16_NAN 0xFFFF /* Most positive number used as NaN */
+#define SENSOR_MON_INT16_NAN \
+	((int16_t)0x8000) /* Most negative number used as NaN */
+
+/*
+ * Specify the nature of the raw data. AVR and ntservice must use this
+ * information when comparing or converting to native format which is little endian
+ */
+enum sensor_mon_endian { SENSOR_MON_LITTLE_ENDIAN, SENSOR_MON_BIG_ENDIAN };
+
+enum sensor_mon_sign {
+	SENSOR_MON_UNSIGNED,
+	SENSOR_MON_SIGNED, /* 2's complement */
+};
+
+/* Define sensor devices */
+enum sensor_mon_device {
+	SENSOR_MON_PSU_EXAR_7724_0 = 0, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_EXAR_7724_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0, /* na      NT100E3, page-0 */
+	SENSOR_MON_PSU_LTM_4676_1, /* na      NT100E3, page-0 */
+	SENSOR_MON_INA219_1, /* NT40E3, NT100E3 */
+	SENSOR_MON_INA219_2, /* NT40E3, NT100E3 */
+	SENSOR_MON_MAX6642, /* NT40E3, NT100E3 */
+	SENSOR_MON_DS1775, /* NT40E3, NT100E3 */
+	SENSOR_MON_FAN, /* NT40E3, NT100E3 */
+	SENSOR_MON_AVR, /* NT40E3, NT100E3 */
+	SENSOR_MON_PEX8734, /* na      NT100E3 */
+	SENSOR_MON_RATE_COUNT, /* NT40E3, NT100E3 */
+	SENSOR_MON_PSU_LTM_4676_0_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_PSU_LTM_4676_1_1, /* na      NT100E3, page-1 */
+	SENSOR_MON_MP2886A, /* na,     na,      NT200A02, */
+	SENSOR_MON_PSU_EM2260_1, /*     na,      na,      na,       na, NT200D01 */
+	SENSOR_MON_PSU_EM2120_2, /*     na,      na,      na,       na, NT200D01 */
+	SENSOR_MON_MP2886A_PSU_1, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_2, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_3, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8645PGVT_PSU_4, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_5, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_MP8869S_PSU_6, /*     na,      na,      na, NT200A02,        na,   NT50B01, */
+	SENSOR_MON_DEVICE_COUNT
+};
+
+/* Wire format sent to the AVR: byte-packed, one entry per sensor */
+#pragma pack(1)
+struct sensor_mon_setup_data16 {
+	uint8_t fpga_idx; /* Destination of results */
+	uint8_t device; /* Device to monitor */
+	uint8_t device_register; /* Sensor within device */
+	uint16_t mask; /* Indicates active bits */
+	uint8_t pos; /* Position of first active bit */
+	uint16_t format; /* b0,1:sensor_mon_endian_t endian */
+	/* b2,3:sensor_mon_sign_t   sign */
+	union {
+		struct {
+			int16_t limit_low; /* Signed alarm limit low */
+			int16_t limit_high; /* Signed alarm limit high */
+		} int16;
+
+		struct {
+			uint16_t limit_low; /* Unsigned alarm limit low */
+			uint16_t limit_high; /* Unsigned alarm limit high */
+		} uint16;
+	};
+};
+
+/*
+ * NOTE(review): packing is restored BEFORE sensor_mon_setup16, so only
+ * setup_data16 is byte-packed. No padding should result anyway (the
+ * packed member type has alignment 1), but confirm this matches the
+ * AVR-side layout.
+ */
+#pragma pack()
+struct sensor_mon_setup16 {
+	uint8_t setup_cnt; /* Number of entries in setup_data */
+	struct sensor_mon_setup_data16 setup_data[40];
+};
+
+/* Define sensor monitoring control */
+enum sensor_mon_control {
+	SENSOR_MON_CTRL_STOP = 0, /* Stop sensor monitoring */
+	SENSOR_MON_CTRL_RUN = 1, /* Start sensor monitoring */
+	SENSOR_MON_CTRL_REM_ALL_SENSORS =
+		2, /* Stop and remove all sensor monitoring setup */
+};
+
+#endif /* _AVR_INTF */
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.c b/drivers/net/ntnic/sensors/ntavr/ntavr.c
new file mode 100644
index 0000000000..6d8c3042b1
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntavr.h"
+#include "ntlog.h"
+
+/*
+ * Perform one SPI transfer to the AVR.
+ * On success, *rxsz (when non-NULL) is updated with the number of bytes
+ * actually received. Returns 0 on success, the nthw_spi_v3_transfer()
+ * error code otherwise.
+ * Fix: the original dereferenced *rxsz before its NULL check; the size
+ * is now only read when rxsz is non-NULL (the dead "res = 1" initializer
+ * is gone as well).
+ */
+static int txrx(nthw_spi_v3_t *s_spi, enum avr_opcodes opcode, size_t txsz,
+		uint16_t *tx, size_t *rxsz, uint16_t *rx)
+{
+	struct tx_rx_buf m_tx = { .size = (uint16_t)txsz, .p_buf = tx };
+	struct tx_rx_buf m_rx = { .size = rxsz ? (uint16_t)*rxsz : 0, .p_buf = rx };
+
+	int res = nthw_spi_v3_transfer(s_spi, opcode, &m_tx, &m_rx);
+
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s transfer failed - %i", __func__, res);
+		return res;
+	}
+
+	if (rxsz != NULL)
+		*rxsz = m_rx.size;
+
+	return res;
+}
+
+/*
+ * Read one raw sensor result from the FPGA slot fpga_idx into
+ * *p_sensor_result. Thin wrapper forwarding to nthw_spis_read_sensor();
+ * returns its status code.
+ */
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result)
+{
+	return nthw_spis_read_sensor(t_spi, fpga_idx, p_sensor_result);
+}
+
+/*
+ * Send a sensor-monitoring setup block to the AVR.
+ * Only the header plus the populated setup_data entries (setup_cnt of
+ * them) are transmitted. The AVR is expected to answer with no payload;
+ * any returned data is treated as an error.
+ * Returns 0 on success, the transfer error code, or 1 on unexpected
+ * response data.
+ */
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup, nthw_spi_v3_t *s_spi)
+{
+	int error;
+	size_t tx_size;
+	size_t rx_size = 0;
+
+	/* header size + only the entries actually in use */
+	tx_size = sizeof(struct sensor_mon_setup16) - sizeof(p_setup->setup_data);
+	tx_size += sizeof(p_setup->setup_data[0]) * p_setup->setup_cnt;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_SETUP, tx_size, (uint16_t *)p_setup,
+		     &rx_size, NULL);
+
+	if (error) {
+		NT_LOG(ERR, ETHDEV, "%s failed\n", __func__);
+		return error;
+	}
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Send a sensor-monitoring control command (stop/run/remove-all) to the
+ * AVR. The AVR is expected to answer with no payload.
+ * Returns 0 on success, the transfer error code, or 1 on unexpected
+ * response data.
+ * NOTE(review): sizeof(ctrl) is the size of the enum (typically 4
+ * bytes) -- confirm the AVR expects that width on the wire.
+ */
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl)
+{
+	int error;
+	size_t rx_size = 0;
+
+	error = txrx(s_spi, AVR_OP_SENSOR_MON_CONTROL, sizeof(ctrl),
+		     (uint16_t *)(&ctrl), &rx_size, NULL);
+
+	if (error != 0)
+		return error;
+
+	if (rx_size != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Returned data: Expected size = 0, Actual = %zu",
+		       __func__, rx_size);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/sensors/ntavr/ntavr.h b/drivers/net/ntnic/sensors/ntavr/ntavr.h
new file mode 100644
index 0000000000..b7a7aeb908
--- /dev/null
+++ b/drivers/net/ntnic/sensors/ntavr/ntavr.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTAVR_H
+#define _NTAVR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "avr_intf.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+
+/*
+ * @internal
+ * @brief AVR Device Enum
+ *
+ * Global names for identifying an AVR device for Generation2 adapters
+ */
+enum ntavr_device {
+	NTAVR_MAINBOARD, /* Mainboard AVR device */
+	NTAVR_FRONTBOARD /* Frontboard AVR device */
+};
+
+int nt_avr_sensor_mon_setup(struct sensor_mon_setup16 *p_setup,
+			nthw_spi_v3_t *s_spi);
+int nt_avr_sensor_mon_ctrl(nthw_spi_v3_t *s_spi, enum sensor_mon_control ctrl);
+uint32_t sensor_read(nthw_spis_t *t_spi, uint8_t fpga_idx,
+		     uint32_t *p_sensor_result);
+
+#endif /* _NTAVR_H */
diff --git a/drivers/net/ntnic/sensors/sensor_types.h b/drivers/net/ntnic/sensors/sensor_types.h
new file mode 100644
index 0000000000..e944dca5ce
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensor_types.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSOR_TYPES_H
+#define _SENSOR_TYPES_H
+
+/*
+ * Sensor types (the unit of the reported value depends on the type)
+ */
+enum nt_sensor_type_e {
+	NT_SENSOR_TYPE_UNKNOWN = 0,
+	NT_SENSOR_TYPE_TEMPERATURE = 1, /* Unit: 0.1 degree Celsius */
+	NT_SENSOR_TYPE_VOLTAGE = 2, /* Unit: 1 mV */
+	NT_SENSOR_TYPE_CURRENT = 3, /* Unit: 1 uA */
+	NT_SENSOR_TYPE_POWER = 4, /* Unit: 0.1 uW */
+	NT_SENSOR_TYPE_FAN = 5, /* Unit: 1 RPM (Revolutions Per Minute) */
+	NT_SENSOR_TYPE_HIGH_POWER = 6, /* Unit: 1 mW */
+	NT_SENSOR_TYPE_NUMBER = 7,
+};
+
+/*
+ * Generic SFP/SFP+/SFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific SFP sensors
+ * that have been deprecated.
+ */
+enum nt_sensors_sfp {
+	NT_SENSOR_SFP_TEMP,
+	NT_SENSOR_SFP_SUPPLY,
+	NT_SENSOR_SFP_TX_BIAS,
+	NT_SENSOR_SFP_TX_POWER,
+	NT_SENSOR_SFP_RX_POWER,
+};
+
+/*
+ * Generic QSFP/QSFP+/QSFP28 sensors
+ *
+ * These sensors should be used instead of all adapter specific QSFP sensors
+ * that have been deprecated.
+ */
+enum nt_sensors_qsfp {
+	NT_SENSOR_QSFP_TEMP,
+	NT_SENSOR_QSFP_SUPPLY,
+	NT_SENSOR_QSFP_TX_BIAS1,
+	NT_SENSOR_QSFP_TX_BIAS2,
+	NT_SENSOR_QSFP_TX_BIAS3,
+	NT_SENSOR_QSFP_TX_BIAS4,
+	NT_SENSOR_QSFP_TX_POWER1,
+	NT_SENSOR_QSFP_TX_POWER2,
+	NT_SENSOR_QSFP_TX_POWER3,
+	NT_SENSOR_QSFP_TX_POWER4,
+	NT_SENSOR_QSFP_RX_POWER1,
+	NT_SENSOR_QSFP_RX_POWER2,
+	NT_SENSOR_QSFP_RX_POWER3,
+	NT_SENSOR_QSFP_RX_POWER4,
+};
+
+typedef enum nt_sensor_type_e nt_sensor_type_t;
+
+/*
+ * Sensor subtypes
+ */
+enum nt_sensor_sub_type_e {
+	NT_SENSOR_SUBTYPE_NA = 0,
+	/*
+	 * Subtype for NT_SENSOR_TYPE_POWER type on optical modules (optical modulation
+	 * amplitude measured)
+	 */
+	NT_SENSOR_SUBTYPE_POWER_OMA,
+	/* Subtype for NT_SENSOR_TYPE_POWER type on optical modules (average power measured) */
+	NT_SENSOR_SUBTYPE_POWER_AVERAGE,
+	/* Subtype for NT_SENSOR_TYPE_HIGH_POWER type on adapters (total power consumption) */
+	NT_SENSOR_SUBTYPE_POWER_TOTAL
+};
+
+typedef enum nt_sensor_sub_type_e nt_sensor_sub_type_t;
+
+/*
+ * Sensor source (values are bit flags and may be OR'ed together)
+ */
+enum nt_sensor_source_e {
+	NT_SENSOR_SOURCE_UNKNOWN = 0x00, /* Unknown source */
+	/*
+	 * Sensors located in a port. These are primary sensors - usually NIM temperature. Presence
+	 * depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_PORT =
+		0x01,
+	/*
+	 * Level 1 sensors located in a port. These are secondary sensors - usually NIM supply
+	 * voltage, Tx bias and Rx/Tx optical power. Presence depends on adapter and NIM type.
+	 */
+	NT_SENSOR_SOURCE_LEVEL1_PORT =
+		0x02,
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_PORT =
+		0x04, /* Level 2 sensors located in a port */
+#endif
+	NT_SENSOR_SOURCE_ADAPTER = 0x08, /* Sensors mounted on the adapter */
+	NT_SENSOR_SOURCE_LEVEL1_ADAPTER =
+		0x10, /* Level 1 sensors mounted on the adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_SENSOR_SOURCE_LEVEL2_ADAPTER =
+		0x20, /* Level 2 sensors mounted on the adapter */
+#endif
+};
+
+/*
+ * Sensor state
+ */
+enum nt_sensor_state_e {
+	NT_SENSOR_STATE_UNKNOWN = 0, /* Unknown state */
+	NT_SENSOR_STATE_INITIALIZING = 1, /* The sensor is initializing */
+	NT_SENSOR_STATE_NORMAL = 2, /* Sensor values are within range */
+	NT_SENSOR_STATE_ALARM = 3, /* Sensor values are out of range */
+	NT_SENSOR_STATE_NOT_PRESENT =
+		4 /* The sensor is not present, for example, SFP without diagnostics */
+};
+
+typedef enum nt_sensor_state_e nt_sensor_state_t;
+
+/*
+ * Sensor value
+ */
+#define NT_SENSOR_NAN \
+	(0x80000000) /* Indicates that sensor value or sensor limit is not valid (Not a Number) */
+
+/*
+ * Primary/Secondary
+ */
+enum nt_bonding_type_e {
+	NT_BONDING_UNKNOWN, /* Unknown bonding type */
+	NT_BONDING_PRIMARY, /* Adapter is primary in the bonding */
+	NT_BONDING_SECONDARY, /* Adapter is secondary in the bonding */
+	NT_BONDING_PEER /* Adapter is bonded, but relationship is symmetric */
+};
+
+enum nt_sensors_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_FPGA_TEMP, /* FPGA temperature sensor */
+};
+
+/*
+ * Adapter types
+ */
+enum nt_adapter_type_e {
+	NT_ADAPTER_TYPE_UNKNOWN = 0, /* Unknown adapter type */
+	NT_ADAPTER_TYPE_NT4E, /* NT4E network adapter */
+	NT_ADAPTER_TYPE_NT20E, /* NT20E network adapter */
+	NT_ADAPTER_TYPE_NT4E_STD, /* NT4E-STD network adapter */
+	NT_ADAPTER_TYPE_NT4E_PORT, /* NTPORT4E expansion adapter */
+	NT_ADAPTER_TYPE_NTBPE, /* NTBPE bypass adapter */
+	NT_ADAPTER_TYPE_NT20E2, /* NT20E2 network adapter */
+	NT_ADAPTER_TYPE_RESERVED1, /* Reserved */
+	NT_ADAPTER_TYPE_RESERVED2, /* Reserved */
+	NT_ADAPTER_TYPE_NT40E2_1, /* NT40E2-1 network adapter */
+	NT_ADAPTER_TYPE_NT40E2_4, /* NT40E2-4 network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4T_BP, /* NT4E2-4T-BP bypass network adapter */
+	NT_ADAPTER_TYPE_NT4E2_4_PTP, /* NT4E2-4 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E2_PTP, /* NT20E2 PTP network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT40E3_4_PTP, /* NT40E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT100E3_1_PTP, /* NT100E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT20E3_2_PTP, /* NT20E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP, /* NT80E3 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200E3_2, /* NT200E3 network adapter */
+	NT_ADAPTER_TYPE_NT200A01, /* NT200A01 network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X100 =
+		NT_ADAPTER_TYPE_NT200A01, /* NT200A01 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A01_4X1, /* NT40A01_4X1 network adapter with IEEE1588 */
+	NT_ADAPTER_TYPE_NT200A01_2X40, /* NT200A01 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT80E3_2_PTP_8X10, /* NT80E3 8 x 10 Gbps network adapter with IEEE1588 */
+	/*  */
+	NT_ADAPTER_TYPE_INTEL_A10_4X10, /* Intel PAC A10 GX 4 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_INTEL_A10_1X40, /* Intel PAC A10 GX 1 x 40 Gbps network adapter */
+	/*  */
+	NT_ADAPTER_TYPE_NT200A01_8X10, /* NT200A01 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X100, /* NT200A02 2 x 100 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X40, /* NT200A02 2 x 40 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A01_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A01_2X25, /* NT200A01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_2X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_2X25, /* NT200A02 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_4X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT200A02_4X10_25 =
+		NT_ADAPTER_TYPE_NT200A02_4X25, /* NT200A02 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_8X10, /* NT200A02 8 x 10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X25, /* Deprecated */
+	NT_ADAPTER_TYPE_NT50B01_2X10_25 =
+		NT_ADAPTER_TYPE_NT50B01_2X25, /* NT50B01 2 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT200A02_2X1_10, /* NT200A02 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X1_10, /* NT100A01 4 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT100A01_4X10_25, /* NT100A01 4 x 10/25 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT50B01_2X1_10, /* NT50B01 2 x 1/10 Gbps network adapter */
+	NT_ADAPTER_TYPE_NT40A11_4X1_10, /* NT40A11 4 x 1/10 Gbps network adapter */
+#ifndef DOXYGEN_INTERNAL_ONLY
+	NT_ADAPTER_TYPE_ML605 = 10000, /* NT20E2 eval board */
+#endif
+	/* High bits are architecture group flags, not ordinary adapter ids */
+	NT_ADAPTER_TYPE_4GARCH_HAMOA =
+		(1U
+		 << 29), /* Bit to mark to adapters as a 4GArch Hamoa adapter */
+	NT_ADAPTER_TYPE_4GARCH =
+		(1U << 30), /* Bit to mark to adapters as a 4GArch adapter */
+	/* NOTE: do *NOT* add normal adapters after the group bit mark enums */
+};
+
+/* The NT200E3 adapter sensor id's */
+typedef enum nt_sensors_adapter_nt200_e3_e {
+	/* Public sensors (Level 0) */
+	NT_SENSOR_NT200E3_FPGA_TEMP, /* FPGA temperature sensor */
+	NT_SENSOR_NT200E3_FAN_SPEED, /* FAN speed sensor */
+	/* MCU (Micro Controller Unit) temperature sensor located inside enclosure below FAN */
+	NT_SENSOR_NT200E3_MCU_TEMP,
+	NT_SENSOR_NT200E3_PSU0_TEMP, /* Power supply 0 temperature sensor */
+	NT_SENSOR_NT200E3_PSU1_TEMP, /* Power supply 1 temperature sensor */
+	NT_SENSOR_NT200E3_PCB_TEMP, /* PCB temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	/* Total power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NT200E3_POWER,
+	/* FPGA power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_FPGA_POWER,
+	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_DDR4_POWER,
+	/* NIM power consumption (calculated value) - does not generate alarms */
+	NT_SENSOR_NT200E3_NIM_POWER,
+
+	NT_SENSOR_NT200E3_L1_MAX, /* Number of NT200E3 level 0,1 board sensors */
+} nt_sensors_adapter_nt200_e3_t;
+
+/*
+ * The following sensors are deprecated - generic types should be used instead
+ * The NIM temperature sensor must be the one with the lowest sensor_index
+ * (enum value) in order to be shown by the monitoring tool in port mode
+ */
+enum nt_sensors_port_nt200_e3_2_e {
+	/* Public sensors */
+	NT_SENSOR_NT200E3_NIM, /* QSFP28 temperature sensor */
+
+	/* Diagnostic sensors (Level 1) */
+	NT_SENSOR_NT200E3_SUPPLY, /* QSFP28 supply voltage sensor */
+	NT_SENSOR_NT200E3_TX_BIAS1, /* QSFP28 TX bias line 0 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS2, /* QSFP28 TX bias line 1 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS3, /* QSFP28 TX bias line 2 current sensor */
+	NT_SENSOR_NT200E3_TX_BIAS4, /* QSFP28 TX bias line 3 current sensor */
+	NT_SENSOR_NT200E3_RX1, /* QSFP28 RX line 0 power sensor */
+	NT_SENSOR_NT200E3_RX2, /* QSFP28 RX line 1 power sensor */
+	NT_SENSOR_NT200E3_RX3, /* QSFP28 RX line 2 power sensor */
+	NT_SENSOR_NT200E3_RX4, /* QSFP28 RX line 3 power sensor */
+	NT_SENSOR_NT200E3_TX1, /* QSFP28 TX line 0 power sensor */
+	NT_SENSOR_NT200E3_TX2, /* QSFP28 TX line 1 power sensor */
+	NT_SENSOR_NT200E3_TX3, /* QSFP28 TX line 2 power sensor */
+	NT_SENSOR_NT200E3_TX4, /* QSFP28 TX line 3 power sensor */
+	NT_SENSOR_NT200E3_PORT_MAX, /* Number of NT200E3 port sensors */
+};
+
+#endif
diff --git a/drivers/net/ntnic/sensors/sensors.c b/drivers/net/ntnic/sensors/sensors.c
new file mode 100644
index 0000000000..2a85843196
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
#include <stdio.h>
#include <string.h>

#include "sensors.h"
#include "ntlog.h"
+
+void sensor_deinit(struct nt_sensor_group *sg)
+{
+	if (sg) {
+		if (sg->sensor)
+			free(sg->sensor);
+		if (sg->monitor)
+			free(sg->monitor);
+		free(sg);
+	}
+}
+
+struct nt_adapter_sensor *
+allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
+		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
+		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
+		enum sensor_mon_sign si)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = si;
+
+	sensor->info.source = ssrc;
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.sensor_index = index;
+	sensor->info.type = type;
+	sensor->info.sub_type = NT_SENSOR_SUBTYPE_NA;
+	sensor->info.state = NT_SENSOR_STATE_INITIALIZING;
+	sensor->info.value = NT_SENSOR_NAN;
+	sensor->info.value_lowest = NT_SENSOR_NAN;
+	sensor->info.value_highest = NT_SENSOR_NAN;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, p_name,
+	       (strlen(p_name) > NT_INFO_SENSOR_NAME) ? NT_INFO_SENSOR_NAME :
+	       strlen(p_name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value)
+{
+	if (sensor == NULL)
+		return;
+	sensor->info.value = value;
+	if (sensor->info.value_highest < value ||
+			(unsigned int)sensor->info.value_highest == NT_SENSOR_NAN)
+		sensor->info.value_highest = value;
+	if (sensor->info.value_lowest > value ||
+			(unsigned int)sensor->info.value_lowest == NT_SENSOR_NAN)
+		sensor->info.value_lowest = value;
+}
+
+struct nt_adapter_sensor *
+allocate_sensor_by_description(uint8_t adapter_or_port_index,
+			       enum nt_sensor_source_e ssrc,
+			       struct nt_adapter_sensor_description *descr)
+{
+	struct nt_adapter_sensor *sensor =
+		(struct nt_adapter_sensor *)malloc(sizeof(struct nt_adapter_sensor));
+	if (sensor == NULL) {
+		NT_LOG(ERR, ETHDEV, "%s: sensor is NULL", __func__);
+		return NULL;
+	}
+
+	sensor->alarm = descr->event_alarm;
+	sensor->m_enable_alarm = true;
+	sensor->m_intf_no = 0xFF;
+	sensor->m_adapter_no = 0xFF;
+	sensor->si = SENSOR_MON_UNSIGNED;
+
+	sensor->info.source_index = adapter_or_port_index;
+	sensor->info.source = ssrc;
+	sensor->info.type = descr->type;
+	sensor->info.sensor_index = descr->index;
+	memset(sensor->info.name, 0, NT_INFO_SENSOR_NAME);
+	memcpy(sensor->info.name, descr->name,
+	       (strlen(descr->name) > NT_INFO_SENSOR_NAME) ?
+	       NT_INFO_SENSOR_NAME :
+	       strlen(descr->name));
+	sensor->info.name[NT_INFO_SENSOR_NAME] = '\0';
+
+	return sensor;
+}
+
+void init_sensor_group(struct nt_sensor_group *sg)
+{
+	/* Set all pointers to NULL */
+	sg->sensor = NULL;
+	sg->monitor = NULL;
+	sg->next = NULL;
+	sg->read = NULL;
+	sg->conv_func = NULL;
+}
+
+/* Getters */
+int32_t get_value(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value;
+};
+
+int32_t get_lowest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_lowest;
+};
+
+int32_t get_highest(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.value_highest;
+};
+
+char *get_name(struct nt_sensor_group *sg)
+{
+	return sg->sensor->info.name;
+};
+
/* Conversion functions */

/* Pass-through conversion: interpret the low 16 raw bits as signed. */
int null_signed(uint32_t p_sensor_result)
{
	int16_t raw = (int16_t)p_sensor_result;

	return raw;
}
+
/* Pass-through conversion: interpret the low 16 raw bits as unsigned. */
int null_unsigned(uint32_t p_sensor_result)
{
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw;
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vch value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.015 (PRESCALE is accounted for)
 * ******************************************************************************
 */
int exar7724_vch(uint32_t p_sensor_result)
{
	/* 0.015 V per step expressed in the NT unit of 1 mV */
	return (int)(p_sensor_result * 15u);
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Vin value to Napatech internal representation
 * Doc: Vout = ReadVal * 0.0125
 * ******************************************************************************
 */
int exar7724_vin(uint32_t p_sensor_result)
{
	/* 0.0125 V per step = 25/2 mV; NT unit: 1 mV */
	return (int)(p_sensor_result * 25u / 2u);
}
+
/*
 * ******************************************************************************
 * For EXAR7724: Convert a read Tj value to Napatech internal representation
 * Doc: Temp (in Kelvin) = (((ReadVal * 10mV) - 600mV) / (2mV/K)) + 300K =
 *                      = ReadVal * 5K
 * ******************************************************************************
 */
int exar7724_tj(uint32_t p_sensor_result)
{
	/*
	 * 5 K per step, converted to the NT unit of 0.1C.  The Kelvin offset
	 * uses 2730 rather than the more exact 2732 so results stay on the
	 * sensor's natural 5-degree grid.
	 */
	return (int)(p_sensor_result * 50u) - 2730;
}
+
/*
 * ******************************************************************************
 * Conversion function for the Linear Technology Linear_5s_11s format.
 * Returns Y * 2**N, where N = b[15:11] is a 5-bit two's complement integer
 * and Y = b[10:0] is an 11-bit two's complement integer.
 * The multiplier value is used for scaling to Napatech units.
 * ******************************************************************************
 */
static int conv5s_11s(uint16_t value, int multiplier)
{
	/* Sign-extend the 11-bit mantissa (sign bit is bit 10). */
	int mantissa = value & 0x07FF;

	if (mantissa & 0x0400)
		mantissa -= 0x0800;

	/* Sign-extend the 5-bit exponent (sign bit is bit 15). */
	int exponent = (value >> 11) & 0x1F;

	if (exponent & 0x10)
		exponent -= 0x20;

	int scaled = mantissa * multiplier;

	if (exponent > 0)
		scaled *= (1 << exponent);
	else if (exponent < 0)
		scaled /= (1 << -exponent);

	return scaled;
}
+
/*
 * ******************************************************************************
 * Temperature conversion from Linear_5s_11s format.  NT unit: 0.1C.
 * NOTE(review): the decoded value is narrowed through uint16_t, so a negative
 * temperature would wrap to a large positive number - presumably readings are
 * expected to be non-negative; confirm against callers.
 * ******************************************************************************
 */
int ltm4676_tj(uint32_t p_sensor_result)
{
	int decoded = conv5s_11s((uint16_t)p_sensor_result, 10);

	return (uint16_t)decoded;
}
+
/*
 * ******************************************************************************
 * For MP2886a: Convert a read Tj value to Napatech internal representation
 * ******************************************************************************
 */
int mp2886a_tj(uint32_t p_sensor_result)
{
	/*
	 * MPS-2886p: READ_TEMPERATURE (register 0x8Dh) is a 2-byte, unsigned
	 * integer; only the low 16 bits of the raw word are significant.
	 */
	uint16_t raw = (uint16_t)p_sensor_result;

	return raw; /* NT unit: 0.1C */
}
+
/*
 * ******************************************************************************
 * For MAX6642: Convert a read temperature value to Napatech internal representation
 * An all-ones high byte (0xFF) marks an invalid/failed reading.
 * ******************************************************************************
 */
int max6642_t(uint32_t p_sensor_result)
{
	if ((p_sensor_result >> 8) == 0xFF)
		return NT_SENSOR_NAN;

	/* The six lower bits are not used; >>6 yields 0.25C steps, *5/2 = *2.5 */
	return (int)(((p_sensor_result >> 6) * 5) /
		     2); /* NT unit: 0.1C, Native unit: 0.25 deg */
}
+
/*
 * ******************************************************************************
 * For DS1775: Convert a read temperature value to Napatech internal representation
 * NT unit: 0.1 deg, Native unit: 1/256 C
 * ******************************************************************************
 */
int ds1775_t(uint32_t p_sensor_result)
{
	return (int)(p_sensor_result * 10u / 256u);
}
+
/*
 * ******************************************************************************
 * For FAN: Convert a tick count to RPM
 * NT unit: RPM, Native unit: 2 ticks/revolution
 * ******************************************************************************
 */
int fan(uint32_t p_sensor_result)
{
	return (int)(p_sensor_result * 60u / 4u);
}
diff --git a/drivers/net/ntnic/sensors/sensors.h b/drivers/net/ntnic/sensors/sensors.h
new file mode 100644
index 0000000000..1424b8bc83
--- /dev/null
+++ b/drivers/net/ntnic/sensors/sensors.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _SENSORS_H
+#define _SENSORS_H
+
+#include "sensor_types.h"
+#include "stream_info.h"
+#include "nthw_platform_drv.h"
+#include "nthw_drv.h"
+#include "nthw_spi_v3.h"
+#include "nthw_fpga_model.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "avr_intf.h"
+
/* Alarm reporting policy for a sensor. */
enum nt_sensor_event_alarm_e {
	NT_SENSOR_ENABLE_ALARM,
	NT_SENSOR_LOG_ALARM,
	NT_SENSOR_DISABLE_ALARM,
};
+
/*
 * Sensor Class types
 */
enum nt_sensor_class_e {
	NT_SENSOR_CLASS_FPGA =
		0, /* Class for FPGA based sensors e.g FPGA temperature */
	NT_SENSOR_CLASS_MCU =
		1, /* Class for MCU based sensors e.g MCU temperature */
	NT_SENSOR_CLASS_PSU =
		2, /* Class for PSU based sensors e.g PSU temperature */
	NT_SENSOR_CLASS_PCB =
		3, /* Class for PCB based sensors e.g PCB temperature */
	NT_SENSOR_CLASS_NIM =
		4, /* Class for NIM based sensors e.g NIM temperature */
	NT_SENSOR_CLASS_ANY = 5, /* Class for ANY sensors e.g any sensors */
};

/* Convenience alias for enum nt_sensor_class_e */
typedef enum nt_sensor_class_e nt_sensor_class_t;
+
/*
 * Per-sensor runtime object (port or adapter scope).
 */
struct nt_adapter_sensor {
	uint8_t m_adapter_no; /* initialized to 0xFF (unassigned) by the allocators */
	uint8_t m_intf_no; /* initialized to 0xFF (unassigned) by the allocators */
	uint8_t fpga_idx; /* for AVR sensors */
	enum sensor_mon_sign si; /* signed/unsigned interpretation of raw readings */
	struct nt_info_sensor_s info; /* public state: identity, value, extremes */
	enum nt_sensor_event_alarm_e alarm; /* alarm reporting policy */
	bool m_enable_alarm; /* alarm generation enabled for this sensor */
};
+
/*
 * FPGA access handles backing a sensor: the owning module, the data
 * register and the individual fields within it.
 */
struct nt_fpga_sensor_monitor {
	nt_fpga_t *fpga;
	nt_module_t *mod;

	nt_register_t *reg;
	nt_field_t **fields; /* array of 'fields_num' field handles */
	uint8_t fields_num;
};
+
/*
 * Sensor description.
 * Describes the static behavior of the sensor.
 */
struct nt_adapter_sensor_description {
	enum nt_sensor_type_e type; /* Sensor type. */
	enum nt_sensor_sub_type_e sub_type; /* Sensor subtype (if any applicable) */
	unsigned int index; /* Sensor group index. */
	enum nt_sensor_event_alarm_e event_alarm; /* Enable/Disable event alarm */
	char name[20]; /* Sensor name. */
};
+
/*
 * Linked-list node tying a sensor to the machinery that reads and converts
 * its raw value.  'sensor' and 'monitor' are owned by the group and are
 * released by sensor_deinit().
 */
struct nt_sensor_group {
	struct nt_adapter_sensor *sensor;
	struct nt_fpga_sensor_monitor *monitor;
	void (*read)(struct nt_sensor_group *sg, nthw_spis_t *t_spi); /* fetch a reading */

	/* conv params are needed to call current conversion functions */
	int (*conv_func)(uint32_t p_sensor_result); /* raw value -> NT internal units */
	/* i2c interface for NIM sensors */

	struct nt_sensor_group *next; /* next node in the list, NULL terminated */
};
+
/* Reset all pointers in a sensor group to a known-empty state. */
void init_sensor_group(struct nt_sensor_group *sg);

/* Store a new reading and update the lowest/highest watermarks. */
void update_sensor_value(struct nt_adapter_sensor *sensor, int32_t value);

/* Free a sensor group together with its sensor and monitor objects. */
void sensor_deinit(struct nt_sensor_group *sg);

/* getters */
int32_t get_value(struct nt_sensor_group *sg);
int32_t get_lowest(struct nt_sensor_group *sg);
int32_t get_highest(struct nt_sensor_group *sg);
char *get_name(struct nt_sensor_group *sg);

/* Allocate and initialize a sensor from explicit attributes. */
struct nt_adapter_sensor *
allocate_sensor(uint8_t adapter_or_port_index, const char *p_name,
		enum nt_sensor_source_e ssrc, enum nt_sensor_type_e type,
		unsigned int index, enum nt_sensor_event_alarm_e event_alarm,
		enum sensor_mon_sign si);
/* Allocate and initialize a sensor from a static description. */
struct nt_adapter_sensor *
allocate_sensor_by_description(uint8_t adapter_or_port_index,
			       enum nt_sensor_source_e ssrc,
			       struct nt_adapter_sensor_description *descr);

/* conversion functions (raw sensor word -> Napatech internal units) */
int null_signed(uint32_t p_sensor_result);
int null_unsigned(uint32_t p_sensor_result);
int exar7724_tj(uint32_t p_sensor_result);
int max6642_t(uint32_t p_sensor_result);
int ds1775_t(uint32_t p_sensor_result);
int ltm4676_tj(uint32_t p_sensor_result);
int exar7724_vch(uint32_t p_sensor_result);
int exar7724_vin(uint32_t p_sensor_result);
int mp2886a_tj(uint32_t p_sensor_result);
int fan(uint32_t p_sensor_result);
+
+#endif /* _SENSORS_H */
diff --git a/drivers/net/ntnic/sensors/stream_info.h b/drivers/net/ntnic/sensors/stream_info.h
new file mode 100644
index 0000000000..b94231fd8b
--- /dev/null
+++ b/drivers/net/ntnic/sensors/stream_info.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_INFO_H
+#define _STREAM_INFO_H
+
+#include "sensor_types.h"
+
+#include <stdint.h>
+
+/*
+ * This structure will return the sensor specific information
+ *
+ * The units used for the fields: value, value_lowest, value_highest, limit_low and
+ * limit_high depend on the type field. See @ref nt_sensor_type_e.
+ *
+ * For the limit_low and limit_high fields the following applies:
+ * If the sensor is located in a NIM (Network Interface Module), the limits are read
+ * from the NIM module via the DMI (Diagnostic Monitoring Interface) from the alarm
+ * and warning thresholds section, and the units are changed to internal representation.
+ * Only the alarm thresholds are used and are read only once during initialization.
+ * The limits cannot be changed.
+ *
+ * The value field is updated internally on a regular basis and is also based on a
+ * value read from the NIM which is also changed to internal representation.
+ *
+ * Not all NIM types support DMI data, and its presence must be determined by reading an
+ * option flag. In general, a NIM can read out: temperature, supply voltage,
+ * TX bias, TX optical power and RX optical power but not all NIM types support all
+ * 5 values.
+ *
+ * If external calibration is used (most NIM use internal calibration), both the
+ * current value and the threshold values are subjected to the specified calibration
+ * along with the change to internal calibration.
+ */
/* Maximum sensor name length, excluding the terminating NUL */
#define NT_INFO_SENSOR_NAME 50
struct nt_info_sensor_s {
	enum nt_sensor_source_e
	source; /* The source of the sensor (port or adapter on which the sensor resides) */
	/*
	 * The source index - the adapter number for adapter sensors and port number for port
	 * sensors
	 */
	uint32_t source_index;
	/*
	 * The sensor index within the source index (sensor number on the adapter or sensor number
	 * on the port)
	 */
	uint32_t sensor_index;
	enum nt_sensor_type_e type; /* The sensor type */
	enum nt_sensor_sub_type_e sub_type; /* The sensor subtype (if applicable) */
	enum nt_sensor_state_e state; /* The current state (normal or alarm) */
	int32_t value; /* The current value */
	int32_t value_lowest; /* The lowest value registered */
	int32_t value_highest; /* The highest value registered */
	char name[NT_INFO_SENSOR_NAME + 1]; /* The sensor name (NUL-terminated) */
	enum nt_adapter_type_e
	adapter_type; /* The adapter type where the sensor resides */
};
+
/* The NT200A02 adapter sensor id's */
enum nt_sensors_adapter_nt200a02_e {
	/* Public sensors (Level 0) */
	NT_SENSOR_NT200A02_FPGA_TEMP, /* FPGA temperature sensor */
	NT_SENSOR_NT200A02_FAN_SPEED, /* FAN speed sensor */

	NT_SENSOR_NT200A02_MCU_TEMP, /* MCU temperature sensor */
	NT_SENSOR_NT200A02_PSU0_TEMP, /* Power supply 0 temperature sensor */
	NT_SENSOR_NT200A02_PSU1_TEMP, /* Power supply 1 temperature sensor */
	NT_SENSOR_NT200A02_PCB_TEMP, /* PCB temperature sensor */

	/* Diagnostic sensors (Level 1) */
	/* Total power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NT200A02_POWER,
	/* FPGA power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_FPGA_POWER,
	/* DDR4 RAM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_DDR4_POWER,
	/* NIM power consumption (calculated value) - does not generate alarms */
	NT_SENSOR_NT200A02_NIM_POWER,

	NT_SENSOR_NT200A02_L1_MAX, /* Number of NT200A02 level 0,1 board sensors */
};
+
+#endif
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 4/8] net/ntnic: adds flow related FPGA functionality
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
@ 2023-09-08 16:07   ` Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The PMD will control the registers used for flow programming,
and this commit adds support for this.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_filter/flow_nthw_cat.c    | 1107 ++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_cat.h    |  372 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.c    |  146 +++
 .../ntnic/nthw/flow_filter/flow_nthw_csu.h    |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_flm.c    | 1140 +++++++++++++++++
 .../ntnic/nthw/flow_filter/flow_nthw_flm.h    |  422 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.c    |  293 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_hfu.h    |  100 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.c    |  254 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_hsh.h    |   81 ++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.c    |  202 +++
 .../ntnic/nthw/flow_filter/flow_nthw_hst.h    |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.c    |   93 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ifr.h    |   39 +
 .../ntnic/nthw/flow_filter/flow_nthw_info.c   |  341 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_info.h   |  104 ++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.c    |  234 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_ioa.h    |   80 ++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.c |  685 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_nthw_km.h |  224 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.c    |  230 ++++
 .../ntnic/nthw/flow_filter/flow_nthw_pdb.h    |   84 ++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.c    |  355 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_qsl.h    |  121 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.c    |  112 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rmc.h    |   40 +
 .../ntnic/nthw/flow_filter/flow_nthw_roa.c    |  294 +++++
 .../ntnic/nthw/flow_filter/flow_nthw_roa.h    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c |  132 ++
 .../ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h |   53 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc.c    |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc.h    |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.c |  109 ++
 .../ntnic/nthw/flow_filter/flow_nthw_slc_lr.h |   46 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c |  394 ++++++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h |   72 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.c |   96 ++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_ins.h |   42 +
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c |  165 +++
 .../ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h |   70 +
 41 files changed, 8731 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 2552b5d68d..8c065ee9a3 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -59,6 +60,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_filter/flow_nthw_cat.c',
+    'nthw/flow_filter/flow_nthw_csu.c',
+    'nthw/flow_filter/flow_nthw_flm.c',
+    'nthw/flow_filter/flow_nthw_hfu.c',
+    'nthw/flow_filter/flow_nthw_hsh.c',
+    'nthw/flow_filter/flow_nthw_hst.c',
+    'nthw/flow_filter/flow_nthw_ifr.c',
+    'nthw/flow_filter/flow_nthw_info.c',
+    'nthw/flow_filter/flow_nthw_ioa.c',
+    'nthw/flow_filter/flow_nthw_km.c',
+    'nthw/flow_filter/flow_nthw_pdb.c',
+    'nthw/flow_filter/flow_nthw_qsl.c',
+    'nthw/flow_filter/flow_nthw_rmc.c',
+    'nthw/flow_filter/flow_nthw_roa.c',
+    'nthw/flow_filter/flow_nthw_rpp_lr.c',
+    'nthw/flow_filter/flow_nthw_slc.c',
+    'nthw/flow_filter/flow_nthw_slc_lr.c',
+    'nthw/flow_filter/flow_nthw_tx_cpy.c',
+    'nthw/flow_filter/flow_nthw_tx_ins.c',
+    'nthw/flow_filter/flow_nthw_tx_rpl.c',
     'nthw/nthw_fpga_model.c',
     'nthw/nthw_dbs.c',
     'nthw/nthw_epp.c',
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
new file mode 100644
index 0000000000..91376363c1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.c
@@ -0,0 +1,1107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_cat.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+struct cat_nthw *cat_nthw_new(void)
+{
+	struct cat_nthw *p = malloc(sizeof(struct cat_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void cat_nthw_delete(struct cat_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
/* Propagate the debug-mode setting to the underlying CAT FPGA module. */
void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode)
{
	module_set_debug_mode(p->m_cat, n_debug_mode);
}
+
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CAT, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Cat %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_cat = p_mod;
+
+	p->m_km_if_cnt = fpga_get_product_param(p->mp_fpga, NT_CAT_KM_IF_CNT, -1);
+
+	/* CFN */
+	p->mp_cfn_ctrl = module_get_register(p->m_cat, CAT_CFN_CTRL);
+	p->mp_cfn_addr = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_ADR);
+	p->mp_cfn_cnt = register_get_field(p->mp_cfn_ctrl, CAT_CFN_CTRL_CNT);
+	p->mp_cfn_data = module_get_register(p->m_cat, CAT_CFN_DATA);
+	p->mp_cfn_data_enable =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ENABLE);
+	p->mp_cfn_data_inv = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_INV);
+	p->mp_cfn_data_ptc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_INV);
+	p->mp_cfn_data_ptc_isl =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_ISL);
+	p->mp_cfn_data_ptc_mac =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MAC);
+	p->mp_cfn_data_ptc_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L2);
+	p->mp_cfn_data_ptc_vn_tag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VNTAG);
+	p->mp_cfn_data_ptc_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_VLAN);
+	p->mp_cfn_data_ptc_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_MPLS);
+	p->mp_cfn_data_ptc_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L3);
+	p->mp_cfn_data_ptc_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_FRAG);
+	p->mp_cfn_data_ptc_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_IP_PROT);
+	p->mp_cfn_data_ptc_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_L4);
+	p->mp_cfn_data_ptc_tunnel =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TUNNEL);
+	p->mp_cfn_data_ptc_tnl_l2 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L2);
+	p->mp_cfn_data_ptc_tnl_vlan =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_VLAN);
+	p->mp_cfn_data_ptc_tnl_mpls =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_MPLS);
+	p->mp_cfn_data_ptc_tnl_l3 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L3);
+	p->mp_cfn_data_ptc_tnl_frag =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_FRAG);
+	p->mp_cfn_data_ptc_tnl_ip_prot =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_IP_PROT);
+	p->mp_cfn_data_ptc_tnl_l4 =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_TNL_L4);
+	p->mp_cfn_data_err_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_INV);
+	p->mp_cfn_data_err_cv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_CV);
+	p->mp_cfn_data_err_fcs =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_FCS);
+	p->mp_cfn_data_err_trunc =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TRUNC);
+	p->mp_cfn_data_mac_port =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_MAC_PORT);
+	p->mp_cfn_data_pm_cmp =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMP);
+	p->mp_cfn_data_pm_dct =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_DCT);
+	p->mp_cfn_data_pm_ext_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_EXT_INV);
+	p->mp_cfn_data_pm_cmb =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_CMB);
+	p->mp_cfn_data_pm_and_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_AND_INV);
+	p->mp_cfn_data_pm_or_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_OR_INV);
+	p->mp_cfn_data_pm_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_PM_INV);
+	p->mp_cfn_data_lc = register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC);
+	p->mp_cfn_data_lc_inv =
+		register_get_field(p->mp_cfn_data, CAT_CFN_DATA_LC_INV);
+
+	if (p->m_km_if_cnt == -1) {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM_OR);
+	} else {
+		p->mp_cfn_data_km0_or =
+			register_get_field(p->mp_cfn_data, CAT_CFN_DATA_KM0_OR);
+		p->mp_cfn_data_km1_or =
+			register_query_field(p->mp_cfn_data, CAT_CFN_DATA_KM1_OR);
+	}
+
+	if (p->m_km_if_cnt < 0) {
+		/* KCE */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE_DATA);
+		p->mp_kce_data_enable[0] =
+			register_get_field(p->mp_kce_data[0], CAT_KCE_DATA_ENABLE);
+		/* KCS */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS_DATA_CATEGORY);
+		/* FTE */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE_DATA);
+		p->mp_fte_data_enable[0] =
+			register_get_field(p->mp_fte_data[0], CAT_FTE_DATA_ENABLE);
+	} else {
+		/* KCE 0 */
+		p->mp_kce_ctrl[0] = module_get_register(p->m_cat, CAT_KCE0_CTRL);
+		p->mp_kce_addr[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_ADR);
+		p->mp_kce_cnt[0] =
+			register_get_field(p->mp_kce_ctrl[0], CAT_KCE0_CTRL_CNT);
+		p->mp_kce_data[0] = module_get_register(p->m_cat, CAT_KCE0_DATA);
+		p->mp_kce_data_enable[0] = register_get_field(p->mp_kce_data[0],
+					CAT_KCE0_DATA_ENABLE);
+		/* KCS 0 */
+		p->mp_kcs_ctrl[0] = module_get_register(p->m_cat, CAT_KCS0_CTRL);
+		p->mp_kcs_addr[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_ADR);
+		p->mp_kcs_cnt[0] =
+			register_get_field(p->mp_kcs_ctrl[0], CAT_KCS0_CTRL_CNT);
+		p->mp_kcs_data[0] = module_get_register(p->m_cat, CAT_KCS0_DATA);
+		p->mp_kcs_data_category[0] =
+			register_get_field(p->mp_kcs_data[0], CAT_KCS0_DATA_CATEGORY);
+		/* FTE 0 */
+		p->mp_fte_ctrl[0] = module_get_register(p->m_cat, CAT_FTE0_CTRL);
+		p->mp_fte_addr[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_ADR);
+		p->mp_fte_cnt[0] =
+			register_get_field(p->mp_fte_ctrl[0], CAT_FTE0_CTRL_CNT);
+		p->mp_fte_data[0] = module_get_register(p->m_cat, CAT_FTE0_DATA);
+		p->mp_fte_data_enable[0] = register_get_field(p->mp_fte_data[0],
+					CAT_FTE0_DATA_ENABLE);
+		/* KCE 1 */
+		p->mp_kce_ctrl[1] = module_get_register(p->m_cat, CAT_KCE1_CTRL);
+		p->mp_kce_addr[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_ADR);
+		p->mp_kce_cnt[1] =
+			register_get_field(p->mp_kce_ctrl[1], CAT_KCE1_CTRL_CNT);
+		p->mp_kce_data[1] = module_get_register(p->m_cat, CAT_KCE1_DATA);
+		p->mp_kce_data_enable[1] = register_get_field(p->mp_kce_data[1],
+					CAT_KCE1_DATA_ENABLE);
+		/* KCS 1 */
+		p->mp_kcs_ctrl[1] = module_get_register(p->m_cat, CAT_KCS1_CTRL);
+		p->mp_kcs_addr[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_ADR);
+		p->mp_kcs_cnt[1] =
+			register_get_field(p->mp_kcs_ctrl[1], CAT_KCS1_CTRL_CNT);
+		p->mp_kcs_data[1] = module_get_register(p->m_cat, CAT_KCS1_DATA);
+		p->mp_kcs_data_category[1] =
+			register_get_field(p->mp_kcs_data[1], CAT_KCS1_DATA_CATEGORY);
+		/* FTE 1 */
+		p->mp_fte_ctrl[1] = module_get_register(p->m_cat, CAT_FTE1_CTRL);
+		p->mp_fte_addr[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_ADR);
+		p->mp_fte_cnt[1] =
+			register_get_field(p->mp_fte_ctrl[1], CAT_FTE1_CTRL_CNT);
+		p->mp_fte_data[1] = module_get_register(p->m_cat, CAT_FTE1_DATA);
+		p->mp_fte_data_enable[1] = register_get_field(p->mp_fte_data[1],
+					CAT_FTE1_DATA_ENABLE);
+	}
+
+	/* CTE */
+	p->mp_cte_ctrl = module_get_register(p->m_cat, CAT_CTE_CTRL);
+	p->mp_cte_addr = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_ADR);
+	p->mp_cte_cnt = register_get_field(p->mp_cte_ctrl, CAT_CTE_CTRL_CNT);
+	p->mp_cte_data = module_get_register(p->m_cat, CAT_CTE_DATA);
+	p->mp_cte_data_col =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COL_ENABLE);
+	p->mp_cte_data_cor =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_COR_ENABLE);
+	p->mp_cte_data_hsh =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_HSH_ENABLE);
+	p->mp_cte_data_qsl =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_QSL_ENABLE);
+	p->mp_cte_data_ipf =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_IPF_ENABLE);
+	p->mp_cte_data_slc =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_SLC_ENABLE);
+	p->mp_cte_data_pdb =
+		register_get_field(p->mp_cte_data, CAT_CTE_DATA_PDB_ENABLE);
+	p->mp_cte_data_msk =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_MSK_ENABLE);
+	p->mp_cte_data_hst =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_HST_ENABLE);
+	p->mp_cte_data_epp =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_EPP_ENABLE);
+	p->mp_cte_data_tpe =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_TPE_ENABLE);
+	p->mp_cte_data_rrb =
+		register_query_field(p->mp_cte_data, CAT_CTE_DATA_RRB_ENABLE);
+	/* CTS */
+	p->mp_cts_ctrl = module_get_register(p->m_cat, CAT_CTS_CTRL);
+	p->mp_cts_addr = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_ADR);
+	p->mp_cts_cnt = register_get_field(p->mp_cts_ctrl, CAT_CTS_CTRL_CNT);
+	p->mp_cts_data = module_get_register(p->m_cat, CAT_CTS_DATA);
+	p->mp_cts_data_cat_a = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_A);
+	p->mp_cts_data_cat_b = register_get_field(p->mp_cts_data, CAT_CTS_DATA_CAT_B);
+	/* COT */
+	p->mp_cot_ctrl = module_get_register(p->m_cat, CAT_COT_CTRL);
+	p->mp_cot_addr = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_ADR);
+	p->mp_cot_cnt = register_get_field(p->mp_cot_ctrl, CAT_COT_CTRL_CNT);
+	p->mp_cot_data = module_get_register(p->m_cat, CAT_COT_DATA);
+	p->mp_cot_data_color = register_get_field(p->mp_cot_data, CAT_COT_DATA_COLOR);
+	p->mp_cot_data_km = register_get_field(p->mp_cot_data, CAT_COT_DATA_KM);
+	p->mp_cot_data_nfv_sb =
+		register_query_field(p->mp_cot_data, CAT_COT_DATA_NFV_SB);
+	/* CCT */
+	p->mp_cct_ctrl = module_get_register(p->m_cat, CAT_CCT_CTRL);
+	p->mp_cct_addr = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_ADR);
+	p->mp_cct_cnt = register_get_field(p->mp_cct_ctrl, CAT_CCT_CTRL_CNT);
+	p->mp_cct_data = module_get_register(p->m_cat, CAT_CCT_DATA);
+	p->mp_cct_data_color = register_get_field(p->mp_cct_data, CAT_CCT_DATA_COLOR);
+	p->mp_cct_data_km = register_get_field(p->mp_cct_data, CAT_CCT_DATA_KM);
+	/* EXO */
+	p->mp_exo_ctrl = module_get_register(p->m_cat, CAT_EXO_CTRL);
+	p->mp_exo_addr = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_ADR);
+	p->mp_exo_cnt = register_get_field(p->mp_exo_ctrl, CAT_EXO_CTRL_CNT);
+	p->mp_exo_data = module_get_register(p->m_cat, CAT_EXO_DATA);
+	p->mp_exo_data_dyn = register_get_field(p->mp_exo_data, CAT_EXO_DATA_DYN);
+	p->mp_exo_data_ofs = register_get_field(p->mp_exo_data, CAT_EXO_DATA_OFS);
+	/* RCK */
+	p->mp_rck_ctrl = module_get_register(p->m_cat, CAT_RCK_CTRL);
+	p->mp_rck_addr = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_ADR);
+	p->mp_rck_cnt = register_get_field(p->mp_rck_ctrl, CAT_RCK_CTRL_CNT);
+	p->mp_rck_data = module_get_register(p->m_cat, CAT_RCK_DATA);
+	/* LEN */
+	p->mp_len_ctrl = module_get_register(p->m_cat, CAT_LEN_CTRL);
+	p->mp_len_addr = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_ADR);
+	p->mp_len_cnt = register_get_field(p->mp_len_ctrl, CAT_LEN_CTRL_CNT);
+	p->mp_len_data = module_get_register(p->m_cat, CAT_LEN_DATA);
+	p->mp_len_data_lower = register_get_field(p->mp_len_data, CAT_LEN_DATA_LOWER);
+	p->mp_len_data_upper = register_get_field(p->mp_len_data, CAT_LEN_DATA_UPPER);
+	p->mp_len_data_dyn1 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN1);
+	p->mp_len_data_dyn2 = register_get_field(p->mp_len_data, CAT_LEN_DATA_DYN2);
+	p->mp_len_data_inv = register_get_field(p->mp_len_data, CAT_LEN_DATA_INV);
+
+	p->mp_cfn_data_ptc_cfp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_PTC_CFP);
+	p->mp_cfn_data_err_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L3_CS);
+	p->mp_cfn_data_err_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_L4_CS);
+	p->mp_cfn_data_err_tnl_l3_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L3_CS);
+	p->mp_cfn_data_err_tnl_l4_cs =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_L4_CS);
+	p->mp_cfn_data_err_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TTL_EXP);
+	p->mp_cfn_data_err_tnl_ttl_exp =
+		register_query_field(p->mp_cfn_data, CAT_CFN_DATA_ERR_TNL_TTL_EXP);
+
+	p->mp_kcc_ctrl = module_query_register(p->m_cat, CAT_KCC_CTRL);
+	if (p->mp_kcc_ctrl != NULL) {
+		p->mp_kcc_addr =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_ADR);
+		p->mp_kcc_cnt =
+			register_query_field(p->mp_kcc_ctrl, CAT_KCC_CTRL_CNT);
+	}
+	p->mp_kcc_data = module_query_register(p->m_cat, CAT_KCC_DATA);
+	if (p->mp_kcc_data != NULL) {
+		p->mp_kcc_data_key =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_KEY);
+		p->mp_kcc_data_category =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_CATEGORY);
+		p->mp_kcc_data_id =
+			register_query_field(p->mp_kcc_data, CAT_KCC_DATA_ID);
+	}
+
+	p->mp_cce_ctrl = module_query_register(p->m_cat, CAT_CCE_CTRL);
+	if (p->mp_cce_ctrl != NULL) {
+		p->mp_cce_addr =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_ADR);
+		p->mp_cce_cnt =
+			register_query_field(p->mp_cce_ctrl, CAT_CCE_CTRL_CNT);
+	}
+	p->mp_cce_data = module_query_register(p->m_cat, CAT_CCE_DATA);
+	if (p->mp_cce_data != NULL) {
+		p->mp_cce_data_imm =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IMM);
+		p->mp_cce_data_ind =
+			register_query_field(p->mp_cce_data, CAT_CCE_DATA_IND);
+	}
+
+	p->mp_ccs_ctrl = module_query_register(p->m_cat, CAT_CCS_CTRL);
+	if (p->mp_ccs_ctrl != NULL) {
+		p->mp_ccs_addr =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_ADR);
+		p->mp_ccs_cnt =
+			register_query_field(p->mp_ccs_ctrl, CAT_CCS_CTRL_CNT);
+	}
+	p->mp_ccs_data = module_query_register(p->m_cat, CAT_CCS_DATA);
+	if (p->mp_ccs_data != NULL) {
+		p->mp_ccs_data_cor_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR_EN);
+		p->mp_ccs_data_cor =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_COR);
+		p->mp_ccs_data_hsh_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH_EN);
+		p->mp_ccs_data_hsh =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HSH);
+		p->mp_ccs_data_qsl_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL_EN);
+		p->mp_ccs_data_qsl =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_QSL);
+		p->mp_ccs_data_ipf_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF_EN);
+		p->mp_ccs_data_ipf =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_IPF);
+		p->mp_ccs_data_slc_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC_EN);
+		p->mp_ccs_data_slc =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SLC);
+		p->mp_ccs_data_pdb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB_EN);
+		p->mp_ccs_data_pdb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_PDB);
+		p->mp_ccs_data_msk_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK_EN);
+		p->mp_ccs_data_msk =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_MSK);
+		p->mp_ccs_data_hst_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST_EN);
+		p->mp_ccs_data_hst =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_HST);
+		p->mp_ccs_data_epp_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP_EN);
+		p->mp_ccs_data_epp =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_EPP);
+		p->mp_ccs_data_tpe_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE_EN);
+		p->mp_ccs_data_tpe =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_TPE);
+		p->mp_ccs_data_rrb_en =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB_EN);
+		p->mp_ccs_data_rrb =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_RRB);
+		p->mp_ccs_data_sb0_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_TYPE);
+		p->mp_ccs_data_sb0_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB0_DATA);
+		p->mp_ccs_data_sb1_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_TYPE);
+		p->mp_ccs_data_sb1_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB1_DATA);
+		p->mp_ccs_data_sb2_type =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_TYPE);
+		p->mp_ccs_data_sb2_data =
+			register_query_field(p->mp_ccs_data, CAT_CCS_DATA_SB2_DATA);
+	}
+
+	return 0;
+}
+
+/* CFN */
+/* Select the CFN table entry to operate on (stages the CTRL ADR field;
+ * written to the FPGA by cat_nthw_cfn_flush()).
+ */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_addr, val);
+}
+
+/*
+ * Set the CFN CTRL CNT field (number of consecutive entries affected by the
+ * next flush).
+ *
+ * NOTE(review): this function was originally named "r", which is meaningless
+ * and pollutes the global namespace.  The logic now lives in
+ * cat_nthw_cfn_cnt(), matching every other cat_nthw_*_cnt() accessor in this
+ * file; r() is kept below as a thin backward-compatible wrapper until all
+ * callers are converted.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_cnt, val);
+}
+
+/* Deprecated alias for cat_nthw_cfn_cnt(); do not use in new code. */
+void r(const struct cat_nthw *p, uint32_t val)
+{
+	cat_nthw_cfn_cnt(p, val);
+}
+
+/*
+ * CFN DATA field setters.  Each call stages VAL in the corresponding field
+ * of the CFN shadow register; nothing is written to the FPGA until
+ * cat_nthw_cfn_flush() is called.
+ */
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_enable, val);
+}
+
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_inv, val);
+}
+
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_inv, val);
+}
+
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_isl, val);
+}
+
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mac, val);
+}
+
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l2, val);
+}
+
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vn_tag, val);
+}
+
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l3, val);
+}
+
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_frag, val);
+}
+
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_l4, val);
+}
+
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tunnel, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l2, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_vlan, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_mpls, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l3, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_frag, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_ip_prot, val);
+}
+
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_ptc_tnl_l4, val);
+}
+
+/*
+ * The fields below are optional: they are resolved with
+ * register_query_field() during init and may be NULL on FPGA images that
+ * lack them.  The asserts guard against use on such images.
+ */
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_ptc_cfp);
+	field_set_val32(p->mp_cfn_data_ptc_cfp, val);
+}
+
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l3_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l3_cs, val);
+}
+
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_l4_cs);
+	field_set_val32(p->mp_cfn_data_err_tnl_l4_cs, val);
+}
+
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_err_tnl_ttl_exp);
+	field_set_val32(p->mp_cfn_data_err_tnl_ttl_exp, val);
+}
+
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_inv, val);
+}
+
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_cv, val);
+}
+
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_fcs, val);
+}
+
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_err_trunc, val);
+}
+
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_mac_port, val);
+}
+
+/* Multi-word pattern-matcher compare value: copies mn_words 32-bit words. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_cfn_data_pm_cmp, val, p->mp_cfn_data_pm_cmp->mn_words);
+}
+
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_dct, val);
+}
+
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_ext_inv, val);
+}
+
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_cmb, val);
+}
+
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_and_inv, val);
+}
+
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_or_inv, val);
+}
+
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_pm_inv, val);
+}
+
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc, val);
+}
+
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_lc_inv, val);
+}
+
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cfn_data_km0_or, val);
+}
+
+/* KM1 field may be absent on some images; assert guards a NULL field. */
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cfn_data_km1_or);
+	field_set_val32(p->mp_cfn_data_km1_or, val);
+}
+
+/* Write the staged CFN CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_cfn_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cfn_ctrl, 1);
+	register_flush(p->mp_cfn_data, 1);
+}
+
+/*
+ * KCE/KCS/FTE accessors.  'index' selects the register bank (0 or 1 — see
+ * the CAT_KCE0_*/CAT_KCE1_* etc. lookups in the init code above).
+ * NOTE(review): 'index' is not range-checked here; callers must keep it
+ * within the bank arrays.
+ */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_addr[index], val);
+}
+
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_cnt[index], val);
+}
+
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kce_data_enable[index], val);
+}
+
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kce_ctrl[index], 1);
+	register_flush(p->mp_kce_data[index], 1);
+}
+
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_addr[index], val);
+}
+
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_cnt[index], val);
+}
+
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_kcs_data_category[index], val);
+}
+
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_kcs_ctrl[index], 1);
+	register_flush(p->mp_kcs_data[index], 1);
+}
+
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_addr[index], val);
+}
+
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_cnt[index], val);
+}
+
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val)
+{
+	field_set_val32(p->mp_fte_data_enable[index], val);
+}
+
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index)
+{
+	register_flush(p->mp_fte_ctrl[index], 1);
+	register_flush(p->mp_fte_data[index], 1);
+}
+
+/*
+ * CTE accessors: stage the per-category enable bits for the downstream
+ * modules.  COL/COR/HSH/QSL/IPF/SLC/PDB are mandatory fields; MSK, HST,
+ * EPP, TPE and RRB are optional (resolved with register_query_field() in
+ * init) and therefore assert-guarded.  Values take effect on
+ * cat_nthw_cte_flush().
+ */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_addr, val);
+}
+
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_cnt, val);
+}
+
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_col, val);
+}
+
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_cor, val);
+}
+
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_hsh, val);
+}
+
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_qsl, val);
+}
+
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_ipf, val);
+}
+
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_slc, val);
+}
+
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cte_data_pdb, val);
+}
+
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_msk);
+	field_set_val32(p->mp_cte_data_msk, val);
+}
+
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_hst);
+	field_set_val32(p->mp_cte_data_hst, val);
+}
+
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_epp);
+	field_set_val32(p->mp_cte_data_epp, val);
+}
+
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_tpe);
+	field_set_val32(p->mp_cte_data_tpe, val);
+}
+
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cte_data_rrb);
+	field_set_val32(p->mp_cte_data_rrb, val);
+}
+
+/* Write the staged CTE CTRL and DATA shadow registers to the FPGA. */
+void cat_nthw_cte_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cte_ctrl, 1);
+	register_flush(p->mp_cte_data, 1);
+}
+
+/* CTS accessors: category A/B pair per entry; flushed as CTRL+DATA. */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_addr, val);
+}
+
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_cnt, val);
+}
+
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_a, val);
+}
+
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cts_data_cat_b, val);
+}
+
+void cat_nthw_cts_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cts_ctrl, 1);
+	register_flush(p->mp_cts_data, 1);
+}
+
+/* COT accessors: color/KM per entry; NFV_SB is optional (assert-guarded). */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_addr, val);
+}
+
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_cnt, val);
+}
+
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_color, val);
+}
+
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cot_data_km, val);
+}
+
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cot_data_nfv_sb);
+	field_set_val32(p->mp_cot_data_nfv_sb, val);
+}
+
+void cat_nthw_cot_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cot_ctrl, 1);
+	register_flush(p->mp_cot_data, 1);
+}
+
+/* CCT accessors: color/KM per entry; flushed as CTRL+DATA. */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_addr, val);
+}
+
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_cnt, val);
+}
+
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_color, val);
+}
+
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cct_data_km, val);
+}
+
+void cat_nthw_cct_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_cct_ctrl, 1);
+	register_flush(p->mp_cct_data, 1);
+}
+
+/* EXO accessors: extractor dyn selector plus a signed byte offset. */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_addr, val);
+}
+
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_cnt, val);
+}
+
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_exo_data_dyn, val);
+}
+
+/* Note: 'val' is deliberately signed (offset may be negative); it is
+ * stored via the 32-bit field setter unchanged.
+ */
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_exo_data_ofs, val);
+}
+
+void cat_nthw_exo_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_exo_ctrl, 1);
+	register_flush(p->mp_exo_data, 1);
+}
+
+/* RCK accessors: the DATA register has no named fields, so the whole
+ * register is written and explicitly marked dirty for the next flush.
+ */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_addr, val);
+}
+
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rck_cnt, val);
+}
+
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val)
+{
+	register_set_val(p->mp_rck_data, &val, 1);
+	register_make_dirty(p->mp_rck_data);
+}
+
+void cat_nthw_rck_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_rck_ctrl, 1);
+	register_flush(p->mp_rck_data, 1);
+}
+
+/* LEN accessors: lower/upper bounds, dyn selectors and invert flag. */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_addr, val);
+}
+
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_cnt, val);
+}
+
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_lower, val);
+}
+
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_upper, val);
+}
+
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn1, val);
+}
+
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_dyn2, val);
+}
+
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_len_data_inv, val);
+}
+
+void cat_nthw_len_flush(const struct cat_nthw *p)
+{
+	register_flush(p->mp_len_ctrl, 1);
+	register_flush(p->mp_len_data, 1);
+}
+
+/*
+ * KCC/CCE/CCS accessors.  These registers are optional: init resolves them
+ * with module_query_register()/register_query_field(), so every pointer may
+ * be NULL on FPGA images without the feature — hence the asserts on each
+ * access.
+ */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_addr);
+	field_set_val32(p->mp_kcc_addr, val);
+}
+
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_cnt);
+	field_set_val32(p->mp_kcc_cnt, val);
+}
+
+/* Two-word key: 'val' must point to at least two uint32_t. */
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val)
+{
+	assert(p->mp_kcc_data_key);
+	field_set_val(p->mp_kcc_data_key, val, 2);
+}
+
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_category);
+	field_set_val32(p->mp_kcc_data_category, val);
+}
+
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_kcc_data_id);
+	field_set_val32(p->mp_kcc_data_id, val);
+}
+
+void cat_nthw_kcc_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_kcc_ctrl);
+	assert(p->mp_kcc_data);
+	register_flush(p->mp_kcc_ctrl, 1);
+	register_flush(p->mp_kcc_data, 1);
+}
+
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_addr);
+	field_set_val32(p->mp_cce_addr, val);
+}
+
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_cnt);
+	field_set_val32(p->mp_cce_cnt, val);
+}
+
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_imm);
+	field_set_val32(p->mp_cce_data_imm, val);
+}
+
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_cce_data_ind);
+	field_set_val32(p->mp_cce_data_ind, val);
+}
+
+void cat_nthw_cce_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_cce_ctrl);
+	assert(p->mp_cce_data);
+	register_flush(p->mp_cce_ctrl, 1);
+	register_flush(p->mp_cce_data, 1);
+}
+
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_addr);
+	field_set_val32(p->mp_ccs_addr, val);
+}
+
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val)
+{
+	assert(p->mp_ccs_cnt);
+	field_set_val32(p->mp_ccs_cnt, val);
+}
+
+/*
+ * Generate a trivial setter for each optional CCS DATA field.  Every
+ * generated function stages 'val' in the corresponding shadow field; the
+ * assert catches use on FPGA images where the field is absent (the fields
+ * are resolved with register_query_field() during init and may be NULL).
+ *
+ * The invocations below deliberately carry no trailing semicolon: the macro
+ * expands to a complete function definition, and an extra ';' at file scope
+ * is an empty declaration, which ISO C (before C23) does not allow and
+ * which -Wpedantic warns about.
+ */
+#define CATNTHW_CCS_SET(name)                                             \
+	void cat_nthw_ccs_data_##name(const struct cat_nthw *p, uint32_t val) \
+	{                                                                 \
+		assert(p->mp_ccs_data_##name);                               \
+		field_set_val32(p->mp_ccs_data_##name, val);                  \
+	}
+
+CATNTHW_CCS_SET(cor_en)
+CATNTHW_CCS_SET(cor)
+CATNTHW_CCS_SET(hsh_en)
+CATNTHW_CCS_SET(hsh)
+CATNTHW_CCS_SET(qsl_en)
+CATNTHW_CCS_SET(qsl)
+CATNTHW_CCS_SET(ipf_en)
+CATNTHW_CCS_SET(ipf)
+CATNTHW_CCS_SET(slc_en)
+CATNTHW_CCS_SET(slc)
+CATNTHW_CCS_SET(pdb_en)
+CATNTHW_CCS_SET(pdb)
+CATNTHW_CCS_SET(msk_en)
+CATNTHW_CCS_SET(msk)
+CATNTHW_CCS_SET(hst_en)
+CATNTHW_CCS_SET(hst)
+CATNTHW_CCS_SET(epp_en)
+CATNTHW_CCS_SET(epp)
+CATNTHW_CCS_SET(tpe_en)
+CATNTHW_CCS_SET(tpe)
+CATNTHW_CCS_SET(rrb_en)
+CATNTHW_CCS_SET(rrb)
+CATNTHW_CCS_SET(sb0_type)
+CATNTHW_CCS_SET(sb0_data)
+CATNTHW_CCS_SET(sb1_type)
+CATNTHW_CCS_SET(sb1_data)
+CATNTHW_CCS_SET(sb2_type)
+CATNTHW_CCS_SET(sb2_data)
+
+/* Write the staged CCS CTRL and DATA shadow registers to the FPGA; the
+ * CCS module is optional, so both register pointers are assert-checked.
+ */
+void cat_nthw_ccs_flush(const struct cat_nthw *p)
+{
+	assert(p->mp_ccs_ctrl);
+	assert(p->mp_ccs_data);
+	register_flush(p->mp_ccs_ctrl, 1);
+	register_flush(p->mp_ccs_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
new file mode 100644
index 0000000000..41ac891a93
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_cat.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_CAT_H__
+#define __FLOW_NTHW_CAT_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Opaque-at-first handle for the CAT FPGA module; full layout below. */
+struct cat_nthw;
+
+typedef struct cat_nthw cat_nthw_t;
+
+/* Lifecycle: allocate, bind to FPGA module instance n_instance, free. */
+struct cat_nthw *cat_nthw_new(void);
+void cat_nthw_delete(struct cat_nthw *p);
+int cat_nthw_init(struct cat_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int cat_nthw_setup(struct cat_nthw *p, int n_idx, int n_idx_cnt);
+void cat_nthw_set_debug_mode(struct cat_nthw *p, unsigned int n_debug_mode);
+
+/* CFN */
+void cat_nthw_cfn_select(const struct cat_nthw *p, uint32_t val);
+/*
+ * NOTE(review): renamed from a bare global "r", which polluted the global
+ * namespace and broke the module-wide *_select / *_cnt pairing used by
+ * every other table in this header (ccs, cce, kcc, ...). Verify the
+ * matching definition in flow_nthw_cat.c carries the same name.
+ */
+void cat_nthw_cfn_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_enable(const struct cat_nthw *p, uint32_t val);
+/* CFN data-field setters: ptc_* packet-type fields, err_* error fields,
+ * pm_* matcher controls, lc/km*_or combining terms.
+ */
+void cat_nthw_cfn_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_isl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_cfp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mac(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vn_tag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tunnel(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_vlan(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_mpls(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l3(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_frag(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_ip_prot(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_ptc_tnl_l4(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_cv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_fcs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_trunc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l3_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_l4_cs(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_err_tnl_ttl_exp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_mac_port(const struct cat_nthw *p, uint32_t val);
+/* pm_cmp takes an array pointer: the compare value is wider than 32 bits. */
+void cat_nthw_cfn_pm_cmp(const struct cat_nthw *p, const uint32_t *val);
+void cat_nthw_cfn_pm_dct(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_ext_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_cmb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_and_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_or_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_pm_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_lc_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km0_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_km1_or(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cfn_flush(const struct cat_nthw *p);
+/* KCE 0/1: two instances, selected by the index argument */
+void cat_nthw_kce_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kce_flush(const struct cat_nthw *p, int index);
+/* KCS 0/1: two instances, selected by the index argument */
+void cat_nthw_kcs_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_category(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_kcs_flush(const struct cat_nthw *p, int index);
+/* FTE 0/1: two instances, selected by the index argument */
+void cat_nthw_fte_select(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_cnt(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_enable(const struct cat_nthw *p, int index, uint32_t val);
+void cat_nthw_fte_flush(const struct cat_nthw *p, int index);
+/* CTE: per-engine enable fields (col/cor/hsh/.../rrb) */
+void cat_nthw_cte_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_col(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_enable_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cte_flush(const struct cat_nthw *p);
+/* CTS */
+void cat_nthw_cts_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_flush(const struct cat_nthw *p);
+void cat_nthw_cts_cat_a(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cts_cat_b(const struct cat_nthw *p, uint32_t val);
+/* COT */
+void cat_nthw_cot_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_nfv_sb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cot_flush(const struct cat_nthw *p);
+/* CCT */
+void cat_nthw_cct_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_color(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_km(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cct_flush(const struct cat_nthw *p);
+/* EXO: note ofs takes a signed value, unlike the other setters */
+void cat_nthw_exo_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_dyn(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_exo_ofs(const struct cat_nthw *p, int32_t val);
+void cat_nthw_exo_flush(const struct cat_nthw *p);
+/* RCK */
+void cat_nthw_rck_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_rck_flush(const struct cat_nthw *p);
+/* LEN */
+void cat_nthw_len_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_lower(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_upper(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn1(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_dyn2(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_inv(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_len_flush(const struct cat_nthw *p);
+/* KCC: key takes an array pointer (value wider than 32 bits) */
+void cat_nthw_kcc_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_key(const struct cat_nthw *p, uint32_t *val);
+void cat_nthw_kcc_category(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_id(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_kcc_flush(const struct cat_nthw *p);
+/* CCE */
+void cat_nthw_cce_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_imm(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_data_ind(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_cce_flush(const struct cat_nthw *p);
+/* CCS: data setters generated by CATNTHW_CCS_SET in flow_nthw_cat.c */
+void cat_nthw_ccs_select(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_cnt(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_cor(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hsh(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_qsl(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_ipf(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_slc(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_pdb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_msk(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_hst(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_epp(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_tpe(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb_en(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_rrb(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb0_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb1_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_type(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_data_sb2_data(const struct cat_nthw *p, uint32_t val);
+void cat_nthw_ccs_flush(const struct cat_nthw *p);
+
+/*
+ * Shadow handles for the CAT FPGA module: one nt_register_t pointer per
+ * CSR and one nt_field_t pointer per field within it. Arrays of [2] hold
+ * the two instances of the KCE/KCS/FTE tables (index 0/1).
+ */
+struct cat_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	nt_module_t *m_cat;
+	int m_km_if_cnt;
+
+	/* CFN: categorizer function table (ctrl + data fields) */
+	nt_register_t *mp_cfn_ctrl;
+	nt_field_t *mp_cfn_addr;
+	nt_field_t *mp_cfn_cnt;
+	nt_register_t *mp_cfn_data;
+	nt_field_t *mp_cfn_data_enable;
+	nt_field_t *mp_cfn_data_inv;
+	nt_field_t *mp_cfn_data_ptc_inv;
+	nt_field_t *mp_cfn_data_ptc_isl;
+	nt_field_t *mp_cfn_data_ptc_cfp;
+	nt_field_t *mp_cfn_data_ptc_mac;
+	nt_field_t *mp_cfn_data_ptc_l2;
+	nt_field_t *mp_cfn_data_ptc_vn_tag;
+	nt_field_t *mp_cfn_data_ptc_vlan;
+	nt_field_t *mp_cfn_data_ptc_mpls;
+	nt_field_t *mp_cfn_data_ptc_l3;
+	nt_field_t *mp_cfn_data_ptc_frag;
+	nt_field_t *mp_cfn_data_ptc_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_l4;
+	nt_field_t *mp_cfn_data_ptc_tunnel;
+	nt_field_t *mp_cfn_data_ptc_tnl_l2;
+	nt_field_t *mp_cfn_data_ptc_tnl_vlan;
+	nt_field_t *mp_cfn_data_ptc_tnl_mpls;
+	nt_field_t *mp_cfn_data_ptc_tnl_l3;
+	nt_field_t *mp_cfn_data_ptc_tnl_frag;
+	nt_field_t *mp_cfn_data_ptc_tnl_ip_prot;
+	nt_field_t *mp_cfn_data_ptc_tnl_l4;
+	nt_field_t *mp_cfn_data_err_inv;
+	nt_field_t *mp_cfn_data_err_cv;
+	nt_field_t *mp_cfn_data_err_fcs;
+	nt_field_t *mp_cfn_data_err_trunc;
+	nt_field_t *mp_cfn_data_err_l3_cs;
+	nt_field_t *mp_cfn_data_err_l4_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l3_cs;
+	nt_field_t *mp_cfn_data_err_tnl_l4_cs;
+	nt_field_t *mp_cfn_data_err_ttl_exp;
+	nt_field_t *mp_cfn_data_err_tnl_ttl_exp;
+	nt_field_t *mp_cfn_data_mac_port;
+	nt_field_t *mp_cfn_data_pm_cmp;
+	nt_field_t *mp_cfn_data_pm_dct;
+	nt_field_t *mp_cfn_data_pm_ext_inv;
+	nt_field_t *mp_cfn_data_pm_cmb;
+	nt_field_t *mp_cfn_data_pm_and_inv;
+	nt_field_t *mp_cfn_data_pm_or_inv;
+	nt_field_t *mp_cfn_data_pm_inv;
+	nt_field_t *mp_cfn_data_lc;
+	nt_field_t *mp_cfn_data_lc_inv;
+	nt_field_t *mp_cfn_data_km0_or;
+	nt_field_t *mp_cfn_data_km1_or;
+
+	/* KCE 0/1 */
+	nt_register_t *mp_kce_ctrl[2];
+	nt_field_t *mp_kce_addr[2];
+	nt_field_t *mp_kce_cnt[2];
+	nt_register_t *mp_kce_data[2];
+	nt_field_t *mp_kce_data_enable[2];
+
+	/* KCS 0/1 */
+	nt_register_t *mp_kcs_ctrl[2];
+	nt_field_t *mp_kcs_addr[2];
+	nt_field_t *mp_kcs_cnt[2];
+	nt_register_t *mp_kcs_data[2];
+	nt_field_t *mp_kcs_data_category[2];
+
+	/* FTE 0/1 */
+	nt_register_t *mp_fte_ctrl[2];
+	nt_field_t *mp_fte_addr[2];
+	nt_field_t *mp_fte_cnt[2];
+	nt_register_t *mp_fte_data[2];
+	nt_field_t *mp_fte_data_enable[2];
+
+	/* CTE */
+	nt_register_t *mp_cte_ctrl;
+	nt_field_t *mp_cte_addr;
+	nt_field_t *mp_cte_cnt;
+	nt_register_t *mp_cte_data;
+	nt_field_t *mp_cte_data_col;
+	nt_field_t *mp_cte_data_cor;
+	nt_field_t *mp_cte_data_hsh;
+	nt_field_t *mp_cte_data_qsl;
+	nt_field_t *mp_cte_data_ipf;
+	nt_field_t *mp_cte_data_slc;
+	nt_field_t *mp_cte_data_pdb;
+	nt_field_t *mp_cte_data_msk;
+	nt_field_t *mp_cte_data_hst;
+	nt_field_t *mp_cte_data_epp;
+	nt_field_t *mp_cte_data_tpe;
+	nt_field_t *mp_cte_data_rrb;
+
+	/* CTS */
+	nt_register_t *mp_cts_ctrl;
+	nt_field_t *mp_cts_addr;
+	nt_field_t *mp_cts_cnt;
+	nt_register_t *mp_cts_data;
+	nt_field_t *mp_cts_data_cat_a;
+	nt_field_t *mp_cts_data_cat_b;
+
+	/* COT */
+	nt_register_t *mp_cot_ctrl;
+	nt_field_t *mp_cot_addr;
+	nt_field_t *mp_cot_cnt;
+	nt_register_t *mp_cot_data;
+	nt_field_t *mp_cot_data_color;
+	nt_field_t *mp_cot_data_km;
+	nt_field_t *mp_cot_data_nfv_sb;
+
+	/* CCT */
+	nt_register_t *mp_cct_ctrl;
+	nt_field_t *mp_cct_addr;
+	nt_field_t *mp_cct_cnt;
+	nt_register_t *mp_cct_data;
+	nt_field_t *mp_cct_data_color;
+	nt_field_t *mp_cct_data_km;
+
+	/* EXO */
+	nt_register_t *mp_exo_ctrl;
+	nt_field_t *mp_exo_addr;
+	nt_field_t *mp_exo_cnt;
+	nt_register_t *mp_exo_data;
+	nt_field_t *mp_exo_data_dyn;
+	nt_field_t *mp_exo_data_ofs;
+
+	/* RCK */
+	nt_register_t *mp_rck_ctrl;
+	nt_field_t *mp_rck_addr;
+	nt_field_t *mp_rck_cnt;
+	nt_register_t *mp_rck_data;
+
+	/* LEN */
+	nt_register_t *mp_len_ctrl;
+	nt_field_t *mp_len_addr;
+	nt_field_t *mp_len_cnt;
+	nt_register_t *mp_len_data;
+	nt_field_t *mp_len_data_lower;
+	nt_field_t *mp_len_data_upper;
+	nt_field_t *mp_len_data_dyn1;
+	nt_field_t *mp_len_data_dyn2;
+	nt_field_t *mp_len_data_inv;
+	/* KCC */
+	nt_register_t *mp_kcc_ctrl;
+	nt_field_t *mp_kcc_addr;
+	nt_field_t *mp_kcc_cnt;
+
+	nt_register_t *mp_kcc_data;
+	nt_field_t *mp_kcc_data_key;
+	nt_field_t *mp_kcc_data_category;
+	nt_field_t *mp_kcc_data_id;
+
+	/* CCE */
+	nt_register_t *mp_cce_ctrl;
+	nt_field_t *mp_cce_addr;
+	nt_field_t *mp_cce_cnt;
+
+	nt_register_t *mp_cce_data;
+	nt_field_t *mp_cce_data_imm;
+	nt_field_t *mp_cce_data_ind;
+
+	/* CCS */
+	nt_register_t *mp_ccs_ctrl;
+	nt_field_t *mp_ccs_addr;
+	nt_field_t *mp_ccs_cnt;
+
+	nt_register_t *mp_ccs_data;
+	nt_field_t *mp_ccs_data_cor_en;
+	nt_field_t *mp_ccs_data_cor;
+
+	nt_field_t *mp_ccs_data_hsh_en;
+	nt_field_t *mp_ccs_data_hsh;
+	nt_field_t *mp_ccs_data_qsl_en;
+	nt_field_t *mp_ccs_data_qsl;
+	nt_field_t *mp_ccs_data_ipf_en;
+	nt_field_t *mp_ccs_data_ipf;
+	nt_field_t *mp_ccs_data_slc_en;
+	nt_field_t *mp_ccs_data_slc;
+	nt_field_t *mp_ccs_data_pdb_en;
+	nt_field_t *mp_ccs_data_pdb;
+	nt_field_t *mp_ccs_data_msk_en;
+	nt_field_t *mp_ccs_data_msk;
+	nt_field_t *mp_ccs_data_hst_en;
+	nt_field_t *mp_ccs_data_hst;
+	nt_field_t *mp_ccs_data_epp_en;
+	nt_field_t *mp_ccs_data_epp;
+	nt_field_t *mp_ccs_data_tpe_en;
+	nt_field_t *mp_ccs_data_tpe;
+	nt_field_t *mp_ccs_data_rrb_en;
+	nt_field_t *mp_ccs_data_rrb;
+	nt_field_t *mp_ccs_data_sb0_type;
+	nt_field_t *mp_ccs_data_sb0_data;
+	nt_field_t *mp_ccs_data_sb1_type;
+	nt_field_t *mp_ccs_data_sb1_data;
+	nt_field_t *mp_ccs_data_sb2_type;
+	nt_field_t *mp_ccs_data_sb2_data;
+};
+
+#endif /* __FLOW_NTHW_CAT_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
new file mode 100644
index 0000000000..5a7f90ad69
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_csu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying CSU module handle. */
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
+/* Allocate a zeroed csu_nthw context; returns NULL on allocation failure. */
+struct csu_nthw *csu_nthw_new(void)
+{
+	/* calloc yields the same zero-initialized state as malloc + memset. */
+	return calloc(1, sizeof(struct csu_nthw));
+}
+
+/* Release a csu_nthw context; a NULL argument is a no-op. */
+void csu_nthw_delete(struct csu_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub stale register/field pointers before releasing the memory. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a csu_nthw context to CSU module instance n_instance of p_fpga and
+ * resolve all RCP register/field handles.
+ *
+ * When p is NULL the call degenerates to a presence probe: it returns 0 if
+ * the instance exists and -1 otherwise, without touching any state.
+ * Returns 0 on success, -1 if the instance does not exist.
+ */
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_CSU, n_instance);
+
+	/* Instance number is stored in a uint8_t below, hence the < 256 bound. */
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Csu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_csu = p_mod;
+
+	/* Resolve the RCP control register and its address/count fields. */
+	p->mp_rcp_ctrl = module_get_register(p->m_csu, CSU_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, CSU_RCP_CTRL_CNT);
+	/* Resolve the RCP data register and the four checksum-command fields. */
+	p->mp_rcp_data = module_get_register(p->m_csu, CSU_RCP_DATA);
+	p->mp_rcp_data_ol3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL3_CMD);
+	p->mp_rcp_data_ol4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_OL4_CMD);
+	p->mp_rcp_data_il3_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL3_CMD);
+	p->mp_rcp_data_il4_cmd =
+		register_get_field(p->mp_rcp_data, CSU_RCP_DATA_IL4_CMD);
+
+	return 0;
+}
+
+/*
+ * Select the RCP record address for subsequent data writes.
+ * Assert added for consistency with the cat_nthw_* setters in this patch,
+ * which all validate the shadow-field pointer before writing.
+ */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_ctrl_adr);
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+/*
+ * Set the RCP record count.
+ * Assert added for consistency with the cat_nthw_* setters in this patch.
+ */
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_ctrl_cnt);
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+/* Program the outer-layer3 checksum command field of the selected RCP record. */
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for outer layer3.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+/* Program the outer-layer4 checksum command field of the selected RCP record. */
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for outer layer4.
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+/* Program the inner (tunneled) layer3 checksum command field of the RCP record. */
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L3 calc method for inner layer3 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+/* Program the inner (tunneled) layer4 checksum command field of the RCP record. */
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+	/*
+	 * Select L4 calc method for inner layer4 (tunneled).
+	 * 0: Do not touch checksum field.
+	 * 1: Check, but do not touch checksum field.
+	 * 2: Insert checksum header value for BAD checksum.
+	 * 3: Insert checksum header value for GOOD checksum.
+	 * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+	 * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP, otherwise
+	 *    GOOD checksum.
+	 */
+	field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+/*
+ * Push the shadowed RCP ctrl/data registers to the FPGA (count = 1).
+ * Asserts added for consistency with cat_nthw_cce_flush/cat_nthw_ccs_flush,
+ * which validate both register pointers before flushing.
+ */
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
new file mode 100644
index 0000000000..6cb0e1f781
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_csu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_NTHW_CSU_H_
+#define _FLOW_NTHW_CSU_H_
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Shadow handles for the CSU FPGA module: RCP ctrl/data registers and fields. */
+struct csu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_csu;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_ol3_cmd;
+	nt_field_t *mp_rcp_data_ol4_cmd;
+	nt_field_t *mp_rcp_data_il3_cmd;
+	nt_field_t *mp_rcp_data_il4_cmd;
+};
+
+/* Lifecycle: allocate, bind to FPGA module instance n_instance, free. */
+struct csu_nthw *csu_nthw_new(void);
+void csu_nthw_delete(struct csu_nthw *p);
+int csu_nthw_init(struct csu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP record programming: select record, set fields, flush to hardware. */
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
+
+#endif /* _FLOW_NTHW_CSU_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
new file mode 100644
index 0000000000..4549898cc1
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_rac.h"
+
+#include "flow_nthw_flm.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Allocate a zeroed flm_nthw context; returns NULL on allocation failure. */
+struct flm_nthw *flm_nthw_new(void)
+{
+	/* calloc yields the same zero-initialized state as malloc + memset. */
+	return calloc(1, sizeof(struct flm_nthw));
+}
+
+/* Release an flm_nthw context; a NULL argument is a no-op. */
+void flm_nthw_delete(struct flm_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* Scrub stale register/field pointers before releasing the memory. */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/* Forward the debug-mode setting to the underlying FLM module handle. */
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_flm, n_debug_mode);
+}
+
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_FLM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Flm %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_rac = p_fpga->p_fpga_info->mp_nthw_rac;
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_flm = p_mod;
+
+	p->mp_control = module_get_register(p->m_flm, FLM_CONTROL);
+	p->mp_control_enable =
+		register_get_field(p->mp_control, FLM_CONTROL_ENABLE);
+	p->mp_control_init = register_get_field(p->mp_control, FLM_CONTROL_INIT);
+	p->mp_control_lds = register_get_field(p->mp_control, FLM_CONTROL_LDS);
+	p->mp_control_lfs = register_get_field(p->mp_control, FLM_CONTROL_LFS);
+	p->mp_control_lis = register_get_field(p->mp_control, FLM_CONTROL_LIS);
+	p->mp_control_uds = register_get_field(p->mp_control, FLM_CONTROL_UDS);
+	p->mp_control_uis = register_get_field(p->mp_control, FLM_CONTROL_UIS);
+	p->mp_control_rds = register_get_field(p->mp_control, FLM_CONTROL_RDS);
+	p->mp_control_ris = register_get_field(p->mp_control, FLM_CONTROL_RIS);
+	p->mp_control_pds = register_query_field(p->mp_control, FLM_CONTROL_PDS);
+	p->mp_control_pis = register_query_field(p->mp_control, FLM_CONTROL_PIS);
+	p->mp_control_crcwr = register_get_field(p->mp_control, FLM_CONTROL_CRCWR);
+	p->mp_control_crcrd = register_get_field(p->mp_control, FLM_CONTROL_CRCRD);
+	p->mp_control_rbl = register_get_field(p->mp_control, FLM_CONTROL_RBL);
+	p->mp_control_eab = register_get_field(p->mp_control, FLM_CONTROL_EAB);
+	p->mp_control_split_sdram_usage =
+		register_get_field(p->mp_control, FLM_CONTROL_SPLIT_SDRAM_USAGE);
+
+	p->mp_status = module_get_register(p->m_flm, FLM_STATUS);
+	p->mp_status_calibdone =
+		register_get_field(p->mp_status, FLM_STATUS_CALIBDONE);
+	p->mp_status_initdone =
+		register_get_field(p->mp_status, FLM_STATUS_INITDONE);
+	p->mp_status_idle = register_get_field(p->mp_status, FLM_STATUS_IDLE);
+	p->mp_status_critical =
+		register_get_field(p->mp_status, FLM_STATUS_CRITICAL);
+	p->mp_status_panic = register_get_field(p->mp_status, FLM_STATUS_PANIC);
+	p->mp_status_crcerr = register_get_field(p->mp_status, FLM_STATUS_CRCERR);
+	p->mp_status_eft_bp = register_get_field(p->mp_status, FLM_STATUS_EFT_BP);
+
+	p->mp_timeout = module_get_register(p->m_flm, FLM_TIMEOUT);
+	p->mp_timeout_t = register_get_field(p->mp_timeout, FLM_TIMEOUT_T);
+
+	p->mp_scrub = module_get_register(p->m_flm, FLM_SCRUB);
+	p->mp_scrub_i = register_get_field(p->mp_scrub, FLM_SCRUB_I);
+
+	p->mp_load_bin = module_get_register(p->m_flm, FLM_LOAD_BIN);
+	p->mp_load_bin_bin = register_get_field(p->mp_load_bin, FLM_LOAD_BIN_BIN);
+
+	p->mp_load_pps = module_get_register(p->m_flm, FLM_LOAD_PPS);
+	p->mp_load_pps_pps = register_get_field(p->mp_load_pps, FLM_LOAD_PPS_PPS);
+
+	p->mp_load_lps = module_get_register(p->m_flm, FLM_LOAD_LPS);
+	p->mp_load_lps_lps = register_get_field(p->mp_load_lps, FLM_LOAD_LPS_LPS);
+
+	p->mp_load_aps = module_get_register(p->m_flm, FLM_LOAD_APS);
+	p->mp_load_aps_aps = register_get_field(p->mp_load_aps, FLM_LOAD_APS_APS);
+
+	p->mp_prio = module_get_register(p->m_flm, FLM_PRIO);
+	p->mp_prio_limit0 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT0);
+	p->mp_prio_ft0 = register_get_field(p->mp_prio, FLM_PRIO_FT0);
+	p->mp_prio_limit1 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT1);
+	p->mp_prio_ft1 = register_get_field(p->mp_prio, FLM_PRIO_FT1);
+	p->mp_prio_limit2 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT2);
+	p->mp_prio_ft2 = register_get_field(p->mp_prio, FLM_PRIO_FT2);
+	p->mp_prio_limit3 = register_get_field(p->mp_prio, FLM_PRIO_LIMIT3);
+	p->mp_prio_ft3 = register_get_field(p->mp_prio, FLM_PRIO_FT3);
+
+	p->mp_pst_ctrl = module_get_register(p->m_flm, FLM_PST_CTRL);
+	p->mp_pst_ctrl_adr = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_ADR);
+	p->mp_pst_ctrl_cnt = register_get_field(p->mp_pst_ctrl, FLM_PST_CTRL_CNT);
+	p->mp_pst_data = module_get_register(p->m_flm, FLM_PST_DATA);
+	p->mp_pst_data_bp = register_get_field(p->mp_pst_data, FLM_PST_DATA_BP);
+	p->mp_pst_data_pp = register_get_field(p->mp_pst_data, FLM_PST_DATA_PP);
+	p->mp_pst_data_tp = register_get_field(p->mp_pst_data, FLM_PST_DATA_TP);
+
+	p->mp_rcp_ctrl = module_get_register(p->m_flm, FLM_RCP_CTRL);
+	p->mp_rcp_ctrl_adr = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, FLM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_flm, FLM_RCP_DATA);
+	p->mp_rcp_data_lookup =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_LOOKUP);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW0_SEL);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_sw8_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_sw8_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_OFS);
+	p->mp_rcp_data_sw8_sel =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW8_SEL);
+	p->mp_rcp_data_sw9_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_DYN);
+	p->mp_rcp_data_sw9_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_SW9_OFS);
+	p->mp_rcp_data_mask = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_MASK);
+	p->mp_rcp_data_kid = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_KID);
+	p->mp_rcp_data_opn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_OPN);
+	p->mp_rcp_data_ipn = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_IPN);
+	p->mp_rcp_data_byt_dyn =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_DYN);
+	p->mp_rcp_data_byt_ofs =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_BYT_OFS);
+	p->mp_rcp_data_txplm = register_get_field(p->mp_rcp_data, FLM_RCP_DATA_TXPLM);
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_get_field(p->mp_rcp_data, FLM_RCP_DATA_AUTO_IPV4_MASK);
+
+	p->mp_buf_ctrl = module_get_register(p->m_flm, FLM_BUF_CTRL);
+
+	p->mp_lrn_data = module_get_register(p->m_flm, FLM_LRN_DATA);
+	p->mp_inf_data = module_get_register(p->m_flm, FLM_INF_DATA);
+	p->mp_sta_data = module_get_register(p->m_flm, FLM_STA_DATA);
+
+	p->mp_stat_lrn_done = module_get_register(p->m_flm, FLM_STAT_LRN_DONE);
+	p->mp_stat_lrn_done_cnt =
+		register_get_field(p->mp_stat_lrn_done, FLM_STAT_LRN_DONE_CNT);
+
+	p->mp_stat_lrn_ignore = module_get_register(p->m_flm, FLM_STAT_LRN_IGNORE);
+	p->mp_stat_lrn_ignore_cnt =
+		register_get_field(p->mp_stat_lrn_ignore, FLM_STAT_LRN_IGNORE_CNT);
+
+	p->mp_stat_lrn_fail = module_get_register(p->m_flm, FLM_STAT_LRN_FAIL);
+	p->mp_stat_lrn_fail_cnt =
+		register_get_field(p->mp_stat_lrn_fail, FLM_STAT_LRN_FAIL_CNT);
+
+	p->mp_stat_unl_done = module_get_register(p->m_flm, FLM_STAT_UNL_DONE);
+	p->mp_stat_unl_done_cnt =
+		register_get_field(p->mp_stat_unl_done, FLM_STAT_UNL_DONE_CNT);
+
+	p->mp_stat_unl_ignore = module_get_register(p->m_flm, FLM_STAT_UNL_IGNORE);
+	p->mp_stat_unl_ignore_cnt =
+		register_get_field(p->mp_stat_unl_ignore, FLM_STAT_UNL_IGNORE_CNT);
+
+	p->mp_stat_prb_done = module_query_register(p->m_flm, FLM_STAT_PRB_DONE);
+	p->mp_stat_prb_done_cnt =
+		register_query_field(p->mp_stat_prb_done, FLM_STAT_PRB_DONE_CNT);
+
+	p->mp_stat_prb_ignore = module_query_register(p->m_flm, FLM_STAT_PRB_IGNORE);
+	p->mp_stat_prb_ignore_cnt = register_query_field(p->mp_stat_prb_ignore,
+				FLM_STAT_PRB_IGNORE_CNT);
+
+	p->mp_stat_rel_done = module_get_register(p->m_flm, FLM_STAT_REL_DONE);
+	p->mp_stat_rel_done_cnt =
+		register_get_field(p->mp_stat_rel_done, FLM_STAT_REL_DONE_CNT);
+
+	p->mp_stat_rel_ignore = module_get_register(p->m_flm, FLM_STAT_REL_IGNORE);
+	p->mp_stat_rel_ignore_cnt =
+		register_get_field(p->mp_stat_rel_ignore, FLM_STAT_REL_IGNORE_CNT);
+
+	p->mp_stat_aul_done = module_get_register(p->m_flm, FLM_STAT_AUL_DONE);
+	p->mp_stat_aul_done_cnt =
+		register_get_field(p->mp_stat_aul_done, FLM_STAT_AUL_DONE_CNT);
+
+	p->mp_stat_aul_ignore = module_get_register(p->m_flm, FLM_STAT_AUL_IGNORE);
+	p->mp_stat_aul_ignore_cnt =
+		register_get_field(p->mp_stat_aul_ignore, FLM_STAT_AUL_IGNORE_CNT);
+
+	p->mp_stat_aul_fail = module_get_register(p->m_flm, FLM_STAT_AUL_FAIL);
+	p->mp_stat_aul_fail_cnt =
+		register_get_field(p->mp_stat_aul_fail, FLM_STAT_AUL_FAIL_CNT);
+
+	p->mp_stat_tul_done = module_get_register(p->m_flm, FLM_STAT_TUL_DONE);
+	p->mp_stat_tul_done_cnt =
+		register_get_field(p->mp_stat_tul_done, FLM_STAT_TUL_DONE_CNT);
+
+	p->mp_stat_flows = module_get_register(p->m_flm, FLM_STAT_FLOWS);
+	p->mp_stat_flows_cnt =
+		register_get_field(p->mp_stat_flows, FLM_STAT_FLOWS_CNT);
+
+	p->mp_stat_sta_done = module_query_register(p->m_flm, FLM_STAT_STA_DONE);
+	p->mp_stat_sta_done_cnt =
+		register_query_field(p->mp_stat_sta_done, FLM_STAT_STA_DONE_CNT);
+
+	p->mp_stat_inf_done = module_query_register(p->m_flm, FLM_STAT_INF_DONE);
+	p->mp_stat_inf_done_cnt =
+		register_query_field(p->mp_stat_inf_done, FLM_STAT_INF_DONE_CNT);
+
+	p->mp_stat_inf_skip = module_query_register(p->m_flm, FLM_STAT_INF_SKIP);
+	p->mp_stat_inf_skip_cnt =
+		register_query_field(p->mp_stat_inf_skip, FLM_STAT_INF_SKIP_CNT);
+
+	p->mp_stat_pck_hit = module_query_register(p->m_flm, FLM_STAT_PCK_HIT);
+	p->mp_stat_pck_hit_cnt =
+		register_query_field(p->mp_stat_pck_hit, FLM_STAT_PCK_HIT_CNT);
+
+	p->mp_stat_pck_miss = module_query_register(p->m_flm, FLM_STAT_PCK_MISS);
+	p->mp_stat_pck_miss_cnt =
+		register_query_field(p->mp_stat_pck_miss, FLM_STAT_PCK_MISS_CNT);
+
+	p->mp_stat_pck_unh = module_query_register(p->m_flm, FLM_STAT_PCK_UNH);
+	p->mp_stat_pck_unh_cnt =
+		register_query_field(p->mp_stat_pck_unh, FLM_STAT_PCK_UNH_CNT);
+
+	p->mp_stat_pck_dis = module_query_register(p->m_flm, FLM_STAT_PCK_DIS);
+	p->mp_stat_pck_dis_cnt =
+		register_query_field(p->mp_stat_pck_dis, FLM_STAT_PCK_DIS_CNT);
+
+	p->mp_stat_csh_hit = module_query_register(p->m_flm, FLM_STAT_CSH_HIT);
+	p->mp_stat_csh_hit_cnt =
+		register_query_field(p->mp_stat_csh_hit, FLM_STAT_CSH_HIT_CNT);
+
+	p->mp_stat_csh_miss = module_query_register(p->m_flm, FLM_STAT_CSH_MISS);
+	p->mp_stat_csh_miss_cnt =
+		register_query_field(p->mp_stat_csh_miss, FLM_STAT_CSH_MISS_CNT);
+
+	p->mp_stat_csh_unh = module_query_register(p->m_flm, FLM_STAT_CSH_UNH);
+	p->mp_stat_csh_unh_cnt =
+		register_query_field(p->mp_stat_csh_unh, FLM_STAT_CSH_UNH_CNT);
+
+	p->mp_stat_cuc_start = module_query_register(p->m_flm, FLM_STAT_CUC_START);
+	p->mp_stat_cuc_start_cnt =
+		register_query_field(p->mp_stat_cuc_start, FLM_STAT_CUC_START_CNT);
+
+	p->mp_stat_cuc_move = module_query_register(p->m_flm, FLM_STAT_CUC_MOVE);
+	p->mp_stat_cuc_move_cnt =
+		register_query_field(p->mp_stat_cuc_move, FLM_STAT_CUC_MOVE_CNT);
+
+	return 0;
+}
+
+/*
+ * FLM CONTROL register field setters.
+ * Each setter only stages a value in the register shadow; the hardware is
+ * not touched until flm_nthw_control_flush() writes the CONTROL register.
+ */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_enable, val);
+}
+
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_init, val);
+}
+
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lds, val);
+}
+
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lfs, val);
+}
+
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_lis, val);
+}
+
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uds, val);
+}
+
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_uis, val);
+}
+
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rds, val);
+}
+
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_ris, val);
+}
+
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val)
+{
+	/* PDS may be absent on some FPGA variants (field pointer can be
+	 * NULL); callers must only use it when present - TODO confirm the
+	 * query path in the init function.
+	 */
+	assert(p->mp_control_pds);
+	field_set_val32(p->mp_control_pds, val);
+}
+
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val)
+{
+	/* Same optionality caveat as PDS above. */
+	assert(p->mp_control_pis);
+	field_set_val32(p->mp_control_pis, val);
+}
+
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcwr, val);
+}
+
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_crcrd, val);
+}
+
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_rbl, val);
+}
+
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_eab, val);
+}
+
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_control_split_sdram_usage, val);
+}
+
+/* Write the staged CONTROL register value to hardware. */
+void flm_nthw_control_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_control, 1);
+}
+
+/*
+ * FLM STATUS register accessors.
+ * Convention: when 'get' is non-zero the field's shadow value is copied
+ * into *val; read-write flags additionally write *val back to the field
+ * when 'get' is zero.  Use flm_nthw_status_update() to refresh the shadow
+ * from hardware and flm_nthw_status_flush() to write it out.
+ */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_calibdone);
+}
+
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_initdone);
+}
+
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_idle);
+}
+
+/* Read-write: get==0 writes *val into the field (presumably to clear the
+ * sticky flag - confirm against the FPGA register documentation).
+ */
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_critical);
+
+	else
+		field_set_val32(p->mp_status_critical, *val);
+}
+
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_panic);
+
+	else
+		field_set_val32(p->mp_status_panic, *val);
+}
+
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_crcerr);
+
+	else
+		field_set_val32(p->mp_status_crcerr, *val);
+}
+
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_status_eft_bp);
+}
+
+/* Write the STATUS register shadow to hardware. */
+void flm_nthw_status_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_status, 1);
+}
+
+/* Refresh the STATUS register shadow from hardware. */
+void flm_nthw_status_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_status);
+}
+
+/*
+ * TIMEOUT / SCRUB / LOAD_* register accessors.  Setters stage a value in
+ * the register shadow; the matching *_flush() writes it to hardware.
+ */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_timeout_t, val);
+}
+
+void flm_nthw_timeout_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_timeout, 1);
+}
+
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_scrub_i, val);
+}
+
+void flm_nthw_scrub_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_scrub, 1);
+}
+
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_bin_bin, val);
+}
+
+void flm_nthw_load_bin_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_bin, 1);
+}
+
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_pps_pps, val);
+}
+
+void flm_nthw_load_pps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_pps, 1);
+}
+
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_lps_lps, val);
+}
+
+void flm_nthw_load_lps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_lps, 1);
+}
+
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_load_aps_aps, val);
+}
+
+void flm_nthw_load_aps_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_load_aps, 1);
+}
+
+/*
+ * PRIO register accessors: four (limit, flow-type) pairs staged in the
+ * shadow and written out by flm_nthw_prio_flush().
+ */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit0, val);
+}
+
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft0, val);
+}
+
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit1, val);
+}
+
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft1, val);
+}
+
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit2, val);
+}
+
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft2, val);
+}
+
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_limit3, val);
+}
+
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_prio_ft3, val);
+}
+
+void flm_nthw_prio_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_prio, 1);
+}
+
+/*
+ * PST table accessors: select the entry via PST_CTRL (address + count),
+ * stage the data fields, then write both registers with
+ * flm_nthw_pst_flush().
+ */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_adr, val);
+}
+
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_ctrl_cnt, val);
+}
+
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_bp, val);
+}
+
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_pp, val);
+}
+
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_pst_data_tp, val);
+}
+
+/* Write PST_CTRL first, then PST_DATA; the order matters since CTRL
+ * selects the table entry the DATA write lands in.
+ */
+void flm_nthw_pst_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_pst_ctrl, 1);
+	register_flush(p->mp_pst_data, 1);
+}
+
+/*
+ * RCP (recipe) table accessors: select the recipe index via RCP_CTRL,
+ * stage the RCP_DATA fields, then write both with flm_nthw_rcp_flush().
+ */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_lookup, val);
+}
+
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel, val);
+}
+
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw8_sel, val);
+}
+
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sw9_ofs, val);
+}
+
+/* MASK is a wide field: 'val' must point to 10 x 32-bit words (320 bits). */
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask, val, 10);
+}
+
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kid, val);
+}
+
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_opn, val);
+}
+
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ipn, val);
+}
+
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_dyn, val);
+}
+
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_byt_ofs, val);
+}
+
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_txplm, val);
+}
+
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Write RCP_CTRL first (selects the recipe entry), then RCP_DATA. */
+void flm_nthw_rcp_flush(const struct flm_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/*
+ * Read the two BUF_CTRL words over RAB DMA and unpack the buffer
+ * counters (per field naming):
+ *   *lrn_free  - word 0, bits 15:0  - free space in the learn buffer
+ *   *inf_avail - word 0, bits 31:16 - data available in the info buffer
+ *   *sta_avail - word 1, bits 15:0  - data available in the status buffer
+ *
+ * Returns 0 on success, non-zero on DMA begin/commit failure; the output
+ * parameters are written only on success.
+ *
+ * NOTE(review): the '& (size - 1)' indexing assumes the DMA result ring
+ * size is a power of two - confirm against the dma_buf_ptr producer.
+ */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Write 'word_count' 32-bit words of learn data to the FLM LRN_DATA
+ * register over RAB DMA.  BUF_CTRL is first written to announce the word
+ * count, then read back so the current buffer counters can be returned in
+ * *lrn_free / *inf_avail / *sta_avail (written only on success).
+ *
+ * Returns 0 on success or a non-zero error code from the RAB DMA layer.
+ */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address = register_get_address(p->mp_lrn_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr bc_buf;
+
+	/* Propagate the actual begin() error code instead of a generic -1,
+	 * for consistency with flm_nthw_buf_ctrl_update() and the other DMA
+	 * helpers in this file.
+	 */
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to write to LRN_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_write32_dma(rac, address, bus_id, word_count, data);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* NOTE(review): '& (size - 1)' assumes a power-of-two ring. */
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words from the FLM INF_DATA register over RAB
+ * DMA into 'data'.  BUF_CTRL is written first to announce the read size
+ * (count in word 0, bits 31:16), then read back to refresh the buffer
+ * counters in *lrn_free / *inf_avail / *sta_avail (written only on
+ * success).
+ *
+ * Returns 0 on success, non-zero on DMA begin/commit failure.
+ */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_infdata = register_get_address(p->mp_inf_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from INF_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = word_count << 16;
+		bufctrl_data[1] = 0;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_infdata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA result ring; '& mask' assumes the ring
+		 * size is a power of two - TODO confirm.
+		 */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * Read 'word_count' 32-bit words from the FLM STA_DATA register over RAB
+ * DMA into 'data'.  BUF_CTRL is written first to announce the read size
+ * (count in word 1, bits 15:0), then read back to refresh the buffer
+ * counters in *lrn_free / *inf_avail / *sta_avail (written only on
+ * success).
+ *
+ * Returns 0 on success, non-zero on DMA begin/commit failure.
+ */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail)
+{
+	int ret = -1;
+
+	struct nthw_rac *rac = (struct nthw_rac *)p->mp_rac;
+	uint32_t address_stadata = register_get_address(p->mp_sta_data);
+	uint32_t address_bufctrl = register_get_address(p->mp_buf_ctrl);
+	rab_bus_id_t bus_id = 1;
+	struct dma_buf_ptr buf;
+	struct dma_buf_ptr bc_buf;
+
+	ret = nthw_rac_rab_dma_begin(rac);
+	if (ret == 0) {
+		/* Announce the number of words to read from STA_DATA */
+		uint32_t bufctrl_data[2];
+
+		bufctrl_data[0] = 0;
+		bufctrl_data[1] = word_count;
+		nthw_rac_rab_write32_dma(rac, address_bufctrl, bus_id, 2,
+					bufctrl_data);
+		nthw_rac_rab_read32_dma(rac, address_stadata, bus_id, word_count,
+				       &buf);
+		nthw_rac_rab_read32_dma(rac, address_bufctrl, bus_id, 2, &bc_buf);
+		ret = nthw_rac_rab_dma_commit(rac);
+		if (ret != 0)
+			return ret;
+
+		/* Copy out of the DMA result ring; '& mask' assumes the ring
+		 * size is a power of two - TODO confirm.
+		 */
+		uint32_t mask = buf.size - 1;
+		uint32_t index = buf.index;
+
+		for (uint32_t i = 0; i < word_count; ++index, ++i)
+			data[i] = buf.base[index & mask];
+
+		uint32_t bc_mask = bc_buf.size - 1;
+		uint32_t bc_index = bc_buf.index;
+		*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
+		*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
+		*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+	}
+
+	return ret;
+}
+
+/*
+ * FLM statistics counter accessors.
+ * Pattern: each *_cnt() copies the cached field value into *val when
+ * 'get' is non-zero; each *_update() refreshes the register shadow via
+ * register_update().  Accessors that assert their pointer correspond to
+ * registers resolved with module_query_register()/register_query_field()
+ * during init - presumably optional on some FPGA variants, so the
+ * pointer may be NULL and callers must check availability first.
+ */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_done_cnt);
+}
+
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_done);
+}
+
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_ignore_cnt);
+}
+
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_ignore);
+}
+
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_lrn_fail_cnt);
+}
+
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_lrn_fail);
+}
+
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_done_cnt);
+}
+
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_done);
+}
+
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_unl_ignore_cnt);
+}
+
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_unl_ignore);
+}
+
+/* PRB (probe) counters are query-resolved and may be absent. */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_done_cnt);
+}
+
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_done);
+	register_update(p->mp_stat_prb_done);
+}
+
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_prb_ignore_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_prb_ignore_cnt);
+}
+
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_prb_ignore);
+	register_update(p->mp_stat_prb_ignore);
+}
+
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_done_cnt);
+}
+
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_done);
+}
+
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_rel_ignore_cnt);
+}
+
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_rel_ignore);
+}
+
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_done_cnt);
+}
+
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_done);
+}
+
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_ignore_cnt);
+}
+
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_ignore);
+}
+
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_aul_fail_cnt);
+}
+
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_aul_fail);
+}
+
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_tul_done_cnt);
+}
+
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_tul_done);
+}
+
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	if (get)
+		*val = field_get_val32(p->mp_stat_flows_cnt);
+}
+
+void flm_nthw_stat_flows_update(const struct flm_nthw *p)
+{
+	register_update(p->mp_stat_flows);
+}
+
+/* The remaining counters below are all query-resolved (may be absent). */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_sta_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_sta_done_cnt);
+}
+
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_sta_done);
+	register_update(p->mp_stat_sta_done);
+}
+
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_done_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_done_cnt);
+}
+
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_done);
+	register_update(p->mp_stat_inf_done);
+}
+
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_inf_skip_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_inf_skip_cnt);
+}
+
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_inf_skip);
+	register_update(p->mp_stat_inf_skip);
+}
+
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_hit_cnt);
+}
+
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_hit);
+	register_update(p->mp_stat_pck_hit);
+}
+
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_miss_cnt);
+}
+
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_miss);
+	register_update(p->mp_stat_pck_miss);
+}
+
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_unh_cnt);
+}
+
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_unh);
+	register_update(p->mp_stat_pck_unh);
+}
+
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_pck_dis_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_pck_dis_cnt);
+}
+
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_pck_dis);
+	register_update(p->mp_stat_pck_dis);
+}
+
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_hit_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_hit_cnt);
+}
+
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_hit);
+	register_update(p->mp_stat_csh_hit);
+}
+
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_miss_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_miss_cnt);
+}
+
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_miss);
+	register_update(p->mp_stat_csh_miss);
+}
+
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_csh_unh_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_csh_unh_cnt);
+}
+
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_csh_unh);
+	register_update(p->mp_stat_csh_unh);
+}
+
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_start_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_start_cnt);
+}
+
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_start);
+	register_update(p->mp_stat_cuc_start);
+}
+
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get)
+{
+	assert(p->mp_stat_cuc_move_cnt);
+	if (get)
+		*val = field_get_val32(p->mp_stat_cuc_move_cnt);
+}
+
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p)
+{
+	assert(p->mp_stat_cuc_move);
+	register_update(p->mp_stat_cuc_move);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
new file mode 100644
index 0000000000..4796d43940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_FLM_H__
+#define __FLOW_NTHW_FLM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct flm_nthw;
+
+typedef struct flm_nthw flm_nthw_t;
+
+struct flm_nthw *flm_nthw_new(void);
+void flm_nthw_delete(struct flm_nthw *p);
+int flm_nthw_init(struct flm_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+void flm_nthw_set_debug_mode(struct flm_nthw *p, unsigned int n_debug_mode);
+
+/* Control */
+void flm_nthw_control_enable(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_init(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lfs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_lis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_uis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_ris(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pds(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_pis(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcwr(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_crcrd(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_rbl(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_eab(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_split_sdram_usage(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_control_flush(const struct flm_nthw *p);
+
+/* Status */
+void flm_nthw_status_calibdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_initdone(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_idle(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_critical(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_panic(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_crcerr(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_eft_bp(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_status_flush(const struct flm_nthw *p);
+void flm_nthw_status_update(const struct flm_nthw *p);
+
+/* Timeout */
+void flm_nthw_timeout_t(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_timeout_flush(const struct flm_nthw *p);
+
+/* Scrub */
+void flm_nthw_scrub_i(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_scrub_flush(const struct flm_nthw *p);
+
+/* Load BIN */
+void flm_nthw_load_bin(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_bin_flush(const struct flm_nthw *p);
+
+/* Load PPS */
+void flm_nthw_load_pps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_pps_flush(const struct flm_nthw *p);
+
+/* Load LPS */
+void flm_nthw_load_lps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_lps_flush(const struct flm_nthw *p);
+
+/* Load APS */
+void flm_nthw_load_aps(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_load_aps_flush(const struct flm_nthw *p);
+
+/* Prio */
+void flm_nthw_prio_limit0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft0(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft1(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft2(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_limit3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_ft3(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_prio_flush(const struct flm_nthw *p);
+
+/* PST */
+void flm_nthw_pst_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_bp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_pp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_tp(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_pst_flush(const struct flm_nthw *p);
+
+/* RCP */
+void flm_nthw_rcp_select(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_cnt(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_lookup(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw0_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_qw4_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw8_sel(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_sw9_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_mask(const struct flm_nthw *p, const uint32_t *val);
+void flm_nthw_rcp_kid(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_opn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_ipn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_dyn(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_byt_ofs(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_txplm(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_auto_ipv4_mask(const struct flm_nthw *p, uint32_t val);
+void flm_nthw_rcp_flush(const struct flm_nthw *p);
+
+/* Buf Ctrl */
+int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Lrn Data */
+int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data,
+			 uint32_t word_count, uint32_t *lrn_free,
+			 uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Inf Data */
+int flm_nthw_inf_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Sta Data */
+int flm_nthw_sta_data_update(const struct flm_nthw *p, uint32_t *data,
+			  uint32_t word_count, uint32_t *lrn_free,
+			  uint32_t *inf_avail, uint32_t *sta_avail);
+
+/* Stat Lrn _done */
+void flm_nthw_stat_lrn_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_done_update(const struct flm_nthw *p);
+
+/* Stat Lrn Ignore */
+void flm_nthw_stat_lrn_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_ignore_update(const struct flm_nthw *p);
+
+/* Stat Lrn Fail */
+void flm_nthw_stat_lrn_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_lrn_fail_update(const struct flm_nthw *p);
+
+/* Stat Unl _done */
+void flm_nthw_stat_unl_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_done_update(const struct flm_nthw *p);
+
+/* Stat Unl Ignore */
+void flm_nthw_stat_unl_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_unl_ignore_update(const struct flm_nthw *p);
+
+/* Stat Prb _done */
+void flm_nthw_stat_prb_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_done_update(const struct flm_nthw *p);
+
+/* Stat Prb Ignore */
+void flm_nthw_stat_prb_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_prb_ignore_update(const struct flm_nthw *p);
+
+/* Stat Rel _done */
+void flm_nthw_stat_rel_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_done_update(const struct flm_nthw *p);
+
+/* Stat Rel Ignore */
+void flm_nthw_stat_rel_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_rel_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul _done */
+void flm_nthw_stat_aul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_done_update(const struct flm_nthw *p);
+
+/* Stat Aul Ignore */
+void flm_nthw_stat_aul_ignore_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_ignore_update(const struct flm_nthw *p);
+
+/* Stat Aul Fail */
+void flm_nthw_stat_aul_fail_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_aul_fail_update(const struct flm_nthw *p);
+
+/* Stat Tul _done */
+void flm_nthw_stat_tul_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_tul_done_update(const struct flm_nthw *p);
+
+/* Stat Flows */
+void flm_nthw_stat_flows_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_flows_update(const struct flm_nthw *p);
+
+/* Stat Sta _done */
+void flm_nthw_stat_sta_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_sta_done_update(const struct flm_nthw *p);
+
+/* Stat Inf _done */
+void flm_nthw_stat_inf_done_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_done_update(const struct flm_nthw *p);
+
+/* Stat Inf Skip */
+void flm_nthw_stat_inf_skip_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_inf_skip_update(const struct flm_nthw *p);
+
+/* Stat Pck Hit */
+void flm_nthw_stat_pck_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_hit_update(const struct flm_nthw *p);
+
+/* Stat Pck Miss */
+void flm_nthw_stat_pck_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_miss_update(const struct flm_nthw *p);
+
+/* Stat Pck Unh */
+void flm_nthw_stat_pck_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_unh_update(const struct flm_nthw *p);
+
+/* Stat Pck Dis */
+void flm_nthw_stat_pck_dis_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_pck_dis_update(const struct flm_nthw *p);
+
+/* Stat Csh Hit */
+void flm_nthw_stat_csh_hit_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_hit_update(const struct flm_nthw *p);
+
+/* Stat Csh Miss */
+void flm_nthw_stat_csh_miss_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_miss_update(const struct flm_nthw *p);
+
+/* Stat Csh Unh */
+void flm_nthw_stat_csh_unh_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_csh_unh_update(const struct flm_nthw *p);
+
+/* Stat Cuc Start */
+void flm_nthw_stat_cuc_start_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_start_update(const struct flm_nthw *p);
+
+/* Stat Cuc Move */
+void flm_nthw_stat_cuc_move_cnt(const struct flm_nthw *p, uint32_t *val, int get);
+void flm_nthw_stat_cuc_move_update(const struct flm_nthw *p);
+
+/*
+ * Register and field handles for one FLM module instance, presumably
+ * resolved from the FPGA register model by flm_nthw_init() (see the
+ * matching .c file) and used by the accessor functions declared above.
+ */
+struct flm_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	void *mp_rac;
+
+	nt_module_t *m_flm;
+
+	/* CONTROL register and fields */
+	nt_register_t *mp_control;
+	nt_field_t *mp_control_enable;
+	nt_field_t *mp_control_init;
+	nt_field_t *mp_control_lds;
+	nt_field_t *mp_control_lfs;
+	nt_field_t *mp_control_lis;
+	nt_field_t *mp_control_uds;
+	nt_field_t *mp_control_uis;
+	nt_field_t *mp_control_rds;
+	nt_field_t *mp_control_ris;
+	nt_field_t *mp_control_pds;
+	nt_field_t *mp_control_pis;
+	nt_field_t *mp_control_crcwr;
+	nt_field_t *mp_control_crcrd;
+	nt_field_t *mp_control_rbl;
+	nt_field_t *mp_control_eab;
+	nt_field_t *mp_control_split_sdram_usage;
+
+	/* STATUS register and fields */
+	nt_register_t *mp_status;
+	nt_field_t *mp_status_calibdone;
+	nt_field_t *mp_status_initdone;
+	nt_field_t *mp_status_idle;
+	nt_field_t *mp_status_critical;
+	nt_field_t *mp_status_panic;
+	nt_field_t *mp_status_crcerr;
+	nt_field_t *mp_status_eft_bp;
+
+	/* TIMEOUT register */
+	nt_register_t *mp_timeout;
+	nt_field_t *mp_timeout_t;
+
+	/* SCRUB register */
+	nt_register_t *mp_scrub;
+	nt_field_t *mp_scrub_i;
+
+	/* LOAD_BIN / LOAD_PPS / LOAD_LPS / LOAD_APS registers */
+	nt_register_t *mp_load_bin;
+	nt_field_t *mp_load_bin_bin;
+
+	nt_register_t *mp_load_pps;
+	nt_field_t *mp_load_pps_pps;
+
+	nt_register_t *mp_load_lps;
+	nt_field_t *mp_load_lps_lps;
+
+	nt_register_t *mp_load_aps;
+	nt_field_t *mp_load_aps_aps;
+
+	/* PRIO register: four limit/flow-type field pairs */
+	nt_register_t *mp_prio;
+	nt_field_t *mp_prio_limit0;
+	nt_field_t *mp_prio_ft0;
+	nt_field_t *mp_prio_limit1;
+	nt_field_t *mp_prio_ft1;
+	nt_field_t *mp_prio_limit2;
+	nt_field_t *mp_prio_ft2;
+	nt_field_t *mp_prio_limit3;
+	nt_field_t *mp_prio_ft3;
+
+	/* PST control (address/count) and data registers */
+	nt_register_t *mp_pst_ctrl;
+	nt_field_t *mp_pst_ctrl_adr;
+	nt_field_t *mp_pst_ctrl_cnt;
+	nt_register_t *mp_pst_data;
+	nt_field_t *mp_pst_data_bp;
+	nt_field_t *mp_pst_data_pp;
+	nt_field_t *mp_pst_data_tp;
+
+	/* RCP control (address/count) and data registers */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_adr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_lookup;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_mask;
+	nt_field_t *mp_rcp_data_kid;
+	nt_field_t *mp_rcp_data_opn;
+	nt_field_t *mp_rcp_data_ipn;
+	nt_field_t *mp_rcp_data_byt_dyn;
+	nt_field_t *mp_rcp_data_byt_ofs;
+	nt_field_t *mp_rcp_data_txplm;
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+
+	/* BUF_CTRL register: learn/info/status buffer availability */
+	nt_register_t *mp_buf_ctrl;
+	nt_field_t *mp_buf_ctrl_lrn_free;
+	nt_field_t *mp_buf_ctrl_inf_avail;
+	nt_field_t *mp_buf_ctrl_sta_avail;
+
+	/* Learn/info/status data registers */
+	nt_register_t *mp_lrn_data;
+	nt_register_t *mp_inf_data;
+	nt_register_t *mp_sta_data;
+
+	/* STAT_* counter registers, one counter field each */
+	nt_register_t *mp_stat_lrn_done;
+	nt_field_t *mp_stat_lrn_done_cnt;
+
+	nt_register_t *mp_stat_lrn_ignore;
+	nt_field_t *mp_stat_lrn_ignore_cnt;
+
+	nt_register_t *mp_stat_lrn_fail;
+	nt_field_t *mp_stat_lrn_fail_cnt;
+
+	nt_register_t *mp_stat_unl_done;
+	nt_field_t *mp_stat_unl_done_cnt;
+
+	nt_register_t *mp_stat_unl_ignore;
+	nt_field_t *mp_stat_unl_ignore_cnt;
+
+	nt_register_t *mp_stat_prb_done;
+	nt_field_t *mp_stat_prb_done_cnt;
+
+	nt_register_t *mp_stat_prb_ignore;
+	nt_field_t *mp_stat_prb_ignore_cnt;
+
+	nt_register_t *mp_stat_rel_done;
+	nt_field_t *mp_stat_rel_done_cnt;
+
+	nt_register_t *mp_stat_rel_ignore;
+	nt_field_t *mp_stat_rel_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_done;
+	nt_field_t *mp_stat_aul_done_cnt;
+
+	nt_register_t *mp_stat_aul_ignore;
+	nt_field_t *mp_stat_aul_ignore_cnt;
+
+	nt_register_t *mp_stat_aul_fail;
+	nt_field_t *mp_stat_aul_fail_cnt;
+
+	nt_register_t *mp_stat_tul_done;
+	nt_field_t *mp_stat_tul_done_cnt;
+
+	nt_register_t *mp_stat_flows;
+	nt_field_t *mp_stat_flows_cnt;
+
+	nt_register_t *mp_stat_sta_done;
+	nt_field_t *mp_stat_sta_done_cnt;
+
+	nt_register_t *mp_stat_inf_done;
+	nt_field_t *mp_stat_inf_done_cnt;
+
+	nt_register_t *mp_stat_inf_skip;
+	nt_field_t *mp_stat_inf_skip_cnt;
+
+	nt_register_t *mp_stat_pck_hit;
+	nt_field_t *mp_stat_pck_hit_cnt;
+
+	nt_register_t *mp_stat_pck_miss;
+	nt_field_t *mp_stat_pck_miss_cnt;
+
+	nt_register_t *mp_stat_pck_unh;
+	nt_field_t *mp_stat_pck_unh_cnt;
+
+	nt_register_t *mp_stat_pck_dis;
+	nt_field_t *mp_stat_pck_dis_cnt;
+
+	nt_register_t *mp_stat_csh_hit;
+	nt_field_t *mp_stat_csh_hit_cnt;
+
+	nt_register_t *mp_stat_csh_miss;
+	nt_field_t *mp_stat_csh_miss_cnt;
+
+	nt_register_t *mp_stat_csh_unh;
+	nt_field_t *mp_stat_csh_unh_cnt;
+
+	nt_register_t *mp_stat_cuc_start;
+	nt_field_t *mp_stat_cuc_start_cnt;
+
+	nt_register_t *mp_stat_cuc_move;
+	nt_field_t *mp_stat_cuc_move_cnt;
+};
+
+#endif /* __FLOW_NTHW_FLM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
new file mode 100644
index 0000000000..b7fe7c5863
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hfu.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying HFU FPGA module. */
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HFU context.
+ *
+ * Returns NULL on allocation failure; release with hfu_nthw_delete().
+ */
+struct hfu_nthw *hfu_nthw_new(void)
+{
+	/* calloc() allocates and zeroes in one step (was malloc + memset). */
+	struct hfu_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/*
+ * Release an HFU context allocated by hfu_nthw_new().
+ * The struct is cleared before free() to invalidate stale handles;
+ * passing NULL is a no-op.
+ */
+void hfu_nthw_delete(struct hfu_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind an HFU context to FPGA module instance 'n_instance'.
+ *
+ * When 'p' is NULL the call only probes for the module: it returns 0 if
+ * the instance exists and -1 otherwise.  Otherwise all RCP register and
+ * field handles are resolved from the FPGA model; returns 0 on success
+ * or -1 (with an error log) when the module instance does not exist.
+ */
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HFU, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hfu %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/*
+	 * Reuse the module handle queried above instead of issuing a
+	 * second fpga_query_module() lookup (matches hsh_nthw_init()).
+	 */
+	p->m_hfu = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_hfu, HFU_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HFU_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hfu, HFU_RCP_DATA);
+	p->mp_rcp_data_len_a_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_WR);
+	p->mp_rcp_data_len_a_ol4len =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_OL4LEN);
+	p->mp_rcp_data_len_a_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_DYN);
+	p->mp_rcp_data_len_a_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_POS_OFS);
+	p->mp_rcp_data_len_a_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_DYN);
+	p->mp_rcp_data_len_a_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_ADD_OFS);
+	p->mp_rcp_data_len_a_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_A_SUB_DYN);
+	p->mp_rcp_data_len_b_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_WR);
+	p->mp_rcp_data_len_b_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_DYN);
+	p->mp_rcp_data_len_b_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_POS_OFS);
+	p->mp_rcp_data_len_b_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_DYN);
+	p->mp_rcp_data_len_b_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_ADD_OFS);
+	p->mp_rcp_data_len_b_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_B_SUB_DYN);
+	p->mp_rcp_data_len_c_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_WR);
+	p->mp_rcp_data_len_c_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_DYN);
+	p->mp_rcp_data_len_c_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_POS_OFS);
+	p->mp_rcp_data_len_c_add_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_DYN);
+	p->mp_rcp_data_len_c_add_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_ADD_OFS);
+	p->mp_rcp_data_len_c_sub_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_LEN_C_SUB_DYN);
+	p->mp_rcp_data_ttl_wr =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_WR);
+	p->mp_rcp_data_ttl_pos_dyn =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_DYN);
+	p->mp_rcp_data_ttl_pos_ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TTL_POS_OFS);
+	p->mp_rcp_data_csinf = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_CSINF);
+	p->mp_rcp_data_l3prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3PRT);
+	p->mp_rcp_data_l3frag =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L3FRAG);
+	p->mp_rcp_data_tunnel =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_TUNNEL);
+	p->mp_rcp_data_l4prt = register_get_field(p->mp_rcp_data, HFU_RCP_DATA_L4PRT);
+	p->mp_rcp_data_ol3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL3OFS);
+	p->mp_rcp_data_ol4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_OL4OFS);
+	p->mp_rcp_data_il3ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL3OFS);
+	p->mp_rcp_data_il4ofs =
+		register_get_field(p->mp_rcp_data, HFU_RCP_DATA_IL4OFS);
+
+	return 0;
+}
+
+/*
+ * HFU RCP register accessors.
+ *
+ * hfu_nthw_rcp_select()/hfu_nthw_rcp_cnt() set the RCP control
+ * address/count fields; the remaining helpers set individual RCP data
+ * fields.  field_set_val32() presumably only updates the register
+ * shadow -- hfu_nthw_rcp_flush() pushes the shadows to hardware
+ * (confirm against nthw_fpga_model).
+ */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_csinf, val);
+}
+
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3prt, val);
+}
+
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l3frag, val);
+}
+
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel, val);
+}
+
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_l4prt, val);
+}
+
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol3ofs, val);
+}
+
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ol4ofs, val);
+}
+
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il3ofs, val);
+}
+
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_il4ofs, val);
+}
+
+/* Flush the RCP control and data register shadows to hardware. */
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
new file mode 100644
index 0000000000..ecba1a8822
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hfu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HFU_H__
+#define __FLOW_NTHW_HFU_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/*
+ * Register and field handles for one HFU module instance, resolved
+ * from the FPGA register model by hfu_nthw_init() and used by the
+ * accessor functions declared below.
+ */
+struct hfu_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hfu;
+
+	/* RCP control register: record address and count fields */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register and its fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_len_a_wr;
+	nt_field_t *mp_rcp_data_len_a_ol4len;
+	nt_field_t *mp_rcp_data_len_a_pos_dyn;
+	nt_field_t *mp_rcp_data_len_a_pos_ofs;
+	nt_field_t *mp_rcp_data_len_a_add_dyn;
+	nt_field_t *mp_rcp_data_len_a_add_ofs;
+	nt_field_t *mp_rcp_data_len_a_sub_dyn;
+	nt_field_t *mp_rcp_data_len_b_wr;
+	nt_field_t *mp_rcp_data_len_b_pos_dyn;
+	nt_field_t *mp_rcp_data_len_b_pos_ofs;
+	nt_field_t *mp_rcp_data_len_b_add_dyn;
+	nt_field_t *mp_rcp_data_len_b_add_ofs;
+	nt_field_t *mp_rcp_data_len_b_sub_dyn;
+	nt_field_t *mp_rcp_data_len_c_wr;
+	nt_field_t *mp_rcp_data_len_c_pos_dyn;
+	nt_field_t *mp_rcp_data_len_c_pos_ofs;
+	nt_field_t *mp_rcp_data_len_c_add_dyn;
+	nt_field_t *mp_rcp_data_len_c_add_ofs;
+	nt_field_t *mp_rcp_data_len_c_sub_dyn;
+	nt_field_t *mp_rcp_data_ttl_wr;
+	nt_field_t *mp_rcp_data_ttl_pos_dyn;
+	nt_field_t *mp_rcp_data_ttl_pos_ofs;
+	nt_field_t *mp_rcp_data_csinf;
+	nt_field_t *mp_rcp_data_l3prt;
+	nt_field_t *mp_rcp_data_l3frag;
+	nt_field_t *mp_rcp_data_tunnel;
+	nt_field_t *mp_rcp_data_l4prt;
+	nt_field_t *mp_rcp_data_ol3ofs;
+	nt_field_t *mp_rcp_data_ol4ofs;
+	nt_field_t *mp_rcp_data_il3ofs;
+	nt_field_t *mp_rcp_data_il4ofs;
+};
+
+struct hfu_nthw *hfu_nthw_new(void);
+void hfu_nthw_delete(struct hfu_nthw *p);
+int hfu_nthw_init(struct hfu_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_csinf(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l3frag(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_tunnel(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_l4prt(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ol4ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il3ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_il4ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
+
+#endif /* __FLOW_NTHW_HFU_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
new file mode 100644
index 0000000000..0dc6434e88
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hsh.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HSH FPGA module. */
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hsh, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HSH shadow instance.
+ * Returns NULL on allocation failure; release with hsh_nthw_delete().
+ */
+struct hsh_nthw *hsh_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair. */
+	return calloc(1, sizeof(struct hsh_nthw));
+}
+
+/* Scrub and release an instance allocated by hsh_nthw_new(); NULL is a no-op. */
+void hsh_nthw_delete(struct hsh_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the HSH shadow instance to FPGA module instance @n_instance, cache
+ * the RCP control/data register and field handles, and program safe RCP
+ * defaults into record 0.
+ *
+ * When @p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  Otherwise returns 0 on success, -1 when
+ * the requested module instance is not present in the FPGA image.
+ */
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HSH, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hsh %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hsh = p_mod;
+
+	/* RCP: cache control (ADR/CNT) and data register field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hsh, HSH_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HSH_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_hsh, HSH_RCP_DATA);
+	p->mp_rcp_data_load_dist_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_LOAD_DIST_TYPE);
+	p->mp_rcp_data_mac_port_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_MAC_PORT_MASK);
+	p->mp_rcp_data_sort = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SORT);
+	p->mp_rcp_data_qw0_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_PE);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw4_pe =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_PE);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_w8_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_PE);
+	p->mp_rcp_data_w8_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_OFS);
+	p->mp_rcp_data_w8_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W8_SORT);
+	p->mp_rcp_data_w9_pe = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_PE);
+	p->mp_rcp_data_w9_ofs =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_OFS);
+	p->mp_rcp_data_w9_sort =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_SORT);
+	p->mp_rcp_data_w9_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_W9_P);
+	p->mp_rcp_data_p_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_P_MASK);
+	p->mp_rcp_data_word_mask =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_WORD_MASK);
+	p->mp_rcp_data_seed = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_SEED);
+	p->mp_rcp_data_tnl_p = register_get_field(p->mp_rcp_data, HSH_RCP_DATA_TNL_P);
+	p->mp_rcp_data_hsh_valid =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_VALID);
+	p->mp_rcp_data_hsh_type =
+		register_get_field(p->mp_rcp_data, HSH_RCP_DATA_HSH_TYPE);
+	/* Optional field - query (not get) so older FPGA images yield NULL. */
+	p->mp_rcp_data_auto_ipv4_mask =
+		register_query_field(p->mp_rcp_data, HSH_RCP_DATA_AUTO_IPV4_MASK);
+
+	/* Init: write default values into RCP record 0 and flush them. */
+	uint32_t val[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	field_set_val32(p->mp_rcp_addr, 0);
+	field_set_val32(p->mp_rcp_cnt, 1);
+
+	field_set_val32(p->mp_rcp_data_load_dist_type, 0);
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+	field_set_val32(p->mp_rcp_data_sort, 0);
+	field_set_val32(p->mp_rcp_data_qw0_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw0_ofs, 0);
+	field_set_val32(p->mp_rcp_data_qw4_pe, 0);
+	field_set_val32(p->mp_rcp_data_qw4_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_pe, 0);
+	field_set_val32(p->mp_rcp_data_w8_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w8_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_pe, 0);
+	field_set_val32(p->mp_rcp_data_w9_ofs, 0);
+	field_set_val32(p->mp_rcp_data_w9_sort, 0);
+	field_set_val32(p->mp_rcp_data_w9_p, 0);
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+	field_set_val32(p->mp_rcp_data_seed, 0);
+	field_set_val32(p->mp_rcp_data_tnl_p, 0);
+	field_set_val32(p->mp_rcp_data_hsh_valid, 0);
+	/* NOTE(review): 31 appears to be the neutral/default hash type - confirm
+	 * against the HSH register documentation.
+	 */
+	field_set_val32(p->mp_rcp_data_hsh_type, 31);
+
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+
+	return 0;
+}
+
+/*
+ * RCP field writers: each call updates one field of the shadow copy of the
+ * HSH RCP control/data registers.  Nothing reaches the hardware until
+ * hsh_nthw_rcp_flush() is called.
+ */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_load_dist_type, val);
+}
+
+/* Multi-word field: copies mn_words 32-bit words from @val. */
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mac_port_mask, val,
+		     p->mp_rcp_data_mac_port_mask->mn_words);
+}
+
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_sort, val);
+}
+
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_pe, val);
+}
+
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_pe, val);
+}
+
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_pe, val);
+}
+
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_ofs, val);
+}
+
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w8_sort, val);
+}
+
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_pe, val);
+}
+
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_ofs, val);
+}
+
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_sort, val);
+}
+
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_w9_p, val);
+}
+
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_p_mask, val);
+}
+
+/* Multi-word field: always 10 words. */
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_word_mask, val, 10);
+}
+
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_seed, val);
+}
+
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tnl_p, val);
+}
+
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_valid, val);
+}
+
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_hsh_type, val);
+}
+
+/* Optional field: silently ignored when absent from the FPGA image. */
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_auto_ipv4_mask)
+		field_set_val32(p->mp_rcp_data_auto_ipv4_mask, val);
+}
+
+/* Push the shadow RCP control and data registers to the hardware. */
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
new file mode 100644
index 0000000000..7cb7dbb743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hsh.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HSH_H__
+#define __FLOW_NTHW_HSH_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct hsh_nthw;
+
+typedef struct hsh_nthw hsh_nthw_t;
+
+/* Lifetime: allocate, bind to an FPGA module instance, release. */
+struct hsh_nthw *hsh_nthw_new(void);
+void hsh_nthw_delete(struct hsh_nthw *p);
+int hsh_nthw_init(struct hsh_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hsh_nthw_setup(struct hsh_nthw *p, int n_idx, int n_idx_cnt);
+void hsh_nthw_set_debug_mode(struct hsh_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: shadow writers; values take effect on hsh_nthw_rcp_flush(). */
+void hsh_nthw_rcp_select(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_cnt(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_load_dist_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_mac_port_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw0_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_qw4_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_qw4_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w8_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w8_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_pe(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_ofs(const struct hsh_nthw *p, int32_t val);
+void hsh_nthw_rcp_w9_sort(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_w9_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_p_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_word_mask(const struct hsh_nthw *p, uint32_t *val);
+void hsh_nthw_rcp_seed(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_tnl_p(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_valid(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_hsh_type(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_auto_ipv4_mask(const struct hsh_nthw *p, uint32_t val);
+void hsh_nthw_rcp_flush(const struct hsh_nthw *p);
+
+/* Shadow of one HSH (hash) FPGA module: cached register and field handles. */
+struct hsh_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hsh;
+
+	/* RCP control register (record address / count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_load_dist_type;
+	nt_field_t *mp_rcp_data_mac_port_mask;
+	nt_field_t *mp_rcp_data_sort;
+	nt_field_t *mp_rcp_data_qw0_pe;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw4_pe;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_w8_pe;
+	nt_field_t *mp_rcp_data_w8_ofs;
+	nt_field_t *mp_rcp_data_w8_sort;
+	nt_field_t *mp_rcp_data_w9_pe;
+	nt_field_t *mp_rcp_data_w9_ofs;
+	nt_field_t *mp_rcp_data_w9_sort;
+	nt_field_t *mp_rcp_data_w9_p;
+	nt_field_t *mp_rcp_data_p_mask;
+	nt_field_t *mp_rcp_data_word_mask;
+	nt_field_t *mp_rcp_data_seed;
+	nt_field_t *mp_rcp_data_tnl_p;
+	nt_field_t *mp_rcp_data_hsh_valid;
+	nt_field_t *mp_rcp_data_hsh_type;
+	/* May be NULL: optional field on some FPGA images. */
+	nt_field_t *mp_rcp_data_auto_ipv4_mask;
+};
+
+#endif /* __FLOW_NTHW_HSH_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
new file mode 100644
index 0000000000..fc3dc443a2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.c
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_hst.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying HST FPGA module. */
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_hst, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized HST shadow instance.
+ * Returns NULL on allocation failure; release with hst_nthw_delete().
+ */
+struct hst_nthw *hst_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair. */
+	return calloc(1, sizeof(struct hst_nthw));
+}
+
+/* Scrub and release an instance allocated by hst_nthw_new(); NULL is a no-op. */
+void hst_nthw_delete(struct hst_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the HST shadow instance to FPGA module instance @n_instance and
+ * cache the RCP control/data register and field handles.
+ *
+ * When @p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  Returns -1 when the module is absent.
+ *
+ * NOTE(review): unlike hsh_nthw_init(), no RCP defaults are written or
+ * flushed here - confirm that is intentional for the HST module.
+ */
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_HST, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Hst %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_hst = p_mod;
+
+	/* RCP: cache control (ADR/CNT) and data register field handles. */
+	p->mp_rcp_ctrl = module_get_register(p->m_hst, HST_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, HST_RCP_CTRL_CNT);
+
+	p->mp_rcp_data = module_get_register(p->m_hst, HST_RCP_DATA);
+	p->mp_rcp_data_strip_mode =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_STRIP_MODE);
+	p->mp_rcp_data_start_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_DYN);
+	p->mp_rcp_data_start_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_START_OFS);
+	p->mp_rcp_data_end_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_DYN);
+	p->mp_rcp_data_end_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_END_OFS);
+	p->mp_rcp_data_modif0_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_CMD);
+	p->mp_rcp_data_modif0_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_DYN);
+	p->mp_rcp_data_modif0_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_OFS);
+	p->mp_rcp_data_modif0_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF0_VALUE);
+	p->mp_rcp_data_modif1_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_CMD);
+	p->mp_rcp_data_modif1_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_DYN);
+	p->mp_rcp_data_modif1_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_OFS);
+	p->mp_rcp_data_modif1_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF1_VALUE);
+	p->mp_rcp_data_modif2_cmd =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_CMD);
+	p->mp_rcp_data_modif2_dyn =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_DYN);
+	p->mp_rcp_data_modif2_ofs =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_OFS);
+	p->mp_rcp_data_modif2_value =
+		register_get_field(p->mp_rcp_data, HST_RCP_DATA_MODIF2_VALUE);
+
+	return 0;
+}
+
+/*
+ * RCP field writers: each call updates one field of the shadow copy of the
+ * HST RCP control/data registers.  Nothing reaches the hardware until
+ * hst_nthw_rcp_flush() is called.
+ */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_strip_mode, val);
+}
+
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_dyn, val);
+}
+
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_start_ofs, val);
+}
+
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_dyn, val);
+}
+
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_end_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_cmd, val);
+}
+
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_dyn, val);
+}
+
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_ofs, val);
+}
+
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif0_value, val);
+}
+
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_cmd, val);
+}
+
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_dyn, val);
+}
+
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_ofs, val);
+}
+
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif1_value, val);
+}
+
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_cmd, val);
+}
+
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_dyn, val);
+}
+
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_ofs, val);
+}
+
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_modif2_value, val);
+}
+
+/* Push the shadow RCP control and data registers to the hardware. */
+void hst_nthw_rcp_flush(const struct hst_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
new file mode 100644
index 0000000000..5bc7eb6e55
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_hst.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_HST_H__
+#define __FLOW_NTHW_HST_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Shadow of one HST FPGA module: cached register and field handles. */
+struct hst_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_hst;
+
+	/* RCP control register (record address / count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_strip_mode;
+	nt_field_t *mp_rcp_data_start_dyn;
+	nt_field_t *mp_rcp_data_start_ofs;
+	nt_field_t *mp_rcp_data_end_dyn;
+	nt_field_t *mp_rcp_data_end_ofs;
+	nt_field_t *mp_rcp_data_modif0_cmd;
+	nt_field_t *mp_rcp_data_modif0_dyn;
+	nt_field_t *mp_rcp_data_modif0_ofs;
+	nt_field_t *mp_rcp_data_modif0_value;
+	nt_field_t *mp_rcp_data_modif1_cmd;
+	nt_field_t *mp_rcp_data_modif1_dyn;
+	nt_field_t *mp_rcp_data_modif1_ofs;
+	nt_field_t *mp_rcp_data_modif1_value;
+	nt_field_t *mp_rcp_data_modif2_cmd;
+	nt_field_t *mp_rcp_data_modif2_dyn;
+	nt_field_t *mp_rcp_data_modif2_ofs;
+	nt_field_t *mp_rcp_data_modif2_value;
+};
+
+typedef struct hst_nthw hst_nthw_t;
+
+/* Lifetime: allocate, bind to an FPGA module instance, release. */
+struct hst_nthw *hst_nthw_new(void);
+void hst_nthw_delete(struct hst_nthw *p);
+int hst_nthw_init(struct hst_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int hst_nthw_setup(struct hst_nthw *p, int n_idx, int n_idx_cnt);
+void hst_nthw_set_debug_mode(struct hst_nthw *p, unsigned int n_debug_mode);
+
+/* RCP: shadow writers; values take effect on hst_nthw_rcp_flush(). */
+void hst_nthw_rcp_select(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_cnt(const struct hst_nthw *p, uint32_t val);
+
+void hst_nthw_rcp_strip_mode(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_start_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_end_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif0_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif1_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_cmd(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_dyn(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_ofs(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_modif2_value(const struct hst_nthw *p, uint32_t val);
+void hst_nthw_rcp_flush(const struct hst_nthw *p);
+
+#endif /* __FLOW_NTHW_HST_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
new file mode 100644
index 0000000000..0f51a36e57
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ifr.h"
+
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IFR FPGA module. */
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized IFR shadow instance.
+ * Returns NULL on allocation failure; release with ifr_nthw_delete().
+ */
+struct ifr_nthw *ifr_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair. */
+	return calloc(1, sizeof(struct ifr_nthw));
+}
+
+/* Scrub and release an instance allocated by ifr_nthw_new(); NULL is a no-op. */
+void ifr_nthw_delete(struct ifr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind the IFR shadow instance to FPGA module instance @n_instance and
+ * cache the RCP control/data register and field handles.
+ *
+ * When @p is NULL the call only probes for the module: returns 0 if the
+ * instance exists, -1 otherwise.  Returns -1 when the module is absent.
+ */
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IFR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode: report module presence without touching state. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ifr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle looked up above instead of querying twice. */
+	p->m_ifr = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_ifr, IFR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IFR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ifr, IFR_RCP_DATA);
+	p->mp_rcp_data_en = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_EN);
+	p->mp_rcp_data_mtu = register_get_field(p->mp_rcp_data, IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+/*
+ * RCP field writers: each call updates one field of the shadow copy of the
+ * IFR RCP control/data registers.  Nothing reaches the hardware until
+ * ifr_nthw_rcp_flush() is called.
+ */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_en);
+	field_set_val32(p->mp_rcp_data_en, val);
+}
+
+/* Set the MTU field of the currently selected RCP record. */
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+	/* Fixed: previously asserted on mp_rcp_data_en instead of the MTU field. */
+	assert(p->mp_rcp_data_mtu);
+	field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+/* Push the shadow RCP control and data registers to the hardware. */
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
new file mode 100644
index 0000000000..626ca3d193
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ifr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_IFR_H__
+#define __FLOW_NTHW_IFR_H__
+
+#include "nthw_fpga_model.h"
+
+/* Shadow of one IFR FPGA module: cached register and field handles. */
+struct ifr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ifr;
+
+	/* RCP control register (record address / count) */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	/* RCP data register fields */
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_en;
+	nt_field_t *mp_rcp_data_mtu;
+};
+
+/* Lifetime: allocate, bind to an FPGA module instance, release. */
+struct ifr_nthw *ifr_nthw_new(void);
+void ifr_nthw_delete(struct ifr_nthw *p);
+int ifr_nthw_init(struct ifr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR: shadow writers; values take effect on ifr_nthw_rcp_flush(). */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
+
+#endif /* __FLOW_NTHW_IFR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
new file mode 100644
index 0000000000..27b55e3b7c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nt_util.h"
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+#include "nthw_fpga_model.h"
+
+#include "flow_nthw_info.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Clamp a product-parameter count to at most 1 (presence flag). */
+static inline unsigned int clamp_one(unsigned int val)
+{
+	if (val > 1)
+		return 1;
+	return val;
+}
+
+/*
+ * Allocate a zero-initialized INFO shadow instance.
+ * Returns NULL on allocation failure; release with info_nthw_delete().
+ */
+struct info_nthw *info_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair. */
+	return calloc(1, sizeof(struct info_nthw));
+}
+
+/* Scrub and release an instance allocated by info_nthw_new(); NULL is a no-op. */
+void info_nthw_delete(struct info_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Take a snapshot of the FPGA product parameters (capabilities and table
+ * sizes) into @p.  Counts belonging to an optional module are multiplied by
+ * its presence flag (0 or 1), so absent modules report zero capacity.
+ * Always returns 0.
+ */
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	assert(n_instance >= 0 && n_instance < 256);
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+
+	/* Presence flags for optional modules, clamped to 0/1. */
+	unsigned int km_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_KM_PRESENT, 0));
+	unsigned int kcc_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CAT_KCC_PRESENT, 0));
+	unsigned int ioa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_IOA_PRESENT, 0));
+	unsigned int roa_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_ROA_PRESENT, 0));
+	unsigned int dbs_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0));
+	unsigned int flm_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_FLM_PRESENT, 0));
+	unsigned int hst_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HST_PRESENT, 0));
+
+	/* Modules for Tx Packet Edit function */
+	unsigned int hfu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_HFU_PRESENT, 0));
+	unsigned int tx_cpy_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_CPY_PRESENT, 0));
+	unsigned int tx_ins_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_INS_PRESENT, 0));
+	unsigned int tx_rpl_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_TX_RPL_PRESENT, 0));
+	unsigned int csu_present =
+		clamp_one(fpga_get_product_param(p_fpga, NT_CSU_PRESENT, 0));
+	/* TPE exists only when all five sub-modules are present. */
+	unsigned int tpe_present = (hfu_present && tx_cpy_present && tx_ins_present &&
+				   tx_rpl_present && csu_present) ?
+				  1 :
+				  0;
+
+	p->n_phy_ports = fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0);
+	p->n_rx_ports = fpga_get_product_param(p_fpga, NT_RX_PORTS, 0);
+	p->n_ltx_avail = fpga_get_product_param(p_fpga, NT_LR_PRESENT, 0);
+	p->nb_cat_func = fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0);
+	p->nb_categories = fpga_get_product_param(p_fpga, NT_CATEGORIES, 0);
+	p->nb_queues = fpga_get_product_param(p_fpga, NT_QUEUES, 0);
+	/* Flow types apply when either KM or FLM is present. */
+	p->nb_flow_types = fpga_get_product_param(p_fpga, NT_KM_FLOW_TYPES, 0) *
+			 clamp_one(km_present + flm_present);
+	p->nb_pm_ext = fpga_get_product_param(p_fpga, NT_CAT_N_EXT, 0);
+	p->nb_len = fpga_get_product_param(p_fpga, NT_CAT_N_LEN, 0);
+	p->nb_kcc_size =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_SIZE, 0) * kcc_present;
+	p->nb_kcc_banks =
+		fpga_get_product_param(p_fpga, NT_CAT_KCC_BANKS, 0) * kcc_present;
+	p->nb_km_categories =
+		fpga_get_product_param(p_fpga, NT_KM_CATEGORIES, 0) * km_present;
+	p->nb_km_cam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_BANKS, 0) * km_present;
+	p->nb_km_cam_record_words =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_REC_WORDS, 0) * km_present;
+	p->nb_km_cam_records =
+		fpga_get_product_param(p_fpga, NT_KM_CAM_RECORDS, 0) * km_present;
+	p->nb_km_tcam_banks =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANKS, 0) * km_present;
+	p->nb_km_tcam_bank_width =
+		fpga_get_product_param(p_fpga, NT_KM_TCAM_BANK_WIDTH, 0) *
+		km_present;
+	p->nb_flm_categories =
+		fpga_get_product_param(p_fpga, NT_FLM_CATEGORIES, 0) * flm_present;
+	p->nb_flm_size_mb = fpga_get_product_param(p_fpga, NT_FLM_SIZE_MB, 0);
+	p->nb_flm_entry_size = fpga_get_product_param(p_fpga, NT_FLM_ENTRY_SIZE, 0);
+	p->nb_flm_variant = fpga_get_product_param(p_fpga, NT_FLM_VARIANT, 0);
+	p->nb_flm_prios =
+		fpga_get_product_param(p_fpga, NT_FLM_PRIOS, 0) * flm_present;
+	p->nb_flm_pst_profiles =
+		fpga_get_product_param(p_fpga, NT_FLM_PST_PROFILES, 0) *
+		flm_present;
+	p->nb_hst_categories =
+		fpga_get_product_param(p_fpga, NT_HST_CATEGORIES, 0) * hst_present;
+	p->nb_qsl_categories = fpga_get_product_param(p_fpga, NT_QSL_CATEGORIES, 0);
+	p->nb_qsl_qst_entries = fpga_get_product_param(p_fpga, NT_QSL_QST_SIZE, 0);
+	p->nb_pdb_categories = fpga_get_product_param(p_fpga, NT_PDB_CATEGORIES, 0);
+	p->nb_ioa_categories =
+		fpga_get_product_param(p_fpga, NT_IOA_CATEGORIES, 0) * ioa_present;
+	p->nb_roa_categories =
+		fpga_get_product_param(p_fpga, NT_ROA_CATEGORIES, 0) * roa_present;
+	/* DBS capacity is limited by the smaller of the Rx/Tx queue counts. */
+	p->nb_dbs_categories =
+		RTE_MIN(fpga_get_product_param(p_fpga, NT_DBS_RX_QUEUES, 0),
+		    fpga_get_product_param(p_fpga, NT_DBS_TX_QUEUES, 0)) *
+		dbs_present;
+	p->nb_cat_km_if_cnt = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_CNT,
+					       km_present + flm_present);
+	/* Default -1 marks "interface not configured" when the param is absent. */
+	p->m_cat_km_if_m0 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M0, -1);
+	p->m_cat_km_if_m1 = fpga_get_product_param(p_fpga, NT_CAT_KM_IF_M1, -1);
+	p->nb_tpe_categories =
+		fpga_get_product_param(p_fpga, NT_TPE_CATEGORIES, 0) * tpe_present;
+	p->nb_tx_cpy_writers =
+		fpga_get_product_param(p_fpga, NT_TX_CPY_WRITERS, 0) * tpe_present;
+	p->nb_tx_cpy_mask_mem =
+		fpga_get_product_param(p_fpga, NT_CPY_MASK_MEM, 0) * tpe_present;
+	p->nb_tx_rpl_depth =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_DEPTH, 0) * tpe_present;
+	p->nb_tx_rpl_ext_categories =
+		fpga_get_product_param(p_fpga, NT_TX_RPL_EXT_CATEGORIES, 0) *
+		tpe_present;
+	p->nb_tpe_ifr_categories =
+		fpga_get_product_param(p_fpga, NT_TX_MTU_PROFILE_IFR, 0);
+	return 0;
+}
+
+/*
+ * Trivial read-only accessors for the capability snapshot taken by
+ * info_nthw_init().  Each returns one cached product parameter.
+ */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p)
+{
+	return p->n_phy_ports;
+}
+
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p)
+{
+	return p->n_rx_ports;
+}
+
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p)
+{
+	return p->n_ltx_avail;
+}
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p)
+{
+	return p->nb_categories;
+}
+
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p)
+{
+	return p->nb_kcc_size;
+}
+
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p)
+{
+	return p->nb_kcc_banks;
+}
+
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p)
+{
+	return p->nb_queues;
+}
+
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p)
+{
+	return p->nb_cat_func;
+}
+
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p)
+{
+	return p->nb_flow_types;
+}
+
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p)
+{
+	return p->nb_pm_ext;
+}
+
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p)
+{
+	return p->nb_len;
+}
+
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p)
+{
+	return p->nb_km_categories;
+}
+
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_cam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p)
+{
+	return p->nb_km_cam_record_words;
+}
+
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p)
+{
+	return p->nb_km_cam_records;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_banks;
+}
+
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p)
+{
+	return p->nb_km_tcam_bank_width;
+}
+
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p)
+{
+	return p->nb_flm_categories;
+}
+
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p)
+{
+	return p->nb_flm_size_mb;
+}
+
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p)
+{
+	return p->nb_flm_entry_size;
+}
+
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p)
+{
+	return p->nb_flm_variant;
+}
+
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p)
+{
+	return p->nb_flm_prios;
+}
+
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p)
+{
+	return p->nb_flm_pst_profiles;
+}
+
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p)
+{
+	return p->nb_hst_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p)
+{
+	return p->nb_qsl_categories;
+}
+
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p)
+{
+	return p->nb_qsl_qst_entries;
+}
+
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p)
+{
+	return p->nb_pdb_categories;
+}
+
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p)
+{
+	return p->nb_ioa_categories;
+}
+
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p)
+{
+	return p->nb_roa_categories;
+}
+
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p)
+{
+	return p->nb_dbs_categories;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p)
+{
+	return p->nb_cat_km_if_cnt;
+}
+
+/* NOTE(review): backing field defaults to -1 ("not configured") in
+ * info_nthw_init() but is returned as unsigned - callers see UINT_MAX then.
+ */
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m0;
+}
+
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p)
+{
+	return p->m_cat_km_if_m1;
+}
+
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_categories;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_writers;
+}
+
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p)
+{
+	return p->nb_tx_cpy_mask_mem;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_depth;
+}
+
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p)
+{
+	return p->nb_tx_rpl_ext_categories;
+}
+
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p)
+{
+	return p->nb_tpe_ifr_categories;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
new file mode 100644
index 0000000000..c697ba84e9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Flow-filter INFO module: read-only access to the FPGA capability and
+ * dimensioning parameters cached in struct info_nthw.
+ */
+#ifndef FLOW_NTHW_INFO_H_
+#define FLOW_NTHW_INFO_H_
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+struct info_nthw;
+
+/* Handle lifetime; info_nthw_init() binds the handle to an FPGA instance. */
+struct info_nthw *info_nthw_new(void);
+void info_nthw_delete(struct info_nthw *p);
+int info_nthw_init(struct info_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int info_nthw_setup(struct info_nthw *p, int n_idx, int n_idx_cnt);
+
+/* Accessors for the cached values; none of these touch hardware. */
+unsigned int info_nthw_get_nb_phy_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_rx_ports(const struct info_nthw *p);
+unsigned int info_nthw_get_ltx_avail(const struct info_nthw *p);
+
+unsigned int info_nthw_get_nb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_queues(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_funcs(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_flow_types(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pm_ext(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_len(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_size(const struct info_nthw *p);
+unsigned int info_nthw_get_kcc_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_record_words(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_cam_records(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_banks(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_km_tcam_bank_width(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_size_mb(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_entry_size(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_variant(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_prios(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_flm_pst_profiles(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_hst_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_qsl_qst_entries(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_pdb_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_ioa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_roa_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_dbs_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_cnt(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m0(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_cat_km_if_m1(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_writers(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_cpy_mask_mem(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_depth(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tx_rpl_ext_categories(const struct info_nthw *p);
+unsigned int info_nthw_get_nb_tpe_ifr_categories(const struct info_nthw *p);
+
+/*
+ * Cached per-adapter parameter values served by the getters above;
+ * presumably populated by info_nthw_init() — confirm in flow_nthw_info.c.
+ */
+struct info_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+	unsigned int n_phy_ports;
+	unsigned int n_rx_ports;
+	unsigned int n_ltx_avail;
+	unsigned int nb_cat_func;
+	unsigned int nb_categories;
+	unsigned int nb_queues;
+	unsigned int nb_flow_types;
+	unsigned int nb_pm_ext;
+	unsigned int nb_len;
+	unsigned int nb_kcc_size;
+	unsigned int nb_kcc_banks;
+	unsigned int nb_km_categories;
+	unsigned int nb_km_cam_banks;
+	unsigned int nb_km_cam_record_words;
+	unsigned int nb_km_cam_records;
+	unsigned int nb_km_tcam_banks;
+	unsigned int nb_km_tcam_bank_width;
+	unsigned int nb_flm_categories;
+	unsigned int nb_flm_size_mb;
+	unsigned int nb_flm_entry_size;
+	unsigned int nb_flm_variant;
+	unsigned int nb_flm_prios;
+	unsigned int nb_flm_pst_profiles;
+	unsigned int nb_hst_categories;
+	unsigned int nb_qsl_categories;
+	unsigned int nb_qsl_qst_entries;
+	unsigned int nb_pdb_categories;
+	unsigned int nb_ioa_categories;
+	unsigned int nb_roa_categories;
+	unsigned int nb_dbs_categories;
+	unsigned int nb_cat_km_if_cnt;
+	unsigned int m_cat_km_if_m0;
+	unsigned int m_cat_km_if_m1;
+	unsigned int nb_tpe_categories;
+	unsigned int nb_tx_cpy_writers;
+	unsigned int nb_tx_cpy_mask_mem;
+	unsigned int nb_tx_rpl_depth;
+	unsigned int nb_tx_rpl_ext_categories;
+	unsigned int nb_tpe_ifr_categories;
+};
+
+#endif /* FLOW_NTHW_INFO_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
new file mode 100644
index 0000000000..a83d443f6f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_ioa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying IOA module handle. */
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_ioa, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized ioa_nthw handle.
+ * Returns NULL on allocation failure.
+ */
+struct ioa_nthw *ioa_nthw_new(void)
+{
+	/* calloc() zero-fills, replacing the original malloc()+memset() pair */
+	struct ioa_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/* Release an ioa_nthw handle; a NULL argument is a no-op. */
+void ioa_nthw_delete(struct ioa_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* scrub the handle contents before releasing the memory */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind an ioa_nthw handle to IOA module instance n_instance on p_fpga and
+ * resolve all register/field handles used by the setters in this file.
+ *
+ * Probe mode: when called with p == NULL, only checks for the module
+ * instance and returns 0 if present, -1 if absent.
+ * Returns -1 (with a log message) when the instance does not exist.
+ */
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_IOA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Ioa %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_ioa = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_ioa, IOA_RECIPE_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, IOA_RECIPE_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_ioa, IOA_RECIPE_DATA);
+	p->mp_rcp_data_tunnel_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_TUNNEL_POP);
+	p->mp_rcp_data_vlan_pop =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_POP);
+	p->mp_rcp_data_vlan_push =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PUSH);
+	p->mp_rcp_data_vlan_vid =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_VID);
+	p->mp_rcp_data_vlan_dei =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_DEI);
+	p->mp_rcp_data_vlan_pcp =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_PCP);
+	p->mp_rcp_data_vlan_tpid_sel =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_VLAN_TPID_SEL);
+	p->mp_rcp_data_queue_override_en =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_OVERRIDE_EN);
+	p->mp_rcp_data_queue_id =
+		register_get_field(p->mp_rcp_data, IOA_RECIPE_DATA_QUEUE_ID);
+
+	/* Special Vlan Tpid */
+	p->mp_special = module_get_register(p->m_ioa, IOA_VLAN_TPID_SPECIAL);
+	p->mp_special_vlan_tpid_cust_tpid0 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID0);
+	p->mp_special_vlan_tpid_cust_tpid1 =
+		register_get_field(p->mp_special, IOA_VLAN_TPID_SPECIAL_CUSTTPID1);
+	{
+		/*
+		 * This extension in IOA is a messy way FPGA have chosen to
+		 * put control bits for EPP module in IOA. It is accepted as
+		 * we are going towards exchange IOA and ROA modules later
+		 * to get higher scalability in future.
+		 */
+		/* EPP registers are optional: queried, and NULL when absent. */
+		p->mp_roa_epp_ctrl =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_CTRL);
+		if (p->mp_roa_epp_ctrl) {
+			p->mp_roa_epp_addr =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_ADR);
+			p->mp_roa_epp_cnt =
+				register_get_field(p->mp_roa_epp_ctrl,
+						   IOA_ROA_EPP_CTRL_CNT);
+		} else {
+			p->mp_roa_epp_addr = NULL;
+			p->mp_roa_epp_cnt = NULL;
+		}
+
+		p->mp_roa_epp_data =
+			module_query_register(p->m_ioa, IOA_ROA_EPP_DATA);
+		if (p->mp_roa_epp_data) {
+			p->mp_roa_epp_data_push_tunnel =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_PUSH_TUNNEL);
+			p->mp_roa_epp_data_tx_port =
+				register_get_field(p->mp_roa_epp_data,
+						   IOA_ROA_EPP_DATA_TX_PORT);
+		} else {
+			p->mp_roa_epp_data_push_tunnel = NULL;
+			p->mp_roa_epp_data_tx_port = NULL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Field write helpers.  Each setter stages one value with field_set_val32();
+ * the matching *_flush() helper then calls register_flush() to push the
+ * staged register(s).  The EPP helpers NULL-check their fields because the
+ * EPP registers are optional and may be absent (see ioa_nthw_init()).
+ */
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tunnel_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pop, val);
+}
+
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_push, val);
+}
+
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_vid, val);
+}
+
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_dei, val);
+}
+
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_pcp, val);
+}
+
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_vlan_tpid_sel, val);
+}
+
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_override_en, val);
+}
+
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_queue_id, val);
+}
+
+/* Push the staged RCP control and data registers to hardware. */
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid0, val);
+}
+
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_special_vlan_tpid_cust_tpid1, val);
+}
+
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p)
+{
+	register_flush(p->mp_special, 1);
+}
+
+/* EPP module: fields may be NULL when the optional registers are absent. */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_addr)
+		field_set_val32(p->mp_roa_epp_addr, val);
+}
+
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_cnt)
+		field_set_val32(p->mp_roa_epp_cnt, val);
+}
+
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_push_tunnel)
+		field_set_val32(p->mp_roa_epp_data_push_tunnel, val);
+}
+
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val)
+{
+	if (p->mp_roa_epp_data_tx_port)
+		field_set_val32(p->mp_roa_epp_data_tx_port, val);
+}
+
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p)
+{
+	if (p->mp_roa_epp_ctrl)
+		register_flush(p->mp_roa_epp_ctrl, 1);
+	if (p->mp_roa_epp_data)
+		register_flush(p->mp_roa_epp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
new file mode 100644
index 0000000000..8ab30d2d28
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_ioa.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Flow-filter IOA module: handle type and register write helpers.
+ */
+#ifndef FLOW_NTHW_IOA_H_
+#define FLOW_NTHW_IOA_H_
+
+#include "nthw_fpga_model.h"
+
+#include <stdint.h> /* uint32_t */
+
+/* IOA handle: FPGA reference plus resolved register/field pointers. */
+struct ioa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_ioa;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+
+	nt_field_t *mp_rcp_data_tunnel_pop;
+	nt_field_t *mp_rcp_data_vlan_pop;
+	nt_field_t *mp_rcp_data_vlan_push;
+	nt_field_t *mp_rcp_data_vlan_vid;
+	nt_field_t *mp_rcp_data_vlan_dei;
+	nt_field_t *mp_rcp_data_vlan_pcp;
+	nt_field_t *mp_rcp_data_vlan_tpid_sel;
+	nt_field_t *mp_rcp_data_queue_override_en;
+	nt_field_t *mp_rcp_data_queue_id;
+
+	nt_register_t *mp_special;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid0;
+	nt_field_t *mp_special_vlan_tpid_cust_tpid1;
+
+	/* Optional EPP registers/fields; NULL when absent on the FPGA. */
+	nt_register_t *mp_roa_epp_ctrl;
+	nt_field_t *mp_roa_epp_addr;
+	nt_field_t *mp_roa_epp_cnt;
+	nt_register_t *mp_roa_epp_data;
+	nt_field_t *mp_roa_epp_data_push_tunnel;
+	nt_field_t *mp_roa_epp_data_tx_port;
+};
+
+typedef struct ioa_nthw ioa_nthw_t;
+
+struct ioa_nthw *ioa_nthw_new(void);
+void ioa_nthw_delete(struct ioa_nthw *p);
+int ioa_nthw_init(struct ioa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int ioa_nthw_setup(struct ioa_nthw *p, int n_idx, int n_idx_cnt);
+void ioa_nthw_set_debug_mode(struct ioa_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void ioa_nthw_rcp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_tunnel_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pop(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_push(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_vid(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_dei(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_pcp(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_vlan_tpid_sel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_override_en(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_queue_id(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_rcp_flush(const struct ioa_nthw *p);
+
+/* Vlan Tpid Special */
+void ioa_nthw_special_vlan_tpid_cust_tpid0(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_cust_tpid1(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_special_vlan_tpid_flush(const struct ioa_nthw *p);
+
+/* EPP module */
+void ioa_nthw_roa_epp_select(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_cnt(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_push_tunnel(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_tx_port(const struct ioa_nthw *p, uint32_t val);
+void ioa_nthw_roa_epp_flush(const struct ioa_nthw *p);
+
+#endif /* FLOW_NTHW_IOA_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
new file mode 100644
index 0000000000..af54e14940
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_km.h"
+
+#include <stdint.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/*
+ * Write val to an optional field pointer, skipping silently when the field
+ * was not present on this FPGA image (pointer is NULL).  The argument is
+ * evaluated once; both macro arguments are parenthesized in the expansion
+ * (the original left "val" unparenthesized and declared a capture-prone
+ * one-letter identifier via the odd "(a)" declarator).
+ */
+#define CHECK_AND_SET_VALUE(_a, val)                  \
+	do {                                          \
+		__typeof__(_a) _fld = (_a);           \
+		if (_fld)                             \
+			field_set_val32(_fld, (val)); \
+	} while (0)
+
+/* Forward the debug-mode setting to the underlying KM module handle. */
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_km, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized km_nthw handle.
+ * Returns NULL on allocation failure.
+ */
+struct km_nthw *km_nthw_new(void)
+{
+	/* calloc() zero-fills, replacing the original malloc()+memset() pair */
+	struct km_nthw *p = calloc(1, sizeof(*p));
+
+	return p;
+}
+
+/* Release a km_nthw handle; a NULL argument is a no-op. */
+void km_nthw_delete(struct km_nthw *p)
+{
+	if (!p)
+		return;
+
+	/* scrub the handle contents before releasing the memory */
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a km_nthw handle to KM module instance n_instance on p_fpga and
+ * resolve all register/field handles used by the setters in this file.
+ * Fields looked up with register_query_field() are optional and stay NULL
+ * when absent; the fall-back branches map older FPGA field names onto the
+ * same members.
+ *
+ * Probe mode: when called with p == NULL, only checks for the module
+ * instance and returns 0 if present, -1 if absent.
+ * Returns -1 (with a log message) when the instance does not exist.
+ */
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_KM, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Km %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_km = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_km, KM_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, KM_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_km, KM_RCP_DATA);
+	p->mp_rcp_data_qw0_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_DYN);
+	p->mp_rcp_data_qw0_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_OFS);
+	p->mp_rcp_data_qw0_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_A);
+	p->mp_rcp_data_qw0_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW0_SEL_B);
+	p->mp_rcp_data_qw4_dyn =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_DYN);
+	p->mp_rcp_data_qw4_ofs =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_OFS);
+	p->mp_rcp_data_qw4_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_A);
+	p->mp_rcp_data_qw4_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_QW4_SEL_B);
+
+	/* optional: presence decides the v0.6+ branch selection below */
+	p->mp_rcp_data_sw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_DYN);
+	p->mp_rcp_data_dw8_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_DYN);
+
+	p->mp_rcp_data_swx_ovs_sb =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_OVS_SB);
+	p->mp_rcp_data_swx_cch =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SWX_CCH);
+	p->mp_rcp_data_swx_sel_a =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_A);
+	p->mp_rcp_data_swx_sel_b =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SWX_SEL_B);
+	p->mp_rcp_data_mask_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_A);
+	p->mp_rcp_data_mask_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_MASK_B);
+	p->mp_rcp_data_dual = register_get_field(p->mp_rcp_data, KM_RCP_DATA_DUAL);
+	p->mp_rcp_data_paired =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_PAIRED);
+	p->mp_rcp_data_el_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_A);
+	p->mp_rcp_data_el_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_EL_B);
+	p->mp_rcp_data_info_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_A);
+	p->mp_rcp_data_info_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_INFO_B);
+	p->mp_rcp_data_ftm_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_A);
+	p->mp_rcp_data_ftm_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_FTM_B);
+	p->mp_rcp_data_bank_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_A);
+	p->mp_rcp_data_bank_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_BANK_B);
+	p->mp_rcp_data_kl_a = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_A);
+	p->mp_rcp_data_kl_b = register_get_field(p->mp_rcp_data, KM_RCP_DATA_KL_B);
+	p->mp_rcp_data_flow_set =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_FLOW_SET);
+	p->mp_rcp_data_keyway_a =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_A);
+	p->mp_rcp_data_keyway_b =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_KEYWAY_B);
+	p->mp_rcp_data_synergy_mode =
+		register_get_field(p->mp_rcp_data, KM_RCP_DATA_SYNERGY_MODE);
+
+	/* CAM */
+	p->mp_cam_ctrl = module_get_register(p->m_km, KM_CAM_CTRL);
+	p->mp_cam_addr = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_ADR);
+	p->mp_cam_cnt = register_get_field(p->mp_cam_ctrl, KM_CAM_CTRL_CNT);
+	p->mp_cam_data = module_get_register(p->m_km, KM_CAM_DATA);
+	p->mp_cam_data_w0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W0);
+	p->mp_cam_data_w1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W1);
+	p->mp_cam_data_w2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W2);
+	p->mp_cam_data_w3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W3);
+	p->mp_cam_data_w4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W4);
+	p->mp_cam_data_w5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_W5);
+	p->mp_cam_data_ft0 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT0);
+	p->mp_cam_data_ft1 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT1);
+	p->mp_cam_data_ft2 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT2);
+	p->mp_cam_data_ft3 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT3);
+	p->mp_cam_data_ft4 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT4);
+	p->mp_cam_data_ft5 = register_get_field(p->mp_cam_data, KM_CAM_DATA_FT5);
+	/* TCAM */
+	p->mp_tcam_ctrl = module_get_register(p->m_km, KM_TCAM_CTRL);
+	p->mp_tcam_addr = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_ADR);
+	p->mp_tcam_cnt = register_get_field(p->mp_tcam_ctrl, KM_TCAM_CTRL_CNT);
+	p->mp_tcam_data = module_get_register(p->m_km, KM_TCAM_DATA);
+	p->mp_tcam_data_t = register_get_field(p->mp_tcam_data, KM_TCAM_DATA_T);
+	/* TCI */
+	p->mp_tci_ctrl = module_get_register(p->m_km, KM_TCI_CTRL);
+	p->mp_tci_addr = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_ADR);
+	p->mp_tci_cnt = register_get_field(p->mp_tci_ctrl, KM_TCI_CTRL_CNT);
+	p->mp_tci_data = module_get_register(p->m_km, KM_TCI_DATA);
+	p->mp_tci_data_color = register_get_field(p->mp_tci_data, KM_TCI_DATA_COLOR);
+	p->mp_tci_data_ft = register_get_field(p->mp_tci_data, KM_TCI_DATA_FT);
+	/* TCQ */
+	p->mp_tcq_ctrl = module_get_register(p->m_km, KM_TCQ_CTRL);
+	p->mp_tcq_addr = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_ADR);
+	p->mp_tcq_cnt = register_get_field(p->mp_tcq_ctrl, KM_TCQ_CTRL_CNT);
+	p->mp_tcq_data = module_get_register(p->m_km, KM_TCQ_DATA);
+	p->mp_tcq_data_bank_mask =
+		register_query_field(p->mp_tcq_data, KM_TCQ_DATA_BANK_MASK);
+	p->mp_tcq_data_qual = register_get_field(p->mp_tcq_data, KM_TCQ_DATA_QUAL);
+
+	p->mp_rcp_data_dw0_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_DYN);
+	p->mp_rcp_data_dw0_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW0_B_OFS);
+	p->mp_rcp_data_dw2_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_DYN);
+	p->mp_rcp_data_dw2_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW2_B_OFS);
+	p->mp_rcp_data_sw4_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_DYN);
+	p->mp_rcp_data_sw4_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW4_B_OFS);
+	p->mp_rcp_data_sw5_b_dyn =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_DYN);
+	p->mp_rcp_data_sw5_b_ofs =
+		register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW5_B_OFS);
+	if (!p->mp_rcp_data_dw0_b_dyn) {
+		/* old field defines */
+		p->mp_rcp_data_dw0_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_DYN);
+		p->mp_rcp_data_dw0_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW0_B_OFS);
+		p->mp_rcp_data_dw2_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_DYN);
+		p->mp_rcp_data_dw2_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_QW4_B_OFS);
+		p->mp_rcp_data_sw4_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_DYN);
+		p->mp_rcp_data_sw4_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_B_OFS);
+		p->mp_rcp_data_sw5_b_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_DYN);
+		p->mp_rcp_data_sw5_b_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_B_OFS);
+	}
+
+	/* v0.6+ */
+	if (p->mp_rcp_data_dw8_dyn) {
+		p->mp_rcp_data_dw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_OFS);
+		p->mp_rcp_data_dw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_A);
+		p->mp_rcp_data_dw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW8_SEL_B);
+		p->mp_rcp_data_dw10_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_DYN);
+		p->mp_rcp_data_dw10_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_OFS);
+		p->mp_rcp_data_dw10_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_A);
+		p->mp_rcp_data_dw10_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_DW10_SEL_B);
+	} else if (p->mp_rcp_data_sw8_dyn) {
+		p->mp_rcp_data_sw8_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_OFS);
+		p->mp_rcp_data_sw8_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_A);
+		p->mp_rcp_data_sw8_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW8_SEL_B);
+		p->mp_rcp_data_sw9_dyn =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_DYN);
+		p->mp_rcp_data_sw9_ofs =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_OFS);
+		p->mp_rcp_data_sw9_sel_a =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_A);
+		p->mp_rcp_data_sw9_sel_b =
+			register_query_field(p->mp_rcp_data, KM_RCP_DATA_SW9_SEL_B);
+	}
+
+	return 0;
+}
+
+/*
+ * RCP field write helpers.  Each setter stages one value; nothing reaches
+ * hardware until km_nthw_rcp_flush() calls register_flush().
+ * CHECK_AND_SET_VALUE() is used for every field that km_nthw_init() resolves
+ * with register_query_field() and that may therefore be NULL on some FPGA
+ * images (including the B-bank DW/SW fields, which have old/new name
+ * fall-backs and could be missing in both forms).
+ * Stray ';' tokens after the function bodies (not valid ISO C) were removed.
+ */
+/* RCP */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_dyn, val);
+}
+
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_ofs, val);
+}
+
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_a, val);
+}
+
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw0_sel_b, val);
+}
+
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_dyn, val);
+}
+
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_ofs, val);
+}
+
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_a, val);
+}
+
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_qw4_sel_b, val);
+}
+
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_dyn, val);
+}
+
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_ofs, val);
+}
+
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_a, val);
+}
+
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw8_sel_b, val);
+}
+
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_dyn, val);
+}
+
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_ofs, val);
+}
+
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_a, val);
+}
+
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw9_sel_b, val);
+}
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_ovs_sb, val);
+}
+
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_swx_cch, val);
+}
+
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_ofs, val);
+}
+
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_a, val);
+}
+
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw8_sel_b, val);
+}
+
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_dyn, val);
+}
+
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_ofs, val);
+}
+
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_a, val);
+}
+
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw10_sel_b, val);
+}
+
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_a, val);
+}
+
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_swx_sel_b, val);
+}
+
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_b, val, p->mp_rcp_data_mask_b->mn_words);
+}
+
+/* for DW8/DW10 from v6+ */
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val)
+{
+	field_set_val(p->mp_rcp_data_mask_a, val, p->mp_rcp_data_mask_a->mn_words);
+}
+
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dual, val);
+}
+
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_paired, val);
+}
+
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_a, val);
+}
+
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_el_b, val);
+}
+
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_a, val);
+}
+
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_info_b, val);
+}
+
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_a, val);
+}
+
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ftm_b, val);
+}
+
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_a, val);
+}
+
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_bank_b, val);
+}
+
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_a, val);
+}
+
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_kl_b, val);
+}
+
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_flow_set, val);
+}
+
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_a, val);
+}
+
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_keyway_b, val);
+}
+
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_synergy_mode, val);
+}
+
+/*
+ * B-bank fields below are resolved via register_query_field() (with old-name
+ * fall-backs) in km_nthw_init() and may be NULL, so guard the writes.
+ */
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw0_b_dyn, val);
+}
+
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw0_b_ofs, val);
+}
+
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw2_b_dyn, val);
+}
+
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_dw2_b_ofs, val);
+}
+
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw4_b_dyn, val);
+}
+
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw4_b_ofs, val);
+}
+
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw5_b_dyn, val);
+}
+
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_rcp_data_sw5_b_ofs, val);
+}
+
+/* Push the staged RCP control and data registers to hardware. */
+void km_nthw_rcp_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CAM: shadow-register setters for the CAM lookup table (words w0-w5,
+ * flow types ft0-ft5). Writes take effect on km_nthw_cam_flush().
+ * Fix: dropped the stray ';' after each function body.
+ */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_addr, val);
+}
+
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_cnt, val);
+}
+
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w0, val);
+}
+
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w1, val);
+}
+
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w2, val);
+}
+
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w3, val);
+}
+
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w4, val);
+}
+
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_w5, val);
+}
+
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft0, val);
+}
+
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft1, val);
+}
+
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft2, val);
+}
+
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft3, val);
+}
+
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft4, val);
+}
+
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_cam_data_ft5, val);
+}
+
+/* Commit the shadow CAM control/data registers to hardware. */
+void km_nthw_cam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_cam_ctrl, 1);
+	register_flush(p->mp_cam_data, 1);
+}
+
+/* TCAM: shadow-register setters; km_nthw_tcam_t() writes a 3-word
+ * entry. Writes take effect on km_nthw_tcam_flush().
+ * Fix: dropped the stray ';' after each function body.
+ */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_addr, val);
+}
+
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcam_cnt, val);
+}
+
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val)
+{
+	/* Multi-word field: copies 3 x 32-bit words from val. */
+	field_set_val(p->mp_tcam_data_t, val, 3);
+}
+
+void km_nthw_tcam_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcam_ctrl, 1);
+	register_flush(p->mp_tcam_data, 1);
+}
+
+/* TCI: shadow-register setters (color and flow type). Writes take
+ * effect on km_nthw_tci_flush().
+ * Fix: dropped the stray ';' after each function body.
+ */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_addr, val);
+}
+
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_cnt, val);
+}
+
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_color, val);
+}
+
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tci_data_ft, val);
+}
+
+void km_nthw_tci_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tci_ctrl, 1);
+	register_flush(p->mp_tci_data, 1);
+}
+
+/* TCQ: shadow-register setters. Writes take effect on
+ * km_nthw_tcq_flush().
+ * Fix: dropped the stray ';' after each function body and moved the
+ * trailing comment on qual72 above its function.
+ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_addr, val);
+}
+
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_cnt, val);
+}
+
+/* BANK_MASK is optional on some FPGA versions; CHECK_AND_SET_VALUE()
+ * presumably skips the write when the field is NULL.
+ */
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val)
+{
+	CHECK_AND_SET_VALUE(p->mp_tcq_data_bank_mask, val);
+}
+
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tcq_data_qual, val);
+}
+
+/* 72-bit qualifier variant, to use in v4: writes 3 x 32-bit words. */
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tcq_data_qual, val, 3);
+}
+
+void km_nthw_tcq_flush(const struct km_nthw *p)
+{
+	register_flush(p->mp_tcq_ctrl, 1);
+	register_flush(p->mp_tcq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
new file mode 100644
index 0000000000..61f9ed2ae4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_km.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_KM_H__
+#define __FLOW_NTHW_KM_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct km_nthw;
+
+typedef struct km_nthw km_nthw_t;
+
+/* Lifetime: km_nthw_new() allocates, km_nthw_init() binds the instance
+ * to KM module n_instance of p_fpga, km_nthw_delete() releases it.
+ */
+struct km_nthw *km_nthw_new(void);
+void km_nthw_delete(struct km_nthw *p);
+int km_nthw_init(struct km_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int km_nthw_setup(struct km_nthw *p, int n_idx, int n_idx_cnt);
+void km_nthw_set_debug_mode(struct km_nthw *p, unsigned int n_debug_mode);
+
+/* Setters below write shadow register fields only; each group has a
+ * *_flush() that commits the shadow state to hardware.
+ */
+/* RCP initial v3 */
+void km_nthw_rcp_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw0_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw0_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_qw4_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_qw4_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw9_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw9_sel_b(const struct km_nthw *p, uint32_t val);
+/* subst in v6 */
+void km_nthw_rcp_dw8_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw8_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw8_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw10_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw10_sel_b(const struct km_nthw *p, uint32_t val);
+
+void km_nthw_rcp_swx_ovs_sb(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_cch(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_swx_sel_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_mask_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_d_a(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_mask_b(const struct km_nthw *p, const uint32_t *val);
+void km_nthw_rcp_dual(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_paired(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_el_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_info_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_ftm_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_bank_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_kl_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_flow_set(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_a(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_keyway_b(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_synergy_mode(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw0_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_dw2_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_dw2_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw4_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw4_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_sw5_b_dyn(const struct km_nthw *p, uint32_t val);
+void km_nthw_rcp_sw5_b_ofs(const struct km_nthw *p, int32_t val);
+void km_nthw_rcp_flush(const struct km_nthw *p);
+/* CAM */
+void km_nthw_cam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_w5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft0(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft1(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft2(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft3(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft4(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_ft5(const struct km_nthw *p, uint32_t val);
+void km_nthw_cam_flush(const struct km_nthw *p);
+/* TCAM */
+void km_nthw_tcam_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcam_t(const struct km_nthw *p, uint32_t *val);
+void km_nthw_tcam_flush(const struct km_nthw *p);
+/* TCI */
+void km_nthw_tci_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_color(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_ft(const struct km_nthw *p, uint32_t val);
+void km_nthw_tci_flush(const struct km_nthw *p);
+/* TCQ */
+void km_nthw_tcq_select(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_cnt(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_bank_mask(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual(const struct km_nthw *p, uint32_t val);
+void km_nthw_tcq_qual72(const struct km_nthw *p, uint32_t *val);
+
+void km_nthw_tcq_flush(const struct km_nthw *p);
+
+/* Cached register/field handles for one KM module instance.
+ * Pointers are owned by the FPGA model, not by this struct; fields
+ * looked up with register_query_field() may be NULL on FPGA versions
+ * that lack them.
+ */
+struct km_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_km;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_qw0_dyn;
+	nt_field_t *mp_rcp_data_qw0_ofs;
+	nt_field_t *mp_rcp_data_qw0_sel_a;
+	nt_field_t *mp_rcp_data_qw0_sel_b;
+	nt_field_t *mp_rcp_data_qw4_dyn;
+	nt_field_t *mp_rcp_data_qw4_ofs;
+	nt_field_t *mp_rcp_data_qw4_sel_a;
+	nt_field_t *mp_rcp_data_qw4_sel_b;
+	nt_field_t *mp_rcp_data_sw8_dyn;
+	nt_field_t *mp_rcp_data_sw8_ofs;
+	nt_field_t *mp_rcp_data_sw8_sel_a;
+	nt_field_t *mp_rcp_data_sw8_sel_b;
+	nt_field_t *mp_rcp_data_sw9_dyn;
+	nt_field_t *mp_rcp_data_sw9_ofs;
+	nt_field_t *mp_rcp_data_sw9_sel_a;
+	nt_field_t *mp_rcp_data_sw9_sel_b;
+
+	nt_field_t *mp_rcp_data_dw8_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw8_sel_b; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_dyn; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_ofs; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_a; /* substituted Sw<x> from v6+ */
+	nt_field_t *mp_rcp_data_dw10_sel_b; /* substituted Sw<x> from v6+ */
+
+	nt_field_t *mp_rcp_data_swx_ovs_sb;
+	nt_field_t *mp_rcp_data_swx_cch;
+	nt_field_t *mp_rcp_data_swx_sel_a;
+	nt_field_t *mp_rcp_data_swx_sel_b;
+	nt_field_t *mp_rcp_data_mask_a;
+	nt_field_t *mp_rcp_data_mask_b;
+	nt_field_t *mp_rcp_data_dual;
+	nt_field_t *mp_rcp_data_paired;
+	nt_field_t *mp_rcp_data_el_a;
+	nt_field_t *mp_rcp_data_el_b;
+	nt_field_t *mp_rcp_data_info_a;
+	nt_field_t *mp_rcp_data_info_b;
+	nt_field_t *mp_rcp_data_ftm_a;
+	nt_field_t *mp_rcp_data_ftm_b;
+	nt_field_t *mp_rcp_data_bank_a;
+	nt_field_t *mp_rcp_data_bank_b;
+	nt_field_t *mp_rcp_data_kl_a;
+	nt_field_t *mp_rcp_data_kl_b;
+	nt_field_t *mp_rcp_data_flow_set;
+	nt_field_t *mp_rcp_data_keyway_a;
+	nt_field_t *mp_rcp_data_keyway_b;
+	nt_field_t *mp_rcp_data_synergy_mode;
+	nt_field_t *mp_rcp_data_dw0_b_dyn;
+	nt_field_t *mp_rcp_data_dw0_b_ofs;
+	nt_field_t *mp_rcp_data_dw2_b_dyn;
+	nt_field_t *mp_rcp_data_dw2_b_ofs;
+	nt_field_t *mp_rcp_data_sw4_b_dyn;
+	nt_field_t *mp_rcp_data_sw4_b_ofs;
+	nt_field_t *mp_rcp_data_sw5_b_dyn;
+	nt_field_t *mp_rcp_data_sw5_b_ofs;
+
+	nt_register_t *mp_cam_ctrl;
+	nt_field_t *mp_cam_addr;
+	nt_field_t *mp_cam_cnt;
+	nt_register_t *mp_cam_data;
+	nt_field_t *mp_cam_data_w0;
+	nt_field_t *mp_cam_data_w1;
+	nt_field_t *mp_cam_data_w2;
+	nt_field_t *mp_cam_data_w3;
+	nt_field_t *mp_cam_data_w4;
+	nt_field_t *mp_cam_data_w5;
+	nt_field_t *mp_cam_data_ft0;
+	nt_field_t *mp_cam_data_ft1;
+	nt_field_t *mp_cam_data_ft2;
+	nt_field_t *mp_cam_data_ft3;
+	nt_field_t *mp_cam_data_ft4;
+	nt_field_t *mp_cam_data_ft5;
+
+	nt_register_t *mp_tcam_ctrl;
+	nt_field_t *mp_tcam_addr;
+	nt_field_t *mp_tcam_cnt;
+	nt_register_t *mp_tcam_data;
+	nt_field_t *mp_tcam_data_t;
+
+	nt_register_t *mp_tci_ctrl;
+	nt_field_t *mp_tci_addr;
+	nt_field_t *mp_tci_cnt;
+	nt_register_t *mp_tci_data;
+	nt_field_t *mp_tci_data_color;
+	nt_field_t *mp_tci_data_ft;
+
+	nt_register_t *mp_tcq_ctrl;
+	nt_field_t *mp_tcq_addr;
+	nt_field_t *mp_tcq_cnt;
+	nt_register_t *mp_tcq_data;
+	nt_field_t *mp_tcq_data_bank_mask;
+	nt_field_t *mp_tcq_data_qual;
+};
+
+#endif /* __FLOW_NTHW_KM_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
new file mode 100644
index 0000000000..e823a527bb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_pdb.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying PDB FPGA module. */
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_pdb, n_debug_mode);
+}
+
+/* Allocate a zero-initialized pdb_nthw instance; NULL on OOM. */
+struct pdb_nthw *pdb_nthw_new(void)
+{
+	return calloc(1, sizeof(struct pdb_nthw));
+}
+
+/* Release an instance created by pdb_nthw_new(); NULL is a no-op.
+ * The struct is scrubbed first so stale handles cannot be reused.
+ */
+void pdb_nthw_delete(struct pdb_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a pdb_nthw instance to PDB module @n_instance of @p_fpga and
+ * cache all register/field handles.
+ *
+ * When @p is NULL the call acts as a probe: returns 0 if the module
+ * instance exists, -1 otherwise. Returns -1 (with a log entry) when
+ * the instance is missing, 0 on success.
+ */
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_PDB, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Pdb %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_pdb = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_pdb, PDB_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, PDB_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_pdb, PDB_RCP_DATA);
+	p->mp_rcp_data_descriptor =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESCRIPTOR);
+	p->mp_rcp_data_desc_len =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DESC_LEN);
+	p->mp_rcp_data_tx_port =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_PORT);
+	p->mp_rcp_data_tx_ignore =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_IGNORE);
+	p->mp_rcp_data_tx_now =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_TX_NOW);
+	p->mp_rcp_data_crc_overwrite =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_CRC_OVERWRITE);
+	p->mp_rcp_data_align = register_get_field(p->mp_rcp_data, PDB_RCP_DATA_ALIGN);
+	p->mp_rcp_data_ofs0_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_DYN);
+	p->mp_rcp_data_ofs0_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS0_REL);
+	p->mp_rcp_data_ofs1_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_DYN);
+	p->mp_rcp_data_ofs1_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS1_REL);
+	p->mp_rcp_data_ofs2_dyn =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_DYN);
+	p->mp_rcp_data_ofs2_rel =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_OFS2_REL);
+	p->mp_rcp_data_ip_prot_tnl =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_IP_PROT_TNL);
+	p->mp_rcp_data_ppc_hsh =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_PPC_HSH);
+	p->mp_rcp_data_duplicate_en =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_EN);
+	p->mp_rcp_data_duplicate_bit =
+		register_get_field(p->mp_rcp_data, PDB_RCP_DATA_DUPLICATE_BIT);
+	/* Optional field: query (not get) -- may be NULL on FPGA versions
+	 * without it; the setter guards against NULL.
+	 */
+	p->mp_rcp_data_pcap_keep_fcs =
+		register_query_field(p->mp_rcp_data, PDB_RCP_DATA_PCAP_KEEP_FCS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_pdb, PDB_CONFIG);
+	p->mp_config_ts_format =
+		register_get_field(p->mp_config, PDB_CONFIG_TS_FORMAT);
+	p->mp_config_port_ofs =
+		register_get_field(p->mp_config, PDB_CONFIG_PORT_OFS);
+
+	return 0;
+}
+
+/* RCP shadow-register setters. Each helper writes one field; values
+ * are committed to hardware by pdb_nthw_rcp_flush().
+ */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_descriptor, val);
+}
+
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_desc_len, val);
+}
+
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_port, val);
+}
+
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_ignore, val);
+}
+
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tx_now, val);
+}
+
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_crc_overwrite, val);
+}
+
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_align, val);
+}
+
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs0_rel, val);
+}
+
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs1_rel, val);
+}
+
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_dyn, val);
+}
+
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs2_rel, val);
+}
+
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ip_prot_tnl, val);
+}
+
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ppc_hsh, val);
+}
+
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_en, val);
+}
+
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_duplicate_bit, val);
+}
+
+/* Optional field: silently ignored on FPGA versions without it. */
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_pcap_keep_fcs)
+		field_set_val32(p->mp_rcp_data_pcap_keep_fcs, val);
+}
+
+/* Commit the shadow RCP control/data registers to hardware. */
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_ts_format, val);
+}
+
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_port_ofs, val);
+}
+
+/* Commit the CONFIG register to hardware. */
+void pdb_nthw_config_flush(const struct pdb_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
new file mode 100644
index 0000000000..aed050eca5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_pdb.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_PDB_H__
+#define __FLOW_NTHW_PDB_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Cached register/field handles for one PDB module instance.
+ * Handles are owned by the FPGA model; mp_rcp_data_pcap_keep_fcs is
+ * optional and may be NULL on FPGA versions without the field.
+ */
+struct pdb_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_pdb;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_descriptor;
+	nt_field_t *mp_rcp_data_desc_len;
+	nt_field_t *mp_rcp_data_tx_port;
+	nt_field_t *mp_rcp_data_tx_ignore;
+	nt_field_t *mp_rcp_data_tx_now;
+	nt_field_t *mp_rcp_data_crc_overwrite;
+	nt_field_t *mp_rcp_data_align;
+	nt_field_t *mp_rcp_data_ofs0_dyn;
+	nt_field_t *mp_rcp_data_ofs0_rel;
+	nt_field_t *mp_rcp_data_ofs1_dyn;
+	nt_field_t *mp_rcp_data_ofs1_rel;
+	nt_field_t *mp_rcp_data_ofs2_dyn;
+	nt_field_t *mp_rcp_data_ofs2_rel;
+	nt_field_t *mp_rcp_data_ip_prot_tnl;
+	nt_field_t *mp_rcp_data_ppc_hsh;
+	nt_field_t *mp_rcp_data_duplicate_en;
+	nt_field_t *mp_rcp_data_duplicate_bit;
+	nt_field_t *mp_rcp_data_pcap_keep_fcs;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_ts_format;
+	nt_field_t *mp_config_port_ofs;
+};
+
+typedef struct pdb_nthw pdb_nthw_t;
+
+/* Lifetime: pdb_nthw_new() allocates, pdb_nthw_init() binds to an FPGA
+ * module instance, pdb_nthw_delete() releases.
+ */
+struct pdb_nthw *pdb_nthw_new(void);
+void pdb_nthw_delete(struct pdb_nthw *p);
+int pdb_nthw_init(struct pdb_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int pdb_nthw_setup(struct pdb_nthw *p, int n_idx, int n_idx_cnt);
+void pdb_nthw_set_debug_mode(struct pdb_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void pdb_nthw_rcp_select(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_cnt(const struct pdb_nthw *p, uint32_t val);
+
+void pdb_nthw_rcp_descriptor(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_desc_len(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_port(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_ignore(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_tx_now(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_crc_overwrite(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_align(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs0_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs1_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs1_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ofs2_dyn(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ofs2_rel(const struct pdb_nthw *p, int32_t val);
+void pdb_nthw_rcp_ip_prot_tnl(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_ppc_hsh(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_en(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_duplicate_bit(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_data_pcap_keep_fcs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_rcp_flush(const struct pdb_nthw *p);
+
+/* CONFIG */
+void pdb_nthw_config_ts_format(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_port_ofs(const struct pdb_nthw *p, uint32_t val);
+void pdb_nthw_config_flush(const struct pdb_nthw *p);
+
+#endif /* __FLOW_NTHW_PDB_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
new file mode 100644
index 0000000000..6c13824df6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_qsl.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying QSL FPGA module. */
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_qsl, n_debug_mode);
+}
+
+/* Allocate a zero-initialized qsl_nthw instance; NULL on OOM. */
+struct qsl_nthw *qsl_nthw_new(void)
+{
+	return calloc(1, sizeof(struct qsl_nthw));
+}
+
+/* Release an instance created by qsl_nthw_new(); NULL is a no-op.
+ * The struct is scrubbed first so stale handles cannot be reused.
+ */
+void qsl_nthw_delete(struct qsl_nthw *p)
+{
+	if (!p)
+		return;
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Bind a qsl_nthw instance to QSL module @n_instance of @p_fpga and
+ * cache all register/field handles.
+ *
+ * When @p is NULL the call acts as a probe: returns 0 if the module
+ * instance exists, -1 otherwise. Returns -1 (with a log entry) when
+ * the instance is missing, 0 on success.
+ *
+ * Fields looked up with register_query_field() are optional and left
+ * NULL when absent; their setters guard against NULL.
+ */
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_QSL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: QSL %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_qsl = p_mod;
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_qsl, QSL_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, QSL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_qsl, QSL_RCP_DATA);
+	p->mp_rcp_data_discard =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DISCARD);
+	p->mp_rcp_data_drop = register_get_field(p->mp_rcp_data, QSL_RCP_DATA_DROP);
+	p->mp_rcp_data_tbl_lo =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_LO);
+	p->mp_rcp_data_tbl_hi =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_HI);
+	p->mp_rcp_data_tbl_idx =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_IDX);
+	p->mp_rcp_data_tbl_msk =
+		register_get_field(p->mp_rcp_data, QSL_RCP_DATA_TBL_MSK);
+	p->mp_rcp_data_cao = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_CAO);
+	p->mp_rcp_data_lr = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_LR);
+	p->mp_rcp_data_tsa = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_TSA);
+	p->mp_rcp_data_vli = register_query_field(p->mp_rcp_data, QSL_RCP_DATA_VLI);
+
+	/* QST */
+	p->mp_qst_ctrl = module_get_register(p->m_qsl, QSL_QST_CTRL);
+	p->mp_qst_addr = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_ADR);
+	p->mp_qst_cnt = register_get_field(p->mp_qst_ctrl, QSL_QST_CTRL_CNT);
+	p->mp_qst_data = module_get_register(p->m_qsl, QSL_QST_DATA);
+	p->mp_qst_data_queue = register_get_field(p->mp_qst_data, QSL_QST_DATA_QUEUE);
+	p->mp_qst_data_en = register_query_field(p->mp_qst_data, QSL_QST_DATA_EN);
+	p->mp_qst_data_tx_port =
+		register_query_field(p->mp_qst_data, QSL_QST_DATA_TX_PORT);
+	p->mp_qst_data_lre = register_query_field(p->mp_qst_data, QSL_QST_DATA_LRE);
+	p->mp_qst_data_tci = register_query_field(p->mp_qst_data, QSL_QST_DATA_TCI);
+	p->mp_qst_data_ven = register_query_field(p->mp_qst_data, QSL_QST_DATA_VEN);
+	/* QEN */
+	p->mp_qen_ctrl = module_get_register(p->m_qsl, QSL_QEN_CTRL);
+	p->mp_qen_addr = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_ADR);
+	p->mp_qen_cnt = register_get_field(p->mp_qen_ctrl, QSL_QEN_CTRL_CNT);
+	p->mp_qen_data = module_get_register(p->m_qsl, QSL_QEN_DATA);
+	p->mp_qen_data_en = register_get_field(p->mp_qen_data, QSL_QEN_DATA_EN);
+	/* UNMQ */
+	p->mp_unmq_ctrl = module_get_register(p->m_qsl, QSL_UNMQ_CTRL);
+	p->mp_unmq_addr = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_ADR);
+	p->mp_unmq_cnt = register_get_field(p->mp_unmq_ctrl, QSL_UNMQ_CTRL_CNT);
+	p->mp_unmq_data = module_get_register(p->m_qsl, QSL_UNMQ_DATA);
+	p->mp_unmq_data_dest_queue =
+		register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_DEST_QUEUE);
+	p->mp_unmq_data_en = register_get_field(p->mp_unmq_data, QSL_UNMQ_DATA_EN);
+
+	if (!p->mp_qst_data_en) {
+		/* changed name from EN to QEN in v0.7 */
+		p->mp_qst_data_en =
+			register_get_field(p->mp_qst_data, QSL_QST_DATA_QEN);
+	}
+
+	/* LTX - not there anymore from v0.7+ */
+	p->mp_ltx_ctrl = module_query_register(p->m_qsl, QSL_LTX_CTRL);
+	if (p->mp_ltx_ctrl) {
+		p->mp_ltx_addr =
+			register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_ADR);
+		p->mp_ltx_cnt = register_get_field(p->mp_ltx_ctrl, QSL_LTX_CTRL_CNT);
+	} else {
+		p->mp_ltx_addr = NULL;
+		p->mp_ltx_cnt = NULL;
+	}
+	p->mp_ltx_data = module_query_register(p->m_qsl, QSL_LTX_DATA);
+	if (p->mp_ltx_data) {
+		p->mp_ltx_data_lr =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_LR);
+		p->mp_ltx_data_tx_port =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TX_PORT);
+		p->mp_ltx_data_tsa =
+			register_get_field(p->mp_ltx_data, QSL_LTX_DATA_TSA);
+	} else {
+		p->mp_ltx_data_lr = NULL;
+		p->mp_ltx_data_tx_port = NULL;
+		p->mp_ltx_data_tsa = NULL;
+	}
+	return 0;
+}
+
+/* No per-instance setup is currently required; parameters are kept for
+ * API symmetry with the other flow_nthw modules. Always returns 0.
+ */
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* RCP shadow-register setters; committed by qsl_nthw_rcp_flush().
+ * Fixes: dropped the stray ';' after qsl_nthw_rcp_select() and the
+ * stray blank lines between signature and body in tbl_hi/tbl_idx.
+ */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_discard, val);
+}
+
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_drop, val);
+}
+
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_lo, val);
+}
+
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_hi, val);
+}
+
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_idx, val);
+}
+
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tbl_msk, val);
+}
+
+/* CAO/LR/TSA/VLI are optional fields (absent on some FPGA versions);
+ * the write is silently ignored when the field is not present.
+ */
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_cao)
+		field_set_val32(p->mp_rcp_data_cao, val);
+}
+
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_lr)
+		field_set_val32(p->mp_rcp_data_lr, val);
+}
+
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_tsa)
+		field_set_val32(p->mp_rcp_data_tsa, val);
+}
+
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_rcp_data_vli)
+		field_set_val32(p->mp_rcp_data_vli, val);
+}
+
+/* Commit the shadow RCP control/data registers to hardware. */
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* LTX - registers removed from FPGA v0.7+; all field handles may be
+ * NULL, so every setter guards. NOTE(review): qsl_nthw_ltx_flush()
+ * still flushes unconditionally -- callers must only flush when LTX
+ * exists (mp_ltx_ctrl != NULL).
+ */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_addr)
+		field_set_val32(p->mp_ltx_addr, val);
+}
+
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	/* Bug fix: guard on the field actually written (was mp_ltx_addr). */
+	if (p->mp_ltx_cnt)
+		field_set_val32(p->mp_ltx_cnt, val);
+}
+
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_lr)
+		field_set_val32(p->mp_ltx_data_lr, val);
+}
+
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tx_port)
+		field_set_val32(p->mp_ltx_data_tx_port, val);
+}
+
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_ltx_data_tsa)
+		field_set_val32(p->mp_ltx_data_tsa, val);
+}
+
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_ltx_ctrl, 1);
+	register_flush(p->mp_ltx_data, 1);
+}
+
+/* QST shadow-register setters; committed by qsl_nthw_qst_flush().
+ * TX_PORT/LRE/TCI/VEN are optional fields and silently ignored when
+ * absent. mp_qst_data_en is always non-NULL after init (EN or the
+ * renamed QEN field), so no guard is needed there.
+ */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_addr, val);
+}
+
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_cnt, val);
+}
+
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_queue, val);
+}
+
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qst_data_en, val);
+}
+
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tx_port)
+		field_set_val32(p->mp_qst_data_tx_port, val);
+}
+
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_lre)
+		field_set_val32(p->mp_qst_data_lre, val);
+}
+
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_tci)
+		field_set_val32(p->mp_qst_data_tci, val);
+}
+
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val)
+{
+	if (p->mp_qst_data_ven)
+		field_set_val32(p->mp_qst_data_ven, val);
+}
+
+void qsl_nthw_qst_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qst_ctrl, 1);
+	register_flush(p->mp_qst_data, 1);
+}
+
+/* QEN shadow-register setters; committed by qsl_nthw_qen_flush(). */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_addr, val);
+}
+
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_cnt, val);
+}
+
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_qen_data_en, val);
+}
+
+void qsl_nthw_qen_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_qen_ctrl, 1);
+	register_flush(p->mp_qen_data, 1);
+}
+
+/* UNMQ (unmatched-queue) shadow-register setters; committed by
+ * qsl_nthw_unmq_flush().
+ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_addr, val);
+}
+
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_cnt, val);
+}
+
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_dest_queue, val);
+}
+
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_unmq_data_en, val);
+}
+
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p)
+{
+	register_flush(p->mp_unmq_ctrl, 1);
+	register_flush(p->mp_unmq_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
new file mode 100644
index 0000000000..eeebbcf1c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_qsl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_QSL_H__
+#define __FLOW_NTHW_QSL_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct qsl_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_qsl;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_discard;
+	nt_field_t *mp_rcp_data_drop;
+	nt_field_t *mp_rcp_data_tbl_lo;
+	nt_field_t *mp_rcp_data_tbl_hi;
+	nt_field_t *mp_rcp_data_tbl_idx;
+	nt_field_t *mp_rcp_data_tbl_msk;
+	nt_field_t *mp_rcp_data_cao;
+	nt_field_t *mp_rcp_data_lr;
+	nt_field_t *mp_rcp_data_tsa;
+	nt_field_t *mp_rcp_data_vli;
+
+	nt_register_t *mp_ltx_ctrl;
+	nt_field_t *mp_ltx_addr;
+	nt_field_t *mp_ltx_cnt;
+	nt_register_t *mp_ltx_data;
+	nt_field_t *mp_ltx_data_lr;
+	nt_field_t *mp_ltx_data_tx_port;
+	nt_field_t *mp_ltx_data_tsa;
+
+	nt_register_t *mp_qst_ctrl;
+	nt_field_t *mp_qst_addr;
+	nt_field_t *mp_qst_cnt;
+	nt_register_t *mp_qst_data;
+	nt_field_t *mp_qst_data_queue;
+	nt_field_t *mp_qst_data_en;
+	nt_field_t *mp_qst_data_tx_port;
+	nt_field_t *mp_qst_data_lre;
+	nt_field_t *mp_qst_data_tci;
+	nt_field_t *mp_qst_data_ven;
+
+	nt_register_t *mp_qen_ctrl;
+	nt_field_t *mp_qen_addr;
+	nt_field_t *mp_qen_cnt;
+	nt_register_t *mp_qen_data;
+	nt_field_t *mp_qen_data_en;
+
+	nt_register_t *mp_unmq_ctrl;
+	nt_field_t *mp_unmq_addr;
+	nt_field_t *mp_unmq_cnt;
+	nt_register_t *mp_unmq_data;
+	nt_field_t *mp_unmq_data_dest_queue;
+	nt_field_t *mp_unmq_data_en;
+};
+
+typedef struct qsl_nthw qsl_nthw_t;
+
+struct qsl_nthw *qsl_nthw_new(void);
+void qsl_nthw_delete(struct qsl_nthw *p);
+int qsl_nthw_init(struct qsl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int qsl_nthw_setup(struct qsl_nthw *p, int n_idx, int n_idx_cnt);
+void qsl_nthw_set_debug_mode(struct qsl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void qsl_nthw_rcp_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_discard(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_drop(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_lo(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_hi(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_idx(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tbl_msk(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_cao(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_vli(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_rcp_flush(const struct qsl_nthw *p);
+
+/* LTX */
+void qsl_nthw_ltx_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_lr(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_tsa(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_ltx_flush(const struct qsl_nthw *p);
+
+/* QST */
+void qsl_nthw_qst_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tx_port(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_lre(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_tci(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_ven(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qst_flush(const struct qsl_nthw *p);
+
+/* QEN */
+void qsl_nthw_qen_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_qen_flush(const struct qsl_nthw *p);
+
+/* UNMQ */
+void qsl_nthw_unmq_select(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_cnt(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_dest_queue(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_en(const struct qsl_nthw *p, uint32_t val);
+void qsl_nthw_unmq_flush(const struct qsl_nthw *p);
+
+#endif /* __FLOW_NTHW_QSL_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
new file mode 100644
index 0000000000..8f519b7728
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rmc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rmc, n_debug_mode);
+}
+
+struct rmc_nthw *rmc_nthw_new(void)
+{
+	struct rmc_nthw *p = malloc(sizeof(struct rmc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void rmc_nthw_delete(struct rmc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RMC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RMC %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rmc = p_mod;
+
+	/* CTRL */
+	p->mp_ctrl = module_get_register(p->m_rmc, RMC_CTRL);
+	p->mp_ctrl_block_statt =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_STATT);
+	p->mp_ctrl_block_keep_a =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_KEEPA);
+	p->mp_ctrl_block_rpp_slice =
+		register_query_field(p->mp_ctrl, RMC_CTRL_BLOCK_RPP_SLICE);
+	p->mp_ctrl_block_mac_port =
+		register_get_field(p->mp_ctrl, RMC_CTRL_BLOCK_MAC_PORT);
+	p->mp_ctrl_lag_phy_odd_even =
+		register_get_field(p->mp_ctrl, RMC_CTRL_LAG_PHY_ODD_EVEN);
+	return 0;
+}
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt)
+{
+	(void)p;
+	(void)n_idx;
+	(void)n_idx_cnt;
+
+	return 0;
+}
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_statt, val);
+}
+
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_keep_a, val);
+}
+
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val)
+{
+	if (p->mp_ctrl_block_rpp_slice)
+		field_set_val32(p->mp_ctrl_block_rpp_slice, val);
+}
+
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_block_mac_port, val);
+}
+
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ctrl_lag_phy_odd_even, val);
+}
+
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p)
+{
+	register_flush(p->mp_ctrl, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
new file mode 100644
index 0000000000..57d5776002
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rmc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RMC_H__
+#define __FLOW_NTHW_RMC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct rmc_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rmc;
+
+	nt_register_t *mp_ctrl;
+	nt_field_t *mp_ctrl_block_statt;
+	nt_field_t *mp_ctrl_block_keep_a;
+	nt_field_t *mp_ctrl_block_rpp_slice;
+	nt_field_t *mp_ctrl_block_mac_port;
+	nt_field_t *mp_ctrl_lag_phy_odd_even;
+};
+
+struct rmc_nthw *rmc_nthw_new(void);
+void rmc_nthw_delete(struct rmc_nthw *p);
+int rmc_nthw_init(struct rmc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rmc_nthw_setup(struct rmc_nthw *p, int n_idx, int n_idx_cnt);
+void rmc_nthw_set_debug_mode(struct rmc_nthw *p, unsigned int n_debug_mode);
+
+/* CTRL */
+void rmc_nthw_ctrl_block_statt(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_keep_a(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_rpp_slice(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_block_mac_port(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_lag_phy_odd_even(const struct rmc_nthw *p, uint32_t val);
+void rmc_nthw_ctrl_flush(const struct rmc_nthw *p);
+
+#endif /* __FLOW_NTHW_RMC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
new file mode 100644
index 0000000000..934778f426
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_roa.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_roa, n_debug_mode);
+}
+
+struct roa_nthw *roa_nthw_new(void)
+{
+	struct roa_nthw *p = malloc(sizeof(struct roa_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void roa_nthw_delete(struct roa_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_ROA, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: ROA %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_roa = p_mod;
+
+	/* TUN HDR */
+	p->mp_tun_hdr_ctrl = module_get_register(p->m_roa, ROA_TUNHDR_CTRL);
+	p->mp_tun_hdr_addr =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_ADR);
+	p->mp_tun_hdr_cnt =
+		register_get_field(p->mp_tun_hdr_ctrl, ROA_TUNHDR_CTRL_CNT);
+	p->mp_tun_hdr_data = module_get_register(p->m_roa, ROA_TUNHDR_DATA);
+	p->mp_tun_hdr_data_tunnel_hdr =
+		register_get_field(p->mp_tun_hdr_data, ROA_TUNHDR_DATA_TUNNEL_HDR);
+	/* TUN CFG */
+	p->mp_tun_cfg_ctrl = module_get_register(p->m_roa, ROA_TUNCFG_CTRL);
+	p->mp_tun_cfg_addr =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_ADR);
+	p->mp_tun_cfg_cnt =
+		register_get_field(p->mp_tun_cfg_ctrl, ROA_TUNCFG_CTRL_CNT);
+	p->mp_tun_cfg_data = module_get_register(p->m_roa, ROA_TUNCFG_DATA);
+	p->mp_tun_cfg_data_tun_len =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_LEN);
+	p->mp_tun_cfg_data_tun_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_TYPE);
+	p->mp_tun_cfg_data_tun_vlan =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VLAN);
+	p->mp_tun_cfg_data_ip_type =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IP_TYPE);
+	p->mp_tun_cfg_data_ipcs_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_UPD);
+	p->mp_tun_cfg_data_ipcs_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPCS_PRECALC);
+	p->mp_tun_cfg_data_iptl_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_UPD);
+	p->mp_tun_cfg_data_iptl_precalc =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_IPTL_PRECALC);
+	p->mp_tun_cfg_data_vxlan_udp_len_upd =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TUN_VXLAN_UDP_LEN_UPD);
+	p->mp_tun_cfg_data_tx_lag_ix =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_TX_LAG_IX);
+	p->mp_tun_cfg_data_recirculate =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRCULATE);
+	p->mp_tun_cfg_data_push_tunnel =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_PUSH_TUNNEL);
+	p->mp_tun_cfg_data_recirc_port =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_PORT);
+	p->mp_tun_cfg_data_recirc_bypass =
+		register_get_field(p->mp_tun_cfg_data, ROA_TUNCFG_DATA_RECIRC_BYPASS);
+	/* CONFIG */
+	p->mp_config = module_get_register(p->m_roa, ROA_CONFIG);
+	p->mp_config_fwd_recirculate =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_RECIRCULATE);
+	p->mp_config_fwd_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NORMAL_PCKS);
+	p->mp_config_fwd_tx_port0 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT0);
+	p->mp_config_fwd_tx_port1 =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_TXPORT1);
+	p->mp_config_fwd_cell_builder_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_CELLBUILDER_PCKS);
+	p->mp_config_fwd_non_normal_pcks =
+		register_get_field(p->mp_config, ROA_CONFIG_FWD_NON_NORMAL_PCKS);
+	/* LAG */
+	p->mp_lag_cfg_ctrl = module_get_register(p->m_roa, ROA_LAGCFG_CTRL);
+	p->mp_lag_cfg_addr =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_ADR);
+	p->mp_lag_cfg_cnt =
+		register_get_field(p->mp_lag_cfg_ctrl, ROA_LAGCFG_CTRL_CNT);
+	p->mp_lag_cfg_data = module_get_register(p->m_roa, ROA_LAGCFG_DATA);
+	p->mp_lag_cfg_data_tx_phy_port =
+		register_get_field(p->mp_lag_cfg_data, ROA_LAGCFG_DATA_TXPHY_PORT);
+
+	return 0;
+}
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_addr, val);
+}
+
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_hdr_cnt, val);
+}
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val)
+{
+	field_set_val(p->mp_tun_hdr_data_tunnel_hdr, val, 4);
+}
+
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_hdr_ctrl, 1);
+	register_flush(p->mp_tun_hdr_data, 1);
+}
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_addr, val);
+}
+
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_cnt, val);
+}
+
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_len, val);
+}
+
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_type, val);
+}
+
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tun_vlan, val);
+}
+
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ip_type, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_upd, val);
+}
+
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_ipcs_precalc, val);
+}
+
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_upd, val);
+}
+
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_iptl_precalc, val);
+}
+
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_vxlan_udp_len_upd, val);
+}
+
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_tx_lag_ix, val);
+}
+
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirculate, val);
+}
+
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_push_tunnel, val);
+}
+
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_port, val);
+}
+
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_tun_cfg_data_recirc_bypass, val);
+}
+
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_tun_cfg_ctrl, 1);
+	register_flush(p->mp_tun_cfg_data, 1);
+}
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_recirculate, val);
+}
+
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_normal_pcks, val);
+}
+
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port0, val);
+}
+
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_tx_port1, val);
+}
+
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_cell_builder_pcks, val);
+}
+
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_config_fwd_non_normal_pcks, val);
+}
+
+void roa_nthw_config_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_config, 1);
+}
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_addr, val);
+}
+
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_cnt, val);
+}
+
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_lag_cfg_data_tx_phy_port, val);
+}
+
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p)
+{
+	register_flush(p->mp_lag_cfg_ctrl, 1);
+	register_flush(p->mp_lag_cfg_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
new file mode 100644
index 0000000000..9398ef5ae9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_roa.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_ROA_H__
+#define __FLOW_NTHW_ROA_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+struct roa_nthw;
+
+typedef struct roa_nthw roa_nthw_t;
+
+struct roa_nthw *roa_nthw_new(void);
+void roa_nthw_delete(struct roa_nthw *p);
+int roa_nthw_init(struct roa_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int roa_nthw_setup(struct roa_nthw *p, int n_idx, int n_idx_cnt);
+void roa_nthw_set_debug_mode(struct roa_nthw *p, unsigned int n_debug_mode);
+
+/* TUN HDR */
+void roa_nthw_tun_hdr_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_hdr_cnt(const struct roa_nthw *p, uint32_t val);
+
+void roa_nthw_tun_hdr_tunnel_hdr(const struct roa_nthw *p, uint32_t *val);
+void roa_nthw_tun_hdr_flush(const struct roa_nthw *p);
+
+/* TUN CFG */
+void roa_nthw_tun_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_len(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tun_vlan(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ip_type(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_ipcs_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_iptl_precalc(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_vxlan_udp_len_upd(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_tx_lag_ix(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_push_tunnel(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_recirc_bypass(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_tun_cfg_flush(const struct roa_nthw *p);
+
+/* ROA CONFIG */
+void roa_nthw_config_fwd_recirculate(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port0(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_tx_port1(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_cell_builder_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_fwd_non_normal_pcks(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_config_flush(const struct roa_nthw *p);
+
+/* LAG */
+void roa_nthw_lag_cfg_select(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_cnt(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_tx_phy_port(const struct roa_nthw *p, uint32_t val);
+void roa_nthw_lag_cfg_flush(const struct roa_nthw *p);
+
+struct roa_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_roa;
+
+	nt_register_t *mp_tun_hdr_ctrl;
+	nt_field_t *mp_tun_hdr_addr;
+	nt_field_t *mp_tun_hdr_cnt;
+	nt_register_t *mp_tun_hdr_data;
+	nt_field_t *mp_tun_hdr_data_tunnel_hdr;
+
+	nt_register_t *mp_tun_cfg_ctrl;
+	nt_field_t *mp_tun_cfg_addr;
+	nt_field_t *mp_tun_cfg_cnt;
+	nt_register_t *mp_tun_cfg_data;
+	nt_field_t *mp_tun_cfg_data_tun_len;
+	nt_field_t *mp_tun_cfg_data_tun_type;
+	nt_field_t *mp_tun_cfg_data_tun_vlan;
+	nt_field_t *mp_tun_cfg_data_ip_type;
+	nt_field_t *mp_tun_cfg_data_ipcs_upd;
+	nt_field_t *mp_tun_cfg_data_ipcs_precalc;
+	nt_field_t *mp_tun_cfg_data_iptl_upd;
+	nt_field_t *mp_tun_cfg_data_iptl_precalc;
+	nt_field_t *mp_tun_cfg_data_vxlan_udp_len_upd;
+	nt_field_t *mp_tun_cfg_data_tx_lag_ix;
+	nt_field_t *mp_tun_cfg_data_recirculate;
+	nt_field_t *mp_tun_cfg_data_push_tunnel;
+	nt_field_t *mp_tun_cfg_data_recirc_port;
+	nt_field_t *mp_tun_cfg_data_recirc_bypass;
+
+	nt_register_t *mp_config;
+	nt_field_t *mp_config_fwd_recirculate;
+	nt_field_t *mp_config_fwd_normal_pcks;
+	nt_field_t *mp_config_fwd_tx_port0;
+	nt_field_t *mp_config_fwd_tx_port1;
+	nt_field_t *mp_config_fwd_cell_builder_pcks;
+	nt_field_t *mp_config_fwd_non_normal_pcks;
+
+	nt_register_t *mp_lag_cfg_ctrl;
+	nt_field_t *mp_lag_cfg_addr;
+	nt_field_t *mp_lag_cfg_cnt;
+	nt_register_t *mp_lag_cfg_data;
+	nt_field_t *mp_lag_cfg_data_tx_phy_port;
+};
+
+#endif /* __FLOW_NTHW_ROA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
new file mode 100644
index 0000000000..2ce3ce6cf8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_rpp_lr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void)
+{
+	struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+	return p;
+}
+
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_RPP_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: RppLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_rpp_lr = p_mod; /* module already queried and null-checked above */
+
+	p->mp_rcp_ctrl = module_get_register(p->m_rpp_lr, RPP_LR_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, RPP_LR_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_rpp_lr, RPP_LR_RCP_DATA);
+	p->mp_rcp_data_exp = register_get_field(p->mp_rcp_data, RPP_LR_RCP_DATA_EXP);
+
+	p->mp_ifr_rcp_ctrl = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_CTRL);
+	p->mp_ifr_rcp_addr =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_ADR);
+	p->mp_ifr_rcp_cnt =
+		register_query_field(p->mp_ifr_rcp_ctrl, RPP_LR_IFR_RCP_CTRL_CNT);
+	p->mp_ifr_rcp_data = module_query_register(p->m_rpp_lr, RPP_LR_IFR_RCP_DATA);
+	p->mp_ifr_rcp_data_en =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_EN);
+	p->mp_ifr_rcp_data_mtu =
+		register_query_field(p->mp_ifr_rcp_data, RPP_LR_IFR_RCP_DATA_MTU);
+
+	return 0;
+}
+
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_addr);
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_cnt);
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_rcp_data_exp);
+	field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_rcp_ctrl);
+	assert(p->mp_rcp_data);
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_addr);
+	field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_cnt);
+	field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_en);
+	field_set_val32(p->mp_ifr_rcp_data_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+	assert(p->mp_ifr_rcp_data_mtu);
+	field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+	assert(p->mp_ifr_rcp_ctrl);
+	assert(p->mp_ifr_rcp_data);
+	register_flush(p->mp_ifr_rcp_ctrl, 1);
+	register_flush(p->mp_ifr_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
new file mode 100644
index 0000000000..e442c9d8d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_rpp_lr.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_RPP_LR_H__
+#define __FLOW_NTHW_RPP_LR_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+struct rpp_lr_nthw {
+	uint8_t m_physical_adapter_no;
+	nt_fpga_t *mp_fpga;
+
+	nt_module_t *m_rpp_lr;
+
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_addr;
+	nt_field_t *mp_rcp_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_exp;
+
+	nt_register_t *mp_ifr_rcp_ctrl;
+	nt_field_t *mp_ifr_rcp_addr;
+	nt_field_t *mp_ifr_rcp_cnt;
+
+	nt_register_t *mp_ifr_rcp_data;
+	nt_field_t *mp_ifr_rcp_data_en;
+	nt_field_t *mp_ifr_rcp_data_mtu;
+};
+
+struct rpp_lr_nthw *rpp_lr_nthw_new(void);
+void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
+int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_RPP_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
new file mode 100644
index 0000000000..a409e68869
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc, n_debug_mode);
+}
+
+struct slc_nthw *slc_nthw_new(void)
+{
+	struct slc_nthw *p = malloc(sizeof(struct slc_nthw));
+
+	if (p)
+		(void)memset(p, 0, sizeof(*p));
+
+	return p;
+}
+
+void slc_nthw_delete(struct slc_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: Slc %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	p->m_slc = p_mod; /* module already queried and null-checked above */
+
+	/* RCP */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+void slc_nthw_rcp_flush(const struct slc_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
new file mode 100644
index 0000000000..e0f58e27e4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_H__
+#define __FLOW_NTHW_SLC_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Hardware shim for one SLC (slicer) module instance. */
+struct slc_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this shim is bound to */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+
+	nt_module_t *m_slc;	/* SLC module instance */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* CTRL.ADR: RCP record index */
+	nt_field_t *mp_rcp_cnt;	/* CTRL.CNT: access count */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* DATA.TAIL_SLC_EN field */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* DATA.TAIL_DYN field */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* DATA.TAIL_OFS field */
+	nt_field_t *mp_rcp_data_pcap;	/* DATA.PCAP field */
+};
+
+typedef struct slc_nthw slc_nthw_t;
+
+struct slc_nthw *slc_nthw_new(void);
+void slc_nthw_delete(struct slc_nthw *p);
+int slc_nthw_init(struct slc_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_nthw_setup(struct slc_nthw *p, int n_idx, int n_idx_cnt);
+void slc_nthw_set_debug_mode(struct slc_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_nthw_rcp_select(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_cnt(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_slc_en(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_dyn(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_tail_ofs(const struct slc_nthw *p, int32_t val);
+void slc_nthw_rcp_pcap(const struct slc_nthw *p, uint32_t val);
+void slc_nthw_rcp_flush(const struct slc_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
new file mode 100644
index 0000000000..f106974bdd
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_slc_lr.h"
+
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+/* Forward the debug-mode setting to the underlying SLC_LR module. */
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_slc_lr, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized SLC_LR shim object.
+ * Returns NULL on allocation failure; the caller owns the object and
+ * releases it with slc_lr_nthw_delete().
+ */
+struct slc_lr_nthw *slc_lr_nthw_new(void)
+{
+	return calloc(1, sizeof(struct slc_lr_nthw));
+}
+
+/* Scrub and free a shim object previously returned by slc_lr_nthw_new().
+ * Safe to call with NULL.
+ */
+void slc_lr_nthw_delete(struct slc_lr_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize an SLC_LR (slicer, long-range) hardware shim.
+ *
+ * Passing p == NULL probes for the module: the return value then only
+ * reports whether the instance exists (0) or not (-1).
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_SLC_LR, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		/* Fix: report the module as SlcLr, not Slc, so the log
+		 * distinguishes SLC_LR from the plain SLC module.
+		 */
+		NT_LOG(ERR, NTHW, "%s: SlcLr %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle already looked up above. */
+	p->m_slc_lr = p_mod;
+
+	/* RCP register/field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_slc_lr, SLC_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, SLC_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_slc_lr, SLC_RCP_DATA);
+	p->mp_rcp_data_tail_slc_en =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_SLC_EN);
+	p->mp_rcp_data_tail_dyn =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_DYN);
+	p->mp_rcp_data_tail_ofs =
+		register_get_field(p->mp_rcp_data, SLC_RCP_DATA_TAIL_OFS);
+	p->mp_rcp_data_pcap = register_get_field(p->mp_rcp_data, SLC_RCP_DATA_PCAP);
+
+	return 0;
+}
+
+/* RCP */
+/* Select the RCP record index (CTRL.ADR) for subsequent DATA accesses. */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count (CTRL.CNT). */
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the DATA.TAIL_SLC_EN field of the selected RCP record. */
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_slc_en, val);
+}
+
+/* Set the DATA.TAIL_DYN field of the selected RCP record. */
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_dyn, val);
+}
+
+/* Set the DATA.TAIL_OFS field; note: signed value is passed to
+ * field_set_val32() and relies on the implicit int32_t -> uint32_t
+ * conversion (two's complement bit pattern preserved).
+ */
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val)
+{
+	field_set_val32(p->mp_rcp_data_tail_ofs, val);
+}
+
+/* Set the DATA.PCAP field of the selected RCP record. */
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_pcap, val);
+}
+
+/* Flush the RCP CTRL and DATA registers (one entry each). */
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
new file mode 100644
index 0000000000..533f2efbeb
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_slc_lr.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_SLC_LR_H__
+#define __FLOW_NTHW_SLC_LR_H__
+
+#include <stdint.h> /* uint32_t */
+#include "nthw_fpga_model.h"
+
+/* Hardware shim for one SLC_LR (slicer, long-range) module instance. */
+struct slc_lr_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this shim is bound to */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+
+	nt_module_t *m_slc_lr;	/* SLC_LR module instance */
+
+	nt_register_t *mp_rcp_ctrl;	/* SLC_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* CTRL.ADR: RCP record index */
+	nt_field_t *mp_rcp_cnt;	/* CTRL.CNT: access count */
+	nt_register_t *mp_rcp_data;	/* SLC_RCP_DATA register */
+
+	nt_field_t *mp_rcp_data_tail_slc_en;	/* DATA.TAIL_SLC_EN field */
+	nt_field_t *mp_rcp_data_tail_dyn;	/* DATA.TAIL_DYN field */
+	nt_field_t *mp_rcp_data_tail_ofs;	/* DATA.TAIL_OFS field */
+	nt_field_t *mp_rcp_data_pcap;	/* DATA.PCAP field */
+};
+
+typedef struct slc_lr_nthw slc_lr_nthw_t;
+
+struct slc_lr_nthw *slc_lr_nthw_new(void);
+void slc_lr_nthw_delete(struct slc_lr_nthw *p);
+int slc_lr_nthw_init(struct slc_lr_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int slc_lr_nthw_setup(struct slc_lr_nthw *p, int n_idx, int n_idx_cnt);
+void slc_lr_nthw_set_debug_mode(struct slc_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void slc_lr_nthw_rcp_select(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_cnt(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_slc_en(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_dyn(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_tail_ofs(const struct slc_lr_nthw *p, int32_t val);
+void slc_lr_nthw_rcp_pcap(const struct slc_lr_nthw *p, uint32_t val);
+void slc_lr_nthw_rcp_flush(const struct slc_lr_nthw *p);
+
+#endif /* __FLOW_NTHW_SLC_LR_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
new file mode 100644
index 0000000000..4d28d8cc3d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_cpy.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_CPY module. */
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_CPY shim object.
+ * Returns NULL on allocation failure; the caller owns the object and
+ * releases it with tx_cpy_nthw_delete().
+ */
+struct tx_cpy_nthw *tx_cpy_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_cpy_nthw));
+}
+
+/* Release the writers array and the shim object itself.
+ * Safe to call with NULL.
+ */
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	free(p->m_writers);
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize a TX_CPY (TX copy) hardware shim.
+ *
+ * Passing p == NULL probes for the module: the return value then only
+ * reports whether the instance exists (0) or not (-1).
+ *
+ * The writer count and variant are read from the FPGA product
+ * parameters.  Register/field handles are resolved per writer through a
+ * fall-through switch starting at the highest writer index; the MASK
+ * registers are resolved only when the variant parameter is non-zero.
+ *
+ * Returns 0 on success, -1 on missing module, no writers, or
+ * allocation failure.
+ */
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	/* Probe-only mode. */
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxCpy %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* NOTE(review): duplicates the fpga_query_module() lookup above;
+	 * p_mod could be reused here.
+	 */
+	p->m_tx_cpy = fpga_query_module(p_fpga, MOD_TX_CPY, n_instance);
+
+	const int writers_cnt =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_WRITERS, 0);
+	if (writers_cnt < 1)
+		return -1;
+
+	p->m_writers_cnt = (unsigned int)writers_cnt;
+	p->m_writers = calloc(p->m_writers_cnt, sizeof(struct tx_cpy_writers_s));
+	if (p->m_writers == NULL)
+		return -1;
+
+	const int variant =
+		fpga_get_product_param(p->mp_fpga, NT_TX_CPY_VARIANT, 0);
+
+	/* Resolve handles from the highest writer index down to writer 0;
+	 * each case falls through to the next lower one.
+	 */
+	switch (p->m_writers_cnt) {
+	default:
+	case 6:
+		p->m_writers[5].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_CTRL);
+		p->m_writers[5].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_ADR);
+		p->m_writers[5].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[5].mp_writer_ctrl,
+					   CPY_WRITER5_CTRL_CNT);
+		p->m_writers[5].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER5_DATA);
+		p->m_writers[5].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					  CPY_WRITER5_DATA_READER_SELECT);
+		p->m_writers[5].mp_writer_data_dyn =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_DYN);
+		p->m_writers[5].mp_writer_data_ofs =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_OFS);
+		p->m_writers[5].mp_writer_data_len =
+			register_get_field(p->m_writers[5].mp_writer_data,
+					   CPY_WRITER5_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[5].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[5].mp_writer_data,
+						   CPY_WRITER5_DATA_MASK_POINTER);
+			p->m_writers[5].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_CTRL);
+			p->m_writers[5].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_ADR);
+			p->m_writers[5].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[5].mp_writer_mask_ctrl,
+						   CPY_WRITER5_MASK_CTRL_CNT);
+			p->m_writers[5].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER5_MASK_DATA);
+			p->m_writers[5].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[5].mp_writer_mask_data,
+						   CPY_WRITER5_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 5:
+		p->m_writers[4].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_CTRL);
+		p->m_writers[4].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_ADR);
+		p->m_writers[4].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[4].mp_writer_ctrl, CPY_WRITER4_CTRL_CNT);
+		p->m_writers[4].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER4_DATA);
+		p->m_writers[4].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[4].mp_writer_data,
+					   CPY_WRITER4_DATA_READER_SELECT);
+		p->m_writers[4].mp_writer_data_dyn =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_DYN);
+		p->m_writers[4].mp_writer_data_ofs =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_OFS);
+		p->m_writers[4].mp_writer_data_len =
+			register_get_field(p->m_writers[4].mp_writer_data, CPY_WRITER4_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[4].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[4].mp_writer_data,
+						   CPY_WRITER4_DATA_MASK_POINTER);
+			p->m_writers[4].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_CTRL);
+			p->m_writers[4].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_ADR);
+			p->m_writers[4].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[4].mp_writer_mask_ctrl,
+						   CPY_WRITER4_MASK_CTRL_CNT);
+			p->m_writers[4].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER4_MASK_DATA);
+			p->m_writers[4].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[4].mp_writer_mask_data,
+						   CPY_WRITER4_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 4:
+		p->m_writers[3].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_CTRL);
+		p->m_writers[3].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_ADR);
+		p->m_writers[3].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[3].mp_writer_ctrl, CPY_WRITER3_CTRL_CNT);
+		p->m_writers[3].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER3_DATA);
+		p->m_writers[3].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[3].mp_writer_data,
+					   CPY_WRITER3_DATA_READER_SELECT);
+		p->m_writers[3].mp_writer_data_dyn =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_DYN);
+		p->m_writers[3].mp_writer_data_ofs =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_OFS);
+		p->m_writers[3].mp_writer_data_len =
+			register_get_field(p->m_writers[3].mp_writer_data, CPY_WRITER3_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[3].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[3].mp_writer_data,
+						   CPY_WRITER3_DATA_MASK_POINTER);
+			p->m_writers[3].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_CTRL);
+			p->m_writers[3].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_ADR);
+			p->m_writers[3].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[3].mp_writer_mask_ctrl,
+						   CPY_WRITER3_MASK_CTRL_CNT);
+			p->m_writers[3].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER3_MASK_DATA);
+			p->m_writers[3].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[3].mp_writer_mask_data,
+						   CPY_WRITER3_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 3:
+		p->m_writers[2].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_CTRL);
+		p->m_writers[2].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_ADR);
+		p->m_writers[2].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[2].mp_writer_ctrl, CPY_WRITER2_CTRL_CNT);
+		p->m_writers[2].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER2_DATA);
+		p->m_writers[2].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[2].mp_writer_data,
+					   CPY_WRITER2_DATA_READER_SELECT);
+		p->m_writers[2].mp_writer_data_dyn =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_DYN);
+		p->m_writers[2].mp_writer_data_ofs =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_OFS);
+		p->m_writers[2].mp_writer_data_len =
+			register_get_field(p->m_writers[2].mp_writer_data, CPY_WRITER2_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[2].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[2].mp_writer_data,
+						   CPY_WRITER2_DATA_MASK_POINTER);
+			p->m_writers[2].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_CTRL);
+			p->m_writers[2].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_ADR);
+			p->m_writers[2].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[2].mp_writer_mask_ctrl,
+						   CPY_WRITER2_MASK_CTRL_CNT);
+			p->m_writers[2].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER2_MASK_DATA);
+			p->m_writers[2].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[2].mp_writer_mask_data,
+						   CPY_WRITER2_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 2:
+		p->m_writers[1].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_CTRL);
+		p->m_writers[1].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_ADR);
+		p->m_writers[1].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[1].mp_writer_ctrl, CPY_WRITER1_CTRL_CNT);
+		p->m_writers[1].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER1_DATA);
+		p->m_writers[1].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[1].mp_writer_data,
+					   CPY_WRITER1_DATA_READER_SELECT);
+		p->m_writers[1].mp_writer_data_dyn =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_DYN);
+		p->m_writers[1].mp_writer_data_ofs =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_OFS);
+		p->m_writers[1].mp_writer_data_len =
+			register_get_field(p->m_writers[1].mp_writer_data, CPY_WRITER1_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[1].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[1].mp_writer_data,
+						   CPY_WRITER1_DATA_MASK_POINTER);
+			p->m_writers[1].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_CTRL);
+			p->m_writers[1].mp_writer_mask_ctrl_addr =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_ADR);
+			p->m_writers[1].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[1].mp_writer_mask_ctrl,
+						   CPY_WRITER1_MASK_CTRL_CNT);
+			p->m_writers[1].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER1_MASK_DATA);
+			p->m_writers[1].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[1].mp_writer_mask_data,
+						   CPY_WRITER1_MASK_DATA_BYTE_MASK);
+		}
+	/* Fallthrough */
+	case 1:
+		p->m_writers[0].mp_writer_ctrl =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_CTRL);
+		p->m_writers[0].mp_writer_ctrl_addr =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_ADR);
+		p->m_writers[0].mp_writer_ctrl_cnt =
+			register_get_field(p->m_writers[0].mp_writer_ctrl, CPY_WRITER0_CTRL_CNT);
+		p->m_writers[0].mp_writer_data =
+			module_get_register(p->m_tx_cpy, CPY_WRITER0_DATA);
+		p->m_writers[0].mp_writer_data_reader_select =
+			register_get_field(p->m_writers[0].mp_writer_data,
+					   CPY_WRITER0_DATA_READER_SELECT);
+		p->m_writers[0].mp_writer_data_dyn =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_DYN);
+		p->m_writers[0].mp_writer_data_ofs =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_OFS);
+		p->m_writers[0].mp_writer_data_len =
+			register_get_field(p->m_writers[0].mp_writer_data, CPY_WRITER0_DATA_LEN);
+		if (variant != 0) {
+			p->m_writers[0].mp_writer_data_mask_pointer =
+				register_get_field(p->m_writers[0].mp_writer_data,
+						   CPY_WRITER0_DATA_MASK_POINTER);
+			p->m_writers[0].mp_writer_mask_ctrl =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_CTRL);
+			p->m_writers[0].mp_writer_mask_ctrl_addr =
+			 register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+					    CPY_WRITER0_MASK_CTRL_ADR);
+			p->m_writers[0].mp_writer_mask_ctrl_cnt =
+				register_get_field(p->m_writers[0].mp_writer_mask_ctrl,
+						   CPY_WRITER0_MASK_CTRL_CNT);
+			p->m_writers[0].mp_writer_mask_data =
+				module_get_register(p->m_tx_cpy, CPY_WRITER0_MASK_DATA);
+			p->m_writers[0].mp_writer_mask_data_byte_mask =
+				register_get_field(p->m_writers[0].mp_writer_mask_data,
+						   CPY_WRITER0_MASK_DATA_BYTE_MASK);
+		}
+		break;
+	/* Unreachable: writers_cnt >= 1 was checked above. */
+	case 0:
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Select the CTRL.ADR index of writer `index` for subsequent DATA accesses. */
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+/* Set the CTRL.CNT access count of writer `index`. */
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+/* Set the DATA.READER_SELECT field of writer `index`. */
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+/* Set the DATA.DYN field of writer `index`. */
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+/* Set the DATA.OFS field of writer `index`. */
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+/* Set the DATA.LEN field of writer `index`. */
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+/* Set the DATA.MASK_POINTER field of writer `index`.
+ * Only valid on variants where the mask fields were resolved at init.
+ */
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_data_mask_pointer);
+	field_set_val32(p->m_writers[index].mp_writer_data_mask_pointer, val);
+}
+
+/* Flush the CTRL and DATA registers of writer `index` (one entry each). */
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_data, 1);
+}
+
+/* Select the MASK_CTRL.ADR index of writer `index` (mask-capable variants only). */
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_addr);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_addr, val);
+}
+
+/* Set the MASK_CTRL.CNT access count of writer `index` (mask-capable variants only). */
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl_cnt);
+	field_set_val32(p->m_writers[index].mp_writer_mask_ctrl_cnt, val);
+}
+
+/* Set the MASK_DATA.BYTE_MASK field of writer `index` (mask-capable variants only). */
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_data_byte_mask);
+	field_set_val32(p->m_writers[index].mp_writer_mask_data_byte_mask, val);
+}
+
+/* Flush the MASK_CTRL and MASK_DATA registers of writer `index`. */
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+	assert(index < p->m_writers_cnt);
+	assert(p->m_writers[index].mp_writer_mask_ctrl);
+	assert(p->m_writers[index].mp_writer_mask_data);
+	register_flush(p->m_writers[index].mp_writer_mask_ctrl, 1);
+	register_flush(p->m_writers[index].mp_writer_mask_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
new file mode 100644
index 0000000000..f97983b29a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_cpy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_CPY_H__
+#define __FLOW_NTHW_TX_CPY_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field handles for one TX_CPY writer.
+ * The mp_writer_mask_* members stay NULL on variants without mask support
+ * (see tx_cpy_nthw_init: resolved only when the variant parameter is non-zero).
+ */
+struct tx_cpy_writers_s {
+	nt_register_t *mp_writer_ctrl;	/* CPY_WRITERn_CTRL register */
+	nt_field_t *mp_writer_ctrl_addr;	/* CTRL.ADR field */
+	nt_field_t *mp_writer_ctrl_cnt;	/* CTRL.CNT field */
+
+	nt_register_t *mp_writer_data;	/* CPY_WRITERn_DATA register */
+	nt_field_t *mp_writer_data_reader_select;	/* DATA.READER_SELECT field */
+	nt_field_t *mp_writer_data_dyn;	/* DATA.DYN field */
+	nt_field_t *mp_writer_data_ofs;	/* DATA.OFS field */
+	nt_field_t *mp_writer_data_len;	/* DATA.LEN field */
+	nt_field_t *mp_writer_data_mask_pointer;	/* DATA.MASK_POINTER (variant only) */
+
+	nt_register_t *mp_writer_mask_ctrl;	/* MASK_CTRL register (variant only) */
+	nt_field_t *mp_writer_mask_ctrl_addr;	/* MASK_CTRL.ADR (variant only) */
+	nt_field_t *mp_writer_mask_ctrl_cnt;	/* MASK_CTRL.CNT (variant only) */
+
+	nt_register_t *mp_writer_mask_data;	/* MASK_DATA register (variant only) */
+	nt_field_t *mp_writer_mask_data_byte_mask;	/* MASK_DATA.BYTE_MASK (variant only) */
+};
+
+/* Hardware shim for one TX_CPY module instance. */
+struct tx_cpy_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this shim is bound to */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+
+	nt_module_t *m_tx_cpy;	/* TX_CPY module instance */
+
+	unsigned int m_writers_cnt;	/* number of writers (product parameter) */
+	struct tx_cpy_writers_s *m_writers;	/* heap array, freed by tx_cpy_nthw_delete() */
+};
+
+struct tx_cpy_nthw *tx_cpy_nthw_new(void);
+void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
+int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index,
+			    uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+				  uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index,
+			 uint32_t val);
+void tx_cpy_nthw_writer_mask_pointer(const struct tx_cpy_nthw *p, unsigned int index,
+				 uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+void tx_cpy_nthw_writer_mask_select(const struct tx_cpy_nthw *p, unsigned int index,
+				uint32_t val);
+void tx_cpy_nthw_writer_mask_cnt(const struct tx_cpy_nthw *p, unsigned int index,
+			     uint32_t val);
+void tx_cpy_nthw_writer_mask(const struct tx_cpy_nthw *p, unsigned int index,
+			  uint32_t val);
+void tx_cpy_nthw_writer_mask_flush(const struct tx_cpy_nthw *p, unsigned int index);
+
+#endif /* __FLOW_NTHW_TX_CPY_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
new file mode 100644
index 0000000000..998c3613ee
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_ins.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug-mode setting to the underlying TX_INS module. */
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX_INS shim object.
+ * Returns NULL on allocation failure; the caller owns the object and
+ * releases it with tx_ins_nthw_delete().
+ */
+struct tx_ins_nthw *tx_ins_nthw_new(void)
+{
+	return calloc(1, sizeof(struct tx_ins_nthw));
+}
+
+/* Scrub and free a shim object previously returned by tx_ins_nthw_new().
+ * Safe to call with NULL.
+ */
+void tx_ins_nthw_delete(struct tx_ins_nthw *p)
+{
+	if (p == NULL)
+		return;
+
+	(void)memset(p, 0, sizeof(*p));
+	free(p);
+}
+
+/*
+ * Initialize a TX_INS (TX inserter) hardware shim.
+ *
+ * Passing p == NULL probes for the module: the return value then only
+ * reports whether the instance exists (0) or not (-1).
+ *
+ * Returns 0 on success, -1 if the module instance does not exist.
+ */
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_INS, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxIns %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* Reuse the module handle already looked up above instead of
+	 * querying the FPGA a second time.
+	 */
+	p->m_tx_ins = p_mod;
+
+	/* RCP register/field handles */
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_ins, INS_RCP_CTRL);
+	p->mp_rcp_addr = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_ADR);
+	p->mp_rcp_cnt = register_get_field(p->mp_rcp_ctrl, INS_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_ins, INS_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, INS_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, INS_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, INS_RCP_DATA_LEN);
+
+	return 0;
+}
+
+/* Select the RCP record index (CTRL.ADR) for subsequent DATA accesses. */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_addr, val);
+}
+
+/* Set the RCP access count (CTRL.CNT). */
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_cnt, val);
+}
+
+/* Set the DATA.DYN field of the selected RCP record. */
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+/* Set the DATA.OFS field of the selected RCP record. */
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+/* Set the DATA.LEN field of the selected RCP record. */
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+/* Flush the RCP CTRL and DATA registers (one entry each). */
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
new file mode 100644
index 0000000000..813bd30c62
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_ins.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_INS_H__
+#define __FLOW_NTHW_TX_INS_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Hardware shim for one TX_INS (TX inserter) module instance. */
+struct tx_ins_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this shim is bound to */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle */
+
+	nt_module_t *m_tx_ins;	/* TX_INS module instance */
+
+	nt_register_t *mp_rcp_ctrl;	/* INS_RCP_CTRL register */
+	nt_field_t *mp_rcp_addr;	/* CTRL.ADR: RCP record index */
+	nt_field_t *mp_rcp_cnt;	/* CTRL.CNT: access count */
+
+	nt_register_t *mp_rcp_data;	/* INS_RCP_DATA register */
+	nt_field_t *mp_rcp_data_dyn;	/* DATA.DYN field */
+	nt_field_t *mp_rcp_data_ofs;	/* DATA.OFS field */
+	nt_field_t *mp_rcp_data_len;	/* DATA.LEN field */
+};
+
+struct tx_ins_nthw *tx_ins_nthw_new(void);
+void tx_ins_nthw_delete(struct tx_ins_nthw *p);
+int tx_ins_nthw_init(struct tx_ins_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_INS_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
new file mode 100644
index 0000000000..5e7e26f74d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_register.h"
+
+#include "flow_nthw_tx_rpl.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Forward the debug level to the underlying FPGA module model. */
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+	module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
+/*
+ * Allocate a zero-initialized TX RPL context.
+ * Returns NULL on allocation failure.
+ */
+struct tx_rpl_nthw *tx_rpl_nthw_new(void)
+{
+	/* calloc() zero-initializes, replacing the malloc()+memset() pair */
+	struct tx_rpl_nthw *p = calloc(1, sizeof(struct tx_rpl_nthw));
+
+	return p;
+}
+
+/*
+ * Free a TX RPL context.  NULL is accepted and ignored.
+ * The struct is cleared before free (defensive; the NULL guard is
+ * required for the memset even though free(NULL) is a no-op).
+ */
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p)
+{
+	if (p) {
+		(void)memset(p, 0, sizeof(*p));
+		free(p);
+	}
+}
+
+/*
+ * Bind the TX RPL (replacer) register/field shadow pointers for FPGA
+ * module instance @n_instance.
+ *
+ * When @p is NULL the call is a pure probe: returns 0 if the module
+ * instance exists in the FPGA image, -1 otherwise.  With a non-NULL
+ * context, returns 0 on success or -1 (with a log line) when the
+ * instance does not exist.
+ */
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance)
+{
+	const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str;
+	nt_module_t *p_mod = fpga_query_module(p_fpga, MOD_TX_RPL, n_instance);
+
+	assert(n_instance >= 0 && n_instance < 256);
+
+	if (p == NULL)
+		return p_mod == NULL ? -1 : 0;
+
+	if (p_mod == NULL) {
+		NT_LOG(ERR, NTHW, "%s: TxRpl %d: no such instance\n",
+		       p_adapter_id_str, n_instance);
+		return -1;
+	}
+
+	p->mp_fpga = p_fpga;
+	p->m_physical_adapter_no = (uint8_t)n_instance;
+	/* reuse the module handle queried above instead of a second lookup */
+	p->m_tx_rpl = p_mod;
+
+	p->mp_rcp_ctrl = module_get_register(p->m_tx_rpl, RPL_RCP_CTRL);
+	p->mp_rcp_ctrl_addr = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_ADR);
+	p->mp_rcp_ctrl_cnt = register_get_field(p->mp_rcp_ctrl, RPL_RCP_CTRL_CNT);
+	p->mp_rcp_data = module_get_register(p->m_tx_rpl, RPL_RCP_DATA);
+	p->mp_rcp_data_dyn = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_DYN);
+	p->mp_rcp_data_ofs = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_OFS);
+	p->mp_rcp_data_len = register_get_field(p->mp_rcp_data, RPL_RCP_DATA_LEN);
+	p->mp_rcp_data_rpl_ptr =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_RPL_PTR);
+	p->mp_rcp_data_ext_prio =
+		register_get_field(p->mp_rcp_data, RPL_RCP_DATA_EXT_PRIO);
+
+	p->mp_ext_ctrl = module_get_register(p->m_tx_rpl, RPL_EXT_CTRL);
+	p->mp_ext_ctrl_addr = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_ADR);
+	p->mp_ext_ctrl_cnt = register_get_field(p->mp_ext_ctrl, RPL_EXT_CTRL_CNT);
+	p->mp_ext_data = module_get_register(p->m_tx_rpl, RPL_EXT_DATA);
+	p->mp_ext_data_rpl_ptr =
+		register_get_field(p->mp_ext_data, RPL_EXT_DATA_RPL_PTR);
+
+	p->mp_rpl_ctrl = module_get_register(p->m_tx_rpl, RPL_RPL_CTRL);
+	p->mp_rpl_ctrl_addr = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_ADR);
+	p->mp_rpl_ctrl_cnt = register_get_field(p->mp_rpl_ctrl, RPL_RPL_CTRL_CNT);
+	p->mp_rpl_data = module_get_register(p->m_tx_rpl, RPL_RPL_DATA);
+	p->mp_rpl_data_value = register_get_field(p->mp_rpl_data, RPL_RPL_DATA_VALUE);
+
+	return 0;
+}
+
+/* RCP (recipe) table: stage address/count and data fields in the shadow
+ * registers, then flush() to commit CTRL+DATA to hardware.
+ */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rcp_ctrl, 1);
+	register_flush(p->mp_rcp_data, 1);
+}
+
+/* EXT table: same select/stage/flush pattern for the RPL_EXT registers. */
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_ext_ctrl, 1);
+	register_flush(p->mp_ext_data, 1);
+}
+
+/* RPL table: replacement data; the data value is a 4-word (128-bit) field. */
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+	field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
+	/* writes 4 x 32-bit words from val */
+	field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+	register_flush(p->mp_rpl_ctrl, 1);
+	register_flush(p->mp_rpl_data, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
new file mode 100644
index 0000000000..e5f724361b
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_tx_rpl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_NTHW_TX_RPL_H__
+#define __FLOW_NTHW_TX_RPL_H__
+
+#include <stdint.h>
+#include "nthw_fpga_model.h"
+
+/* Register/field shadow handles for one TX replacer (TX_RPL) FPGA module. */
+struct tx_rpl_nthw {
+	uint8_t m_physical_adapter_no;	/* instance number this context is bound to */
+	nt_fpga_t *mp_fpga;	/* owning FPGA handle (not owned by this struct) */
+
+	nt_module_t *m_tx_rpl;	/* the TX_RPL module instance */
+
+	/* RCP (recipe) control/data register pair */
+	nt_register_t *mp_rcp_ctrl;
+	nt_field_t *mp_rcp_ctrl_addr;
+	nt_field_t *mp_rcp_ctrl_cnt;
+
+	nt_register_t *mp_rcp_data;
+	nt_field_t *mp_rcp_data_dyn;
+	nt_field_t *mp_rcp_data_ofs;
+	nt_field_t *mp_rcp_data_len;
+	nt_field_t *mp_rcp_data_rpl_ptr;
+	nt_field_t *mp_rcp_data_ext_prio;
+
+	/* EXT control/data register pair */
+	nt_register_t *mp_ext_ctrl;
+	nt_field_t *mp_ext_ctrl_addr;
+	nt_field_t *mp_ext_ctrl_cnt;
+
+	nt_register_t *mp_ext_data;
+	nt_field_t *mp_ext_data_rpl_ptr;
+
+	/* RPL (replacement data) control/data register pair */
+	nt_register_t *mp_rpl_ctrl;
+	nt_field_t *mp_rpl_ctrl_addr;
+	nt_field_t *mp_rpl_ctrl_cnt;
+
+	nt_register_t *mp_rpl_data;
+	nt_field_t *mp_rpl_data_value;
+};
+
+struct tx_rpl_nthw *tx_rpl_nthw_new(void);
+void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
+int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nt_fpga_t *p_fpga, int n_instance);
+
+int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
+
+#endif /* __FLOW_NTHW_TX_RPL_H__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 5/8] net/ntnic: adds FPGA abstraction layer
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (2 preceding siblings ...)
  2023-09-08 16:07   ` [PATCH v16 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
@ 2023-09-08 16:07   ` Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 6/8] net/ntnic: adds flow logic Mykola Kostenok
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The FPGA abstraction layer limits the need to rewrite flow logic
when new FPGA modules are created.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed compilation with Fedora 38
* Fixed WARNING:TYPO_SPELLING
v11:
* Fix dereferencing type-punned pointer in macro
v13:
* Fix typo spelling warnings
---
 drivers/net/ntnic/meson.build                 |   21 +
 .../ntnic/nthw/flow_api/flow_api_actions.c    |  205 ++
 .../ntnic/nthw/flow_api/flow_api_actions.h    |  284 +++
 .../ntnic/nthw/flow_api/flow_api_backend.c    |  182 ++
 .../ntnic/nthw/flow_api/flow_api_backend.h    | 1818 +++++++++++++++++
 .../net/ntnic/nthw/flow_api/flow_api_engine.h |  475 +++++
 .../ntnic/nthw/flow_api/flow_api_nic_setup.h  |   32 +
 .../nthw/flow_api/flow_engine/flow_group.c    |  125 ++
 .../nthw/flow_api/flow_engine/flow_hasher.c   |  213 ++
 .../nthw/flow_api/flow_engine/flow_hasher.h   |   20 +
 .../nthw/flow_api/flow_engine/flow_kcc.c      |  434 ++++
 .../ntnic/nthw/flow_api/flow_engine/flow_km.c | 1434 +++++++++++++
 .../nthw/flow_api/flow_engine/flow_tunnel.c   |  787 +++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c   | 1789 ++++++++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v18.h     |  138 ++
 .../nthw/flow_api/hw_mod/hw_mod_cat_v21.h     |   88 +
 .../nthw/flow_api/hw_mod/hw_mod_cat_v22.h     |   83 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 1099 ++++++++++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v17.h     |  265 +++
 .../nthw/flow_api/hw_mod/hw_mod_flm_v20.h     |  102 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c   |  195 ++
 .../nthw/flow_api/hw_mod/hw_mod_hsh_v5.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c   |  178 ++
 .../nthw/flow_api/hw_mod/hw_mod_hst_v2.h      |   32 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c   |  271 +++
 .../nthw/flow_api/hw_mod/hw_mod_ioa_v4.h      |   36 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km.c    |  629 ++++++
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h |   93 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c   |  219 ++
 .../nthw/flow_api/hw_mod/hw_mod_pdb_v9.h      |   39 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c   |  348 ++++
 .../nthw/flow_api/hw_mod/hw_mod_qsl_v7.h      |   45 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c   |  112 +
 .../nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h    |   20 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c   |  358 ++++
 .../nthw/flow_api/hw_mod/hw_mod_roa_v6.h      |   49 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c   |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr.c      |  132 ++
 .../nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h   |   19 +
 .../nthw/flow_api/hw_mod/hw_mod_slc_v1.h      |   19 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   |  983 +++++++++
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v1.h      |  103 +
 .../nthw/flow_api/hw_mod/hw_mod_tpe_v2.h      |   37 +
 .../nthw/flow_api/stream_binary_flow_api.h    |  697 +++++++
 44 files changed, 14376 insertions(+)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8c065ee9a3..8a5a3d5deb 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -18,6 +18,7 @@ includes = [
     include_directories('nthw'),
     include_directories('nthw/core'),
     include_directories('nthw/supported'),
+    include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api_actions.c',
+    'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_engine/flow_group.c',
+    'nthw/flow_api/flow_engine/flow_hasher.c',
+    'nthw/flow_api/flow_engine/flow_kcc.c',
+    'nthw/flow_api/flow_engine/flow_km.c',
+    'nthw/flow_api/flow_engine/flow_tunnel.c',
+    'nthw/flow_api/hw_mod/hw_mod_cat.c',
+    'nthw/flow_api/hw_mod/hw_mod_flm.c',
+    'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+    'nthw/flow_api/hw_mod/hw_mod_hst.c',
+    'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+    'nthw/flow_api/hw_mod/hw_mod_km.c',
+    'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+    'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+    'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+    'nthw/flow_api/hw_mod/hw_mod_roa.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc.c',
+    'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+    'nthw/flow_api/hw_mod/hw_mod_tpe.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
new file mode 100644
index 0000000000..945ab7d743
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * Program the ROA tunnel-header template for recipe @index from @tun.
+ * Errors from the individual field writes are OR'ed together and
+ * returned; 0 means all writes succeeded.
+ */
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun)
+{
+	int err = 0;
+	int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+	/*
+	 * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+	 */
+	for (int i = 0; (i < num_writes) && !err; i++) {
+		for (int ii = 0; (ii < 4) && !err; ii++) {
+			/* must write each 4 words backwards! */
+			err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+						     index, i * 4 + ii,
+						     ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Program ROA tunnel configuration recipe @index from the packed
+ * color_actions bitmask (see the ROA layout comment in
+ * flow_api_actions.h).  Configures tunnel push (length/type/IP
+ * checksum handling), recirculation/bypass and TX destination, and
+ * mirrors the result into the IOA EPP table.  Returns 0 on success,
+ * -1 for an unsupported TX port combination.
+ */
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+	/*
+	 * If tunnel header specified
+	 */
+	int tun_len = get_roa_tunhdr_len(color_actions);
+
+	if (tun_len) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+				      tun_len);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+				      roa_get_tun_type(color_actions));
+
+		/* set the total tunnel IP header length */
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 */
+			if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+					       sizeof(struct flow_elem_ipv6))) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* tunnel header length excludes the IPv6 header itself */
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   (sizeof(struct flow_elem_eth) +
+						    sizeof(struct flow_elem_ipv6))));
+			}
+		} else {
+			/* IPv4 */
+			if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+						      index, 1);
+				hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+					(uint32_t)(tun_len -
+						   sizeof(struct flow_elem_eth)));
+			}
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+				      get_roa_tun_ip_type(color_actions));
+
+		if (get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 - Do not update the IP checksum in the tunnel header */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+					      0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index, 0);
+		} else {
+			/* IPv4 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+					      index, 1);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+					      index,
+					      get_roa_tun_ip_csum(color_actions));
+		}
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+				      index, 1);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+	}
+
+	/* bypass must be > 0 or recirculate_port >= 0 - bypass wins */
+	uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+	if (recirculate_bypass) {
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      recirculate_bypass);
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+	} else {
+		int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+		hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+				      255);
+
+		if (recirculate_port >= 0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+					      index, recirculate_port);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+		}
+	}
+
+	uint8_t tx = roa_get_tx(color_actions);
+
+	if (tx) {
+		if (tx == DESTINATION_TX_PHY0) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+		} else if (tx == DESTINATION_TX_PHY1) {
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY1);
+		} else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+			/* dual-port TX: send to PHY0 and recirculate a copy to PHY1 */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+					      index, ROA_TX_PHY0);
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+					      index, 0x81); /* port 1 - only port left */
+			hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+					      index, 1);
+
+		} else {
+			return -1; /* ERR */
+		}
+	}
+
+	/*
+	 * Special IOA memory that contains ROA information - bad FPGA design
+	 */
+	if (tx || tun_len) {
+		if (be->ioa.ver > 3 && tun_len &&
+				get_roa_tun_ip_type(color_actions) == 1) {
+			/* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, 2);
+		} else {
+			/* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+			hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+					       index, !!tun_len);
+		}
+		hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Program IOA recipe @index from the packed color_actions bitmask
+ * (see the IOA layout comment in flow_api_actions.h).
+ * Always returns 0.
+ */
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions)
+{
+	/* ioa_set_*(0) yields just that action's flag bit - used as a test mask */
+	if (color_actions & ioa_set_vxlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+	}
+
+	if (color_actions & ioa_set_vlan_pop(0)) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+		NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+	}
+
+	int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+	if (color_actions & ioa_set_vlan_push(0, 0)) {
+		uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+		NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+		       tpid_sel ? 0x88a8 : 0x8100, tci);
+		/* split the TCI into VID (11:0), DEI (12) and PCP (15:13) */
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+				   tci & 0x0FFF);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+				   (tci >> 12) & 0x1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+				   (tci >> 13) & 0x7);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+	}
+
+	int queue = ioa_get_queue(color_actions);
+
+	if (queue >= 0) {
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+		hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+	}
+
+	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
new file mode 100644
index 0000000000..400066e817
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_actions.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+/* TX destination flags; PHY0/PHY1 may be OR'ed for dual-port TX */
+enum {
+	DESTINATION_TX_NONE = 0,
+	DESTINATION_TX_PHY0 = 1,
+	DESTINATION_TX_PHY1 = 2
+};
+
+/* tunnel IP version selector */
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+/* VLAN TPID selector values for HW_IOA_RCP_VLAN_TPID_SEL */
+enum {
+	VLAN_TPID_802_1Q = 0,
+	VLAN_TPID_802_1AD,
+	VLAN_TPID_CUSTOM_0,
+	VLAN_TPID_CUSTOM_1
+};
+
+/* ROA TX_LAG_IX values */
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+
+#else
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counter ***
+#endif
+#endif
+
+/*
+ * OLD behavior substituted from 4.1+
+ *
+ * 13:0   Mark (color) 16384 flow stats
+ * 21:14  IOA index      256 entries
+ * 29:22  ROA index      256 entries
+ * 31:30  1 to indicate this layout
+ * or
+ *  9:0   Mark (color) 1024 flow stats
+ * 19:10  IOA index    1024 entries
+ * 29:20  ROA index    1024 entries
+ * 31:30  0 to indicate this layout
+ */
+/*
+ * Compose the pre-4.1 color action word.  The compile-time
+ * MAX_COLOR_FLOW_STATS setting selects the bit layout; bits 31:30 tag
+ * which layout is in use (0 = 1024-entry, 1 = 16384-entry variant).
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+		uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0x3ff) << 10) |
+				((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+	uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+				((ioa_rcp & 0xff) << 14) |
+				((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+	return color_action;
+}
+
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
+
+/*
+ *  9:0   Mark (color) 1024 flow stats
+ * 17:10  IOA index     256 entries
+ * 25:18  ROA index     256 entries
+ * 30:26  QSL and HSH    32 recipes indexable
+ * 31:31  CAO               implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+/*
+ * Compose the 32-bit color action word (mark | IOA | ROA | QSL/HSH | CAO).
+ * Bit 31 (CAO, "color action set") must use an unsigned constant:
+ * "1 << 31" overflows a signed int, which is undefined behavior in C.
+ */
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+					uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+	uint32_t color_action = (mark & FLOW_MARK_MASK) |
+				((ioa_rcp & IOA_RCP_MASK) << 10) |
+				((roa_rcp & ROA_RCP_MASK) << 18) |
+				((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+	return color_action;
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  roa config settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  ROA config  bit offs  bits
+ *  ----------------------------
+ *  recirc port      7:0    8   -> uses hbx > 0
+ *  recirc bypass   15:8    8   -> uses hbx > 0  if set, will override
+ *  tunnel type     19:16   4
+ *  tx port         23:20   4   -> txport + 1
+ *  tun_ip_type     24:24   1
+ *  recirculate     25:25   1   -> recirculate port set
+ *  tunhdr_len      33:26   8   -> tunnel header length - 0 if none
+ *  ip_csum_prec    49:34  16   -> tunnel ip header checksum pre-calculated
+ *  new_recirc_port 50:50   1   -> indication of a new port for recirculate has been allocated.
+ *                                 Needs default queue
+ */
+
+/* Mark that a new recirculate port was allocated (bit 50). */
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+	actions |= 1ULL << 50;
+	return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+	return (uint8_t)((actions >> 50) & 1);
+}
+
+/* Tunnel IP version (bit 24): 0 = IPv4, 1 = IPv6. */
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+	actions |= (uint64_t)(ip_type & 1) << 24;
+	return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 1);
+}
+
+/* Pre-calculated tunnel IP header checksum (bits 49:34). */
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+	actions |= (uint64_t)csum << 34;
+	return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+	return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+/* Tunnel header length in bytes (bits 33:26); 0 means no tunnel. */
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+	actions |= (uint64_t)length << 26;
+	return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+	return (uint8_t)((actions >> 26) & 0xff);
+}
+
+/* TX port (bits 23:20), stored as txport + ROA_TX_PHY0. */
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+	actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+	return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+	return (actions >> 20) & 0x0f;
+}
+
+/* Tunnel type (bits 19:16). */
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+	actions |= (type & 0x0f) << 16;
+	return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+	return (actions >> 16) & 0x0f;
+}
+
+/* Set recirculate flag (bit 25) and port (bits 7:0).
+ * NOTE(review): OR-only - assumes bits 7:0 are clear on entry; confirm
+ * callers never set a second port on the same action word.
+ */
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+	actions |= (1ULL << 25) | port;
+	return actions;
+}
+
+/* Returns the recirculate port, or -1 when the flag (bit 25) is unset. */
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+	if (!((1ULL << 25) & actions))
+		return -1;
+	return (actions & 0xff);
+}
+
+/* Recirc bypass port (bits 15:8); non-zero overrides the recirc port. */
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+	actions |= ((uint64_t)port & 0xff) << 8;
+	return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+	return ((actions >> 8) & 0xff);
+}
+
+/*
+ *  This is a bitmask representation in SW for
+ *  ioa action settings. It is mostly done for
+ *  effective cache matching
+ *
+ *  IOA action       bit offs  bits
+ *  --------------------------------
+ *  tci              15:0       16
+ *  queue            23:16       8   uses hbx
+ *  tpid select      27:24       4
+ *  pop vxlan        28          1
+ *  pop vlan         29          1
+ *  push vlan        30          1
+ *  queue override   31          1
+ */
+
+/*
+ * Set the queue-override flag (bit 31) and queue id (bits 23:16).
+ * "1 << 31" is signed-overflow UB and, worse, sign-extends when widened
+ * to uint64_t, setting bits 63:31 instead of just bit 31 - use 1ULL.
+ */
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+	return actions;
+}
+
+/*
+ * Return the override queue id, or -1 when the flag (bit 31) is unset.
+ * 1ULL avoids the signed "1 << 31" overflow/sign-extension hazard in
+ * the mask test against the 64-bit action word.
+ */
+static inline int ioa_get_queue(uint64_t actions)
+{
+	if (!(actions & (1ULL << 31)))
+		return -1;
+	return ((actions >> 16) & 0xff);
+}
+
+/* Set the pop-outer-VXLAN flag (bit 28). */
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+	actions |= 1 << 28;
+	return actions;
+}
+
+/* Set the pop-outer-VLAN flag (bit 29). */
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+	actions |= 1 << 29;
+	return actions;
+}
+
+/* Select the 802.1ad (QinQ) TPID in the tpid-select field (bits 27:24). */
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+	actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+	return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+	return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+/* Set the push-VLAN flag (bit 30) and the full TCI (bits 15:0). */
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+	actions |= (1 << 30) | tci;
+	return actions;
+}
+
+/* Set the push-VLAN flag and only the PCP part (bits 15:13) of the TCI. */
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+	actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+	return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+	return (uint16_t)(actions & 0xffff);
+}
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+				   struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+				   uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
new file mode 100644
index 0000000000..f4d71acb51
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/*
 * Registry of every NIC HW module known to the flow API backend.
 * Each entry bundles the module name (used only in log messages) with
 * its lifecycle callbacks: allocate/free the SW cache, reset it to
 * defaults, and a presence probe so absent modules can be skipped
 * during flow_api_backend_init().
 */
static const struct {
	const char *name;	/* printable module name for logs */
	int (*allocate)(struct flow_api_backend_s *be);
	void (*free)(struct flow_api_backend_s *be);
	int (*reset)(struct flow_api_backend_s *be);
	bool (*present)(struct flow_api_backend_s *be);
} module[] = {
	{	"CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
		hw_mod_cat_present
	},
	{	"KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
		hw_mod_km_present
	},
	{	"FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
		hw_mod_flm_present
	},
	{	"HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
		hw_mod_hsh_present
	},
	{	"HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
		hw_mod_hst_present
	},
	{	"QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
		hw_mod_qsl_present
	},
	{	"SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
		hw_mod_slc_present
	},
	{	"SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
		hw_mod_slc_lr_reset, hw_mod_slc_lr_present
	},
	{	"PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
		hw_mod_pdb_present
	},
	{	"IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
		hw_mod_ioa_present
	},
	{	"ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
		hw_mod_roa_present
	},
	{	"RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
		hw_mod_rmc_present
	},
	{	"TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
		hw_mod_tpe_present
	},
};

/* Number of entries in the module registry above */
#define MOD_COUNT (ARRAY_SIZE(module))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+	void *base = NULL;
+	void **plist[MAX_SETS];
+	int len[MAX_SETS];
+	int offs[MAX_SETS];
+	unsigned int total_bytes = 0;
+	int cnt, elem_size;
+
+	assert(sets <= MAX_SETS);
+	assert(sets > 0);
+
+	va_list args;
+
+	va_start(args, sets);
+
+	for (int i = 0; i < sets; i++) {
+		plist[i] = va_arg(args, void *);
+		cnt = va_arg(args, int);
+		elem_size = va_arg(args, int);
+		offs[i] = EXTRA_INDEXES * elem_size;
+		len[i] = offs[i] + cnt * elem_size;
+		total_bytes += len[i];
+	}
+	base = calloc(1, total_bytes);
+	if (base) {
+		char *p_b = (char *)base;
+
+		for (int i = 0; i < sets; i++) {
+			(*plist[i]) = (void *)((char *)p_b + offs[i]);
+			p_b += len[i];
+		}
+	} else {
+		NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+	}
+
+	va_end(args);
+
+	mod->base = base;
+	mod->allocated_size = total_bytes;
+
+	return base;
+}
+
/*
 * Clear a module's entire SW cache (allocated by callocate_mod) back
 * to all-zero bytes; base pointer and size were recorded at allocation.
 */
void zero_module_cache(struct common_func_s *mod)
{
	memset(mod->base, 0, mod->allocated_size);
}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev)
+{
+	assert(dev);
+	dev->iface = iface;
+	dev->be_dev = be_dev;
+	dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+	dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+	dev->max_categories = iface->get_nb_categories(be_dev);
+	dev->max_queues = iface->get_nb_queues(be_dev);
+
+	NT_LOG(DBG, FILTER,
+	       "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create Cache and SW, version independent, NIC module representation
+	 */
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (!module[mod].present(dev))
+			continue;
+		if (module[mod].allocate(dev) == 0 &&
+				module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Initialization of NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			flow_api_backend_done(dev);
+			NT_LOG(ERR, FILTER,
+			       "*************** Failed to create Binary Flow API *******************\n");
+			NT_LOG(ERR, FILTER,
+			       "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+			NT_LOG(ERR, FILTER,
+			       "********************************************************************\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+	assert(dev);
+
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+		if (module[mod].reset(dev) == 0) {
+			/* OK */
+			continue;
+		} else {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Resetting NIC module failed : [ %s ]\n",
+			       module[mod].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+	for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+		module[mod].free(dev);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
new file mode 100644
index 0000000000..c3386adea9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_backend.h
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "../flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
/* Log an out-of-range index error; returns error code -2 for the caller. */
static inline int error_index_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
	return -2;
}
+
/* Log an out-of-range word-offset error; returns error code -3. */
static inline int error_word_off_too_large(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
	return -3;
}
+
/*
 * Log that a NIC module version is unsupported (version split into
 * major.minor via VER_MAJOR/VER_MINOR); returns error code -4.
 */
static inline int error_unsup_ver(const char *func, const char *mod, int ver)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
	       func, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
/* Log that a register field is unsupported in this module version;
 * returns error code -5.
 */
static inline int error_unsup_field(const char *func)
{
	NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
		func);
	return -5;
}
+
/*
 * Log that a HW resource count is insufficient for the request.
 * NOTE(review): returns -4, the same code as error_unsup_ver() —
 * confirm callers do not need to distinguish the two conditions.
 */
static inline int error_resource_count(const char *func, const char *resource,
	const char *mod, int ver)
{
	NT_LOG(INF, FILTER,
	       "ERROR:%s: Insufficient resource [ %s ] : NIC module:"
	       "%s ver %i.%i\n",
	       func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
	return -4;
}
+
/* Sentinel returned by find_equal_index() when no equal entry exists */
#define NOT_FOUND 0xffffffff

/*
 * Number of hidden spare elements reserved in front of every module
 * array by callocate_mod(). With no enumerators declared before it,
 * EXTRA_INDEXES evaluates to 0, i.e. no spare slots are currently
 * reserved.
 */
enum { EXTRA_INDEXES };
/*
 * NOTE(review): EXTRA_INDEX_COPY is not declared in this view —
 * presumably an enumerator intended to precede EXTRA_INDEXES when
 * spare copy slots are enabled; confirm before using COPY_INDEX.
 */
#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
/*
 * Transfer one 32-bit field between the SW cache and the caller:
 * get != 0 copies cache -> *val, get == 0 copies *val -> cache.
 */
static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
{
	uint32_t *dst = get ? val : cached_val;
	const uint32_t *src = get ? cached_val : val;

	*dst = *src;
}
+
/*
 * Transfer one 32-bit field between a signed cache slot and the
 * caller's unsigned word: get != 0 copies cache -> *val, otherwise
 * *val -> cache (bit pattern preserved via explicit casts).
 */
static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
{
	if (get) {
		*val = (uint32_t)*cached_val;
		return;
	}
	*cached_val = (int32_t)*val;
}
+
+static inline int find_equal_index(void *be_module_reg,
+	unsigned int type_size, unsigned int idx, unsigned int start,
+	unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+	unsigned int i;
+	if (!get)
+		return error_unsup_field(func);
+	*value = NOT_FOUND;
+	if (start >= nb_elements)
+		return error_index_too_large(func);
+	for (i = start; i < nb_elements; i++) {
+		if (idx == i)
+			continue;
+		if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+			   (uint8_t *)be_module_reg + i * type_size,
+			   type_size) == 0) {
+			*value = i;
+			break;
+		}
+	}
+	return 0;
+}
+
/*
 * Compare two register-cache entries byte-wise. Returns 1 when entries
 * idx and cmp_idx are distinct indices with identical contents, 0 when
 * they differ (or are the same index), and a negative error code when
 * not called as a "get" or cmp_idx is out of range.
 */
static inline int do_compare_indexes(void *be_module_reg,
	unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
	unsigned int nb_elements, int get, const char *func)
{
	const uint8_t *regs = (const uint8_t *)be_module_reg;

	if (!get)
		return error_unsup_field(func);
	if (cmp_idx >= nb_elements)
		return error_index_too_large(func);
	if (idx == cmp_idx)
		return 0;
	return memcmp(regs + idx * type_size, regs + cmp_idx * type_size,
		      type_size) == 0 ? 1 : 0;
}
+
/* Return 1 if any of the n bytes at addr is non-zero, else 0. */
static inline int is_non_zero(const void *addr, size_t n)
{
	const uint8_t *bytes = (const uint8_t *)addr;

	while (n--) {
		if (*bytes++)
			return 1;
	}
	return 0;
}
+
/* Return 1 if all n bytes at addr are 0xff, else 0 (1 for n == 0). */
static inline int is_all_bits_set(const void *addr, size_t n)
{
	const uint8_t *bytes = (const uint8_t *)addr;

	while (n--) {
		if (*bytes++ != 0xff)
			return 0;
	}
	return 1;
}
+
/*
 * Per-engine bit positions used with the CTE enable bitmask
 * (see HW_CAT_CTE_ENABLE_BM); CT_CNT is the number of engines.
 * NOTE(review): the engine abbreviations (COL, COR, HSH, ...) match
 * module/field names used elsewhere in this header — confirm exact
 * semantics against the HW documentation.
 */
enum cte_index_e {
	CT_COL = 0,
	CT_COR = 1,
	CT_HSH = 2,
	CT_QSL = 3,
	CT_IPF = 4,
	CT_SLC = 5,
	CT_PDB = 6,
	CT_MSK = 7,
	CT_HST = 8,
	CT_EPP = 9,
	CT_TPE = 10,
	CT_RRB = 11,
	CT_CNT
};
+
/* Sideband info bit indicator */
#define SWX_INFO (1 << 6)

/*
 * Offset selectors used by extractors: DYN_* values select a dynamic
 * offset within the parsed frame (outer layers, then tunneled layers),
 * while the SB_* values carry the SWX_INFO flag and select sideband
 * data instead of a frame offset.
 */
enum frame_offs_e {
	DYN_SOF = 0,
	DYN_L2 = 1,
	DYN_FIRST_VLAN = 2,
	DYN_MPLS = 3,
	DYN_L3 = 4,
	DYN_ID_IPV4_6 = 5,
	DYN_FINAL_IP_DST = 6,
	DYN_L4 = 7,
	DYN_L4_PAYLOAD = 8,
	DYN_TUN_PAYLOAD = 9,
	DYN_TUN_L2 = 10,
	DYN_TUN_VLAN = 11,
	DYN_TUN_MPLS = 12,
	DYN_TUN_L3 = 13,
	DYN_TUN_ID_IPV4_6 = 14,
	DYN_TUN_FINAL_IP_DST = 15,
	DYN_TUN_L4 = 16,
	DYN_TUN_L4_PAYLOAD = 17,
	DYN_EOF = 18,
	DYN_L3_PAYLOAD_END = 19,
	DYN_TUN_L3_PAYLOAD_END = 20,
	SB_VNI = SWX_INFO | 1,
	SB_MAC_PORT = SWX_INFO | 2,
	SB_KCC_ID = SWX_INFO | 3
};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+	QW0_SEL_EXCLUDE = 0,
+	QW0_SEL_FIRST32 = 1,
+	QW0_SEL_SECOND32 = 2,
+	QW0_SEL_FIRST64 = 3,
+	QW0_SEL_ALL128 = 4,
+};
+
+enum {
+	QW4_SEL_EXCLUDE = 0,
+	QW4_SEL_FIRST32 = 1,
+	QW4_SEL_FIRST64 = 2,
+	QW4_SEL_ALL128 = 3,
+};
+
+enum {
+	SW8_SEL_EXCLUDE = 0,
+	SW8_SEL_FIRST16 = 1,
+	SW8_SEL_SECOND16 = 2,
+	SW8_SEL_ALL32 = 3,
+};
+
+enum {
+	DW8_SEL_EXCLUDE = 0,
+	DW8_SEL_FIRST16 = 1,
+	DW8_SEL_SECOND16 = 2,
+	DW8_SEL_FIRST32 = 3,
+	DW8_SEL_FIRST32_SWAP16 = 4,
+	DW8_SEL_ALL64 = 5,
+};
+
+enum {
+	SW9_SEL_EXCLUDE = 0,
+	SW9_SEL_FIRST16 = 1,
+	SW9_SEL_ALL32 = 2,
+};
+
+enum {
+	DW10_SEL_EXCLUDE = 0,
+	DW10_SEL_FIRST16 = 1,
+	DW10_SEL_FIRST32 = 2,
+	DW10_SEL_ALL64 = 3,
+};
+
+enum {
+	SWX_SEL_EXCLUDE = 0,
+	SWX_SEL_ALL32 = 1,
+};
+
+enum {
+	PROT_OTHER = 0,
+	PROT_L2_ETH2 = 1,
+	PROT_L2_SNAP = 2,
+	PROT_L2_LLC = 3,
+	PROT_L2_RAW = 4,
+	PROT_L2_PPPOE_D = 5,
+	PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+	PROT_TUN_IP_IN_IP = 1,
+	PROT_TUN_ETHER_IP = 2,
+	PROT_TUN_GREV0 = 3,
+	PROT_TUN_GREV1 = 4,
+	PROT_TUN_GTPV0U = 5,
+	PROT_TUN_GTPV1U = 6,
+	PROT_TUN_GTPV1C = 7,
+	PROT_TUN_GTPV2C = 8,
+	PROT_TUN_VXLAN = 9,
+	PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+	PROT_TUN_L4_OTHER = 0,
+	PROT_TUN_L4_TCP = 1,
+	PROT_TUN_L4_UDP = 2,
+	PROT_TUN_L4_SCTP = 3,
+	PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+	IP_FRAG_NOT_A_FRAG = 0,
+	IP_FRAG_FIRST = 1,
+	IP_FRAG_MIDDLE = 2,
+	IP_FRAG_LAST = 3
+};
+
+enum {
+	HASH_HASH_NONE = 0,
+	HASH_USER_DEFINED = 1,
+	HASH_LAST_MPLS_LABEL = 2,
+	HASH_ALL_MPLS_LABELS = 3,
+	HASH_2TUPLE = 4,
+	HASH_2TUPLESORTED = 5,
+	HASH_LAST_VLAN_ID = 6,
+	HASH_ALL_VLAN_IDS = 7,
+	HASH_5TUPLE = 8,
+	HASH_5TUPLESORTED = 9,
+	HASH_3TUPLE_GRE_V0 = 10,
+	HASH_3TUPLE_GRE_V0_SORTED = 11,
+	HASH_5TUPLE_SCTP = 12,
+	HASH_5TUPLE_SCTP_SORTED = 13,
+	HASH_3TUPLE_GTP_V0 = 14,
+	HASH_3TUPLE_GTP_V0_SORTED = 15,
+	HASH_3TUPLE_GTP_V1V2 = 16,
+	HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+	HASH_HASHINNER_2TUPLE = 18,
+	HASH_HASHINNER_2TUPLESORTED = 19,
+	HASH_HASHINNER_5TUPLE = 20,
+	HASH_HASHINNER_5TUPLESORTED = 21,
+	HASH_KM = 30,
+	HASH_ROUND_ROBIN = 31,
+	HASH_OUTER_DST_IP = 32,
+	HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+	CPY_SELECT_DSCP_IPV4 = 0,
+	CPY_SELECT_DSCP_IPV6 = 1,
+	CPY_SELECT_RQI_QFI = 2,
+	CPY_SELECT_IPV4 = 3,
+	CPY_SELECT_PORT = 4,
+	CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S         \
+	int ver;                   \
+	void *base;                \
+	unsigned int allocated_size; \
+	int debug
+
+struct common_func_s {
+	COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_cat_funcs;
+	uint32_t nb_flow_types;
+	uint32_t nb_pm_ext;
+	uint32_t nb_len;
+	uint32_t kcc_size;
+	uint32_t cts_num;
+	uint32_t kcc_banks;
+	uint32_t kcc_id_bit_size;
+	uint32_t kcc_records;
+	uint32_t km_if_count;
+	int32_t km_if_m0;
+	int32_t km_if_m1;
+
+	union {
+		struct hw_mod_cat_v18_s v18;
+		struct hw_mod_cat_v21_s v21;
+		struct hw_mod_cat_v22_s v22;
+	};
+};
+
+enum hw_cat_e {
+	/*
+	 *  functions initial CAT v18
+	 */
+	/* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+	/* 01 */ HW_CAT_CFN_PRESET_ALL,
+	/* 02 */ HW_CAT_CFN_COMPARE,
+	/* 03 */ HW_CAT_CFN_FIND,
+	/* 04 */ HW_CAT_CFN_COPY_FROM,
+	/* 05 */ HW_CAT_COT_PRESET_ALL,
+	/* 06 */ HW_CAT_COT_COMPARE,
+	/* 07 */ HW_CAT_COT_FIND,
+	/* fields */
+	/* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+	/* 01 */ HW_CAT_CFN_INV,
+	/* 02 */ HW_CAT_CFN_PTC_INV,
+	/* 03 */ HW_CAT_CFN_PTC_ISL,
+	/* 04 */ HW_CAT_CFN_PTC_CFP,
+	/* 05 */ HW_CAT_CFN_PTC_MAC,
+	/* 06 */ HW_CAT_CFN_PTC_L2,
+	/* 07 */ HW_CAT_CFN_PTC_VNTAG,
+	/* 08 */ HW_CAT_CFN_PTC_VLAN,
+	/* 09 */ HW_CAT_CFN_PTC_MPLS,
+	/* 10 */ HW_CAT_CFN_PTC_L3,
+	/* 11 */ HW_CAT_CFN_PTC_FRAG,
+	/* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+	/* 13 */ HW_CAT_CFN_PTC_L4,
+	/* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+	/* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+	/* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+	/* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+	/* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+	/* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+	/* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+	/* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+	/* 22 */ HW_CAT_CFN_ERR_INV,
+	/* 23 */ HW_CAT_CFN_ERR_CV,
+	/* 24 */ HW_CAT_CFN_ERR_FCS,
+	/* 25 */ HW_CAT_CFN_ERR_TRUNC,
+	/* 26 */ HW_CAT_CFN_ERR_L3_CS,
+	/* 27 */ HW_CAT_CFN_ERR_L4_CS,
+	/* 28 */ HW_CAT_CFN_MAC_PORT,
+	/* 29 */ HW_CAT_CFN_PM_CMP,
+	/* 30 */ HW_CAT_CFN_PM_DCT,
+	/* 31 */ HW_CAT_CFN_PM_EXT_INV,
+	/* 32 */ HW_CAT_CFN_PM_CMB,
+	/* 33 */ HW_CAT_CFN_PM_AND_INV,
+	/* 34 */ HW_CAT_CFN_PM_OR_INV,
+	/* 35 */ HW_CAT_CFN_PM_INV,
+	/* 36 */ HW_CAT_CFN_LC,
+	/* 37 */ HW_CAT_CFN_LC_INV,
+	/* 38 */ HW_CAT_CFN_KM0_OR,
+	/* 39 */ HW_CAT_CFN_KM1_OR,
+	/* 40 */ HW_CAT_KCE_ENABLE_BM,
+	/* 41 */ HW_CAT_KCS_CATEGORY,
+	/* 42 */ HW_CAT_FTE_ENABLE_BM,
+	/* 43 */ HW_CAT_CTE_ENABLE_BM,
+	/* 44 */ HW_CAT_CTS_CAT_A,
+	/* 45 */ HW_CAT_CTS_CAT_B,
+	/* 46 */ HW_CAT_COT_COLOR,
+	/* 47 */ HW_CAT_COT_KM,
+	/* 48 */ HW_CAT_CCT_COLOR,
+	/* 49 */ HW_CAT_CCT_KM,
+	/* 50 */ HW_CAT_KCC_KEY,
+	/* 51 */ HW_CAT_KCC_CATEGORY,
+	/* 52 */ HW_CAT_KCC_ID,
+	/* 53 */ HW_CAT_EXO_DYN,
+	/* 54 */ HW_CAT_EXO_OFS,
+	/* 55 */ HW_CAT_RCK_DATA,
+	/* 56 */ HW_CAT_LEN_LOWER,
+	/* 57 */ HW_CAT_LEN_UPPER,
+	/* 58 */ HW_CAT_LEN_DYN1,
+	/* 59 */ HW_CAT_LEN_DYN2,
+	/* 60 */ HW_CAT_LEN_INV,
+	/* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+	/* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+	/* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+	/* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+	/* 65 */ HW_CAT_CCE_IMM,
+	/* 66 */ HW_CAT_CCE_IND,
+	/* 67 */ HW_CAT_CCS_COR_EN,
+	/* 68 */ HW_CAT_CCS_COR,
+	/* 69 */ HW_CAT_CCS_HSH_EN,
+	/* 70 */ HW_CAT_CCS_HSH,
+	/* 71 */ HW_CAT_CCS_QSL_EN,
+	/* 72 */ HW_CAT_CCS_QSL,
+	/* 73 */ HW_CAT_CCS_IPF_EN,
+	/* 74 */ HW_CAT_CCS_IPF,
+	/* 75 */ HW_CAT_CCS_SLC_EN,
+	/* 76 */ HW_CAT_CCS_SLC,
+	/* 77 */ HW_CAT_CCS_PDB_EN,
+	/* 78 */ HW_CAT_CCS_PDB,
+	/* 79 */ HW_CAT_CCS_MSK_EN,
+	/* 80 */ HW_CAT_CCS_MSK,
+	/* 81 */ HW_CAT_CCS_HST_EN,
+	/* 82 */ HW_CAT_CCS_HST,
+	/* 83 */ HW_CAT_CCS_EPP_EN,
+	/* 84 */ HW_CAT_CCS_EPP,
+	/* 85 */ HW_CAT_CCS_TPE_EN,
+	/* 86 */ HW_CAT_CCS_TPE,
+	/* 87 */ HW_CAT_CCS_RRB_EN,
+	/* 88 */ HW_CAT_CCS_RRB,
+	/* 89 */ HW_CAT_CCS_SB0_TYPE,
+	/* 90 */ HW_CAT_CCS_SB0_DATA,
+	/* 91 */ HW_CAT_CCS_SB1_TYPE,
+	/* 92 */ HW_CAT_CCS_SB1_DATA,
+	/* 93 */ HW_CAT_CCS_SB2_TYPE,
+	/* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value);
+
+struct km_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_cam_banks;
+	uint32_t nb_cam_record_words;
+	uint32_t nb_cam_records;
+	uint32_t nb_tcam_banks;
+	uint32_t nb_tcam_bank_width;
+	/* not read from backend, but rather set using version */
+	uint32_t nb_km_rcp_mask_a_word_size;
+	uint32_t nb_km_rcp_mask_b_word_size;
+	union {
+		struct hw_mod_km_v7_s v7;
+	};
+};
+
+enum hw_km_e {
+	/* functions */
+	HW_KM_RCP_PRESET_ALL = 0,
+	HW_KM_CAM_PRESET_ALL,
+	/* to sync and reset hw with cache - force write all entries in a bank */
+	HW_KM_TCAM_BANK_RESET,
+	/* fields */
+	HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+	HW_KM_RCP_QW0_OFS,
+	HW_KM_RCP_QW0_SEL_A,
+	HW_KM_RCP_QW0_SEL_B,
+	HW_KM_RCP_QW4_DYN,
+	HW_KM_RCP_QW4_OFS,
+	HW_KM_RCP_QW4_SEL_A,
+	HW_KM_RCP_QW4_SEL_B,
+	HW_KM_RCP_DW8_DYN,
+	HW_KM_RCP_DW8_OFS,
+	HW_KM_RCP_DW8_SEL_A,
+	HW_KM_RCP_DW8_SEL_B,
+	HW_KM_RCP_DW10_DYN,
+	HW_KM_RCP_DW10_OFS,
+	HW_KM_RCP_DW10_SEL_A,
+	HW_KM_RCP_DW10_SEL_B,
+	HW_KM_RCP_SWX_CCH,
+	HW_KM_RCP_SWX_SEL_A,
+	HW_KM_RCP_SWX_SEL_B,
+	HW_KM_RCP_MASK_A,
+	HW_KM_RCP_MASK_B,
+	HW_KM_RCP_DUAL,
+	HW_KM_RCP_PAIRED,
+	HW_KM_RCP_EL_A,
+	HW_KM_RCP_EL_B,
+	HW_KM_RCP_INFO_A,
+	HW_KM_RCP_INFO_B,
+	HW_KM_RCP_FTM_A,
+	HW_KM_RCP_FTM_B,
+	HW_KM_RCP_BANK_A,
+	HW_KM_RCP_BANK_B,
+	HW_KM_RCP_KL_A,
+	HW_KM_RCP_KL_B,
+	HW_KM_RCP_KEYWAY_A,
+	HW_KM_RCP_KEYWAY_B,
+	HW_KM_RCP_SYNERGY_MODE,
+	HW_KM_RCP_DW0_B_DYN,
+	HW_KM_RCP_DW0_B_OFS,
+	HW_KM_RCP_DW2_B_DYN,
+	HW_KM_RCP_DW2_B_OFS,
+	HW_KM_RCP_SW4_B_DYN,
+	HW_KM_RCP_SW4_B_OFS,
+	HW_KM_RCP_SW5_B_DYN,
+	HW_KM_RCP_SW5_B_OFS,
+	HW_KM_CAM_W0,
+	HW_KM_CAM_W1,
+	HW_KM_CAM_W2,
+	HW_KM_CAM_W3,
+	HW_KM_CAM_W4,
+	HW_KM_CAM_W5,
+	HW_KM_CAM_FT0,
+	HW_KM_CAM_FT1,
+	HW_KM_CAM_FT2,
+	HW_KM_CAM_FT3,
+	HW_KM_CAM_FT4,
+	HW_KM_CAM_FT5,
+	HW_KM_TCAM_T,
+	HW_KM_TCI_COLOR,
+	HW_KM_TCI_FT,
+	HW_KM_TCQ_BANK_MASK,
+	HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_hst_rcp_categories;
+	union {
+		struct hw_mod_hst_v2_s v2;
+	};
+};
+
+enum hw_hst_e {
+	/* functions */
+	HW_HST_RCP_PRESET_ALL = 0,
+	HW_HST_RCP_FIND,
+	HW_HST_RCP_COMPARE,
+	/* Control fields */
+	HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+	HW_HST_RCP_START_DYN,
+	HW_HST_RCP_START_OFS,
+	HW_HST_RCP_END_DYN,
+	HW_HST_RCP_END_OFS,
+	HW_HST_RCP_MODIF0_CMD,
+	HW_HST_RCP_MODIF0_DYN,
+	HW_HST_RCP_MODIF0_OFS,
+	HW_HST_RCP_MODIF0_VALUE,
+	HW_HST_RCP_MODIF1_CMD,
+	HW_HST_RCP_MODIF1_DYN,
+	HW_HST_RCP_MODIF1_OFS,
+	HW_HST_RCP_MODIF1_VALUE,
+	HW_HST_RCP_MODIF2_CMD,
+	HW_HST_RCP_MODIF2_DYN,
+	HW_HST_RCP_MODIF2_OFS,
+	HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value);
+
+struct flm_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_categories;
+	uint32_t nb_size_mb;
+	uint32_t nb_entry_size;
+	uint32_t nb_variant;
+	uint32_t nb_prios;
+	uint32_t nb_pst_profiles;
+	union {
+		struct hw_mod_flm_v17_s v17;
+		struct hw_mod_flm_v20_s v20;
+	};
+};
+
+enum hw_flm_e {
+	/* functions */
+	HW_FLM_CONTROL_PRESET_ALL = 0,
+	HW_FLM_RCP_PRESET_ALL,
+	HW_FLM_FLOW_LRN_DATA_V17,
+	HW_FLM_FLOW_INF_DATA_V17,
+	HW_FLM_FLOW_STA_DATA_V17,
+	/* Control fields */
+	HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+	HW_FLM_CONTROL_INIT,
+	HW_FLM_CONTROL_LDS,
+	HW_FLM_CONTROL_LFS,
+	HW_FLM_CONTROL_LIS,
+	HW_FLM_CONTROL_UDS,
+	HW_FLM_CONTROL_UIS,
+	HW_FLM_CONTROL_RDS,
+	HW_FLM_CONTROL_RIS,
+	HW_FLM_CONTROL_PDS,
+	HW_FLM_CONTROL_PIS,
+	HW_FLM_CONTROL_CRCWR,
+	HW_FLM_CONTROL_CRCRD,
+	HW_FLM_CONTROL_RBL,
+	HW_FLM_CONTROL_EAB,
+	HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+	HW_FLM_STATUS_CALIBDONE,
+	HW_FLM_STATUS_INITDONE,
+	HW_FLM_STATUS_IDLE,
+	HW_FLM_STATUS_CRITICAL,
+	HW_FLM_STATUS_PANIC,
+	HW_FLM_STATUS_CRCERR,
+	HW_FLM_STATUS_EFT_BP,
+	HW_FLM_TIMEOUT_T,
+	HW_FLM_SCRUB_I,
+	HW_FLM_LOAD_BIN,
+	HW_FLM_LOAD_PPS,
+	HW_FLM_LOAD_LPS,
+	HW_FLM_LOAD_APS,
+	HW_FLM_PRIO_LIMIT0,
+	HW_FLM_PRIO_FT0,
+	HW_FLM_PRIO_LIMIT1,
+	HW_FLM_PRIO_FT1,
+	HW_FLM_PRIO_LIMIT2,
+	HW_FLM_PRIO_FT2,
+	HW_FLM_PRIO_LIMIT3,
+	HW_FLM_PRIO_FT3,
+	HW_FLM_PST_PRESET_ALL,
+	HW_FLM_PST_BP,
+	HW_FLM_PST_PP,
+	HW_FLM_PST_TP,
+	HW_FLM_RCP_LOOKUP,
+	HW_FLM_RCP_QW0_DYN,
+	HW_FLM_RCP_QW0_OFS,
+	HW_FLM_RCP_QW0_SEL,
+	HW_FLM_RCP_QW4_DYN,
+	HW_FLM_RCP_QW4_OFS,
+	HW_FLM_RCP_SW8_DYN,
+	HW_FLM_RCP_SW8_OFS,
+	HW_FLM_RCP_SW8_SEL,
+	HW_FLM_RCP_SW9_DYN,
+	HW_FLM_RCP_SW9_OFS,
+	HW_FLM_RCP_MASK,
+	HW_FLM_RCP_KID,
+	HW_FLM_RCP_OPN,
+	HW_FLM_RCP_IPN,
+	HW_FLM_RCP_BYT_DYN,
+	HW_FLM_RCP_BYT_OFS,
+	HW_FLM_RCP_TXPLM,
+	HW_FLM_RCP_AUTO_IPV4_MASK,
+	HW_FLM_BUF_CTRL_LRN_FREE,
+	HW_FLM_BUF_CTRL_INF_AVAIL,
+	HW_FLM_BUF_CTRL_STA_AVAIL,
+	HW_FLM_STAT_LRN_DONE,
+	HW_FLM_STAT_LRN_IGNORE,
+	HW_FLM_STAT_LRN_FAIL,
+	HW_FLM_STAT_UNL_DONE,
+	HW_FLM_STAT_UNL_IGNORE,
+	HW_FLM_STAT_REL_DONE,
+	HW_FLM_STAT_REL_IGNORE,
+	HW_FLM_STAT_PRB_DONE,
+	HW_FLM_STAT_PRB_IGNORE,
+	HW_FLM_STAT_AUL_DONE,
+	HW_FLM_STAT_AUL_IGNORE,
+	HW_FLM_STAT_AUL_FAIL,
+	HW_FLM_STAT_TUL_DONE,
+	HW_FLM_STAT_FLOWS,
+	HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+	HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+	HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+	HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+/* FLM module API: presence query plus allocate/free/reset of the driver-side
+ * shadow state kept in struct flow_api_backend_s.
+ */
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+/* Register-group accessors follow a common pattern: _set/_get operate on the
+ * shadow copy (field selected by enum hw_flm_e), _flush writes the shadow to
+ * hardware via the backend ops, and _update reads back from hardware.
+ * All return 0 on success; negative on error (field/version mismatch).
+ */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value);
+
+/* LOAD_BIN/PPS/LPS/APS: load measurement registers (see HW_FLM_LOAD_* fields). */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* PST and RCP are indexed tables; flush takes a [start_idx, start_idx+count)
+ * range and set/get address a single entry.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+/* set_mask takes a word array (e.g. for HW_FLM_RCP_MASK) rather than a scalar. */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value);
+
+/* Read-only ring/statistics state: update refreshes from HW, get reads shadow. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value);
+
+/* Learn/info/status data streams; word_cnt gives the number of 32-bit words
+ * transferred for inf records.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value);
+
+/* HSH module shadow state: common bookkeeping (COMMON_FUNC_INFO_S) plus a
+ * union of version-specific register images (only v5 supported here).
+ */
+struct hsh_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp;	/* number of RCP (recipe) entries */
+	union {
+		struct hw_mod_hsh_v5_s v5;
+	};
+};
+
+/* Selector IDs for the HSH API: values below FIELD_START_INDEX are
+ * operations (preset/compare/find), the rest address individual RCP fields.
+ */
+enum hw_hsh_e {
+	/* functions */
+	HW_HSH_RCP_PRESET_ALL = 0,
+	HW_HSH_RCP_COMPARE,
+	HW_HSH_RCP_FIND,
+	/* fields */
+	HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+	HW_HSH_RCP_MAC_PORT_MASK,
+	HW_HSH_RCP_SORT,
+	HW_HSH_RCP_QW0_PE,
+	HW_HSH_RCP_QW0_OFS,
+	HW_HSH_RCP_QW4_PE,
+	HW_HSH_RCP_QW4_OFS,
+	HW_HSH_RCP_W8_PE,
+	HW_HSH_RCP_W8_OFS,
+	HW_HSH_RCP_W8_SORT,
+	HW_HSH_RCP_W9_PE,
+	HW_HSH_RCP_W9_OFS,
+	HW_HSH_RCP_W9_SORT,
+	HW_HSH_RCP_W9_P,
+	HW_HSH_RCP_P_MASK,
+	HW_HSH_RCP_WORD_MASK,
+	HW_HSH_RCP_SEED,
+	HW_HSH_RCP_TNL_P,
+	HW_HSH_RCP_HSH_VALID,
+	HW_HSH_RCP_HSH_TYPE,
+	HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+/* HSH module API; RCP set/get take both an entry index and a word offset
+ * (for multi-word fields such as HW_HSH_RCP_WORD_MASK).
+ */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value);
+
+/* QSL module shadow state (queue selection); v7 register layout only. */
+struct qsl_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of RCP entries */
+	uint32_t nb_qst_entries;	/* number of QST table entries */
+	union {
+		struct hw_mod_qsl_v7_s v7;
+	};
+};
+
+/* Selector IDs for the QSL API: operations first, register fields from
+ * FIELD_START_INDEX onward (RCP, QST, QEN and UNMQ register groups).
+ */
+enum hw_qsl_e {
+	/* functions */
+	HW_QSL_RCP_PRESET_ALL = 0,
+	HW_QSL_RCP_COMPARE,
+	HW_QSL_RCP_FIND,
+	HW_QSL_QST_PRESET_ALL,
+	/* fields */
+	HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+	HW_QSL_RCP_DROP,
+	HW_QSL_RCP_TBL_LO,
+	HW_QSL_RCP_TBL_HI,
+	HW_QSL_RCP_TBL_IDX,
+	HW_QSL_RCP_TBL_MSK,
+	HW_QSL_RCP_LR,
+	HW_QSL_RCP_TSA,
+	HW_QSL_RCP_VLI,
+	HW_QSL_QST_QUEUE,
+	HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+	HW_QSL_QST_TX_PORT,
+	HW_QSL_QST_LRE,
+	HW_QSL_QST_TCI,
+	HW_QSL_QST_VEN,
+	HW_QSL_QEN_EN,
+	HW_QSL_UNMQ_DEST_QUEUE,
+	HW_QSL_UNMQ_EN,
+
+};
+
+/* QSL module API: one set/get/flush triple per register table. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value);
+
+/* SLC module shadow state (slicer); v1 register layout only. */
+struct slc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_v1_s v1;
+	};
+};
+
+/* Selector IDs for the SLC API: operations, then RCP fields. */
+enum hw_slc_e {
+	/* functions */
+	HW_SLC_RCP_PRESET_ALL = 0,
+	HW_SLC_RCP_COMPARE,
+	HW_SLC_RCP_FIND,
+	/* fields */
+	HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_RCP_DYN,
+	HW_SLC_RCP_OFS,
+	HW_SLC_RCP_PCAP
+};
+
+/* SLC module API. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value);
+
+/* SLC LR module shadow state; mirrors SLC with its own v2 register layout. */
+struct slc_lr_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_slc_lr_v2_s v2;
+	};
+};
+
+/* Selector IDs for the SLC LR API: same field set as enum hw_slc_e. */
+enum hw_slc_lr_e {
+	/* functions */
+	HW_SLC_LR_RCP_PRESET_ALL = 0,
+	HW_SLC_LR_RCP_COMPARE,
+	HW_SLC_LR_RCP_FIND,
+	/* fields */
+	HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+	HW_SLC_LR_RCP_DYN,
+	HW_SLC_LR_RCP_OFS,
+	HW_SLC_LR_RCP_PCAP
+};
+
+/* SLC LR module API. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value);
+
+/* PDB module shadow state (packet descriptor builder); v9 layout only. */
+struct pdb_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_pdb_rcp_categories;	/* number of PDB RCP entries */
+
+	union {
+		struct hw_mod_pdb_v9_s v9;
+	};
+};
+
+/* Selector IDs for the PDB API: RCP fields plus global CONFIG fields. */
+enum hw_pdb_e {
+	/* functions */
+	HW_PDB_RCP_PRESET_ALL = 0,
+	HW_PDB_RCP_COMPARE,
+	HW_PDB_RCP_FIND,
+	/* fields */
+	HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+	HW_PDB_RCP_DESC_LEN,
+	HW_PDB_RCP_TX_PORT,
+	HW_PDB_RCP_TX_IGNORE,
+	HW_PDB_RCP_TX_NOW,
+	HW_PDB_RCP_CRC_OVERWRITE,
+	HW_PDB_RCP_ALIGN,
+	HW_PDB_RCP_OFS0_DYN,
+	HW_PDB_RCP_OFS0_REL,
+	HW_PDB_RCP_OFS1_DYN,
+	HW_PDB_RCP_OFS1_REL,
+	HW_PDB_RCP_OFS2_DYN,
+	HW_PDB_RCP_OFS2_REL,
+	HW_PDB_RCP_IP_PROT_TNL,
+	HW_PDB_RCP_PPC_HSH,
+	HW_PDB_RCP_DUPLICATE_EN,
+	HW_PDB_RCP_DUPLICATE_BIT,
+	HW_PDB_RCP_PCAP_KEEP_FCS,
+	HW_PDB_CONFIG_TS_FORMAT,
+	HW_PDB_CONFIG_PORT_OFS,
+};
+
+/* PDB module API; note CONFIG has set/flush but no get here. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value);
+
+/* IOA module shadow state; v4 register layout only. */
+struct ioa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of RCP entries */
+	uint32_t nb_roa_epp_entries;	/* number of ROA EPP entries */
+	union {
+		struct hw_mod_ioa_v4_s v4;
+	};
+};
+
+/* Selector IDs for the IOA API: RCP, CONFIG and ROA EPP register groups. */
+enum hw_ioa_e {
+	/* functions */
+	HW_IOA_RCP_PRESET_ALL = 0,
+	HW_IOA_RCP_COMPARE,
+	HW_IOA_RCP_FIND,
+	HW_IOA_ROA_EPP_PRESET_ALL,
+	HW_IOA_ROA_EPP_COMPARE,
+	HW_IOA_ROA_EPP_FIND,
+	/* fields */
+	HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+	HW_IOA_RCP_VLAN_POP,
+	HW_IOA_RCP_VLAN_PUSH,
+	HW_IOA_RCP_VLAN_VID,
+	HW_IOA_RCP_VLAN_DEI,
+	HW_IOA_RCP_VLAN_PCP,
+	HW_IOA_RCP_VLAN_TPID_SEL,
+	HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+	HW_IOA_RCP_QUEUE_ID,
+	HW_IOA_CONFIG_CUST_TPID_0,
+	HW_IOA_CONFIG_CUST_TPID_1,
+	HW_IOA_ROA_EPP_PUSH_TUNNEL,
+	HW_IOA_ROA_EPP_TX_PORT,
+};
+
+/* IOA module API. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value);
+
+/* ROA EPP table hosted in the IOA module (indexed by nb_roa_epp_entries). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+
+/* ROA module shadow state; v6 register layout only. */
+struct roa_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_tun_categories;	/* number of tunnel config entries */
+	uint32_t nb_lag_entries;	/* number of LAG config entries */
+	union {
+		struct hw_mod_roa_v6_s v6;
+	};
+};
+
+/* Selector IDs for the ROA API: tunnel header/config, global config,
+ * LAG config and IGS/RCC drop counters.
+ */
+enum hw_roa_e {
+	/* functions */
+	HW_ROA_TUNHDR_COMPARE = 0,
+	HW_ROA_TUNCFG_PRESET_ALL,
+	HW_ROA_TUNCFG_COMPARE,
+	HW_ROA_TUNCFG_FIND,
+	/* fields */
+	HW_ROA_TUNHDR = FIELD_START_INDEX,
+	HW_ROA_TUNCFG_TUN_LEN,
+	HW_ROA_TUNCFG_TUN_TYPE,
+	HW_ROA_TUNCFG_TUN_VLAN,
+	HW_ROA_TUNCFG_IP_TYPE,
+	HW_ROA_TUNCFG_IPCS_UPD,
+	HW_ROA_TUNCFG_IPCS_PRECALC,
+	HW_ROA_TUNCFG_IPTL_UPD,
+	HW_ROA_TUNCFG_IPTL_PRECALC,
+	HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+	HW_ROA_TUNCFG_TX_LAG_IX,
+	HW_ROA_TUNCFG_RECIRCULATE,
+	HW_ROA_TUNCFG_PUSH_TUNNEL,
+	HW_ROA_TUNCFG_RECIRC_PORT,
+	HW_ROA_TUNCFG_RECIRC_BYPASS,
+	HW_ROA_CONFIG_FWD_RECIRCULATE,
+	HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+	HW_ROA_CONFIG_FWD_TXPORT0,
+	HW_ROA_CONFIG_FWD_TXPORT1,
+	HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+	HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+	HW_ROA_LAGCFG_TXPHY_PORT,
+	HW_ROA_IGS_PKT_DROP,
+	HW_ROA_IGS_BYTE_DROP,
+	HW_ROA_RCC_PKT_DROP,
+	HW_ROA_RCC_BYTE_DROP,
+};
+
+/* ROA module API; tunhdr set/get are word-addressed (index + word_off). */
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value);
+/* IGS/RCC packet and byte drop counters (single registers, no index). */
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			   uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			    uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+/* RMC module shadow state; v1.3 register layout only. */
+struct rmc_func_s {
+	COMMON_FUNC_INFO_S;
+	union {
+		struct hw_mod_rmc_v1_3_s v1_3;
+	};
+};
+
+/* Selector IDs for the RMC CTRL register; unlike the other modules this
+ * enum has no function values, only fields (starting at FIELD_START_INDEX).
+ */
+enum hw_rmc_e {
+	HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+	HW_RMC_BLOCK_KEEPA,
+	HW_RMC_BLOCK_RPP_SLICE,
+	HW_RMC_BLOCK_MAC_PORT,
+	HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+/* RMC module API: single CTRL register, no indexed tables. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+/* TPE module shadow state (TX packet editor); supports two register
+ * layout versions (v1 and v2) selected at runtime.
+ */
+struct tpe_func_s {
+	COMMON_FUNC_INFO_S;
+	uint32_t nb_rcp_categories;	/* number of RCP entries */
+	uint32_t nb_ifr_categories;	/* number of IFR entries */
+	uint32_t nb_cpy_writers;	/* number of CPY writer instances */
+	uint32_t nb_rpl_depth;		/* replace table depth */
+	uint32_t nb_rpl_ext_categories;	/* replace extension entries */
+	union {
+		struct hw_mod_tpe_v1_s v1;
+		struct hw_mod_tpe_v2_s v2;
+	};
+};
+
+/* Selector IDs for the TPE API, covering the RPP/IFR/INS/RPL/CPY/HFU/CSU
+ * sub-block register fields.
+ */
+enum hw_tpe_e {
+	/* functions */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_EN,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_HFU_RCP_CS_INF,
+	HW_TPE_HFU_RCP_L3_PRT,
+	HW_TPE_HFU_RCP_L3_FRAG,
+	HW_TPE_HFU_RCP_TUNNEL,
+	HW_TPE_HFU_RCP_L4_PRT,
+	HW_TPE_HFU_RCP_OUTER_L3_OFS,
+	HW_TPE_HFU_RCP_OUTER_L4_OFS,
+	HW_TPE_HFU_RCP_INNER_L3_OFS,
+	HW_TPE_HFU_RCP_INNER_L4_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+/* TPE module API: one set/get/flush triple per sub-block table; all tables
+ * are flushed in [start_idx, start_idx+count) ranges.
+ */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* rpl_rpl set takes a word array (replace data), unlike the scalar setters. */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value);
+
+/* Backend debug flags (bit values); WRITE enables tracing of register writes. */
+enum debug_mode_e {
+	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+	FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+/*
+ * Backend operation table implemented by the hardware (or test) backend.
+ * 'dev'/'be_dev' is the opaque backend handle stored in
+ * struct flow_api_backend_s. The get_nb_* callbacks report FPGA resource
+ * capacities; per-module *_flush callbacks write a range of shadow entries
+ * to hardware, *_update callbacks read hardware state back.
+ */
+struct flow_api_backend_ops {
+	int version;	/* backend interface version for compatibility checks */
+	int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+	/* capacity/attribute queries */
+	int (*get_nb_phy_port)(void *dev);
+	int (*get_nb_rx_port)(void *dev);
+	int (*get_ltx_avail)(void *dev);
+	int (*get_nb_cat_funcs)(void *dev);
+	int (*get_nb_categories)(void *dev);
+	int (*get_nb_cat_km_if_cnt)(void *dev);
+	int (*get_nb_cat_km_if_m0)(void *dev);
+	int (*get_nb_cat_km_if_m1)(void *dev);
+
+	int (*get_nb_queues)(void *dev);
+	int (*get_nb_km_flow_types)(void *dev);
+	int (*get_nb_pm_ext)(void *dev);
+	int (*get_nb_len)(void *dev);
+	int (*get_kcc_size)(void *dev);
+	int (*get_kcc_banks)(void *dev);
+	int (*get_nb_km_categories)(void *dev);
+	int (*get_nb_km_cam_banks)(void *dev);
+	int (*get_nb_km_cam_record_words)(void *dev);
+	int (*get_nb_km_cam_records)(void *dev);
+	int (*get_nb_km_tcam_banks)(void *dev);
+	int (*get_nb_km_tcam_bank_width)(void *dev);
+	int (*get_nb_flm_categories)(void *dev);
+	int (*get_nb_flm_size_mb)(void *dev);
+	int (*get_nb_flm_entry_size)(void *dev);
+	int (*get_nb_flm_variant)(void *dev);
+	int (*get_nb_flm_prios)(void *dev);
+	int (*get_nb_flm_pst_profiles)(void *dev);
+	int (*get_nb_hst_categories)(void *dev);
+	int (*get_nb_qsl_categories)(void *dev);
+	int (*get_nb_qsl_qst_entries)(void *dev);
+	int (*get_nb_pdb_categories)(void *dev);
+	int (*get_nb_ioa_categories)(void *dev);
+	int (*get_nb_roa_categories)(void *dev);
+	int (*get_nb_tpe_categories)(void *dev);
+	int (*get_nb_tx_cpy_writers)(void *dev);
+	int (*get_nb_tx_cpy_mask_mem)(void *dev);
+	int (*get_nb_tx_rpl_depth)(void *dev);
+	int (*get_nb_tx_rpl_ext_categories)(void *dev);
+	int (*get_nb_tpe_ifr_categories)(void *dev);
+
+	/* RX queue management */
+	int (*alloc_rx_queue)(void *dev, int queue_id);
+	int (*free_rx_queue)(void *dev, int hw_queue);
+
+	/* CAT */
+	bool (*get_cat_present)(void *dev);
+	uint32_t (*get_cat_version)(void *dev);
+	int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int cat_func, int cnt);
+	int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+			     int km_if_idx, int index, int cnt);
+	int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+			     int cat_func, int cnt);
+	int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+	int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+			     int cnt);
+
+	/* KM */
+	bool (*get_km_present)(void *dev);
+	uint32_t (*get_km_version)(void *dev);
+	int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+			    int cnt);
+	int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+			     int byte, int value, int cnt);
+	int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+	int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+			    int record, int cnt);
+
+	/* FLM */
+	bool (*get_flm_present)(void *dev);
+	uint32_t (*get_flm_version)(void *dev);
+	int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+	int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+			     int cnt);
+	int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+	int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+	/* learn/info/status data transfers; size is in 32-bit words —
+	 * NOTE(review): confirm unit against backend implementation.
+	 */
+	int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+				  const uint32_t *lrn_data, uint32_t size);
+	int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+	int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+				   uint32_t *lrn_data, uint32_t size);
+
+	/* HSH */
+	bool (*get_hsh_present)(void *dev);
+	uint32_t (*get_hsh_version)(void *dev);
+	int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+			     int category, int cnt);
+
+	/* HST */
+	bool (*get_hst_present)(void *dev);
+	uint32_t (*get_hst_version)(void *dev);
+	int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+			     int category, int cnt);
+
+	/* QSL */
+	bool (*get_qsl_present)(void *dev);
+	uint32_t (*get_qsl_version)(void *dev);
+	int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+			     int category, int cnt);
+	int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+			     int cnt);
+	int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+			      int entry, int cnt);
+
+	/* SLC */
+	bool (*get_slc_present)(void *dev);
+	uint32_t (*get_slc_version)(void *dev);
+	int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+			     int category, int cnt);
+
+	/* SLC LR */
+	bool (*get_slc_lr_present)(void *dev);
+	uint32_t (*get_slc_lr_version)(void *dev);
+	int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+				int category, int cnt);
+
+	/* PDB */
+	bool (*get_pdb_present)(void *dev);
+	uint32_t (*get_pdb_version)(void *dev);
+	int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+			     int category, int cnt);
+	int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+	/* IOA */
+	bool (*get_ioa_present)(void *dev);
+	uint32_t (*get_ioa_version)(void *dev);
+	int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+			     int cnt);
+	int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+	int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+				 int index, int cnt);
+
+	/* ROA */
+	bool (*get_roa_present)(void *dev);
+	uint32_t (*get_roa_version)(void *dev);
+	int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+	int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+	int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+				int index, int cnt);
+
+	/* RMC */
+	bool (*get_rmc_present)(void *dev);
+	uint32_t (*get_rmc_version)(void *dev);
+	int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+	/* TPE */
+	bool (*get_tpe_present)(void *dev);
+	uint32_t (*get_tpe_version)(void *dev);
+	int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				     int index, int cnt);
+	int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+	int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+				 int index, int cnt);
+};
+
+/*
+ * Top-level backend context: the opaque device handle, the op table, and
+ * one shadow-state struct per flow-filter FPGA module, plus cached NIC
+ * attributes and resource capacities.
+ */
+struct flow_api_backend_s {
+	void *be_dev;	/* opaque handle passed to every op in 'iface' */
+	const struct flow_api_backend_ops *iface;
+
+	/* flow filter FPGA modules */
+	struct cat_func_s cat;
+	struct km_func_s km;
+	struct flm_func_s flm;
+	struct hsh_func_s hsh;
+	struct hst_func_s hst;
+	struct qsl_func_s qsl;
+	struct slc_func_s slc;
+	struct slc_lr_func_s slc_lr;
+	struct pdb_func_s pdb;
+	struct ioa_func_s ioa;
+	struct roa_func_s roa;
+	struct rmc_func_s rmc;
+	struct tpe_func_s tpe;
+
+	/* NIC attributes */
+	unsigned int num_phy_ports;
+	unsigned int num_rx_ports;
+
+	/* flow filter resource capacities */
+	unsigned int max_categories;
+	unsigned int max_queues;
+};
+
+/* Backend lifecycle: init binds 'iface' and 'be_dev' into 'dev' and sets up
+ * the module shadow states; reset restores defaults; done releases resources.
+ */
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+			  const struct flow_api_backend_ops *iface,
+			  void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
new file mode 100644
index 0000000000..b63730c07e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_engine.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ *                Resource management
+ * ****************************************************
+ */
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are free resources in FPGA
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them
+ */
+/* Resource pool identifiers; RES_COUNT sizes the per-flow resource table
+ * (see struct flow_handle::resource[RES_COUNT]).
+ */
+enum res_type_e {
+	RES_QUEUE,
+	RES_CAT_CFN,
+	RES_CAT_COT,
+	RES_CAT_EXO,
+	RES_CAT_LEN,
+	RES_KM_FLOW_TYPE,
+	RES_KM_CATEGORY,
+	RES_HSH_RCP,
+	RES_PDB_RCP,
+	RES_QSL_RCP,
+	RES_QSL_QST,
+	RES_SLC_RCP,
+	RES_IOA_RCP,
+	RES_ROA_RCP,
+	RES_FLM_FLOW_TYPE,
+	RES_FLM_RCP,
+	RES_HST_RCP,
+	RES_TPE_RCP,
+	RES_TPE_EXT,
+	RES_TPE_RPL,
+	RES_COUNT, /* number of managed pool types - keep before RES_INVALID */
+	RES_INVALID
+};
+
+/*
+ * ****************************************************
+ *           Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *          128      128     32     32    32
+ * Have  |  QW0  ||  QW4  || SW8 || SW9 | SWX   in FPGA
+ *
+ * Each word may start at any offset, though
+ * they are combined in chronological order, with all enabled to
+ * build the extracted match data, thus that is how the match key
+ * must be build
+ *
+ */
+/* Which KM extractor a match word is assigned to (see QW/SW layout above). */
+enum extractor_e {
+	KM_USE_EXTRACTOR_UNDEF,
+	KM_USE_EXTRACTOR_QWORD, /* 128 bit quad-word extractor */
+	KM_USE_EXTRACTOR_SWORD, /* 32 bit single-word extractor */
+};
+
+/* One collected match field before it is packed into a CAM/TCAM entry. */
+struct match_elem_s {
+	enum extractor_e extr;
+	int masked_for_tcam; /* if potentially selected for TCAM */
+	uint32_t e_word[4]; /* match value, up to 128 bit */
+	uint32_t e_mask[4]; /* valid bits of e_word */
+
+	int extr_start_offs_id; /* start-offset selector for the extractor */
+	int8_t rel_offs; /* offset relative to the selected start offset */
+	uint32_t word_len; /* number of significant 32-bit words in e_word */
+};
+
+/* Target key-matcher technology for a finalized entry. */
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+struct km_flow_def_s {
+	struct flow_api_backend_s *be;
+
+	/* For keeping track of identical entries */
+	struct km_flow_def_s *reference;
+	struct km_flow_def_s *root;
+
+	/* For collect flow elements and sorting */
+	struct match_elem_s match[MAX_MATCH_FIELDS];
+	struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+	int num_ftype_elem;
+
+	/* Finally formatted CAM/TCAM entry */
+	enum cam_tech_use_e target;
+	uint32_t entry_word[MAX_WORD_NUM];
+	uint32_t entry_mask[MAX_WORD_NUM];
+	int key_word_size;
+
+	/* TCAM calculated possible bank start offsets */
+	int start_offsets[MAX_TCAM_START_OFFSETS];
+	int num_start_offsets;
+
+	/* Flow information */
+
+	/*
+	 * HW input port ID needed for compare. In port must be identical on flow
+	 * types
+	 */
+	uint32_t port_id;
+	uint32_t info; /* used for color (actions) */
+	int info_set;
+	int flow_type; /* 0 is illegal and used as unset */
+	int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+	/* CAM specific bank management */
+	int cam_paired;
+	int record_indexes[MAX_BANKS];
+	int bank_used;
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct cam_distrib_s *cam_dist;
+	struct hasher_s *hsh;
+
+	/* TCAM specific bank management */
+	struct tcam_distrib_s *tcam_dist;
+	int tcam_start_bank;
+	int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+/* KCC-CAM lookup key, 64 bits total (bit fields packed into one word). */
+struct kcc_key_s {
+	uint64_t sb_data : 32; /* sideband data: VLAN TPID/VID, VXLAN VNI, or none */
+	uint64_t sb_type : 8; /* sideband type selector (0=none, 1=VLAN, 2=VXLAN) */
+	uint64_t cat_cfn : 8; /* categorizer function index */
+	uint64_t port : 16; /* port the key applies to */
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+/* State of one KCC-CAM flow definition, incl. CAM bank bookkeeping. */
+struct kcc_flow_def_s {
+	struct flow_api_backend_s *be;
+	union {
+		uint64_t key64; /* whole key, for fast compare */
+		uint32_t key32[2]; /* key as two words, for CAM writes */
+		struct kcc_key_s key; /* key by field */
+	};
+	uint32_t km_category; /* KM category programmed for a key hit */
+	uint32_t id; /* allocated unique ID, or KCC_ID_INVALID */
+
+	uint8_t *kcc_unique_ids; /* shared unique-ID allocation bitmap */
+
+	int flushed_to_target; /* entry has been programmed into NIC hw */
+	int record_indexes[MAX_BANKS]; /* hashed record index per bank */
+	int bank_used; /* bank currently holding the entry */
+	uint32_t *cuckoo_moves; /* for CAM statistics only */
+	struct kcc_cam_distrib_s *cam_dist; /* shared CAM occupancy map */
+	struct hasher_s *hsh; /* shared hasher configuration */
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d;
+	uint32_t user_port_id;
+	uint8_t len;
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version; /* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc;
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
+enum port_type_e {
+	PORT_NONE, /* not defined or drop */
+	PORT_INTERNAL, /* no queues attached */
+	PORT_PHY, /* MAC phy output queue */
+	PORT_VIRT, /* Memory queues to Host */
+};
+
+enum special_partial_match_e {
+	SPECIAL_MATCH_NONE,
+	SPECIAL_MATCH_LACP,
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+struct output_s {
+	uint32_t owning_port_id; /* the port who owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID or physical port id or not used */
+	int active; /* activated */
+};
+
+struct nic_flow_def {
+	/*
+	 * Frame Decoder match info collected
+	 */
+	int l2_prot;
+	int l3_prot;
+	int l4_prot;
+	int tunnel_prot;
+	int tunnel_l3_prot;
+	int tunnel_l4_prot;
+	int vlans;
+	int fragmentation;
+	/*
+	 * Additional meta data for various functions
+	 */
+	int in_port_override;
+	int l4_dst_port;
+	/*
+	 * Output destination info collection
+	 */
+	struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+	/* total number of available queues defined for all outputs - i.e. number of dst_id's */
+	int dst_num_avail;
+
+	/*
+	 * To identify high priority match with mark for special SW processing (non-OVS)
+	 */
+	enum special_partial_match_e special_match;
+
+	/*
+	 * Mark or Action info collection
+	 */
+	uint32_t mark;
+	uint64_t roa_actions;
+	uint64_t ioa_actions;
+
+	uint32_t jump_to_group;
+
+	uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+	int full_offload;
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If DPDK RTE tunnel helper API used
+	 * this holds the tunnel if used in flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_start_dyn;
+	int header_strip_start_ofs;
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+	int header_strip_removed_outer_ip;
+
+	/*
+	 * Modify field
+	 */
+	struct {
+		uint32_t select;
+		uint32_t dyn;
+		uint32_t ofs;
+		uint32_t len;
+		uint32_t level;
+		union {
+			uint8_t value8[16];
+			uint16_t value16[8];
+			uint32_t value32[4];
+		};
+	} modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+	uint32_t modify_field_count;
+	uint8_t ttl_sub_enable;
+	uint8_t ttl_sub_ipv4;
+	uint8_t ttl_sub_outer;
+
+	/*
+	 * Key Matcher flow definitions
+	 */
+	struct km_flow_def_s km;
+
+	/*
+	 * Key Matcher Category CAM
+	 */
+	struct kcc_flow_def_s *kcc;
+	int kcc_referenced;
+
+	/*
+	 * TX fragmentation IFR/RPP_LR MTU recipe
+	 */
+	uint8_t flm_mtu_fragmentation_recipe;
+};
+
+enum flow_handle_type {
+	FLOW_HANDLE_TYPE_FLOW,
+	FLOW_HANDLE_TYPE_FLM,
+};
+
+struct flow_handle {
+	enum flow_handle_type type;
+
+	struct flow_eth_dev *dev;
+	struct flow_handle *next;
+	struct flow_handle *prev;
+
+	union {
+		struct {
+			/*
+			 * 1st step conversion and validation of flow
+			 * verified and converted flow match + actions structure
+			 */
+			struct nic_flow_def *fd;
+			/*
+			 * 2nd step NIC HW resource allocation and configuration
+			 * NIC resource management structures
+			 */
+			struct {
+				int index; /* allocation index into NIC raw resource table */
+				/* number of contiguous allocations needed for this resource */
+				int count;
+				/*
+				 * This resource if not initially created by this flow, but reused
+				 * by it
+				 */
+				int referenced;
+			} resource[RES_COUNT];
+			int flushed;
+
+			uint32_t flow_stat_id;
+			uint32_t color;
+			int cao_enabled;
+			uint32_t cte;
+
+			uint32_t port_id; /* MAC port ID or override of virtual in_port */
+			uint32_t flm_ref_count;
+			uint8_t flm_group_index;
+			uint8_t flm_ft_index;
+		};
+
+		struct {
+			uint32_t flm_data[10];
+			uint8_t flm_prot;
+			uint8_t flm_kid;
+			uint8_t flm_prio;
+
+			uint16_t flm_rpl_ext_ptr;
+			uint32_t flm_nat_ipv4;
+			uint16_t flm_nat_port;
+			uint8_t flm_dscp;
+			uint32_t flm_teid;
+			uint8_t flm_rqi;
+			uint8_t flm_qfi;
+
+			uint8_t flm_mtu_fragmentation_recipe;
+
+			struct flow_handle *flm_owner;
+		};
+	};
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+					void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+				uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after first collect validate and optimization.
+ * km is compared against an existing km1.
+ * if identical, km1 flow_type is returned
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			  uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+				      uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
new file mode 100644
index 0000000000..c4db0f4c5c
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_nic_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow capable NIC backend - creating flow api instance for adapter nr (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
new file mode 100644
index 0000000000..9b6e5484a0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_group.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
#define OWNER_ID_COUNT 256

struct group_lookup_entry_s {
	uint64_t ref_counter;
	uint32_t *reverse_lookup;
};

struct group_handle_s {
	uint32_t group_count;

	uint32_t *translation_table;

	struct group_lookup_entry_s *lookup_entries;
};

/*
 * Allocate a group-translation handle with <group_count> translated-group
 * slots and a per-owner translation table (OWNER_ID_COUNT owners).
 *
 * Returns 0 with *handle set on success; -1 with *handle == NULL on any
 * allocation failure. Fixes of the original: the handle allocation was
 * dereferenced without a NULL check, the two member allocations were never
 * checked (later NULL deref), and the table size product was computed in
 * 32 bits (possible overflow) - it is now widened to size_t.
 */
int flow_group_handle_create(void **handle, uint32_t group_count)
{
	struct group_handle_s *group_handle;

	*handle = calloc(1, sizeof(struct group_handle_s));
	group_handle = *handle;
	if (group_handle == NULL)
		return -1;

	group_handle->group_count = group_count;
	group_handle->translation_table =
		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
	group_handle->lookup_entries =
		calloc(group_count, sizeof(struct group_lookup_entry_s));

	if (group_handle->translation_table == NULL ||
			group_handle->lookup_entries == NULL) {
		/* release partial allocations; free(NULL) is a no-op */
		free(group_handle->translation_table);
		free(group_handle->lookup_entries);
		free(group_handle);
		*handle = NULL;
		return -1;
	}

	return 0;
}
+
+int flow_group_handle_destroy(void **handle)
+{
+	if (*handle) {
+		struct group_handle_s *group_handle =
+			(struct group_handle_s *)*handle;
+
+		free(group_handle->translation_table);
+		free(group_handle->lookup_entries);
+
+		free(*handle);
+		*handle = NULL;
+	}
+
+	return 0;
+}
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+			     uint32_t *group_out)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	uint32_t *table_ptr;
+	uint32_t lookup;
+
+	if (group_handle == NULL || group_in >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (group_in == 0) {
+		*group_out = 0;
+		return 0;
+	}
+
+	table_ptr = &group_handle->translation_table[owner_id * OWNER_ID_COUNT +
+				 group_in];
+	lookup = *table_ptr;
+
+	if (lookup == 0) {
+		for (lookup = 1;
+				lookup < group_handle->group_count &&
+				group_handle->lookup_entries[lookup].ref_counter > 0;
+				++lookup)
+			;
+
+		if (lookup < group_handle->group_count) {
+			group_handle->lookup_entries[lookup].reverse_lookup =
+				table_ptr;
+			group_handle->lookup_entries[lookup].ref_counter += 1;
+
+			*table_ptr = lookup;
+		} else {
+			return -1;
+		}
+	} else {
+		group_handle->lookup_entries[lookup].ref_counter += 1;
+	}
+	*group_out = lookup;
+	return 0;
+}
+
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+	struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+	struct group_lookup_entry_s *lookup;
+
+	if (group_handle == NULL ||
+			translated_group >= group_handle->group_count)
+		return -1;
+
+	/* Don't translate group 0 */
+	if (translated_group == 0)
+		return 0;
+
+	lookup = &group_handle->lookup_entries[translated_group];
+
+	if (lookup->reverse_lookup && lookup->ref_counter > 0) {
+		lookup->ref_counter -= 1;
+		if (lookup->ref_counter == 0) {
+			*lookup->reverse_lookup = 0;
+			lookup->reverse_lookup = NULL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
new file mode 100644
index 0000000000..6982129e17
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
/*
 * Fixed bit permutation used by the hash mixer: bit 1 -> bit 30,
 * odd bits 3..31 move down 3, even bits 0..28 move up 3, bit 30 -> bit 1.
 */
static uint32_t shuffle(uint32_t x)
{
	uint32_t r = (x & 0x00000002) << 29;

	r |= (x & 0xAAAAAAA8) >> 3;
	r |= (x & 0x15555555) << 3;
	r |= (x & 0x40000000) >> 29;
	return r;
}

/* Rotate right by s with the wrapped-in bits inverted (s in 1..31). */
static uint32_t ror_inv(uint32_t x, const int s)
{
	return ((x >> s) | ((~x) << (32 - s)));
}

/*
 * Non-linear combine: XOR of both inputs with an "exactly two of four"
 * function over four inverted rotations of the inputs.
 */
static uint32_t combine(uint32_t x, uint32_t y)
{
	uint32_t a = ror_inv(x, 15);
	uint32_t b = ror_inv(x, 13);
	uint32_t c = ror_inv(y, 3);
	uint32_t d = ror_inv(y, 27);
	/* all six pair combinations: bit set when exactly 2 of a,b,c,d are set */
	uint32_t two_of_four = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
			       (a & ~c & ~b & d) | (~a & c & b & ~d) |
			       (~a & c & ~b & d) | (~a & ~c & b & d);

	return x ^ y ^ two_of_four;
}

/* One 32-bit mixing round: combine, then permute. */
static uint32_t mix(uint32_t x, uint32_t y)
{
	return shuffle(combine(x, y));
}
+
/*
 * 64-bit variants of the mixing primitives. Each operates on the two
 * 32-bit halves as independent lanes (the masks are per-lane replicas),
 * implementing the same inverted rotations as the 32-bit versions.
 */
static uint64_t ror_inv3(uint64_t x)
{
	const uint64_t wrap = 0xE0000000E0000000ULL;
	uint64_t shifted = (x >> 3) | wrap;

	return shifted ^ ((x << 29) & wrap);
}

static uint64_t ror_inv13(uint64_t x)
{
	const uint64_t wrap = 0xFFF80000FFF80000ULL;
	uint64_t shifted = (x >> 13) | wrap;

	return shifted ^ ((x << 19) & wrap);
}

static uint64_t ror_inv15(uint64_t x)
{
	const uint64_t wrap = 0xFFFE0000FFFE0000ULL;
	uint64_t shifted = (x >> 15) | wrap;

	return shifted ^ ((x << 17) & wrap);
}

static uint64_t ror_inv27(uint64_t x)
{
	const uint64_t wrap = 0xFFFFFFE0FFFFFFE0ULL;
	uint64_t shifted = (x >> 27) | wrap;

	return shifted ^ ((x << 5) & wrap);
}

/* Lane-wise version of shuffle(). */
static uint64_t shuffle64(uint64_t x)
{
	uint64_t r = (x & 0x0000000200000002ULL) << 29;

	r |= (x & 0xAAAAAAA8AAAAAAA8ULL) >> 3;
	r |= (x & 0x1555555515555555ULL) << 3;
	r |= (x & 0x4000000040000000ULL) >> 29;
	return r;
}

/* Pack two words: x in the high half, y in the low half. */
static uint64_t pair(uint32_t x, uint32_t y)
{
	uint64_t hi = x;

	return (hi << 32) | y;
}

/* 64-bit combine: same "exactly two of four" construction as combine(). */
static uint64_t combine64(uint64_t x, uint64_t y)
{
	uint64_t a = ror_inv15(x);
	uint64_t b = ror_inv13(x);
	uint64_t c = ror_inv3(y);
	uint64_t d = ror_inv27(y);
	uint64_t two_of_four = (a & c & ~b & ~d) | (a & ~c & b & ~d) |
			       (a & ~c & ~b & d) | (~a & c & b & ~d) |
			       (~a & c & ~b & d) | (~a & ~c & b & d);

	return x ^ y ^ two_of_four;
}

/* One 64-bit mixing round: combine, then permute. */
static uint64_t mix64(uint64_t x, uint64_t y)
{
	return shuffle64(combine64(x, y));
}
+
/*
 * Collapse a 16-word key into one 32-bit hash via a binary tree of
 * pair()/mix64() reductions (words i and i+8 seed each leaf), followed
 * by a 64->32 fold and two final scramble rounds.
 */
static uint32_t calc16(const uint32_t key[16])
{
	uint64_t l0, l1, r0, r1, z;
	uint32_t x;

	/* layer 1: pair words i with i+8, mix adjacent pairs */
	l0 = mix64(pair(key[0], key[8]), pair(key[1], key[9]));
	l1 = mix64(pair(key[2], key[10]), pair(key[3], key[11]));
	r0 = mix64(pair(key[4], key[12]), pair(key[5], key[13]));
	r1 = mix64(pair(key[6], key[14]), pair(key[7], key[15]));

	/* layers 2-4: reduce to a single 64-bit value */
	z = mix64(mix64(l0, l1), mix64(r0, r1));

	/* fold 64 -> 32 and scramble twice */
	x = mix((uint32_t)(z >> 32), (uint32_t)z);
	x = mix(x, ror_inv(x, 17));
	x = combine(x, ror_inv(x, 17));

	return x;
}
+
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+	uint64_t val;
+	uint32_t res;
+
+	val = calc16(key);
+	res = (uint32_t)val;
+
+	if (hsh->cam_bw > 32)
+		val = (val << (hsh->cam_bw - 32)) ^ val;
+
+	for (int i = 0; i < hsh->banks; i++) {
+		result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+		val = val >> hsh->cam_records_bw;
+	}
+	return res;
+}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+	hsh->banks = banks;
+	hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+	hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+	hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, _banks, (int)log2(nb_records - 1) + 1);
+
+	if (res)
+		printf("ERROR: testing hasher\n");
+#endif
+
+	return 0;
+}
+
+#ifdef TESTING
+/*
+ * Hasher self-test: hashes a fixed 16-word key (only the first 4 words
+ * are non-zero, the rest are zero-initialized) and checks the returned
+ * hash against the precomputed constant plus a reference re-derivation
+ * of the per-bank indexes. Returns the number of mismatches (0 = pass).
+ * NOTE(review): only resval[0..2] / val[0..2] are checked, so this
+ * appears to assume a 3-bank configuration - confirm before enabling.
+ */
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+	int res = 0;
+	int val[10], resval[10];
+	uint32_t bits = 0;
+
+	uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+	const uint32_t result = 0xACECAE65;
+
+	for (int i = 0; i < 16; i++)
+		printf("%08x,", inval[i]);
+	printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+	uint32_t ret = gethash(hsh, inval, val);
+
+	printf("Return VAL = %08X  ==  %08X\n", ret, result);
+	res += (ret != result) ? 1 : 0;
+
+	/* reference computation of the bank indexes, mirroring gethash() */
+	int shft = (banks * record_bw) - 32;
+	int mask = (1 << record_bw) - 1;
+
+	if (shft > 0) {
+		bits = (ret >> (32 - shft));
+		ret ^= ret << shft;
+	}
+
+	resval[0] = ret & mask;
+	ret >>= record_bw;
+	resval[1] = ret & mask;
+	ret >>= record_bw;
+	resval[2] = ret & mask;
+	resval[2] |= (bits << (record_bw - shft));
+
+	for (int i = 0; i < 3; i++) {
+		printf("HASH %i: %i  ==  %i\n", i, val[i], resval[i]);
+		res += (val[i] != resval[i]) ? 1 : 0;
+	}
+
+	return res;
+}
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
new file mode 100644
index 0000000000..6365a396d2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_hasher.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+	int banks;
+	int cam_records_bw;
+	uint32_t cam_records_bw_mask;
+	int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
new file mode 100644
index 0000000000..ddf1742588
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_kcc.c
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+/* Per CAM-address bookkeeping; kcc_owner == NULL means the address is free. */
+struct kcc_cam_distrib_s {
+	struct kcc_flow_def_s *kcc_owner;
+	int ref_cnt;
+};
+
+/*
+ * Linear CAM address of kcc's hashed record in bank <bnk>.
+ * NOTE: GCC statement expression; implicitly uses a local "kcc" variable.
+ */
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		(_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+	})
+
+
+/* Byte sizes of the shared CAM distribution map and the unique-ID bitmap. */
+#define BE_CAM_ENTRIES \
+	(kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+/* Max recursion depth for cuckoo displacement; the reserved-address stack
+ * below is file-scope shared state, so the move code is not reentrant.
+ */
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * Attach this flow definition to the per-NIC shared KCC-CAM manager.
+ * The manager is ONE calloc'ed region carved into four parts:
+ *   [cam_dist map | cuckoo_moves counter (u32) | unique-ID bitmap | hasher]
+ * It is created on first attach (*handle NULL) and reused afterwards.
+ * NOTE(review): the calloc result is not checked - on OOM the pointer
+ * arithmetic below dereferences NULL; confirm intended policy.
+ */
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+		void **handle)
+{
+	/*
+	 *  KCC entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+				 BE_UNIQUE_IDS_SIZE +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV KCC-CAM record manager\n");
+	}
+	kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+	kcc->cuckoo_moves =
+		(uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+	kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+					  BE_CAM_ENTRIES + sizeof(uint32_t));
+
+	kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+				       BE_UNIQUE_IDS_SIZE);
+	init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+	kcc->key.sb_data = 0xffffffff;
+	kcc->key.sb_type = 0;
+	return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+	kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+	kcc->key.sb_type = 1;
+	return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+	kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+	kcc->key.sb_type = 2;
+	return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+	kcc->key.port = port;
+	return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+	kcc->key.cat_cfn = cat_cfn;
+	return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+	return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+	kcc->km_category = category;
+	return 0;
+}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+	uint32_t i, ii;
+	/* search a free unique ID in allocation bitmap */
+	for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+		if (kcc->kcc_unique_ids[i] != 0xff)
+			break;
+
+	if (i == BE_UNIQUE_IDS_SIZE)
+		return -1;
+
+	for (ii = 0; ii < 8; ii++) {
+		if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+			kcc->kcc_unique_ids[i] =
+				(uint8_t)(kcc->kcc_unique_ids[i] |
+					  (uint8_t)(1U << ii));
+			kcc->id = (uint16_t)(i * 8 + ii);
+			NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+			       kcc->id);
+			return (int)kcc->id;
+		}
+	}
+	return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->id == KCC_ID_INVALID)
+		return;
+
+	uint32_t idx = kcc->id >> 3;
+	uint8_t shft = (uint8_t)(kcc->id & 7);
+
+	assert(idx < BE_UNIQUE_IDS_SIZE);
+	if (idx < BE_UNIQUE_IDS_SIZE) {
+		assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+		kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+		NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+		kcc->id = KCC_ID_INVALID;
+	}
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+	if (kcc->key64 == kcc1->key64)
+		return 1;
+	return 0;
+}
+
+/*
+ * Write this entry (both key words, KM category, unique ID) to the CAM
+ * address (bank, record_indexes[bank]) and flush it to HW. On success the
+ * CAM distribution slot is marked owned with ref count 1.
+ * Returns the flush status, or -1 if any field write fails.
+ */
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+				 kcc->key32[0]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+				 kcc->key32[1]);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+				 kcc->km_category);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	/* record ownership in the shared distribution map */
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+	return res;
+}
+
+/*
+ * Zero the CAM entry at (bank, record_indexes[bank]) in HW and release
+ * its distribution slot. Also clears the local key and KM category; the
+ * allocated unique ID is freed separately (see comment below).
+ * Returns the flush status, or -1 if any field write fails.
+ */
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+	int res = 0;
+	int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+	if (res)
+		return -1;
+	res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+	/* release ownership of the CAM address */
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+	kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+	kcc->key64 = 0UL;
+	kcc->km_category = 0;
+	/* "kcc->id" holds an allocated unique id, so cleared/freed later */
+	return res;
+}
+
+/*
+ * Single cuckoo step: try to move this entry from its current bank to any
+ * bank whose hashed CAM address is free. On success the entry is written
+ * at the new address, the old distribution slot is released WITHOUT a HW
+ * flush (the caller immediately takes the old address over), bank_used is
+ * updated and 1 is returned. Returns 0 when no bank is free or the CAM
+ * write fails.
+ */
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+	for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+		/* It will not select itself */
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+				NULL) {
+			/*
+			 * Populate in new position
+			 */
+			int res = kcc_cam_populate(kcc, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller If you change this code in future updates, this may no
+			 * longer be true then!
+			 */
+			kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+			.kcc_owner = NULL;
+			NT_LOG(DBG, FILTER,
+			       "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       kcc->bank_used, bank,
+			       BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+			       BE_CAM_KCC_DIST_IDX(bank));
+
+			kcc->bank_used = bank;
+			(*kcc->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo displacement: try to move the entry that owns
+ * cam_dist[bank_idx], recursing up to <levels> deep through the entries
+ * blocking it. CAM addresses already on the displacement path are kept in
+ * the file-scope kcc_cam_addr_reserved_stack and skipped, so this function
+ * is NOT reentrant. Returns 1 when a move chain freed the address,
+ * 0 otherwise.
+ */
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+				       int bank_idx, int levels,
+				       int cam_adr_list_len)
+{
+	struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+	assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	if (kcc_move_cuckoo_index(kcc))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+	/* reserve this address so deeper levels do not revisit it */
+	kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+		int reserved = 0;
+		/* candidate address uses the BLOCKING entry's hashed indexes */
+		int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (kcc_cam_addr_reserved_stack[i_reserved] ==
+					new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+						      cam_adr_list_len);
+		if (res) {
+			/* a deeper entry moved away - our move must now succeed */
+			if (kcc_move_cuckoo_index(kcc))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/* Scratch 16-word hash input; words 2-15 are never written and stay zero. */
+static uint32_t kcc_hsh_key[16];
+
+/*
+ * Insert this entry into the KCC CAM: hash the key into one candidate
+ * record index per bank, use the first free bank, otherwise try to free
+ * one by cuckoo-moving existing entries (max depth 4). On success the
+ * entry is populated and flushed_to_target/bank_used are updated.
+ * Returns 0 on success, -1 when no room, or the populate status.
+ */
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	/* hash input is the two key words, swapped */
+	kcc_hsh_key[0] = kcc->key32[1];
+	kcc_hsh_key[1] = kcc->key32[0];
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+	NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+	NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+	NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+	/* 2-15 never changed - remains zero */
+
+	gethash(kcc->hsh, kcc_hsh_key, val);
+
+	for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+		kcc->record_indexes[i] = val[i];
+	NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+	       kcc->record_indexes[0], kcc->record_indexes[1],
+	       kcc->record_indexes[2]);
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+		if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+				NULL) {
+			bank = i_bank;
+			break;
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+				i_bank++) {
+			if (kcc_move_cuckoo_index_level(kcc,
+							BE_CAM_KCC_DIST_IDX(i_bank),
+							4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+
+		if (bank < 0)
+			return -1;
+	}
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+	       BE_CAM_KCC_DIST_IDX(bank));
+	res = kcc_cam_populate(kcc, bank);
+	if (res == 0) {
+		kcc->flushed_to_target = 1;
+		kcc->bank_used = bank;
+	} else {
+		NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+	}
+	return res;
+}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = -1;
+
+	NT_LOG(DBG, FILTER,
+	       "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+	       kcc->key64, kcc->km_category, kcc->id);
+	res = kcc_write_data_to_cam(kcc);
+	return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+	int res = 0;
+
+	if (kcc->flushed_to_target) {
+		res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+		kcc->flushed_to_target = 0;
+		kcc->bank_used = 0;
+	}
+	return res;
+}
+
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+	assert(kcc->bank_used >= 0 &&
+	       kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	NT_LOG(DBG, FILTER,
+	       "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+	return ++cam_entry->ref_cnt;
+}
+
+/*
+ * Drop one reference on the CAM entry for this key; when the count hits
+ * zero the entry is cleared from the CAM (which also zeroes kcc->key64,
+ * so the final log line then shows key 0). Returns the remaining count,
+ * or -1 when bank_used is out of range.
+ */
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+	if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+		return -1;
+
+	struct kcc_cam_distrib_s *cam_entry =
+		&kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+	if (cam_entry->ref_cnt) {
+		if (--cam_entry->ref_cnt == 0) {
+			kcc_clear_data_match_entry(kcc);
+			NT_LOG(DBG, FILTER,
+			       "KCC DEC Ref on Key became zero - Delete\n");
+		}
+	}
+
+	NT_LOG(DBG, FILTER,
+	       "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+	       kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+	return cam_entry->ref_cnt;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
new file mode 100644
index 0000000000..560be9f7d3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_km.c
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Word-mask patterns that the KM CAM can match directly.  A match
+ * element whose length and mask equal one of these entries may be
+ * stored in CAM; any other mask pattern marks the element for TCAM
+ * (see km_add_match_elem()).
+ */
+static const struct cam_match_masks_s {
+	uint32_t word_len;
+	uint32_t key_mask[4];
+} cam_masks[] = {
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffffffff
+		}
+	}, /* IP6_SRC, IP6_DST */
+	{	4,
+		{	0xffffffff, 0xffffffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* DMAC,SMAC,ethtype */
+	{	4,
+		{	0xffffffff, 0xffff0000, 0x00000000,
+			0xffff0000
+		}
+	}, /* DMAC,ethtype */
+	{	4,
+		{	0x00000000, 0x0000ffff, 0xffffffff,
+			0xffff0000
+		}
+	}, /* SMAC,ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */
+	{	2,
+		{	0xffffffff, 0xffffffff, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_COMBINED */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+		/*
+		 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+		 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+		 */
+	{	1,
+		{	0xffff0000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{	1,
+		{	0x0000ffff, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* TP_PORT_DST32 */
+	{	1,
+		{	0x00030000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv4 TOS mask bits used often by OVS */
+	{	1,
+		{	0x00300000, 0x00000000, 0x00000000,
+			0x00000000
+		}
+	}, /* IPv6 TOS mask bits used often by OVS */
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+/* Per-CAM-record ownership: which km flow (if any) occupies a record. */
+struct cam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+/* Flat index into the CAM ownership table for (bank, record). */
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+/*
+ * Index for this km's hashed record in 'bnk'.  Uses a GCC/Clang
+ * statement expression so 'bnk' is evaluated exactly once.
+ */
+#define CAM_KM_DIST_IDX(bnk) \
+	({ \
+		int _temp_bnk = (bnk); \
+		CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+	})
+
+#define CUCKOO_MOVE_MAX_DEPTH 8
+/*
+ * Addresses already claimed along the current cuckoo move chain.
+ * NOTE(review): file-scope state, so cuckoo moves are not reentrant.
+ */
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+/* Per-TCAM-record ownership, analogous to cam_distrib_s. */
+struct tcam_distrib_s {
+	struct km_flow_def_s *km_owner;
+};
+
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+/*
+ * Attach (and lazily allocate) the per-NIC-device KM resource
+ * manager shared through 'handle'.  The single allocation is laid
+ * out back to back as:
+ *   [CAM ownership table][cuckoo move counter (uint32_t)]
+ *   [TCAM ownership table][hasher state]
+ * NOTE(review): the calloc() result is not checked before the
+ * derived pointers below are taken -- confirm the OOM policy.
+ */
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES                                            \
+	(km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+	 sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES                                                \
+	(km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+	 sizeof(struct tcam_distrib_s))
+	/*
+	 *  KM entries occupied in CAM - to manage the cuckoo shuffling
+	 *  and manage CAM population and usage
+	 *  KM entries occupied in TCAM - to manage population and usage
+	 */
+	if (!*handle) {
+		*handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+				 (size_t)TCAM_ENTRIES +
+				 sizeof(struct hasher_s));
+		NT_LOG(DBG, FILTER,
+		       "Allocate NIC DEV CAM and TCAM record manager\n");
+	}
+	km->cam_dist = (struct cam_distrib_s *)*handle;
+	km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+	km->tcam_dist =
+		(struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+					  sizeof(uint32_t));
+
+	km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+	init_hasher(km->hsh, km->be->km.nb_cam_banks,
+		    km->be->km.nb_cam_records);
+}
+
+/*
+ * Free the shared KM resource manager allocated by
+ * km_attach_ndev_resource_management() and clear the handle.
+ */
+void km_free_ndev_resource_management(void **handle)
+{
+	if (*handle) {
+		free(*handle);
+		NT_LOG(DBG, FILTER,
+		       "Free NIC DEV CAM and TCAM record manager\n");
+	}
+	*handle = NULL;
+}
+
+/*
+ * Add one match element to the flow definition.
+ * Valid word lengths are 1, 2 and 4; a length of 3 is zero-padded to
+ * 4.  The element's mask is compared against cam_masks[] to
+ * pre-classify it as CAM-capable (masked_for_tcam = 0) or TCAM-only.
+ * Returns 0 on success, -1 on invalid word length.
+ * NOTE(review): num_ftype_elem is incremented without a bounds check
+ * against the match[] capacity -- confirm callers limit the count.
+ */
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+		      uint32_t e_mask[4], uint32_t word_len,
+		      enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+	char *s = ntlog_helper_str_alloc("MATCH: ");
+
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_reset(s, "MASK : ");
+	for (unsigned int i = 0; i < word_len; i++)
+		ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	/* valid word_len 1,2,4 */
+	if (word_len == 3) {
+		word_len = 4;
+		e_word[3] = 0;
+		e_mask[3] = 0;
+	}
+	if (word_len < 1 || word_len > 4) {
+		assert(0);
+		return -1;
+	}
+
+	for (unsigned int i = 0; i < word_len; i++) {
+		km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+		km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+	}
+
+	km->match[km->num_ftype_elem].word_len = word_len;
+	km->match[km->num_ftype_elem].rel_offs = offset;
+	km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+	/*
+	 * Determine here if this flow may better be put into TCAM
+	 * Otherwise it will go into CAM
+	 * This is dependent on a cam_masks list defined above
+	 */
+	km->match[km->num_ftype_elem].masked_for_tcam = 1;
+	for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+		if (word_len == cam_masks[msk].word_len) {
+			int match = 1;
+
+			for (unsigned int wd = 0; wd < word_len; wd++) {
+				if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+					match = 0;
+					break;
+				}
+			}
+			if (match) {
+				/* Can go into CAM */
+				km->match[km->num_ftype_elem].masked_for_tcam =
+					0;
+			}
+		}
+	}
+
+	km->num_ftype_elem++;
+	return 0;
+}
+
+/* Enable/disable the extra info (color) word appended to the CAM key. */
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+	km->info_set = !!on;
+}
+
+/*
+ * Return the index of the first unmarked, non-sideband match element
+ * with word length 'size', or -1 when none is left.
+ */
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (!marked[i] &&
+				!(km->match[i].extr_start_offs_id & SWX_INFO) &&
+				km->match[i].word_len == size)
+			return i;
+	}
+	return -1;
+}
+
+#ifdef FLOW_DEBUG
+/* Debug only: human-readable name for a frame-offset/sideband id. */
+static const char *get_prot_offset_descr(int idx)
+{
+	switch (idx) {
+	case DYN_SOF:
+		return "SOF";
+	case DYN_L2:
+		return "L2 header";
+	case DYN_FIRST_VLAN:
+		return "First VLAN";
+	case DYN_MPLS:
+		return "First MPLS";
+	case DYN_L3:
+		return "L3 header";
+	case DYN_ID_IPV4_6:
+		return "ID field IPv4/6";
+	case DYN_FINAL_IP_DST:
+		return "Final IP dest";
+	case DYN_L4:
+		return "L4 header";
+	case DYN_L4_PAYLOAD:
+		return "L4 payload";
+	case DYN_TUN_PAYLOAD:
+		return "Tunnel payload";
+	case DYN_TUN_L2:
+		return "Tunnel L2 header";
+	case DYN_TUN_VLAN:
+		return "First tunneled VLAN";
+	case DYN_TUN_MPLS:
+		return "First tunneled MPLS";
+	case DYN_TUN_L3:
+		return "Tunnel L3 header";
+	case DYN_TUN_ID_IPV4_6:
+		return "Tunnel ID field IPv4/6";
+	case DYN_TUN_FINAL_IP_DST:
+		return "Tunnel final IP dest";
+	case DYN_TUN_L4:
+		return "Tunnel L4 header";
+	case DYN_TUN_L4_PAYLOAD:
+		return "Tunnel L4 payload";
+	case SB_VNI:
+		return "VNI";
+	case SB_MAC_PORT:
+		return "In Port";
+	case SB_KCC_ID:
+		return "KCC ID";
+	default:
+		break;
+	}
+	return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+/*
+ * Build the final KM key layout for this flow: assign match elements
+ * to QWORD/SWORD extractors, append sideband (SWX) words, and decide
+ * whether the key goes to CAM or TCAM.  For TCAM targets the key is
+ * padded to a legal word size and the set of legal start banks is
+ * computed.  Returns 0 on success, -1 when the elements cannot be
+ * mapped onto the available extractors / TCAM layout.
+ */
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings
+	 *  if key fields may be changed to cover un-mappable otherwise?
+	 *  split into cam and tcam and use synergy mode when available
+	 *
+	 */
+	int match_marked[MAX_MATCH_FIELDS];
+	int idx = 0;
+	int next = 0;
+	int m_idx;
+	int size;
+
+	memset(match_marked, 0, sizeof(match_marked));
+
+	/* build QWords */
+	for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+		size = 4;
+		m_idx = get_word(km, size, match_marked);
+		if (m_idx < 0) {
+			size = 2;
+			m_idx = get_word(km, size, match_marked);
+
+			if (m_idx < 0) {
+				size = 1;
+				m_idx = get_word(km, 1, match_marked);
+			}
+		}
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+		/* build final entry words and mask array */
+		for (int i = 0; i < size; i++) {
+			km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+			km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+		}
+
+		idx += size;
+		next++;
+	}
+
+	m_idx = get_word(km, 4, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more QWords */
+		return -1;
+	}
+
+	/*
+	 *  On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now
+	 *  No match would be able to exploit these as DWORDs because of maximum length of 12 words
+	 *  in CAM
+	 *  The last 2 words are taken by KCC-ID/SWX and Color. You could have one or none QWORDs
+	 *  where then both these DWORDs were possible in 10 words, but we don't have such use case
+	 *  built in yet
+	 */
+	/* build SWords */
+	for (int swords = 0; swords < MAX_SWORDS; swords++) {
+		m_idx = get_word(km, 1, match_marked);
+		if (m_idx < 0) {
+			/* no more defined */
+			break;
+		}
+
+		match_marked[m_idx] = 1;
+		/* build match map list and set final extractor to use */
+		km->match_map[next] = &km->match[m_idx];
+		km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+		/* build final entry words and mask array */
+		km->entry_word[idx] = km->match[m_idx].e_word[0];
+		km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+		idx++;
+		next++;
+	}
+
+	/*
+	 * Make sure we took them all
+	 */
+	m_idx = get_word(km, 1, match_marked);
+	if (m_idx >= 0) {
+		/* cannot match more SWords */
+		return -1;
+	}
+
+	/*
+	 * Handle SWX words specially
+	 */
+	int swx_found = 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id & SWX_INFO) {
+			km->match_map[next] = &km->match[i];
+			km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+			/* build final entry words and mask array */
+			km->entry_word[idx] = km->match[i].e_word[0];
+			km->entry_mask[idx] = km->match[i].e_mask[0];
+			idx++;
+			next++;
+			swx_found = 1;
+		}
+	}
+
+	assert(next == km->num_ftype_elem);
+
+	km->key_word_size = idx;
+	km->port_id = port_id;
+
+	km->target = KM_CAM;
+	/*
+	 * Finally decide if we want to put this match->action into the TCAM
+	 * When SWX word used we need to put it into CAM always, no matter what mask pattern
+	 * Later, when synergy mode is applied, we can do a split
+	 */
+	if (!swx_found && km->key_word_size <= 6) {
+		for (int i = 0; i < km->num_ftype_elem; i++) {
+			if (km->match_map[i]->masked_for_tcam) {
+				/* At least one */
+				km->target = KM_TCAM;
+			}
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+	       (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+	if (km->target == KM_TCAM) {
+		if (km->key_word_size > 10) {
+			/* do not support SWX in TCAM */
+			return -1;
+		}
+		/*
+		 * adjust for unsupported key word size in TCAM
+		 */
+		if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+				km->key_word_size == 9)) {
+			km->entry_mask[km->key_word_size] = 0;
+			km->key_word_size++;
+		}
+
+		/*
+		 * 1. the fact that the length of a key cannot change among the same used banks
+		 *
+		 *  calculate possible start indexes
+		 *  unfortunately restrictions in TCAM lookup
+		 *  makes it hard to handle key lengths larger than 6
+		 *  when other sizes should be possible too
+		 */
+		switch (km->key_word_size) {
+		case 1:
+			/*
+			 * Fix: populate all four legal start offsets (8..11).
+			 * The previous code wrote start_offsets[0] four
+			 * times, leaving entries 1-3 uninitialized while
+			 * num_start_offsets claimed 4.
+			 */
+			for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+			km->num_start_offsets = 4;
+			break;
+		case 2:
+			km->start_offsets[0] = 6;
+			km->num_start_offsets = 1;
+			break;
+		case 3:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 4:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			/* enlarge to 6 */
+			km->entry_mask[km->key_word_size++] = 0;
+			km->entry_mask[km->key_word_size++] = 0;
+			break;
+		case 6:
+			km->start_offsets[0] = 0;
+			km->num_start_offsets = 1;
+			break;
+
+		default:
+			NT_LOG(DBG, FILTER,
+			       "Final Key word size too large: %i\n",
+			       km->key_word_size);
+			return -1;
+		}
+
+#ifdef FLOW_DEBUG
+		char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+		for (int i = 0; i < km->num_start_offsets; i++)
+			ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+		NT_LOG(DBG, FILTER, "%s", s);
+		ntlog_helper_str_free(s);
+#endif
+	}
+
+#ifdef FLOW_DEBUG
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		NT_LOG(DBG, FILTER,
+		       "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+		       km->match_map[i]->word_len,
+		       (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+		       "SIDEBAND" :
+		       km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+		       "SWORD" :
+		       "QWORD",
+		       get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+		       km->match_map[i]->rel_offs);
+	}
+	char *s = ntlog_helper_str_alloc("");
+
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+
+	ntlog_helper_str_reset(s, "");
+	for (int i = 0; i < km->key_word_size; i++)
+		ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+	NT_LOG(DBG, FILTER, "%s", s);
+	ntlog_helper_str_free(s);
+#endif
+
+	return 0;
+}
+
+/*
+ * Compare a new KM flow definition against an existing one (km1) to
+ * decide whether km1's KM Recipe and flow type can be reused.
+ * Returns km1->flow_type on reuse, 0 when the definitions are not
+ * compatible, and -1 when the new flow exactly clashes with an
+ * already programmed entry.
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+	if (km->target != km1->target ||
+			km->num_ftype_elem != km1->num_ftype_elem ||
+			km->key_word_size != km1->key_word_size ||
+			km->info_set != km1->info_set)
+		return 0;
+
+	/*
+	 *  before KCC-CAM:
+	 *  if port is added to match, then we can have different ports in CAT
+	 *  that reuses this flow type
+	 */
+	int port_match_included = 0, kcc_swx_used = 0;
+
+	/*
+	 * NOTE(review): the port check indexes km->match[i] while the KCC
+	 * check indexes km->match_map[i]-> -- the two arrays are ordered
+	 * differently after km_key_create(); confirm this is intended.
+	 */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+			port_match_included = 1;
+			break;
+		} else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+			kcc_swx_used = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If not using KCC and if port match is not included in CAM,
+	 * we need to have same port_id to reuse
+	 */
+	if (!kcc_swx_used && !port_match_included &&
+			km->port_id != km1->port_id)
+		return 0;
+
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		/* using same extractor types in same sequence */
+		if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+				km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+				km->match_map[i]->extr != km1->match_map[i]->extr ||
+				km->match_map[i]->word_len != km1->match_map[i]->word_len)
+			return 0;
+	}
+
+	if (km->target == KM_CAM) {
+		/* in CAM must exactly match on all masks */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if (km->entry_mask[i] != km1->entry_mask[i])
+				return 0;
+		}
+
+		/* Would be set later if not reusing from km1 */
+		km->cam_paired = km1->cam_paired;
+	} else if (km->target == KM_TCAM) {
+		/*
+		 *  If TCAM, we must make sure Recipe Key Mask does not
+		 *  mask out enable bits in masks
+		 *  Note: it is important that km1 is the original creator
+		 *  of the KM Recipe, since it contains its true masks
+		 */
+		for (int i = 0; i < km->key_word_size; i++) {
+			if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+					km->entry_mask[i])
+				return 0;
+		}
+
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = -1; /* needs to be found later */
+	} else {
+		NT_LOG(DBG, FILTER,
+		       "ERROR - KM target not defined or supported\n");
+		return 0;
+	}
+
+	/*
+	 * Check for a flow clash. If already programmed return with -1
+	 */
+	int double_match = 1;
+
+	for (int i = 0; i < km->key_word_size; i++) {
+		if ((km->entry_word[i] & km->entry_mask[i]) !=
+				(km1->entry_word[i] & km1->entry_mask[i])) {
+			double_match = 0;
+			break;
+		}
+	}
+
+	if (double_match)
+		return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse same RCP and flow type
+	 * when this happens, CAM entry wins on overlap
+	 */
+
+	/* Use same KM Recipe and same flow type - return flow type */
+	return km1->flow_type;
+}
+
+/*
+ * Write the KM Recipe (RCP) for this flow to hardware at RCP 'index':
+ * extractor selection (QW0/QW4 for QWORD elements, DW8/DW10 for
+ * SWORDs, or the sideband SWX selector), the key mask A words, and
+ * the CAM- or TCAM-specific key layout fields (element/key length,
+ * flow-type mask, pairing or bank bitmap).
+ * Returns 0 on success, -1 on any unsupported extractor combination.
+ */
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+	int qw = 0;
+	int sw = 0;
+	int swx = 0;
+
+	hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
+	/* set extractor words, offs, contrib */
+	for (int i = 0; i < km->num_ftype_elem; i++) {
+		switch (km->match_map[i]->extr) {
+		case KM_USE_EXTRACTOR_SWORD:
+			if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+				/* only one sideband word supported, CAM only */
+				if (km->target == KM_CAM && swx == 0) {
+					/* SWX */
+					if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_VNI) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - VNI\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_MAC_PORT) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - PTC + MAC\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else if (km->match_map[i]
+							->extr_start_offs_id ==
+							SB_KCC_ID) {
+						NT_LOG(DBG, FILTER,
+						       "Set KM SWX sel A - KCC ID\n");
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_CCH,
+								  index, 0, 1);
+						hw_mod_km_rcp_set(km->be,
+								  HW_KM_RCP_SWX_SEL_A,
+								  index, 0,
+								  SWX_SEL_ALL32);
+					} else {
+						return -1;
+					}
+				} else {
+					return -1;
+				}
+				swx++;
+			} else {
+				/* plain SWORDs use DW8 first, then DW10 */
+				if (sw == 0) {
+					/* DW8 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW8_SEL_A,
+							  index, 0,
+							  DW8_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else if (sw == 1) {
+					/* DW10 */
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_DYN,
+							  index, 0,
+							  km->match_map[i]
+							  ->extr_start_offs_id);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_OFS,
+							  index, 0,
+							  km->match_map[i]->rel_offs);
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_DW10_SEL_A,
+							  index, 0,
+							  DW10_SEL_FIRST32);
+					NT_LOG(DBG, FILTER,
+					       "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+					       km->match_map[i]
+					       ->extr_start_offs_id,
+					       km->match_map[i]->rel_offs);
+				} else {
+					return -1;
+				}
+				sw++;
+			}
+			break;
+
+		case KM_USE_EXTRACTOR_QWORD:
+			/* QWORD elements use QW0 first, then QW4 */
+			if (qw == 0) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW0_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW0_SEL_A,
+							  index, 0,
+							  QW0_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else if (qw == 1) {
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_DYN,
+						  index, 0,
+						  km->match_map[i]->extr_start_offs_id);
+				hw_mod_km_rcp_set(km->be,
+						  HW_KM_RCP_QW4_OFS,
+						  index, 0,
+						  km->match_map[i]->rel_offs);
+				switch (km->match_map[i]->word_len) {
+				case 1:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST32);
+					break;
+				case 2:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_FIRST64);
+					break;
+				case 4:
+					hw_mod_km_rcp_set(km->be,
+							  HW_KM_RCP_QW4_SEL_A,
+							  index, 0,
+							  QW4_SEL_ALL128);
+					break;
+				default:
+					return -1;
+				}
+				NT_LOG(DBG, FILTER,
+				       "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+				       km->match_map[i]->extr_start_offs_id,
+				       km->match_map[i]->rel_offs,
+				       km->match_map[i]->word_len);
+			} else {
+				return -1;
+			}
+			qw++;
+			break;
+		default:
+			return -1;
+		}
+	}
+
+	/* set mask A */
+	for (int i = 0; i < km->key_word_size; i++) {
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+				  (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+				  i,
+				  km->entry_mask[i]);
+		NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+	}
+
+	if (km->target == KM_CAM) {
+		/* set info - Color */
+		if (km->info_set) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+					  1);
+			NT_LOG(DBG, FILTER, "Set KM info A\n");
+		}
+		/* set key length A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+				  km->key_word_size + !!km->info_set -
+				  1); /* select id is -1 */
+		/* set Flow Type for Key A */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+		NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+		/* Set Paired - only on the CAM part though... */
+		if ((uint32_t)(km->key_word_size + !!km->info_set) >
+				km->be->km.nb_cam_record_words) {
+			hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+					  index, 0, 1);
+			NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+			km->cam_paired = 1;
+		}
+	} else if (km->target == KM_TCAM) {
+		uint32_t bank_bm = 0;
+
+		if (tcam_find_mapping(km) < 0) {
+			/* failed mapping into TCAM */
+			NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+			return -1;
+		}
+
+		assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+		       km->be->km.nb_tcam_banks);
+
+		for (int i = 0; i < km->key_word_size; i++) {
+			bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+					  (km->tcam_start_bank + i)));
+		}
+
+		/* Set BANK_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+		/* Set Kl_A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+				  km->key_word_size - 1);
+
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Write this flow's key words and flow type into the CAM record(s)
+ * reserved in 'bank' (a second consecutive record is used when the
+ * key is cam_paired) and flush them to hardware.  Also records CAM
+ * ownership in the distribution table used by the cuckoo mover.
+ * Returns the OR of all hw_mod results (0 on success).
+ */
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank],
+					 km->entry_word[i]);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank],
+					 km->flow_type);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+	if (cnt) {
+		/* remaining words spill into the paired (next) record */
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be,
+						 HW_KM_CAM_W0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->entry_word[km->be->km.nb_cam_record_words +
+						 i]);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 km->flow_type);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+	}
+
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+
+	return res;
+}
+
+/*
+ * Zero out this flow's CAM record(s) in 'bank' (both records when
+ * cam_paired), release the ownership entries and flush to hardware.
+ * Mirror image of cam_populate().
+ */
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+	int res = 0;
+	int cnt = km->key_word_size + !!km->info_set;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+			i++, cnt--) {
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+					 km->record_indexes[bank], 0);
+		res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+					 km->record_indexes[bank], 0);
+	}
+	km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+	if (cnt) {
+		assert(km->cam_paired);
+		for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+				i++, cnt--) {
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+			res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+						 bank,
+						 km->record_indexes[bank] + 1,
+						 0);
+		}
+		km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+	}
+	res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+				   km->cam_paired ? 2 : 1);
+	return res;
+}
+
+/*
+ * Try to move this km's CAM entry from its current bank to any other
+ * bank with a free record (pair) at this key's hashed address.
+ * Returns 1 when moved (bank_used updated, cuckoo counter bumped),
+ * 0 when no free slot exists or the re-population failed.
+ */
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+	assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+	for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+		/* It will not select itself */
+		if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+			if (km->cam_paired) {
+				/* paired entries need the next record free too */
+				if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+						.km_owner != NULL)
+					continue;
+			}
+
+			/*
+			 * Populate in new position
+			 */
+			int res = cam_populate(km, bank);
+
+			if (res) {
+				NT_LOG(DBG, FILTER,
+				       "Error: failed to write to KM CAM in cuckoo move\n");
+				return 0;
+			}
+
+			/*
+			 * Reset/free entry in old bank
+			 * HW flushes are really not needed, the old addresses are always taken over
+			 * by the caller
+			 * If you change this code in future updates, this may no longer be true
+			 * then!
+			 */
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				NULL;
+			if (km->cam_paired)
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = NULL;
+
+			NT_LOG(DBG, FILTER,
+			       "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+			       km->bank_used, bank,
+			       CAM_KM_DIST_IDX(km->bank_used),
+			       CAM_KM_DIST_IDX(bank));
+			km->bank_used = bank;
+			(*km->cuckoo_moves)++;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Recursive cuckoo move: try to relocate the flow occupying
+ * 'bank_idx' (and transitively its victims, up to 'levels' deep) so
+ * that the address becomes free for the caller.  Returns 1 when the
+ * occupant was moved away, 0 otherwise.
+ * cam_addr_reserved_stack tracks addresses already claimed along the
+ * current move chain so the recursion never revisits them.
+ * NOTE(review): that stack is file-scope state, so this path is not
+ * reentrant/thread-safe -- confirm single-threaded use.
+ */
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+				   int bank_idx, int levels,
+				   int cam_adr_list_len)
+{
+	struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+	assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move if same pairness
+	 * Can be extended later to handle both move of paired and single entries
+	 */
+	if (!km || km_parent->cam_paired != km->cam_paired)
+		return 0;
+
+	if (move_cuckoo_index(km))
+		return 1;
+	if (levels <= 1)
+		return 0;
+
+	assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+	cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		int reserved = 0;
+		int new_idx = CAM_KM_DIST_IDX(i);
+
+		/* skip addresses already claimed along this move chain */
+		for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+				i_reserved++) {
+			if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+				reserved = 1;
+				break;
+			}
+		}
+		if (reserved)
+			continue;
+
+		int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+						  cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+
+			else
+				assert(0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Insert the finished key into the CAM: hash the key words into
+ * per-bank record indexes, pick a free bank -- or make room via
+ * cuckoo moves up to 4 levels deep -- and program the CAM.
+ * Returns 0 on success (flushed_to_target/bank_used updated),
+ * -1 when no bank could be found or freed.
+ */
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+	int res = 0;
+	int val[MAX_BANKS];
+
+	assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+	assert(km->cam_dist);
+
+	/* word list without info set */
+	gethash(km->hsh, km->entry_word, val);
+
+	for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we start always on an even address - reset bit 0 */
+		km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+	}
+	NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+	       km->record_indexes[0], km->record_indexes[1],
+	       km->record_indexes[2]);
+
+	if (km->info_set) {
+		km->entry_word[km->key_word_size] =
+			km->info; /* finally set info */
+	}
+
+	int bank = -1;
+	/*
+	 * first step, see if any of the banks are free
+	 */
+	for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+		if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+			if (km->cam_paired == 0 ||
+					(km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+					 .km_owner == NULL)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+
+	if (bank < 0) {
+		/*
+		 * Second step - cuckoo move existing flows if possible
+		 */
+		for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+				i_bank++) {
+			if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+						    4, 0)) {
+				bank = i_bank;
+				break;
+			}
+		}
+	}
+	if (bank < 0)
+		return -1;
+
+	/* populate CAM */
+	NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+	       CAM_KM_DIST_IDX(bank));
+	res = cam_populate(km, bank);
+	if (res == 0) {
+		km->flushed_to_target = 1;
+		km->bank_used = bank;
+	}
+
+	return res;
+}
+
+/*
+ * TCAM
+ */
+/*
+ * Scan for a record index that is free in 'start_bank' and in all
+ * key_word_size consecutive banks after it.  On success stores the
+ * record in km->tcam_record and returns 1; returns 0 when full.
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+	for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+		if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+				NULL) {
+			int pass = 1;
+
+			for (int ii = 1; ii < km->key_word_size; ii++) {
+				if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+											 rec)]
+						.km_owner != NULL) {
+					pass = 0;
+					break;
+				}
+			}
+			if (pass) {
+				km->tcam_record = rec;
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find a TCAM placement (start bank + record) for this flow among
+ * the legal start offsets computed by km_key_create().
+ * Returns 0 on success, -1 when no space is available.
+ */
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+	/* Search record and start index for this flow */
+	for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+		if (tcam_find_free_record(km, km->start_offsets[bs_idx])) {
+			km->tcam_start_bank = km->start_offsets[bs_idx];
+			NT_LOG(DBG, FILTER,
+			       "Found space in TCAM start bank %i, record %i\n",
+			       km->tcam_start_bank, km->tcam_record);
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Program one 32-bit key word into TCAM 'bank' for 'record'.  For
+ * each of the word's 4 bytes, this record's bit is set in the
+ * 256-entry value table for every byte value matching
+ * (value & mask) == (word & mask), then the bank is flushed.
+ * Takes ownership of the record slot on success.
+ */
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+			   uint32_t word, uint32_t mask)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1 << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	for (int byte = 0; byte < 4; byte++) {
+		uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+		uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+		/* calculate important value bits */
+		a = a & a_m;
+
+#ifdef FLOW_DEBUG
+		if (a_m == 0) {
+			NT_LOG(DBG, FILTER,
+			       "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+			       bank, byte, rec_val, rec_bit);
+		}
+#endif
+
+		for (int val = 0; val < 256; val++) {
+			err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if ((val & a_m) == a) {
+				all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+				if (a_m) {
+					NT_LOG(DBG, FILTER,
+					       "bank %i, byte %i, val %i(%02x), "
+					       "rec_val %i rec bit %08x\n",
+					       bank, byte, val, val, rec_val,
+					       rec_bit);
+				}
+#endif
+			} else {
+				all_recs[rec_val] &= ~rec_bit;
+			}
+			err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						  byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	/* flush bank */
+	err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	if (err == 0) {
+		assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+		       NULL);
+		km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+	}
+	return err;
+}
+
+/*
+ * Program the full TCAM entry for this flow: the TCI (color + flow
+ * type) for the start bank/record, then each key word into the
+ * consecutive banks.  When the recipe is reused (tcam_record < 0) a
+ * free record is located first.  Returns 0 on success.
+ */
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_record < 0) {
+		tcam_find_free_record(km, km->tcam_start_bank);
+		if (km->tcam_record < 0) {
+			NT_LOG(DBG, FILTER,
+			       "FAILED to find space in TCAM for flow\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER,
+		       "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+		       km->tcam_start_bank, km->tcam_record);
+	}
+
+	/* Write KM_TCI */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, km->info);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, km->flow_type);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_write_word(km, km->tcam_start_bank + i,
+				      km->tcam_record, km->entry_word[i],
+				      km->entry_mask[i]);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (err == 0)
+		km->flushed_to_target = 1;
+
+	return err;
+}
+
+/*
+ * Clear 'record's presence bit in every (byte, value) vector of 'bank',
+ * flush the bank and drop the record's ownership marking.
+ *
+ * @return 0 on success, non-zero backend error otherwise
+ */
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+	int err = 0;
+	uint32_t all_recs[3];
+
+	int rec_val = record / 32;
+	int rec_bit_shft = record % 32;
+	/* unsigned literal: "1 << 31" on a signed int is undefined behavior */
+	uint32_t rec_bit = (1U << rec_bit_shft);
+
+	assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+	/* stop processing further key bytes once an error has occurred */
+	for (int byte = 0; byte < 4 && !err; byte++) {
+		for (int val = 0; val < 256; val++) {
+			err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+			all_recs[rec_val] &= ~rec_bit;
+			err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+						 byte, val, all_recs);
+			if (err)
+				break;
+		}
+	}
+	if (err)
+		return err;
+
+	/* flush bank */
+	err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+	km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+	NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+	       bank, rec_val, rec_bit);
+
+	return err;
+}
+
+/*
+ * Remove a flow's TCAM entry: zero its TCI (color / flow type) and clear
+ * its record bits in all key-word banks.
+ *
+ * @return 0 on success, -1 when no TCAM location was assigned,
+ *         else backend error
+ */
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+	int err = 0;
+
+	if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+		NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+		return -1;
+	}
+
+	/*
+	 * Write KM_TCI - accumulate errors instead of dropping them, the
+	 * same way km_write_data_to_tcam() does
+	 */
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+				 km->tcam_record, 0);
+	err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+				 km->tcam_record, 0);
+	err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+				   1);
+
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	for (int i = 0; i < km->key_word_size && !err; i++) {
+		err = tcam_reset_bank(km, km->tcam_start_bank + i,
+				      km->tcam_record);
+	}
+#ifdef FLOW_DEBUG
+	km->be->iface->set_debug_mode(km->be->be_dev,
+				      FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	return err;
+}
+
+/*
+ * Make 'km' a follower of an existing match entry 'km1': append it to the
+ * reference chain of km1's root and copy the HW placement so both share
+ * the same CAM/TCAM entry.
+ *
+ * @return 0 on success, -1 for unsupported targets
+ */
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+			      struct km_flow_def_s *km1)
+{
+	struct km_flow_def_s *tail = km1;
+	int res = 0;
+
+	/* attach to the chain's root and append at the chain's end */
+	km->root = km1->root ? km1->root : km1;
+	while (tail->reference)
+		tail = tail->reference;
+	tail->reference = km;
+
+	km->info = km1->info;
+
+	if (km->target == KM_CAM) {
+		km->cam_paired = km1->cam_paired;
+		km->bank_used = km1->bank_used;
+		km->flushed_to_target = km1->flushed_to_target;
+	} else if (km->target == KM_TCAM) {
+		km->tcam_start_bank = km1->tcam_start_bank;
+		km->tcam_record = km1->tcam_record;
+		km->flushed_to_target = km1->flushed_to_target;
+	} else {
+		/* KM_SYNERGY and anything else is unsupported here */
+		res = -1;
+	}
+
+	return res;
+}
+
+/*
+ * Write this flow's match data to its HW target (CAM or TCAM), tagging
+ * the entry with the given color.
+ *
+ * @return 0 on success, -1 for unsupported targets or write failure
+ */
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+	km->info = color;
+	NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+	/* dispatch on the selected HW target */
+	if (km->target == KM_CAM)
+		return km_write_data_to_cam(km);
+	if (km->target == KM_TCAM)
+		return km_write_data_to_tcam(km);
+
+	/* KM_SYNERGY and unknown targets are not supported */
+	return -1;
+}
+
+/*
+ * Remove this KM flow definition from its HW target (CAM or TCAM).
+ *
+ * Three cases:
+ *  - km is a follower (km->root set): unlink it from the reference chain;
+ *    the root still owns the HW entry, which is left untouched.
+ *  - km is an owner with followers (km->reference set): transfer HW entry
+ *    ownership to the first follower instead of clearing the entry.
+ *  - km is the sole owner and was flushed to HW: reset the HW entry.
+ *
+ * @return 0 on success, -1 for unsupported targets or reset failure
+ */
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+	int res = 0;
+
+	if (km->root) {
+		/* follower: unlink km from the root's reference chain */
+		struct km_flow_def_s *km1 = km->root;
+
+		while (km1->reference != km)
+			km1 = km1->reference;
+
+		km1->reference = km->reference;
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->reference) {
+		/* owner with followers: promote the first follower */
+		km->reference->root = NULL;
+
+		switch (km->target) {
+		case KM_CAM:
+			km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+				km->reference;
+			if (km->key_word_size + !!km->info_set > 1) {
+				/* the entry spans a paired CAM record too */
+				assert(km->cam_paired);
+				km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+				.km_owner = km->reference;
+			}
+			break;
+		case KM_TCAM:
+			/* one bank per key word - transfer each one */
+			for (int i = 0; i < km->key_word_size; i++) {
+				km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+					km->tcam_record)].km_owner = km->reference;
+			}
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	} else if (km->flushed_to_target) {
+		/* sole owner: clear the HW entry itself */
+		switch (km->target) {
+		case KM_CAM:
+			res = cam_reset_entry(km, km->bank_used);
+			break;
+		case KM_TCAM:
+			res = tcam_reset_entry(km);
+			break;
+		case KM_SYNERGY:
+		default:
+			res = -1;
+			break;
+		}
+		km->flushed_to_target = 0;
+		km->bank_used = 0;
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
new file mode 100644
index 0000000000..17717da8e2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_engine/flow_tunnel.c
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* 255 reserved */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+/* One entry in the tunnel database (singly linked list). */
+struct tunnel_s {
+	struct tunnel_cfg_s cfg;      /* tunnel match values */
+	struct tunnel_cfg_s cfg_mask; /* which bits of cfg are significant */
+	uint32_t flow_stat_id;        /* (uint32_t)-1 until set by flow code */
+	uint8_t vport;                /* virtual port allocated for this tunnel */
+	int refcnt;                   /* users; entry is freed when it reaches 0 */
+	struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+/*
+ * A virtual (tunnel) port lies in the half-open range
+ * [VIRTUAL_TUNNEL_PORT_OFFSET, MAX_HW_VIRT_PORTS).
+ */
+int is_virtual_port(uint8_t virt_port)
+{
+	if (virt_port < VIRTUAL_TUNNEL_PORT_OFFSET)
+		return 0;
+	return virt_port < MAX_HW_VIRT_PORTS ? 1 : 0;
+}
+
+/*
+ * Tunnel database, for use with OVS 2.17.2.
+ * Singly linked list of all currently defined tunnels.
+ */
+static struct tunnel_s *tunnels;
+
+/* allocation map for virtual tunnel ports; non-zero slot means in use */
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+/*
+ * Allocate the lowest free virtual tunnel port.
+ *
+ * @return the allocated port number, or 255 when all ports are in use
+ */
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+	/* scan the allocation map for the first free slot */
+	for (unsigned int idx = 0;
+			idx < (unsigned int)(MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET);
+			idx++) {
+		if (vport[idx] == 0) {
+			vport[idx] = 1;
+			return (uint8_t)(idx + VIRTUAL_TUNNEL_PORT_OFFSET);
+		}
+	}
+
+	/* no more virtual ports */
+	return 255;
+}
+
+/*
+ * Release a previously allocated virtual tunnel port.
+ *
+ * @return 0 on success; on an out-of-range port the "return -1" wraps to
+ *         255 through the unsigned return type (callers test for non-zero)
+ */
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+	if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+			virt_port < MAX_HW_VIRT_PORTS) {
+		vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+		return 0;
+	}
+	return -1;
+}
+
+/*
+ * Compare two values under the intersection of both masks:
+ * true when (v1 & msk1 & msk2) == (v2 & msk1 & msk2).
+ * Statement-expression form evaluates each argument exactly once.
+ */
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+	__typeof__(_v1) (v1) = (_v1); \
+	__typeof__(_v2) (v2) = (_v2); \
+	__typeof__(_msk1) (msk1) = (_msk1); \
+	__typeof__(_msk2) (msk2) = (_msk2); \
+	(((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
+
+/* Masked equality of two IPv4 tunnel configs: src/dst IP and L4 ports. */
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({      \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip,              \
+		(tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) &&           \
+	 check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip,              \
+		(tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) &&           \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+		(tun1_msk)->s_port) &&                                    \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked equality of two IPv6 tunnel configs: src/dst IP as two 64-bit
+ * halves each, plus the L4 ports.
+ */
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({        \
+	__typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+	__typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+	__typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+	__typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+	(check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0],    \
+		(tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+	 check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1],    \
+		(tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+	 check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0],    \
+		(tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+	 check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1],    \
+		(tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+	 check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port,       \
+		(tun1_msk)->s_port) &&                                          \
+	 check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port,       \
+		(tun1_msk)->d_port)); \
+})
+
+/*
+ * Masked comparison of a DB tunnel against a candidate config/mask pair.
+ * The DB entry's ipversion decides whether the v4 or v6 fields compare.
+ *
+ * @return non-zero on match, 0 otherwise
+ */
+static int check_tun_match(struct tunnel_s *tun,
+			   const struct tunnel_cfg_s *tnlcfg,
+			   const struct tunnel_cfg_s *tnlcfg_mask)
+{
+	/* tunnel type must match before any address comparison */
+	if (tun->cfg.tun_type != tnlcfg->tun_type)
+		return 0;
+
+	if (tun->cfg.ipversion == 4)
+		return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+					  tnlcfg, tnlcfg_mask);
+
+	return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+				  tnlcfg, tnlcfg_mask);
+}
+
+/*
+ * Look up a tunnel in the DB, creating it when not found.
+ *
+ * tun_set != 0 means a full "tunnel set" definition: search for an exact
+ * duplicate, or promote a matching pre-configured entry to a defined one.
+ * tun_set == 0 is a plain match search. A found entry gets its reference
+ * count bumped; a newly created entry starts with refcnt 1.
+ *
+ * @return DB entry, or NULL when out of vports or out of memory
+ */
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+				   const struct tunnel_cfg_s *tnlcfg_mask,
+				   int tun_set)
+{
+	struct tunnel_s *tun = tunnels;
+
+	while (tun) {
+		if (tun->flow_stat_id != (uint32_t)-1) {
+			/* This tun is already defined and set */
+			if (tun_set) {
+				/*
+				 * A tunnel full match definition - search for duplicate
+				 */
+				if (memcmp(&tun->cfg, tnlcfg,
+						sizeof(struct tunnel_cfg_s)) == 0 &&
+						memcmp(&tun->cfg_mask, tnlcfg_mask,
+						       sizeof(struct tunnel_cfg_s)) == 0)
+					break;
+			} else {
+				/*
+				 * A tunnel match search
+				 */
+				if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+					break;
+			}
+
+		} else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel for this one to be set
+			 * try match them
+			 */
+			if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+				/*
+				 * Change the tun into the defining one - flow_stat_id is set later
+				 */
+				memcpy(&tun->cfg, tnlcfg,
+				       sizeof(struct tunnel_cfg_s));
+				memcpy(&tun->cfg_mask, tnlcfg_mask,
+				       sizeof(struct tunnel_cfg_s));
+
+				break;
+			}
+
+		} /* else ignore - both unset */
+		tun = tun->next;
+	}
+
+	/*
+	 * If not found, create and add it to db
+	 */
+	if (!tun) {
+		uint8_t vport = flow_tunnel_alloc_virt_port();
+
+		NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+		       vport);
+
+		if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun) {
+				/* allocation failed - hand the vport back */
+				flow_tunnel_free_virt_port(vport);
+				return NULL;
+			}
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+			/* flow_stat_id is set later from flow code */
+			tun->flow_stat_id = (uint32_t)-1;
+			tun->vport = vport;
+			tun->refcnt = 1;
+
+			tun->next = tunnels;
+			tunnels = tun;
+		}
+	} else {
+		tun->refcnt++;
+		NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+		       tun->vport, tun->refcnt);
+	}
+
+	return tun;
+}
+
+/*
+ * Drop one reference on a tunnel DB entry; when the last reference goes,
+ * unlink the entry, release its virtual port and free it.
+ *
+ * @return 0 on success, -1 when tnl is not in the DB
+ */
+int tunnel_release(struct tunnel_s *tnl)
+{
+	struct tunnel_s *prev = NULL;
+	struct tunnel_s *tun;
+
+	NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+	       tnl->vport, tnl->refcnt);
+
+	/* locate the entry, remembering its predecessor for unlinking */
+	for (tun = tunnels; tun; prev = tun, tun = tun->next) {
+		if (tun == tnl)
+			break;
+	}
+
+	if (!tun) {
+		NT_LOG(DBG, FILTER,
+		       "ERROR: Tunnel not found in tunnel release!\n");
+		return -1;
+	}
+
+	if (--tun->refcnt != 0)
+		return 0;
+
+	/* last reference gone - unlink, free the vport and the entry */
+	if (prev)
+		prev->next = tun->next;
+	else
+		tunnels = tun->next;
+	flow_tunnel_free_virt_port(tun->vport);
+
+	NT_LOG(DBG, FILTER,
+	       "tunnel ref count == 0 remove tunnel vport %i\n",
+	       tun->vport);
+	free(tun);
+
+	return 0;
+}
+
+/*
+ * Parse a tunnel definition from a flow element list.
+ *
+ * Walks the elements from *idx until FLOW_ELEM_TYPE_END (or an element out
+ * of protocol order), builds a tunnel_cfg_s spec/mask pair and looks it up
+ * in - or adds it to - the tunnel DB via tunnel_get().
+ *
+ * @param elem flow element list; NULL yields a NULL return
+ * @param idx  in: element index to start at; out: index of END/next element
+ * @param vni  out: VXLAN VNI if present, (uint32_t)-1 otherwise. A NULL vni
+ *             marks this call as a "tunnel set" command for tunnel_get().
+ * @return tunnel DB entry, or NULL on invalid/unsupported tunnel definition
+ */
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+			      uint32_t *vni)
+{
+	int eidx = *idx;
+	struct tunnel_cfg_s tnlcfg;
+	struct tunnel_cfg_s tnlcfg_mask;
+	struct tunnel_s *rtnl = NULL;
+
+	if (elem) {
+		eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		/*
+		 * Clear the mask too - it was previously left uninitialized,
+		 * so mask fields of elements not present in the list held
+		 * indeterminate values later read by tunnel_get()/memcmp()
+		 */
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+		int valid = 1;
+		enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+		tnlcfg.d_port = 0xffff;
+		tnlcfg.tun_type = -1;
+
+		if (vni)
+			*vni = (uint32_t)-1;
+
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+				elem[eidx].type >= last_type && valid) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ANY:
+			case FLOW_ELEM_TYPE_ETH:
+				/* Ignore */
+				break;
+			case FLOW_ELEM_TYPE_IPV4: {
+				const struct flow_elem_ipv4 *ipv4 =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv4 *ipv4_mask =
+					(const struct flow_elem_ipv4 *)elem[eidx]
+					.mask;
+
+				tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+				tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+				tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+				tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+				tnlcfg.ipversion = 4;
+			}
+			break;
+			case FLOW_ELEM_TYPE_IPV6: {
+				const struct flow_elem_ipv6 *ipv6 =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.spec;
+				const struct flow_elem_ipv6 *ipv6_mask =
+					(const struct flow_elem_ipv6 *)elem[eidx]
+					.mask;
+
+				memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+				memcpy(tnlcfg_mask.v6.src_ip,
+				       ipv6_mask->hdr.src_addr,
+				       sizeof(tnlcfg.v6.src_ip));
+				memcpy(tnlcfg_mask.v6.dst_ip,
+				       ipv6_mask->hdr.dst_addr,
+				       sizeof(tnlcfg.v6.dst_ip));
+
+				tnlcfg.ipversion = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				const struct flow_elem_udp *udp =
+					(const struct flow_elem_udp *)elem[eidx]
+					.spec;
+				const struct flow_elem_udp *udp_mask =
+					(const struct flow_elem_udp *)elem[eidx]
+					.mask;
+
+				tnlcfg.s_port = udp->hdr.src_port;
+				tnlcfg.d_port = udp->hdr.dst_port;
+				tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+				tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					elem[eidx]
+					.spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)
+							   vxlan->vni[0]
+							   << 16) |
+							  ((uint32_t)
+							   vxlan->vni[1]
+							   << 8) |
+							  ((uint32_t)vxlan
+							   ->vni[2]));
+
+				tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+			}
+			break;
+			default:
+				valid = 0;
+				break;
+			}
+
+			last_type = elem[eidx].type;
+			eidx++;
+		}
+
+		/*
+		 * vxlan ports : 4789 or 8472
+		 */
+		if (tnlcfg.tun_type < 0 &&
+				(tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+		if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+				tnlcfg.d_port == 0xffff) {
+			NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+			return NULL;
+		}
+
+		/* search/add to DB */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask,
+				  vni ? 0 :
+				  1); /* if vni == NULL it is a tun set command */
+
+#ifdef FLOW_DEBUG
+		if (rtnl) {
+			if (vni)
+				NT_LOG(DBG, FILTER,
+				       "MATCH A TUNNEL DEFINITION - PRESET "
+				       "(PREALLOC VPORT) IF NOT FOUND:\n");
+			else
+				NT_LOG(DBG, FILTER,
+				       "SET A TUNNEL DEFINITION:\n");
+			struct in_addr addr, mask;
+			char buf[64];
+
+			addr.s_addr = rtnl->cfg.v4.src_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+			NT_LOG(DBG, FILTER, "    tun src IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			addr.s_addr = rtnl->cfg.v4.dst_ip;
+			sprintf(buf, "%s", inet_ntoa(addr));
+			mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+			NT_LOG(DBG, FILTER, "    tun dst IP: %s / %s\n", buf,
+			       inet_ntoa(mask));
+			NT_LOG(DBG, FILTER, "    tun tp_src: %i / %04x\n",
+			       htons(rtnl->cfg.s_port),
+			       htons(rtnl->cfg_mask.s_port));
+			NT_LOG(DBG, FILTER, "    tun tp_dst: %i / %04x\n",
+			       htons(rtnl->cfg.d_port),
+			       htons(rtnl->cfg_mask.d_port));
+			NT_LOG(DBG, FILTER, "    tun ipver:  %i\n",
+			       rtnl->cfg.ipversion);
+			NT_LOG(DBG, FILTER, "    tun flow_stat_id: %i\n",
+			       rtnl->flow_stat_id);
+			NT_LOG(DBG, FILTER, "    tun vport:  %i\n",
+			       rtnl->vport);
+			NT_LOG(DBG, FILTER, "    tun refcnt: %i\n",
+			       rtnl->refcnt);
+		}
+#endif
+
+		*idx = eidx; /* pointing to next or END */
+	}
+
+	return rtnl;
+}
+
+/* Return the virtual port number allocated for this tunnel DB entry. */
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+	return rtnl->vport;
+}
+
+/* Attach a flow statistics id to a tunnel DB entry (set later by flow code). */
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+	rtnl->flow_stat_id = flow_stat_id;
+}
+
+/*
+ * Copy out the config of the tunnel matching vport (and, unless it is the
+ * (uint32_t)-1 wildcard, flow_stat_id).
+ *
+ * @return 0 and *tuncfg filled on success, -1 when no entry matches
+ */
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+			  uint8_t vport)
+{
+	for (struct tunnel_s *tun = tunnels; tun; tun = tun->next) {
+		if (tun->vport != vport)
+			continue;
+		if (flow_stat_id != tun->flow_stat_id &&
+				flow_stat_id != (uint32_t)-1)
+			continue;
+
+		memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Fold a buffer into a 16-bit one's-complement style sum (IP checksum
+ * arithmetic, cf. RFC 1071), continuing from 'seed'.
+ *
+ * NOTE(review): for an odd 'size' the trailing byte is taken as the
+ * low-order byte of the 16-bit word at data[size / 2] - this presumes a
+ * particular host byte order for the final byte; confirm against the
+ * checksum consumer (tun_hdr precalc in flow_tunnel_create_vxlan_hdr).
+ */
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+			      be16_t seed)
+{
+	unsigned int sum = seed;
+	unsigned int idx;
+
+	for (idx = 0; idx < size / 2; idx++)
+		sum += (unsigned int)(data[idx]);
+	if (size & 1)
+		sum += (unsigned char)data[idx];
+	/* unfold */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (be16_t)sum;
+}
+
+/*
+ * Copy 'size' bytes of the element's spec into 'result'. The element's
+ * mask is intentionally not applied (hence "unmasked").
+ */
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+			  uint8_t size)
+{
+	memcpy(result, elem->spec, size);
+}
+
+/*
+ * Build a VXLAN encapsulation header (ETH [/ IPv4 | IPv6] / UDP / VXLAN)
+ * into fd->tun_hdr from the given flow element list, normalizing fields
+ * (IP version/ihl, TTL/hop limit, UDP proto, zero UDP checksum, VXLAN
+ * I-flag) and precomputing the partial IPv4 header checksum.
+ *
+ * @return 0 on success, -1 on unsupported elements or too-old ROA version
+ */
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+				 struct nic_flow_def *fd,
+				 const struct flow_elem *elem)
+{
+	uint32_t eidx = 0;
+	uint8_t size;
+	struct ipv4_hdr_s *tun_ipv4 = NULL;
+	uint16_t *tun_hdr_eth_type_p = NULL;
+
+	if (elem) {
+		while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+			switch (elem[eidx].type) {
+			case FLOW_ELEM_TYPE_ETH: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+				struct flow_elem_eth eth;
+
+				size = sizeof(struct flow_elem_eth);
+
+				copy_unmasked((uint8_t *)&eth, &elem[eidx],
+					      size);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &eth, size);
+
+				/*
+				 * Save a pointer to the tun header ethtype field
+				 * (needed later in the IPv4 and IPv6 flow elem cases)
+				 */
+				tun_hdr_eth_type_p =
+					(uint16_t *)&fd->tun_hdr.d
+					.hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+				/* fixed: byte 4 was logged as byte 5 twice */
+				NT_LOG(DBG, FILTER,
+				       "dmac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.d_addr.addr_b[0],
+				       eth.d_addr.addr_b[1],
+				       eth.d_addr.addr_b[2],
+				       eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER,
+				       "smac   : %02x:%02x:%02x:%02x:%02x:%02x\n",
+				       eth.s_addr.addr_b[0],
+				       eth.s_addr.addr_b[1],
+				       eth.s_addr.addr_b[2],
+				       eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+				NT_LOG(DBG, FILTER, "type   : %04x\n",
+				       ntohs(eth.ether_type));
+#endif
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+			/* VLAN is not supported */
+
+			case FLOW_ELEM_TYPE_IPV4: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV4\n");
+				struct flow_elem_ipv4 ipv4;
+
+				size = sizeof(struct flow_elem_ipv4);
+
+				copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+					      size);
+
+				if (ipv4.hdr.version_ihl != 0x45)
+					ipv4.hdr.version_ihl = 0x45;
+
+				if (ipv4.hdr.ttl == 0)
+					ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id !=
+						17)   /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset =
+					htons(1 << 14); /* DF flag */
+
+				size = sizeof(struct ipv4_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv4.hdr, size);
+
+				/* Set the tun header ethtype field to IPv4 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x0800); /* IPv4 */
+				}
+
+				tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+					   .hdr8[fd->tun_hdr.len];
+
+				NT_LOG(DBG, FILTER, "v_ihl  : %02x\n",
+				       tun_ipv4->version_ihl);
+				NT_LOG(DBG, FILTER, "tos    : %02x\n",
+				       tun_ipv4->tos);
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(tun_ipv4->length));
+				NT_LOG(DBG, FILTER, "id     : %02x\n",
+				       tun_ipv4->id);
+				NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+				       ntohs(tun_ipv4->frag_offset));
+				NT_LOG(DBG, FILTER, "ttl    : %02x\n",
+				       tun_ipv4->ttl);
+				NT_LOG(DBG, FILTER, "prot   : %02x\n",
+				       tun_ipv4->next_proto_id);
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(tun_ipv4->hdr_csum));
+				NT_LOG(DBG, FILTER, "src    : %d.%d.%d.%d\n",
+				       (tun_ipv4->src_ip & 0xff),
+				       ((tun_ipv4->src_ip >> 8) & 0xff),
+				       ((tun_ipv4->src_ip >> 16) & 0xff),
+				       ((tun_ipv4->src_ip >> 24) & 0xff));
+				NT_LOG(DBG, FILTER, "dst    : %d.%d.%d.%d\n",
+				       (tun_ipv4->dst_ip & 0xff),
+				       ((tun_ipv4->dst_ip >> 8) & 0xff),
+				       ((tun_ipv4->dst_ip >> 16) & 0xff),
+				       ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 4;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_IPV6: {
+				if (be->roa.ver < 6) {
+					NT_LOG(ERR, FILTER,
+					       "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+					       be->roa.ver);
+					return -1;
+				}
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel:  RTE_FLOW_ITEM_TYPE_IPV6\n");
+				struct flow_elem_ipv6 ipv6;
+
+				size = sizeof(struct flow_elem_ipv6);
+
+				copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+					      size);
+
+				/*
+				 * Make sure the version field (the 4 most significant bits of
+				 * "vtc_flow") is set to 6
+				 */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) ==
+						0) {
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* Version = 6 */
+				}
+
+				if (ipv6.hdr.proto != 17)   /* must be UDP */
+					ipv6.hdr.proto = 17;
+
+				if (ipv6.hdr.hop_limits == 0)
+					ipv6.hdr.hop_limits = 64;
+
+				size = sizeof(struct ipv6_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &ipv6.hdr, size);
+
+				/* Set the tun header ethtype field to IPv6 (if empty) */
+				if (tun_hdr_eth_type_p &&
+						(*tun_hdr_eth_type_p == 0)) {
+					*tun_hdr_eth_type_p =
+						htons(0x86DD); /* IPv6 */
+				}
+
+				NT_LOG(DBG, FILTER, "vtc_flow    : %08x\n",
+				       ntohl(ipv6.hdr.vtc_flow));
+				NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+				       ntohs(ipv6.hdr.payload_len));
+				NT_LOG(DBG, FILTER, "proto       : %02x\n",
+				       ipv6.hdr.proto);
+				NT_LOG(DBG, FILTER, "hop_limits  : %02x\n",
+				       ipv6.hdr.hop_limits);
+				NT_LOG(DBG, FILTER,
+				       "src         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.src_addr[0],
+				       ipv6.hdr.src_addr[1],
+				       ipv6.hdr.src_addr[2],
+				       ipv6.hdr.src_addr[3],
+				       ipv6.hdr.src_addr[4],
+				       ipv6.hdr.src_addr[5],
+				       ipv6.hdr.src_addr[6],
+				       ipv6.hdr.src_addr[7],
+				       ipv6.hdr.src_addr[8],
+				       ipv6.hdr.src_addr[9],
+				       ipv6.hdr.src_addr[10],
+				       ipv6.hdr.src_addr[11],
+				       ipv6.hdr.src_addr[12],
+				       ipv6.hdr.src_addr[13],
+				       ipv6.hdr.src_addr[14],
+				       ipv6.hdr.src_addr[15]);
+				NT_LOG(DBG, FILTER,
+				       "dst         : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+				       ipv6.hdr.dst_addr[0],
+				       ipv6.hdr.dst_addr[1],
+				       ipv6.hdr.dst_addr[2],
+				       ipv6.hdr.dst_addr[3],
+				       ipv6.hdr.dst_addr[4],
+				       ipv6.hdr.dst_addr[5],
+				       ipv6.hdr.dst_addr[6],
+				       ipv6.hdr.dst_addr[7],
+				       ipv6.hdr.dst_addr[8],
+				       ipv6.hdr.dst_addr[9],
+				       ipv6.hdr.dst_addr[10],
+				       ipv6.hdr.dst_addr[11],
+				       ipv6.hdr.dst_addr[12],
+				       ipv6.hdr.dst_addr[13],
+				       ipv6.hdr.dst_addr[14],
+				       ipv6.hdr.dst_addr[15]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+				fd->tun_hdr.ip_version = 6;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_UDP: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+				struct flow_elem_udp udp;
+
+				size = sizeof(struct flow_elem_udp);
+
+				copy_unmasked((uint8_t *)&udp, &elem[eidx],
+					      size);
+
+				udp.hdr.cksum =
+					0; /* set always the UDP checksum to 0 */
+
+				size = sizeof(struct udp_hdr_s);
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &udp.hdr, size);
+
+				NT_LOG(DBG, FILTER, "src p  : %d\n",
+				       ntohs(udp.hdr.src_port));
+				NT_LOG(DBG, FILTER, "dst p  : %d\n",
+				       ntohs(udp.hdr.dst_port));
+				NT_LOG(DBG, FILTER, "len    : %d\n",
+				       ntohs(udp.hdr.len));
+				NT_LOG(DBG, FILTER, "chksum : %04x\n",
+				       ntohs(udp.hdr.cksum));
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VXLAN: {
+				struct flow_elem_vxlan vxlan_m;
+
+				size = sizeof(struct flow_elem_vxlan);
+
+				copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+					      size);
+
+				vxlan_m.flags =
+					0x08; /* set always I-flag - valid VNI */
+
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+				       &vxlan_m, size);
+
+				NT_LOG(DBG, FILTER, "flags  : %02x\n",
+				       vxlan_m.flags);
+				NT_LOG(DBG, FILTER, "vni    : %d\n",
+				       (vxlan_m.vni[0] << 16) +
+				       (vxlan_m.vni[1] << 8) +
+				       vxlan_m.vni[2]);
+
+				fd->tun_hdr.len =
+					(uint8_t)(fd->tun_hdr.len + size);
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_PORT_ID: {
+				const struct flow_elem_port_id *port =
+					(const struct flow_elem_port_id *)
+					elem[eidx]
+					.spec;
+				fd->tun_hdr.user_port_id = port->id;
+			}
+			break;
+
+			case FLOW_ELEM_TYPE_VOID: {
+				NT_LOG(DBG, FILTER,
+				       "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+			}
+			break;
+
+			default:
+				NT_LOG(INF, FILTER,
+				       "unsupported Tunnel flow element type %u\n",
+				       elem[eidx].type);
+				return -1;
+			}
+
+			eidx++;
+		}
+	}
+
+	if (tun_ipv4) {
+		/* precompute the partial IPv4 checksum over the fixed fields */
+		tun_ipv4->hdr_csum = 0;
+		tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum((const be16_t *)&fd->tun_hdr.d
+			.hdr8[14],
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len - sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       fd->tun_hdr.len - sizeof(struct flow_elem_eth));
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
new file mode 100644
index 0000000000..5a62343718
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count);
+
+/* Return true when the backend device exposes a CAT (categorizer) module. */
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_cat_present(be->be_dev);
+}
+
+/*
+ * Query the CAT module version and resource counts from the backend,
+ * then allocate the version-specific (v18/v21/v22) register shadow
+ * cache as one contiguous area via callocate_mod().
+ * Returns 0 on success, or a negative error when a resource count is
+ * invalid, the version is unsupported, or allocation fails (-1).
+ */
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_cat_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "CAT MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+	be->cat.nb_cat_funcs = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_flow_types(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+	be->cat.nb_flow_types = (uint32_t)nb;
+
+	nb = be->iface->get_nb_pm_ext(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+	be->cat.nb_pm_ext = (uint32_t)nb;
+
+	nb = be->iface->get_nb_len(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "len", _MOD_, _VER_);
+	be->cat.nb_len = (uint32_t)nb;
+
+	/* KCC size/banks and KM interface counts may legitimately be zero,
+	 * hence the weaker "< 0" checks below.
+	 */
+	nb = be->iface->get_kcc_size(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+	be->cat.kcc_size = (uint32_t)nb;
+
+	nb = be->iface->get_kcc_banks(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+	be->cat.kcc_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+	if (nb < 0)
+		return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+	be->cat.km_if_count = (uint32_t)nb;
+
+	int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+	be->cat.km_if_m0 = idx;
+
+	idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+	be->cat.km_if_m1 = idx;
+
+	/* Guard against division by zero when no KCC banks are present */
+	if (be->cat.kcc_banks)
+		be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+	else
+		be->cat.kcc_records = 0;
+
+	be->cat.kcc_id_bit_size = 10;
+
+	/* Allocate the per-version shadow tables; the (pointer, count, size)
+	 * triplets below must match the struct layout for each version.
+	 */
+	switch (_VER_) {
+	case 18:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v18.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cfn_s),
+			&be->cat.v18.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v18_kce_s),
+			&be->cat.v18.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_kcs_s),
+			&be->cat.v18.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+			sizeof(struct cat_v18_fte_s),
+			&be->cat.v18.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v18.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v18.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v18.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v18.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v18.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v18.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v18.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 18 */
+	case 21:
+		be->cat.cts_num = 11;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+			&be->cat.v21.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v21.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v21.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v21.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v21.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v18_cte_s),
+			&be->cat.v21.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v21.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v21.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v21.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v21.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v21.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v21.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s)))
+			return -1;
+
+		break;
+	/* end case 21 */
+	case 22:
+		be->cat.cts_num = 12;
+		if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+			&be->cat.v22.cfn,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_cfn_s),
+			&be->cat.v22.kce,
+			(be->cat.nb_cat_funcs / 8),
+			sizeof(struct cat_v21_kce_s),
+			&be->cat.v22.kcs,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v21_kcs_s),
+			&be->cat.v22.fte,
+			(be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+			sizeof(struct cat_v21_fte_s),
+			&be->cat.v22.cte,
+			be->cat.nb_cat_funcs,
+			sizeof(struct cat_v22_cte_s),
+			&be->cat.v22.cts,
+			be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+			sizeof(struct cat_v18_cts_s),
+			&be->cat.v22.cot,
+			be->max_categories,
+			sizeof(struct cat_v18_cot_s),
+			&be->cat.v22.cct,
+			be->max_categories * 4,
+			sizeof(struct cat_v18_cct_s),
+			&be->cat.v22.exo,
+			be->cat.nb_pm_ext,
+			sizeof(struct cat_v18_exo_s),
+			&be->cat.v22.rck,
+			be->cat.nb_pm_ext * 64,
+			sizeof(struct cat_v18_rck_s),
+			&be->cat.v22.len,
+			be->cat.nb_len,
+			sizeof(struct cat_v18_len_s),
+			&be->cat.v22.kcc_cam,
+			be->cat.kcc_size,
+			sizeof(struct cat_v18_kcc_s),
+			&be->cat.v22.cce,
+			4,
+			sizeof(struct cat_v22_cce_s),
+			&be->cat.v22.ccs,
+			1024,
+			sizeof(struct cat_v22_ccs_s)))
+			return -1;
+
+		break;
+	/* end case 22 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the CAT register cache allocated by hw_mod_cat_alloc().
+ * The whole cache is one allocation, so a single free() suffices.
+ */
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+	if (be->cat.base) {
+		free(be->cat.base);
+		be->cat.base = NULL;
+	}
+}
+
+/*
+ * Reset CFN entry i to an "accept everything" default: clear the entry
+ * (PRESET_ALL), then open all protocol/error match fields so the entry
+ * matches any frame.  Version >= 21 has extra fields (KM1, tunnel
+ * checksum/TTL errors) that are also opened.
+ * Returns the result of the initial PRESET_ALL set.
+ */
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+			   0xffffffff); /* accept both ISL or not ISL */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+			   0xffffffff); /* accept both CFP or not CFP */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+			   0xffffffff); /* accept all MACs */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+			   0xffffffff); /* accept all L2 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+			   0xffffffff); /* accept all L3 prot */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+			   0xffffffff); /* accept all fragments */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+			   0xffffffff); /* IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+			   0xffffffff); /* inner IP prot check disabled */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+			   0xffffffff); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+			   0xffffffff); /* accept all truncations */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1); /*  */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+			   0xffffffff); /* or all */
+	if (_VER_ >= 21) {
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+		hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+				   0xffffffff); /* or all */
+	}
+	return err;
+}
+
+/*
+ * Zero the entire CAT shadow cache and flush every table to hardware.
+ * For version <= 18 there is a single KM interface; for newer versions
+ * KCE/KCS/FTE are flushed once per configured KM interface (m0, and m1
+ * when km_if_count > 1).  KCC and CCE/CCS are flushed only when the
+ * version/configuration supports them.
+ * Returns 0 on success, -1 on the first flush failure.
+ */
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->cat);
+
+	NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+	if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (_VER_ <= 18) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+					 ALL_ENTRIES))
+			return -1;
+	} else {
+		NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+		if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+		if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+		if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+					 0, ALL_ENTRIES))
+			return -1;
+
+		if (be->cat.km_if_count > 1) {
+			NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+			if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+			if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+
+			NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+			if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+						 be->cat.km_if_m1, 0,
+						 ALL_ENTRIES))
+				return -1;
+		}
+	}
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+	if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+	if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+	if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+	if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+	if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+	if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+	if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+		return -1;
+
+	if (be->cat.kcc_size) {
+		NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+		if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	if (_VER_ > 21) {
+		NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+		if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+			return -1;
+		NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+		if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Flush CFN cache entries [start_idx, start_idx + count) to hardware.
+ * count == ALL_ENTRIES flushes the whole table (start_idx must be 0).
+ * Returns the backend flush result, or an error on an out-of-range span.
+ */
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	switch (count) {
+	case ALL_ENTRIES:
+		if (start_idx != 0)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						be->cat.nb_cat_funcs);
+
+	default:
+		if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+			return error_index_too_large(__func__);
+		return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+						count);
+	}
+}
+
+/*
+ * Get or set a single CFN field in the shadow cache (get != 0 reads the
+ * field into *value; otherwise *value is written to it).  Dispatches on
+ * the module version (v18 vs v21/v22 layouts) and handles the pseudo-
+ * fields SET_ALL_DEFAULTS, PRESET_ALL, COMPARE, FIND and (v21+)
+ * COPY_FROM.  word_off selects the word for multi-word fields (PM_CMP).
+ * Returns 0 on success or a negative error code.
+ */
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v18.cfn,
+				sizeof(struct cat_v18_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v18.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v18.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v18.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v18.cfn[index].km_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CFN_SET_ALL_DEFAULTS:
+			if (get)
+				return error_unsup_field(__func__);
+			return cfn_reset(be, index);
+		case HW_CAT_CFN_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_COMPARE:
+			rv = do_compare_indexes(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_FIND:
+			rv = find_equal_index(be->cat.v21.cfn,
+				sizeof(struct cat_v21_cfn_s), index, word_off,
+				be->cat.nb_cat_funcs, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_CFN_COPY_FROM:
+			if (get)
+				return error_unsup_field(__func__);
+			memcpy(&be->cat.v21.cfn[index],
+			       &be->cat.v21.cfn[*value],
+			       sizeof(struct cat_v21_cfn_s));
+			break;
+		case HW_CAT_CFN_ENABLE:
+			get_set(&be->cat.v21.cfn[index].enable, value, get);
+			break;
+		case HW_CAT_CFN_INV:
+			get_set(&be->cat.v21.cfn[index].inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_INV:
+			get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+			break;
+		case HW_CAT_CFN_PTC_ISL:
+			get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+			break;
+		case HW_CAT_CFN_PTC_CFP:
+			get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MAC:
+			get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VNTAG:
+			get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TUNNEL:
+			get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L2:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_VLAN:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_MPLS:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L3:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_FRAG:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_IP_PROT:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+			break;
+		case HW_CAT_CFN_PTC_TNL_L4:
+			get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+			break;
+		case HW_CAT_CFN_ERR_INV:
+			get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_CV:
+			get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+			break;
+		case HW_CAT_CFN_ERR_FCS:
+			get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TRUNC:
+			get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L3_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_L4_CS:
+			get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+			get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+			break;
+		case HW_CAT_CFN_MAC_PORT:
+			get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMP:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+			break;
+		case HW_CAT_CFN_PM_DCT:
+			get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+			break;
+		case HW_CAT_CFN_PM_EXT_INV:
+			get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_CMB:
+			get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+			break;
+		case HW_CAT_CFN_PM_AND_INV:
+			get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_OR_INV:
+			get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+			break;
+		case HW_CAT_CFN_PM_INV:
+			get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+			break;
+		case HW_CAT_CFN_LC:
+			get_set(&be->cat.v21.cfn[index].lc, value, get);
+			break;
+		case HW_CAT_CFN_LC_INV:
+			get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+			break;
+		case HW_CAT_CFN_KM0_OR:
+			get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+			break;
+		case HW_CAT_CFN_KM1_OR:
+			get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Write a CFN field value (see hw_mod_cat_cfn_mod for field semantics). */
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read a CFN field value into *value. */
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Map (interface selector, KM interface id) to the shadow-cache slot
+ * index (0 or 1).  Version 18 has a single interface, always slot 0.
+ * KM_FLM_IF_SECOND only accepts the configured m1 id; otherwise both
+ * m0 and m1 are matched.  Returns a negative error (error_unsup_field)
+ * when the id matches no configured interface.
+ */
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+				   enum km_flm_if_select_e if_num, int km_if_id)
+{
+	int km_if_idx;
+
+	if (_VER_ == 18) {
+		km_if_idx = 0;
+	} else {
+		if (if_num == KM_FLM_IF_SECOND) {
+			if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		} else {
+			if (be->cat.km_if_m0 == km_if_id)
+				km_if_idx = 0;
+			else if (be->cat.km_if_m1 == km_if_id)
+				km_if_idx = 1;
+			else
+				return error_unsup_field(__func__);
+		}
+	}
+	return km_if_idx;
+}
+
+/*
+ * KCE
+ */
+
+/*
+ * Flush KCE cache entries to hardware for one KM interface.
+ * The KCE table is packed 8 CFNs per entry, so indices and counts are
+ * in units of nb_cat_funcs / 8.
+ */
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	/* writes 8 bits - one for each cfn - at a time */
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCE entries for the KM module (KM interface id 0). */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCE entries for the FLM module (KM interface id 1). */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set a KCE field for one KM interface (get != 0 reads into
+ * *value).  In v18 there is a single enable bitmap; v21/v22 keep one
+ * bitmap per KM interface.  Returns 0 or a negative error.
+ */
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCE_ENABLE_BM:
+			get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* KCE set/get wrappers: KM uses interface id 0, FLM uses id 1. */
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * KCS
+ */
+/* Flush KCS cache entries to hardware for one KM interface; the KCS
+ * table has one entry per CFN (nb_cat_funcs).
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush KCS entries for the KM module (KM interface id 0). */
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush KCS entries for the FLM module (KM interface id 1). */
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set a KCS field for one KM interface (get != 0 reads into
+ * *value).  v18 has one category per entry; v21/v22 keep one per KM
+ * interface.  Returns 0 or a negative error.
+ */
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v18.kcs[index].category, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCS_CATEGORY:
+			get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* KCS set/get wrappers: KM uses interface id 0, FLM uses id 1. */
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+/*
+ * Flush FTE cache entries to hardware for one KM interface.
+ * The FTE table size depends on the version: 2 keys per flow type for
+ * versions below 20, 4 keys for 20 and above.
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+				enum km_flm_if_select_e if_num, int km_if_id,
+				int start_idx, int count)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+			key_cnt;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+					start_idx, count);
+}
+
+/* Flush FTE entries for the KM module (KM interface id 0). */
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+			    enum km_flm_if_select_e if_num, int start_idx,
+			    int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+/* Flush FTE entries for the FLM module (KM interface id 1). */
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+			     enum km_flm_if_select_e if_num, int start_idx,
+			     int count)
+{
+	return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+/*
+ * Get or set an FTE field for one KM interface (get != 0 reads into
+ * *value).  Index range depends on the version's keys-per-flow-type
+ * (2 below version 20, else 4).  v21/v22 keep one enable bitmap per
+ * KM interface.  Returns 0 or a negative error.
+ */
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field,
+			      enum km_flm_if_select_e if_num, int km_if_id,
+			      int index, uint32_t *value, int get)
+{
+	const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+	if ((unsigned int)index >=
+			(be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+		return error_index_too_large(__func__);
+	/* find KM module */
+	int km_if_idx;
+
+	km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+	if (km_if_idx < 0)
+		return km_if_idx;
+
+	switch (_VER_) {
+	case 18:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18 */
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_FTE_ENABLE_BM:
+			get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+				value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			  enum km_flm_if_select_e if_num, int index,
+			  uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+			   enum km_flm_if_select_e if_num, int index,
+			   uint32_t *value)
+{
+	return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * CTE (category table enable) table: nb_cat_funcs entries.
+ * Flush pushes [start_idx, start_idx + count) to hardware.
+ */
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set (get == 0) or get (get == 1) one CTE field in the shadow table.
+ * Versions 18/21/22 all use the v18 record layout for this table, so
+ * the v18 union member is accessed regardless of version.
+ */
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTE_ENABLE_BM:
+			get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
+
+/*
+ * CTS (category table select) table: nb_cat_funcs * addr_size entries,
+ * where addr_size is (cts_num + 1) / 2.
+ */
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	/*
+	 * NOTE(review): the "_VER_ < 15" arm here has no counterpart in
+	 * hw_mod_cat_cts_mod() below, and only versions 18/21/22 are
+	 * otherwise supported in this module — looks like dead code;
+	 * confirm before relying on it.
+	 */
+	int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * addr_size;
+	if ((unsigned int)(start_idx + count) >
+			(be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+	return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Set (get == 0) or get (get == 1) one CTS field in the shadow table. */
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int addr_size = (be->cat.cts_num + 1) / 2;
+
+	if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CTS_CAT_A:
+			get_set(&be->cat.v18.cts[index].cat_a, value, get);
+			break;
+		case HW_CAT_CTS_CAT_B:
+			get_set(&be->cat.v18.cts[index].cat_b, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
+
+/* COT (color table) table: max_categories entries. */
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set (get == 0) or get (get == 1) one COT field.
+ *
+ * Besides plain field access this also supports pseudo-fields:
+ *  - PRESET_ALL: fill the whole record with the low byte of *value
+ *    (set-only; get is rejected);
+ *  - COMPARE: compare the record at 'index' with the one at '*value';
+ *  - FIND: search for a record equal to the one at 'index', returning
+ *    the match through *value.
+ */
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	int rv = 0;
+	if ((unsigned int)index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_COT_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->cat.v18.cot[index], (uint8_t)*value,
+			       sizeof(struct cat_v18_cot_s));
+			break;
+		case HW_CAT_COT_COMPARE:
+			rv = do_compare_indexes(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_FIND:
+			rv = find_equal_index(be->cat.v18.cot,
+				sizeof(struct cat_v18_cot_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_CAT_COT_COLOR:
+			get_set(&be->cat.v18.cot[index].color, value, get);
+			break;
+		case HW_CAT_COT_KM:
+			get_set(&be->cat.v18.cot[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+/* CCT (color control table): nb_cat_funcs * 4 entries. */
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_cat_funcs * 4;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Set (get == 0) or get (get == 1) one CCT field in the shadow table. */
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_CCT_COLOR:
+			get_set(&be->cat.v18.cct[index].color, value, get);
+			break;
+		case HW_CAT_CCT_KM:
+			get_set(&be->cat.v18.cct[index].km, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+/* KCC CAM table: kcc_size entries. */
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.kcc_size;
+	if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set (get == 0) or get (get == 1) one KCC CAM field.
+ *
+ * @word_off: selects the 32-bit word of the 2-word key for
+ * HW_CAT_KCC_KEY; must be 0 or 1. Ignored for the other fields.
+ */
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, int word_off,
+			      uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->cat.kcc_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_KCC_KEY:
+			if (word_off > 1)
+				return error_word_off_too_large(__func__);
+			get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+				get);
+			break;
+
+		case HW_CAT_KCC_CATEGORY:
+			get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+			break;
+
+		case HW_CAT_KCC_ID:
+			get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, int word_off, uint32_t *value)
+{
+	return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
+
+/* EXO (extractor offset) table: nb_pm_ext entries. */
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/*
+ * Set (get == 0) or get (get == 1) one EXO field. The 'ofs' member is
+ * a signed int32_t, hence get_set_signed().
+ */
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_pm_ext)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_EXO_DYN:
+			get_set(&be->cat.v18.exo[index].dyn, value, get);
+			break;
+		case HW_CAT_EXO_OFS:
+			get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+/* RCK table: nb_pm_ext * 64 entries. */
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_pm_ext * 64;
+	if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Set (get == 0) or get (get == 1) one RCK data word. */
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_RCK_DATA:
+			get_set(&be->cat.v18.rck[index].rck_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+/* LEN (frame length check) table: nb_len entries. */
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->cat.nb_len;
+	if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+		return error_index_too_large(__func__);
+	return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+/* Set (get == 0) or get (get == 1) one LEN field in the shadow table. */
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 18:
+	case 21:
+	case 22:
+		switch (field) {
+		case HW_CAT_LEN_LOWER:
+			get_set(&be->cat.v18.len[index].lower, value, get);
+			break;
+		case HW_CAT_LEN_UPPER:
+			get_set(&be->cat.v18.len[index].upper, value, get);
+			break;
+		case HW_CAT_LEN_DYN1:
+			get_set(&be->cat.v18.len[index].dyn1, value, get);
+			break;
+		case HW_CAT_LEN_DYN2:
+			get_set(&be->cat.v18.len[index].dyn2, value, get);
+			break;
+		case HW_CAT_LEN_INV:
+			get_set(&be->cat.v18.len[index].inv, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 18/21/22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 4;
+	if ((unsigned int)(start_idx + count) > 4)
+		return error_index_too_large(__func__);
+	return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCE_IMM:
+			get_set(&be->cat.v22.cce[index].imm, value, get);
+			break;
+		case HW_CAT_CCE_IND:
+			get_set(&be->cat.v22.cce[index].ind, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = 1024;
+	if ((unsigned int)(start_idx + count) > 1024)
+		return error_index_too_large(__func__);
+	return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+			      enum hw_cat_e field, int index, uint32_t *value,
+			      int get)
+{
+	if ((unsigned int)index >= be->cat.nb_len)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 22:
+		switch (field) {
+		case HW_CAT_CCS_COR_EN:
+			get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+			break;
+		case HW_CAT_CCS_COR:
+			get_set(&be->cat.v22.ccs[index].cor, value, get);
+			break;
+		case HW_CAT_CCS_HSH_EN:
+			get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+			break;
+		case HW_CAT_CCS_HSH:
+			get_set(&be->cat.v22.ccs[index].hsh, value, get);
+			break;
+		case HW_CAT_CCS_QSL_EN:
+			get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+			break;
+		case HW_CAT_CCS_QSL:
+			get_set(&be->cat.v22.ccs[index].qsl, value, get);
+			break;
+		case HW_CAT_CCS_IPF_EN:
+			get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+			break;
+		case HW_CAT_CCS_IPF:
+			get_set(&be->cat.v22.ccs[index].ipf, value, get);
+			break;
+		case HW_CAT_CCS_SLC_EN:
+			get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+			break;
+		case HW_CAT_CCS_SLC:
+			get_set(&be->cat.v22.ccs[index].slc, value, get);
+			break;
+		case HW_CAT_CCS_PDB_EN:
+			get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+			break;
+		case HW_CAT_CCS_PDB:
+			get_set(&be->cat.v22.ccs[index].pdb, value, get);
+			break;
+		case HW_CAT_CCS_MSK_EN:
+			get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+			break;
+		case HW_CAT_CCS_MSK:
+			get_set(&be->cat.v22.ccs[index].msk, value, get);
+			break;
+		case HW_CAT_CCS_HST_EN:
+			get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+			break;
+		case HW_CAT_CCS_HST:
+			get_set(&be->cat.v22.ccs[index].hst, value, get);
+			break;
+		case HW_CAT_CCS_EPP_EN:
+			get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+			break;
+		case HW_CAT_CCS_EPP:
+			get_set(&be->cat.v22.ccs[index].epp, value, get);
+			break;
+		case HW_CAT_CCS_TPE_EN:
+			get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+			break;
+		case HW_CAT_CCS_TPE:
+			get_set(&be->cat.v22.ccs[index].tpe, value, get);
+			break;
+		case HW_CAT_CCS_RRB_EN:
+			get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+			break;
+		case HW_CAT_CCS_RRB:
+			get_set(&be->cat.v22.ccs[index].rrb, value, get);
+			break;
+		case HW_CAT_CCS_SB0_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+			break;
+		case HW_CAT_CCS_SB0_DATA:
+			get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+			break;
+		case HW_CAT_CCS_SB1_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+			break;
+		case HW_CAT_CCS_SB1_DATA:
+			get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+			break;
+		case HW_CAT_CCS_SB2_TYPE:
+			get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+			break;
+		case HW_CAT_CCS_SB2_DATA:
+			get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 22 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
new file mode 100644
index 0000000000..3dc4a0aac7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v18.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+/*
+ * Shadow-register record layouts for the CAT module, FPGA version 18.
+ * One struct per hardware table; hw_mod_cat_v18_s aggregates pointers
+ * to the per-table arrays.
+ */
+
+/* CFN table entry. */
+struct cat_v18_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km_or;
+};
+
+/* KCE table entry: enable bitmap. */
+struct cat_v18_kce_s {
+	uint32_t enable_bm;
+};
+
+/* KCS table entry: category select. */
+struct cat_v18_kcs_s {
+	uint32_t category;
+};
+
+/* FTE table entry: flow type enable bitmap. */
+struct cat_v18_fte_s {
+	uint32_t enable_bm;
+};
+
+/*
+ * CTE table entry: per-module enable bits, addressable either as one
+ * 32-bit bitmap or via the named single-bit fields.
+ */
+struct cat_v18_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+		} b;
+	};
+};
+
+/* CTS table entry. */
+struct cat_v18_cts_s {
+	uint32_t cat_a;
+	uint32_t cat_b;
+};
+
+/* COT table entry. */
+struct cat_v18_cot_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* CCT table entry. */
+struct cat_v18_cct_s {
+	uint32_t color;
+	uint32_t km;
+};
+
+/* EXO table entry; 'ofs' is signed. */
+struct cat_v18_exo_s {
+	uint32_t dyn;
+	int32_t ofs;
+};
+
+/* RCK table entry. */
+struct cat_v18_rck_s {
+	uint32_t rck_data;
+};
+
+/* LEN table entry: frame length bounds and qualifiers. */
+struct cat_v18_len_s {
+	uint32_t lower;
+	uint32_t upper;
+	uint32_t dyn1;
+	uint32_t dyn2;
+	uint32_t inv;
+};
+
+/* KCC CAM entry: 64-bit key (2 words), category and id. */
+struct cat_v18_kcc_s {
+	uint32_t key[2];
+	uint32_t category;
+	uint32_t id;
+};
+
+/* Pointers to the dynamically sized per-table shadow arrays. */
+struct hw_mod_cat_v18_s {
+	struct cat_v18_cfn_s *cfn;
+	struct cat_v18_kce_s *kce;
+	struct cat_v18_kcs_s *kcs;
+	struct cat_v18_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
new file mode 100644
index 0000000000..fa69ec11f3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v21.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+/*
+ * CAT module record layouts for FPGA version 21. Only CFN, KCE, KCS
+ * and FTE change relative to v18 (the v18 structs are reused for the
+ * remaining tables, see hw_mod_cat_v21_s below).
+ */
+
+/*
+ * CFN table entry. Relative to v18: adds tunnel L3/L4 checksum and
+ * TTL-expiry error checks, and splits km_or into km0_or/km1_or.
+ */
+struct cat_v21_cfn_s {
+	uint32_t enable;
+	uint32_t inv;
+	/* protocol checks */
+	uint32_t ptc_inv;
+	uint32_t ptc_isl;
+	uint32_t ptc_cfp;
+	uint32_t ptc_mac;
+	uint32_t ptc_l2;
+	uint32_t ptc_vntag;
+	uint32_t ptc_vlan;
+	uint32_t ptc_mpls;
+	uint32_t ptc_l3;
+	uint32_t ptc_frag;
+	uint32_t ptc_ip_prot;
+	uint32_t ptc_l4;
+	uint32_t ptc_tunnel;
+	uint32_t ptc_tnl_l2;
+	uint32_t ptc_tnl_vlan;
+	uint32_t ptc_tnl_mpls;
+	uint32_t ptc_tnl_l3;
+	uint32_t ptc_tnl_frag;
+	uint32_t ptc_tnl_ip_prot;
+	uint32_t ptc_tnl_l4;
+	/* error checks */
+	uint32_t err_inv;
+	uint32_t err_cv;
+	uint32_t err_fcs;
+	uint32_t err_trunc;
+	uint32_t err_l3_cs;
+	uint32_t err_l4_cs;
+	uint32_t err_tnl_l3_cs;
+	uint32_t err_tnl_l4_cs;
+	uint32_t err_ttl_exp;
+	uint32_t err_tnl_ttl_exp;
+	/* in port */
+	uint32_t mac_port;
+	/* pattern matcher */
+	uint32_t pm_cmp[2];
+	uint32_t pm_dct;
+	uint32_t pm_ext_inv;
+	uint32_t pm_cmb;
+	uint32_t pm_and_inv;
+	uint32_t pm_or_inv;
+	uint32_t pm_inv;
+	uint32_t lc;
+	uint32_t lc_inv;
+	uint32_t km0_or;
+	uint32_t km1_or;
+};
+
+/* KCE/KCS/FTE entries hold one value per KM interface (index 0/1). */
+struct cat_v21_kce_s {
+	uint32_t enable_bm[2];
+};
+
+struct cat_v21_kcs_s {
+	uint32_t category[2];
+};
+
+struct cat_v21_fte_s {
+	uint32_t enable_bm[2];
+};
+
+/* Per-table shadow array pointers; unchanged tables reuse v18 types. */
+struct hw_mod_cat_v21_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce;
+	struct cat_v21_kcs_s *kcs;
+	struct cat_v21_fte_s *fte;
+	struct cat_v18_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
new file mode 100644
index 0000000000..fa7dc6f441
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat_v22.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+/*
+ * CAT module record layouts for FPGA version 22. Relative to v21:
+ * the CTE entry gains an 'rrb' enable bit, and the CCE and CCS
+ * tables are new.
+ */
+
+/* CTE table entry: v18 layout plus the 'rrb' bit. */
+struct cat_v22_cte_s {
+	union {
+		uint32_t enable_bm;
+		struct {
+			uint32_t col : 1;
+			uint32_t cor : 1;
+			uint32_t hsh : 1;
+			uint32_t qsl : 1;
+			uint32_t ipf : 1;
+			uint32_t slc : 1;
+			uint32_t pdb : 1;
+			uint32_t msk : 1;
+			uint32_t hst : 1;
+			uint32_t epp : 1;
+			uint32_t tpe : 1;
+			uint32_t rrb : 1;
+		} b;
+	};
+};
+
+/* CCE table entry (4 entries, see hw_mod_cat_cce_flush()). */
+struct cat_v22_cce_s {
+	uint32_t imm;
+	uint32_t ind;
+};
+
+/* CCS table entry (1024 entries, see hw_mod_cat_ccs_flush()). */
+struct cat_v22_ccs_s {
+	uint32_t cor_en;
+	uint32_t cor;
+	uint32_t hsh_en;
+	uint32_t hsh;
+	uint32_t qsl_en;
+	uint32_t qsl;
+	uint32_t ipf_en;
+	uint32_t ipf;
+	uint32_t slc_en;
+	uint32_t slc;
+	uint32_t pdb_en;
+	uint32_t pdb;
+	uint32_t msk_en;
+	uint32_t msk;
+	uint32_t hst_en;
+	uint32_t hst;
+	uint32_t epp_en;
+	uint32_t epp;
+	uint32_t tpe_en;
+	uint32_t tpe;
+	uint32_t rrb_en;
+	uint32_t rrb;
+	uint32_t sb0_type;
+	uint32_t sb0_data;
+	uint32_t sb1_type;
+	uint32_t sb1_data;
+	uint32_t sb2_type;
+	uint32_t sb2_data;
+};
+
+/* Per-table shadow array pointers; unchanged tables reuse v18/v21 types. */
+struct hw_mod_cat_v22_s {
+	struct cat_v21_cfn_s *cfn;
+	struct cat_v21_kce_s *kce; /* KCE 0/1 */
+	struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+	struct cat_v21_fte_s *fte; /* FTE 0/1 */
+	struct cat_v22_cte_s *cte;
+	struct cat_v18_cts_s *cts;
+	struct cat_v18_cot_s *cot;
+	struct cat_v18_cct_s *cct;
+	struct cat_v18_exo_s *exo;
+	struct cat_v18_rck_s *rck;
+	struct cat_v18_len_s *len;
+	struct cat_v18_kcc_s *kcc_cam;
+	struct cat_v22_cce_s *cce;
+	struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
new file mode 100644
index 0000000000..404add5fe6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+/* Query the backend for presence of the FLM (flow matcher) module. */
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_flm_present(be->be_dev);
+}
+
+/*
+ * Read the FLM module version and resource counts from the backend and
+ * allocate the version-specific register cache in one contiguous block
+ * (callocate_mod).  Returns 0 on success, -1 on allocation failure, or a
+ * module error code for a missing resource / unsupported version.
+ */
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_flm_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "FLM MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_flm_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+	be->flm.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+	be->flm.nb_size_mb = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+	be->flm.nb_entry_size = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_variant(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+	be->flm.nb_variant = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_prios(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+	be->flm.nb_prios = (uint32_t)nb;
+
+	nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "flm_pst_profiles", _MOD_, _VER_);
+	be->flm.nb_pst_profiles = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 17:
+		/* v17: 26 register groups. */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s)))
+			return -1;
+		break;
+
+	case 20:
+		/* v20: the 26 v17 groups plus 12 additional statistics counters. */
+		if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+			&be->flm.v17.control, 1,
+			sizeof(struct flm_v17_control_s),
+			&be->flm.v17.status, 1,
+			sizeof(struct flm_v17_status_s),
+			&be->flm.v17.timeout, 1,
+			sizeof(struct flm_v17_timeout_s),
+			&be->flm.v17.scrub, 1,
+			sizeof(struct flm_v17_scrub_s),
+			&be->flm.v17.load_bin, 1,
+			sizeof(struct flm_v17_load_bin_s),
+			&be->flm.v17.load_pps, 1,
+			sizeof(struct flm_v17_load_pps_s),
+			&be->flm.v17.load_lps, 1,
+			sizeof(struct flm_v17_load_lps_s),
+			&be->flm.v17.load_aps, 1,
+			sizeof(struct flm_v17_load_aps_s),
+			&be->flm.v17.prio, 1,
+			sizeof(struct flm_v17_prio_s),
+			&be->flm.v17.pst, be->flm.nb_pst_profiles,
+			sizeof(struct flm_v17_pst_s),
+			&be->flm.v17.rcp, be->flm.nb_categories,
+			sizeof(struct flm_v17_rcp_s),
+			&be->flm.v17.buf_ctrl, 1,
+			sizeof(struct flm_v17_buf_ctrl_s),
+			&be->flm.v17.lrn_done, 1,
+			sizeof(struct flm_v17_stat_lrn_done_s),
+			&be->flm.v17.lrn_ignore, 1,
+			sizeof(struct flm_v17_stat_lrn_ignore_s),
+			&be->flm.v17.lrn_fail, 1,
+			sizeof(struct flm_v17_stat_lrn_fail_s),
+			&be->flm.v17.unl_done, 1,
+			sizeof(struct flm_v17_stat_unl_done_s),
+			&be->flm.v17.unl_ignore, 1,
+			sizeof(struct flm_v17_stat_unl_ignore_s),
+			&be->flm.v17.rel_done, 1,
+			sizeof(struct flm_v17_stat_rel_done_s),
+			&be->flm.v17.rel_ignore, 1,
+			sizeof(struct flm_v17_stat_rel_ignore_s),
+			&be->flm.v17.aul_done, 1,
+			sizeof(struct flm_v17_stat_aul_done_s),
+			&be->flm.v17.aul_ignore, 1,
+			sizeof(struct flm_v17_stat_aul_ignore_s),
+			&be->flm.v17.aul_fail, 1,
+			sizeof(struct flm_v17_stat_aul_fail_s),
+			&be->flm.v17.tul_done, 1,
+			sizeof(struct flm_v17_stat_tul_done_s),
+			&be->flm.v17.flows, 1,
+			sizeof(struct flm_v17_stat_flows_s),
+			&be->flm.v17.prb_done, 1,
+			sizeof(struct flm_v17_stat_prb_done_s),
+			&be->flm.v17.prb_ignore, 1,
+			sizeof(struct flm_v17_stat_prb_ignore_s),
+			&be->flm.v20.sta_done, 1,
+			sizeof(struct flm_v20_stat_sta_done_s),
+			&be->flm.v20.inf_done, 1,
+			sizeof(struct flm_v20_stat_inf_done_s),
+			&be->flm.v20.inf_skip, 1,
+			sizeof(struct flm_v20_stat_inf_skip_s),
+			&be->flm.v20.pck_hit, 1,
+			sizeof(struct flm_v20_stat_pck_hit_s),
+			&be->flm.v20.pck_miss, 1,
+			sizeof(struct flm_v20_stat_pck_miss_s),
+			&be->flm.v20.pck_unh, 1,
+			sizeof(struct flm_v20_stat_pck_unh_s),
+			&be->flm.v20.pck_dis, 1,
+			sizeof(struct flm_v20_stat_pck_dis_s),
+			&be->flm.v20.csh_hit, 1,
+			sizeof(struct flm_v20_stat_csh_hit_s),
+			&be->flm.v20.csh_miss, 1,
+			sizeof(struct flm_v20_stat_csh_miss_s),
+			&be->flm.v20.csh_unh, 1,
+			sizeof(struct flm_v20_stat_csh_unh_s),
+			&be->flm.v20.cuc_start, 1,
+			sizeof(struct flm_v20_stat_cuc_start_s),
+			&be->flm.v20.cuc_move, 1,
+			sizeof(struct flm_v20_stat_cuc_move_s)))
+			return -1;
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the FLM register cache allocated by hw_mod_flm_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to protect against double free.
+ */
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+	free(be->flm.base);
+	be->flm.base = NULL;
+}
+
+/*
+ * Reset the FLM register cache to defaults and flush the result to hardware.
+ * NOTE(review): the return values of the set/flush calls below are ignored;
+ * presumably acceptable during init — confirm.
+ */
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->flm);
+
+	NT_LOG(DBG, FILTER, "INIT FLM\n");
+	hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+	hw_mod_flm_control_flush(be);
+	hw_mod_flm_timeout_flush(be);
+	hw_mod_flm_scrub_flush(be);
+	hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return 0;
+}
+
+/* Write the cached FLM control registers to hardware. */
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get or set one FLM control field in the register cache (get != 0 reads
+ * into *value, otherwise *value is written to the cache).
+ */
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_CONTROL_PRESET_ALL:
+			/* Set-only: fill the whole control struct with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(be->flm.v17.control, (uint8_t)*value,
+			       sizeof(struct flm_v17_control_s));
+			break;
+		case HW_FLM_CONTROL_ENABLE:
+			get_set(&be->flm.v17.control->enable, value, get);
+			break;
+		case HW_FLM_CONTROL_INIT:
+			get_set(&be->flm.v17.control->init, value, get);
+			break;
+		case HW_FLM_CONTROL_LDS:
+			get_set(&be->flm.v17.control->lds, value, get);
+			break;
+		case HW_FLM_CONTROL_LFS:
+			get_set(&be->flm.v17.control->lfs, value, get);
+			break;
+		case HW_FLM_CONTROL_LIS:
+			get_set(&be->flm.v17.control->lis, value, get);
+			break;
+		case HW_FLM_CONTROL_UDS:
+			get_set(&be->flm.v17.control->uds, value, get);
+			break;
+		case HW_FLM_CONTROL_UIS:
+			get_set(&be->flm.v17.control->uis, value, get);
+			break;
+		case HW_FLM_CONTROL_RDS:
+			get_set(&be->flm.v17.control->rds, value, get);
+			break;
+		case HW_FLM_CONTROL_RIS:
+			get_set(&be->flm.v17.control->ris, value, get);
+			break;
+		case HW_FLM_CONTROL_PDS:
+			get_set(&be->flm.v17.control->pds, value, get);
+			break;
+		case HW_FLM_CONTROL_PIS:
+			get_set(&be->flm.v17.control->pis, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCWR:
+			get_set(&be->flm.v17.control->crcwr, value, get);
+			break;
+		case HW_FLM_CONTROL_CRCRD:
+			get_set(&be->flm.v17.control->crcrd, value, get);
+			break;
+		case HW_FLM_CONTROL_RBL:
+			get_set(&be->flm.v17.control->rbl, value, get);
+			break;
+		case HW_FLM_CONTROL_EAB:
+			get_set(&be->flm.v17.control->eab, value, get);
+			break;
+		case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+			get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM control field in the cache. */
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+/* Read one FLM control field from the cache into *value. */
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM status registers to hardware. */
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+/* Refresh the cached FLM status registers from hardware. */
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+/* Get or set one FLM status field in the register cache (get != 0 reads). */
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+				 enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STATUS_CALIBDONE:
+			get_set(&be->flm.v17.status->calibdone, value, get);
+			break;
+		case HW_FLM_STATUS_INITDONE:
+			get_set(&be->flm.v17.status->initdone, value, get);
+			break;
+		case HW_FLM_STATUS_IDLE:
+			get_set(&be->flm.v17.status->idle, value, get);
+			break;
+		case HW_FLM_STATUS_CRITICAL:
+			get_set(&be->flm.v17.status->critical, value, get);
+			break;
+		case HW_FLM_STATUS_PANIC:
+			get_set(&be->flm.v17.status->panic, value, get);
+			break;
+		case HW_FLM_STATUS_CRCERR:
+			get_set(&be->flm.v17.status->crcerr, value, get);
+			break;
+		case HW_FLM_STATUS_EFT_BP:
+			get_set(&be->flm.v17.status->eft_bp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM status field in the cache. */
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t value)
+{
+	return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+/* Read one FLM status field from the cache into *value. */
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			  uint32_t *value)
+{
+	return hw_mod_flm_status_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM timeout register to hardware. */
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the FLM timeout field in the register cache (get != 0 reads). */
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_TIMEOUT_T:
+			get_set(&be->flm.v17.timeout->t, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM timeout field in the cache. */
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t value)
+{
+	return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+/* Read the FLM timeout field from the cache into *value. */
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			   uint32_t *value)
+{
+	return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM scrub register to hardware. */
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the FLM scrub interval field in the cache (get != 0 reads). */
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+				enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_SCRUB_I:
+			get_set(&be->flm.v17.scrub->i, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM scrub interval field in the cache. */
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t value)
+{
+	return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+/* Read the FLM scrub interval field from the cache into *value. */
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			 uint32_t *value)
+{
+	return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_bin register to hardware. */
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the FLM load_bin field in the cache (get != 0 reads). */
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_BIN:
+			get_set(&be->flm.v17.load_bin->bin, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_bin field in the cache. */
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_bin field from the cache into *value. */
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_pps register to hardware. */
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the FLM load_pps field in the cache (get != 0 reads). */
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_PPS:
+			get_set(&be->flm.v17.load_pps->pps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_pps field in the cache. */
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_pps field from the cache into *value. */
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_lps register to hardware. */
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the FLM load_lps field in the cache (get != 0 reads). */
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_LPS:
+			get_set(&be->flm.v17.load_lps->lps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_lps field in the cache. */
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_lps field from the cache into *value. */
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM load_aps register to hardware. */
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+/* Get or set the FLM load_aps field in the cache (get != 0 reads). */
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_LOAD_APS:
+			get_set(&be->flm.v17.load_aps->aps, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set the FLM load_aps field in the cache. */
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+/* Read the FLM load_aps field from the cache into *value. */
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+/* Write the cached FLM priority registers to hardware. */
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+/*
+ * Get or set one FLM priority field in the cache (get != 0 reads).
+ * Four limit/ft pairs are exposed, one per priority level 0-3.
+ */
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+			       enum hw_flm_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PRIO_LIMIT0:
+			get_set(&be->flm.v17.prio->limit0, value, get);
+			break;
+		case HW_FLM_PRIO_FT0:
+			get_set(&be->flm.v17.prio->ft0, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT1:
+			get_set(&be->flm.v17.prio->limit1, value, get);
+			break;
+		case HW_FLM_PRIO_FT1:
+			get_set(&be->flm.v17.prio->ft1, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT2:
+			get_set(&be->flm.v17.prio->limit2, value, get);
+			break;
+		case HW_FLM_PRIO_FT2:
+			get_set(&be->flm.v17.prio->ft2, value, get);
+			break;
+		case HW_FLM_PRIO_LIMIT3:
+			get_set(&be->flm.v17.prio->limit3, value, get);
+			break;
+		case HW_FLM_PRIO_FT3:
+			get_set(&be->flm.v17.prio->ft3, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one FLM priority field in the cache. */
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t value)
+{
+	return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+/* Read one FLM priority field from the cache into *value. */
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) PST profile entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table; out-of-range spans are
+ * rejected.
+ */
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_pst_profiles;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+	return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get or set one field of PST profile entry `index` in the cache
+ * (get != 0 reads).  NOTE(review): `index` is not range checked here —
+ * presumably callers validate it; confirm.
+ */
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_PST_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.pst[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_pst_s));
+			break;
+		case HW_FLM_PST_BP:
+			get_set(&be->flm.v17.pst[index].bp, value, get);
+			break;
+		case HW_FLM_PST_PP:
+			get_set(&be->flm.v17.pst[index].pp, value, get);
+			break;
+		case HW_FLM_PST_TP:
+			get_set(&be->flm.v17.pst[index].tp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one field of PST profile entry `index` in the cache. */
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of PST profile entry `index` from the cache into *value. */
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
+
+/*
+ * Flush [start_idx, start_idx + count) FLM recipe entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table; out-of-range spans are
+ * rejected.
+ */
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->flm.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+/*
+ * Get or set one field of FLM recipe entry `index` in the cache
+ * (get != 0 reads).  HW_FLM_RCP_MASK transfers the whole 10-word mask
+ * array; all other fields are single 32-bit values.
+ */
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_flm_e field, int index, uint32_t *value,
+			      int get)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_RCP_PRESET_ALL:
+			/* Set-only: fill the whole entry with one byte value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+			       sizeof(struct flm_v17_rcp_s));
+			break;
+		case HW_FLM_RCP_LOOKUP:
+			get_set(&be->flm.v17.rcp[index].lookup, value, get);
+			break;
+		case HW_FLM_RCP_QW0_DYN:
+			get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW0_OFS:
+			get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_FLM_RCP_QW0_SEL:
+			get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+			break;
+		case HW_FLM_RCP_QW4_DYN:
+			get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_FLM_RCP_QW4_OFS:
+			get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_DYN:
+			get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW8_OFS:
+			get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+			break;
+		case HW_FLM_RCP_SW8_SEL:
+			get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+			break;
+		case HW_FLM_RCP_SW9_DYN:
+			get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+			break;
+		case HW_FLM_RCP_SW9_OFS:
+			get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+			break;
+		case HW_FLM_RCP_MASK:
+			/* value must point at sizeof(rcp->mask)/4 words. */
+			if (get) {
+				memcpy(value, be->flm.v17.rcp[index].mask,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			} else {
+				memcpy(be->flm.v17.rcp[index].mask, value,
+				       sizeof(((struct flm_v17_rcp_s *)0)
+					      ->mask));
+			}
+			break;
+		case HW_FLM_RCP_KID:
+			get_set(&be->flm.v17.rcp[index].kid, value, get);
+			break;
+		case HW_FLM_RCP_OPN:
+			get_set(&be->flm.v17.rcp[index].opn, value, get);
+			break;
+		case HW_FLM_RCP_IPN:
+			get_set(&be->flm.v17.rcp[index].ipn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_DYN:
+			get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+			break;
+		case HW_FLM_RCP_BYT_OFS:
+			get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+			break;
+		case HW_FLM_RCP_TXPLM:
+			get_set(&be->flm.v17.rcp[index].txplm, value, get);
+			break;
+		case HW_FLM_RCP_AUTO_IPV4_MASK:
+			get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the 10-word recipe mask of entry `index` (HW_FLM_RCP_MASK only).
+ * NOTE(review): a wrong field yields error_unsup_ver rather than
+ * error_unsup_field — looks like a copy-paste; confirm intent.
+ */
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+/* Set one scalar field of recipe entry `index` (mask must use set_mask). */
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one field of recipe entry `index` from the cache into *value. */
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
+
+/* Refresh the cached FLM buffer-control registers from hardware. */
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+/* Read one buffer-control field from the cache; these fields are read-only. */
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+				       enum hw_flm_e field, uint32_t *value)
+{
+	int get = 1; /* Only get supported */
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_BUF_CTRL_LRN_FREE:
+			get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_INF_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+			break;
+		case HW_FLM_BUF_CTRL_STA_AVAIL:
+			get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public getter for buffer-control fields (no setter exists). */
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			    uint32_t *value)
+{
+	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+/* Refresh the cached FLM statistics counters from hardware. */
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+	return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+/*
+ * Read one FLM statistics counter from the cache into *value.
+ * The v17 counters are handled first; the nested default falls through to
+ * the counters added in later versions, which are rejected when the
+ * running version is below 18.
+ */
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+			uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_STAT_LRN_DONE:
+			*value = be->flm.v17.lrn_done->cnt;
+			break;
+		case HW_FLM_STAT_LRN_IGNORE:
+			*value = be->flm.v17.lrn_ignore->cnt;
+			break;
+		case HW_FLM_STAT_LRN_FAIL:
+			*value = be->flm.v17.lrn_fail->cnt;
+			break;
+		case HW_FLM_STAT_UNL_DONE:
+			*value = be->flm.v17.unl_done->cnt;
+			break;
+		case HW_FLM_STAT_UNL_IGNORE:
+			*value = be->flm.v17.unl_ignore->cnt;
+			break;
+		case HW_FLM_STAT_REL_DONE:
+			*value = be->flm.v17.rel_done->cnt;
+			break;
+		case HW_FLM_STAT_REL_IGNORE:
+			*value = be->flm.v17.rel_ignore->cnt;
+			break;
+		case HW_FLM_STAT_PRB_DONE:
+			*value = be->flm.v17.prb_done->cnt;
+			break;
+		case HW_FLM_STAT_PRB_IGNORE:
+			*value = be->flm.v17.prb_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_DONE:
+			*value = be->flm.v17.aul_done->cnt;
+			break;
+		case HW_FLM_STAT_AUL_IGNORE:
+			*value = be->flm.v17.aul_ignore->cnt;
+			break;
+		case HW_FLM_STAT_AUL_FAIL:
+			*value = be->flm.v17.aul_fail->cnt;
+			break;
+		case HW_FLM_STAT_TUL_DONE:
+			*value = be->flm.v17.tul_done->cnt;
+			break;
+		case HW_FLM_STAT_FLOWS:
+			*value = be->flm.v17.flows->cnt;
+			break;
+
+		default: {
+			/* Counters below only exist from version 18 onwards. */
+			if (_VER_ < 18)
+				return error_unsup_field(__func__);
+
+			switch (field) {
+			case HW_FLM_STAT_STA_DONE:
+				*value = be->flm.v20.sta_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_DONE:
+				*value = be->flm.v20.inf_done->cnt;
+				break;
+			case HW_FLM_STAT_INF_SKIP:
+				*value = be->flm.v20.inf_skip->cnt;
+				break;
+			case HW_FLM_STAT_PCK_HIT:
+				*value = be->flm.v20.pck_hit->cnt;
+				break;
+			case HW_FLM_STAT_PCK_MISS:
+				*value = be->flm.v20.pck_miss->cnt;
+				break;
+			case HW_FLM_STAT_PCK_UNH:
+				*value = be->flm.v20.pck_unh->cnt;
+				break;
+			case HW_FLM_STAT_PCK_DIS:
+				*value = be->flm.v20.pck_dis->cnt;
+				break;
+			case HW_FLM_STAT_CSH_HIT:
+				*value = be->flm.v20.csh_hit->cnt;
+				break;
+			case HW_FLM_STAT_CSH_MISS:
+				*value = be->flm.v20.csh_miss->cnt;
+				break;
+			case HW_FLM_STAT_CSH_UNH:
+				*value = be->flm.v20.csh_unh->cnt;
+				break;
+			case HW_FLM_STAT_CUC_START:
+				*value = be->flm.v20.cuc_start->cnt;
+				break;
+			case HW_FLM_STAT_CUC_MOVE:
+				*value = be->flm.v20.cuc_move->cnt;
+				break;
+
+			default:
+				return error_unsup_field(__func__);
+			}
+		}
+		break;
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Push one learn record (struct flm_v17_lrn_data_s, passed as an array of
+ * 32-bit words in *value) to the hardware learn FIFO.
+ */
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+				  enum hw_flm_e field, const uint32_t *value)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA_V17:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_lrn_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return ret;
+}
+
+/*
+ * Read `word_cnt` 32-bit words of flow info records from hardware into
+ * *value.  NOTE(review): flm_inf_data_update's return value is discarded
+ * here, unlike the learn path — confirm this is intentional.
+ */
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value,
+				   uint32_t word_cnt)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_INF_DATA_V17:
+			be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+						       value, word_cnt);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Read one status record (struct flm_v17_sta_data_s, as 32-bit words) from
+ * hardware into *value.  NOTE(review): the backend return value is
+ * discarded, mirroring the inf-data path — confirm this is intentional.
+ */
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+				   enum hw_flm_e field, uint32_t *value)
+{
+	switch (_VER_) {
+	case 17:
+	case 20:
+		switch (field) {
+		case HW_FLM_FLOW_STA_DATA_V17:
+			be->iface->flm_sta_data_update(be->be_dev,
+				&be->flm, value,
+				sizeof(struct flm_v17_sta_data_s) /
+				sizeof(uint32_t));
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
new file mode 100644
index 0000000000..9b4ee1991e
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v17.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+/* Overlay for packing two 28-bit member indices into 7 bytes. */
+struct flm_v17_mbr_idx_overlay {
+	uint64_t a : 28;
+	uint64_t b : 28;
+	uint64_t pad : 4;
+};
+
+struct flm_v17_control_s {
+	uint32_t enable;
+	uint32_t init;
+	uint32_t lds;
+	uint32_t lfs;
+	uint32_t lis;
+	uint32_t uds;
+	uint32_t uis;
+	uint32_t rds;
+	uint32_t ris;
+	uint32_t pds;
+	uint32_t pis;
+	uint32_t crcwr;
+	uint32_t crcrd;
+	uint32_t rbl;
+	uint32_t eab;
+	uint32_t split_sdram_usage;
+};
+
+struct flm_v17_status_s {
+	uint32_t calibdone;
+	uint32_t initdone;
+	uint32_t idle;
+	uint32_t critical;
+	uint32_t panic;
+	uint32_t crcerr;
+	uint32_t eft_bp;
+};
+
+struct flm_v17_timeout_s {
+	uint32_t t;
+};
+
+struct flm_v17_scrub_s {
+	uint32_t i;
+};
+
+struct flm_v17_load_bin_s {
+	uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+	uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+	uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+	uint32_t aps;
+};
+
+/* Four priority levels, each a limit/ft pair. */
+struct flm_v17_prio_s {
+	uint32_t limit0;
+	uint32_t ft0;
+	uint32_t limit1;
+	uint32_t ft1;
+	uint32_t limit2;
+	uint32_t ft2;
+	uint32_t limit3;
+	uint32_t ft3;
+};
+
+struct flm_v17_pst_s {
+	uint32_t bp;
+	uint32_t pp;
+	uint32_t tp;
+};
+
+/* FLM recipe entry; one per flow category. */
+struct flm_v17_rcp_s {
+	uint32_t lookup;
+	uint32_t qw0_dyn;
+	uint32_t qw0_ofs;
+	uint32_t qw0_sel;
+	uint32_t qw4_dyn;
+	uint32_t qw4_ofs;
+	uint32_t sw8_dyn;
+	uint32_t sw8_ofs;
+	uint32_t sw8_sel;
+	uint32_t sw9_dyn;
+	uint32_t sw9_ofs;
+	uint32_t mask[10];
+	uint32_t kid;
+	uint32_t opn;
+	uint32_t ipn;
+	uint32_t byt_dyn;
+	uint32_t byt_ofs;
+	uint32_t txplm;
+	uint32_t auto_ipv4_mask;
+};
+
+struct flm_v17_buf_ctrl_s {
+	uint32_t lrn_free;
+	uint32_t inf_avail;
+	uint32_t sta_avail;
+};
+
+/*
+ * The records below are packed to match the exact hardware data layout;
+ * bit positions are given as high:low (width) in the comments.
+ * NOTE(review): bit-field ordering is compiler-dependent — assumes the
+ * GCC little-endian layout; confirm for other toolchains.
+ */
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+	uint32_t sw9; /* 31:0 (32) */
+	uint32_t sw8; /* 63:32 (32) */
+	uint32_t qw4[4]; /* 191:64 (128) */
+	uint32_t qw0[4]; /* 319:192 (128) */
+	uint8_t prot; /* 327:320 (8) */
+	uint8_t kid; /* 335:328 (8) */
+	uint32_t nat_ip; /* 367:336 (32) */
+	uint32_t teid; /* 399:368 (32) */
+	uint16_t nat_port; /* 415:400 (16) */
+	uint16_t rate; /* 431:416 (16) */
+	uint16_t size; /* 447:432 (16) */
+	uint32_t color; /* 479:448 (32) */
+	uint32_t adj; /* 511:480 (32) */
+	uint8_t id[9]; /* 583:512 (72) */
+	uint16_t fill : 12; /* 595:584 (12) */
+	uint16_t ft : 4; /* 599:596 (4) */
+	uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+	/* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+	uint8_t mbr_idx[14];
+	uint32_t vol_idx : 3; /* 722:720 (3) */
+	uint32_t stat_prof : 4; /* 726:723 (4) */
+	uint32_t prio : 2; /* 728:727 (2) */
+	uint32_t ent : 1; /* 729:729 (1) */
+	uint32_t op : 4; /* 733:730 (4) */
+	uint32_t dscp : 6; /* 739:734 (6) */
+	uint32_t qfi : 6; /* 745:740 (6) */
+	uint32_t rqi : 1; /* 746:746 (1) */
+	uint32_t nat_en : 1; /* 747:747 (1) */
+	uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+	uint16_t eor : 1; /* 767:767 (1) */
+};
+
+struct flm_v17_inf_data_s {
+	uint64_t bytes;
+	uint64_t packets;
+	uint64_t ts;
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t cause : 3;
+	uint32_t pad : 20;
+	uint32_t eor : 1;
+};
+
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 results in a 72-bit int */
+	uint32_t id1 : 8;
+	uint32_t lds : 1;
+	uint32_t lfs : 1;
+	uint32_t lis : 1;
+	uint32_t uds : 1;
+	uint32_t uis : 1;
+	uint32_t rds : 1;
+	uint32_t ris : 1;
+	uint32_t pds : 1;
+	uint32_t pis : 1;
+	uint32_t pad : 14;
+	uint32_t eor : 1;
+};
+
+#pragma pack()
+/* Per-event statistics counters, one 32-bit register each. */
+struct flm_v17_stat_lrn_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+	uint32_t cnt;
+};
+
+/* Register cache layout for FLM module version 17. */
+struct hw_mod_flm_v17_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/* lrn_data is not handled by struct */
+	/* inf_data is not handled by struct */
+	/* sta_data is not handled by struct */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
new file mode 100644
index 0000000000..e33d4353c3
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm_v20.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+/*
+ * Statistics registers added in FLM v20; same single-counter layout
+ * as the v17 stat registers.
+ */
+struct flm_v20_stat_sta_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+	uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+	uint32_t cnt;
+};
+
+/*
+ * FLM v20 module cache layout. v20 reuses all v17 register structs
+ * unchanged and only appends the new v20 statistics counters at the
+ * end, so the leading members intentionally mirror hw_mod_flm_v17_s.
+ */
+struct hw_mod_flm_v20_s {
+	struct flm_v17_control_s *control;
+	struct flm_v17_status_s *status;
+	struct flm_v17_timeout_s *timeout;
+	struct flm_v17_scrub_s *scrub;
+	struct flm_v17_load_bin_s *load_bin;
+	struct flm_v17_load_pps_s *load_pps;
+	struct flm_v17_load_lps_s *load_lps;
+	struct flm_v17_load_aps_s *load_aps;
+	struct flm_v17_prio_s *prio;
+	struct flm_v17_pst_s *pst;
+	struct flm_v17_rcp_s *rcp;
+	struct flm_v17_buf_ctrl_s *buf_ctrl;
+	/*
+	 * lrn_data is not handled by struct
+	 * inf_data is not handled by struct
+	 * sta_data is not handled by struct
+	 */
+	struct flm_v17_stat_lrn_done_s *lrn_done;
+	struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+	struct flm_v17_stat_lrn_fail_s *lrn_fail;
+	struct flm_v17_stat_unl_done_s *unl_done;
+	struct flm_v17_stat_unl_ignore_s *unl_ignore;
+	struct flm_v17_stat_rel_done_s *rel_done;
+	struct flm_v17_stat_rel_ignore_s *rel_ignore;
+	struct flm_v17_stat_aul_done_s *aul_done;
+	struct flm_v17_stat_aul_ignore_s *aul_ignore;
+	struct flm_v17_stat_aul_fail_s *aul_fail;
+	struct flm_v17_stat_tul_done_s *tul_done;
+	struct flm_v17_stat_flows_s *flows;
+	struct flm_v17_stat_prb_done_s *prb_done;
+	struct flm_v17_stat_prb_ignore_s *prb_ignore;
+	struct flm_v20_stat_sta_done_s *sta_done;
+	struct flm_v20_stat_inf_done_s *inf_done;
+	struct flm_v20_stat_inf_skip_s *inf_skip;
+	struct flm_v20_stat_pck_hit_s *pck_hit;
+	struct flm_v20_stat_pck_miss_s *pck_miss;
+	struct flm_v20_stat_pck_unh_s *pck_unh;
+	struct flm_v20_stat_pck_dis_s *pck_dis;
+	struct flm_v20_stat_csh_hit_s *csh_hit;
+	struct flm_v20_stat_csh_miss_s *csh_miss;
+	struct flm_v20_stat_csh_unh_s *csh_unh;
+	struct flm_v20_stat_cuc_start_s *cuc_start;
+	struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
new file mode 100644
index 0000000000..1b8896d5c2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+/* Report whether the HSH module is present in the attached FPGA. */
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+	const bool present = be->iface->get_hsh_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Allocate the HSH module shadow cache.
+ * Reads the module version from the backend and sizes the RCP table
+ * accordingly (only v5 is supported here).
+ * Returns 0 on success, -1 on allocation failure, or the
+ * unsupported-version error code.
+ */
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_hsh_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HSH MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 5:
+		be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+		/* callocate_mod returns non-zero on success */
+		if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+			&be->hsh.v5.rcp,
+			be->hsh.nb_rcp,
+			sizeof(struct hsh_v5_rcp_s)))
+			return -1;
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the HSH module shadow cache allocated by hw_mod_hsh_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to make repeated calls safe (no double-free).
+ */
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+	free(be->hsh.base);
+	be->hsh.base = NULL;
+}
+
+/*
+ * Reset the HSH shadow cache to all-zero and flush every RCP entry
+ * to hardware. Returns the flush result (0 on success).
+ */
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hsh);
+
+	NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+	return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+/*
+ * Write [start_idx, start_idx + count) RCP shadow entries to hardware.
+ * count == ALL_ENTRIES flushes the whole table.
+ * NOTE(review): a negative start_idx is not rejected here — presumably
+ * callers only pass non-negative indices; confirm at call sites.
+ */
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hsh.nb_rcp;
+	if ((start_idx + count) > (int)be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+/*
+ * Get or set a single field of HSH RCP shadow entry @index.
+ * @word_off selects the word for array-valued fields (mask arrays);
+ * @get non-zero reads into *value, zero writes *value into the cache.
+ * Returns 0 on success or a module error code.
+ */
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hsh_e field, uint32_t index,
+			      uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hsh.nb_rcp)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 5:
+		switch (field) {
+		case HW_HSH_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+			       sizeof(struct hsh_v5_rcp_s));
+			break;
+		case HW_HSH_RCP_COMPARE:
+			rv = do_compare_indexes(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_FIND:
+			rv = find_equal_index(be->hsh.v5.rcp,
+				sizeof(struct hsh_v5_rcp_s), index, word_off,
+				be->hsh.nb_rcp, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HSH_RCP_LOAD_DIST_TYPE:
+			get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+			break;
+		case HW_HSH_RCP_MAC_PORT_MASK:
+			/* mac_port_mask[] has HSH_RCP_MAC_PORT_MASK_SIZE
+			 * elements, so word_off == SIZE is already out of
+			 * bounds: reject with >=, not >.
+			 */
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SORT:
+			get_set(&be->hsh.v5.rcp[index].sort, value, get);
+			break;
+		case HW_HSH_RCP_QW0_PE:
+			get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW0_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_HSH_RCP_QW4_PE:
+			get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+			break;
+		case HW_HSH_RCP_QW4_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_PE:
+			get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+			break;
+		case HW_HSH_RCP_W8_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W8_SORT:
+			get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_PE:
+			get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+			break;
+		case HW_HSH_RCP_W9_OFS:
+			get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+			break;
+		case HW_HSH_RCP_W9_SORT:
+			get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+			break;
+		case HW_HSH_RCP_W9_P:
+			get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+			break;
+		case HW_HSH_RCP_P_MASK:
+			get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+			break;
+		case HW_HSH_RCP_WORD_MASK:
+			/* word_mask[] has HSH_RCP_WORD_MASK_SIZE elements;
+			 * reject word_off == SIZE as well (was off-by-one).
+			 */
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+				value, get);
+			break;
+		case HW_HSH_RCP_SEED:
+			get_set(&be->hsh.v5.rcp[index].seed, value, get);
+			break;
+		case HW_HSH_RCP_TNL_P:
+			get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+			break;
+		case HW_HSH_RCP_HSH_VALID:
+			get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+			break;
+		case HW_HSH_RCP_HSH_TYPE:
+			get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+			break;
+		case HW_HSH_RCP_AUTO_IPV4_MASK:
+			get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 5 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HSH RCP field into the shadow cache (see hw_mod_hsh_rcp_mod). */
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+/* Read one HSH RCP field from the shadow cache (see hw_mod_hsh_rcp_mod). */
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+		       uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
new file mode 100644
index 0000000000..8588750ff0
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh_v5.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+/*
+ * HSH v5 RCP (recipe) shadow entry. Field names mirror the FPGA
+ * register fields; *_ofs fields are signed byte offsets, the rest are
+ * unsigned register values. Array sizes must match the
+ * HSH_RCP_*_SIZE bounds used in hw_mod_hsh.c.
+ */
+struct hsh_v5_rcp_s {
+	uint32_t load_dist_type;
+	uint32_t mac_port_mask[4];
+	uint32_t sort;
+	uint32_t qw0_pe;
+	int32_t qw0_ofs;
+	uint32_t qw4_pe;
+	int32_t qw4_ofs;
+	uint32_t w8_pe;
+	int32_t w8_ofs;
+	uint32_t w8_sort;
+	uint32_t w9_pe;
+	int32_t w9_ofs;
+	uint32_t w9_sort;
+	uint32_t w9_p;
+	uint32_t p_mask;
+	uint32_t word_mask[10];
+	uint32_t seed;
+	uint32_t tnl_p;
+	uint32_t hsh_valid;
+	uint32_t hsh_type;
+	uint32_t auto_ipv4_mask;
+};
+
+/* HSH v5 module cache: RCP table only. */
+struct hw_mod_hsh_v5_s {
+	struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
new file mode 100644
index 0000000000..751c7b3ffe
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+/* Report whether the HST module is present in the attached FPGA. */
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_hst_present(be->be_dev);
+}
+
+/*
+ * Allocate the HST module shadow cache, sized by the backend-reported
+ * number of HST categories (only v2 supported).
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_hst_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_hst_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+	be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 2:
+		/* callocate_mod returns non-zero on success */
+		if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+			&be->hst.v2.rcp,
+			be->hst.nb_hst_rcp_categories,
+			sizeof(struct hst_v2_rcp_s)))
+			return -1;
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the HST module shadow cache allocated by hw_mod_hst_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to make repeated calls safe (no double-free).
+ */
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+	free(be->hst.base);
+	be->hst.base = NULL;
+}
+
+/*
+ * Reset the HST shadow cache to all-zero and flush every RCP entry
+ * to hardware. Returns the flush result (0 on success).
+ */
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->hst);
+
+	NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+	return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) HST RCP shadow entries to
+ * hardware. count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->hst.nb_hst_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+/*
+ * Get or set a single field of HST RCP shadow entry @index.
+ * @get non-zero reads into *value, zero writes *value into the cache.
+ * Returns 0 on success or a module error code.
+ */
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_hst_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->hst.nb_hst_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_HST_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct hst_v2_rcp_s));
+			break;
+		case HW_HST_RCP_FIND:
+			/* Capture the result: previously the return value
+			 * was discarded, making the error check below a
+			 * dead test of the stale rv == 0.
+			 */
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_COMPARE:
+			rv = do_compare_indexes(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_HST_RCP_STRIP_MODE:
+			get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+			break;
+		case HW_HST_RCP_START_DYN:
+			get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+			break;
+		case HW_HST_RCP_START_OFS:
+			get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+			break;
+		case HW_HST_RCP_END_DYN:
+			get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+			break;
+		case HW_HST_RCP_END_OFS:
+			get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_CMD:
+			get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_DYN:
+			get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_OFS:
+			get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF0_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_CMD:
+			get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_DYN:
+			get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_OFS:
+			get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF1_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_CMD:
+			get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_DYN:
+			get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_OFS:
+			get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+			break;
+		case HW_HST_RCP_MODIF2_VALUE:
+			get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one HST RCP field into the shadow cache (see hw_mod_hst_rcp_mod). */
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one HST RCP field from the shadow cache (see hw_mod_hst_rcp_mod). */
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+		       int index, uint32_t *value)
+{
+	return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
new file mode 100644
index 0000000000..230c70b56d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hst_v2.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+/*
+ * HST v2 RCP (recipe) shadow entry; one 32-bit shadow word per FPGA
+ * register field. modif0..modif2 are the three modifier slots.
+ */
+struct hst_v2_rcp_s {
+	uint32_t strip_mode;
+	uint32_t start_dyn;
+	uint32_t start_ofs;
+	uint32_t end_dyn;
+	uint32_t end_ofs;
+	uint32_t modif0_cmd;
+	uint32_t modif0_dyn;
+	uint32_t modif0_ofs;
+	uint32_t modif0_value;
+	uint32_t modif1_cmd;
+	uint32_t modif1_dyn;
+	uint32_t modif1_ofs;
+	uint32_t modif1_value;
+	uint32_t modif2_cmd;
+	uint32_t modif2_dyn;
+	uint32_t modif2_ofs;
+	uint32_t modif2_value;
+};
+
+/* HST v2 module cache: RCP table only. */
+struct hw_mod_hst_v2_s {
+	struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
new file mode 100644
index 0000000000..c8e3593637
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+/* Report whether the IOA module is present in the attached FPGA. */
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_ioa_present(be->be_dev);
+}
+
+/*
+ * Allocate the IOA module shadow cache: RCP table, special-TPID
+ * config and ROA EPP table (only v4 supported). The ROA EPP table is
+ * sized by the ROA category count due to FPGA cross-indexing.
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_ioa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "IOA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+	if (nb <= 0)
+		return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+	be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: ROA number of categories are called here. FPGA uses a cross-indexing here - bad! */
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+	be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 4:
+		/* callocate_mod returns non-zero on success */
+		if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+			&be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+			sizeof(struct ioa_v4_rcp_s),
+			&be->ioa.v4.tpid, 1,
+			sizeof(struct ioa_v4_special_tpid_s),
+			&be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+			sizeof(struct ioa_v4_roa_epp_s)))
+			return -1;
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/*
+ * Release the IOA module shadow cache allocated by hw_mod_ioa_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to make repeated calls safe (no double-free).
+ */
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+	free(be->ioa.base);
+	be->ioa.base = NULL;
+}
+
+/*
+ * Reset the IOA shadow cache to all-zero and push defaults to hardware:
+ * all RCP entries, the two custom TPID values (0x8200/0x8300) and the
+ * ROA EPP table.
+ * Returns 0 on success or the first failing flush/set result —
+ * previously all return values were ignored and 0 was returned
+ * unconditionally, unlike hw_mod_hsh_reset() which propagates errors.
+ */
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+	int err;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->ioa);
+
+	NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+	err = hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+	if (err)
+		return err;
+	NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+	err = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+	if (err)
+		return err;
+	err = hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+	if (err)
+		return err;
+	err = hw_mod_ioa_config_flush(be);
+	if (err)
+		return err;
+	NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+	return hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+}
+
+/*
+ * Write [start_idx, start_idx + count) IOA RCP shadow entries to
+ * hardware. count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+/*
+ * Get or set a single field of IOA RCP shadow entry @index.
+ * @get non-zero reads into *value, zero writes *value into the cache.
+ * Returns 0 on success or a module error code.
+ */
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_ioa_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_RCP_PRESET_ALL:
+			/* set-only: fill the whole entry with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_rcp_s));
+			break;
+		case HW_IOA_RCP_FIND:
+			rv = find_equal_index(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.rcp,
+				sizeof(struct ioa_v4_rcp_s), index, *value,
+				be->ioa.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_RCP_TUNNEL_POP:
+			get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_POP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PUSH:
+			get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_VID:
+			get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_DEI:
+			get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_PCP:
+			get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+			break;
+		case HW_IOA_RCP_VLAN_TPID_SEL:
+			get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+			get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+			break;
+		case HW_IOA_RCP_QUEUE_ID:
+			get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IOA RCP field into the shadow cache (see hw_mod_ioa_rcp_mod). */
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Read one IOA RCP field from the shadow cache (see hw_mod_ioa_rcp_mod). */
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached special-TPID configuration to hardware. */
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+/*
+ * Set one IOA config value (custom TPID 0/1) in the shadow cache.
+ * Cache-only: hw_mod_ioa_config_flush() must be called to apply.
+ * Returns 0 on success or a module error code.
+ */
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_CONFIG_CUST_TPID_0:
+			be->ioa.v4.tpid->cust_tpid_0 = value;
+			break;
+		case HW_IOA_CONFIG_CUST_TPID_1:
+			be->ioa.v4.tpid->cust_tpid_1 = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) ROA EPP shadow entries to
+ * hardware. count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->ioa.nb_roa_epp_entries;
+	if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+					    count);
+}
+
+/*
+ * Get or set a single field of ROA EPP shadow entry @index.
+ * @get non-zero reads into *value, zero writes *value into the cache.
+ * Returns 0 on success or a module error code.
+ */
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+				  enum hw_ioa_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->ioa.nb_roa_epp_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 4:
+		switch (field) {
+		case HW_IOA_ROA_EPP_PRESET_ALL:
+			/* set-only: fill the whole entry with one byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+			       sizeof(struct ioa_v4_roa_epp_s));
+			break;
+		case HW_IOA_ROA_EPP_FIND:
+			rv = find_equal_index(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_COMPARE:
+			rv = do_compare_indexes(be->ioa.v4.roa_epp,
+				sizeof(struct ioa_v4_roa_epp_s), index, *value,
+				be->ioa.nb_roa_epp_entries, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+			get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+			break;
+		case HW_IOA_ROA_EPP_TX_PORT:
+			get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 4 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one ROA EPP field into the shadow cache (see hw_mod_ioa_roa_epp_mod). */
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+/* Read one ROA EPP field from the shadow cache (see hw_mod_ioa_roa_epp_mod). */
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+			   uint32_t index, uint32_t *value)
+{
+	return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
new file mode 100644
index 0000000000..309b53ff76
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_ioa_v4.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+/* IOA v4 RCP (recipe) shadow entry: VLAN/tunnel pop-push controls and
+ * optional RX queue override.
+ */
+struct ioa_v4_rcp_s {
+	uint32_t tunnel_pop;
+	uint32_t vlan_pop;
+	uint32_t vlan_push;
+	uint32_t vlan_vid;
+	uint32_t vlan_dei;
+	uint32_t vlan_pcp;
+	uint32_t vlan_tpid_sel;
+	uint32_t queue_override_en;
+	uint32_t queue_id;
+};
+
+/* Two custom TPID values usable by vlan_tpid_sel in the RCP. */
+struct ioa_v4_special_tpid_s {
+	uint32_t cust_tpid_0;
+	uint32_t cust_tpid_1;
+};
+
+/* ROA egress-port parameter shadow entry (cross-indexed from ROA). */
+struct ioa_v4_roa_epp_s {
+	uint32_t push_tunnel;
+	uint32_t tx_port;
+};
+
+/* IOA v4 module cache: RCP table, single TPID config, ROA EPP table. */
+struct hw_mod_ioa_v4_s {
+	struct ioa_v4_rcp_s *rcp;
+	struct ioa_v4_special_tpid_s *tpid;
+	struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
new file mode 100644
index 0000000000..a91f0e0a7d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+#define KM_RCP_MASK_D_A_SIZE \
+	12 /* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_B_SIZE 6
+
+/* Report whether the KM module is present in the attached FPGA. */
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_km_present(be->be_dev);
+}
+
+/*
+ * Allocate the KM module shadow cache: RCP, CAM, TCAM, TCI and TCQ
+ * tables, all sized from backend-reported resource counts (only v7
+ * supported).
+ * Returns 0 on success, -1 on allocation failure, or a module error code.
+ */
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_km_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "KM  MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_km_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+	be->km.nb_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+	be->km.nb_cam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_records(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+	be->km.nb_cam_records = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+	be->km.nb_cam_record_words = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+	be->km.nb_tcam_banks = (uint32_t)nb;
+
+	nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+	be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		/*
+		 * Use the named mask-size constants instead of duplicating
+		 * the magic numbers 12/6; these must stay in sync with the
+		 * bounds checks in hw_mod_km_rcp_mod() (mask A uses the
+		 * double-size DW8/DW10 extractor width in v7).
+		 */
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+		if (!callocate_mod(CAST_COMMON(&be->km), 5,
+			&be->km.v7.rcp,
+			be->km.nb_categories,
+			sizeof(struct km_v7_rcp_s),
+			&be->km.v7.cam,
+			be->km.nb_cam_banks * be->km.nb_cam_records,
+			sizeof(struct km_v7_cam_s),
+			&be->km.v7.tcam,
+			/* NOTE(review): 4 * 256 looks like blocks-per-bank x
+			 * entries-per-block — confirm against the TCAM
+			 * register layout.
+			 */
+			be->km.nb_tcam_banks * 4 * 256,
+			sizeof(struct km_v7_tcam_s),
+			&be->km.v7.tci,
+			be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+			sizeof(struct km_v7_tci_s),
+			&be->km.v7.tcq,
+			KM_TCQ_ENTRIES,
+			sizeof(struct km_v7_tcq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the KM module shadow cache allocated by hw_mod_km_alloc().
+ * free(NULL) is a no-op, so no guard is needed; the pointer is cleared
+ * to make repeated calls safe (no double-free).
+ */
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+	free(be->km.base);
+	be->km.base = NULL;
+}
+
+/*
+ * Reset the KM shadow cache to all-zero and flush all tables (RCP,
+ * CAM, TCAM, TCI, TCQ) to hardware.
+ * NOTE(review): the return values of the individual flush calls are
+ * discarded and 0 is returned unconditionally — unlike
+ * hw_mod_hsh_reset(), which propagates flush errors; confirm whether
+ * this is intentional.
+ */
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+	uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+	/*  int err = 0; */
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->km);
+
+	NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+	hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* init CAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+	hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+	/* init TCAM - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled, thus need to hard reset initially to sync
+		 * cache with HW
+		 */
+		hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+				   tcam_v_set);
+	}
+	hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+	/* init TCI - all zero */
+	NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+	hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+	for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+		hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+	return 0;
+}
+
+/*
+ * Write [start_idx, start_idx + count) KM RCP shadow entries to
+ * hardware. count == ALL_ENTRIES flushes the whole table.
+ */
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_categories;
+	if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+		return error_index_too_large(__func__);
+	return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+/*
+ * Get or set a single field of KM RCP shadow entry @index.
+ * @word_off selects the word for the mask-array fields;
+ * @get non-zero reads into *value, zero writes *value into the cache.
+ * Returns 0 on success or a module error code.
+ */
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int index, int word_off, uint32_t *value, int get)
+{
+	if ((unsigned int)index >= be->km.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct km_v7_rcp_s));
+			break;
+		case HW_KM_RCP_QW0_DYN:
+			get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW0_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW0_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+			break;
+		case HW_KM_RCP_QW4_DYN:
+			get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+			break;
+		case HW_KM_RCP_QW4_OFS:
+			get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_A:
+			get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+			break;
+		case HW_KM_RCP_QW4_SEL_B:
+			get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW8_DYN:
+			get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW8_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW8_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+			break;
+		case HW_KM_RCP_DW10_DYN:
+			get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW10_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_A:
+			get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+			break;
+		case HW_KM_RCP_DW10_SEL_B:
+			get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+			break;
+		case HW_KM_RCP_SWX_CCH:
+			get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_A:
+			get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+			break;
+		case HW_KM_RCP_SWX_SEL_B:
+			get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+			break;
+		case HW_KM_RCP_MASK_A:
+			/* mask_d_a[] holds KM_RCP_MASK_D_A_SIZE words, so
+			 * word_off == SIZE is already out of bounds:
+			 * reject with >=, not > (was off-by-one).
+			 */
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+			break;
+		case HW_KM_RCP_MASK_B:
+			/* Same off-by-one fix for mask_b[] bounds. */
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+				return error_word_off_too_large(__func__);
+			get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+			break;
+		case HW_KM_RCP_DUAL:
+			get_set(&be->km.v7.rcp[index].dual, value, get);
+			break;
+		case HW_KM_RCP_PAIRED:
+			get_set(&be->km.v7.rcp[index].paired, value, get);
+			break;
+		case HW_KM_RCP_EL_A:
+			get_set(&be->km.v7.rcp[index].el_a, value, get);
+			break;
+		case HW_KM_RCP_EL_B:
+			get_set(&be->km.v7.rcp[index].el_b, value, get);
+			break;
+		case HW_KM_RCP_INFO_A:
+			get_set(&be->km.v7.rcp[index].info_a, value, get);
+			break;
+		case HW_KM_RCP_INFO_B:
+			get_set(&be->km.v7.rcp[index].info_b, value, get);
+			break;
+		case HW_KM_RCP_FTM_A:
+			get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+			break;
+		case HW_KM_RCP_FTM_B:
+			get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+			break;
+		case HW_KM_RCP_BANK_A:
+			get_set(&be->km.v7.rcp[index].bank_a, value, get);
+			break;
+		case HW_KM_RCP_BANK_B:
+			get_set(&be->km.v7.rcp[index].bank_b, value, get);
+			break;
+		case HW_KM_RCP_KL_A:
+			get_set(&be->km.v7.rcp[index].kl_a, value, get);
+			break;
+		case HW_KM_RCP_KL_B:
+			get_set(&be->km.v7.rcp[index].kl_b, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_A:
+			get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+			break;
+		case HW_KM_RCP_KEYWAY_B:
+			get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+			break;
+		case HW_KM_RCP_SYNERGY_MODE:
+			get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW0_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_DYN:
+			get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_DW2_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW4_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_DYN:
+			get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+			break;
+		case HW_KM_RCP_SW5_B_OFS:
+			get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Public set/get accessors for KM RCP (recipe) fields: thin wrappers that
+ * dispatch to the shared hw_mod_km_rcp_mod() worker with get=0 / get=1.
+ */
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int index, int word_off, uint32_t *value)
+{
+	return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
+
+/*
+ * Flush cached KM CAM entries to hardware through the backend interface.
+ * count == ALL_ENTRIES expands to every record in every bank; the range is
+ * bounds-checked against nb_cam_banks * nb_cam_records before flushing.
+ */
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+	unsigned int end =
+		start_bank * be->km.nb_cam_records + start_record + count;
+	if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get/set worker for a single KM CAM entry field in the shadow cache.
+ * The cache is a flat array indexed as bank * nb_cam_records + record.
+ * Only FPGA module version 7 is supported.
+ */
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	if ((unsigned int)bank >= be->km.nb_cam_banks)
+		return error_index_too_large(__func__);
+	if ((unsigned int)record >= be->km.nb_cam_records)
+		return error_index_too_large(__func__);
+
+	unsigned int index = bank * be->km.nb_cam_records + record;
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_CAM_PRESET_ALL:
+			/* write-only: fill the whole entry with a byte value */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->km.v7.cam[index], (uint8_t)*value,
+			       sizeof(struct km_v7_cam_s));
+			break;
+		case HW_KM_CAM_W0:
+			get_set(&be->km.v7.cam[index].w0, value, get);
+			break;
+		case HW_KM_CAM_W1:
+			get_set(&be->km.v7.cam[index].w1, value, get);
+			break;
+		case HW_KM_CAM_W2:
+			get_set(&be->km.v7.cam[index].w2, value, get);
+			break;
+		case HW_KM_CAM_W3:
+			get_set(&be->km.v7.cam[index].w3, value, get);
+			break;
+		case HW_KM_CAM_W4:
+			get_set(&be->km.v7.cam[index].w4, value, get);
+			break;
+		case HW_KM_CAM_W5:
+			get_set(&be->km.v7.cam[index].w5, value, get);
+			break;
+		case HW_KM_CAM_FT0:
+			get_set(&be->km.v7.cam[index].ft0, value, get);
+			break;
+		case HW_KM_CAM_FT1:
+			get_set(&be->km.v7.cam[index].ft1, value, get);
+			break;
+		case HW_KM_CAM_FT2:
+			get_set(&be->km.v7.cam[index].ft2, value, get);
+			break;
+		case HW_KM_CAM_FT3:
+			get_set(&be->km.v7.cam[index].ft3, value, get);
+			break;
+		case HW_KM_CAM_FT4:
+			get_set(&be->km.v7.cam[index].ft4, value, get);
+			break;
+		case HW_KM_CAM_FT5:
+			get_set(&be->km.v7.cam[index].ft5, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_km_cam_mod(). */
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCAM entries to hardware. Each bank holds 4 * 256
+ * entries (presumably 4 words of 256 byte-values each — TODO confirm
+ * against the FPGA register layout); counts are expressed in entries.
+ */
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * 4 * 256;
+	else if (count == ALL_BANK_ENTRIES)
+		count = 4 * 256;
+
+	unsigned int end = start_bank * 4 * 256 + count;
+
+	if (end > (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+					count);
+}
+
+/*
+ * Get/set worker for KM TCAM shadow entries. HW_KM_TCAM_BANK_RESET writes
+ * the same 3-word pattern to every entry of a bank; HW_KM_TCAM_T reads or
+ * writes the 3-word value of one entry. Writes mark entries dirty only if
+ * the stored value actually changes, so unchanged entries are not
+ * re-flushed. Only version 7 is supported.
+ */
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			      int bank, int byte, int byte_val,
+			      uint32_t *value_set, int get)
+{
+	unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+	if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCAM_BANK_RESET:
+			/* write-only bulk preset of one full bank */
+			if (get)
+				return error_unsup_field(__func__);
+			{
+				int start_idx = bank * 4 * 256;
+
+				for (int i = 0; i < 4 * 256; i++) {
+					be->km.v7.tcam[start_idx + i].t[0] =
+						value_set[0];
+					be->km.v7.tcam[start_idx + i].t[1] =
+						value_set[1];
+					be->km.v7.tcam[start_idx + i].t[2] =
+						value_set[2];
+					be->km.v7.tcam[start_idx + i].dirty = 1;
+				}
+			}
+			break;
+		case HW_KM_TCAM_T: {
+			int index = bank * 4 * 256 + byte * 256 + byte_val;
+
+			if (get) {
+				value_set[0] = be->km.v7.tcam[index].t[0];
+				value_set[1] = be->km.v7.tcam[index].t[1];
+				value_set[2] = be->km.v7.tcam[index].t[2];
+			} else {
+				/* only change if any bits has to be changed */
+				if (be->km.v7.tcam[index].t[0] !=
+						value_set[0] ||
+						be->km.v7.tcam[index].t[1] !=
+						value_set[1] ||
+						be->km.v7.tcam[index].t[2] !=
+						value_set[2]) {
+					be->km.v7.tcam[index].t[0] =
+						value_set[0];
+					be->km.v7.tcam[index].t[1] =
+						value_set[1];
+					be->km.v7.tcam[index].t[2] =
+						value_set[2];
+					be->km.v7.tcam[index].dirty = 1;
+				}
+			}
+		}
+		break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get wrappers; value_set points to the 3-word TCAM value. */
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  0);
+}
+
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		       int bank, int byte, int byte_val, uint32_t *value_set)
+{
+	return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+				  1);
+}
+
+/*
+ * Flush cached KM TCI entries to hardware. Entries are addressed as
+ * bank * nb_tcam_bank_width + record and bounds-checked before flushing.
+ */
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get/set worker for a KM TCI shadow entry (color and flow-type fields).
+ * Only version 7 is supported.
+ */
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCI_COLOR:
+			get_set(&be->km.v7.tci[index].color, value, get);
+			break;
+		case HW_KM_TCI_FT:
+			get_set(&be->km.v7.tci[index].ft, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_km_tci_mod(). */
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+/*
+ * Flush cached KM TCQ entries to hardware; same addressing scheme as the
+ * TCI table (bank * nb_tcam_bank_width + record).
+ */
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+			int start_record, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+	unsigned int end = (int)start_bank * be->km.nb_tcam_bank_width +
+			   start_record + count;
+
+	if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+				       start_record, count);
+}
+
+/*
+ * Get/set worker for a KM TCQ shadow entry (bank mask and qualifier).
+ * Only version 7 is supported.
+ */
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+			     int bank, int record, uint32_t *value, int get)
+{
+	unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+	if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_KM_TCQ_BANK_MASK:
+			get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+			break;
+		case HW_KM_TCQ_QUAL:
+			get_set(&be->km.v7.tcq[index].qual, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Public set/get wrappers; note set also takes a pointer, unlike TCI. */
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+		      int bank, int record, uint32_t *value)
+{
+	return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
new file mode 100644
index 0000000000..798a175609
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km_v7.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+/*
+ * Shadow-cache layout for KM module version 7. One instance of each struct
+ * mirrors one hardware table entry; the hw_mod_km.c accessors read and
+ * write these fields and flush them to the FPGA via the backend iface.
+ */
+
+/* KM RCP (recipe) entry: key-extractor selection and bank configuration. */
+struct km_v7_rcp_s {
+	uint32_t qw0_dyn;
+	int32_t qw0_ofs;
+	uint32_t qw0_sel_a;
+	uint32_t qw0_sel_b;
+	uint32_t qw4_dyn;
+	int32_t qw4_ofs;
+	uint32_t qw4_sel_a;
+	uint32_t qw4_sel_b;
+	uint32_t dw8_dyn;
+	int32_t dw8_ofs;
+	uint32_t dw8_sel_a;
+	uint32_t dw8_sel_b;
+	uint32_t dw10_dyn;
+	int32_t dw10_ofs;
+	uint32_t dw10_sel_a;
+	uint32_t dw10_sel_b;
+	uint32_t swx_cch;
+	uint32_t swx_sel_a;
+	uint32_t swx_sel_b;
+	uint32_t mask_d_a[12];
+	uint32_t mask_b[6];
+	uint32_t dual;
+	uint32_t paired;
+	uint32_t el_a;
+	uint32_t el_b;
+	uint32_t info_a;
+	uint32_t info_b;
+	uint32_t ftm_a;
+	uint32_t ftm_b;
+	uint32_t bank_a;
+	uint32_t bank_b;
+	uint32_t kl_a;
+	uint32_t kl_b;
+	uint32_t keyway_a;
+	uint32_t keyway_b;
+	uint32_t synergy_mode;
+	uint32_t dw0_b_dyn;
+	int32_t dw0_b_ofs;
+	uint32_t dw2_b_dyn;
+	int32_t dw2_b_ofs;
+	uint32_t sw4_b_dyn;
+	int32_t sw4_b_ofs;
+	uint32_t sw5_b_dyn;
+	int32_t sw5_b_ofs;
+};
+
+/* KM CAM entry: six match words (w0-w5) and six flow-type fields. */
+struct km_v7_cam_s {
+	uint32_t w0;
+	uint32_t w1;
+	uint32_t w2;
+	uint32_t w3;
+	uint32_t w4;
+	uint32_t w5;
+	uint32_t ft0;
+	uint32_t ft1;
+	uint32_t ft2;
+	uint32_t ft3;
+	uint32_t ft4;
+	uint32_t ft5;
+};
+
+/* KM TCAM entry: 3-word value plus a dirty flag used by the flush logic. */
+struct km_v7_tcam_s {
+	uint32_t t[3];
+	uint32_t dirty;
+};
+
+/* KM TCI entry. */
+struct km_v7_tci_s {
+	uint32_t color;
+	uint32_t ft;
+};
+
+/* KM TCQ entry. */
+struct km_v7_tcq_s {
+	uint32_t bank_mask;
+	uint32_t qual;
+};
+
+/* Top-level v7 cache: arrays allocated by hw_mod_km_alloc(). */
+struct hw_mod_km_v7_s {
+	struct km_v7_rcp_s *rcp;
+	struct km_v7_cam_s *cam;
+	struct km_v7_tcam_s *tcam;
+	struct km_v7_tci_s *tci;
+	struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
new file mode 100644
index 0000000000..8d0d70cae2
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
+
+/* Query the backend for whether the PDB FPGA module exists on this device. */
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_pdb_present(be->be_dev);
+}
+
+/*
+ * Read the PDB module version and category count from the backend and
+ * allocate the version-specific shadow cache. Returns 0 on success,
+ * negative on unsupported version or allocation failure.
+ */
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_pdb_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "PDB MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_pdb_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+	be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 9:
+		if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+			&be->pdb.v9.rcp,
+			be->pdb.nb_pdb_rcp_categories,
+			sizeof(struct pdb_v9_rcp_s),
+			&be->pdb.v9.config,
+			1,
+			sizeof(struct pdb_v9_config_s)))
+			return -1;
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the shadow cache allocated by hw_mod_pdb_alloc(). */
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+	if (be->pdb.base) {
+		free(be->pdb.base);
+		be->pdb.base = NULL;
+	}
+}
+
+/*
+ * Reset the PDB module: zero the shadow cache and flush all RCP entries
+ * and the config record to hardware. Returns the OR of the flush results.
+ */
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->pdb);
+
+	NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+	err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+	err |= hw_mod_pdb_config_flush(be);
+	return err;
+}
+
+/* Flush a range of cached PDB RCP entries to hardware. */
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->pdb.nb_pdb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
+/*
+ * Get/set worker for a PDB RCP shadow entry. Besides plain field access it
+ * supports PRESET_ALL (byte-fill, write-only), FIND (locate an entry equal
+ * to the one at *value) and COMPARE (compare two entries by index). Only
+ * version 9 is supported.
+ */
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_pdb_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->pdb.nb_pdb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+			       sizeof(struct pdb_v9_rcp_s));
+			break;
+		case HW_PDB_RCP_FIND:
+			rv = find_equal_index(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_COMPARE:
+			rv = do_compare_indexes(be->pdb.v9.rcp,
+				sizeof(struct pdb_v9_rcp_s), index, *value,
+				be->pdb.nb_pdb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_PDB_RCP_DESCRIPTOR:
+			get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+			break;
+		case HW_PDB_RCP_DESC_LEN:
+			get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+			break;
+		case HW_PDB_RCP_TX_PORT:
+			get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+			break;
+		case HW_PDB_RCP_TX_IGNORE:
+			get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+			break;
+		case HW_PDB_RCP_TX_NOW:
+			get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+			break;
+		case HW_PDB_RCP_CRC_OVERWRITE:
+			get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+			break;
+		case HW_PDB_RCP_ALIGN:
+			get_set(&be->pdb.v9.rcp[index].align, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS0_REL:
+			/* signed offset field */
+			get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS1_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_DYN:
+			get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+			break;
+		case HW_PDB_RCP_OFS2_REL:
+			get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+			break;
+		case HW_PDB_RCP_IP_PROT_TNL:
+			get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+			break;
+		case HW_PDB_RCP_PPC_HSH:
+			get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_EN:
+			get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+			break;
+		case HW_PDB_RCP_DUPLICATE_BIT:
+			get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+			break;
+		case HW_PDB_RCP_PCAP_KEEP_FCS:
+			get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_pdb_rcp_mod(). */
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush the cached PDB config record to hardware. */
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+/*
+ * Set a PDB config field in the shadow cache (set-only; there is no
+ * matching getter). Caller must flush to apply the value to hardware.
+ */
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+			  uint32_t value)
+{
+	switch (_VER_) {
+	case 9:
+		switch (field) {
+		case HW_PDB_CONFIG_TS_FORMAT:
+			be->pdb.v9.config->ts_format = value;
+			break;
+		case HW_PDB_CONFIG_PORT_OFS:
+			be->pdb.v9.config->port_ofs = value;
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 9 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
new file mode 100644
index 0000000000..b14c10bc53
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb_v9.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+/* PDB RCP (recipe) shadow entry for module version 9. */
+struct pdb_v9_rcp_s {
+	uint32_t descriptor;
+	uint32_t desc_len;
+	uint32_t tx_port;
+	uint32_t tx_ignore;
+	uint32_t tx_now;
+	uint32_t crc_overwrite;
+	uint32_t align;
+	uint32_t ofs0_dyn;
+	int32_t ofs0_rel;	/* signed relative offset */
+	uint32_t ofs1_dyn;
+	int32_t ofs1_rel;
+	uint32_t ofs2_dyn;
+	int32_t ofs2_rel;
+	uint32_t ip_prot_tnl;
+	uint32_t ppc_hsh;
+	uint32_t duplicate_en;
+	uint32_t duplicate_bit;
+	uint32_t pcap_keep_fcs; /* only field added to v9 cmp to v7/8 */
+};
+
+/* Module-wide PDB configuration record. */
+struct pdb_v9_config_s {
+	uint32_t ts_format;
+	uint32_t port_ofs;
+};
+
+/* Top-level v9 cache: arrays allocated by hw_mod_pdb_alloc(). */
+struct hw_mod_pdb_v9_s {
+	struct pdb_v9_rcp_s *rcp;
+	struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
new file mode 100644
index 0000000000..86d23ea683
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+/* Query the backend for whether the QSL FPGA module exists on this device. */
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_qsl_present(be->be_dev);
+}
+
+/*
+ * Read the QSL module version and table sizes from the backend and allocate
+ * the version-specific shadow cache (RCP, QST, QEN and UNMQ tables).
+ */
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_qsl_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "QSL MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_qsl_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+	be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+	nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+	be->qsl.nb_qst_entries = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 7:
+		if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+			&be->qsl.v7.rcp,
+			be->qsl.nb_rcp_categories,
+			sizeof(struct qsl_v7_rcp_s),
+			&be->qsl.v7.qst,
+			be->qsl.nb_qst_entries,
+			sizeof(struct qsl_v7_qst_s),
+			&be->qsl.v7.qen,
+			QSL_QEN_ENTRIES,
+			sizeof(struct qsl_v7_qen_s),
+			&be->qsl.v7.unmq,
+			QSL_QNMQ_ENTRIES,
+			sizeof(struct qsl_v7_unmq_s)))
+			return -1;
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the shadow cache allocated by hw_mod_qsl_alloc(). */
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+	if (be->qsl.base) {
+		free(be->qsl.base);
+		be->qsl.base = NULL;
+	}
+}
+
+/*
+ * Reset the QSL module: zero the shadow cache and flush all four tables to
+ * hardware. Flush results of the first three tables are not checked here.
+ */
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->qsl);
+
+	NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+	hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+	hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+	hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+	be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, 0, 256);
+
+	return 0;
+}
+
+/* Flush a range of cached QSL RCP entries to hardware. */
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_rcp_categories;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get/set worker for a QSL RCP shadow entry; also supports PRESET_ALL
+ * (byte-fill, write-only), FIND and COMPARE helpers. Only version 7 is
+ * supported.
+ */
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->qsl.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_rcp_s));
+			break;
+		case HW_QSL_RCP_FIND:
+			rv = find_equal_index(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_COMPARE:
+			rv = do_compare_indexes(be->qsl.v7.rcp,
+				sizeof(struct qsl_v7_rcp_s), index, *value,
+				be->qsl.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_QSL_RCP_DISCARD:
+			get_set(&be->qsl.v7.rcp[index].discard, value, get);
+			break;
+		case HW_QSL_RCP_DROP:
+			get_set(&be->qsl.v7.rcp[index].drop, value, get);
+			break;
+		case HW_QSL_RCP_TBL_LO:
+			get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+			break;
+		case HW_QSL_RCP_TBL_HI:
+			get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+			break;
+		case HW_QSL_RCP_TBL_IDX:
+			get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+			break;
+		case HW_QSL_RCP_TBL_MSK:
+			get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+			break;
+		case HW_QSL_RCP_LR:
+			get_set(&be->qsl.v7.rcp[index].lr, value, get);
+			break;
+		case HW_QSL_RCP_TSA:
+			get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+			break;
+		case HW_QSL_RCP_VLI:
+			get_set(&be->qsl.v7.rcp[index].vli, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_qsl_rcp_mod(). */
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+/* Flush a range of cached QSL QST (queue-selection table) entries. */
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->qsl.nb_qst_entries;
+	if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/*
+ * Get/set worker for a QSL QST shadow entry. Only version 7 is supported.
+ */
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= be->qsl.nb_qst_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QST_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+			       sizeof(struct qsl_v7_qst_s));
+			break;
+		case HW_QSL_QST_QUEUE:
+			get_set(&be->qsl.v7.qst[index].queue, value, get);
+			break;
+		case HW_QSL_QST_EN:
+			get_set(&be->qsl.v7.qst[index].en, value, get);
+			break;
+		case HW_QSL_QST_TX_PORT:
+			get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+			break;
+		case HW_QSL_QST_LRE:
+			get_set(&be->qsl.v7.qst[index].lre, value, get);
+			break;
+		case HW_QSL_QST_TCI:
+			get_set(&be->qsl.v7.qst[index].tci, value, get);
+			break;
+		case HW_QSL_QST_VEN:
+			get_set(&be->qsl.v7.qst[index].ven, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_qsl_qst_mod(). */
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+/* Flush a range of cached QSL QEN (queue enable) entries to hardware. */
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QEN_ENTRIES;
+	if ((start_idx + count) > QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+/* Get/set worker for a QSL QEN shadow entry. Only version 7 supported. */
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+			      enum hw_qsl_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	if (index >= QSL_QEN_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_QEN_EN:
+			get_set(&be->qsl.v7.qen[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_qsl_qen_mod(). */
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+/* Flush a range of cached QSL UNMQ (unmatched-queue) entries to hardware. */
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+			  int count)
+{
+	if (count == ALL_ENTRIES)
+		count = QSL_QNMQ_ENTRIES;
+	if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+					 count);
+}
+
+/* Get/set worker for a QSL UNMQ shadow entry. Only version 7 supported. */
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+			       enum hw_qsl_e field, uint32_t index,
+			       uint32_t *value, int get)
+{
+	if (index >= QSL_QNMQ_ENTRIES)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 7:
+		switch (field) {
+		case HW_QSL_UNMQ_DEST_QUEUE:
+			get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+			break;
+		case HW_QSL_UNMQ_EN:
+			get_set(&be->qsl.v7.unmq[index].en, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 7 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_qsl_unmq_mod(). */
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+			uint32_t index, uint32_t *value)
+{
+	return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
new file mode 100644
index 0000000000..747da4bc43
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl_v7.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+/* QSL RCP (recipe) shadow entry for module version 7. */
+struct qsl_v7_rcp_s {
+	uint32_t discard;
+	uint32_t drop;
+	uint32_t tbl_lo;
+	uint32_t tbl_hi;
+	uint32_t tbl_idx;
+	uint32_t tbl_msk;
+	uint32_t lr;
+	uint32_t tsa;
+	uint32_t vli;
+};
+
+/* QSL queue-selection table entry. */
+struct qsl_v7_qst_s {
+	uint32_t queue;
+	uint32_t en;
+	uint32_t tx_port;
+	uint32_t lre;
+	uint32_t tci;
+	uint32_t ven;
+};
+
+/* QSL queue-enable entry. */
+struct qsl_v7_qen_s {
+	uint32_t en;
+};
+
+/* QSL unmatched-queue entry. */
+struct qsl_v7_unmq_s {
+	uint32_t dest_queue;
+	uint32_t en;
+};
+
+/* Top-level v7 cache: arrays allocated by hw_mod_qsl_alloc(). */
+struct hw_mod_qsl_v7_s {
+	struct qsl_v7_rcp_s *rcp;
+	struct qsl_v7_qst_s *qst;
+	struct qsl_v7_qen_s *qen;
+	struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
new file mode 100644
index 0000000000..7f1d695a90
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+/* Query the backend for whether the RMC FPGA module exists on this device. */
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+	return be->iface->get_rmc_present(be->be_dev);
+}
+
+/*
+ * Read the RMC module version and allocate the version-specific shadow
+ * cache. 0x10003 encodes version 1.3 (major in the high 16 bits).
+ */
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_rmc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "RMC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 0x10003:
+		if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+			&be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+			return -1;
+		break;
+	/* end case 1_3 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the shadow cache allocated by hw_mod_rmc_alloc(). */
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+	if (be->rmc.base) {
+		free(be->rmc.base);
+		be->rmc.base = NULL;
+	}
+}
+
+/*
+ * Reset the RMC module to a safe default state: zero the shadow cache,
+ * block statistics/keep-alive traffic, block all MAC ports and all RPP
+ * slices, then flush the control record to hardware.
+ *
+ * Returns the result of the control flush.
+ */
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->rmc);
+
+	NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+	/* disable block stat, block keep alive */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+			    0xff); /* initially block all ports */
+	hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+	return hw_mod_rmc_ctrl_flush(be);
+}
+
+/* Flush the cached RMC control record to hardware. */
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+	return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+/*
+ * Get/set worker for RMC control fields in the shadow cache. Only module
+ * version 1.3 (0x10003) is supported.
+ */
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+			       enum hw_rmc_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 0x10003:
+		switch (field) {
+		case HW_RMC_BLOCK_STATT:
+			get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+			break;
+		case HW_RMC_BLOCK_KEEPA:
+			get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+			break;
+		case HW_RMC_BLOCK_RPP_SLICE:
+			get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+			break;
+		case HW_RMC_BLOCK_MAC_PORT:
+			get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+			break;
+		case HW_RMC_LAG_PHY_ODD_EVEN:
+			get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1.3 */
+
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Public set/get wrappers around hw_mod_rmc_ctrl_mod(). */
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+			uint32_t *value)
+{
+	return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
new file mode 100644
index 0000000000..8f90695821
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_rmc_v1_3.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+/* Shadow of the RMC (v1.3) control register fields. */
+struct rmc_v1_3_ctrl_s {
+	uint32_t block_statt;	/* block statistics traffic */
+	uint32_t block_keepa;	/* block keep-alive traffic */
+	uint32_t block_rpp_slice;
+	uint32_t block_mac_port;	/* per-MAC-port Rx block mask (0xff = all) */
+	uint32_t lag_phy_odd_even;
+};
+
+/* RMC v1.3 module cache: a single CTRL record. */
+struct hw_mod_rmc_v1_3_s {
+	struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
new file mode 100644
index 0000000000..933caf9fd5
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+/* Query the backend whether the ROA module exists in this FPGA image. */
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_roa_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the ROA module version and resource counts from the backend and
+ * allocate the shadow caches for all ROA register banks.
+ * Returns 0 on success, -1 on allocation failure, or an error from the
+ * error_* helpers on unsupported version / bad resource counts.
+ */
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_roa_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "ROA MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_roa_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+	be->roa.nb_tun_categories = (uint32_t)nb;
+
+	/* NOTE(review): tunnel categories appear to be grouped four to one
+	 * entry here - confirm the divisor against the register layout.
+	 */
+	be->roa.nb_tun_categories /= 4;
+
+	switch (_VER_) {
+	case 6:
+		be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+		if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+			&be->roa.v6.tunhdr,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tunhdr_s),
+			&be->roa.v6.tuncfg,
+			be->roa.nb_tun_categories,
+			sizeof(struct roa_v6_tuncfg_s),
+			&be->roa.v6.config,
+			1,
+			sizeof(struct roa_v6_config_s),
+			&be->roa.v6.lagcfg,
+			be->roa.nb_lag_entries,
+			sizeof(struct roa_v6_lagcfg_s)))
+			return -1;
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+	return 0;
+}
+
+/* Release the ROA shadow cache allocated by hw_mod_roa_alloc(). */
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so the NULL guard is redundant; clear the
+	 * pointer to make double free / use-after-free harmless.
+	 */
+	free(be->roa.base);
+	be->roa.base = NULL;
+}
+
+/*
+ * Zero the ROA shadow cache, program power-on defaults and flush every
+ * bank to the device.  Previously only the TUNHDR flush result was
+ * returned and all other errors were silently dropped; accumulate them
+ * with |= like hw_mod_tpe_reset() so any failure is reported.
+ */
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->roa);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+	err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+	err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+	NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+	/* defaults: recirculate and forward normal packets on both TX ports */
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+	err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+	err |= hw_mod_roa_config_flush(be);
+
+	NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+	err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/* Flush [start_idx, start_idx + count) cached tunnel headers to HW. */
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const uint32_t nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one 32-bit word of a cached tunnel push-header.
+ * @index is the tunnel category, @word_off the 32-bit word offset into
+ * the header; HW_ROA_TUNHDR_COMPARE compares whole entries instead.
+ */
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNHDR_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tunhdr,
+				sizeof(struct roa_v6_tunhdr_s), index, word_off,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNHDR:
+			get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+				value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one tunnel-header word into the shadow cache. */
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &v, 0);
+}
+
+/* Read one tunnel-header word from the shadow cache into *value. */
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t word_off, uint32_t *value)
+{
+	return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) cached TUNCFG records to HW. */
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const uint32_t nb = be->roa.nb_tun_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one field of a cached tunnel-config (TUNCFG) record.
+ * PRESET_ALL (set-only), FIND and COMPARE are pseudo fields that operate
+ * on whole records rather than a single member.
+ */
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->roa.nb_tun_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_TUNCFG_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+			       sizeof(struct roa_v6_tuncfg_s));
+			break;
+		case HW_ROA_TUNCFG_FIND:
+			rv = find_equal_index(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_COMPARE:
+			rv = do_compare_indexes(be->roa.v6.tuncfg,
+				sizeof(struct roa_v6_tuncfg_s), index, *value,
+				be->roa.nb_tun_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_ROA_TUNCFG_TUN_LEN:
+			get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_TUN_VLAN:
+			get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+			break;
+		case HW_ROA_TUNCFG_IP_TYPE:
+			get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_UPD:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPCS_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_UPD:
+			get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+			break;
+		case HW_ROA_TUNCFG_IPTL_PRECALC:
+			get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+			break;
+		case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+			get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+				value, get);
+			break;
+		case HW_ROA_TUNCFG_TX_LAG_IX:
+			get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRCULATE:
+			get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+			break;
+		case HW_ROA_TUNCFG_PUSH_TUNNEL:
+			get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_PORT:
+			get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+			break;
+		case HW_ROA_TUNCFG_RECIRC_BYPASS:
+			get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one TUNCFG field into the shadow cache. */
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_tuncfg_mod(be, field, index, &v, 0);
+}
+
+/* Read one TUNCFG field from the shadow cache into *value. */
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+/* Write the single cached ROA CONFIG record to the device. */
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+	int res = be->iface->roa_config_flush(be->be_dev, &be->roa);
+
+	return res;
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one field of the single cached ROA
+ * CONFIG record.  Only ROA version 6 is supported.
+ */
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t *value, int get)
+{
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_CONFIG_FWD_RECIRCULATE:
+			get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT0:
+			get_set(&be->roa.v6.config->fwd_txport0, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_TXPORT1:
+			get_set(&be->roa.v6.config->fwd_txport1, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+			get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+			break;
+		case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+			get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one CONFIG field into the shadow cache. */
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_config_mod(be, field, &v, 0);
+}
+
+/* Read one CONFIG field from the shadow cache into *value. */
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t *value)
+{
+	return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+/* Flush [start_idx, start_idx + count) cached LAGCFG entries to HW. */
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	const uint32_t nb = be->roa.nb_lag_entries;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+					   count);
+}
+
+/*
+ * Read or write the TX PHY port of one cached LAG configuration entry.
+ */
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+				 enum hw_roa_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	if (index >= be->roa.nb_lag_entries)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 6:
+		switch (field) {
+		case HW_ROA_LAGCFG_TXPHY_PORT:
+			get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+			break;
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 6 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one LAGCFG field into the shadow cache. */
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_roa_lagcfg_mod(be, field, index, &v, 0);
+}
+
+/* Read one LAGCFG field from the shadow cache into *value. */
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
new file mode 100644
index 0000000000..9930c52428
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_roa_v6.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+/* One cached tunnel push-header: 16 x 32-bit words (64 bytes). */
+struct roa_v6_tunhdr_s {
+	uint32_t tunnel_hdr[4 * 4];
+};
+
+/* Per-category tunnel configuration shadow. */
+struct roa_v6_tuncfg_s {
+	uint32_t tun_len;	/* length of the pushed tunnel header */
+	uint32_t tun_type;
+	uint32_t tun_vlan;
+	uint32_t ip_type;
+	uint32_t ipcs_upd;	/* update IP checksum */
+	uint32_t ipcs_precalc;	/* precalculated IP checksum */
+	uint32_t iptl_upd;	/* update IP total length */
+	uint32_t iptl_precalc;
+	uint32_t vxlan_udp_len_upd;	/* update VXLAN UDP length */
+	uint32_t tx_lag_ix;
+	uint32_t recirculate;
+	uint32_t push_tunnel;
+	uint32_t recirc_port;
+	uint32_t recirc_bypass;
+};
+
+/* Global ROA forwarding configuration shadow (single instance). */
+struct roa_v6_config_s {
+	uint32_t fwd_recirculate;
+	uint32_t fwd_normal_pcks;
+	uint32_t fwd_txport0;
+	uint32_t fwd_txport1;
+	uint32_t fwd_cellbuilder_pcks;
+	uint32_t fwd_non_normal_pcks;
+};
+
+/* One LAG configuration entry. */
+struct roa_v6_lagcfg_s {
+	uint32_t txphy_port;
+};
+
+/* ROA v6 module cache: one array/record per register bank. */
+struct hw_mod_roa_v6_s {
+	struct roa_v6_tunhdr_s *tunhdr;
+	struct roa_v6_tuncfg_s *tuncfg;
+	struct roa_v6_config_s *config;
+	struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
new file mode 100644
index 0000000000..ca65db7232
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+/* Query the backend whether the SLC module exists in this FPGA image. */
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_slc_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the SLC module version and allocate the RCP shadow cache,
+ * one entry per flow category.  Returns 0 on success, -1 on allocation
+ * failure, or an error for an unsupported module version.
+ */
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+			&be->slc.v1.rcp,
+			be->max_categories,
+			sizeof(struct slc_v1_rcp_s)))
+			return -1;
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC shadow cache allocated by hw_mod_slc_alloc(). */
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so the NULL guard is redundant; clear the
+	 * pointer to make double free / use-after-free harmless.
+	 */
+	free(be->slc.base);
+	be->slc.base = NULL;
+}
+
+/* Zero the SLC shadow cache and flush all RCP entries to the device. */
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc);
+
+	NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+	return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) cached SLC RCP entries to HW. */
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			 int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+
+	return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx,
+					count);
+}
+
+/*
+ * Read or write one field of a cached SLC RCP entry.
+ *
+ * BUGFIX: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_v1_s) - the size of the container struct that
+ * only holds the RCP pointer - instead of the size of one RCP entry,
+ * so presets touched the wrong number of bytes and the array stride used
+ * for find/compare was wrong.  Use sizeof(struct slc_v1_rcp_s), matching
+ * the element type of be->slc.v1.rcp (see hw_mod_roa.c for the pattern).
+ */
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+			      enum hw_slc_e field, uint32_t index,
+			      uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+		switch (field) {
+		case HW_SLC_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_v1_rcp_s));
+			break;
+		case HW_SLC_RCP_FIND:
+			rv = find_equal_index(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc.v1.rcp,
+				sizeof(struct slc_v1_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_RCP_SLC_EN:
+			get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_RCP_DYN:
+			get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_RCP_OFS:
+			get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+			break;
+		case HW_SLC_RCP_PCAP:
+			get_set(&be->slc.v1.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC RCP field into the shadow cache. */
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_slc_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one SLC RCP field from the shadow cache into *value. */
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+		       uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
new file mode 100644
index 0000000000..9f40563e8a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+/* Query the backend whether the SLC LR module exists in this FPGA image. */
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_slc_lr_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the SLC LR module version and allocate the RCP shadow cache,
+ * one entry per flow category.  Returns 0 on success, -1 on allocation
+ * failure, or an error for an unsupported module version.
+ */
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+	_VER_ = be->iface->get_slc_lr_version(be->be_dev);
+	NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION  %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	switch (_VER_) {
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+			&be->slc_lr.v2.rcp,
+			be->max_categories,
+			sizeof(struct slc_lr_v2_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the SLC LR shadow cache allocated by hw_mod_slc_lr_alloc(). */
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so the NULL guard is redundant; clear the
+	 * pointer to make double free / use-after-free harmless.
+	 */
+	free(be->slc_lr.base);
+	be->slc_lr.base = NULL;
+}
+
+/* Zero the SLC LR shadow cache and flush all RCP entries to the device. */
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->slc_lr);
+
+	NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+	return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+/* Flush [start_idx, start_idx + count) cached SLC LR RCP entries to HW. */
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			    int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->max_categories;
+	if ((unsigned int)(start_idx + count) > be->max_categories)
+		return error_index_too_large(__func__);
+
+	return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+					   count);
+}
+
+/*
+ * Read or write one field of a cached SLC LR RCP entry.
+ *
+ * BUGFIX: PRESET_ALL/FIND/COMPARE previously used
+ * sizeof(struct hw_mod_slc_lr_v2_s) - the size of the container struct
+ * that only holds the RCP pointer - instead of the size of one RCP
+ * entry, so presets touched the wrong number of bytes and the array
+ * stride used for find/compare was wrong.  Use
+ * sizeof(struct slc_lr_v2_rcp_s), matching the element type of
+ * be->slc_lr.v2.rcp (see hw_mod_roa.c for the pattern).
+ */
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+				 enum hw_slc_lr_e field, uint32_t index,
+				 uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->max_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_SLC_LR_RCP_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+			       sizeof(struct slc_lr_v2_rcp_s));
+			break;
+		case HW_SLC_LR_RCP_FIND:
+			rv = find_equal_index(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_COMPARE:
+			rv = do_compare_indexes(be->slc_lr.v2.rcp,
+				sizeof(struct slc_lr_v2_rcp_s), index, *value,
+				be->max_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_SLC_LR_RCP_SLC_EN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+			break;
+		case HW_SLC_LR_RCP_DYN:
+			get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+			break;
+		case HW_SLC_LR_RCP_OFS:
+			get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+				       value, get);
+			break;
+		case HW_SLC_LR_RCP_PCAP:
+			get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one SLC LR RCP field into the shadow cache. */
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_slc_lr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one SLC LR RCP field from the shadow cache into *value. */
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+			  uint32_t index, uint32_t *value)
+{
+	return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
new file mode 100644
index 0000000000..d03b206e6d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr_v2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+/* One SLC LR (slicer, long-range) recipe entry. */
+struct slc_lr_v2_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* SLC LR v2 module cache: RCP table only. */
+struct hw_mod_slc_lr_v2_s {
+	struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
new file mode 100644
index 0000000000..01f60bfb18
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_v1.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+/* One SLC (slicer) recipe entry. */
+struct slc_v1_rcp_s {
+	uint32_t tail_slc_en;	/* enable tail slicing */
+	uint32_t tail_dyn;
+	int32_t tail_ofs;	/* signed tail offset */
+	uint32_t pcap;
+};
+
+/* SLC v1 module cache: RCP table only. */
+struct hw_mod_slc_v1_s {
+	struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
new file mode 100644
index 0000000000..dd6f06b2c4
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+/* Query the backend whether the TPE module exists in this FPGA image. */
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+	bool present = be->iface->get_tpe_present(be->be_dev);
+
+	return present;
+}
+
+/*
+ * Query the TPE module version and resource counts and allocate the
+ * shadow caches for all TPE register banks.  Version 2 adds the per-port
+ * IFR (MTU) tables on top of the v1 layout.  Returns 0 on success, -1 on
+ * allocation failure, or an error from the error_* helpers.
+ */
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+	int nb;
+
+	_VER_ = be->iface->get_tpe_version(be->be_dev);
+	NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+	       VER_MINOR(_VER_));
+
+	nb = be->iface->get_nb_tpe_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+	be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+	be->tpe.nb_ifr_categories = 0;
+	if (_VER_ > 1) {
+		nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+		if (nb <= 0)
+			return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+		be->tpe.nb_ifr_categories = (uint32_t)nb;
+	}
+
+	nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+	be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+	be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+	nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+	if (nb <= 0)
+		return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+	be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+	switch (_VER_) {
+	case 1:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+			&be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v1.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	case 2:
+		if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+			&be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpp_v0_rcp_s),
+			&be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+			&be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+			sizeof(struct tpe_v2_ifr_v1_rcp_s),
+			&be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_ins_v1_rcp_s),
+			&be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_rpl_v2_rcp_s),
+			&be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+			sizeof(struct tpe_v1_rpl_v2_ext_s),
+			&be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+			sizeof(struct tpe_v1_rpl_v2_rpl_s),
+			&be->tpe.v2.cpy_rcp,
+			be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_cpy_v1_rcp_s),
+			&be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_hfu_v1_rcp_s),
+			&be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+			sizeof(struct tpe_v1_csu_v0_rcp_s)))
+			return -1;
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Release the TPE shadow cache allocated by hw_mod_tpe_alloc(). */
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+	/* free(NULL) is a no-op, so the NULL guard is redundant; clear the
+	 * pointer to make double free / use-after-free harmless.
+	 */
+	free(be->tpe.base);
+	be->tpe.base = NULL;
+}
+
+/*
+ * Zero the TPE shadow cache and flush every bank to the device.
+ * Errors from the individual flushes are accumulated with |=.
+ */
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero entire cache area */
+	ZERO_MOD_CACHE(&be->tpe);
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+	/* the IFR banks only exist from TPE version 2 */
+	if (_VER_ == 2) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+		err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	}
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached RPP IFR RCP entries. */
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+				 int count)
+{
+	const uint32_t nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+						count);
+}
+
+/*
+ * Read or write one field of a cached RPP IFR (MTU) RCP entry.
+ * Only TPE version 2 has this bank.
+ */
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+				      enum hw_tpe_e field, uint32_t index,
+				      uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP IFR RCP field into the shadow cache. */
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one RPP IFR RCP field from the shadow cache into *value. */
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+			       enum hw_tpe_e field, int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached RPP RCP entries to HW. */
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one field of a cached RPP RCP entry.  PRESET_ALL
+ * (set-only), FIND and COMPARE operate on whole records.  The v1 cache
+ * view is used for both versions; the layout is shared.
+ */
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPP_RCP_EXP:
+			get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one RPP RCP field into the shadow cache. */
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one RPP RCP field from the shadow cache into *value. */
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached IFR RCP entries to HW. */
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t nb = be->tpe.nb_ifr_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one field of a cached IFR (MTU) RCP entry.
+ * Only TPE version 2 has this bank.
+ */
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_ifr_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 2:
+		switch (field) {
+		case HW_TPE_IFR_RCP_EN:
+			get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+			break;
+
+		case HW_TPE_IFR_RCP_MTU:
+			get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Write one IFR RCP field into the shadow cache. */
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	uint32_t v = value;
+
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, &v, 0);
+}
+
+/* Read one IFR RCP field from the shadow cache into *value. */
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+/* Flush [start_idx, start_idx + count) cached INS RCP entries to HW. */
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t nb = be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = nb;
+	if ((unsigned int)(start_idx + count) > nb)
+		return error_index_too_large(__func__);
+
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read or write one field of a cached INS (insert) RCP entry.
+ * PRESET_ALL (set-only), FIND and COMPARE operate on whole records.
+ */
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+				sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_INS_RCP_DYN:
+			get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_INS_RCP_OFS:
+			get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_INS_RCP_LEN:
+			get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end case 1 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one INS RCP field; thin wrapper around hw_mod_tpe_ins_rcp_mod(). */
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one INS RCP field; thin wrapper around hw_mod_tpe_ins_rcp_mod(). */
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+/*
+ * Flush RPL (replacer) RCP entries [start_idx, start_idx + count) from the
+ * SW shadow copy to hardware.  count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/* NOTE(review): assumes start_idx >= 0 — a negative value is not rejected here. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one RPL RCP field of shadow entry
+ * 'index'.  PRESET_ALL/FIND/COMPARE operate on the whole entry.
+ */
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Write-only: fill the entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+				sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RCP_DYN:
+			get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_RPL_RCP_OFS:
+			get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_RPL_RCP_LEN:
+			get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+			break;
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1-2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL RCP field; thin wrapper around hw_mod_tpe_rpl_rcp_mod(). */
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL RCP field; thin wrapper around hw_mod_tpe_rpl_rcp_mod(). */
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+/*
+ * Flush RPL EXT entries [start_idx, start_idx + count) from the SW shadow
+ * copy to hardware.  count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+	/* NOTE(review): assumes start_idx >= 0 — a negative value is not rejected here. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one RPL EXT field of shadow entry
+ * 'index'.  PRESET_ALL/FIND/COMPARE operate on the whole entry.
+ */
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_ext_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Write-only: fill the entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+				sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+				be->tpe.nb_rpl_ext_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+			break;
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			/* SW-only bookkeeping field (see tpe_v1_rpl_v2_ext_s). */
+			get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1-2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one RPL EXT field; thin wrapper around hw_mod_tpe_rpl_ext_mod(). */
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+/* Get one RPL EXT field; thin wrapper around hw_mod_tpe_rpl_ext_mod(). */
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+/*
+ * Flush RPL replace-data entries [start_idx, start_idx + count) from the SW
+ * shadow copy to hardware.  count == ALL_ENTRIES selects the whole table.
+ */
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+	/* NOTE(review): assumes start_idx >= 0 — a negative value is not rejected here. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) RPL replace data of shadow entry
+ * 'index'.  HW_TPE_RPL_RPL_VALUE copies all 4 x 32-bit words; 'value' must
+ * therefore point to at least 16 bytes for that field.
+ */
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rpl_depth)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Write-only: fill the entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+				be->tpe.nb_rpl_depth, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_RPL_RPL_VALUE:
+			/* Whole 16-byte replace pattern, not a single word. */
+			if (get)
+				memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+				       sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+				       sizeof(uint32_t) * 4);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1-2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/*
+ * Set RPL replace data.  Unlike the other *_set wrappers this takes a
+ * pointer, because HW_TPE_RPL_RPL_VALUE transfers 4 x 32-bit words.
+ */
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+/* Get RPL replace data; thin wrapper around hw_mod_tpe_rpl_rpl_mod(). */
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
+
+/*
+ * CPY_RCP
+ */
+
+/*
+ * Flush CPY (copier) RCP entries [start_idx, start_idx + count) from the SW
+ * shadow copy to hardware.  The CPY table is nb_cpy_writers * nb_rcp_categories
+ * entries; count == ALL_ENTRIES selects all of them.
+ */
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+	/* NOTE(review): assumes start_idx >= 0 — a negative value is not rejected here. */
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CPY RCP field of shadow entry
+ * 'index'.  Index space covers nb_cpy_writers * nb_rcp_categories entries.
+ */
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	const uint32_t cpy_size =
+		be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+	int rv = 0;
+	if (index >= cpy_size)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Write-only: fill the entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+				cpy_size, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+			break;
+		case HW_TPE_CPY_RCP_DYN:
+			get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+			break;
+		case HW_TPE_CPY_RCP_OFS:
+			get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+			break;
+		case HW_TPE_CPY_RCP_LEN:
+			get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1-2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CPY RCP field; thin wrapper around hw_mod_tpe_cpy_rcp_mod(). */
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CPY RCP field; thin wrapper around hw_mod_tpe_cpy_rcp_mod(). */
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+/*
+ * Flush HFU (header field update) RCP entries [start_idx, start_idx + count)
+ * from the SW shadow copy to hardware.  count == ALL_ENTRIES selects all.
+ */
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/* NOTE(review): assumes start_idx >= 0 — a negative value is not rejected here. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one HFU RCP field of shadow entry
+ * 'index'.  Fields are grouped: length updaters A/B/C, TTL update, and
+ * protocol/offset information for the frame parser.
+ */
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Write-only: fill the entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		/* Length updater A. */
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+				value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+			break;
+
+		/* Length updater B. */
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+			break;
+
+		/* Length updater C. */
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+			break;
+
+		/* TTL update. */
+		case HW_TPE_HFU_RCP_TTL_WR:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+			break;
+
+		/* Protocol / frame offset information. */
+		case HW_TPE_HFU_RCP_CS_INF:
+			get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L3_FRAG:
+			get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+			break;
+		case HW_TPE_HFU_RCP_TUNNEL:
+			get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+			break;
+		case HW_TPE_HFU_RCP_L4_PRT:
+			get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L3_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+			break;
+		case HW_TPE_HFU_RCP_INNER_L4_OFS:
+			get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1-2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one HFU RCP field; thin wrapper around hw_mod_tpe_hfu_rcp_mod(). */
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one HFU RCP field; thin wrapper around hw_mod_tpe_hfu_rcp_mod(). */
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+/*
+ * Flush CSU (checksum update) RCP entries [start_idx, start_idx + count)
+ * from the SW shadow copy to hardware.  count == ALL_ENTRIES selects all.
+ */
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+			     int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+	/* NOTE(review): assumes start_idx >= 0 — a negative value is not rejected here. */
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+					    count);
+}
+
+/*
+ * Read (get != 0) or write (get == 0) one CSU RCP field of shadow entry
+ * 'index': checksum commands for outer/inner L3 and L4 headers.
+ */
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+				  enum hw_tpe_e field, uint32_t index,
+				  uint32_t *value, int get)
+{
+	int rv = 0;
+	if (index >= be->tpe.nb_rcp_categories)
+		return error_index_too_large(__func__);
+	switch (_VER_) {
+	case 1:
+	case 2:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			/* Write-only: fill the entry with the low byte of *value. */
+			if (get)
+				return error_unsup_field(__func__);
+			memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+			       sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+		case HW_TPE_FIND:
+			rv = find_equal_index(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, value, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_COMPARE:
+			rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+				sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+				be->tpe.nb_rcp_categories, get, __func__);
+			if (rv != 0)
+				return rv;
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+			break;
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+			break;
+
+		default:
+			return error_unsup_field(__func__);
+		}
+		break;
+	/* end cases 1-2 */
+	default:
+		return error_unsup_ver(__func__, _MOD_, _VER_);
+	}
+
+	return 0;
+}
+
+/* Set one CSU RCP field; thin wrapper around hw_mod_tpe_csu_rcp_mod(). */
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+/* Get one CSU RCP field; thin wrapper around hw_mod_tpe_csu_rcp_mod(). */
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+			   int index, uint32_t *value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
new file mode 100644
index 0000000000..dacd819659
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v1.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+/* RPP v0 recipe: packet expansion. */
+struct tpe_v1_rpp_v0_rcp_s {
+	uint32_t exp;
+};
+
+/* INS v1 recipe: insertion point (dyn/ofs) and length. */
+struct tpe_v1_ins_v1_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/* RPL v2 recipe: replacement region and pointer into the RPL data table. */
+struct tpe_v1_rpl_v2_rcp_s {
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+	uint32_t rpl_ptr;
+	uint32_t ext_prio;
+};
+
+/* RPL v2 extension entry. */
+struct tpe_v1_rpl_v2_ext_s {
+	uint32_t rpl_ptr;
+	uint32_t meta_rpl_len; /* SW only */
+};
+
+/* RPL v2 replace data: one 16-byte pattern. */
+struct tpe_v1_rpl_v2_rpl_s {
+	uint32_t value[4];
+};
+
+/* CPY v1 recipe: reader selection and copy destination (dyn/ofs/len). */
+struct tpe_v1_cpy_v1_rcp_s {
+	uint32_t reader_select;
+	uint32_t dyn;
+	uint32_t ofs;
+	uint32_t len;
+};
+
+/*
+ * HFU v1 recipe: three length updaters (A/B/C), a TTL updater and
+ * protocol/offset information.  Note updater A additionally has
+ * len_a_outer_l4_len, which B and C do not.
+ */
+struct tpe_v1_hfu_v1_rcp_s {
+	/* Length updater A. */
+	uint32_t len_a_wr;
+	uint32_t len_a_outer_l4_len;
+	uint32_t len_a_pos_dyn;
+	uint32_t len_a_pos_ofs;
+	uint32_t len_a_add_dyn;
+	uint32_t len_a_add_ofs;
+	uint32_t len_a_sub_dyn;
+
+	/* Length updater B. */
+	uint32_t len_b_wr;
+	uint32_t len_b_pos_dyn;
+	uint32_t len_b_pos_ofs;
+	uint32_t len_b_add_dyn;
+	uint32_t len_b_add_ofs;
+	uint32_t len_b_sub_dyn;
+
+	/* Length updater C. */
+	uint32_t len_c_wr;
+	uint32_t len_c_pos_dyn;
+	uint32_t len_c_pos_ofs;
+	uint32_t len_c_add_dyn;
+	uint32_t len_c_add_ofs;
+	uint32_t len_c_sub_dyn;
+
+	/* TTL updater. */
+	uint32_t ttl_wr;
+	uint32_t ttl_pos_dyn;
+	uint32_t ttl_pos_ofs;
+
+	/* Protocol and header offset information. */
+	uint32_t cs_inf;
+	uint32_t l3_prt;
+	uint32_t l3_frag;
+	uint32_t tunnel;
+	uint32_t l4_prt;
+	uint32_t outer_l3_ofs;
+	uint32_t outer_l4_ofs;
+	uint32_t inner_l3_ofs;
+	uint32_t inner_l4_ofs;
+};
+
+/* CSU v0 recipe: checksum commands for outer/inner L3 and L4. */
+struct tpe_v1_csu_v0_rcp_s {
+	uint32_t ol3_cmd;
+	uint32_t ol4_cmd;
+	uint32_t il3_cmd;
+	uint32_t il4_cmd;
+};
+
+/* TPE v1 shadow copy: one dynamically allocated table per sub-module. */
+struct hw_mod_tpe_v1_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
new file mode 100644
index 0000000000..c56cad8d89
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe_v2.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+/* RPP v1 IFR recipe: fragmentation enable and MTU. */
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/* IFR v1 recipe: fragmentation enable and MTU. */
+struct tpe_v2_ifr_v1_rcp_s {
+	uint32_t en;
+	uint32_t mtu;
+};
+
+/*
+ * TPE v2 shadow copy: the v1 tables plus the two IFR tables.  The v1 field
+ * layout is duplicated so that the be->tpe.v1.* accessors remain valid for
+ * both versions.
+ */
+struct hw_mod_tpe_v2_s {
+	struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+	struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+	struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+	struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+	struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+	struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+	struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+	struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+	struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+	struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
new file mode 100644
index 0000000000..eae8f176a8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/stream_binary_flow_api.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Big-endian (network order) integer aliases used by the header structs below. */
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+	FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+	FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+	uint32_t group; /* Priority group. */
+	uint32_t priority; /* Rule priority level within group. */
+	uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+	uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+/* Mapping of a logical queue id to its hardware queue id. */
+struct flow_queue_id_s {
+	int id;
+	int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+/* Match item types; mirrors the RTE_FLOW_ITEM_TYPE_* set supported here. */
+enum flow_elem_type {
+	FLOW_ELEM_TYPE_END,
+	FLOW_ELEM_TYPE_ANY,
+	FLOW_ELEM_TYPE_ETH,
+	FLOW_ELEM_TYPE_VLAN,
+	FLOW_ELEM_TYPE_IPV4,
+	FLOW_ELEM_TYPE_IPV6,
+	FLOW_ELEM_TYPE_SCTP,
+	FLOW_ELEM_TYPE_TCP,
+	FLOW_ELEM_TYPE_UDP,
+	FLOW_ELEM_TYPE_ICMP,
+	FLOW_ELEM_TYPE_VXLAN,
+	FLOW_ELEM_TYPE_GTP,
+	FLOW_ELEM_TYPE_PORT_ID,
+	FLOW_ELEM_TYPE_TAG,
+	FLOW_ELEM_TYPE_VOID,
+
+	/*
+	 * Not associated with a RTE_ITEM..., but rather a restoration API
+	 * device-specific extension
+	 */
+	FLOW_ELEM_TYPE_TUNNEL
+};
+
+/* Action types; each comment names the conf structure the action expects. */
+enum flow_action_type { /* conf structure */
+	FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops outer vlan tag */
+	FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+	FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+	FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outer most VXLAN tunnel from matched flow */
+	FLOW_ACTION_TYPE_VXLAN_DECAP,
+	FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+	FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+	FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+	FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+	FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+	/* struct flow_action_port_id : Destination port ID - HW port ID */
+	FLOW_ACTION_TYPE_PORT_ID,
+	FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+	FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+	FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+	/* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+	FLOW_ACTION_TYPE_METER,
+	FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+	FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+	FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+	/*
+	 * -none- : not associated with a RTE_ACTION..., but rather a
+	 * restoration API device-specific extension
+	 */
+	FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+/* Ethernet MAC address, byte-packed for on-wire layout. */
+#pragma pack(1)
+struct ether_addr_s {
+	uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+/*
+ * Format a MAC address into buf as "XX:XX:XX:XX:XX:XX" (upper case hex).
+ * snprintf guarantees NUL termination for size > 0; output is truncated if
+ * size < 18 bytes.
+ */
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+		const struct ether_addr_s *eth_addr)
+{
+	snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+		 eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+		 eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+	uint8_t version_ihl;
+	uint8_t tos;
+	be16_t length;
+	be16_t id;
+	be16_t frag_offset;
+	uint8_t ttl;
+	uint8_t next_proto_id;
+	be16_t hdr_csum;
+	be32_t src_ip;
+	be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+	be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* IP packet length - includes ip header */
+	uint8_t proto;
+	uint8_t hop_limits;
+	uint8_t src_addr[16];
+	uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t tag; /* Validation tag */
+	be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be32_t sent_seq;
+	be32_t recv_ack;
+	uint8_t data_off;
+	uint8_t tcp_flags;
+	be16_t rx_win;
+	be16_t cksum;
+	be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+	be16_t src_port;
+	be16_t dst_port;
+	be16_t len;
+	be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+	uint8_t type;
+	uint8_t code;
+	be16_t cksum;
+	be16_t ident;
+	be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+	struct ether_addr_s d_addr; /* DMAC */
+	struct ether_addr_s s_addr; /* SMAC */
+	be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+	be16_t tci; /* Tag control information */
+	be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+	struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+	struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+	struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+	struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+	struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+	struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type;
+	be16_t msg_len;
+	be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+	uint8_t flags; /* Normally 0x08 (I flag) */
+	uint8_t rsvd0[3];
+	uint8_t vni[3];
+	uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+	uint32_t data; /* tag value to match */
+	uint8_t index; /* index of the tag register */
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /**< Number of layers covered. */
+};
+
+/* One entry in a match pattern list, terminated by FLOW_ELEM_TYPE_END. */
+struct flow_elem {
+	enum flow_elem_type type; /* element type */
+	const void *spec; /* Pointer to element specification structure */
+	const void *mask; /* Bitmask applied to spec - same type */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+	FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Unsupported in current supported FPGA */
+	FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+/* RSS configuration; binary layout mirrors struct rte_flow_action_rss. */
+struct flow_action_rss {
+	enum flow_hash_function func;
+	uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (see like DPDK ETH_RSS_*) */
+	uint64_t types;
+	uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in queue */
+	const uint8_t *key; /* Not supported yet - Hash key */
+	const uint16_t *queue; /* Queue indices to use */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new vlan TAG
+ */
+struct flow_action_push_vlan {
+	be16_t ethertype; /* TPID of the pushed tag */
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+	be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* < VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem *vxlan_tunnel;
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+	/* Bit-fields below fill exactly 32 bits (1+1+1+9+20). */
+	uint32_t reset : 1;
+	uint32_t hits_set : 1;
+	uint32_t bytes_set : 1;
+
+	uint32_t tcp_flags : 9;
+
+	uint32_t reserved : 20;
+	uint64_t hits;
+	uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+	uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+	uint32_t data; /* tag flow with this value */
+	uint32_t mask; /* bit-mask applied to "data" */
+	uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* not used but to be binary compatible with rte flow */
+	uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+	uint16_t index; /* destination RX queue */
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+	uint32_t group; /* target flow group */
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+	uint32_t mtr_id; /* MBR record id used in FLM learn records */
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data; /* encapsulation header bytes */
+	uint8_t *preserve;
+	size_t size; /* byte length of data */
+	/* Parsed view of 'data'; at most RAW_ENCAP_DECAP_ELEMS_MAX items. */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data; /* header bytes to strip */
+	size_t size; /* byte length of data */
+	/* Parsed view of 'data'; at most RAW_ENCAP_DECAP_ELEMS_MAX items. */
+	struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ */
+enum flow_field_id {
+	FLOW_FIELD_START = 0, /* Start of a packet. */
+	FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+	FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+	FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+	FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+	FLOW_FIELD_MAC_TYPE, /* EtherType. */
+	FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+	FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+	FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+	FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+	FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+	FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+	FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+	FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+	FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+	FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+	FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+	FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+	FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+	FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+	FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+	FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+	FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+	FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+	FLOW_FIELD_TAG, /* Tag value. */
+	FLOW_FIELD_MARK, /* Mark value. */
+	FLOW_FIELD_META, /* Metadata value. */
+	FLOW_FIELD_POINTER, /* Memory pointer. */
+	FLOW_FIELD_VALUE, /* Immediate value. */
+	FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+	FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+	FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+	FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ * Describes either a packet/meta field (level/offset), an immediate value,
+ * or a pointer to a value, selected by the "field" ID.
+ */
+struct flow_action_modify_data {
+	enum flow_field_id field; /* Field or memory type ID. */
+	union {
+		struct {
+			/* Encapsulation level or tag index. */
+			uint32_t level;
+			/* Number of bits to skip from a field. */
+			uint32_t offset;
+		};
+		/*
+		 * Immediate value for FLOW_FIELD_VALUE, presented in the
+		 * same byte order and length as in relevant rte_flow_item_xxx.
+		 */
+		uint8_t value[16];
+		/*
+		 * Memory address for FLOW_FIELD_POINTER, memory layout
+		 * should be the same as for relevant field in the
+		 * rte_flow_item_xxx structure.
+		 */
+		void *pvalue;
+	};
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+	FLOW_MODIFY_SET = 0, /* dst = src */
+	FLOW_MODIFY_ADD, /* dst = dst + src */
+	FLOW_MODIFY_SUB, /* dst = dst - src */
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ * Apply "operation" to dst using src, over the lowest "width" bits.
+ */
+struct flow_action_modify_field {
+	enum flow_modify_op operation; /* SET/ADD/SUB. */
+	struct flow_action_modify_data dst; /* Field to be modified. */
+	struct flow_action_modify_data src; /* Source field or value. */
+	uint32_t width; /* Number of bits to use from src. */
+};
+
+/* One entry of a flow action list; conf points to the type-specific struct above. */
+struct flow_action {
+	enum flow_action_type type;
+	const void *conf; /* May be NULL for actions that take no configuration. */
+};
+
+/* Error classification returned through struct flow_error. */
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+/* Out-parameter used by the flow API to report a human-readable result. */
+struct flow_error {
+	enum flow_error_e type;
+	const char *message; /* Points to a static string; never freed by the caller. */
+};
+
+/* Sub-commands for lag_set_config(). */
+enum flow_lag_cmd {
+	FLOW_LAG_SET_ENTRY, /* Write a single LAG table entry. */
+	FLOW_LAG_SET_ALL, /* Write every 4th entry starting at (index & 3). */
+	FLOW_LAG_SET_BALANCE, /* Distribute entries between port 0/1 by percentage. */
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ * Addresses and ports are stored in network byte order (BE).
+ */
+struct tunnel_cfg_s {
+	union {
+		struct {
+			uint32_t src_ip; /* BE */
+			uint32_t dst_ip; /* BE */
+		} v4;
+		struct {
+			uint8_t src_ip[16];
+			uint8_t dst_ip[16];
+		} v6;
+		struct {
+			uint64_t src_ip[2]; /* Same storage as v6, word-wise access. */
+			uint64_t dst_ip[2];
+		} v6_long;
+	};
+	int ipversion; /* 4 or 6 — selects which union member is valid. */
+	uint16_t s_port; /* BE */
+	uint16_t d_port; /* BE */
+	int tun_type; /* Tunnel protocol type. */
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+
+/* Reset all flow state on the given adapter. */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+/*
+ * Create/look up the flow eth device for a HW port and attach
+ * "alloc_queues" RX queues from queue_ids[]. Returns NULL on failure.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+				      uint32_t port_id, int alloc_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path);
+
+/* Attach one more RX queue to an existing flow eth device. */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id);
+
+/* Tear down a flow eth device created by flow_get_eth_dev(). */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+/* Fill *tun with the tunnel definition matched by flow_stat_id/vport. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+
+/* Validate an item/action list without programming hardware. */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error);
+
+/* Create and program a flow; returns NULL and sets *error on failure. */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error);
+
+/* Remove a single flow created by flow_create(). */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error);
+
+/* Remove all flows on the device. */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+/* Query an action of an existing flow; *data is allocated for the caller. */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error);
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, uint32_t policy_id,
+			  uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear);
+
+/*
+ * Config API
+ */
+
+/* Pair ports N*2 and N*2+1 as a LAG group for each set bit in port_mask. */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Block RX traffic from the MAC ports selected by port_mask. */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+/* Configure the LAG TX table; see enum flow_lag_cmd for sub-commands. */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+
+/* Set MTU for a port when running the inline profile. */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 6/8] net/ntnic: adds flow logic
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (3 preceding siblings ...)
  2023-09-08 16:07   ` [PATCH v16 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
@ 2023-09-08 16:07   ` Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

This logic layer takes rte_flow style patterns and actions as input,
and programs the FPGA accordingly.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v11:
* Replace stdatomic by compiler build-in atomic
* Fix dereferencing type-punned pointer in macro
* Inner offset must exclude VLAN bytes
v12:
* Fix error=array-bounds
v14:
* Fixed code checking for TPE resource reuse
---
 drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +
 drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-
 drivers/net/ntnic/meson.build                 |    4 +
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1307 +++++
 drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +
 .../nthw/flow_api/flow_api_profile_inline.c   | 5130 +++++++++++++++++
 .../nthw/flow_api/flow_api_profile_inline.h   |   56 +
 .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 ++++++++++
 .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +
 .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +
 .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +
 11 files changed, 10091 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
 create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c
 create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h

diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c
index 259aae2831..f9493202c3 100644
--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c
+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c
@@ -10,6 +10,8 @@
 #include "nt4ga_pci_ta_tg.h"
 #include "nt4ga_link_100g.h"
 
+#include "flow_filter.h"
+
 /* Sensors includes */
 #include "board_sensors.h"
 #include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
 	n_nim_ports = fpga_info->n_nims;
 	assert(n_nim_ports >= 1);
 
+	/* Nt4ga Init Filter */
+	nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+	res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+			     p_adapter_info->adapter_no);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+		       p_adapter_id_str);
+		return res;
+	}
+
 	/*
 	 * HIF/PCI TA/TG
 	 */
diff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c
index b61c73ea12..2c822c6b97 100644
--- a/drivers/net/ntnic/adapter/nt4ga_stat.c
+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c
@@ -7,6 +7,7 @@
 #include "nthw_drv.h"
 #include "nthw_fpga.h"
 #include "nt4ga_adapter.h"
+#include "flow_filter.h"
 
 #define NO_FLAGS 0
 
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
 	return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
 }
 
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+				   nt4ga_stat_t *p_nt4ga_stat,
 				   uint32_t *p_stat_dma_virtual);
 static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 				    uint32_t *p_stat_dma_virtual);
 
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
 		      nt4ga_stat_t *p_nt4ga_stat)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
 	} else {
 		p_nt4ga_stat->last_timestamp =
 			timestamp2ns(*p_nthw_stat->mp_timestamp);
-		nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+		nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
 					       p_nt4ga_stat->p_stat_dma_virtual);
 	}
 	return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
 			return -1;
 		}
 
-		p_nt4ga_stat->flm_stat_ver = 0;
+		struct flow_nic_dev *ndev =
+				p_adapter_info->nt4ga_filter.mp_flow_device;
+		p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
 
 		p_nt4ga_stat->mp_stat_structs_flm =
 			calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 }
 
 /* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+					   nt4ga_stat_t *p_nt4ga_stat,
 					   uint32_t *p_stat_dma_virtual)
 {
 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
 
 	const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
 	const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
 		p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
 	}
 
+	/* _update and get FLM stats */
+	flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+			   sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
 	return 0;
 }
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 8a5a3d5deb..0ae574f9ca 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -61,8 +61,10 @@ sources = files(
     'nthw/core/nthw_spim.c',
     'nthw/core/nthw_spis.c',
     'nthw/core/nthw_tsm.c',
+    'nthw/flow_api/flow_api.c',
     'nthw/flow_api/flow_api_actions.c',
     'nthw/flow_api/flow_api_backend.c',
+    'nthw/flow_api/flow_api_profile_inline.c',
     'nthw/flow_api/flow_engine/flow_group.c',
     'nthw/flow_api/flow_engine/flow_hasher.c',
     'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
     'nthw/flow_api/hw_mod/hw_mod_slc.c',
     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
     'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+    'nthw/flow_filter/flow_backend.c',
+    'nthw/flow_filter/flow_filter.c',
     'nthw/flow_filter/flow_nthw_cat.c',
     'nthw/flow_filter/flow_nthw_csu.c',
     'nthw/flow_filter/flow_nthw_flm.c',
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
new file mode 100644
index 0000000000..8cdf15663d
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
+/*
+ * Debug name for each resource type. The order must match enum res_type_e:
+ * the logging helpers below index this array directly with a res_type value.
+ */
+const char *dbg_res_descr[] = {
+	/* RES_QUEUE */ "RES_QUEUE",
+	/* RES_CAT_CFN */ "RES_CAT_CFN",
+	/* RES_CAT_COT */ "RES_CAT_COT",
+	/* RES_CAT_EXO */ "RES_CAT_EXO",
+	/* RES_CAT_LEN */ "RES_CAT_LEN",
+	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+	/* RES_HSH_RCP */ "RES_HSH_RCP",
+	/* RES_PDB_RCP */ "RES_PDB_RCP",
+	/* RES_QSL_RCP */ "RES_QSL_RCP",
+	/* RES_QSL_LTX */ "RES_QSL_LTX",
+	/* RES_QSL_QST */ "RES_QSL_QST",
+	/* RES_SLC_RCP */ "RES_SLC_RCP",
+	/* RES_IOA_RCP */ "RES_IOA_RCP",
+	/* RES_ROA_RCP */ "RES_ROA_RCP",
+	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+	/* RES_FLM_RCP */ "RES_FLM_RCP",
+	/* RES_HST_RCP */ "RES_HST_RCP",
+	/* RES_TPE_RCP */ "RES_TPE_RCP",
+	/* RES_TPE_EXT */ "RES_TPE_EXT",
+	/* RES_TPE_RPL */ "RES_TPE_RPL",
+	/* RES_COUNT */ "RES_COUNT",
+	/* RES_INVALID */ "RES_INVALID"
+};
+
+/* Head of the global singly linked list of NIC devices; guarded by base_mtx. */
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+/*
+ * Human-readable messages indexed by enum flow_nic_err_msg_e; the numbered
+ * comments must stay in sync with that enum. Returned via flow_nic_set_error().
+ * Fix: message 16 read "To too many output destinations" — garbled wording.
+ */
+static const struct {
+	const char *message;
+} err_msg[] = {
+	/* 00 */ { "Operation successfully completed" },
+	/* 01 */ { "Operation failed" },
+	/* 02 */ { "Memory allocation failed" },
+	/* 03 */ { "Too many output destinations" },
+	/* 04 */ { "Too many output queues for RSS" },
+	/* 05 */ { "The VLAN TPID specified is not supported" },
+	/* 06 */ { "The VxLan Push header specified is not accepted" },
+	/* 07 */
+	{ "While interpreting VxLan Pop action, could not find a destination port" },
+	/* 08 */ { "Failed in creating a HW-internal VTEP port" },
+	/* 09 */ { "Too many VLAN tag matches" },
+	/* 10 */ { "IPv6 invalid header specified" },
+	/* 11 */ { "Too many tunnel ports. HW limit reached" },
+	/* 12 */ { "Unknown or unsupported flow match element received" },
+	/* 13 */ { "Match failed because of HW limitations" },
+	/* 14 */ { "Match failed because of HW resource limitations" },
+	/* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed. Too many output destinations" },
+	/* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+	/* 18 */
+	{ "Push Tunnel Header action cannot output to multiple destination queues" },
+	/* 19 */ { "Inline action HW resource exhaustion" },
+	/* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+	/* 21 */ { "Flow counter HW resource exhaustion" },
+	/* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+	/* 23 */ { "Internal HW QSL compare failed" },
+	/* 24 */ { "Internal CAT CFN reuse failed" },
+	/* 25 */ { "Match variations too complex" },
+	/* 26 */ { "Match failed because of CAM/TCAM full" },
+	/* 27 */ { "Internal creation of a tunnel end point port failed" },
+	/* 28 */ { "Unknown or unsupported flow action received" },
+	/* 29 */ { "Removing flow failed" },
+	/* 30 */
+	{ "No output queue specified. Ignore this flow offload and uses default queue" },
+	/* 31 */ { "No output queue found" },
+	/* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+	/* 33 */
+	{ "Destination port specified is invalid or not reachable from this NIC" },
+	/* 34 */ { "Partial offload is not supported in this configuration" },
+	/* 35 */ { "Match failed because of CAT CAM exhausted" },
+	/* 36 */
+	{ "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+	/* 37 */ { "Match failed because of CAT CAM write failed" },
+	/* 38 */ { "Partial flow mark too big for device" },
+	/* 39 */  {"Invalid priority value"},
+};
+
+/*
+ * Fill *error (if supplied) with the static message and classification for
+ * "msg". ERR_SUCCESS maps to FLOW_ERROR_SUCCESS; everything else is GENERAL.
+ */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+	assert(msg < ERR_MSG_NO_MSG);
+
+	if (!error)
+		return;
+
+	error->message = err_msg[msg].message;
+	error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+		      FLOW_ERROR_GENERAL;
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+/*
+ * Allocate the first free resource of the given type, scanning start
+ * positions at the requested index alignment.
+ * Returns the allocated index, or -1 when no free slot was found.
+ */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment)
+{
+	const unsigned int count = ndev->res[res_type].resource_count;
+
+	for (unsigned int idx = 0; idx < count; idx += alignment) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			continue;
+
+		flow_nic_mark_resource_used(ndev, res_type, idx);
+		ndev->res[res_type].ref[idx] = 1;
+		return idx;
+	}
+	return -1;
+}
+
+/*
+ * Claim one specific resource index.
+ * Returns 0 on success, -1 when the index is already in use.
+ */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type)
+{
+	if (flow_nic_is_resource_used(ndev, res_type, idx))
+		return -1;
+
+	flow_nic_mark_resource_used(ndev, res_type, idx);
+	ndev->res[res_type].ref[idx] = 1;
+	return 0;
+}
+
+/*
+ * Allocate "num" contiguous resources of the given type, trying start
+ * positions at the requested index alignment.
+ * Returns the first index of the run, or -1 when no free run exists.
+ */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment)
+{
+	unsigned int idx_offs;
+
+	/* Try each aligned start index that leaves room for "num" entries. */
+	for (unsigned int res_idx = 0;
+			res_idx < ndev->res[res_type].resource_count - (num - 1);
+			res_idx += alignment) {
+		if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+			/* Verify the next num-1 entries are also free. */
+			for (idx_offs = 1; idx_offs < num; idx_offs++) {
+				if (flow_nic_is_resource_used(ndev, res_type,
+							      res_idx + idx_offs))
+					break;
+			}
+			if (idx_offs < num)
+				continue;
+
+			/* found a contiguous number of "num" res_type elements - allocate them */
+			for (idx_offs = 0; idx_offs < num; idx_offs++) {
+				flow_nic_mark_resource_used(ndev, res_type,
+							    res_idx + idx_offs);
+				ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+			}
+			return res_idx;
+		}
+	}
+	return -1;
+}
+
+/* Unconditionally mark one resource index free (no reference counting here). */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx)
+{
+	flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+/*
+ * Take an additional reference on an already-allocated resource.
+ * Returns 0 on success, -1 if the counter is saturated at UINT32_MAX.
+ */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "Reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	/* Refuse to wrap the counter. */
+	if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+		return -1;
+	ndev->res[res_type].ref[index]++;
+	return 0;
+}
+
+/*
+ * Drop one reference on a resource, freeing it when the count reaches zero.
+ * Returns non-zero while references remain, 0 when the resource was freed.
+ */
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index)
+{
+	NT_LOG(DBG, FILTER,
+	       "De-reference resource %s idx %i (before ref cnt %i)\n",
+	       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+	assert(flow_nic_is_resource_used(ndev, res_type, index));
+	assert(ndev->res[res_type].ref[index]);
+	/* deref */
+	ndev->res[res_type].ref[index]--;
+	if (!ndev->res[res_type].ref[index])
+		flow_nic_free_resource(ndev, res_type, index);
+	return !!ndev->res[res_type]
+	       .ref[index]; /* if 0 resource has been freed */
+}
+
+/*
+ * Return the first index >= idx_start that is marked used, or -1 when no
+ * further resources of this type are in use.
+ */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start)
+{
+	const unsigned int count = ndev->res[res_type].resource_count;
+
+	for (unsigned int idx = idx_start; idx < count; idx++) {
+		if (flow_nic_is_resource_used(ndev, res_type, idx))
+			return idx;
+	}
+	return -1;
+}
+
+/*
+ * Allocate "count" resources of one type for a flow handle and record the
+ * result in fh->resource[res_type].
+ *
+ * Arguments:
+ *   ndev       : device
+ *   res_type   : resource type
+ *   fh         : flow handle
+ *   count      : number of (contiguous) resources to be allocated
+ *   alignment  : start index alignment
+ *                  1: the allocation can start at any index
+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)
+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)
+ *                  etc.
+ * Returns:
+ *          0   : success
+ *         -1   : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment)
+{
+	/* A run of more than one element needs the contiguous allocator. */
+	fh->resource[res_type].index = (count > 1) ?
+		flow_nic_alloc_resource_contig(ndev, res_type, count,
+					       alignment) :
+		flow_nic_alloc_resource(ndev, res_type, alignment);
+
+	if (fh->resource[res_type].index < 0)
+		return -1;
+
+	fh->resource[res_type].count = count;
+	return 0;
+}
+
+/*
+ * Claim one specific resource index for a flow handle and record it in
+ * fh->resource[res_type]. Returns 0 on success, non-zero on failure.
+ *
+ * Fix: the original checked fh->resource[res_type].index < 0 only AFTER a
+ * successful allocation, where it could never trigger; a negative idx would
+ * already have indexed the resource bitmaps out of bounds inside
+ * flow_nic_alloc_resource_index(). Validate idx up front instead.
+ */
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh)
+{
+	if (idx < 0)
+		return -1;
+
+	int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+	if (err)
+		return err;
+
+	fh->resource[res_type].index = idx;
+	fh->resource[res_type].count = 1;
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+/*
+ * Program hasher recipe "hsh_idx" with a named algorithm.
+ * HASH_ALGO_5TUPLE builds an IPv6 5-tuple hash (QW0/QW4 = dst/src final IP,
+ * W8 = L4 words) with adaptive IPv4 masking; any other value leaves the
+ * recipe preset to zero, which means round-robin distribution.
+ * Always returns 0.
+ */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm)
+{
+	/* Clear the whole recipe first so unused words are zero. */
+	hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+	switch (algorithm) {
+	case HASH_ALGO_5TUPLE:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+				   hsh_idx, 0, 2);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+				   -16);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+				   DYN_FINAL_IP_DST);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+				   DYN_L4);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+		/* Word masks 0-8 select all hashed words; word 9 is excluded. */
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+				   0);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+				   0xffffffff);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+				   1);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+				   HASH_5TUPLE);
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+				   hsh_idx, 0, 1);
+
+		NT_LOG(DBG, FILTER,
+		       "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+		break;
+	default:
+	case HASH_ALGO_ROUND_ROBIN:
+		/* zero is round-robin */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program RSS hasher recipe "hsh_idx" from the RSS flag combination in f.
+ * Only specific flag combinations are supported; any other combination fails.
+ * Returns 0 on success, -1 on unsupported flags or hardware write failure.
+ *
+ * Fixes: the outer-dst-IP and inner-src-IP cases discarded the return codes
+ * of hw_mod_hsh_rcp_set(), leaving their "if (res)" error checks dead; the
+ * inner-src case logged the outer-dst debug message; and the error strings
+ * misspelled "hardware" as "cardware".
+ */
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss f)
+{
+	uint64_t fields = f.fields;
+
+	int res = 0;
+
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+				  0);
+	res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+				  0, 2);
+	switch (fields) {
+	case NT_ETH_RSS_C_VLAN:
+		/*
+		 * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN
+		 * provider
+		 */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+					  0, DYN_FIRST_VLAN);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+					  0, 0);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 8, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_LAST_VLAN_ID);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx,
+					  0, DYN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS,
+					  hsh_idx, 0, 0);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 4, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 5, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 6, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 7, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_OUTER_DST_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set hardware communication problem has "
+			       "occurred. The hardware could be in inconsistent state. Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+		return 0;
+	case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+		/* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx,
+					  0, DYN_TUN_FINAL_IP_DST);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS,
+					  hsh_idx, 0, -16);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 0, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 1, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 2, 0xffffffff);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+					  hsh_idx, 3, 0xffffffff);
+
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+					  hsh_idx, 0, HASH_INNER_SRC_IP);
+		res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+					  hsh_idx, 0, 1);
+		if (res) {
+			NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set hardware communication "
+			       "problem has occurred. The hardware could be in inconsistent state. "
+			       "Rerun.\n");
+			return -1;
+		}
+		NT_LOG(DBG, FILTER, "Set inner (depth = 1) src IP hasher.\n");
+		return 0;
+	default:
+		NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.");
+		return -1;
+	}
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+/*
+ * Look up the flow eth device for (adapter_no, port).
+ * Returns NULL when either the adapter or the port is unknown.
+ */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+	struct flow_nic_dev *ndev;
+
+	for (ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			break;
+	}
+
+	if (!ndev)
+		return NULL;
+
+	for (struct flow_eth_dev *dev = ndev->eth_base; dev; dev = dev->next) {
+		if (dev->port == port)
+			return dev;
+	}
+
+	return NULL;
+}
+
+/* Find the NIC device registered for adapter_no, or NULL if none. */
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+	for (struct flow_nic_dev *ndev = dev_base; ndev; ndev = ndev->next) {
+		if (ndev->adapter_no == adapter_no)
+			return ndev;
+	}
+	return NULL;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+/*
+ * Pair physical ports N*2 and N*2+1 as a LAG group for each set bit N in
+ * port_mask; merged traffic is reported as port N*2.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/*
+	 * Sets each 2 ports for each bit N as Lag. Ports N*2+N*2+1 are merged together
+	 * and reported as N*2 incoming port
+	 */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * Block RX traffic from the MAC ports selected by port_mask.
+ * Returns 0 on success, -1 for an unknown adapter.
+ */
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+	/* Blocks for traffic from port */
+	hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+	hw_mod_rmc_ctrl_flush(&ndev->be);
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/* Write one ROA LAG table entry (TX physical port) and flush it to HW. */
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+			    uint32_t value)
+{
+	hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+	hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+/*
+ * Configure the LAG TX table. "cmd" selects a single-entry write, a strided
+ * write of every 4th entry, or a percentage-based balance between two ports
+ * (the table is treated as blocks of 4 hash entries).
+ * Returns 0 on success, -1 for an unknown adapter or unknown command.
+ */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value)
+{
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	switch (cmd) {
+	case FLOW_LAG_SET_ENTRY:
+		write_lag_entry(&ndev->be, index, value);
+		break;
+
+	case FLOW_LAG_SET_ALL:
+		/* index selects one of the 4 slots inside each hash block. */
+		index &= 3;
+		for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+				i += 4)
+			write_lag_entry(&ndev->be, i + index, value);
+		break;
+
+	case FLOW_LAG_SET_BALANCE:
+		/*
+		 * This function will balance the output port
+		 * value: The balance of the distribution:
+		 * port  P0 / P1
+		 * 0:    0  / 100    port 0 is disabled
+		 * 25:   25 / 75
+		 * 50:   50 / 50
+		 * 75:   75 / 25
+		 * 100:  100/  0     port 1 is disabled
+		 */
+	{
+		/* Clamp the balance to 100% output on port 1 */
+		if (value > 100)
+			value = 100;
+		double balance = ((double)value / 100.0);
+		double block_count =
+			(double)ndev->be.roa.nb_lag_entries / 4.0;
+
+		/* Hardware encodes port 0 as 1 and port 1 as 2. */
+		int output_port = 1;
+		int port0_output_block_count =
+			(int)(block_count * balance);
+
+		for (int block = 0; block < block_count; block++) {
+			/* When the target port0 balance is reached. */
+			if (block >= port0_output_block_count)
+				output_port = 2;
+			/* Write an entire hash block to a given output port. */
+			for (int idx = 0; idx < 4; idx++) {
+				write_lag_entry(&ndev->be,
+						block * 4 + idx,
+						output_port);
+			} /* for each index in hash block */
+		} /* for each hash block */
+	}
+
+	break;
+	default:
+		pthread_mutex_unlock(&base_mtx);
+		return -1;
+	}
+
+	pthread_mutex_unlock(&base_mtx);
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+/*
+ * Validate a flow specification without creating it.
+ * Only the inline profile is implemented; the vSwitch profile is rejected.
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+		  const struct flow_action action[], struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_validate_profile_inline(dev, item, action, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Create a flow on an eth device and return its handle.
+ * Only the inline profile is implemented; the vSwitch profile is rejected.
+ */
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+				const struct flow_attr *attr,
+				const struct flow_elem item[],
+				const struct flow_action action[],
+				struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_create_profile_inline(dev, attr, item, action,
+						  error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return NULL;
+}
+
+/*
+ * Destroy a previously created flow.
+ * Only the inline profile is implemented; the vSwitch profile is rejected.
+ */
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+		 struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_destroy_profile_inline(dev, flow, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Destroy all flows on this eth device.
+ * Only the inline profile is implemented; the vSwitch profile is rejected.
+ */
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_flush_profile_inline(dev, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * Query data (e.g. counters) associated with a flow action.
+ * Only the inline profile is implemented; the vSwitch profile is rejected.
+ */
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+	       const struct flow_action *action, void **data, uint32_t *length,
+	       struct flow_error *error)
+{
+	if (dev->ndev->flow_profile != FLOW_ETH_DEV_PROFILE_VSWITCH)
+		return flow_query_profile_inline(dev, flow, action, data,
+						 length, error);
+
+	NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+	return -1;
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+/* Prepend an eth device to the NIC's singly linked eth-device list. */
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+				    struct flow_eth_dev *dev)
+{
+	/* Push onto the head of the list. */
+	dev->next = ndev->eth_base;
+	ndev->eth_base = dev;
+}
+
+/*
+ * Unlink an eth device from the NIC's eth-device list.
+ * Returns 0 when found and removed, -1 when not on the list.
+ */
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+				   struct flow_eth_dev *eth_dev)
+{
+	/*
+	 * Walk via a pointer to the link field so removal of the head
+	 * needs no special case.
+	 */
+	struct flow_eth_dev **link = &ndev->eth_base;
+
+	while (*link) {
+		if (*link == eth_dev) {
+			*link = eth_dev->next;
+			return 0;
+		}
+		link = &(*link)->next;
+	}
+	return -1;
+}
+
+/*
+ * Tear down all state created on a NIC device: every eth-port device,
+ * any remaining flows, the inline-profile flow management state, and the
+ * KM/KCC resource managers.  In FLOW_DEBUG builds it also audits that all
+ * resource bitmaps ended up free.
+ */
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+	/* Delete all eth-port devices created on this NIC device */
+	while (ndev->eth_base)
+		flow_delete_eth_dev(ndev->eth_base);
+
+	/* Error check */
+	while (ndev->flow_base) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+		       ndev->flow_base);
+
+		/*
+		 * NOTE(review): this early return skips the profile-inline
+		 * teardown and KM/KCC cleanup below — confirm intended.
+		 */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			return;
+		}
+		flow_destroy_profile_inline(ndev->flow_base->dev,
+					    ndev->flow_base, NULL);
+	}
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+		NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+		return;
+	}
+	done_flow_management_of_ndev_profile_inline(ndev);
+
+	km_free_ndev_resource_management(&ndev->km_res_handle);
+	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * free all resources default allocated, initially for this NIC DEV
+	 * Is not really needed since the bitmap will be freed in a sec. Therefore
+	 * only in debug mode
+	 */
+
+	/* Check if all resources has been released */
+	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+	for (unsigned int i = 0; i < RES_COUNT; i++) {
+		int err = 0;
+/* NOTE(review): redundant — we are already inside #ifdef FLOW_DEBUG. */
+#if defined(FLOW_DEBUG)
+		NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+#endif
+		for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+				ii++) {
+			int ref = ndev->res[i].ref[ii];
+			int used = flow_nic_is_resource_used(ndev, i, ii);
+
+			if (ref || used) {
+				NT_LOG(DBG, FILTER,
+				       "  [%i]: ref cnt %i, used %i\n", ii, ref,
+				       used);
+				err = 1;
+			}
+		}
+		if (err)
+			NT_LOG(DBG, FILTER,
+			       "ERROR - some resources not freed\n");
+	}
+#endif
+}
+
+/*
+ * Reset a NIC device to its initial state: delete all eth devices and
+ * flows, then reset the backend.  Returns 0 on success, -1 if the
+ * adapter is unknown.
+ */
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (ndev == NULL)
+		return -1;
+
+	flow_ndev_reset(ndev);
+	flow_api_backend_reset(&ndev->be);
+	return 0;
+}
+
+/*
+ * Create the eth-port device for a NIC port and allocate its RX queues.
+ *
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * port_id          app assigned port id - may be DPDK port_id
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ * queue_ids        queue id array; filled in (or, with SCATTER_GATHER,
+ *                  read) for each allocated queue
+ * rss_target_id    out: RSS target id (-1 when RSS is not configured)
+ * flow_profile     profile this NIC must run; must match any profile the
+ *                  NIC was already prepared for
+ * exception_path   inline profile: when set, unmatched packets are sent
+ *                  to queue 0 instead of being discarded
+ *
+ * Returns the created eth device, or NULL on error.
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+				      uint32_t port_id, int alloc_rx_queues,
+				      struct flow_queue_id_s queue_ids[],
+				      int *rss_target_id,
+				      enum flow_eth_dev_profile flow_profile,
+				      uint32_t exception_path)
+{
+	int i;
+	struct flow_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, FILTER,
+	       "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+	       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+	if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+		assert(0);
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Internal array for multiple queues too small for API\n");
+	}
+
+	pthread_mutex_lock(&base_mtx);
+	struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+	if (!ndev) {
+		/* Error - no flow api found on specified adapter */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: no flow interface registered for adapter %d\n",
+		       adapter_no);
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if (ndev->ports < ((uint16_t)port_no + 1)) {
+		NT_LOG(ERR, FILTER,
+		       "ERROR: port exceeds supported port range for adapter\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	if ((alloc_rx_queues - 1) >
+			FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+		NT_LOG(ERR, FILTER,
+		       "ERROR: Exceeds supported number of rx queues per eth device\n");
+		pthread_mutex_unlock(&base_mtx);
+		return NULL;
+	}
+
+	/* don't accept multiple eth_dev's on same NIC and same port */
+	eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (eth_dev) {
+		NT_LOG(DBG, FILTER,
+		       "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+		       adapter_no, port_no);
+		/* Release base_mtx while deleting; it takes ndev->mtx. */
+		pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		/*
+		 * Fix: re-acquire base_mtx here.  The code below (and the
+		 * success/error exits) unlocks base_mtx, which would
+		 * otherwise be an unlock of a mutex we no longer hold.
+		 */
+		pthread_mutex_lock(&base_mtx);
+	}
+
+	eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		/*
+		 * Fix: err_exit1 does not release base_mtx (only err_exit0
+		 * does, before falling through), so unlock it here to avoid
+		 * leaving the global lock held on this early error path.
+		 */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+	pthread_mutex_lock(&ndev->mtx);
+
+	eth_dev->ndev = ndev;
+	eth_dev->port = port_no;
+	eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time then NIC is initialized */
+	if (!ndev->flow_mgnt_prepared) {
+		ndev->flow_profile = flow_profile;
+		/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+		if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+			NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+			goto err_exit0;
+		} else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+			!= 0) {
+			goto err_exit0;
+		}
+	} else {
+		/* check if same flow type is requested, otherwise fail */
+		if (ndev->flow_profile != flow_profile) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Different flow types requested on same NIC device. "
+			       "Not supported.\n");
+			goto err_exit0;
+		}
+	}
+
+	/* Allocate the requested queues in HW for this dev */
+
+	for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+		eth_dev->rx_queue[i] = queue_ids[i];
+#else
+		int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+		if (queue_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: no more free queue IDs in NIC\n");
+			goto err_exit0;
+		}
+
+		eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+		eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+			ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+				eth_dev->rx_queue[eth_dev->num_queues].id);
+		if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: could not allocate a new queue\n");
+			goto err_exit0;
+		}
+
+		if (queue_ids) {
+			queue_ids[eth_dev->num_queues] =
+				eth_dev->rx_queue[eth_dev->num_queues];
+		}
+#endif
+		if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+				(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+				 exception_path))) {
+			/*
+			 * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+			 */
+			if (hw_mod_qsl_unmq_set(&ndev->be,
+						HW_QSL_UNMQ_DEST_QUEUE,
+						eth_dev->port,
+						eth_dev->rx_queue[0].hw_id) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+						eth_dev->port, 1) < 0)
+				goto err_exit0;
+			if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+					0)
+				goto err_exit0;
+		}
+
+		eth_dev->num_queues++;
+	}
+
+	eth_dev->rss_target_id = -1;
+
+	if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Enable each allocated hw queue in QSL (4 queues per QEN entry). */
+		for (i = 0; i < eth_dev->num_queues; i++) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value | (1 << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+	*rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	nic_insert_eth_port_dev(ndev, eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+	return eth_dev;
+
+err_exit0:
+	pthread_mutex_unlock(&ndev->mtx);
+	pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+	free(eth_dev); /* free(NULL) is a no-op */
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+	return NULL; /* Error exit */
+}
+
+/*
+ * Register an externally allocated RX queue on the eth device and enable
+ * it in the QSL QEN table (4 queues share one QEN entry).
+ * Always returns 0.
+ */
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+			   struct flow_queue_id_s *queue_id)
+{
+	uint32_t enable_bits = 0;
+	const int word = queue_id->hw_id / 4;
+	const int bit = queue_id->hw_id % 4;
+
+	/* Record the queue on the device. */
+	eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+	eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+	eth_dev->num_queues += 1;
+
+	/* Read-modify-write the enable bit for this hw queue. */
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN, word,
+			   &enable_bits);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN, word,
+			   enable_bits | (1 << bit));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, word, 1);
+
+	return 0;
+}
+
+/*
+ * Tear down an eth-port device: destroy all its flows, disable its
+ * queues in QSL (UNMQ and QEN), release queue resources and unlink it
+ * from the NIC device.  Returns 0 on success, -1 on error.
+ */
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+	struct flow_nic_dev *ndev = eth_dev->ndev;
+
+	if (!ndev) {
+		/* Error invalid nic device */
+		return -1;
+	}
+
+	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+	       eth_dev->port);
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* delete all created flows from this device */
+	pthread_mutex_lock(&ndev->mtx);
+
+	struct flow_handle *flow = ndev->flow_base;
+
+	while (flow) {
+		if (flow->dev == eth_dev) {
+			struct flow_handle *flow_next = flow->next;
+
+			if (ndev->flow_profile ==
+					FLOW_ETH_DEV_PROFILE_VSWITCH) {
+				NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+				/* Fix: do not return with ndev->mtx held */
+				pthread_mutex_unlock(&ndev->mtx);
+				return -1;
+			}
+			flow_destroy_locked_profile_inline(eth_dev,
+							   flow, NULL);
+			flow = flow_next;
+		} else {
+			flow = flow->next;
+		}
+	}
+
+	/*
+	 * remove unmatched queue if setup in QSL
+	 * remove exception queue setting in QSL UNM
+	 */
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+			    0);
+	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+	if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Clear each queue's enable bit in QSL (4 queues per QEN entry). */
+		for (int i = 0; i < eth_dev->num_queues; ++i) {
+			uint32_t qen_value = 0;
+			uint32_t queue_id =
+				(uint32_t)eth_dev->rx_queue[i].hw_id;
+
+			hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4, &qen_value);
+			hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+					   queue_id / 4,
+					   qen_value & ~(1U << (queue_id % 4)));
+			hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+		}
+	}
+
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+	/* free rx queues */
+	for (int i = 0; i < eth_dev->num_queues; i++) {
+		ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+					      eth_dev->rx_queue[i].hw_id);
+		flow_nic_deref_resource(ndev, RES_QUEUE,
+					eth_dev->rx_queue[i].id);
+	}
+#endif
+
+	/* take eth_dev out of ndev list */
+	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+	pthread_mutex_unlock(&ndev->mtx);
+
+	/* free eth_dev */
+	free(eth_dev);
+	return 0;
+}
+
+/* Thin wrapper: look up a tunnel definition in the tunnel engine. */
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+			       uint8_t vport)
+{
+	return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * *****************************  Flow API NIC Setup  ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+/*
+ * Create the resource manager state for one resource type: a single
+ * allocation holding the allocation bitmap followed by the per-element
+ * reference counters.  Returns 0 on success, -1 on allocation failure.
+ */
+static int init_resource_elements(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type, uint32_t count)
+{
+	assert(ndev->res[res_type].alloc_bm == NULL);
+
+	/* Bitmap bytes first, then 'count' uint32_t reference counters. */
+	uint8_t *mem = calloc(1, BIT_CONTAINER_8_ALIGN(count) +
+			      count * sizeof(uint32_t));
+	if (!mem)
+		return -1;
+
+	ndev->res[res_type].alloc_bm = mem;
+	ndev->res[res_type].ref =
+		(uint32_t *)(mem + BIT_CONTAINER_8_ALIGN(count));
+	ndev->res[res_type].resource_count = count;
+	return 0;
+}
+
+/*
+ * Release the resource manager state for one resource type.  The ref
+ * counter array lives inside the same allocation as alloc_bm, so a
+ * single free() releases both.
+ */
+static void done_resource_elements(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type)
+{
+	assert(ndev);
+	/* free(NULL) is a no-op, so no guard is needed. */
+	free(ndev->res[res_type].alloc_bm);
+}
+
+/* Push a NIC device onto the global device list (guarded by base_mtx). */
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+	pthread_mutex_lock(&base_mtx);
+	/* Insert at the head of the list. */
+	ndev->next = dev_base;
+	dev_base = ndev;
+	pthread_mutex_unlock(&base_mtx);
+}
+
+/*
+ * Unlink a NIC device from the global device list (guarded by base_mtx).
+ * Returns 0 when found and removed, -1 when not on the list.
+ */
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+	int rc = -1;
+
+	pthread_mutex_lock(&base_mtx);
+	/* Walk via the link pointers so the head needs no special case. */
+	for (struct flow_nic_dev **link = &dev_base; *link;
+			link = &(*link)->next) {
+		if (*link == ndev) {
+			*link = ndev->next;
+			rc = 0;
+			break;
+		}
+	}
+	pthread_mutex_unlock(&base_mtx);
+
+	return rc;
+}
+
+/*
+ * Register a NIC backend with the flow API and create its flow_nic_dev.
+ *
+ * be_if must be a version-1 backend ops table; be_dev is the backend's
+ * opaque device handle.  All per-resource-type managers are sized from
+ * the capabilities reported by the backend and the device is inserted
+ * into the global NIC list.  Returns the new device or NULL on error.
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+				     const struct flow_api_backend_ops *be_if,
+				     void *be_dev)
+{
+	/* Only backend interface version 1 is supported. */
+	if (!be_if || be_if->version != 1) {
+		NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+		return NULL;
+	}
+
+	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+	if (!ndev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+		return NULL;
+	}
+
+	/*
+	 * To dump module initialization writes use
+	 * FLOW_BACKEND_DEBUG_MODE_WRITE
+	 * then remember to set it ...NONE afterwards again
+	 */
+	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+		goto err_exit;
+	ndev->adapter_no = adapter_no;
+
+	/* Cap the addressable in-ports at 256. */
+	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+				 256 :
+				 ndev->be.num_rx_ports);
+
+	/*
+	 * Free resources in NIC must be managed by this module
+	 * Get resource sizes and create resource manager elements
+	 */
+	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_CFN,
+				   ndev->be.cat.nb_cat_funcs))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_KM_CATEGORY,
+				   ndev->be.km.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_PDB_RCP,
+				   ndev->be.pdb.nb_pdb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_RCP,
+				   ndev->be.qsl.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_QSL_QST,
+				   ndev->be.qsl.nb_qst_entries))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_IOA_RCP,
+				   ndev->be.ioa.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_ROA_RCP,
+				   ndev->be.roa.nb_tun_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+				   ndev->be.cat.nb_flow_types))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_FLM_RCP,
+				   ndev->be.flm.nb_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_HST_RCP,
+				   ndev->be.hst.nb_hst_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RCP,
+				   ndev->be.tpe.nb_rcp_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_EXT,
+				   ndev->be.tpe.nb_rpl_ext_categories))
+		goto err_exit;
+	if (init_resource_elements(ndev, RES_TPE_RPL,
+				   ndev->be.tpe.nb_rpl_depth))
+		goto err_exit;
+
+	/* may need IPF, COR */
+
+	/* check all defined has been initialized */
+	for (int i = 0; i < RES_COUNT; i++)
+		assert(ndev->res[i].alloc_bm);
+
+	pthread_mutex_init(&ndev->mtx, NULL);
+	list_insert_flow_nic(ndev);
+
+	return ndev;
+
+err_exit:
+	/* flow_api_done() frees all partially created state, incl. ndev. */
+	if (ndev)
+		flow_api_done(ndev);
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy a flow_nic_dev: reset all flow state, release every resource
+ * manager, shut down the backend, unlink the device from the global
+ * list and free it.  Safe to call with NULL.  Always returns 0.
+ */
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+	NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+
+	if (!ndev)
+		return 0;
+
+	flow_ndev_reset(ndev);
+
+	/* delete resource management allocations for this ndev */
+	for (int i = 0; i < RES_COUNT; i++)
+		done_resource_elements(ndev, i);
+
+	flow_api_backend_done(&ndev->be);
+	list_remove_flow_nic(ndev);
+	free(ndev);
+	return 0;
+}
+
+/* Return the backend's opaque device handle, or NULL when ndev is NULL. */
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+	if (ndev)
+		return ndev->be.be_dev;
+
+	NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Return the number of RX queues on the eth device for (adapter, port),
+ * or -1 when no such eth device exists (previously dereferenced NULL).
+ */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (!eth_dev)
+		return -1;
+	return eth_dev->num_queues;
+}
+
+/*
+ * Return the hardware queue id of queue_no on the eth device for
+ * (adapter, port), or -1 when no such eth device exists (previously
+ * dereferenced NULL).  queue_no is not range-checked against num_queues.
+ */
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+	struct flow_eth_dev *eth_dev =
+		nic_and_port_to_eth_dev(adapter_no, port_no);
+	if (!eth_dev)
+		return -1;
+	return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+/*
+ * Fetch FLM statistics into 'data'.  Only the inline profile provides
+ * FLM stats; other profiles return -1.
+ */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+	if (ndev->flow_profile != FLOW_ETH_DEV_PROFILE_INLINE)
+		return -1;
+
+	return flow_get_flm_stats_profile_inline(ndev, data, size);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h
new file mode 100644
index 0000000000..9dbaac49e8
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ *        Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+/* Per-resource-type bookkeeping: allocation bitmap plus ref counters. */
+struct hw_mod_resource_s {
+	uint8_t *alloc_bm; /* allocation bitmap */
+	uint32_t *ref; /* reference counter for each resource element */
+	uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+/* Convenience mask: hash on any of the IPv4/IPv6 variants above. */
+#define NT_ETH_RSS_IP                                           \
+	(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \
+	 NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \
+	 NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	 NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+	uint64_t fields;
+};
+
+/* One eth-port device: a port of a NIC together with its RX queues. */
+struct flow_eth_dev {
+	struct flow_nic_dev *ndev; /* NIC that owns this port device */
+	uint8_t port; /* NIC port id */
+	uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+	struct flow_queue_id_s
+		rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+	int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+	int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+	struct flow_eth_dev *next; /* next eth device on the same NIC */
+};
+
+/* RSS hash algorithm selection for flow_nic_set_hasher(). */
+enum flow_nic_hash_e {
+	HASH_ALGO_ROUND_ROBIN = 0,
+	HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+struct flow_nic_dev {
+	uint8_t adapter_no; /* physical adapter no in the host system */
+	uint16_t ports; /* number of in-ports addressable on this NIC */
+	enum flow_eth_dev_profile
+	flow_profile; /* flow profile this NIC is initially prepared for */
+	int flow_mgnt_prepared; /* set once flow management is initialized */
+
+	struct hw_mod_resource_s
+		res[RES_COUNT]; /* raw NIC resource allocation table */
+	/* Opaque handles owned by the respective engines (FLM/KM/KCC). */
+	void *flm_res_handle;
+	void *km_res_handle;
+	void *kcc_res_handle;
+
+	void *flm_mtr_handle;
+	void *ft_res_handle;
+	void *mtr_stat_handle;
+	void *group_handle;
+
+	/* statistics */
+	uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+	struct flow_handle
+		*flow_base; /* linked list of all flows created on this NIC */
+	struct flow_handle *
+		flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+	struct flow_api_backend_s be; /* NIC backend API */
+	struct flow_eth_dev *
+		eth_base; /* linked list of created eth-port devices on this NIC */
+	pthread_mutex_t mtx; /* protects per-NIC state (flows, queues) */
+
+	int default_qsl_drop_index; /* pre allocated default QSL Drop */
+	int default_qsl_discard_index; /* pre allocated default QSL Discard */
+	/* RSS hash function settings bitfields correspond to data used for hashing */
+	struct nt_eth_rss
+		rss_hash_config;
+	struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+/* Error codes reported to callers via flow_nic_set_error(). */
+enum flow_nic_err_msg_e {
+	ERR_SUCCESS = 0,
+	ERR_FAILED = 1,
+	ERR_MEMORY = 2,
+	ERR_OUTPUT_TOO_MANY = 3,
+	ERR_RSS_TOO_MANY_QUEUES = 4,
+	ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+	ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+	ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+	ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+	ERR_MATCH_VLAN_TOO_MANY = 9,
+	ERR_MATCH_INVALID_IPV6_HDR = 10,
+	ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+	ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+	ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+	ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+	ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+	ERR_ACTION_REPLICATION_FAILED = 16,
+	ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+	ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+	ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+	ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+	ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+	ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+	ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+	ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+	ERR_MATCH_ENTROPY_FAILED = 25,
+	ERR_MATCH_CAM_EXHAUSTED = 26,
+	ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+	ERR_ACTION_UNSUPPORTED = 28,
+	ERR_REMOVE_FLOW_FAILED = 29,
+	ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+	ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+	ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+	ERR_OUTPUT_INVALID = 33,
+	ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+	ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+	ERR_MATCH_KCC_KEY_CLASH = 36,
+	ERR_MATCH_CAT_CAM_FAILED = 37,
+	ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+	ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+	ERR_MSG_NO_MSG /* sentinel - number of error codes */
+};
+
+/* Fill a struct flow_error for the caller from an internal error code. */
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+/* Human-readable names for enum res_type_e, used in debug logging. */
+extern const char *dbg_res_descr[];
+
+/* Set bit x in byte-array bitmap arr. */
+#define flow_nic_set_bit(arr, x) \
+	do { \
+		uint8_t *_temp_arr = (arr); \
+		size_t _temp_x = (x); \
+		_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+		(uint8_t)(1 << (_temp_x % 8))); \
+	} while (0)
+
+
+
+/* Clear bit x in byte-array bitmap arr. */
+#define flow_nic_unset_bit(arr, x) \
+	do { \
+		size_t _temp_x = (x); \
+		arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+	} while (0)
+
+/* Non-zero when bit x is set in arr (GNU statement expression). */
+#define flow_nic_is_bit_set(arr, x) \
+	({ \
+		size_t _temp_x = (x); \
+		(arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+	})
+
+/* Mark resource 'index' of 'res_type' allocated; asserts it was free. */
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+	do { \
+		struct flow_nic_dev *_temp_ndev = (_ndev); \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+		== 0); \
+		flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+
+/* Mark resource 'index' of 'res_type' free again. */
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+	do { \
+		__typeof__(res_type) _temp_res_type = (res_type); \
+		size_t _temp_index = (index); \
+		NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+		dbg_res_descr[_temp_res_type], _temp_index); \
+		flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+	} while (0)
+
+
+/* 1 when resource 'index' of 'res_type' is allocated, 0 otherwise. */
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+	(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+/* Allocate one free element of res_type (index a multiple of alignment). */
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    uint32_t alignment);
+/* Allocate the specific element idx of res_type. */
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+				  enum res_type_e res_type);
+/* Allocate 'num' contiguous elements of res_type. */
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+				   enum res_type_e res_type, unsigned int num,
+				   uint32_t alignment);
+/* Release element idx of res_type unconditionally. */
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int idx);
+
+/* Increment / decrement the reference count of an allocated element. */
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			  int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+			    int index);
+/* Find the next allocated element of res_type at or after idx_start. */
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+				     enum res_type_e res_type, int idx_start);
+
+/* Allocate resources of res_type and record them on flow handle fh. */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+				  enum res_type_e res_type,
+				  struct flow_handle *fh, uint32_t count,
+				  uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+					enum res_type_e res_type, int idx,
+					struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+/* Lookup helpers for registered devices. */
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+/* Configure the RSS hasher recipe hsh_idx (algorithm / hashed fields). */
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+			enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+			       struct nt_eth_rss fields);
+
+/* Link-aggregation configuration (see flow_api.c for cmd semantics). */
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+		   uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+/* Per-port queue introspection. */
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+/* FLM statistics (inline profile only). */
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+		       uint64_t size);
+
+#endif
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
new file mode 100644
index 0000000000..82d7f8b1c9
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c
@@ -0,0 +1,5130 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * Enable or disable a KM flow type for a CFN in one of the four lookups.
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+			    int flow_type, int lookup, int enable)
+{
+	const int lookups_per_fte = 4;
+	const int cfn_per_field = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int index = (8 * flow_type + cfn_index / cfn_per_field) *
+		    lookups_per_fte + lookup;
+	int field = cfn_index % cfn_per_field;
+
+	uint32_t bm = 0;
+
+	hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      index, &bm);
+
+	uint32_t new_bm = enable ? (bm | (1U << field)) :
+			  (bm & ~(1U << field));
+
+	/* Only touch the hardware when the enable bit actually changes */
+	if (new_bm != bm) {
+		hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, index, new_bm);
+		hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, index, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable an FLM flow type for a CFN in one of the four lookups.
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+			     int flow_type, int lookup, int enable)
+{
+	const int lookups_per_fte = 4;
+	const int cfn_per_field = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+	int index = (8 * flow_type + cfn_index / cfn_per_field) *
+		    lookups_per_fte + lookup;
+	int field = cfn_index % cfn_per_field;
+
+	uint32_t bm = 0;
+
+	hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       index, &bm);
+
+	uint32_t new_bm = enable ? (bm | (1U << field)) :
+			  (bm & ~(1U << field));
+
+	/* Only touch the hardware when the enable bit actually changes */
+	if (new_bm != bm) {
+		hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, index, new_bm);
+		hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, index, 1);
+	}
+
+	return 0;
+}
+
+/* Map a logical RX queue id to its hardware queue id; -1 when unknown. */
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+	int i = 0;
+
+	while (i < dev->num_queues) {
+		if (dev->rx_queue[i].id == id)
+			return dev->rx_queue[i].hw_id;
+		++i;
+	}
+
+	return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+/*
+ * Prepare the Flow Matcher for use: reset the control registers,
+ * configure the SDRAM usage split and poll until the DDR4
+ * calibration/init signals done, then program the initial flow
+ * scrubber and timeout settings.
+ *
+ * Returns 0 on success, -1 if calibration never completed.
+ */
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+	int success = 0;
+
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+			       0x10);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Wait for ddr4 calibration/init done */
+	/* Poll up to 10^6 times with a 1 us sleep between reads */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM calibration failed"; */
+		return -1;
+	}
+
+	/* Set the flow scrubber and timeout settings */
+	hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+	hw_mod_flm_timeout_flush(&ndev->be);
+
+	hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+	hw_mod_flm_scrub_flush(&ndev->be);
+
+	return 0;
+}
+
+/*
+ * Reset the FLM SDRAM: disable all lookups, wait for the FLM to go
+ * idle, run the SDRAM init sequence, then (re)enable the FLM according
+ * to 'enable'.
+ *
+ * Returns 0 on success, -1 on timeout waiting for idle or init-done.
+ */
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+	int success = 0;
+
+	/*
+	 * Make sure no lookup is performed during init, i.e.
+	 * disable every category and disable FLM
+	 */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Reset recipes 1..nb_categories-1 (index 0 is left untouched) */
+	for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+	hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+	/* Wait for FLM to enter Idle state */
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - never idle"; */
+		return -1;
+	}
+
+	success = 0;
+
+	/* Start SDRAM initialization */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	for (uint32_t i = 0; i < 1000000; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_status_update(&ndev->be);
+		hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+				      &value);
+		if (value) {
+			success = 1;
+			break;
+		}
+		usleep(1);
+	}
+
+	if (!success) {
+		/* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+		return -1;
+	}
+
+	/* Set the INIT value back to zero to clear the bit in the SW register cache */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	/* Enable FLM */
+	hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+	hw_mod_flm_control_flush(&ndev->be);
+
+	return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32	/* Max FLM recipes / groups */
+#define FLM_FLOW_FT_MAX 16	/* Max flow types (action sets) per group */
+
+/*
+ * Identity of an FLM flow type (action set). Flows whose definitions
+ * encode to the same ident share a flow type. The union packs all
+ * fields into a single uint64_t ('data') for direct comparison.
+ */
+struct flm_flow_ft_ident_s {
+	union {
+		struct {
+			uint64_t in_use : 1;	/* slot holds a valid ident */
+			uint64_t drop : 1;	/* no destination: drop */
+			uint64_t ltx_en : 1;	/* transmit to a phy port */
+			uint64_t ltx_port : 1;	/* phy port to transmit on */
+			uint64_t queue_en : 1;	/* forward to a virt queue */
+			uint64_t queue : 8;	/* destination queue id */
+			uint64_t encap_len : 8;	/* tunnel header length */
+			uint64_t encap_vlans : 2;	/* VLAN count in tunnel hdr */
+			uint64_t encap_ip : 1;	/* 0 when tunnel IP is v4, else 1 */
+			uint64_t decap_end : 5;	/* header strip end (dyn) */
+			uint64_t jump_to_group : 8;	/* jump target group */
+			uint64_t pad : 27;
+		};
+		uint64_t data;	/* whole ident as one comparable word */
+	};
+};
+
+/*
+ * FLM key layout: dyn (dynamic header reference) and ofs (byte offset)
+ * selections for the two quad-words (QW0/QW4) and two single-words
+ * (SW8/SW9) of the key, plus outer/inner protocol flags. Packed into a
+ * uint64_t ('data') so two layouts can be compared in one operation.
+ */
+struct flm_flow_key_def_s {
+	union {
+		struct {
+			uint64_t qw0_dyn : 7;
+			uint64_t qw0_ofs : 8;
+			uint64_t qw4_dyn : 7;
+			uint64_t qw4_ofs : 8;
+			uint64_t sw8_dyn : 7;
+			uint64_t sw8_ofs : 8;
+			uint64_t sw9_dyn : 7;
+			uint64_t sw9_ofs : 8;
+			uint64_t outer_proto : 1;
+			uint64_t inner_proto : 1;
+			uint64_t pad : 2;
+		};
+		uint64_t data;	/* whole layout as one comparable word */
+	};
+};
+
+/* Derive the flow-type identity bits from a flow definition. */
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+	struct flm_flow_ft_ident_s ident = { .data = 0 };
+
+	assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+	ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		ident.drop = 1;
+	} else {
+		int i;
+
+		/* Record every phy-port and virt-queue destination */
+		for (i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				ident.ltx_en = 1;
+				ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				ident.queue_en = 1;
+				ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	if (fd->tun_hdr.len > 0) {
+		ident.encap_len = fd->tun_hdr.len;
+		ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	if (fd->jump_to_group != UINT32_MAX)
+		ident.jump_to_group = fd->jump_to_group & 0xff;
+
+	return ident;
+}
+
+/* Store the dyn/offset selection for quad-word 0 or 4 of the key. */
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+				  unsigned int qw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(qw < 2);
+	if (qw != 0) {
+		key_def->qw4_dyn = dyn & 0x7f;
+		key_def->qw4_ofs = ofs & 0xff;
+	} else {
+		key_def->qw0_dyn = dyn & 0x7f;
+		key_def->qw0_ofs = ofs & 0xff;
+	}
+}
+
+/* Store the dyn/offset selection for single-word 8 or 9 of the key. */
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+				  unsigned int sw, unsigned int dyn,
+				  unsigned int ofs)
+{
+	assert(sw < 2);
+	if (sw != 0) {
+		key_def->sw9_dyn = dyn & 0x7f;
+		key_def->sw9_ofs = ofs & 0xff;
+	} else {
+		key_def->sw8_dyn = dyn & 0x7f;
+		key_def->sw8_ofs = ofs & 0xff;
+	}
+}
+
+/* Per-group FLM state */
+struct flm_flow_group_s {
+	/* CFN and KM flow type of the group 0 (miss path) flow; -1 = unset */
+	int cfn_group0;
+	int km_ft_group0;
+	/* Flow handle of the group 0 (miss path) flow */
+	struct flow_handle *fh_group0;
+
+	/* Key layout programmed into this group's FLM RCP */
+	struct flm_flow_key_def_s key_def;
+
+	/* Nonzero once the group 0 miss path has been switched to FLM */
+	int miss_enabled;
+
+	/* Flow types (action sets) in use by this group */
+	struct flm_flow_group_ft_s {
+		struct flm_flow_ft_ident_s ident;
+		struct flow_handle *fh;
+	} ft[FLM_FLOW_FT_MAX];
+
+	/* Most recently used ft index (a cache; "cashed" = cached). 0 = none */
+	uint32_t cashed_ft_index;
+};
+
+/* Root handle holding FLM state for all groups */
+struct flm_flow_handle_s {
+	struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+/*
+ * Allocate (or reset, when *handle is already set) the FLM flow handle
+ * and mark every group as unused (cfn_group0 == -1 means "no CFN").
+ * On allocation failure *handle stays NULL; callers must check it.
+ */
+static void flm_flow_handle_create(void **handle)
+{
+	struct flm_flow_handle_s *flm_handle;
+
+	if (!*handle)
+		*handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+	else
+		memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+	flm_handle = (struct flm_flow_handle_s *)*handle;
+
+	/*
+	 * calloc() may fail; without this guard the loop below would
+	 * dereference a NULL pointer.
+	 */
+	if (!flm_handle)
+		return;
+
+	for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+		flm_handle->groups[i].cfn_group0 = -1;
+		flm_handle->groups[i].fh_group0 = NULL;
+	}
+}
+
+/* Free the FLM flow handle and clear the caller's pointer. */
+static void flm_flow_handle_remove(void **handle)
+{
+	void *p = *handle;
+
+	*handle = NULL;
+	free(p);
+}
+
+/*
+ * Register the KM base flow (CFN, KM flow type and flow handle) that a
+ * group's FLM miss path will be built from. FLM miss handling starts
+ * disabled for the group.
+ */
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+				int cfn, int km_ft, struct flow_handle *fh)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *grp = &flm_handle->groups[group_index];
+
+	grp->cfn_group0 = cfn;
+	grp->km_ft_group0 = km_ft;
+	grp->fh_group0 = fh;
+	grp->miss_enabled = 0;
+
+	return 0;
+}
+
+/* Clear all state kept for a group and mark its CFN as unassigned. */
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+				  uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	struct flm_flow_group_s *grp = &flm_handle->groups[group_index];
+
+	memset(grp, 0x0, sizeof(struct flm_flow_group_s));
+	grp->cfn_group0 = -1;
+
+	return 0;
+}
+
+/* Return the flow handle of a group's miss (group 0) flow. */
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+				      uint32_t group_index,
+				      struct flow_handle **fh_miss)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+	if (group_index < FLM_FLOW_RCP_MAX) {
+		*fh_miss = flm_handle->groups[group_index].fh_group0;
+		return 0;
+	}
+
+	NT_LOG(ERR, FILTER,
+	       "FLM: Invalid index for FLM programming: Group=%d\n",
+	       (int)group_index);
+	return -1;
+}
+
+/*
+ * Program FLM RCP 'group_index' with the key layout from key_def and
+ * the key mask. The mask words are reordered from packet order into
+ * the hardware's SW9, SW8, QW4, QW0 word order. The recipe's KID is
+ * group_index + 2 (kid 1 is used by the meter records).
+ */
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+			      struct flm_flow_key_def_s *key_def,
+			      uint32_t *packet_mask, uint32_t group_index)
+{
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	uint32_t flm_mask[10] = {
+		packet_mask[0], /* SW9 */
+		packet_mask[1], /* SW8 */
+		packet_mask[5], packet_mask[4],
+		packet_mask[3], packet_mask[2], /* QW4 */
+		packet_mask[9], packet_mask[8],
+		packet_mask[7], packet_mask[6], /* QW0 */
+	};
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+	/* Key word selection: dynamic header reference + byte offset */
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+			   key_def->qw0_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+			   key_def->qw0_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+			   key_def->qw4_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+			   key_def->qw4_ofs);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+			   key_def->sw8_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+			   key_def->sw8_ofs);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+			   key_def->sw9_dyn);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+			   key_def->sw9_ofs);
+
+	hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+				flm_mask);
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+			   group_index + 2);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+			   key_def->outer_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+			   key_def->inner_proto);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+			   -20);
+
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	return 0;
+}
+
+/*
+ * Reset FLM RCP 'group_index' and, when the group's miss path had been
+ * moved to FLM, undo that move: point the group 0 CFN back at RCP 0,
+ * restore the flow types and clear the CFN's FLM enable bit.
+ */
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+			   0);
+	hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+	if (flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		/* Change group 0 FLM RCP selection to point to 0 */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       0);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Change group 0 FT MISS to FT UNHANDLED */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+		/* Finally, disable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+				       bm & ~(1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		memset(&flm_group->key_def, 0x0,
+		       sizeof(struct flm_flow_key_def_s));
+		flm_group->miss_enabled = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare FLM programming of a flow in 'group_index'. On first use of
+ * the group the FLM RCP is allocated and set up, the group 0 CFN is
+ * pointed at it and the KM miss path is switched over to FLM. Then the
+ * flow type (action set) matching the flow definition is found or a
+ * new one is allocated.
+ *
+ * Outputs: kid/ft to use in the learn record; cfn_to_copy (+ its KM
+ * flow type) when a new flow type must be instantiated; fh_existing
+ * when an identical action set already exists.
+ */
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+				  struct flow_handle *fh, uint32_t group_index,
+				  struct flm_flow_key_def_s *key_def,
+				  uint32_t *packet_mask,
+				  /* Return values */
+				  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+				  int *cfn_to_copy_km_ft,
+				  struct flow_handle **fh_existing)
+{
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group;
+	struct flm_flow_ft_ident_s temp_ft_ident;
+	struct nic_flow_def *fd = fh->fd;
+
+	if (group_index >= FLM_FLOW_RCP_MAX) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Invalid index for FLM programming: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	flm_group = &flm_handle->groups[group_index];
+
+	if (flm_group->cfn_group0 < 0) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to a unset CFN: Group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* First flow in this group: switch the miss path over to FLM */
+	if (!flm_group->miss_enabled) {
+		uint32_t bm = 0;
+
+		if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+							(int)group_index, fh)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get FLM RCP resource\n");
+			return -1;
+		}
+
+		/* Change group 0 FLM RCP selection to point to "group_index" */
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, flm_group->cfn_group0,
+				       group_index);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0, 1);
+
+		/* Setup FLM RCP "group_index" */
+		flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+		/*
+		 * Change group 0 FT UNHANDLED to FT MISS
+		 * Note: Once this step is done, the filter is invalid until the KCE step is done
+		 */
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+		set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+		/* Finally, enable FLM for group 0 */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8, &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       flm_group->cfn_group0 / 8,
+				       bm | (1 << (flm_group->cfn_group0 % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 flm_group->cfn_group0 / 8, 1);
+
+		flm_group->key_def.data = key_def->data;
+		flm_group->miss_enabled = 1;
+	}
+
+	/* All flows in a group must share the same key layout */
+	if (flm_group->key_def.data != key_def->data) {
+		NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program 2 different types of flows into group=%d\n",
+		       (int)group_index);
+		return -1;
+	}
+
+	/* Create action set */
+	memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+	temp_ft_ident.in_use = 1;
+
+	if (fd->dst_num_avail == 0) {
+		temp_ft_ident.drop = 1;
+	} else {
+		for (int i = 0; i < fd->dst_num_avail; ++i) {
+			if (fd->dst_id[i].type == PORT_PHY) {
+				temp_ft_ident.ltx_en = 1;
+				temp_ft_ident.ltx_port = fd->dst_id[i].id;
+			} else if (fd->dst_id[i].type == PORT_VIRT) {
+				temp_ft_ident.queue_en = 1;
+				temp_ft_ident.queue = fd->dst_id[i].id;
+			}
+		}
+	}
+
+	/* Set encap/decap data */
+	if (fd->tun_hdr.len > 0) {
+		temp_ft_ident.encap_len = fd->tun_hdr.len;
+		temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+		temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+	}
+
+	temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+	/* Find ft ident or create a new one */
+	uint32_t ft_index = 0;
+
+	/* Fast path: the last ft used by this group matches */
+	if (flm_group->cashed_ft_index > 0 &&
+			flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+			temp_ft_ident.data) {
+		ft_index = flm_group->cashed_ft_index;
+		*fh_existing = flm_group->ft[ft_index].fh;
+	} else {
+		/* ft 0 and 1 are reserved; scan 2..FLM_FLOW_FT_MAX-1 */
+		for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+			struct flm_flow_ft_ident_s *ft_ident =
+					&flm_group->ft[ft_index].ident;
+			if (ft_ident->data == 0) {
+				ft_ident->data = temp_ft_ident.data;
+				*cfn_to_copy = flm_group->cfn_group0;
+				*cfn_to_copy_km_ft = flm_group->km_ft_group0;
+				flm_group->ft[ft_index].fh = fh;
+				fh->flm_group_index = (uint8_t)group_index;
+				fh->flm_ft_index = (uint8_t)ft_index;
+				break;
+			} else if (ft_ident->data == temp_ft_ident.data) {
+				*fh_existing = flm_group->ft[ft_index].fh;
+				break;
+			}
+		}
+
+		if (ft_index >= FLM_FLOW_FT_MAX) {
+			NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+			return -1;
+		}
+
+		flm_group->cashed_ft_index = ft_index;
+	}
+
+	/* Set return values */
+	 *kid = group_index + 2;
+	 *ft = ft_index;
+
+	return 0;
+}
+
+/*
+ * Release the flow-type slot owned by fh in its FLM group and disable
+ * both the group's KM flow type (lookup 0) and the flow's FLM flow
+ * type (lookup 2) on the flow's CFN. Returns 0 on success.
+ */
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+				  struct flow_handle *fh)
+{
+	int error = 0;
+
+	struct flm_flow_handle_s *flm_handle =
+		(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+	struct flm_flow_group_s *flm_group =
+			&flm_handle->groups[fh->flm_group_index];
+
+	memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+	       sizeof(struct flm_flow_group_ft_s));
+
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   flm_group->km_ft_group0, 0, 0);
+	error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				   (int)fh->flm_ft_index, 2, 0);
+
+	return error;
+}
+
+/* Number of meter profiles supported (2^20) */
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+/*
+ * Per-profile bucket parameters, pre-computed by flow_mtr_set_profile()
+ * in the hardware format: bits [11:0] mantissa, [15:12] shift-left.
+ */
+struct flm_flow_mtr_handle_s {
+	struct dual_buckets_s {
+		uint16_t rate_a;
+		uint16_t rate_b;
+		uint16_t size_a;
+		uint16_t size_b;
+	} dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+/* Metering is available only with an FLM block of variant 2. */
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+	if (!hw_mod_flm_present(&dev->ndev->be))
+		return 0;
+
+	return dev->ndev->be.flm.nb_variant == 2;
+}
+
+/* Upper bound on meter policies (same as the profile table size). */
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+	return (uint64_t)FLM_MTR_PROFILE_SIZE;
+}
+
+/*
+ * Convert a byte count (assumed to fit in 40 bits) into bucket size
+ * units of 2^40 / 10^9 bytes, i.e. compute value * 10^9 / 2^40 in two
+ * 20-bit halves, rounding up when either half leaves a remainder.
+ */
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+	uint64_t lo = (value & 0xfffff) * 1000000000;
+	uint64_t hi = ((value >> 20) & 0xfffff) * 1000000000;
+	uint64_t result = (hi >> 20) + (lo >> 40);
+
+	if ((hi & 0xfffff) || (lo & 0xffffffffff))
+		result += 1;
+
+	return result;
+}
+
+/*
+ * Pre-compute the two FLM bucket configurations (rate and size in the
+ * hardware's 12-bit mantissa + 4-bit shift format) for a meter profile
+ * and store them for later use when meters are created. Rates are
+ * expressed in 128 bytes/sec units, sizes in 2^40/10^9 byte units.
+ * Always returns 0.
+ */
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+			 uint64_t bucket_rate_a, uint64_t bucket_size_a,
+			 uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+	struct flow_nic_dev *ndev = dev->ndev;
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	uint64_t bucket_rate_shift_a = 0;
+	uint64_t bucket_rate_shift_b = 0;
+
+	uint64_t bucket_size_shift_a = 0;
+	uint64_t bucket_size_shift_b = 0;
+
+	/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+	bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+			(bucket_rate_a >> 7);
+	bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+			(bucket_rate_b >> 7);
+
+	/* Round rate down to max rate supported */
+	if (bucket_rate_a > 0x7ff8000)
+		bucket_rate_a = 0x7ff8000;
+	if (bucket_rate_b > 0x7ff8000)
+		bucket_rate_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+		bucket_rate_shift_a += 1;
+	while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+		bucket_rate_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+			  (bucket_rate_shift_a << 12);
+	buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+			  (bucket_rate_shift_b << 12);
+
+	/* Round size down to 38-bit int */
+	if (bucket_size_a > 0x3fffffffff)
+		bucket_size_a = 0x3fffffffff;
+	if (bucket_size_b > 0x3fffffffff)
+		bucket_size_b = 0x3fffffffff;
+
+	/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+	bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+	bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+	if (bucket_size_a > 0x7ff8000)
+		bucket_size_a = 0x7ff8000;
+	if (bucket_size_b > 0x7ff8000)
+		bucket_size_b = 0x7ff8000;
+
+	/* Find shift to convert into 12-bit int */
+	while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+		bucket_size_shift_a += 1;
+	while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+		bucket_size_shift_b += 1;
+
+	/* Store in format [11:0] shift-left [15:12] */
+	buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+			  (bucket_size_shift_a << 12);
+	buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+			  (bucket_size_shift_b << 12);
+
+	return 0;
+}
+
+/* No per-policy device state is kept here; accept and succeed. */
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+			UNUSED uint32_t policy_id, UNUSED int drop)
+{
+	return 0;
+}
+
+/* Number of meter instances supported (2^24) */
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+	(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+/* Marks a statistics update in progress (see flm_mtr_update_stats()) */
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/*
+ * Periodic statistics thresholds. NOTE(review): the values look like
+ * hardware exponent encodings matching the 2^N comments below - confirm
+ * the encoding against the FLM register specification.
+ */
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+/* Upper bound on meter instances (statistics table size). */
+uint32_t flow_mtr_meters_supported(void)
+{
+	return (uint32_t)FLM_MTR_STAT_SIZE;
+}
+
+/* Per-meter statistics state */
+struct mtr_stat_s {
+	struct dual_buckets_s *buckets;	/* profile buckets; NULL after destroy */
+
+	/*
+	 * n_pkt/n_bytes are written by flm_mtr_update_stats() and read
+	 * lock-free by flm_mtr_read_stats(); UINT64_MSB set in n_pkt
+	 * marks an update in progress.
+	 */
+	uint64_t n_pkt;
+	uint64_t n_bytes;
+	uint64_t n_pkt_base;	/* snapshot taken on clear-on-read */
+	uint64_t n_bytes_base;
+	uint64_t stats_mask;	/* zero disables updates (deleted meter) */
+};
+
+/*
+ * NOTE(review): unlike WORDS_PER_INF_DATA this is not divided by
+ * sizeof(uint32_t) even though the name says "words" - confirm the
+ * unit reported by HW_FLM_BUF_CTRL_LRN_FREE.
+ */
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+/* Max attempts to free learn FIFO space before giving up */
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data);
+
+/*
+ * Write one learn record to the FLM learn interface. If the learn
+ * buffer has no room, drain pending INF records and re-read the free
+ * count, retrying up to FLM_PROG_MAX_RETRY times. Caller must hold the
+ * ndev mutex.
+ *
+ * Returns 0 on success, 1 when the buffer never freed up, otherwise
+ * the status of the flush.
+ */
+static int flow_flm_apply(struct flow_eth_dev *dev,
+			  struct flm_v17_lrn_data_s *learn_record)
+{
+	uint32_t lrn_ready;
+	uint32_t retry = 0;
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+				&lrn_ready);
+	if (lrn_ready < WORDS_PER_LEARN_DATA) {
+		/* Refresh the cached free count before entering the retry loop */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+		while (lrn_ready < WORDS_PER_LEARN_DATA) {
+			++retry;
+			if (retry > FLM_PROG_MAX_RETRY)
+				return 1;
+
+			flm_read_inf_rec_locked(dev, data);
+
+			hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+			hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+						HW_FLM_BUF_CTRL_LRN_FREE,
+						&lrn_ready);
+		}
+	}
+
+	int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+						HW_FLM_FLOW_LRN_DATA_V17,
+						(uint32_t *)learn_record);
+	return res;
+}
+
+/*
+ * Create a meter by sending an FLM learn record keyed on
+ * sw9 = mtr_id + 1 under kid 1 (the key space used for all meter
+ * records). Bucket A of the profile provides the rate/size; statistics
+ * collection is enabled when stats_mask is non-zero.
+ */
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+			  uint32_t profile_id, UNUSED uint32_t policy_id,
+			  uint64_t stats_mask)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_flow_mtr_handle_s *handle =
+		(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+	struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = buckets->rate_a;
+	learn_record.size = buckets->size_a;
+	learn_record.fill = buckets->size_a & 0x0fff;
+
+	learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+	learn_record.ent = 1;
+	learn_record.op = 1;
+	learn_record.eor = 1;
+
+	/* Meter id, little-endian, with the top marker bit set in id[8] */
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	if (stats_mask)
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	if (res == 0) {
+		struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+		mtr_stat[mtr_id].buckets = buckets;
+		__atomic_store_n(&mtr_stat[mtr_id].stats_mask, stats_mask, __ATOMIC_RELAXED);
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Destroy a meter: send a learn record with op == 0 for the meter's
+ * key and reset its statistics state so INF records arriving late are
+ * ignored (stats_mask == 0 suppresses further updates).
+ */
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.ent = 1;
+	learn_record.op = 0;
+	learn_record.eor = 1;
+
+	learn_record.id[0] = mtr_id & 0xff;
+	learn_record.id[1] = (mtr_id >> 8) & 0xff;
+	learn_record.id[2] = (mtr_id >> 16) & 0xff;
+	learn_record.id[3] = (mtr_id >> 24) & 0xff;
+	learn_record.id[8] = 1U << 7;
+
+	/* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	__atomic_store_n(&mtr_stat[mtr_id].stats_mask, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_bytes, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&mtr_stat[mtr_id].n_pkt, 0, __ATOMIC_RELAXED);
+	mtr_stat[mtr_id].n_bytes_base = 0;
+	mtr_stat[mtr_id].n_pkt_base = 0;
+	mtr_stat[mtr_id].buckets = NULL;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Send a bucket adjustment (op == 2) for an existing meter.
+ *
+ * NOTE(review): mtr_stat->buckets is dereferenced without a NULL
+ * check; calling this for a destroyed meter (buckets == NULL) would
+ * crash - confirm callers guarantee the meter exists.
+ */
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+			 uint32_t adjust_value)
+{
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat =
+		&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+	struct flm_v17_lrn_data_s learn_record;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	learn_record.sw9 = mtr_id + 1;
+	learn_record.kid = 1;
+
+	learn_record.rate = mtr_stat->buckets->rate_a;
+	learn_record.size = mtr_stat->buckets->size_a;
+	learn_record.adj = adjust_value;
+
+	learn_record.ft_mbr = 15;
+
+	learn_record.ent = 1;
+	learn_record.op = 2;
+	learn_record.eor = 1;
+
+	if (__atomic_load_n(&mtr_stat->stats_mask, __ATOMIC_RELAXED))
+		learn_record.vol_idx = 1;
+
+	int res = flow_flm_apply(dev, &learn_record);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return res;
+}
+
+/*
+ * Read up to MAX_INF_DATA_RECORDS_PER_READ INF records into 'data'.
+ * Caller must hold the ndev mutex. Returns the number of records read.
+ */
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+					uint32_t *data)
+{
+	uint32_t avail = 0;
+
+	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+				&avail);
+	if (avail < WORDS_PER_INF_DATA) {
+		/* Refresh the cached buffer level and check again */
+		hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+		hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+					HW_FLM_BUF_CTRL_INF_AVAIL, &avail);
+	}
+
+	uint32_t n_rec = avail / WORDS_PER_INF_DATA;
+
+	if (n_rec == 0)
+		return 0;
+	if (n_rec > MAX_INF_DATA_RECORDS_PER_READ)
+		n_rec = MAX_INF_DATA_RECORDS_PER_READ;
+
+	hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+				       data, n_rec * WORDS_PER_INF_DATA);
+
+	return n_rec;
+}
+
+/*
+ * Drain INF records and fold valid meter statistics into the per-meter
+ * counters. Counters are published with a small protocol so readers
+ * need no lock: n_pkt is first stored with UINT64_MSB set (update in
+ * progress), then n_bytes, then n_pkt with its final value - see
+ * flm_mtr_read_stats(). Returns the number of INF records processed.
+ */
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+	uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+	for (uint32_t i = 0; i < records; ++i) {
+		uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that received record hold valid meter statistics */
+		if ((p_record[6] < flow_mtr_meters_supported() &&
+				p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+			uint32_t id = p_record[6];
+
+			/* Don't update a deleted meter */
+			uint64_t stats_mask =
+				__atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+			if (stats_mask) {
+				/* 64-bit counters arrive as lo/hi word pairs */
+				uint64_t nb = ((uint64_t)p_record[1] << 32) |
+					      p_record[0];
+				uint64_t np = ((uint64_t)p_record[3] << 32) |
+					      p_record[2];
+
+				__atomic_store_n(&mtr_stat[id].n_pkt,
+					     np | UINT64_MSB, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_bytes, nb, __ATOMIC_RELAXED);
+				__atomic_store_n(&mtr_stat[id].n_pkt, np, __ATOMIC_RELAXED);
+			}
+		}
+	}
+
+	return records;
+}
+
+/*
+ * Read the green packet/byte counters of meter 'id' relative to their
+ * base values. Spins until a consistent (n_pkt, n_bytes) pair is seen:
+ * n_pkt with UINT64_MSB set means an update is in progress, and n_pkt
+ * is re-read afterwards to verify n_bytes belongs to the same update
+ * (see flm_mtr_update_stats()). With 'clear' set the read is
+ * destructive: the base counters are moved to the current values.
+ * Outputs are only written when *stats_mask is non-zero.
+ */
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+			uint64_t *stats_mask, uint64_t *green_pkt,
+			uint64_t *green_bytes, int clear)
+{
+	struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+	*stats_mask = __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+	if (*stats_mask) {
+		uint64_t pkt_1;
+		uint64_t pkt_2;
+		uint64_t nb;
+
+		do {
+			do {
+				pkt_1 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+			} while (pkt_1 & UINT64_MSB);
+			nb = __atomic_load_n(&mtr_stat[id].n_bytes, __ATOMIC_RELAXED);
+			pkt_2 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+		} while (pkt_1 != pkt_2);
+
+		*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+		*green_bytes = nb - mtr_stat[id].n_bytes_base;
+		if (clear) {
+			mtr_stat[id].n_pkt_base = pkt_1;
+			mtr_stat[id].n_bytes_base = nb;
+		}
+	}
+}
+
+/* Each port uses IFR MTU recipe port + 1 (result wraps in 8 bits). */
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+	return (uint8_t)(port + 1);
+}
+
+/* Resolve a port_id to its physical port; UINT8_MAX when not found. */
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+				     uint32_t port_id)
+{
+	struct flow_eth_dev *dev;
+
+	for (dev = ndev->eth_base; dev; dev = dev->next) {
+		if (dev->port_id == port_id)
+			return dev->port;
+	}
+
+	return UINT8_MAX;
+}
+
+/* Push fh onto the head of the device's flow list. */
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head)
+		head->prev = fh;
+	ndev->flow_base = fh;
+}
+
+/* Unlink fh from the device's flow list. */
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+	struct flow_handle *nxt = fh->next;
+	struct flow_handle *prv = fh->prev;
+
+	if (prv) {
+		prv->next = nxt;
+		if (nxt)
+			nxt->prev = prv;
+	} else if (nxt) {
+		/* fh was the list head */
+		ndev->flow_base = nxt;
+		nxt->prev = NULL;
+	} else if (ndev->flow_base == fh) {
+		/* fh was the only element */
+		ndev->flow_base = NULL;
+	}
+}
+
+/* Push a flow handle onto the front of the device's FLM flow list. */
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh)
+{
+	struct flow_handle *head = ndev->flow_base_flm;
+
+	fh->prev = NULL;
+	fh->next = head;
+	if (head != NULL)
+		head->prev = fh;
+	ndev->flow_base_flm = fh;
+}
+
+/*
+ * Unlink a flow handle from the device's FLM flow list.  A handle with a
+ * successor but no predecessor is taken to be the list head; a handle
+ * with neither only clears the head pointer when it actually is the head.
+ */
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+				struct flow_handle *fh_flm)
+{
+	struct flow_handle *nxt = fh_flm->next;
+	struct flow_handle *prv = fh_flm->prev;
+
+	if (prv != NULL)
+		prv->next = nxt;
+	else if (nxt != NULL || ndev->flow_base_flm == fh_flm)
+		ndev->flow_base_flm = nxt;
+
+	if (nxt != NULL)
+		nxt->prev = prv;
+}
+
+/*
+ * Interpret a VLAN flow element.  A requested TCI match consumes one SW
+ * key word matched against the first VLAN tag.  The VLAN is counted in
+ * fd->vlans whether or not a TCI match was requested.
+ *
+ * Returns 0 on success.  Returns 1 on failure, in which case 'error' has
+ * been set and 'fd' has been freed - the caller must not touch fd again.
+ *
+ * NOTE(review): sw_counter is passed by value, so the 'sw_counter += 1'
+ * below is not visible to the caller - confirm the caller tracks consumed
+ * SW key words itself.
+ */
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+	struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+	const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+	if (vlan_spec != NULL && vlan_mask != NULL) {
+		if (vlan_mask->tci) {
+			/* An implicit VLAN already occupies the first-VLAN match */
+			if (implicit_vlan_vid > 0) {
+				NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+					"for implicit VLAN patterns.\n");
+				flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+				free(fd);
+				return 1;
+			}
+
+			/* Only two SW key words exist in total */
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			/* SW words are allocated from the top: index 1, then 0 */
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			sw_mask[0] = ntohs(vlan_mask->tci);
+			sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+			sw_counter += 1;
+		}
+	}
+
+	fd->vlans += 1;
+	return 0;
+}
+
+/*
+ * Interpret an IPv4 flow element.  A src/dst address match preferably
+ * consumes one QW (quad-word) key resource holding both addresses; when
+ * no QW is left, each requested address falls back to one SW key word.
+ *
+ * Returns 0 on success.  Returns 1 on failure, in which case 'error' has
+ * been set and 'fd' has been freed - the caller must not touch fd again.
+ *
+ * NOTE(review): qw_counter/sw_counter are passed by value, so the local
+ * increments are not visible to the caller - confirm the caller tracks
+ * consumed key resources itself.
+ */
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+	uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+	uint32_t any_count)
+{
+	const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+	const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+	if (ipv4_spec != NULL && ipv4_mask != NULL) {
+		/*
+		 * NOTE(review): this compares the spec (not just the mask) to
+		 * all-ones; presumably an all-ones fully-masked frag_offset is
+		 * a sentinel selecting special fragmentation handling - confirm.
+		 */
+		if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+			fd->fragmentation = 0xfe;
+
+		/* Prefer a QW: both addresses in one resource at L3 offset 12 */
+		if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+			qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+			qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+			qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+			qw_counter += 1;
+		} else {
+			/* Fall back to SW words; check both fit before using any */
+			if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+				(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			if (ipv4_mask->hdr.src_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+				sw_counter += 1;
+			}
+
+			if (ipv4_mask->hdr.dst_ip) {
+				uint32_t *sw_data = &packet_data[1 - sw_counter];
+				uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+				sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+				sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+				km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+				set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+				sw_counter += 1;
+			}
+		}
+	}
+
+	/* After a tunnel (or an already-seen outer L3) this is the inner L3 */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+	else
+		fd->l3_prot = PROT_L3_IPV4;
+	return 0;
+}
+
+/*
+ * Interpret an IPv6 flow element.  Each of the source and destination
+ * addresses with a non-zero specification consumes one QW (4 x 32 bit)
+ * key resource, taken at L3 offset 8 (src) or 24 (dst).
+ *
+ * Returns 0 on success.  Returns 1 on failure, in which case 'error' has
+ * been set and 'fd' has been freed - the caller must not touch fd again.
+ */
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+	const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+	if (ipv6_spec != NULL && ipv6_mask != NULL) {
+		if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+			/* To host order, then apply the mask, word by word */
+			for (unsigned int w = 0; w < 4; ++w) {
+				qw_mask[w] = ntohl(qw_mask[w]);
+				qw_data[w] = ntohl(qw_data[w]) & qw_mask[w];
+			}
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+			qw_counter += 1;
+		}
+
+		if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+			if (qw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+			uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+			memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+			memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+			/* To host order, then apply the mask, word by word */
+			for (unsigned int w = 0; w < 4; ++w) {
+				qw_mask[w] = ntohl(qw_mask[w]);
+				qw_data[w] = ntohl(qw_data[w]) & qw_mask[w];
+			}
+
+			km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+			set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+			qw_counter += 1;
+		}
+	}
+
+	/* After a tunnel (or an already-seen outer L3) this is the inner L3 */
+	if (any_count > 0 || fd->l3_prot != -1)
+		fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+	else
+		fd->l3_prot = PROT_L3_IPV6;
+	return 0;
+}
+
+/*
+ * Interpret a UDP flow element.  A source/destination port match packs
+ * both 16-bit ports into a single SW key word at the start of L4.  The
+ * L4 protocol is recorded (inner when tunneled) regardless of whether a
+ * port match was requested.
+ *
+ * Returns 0 on success.  Returns 1 on failure, in which case 'error' has
+ * been set and 'fd' has been freed - the caller must not touch fd again.
+ *
+ * NOTE(review): the function name looks like a typo for "udp"; renaming
+ * would require updating the caller.  Also, sw_counter is passed by
+ * value, so the increment below is not visible to the caller - confirm
+ * the caller tracks consumed SW key words itself.
+ */
+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+	const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+	if (udp_spec != NULL && udp_mask != NULL) {
+		if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+			/* Only two SW key words exist in total */
+			if (sw_counter >= 2) {
+				NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+				flow_nic_set_error(ERR_FAILED, error);
+				free(fd);
+				return 1;
+			}
+
+			uint32_t *sw_data = &packet_data[1 - sw_counter];
+			uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+			/* src port in the high 16 bits, dst port in the low 16 */
+			sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+				ntohs(udp_mask->hdr.dst_port);
+			sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+				ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+			km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+			set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+			sw_counter += 1;
+		}
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_UDP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret an SCTP flow element.  A source/destination port match packs
+ * both 16-bit ports into a single SW key word at the start of L4.  The
+ * L4 protocol is recorded (inner when tunneled) regardless of whether a
+ * port match was requested.
+ *
+ * Returns 0 on success.  Returns 1 on failure, in which case 'error' has
+ * been set and 'fd' has been freed - the caller must not touch fd again.
+ */
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_sctp *spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+	const struct flow_elem_sctp *mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+	int match_ports = spec != NULL && mask != NULL &&
+		(mask->hdr.src_port || mask->hdr.dst_port);
+
+	if (match_ports) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+		/* src port in the high 16 bits, dst port in the low 16 */
+		uint32_t ports_mask = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		uint32_t ports_data = (ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port);
+
+		sw_mask[0] = ports_mask;
+		sw_data[0] = ports_data & ports_mask;
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_SCTP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a TCP flow element.  A source/destination port match packs
+ * both 16-bit ports into a single SW key word at the start of L4.  The
+ * L4 protocol is recorded (inner when tunneled) regardless of whether a
+ * port match was requested.
+ *
+ * Returns 0 on success.  Returns 1 on failure, in which case 'error' has
+ * been set and 'fd' has been freed - the caller must not touch fd again.
+ */
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+	const struct flow_elem_tcp *spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+	const struct flow_elem_tcp *mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+	int match_ports = spec != NULL && mask != NULL &&
+		(mask->hdr.src_port || mask->hdr.dst_port);
+
+	if (match_ports) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+		/* src port in the high 16 bits, dst port in the low 16 */
+		uint32_t ports_mask = (ntohs(mask->hdr.src_port) << 16) |
+			ntohs(mask->hdr.dst_port);
+		uint32_t ports_data = (ntohs(spec->hdr.src_port) << 16) |
+			ntohs(spec->hdr.dst_port);
+
+		sw_mask[0] = ports_mask;
+		sw_data[0] = ports_data & ports_mask;
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+		sw_counter += 1;
+	}
+
+	if (any_count > 0 || fd->l4_prot != -1) {
+		fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+		key_def->inner_proto = 1;
+	} else {
+		fd->l4_prot = PROT_L4_TCP;
+		key_def->outer_proto = 1;
+	}
+	return 0;
+}
+
+/*
+ * Interpret a GTP flow element.  A TEID match consumes one SW key word
+ * taken at offset 4 of the L4 payload.  GTPv1-U is recorded as the
+ * tunnel protocol regardless of whether a TEID match was requested.
+ *
+ * Returns 0 on success.  Returns 1 on failure, in which case 'error' has
+ * been set and 'fd' has been freed - the caller must not touch fd again.
+ */
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+	struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	const struct flow_elem_gtp *spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+	const struct flow_elem_gtp *mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+	if (spec != NULL && mask != NULL && mask->teid) {
+		if (sw_counter >= 2) {
+			NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fd);
+			return 1;
+		}
+
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = ntohl(mask->teid);
+		sw_data[0] = ntohl(spec->teid) & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+		set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+		sw_counter += 1;
+	}
+
+	fd->tunnel_prot = PROT_TUN_GTPV1U;
+	return 0;
+}
+
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error, uint16_t implicit_vlan_vid,
+	uint32_t *in_port_id, uint32_t *num_dest_port,
+	uint32_t *num_queues, uint32_t *packet_data,
+	uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t any_count = 0;
+	int mtr_count = 0;
+
+	unsigned int encap_decap_order = 0;
+
+	unsigned int qw_counter = 0;
+	unsigned int sw_counter = 0;
+
+	uint64_t modify_field_use_flags = 0x0;
+
+	*in_port_id = UINT32_MAX;
+	*num_dest_port = 0;
+	*num_queues = 0;
+
+	memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+	memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+	key_def->data = 0;
+
+	if (action == NULL || elem == NULL) {
+		flow_nic_set_error(ERR_FAILED, error);
+		NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+		return NULL;
+	}
+
+	struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+	if (!fd) {
+		flow_nic_set_error(ERR_MEMORY, error);
+		NT_LOG(ERR, FILTER, "ERR Memory\n");
+		return NULL;
+	}
+
+	/* Set default values for fd */
+	fd->full_offload = -1;
+	fd->in_port_override = -1;
+	fd->mark = UINT32_MAX;
+	fd->jump_to_group = UINT32_MAX;
+
+	fd->l2_prot = -1;
+	fd->l3_prot = -1;
+	fd->l4_prot = -1;
+	fd->vlans = 0;
+	fd->tunnel_prot = -1;
+	fd->tunnel_l3_prot = -1;
+	fd->tunnel_l4_prot = -1;
+	fd->fragmentation = -1;
+
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fd);
+
+	/*
+	 * Gather flow match + actions and convert into internal flow definition structure
+	 * (struct nic_flow_def_s)
+	 * This is the 1st step in the flow creation - validate, convert and prepare
+	 */
+	for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+		switch (action[aidx].type) {
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+			if (action[aidx].conf) {
+				uint32_t port_id =
+					((const struct flow_action_port_id *)
+					 action[aidx]
+					 .conf)
+					->id;
+				uint8_t port = get_port_from_port_id(dev->ndev,
+								     port_id);
+
+				if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+					/* ERROR too many output destinations */
+					NT_LOG(ERR, FILTER,
+					       "Too many output destinations\n");
+					flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (port >= dev->ndev->be.num_phy_ports) {
+					/* ERROR phy port out of range */
+					NT_LOG(ERR, FILTER,
+					       "Phy port out of range\n");
+					flow_nic_set_error(ERR_OUTPUT_INVALID,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* New destination port to add */
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					port_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+				fd->dst_id[fd->dst_num_avail].id = (int)port;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				if (fd->flm_mtu_fragmentation_recipe == 0) {
+					fd->flm_mtu_fragmentation_recipe =
+						convert_port_to_ifr_mtu_recipe(port);
+				}
+
+				if (fd->full_offload < 0)
+					fd->full_offload = 1;
+
+				*num_dest_port += 1;
+
+				NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+				       (int)port);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_queue *queue =
+					(const struct flow_action_queue *)
+					action[aidx]
+					.conf;
+
+				int hw_id = rx_queue_idx_to_hw_id(dev,
+								  queue->index);
+
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					dev->port;
+				fd->dst_id[fd->dst_num_avail].id = hw_id;
+				fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+				fd->dst_id[fd->dst_num_avail].active = 1;
+				fd->dst_num_avail++;
+
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+				       dev, dev->port, queue->index, hw_id);
+
+				fd->full_offload = 0;
+				*num_queues += 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_rss *rss =
+					(const struct flow_action_rss *)
+					action[aidx]
+					.conf;
+
+				for (uint32_t i = 0; i < rss->queue_num; ++i) {
+					int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+					fd->dst_id[fd->dst_num_avail]
+					.owning_port_id = dev->port;
+					fd->dst_id[fd->dst_num_avail].id =
+						hw_id;
+					fd->dst_id[fd->dst_num_avail].type =
+						PORT_VIRT;
+					fd->dst_id[fd->dst_num_avail].active =
+						1;
+					fd->dst_num_avail++;
+				}
+
+				fd->full_offload = 0;
+				*num_queues += rss->queue_num;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->mark = ((const struct flow_action_mark *)
+					    action[aidx]
+					    .conf)
+					   ->id;
+				NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_JUMP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_jump *jump =
+					(const struct flow_action_jump *)
+					action[aidx]
+					.conf;
+				fd->jump_to_group = jump->group;
+				NT_LOG(DBG, FILTER,
+				       "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+				       dev, jump->group);
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+			       dev);
+			if (action[aidx].conf) {
+				fd->dst_id[fd->dst_num_avail].owning_port_id =
+					0;
+				fd->dst_id[fd->dst_num_avail].id = 0;
+				fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+				fd->dst_num_avail++;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_METER:
+			NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+			       dev);
+			if (action[aidx].conf) {
+				const struct flow_action_meter *meter =
+					(const struct flow_action_meter *)
+					action[aidx]
+					.conf;
+				if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Number of METER actions exceeds %d.\n",
+					       MAX_FLM_MTRS_SUPPORTED);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+				fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)
+					action[aidx]
+					.conf;
+				const struct flow_elem *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+						encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP data/size invalid.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+				memcpy(fd->tun_hdr.d.hdr8, encap->data,
+				       fd->tun_hdr.len);
+
+				while (items->type != FLOW_ELEM_TYPE_END) {
+					switch (items->type) {
+					case FLOW_ELEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+					case FLOW_ELEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+					case FLOW_ELEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+						fd->tun_hdr.new_outer = 1;
+						break;
+					case FLOW_ELEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+						break;
+					case FLOW_ELEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+						break;
+					default:
+						break;
+					}
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - Encapsulation with %d vlans not supported.\n",
+					       (int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16;
+						++i) {
+					uint8_t *data =
+						fd->tun_hdr.d.hdr8 + i * 16;
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+			if (action[aidx].conf) {
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)
+					action[aidx]
+					.conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: - RAW_DECAP must decap something.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				encap_decap_order = 1;
+
+				fd->header_strip_start_dyn = 2;
+				fd->header_strip_start_ofs = 2;
+
+				switch (decap->items[decap->item_count - 2]
+						.type) {
+				case FLOW_ELEM_TYPE_ETH:
+				case FLOW_ELEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = 4;
+					fd->header_strip_end_ofs = 0;
+					break;
+				case FLOW_ELEM_TYPE_IPV4:
+				case FLOW_ELEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = 7;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_SCTP:
+				case FLOW_ELEM_TYPE_TCP:
+				case FLOW_ELEM_TYPE_UDP:
+				case FLOW_ELEM_TYPE_ICMP:
+					fd->header_strip_end_dyn = 8;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				case FLOW_ELEM_TYPE_GTP:
+					fd->header_strip_end_dyn = 13;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				default:
+					fd->header_strip_end_dyn = 1;
+					fd->header_strip_end_ofs = 0;
+					fd->header_strip_removed_outer_ip = 1;
+					break;
+				}
+			}
+			break;
+
+		case FLOW_ACTION_TYPE_MODIFY_FIELD:
+			NT_LOG(DBG, FILTER,
+			       "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+			{
+				const struct flow_action_modify_field *modify_field =
+					(const struct flow_action_modify_field *)
+					action[aidx]
+					.conf;
+				uint64_t modify_field_use_flag = 0;
+
+				if (modify_field->src.field !=
+						FLOW_FIELD_VALUE) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only src type VALUE is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.level > 2) {
+					NT_LOG(ERR, FILTER,
+					       "MODIFY_FIELD only dst level 0, 1, and 2 is supported.\n");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+							   error);
+					free(fd);
+					return NULL;
+				}
+
+				if (modify_field->dst.field ==
+						FLOW_FIELD_IPV4_TTL ||
+						modify_field->dst.field ==
+						FLOW_FIELD_IPV6_HOPLIMIT) {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SUB) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->ttl_sub_enable) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					fd->ttl_sub_enable = 1;
+					fd->ttl_sub_ipv4 =
+						(modify_field->dst.field ==
+						 FLOW_FIELD_IPV4_TTL) ?
+						1 :
+						0;
+					fd->ttl_sub_outer =
+						(modify_field->dst.level <= 1) ?
+						1 :
+						0;
+				} else {
+					if (modify_field->operation !=
+							FLOW_MODIFY_SET) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD only operation SET "
+						       "is supported in general.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					if (fd->modify_field_count >=
+							dev->ndev->be.tpe.nb_cpy_writers) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD exceeded maximum of %u"
+						       " MODIFY_FIELD actions.\n",
+						       dev->ndev->be.tpe
+						       .nb_cpy_writers);
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					switch (modify_field->dst.field) {
+					case FLOW_FIELD_IPV4_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 1;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV6_DSCP:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_DSCP_IPV6;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						/*
+						 * len=2 is needed because IPv6 DSCP overlaps 2
+						 * bytes.
+						 */
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_PSC_QFI:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_RQI_QFI;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 14;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 1;
+						break;
+					case FLOW_FIELD_IPV4_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 12;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_IPV4_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_IPV4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L3;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 16;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					case FLOW_FIELD_TCP_PORT_SRC:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_SRC:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 0;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_TCP_PORT_DST:
+					/* fallthrough */
+					case FLOW_FIELD_UDP_PORT_DST:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_PORT;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn = DYN_L4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 2;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 2;
+						break;
+					case FLOW_FIELD_GTP_TEID:
+						fd->modify_field
+						[fd->modify_field_count]
+						.select =
+							CPY_SELECT_TEID;
+						fd->modify_field
+						[fd->modify_field_count]
+						.dyn =
+							DYN_L4_PAYLOAD;
+						fd->modify_field
+						[fd->modify_field_count]
+						.ofs = 4;
+						fd->modify_field
+						[fd->modify_field_count]
+						.len = 4;
+						break;
+					default:
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type is not supported.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					modify_field_use_flag =
+						1
+						<< fd->modify_field
+						[fd->modify_field_count]
+						.select;
+					if (modify_field_use_flag &
+							modify_field_use_flags) {
+						NT_LOG(ERR, FILTER,
+						       "MODIFY_FIELD dst type hardware "
+						       "resource already used.\n");
+						flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+								   error);
+						free(fd);
+						return NULL;
+					}
+
+					memcpy(fd->modify_field
+					       [fd->modify_field_count]
+					       .value8,
+					       modify_field->src.value, 16);
+
+					fd->modify_field[fd->modify_field_count]
+					.level =
+						modify_field->dst.level;
+
+					modify_field_use_flags |=
+						modify_field_use_flag;
+					fd->modify_field_count += 1;
+				}
+			}
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow action received - %i\n",
+			       action[aidx].type);
+			flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		free(fd);
+		return NULL;
+	}
+
+	if (implicit_vlan_vid > 0) {
+		uint32_t *sw_data = &packet_data[1 - sw_counter];
+		uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+		sw_mask[0] = 0x0fff;
+		sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+		km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+				  DYN_FIRST_VLAN, 0);
+		set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+		sw_counter += 1;
+
+		fd->vlans += 1;
+	}
+
+	/*
+	 * All Actions interpreted
+	 */
+	for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+		switch (elem[eidx].type) {
+		case FLOW_ELEM_TYPE_ANY:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_any *any_spec =
+					(const struct flow_elem_any *)elem[eidx]
+					.spec;
+				const struct flow_elem_any *any_mask =
+					(const struct flow_elem_any *)elem[eidx]
+					.mask;
+
+				if (any_spec && any_mask) {
+					any_count += any_spec->num &
+						     any_mask->num;
+				}
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				const struct flow_elem_eth *eth_spec =
+					(const struct flow_elem_eth *)elem[eidx]
+					.spec;
+				const struct flow_elem_eth *eth_mask =
+					(const struct flow_elem_eth *)elem[eidx]
+					.mask;
+
+				if (any_count > 0) {
+					NT_LOG(ERR, FILTER,
+					       "Tunneled L2 ethernet not supported\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (qw_counter >= 2) {
+					NT_LOG(ERR, FILTER,
+					       "Key size too big. Out of QW resources.\n");
+					flow_nic_set_error(ERR_FAILED, error);
+					free(fd);
+					return NULL;
+				}
+
+				if (eth_spec != NULL && eth_mask != NULL) {
+					if (is_non_zero(eth_mask->d_addr.addr_b,
+							6) ||
+							is_non_zero(eth_mask->s_addr.addr_b,
+								    6)) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+								       qw_counter *
+								       4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+								       qw_counter *
+								       4];
+
+						qw_data[0] =
+							((eth_spec->d_addr
+							  .addr_b[0] &
+							  eth_mask->d_addr
+							  .addr_b[0])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[1] &
+							  eth_mask->d_addr
+							  .addr_b[1])
+							 << 16) +
+							((eth_spec->d_addr
+							  .addr_b[2] &
+							  eth_mask->d_addr
+							  .addr_b[2])
+							 << 8) +
+							(eth_spec->d_addr
+							 .addr_b[3] &
+							 eth_mask->d_addr
+							 .addr_b[3]);
+
+						qw_data[1] =
+							((eth_spec->d_addr
+							  .addr_b[4] &
+							  eth_mask->d_addr
+							  .addr_b[4])
+							 << 24) +
+							((eth_spec->d_addr
+							  .addr_b[5] &
+							  eth_mask->d_addr
+							  .addr_b[5])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[0] &
+							  eth_mask->s_addr
+							  .addr_b[0])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[1] &
+							 eth_mask->s_addr
+							 .addr_b[1]);
+
+						qw_data[2] =
+							((eth_spec->s_addr
+							  .addr_b[2] &
+							  eth_mask->s_addr
+							  .addr_b[2])
+							 << 24) +
+							((eth_spec->s_addr
+							  .addr_b[3] &
+							  eth_mask->s_addr
+							  .addr_b[3])
+							 << 16) +
+							((eth_spec->s_addr
+							  .addr_b[4] &
+							  eth_mask->s_addr
+							  .addr_b[4])
+							 << 8) +
+							(eth_spec->s_addr
+							 .addr_b[5] &
+							 eth_mask->s_addr
+							 .addr_b[5]);
+
+						qw_mask[0] = (eth_mask->d_addr
+							      .addr_b[0]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[1]
+							      << 16) +
+							     (eth_mask->d_addr
+							      .addr_b[2]
+							      << 8) +
+							     eth_mask->d_addr
+							     .addr_b[3];
+
+						qw_mask[1] = (eth_mask->d_addr
+							      .addr_b[4]
+							      << 24) +
+							     (eth_mask->d_addr
+							      .addr_b[5]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[0]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[1];
+
+						qw_mask[2] = (eth_mask->s_addr
+							      .addr_b[2]
+							      << 24) +
+							     (eth_mask->s_addr
+							      .addr_b[3]
+							      << 16) +
+							     (eth_mask->s_addr
+							      .addr_b[4]
+							      << 8) +
+							     eth_mask->s_addr
+							     .addr_b[5];
+
+						km_add_match_elem(&fd->km,
+								  &qw_data[(size_t)(qw_counter *
+								  4)],
+								  &qw_mask[(size_t)(qw_counter *
+								  4)],
+								  3, DYN_L2, 0);
+						set_key_def_qw(key_def,
+							       qw_counter,
+							       DYN_L2, 0);
+						qw_counter += 1;
+					}
+				}
+
+				fd->l2_prot = PROT_L2_ETH2;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+					sw_counter, packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+					sw_counter, packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def, any_count))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+			       dev->ndev->adapter_no, dev->port);
+			{
+				if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+					packet_data, packet_mask, key_def))
+					return NULL;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+			       dev->ndev->adapter_no, dev->port);
+			if (elem[eidx].spec) {
+				*in_port_id =
+					((const struct flow_elem_port_id *)
+					 elem[eidx]
+					 .spec)
+					->id;
+			}
+			break;
+
+		case FLOW_ELEM_TYPE_VOID:
+			NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+			       dev->ndev->adapter_no, dev->port);
+			break;
+
+		default:
+			NT_LOG(ERR, FILTER,
+			       "Invalid or unsupported flow request: %d\n",
+			       (int)elem[eidx].type);
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+					   error);
+			free(fd);
+			return NULL;
+		}
+	}
+
+	return fd;
+}
+
+/*
+ * Return the CAT function (CFN) entry 'cfn' to its power-on state:
+ * clear the CFN itself, detach it from both the KM and FLM lookups,
+ * and wipe any CTE/CTS action linkage it still owns.
+ * Always returns 0.
+ */
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+	/* Clear the CFN entry itself */
+	hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn, 0, 0);
+	hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+
+	/* Detach KM: clear the enable bit for this CFN and zero its category */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, cfn / 8,
+				      enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					cfn / 8, 1);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					1);
+
+		/* Disable every KM flow type on all four key lanes */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_km(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* Detach FLM in the same manner */
+	{
+		uint32_t enable_bm = 0;
+
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8, &enable_bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, cfn / 8,
+				       enable_bm & ~(1 << (cfn % 8)));
+		hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				       KM_FLM_IF_FIRST, cfn, 0);
+
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 cfn / 8, 1);
+		hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+					 1);
+
+		/* Disable every FLM flow type on all four key lanes */
+		for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+				ft++) {
+			for (int key = 0; key < 4; key++)
+				set_flow_type_flm(dev->ndev, cfn, ft, key, 0);
+		}
+	}
+
+	/* Clear CTE enable bitmap and, if set, the associated CTS entries */
+	{
+		uint32_t cte_enable = 0;
+
+		hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+				   &cte_enable);
+
+		if (cte_enable != 0) {
+			/* Each CFN owns cts_offset consecutive CTS entries */
+			const int cts_offset =
+				((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+			hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+					   cfn, 0);
+			hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+			for (int cte_type = 0; cte_type < cts_offset;
+					++cte_type) {
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_A,
+						   cts_offset * cfn + cte_type,
+						   0);
+				hw_mod_cat_cts_set(&dev->ndev->be,
+						   HW_CAT_CTS_CAT_B,
+						   cts_offset * cfn + cte_type,
+						   0);
+			}
+
+			hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+					     cts_offset);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Map the parsed L4 protocol to the IP protocol number stored in the
+ * FLM record.  The outer L4 protocol wins; otherwise the tunneled L4
+ * protocol is used; 0 when neither is a known protocol.
+ */
+static uint8_t get_flm_ip_protocol(const struct nic_flow_def *fd)
+{
+	switch (fd->l4_prot) {
+	case PROT_L4_TCP:
+		return 6;
+	case PROT_L4_UDP:
+		return 17;
+	case PROT_L4_SCTP:
+		return 132;
+	case PROT_L4_ICMP:
+		return 1;
+	default:
+		break;
+	}
+
+	switch (fd->tunnel_l4_prot) {
+	case PROT_TUN_L4_TCP:
+		return 6;
+	case PROT_TUN_L4_UDP:
+		return 17;
+	case PROT_TUN_L4_SCTP:
+		return 132;
+	case PROT_TUN_L4_ICMP:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Fill an FLM-type flow handle from the flow definition: protocol
+ * number, the 10-word match data, key id, replace-extension pointer,
+ * priority, and any values captured by modify-field actions.
+ * Returns 0 on success, -1 when fh is not an FLM handle.
+ */
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+			     const uint32_t *packet_data, uint32_t flm_key_id,
+			     uint16_t rpl_ext_ptr, uint32_t priority)
+{
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	fh->flm_prot = get_flm_ip_protocol(fd);
+
+	memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+	fh->flm_kid = flm_key_id;
+	fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+	fh->flm_prio = (uint8_t)priority;
+
+	/* Pick up values recorded by the modify-field actions */
+	for (unsigned int idx = 0; idx < fd->modify_field_count; ++idx) {
+		switch (fd->modify_field[idx].select) {
+		case CPY_SELECT_DSCP_IPV4:
+		/* fallthrough */
+		case CPY_SELECT_DSCP_IPV6:
+			fh->flm_dscp = fd->modify_field[idx].value8[0];
+			break;
+		case CPY_SELECT_RQI_QFI:
+			fh->flm_rqi =
+				(fd->modify_field[idx].value8[0] >> 6) & 0x1;
+			fh->flm_qfi = fd->modify_field[idx].value8[0] & 0x3f;
+			break;
+		case CPY_SELECT_IPV4:
+			fh->flm_nat_ipv4 =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		case CPY_SELECT_PORT:
+			fh->flm_nat_port =
+				ntohs(fd->modify_field[idx].value16[0]);
+			break;
+		case CPY_SELECT_TEID:
+			fh->flm_teid =
+				ntohl(fd->modify_field[idx].value32[0]);
+			break;
+		}
+	}
+
+	fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+	return 0;
+}
+
+/*
+ * Build a v17 FLM learn record from an FLM flow handle and push it to
+ * hardware via flow_flm_apply().
+ *
+ * @param dev     flow device the record is applied on
+ * @param fh      FLM flow handle (must be FLOW_HANDLE_TYPE_FLM)
+ * @param mtr_ids optional array of MAX_FLM_MTRS_SUPPORTED meter ids;
+ *                NULL when the flow carries no meters
+ * @param flm_ft  FLM flow type to program
+ * @param flm_op  learn-record operation code (low 4 bits used)
+ * @return result of flow_flm_apply(), or -1 if fh is not an FLM handle
+ */
+static int flm_flow_programming(struct flow_eth_dev *dev,
+				struct flow_handle *fh, uint32_t *mtr_ids,
+				uint32_t flm_ft, uint32_t flm_op)
+{
+	struct flm_v17_lrn_data_s learn_record;
+
+	if (fh->type != FLOW_HANDLE_TYPE_FLM)
+		return -1;
+
+	memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+	/*
+	 * Match data is copied in reverse word order: flm_data[9..0] maps
+	 * onto qw0[0..3], qw4[0..3], sw8, sw9 of the learn record.
+	 */
+	learn_record.qw0[0] = fh->flm_data[9];
+	learn_record.qw0[1] = fh->flm_data[8];
+	learn_record.qw0[2] = fh->flm_data[7];
+	learn_record.qw0[3] = fh->flm_data[6];
+	learn_record.qw4[0] = fh->flm_data[5];
+	learn_record.qw4[1] = fh->flm_data[4];
+	learn_record.qw4[2] = fh->flm_data[3];
+	learn_record.qw4[3] = fh->flm_data[2];
+	learn_record.sw8 = fh->flm_data[1];
+	learn_record.sw9 = fh->flm_data[0];
+	learn_record.prot = fh->flm_prot;
+
+
+	/*
+	 * Meter (member) indices are packed into the mbr_idx byte array
+	 * through overlay structs.  Note the deliberate aliasing: ptr 1
+	 * and ptr 2 overlay the same bytes (fields .a and .b), as do
+	 * ptr 3 and ptr 4 at byte offset 7 — presumably the overlay
+	 * packs two sub-32-bit indices per region; verify against
+	 * struct flm_v17_mbr_idx_overlay's layout.
+	 */
+	struct flm_v17_mbr_idx_overlay *mbr_id1_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id2_ptr =
+		(struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+	struct flm_v17_mbr_idx_overlay *mbr_id3_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	struct flm_v17_mbr_idx_overlay *mbr_id4_ptr =
+		(struct flm_v17_mbr_idx_overlay *)(learn_record.mbr_idx + 7);
+	if (mtr_ids) {
+		mbr_id1_ptr->a = mtr_ids[0];
+		mbr_id2_ptr->b = mtr_ids[1];
+		mbr_id3_ptr->a = mtr_ids[2];
+		mbr_id4_ptr->b = mtr_ids[3];
+
+		/* Last non-zero mtr is used for statistics */
+		uint8_t mbrs = 0;
+
+		/* vol_idx = number of leading non-zero meter ids */
+		while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+			++mbrs;
+		learn_record.vol_idx = mbrs;
+	}
+
+	/* NAT is enabled whenever either a NAT address or port is set */
+	learn_record.nat_ip = fh->flm_nat_ipv4;
+	learn_record.nat_port = fh->flm_nat_port;
+	learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+	learn_record.dscp = fh->flm_dscp;
+	learn_record.teid = fh->flm_teid;
+	learn_record.qfi = fh->flm_qfi;
+	learn_record.rqi = fh->flm_rqi;
+	learn_record.color = fh->flm_rpl_ext_ptr &
+			     0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+	learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+			      << 10; /* Bit [13:10] used for MTU recipe */
+
+	learn_record.ent = 0;
+	learn_record.op = flm_op & 0xf;
+	learn_record.prio = fh->flm_prio & 0x3;
+	learn_record.ft = flm_ft;
+	learn_record.kid = fh->flm_kid;
+	learn_record.eor = 1;	/* end-of-record marker */
+
+	int res = flow_flm_apply(dev, &learn_record);
+	return res;
+}
+
+/*
+ * Allocate or reference the KM (Key Matcher) flow-type and category
+ * (RCP) resources for a new flow handle 'fh'.
+ *
+ * When no identical flow exists (!identical_flow_found): a KM flow type
+ * is reused if another flow has the same flow-type identity, otherwise
+ * a free one is claimed; the KM RCP is shared with a compatible flow
+ * (found_flow != NULL) or freshly allocated; finally the RCP is flushed
+ * and the KM match entry written.  When an identical flow was found,
+ * the found flow's KM flow type and category are re-referenced and the
+ * match entry only gains a reference.
+ *
+ * On success, *setup_km is set to 1 and *setup_km_ft / *setup_km_rcp
+ * receive the flow-type and category indices for later CAT programming.
+ * Returns 0 on success, 1 on resource exhaustion (error is filled in).
+ */
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+	struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+	struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+	struct flow_handle *flow)
+{
+	if (!identical_flow_found) {
+				/* Find existing KM FT that can be reused */
+		{
+			/* found_ft: slot with matching identity; found_zero: first free slot */
+			int found_ft = 0, found_zero = 0;
+
+			struct flm_flow_ft_ident_s *ft_idents =
+				(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+			struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
+			/* Slot 0 is skipped — presumably reserved; verify against allocator */
+			for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+				if (ft_ident.data == ft_idents[i].data) {
+					found_ft = i;
+					break;
+				} else if (found_zero == 0 && ft_idents[i].data == 0) {
+					found_zero = i;
+				}
+			}
+
+			if (found_ft) {
+				/* Same identity already in use: bump its refcount */
+				if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				fh->resource[RES_KM_FLOW_TYPE].count = 1;
+				fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+				fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+			} else if (found_zero) {
+				/* Claim the first free slot and record our identity in it */
+				if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+				found_zero, fh)) {
+					NT_LOG(ERR, FILTER, "ERROR: Could not get "
+					       "KM FLOW TYPE resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					return 1;
+				}
+
+				ft_idents[found_zero].data = ft_ident.data;
+			} else {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+		}
+		/* Attach resources to KM entry */
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* _update existing KM RCP or allocate a new RCP */
+		if (found_flow != NULL) {
+			/* Share the compatible flow's KM category */
+			if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+				->resource[RES_KM_CATEGORY].index)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+				       "KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			fh->resource[RES_KM_CATEGORY].count = 1;
+			fh->resource[RES_KM_CATEGORY].index =
+				found_flow->resource[RES_KM_CATEGORY].index;
+			fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+			if (fd->km.target == KM_CAM) {
+				/* Add our flow type to the shared RCP's FT mask (key A) */
+				uint32_t ft_a_mask = 0;
+
+				hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+				hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+					fh->resource[RES_KM_CATEGORY].index, 0,
+					ft_a_mask | (1 << fd->km.flow_type));
+			}
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+				NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+				return 1;
+			}
+
+			/* Note: km_rcp_set clears existing RCPs */
+			km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+		}
+
+		/* Set filter setup variables */
+		*setup_km = 1;
+		*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* _flush KM RCP and entry */
+		hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+		km_write_data_match_entry(&fd->km, 0);
+	} else {
+		/* Identical flow exists: reference all of its KM resources */
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+			found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_FLOW_TYPE].count = 1;
+		fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+		fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+		if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+			found_flow->resource[RES_KM_CATEGORY].index)) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return 1;
+		}
+
+		fh->resource[RES_KM_CATEGORY].count = 1;
+		fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+		fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+		km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+		fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+		*setup_km = 1;
+		/*
+		 * NOTE(review): uses 'flow' here, not 'found_flow' — this is
+		 * equivalent only if the caller's search loop broke with
+		 * flow == found_flow when identical_flow_found was set;
+		 * confirm at the call site.
+		 */
+		*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+		*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+	}
+	return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ *
+ * @param fd         flow definition carrying tunnel-header state
+ * @param eth_length length of the outer Ethernet header
+ * @param i          index into fd->modify_field being adjusted
+ * @param ofs        in/out: byte offset to relocate
+ * @param select     CPY_SELECT_* field selector for modify_field[i]
+ * @param l2_length  outer L2 header length
+ * @param l3_length  outer L3 header length
+ * @param l4_length  outer L4 header length
+ * @param dyn        out: set to 1 when the offset is rebased to static L2
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+	uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+	const uint32_t l4_length, uint32_t *dyn)
+{
+	if (fd->tun_hdr.len > eth_length) {
+		if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			/*
+			 * Bug fix: the original "ofs += ..." advanced the
+			 * local pointer instead of the pointed-to offset,
+			 * so the caller's offset was never adjusted.
+			 */
+			*ofs += fd->tun_hdr.len - eth_length;
+		} else {
+			/* Rebase onto static offsets from start of L2 */
+			switch (select) {
+			case CPY_SELECT_IPV4:
+			case CPY_SELECT_DSCP_IPV4:
+			case CPY_SELECT_DSCP_IPV6:
+				*ofs += l2_length;
+				break;
+			case CPY_SELECT_PORT:
+				*ofs += l2_length + l3_length;
+				break;
+			case CPY_SELECT_TEID:
+			case CPY_SELECT_RQI_QFI:
+				*ofs += l2_length + l3_length + l4_length;
+				break;
+			}
+			*dyn = 1;
+		}
+	}
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+		   const struct flow_attr *attr, struct flow_error *error,
+		   uint32_t port_id, uint32_t num_dest_port,
+		   uint32_t num_queues, uint32_t *packet_data,
+		   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+	uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+			    num_queues;
+	uint32_t flm_key_id = 0;
+	uint32_t flm_ft = 0;
+	uint16_t flm_rpl_ext_ptr = 0;
+
+	struct flow_handle *fh_flm = NULL;
+	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+	if (!fh) {
+		NT_LOG(ERR, FILTER, "ERR memory\n");
+		flow_nic_set_error(ERR_MEMORY, error);
+		return NULL;
+	}
+
+	fh->type = FLOW_HANDLE_TYPE_FLOW;
+	fh->port_id = port_id;
+	fh->dev = dev;
+	fh->fd = fd;
+
+	int setup_cat_cfn = 0;
+	int setup_cat_cot = 0;
+	int setup_cat_cts = 0;
+	int setup_qsl_rcp = 0;
+
+	int setup_flm = 0;
+	int setup_flm_ft = 0;
+
+	int setup_km = 0;
+	int setup_km_ft = 0;
+	int setup_km_rcp = 0;
+
+	int setup_default_ft = 0;
+
+	int setup_hst = 0;
+	int setup_tpe = 0;
+	int setup_tpe_encap_data = 0;
+
+	int free_fd = 0;
+
+	const int empty_pattern =
+		fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+		fd->vlans == 0 && fd->tunnel_prot < 0 &&
+		fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+	if (attr->group > 0 && empty_pattern) {
+		/*
+		 * Group 0 default filter actions
+		 */
+		struct flow_handle *fh_miss = NULL;
+
+		if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+			/* Error was printed to log by flm_flow_get_group_miss_fh */
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (fh_miss == NULL) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not setup default action for uninitialized group\n");
+			flow_nic_set_error(ERR_FAILED, error);
+			free(fh);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+						qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+					  fh_miss->resource[RES_QSL_RCP].index)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not reference QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		fh->resource[RES_QSL_RCP].count = 1;
+		fh->resource[RES_QSL_RCP].index =
+			fh_miss->resource[RES_QSL_RCP].index;
+		fh->resource[RES_QSL_RCP].referenced = 1;
+
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_qsl_rcp = 1;
+	} else if (attr->group > 0) {
+		/*
+		 * FLM programming
+		 */
+		struct flow_handle *fh_existing = NULL;
+		int cfn_to_copy = -1;
+
+		if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Priority value of FLM flow exceeds %u"
+			       "\n",
+			       dev->ndev->be.flm.nb_prios);
+			flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+					   packet_mask, &flm_key_id, &flm_ft,
+					   &cfn_to_copy, &setup_km_ft,
+					   &fh_existing)) {
+			/* Error was printed to log by flm_flow_learn_prepare */
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			free(fh);
+			return NULL;
+		}
+
+		setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+		setup_tpe =
+			(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+		/* Create HIT filter for new FLM FT */
+		if (cfn_to_copy >= 0) {
+			uint32_t value = 0;
+
+			nic_insert_flow(dev->ndev, fh);
+
+			setup_qsl_rcp = 1;
+			setup_cat_cot = 1;
+			setup_cat_cts = 1;
+
+			setup_default_ft = 1;
+
+			setup_flm = 1;
+			setup_flm_ft = (int)flm_ft;
+
+			setup_tpe |= setup_tpe_encap_data;
+
+			if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+					fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+				setup_hst = 1;
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_CFN,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT CFN resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_CAT_COT,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get CAT COT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_QSL_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			if (qsl_size > 0 &&
+					flow_nic_allocate_fh_resource(dev->ndev,
+								      RES_QSL_QST,
+								      fh, qsl_size, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get QSL QST resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			NT_LOG(DBG, FILTER,
+			       "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+			       fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+			       setup_flm_ft);
+
+			/* Copy parts from base MISS filter */
+			hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+					   fh->resource[RES_CAT_CFN].index, 0,
+					   cfn_to_copy);
+			hw_mod_cat_cfn_flush(&dev->ndev->be,
+					     fh->resource[RES_CAT_CFN].index,
+					     1);
+
+			hw_mod_cat_kcs_km_get(&dev->ndev->be,
+					      HW_CAT_KCS_CATEGORY,
+					      KM_FLM_IF_FIRST, cfn_to_copy,
+					      &value);
+			if (value > 0) {
+				setup_km = 1;
+				setup_km_rcp = (int)value;
+			}
+
+			hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST, cfn_to_copy,
+					       &value);
+			hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+					       HW_CAT_KCS_CATEGORY,
+					       KM_FLM_IF_FIRST,
+					       fh->resource[RES_CAT_CFN].index,
+					       value);
+			hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+						 KM_FLM_IF_FIRST,
+						 fh->resource[RES_CAT_CFN].index,
+						 1);
+
+			fh_flm = calloc(1, sizeof(struct flow_handle));
+			if (!fh_flm) {
+				flow_nic_set_error(ERR_MEMORY, error);
+				return NULL;
+			}
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh;
+		} else {
+			/* Reuse allocated memory */
+			fh_flm = fh;
+			fh = fh_existing;
+
+			nic_insert_flow_flm(dev->ndev, fh_flm);
+
+			fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+			fh_flm->dev = dev;
+			fh_flm->flm_owner = fh_existing;
+
+			free_fd = 1;
+		}
+
+		fh_flm->flm_owner->flm_ref_count += 1;
+	} else {
+		/*
+		 * Filter creation
+		 */
+		nic_insert_flow(dev->ndev, fh);
+
+		setup_cat_cfn = 1;
+		setup_cat_cts = 1;
+		setup_qsl_rcp = 1;
+
+		if (fd->km.num_ftype_elem) {
+			struct flow_handle *flow = dev->ndev->flow_base,
+						    *found_flow = NULL;
+			int identical_flow_found = 0;
+
+			/* Compute new KM key */
+			if (km_key_create(&fd->km, fh->port_id)) {
+				NT_LOG(ERR, FILTER, "KM creation failed\n");
+				flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+						   error);
+				return NULL;
+			}
+
+			fd->km.be = &dev->ndev->be;
+
+			/* Find existing KM key that can be reused */
+			while (flow) {
+				if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+						flow->fd->km
+						.flow_type && /* This check also skips self */
+						flow->resource[RES_KM_CATEGORY].count) {
+					int res = km_key_compare(&fd->km,
+								 &flow->fd->km);
+					if (res < 0) {
+						identical_flow_found = 1;
+						found_flow = flow;
+						break;
+					} else if (res > 0 &&
+							!flow->resource[RES_KM_CATEGORY]
+							.referenced &&
+							found_flow == NULL)
+						found_flow = flow;
+				}
+				flow = flow->next;
+			}
+				if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+					found_flow, identical_flow_found, dev, fd, error, fh, flow))
+					return NULL;
+		}
+
+		setup_default_ft = 1;
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+						  fh, 1, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get CAT CFN resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+						  1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL RCP resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		if (qsl_size > 0 &&
+				flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+							      fh, qsl_size, 1)) {
+			NT_LOG(ERR, FILTER,
+			       "ERROR: Could not get QSL QST resource\n");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+					   error);
+			return NULL;
+		}
+
+		/* Check if filter is set up for FLM */
+		if (fd->jump_to_group != UINT32_MAX) {
+			flm_flow_setup_group(dev, fd->jump_to_group,
+					     fh->resource[RES_CAT_CFN].index,
+					     fh->resource[RES_KM_FLOW_TYPE].index,
+					     fh);
+		}
+	}
+
+	/*
+	 * Setup QSL
+	 */
+	if (setup_qsl_rcp) {
+		if (qsl_size == 0) {
+			/* Create drop filter */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x3);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index, 0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index, 0);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+		} else {
+			const int table_start = fh->resource[RES_QSL_QST].index;
+			const int table_end = table_start +
+					      fh->resource[RES_QSL_QST].count -
+					      1;
+
+			/* Use 0x0 for pure retransmit */
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+					   fh->resource[RES_QSL_RCP].index,
+					   0x0);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+					   fh->resource[RES_QSL_RCP].index,
+					   num_dest_port > 0 ? 0x3 : 0x0);
+
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_start);
+			hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+					   fh->resource[RES_QSL_RCP].index,
+					   table_end);
+
+			hw_mod_qsl_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_QSL_RCP].index,
+					     1);
+
+			/* Setup QSL QST/QEN */
+			if (num_dest_port > 0 && num_queues > 0) {
+				int ports[num_dest_port];
+				int queues[num_queues];
+
+				int port_index = 0;
+				int queue_index = 0;
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					if (fd->dst_id[i].type == PORT_PHY) {
+						ports[port_index++] =
+							fd->dst_id[i].id;
+					} else if (fd->dst_id[i].type ==
+							PORT_VIRT) {
+						queues[queue_index++] =
+							fd->dst_id[i].id;
+					}
+				}
+
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   ports[i % num_dest_port]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   queues[i % num_queues]);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			} else if (num_dest_port > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_TX_PORT,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_LRE,
+							   table_start + i, 1);
+				}
+			} else if (num_queues > 0) {
+				for (int i = 0; i < fd->dst_num_avail; ++i) {
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_QUEUE,
+							   table_start + i,
+							   fd->dst_id[i].id);
+					hw_mod_qsl_qst_set(&dev->ndev->be,
+							   HW_QSL_QST_EN,
+							   table_start + i, 1);
+				}
+			}
+
+			hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+					     fd->dst_num_avail);
+		}
+	}
+
+	/*
+	 * Setup CAT KM functionality
+	 */
+	if (setup_km) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FS for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 setup_km_ft, 0, 1);
+
+		/* KM function select */
+		hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index,
+				      setup_km_rcp);
+		hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index, 1);
+
+		/* KM function enable */
+		hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST,
+				      fh->resource[RES_CAT_CFN].index / 8, &bm);
+		hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					fh->resource[RES_CAT_CFN].index / 8, 1);
+	} else if (setup_default_ft) {
+		/* Enable "no KM match" FT for key A */
+		set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				 0, 0, 1);
+	}
+
+	/*
+	 * Setup CAT FLM functionality
+	 */
+	if (setup_flm) {
+		uint32_t bm = 0;
+
+		/* Enable KM match FT for key A, and FLM match FT for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1); /* KM FT A */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_flm_ft, 2, 1); /* FLM FT C */
+
+		/* FLM function enable */
+		hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST,
+				       fh->resource[RES_CAT_CFN].index / 8,
+				       &bm);
+		hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+				       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+				       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+		hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+					 fh->resource[RES_CAT_CFN].index / 8,
+					 1);
+	} else if (setup_default_ft) {
+		/* Enable KM for key A and UNHANDLED for key C */
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+				  setup_km_ft, 0, 1);
+		set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+				  2, 1);
+	}
+
+	/*
+	 * Setup HST
+	 */
+	if (setup_hst) {
+		int hst_index = -1;
+
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+			uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+						       i))
+				continue;
+
+			hw_mod_hst_rcp_get(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE, i,
+					   &values[0]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   i, &values[1]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   i, &values[2]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   i, &values[3]);
+			hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   i, &values[4]);
+
+			if ((int)values[0] == 1 &&
+					(int)values[1] == fd->header_strip_start_dyn &&
+					(int)values[2] == fd->header_strip_start_ofs &&
+					(int)values[3] == fd->header_strip_end_dyn &&
+					(int)values[4] == fd->header_strip_end_ofs) {
+				hst_index = i;
+				break;
+			}
+		}
+
+		if (hst_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+						  hst_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_HST_RCP].count = 1;
+			fh->resource[RES_HST_RCP].index = hst_index;
+			fh->resource[RES_HST_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_HST_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get HST RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_STRIP_MODE,
+					   fh->resource[RES_HST_RCP].index, 1);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_start_ofs);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_dyn);
+			hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_end_ofs);
+
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_CMD,
+					   fh->resource[RES_HST_RCP].index,
+					   fd->header_strip_removed_outer_ip ? 7 : 6);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_DYN,
+					   fh->resource[RES_HST_RCP].index, 2);
+			hw_mod_hst_rcp_set(&dev->ndev->be,
+					   HW_HST_RCP_MODIF0_OFS,
+					   fh->resource[RES_HST_RCP].index, 0);
+
+			hw_mod_hst_rcp_flush(&dev->ndev->be,
+					     fh->resource[RES_HST_RCP].index, 1);
+		}
+	}
+
+	/*
+	 * Setup TPE
+	 */
+	if (setup_tpe_encap_data) {
+		int ext_rpl_index = -1;
+		int rpl_rpl_index = -1;
+		int rpl_rpl_length = -1;
+
+		/* Find existing RPL */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+			int found = 1;
+			uint32_t len;
+			uint32_t ptr;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN, i,
+					       &len);
+			if (len != fd->tun_hdr.len)
+				continue;
+
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+			for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+					++ptr_it) {
+				uint32_t data[4];
+
+				hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       ptr + ptr_it, data);
+
+				if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+						data[0] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+						data[1] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+						data[2] ||
+						fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+						data[3]) {
+					found = 0;
+					break;
+				}
+			}
+
+			if (found) {
+				ext_rpl_index = i;
+				rpl_rpl_index = (int)ptr;
+				rpl_rpl_length = (int)len;
+				break;
+			}
+		}
+
+		/* Set RPL data */
+		if (ext_rpl_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+						  ext_rpl_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+				if (flow_nic_ref_resource(dev->ndev,
+							  RES_TPE_RPL,
+							  rpl_rpl_index + i)) {
+					NT_LOG(ERR, FILTER,
+					       "ERROR: Could not reference TPE RPL resource\n");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+							   error);
+					return NULL;
+				}
+			}
+		} else {
+			ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+								RES_TPE_EXT, 1);
+			if (ext_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE EXT resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+			rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+								       RES_TPE_RPL,
+								       rpl_rpl_length,
+								       1);
+			if (rpl_rpl_index < 0) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RPL resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Program new encap header data */
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       ext_rpl_index, rpl_rpl_index);
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       ext_rpl_index, fd->tun_hdr.len);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+						 1);
+
+			for (int i = 0; i < rpl_rpl_length; ++i) {
+				hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+						       HW_TPE_RPL_RPL_VALUE,
+						       rpl_rpl_index + i,
+						       fd->tun_hdr.d.hdr32 + i * 4);
+			}
+			hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+						 rpl_rpl_length);
+		}
+
+		flm_rpl_ext_ptr = ext_rpl_index;
+	}
+
+	if (setup_tpe) {
+		const uint32_t eth_length = 14;
+		const uint32_t l2_length = fd->tun_hdr.l2_len;
+		const uint32_t l3_length = fd->tun_hdr.l3_len;
+		const uint32_t l4_length = fd->tun_hdr.l4_len;
+		const uint32_t fcs_length = 4;
+
+		int tpe_index = -1;
+
+		/* Find existing RCP */
+		for (int i = 1;
+				i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+			uint32_t value;
+
+			if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+						       i))
+				continue;
+
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_LEN, i, &value);
+			if (value != fd->tun_hdr.len)
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_DYN, i, &value);
+			if (value != (fd->tun_hdr.len > 0 ? 1 : 0))
+				continue;
+			hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+					       HW_TPE_RPL_RCP_OFS, i, &value);
+			if (value != 0)
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_L3_PRT, i,
+					       &value);
+			if (value != (fd->tun_hdr.new_outer ?
+				      (fd->tun_hdr.ip_version == 4 ? 1 : 2) : 0))
+				continue;
+			hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+					       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+					       &value);
+			if (value != (fd->tun_hdr.new_outer ? l2_length :
+				      (fd->tun_hdr.len == 0 ? 0 : fd->tun_hdr.len - eth_length)))
+				continue;
+
+			tpe_index = i;
+			break;
+		}
+
+		/* Set RCP data */
+		if (tpe_index >= 0) {
+			if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+						  tpe_index)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not reference TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			fh->resource[RES_TPE_RCP].count = 1;
+			fh->resource[RES_TPE_RCP].index = tpe_index;
+			fh->resource[RES_TPE_RCP].referenced = 1;
+		} else {
+			if (flow_nic_allocate_fh_resource(dev->ndev,
+							  RES_TPE_RCP,
+							  fh, 1, 1)) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR: Could not get TPE RCP resource\n");
+				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+						   error);
+				return NULL;
+			}
+
+			/* Extend packet if needed. */
+			if (fd->tun_hdr.len > eth_length) {
+				/* Extend FPGA packet buffer */
+				hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPP_RCP_EXP,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+
+				/*
+				 * Insert 0's into packet
+				 * After this step DYN offsets are shifted by encap length,
+				 * so only DYN offset 1 and 18 should be used
+				 */
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index, 1);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index, 0);
+				hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+						       HW_TPE_INS_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length);
+				hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			if (fd->tun_hdr.len > 0) {
+				/* Write header data to beginning of packet */
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_RPL_PTR,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+						       HW_TPE_RPL_RCP_EXT_PRIO,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			for (unsigned int i = 0; i < fd->modify_field_count;
+					++i) {
+				uint32_t select = fd->modify_field[i].select;
+				uint32_t dyn = fd->modify_field[i].dyn;
+				uint32_t ofs = fd->modify_field[i].ofs;
+				uint32_t len = fd->modify_field[i].len;
+
+				align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+					l3_length, l4_length, &dyn);
+
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_READER_SELECT,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       select);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_DYN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       dyn);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_OFS,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       ofs);
+				hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+						       HW_TPE_CPY_RCP_LEN,
+						       fh->resource[RES_TPE_RCP].index +
+						       16 * i,
+						       len);
+				hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index +
+							 16 * i,
+							 1);
+			}
+
+			if (fd->tun_hdr.new_outer) {
+				/*
+				 * UDP length
+				 * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+				 */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + 4);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* IPv4/IPv6 length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length +
+						       (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length +
+						       (fd->tun_hdr.ip_version == 4 ?
+						       0 : l3_length) + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* GTP length */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length + l4_length + 2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       18);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       -(l2_length + l3_length + l4_length +
+						       8 + fcs_length));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+
+				/* _update TTL */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_WR,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_enable);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_DYN,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->ttl_sub_outer ? 1 : DYN_L3);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TTL_POS_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->ttl_sub_outer ?
+						       l2_length :
+						       fd->tun_hdr.len - eth_length) +
+						       (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       1);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       6);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       2);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       l2_length + l3_length);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L3_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_INNER_L4_OFS,
+						       fh->resource[RES_TPE_RCP].index,
+						       fd->tun_hdr.len - eth_length
+							- 4 * fd->tun_hdr.nb_vlans);
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			} else {
+				/* _update TTL */
+				if (fd->ttl_sub_enable) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_enable);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->ttl_sub_outer ? DYN_L3 :
+							       DYN_TUN_L3);
+					if (fd->tun_hdr.len == 0) {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       fd->ttl_sub_ipv4 ? 8 : 7);
+					} else {
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_HFU_RCP_TTL_POS_OFS,
+								       fh->resource[RES_TPE_RCP]
+								       .index,
+								       (fd->tun_hdr.len -
+								       eth_length) +
+								       (fd->ttl_sub_ipv4 ?
+								       8 : 7));
+					}
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_WR,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_DYN,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_TTL_POS_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				}
+
+				/* _update FPGA DYN offsets */
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_CS_INF,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L3_FRAG,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_TUNNEL,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+						       HW_TPE_HFU_RCP_L4_PRT,
+						       fh->resource[RES_TPE_RCP].index,
+						       0);
+				if (fd->tun_hdr.len == 0) {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       0);
+				} else {
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_OUTER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L3_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+					hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+							       HW_TPE_HFU_RCP_INNER_L4_OFS,
+							       fh->resource[RES_TPE_RCP].index,
+							       fd->tun_hdr.len - eth_length);
+				}
+
+				hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+							 fh->resource[RES_TPE_RCP].index,
+							 1);
+			}
+
+			/* Calculate valid outer and inner checksums */
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_OUTER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L3_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+					       HW_TPE_CSU_RCP_INNER_L4_CMD,
+					       fh->resource[RES_TPE_RCP].index,
+					       3);
+			hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+						 fh->resource[RES_TPE_RCP].index,
+						 1);
+		}
+	}
+
+	/*
+	 * Setup CAT Color Table functionality
+	 */
+	if (setup_cat_cot) {
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+				   fh->resource[RES_CAT_COT].index, 0);
+		hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+				   fh->resource[RES_CAT_COT].index, 0x4);
+		hw_mod_cat_cot_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_COT].index, 1);
+	}
+
+	/*
+	 * Setup CAT action functionality
+	 */
+	if (setup_cat_cts) {
+		/* Setup CAT CTS */
+		const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   fh->resource[RES_CAT_COT].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 0,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 1,
+				   fh->resource[RES_QSL_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 2,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 3,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   fh->resource[RES_HST_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 4,
+				   0);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   fh->resource[RES_TPE_RCP].index);
+		hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+				   offset * fh->resource[RES_CAT_CFN].index + 5,
+				   0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+		/* Setup CAT CTE */
+		hw_mod_cat_cte_set(&dev->ndev->be,
+				   HW_CAT_CTE_ENABLE_BM,
+				   fh->resource[RES_CAT_CFN].index,
+				   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+				   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+				   0x040 |
+				   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+				   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+		hw_mod_cat_cte_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/*
+	 * Setup CAT CFN
+	 *
+	 * Once CAT CFN has been programmed traffic will start match the filter,
+	 * so CAT CFN must be the last thing to be programmed.
+	 */
+	if (setup_cat_cfn) {
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+		/* Protocol checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (0xf << fd->vlans) & 0xf);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->fragmentation);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_PTC_TUNNEL,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l3_prot != -1 ?
+				   (1 << fd->tunnel_l3_prot) : -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   fd->tunnel_l4_prot != -1 ?
+				   (1 << fd->tunnel_l4_prot) : -1);
+
+		/* Error checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be,
+				   HW_CAT_CFN_ERR_TNL_TTL_EXP,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+		/* MAC port check */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+				   fh->resource[RES_CAT_CFN].index, 0,
+				   1 << fh->port_id);
+
+		/* Pattern match checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* Length checks */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x0);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+				   fh->resource[RES_CAT_CFN].index, 0, -1);
+
+		/* KM and FLM */
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x1);
+		hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+				   fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+		hw_mod_cat_cfn_flush(&dev->ndev->be,
+				     fh->resource[RES_CAT_CFN].index, 1);
+	}
+
+	/* Program FLM flow */
+	if (fh_flm) {
+		convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+				  flm_rpl_ext_ptr, attr->priority);
+		flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+	}
+
+	if (free_fd)
+		free(fd);
+
+	return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * One-time preparation of the per-NIC shared flow resources for the inline
+ * profile. Reserves index 0 (and FLM flow type 1) of every resource pool as
+ * default/catch-all entries, programs default recipes into CAT/QSL/PDB/HSH/
+ * COT, unblocks the RMC MAC path, calibrates and resets the FLM SDRAM, and
+ * allocates the meter/flow-type bookkeeping tables.
+ *
+ * Idempotent: guarded by ndev->flow_mgnt_prepared. Returns 0 on success;
+ * on any failure the partially initialized state is torn down via
+ * done_flow_management_of_ndev_profile_inline() and -1 is returned.
+ */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+	if (!ndev->flow_mgnt_prepared) {
+		/* Check static arrays are big enough */
+		assert(ndev->be.tpe.nb_cpy_writers <=
+		       MAX_CPY_WRITERS_SUPPORTED);
+
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low priority catch all filter */
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+				   0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+		/* Initialize QSL with unmatched recipe index 0 - discard */
+		if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+		/* Initialize QST with default index 0 */
+		if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+				       0x0) < 0)
+			goto err_exit0;
+		if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+		/* HST & TPE index 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+				0)
+			goto err_exit0;
+		if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+				0)
+			goto err_exit0;
+
+		if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+		/* Set default hasher recipe to 5-tuple */
+		flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+		flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+		/*
+		 * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+		 * this entry
+		 */
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+			goto err_exit0;
+
+		flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+		/* Unblock MAC and MAC statistics on this NIC */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+			goto err_exit0;
+		/* block keep alive - not needed */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+			goto err_exit0;
+		/*
+		 * Unblock all MAC ports
+		 */
+		if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+			goto err_exit0;
+
+		/*
+		 *  unblock RPP slices
+		 */
+		hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+		if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+			goto err_exit0;
+
+		/* FLM */
+		if (flm_sdram_calibrate(ndev) < 0)
+			goto err_exit0;
+		if (flm_sdram_reset(ndev, 1) < 0)
+			goto err_exit0;
+		flm_flow_handle_create(&ndev->flm_res_handle);
+
+		/* Disable all FLM status reporting interrupts/flags by default */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+				       0); /* Learn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+				       0); /* Learn fail status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+				       0); /* Learn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+				       0); /* Unlearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+				       0); /* Unlearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+				       0); /* Relearn done status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+				       0); /* Relearn ignore status */
+		hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+		hw_mod_flm_control_flush(&ndev->be);
+
+		/* Four priority levels with decreasing FIFO drop thresholds */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+				    0); /* Drop at 100% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+				    4); /* Drop at 25% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12,5% FIFO fill level */
+		hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+		hw_mod_flm_prio_flush(&ndev->be);
+
+		/* Same periodic-statistics limits for every PST profile */
+		for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+					   FLM_PERIODIC_STATS_BYTE_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+					   FLM_PERIODIC_STATS_PKT_LIMIT);
+			hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+					   FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+		}
+		hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+		hw_mod_flm_stat_update(&ndev->be);
+
+		/* Bookkeeping tables; freed by the done_...() counterpart */
+		ndev->flm_mtr_handle =
+			calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+		ndev->ft_res_handle =
+			calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+		ndev->mtr_stat_handle =
+			calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+		if (ndev->flm_mtr_handle == NULL ||
+				ndev->ft_res_handle == NULL ||
+				ndev->mtr_stat_handle == NULL)
+			goto err_exit0;
+
+		struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
+		/* Atomic stores: stats are read concurrently by the stats path */
+		for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+			__atomic_store_n(&mtr_stat[i].n_pkt, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].n_bytes, 0, __ATOMIC_RELAXED);
+			__atomic_store_n(&mtr_stat[i].stats_mask, 0, __ATOMIC_RELAXED);
+		}
+
+		if (flow_group_handle_create(&ndev->group_handle,
+					     FLM_FLOW_RCP_MAX))
+			goto err_exit0;
+
+		ndev->flow_mgnt_prepared = 1;
+	}
+	return 0;
+
+err_exit0:
+	done_flow_management_of_ndev_profile_inline(ndev);
+	return -1;
+}
+
+/*
+ * Tear down everything set up by
+ * initialize_flow_management_of_ndev_profile_inline(): resets default
+ * hardware recipes back to zero, releases the reserved index-0 resources
+ * and frees the meter/flow-type bookkeeping tables. Safe to call on a
+ * partially initialized device (used as the init error path). Always
+ * returns 0.
+ *
+ * NOTE(review): when FLOW_DEBUG is defined, the debug mode is switched to
+ * WRITE before the flow_mgnt_prepared check, but only restored to NONE
+ * inside it - an early bail-out leaves debug mode enabled. Confirm whether
+ * that is intended.
+ */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+	ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+				       FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (ndev->flow_mgnt_prepared) {
+		flm_sdram_reset(ndev, 0);
+		flm_flow_handle_remove(&ndev->flm_res_handle);
+
+		flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+		hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+		hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+		flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+		flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+		/* free(NULL) is a no-op, so this is safe on the init error path */
+		free(ndev->flm_mtr_handle);
+		free(ndev->ft_res_handle);
+		free(ndev->mtr_stat_handle);
+		flow_group_handle_destroy(&ndev->group_handle);
+
+		hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+		hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+		hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+		hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+		hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+		hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+		hw_mod_tpe_reset(&ndev->be);
+		flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+		flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+		flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+		hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+		hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+		hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+		hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+		hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+		flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+		ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+					       FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+		ndev->flow_mgnt_prepared = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a flow rule without programming it: run the match/action lists
+ * through interpret_flow_elements() under the device mutex and discard the
+ * resulting flow definition. Returns 0 when the rule parses, -1 otherwise
+ * (details in *error via flow_nic_set_error()/interpret_flow_elements()).
+ */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error)
+{
+	uint32_t port_id = 0;
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+
+	/* Scratch outputs from interpretation; not used for validation */
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,
+							  error, 0, &port_id,
+							  &num_dest_port, &num_queues,
+							  packet_data, packet_mask,
+							  &key_def);
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	if (!fd)
+		return -1;
+
+	/* Validation only - the interpreted definition is not kept */
+	free(fd);
+	return 0;
+}
+
+/*
+ * Create a flow rule for the inline profile: interpret the match/action
+ * lists, translate caller group IDs to device group indices, then build
+ * and flush the filter to the NIC via create_flow_filter(). Runs entirely
+ * under the device mutex. Returns the new flow handle, or NULL with
+ * *error set on failure.
+ *
+ * NOTE(review): on the err_exit paths taken after interpret_flow_elements()
+ * succeeds but before create_flow_filter() is called (group translation
+ * failures), fd does not appear to be freed - verify ownership of fd
+ * against create_flow_filter()'s error handling.
+ */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr, const struct flow_elem elem[],
+	const struct flow_action action[], struct flow_error *error)
+{
+	struct flow_handle *fh = NULL;
+
+	uint32_t port_id = UINT32_MAX;
+	uint32_t num_dest_port;
+	uint32_t num_queues;
+
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	/* Local copy so the caller's attributes are never modified */
+	struct flow_attr attr_local;
+
+	memcpy(&attr_local, attr, sizeof(struct flow_attr));
+	/* Forced VLAN VID only applies to group 0 (root) rules */
+	if (attr_local.group > 0)
+		attr_local.forced_vlan_vid = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+
+	struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+							  attr_local.forced_vlan_vid,
+							  &port_id, &num_dest_port,
+							  &num_queues, packet_data,
+							  packet_mask, &key_def);
+	if (!fd)
+		goto err_exit;
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, fd->jump_to_group,
+					&fd->jump_to_group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+	if (attr_local.group > 0 &&
+			flow_group_translate_get(dev->ndev->group_handle,
+					attr_local.caller_id, attr_local.group,
+					&attr_local.group)) {
+		NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		goto err_exit;
+	}
+
+	/* No explicit destination port in the actions: use this device's port */
+	if (port_id == UINT32_MAX)
+		port_id = dev->port_id;
+
+	/* Create and flush filter to NIC */
+	fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+				num_dest_port, num_queues, packet_data,
+				packet_mask, &key_def);
+	if (!fh)
+		goto err_exit;
+
+	NT_LOG(DBG, FILTER,
+	       "New FlOW: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+	       fd);
+	NT_LOG(DBG, FILTER,
+	       ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+	       dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return fh;
+
+err_exit:
+	if (fh)
+		flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+	return NULL;
+}
+
+/*
+ * Destroy one flow handle; caller must hold dev->ndev->mtx.
+ *
+ * For FLM (learned) flows: unlearn the flow from hardware, release the
+ * TPE replace-extension and replace-data entries when their reference
+ * counts reach zero, and drop the owner's reference - destroying the
+ * owner flow recursively when it was the last one. For regular flows:
+ * walk every resource type the handle references, and for each index
+ * whose NIC-wide reference count drops to zero, reset the corresponding
+ * hardware entry to defaults and flush it.
+ *
+ * Returns 0 on success, non-zero if any hardware teardown step failed
+ * (errors are accumulated in err; *error is set accordingly). fh and,
+ * for regular flows, fh->fd are freed in all cases.
+ */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *fh,
+				       struct flow_error *error)
+{
+	assert(dev);
+	assert(fh);
+
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	/* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+		/* Unlearn the flow from the FLM (flow_type 0 = remove) */
+		err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+		/* Release TPE replace extension + data when last reference */
+		if (fh->flm_rpl_ext_ptr > 0 &&
+				flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+							(int)fh->flm_rpl_ext_ptr) == 0) {
+			uint32_t ptr = 0;
+			uint32_t len = 0;
+
+			/* Read back where the replace data lives and how long it is */
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_RPL_PTR,
+					       (int)fh->flm_rpl_ext_ptr, &ptr);
+			hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+					       HW_TPE_RPL_EXT_META_RPL_LEN,
+					       (int)fh->flm_rpl_ext_ptr, &len);
+
+			hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+					       HW_TPE_PRESET_ALL,
+					       (int)fh->flm_rpl_ext_ptr, 0);
+			hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+						 (int)fh->flm_rpl_ext_ptr, 1);
+
+			/* Replace data is stored in 16-byte chunks; zero each
+			 * chunk whose reference count reaches zero
+			 */
+			for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+				if (flow_nic_deref_resource(dev->ndev,
+							    RES_TPE_RPL,
+							    (int)(ptr + ii)) == 0) {
+					uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+					hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+							       HW_TPE_PRESET_ALL,
+							       (int)(ptr + ii),
+							       rpl_zero);
+					hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+								 (int)(ptr + ii),
+								 1);
+				}
+			}
+		}
+
+		flow_group_translate_release(dev->ndev->group_handle,
+					     fh->flm_owner->flm_group_index);
+
+		/* Last FLM flow of this owner: destroy the owner flow too */
+		fh->flm_owner->flm_ref_count -= 1;
+		if (fh->flm_owner->flm_ref_count == 0) {
+			err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+			err |= flow_destroy_locked_profile_inline(dev,
+								  fh->flm_owner,
+								  error);
+		}
+	} else {
+		NT_LOG(DBG, FILTER, "removing flow :%p\n", fh);
+
+		if (fh->fd) {
+			if (fh->fd->km.num_ftype_elem)
+				km_clear_data_match_entry(&fh->fd->km);
+
+			if (fh->fd->jump_to_group != UINT32_MAX) {
+				err |= flm_flow_destroy_group(dev,
+							      fh->fd->jump_to_group);
+				flow_group_translate_release(dev->ndev->group_handle,
+							     fh->fd->jump_to_group);
+			}
+		}
+
+		/* Dereference every resource this handle uses; reset the HW
+		 * entry when this was the last reference
+		 */
+		for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+			if (fh->resource[res_type].count < 1)
+				continue;
+
+			for (int ii = 0; ii < fh->resource[res_type].count;
+					ii++) {
+				/* If last ref count of this resource, free it */
+				if (flow_nic_deref_resource(dev->ndev,
+							    res_type,
+							    fh->resource[res_type].index +
+							    ii) == 0) {
+					/* Free resource up in NIC */
+					switch (res_type) {
+					case RES_CAT_CFN:
+						assert(ii == 0);
+						err |= reset_cat_function_setup(dev,
+							fh->resource[RES_CAT_CFN]
+							.index + ii);
+						break;
+
+					case RES_QSL_QST:
+						hw_mod_qsl_qst_set(&dev->ndev->be,
+								   HW_QSL_QST_PRESET_ALL,
+								   fh->resource[RES_QSL_QST]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_qst_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_QST]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_QSL_RCP:
+						hw_mod_qsl_rcp_set(&dev->ndev->be,
+								   HW_QSL_RCP_PRESET_ALL,
+								   fh->resource[RES_QSL_RCP]
+								   .index + ii,
+								   0);
+						hw_mod_qsl_rcp_flush(&dev->ndev->be,
+								     fh->resource[RES_QSL_RCP]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_CAT_COT:
+						hw_mod_cat_cot_set(&dev->ndev->be,
+								   HW_CAT_COT_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_cat_cot_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_KM_CATEGORY:
+						assert(ii == 0);
+						hw_mod_km_rcp_set(&dev->ndev->be,
+								  HW_KM_RCP_PRESET_ALL,
+								  fh->resource[res_type]
+								  .index + ii,
+								  0, 0);
+						hw_mod_km_rcp_flush(&dev->ndev->be,
+								    fh->resource[res_type]
+								    .index + ii,
+								    1);
+						break;
+
+					case RES_KM_FLOW_TYPE: {
+						/* Clear the flow-type identity
+						 * in the software table only
+						 */
+						struct flm_flow_ft_ident_s *ft_idents =
+							(struct flm_flow_ft_ident_s
+							 *)dev->ndev
+							->ft_res_handle;
+						ft_idents[fh->resource[res_type]
+							  .index +
+							  ii]
+						.data = 0;
+					}
+					break;
+
+					case RES_FLM_RCP:
+						assert(ii == 0);
+						err |= flm_flow_destroy_rcp(dev,
+									    fh->resource[res_type]
+									    .index + ii);
+						break;
+
+					case RES_FLM_FLOW_TYPE:
+						/* Nothing needed */
+						break;
+
+					case RES_HSH_RCP:
+						hw_mod_hsh_rcp_set(&dev->ndev->be,
+								   HW_HSH_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0, 0);
+						hw_mod_hsh_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index +
+								     ii,
+								     1);
+						break;
+
+					case RES_PDB_RCP:
+						hw_mod_pdb_rcp_set(&dev->ndev->be,
+								   HW_PDB_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_pdb_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_HST_RCP:
+						hw_mod_hst_rcp_set(&dev->ndev->be,
+								   HW_HST_RCP_PRESET_ALL,
+								   fh->resource[res_type]
+								   .index + ii,
+								   0);
+						hw_mod_hst_rcp_flush(&dev->ndev->be,
+								     fh->resource[res_type]
+								     .index + ii,
+								     1);
+						break;
+
+					case RES_TPE_RCP:
+						/* One TPE recipe index covers all
+						 * TPE sub-modules; reset each
+						 */
+						hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+								       HW_TPE_PRESET_ALL,
+								       fh->resource[res_type]
+								       .index + ii,
+								       0);
+						hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+									 fh->resource[res_type]
+									 .index + ii,
+									 1);
+						break;
+
+					case RES_TPE_EXT:
+						/* Nothing needed */
+						break;
+
+					case RES_TPE_RPL:
+						/* Nothing needed */
+						break;
+
+					default:
+						err |= -1;
+						break;
+					}
+				}
+			}
+		}
+		free(fh->fd);
+	}
+
+	if (err) {
+		NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+		flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+	}
+
+	free(fh);
+
+#ifdef FLOW_DEBUG
+	dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+					    FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+	return err;
+}
+
+/*
+ * Destroy a single flow, or - when flow is NULL - every flow (regular and
+ * FLM) created on this eth device. Takes the device mutex itself, so it
+ * must NOT be called with dev->ndev->mtx already held; use
+ * flow_destroy_locked_profile_inline() in that case. Returns 0 on
+ * success, or the first non-zero error from a destroy (iteration stops
+ * on first error).
+ */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error)
+{
+	int err = 0;
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	pthread_mutex_lock(&dev->ndev->mtx);
+	if (flow) {
+		/* Delete this flow */
+		err = flow_destroy_locked_profile_inline(dev, flow, error);
+	} else {
+		/* Delete all created flows from this eth device */
+		flow = dev->ndev->flow_base;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				/* Save next: destroy frees the current node */
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+
+		/* Delete all created FLM flows from this eth device */
+		flow = dev->ndev->flow_base_flm;
+
+		while (flow && !err) {
+			if (flow->dev == dev) {
+				struct flow_handle *flow_next = flow->next;
+
+				err = flow_destroy_locked_profile_inline(dev,
+									 flow,
+									 NULL);
+				flow = flow_next;
+			} else {
+				flow = flow->next;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&dev->ndev->mtx);
+
+	return err;
+}
+
+/*
+ * rte_flow_flush backend stub for the inline profile - not implemented.
+ * Always fails with FLOW_ERROR_GENERAL and returns -1.
+ */
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_flush is not supported";
+	return -1;
+}
+
+/*
+ * rte_flow_query backend stub for the inline profile - not implemented.
+ * Clears *data/*length, sets FLOW_ERROR_GENERAL and returns -1.
+ */
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+			      UNUSED struct flow_handle *flow,
+			      UNUSED const struct flow_action *action,
+			      void **data, uint32_t *length,
+			      struct flow_error *error)
+{
+	NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+	*length = 0;
+	*data = NULL;
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "rte_flow_query is not supported";
+	return -1;
+}
+
+/*
+ * Read the FLM statistics counters into data[]. Each counter is
+ * accumulated into the caller's array (data[i] += value), except
+ * HW_FLM_STAT_FLOWS which is a gauge and overwrites data[i]. The order
+ * of entries matches the fields[] table below; size must be at least
+ * that many elements or -1 is returned.
+ */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size)
+{
+	const enum hw_flm_e fields[] = {
+		HW_FLM_STAT_FLOWS,	HW_FLM_STAT_LRN_DONE,
+		HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+		HW_FLM_STAT_UNL_DONE,	HW_FLM_STAT_UNL_IGNORE,
+		HW_FLM_STAT_AUL_DONE,	HW_FLM_STAT_AUL_IGNORE,
+		HW_FLM_STAT_AUL_FAIL,	HW_FLM_STAT_TUL_DONE,
+		HW_FLM_STAT_REL_DONE,	HW_FLM_STAT_REL_IGNORE,
+		HW_FLM_STAT_PRB_DONE,	HW_FLM_STAT_PRB_IGNORE,
+
+		HW_FLM_STAT_STA_DONE,	HW_FLM_STAT_INF_DONE,
+		HW_FLM_STAT_INF_SKIP,	HW_FLM_STAT_PCK_HIT,
+		HW_FLM_STAT_PCK_MISS,	HW_FLM_STAT_PCK_UNH,
+		HW_FLM_STAT_PCK_DIS,	HW_FLM_STAT_CSH_HIT,
+		HW_FLM_STAT_CSH_MISS,	HW_FLM_STAT_CSH_UNH,
+		HW_FLM_STAT_CUC_START,	HW_FLM_STAT_CUC_MOVE,
+	};
+
+	const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+	if (size < fields_cnt)
+		return -1;
+
+	/* Latch the current hardware counters into the backend shadow */
+	hw_mod_flm_stat_update(&ndev->be);
+
+	for (uint64_t i = 0; i < fields_cnt; ++i) {
+		uint32_t value = 0;
+
+		hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+		/* FLOWS is a gauge (current count); the rest accumulate */
+		data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+			  data[i] + value;
+		/* Counters after PRB_IGNORE only exist in FLM version >= 18 */
+		if (ndev->be.flm.ver < 18 &&
+				fields[i] == HW_FLM_STAT_PRB_IGNORE)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the TX MTU for a port by enabling the port's IFR (fragmentation)
+ * recipe in both the RPP-IFR and IFR TPE sub-modules and setting its MTU
+ * value. The recipe index is derived from the port number. Register writes
+ * are only flushed to hardware when every set call succeeded. Returns 0 on
+ * success, non-zero (OR of backend errors) otherwise; -1 for port >= 255.
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+	if (port >= 255)
+		return -1;
+
+	int err = 0;
+	uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+	struct flow_nic_dev *ndev = dev->ndev;
+
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+					  ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+					  ifr_mtu_recipe, mtu);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+				      ifr_mtu_recipe, 1);
+	err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+				      ifr_mtu_recipe, mtu);
+
+	if (err == 0) {
+		err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+						    1);
+		err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+	}
+
+	return err;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
new file mode 100644
index 0000000000..330cc39db6
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Public interface of the inline flow profile: lifecycle management of the
+ * per-NIC flow resources plus the rte_flow-style create/destroy/validate/
+ * query/flush operations and FLM statistics readout.
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+/* Tear down the shared flow resources of a NIC device; returns 0. */
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/* One-time setup of shared flow resources; 0 on success, -1 on failure. */
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+/* Destroy one flow; caller must already hold the device mutex. */
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+				       struct flow_handle *flow,
+				       struct flow_error *error);
+
+/* Parse-only check of a match/action list; does not touch hardware. */
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+				 const struct flow_elem elem[],
+				 const struct flow_action action[],
+				 struct flow_error *error);
+
+/* Create and program a flow; returns its handle or NULL with *error set. */
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+	const struct flow_attr *attr,
+	const struct flow_elem elem[], const struct flow_action action[],
+	struct flow_error *error);
+
+/* Destroy one flow, or all flows of the device when flow is NULL. */
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+				struct flow_handle *flow,
+				struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_error *error);
+
+/* Not implemented; always fails with FLOW_ERROR_GENERAL. */
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+			      struct flow_handle *flow,
+			      const struct flow_action *action, void **data,
+			      uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+/* Read FLM counters into data[]; size must cover all fields, else -1. */
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+				      uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
new file mode 100644
index 0000000000..1214b32666
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation into ntservice driver
+ *
+ * General note on this backend implementation:
+ * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
+ */
+
+/* Per-adapter backend state: one handle per nthw hardware module this backend
+ * drives. A NULL module pointer means the module is absent on this FPGA image
+ * (see the *_get_present() callbacks below).
+ */
+static struct backend_dev_s {
+	uint8_t adapter_no;
+	enum debug_mode_e dmode;
+	struct info_nthw *p_info_nthw;
+	struct cat_nthw *p_cat_nthw;
+	struct km_nthw *p_km_nthw;
+	struct flm_nthw *p_flm_nthw;
+	struct hsh_nthw *p_hsh_nthw;
+	struct hst_nthw *p_hst_nthw;
+	struct qsl_nthw *p_qsl_nthw;
+	struct slc_nthw *p_slc_nthw;
+	struct slc_lr_nthw *p_slc_lr_nthw;
+	struct pdb_nthw *p_pdb_nthw;
+	struct ioa_nthw *p_ioa_nthw;
+	struct roa_nthw *p_roa_nthw;
+	struct rmc_nthw *p_rmc_nthw;
+	struct hfu_nthw *p_hfu_nthw; /* TPE module */
+	struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+	struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+	struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+	struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+	struct csu_nthw *p_csu_nthw; /* TPE module */
+	struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
+/* Enable module debug tracing when write-debug is requested. Deliberately NOT
+ * wrapped in do/while(0): it declares __debug__ in the caller's scope so that
+ * the matching _CHECK_DEBUG_OFF() in the same scope can restore the mode.
+ * NOTE(review): "__debug__" uses a reserved identifier pattern (C11 7.1.3).
+ */
+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \
+	int __debug__ = 0;                                             \
+	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+		do {                                                   \
+			mod##_nthw_set_debug_mode(inst, 0xFF);            \
+			__debug__ = 1;                                 \
+	} while (0)
+
+/* Counterpart of _CHECK_DEBUG_ON(): turn tracing back off if it was enabled. */
+#define _CHECK_DEBUG_OFF(mod, inst)                      \
+	do {                                             \
+		if (__debug__)                           \
+			mod##_nthw_set_debug_mode(inst, 0); \
+	} while (0)
+
+/* Backend callback: record the requested debug mode; consulted later by
+ * _CHECK_DEBUG_ON() in every flush function. Always returns 0.
+ */
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	be->dmode = mode;
+	return 0;
+}
+
+/*
+ *  *****************  INFO  *******************
+ *
+ * Capability getters: each callback simply forwards to the corresponding
+ * info_nthw accessor, reporting a dimension/capability of the FPGA image
+ * (counts of categories, banks, record sizes, etc.). All take the opaque
+ * backend handle registered with the flow API.
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+
+/*
+ *  *****************  CAT  *******************
+ */
+
+/* True when the CAT (categorizer) module exists on this FPGA image. */
+static bool cat_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_cat_nthw != NULL;
+}
+
+/* Pack the CAT module version as major<<16 | minor (minor masked to 16 bits). */
+static uint32_t cat_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
+			  (module_get_minor_version(be->p_cat_nthw->m_cat) &
+			   0xffff));
+}
+
+/* Flush @cnt CAT CFN (categorizer function) records to hardware, starting at
+ * @cat_func, from the version-specific shadow in @cat (v18 vs v21/v22 layouts).
+ * Unknown versions are silently ignored (consistent with the other flushers).
+ * Returns 0.
+ */
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		/* Fixed: was a garbled call "r(...)" — set the register burst
+		 * count to 1 like every other *_flush() in this backend.
+		 */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v18.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+						 cat->v18.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v18.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v18.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v18.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].err_l4_cs);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v18.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v18.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v18.cfn[cat_func].km_or);
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		/* Fixed: was a garbled call "r(...)" — see comment above. */
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cfn_enable(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].enable);
+			cat_nthw_cfn_inv(be->p_cat_nthw,
+				       cat->v21.cfn[cat_func].inv);
+			cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_inv);
+			cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_isl);
+			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_cfp);
+			cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].ptc_mac);
+			cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l2);
+			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_vntag);
+			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_vlan);
+			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_mpls);
+			cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l3);
+			cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].ptc_frag);
+			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+				cat->v21.cfn[cat_func].ptc_ip_prot);
+			cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].ptc_l4);
+			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+					     cat->v21.cfn[cat_func].ptc_tunnel);
+			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l2);
+			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_vlan);
+			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_mpls);
+			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l3);
+			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+						  cat->v21.cfn[cat_func].ptc_tnl_frag);
+			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+			cat_nthw_cfn_err_inv(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_inv);
+			cat_nthw_cfn_err_cv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].err_cv);
+			cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+					  cat->v21.cfn[cat_func].err_fcs);
+			cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].err_trunc);
+			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l3_cs);
+			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].err_l4_cs);
+			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l3_cs);
+			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+						   cat->v21.cfn[cat_func].err_tnl_l4_cs);
+			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].err_ttl_exp);
+			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+						     cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+			cat_nthw_cfn_mac_port(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].mac_port);
+
+			cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmp);
+			cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_dct);
+			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_ext_inv);
+			cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_cmb);
+			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+					    cat->v21.cfn[cat_func].pm_and_inv);
+			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+					   cat->v21.cfn[cat_func].pm_or_inv);
+			cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].pm_inv);
+
+			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+			cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].lc_inv);
+			cat_nthw_cfn_km0_or(be->p_cat_nthw,
+					 cat->v21.cfn[cat_func].km0_or);
+			/* km1_or exists only when a second KM interface is present */
+			if (be->p_cat_nthw->m_km_if_cnt > 1) {
+				cat_nthw_cfn_km1_or(be->p_cat_nthw,
+						 cat->v21.cfn[cat_func].km1_or);
+			}
+			cat_nthw_cfn_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt KCE (KM category enable) records starting at @index. For v18 the
+ * KM interface index is hard-wired to 0; v21/v22 use @km_if_idx. Returns 0.
+ */
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, 0,
+					  cat->v18.kce[index + i].enable_bm);
+			cat_nthw_kce_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.kce[index + i].enable_bm[km_if_idx]);
+			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt KCS (KM category select) records starting at @cat_func; v18 uses
+ * KM interface 0, v21/v22 use @km_if_idx. Returns 0.
+ */
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, 0,
+					    cat->v18.kcs[cat_func].category);
+			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+			cat_func++;
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+					      cat->v21.kcs[cat_func].category[km_if_idx]);
+			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt FTE (flow type enable) records starting at @index; v18 uses KM
+ * interface 0, v21/v22 use @km_if_idx. Returns 0.
+ */
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int km_if_idx, int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, 0,
+					  cat->v18.fte[index + i].enable_bm);
+			cat_nthw_fte_flush(be->p_cat_nthw, 0);
+		}
+	} else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+					    cat->v21.fte[index + i].enable_bm[km_if_idx]);
+			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt CTE (category table enable) records starting at @cat_func.
+ * v18 and v21 share one record layout (the v18 view); v22 adds the rrb bit.
+ */
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v18.cte[cat_func].b.tpe);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	} else if (cat->ver == 22) {
+		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+			cat_nthw_cte_enable_col(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.col);
+			cat_nthw_cte_enable_cor(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.cor);
+			cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hsh);
+			cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.qsl);
+			cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.ipf);
+			cat_nthw_cte_enable_slc(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.slc);
+			cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.pdb);
+			cat_nthw_cte_enable_msk(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.msk);
+			cat_nthw_cte_enable_hst(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.hst);
+			cat_nthw_cte_enable_epp(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.epp);
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.tpe);
+			/* NOTE(review): enable_tpe is called a second time here but
+			 * with the rrb bit — looks like a copy-paste error; an
+			 * enable_rrb setter was probably intended. Verify against
+			 * the cat_nthw API before merging.
+			 */
+			cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+					     cat->v22.cte[cat_func].b.rrb);
+
+			cat_nthw_cte_flush(be->p_cat_nthw);
+			cat_func++;
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt CTS (category translation) records starting at @index. All
+ * supported versions (18/21/22) are written through the v18 view — presumably
+ * the record layout is identical across versions (union views).
+ */
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cts_select(be->p_cat_nthw, index + i);
+			cat_nthw_cts_cat_a(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_a);
+			cat_nthw_cts_cat_b(be->p_cat_nthw,
+					cat->v18.cts[index + i].cat_b);
+			cat_nthw_cts_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt COT (color table) records starting at @cat_func; v18 view is
+ * shared by versions 18/21/22.
+ */
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+			 int cat_func, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+			cat_nthw_cot_color(be->p_cat_nthw,
+					 cat->v18.cot[cat_func + i].color);
+			cat_nthw_cot_km(be->p_cat_nthw,
+				      cat->v18.cot[cat_func + i].km);
+			cat_nthw_cot_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt CCT (color control table) records starting at @index; v18 view
+ * shared by versions 18/21/22.
+ */
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cct_select(be->p_cat_nthw, index + i);
+			cat_nthw_cct_color(be->p_cat_nthw,
+					 cat->v18.cct[index + i].color);
+			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+			cat_nthw_cct_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt EXO (extractor offset) records starting at @ext_index; v18 view
+ * shared by versions 18/21/22.
+ */
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+			 int ext_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+			cat_nthw_exo_dyn(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].dyn);
+			cat_nthw_exo_ofs(be->p_cat_nthw,
+				       cat->v18.exo[ext_index + i].ofs);
+			cat_nthw_exo_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt RCK records starting at @index; v18 view shared by 18/21/22. */
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_rck_select(be->p_cat_nthw, index + i);
+			cat_nthw_rck_data(be->p_cat_nthw,
+					cat->v18.rck[index + i].rck_data);
+			cat_nthw_rck_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt LEN (length check) records starting at @len_index; v18 view
+ * shared by versions 18/21/22.
+ */
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_len_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_len_lower(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].lower);
+			cat_nthw_len_upper(be->p_cat_nthw,
+					 cat->v18.len[len_index + i].upper);
+			cat_nthw_len_dyn1(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn1);
+			cat_nthw_len_dyn2(be->p_cat_nthw,
+					cat->v18.len[len_index + i].dyn2);
+			cat_nthw_len_inv(be->p_cat_nthw,
+				       cat->v18.len[len_index + i].inv);
+			cat_nthw_len_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt KCC CAM records starting at @len_index; v18 view shared by
+ * versions 18/21/22.
+ */
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_kcc_key(be->p_cat_nthw,
+				       cat->v18.kcc_cam[len_index + i].key);
+			cat_nthw_kcc_category(be->p_cat_nthw,
+					      cat->v18.kcc_cam[len_index + i].category);
+			cat_nthw_kcc_id(be->p_cat_nthw,
+				      cat->v18.kcc_cam[len_index + i].id);
+			cat_nthw_kcc_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt CCE records starting at @len_index. v22-only table; other
+ * versions are a silent no-op.
+ */
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_cce_data_imm(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].imm);
+			cat_nthw_cce_data_ind(be->p_cat_nthw,
+					   cat->v22.cce[len_index + i].ind);
+			cat_nthw_cce_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/* Flush @cnt CCS records starting at @len_index. v22-only table; writes every
+ * enable/value pair plus the three side-band (sb0..sb2) type/data fields.
+ */
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+			 int len_index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+	if (cat->ver == 22) {
+		cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+			cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].cor_en);
+			cat_nthw_ccs_data_cor(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].cor);
+			cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hsh_en);
+			cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hsh);
+			cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].qsl_en);
+			cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].qsl);
+			cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].ipf_en);
+			cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].ipf);
+			cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].slc_en);
+			cat_nthw_ccs_data_slc(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].slc);
+			cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].pdb_en);
+			cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].pdb);
+			cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].msk_en);
+			cat_nthw_ccs_data_msk(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].msk);
+			cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].hst_en);
+			cat_nthw_ccs_data_hst(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].hst);
+			cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].epp_en);
+			cat_nthw_ccs_data_epp(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].epp);
+			cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].tpe_en);
+			cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].tpe);
+			cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+						 cat->v22.ccs[len_index + i].rrb_en);
+			cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+					   cat->v22.ccs[len_index + i].rrb);
+			cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_type);
+			cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb0_data);
+			cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_type);
+			cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb1_data);
+			cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_type);
+			cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+						   cat->v22.ccs[len_index + i].sb2_data);
+			cat_nthw_ccs_flush(be->p_cat_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  KM  *******************
+ */
+
+/* Report whether the KM (Key Matcher) NTHW module was instantiated for this device. */
+static bool km_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_km_nthw != NULL;
+}
+
+/* KM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t km_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_km_nthw->m_km) << 16) |
+			  (module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
+}
+
+/*
+ * Write 'cnt' consecutive KM RCP (recipe) shadow entries, starting at
+ * 'category', to hardware. Only the v7 register layout is handled; for any
+ * other version nothing is written and 0 is still returned.
+ */
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+			int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* one record per register flush */
+		km_nthw_rcp_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_rcp_select(be->p_km_nthw, category + i);
+			km_nthw_rcp_qw0_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_dyn);
+			km_nthw_rcp_qw0_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw0_ofs);
+			km_nthw_rcp_qw0_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_a);
+			km_nthw_rcp_qw0_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw0_sel_b);
+			km_nthw_rcp_qw4_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_dyn);
+			km_nthw_rcp_qw4_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].qw4_ofs);
+			km_nthw_rcp_qw4_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_a);
+			km_nthw_rcp_qw4_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].qw4_sel_b);
+			km_nthw_rcp_dw8_dyn(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_dyn);
+			km_nthw_rcp_dw8_ofs(be->p_km_nthw,
+					 km->v7.rcp[category + i].dw8_ofs);
+			km_nthw_rcp_dw8_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_a);
+			km_nthw_rcp_dw8_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw8_sel_b);
+			km_nthw_rcp_dw10_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_dyn);
+			km_nthw_rcp_dw10_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw10_ofs);
+			km_nthw_rcp_dw10_sel_a(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_a);
+			km_nthw_rcp_dw10_sel_b(be->p_km_nthw,
+					   km->v7.rcp[category + i].dw10_sel_b);
+			km_nthw_rcp_swx_cch(be->p_km_nthw,
+					 km->v7.rcp[category + i].swx_cch);
+			km_nthw_rcp_swx_sel_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_a);
+			km_nthw_rcp_swx_sel_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].swx_sel_b);
+			km_nthw_rcp_mask_d_a(be->p_km_nthw,
+					 km->v7.rcp[category + i].mask_d_a);
+			km_nthw_rcp_mask_b(be->p_km_nthw,
+					km->v7.rcp[category + i].mask_b);
+			km_nthw_rcp_dual(be->p_km_nthw,
+				       km->v7.rcp[category + i].dual);
+			km_nthw_rcp_paired(be->p_km_nthw,
+					 km->v7.rcp[category + i].paired);
+			km_nthw_rcp_el_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_a);
+			km_nthw_rcp_el_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].el_b);
+			km_nthw_rcp_info_a(be->p_km_nthw,
+					km->v7.rcp[category + i].info_a);
+			km_nthw_rcp_info_b(be->p_km_nthw,
+					km->v7.rcp[category + i].info_b);
+			km_nthw_rcp_ftm_a(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_a);
+			km_nthw_rcp_ftm_b(be->p_km_nthw,
+				       km->v7.rcp[category + i].ftm_b);
+			km_nthw_rcp_bank_a(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_a);
+			km_nthw_rcp_bank_b(be->p_km_nthw,
+					km->v7.rcp[category + i].bank_b);
+			km_nthw_rcp_kl_a(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_a);
+			km_nthw_rcp_kl_b(be->p_km_nthw,
+				      km->v7.rcp[category + i].kl_b);
+			km_nthw_rcp_keyway_a(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_a);
+			km_nthw_rcp_keyway_b(be->p_km_nthw,
+					  km->v7.rcp[category + i].keyway_b);
+			km_nthw_rcp_synergy_mode(be->p_km_nthw,
+						 km->v7.rcp[category + i].synergy_mode);
+			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_dyn);
+			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw0_b_ofs);
+			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_dyn);
+			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].dw2_b_ofs);
+			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_dyn);
+			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw4_b_ofs);
+			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_dyn);
+			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw,
+					  km->v7.rcp[category + i].sw5_b_ofs);
+			km_nthw_rcp_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' consecutive KM CAM shadow entries to hardware, starting at
+ * 'record' within 'bank'. The flat CAM index is (bank << 11) + record,
+ * i.e. banks appear to be 2048 records apart -- TODO confirm against the
+ * register map. v7 layout only; other versions are a silent no-op.
+ */
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int record, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		km_nthw_cam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_cam_select(be->p_km_nthw,
+					 (bank << 11) + record + i);
+			km_nthw_cam_w0(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w0);
+			km_nthw_cam_w1(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w1);
+			km_nthw_cam_w2(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w2);
+			km_nthw_cam_w3(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w3);
+			km_nthw_cam_w4(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w4);
+			km_nthw_cam_w5(be->p_km_nthw,
+				     km->v7.cam[(bank << 11) + record + i].w5);
+			km_nthw_cam_ft0(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft0);
+			km_nthw_cam_ft1(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft1);
+			km_nthw_cam_ft2(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft2);
+			km_nthw_cam_ft3(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft3);
+			km_nthw_cam_ft4(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft4);
+			km_nthw_cam_ft5(be->p_km_nthw,
+					km->v7.cam[(bank << 11) + record + i].ft5);
+			km_nthw_cam_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * Write dirty KM TCAM shadow entries to hardware and clear their dirty
+ * flags. The flat index is bank * 4 * 256 + byte * 256 + value; entries
+ * whose dirty flag is unset are skipped. v7 layout only.
+ */
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+			 int byte, int value, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		int start_idx = bank * 4 * 256 + byte * 256 + value;
+
+		km_nthw_tcam_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			if (km->v7.tcam[start_idx + i].dirty) {
+				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
+				km_nthw_tcam_t(be->p_km_nthw,
+					     km->v7.tcam[start_idx + i].t);
+				km_nthw_tcam_flush(be->p_km_nthw);
+				km->v7.tcam[start_idx + i].dirty = 0;
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Write 'cnt' consecutive TCI shadow entries (color + flow type) to
+ * hardware; flat index is bank * 72 + index. v7 layout only.
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width in version 3 = 72 */
+		km_nthw_tci_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+			km_nthw_tci_color(be->p_km_nthw,
+					  km->v7.tci[bank * 72 + index + i].color);
+			km_nthw_tci_ft(be->p_km_nthw,
+				     km->v7.tci[bank * 72 + index + i].ft);
+			km_nthw_tci_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+/*
+ * Write 'cnt' TCQ shadow entries (bank mask + qualifier) to hardware.
+ * The select address is bank + (index << 4) + i; note the +i steps the
+ * bank field, not the index field -- TODO confirm this is intended.
+ * v7 layout only.
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+			int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+	if (km->ver == 7) {
+		/* TCAM bank width in version 3 = 72 */
+		km_nthw_tcq_cnt(be->p_km_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+			km_nthw_tcq_bank_mask(be->p_km_nthw,
+					      km->v7.tcq[bank + (index << 4) + i].bank_mask);
+			km_nthw_tcq_qual(be->p_km_nthw,
+					 km->v7.tcq[bank + (index << 4) + i].qual);
+			km_nthw_tcq_flush(be->p_km_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(km, be->p_km_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  FLM  *******************
+ */
+
+/* Report whether the FLM (Flow Matcher) NTHW module was instantiated. */
+static bool flm_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_flm_nthw != NULL;
+}
+
+/* FLM module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t flm_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
+			  (module_get_minor_version(be->p_flm_nthw->m_flm) &
+			   0xffff));
+}
+
+/*
+ * Push the shadow FLM CONTROL register (enable/init, scan and inject
+ * flags, CRC and SDRAM split settings) to hardware. Handled for register
+ * layout v17 and newer; older versions are a silent no-op.
+ */
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+		flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+		flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+		flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+		flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+		flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+		flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+		flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+		flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+		flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+		flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+		flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+		flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+						   flm->v17.control->split_sdram_usage);
+		flm_nthw_control_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write the writable FLM STATUS fields (critical, panic, crcerr) to
+ * hardware; the get-flag 0 selects write direction. v17+ only.
+ */
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       0);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+		flm_nthw_status_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read the FLM STATUS register from hardware into the shadow fields;
+ * the get-flag 1 selects read direction. v17+ only.
+ */
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_status_update(be->p_flm_nthw);
+		flm_nthw_status_calibdone(be->p_flm_nthw,
+					&flm->v17.status->calibdone, 1);
+		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+				       1);
+		flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+		flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+				       1);
+		flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Push the shadow FLM TIMEOUT value to hardware. v17+ only. */
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+		flm_nthw_timeout_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Push the shadow FLM SCRUB interval to hardware. v17+ only. */
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+		flm_nthw_scrub_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Push the shadow FLM LOAD_BIN value to hardware. v17+ only. */
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+		flm_nthw_load_bin_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Push the shadow FLM LOAD_PPS value to hardware. v17+ only. */
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+		flm_nthw_load_pps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Push the shadow FLM LOAD_LPS value to hardware. v17+ only. */
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+		flm_nthw_load_lps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/* Push the shadow FLM LOAD_APS value to hardware. v17+ only. */
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+		flm_nthw_load_aps_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push the shadow FLM PRIO register (four limit/flow-type pairs) to
+ * hardware. v17+ only.
+ */
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+		flm_nthw_prio_flush(be->p_flm_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' consecutive FLM PST shadow entries (bp/pp/tp), starting at
+ * 'index', to hardware. v17+ only.
+ */
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_pst_select(be->p_flm_nthw, index + i);
+			flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+			flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+			flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+			flm_nthw_pst_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' consecutive FLM RCP (recipe) shadow entries, starting at
+ * 'index', to hardware. v17+ only; otherwise a silent no-op returning 0.
+ */
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+			flm_nthw_rcp_lookup(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].lookup);
+			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_dyn);
+			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_ofs);
+			flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw0_sel);
+			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_dyn);
+			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].qw4_ofs);
+			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_dyn);
+			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_ofs);
+			flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw8_sel);
+			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_dyn);
+			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].sw9_ofs);
+			flm_nthw_rcp_mask(be->p_flm_nthw,
+					flm->v17.rcp[index + i].mask);
+			flm_nthw_rcp_kid(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].kid);
+			flm_nthw_rcp_opn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].opn);
+			flm_nthw_rcp_ipn(be->p_flm_nthw,
+				       flm->v17.rcp[index + i].ipn);
+			flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_dyn);
+			flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+					  flm->v17.rcp[index + i].byt_ofs);
+			flm_nthw_rcp_txplm(be->p_flm_nthw,
+					 flm->v17.rcp[index + i].txplm);
+			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+						    flm->v17.rcp[index + i].auto_ipv4_mask);
+			flm_nthw_rcp_flush(be->p_flm_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Refresh the shadow buffer-control counters (learn free, info avail,
+ * status avail) from hardware. v17+ only.
+ */
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+				      &flm->v17.buf_ctrl->lrn_free,
+				      &flm->v17.buf_ctrl->inf_avail,
+				      &flm->v17.buf_ctrl->sta_avail);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Read all FLM statistics counters from hardware into the shadow
+ * structures: the v17 set (learn/unlearn/relearn/age/timeout/probe
+ * counters and flow count), plus the v20 additions (status/info, packet
+ * and cache hit/miss, cache-update counters) when the module version is
+ * 20 or newer. The trailing get-flag 1 selects read direction.
+ */
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	if (flm->ver >= 17) {
+		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+		flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+				       1);
+		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.lrn_ignore->cnt, 1);
+		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+				       1);
+		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+				       1);
+		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.unl_ignore->cnt, 1);
+		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+				       1);
+		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.rel_ignore->cnt, 1);
+		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+				       1);
+		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.aul_ignore->cnt, 1);
+		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+				       1);
+		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+				       1);
+		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+				       1);
+		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+					 &flm->v17.prb_ignore->cnt, 1);
+	}
+	if (flm->ver >= 20) {
+		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+				       1);
+		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+				       1);
+		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+				       1);
+		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+				       1);
+		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+				       1);
+		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+					1);
+		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+				       1);
+	}
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return 0;
+}
+
+/*
+ * Push a block of learn records to the FLM and refresh the buffer-control
+ * counters. Returns the NTHW helper's status. Note: v17 buf_ctrl is used
+ * without a version guard -- assumes flm->ver >= 17 here; TODO confirm
+ * callers guarantee that.
+ */
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+			      const uint32_t *lrn_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+				       &flm->v17.buf_ctrl->lrn_free,
+				       &flm->v17.buf_ctrl->inf_avail,
+				       &flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of info records from the FLM into 'inf_data' and refresh
+ * the buffer-control counters. Returns the NTHW helper's status.
+ */
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *inf_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ * Read a block of status records from the FLM into 'sta_data' and refresh
+ * the buffer-control counters. Returns the NTHW helper's status.
+ */
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+			       uint32_t *sta_data, uint32_t size)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+	int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+					&flm->v17.buf_ctrl->lrn_free,
+					&flm->v17.buf_ctrl->inf_avail,
+					&flm->v17.buf_ctrl->sta_avail);
+
+	_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+	return ret;
+}
+
+/*
+ *  *****************  HSH  *******************
+ */
+
+/* Report whether the HSH (Hasher) NTHW module was instantiated. */
+static bool hsh_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hsh_nthw != NULL;
+}
+
+/* HSH module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hsh_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
+			  (module_get_minor_version(be->p_hsh_nthw->m_hsh) &
+			   0xffff));
+}
+
+/*
+ * Write 'cnt' consecutive HSH RCP (hash recipe) shadow entries, starting
+ * at 'category', to hardware. Only the v5 register layout is handled;
+ * other versions are a silent no-op returning 0.
+ */
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+	if (hsh->ver == 5) {
+		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].load_dist_type);
+			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+						   hsh->v5.rcp[category + i].mac_port_mask);
+			hsh_nthw_rcp_sort(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].sort);
+			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw0_pe);
+			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw0_ofs);
+			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].qw4_pe);
+			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].qw4_ofs);
+			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w8_pe);
+			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w8_ofs);
+			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w8_sort);
+			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].w9_pe);
+			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].w9_ofs);
+			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+					  hsh->v5.rcp[category + i].w9_sort);
+			hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+				       hsh->v5.rcp[category + i].w9_p);
+			hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+					 hsh->v5.rcp[category + i].p_mask);
+			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].word_mask);
+			hsh_nthw_rcp_seed(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].seed);
+			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+					hsh->v5.rcp[category + i].tnl_p);
+			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+					       hsh->v5.rcp[category + i].hsh_valid);
+			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+					   hsh->v5.rcp[category + i].hsh_type);
+			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+						    hsh->v5.rcp[category + i].auto_ipv4_mask);
+			hsh_nthw_rcp_flush(be->p_hsh_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  HST  *******************
+ */
+
+/* Report whether the HST (Header Stripper) NTHW module was instantiated. */
+static bool hst_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_hst_nthw != NULL;
+}
+
+/* HST module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t hst_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_hst_nthw->m_hst) << 16) |
+			  (module_get_minor_version(be->p_hst_nthw->m_hst) &
+			   0xffff));
+}
+
+/*
+ * Write 'cnt' consecutive HST RCP shadow entries (strip range plus three
+ * modifier command/dyn/ofs/value groups), starting at 'category', to
+ * hardware. Only the v2 register layout is handled.
+ */
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+	if (hst->ver == 2) {
+		hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+			hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+						hst->v2.rcp[category + i].strip_mode);
+			hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_dyn);
+			hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+					       hst->v2.rcp[category + i].start_ofs);
+			hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_dyn);
+			hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+					  hst->v2.rcp[category + i].end_ofs);
+			hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_cmd);
+			hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_dyn);
+			hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif0_ofs);
+			hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif0_value);
+			hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_cmd);
+			hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_dyn);
+			hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif1_ofs);
+			hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif1_value);
+			hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_cmd);
+			hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_dyn);
+			hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+						hst->v2.rcp[category + i].modif2_ofs);
+			hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+						  hst->v2.rcp[category + i].modif2_value);
+			hst_nthw_rcp_flush(be->p_hst_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  QSL  *******************
+ */
+
+/* Report whether the QSL (Queue Selector) NTHW module was instantiated. */
+static bool qsl_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_qsl_nthw != NULL;
+}
+
+/* QSL module version packed as (major << 16) | (minor & 0xffff). */
+static uint32_t qsl_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
+			  (module_get_minor_version(be->p_qsl_nthw->m_qsl) &
+			   0xffff));
+}
+
+/*
+ * Write 'cnt' consecutive QSL RCP shadow entries, starting at 'category',
+ * to hardware. Only the v7 register layout is handled.
+ */
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+			qsl_nthw_rcp_discard(be->p_qsl_nthw,
+					   qsl->v7.rcp[category + i].discard);
+			qsl_nthw_rcp_drop(be->p_qsl_nthw,
+					qsl->v7.rcp[category + i].drop);
+			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_lo);
+			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+					 qsl->v7.rcp[category + i].tbl_hi);
+			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_idx);
+			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+					  qsl->v7.rcp[category + i].tbl_msk);
+			qsl_nthw_rcp_lr(be->p_qsl_nthw,
+				      qsl->v7.rcp[category + i].lr);
+			qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].tsa);
+			qsl_nthw_rcp_vli(be->p_qsl_nthw,
+				       qsl->v7.rcp[category + i].vli);
+			qsl_nthw_rcp_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' consecutive QST (queue selection table) shadow entries,
+ * starting at 'entry', to hardware. Only the v7 register layout is
+ * handled.
+ */
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qst_queue(be->p_qsl_nthw,
+					 qsl->v7.qst[entry + i].queue);
+			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+			qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+					  qsl->v7.qst[entry + i].tx_port);
+			qsl_nthw_qst_lre(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].lre);
+			qsl_nthw_qst_tci(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].tci);
+			qsl_nthw_qst_ven(be->p_qsl_nthw,
+				       qsl->v7.qst[entry + i].ven);
+			qsl_nthw_qst_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' consecutive QEN (queue enable) shadow entries, starting at
+ * 'entry', to hardware. Only the v7 register layout is handled.
+ */
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			 int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+			qsl_nthw_qen_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ * Write 'cnt' consecutive UNMQ (unmatched-packet queue) shadow entries,
+ * starting at 'entry', to hardware. Only the v7 register layout is
+ * handled.
+ */
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+			  int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+	if (qsl->ver == 7) {
+		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+						 qsl->v7.unmq[entry + i].dest_queue);
+			qsl_nthw_unmq_en(be->p_qsl_nthw,
+				       qsl->v7.unmq[entry + i].en);
+			qsl_nthw_unmq_flush(be->p_qsl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC  *******************
+ */
+
+/* Report whether the SLC (slicer) FPGA module was found during init. */
+static bool slc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_nthw != NULL;
+}
+
+/*
+ * Return the SLC module version packed as (major << 16) | (minor & 0xffff).
+ * Caller must ensure the module is present (slc_get_present()).
+ */
+static uint32_t slc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_nthw->m_slc) << 16) |
+			  (module_get_minor_version(be->p_slc_nthw->m_slc) &
+			   0xffff));
+}
+
+/*
+ * Write SLC RCP (recipe) entries [category .. category + cnt - 1] to
+ * hardware. Only SLC version 1 is handled; other versions are a no-op.
+ * Always returns 0.
+ */
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+	if (slc->ver == 1) {
+		slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+			slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+						 slc->v1.rcp[category + i].tail_slc_en);
+			slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_dyn);
+			slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+					   slc->v1.rcp[category + i].tail_ofs);
+			slc_nthw_rcp_pcap(be->p_slc_nthw,
+					slc->v1.rcp[category + i].pcap);
+			slc_nthw_rcp_flush(be->p_slc_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  SLC LR *******************
+ */
+
+/* Report whether the SLC LR (slicer, long-range) FPGA module is present. */
+static bool slc_lr_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_slc_lr_nthw != NULL;
+}
+
+/*
+ * Return the SLC LR module version packed as (major << 16) | (minor & 0xffff).
+ * Caller must ensure the module is present (slc_lr_get_present()).
+ */
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_slc_lr_nthw->m_slc_lr)
+			   << 16) |
+			  (module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) &
+			   0xffff));
+}
+
+/*
+ * Write SLC LR RCP (recipe) entries [category .. category + cnt - 1] to
+ * hardware. Only SLC LR version 2 is handled; other versions are a no-op.
+ * Always returns 0.
+ */
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+	if (slc_lr->ver == 2) {
+		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+						    slc_lr->v2.rcp[category + i].tail_slc_en);
+			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_dyn);
+			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+						 slc_lr->v2.rcp[category + i].tail_ofs);
+			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+					  slc_lr->v2.rcp[category + i].pcap);
+			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  PDB  *******************
+ */
+
+/* Report whether the PDB (packet descriptor builder) module is present. */
+static bool pdb_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_pdb_nthw != NULL;
+}
+
+/*
+ * Return the PDB module version packed as (major << 16) | (minor & 0xffff).
+ * Caller must ensure the module is present (pdb_get_present()).
+ */
+static uint32_t pdb_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
+			  (module_get_minor_version(be->p_pdb_nthw->m_pdb) &
+			   0xffff));
+}
+
+/*
+ * Write PDB RCP (recipe) entries [category .. category + cnt - 1] to
+ * hardware. Only PDB version 9 is handled; other versions are a no-op.
+ * Always returns 0.
+ */
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+						pdb->v9.rcp[category + i].descriptor);
+			pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].desc_len);
+			pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].tx_port);
+			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+					       pdb->v9.rcp[category + i].tx_ignore);
+			pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].tx_now);
+			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].crc_overwrite);
+			pdb_nthw_rcp_align(be->p_pdb_nthw,
+					 pdb->v9.rcp[category + i].align);
+			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_dyn);
+			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs0_rel);
+			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_dyn);
+			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs1_rel);
+			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_dyn);
+			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+					   pdb->v9.rcp[category + i].ofs2_rel);
+			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+						 pdb->v9.rcp[category + i].ip_prot_tnl);
+			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+					  pdb->v9.rcp[category + i].ppc_hsh);
+			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+						  pdb->v9.rcp[category + i].duplicate_en);
+			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].duplicate_bit);
+			/*
+			 * Fix: the original called pdb_nthw_rcp_duplicate_bit()
+			 * a second time with the pcap_keep_fcs value, which
+			 * clobbered DUPLICATE_BIT and left PCAP_KEEP_FCS
+			 * unprogrammed (copy-paste error).
+			 */
+			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
+						   pdb->v9.rcp[category + i].pcap_keep_fcs);
+			pdb_nthw_rcp_flush(be->p_pdb_nthw);
+		}
+	}
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ * Write the global PDB configuration (timestamp format, port offset) to
+ * hardware. Only PDB version 9 is handled. Always returns 0.
+ */
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+	if (pdb->ver == 9) {
+		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+		pdb_nthw_config_flush(be->p_pdb_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  IOA  *******************
+ */
+
+/* Report whether the IOA FPGA module is present. */
+static bool ioa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_ioa_nthw != NULL;
+}
+
+/*
+ * Return the IOA module version packed as (major << 16) | (minor & 0xffff).
+ * Caller must ensure the module is present (ioa_get_present()).
+ */
+static uint32_t ioa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_ioa_nthw->m_ioa) << 16) |
+			  (module_get_minor_version(be->p_ioa_nthw->m_ioa) &
+			   0xffff));
+}
+
+/*
+ * Write IOA RCP (recipe) entries [category .. category + cnt - 1] to
+ * hardware (tunnel/VLAN pop-push and queue override settings).
+ * Only IOA version 4 is handled; other versions are a no-op.
+ * Always returns 0.
+ */
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			 int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+			ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+						ioa->v4.rcp[category + i].tunnel_pop);
+			ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pop);
+			ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+					       ioa->v4.rcp[category + i].vlan_push);
+			ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_vid);
+			ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_dei);
+			ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].vlan_pcp);
+			ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+						   ioa->v4.rcp[category + i].vlan_tpid_sel);
+			ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+						       ioa->v4.rcp[category + i].queue_override_en);
+			ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+					   ioa->v4.rcp[category + i].queue_id);
+			ioa_nthw_rcp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write the two custom VLAN TPID values to the IOA special-TPID register.
+ * Only IOA version 4 is handled. Always returns 0.
+ */
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_0);
+		ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+						 ioa->v4.tpid->cust_tpid_1);
+		ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ * Write IOA ROA EPP (egress packet path) entries [index .. index + cnt - 1]
+ * to hardware. Only IOA version 4 is handled. Always returns 0.
+ */
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+	if (ioa->ver == 4) {
+		ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+			ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+						     ioa->v4.roa_epp[index + i].push_tunnel);
+			ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+						 ioa->v4.roa_epp[index + i].tx_port);
+			ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  ROA  *******************
+ */
+
+/* Report whether the ROA (re-order and assemble) module is present. */
+static bool roa_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_roa_nthw != NULL;
+}
+
+/*
+ * Return the ROA module version packed as (major << 16) | (minor & 0xffff).
+ * Caller must ensure the module is present (roa_get_present()).
+ */
+static uint32_t roa_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+			  (module_get_minor_version(be->p_roa_nthw->m_roa) &
+			   0xffff));
+}
+
+/*
+ * Write ROA tunnel header data to hardware. Each shadow tunhdr entry is
+ * written as 4 hardware records of 4 words each (the inner ii loop), so
+ * @index is a hardware-record index.
+ * NOTE(review): the shadow array is addressed as tunhdr[index / 4 + i],
+ * which only lines up with the hardware index when @index is a multiple
+ * of 4 — presumably guaranteed by the caller; confirm.
+ * Only ROA version 6 is handled. Always returns 0.
+ */
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+		for (int i = 0; i < cnt; i++) {
+			for (int ii = 0; ii < 4; ii++) {
+				roa_nthw_tun_hdr_select(be->p_roa_nthw,
+						     index + (i * 4) + ii);
+				roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+							    &roa->v6.tunhdr[index / 4 + i]
+							    .tunnel_hdr[ii * 4]);
+				roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+			}
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write ROA tunnel configuration entries [category .. category + cnt - 1]
+ * to hardware. Only ROA version 6 is handled. Always returns 0.
+ */
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int category, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+			roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].tun_len);
+			roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_type);
+			roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].tun_vlan);
+			roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+						 roa->v6.tuncfg[category + i].ip_type);
+			roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].ipcs_upd);
+			roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].ipcs_precalc);
+			roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+						  roa->v6.tuncfg[category + i].iptl_upd);
+			roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+						      roa->v6.tuncfg[category + i].iptl_precalc);
+			roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+				roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+			roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+						   roa->v6.tuncfg[category + i].tx_lag_ix);
+			roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirculate);
+			roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].push_tunnel);
+			roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+						     roa->v6.tuncfg[category + i].recirc_port);
+			roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+						       roa->v6.tuncfg[category + i].recirc_bypass);
+			roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write the global ROA forwarding configuration to hardware.
+ * Only ROA version 6 is handled. Always returns 0.
+ */
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+					     roa->v6.config->fwd_recirculate);
+		roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+					    roa->v6.config->fwd_normal_pcks);
+		roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport0);
+		roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+					 roa->v6.config->fwd_txport1);
+		roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+						      roa->v6.config->fwd_cellbuilder_pcks);
+		roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+						    roa->v6.config->fwd_non_normal_pcks);
+		roa_nthw_config_flush(be->p_roa_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ * Write ROA LAG configuration entries [index .. index + cnt - 1] to
+ * hardware. Only ROA version 6 is handled. Always returns 0.
+ */
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+			    int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+	if (roa->ver == 6) {
+		roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+			roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+						     roa->v6.lagcfg[index + i].txphy_port);
+			roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  RMC  *******************
+ */
+
+/* Report whether the RMC (Rx MAC controller) module is present. */
+static bool rmc_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_rmc_nthw != NULL;
+}
+
+/*
+ * Return the RMC module version packed as (major << 16) | (minor & 0xffff).
+ * Caller must ensure the module is present (rmc_get_present()).
+ */
+static uint32_t rmc_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+			  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+			   0xffff));
+}
+
+/*
+ * Write the RMC control register (traffic blocking / LAG settings) to
+ * hardware. Only RMC version 1.3 (0x10003 = (1 << 16) | 3) is handled.
+ * Always returns 0.
+ */
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+	if (rmc->ver == 0x10003) {
+		rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_statt);
+		rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+				       rmc->v1_3.ctrl->block_keepa);
+		rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->block_rpp_slice);
+		rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+					 rmc->v1_3.ctrl->block_mac_port);
+		rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+					  rmc->v1_3.ctrl->lag_phy_odd_even);
+		rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+	}
+
+	_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  TPE  *******************
+ */
+
+/*
+ * TPE is a composite of six FPGA submodules (CSU, HFU, RPP_LR, TX_CPY,
+ * TX_INS, TX_RPL); it is present only when all of them are.
+ */
+static bool tpe_get_present(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+	       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+	       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
+/*
+ * Map the six TPE submodule versions (each packed (major << 16) | minor)
+ * onto a combined TPE backend version: the two known combinations differ
+ * only in the RPP_LR version (0 -> TPE v1, 1 -> TPE v2).
+ * NOTE(review): an unrecognized combination hits assert(false) and then
+ * returns 0 in NDEBUG builds — callers will see an invalid version.
+ */
+static uint32_t tpe_get_version(void *be_dev)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	const uint32_t csu_version =
+		(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+			   (module_get_minor_version(be->p_csu_nthw->m_csu) &
+			    0xffff));
+
+	const uint32_t hfu_version =
+		(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+			   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+			    0xffff));
+
+	const uint32_t rpp_lr_version =
+		(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+			    << 16) |
+			   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+			    0xffff));
+
+	const uint32_t tx_cpy_version =
+		(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+			    0xffff));
+
+	const uint32_t tx_ins_version =
+		(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+			    0xffff));
+
+	const uint32_t tx_rpl_version =
+		(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+			    << 16) |
+			   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+			    0xffff));
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 1;
+
+	if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+			tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+		return 2;
+
+	assert(false);
+	return 0;
+}
+
+/*
+ * Write RPP_LR RCP entries [index .. index + cnt - 1] to hardware.
+ * Handled for TPE version >= 1; otherwise a no-op. Always returns 0.
+ */
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 1) {
+		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+					 rpp_lr->v1.rpp_rcp[index + i].exp);
+			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return 0;
+}
+
+/*
+ * Write RPP_LR IFR RCP (MTU/enable) entries [index .. index + cnt - 1] to
+ * hardware. Requires TPE version >= 2; returns -1 for older versions,
+ * 0 on success.
+ */
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+				 int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+	if (rpp_lr->ver >= 2) {
+		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+			rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+					   rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+						rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+	return res;
+}
+
+/*
+ * Write IFR RCP (MTU/enable) entries [index .. index + cnt - 1] to
+ * hardware. Requires TPE version >= 2; returns -1 for older versions,
+ * 0 on success.
+ */
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+			     int index, int cnt)
+{
+	int res = 0;
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+	if (ifr->ver >= 2) {
+		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+			ifr_nthw_rcp_en(be->p_ifr_nthw,
+				      ifr->v2.ifr_rcp[index + i].en);
+			ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+				       ifr->v2.ifr_rcp[index + i].mtu);
+			ifr_nthw_rcp_flush(be->p_ifr_nthw);
+		}
+	} else {
+		res = -1;
+	}
+	_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+	return res;
+}
+
+/*
+ * Write TX_INS RCP (insert recipe) entries [index .. index + cnt - 1] to
+ * hardware. Handled for TPE version >= 1; otherwise a no-op.
+ * Always returns 0.
+ */
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+	if (tx_ins->ver >= 1) {
+		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].dyn);
+			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].ofs);
+			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+					 tx_ins->v1.ins_rcp[index + i].len);
+			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+	return 0;
+}
+
+/*
+ * Write TX_RPL RCP (replace recipe) entries [index .. index + cnt - 1] to
+ * hardware. Handled for TPE version >= 1; otherwise a no-op.
+ * Always returns 0.
+ */
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].dyn);
+			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].ofs);
+			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+					 tx_rpl->v1.rpl_rcp[index + i].len);
+			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+						 tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write TX_RPL EXT (extension pointer) entries [index .. index + cnt - 1]
+ * to hardware. Handled for TPE version >= 1; otherwise a no-op.
+ * Always returns 0.
+ */
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+						tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write TX_RPL RPL (replacement data) entries [index .. index + cnt - 1]
+ * to hardware. Handled for TPE version >= 1; otherwise a no-op.
+ * Always returns 0.
+ */
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+	if (tx_rpl->ver >= 1) {
+		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+					   tx_rpl->v1.rpl_rpl[index + i].value);
+			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+	return 0;
+}
+
+/*
+ * Write TX_CPY writer recipe entries [index .. index + cnt - 1] to
+ * hardware. The flat index space is split across writers: entry k goes to
+ * writer k / nb_rcp_categories at slot k % nb_rcp_categories. The writer
+ * count register is (re)programmed only when the writer changes.
+ * Handled for TPE version >= 1; otherwise a no-op. Always returns 0.
+ */
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+	/* -1 wraps to UINT_MAX: "no writer selected yet" sentinel, so the
+	 * first iteration always programs the writer count.
+	 */
+	unsigned int wr_index = -1;
+
+	_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+	if (tx_cpy->ver >= 1) {
+		for (int i = 0; i < cnt; i++) {
+			if (wr_index !=
+					(index + i) / tx_cpy->nb_rcp_categories) {
+				wr_index =
+					(index + i) / tx_cpy->nb_rcp_categories;
+				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+						    1);
+			}
+
+			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+						  (index + i) % tx_cpy->nb_rcp_categories);
+			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+				tx_cpy->v1.cpy_rcp[index + i].reader_select);
+			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].dyn);
+			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].ofs);
+			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+					    tx_cpy->v1.cpy_rcp[index + i].len);
+			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+	return 0;
+}
+
+/*
+ * Write HFU (header field update) RCP entries [index .. index + cnt - 1]
+ * to hardware: length fields A/B/C, TTL update, checksum info, protocol
+ * flags and the four layer offsets. Handled for TPE version >= 1;
+ * otherwise a no-op. Always returns 0.
+ */
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+	if (hfu->ver >= 1) {
+		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+					  hfu->v1.hfu_rcp[index + i].len_a_wr);
+			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+						  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_b_wr);
+			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+					      hfu->v1.hfu_rcp[index + i].len_c_wr);
+			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+						   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].ttl_wr);
+			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+						 hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+			hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].cs_inf);
+			hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l3_prt);
+			hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].l3_frag);
+			hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].tunnel);
+			hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+					   hfu->v1.hfu_rcp[index + i].l4_prt);
+			hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+			hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+			hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+			hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+					    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+			hfu_nthw_rcp_flush(be->p_hfu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+	return 0;
+}
+
+/*
+ * Write CSU (checksum update) RCP entries [index .. index + cnt - 1] to
+ * hardware (outer/inner L3 and L4 checksum commands). Handled for TPE
+ * version >= 1; otherwise a no-op. Always returns 0.
+ */
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+			     int index, int cnt)
+{
+	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+	_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+	if (csu->ver >= 1) {
+		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+		for (int i = 0; i < cnt; i++) {
+			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol3_cmd);
+			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].ol4_cmd);
+			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il3_cmd);
+			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+						  csu->v1.csu_rcp[index + i].il4_cmd);
+			csu_nthw_rcp_flush(be->p_csu_nthw);
+		}
+	}
+
+	_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+	return 0;
+}
+
+/*
+ *  *****************  DBS  *******************
+ */
+
+/*
+ * Stub: Rx queue allocation is not supported by this backend; always
+ * logs an error and returns -1.
+ * NOTE(review): free_rx_queue() below returns 0 on the same error path —
+ * confirm whether the asymmetry is intentional.
+ */
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+	(void)be_dev;
+	(void)queue_id;
+	printf("ERROR alloc Rx queue\n");
+	return -1;
+}
+
+/*
+ * Stub: Rx queue release is not supported by this backend; logs an error
+ * and returns 0 (unlike alloc_rx_queue(), which returns -1).
+ */
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+	(void)be_dev;
+	(void)hw_queue;
+	printf("ERROR free Rx queue\n");
+	return 0;
+}
+
+/*
+ * Backend operations table exposed to the flow API layer. The leading 1 is
+ * the first struct member (interface version, per the positional
+ * initializer). All remaining members are function pointers and MUST stay
+ * in exactly the order declared in struct flow_api_backend_ops — this is
+ * a positional (not designated) initializer.
+ */
+const struct flow_api_backend_ops flow_be_iface = {
+	1,
+
+	set_debug_mode,
+	get_nb_phy_ports,
+	get_nb_rx_ports,
+	get_ltx_avail,
+	get_nb_cat_funcs,
+	get_nb_categories,
+	get_nb_cat_km_if_cnt,
+	get_nb_cat_km_if_m0,
+	get_nb_cat_km_if_m1,
+	get_nb_queues,
+	get_nb_km_flow_types,
+	get_nb_pm_ext,
+	get_nb_len,
+	get_kcc_size,
+	get_kcc_banks,
+	get_nb_km_categories,
+	get_nb_km_cam_banks,
+	get_nb_km_cam_record_words,
+	get_nb_km_cam_records,
+	get_nb_km_tcam_banks,
+	get_nb_km_tcam_bank_width,
+	get_nb_flm_categories,
+	get_nb_flm_size_mb,
+	get_nb_flm_entry_size,
+	get_nb_flm_variant,
+	get_nb_flm_prios,
+	get_nb_flm_pst_profiles,
+	get_nb_hst_categories,
+	get_nb_qsl_categories,
+	get_nb_qsl_qst_entries,
+	get_nb_pdb_categories,
+	get_nb_ioa_categories,
+	get_nb_roa_categories,
+	get_nb_tpe_categories,
+	get_nb_tx_cpy_writers,
+	get_nb_tx_cpy_mask_mem,
+	get_nb_tx_rpl_depth,
+	get_nb_tx_rpl_ext_categories,
+	get_nb_tpe_ifr_categories,
+
+	alloc_rx_queue,
+	free_rx_queue,
+
+	cat_get_present,
+	cat_get_version,
+	cat_cfn_flush,
+
+	cat_kce_flush,
+	cat_kcs_flush,
+	cat_fte_flush,
+
+	cat_cte_flush,
+	cat_cts_flush,
+	cat_cot_flush,
+	cat_cct_flush,
+	cat_exo_flush,
+	cat_rck_flush,
+	cat_len_flush,
+	cat_kcc_flush,
+	cat_cce_flush,
+	cat_ccs_flush,
+
+	km_get_present,
+	km_get_version,
+	km_rcp_flush,
+	km_cam_flush,
+	km_tcam_flush,
+	km_tci_flush,
+	km_tcq_flush,
+
+	flm_get_present,
+	flm_get_version,
+	flm_control_flush,
+	flm_status_flush,
+	flm_status_update,
+	flm_timeout_flush,
+	flm_scrub_flush,
+	flm_load_bin_flush,
+	flm_load_pps_flush,
+	flm_load_lps_flush,
+	flm_load_aps_flush,
+	flm_prio_flush,
+	flm_pst_flush,
+	flm_rcp_flush,
+	flm_buf_ctrl_update,
+	flm_stat_update,
+	flm_lrn_data_flush,
+	flm_inf_data_update,
+	flm_sta_data_update,
+
+	hsh_get_present,
+	hsh_get_version,
+	hsh_rcp_flush,
+
+	hst_get_present,
+	hst_get_version,
+	hst_rcp_flush,
+
+	qsl_get_present,
+	qsl_get_version,
+	qsl_rcp_flush,
+	qsl_qst_flush,
+	qsl_qen_flush,
+	qsl_unmq_flush,
+
+	slc_get_present,
+	slc_get_version,
+	slc_rcp_flush,
+
+	slc_lr_get_present,
+	slc_lr_get_version,
+	slc_lr_rcp_flush,
+
+	pdb_get_present,
+	pdb_get_version,
+	pdb_rcp_flush,
+	pdb_config_flush,
+
+	ioa_get_present,
+	ioa_get_version,
+	ioa_rcp_flush,
+	ioa_special_tpid_flush,
+	ioa_roa_epp_flush,
+
+	roa_get_present,
+	roa_get_version,
+	roa_tunhdr_flush,
+	roa_tuncfg_flush,
+	roa_config_flush,
+	roa_lagcfg_flush,
+
+	rmc_get_present,
+	rmc_get_version,
+	rmc_ctrl_flush,
+
+	tpe_get_present,
+	tpe_get_version,
+	tpe_rpp_rcp_flush,
+	tpe_rpp_ifr_rcp_flush,
+	tpe_ifr_rcp_flush,
+	tpe_ins_rcp_flush,
+	tpe_rpl_rcp_flush,
+	tpe_rpl_ext_flush,
+	tpe_rpl_rpl_flush,
+	tpe_cpy_rcp_flush,
+	tpe_hfu_rcp_flush,
+	tpe_csu_rcp_flush,
+};
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **dev)
+{
+	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+	struct info_nthw *pinfonthw = info_nthw_new();
+
+	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+	/* Init nthw CAT */
+	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct cat_nthw *pcatnthw = cat_nthw_new();
+
+		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+	} else {
+		be_devs[physical_adapter_no].p_cat_nthw = NULL;
+	}
+	/* Init nthw KM */
+	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct km_nthw *pkmnthw = km_nthw_new();
+
+		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_km_nthw = NULL;
+	}
+	/* Init nthw FLM */
+	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct flm_nthw *pflmnthw = flm_nthw_new();
+
+		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+	} else {
+		be_devs[physical_adapter_no].p_flm_nthw = NULL;
+	}
+	/* Init nthw IFR */
+	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+	}
+	/* Init nthw HSH */
+	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+	}
+	/* Init nthw HST */
+	if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hst_nthw *phstnthw = hst_nthw_new();
+
+		hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+	} else {
+		be_devs[physical_adapter_no].p_hst_nthw = NULL;
+	}
+	/* Init nthw QSL */
+	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+	} else {
+		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+	}
+	/* Init nthw SLC */
+	if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_nthw *pslcnthw = slc_nthw_new();
+
+		slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_nthw = NULL;
+	}
+	/* Init nthw SLC LR */
+	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+	} else {
+		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+	}
+	/* Init nthw PDB */
+	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+	} else {
+		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+	}
+	/* Init nthw IOA */
+	if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+		ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+	} else {
+		be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+	}
+	/* Init nthw ROA */
+	if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct roa_nthw *proanthw = roa_nthw_new();
+
+		roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+	} else {
+		be_devs[physical_adapter_no].p_roa_nthw = NULL;
+	}
+	/* Init nthw RMC */
+	if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+		rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+	} else {
+		be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+	}
+	/* Init nthw HFU */
+	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct hfu_nthw *ptr = hfu_nthw_new();
+
+		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+	}
+	/* Init nthw RPP_LR */
+	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+	}
+	/* Init nthw TX_CPY */
+	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+	}
+	/* Init nthw CSU */
+	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct csu_nthw *ptr = csu_nthw_new();
+
+		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_csu_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_csu_nthw = NULL;
+	}
+	/* Init nthw TX_INS */
+	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+	}
+	/* Init nthw TX_RPL */
+	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+	} else {
+		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+	}
+	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+	*dev = (void *)&be_devs[physical_adapter_no];
+
+	return &flow_be_iface;
+}
+
+/*
+ * Tear down the backend device created by bin_flow_backend_init():
+ * deletes every registered nthw module instance.
+ *
+ * Modules absent from the FPGA were registered as NULL, so the
+ * *_nthw_delete() functions are presumably NULL-tolerant -- TODO
+ * confirm against the nthw module implementations.
+ *
+ * dev: pointer previously handed out via bin_flow_backend_init()'s
+ *      dev out-parameter; it points into the static be_devs table and
+ *      is therefore not freed here.
+ */
+void bin_flow_backend_done(void *dev)
+{
+	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+	info_nthw_delete(be_dev->p_info_nthw);
+	cat_nthw_delete(be_dev->p_cat_nthw);
+	km_nthw_delete(be_dev->p_km_nthw);
+	flm_nthw_delete(be_dev->p_flm_nthw);
+	hsh_nthw_delete(be_dev->p_hsh_nthw);
+	hst_nthw_delete(be_dev->p_hst_nthw);
+	qsl_nthw_delete(be_dev->p_qsl_nthw);
+	slc_nthw_delete(be_dev->p_slc_nthw);
+	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+	pdb_nthw_delete(be_dev->p_pdb_nthw);
+	ioa_nthw_delete(be_dev->p_ioa_nthw);
+	roa_nthw_delete(be_dev->p_roa_nthw);
+	rmc_nthw_delete(be_dev->p_rmc_nthw);
+	csu_nthw_delete(be_dev->p_csu_nthw);
+	hfu_nthw_delete(be_dev->p_hfu_nthw);
+	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
new file mode 100644
index 0000000000..17fdcada3f
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+/*
+ * Create the binary flow backend for an FPGA: probes the nthw modules,
+ * stores the per-adapter backend device pointer in *be_dev and returns
+ * the backend operations table.
+ */
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+		void **be_dev);
+/* Tear down a backend device previously returned through be_dev. */
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
new file mode 100644
index 0000000000..90aeb71bd7
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+/*
+ * Create the flow filter API instance for one FPGA/adapter.
+ *
+ * Builds the binary flow backend first, then the flow API device on
+ * top of it.  On success *p_flow_device is set and 0 is returned; on
+ * failure *p_flow_device is set to NULL and -1 is returned.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no)
+{
+	void *p_be_dev = NULL;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter api\n");
+
+	const struct flow_api_backend_ops *p_be_ops =
+		bin_flow_backend_init(p_fpga, &p_be_dev);
+	struct flow_nic_dev *p_nic_dev =
+		flow_api_create((uint8_t)adapter_no, p_be_ops, p_be_dev);
+
+	/* NULL is stored on failure, the created device on success */
+	*p_flow_device = p_nic_dev;
+	return p_nic_dev ? 0 : -1;
+}
+
+/*
+ * Destroy a flow filter instance and its backend device.
+ *
+ * The backend device pointer is fetched before flow_api_done() tears
+ * the flow device down, then released afterwards.
+ * Returns the flow_api_done() status.
+ */
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+	void *p_be_dev = flow_api_get_be_dev(dev);
+	int status = flow_api_done(dev);
+
+	if (p_be_dev != NULL)
+		bin_flow_backend_done(p_be_dev);
+
+	return status;
+}
diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
new file mode 100644
index 0000000000..8ea21a614a
--- /dev/null
+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard says "HPP" although this is a C header. */
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+/* NOTE(review): purpose of this #undef is unclear -- presumably forces
+ * the non-OPAE code paths; confirm whether it is still needed.
+ */
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+/*
+ * Create the flow filter API instance for an FPGA/adapter.
+ * On success *p_flow_device is set and 0 is returned; on failure
+ * *p_flow_device is set to NULL and -1 is returned.
+ */
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+		   int adapter_no);
+/* Destroy a flow filter instance; returns the flow_api_done() status. */
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 7/8] net/ntnic: adds ethdev and makes PMD available
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (4 preceding siblings ...)
  2023-09-08 16:07   ` [PATCH v16 6/8] net/ntnic: adds flow logic Mykola Kostenok
@ 2023-09-08 16:07   ` Mykola Kostenok
  2023-09-08 16:07   ` [PATCH v16 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
  2023-09-15 15:54   ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Ferruh Yigit
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

Hooks into the DPDK API, and make the PMD available to use.
Also adds documentation as .rst and .ini files.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
* Fix supported platform list
v3:
* Fix Fedora 38 compilation issues
v5:
* Add ntnic.rst to index file
v10:
* Fix wrong queue id range.
v11:
* Repace stdatomic by compiler build-in atomic.
v13:
* Fix typo spelling warnings
---
 .mailmap                                      |    2 +
 MAINTAINERS                                   |    7 +
 doc/guides/nics/features/ntnic.ini            |   50 +
 doc/guides/nics/index.rst                     |    1 +
 doc/guides/nics/ntnic.rst                     |  235 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +
 drivers/net/ntnic/include/ntos_system.h       |   23 +
 drivers/net/ntnic/meson.build                 |   13 +
 drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++
 drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +
 drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++
 drivers/net/ntnic/ntnic_ethdev.h              |  355 ++
 .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +
 drivers/net/ntnic/ntnic_hshconfig.c           |  102 +
 drivers/net/ntnic/ntnic_hshconfig.h           |    9 +
 drivers/net/ntnic/ntnic_meter.c               |  811 ++++
 drivers/net/ntnic/ntnic_meter.h               |   10 +
 drivers/net/ntnic/ntnic_vdpa.c                |  365 ++
 drivers/net/ntnic/ntnic_vdpa.h                |   21 +
 drivers/net/ntnic/ntnic_vf.c                  |   83 +
 drivers/net/ntnic/ntnic_vf.h                  |   17 +
 drivers/net/ntnic/ntnic_vf_vdpa.c             | 1235 +++++
 drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +
 drivers/net/ntnic/ntnic_vfio.c                |  321 ++
 drivers/net/ntnic/ntnic_vfio.h                |   31 +
 drivers/net/ntnic/ntnic_xstats.c              |  703 +++
 drivers/net/ntnic/ntnic_xstats.h              |   22 +
 29 files changed, 12501 insertions(+)
 create mode 100644 doc/guides/nics/features/ntnic.ini
 create mode 100644 doc/guides/nics/ntnic.rst
 create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h
 create mode 100644 drivers/net/ntnic/include/ntos_system.h
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.c
 create mode 100644 drivers/net/ntnic/ntnic_ethdev.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c
 create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c
 create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h
 create mode 100644 drivers/net/ntnic/ntnic_meter.c
 create mode 100644 drivers/net/ntnic/ntnic_meter.h
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vf.c
 create mode 100644 drivers/net/ntnic/ntnic_vf.h
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c
 create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h
 create mode 100644 drivers/net/ntnic/ntnic_vfio.c
 create mode 100644 drivers/net/ntnic/ntnic_vfio.h
 create mode 100644 drivers/net/ntnic/ntnic_xstats.c
 create mode 100644 drivers/net/ntnic/ntnic_xstats.h

diff --git a/.mailmap b/.mailmap
index 864d33ee46..be8880971d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>
 Choonho Son <choonho.son@gmail.com>
 Chris Metcalf <cmetcalf@mellanox.com>
 Christian Ehrhardt <christian.ehrhardt@canonical.com>
+Christian Koue Muf <ckm@napatech.com>
 Christian Maciocco <christian.maciocco@intel.com>
 Christophe Fontaine <cfontain@redhat.com>
 Christophe Grosse <christophe.grosse@6wind.com>
@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>
 Murphy Yang <murphyx.yang@intel.com>
 Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>
 Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>
+Mykola Kostenok <mko-plv@napatech.com>
 Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>
 Nagadheeraj Rottela <rnagadheeraj@marvell.com>
 Naga Harish K S V <s.v.naga.harish.k@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 698608cdb2..fbe19449c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,13 @@ F: drivers/net/memif/
 F: doc/guides/nics/memif.rst
 F: doc/guides/nics/features/memif.ini
 
+NTNIC PMD
+M: Mykola Kostenok <mko-plv@napatech.com>
+M: Christian Koue Muf <ckm@napatech.com>
+F: drivers/net/ntnic/
+F: doc/guides/nics/ntnic.rst
+F: doc/guides/nics/features/ntnic.ini
+
 
 Crypto Drivers
 --------------
diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
new file mode 100644
index 0000000000..2583e12b1f
--- /dev/null
+++ b/doc/guides/nics/features/ntnic.ini
@@ -0,0 +1,50 @@
+;
+; Supported features of the 'ntnic' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link status          = Y
+Queue start/stop     = Y
+Shared Rx queue      = Y
+MTU update           = Y
+Promiscuous mode     = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+Inner RSS            = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
+Basic stats          = Y
+Extended stats       = Y
+FW version           = Y
+Linux                = Y
+x86-64               = Y
+
+[rte_flow items]
+any                  = Y
+eth                  = Y
+gtp                  = Y
+ipv4                 = Y
+ipv6                 = Y
+port_id              = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = Y
+
+[rte_flow actions]
+drop                 = Y
+jump                 = Y
+meter                = Y
+modify_field         = Y
+port_id              = Y
+queue                = Y
+raw_decap            = Y
+raw_encap            = Y
+rss                  = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 7bfcac880f..c14bc7988a 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -53,6 +53,7 @@ Network Interface Controller Drivers
     nfb
     nfp
     ngbe
+    ntnic
     null
     octeon_ep
     octeontx
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
new file mode 100644
index 0000000000..85c58543dd
--- /dev/null
+++ b/doc/guides/nics/ntnic.rst
@@ -0,0 +1,235 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 Napatech A/S
+
+NTNIC Poll Mode Driver
+======================
+
+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.
+
+
+Design
+------
+
+The NTNIC PMD is designed as a pure user-space driver, and requires no special
+Napatech kernel modules.
+
+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses
+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is
+exclusive, so only one process should access it. The physical ports are located
+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more
+TX and RX queues each.
+
+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel
+driver is bound to the VFs. The VFs implement virtio data plane only and the VF
+configuration is done by NTNIC PMD through PF0. Each VF can be configured with
+one or more TX and RX queue pairs. The VFs are numbered starting from VF 4.
+The number of VFs is limited by the number of queues supported by the FPGA,
+and the number of queue pairs allocated for each VF. Current FPGA supports 128
+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).
+
+As the Napatech smartNICs supports sensors and monitoring beyond what is
+available in the DPDK API, the PMD includes the ntconnect socket interface.
+ntconnect additionally allows Napatech to implement specific customer requests
+that are not supported by the DPDK API.
+
+
+Supported NICs
+--------------
+
+- NT200A02 2x100G SmartNIC
+
+    - FPGA ID 9563 (Inline Flow Management)
+
+
+Features
+--------
+
+- Multiple TX and RX queues.
+- Scatter and gather for TX and RX.
+- RSS based on VLAN or 5-tuple.
+- RSS using different combinations of fields: L3 only, L4 only or both, and
+    source only, destination only or both.
+- Several RSS hash keys, one for each flow type.
+- Default RSS operation with no hash key specification.
+- VLAN filtering.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
+- Hairpin.
+- HW checksum offload of RX and hairpin.
+- Promiscuous mode on PF and VF.
+- Flow API.
+- Multiple processes.
+- Tunnel types: GTP.
+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum
+    verification.
+- Support for multiple rte_flow groups.
+- Encapsulation and decapsulation of GTP data.
+- Packet modification: NAT, TTL decrement, DSCP tagging
+- Traffic mirroring.
+- Jumbo frame support.
+- Port and queue statistics.
+- RMON statistics in extended stats.
+- Flow metering, including meter policy API.
+- Link state information.
+- CAM and TCAM based matching.
+- Exact match of 140 million flows and policies.
+
+
+Limitations
+~~~~~~~~~~~
+
+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci
+support for creating VFs from the PF which is required for the PMD to use
+vfio-pci on the PF. This support has been back-ported to older Linux
+distributions and they are also supported. If vfio-pci is not required kernel
+version 4.18 is supported.
+
+Current NTNIC PMD implementation only supports one active adapter.
+
+
+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+    -a: Used to specifically define the NT adapter by PCI ID.
+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+    <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+    Specify number of RX queues to use.
+
+    To specify number of RX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+    Specify number of TX queues to use.
+
+    To specify number of TX queues::
+
+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+    By default, the value is set to 1.
+
+- ``exception_path`` parameter [int]
+
+    Enable exception path for unmatched packets to go through queue 0.
+
+    To enable exception_path::
+
+        -a <domain>:<bus>:00.0,exception_path=1
+
+    By default, the value is set to 0.
+
+- ``port.link_speed`` parameter [list]
+
+    This parameter is used to set the link speed on physical ports in the format::
+
+        port.link_speed=<port>:<link speed in Mbps>
+
+    To set up link speeds::
+
+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000
+
+    By default, set to the maximum corresponding to the NIM bit rate.
+
+- ``supported-fpgas`` parameter [str]
+
+    List the supported FPGAs for a compiled NTNIC DPDK-driver.
+
+    This parameter has two options::
+
+        - list.
+        - verbose.
+
+    Example usages::
+
+        -a <domain>:<bus>:00.0,supported-fpgas=list
+        -a <domain>:<bus>:00.0,supported-fpgas=verbose
+
+- ``help`` parameter [none]
+
+    List all available NTNIC PMD parameters.
+
+
+Build options
+~~~~~~~~~~~~~
+
+- ``NT_TOOLS``
+
+    Define that enables the PMD ntconnect source code.
+
+    Default: Enabled.
+
+- ``NT_VF_VDPA``
+
+    Define that enables the PMD VF VDPA source code.
+
+    Default: Enabled.
+
+- ``NT_RELAY_CORE``
+
+    Define that enables the PMD relay core source code. The relay core is used
+    by Napatech's vSwitch PMD profile in an OVS environment.
+
+    Default: Disabled.
+
+
+Logging and Debugging
+---------------------
+
+NTNIC supports several groups of logging that can be enabled with ``log-level``
+parameter:
+
+- ETHDEV.
+
+    Logging info from the main PMD code. i.e. code that is related to DPDK::
+
+        --log-level=ntnic.ethdev,8
+
+- NTHW.
+
+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::
+
+        --log-level=ntnic.nthw,8
+
+- vDPA.
+
+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::
+
+        --log-level=ntnic.vdpa,8
+
+- FILTER.
+
+    Logging info from filter. i.e. code that is related to the binary filter::
+
+        --log-level=ntnic.filter,8
+
+- FPGA.
+
+    Logging related to FPGA::
+
+        --log-level=ntnic.fpga,8
+
+To enable logging on all levels use wildcard in the following way::
+
+    --log-level=ntnic.*,8
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
new file mode 100644
index 0000000000..e9c38fc330
--- /dev/null
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTDRV_4GA_H__
+#define __NTDRV_4GA_H__
+
+#include "nthw_drv.h"
+#include "nt4ga_adapter.h"
+#include "nthw_platform_drv.h"
+
+/* Per-adapter driver state shared across the ntnic PMD. */
+typedef struct ntdrv_4ga_s {
+	uint32_t pciident;	/* PCI identifier of the device */
+	struct adapter_info_s adapter_info;	/* nt4ga adapter state */
+	char *p_drv_name;	/* driver name string */
+
+	volatile bool b_shutdown;	/* shutdown flag -- presumably polled by the worker threads; confirm */
+	pthread_mutex_t stat_lck;	/* mutex -- presumably protects statistics state; confirm */
+	pthread_t stat_thread;	/* statistics thread handle */
+	pthread_t flm_thread;	/* FLM thread handle */
+} ntdrv_4ga_t;
+
+#endif /* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h
new file mode 100644
index 0000000000..0adfe86cc3
--- /dev/null
+++ b/drivers/net/ntnic/include/ntos_system.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTOS_SYSTEM_H__
+#define __NTOS_SYSTEM_H__
+
+#include "include/ntdrv_4ga.h"
+
+/*
+ * struct drv_s for DPDK (clone of kernel struct)
+ * keep it as close as possible to original kernel struct
+ */
+struct drv_s {
+	int adapter_no;	/* adapter index */
+	struct rte_pci_device *p_dev;	/* underlying DPDK PCI device */
+	struct ntdrv_4ga_s ntdrv;	/* per-adapter driver state */
+
+	int n_eth_dev_init_count;	/* count of initialized eth devs -- TODO confirm semantics */
+	int probe_finished;	/* presumably nonzero once probing completed -- confirm */
+};
+
+#endif /* __NTOS_SYSTEM_H__ */
diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index 0ae574f9ca..f7454ffb79 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -27,6 +27,9 @@ includes = [
     include_directories('sensors/ntavr'),
 ]
 
+# deps
+deps += 'vhost'
+
 # all sources
 sources = files(
     'adapter/nt4ga_adapter.c',
@@ -113,6 +116,16 @@ sources = files(
     'nthw/nthw_stat.c',
     'nthw/supported/nthw_fpga_9563_055_024_0000.c',
     'ntlog/ntlog.c',
+    'ntnic_dbsconfig.c',
+    'ntnic_ethdev.c',
+    'ntnic_filter/ntnic_filter.c',
+    'ntnic_hshconfig.c',
+    'ntnic_meter.c',
+    'ntnic_vdpa.c',
+    'ntnic_vf.c',
+    'ntnic_vfio.c',
+    'ntnic_vf_vdpa.c',
+    'ntnic_xstats.c',
     'ntutil/nt_util.c',
     'sensors/avr_sensors/avr_sensors.c',
     'sensors/board_sensors/board_sensors.c',
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c
new file mode 100644
index 0000000000..2217c163ad
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.c
@@ -0,0 +1,1670 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+
+#include "ntdrv_4ga.h"
+#include "nt_util.h"
+#include "ntnic_dbsconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntlog.h"
+
+#define STRUCT_ALIGNMENT (4 * 1024LU)
+#define MAX_VIRT_QUEUES 128
+
+#define LAST_QUEUE 127
+#define DISABLE 0
+#define ENABLE 1
+#define RX_AM_DISABLE DISABLE
+#define RX_AM_ENABLE ENABLE
+#define RX_UW_DISABLE DISABLE
+#define RX_UW_ENABLE ENABLE
+#define RX_Q_DISABLE DISABLE
+#define RX_Q_ENABLE ENABLE
+#define RX_AM_POLL_SPEED 5
+#define RX_UW_POLL_SPEED 9
+#define HOST_ID 0
+#define INIT_QUEUE 1
+
+#define TX_AM_DISABLE DISABLE
+#define TX_AM_ENABLE ENABLE
+#define TX_UW_DISABLE DISABLE
+#define TX_UW_ENABLE ENABLE
+#define TX_Q_DISABLE DISABLE
+#define TX_Q_ENABLE ENABLE
+#define TX_AM_POLL_SPEED 5
+#define TX_UW_POLL_SPEED 8
+
+/**************************************************************************/
+
+#define le64 uint64_t
+#define le32 uint32_t
+#define le16 uint16_t
+
+/**************************************************************************/
+
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+#pragma pack(1)
+/* Split virtqueue "available" ring; ring[] holds queue_size entries. */
+struct virtq_avail {
+	le16 flags;
+	le16 idx;
+	le16 ring[]; /* Queue size */
+};
+
+#pragma pack()
+/**************************************************************************/
+
+/* le32 is used here for ids for padding reasons. */
+#pragma pack(1)
+struct virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	le32 id;
+	/* Total length of the descriptor chain which was used (written to) */
+	le32 len;
+};
+
+#pragma pack()
+
+#define VIRTQ_USED_F_NO_NOTIFY 1
+
+#pragma pack(1)
+/* Split virtqueue "used" ring; ring[] holds queue_size entries. */
+struct virtq_used {
+	le16 flags;
+	le16 idx;
+	struct virtq_used_elem ring[]; /* Queue size */
+};
+
+#pragma pack()
+
+/* Byte offsets of the used ring and descriptor table within one
+ * contiguous split-ring allocation (avail struct is at offset 0).
+ */
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
+/* Lifecycle state of a queue slot in the rxvq[]/txvq[] tables. */
+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };
+
+/* 1 when the queue type is PACKED_RING, else 0 (split ring). */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+/*
+ * Software state for one virt queue.  The anonymous union holds either
+ * split-ring or packed-ring bookkeeping, selected by vq_type.
+ */
+struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			le16 am_idx;
+			le16 used_idx;
+			le16 cached_idx;
+			le16 tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			struct {
+				le16 next;
+				le16 num;
+			} outs;
+			/*
+			 * when in-order release used Tx packets from FPGA it may collapse
+			 * into a batch. When getting new Tx buffers we may only need
+			 * partial
+			 */
+			le16 next_avail;
+			le16 next_used;
+			le16 avail_wrap_count;
+			le16 used_wrap_count;
+		};
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	enum nthw_virt_queue_usage usage;
+	uint16_t vq_type;
+	uint16_t in_order;
+	int irq_vector;
+
+	nthw_dbs_t *mp_nthw_dbs;
+	uint32_t index;
+	le16 queue_size;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port; /* Only used by TX queues */
+	uint32_t virtual_port; /* Only used by TX queues */
+	uint32_t header;
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
+};
+
+/* Offsets of the packed-ring event suppression structs. */
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
+/* Static queue state tables, indexed by queue number. */
+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
+
+/*
+ * Initialize one RX queue in the DBS block: wait for any in-flight
+ * init operation, start initialization of the given queue, then wait
+ * for it to complete (busy-poll on the init status).
+ */
+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t is_busy;
+	uint32_t init_val;
+	uint32_t unused;
+
+	/* Wait until no RX init operation is in flight */
+	for (;;) {
+		get_rx_init(p_nthw_dbs, &init_val, &unused, &is_busy);
+		if (is_busy == 0)
+			break;
+	}
+
+	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait for this queue's initialization to finish */
+	for (;;) {
+		get_rx_init(p_nthw_dbs, &init_val, &unused, &is_busy);
+		if (is_busy == 0)
+			break;
+	}
+}
+
+/*
+ * Initialize one TX queue in the DBS block: wait for any in-flight
+ * init operation, start initialization of the given queue, then wait
+ * for it to complete (busy-poll on the init status).
+ */
+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,
+			      uint32_t start_idx, uint32_t start_ptr)
+{
+	uint32_t is_busy;
+	uint32_t init_val;
+	uint32_t unused;
+
+	/* Wait until no TX init operation is in flight */
+	for (;;) {
+		get_tx_init(p_nthw_dbs, &init_val, &unused, &is_busy);
+		if (is_busy == 0)
+			break;
+	}
+
+	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
+
+	/* Wait for this queue's initialization to finish */
+	for (;;) {
+		get_tx_init(p_nthw_dbs, &init_val, &unused, &is_busy);
+		if (is_busy == 0)
+			break;
+	}
+}
+
+/*
+ * Create the DBS module for this FPGA and bring all RX/TX virt queues
+ * into a known, enabled state.
+ *
+ * Returns 0 on success, -1 on allocation failure, or the dbs_init()
+ * error code when the DBS module probe/create fails.  On success the
+ * DBS handle is stored in p_fpga_info->mp_nthw_dbs.
+ */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
+{
+	assert(p_fpga_info);
+
+	nt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
+	nthw_dbs_t *p_nthw_dbs;
+	int res = 0;
+	uint32_t i;
+
+	p_fpga_info->mp_nthw_dbs = NULL;
+
+	p_nthw_dbs = nthw_dbs_new();
+	if (p_nthw_dbs == NULL)
+		return -1;
+
+	res = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	res = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */
+	if (res) {
+		free(p_nthw_dbs);
+		return res;
+	}
+
+	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;
+
+	/* Mark all software queue slots free before touching hardware */
+	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
+		rxvq[i].usage = UNUSED;
+		txvq[i].usage = UNUSED;
+	}
+
+	dbs_reset(p_nthw_dbs);
+
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
+		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);
+
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
+		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);
+
+	/*
+	 * Staged enable: everything off, then AM/UW polling on, then the
+	 * queues on.  Appears to be the required DBS bring-up order --
+	 * TODO confirm against the DBS hardware documentation.
+	 */
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,
+		       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);
+	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,
+		       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);
+
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,
+		       TX_UW_DISABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);
+	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,
+		       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);
+
+	return 0;
+}
+
+/* Round sz up to the next multiple of STRUCT_ALIGNMENT (no-op when
+ * already aligned).
+ */
+static size_t dbs_align_size(size_t sz)
+{
+	if (sz % STRUCT_ALIGNMENT == 0)
+		return sz;
+	return STRUCT_ALIGNMENT * (sz / STRUCT_ALIGNMENT + 1);
+}
+
+/*
+ * Compute the byte offsets of the used ring and descriptor table for a
+ * split-ring allocation of the given queue size; each section is
+ * padded to STRUCT_ALIGNMENT.
+ */
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	/* "avail->used_event" is not used, hence no extra trailing le16 */
+	size_t avail_bytes = dbs_align_size(sizeof(struct virtq_avail) +
+					    queue_size * sizeof(le16));
+	/* "used->avail_event" is not used, hence no extra trailing le16 */
+	size_t used_bytes = dbs_align_size(sizeof(struct virtq_used) +
+					   queue_size *
+					   sizeof(struct virtq_used_elem));
+
+	struct virtq_struct_layout_s layout = {
+		.used_offset = avail_bytes,
+		.desc_offset = avail_bytes + used_bytes,
+	};
+
+	return layout;
+}
+
+/*
+ * Initialize a split-ring "avail" structure: interrupts suppressed,
+ * index preset, and the ring identity-mapped (slot n -> descriptor n).
+ */
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+					uint16_t initial_avail_idx)
+{
+	struct virtq_avail *avail = addr;
+	uint16_t n;
+
+	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	avail->idx = initial_avail_idx;
+	for (n = 0; n < queue_size; n++)
+		avail->ring[n] = n;
+}
+
+/*
+ * Initialize a split-ring "used" structure: notifications disabled and
+ * every ring entry cleared.
+ */
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	/* Was a magic `1` -- use the named flag (defined as 1 above). */
+	p_used->flags = VIRTQ_USED_F_NO_NOTIFY;
+	p_used->idx = 0;
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+/*
+ * Fill the descriptor table from the packet buffer array: one descriptor
+ * per buffer, no chaining (next = 0), identical flags for every entry.
+ * No-op when no buffer array is supplied.
+ */
+static void dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, ule16 flgs)
+{
+	struct virtq_desc *desc = (struct virtq_desc *)addr;
+	int entry;
+
+	if (!packet_buffer_descriptors)
+		return;
+
+	for (entry = 0; entry < queue_size; ++entry) {
+		desc[entry].addr =
+			(uint64_t)packet_buffer_descriptors[entry].phys_addr;
+		desc[entry].len = packet_buffer_descriptors[entry].len;
+		desc[entry].flags = flgs;
+		desc[entry].next = 0;
+	}
+}
+
+/*
+ * Initialize all three split-ring regions (avail, used, desc) in one call.
+ * The three regions are independent; the descriptor table is only filled
+ * when packet buffers are supplied.
+ */
+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,
+	void *used_struct_addr, void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size, initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr, packet_buffer_descriptors,
+					 queue_size, flgs);
+}
+
+/*
+ * Return floor(log2(qsize)); queue sizes are powers of two, so this is the
+ * exact exponent the DBS registers expect.
+ * Fix: for qsize == 0 the original decremented the unsigned counter past
+ * zero (wrapping to 0xffffffff); return 0 for that degenerate input instead.
+ */
+static le16 dbs_qsize_log2(le16 qsize)
+{
+	uint32_t qs = 0;
+
+	if (qsize == 0)
+		return 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+	--qs;
+	return qs;
+}
+
+/*
+ * Configure an Rx virt-queue in the DBS block (setup order per DSF00094)
+ * and record its state in the local rxvq[] table.
+ * Interrupts always start disabled; queues with irq_vector >= 0 are armed
+ * later via nthw_enable_rx_virt_queue().
+ * Returns the queue handle, or NULL on invalid index / register failure.
+ * Fix: guard the rxvq[] table write with the same MAX_VIRT_QUEUES bounds
+ * check the enable/disable calls already perform, and log the signed
+ * irq_vector with %d (it was printed with %u).
+ */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector)
+{
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (index >= MAX_VIRT_QUEUES)
+		return NULL;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data int=0 irq_vector=%d\n",
+	       __func__, irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		/* interrupt-packed flag is always 0 here since irq_vector < 0 */
+		if (set_rx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,
+				   host_id, PACKED(vq_type), 0) != 0)
+			return NULL;
+	}
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   RX_AM_ENABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/* Save queue state */
+	rxvq[index].usage = UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0; /* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+/*
+ * Quiesce an unmanaged Rx queue without releasing it: clear interrupt setup
+ * in RX_UW_DATA, disable availability monitoring (RX_AM), then wait for the
+ * FPGA to finish outstanding work. Queue memory and state are preserved so
+ * nthw_enable_rx_virt_queue() can resume the queue later.
+ * Returns 0 on success, -1 on invalid queue or register access failure.
+ */
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* only queues set up directly (not driver-managed) may be disabled here */
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,
+			   0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rx_vq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re)enable a previously set-up or disabled unmanaged Rx queue: program the
+ * interrupt fields in RX_UW_DATA (sticky interrupt when a valid irq_vector
+ * is configured), then re-enable availability monitoring (RX_AM).
+ * Returns 0 on success, -1 on invalid queue or register access failure.
+ * Fix: irq_vector is a signed int and was logged with %u; use %d.
+ */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!rx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;
+
+	if (rx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	if (rx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(rx_vq->queue_size);
+
+	/* Set ISTK if */
+	if (rx_vq->irq_vector >= 0 &&
+			rx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = rx_vq->irq_vector;
+		istk = 1;
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	NT_LOG(DBG, ETHDEV, "%s: set_rx_uw_data irq_vector=%d\n", __func__,
+	       rx_vq->irq_vector);
+	if (set_rx_uw_data(p_nthw_dbs, rx_vq->index,
+			   rx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)rx_vq->desc_struct_phys_addr :
+			   (uint64_t)rx_vq->used_struct_phys_addr,
+			   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),
+			   int_enable, vec, istk) != 0)
+		return -1;
+
+	/* Enable AM */
+	rx_vq->am_enable = RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, rx_vq->index,
+			   (uint64_t)rx_vq->avail_struct_phys_addr,
+			   rx_vq->am_enable, rx_vq->host_id,
+			   PACKED(rx_vq->vq_type),
+			   rx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Quiesce an unmanaged Tx queue without releasing it: clear interrupt setup
+ * in TX_UW_DATA, disable availability monitoring (TX_AM), then wait for the
+ * FPGA to finish outstanding work. Mirrors nthw_disable_rx_virt_queue().
+ * Returns 0 on success, -1 on invalid queue or register access failure.
+ */
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* only queues set up directly (not driver-managed) may be disabled here */
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* If ISTK is set, make sure to unset it */
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,
+			   tx_vq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	tx_vq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type), 0) != 0)
+		return -1;
+
+	/* let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * (Re)enable a previously set-up or disabled unmanaged Tx queue: program the
+ * interrupt fields in TX_UW_DATA (sticky interrupt when a valid irq_vector
+ * is configured), then re-enable availability monitoring (TX_AM).
+ * Returns 0 on success, -1 on invalid queue or register access failure.
+ */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+
+	if (tx_vq->index >= MAX_VIRT_QUEUES)
+		return -1;
+
+	/* only queues set up directly (not driver-managed) may be enabled here */
+	if (tx_vq->usage != UNMANAGED)
+		return -1;
+
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* Set ISTK if irq_vector is used */
+	if (tx_vq->irq_vector >= 0 &&
+			tx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {
+		int_enable = 1;
+		vec = tx_vq->irq_vector;
+		istk = 1; /* Use sticky interrupt */
+	} else {
+		int_enable = 0;
+		vec = 0;
+		istk = 0;
+	}
+	if (set_tx_uw_data(p_nthw_dbs, tx_vq->index,
+			   tx_vq->vq_type == PACKED_RING ?
+			   (uint64_t)tx_vq->desc_struct_phys_addr :
+			   (uint64_t)tx_vq->used_struct_phys_addr,
+			   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),
+			   int_enable, vec, istk, tx_vq->in_order) != 0)
+		return -1;
+
+	/* Enable AM */
+	tx_vq->am_enable = TX_AM_ENABLE;
+	if (set_tx_am_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->avail_struct_phys_addr,
+			   tx_vq->am_enable, tx_vq->host_id,
+			   PACKED(tx_vq->vq_type),
+			   tx_vq->irq_vector >= 0 ? 1 : 0) != 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Redirect a Tx queue to another physical out-port and (re)enable it.
+ * Returns 0 on success, -1 on failure.
+ * Fix: add the NULL-queue guard that every other public queue entry point
+ * in this file performs before dereferencing the handle.
+ */
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport)
+{
+	if (!tx_vq) {
+		NT_LOG(ERR, ETHDEV, "%s: Invalid queue\n", __func__);
+		return -1;
+	}
+
+	nthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;
+	uint32_t qs = dbs_qsize_log2(tx_vq->queue_size);
+
+	/* rewrite TX_DR_DATA with the new out-port (header field passed as 0) */
+	if (set_tx_dr_data(p_nthw_dbs, tx_vq->index,
+			   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,
+			   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)
+		return -1;
+	return nthw_enable_tx_virt_queue(tx_vq);
+}
+
+/*
+ * Configure per-port Tx QoS: enable flag, information rate (ir) and burst
+ * size (bs). Thin wrapper around set_tx_qos_data(); returns its status.
+ */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs)
+{
+	return set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);
+}
+
+/*
+ * Set the global Tx QoS rate scaling as multiplier/divider.
+ * Thin wrapper around set_tx_qos_rate(); returns its status.
+ */
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider)
+{
+	return set_tx_qos_rate(p_nthw_dbs, multiplier, divider);
+}
+
+/* Sentinel returned in *p_index when HW reports the pointer as invalid */
+#define INDEX_PTR_NOT_VALID 0x80000000
+/*
+ * Read the current RX queue pointer from HW. On success *p_index holds the
+ * pointer, or INDEX_PTR_NOT_VALID when the valid flag is clear; on failure
+ * *p_index is left untouched and the error status is returned.
+ */
+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+	const int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/*
+ * Read the current TX queue pointer from HW. On success *p_index holds the
+ * pointer, or INDEX_PTR_NOT_VALID when the valid flag is clear; on failure
+ * *p_index is left untouched and the error status is returned.
+ */
+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)
+{
+	uint32_t ptr;
+	uint32_t queue;
+	uint32_t valid;
+	const int status = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);
+
+	if (status != 0)
+		return status;
+
+	*p_index = valid ? ptr : INDEX_PTR_NOT_VALID;
+	return status;
+}
+
+/* Select which RX queue subsequent dbs_get_rx_ptr() reads will report on. */
+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_rx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/* Select which TX queue subsequent dbs_get_tx_ptr() reads will report on. */
+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)
+{
+	return set_tx_ptr_queue(p_nthw_dbs, queue);
+}
+
+/*
+ * Poll the RX or TX idle register until the busy flag clears or the read
+ * fails; on return *idle holds the last idle value read and the last read
+ * status is returned.
+ * NOTE(review): there is no timeout - this spins forever if the FPGA never
+ * clears busy; confirm this is acceptable for the shutdown path.
+ */
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+	uint32_t queue;
+	uint32_t busy;
+	int err;
+
+	for (;;) {
+		err = rx ? get_rx_idle(p_nthw_dbs, idle, &queue, &busy) :
+		      get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+		if (err != 0 || !busy)
+			return err;
+	}
+}
+
+/*
+ * Wait until the FPGA reports the given queue idle so its resources can be
+ * safely released or re-programmed. rx selects the RX (1) or TX (0) idle
+ * registers. Returns 0 on success (or unsupported HW), -1 on failure.
+ */
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+	if (err) {
+		if (err == -ENOTSUP) {
+			/*
+			 * Idle register not supported - presumably older FPGA
+			 * images; fall back to a fixed 200 ms grace period.
+			 */
+			NT_OS_WAIT_USEC(200000);
+			return 0;
+		}
+		return -1;
+	}
+
+	/* request idle for this queue and poll until HW confirms it */
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+/*
+ * Fully tear down an Rx queue in HW: clear UW data, disable and clear AM,
+ * let the FPGA drain, clear DR data, re-init the queue and reset the
+ * driver-side bookkeeping. Returns 0 on success, -1 on failure.
+ * Fix: the original dereferenced rxvq (rxvq->mp_nthw_dbs) before its NULL
+ * check; the check must come first.
+ */
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,
+			   PACKED(rxvq->vq_type), 0, 0, 0) != 0)
+		return -1;
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+	if (set_rx_am_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,
+			   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+	if (set_rx_dr_data(p_nthw_dbs, rxvq->index,
+			   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,
+			   rxvq->header, PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged Rx queue (the caller owns the packet buffers). */
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (!rxvq || rxvq->usage != UNMANAGED)
+		return -1;
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Release a driver-managed Rx queue, freeing the driver-owned copy of the
+ * packet buffer table before tearing down the HW queue.
+ * Fix: drop the redundant NULL guard around free() - free(NULL) is a no-op.
+ */
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != MANAGED)
+		return -1;
+
+	free(rxvq->p_virtual_addr);
+	rxvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+/*
+ * Fully tear down a Tx queue in HW: clear UW data, disable and clear AM,
+ * let the FPGA drain, clear DR and QP data, re-init the queue and reset
+ * the driver-side bookkeeping. Returns 0 on success, -1 on failure.
+ * Fix: the original dereferenced txvq (txvq->mp_nthw_dbs) before its NULL
+ * check; the check must come first.
+ */
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL)
+		return -1;
+
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,
+			   PACKED(txvq->vq_type), 0, 0, 0,
+			   txvq->in_order) != 0)
+		return -1;
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+	if (set_tx_am_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,
+			   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)
+		return -1;
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+	if (set_tx_dr_data(p_nthw_dbs, txvq->index,
+			   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,
+			   txvq->port, txvq->header,
+			   PACKED(txvq->vq_type)) != 0)
+		return -1;
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=
+			0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+/* Release an unmanaged Tx queue (the caller owns the packet buffers). */
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (!txvq || txvq->usage != UNMANAGED)
+		return -1;
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Release a driver-managed Tx queue, freeing the driver-owned copy of the
+ * packet buffer table before tearing down the HW queue.
+ * Fix: drop the redundant NULL guard around free() - free(NULL) is a no-op.
+ */
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != MANAGED)
+		return -1;
+
+	free(txvq->p_virtual_addr);
+	txvq->p_virtual_addr = NULL;
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
+/*
+ * Configure a Tx virt-queue in the DBS block (setup order per DSF00094)
+ * and record its state in the local txvq[] table.
+ * Interrupts always start disabled; queues with irq_vector >= 0 are armed
+ * later via nthw_enable_tx_virt_queue().
+ * Returns the queue handle, or NULL on invalid index / register failure.
+ * Fix: guard the txvq[] table write with the same MAX_VIRT_QUEUES bounds
+ * check the enable/disable calls already perform.
+ */
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	if (index >= MAX_VIRT_QUEUES)
+		return NULL;
+
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,
+			   host_id, qs, port, header, PACKED(vq_type)) != 0)
+		return NULL;
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			   vq_type == PACKED_RING ?
+			   (uint64_t)desc_struct_phys_addr :
+			   (uint64_t)used_struct_phys_addr,
+			   host_id, qs, PACKED(vq_type), int_enable, vec, istk,
+			   in_order) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+			   TX_AM_DISABLE, host_id, PACKED(vq_type),
+			   irq_vector >= 0 ? 1 : 0) != 0)
+		return NULL;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		/* interrupt-packed flag is always 0 here since irq_vector < 0 */
+		if (set_tx_am_data(p_nthw_dbs, index,
+				   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,
+				   host_id, PACKED(vq_type), 0) != 0)
+			return NULL;
+	}
+
+	/* Save queue state */
+	txvq[index].usage = UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].header = header;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+/*
+ * Create a driver-managed split-ring Rx queue: lay out and initialize the
+ * avail/used/desc structures in the supplied memory, keep a private copy of
+ * the packet buffer table, and program the HW queue.
+ * Returns the queue handle, or NULL on allocation failure.
+ * Fix: the original memcpy'd into the malloc result without checking it
+ * for NULL.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size,
+					  p_packet_buffers ? (uint16_t)queue_size : 0,
+					  VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	rxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		if (rxvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, header,
+				 SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = MANAGED;
+
+	return &rxvq[index];
+}
+
+/*
+ * Create a driver-managed split-ring Tx queue: lay out and initialize the
+ * avail/used/desc structures in the supplied memory, keep a private copy of
+ * the packet buffer table, and program the HW queue.
+ * Returns the queue handle, or NULL on allocation failure.
+ * Fix: the original memcpy'd into the malloc result without checking it
+ * for NULL.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout =
+			dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.used_offset,
+					  (char *)p_virt_struct_area->virt_addr +
+					  virtq_struct_layout.desc_offset,
+					  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.used_offset);
+	txvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +
+				     virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (le16)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr =
+			malloc(queue_size * sizeof(*p_packet_buffers));
+		if (txvq[index].p_virtual_addr == NULL)
+			return NULL;
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+		       queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,
+				 (void *)p_virt_struct_area->phys_addr,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.used_offset,
+				 (char *)p_virt_struct_area->phys_addr +
+				 virtq_struct_layout.desc_offset,
+				 (uint16_t)queue_size, host_id, port, virtual_port,
+				 header, SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Packed Ring
+ */
+/*
+ * Common packed-ring setup for Rx and Tx: lay out descriptor table and the
+ * two event-suppression structures in the page-aligned struct area,
+ * pre-populate descriptors (buffer id == index in p_packet_buffers, which
+ * relies on the FPGA delivering in order), and keep a private copy of the
+ * packet buffer table. Returns 0 on success, -1 on failure.
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	/*
+	 * NOTE(review): the trailing sizeof(int) * queue_size region is not
+	 * referenced below - presumably extra scratch included in the
+	 * allocation; confirm against the allocator of p_virt_struct_area.
+	 */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 +
+	       sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset =
+		sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset +
+		sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc +
+				    pvirtq_layout->driver_event_offset);
+
+	/* ring starts empty with both wrap counters at their initial value */
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible if FPGA always delivers in-order
+	 * Buffer ID used is the index in the pPacketBuffers array
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^=
+			1; /* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1; /* don't allocate memory with size of 0 bytes */
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers,
+	       vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used yet by FPGA - make sure we disable */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Create a driver-managed packed-ring Rx queue: initialize the shared ring
+ * structures, then program the HW queue with the driver/device event areas
+ * and the descriptor table. Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+						 p_virt_struct_area, p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	/*
+	 * NOTE(review): start_idx 0x8000 presumably encodes the initial wrap
+	 * bit in the HW index - confirm against the DBS register spec.
+	 */
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, header, PACKED_RING, irq_vector);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a driver-managed packed-ring Tx queue: initialize the shared ring
+ * structures (flags 0, no pre-filled buffers on Tx), then program the HW
+ * queue. Returns the queue handle, or NULL on failure.
+ */
+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	int irq_vector, uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and setup packed vq ring */
+	vq->queue_size = queue_size;
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,
+			p_virt_struct_area,
+			p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	/*
+	 * NOTE(review): start_idx 0x8000 presumably encodes the initial wrap
+	 * bit in the HW index - confirm against the DBS register spec.
+	 */
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,
+				 0, /* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size,
+				 host_id, port, virtual_port, header, PACKED_RING,
+				 irq_vector, in_order);
+
+	vq->usage = MANAGED;
+	return vq;
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,
+				 uint32_t queue_size, uint32_t host_id,
+				 uint32_t header,
+				 struct nthw_memory_descriptor *p_virt_struct_area,
+				 struct nthw_memory_descriptor *p_packet_buffers,
+				 uint32_t vq_type, int irq_vector)
+{
+	/* dispatch on ring layout; anything else is unsupported */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,
+			index, queue_size, host_id, header,
+			p_virt_struct_area, p_packet_buffers, irq_vector);
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * p_virt_struct_area - Memory that can be used for virtQueue structs
+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,
+	int irq_vector, uint32_t in_order)
+{
+	/* dispatch on ring layout; anything else is unsupported */
+	if (vq_type == SPLIT_RING)
+		return nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+	if (vq_type == PACKED_RING)
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,
+			queue_size, host_id, port, virtual_port,
+			header, irq_vector, in_order, p_virt_struct_area,
+			p_packet_buffers);
+	return NULL;
+}
+
+/*
+ * Packed Ring helper macros
+ */
+/* AVAIL flag value matching the current avail wrap-around state */
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+/* Inverse USED flag: marks a descriptor the device has not yet consumed */
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+/* Advance next_avail by _num, toggling avail_wrap_count on ring wrap */
+#define inc_avail(_vq, _num)                               \
+	do {                                             \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_avail += num;                   \
+		if ((vq)->next_avail >= (vq)->queue_size) {   \
+			(vq)->next_avail -= (vq)->queue_size; \
+			(vq)->avail_wrap_count ^= 1;       \
+		}                                        \
+	} while (0)
+
+/* Advance next_used by _num, toggling used_wrap_count on ring wrap */
+#define inc_used(_vq, _num)                               \
+	do {                                            \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_num) (num) = (_num); \
+		(vq)->next_used += num;                   \
+		if ((vq)->next_used >= (vq)->queue_size) {   \
+			(vq)->next_used -= (vq)->queue_size; \
+			(vq)->used_wrap_count ^= 1;       \
+		}                                       \
+	} while (0)
+
+/*
+ * Harvest up to n received segments from the queue into rp[].
+ * Returns the number of rp[] entries filled (segments); *nb_pkts is set to
+ * the number of whole packets. A jumbo packet occupies several consecutive
+ * rp[] entries and is only delivered if all its segments fit within n.
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp, uint16_t *nb_pkts)
+{
+	le16 segs = 0;
+	uint16_t pkts = 0;
+
+	if (rxvq->vq_type == SPLIT_RING) {
+		le16 i;
+		/* entries already seen in a previous call but not yet consumed */
+		le16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+
+		if (entries_ready < n) {
+			/* Look for more packets */
+			rxvq->cached_idx = rxvq->p_used->idx;
+			entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);
+			if (entries_ready == 0) {
+				*nb_pkts = 0;
+				return 0;
+			}
+
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+
+		/* Give packets - make sure all packets are whole packets.
+		 * Valid because queue_size is always 2^n
+		 */
+		const le16 queue_mask = (le16)(rxvq->queue_size - 1);
+		/* all Rx buffers share the length programmed in descriptor 0 */
+		const ule32 buf_len = rxvq->p_desc[0].len;
+
+		le16 used = rxvq->used_idx;
+
+		for (i = 0; i < n; ++i) {
+			le32 id = rxvq->p_used->ring[used & queue_mask].id;
+
+			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+			uint32_t pkt_len =
+				((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+			if (pkt_len > buf_len) {
+				/* segmented */
+				int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+				if (((int)i + nbsegs) > n) {
+					/* don't have enough segments - break out */
+					break;
+				}
+
+				/* copy the remaining segments of this packet */
+				int ii;
+
+				for (ii = 1; ii < nbsegs; ii++) {
+					++i;
+					id = rxvq->p_used
+					     ->ring[(used + ii) &
+								queue_mask]
+					     .id;
+					rp[i].addr =
+						rxvq->p_virtual_addr[id].virt_addr;
+					rp[i].len = rxvq->p_used
+						    ->ring[(used + ii) &
+								       queue_mask]
+						    .len;
+				}
+				used += nbsegs;
+			} else {
+				++used;
+			}
+
+			pkts++;
+			segs = i + 1;
+		}
+
+		rxvq->used_idx = used;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		/* This requires in-order behavior from FPGA */
+		int i;
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			/* a descriptor is consumed when both flags match our wrap state */
+			if (avail != rxvq->used_wrap_count ||
+					used != rxvq->used_wrap_count)
+				break;
+
+			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+			rp[pkts].len = desc->len;
+			pkts++;
+
+			inc_used(rxvq, 1);
+		}
+
+		/* packed ring path delivers one segment per packet */
+		segs = pkts;
+	}
+
+	*nb_pkts = pkts;
+	return segs;
+}
+
+/*
+ * Put buffers back into Avail Ring
+ *
+ * SPLIT:  only advances the avail index by n; the ring entries are assumed
+ *         to already hold valid buffer ids (in-order reuse) - confirm
+ *         against queue setup code.
+ * PACKED: re-arms n descriptors starting at next_avail. The flags of the
+ *         first descriptor are written last, after a barrier, so the
+ *         device cannot observe a partially re-initialized chain.
+ */
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (le16)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+				    used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&rxvq->desc[rxvq->next_avail];
+
+			/* buffer id == ring slot: 1:1 mapping into p_virtual_addr[] */
+			desc->id = rxvq->next_avail;
+			desc->addr =
+				(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE |
+					      avail_flag(rxvq) |
+					      used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+		/*
+		 * NOTE(review): this publishes descriptor contents to the
+		 * device; a write barrier (rte_wmb) would normally be expected
+		 * before the first_desc->flags store - confirm intent.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/* Queue debug logging stub: expands to nothing, so arguments are never evaluated. */
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Reserve up to n free TX descriptors for the caller to fill.
+ *
+ * On return, *first_idx is the index of the first reserved descriptor,
+ * cvq points at the ring (split or packed form) and *p_virt_addr at the
+ * buffer table. Returns the number of descriptors actually reserved
+ * (may be less than n when the ring is nearly full), or 0 for an
+ * unknown ring type.
+ */
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr)
+{
+	/* m counts descriptors already reserved from the "outstanding" pool */
+	int m = 0;
+	le16 queue_mask = (le16)(txvq->queue_size -
+				1); /* Valid because queue_size is always 2^n */
+	*p_virt_addr = txvq->p_virtual_addr;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		cvq->s = txvq->p_desc;
+		cvq->vq_type = SPLIT_RING;
+
+		*first_idx = txvq->tx_descr_avail_idx;
+
+		le16 entries_used =
+			(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &
+			       queue_mask);
+		/* one slot is kept free so full and empty are distinguishable */
+		le16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);
+
+		vq_log_arg(txvq,
+			   "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\n",
+			   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,
+			   entries_ready, txvq->p_used->idx);
+
+		if (entries_ready < n) {
+			/*
+			 * Look for more packets.
+			 * Using the used_idx in the avail ring since they are held synchronous
+			 * because of in-order
+			 */
+			txvq->cached_idx =
+				txvq->p_avail->ring[(txvq->p_used->idx - 1) &
+									  queue_mask];
+
+			vq_log_arg(txvq,
+				   "_update: get cachedidx %i (used_idx-1 %i)\n",
+				   txvq->cached_idx,
+				   (txvq->p_used->idx - 1) & queue_mask);
+			entries_used = (le16)((txvq->tx_descr_avail_idx -
+					      txvq->cached_idx) &
+					     queue_mask);
+			entries_ready =
+				(le16)(txvq->queue_size - 1 - entries_used);
+			vq_log_arg(txvq, "new used: %i, ready %i\n",
+				   entries_used, entries_ready);
+			if (n > entries_ready)
+				n = entries_ready;
+		}
+	} else if (txvq->vq_type == PACKED_RING) {
+		int i;
+
+		cvq->p = txvq->desc;
+		cvq->vq_type = PACKED_RING;
+
+		/* serve from previously reclaimed ("outstanding") descriptors first */
+		if (txvq->outs.num) {
+			*first_idx = txvq->outs.next;
+			uint16_t num = RTE_MIN(n, txvq->outs.num);
+
+			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+			txvq->outs.num -= num;
+
+			if (n == num)
+				return n;
+
+			m = num;
+			n -= num;
+		} else {
+			*first_idx = txvq->next_used;
+		}
+		/* iterate the ring - this requires in-order behavior from FPGA */
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+			ule16 flags = desc->flags;
+			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+			if (avail != txvq->used_wrap_count ||
+					used != txvq->used_wrap_count) {
+				/* not completed by device yet - stop here */
+				n = i;
+				break;
+			}
+
+			/* a completion may cover a multi-descriptor chain;
+			 * skip over the whole chain in one step
+			 */
+			le16 incr = (desc->id - txvq->next_used) & queue_mask;
+
+			i += incr;
+			inc_used(txvq, incr + 1);
+		}
+
+		if (i > n) {
+			/* reclaimed past the request - park the surplus as outstanding */
+			int outs_num = i - n;
+
+			txvq->outs.next = (txvq->next_used - outs_num) &
+					  queue_mask;
+			txvq->outs.num = outs_num;
+		}
+
+	} else {
+		return 0;
+	}
+	return m + n;
+}
+
+/*
+ * Hand n filled TX packets to the device.
+ *
+ * n_segs[i] is the number of ring descriptors packet i occupies.
+ *
+ * SPLIT:  writes one avail-ring entry per packet (pointing at the packet's
+ *         first descriptor), then publishes the new avail index after a
+ *         full barrier.
+ * PACKED: sets AVAIL/USED flags on each descriptor; the first descriptor's
+ *         flags are written last so the device sees a complete chain.
+ */
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		le16 queue_mask = (le16)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i\n", n,
+			   txvq->am_idx, txvq->tx_descr_avail_idx);
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			/* advance by the packet's segment count */
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i\n",
+			   txvq->p_avail->idx, txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		ule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc =
+					&txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr =
+				(ule64)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup */
+				desc->flags |= avail_flag(txvq) |
+					       used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+		/* Proper read barrier before FPGA may see first flags */
+		/*
+		 * NOTE(review): comment says "read barrier" but this orders
+		 * stores seen by the device; an rte_wmb() would normally be
+		 * expected here - confirm.
+		 */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+/*
+ * Read the current RX queue index pointer for rxvq from the DBS module.
+ *
+ * Polls until the hardware reports a valid pointer. The original loop
+ * slept 10 us and consumed a retry even after a valid pointer had been
+ * read, and could report timeout despite success on the final attempt;
+ * validity is now checked before the retry bookkeeping.
+ *
+ * @return 0 on success (*index receives the low 16 bits of the pointer),
+ *         -1 on register read failure or timeout (100000 polls x 10 us).
+ */
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)
+{
+	uint32_t rx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);
+	for (;;) {
+		if (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)
+			return -1; /* register read failed */
+		if (rx_ptr != INDEX_PTR_NOT_VALID)
+			break; /* got a valid pointer - no extra sleep */
+		if (--loops == 0)
+			return -1; /* timed out */
+		usleep(10);
+	}
+
+	*index = (uint16_t)(rx_ptr & 0xffff);
+	return 0;
+}
+
+/*
+ * Read the current TX queue index pointer for txvq from the DBS module.
+ *
+ * Polls until the hardware reports a valid pointer. The original loop
+ * slept 10 us and consumed a retry even after a valid pointer had been
+ * read, and could report timeout despite success on the final attempt;
+ * validity is now checked before the retry bookkeeping.
+ *
+ * @return 0 on success (*index receives the low 16 bits of the pointer),
+ *         -1 on register read failure or timeout (100000 polls x 10 us).
+ */
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)
+{
+	uint32_t tx_ptr;
+	uint32_t loops = 100000;
+
+	dbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);
+	for (;;) {
+		if (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)
+			return -1; /* register read failed */
+		if (tx_ptr != INDEX_PTR_NOT_VALID)
+			break; /* got a valid pointer - no extra sleep */
+		if (--loops == 0)
+			return -1; /* timed out */
+		usleep(10);
+	}
+
+	*index = (uint16_t)(tx_ptr & 0xffff);
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h
new file mode 100644
index 0000000000..ceae535741
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_dbsconfig.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_DBS_CONFIG_H
+#define NTNIC_DBS_CONFIG_H
+
+#include <stdint.h>
+#include "nthw_drv.h"
+
+struct nthw_virt_queue; /* opaque - defined in the DBS config implementation */
+
+/* One host memory region: physical and virtual address plus its length. */
+struct nthw_memory_descriptor {
+	void *phys_addr;
+	void *virt_addr;
+	uint32_t len;
+};
+
+/*
+ * Little-endian wire types used by the virtqueue layout structures.
+ * Typedefs instead of object-like macros: the names behave as real types
+ * (obey scope, visible to debuggers, no surprise macro re-expansion).
+ */
+typedef uint64_t ule64;
+typedef uint32_t ule32;
+typedef uint16_t ule16;
+
+#define MAX_MSIX_VECTORS_PR_VF 8
+
+/* vq_type values */
+#define SPLIT_RING 0
+#define PACKED_RING 1
+/* in-order flags for queue setup */
+#define IN_ORDER 1
+#define NO_ORDER_REQUIRED 0
+
+/*
+ * SPLIT : This marks a buffer as continuing via the next field.
+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be
+ * contiguous) In Used descriptors it must be ignored
+ */
+#define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
+/*
+ * SPLIT : This means the buffer contains a list of buffer descriptors.
+ * PACKED: This means the element contains a table of descriptors.
+ */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/*
+ * Split Ring virtq Descriptor
+ * Packed to 1-byte alignment to match the virtio wire layout exactly.
+ */
+#pragma pack(1)
+struct virtq_desc {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+	/* The flags as indicated above. */
+	ule16 flags;
+	/* Next field if flags & NEXT */
+	ule16 next;
+};
+
+#pragma pack()
+
+/*
+ * Packed Ring special structures and defines
+ *
+ */
+
+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */
+
+/* additional packed ring flags (AVAIL/USED wrap-counter bits) */
+#define VIRTQ_DESC_F_AVAIL (1 << 7)
+#define VIRTQ_DESC_F_USED (1 << 15)
+
+/* descr phys address must be 16 byte aligned */
+#pragma pack(push, 16)
+struct pvirtq_desc {
+	/* Buffer Address. */
+	ule64 addr;
+	/* Buffer Length. */
+	ule32 len;
+	/* Buffer ID. */
+	ule16 id;
+	/* The flags depending on descriptor type. */
+	ule16 flags;
+};
+
+#pragma pack(pop)
+
+/* Enable events */
+#define RING_EVENT_FLAGS_ENABLE 0x0
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor
+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).
+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
+ */
+#define RING_EVENT_FLAGS_DESC 0x2
+/* The value 0x3 is reserved */
+
+/* Packed-ring driver/device event suppression area. */
+struct pvirtq_event_suppress {
+	union {
+		struct {
+			ule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */
+			ule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */
+		};
+		ule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+	};
+
+	/* phys address must be 4 byte aligned */
+	/*
+	 * NOTE(review): the comment above says 4-byte alignment but pack(16)
+	 * is pushed here, mid-struct - confirm the intended packing.
+	 */
+#pragma pack(push, 16)
+	union {
+		struct {
+			ule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */
+			ule16 reserved : 14; /* Reserved, set to 0 */
+		};
+		ule16 flags;
+	};
+};
+
+#pragma pack(pop)
+
+/*
+ * Common virtq descr
+ *
+ * Helpers that update a descriptor field for whichever ring type the
+ * queue uses. Local copies of the arguments avoid double evaluation.
+ */
+#define vq_set_next(_vq, index, nxt)                \
+	do {                                       \
+		__typeof__(_vq) (vq) = (_vq); \
+		if ((vq)->vq_type == SPLIT_RING)   \
+			(vq)->s[index].next = nxt; \
+	} while (0)
+#define vq_add_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags |= flgs;  \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags |= flgs;  \
+	} while (0)
+#define vq_set_flags(_vq, _index, _flgs)                  \
+	do {                                           \
+		__typeof__(_vq) (vq) = (_vq); \
+		__typeof__(_index) (index) = (_index); \
+		__typeof__(_flgs) (flgs) = (_flgs); \
+		if ((vq)->vq_type == SPLIT_RING)       \
+			(vq)->s[index].flags = flgs;   \
+		else if ((vq)->vq_type == PACKED_RING) \
+			(vq)->p[index].flags = flgs;   \
+	} while (0)
+
+/* Address/length prefix common to split and packed descriptors. */
+struct nthw_virtq_desc_buf {
+	/* Address (guest-physical). */
+	ule64 addr;
+	/* Length. */
+	ule32 len;
+} __rte_aligned(16);
+
+/* Tagged view of a descriptor ring: vq_type selects which member is valid. */
+struct nthw_cvirtq_desc {
+	union {
+		struct nthw_virtq_desc_buf *b; /* buffer part as is common */
+		struct virtq_desc *s; /* SPLIT */
+		struct pvirtq_desc *p; /* PACKED */
+	};
+	uint16_t vq_type;
+};
+
+/* Setup a virt_queue for a VM */
+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t header, uint32_t vq_type, int irq_vector);
+
+/* RX queue lifecycle */
+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);
+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint16_t start_idx,
+	uint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,
+	void *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,
+	uint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,
+	int irq_vector, uint32_t in_order);
+
+/* TX queue lifecycle */
+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);
+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);
+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,
+		uint32_t outport);
+
+/* Managed variants: the driver owns the ring and packet buffer memory. */
+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector);
+
+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);
+
+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index, uint32_t queue_size,
+	uint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,
+	struct nthw_memory_descriptor *
+	p_virt_struct_area,
+	struct nthw_memory_descriptor *
+	p_packet_buffers,
+	uint32_t vq_type, int irq_vector, uint32_t in_order);
+
+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);
+
+/* TX QoS shaping configuration */
+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,
+			   uint32_t ir, uint32_t bs);
+
+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,
+				uint32_t divider);
+
+/* One received buffer segment as handed to the upper layer. */
+struct nthw_received_packets {
+	void *addr;
+	uint32_t len;
+};
+
+/*
+ * These functions handles both Split and Packed including merged buffers (jumbo)
+ */
+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,
+			     struct nthw_received_packets *rp,
+			     uint16_t *nb_pkts);
+
+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);
+
+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,
+			     struct nthw_memory_descriptor **p_virt_addr);
+
+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,
+			     uint16_t n_segs[]);
+
+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);
+
+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);
+
+/* One-time DBS module initialization for the adapter. */
+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info);
+
+#endif
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
new file mode 100644
index 0000000000..d547926453
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -0,0 +1,4256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h> /* sleep() */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <sys/queue.h>
+
+#include "ntdrv_4ga.h"
+
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_vfio.h>
+#include <rte_flow_driver.h>
+#include <vdpa_driver.h>
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+#include "ntnic_hshconfig.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_meter.h"
+
+#include "flow_api.h"
+
+#ifdef NT_TOOLS
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntconnect_modules/ntconn_modules.h"
+#endif
+
+/* Defines: */
+
+/* Largest frame the hardware handles; MTU limits derive from it. */
+#define HW_MAX_PKT_LEN (10000)
+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU 46
+#define MIN_MTU_INLINE 512
+
+#include "ntnic_dbsconfig.h"
+
+#define EXCEPTION_PATH_HID 0
+
+#define MAX_TOTAL_QUEUES 128
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+int lag_active;
+
+/* Book-keeping for released virt-queues so they can be torn down later. */
+static struct {
+	struct nthw_virt_queue *vq;
+	int managed;
+	int rx;
+} rel_virt_queue[MAX_REL_VQS];
+
+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \
+	defined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)
+/*
+ * Debug helper: hex-dump one packet segment to stdout, 16 bytes per row,
+ * optionally preceded by a caption with the buffer address and length.
+ */
+static void dump_packet_seg(const char *text, uint8_t *data, int len)
+{
+	int offs;
+
+	if (text)
+		printf("%s (%p, len %i)", text, data, len);
+	for (offs = 0; offs < len; offs++) {
+		if ((offs % 16) == 0)
+			printf("\n%04X:", offs);
+		printf(" %02X", data[offs]);
+	}
+	printf("\n");
+}
+#endif
+
+/* Global statistics: */
+extern const struct rte_flow_ops _dev_flow_ops;
+struct pmd_internals *pmd_intern_base;
+uint64_t rte_tsc_freq;
+
+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/
+/*
+ * NOTE(review): "= { -1 }" sets only element 0 to -1; the remaining
+ * entries are zero-initialized - confirm whether all entries were meant
+ * to start at -1 (unregistered).
+ */
+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };
+/*Register the custom module binding to EAL --log-level option here*/
+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = "pmd.net.ntnic.general",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = "pmd.net.ntnic.nthw",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = "pmd.net.ntnic.filter",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = "pmd.net.ntnic.vdpa",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = "pmd.net.ntnic.fpga",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =
+	"pmd.net.ntnic.ntconnect",
+	[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = "pmd.net.ntnic.ethdev"
+};
+
+/*--------------------------------------------------------------------------*/
+
+/* Protects the global adapter table g_p_drv below. */
+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
+
+static void *lag_management(void *arg);
+static void (*previous_handler)(int sig);
+static pthread_t shutdown_tid;
+int kill_pmd;
+
+/* devargs keys accepted by this PMD (see valid_arguments below). */
+#define ETH_DEV_NTNIC_HELP_ARG "help"
+#define ETH_DEV_NTHW_PORTMASK_ARG "portmask"
+#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
+#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
+#define ETH_DEV_NTHW_PORTQUEUES_ARG "portqueues"
+#define ETH_DEV_NTHW_REPRESENTOR_ARG "representor"
+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG "exception_path"
+#define ETH_NTNIC_LAG_PRIMARY_ARG "primary"
+#define ETH_NTNIC_LAG_BACKUP_ARG "backup"
+#define ETH_NTNIC_LAG_MODE_ARG "mode"
+#define ETH_DEV_NTHW_LINK_SPEED_ARG "port.link_speed"
+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG "supported-fpgas"
+
+#define DVIO_VHOST_DIR_NAME "/usr/local/var/run/"
+
+/* NULL-terminated list handed to rte_kvargs for devargs validation. */
+static const char *const valid_arguments[] = {
+	ETH_DEV_NTNIC_HELP_ARG,
+	ETH_DEV_NTHW_PORTMASK_ARG,
+	ETH_DEV_NTHW_RXQUEUES_ARG,
+	ETH_DEV_NTHW_TXQUEUES_ARG,
+	ETH_DEV_NTHW_PORTQUEUES_ARG,
+	ETH_DEV_NTHW_REPRESENTOR_ARG,
+	ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+	ETH_NTNIC_LAG_PRIMARY_ARG,
+	ETH_NTNIC_LAG_BACKUP_ARG,
+	ETH_NTNIC_LAG_MODE_ARG,
+	ETH_DEV_NTHW_LINK_SPEED_ARG,
+	ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+	NULL,
+};
+
+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];
+
+/* Functions: */
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id nthw_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
+	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },
+	{
+		.vendor_id = 0,
+	}, /* sentinel */
+};
+
+/*
+ * Store and get adapter info
+ */
+
+/* Per-adapter driver state, indexed by adapter_no; guarded by hwlock. */
+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };
+
+/*
+ * Register p_drv in the global adapter table, keyed by adapter_no.
+ * Logs (but still overwrites) if a slot is already occupied.
+ */
+static void store_pdrv(struct drv_s *p_drv)
+{
+	/*
+	 * g_p_drv has NUM_ADAPTER_MAX slots, so a valid index must be
+	 * strictly less than NUM_ADAPTER_MAX. The original ">" check let
+	 * adapter_no == NUM_ADAPTER_MAX through, writing one past the array.
+	 */
+	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
+		NT_LOG(ERR, ETHDEV,
+		       "Internal error adapter number %u out of range. Max number of adapters: %u\n",
+		       p_drv->adapter_no, NUM_ADAPTER_MAX);
+		return;
+	}
+	/* NOTE(review): this duplicate check reads g_p_drv outside hwlock - confirm */
+	if (g_p_drv[p_drv->adapter_no] != 0) {
+		NT_LOG(WRN, ETHDEV,
+		       "Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       " with adapter structure for PCI  " PCIIDENT_PRINT_STR
+		       "\n",
+		       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
+		       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
+		       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
+	}
+	rte_spinlock_lock(&hwlock);
+	g_p_drv[p_drv->adapter_no] = p_drv;
+	rte_spinlock_unlock(&hwlock);
+}
+
+/*
+ * Look up the driver state for the adapter at the given PCI address.
+ * Matches on PCI domain and bus number. Returns NULL if not registered.
+ */
+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)
+{
+	struct drv_s *found = NULL;
+	int i;
+
+	rte_spinlock_lock(&hwlock);
+	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
+		struct drv_s *cand = g_p_drv[i];
+
+		if (!cand)
+			continue;
+		if (PCIIDENT_TO_DOMAIN(cand->ntdrv.pciident) == addr.domain &&
+				PCIIDENT_TO_BUSNR(cand->ntdrv.pciident) == addr.bus) {
+			found = cand;
+			break;
+		}
+	}
+	rte_spinlock_unlock(&hwlock);
+	return found;
+}
+
+/* Unpack a packed PCI identifier and look up the matching adapter. */
+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)
+{
+	struct rte_pci_addr addr = {
+		.domain = PCIIDENT_TO_DOMAIN(pciident),
+		.bus = PCIIDENT_TO_BUSNR(pciident),
+		.devid = PCIIDENT_TO_DEVNR(pciident),
+		.function = PCIIDENT_TO_FUNCNR(pciident),
+	};
+
+	return get_pdrv_from_pci(addr);
+}
+
+/*
+ * Dump adapter debug info for the adapter identified by pciident to pfh.
+ *
+ * @return the nt4ga_adapter_show_info() result, or -1 when no adapter is
+ *         registered for pciident (the original dereferenced NULL here).
+ */
+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)
+{
+	struct drv_s *p_drv = get_pdrv_from_pciident(pciident);
+
+	if (!p_drv)
+		return -1;
+	return nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);
+}
+
+/*
+ * Return the DBS module handle for the adapter at pci_addr, or NULL
+ * (with an error log) when the adapter is not registered.
+ */
+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+	nthw_dbs_t *p_nthw_dbs = NULL;
+
+	if (p_drv) {
+		p_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "Adapter DBS %p (p_drv=%p) info for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,
+		       pci_addr.function);
+	}
+	return p_nthw_dbs;
+}
+
+/*
+ * Return the FPGA profile of the adapter at pci_addr, or
+ * FPGA_INFO_PROFILE_UNKNOWN (with an error log) when not registered.
+ */
+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)
+{
+	struct drv_s *p_drv = get_pdrv_from_pci(pci_addr);
+	enum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;
+
+	if (p_drv) {
+		fpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+	} else {
+		NT_LOG(ERR, ETHDEV,
+		       "FPGA profile (p_drv=%p) for adapter with PCI " PCIIDENT_PRINT_STR
+		       " is not found\n",
+		       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
+	}
+	return fpga_profile;
+}
+
+/*
+ * rte_kvargs handler: parse value_str as a u32 (base auto-detected, so
+ * "0x..." hex works) into *extra_args.
+ *
+ * The original used strtol with no error checking: on 32-bit platforms
+ * values above LONG_MAX silently clamped, and garbage input yielded 0.
+ * Now uses strtoul and rejects non-numeric input and out-of-range values.
+ *
+ * @return 0 on success, -1 on NULL/invalid/overflowing input.
+ */
+static int string_to_u32(const char *key_str __rte_unused,
+			 const char *value_str, void *extra_args)
+{
+	char *end;
+	unsigned long value;
+
+	if (!value_str || !extra_args)
+		return -1;
+	errno = 0;
+	value = strtoul(value_str, &end, 0);
+	if (end == value_str || errno == ERANGE || value > UINT32_MAX)
+		return -1;
+	*(uint32_t *)extra_args = (uint32_t)value;
+	return 0;
+}
+
+/* One parsed "port:link_speed_Mbps" devargs entry. */
+struct port_link_speed {
+	int port_id;
+	int link_speed;
+};
+
+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */
+static int string_to_port_link_speed(const char *key_str __rte_unused,
+				     const char *value_str, void *extra_args)
+{
+	char *sep;
+
+	if (!value_str || !extra_args)
+		return -1;
+
+	/* port id up to the ':' separator */
+	const uint32_t pid = strtol(value_str, &sep, 10);
+
+	if (*sep != ':')
+		return -1;
+
+	/* link speed after the separator */
+	const uint32_t lspeed = strtol(sep + 1, NULL, 10);
+
+	/* fill the current entry and advance the caller's cursor */
+	struct port_link_speed *pls = *(struct port_link_speed **)extra_args;
+
+	pls->port_id = pid;
+	pls->link_speed = lspeed;
+	++(*((struct port_link_speed **)(extra_args)));
+	return 0;
+}
+
+/*
+ * Fill *stats from the per-queue counters maintained in internals.
+ *
+ * Triggers a fresh hardware poll first, then sums per-queue Rx/Tx
+ * packet, byte and error counters into the aggregate fields.
+ * Only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues get per-queue slots.
+ *
+ * @return 0 on success, -1 on missing stat modules or bad port index.
+ */
+static int dpdk_stats_collect(struct pmd_internals *internals,
+			      struct rte_eth_stats *stats)
+{
+	unsigned int i;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+	uint64_t rx_total = 0;
+	uint64_t rx_total_b = 0;
+	uint64_t tx_total = 0;
+	uint64_t tx_total_b = 0;
+	uint64_t tx_err_total = 0;
+
+	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
+			if_index > NUM_ADAPTER_PORTS_MAX) {
+		NT_LOG(WRN, ETHDEV, "%s - error exit\n", __func__);
+		return -1;
+	}
+
+	/*
+	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
+	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
+	 */
+	poll_statistics(internals);
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;
+			i++) {
+		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
+		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
+		rx_total += stats->q_ipackets[i];
+		rx_total_b += stats->q_ibytes[i];
+	}
+
+	for (i = 0;
+			i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;
+			i++) {
+		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
+		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
+		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
+		tx_total += stats->q_opackets[i];
+		tx_total_b += stats->q_obytes[i];
+		tx_err_total += stats->q_errors[i];
+	}
+
+	stats->imissed = internals->rx_missed;
+	stats->ipackets = rx_total;
+	stats->ibytes = rx_total_b;
+	stats->opackets = tx_total;
+	stats->obytes = tx_total_b;
+	stats->oerrors = tx_err_total;
+
+	return 0;
+}
+
+/*
+ * Zero all per-queue software counters and record the reset timestamp.
+ * Serializes against the statistics poller via the driver's stat lock.
+ *
+ * @return 0 on success, -1 on missing stat modules or bad port index.
+ */
+static int dpdk_stats_reset(struct pmd_internals *internals,
+			    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)
+{
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	unsigned int i;
+
+	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||
+			n_intf_no > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/* Rx */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		internals->rxq_scg[i].rx_pkts = 0;
+		internals->rxq_scg[i].rx_bytes = 0;
+		internals->rxq_scg[i].err_pkts = 0;
+	}
+
+	internals->rx_missed = 0;
+
+	/* Tx */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		internals->txq_scg[i].tx_pkts = 0;
+		internals->txq_scg[i].tx_bytes = 0;
+		internals->txq_scg[i].err_pkts = 0;
+	}
+
+	/* remember when totals were last zeroed */
+	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
+
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */
+/*
+ * Map a Napatech link speed enum to the matching DPDK ETH_SPEED_NUM_xxx
+ * value; unknown speeds map to ETH_SPEED_NUM_NONE.
+ */
+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)
+{
+	switch (nt_link_speed) {
+	case NT_LINK_SPEED_10M:
+		return ETH_SPEED_NUM_10M;
+	case NT_LINK_SPEED_100M:
+		return ETH_SPEED_NUM_100M;
+	case NT_LINK_SPEED_1G:
+		return ETH_SPEED_NUM_1G;
+	case NT_LINK_SPEED_10G:
+		return ETH_SPEED_NUM_10G;
+	case NT_LINK_SPEED_25G:
+		return ETH_SPEED_NUM_25G;
+	case NT_LINK_SPEED_40G:
+		return ETH_SPEED_NUM_40G;
+	case NT_LINK_SPEED_50G:
+		return ETH_SPEED_NUM_50G;
+	case NT_LINK_SPEED_100G:
+		return ETH_SPEED_NUM_100G;
+	default:
+		return ETH_SPEED_NUM_NONE;
+	}
+}
+
+/*
+ * Map a Napatech duplex enum to the DPDK ETH_LINK_*_DUPLEX value;
+ * unknown duplex maps to 0.
+ */
+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)
+{
+	switch (nt_link_duplex) {
+	case NT_LINK_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case NT_LINK_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case NT_LINK_DUPLEX_UNKNOWN: /* fall-through */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * rte_eth_dev link_update callback.
+ *
+ * Virtual/override ports report link-up purely from the vhost
+ * negotiation state with an unknown speed; physical ports query the
+ * adapter for status, speed and duplex. A stopped device always
+ * reports link down.
+ */
+static int eth_link_update(struct rte_eth_dev *eth_dev,
+			   int wait_to_complete __rte_unused)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	if (eth_dev->data->dev_started) {
+		if (internals->type == PORT_TYPE_VIRTUAL ||
+				internals->type == PORT_TYPE_OVERRIDE) {
+			eth_dev->data->dev_link.link_status =
+				((internals->vport_comm ==
+				  VIRT_PORT_NEGOTIATED_NONE) ?
+				 ETH_LINK_DOWN :
+				 ETH_LINK_UP);
+			eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+			eth_dev->data->dev_link.link_duplex =
+				ETH_LINK_FULL_DUPLEX;
+			return 0;
+		}
+
+		/* physical port: ask the adapter layer */
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_status =
+			port_link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+		nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_speed =
+			nt_link_speed_to_eth_speed_num(port_link_speed);
+
+		nt_link_duplex_t nt_link_duplex =
+			nt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);
+		eth_dev->data->dev_link.link_duplex =
+			nt_link_duplex_to_eth_duplex(nt_link_duplex);
+	} else {
+		eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+		eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	}
+	return 0;
+}
+
+/*
+ * rte_eth_dev stats_get callback.
+ *
+ * Delegates to dpdk_stats_collect() and propagates its status; the
+ * original discarded the return value and always reported success,
+ * hiding collection failures from the application.
+ */
+static int eth_stats_get(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_stats *stats)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	return dpdk_stats_collect(internals, stats);
+}
+
+/*
+ * rte_eth_dev stats_reset callback.
+ *
+ * Delegates to dpdk_stats_reset() and propagates its status; the
+ * original discarded the return value and always reported success.
+ */
+static int eth_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	const int if_index = internals->if_index;
+
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/*
+ * Translate a Napatech link-speed capability bit mask into the
+ * corresponding DPDK ETH_LINK_SPEED_xxx capability mask.
+ */
+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)
+{
+	static const struct {
+		int nt_bit;
+		uint32_t eth_bit;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, ETH_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, ETH_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, ETH_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, ETH_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, ETH_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, ETH_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, ETH_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, ETH_LINK_SPEED_100G },
+	};
+	uint32_t eth_speed_capa = 0;
+	size_t i;
+
+	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (nt_link_speed_capa & speed_map[i].nt_bit)
+			eth_speed_capa |= speed_map[i].eth_bit;
+	}
+
+	return eth_speed_capa;
+}
+
+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/*
+ * DPDK dev_infos_get callback: report static port limits (MAC address
+ * count, MTU range, max packet length), queue counts, the link-speed
+ * capability mask and the supported RSS offload types.
+ */
+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_dev_info *dev_info)
+{
+	struct pmd_internals *dev_priv = eth_dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&dev_priv->p_drv->ntdrv.adapter_info;
+	const int port = dev_priv->if_index;
+
+	dev_info->if_index = dev_priv->if_index;
+	dev_info->driver_name = dev_priv->name;
+	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
+	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
+	dev_info->max_mtu = MAX_MTU;
+	/* Inline profile has a larger minimum MTU than the others */
+	dev_info->min_mtu =
+		(p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE) ?
+		MIN_MTU_INLINE : MIN_MTU;
+
+	if (dev_priv->p_drv) {
+		dev_info->max_rx_queues = dev_priv->nb_rx_queues;
+		dev_info->max_tx_queues = dev_priv->nb_tx_queues;
+		dev_info->min_rx_bufsize = 64;
+
+		const uint32_t nt_speed_capa =
+			nt4ga_port_get_link_speed_capabilities(p_adapter_info,
+							       port);
+		dev_info->speed_capa =
+			nt_link_speed_capa_to_eth_speed_capa(nt_speed_capa);
+	}
+
+	dev_info->flow_type_rss_offloads =
+		RTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |
+		RTE_ETH_RSS_LEVEL_INNERMOST | RTE_ETH_RSS_L3_SRC_ONLY |
+		RTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;
+	/*
+	 * The NT hashing algorithm does not use a key; report a fake
+	 * 1-byte key length to meet testpmd requirements.
+	 */
+	dev_info->hash_key_size = 1;
+
+	return 0;
+}
+
+/*
+ * Copy one received virtqueue packet (possibly spanning several
+ * virtqueue segments in hw_recv[]) into @mbuf, chaining extra mbufs
+ * from @mb_pool when the data does not fit in a single buffer.
+ *
+ * @param mbuf     destination head mbuf (already allocated by caller)
+ * @param mb_pool  pool used for any additional chained mbufs
+ * @param hw_recv  first virtqueue segment of this packet; only the
+ *                 first segment carries the SG_HDR_SIZE header
+ * @param max_segs maximum number of virtqueue segments available
+ * @param data_len total captured length including the header
+ *
+ * @return number of virtqueue segments consumed, or -1 on error
+ *         (mbuf allocation failure, or more than max_segs needed)
+ */
+static __rte_always_inline int
+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,
+		       struct nthw_received_packets *hw_recv, int max_segs,
+		       uint16_t data_len)
+{
+	int src_pkt = 0;
+	/*
+	 * 1. virtqueue packets may be segmented
+	 * 2. the mbuf size may be too small and may need to be segmented
+	 */
+	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
+	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+	/* set packet length (header bytes are not part of the payload) */
+	mbuf->pkt_len = data_len - SG_HDR_SIZE;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	void *dbg_src_start = hw_recv->addr;
+	void *dbg_dst_start = dst;
+#endif
+
+	int remain = mbuf->pkt_len;
+	/* First cpy_size is without header */
+	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?
+		       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :
+		       remain;
+
+	struct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */
+
+	/* Walk virtqueue segments until the whole packet is copied */
+	while (++src_pkt <= max_segs) {
+		/* keep track of space in dst */
+		int cpto_size = rte_pktmbuf_tailroom(m);
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("src copy size %i\n", cpy_size);
+#endif
+
+		/* Current virtqueue segment does not fit in current mbuf */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Seg %i: mbuf first cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+			       mbuf->nb_segs - 1,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+			m->data_len += new_cpy_size;
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
+			 * mbuf
+			 */
+			do {
+				/* chain a fresh mbuf for the overflow */
+				m->next = rte_pktmbuf_alloc(mb_pool);
+				if (unlikely(!m->next))
+					return -1;
+				m = m->next;
+
+				/* Headroom is not needed in chained mbufs */
+				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
+				dst = (char *)m->buf_addr;
+				m->data_len = 0;
+				m->pkt_len = 0;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				dbg_dst_start = dst;
+#endif
+				cpto_size = rte_pktmbuf_tailroom(m);
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("new dst mbuf seg - size %i\n",
+				       cpto_size);
+				printf("Seg %i: mbuf cpy src off 0x%" PRIX64 ", dst off 0x%" PRIX64 ", size %i\n",
+				       mbuf->nb_segs,
+				       (uint64_t)data - (uint64_t)dbg_src_start,
+				       (uint64_t)dst - (uint64_t)dbg_dst_start,
+				       actual_cpy_size);
+#endif
+
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+				m->pkt_len += actual_cpy_size;
+				m->data_len += actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+				/* head mbuf tracks the segment count */
+				mbuf->nb_segs++;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* all data from this virtqueue segment can fit in current mbuf */
+#ifdef RX_MERGE_SEGMENT_DEBUG
+			printf("Copy all into Seg %i: %i bytes, src off 0x%" PRIX64
+			       ", dst off 0x%" PRIX64 "\n",
+			       mbuf->nb_segs - 1, cpy_size,
+			       (uint64_t)data - (uint64_t)dbg_src_start,
+			       (uint64_t)dst - (uint64_t)dbg_dst_start);
+#endif
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+			m->data_len += cpy_size;
+			if (mbuf->nb_segs > 1)
+				m->pkt_len += cpy_size;
+			remain -= cpy_size;
+		}
+
+		/* packet complete - all data from current virtqueue packet has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
+		/* prepare for next virtqueue segment */
+		data = (char *)hw_recv[src_pkt]
+		       .addr; /* following packets are full data */
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		dbg_src_start = data;
+#endif
+		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?
+			   SG_HW_RX_PKT_BUFFER_SIZE :
+			   remain;
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("next src buf\n");
+#endif
+	};
+
+	/* ran out of virtqueue segments before the packet was complete */
+	if (src_pkt > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return src_pkt;
+}
+
+/*
+ * DPDK RX burst callback (scatter-gather virtqueue path).
+ *
+ * Pulls up to nb_pkts whole packets from the HW virtqueue, copies each
+ * into freshly allocated mbufs (chaining via copy_virtqueue_to_mbuf()
+ * when a packet spans segments or exceeds the mbuf), and fills FDIR
+ * metadata from the per-packet descriptor header. All consumed
+ * virtqueue segments are released before returning, including on the
+ * error path.
+ *
+ * @return number of packets stored in bufs[]
+ */
+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct ntnic_rx_queue *rx_q = queue;
+	uint16_t num_rx = 0;
+
+	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];
+
+	/* global shutdown flag - stop servicing queues */
+	if (kill_pmd)
+		return 0;
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_called(rx_q->port);
+#endif
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (nb_pkts > MAX_RX_PACKETS)
+		nb_pkts = MAX_RX_PACKETS;
+
+	/* hw_recv_pkt_segs counts segments; whole_pkts counts packets */
+	uint16_t whole_pkts;
+	uint16_t hw_recv_pkt_segs =
+		nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);
+
+	if (!hw_recv_pkt_segs) {
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+		dbg_print_approx_cpu_load_rx_done(rx_q->port, 0);
+#endif
+
+		return 0;
+	}
+
+#ifdef NT_DEBUG_STAT
+	dbg_rx_queue(rx_q,
+		     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */
+#endif
+
+	nb_pkts = whole_pkts;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	printf("\n---------- DPDK Rx ------------\n");
+	printf("[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, "
+	       "vq buf %i, vq header size %i\n",
+	       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,
+	       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+#endif
+
+	int src_pkt = 0; /* from 0 to hw_recv_pkt_segs */
+
+	for (i = 0; i < nb_pkts; i++) {
+		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);
+		if (!bufs[i]) {
+			printf("ERROR - no more buffers mbuf in mempool\n");
+			goto err_exit;
+		}
+		mbuf = bufs[i];
+
+		/* each packet starts with an in-band descriptor header */
+		struct _pkt_hdr_rx *phdr =
+			(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+		printf("\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\n",
+		       i, phdr->cap_len - SG_HDR_SIZE,
+		       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+		       SG_HW_RX_PKT_BUFFER_SIZE,
+		       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));
+#endif
+
+#ifdef RX_SRC_DUMP_PKTS_DEBUG
+		{
+			int d, _segs = (phdr->cap_len +
+					SG_HW_RX_PKT_BUFFER_SIZE - 1) /
+				       SG_HW_RX_PKT_BUFFER_SIZE;
+			int _size = phdr->cap_len;
+
+			printf("Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\n",
+			       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,
+			       _segs);
+			for (d = 0; d < _segs; d++) {
+				printf("Dump seg %i:\n", d);
+				dump_packet_seg("Vq seg:", hw_recv[src_pkt + d].addr,
+						_size > SG_HW_RX_PKT_BUFFER_SIZE ?
+						SG_HW_RX_PKT_BUFFER_SIZE :
+						_size);
+				_size -= SG_HW_RX_PKT_BUFFER_SIZE;
+			}
+		}
+#endif
+
+		/* a capture shorter than the header itself is malformed */
+		if (phdr->cap_len < SG_HDR_SIZE) {
+			printf("Pkt len of zero received. No header!! - dropping packets\n");
+			rte_pktmbuf_free(mbuf);
+			goto err_exit;
+		}
+
+		{
+			/* fast path: single segment that fits in one mbuf */
+			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
+					(phdr->cap_len - SG_HDR_SIZE) <=
+					rte_pktmbuf_tailroom(mbuf)) {
+#ifdef RX_MERGE_SEGMENT_DEBUG
+				printf("Simple copy vq -> mbuf %p size %i\n",
+				       rte_pktmbuf_mtod(mbuf, void *),
+				       phdr->cap_len);
+#endif
+				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
+				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
+					   (char *)hw_recv[src_pkt].addr +
+					   SG_HDR_SIZE,
+					   mbuf->data_len);
+
+				mbuf->pkt_len = mbuf->data_len;
+				src_pkt++;
+			} else {
+				/* multi-segment / oversized: merge-copy path */
+				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
+								      &hw_recv[src_pkt],
+								      hw_recv_pkt_segs - src_pkt,
+								      phdr->cap_len);
+				if (cpy_segs < 0) {
+					/* Error */
+					rte_pktmbuf_free(mbuf);
+					goto err_exit;
+				}
+				src_pkt += cpy_segs;
+			}
+
+#ifdef RX_DST_DUMP_PKTS_DEBUG
+			{
+				struct rte_mbuf *m = mbuf;
+
+				printf("\nRx final mbuf:\n");
+				for (int ii = 0; m && ii < m->nb_segs; ii++) {
+					printf("  seg %i len %i\n", ii,
+					       m->data_len);
+					printf("  seg dump:\n");
+					dump_packet_seg("mbuf seg:",
+							rte_pktmbuf_mtod(m, uint8_t *),
+							m->data_len);
+					m = m->next;
+				}
+			}
+#endif
+
+			num_rx++;
+
+			mbuf->ol_flags &=
+				~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
+			mbuf->port = (uint16_t)-1;
+
+			/* fill FDIR metadata from the descriptor color field */
+			if (phdr->color_type == 0) {
+				if (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+						((phdr->color >> 24) == 0x02)) {
+					/* VNI in color of descriptor add port as well */
+					mbuf->hash.fdir.hi =
+						((uint32_t)phdr->color &
+						 0xffffff) |
+						((uint32_t)phdr->port
+						 << 24);
+					mbuf->hash.fdir.lo =
+						(uint32_t)phdr->fid;
+					mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
+
+					NT_LOG(DBG, ETHDEV,
+					       "POP'ed packet received that missed on inner match. color = %08x, port %i, tunnel-match flow stat id %i",
+					       phdr->color, phdr->port,
+					       phdr->fid);
+				}
+
+			} else {
+				if (phdr->color) {
+					mbuf->hash.fdir.hi =
+						phdr->color &
+						(NT_MAX_COLOR_FLOW_STATS - 1);
+					mbuf->ol_flags |=
+						RTE_MBUF_F_RX_FDIR_ID |
+						RTE_MBUF_F_RX_FDIR;
+				}
+			}
+		}
+	}
+
+/* Normal completion also flows through here: always release segments */
+err_exit:
+	nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);
+
+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD
+	dbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);
+#endif
+
+#ifdef RX_MERGE_SEGMENT_DEBUG
+	/*
+	 * When the application double frees a mbuf, it will become a doublet in the memory pool
+	 * This is obvious a bug in application, but can be verified here to some extend at least
+	 */
+	uint64_t addr = (uint64_t)bufs[0]->buf_addr;
+
+	for (int i = 1; i < num_rx; i++) {
+		if (bufs[i]->buf_addr == addr) {
+			printf("Duplicate packet addresses! num_rx %i\n",
+			       num_rx);
+			for (int ii = 0; ii < num_rx; ii++) {
+				printf("bufs[%i]->buf_addr %p\n", ii,
+				       bufs[ii]->buf_addr);
+			}
+		}
+	}
+#endif
+
+	return num_rx;
+}
+
+/*
+ * Copy one TX mbuf chain into one or more virtqueue buffers.
+ *
+ * The first virtqueue buffer (at @vq_descr_idx) already carries the
+ * SG_HDR_SIZE packet header; additional descriptors are chained with
+ * VIRTQ_DESC_F_NEXT as needed when the packet does not fit.
+ *
+ * @param cvq_desc     combined virtqueue descriptor table
+ * @param vq_descr_idx index of the first descriptor to fill
+ * @param vq_bufs      virtqueue buffer memory descriptors
+ * @param max_segs     maximum number of descriptors that may be used
+ * @param mbuf         packet to transmit (possibly segmented)
+ *
+ * @return number of virtqueue descriptors used, or -1 if the packet
+ *         needs more than @max_segs descriptors
+ */
+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
+			   uint16_t vq_descr_idx,
+			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
+			   struct rte_mbuf *mbuf)
+{
+	/*
+	 * 1. mbuf packet may be segmented
+	 * 2. the virtqueue buffer size may be too small and may need to be segmented
+	 */
+
+	char *data = rte_pktmbuf_mtod(mbuf, char *);
+	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;
+
+	int remain = mbuf->pkt_len;
+	int cpy_size = mbuf->data_len;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+	printf("src copy size %i\n", cpy_size);
+#endif
+
+	struct rte_mbuf *m = mbuf;
+	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;
+
+	/* first descriptor starts with the in-band packet header */
+	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;
+
+	int cur_seg_num = 0; /* start from 0 */
+
+	while (m) {
+		/* Can all data in current src segment be in current dest segment */
+		if (cpy_size > cpto_size) {
+			int new_cpy_size = cpto_size;
+
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			/* fix: 64-bit offsets were printed with %u (UB) */
+			printf("Seg %i: virtq buf first cpy src offs 0x%" PRIX64 ", dst offs 0x%" PRIX64 ", size %i\n",
+			       cur_seg_num,
+			       (uint64_t)data -
+			       (uint64_t)rte_pktmbuf_mtod(m, char *),
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,
+			       new_cpy_size);
+#endif
+			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += new_cpy_size;
+
+			remain -= new_cpy_size;
+			cpy_size -= new_cpy_size;
+
+			data += new_cpy_size;
+
+			/*
+			 * Loop while remaining data from this mbuf segment
+			 * still needs more virtqueue descriptors
+			 */
+			do {
+				vq_add_flags(cvq_desc, vq_descr_idx,
+					     VIRTQ_DESC_F_NEXT);
+
+				int next_vq_descr_idx =
+					VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);
+
+				vq_set_next(cvq_desc, vq_descr_idx,
+					    next_vq_descr_idx);
+
+				vq_descr_idx = next_vq_descr_idx;
+
+				vq_set_flags(cvq_desc, vq_descr_idx, 0);
+				vq_set_next(cvq_desc, vq_descr_idx, 0);
+
+				/* guard against descriptor overrun */
+				if (++cur_seg_num > max_segs)
+					break;
+
+				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
+				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;
+
+				int actual_cpy_size = (cpy_size > cpto_size) ?
+						      cpto_size :
+						      cpy_size;
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+				printf("Tx vq buf seg %i: virtq cpy %i - offset 0x%" PRIX64 "\n",
+				       cur_seg_num, actual_cpy_size,
+				       (uint64_t)dst -
+				       (uint64_t)vq_bufs[vq_descr_idx]
+				       .virt_addr);
+#endif
+				rte_memcpy((void *)dst, (void *)data,
+					   actual_cpy_size);
+
+				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;
+
+				remain -= actual_cpy_size;
+				cpy_size -= actual_cpy_size;
+				cpto_size -= actual_cpy_size;
+
+				data += actual_cpy_size;
+
+			} while (cpy_size && remain);
+
+		} else {
+			/* All data from this segment can fit in current virtqueue buffer */
+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG
+			/* fix: 64-bit offset was printed with %u (UB) */
+			printf("Tx vq buf seg %i: Copy %i bytes - offset 0x%" PRIX64 "\n",
+			       cur_seg_num, cpy_size,
+			       (uint64_t)dst -
+			       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);
+#endif
+
+			rte_memcpy((void *)dst, (void *)data, cpy_size);
+
+			cvq_desc->b[vq_descr_idx].len += cpy_size;
+
+			remain -= cpy_size;
+			cpto_size -= cpy_size;
+		}
+
+		/* Packet complete - all segments from current mbuf has been copied */
+		if (remain == 0)
+			break;
+		/* increment dst to data end */
+		dst = (char *)vq_bufs[vq_descr_idx].virt_addr +
+		      cvq_desc->b[vq_descr_idx].len;
+
+		m = m->next;
+		if (!m) {
+			/* pkt_len said more data, but the chain ended */
+			NT_LOG(ERR, ETHDEV, "ERROR: invalid packet size\n");
+			break;
+		}
+
+		/* Prepare for next mbuf segment */
+		data = rte_pktmbuf_mtod(m, char *);
+		cpy_size = m->data_len;
+	}
+
+	cur_seg_num++;
+	if (cur_seg_num > max_segs) {
+		NT_LOG(ERR, ETHDEV,
+		       "Did not receive correct number of segment for a whole packet");
+		return -1;
+	}
+
+	return cur_seg_num;
+}
+
+/*
+ * DPDK TX burst callback (scatter-gather virtqueue path).
+ *
+ * Pads runt frames to 60 bytes, reserves the required number of
+ * virtqueue descriptors, copies each mbuf chain into virtqueue
+ * buffers behind an SG_HDR_SIZE header and releases the buffers to
+ * HW. Successfully queued mbufs are freed here.
+ *
+ * @return number of packets handed to HW
+ */
+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,
+			       uint16_t nb_pkts)
+{
+	uint16_t pkt;
+	uint16_t first_vq_descr_idx = 0;
+
+	struct nthw_cvirtq_desc cvq_desc;
+
+	struct nthw_memory_descriptor *vq_bufs;
+
+	struct ntnic_tx_queue *tx_q = queue;
+
+	int nb_segs = 0, i;
+	int pkts_sent = 0;
+	uint16_t nb_segs_arr[MAX_TX_PACKETS];
+
+	/* global shutdown flag - stop servicing queues */
+	if (kill_pmd)
+		return 0;
+
+	if (nb_pkts > MAX_TX_PACKETS)
+		nb_pkts = MAX_TX_PACKETS;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\n---------- DPDK Tx ------------\n");
+#endif
+
+	/*
+	 * count all segments needed to contain all packets in vq buffers
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		/*
+		 * Pad runt frames to the 60-byte minimum.
+		 * NOTE(review): only the head mbuf's pkt_len/data_len are
+		 * adjusted - confirm short multi-segment mbufs cannot occur.
+		 */
+		if (bufs[i]->pkt_len < 60) {
+			bufs[i]->pkt_len = 60;
+			bufs[i]->data_len = 60;
+		}
+
+		/* build the num segments array for segmentation control and release function */
+		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
+
+		nb_segs_arr[i] = vq_segs;
+		nb_segs += vq_segs;
+	}
+	if (!nb_segs)
+		goto exit_out;
+
+#ifdef TX_CHAINING_DEBUG
+	printf("[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\n",
+	       tx_q->port, nb_segs, nb_pkts,
+	       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),
+	       rte_pktmbuf_headroom(bufs[0]));
+#endif
+
+	int got_nb_segs =
+		nthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,
+				    &cvq_desc /*&vq_descr,*/, &vq_bufs);
+	if (!got_nb_segs) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Zero segments got - back pressure from HW\n");
+#endif
+		goto exit_out;
+	}
+
+	/*
+	 * we may get less vq buffers than we have asked for
+	 * calculate last whole packet that can fit into what
+	 * we have got
+	 */
+	while (got_nb_segs < nb_segs) {
+		if (!--nb_pkts)
+			goto exit_out;
+		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
+		if (nb_segs <= 0)
+			goto exit_out;
+	}
+
+	/*
+	 * nb_pkts & nb_segs, got it all, ready to copy
+	 */
+	int seg_idx = 0;
+	int last_seg_idx = seg_idx;
+
+	for (pkt = 0; pkt < nb_pkts; ++pkt) {
+		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
+
+		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
+		vq_set_next(&cvq_desc, vq_descr_idx, 0);
+
+		struct _pkt_hdr_tx *hdr_tx =
+			(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;
+		/* Set the header to all zeros */
+		memset(hdr_tx, 0, SG_HDR_SIZE);
+
+		/*
+		 * Set the NT DVIO0 header fields
+		 *
+		 * Applicable for Vswitch only.
+		 * For other product types the header values are "don't care" and we leave them as
+		 * all zeros.
+		 */
+		if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+			hdr_tx->bypass_port = tx_q->target_id;
+
+			/* set packet length */
+			hdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
+		}
+
+#ifdef TX_CHAINING_DEBUG
+		printf("\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\n",
+		       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,
+		       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);
+
+#ifdef TX_SRC_DUMP_PKTS_DEBUG
+		{
+			struct rte_mbuf *m = bufs[pkt];
+			int ii;
+
+			printf("Dump src mbuf:\n");
+			for (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {
+				printf("  seg %i len %i\n", ii, m->data_len);
+				printf("  seg dump:\n");
+				dump_packet_seg("mbuf seg:",
+						rte_pktmbuf_mtod(m, uint8_t *),
+						m->data_len);
+				m = m->next;
+			}
+		}
+#endif
+
+#endif
+
+		/* fast path: one mbuf segment into one vq buffer */
+		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
+#ifdef TX_CHAINING_DEBUG
+			printf("Simple copy %i bytes - mbuf -> vq\n",
+			       bufs[pkt]->pkt_len);
+#endif
+			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +
+				SG_HDR_SIZE),
+				rte_pktmbuf_mtod(bufs[pkt], void *),
+				bufs[pkt]->pkt_len);
+
+			cvq_desc.b[vq_descr_idx].len =
+				bufs[pkt]->pkt_len + SG_HDR_SIZE;
+
+			seg_idx++;
+		} else {
+			/* multi-segment copy with descriptor chaining */
+			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,
+							      vq_descr_idx, vq_bufs,
+							      nb_segs - last_seg_idx, bufs[pkt]);
+			if (cpy_segs < 0)
+				break;
+			seg_idx += cpy_segs;
+		}
+
+#ifdef TX_DST_DUMP_PKTS_DEBUG
+		int d, tot_size = 0;
+
+		for (d = last_seg_idx; d < seg_idx; d++)
+			tot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;
+		printf("\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\n",
+		       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,
+		       tx_q->queue.hw_id);
+		for (d = last_seg_idx; d < seg_idx; d++) {
+			char str[32];
+
+			sprintf(str, "Vq seg %i:", d - last_seg_idx);
+			dump_packet_seg(str,
+					vq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,
+					cvq_desc.b[VIRTQ_DESCR_IDX(d)].len);
+		}
+#endif
+
+		last_seg_idx = seg_idx;
+		rte_pktmbuf_free(bufs[pkt]);
+		pkts_sent++;
+	}
+
+#ifdef TX_CHAINING_DEBUG
+	printf("\nTx final vq setup:\n");
+	for (int i = 0; i < nb_segs; i++) {
+		int idx = VIRTQ_DESCR_IDX(i);
+
+		if (cvq_desc.vq_type == SPLIT_RING) {
+			printf("virtq descr %i, len %i, flags %04x, next %i\n",
+			       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,
+			       cvq_desc.s[idx].next);
+		}
+	}
+#endif
+
+exit_out:
+
+	if (pkts_sent) {
+#ifdef TX_CHAINING_DEBUG
+		printf("Release virtq segs %i\n", nb_segs);
+#endif
+		nthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);
+	}
+	return pkts_sent;
+}
+
+/*
+ * Allocate and IOMMU-map the memory backing one HW virtio queue: a
+ * 1MB control area for the combined descriptor rings plus num_descr
+ * packet buffers of buf_size bytes each.
+ *
+ * Preferred layout: one combined allocation mapped through a 1G
+ * window where HPA and IOVA share the same 1G offset. When that is
+ * not possible, fall back to two separately mapped allocations.
+ *
+ * On success fills @hwq and returns 0; on failure returns -1 with
+ * all intermediate allocations and mappings released (the original
+ * code leaked them on every error path).
+ */
+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,
+				     struct hwq_s *hwq, int num_descr,
+				     int buf_size)
+{
+	int i, res;
+	uint32_t size;
+	uint64_t iova_addr;
+
+	NT_LOG(DBG, ETHDEV,
+	       "***** Configure IOMMU for HW queues on VF %i *****\n", vf_num);
+
+	/* Just allocate 1MB to hold all combined descr rings */
+	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
+
+	void *virt = rte_malloc_socket("VirtQDescr", tot_alloc_size,
+				       ALIGN_SIZE(tot_alloc_size),
+				       eth_dev->data->numa_node);
+	if (!virt)
+		return -1;
+
+	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
+	rte_iova_t hpa = rte_malloc_virt2iova(virt);
+
+	NT_LOG(DBG, ETHDEV,
+	       "Allocated virtio descr rings : virt %p [0x%" PRIX64
+	       "], hpa %p [0x%" PRIX64 "]\n",
+	       virt, gp_offset, hpa, hpa & ONE_G_MASK);
+
+	/*
+	 * Same offset on both HPA and IOVA
+	 * Make sure 1G boundary is never crossed
+	 */
+	if (((hpa & ONE_G_MASK) != gp_offset) ||
+			(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
+			((uint64_t)virt & ~ONE_G_MASK)) {
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+		NT_LOG(ERR, ETHDEV,
+		       "ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %" PRIu64 "\n",
+		       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
+		NT_LOG(ERR, ETHDEV,
+		       "*********************************************************\n");
+
+		rte_free(virt);
+
+		/*
+		 * Fallback: two separate allocations/mappings.
+		 * 1MB control area holding all combined descr rings.
+		 */
+		const uint32_t ctrl_size = 0x100000;
+		void *ctrl_virt = rte_malloc_socket("VirtQDescr", ctrl_size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!ctrl_virt)
+			return -1;
+
+		res = nt_vfio_dma_map(vf_num, ctrl_virt, &iova_addr,
+				      ctrl_size);
+
+		NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, vf_num %i\n", res,
+		       vf_num);
+		if (res != 0) {
+			/* fix: do not leak the control area on map failure */
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		hwq->vf_num = vf_num;
+		hwq->virt_queues_ctrl.virt_addr = ctrl_virt;
+		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
+		hwq->virt_queues_ctrl.len = ctrl_size;
+
+		NT_LOG(DBG, ETHDEV,
+		       "Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\n",
+		       ctrl_virt, iova_addr);
+
+		size = num_descr * sizeof(struct nthw_memory_descriptor);
+		hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size,
+						      64, eth_dev->data->numa_node);
+		if (!hwq->pkt_buffers) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed to allocated buffer array for hw-queue %p, "
+			       "total size %i, elements %i\n",
+			       hwq->pkt_buffers, size, num_descr);
+			/* fix: unmap and free the control area */
+			nt_vfio_dma_unmap(vf_num, ctrl_virt,
+					  (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+					  ctrl_size);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		size = buf_size * num_descr;
+		void *virt_addr = rte_malloc_socket("pkt_buffer_pkts", size,
+						    4096,
+						    eth_dev->data->numa_node);
+		if (!virt_addr) {
+			NT_LOG(ERR, ETHDEV,
+			       "Failed allocate packet buffers for hw-queue %p, "
+			       "buf size %i, elements %i\n",
+			       hwq->pkt_buffers, buf_size, num_descr);
+			rte_free(hwq->pkt_buffers);
+			nt_vfio_dma_unmap(vf_num, ctrl_virt,
+					  (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+					  ctrl_size);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
+
+		NT_LOG(DBG, ETHDEV,
+		       "VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num "
+		       "pkt bufs %i, tot size %i\n",
+		       res, virt_addr, iova_addr, vf_num, num_descr, size);
+
+		if (res != 0) {
+			/* fix: release all fallback resources on map failure */
+			rte_free(virt_addr);
+			rte_free(hwq->pkt_buffers);
+			nt_vfio_dma_unmap(vf_num, ctrl_virt,
+					  (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+					  ctrl_size);
+			rte_free(ctrl_virt);
+			return -1;
+		}
+
+		for (i = 0; i < num_descr; i++) {
+			hwq->pkt_buffers[i].virt_addr =
+				(void *)((char *)virt_addr +
+					 ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].phys_addr =
+				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+			hwq->pkt_buffers[i].len = buf_size;
+		}
+
+		return 0;
+	} /* End of: no optimal IOMMU mapping available */
+
+	/* Preferred path: map the whole combined area in one 1G window */
+	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO MMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		rte_free(virt); /* fix: do not leak on map failure */
+		return -1;
+	}
+
+	hwq->vf_num = vf_num;
+	hwq->virt_queues_ctrl.virt_addr = virt;
+	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
+	hwq->virt_queues_ctrl.len = 0x100000;
+	iova_addr += 0x100000;
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP: virt_addr=%" PRIX64 " phys_addr=%" PRIX64
+	       " size=%" PRIX64 " hpa=%" PRIX64 "\n",
+	       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
+	       hwq->virt_queues_ctrl.len,
+	       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
+
+	size = num_descr * sizeof(struct nthw_memory_descriptor);
+	hwq->pkt_buffers = rte_zmalloc_socket("rx_pkt_buffers", size, 64,
+					      eth_dev->data->numa_node);
+	if (!hwq->pkt_buffers) {
+		NT_LOG(ERR, ETHDEV,
+		       "Failed to allocated buffer array for hw-queue %p, total size %i, elements %i\n",
+		       hwq->pkt_buffers, size, num_descr);
+		/* fix: unmap the 1G window before freeing */
+		nt_vfio_dma_unmap(vf_num, virt,
+				  (uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				  ONE_G_SIZE);
+		rte_free(virt);
+		return -1;
+	}
+
+	/* Packet buffers start right after the 1MB descr ring area */
+	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
+
+	for (i = 0; i < num_descr; i++) {
+		hwq->pkt_buffers[i].virt_addr =
+			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].phys_addr =
+			(void *)(iova_addr + ((uint64_t)(i) * buf_size));
+		hwq->pkt_buffers[i].len = buf_size;
+	}
+	return 0;
+}
+
+/* Mark the HW queue as no longer owned by a VF (vf_num 0 == unused). */
+static void release_hw_virtio_queues(struct hwq_s *hwq)
+{
+	if (hwq == NULL || hwq->vf_num == 0)
+		return;
+
+	hwq->vf_num = 0;
+}
+
+/*
+ * Undo allocate_hw_virtio_queues(): unmap the 1G IOMMU window, drop
+ * the VF association and free descriptor/buffer memory.
+ * Returns 0 on success, -1 if the VFIO unmap fails.
+ */
+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
+{
+	const int vf_num = hwq->vf_num;
+	void *descr_mem = hwq->virt_queues_ctrl.virt_addr;
+	int res;
+
+	res = nt_vfio_dma_unmap(vf_num, descr_mem,
+				(uint64_t)hwq->virt_queues_ctrl.phys_addr,
+				ONE_G_SIZE);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMMAP FAILED! res %i, vf_num %i\n",
+		       res, vf_num);
+		return -1;
+	}
+
+	release_hw_virtio_queues(hwq);
+	rte_free(hwq->pkt_buffers);
+	rte_free(descr_mem);
+	return 0;
+}
+
+/* DPDK tx_queue_release callback: tear down the queue's HW resources. */
+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&dev_priv->txq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* DPDK rx_queue_release callback: tear down the queue's HW resources. */
+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	deallocate_hw_virtio_queues(&dev_priv->rxq_scg[queue_id].hwq);
+	NT_LOG(DBG, ETHDEV, "NTNIC: %s\n", __func__);
+}
+
+/* Running count of HW queues handed out by allocate_queue(). */
+static int num_queues_allocated;
+
+/*
+ * Reserve @num consecutive queues. Returns the first queue number of
+ * the reservation, or -1 when MAX_TOTAL_QUEUES would be exceeded.
+ */
+static int allocate_queue(int num)
+{
+	const int first = num_queues_allocated;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\n",
+	       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);
+	if (num_queues_allocated + num > MAX_TOTAL_QUEUES)
+		return -1;
+
+	num_queues_allocated += num;
+	return first;
+}
+
+/*
+ * DPDK rx_queue_setup callback.
+ *
+ * For PORT_TYPE_OVERRIDE ports only the mbuf pool is attached. For
+ * regular ports a HW virtio split-ring queue is allocated and
+ * IOMMU-mapped, and a managed RX virt-queue is created on top of it.
+ *
+ * @return 0 on success, -1 on allocation/setup failure
+ */
+static int
+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,
+		       uint16_t nb_rx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_rxconf *rx_conf __rte_unused,
+		       struct rte_mempool *mb_pool)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+
+	/* Override ports only attach the pool - no HW queue is created */
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		rx_q->mb_pool = mb_pool;
+		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+					    RTE_PKTMBUF_HEADROOM);
+		rx_q->enabled = 1;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       internals->port, rx_queue_id, rx_q->queue.hw_id);
+
+	rx_q->mb_pool = mb_pool;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
+
+	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
+	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+				    RTE_PKTMBUF_HEADROOM);
+	rx_q->enabled = 1;
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
+				      SG_NB_HW_RX_DESCRIPTORS,
+				      SG_HW_RX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
+
+	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	rx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		rx_q->queue.hw_id, /* index */
+		rx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */
+		1, /* header NT DVIO header for exception path */
+		&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);
+	/* fix: setup result was previously used unchecked */
+	if (rx_q->vq == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "(%i) NTNIC RX OVS-SW queue setup failed\n",
+		       internals->port);
+		return -1;
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC RX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	return 0;
+}
+
+/*
+ * DPDK tx_queue_setup callback.
+ *
+ * For PORT_TYPE_OVERRIDE ports only the queue pointer is registered.
+ * Otherwise: derive the bypass target id used in the NT DVIO0 header,
+ * allocate and IOMMU-map the HW virtio queue memory, create a managed
+ * TX split-ring virt-queue, bind the virtual-port queues to this TX
+ * port, and (for physical ports) enable the port admin state.
+ *
+ * @return 0 on success, -1 on failure
+ */
+static int
+eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		       uint16_t nb_tx_desc __rte_unused,
+		       unsigned int socket_id __rte_unused,
+		       const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+		return 0;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\n",
+	       tx_q->port, tx_queue_id, tx_q->queue.hw_id);
+
+	/*
+	 * NOTE(review): this range check uses '>' and runs after
+	 * txq_scg[tx_queue_id] has already been read above - confirm
+	 * whether '>=' and an earlier check are intended.
+	 */
+	if (tx_queue_id > internals->nb_tx_queues) {
+		printf("Error invalid tx queue id\n");
+		return -1;
+	}
+
+	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
+
+	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
+	if (tx_q->rss_target_id >= 0) {
+		/* bypass to a multiqueue port - qsl-hsh index */
+		tx_q->target_id = tx_q->rss_target_id + 0x90;
+	} else {
+		if (internals->vpq[tx_queue_id].hw_id > -1) {
+			/* virtual port - queue index */
+			tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
+		} else {
+			/* Phy port - phy port identifier */
+			if (lag_active) {
+				/* If in LAG mode use bypass 0x90 mode */
+				tx_q->target_id = 0x90;
+			} else {
+				/* output/bypass to MAC */
+				tx_q->target_id = (int)(tx_q->port + 0x80);
+			}
+		}
+	}
+
+	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
+				      SG_NB_HW_TX_DESCRIPTORS,
+				      SG_HW_TX_PKT_BUFFER_SIZE) < 0)
+		return -1;
+
+	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
+
+	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
+
+	uint32_t port, header;
+
+	if (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		/* transmit port - not used in vswitch enabled mode - using bypass */
+		port = 0;
+		header = 1; /* header type DVIO0 Always for exception path */
+	} else {
+		port = tx_q->port; /* transmit port */
+		header = 0; /* header type VirtIO-Net */
+	}
+	/*
+	 * in_port - in vswitch mode has to move tx port from OVS excep. Away
+	 * from VM tx port, because of QoS is matched by port id!
+	 */
+	tx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
+		tx_q->queue.hw_id, /* index */
+		tx_q->nb_hw_tx_descr, /* queue size */
+		EXCEPTION_PATH_HID, /* host_id always VF4 */
+		port,
+		tx_q->port +
+		128,
+		header, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,
+		SPLIT_RING, -1, IN_ORDER);
+
+	tx_q->enabled = 1;
+	/* route all virtual-port queues to this TX port */
+	for (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {
+		nthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,
+					    internals->vpq[i].hw_id, tx_q->port);
+	}
+
+	NT_LOG(DBG, ETHDEV, "(%i) NTNIC TX OVS-SW queues successfully setup\n",
+	       internals->port);
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		struct adapter_info_s *p_adapter_info =
+				&internals->p_drv->ntdrv.adapter_info;
+		NT_LOG(DBG, ETHDEV, "Port %i is ready for data. Enable port\n",
+		       internals->if_index);
+		nt4ga_port_set_adm_state(p_adapter_info, internals->if_index,
+					 true);
+		if (lag_active && internals->if_index == 0) {
+			/*
+			 * Special case for link aggregation where the second phy interface (port 1)
+			 * is "hidden" from DPDK and therefore doesn't get enabled through normal
+			 * interface probing
+			 */
+			NT_LOG(DBG, ETHDEV, "LAG: Enable port %i\n",
+			       internals->if_index + 1);
+			nt4ga_port_set_adm_state(p_adapter_info,
+						 internals->if_index + 1, true);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Set the inline-profile MTU for a port via the flow module.
+ * Returns 0 on success, -EINVAL on failure or invalid input.
+ */
+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	struct flow_eth_dev *flw_dev = internals->flw_dev;
+	int res = -1;
+
+	/* Inline MTU applies only to physical ports within [MIN_MTU_INLINE, MAX_MTU] */
+	if (internals->type == PORT_TYPE_PHYSICAL &&
+			mtu >= MIN_MTU_INLINE && mtu <= MAX_MTU)
+		res = flow_set_mtu_inline(flw_dev, internals->port, mtu);
+
+	return res == 0 ? 0 : -EINVAL;
+}
+
+/*
+ * Set the MTU for a port (non-inline profiles).
+ * The exception-path queue is always programmed to MAX_MTU; the requested
+ * MTU is applied to the virtual queues (virtual port) or the physical port.
+ * Returns 0 on success, a non-zero EPP error, or -EINVAL for bad input.
+ */
+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	int rc;
+
+	if (mtu < MIN_MTU || mtu > MAX_MTU)
+		return -EINVAL;
+
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* Exception tx queue to OVS always allows up to MAX_MTU */
+		rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				      internals->rxq_scg[0].queue.hw_id,
+				      MAX_MTU, internals->type);
+		if (rc)
+			return rc;
+
+		/* Apply the requested MTU to each virtual port queue */
+		for (uint i = 0; i < internals->vpq_nb_vq; i++) {
+			rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+					      internals->vpq[i].hw_id,
+					      mtu, internals->type);
+			if (rc)
+				return rc;
+
+			NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d queue hw_id %d\n",
+			       mtu, internals->vpq[i].hw_id);
+		}
+		return rc;
+	}
+
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* Exception tx queue to OVS always allows up to MAX_MTU */
+		rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				      internals->rxq_scg[0].queue.hw_id,
+				      MAX_MTU, PORT_TYPE_VIRTUAL);
+		if (rc)
+			return rc;
+
+		/* Apply the requested MTU to the physical port itself */
+		rc = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,
+				      internals->port, mtu, internals->type);
+
+		NT_LOG(DBG, ETHDEV, "SET MTU SIZE %d port %d\n", mtu,
+		       internals->port);
+		return rc;
+	}
+
+	NT_LOG(DBG, ETHDEV,
+	       "COULD NOT SET MTU SIZE %d port %d type %d\n", mtu,
+	       internals->port, internals->type);
+	return -EINVAL;
+}
+
+/* Mark an RX queue started; no HW action, only the DPDK queue state changes. */
+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* Mark an RX queue stopped; no HW action, only the DPDK queue state changes. */
+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	struct rte_eth_dev_data *data = eth_dev->data;
+
+	data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/*
+ * Mark a TX queue started; no HW action, only the DPDK queue state changes.
+ * Fix: parameter renamed from the misleading "rx_queue_id" - this callback
+ * operates on TX queues.
+ */
+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/*
+ * Mark a TX queue stopped; no HW action, only the DPDK queue state changes.
+ * Fix: parameter renamed from the misleading "rx_queue_id" - this callback
+ * operates on TX queues.
+ */
+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* Clear the MAC address stored at the given index (shadow copy only). */
+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	/* Also reject out-of-range indices in release builds */
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return;
+	}
+
+	struct rte_ether_addr *slot = &dev->data->mac_addrs[index];
+
+	(void)memset(slot, 0, sizeof(*slot));
+}
+
+/* Store a MAC address at the given index (shadow copy only). */
+static int eth_mac_addr_add(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr, uint32_t index,
+			    uint32_t vmdq __rte_unused)
+{
+	assert(index < NUM_MAC_ADDRS_PER_PORT);
+
+	/* Also reject out-of-range indices in release builds */
+	if (index >= NUM_MAC_ADDRS_PER_PORT) {
+		const struct pmd_internals *const internals =
+				dev->data->dev_private;
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: illegal index %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index, index,
+		       NUM_MAC_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	dev->data->mac_addrs[index] = *mac_addr;
+	return 0;
+}
+
+/* Set the default (primary) MAC address; it lives in slot 0. */
+static int eth_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *mac_addr)
+{
+	dev->data->mac_addrs[0] = *mac_addr;
+	return 0;
+}
+
+/*
+ * Replace the port's multicast address list (shadow copy).
+ * Copies the supplied addresses and zeroes the remaining slots.
+ */
+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mc_addr_set,
+				uint32_t nb_mc_addr)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
+	size_t i;
+
+	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\n",
+		       __FILE__, __func__, __LINE__, internals->if_index,
+		       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
+		return -1;
+	}
+
+	/* Copy the new entries, then clear whatever is left of the table */
+	for (i = 0U; i < nb_mc_addr; i++)
+		mc_addrs[i] = mc_addr_set[i];
+	for (; i < NUM_MULTICAST_ADDRS_PER_PORT; i++)
+		(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
+
+	return 0;
+}
+
+/*
+ * DPDK dev_configure callback: records that probing has finished and
+ * reflects the always-on promiscuous mode in the DPDK device state.
+ */
+static int eth_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] Called for eth_dev %p\n", __func__,
+	       __func__, __LINE__, eth_dev);
+
+	p_drv->probe_finished = 1;
+
+	/*
+	 * The device ALWAYS runs promiscuous mode. Fix: state this directly
+	 * instead of the obfuscated "x ^= ~x" (which sets every bit and only
+	 * yielded 1 by virtue of the field being a single bit).
+	 */
+	eth_dev->data->promiscuous = 1;
+	return 0;
+}
+
+/*
+ * DPDK dev_start callback.
+ * Virtual/override ports report link up immediately; physical ports wait
+ * (up to 5 s) for the FPGA link before optional loopback is configured.
+ */
+static int eth_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u\n", __func__, __func__,
+	       __LINE__, internals->n_intf_no, internals->if_index);
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+		return 0;
+	}
+
+	/*
+	 * Wait for link on the port: if the application starts sending
+	 * before the FPGA port is ready, garbage is produced.
+	 */
+	int attempts = 0;
+
+	while (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==
+			ETH_LINK_DOWN) {
+		/* 50 polls x 100 ms = 5 sec timeout */
+		if (++attempts >= 50) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: TIMEOUT No link on port %i (5sec timeout)\n",
+			       __func__, internals->n_intf_no);
+			break;
+		}
+		usleep(100000);
+	}
+
+	assert(internals->n_intf_no ==
+	       internals->if_index); /* Sanity check */
+
+	/* lpbk_mode bit 0 selects host loopback, bit 1 line loopback */
+	if (internals->lpbk_mode & (1 << 0))
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_HOST);
+	if (internals->lpbk_mode & (1 << 1))
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_LINE);
+
+	return 0;
+}
+
+/*
+ * DPDK dev_stop callback.
+ * For non-virtual ports: release all managed virt-queues, administratively
+ * disable the port and clear link state. All ports end with link down.
+ */
+static int eth_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	const int n_intf_no = internals->if_index;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] - Port %u, %u, type %u\n", __func__,
+	       __func__, __LINE__, internals->n_intf_no, internals->if_index,
+	       internals->type);
+
+	if (internals->type != PORT_TYPE_VIRTUAL) {
+		uint q;
+
+		/* Hand back every managed RX/TX virt-queue of this port */
+		for (q = 0; q < internals->nb_rx_queues; q++)
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[q].vq);
+		for (q = 0; q < internals->nb_tx_queues; q++)
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[q].vq);
+
+		/* Disable the port and reset link speed/status/loopback */
+		nt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);
+		nt4ga_port_set_link_speed(p_adapter_info, n_intf_no,
+					  NT_LINK_SPEED_UNKNOWN);
+		nt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,
+					     NT_LINK_LOOPBACK_OFF);
+	}
+
+	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	return 0;
+}
+
+/* Administratively bring a physical port up; no-op for virtual/override. */
+static int eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+	const bool is_phy = internals->type != PORT_TYPE_VIRTUAL &&
+			    internals->type != PORT_TYPE_OVERRIDE;
+
+	if (!is_phy)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, true);
+	return 0;
+}
+
+/*
+ * Administratively take a physical port down; no-op for virtual/override.
+ * Fix: use nt4ga_port_set_adm_state(..., false) so this mirrors
+ * eth_dev_set_link_up(), which toggles the admin state. The original wrote
+ * the link *status* instead, leaving the port administratively enabled.
+ */
+static int eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *const internals = dev->data->dev_private;
+	struct adapter_info_s *p_adapter_info =
+			&internals->p_drv->ntdrv.adapter_info;
+	const int port = internals->if_index;
+
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
+	assert(port == internals->n_intf_no);
+
+	nt4ga_port_set_adm_state(p_adapter_info, port, false);
+
+	return 0;
+}
+
+/*
+ * DPDK dev_close callback: free this port's resources (LAG config, the
+ * internals struct, the eth_dev itself) and, once the last port closes,
+ * shut down the driver threads and deinit the adapter.
+ */
+static int eth_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	(void)pci_dev; /* UNUSED */
+
+	NT_LOG(DBG, ETHDEV, "%s: enter [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+
+	internals->p_drv = NULL;
+
+	/* LAG cleanup: stop and reap the LAG thread before freeing its config */
+	if (internals->lag_config) {
+		if (internals->lag_config->lag_tid) {
+			internals->lag_config->lag_thread_active = 0;
+			pthread_join(internals->lag_config->lag_tid, NULL);
+		}
+		lag_active = 0;
+		rte_free(internals->lag_config);
+	}
+
+	/* free - p_drv was saved above and remains valid past this point */
+	rte_free(internals);
+	internals = NULL;
+
+	eth_dev->data->dev_private = NULL;
+	eth_dev->data->mac_addrs = NULL;
+
+	/* release */
+	rte_eth_dev_release_port(eth_dev);
+
+	NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+	       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+	p_drv->n_eth_dev_init_count--;
+
+	/*
+	 * rte_pci_dev has no private member for p_drv
+	 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
+	 * Fix: test p_drv for NULL *before* dereferencing it; the original
+	 * condition was "!p_drv->n_eth_dev_init_count && p_drv".
+	 */
+	if (p_drv && !p_drv->n_eth_dev_init_count) {
+		NT_LOG(DBG, ETHDEV, "%s: %d [%s:%u]\n", __func__,
+		       p_drv->n_eth_dev_init_count, __func__, __LINE__);
+		p_drv->ntdrv.b_shutdown = true;
+		void *p_ret_val = NULL;
+
+		/* Reap stat thread; the FLM thread only runs in inline profile */
+		pthread_join(p_nt_drv->stat_thread, &p_ret_val);
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+			p_ret_val = NULL;
+			pthread_join(p_nt_drv->flm_thread, &p_ret_val);
+		}
+		nt4ga_adapter_deinit(&p_nt_drv->adapter_info);
+		rte_free(p_drv);
+	}
+	NT_LOG(DBG, ETHDEV, "%s: leave [%s:%u]\n", __func__, __func__,
+	       __LINE__);
+	return 0;
+}
+
+/*
+ * Report the FPGA image version as the "firmware version".
+ * Returns 0 on success (or for version-less virtual/override ports);
+ * returns the required buffer size when fw_size is too small.
+ */
+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+			      size_t fw_size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+
+	/* Virtual and override ports carry no FPGA version */
+	if (internals->type == PORT_TYPE_VIRTUAL ||
+			internals->type == PORT_TYPE_OVERRIDE)
+		return 0;
+
+	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
+	const int length =
+		snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
+			 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+			 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+
+	if ((size_t)length >= fw_size) {
+		/* Truncated: tell the caller how much space is needed */
+		return length + 1;
+	}
+	return 0;
+}
+
+/* Fetch extended statistics for this port under the driver stat lock. */
+static int eth_xstats_get(struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int ret;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	ret = nthw_xstats_get(p_nt4ga_stat, stats, n,
+			      p_nthw_stat->mb_is_vswitch,
+			      internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return ret;
+}
+
+/* Fetch selected extended statistics by id under the driver stat lock. */
+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+				const uint64_t *ids, uint64_t *values,
+				unsigned int n)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	int ret;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	ret = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,
+				    p_nthw_stat->mb_is_vswitch,
+				    internals->if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return ret;
+}
+
+/* Reset HW xstats under the stat lock, then reset the DPDK-side counters. */
+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+	const int if_index = internals->if_index;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+	nthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+	return dpdk_stats_reset(internals, p_nt_drv, if_index);
+}
+
+/*
+ * Return the names of all extended statistics.
+ * Fix: eth_dev was marked __rte_unused although it is dereferenced on the
+ * first line of the body; the bogus attribute is removed.
+ */
+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct drv_s *p_drv = internals->p_drv;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,
+				     p_nthw_stat->mb_is_vswitch);
+}
+
+/* Return the names of the extended statistics selected by ids. */
+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+				      const uint64_t *ids,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned int size)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	return nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids,
+					   size, p_nthw_stat->mb_is_vswitch);
+}
+
+/* rte_flow ops getter: every port shares the single _dev_flow_ops table. */
+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_ops **ops)
+{
+	*ops = &_dev_flow_ops;
+	return 0;
+}
+
+/*
+ * promiscuous_enable callback: the device is always promiscuous, so this
+ * is a no-op that merely acknowledges the request.
+ */
+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)
+{
+	NT_LOG(DBG, NTHW, "The device always run promiscuous mode.");
+	return 0;
+}
+
+/*
+ * Program the RSS hash fields into the default HSH recipe and flush it
+ * to HW. Returns 0 on success, non-zero otherwise.
+ */
+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
+	/* hsh index 0 means the default receipt in HSH module */
+	const int hsh_idx = 0;
+	int res;
+
+	res = flow_nic_set_hasher_fields(ndev, hsh_idx,
+					 nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));
+	res |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
+	return res;
+}
+
+/*
+ * Report the current RSS configuration. No RSS key is exposed; only the
+ * active hash functions are OR'ed into rss_conf->rss_hf.
+ */
+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)eth_dev->data->dev_private;
+	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
+
+	rss_conf->rss_key = NULL;
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf |=
+		dpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);
+	return 0;
+}
+
+/*
+ * DPDK ethdev callback table for the ntnic PMD.
+ * NOTE(review): deliberately not const and .mtu_set is NULL here even though
+ * dev_set_mtu()/dev_set_mtu_inline() are defined above - presumably the slot
+ * is assigned later depending on the FPGA profile; confirm against the probe
+ * code. .promiscuous_disable is NULL because the device is always promiscuous.
+ */
+static struct eth_dev_ops nthw_eth_dev_ops = {
+	/* Device lifecycle */
+	.dev_configure = eth_dev_configure,
+	.dev_start = eth_dev_start,
+	.dev_stop = eth_dev_stop,
+	.dev_set_link_up = eth_dev_set_link_up,
+	.dev_set_link_down = eth_dev_set_link_down,
+	.dev_close = eth_dev_close,
+	/* Link and info */
+	.link_update = eth_link_update,
+	.stats_get = eth_stats_get,
+	.stats_reset = eth_stats_reset,
+	.dev_infos_get = eth_dev_infos_get,
+	.fw_version_get = eth_fw_version_get,
+	/* Queue management */
+	.rx_queue_setup = eth_rx_scg_queue_setup,
+	.rx_queue_start = eth_rx_queue_start,
+	.rx_queue_stop = eth_rx_queue_stop,
+	.rx_queue_release = eth_rx_queue_release,
+	.tx_queue_setup = eth_tx_scg_queue_setup,
+	.tx_queue_start = eth_tx_queue_start,
+	.tx_queue_stop = eth_tx_queue_stop,
+	.tx_queue_release = eth_tx_queue_release,
+	/* MAC address handling (shadow copies) */
+	.mac_addr_remove = eth_mac_addr_remove,
+	.mac_addr_add = eth_mac_addr_add,
+	.mac_addr_set = eth_mac_addr_set,
+	.set_mc_addr_list = eth_set_mc_addr_list,
+	/* Extended statistics */
+	.xstats_get = eth_xstats_get,
+	.xstats_get_names = eth_xstats_get_names,
+	.xstats_reset = eth_xstats_reset,
+	.xstats_get_by_id = eth_xstats_get_by_id,
+	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
+	.mtu_set = NULL,
+	.mtr_ops_get = eth_mtr_ops_get,
+	.flow_ops_get = _dev_flow_ops_get,
+	.promiscuous_disable = NULL,
+	.promiscuous_enable = promiscuous_enable,
+	/* RSS */
+	.rss_hash_update = eth_dev_rss_hash_update,
+	.rss_hash_conf_get = rss_hash_conf_get,
+};
+
+/*
+ * Map a link speed given in Mbps to the corresponding NT link speed
+ * constant; unknown values map to NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(int link_speed_mbps)
+{
+	switch (link_speed_mbps) {
+	case 10:
+		return NT_LINK_SPEED_10M;
+	case 100:
+		return NT_LINK_SPEED_100M;
+	case 1000:
+		return NT_LINK_SPEED_1G;
+	case 10000:
+		return NT_LINK_SPEED_10G;
+	case 25000:
+		return NT_LINK_SPEED_25G;
+	case 40000:
+		return NT_LINK_SPEED_40G;
+	case 50000:
+		return NT_LINK_SPEED_50G;
+	case 100000:
+		return NT_LINK_SPEED_100G;
+	default:
+		return NT_LINK_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Adapter FLM statistics thread.
+ * Waits until the first flow_eth_dev is configured, then polls
+ * flm_mtr_update_stats() until driver shutdown.
+ */
+static void *adapter_flm_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: waiting for port configuration\n",
+	       p_adapter_info->mp_adapter_id_str, __func__);
+
+	/*
+	 * Fix: also watch b_shutdown while waiting; the original loop could
+	 * hang forever on driver teardown if no port was ever configured.
+	 */
+	while (p_flow_nic_dev->eth_base == NULL) {
+		if (p_drv->ntdrv.b_shutdown) {
+			NT_LOG(DBG, ETHDEV, "%s: %s: end\n",
+			       p_adapter_info->mp_adapter_id_str, __func__);
+			return NULL;
+		}
+		usleep(1000000);
+	}
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		/* Back off briefly when no stats were updated this round */
+		if (flm_mtr_update_stats(dev) == 0)
+			usleep(10);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_info->mp_adapter_id_str,
+	       __func__);
+
+	return NULL;
+}
+
+/*
+ * Adapter statistics thread.
+ * Periodically triggers a statistics snapshot in HW, polls until the HW
+ * writes back a timestamp (logging RMC status if it appears frozen), then
+ * collects the counters under the stat lock.
+ */
+static void *adapter_stat_thread_fn(void *context)
+{
+	struct drv_s *p_drv = context;
+	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+
+	const char *const p_adapter_id_str _unused =
+		p_nt_drv->adapter_info.mp_adapter_id_str;
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: begin\n", p_adapter_id_str, __func__);
+
+	assert(p_nthw_stat);
+
+	while (!p_drv->ntdrv.b_shutdown) {
+		usleep(100 * 100); /* 10 ms between snapshot triggers */
+
+		nthw_stat_trigger(p_nthw_stat);
+
+		uint32_t loop = 0;
+
+		/*
+		 * Poll for completion: mp_timestamp stays (uint64_t)-1 until
+		 * HW finishes the snapshot. Every 0x4000 polls, dump RMC
+		 * status when ETHDEV debug logging is enabled.
+		 */
+		while ((!p_drv->ntdrv.b_shutdown) &&
+				(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
+			usleep(1 * 100);
+
+			if (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&
+					(++loop & 0x3fff) == 0) {
+				uint32_t sf_ram_of =
+					nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t descr_fifo_of =
+				nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);
+
+				uint32_t dbg_merge =
+					nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
+				uint32_t mac_if_err =
+					nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
+
+				NT_LOG(ERR, ETHDEV, "Statistics DMA frozen\n");
+				NT_LOG(ERR, ETHDEV,
+				       "SF RAM Overflow     : %08x\n",
+				       sf_ram_of);
+				NT_LOG(ERR, ETHDEV,
+				       "Descr Fifo Overflow : %08x\n",
+				       descr_fifo_of);
+				NT_LOG(ERR, ETHDEV,
+				       "DBG Merge           : %08x\n",
+				       dbg_merge);
+				NT_LOG(ERR, ETHDEV,
+				       "MAC If Errors       : %08x\n",
+				       mac_if_err);
+			}
+		}
+
+		/* Check then collect */
+		{
+			pthread_mutex_lock(&p_nt_drv->stat_lck);
+			nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
+			pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: %s: end\n", p_adapter_id_str, __func__);
+
+	return NULL;
+}
+
+/*
+ * Per-PCI-device representor bookkeeping filled in during probe.
+ * NOTE(review): field semantics inferred from names only - confirm against
+ * setup_virtual_pf_representor_base() and the probe path.
+ */
+static struct {
+	struct rte_pci_device *vpf_dev;	/* PCI device of the backing PF - TODO confirm */
+	struct rte_eth_devargs eth_da;	/* parsed representor devargs */
+	int portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED]; /* queues per virtual port */
+	uint16_t pf_backer_port_id;	/* ethdev port id backing the representors */
+} rep;
+
+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)
+{
+	int res;
+	struct drv_s *p_drv;
+	ntdrv_4ga_t *p_nt_drv;
+	fpga_info_t *fpga_info;
+
+	hw_info_t *p_hw_info _unused;
+	uint32_t n_port_mask = -1; /* All ports enabled by default */
+	uint32_t nb_rx_queues = 1;
+	uint32_t nb_tx_queues = 1;
+	uint32_t exception_path = 0;
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];
+	lag_config_t *lag_config = NULL;
+	int n_phy_ports;
+	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = {{ 0 }};
+	int num_port_speeds = 0;
+	enum flow_eth_dev_profile profile;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, ETHDEV, "Dev %s PF #%i Init : %02x:%02x:%i\n",
+	       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+
+	/*
+	 * Process options/arguments
+	 */
+	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
+		int kvargs_count;
+		struct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
+							     valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		/*
+		 * Argument: help
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
+				size_t i;
+
+				printf("NTNIC supported arguments:\n\n");
+				for (i = 0; i < RTE_DIM(valid_arguments); i++) {
+					if (valid_arguments[i] == NULL)
+						break;
+					printf("  %s\n", valid_arguments[i]);
+				}
+				printf("\n");
+				exit(0);
+			}
+		}
+
+		/*
+		 * Argument: supported-fpgas=list|verbose
+		 * NOTE: this argument/option check should be the first as it will stop
+		 * execution after producing its output
+		 */
+		{
+			const char *val_str;
+
+			val_str = rte_kvargs_get(kvlist,
+						 ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+			if (val_str) {
+				int detail_level = 0;
+				nt_fpga_mgr_t *p_fpga_mgr = NULL;
+
+				if (strcmp(val_str, "list") == 0) {
+					detail_level = 0;
+				} else if (strcmp(val_str, "verbose") == 0) {
+					detail_level = 1;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: argument '%s': '%s': unsupported value\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,
+					       val_str);
+					exit(1);
+				}
+				/* Produce fpgamgr output and exit hard */
+				p_fpga_mgr = fpga_mgr_new();
+				if (p_fpga_mgr) {
+					fpga_mgr_init(p_fpga_mgr);
+					fpga_mgr_show(p_fpga_mgr, stdout,
+						     detail_level);
+					fpga_mgr_delete(p_fpga_mgr);
+					p_fpga_mgr = NULL;
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: %s cannot complete\n",
+					       __func__,
+					       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);
+					exit(1);
+				}
+				exit(0);
+			}
+		}
+
+		/* link_speed options/argument only applicable for physical ports. */
+		num_port_speeds =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);
+		if (num_port_speeds) {
+			assert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);
+			void *pls_mbps_ptr = &pls_mbps[0];
+
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_LINK_SPEED_ARG,
+						 &string_to_port_link_speed,
+						 &pls_mbps_ptr);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with port link speed command "
+				       "line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			for (int i = 0; i < num_port_speeds; ++i) {
+				int pid = pls_mbps[i].port_id;
+
+				int lspeed _unused = pls_mbps[i].link_speed;
+
+				NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%d.%d\n",
+				       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,
+				       pid, lspeed);
+				if (pls_mbps[i].port_id >=
+						NUM_ADAPTER_PORTS_MAX) {
+					NT_LOG(ERR, ETHDEV,
+					       "%s: problem with port link speed command line "
+					       "arguments: port id should be 0 to %d, got %d\n",
+					       __func__, NUM_ADAPTER_PORTS_MAX,
+					       pid);
+					return -1;
+				}
+			}
+		}
+
+		/*
+		 * portmask option/argument
+		 * It is intentional that portmask is only used to decide if DPDK eth_dev
+		 * should be created for testing we would still keep the nthw subsystems
+		 * running for all interfaces
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_PORTMASK_ARG,
+						 &string_to_u32, &n_port_mask);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);
+		}
+
+		/*
+		 * rxq option/argument
+		 * The number of rxq (hostbuffers) allocated in memory.
+		 * Default is 32 RX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_RXQUEUES_ARG,
+						 &string_to_u32, &nb_rx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
+		}
+
+		/*
+		 * txq option/argument
+		 * The number of txq (hostbuffers) allocated in memory.
+		 * Default is 32 TX Hostbuffers
+		 */
+		kvargs_count =
+			rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_TXQUEUES_ARG,
+						 &string_to_u32, &nb_tx_queues);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);
+		if (kvargs_count) {
+			lag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);
+			if (lag_config == NULL) {
+				NT_LOG(ERR, ETHDEV,
+				       "Failed to alloc lag_config data\n");
+				return -1;
+			}
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,
+						 &string_to_u32,
+						 &lag_config->mode);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);
+			lag_active = 1;
+		}
+
+		kvargs_count = rte_kvargs_count(kvlist,
+						ETH_DEV_NTHW_EXCEPTION_PATH_ARG);
+		if (kvargs_count) {
+			assert(kvargs_count == 1);
+			res = rte_kvargs_process(kvlist,
+						 ETH_DEV_NTHW_EXCEPTION_PATH_ARG,
+						 &string_to_u32, &exception_path);
+			if (res < 0) {
+				NT_LOG(ERR, ETHDEV,
+				       "%s: problem with command line arguments: res=%d\n",
+				       __func__, res);
+				return -1;
+			}
+			NT_LOG(DBG, ETHDEV, "%s: devargs: %s=%u\n", __func__,
+			       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);
+		}
+
+		if (lag_active && lag_config) {
+			switch (lag_config->mode) {
+			case BONDING_MODE_ACTIVE_BACKUP:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Backup LAG mode\n");
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_PRIMARY_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_PRIMARY_ARG,
+								 &string_to_u32,
+								 &lag_config->primary_port);
+					if (res < 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a primary port\n");
+					return -1;
+				}
+
+				kvargs_count = rte_kvargs_count(kvlist,
+								ETH_NTNIC_LAG_BACKUP_ARG);
+				if (kvargs_count) {
+					assert(kvargs_count == 1);
+					res = rte_kvargs_process(kvlist,
+								 ETH_NTNIC_LAG_BACKUP_ARG,
+								 &string_to_u32,
+								 &lag_config->backup_port);
+					if (res != 0) {
+						NT_LOG(ERR, ETHDEV,
+						       "%s: problem with command line "
+						       "arguments: res=%d\n",
+						       __func__, res);
+						return -1;
+					}
+					NT_LOG(DBG, ETHDEV,
+					       "%s: devargs: %s=%u\n", __func__,
+					       ETH_NTNIC_LAG_MODE_ARG,
+					       nb_tx_queues);
+				} else {
+					NT_LOG(ERR, ETHDEV,
+					       "LAG must define a backup port\n");
+					return -1;
+				}
+				break;
+
+			case BONDING_MODE_8023AD:
+				NT_LOG(DBG, ETHDEV,
+				       "Active / Active LAG mode\n");
+				lag_config->primary_port = 0;
+				lag_config->backup_port = 0;
+				break;
+
+			default:
+				NT_LOG(ERR, ETHDEV, "Unsupported LAG mode\n");
+				return -1;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	/* parse representor args */
+	if (setup_virtual_pf_representor_base(pci_dev) == -1) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: setup_virtual_pf_representor_base error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* alloc */
+	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),
+				  RTE_CACHE_LINE_SIZE,
+				  pci_dev->device.numa_node);
+	if (!p_drv) {
+		NT_LOG(ERR, ETHDEV, "%s: error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	/* Setup VFIO context */
+	int vfio = nt_vfio_setup(pci_dev);
+
+	if (vfio < 0) {
+		NT_LOG(ERR, ETHDEV, "%s: vfio_setup error %d (%s:%u)\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), -1, __func__,
+		       __LINE__);
+		rte_free(p_drv);
+		return -1;
+	}
+
+	p_drv->probe_finished = 0;
+	/* context */
+	p_nt_drv = &p_drv->ntdrv;
+	fpga_info = &p_nt_drv->adapter_info.fpga_info;
+	p_hw_info = &p_nt_drv->adapter_info.hw_info;
+
+	p_drv->p_dev = pci_dev;
+
+	/* Set context for NtDrv */
+	p_nt_drv->pciident =
+		BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
+				pci_dev->addr.devid, pci_dev->addr.function);
+	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
+	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
+
+	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
+	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
+	NT_LOG(DBG, ETHDEV, "bar0=0x%" PRIX64 " len=%d\n", fpga_info->bar0_addr,
+	       fpga_info->bar0_size);
+	fpga_info->numa_node = pci_dev->device.numa_node;
+	fpga_info->pciident = p_nt_drv->pciident;
+	fpga_info->adapter_no = p_drv->adapter_no;
+
+	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
+	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =
+		pci_dev->id.subsystem_vendor_id;
+	p_nt_drv->adapter_info.hw_info.pci_sub_device_id =
+		pci_dev->id.subsystem_device_id;
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:\n",
+	       p_nt_drv->adapter_info.mp_adapter_id_str,
+	       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+	       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+	       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+	       p_nt_drv->adapter_info.hw_info.pci_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_device_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
+	       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
+
+	p_nt_drv->b_shutdown = false;
+	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
+
+	for (int i = 0; i < num_port_speeds; ++i) {
+		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+		nt_link_speed_t link_speed =
+			convert_link_speed(pls_mbps[i].link_speed);
+		nt4ga_port_set_link_speed(p_adapter_info, i, link_speed);
+	}
+
+	/* store context */
+	store_pdrv(p_drv);
+
+	/* initialize nt4ga nthw fpga module instance in drv */
+	int err = nt4ga_adapter_init(&p_nt_drv->adapter_info);
+
+	if (err != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "%s: Cannot initialize the adapter instance\n",
+		       p_nt_drv->adapter_info.mp_adapter_id_str);
+		return -1;
+	}
+
+	if (fpga_info->mp_nthw_epp != NULL)
+		nthw_eth_dev_ops.mtu_set = dev_set_mtu;
+
+	/* Initialize the queue system */
+	if (err == 0) {
+		err = nthw_virt_queue_init(fpga_info);
+		if (err != 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "%s: Cannot initialize scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		} else {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: Initialized scatter-gather queues\n",
+			       p_nt_drv->adapter_info.mp_adapter_id_str);
+		}
+	}
+
+	switch (fpga_info->profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		profile = FLOW_ETH_DEV_PROFILE_VSWITCH;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		profile = FLOW_ETH_DEV_PROFILE_INLINE;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, ETHDEV, "%s: fpga profile not supported [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (err == 0) {
+		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
+		const char *const p_adapter_id_str _unused =
+			p_nt_drv->adapter_info.mp_adapter_id_str;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR
+		       " Hw=0x%02X_rev%d PhyPorts=%d\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
+		       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
+		       fpga_info->n_phy_ports);
+	} else {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), err, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+	res = rte_ctrl_thread_create(&p_nt_drv->stat_thread, "nt4ga_stat_thr",
+				     NULL, adapter_stat_thread_fn,
+				     (void *)p_drv);
+	if (res) {
+		NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), res, __func__,
+		       __LINE__);
+		return -1;
+	}
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		res = rte_ctrl_thread_create(&p_nt_drv->flm_thread,
+					     "nt_flm_stat_thr", NULL,
+					     adapter_flm_thread_fn,
+					     (void *)p_drv);
+		if (res) {
+			NT_LOG(ERR, ETHDEV, "%s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), res,
+			       __func__, __LINE__);
+			return -1;
+		}
+	}
+
+	if (lag_config) {
+		/* LAG is activated, so only use port 0 */
+		n_phy_ports = 1;
+	} else {
+		n_phy_ports = fpga_info->n_phy_ports;
+	}
+	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
+		const char *const p_port_id_str _unused =
+			p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
+		struct pmd_internals *internals = NULL;
+		struct rte_eth_dev *eth_dev;
+		char name[32];
+		int i;
+
+		if ((1 << n_intf_no) & ~n_port_mask) {
+			NT_LOG(DBG, ETHDEV,
+			       "%s: %s: interface #%d: skipping due to portmask 0x%02X\n",
+			       __func__, p_port_id_str, n_intf_no, n_port_mask);
+			continue;
+		}
+
+		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
+		NT_LOG(DBG, ETHDEV, "%s: %s: interface #%d: %s: '%s'\n",
+		       __func__, p_port_id_str, n_intf_no,
+		       (pci_dev->name[0] ? pci_dev->name : "NA"), name);
+
+		internals = rte_zmalloc_socket(name,
+					       sizeof(struct pmd_internals),
+					       RTE_CACHE_LINE_SIZE,
+					       pci_dev->device.numa_node);
+		if (!internals) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->pci_dev = pci_dev;
+		internals->n_intf_no = n_intf_no;
+		internals->if_index = n_intf_no;
+		internals->min_tx_pkt_size = 64;
+		internals->max_tx_pkt_size = 10000;
+		internals->type = PORT_TYPE_PHYSICAL;
+		internals->vhid = -1;
+		internals->port = n_intf_no;
+		internals->nb_rx_queues = nb_rx_queues;
+		internals->nb_tx_queues = nb_tx_queues;
+
+		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
+		for (i = 0; i < MAX_QUEUES; i++)
+			internals->vpq[i].hw_id = -1;
+
+		/* Setup queue_ids */
+		if (nb_rx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Rx multi queues. %i queues\n",
+			       0 /*port*/, nb_rx_queues);
+		}
+
+		if (nb_tx_queues > 1) {
+			NT_LOG(DBG, ETHDEV,
+			       "(%i) NTNIC configured with Tx multi queues. %i queues\n",
+			       0 /*port*/, nb_tx_queues);
+		}
+
+		int max_num_queues = (nb_rx_queues > nb_tx_queues) ?
+				     nb_rx_queues :
+				     nb_tx_queues;
+		int start_queue = allocate_queue(max_num_queues);
+
+		if (start_queue < 0)
+			return -1;
+
+		for (i = 0; i < (int)max_num_queues; i++) {
+			queue_ids[i].id    = i;
+			queue_ids[i].hw_id = start_queue + i;
+
+			internals->rxq_scg[i].queue = queue_ids[i];
+			/* use same index in Rx and Tx rings */
+			internals->txq_scg[i].queue = queue_ids[i];
+			internals->rxq_scg[i].enabled = 0;
+			internals->txq_scg[i].type = internals->type;
+			internals->rxq_scg[i].type = internals->type;
+			internals->rxq_scg[i].port = internals->port;
+		}
+
+		/* no tx queues - tx data goes out on phy */
+		internals->vpq_nb_vq = 0;
+
+		for (i = 0; i < (int)nb_tx_queues; i++) {
+			internals->txq_scg[i].port = internals->port;
+			internals->txq_scg[i].enabled = 0;
+		}
+
+		/* Set MAC address (but only if the MAC address is permitted) */
+		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
+			const uint64_t mac =
+				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +
+				n_intf_no;
+			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &
+								0xFFu;
+			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &
+								0xFFu;
+		}
+
+		eth_dev = rte_eth_dev_allocate(name);
+		if (!eth_dev) {
+			NT_LOG(ERR, ETHDEV, "%s: %s: error=%d [%s:%u]\n",
+			       (pci_dev->name[0] ? pci_dev->name : "NA"), name,
+			       -1, __func__, __LINE__);
+			return -1;
+		}
+
+		internals->flw_dev = flow_get_eth_dev(0, n_intf_no,
+						      eth_dev->data->port_id,
+						      nb_rx_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      profile, exception_path);
+		if (!internals->flw_dev) {
+			NT_LOG(ERR, VDPA,
+			       "Error creating port. Resource exhaustion in HW\n");
+			return -1;
+		}
+
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+		       __func__, __func__, __LINE__, eth_dev,
+		       eth_dev->data->port_id, n_intf_no);
+
+		/* connect structs */
+		internals->p_drv = p_drv;
+		eth_dev->data->dev_private = internals;
+		eth_dev->data->mac_addrs = internals->eth_addrs;
+
+		internals->port_id = eth_dev->data->port_id;
+
+		/*
+		 * if representor ports defined on this PF set the assigned port_id as the
+		 * backer_port_id for the VFs
+		 */
+		if (rep.vpf_dev == pci_dev)
+			rep.pf_backer_port_id = eth_dev->data->port_id;
+		NT_LOG(DBG, ETHDEV,
+		       "%s: [%s:%u] Setting up RX functions for SCG\n",
+		       __func__, __func__, __LINE__);
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+		eth_dev->tx_pkt_prepare = NULL;
+
+		struct rte_eth_link pmd_link;
+
+		pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+		pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		pmd_link.link_status = ETH_LINK_DOWN;
+		pmd_link.link_autoneg = ETH_LINK_AUTONEG;
+
+		eth_dev->device = &pci_dev->device;
+		eth_dev->data->dev_link = pmd_link;
+		eth_dev->data->numa_node = pci_dev->device.numa_node;
+		eth_dev->dev_ops = &nthw_eth_dev_ops;
+		eth_dev->state = RTE_ETH_DEV_ATTACHED;
+
+		rte_eth_copy_pci_info(eth_dev, pci_dev);
+		eth_dev_pci_specific_init(eth_dev,
+					  pci_dev); /* performs rte_eth_copy_pci_info() */
+
+		p_drv->n_eth_dev_init_count++;
+
+		if (lag_config) {
+			internals->lag_config = lag_config;
+			lag_config->internals = internals;
+
+			/* Always merge port 0 and port 1 on a LAG bond */
+			lag_set_port_group(0, (uint32_t)0x01);
+			lag_config->lag_thread_active = 1;
+			pthread_create(&lag_config->lag_tid, NULL,
+				       lag_management, lag_config);
+		}
+
+		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+				internals->flw_dev->ndev->be.tpe.ver >= 2) {
+			assert(nthw_eth_dev_ops.mtu_set ==
+			       dev_set_mtu_inline ||
+			       nthw_eth_dev_ops.mtu_set == NULL);
+			nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+			dev_set_mtu_inline(eth_dev, MTUINITVAL);
+			NT_LOG(DBG, ETHDEV,
+			       "%s INLINE MTU supported, tpe version %d\n",
+			       __func__, internals->flw_dev->ndev->be.tpe.ver);
+		} else {
+			NT_LOG(DBG, ETHDEV, "INLINE MTU not supported");
+		}
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] leave\n", __func__, __FILE__, __LINE__);
+
+#ifdef NT_TOOLS
+	/*
+	 * If NtConnect interface must be started for external tools
+	 */
+	ntconn_adap_register(p_drv);
+	ntconn_stat_register(p_drv);
+
+	/* Determine CPU used by the DPDK */
+	cpu_set_t cpuset;
+	unsigned int lcore_id;
+
+	CPU_ZERO(&cpuset);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
+			continue;
+		rte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);
+
+		RTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);
+	}
+	/* Set available CPU for ntconnect */
+	RTE_CPU_NOT(&cpuset, &cpuset);
+
+	ntconn_flow_register(p_drv);
+	ntconn_meter_register(p_drv);
+#ifdef NTCONNECT_TEST
+	ntconn_test_register(p_drv);
+#endif
+	ntconnect_init(NTCONNECT_SOCKET, cpuset);
+#endif
+
+	return 0;
+}
+
+static int nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	int i;
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+
+	struct pmd_internals *internals = pmd_intern_base;
+
+	sleep(1); /* let running threads end Rx and Tx activity */
+
+	while (internals) {
+		for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);
+			release_hw_virtio_queues(&internals->txq_scg[i].hwq);
+		}
+
+		for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
+			nthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);
+			release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
+		}
+		internals = internals->next;
+	}
+
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq != NULL) {
+			if (rel_virt_queue[i].rx) {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_rx_virt_queue(rel_virt_queue[i].vq);
+			} else {
+				if (rel_virt_queue[i].managed)
+					nthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);
+				else
+					nthw_release_tx_virt_queue(rel_virt_queue[i].vq);
+			}
+			rel_virt_queue[i].vq = NULL;
+		}
+	}
+
+	nt_vfio_remove(EXCEPTION_PATH_HID);
+
+	return 0;
+}
+
+static void signal_handler_func_int(int sig)
+{
+	if (sig != SIGINT) {
+		signal(sig, previous_handler);
+		raise(sig);
+		return;
+	}
+	kill_pmd = 1;
+}
+
+static void *shutdown_thread(void *arg __rte_unused)
+{
+	struct rte_eth_dev dummy;
+
+	while (!kill_pmd)
+		usleep(100000);
+
+	NT_LOG(DBG, ETHDEV, "%s: Shutting down because of ctrl+C\n", __func__);
+	nthw_pci_dev_deinit(&dummy);
+
+	signal(SIGINT, previous_handler);
+	raise(SIGINT);
+
+	return NULL;
+}
+
+static int init_shutdown(void)
+{
+	NT_LOG(DBG, ETHDEV, "%s: Starting shutdown handler\n", __func__);
+	kill_pmd = 0;
+	previous_handler = signal(SIGINT, signal_handler_func_int);
+	pthread_create(&shutdown_tid, NULL, shutdown_thread, NULL);
+
+	/*
+	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
+	 * flooding by OVS from multiple virtual port threads - no need to be precise
+	 */
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	usleep(10000);
+	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
+
+	return 0;
+}
+
/*
 * PCI probe entry point for the ntnic PMD.
 * Validates the EAL environment (PCI support, IOMMU-backed VFIO, hugepages),
 * filters out PCIe-bifurcation secondary endpoints, then performs the full
 * device initialization and installs the shutdown handler.
 * Returns the result of nthw_pci_dev_init() (0 on success, negative on error).
 */
static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	int res;

	NT_LOG(DBG, ETHDEV, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);

#if defined(DEBUG)
	NT_LOG(DBG, NTHW, "Testing NTHW %u [%s:%u]\n",
	       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],
	       __func__, __LINE__);
#endif

	/* Dump device identity and any devargs for diagnostics. */
	NT_LOG(DBG, ETHDEV, "%s: pcidev: name: '%s'\n", __func__,
	       pci_dev->name);
	NT_LOG(DBG, ETHDEV, "%s: devargs: name: '%s'\n", __func__,
	       pci_dev->device.name);
	if (pci_dev->device.devargs) {
		NT_LOG(DBG, ETHDEV, "%s: devargs: args: '%s'\n", __func__,
		       (pci_dev->device.devargs->args ?
			pci_dev->device.devargs->args :
			"NULL"));
		NT_LOG(DBG, ETHDEV, "%s: devargs: data: '%s'\n", __func__,
		       (pci_dev->device.devargs->data ?
			pci_dev->device.devargs->data :
			"NULL"));
	}

	/* This PMD requires PCI bus support in the EAL. */
	const int n_rte_has_pci = rte_eal_has_pci();

	NT_LOG(DBG, ETHDEV, "has_pci=%d\n", n_rte_has_pci);
	if (n_rte_has_pci == 0) {
		NT_LOG(ERR, ETHDEV, "has_pci=%d: this PMD needs hugepages\n",
		       n_rte_has_pci);
		return -1;
	}

	/* VFIO no-IOMMU mode is not supported - a real IOMMU is required. */
	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();

	NT_LOG(DBG, ETHDEV, "vfio_no_iommu_enabled=%d\n",
	       n_rte_vfio_no_io_mmu_enabled);
	if (n_rte_vfio_no_io_mmu_enabled) {
		NT_LOG(ERR, ETHDEV,
		       "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\n",
		       n_rte_vfio_no_io_mmu_enabled);
		return -1;
	}

	/* Physical-address IOVA mode is preferred but not mandatory. */
	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();

	NT_LOG(DBG, ETHDEV, "iova mode=%d\n", n_rte_io_va_mode);
	if (n_rte_io_va_mode != RTE_IOVA_PA) {
		NT_LOG(WRN, ETHDEV,
		       "iova mode (%d) should be PA for performance reasons\n",
		       n_rte_io_va_mode);
	}

	/* Hugepage-backed memory is mandatory for the queue system. */
	const int n_rte_has_huge_pages = rte_eal_has_hugepages();

	NT_LOG(DBG, ETHDEV, "has_hugepages=%d\n", n_rte_has_huge_pages);
	if (n_rte_has_huge_pages == 0) {
		NT_LOG(ERR, ETHDEV,
		       "has_hugepages=%d: this PMD needs hugepages\n",
		       n_rte_has_huge_pages);
		return -1;
	}

	NT_LOG(DBG, ETHDEV,
	       "busid=" PCI_PRI_FMT
	       " pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\n",
	       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
	       pci_dev->addr.function, pci_dev->id.vendor_id,
	       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,
	       pci_dev->id.subsystem_device_id,
	       pci_dev->name[0] ? pci_dev->name : "NA", /* locstr */
	       pci_dev->device.numa_node,
	       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :
	       "NA",
	       pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias :
	       "NA");

	/* On bifurcated NT200A01/NT50B01 boards only the primary endpoint
	 * (subsystem id 0x01) is driven by this PMD.
	 */
	if (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {
		if (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||
				pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {
			if (pci_dev->id.subsystem_device_id != 0x01) {
				NT_LOG(DBG, ETHDEV,
				       "%s: PCIe bifurcation - secondary endpoint "
				       "found - leaving probe\n",
				       __func__);
				return -1;
			}
		}
	}

	res = nthw_pci_dev_init(pci_dev);

	/* NOTE(review): shutdown handler is installed even when device init
	 * failed - presumably intentional so ctrl+C still cleans up; confirm.
	 */
	init_shutdown();

	NT_LOG(DBG, ETHDEV, "%s: leave: res=%d\n", __func__, res);
	return res;
}
+
+static int nthw_pci_remove(struct rte_pci_device *pci_dev)
+{
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
+}
+
+static int nt_log_init_impl(void)
+{
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	for (int i = NT_LOG_MODULE_GENERAL; i < NT_LOG_MODULE_END; ++i) {
+		int index = NT_LOG_MODULE_INDEX(i);
+
+		nt_log_module_logtype[index] =
+			rte_log_register_type_and_pick_level(nt_log_module_eal_name[index],
+							     RTE_LOG_INFO);
+	}
+
+	NT_LOG(DBG, ETHDEV, "%s: [%s:%u]\n", __func__, __func__, __LINE__);
+
+	return 0;
+}
+
+__rte_format_printf(3, 0)
+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,
+			   const char *format, va_list args)
+{
+	uint32_t rte_level = 0;
+	uint32_t rte_module = 0;
+
+	switch (level) {
+	case NT_LOG_ERR:
+		rte_level = RTE_LOG_ERR;
+		break;
+	case NT_LOG_WRN:
+		rte_level = RTE_LOG_WARNING;
+		break;
+	case NT_LOG_INF:
+		rte_level = RTE_LOG_INFO;
+		break;
+	default:
+		rte_level = RTE_LOG_DEBUG;
+	}
+
+	rte_module =
+		(module >= NT_LOG_MODULE_GENERAL &&
+		 module < NT_LOG_MODULE_END) ?
+		(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)] : module;
+
+	return (int)rte_vlog(rte_level, rte_module, format, args);
+}
+
+static int nt_log_is_debug_impl(uint32_t module)
+{
+	if (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)
+		return -1;
+	int index = NT_LOG_MODULE_INDEX(module);
+
+	return rte_log_get_level(nt_log_module_logtype[index]) == RTE_LOG_DEBUG;
+}
+
+RTE_INIT(ntnic_rte_init); /* must go before function */
+
+static void ntnic_rte_init(void)
+{
+	static struct nt_log_impl impl = { .init = &nt_log_init_impl,
+		       .log = &nt_log_log_impl,
+		       .is_debug = &nt_log_is_debug_impl
+	};
+
+	nt_log_init(&impl);
+}
+
/* PCI driver descriptor for the ntnic PMD. */
static struct rte_pci_driver rte_nthw_pmd = {
	.driver = {
		.name = "net_ntnic",
	},

	.id_table = nthw_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = nthw_pci_probe,
	.remove = nthw_pci_remove,
};

/* Register the driver, its PCI id table, and its kernel module dependency. */
RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");
+
+/*
+ * VF and VDPA code
+ */
+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
+				    int managed)
+{
+	int i;
+
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq == NULL) {
+			rel_virt_queue[i].vq = vq;
+			rel_virt_queue[i].rx = rx;
+			rel_virt_queue[i].managed = managed;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq)
+{
+	int i;
+
+	for (i = 0; i < MAX_REL_VQS; i++) {
+		if (rel_virt_queue[i].vq == vq) {
+			rel_virt_queue[i].vq = NULL;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+struct pmd_internals *vp_vhid_instance_ready(int vhid)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		if (intern->vhid == vhid)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+struct pmd_internals *vp_path_instance_ready(const char *path)
+{
+	struct pmd_internals *intern = pmd_intern_base;
+
+	while (intern) {
+		printf("Searching for path: \"%s\" == \"%s\" (%d)\n",
+		       intern->vhost_path, path,
+		       strcmp(intern->vhost_path, path));
+		if (strcmp(intern->vhost_path, path) == 0)
+			return intern;
+		intern = intern->next;
+	}
+	return NULL;
+}
+
+static void read_port_queues_mapping(char *str, int *portq)
+{
+	int len;
+	char *tok;
+
+	while (*str != '[' && *str != '\0')
+		str++;
+
+	if (*str == '\0')
+		return;
+	str++;
+	len = strlen(str);
+	char *str_e = &str[len];
+
+	while (*str_e != ']' && str_e != str)
+		str_e--;
+	if (*str_e != ']')
+		return;
+	*str_e = '\0';
+
+	tok = strtok(str, ",;");
+	while (tok) {
+		char *ch = strchr(tok, ':');
+
+		if (ch) {
+			*ch = '\0';
+			int port = atoi(tok);
+			int nvq = atoi(ch + 1);
+
+			if (port >= 0 &&
+					port < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&
+					nvq > 0 && nvq < MAX_QUEUES)
+				portq[port] = nvq;
+		}
+
+		tok = strtok(NULL, ",;");
+	}
+}
+
+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)
+{
+	struct rte_eth_devargs eth_da;
+
+	eth_da.nb_representor_ports = 0U;
+	if (dev->device.devargs && dev->device.devargs->args) {
+		char *ch = strstr(dev->device.devargs->args, "portqueues");
+
+		if (ch) {
+			read_port_queues_mapping(ch, rep.portqueues);
+			/*
+			 * Remove this extension. DPDK cannot read representor=[x] if added
+			 * parameter to the end
+			 */
+			 *ch = '\0';
+		}
+
+		int err = rte_eth_devargs_parse(dev->device.devargs->args,
+						&eth_da);
+		if (err) {
+			rte_errno = -err;
+			NT_LOG(ERR, VDPA,
+			       "failed to process device arguments: %s",
+			       strerror(rte_errno));
+			return -1;
+		}
+
+		if (eth_da.nb_representor_ports) {
+			rep.vpf_dev = dev;
+			rep.eth_da = eth_da;
+		}
+	}
+	/* Will be set later when assigned to this PF */
+	rep.pf_backer_port_id = RTE_MAX_ETHPORTS;
+	return eth_da.nb_representor_ports;
+}
+
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,
+		       size_t private_data_size, int *n_vq)
+{
+	static int next_rep_p;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u\n", __func__,
+	       __func__, __LINE__, eth_dev, eth_dev->data->port_id);
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,
+								RTE_CACHE_LINE_SIZE,
+								dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->intr_handle = NULL;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	if (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {
+		eth_dev->data->representor_id =
+			rep.eth_da.representor_ports[next_rep_p++];
+		eth_dev->device = &rep.vpf_dev->device;
+		eth_dev->data->backer_port_id = rep.pf_backer_port_id;
+	} else {
+		eth_dev->data->representor_id = nt_vfio_vf_num(dev);
+		eth_dev->device = &dev->device;
+	}
+
+	if (rep.portqueues[eth_dev->data->representor_id])
+		*n_vq = rep.portqueues[eth_dev->data->representor_id];
+
+	else
+		*n_vq = 1;
+	return eth_dev;
+}
+
+static inline const char *
+rte_vdev_device_name(const struct rte_pci_device *dev)
+{
+	if (dev && dev->device.name)
+		return dev->device.name;
+	return NULL;
+}
+
/* Devargs keys accepted by the virtual-port (representor) ethdevs. */
static const char *const valid_args[] = {
#define VP_VLAN_ID "vlan"
	VP_VLAN_ID,
#define VP_SEPARATE_SOCKET "sep"
	VP_SEPARATE_SOCKET, NULL
};
+
+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,
+				     struct rte_eth_dev **eth_dev)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev_data *data = NULL;
+	int i;
+	struct rte_eth_link pmd_link;
+	int numa_node = vdev->device.numa_node;
+	const char *name;
+	int n_vq;
+	int num_queues;
+	uint8_t port;
+	uint32_t vlan = 0;
+	uint32_t separate_socket = 0;
+
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(vdev->addr);
+
+	name = rte_vdev_device_name(vdev);
+
+	/*
+	 * Now do all data allocation - for eth_dev structure
+	 * and internal (private) data
+	 */
+
+	if (vdev && vdev->device.devargs) {
+		struct rte_kvargs *kvlist = NULL;
+
+		kvlist = rte_kvargs_parse(vdev->device.devargs->args,
+					  valid_args);
+		if (!kvlist) {
+			NT_LOG(ERR, VDPA, "error when parsing param");
+			goto error;
+		}
+
+		if (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {
+			if (rte_kvargs_process(kvlist, VP_VLAN_ID,
+					       &string_to_u32, &vlan) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_VLAN_ID);
+				goto error;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {
+			if (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,
+					       &string_to_u32,
+					       &separate_socket) < 0) {
+				NT_LOG(ERR, VDPA, "error to parse %s",
+				       VP_SEPARATE_SOCKET);
+				goto error;
+			}
+		}
+	}
+
+	n_vq = 0;
+	*eth_dev =
+		rte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);
+	if (*eth_dev == NULL)
+		goto error;
+
+	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+	if (data == NULL)
+		goto error;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\n",
+	       __func__, __func__, __LINE__, *eth_dev,
+	       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);
+
+	port = (*eth_dev)->data->representor_id;
+
+	if (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {
+		NT_LOG(ERR, VDPA,
+		       "(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\n",
+		       port, numa_node);
+		return -1;
+	}
+	NT_LOG(DBG, VDPA,
+	       "(%i) Creating ntnic-backend ethdev on numa socket %i\n", port,
+	       numa_node);
+
+	/* Build up private dev data */
+	internals = (*eth_dev)->data->dev_private;
+	internals->pci_dev = vdev;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->type = PORT_TYPE_VIRTUAL;
+		internals->nb_rx_queues = 1;
+		internals->nb_tx_queues = 1;
+	} else {
+		internals->type = PORT_TYPE_OVERRIDE;
+		internals->nb_rx_queues = n_vq;
+		internals->nb_tx_queues = n_vq;
+	}
+	internals->p_drv = get_pdrv_from_pci(vdev->addr);
+
+	if (n_vq > MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues. Max is %i\n",
+		       MAX_QUEUES);
+		goto error;
+	}
+
+	if (n_vq > FLOW_MAX_QUEUES) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\n",
+		       FLOW_MAX_QUEUES);
+		goto error;
+	}
+
+	/* Initialize HB output dest to none */
+	for (i = 0; i < MAX_QUEUES; i++)
+		internals->vpq[i].hw_id = -1;
+
+	internals->vhid = -1;
+	internals->port = port;
+	internals->if_index = port;
+	internals->port_id = (*eth_dev)->data->port_id;
+	internals->vlan = vlan;
+
+	/*
+	 * Create first time all queues in HW
+	 */
+	struct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)
+		num_queues = n_vq + 1; /* add 1: 0th for exception */
+	else
+		num_queues = n_vq;
+
+	int start_queue = allocate_queue(num_queues);
+
+	if (start_queue < 0) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Too many rx queues. Could not allocate %i\n",
+		       num_queues);
+		goto error;
+	}
+
+	int vhid = -1;
+
+	for (i = 0; i < num_queues; i++) {
+		queue_ids[i].id    = start_queue + i; /* 0th is exception queue */
+		queue_ids[i].hw_id = start_queue + i;
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internals->txq_scg[0].rss_target_id = -1;
+		internals->flw_dev = flow_get_eth_dev(0, internals->port,
+						      internals->port_id, num_queues,
+						      queue_ids,
+						      &internals->txq_scg[0].rss_target_id,
+						      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);
+	} else {
+		uint16_t in_port = internals->port & 1;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct pmd_internals *main_internals;
+		struct rte_eth_dev *eth_dev;
+		int i;
+		int status;
+
+		/* Get name of in_port */
+		status = rte_eth_dev_get_name_by_port(in_port, name);
+		if (status != 0) {
+			NT_LOG(ERR, VDPA, "Name of port not found");
+			goto error;
+		}
+		NT_LOG(DBG, VDPA, "Name of port %u = %s\n", in_port, name);
+
+		/* Get ether device for in_port */
+		eth_dev = rte_eth_dev_get_by_name(name);
+		if (eth_dev == NULL) {
+			NT_LOG(ERR, VDPA, "Failed to get eth device");
+			goto error;
+		}
+
+		/* Get internals for in_port */
+		main_internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		NT_LOG(DBG, VDPA, "internals port   %u\n\n",
+		       main_internals->port);
+		if (main_internals->port != in_port) {
+			NT_LOG(ERR, VDPA, "Port did not match");
+			goto error;
+		}
+
+		/* Get flow device for in_port */
+		internals->flw_dev = main_internals->flw_dev;
+
+		for (i = 0; i < num_queues && i < MAX_QUEUES; i++) {
+			NT_LOG(DBG, VDPA, "Queue:            %u\n",
+			       queue_ids[i].id);
+			NT_LOG(DBG, VDPA, "HW ID:            %u\n",
+			       queue_ids[i].hw_id);
+			if (flow_eth_dev_add_queue(main_internals->flw_dev,
+						   &queue_ids[i])) {
+				NT_LOG(ERR, VDPA, "Could not add queue");
+				goto error;
+			}
+		}
+	}
+
+	if (!internals->flw_dev) {
+		NT_LOG(ERR, VDPA,
+		       "Error creating virtual port. Resource exhaustion in HW\n");
+		goto error;
+	}
+
+	char path[128];
+
+	if (!separate_socket) {
+		sprintf(path, "%sstdvio%i", DVIO_VHOST_DIR_NAME, port);
+	} else {
+		sprintf(path, "%sstdvio%i/stdvio%i", DVIO_VHOST_DIR_NAME, port,
+			port);
+	}
+
+	internals->vpq_nb_vq = n_vq;
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[1].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++) {
+			internals->vpq[i] =
+				queue_ids[i + 1]; /* queue 0 is for exception */
+		}
+	} else {
+		if (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,
+				   queue_ids[0].hw_id, n_vq, n_vq,
+				   internals->port, &vhid)) {
+			NT_LOG(ERR, VDPA,
+			       "*********** ERROR *********** vDPA RELAY INIT\n");
+			goto error;
+		}
+		for (i = 0; i < n_vq; i++)
+			internals->vpq[i] = queue_ids[i];
+	}
+
+	/*
+	 * Exception queue for OVS SW path
+	 */
+	internals->rxq_scg[0].queue = queue_ids[0];
+	internals->txq_scg[0].queue =
+		queue_ids[0]; /* use same index in Rx and Tx rings */
+	internals->rxq_scg[0].enabled = 0;
+	internals->txq_scg[0].port = port;
+
+	internals->txq_scg[0].type = internals->type;
+	internals->rxq_scg[0].type = internals->type;
+	internals->rxq_scg[0].port = internals->port;
+
+	/* Setup pmd_link info */
+	pmd_link.link_speed = ETH_SPEED_NUM_NONE;
+	pmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	pmd_link.link_status = ETH_LINK_DOWN;
+
+	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+	data->dev_private = internals;
+	data->port_id = (*eth_dev)->data->port_id;
+
+	data->nb_rx_queues = 1; /* this is exception */
+	data->nb_tx_queues = 1;
+
+	data->dev_link = pmd_link;
+	data->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];
+	data->numa_node = numa_node;
+
+	(*eth_dev)->data = data;
+	(*eth_dev)->dev_ops = &nthw_eth_dev_ops;
+
+	if (pmd_intern_base) {
+		struct pmd_internals *intern = pmd_intern_base;
+
+		while (intern->next)
+			intern = intern->next;
+		intern->next = internals;
+	} else {
+		pmd_intern_base = internals;
+	}
+	internals->next = NULL;
+
+	__atomic_store_n(&internals->vhid, vhid, __ATOMIC_RELAXED);
+
+	LIST_INIT(&internals->mtr_profiles);
+	LIST_INIT(&internals->mtrs);
+	return 0;
+
+error:
+	if (data)
+		rte_free(data);
+	if (internals)
+		rte_free(internals);
+	return -1;
+}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues
+ * are going to VF/vDPA
+ */
/*
 * Rx burst stub installed for PORT_TYPE_OVERRIDE ports: always reports
 * zero packets received, since the queues are owned by VF/vDPA.
 */
static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,
				     struct rte_mbuf **bufs __rte_unused,
				     uint16_t nb_pkts __rte_unused)
{
	return 0;
}
+
+/*
+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues
+ * are coming from VF/vDPA
+ */
/*
 * Tx burst stub installed for PORT_TYPE_OVERRIDE ports: always reports
 * zero packets sent, since the queues are owned by VF/vDPA.
 */
static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,
				     struct rte_mbuf **bufs __rte_unused,
				     uint16_t nb_pkts __rte_unused)
{
	return 0;
}
+
+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct pmd_internals *internals;
+	struct rte_eth_dev *eth_dev;
+
+	/* Create virtual function DPDK PCI devices.*/
+	if (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 0)
+		return -1;
+
+	internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+	if (internals->type == PORT_TYPE_OVERRIDE) {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;
+	} else {
+		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
+		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
+	}
+
+	rte_eth_dev_probing_finish(eth_dev);
+
+	return 0;
+}
+
+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev = NULL;
+
+	NT_LOG(DBG, VDPA, "Closing ntvp pmd on numa socket %u\n",
+	       rte_socket_id());
+
+	if (!pci_dev)
+		return -1;
+
+	/* Clean up all vDPA devices */
+	nthw_vdpa_close();
+
+	/* reserve an ethdev entry */
+	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));
+	if (eth_dev == NULL)
+		return -1;
+
+	rte_free(eth_dev->data->dev_private);
+	rte_free(eth_dev->data);
+
+	rte_eth_dev_release_port(eth_dev);
+
+	return 0;
+}
+
+/*
+ * LAG
+ */
+
/* Port-0 share (percent) of the hash distribution passed to
 * lag_set_config(FLOW_LAG_SET_BALANCE): 100 = all traffic on port 0,
 * 50 = balanced, 0 = all traffic on port 1.
 */
#define LAG_PORT0_ONLY (100)
#define LAG_BALANCED_50_50 (50)
#define LAG_PORT1_ONLY (0)

/* Port index arguments used with lag_set_config()/lag_set_port_block() */
#define LAG_NO_TX (0)
#define LAG_PORT0_INDEX (1)
#define LAG_PORT1_INDEX (2)
#define LAG_HASH_INDEX (3)
+
/* No-op transition handler: state pair requires no HW reconfiguration. */
static int lag_nop(lag_config_t *config __rte_unused)
{
	return 0;
}
+
/* Both links up: distribute Tx 50/50 across the two ports. */
static int lag_balance(lag_config_t *config __rte_unused)
{
	NT_LOG(DBG, ETHDEV, "AA LAG: balanced output\n");
	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);
}
+
/* Only port 0 is up: send 100% of traffic out of port 0. */
static int lag_port0_active(lag_config_t *config __rte_unused)
{
	NT_LOG(DBG, ETHDEV, "AA LAG: port 0 output only\n");
	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);
}
+
/* Only port 1 is up: send 100% of traffic out of port 1 (0% to port 0). */
static int lag_port1_active(lag_config_t *config __rte_unused)
{
	NT_LOG(DBG, ETHDEV, "AA LAG: port 1 output only\n");
	return lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);
}
+
/*
 * Both links down: disable transmission.
 * Returns 0 on success; the sum of the two call results otherwise.
 * NOTE(review): only LAG_PORT0_INDEX and LAG_HASH_INDEX are cleared here,
 * not LAG_PORT1_INDEX - confirm this is intentional with FLOW_LAG_SET_ALL.
 */
static int lag_notx(lag_config_t *config __rte_unused)
{
	NT_LOG(DBG, ETHDEV, "AA LAG: no link\n");

	int retval = 0;

	retval +=
		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);
	retval +=
		lag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);
	return retval;
}
+
+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)
+{
+	struct adapter_info_s *p_adapter_info =
+			&lag_config->internals->p_drv->ntdrv.adapter_info;
+	const bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);
+
+	NT_LOG(DBG, ETHDEV, "port %d status: %d\n", port, link_up);
+	return link_up;
+}
+
+static int lag_get_status(lag_config_t *config)
+{
+	uint8_t port0 = lag_get_link_status(config, 0);
+
+	uint8_t port1 = lag_get_link_status(config, 1);
+
+	uint8_t status = (port1 << 1 | port0);
+	return status;
+}
+
+static int lag_activate_primary(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->primary_port == 0) {
+		/* If port 0 is the active primary, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active primary, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the backup port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+static int lag_activate_backup(lag_config_t *config)
+{
+	int retval;
+
+	uint8_t port_0_distribution;
+	uint8_t blocked_port;
+
+	if (config->backup_port == 0) {
+		/* If port 0 is the active backup, then it take 100% of the hash distribution. */
+		port_0_distribution = 100;
+		blocked_port = LAG_PORT1_INDEX;
+	} else {
+		/* If port 1 is the active backup, then port 0 take 0% of the hash distribution. */
+		port_0_distribution = 0;
+		blocked_port = LAG_PORT0_INDEX;
+	}
+
+	/* Tx only on the backup port */
+	retval =
+		lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);
+
+	/* Block Rx on the primary port */
+	retval += lag_set_port_block(0, blocked_port);
+
+	return retval;
+}
+
+static int lag_active_backup(lag_config_t *config)
+{
+	uint8_t backup_port_active = 0;
+
+	/* Initialize with the primary port active */
+	lag_activate_primary(config);
+
+	while (config->lag_thread_active) {
+		usleep(500 *
+		       1000); /* 500 ms sleep between testing the link status. */
+
+		bool primary_port_status =
+			lag_get_link_status(config, config->primary_port);
+
+		if (!primary_port_status) {
+			bool backup_port_status =
+				lag_get_link_status(config, config->backup_port);
+			/* If the backup port has been activated, no need to do more. */
+			if (backup_port_active)
+				continue;
+
+			/* If the backup port is up, flip to it. */
+			if (backup_port_status) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port down => swapping to backup port\n");
+				lag_activate_backup(config);
+				backup_port_active = 1;
+			}
+		} else {
+			/* If using the backup port and primary come back. */
+			if (backup_port_active) {
+				NT_LOG(DBG, ETHDEV,
+				       "LAG: primary port restored => swapping to primary port\n");
+				lag_activate_primary(config);
+				backup_port_active = 0;
+			} /* Backup is active, while primary is restored. */
+		} /* Primary port status */
+	}
+
+	return 0;
+}
+
/* Handler invoked on a LAG link-state transition; returns 0 on success. */
typedef int (*lag_aa_action)(lag_config_t *config);

/* port 0 is LSB and port 1 is MSB */
/* NOTE(review): 0b literals are a GCC/Clang extension (standard in C23). */
enum lag_state_e {
	P0DOWN_P1DOWN = 0b00,
	P0UP_P1DOWN = 0b01,
	P0DOWN_P1UP = 0b10,
	P0UP_P1UP = 0b11
};

/* One entry of the (src state, dst state) -> action transition table. */
struct lag_action_s {
	enum lag_state_e src_state;
	enum lag_state_e dst_state;
	lag_aa_action action;
};
+
+struct lag_action_s actions[] = {
+	/* No action in same state */
+	{ P0UP_P1UP, P0UP_P1UP, lag_nop },
+	{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },
+	{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },
+	{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },
+
+	/* UU start */
+	{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },
+
+	/* UD start */
+	{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },
+	{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },
+
+	/* DU start */
+	{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },
+	{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },
+
+	/* DD start */
+	{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },
+	{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },
+	{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },
+};
+
+static lag_aa_action lookup_action(enum lag_state_e current_state,
+				   enum lag_state_e new_state)
+{
+	uint32_t i;
+
+	for (i = 0; i < sizeof(actions) / sizeof(struct lag_action_s); i++) {
+		if (actions[i].src_state == current_state &&
+				actions[i].dst_state == new_state)
+			return actions[i].action;
+	}
+	return NULL;
+}
+
+static int lag_active_active(lag_config_t *config)
+{
+	enum lag_state_e ports_status;
+
+	/* Set the initial state to 50/50% */
+	enum lag_state_e current_state = P0UP_P1UP;
+
+	lag_balance(config);
+	/* No ports are blocked in active/active */
+	lag_set_port_block(0, 0);
+
+	lag_aa_action action;
+
+	while (config->lag_thread_active) {
+		/* 500 ms sleep between testing the link status. */
+		usleep(500 * 1000);
+
+		ports_status = lag_get_status(config);
+
+		action = lookup_action(current_state, ports_status);
+		action(config);
+
+		current_state = ports_status;
+	}
+
+	return 0;
+}
+
+static void *lag_management(void *arg)
+{
+	lag_config_t *config = (lag_config_t *)arg;
+
+	switch (config->mode) {
+	case BONDING_MODE_ACTIVE_BACKUP:
+		lag_active_backup(config);
+		break;
+
+	case BONDING_MODE_8023AD:
+		lag_active_active(config);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported NTbond mode\n");
+		return NULL;
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h
new file mode 100644
index 0000000000..ee0d84ce82
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_ethdev.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_ETHDEV_H__
+#define __NTNIC_ETHDEV_H__
+
+#include <rte_ether.h>
+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */
+#include <rte_mtr_driver.h>
+#include <rte_mbuf.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+
+#include "ntos_system.h"
+#include "ntnic_dbsconfig.h"
+#include "stream_binary_flow_api.h"
+
/*
 * DPDK 22.07 renamed the ETH_* link/speed/RSS constants to RTE_ETH_*.
 * When building against a new enough DPDK, undefine any remaining legacy
 * names and map the old spellings onto the RTE_ETH_* equivalents so the
 * driver code compiles unchanged on both sides of the rename.
 */
#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)
#undef ETH_LINK_HALF_DUPLEX
#undef ETH_LINK_FULL_DUPLEX
#undef ETH_LINK_DOWN
#undef ETH_LINK_UP
#undef ETH_LINK_FIXED
#undef ETH_LINK_AUTONEG
#undef ETH_SPEED_NUM_NONE
#undef ETH_SPEED_NUM_10M
#undef ETH_SPEED_NUM_100M
#undef ETH_SPEED_NUM_1G
#undef ETH_SPEED_NUM_2_5G
#undef ETH_SPEED_NUM_5G
#undef ETH_SPEED_NUM_10G
#undef ETH_SPEED_NUM_20G
#undef ETH_SPEED_NUM_25G
#undef ETH_SPEED_NUM_40G
#undef ETH_SPEED_NUM_50G
#undef ETH_SPEED_NUM_56G
#undef ETH_SPEED_NUM_100G
#undef ETH_SPEED_NUM_200G
#undef ETH_SPEED_NUM_UNKNOWN
#undef ETH_LINK_SPEED_AUTONEG
#undef ETH_LINK_SPEED_FIXED
#undef ETH_LINK_SPEED_10M_HD
#undef ETH_LINK_SPEED_10M
#undef ETH_LINK_SPEED_100M_HD
#undef ETH_LINK_SPEED_100M
#undef ETH_LINK_SPEED_1G
#undef ETH_LINK_SPEED_2_5G
#undef ETH_LINK_SPEED_5G
#undef ETH_LINK_SPEED_10G
#undef ETH_LINK_SPEED_20G
#undef ETH_LINK_SPEED_25G
#undef ETH_LINK_SPEED_40G
#undef ETH_LINK_SPEED_50G
#undef ETH_LINK_SPEED_56G
#undef ETH_LINK_SPEED_100G
#undef ETH_LINK_SPEED_200G
#undef ETH_RSS_IP
#undef ETH_RSS_UDP
#undef ETH_RSS_TCP
#undef ETH_RSS_SCTP
#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
#define ETH_LINK_UP RTE_ETH_LINK_UP
#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
#define ETH_RSS_IP RTE_ETH_RSS_IP
#define ETH_RSS_UDP RTE_ETH_RSS_UDP
#define ETH_RSS_TCP RTE_ETH_RSS_TCP
#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
/* Names below were introduced after the rename, so no #undef is needed. */
#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
#endif
+
#define NUM_MAC_ADDRS_PER_PORT (16U)
#define NUM_MULTICAST_ADDRS_PER_PORT (16U)

#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256

/* Total max ports per NT NFV NIC */
#define MAX_NTNIC_PORTS 2

/* Max RSS queues */
#define MAX_QUEUES 125

/* Scatter-gather ring sizes and per-packet buffer sizes (2 KiB) */
#define SG_NB_HW_RX_DESCRIPTORS 1024
#define SG_NB_HW_TX_DESCRIPTORS 1024
#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)

/* Size in bytes of the SG metadata header (_pkt_hdr_rx/_pkt_hdr_tx) */
#define SG_HDR_SIZE 12

/* VQ buffers needed to fit all data in packet + header */
/* NOTE: GCC statement expression; evaluates _data_size_ exactly once. */
#define NUM_VQ_SEGS(_data_size_) \
	({ \
		size_t _size = (_data_size_); \
		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? \
		(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \
		SG_HW_TX_PKT_BUFFER_SIZE) : 1; \
		_segment_count; \
	})


/* Map a Tx packet index to a virtqueue descriptor index (ring wrap) */
#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \
	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)

#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \
	(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)

#define MAX_REL_VQS 128
+
/* Functions: */
/* Look up driver internals for a ready virtual-port instance. */
struct pmd_internals *vp_vhid_instance_ready(int vhid);
struct pmd_internals *vp_path_instance_ready(const char *path);
int setup_virtual_pf_representor_base(struct rte_pci_device *dev);
/* Create/destroy the DPDK ethdev for a virtual-function PCI device. */
int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);
int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);
nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);
enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);
/* Track virtqueues that must be released on teardown. */
int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,
				    int managed);
int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);
/* Copy an mbuf chain into virtqueue buffers starting at vq_descr_idx. */
int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
			   uint16_t vq_descr_idx,
			   struct nthw_memory_descriptor *vq_bufs, int max_segs,
			   struct rte_mbuf *mbuf);

/* Shared driver state defined in the corresponding .c files. */
extern int lag_active;
extern uint64_t rte_tsc_freq;
extern rte_spinlock_t hwlock;
+
/* Structs: */

/*
 * Fix: a duplicate "#define SG_HDR_SIZE 12" was removed here;
 * SG_HDR_SIZE is already defined once earlier in this header.
 */
+
/*
 * Scatter-gather Rx metadata header prepended to received packets.
 * Bitfield widths sum to SG_HDR_SIZE (12 bytes) - verified by the
 * compile-time checks below.
 * NOTE(review): field semantics inferred from names; confirm against the
 * FPGA descriptor format specification.
 */
struct _pkt_hdr_rx {
	uint32_t cap_len : 14;
	uint32_t fid : 10;
	uint32_t ofs1 : 8;
	uint32_t ip_prot : 8;
	uint32_t port : 13;
	uint32_t descr : 8;
	uint32_t descr_12b : 1;
	uint32_t color_type : 2;
	uint32_t color : 32;
};
+
/*
 * Scatter-gather Tx metadata header prepended to transmitted packets.
 * Bitfield widths sum to SG_HDR_SIZE (12 bytes) - verified below.
 */
struct _pkt_hdr_tx {
	uint32_t cap_len : 14;
	uint32_t lso_cso0 : 9;
	uint32_t lso_cso1 : 9;
	uint32_t lso_cso2 : 8;
	/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */
	uint32_t bypass_port : 13;
	uint32_t descr : 8;
	uint32_t descr_12b : 1;
	uint32_t color_type : 2;
	uint32_t color : 32;
};
+
/* Compile time verification of scatter gather header size. */
/* A mismatch produces a negative array size and thus a compile error. */
typedef char check_sg_pkt_rx_hdr_size
[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];
typedef char check_sg_pkt_tx_hdr_size
[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 1 : -1];

/* Opaque handle type used by the driver. */
typedef void *handle_t;

/* Per-queue hardware memory resources (control area + packet buffers). */
struct hwq_s {
	int vf_num;
	struct nthw_memory_descriptor virt_queues_ctrl;
	struct nthw_memory_descriptor *pkt_buffers;
};
+
/* State of one receive queue, cache-line aligned. */
struct ntnic_rx_queue {
	struct flow_queue_id_s
		queue; /* queue info - user id and hw queue index */

	struct rte_mempool *mb_pool; /* mbuf memory pool */
	uint16_t buf_size; /* size of data area in mbuf */
	unsigned long rx_pkts; /* Rx packet statistics */
	unsigned long rx_bytes; /* Rx bytes statistics */
	unsigned long err_pkts; /* Rx error packet statistics */
	int enabled; /* Enabling/disabling of this queue */

	struct hwq_s hwq; /* HW memory resources backing the queue */
	struct nthw_virt_queue *vq; /* associated virtqueue */
	int nb_hw_rx_descr; /* number of HW Rx descriptors */
	nt_meta_port_type_t type;
	uint32_t port; /* Rx port for this queue */
	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */

} __rte_cache_aligned;
+
/* State of one transmit queue, cache-line aligned. */
struct ntnic_tx_queue {
	struct flow_queue_id_s
		queue; /* queue info - user id and hw queue index */
	struct hwq_s hwq; /* HW memory resources backing the queue */
	struct nthw_virt_queue *vq; /* associated virtqueue */
	int nb_hw_tx_descr; /* number of HW Tx descriptors */
	/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */
	int target_id;
	nt_meta_port_type_t type;
	/* only used for exception tx queue from OVS SW switching */
	int rss_target_id;

	uint32_t port; /* Tx port for this queue */
	unsigned long tx_pkts; /* Tx packet statistics */
	unsigned long tx_bytes; /* Tx bytes statistics */
	unsigned long err_pkts; /* Tx error packet stat */
	int enabled; /* Enabling/disabling of this queue */
	enum fpga_info_profile profile; /* Vswitch / Inline / Capture */
} __rte_cache_aligned;
+
#define MAX_ARRAY_ENTRIES MAX_QUEUES
/* Small fixed-capacity list of uint32 values (count = entries in use). */
struct array_s {
	uint32_t value[MAX_ARRAY_ENTRIES];
	int count;
};
+
/* Configurations related to LAG management */
typedef struct {
	uint8_t mode; /* BONDING_MODE_ACTIVE_BACKUP or BONDING_MODE_8023AD */

	int8_t primary_port;
	int8_t backup_port;

	uint32_t ntpl_rx_id;

	pthread_t lag_tid; /* management thread (lag_management) */
	uint8_t lag_thread_active; /* cleared to stop the monitor loop */

	struct pmd_internals *internals;
} lag_config_t;
+
/* Supported NTbond modes (values match DPDK bonding mode numbers). */
#define BONDING_MODE_ACTIVE_BACKUP (1)
#define BONDING_MODE_8023AD (4)
/* Meter profile registered via rte_mtr, kept in a per-port linked list. */
struct nt_mtr_profile {
	LIST_ENTRY(nt_mtr_profile) next;
	uint32_t profile_id;
	struct rte_mtr_meter_profile profile;
};

/* Meter instance referencing a profile, kept in a per-port linked list. */
struct nt_mtr {
	LIST_ENTRY(nt_mtr) next;
	uint32_t mtr_id;
	int shared;
	struct nt_mtr_profile *profile;
};
+
/* Negotiated virtio communication layout for a virtual port. */
enum virt_port_comm {
	VIRT_PORT_NEGOTIATED_NONE,
	VIRT_PORT_NEGOTIATED_SPLIT,
	VIRT_PORT_NEGOTIATED_PACKED,
	VIRT_PORT_USE_RELAY
};

#define MAX_PATH_LEN 128
+
/*
 * Per-port driver-private state, stored in rte_eth_dev data->dev_private
 * and additionally chained through the global pmd_intern_base list.
 */
struct pmd_internals {
	const struct rte_pci_device *pci_dev;

	struct flow_eth_dev *flw_dev; /* flow engine device for this port */

	char name[20];
	char vhost_path[MAX_PATH_LEN];

	int n_intf_no;
	int if_index;

	int lpbk_mode; /* loopback mode */

	uint8_t nb_ports_on_adapter;
	uint8_t ts_multiplier;
	uint16_t min_tx_pkt_size;
	uint16_t max_tx_pkt_size;

	unsigned int nb_rx_queues; /* Number of Rx queues configured */
	unsigned int nb_tx_queues; /* Number of Tx queues configured */
	uint32_t port;
	uint8_t port_id;

	nt_meta_port_type_t type;
	struct flow_queue_id_s vpq[MAX_QUEUES];
	unsigned int vpq_nb_vq;
	int vhid; /* if a virtual port type - the vhid */
	enum virt_port_comm vport_comm; /* link and how split,packed,relay */
	uint32_t vlan;

	lag_config_t *lag_config;

	struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */
	struct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */

	struct drv_s *p_drv;
	/* Ethernet (MAC) addresses. Element number zero denotes default address. */
	struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];
	/* Multicast ethernet (MAC) addresses. */
	struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];

	LIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;
	LIST_HEAD(_mtrs, nt_mtr) mtrs;

	uint64_t last_stat_rtc; /* timestamp of the last statistics poll */
	uint64_t rx_missed;

	struct pmd_internals *next; /* next port in pmd_intern_base list */
};
+
/* Release all flows owned by a port; poll its HW statistics counters. */
void cleanup_flows(struct pmd_internals *internals);
int poll_statistics(struct pmd_internals *internals);
int debug_adapter_show_info(uint32_t pciident, FILE *pfh);

#endif /* __NTNIC_ETHDEV_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h
new file mode 100644
index 0000000000..e90643ec6b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __CREATE_ELEMENTS_H__
+#define __CREATE_ELEMENTS_H__
+
+#include "stream_binary_flow_api.h"
+
/* Max rte_flow items / actions converted per flow */
#define MAX_ELEMENTS 64
#define MAX_ACTIONS 32

/* Size of the flow stat id map (must be a power of two; masked below) */
#define MAX_COLOR_FLOW_STATS 0x400
+
/* Converted match pattern: rte_flow items mapped to flow_elem entries. */
struct cnv_match_s {
	struct flow_elem flow_elem[MAX_ELEMENTS];
};

/* Tunnel definition plus the match it was derived from. */
struct tun_def_s {
	struct flow_elem *tun_definition;
	struct cnv_match_s match;
};

/* Converted flow attributes together with the converted match. */
struct cnv_attr_s {
	struct cnv_match_s match;
	struct flow_attr attr;
};

/* Converted action list plus storage for action-specific payloads. */
struct cnv_action_s {
	struct flow_action flow_actions[MAX_ACTIONS];
	struct tun_def_s tun_def;
	struct flow_action_rss flow_rss;
	struct rte_flow_action_mark mark;
	struct flow_action_raw_encap encap;
	struct flow_action_raw_decap decap;
	struct flow_action_queue queue;
};
+
+/*
+ * Only needed because it eases the use of statistics through NTAPI
+ * for faster integration into NTAPI version of driver
+ * Therefore, this is only a good idea when running on a temporary NTAPI
+ * The query() functionality must go to flow engine, when moved to Open Source driver
+ */
+
+struct rte_flow {
+	void *flw_hdl;
+	int used;
+	uint32_t flow_stat_id;
+
+	uint64_t stat_pkts;
+	uint64_t stat_bytes;
+	uint8_t stat_tcp_flags;
+};
+
/*
 * Driver-private extensions of the rte_flow item/action type spaces.
 * Values start at INT_MIN so they can never collide with real RTE enums.
 */
enum nt_rte_flow_item_type {
	NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	NT_RTE_FLOW_ITEM_TYPE_TAG,
	NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
};

enum nt_rte_flow_action_type {
	NT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	NT_RTE_FLOW_ACTION_TYPE_TAG,
	NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
	NT_RTE_FLOW_ACTION_TYPE_JUMP,
};
+
/*
 * NOTE(review): these are static definitions in a header, so every
 * translation unit that includes this file gets its own private copy of
 * the tables and the initialized flag - verify the header is included
 * from a single .c file, or move the definitions into a .c.
 */
static int convert_tables_initialized;

/* Highest rte_flow item/action enum value representable in the tables */
#define MAX_RTE_ENUM_INDEX 127

/* RTE item/action enum -> FLOW_* enum lookup tables (-1 = unmapped) */
static int elem_list[MAX_RTE_ENUM_INDEX + 1];
static int action_list[MAX_RTE_ENUM_INDEX + 1];

#ifdef RTE_FLOW_DEBUG
static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];
static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];
#endif
+
/*
 * Map an rte_flow item / action enum value to the corresponding FLOW_*
 * value, or -1 when the value is out of range (or unmapped, since the
 * tables are initialized to -1). GCC statement expressions guarantee the
 * argument is evaluated exactly once.
 */
#define CNV_TO_ELEM(item) \
	({ \
		int _temp_item = (item); \
		((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \
		elem_list[_temp_item] : -1); \
	})

#define CNV_TO_ACTION(action) \
	({ \
		int _temp_action = (action); \
		((_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? \
		action_list[_temp_action] : -1); \
	})
+
+
/* Slot occupancy map: 0 = free, otherwise (mark + 1). Guarded by flow_lock. */
static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int convert_error(struct rte_flow_error *error,
+			 struct flow_error *flow_error)
+{
+	if (error) {
+		error->cause = NULL;
+		error->message = flow_error->message;
+
+		if (flow_error->type == FLOW_ERROR_NONE ||
+				flow_error->type == FLOW_ERROR_SUCCESS)
+			error->type = RTE_FLOW_ERROR_TYPE_NONE;
+
+		else
+			error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+	}
+	return 0;
+}
+
+/*
+ * Map Flow MARK to flow stat id
+ */
+static uint32_t create_flow_stat_id_locked(uint32_t mark)
+{
+	uint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);
+
+	while (flow_stat_id_map[flow_stat_id])
+		flow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);
+
+	flow_stat_id_map[flow_stat_id] = mark + 1;
+	return flow_stat_id;
+}
+
+static uint32_t create_flow_stat_id(uint32_t mark)
+{
+	rte_spinlock_lock(&flow_lock);
+	uint32_t ret = create_flow_stat_id_locked(mark);
+
+	rte_spinlock_unlock(&flow_lock);
+	return ret;
+}
+
+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)
+{
+	if (flow_stat_id < MAX_COLOR_FLOW_STATS)
+		flow_stat_id_map[flow_stat_id] = 0;
+}
+
/*
 * One-time population of the RTE -> FLOW_* conversion tables (and, with
 * RTE_FLOW_DEBUG, the matching name tables). Unmapped entries stay -1.
 * Idempotent: subsequent calls return immediately.
 */
static void initialize_global_cnv_tables(void)
{
	if (convert_tables_initialized)
		return;

	/* memset with -1 bytes yields -1 in every int element */
	memset(elem_list, -1, sizeof(elem_list));
	elem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;
	elem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;
	elem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;
	elem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;
	elem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;
	elem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;
	elem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;
	elem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;
	elem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;
	elem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;
	elem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;
	elem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;
	elem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;
	elem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;
	elem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;

#ifdef RTE_FLOW_DEBUG
	elem_list_str[RTE_FLOW_ITEM_TYPE_END] = "FLOW_ELEM_TYPE_END";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = "FLOW_ELEM_TYPE_ANY";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = "FLOW_ELEM_TYPE_ETH";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = "FLOW_ELEM_TYPE_VLAN";
	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = "FLOW_ELEM_TYPE_IPV4";
	elem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = "FLOW_ELEM_TYPE_IPV6";
	elem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = "FLOW_ELEM_TYPE_UDP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = "FLOW_ELEM_TYPE_SCTP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = "FLOW_ELEM_TYPE_TCP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = "FLOW_ELEM_TYPE_ICMP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = "FLOW_ELEM_TYPE_VXLAN";
	elem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = "FLOW_ELEM_TYPE_GTP";
	elem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = "FLOW_ELEM_TYPE_PORT_ID";
	elem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = "FLOW_ELEM_TYPE_TAG";
	elem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = "FLOW_ELEM_TYPE_VOID";
#endif

	memset(action_list, -1, sizeof(action_list));
	action_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;
	action_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;
	action_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;
	action_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;
	action_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;
	action_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;
	action_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;
	action_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;
	action_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;
	action_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;
	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
		FLOW_ACTION_TYPE_VXLAN_ENCAP;
	action_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
		FLOW_ACTION_TYPE_VXLAN_DECAP;
	action_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
		FLOW_ACTION_TYPE_PUSH_VLAN;
	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
		FLOW_ACTION_TYPE_SET_VLAN_VID;
	action_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
		FLOW_ACTION_TYPE_SET_VLAN_PCP;
	action_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
		FLOW_ACTION_TYPE_POP_VLAN;
	action_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
		FLOW_ACTION_TYPE_RAW_ENCAP;
	action_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
		FLOW_ACTION_TYPE_RAW_DECAP;
	action_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
		FLOW_ACTION_TYPE_MODIFY_FIELD;

#ifdef RTE_FLOW_DEBUG
	action_list_str[RTE_FLOW_ACTION_TYPE_END] = "FLOW_ACTION_TYPE_END";
	action_list_str[RTE_FLOW_ACTION_TYPE_MARK] = "FLOW_ACTION_TYPE_MARK";
	action_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =
		"FLOW_ACTION_TYPE_SET_TAG";
	action_list_str[RTE_FLOW_ACTION_TYPE_DROP] = "FLOW_ACTION_TYPE_DROP";
	action_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = "FLOW_ACTION_TYPE_COUNT";
	action_list_str[RTE_FLOW_ACTION_TYPE_RSS] = "FLOW_ACTION_TYPE_RSS";
	action_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =
		"FLOW_ACTION_TYPE_PORT_ID";
	action_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = "FLOW_ACTION_TYPE_QUEUE";
	action_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = "FLOW_ACTION_TYPE_JUMP";
	action_list_str[RTE_FLOW_ACTION_TYPE_METER] = "FLOW_ACTION_TYPE_METER";
	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =
		"FLOW_ACTION_TYPE_VXLAN_ENCAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =
		"FLOW_ACTION_TYPE_VXLAN_DECAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =
		"FLOW_ACTION_TYPE_PUSH_VLAN";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =
		"FLOW_ACTION_TYPE_SET_VLAN_VID";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =
		"FLOW_ACTION_TYPE_SET_VLAN_PCP";
	action_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =
		"FLOW_ACTION_TYPE_POP_VLAN";
	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =
		"FLOW_ACTION_TYPE_RAW_ENCAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =
		"FLOW_ACTION_TYPE_RAW_DECAP";
	action_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =
		"FLOW_ACTION_TYPE_MODIFY_FIELD";
#endif

	convert_tables_initialized = 1;
}
+
+/*
+ * Interpret a raw packet byte stream (as supplied by the RAW_ENCAP /
+ * RAW_DECAP actions) as a stack of protocol headers and emit one
+ * flow_elem per recognized header into 'out', terminated by a
+ * FLOW_ELEM_TYPE_END element.
+ *
+ * 'preserve' may be NULL; when given it is a parallel byte array used
+ * as the per-element mask.  Parsing may stop cleanly on any header
+ * boundary once the buffer is exhausted; a truncated or unrecognized
+ * header yields -1.
+ *
+ * Returns the number of elements written to 'out' (including the END
+ * element), or -1 on malformed data.
+ */
+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,
+			      struct flow_elem *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = FLOW_ELEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type =
+		((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN - possibly stacked (QinQ) */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+			ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 - the version nibble must agree with the ether type */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&
+			(data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		next_header = data[pkti + 9];	/* IPv4 protocol field */
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		next_header = data[pkti + 6];	/* IPv6 next-header field */
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	/*
+	 * NOTE(review): protocol 1 is ICMP for IPv4 only; ICMPv6 uses
+	 * next-header 58 and is currently rejected - confirm intended.
+	 */
+	if (next_header == 1) { /* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+	} else if (next_header == 6) { /* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+	} else if (next_header == 17) { /* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		/* GTP-U is recognized by its well-known UDP destination port */
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+	} else if (next_header == 132) { /* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = FLOW_ELEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit =
+			((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti <
+					(int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = FLOW_ELEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] :
+					 NULL;
+
+			uint8_t next_ext =
+				((struct rte_gtp_hdr_ext_word *)&data[pkti])
+				->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				/* Extension length is in 4-octet units */
+				size_t ext_len = data[pkti] * 4;
+
+				/*
+				 * A zero-length extension header would never
+				 * advance pkti, re-reading the same nonzero
+				 * next_ext byte forever; reject it.
+				 */
+				if (ext_len == 0)
+					return -1;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = FLOW_ELEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ?
+						 &preserve[pkti] :
+						 NULL;
+
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	/* Trailing bytes that match no known header are an error */
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = FLOW_ELEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
+/*
+ * Reset the converted attribute structure and copy the group and
+ * priority fields from 'attr' when one is supplied.  Always returns 0.
+ */
+static int create_attr(struct cnv_attr_s *attribute,
+		       const struct rte_flow_attr *attr)
+{
+	memset(&attribute->attr, 0x0, sizeof(attribute->attr));
+
+	if (attr == NULL)
+		return 0;
+
+	attribute->attr.group = attr->group;
+	attribute->attr.priority = attr->priority;
+	return 0;
+}
+
+/*
+ * Convert an array of rte_flow_item entries into the driver's
+ * cnv_match_s representation (match->flow_elem).  Each item's type is
+ * mapped through CNV_TO_ELEM; the custom NT_RTE_FLOW_ITEM_TYPE_TUNNEL
+ * item maps to FLOW_ELEM_TYPE_TUNNEL.  spec/mask pointers are carried
+ * over unchanged (no copy is made).
+ *
+ * Returns 0 when the END item is reached, -1 on unknown item types,
+ * items using 'last' (ranges unsupported), or more than 'max_elem'
+ * elements.
+ */
+static int create_match_elements(struct cnv_match_s *match,
+				 const struct rte_flow_item items[],
+				 int max_elem)
+{
+	int eidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!items) {
+		NT_LOG(ERR, FILTER, "ERROR no items to iterate!\n");
+		return -1;
+	}
+
+	/* Lazily build the RTE->driver conversion tables on first use */
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	/* Iterate until the END element (or an error) */
+	do {
+		type = CNV_TO_ELEM(items[iter_idx].type);
+		if (type < 0) {
+			if ((int)items[iter_idx].type ==
+					NT_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+				type = FLOW_ELEM_TYPE_TUNNEL;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown item type received!\n");
+				return -1;
+			}
+		}
+
+		if (type >= 0) {
+			if (items[iter_idx].last) {
+				/* Ranges are not supported yet */
+				NT_LOG(ERR, FILTER,
+				       "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\n");
+				return -1;
+			}
+
+			if (eidx == max_elem) {
+				NT_LOG(ERR, FILTER,
+				       "ERROR TOO MANY ELEMENTS ENCOUNTERED!\n");
+				return -1;
+			}
+
+#ifdef RTE_FLOW_DEBUG
+			/*
+			 * NOTE(review): elem_list_str is indexed with the raw
+			 * RTE item type; custom types such as
+			 * NT_RTE_FLOW_ITEM_TYPE_TUNNEL are assumed to be
+			 * negative when cast to int - confirm, otherwise this
+			 * indexes past the table.
+			 */
+			NT_LOG(INF, FILTER,
+			       "RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\n",
+			       items[iter_idx].type, type,
+			       ((int)items[iter_idx].type >= 0) ?
+			       elem_list_str[items[iter_idx].type] :
+			       "FLOW_ELEM_TYPE_TUNNEL");
+
+			/* Dump the spec/mask of the most common items */
+			switch (type) {
+			case FLOW_ELEM_TYPE_ETH:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_eth *eth =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->d_addr.addr_b[0] & 0xFF,
+					       eth->d_addr.addr_b[1] & 0xFF,
+					       eth->d_addr.addr_b[2] & 0xFF,
+					       eth->d_addr.addr_b[3] & 0xFF,
+					       eth->d_addr.addr_b[4] & 0xFF,
+					       eth->d_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         src=%02X:%02X:%02X:%02X:%02X:%02X\n",
+					       eth->s_addr.addr_b[0] & 0xFF,
+					       eth->s_addr.addr_b[1] & 0xFF,
+					       eth->s_addr.addr_b[2] & 0xFF,
+					       eth->s_addr.addr_b[3] & 0xFF,
+					       eth->s_addr.addr_b[4] & 0xFF,
+					       eth->s_addr.addr_b[5] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                         type=%04x\n",
+					       htons(eth->ether_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_VLAN:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_vlan *vlan =
+						(const struct flow_elem_vlan *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\n",
+					       htons(vlan->tci));
+					NT_LOG(DBG, FILTER,
+					       "                          inner type=%04x\n",
+					       htons(vlan->inner_type));
+				}
+				break;
+			case FLOW_ELEM_TYPE_IPV4:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%u\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%u\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%u\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%u\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%u\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%u\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%u\n",
+					       ip->hdr.length);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_ipv4 *ip =
+							items[iter_idx].mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          src=%d.%d.%d.%d\n",
+					       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,
+					       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);
+					NT_LOG(DBG, FILTER,
+					       "                          fragment_offset=%x\n",
+					       ip->hdr.frag_offset);
+					NT_LOG(DBG, FILTER,
+					       "                          next_proto_id=%x\n",
+					       ip->hdr.next_proto_id);
+					NT_LOG(DBG, FILTER,
+					       "                          packet_id=%x\n",
+					       ip->hdr.id);
+					NT_LOG(DBG, FILTER,
+					       "                          time_to_live=%x\n",
+					       ip->hdr.ttl);
+					NT_LOG(DBG, FILTER,
+					       "                          type_of_service=%x\n",
+					       ip->hdr.tos);
+					NT_LOG(DBG, FILTER,
+					       "                          version_ihl=%x\n",
+					       ip->hdr.version_ihl);
+					NT_LOG(DBG, FILTER,
+					       "                          total_length=%x\n",
+					       ip->hdr.length);
+				}
+				break;
+			case FLOW_ELEM_TYPE_UDP:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_udp *udp =
+						(const struct flow_elem_udp *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_UDP MASK: src port=%04x\n",
+					       htons(udp->hdr.src_port));
+					NT_LOG(DBG, FILTER,
+					       "                         dst port=%04x\n",
+					       htons(udp->hdr.dst_port));
+				}
+				break;
+			case FLOW_ELEM_TYPE_TAG:
+				if (items[iter_idx].spec) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.spec;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG SPEC: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				if (items[iter_idx].mask) {
+					const struct flow_elem_tag *tag =
+						(const struct flow_elem_tag *)
+						items[iter_idx]
+						.mask;
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ELEM_TYPE_TAG MASK: data=%u\n",
+					       tag->data);
+					NT_LOG(DBG, FILTER,
+					       "                         index=%u\n",
+					       tag->index);
+				}
+				break;
+			case FLOW_ELEM_TYPE_VXLAN: {
+				const struct flow_elem_vxlan *vxlan =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.spec;
+				const struct flow_elem_vxlan *mask =
+					(const struct flow_elem_vxlan *)
+					items[iter_idx]
+					.mask;
+
+				/* 24-bit VNI assembled from three bytes */
+				uint32_t vni =
+					(uint32_t)(((uint32_t)vxlan->vni[0]
+						    << 16) |
+						   ((uint32_t)vxlan->vni[1]
+						    << 8) |
+						   ((uint32_t)vxlan->vni[2]));
+				uint32_t vni_mask =
+					(uint32_t)(((uint32_t)mask->vni[0]
+						    << 16) |
+						   ((uint32_t)mask->vni[1]
+						    << 8) |
+						   ((uint32_t)mask->vni[2]));
+
+				NT_LOG(INF, FILTER, "VNI: %08x / %08x\n", vni,
+				       vni_mask);
+			}
+			break;
+			}
+#endif
+
+			/* Store the converted element (pointers, not copies) */
+			match->flow_elem[eidx].type = type;
+			match->flow_elem[eidx].spec = items[iter_idx].spec;
+			match->flow_elem[eidx].mask = items[iter_idx].mask;
+
+			eidx++;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ELEM_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert rte_flow actions to the driver's flow actions for the
+ * vswitch profile.
+ *
+ * A color flow statistics id is allocated into *flow_stat_id whenever a
+ * MARK action is seen; if the caller supplied no MARK, one is inserted
+ * just before END so the FPGA can account the flow.
+ *
+ * Returns 0 when the END action is reached, -1 on unknown/unsupported
+ * actions or overflow of the 'max_elem'-sized action array.
+ */
+static int
+create_action_elements_vswitch(struct cnv_action_s *action,
+			       const struct rte_flow_action actions[],
+			       int max_elem, uint32_t *flow_stat_id)
+{
+	int aidx = 0;
+	int iter_idx = 0;
+	int type = -1;
+
+	if (!actions)
+		return -1;
+
+	if (!convert_tables_initialized)
+		initialize_global_cnv_tables();
+
+	*flow_stat_id = MAX_COLOR_FLOW_STATS;
+	do {
+		type = CNV_TO_ACTION(actions[iter_idx].type);
+		if (type < 0) {
+			if ((int)actions[iter_idx].type ==
+					NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+				type = FLOW_ACTION_TYPE_TUNNEL_SET;
+			} else {
+				NT_LOG(ERR, FILTER,
+				       "ERROR unknown action type received!\n");
+				return -1;
+			}
+		}
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[iter_idx].type, type,
+		       ((int)actions[iter_idx].type >= 0) ?
+		       action_list_str[actions[iter_idx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case -1:
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(INF, FILTER,
+				       "RTE ACTION UNSUPPORTED %i\n",
+				       actions[iter_idx].type);
+#endif
+				return -1;
+
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[iter_idx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+				break;
+			}
+
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				const struct rte_flow_action_vxlan_encap *tun =
+					(const struct rte_flow_action_vxlan_encap
+					 *)actions[iter_idx]
+					.conf;
+				if (!tun || create_match_elements(&action->tun_def.match,
+								  tun->definition,
+								  MAX_ELEMENTS) != 0)
+					return -1;
+				action->tun_def.tun_definition =
+					action->tun_def.match.flow_elem;
+				action->flow_actions[aidx].conf =
+					&action->tun_def;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_MARK: {
+				const struct rte_flow_action_mark *mark_id =
+					(const struct rte_flow_action_mark *)
+					actions[iter_idx]
+					.conf;
+				if (mark_id) {
+#ifdef RTE_FLOW_DEBUG
+					NT_LOG(DBG, FILTER, "Mark ID=%u\n",
+					       mark_id->id);
+#endif
+					*flow_stat_id = create_flow_stat_id(mark_id->id);
+					action->mark.id = *flow_stat_id;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+
+				} else {
+					action->flow_actions[aidx].conf =
+						actions[iter_idx].conf;
+				}
+			}
+			break;
+
+			default:
+				/* Compatible */
+
+				/*
+				 * OVS Full offload does not add mark in RTE Flow
+				 * We need one in FPGA to control flow(color) statistics
+				 */
+				if (type == FLOW_ACTION_TYPE_END &&
+						*flow_stat_id == MAX_COLOR_FLOW_STATS) {
+					/*
+					 * Inserting the mark and moving END
+					 * writes entries aidx and aidx + 1;
+					 * make sure both fit in the array.
+					 */
+					if (aidx + 1 >= max_elem)
+						return -1;
+
+					/* We need to insert a mark for our FPGA */
+					*flow_stat_id = create_flow_stat_id(0);
+					action->mark.id = *flow_stat_id;
+
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_MARK;
+					action->flow_actions[aidx].conf =
+						&action->mark;
+					aidx++;
+
+					/* Move end type */
+					action->flow_actions[aidx].type =
+						FLOW_ACTION_TYPE_END;
+				}
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[iter_idx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[iter_idx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[iter_idx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+
+				action->flow_actions[aidx].conf =
+					actions[iter_idx].conf;
+				break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+			iter_idx++;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+/*
+ * Convert rte_flow actions to the driver's flow actions for the inline
+ * profile.  RAW_ENCAP/RAW_DECAP payloads are parsed into header lists
+ * via interpret_raw_data(); QUEUE indices are shifted by 'queue_offset'
+ * (used when the port is a VF/vDPA port).
+ *
+ * Returns 0 when the END action is reached, -1 (or the negative parse
+ * result) on error or overflow of the 'max_elem'-sized action array.
+ */
+static int create_action_elements_inline(struct cnv_action_s *action,
+		const struct rte_flow_action actions[],
+		int max_elem, uint32_t queue_offset)
+{
+	int aidx = 0;
+	int type = -1;
+
+	do {
+		type = CNV_TO_ACTION(actions[aidx].type);
+
+#ifdef RTE_FLOW_DEBUG
+		NT_LOG(INF, FILTER,
+		       "RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\n",
+		       actions[aidx].type, type,
+		       ((int)actions[aidx].type >= 0) ?
+		       action_list_str[actions[aidx].type] :
+		       "FLOW_ACTION_TYPE_TUNNEL_SET");
+#endif
+
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case FLOW_ACTION_TYPE_RSS: {
+				const struct rte_flow_action_rss *rss =
+					(const struct rte_flow_action_rss *)
+					actions[aidx]
+					.conf;
+				action->flow_rss.func =
+					FLOW_HASH_FUNCTION_DEFAULT;
+
+				/* Only the default hash function is supported */
+				if (rss->func !=
+						RTE_ETH_HASH_FUNCTION_DEFAULT)
+					return -1;
+				action->flow_rss.level = rss->level;
+				action->flow_rss.types = rss->types;
+				action->flow_rss.key_len = rss->key_len;
+				action->flow_rss.queue_num = rss->queue_num;
+				action->flow_rss.key = rss->key;
+				action->flow_rss.queue = rss->queue;
+				action->flow_actions[aidx].conf =
+					&action->flow_rss;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RSS: rss->level = %u\n",
+				       rss->level);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->types = 0x%" PRIX64 "\n",
+				       (uint64_t)rss->types);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key_len = %u\n",
+				       rss->key_len);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->queue_num = %u\n",
+				       rss->queue_num);
+				NT_LOG(DBG, FILTER,
+				       "                      rss->key = %p\n",
+				       rss->key);
+				unsigned int i;
+
+				for (i = 0; i < rss->queue_num; i++) {
+					NT_LOG(DBG, FILTER,
+					       "                      rss->queue[%u] = %u\n",
+					       i, rss->queue[i]);
+				}
+#endif
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(decap->data,
+								    NULL, decap->size,
+								    action->decap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: size = %u\n",
+				       decap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\n",
+				       item_count);
+				for (int i = 0; i < item_count; i++) {
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_RAW_DECAP: item = %u\n",
+					       action->decap.items[i].type);
+				}
+#endif
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->decap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap
+					 *)actions[aidx]
+					.conf;
+				int item_count = interpret_raw_data(encap->data,
+								    encap->preserve,
+								    encap->size,
+								    action->encap.items);
+				if (item_count < 0)
+					return item_count;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\n",
+				       encap->size);
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\n",
+				       item_count);
+#endif
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf =
+					&action->encap;
+			}
+			break;
+
+			case FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)
+					actions[aidx]
+					.conf;
+				/* Shift into the VF/vDPA queue range */
+				action->queue.index =
+					queue->index + queue_offset;
+				action->flow_actions[aidx].conf =
+					&action->queue;
+#ifdef RTE_FLOW_DEBUG
+				NT_LOG(DBG, FILTER,
+				       "FLOW_ACTION_TYPE_QUEUE: queue = %u\n",
+				       action->queue.index);
+#endif
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf =
+					actions[aidx].conf;
+
+#ifdef RTE_FLOW_DEBUG
+				switch (type) {
+				case FLOW_ACTION_TYPE_PORT_ID:
+					NT_LOG(DBG, FILTER,
+					       "Port ID=%u, Original=%u\n",
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->id,
+					       ((const struct rte_flow_action_port_id
+						 *)actions[aidx]
+						.conf)
+					       ->original);
+					break;
+				case FLOW_ACTION_TYPE_COUNT:
+					NT_LOG(DBG, FILTER, "Count ID=%u\n",
+					       ((const struct rte_flow_action_count
+						 *)actions[aidx]
+						.conf)
+					       ->id);
+					break;
+				case FLOW_ACTION_TYPE_SET_TAG:
+					NT_LOG(DBG, FILTER,
+					       "FLOW_ACTION_TYPE_SET_TAG: data=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->data);
+					NT_LOG(DBG, FILTER,
+					       "                          mask=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->mask);
+					NT_LOG(DBG, FILTER,
+					       "                          index=%u\n",
+					       ((const struct flow_action_tag *)
+						actions[aidx]
+						.conf)
+					       ->index);
+					break;
+				}
+#endif
+			}
+			break;
+			}
+
+			aidx++;
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != FLOW_ACTION_TYPE_END);
+
+	return (type >= 0) ? 0 : -1;
+}
+
+#endif /* __CREATE_ELEMENTS_H__ */
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
new file mode 100644
index 0000000000..6b19c2308e
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <malloc.h>
+
+#include "ntdrv_4ga.h"
+#include <rte_flow_driver.h>
+#include <rte_pci.h>
+#include "ntnic_ethdev.h"
+
+#include "ntlog.h"
+#include "nt_util.h"
+#include "create_elements.h"
+#include "ntnic_filter.h"
+
+#define MAX_RTE_FLOWS 8192
+#define MAX_PORTIDS 64
+
+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)
+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.
+#endif
+
+struct rte_flow nt_flows[MAX_RTE_FLOWS];
+
+/*
+ * A flow handle may either point into the nt_flows[] table or be a raw
+ * driver handle returned by flow_create() (inline profile, group > 0).
+ * Returns non-zero when 'flow' lies outside nt_flows[], i.e. it is a
+ * typecast driver handle.
+ *
+ * Compare as integers: relational comparison of pointers into
+ * different objects is undefined behavior in C.
+ */
+static int is_flow_handle_typecast(struct rte_flow *flow)
+{
+	const uintptr_t first_element = (uintptr_t)&nt_flows[0];
+	const uintptr_t last_element = (uintptr_t)&nt_flows[MAX_RTE_FLOWS - 1];
+	const uintptr_t handle = (uintptr_t)flow;
+
+	return handle < first_element || handle > last_element;
+}
+
+/*
+ * Convert a generic rte_flow description (attr/items/actions) into the
+ * driver's cnv_* structures according to the adapter profile.
+ * For the vswitch profile a flow statistics id is produced in
+ * *flow_stat_id.  Returns 0 on success, -1 with 'error' populated on
+ * failure.
+ */
+static int convert_flow(struct rte_eth_dev *eth_dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct cnv_attr_s *attribute, struct cnv_match_s *match,
+			struct cnv_action_s *action,
+			struct rte_flow_error *error, uint32_t *flow_stat_id)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t queue_offset = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	/*
+	 * Validate 'dev' before any dereference; the original code read
+	 * dev->p_drv and dev->type before performing this check.
+	 */
+	if (!dev) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Missing eth_dev");
+		return -1;
+	}
+
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	if (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {
+		/*
+		 * The queues coming from the main PMD will always start from 0
+		 * When the port is the VF/vDPA port the queues must be changed
+		 * to match the queues allocated for VF/vDPA.
+		 */
+		queue_offset = dev->vpq[0].id;
+	}
+
+	if (create_attr(attribute, attr) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "Error in attr");
+		return -1;
+	}
+	if (create_match_elements(match, items, MAX_ELEMENTS) != 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Error in items");
+		return -1;
+	}
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		if (create_action_elements_inline(action, actions, MAX_ACTIONS,
+						  queue_offset) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+		/* Group > 0 flows are handed straight to the driver layer */
+		if (attribute->attr.group > 0)
+			return 0;
+	} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {
+		if (create_action_elements_vswitch(action, actions, MAX_ACTIONS,
+						   flow_stat_id) != 0) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "Error in actions");
+			return -1;
+		}
+	} else {
+		rte_flow_error_set(error, EPERM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Unsupported adapter profile");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * rte_flow destroy entry point.
+ *
+ * Handles both kinds of flow handles (see is_flow_handle_typecast()):
+ * raw driver handles are passed straight to flow_destroy(); table
+ * entries additionally release their stat id and slot under flow_lock.
+ */
+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
+			    struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+
+	int res = 0;
+
+	/* Set initial error */
+	convert_error(error, &flow_error);
+
+	if (!flow)
+		return 0;
+
+	if (is_flow_handle_typecast(flow)) {
+		res = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);
+		convert_error(error, &flow_error);
+
+		/*
+		 * A typecast handle is owned and released by the filter
+		 * layer in flow_destroy(); it must not be dereferenced
+		 * afterwards (the original code cleared stat fields
+		 * through it - a write after free).
+		 */
+		return res;
+	}
+
+	res = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);
+	convert_error(error, &flow_error);
+
+	rte_spinlock_lock(&flow_lock);
+	delete_flow_stat_id_locked(flow->flow_stat_id);
+	flow->used = 0;
+	rte_spinlock_unlock(&flow_lock);
+
+	/* Clear the flow statistics if successfully destroyed */
+	if (res == 0) {
+		flow->stat_pkts = 0UL;
+		flow->stat_bytes = 0UL;
+		flow->stat_tcp_flags = 0;
+	}
+
+	return res;
+}
+
+/*
+ * rte_flow validate entry point: convert the generic flow description
+ * and ask the filter layer whether it could be programmed, without
+ * actually creating anything.
+ */
+static int eth_flow_validate(struct rte_eth_dev *eth_dev,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_item items[],
+			     const struct rte_flow_action actions[],
+			     struct rte_flow_error *error)
+{
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+	uint32_t flow_stat_id = 0;
+
+	/* Conversion failure maps to -EINVAL */
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return -EINVAL;
+
+	int status = flow_validate(dev->flw_dev, match.flow_elem,
+				   action.flow_actions, &flow_error);
+
+	if (status < 0)
+		convert_error(error, &flow_error);
+
+	return status;
+}
+
+/*
+ * rte_flow create entry point.
+ *
+ * Converts the generic flow description and programs it via the filter
+ * layer.  For the inline profile with attribute group > 0 the raw
+ * driver handle is returned directly (typecast to struct rte_flow *);
+ * otherwise a free slot in the nt_flows[] table is claimed under
+ * flow_lock and holds the driver handle plus the flow stat id.
+ * Returns NULL on failure with 'error' populated.
+ */
+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item items[],
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error)
+{
+	struct pmd_internals *dev = eth_dev->data->dev_private;
+	struct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;
+
+	struct cnv_attr_s attribute;
+	struct cnv_match_s match;
+	struct cnv_action_s action;
+
+	static struct flow_error flow_error = { .type = FLOW_ERROR_NONE,
+		       .message = "none"
+	};
+	uint32_t flow_stat_id = 0;
+
+#ifdef RTE_FLOW_DEBUG
+	NT_LOG(DBG, FILTER, "ntnic_flow_create port_id %u - %s\n",
+	       eth_dev->data->port_id, eth_dev->data->name);
+#endif
+
+	if (convert_flow(eth_dev, attr, items, actions, &attribute, &match,
+			 &action, error, &flow_stat_id) < 0)
+		return NULL;
+
+	/* Inline profile, group > 0: return the raw driver handle */
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+			attribute.attr.group > 0) {
+		void *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		return (struct rte_flow *)flw_hdl;
+	}
+
+	struct rte_flow *flow = NULL;
+
+	/* Claim the first unused slot; requires a valid stat id */
+	rte_spinlock_lock(&flow_lock);
+	int i;
+
+	for (i = 0; i < MAX_RTE_FLOWS; i++) {
+		if (!nt_flows[i].used) {
+			nt_flows[i].flow_stat_id = flow_stat_id;
+			if (nt_flows[i].flow_stat_id <
+					NT_MAX_COLOR_FLOW_STATS) {
+				nt_flows[i].used = 1;
+				flow = &nt_flows[i];
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&flow_lock);
+	if (flow) {
+		flow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,
+					    match.flow_elem,
+					    action.flow_actions, &flow_error);
+		convert_error(error, &flow_error);
+		/* On driver failure, release the stat id and the slot again */
+		if (!flow->flw_hdl) {
+			rte_spinlock_lock(&flow_lock);
+			delete_flow_stat_id_locked(flow->flow_stat_id);
+			flow->used = 0;
+			flow = NULL;
+			rte_spinlock_unlock(&flow_lock);
+		} else {
+#ifdef RTE_FLOW_DEBUG
+			NT_LOG(INF, FILTER, "Create Flow %p using stat_id %i\n",
+			       flow, flow->flow_stat_id);
+#endif
+		}
+	}
+	return flow;
+}
+
+/* TSC timestamp of the last global (all-port) statistics refresh. */
+uint64_t last_stat_rtc;
+
+/*
+ * Pull accumulated hardware counters into the per-port software stats.
+ *
+ * Rate limited twice: per port (internals->last_stat_rtc) and globally
+ * (last_stat_rtc), each to at most once per second based on the TSC.
+ * The global pass additionally folds the shared color counters into the
+ * per-flow statistics of every active nt_flows[] entry.
+ *
+ * Returns 0 on success (including when rate-limited), -1 on an invalid
+ * adapter/port index.
+ */
+int poll_statistics(struct pmd_internals *internals)
+{
+	int flow;
+	struct drv_s *p_drv = internals->p_drv;
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+	const int if_index = internals->if_index;
+
+	/* NOTE(review): bound check uses '>' — if the stat arrays are sized
+	 * NUM_ADAPTER_PORTS_MAX this should likely be '>='; confirm array
+	 * dimensions before relying on the edge index.
+	 */
+	if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)
+		return -1;
+
+	assert(rte_tsc_freq > 0);
+
+	rte_spinlock_lock(&hwlock);
+
+	uint64_t now_rtc = rte_get_tsc_cycles();
+
+	/*
+	 * Check per port max once a second
+	 * if more than a second since last stat read, do a new one
+	 */
+	if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		return 0;
+	}
+
+	internals->last_stat_rtc = now_rtc;
+
+	pthread_mutex_lock(&p_nt_drv->stat_lck);
+
+	/*
+	 * Add the RX statistics increments since last time we polled.
+	 * (No difference if physical or virtual port)
+	 */
+	internals->rxq_scg[0].rx_pkts +=
+		p_nt4ga_stat->a_port_rx_packets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_packets_base[if_index];
+	internals->rxq_scg[0].rx_bytes +=
+		p_nt4ga_stat->a_port_rx_octets_total[if_index] -
+		p_nt4ga_stat->a_port_rx_octets_base[if_index];
+	internals->rxq_scg[0].err_pkts += 0;
+	internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -
+				p_nt4ga_stat->a_port_rx_drops_base[if_index];
+
+	/* _update the increment bases */
+	p_nt4ga_stat->a_port_rx_packets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_packets_total[if_index];
+	p_nt4ga_stat->a_port_rx_octets_base[if_index] =
+		p_nt4ga_stat->a_port_rx_octets_total[if_index];
+	p_nt4ga_stat->a_port_rx_drops_base[if_index] =
+		p_nt4ga_stat->a_port_rx_drops_total[if_index];
+
+	/* Tx (here we must distinguish between physical and virtual ports) */
+	if (internals->type == PORT_TYPE_PHYSICAL) {
+		/* LAG management of Tx stats. */
+		if (lag_active && if_index == 0) {
+			unsigned int i;
+			/*
+			 * Collect all LAG ports Tx stat into this one. Simplified to only collect
+			 * from port 0 and 1.
+			 */
+			for (i = 0; i < 2; i++) {
+				/* Add the statistics increments since last time we polled */
+				internals->txq_scg[0].tx_pkts +=
+					p_nt4ga_stat->a_port_tx_packets_total[i] -
+					p_nt4ga_stat->a_port_tx_packets_base[i];
+				internals->txq_scg[0].tx_bytes +=
+					p_nt4ga_stat->a_port_tx_octets_total[i] -
+					p_nt4ga_stat->a_port_tx_octets_base[i];
+				internals->txq_scg[0].err_pkts += 0;
+
+				/* _update the increment bases */
+				p_nt4ga_stat->a_port_tx_packets_base[i] =
+					p_nt4ga_stat->a_port_tx_packets_total[i];
+				p_nt4ga_stat->a_port_tx_octets_base[i] =
+					p_nt4ga_stat->a_port_tx_octets_total[i];
+			}
+		} else {
+			/* Add the statistics increments since last time we polled */
+			internals->txq_scg[0].tx_pkts +=
+				p_nt4ga_stat->a_port_tx_packets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_packets_base[if_index];
+			internals->txq_scg[0].tx_bytes +=
+				p_nt4ga_stat->a_port_tx_octets_total[if_index] -
+				p_nt4ga_stat->a_port_tx_octets_base[if_index];
+			internals->txq_scg[0].err_pkts += 0;
+
+			/* _update the increment bases */
+			p_nt4ga_stat->a_port_tx_packets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_packets_total[if_index];
+			p_nt4ga_stat->a_port_tx_octets_base[if_index] =
+				p_nt4ga_stat->a_port_tx_octets_total[if_index];
+		}
+	}
+	if (internals->type == PORT_TYPE_VIRTUAL) {
+		/* _update TX counters from HB queue counter */
+		unsigned int i;
+		struct host_buffer_counters *const p_hb_counters =
+				p_nt4ga_stat->mp_stat_structs_hb;
+		uint64_t v_port_packets_total = 0, v_port_octets_total = 0;
+
+		/*
+		 * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes
+		 * the number of exception queues which must be 1 - for now. The code is kept if we
+		 * want it in future, but it will not be likely.
+		 * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.
+		 *
+		 * Only use the vPort Tx counter to update OVS, since these are the real ones.
+		 * The rep port into OVS that represents this port will always replicate the traffic
+		 * here, also when no offload occurs
+		 */
+		for (i = 0; i < internals->vpq_nb_vq; ++i) {
+			v_port_packets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_packets;
+			v_port_octets_total +=
+				p_hb_counters[internals->vpq[i].id].fwd_bytes;
+		}
+		/* Add the statistics increments since last time we polled */
+		internals->txq_scg[0].tx_pkts +=
+			v_port_packets_total -
+			p_nt4ga_stat->a_port_tx_packets_base[if_index];
+		internals->txq_scg[0].tx_bytes +=
+			v_port_octets_total -
+			p_nt4ga_stat->a_port_tx_octets_base[if_index];
+		internals->txq_scg[0].err_pkts += 0; /* no error source available here */
+
+		/* _update the increment bases */
+		p_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;
+		p_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;
+	}
+
+	/* Globally only once a second */
+	if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
+		rte_spinlock_unlock(&hwlock);
+		pthread_mutex_unlock(&p_nt_drv->stat_lck);
+		return 0;
+	}
+
+	last_stat_rtc = now_rtc;
+
+	/* All color counter are global, therefore only 1 pmd must update them */
+	const struct color_counters *p_color_counters =
+			p_nt4ga_stat->mp_stat_structs_color;
+	struct color_counters *p_color_counters_base =
+			p_nt4ga_stat->a_stat_structs_color_base;
+	uint64_t color_packets_accumulated, color_bytes_accumulated;
+
+	for (flow = 0; flow < MAX_RTE_FLOWS; flow++) {
+		if (nt_flows[flow].used) {
+			unsigned int color = nt_flows[flow].flow_stat_id;
+
+			if (color < NT_MAX_COLOR_FLOW_STATS) {
+				color_packets_accumulated =
+					p_color_counters[color].color_packets;
+				nt_flows[flow].stat_pkts +=
+					(color_packets_accumulated -
+					 p_color_counters_base[color].color_packets);
+
+				nt_flows[flow].stat_tcp_flags |=
+					p_color_counters[color].tcp_flags;
+
+				color_bytes_accumulated =
+					p_color_counters[color].color_bytes;
+				nt_flows[flow].stat_bytes +=
+					(color_bytes_accumulated -
+					 p_color_counters_base[color].color_bytes);
+
+				/* _update the counter bases */
+				p_color_counters_base[color].color_packets =
+					color_packets_accumulated;
+				p_color_counters_base[color].color_bytes =
+					color_bytes_accumulated;
+			}
+		}
+	}
+
+	rte_spinlock_unlock(&hwlock);
+	pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+	return 0;
+}
+
+/*
+ * rte_flow_ops .query callback.
+ *
+ * Only RTE_FLOW_ACTION_TYPE_COUNT queries are supported: the cached
+ * per-flow packet/byte counters (refreshed via poll_statistics()) are
+ * copied into the rte_flow_query_count result; qcnt->reset clears them.
+ * Typecast engine handles (inline-profile flows) cannot be queried.
+ * Returns 0 on success, -1 with 'err' set otherwise.
+ */
+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+			  const struct rte_flow_action *action, void *data,
+			  struct rte_flow_error *err)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	err->cause = NULL;
+	err->message = NULL;
+
+	if (is_flow_handle_typecast(flow)) {
+		rte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Error in flow handle");
+		return -1;
+	}
+
+	/* Refresh cached counters before reporting them. */
+	poll_statistics(internals);
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {
+		struct rte_flow_query_count *qcnt =
+			(struct rte_flow_query_count *)data;
+		if (qcnt) {
+			if (flow) {
+				qcnt->hits = flow->stat_pkts;
+				qcnt->hits_set = 1;
+				qcnt->bytes = flow->stat_bytes;
+				qcnt->bytes_set = 1;
+
+				if (qcnt->reset) {
+					flow->stat_pkts = 0UL;
+					flow->stat_bytes = 0UL;
+					flow->stat_tcp_flags = 0;
+				}
+			} else {
+				qcnt->hits_set = 0;
+				qcnt->bytes_set = 0;
+			}
+		}
+	} else {
+		rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "Unsupported query");
+		return -1;
+	}
+	rte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, "Success");
+	return 0;
+}
+
+#ifdef DEBUGGING
+
+/* Debug-only helper: dump every field of an rte_flow_tunnel to the log. */
+static void _print_tunnel(struct rte_flow_tunnel *tunnel)
+{
+	struct in_addr addr;
+
+	NT_LOG(DBG, FILTER, "    tun type: %i\n", tunnel->type);
+	NT_LOG(DBG, FILTER, "    tun ID: %016lx\n", tunnel->tun_id);
+	addr.s_addr = tunnel->ipv4.src_addr;
+	NT_LOG(DBG, FILTER, "    tun src IP: %s\n", inet_ntoa(addr));
+	addr.s_addr = tunnel->ipv4.dst_addr;
+	NT_LOG(DBG, FILTER, "    tun dst IP: %s\n", inet_ntoa(addr));
+	NT_LOG(DBG, FILTER, "    tun tp_src: %i\n", htons(tunnel->tp_src));
+	NT_LOG(DBG, FILTER, "    tun tp_dst: %i\n", htons(tunnel->tp_dst));
+	NT_LOG(DBG, FILTER, "    tun flags:  %i\n", tunnel->tun_flags);
+	NT_LOG(DBG, FILTER, "    tun ipv6:  %i\n", tunnel->is_ipv6);
+
+	NT_LOG(DBG, FILTER, "    tun tos:   %i\n", tunnel->tos);
+	NT_LOG(DBG, FILTER, "    tun ttl:   %i\n", tunnel->ttl);
+}
+#endif
+
+/*
+ * PMD-private action list handed back from .tunnel_decap_set.
+ * Slot [1] is filled in at runtime with the tunnel-specific decap action.
+ * NOTE(review): this is shared mutable static state — concurrent
+ * tunnel_decap_set calls would race on slot [1]; confirm single-threaded
+ * control-path usage.
+ */
+static struct rte_flow_action _pmd_actions[] = {
+	{	.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
+		.conf = NULL
+	},
+	{ .type = 0, .conf = NULL }
+};
+
+/*
+ * rte_flow_ops .tunnel_decap_set callback.
+ * Returns the PMD action list (TUNNEL_SET + decap) for the given tunnel.
+ * Only VXLAN tunnels are supported; anything else yields -ENOTSUP.
+ */
+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_tunnel *tunnel,
+				  struct rte_flow_action **pmd_actions,
+				  uint32_t *num_of_actions,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+	else
+		return -ENOTSUP;
+
+	*pmd_actions = _pmd_actions;
+	*num_of_actions = 2;
+
+	return 0;
+}
+
+/* Single PMD-private match item returned from .tunnel_match. */
+static struct rte_flow_item _pmd_items = {
+	.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,
+	.spec = NULL,
+	.last = NULL,
+	.mask = NULL
+};
+
+/*
+ * rte_flow_ops .tunnel_match callback.
+ * Always hands back the one static TUNNEL item regardless of the tunnel
+ * description. Returns 0.
+ */
+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,
+			      struct rte_flow_tunnel *tunnel _unused,
+			      struct rte_flow_item **pmd_items,
+			      uint32_t *num_of_items,
+			      struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+
+	*pmd_items = &_pmd_items;
+	*num_of_items = 1;
+	return 0;
+}
+
+/*
+ * Restoration API support
+ */
+/*
+ * rte_flow_ops .get_restore_info callback.
+ *
+ * Decodes the FDIR mark the hardware placed on the mbuf (port id in the
+ * top byte of hash.fdir.hi, stat id in the low 24 bits of hash.fdir.lo),
+ * looks up the matching tunnel definition and reconstructs an
+ * rte_flow_restore_info describing the VXLAN tunnel the packet came from.
+ * Returns 0 on a hit, -EINVAL when the mbuf carries no usable mark or the
+ * tunnel lookup fails.
+ */
+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+	NT_LOG(DBG, FILTER, "dev name: %s - port_id %i\n", dev->data->name, dev->data->port_id);
+	NT_LOG(DBG, FILTER, "dpdk tunnel mark %08x\n", m->hash.fdir.hi);
+#endif
+
+	if ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {
+		uint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;
+		uint32_t stat_id = m->hash.fdir.lo & 0xffffff;
+
+		struct tunnel_cfg_s tuncfg;
+		int ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);
+
+		if (ret)
+			return -EINVAL;
+
+		if (tuncfg.ipversion == 4) {
+			info->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;
+			info->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;
+			info->tunnel.is_ipv6 = 0;
+		} else {
+			/* IPv6 */
+			for (int i = 0; i < 16; i++) {
+				info->tunnel.ipv6.src_addr[i] =
+					tuncfg.v6.src_ip[i];
+				info->tunnel.ipv6.dst_addr[i] =
+					tuncfg.v6.dst_ip[i];
+			}
+			info->tunnel.is_ipv6 = 1;
+		}
+
+		info->tunnel.tp_dst = tuncfg.d_port;
+		info->tunnel.tp_src = tuncfg.s_port;
+
+		/* Fixed values: the hardware does not report TTL/ToS. */
+		info->tunnel.ttl = 64;
+		info->tunnel.tos = 0;
+
+		/* FLOW_TNL_F_KEY | FLOW_TNL_F_DO_NOT_FRAGMENT */
+		info->tunnel.tun_flags = (1 << 3) | (1 << 1);
+
+		info->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;
+		info->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;
+
+		info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;
+		/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */
+		info->group_id = 0;
+
+#ifdef DEBUGGING
+		_print_tunnel(&info->tunnel);
+#endif
+
+		return 0;
+	}
+	return -EINVAL; /* Supported, but no hit found */
+}
+
+/*
+ * rte_flow_ops .tunnel_action_decap_release callback.
+ * The action list handed out by ntnic_tunnel_decap_set is static, so
+ * there is nothing to free; always succeeds.
+ */
+static int
+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,
+				  struct rte_flow_action *pmd_actions _unused,
+				  uint32_t num_of_actions _unused,
+				  struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/*
+ * rte_flow_ops .tunnel_item_release callback.
+ * The item handed out by ntnic_tunnel_match is static, so there is
+ * nothing to free; always succeeds.
+ */
+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,
+				     struct rte_flow_item *pmd_items _unused,
+				     uint32_t num_of_items _unused,
+				     struct rte_flow_error *err _unused)
+{
+#ifdef DEBUGGING
+	NT_LOG(DBG, FILTER, "%s: [%s:%u] start\n", __func__, __FILE__, __LINE__);
+#endif
+	return 0;
+}
+
+/* rte_flow_ops vtable exported by this PMD; .flush is not implemented. */
+const struct rte_flow_ops _dev_flow_ops = {
+	.validate = eth_flow_validate,
+	.create = eth_flow_create,
+	.destroy = eth_flow_destroy,
+	.flush = NULL,
+	.query = eth_flow_query,
+	.tunnel_decap_set = ntnic_tunnel_decap_set,
+	.tunnel_match = ntnic_tunnel_match,
+	.get_restore_info = ntnic_get_restore_info,
+	.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,
+	.tunnel_item_release = ntnic_tunnel_item_release
+
+};
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
new file mode 100644
index 0000000000..cf4207e5de
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* NOTE(review): guard name uses a reserved identifier (leading double
+ * underscore); a non-reserved name would be preferable.
+ */
+#ifndef __NTNIC_FILTER_H__
+#define __NTNIC_FILTER_H__
+
+/*
+ * Program a flow already converted to the driver's cnv_* representation.
+ * Defined in ntnic_filter.c; returns the created flow or NULL with
+ * 'error' set.
+ */
+struct rte_flow *
+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,
+		   struct cnv_attr_s *attribute, struct cnv_match_s *match,
+		   struct cnv_action_s *action, uint32_t flow_stat_id,
+		   struct rte_flow_error *error);
+
+#endif /* __NTNIC_FILTER_H__ */
diff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c
new file mode 100644
index 0000000000..a8eff76528
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <ntlog.h>
+#include <flow_api.h>
+
+#include "ntnic_hshconfig.h"
+
+#include <rte_ethdev.h>
+#include <nthw_helper.h>
+
+/* Generic pair of 64-bit values, used below as (RTE flag, NT flag). */
+struct pair_uint64_t {
+	uint64_t first;
+	uint64_t second;
+};
+
+/* Expands to a { RTE_<name>, NT_<name> } pair entry for the table below. */
+#define PAIR_NT(name)                 \
+	{                             \
+		RTE_##name, NT_##name \
+	}
+
+/*
+ * Translation table between DPDK RSS hash-field flags and the
+ * corresponding Napatech flags. Flags absent from this table are
+ * unsupported by the NT hardware.
+ */
+struct pair_uint64_t rte_eth_rss_to_nt[] = {
+	PAIR_NT(ETH_RSS_IPV4),
+	PAIR_NT(ETH_RSS_FRAG_IPV4),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),
+	PAIR_NT(ETH_RSS_IPV6),
+	PAIR_NT(ETH_RSS_FRAG_IPV6),
+	PAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),
+	PAIR_NT(ETH_RSS_IPV6_EX),
+	PAIR_NT(ETH_RSS_C_VLAN),
+	PAIR_NT(ETH_RSS_L3_DST_ONLY),
+	PAIR_NT(ETH_RSS_L3_SRC_ONLY),
+	PAIR_NT(ETH_RSS_LEVEL_OUTERMOST),
+	PAIR_NT(ETH_RSS_LEVEL_INNERMOST),
+};
+
+/*
+ * Look up the NT RSS flag for a single RTE RSS flag via linear scan of
+ * rte_eth_rss_to_nt. Returns a pointer to the NT flag, or NULL if the
+ * RTE flag is not supported.
+ */
+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->first == rte_flag)
+			return &p->second;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Reverse lookup of rte_to_nt_rss_flag(): map an NT RSS flag back to
+ * its RTE counterpart. Returns NULL if no table entry matches.
+ */
+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)
+{
+	const struct pair_uint64_t *start = rte_eth_rss_to_nt;
+
+	for (const struct pair_uint64_t *p = start;
+			p != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {
+		if (p->second == nt_flag)
+			return &p->first;
+	}
+	return NULL; /* NOT found */
+}
+
+/*
+ * Convert a DPDK RSS hash-field bitmask into the NT representation.
+ *
+ * Each set bit is translated individually through rte_to_nt_rss_flag();
+ * unsupported bits are logged and dropped, so the result may cover only
+ * a subset of the requested fields.
+ *
+ * Fix vs. original: use standard 'unsigned int' instead of the
+ * non-ISO-C 'uint' typedef, and '%u' for the unsigned bit index.
+ */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)
+{
+	struct nt_eth_rss res = { 0 };
+
+	for (unsigned int i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {
+		uint64_t rte_bit = (UINT64_C(1) << i);
+
+		if (rte_hash_bits & rte_bit) {
+			const uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);
+
+			if (!nt_bit_p) {
+				NT_LOG(ERR, ETHDEV,
+				       "RSS hash function field number %u is not supported. Only supported fields will be used in RSS hash function.",
+				       i);
+			} else {
+				res.fields |= *nt_bit_p;
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Convert an NT RSS field set back into the DPDK bitmask.
+ *
+ * NT flags form a subset of the RTE flags, so every set bit must map;
+ * the assert documents that invariant.
+ *
+ * Fix vs. original: use standard 'unsigned int' instead of the
+ * non-ISO-C 'uint' typedef.
+ */
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)
+{
+	uint64_t res = 0;
+
+	for (unsigned int i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {
+		uint64_t nt_bit = (UINT64_C(1) << i);
+
+		if (nt_hsh.fields & nt_bit) {
+			const uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);
+
+			assert(rte_bit_p &&
+			       "All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options");
+			res |= *rte_bit_p;
+		}
+	}
+
+	return res;
+}
diff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h
new file mode 100644
index 0000000000..d4d7337d23
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_hshconfig.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/* Fix vs. original: add the missing include guard (sibling headers in
+ * this driver, e.g. ntnic_filter.h, are guarded).
+ */
+#ifndef NTNIC_HSHCONFIG_H
+#define NTNIC_HSHCONFIG_H
+
+#include <flow_api.h>
+
+/* Mapping from dpdk rss hash defines to nt hash defines */
+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);
+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);
+
+#endif /* NTNIC_HSHCONFIG_H */
diff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c
new file mode 100644
index 0000000000..027ae073dd
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.c
@@ -0,0 +1,811 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_meter.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "ntdrv_4ga.h"
+#include "nthw_fpga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_meter.h"
+#include "ntlog.h"
+
+/*
+ *******************************************************************************
+ * Vswitch metering
+ *******************************************************************************
+ */
+
+/* MSB of a 32-bit id; set bit marks an id as referring to egress. */
+static const uint32_t highest_bit_mask = (~(~0u >> 1));
+
+/*
+ * Linear search of the per-device meter profile list.
+ * Returns the profile with the given id, or NULL if not present.
+ */
+static struct nt_mtr_profile *
+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)
+{
+	struct nt_mtr_profile *profile = NULL;
+
+	LIST_FOREACH(profile, &dev_priv->mtr_profiles, next)
+	if (profile->profile_id == meter_profile_id)
+		break;
+
+	return profile;
+}
+
+/*
+ * rte_mtr_ops .meter_profile_add callback (vswitch path).
+ *
+ * Accepts the profile only for virtual ports or egress ids (MSB set in
+ * meter_profile_id); ingress metering on physical ports is unsupported.
+ * The profile is copied and linked into the per-device list.
+ * Returns 0 or a negative rte_mtr error.
+ */
+static int eth_meter_profile_add(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	/* MSB of the profile id selects egress (see highest_bit_mask). */
+	const bool is_egress = meter_profile_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {
+		struct nt_mtr_profile *prof;
+
+		prof = nt_mtr_profile_find(dev_priv, meter_profile_id);
+		if (prof)
+			return -rte_mtr_error_set(error, EEXIST,
+						  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+						  NULL,
+						  "Profile id already exists\n");
+
+		prof = rte_zmalloc(NULL, sizeof(*prof), 0);
+		if (!prof) {
+			return -rte_mtr_error_set(error,
+						  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+						  NULL, NULL);
+		}
+
+		prof->profile_id = meter_profile_id;
+		memcpy(&prof->profile, profile,
+		       sizeof(struct rte_mtr_meter_profile));
+
+		LIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);
+
+		return 0;
+	}
+	/* Ingress is not possible yet on phy ports */
+	return -rte_mtr_error_set(error, EINVAL,
+		RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+		"Traffic ingress metering/policing is not supported on physical ports\n");
+}
+
+/*
+ * rte_mtr_ops .meter_profile_delete callback (vswitch path).
+ * Unlinks and frees the profile; ENODEV if the id is unknown.
+ * NOTE(review): no check that a meter still references the profile —
+ * confirm callers guarantee this.
+ */
+static int eth_meter_profile_delete(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	profile = nt_mtr_profile_find(dev_priv, meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
+	LIST_REMOVE(profile, next);
+	rte_free(profile);
+	return 0;
+}
+
+/*
+ * Linear search of the per-device meter list.
+ * Returns the meter with the given id, or NULL if not present.
+ */
+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,
+				  uint32_t mtr_id)
+{
+	struct nt_mtr *mtr = NULL;
+
+	LIST_FOREACH(mtr, &dev_priv->mtrs, next)
+	if (mtr->mtr_id == mtr_id)
+		break;
+
+	return mtr;
+}
+
+/* Fixed-point rate: integer part plus fractional part in 1/1024 units. */
+struct qos_integer_fractional {
+	uint32_t integer;
+	uint32_t fractional; /* 1/1024 */
+};
+
+/*
+ * Converts byte/s to byte/period in form of integer + 1/1024*fractional.
+ * The period depends on the clock frequency and other parameters which
+ * being combined give the multiplier. The resulting formula is:
+ *     f[bytes/period] = x[byte/s] * period_ps / 10^12
+ * (period_ps is the period length in picoseconds, hence the 10^12
+ * divisor converting picoseconds to seconds.)
+ */
+static struct qos_integer_fractional
+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)
+{
+	struct qos_integer_fractional res;
+	const uint64_t dividend = byte_per_second * period_ps;
+	const uint64_t divisor = 1000000000000ull; /*10^12 pico second*/
+
+	res.integer = dividend / divisor;
+	const uint64_t reminder = dividend % divisor;
+
+	/* Scale the remainder into 1/1024 units. */
+	res.fractional = 1024ull * reminder / divisor;
+	return res;
+}
+
+/* Rate conversion for physical ports: period is 8 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);
+}
+
+/* Rate conversion for virtual ports: period is 512 * 3333 ps. */
+static struct qos_integer_fractional
+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)
+{
+	return byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);
+}
+
+/*
+ * rte_mtr_ops .meter_enable callback (vswitch path).
+ *
+ * Programs the meter's profile into hardware:
+ *  - egress ids (MSB set in the profile id): EPP vport/txp QoS,
+ *    converted to the fixed-point byte/period rate;
+ *  - ingress: DBS TX QoS, with a one-time global refresh-rate setup.
+ * Returns 0 or a negative rte_mtr error.
+ */
+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	int res;
+	/* One-time ingress global QoS rate setup flag (per process). */
+	static int ingress_initial;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	/*
+	 *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used
+	 *   rfc4115.cir = rfc2697.cir
+	 *   rfc4115.eir = rfc2697.cir
+	 *   rfc4115.cbs = rfc2697.cbs
+	 *   rfc4115.ebs = rfc2697.ebs
+	 */
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	if (!mtr->profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Meter profile id not found\n");
+	}
+
+	const uint32_t profile_id = mtr->profile->profile_id;
+	const bool is_egress = profile_id & highest_bit_mask;
+	uint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+		struct qos_integer_fractional cir = { 0 };
+
+		if (is_virtual) {
+			cir =
+			byte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);
+			/* Round a zero rate up to the smallest non-zero step. */
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,
+						  cir.integer, cir.fractional,
+						  burst);
+		} else {
+			cir =
+				byte_per_second_to_physical_qo_s_ri(mtr->profile->profile
+								    .srtcm_rfc2697.cir);
+			if (cir.integer == 0 && cir.fractional == 0)
+				cir.fractional = 1;
+			res = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,
+						cir.integer, cir.fractional,
+						burst);
+		}
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Applying meter profile for setting egress policy failed\n");
+		}
+	} else {
+		if (!ingress_initial) {
+			/*
+			 * based on a 250Mhz FPGA
+			 * _update refresh rate interval calculation:
+			 * multiplier / (divider * 4ns)
+			 * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns
+			 *
+			 * results in resolution of IR is 1Mbps
+			 */
+			res = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);
+
+			if (res) {
+				return -rte_mtr_error_set(error, EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					"Applying meter profile for setting ingress "
+					"global QoS rate failed\n");
+			}
+			ingress_initial = 1;
+		}
+
+		if (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {
+			/* max burst 1,074Mb (27 bits) */
+			mtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;
+		}
+		/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */
+		res = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+					     1, /* enable */
+					     mtr->profile->profile.srtcm_rfc2697.cir /
+					     125000,
+					     mtr->profile->profile.srtcm_rfc2697
+					     .cbs); /* BS - burst size in Bytes */
+		if (res) {
+			return -rte_mtr_error_set(error, EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL, "Applying meter profile failed\n");
+		}
+	}
+	return 0;
+}
+
+/* Disable ingress TX QoS for this port by zeroing the DBS QoS config. */
+static void disable(struct pmd_internals *dev_priv)
+{
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_dbs_t *p_nthw_dbs =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;
+	nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */
+			       0, /* disable */
+			       0, /* IR */
+			       0); /* BS */
+}
+
+/*
+ * rte_mtr_ops .meter_disable callback (vswitch path).
+ * Clears the hardware rate config: EPP vport/txp QoS for egress ids
+ * (MSB set in mtr_id), DBS QoS for ingress. Returns 0 or a negative
+ * rte_mtr error if the meter id is unknown.
+ */
+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+			     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	if (!mtr) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id not found\n");
+	}
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	return 0;
+}
+
+/* MTR object create */
+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+			  struct rte_mtr_params *params, int shared,
+			  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr = NULL;
+	struct nt_mtr_profile *profile;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {
+		NT_LOG(ERR, NTHW,
+		       "ERROR try to create ingress meter object on a phy port. Not supported\n");
+
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Traffic ingress metering/policing is not supported on physical ports\n");
+	}
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (mtr)
+		return -rte_mtr_error_set(error, EEXIST,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id already exists\n");
+
+	profile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);
+	if (!profile) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+	}
+
+	mtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENOMEM,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  NULL);
+
+	mtr->shared = shared;
+	mtr->mtr_id = mtr_id;
+	mtr->profile = profile;
+	LIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);
+
+	if (params->meter_enable)
+		return eth_meter_enable(dev, mtr_id, error);
+
+	return 0;
+}
+
+/* MTR object destroy */
+/*
+ * rte_mtr_ops .destroy callback (vswitch path).
+ * Clears the hardware rate config (EPP for egress ids, DBS for
+ * ingress), then unlinks and frees the meter object.
+ *
+ * Fix vs. original: report ENODEV (not EEXIST) when the meter id does
+ * not exist, matching eth_meter_profile_delete's error convention.
+ */
+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+			   struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+	struct nt_mtr *mtr;
+
+	NT_LOG(DBG, NTHW, "%s: [%s:%u] adapter: " PCIIDENT_PRINT_STR "\n",
+	       __func__, __func__, __LINE__,
+	       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),
+	       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));
+
+	nthw_epp_t *p_nthw_epp =
+		dev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;
+
+	mtr = nt_mtr_find(dev_priv, mtr_id);
+	if (!mtr)
+		return -rte_mtr_error_set(error, ENODEV,
+					  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+					  "Meter id does not exist\n");
+
+	/* MSB of the meter id selects egress (see highest_bit_mask). */
+	const bool is_egress = mtr_id & highest_bit_mask;
+
+	if (is_egress) {
+		const bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);
+
+		if (is_virtual)
+			nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+		else
+			nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);
+	} else {
+		disable(dev_priv);
+	}
+	LIST_REMOVE(mtr, next);
+	rte_free(mtr);
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Inline FLM metering
+ *******************************************************************************
+ */
+
+/*
+ * rte_mtr_ops .capabilities_get callback (inline FLM path).
+ * Fills 'cap' with the FLM meter limits: trTCM RFC2698 only,
+ * color-blind, byte mode only, green-only statistics.
+ * Returns 0, or a negative rte_mtr error if the device has no
+ * metering support.
+ */
+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,
+		struct rte_mtr_capabilities *cap,
+		struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (!flow_mtr_supported(dev_priv->flw_dev)) {
+		return -rte_mtr_error_set(error, EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Ethernet device does not support metering\n");
+	}
+
+	memset(cap, 0x0, sizeof(struct rte_mtr_capabilities));
+
+	/* MBR records use 28-bit integers */
+	cap->n_max = flow_mtr_meters_supported();
+	cap->n_shared_max = cap->n_max;
+
+	cap->identical = 0;
+	cap->shared_identical = 0;
+
+	cap->shared_n_flows_per_mtr_max = UINT32_MAX;
+
+	/* Limited by number of MBR record ids per FLM learn record */
+	cap->chaining_n_mtrs_per_flow_max = 4;
+
+	cap->chaining_use_prev_mtr_color_supported = 0;
+	cap->chaining_use_prev_mtr_color_enforced = 0;
+
+	/* 12-bit mantissa shifted by up to 15 bits, scaled by 1099. */
+	cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;
+
+	cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+			  RTE_MTR_STATS_N_BYTES_GREEN;
+
+	/* Only color-blind mode is supported */
+	cap->color_aware_srtcm_rfc2697_supported = 0;
+	cap->color_aware_trtcm_rfc2698_supported = 0;
+	cap->color_aware_trtcm_rfc4115_supported = 0;
+
+	/* Focused on RFC2698 for now */
+	cap->meter_srtcm_rfc2697_n_max = 0;
+	cap->meter_trtcm_rfc2698_n_max = cap->n_max;
+	cap->meter_trtcm_rfc4115_n_max = 0;
+
+	cap->meter_policy_n_max = flow_mtr_meter_policy_n_max();
+
+	/* Byte mode is supported */
+	cap->srtcm_rfc2697_byte_mode_supported = 0;
+	cap->trtcm_rfc2698_byte_mode_supported = 1;
+	cap->trtcm_rfc4115_byte_mode_supported = 0;
+
+	/* Packet mode not supported */
+	cap->srtcm_rfc2697_packet_mode_supported = 0;
+	cap->trtcm_rfc2698_packet_mode_supported = 0;
+	cap->trtcm_rfc4115_packet_mode_supported = 0;
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_add for the inline (FLM) profile.
+ *
+ * Accepts only RFC 2698 trTCM profiles in byte mode whose committed and
+ * peak rate/burst pairs are equal, and forwards them to the flow layer.
+ *
+ * Returns 0 on success, or a negative rte_mtr error on validation or
+ * offload failure.
+ */
+static int
+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,
+				 uint32_t meter_profile_id,
+				 struct rte_mtr_meter_profile *profile,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (profile->packet_mode != 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,
+					  "Profile packet mode not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_SRTCM_RFC2697) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 2697 not supported\n");
+	}
+
+	if (profile->alg == RTE_MTR_TRTCM_RFC4115) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL, "RFC 4115 not supported\n");
+	}
+
+	if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||
+			profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,
+					  "Profile committed and peak rates must be equal\n");
+	}
+
+	int res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,
+				       profile->trtcm_rfc2698.cir,
+				       profile->trtcm_rfc2698.cbs, 0, 0);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE,
+					  NULL,
+					  "Profile could not be added.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_profile_delete for the inline (FLM) profile.
+ *
+ * Resets the profile slot to all-zero parameters (the flow layer has no
+ * separate "delete" primitive for profiles).
+ */
+static int
+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev,
+				    uint32_t meter_profile_id,
+				    struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_add for the inline (FLM) profile.
+ *
+ * Validates that the policy is the only shape HW supports — GREEN
+ * passes (END, VOID+END or PASSTHRU+END), YELLOW and RED drop — and
+ * registers it with the flow layer.
+ */
+static int
+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,
+				struct rte_mtr_meter_policy_params *policy,
+				struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	/* GREEN: packets must be forwarded unmodified. */
+	const struct rte_flow_action *actions =
+			policy->actions[RTE_COLOR_GREEN];
+	int green_action_supported =
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||
+		(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&
+		 actions[1].type == RTE_FLOW_ACTION_TYPE_END);
+
+	/* YELLOW: only DROP is accepted. */
+	actions = policy->actions[RTE_COLOR_YELLOW];
+	int yellow_action_supported =
+		actions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	/* RED: only DROP is accepted. */
+	actions = policy->actions[RTE_COLOR_RED];
+	int red_action_supported = actions[0].type ==
+				   RTE_FLOW_ACTION_TYPE_DROP &&
+				   actions[1].type == RTE_FLOW_ACTION_TYPE_END;
+
+	if (green_action_supported == 0 || yellow_action_supported == 0 ||
+			red_action_supported == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Unsupported meter policy actions\n");
+	}
+
+	if (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+					  "Policy could not be added\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr meter_policy_delete for the inline (FLM) profile.
+ *
+ * Only validates the id — the flow layer keeps no per-policy state that
+ * needs tearing down here.
+ */
+static int
+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,
+				   uint32_t policy_id,
+				   struct rte_mtr_error *error)
+{
+	if (policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	return 0;
+}
+
+/*
+ * rte_mtr create for the inline (FLM) profile.
+ *
+ * Enforces the HW constraints (color-blind, shared, always-enabled,
+ * green-only stats) and then offloads the meter to the flow layer.
+ */
+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				 struct rte_mtr_params *params, int shared,
+				 struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only color blind mode is supported\n");
+	}
+
+	uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |
+				      RTE_MTR_STATS_N_BYTES_GREEN;
+	if ((params->stats_mask & ~allowed_stats_mask) != 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Requested color stats not supported\n");
+	}
+
+	if (params->meter_enable == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Disabled meters not supported\n");
+	}
+
+	if (shared == 0) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Only shared mtrs are supported\n");
+	}
+
+	/* NOTE(review): profile ids are bounded by the *policy* max here and
+	 * in profile_add — presumably profiles and policies share an id
+	 * space in this FPGA; confirm against the flow layer.
+	 */
+	if (params->meter_profile_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id out of range\n");
+
+	if (params->meter_policy_id >= flow_mtr_meter_policy_n_max())
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+					  NULL, "Policy id out of range\n");
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	int res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,
+					params->meter_profile_id,
+					params->meter_policy_id,
+					params->stats_mask);
+
+	if (res) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr destroy for the inline (FLM) profile.
+ *
+ * Validates the id and asks the flow layer to remove the offloaded
+ * meter from hardware.
+ */
+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				  struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to offload to hardware\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_update for the inline (FLM) profile, repurposed as a
+ * bucket-adjust operation: bit 63 of the value must be set as an opt-in
+ * marker, the low 32 bits carry the adjustment applied to the meter's
+ * MBR bucket.
+ */
+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				       uint64_t adjust_value,
+				       struct rte_mtr_error *error)
+{
+	/* Bit 63 flags "this is an adjustment", not a stats mask. */
+	const uint64_t adjust_bit = 1ULL << 63;
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	if ((adjust_value & adjust_bit) == 0) {
+		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+			"To adjust a MTR bucket value, bit 63 of \"stats_mask\" must be 1\n");
+	}
+
+	/* Strip the marker bit; the payload must fit in 32 bits. */
+	adjust_value &= adjust_bit - 1;
+
+	if (adjust_value > (uint64_t)UINT32_MAX) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "Adjust value is out of range\n");
+	}
+
+	if (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,
+				 (uint32_t)adjust_value)) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Failed to adjust offloaded MTR\n");
+	}
+
+	return 0;
+}
+
+/*
+ * rte_mtr stats_read for the inline (FLM) profile.
+ *
+ * Zeroes the output struct, then fills only the GREEN packet/byte
+ * counters from the flow layer; "clear" resets the HW counters after
+ * reading.
+ */
+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,
+				     struct rte_mtr_stats *stats,
+				     uint64_t *stats_mask, int clear,
+				     struct rte_mtr_error *error)
+{
+	struct pmd_internals *dev_priv = dev->data->dev_private;
+
+	if (mtr_id >= flow_mtr_meters_supported()) {
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,
+					  "MTR id is out of range\n");
+	}
+
+	memset(stats, 0x0, sizeof(struct rte_mtr_stats));
+	flm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,
+			   &stats->n_pkts[RTE_COLOR_GREEN],
+			   &stats->n_bytes[RTE_COLOR_GREEN], clear);
+
+	return 0;
+}
+
+/*
+ *******************************************************************************
+ * Ops setup
+ *******************************************************************************
+ */
+
+/* rte_mtr ops for the vswitch FPGA profile (EPP-based metering). */
+static const struct rte_mtr_ops mtr_ops_vswitch = {
+	.meter_profile_add = eth_meter_profile_add,
+	.meter_profile_delete = eth_meter_profile_delete,
+	.create = eth_mtr_create,
+	.destroy = eth_mtr_destroy,
+	.meter_enable = eth_meter_enable,
+	.meter_disable = eth_meter_disable,
+};
+
+/* rte_mtr ops for the inline FPGA profile (FLM-based metering). */
+static const struct rte_mtr_ops mtr_ops_inline = {
+	.capabilities_get = eth_mtr_capabilities_get_inline,
+	.meter_profile_add = eth_mtr_meter_profile_add_inline,
+	.meter_profile_delete = eth_mtr_meter_profile_delete_inline,
+	.create = eth_mtr_create_inline,
+	.destroy = eth_mtr_destroy_inline,
+	.meter_policy_add = eth_mtr_meter_policy_add_inline,
+	.meter_policy_delete = eth_mtr_meter_policy_delete_inline,
+	.stats_update = eth_mtr_stats_adjust_inline,
+	.stats_read = eth_mtr_stats_read_inline,
+};
+
+/*
+ * rte_eth mtr_ops_get callback: select the rte_mtr ops table matching
+ * the adapter's FPGA profile (vswitch or inline).
+ *
+ * "ops" receives a pointer to the chosen const struct rte_mtr_ops.
+ * Returns 0 on success, -1 for unsupported/unknown profiles.
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
+{
+	struct pmd_internals *internals =
+		(struct pmd_internals *)dev->data->dev_private;
+	ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;
+	enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;
+
+	switch (profile) {
+	case FPGA_INFO_PROFILE_VSWITCH:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;
+		break;
+	case FPGA_INFO_PROFILE_INLINE:
+		*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;
+		break;
+	case FPGA_INFO_PROFILE_UNKNOWN:
+	/* fallthrough */
+	case FPGA_INFO_PROFILE_CAPTURE:
+	/* fallthrough */
+	default:
+		NT_LOG(ERR, NTHW,
+		       "" PCIIDENT_PRINT_STR
+		       ": fpga profile not supported [%s:%u]\n",
+		       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
+		       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
+		       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
+		       __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h
new file mode 100644
index 0000000000..9484c9ee20
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_meter.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __NTNIC_METER_H__
+#define __NTNIC_METER_H__
+
+/*
+ * rte_eth mtr_ops_get callback: store the profile-specific
+ * const struct rte_mtr_ops pointer into "ops".  Returns 0 on success,
+ * -1 if the FPGA profile has no metering support.
+ * (The definition uses "dev", so the prototype must not mark it
+ * __rte_unused.)
+ */
+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
+
+#endif /* __NTNIC_METER_H__ */
diff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c
new file mode 100644
index 0000000000..6372514527
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <linux/virtio_net.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <vhost.h>
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_ethdev.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vfio.h"
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_PORTS 128UL
+
+/* Per-port vDPA bookkeeping, one slot per registered vhost socket. */
+struct vdpa_port {
+	char ifname[MAX_PATH_LEN];	/* vhost-user socket path; "" = free slot */
+	struct rte_vdpa_device *vdev;	/* registered vDPA device handle */
+	int vid;			/* vhost connection id, set on new_device */
+	uint32_t index;			/* base HW ring index for this port */
+	uint32_t host_id;		/* VF number */
+	uint32_t rep_port;		/* in-port override applied on Tx */
+	int rxqs;			/* number of configured Rx queues */
+	int txqs;			/* number of configured Tx queues */
+	uint64_t flags;			/* rte_vhost_driver_register flags */
+	struct rte_pci_addr addr;	/* PCI address of the backing VF */
+};
+
+static struct vdpa_port vport[MAX_VDPA_PORTS];
+static uint32_t nb_vpda_devcnt;
+
+static int nthw_vdpa_start(struct vdpa_port *vport);
+
+/*
+ * Resolve a vDPA device + queue id to its HW ring index, host (VF) id
+ * and representor port.  "rx" selects the Rx or Tx queue range for the
+ * bounds check.
+ *
+ * Returns 0 on success, -1 if the device is unknown or the queue id is
+ * outside the configured range.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port)
+{
+	uint32_t i;
+
+	for (i = 0; i < nb_vpda_devcnt; i++) {
+		if (vport[i].vdev == vdpa_dev) {
+			if (rx) {
+				if (queue_id >= vport[i].rxqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].rxqs);
+					return -1;
+				}
+			} else {
+				/* Tx path: check and report the Tx queue count
+				 * (the original logged rxqs here by mistake).
+				 */
+				if (queue_id >= vport[i].txqs) {
+					NT_LOG(ERR, VDPA,
+					       "Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, txqs %i\n",
+					       __func__, vdpa_dev, queue_id,
+					       vport[i].txqs);
+					return -1;
+				}
+			}
+
+			/* Rx and Tx queues share the same base HW index. */
+			*hw_index = vport[i].index + queue_id;
+			*host_id = vport[i].host_id;
+			*rep_port = vport[i].rep_port;
+			return 0;
+		}
+	}
+
+	NT_LOG(ERR, VDPA,
+	       "Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\n",
+	       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);
+	return -1;
+}
+
+/*
+ * Register and start a vDPA port for the given VF.
+ *
+ * Fills the next free slot of the global vport[] table, registers the
+ * vhost-user socket and starts the driver.  "*vhid" receives the slot
+ * index on success.  Returns 0 on success, -1 on failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname _unused, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid)
+{
+	int ret;
+	uint32_t host_id = nt_vfio_vf_num(vdev);
+
+	struct rte_vdpa_device *vdpa_dev =
+		rte_vdpa_find_device_by_name(vdev->name);
+	if (!vdpa_dev) {
+		NT_LOG(ERR, VDPA, "vDPA device with name %s - not found\n",
+		       vdev->name);
+		return -1;
+	}
+
+	/* Guard the fixed-size table before writing the new slot. */
+	if (nb_vpda_devcnt >= MAX_VDPA_PORTS) {
+		NT_LOG(ERR, VDPA, "Too many vDPA ports (max %lu)\n",
+		       MAX_VDPA_PORTS);
+		return -1;
+	}
+
+	vport[nb_vpda_devcnt].vdev = vdpa_dev;
+	vport[nb_vpda_devcnt].host_id = host_id; /* VF # */
+	vport[nb_vpda_devcnt].index = index; /* HW ring index */
+	vport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */
+	vport[nb_vpda_devcnt].rxqs = rxqs;
+	vport[nb_vpda_devcnt].txqs = txqs;
+	vport[nb_vpda_devcnt].addr = vdev->addr;
+
+	vport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;
+	strlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA,
+	       "vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\n",
+	       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,
+	       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);
+
+	ret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);
+
+	*vhid = nb_vpda_devcnt;
+	nb_vpda_devcnt++;
+	return ret;
+}
+
+/*
+ * Detach and unregister the first still-active vDPA port, then mark its
+ * slot free.
+ *
+ * NOTE(review): the function returns after handling a single port even
+ * though it scans the whole table — each call closes at most one port.
+ * Confirm whether the caller invokes this in a loop or whether closing
+ * all ports in one call was intended.
+ */
+void nthw_vdpa_close(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (vport[i].ifname[0] != '\0') {
+			int ret;
+			char *socket_path = vport[i].ifname;
+
+			ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "detach vdpa device failed: %s\n",
+				       socket_path);
+			}
+
+			ret = rte_vhost_driver_unregister(socket_path);
+			if (ret != 0) {
+				NT_LOG(ERR, VDPA,
+				       "Fail to unregister vhost driver for %s.\n",
+				       socket_path);
+			}
+
+			/* Empty ifname marks the slot as free. */
+			vport[i].ifname[0] = '\0';
+			return;
+		}
+	}
+}
+
+#ifdef DUMP_VIRTIO_FEATURES
+/* Bit number for VIRTIO_F_NOTIFICATION_DATA (not in older kernel headers). */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+#define NUM_FEATURES 40
+/* Debug-only lookup table: virtio feature bit -> printable name. */
+struct {
+	uint64_t id;
+	const char *name;
+} virt_features[NUM_FEATURES] = {
+	{ VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" },
+	{ VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" },
+	{	VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
+		"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"
+	},
+	{ VIRTIO_NET_F_MTU, "  VIRTIO_NET_F_MTU" },
+	{ VIRTIO_NET_F_MAC, "  VIRTIO_NET_F_MAC" },
+	{ VIRTIO_NET_F_GSO, "  VIRTIO_NET_F_GSO" },
+	{ VIRTIO_NET_F_GUEST_TSO4, "  VIRTIO_NET_F_GUEST_TSO4" },
+	{ VIRTIO_NET_F_GUEST_TSO6, "  VIRTIO_NET_F_GUEST_TSO6" },
+	{ VIRTIO_NET_F_GUEST_ECN, "  VIRTIO_NET_F_GUEST_ECN" },
+	{ VIRTIO_NET_F_GUEST_UFO, "  VIRTIO_NET_F_GUEST_UFO" },
+	{ VIRTIO_NET_F_HOST_TSO4, "  VIRTIO_NET_F_HOST_TSO4" },
+	{ VIRTIO_NET_F_HOST_TSO6, "  VIRTIO_NET_F_HOST_TSO6" },
+	{ VIRTIO_NET_F_HOST_ECN, "  VIRTIO_NET_F_HOST_ECN" },
+	{ VIRTIO_NET_F_HOST_UFO, "  VIRTIO_NET_F_HOST_UFO" },
+	{ VIRTIO_NET_F_MRG_RXBUF, "  VIRTIO_NET_F_MRG_RXBUF" },
+	{ VIRTIO_NET_F_STATUS, "  VIRTIO_NET_F_STATUS" },
+	{ VIRTIO_NET_F_CTRL_VQ, "  VIRTIO_NET_F_CTRL_VQ" },
+	{ VIRTIO_NET_F_CTRL_RX, "  VIRTIO_NET_F_CTRL_RX" },
+	{ VIRTIO_NET_F_CTRL_VLAN, "  VIRTIO_NET_F_CTRL_VLAN" },
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "  VIRTIO_NET_F_CTRL_RX_EXTRA" },
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "  VIRTIO_NET_F_GUEST_ANNOUNCE" },
+	{ VIRTIO_NET_F_MQ, "  VIRTIO_NET_F_MQ" },
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "  VIRTIO_NET_F_CTRL_MAC_ADDR" },
+	{ VIRTIO_NET_F_HASH_REPORT, "  VIRTIO_NET_F_HASH_REPORT" },
+	{ VIRTIO_NET_F_RSS, "  VIRTIO_NET_F_RSS" },
+	{ VIRTIO_NET_F_RSC_EXT, "  VIRTIO_NET_F_RSC_EXT" },
+	{ VIRTIO_NET_F_STANDBY, "  VIRTIO_NET_F_STANDBY" },
+	{ VIRTIO_NET_F_SPEED_DUPLEX, "  VIRTIO_NET_F_SPEED_DUPLEX" },
+	{ VIRTIO_F_NOTIFY_ON_EMPTY, "  VIRTIO_F_NOTIFY_ON_EMPTY" },
+	{ VIRTIO_F_ANY_LAYOUT, "  VIRTIO_F_ANY_LAYOUT" },
+	{ VIRTIO_RING_F_INDIRECT_DESC, "  VIRTIO_RING_F_INDIRECT_DESC" },
+	{ VIRTIO_F_VERSION_1, "  VIRTIO_F_VERSION_1" },
+	{ VIRTIO_F_IOMMU_PLATFORM, "  VIRTIO_F_IOMMU_PLATFORM" },
+	{ VIRTIO_F_RING_PACKED, "  VIRTIO_F_RING_PACKED" },
+	{ VIRTIO_TRANSPORT_F_START, "  VIRTIO_TRANSPORT_F_START" },
+	{ VIRTIO_TRANSPORT_F_END, "  VIRTIO_TRANSPORT_F_END" },
+	{ VIRTIO_F_IN_ORDER, "  VIRTIO_F_IN_ORDER" },
+	{ VIRTIO_F_ORDER_PLATFORM, "  VIRTIO_F_ORDER_PLATFORM" },
+	{ VIRTIO_F_NOTIFICATION_DATA, "  VIRTIO_F_NOTIFICATION_DATA" },
+};
+
+/* Print the name of every virtio feature bit set in "features". */
+static void dump_virtio_features(uint64_t features)
+{
+	int idx;
+
+	for (idx = 0; idx < NUM_FEATURES; idx++) {
+		uint64_t bit = 1ULL << virt_features[idx].id;
+
+		if (features & bit)
+			printf("Virtio feature: %s\n", virt_features[idx].name);
+	}
+}
+#endif
+
+/*
+ * vhost "new_device" callback: a guest connected on one of our sockets.
+ *
+ * Matches the connection to a vport slot by socket path, waits (up to
+ * ~2 s) for the PMD instance to be ready, then records the negotiated
+ * virtio ring mode (packed/split).  Requires IN_ORDER or RING_PACKED to
+ * run in vDPA mode; returns -1 otherwise.
+ */
+static int nthw_vdpa_new_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint64_t negotiated_features = 0;
+	unsigned int vhid = -1;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+
+	/* Find the vport slot registered with this socket path. */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			vport[vhid].vid = vid;
+			break;
+		}
+	}
+
+	if (vhid >= MAX_VDPA_PORTS)
+		return -1;
+
+	/* Poll up to 2000 * 1 ms for the ethdev instance to come up. */
+	int max_loops = 2000;
+	struct pmd_internals *intern;
+
+	while ((intern = vp_vhid_instance_ready(vhid)) == NULL) {
+		usleep(1000);
+		if (--max_loops == 0) {
+			NT_LOG(INF, VDPA,
+			       "FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\n",
+			       ifname, vport[vhid].vdev->device->name);
+			return -1;
+		}
+	}
+
+	/* set link up on virtual port */
+	intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+
+	/* Store ifname (vhost_path) */
+	strlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);
+
+	NT_LOG(INF, VDPA, "New port %s, vDPA dev: %s\n", ifname,
+	       vport[vhid].vdev->device->name);
+	rte_vhost_get_negotiated_features(vid, &negotiated_features);
+	/* NOTE(review): "%016lx" assumes 64-bit long for a uint64_t —
+	 * PRIx64 would be portable; confirm target ABI assumptions.
+	 */
+	NT_LOG(INF, VDPA, "Virtio Negotiated features %016lx\n",
+	       negotiated_features);
+
+#ifdef DUMP_VIRTIO_FEATURES
+	dump_virtio_features(negotiated_features);
+#endif
+
+	if ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||
+			((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {
+		/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */
+		NT_LOG(INF, VDPA, "Running virtio in vDPA mode : %s  %s\n",
+		       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+		       "\"Packed-Ring\"" :
+		       "\"Split-Ring\"",
+		       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?
+		       "\"In-Order\"" :
+		       "\"No In-Order Requested\"");
+
+		intern->vport_comm =
+			(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?
+			VIRT_PORT_NEGOTIATED_PACKED :
+			VIRT_PORT_NEGOTIATED_SPLIT;
+	} else {
+		NT_LOG(ERR, VDPA, "Incompatible virtio negotiated features.\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * vhost "destroy_device" callback: the guest disconnected.
+ *
+ * First loop only logs the matching port; the second loop marks the
+ * corresponding PMD instance's virtual link down (NEGOTIATED_NONE).
+ */
+static void nthw_vdpa_destroy_device(int vid)
+{
+	char ifname[MAX_PATH_LEN];
+	uint32_t i;
+	unsigned int vhid;
+
+	rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+	for (i = 0; i < MAX_VDPA_PORTS; i++) {
+		if (strcmp(ifname, vport[i].ifname) == 0) {
+			NT_LOG(INF, VDPA, "\ndestroy port %s, vDPA dev: %s\n",
+			       ifname, vport[i].vdev->device->name);
+			break;
+		}
+	}
+
+	struct pmd_internals *intern;
+
+	/* set link down on virtual port */
+	for (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {
+		if (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {
+			intern = vp_vhid_instance_ready(vhid);
+			if (intern)
+				intern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;
+			break;
+		}
+	}
+}
+
+/* vhost-user connection lifecycle callbacks for our sockets. */
+static const struct rte_vhost_device_ops vdpa_devops = {
+	.new_device = nthw_vdpa_new_device,
+	.destroy_device = nthw_vdpa_destroy_device,
+};
+
+/*
+ * Register, configure and start the vhost-user driver for one port:
+ * register the socket (client mode per vport->flags), attach our
+ * lifecycle callbacks, mask out virtio features the HW path cannot
+ * offload (checksum/TSO/UFO/ctrl-vq/etc.), then start the driver.
+ *
+ * Returns 0 on success, -1 on any registration/start failure.
+ */
+static int nthw_vdpa_start(struct vdpa_port *vport)
+{
+	int ret;
+	char *socket_path = vport->ifname;
+
+	ret = rte_vhost_driver_register(socket_path, vport->flags);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver failed: %s\n", socket_path);
+		return -1;
+	}
+
+	ret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);
+	if (ret != 0) {
+		NT_LOG(ERR, VDPA, "register driver ops failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	/* Disable offload features the HW virtqueue path does not handle. */
+	ret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+						(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+						(1ULL << VIRTIO_NET_F_CSUM) |
+						(1ULL << VIRTIO_RING_F_EVENT_IDX) |
+						(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+						(1ULL << VIRTIO_NET_F_HOST_UFO) |
+						(1ULL << VIRTIO_NET_F_HOST_ECN) |
+						(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+						(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+						(1ULL << VIRTIO_NET_F_GUEST_UFO) |
+						(1ULL << VIRTIO_NET_F_GUEST_ECN) |
+						(1ULL << VIRTIO_NET_F_CTRL_VQ) |
+						(1ULL << VIRTIO_NET_F_CTRL_RX) |
+						(1ULL << VIRTIO_NET_F_GSO) |
+						(1ULL << VIRTIO_NET_F_MTU));
+
+	if (ret != 0) {
+		NT_LOG(INF, VDPA,
+		       "rte_vhost_driver_disable_features failed for vhost user client port: %s\n",
+		       socket_path);
+		return -1;
+	}
+
+	if (rte_vhost_driver_start(socket_path) < 0) {
+		NT_LOG(ERR, VDPA, "start vhost driver failed: %s\n",
+		       socket_path);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h
new file mode 100644
index 0000000000..7acc2c8e4b
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vdpa.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VDPA_H_
+#define _NTNIC_VDPA_H_
+
+#include <stdint.h>
+
+/* Resolve a vDPA device + queue id to HW ring index, VF id and
+ * representor port.  Returns 0 on success, -1 on unknown device or
+ * out-of-range queue id.
+ */
+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,
+				int queue_id, uint32_t *hw_index,
+				uint32_t *host_id, uint32_t *rep_port);
+
+/* Register and start a vDPA port for a VF; "*vhid" receives the port
+ * slot index.  Returns 0 on success, -1 on failure.
+ */
+int nthw_vdpa_init(const struct rte_pci_device *vdev,
+		   const char *backing_devname, const char *socket_path,
+		   uint32_t index, int rxqs, int txqs, uint32_t rep_port,
+		   int *vhid);
+
+/* Detach and unregister active vDPA port(s). */
+void nthw_vdpa_close(void);
+
+#endif /* _NTNIC_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c
new file mode 100644
index 0000000000..0724b040c3
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <rte_bus_pci.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "ntnic_ethdev.h"
+#include "ntnic_vf.h"
+#include "ntnic_vf_vdpa.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)
+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)
+
+/* Map a Napatech VF's PCI vendor/device id to a printable adapter name. */
+static const char *get_adapter_name(struct rte_pci_device *pci_dev)
+{
+	if (pci_dev->id.vendor_id != NT_HW_NAPATECH_PCI_VENDOR_ID)
+		return "Unknown";
+
+	switch (pci_dev->id.device_id) {
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:
+		return "NT200A02";
+	case NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:
+		return "NT50B01";
+	default:
+		return "Unknown";
+	}
+}
+
+/*
+ * PCI probe for Napatech VFs: create the vDPA device for the VF, then
+ * create the DPDK VF ethdev interface.  Returns 0 on success, -1 (or
+ * the interface-creation result) on failure.
+ */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	const char *adapter_name _unused = get_adapter_name(pci_dev);
+
+	NT_LOG(INF, VDPA, "Probe %s VF : %02x:%02x:%i\n", adapter_name,
+	       pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+	/* Create vDPA device for the virtual function interface.*/
+
+	if (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)
+		return -1;
+
+	return nthw_create_vf_interface_dpdk(pci_dev);
+}
+
+/*
+ * PCI remove for Napatech VFs: tear down the vDPA device first, then
+ * remove the DPDK VF ethdev interface.
+ */
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (ntvf_vdpa_pci_remove(pci_dev) != 0)
+		return -1;
+
+	return nthw_remove_vf_interface_dpdk(pci_dev);
+}
+
+/* PCI id table: Napatech NT200A02 / NT50B01 virtual functions. */
+static const struct rte_pci_id pci_id_nt_vf_map[] = {
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)
+	},
+	{	RTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,
+			       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)
+	},
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* PCI driver definition for the VF PMD; bound via vfio-pci. */
+static struct rte_pci_driver rte_nt_vf = {
+	.id_table = pci_id_nt_vf_map,
+	.drv_flags = 0,
+	.probe = nt_vf_pci_probe,
+	.remove = nt_vf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);
+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, "* vfio-pci");
diff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h
new file mode 100644
index 0000000000..84be3bd71f
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTNIC_VF_H_
+#define _NTNIC_VF_H_
+
+#include "rte_bus_pci.h"
+
+/* PCI probe/remove entry points for Napatech virtual functions. */
+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		    struct rte_pci_device *pci_dev);
+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);
+
+/* VFIO helpers keyed by VF number. */
+int get_container_fd(int vf_num);
+int close_vf_mem_mapping(int vf_num);
+
+#endif /* _NTNIC_VF_H_ */
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c
new file mode 100644
index 0000000000..4125bc50c9
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c
@@ -0,0 +1,1235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <linux/virtio_net.h>
+#include <linux/pci_regs.h>
+
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include <vhost.h>
+
+#include "ntdrv_4ga.h"
+#include "ntnic_ethdev.h"
+#include "ntnic_vdpa.h"
+#include "ntnic_vf_vdpa.h"
+#include "ntnic_vf.h"
+#include "ntnic_vfio.h"
+#include "ntnic_dbsconfig.h"
+#include "ntlog.h"
+
+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)
+#define NTVF_VDPA_MAX_INTR_VECTORS 8
+
+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \
+	((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \
+	 (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \
+	 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \
+	 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \
+	 (1ULL << VHOST_USER_PROTOCOL_F_MQ))
+
+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \
+	((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \
+	 (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \
+	 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL << VIRTIO_F_IN_ORDER) | \
+	 (1ULL << VIRTIO_F_RING_PACKED) |                                  \
+	 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \
+	 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);
+
+/* Guest-physical addresses and bookkeeping for one virtqueue. */
+struct vring_info {
+	uint64_t desc;
+	uint64_t avail;
+	uint64_t used;
+	uint16_t size;
+
+	uint16_t last_avail_idx;
+	uint16_t last_used_idx;
+
+	int vq_type;	/* 0 = Rx, 1 = Tx */
+	struct nthw_virt_queue *p_vq;
+
+	int enable;
+};
+
+/* Negotiated features and the full set of vrings (Rx/Tx pairs). */
+struct ntvf_vdpa_hw {
+	uint64_t negotiated_features;
+
+	uint8_t nr_vring;
+
+	struct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];
+};
+
+/* Per-VF driver state; 'lock' serializes datapath start/stop. */
+struct ntvf_vdpa_internal {
+	struct rte_pci_device *pdev;
+	struct rte_vdpa_device *vdev;
+
+	int vfio_container_fd;
+	int vfio_group_fd;
+	int vfio_dev_fd;
+
+	int vid;
+
+	uint32_t outport;
+
+	uint16_t max_queues;
+
+	uint64_t features;
+
+	struct ntvf_vdpa_hw hw;
+
+	/* state flags, accessed with relaxed atomics from several paths */
+	volatile int32_t started;
+	volatile int32_t dev_attached;
+	volatile int32_t running;
+
+	rte_spinlock_t lock;
+
+	volatile int32_t dma_mapped;
+	volatile int32_t intr_enabled;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NTVF_USED_RING_LEN(size) \
+	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
+
+/* NOTE(review): fixed IOVA base for mediated vrings — confirm it cannot
+ * collide with guest mappings.
+ */
+#define NTVF_MEDIATED_VRING 0x210000000000
+
+/* Global list of probed devices, guarded by internal_list_lock. */
+struct internal_list {
+	TAILQ_ENTRY(internal_list) next;
+	struct ntvf_vdpa_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+
+static struct internal_list_head internal_list =
+	TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+int ntvf_vdpa_logtype;
+
+/*
+ * Look up the internal_list entry whose vDPA device matches 'vdev'.
+ * Returns NULL when not found. The list lock is released before the
+ * entry is returned; callers rely on entries only disappearing via
+ * ntvf_vdpa_pci_remove().
+ */
+static struct internal_list *
+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
+{
+	int found = 0;
+	struct internal_list *list;
+
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next)
+	{
+		if (vdev == list->internal->vdev) {
+			found = 1;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (!found)
+		return NULL;
+
+	return list;
+}
+
+/*
+ * Look up the internal_list entry whose PCI device matches 'pdev'.
+ * Returns NULL when not found; same locking caveat as the vdev lookup.
+ */
+static struct internal_list *
+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)
+{
+	int found = 0;
+	struct internal_list *list;
+
+	NT_LOG(DBG, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next)
+	{
+		if (pdev == list->internal->pdev) {
+			found = 1;
+			break;
+		}
+	}
+
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (!found)
+		return NULL;
+
+	return list;
+}
+
+/*
+ * Set up VFIO for the VF and cache the container/group/device fds.
+ * All fds are initialized to -1 first so a failed setup leaves a
+ * recognizable state. Returns 0 on success, -1 on failure.
+ */
+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)
+{
+	int vfio;
+
+	LOG_FUNC_ENTER();
+
+	internal->vfio_dev_fd = -1;
+	internal->vfio_group_fd = -1;
+	internal->vfio_container_fd = -1;
+
+	vfio = nt_vfio_setup(internal->pdev);
+	if (vfio == -1) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__, __LINE__);
+		return -1;
+	}
+	internal->vfio_container_fd = nt_vfio_get_container_fd(vfio);
+	internal->vfio_group_fd = nt_vfio_get_group_fd(vfio);
+	internal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);
+	return 0;
+}
+
+/*
+ * DMA map (do_map != 0) or unmap (do_map == 0) every guest memory region
+ * of the vhost device into the VF's IOMMU domain.
+ * Returns 0 on success, negative on failure.
+ */
+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	struct rte_vhost_memory *mem = NULL;
+	int vf_num = nt_vfio_vf_num(internal->pdev);
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(DBG, VDPA, "%s: vid=%d vDPA dev=%p\n", __func__, internal->vid,
+	       internal->vdev);
+
+	/* Refuse double-map and unmap-when-not-mapped. */
+	if ((do_map && __atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED)) ||
+			(!do_map && !__atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED))) {
+		ret = -1;
+		goto exit;
+	}
+	ret = rte_vhost_get_mem_table(internal->vid, &mem);
+	if (ret < 0) {
+		NT_LOG(ERR, VDPA, "failed to get VM memory layout.\n");
+		goto exit;
+	}
+
+	for (i = 0; i < mem->nregions; i++) {
+		struct rte_vhost_mem_region *reg = &mem->regions[i];
+
+		/* Fixed: the GPA conversion was "0xllx" (missing '%'), which
+		 * printed a literal string and consumed the size argument as
+		 * the GPA, leaving the region size unprinted.
+		 */
+		NT_LOG(INF, VDPA,
+		       "%s, region %u: HVA 0x%" PRIX64 ", GPA 0x%" PRIX64 ", size 0x%" PRIX64 ".\n",
+		       (do_map ? "DMA map" : "DMA unmap"), i,
+		       reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+		if (do_map) {
+			ret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,
+						   reg->guest_phys_addr,
+						   reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA map failed.\n",
+				       __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 1, __ATOMIC_RELAXED);
+		} else {
+			ret = nt_vfio_dma_unmap_vdpa(vf_num,
+						     reg->host_user_addr,
+						     reg->guest_phys_addr,
+						     reg->size);
+			if (ret < 0) {
+				NT_LOG(ERR, VDPA, "%s: DMA unmap failed.\n", __func__);
+				goto exit;
+			}
+			__atomic_store_n(&internal->dma_mapped, 0, __ATOMIC_RELAXED);
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	LOG_FUNC_LEAVE();
+	return ret;
+}
+
+/*
+ * Translate a host virtual address into a guest physical address using
+ * the vhost memory table. Returns 0 when the HVA falls in no region
+ * (callers treat 0 as failure; GPA 0 itself is therefore unusable).
+ */
+static uint64_t _hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *mem = NULL;
+	struct rte_vhost_mem_region *reg;
+	uint64_t gpa = 0;
+	uint32_t i;
+
+	if (rte_vhost_get_mem_table(vid, &mem) < 0)
+		goto exit;
+
+	for (i = 0; i < mem->nregions; i++) {
+		reg = &mem->regions[i];
+		if (hva >= reg->host_user_addr &&
+				hva < reg->host_user_addr + reg->size) {
+			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
+			break;
+		}
+	}
+
+exit:
+	if (mem)
+		free(mem);
+
+	return gpa;
+}
+
+/*
+ * Resolve the guest physical addresses of one vring and create the
+ * matching FPGA virt queue: even indices are Rx, odd indices are Tx.
+ * Only packed or in-order rings are supported; plain split rings are
+ * accepted but no HW queue is created. Returns 0 on success.
+ */
+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,
+				  int vring)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	struct rte_vhost_vring vq;
+	int vid = internal->vid;
+	uint64_t gpa;
+
+	rte_vhost_get_vhost_vring(vid, vring, &vq);
+
+	NT_LOG(INF, VDPA, "%s: idx=%d: vq.desc %p\n", __func__, vring, vq.desc);
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\n",
+		       __func__, vring, vq.desc);
+		return -1;
+	}
+	hw->vring[vring].desc = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA,
+		       "%s: idx=%d: failed to get GPA for available ring\n",
+		       __func__, vring);
+		return -1;
+	}
+	hw->vring[vring].avail = gpa;
+
+	gpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		NT_LOG(ERR, VDPA, "%s: idx=%d: fail to get GPA for used ring\n",
+		       __func__, vring);
+		return -1;
+	}
+
+	hw->vring[vring].used = gpa;
+	hw->vring[vring].size = vq.size;
+
+	rte_vhost_get_vring_base(vid, vring, &hw->vring[vring].last_avail_idx,
+				 &hw->vring[vring].last_used_idx);
+
+	/* Prevent multiple creations */
+	{
+		const int index = vring;
+		uint32_t hw_index = 0;
+		uint32_t host_id = 0;
+		const uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */
+		uint32_t vport = 0;
+		uint32_t port = internal->outport;
+		struct vring_info *p_vr_inf = &hw->vring[vring];
+		nthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);
+
+		int res = nthw_vdpa_get_queue_id_info(internal->vdev,
+						      !(vring & 1), vring >> 1,
+						      &hw_index, &host_id,
+						      &vport);
+		if (res) {
+			NT_LOG(ERR, VDPA, "HW info received failed\n");
+			p_vr_inf->p_vq = NULL; /* Failed to create the vring */
+			return res;
+		}
+
+		if (!(vring & 1)) {
+			NT_LOG(DBG, VDPA,
+			       "Rx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		} else {
+			NT_LOG(DBG, VDPA,
+			       "Tx: idx %u, host_id %u, vport %u, queue %i\n",
+			       hw_index, host_id, vport, vring >> 1);
+		}
+		NT_LOG(DBG, VDPA,
+		       "%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\n",
+		       __func__, index, (void *)p_vr_inf->avail,
+		       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,
+		       p_vr_inf->size, host_id, port, header);
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			/* 'res' from the queue-id lookup above is reused here;
+			 * a shadowing inner declaration has been removed.
+			 */
+			NT_LOG(DBG, VDPA,
+			       "%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+
+			if (!(vring & 1)) {
+				struct nthw_virt_queue *rx_vq;
+
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr =
+					(start_idx & 0x7fff) % vq.size;
+
+				/* disable doorbell not needed by FPGA */
+				((struct pvirtq_event_suppress *)vq.used)
+				->flags = RING_EVENT_FLAGS_DISABLE;
+				rte_wmb();
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					NT_LOG(DBG, VDPA,
+					       "Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\n",
+					       hw_index, host_id, start_idx,
+					       header, vring, vport);
+					/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf
+						->avail, /* -> driver_event */
+						(void *)p_vr_inf
+						->used, /* -> device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						PACKED_RING,
+						vring + 1);
+
+				} else {
+					rx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, header,
+						SPLIT_RING,
+						-1); /* no interrupt enabled */
+				}
+
+				p_vr_inf->p_vq = rx_vq;
+				p_vr_inf->vq_type = 0;
+				res = (rx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(rx_vq,
+									1, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Rx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			} else {
+				/*
+				 * transmit virt queue
+				 * (the former unreachable "unexpected index"
+				 * branch after this else has been removed:
+				 * vring is either even or odd)
+				 */
+				struct nthw_virt_queue *tx_vq;
+				uint16_t start_idx =
+					hw->vring[vring].last_avail_idx;
+				uint16_t next_ptr;
+
+				if (hw->negotiated_features &
+						(1ULL << VIRTIO_F_RING_PACKED)) {
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+
+					/* disable doorbell needs from FPGA */
+					((struct pvirtq_event_suppress *)vq.used)
+					->flags =
+						RING_EVENT_FLAGS_DISABLE;
+					rte_wmb();
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail, /* driver_event */
+						(void *)p_vr_inf->used, /* device_event */
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, PACKED_RING,
+						vring + 1, /* interrupt 2,4,6... */
+						!!(hw->negotiated_features &
+							(1ULL << VIRTIO_F_IN_ORDER)));
+
+				} else {
+					/*
+					 * In Live Migration restart scenario:
+					 * This only works if no jumbo packets has been send from VM
+					 * on the LM source side. This pointer points to the next
+					 * free descr and may be pushed ahead by next flag and if
+					 * so, this pointer calculation is incorrect
+					 *
+					 * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS
+					 *       SUPPORT IN VM
+					 */
+					next_ptr =
+						(start_idx & 0x7fff) % vq.size;
+					tx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,
+						hw_index, start_idx,
+						next_ptr,
+						(void *)p_vr_inf->avail,
+						(void *)p_vr_inf->used,
+						(void *)p_vr_inf->desc,
+						p_vr_inf->size, host_id, port,
+						vport, header, SPLIT_RING,
+						-1, /* no interrupt enabled */
+						IN_ORDER);
+				}
+
+				p_vr_inf->p_vq = tx_vq;
+				p_vr_inf->vq_type = 1;
+				res = (tx_vq ? 0 : -1);
+				if (res == 0)
+					register_release_virtqueue_info(tx_vq,
+									0, 0);
+
+				NT_LOG(DBG, VDPA, "[%i] Tx Queue size %i\n",
+				       hw_index, p_vr_inf->size);
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA,
+				       "%s: idx=%d: vring error: res=%d\n",
+				       __func__, index, res);
+			}
+
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, index, hw->negotiated_features);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Start the datapath: DMA-map guest memory and create/enable vrings.
+ * Inline FPGA profile enables the first Rx/Tx pair here; other profiles
+ * only create vring 0 and let set_vring_state handle the rest.
+ * Always returns 0.
+ */
+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)
+{
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	int vid;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+	hw->nr_vring = rte_vhost_get_vring_num(vid);
+	rte_vhost_get_negotiated_features(vid, &hw->negotiated_features);
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		NT_LOG(INF, VDPA, "%s: Number of VRINGs=%u\n", __func__,
+		       hw->nr_vring);
+
+		/* only the first Rx/Tx pair (i < 2) is enabled here */
+		for (int i = 0; i < hw->nr_vring && i < 2; i++) {
+			if (!hw->vring[i].enable) {
+				ntvf_vdpa_dma_map(internal, 1);
+				ntvf_vdpa_create_vring(internal, i);
+				if (hw->vring[i].desc && hw->vring[i].p_vq) {
+					if (hw->vring[i].vq_type == 0)
+						nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+					else
+						nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+					hw->vring[i].enable = 1;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Initially vring 0 must be enabled/created here - it is not later
+		 * enabled in vring state
+		 */
+		if (!hw->vring[0].enable) {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, 0);
+			hw->vring[0].enable = 1;
+		}
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Stop the datapath: save vring indices back to vhost and release every
+ * created HW virt queue. Always returns 0; per-vring release errors are
+ * only logged.
+ */
+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)
+{
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+	uint64_t features;
+	uint32_t i;
+	int vid;
+	int res;
+
+	LOG_FUNC_ENTER();
+
+	vid = internal->vid;
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+					 hw->vring[i].last_used_idx);
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		struct vring_info *p_vr_inf = &hw->vring[i];
+
+		if ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||
+				(hw->negotiated_features &
+				 (1ULL << VIRTIO_F_RING_PACKED))) {
+			/* NOTE(review): "%016lX" assumes 64-bit long;
+			 * PRIx64 would be portable to 32-bit builds.
+			 */
+			NT_LOG(DBG, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+			if (p_vr_inf->vq_type == 0) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_rx_virt_queue(p_vr_inf->p_vq);
+			} else if (p_vr_inf->vq_type == 1) {
+				de_register_release_virtqueue_info(p_vr_inf->p_vq);
+				res = nthw_release_tx_virt_queue(p_vr_inf->p_vq);
+			} else {
+				NT_LOG(ERR, VDPA,
+				       "%s: vring #%d: unknown type %d\n",
+				       __func__, i, p_vr_inf->vq_type);
+				res = -1;
+			}
+			if (res != 0) {
+				NT_LOG(ERR, VDPA, "%s: vring #%d: res=%d\n",
+				       __func__, i, res);
+			}
+		} else {
+			NT_LOG(WRN, VDPA,
+			       "%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\n",
+			       __func__, hw->negotiated_features);
+		}
+		p_vr_inf->desc = 0UL;
+	}
+
+	if (RTE_VHOST_NEED_LOG(features)) {
+		NT_LOG(WRN, VDPA,
+		       "%s: vid %d: vhost logging feature needed - currently not supported\n",
+		       __func__, vid);
+	}
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* Buffer large enough for the irq_set header plus one eventfd per vring. */
+#define MSIX_IRQ_SET_BUF_LEN           \
+	(sizeof(struct vfio_irq_set) + \
+	 sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)
+
+/*
+ * Route each vring's callfd eventfd to an MSI-X vector via VFIO.
+ * Vector 0 is the device's own interrupt; vrings start at vector 1.
+ * Returns 0 (also when there are too many vrings for MSI-X, so that
+ * polling drivers in the VM keep working), -1 on ioctl failure.
+ */
+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	uint32_t i, nr_vring;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int *fd_ptr;
+	struct rte_vhost_vring vring;
+
+	if (__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+
+	LOG_FUNC_ENTER();
+	vring.callfd = -1;
+
+	nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+	NT_LOG(INF, VDPA,
+	       "Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\n",
+	       nr_vring, internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	if (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {
+		NT_LOG(WRN, VDPA,
+		       "Can't enable MSI interrupts. Too many vectors requested: "
+		       "%i (max: %i) only poll mode drivers will work",
+		       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);
+		/*
+		 * Return success, because polling drivers in VM still works without
+		 * interrupts (i.e. DPDK PMDs)
+		 */
+		return 0;
+	}
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = nr_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	fd_ptr = (int *)&irq_set->data;
+
+	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;
+
+	/* assumes nr_vring is even (Rx/Tx pairs) — TODO confirm for odd counts */
+	for (i = 0; i < nr_vring; i += 2) {
+		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+
+		rte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);
+		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;
+	}
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error enabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 1, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Disable all MSI-X vectors for the VF via VFIO (count = 0 trigger).
+ * No-op when interrupts are not enabled. Returns 0 on success, -1 on
+ * ioctl failure.
+ */
+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int len;
+
+	if (!__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))
+		return 0;
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "Disable VFIO interrupt on VID %i (%02x:%02x.%x)\n",
+	       internal->vid, internal->pdev->addr.bus,
+	       internal->pdev->addr.devid, internal->pdev->addr.function);
+
+	len = sizeof(struct vfio_irq_set);
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = len;
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret) {
+		NT_LOG(ERR, VDPA, "Error disabling MSI-X interrupts: %s",
+		       strerror(errno));
+		return -1;
+	}
+
+	__atomic_store_n(&internal->intr_enabled, 0, __ATOMIC_RELAXED);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Reconcile the datapath with the started/dev_attached flags under the
+ * device lock: start it when both are set and it is not running, stop
+ * and unmap when either clears while running. Returns 0 on success or
+ * the first failing step's error code.
+ */
+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)
+{
+	int ret;
+
+	LOG_FUNC_ENTER();
+
+	rte_spinlock_lock(&internal->lock);
+
+	if (!__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(__atomic_load_n(&internal->started, __ATOMIC_RELAXED) &&
+			 __atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: [%s:%u] start\n", __func__, __FILE__,
+			       __LINE__);
+
+		ret = ntvf_vdpa_start(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 1, __ATOMIC_RELAXED);
+	} else if (__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&
+			(!__atomic_load_n(&internal->started, __ATOMIC_RELAXED) ||
+			 !__atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {
+		NT_LOG(DBG, VDPA, "%s: stop\n", __func__);
+
+		ret = ntvf_vdpa_stop(internal);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_disable_vfio_intr(internal);
+		if (ret) {
+			/* Fixed: log before the jump — the original placed
+			 * 'goto err' first, making this log unreachable.
+			 */
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		ret = ntvf_vdpa_dma_map(internal, 0);
+		if (ret) {
+			NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+			       __LINE__);
+			goto err;
+		}
+
+		__atomic_store_n(&internal->running, 0, __ATOMIC_RELAXED);
+	} else {
+		NT_LOG(INF, VDPA, "%s: unhandled state [%s:%u]\n", __func__,
+		       __FILE__, __LINE__);
+	}
+
+	rte_spinlock_unlock(&internal->lock);
+	LOG_FUNC_LEAVE();
+	return 0;
+
+err:
+	rte_spinlock_unlock(&internal->lock);
+	NT_LOG(ERR, VDPA, "%s: leave [%s:%u]\n", __func__, __FILE__, __LINE__);
+	return ret;
+}
+
+/*
+ * vDPA dev_conf callback: mark the device attached and kick the
+ * datapath state machine. Returns 0 on success, -1 for unknown vdev.
+ */
+static int ntvf_vdpa_dev_config(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	internal->vid = vid;
+
+	__atomic_store_n(&internal->dev_attached, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA dev_close callback: detach, stop the datapath and invalidate the
+ * cached virt-queue pointers. Returns 0 on success, -1 for unknown vdev.
+ */
+static int ntvf_vdpa_dev_close(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ntvf_vdpa_internal *internal;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+
+	__atomic_store_n(&internal->dev_attached, 0, __ATOMIC_RELAXED);
+	ntvf_vdpa_update_datapath(internal);
+
+	/* Invalidate the virt queue pointers */
+	uint32_t i;
+	struct ntvf_vdpa_hw *hw = &internal->hw;
+
+	for (i = 0; i < hw->nr_vring; i++)
+		hw->vring[i].p_vq = NULL;
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * vDPA set_features callback. Succeeds unless vhost logging (live
+ * migration) is requested, which this driver cannot support without a
+ * relay core — in that case it deliberately returns -1.
+ */
+static int ntvf_vdpa_set_features(int vid)
+{
+	uint64_t features;
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	rte_vhost_get_negotiated_features(vid, &features);
+	NT_LOG(DBG, VDPA, "%s: vid %d: vDPA dev %p: features=0x%016lX\n",
+	       __func__, vid, vdev, features);
+
+	if (!RTE_VHOST_NEED_LOG(features))
+		return 0;
+
+	NT_LOG(INF, VDPA,
+	       "%s: Starting Live Migration for vid=%d vDPA dev=%p\n", __func__,
+	       vid, vdev);
+
+	/* Relay core feature not present. We cannot do live migration then. */
+	NT_LOG(ERR, VDPA,
+	       "%s: Live Migration not possible. Relay core feature required.\n",
+	       __func__);
+	return -1;
+}
+
+/* vDPA callback: return the cached VFIO group fd, or -1 for unknown vdev. */
+static int ntvf_vdpa_get_vfio_group_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_group_fd;
+}
+
+/* vDPA callback: return the cached VFIO device fd, or -1 for unknown vdev. */
+static int ntvf_vdpa_get_vfio_device_fd(int vid)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	LOG_FUNC_LEAVE();
+	return list->internal->vfio_dev_fd;
+}
+
+/* vDPA callback: report the max queue-pair count for this device. */
+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,
+				   uint32_t *queue_num)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+	*queue_num = list->internal->max_queues;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p queue_num=%d\n", __func__, vdev,
+	       *queue_num);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA callback: report the device's supported virtio-net feature bits. */
+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,
+				       uint64_t *features)
+{
+	struct internal_list *list;
+
+	LOG_FUNC_ENTER();
+
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device : %p\n", __func__, vdev);
+		return -1;
+	}
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = list->internal->features;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA callback: report the supported vhost-user protocol feature bits. */
+static int
+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,
+				uint64_t *features)
+{
+	LOG_FUNC_ENTER();
+
+	if (!features) {
+		NT_LOG(ERR, VDPA, "%s: vDPA dev=%p: no ptr to feature\n",
+		       __func__, vdev);
+		return -1;
+	}
+
+	*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p: features=0x%016lX\n", __func__,
+	       vdev, *features);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Enable MSI-X routing via VFIO and then enable every created vring
+ * (odd = Tx, even = Rx). Returns 0 on success or the VFIO error.
+ */
+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,
+	struct ntvf_vdpa_internal *internal)
+{
+	int ret = 0;
+
+	ret = ntvf_vdpa_enable_vfio_intr(internal);
+	if (ret) {
+		/* Fixed: use the driver's logging macro instead of a bare
+		 * printf, consistent with every other error path.
+		 */
+		NT_LOG(ERR, VDPA, "%s: Failed to enable VFIO interrupts\n",
+		       __func__);
+		return ret;
+	}
+	/* Enable Rx and Tx for all vrings */
+	for (int i = 0; i < hw->nr_vring; i++) {
+		if (i & 1)
+			nthw_enable_tx_virt_queue(hw->vring[i].p_vq);
+		else
+			nthw_enable_rx_virt_queue(hw->vring[i].p_vq);
+	}
+	return ret;
+}
+/*
+ * vDPA set_vring_state callback: enable or disable one vring, creating
+ * it on first enable. For non-inline FPGA profiles the VFIO interrupt
+ * setup and global queue enable are deferred until the last vring is
+ * enabled. Returns 0 on success, negative on failure.
+ */
+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+
+	struct ntvf_vdpa_internal *internal;
+	struct ntvf_vdpa_hw *hw;
+	int ret = 0;
+
+	LOG_FUNC_ENTER();
+
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		NT_LOG(ERR, VDPA, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	enum fpga_info_profile fpga_profile =
+		get_fpga_profile_from_pci(internal->pdev->addr);
+
+	if (!state && hw->vring[vring].enable) {
+		/* Disable vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_disable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_disable_tx_virt_queue(hw->vring[vring].p_vq);
+		}
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		/* Enable/Create vring */
+		if (hw->vring[vring].desc && hw->vring[vring].p_vq) {
+			if (hw->vring[vring].vq_type == 0)
+				nthw_enable_rx_virt_queue(hw->vring[vring].p_vq);
+			else
+				nthw_enable_tx_virt_queue(hw->vring[vring].p_vq);
+		} else {
+			ntvf_vdpa_dma_map(internal, 1);
+			ntvf_vdpa_create_vring(internal, vring);
+
+			if (fpga_profile != FPGA_INFO_PROFILE_INLINE) {
+				/*
+				 * After last vq enable VFIO interrupt IOMMU re-mapping and enable
+				 * FPGA Rx/Tx
+				 */
+				if (vring == hw->nr_vring - 1) {
+					ret = ntvf_vdpa_configure_queue(hw, internal);
+					if (ret)
+						return ret;
+				}
+			}
+		}
+	}
+
+	if (fpga_profile == FPGA_INFO_PROFILE_INLINE) {
+		hw->vring[vring].enable = !!state;
+		/* after last vq enable VFIO interrupt IOMMU re-mapping */
+		if (hw->vring[vring].enable && vring == hw->nr_vring - 1) {
+			ret = ntvf_vdpa_configure_queue(hw, internal);
+			if (ret)
+				return ret;
+		}
+	} else {
+		hw->vring[vring].enable = !!state;
+	}
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/* vDPA ops table registered with rte_vdpa_register_device(). */
+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {
+	.get_queue_num = ntvf_vdpa_get_queue_num,
+	.get_features = ntvf_vdpa_get_vdpa_features,
+	.get_protocol_features = ntvf_vdpa_get_protocol_features,
+	.dev_conf = ntvf_vdpa_dev_config,
+	.dev_close = ntvf_vdpa_dev_close,
+	.set_vring_state = ntvf_vdpa_set_vring_state,
+	.set_features = ntvf_vdpa_set_features,
+	.migration_done = NULL,
+	.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,
+	.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,
+	.get_notify_area = NULL,
+};
+
+/*
+ * Probe one VF: allocate internal state, set up VFIO, register the
+ * vDPA device and add it to the global list. Returns 0 on success,
+ * -1 on failure (allocations are released on every error path).
+ */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal = NULL;
+	struct internal_list *list = NULL;
+	enum fpga_info_profile fpga_profile;
+
+	LOG_FUNC_ENTER();
+
+	NT_LOG(INF, VDPA, "%s: [%s:%u] %04x:%02x:%02x.%x\n", __func__, __FILE__,
+	       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,
+	       pci_dev->addr.devid, pci_dev->addr.function);
+	list = rte_zmalloc("ntvf_vdpa", sizeof(*list), 0);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal = rte_zmalloc("ntvf_vdpa", sizeof(*internal), 0);
+	if (internal == NULL) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		goto error;
+	}
+
+	internal->pdev = pci_dev;
+	rte_spinlock_init(&internal->lock);
+
+	if (ntvf_vdpa_vfio_setup(internal) < 0) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u]\n", __func__, __FILE__,
+		       __LINE__);
+		/* Fixed: was 'return -1', leaking 'list' and 'internal'. */
+		goto error;
+	}
+
+	internal->max_queues = NTVF_VDPA_MAX_QUEUES;
+
+	internal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;
+
+	NT_LOG(DBG, VDPA, "%s: masked features=0x%016lX [%s:%u]\n", __func__,
+	       internal->features, __FILE__, __LINE__);
+
+	fpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);
+	if (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {
+		internal->outport = 0;
+	} else {
+		/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */
+		internal->outport = internal->pdev->addr.function & 1;
+	}
+
+	list->internal = internal;
+
+	internal->vdev =
+		rte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);
+	NT_LOG(DBG, VDPA, "%s: vDPA dev=%p\n", __func__, internal->vdev);
+
+	if (!internal->vdev) {
+		NT_LOG(ERR, VDPA, "%s: [%s:%u] Register vDPA device failed\n",
+		       __func__, __FILE__, __LINE__);
+		/* NOTE(review): VFIO fds acquired above are not released on
+		 * this path — TODO add a vfio teardown helper.
+		 */
+		goto error;
+	}
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_INSERT_TAIL(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	__atomic_store_n(&internal->started, 1, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+
+error:
+	rte_free(list);
+	rte_free(internal);
+	return -1;
+}
+
+/*
+ * Remove one VF: stop the datapath, unmap/unregister the device, drop
+ * it from the global list and free its state. Returns 0 on success,
+ * -1 when the device is unknown.
+ */
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct ntvf_vdpa_internal *internal;
+	struct internal_list *list;
+	int vf_num = nt_vfio_vf_num(pci_dev);
+
+	LOG_FUNC_ENTER();
+	list = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);
+	if (list == NULL) {
+		NT_LOG(ERR, VDPA, "%s: Invalid device: %s", __func__,
+		       pci_dev->name);
+		return -1;
+	}
+
+	internal = list->internal;
+	__atomic_store_n(&internal->started, 0, __ATOMIC_RELAXED);
+
+	ntvf_vdpa_update_datapath(internal);
+
+	rte_pci_unmap_device(internal->pdev);
+	nt_vfio_remove(vf_num);
+	rte_vdpa_unregister_device(internal->vdev);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	rte_free(list);
+	rte_free(internal);
+
+	LOG_FUNC_LEAVE();
+	return 0;
+}
+
+/*
+ * Empty ID table (sentinel only): this driver is not bound by PCI bus
+ * scan — ntvf_vdpa_pci_probe() is invoked explicitly from
+ * nt_vf_pci_probe() — presumably intentional; confirm against the bus
+ * registration flow.
+ */
+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver rte_ntvf_vdpa = {
+	.id_table = pci_id_ntvf_vdpa_map,
+	.drv_flags = 0,
+	.probe = ntvf_vdpa_pci_probe,
+	.remove = ntvf_vdpa_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, "* vfio-pci");
+
diff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h
new file mode 100644
index 0000000000..561e3bf7cf
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * vDPA driver entry points and function-trace helpers.
+ * Guard renamed from __NTNIC_VF_VDPA_H__: identifiers containing a
+ * double underscore are reserved for the implementation (C11 7.1.3).
+ */
+#ifndef NTNIC_VF_VDPA_H_
+#define NTNIC_VF_VDPA_H_
+
+extern int ntvf_vdpa_logtype;
+
+/*
+ * LOG_FUNC_TRACE is unconditionally defined here, so the enter/leave
+ * trace macros are always compiled in (they still only emit at DBG
+ * log level).
+ */
+#define LOG_FUNC_TRACE
+#ifdef LOG_FUNC_TRACE
+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, "%s: enter\n", __func__)
+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, "%s: leave\n", __func__)
+#else
+#define LOG_FUNC_ENTER()
+#define LOG_FUNC_LEAVE()
+#endif
+
+/* PCI probe/remove entry points registered with the PCI bus driver */
+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			struct rte_pci_device *pci_dev);
+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);
+
+void ntvf_vdpa_reset_hw(int vid);
+
+#endif /* NTNIC_VF_VDPA_H_ */
diff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c
new file mode 100644
index 0000000000..1390383c55
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <sys/ioctl.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_vfio.h>
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include <ntlog.h>
+#include <nt_util.h>
+#include "ntnic_vfio.h"
+
+#define ONE_G_SIZE 0x40000000
+#define ONE_G_MASK (ONE_G_SIZE - 1)
+#define START_VF_IOVA 0x220000000000
+
+/*
+ * Derive the PF/VF ordinal from the PCI address: the low 5 bits of the
+ * device id and the low 3 bits of the function number are packed into a
+ * single index (device * 8 + function).
+ */
+int nt_vfio_vf_num(const struct rte_pci_device *pdev)
+{
+	int dev_bits = pdev->addr.devid & 0x1f;
+	int fn_bits = pdev->addr.function & 0x7;
+
+	return (dev_bits << 3) | fn_bits;
+}
+
+/* Internal API */
+
+/* Per-device VFIO state, indexed by the number from nt_vfio_vf_num() */
+struct vfio_dev {
+	int container_fd;
+	int group_fd;
+	int dev_fd;
+	uint64_t iova_addr;	/* next free IOVA in this device's window */
+};
+
+static struct vfio_dev vfio_list[256];
+
+/*
+ * Look up the VFIO state slot for a device number; NULL if out of range.
+ * The bound is derived from the table itself rather than duplicating the
+ * magic constant 255, so it cannot drift if the table is resized.
+ */
+static struct vfio_dev *vfio_get(int vf_num)
+{
+	if (vf_num < 0 ||
+			vf_num >= (int)(sizeof(vfio_list) / sizeof(vfio_list[0])))
+		return NULL;
+	return &vfio_list[vf_num];
+}
+
+/* External API */
+
+/*
+ * Set up a VFIO container/group for the given PCI device.
+ *
+ * PF0 (vf_num == 0) shares the default DPDK VFIO container; every other
+ * device gets a private container so its DMA mappings stay isolated.
+ *
+ * Returns the non-negative device number on success, -1 on failure.
+ */
+int nt_vfio_setup(struct rte_pci_device *dev)
+{
+	char devname[RTE_DEV_NAME_MAX_LEN] = { 0 };
+	int iommu_group_num;
+	int vf_num;
+	struct vfio_dev *vfio;
+
+	NT_LOG(INF, ETHDEV, "NT VFIO device setup %s\n", dev->name);
+
+	vf_num = nt_vfio_vf_num(dev);
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Illegal device id\n");
+		return -1;
+	}
+
+	vfio->dev_fd = -1;
+	vfio->group_fd = -1;
+	vfio->container_fd = -1;
+	vfio->iova_addr = START_VF_IOVA;
+
+	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+	/*
+	 * rte_vfio_get_group_num() returns 1 on success, 0 when the device
+	 * is not managed by VFIO and <0 on error. Without this check,
+	 * iommu_group_num could be used uninitialized below (UB).
+	 */
+	if (rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+				   &iommu_group_num) <= 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. Unable to get IOMMU group number.\n");
+		return -1;
+	}
+
+	if (vf_num == 0) {
+		/* use default container for pf0 */
+		vfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;
+	} else {
+		vfio->container_fd = rte_vfio_container_create();
+		if (vfio->container_fd < 0) {
+			NT_LOG(ERR, ETHDEV,
+			       "VFIO device setup failed. VFIO container creation failed.\n");
+			return -1;
+		}
+	}
+
+	vfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,
+			 iommu_group_num);
+	if (vfio->group_fd < 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device setup failed. VFIO container group bind failed.\n");
+		goto err;
+	}
+
+	if (vf_num > 0) {
+		/* mapping BARs requires the vfio-pci kernel driver */
+		if (rte_pci_map_device(dev)) {
+			NT_LOG(ERR, ETHDEV,
+			       "Map VFIO device failed. is the vfio-pci driver loaded?\n");
+			goto err;
+		}
+	}
+
+	vfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
+
+	NT_LOG(DBG, ETHDEV,
+	       "%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\n",
+	       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,
+	       vfio->group_fd, iommu_group_num);
+
+	return vf_num;
+
+err:
+	/* never destroy the shared default container used by pf0 */
+	if (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)
+		rte_vfio_container_destroy(vfio->container_fd);
+	return -1;
+}
+
+/*
+ * Release the VFIO resources associated with a device number.
+ *
+ * NOTE(review): this destroys vfio->container_fd unconditionally; for
+ * pf0 that value is RTE_VFIO_DEFAULT_CONTAINER_FD (the shared default
+ * container) - confirm rte_vfio_container_destroy() handles that safely.
+ */
+int nt_vfio_remove(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	NT_LOG(DBG, ETHDEV, "NT VFIO device remove VF=%d\n", vf_num);
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO device remove failed. Illegal device id\n");
+		return -1;
+	}
+
+	rte_vfio_container_destroy(vfio->container_fd);
+	return 0;
+}
+
+/*
+ * Map process-virtual memory into the device's IOVA space.
+ *
+ * 1G-sized requests are aligned down to a 1G boundary so the whole
+ * region is mapped; the IOVA corresponding to the caller's original
+ * address is returned through *iova_addr. Each mapping consumes one 1G
+ * slot of the per-device IOVA window starting at START_VF_IOVA.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size)
+{
+	uint64_t gp_virt_base;
+	uint64_t gp_offset;
+
+	if (size == ONE_G_SIZE) {
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+		gp_offset = 0;
+	}
+
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * Arguments must match the conversion specifiers: passing a void *
+	 * for %PRIX64 or a uint64_t for %d is undefined behavior in a
+	 * variadic call, hence the explicit casts below.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO MMAP VF=%d VirtAddr=%" PRIX64 " HPA=%" PRIX64
+	       " VirtBase=%" PRIX64 " IOVA Addr=%" PRIX64 " size=%d\n",
+	       vf_num, (uint64_t)virt_addr, rte_malloc_virt2iova(virt_addr),
+	       gp_virt_base, vfio->iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,
+					     vfio->iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV, "VFIO MMAP res %i, container_fd %i, vf_num %i\n",
+	       res, vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	*iova_addr = vfio->iova_addr + gp_offset;
+
+	/* advance to the next free 1G slot in the device's IOVA window */
+	vfio->iova_addr += ONE_G_SIZE;
+
+	return 0;
+}
+
+/*
+ * Undo a mapping made by nt_vfio_dma_map(): recompute the 1G-aligned
+ * base and IOVA for 1G-sized requests and unmap from the device's
+ * container. Returns 0 on success (or if the container is already
+ * gone), -1 on failure.
+ */
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size)
+{
+	uint64_t gp_virt_base;
+	struct vfio_dev *vfio;
+
+	if (size == ONE_G_SIZE) {
+		uint64_t gp_offset;
+
+		/* mirror the alignment applied at map time */
+		gp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;
+		gp_offset = (uint64_t)virt_addr & ONE_G_MASK;
+		iova_addr -= gp_offset;
+	} else {
+		gp_virt_base = (uint64_t)virt_addr;
+	}
+
+	vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	/* container already torn down: nothing left to unmap */
+	if (vfio->container_fd == -1)
+		return 0;
+
+	int res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,
+					       iova_addr, size);
+	if (res != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO UNMMAP FAILED! res %i, container_fd %i, vf_num %i, virt_base=%" PRIX64
+		       ", IOVA=%" PRIX64 ", size=%i\n",
+		       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,
+		       (int)size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* vDPA mapping with Guest Phy addresses as IOVA */
+
+/*
+ * Map a guest-physical range into the device container, using the
+ * guest-physical address itself as the IOVA. Returns 0 on success,
+ * -1 on failure.
+ */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+
+	if (vfio == NULL) {
+		NT_LOG(ERR, ETHDEV, "VFIO MAP: VF number %d invalid\n", vf_num);
+		return -1;
+	}
+
+	/*
+	 * size is uint64_t; it must be cast to match the %d conversion -
+	 * passing a 64-bit value for %d is undefined behavior.
+	 */
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP VF=%d VirtAddr=%" PRIX64 " IOVA Addr=%" PRIX64
+	       " size=%d\n",
+	       vf_num, virt_addr, iova_addr, (int)size);
+
+	int res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,
+					     iova_addr, size);
+
+	NT_LOG(DBG, ETHDEV,
+	       "VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\n", res,
+	       vfio->container_fd, vf_num);
+	if (res) {
+		NT_LOG(ERR, ETHDEV,
+		       "rte_vfio_container_dma_map failed: res %d\n", res);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Remove a vDPA (guest-physical IOVA) mapping from the device
+ * container. Returns 0 on success, -1 on failure.
+ */
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size)
+{
+	struct vfio_dev *vfio = vfio_get(vf_num);
+	int ret;
+
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV, "VFIO vDPA UNMAP: VF number %d invalid\n",
+		       vf_num);
+		return -1;
+	}
+
+	ret = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,
+					   iova_addr, size);
+	if (ret != 0) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\n",
+		       ret, vfio->container_fd, vf_num);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the VFIO container fd for a device, or -1 if vf_num is invalid.
+ * The previous error text ("device remove failed") was copy-pasted from
+ * nt_vfio_remove() and misleading.
+ */
+int nt_vfio_get_container_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get container fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->container_fd;
+}
+
+/*
+ * Return the VFIO group fd for a device, or -1 if vf_num is invalid.
+ * The previous error text ("device remove failed") was copy-pasted from
+ * nt_vfio_remove() and misleading.
+ */
+int nt_vfio_get_group_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get group fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->group_fd;
+}
+
+/*
+ * Return the VFIO device fd for a device, or -1 if vf_num is invalid.
+ * The previous error text ("device remove failed") was copy-pasted from
+ * nt_vfio_remove() and misleading.
+ */
+int nt_vfio_get_dev_fd(int vf_num)
+{
+	struct vfio_dev *vfio;
+
+	vfio = vfio_get(vf_num);
+	if (!vfio) {
+		NT_LOG(ERR, ETHDEV,
+		       "VFIO get dev fd failed. Illegal device id\n");
+		return -1;
+	}
+	return vfio->dev_fd;
+}
+
+/* Internal init */
+
+/*
+ * Constructor: register the VFIO map/unmap callbacks with the common
+ * utility layer before any adapter is probed. Use the idiomatic
+ * RTE_INIT(func) { } form; the previous "RTE_INIT(func);" plus a
+ * separate static definition relied on the constructor attribute being
+ * attached via a forward declaration, which is fragile.
+ */
+RTE_INIT(nt_vfio_init)
+{
+	struct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,
+				       .vfio_dma_unmap = nt_vfio_dma_unmap };
+	nt_util_vfio_init(&s);
+}
diff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h
new file mode 100644
index 0000000000..5d8a63d364
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_vfio.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Per-device VFIO setup and DMA mapping helpers.
+ * Guard renamed from _NTNIC_VFIO_H_: identifiers beginning with an
+ * underscore followed by an uppercase letter are reserved (C11 7.1.3).
+ */
+#ifndef NTNIC_VFIO_H_
+#define NTNIC_VFIO_H_
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+
+/* Create/destroy the VFIO container and group for a device */
+int nt_vfio_setup(struct rte_pci_device *dev);
+int nt_vfio_remove(int vf_num);
+
+/* Accessors for the per-device VFIO file descriptors (-1 on bad id) */
+int nt_vfio_get_container_fd(int vf_num);
+int nt_vfio_get_group_fd(int vf_num);
+int nt_vfio_get_dev_fd(int vf_num);
+
+/* DMA map/unmap of process-virtual memory; *iova_addr returns the IOVA */
+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,
+		    uint64_t size);
+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,
+		      uint64_t size);
+
+/* vDPA variants: guest-physical addresses are used directly as IOVA */
+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			 uint64_t size);
+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,
+			   uint64_t size);
+
+/* Find device (PF/VF) number from device address */
+int nt_vfio_vf_num(const struct rte_pci_device *dev);
+#endif /* NTNIC_VFIO_H_ */
diff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c
new file mode 100644
index 0000000000..c0e67ba03d
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+
+#include "ntdrv_4ga.h"
+#include "ntlog.h"
+#include "nthw_drv.h"
+#include "nthw_fpga.h"
+#include "ntnic_xstats.h"
+
+#define UNUSED __rte_unused
+
+/* Describes one xstat entry: its name, which counter block it is read
+ * from, and the byte offset of the 64-bit counter inside that block.
+ */
+struct rte_nthw_xstats_names_s {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint8_t source;		/* 1 = RX port, 2 = TX port, 3 = FLM */
+	unsigned int offset;	/* byte offset within the source struct */
+};
+
+/*
+ * Extended stat for VSwitch
+ *
+ * source 1 = RX, source 2 = TX (see rte_nthw_xstats_names_s); offsets
+ * index into struct port_counters_vswitch_v1.
+ */
+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {
+	{ "rx_octets", 1, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"rx_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"rx_qos_dropped_bytes", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"rx_qos_dropped_packets", 1,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+	{ "tx_octets", 2, offsetof(struct port_counters_vswitch_v1, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_vswitch_v1, pkts) },
+	{	"tx_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, drop_events)
+	},
+	{	"tx_qos_dropped_bytes", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_octets)
+	},
+	{	"tx_qos_dropped_packets", 2,
+		offsetof(struct port_counters_vswitch_v1, qos_drop_pkts)
+	},
+};
+
+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.17
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+};
+
+/*
+ * Extended stat for Capture/Inline - implements RMON
+ * FLM 0.18
+ */
+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {
+	{ "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) },
+	{ "rx_octets", 1, offsetof(struct port_counters_v2, octets) },
+	{ "rx_packets", 1, offsetof(struct port_counters_v2, pkts) },
+	{	"rx_broadcast_packets", 1,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"rx_multicast_packets", 1,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"rx_unicast_packets", 1,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"rx_align_errors", 1,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"rx_code_violation_errors", 1,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"rx_undersize_packets", 1,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"rx_oversize_packets", 1,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) },
+	{	"rx_jabbers_not_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"rx_jabbers_truncated", 1,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"rx_size_64_packets", 1,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"rx_size_65_to_127_packets", 1,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"rx_size_128_to_255_packets", 1,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"rx_size_256_to_511_packets", 1,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"rx_size_512_to_1023_packets", 1,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"rx_size_1024_to_1518_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"rx_size_1519_to_2047_packets", 1,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"rx_size_2048_to_4095_packets", 1,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"rx_size_4096_to_8191_packets", 1,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"rx_size_8192_to_max_packets", 1,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+	{	"rx_ip_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_ip_chksum_error)
+	},
+	{	"rx_udp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_udp_chksum_error)
+	},
+	{	"rx_tcp_checksum_error", 1,
+		offsetof(struct port_counters_v2, pkts_tcp_chksum_error)
+	},
+
+	{ "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) },
+	{ "tx_octets", 2, offsetof(struct port_counters_v2, octets) },
+	{ "tx_packets", 2, offsetof(struct port_counters_v2, pkts) },
+	{	"tx_broadcast_packets", 2,
+		offsetof(struct port_counters_v2, broadcast_pkts)
+	},
+	{	"tx_multicast_packets", 2,
+		offsetof(struct port_counters_v2, multicast_pkts)
+	},
+	{	"tx_unicast_packets", 2,
+		offsetof(struct port_counters_v2, unicast_pkts)
+	},
+	{	"tx_align_errors", 2,
+		offsetof(struct port_counters_v2, pkts_alignment)
+	},
+	{	"tx_code_violation_errors", 2,
+		offsetof(struct port_counters_v2, pkts_code_violation)
+	},
+	{ "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) },
+	{	"tx_undersize_packets", 2,
+		offsetof(struct port_counters_v2, undersize_pkts)
+	},
+	{	"tx_oversize_packets", 2,
+		offsetof(struct port_counters_v2, oversize_pkts)
+	},
+	{ "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) },
+	{	"tx_jabbers_not_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_not_truncated)
+	},
+	{	"tx_jabbers_truncated", 2,
+		offsetof(struct port_counters_v2, jabbers_truncated)
+	},
+	{	"tx_size_64_packets", 2,
+		offsetof(struct port_counters_v2, pkts_64_octets)
+	},
+	{	"tx_size_65_to_127_packets", 2,
+		offsetof(struct port_counters_v2, pkts_65_to_127_octets)
+	},
+	{	"tx_size_128_to_255_packets", 2,
+		offsetof(struct port_counters_v2, pkts_128_to_255_octets)
+	},
+	{	"tx_size_256_to_511_packets", 2,
+		offsetof(struct port_counters_v2, pkts_256_to_511_octets)
+	},
+	{	"tx_size_512_to_1023_packets", 2,
+		offsetof(struct port_counters_v2, pkts_512_to_1023_octets)
+	},
+	{	"tx_size_1024_to_1518_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1024_to_1518_octets)
+	},
+	{	"tx_size_1519_to_2047_packets", 2,
+		offsetof(struct port_counters_v2, pkts_1519_to_2047_octets)
+	},
+	{	"tx_size_2048_to_4095_packets", 2,
+		offsetof(struct port_counters_v2, pkts_2048_to_4095_octets)
+	},
+	{	"tx_size_4096_to_8191_packets", 2,
+		offsetof(struct port_counters_v2, pkts_4096_to_8191_octets)
+	},
+	{	"tx_size_8192_to_max_packets", 2,
+		offsetof(struct port_counters_v2, pkts_8192_to_max_octets)
+	},
+
+	/* FLM 0.17 */
+	{ "flm_count_current", 3, offsetof(struct flm_counters_v1, current) },
+	{	"flm_count_learn_done", 3,
+		offsetof(struct flm_counters_v1, learn_done)
+	},
+	{	"flm_count_learn_ignore", 3,
+		offsetof(struct flm_counters_v1, learn_ignore)
+	},
+	{	"flm_count_learn_fail", 3,
+		offsetof(struct flm_counters_v1, learn_fail)
+	},
+	{	"flm_count_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, unlearn_done)
+	},
+	{	"flm_count_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_done)
+	},
+	{	"flm_count_auto_unlearn_ignore", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_ignore)
+	},
+	{	"flm_count_auto_unlearn_fail", 3,
+		offsetof(struct flm_counters_v1, auto_unlearn_fail)
+	},
+	{	"flm_count_timeout_unlearn_done", 3,
+		offsetof(struct flm_counters_v1, timeout_unlearn_done)
+	},
+	{ "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) },
+	{	"flm_count_rel_ignore", 3,
+		offsetof(struct flm_counters_v1, rel_ignore)
+	},
+	{ "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) },
+	{	"flm_count_prb_ignore", 3,
+		offsetof(struct flm_counters_v1, prb_ignore)
+	},
+
+	/* FLM 0.20 */
+	{ "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) },
+	{ "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) },
+	{ "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) },
+	{ "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) },
+	{ "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) },
+	{ "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) },
+	{ "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) },
+	{ "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) },
+	{ "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) },
+	{ "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) },
+	{	"flm_count_cuc_start", 3,
+		offsetof(struct flm_counters_v1, cuc_start)
+	},
+	{ "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) },
+};
+
+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)
+#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2)
+
+/*
+ * Container for the reset values
+ *
+ * Sized for the largest of the name tables so a single array covers
+ * every mode; xstats are reported relative to these per-port snapshots
+ * captured by nthw_xstats_reset().
+ */
+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \
+	NTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)
+
+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = {{ 0 }};
+
+
+/*
+ * These functions must only be called with stat mutex locked
+ */
+
+/*
+ * Fill 'stats' with up to n counters for one port, reported relative to
+ * the baseline captured by nthw_xstats_reset(). Returns the number of
+ * entries written. FLM counters use a single global baseline (row 0 of
+ * nthw_xstats_reset_val).
+ */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		/* vswitch mode has no FLM counters */
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		/* FLM module version selects which name table applies */
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	/*
+	 * NOTE(review): counters are read via (uint64_t *) casts into byte
+	 * buffers; assumes every offset is 8-byte aligned - confirm.
+	 */
+	for (i = 0; i < n && i < nb_names; i++) {
+		stats[i].id = i;
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			stats[i].value =
+				*((uint64_t *)&rx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 2:
+			/* TX stat */
+			stats[i].value =
+				*((uint64_t *)&tx_ptr[names[i].offset]) -
+				nthw_xstats_reset_val[port][i];
+			break;
+		case 3:
+			/* FLM stat */
+			if (flm_ptr) {
+				stats[i].value =
+					*((uint64_t *)&flm_ptr[names[i].offset]) -
+					nthw_xstats_reset_val[0][i];
+			} else {
+				stats[i].value = 0;
+			}
+			break;
+		default:
+			stats[i].value = 0;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*
+ * Fill 'values' for the requested stat ids; out-of-range ids are
+ * skipped. Returns the number of ids that were resolved. Caller must
+ * hold the stat mutex.
+ */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+	int count = 0;
+
+	if (is_vswitch) {
+		/* vswitch mode has no FLM counters */
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		/* FLM module version selects which name table applies */
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] < nb_names) {
+			switch (names[ids[i]].source) {
+			case 1:
+				/* RX stat */
+				values[i] =
+					*((uint64_t *)&rx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 2:
+				/* TX stat */
+				values[i] =
+					*((uint64_t *)&tx_ptr[names[ids[i]]
+							      .offset]) -
+					nthw_xstats_reset_val[port][ids[i]];
+				break;
+			case 3:
+				/* FLM stat */
+				if (flm_ptr) {
+					values[i] =
+						*((uint64_t *)&flm_ptr
+						  [names[ids[i]].offset]) -
+						nthw_xstats_reset_val[0][ids[i]];
+				} else {
+					values[i] = 0;
+				}
+				break;
+			default:
+				values[i] = 0;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+
+/*
+ * Snapshot the current counter values as the new zero baseline for one
+ * port; subsequent nthw_xstats_get*() calls report deltas against this
+ * snapshot. Caller must hold the stat mutex.
+ */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)
+{
+	unsigned int i;
+	uint8_t *flm_ptr;
+	uint8_t *rx_ptr;
+	uint8_t *tx_ptr;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		/* vswitch mode has no FLM counters */
+		flm_ptr = NULL;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;
+		rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];
+		tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];
+		/* FLM module version selects which name table applies */
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	for (i = 0; i < nb_names; i++) {
+		switch (names[i].source) {
+		case 1:
+			/* RX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&rx_ptr[names[i].offset]);
+			break;
+		case 2:
+			/* TX stat */
+			nthw_xstats_reset_val[port][i] =
+				*((uint64_t *)&tx_ptr[names[i].offset]);
+			break;
+		case 3:
+			/*
+			 * FLM stat
+			 * Reset makes no sense for flm_count_current
+			 */
+			if (flm_ptr && strcmp(names[i].name, "flm_count_current") != 0) {
+				nthw_xstats_reset_val[0][i] =
+					*((uint64_t *)&flm_ptr[names[i].offset]);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * These functions do not require the stat mutex to be locked
+ */
+
+/*
+ * Copy stat names into xstats_names, up to 'size' entries. If
+ * xstats_names is NULL, only the total number of available stats is
+ * returned; otherwise the number of names copied.
+ */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		/* FLM module version selects which name table applies */
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size && i < nb_names; i++) {
+		strlcpy(xstats_names[i].name, names[i].name,
+			sizeof(xstats_names[i].name));
+		count++;
+	}
+
+	return count;
+}
+
+/*
+ * Copy stat names for the requested ids. If xstats_names is NULL the
+ * total number of available stats is returned; otherwise the number of
+ * ids actually resolved. Out-of-range ids are skipped and no longer
+ * counted, mirroring nthw_xstats_get_by_id().
+ */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch)
+{
+	int count = 0;
+	unsigned int i;
+
+	uint32_t nb_names;
+	struct rte_nthw_xstats_names_s *names;
+
+	if (is_vswitch) {
+		names = nthw_virt_xstats_names;
+		nb_names = NTHW_VIRT_XSTATS_NAMES;
+	} else {
+		/* FLM module version selects which name table applies */
+		if (p_nt4ga_stat->flm_stat_ver < 18) {
+			names = nthw_cap_xstats_names_v1;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V1;
+		} else {
+			names = nthw_cap_xstats_names_v2;
+			nb_names = NTHW_CAP_XSTATS_NAMES_V2;
+		}
+	}
+
+	if (!xstats_names)
+		return nb_names;
+
+	for (i = 0; i < size; i++) {
+		if (ids[i] < nb_names) {
+			strlcpy(xstats_names[i].name, names[ids[i]].name,
+				RTE_ETH_XSTATS_NAME_SIZE);
+			/*
+			 * Count only resolved ids, consistent with
+			 * nthw_xstats_get_by_id(); previously every id was
+			 * counted even when out of range.
+			 */
+			count++;
+		}
+	}
+
+	return count;
+}
diff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h
new file mode 100644
index 0000000000..0a82a1a677
--- /dev/null
+++ b/drivers/net/ntnic/ntnic_xstats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_XSTATS_H_
+#define NTNIC_XSTATS_H_
+
+/* Fill 'xstats_names'; with a NULL array, return the number of names */
+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,
+			  struct rte_eth_xstat_name *xstats_names,
+			  unsigned int size, bool is_vswitch);
+/* Read up to 'n' xstat entries for 'port' into 'stats' */
+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,
+		    unsigned int n, bool is_vswitch, uint8_t port);
+/* Latch the current counter values as the reset baseline for 'port' */
+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);
+/* Name lookup for an explicit id list; NULL array returns the name count */
+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,
+				struct rte_eth_xstat_name *xstats_names,
+				const uint64_t *ids, unsigned int size,
+				bool is_vswitch);
+/* Read values for an explicit id list into 'values' */
+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,
+			  uint64_t *values, unsigned int n, bool is_vswitch,
+			  uint8_t port);
+
+#endif /* NTNIC_XSTATS_H_ */
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* [PATCH v16 8/8] net/ntnic: adds socket connection to PMD
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (5 preceding siblings ...)
  2023-09-08 16:07   ` [PATCH v16 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
@ 2023-09-08 16:07   ` Mykola Kostenok
  2023-09-15 15:54   ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Ferruh Yigit
  7 siblings, 0 replies; 142+ messages in thread
From: Mykola Kostenok @ 2023-09-08 16:07 UTC (permalink / raw)
  To: dev; +Cc: mko-plv, thomas, ckm, andrew.rybchenko, ferruh.yigit

From: Christian Koue Muf <ckm@napatech.com>

The socket connection is used by Napatech's tools for monitoring
and rte_flow programming from other processes.

Signed-off-by: Christian Koue Muf <ckm@napatech.com>
Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
---
v2:
* Fixed WARNING:TYPO_SPELLING
v4:
* Fixed Alpine build
v8:
* Fixed token parser constant length.
v10:
* Fix uninitialized variables and build warnings.
---
 drivers/net/ntnic/meson.build                 |   24 +
 .../ntconnect/include/ntconn_mod_helper.h     |   97 ++
 .../net/ntnic/ntconnect/include/ntconnect.h   |   96 ++
 .../ntnic/ntconnect/include/ntconnect_api.h   |   87 ++
 .../ntconnect/include/ntconnect_api_adapter.h |  221 +++
 .../ntconnect/include/ntconnect_api_flow.h    |  168 +++
 .../ntconnect/include/ntconnect_api_meter.h   |   89 ++
 .../include/ntconnect_api_statistic.h         |  173 +++
 .../ntconnect/include/ntconnect_api_test.h    |   18 +
 drivers/net/ntnic/ntconnect/ntconn_server.c   |   97 ++
 drivers/net/ntnic/ntconnect/ntconnect.c       |  641 ++++++++
 .../ntnic/ntconnect_modules/ntconn_adapter.c  |  775 ++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_flow.c | 1312 +++++++++++++++++
 .../ntnic/ntconnect_modules/ntconn_meter.c    |  517 +++++++
 .../ntnic/ntconnect_modules/ntconn_modules.h  |   19 +
 .../net/ntnic/ntconnect_modules/ntconn_stat.c |  877 +++++++++++
 .../net/ntnic/ntconnect_modules/ntconn_test.c |  146 ++
 17 files changed, 5357 insertions(+)
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
 create mode 100644 drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
 create mode 100644 drivers/net/ntnic/ntconnect/ntconn_server.c
 create mode 100644 drivers/net/ntnic/ntconnect/ntconnect.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
 create mode 100644 drivers/net/ntnic/ntconnect_modules/ntconn_test.c

diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build
index f7454ffb79..ee8cf982ef 100644
--- a/drivers/net/ntnic/meson.build
+++ b/drivers/net/ntnic/meson.build
@@ -7,6 +7,22 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
     subdir_done()
 endif
 
+# config object
+ntnic_conf = configuration_data()
+
+# transfer options into config object
+ntnic_conf.set('NT_TOOLS', true)
+
+# check option 'debug' (boolean flag derived from meson buildtype)
+if get_option('debug')
+    cflags += '-DDEBUG'
+endif
+
+# check nt_tools build option
+if ntnic_conf.get('NT_TOOLS')
+    cflags += '-DNT_TOOLS'
+endif
+
 # includes
 includes = [
     include_directories('.'),
@@ -20,6 +36,7 @@ includes = [
     include_directories('nthw/supported'),
     include_directories('nthw/flow_api'),
     include_directories('nthw/flow_filter'),
+    include_directories('ntconnect/include'),
     include_directories('sensors'),
     include_directories('sensors/avr_sensors'),
     include_directories('sensors/board_sensors'),
@@ -41,6 +58,13 @@ sources = files(
     'nim/nt_link_speed.c',
     'nim/qsfp_sensors.c',
     'nim/sfp_sensors.c',
+    'ntconnect/ntconn_server.c',
+    'ntconnect/ntconnect.c',
+    'ntconnect_modules/ntconn_adapter.c',
+    'ntconnect_modules/ntconn_flow.c',
+    'ntconnect_modules/ntconn_meter.c',
+    'ntconnect_modules/ntconn_stat.c',
+    'ntconnect_modules/ntconn_test.c',
     'nthw/core/nthw_clock_profiles.c',
     'nthw/core/nthw_fpga.c',
     'nthw/core/nthw_fpga_nt200a0x.c',
diff --git a/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
new file mode 100644
index 0000000000..f55c4141cc
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconn_mod_helper.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONN_MOD_HELPER_H_
+#define _NTCONN_MOD_HELPER_H_
+
+#include "ntconnect.h"
+
+/*
+ * Module parameter function call tree structures
+ */
+struct func_s {
+	const char *param;	/* token this entry matches; NULL terminates a table */
+	struct func_s *sub_funcs;	/* next tree level, or NULL for a leaf */
+	int (*func)(void *hdl, int client_fd, struct ntconn_header_s *hdr,
+		    char **data, int *len);	/* leaf handler, or NULL */
+};
+
+/*
+ * Build an error reply in a malloc'ed buffer: a 32-bit error code followed
+ * by the text "<module>:<error text>".  The sprintf() first writes a 4-byte
+ * "----" placeholder which is then overwritten with the numeric err_code.
+ * *len is left 0 when 'data' is NULL or allocation fails.
+ * Always returns REQUEST_ERR so callers can propagate it directly.
+ */
+static inline int ntconn_error(char **data, int *len, const char *module,
+			       enum ntconn_err_e err_code)
+{
+	*len = 0;
+	if (data) {
+		const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+		*data = malloc(4 + strlen(module) + 1 +
+			       strlen(ntcerr->err_text) + 1);
+		if (*data) {
+			sprintf(*data, "----%s:%s", module, ntcerr->err_text);
+			*len = strlen(*data) + 1; /* add 0 - terminator */
+			*(uint32_t *)*data = (uint32_t)ntcerr->err_code;
+		}
+	}
+	return REQUEST_ERR;
+}
+
+/*
+ * Build a minimal status reply holding just a 32-bit reply code.
+ * *len is left 0 when 'data' is NULL or allocation fails.
+ * Always returns REQUEST_OK.
+ */
+static inline int ntconn_reply_status(char **data, int *len,
+				      enum ntconn_reply_code_e code)
+{
+	*len = 0;
+	if (data) {
+		*data = malloc(sizeof(uint32_t));
+		if (*data) {
+			*len = sizeof(uint32_t);
+			*(uint32_t *)*data = (uint32_t)code;
+		}
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Resolve a comma-separated command string against the module's function
+ * call tree, recursing one tree level per token until a leaf handler or an
+ * error is reached.
+ * NOTE(review): strtok() is not reentrant - confirm requests are parsed from
+ * a single thread only (strtok_r would be the safe alternative).
+ * NOTE(review): for a single-token 'function', 'sub_funcs' below ends up
+ * pointing one byte past the string's NUL terminator; leaf handlers must
+ * only read it while hdr->len indicates data remains - verify callers.
+ */
+static inline int execute_function(const char *module, void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char *function,
+				   struct func_s *func_list, char **data,
+				   int *len, int recur_depth)
+{
+	char *tok = strtok(function, ",");
+
+	if (!tok) {
+		/* Empty at the top level vs. truncated mid-tree are distinct errors */
+		if (recur_depth == 0)
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		else
+			return ntconn_error(data, len, module,
+					    NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE);
+	}
+
+	hdr->len -= strlen(tok) + 1;	/* consume token plus its separator */
+	char *sub_funcs = function + strlen(tok) + 1;
+	int idx = 0;
+
+	while (func_list[idx].param) {
+		if (strcmp(func_list[idx].param, tok) == 0) {
+			/* hit */
+			if (func_list[idx].sub_funcs) {
+				return execute_function(module, hdl, client_id,
+							hdr, sub_funcs,
+							func_list[idx].sub_funcs,
+							data, len,
+							++recur_depth);
+			} else if (func_list[idx].func) {
+				/* commands/parameters for function in text, zero-terminated */
+				*data = sub_funcs;
+				return func_list[idx].func(hdl, client_id, hdr,
+							   data, len);
+			} else {
+				return ntconn_error(data, len, module,
+						    NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR);
+			}
+		}
+		idx++;
+	}
+	/* no hits */
+	return ntconn_error(data, len, module,
+			    NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED);
+}
+
+#endif /* _NTCONN_MOD_HELPER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect.h b/drivers/net/ntnic/ntconnect/include/ntconnect.h
new file mode 100644
index 0000000000..9dcf2ec0a1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_H_
+#define _NTCONNECT_H_
+
+#include <rte_pci.h>
+#include <sched.h>
+#include <stdint.h>
+
+#include "ntconnect_api.h"
+
+#define REQUEST_OK 0
+#define REQUEST_ERR -1
+
+typedef struct ntconn_api_s {
+	/*
+	 * Name specifying this module. This name is used in the request string
+	 */
+	const char *module;
+	/*
+	 * The version of this module integration
+	 */
+	uint32_t version_major;
+	uint32_t version_minor;
+	/*
+	 * The request function:
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client. To be able to manage client specific data/state.
+	 * function  : pointer to the remainder of the request command (Layer 3). May be modified.
+	 *             an example: <pci_id>;adapter;get,interface,port0,link_speed function will
+	 *             then be 'get,interface,port0,link_speed'.
+	 * hdr       : header for length of command string and length of binary blob.
+	 *             The command string will start at "*data" and will have the length hdr->len.
+	 *             The binary blob will start at "&(*data)[hdr->len]" and will have the length
+	 *             hdr->blob_len.
+	 * data      : pointer to the resulting data. Typically this will be allocated.
+	 * len       : length of the data in the reply.
+	 *
+	 * return    : REQUEST_OK on success, REQUEST_ERR on failure. On failure, the data and len
+	 *             may contain a descriptive error text
+	 */
+	int (*request)(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		       char *function, char **data, int *len);
+	/*
+	 * After each request call, and when 'len' returns > 0, this function is called
+	 * after sending reply to client.
+	 * hdl       : pointer to the context of this instance.
+	 * data      : the data pointer returned in the request function
+	 */
+	void (*free_data)(void *hdl, char *data);
+	/*
+	 * Clean up of client specific data allocations. Called when client disconnects from server
+	 * hdl       : pointer to the context of this instance.
+	 * client_id : identifying the client.
+	 */
+	void (*client_cleanup)(void *hdl, int client_id);
+} ntconnapi_t;
+
+/*
+ * ntconn error
+ */
+typedef struct ntconn_err_s {
+	uint32_t err_code;
+	const char *err_text;
+} ntconn_err_t;
+
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code);
+
+typedef struct ntconn_mod_s {
+	void *hdl;
+	struct pci_id_s addr;
+	const ntconnapi_t *op;
+
+	pthread_mutex_t mutex;
+	struct ntconn_mod_s *next;
+} ntconn_mod_t;
+
+struct ntconn_server_s {
+	int serv_fd;
+	int running;
+	/*
+	 * list of different pci_ids registered aka SmartNICs
+	 */
+	struct pci_id_s pci_id_list[MAX_PCI_IDS]; /* 0 - terminates */
+	cpu_set_t cpuset;
+};
+
+int ntconn_server_register(void *server);
+
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op);
+int ntconnect_init(const char *sockname, cpu_set_t cpuset);
+
+#endif /* _NTCONNECT_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
new file mode 100644
index 0000000000..14668bf2ee
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_H_
+#define _NTCONNECT_API_H_
+
+#include "stdint.h"
+/*
+ * NtConnect API
+ */
+
+#define NTCONNECT_SOCKET "/var/run/ntconnect/ntconnect.sock"
+
+enum ntconn_err_e {
+	NTCONN_ERR_CODE_NONE = 0U,
+	NTCONN_ERR_CODE_INTERNAL_ERROR,
+	NTCONN_ERR_CODE_INVALID_REQUEST,
+	NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR,
+	NTCONN_ERR_CODE_NO_DATA,
+	NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED,
+	NTCONN_ERR_CODE_MISSING_INVALID_PARAM,
+	NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE,
+	NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR,
+	NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+};
+
+enum ntconn_reply_code_e {
+	NTCONN_ADAPTER_ERR_PORT_STATE_FAIL = 0U,
+	NTCONN_ADAPTER_ERR_WRONG_LINK_STATE,
+	NTCONN_ADAPTER_ERR_TX_POWER_FAIL,
+};
+
+enum {
+	NTCONN_TAG_NONE,
+	NTCONN_TAG_REQUEST,
+	NTCONN_TAG_REPLY,
+	NTCONN_TAG_ERROR
+};
+
+#define MESSAGE_BUFFER 256
+#define MAX_ERR_MESSAGE_LENGTH 256
+
+struct reply_err_s {
+	enum ntconn_err_e err_code;
+	char msg[MAX_ERR_MESSAGE_LENGTH];
+};
+
+#define NTCMOD_HDR_LEN sizeof(struct ntconn_header_s)
+struct ntconn_header_s {
+	uint16_t tag;	/* NTCONN_TAG_* message discriminator */
+	uint16_t len;	/* length of the command string that follows */
+	uint32_t blob_len;	/* length of the binary blob after the command string */
+};
+
+struct pci_id_s {
+	union {
+		uint64_t pci_id;	/* packed 64-bit key; 0 terminates id lists */
+		struct {
+			uint32_t domain;
+			uint8_t bus;
+			uint8_t devid;
+			uint8_t function;
+			uint8_t pad;	/* padding to fill the 64-bit union */
+		};
+	};
+};
+
+#define VERSION_HI(version) ((unsigned int)((version) >> 32))
+#define VERSION_LO(version) ((unsigned int)((version) & 0xffffffff))
+
+/*
+ * Binary interface description for ntconnect module replies
+ */
+
+/*
+ * server get,nic_pci_ids
+ */
+#define MAX_PCI_IDS 16
+#define NICS_PCI_ID_LEN 12
+
+struct ntc_nic_pci_ids_s {
+	char nic_pci_id[MAX_PCI_IDS][NICS_PCI_ID_LEN + 1];
+	int num_nics;
+};
+
+#endif /* _NTCONNECT_API_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
new file mode 100644
index 0000000000..affe905027
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_adapter.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_ADAPTER_H_
+#define _NTCONNECT_API_ADAPTER_H_
+
+/*
+ * adapter get,interfaces
+ */
+enum port_speed {
+	PORT_LINK_SPEED_UNKNOWN,
+	PORT_LINK_SPEED_NONE_REPORTED,
+	PORT_LINK_SPEED_10M,
+	PORT_LINK_SPEED_100M,
+	PORT_LINK_SPEED_1G,
+	PORT_LINK_SPEED_10G,
+	PORT_LINK_SPEED_25G,
+	PORT_LINK_SPEED_40G,
+	PORT_LINK_SPEED_50G,
+	PORT_LINK_SPEED_100G,
+};
+
+enum port_states {
+	PORT_STATE_DISABLED,
+	PORT_STATE_NIM_PRESENT,
+	PORT_STATE_NIM_ABSENT,
+	PORT_STATE_VIRTUAL_UNATTACHED,
+	PORT_STATE_VIRTUAL_SPLIT,
+	PORT_STATE_VIRTUAL_PACKED,
+	PORT_STATE_VIRTUAL_RELAY,
+};
+
+enum port_link { PORT_LINK_UNKNOWN, PORT_LINK_UP, PORT_LINK_DOWN };
+
+enum port_type {
+	PORT_TYPE_PHY_NORMAL, /* Normal phy port (no LAG) */
+	/* Link aggregated phy port in active/active LAG configuration */
+	PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE,
+	PORT_TYPE_PHY_LAG_PRIMARY, /* Primary phy port in active/backup LAG configuration */
+	PORT_TYPE_PHY_LAG_BACKUP, /* Backup phy port in active/backup LAG configuration */
+	PORT_TYPE_VIRT,
+	PORT_TYPE_LAST
+};
+
+enum nim_identifier_e {
+	NIM_UNKNOWN = 0x00, /* Nim type is unknown */
+	NIM_GBIC = 0x01, /* Nim type = GBIC */
+	NIM_FIXED = 0x02, /* Nim type = FIXED */
+	NIM_SFP_SFP_PLUS = 0x03, /* Nim type = SFP/SFP+ */
+	NIM_300_PIN_XBI = 0x04, /* Nim type = 300 pin XBI */
+	NIM_XEN_PAK = 0x05, /* Nim type = XEN-PAK */
+	NIM_XFP = 0x06, /* Nim type = XFP */
+	NIM_XFF = 0x07, /* Nim type = XFF */
+	NIM_XFP_E = 0x08, /* Nim type = XFP-E */
+	NIM_XPAK = 0x09, /* Nim type = XPAK */
+	NIM_X2 = 0x0A, /* Nim type = X2 */
+	NIM_DWDM = 0x0B, /* Nim type = DWDM */
+	NIM_QSFP = 0x0C, /* Nim type = QSFP */
+	NIM_QSFP_PLUS = 0x0D, /* Nim type = QSFP+ */
+	NIM_QSFP28 = 0x11, /* Nim type = QSFP28 */
+	NIM_CFP4 = 0x12, /* Nim type = CFP4 */
+};
+
+/*
+ * Port types
+ */
+enum port_type_e {
+	PORT_TYPE_NOT_AVAILABLE =
+		0, /* The NIM/port type is not available (unknown) */
+	PORT_TYPE_NOT_RECOGNISED, /* The NIM/port type not recognized */
+	PORT_TYPE_RJ45, /* RJ45 type */
+	PORT_TYPE_SFP_NOT_PRESENT, /* SFP type but slot is empty */
+	PORT_TYPE_SFP_SX, /* SFP SX */
+	PORT_TYPE_SFP_SX_DD, /* SFP SX digital diagnostic */
+	PORT_TYPE_SFP_LX, /* SFP LX */
+	PORT_TYPE_SFP_LX_DD, /* SFP LX digital diagnostic */
+	PORT_TYPE_SFP_ZX, /* SFP ZX */
+	PORT_TYPE_SFP_ZX_DD, /* SFP ZX digital diagnostic */
+	PORT_TYPE_SFP_CU, /* SFP copper */
+	PORT_TYPE_SFP_CU_DD, /* SFP copper digital diagnostic */
+	PORT_TYPE_SFP_NOT_RECOGNISED, /* SFP unknown */
+	PORT_TYPE_XFP, /* XFP */
+	PORT_TYPE_XPAK, /* XPAK */
+	PORT_TYPE_SFP_CU_TRI_SPEED, /* SFP copper tri-speed */
+	PORT_TYPE_SFP_CU_TRI_SPEED_DD, /* SFP copper tri-speed digital diagnostic */
+	PORT_TYPE_SFP_PLUS, /* SFP+ type */
+	PORT_TYPE_SFP_PLUS_NOT_PRESENT, /* SFP+ type but slot is empty */
+	PORT_TYPE_XFP_NOT_PRESENT, /* XFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS_NOT_PRESENT, /* QSFP type but slot is empty */
+	PORT_TYPE_QSFP_PLUS, /* QSFP type */
+	PORT_TYPE_SFP_PLUS_PASSIVE_DAC, /* SFP+ Passive DAC */
+	PORT_TYPE_SFP_PLUS_ACTIVE_DAC, /* SFP+ Active DAC */
+	PORT_TYPE_CFP4, /* CFP4 type */
+	PORT_TYPE_CFP4_LR4 = PORT_TYPE_CFP4, /* CFP4 100G, LR4 type */
+	PORT_TYPE_CFP4_NOT_PRESENT, /* CFP4 type but slot is empty */
+	PORT_TYPE_INITIALIZE, /* The port type is not fully established yet */
+	PORT_TYPE_NIM_NOT_PRESENT, /* Generic "Not present" */
+	PORT_TYPE_HCB, /* Test mode: Host Compliance Board */
+	PORT_TYPE_NOT_SUPPORTED, /* The NIM type is not supported in this context */
+	PORT_TYPE_SFP_PLUS_DUAL_RATE, /* SFP+ supports 1G/10G */
+	PORT_TYPE_CFP4_SR4, /* CFP4 100G, SR4 type */
+	PORT_TYPE_QSFP28_NOT_PRESENT, /* QSFP28 type but slot is empty */
+	PORT_TYPE_QSFP28, /* QSFP28 type */
+	PORT_TYPE_QSFP28_SR4, /* QSFP28-SR4 type */
+	PORT_TYPE_QSFP28_LR4, /* QSFP28-LR4 type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PLUS_4X10,
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_PASSIVE_DAC_4X10,
+	PORT_TYPE_QSFP_PASSIVE_DAC =
+		PORT_TYPE_QSFP_PASSIVE_DAC_4X10, /* QSFP passive DAC type */
+	/* Deprecated. The port type should not mention speed eg 4x10 or 1x40 */
+	PORT_TYPE_QSFP_ACTIVE_DAC_4X10,
+	PORT_TYPE_QSFP_ACTIVE_DAC =
+		PORT_TYPE_QSFP_ACTIVE_DAC_4X10, /* QSFP active DAC type */
+	PORT_TYPE_SFP_28, /* SFP28 type */
+	PORT_TYPE_SFP_28_SR, /* SFP28-SR type */
+	PORT_TYPE_SFP_28_LR, /* SFP28-LR type */
+	PORT_TYPE_SFP_28_CR_CA_L, /* SFP28-CR-CA-L type */
+	PORT_TYPE_SFP_28_CR_CA_S, /* SFP28-CR-CA-S type */
+	PORT_TYPE_SFP_28_CR_CA_N, /* SFP28-CR-CA-N type */
+	PORT_TYPE_QSFP28_CR_CA_L, /* QSFP28-CR-CA-L type */
+	PORT_TYPE_QSFP28_CR_CA_S, /* QSFP28-CR-CA-S type */
+	PORT_TYPE_QSFP28_CR_CA_N, /* QSFP28-CR-CA-N type */
+	PORT_TYPE_SFP_28_SR_DR, /* SFP28-SR-DR type */
+	PORT_TYPE_SFP_28_LR_DR, /* SFP28-LR-DR type */
+	PORT_TYPE_SFP_FX, /* SFP FX */
+	PORT_TYPE_SFP_PLUS_CU, /* SFP+ CU type */
+	PORT_TYPE_QSFP28_FR, /* QSFP28-FR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_DR, /* QSFP28-DR type. Uses PAM4 modulation on one lane only */
+	PORT_TYPE_QSFP28_LR, /* QSFP28-LR type. Uses PAM4 modulation on one lane only */
+};
+
+struct mac_addr_s {
+	uint8_t addr_b[6];
+};
+
+struct nim_link_length_s {
+	/* NIM link length (in meters) supported SM (9um). A value of 0xFFFF indicates that the
+	 * length is >= 65535 m
+	 */
+	uint16_t sm;
+	uint16_t ebw; /* NIM link length (in meters) supported EBW (50um) */
+	uint16_t mm50; /* NIM link length (in meters) supported MM (50um) */
+	uint16_t mm62; /* NIM link length (in meters) supported MM (62.5um) */
+	uint16_t copper; /* NIM link length (in meters) supported copper */
+};
+
+struct nim_data_s {
+	uint8_t nim_id;
+	uint8_t port_type;
+	char vendor_name[17];
+	char prod_no[17];
+	char serial_no[17];
+	char date[9];
+	char rev[5];
+	uint8_t pwr_level_req;
+	uint8_t pwr_level_cur;
+	struct nim_link_length_s link_length;
+};
+
+struct sensor {
+	uint8_t sign;
+	uint8_t type;
+	uint32_t current_value;
+	uint32_t min_value;
+	uint32_t max_value;
+	char name[50];
+};
+
+struct ntc_sensors_s {
+	uint16_t adapter_sensors_cnt;
+	uint16_t ports_cnt;
+	uint16_t nim_sensors_cnt[8];
+	char adapter_name[24];
+};
+
+#define MAX_RSS_QUEUES 128
+
+enum queue_dir_e { QUEUE_INPUT, QUEUE_OUTPUT };
+
+struct queue_s {
+	enum queue_dir_e dir;
+	int idx;
+};
+
+struct ntc_interface_s {
+	uint8_t port_id;
+	enum port_type type;
+	enum port_link link;
+	enum port_states port_state;
+	enum port_speed port_speed;
+	struct pci_id_s pci_id;
+	struct mac_addr_s mac;
+	struct nim_data_s nim_data;
+	uint16_t mtu;
+	/* attached queues */
+	struct {
+		struct queue_s queue[MAX_RSS_QUEUES];
+		int num_queues;
+	};
+};
+
+/*
+ * adapter get,interfaces
+ */
+struct ntc_interfaces_s {
+	int final_list;
+	uint8_t nb_ports;
+	struct ntc_interface_s intf[];
+};
+
+/*
+ * adapter get,info
+ */
+struct ntc_adap_get_info_s {
+	/* NOTE(review): this declares an array of 32 char pointers, not a
+	 * 32-character string; if a firmware version string is intended it
+	 * should likely be 'char fw_version[32]' - confirm against the code
+	 * that fills this reply.
+	 */
+	char *fw_version[32];
+};
+
+#endif /* _NTCONNECT_API_ADAPTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
new file mode 100644
index 0000000000..4091d61d7d
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_flow.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_FILTER_H_
+#define _NTCONNECT_API_FILTER_H_
+
+#include "stream_binary_flow_api.h"
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+#define MAX_FLOW_STREAM_ELEM 16
+#define MAX_FLOW_STREAM_QUERY_DATA 1024
+#define MAX_FLOW_STREAM_ERROR_MSG 128
+#define MAX_FLOW_STREAM_VXLAN_TUN_ELEM 8
+#define MAX_FLOW_STREAM_COUNT_ACTIONS 4
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_flow_err_e {
+	NTCONN_FLOW_ERR_NONE = 0,
+	NTCONN_FLOW_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED,
+	NTCONN_FLOW_ERR_INVALID_PORT,
+	NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER,
+	NTCONN_FLOW_ERR_TO_MANY_FLOWS,
+	NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED,
+	NTCONN_FLOW_ERR_NO_VF_QUEUES,
+};
+
+struct flow_elem_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_elem_eth eth;
+		struct flow_elem_vlan vlan[2];
+		struct flow_elem_ipv4 ipv4;
+		struct flow_elem_ipv6 ipv6;
+		struct flow_elem_sctp sctp;
+		struct flow_elem_tcp tcp;
+		struct flow_elem_udp udp;
+		struct flow_elem_icmp icmp;
+		struct flow_elem_vxlan vxlan;
+		struct flow_elem_port_id port_id;
+		struct flow_elem_tag tag;
+	} u;
+};
+
+struct flow_elem_cpy {
+	enum flow_elem_type type; /* element type */
+	struct flow_elem_types_s spec_cpy;
+	struct flow_elem_types_s mask_cpy;
+};
+
+struct flow_action_vxlan_encap_cpy {
+	/* Encapsulating vxlan tunnel definition */
+	struct flow_elem_cpy vxlan_tunnel[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+};
+
+struct flow_action_rss_cpy {
+	struct flow_action_rss rss;
+	uint16_t cpy_queue[FLOW_MAX_QUEUES];
+};
+
+#define MAX_ACTION_ENCAP_DATA 512
+struct flow_action_decap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_encap_cpy {
+	uint8_t data[MAX_ACTION_ENCAP_DATA];
+	size_t size;
+	struct flow_elem_cpy item_cpy
+		[RAW_ENCAP_DECAP_ELEMS_MAX]; /* Need room for end command */
+	int item_count;
+};
+
+struct flow_action_types_s {
+	int valid;
+	union {
+		int start_addr;
+		struct flow_action_rss_cpy rss;
+		struct flow_action_push_vlan vlan;
+		struct flow_action_set_vlan_vid vlan_vid;
+		struct flow_action_vxlan_encap_cpy vxlan;
+		struct flow_action_count count;
+		struct flow_action_mark mark;
+		struct flow_action_port_id port_id;
+		struct flow_action_tag tag;
+		struct flow_action_queue queue;
+		struct flow_action_decap_cpy decap;
+		struct flow_action_encap_cpy encap;
+		struct flow_action_jump jump;
+		struct flow_action_meter meter;
+	} u;
+};
+
+struct flow_action_cpy {
+	enum flow_action_type type;
+	struct flow_action_types_s conf_cpy;
+};
+
+struct query_flow_ntconnect {
+	uint8_t port;
+	struct flow_action_cpy action;
+	uint64_t flow;
+};
+
+struct create_flow_ntconnect {
+	uint8_t port;
+	uint8_t vport;
+	struct flow_attr attr;
+	struct flow_elem_cpy elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_cpy action[MAX_FLOW_STREAM_ELEM];
+};
+
+struct destroy_flow_ntconnect {
+	uint8_t port;
+	uint64_t flow;
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct flow_setport_return {
+	struct flow_queue_id_s queues[FLOW_MAX_QUEUES];
+	uint8_t num_queues;
+};
+
+struct flow_error_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct create_flow_return_s {
+	uint64_t flow;
+};
+
+struct validate_flow_return_s {
+	int status;
+};
+
+struct query_flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+	uint32_t data_length;
+	uint8_t data[];
+};
+
+struct flow_return_s {
+	enum flow_error_e type;
+	char err_msg[ERR_MSG_LEN];
+	int status;
+};
+
+struct flow_error_ntconn {
+	enum flow_error_e type;
+	char message[ERR_MSG_LEN];
+};
+
+#endif /* _NTCONNECT_API_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
new file mode 100644
index 0000000000..901b0ccba1
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_meter.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_METER_FILTER_H_
+#define _NTCONNECT_METER_FILTER_H_
+
+#define FLOW_COOKIE 0x12344321
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+#define MAX_PATH_LEN 128
+
+enum ntconn_meter_err_e {
+	NTCONN_METER_ERR_NONE = 0,
+	NTCONN_METER_ERR_INTERNAL_ERROR = 0x100,
+	NTCONN_METER_ERR_INVALID_PORT,
+	NTCONN_METER_ERR_UNEXPECTED_VIRTIO_PATH,
+	NTCONN_METER_ERR_PROFILE_ID,
+	NTCONN_METER_ERR_POLICY_ID,
+	NTCONN_METER_ERR_METER_ID,
+};
+
+enum ntconn_meter_command_e {
+	UNKNOWN_CMD,
+	ADD_PROFILE,
+	DEL_PROFILE,
+	ADD_POLICY,
+	DEL_POLICY,
+	CREATE_MTR,
+	DEL_MTR
+};
+
+#define ERR_MSG_LEN 128LLU
+
+struct meter_error_return_s {
+	enum rte_mtr_error_type type;
+	int status;
+	char err_msg[ERR_MSG_LEN];
+};
+
+struct meter_setup_s {
+	uint8_t vport;
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile profile;
+		struct {
+			struct rte_mtr_meter_policy_params policy;
+			struct rte_flow_action actions_green[2];
+			struct rte_flow_action actions_yellow[2];
+			struct rte_flow_action actions_red[2];
+		} p;
+		struct rte_mtr_params mtr_params;
+	};
+};
+
+struct meter_get_stat_s {
+	uint8_t vport;
+	uint32_t mtr_id;
+	int clear;
+};
+
+struct meter_return_stat_s {
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+};
+
+struct meter_setup_ptr_s {
+	uint32_t id;
+	int shared;
+	union {
+		struct rte_mtr_meter_profile *profile;
+		struct rte_mtr_meter_policy_params *policy;
+		struct rte_mtr_params *mtr_params;
+	};
+};
+
+struct meter_return_s {
+	int status;
+};
+
+struct meter_capabilities_return_s {
+	struct rte_mtr_capabilities cap;
+};
+
+#endif /* _NTCONNECT_METER_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
new file mode 100644
index 0000000000..1022bc2056
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_statistic.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_API_STATISTIC_H_
+#define _NTCONNECT_API_STATISTIC_H_
+
+/*
+ * Supported defined statistic records for Stat layout version 6 - defined in nthw_stat module
+ */
+#define NUM_STAT_RECORD_TYPE_COLOR \
+	(sizeof(struct color_type_fields_s) / sizeof(uint64_t))
+struct color_type_fields_s {
+	uint64_t pkts;
+	uint64_t octets;
+	uint64_t tcp_flgs;
+};
+
+#define NUM_STAT_RECORD_TYPE_FLOWMATCHER \
+	(sizeof(struct flowmatcher_type_fields_s) / sizeof(uint64_t))
+struct flowmatcher_type_fields_s {
+	/* FLM 0.17 */
+	uint64_t current;
+	uint64_t learn_done;
+	uint64_t learn_ignore;
+	uint64_t learn_fail;
+	uint64_t unlearn_done;
+	uint64_t unlearn_ignore;
+	uint64_t auto_unlearn_done;
+	uint64_t auto_unlearn_ignore;
+	uint64_t auto_unlearn_fail;
+	uint64_t timeout_unlearn_done;
+	uint64_t rel_done;
+	uint64_t rel_ignore;
+	uint64_t prb_done;
+	uint64_t prb_ignore;
+	/* FLM 0.20 */
+	uint64_t sta_done;
+	uint64_t inf_done;
+	uint64_t inf_skip;
+	uint64_t pck_hit;
+	uint64_t pck_miss;
+	uint64_t pck_unh;
+	uint64_t pck_dis;
+	uint64_t csh_hit;
+	uint64_t csh_miss;
+	uint64_t csh_unh;
+	uint64_t cuc_start;
+	uint64_t cuc_move;
+};
+
+#define NUM_STAT_RECORD_TYPE_QUEUE \
+	(sizeof(struct queue_type_fields_s) / sizeof(uint64_t))
+struct queue_type_fields_s {
+	uint64_t flush_pkts;
+	uint64_t drop_pkts;
+	uint64_t fwd_pkts;
+	uint64_t dbs_drop_pkts;
+	uint64_t flush_octets;
+	uint64_t drop_octets;
+	uint64_t fwd_octets;
+	uint64_t dbs_drop_octets;
+};
+
+/*
+ * Port stat counters for virtualization NICS with virtual ports support
+ */
+#define NUM_STAT_RECORD_TYPE_RX_PORT_VIRT \
+	(sizeof(struct rtx_type_fields_virt_s) / sizeof(uint64_t))
+/* same for Rx and Tx counters on Virt */
+#define NUM_STAT_RECORD_TYPE_TX_PORT_VIRT NUM_STAT_RECORD_TYPE_RX_PORT_VIRT
+struct rtx_type_fields_virt_s {
+	uint64_t octets;
+	uint64_t pkts;
+	uint64_t drop_events;
+	uint64_t qos_drop_octets;
+	uint64_t qos_drop_pkts;
+};
+
+/*
+ * Port RMON counters for Cap devices
+ */
+struct stat_rmon_s {
+	/* Sums that are calculated by software */
+	uint64_t drop_events;
+	uint64_t pkts;
+	/* Read from FPGA */
+	uint64_t octets;
+	uint64_t broadcast_pkts;
+	uint64_t multicast_pkts;
+	uint64_t unicast_pkts;
+	uint64_t pkts_alignment;
+	uint64_t pkts_code_violation;
+	uint64_t pkts_crc;
+	uint64_t undersize_pkts;
+	uint64_t oversize_pkts;
+	uint64_t fragments;
+	uint64_t jabbers_not_truncated;
+	uint64_t jabbers_truncated;
+	uint64_t pkts_64_octets;
+	uint64_t pkts_65_to_127_octets;
+	uint64_t pkts_128_to_255_octets;
+	uint64_t pkts_256_to_511_octets;
+	uint64_t pkts_512_to_1023_octets;
+	uint64_t pkts_1024_to_1518_octets;
+	uint64_t pkts_1519_to_2047_octets;
+	uint64_t pkts_2048_to_4095_octets;
+	uint64_t pkts_4096_to_8191_octets;
+	uint64_t pkts_8192_to_max_octets;
+};
+
+#define NUM_STAT_RECORD_TYPE_RX_PORT_CAP \
+	(sizeof(struct rx_type_fields_cap_s) / sizeof(uint64_t))
+struct rx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+	uint64_t mac_drop_events;
+	uint64_t pkts_lr;
+	/* Rx only port counters */
+	uint64_t duplicate;
+	uint64_t pkts_ip_chksum_error;
+	uint64_t pkts_udp_chksum_error;
+	uint64_t pkts_tcp_chksum_error;
+	uint64_t pkts_giant_undersize;
+	uint64_t pkts_baby_giant;
+	uint64_t pkts_not_isl_vlan_mpls;
+	uint64_t pkts_isl;
+	uint64_t pkts_vlan;
+	uint64_t pkts_isl_vlan;
+	uint64_t pkts_mpls;
+	uint64_t pkts_isl_mpls;
+	uint64_t pkts_vlan_mpls;
+	uint64_t pkts_isl_vlan_mpls;
+	uint64_t pkts_no_filter;
+	uint64_t pkts_dedup_drop;
+	uint64_t pkts_filter_drop;
+	uint64_t pkts_overflow;
+	uint64_t pkts_dbs_drop;
+	uint64_t octets_no_filter;
+	uint64_t octets_dedup_drop;
+	uint64_t octets_filter_drop;
+	uint64_t octets_overflow;
+	uint64_t octets_dbs_drop;
+	uint64_t ipft_first_hit;
+	uint64_t ipft_first_not_hit;
+	uint64_t ipft_mid_hit;
+	uint64_t ipft_mid_not_hit;
+	uint64_t ipft_last_hit;
+	uint64_t ipft_last_not_hit;
+};
+
+#define NUM_STAT_RECORD_TYPE_TX_PORT_CAP \
+	(sizeof(struct tx_type_fields_cap_s) / sizeof(uint64_t))
+struct tx_type_fields_cap_s {
+	struct stat_rmon_s rmon;
+};
+
+/*
+ * stat get,colors
+ * stat get,queues
+ * stat get,rx_counters
+ * stat get,tx_counters
+ */
+#define STAT_INFO_ELEMENTS \
+	(sizeof(struct ntc_stat_get_data_s) / sizeof(uint64_t))
+
+struct ntc_stat_get_data_s {
+	uint64_t nb_counters;
+	uint64_t timestamp;
+	uint64_t is_virt;
+	uint64_t data[];
+};
+
+#endif /* _NTCONNECT_API_STATISTIC_H_ */
diff --git a/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
new file mode 100644
index 0000000000..44cacbd931
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/include/ntconnect_api_test.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _NTCONNECT_TEST_FILTER_H_
+#define _NTCONNECT_TEST_FILTER_H_
+
+/*
+ * Create structures allocating the space to carry through ntconnect interface
+ */
+
+/* Test reply carried through the ntconnect interface. */
+struct test_s {
+	int number;	/* Number of entries in test[] */
+	int status;
+	uint64_t test[];	/* Flexible array of test payload values */
+};
+
+#endif /* _NTCONNECT_TEST_FILTER_H_ */
diff --git a/drivers/net/ntnic/ntconnect/ntconn_server.c b/drivers/net/ntnic/ntconnect/ntconn_server.c
new file mode 100644
index 0000000000..34a3c19955
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconn_server.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "ntconnect.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+/*
+ * Server module always registered on 0000:00:00.0
+ */
+#define this_module_name "server"
+
+#define NTCONNECT_SERVER_VERSION_MAJOR 0U
+#define NTCONNECT_SERVER_VERSION_MINOR 1U
+
+static int func_get_nic_pci(void *hdl, int client_fd,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+/* Sub-commands of "get"; each table is NULL-terminated. */
+static struct func_s funcs_get_level1[] = {
+	{ "nic_pci_ids", NULL, func_get_nic_pci },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s server_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Handler for "get nic_pci_ids": return the list of registered PCI
+ * addresses as "dddd:bb:dd.f" strings. On success *data owns a heap
+ * allocated struct ntc_nic_pci_ids_s that the caller must release via
+ * the module free_data callback.
+ */
+static int func_get_nic_pci(void *hdl, int client_fd _unused,
+			    struct ntconn_header_s *hdr _unused, char **data,
+			    int *len)
+{
+	struct ntconn_server_s *serv = (struct ntconn_server_s *)hdl;
+	struct ntc_nic_pci_ids_s *reply =
+		calloc(1, sizeof(struct ntc_nic_pci_ids_s));
+
+	if (reply == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Format one entry per registered device; stop at first empty slot. */
+	int count;
+
+	for (count = 0; count < MAX_PCI_IDS; count++) {
+		if (!serv->pci_id_list[count].pci_id)
+			break;
+		sprintf(reply->nic_pci_id[count], "%04x:%02x:%02x.%x",
+			serv->pci_id_list[count].domain & 0xffff,
+			serv->pci_id_list[count].bus,
+			serv->pci_id_list[count].devid,
+			serv->pci_id_list[count].function);
+	}
+	reply->num_nics = count;
+	*data = (char *)reply;
+	*len = sizeof(struct ntc_nic_pci_ids_s);
+
+	return REQUEST_OK;
+}
+
+/* Request entry point: dispatch through the server module's command tree. */
+static int ntconn_server_request(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char *function,
+				 char **data, int *len)
+{
+	int res = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, server_entry_funcs, data, len, 0);
+
+	return res;
+}
+
+/*
+ * free_data callback: release a reply buffer allocated by this module.
+ * Early-return keeps the original semantics (debug log only for non-NULL
+ * buffers); the redundant NULL guard around free() is gone — free(NULL)
+ * is a no-op.
+ */
+static void ntconn_server_free_data(void *hdl _unused, char *data)
+{
+	if (!data)
+		return;
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "server free data\n");
+#endif
+	free(data);
+}
+
+/*
+ * Module operations vector for the "server" pseudo-module: name, version,
+ * request dispatcher, reply-buffer release; no client_cleanup callback.
+ */
+static const ntconnapi_t ntconn_server_op = { this_module_name,
+					      NTCONNECT_SERVER_VERSION_MAJOR,
+					      NTCONNECT_SERVER_VERSION_MINOR,
+					      ntconn_server_request,
+					      ntconn_server_free_data,
+					      NULL
+					    };
+
+/*
+ * Register the server pseudo-module. It lives on the fake PCI address
+ * 0000:00:00.0 so it never collides with a real adapter.
+ */
+int ntconn_server_register(void *server)
+{
+	static const struct rte_pci_addr addr = {
+		.domain = 0,
+		.bus = 0,
+		.devid = 0,
+		.function = 0,
+	};
+
+	return register_ntconn_mod(&addr, server, &ntconn_server_op);
+}
diff --git a/drivers/net/ntnic/ntconnect/ntconnect.c b/drivers/net/ntnic/ntconnect/ntconnect.c
new file mode 100644
index 0000000000..697e101a03
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect/ntconnect.c
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include "nt_util.h"
+#include "ntconnect.h"
+#include "ntconnect_api.h"
+#include "ntlog.h"
+
+/* clang-format off */
+/*
+ * Error code -> text lookup table; terminated by {-1, NULL}. Index 1
+ * ("Internal error") doubles as the fallback entry in get_ntconn_error().
+ */
+ntconn_err_t ntconn_err[] = {
+	{NTCONN_ERR_CODE_NONE, "Success"},
+	{NTCONN_ERR_CODE_INTERNAL_ERROR, "Internal error"},
+	{NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR, "Internal error in reply from module"},
+	{NTCONN_ERR_CODE_NO_DATA, "No data found"},
+	{NTCONN_ERR_CODE_INVALID_REQUEST, "Invalid request"},
+	{NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED, "Function not yet implemented"},
+	{NTCONN_ERR_CODE_INTERNAL_FUNC_ERROR, "Internal error in function call list"},
+	{NTCONN_ERR_CODE_MISSING_INVALID_PARAM, "Missing or invalid parameter"},
+	{NTCONN_ERR_CODE_FUNCTION_PARAM_INCOMPLETE, "Function parameter is incomplete"},
+	{NTCONN_ERR_CODE_FUNC_PARAM_NOT_RECOGNIZED,
+		"Function or parameter not recognized/supported"},
+	{-1, NULL}
+};
+
+/* clang-format on */
+
+static ntconn_mod_t *ntcmod_base;	/* Head of registered-module list */
+static pthread_t tid;	/* Listener (accept) thread */
+static pthread_t ctid;	/* Most recently spawned client worker thread */
+static struct ntconn_server_s ntconn_serv;	/* Server module state */
+
+/*
+ * Look up the table entry for err_code. Unknown codes fall back to table
+ * index 1 ("Internal error"), so the return value is never NULL.
+ */
+const ntconn_err_t *get_ntconn_error(enum ntconn_err_e err_code)
+{
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return &ntconn_err[i];
+	}
+
+	/* Not found: report "Internal error". */
+	return &ntconn_err[1];
+}
+
+/*
+ * Register a module instance under a PCI address. The module is pushed on
+ * the global ntcmod_base list and its PCI id is recorded (once) in the
+ * server's pci_id_list so clients can enumerate devices.
+ * Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): list insertion and pci_id_list update are not protected by
+ * any lock — presumably all registration happens single-threaded at probe
+ * time; confirm against callers.
+ */
+int register_ntconn_mod(const struct rte_pci_addr *addr, void *hdl,
+			const ntconnapi_t *op)
+{
+	/* Verify and check module name is unique */
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT,
+	       "Registering pci: %04x:%02x:%02x.%x, module %s\n", addr->domain,
+	       addr->bus, addr->devid, addr->function, op->module);
+#endif
+
+	ntconn_mod_t *ntcmod = (ntconn_mod_t *)malloc(sizeof(ntconn_mod_t));
+
+	if (!ntcmod) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return -1;
+	}
+	ntcmod->addr.domain = addr->domain;
+	ntcmod->addr.bus = addr->bus;
+	ntcmod->addr.devid = addr->devid;
+	ntcmod->addr.function = addr->function;
+	ntcmod->addr.pad = 0;
+
+	ntcmod->hdl = hdl;
+	ntcmod->op = op;
+	pthread_mutex_init(&ntcmod->mutex, NULL);
+
+	/* Push onto the head of the module list. */
+	ntcmod->next = ntcmod_base;
+	ntcmod_base = ntcmod;
+
+	if (ntcmod->addr.pci_id) { /* Avoid server fake pci_id */
+		int i;
+
+		/* Record the id in the first free slot, unless already present. */
+		for (i = 0; i < MAX_PCI_IDS; i++) {
+			if (ntconn_serv.pci_id_list[i].pci_id == 0) {
+				NT_LOG(DBG, NTCONNECT,
+				       "insert at index %i PCI ID %" PRIX64 "\n", i,
+				       ntcmod->addr.pci_id);
+				ntconn_serv.pci_id_list[i].pci_id =
+					ntcmod->addr.pci_id;
+				break;
+			} else if (ntconn_serv.pci_id_list[i].pci_id ==
+					ntcmod->addr.pci_id)
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Fill *addr with an AF_UNIX address for path. Returns 0 on success, -1
+ * on NULL arguments or a path too long for sun_path.
+ *
+ * Fix: the original passed sizeof(sun_path) - 1 to rte_strscpy(), which
+ * truncated a path of exactly sizeof(sun_path) - 1 characters even though
+ * the length check accepts it. The length is validated here, so an exact
+ * bounded copy (including the NUL) is used instead.
+ */
+static int unix_build_address(const char *path, struct sockaddr_un *addr)
+{
+	if (addr == NULL || path == NULL)
+		return -1;
+	memset(addr, 0, sizeof(struct sockaddr_un));
+	addr->sun_family = AF_UNIX;
+
+	size_t path_len = strlen(path);
+
+	if (path_len < sizeof(addr->sun_path)) {
+		memcpy(addr->sun_path, path, path_len + 1);
+		return 0;
+	}
+	return -1;
+}
+
+/* Internal status codes shared by the socket helpers below. */
+#define STATUS_OK 0
+#define STATUS_INTERNAL_ERROR -1
+#define STATUS_TRYAGAIN -2
+#define STATUS_INVALID_PARAMETER -3
+#define STATUS_CONNECTION_CLOSED -4
+#define STATUS_CONNECTION_INVALID -5
+#define STATUS_TIMEOUT -6
+
+/*
+ * Poll fd for readability (timeout in milliseconds, -1 = block forever)
+ * and then recv() up to len bytes into data. On STATUS_OK, *recv_len
+ * holds the number of bytes received (possibly fewer than len). Returns
+ * one of the STATUS_* codes; STATUS_TRYAGAIN means the caller should
+ * simply retry the call.
+ */
+static int read_data(int fd, size_t len, uint8_t *data, size_t *recv_len,
+		     int timeout)
+{
+	struct pollfd pfd;
+	ssize_t ret;
+
+	pfd.fd = fd;
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ret = poll(&pfd, 1, timeout);
+	if (ret < 0) {
+		if (errno == EINTR)
+			return STATUS_TRYAGAIN; /* Caught signal before timeout */
+		if (errno == EINVAL)
+			return STATUS_INVALID_PARAMETER; /* Timeout is negative */
+		if (errno == EFAULT)
+			return STATUS_INVALID_PARAMETER; /* Fds argument is illegal */
+		/* else */
+		assert(0);
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	if (ret == 0)
+		return STATUS_TIMEOUT;
+
+	if (pfd.revents == 0) {
+		assert(ret == 1);
+		assert(0); /* Revents cannot be zero when NtSocket_Poll returns 1 */
+		return STATUS_TRYAGAIN;
+	}
+
+	/* Readable and not in an error state: pull the data. */
+	if ((pfd.revents & POLLIN) &&
+			((pfd.revents & (POLLERR | POLLNVAL)) == 0)) {
+		ret = recv(pfd.fd, data, len, 0);
+		if (ret < 0) {
+			int lerrno = errno;
+
+			if (lerrno == EWOULDBLOCK || lerrno == EAGAIN) {
+				/*
+				 * We have data but if the very first read turns out to return
+				 * EWOULDBLOCK or EAGAIN it means that the remote end has dropped
+				 * the connection
+				 */
+				NT_LOG(DBG, NTCONNECT,
+				       "The socket with fd %d has been closed by remote end. %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_CLOSED;
+			}
+			if (lerrno != EINTR) {
+				NT_LOG(ERR, NTCONNECT,
+				       "recv() from fd %d received errno %d [%s]\n",
+				       pfd.fd, lerrno, strerror(lerrno));
+				return STATUS_CONNECTION_INVALID;
+			}
+			/* EINTR */
+			return STATUS_TRYAGAIN;
+		}
+		if (ret == 0) {
+			if (pfd.revents & POLLHUP) {
+				/* This means that we have read all data and the remote end has
+				 * HUP
+				 */
+#ifdef DEBUG
+				NT_LOG(DBG, NTCONNECT,
+				       "The remote end has terminated the session\n");
+#endif
+				return STATUS_CONNECTION_CLOSED;
+			}
+			return STATUS_TRYAGAIN;
+		}
+
+		/* Ret can only be positive at this point */
+		 *recv_len = (size_t)ret;
+		return STATUS_OK;
+	}
+
+	if ((pfd.revents & POLLHUP) == POLLHUP) {
+		/* this means that the remote end has HUP */
+		NT_LOG(DBG, NTCONNECT,
+		       "The remote end has terminated the session\n");
+		return STATUS_CONNECTION_CLOSED;
+	}
+
+	/* POLLERR/POLLNVAL or some other unexpected event combination. */
+	NT_LOG(ERR, NTCONNECT,
+	       "poll() returned 0x%x. Invalidating the connection\n",
+	       pfd.revents);
+	return STATUS_CONNECTION_INVALID;
+}
+
+/*
+ * Receive exactly length bytes into data, retrying on transient
+ * conditions. Returns STATUS_OK when the buffer is full, or the first
+ * non-retryable STATUS_* code (closed/timeout are returned silently).
+ */
+static int read_all(int clfd, uint8_t *data, size_t length)
+{
+	size_t pos = 0;
+
+	while (pos < length) {
+		size_t recv_len = 0;
+		int ret = read_data(clfd, length - pos, &data[pos],
+				    &recv_len, -1);
+
+		switch (ret) {
+		case STATUS_OK:
+			pos += recv_len;
+			break;
+		case STATUS_TRYAGAIN:
+			/* Transient condition - retry the read. */
+			break;
+		case STATUS_CONNECTION_CLOSED:
+		case STATUS_TIMEOUT:
+			/* Silently return status */
+			return ret;
+		default:
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed getting packet. Error code: 0x%X\n",
+			       ret);
+			return ret;
+		}
+	}
+	return STATUS_OK;
+}
+
+/*
+ * Write the whole buffer to fd, looping over short writes. Returns 0 on
+ * success or STATUS_INTERNAL_ERROR on a write failure.
+ *
+ * Fix: the original treated EINTR as a fatal error; an interrupted
+ * write() is now simply retried, as POSIX intends.
+ */
+static int write_all(int fd, const uint8_t *data, size_t size)
+{
+	size_t len = 0;
+
+	while (len < size) {
+		ssize_t res = write(fd, (const void *)&data[len], size - len);
+
+		if (res < 0) {
+			if (errno == EINTR)
+				continue; /* Interrupted by a signal - retry */
+			NT_LOG(ERR, NTCONNECT, "write to socket failed!");
+			return STATUS_INTERNAL_ERROR;
+		}
+		len += (size_t)res;
+	}
+	return 0;
+}
+
+/*
+ * Read one complete request from clfd into a heap buffer. First receive
+ * up to MESSAGE_BUFFER bytes; if the header announces a larger total
+ * (len + blob_len), grow the buffer and receive the remainder. On
+ * STATUS_OK, *rdata owns the buffer and the caller must free() it.
+ * NOTE(review): assumes the first recv delivers at least NTCMOD_HDR_LEN
+ * bytes so the header copy below is valid — TODO confirm for a
+ * SOCK_STREAM peer that writes the message in one piece.
+ */
+static int read_request(int clfd, char **rdata)
+{
+	uint8_t *data = malloc(MESSAGE_BUFFER * sizeof(uint8_t));
+
+	if (!data) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+
+	size_t recv_len = 0;
+	int ret = read_data(clfd, MESSAGE_BUFFER, data, &recv_len, -1);
+
+	if (ret) {
+		free(data);
+		return ret;
+	}
+
+	struct ntconn_header_s hdr;
+
+	memcpy(&hdr, data, NTCMOD_HDR_LEN);
+	size_t length = (hdr.len + hdr.blob_len) * sizeof(uint8_t);
+
+	/* Message larger than the initial buffer: grow and read the rest. */
+	if (length > MESSAGE_BUFFER) {
+		uint8_t *new_data = realloc(data, length);
+
+		if (!new_data) {
+			NT_LOG(ERR, NTCONNECT, "memory reallocation failed");
+			free(data);
+			return STATUS_INTERNAL_ERROR;
+		}
+		data = new_data;
+		ret = read_all(clfd, &data[recv_len], length - recv_len);
+		if (ret) {
+			free(data);
+			return ret;
+		}
+	}
+
+	*rdata = (char *)data;
+	return STATUS_OK;
+}
+
+/*
+ * Read one request from clfd and resolve the target module.
+ * Request layout: "<pci_id>;<module>;<command...>" where pci_id is
+ * "dddd:bb:dd.f". On success returns the matching registered module and
+ * points *module_cmd at the command part inside *get_req (which the
+ * caller must free). *status carries the read STATUS_* code.
+ *
+ * Fixes vs. original: strlen(tok) was evaluated BEFORE the NULL check on
+ * both strtok_r results, so a malformed request without ';' separators
+ * dereferenced NULL; tokens are now validated before use. The dead
+ * "if (!hdr)" check (hdr was already dereferenced above it) is removed.
+ */
+static ntconn_mod_t *ntconnect_interpret_request(int clfd,
+		struct ntconn_header_s *hdr,
+		char **get_req _unused,
+		char **module_cmd, int *status)
+{
+	char pci_id[32];
+	char module[64];
+	ntconn_mod_t *result_ntcmod = NULL;
+	char *request = NULL;
+
+	int ret = read_request(clfd, &request);
+	*status = ret;
+	*get_req = request;
+
+	if (ret == STATUS_OK && request) {
+		*hdr = *(struct ntconn_header_s *)request;
+
+		switch (hdr->tag) {
+		case NTCONN_TAG_REQUEST: {
+			unsigned long idx = NTCMOD_HDR_LEN;
+			char *saveptr;
+			char *req = &request[idx];
+
+			uint32_t domain = 0xffffffff;
+			uint8_t bus = 0xff;
+			uint8_t devid = 0xff;
+			uint8_t function = 0xff;
+
+			/* First token: the PCI id string. */
+			char *tok = strtok_r(req, ";", &saveptr);
+
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(pci_id, tok, 31);
+
+			/* Second token: the module name. */
+			tok = strtok_r(NULL, ";", &saveptr);
+			if (!tok)
+				goto err_out;
+			idx += strlen(tok) + 1;
+			rte_strscpy(module, tok, 63);
+
+			/* Remainder (if any) is the module command. */
+			tok = strtok_r(NULL, "", &saveptr);
+			hdr->len -= idx;
+			if (tok)
+				*module_cmd = &request[idx];
+
+			/* Parse "dddd:bb:dd.f" into its PCI address parts. */
+			tok = strtok_r(pci_id, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			domain = (uint32_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			bus = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, ":.", &saveptr);
+			if (!tok)
+				goto err_out;
+			devid = (uint8_t)strtol(tok, NULL, 16);
+
+			tok = strtok_r(NULL, "", &saveptr);
+			if (!tok)
+				goto err_out;
+			function = (uint8_t)strtol(tok, NULL, 16);
+
+			/* Search for module registered as <pci_id:module> */
+			ntconn_mod_t *ntcmod = ntcmod_base;
+
+			while (ntcmod) {
+				if (domain == ntcmod->addr.domain &&
+						bus == ntcmod->addr.bus &&
+						devid == ntcmod->addr.devid &&
+						function == ntcmod->addr.function &&
+						strcmp(ntcmod->op->module, module) == 0) {
+					result_ntcmod = ntcmod;
+					break;
+				}
+				ntcmod = ntcmod->next;
+			}
+		}
+		break;
+
+		default:
+			break;
+		}
+	}
+
+err_out:
+
+	return result_ntcmod;
+}
+
+/*
+ * Send one framed message: an ntconn_header_s followed by size bytes of
+ * payload. Returns 0 on success or a STATUS_* error from write_all().
+ *
+ * Fix: the header struct is now zero-initialized so any padding bytes in
+ * struct ntconn_header_s are not sent to the peer uninitialized.
+ */
+static int send_reply(int clfd, uint16_t reply_tag, const void *data,
+		      uint32_t size)
+{
+	struct ntconn_header_s hdr = { 0 };
+
+	hdr.tag = reply_tag;
+	hdr.len = NTCMOD_HDR_LEN + size;
+	hdr.blob_len = 0;
+
+	uint8_t *message = malloc(hdr.len * sizeof(uint8_t));
+
+	if (!message) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return STATUS_INTERNAL_ERROR;
+	}
+	memcpy(message, (void *)&hdr, NTCMOD_HDR_LEN);
+	memcpy(&message[NTCMOD_HDR_LEN], data, size);
+
+	int res = write_all(clfd, message, hdr.len);
+
+	free(message);
+	return res;
+}
+
+/*
+ * Send a framed reply and then return the payload buffer to the owning
+ * module through its free_data callback (only when there is a payload).
+ * The module mutex serializes the callback with other module calls.
+ */
+static int send_reply_free_data(int clfd, ntconn_mod_t *cmod,
+				uint16_t reply_tag, void *data, uint32_t size)
+{
+	int res = send_reply(clfd, reply_tag, data, size);
+
+	if (size != 0) {
+		pthread_mutex_lock(&cmod->mutex);
+		cmod->op->free_data(cmod->hdl, data);
+		pthread_mutex_unlock(&cmod->mutex);
+	}
+
+	return res;
+}
+
+/*
+ * Send an error reply for err_code. The "----" prefix reserves room for
+ * the 32-bit error code that is copied over the first four characters
+ * before sending, so the wire format is <u32 code>"connect:"<text>.
+ *
+ * Fix: use snprintf() so a long error text cannot overflow err_buf.
+ */
+static int ntconnect_send_error(int clfd, enum ntconn_err_e err_code)
+{
+	char err_buf[MAX_ERR_MESSAGE_LENGTH];
+	const ntconn_err_t *ntcerr = get_ntconn_error(err_code);
+
+	snprintf(err_buf, sizeof(err_buf), "----connect:%s",
+		 ntcerr->err_text);
+
+	unsigned int len = strlen(err_buf);
+
+	memcpy(err_buf, &ntcerr->err_code, sizeof(uint32_t));
+
+	return send_reply(clfd, NTCONN_TAG_ERROR, err_buf, len);
+}
+
+/*
+ * Per-client worker thread: loop reading requests, dispatching them to
+ * the resolved module and sending replies until the peer disconnects.
+ * On exit, every module's client_cleanup callback is invoked for this fd.
+ * NOTE(review): after a successful reply the loop still falls through to
+ * ntconnect_send_error(INVALID_REQUEST) below — verify the client
+ * protocol really expects that trailing message.
+ * NOTE(review): the request buffer is only freed at the bottom of the
+ * loop; every "break" above it leaks the current request buffer.
+ */
+static void *ntconnect_worker(void *arg)
+{
+	int status;
+	int clfd = (int)(uint64_t)arg;
+	char *module_cmd = NULL;
+	char *request = NULL;
+	struct ntconn_header_s hdr;
+
+	do {
+		ntconn_mod_t *cmod = ntconnect_interpret_request(clfd, &hdr,
+								 &request,
+								 &module_cmd,
+								 &status);
+
+		if (cmod && module_cmd && status == 0) {
+			int len;
+			char *data;
+
+			/*
+			 * Handle general module commands
+			 */
+			if (strcmp(module_cmd, "version") == 0) {
+				uint64_t version =
+					((uint64_t)cmod->op->version_major
+					 << 32) +
+					(cmod->op->version_minor);
+
+				if (send_reply(clfd, NTCONN_TAG_REPLY,
+						(void *)&version,
+						sizeof(uint64_t)))
+					break;
+
+			} else {
+				/*
+				 * Call module for execution of command
+				 */
+				data = NULL;
+				pthread_mutex_lock(&cmod->mutex);
+				int repl = cmod->op->request(cmod->hdl, clfd,
+							     &hdr, module_cmd,
+							     &data, &len);
+				pthread_mutex_unlock(&cmod->mutex);
+
+				if (repl == REQUEST_OK && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_REPLY,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+
+				} else if (repl == REQUEST_ERR && len >= 0) {
+					if (send_reply_free_data(clfd, cmod,
+								 NTCONN_TAG_ERROR,
+								 (void *)data,
+								 (uint32_t)len))
+						break;
+				} else {
+					NT_LOG(ERR, NTCONNECT,
+					       "Invalid result from module request function: module %s, result %i\n",
+					       cmod->op->module, repl);
+					if (ntconnect_send_error(clfd,
+						NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR))
+						break;
+				}
+			}
+
+		} else if (status == STATUS_TIMEOUT) {
+			/* Other end is dead */
+			NT_LOG(WRN, NTCONNECT,
+			       "Client must be dead - timeout\n");
+			break;
+		} else if (status == STATUS_CONNECTION_CLOSED) {
+			break; /* silently break out */
+		}
+		/* Error - send error back */
+		if (ntconnect_send_error(clfd, NTCONN_ERR_CODE_INVALID_REQUEST))
+			break;
+		if (request)
+			free(request);
+	} while (1); /* while still connected */
+
+	close(clfd);
+
+	/* call module cleanup callback function for client_id */
+	ntconn_mod_t *ntcmod = ntcmod_base;
+
+	while (ntcmod) {
+		if (ntcmod->op->client_cleanup) {
+			pthread_mutex_lock(&ntcmod->mutex);
+			ntcmod->op->client_cleanup(ntcmod->hdl, clfd);
+			pthread_mutex_unlock(&ntcmod->mutex);
+		}
+
+		ntcmod = ntcmod->next;
+	}
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Listener thread: accept client connections on the Unix socket and
+ * spawn a detached worker thread per client, pinned to the configured
+ * CPU set.
+ *
+ * Fix: pthread_create() failure is now detected. The original ignored
+ * it and then called pthread_setaffinity_np()/pthread_detach() on an
+ * uninitialized thread id (undefined behavior) and leaked the client fd.
+ */
+static void *ntconnect_server(void *arg)
+{
+	struct ntconn_server_s *ntcserv = (struct ntconn_server_s *)arg;
+
+	ntcserv->running = 1;
+
+#ifdef DEBUG
+	NT_LOG(DBG, NTCONNECT, "Running NT Connection Server fd %i\n",
+	       ntcserv->serv_fd);
+#endif
+
+	if (listen(ntcserv->serv_fd, 5) < 0) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Server failed on listen(), stopping thread. err: %s\n",
+		       strerror(errno));
+		pthread_exit(NULL);
+		return NULL;
+	}
+
+	while (ntcserv->running) {
+		int clfd = accept(ntcserv->serv_fd, NULL, NULL);
+
+		if (clfd < 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "ERROR from accept(), stopping thread. err: %s\n",
+			       strerror(errno));
+			break;
+		}
+		if (pthread_create(&ctid, NULL, ntconnect_worker,
+				   (void *)(uint64_t)clfd) != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create worker thread for fd %i\n",
+			       clfd);
+			close(clfd);
+			continue;
+		}
+		pthread_setaffinity_np(ctid, sizeof(cpu_set_t),
+				       &ntcserv->cpuset);
+		/* Detach immediately. We will never join this thread */
+		pthread_detach(ctid);
+	}
+
+	pthread_exit(NULL);
+	return NULL;
+}
+
+/*
+ * Create the ntconnect Unix-domain socket at sockname and start the
+ * listener thread (pinned to cpuset). A no-op when no module has been
+ * registered. Returns 0 on success, -1 on failure.
+ *
+ * Fixes: strdup() result is checked before dirname() (NULL dereference),
+ * unix_build_address() failure no longer leads to bind() on an
+ * uninitialized sockaddr, and pthread_create() failure is handled.
+ */
+int ntconnect_init(const char *sockname, cpu_set_t cpuset)
+{
+	if (ntcmod_base) {
+		/* Make sure the socket directory exists */
+		char *sockname_copy = strdup(sockname);
+
+		if (!sockname_copy) {
+			NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+			return -1;
+		}
+
+		char *sockname_dir = dirname(sockname_copy);
+
+		if (mkdir(sockname_dir, 0755) < 0 && errno != EEXIST) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Can't create socket directory: %s",
+			       sockname_dir);
+			free(sockname_copy);
+			return -1;
+		}
+		free(sockname_copy);
+
+		/* Add server to module list - cannot work without */
+		ntconn_server_register(&ntconn_serv);
+
+		/* Start named socket server */
+		struct sockaddr_un addr;
+
+		if (unix_build_address(sockname, &addr) != 0)
+			return -1;
+
+		ntconn_serv.serv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+		ntconn_serv.cpuset = cpuset;
+		if (ntconn_serv.serv_fd == -1)
+			return -1;
+
+		/* Make sure the node in filesystem is deleted otherwise bind will fail */
+		unlink(sockname);
+
+		if (bind(ntconn_serv.serv_fd, (struct sockaddr *)&addr,
+				sizeof(struct sockaddr_un)) == -1) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+
+		/* Run ntconnect service */
+		if (pthread_create(&tid, NULL, ntconnect_server,
+				   &ntconn_serv) != 0) {
+			close(ntconn_serv.serv_fd);
+			return -1;
+		}
+		pthread_setaffinity_np(tid, sizeof(cpu_set_t),
+				       &ntconn_serv.cpuset);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
new file mode 100644
index 0000000000..294b95846b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_adapter.c
@@ -0,0 +1,775 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntconnect_api_adapter.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+
+#define NTCONN_ADAP_VERSION_MAJOR 0U
+#define NTCONN_ADAP_VERSION_MINOR 1U
+
+#define this_module_name "adapter"
+
+/* Maximum number of adapters the module can register. */
+#define MAX_ADAPTERS 2
+
+/* Per-adapter module handle: the owning driver instance. */
+static struct adap_hdl_s {
+	struct drv_s *drv;
+} adap_hdl[MAX_ADAPTERS];
+
+static int func_adapter_get_interfaces(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_adapter_get_info(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len);
+/* Sub-commands of "get"; each table is NULL-terminated. */
+static struct func_s funcs_get_level1[] = {
+	{ "interfaces", NULL, func_adapter_get_interfaces },
+	{ "info", NULL, func_adapter_get_info },
+	{ "sensors", NULL, func_adapter_get_sensors },
+	{ NULL, NULL, NULL },
+};
+
+static int func_adapter_set_interface(void *hdl, int client_id,
+				      struct ntconn_header_s *hdr, char **data,
+				      int *len);
+static int func_adapter_set_adapter(void *hdl, int client_id,
+				    struct ntconn_header_s *hdr, char **data,
+				    int *len);
+/* Sub-commands of "set". */
+static struct func_s funcs_set_level1[] = {
+	{ "interface", NULL, func_adapter_set_interface },
+	{ "adapter", NULL, func_adapter_set_adapter },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static struct func_s adapter_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "set", funcs_set_level1, NULL },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Translate an NT link-speed enum to the ntconnect PORT_LINK_SPEED_*
+ * value; unknown speeds map to PORT_LINK_SPEED_UNKNOWN.
+ */
+static int read_link_speed(enum nt_link_speed_e link_speed)
+{
+	static const struct {
+		enum nt_link_speed_e nt;
+		int port;
+	} speed_map[] = {
+		{ NT_LINK_SPEED_10M, PORT_LINK_SPEED_10M },
+		{ NT_LINK_SPEED_100M, PORT_LINK_SPEED_100M },
+		{ NT_LINK_SPEED_1G, PORT_LINK_SPEED_1G },
+		{ NT_LINK_SPEED_10G, PORT_LINK_SPEED_10G },
+		{ NT_LINK_SPEED_25G, PORT_LINK_SPEED_25G },
+		{ NT_LINK_SPEED_40G, PORT_LINK_SPEED_40G },
+		{ NT_LINK_SPEED_50G, PORT_LINK_SPEED_50G },
+		{ NT_LINK_SPEED_100G, PORT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (speed_map[i].nt == link_speed)
+			return speed_map[i].port;
+	}
+	return PORT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Translate a link-speed string ("10M".."100G") to the NT link-speed
+ * enum; unrecognized strings map to NT_LINK_SPEED_UNKNOWN.
+ */
+static nt_link_speed_t convert_link_speed(char *speed_str)
+{
+	static const struct {
+		const char *name;
+		nt_link_speed_t speed;
+	} speed_map[] = {
+		{ "10M", NT_LINK_SPEED_10M },
+		{ "100M", NT_LINK_SPEED_100M },
+		{ "1G", NT_LINK_SPEED_1G },
+		{ "10G", NT_LINK_SPEED_10G },
+		{ "25G", NT_LINK_SPEED_25G },
+		{ "40G", NT_LINK_SPEED_40G },
+		{ "50G", NT_LINK_SPEED_50G },
+		{ "100G", NT_LINK_SPEED_100G },
+	};
+
+	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
+		if (strcmp(speed_str, speed_map[i].name) == 0)
+			return speed_map[i].speed;
+	}
+	return NT_LINK_SPEED_UNKNOWN;
+}
+
+/*
+ * Handler for "get interfaces": build a ntc_interfaces_s reply describing
+ * every DPDK port of this adapter — physical ports first (with LAG
+ * handling), then virtual ports. On success *data owns the heap buffer
+ * (size *len) released later via the module free_data callback.
+ */
+static int func_adapter_get_interfaces(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct ntc_interfaces_s *ifs;
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+	int lag_active;
+	int final_list = adap->drv->probe_finished;
+	/* keep final_list set before nb_ports are called */
+	rte_compiler_barrier();
+	int nb_ports = rte_eth_dev_count_avail();
+
+	/* Get the "internals" structure of phy port 0 to find out if we're running LAG */
+	char phy0_name[128];
+
+	rte_eth_dev_get_name_by_port(0, phy0_name);
+	struct rte_eth_dev *phy0_eth_dev = rte_eth_dev_get_by_name(phy0_name);
+
+	if (phy0_eth_dev == NULL || phy0_eth_dev->data == NULL ||
+			phy0_eth_dev->data->dev_private == NULL) {
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INTERNAL_ERROR);
+	}
+	struct pmd_internals *phy0_internals =
+		(struct pmd_internals *)phy0_eth_dev->data->dev_private;
+	lag_active = (phy0_internals->lag_config == NULL) ? 0 : 1;
+	if (lag_active) {
+		/*
+		 * Phy ports are link aggregated. I.e. number of ports is actually
+		 * one bigger than what rte_eth_dev_count_avail() returned
+		 */
+		nb_ports++;
+
+		/*
+		 * Sanity check:
+		 * For now we know about LAG with 2 ports only.
+		 * If in the future we get HW with more ports, make assert to alert
+		 * the developers that something needs to be looked at...
+		 */
+		assert(fpga_info->n_phy_ports == 2);
+	}
+
+	*len = sizeof(struct ntc_interfaces_s) +
+	       sizeof(struct ntc_interface_s) * nb_ports;
+	ifs = malloc(*len);
+	if (!ifs) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*data = (char *)ifs;
+
+	ifs->nb_ports = nb_ports;
+	ifs->final_list = final_list;
+
+	int i;
+
+	/* First set the "port type" of the physical ports */
+	if (lag_active) {
+		if (phy0_internals->lag_config->mode == BONDING_MODE_8023AD) {
+			/* Active/active LAG */
+			for (i = 0; i < fpga_info->n_phy_ports; i++) {
+				ifs->intf[i].type =
+					PORT_TYPE_PHY_LAG_ACTIVE_ACTIVE;
+			}
+		} else if (phy0_internals->lag_config->mode ==
+				BONDING_MODE_ACTIVE_BACKUP) {
+			/* Active/backup LAG */
+			ifs->intf[phy0_internals->lag_config->primary_port]
+			.type = PORT_TYPE_PHY_LAG_PRIMARY;
+			ifs->intf[phy0_internals->lag_config->backup_port].type =
+				PORT_TYPE_PHY_LAG_BACKUP;
+		} else {
+			/* Unknown LAG mode */
+			assert(0);
+		}
+	} else {
+		/* Normal phy ports (not link aggregated) */
+		for (i = 0; i < fpga_info->n_phy_ports; i++)
+			ifs->intf[i].type = PORT_TYPE_PHY_NORMAL;
+	}
+
+	/* Then set the remaining port values for the physical ports. */
+	for (i = 0; i < fpga_info->n_phy_ports; i++) {
+		char name[128];
+
+		if (i > 0 && lag_active) {
+			/*
+			 * Secondary link aggregated port. Just display the "internals" values
+			 * from port 0
+			 */
+			rte_eth_dev_get_name_by_port(0, name);
+		} else {
+			rte_eth_dev_get_name_by_port(i, name);
+		}
+		/*
+		 * NOTE(review): unlike phy port 0 above, this lookup result is
+		 * used without a NULL check — confirm it cannot fail here.
+		 */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+		struct adapter_info_s *p_adapter_info =
+				&adap->drv->ntdrv.adapter_info;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		const bool port_link_status =
+			nt4ga_port_get_link_status(p_adapter_info, i);
+		ifs->intf[i].link = port_link_status ? PORT_LINK_UP :
+				    PORT_LINK_DOWN;
+
+		const nt_link_speed_t port_link_speed =
+			nt4ga_port_get_link_speed(p_adapter_info, i);
+		ifs->intf[i].port_speed = read_link_speed(port_link_speed);
+
+		const bool port_adm_state =
+			nt4ga_port_get_adm_state(p_adapter_info, i);
+		if (!port_adm_state) {
+			ifs->intf[i].port_state = PORT_STATE_DISABLED;
+		} else {
+			const bool port_nim_present =
+				nt4ga_port_get_nim_present(p_adapter_info, i);
+			if (port_nim_present) {
+				ifs->intf[i].port_state =
+					PORT_STATE_NIM_PRESENT;
+			} else {
+				ifs->intf[i].port_state = PORT_STATE_NIM_ABSENT;
+			}
+		}
+
+		/* MTU */
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Display same MTU value as port 0 */
+			rte_eth_dev_get_mtu(0, &ifs->intf[i].mtu);
+		} else {
+			rte_eth_dev_get_mtu(i, &ifs->intf[i].mtu);
+		}
+
+		/* MAC */
+		const uint64_t mac =
+			fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + i;
+		ifs->intf[i].mac.addr_b[0] = (mac >> 40) & 0xFFu;
+		ifs->intf[i].mac.addr_b[1] = (mac >> 32) & 0xFFu;
+		ifs->intf[i].mac.addr_b[2] = (mac >> 24) & 0xFFu;
+		ifs->intf[i].mac.addr_b[3] = (mac >> 16) & 0xFFu;
+		ifs->intf[i].mac.addr_b[4] = (mac >> 8) & 0xFFu;
+		ifs->intf[i].mac.addr_b[5] = (mac >> 0) & 0xFFu;
+
+		if (i > 0 && lag_active) {
+			/* Secondary link aggregated port. Queues not applicable */
+			ifs->intf[i].num_queues = 0;
+		} else {
+			/* attached hw queues to this interface */
+			unsigned int input_num = internals->nb_rx_queues;
+			/*
+			 * These are the "input" queues, meaning these go to host and is attached
+			 * to receiving from a port
+			 */
+			for (unsigned int ii = 0; ii < input_num; ii++) {
+				ifs->intf[i].queue[ii].idx =
+					internals->rxq_scg[ii].queue.hw_id;
+				ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+			}
+
+			/*
+			 * These are the "output" queues, meaning these go to a virtual port queue
+			 * which typically is used by vDPA
+			 */
+			for (unsigned int ii = 0; ii < internals->vpq_nb_vq;
+					ii++) {
+				ifs->intf[i].queue[ii + input_num].idx =
+					internals->vpq[ii].hw_id;
+				ifs->intf[i].queue[ii + input_num].dir =
+					QUEUE_OUTPUT;
+			}
+
+			ifs->intf[i].num_queues =
+				input_num + internals->vpq_nb_vq;
+		}
+
+		/* NIM information */
+		nim_i2c_ctx_t nim_ctx =
+			nt4ga_port_get_nim_capabilities(p_adapter_info, i);
+
+		strlcpy((char *)&ifs->intf[i].nim_data.vendor_name,
+			nim_ctx.vendor_name,
+			sizeof(ifs->intf[i].nim_data.vendor_name));
+		strlcpy((char *)&ifs->intf[i].nim_data.prod_no, nim_ctx.prod_no,
+			sizeof(ifs->intf[i].nim_data.prod_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.serial_no,
+			nim_ctx.serial_no,
+			sizeof(ifs->intf[i].nim_data.serial_no));
+		strlcpy((char *)&ifs->intf[i].nim_data.date, nim_ctx.date,
+			sizeof(ifs->intf[i].nim_data.date));
+		strlcpy((char *)&ifs->intf[i].nim_data.rev, nim_ctx.rev,
+			sizeof(ifs->intf[i].nim_data.rev));
+
+		/* Single-mode length is clamped to the 16-bit field maximum. */
+		if (nim_ctx.len_info[0] >= 0xFFFF)
+			ifs->intf[i].nim_data.link_length.sm = 0xFFFF;
+		else
+			ifs->intf[i].nim_data.link_length.sm =
+				nim_ctx.len_info[0];
+
+		ifs->intf[i].nim_data.link_length.ebw = nim_ctx.len_info[1];
+		ifs->intf[i].nim_data.link_length.mm50 = nim_ctx.len_info[2];
+		ifs->intf[i].nim_data.link_length.mm62 = nim_ctx.len_info[3];
+		ifs->intf[i].nim_data.link_length.copper = nim_ctx.len_info[4];
+
+		ifs->intf[i].nim_data.pwr_level_req = nim_ctx.pwr_level_req;
+		ifs->intf[i].nim_data.pwr_level_cur = nim_ctx.pwr_level_cur;
+		ifs->intf[i].nim_data.nim_id = nim_ctx.nim_id;
+		ifs->intf[i].nim_data.port_type = nim_ctx.port_type;
+	}
+
+	/* And finally handle the virtual ports. */
+	int rte_eth_dev_virt_port_offset = lag_active ? 1 :
+					   fpga_info->n_phy_ports;
+	for (; i < nb_ports; i++, rte_eth_dev_virt_port_offset++) {
+		/* Continue counting from the "i" value reached in the previous for loop */
+		char name[128];
+
+		rte_eth_dev_get_name_by_port(rte_eth_dev_virt_port_offset,
+					     name);
+		/* NOTE(review): also used without a NULL check — see above. */
+		struct rte_eth_dev *eth_dev = rte_eth_dev_get_by_name(name);
+
+		struct pmd_internals *internals =
+			(struct pmd_internals *)eth_dev->data->dev_private;
+
+		ifs->intf[i].port_id = i;
+		ifs->intf[i].type = PORT_TYPE_VIRT;
+		ifs->intf[i].pci_id.domain = internals->pci_dev->addr.domain;
+		ifs->intf[i].pci_id.bus = internals->pci_dev->addr.bus;
+		ifs->intf[i].pci_id.devid = internals->pci_dev->addr.devid;
+		ifs->intf[i].pci_id.function =
+			internals->pci_dev->addr.function;
+		ifs->intf[i].pci_id.pad = 0;
+
+		ifs->intf[i].port_speed = PORT_LINK_SPEED_NONE_REPORTED;
+		switch (internals->vport_comm) {
+		case VIRT_PORT_NEGOTIATED_NONE:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_UNATTACHED;
+			ifs->intf[i].link = PORT_LINK_DOWN;
+			break;
+		case VIRT_PORT_NEGOTIATED_SPLIT:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_SPLIT;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_NEGOTIATED_PACKED:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_PACKED;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		case VIRT_PORT_USE_RELAY:
+			ifs->intf[i].port_state = PORT_STATE_VIRTUAL_RELAY;
+			ifs->intf[i].link = PORT_LINK_UP;
+			break;
+		}
+
+		/* MTU */
+		rte_eth_dev_get_mtu(rte_eth_dev_virt_port_offset,
+				    &ifs->intf[i].mtu);
+
+		/* MAC */
+		for (int ii = 0; ii < 6; ii++) {
+			ifs->intf[i].mac.addr_b[ii] =
+				internals->eth_addrs[0].addr_bytes[ii];
+		}
+
+		/* attached hw queues to this interface */
+		unsigned int input_num = internals->nb_rx_queues;
+
+		/*
+		 * These are the "input" queues, meaning these go to host and is attached to
+		 * receiving from a port
+		 */
+		for (unsigned int ii = 0; ii < input_num; ii++) {
+			ifs->intf[i].queue[ii].idx =
+				internals->rxq_scg[ii].queue.hw_id;
+			ifs->intf[i].queue[ii].dir = QUEUE_INPUT;
+		}
+
+		/*
+		 * These are the "output" queues, meaning these go to a virtual port queue
+		 * which typically is used by vDPA
+		 */
+		unsigned int numq =
+			((internals->vpq_nb_vq + input_num) > MAX_RSS_QUEUES) ?
+			MAX_RSS_QUEUES - input_num :
+			internals->vpq_nb_vq;
+		for (unsigned int ii = 0; ii < numq; ii++) {
+			ifs->intf[i].queue[ii + input_num].idx =
+				internals->vpq[ii].hw_id;
+			ifs->intf[i].queue[ii + input_num].dir = QUEUE_OUTPUT;
+		}
+		ifs->intf[i].num_queues = input_num + numq;
+	}
+	return REQUEST_OK;
+}
+
+/*
+ * Handler for "adapter,get,info": return the FPGA identification as a
+ * printable string "ttt-pppp-vv-rr" (type, product, version, revision).
+ * The caller owns the returned buffer (released via adap_free_data()).
+ */
+static int func_adapter_get_info(void *hdl, int client_id _unused,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	fpga_info_t *fpga_info = &adap->drv->ntdrv.adapter_info.fpga_info;
+
+	*len = sizeof(struct ntc_adap_get_info_s);
+	*data = malloc(*len);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/*
+	 * Bound the print by the allocated reply size rather than the
+	 * previous magic constant 31, which was not tied to the size of
+	 * struct ntc_adap_get_info_s.
+	 */
+	snprintf(*data, (size_t)*len, "%03d-%04d-%02d-%02d",
+		 fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
+		 fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Handler for "adapter,get,sensors": build a reply blob consisting of a
+ * struct ntc_sensors_s header followed by one struct sensor record per
+ * adapter sensor and per NIM sensor.  The caller owns the returned buffer
+ * (released via adap_free_data()).
+ */
+static int func_adapter_get_sensors(void *hdl, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	struct adapter_info_s *adapter =
+		&(((struct adap_hdl_s *)hdl)->drv->ntdrv.adapter_info);
+	struct sensor *sensor_ptr = NULL;
+	uint16_t sensors_num = 0;
+	uint8_t *sensors = NULL;
+	struct ntc_sensors_s sensors_info = {
+		.adapter_sensors_cnt = adapter->adapter_sensors_cnt,
+		.ports_cnt = adapter->fpga_info.n_phy_ports
+	};
+	memcpy(sensors_info.adapter_name, adapter->p_dev_name, 24);
+
+	/* Set a sum of sensor`s counters */
+	sensors_num = adapter->adapter_sensors_cnt;
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		sensors_num += adapter->nim_sensors_cnt[i];
+		sensors_info.nim_sensors_cnt[i] = adapter->nim_sensors_cnt[i];
+	}
+
+	*len = sizeof(struct ntc_sensors_s) +
+	       sensors_num * sizeof(struct sensor);
+
+	/* Allocate memory for the header followed by the sensors array */
+	sensors = malloc(*len);
+	if (!sensors) {
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	memcpy(sensors, &sensors_info, sizeof(struct ntc_sensors_s));
+	sensor_ptr = (struct sensor *)(sensors + sizeof(struct ntc_sensors_s));
+
+	/* Fetch adapter sensors */
+	for (struct nt_sensor_group *ptr = adapter->adapter_sensors;
+			ptr != NULL; ptr = ptr->next) {
+		size_t name_len = strlen(ptr->sensor->info.name);
+
+		sensor_ptr->current_value = ptr->sensor->info.value;
+		sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+		sensor_ptr->max_value = ptr->sensor->info.value_highest;
+		sensor_ptr->sign = ptr->sensor->si;
+		sensor_ptr->type = ptr->sensor->info.type;
+		/*
+		 * Copy at most 49 name bytes and always NUL-terminate.  The
+		 * previous fixed 50-byte memcpy could read past the end of a
+		 * shorter source string.
+		 */
+		if (name_len >= 50)
+			name_len = 49;
+		memcpy(sensor_ptr->name, ptr->sensor->info.name, name_len);
+		sensor_ptr->name[name_len] = '\0';
+		sensor_ptr++;
+	}
+
+	/* Fetch NIM sensors */
+	for (int i = 0; i < adapter->fpga_info.n_phy_ports; i++) {
+		for (struct nim_sensor_group *ptr = adapter->nim_sensors[i];
+				ptr != NULL; ptr = ptr->next) {
+			size_t name_len = strlen(ptr->sensor->info.name);
+
+			sensor_ptr->current_value = ptr->sensor->info.value;
+			sensor_ptr->min_value = ptr->sensor->info.value_lowest;
+			sensor_ptr->max_value = ptr->sensor->info.value_highest;
+			sensor_ptr->sign = ptr->sensor->si;
+			sensor_ptr->type = ptr->sensor->info.type;
+
+			/*
+			 * Bounded copy with guaranteed NUL termination; the
+			 * previous strlen-sized memcpy left the name
+			 * unterminated in the freshly malloc'ed buffer.
+			 */
+			if (name_len >= 50)
+				name_len = 49;
+			memcpy(sensor_ptr->name, ptr->sensor->info.name,
+			       name_len);
+			sensor_ptr->name[name_len] = '\0';
+			sensor_ptr++;
+		}
+	}
+
+	/* Send response */
+	*data = (char *)sensors;
+
+	return REQUEST_OK;
+}
+
+/* Administratively enable the given port. */
+static int set_port_enable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 true);
+	return REQUEST_OK;
+}
+
+/* Administratively disable the given port. */
+static int set_port_disable(struct adap_hdl_s *adap, int port_nr)
+{
+	nt4ga_port_set_adm_state(&adap->drv->ntdrv.adapter_info, port_nr,
+				 false);
+	return REQUEST_OK;
+}
+
+/* Request link up on the given port; a no-op if it is already up. */
+static int set_link_up(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be up\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, true);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be up\n", portid);
+
+	return REQUEST_OK;
+}
+
+/* Request link down on the given port; a no-op if it is already down. */
+static int set_link_down(struct adap_hdl_s *adap, int portid)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	if (!nt4ga_port_get_link_status(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: Link is already set to be down\n", portid);
+		return REQUEST_OK;
+	}
+
+	nt4ga_port_set_link_status(info, portid, false);
+	NT_LOG(DBG, NTCONNECT, "Port %i: Link set to be down\n",
+	       portid);
+
+	return REQUEST_OK;
+}
+
+/*
+ * Set the link speed of a port.  Only allowed while the port is
+ * administratively disabled; @speed_str is parsed by convert_link_speed().
+ */
+static int set_link_speed(struct adap_hdl_s *adap, int portid, char *speed_str,
+			  char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	/* Refuse to change the speed of an enabled port. */
+	if (nt4ga_port_get_adm_state(info, portid)) {
+		NT_LOG(DBG, NTCONNECT,
+		       "Port %i: fail to set link speed, port is enabled\n",
+		       portid);
+		return ntconn_reply_status(data, len,
+					   NTCONN_ADAPTER_ERR_WRONG_LINK_STATE);
+	}
+
+	const nt_link_speed_t speed = convert_link_speed(speed_str);
+
+	if (speed == NT_LINK_SPEED_UNKNOWN)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	nt4ga_port_set_link_speed(info, portid, speed);
+	NT_LOG(DBG, NTCONNECT, "Port %i: set link speed - %s\n",
+	       portid, speed_str);
+
+	return REQUEST_OK;
+}
+
+/* Apply a loopback mode (NT_LINK_LOOPBACK_*) to the given port. */
+static int set_loopback_mode(struct adap_hdl_s *adap, int portid, int mode)
+{
+	NT_LOG(DBG, NTCONNECT, "Port %i: set loopback mode %i\n", portid, mode);
+	nt4ga_port_set_loopback_mode(&adap->drv->ntdrv.adapter_info, portid,
+				     mode);
+	return REQUEST_OK;
+}
+
+/*
+ * Enable (@disable == false) or disable (@disable == true) transmitter
+ * power on a port.  Reports NTCONN_ADAPTER_ERR_TX_POWER_FAIL on failure.
+ */
+static int set_tx_power(struct adap_hdl_s *adap, int portid, bool disable,
+			char **data, int *len)
+{
+	struct adapter_info_s *info = &adap->drv->ntdrv.adapter_info;
+
+	NT_LOG(DBG, NTCONNECT, "Port %i: set tx_power %i\n", portid, disable);
+
+	if (nt4ga_port_tx_power(info, portid, disable) == 0)
+		return REQUEST_OK;
+
+	NT_LOG(DBG, NTCONNECT,
+	       "Port %i: ERROR while changing tx_power\n", portid);
+	return ntconn_reply_status(data, len,
+				   NTCONN_ADAPTER_ERR_TX_POWER_FAIL);
+}
+
+/*
+ * Handler for "adapter,set,interface,portX,<setting>[=<value>]".
+ * On entry *data points at the zero-terminated remainder of the request,
+ * e.g. "port0,link_speed=10G"; it is tokenized in place with strtok_r().
+ * Supported settings: link_speed, enable, disable, link_state,
+ * host_loopback, line_loopback and tx_power.
+ */
+static int func_adapter_set_interface(void *hdl, int client_id _unused,
+				      struct ntconn_header_s *hdr _unused,
+				      char **data, int *len)
+{
+	struct adap_hdl_s *adap = (struct adap_hdl_s *)hdl;
+	char *saveptr;
+	int port_nr;
+	int length;
+	char *tok;
+
+	*len = 0;
+
+	/*
+	 * This will receive the request strings starting with "adapter;set,interface,...."
+	 * so in the situation of a request like: "adapter,set,interface,port0,link_speed=10G"
+	 * the remainder of the command "port0,link_speed=10G" will be pointed to by *data,
+	 * zero-terminated on entry
+	 */
+
+	if (!(data && *data))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+
+	/* OK to modify *data */
+	tok = strtok_r(*data, ",", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	length = strlen(tok);
+
+	if (!(length > 4 && memcmp(tok, "port", 4) == 0))
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	port_nr = atoi(tok + 4);
+
+	/*
+	 * Only set on phy ports: reject port numbers outside
+	 * [0, n_phy_ports).  The previous check used "<", which rejected
+	 * every physical port and let virtual port numbers through.
+	 */
+	if (port_nr < 0 ||
+			port_nr >= adap->drv->ntdrv.adapter_info.fpga_info.n_phy_ports)
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+
+	tok = strtok_r(NULL, "=,", &saveptr);
+	if (!tok)
+		return ntconn_error(data, len, this_module_name,
+			NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+	if (strcmp(tok, "link_speed") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		return set_link_speed(adap, port_nr, tok, data, len);
+	} else if (strcmp(tok, "enable") == 0) {
+		return set_port_enable(adap, port_nr);
+	} else if (strcmp(tok, "disable") == 0) {
+		return set_port_disable(adap, port_nr);
+	} else if (strcmp(tok, "link_state") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "up") == 0)
+			return set_link_up(adap, port_nr);
+		else if (strcmp(tok, "down") == 0)
+			return set_link_down(adap, port_nr);
+	} else if (strcmp(tok, "host_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_HOST);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "line_loopback") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_LINE);
+		else if (strcmp(tok, "off") == 0)
+			return set_loopback_mode(adap, port_nr,
+				NT_LINK_LOOPBACK_OFF);
+	} else if (strcmp(tok, "tx_power") == 0) {
+		tok = strtok_r(NULL, ",", &saveptr);
+		if (!tok)
+			return ntconn_error(data, len, this_module_name,
+				NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+		if (strcmp(tok, "on") == 0)
+			return set_tx_power(adap, port_nr, false, data, len);
+		else if (strcmp(tok, "off") == 0)
+			return set_tx_power(adap, port_nr, true, data, len);
+	}
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_MISSING_INVALID_PARAM);
+}
+
+/*
+ * Handler for "adapter,set,adapter,...": not implemented yet; always
+ * replies with NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED.
+ */
+static int func_adapter_set_adapter(void *hdl _unused, int client_id _unused,
+				    struct ntconn_header_s *hdr _unused,
+				    char **data, int *len)
+{
+	if (data && *data)
+		NT_LOG(DBG, NTCONNECT,
+		       "Set adapter: Command: %s\n", *data);
+
+	*len = 0;
+
+	/* Should return 0 on success */
+	return ntconn_error(data, len, this_module_name,
+			    NTCONN_ERR_CODE_NOT_YET_IMPLEMENTED);
+}
+
+/*
+ * ntconnect request entry point for this module: dispatch @function
+ * through the adapter_entry_funcs table.
+ */
+static int adap_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously allocated by one of the handlers. */
+static void adap_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client cleanup hook; this module keeps no per-client state. */
+static void adap_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect operations table registered for this module. */
+static const ntconnapi_t ntconn_adap_op = { this_module_name,
+					    NTCONN_ADAP_VERSION_MAJOR,
+					    NTCONN_ADAP_VERSION_MINOR,
+					    adap_request,
+					    adap_free_data,
+					    adap_client_cleanup
+					  };
+
+/*
+ * Register an adapter with the NtConnect framework.  Claims the first
+ * free slot in adap_hdl[]; returns -1 when all slots are occupied.
+ */
+int ntconn_adap_register(struct drv_s *drv)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_ADAPTERS; slot++) {
+		if (adap_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_ADAPTERS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	adap_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&adap_hdl[slot],
+				   &ntconn_adap_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
new file mode 100644
index 0000000000..3d81242524
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_flow.c
@@ -0,0 +1,1312 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_flow.h"
+#include "ntconnect_api_meter.h"
+#include "stream_binary_flow_api.h"
+
+#include <rte_errno.h>
+#include "flow_api.h"
+
+#define DEBUG_FLOW 1
+
+#define NTCONN_FLOW_VERSION_MAJOR 0U
+#define NTCONN_FLOW_VERSION_MINOR 1U
+
+#define this_module_name "filter"
+
+#define IN_PORT_TOK "in_port="
+#define VPATH_TOK "vpath="
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* Per-client driver handle bookkeeping for this module. */
+static struct flow_hdl_s {
+	struct drv_s *drv;
+} flow_hdl[MAX_CLIENTS];
+
+#define MAX_PORTS 64
+/* Per-port flow device mapping, populated by func_flow_setport(). */
+static struct port_to_eth_s {
+	struct flow_eth_dev *flw_dev;
+	uint32_t forced_vlan_vid;
+	uint32_t caller_id;
+} port_eth[MAX_PORTS];
+
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_FLOW_ERR_NONE, "Success" },
+	{ NTCONN_FLOW_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_FLOW_ERR_PORT_IS_NOT_INITIALIZED, "Port is not initialized" },
+	{ NTCONN_FLOW_ERR_UNEXPECTED_VIRTIO_PATH, "Unexpected virtio path" },
+	{ NTCONN_FLOW_ERR_TO_MANY_FLOWS, "To many flows" },
+	{ NTCONN_FLOW_ERR_INVALID_PORT, "Invalid port" },
+	{ NTCONN_FLOW_ERR_NOT_YET_IMPLEMENTED, "Function not yet implemented" },
+	{ NTCONN_FLOW_ERR_UNSUPPORTED_ADAPTER, "Adapter is not supported" },
+	{ NTCONN_FLOW_ERR_NO_VF_QUEUES, "No queues for the VF is found" },
+	{ -1, NULL }
+};
+
+/*
+ * Look up the text for @err_code in ntconn_err[].  Unknown codes fall
+ * back to the "Internal error" entry (index 1).
+ */
+static const char *get_error_msg(enum ntconn_flow_err_e err_code)
+{
+	const ntconn_err_t *entry = &ntconn_err[0];
+
+	while (entry->err_code != (uint32_t)-1 &&
+			entry->err_code != err_code)
+		entry++;
+
+	if (entry->err_code == (uint32_t)-1)
+		entry = &ntconn_err[1];
+
+	return entry->err_text;
+}
+
+/*
+ * Allocate and fill a struct flow_return_s reply carrying status @err and
+ * the message associated with @code.  Returns REQUEST_OK with *data/*len
+ * set, or REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static inline int ntconn_flow_err_reply_status(char **data, int *len,
+		enum ntconn_flow_err_e code,
+		int err)
+{
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+		return_value->status = err;
+		return_value->type = FLOW_ERROR_GENERAL;
+		/*
+		 * strlcpy() guarantees NUL termination; the previous
+		 * memcpy(RTE_MIN(strlen(), ERR_MSG_LEN)) could leave
+		 * err_msg unterminated in the fresh allocation.
+		 */
+		strlcpy(return_value->err_msg, get_error_msg(code),
+			ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+	return REQUEST_ERR;
+}
+
+/*
+ * Allocate and fill a struct flow_return_s reply carrying status @err and
+ * the generic "Internal error" message.  Returns REQUEST_OK, or
+ * REQUEST_ERR with *len = 0 on allocation failure.
+ */
+static inline int ntconn_flow_err_status(char **data, int *len, int err)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	*data = (char *)ret;
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory");
+		return REQUEST_ERR;
+	}
+
+	ret->status = err;
+	ret->type = FLOW_ERROR_GENERAL;
+	strlcpy(ret->err_msg, get_error_msg(NTCONN_FLOW_ERR_INTERNAL_ERROR),
+		ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+/*
+ * Filter functions
+ */
+static int func_flow_create(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_validate(void *hdl, int client_id,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len);
+static int func_flow_destroy(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+static int func_flow_flush(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_query(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+static int func_flow_setport(void *hdl, int client_id,
+			     struct ntconn_header_s *hdr, char **data,
+			     int *len);
+/* Request function dispatch table for this module. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "setport", NULL, func_flow_setport },
+	{ "create", NULL, func_flow_create },
+	{ "validate", NULL, func_flow_validate },
+	{ "destroy", NULL, func_flow_destroy },
+	{ "flush", NULL, func_flow_flush },
+	{ "query", NULL, func_flow_query },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Package @status and @error into a freshly allocated struct
+ * flow_return_s reply.  On allocation failure *len is zeroed and
+ * REQUEST_ERR is returned.
+ */
+static int copy_return_status(char **data, int *len, int status,
+			      struct flow_error *error)
+{
+	struct flow_return_s *ret = malloc(sizeof(struct flow_return_s));
+
+	*data = (char *)ret;
+	if (!ret) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+		       __func__);
+		return REQUEST_ERR;
+	}
+
+	ret->status = status;
+	ret->type = error->type;
+	strlcpy(ret->err_msg, error->message, ERR_MSG_LEN);
+	*len = sizeof(struct flow_return_s);
+	return REQUEST_OK;
+}
+
+static void set_error(struct flow_error *error)
+{
+	error->type = FLOW_ERROR_SUCCESS;
+	error->message = "Operation successfully completed";
+}
+
+/*
+ * Handler for "filter,setport,in_port=<n>,vpath=<path>": bind a virtual
+ * path's flow device to port_eth[in_port] and reply with the set of
+ * hardware queues attached to the VF.  *data is tokenized in place.
+ */
+static int func_flow_setport(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr _unused, char **data,
+			     int *len)
+{
+	uint32_t i;
+	struct flow_error error;
+	uint32_t nb_port;
+	uint8_t in_port = MAX_PORTS;
+	/*
+	 * Initialize vpath; it was previously left uninitialized and then
+	 * logged and passed to vp_path_instance_ready() even when the
+	 * "vpath=" token was absent or malformed.
+	 */
+	char vpath[MAX_PATH_LEN] = "";
+	char *saveptr;
+
+	set_error(&error);
+
+	nb_port = rte_eth_dev_count_avail();
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+	NT_LOG(DBG, NTCONNECT, "Number of ports: %u\n", nb_port);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(IN_PORT_TOK) && memcmp(tok, IN_PORT_TOK,
+							   strlen(IN_PORT_TOK)) == 0)
+			in_port = atoi(tok + strlen(IN_PORT_TOK));
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "in_port:          %u\n", in_port);
+#endif
+
+	tok = strtok_r(NULL, ",", &saveptr);
+	if (tok) {
+		size_t length = strlen(tok);
+		if (length > strlen(VPATH_TOK) && memcmp(tok, VPATH_TOK, strlen(VPATH_TOK)) == 0)
+			strlcpy(vpath, tok + strlen(VPATH_TOK), MAX_PATH_LEN);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vpath:           %s\n", vpath);
+#endif
+
+	/* Check that the wanted ports are valid ports */
+	if (in_port >= nb_port) {
+		NT_LOG(ERR, NTCONNECT, "port out of range");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	struct pmd_internals *vp_internals = vp_path_instance_ready(vpath);
+
+	if (!vp_internals) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get VF device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Get flow device */
+	port_eth[in_port].flw_dev = vp_internals->flw_dev;
+
+	if (port_eth[in_port].flw_dev == NULL) {
+		NT_LOG(ERR, NTCONNECT, "Failed to get eth device");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Only INLINE is supported */
+	if (vp_internals->flw_dev->ndev->flow_profile !=
+			FLOW_ETH_DEV_PROFILE_INLINE) {
+		/* Only inline profile is supported */
+		NT_LOG(ERR, NTCONNECT, "Adapter is not supported");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	if (vp_internals->vpq_nb_vq == 0) {
+		NT_LOG(ERR, NTCONNECT, "No queues for the VF is found");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+	/* Server and client must agree of the virtual port number */
+	if (vp_internals->port != (in_port + 4U)) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Internal error: Virtual port out of sync");
+		return ntconn_flow_err_status(data, len, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "vport:           %u\n", vp_internals->port);
+	NT_LOG(DBG, NTCONNECT, "vlan (forced):   %u\n", vp_internals->vlan);
+#endif
+
+	port_eth[in_port].caller_id = vp_internals->port;
+	port_eth[in_port].forced_vlan_vid = vp_internals->vlan;
+
+	*data = malloc(sizeof(struct flow_setport_return));
+	if (*data) {
+		struct flow_setport_return *return_value =
+			(struct flow_setport_return *)*data;
+		*len = sizeof(struct flow_setport_return);
+		return_value->num_queues = vp_internals->vpq_nb_vq;
+
+#ifdef DEBUG_FLOW
+		NT_LOG(DBG, NTCONNECT, "Number of queues: %u\n",
+		       vp_internals->vpq_nb_vq);
+#endif
+		for (i = 0; i < vp_internals->vpq_nb_vq && i < MAX_QUEUES;
+				i++) {
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT, "Queue:            %u\n",
+			       vp_internals->vpq[i].id);
+			NT_LOG(DBG, NTCONNECT, "HW ID:            %u\n",
+			       vp_internals->vpq[i].hw_id);
+#endif
+			return_value->queues[i].id = vp_internals->vpq[i].id;
+			return_value->queues[i].hw_id =
+				vp_internals->vpq[i].hw_id;
+#ifdef DEBUG_FLOW
+			NT_LOG(DBG, NTCONNECT,
+			       "Setup output port: %u, %04x:%02x:%02x.%x\n",
+			       in_port, vp_internals->pci_dev->addr.domain,
+			       vp_internals->pci_dev->addr.bus,
+			       vp_internals->pci_dev->addr.devid,
+			       vp_internals->pci_dev->addr.function);
+#endif
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handler for "filter,flush,port=<n>": flush all flows on the flow
+ * device bound to the given port.  *data is tokenized in place.
+ */
+static int func_flow_flush(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr _unused, char **data,
+			   int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	int status = -1;
+	char *saveptr;
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 5 && memcmp(tok, "port=", 5) == 0)
+			port = atoi(tok + 5);
+	}
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/*
+	 * Reject negative ports too: atoi() can return a negative value,
+	 * which previously slipped past the ">= MAX_PORTS" check and
+	 * indexed port_eth[] out of bounds.
+	 */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	/* Call filter with data */
+	status = flow_flush(port_eth[port].flw_dev, &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handler for "filter,destroy": destroy a single flow.  The request blob
+ * (struct destroy_flow_ntconnect) carries the port and flow handle.
+ */
+static int func_flow_destroy(void *hdl _unused, int client_id _unused,
+			     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	struct flow_error error;
+	int port = MAX_PORTS;
+	uint64_t flow = 0;
+	int status = -1;
+
+	struct destroy_flow_ntconnect *flow_cpy =
+		(struct destroy_flow_ntconnect *)&(*data)[hdr->len];
+
+	/*
+	 * Initialize error like the sibling handlers do; error.message was
+	 * previously read below without a guaranteed initialization.
+	 */
+	set_error(&error);
+
+	if (hdr->blob_len != sizeof(struct destroy_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	/* Reject negative port numbers as well as too-large ones */
+	if (port < 0 || port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+	flow = flow_cpy->flow;
+
+#ifdef DEBUG_FLOW1
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow);
+#endif
+
+	/* Call filter with data */
+	status = flow_destroy(port_eth[port].flw_dev,
+			      (struct flow_handle *)flow, &error);
+
+	*data = malloc(sizeof(struct flow_return_s));
+	if (*data) {
+		struct flow_return_s *return_value =
+			(struct flow_return_s *)*data;
+		*len = sizeof(struct flow_return_s);
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Selects whether make_flow_create() validates or creates the flow. */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/*
+ * Unpack a serialized flow request (struct create_flow_ntconnect) into the
+ * flow_elem/flow_action arrays expected by the flow API and either
+ * validate (FLOW_API_FUNC_VALIDATE) or create (FLOW_API_FUNC_CREATE) the
+ * flow.  Returns the flow handle as uint64_t on create, 0 on validate or
+ * error; *status and *error carry the detailed result.
+ */
+static uint64_t make_flow_create(int func, int port,
+				 struct create_flow_ntconnect *flow_cpy,
+				 int *status, struct flow_error *error)
+{
+	struct flow_elem elem[MAX_FLOW_STREAM_ELEM];
+	struct flow_action action[MAX_FLOW_STREAM_ELEM];
+	struct flow_action_vxlan_encap vxlan_tun;
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
+	struct flow_elem elem_tun[MAX_FLOW_STREAM_VXLAN_TUN_ELEM];
+	int idx = -1;
+
+	struct flow_attr *attr = &flow_cpy->attr;
+	struct flow_elem_cpy *elem_cpy = flow_cpy->elem;
+	struct flow_action_cpy *action_cpy = flow_cpy->action;
+
+	error->type = FLOW_ERROR_GENERAL;
+	error->message = "Too many flows";
+	*status = NTCONN_FLOW_ERR_TO_MANY_FLOWS;
+
+	attr->caller_id = port_eth[port].caller_id;
+	attr->forced_vlan_vid = port_eth[port].forced_vlan_vid;
+
+	do {
+		idx++;
+		/*
+		 * ">=" : elem[] holds MAX_FLOW_STREAM_ELEM entries, so
+		 * idx == MAX_FLOW_STREAM_ELEM would write out of bounds
+		 * (the old ">" check was off by one).
+		 */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		elem[idx].type = elem_cpy[idx].type;
+		if (!elem_cpy[idx].spec_cpy.valid) {
+			elem[idx].spec = NULL;
+		} else {
+			elem[idx].spec =
+				(void *)&elem_cpy[idx].spec_cpy.u.start_addr;
+		}
+		if (!elem_cpy[idx].mask_cpy.valid) {
+			elem[idx].mask = NULL;
+		} else {
+			elem[idx].mask =
+				(void *)&elem_cpy[idx].mask_cpy.u.start_addr;
+		}
+	} while (elem_cpy[idx].type != FLOW_ELEM_TYPE_END);
+
+	idx = -1;
+	do {
+		idx++;
+		/* Same off-by-one fix as the element loop above */
+		if (idx >= MAX_FLOW_STREAM_ELEM)
+			goto error;
+		action[idx].type = action_cpy[idx].type;
+		if (!action_cpy[idx].conf_cpy.valid) {
+			action[idx].conf = NULL;
+		} else {
+			switch (action_cpy[idx].type) {
+			case FLOW_ACTION_TYPE_VXLAN_ENCAP: {
+				/*
+				 * Special VXLAN ENCAP treatment create inner tunnel
+				 * elements in action
+				 */
+				struct flow_elem_cpy *tun_elem_cpy =
+					(struct flow_elem_cpy *)action_cpy[idx]
+					.conf_cpy.u.vxlan.vxlan_tunnel;
+				vxlan_tun.vxlan_tunnel = elem_tun;
+				int tun_idx = -1;
+
+				do {
+					tun_idx++;
+					/* ">=" : elem_tun[] bound fix */
+					if (tun_idx >=
+							MAX_FLOW_STREAM_VXLAN_TUN_ELEM) {
+						error->message =
+							"Too many VXLAN tunnels";
+						goto error;
+					}
+					elem_tun[tun_idx].type =
+						tun_elem_cpy[tun_idx].type;
+					if (!tun_elem_cpy[tun_idx]
+							.spec_cpy.valid) {
+						elem_tun[tun_idx].spec = NULL;
+					} else {
+						elem_tun[tun_idx].spec =
+							(void *)&tun_elem_cpy[tun_idx]
+							.spec_cpy.u
+							.start_addr;
+					}
+					if (!tun_elem_cpy[tun_idx]
+							.mask_cpy.valid) {
+						elem_tun[tun_idx].mask = NULL;
+					} else {
+						elem_tun[tun_idx].mask =
+							(void *)&tun_elem_cpy[tun_idx]
+							.mask_cpy.u
+							.start_addr;
+					}
+				} while (tun_elem_cpy[tun_idx].type !=
+						FLOW_ELEM_TYPE_END);
+				/* VXLAN ENCAP tunnel finished */
+				action[idx].conf = &vxlan_tun;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RSS: {
+				/* Need to set queue pointer */
+				action_cpy[idx].conf_cpy.u.rss.rss.queue =
+					(const uint16_t *)&action_cpy[idx]
+					.conf_cpy.u.rss.cpy_queue;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.rss.rss;
+			}
+			break;
+			case FLOW_ACTION_TYPE_METER: {
+				/* Need to convert meter ID to uniq ID for the VF */
+				action_cpy[idx].conf_cpy.u.meter.mtr_id =
+					((flow_mtr_meters_supported() /
+					  (RTE_MAX_ETHPORTS - 2)) *
+					 (flow_cpy->vport - 4)) +
+					action_cpy[idx].conf_cpy.u.meter.mtr_id;
+				action[idx].conf = (void *)&action_cpy[idx]
+						   .conf_cpy.u.meter;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_ENCAP: {
+				encap.preserve = NULL;
+				encap.data =
+					action_cpy[idx].conf_cpy.u.encap.data;
+				encap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.encap.item_count;
+				encap.size =
+					action_cpy[idx].conf_cpy.u.encap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.encap.item_count;
+						eidx++) {
+					/* ">=" : encap.items[] bound fix */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many encap items";
+						goto error;
+					}
+					encap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.encap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						encap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.encap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						encap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.encap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						encap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &encap;
+			}
+			break;
+			case FLOW_ACTION_TYPE_RAW_DECAP: {
+				decap.data =
+					action_cpy[idx].conf_cpy.u.decap.data;
+				decap.item_count =
+					action_cpy[idx]
+					.conf_cpy.u.decap.item_count;
+				decap.size =
+					action_cpy[idx].conf_cpy.u.decap.size;
+
+				for (int eidx = 0;
+						eidx <
+						action_cpy[idx].conf_cpy.u.decap.item_count;
+						eidx++) {
+					/* ">=" : decap.items[] bound fix */
+					if (eidx >= RAW_ENCAP_DECAP_ELEMS_MAX) {
+						error->message =
+							"Too many decap items";
+						goto error;
+					}
+					decap.items[eidx].type =
+						action_cpy[idx]
+						.conf_cpy.u.decap
+						.item_cpy[eidx]
+						.type;
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.spec_cpy.valid) {
+						decap.items[eidx].spec =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.spec_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].spec = NULL;
+					}
+					if (action_cpy[idx]
+							.conf_cpy.u.decap
+							.item_cpy[eidx]
+							.mask_cpy.valid) {
+						decap.items[eidx].mask =
+							(void *)&action_cpy[idx]
+							.conf_cpy.u
+							.decap
+							.item_cpy[eidx]
+							.mask_cpy.u
+							.start_addr;
+					} else {
+						decap.items[eidx].mask = NULL;
+					}
+				}
+				action[idx].conf = &decap;
+			}
+			break;
+			default: {
+				/* Move conf pointer into conf_cpy data field */
+				action[idx].conf =
+					(void *)&action_cpy[idx]
+					.conf_cpy.u.start_addr;
+			}
+			break;
+			}
+		}
+	} while (action_cpy[idx].type != FLOW_ACTION_TYPE_END);
+
+	*status = NTCONN_FLOW_ERR_NONE;
+	if (func == FLOW_API_FUNC_VALIDATE) {
+		*status = flow_validate(port_eth[port].flw_dev, elem, action,
+					error);
+		return 0ULL;
+	} else {
+		return (uint64_t)flow_create(port_eth[port].flw_dev, attr, elem,
+					     action, error);
+	}
+
+error:
+	return 0;
+}
+
+/*
+ * Handle an ntconnect "flow create" request.
+ *
+ * The request blob at (*data)[hdr->len] must be a complete
+ * struct create_flow_ntconnect.  On success *data is replaced with a
+ * malloc'ed struct create_flow_return_s carrying the new flow handle;
+ * if the flow API rejects the flow, a struct flow_error_return_s with
+ * the error type/message is returned instead.  Both reply paths return
+ * REQUEST_OK at the protocol level; REQUEST_ERR is reserved for
+ * allocation failure.
+ */
+static int func_flow_create(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	uint64_t flow = 0UL;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	port = flow_cpy->port;
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+						    NTCONN_FLOW_ERR_INVALID_PORT,
+						    ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		/* Bug fix: was flow_cpy[i].elem[i], which indexed past the
+		 * single request struct for i > 0.
+		 */
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data */
+	flow = make_flow_create(FLOW_API_FUNC_CREATE, port, flow_cpy, &status,
+				&error);
+	if (flow) {
+		/* Success: hand the opaque flow handle back to the client. */
+		*data = malloc(sizeof(struct create_flow_return_s));
+		if (!*data)
+			goto create_flow_error_malloc;
+		struct create_flow_return_s *return_value =
+			(struct create_flow_return_s *)*data;
+		*len = sizeof(struct create_flow_return_s);
+		return_value->flow = flow;
+		return REQUEST_OK;
+	}
+
+	/* Flow API failure: reply with the error details instead. */
+	*data = malloc(sizeof(struct flow_error_return_s));
+	if (!*data)
+		goto create_flow_error_malloc;
+	struct flow_error_return_s *return_value =
+		(struct flow_error_return_s *)*data;
+	*len = sizeof(struct flow_error_return_s);
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+create_flow_error_malloc:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle an ntconnect "flow validate" request.
+ *
+ * Same request blob layout as func_flow_create; the flow is only
+ * validated (FLOW_API_FUNC_VALIDATE), never installed.  The reply is
+ * built by copy_return_status() from the validation status and error.
+ *
+ * Fixes versus the original:
+ *  - the IPv4 debug prints reused byte [0] four times (copy/paste bug);
+ *  - flow_cpy[i].elem[i] indexed past the single request struct;
+ *  - an unreachable duplicate of the validate/reply sequence after the
+ *    first return has been removed;
+ *  - the debug trace wrongly logged "func_flow_create".
+ */
+static int func_flow_validate(void *hdl _unused, int client_id _unused,
+			      struct ntconn_header_s *hdr, char **data,
+			      int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+
+	struct create_flow_ntconnect *flow_cpy =
+		(struct create_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct create_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s\n", __func__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_PARSING
+	int i;
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		if (flow_cpy->elem[i].type == FLOW_ELEM_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->elem[i].type) {
+		case FLOW_ELEM_TYPE_IPV4:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV4 %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     src_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_ip:   %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.spec_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     src_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.src_ip)[3] & 0xFF);
+			NT_LOG(DBG, NTCONNECT, "     dst_mask: %u.%u.%u.%u\n",
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[0] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[1] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[2] & 0xFF,
+				((const char *)&flow_cpy->elem[i]
+					.mask_cpy.u.ipv4.hdr.dst_ip)[3] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_ETH:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ETH %i\n", i);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mac:  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].spec_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     src mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.s_addr.addr_b[5] & 0xFF);
+			NT_LOG(DBG, NTCONNECT,
+			       "     dst mask  %02X:%02X:%02X:%02X:%02X:%02X\n",
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[0] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[1] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[2] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[3] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[4] & 0xFF,
+			       flow_cpy->elem[i].mask_cpy.u.eth.d_addr.addr_b[5] & 0xFF);
+			break;
+		case FLOW_ELEM_TYPE_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_IPV6:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_IPV6 %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_SCTP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_SCTP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_TCP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_TCP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_UDP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_UDP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_ICMP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_ICMP %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_VXLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_VXLAN %i\n", i);
+			break;
+		case FLOW_ELEM_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ELEM_TYPE_PORT_ID %i\n",
+			       i);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown item %u\n",
+			       flow_cpy->elem[i].type);
+			break;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOW_STREAM_ELEM; i++) {
+		uint32_t j;
+
+		if (flow_cpy->action[i].type == FLOW_ACTION_TYPE_END) {
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_END\n");
+			break;
+		}
+		switch (flow_cpy->action[i].type) {
+		case FLOW_ACTION_TYPE_RSS:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_RSS %i\n", i);
+			NT_LOG(DBG, NTCONNECT, "     queue nb: %u\n",
+			       flow_cpy->action[i].conf_cpy.u.rss.rss.queue_num);
+			NT_LOG(DBG, NTCONNECT, "     queue:    ");
+			for (j = 0;
+					j < flow_cpy->action[i]
+					.conf_cpy.u.rss.rss.queue_num &&
+					j < FLOW_MAX_QUEUES;
+					j++) {
+				NT_LOG(DBG, NTCONNECT, "%u ",
+				       flow_cpy->action[i]
+				       .conf_cpy.u.rss.cpy_queue[j]);
+			}
+			NT_LOG(DBG, NTCONNECT, "\n");
+			break;
+
+		case FLOW_ACTION_TYPE_POP_VLAN:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_POP_VLAN %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_PUSH_VLAN:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PUSH_VLAN %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_VID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_VID %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_SET_VLAN_PCP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_VLAN_PCP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_DECAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_DECAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_VXLAN_ENCAP:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_VXLAN_ENCAP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_DROP:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_DROP %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_COUNT:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_COUNT %i\n",
+			       i);
+			break;
+		case FLOW_ACTION_TYPE_MARK:
+			NT_LOG(DBG, NTCONNECT, "FLOW_ACTION_TYPE_MARK %i\n", i);
+			break;
+		case FLOW_ACTION_TYPE_PORT_ID:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_PORT_ID %i: ID=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.port_id.id);
+			break;
+		case FLOW_ACTION_TYPE_QUEUE:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_QUEUE %i: queue=%u\n", i,
+			       flow_cpy->action[i].conf_cpy.u.queue.index);
+			break;
+		case FLOW_ACTION_TYPE_SET_TAG:
+			NT_LOG(DBG, NTCONNECT,
+			       "FLOW_ACTION_TYPE_SET_TAG %i: idx=%u, data=%u, mask=%X\n",
+			       i, flow_cpy->action[i].conf_cpy.u.tag.index,
+			       flow_cpy->action[i].conf_cpy.u.tag.data,
+			       flow_cpy->action[i].conf_cpy.u.tag.mask);
+			break;
+		default:
+			NT_LOG(DBG, NTCONNECT, "Unknown action %u\n",
+			       flow_cpy->action[i].type);
+			break;
+		}
+	}
+#endif
+
+	/* Call filter with data; validation only, no flow is installed. */
+	make_flow_create(FLOW_API_FUNC_VALIDATE, port, flow_cpy, &status,
+			 &error);
+	return copy_return_status(data, len, status, &error);
+}
+
+/*
+ * Handle an ntconnect "flow query" request.
+ *
+ * The request blob at (*data)[hdr->len] is a struct query_flow_ntconnect
+ * carrying the port, a flow handle and one action (e.g. COUNT) to query.
+ * On return *data is a malloc'ed struct query_flow_return_s followed by
+ * `length` bytes of query payload; the caller owns and frees it via
+ * flow_free_data().  Returns REQUEST_OK/REQUEST_ERR.
+ */
+static int func_flow_query(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status;
+	struct flow_error error;
+	int port = MAX_PORTS;
+	struct flow_handle *flow;
+
+	struct query_flow_ntconnect *flow_cpy =
+		(struct query_flow_ntconnect *)&(*data)[hdr->len];
+
+	if (hdr->blob_len != sizeof(struct query_flow_ntconnect)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error in filter data");
+		return ntconn_error(data, len, this_module_name,
+				    NTCONN_ERR_CODE_INVALID_REQUEST);
+	}
+
+	set_error(&error);
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	port = flow_cpy->port;
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "Port id=%u\n", port);
+#endif
+
+	if (port >= MAX_PORTS) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "port id out of range");
+		return ntconn_flow_err_reply_status(data, len,
+			NTCONN_FLOW_ERR_INVALID_PORT, ENODEV);
+	}
+
+#ifdef DEBUG_FLOW
+	NT_LOG(DBG, NTCONNECT, "flow=0x%016llX\n",
+	       (unsigned long long)flow_cpy->flow);
+#endif
+
+	/* NOTE(review): the flow handle is a raw pointer value supplied by
+	 * the remote client and is dereferenced by flow_query() without
+	 * validation here — confirm it is checked against known flows in
+	 * the flow API layer.
+	 */
+	flow = (struct flow_handle *)flow_cpy->flow;
+
+	const struct flow_action action = {
+		flow_cpy->action.type,
+		(const void *)&flow_cpy->action.conf_cpy.u.count
+	};
+
+	/* Call filter with data */
+	void *data_out = NULL;
+	uint32_t length = 0;
+
+	status = flow_query(port_eth[port].flw_dev, flow, &action, &data_out,
+			    &length, &error);
+
+	/* Reply buffer holds the fixed header plus the variable payload. */
+	*data = malloc(sizeof(struct query_flow_return_s) + length);
+	if (*data) {
+		struct query_flow_return_s *return_value =
+			(struct query_flow_return_s *)*data;
+		*len = sizeof(struct query_flow_return_s) + length;
+
+		return_value->status = status;
+		return_value->type = error.type;
+		strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+
+		if (data_out) {
+			/* Payload ownership: flow_query() allocated data_out;
+			 * copy it into the reply and release it here.
+			 */
+			memcpy(return_value->data, data_out, length);
+			return_value->data_length = length;
+			free(data_out);
+		} else {
+			return_value->data_length = 0;
+		}
+		return REQUEST_OK;
+	}
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s",
+	       __func__);
+	return REQUEST_ERR;
+}
+
+/* Dispatch a named flow-module request through the module's function table. */
+static int flow_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	const int ret = execute_function(this_module_name, hdl, client_id, hdr,
+					 function, adapter_entry_funcs, data,
+					 len, 0);
+
+	return ret;
+}
+
+/* Release a reply buffer previously allocated by a request handler. */
+static void flow_free_data(void *hdl _unused, char *data)
+{
+	/* free(NULL) is a defined no-op, so no guard is needed. */
+	free(data);
+}
+
+/* Per-client cleanup hook; the flow module keeps no per-client state. */
+static void flow_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* ntconnect module descriptor for the "flow" module: name, protocol
+ * version, and the request/free/cleanup callbacks registered below.
+ */
+static const ntconnapi_t ntconn_flow_op = { this_module_name,
+					    NTCONN_FLOW_VERSION_MAJOR,
+					    NTCONN_FLOW_VERSION_MINOR,
+					    flow_request,
+					    flow_free_data,
+					    flow_client_cleanup
+					  };
+
+/*
+ * Register an adapter with the NtConnect flow module.
+ * Claims the first free slot in flow_hdl[] and registers the module
+ * operations for the device.  Returns the result of
+ * register_ntconn_mod(), or -1 when all slots are taken.
+ */
+int ntconn_flow_register(struct drv_s *drv)
+{
+	int slot;
+
+	/* Find the first unused adapter slot. */
+	for (slot = 0; slot < MAX_CLIENTS; slot++) {
+		if (flow_hdl[slot].drv == NULL)
+			break;
+	}
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	flow_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&flow_hdl[slot],
+				   &ntconn_flow_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
new file mode 100644
index 0000000000..7c21690f8b
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_meter.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_meter.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_errno.h>
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+/* Protocol version advertised by the ntconnect "meter" module. */
+#define NTCONN_METER_VERSION_MAJOR 0U
+#define NTCONN_METER_VERSION_MINOR 1U
+
+#define this_module_name "meter"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+/* One handle slot per registered adapter; drv == NULL marks a free slot. */
+static struct meter_hdl_s {
+	struct drv_s *drv;
+} meter_hdl[MAX_CLIENTS];
+
+/* Meter-module error code to message table, terminated by err_code -1. */
+static ntconn_err_t ntconn_err[] = {
+	{ NTCONN_METER_ERR_NONE, "Success" },
+	{ NTCONN_METER_ERR_INTERNAL_ERROR, "Internal error" },
+	{ NTCONN_METER_ERR_INVALID_PORT, "Invalid virtual port" },
+	{ NTCONN_METER_ERR_PROFILE_ID, "Profile ID out of range" },
+	{ NTCONN_METER_ERR_POLICY_ID, "Policy ID out of range" },
+	{ NTCONN_METER_ERR_METER_ID, "Meter ID out of range" },
+	{ -1, NULL }
+};
+
+/*
+ * Map a meter-module error code to its human-readable message.
+ * Codes below NTCONN_METER_ERR_INTERNAL_ERROR belong to the generic
+ * ntconn error set; unknown module codes fall back to "Internal error".
+ */
+static const char *get_error_msg(uint32_t err_code)
+{
+	if (err_code < NTCONN_METER_ERR_INTERNAL_ERROR) {
+		const ntconn_err_t *generic = get_ntconn_error(err_code);
+
+		return generic->err_text;
+	}
+
+	int i;
+
+	for (i = 0; ntconn_err[i].err_code != (uint32_t)-1; i++) {
+		if (ntconn_err[i].err_code == err_code)
+			return ntconn_err[i].err_text;
+	}
+	/* Sentinel reached: report the generic internal error (index 1). */
+	return ntconn_err[1].err_text;
+}
+
+/*
+ * Filter functions
+ */
+static int func_meter_get_capabilities(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+static int func_meter_setup(void *hdl, int client_id,
+			    struct ntconn_header_s *hdr, char **data, int *len);
+static int func_meter_read(void *hdl, int client_id,
+			   struct ntconn_header_s *hdr, char **data, int *len);
+/* Request-name to handler dispatch table; NULL entry terminates it. */
+static struct func_s adapter_entry_funcs[] = {
+	{ "capabilities", NULL, func_meter_get_capabilities },
+	{ "setup", NULL, func_meter_setup },
+	{ "read", NULL, func_meter_read },
+	{ NULL, NULL, NULL },
+};
+
+/**********************************************************************/
+/* copy error message corresponding to the error code to error struct */
+/**********************************************************************/
+/* Fills an rte_mtr_error with the module message for err; cause is
+ * always cleared and the type is left unspecified.
+ */
+static void copy_mtr_error(struct rte_mtr_error *error, uint32_t err)
+{
+	error->type = RTE_MTR_ERROR_TYPE_UNSPECIFIED;
+	error->message = get_error_msg(err);
+	error->cause = NULL;
+}
+
+/*
+ * Handle the meter "capabilities" request.
+ *
+ * Parses an optional "vport=<n>" token from the request string, maps the
+ * virtual port to a physical port and queries rte_mtr capabilities.
+ * On success *data is a malloc'ed meter_capabilities_return_s; on a
+ * query failure a meter_error_return_s is returned instead (still
+ * REQUEST_OK at the protocol level).  REQUEST_ERR only on OOM.
+ */
+static int func_meter_get_capabilities(void *hdl _unused, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	char *saveptr;
+	uint8_t vport = 0;
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_capabilities cap;
+	struct rte_mtr_error error;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length > 6 && memcmp(tok, "vport=", 6) == 0)
+			vport = atoi(tok + 6);
+	}
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "vport=%u\n", vport);
+#endif
+
+	/* Valid virtual port range here is 1..64 (0 means "not given"). */
+	if (vport == 0 || vport > 64) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_get_capa;
+	}
+
+	/* NOTE(review): vport's low bit selects physical port 0/1 —
+	 * presumably the VF-to-port mapping; confirm against the adapter
+	 * layout.
+	 */
+	port = vport & 1;
+	status = rte_mtr_capabilities_get(port, &cap, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_capabilities_return_s));
+		if (!*data)
+			goto error_get_capa_malloc;
+		struct meter_capabilities_return_s *return_value =
+			(struct meter_capabilities_return_s *)*data;
+		*len = sizeof(struct meter_capabilities_return_s);
+		memcpy(&return_value->cap, &cap,
+		       sizeof(struct rte_mtr_capabilities));
+		return REQUEST_OK;
+	}
+
+error_get_capa:
+
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to get capabilities for port %u (%u)",
+	       port, vport);
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_get_capa_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_get_capa_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * Handle the meter "setup" request.
+ *
+ * The request string selects one of six commands (addpro/delpro/addpol/
+ * delpol/crtmtr/delmtr); the data blob at (*data)[hdr->len] is a
+ * struct meter_setup_s with the virtual port, object id and parameters.
+ * Per-VF ids are remapped into a global id space by offsetting with
+ * (vport - 4) * per-port-capacity before calling the rte_mtr API.
+ * Replies with meter_return_s on success or meter_error_return_s on
+ * failure; REQUEST_ERR only on OOM.
+ */
+static int func_meter_setup(void *hdl _unused, int client_id _unused,
+			    struct ntconn_header_s *hdr, char **data, int *len)
+{
+	char *saveptr;
+	uint8_t port;
+	uint32_t max_id;
+	int status;
+	struct rte_mtr_error error;
+	int command = UNKNOWN_CMD;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: \"%s\"\n", __func__, *data);
+#endif
+
+	if (hdr->blob_len != sizeof(struct meter_setup_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Error: Profile data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_setup;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_setup_s *cpy_data =
+		(struct meter_setup_s *)&(*data)[hdr->len];
+
+	/* NOTE(review): valid vport range is 4..128 here, unlike the 1..64
+	 * range in func_meter_get_capabilities — confirm both are intended.
+	 */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_setup;
+	}
+
+	/* The command is the first comma-separated token of the request. */
+	char *tok = strtok_r(*data, ",", &saveptr);
+
+	if (tok) {
+		int length = strlen(tok);
+
+		if (length == 6) {
+			if (memcmp(tok, "addpro", 6) == 0)
+				command = ADD_PROFILE;
+
+			else if (memcmp(tok, "delpro", 6) == 0)
+				command = DEL_PROFILE;
+
+			else if (memcmp(tok, "addpol", 6) == 0)
+				command = ADD_POLICY;
+
+			else if (memcmp(tok, "delpol", 6) == 0)
+				command = DEL_POLICY;
+
+			else if (memcmp(tok, "crtmtr", 6) == 0)
+				command = CREATE_MTR;
+
+			else if (memcmp(tok, "delmtr", 6) == 0)
+				command = DEL_MTR;
+		}
+	}
+
+	if (command == UNKNOWN_CMD) {
+		NT_LOG(ERR, NTCONNECT, "Error: Invalid command");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINVAL;
+		goto error_meter_setup;
+	}
+
+	/* Port will be either 0 or 1 depending on the VF. */
+	port = cpy_data->vport & 1;
+
+	switch (command) {
+	case ADD_PROFILE:
+		/* Per-port profile capacity; ids are offset per VF below. */
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_add(port, cpy_data->id,
+						   &cpy_data->profile, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add profile for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_PROFILE:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Profile ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_PROFILE_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_meter_profile_delete(port, cpy_data->id,
+						      &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete profile for port %u (%u)",
+			       port, cpy_data->vport);
+		}
+		break;
+	case ADD_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Rebuild the per-color action table from the flat fields. */
+		cpy_data->p.policy.actions[RTE_COLOR_GREEN] =
+			cpy_data->p.actions_green;
+		cpy_data->p.policy.actions[RTE_COLOR_YELLOW] =
+			cpy_data->p.actions_yellow;
+		cpy_data->p.policy.actions[RTE_COLOR_RED] =
+			cpy_data->p.actions_red;
+		status = rte_mtr_meter_policy_add(port, cpy_data->id,
+						  &cpy_data->p.policy, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to add policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_POLICY:
+		max_id = flow_mtr_meter_policy_n_max() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Policy ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_POLICY_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status =
+			rte_mtr_meter_policy_delete(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to delete policy for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case CREATE_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		/* Profile/policy ids inside the params use the same per-VF
+		 * offset scheme as above.
+		 */
+		cpy_data->mtr_params.meter_profile_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_profile_id;
+		cpy_data->mtr_params.meter_policy_id =
+			((cpy_data->vport - 4) *
+			 (flow_mtr_meter_policy_n_max() /
+			  (RTE_MAX_ETHPORTS - 2))) +
+			cpy_data->mtr_params.meter_policy_id;
+		status = rte_mtr_create(port, cpy_data->id,
+					&cpy_data->mtr_params, cpy_data->shared,
+					&error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to create meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	case DEL_MTR:
+		max_id = flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2);
+		if (cpy_data->id > max_id) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Error: Meter ID %u out of range. Max value is %u",
+			       cpy_data->id, max_id);
+			copy_mtr_error(&error, NTCONN_METER_ERR_METER_ID);
+			status = -EINVAL;
+			goto error_meter_setup;
+		}
+		cpy_data->id = ((cpy_data->vport - 4) * max_id) + cpy_data->id;
+		status = rte_mtr_destroy(port, cpy_data->id, &error);
+		if (status != 0) {
+			NT_LOG(ERR, NTCONNECT,
+			       "Failed to destroy meter for port %u (%u)", port,
+			       cpy_data->vport);
+		}
+		break;
+	}
+
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_s));
+		if (!*data)
+			goto error_meter_setup_malloc;
+		struct meter_return_s *return_value =
+			(struct meter_return_s *)*data;
+		*len = sizeof(struct meter_return_s);
+		return_value->status = 0;
+		return REQUEST_OK;
+	}
+
+error_meter_setup:
+
+	/* Handle errors by copy errors to the error struct */
+	 *data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_setup_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	return_value->status = status;
+	return_value->type = error.type;
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return REQUEST_OK;
+
+error_meter_setup_malloc:
+
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/*
+ * ntconnect "meter read" handler: reads (and optionally clears) the RTE MTR
+ * statistics of one meter owned by a virtual port.
+ *
+ * On success *data/*len carry a malloc'ed meter_return_stat_s; on failure a
+ * meter_error_return_s describing the error. Both are freed by
+ * meter_free_data(). Returns REQUEST_OK unless the reply allocation fails.
+ */
+static int func_meter_read(void *hdl _unused, int client_id _unused,
+			   struct ntconn_header_s *hdr, char **data, int *len)
+{
+	uint8_t port = 0;
+	int status;
+	struct rte_mtr_error error;
+	struct rte_mtr_stats stats;
+	uint64_t stats_mask;
+
+#ifdef DEBUG_METER
+	NT_LOG(DBG, NTCONNECT, "%s: [%s:%u] enter\n", __func__, __FILE__, __LINE__);
+#endif
+
+	/* The request blob must be exactly one meter_get_stat_s */
+	if (hdr->blob_len != sizeof(struct meter_get_stat_s)) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT,
+		       "Error: Read meter stats data size is illegal");
+		copy_mtr_error(&error, NTCONN_ERR_CODE_INVALID_REQUEST);
+		status = -EINTR;
+		goto error_meter_read;
+	}
+
+	/* Get the data blob containing the data for the meter function */
+	struct meter_get_stat_s *cpy_data =
+		(struct meter_get_stat_s *)&(*data)[hdr->len];
+
+	/* Virtual ports live in the range [4..128] (0..3 are physical/reserved) */
+	if (cpy_data->vport < 4 || cpy_data->vport > 128) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "Virtual port is invalid");
+		copy_mtr_error(&error, NTCONN_METER_ERR_INVALID_PORT);
+		status = -ENODEV;
+		goto error_meter_read;
+	}
+
+	/* NOTE(review): vport parity selects the ethdev port id - confirm mapping */
+	port = cpy_data->vport & 1;
+	/* Translate the per-vport meter id into the global meter id space */
+	cpy_data->mtr_id =
+		((cpy_data->vport - 4) *
+		 (flow_mtr_meters_supported() / (RTE_MAX_ETHPORTS - 2))) +
+		cpy_data->mtr_id;
+	status = rte_mtr_stats_read(port, cpy_data->mtr_id, &stats, &stats_mask,
+				    cpy_data->clear, &error);
+	if (status == 0) {
+		/* Handle success by copying the return values to the return struct */
+		*data = malloc(sizeof(struct meter_return_stat_s));
+		if (!*data)
+			goto error_meter_read_malloc;
+		struct meter_return_stat_s *return_value =
+			(struct meter_return_stat_s *)*data;
+		*len = sizeof(struct meter_return_stat_s);
+		return_value->stats_mask = stats_mask;
+		memcpy(&return_value->stats, &stats,
+		       sizeof(struct rte_mtr_stats));
+		return REQUEST_OK;
+	}
+
+error_meter_read:
+	/* Handle errors by copy errors to the error struct */
+	NT_LOG(ERR, NTCONNECT, "Failed to read meter stats");
+	*data = malloc(sizeof(struct meter_error_return_s));
+	if (!*data)
+		goto error_meter_read_malloc;
+	struct meter_error_return_s *return_value =
+		(struct meter_error_return_s *)*data;
+	*len = sizeof(struct meter_error_return_s);
+	strlcpy(return_value->err_msg, error.message, ERR_MSG_LEN);
+	return_value->status = status;
+	return_value->type = error.type;
+	return REQUEST_OK;
+
+error_meter_read_malloc:
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* Module entry point: route an incoming "meter" request to its handler. */
+static int meter_request(void *hdl, int client_id _unused,
+			 struct ntconn_header_s *hdr, char *function,
+			 char **data, int *len)
+{
+	int ret = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, adapter_entry_funcs, data, len, 0);
+	return ret;
+}
+
+/*
+ * Release a reply buffer previously handed out by a meter request handler.
+ * free(NULL) is a no-op per the C standard, so no NULL guard is needed.
+ */
+static void meter_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/* Per-client teardown hook: the meter module keeps no per-client state. */
+static void meter_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* NtConnect operations vector for the meter module (name, version, hooks) */
+static const ntconnapi_t ntconn_meter_op = { this_module_name,
+					     NTCONN_METER_VERSION_MAJOR,
+					     NTCONN_METER_VERSION_MINOR,
+					     meter_request,
+					     meter_free_data,
+					     meter_client_cleanup
+					   };
+
+/*
+ * Register one adapter instance with the NtConnect framework.
+ * Claims the first free slot in meter_hdl[]; fails with -1 when all
+ * MAX_CLIENTS slots are occupied.
+ */
+int ntconn_meter_register(struct drv_s *drv)
+{
+	int slot = 0;
+
+	while (slot < MAX_CLIENTS && meter_hdl[slot].drv != NULL)
+		slot++;
+
+	if (slot == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	meter_hdl[slot].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&meter_hdl[slot],
+				   &ntconn_meter_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
new file mode 100644
index 0000000000..ea379015fe
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_modules.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+/*
+ * Include guard renamed from _NTCONN_MODULES_H_: identifiers starting with
+ * an underscore followed by an uppercase letter are reserved for the
+ * implementation (C11 7.1.3, CERT DCL37-C).
+ */
+#ifndef NTCONN_MODULES_H_
+#define NTCONN_MODULES_H_
+
+#include "ntos_system.h"
+
+/*
+ * All defined NT connection modules.
+ * Each call registers the module for the given driver instance with the
+ * NtConnect framework; a negative return value indicates failure.
+ */
+int ntconn_adap_register(struct drv_s *drv);
+int ntconn_stat_register(struct drv_s *drv);
+int ntconn_flow_register(struct drv_s *drv);
+int ntconn_meter_register(struct drv_s *drv);
+int ntconn_test_register(struct drv_s *drv);
+
+#endif /* NTCONN_MODULES_H_ */
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
new file mode 100644
index 0000000000..437cf9ddad
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_stat.c
@@ -0,0 +1,877 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <rte_dev.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+#include "ntconnect.h"
+#include "ntconnect_api_statistic.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_xstats.h"
+
+#define STAT_VERSION_MAJOR 0U
+#define STAT_VERSION_MINOR 2U
+
+#define this_module_name "stat"
+
+/*
+ * Supported Stat Layout Versions
+ *
+ * ntconn_stat_register() refuses to activate the module when the FPGA
+ * statistics layout version is not listed here. The table is read-only,
+ * hence static const.
+ */
+#define NUM_LAYOUT_VERSIONS_SUPPORTED (RTE_DIM(layout_versions_supported))
+static const int layout_versions_supported[] = {
+	6,
+	/*
+	 * Add here other layout versions to support
+	 * When more versions are added, add new version dependent binary reply structures
+	 * in ntconnect_api.h file for client to select on reading layout_version
+	 */
+};
+
+/* Index of each statistics section inside a client snapshot buffer */
+enum snap_addr_select_e {
+	SNAP_COLORS,
+	SNAP_QUEUES,
+	SNAP_RX_PORT,
+	SNAP_TX_PORT,
+	SNAP_ADDR_COUNT
+};
+
+/* One snapshot section: start address and size in 64-bit words */
+struct snap_addr_s {
+	const uint64_t *ptr;
+	unsigned int size;
+};
+
+/* Per-client snapshot node, linked into stat_hdl.snaps_base */
+struct snaps_s {
+	int client_id;
+	/* Pointers into buffer */
+	struct snap_addr_s snap_addr[SNAP_ADDR_COUNT];
+	uint64_t *buffer;
+	struct snaps_s *next;
+};
+
+/* Module-global handle: driver, statistics base and the snapshot list */
+static struct stat_hdl {
+	struct drv_s *drv;
+	nt4ga_stat_t *p_nt4ga_stat;
+	struct snaps_s *snaps_base;
+} stat_hdl;
+
+/* Statistic categories served by this module */
+enum stat_type_e {
+	STAT_TYPE_COLOR,
+	STAT_TYPE_QUEUE,
+	STAT_TYPE_RX,
+	STAT_TYPE_TX,
+	STAT_TYPE_FLOWMATCHER
+};
+
+/* Forward declarations for the "get snapshot <section>" handlers */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr, char **data,
+				 int *len);
+/* Level-2 dispatch table: "get snapshot <x>" */
+static struct func_s func_snap_level2[] = {
+	{ "colors", NULL, func_get_snap_colors },
+	{ "queues", NULL, func_get_snap_queues },
+	{ "rx_counters", NULL, func_get_snap_rx_port },
+	{ "tx_counters", NULL, func_get_snap_tx_port },
+	{ NULL, NULL, NULL },
+};
+
+/* Forward declarations for the "get <x>" handlers */
+static int func_get_layout_version(void *hdl, int client_id,
+				   struct ntconn_header_s *hdr, char **data,
+				   int *len);
+static int func_get_flm(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			char **data, int *len);
+static int func_get_color(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_queue(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			  char **data, int *len);
+static int func_get_rx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_tx_counters(void *hdl, int client_id,
+				struct ntconn_header_s *hdr, char **data,
+				int *len);
+static int func_get_flm_layout_version(void *hdl, int client_id,
+				       struct ntconn_header_s *hdr, char **data,
+				       int *len);
+
+/* Level-1 dispatch table: "get <x>" */
+static struct func_s funcs_get_level1[] = {
+	{ "snapshot", func_snap_level2, NULL },
+	{ "layout_version", NULL, func_get_layout_version },
+	{ "flm", NULL, func_get_flm },
+	{ "colors", NULL, func_get_color },
+	{ "queues", NULL, func_get_queue },
+	{ "rx_counters", NULL, func_get_rx_counters },
+	{ "tx_counters", NULL, func_get_tx_counters },
+	{ "flm_layout_version", NULL, func_get_flm_layout_version },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Entry level
+ */
+static int func_snapshot(void *hdl, int client_id, struct ntconn_header_s *hdr,
+			 char **data, int *len);
+/* Top-level dispatch table for the "stat" module */
+static struct func_s stat_entry_funcs[] = {
+	{ "get", funcs_get_level1, NULL },
+	{ "snapshot", NULL, func_snapshot },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * Serialize FlowMatcher (FLM) statistics into the reply buffer 'val':
+ * an ntc_stat_get_data_s header followed by nbc flowmatcher records.
+ * Returns the number of 64-bit words written.
+ *
+ * Fixes vs. the original:
+ *  - 'flm' is now advanced each iteration (as read_colors()/read_queues()
+ *    do); previously every iteration overwrote record 0 and records
+ *    1..nbc-1 stayed uninitialized.
+ *  - the zero-fill now clears the whole nbc-record output region instead
+ *    of sizeof(*mp_stat_structs_flm), which is the size of the source
+ *    struct, not of the region being cleared.
+ */
+static int read_flm(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	struct rte_eth_xstat stats[100];
+	struct rte_eth_xstat_name names[100];
+	int cnt_names = nthw_xstats_get_names(hwstat, names, 100,
+					      hwstat->mp_nthw_stat->mb_is_vswitch);
+	int cnt_values = nthw_xstats_get(hwstat, stats, 100,
+					 hwstat->mp_nthw_stat->mb_is_vswitch, 0);
+	assert(cnt_names == cnt_values);
+	/* Silence unused-variable warnings when assert() compiles away (NDEBUG) */
+	(void)cnt_names;
+	(void)cnt_values;
+
+	/* virt/cap same */
+	struct flowmatcher_type_fields_s *flm =
+		(struct flowmatcher_type_fields_s *)cdata->data;
+	if (hwstat->mp_stat_structs_flm) {
+		int c;
+
+		for (c = 0; c < nbc; c++) {
+			flm->current = hwstat->mp_stat_structs_flm->current;
+			flm->learn_done = hwstat->mp_stat_structs_flm->learn_done;
+			flm->learn_ignore =
+				hwstat->mp_stat_structs_flm->learn_ignore;
+			flm->learn_fail = hwstat->mp_stat_structs_flm->learn_fail;
+			flm->unlearn_done =
+				hwstat->mp_stat_structs_flm->unlearn_done;
+			flm->unlearn_ignore =
+				hwstat->mp_stat_structs_flm->unlearn_ignore;
+			flm->auto_unlearn_done =
+				hwstat->mp_stat_structs_flm->auto_unlearn_done;
+			flm->auto_unlearn_ignore =
+				hwstat->mp_stat_structs_flm->auto_unlearn_ignore;
+			flm->auto_unlearn_fail =
+				hwstat->mp_stat_structs_flm->auto_unlearn_fail;
+			flm->timeout_unlearn_done =
+				hwstat->mp_stat_structs_flm->timeout_unlearn_done;
+			flm->rel_done = hwstat->mp_stat_structs_flm->rel_done;
+			flm->rel_ignore = hwstat->mp_stat_structs_flm->rel_ignore;
+			flm->prb_done = hwstat->mp_stat_structs_flm->prb_done;
+			flm->prb_ignore = hwstat->mp_stat_structs_flm->prb_ignore;
+
+			flm->sta_done = hwstat->mp_stat_structs_flm->sta_done;
+			flm->inf_done = hwstat->mp_stat_structs_flm->inf_done;
+			flm->inf_skip = hwstat->mp_stat_structs_flm->inf_skip;
+			flm->pck_hit = hwstat->mp_stat_structs_flm->pck_hit;
+			flm->pck_miss = hwstat->mp_stat_structs_flm->pck_miss;
+			flm->pck_unh = hwstat->mp_stat_structs_flm->pck_unh;
+			flm->pck_dis = hwstat->mp_stat_structs_flm->pck_dis;
+			flm->csh_hit = hwstat->mp_stat_structs_flm->csh_hit;
+			flm->csh_miss = hwstat->mp_stat_structs_flm->csh_miss;
+			flm->csh_unh = hwstat->mp_stat_structs_flm->csh_unh;
+			flm->cuc_start = hwstat->mp_stat_structs_flm->cuc_start;
+			flm->cuc_move = hwstat->mp_stat_structs_flm->cuc_move;
+			flm++;	/* advance to the next output record */
+		}
+	} else {
+		/* No FLM stats available: zero the whole output region */
+		memset(flm, 0, nbc * sizeof(*flm));
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_FLOWMATCHER + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize color statistics into the reply buffer 'val':
+ * an ntc_stat_get_data_s header followed by nbc color records.
+ * Returns the number of 64-bit words written.
+ */
+static int read_colors(nt4ga_stat_t *hwstat, uint64_t *val, int nbc)
+{
+	struct ntc_stat_get_data_s *cdata = (struct ntc_stat_get_data_s *)val;
+	struct color_type_fields_s *clr;
+	int idx;
+
+	cdata->nb_counters = (uint64_t)nbc;
+	cdata->timestamp = hwstat->last_timestamp;
+	cdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same record layout */
+	clr = (struct color_type_fields_s *)cdata->data;
+	for (idx = 0; idx < nbc; idx++, clr++) {
+		clr->pkts = hwstat->mp_stat_structs_color[idx].color_packets;
+		clr->octets = hwstat->mp_stat_structs_color[idx].color_bytes;
+		clr->tcp_flgs =
+			(uint64_t)hwstat->mp_stat_structs_color[idx].tcp_flags;
+	}
+	return nbc * NUM_STAT_RECORD_TYPE_COLOR + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Serialize host-buffer (queue) statistics into the reply buffer 'val':
+ * an ntc_stat_get_data_s header followed by nbq queue records.
+ * Returns the number of 64-bit words written.
+ */
+static int read_queues(nt4ga_stat_t *hwstat, uint64_t *val, int nbq)
+{
+	struct ntc_stat_get_data_s *qdata = (struct ntc_stat_get_data_s *)val;
+
+	qdata->nb_counters = (uint64_t)nbq;
+	qdata->timestamp = hwstat->last_timestamp;
+	qdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	/* virt/cap same */
+	struct queue_type_fields_s *queue =
+		(struct queue_type_fields_s *)qdata->data;
+	int q;
+
+	for (q = 0; q < nbq; q++) {
+		queue->flush_pkts = hwstat->mp_stat_structs_hb[q].flush_packets;
+		queue->drop_pkts = hwstat->mp_stat_structs_hb[q].drop_packets;
+		queue->fwd_pkts = hwstat->mp_stat_structs_hb[q].fwd_packets;
+		queue->dbs_drop_pkts = hwstat->mp_stat_structs_hb[q].dbs_drop_packets;
+		queue->flush_octets = hwstat->mp_stat_structs_hb[q].flush_bytes;
+		queue->drop_octets = hwstat->mp_stat_structs_hb[q].drop_bytes;
+		queue->fwd_octets = hwstat->mp_stat_structs_hb[q].fwd_bytes;
+		queue->dbs_drop_octets = hwstat->mp_stat_structs_hb[q].dbs_drop_bytes;
+		queue++;
+	}
+	return nbq * NUM_STAT_RECORD_TYPE_QUEUE + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Copy the RMON counter group shared by Rx and Tx capture ports from the
+ * driver's counter struct into the wire-format rmon struct, field by field.
+ */
+static void copy_rmon_stat(struct port_counters_v2 *cptr,
+			    struct stat_rmon_s *rmon)
+{
+	rmon->drop_events = cptr->drop_events;
+	rmon->pkts = cptr->pkts;
+	rmon->octets = cptr->octets;
+	rmon->broadcast_pkts = cptr->broadcast_pkts;
+	rmon->multicast_pkts = cptr->multicast_pkts;
+	rmon->unicast_pkts = cptr->unicast_pkts;
+	rmon->pkts_alignment = cptr->pkts_alignment;
+	rmon->pkts_code_violation = cptr->pkts_code_violation;
+	rmon->pkts_crc = cptr->pkts_crc;
+	rmon->undersize_pkts = cptr->undersize_pkts;
+	rmon->oversize_pkts = cptr->oversize_pkts;
+	rmon->fragments = cptr->fragments;
+	rmon->jabbers_not_truncated = cptr->jabbers_not_truncated;
+	rmon->jabbers_truncated = cptr->jabbers_truncated;
+	rmon->pkts_64_octets = cptr->pkts_64_octets;
+	rmon->pkts_65_to_127_octets = cptr->pkts_65_to_127_octets;
+	rmon->pkts_128_to_255_octets = cptr->pkts_128_to_255_octets;
+	rmon->pkts_256_to_511_octets = cptr->pkts_256_to_511_octets;
+	rmon->pkts_512_to_1023_octets = cptr->pkts_512_to_1023_octets;
+	rmon->pkts_1024_to_1518_octets = cptr->pkts_1024_to_1518_octets;
+	rmon->pkts_1519_to_2047_octets = cptr->pkts_1519_to_2047_octets;
+	rmon->pkts_2048_to_4095_octets = cptr->pkts_2048_to_4095_octets;
+	rmon->pkts_4096_to_8191_octets = cptr->pkts_4096_to_8191_octets;
+	rmon->pkts_8192_to_max_octets = cptr->pkts_8192_to_max_octets;
+}
+
+/*
+ * Serialize Rx port statistics into the reply buffer 'val':
+ * an ntc_stat_get_data_s header followed by nbp port records.
+ * The record layout differs between vswitch (virt) and capture (cap)
+ * adapters; the is_virt header field tells the client which one follows.
+ * Returns the number of 64-bit words written.
+ */
+static int read_rx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *rxdata = (struct ntc_stat_get_data_s *)val;
+
+	rxdata->nb_counters = (uint64_t)nbp;
+	rxdata->timestamp = hwstat->last_timestamp;
+	rxdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (rxdata->is_virt) {
+		/* vswitch: small per-port record (octets/pkts/drops/QoS) */
+		struct rtx_type_fields_virt_s *rxc =
+			(struct rtx_type_fields_virt_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			rxc->octets =
+				hwstat->virt.mp_stat_structs_port_rx[p].octets;
+			rxc->pkts = hwstat->virt.mp_stat_structs_port_rx[p].pkts;
+			rxc->drop_events =
+				hwstat->virt.mp_stat_structs_port_rx[p].drop_events;
+			rxc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_rx[p]
+				.qos_drop_octets;
+			rxc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_rx[p]
+					     .qos_drop_pkts;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		/* capture: RMON group plus the Rx-only counters */
+		struct rx_type_fields_cap_s *rxc =
+			(struct rx_type_fields_cap_s *)rxdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_rx[p],
+					&rxc->rmon);
+
+			/* Rx only port counters */
+			rxc->mac_drop_events =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.mac_drop_events;
+			rxc->pkts_lr =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_lr;
+			rxc->duplicate =
+				hwstat->cap.mp_stat_structs_port_rx[p].duplicate;
+			rxc->pkts_ip_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_ip_chksum_error;
+			rxc->pkts_udp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_udp_chksum_error;
+			rxc->pkts_tcp_chksum_error =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_tcp_chksum_error;
+			rxc->pkts_giant_undersize =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_giant_undersize;
+			rxc->pkts_baby_giant =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_baby_giant;
+			rxc->pkts_not_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_not_isl_vlan_mpls;
+			rxc->pkts_isl =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl;
+			rxc->pkts_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_vlan;
+			rxc->pkts_isl_vlan =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan;
+			rxc->pkts_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_mpls;
+			rxc->pkts_isl_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls;
+			rxc->pkts_vlan_mpls = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_vlan_mpls;
+			rxc->pkts_isl_vlan_mpls =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_isl_vlan_mpls;
+			rxc->pkts_no_filter = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .pkts_no_filter;
+			rxc->pkts_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_dedup_drop;
+			rxc->pkts_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.pkts_filter_drop;
+			rxc->pkts_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_overflow;
+			rxc->pkts_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop;
+			rxc->octets_no_filter =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_no_filter;
+			rxc->octets_dedup_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dedup_drop;
+			rxc->octets_filter_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_filter_drop;
+			rxc->octets_overflow =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_overflow;
+			rxc->octets_dbs_drop =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.octets_dbs_drop;
+			rxc->ipft_first_hit = hwstat->cap.mp_stat_structs_port_rx[p]
+					      .ipft_first_hit;
+			rxc->ipft_first_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_first_not_hit;
+			rxc->ipft_mid_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit;
+			rxc->ipft_mid_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_mid_not_hit;
+			rxc->ipft_last_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p].ipft_last_hit;
+			rxc->ipft_last_not_hit =
+				hwstat->cap.mp_stat_structs_port_rx[p]
+				.ipft_last_not_hit;
+			rxc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_RX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * Serialize Tx port statistics into the reply buffer 'val':
+ * an ntc_stat_get_data_s header followed by nbp port records
+ * (virt or cap layout, selected by is_virt).
+ * Returns the number of 64-bit words written.
+ */
+static int read_tx_counters(nt4ga_stat_t *hwstat, uint64_t *val, int nbp)
+{
+	struct ntc_stat_get_data_s *txdata = (struct ntc_stat_get_data_s *)val;
+
+	txdata->nb_counters = (uint64_t)nbp;
+	txdata->timestamp = hwstat->last_timestamp;
+	txdata->is_virt = hwstat->mp_nthw_stat->mb_is_vswitch;
+
+	if (txdata->is_virt) {
+		struct rtx_type_fields_virt_s *txc =
+			(struct rtx_type_fields_virt_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			txc->octets =
+				hwstat->virt.mp_stat_structs_port_tx[p].octets;
+			txc->pkts = hwstat->virt.mp_stat_structs_port_tx[p].pkts;
+			txc->drop_events =
+				hwstat->virt.mp_stat_structs_port_tx[p].drop_events;
+			txc->qos_drop_octets =
+				hwstat->virt.mp_stat_structs_port_tx[p]
+				.qos_drop_octets;
+			txc->qos_drop_pkts = hwstat->virt.mp_stat_structs_port_tx[p]
+					     .qos_drop_pkts;
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_VIRT +
+		       STAT_INFO_ELEMENTS;
+	} else {
+		struct tx_type_fields_cap_s *txc =
+			(struct tx_type_fields_cap_s *)txdata->data;
+		int p;
+
+		for (p = 0; p < nbp; p++) {
+			copy_rmon_stat(&hwstat->cap.mp_stat_structs_port_tx[p],
+					&txc->rmon);
+			/* Deliberately overrides rmon.pkts with the driver's
+			 * per-port Tx packet total - presumably the RMON pkts
+			 * counter is not authoritative on Tx; confirm.
+			 */
+			txc->rmon.pkts = hwstat->a_port_tx_packets_total[p];
+			txc++;
+		}
+		return nbp * NUM_STAT_RECORD_TYPE_TX_PORT_CAP +
+		       STAT_INFO_ELEMENTS;
+	}
+}
+
+/*
+ * "get layout_version": reply with the FPGA statistics layout version
+ * as a single malloc'ed int (freed by stat_free_data()).
+ */
+static int func_get_layout_version(void *hdl, int client_id _unused,
+				   struct ntconn_header_s *hdr _unused,
+				   char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	if (stat->p_nt4ga_stat == NULL ||
+			stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	reply = malloc(sizeof(int));
+	if (reply == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = stat->p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version;
+
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * "get flm_layout_version": reply with 1 for FLM stat versions below 18,
+ * otherwise 2, as a single malloc'ed int (freed by stat_free_data()).
+ */
+static int func_get_flm_layout_version(void *hdl, int client_id _unused,
+				       struct ntconn_header_s *hdr _unused,
+				       char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int *reply;
+
+	if (stat->p_nt4ga_stat == NULL ||
+			stat->p_nt4ga_stat->mp_nthw_stat == NULL) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+
+	reply = malloc(sizeof(int));
+	if (reply == NULL) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	*reply = (stat->p_nt4ga_stat->flm_stat_ver < 18) ? 1 : 2;
+
+	*data = (char *)reply;
+	*len = sizeof(int);
+	return REQUEST_OK;
+}
+
+/*
+ * Return total number of 64bit counters occupied by this stat type
+ * additionally, returns total number of records for this type (ie number of queues, ports, etc)
+ */
+static int get_size(struct stat_hdl *stat, enum stat_type_e type,
+		     int *num_records)
+{
+	int nrec = 0;
+	int size = 0;
+
+	switch (type) {
+	case STAT_TYPE_COLOR:
+		/* NOTE(review): HW exposes 2 counters per color record - confirm */
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_color_counters / 2;
+		size = nrec * NUM_STAT_RECORD_TYPE_COLOR;
+		break;
+	case STAT_TYPE_QUEUE:
+		nrec = stat->p_nt4ga_stat->mp_nthw_stat->m_nb_rx_host_buffers;
+		size = nrec * NUM_STAT_RECORD_TYPE_QUEUE;
+		break;
+	case STAT_TYPE_RX:
+		/* Record size depends on adapter flavor (vswitch vs capture) */
+		nrec = stat->p_nt4ga_stat->mn_rx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_RX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_RX_PORT_CAP);
+		break;
+	case STAT_TYPE_TX:
+		nrec = stat->p_nt4ga_stat->mn_tx_ports;
+		size = nrec * ((stat->p_nt4ga_stat->mp_nthw_stat->mb_is_vswitch) ?
+			       NUM_STAT_RECORD_TYPE_TX_PORT_VIRT :
+			       NUM_STAT_RECORD_TYPE_TX_PORT_CAP);
+		break;
+	case STAT_TYPE_FLOWMATCHER:
+		nrec = 1;
+		size = nrec * NUM_STAT_RECORD_TYPE_FLOWMATCHER;
+		break;
+	default:
+		/* Unknown type: report an empty (header-only) section */
+		break;
+	}
+
+	*num_records = nrec;
+	return size + STAT_INFO_ELEMENTS;
+}
+
+/*
+ * Common helper for the statistics getters: size the reply buffer for
+ * 'stype', read the counters under the driver statistics lock and hand
+ * the malloc'ed buffer to the caller (ownership transfers; the framework
+ * releases it through stat_free_data()).
+ */
+static int do_get_stats(struct stat_hdl *stat, char **data, int *len,
+			enum stat_type_e stype,
+			int (*read_counters)(nt4ga_stat_t *, uint64_t *, int))
+{
+	int nbg;
+	int size = get_size(stat, stype, &nbg);
+
+	size *= sizeof(uint64_t);
+	/* No cast of malloc() in C; void * converts implicitly */
+	uint64_t *val = malloc(size);
+
+	if (!val) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+
+	/* Serialize against the driver's statistics updater thread */
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	read_counters(stat->p_nt4ga_stat, val, nbg);
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = (char *)val;
+	*len = size;
+	return REQUEST_OK;
+}
+
+/*
+ * Stat Request functions
+ */
+/* "get flm": return a FlowMatcher statistics blob. */
+static int func_get_flm(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr _unused, char **data,
+			int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_FLOWMATCHER,
+				    read_flm);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get colors": return a color statistics blob. */
+static int func_get_color(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (stat->p_nt4ga_stat && stat->p_nt4ga_stat->mp_nthw_stat)
+		return do_get_stats(stat, data, len, STAT_TYPE_COLOR,
+				    read_colors);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_ERR;
+}
+
+/* "get queues": return a host-buffer (queue) statistics blob. */
+static int func_get_queue(void *hdl, int client_id _unused,
+			  struct ntconn_header_s *hdr _unused, char **data,
+			  int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_QUEUE, read_queues);
+}
+
+/* "get rx_counters": return an Rx port statistics blob. */
+static int func_get_rx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_RX, read_rx_counters);
+}
+
+/* "get tx_counters": return a Tx port statistics blob. */
+static int func_get_tx_counters(void *hdl, int client_id _unused,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	return do_get_stats(stat, data, len, STAT_TYPE_TX, read_tx_counters);
+}
+
+/*
+ * Snapshot handling. This is to ensure atomic reading of all statistics in one collection
+ */
+
+/*
+ * Look up the per-client snapshot node in the singly linked list.
+ * When 'parent' is non-NULL it receives the predecessor of the returned
+ * node (NULL if the match is the list head), for use when unlinking.
+ * Returns NULL when the client has no snapshot node.
+ */
+static struct snaps_s *find_client_snap_data(struct stat_hdl *stat,
+		int client_id,
+		struct snaps_s **parent)
+{
+	struct snaps_s *prev = NULL;
+	struct snaps_s *cur;
+
+	for (cur = stat->snaps_base; cur; prev = cur, cur = cur->next) {
+		if (cur->client_id == client_id)
+			break;
+	}
+
+	if (parent)
+		*parent = prev;
+	return cur;
+}
+
+/*
+ * Get the client's snapshot node, creating and list-inserting one on the
+ * first request. Returns NULL on allocation failure.
+ */
+static struct snaps_s *get_client_snap_data(struct stat_hdl *stat,
+		int client_id)
+{
+	struct snaps_s *node = find_client_snap_data(stat, client_id, NULL);
+
+	if (node)
+		return node;
+
+	/* First snapshot from this client: push a fresh node onto the list */
+	node = malloc(sizeof(struct snaps_s));
+	if (node) {
+		node->client_id = client_id;
+		node->buffer = NULL;
+		node->next = stat->snaps_base;
+		stat->snaps_base = node;
+	}
+	return node;
+}
+
+/*
+ * "snapshot": read all four statistics sections atomically (under the
+ * driver statistics lock) into one per-client buffer, to be fetched
+ * section by section via "get snapshot <x>".
+ *
+ * Fix vs. the original: the error path for a failed get_client_snap_data()
+ * jumped to a label that unlocked stat_lck before the mutex was ever
+ * locked - undefined behavior for a default POSIX mutex. Unlocked and
+ * locked error exits now use separate labels.
+ */
+static int func_snapshot(void *hdl, int client_id,
+			 struct ntconn_header_s *hdr _unused, char **data,
+			 int *len)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	int nbc, nbq, nbpr, nbpt;
+	struct snaps_s *snaps;
+
+	if (!stat->p_nt4ga_stat || !stat->p_nt4ga_stat->mp_nthw_stat) {
+		*data = NULL;
+		*len = 0;
+		return REQUEST_ERR;
+	}
+	snaps = get_client_snap_data(stat, client_id);
+	if (!snaps)
+		goto err_out;	/* mutex not held here */
+
+	/* Drop any previous snapshot before building a new one */
+	if (snaps->buffer) {
+		free(snaps->buffer);
+		snaps->buffer = NULL;
+	}
+
+	snaps->snap_addr[SNAP_COLORS].size =
+		(unsigned int)get_size(stat, STAT_TYPE_COLOR, &nbc);
+	snaps->snap_addr[SNAP_QUEUES].size =
+		(unsigned int)get_size(stat, STAT_TYPE_QUEUE, &nbq);
+	snaps->snap_addr[SNAP_RX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_RX, &nbpr);
+	snaps->snap_addr[SNAP_TX_PORT].size =
+		(unsigned int)get_size(stat, STAT_TYPE_TX, &nbpt);
+
+	unsigned int tot_size = snaps->snap_addr[SNAP_COLORS].size +
+				snaps->snap_addr[SNAP_QUEUES].size +
+				snaps->snap_addr[SNAP_RX_PORT].size +
+				snaps->snap_addr[SNAP_TX_PORT].size;
+
+	snaps->buffer = malloc(tot_size * sizeof(uint64_t));
+	if (!snaps->buffer) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	uint64_t *val = snaps->buffer;
+
+	/* Fill the sections back-to-back while holding the stat lock */
+	snaps->snap_addr[SNAP_COLORS].ptr = val;
+	pthread_mutex_lock(&stat->drv->ntdrv.stat_lck);
+	unsigned int size = read_colors(stat->p_nt4ga_stat, val, nbc);
+
+	if (size != snaps->snap_addr[SNAP_COLORS].size) {
+		NT_LOG(ERR, NTCONNECT, "stat.snapshot: color size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_QUEUES].ptr = val;
+	size = read_queues(stat->p_nt4ga_stat, val, nbq);
+	if (size != snaps->snap_addr[SNAP_QUEUES].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: queue statistic size mismatch");
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_RX_PORT].ptr = val;
+	size = read_rx_counters(stat->p_nt4ga_stat, val, nbpr);
+	if (size != snaps->snap_addr[SNAP_RX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Rx port statistic size mismatch %i, %i",
+		       size, snaps->snap_addr[SNAP_RX_PORT].size);
+		goto err_unlock;
+	}
+
+	val += size;
+	snaps->snap_addr[SNAP_TX_PORT].ptr = val;
+	size = read_tx_counters(stat->p_nt4ga_stat, val, nbpt);
+	if (size != snaps->snap_addr[SNAP_TX_PORT].size) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat.snapshot: Tx port statistic size mismatch");
+		goto err_unlock;
+	}
+
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+
+	*data = NULL;
+	*len = 0;
+	return REQUEST_OK;
+
+err_unlock:
+	pthread_mutex_unlock(&stat->drv->ntdrv.stat_lck);
+err_out:
+	return ntconn_error(data, len, "stat",
+			    NTCONN_ERR_CODE_INTERNAL_REPLY_ERROR);
+}
+
+/*
+ * Copy one section of the client's snapshot into a fresh malloc'ed reply
+ * buffer (released by stat_free_data()). Fails with NO_DATA when the
+ * client never took a snapshot.
+ *
+ * Fix vs. the original: the allocation check tested the 'data' parameter
+ * (always non-NULL) instead of the malloc result '*data', so an
+ * allocation failure went undetected and memcpy() would write through
+ * a NULL pointer.
+ */
+static int get_snap_data(void *hdl, int client_id, char **data, int *len,
+			  enum snap_addr_select_e snap_addr_idx)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps = find_client_snap_data(stat, client_id, NULL);
+
+	if (!snaps || !snaps->buffer)
+		return ntconn_error(data, len, "stat", NTCONN_ERR_CODE_NO_DATA);
+
+	int ln = snaps->snap_addr[snap_addr_idx].size * sizeof(uint64_t);
+
+	*data = malloc(ln);
+	if (!*data) {
+		*len = 0;
+		NT_LOG(ERR, NTCONNECT, "memory allocation failed");
+		return REQUEST_ERR;
+	}
+	memcpy(*data, snaps->snap_addr[snap_addr_idx].ptr, ln);
+	*len = ln;
+
+	return REQUEST_OK;
+}
+
+/* "get snapshot colors": fetch the color section of the snapshot. */
+static int func_get_snap_colors(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_COLORS);
+}
+
+/* "get snapshot queues": fetch the queue section of the snapshot. */
+static int func_get_snap_queues(void *hdl, int client_id,
+				struct ntconn_header_s *hdr _unused,
+				char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_QUEUES);
+}
+
+/* "get snapshot rx_counters": fetch the Rx port section of the snapshot. */
+static int func_get_snap_rx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_RX_PORT);
+}
+
+/* "get snapshot tx_counters": fetch the Tx port section of the snapshot. */
+static int func_get_snap_tx_port(void *hdl, int client_id,
+				 struct ntconn_header_s *hdr _unused,
+				 char **data, int *len)
+{
+	return get_snap_data(hdl, client_id, data, len, SNAP_TX_PORT);
+}
+
+/*
+ * Stat main request function
+ */
+/* Module entry point: route an incoming "stat" request to its handler. */
+static int stat_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	int ret = execute_function(this_module_name, hdl, client_id, hdr,
+				   function, stat_entry_funcs, data, len, 0);
+	return ret;
+}
+
+/* Release a reply buffer handed out by a stat request handler. */
+static void stat_free_data(void *hdl _unused, char *data)
+{
+	free(data);
+}
+
+/*
+ * Per-client teardown: unlink the client's snapshot node from the list
+ * and release its buffer. free(NULL) is a no-op, so the buffer needs no
+ * NULL guard.
+ */
+static void stat_client_cleanup(void *hdl, int client_id)
+{
+	struct stat_hdl *stat = (struct stat_hdl *)hdl;
+	struct snaps_s *snaps_parent;
+	struct snaps_s *snaps =
+		find_client_snap_data(stat, client_id, &snaps_parent);
+
+	if (!snaps)
+		return;
+
+	/* Unlink from the singly linked snapshot list */
+	if (snaps_parent)
+		snaps_parent->next = snaps->next;
+	else
+		stat->snaps_base = snaps->next;
+
+	free(snaps->buffer);
+	free(snaps);
+}
+
+/* NtConnect operations vector for the stat module (name, version, hooks) */
+static const ntconnapi_t ntconn_stat_op = {
+	this_module_name, STAT_VERSION_MAJOR, STAT_VERSION_MINOR,
+	stat_request,	  stat_free_data,     stat_client_cleanup
+};
+
+/*
+ * Register the stat module for this driver instance.
+ * Refuses to activate when the statistics layout version is unsupported.
+ * Returns the framework registration status, or -1 on failure.
+ */
+int ntconn_stat_register(struct drv_s *drv)
+{
+	stat_hdl.drv = drv;
+	stat_hdl.p_nt4ga_stat = &drv->ntdrv.adapter_info.nt4ga_stat;
+
+	/*
+	 * Every request handler guards against a NULL mp_nthw_stat; do the
+	 * same here before dereferencing it for the layout version.
+	 */
+	if (!stat_hdl.p_nt4ga_stat->mp_nthw_stat) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: statistics module not initialized. Module will not be activated");
+		return -1;
+	}
+
+	/* Check supported Layout_versions by this module */
+	size_t i;
+
+	for (i = 0; i < NUM_LAYOUT_VERSIONS_SUPPORTED; i++) {
+		if (stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version ==
+				layout_versions_supported[i])
+			break;
+	}
+
+	if (i == NUM_LAYOUT_VERSIONS_SUPPORTED) {
+		NT_LOG(ERR, NTCONNECT,
+		       "stat: layout version %i is not supported. Module will not be activated",
+		       stat_hdl.p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version);
+		return -1;
+	}
+
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&stat_hdl,
+				   &ntconn_stat_op);
+}
diff --git a/drivers/net/ntnic/ntconnect_modules/ntconn_test.c b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
new file mode 100644
index 0000000000..907ea4ff5f
--- /dev/null
+++ b/drivers/net/ntnic/ntconnect_modules/ntconn_test.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <errno.h>
+#include "ntnic_ethdev.h"
+#include "ntconnect.h"
+#include "ntos_system.h"
+#include "ntconn_modules.h"
+#include "ntconn_mod_helper.h"
+#include "nt_util.h"
+#include "ntlog.h"
+#include "ntnic_vf_vdpa.h"
+
+#include "ntconnect_api_test.h"
+
+#define NTCONN_TEST_VERSION_MAJOR 0U
+#define NTCONN_TEST_VERSION_MINOR 1U
+
+#define this_module_name "ntconnect_test"
+
+#define MAX_CLIENTS 32
+
+#define UNUSED __rte_unused
+
+static struct test_hdl_s {
+	struct drv_s *drv;
+} test_hdl[MAX_CLIENTS];
+
+/*
+ * Test functions
+ */
+static int func_test(void *hdl, int client_id, struct ntconn_header_s *hdr,
+		     char **data, int *len);
+/* Function-name dispatch table for this module; NULL entry terminates */
+static struct func_s adapter_entry_funcs[] = {
+	{ "test", NULL, func_test },
+	{ NULL, NULL, NULL },
+};
+
+/*
+ * "test" request handler: validates the received blob and echoes the
+ * test array back to the client. On validation failure a minimal reply
+ * carrying the negative status is returned instead. *data/*len are
+ * replaced with a freshly malloc'ed reply (freed via test_free_data).
+ * Returns REQUEST_OK, or REQUEST_ERR when the reply allocation fails.
+ */
+static int func_test(void *hdl _unused, int client_id _unused,
+		     struct ntconn_header_s *hdr, char **data, int *len)
+{
+	int status = 0;
+	int number = 0;
+	uint64_t size;
+	struct test_s *test_cpy = (struct test_s *)&(*data)[hdr->len];
+
+	if (hdr->blob_len < sizeof(struct test_s)) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: too small");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	number = test_cpy->number;
+
+	/*
+	 * Reject negative counts and compute the expected size in 64 bits:
+	 * with 32-bit arithmetic a crafted "number" from the (untrusted)
+	 * request blob could wrap the size, slip past the consistency
+	 * check below and make the copy loop read beyond the blob.
+	 */
+	if (number < 0) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+	size = sizeof(struct test_s) + sizeof(uint64_t) * (uint64_t)number;
+
+	if (hdr->blob_len != size) {
+		NT_LOG(ERR, NTCONNECT, "Error in test data: wrong size");
+		status = -1;
+		goto TEST_ERROR;
+	}
+
+	{
+		/* Success: echo the test array back to the client */
+		*data = malloc(sizeof(struct test_s) +
+			       number * sizeof(uint64_t));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s) + number * sizeof(uint64_t);
+		for (int i = 0; i < number; i++)
+			return_value->test[i] = test_cpy->test[i];
+		return_value->status = 0;
+		return_value->number = number;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR:
+
+	{
+		/* Validation failed: return a minimal reply with the status */
+		*data = malloc(sizeof(struct test_s));
+		if (!*data)
+			goto TEST_ERROR_MALLOC;
+		struct test_s *return_value = (struct test_s *)*data;
+		*len = sizeof(struct test_s);
+		return_value->status = status;
+		return_value->number = 0;
+		return REQUEST_OK;
+	}
+
+TEST_ERROR_MALLOC:
+
+	*len = 0;
+	NT_LOG(ERR, NTCONNECT, "Not able to allocate memory %s", __func__);
+	return REQUEST_ERR;
+}
+
+/*
+ * NOTE(review): these constants are not referenced anywhere in this
+ * file — presumably copied from the flow module; confirm whether they
+ * are needed or can be dropped.
+ */
+enum {
+	FLOW_API_FUNC_CREATE,
+	FLOW_API_FUNC_VALIDATE,
+};
+
+/* Main request entry: dispatch "function" via adapter_entry_funcs */
+static int test_request(void *hdl, int client_id _unused,
+			struct ntconn_header_s *hdr, char *function,
+			char **data, int *len)
+{
+	return execute_function(this_module_name, hdl, client_id, hdr, function,
+				adapter_entry_funcs, data, len, 0);
+}
+
+/* Release a reply buffer previously handed out by func_test */
+static void test_free_data(void *hdl _unused, char *data)
+{
+	/*
+	 * free(NULL) is a no-op, so no guard is needed; this also matches
+	 * the stat module's free callback in this patch series.
+	 */
+	free(data);
+}
+
+/* Per-client cleanup callback; this module keeps no per-client state */
+static void test_client_cleanup(void *hdl _unused, int client_id _unused)
+{
+	/* Nothing to do */
+}
+
+/* Operations vector handed to the ntconnect framework for this module */
+static const ntconnapi_t ntconn_test_op = { this_module_name,
+					    NTCONN_TEST_VERSION_MAJOR,
+					    NTCONN_TEST_VERSION_MINOR,
+					    test_request,
+					    test_free_data,
+					    test_client_cleanup
+					  };
+
+/*
+ * Register the test module for one adapter with the ntconnect framework.
+ * Claims the first free slot in test_hdl[]; returns -1 when all
+ * MAX_CLIENTS slots are taken, otherwise the result of
+ * register_ntconn_mod().
+ */
+int ntconn_test_register(struct drv_s *drv)
+{
+	int i;
+
+	/* Find an unused adapter slot (drv == NULL means free) */
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (test_hdl[i].drv == NULL)
+			break;
+	}
+	if (i == MAX_CLIENTS) {
+		NT_LOG(ERR, NTCONNECT,
+		       "Cannot register more adapters into NtConnect framework");
+		return -1;
+	}
+
+	test_hdl[i].drv = drv;
+	return register_ntconn_mod(&drv->p_dev->addr, (void *)&test_hdl[i],
+				   &ntconn_test_op);
+}
-- 
2.39.3


^ permalink raw reply related	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
                     ` (6 preceding siblings ...)
  2023-09-08 16:07   ` [PATCH v16 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
@ 2023-09-15 15:54   ` Ferruh Yigit
  2023-09-15 18:37     ` Morten Brørup
  7 siblings, 1 reply; 142+ messages in thread
From: Ferruh Yigit @ 2023-09-15 15:54 UTC (permalink / raw)
  To: Mykola Kostenok, dev, Christiam Muf; +Cc: thomas, ckm, andrew.rybchenko

On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
> From: Christian Koue Muf <ckm@napatech.com>
> 
> The NTNIC PMD does not rely on a kernel space Napatech driver,
> thus all defines related to the register layout is part of the PMD
> code, which will be added in later commits.
> 
> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>

Hi Mykola, Christian,

This PMD scares me, overall it is a big drop:
"249 files changed, 87128 insertions(+)"

I think it is not possible to review all in one release cycle, and it is
not even possible to say if all code used or not.

I can see code is already developed, and it is difficult to restructure
developed code, but restructure it into small pieces really helps for
reviews.


Driver supports good list of features, can it be possible to distribute
upstream effort into multiple release.
Starting from basic functionality and add features gradually.
Target for this release can be providing datapath, and add more if we
have time in the release, what do you think?


Also there are large amount of base code (HAL / FPGA code), instead of
adding them as a bulk, relevant ones with a feature can be added with
the feature patch, this eliminates dead code in the base code layer,
also helps user/review to understand the link between driver code and
base code.


As far as I understand last patch opens a socket interface and an
external application can sent control commands via this interface.
I am not sure about this side control channel, what is missing in the
DPDK API? Can we try to address them in the DPDK layer instead of a
driver specific solution?


Thanks,
ferruh



^ permalink raw reply	[flat|nested] 142+ messages in thread

* RE: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-15 15:54   ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Ferruh Yigit
@ 2023-09-15 18:37     ` Morten Brørup
  2023-09-18  9:33       ` Ferruh Yigit
  0 siblings, 1 reply; 142+ messages in thread
From: Morten Brørup @ 2023-09-15 18:37 UTC (permalink / raw)
  To: Ferruh Yigit, Mykola Kostenok, dev, Christiam Muf
  Cc: thomas, ckm, andrew.rybchenko

> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
> Sent: Friday, 15 September 2023 17.55
> 
> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
> > From: Christian Koue Muf <ckm@napatech.com>
> >
> > The NTNIC PMD does not rely on a kernel space Napatech driver,
> > thus all defines related to the register layout is part of the PMD
> > code, which will be added in later commits.
> >
> > Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> > Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
> >
> 
> Hi Mykola, Christiam,
> 
> This PMD scares me, overall it is a big drop:
> "249 files changed, 87128 insertions(+)"
> 
> I think it is not possible to review all in one release cycle, and it is
> not even possible to say if all code used or not.
> 
> I can see code is already developed, and it is difficult to restructure
> developed code, but restructure it into small pieces really helps for
> reviews.
> 
> 
> Driver supports good list of features, can it be possible to distribute
> upstream effort into multiple release.
> Starting from basic functionality and add features gradually.
> Target for this release can be providing datapath, and add more if we
> have time in the release, what do you think?
> 
> 
> Also there are large amount of base code (HAL / FPGA code), instead of
> adding them as a bulk, relevant ones with a feature can be added with
> the feature patch, this eliminates dead code in the base code layer,
> also helps user/review to understand the link between driver code and
> base code.

Jumping in here with an opinion about welcoming new NIC vendors to the community:

Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.

If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.

We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.

> 
> 
> As far as I understand last patch opens a socket interface and an
> external application can sent control commands via this interface.
> I am not sure about this side control channel, what is missing in the
> DPDK API? Can we try to address them in the DPDK layer instead of a
> driver specific solution?

That would be great.

AFAIK, other vendors also have a bunch of out-of-band communication, e.g. magical EAL parameters to the MLX drivers. So let's not be too hard on the newcomers. ;-)

> 
> 
> Thanks,
> ferruh

Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!

Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.

-Morten


^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-15 18:37     ` Morten Brørup
@ 2023-09-18  9:33       ` Ferruh Yigit
  2023-09-19  9:06         ` Christian Koue Muf
  0 siblings, 1 reply; 142+ messages in thread
From: Ferruh Yigit @ 2023-09-18  9:33 UTC (permalink / raw)
  To: Morten Brørup, Mykola Kostenok, dev, Christiam Muf
  Cc: thomas, andrew.rybchenko, techboard

On 9/15/2023 7:37 PM, Morten Brørup wrote:
>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>> Sent: Friday, 15 September 2023 17.55
>>
>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>> From: Christian Koue Muf <ckm@napatech.com>
>>>
>>> The NTNIC PMD does not rely on a kernel space Napatech driver,
>>> thus all defines related to the register layout is part of the PMD
>>> code, which will be added in later commits.
>>>
>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>
>>
>> Hi Mykola, Christiam,
>>
>> This PMD scares me, overall it is a big drop:
>> "249 files changed, 87128 insertions(+)"
>>
>> I think it is not possible to review all in one release cycle, and it is
>> not even possible to say if all code used or not.
>>
>> I can see code is already developed, and it is difficult to restructure
>> developed code, but restructure it into small pieces really helps for
>> reviews.
>>
>>
>> Driver supports good list of features, can it be possible to distribute
>> upstream effort into multiple release.
>> Starting from basic functionality and add features gradually.
>> Target for this release can be providing datapath, and add more if we
>> have time in the release, what do you think?
>>
>>
>> Also there are large amount of base code (HAL / FPGA code), instead of
>> adding them as a bulk, relevant ones with a feature can be added with
>> the feature patch, this eliminates dead code in the base code layer,
>> also helps user/review to understand the link between driver code and
>> base code.
> 
> Jumping in here with an opinion about welcoming new NIC vendors to the community:
> 
> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
> 
> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
> 

Hi Morten,

I was thinking same before making my above comment, what happens if
vendors submit as one big patch and when a problem occurs we can ask
owner to fix. Probably this makes vendor happy and makes my life (or any
other maintainer's life) easier, it is always easier to say yes.


But I come up with two main reasons to ask for a rework:

1- Technically any vendor can deliver their software to their customers
via a public git repository, they don't have to upstream to dpdk.org,
but upstreaming has many benefits.

One of those benefits is upstreaming provides a quality assurance for
vendor's customers (that is why customer can be asking for this, as we
are having in many cases), and this quality assurance comes from
additional eyes reviewing the code and guiding vendors for the DPDK
quality standards (some vendors already doing pretty good, but new ones
sometimes requires hand-holding).

If driver is one big patch series, it is practically not possible to
review it, I can catch a few bits here or there, you may catch some others,
but practically it will be merged without review, and we will fail on
our quality assurance task.

2- Make code more accessible to the rest of the world.

When it is a big patch, code can be functional but lots of details,
reasoning, relation between components gets lost, which makes it even
harder for an external developer, like me, to understand it (I am a mere
guinea pig here :).

If a customer would like to add a feature themselves, or fix something,
even after vendor no more working on that product anymore, customer
needs to understand the code or some reasoning in the code.
Or if someone wants to backport the driver to rust, or a DPDK developer
wants to do a rework that requires updating all drivers, or a tester
would like to analyze the code to figure out behavior difference of the
devices. I think I have witnessed all the above cases in real life.

If driver is split into more patches, it makes patch easier to
understand which makes code practically more accessible to other
developers that are not expert in driver.


Overall, yes splitting patch takes time and effort, and yes this is an
overhead for a code that is already developed, but I think benefit is
big so it worth doing the task.


> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
> 

Agree to not make upstreaming difficult for new vendors, and indeed we
are encouraging more vendors to be upstream their code, this is in best
interest of both sides.

Distributing upstreaming effort to a year was just a suggestion, it can
go in earlier as it is becomes ready but I can see it will take time to
split driver into features and upstream them.

As I am from a vendor too, I can understand the product/customer
pressure, but I hope this approach can encourage vendors start
upstreaming early or even better upstream as they develop the code.


>>
>>
>> As far as I understand last patch opens a socket interface and an
>> external application can sent control commands via this interface.
>> I am not sure about this side control channel, what is missing in the
>> DPDK API? Can we try to address them in the DPDK layer instead of a
>> driver specific solution?
> 
> That would be great.
> 
> AFAIK, other vendors also has a bunch of out-of-band communication, e.g. magical EAL parameters to the MLX drivers. So let's not be too hard on the newcomers. ;-)
> 

I did some thinking for this one too,

As we are in userspace, it is easy to have side control channel, and
this can make users life easy, so this is a practical thing to do.
(Indeed there are already some ways to do this, without PMD exposing a
socket interface.)

But this also reduces effort developers putting on DPDK layer solution,
because it is always easier to add more support to the driver only.
And overall this reduces portability of the DPDK application, each
application becomes unique to a device (This is a bad thing, but I also
need some feedback how bad it is in real life.)

To balance this, we said if a feature is too specific to a device, it
can add device specific API and this is better than device specific
features pollute the common, most used code. And push back to introduce
more new PMD specific APIs unless it is really needed.

But creating a socket interface directly from the driver is more than
PMD specific API. Technically application control interface can rely
completely to this. Even we assume this is not for control, but just for
debug, I can see it can be useful for debug and again practical thing to
do, I am still not sure how much it hurts if each driver has a custom
socket interface for their debug needs.

Overall it makes more sense to me to have a unified/common interface
from drivers to DPDK applications, which is through the ethdev layer.
And improve and extend the ethdev layer to satisfy driver needs.

In this specific example, I am for rejecting the socket interface patch,
but I would like to get more feedback from @techboard.


And related to not being too hard on the newcomers, unrelated to being a
newcomer or not, if a process/feature/approach approved once, some
others will point to it and will ask to do the same which is fair in
their perspective. I had multiple instance of this in the past.

Of course we are being easy to newcomers but not in a way to allow code
that we believe is not good thing to do, but going easy on process may be :)


>>
>>
>> Thanks,
>> ferruh
> 
> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
> 
> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
> 
> -Morten
> 


^ permalink raw reply	[flat|nested] 142+ messages in thread

* RE: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-18  9:33       ` Ferruh Yigit
@ 2023-09-19  9:06         ` Christian Koue Muf
  2023-09-20  9:48           ` Ferruh Yigit
  2023-09-20 13:17           ` Thomas Monjalon
  0 siblings, 2 replies; 142+ messages in thread
From: Christian Koue Muf @ 2023-09-19  9:06 UTC (permalink / raw)
  To: Ferruh Yigit, Morten Brørup, Mykola Kostenok, dev
  Cc: thomas, andrew.rybchenko, techboard

On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>On 9/15/2023 7:37 PM, Morten Brørup wrote:
>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>>> Sent: Friday, 15 September 2023 17.55
>>>
>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>>> From: Christian Koue Muf <ckm@napatech.com>
>>>>
>>>> The NTNIC PMD does not rely on a kernel space Napatech driver, thus 
>>>> all defines related to the register layout is part of the PMD code, 
>>>> which will be added in later commits.
>>>>
>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>>
>>>
>>> Hi Mykola, Christiam,
>>>
>>> This PMD scares me, overall it is a big drop:
>>> "249 files changed, 87128 insertions(+)"
>>>
>>> I think it is not possible to review all in one release cycle, and it 
>>> is not even possible to say if all code used or not.
>>>
>>> I can see code is already developed, and it is difficult to 
>>> restructure developed code, but restructure it into small pieces 
>>> really helps for reviews.
>>>
>>>
>>> Driver supports good list of features, can it be possible to 
>>> distribute upstream effort into multiple release.
>>> Starting from basic functionality and add features gradually.
>>> Target for this release can be providing datapath, and add more if we 
>>> have time in the release, what do you think?
>>>
>>>
>>> Also there are large amount of base code (HAL / FPGA code), instead 
>>> of adding them as a bulk, relevant ones with a feature can be added 
>>> with the feature patch, this eliminates dead code in the base code 
>>> layer, also helps user/review to understand the link between driver 
>>> code and base code.
>> 
>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>> 
>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>> 
>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>> 
>
>Hi Morten,
>
>I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>
>
>But I come up with two main reasons to ask for a rework:
>
>1- Technically any vendor can deliver their software to their customers via a public git repository, they don't have to upstream to https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E,1,NpoJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9bOLlnhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>but upstreaming has many benefits.
>
>One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>
>If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
>
>2- Make code more accessible to the rest of the world.
>
>When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>
>If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
>Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witness all above cases in real life.
>
>If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
>
>
>Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
>
>
>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
>> 
>
>Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
>
>Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it is becomes ready but I can see it will take time to split driver into features and upstream them.
>
>As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.

Hi Ferruh,

First of all, thank you for starting the work to review our code.

As Morten said Napatech plans to take all responsibility for the
quality of the PMD source code. We expect to provide all fixes
needed in the future. If for some reason Napatech stops maintaining
the code, then we have been informed that the DPDK community
might delete the PMD from the repository, and we understand that.

In regards to splitting the code, I don't see this a good option. While
I of course agree it would be easier to review and understand, the
code should also result in a meaningful product. Of the 87k lines
of code, 53k lines are needed to start up the FPGA to a state where it is ready
to receive traffic. But at this point all packets would simply be discarded,
and to be honest, there are better and cheaper options out there,
if nothing more than basic functionality is needed. 34k lines are
used to setup filters based on rte_flow. The thing is, that you need
to initialize all modules in the FPGA TX- and RX-pipelines with valid
data, even if you don't need the features those modules provide.
As a result, if you split up the 34k lines, then the product would not
be functional. Of cause some of the top level logic could be split out,
but at this point we are talking about splitting 87k lines into 80k and 7k,
which I don't think is worth it.

>>>
>>>
>>> As far as I understand last patch opens a socket interface and an 
>>> external application can sent control commands via this interface.
>>> I am not sure about this side control channel, what is missing in the 
>>> DPDK API? Can we try to address them in the DPDK layer instead of a 
>>> driver specific solution?
>> 
>> That would be great.
>> 
>> AFAIK, other vendors also has a bunch of out-of-band communication, 
>> e.g. magical EAL parameters to the MLX drivers. So let's not be too 
>> hard on the newcomers. ;-)
>> 
>
>I did some thinking for this one too,
>
>As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>(Indeed there are already some ways to do this, without PMD exposing a socket interface.)
>
>But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>And overall this reduces portability of the DPDK application, each application becomes unique to a device (This is a bad thing, but I also need some feedback how bad it is in real life.)
>
>To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>
>But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>
>Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>And improve and extend the ethdev layer to satisfy driver needs.
>
>In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>

The reason we have the addition control channel is not provide
additional functionality. We have customers with use-cases that
require multiple processes. Since Napatech adapters do not support
configuration through VFs, then secondary applications must send
their rte_flow to a main application, which will then setup the flow
through it's PF. This control channel "hides" these details, and
make the product easier for users to adapt to their existing solutions.

If you stand firm on rejecting the control channel, then we have
to go back to the drawing board on this issue. We did look at
DPDK's multi-process support, and actually had some support
for this, but we determined that for our use-case it was better
to have a communication channel, and no shared memory.

>
>And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
>
>Of course we are being easy to newcomers but not in a way to allow code that we believe is not good thing to do, but going easy on process may be :)
>

We are grateful for any leniency you may show us ;-)

Thanks again,
Christian

>
>>>
>>>
>>> Thanks,
>>> ferruh
>> 
>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>> 
>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>> 
>> -Morten
>>

^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-19  9:06         ` Christian Koue Muf
@ 2023-09-20  9:48           ` Ferruh Yigit
  2023-09-20 13:17           ` Thomas Monjalon
  1 sibling, 0 replies; 142+ messages in thread
From: Ferruh Yigit @ 2023-09-20  9:48 UTC (permalink / raw)
  To: Christian Koue Muf, Morten Brørup, Mykola Kostenok, dev
  Cc: thomas, andrew.rybchenko, techboard

On 9/19/2023 10:06 AM, Christian Koue Muf wrote:
> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>>>> Sent: Friday, 15 September 2023 17.55
>>>>
>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>>>> From: Christian Koue Muf <ckm@napatech.com>
>>>>>
>>>>> The NTNIC PMD does not rely on a kernel space Napatech driver, thus
>>>>> all defines related to the register layout is part of the PMD code,
>>>>> which will be added in later commits.
>>>>>
>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>>>
>>>>
>>>> Hi Mykola, Christiam,
>>>>
>>>> This PMD scares me, overall it is a big drop:
>>>> "249 files changed, 87128 insertions(+)"
>>>>
>>>> I think it is not possible to review all in one release cycle, and it
>>>> is not even possible to say if all code used or not.
>>>>
>>>> I can see code is already developed, and it is difficult to
>>>> restructure developed code, but restructure it into small pieces
>>>> really helps for reviews.
>>>>
>>>>
>>>> Driver supports good list of features, can it be possible to
>>>> distribute upstream effort into multiple release.
>>>> Starting from basic functionality and add features gradually.
>>>> Target for this release can be providing datapath, and add more if we
>>>> have time in the release, what do you think?
>>>>
>>>>
>>>> Also there are large amount of base code (HAL / FPGA code), instead
>>>> of adding them as a bulk, relevant ones with a feature can be added
>>>> with the feature patch, this eliminates dead code in the base code
>>>> layer, also helps user/review to understand the link between driver
>>>> code and base code.
>>>
>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>>>
>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>>>
>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>>>
>>
>> Hi Morten,
>>
>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>>
>>
>> But I come up with two main reasons to ask for a rework:
>>
>> 1- Technically any vendor can deliver their software to their customers via a public git repository, they don't have to upstream to https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E,1,NpoJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9bOLlnhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>> but upstreaming has many benefits.
>>
>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>>
>> If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
>>
>> 2- Make code more accessible to the rest of the world.
>>
>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>>
>> If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witness all above cases in real life.
>>
>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
>>
>>
>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
>>
>>
>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
>>>
>>
>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
>>
>> Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it is becomes ready but I can see it will take time to split driver into features and upstream them.
>>
>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
> 
> Hi Ferruh,
> 
> First of all, thank you for starting the work to review our code.
> 
> As Morten said Napatech plans to take all responsibility for the
> quality of the PMD source code. We expect to provide all fixes
> needed in the future. If for some reason Napatech stops maintaining
> the code, then we have been informed that the DPDK community
> might delete the PMD from the repository, and we understand that.
> 

Deleting a PMD is easy; maintaining it when the company is no longer
around is a challenge.


> In regards to splitting the code, I don't see this a good option. While
> I of cause agree it would be easier to review and understand, the
> code should also result in a meaningful product. Of the 87k lines
> of code, 53k lines is needed to start-up the FPGA to a state the it is ready
> to receive traffic. But at this point all packets would simply be discarded,
> and to be honest, there are better and cheaper options out there,
> if nothing more than basic functionality is needed. 34k lines are
> used to setup filters based on rte_flow. The thing is, that you need
> to initialize all modules in the FPGA TX- and RX-pipelines with valid
> data, even if you don't need the features those modules provide.
> As a result, if you split up the 34k lines, then the product would not
> be functional. Of cause some of the top level logic could be split out,
> but at this point we are talking about splitting 87k lines into 80k and 7k,
> which I don't think is worth it.
> 

We had a similar code base in DPDK before; a heavy, multi-platform
HAL layer approach is not unique to this driver. We can figure
out the details and find a sweet spot for this case.


>>>>
>>>>
>>>> As far as I understand last patch opens a socket interface and an
>>>> external application can sent control commands via this interface.
>>>> I am not sure about this side control channel, what is missing in the
>>>> DPDK API? Can we try to address them in the DPDK layer instead of a
>>>> driver specific solution?
>>>
>>> That would be great.
>>>
>>> AFAIK, other vendors also has a bunch of out-of-band communication,
>>> e.g. magical EAL parameters to the MLX drivers. So let's not be too
>>> hard on the newcomers. ;-)
>>>
>>
>> I did some thinking for this one too,
>>
>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>> (Indeed there are already some ways to do this, without PMD exposing a socket interface.)
>>
>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>> And overall this reduces portability of the DPDK application, each application becomes unique to a device (This is a bad thing, but I also need some feedback how bad it is in real life.)
>>
>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>>
>> But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>>
>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>> And improve and extend the ethdev layer to satisfy driver needs.
>>
>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>>
> 
> The reason we have the addition control channel is not provide
> additional functionality. We have customers with use-cases that
> require multiple processes. Since Napatech adapters do not support
> configuration through VFs, then secondary applications must send
> their rte_flow to a main application, which will then setup the flow
> through it's PF. This control channel "hides" these details, and
> make the product easier for users to adapt to their existing solutions.
> 
> If you stand firm on rejecting the control channel, then we have
> to go back to the drawing board on this issue. We did look at
> DPDK's multi-process support, and actually had some support
> for this, but we determined that for our use-case it was better
> to have a communication channel, and no shared memory.
> 

This is about whether individual drivers have custom interfaces for
specific application needs, or use common interfaces.
And how much custom interfaces hurt in the long run. A wider discussion.

For example, if you want to use your device and VFs with OvS, how will
you do it with this specific interface?


And specific to this driver,
if it is about primary/secondary process communication, as you mentioned
there is already a socket interface for it.

Or did you investigate VF representors, does it work for this case?

Another common approach is VF-PF mailbox communication, where the VF passes
requests to the PF in the device; of course this requires device/FW support.


>>
>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
>>
>> Of course we are being easy to newcomers but not in a way to allow code that we believe is not good thing to do, but going easy on process may be :)
>>
> 
> We are grateful for any leniency you may show us ;-)
> 
> Thanks again,
> Christian
> 
>>
>>>>
>>>>
>>>> Thanks,
>>>> ferruh
>>>
>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>>>
>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>>>
>>> -Morten
>>>
> Disclaimer: This email and any files transmitted with it may contain confidential information intended for the addressee(s) only. The information is not to be surrendered or copied to unauthorized persons. If you have received this communication in error, please notify the sender immediately and delete this e-mail from your system.


^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-19  9:06         ` Christian Koue Muf
  2023-09-20  9:48           ` Ferruh Yigit
@ 2023-09-20 13:17           ` Thomas Monjalon
  2023-09-21 14:04             ` Ferruh Yigit
  1 sibling, 1 reply; 142+ messages in thread
From: Thomas Monjalon @ 2023-09-20 13:17 UTC (permalink / raw)
  To: Ferruh Yigit, Morten Brørup, Mykola Kostenok, Christian Koue Muf
  Cc: dev, andrew.rybchenko, techboard

Hello,

19/09/2023 11:06, Christian Koue Muf:
> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
> >On 9/15/2023 7:37 PM, Morten Brørup wrote:
> >>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
> >>> Sent: Friday, 15 September 2023 17.55
> >>>
> >>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
> >>>> From: Christian Koue Muf <ckm@napatech.com>
> >>>>
> >>>> The NTNIC PMD does not rely on a kernel space Napatech driver, thus 
> >>>> all defines related to the register layout is part of the PMD code, 
> >>>> which will be added in later commits.
> >>>>
> >>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> >>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
> >>>>
> >>>
> >>> Hi Mykola, Christiam,
> >>>
> >>> This PMD scares me, overall it is a big drop:
> >>> "249 files changed, 87128 insertions(+)"
> >>>
> >>> I think it is not possible to review all in one release cycle, and it 
> >>> is not even possible to say if all code used or not.
> >>>
> >>> I can see code is already developed, and it is difficult to 
> >>> restructure developed code, but restructure it into small pieces 
> >>> really helps for reviews.
> >>>
> >>>
> >>> Driver supports good list of features, can it be possible to 
> >>> distribute upstream effort into multiple release.
> >>> Starting from basic functionality and add features gradually.
> >>> Target for this release can be providing datapath, and add more if we 
> >>> have time in the release, what do you think?

I was expecting to get only Rx/Tx in this release, not really more.

I agree it may be interesting to discuss some design
and check whether we need more features in ethdev
as part of the driver upstreaming process.


> >>> Also there are large amount of base code (HAL / FPGA code), instead 
> >>> of adding them as a bulk, relevant ones with a feature can be added 
> >>> with the feature patch, this eliminates dead code in the base code 
> >>> layer, also helps user/review to understand the link between driver 
> >>> code and base code.

Yes it would be interesting to see what is really needed for the basic initialization
and what is linked to a specific offload or configuration feature.

As a maintainer, I have to do some changes across all drivers sometimes,
and I use git blame a lot to understand why something was added.


> >> Jumping in here with an opinion about welcoming new NIC vendors to the community:
> >> 
> >> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
> >> 
> >> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
> >> 
> >
> >Hi Morten,
> >
> >I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
> >
> >
> >But I come up with two main reasons to ask for a rework:
> >
> >1- Technically any vendor can deliver their software to their customers via a public git repository, they don't have to upstream to https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E,1,NpoJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9bOLlnhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
> >but upstreaming has many benefits.
> >
> >One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
> >
> >If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
> >
> >2- Make code more accessible to the rest of the world.
> >
> >When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
> >
> >If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
> >Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witness all above cases in real life.
> >
> >If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.

I fully agree about the 2 reasons for upstreaming piece by piece.


> >Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.

In the meantime, if some features are not yet upstreamed in a release,
a user can apply the missing patches from the mailing list to get the features.


> >> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.

You're right Morten, we try to be as welcoming as possible,
but as Ferruh said, we want to be able to understand how a driver is built,
even if not understanding all details.

In Open Source, I think not only the code should be available,
we must also take care of explanations and documentation.


> >Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
> >
> >Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it is becomes ready but I can see it will take time to split driver into features and upstream them.

Driver features can be added until -rc2 (in one month).


> >As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
> 
> Hi Ferruh,
> 
> First of all, thank you for starting the work to review our code.
> 
> As Morten said Napatech plans to take all responsibility for the
> quality of the PMD source code. We expect to provide all fixes
> needed in the future. If for some reason Napatech stops maintaining
> the code, then we have been informed that the DPDK community
> might delete the PMD from the repository, and we understand that.
> 
> In regards to splitting the code, I don't see this a good option. While
> I of cause agree it would be easier to review and understand, the
> code should also result in a meaningful product. Of the 87k lines
> of code, 53k lines is needed to start-up the FPGA to a state the it is ready
> to receive traffic. But at this point all packets would simply be discarded,
> and to be honest, there are better and cheaper options out there,
> if nothing more than basic functionality is needed. 34k lines are
> used to setup filters based on rte_flow. The thing is, that you need
> to initialize all modules in the FPGA TX- and RX-pipelines with valid
> data, even if you don't need the features those modules provide.
> As a result, if you split up the 34k lines, then the product would not
> be functional. Of cause some of the top level logic could be split out,
> but at this point we are talking about splitting 87k lines into 80k and 7k,
> which I don't think is worth it.

Actually I think it is worth it.
There is a benefit in isolating the small basic init part
from the more complex features.


> >>> As far as I understand last patch opens a socket interface and an 
> >>> external application can sent control commands via this interface.
> >>> I am not sure about this side control channel, what is missing in the 
> >>> DPDK API? Can we try to address them in the DPDK layer instead of a 
> >>> driver specific solution?
> >> 
> >> That would be great.
> >> 
> >> AFAIK, other vendors also has a bunch of out-of-band communication, 
> >> e.g. magical EAL parameters to the MLX drivers. So let's not be too 
> >> hard on the newcomers. ;-)
> >> 
> >
> >I did some thinking for this one too,
> >
> >As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
> >(Indeed there are already some ways to do this, without PMD exposing a socket interface.)
> >
> >But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
> >And overall this reduces portability of the DPDK application, each application becomes unique to a device (This is a bad thing, but I also need some feedback how bad it is in real life.)
> >
> >To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
> >
> >But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
> >
> >Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
> >And improve and extend the ethdev layer to satisfy driver needs.
> >
> >In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
> >
> 
> The reason we have the addition control channel is not provide
> additional functionality. We have customers with use-cases that
> require multiple processes. Since Napatech adapters do not support
> configuration through VFs, then secondary applications must send
> their rte_flow to a main application, which will then setup the flow
> through it's PF. This control channel "hides" these details, and
> make the product easier for users to adapt to their existing solutions.

I think you need to explore VF representors.
This is what is done with other drivers, and it makes them compatible.

> If you stand firm on rejecting the control channel, then we have
> to go back to the drawing board on this issue. We did look at
> DPDK's multi-process support, and actually had some support
> for this, but we determined that for our use-case it was better
> to have a communication channel, and no shared memory.

I'm not sure your need is about secondary process.
Let's discuss this need in a meeting if needed.
Anyway, the message is that we want to be part of such design decision.


> >And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
> >
> >Of course we are being easy to newcomers but not in a way to allow code that we believe is not good thing to do, but going easy on process may be :)
> >
> 
> We are grateful for any leniency you may show us ;-)
> 
> Thanks again,
> Christian
> 
> >
> >>>
> >>>
> >>> Thanks,
> >>> ferruh
> >> 
> >> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
> >> 
> >> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
> >> 
> >> -Morten


We are going to discuss the process in the technical board today.



^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-20 13:17           ` Thomas Monjalon
@ 2023-09-21 14:04             ` Ferruh Yigit
  2023-09-29  9:21               ` Christian Koue Muf
  0 siblings, 1 reply; 142+ messages in thread
From: Ferruh Yigit @ 2023-09-21 14:04 UTC (permalink / raw)
  To: Thomas Monjalon, Morten Brørup, Mykola Kostenok, Christian Koue Muf
  Cc: dev, andrew.rybchenko, techboard

On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
> Hello,
> 
> 19/09/2023 11:06, Christian Koue Muf:
>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>>>>> Sent: Friday, 15 September 2023 17.55
>>>>>
>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>>>>> From: Christian Koue Muf <ckm@napatech.com>
>>>>>>
>>>>>> The NTNIC PMD does not rely on a kernel space Napatech driver, thus 
>>>>>> all defines related to the register layout is part of the PMD code, 
>>>>>> which will be added in later commits.
>>>>>>
>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>>>>
>>>>>
>>>>> Hi Mykola, Christiam,
>>>>>
>>>>> This PMD scares me, overall it is a big drop:
>>>>> "249 files changed, 87128 insertions(+)"
>>>>>
>>>>> I think it is not possible to review all in one release cycle, and it 
>>>>> is not even possible to say if all code used or not.
>>>>>
>>>>> I can see code is already developed, and it is difficult to 
>>>>> restructure developed code, but restructure it into small pieces 
>>>>> really helps for reviews.
>>>>>
>>>>>
>>>>> Driver supports good list of features, can it be possible to 
>>>>> distribute upstream effort into multiple release.
>>>>> Starting from basic functionality and add features gradually.
>>>>> Target for this release can be providing datapath, and add more if we 
>>>>> have time in the release, what do you think?
> 
> I was expecting to get only Rx/Tx in this release, not really more.
> 
> I agree it may be interesting to discuss some design
> and check whether we need more features in ethdev
> as part of the driver upstreaming process.
> 
> 
>>>>> Also there are large amount of base code (HAL / FPGA code), instead 
>>>>> of adding them as a bulk, relevant ones with a feature can be added 
>>>>> with the feature patch, this eliminates dead code in the base code 
>>>>> layer, also helps user/review to understand the link between driver 
>>>>> code and base code.
> 
> Yes it would be interesting to see what is really needed for the basic initialization
> and what is linked to a specific offload or configuration feature.
> 
> As a maintainer, I have to do some changes across all drivers sometimes,
> and I use git blame a lot to understand why something was added.
> 
> 
>>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>>>>
>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>>>>
>>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>>>>
>>>
>>> Hi Morten,
>>>
>>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>>>
>>>
>>> But I come up with two main reasons to ask for a rework:
>>>
>>> 1- Technically any vendor can deliver their software to their customers via a public git repository, they don't have to upstream to https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E,1,NpoJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9bOLlnhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>>> but upstreaming has many benefits.
>>>
>>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>>>
>>> If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
>>>
>>> 2- Make code more accessible to the rest of the world.
>>>
>>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>>>
>>> If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
>>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior differences of the devices. I think I have witnessed all the above cases in real life.
>>>
>>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
> 
> I fully agree about the 2 reasons for upstreaming piece by piece.
> 
> 
>>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
> 
> In the meantime, if some features are not yet upstreamed in a release,
> a user can apply the missing patches from the mailing list to get the features.
> 
> 
>>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
> 
> You're right Morten, we try to be as welcoming as possible,
> but as Ferruh said, we want to be able to understand how a driver is built,
> even if not understanding all details.
> 
> In Open Source, I think not only the code should be available,
> we must also take care of explanations and documentation.
> 
> 
>>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
>>>
>>> Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it becomes ready but I can see it will take time to split driver into features and upstream them.
> 
> Driver features can be added until -rc2 (in one month).
> 
> 
>>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
>>
>> Hi Ferruh,
>>
>> First of all, thank you for starting the work to review our code.
>>
>> As Morten said Napatech plans to take all responsibility for the
>> quality of the PMD source code. We expect to provide all fixes
>> needed in the future. If for some reason Napatech stops maintaining
>> the code, then we have been informed that the DPDK community
>> might delete the PMD from the repository, and we understand that.
>>
>> In regards to splitting the code, I don't see this as a good option. While
>> I of course agree it would be easier to review and understand, the
>> code should also result in a meaningful product. Of the 87k lines
>> of code, 53k lines are needed to start up the FPGA to a state where it is ready
>> to receive traffic. But at this point all packets would simply be discarded,
>> and to be honest, there are better and cheaper options out there,
>> if nothing more than basic functionality is needed. 34k lines are
>> used to setup filters based on rte_flow. The thing is, that you need
>> to initialize all modules in the FPGA TX- and RX-pipelines with valid
>> data, even if you don't need the features those modules provide.
>> As a result, if you split up the 34k lines, then the product would not
>> be functional. Of course some of the top level logic could be split out,
>> but at this point we are talking about splitting 87k lines into 80k and 7k,
>> which I don't think is worth it.
> 
> Actually I think it is worth.
> There is a benefit in isolating the small basic init part
> from the more complex features.
> 
> 
>>>>> As far as I understand last patch opens a socket interface and an 
>>>>> external application can send control commands via this interface.
>>>>> I am not sure about this side control channel, what is missing in the 
>>>>> DPDK API? Can we try to address them in the DPDK layer instead of a 
>>>>> driver specific solution?
>>>>
>>>> That would be great.
>>>>
>>>> AFAIK, other vendors also has a bunch of out-of-band communication, 
>>>> e.g. magical EAL parameters to the MLX drivers. So let's not be too 
>>>> hard on the newcomers. ;-)
>>>>
>>>
>>> I did some thinking for this one too,
>>>
>>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>>> (Indeed there are already some ways to do this, without PMD exposing a socket interface.)
>>>
>>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>>> And overall this reduces portability of the DPDK application, each application becomes unique to a device (This is a bad thing, but I also need some feedback how bad it is in real life.)
>>>
>>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>>>
>>> But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>>>
>>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>>> And improve and extend the ethdev layer to satisfy driver needs.
>>>
>>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>>>
>>
>> The reason we have the addition control channel is not provide
>> additional functionality. We have customers with use-cases that
>> require multiple processes. Since Napatech adapters do not support
>> configuration through VFs, then secondary applications must send
>> their rte_flow to a main application, which will then setup the flow
>> through it's PF. This control channel "hides" these details, and
>> make the product easier for users to adapt to their existing solutions.
> 
> I think you need to explore VF representors.
> This is what is done with other drivers, and it make them compatible.
> 
>> If you stand firm on rejecting the control channel, then we have
>> to go back to the drawing board on this issue. We did look at
>> DPDK's multi-process support, and actually had some support
>> for this, but we determined that for our use-case it was better
>> to have a communication channel, and no shared memory.
> 
> I'm not sure your need is about secondary process.
> Let's discuss this need in a meeting if needed.
> Anyway, the message is that we want to be part of such design decision.
> 
> 
>>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
>>>
>>> Of course we are being easy to newcomers but not in a way to allow code that we believe is not good thing to do, but going easy on process may be :)
>>>
>>
>> We are grateful for any leniency you may show us ;-)
>>
>> Thanks again,
>> Christian
>>
>>>
>>>>>
>>>>>
>>>>> Thanks,
>>>>> ferruh
>>>>
>>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>>>>
>>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>>>>
>>>> -Morten
> 
> 
> We are going to discuss the process in the technical board today.
> 
> 

Hi Mykola, Christian,

As discussed, following are a few good examples from the DPDK history,
there is no "fits all, fixed guidelines", but they can serve as samples:

Marvell cnxk:
https://patchwork.dpdk.org/project/dpdk/list/?series=17449&state=%2A&archive=both


Solarflare sfc (before patchwork series support):
https://patchwork.dpdk.org/project/dpdk/patch/1480436367-20749-2-git-send-email-arybchenko@solarflare.com/
to
https://patchwork.dpdk.org/project/dpdk/patch/1480436367-20749-56-git-send-email-arybchenko@solarflare.com/


Intel ice:
https://patchwork.dpdk.org/project/dpdk/list/?series=2842&state=%2A&archive=both



^ permalink raw reply	[flat|nested] 142+ messages in thread

* RE: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-21 14:04             ` Ferruh Yigit
@ 2023-09-29  9:21               ` Christian Koue Muf
  2023-09-29  9:46                 ` Ferruh Yigit
  0 siblings, 1 reply; 142+ messages in thread
From: Christian Koue Muf @ 2023-09-29  9:21 UTC (permalink / raw)
  To: Ferruh Yigit, Thomas Monjalon, Morten Brørup, Mykola Kostenok
  Cc: dev, andrew.rybchenko, techboard

On 9/21/2023 4:05 PM, Ferruh Yigit wrote:
>On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
>> Hello,
>> 
>> 19/09/2023 11:06, Christian Koue Muf:
>>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
>>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>>>>>> Sent: Friday, 15 September 2023 17.55
>>>>>>
>>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>>>>>> From: Christian Koue Muf <ckm@napatech.com>
>>>>>>>
>>>>>>> The NTNIC PMD does not rely on a kernel space Napatech driver, 
>>>>>>> thus all defines related to the register layout is part of the 
>>>>>>> PMD code, which will be added in later commits.
>>>>>>>
>>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>>>>>
>>>>>>
>>>>>> Hi Mykola, Christiam,
>>>>>>
>>>>>> This PMD scares me, overall it is a big drop:
>>>>>> "249 files changed, 87128 insertions(+)"
>>>>>>
>>>>>> I think it is not possible to review all in one release cycle, and 
>>>>>> it is not even possible to say if all code used or not.
>>>>>>
>>>>>> I can see code is already developed, and it is difficult to 
>>>>>> restructure developed code, but restructure it into small pieces 
>>>>>> really helps for reviews.
>>>>>>
>>>>>>
>>>>>> Driver supports good list of features, can it be possible to 
>>>>>> distribute upstream effort into multiple release.
>>>>>> Starting from basic functionality and add features gradually.
>>>>>> Target for this release can be providing datapath, and add more if 
>>>>>> we have time in the release, what do you think?
>> 
>> I was expecting to get only Rx/Tx in this release, not really more.
>> 
>> I agree it may be interesting to discuss some design and check whether 
>> we need more features in ethdev as part of the driver upstreaming 
>> process.
>> 
>> 
>>>>>> Also there are large amount of base code (HAL / FPGA code), 
>>>>>> instead of adding them as a bulk, relevant ones with a feature can 
>>>>>> be added with the feature patch, this eliminates dead code in the 
>>>>>> base code layer, also helps user/review to understand the link 
>>>>>> between driver code and base code.
>> 
>> Yes it would be interesting to see what is really needed for the basic 
>> initialization and what is linked to a specific offload or configuration feature.
>> 
>> As a maintainer, I have to do some changes across all drivers 
>> sometimes, and I use git blame a lot to understand why something was added.
>> 
>> 
>>>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>>>>>
>>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>>>>>
>>>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>>>>>
>>>>
>>>> Hi Morten,
>>>>
>>>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>>>>
>>>>
>>>> But I come up with two main reasons to ask for a rework:
>>>>
>>>> 1- Technically any vendor can deliver their software to their 
>>>> customers via a public git repository, they don't have to upstream 
>>>> to 
>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E,1,N
>>>> poJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9bOLl
>>>> nhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>>>> but upstreaming has many benefits.
>>>>
>>>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>>>>
>>>> If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
>>>>
>>>> 2- Make code more accessible to the rest of the world.
>>>>
>>>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>>>>
>>>> If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
>>>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witness all above cases in real life.
>>>>
>>>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
>> 
>> I fully agree about the 2 reasons for upstreaming piece by piece.
>> 
>> 
>>>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
>> 
>> In the meantime, if some features are not yet upstreamed in a release, 
>> a user can apply the missing patches from the mailing list to get the features.
>> 
>> 
>>>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
>> 
>> You're right Morten, we try to be as welcoming as possible, but as 
>> Ferruh said, we want to be able to understand how a driver is built, 
>> even if not understanding all details.
>> 
>> In Open Source, I think not only the code should be available, we must 
>> also take care of explanations and documentation.
>> 
>> 
>>>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
>>>>
>>>> Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it is becomes ready but I can see it will take time to split driver into features and upstream them.
>> 
>> Driver features can be added until -rc2 (in one month).
>> 
>> 
>>>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
>>>
>>> Hi Ferruh,
>>>
>>> First of all, thank you for starting the work to review our code.
>>>
>>> As Morten said Napatech plans to take all responsibility for the 
>>> quality of the PMD source code. We expect to provide all fixes needed 
>>> in the future. If for some reason Napatech stops maintaining the 
>>> code, then we have been informed that the DPDK community might delete 
>>> the PMD from the repository, and we understand that.
>>>
>>> In regards to splitting the code, I don't see this a good option. 
>>> While I of cause agree it would be easier to review and understand, 
>>> the code should also result in a meaningful product. Of the 87k lines 
>>> of code, 53k lines is needed to start-up the FPGA to a state the it 
>>> is ready to receive traffic. But at this point all packets would 
>>> simply be discarded, and to be honest, there are better and cheaper 
>>> options out there, if nothing more than basic functionality is 
>>> needed. 34k lines are used to setup filters based on rte_flow. The 
>>> thing is, that you need to initialize all modules in the FPGA TX- and 
>>> RX-pipelines with valid data, even if you don't need the features those modules provide.
>>> As a result, if you split up the 34k lines, then the product would 
>>> not be functional. Of cause some of the top level logic could be 
>>> split out, but at this point we are talking about splitting 87k lines 
>>> into 80k and 7k, which I don't think is worth it.
>> 
>> Actually I think it is worth.
>> There is a benefit in isolating the small basic init part from the 
>> more complex features.
>> 
>> 
>>>>>> As far as I understand last patch opens a socket interface and an 
>>>>>> external application can sent control commands via this interface.
>>>>>> I am not sure about this side control channel, what is missing in 
>>>>>> the DPDK API? Can we try to address them in the DPDK layer instead 
>>>>>> of a driver specific solution?
>>>>>
>>>>> That would be great.
>>>>>
>>>>> AFAIK, other vendors also has a bunch of out-of-band communication, 
>>>>> e.g. magical EAL parameters to the MLX drivers. So let's not be too 
>>>>> hard on the newcomers. ;-)
>>>>>
>>>>
>>>> I did some thinking for this one too,
>>>>
>>>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>>>> (Indeed there are already some ways to do this, without PMD exposing 
>>>> a socket interface.)
>>>>
>>>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>>>> And overall this reduces portability of the DPDK application, each 
>>>> application becomes unique to a device (This is a bad thing, but I 
>>>> also need some feedback how bad it is in real life.)
>>>>
>>>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>>>>
>>>> But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>>>>
>>>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>>>> And improve and extend the ethdev layer to satisfy driver needs.
>>>>
>>>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>>>>
>>>
>>> The reason we have the addition control channel is not provide 
>>> additional functionality. We have customers with use-cases that 
>>> require multiple processes. Since Napatech adapters do not support 
>>> configuration through VFs, then secondary applications must send 
>>> their rte_flow to a main application, which will then setup the flow 
>>> through it's PF. This control channel "hides" these details, and make 
>>> the product easier for users to adapt to their existing solutions.
>> 
>> I think you need to explore VF representors.
>> This is what is done with other drivers, and it make them compatible.
>> 
>>> If you stand firm on rejecting the control channel, then we have to 
>>> go back to the drawing board on this issue. We did look at DPDK's 
>>> multi-process support, and actually had some support for this, but we 
>>> determined that for our use-case it was better to have a 
>>> communication channel, and no shared memory.
>> 
>> I'm not sure your need is about secondary process.
>> Let's discuss this need in a meeting if needed.
>> Anyway, the message is that we want to be part of such design decision.
>> 
>> 
>>>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
>>>>
>>>> Of course we are being easy to newcomers but not in a way to allow 
>>>> code that we believe is not good thing to do, but going easy on 
>>>> process may be :)
>>>>
>>>
>>> We are grateful for any leniency you may show us ;-)
>>>
>>> Thanks again,
>>> Christian
>>>
>>>>
>>>>>>
>>>>>>
>>>>>> Thanks,
>>>>>> ferruh
>>>>>
>>>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>>>>>
>>>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>>>>>
>>>>> -Morten
>> 
>> 
>> We are going to discuss the process in the technical board today.
>> 
>> 
>
>Hi Mykola, Christiam,
>
>As discussed, following are a few good examples from the DPDK history, there is no "fits all, fixed guidelines", but they can serve as samples:
>
>Marvell cnxk:
>https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2flist%2f%3fseries%3d17449%26state%3d%252A%26archive%3dboth&c=E,1,DmXU0iHwXoSaZ4bKn-yhX9J8XmFBispd2ut7pxLNBkK3Q4LVpG_zmOf1jnWSS-Y0Fx-TNbPnQDHyBZkDj23Gu7zjPZ5nsA7pid5CsE2vxNk,&typo=1
>
>
>Solarflare sfc (before patchwork series support):
>https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-2-git-send-email-arybchenko%40solarflare.com%2f&c=E,1,E9oUT_1WuNC2JA8x7an3rC_Pm5g1L5cxJKQ6pTwSbCWSJpiLH2GnmgfFkUqViOOwkpS2df8kgBvHjmulKaWhyr4BBizUT-sL5LJv21Hx4RtHtK3vjhcKpg,,&typo=1
>to
>https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-56-git-send-email-arybchenko%40solarflare.com%2f&c=E,1,GByF_TiC_q11iVPpiPgpCMlSge-J0XfT0zHkriK0rde1Qt1RG7uf6mETQkTSQ-1V86Z7EtRcxlvSsed1sqn8RWfN8KFSbd7NaAkfbDiehn_vSRzja45rQgv53Q,,&typo=1
>
>
>Intel ice:
>https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2flist%2f%3fseries%3d2842%26state%3d%252A%26archive%3dboth&c=E,1,zQwvAIR3ToLIhT09bVxm_HEF-dp8eyTqhsKB3eOYgIJdd2WS_0ZlTbQKfr9KLyTA3A2A2HzBbjIlz21D_hWVgS_INmmC5eew1J0QBH-PoRNd&typo=1
>

Thank you for the links, they have been very helpful.

After a lot of internal discussion, Napatech has decided to implement some architectural changes to our PMD that will allow us to more easily split up the code into smaller features. The work will require some time, which means that we will not be ready for the 23.11 release. The current goal is to attempt to upstream a quite basic PMD in time for 24.7, and a fully featured PMD for 24.11.

Best regards
Christian Muf


^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-29  9:21               ` Christian Koue Muf
@ 2023-09-29  9:46                 ` Ferruh Yigit
  2023-09-29 10:23                   ` Thomas Monjalon
  0 siblings, 1 reply; 142+ messages in thread
From: Ferruh Yigit @ 2023-09-29  9:46 UTC (permalink / raw)
  To: Christian Koue Muf, Thomas Monjalon, Morten Brørup, Mykola Kostenok
  Cc: dev, andrew.rybchenko, techboard

On 9/29/2023 10:21 AM, Christian Koue Muf wrote:
> On 9/21/2023 4:05 PM, Ferruh Yigit wrote:
>> On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
>>> Hello,
>>>
>>> 19/09/2023 11:06, Christian Koue Muf:
>>>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>>>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
>>>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>>>>>>> Sent: Friday, 15 September 2023 17.55
>>>>>>>
>>>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>>>>>>> From: Christian Koue Muf <ckm@napatech.com>
>>>>>>>>
>>>>>>>> The NTNIC PMD does not rely on a kernel space Napatech driver,
>>>>>>>> thus all defines related to the register layout is part of the
>>>>>>>> PMD code, which will be added in later commits.
>>>>>>>>
>>>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>>>>>>
>>>>>>>
>>>>>>> Hi Mykola, Christiam,
>>>>>>>
>>>>>>> This PMD scares me, overall it is a big drop:
>>>>>>> "249 files changed, 87128 insertions(+)"
>>>>>>>
>>>>>>> I think it is not possible to review all in one release cycle, and
>>>>>>> it is not even possible to say if all code used or not.
>>>>>>>
>>>>>>> I can see code is already developed, and it is difficult to
>>>>>>> restructure developed code, but restructure it into small pieces
>>>>>>> really helps for reviews.
>>>>>>>
>>>>>>>
>>>>>>> Driver supports good list of features, can it be possible to
>>>>>>> distribute upstream effort into multiple release.
>>>>>>> Starting from basic functionality and add features gradually.
>>>>>>> Target for this release can be providing datapath, and add more if
>>>>>>> we have time in the release, what do you think?
>>>
>>> I was expecting to get only Rx/Tx in this release, not really more.
>>>
>>> I agree it may be interesting to discuss some design and check whether
>>> we need more features in ethdev as part of the driver upstreaming
>>> process.
>>>
>>>
>>>>>>> Also there are large amount of base code (HAL / FPGA code),
>>>>>>> instead of adding them as a bulk, relevant ones with a feature can
>>>>>>> be added with the feature patch, this eliminates dead code in the
>>>>>>> base code layer, also helps user/review to understand the link
>>>>>>> between driver code and base code.
>>>
>>> Yes it would be interesting to see what is really needed for the basic
>>> initialization and what is linked to a specific offload or configuration feature.
>>>
>>> As a maintainer, I have to do some changes across all drivers
>>> sometimes, and I use git blame a lot to understand why something was added.
>>>
>>>
>>>>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>>>>>>
>>>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>>>>>>
>>>>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>>>>>>
>>>>>
>>>>> Hi Morten,
>>>>>
>>>>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>>>>>
>>>>>
>>>>> But I come up with two main reasons to ask for a rework:
>>>>>
>>>>> 1- Technically any vendor can deliver their software to their
>>>>> customers via a public git repository, they don't have to upstream
>>>>> to
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E,1,N
>>>>> poJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9bOLl
>>>>> nhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>>>>> but upstreaming has many benefits.
>>>>>
>>>>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>>>>>
>>>>> If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
>>>>>
>>>>> 2- Make code more accessible to the rest of the world.
>>>>>
>>>>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>>>>>
>>>>> If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
>>>>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witnessed all above cases in real life.
>>>>>
>>>>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
>>>
>>> I fully agree about the 2 reasons for upstreaming piece by piece.
>>>
>>>
>>>>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
>>>
>>> In the meantime, if some features are not yet upstreamed in a release,
>>> a user can apply the missing patches from the mailing list to get the features.
>>>
>>>
>>>>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
>>>
>>> You're right Morten, we try to be as welcoming as possible, but as
>>> Ferruh said, we want to be able to understand how a driver is built,
>>> even if not understanding all details.
>>>
>>> In Open Source, I think not only the code should be available, we must
>>> also take care of explanations and documentation.
>>>
>>>
>>>>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to upstream their code, this is in best interest of both sides.
>>>>>
>>>>> Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it becomes ready but I can see it will take time to split driver into features and upstream them.
>>>
>>> Driver features can be added until -rc2 (in one month).
>>>
>>>
>>>>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
>>>>
>>>> Hi Ferruh,
>>>>
>>>> First of all, thank you for starting the work to review our code.
>>>>
>>>> As Morten said Napatech plans to take all responsibility for the
>>>> quality of the PMD source code. We expect to provide all fixes needed
>>>> in the future. If for some reason Napatech stops maintaining the
>>>> code, then we have been informed that the DPDK community might delete
>>>> the PMD from the repository, and we understand that.
>>>>
>>>> In regards to splitting the code, I don't see this a good option.
>>>> While I of course agree it would be easier to review and understand,
>>>> the code should also result in a meaningful product. Of the 87k lines
>>>> of code, 53k lines is needed to start-up the FPGA to a state where it
>>>> is ready to receive traffic. But at this point all packets would
>>>> simply be discarded, and to be honest, there are better and cheaper
>>>> options out there, if nothing more than basic functionality is
>>>> needed. 34k lines are used to setup filters based on rte_flow. The
>>>> thing is, that you need to initialize all modules in the FPGA TX- and
>>>> RX-pipelines with valid data, even if you don't need the features those modules provide.
>>>> As a result, if you split up the 34k lines, then the product would
>>>> not be functional. Of course some of the top level logic could be
>>>> split out, but at this point we are talking about splitting 87k lines
>>>> into 80k and 7k, which I don't think is worth it.
>>>
>>> Actually I think it is worth.
>>> There is a benefit in isolating the small basic init part from the
>>> more complex features.
>>>
>>>
>>>>>>> As far as I understand last patch opens a socket interface and an
>>>>>>> external application can send control commands via this interface.
>>>>>>> I am not sure about this side control channel, what is missing in
>>>>>>> the DPDK API? Can we try to address them in the DPDK layer instead
>>>>>>> of a driver specific solution?
>>>>>>
>>>>>> That would be great.
>>>>>>
>>>>>> AFAIK, other vendors also have a bunch of out-of-band communication,
>>>>>> e.g. magical EAL parameters to the MLX drivers. So let's not be too
>>>>>> hard on the newcomers. ;-)
>>>>>>
>>>>>
>>>>> I did some thinking for this one too,
>>>>>
>>>>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>>>>> (Indeed there are already some ways to do this, without PMD exposing
>>>>> a socket interface.)
>>>>>
>>>>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>>>>> And overall this reduces portability of the DPDK application, each
>>>>> application becomes unique to a device (This is a bad thing, but I
>>>>> also need some feedback how bad it is in real life.)
>>>>>
>>>>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>>>>>
>>>>> But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>>>>>
>>>>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>>>>> And improve and extend the ethdev layer to satisfy driver needs.
>>>>>
>>>>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>>>>>
>>>>
>>>> The reason we have the additional control channel is not to provide
>>>> additional functionality. We have customers with use-cases that
>>>> require multiple processes. Since Napatech adapters do not support
>>>> configuration through VFs, then secondary applications must send
>>>> their rte_flow to a main application, which will then setup the flow
>>>> through it's PF. This control channel "hides" these details, and make
>>>> the product easier for users to adapt to their existing solutions.
>>>
>>> I think you need to explore VF representors.
>>> This is what is done with other drivers, and it makes them compatible.
>>>
>>>> If you stand firm on rejecting the control channel, then we have to
>>>> go back to the drawing board on this issue. We did look at DPDK's
>>>> multi-process support, and actually had some support for this, but we
>>>> determined that for our use-case it was better to have a
>>>> communication channel, and no shared memory.
>>>
>>> I'm not sure your need is about secondary process.
>>> Let's discuss this need in a meeting if needed.
>>> Anyway, the message is that we want to be part of such design decision.
>>>
>>>
>>>>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instances of this in the past.
>>>>>
>>>>> Of course we are being easy to newcomers but not in a way to allow
>>>>> code that we believe is not good thing to do, but going easy on
>>>>> process may be :)
>>>>>
>>>>
>>>> We are grateful for any leniency you may show us ;-)
>>>>
>>>> Thanks again,
>>>> Christian
>>>>
>>>>>
>>>>>>>
>>>>>>>
>>>>>>> Thanks,
>>>>>>> ferruh
>>>>>>
>>>>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>>>>>>
>>>>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>>>>>>
>>>>>> -Morten
>>>
>>>
>>> We are going to discuss the process in the technical board today.
>>>
>>>
>>
>> Hi Mykola, Christiam,
>>
>> As discussed, following are a few good examples from the DPDK history, there is no "fits all, fixed guidelines", but they can serve as samples:
>>
>> Marvell cnxk:
>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2flist%2f%3fseries%3d17449%26state%3d%252A%26archive%3dboth&c=E,1,DmXU0iHwXoSaZ4bKn-yhX9J8XmFBispd2ut7pxLNBkK3Q4LVpG_zmOf1jnWSS-Y0Fx-TNbPnQDHyBZkDj23Gu7zjPZ5nsA7pid5CsE2vxNk,&typo=1
>>
>>
>> Solarflare sfc (before patchwork series support):
>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-2-git-send-email-arybchenko%40solarflare.com%2f&c=E,1,E9oUT_1WuNC2JA8x7an3rC_Pm5g1L5cxJKQ6pTwSbCWSJpiLH2GnmgfFkUqViOOwkpS2df8kgBvHjmulKaWhyr4BBizUT-sL5LJv21Hx4RtHtK3vjhcKpg,,&typo=1
>> to
>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-56-git-send-email-arybchenko%40solarflare.com%2f&c=E,1,GByF_TiC_q11iVPpiPgpCMlSge-J0XfT0zHkriK0rde1Qt1RG7uf6mETQkTSQ-1V86Z7EtRcxlvSsed1sqn8RWfN8KFSbd7NaAkfbDiehn_vSRzja45rQgv53Q,,&typo=1
>>
>>
>> Intel ice:
>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2flist%2f%3fseries%3d2842%26state%3d%252A%26archive%3dboth&c=E,1,zQwvAIR3ToLIhT09bVxm_HEF-dp8eyTqhsKB3eOYgIJdd2WS_0ZlTbQKfr9KLyTA3A2A2HzBbjIlz21D_hWVgS_INmmC5eew1J0QBH-PoRNd&typo=1
>>
> 
> Thank you for the links, they have been very helpful.
> 
> After a lot of internal discussion, Napatech has decided to implement some architectural changes to our PMD that will allow us to easier split up the code into smaller features. The work will require some time, which means that we will not be ready for the 23.11 release. The current goal is to attempt to upstream a quite basic PMD in time for 24.7, and a fully featured PMD for 24.11.
> 
> 

Hi Christiam,

Good to see there is a solid plan for upstreaming but also not that good
that it is postponed,

I am aware it is all tied to your internal planning/resourcing etc, but
since the effort already started, can it be possible to squeeze very
basic driver in this release, which just does link up and most basic Rx/Tx?
It gives opportunity to experiment on device to users.

We can accept it up to -rc3, which is end of October, so there is still
some time?

This is just a suggestion though, no pressure intended.


^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-29  9:46                 ` Ferruh Yigit
@ 2023-09-29 10:23                   ` Thomas Monjalon
  2023-10-09  7:57                     ` Christian Koue Muf
  0 siblings, 1 reply; 142+ messages in thread
From: Thomas Monjalon @ 2023-09-29 10:23 UTC (permalink / raw)
  To: Christian Koue Muf, Morten Brørup, Mykola Kostenok, Ferruh Yigit
  Cc: dev, andrew.rybchenko, techboard

29/09/2023 11:46, Ferruh Yigit:
> On 9/29/2023 10:21 AM, Christian Koue Muf wrote:
> > On 9/21/2023 4:05 PM, Ferruh Yigit wrote:
> >> On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
> >>> Hello,
> >>>
> >>> 19/09/2023 11:06, Christian Koue Muf:
> >>>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
> >>>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
> >>>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
> >>>>>>> Sent: Friday, 15 September 2023 17.55
> >>>>>>>
> >>>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
> >>>>>>>> From: Christian Koue Muf <ckm@napatech.com>
> >>>>>>>>
> >>>>>>>> The NTNIC PMD does not rely on a kernel space Napatech driver,
> >>>>>>>> thus all defines related to the register layout is part of the
> >>>>>>>> PMD code, which will be added in later commits.
> >>>>>>>>
> >>>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> >>>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
> >>>>>>>>
> >>>>>>>
> >>>>>>> Hi Mykola, Christiam,
> >>>>>>>
> >>>>>>> This PMD scares me, overall it is a big drop:
> >>>>>>> "249 files changed, 87128 insertions(+)"
> >>>>>>>
> >>>>>>> I think it is not possible to review all in one release cycle, and
> >>>>>>> it is not even possible to say if all code used or not.
> >>>>>>>
> >>>>>>> I can see code is already developed, and it is difficult to
> >>>>>>> restructure developed code, but restructure it into small pieces
> >>>>>>> really helps for reviews.
> >>>>>>>
> >>>>>>>
> >>>>>>> Driver supports good list of features, can it be possible to
> >>>>>>> distribute upstream effort into multiple release.
> >>>>>>> Starting from basic functionality and add features gradually.
> >>>>>>> Target for this release can be providing datapath, and add more if
> >>>>>>> we have time in the release, what do you think?
> >>>
> >>> I was expecting to get only Rx/Tx in this release, not really more.
> >>>
> >>> I agree it may be interesting to discuss some design and check whether
> >>> we need more features in ethdev as part of the driver upstreaming
> >>> process.
> >>>
> >>>
> >>>>>>> Also there are large amount of base code (HAL / FPGA code),
> >>>>>>> instead of adding them as a bulk, relevant ones with a feature can
> >>>>>>> be added with the feature patch, this eliminates dead code in the
> >>>>>>> base code layer, also helps user/review to understand the link
> >>>>>>> between driver code and base code.
> >>>
> >>> Yes it would be interesting to see what is really needed for the basic
> >>> initialization and what is linked to a specific offload or configuration feature.
> >>>
> >>> As a maintainer, I have to do some changes across all drivers
> >>> sometimes, and I use git blame a lot to understand why something was added.
> >>>
> >>>
> >>>>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
> >>>>>>
> >>>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
> >>>>>>
> >>>>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
> >>>>>>
> >>>>>
> >>>>> Hi Morten,
> >>>>>
> >>>>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
> >>>>>
> >>>>>
> >>>>> But I come up with two main reasons to ask for a rework:
> >>>>>
> >>>>> 1- Technically any vendor can deliver their software to their
> >>>>> customers via a public git repository, they don't have to upstream
> >>>>> to
> >>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E,1,N
> >>>>> poJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9bOLl
> >>>>> nhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
> >>>>> but upstreaming has many benefits.
> >>>>>
> >>>>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
> >>>>>
> >>>>> If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
> >>>>>
> >>>>> 2- Make code more accessible to the rest of the world.
> >>>>>
> >>>>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
> >>>>>
> >>>>> If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
> >>>>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witness all above cases in real life.
> >>>>>
> >>>>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
> >>>
> >>> I fully agree about the 2 reasons for upstreaming piece by piece.
> >>>
> >>>
> >>>>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
> >>>
> >>> In the meantime, if some features are not yet upstreamed in a release,
> >>> a user can apply the missing patches from the mailing list to get the features.
> >>>
> >>>
> >>>>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
> >>>
> >>> You're right Morten, we try to be as welcoming as possible, but as
> >>> Ferruh said, we want to be able to understand how a driver is built,
> >>> even if not understanding all details.
> >>>
> >>> In Open Source, I think not only the code should be available, we must
> >>> also take care of explanations and documentation.
> >>>
> >>>
> >>>>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
> >>>>>
> >>>>> Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it is becomes ready but I can see it will take time to split driver into features and upstream them.
> >>>
> >>> Driver features can be added until -rc2 (in one month).
> >>>
> >>>
> >>>>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
> >>>>
> >>>> Hi Ferruh,
> >>>>
> >>>> First of all, thank you for starting the work to review our code.
> >>>>
> >>>> As Morten said Napatech plans to take all responsibility for the
> >>>> quality of the PMD source code. We expect to provide all fixes needed
> >>>> in the future. If for some reason Napatech stops maintaining the
> >>>> code, then we have been informed that the DPDK community might delete
> >>>> the PMD from the repository, and we understand that.
> >>>>
> >>>> In regards to splitting the code, I don't see this a good option.
> >>>> While I of cause agree it would be easier to review and understand,
> >>>> the code should also result in a meaningful product. Of the 87k lines
> >>>> of code, 53k lines is needed to start-up the FPGA to a state the it
> >>>> is ready to receive traffic. But at this point all packets would
> >>>> simply be discarded, and to be honest, there are better and cheaper
> >>>> options out there, if nothing more than basic functionality is
> >>>> needed. 34k lines are used to setup filters based on rte_flow. The
> >>>> thing is, that you need to initialize all modules in the FPGA TX- and
> >>>> RX-pipelines with valid data, even if you don't need the features those modules provide.
> >>>> As a result, if you split up the 34k lines, then the product would
> >>>> not be functional. Of cause some of the top level logic could be
> >>>> split out, but at this point we are talking about splitting 87k lines
> >>>> into 80k and 7k, which I don't think is worth it.
> >>>
> >>> Actually I think it is worth.
> >>> There is a benefit in isolating the small basic init part from the
> >>> more complex features.
> >>>
> >>>
> >>>>>>> As far as I understand last patch opens a socket interface and an
> >>>>>>> external application can sent control commands via this interface.
> >>>>>>> I am not sure about this side control channel, what is missing in
> >>>>>>> the DPDK API? Can we try to address them in the DPDK layer instead
> >>>>>>> of a driver specific solution?
> >>>>>>
> >>>>>> That would be great.
> >>>>>>
> >>>>>> AFAIK, other vendors also has a bunch of out-of-band communication,
> >>>>>> e.g. magical EAL parameters to the MLX drivers. So let's not be too
> >>>>>> hard on the newcomers. ;-)
> >>>>>>
> >>>>>
> >>>>> I did some thinking for this one too,
> >>>>>
> >>>>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
> >>>>> (Indeed there are already some ways to do this, without PMD exposing
> >>>>> a socket interface.)
> >>>>>
> >>>>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
> >>>>> And overall this reduces portability of the DPDK application, each
> >>>>> application becomes unique to a device (This is a bad thing, but I
> >>>>> also need some feedback how bad it is in real life.)
> >>>>>
> >>>>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
> >>>>>
> >>>>> But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
> >>>>>
> >>>>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
> >>>>> And improve and extend the ethdev layer to satisfy driver needs.
> >>>>>
> >>>>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
> >>>>>
> >>>>
> >>>> The reason we have the addition control channel is not provide
> >>>> additional functionality. We have customers with use-cases that
> >>>> require multiple processes. Since Napatech adapters do not support
> >>>> configuration through VFs, then secondary applications must send
> >>>> their rte_flow to a main application, which will then setup the flow
> >>>> through it's PF. This control channel "hides" these details, and make
> >>>> the product easier for users to adapt to their existing solutions.
> >>>
> >>> I think you need to explore VF representors.
> >>> This is what is done with other drivers, and it make them compatible.
> >>>
> >>>> If you stand firm on rejecting the control channel, then we have to
> >>>> go back to the drawing board on this issue. We did look at DPDK's
> >>>> multi-process support, and actually had some support for this, but we
> >>>> determined that for our use-case it was better to have a
> >>>> communication channel, and no shared memory.
> >>>
> >>> I'm not sure your need is about secondary process.
> >>> Let's discuss this need in a meeting if needed.
> >>> Anyway, the message is that we want to be part of such design decision.
> >>>
> >>>
> >>>>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
> >>>>>
> >>>>> Of course we are being easy to newcomers but not in a way to allow
> >>>>> code that we believe is not good thing to do, but going easy on
> >>>>> process may be :)
> >>>>>
> >>>>
> >>>> We are grateful for any leniency you may show us ;-)
> >>>>
> >>>> Thanks again,
> >>>> Christian
> >>>>
> >>>>>
> >>>>>>>
> >>>>>>>
> >>>>>>> Thanks,
> >>>>>>> ferruh
> >>>>>>
> >>>>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
> >>>>>>
> >>>>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
> >>>>>>
> >>>>>> -Morten
> >>>
> >>>
> >>> We are going to discuss the process in the technical board today.
> >>>
> >>>
> >>
> >> Hi Mykola, Christiam,
> >>
> >> As discussed, following are a few good examples from the DPDK history, there is no "fits all, fixed guidelines", but they can serve as samples:
> >>
> >> Marvell cnxk:
> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2flist%2f%3fseries%3d17449%26state%3d%252A%26archive%3dboth&c=E,1,DmXU0iHwXoSaZ4bKn-yhX9J8XmFBispd2ut7pxLNBkK3Q4LVpG_zmOf1jnWSS-Y0Fx-TNbPnQDHyBZkDj23Gu7zjPZ5nsA7pid5CsE2vxNk,&typo=1
> >>
> >>
> >> Solarflare sfc (before patchwork series support):
> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-2-git-send-email-arybchenko%40solarflare.com%2f&c=E,1,E9oUT_1WuNC2JA8x7an3rC_Pm5g1L5cxJKQ6pTwSbCWSJpiLH2GnmgfFkUqViOOwkpS2df8kgBvHjmulKaWhyr4BBizUT-sL5LJv21Hx4RtHtK3vjhcKpg,,&typo=1
> >> to
> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-56-git-send-email-arybchenko%40solarflare.com%2f&c=E,1,GByF_TiC_q11iVPpiPgpCMlSge-J0XfT0zHkriK0rde1Qt1RG7uf6mETQkTSQ-1V86Z7EtRcxlvSsed1sqn8RWfN8KFSbd7NaAkfbDiehn_vSRzja45rQgv53Q,,&typo=1
> >>
> >>
> >> Intel ice:
> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.org%2fproject%2fdpdk%2flist%2f%3fseries%3d2842%26state%3d%252A%26archive%3dboth&c=E,1,zQwvAIR3ToLIhT09bVxm_HEF-dp8eyTqhsKB3eOYgIJdd2WS_0ZlTbQKfr9KLyTA3A2A2HzBbjIlz21D_hWVgS_INmmC5eew1J0QBH-PoRNd&typo=1
> >>
> > 
> > Thank you for the links, they have been very helpful.
> > 
> > After a lot of internal discussion, Napatech has decided to implement some architectural changes to our PMD that will allow us to easier split up the code into smaller features. The work will require some time, which means that we will not be ready for the 23.11 release. The current goal is to attempt to upstream a quite basic PMD in time for 24.7, and a fully featured PMD for 24.11.
> > 
> > 
> 
> Hi Christiam,
> 
> Good to see there is a solid plan for upstreaming but also not that good
> that it is postponed,
> 
> I am aware it is all tied to your internal planning/resourcing etc, but
> since the effort already started, can it be possible to squeeze very
> basic driver in this release, which just does link up and most basic Rx/Tx?
> It gives opportunity to experiment on device to users.
> 
> We can accept it up to -rc3, which is end of October, so there is still
> some time?
> 
> This is just a suggestion though, no pressure intended.

I agree with Ferruh, better to start early and small.
It shouldn't be too hard to introduce the skeleton of the driver.





^ permalink raw reply	[flat|nested] 142+ messages in thread

* RE: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-09-29 10:23                   ` Thomas Monjalon
@ 2023-10-09  7:57                     ` Christian Koue Muf
  2023-10-09  9:52                       ` Ferruh Yigit
  2024-03-29 11:24                       ` Ferruh Yigit
  0 siblings, 2 replies; 142+ messages in thread
From: Christian Koue Muf @ 2023-10-09  7:57 UTC (permalink / raw)
  To: Thomas Monjalon, Morten Brørup, Mykola Kostenok, Ferruh Yigit
  Cc: dev, andrew.rybchenko, techboard

On 9/29/2023 12:24 PM, Thomas Monjalon wrote:
>29/09/2023 11:46, Ferruh Yigit:
>> On 9/29/2023 10:21 AM, Christian Koue Muf wrote:
>> > On 9/21/2023 4:05 PM, Ferruh Yigit wrote:
>> >> On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
>> >>> Hello,
>> >>>
>> >>> 19/09/2023 11:06, Christian Koue Muf:
>> >>>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>> >>>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
>> >>>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>> >>>>>>> Sent: Friday, 15 September 2023 17.55
>> >>>>>>>
>> >>>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>> >>>>>>>> From: Christian Koue Muf <ckm@napatech.com>
>> >>>>>>>>
>> >>>>>>>> The NTNIC PMD does not rely on a kernel space Napatech 
>> >>>>>>>> driver, thus all defines related to the register layout is 
>> >>>>>>>> part of the PMD code, which will be added in later commits.
>> >>>>>>>>
>> >>>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>> >>>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>> >>>>>>>>
>> >>>>>>>
>> >>>>>>> Hi Mykola, Christiam,
>> >>>>>>>
>> >>>>>>> This PMD scares me, overall it is a big drop:
>> >>>>>>> "249 files changed, 87128 insertions(+)"
>> >>>>>>>
>> >>>>>>> I think it is not possible to review all in one release cycle, 
>> >>>>>>> and it is not even possible to say if all code used or not.
>> >>>>>>>
>> >>>>>>> I can see code is already developed, and it is difficult to 
>> >>>>>>> restructure developed code, but restructure it into small 
>> >>>>>>> pieces really helps for reviews.
>> >>>>>>>
>> >>>>>>>
>> >>>>>>> Driver supports good list of features, can it be possible to 
>> >>>>>>> distribute upstream effort into multiple release.
>> >>>>>>> Starting from basic functionality and add features gradually.
>> >>>>>>> Target for this release can be providing datapath, and add 
>> >>>>>>> more if we have time in the release, what do you think?
>> >>>
>> >>> I was expecting to get only Rx/Tx in this release, not really more.
>> >>>
>> >>> I agree it may be interesting to discuss some design and check 
>> >>> whether we need more features in ethdev as part of the driver 
>> >>> upstreaming process.
>> >>>
>> >>>
>> >>>>>>> Also there are large amount of base code (HAL / FPGA code), 
>> >>>>>>> instead of adding them as a bulk, relevant ones with a feature 
>> >>>>>>> can be added with the feature patch, this eliminates dead code 
>> >>>>>>> in the base code layer, also helps user/review to understand 
>> >>>>>>> the link between driver code and base code.
>> >>>
>> >>> Yes it would be interesting to see what is really needed for the 
>> >>> basic initialization and what is linked to a specific offload or configuration feature.
>> >>>
>> >>> As a maintainer, I have to do some changes across all drivers 
>> >>> sometimes, and I use git blame a lot to understand why something was added.
>> >>>
>> >>>
>> >>>>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>> >>>>>>
>> >>>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>> >>>>>>
>> >>>>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>> >>>>>>
>> >>>>>
>> >>>>> Hi Morten,
>> >>>>>
>> >>>>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>> >>>>>
>> >>>>>
>> >>>>> But I come up with two main reasons to ask for a rework:
>> >>>>>
>> >>>>> 1- Technically any vendor can deliver their software to their 
>> >>>>> customers via a public git repository, they don't have to 
>> >>>>> upstream to 
>> >>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E
>> >>>>> ,1,N 
>> >>>>> poJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9
>> >>>>> bOLl nhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>> >>>>> but upstreaming has many benefits.
>> >>>>>
>> >>>>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>> >>>>>
>> >>>>> If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
>> >>>>>
>> >>>>> 2- Make code more accessible to the rest of the world.
>> >>>>>
>> >>>>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>> >>>>>
>> >>>>> If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
>> >>>>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witnessed all above cases in real life.
>> >>>>>
>> >>>>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
>> >>>
>> >>> I fully agree about the 2 reasons for upstreaming piece by piece.
>> >>>
>> >>>
>> >>>>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
>> >>>
>> >>> In the meantime, if some features are not yet upstreamed in a 
>> >>> release, a user can apply the missing patches from the mailing list to get the features.
>> >>>
>> >>>
>> >>>>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
>> >>>
>> >>> You're right Morten, we try to be as welcoming as possible, but as 
>> >>> Ferruh said, we want to be able to understand how a driver is 
>> >>> built, even if not understanding all details.
>> >>>
>> >>> In Open Source, I think not only the code should be available, we 
>> >>> must also take care of explanations and documentation.
>> >>>
>> >>>
>> >>>>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
>> >>>>>
>> >>>>> Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it becomes ready but I can see it will take time to split driver into features and upstream them.
>> >>>
>> >>> Driver features can be added until -rc2 (in one month).
>> >>>
>> >>>
>> >>>>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
>> >>>>
>> >>>> Hi Ferruh,
>> >>>>
>> >>>> First of all, thank you for starting the work to review our code.
>> >>>>
>> >>>> As Morten said Napatech plans to take all responsibility for the 
>> >>>> quality of the PMD source code. We expect to provide all fixes 
>> >>>> needed in the future. If for some reason Napatech stops 
>> >>>> maintaining the code, then we have been informed that the DPDK 
>> >>>> community might delete the PMD from the repository, and we understand that.
>> >>>>
>> >>>> In regards to splitting the code, I don't see this as a good option.
>> >>>> While I of course agree it would be easier to review and 
>> >>>> understand, the code should also result in a meaningful product. 
>> >>>> Of the 87k lines of code, 53k lines is needed to start-up the 
>> >>>> FPGA to a state the it is ready to receive traffic. But at this 
>> >>>> point all packets would simply be discarded, and to be honest, 
>> >>>> there are better and cheaper options out there, if nothing more 
>> >>>> than basic functionality is needed. 34k lines are used to setup 
>> >>>> filters based on rte_flow. The thing is, that you need to 
>> >>>> initialize all modules in the FPGA TX- and RX-pipelines with valid data, even if you don't need the features those modules provide.
>> >>>> As a result, if you split up the 34k lines, then the product 
>> >>>> would not be functional. Of course some of the top level logic 
>> >>>> could be split out, but at this point we are talking about 
>> >>>> splitting 87k lines into 80k and 7k, which I don't think is worth it.
>> >>>
>> >>> Actually I think it is worth.
>> >>> There is a benefit in isolating the small basic init part from the 
>> >>> more complex features.
>> >>>
>> >>>
>> >>>>>>> As far as I understand last patch opens a socket interface and 
>> >>>>>>> an external application can sent control commands via this interface.
>> >>>>>>> I am not sure about this side control channel, what is missing 
>> >>>>>>> in the DPDK API? Can we try to address them in the DPDK layer 
>> >>>>>>> instead of a driver specific solution?
>> >>>>>>
>> >>>>>> That would be great.
>> >>>>>>
>> >>>>>> AFAIK, other vendors also has a bunch of out-of-band 
>> >>>>>> communication, e.g. magical EAL parameters to the MLX drivers. 
>> >>>>>> So let's not be too hard on the newcomers. ;-)
>> >>>>>>
>> >>>>>
>> >>>>> I did some thinking for this one too,
>> >>>>>
>> >>>>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>> >>>>> (Indeed there are already some ways to do this, without PMD 
>> >>>>> exposing a socket interface.)
>> >>>>>
>> >>>>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>> >>>>> And overall this reduces portability of the DPDK application, 
>> >>>>> each application becomes unique to a device (This is a bad 
>> >>>>> thing, but I also need some feedback how bad it is in real 
>> >>>>> life.)
>> >>>>>
>> >>>>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>> >>>>>
>> >>>>> But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>> >>>>>
>> >>>>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>> >>>>> And improve and extend the ethdev layer to satisfy driver needs.
>> >>>>>
>> >>>>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>> >>>>>
>> >>>>
>> >>>> The reason we have the addition control channel is not provide 
>> >>>> additional functionality. We have customers with use-cases that 
>> >>>> require multiple processes. Since Napatech adapters do not 
>> >>>> support configuration through VFs, then secondary applications 
>> >>>> must send their rte_flow to a main application, which will then 
>> >>>> setup the flow through it's PF. This control channel "hides" 
>> >>>> these details, and make the product easier for users to adapt to their existing solutions.
>> >>>
>> >>> I think you need to explore VF representors.
>> >>> This is what is done with other drivers, and it make them compatible.
>> >>>
>> >>>> If you stand firm on rejecting the control channel, then we have 
>> >>>> to go back to the drawing board on this issue. We did look at 
>> >>>> DPDK's multi-process support, and actually had some support for 
>> >>>> this, but we determined that for our use-case it was better to 
>> >>>> have a communication channel, and no shared memory.
>> >>>
>> >>> I'm not sure your need is about secondary process.
>> >>> Let's discuss this need in a meeting if needed.
>> >>> Anyway, the message is that we want to be part of such design decision.
>> >>>
>> >>>
>> >>>>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
>> >>>>>
>> >>>>> Of course we are being easy to newcomers but not in a way to 
>> >>>>> allow code that we believe is not good thing to do, but going 
>> >>>>> easy on process may be :)
>> >>>>>
>> >>>>
>> >>>> We are grateful for any leniency you may show us ;-)
>> >>>>
>> >>>> Thanks again,
>> >>>> Christian
>> >>>>
>> >>>>>
>> >>>>>>>
>> >>>>>>>
>> >>>>>>> Thanks,
>> >>>>>>> ferruh
>> >>>>>>
>> >>>>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>> >>>>>>
>> >>>>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>> >>>>>>
>> >>>>>> -Morten
>> >>>
>> >>>
>> >>> We are going to discuss the process in the technical board today.
>> >>>
>> >>>
>> >>
>> >> Hi Mykola, Christiam,
>> >>
>> >> As discussed, following are a few good examples from the DPDK history, there is no "fits all, fixed guidelines", but they can serve as samples:
>> >>
>> >> Marvell cnxk:
>> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>> >> org%2fproject%2fdpdk%2flist%2f%3fseries%3d17449%26state%3d%252A%26a
>> >> rchive%3dboth&c=E,1,DmXU0iHwXoSaZ4bKn-yhX9J8XmFBispd2ut7pxLNBkK3Q4L
>> >> VpG_zmOf1jnWSS-Y0Fx-TNbPnQDHyBZkDj23Gu7zjPZ5nsA7pid5CsE2vxNk,&typo=
>> >> 1
>> >>
>> >>
>> >> Solarflare sfc (before patchwork series support):
>> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>> >> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-2-git-send-email-ar
>> >> ybchenko%40solarflare.com%2f&c=E,1,E9oUT_1WuNC2JA8x7an3rC_Pm5g1L5cx
>> >> JKQ6pTwSbCWSJpiLH2GnmgfFkUqViOOwkpS2df8kgBvHjmulKaWhyr4BBizUT-sL5LJ
>> >> v21Hx4RtHtK3vjhcKpg,,&typo=1
>> >> to
>> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>> >> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-56-git-send-email-a
>> >> rybchenko%40solarflare.com%2f&c=E,1,GByF_TiC_q11iVPpiPgpCMlSge-J0Xf
>> >> T0zHkriK0rde1Qt1RG7uf6mETQkTSQ-1V86Z7EtRcxlvSsed1sqn8RWfN8KFSbd7NaA
>> >> kfbDiehn_vSRzja45rQgv53Q,,&typo=1
>> >>
>> >>
>> >> Intel ice:
>> >> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>> >> org%2fproject%2fdpdk%2flist%2f%3fseries%3d2842%26state%3d%252A%26ar
>> >> chive%3dboth&c=E,1,zQwvAIR3ToLIhT09bVxm_HEF-dp8eyTqhsKB3eOYgIJdd2WS
>> >> _0ZlTbQKfr9KLyTA3A2A2HzBbjIlz21D_hWVgS_INmmC5eew1J0QBH-PoRNd&typo=1
>> >>
>> > 
>> > Thank you for the links, they have been very helpful.
>> > 
>> > After a lot of internal discussion, Napatech has decided to implement some architectural changes to our PMD that will allow us to easier split up the code into smaller features. The work will require some time, which means that we will not be ready for the 23.11 release. The current goal is to attempt to upstream a quite basic PMD in time for 24.7, and a fully featured PMD for 24.11.
>> > 
>> > 
>> 
>> Hi Christian,
>> 
>> Good to see there is a solid plan for upstreaming but also not that 
>> good that it is postponed,
>> 
>> I am aware it is all tied to your internal planning/resourcing etc, 
>> but since the effort already started, can it be possible to squeeze 
>> very basic driver in this release, which just does link up and most basic Rx/Tx?
>> It gives opportunity to experiment on device to users.
>> 
>> We can accept it up to -rc3, which is end of October, so there is 
>> still some time?
>> 
>> This is just a suggestion though, no pressure intended.
>
> I agree with Ferruh, better to start early and small.
> It shouldn't be too hard to introduce the skeleton of the driver.

Hi Ferruh and Thomas,

My apologies for the late response, I have been sick the last week.

We can try to create a small PMD in time. The reason I'm cautious is because Napatech plan to make quite large changes to the PMD, to achieve a more stable and modular code-base. This means that future updates will have quite large diffs, until these changes are in place.


^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-10-09  7:57                     ` Christian Koue Muf
@ 2023-10-09  9:52                       ` Ferruh Yigit
  2024-03-29 11:24                       ` Ferruh Yigit
  1 sibling, 0 replies; 142+ messages in thread
From: Ferruh Yigit @ 2023-10-09  9:52 UTC (permalink / raw)
  To: Christian Koue Muf, Thomas Monjalon, Morten Brørup, Mykola Kostenok
  Cc: dev, andrew.rybchenko, techboard

On 10/9/2023 8:57 AM, Christian Koue Muf wrote:
> On 9/29/2023 12:24 PM, Thomas Monjalon wrote:
>> 29/09/2023 11:46, Ferruh Yigit:
>>> On 9/29/2023 10:21 AM, Christian Koue Muf wrote:
>>>> On 9/21/2023 4:05 PM, Ferruh Yigit wrote:
>>>>> On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
>>>>>> Hello,
>>>>>>
>>>>>> 19/09/2023 11:06, Christian Koue Muf:
>>>>>>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>>>>>>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
>>>>>>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>>>>>>>>>> Sent: Friday, 15 September 2023 17.55
>>>>>>>>>>
>>>>>>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>>>>>>>>>> From: Christian Koue Muf <ckm@napatech.com>
>>>>>>>>>>>
>>>>>>>>>>> The NTNIC PMD does not rely on a kernel space Napatech
>>>>>>>>>>> driver, thus all defines related to the register layout is
>>>>>>>>>>> part of the PMD code, which will be added in later commits.
>>>>>>>>>>>
>>>>>>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>>>>>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Hi Mykola, Christiam,
>>>>>>>>>>
>>>>>>>>>> This PMD scares me, overall it is a big drop:
>>>>>>>>>> "249 files changed, 87128 insertions(+)"
>>>>>>>>>>
>>>>>>>>>> I think it is not possible to review all in one release cycle,
>>>>>>>>>> and it is not even possible to say if all code used or not.
>>>>>>>>>>
>>>>>>>>>> I can see code is already developed, and it is difficult to
>>>>>>>>>> restructure developed code, but restructure it into small
>>>>>>>>>> pieces really helps for reviews.
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Driver supports good list of features, can it be possible to
>>>>>>>>>> distribute upstream effort into multiple release.
>>>>>>>>>> Starting from basic functionality and add features gradually.
>>>>>>>>>> Target for this release can be providing datapath, and add
>>>>>>>>>> more if we have time in the release, what do you think?
>>>>>>
>>>>>> I was expecting to get only Rx/Tx in this release, not really more.
>>>>>>
>>>>>> I agree it may be interesting to discuss some design and check
>>>>>> whether we need more features in ethdev as part of the driver
>>>>>> upstreaming process.
>>>>>>
>>>>>>
>>>>>>>>>> Also there are large amount of base code (HAL / FPGA code),
>>>>>>>>>> instead of adding them as a bulk, relevant ones with a feature
>>>>>>>>>> can be added with the feature patch, this eliminates dead code
>>>>>>>>>> in the base code layer, also helps user/review to understand
>>>>>>>>>> the link between driver code and base code.
>>>>>>
>>>>>> Yes it would be interesting to see what is really needed for the
>>>>>> basic initialization and what is linked to a specific offload or configuration feature.
>>>>>>
>>>>>> As a maintainer, I have to do some changes across all drivers
>>>>>> sometimes, and I use git blame a lot to understand why something was added.
>>>>>>
>>>>>>
>>>>>>>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>>>>>>>>>
>>>>>>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>>>>>>>>>
>>>>>>>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>>>>>>>>>
>>>>>>>>
>>>>>>>> Hi Morten,
>>>>>>>>
>>>>>>>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>>>>>>>>
>>>>>>>>
>>>>>>>> But I come up with two main reasons to ask for a rework:
>>>>>>>>
>>>>>>>> 1- Technically any vendor can deliver their software to their
>>>>>>>> customers via a public git repository, they don't have to
>>>>>>>> upstream to
>>>>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E
>>>>>>>> ,1,N
>>>>>>>> poJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9
>>>>>>>> bOLl nhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>>>>>>>> but upstreaming has many benefits.
>>>>>>>>
>>>>>>>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>>>>>>>>
>>>>>>>> If driver is one big patch series, it is practically not possible to review it, I can catch a few bits here or there, you may some others, but practically it will be merged without review, and we will fail on our quality assurance task.
>>>>>>>>
>>>>>>>> 2- Make code more accessible to the rest of the world.
>>>>>>>>
>>>>>>>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>>>>>>>>
>>>>>>>> If a customer would like to add a feature themselves, or fix something, even after vendor no more working on that product anymore, customer needs to understand the code or some reasoning in the code.
>>>>>>>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior difference of the devices. I think I have witnessed all above cases in real life.
>>>>>>>>
>>>>>>>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
>>>>>>
>>>>>> I fully agree about the 2 reasons for upstreaming piece by piece.
>>>>>>
>>>>>>
>>>>>>>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
>>>>>>
>>>>>> In the meantime, if some features are not yet upstreamed in a
>>>>>> release, a user can apply the missing patches from the mailing list to get the features.
>>>>>>
>>>>>>
>>>>>>>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
>>>>>>
>>>>>> You're right Morten, we try to be as welcoming as possible, but as
>>>>>> Ferruh said, we want to be able to understand how a driver is
>>>>>> built, even if not understanding all details.
>>>>>>
>>>>>> In Open Source, I think not only the code should be available, we
>>>>>> must also take care of explanations and documentation.
>>>>>>
>>>>>>
>>>>>>>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
>>>>>>>>
>>>>>>>> Distributing upstreaming effort to a year was just a suggestion, it can go in earlier as it becomes ready but I can see it will take time to split driver into features and upstream them.
>>>>>>
>>>>>> Driver features can be added until -rc2 (in one month).
>>>>>>
>>>>>>
>>>>>>>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
>>>>>>>
>>>>>>> Hi Ferruh,
>>>>>>>
>>>>>>> First of all, thank you for starting the work to review our code.
>>>>>>>
>>>>>>> As Morten said Napatech plans to take all responsibility for the
>>>>>>> quality of the PMD source code. We expect to provide all fixes
>>>>>>> needed in the future. If for some reason Napatech stops
>>>>>>> maintaining the code, then we have been informed that the DPDK
>>>>>>> community might delete the PMD from the repository, and we understand that.
>>>>>>>
>>>>>>> In regards to splitting the code, I don't see this as a good option.
>>>>>>> While I of course agree it would be easier to review and
>>>>>>> understand, the code should also result in a meaningful product.
>>>>>>> Of the 87k lines of code, 53k lines is needed to start-up the
>>>>>>> FPGA to a state the it is ready to receive traffic. But at this
>>>>>>> point all packets would simply be discarded, and to be honest,
>>>>>>> there are better and cheaper options out there, if nothing more
>>>>>>> than basic functionality is needed. 34k lines are used to setup
>>>>>>> filters based on rte_flow. The thing is, that you need to
>>>>>>> initialize all modules in the FPGA TX- and RX-pipelines with valid data, even if you don't need the features those modules provide.
>>>>>>> As a result, if you split up the 34k lines, then the product
>>>>>>> would not be functional. Of course some of the top level logic
>>>>>>> could be split out, but at this point we are talking about
>>>>>>> splitting 87k lines into 80k and 7k, which I don't think is worth it.
>>>>>>
>>>>>> Actually I think it is worth.
>>>>>> There is a benefit in isolating the small basic init part from the
>>>>>> more complex features.
>>>>>>
>>>>>>
>>>>>>>>>> As far as I understand last patch opens a socket interface and
>>>>>>>>>> an external application can sent control commands via this interface.
>>>>>>>>>> I am not sure about this side control channel, what is missing
>>>>>>>>>> in the DPDK API? Can we try to address them in the DPDK layer
>>>>>>>>>> instead of a driver specific solution?
>>>>>>>>>
>>>>>>>>> That would be great.
>>>>>>>>>
>>>>>>>>> AFAIK, other vendors also has a bunch of out-of-band
>>>>>>>>> communication, e.g. magical EAL parameters to the MLX drivers.
>>>>>>>>> So let's not be too hard on the newcomers. ;-)
>>>>>>>>>
>>>>>>>>
>>>>>>>> I did some thinking for this one too,
>>>>>>>>
>>>>>>>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>>>>>>>> (Indeed there are already some ways to do this, without PMD
>>>>>>>> exposing a socket interface.)
>>>>>>>>
>>>>>>>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>>>>>>>> And overall this reduces portability of the DPDK application,
>>>>>>>> each application becomes unique to a device (This is a bad
>>>>>>>> thing, but I also need some feedback how bad it is in real
>>>>>>>> life.)
>>>>>>>>
>>>>>>>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>>>>>>>>
>>>>>>>> But creating a socket interface directly from the driver is more than PMD specific API. Technically application control interface can rely completely to this. Even we assume this is not for control, but just for debug, I can see it can be useful for debug and again practical thing to do, I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>>>>>>>>
>>>>>>>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>>>>>>>> And improve and extend the ethdev layer to satisfy driver needs.
>>>>>>>>
>>>>>>>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>>>>>>>>
>>>>>>>
>>>>>>> The reason we have the addition control channel is not provide
>>>>>>> additional functionality. We have customers with use-cases that
>>>>>>> require multiple processes. Since Napatech adapters do not
>>>>>>> support configuration through VFs, then secondary applications
>>>>>>> must send their rte_flow to a main application, which will then
>>>>>>> setup the flow through it's PF. This control channel "hides"
>>>>>>> these details, and make the product easier for users to adapt to their existing solutions.
>>>>>>
>>>>>> I think you need to explore VF representors.
>>>>>> This is what is done with other drivers, and it make them compatible.
>>>>>>
>>>>>>> If you stand firm on rejecting the control channel, then we have
>>>>>>> to go back to the drawing board on this issue. We did look at
>>>>>>> DPDK's multi-process support, and actually had some support for
>>>>>>> this, but we determined that for our use-case it was better to
>>>>>>> have a communication channel, and no shared memory.
>>>>>>
>>>>>> I'm not sure your need is about secondary process.
>>>>>> Let's discuss this need in a meeting if needed.
>>>>>> Anyway, the message is that we want to be part of such design decision.
>>>>>>
>>>>>>
>>>>>>>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach approved once, some others will point to it and will ask to do the same which is fair in their perspective. I had multiple instance of this in the past.
>>>>>>>>
>>>>>>>> Of course we are being easy to newcomers but not in a way to
>>>>>>>> allow code that we believe is not good thing to do, but going
>>>>>>>> easy on process may be :)
>>>>>>>>
>>>>>>>
>>>>>>> We are grateful for any leniency you may show us ;-)
>>>>>>>
>>>>>>> Thanks again,
>>>>>>> Christian
>>>>>>>
>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Thanks,
>>>>>>>>>> ferruh
>>>>>>>>>
>>>>>>>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>>>>>>>>>
>>>>>>>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>>>>>>>>>
>>>>>>>>> -Morten
>>>>>>
>>>>>>
>>>>>> We are going to discuss the process in the technical board today.
>>>>>>
>>>>>>
>>>>>
>>>>> Hi Mykola, Christian,
>>>>>
>>>>> As discussed, following are a few good examples from the DPDK history, there is no "fits all, fixed guidelines", but they can serve as samples:
>>>>>
>>>>> Marvell cnxk:
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2flist%2f%3fseries%3d17449%26state%3d%252A%26a
>>>>> rchive%3dboth&c=E,1,DmXU0iHwXoSaZ4bKn-yhX9J8XmFBispd2ut7pxLNBkK3Q4L
>>>>> VpG_zmOf1jnWSS-Y0Fx-TNbPnQDHyBZkDj23Gu7zjPZ5nsA7pid5CsE2vxNk,&typo=
>>>>> 1
>>>>>
>>>>>
>>>>> Solarflare sfc (before patchwork series support):
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-2-git-send-email-ar
>>>>> ybchenko%40solarflare.com%2f&c=E,1,E9oUT_1WuNC2JA8x7an3rC_Pm5g1L5cx
>>>>> JKQ6pTwSbCWSJpiLH2GnmgfFkUqViOOwkpS2df8kgBvHjmulKaWhyr4BBizUT-sL5LJ
>>>>> v21Hx4RtHtK3vjhcKpg,,&typo=1
>>>>> to
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-56-git-send-email-a
>>>>> rybchenko%40solarflare.com%2f&c=E,1,GByF_TiC_q11iVPpiPgpCMlSge-J0Xf
>>>>> T0zHkriK0rde1Qt1RG7uf6mETQkTSQ-1V86Z7EtRcxlvSsed1sqn8RWfN8KFSbd7NaA
>>>>> kfbDiehn_vSRzja45rQgv53Q,,&typo=1
>>>>>
>>>>>
>>>>> Intel ice:
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2flist%2f%3fseries%3d2842%26state%3d%252A%26ar
>>>>> chive%3dboth&c=E,1,zQwvAIR3ToLIhT09bVxm_HEF-dp8eyTqhsKB3eOYgIJdd2WS
>>>>> _0ZlTbQKfr9KLyTA3A2A2HzBbjIlz21D_hWVgS_INmmC5eew1J0QBH-PoRNd&typo=1
>>>>>
>>>>
>>>> Thank you for the links, they have been very helpful.
>>>>
>>>> After a lot of internal discussion, Napatech has decided to implement some architectural changes to our PMD that will allow us to more easily split up the code into smaller features. The work will require some time, which means that we will not be ready for the 23.11 release. The current goal is to attempt to upstream a quite basic PMD in time for 24.07, and a fully featured PMD for 24.11.
>>>>
>>>>
>>>
>>> Hi Christian,
>>>
>>> Good to see there is a solid plan for upstreaming but also not that
>>> good that it is postponed,
>>>
>>> I am aware it is all tied to your internal planning/resourcing etc,
>>> but since the effort already started, would it be possible to squeeze a
>>> very basic driver into this release, which just does link up and the most basic Rx/Tx?
>>> It gives opportunity to experiment on device to users.
>>>
>>> We can accept it up to -rc3, which is end of October, so there is
>>> still some time?
>>>
>>> This is just a suggestion though, no pressure intended.
>>
>> I agree with Ferruh, better to start early and small.
>> It shouldn't be too hard to introduce the skeleton of the driver.
> 
> Hi Ferruh and Thomas,
> 
> My apologies for the late response, I have been sick the last week.
> 
> We can try to create a small PMD in time. The reason I'm cautious is because Napatech plans to make quite large changes to the PMD, to achieve a more stable and modular code-base. This means that future updates will have quite large diffs, until these changes are in place.
> 

I think upstreaming in this release makes more sense if it is the first
step of the gradually improving/growing the driver process.

If you will need to scratch what exists in this release and replace it
with something completely new, I agree there is less value to upstream
in this release.


^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2023-10-09  7:57                     ` Christian Koue Muf
  2023-10-09  9:52                       ` Ferruh Yigit
@ 2024-03-29 11:24                       ` Ferruh Yigit
  2024-04-03 10:55                         ` Mykola Kostenok
  1 sibling, 1 reply; 142+ messages in thread
From: Ferruh Yigit @ 2024-03-29 11:24 UTC (permalink / raw)
  To: Christian Koue Muf, Thomas Monjalon, Morten Brørup, Mykola Kostenok
  Cc: dev, andrew.rybchenko, techboard

On 10/9/2023 8:57 AM, Christian Koue Muf wrote:
> On 9/29/2023 12:24 PM, Thomas Monjalon wrote:
>> 29/09/2023 11:46, Ferruh Yigit:
>>> On 9/29/2023 10:21 AM, Christian Koue Muf wrote:
>>>> On 9/21/2023 4:05 PM, Ferruh Yigit wrote:
>>>>> On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
>>>>>> Hello,
>>>>>>
>>>>>> 19/09/2023 11:06, Christian Koue Muf:
>>>>>>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
>>>>>>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
>>>>>>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
>>>>>>>>>> Sent: Friday, 15 September 2023 17.55
>>>>>>>>>>
>>>>>>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
>>>>>>>>>>> From: Christian Koue Muf <ckm@napatech.com>
>>>>>>>>>>>
>>>>>>>>>>> The NTNIC PMD does not rely on a kernel space Napatech
>>>>>>>>>>> driver, thus all defines related to the register layout are
>>>>>>>>>>> part of the PMD code, which will be added in later commits.
>>>>>>>>>>>
>>>>>>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
>>>>>>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Hi Mykola, Christian,
>>>>>>>>>>
>>>>>>>>>> This PMD scares me, overall it is a big drop:
>>>>>>>>>> "249 files changed, 87128 insertions(+)"
>>>>>>>>>>
>>>>>>>>>> I think it is not possible to review all in one release cycle,
>>>>>>>>>> and it is not even possible to say if all code is used or not.
>>>>>>>>>>
>>>>>>>>>> I can see code is already developed, and it is difficult to
>>>>>>>>>> restructure developed code, but restructure it into small
>>>>>>>>>> pieces really helps for reviews.
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Driver supports good list of features, can it be possible to
>>>>>>>>>> distribute upstream effort into multiple release.
>>>>>>>>>> Starting from basic functionality and add features gradually.
>>>>>>>>>> Target for this release can be providing datapath, and add
>>>>>>>>>> more if we have time in the release, what do you think?
>>>>>>
>>>>>> I was expecting to get only Rx/Tx in this release, not really more.
>>>>>>
>>>>>> I agree it may be interesting to discuss some design and check
>>>>>> whether we need more features in ethdev as part of the driver
>>>>>> upstreaming process.
>>>>>>
>>>>>>
>>>>>>>>>> Also there are large amount of base code (HAL / FPGA code),
>>>>>>>>>> instead of adding them as a bulk, relevant ones with a feature
>>>>>>>>>> can be added with the feature patch, this eliminates dead code
>>>>>>>>>> in the base code layer, also helps user/review to understand
>>>>>>>>>> the link between driver code and base code.
>>>>>>
>>>>>> Yes it would be interesting to see what is really needed for the
>>>>>> basic initialization and what is linked to a specific offload or configuration feature.
>>>>>>
>>>>>> As a maintainer, I have to do some changes across all drivers
>>>>>> sometimes, and I use git blame a lot to understand why something was added.
>>>>>>
>>>>>>
>>>>>>>>> Jumping in here with an opinion about welcoming new NIC vendors to the community:
>>>>>>>>>
>>>>>>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the vendor to take responsibility for the quality of the PMD, including providing a maintainer and support backporting of fixes to the PMD in LTS releases. This should align with the vendor's business case for upstreaming their driver.
>>>>>>>>>
>>>>>>>>> If the vendor provides one big patch series, which may be difficult to understand/review, the fallout mainly hits the vendor's customers (and thus the vendor's support organization), not the community as a whole.
>>>>>>>>>
>>>>>>>>
>>>>>>>> Hi Morten,
>>>>>>>>
>>>>>>>> I was thinking same before making my above comment, what happens if vendors submit as one big patch and when a problem occurs we can ask owner to fix. Probably this makes vendor happy and makes my life (or any other maintainer's life) easier, it is always easier to say yes.
>>>>>>>>
>>>>>>>>
>>>>>>>> But I come up with two main reasons to ask for a rework:
>>>>>>>>
>>>>>>>> 1- Technically any vendor can deliver their software to their
>>>>>>>> customers via a public git repository, they don't have to
>>>>>>>> upstream to
>>>>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=E
>>>>>>>> ,1,N
>>>>>>>> poJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J9
>>>>>>>> bOLl nhoR6fFAzWtCzOhRCzVruYj520zZORv6-MjJeSC5TrGnIFL&typo=1,
>>>>>>>> but upstreaming has many benefits.
>>>>>>>>
>>>>>>>> One of those benefits is upstreaming provides a quality assurance for vendor's customers (that is why customer can be asking for this, as we are having in many cases), and this quality assurance comes from additional eyes reviewing the code and guiding vendors for the DPDK quality standards (some vendors already doing pretty good, but new ones sometimes requires hand-holding).
>>>>>>>>
>>>>>>>> If the driver is one big patch series, it is practically not possible to review it; I can catch a few bits here or there, you may catch some others, but practically it will be merged without review, and we will fail in our quality assurance task.
>>>>>>>>
>>>>>>>> 2- Make code more accessible to the rest of the world.
>>>>>>>>
>>>>>>>> When it is a big patch, code can be functional but lots of details, reasoning, relation between components gets lost, which makes it even harder for an external developer, like me, to understand it (I am a mere guinea pig here :).
>>>>>>>>
>>>>>>>> If a customer would like to add a feature themselves, or fix something, even after the vendor is no longer working on that product, the customer needs to understand the code or some reasoning in the code.
>>>>>>>> Or if someone wants to backport the driver to rust, or a DPDK developer wants to do a rework that requires updating all drivers, or a tester would like to analyze the code to figure out behavior differences of the devices. I think I have witnessed all the above cases in real life.
>>>>>>>>
>>>>>>>> If driver is split into more patches, it makes patch easier to understand which makes code practically more accessible to other developers that are not expert in driver.
>>>>>>
>>>>>> I fully agree about the 2 reasons for upstreaming piece by piece.
>>>>>>
>>>>>>
>>>>>>>> Overall, yes splitting patch takes time and effort, and yes this is an overhead for a code that is already developed, but I think benefit is big so it worth doing the task.
>>>>>>
>>>>>> In the meantime, if some features are not yet upstreamed in a
>>>>>> release, a user can apply the missing patches from the mailing list to get the features.
>>>>>>
>>>>>>
>>>>>>>>> We, the community, should not make it too difficult for vendors trying to upstream their drivers. I certainly consider it unreasonable to ask a vendor to postpone the release of some existing features by effectively an entire year (considering that only LTS releases are relevant for most of us) because we want the vendor to refactor the patch series to match our preferences within an unrealistic timeframe.
>>>>>>
>>>>>> You're right Morten, we try to be as welcoming as possible, but as
>>>>>> Ferruh said, we want to be able to understand how a driver is
>>>>>> built, even if not understanding all details.
>>>>>>
>>>>>> In Open Source, I think not only the code should be available, we
>>>>>> must also take care of explanations and documentation.
>>>>>>
>>>>>>
>>>>>>>> Agree to not make upstreaming difficult for new vendors, and indeed we are encouraging more vendors to be upstream their code, this is in best interest of both sides.
>>>>>>>>
>>>>>>>> Distributing the upstreaming effort over a year was just a suggestion; it can go in earlier as it becomes ready, but I can see it will take time to split the driver into features and upstream them.
>>>>>>
>>>>>> Driver features can be added until -rc2 (in one month).
>>>>>>
>>>>>>
>>>>>>>> As I am from a vendor too, I can understand the product/customer pressure, but I hope this approach can encourage vendors start upstreaming early or even better upstream as they develop the code.
>>>>>>>
>>>>>>> Hi Ferruh,
>>>>>>>
>>>>>>> First of all, thank you for starting the work to review our code.
>>>>>>>
>>>>>>> As Morten said Napatech plans to take all responsibility for the
>>>>>>> quality of the PMD source code. We expect to provide all fixes
>>>>>>> needed in the future. If for some reason Napatech stops
>>>>>>> maintaining the code, then we have been informed that the DPDK
>>>>>>> community might delete the PMD from the repository, and we understand that.
>>>>>>>
>>>>>>> In regards to splitting the code, I don't see this as a good option.
>>>>>>> While I of course agree it would be easier to review and
>>>>>>> understand, the code should also result in a meaningful product.
>>>>>>> Of the 87k lines of code, 53k lines are needed to start up the
>>>>>>> FPGA to a state where it is ready to receive traffic. But at this
>>>>>>> point all packets would simply be discarded, and to be honest,
>>>>>>> there are better and cheaper options out there, if nothing more
>>>>>>> than basic functionality is needed. 34k lines are used to set up
>>>>>>> filters based on rte_flow. The thing is, that you need to
>>>>>>> initialize all modules in the FPGA TX- and RX-pipelines with valid data, even if you don't need the features those modules provide.
>>>>>>> As a result, if you split up the 34k lines, then the product
>>>>>>> would not be functional. Of course some of the top level logic
>>>>>>> could be split out, but at this point we are talking about
>>>>>>> splitting 87k lines into 80k and 7k, which I don't think is worth it.
>>>>>>
>>>>>> Actually I think it is worth.
>>>>>> There is a benefit in isolating the small basic init part from the
>>>>>> more complex features.
>>>>>>
>>>>>>
>>>>>>>>>> As far as I understand last patch opens a socket interface and
>>>>>>>>>> an external application can sent control commands via this interface.
>>>>>>>>>> I am not sure about this side control channel, what is missing
>>>>>>>>>> in the DPDK API? Can we try to address them in the DPDK layer
>>>>>>>>>> instead of a driver specific solution?
>>>>>>>>>
>>>>>>>>> That would be great.
>>>>>>>>>
>>>>>>>>> AFAIK, other vendors also has a bunch of out-of-band
>>>>>>>>> communication, e.g. magical EAL parameters to the MLX drivers.
>>>>>>>>> So let's not be too hard on the newcomers. ;-)
>>>>>>>>>
>>>>>>>>
>>>>>>>> I did some thinking for this one too,
>>>>>>>>
>>>>>>>> As we are in userspace, it is easy to have side control channel, and this can make users life easy, so this is a practical thing to do.
>>>>>>>> (Indeed there are already some ways to do this, without PMD
>>>>>>>> exposing a socket interface.)
>>>>>>>>
>>>>>>>> But this also reduces effort developers putting on DPDK layer solution, because it is always easier to add more support to the driver only.
>>>>>>>> And overall this reduces portability of the DPDK application,
>>>>>>>> each application becomes unique to a device (This is a bad
>>>>>>>> thing, but I also need some feedback how bad it is in real
>>>>>>>> life.)
>>>>>>>>
>>>>>>>> To balance this, we said if a feature is too specific to a device, it can add device specific API and this is better than device specific features pollute the common, most used code. And push back to introduce more new PMD specific APIs unless it is really needed.
>>>>>>>>
>>>>>>>> But creating a socket interface directly from the driver is more than a PMD-specific API. Technically the application control interface can rely completely on this. Even if we assume this is not for control, but just for debug, I can see it can be useful for debug and again a practical thing to do; I am still not sure how much it hurts if each driver has a custom socket interface for their debug needs.
>>>>>>>>
>>>>>>>> Overall it makes more sense to me to have a unified/common interface from drivers to DPDK applications, which is through the ethdev layer.
>>>>>>>> And improve and extend the ethdev layer to satisfy driver needs.
>>>>>>>>
>>>>>>>> In this specific example, I am for rejecting the socket interface patch, but I would like to get more feedback from @techboard.
>>>>>>>>
>>>>>>>
>>>>>>> The reason we have the additional control channel is not to provide
>>>>>>> additional functionality. We have customers with use-cases that
>>>>>>> require multiple processes. Since Napatech adapters do not
>>>>>>> support configuration through VFs, then secondary applications
>>>>>>> must send their rte_flow to a main application, which will then
>>>>>>> set up the flow through its PF. This control channel "hides"
>>>>>>> these details, and makes the product easier for users to adapt to their existing solutions.
>>>>>>
>>>>>> I think you need to explore VF representors.
>>>>>> This is what is done with other drivers, and it makes them compatible.
>>>>>>
>>>>>>> If you stand firm on rejecting the control channel, then we have
>>>>>>> to go back to the drawing board on this issue. We did look at
>>>>>>> DPDK's multi-process support, and actually had some support for
>>>>>>> this, but we determined that for our use-case it was better to
>>>>>>> have a communication channel, and no shared memory.
>>>>>>
>>>>>> I'm not sure your need is about secondary process.
>>>>>> Let's discuss this need in a meeting if needed.
>>>>>> Anyway, the message is that we want to be part of such design decision.
>>>>>>
>>>>>>
>>>>>>>> And related to not being too hard on the newcomers, unrelated to being a newcomer or not, if a process/feature/approach is approved once, some others will point to it and will ask to do the same, which is fair from their perspective. I had multiple instances of this in the past.
>>>>>>>>
>>>>>>>> Of course we are being easy to newcomers but not in a way to
>>>>>>>> allow code that we believe is not good thing to do, but going
>>>>>>>> easy on process may be :)
>>>>>>>>
>>>>>>>
>>>>>>> We are grateful for any leniency you may show us ;-)
>>>>>>>
>>>>>>> Thanks again,
>>>>>>> Christian
>>>>>>>
>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Thanks,
>>>>>>>>>> ferruh
>>>>>>>>>
>>>>>>>>> Thank you, Ferruh, for taking good care of the community by providing constructive feedback like this to new NIC vendors!
>>>>>>>>>
>>>>>>>>> Please note that my feedback is entirely process related. I didn’t review the driver, so I have no technical comments to the patch series.
>>>>>>>>>
>>>>>>>>> -Morten
>>>>>>
>>>>>>
>>>>>> We are going to discuss the process in the technical board today.
>>>>>>
>>>>>>
>>>>>
>>>>> Hi Mykola, Christian,
>>>>>
>>>>> As discussed, following are a few good examples from the DPDK history, there is no "fits all, fixed guidelines", but they can serve as samples:
>>>>>
>>>>> Marvell cnxk:
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2flist%2f%3fseries%3d17449%26state%3d%252A%26a
>>>>> rchive%3dboth&c=E,1,DmXU0iHwXoSaZ4bKn-yhX9J8XmFBispd2ut7pxLNBkK3Q4L
>>>>> VpG_zmOf1jnWSS-Y0Fx-TNbPnQDHyBZkDj23Gu7zjPZ5nsA7pid5CsE2vxNk,&typo=
>>>>> 1
>>>>>
>>>>>
>>>>> Solarflare sfc (before patchwork series support):
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-2-git-send-email-ar
>>>>> ybchenko%40solarflare.com%2f&c=E,1,E9oUT_1WuNC2JA8x7an3rC_Pm5g1L5cx
>>>>> JKQ6pTwSbCWSJpiLH2GnmgfFkUqViOOwkpS2df8kgBvHjmulKaWhyr4BBizUT-sL5LJ
>>>>> v21Hx4RtHtK3vjhcKpg,,&typo=1
>>>>> to
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-56-git-send-email-a
>>>>> rybchenko%40solarflare.com%2f&c=E,1,GByF_TiC_q11iVPpiPgpCMlSge-J0Xf
>>>>> T0zHkriK0rde1Qt1RG7uf6mETQkTSQ-1V86Z7EtRcxlvSsed1sqn8RWfN8KFSbd7NaA
>>>>> kfbDiehn_vSRzja45rQgv53Q,,&typo=1
>>>>>
>>>>>
>>>>> Intel ice:
>>>>> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
>>>>> org%2fproject%2fdpdk%2flist%2f%3fseries%3d2842%26state%3d%252A%26ar
>>>>> chive%3dboth&c=E,1,zQwvAIR3ToLIhT09bVxm_HEF-dp8eyTqhsKB3eOYgIJdd2WS
>>>>> _0ZlTbQKfr9KLyTA3A2A2HzBbjIlz21D_hWVgS_INmmC5eew1J0QBH-PoRNd&typo=1
>>>>>
>>>>
>>>> Thank you for the links, they have been very helpful.
>>>>
>>>> After a lot of internal discussion, Napatech has decided to implement some architectural changes to our PMD that will allow us to more easily split up the code into smaller features. The work will require some time, which means that we will not be ready for the 23.11 release. The current goal is to attempt to upstream a quite basic PMD in time for 24.07, and a fully featured PMD for 24.11.
>>>>
>>>>
>>>
>>> Hi Christian,
>>>
>>> Good to see there is a solid plan for upstreaming but also not that
>>> good that it is postponed,
>>>
>>> I am aware it is all tied to your internal planning/resourcing etc,
>>> but since the effort already started, would it be possible to squeeze a
>>> very basic driver into this release, which just does link up and the most basic Rx/Tx?
>>> It gives opportunity to experiment on device to users.
>>>
>>> We can accept it up to -rc3, which is end of October, so there is
>>> still some time?
>>>
>>> This is just a suggestion though, no pressure intended.
>>
>> I agree with Ferruh, better to start early and small.
>> It shouldn't be too hard to introduce the skeleton of the driver.
> 
> Hi Ferruh and Thomas,
> 
> My apologies for the late response, I have been sick the last week.
> 
> We can try to create a small PMD in time. The reason I'm cautious is because Napatech plans to make quite large changes to the PMD, to achieve a more stable and modular code-base. This means that future updates will have quite large diffs, until these changes are in place.
> 
> 

Hi Christian, Mykola,

What is the status of the 'ntnic'?
Will there be some upstreaming effort for v24.07?


^ permalink raw reply	[flat|nested] 142+ messages in thread

* RE: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2024-03-29 11:24                       ` Ferruh Yigit
@ 2024-04-03 10:55                         ` Mykola Kostenok
  2024-04-04 12:49                           ` Ferruh Yigit
  0 siblings, 1 reply; 142+ messages in thread
From: Mykola Kostenok @ 2024-04-03 10:55 UTC (permalink / raw)
  To: Ferruh Yigit, Christian Koue Muf, Thomas Monjalon, Morten Brørup
  Cc: dev, andrew.rybchenko, techboard

Hi, Ferruh.
Yes, we plan to send new 'ntnic' patch series into v24.07
Right now, we are doing refactoring of 'ntnic' code to make it modular. So, we will send it split into smaller patches.

Best regards, 
Mykola Kostenok.

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@amd.com>
> Sent: Friday, March 29, 2024 1:25 PM
> To: Christian Koue Muf <ckm@napatech.com>; Thomas Monjalon
> <thomas@monjalon.net>; Morten Brørup <mb@smartsharesystems.com>;
> Mykola Kostenok <mko-plv@napatech.com>
> Cc: dev@dpdk.org; andrew.rybchenko@oktetlabs.ru; techboard@dpdk.org
> Subject: Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register
> defines
> 
> On 10/9/2023 8:57 AM, Christian Koue Muf wrote:
> > On 9/29/2023 12:24 PM, Thomas Monjalon wrote:
> >> 29/09/2023 11:46, Ferruh Yigit:
> >>> On 9/29/2023 10:21 AM, Christian Koue Muf wrote:
> >>>> On 9/21/2023 4:05 PM, Ferruh Yigit wrote:
> >>>>> On 9/20/2023 2:17 PM, Thomas Monjalon wrote:
> >>>>>> Hello,
> >>>>>>
> >>>>>> 19/09/2023 11:06, Christian Koue Muf:
> >>>>>>> On 9/18/23 10:34 AM, Ferruh Yigit wrote:
> >>>>>>>> On 9/15/2023 7:37 PM, Morten Brørup wrote:
> >>>>>>>>>> From: Ferruh Yigit [mailto:ferruh.yigit@amd.com]
> >>>>>>>>>> Sent: Friday, 15 September 2023 17.55
> >>>>>>>>>>
> >>>>>>>>>> On 9/8/2023 5:07 PM, Mykola Kostenok wrote:
> >>>>>>>>>>> From: Christian Koue Muf <ckm@napatech.com>
> >>>>>>>>>>>
> >>>>>>>>>>> The NTNIC PMD does not rely on a kernel space Napatech
> >>>>>>>>>>> driver, thus all defines related to the register layout are
> >>>>>>>>>>> part of the PMD code, which will be added in later commits.
> >>>>>>>>>>>
> >>>>>>>>>>> Signed-off-by: Christian Koue Muf <ckm@napatech.com>
> >>>>>>>>>>> Reviewed-by: Mykola Kostenok <mko-plv@napatech.com>
> >>>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>> Hi Mykola, Christian,
> >>>>>>>>>>
> >>>>>>>>>> This PMD scares me, overall it is a big drop:
> >>>>>>>>>> "249 files changed, 87128 insertions(+)"
> >>>>>>>>>>
> >>>>>>>>>> I think it is not possible to review all in one release
> >>>>>>>>>> cycle, and it is not even possible to say if all code used or not.
> >>>>>>>>>>
> >>>>>>>>>> I can see code is already developed, and it is difficult to
> >>>>>>>>>> restructure developed code, but restructure it into small
> >>>>>>>>>> pieces really helps for reviews.
> >>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>> Driver supports good list of features, can it be possible to
> >>>>>>>>>> distribute upstream effort into multiple release.
> >>>>>>>>>> Starting from basic functionality and add features gradually.
> >>>>>>>>>> Target for this release can be providing datapath, and add
> >>>>>>>>>> more if we have time in the release, what do you think?
> >>>>>>
> >>>>>> I was expecting to get only Rx/Tx in this release, not really more.
> >>>>>>
> >>>>>> I agree it may be interesting to discuss some design and check
> >>>>>> whether we need more features in ethdev as part of the driver
> >>>>>> upstreaming process.
> >>>>>>
> >>>>>>
> >>>>>>>>>> Also there are large amount of base code (HAL / FPGA code),
> >>>>>>>>>> instead of adding them as a bulk, relevant ones with a
> >>>>>>>>>> feature can be added with the feature patch, this eliminates
> >>>>>>>>>> dead code in the base code layer, also helps user/review to
> >>>>>>>>>> understand the link between driver code and base code.
> >>>>>>
> >>>>>> Yes it would be interesting to see what is really needed for the
> >>>>>> basic initialization and what is linked to a specific offload or
> configuration feature.
> >>>>>>
> >>>>>> As a maintainer, I have to do some changes across all drivers
> >>>>>> sometimes, and I use git blame a lot to understand why something
> was added.
> >>>>>>
> >>>>>>
> >>>>>>>>> Jumping in here with an opinion about welcoming new NIC
> vendors to the community:
> >>>>>>>>>
> >>>>>>>>> Generally, if a NIC vendor supplies a PMD for their NIC, I expect the
> vendor to take responsibility for the quality of the PMD, including providing a
> maintainer and support backporting of fixes to the PMD in LTS releases. This
> should align with the vendor's business case for upstreaming their driver.
> >>>>>>>>>
> >>>>>>>>> If the vendor provides one big patch series, which may be difficult
> to understand/review, the fallout mainly hits the vendor's customers (and
> thus the vendor's support organization), not the community as a whole.
> >>>>>>>>>
> >>>>>>>>
> >>>>>>>> Hi Morten,
> >>>>>>>>
> >>>>>>>> I was thinking same before making my above comment, what
> happens if vendors submit as one big patch and when a problem occurs we
> can ask owner to fix. Probably this makes vendor happy and makes my life (or
> any other maintainer's life) easier, it is always easier to say yes.
> >>>>>>>>
> >>>>>>>>
> >>>>>>>> But I come up with two main reasons to ask for a rework:
> >>>>>>>>
> >>>>>>>> 1- Technically any vendor can deliver their software to their
> >>>>>>>> customers via a public git repository, they don't have to
> >>>>>>>> upstream to
> >>>>>>>>
> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fdpdk.org&c=
> >>>>>>>> E
> >>>>>>>> ,1,N
> >>>>>>>>
> poJejuuvPdOPfcFJYtsmkQF6PVrDjGsZ8x_gi5xDrTyZokK_nM11u4ZpzHgM10J
> >>>>>>>> 9 bOLl nhoR6fFAzWtCzOhRCzVruYj520zZORv6-
> MjJeSC5TrGnIFL&typo=1,
> >>>>>>>> but upstreaming has many benefits.
> >>>>>>>>
> >>>>>>>> One of those benefits is upstreaming provides a quality assurance
> for vendor's customers (that is why customer can be asking for this, as we are
> having in many cases), and this quality assurance comes from additional eyes
> reviewing the code and guiding vendors for the DPDK quality standards (some
> vendors already doing pretty good, but new ones sometimes requires hand-
> holding).
> >>>>>>>>
> >>>>>>>> If driver is one big patch series, it is practically not possible to review
> it, I can catch a few bits here or there, you may some others, but practically it
> will be merged without review, and we will fail on our quality assurance task.
> >>>>>>>>
> >>>>>>>> 2- Make code more accessible to the rest of the world.
> >>>>>>>>
> >>>>>>>> When it is a big patch, code can be functional but lots of details,
> reasoning, relation between components gets lost, which makes it even harder
> for an external developer, like me, to understand it (I am a mere guinea pig
> here :).
> >>>>>>>>
> >>>>>>>> If a customer would like to add a feature themselves, or fix
> something, even after vendor no more working on that product anymore,
> customer needs to understand the code or some reasoning in the code.
> >>>>>>>> Or if someone wants to backport the driver to rust, or a DPDK
> developer wants to do a rework that requires updating all drivers, or a tester
> would like to analyze the code to figure out behavior difference of the devices.
> I think I have witness all above cases in real life.
> >>>>>>>>
> >>>>>>>> If driver is split into more patches, it makes patch easier to
> understand which makes code practically more accessible to other developers
> that are not expert in driver.
> >>>>>>
> >>>>>> I fully agree about the 2 reasons for upstreaming piece by piece.
> >>>>>>
> >>>>>>
> >>>>>>>> Overall, yes splitting patch takes time and effort, and yes this is an
> overhead for a code that is already developed, but I think benefit is big so it
> worth doing the task.
> >>>>>>
> >>>>>> In the meantime, if some features are not yet upstreamed in a
> >>>>>> release, a user can apply the missing patches from the mailing list to
> get the features.
> >>>>>>
> >>>>>>
> >>>>>>>>> We, the community, should not make it too difficult for vendors
> trying to upstream their drivers. I certainly consider it unreasonable to ask a
> vendor to postpone the release of some existing features by effectively an
> entire year (considering that only LTS releases are relevant for most of us)
> because we want the vendor to refactor the patch series to match our
> preferences within an unrealistic timeframe.
> >>>>>>
> >>>>>> You're right Morten, we try to be as welcoming as possible, but
> >>>>>> as Ferruh said, we want to be able to understand how a driver is
> >>>>>> built, even if not understanding all details.
> >>>>>>
> >>>>>> In Open Source, I think not only the code should be available, we
> >>>>>> must also take care of explanations and documentation.
> >>>>>>
> >>>>>>
> >>>>>>>> Agree to not make upstreaming difficult for new vendors, and
> indeed we are encouraging more vendors to be upstream their code, this is in
> best interest of both sides.
> >>>>>>>>
> >>>>>>>> Distributing upstreaming effort to a year was just a suggestion, it can
> go in earlier as it is becomes ready but I can see it will take time to split driver
> into features and upstream them.
> >>>>>>
> >>>>>> Driver features can be added until -rc2 (in one month).
> >>>>>>
> >>>>>>
> >>>>>>>> As I am from a vendor too, I can understand the product/customer
> pressure, but I hope this approach can encourage vendors start upstreaming
> early or even better upstream as they develop the code.
> >>>>>>>
> >>>>>>> Hi Ferruh,
> >>>>>>>
> >>>>>>> First of all, thank you for starting the work to review our code.
> >>>>>>>
> >>>>>>> As Morten said Napatech plans to take all responsibility for the
> >>>>>>> quality of the PMD source code. We expect to provide all fixes
> >>>>>>> needed in the future. If for some reason Napatech stops
> >>>>>>> maintaining the code, then we have been informed that the DPDK
> >>>>>>> community might delete the PMD from the repository, and we
> understand that.
> >>>>>>>
> >>>>>>> In regards to splitting the code, I don't see this as a good option.
> >>>>>>> While I of course agree it would be easier to review and
> >>>>>>> understand, the code should also result in a meaningful product.
> >>>>>>> Of the 87k lines of code, 53k lines is needed to start-up the
> >>>>>>> FPGA to a state the it is ready to receive traffic. But at this
> >>>>>>> point all packets would simply be discarded, and to be honest,
> >>>>>>> there are better and cheaper options out there, if nothing more
> >>>>>>> than basic functionality is needed. 34k lines are used to setup
> >>>>>>> filters based on rte_flow. The thing is, that you need to
> >>>>>>> initialize all modules in the FPGA TX- and RX-pipelines with valid data,
> even if you don't need the features those modules provide.
> >>>>>>> As a result, if you split up the 34k lines, then the product
> >>>>>>> would not be functional. Of course some of the top level logic
> >>>>>>> could be split out, but at this point we are talking about
> >>>>>>> splitting 87k lines into 80k and 7k, which I don't think is worth it.
> >>>>>>
> >>>>>> Actually I think it is worth.
> >>>>>> There is a benefit in isolating the small basic init part from
> >>>>>> the more complex features.
> >>>>>>
> >>>>>>
> >>>>>>>>>> As far as I understand last patch opens a socket interface
> >>>>>>>>>> and an external application can sent control commands via this
> interface.
> >>>>>>>>>> I am not sure about this side control channel, what is
> >>>>>>>>>> missing in the DPDK API? Can we try to address them in the
> >>>>>>>>>> DPDK layer instead of a driver specific solution?
> >>>>>>>>>
> >>>>>>>>> That would be great.
> >>>>>>>>>
> >>>>>>>>> AFAIK, other vendors also has a bunch of out-of-band
> >>>>>>>>> communication, e.g. magical EAL parameters to the MLX drivers.
> >>>>>>>>> So let's not be too hard on the newcomers. ;-)
> >>>>>>>>>
> >>>>>>>>
> >>>>>>>> I did some thinking for this one too,
> >>>>>>>>
> >>>>>>>> As we are in userspace, it is easy to have side control channel, and
> this can make users life easy, so this is a practical thing to do.
> >>>>>>>> (Indeed there are already some ways to do this, without PMD
> >>>>>>>> exposing a socket interface.)
> >>>>>>>>
> >>>>>>>> But this also reduces effort developers putting on DPDK layer
> solution, because it is always easier to add more support to the driver only.
> >>>>>>>> And overall this reduces portability of the DPDK application,
> >>>>>>>> each application becomes unique to a device (This is a bad
> >>>>>>>> thing, but I also need some feedback how bad it is in real
> >>>>>>>> life.)
> >>>>>>>>
> >>>>>>>> To balance this, we said if a feature is too specific to a device, it can
> add device specific API and this is better than device specific features pollute
> the common, most used code. And push back to introduce more new PMD
> specific APIs unless it is really needed.
> >>>>>>>>
> >>>>>>>> But creating a socket interface directly from the driver is more than
> PMD specific API. Technically application control interface can rely completely
> to this. Even we assume this is not for control, but just for debug, I can see it
> can be useful for debug and again practical thing to do, I am still not sure how
> much it hurts if each driver has a custom socket interface for their debug
> needs.
> >>>>>>>>
> >>>>>>>> Overall it makes more sense to me to have a unified/common
> interface from drivers to DPDK applications, which is through the ethdev layer.
> >>>>>>>> And improve and extend the ethdev layer to satisfy driver needs.
> >>>>>>>>
> >>>>>>>> In this specific example, I am for rejecting the socket interface patch,
> but I would like to get more feedback from @techboard.
> >>>>>>>>
> >>>>>>>
> >>>>>>> The reason we have the addition control channel is not provide
> >>>>>>> additional functionality. We have customers with use-cases that
> >>>>>>> require multiple processes. Since Napatech adapters do not
> >>>>>>> support configuration through VFs, then secondary applications
> >>>>>>> must send their rte_flow to a main application, which will then
> >>>>>>> setup the flow through it's PF. This control channel "hides"
> >>>>>>> these details, and make the product easier for users to adapt to their
> existing solutions.
> >>>>>>
> >>>>>> I think you need to explore VF representors.
> >>>>>> This is what is done with other drivers, and it make them compatible.
> >>>>>>
> >>>>>>> If you stand firm on rejecting the control channel, then we have
> >>>>>>> to go back to the drawing board on this issue. We did look at
> >>>>>>> DPDK's multi-process support, and actually had some support for
> >>>>>>> this, but we determined that for our use-case it was better to
> >>>>>>> have a communication channel, and no shared memory.
> >>>>>>
> >>>>>> I'm not sure your need is about secondary process.
> >>>>>> Let's discuss this need in a meeting if needed.
> >>>>>> Anyway, the message is that we want to be part of such design
> decision.
> >>>>>>
> >>>>>>
> >>>>>>>> And related to not being too hard on the newcomers, unrelated to
> being a newcomer or not, if a process/feature/approach approved once, some
> others will point to it and will ask to do the same which is fair in their
> perspective. I had multiple instance of this in the past.
> >>>>>>>>
> >>>>>>>> Of course we are being easy to newcomers but not in a way to
> >>>>>>>> allow code that we believe is not good thing to do, but going
> >>>>>>>> easy on process may be :)
> >>>>>>>>
> >>>>>>>
> >>>>>>> We are grateful for any leniency you may show us ;-)
> >>>>>>>
> >>>>>>> Thanks again,
> >>>>>>> Christian
> >>>>>>>
> >>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>> Thanks,
> >>>>>>>>>> ferruh
> >>>>>>>>>
> >>>>>>>>> Thank you, Ferruh, for taking good care of the community by
> providing constructive feedback like this to new NIC vendors!
> >>>>>>>>>
> >>>>>>>>> Please note that my feedback is entirely process related. I didn’t
> review the driver, so I have no technical comments to the patch series.
> >>>>>>>>>
> >>>>>>>>> -Morten
> >>>>>>
> >>>>>>
> >>>>>> We are going to discuss the process in the technical board today.
> >>>>>>
> >>>>>>
> >>>>>
> >>>>> Hi Mykola, Christian,
> >>>>>
> >>>>> As discussed, following are a few good examples from the DPDK history,
> there is no "fits all, fixed guidelines", but they can serve as samples:
> >>>>>
> >>>>> Marvell cnxk:
> >>>>>
> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
> >>>>>
> org%2fproject%2fdpdk%2flist%2f%3fseries%3d17449%26state%3d%252A%
> 26
> >>>>> a
> >>>>> rchive%3dboth&c=E,1,DmXU0iHwXoSaZ4bKn-
> yhX9J8XmFBispd2ut7pxLNBkK3Q4
> >>>>> L
> >>>>> VpG_zmOf1jnWSS-Y0Fx-
> TNbPnQDHyBZkDj23Gu7zjPZ5nsA7pid5CsE2vxNk,&typo
> >>>>> =
> >>>>> 1
> >>>>>
> >>>>>
> >>>>> Solarflare sfc (before patchwork series support):
> >>>>>
> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
> >>>>> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-2-git-send-
> email-a
> >>>>> r
> >>>>>
> ybchenko%40solarflare.com%2f&c=E,1,E9oUT_1WuNC2JA8x7an3rC_Pm5g1
> L5c
> >>>>> x
> >>>>>
> JKQ6pTwSbCWSJpiLH2GnmgfFkUqViOOwkpS2df8kgBvHjmulKaWhyr4BBizUT
> -sL5L
> >>>>> J
> >>>>> v21Hx4RtHtK3vjhcKpg,,&typo=1
> >>>>> to
> >>>>>
> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
> >>>>> org%2fproject%2fdpdk%2fpatch%2f1480436367-20749-56-git-send-
> email-
> >>>>> a
> >>>>>
> rybchenko%40solarflare.com%2f&c=E,1,GByF_TiC_q11iVPpiPgpCMlSge-J0X
> >>>>> f
> >>>>> T0zHkriK0rde1Qt1RG7uf6mETQkTSQ-
> 1V86Z7EtRcxlvSsed1sqn8RWfN8KFSbd7Na
> >>>>> A
> >>>>> kfbDiehn_vSRzja45rQgv53Q,,&typo=1
> >>>>>
> >>>>>
> >>>>> Intel ice:
> >>>>>
> https://linkprotect.cudasvc.com/url?a=https%3a%2f%2fpatchwork.dpdk.
> >>>>>
> org%2fproject%2fdpdk%2flist%2f%3fseries%3d2842%26state%3d%252A%2
> 6a
> >>>>> r
> >>>>> chive%3dboth&c=E,1,zQwvAIR3ToLIhT09bVxm_HEF-
> dp8eyTqhsKB3eOYgIJdd2W
> >>>>> S
> >>>>> _0ZlTbQKfr9KLyTA3A2A2HzBbjIlz21D_hWVgS_INmmC5eew1J0QBH-
> PoRNd&typo=
> >>>>> 1
> >>>>>
> >>>>
> >>>> Thank you for the links, they have been very helpful.
> >>>>
> >>>> After a lot of internal discussion, Napatech has decided to implement
> some architectural changes to our PMD that will allow us to easier split up the
> code into smaller features. The work will require some time, which means that
> we will not be ready for the 23.11 release. The current goal is to attempt to
> upstream a quite basic PMD in time for 24.7, and a fully featured PMD for
> 24.11.
> >>>>
> >>>>
> >>>
> >>> Hi Christian,
> >>>
> >>> Good to see there is a solid plan for upstreaming but also not that
> >>> good that it is postponed,
> >>>
> >>> I am aware it is all tied to your internal planning/resourcing etc,
> >>> but since the effort already started, can it be possible to squeeze
> >>> very basic driver in this release, which just does link up and most basic
> Rx/Tx?
> >>> It gives opportunity to experiment on device to users.
> >>>
> >>> We can accept it up to -rc3, which is end of October, so there is
> >>> still some time?
> >>>
> >>> This is just a suggestion though, no pressure intended.
> >>
> >> I agree with Ferruh, better to start early and small.
> >> It shouldn't be too hard to introduce the skeleton of the driver.
> >
> > Hi Ferruh and Thomas,
> >
> > My apologies for the late response, I have been sick the last week.
> >
> > We can try to create a small PMD in time. The reason I'm cautious is because
> Napatech plan to make quite large changes to the PMD, to achieve a more
> stable and modular code-base. This means that future updates will have quite
> large diffs, until these changes are in place.
> >
> >
> 
> Hi Christian, Mykola,
> 
> What is the status of the 'ntnic'?
> Will there be some upstreaming effort for v24.07?


^ permalink raw reply	[flat|nested] 142+ messages in thread

* Re: [PATCH v16 1/8] net/ntnic: initial commit which adds register defines
  2024-04-03 10:55                         ` Mykola Kostenok
@ 2024-04-04 12:49                           ` Ferruh Yigit
  0 siblings, 0 replies; 142+ messages in thread
From: Ferruh Yigit @ 2024-04-04 12:49 UTC (permalink / raw)
  To: Mykola Kostenok, Christian Koue Muf, Thomas Monjalon, Morten Brørup
  Cc: dev, andrew.rybchenko, techboard

On 4/3/2024 11:55 AM, Mykola Kostenok wrote:

<...>

>>
>> Hi Christian, Mykola,
>>
>> What is the status of the 'ntnic'?
>> Will there be some upstreaming effort for v24.07?
> 
> Hi, Ferruh.
> Yes, we plan to send new 'ntnic' patch series into v24.07
> Right now, we are doing refactoring of 'ntnic' code to make it modular. So, we will send it split into smaller patches.
> 
> 

Great, thanks for the update.
Just a reminder that you can start small, no need to wait for all features
to be ready.


^ permalink raw reply	[flat|nested] 142+ messages in thread

end of thread, other threads:[~2024-04-04 12:50 UTC | newest]

Thread overview: 142+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-16 13:25 [PATCH 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-16 13:25 ` [PATCH 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-16 13:25 ` [PATCH 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-16 13:25 ` [PATCH 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-16 13:25 ` [PATCH 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-16 13:25 ` [PATCH 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-16 13:25 ` [PATCH 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-16 13:25 ` [PATCH 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-16 14:46   ` Stephen Hemminger
2023-08-25 13:52     ` Christian Koue Muf
2023-08-16 14:47   ` Stephen Hemminger
2023-08-17 14:43 ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-17 14:43   ` [PATCH v2 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-17 14:43   ` [PATCH v2 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-17 14:43   ` [PATCH v2 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-17 14:43   ` [PATCH v2 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-17 14:43   ` [PATCH v2 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-17 14:43   ` [PATCH v2 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-17 14:43   ` [PATCH v2 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-17 22:08   ` [PATCH v2 1/8] net/ntnic: initial commit which adds register defines Tyler Retzlaff
2023-08-18 11:01     ` Mykola Kostenok
2023-08-18 18:41 ` [PATCH v4 " Mykola Kostenok
2023-08-18 18:41   ` [PATCH v4 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-18 18:41   ` [PATCH v4 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-18 18:41   ` [PATCH v4 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-18 18:41   ` [PATCH v4 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-18 18:41   ` [PATCH v4 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-18 18:41   ` [PATCH v4 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-18 18:41   ` [PATCH v4 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-21 11:34 ` [PATCH v5 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-21 11:34   ` [PATCH v5 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-21 11:34   ` [PATCH v5 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-21 11:34   ` [PATCH v5 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-21 11:34   ` [PATCH v5 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-21 11:34   ` [PATCH v5 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-21 11:34   ` [PATCH v5 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-21 11:34   ` [PATCH v5 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-21 13:54 ` [PATCH v6 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-21 13:54   ` [PATCH v6 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-21 13:54   ` [PATCH v6 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-21 13:54   ` [PATCH v6 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-21 13:54   ` [PATCH v6 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-21 13:54   ` [PATCH v6 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-21 13:54   ` [PATCH v6 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-21 13:54   ` [PATCH v6 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-22 15:41 ` [PATCH v7 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-22 15:41   ` [PATCH v7 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-22 15:41   ` [PATCH v7 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-22 15:41   ` [PATCH v7 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-22 15:41   ` [PATCH v7 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-22 15:41   ` [PATCH v7 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-22 15:41   ` [PATCH v7 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-29  8:13     ` David Marchand
2023-08-22 15:41   ` [PATCH v7 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-29  8:15 ` [PATCH v8 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-29  8:15   ` [PATCH v8 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-29  8:15   ` [PATCH v8 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-29  8:15   ` [PATCH v8 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-29  8:15   ` [PATCH v8 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-29  8:15   ` [PATCH v8 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-29  8:15   ` [PATCH v8 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-29  8:15   ` [PATCH v8 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-29 10:17 ` [PATCH v9 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-29 10:17   ` [PATCH v9 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-29 10:17   ` [PATCH v9 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-29 10:17   ` [PATCH v9 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-29 10:17   ` [PATCH v9 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-29 10:17   ` [PATCH v9 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-29 10:17   ` [PATCH v9 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-29 10:17   ` [PATCH v9 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-30 16:51 ` [PATCH v10 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-30 16:51   ` [PATCH v10 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-30 16:51   ` [PATCH v10 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-30 16:51   ` [PATCH v10 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-30 16:51   ` [PATCH v10 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-30 16:51   ` [PATCH v10 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-30 16:51   ` [PATCH v10 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-30 16:51   ` [PATCH v10 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-31 12:23 ` [PATCH v11 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-31 12:23   ` [PATCH v11 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-31 12:23   ` [PATCH v11 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-31 12:23   ` [PATCH v11 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-31 12:23   ` [PATCH v11 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-31 12:23   ` [PATCH v11 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-31 12:23   ` [PATCH v11 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-31 12:23   ` [PATCH v11 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-08-31 13:51 ` [PATCH v12 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-08-31 13:51   ` [PATCH v12 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-08-31 13:51   ` [PATCH v12 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-08-31 13:51   ` [PATCH v12 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-08-31 13:51   ` [PATCH v12 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-08-31 13:51   ` [PATCH v12 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-08-31 13:51   ` [PATCH v12 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-08-31 13:51   ` [PATCH v12 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-09-01 12:18 ` [PATCH v13 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-09-01 12:18   ` [PATCH v13 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-09-01 12:18   ` [PATCH v13 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-09-01 12:18   ` [PATCH v13 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-09-01 12:18   ` [PATCH v13 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-09-01 12:18   ` [PATCH v13 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-09-01 12:18   ` [PATCH v13 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-09-01 12:18   ` [PATCH v13 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-09-02 17:26     ` Patrick Robb
2023-09-04 13:53 ` [PATCH v14 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-09-04 13:53   ` [PATCH v14 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-09-04 13:53   ` [PATCH v14 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-09-04 13:53   ` [PATCH v14 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-09-04 13:53   ` [PATCH v14 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-09-04 13:53   ` [PATCH v14 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-09-04 13:54   ` [PATCH v14 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-09-04 13:54   ` [PATCH v14 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-09-05 14:54 ` [PATCH v15 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-09-05 14:54   ` [PATCH v15 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-09-05 14:54   ` [PATCH v15 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-09-05 14:54   ` [PATCH v15 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-09-05 14:54   ` [PATCH v15 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-09-05 14:54   ` [PATCH v15 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-09-05 14:54   ` [PATCH v15 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-09-05 14:54   ` [PATCH v15 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-09-08 16:07 ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Mykola Kostenok
2023-09-08 16:07   ` [PATCH v16 2/8] net/ntnic: adds core registers and fpga functionality Mykola Kostenok
2023-09-08 16:07   ` [PATCH v16 3/8] net/ntnic: adds NT200A02 adapter support Mykola Kostenok
2023-09-08 16:07   ` [PATCH v16 4/8] net/ntnic: adds flow related FPGA functionality Mykola Kostenok
2023-09-08 16:07   ` [PATCH v16 5/8] net/ntnic: adds FPGA abstraction layer Mykola Kostenok
2023-09-08 16:07   ` [PATCH v16 6/8] net/ntnic: adds flow logic Mykola Kostenok
2023-09-08 16:07   ` [PATCH v16 7/8] net/ntnic: adds ethdev and makes PMD available Mykola Kostenok
2023-09-08 16:07   ` [PATCH v16 8/8] net/ntnic: adds socket connection to PMD Mykola Kostenok
2023-09-15 15:54   ` [PATCH v16 1/8] net/ntnic: initial commit which adds register defines Ferruh Yigit
2023-09-15 18:37     ` Morten Brørup
2023-09-18  9:33       ` Ferruh Yigit
2023-09-19  9:06         ` Christian Koue Muf
2023-09-20  9:48           ` Ferruh Yigit
2023-09-20 13:17           ` Thomas Monjalon
2023-09-21 14:04             ` Ferruh Yigit
2023-09-29  9:21               ` Christian Koue Muf
2023-09-29  9:46                 ` Ferruh Yigit
2023-09-29 10:23                   ` Thomas Monjalon
2023-10-09  7:57                     ` Christian Koue Muf
2023-10-09  9:52                       ` Ferruh Yigit
2024-03-29 11:24                       ` Ferruh Yigit
2024-04-03 10:55                         ` Mykola Kostenok
2024-04-04 12:49                           ` Ferruh Yigit

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.